Diffstat (limited to 'src')
-rw-r--r--  src/BUILD.bazel | 28
-rw-r--r--  src/CMakeLists.txt | 26
-rw-r--r--  src/common/cpuinfo/CpuInfo.cpp | 52
-rw-r--r--  src/common/cpuinfo/CpuInfo.h | 9
-rw-r--r--  src/common/cpuinfo/CpuIsaInfo.h | 8
-rw-r--r--  src/core/CL/cl_kernels/common/scatter.cl | 154
-rw-r--r--  src/core/CPP/CPPTypes.cpp | 23
-rw-r--r--  src/core/NEON/NEAsymm.h | 16
-rw-r--r--  src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp | 6
-rw-r--r--  src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp | 6
-rw-r--r--  src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp | 6
-rw-r--r--  src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp | 6
-rw-r--r--  src/core/NEON/kernels/NEMeanStdDevNormalizationKernel.cpp | 6
-rw-r--r--  src/core/NEON/kernels/NEReductionOperationKernel.cpp | 1785
-rw-r--r--  src/core/NEON/kernels/NEReductionOperationKernel.h | 20
-rw-r--r--  src/core/NEON/kernels/NEReorderKernel.cpp | 12
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp | 29
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp | 248
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp | 248
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp | 118
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp | 118
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 284
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 320
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 776
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 754
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 1228
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 1238
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 492
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 644
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 552
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 716
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp | 142
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 420
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 276
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 320
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 716
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 670
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 1176
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 1106
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 484
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 612
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 544
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 684
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp | 142
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp | 214
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp | 244
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 312
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 2830
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1720
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 2146
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 3204
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp | 168
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp | 498
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp | 608
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 416
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 2348
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 2830
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1720
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 2146
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 3204
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp | 168
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp | 498
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp | 608
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 416
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1824
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 2168
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 3586
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 1720
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 2146
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 3208
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp | 168
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 416
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 276
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 296
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 562
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 614
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 968
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 1000
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 346
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 286
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 726
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 784
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 360
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 296
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 624
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 584
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 438
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 760
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 424
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 360
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp | 388
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp | 536
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp | 942
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp | 1254
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp | 554
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp | 790
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp | 1479
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp | 1531
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp | 767
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp | 1065
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp | 1641
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp | 1811
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp | 767
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp | 1065
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp | 1641
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp | 1811
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp | 767
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp | 1065
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp | 1641
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp | 1811
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 278
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 204
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 686
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 656
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 1070
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 1024
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 426
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 456
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 616
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 862
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp | 278
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 204
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp | 686
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp | 656
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp | 1070
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp | 1024
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp | 426
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp | 456
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp | 616
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp | 862
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp | 120
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp | 200
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp | 380
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp | 502
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 840
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 590
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 668
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 1000
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp | 410
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp | 516
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 666
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp | 840
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 590
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 668
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 1000
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp | 410
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp | 516
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp | 590
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp | 668
-rw-r--r--  src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp | 1000
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/depthfirst_driver.hpp | 19
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 226
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp | 344
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 88
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp | 330
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 226
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp | 296
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 88
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp | 282
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp | 176
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 88
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp | 426
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp | 286
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp | 628
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp | 230
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 88
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp | 426
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp | 344
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp | 826
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 158
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp | 98
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 52
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp | 108
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 158
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp | 98
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 52
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp | 108
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp | 142
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 52
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp | 108
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp | 160
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp | 360
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp | 190
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 52
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp | 108
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp | 204
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp | 436
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 174
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp | 44
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 84
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp | 218
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp | 174
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp | 44
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 84
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp | 218
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp | 290
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 84
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp | 218
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp | 332
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp | 528
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp | 284
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp | 84
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp | 218
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp | 352
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp | 586
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp | 11
-rw-r--r--  src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp | 6
-rw-r--r--  src/core/NEON/kernels/arm_gemm/barrier.hpp | 10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/convolver.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp | 103
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp | 16
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp | 70
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp | 133
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp | 12
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp | 86
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_int16.cpp | 16
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_int8.cpp | 70
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp | 174
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_q8_mixed.cpp | 138
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp | 119
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp | 95
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp | 48
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_u8s8fp32.cpp | 89
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_uint16.cpp | 16
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp | 68
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemv_batched.hpp | 6
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a32_interleave6_block1_fp32_fp32.hpp | 66
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp | 80
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp | 168
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp | 152
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp | 80
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp | 110
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp | 110
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp | 218
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp | 132
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp | 196
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp | 54
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp | 346
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp | 324
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp | 68
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp | 162
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp | 140
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp | 136
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp | 160
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp | 68
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp | 154
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp | 192
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp | 154
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp | 192
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp | 136
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp | 144
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp | 76
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp | 345
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp | 345
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp | 358
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp | 334
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp | 358
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp | 334
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp | 76
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp | 363
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp | 118
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp | 48
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp | 122
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp | 150
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp | 122
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp | 150
-rw-r--r--  src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp | 122
-rw-r--r--  src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp | 247
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp | 592
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp | 566
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp | 480
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp | 1817
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp | 2457
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp | 1497
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp | 6
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp | 1181
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16/generic.cpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp | 11
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp | 48
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp | 28
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s16_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4/generic.cpp | 674
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/a55r1.cpp | 42
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/generic.cpp | 208
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/x1.cpp | 208
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u16_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4/generic.cpp | 404
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/a55r1.cpp | 42
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/generic.cpp | 208
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/x1.cpp | 208
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp | 11
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/a55r1.cpp | 470
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/generic.cpp | 486
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/x1.cpp | 488
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp | 1489
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp | 1789
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp | 3285
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp | 2439
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24.hpp | 3
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp | 1389
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp | 983
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp | 3
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp | 2017
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp | 1479
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp | 1593
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp | 875
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp | 1113
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp | 1619
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp | 1529
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp | 403
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp | 643
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp | 2475
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp | 1801
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp | 2163
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp | 1629
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp | 179
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp | 537
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp | 1529
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp | 403
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp | 643
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16.hpp | 103
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16/generic.cpp | 2027
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16.hpp | 101
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16/generic.cpp | 2099
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16.hpp | 116
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16/generic.cpp | 3264
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16.hpp | 112
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16/generic.cpp | 3450
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp | 1629
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp | 179
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp | 537
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp | 26
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp | 80
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp | 52
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp | 80
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp | 52
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12.hpp | 107
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12/generic.cpp | 294
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp | 80
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp | 52
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp | 5
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a53.cpp | 498
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55.cpp | 504
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55r1.cpp | 438
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/generic.cpp | 470
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/x1.cpp | 472
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp | 542
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_pretransposed/generic.cpp | 622
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_6x4.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_8x4.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp | 560
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL.hpp | 85
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL/generic.cpp | 776
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp | 560
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp | 612
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp | 770
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp | 770
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp | 244
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp | 314
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp | 394
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp | 11
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp | 221
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp | 11
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp | 462
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp | 11
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp | 360
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp | 252
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp | 320
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp | 400
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp | 427
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp | 380
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp | 366
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp | 5
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp | 5
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp | 5
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp | 5
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp | 5
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp | 5
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp | 226
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp | 252
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp | 284
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp | 427
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp | 380
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp | 366
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp | 1141
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp | 443
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp | 1077
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp | 443
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp | 765
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp | 6
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp | 751
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL.hpp | 110
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL/generic.cpp | 274
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp | 13
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp | 202
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp | 108
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp | 34
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp | 108
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp | 34
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp | 585
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp | 1023
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp | 337
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp | 755
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp | 337
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp | 585
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp | 383
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp | 607
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp | 629
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp | 1023
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp | 607
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp | 707
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp | 1303
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp | 1585
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp | 139
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp | 387
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp | 923
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp | 607
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp | 707
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL.hpp | 99
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL/generic.cpp | 1502
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL.hpp | 99
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL/generic.cpp | 1418
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL.hpp | 112
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL/generic.cpp | 1675
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp | 139
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp | 387
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp | 923
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp | 202
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp | 114
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp | 114
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mmla_8x3VL.hpp | 7
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp | 114
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp | 202
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL.hpp | 107
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL/generic.cpp | 297
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp | 114
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp | 9
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp | 202
-rw-r--r--  src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp | 38
-rw-r--r--  src/core/NEON/kernels/arm_gemm/performance_parameters.hpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_gemm/quantized.cpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp | 10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/std_transforms_fixed_trB.hpp | 10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp | 10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transform.cpp | 8
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a32_transpose_interleave_8way_32bit.hpp | 34
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp | 334
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp | 601
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp | 435
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp | 461
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp | 654
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp | 1164
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp | 290
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp | 290
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp | 12
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp | 404
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp | 351
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp | 298
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp | 794
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp | 602
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp | 268
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp | 1268
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp | 347
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp | 347
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp | 762
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp | 620
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp | 209
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp | 372
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp | 381
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp | 228
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp | 296
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp | 171
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp | 162
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp | 158
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp | 58
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp | 78
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp | 111
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp | 182
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp | 72
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp | 70
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp | 116
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp | 208
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp | 107
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp | 60
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp | 102
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp | 150
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL.hpp | 92
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_1x4.hpp | 92
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_2x2.hpp | 32
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp | 584
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp | 76
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp | 350
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp | 118
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp | 460
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp | 344
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp | 144
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp | 356
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp | 392
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp | 370
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp | 500
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp | 304
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp | 390
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp | 364
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp | 370
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp | 300
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp | 502
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp | 630
-rw-r--r--  src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp | 382
-rw-r--r--  src/core/NEON/kernels/arm_gemm/utils.hpp | 60
-rw-r--r--  src/core/NEON/kernels/assembly/depthwise.hpp | 7
-rw-r--r--  src/core/NEON/kernels/assembly/pool_common.hpp | 10
-rw-r--r--  src/core/NEON/kernels/assembly/pooling.hpp | 13
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp | 6
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/output_transforms/a64_fp16_4x4_3x3.cpp | 10
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x2_1x7.cpp | 8
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x4_1x5.cpp | 8
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x6_1x3.cpp | 8
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_3x3.cpp | 10
-rw-r--r--  src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_5x5.cpp | 10
-rw-r--r--src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_4x4_3x3.cpp10
-rw-r--r--src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp12
-rw-r--r--src/core/common/Registrars.h22
-rw-r--r--src/core/helpers/LUTManager.cpp27
-rw-r--r--src/core/helpers/LUTManager.h18
-rw-r--r--src/core/utils/quantization/AsymmHelpers.cpp16
-rw-r--r--src/cpu/kernels/CpuActivationKernel.cpp18
-rw-r--r--src/cpu/kernels/CpuDequantizeKernel.cpp328
-rw-r--r--src/cpu/kernels/CpuDequantizeKernel.h16
-rw-r--r--src/cpu/kernels/CpuGemmLowpMatrixMultiplyKernel.cpp6
-rw-r--r--src/cpu/kernels/CpuKernelSelectionTypes.h1
-rw-r--r--src/cpu/kernels/CpuQuantizeKernel.cpp344
-rw-r--r--src/cpu/kernels/CpuQuantizeKernel.h26
-rw-r--r--src/cpu/kernels/CpuSoftmaxKernel.cpp52
-rw-r--r--src/cpu/kernels/CpuSoftmaxKernel.h13
-rw-r--r--src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h16
-rw-r--r--src/cpu/kernels/assembly/arm_gemm.hpp12
-rw-r--r--src/cpu/kernels/assembly/convolution_parameters.hpp10
-rw-r--r--src/cpu/kernels/assembly/gemm_common.hpp18
-rw-r--r--src/cpu/kernels/dequantize/generic/neon/fp16.cpp37
-rw-r--r--src/cpu/kernels/dequantize/generic/neon/fp32.cpp35
-rw-r--r--src/cpu/kernels/dequantize/generic/neon/impl.h340
-rw-r--r--src/cpu/kernels/dequantize/generic/neon/list.h43
-rw-r--r--src/cpu/kernels/gemm_matrix_mul/generic/neon/fp16.cpp6
-rw-r--r--src/cpu/kernels/quantize/generic/neon/fp16.cpp45
-rw-r--r--src/cpu/kernels/quantize/generic/neon/fp32.cpp48
-rw-r--r--src/cpu/kernels/quantize/generic/neon/impl.h330
-rw-r--r--src/cpu/kernels/quantize/generic/neon/integer.cpp82
-rw-r--r--src/cpu/kernels/quantize/generic/neon/list.h66
-rw-r--r--src/cpu/kernels/reduction_layer/generic/neon/fp16.cpp65
-rw-r--r--src/cpu/kernels/reduction_layer/generic/neon/fp32.cpp73
-rw-r--r--src/cpu/kernels/reduction_layer/generic/neon/impl.h1633
-rw-r--r--src/cpu/kernels/reduction_layer/generic/neon/integer.cpp62
-rw-r--r--src/cpu/kernels/reduction_layer/generic/neon/list.h66
-rw-r--r--src/cpu/kernels/reduction_layer/generic/neon/qasymm8.cpp63
-rw-r--r--src/cpu/kernels/reduction_layer/generic/neon/qasymm8_signed.cpp63
-rw-r--r--src/cpu/kernels/softmax/generic/neon/fp16.cpp28
-rw-r--r--src/cpu/kernels/softmax/generic/neon/fp32.cpp28
-rw-r--r--src/cpu/kernels/softmax/generic/neon/qasymm8.cpp28
-rw-r--r--src/cpu/kernels/softmax/generic/neon/qasymm8_signed.cpp28
-rw-r--r--src/cpu/kernels/softmax/generic/sme2/fp16.cpp9
-rw-r--r--src/cpu/kernels/softmax/generic/sme2/fp32.cpp9
-rw-r--r--src/cpu/kernels/softmax/generic/sme2/qasymm8.cpp634
-rw-r--r--src/cpu/kernels/softmax/generic/sme2/qasymm8_signed.cpp655
-rw-r--r--src/cpu/kernels/softmax/list.h41
-rw-r--r--src/cpu/operators/CpuConv2d.h13
-rw-r--r--src/cpu/operators/CpuGemmConv2d.h3
-rw-r--r--src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp75
-rw-r--r--src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h2
-rw-r--r--src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp280
-rw-r--r--src/gpu/cl/ClKernelLibrary.cpp1
-rw-r--r--src/gpu/cl/kernels/ClScatterKernel.cpp158
-rw-r--r--src/gpu/cl/kernels/ClScatterKernel.h1
-rw-r--r--src/gpu/cl/operators/ClScatter.cpp2
-rw-r--r--src/runtime/OMP/OMPScheduler.cpp35
-rw-r--r--src/runtime/experimental/operators/CpuGemm.cpp96
674 files changed, 144049 insertions, 122118 deletions
diff --git a/src/BUILD.bazel b/src/BUILD.bazel
index e3cac07de1..22521d1744 100644
--- a/src/BUILD.bazel
+++ b/src/BUILD.bazel
@@ -119,6 +119,8 @@ filegroup(
"cpu/kernels/lut/generic/sve2/u8.cpp",
"cpu/kernels/softmax/generic/sme2/fp16.cpp",
"cpu/kernels/softmax/generic/sme2/fp32.cpp",
+ "cpu/kernels/softmax/generic/sme2/qasymm8.cpp",
+ "cpu/kernels/softmax/generic/sme2/qasymm8_signed.cpp",
"cpu/kernels/softmax/generic/sve2/impl.cpp"] +
glob(["**/*.h",
"**/*.hpp",
@@ -246,6 +248,7 @@ filegroup(
"core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp",
"core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16fp32fp16_dot_16VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp",
@@ -278,6 +281,7 @@ filegroup(
"core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp",
@@ -302,6 +306,9 @@ filegroup(
"core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp",
@@ -315,6 +322,7 @@ filegroup(
"core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp",
@@ -519,9 +527,11 @@ filegroup(
"core/NEON/kernels/arm_gemm/gemm_fp32.cpp",
"core/NEON/kernels/arm_gemm/gemm_int16.cpp",
"core/NEON/kernels/arm_gemm/gemm_int8.cpp",
+ "core/NEON/kernels/arm_gemm/gemm_q8_mixed.cpp",
"core/NEON/kernels/arm_gemm/gemm_qint8.cpp",
"core/NEON/kernels/arm_gemm/gemm_quint8.cpp",
"core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp",
+ "core/NEON/kernels/arm_gemm/gemm_u8s8fp32.cpp",
"core/NEON/kernels/arm_gemm/gemm_uint16.cpp",
"core/NEON/kernels/arm_gemm/gemm_uint8.cpp",
"core/NEON/kernels/arm_gemm/interleave-8way.cpp",
@@ -572,6 +582,10 @@ filegroup(
"core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp",
@@ -581,6 +595,7 @@ filegroup(
"core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a53.cpp",
@@ -751,6 +766,8 @@ filegroup(
"cpu/kernels/depthwiseconv2d/generic/neon/impl.cpp",
"cpu/kernels/depthwiseconv2d/generic/neon/qasymm8.cpp",
"cpu/kernels/depthwiseconv2d/generic/neon/qasymm8_signed.cpp",
+ "cpu/kernels/dequantize/generic/neon/fp16.cpp",
+ "cpu/kernels/dequantize/generic/neon/fp32.cpp",
"cpu/kernels/directconv2d/nchw/all.cpp",
"cpu/kernels/directconv2d/nchw/fp16.cpp",
"cpu/kernels/directconv2d/nhwc/neon/fp16.cpp",
@@ -816,9 +833,17 @@ filegroup(
"cpu/kernels/pool3d/neon/fp32.cpp",
"cpu/kernels/pool3d/neon/qasymm8.cpp",
"cpu/kernels/pool3d/neon/qasymm8_signed.cpp",
+ "cpu/kernels/quantize/generic/neon/fp16.cpp",
+ "cpu/kernels/quantize/generic/neon/fp32.cpp",
+ "cpu/kernels/quantize/generic/neon/integer.cpp",
"cpu/kernels/range/generic/neon/fp16.cpp",
"cpu/kernels/range/generic/neon/fp32.cpp",
"cpu/kernels/range/generic/neon/integer.cpp",
+ "cpu/kernels/reduction_layer/generic/neon/fp16.cpp",
+ "cpu/kernels/reduction_layer/generic/neon/fp32.cpp",
+ "cpu/kernels/reduction_layer/generic/neon/integer.cpp",
+ "cpu/kernels/reduction_layer/generic/neon/qasymm8.cpp",
+ "cpu/kernels/reduction_layer/generic/neon/qasymm8_signed.cpp",
"cpu/kernels/roialign/generic/neon/fp16.cpp",
"cpu/kernels/roialign/generic/neon/fp32.cpp",
"cpu/kernels/roialign/generic/neon/qasymm8.cpp",
@@ -996,7 +1021,8 @@ filegroup(
"runtime/SubTensor.cpp",
"runtime/Tensor.cpp",
"runtime/TensorAllocator.cpp",
- "runtime/Utils.cpp"] +
+ "runtime/Utils.cpp",
+ "runtime/experimental/operators/CpuGemm.cpp"] +
glob(["**/*.h",
"**/*.hpp",
"**/*.inl"]),
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 984db79c18..0285245cfb 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -221,6 +221,7 @@ target_sources(
core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
core/NEON/kernels/arm_gemm/interleave_indirect-sve.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16fp32fp16_dot_16VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
@@ -253,6 +254,7 @@ target_sources(
core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
@@ -277,6 +279,9 @@ target_sources(
core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp
core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp
@@ -290,6 +295,7 @@ target_sources(
core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp
core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp
core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp
@@ -340,6 +346,8 @@ target_sources(
cpu/kernels/lut/generic/sve2/u8.cpp
cpu/kernels/softmax/generic/sme2/fp16.cpp
cpu/kernels/softmax/generic/sme2/fp32.cpp
+ cpu/kernels/softmax/generic/sme2/qasymm8.cpp
+ cpu/kernels/softmax/generic/sme2/qasymm8_signed.cpp
cpu/kernels/softmax/generic/sve2/impl.cpp
)
@@ -510,9 +518,11 @@ target_sources(
core/NEON/kernels/arm_gemm/gemm_fp32.cpp
core/NEON/kernels/arm_gemm/gemm_int16.cpp
core/NEON/kernels/arm_gemm/gemm_int8.cpp
+ core/NEON/kernels/arm_gemm/gemm_q8_mixed.cpp
core/NEON/kernels/arm_gemm/gemm_qint8.cpp
core/NEON/kernels/arm_gemm/gemm_quint8.cpp
core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp
+ core/NEON/kernels/arm_gemm/gemm_u8s8fp32.cpp
core/NEON/kernels/arm_gemm/gemm_uint16.cpp
core/NEON/kernels/arm_gemm/gemm_uint8.cpp
core/NEON/kernels/arm_gemm/interleave-8way.cpp
@@ -563,6 +573,10 @@ target_sources(
core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp
core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16/generic.cpp
core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp
core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp
core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp
@@ -572,6 +586,7 @@ target_sources(
core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp
core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp
core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12/generic.cpp
core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp
core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp
core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a53.cpp
@@ -742,6 +757,8 @@ target_sources(
cpu/kernels/depthwiseconv2d/generic/neon/impl.cpp
cpu/kernels/depthwiseconv2d/generic/neon/qasymm8.cpp
cpu/kernels/depthwiseconv2d/generic/neon/qasymm8_signed.cpp
+ cpu/kernels/dequantize/generic/neon/fp16.cpp
+ cpu/kernels/dequantize/generic/neon/fp32.cpp
cpu/kernels/directconv2d/nchw/all.cpp
cpu/kernels/directconv2d/nchw/fp16.cpp
cpu/kernels/directconv2d/nhwc/neon/fp16.cpp
@@ -807,9 +824,17 @@ target_sources(
cpu/kernels/pool3d/neon/fp32.cpp
cpu/kernels/pool3d/neon/qasymm8.cpp
cpu/kernels/pool3d/neon/qasymm8_signed.cpp
+ cpu/kernels/quantize/generic/neon/fp16.cpp
+ cpu/kernels/quantize/generic/neon/fp32.cpp
+ cpu/kernels/quantize/generic/neon/integer.cpp
cpu/kernels/range/generic/neon/fp16.cpp
cpu/kernels/range/generic/neon/fp32.cpp
cpu/kernels/range/generic/neon/integer.cpp
+ cpu/kernels/reduction_layer/generic/neon/fp16.cpp
+ cpu/kernels/reduction_layer/generic/neon/fp32.cpp
+ cpu/kernels/reduction_layer/generic/neon/integer.cpp
+ cpu/kernels/reduction_layer/generic/neon/qasymm8.cpp
+ cpu/kernels/reduction_layer/generic/neon/qasymm8_signed.cpp
cpu/kernels/roialign/generic/neon/fp16.cpp
cpu/kernels/roialign/generic/neon/fp32.cpp
cpu/kernels/roialign/generic/neon/qasymm8.cpp
@@ -988,4 +1013,5 @@ target_sources(
runtime/Tensor.cpp
runtime/TensorAllocator.cpp
runtime/Utils.cpp
+ runtime/experimental/operators/CpuGemm.cpp
 )
\ No newline at end of file
diff --git a/src/common/cpuinfo/CpuInfo.cpp b/src/common/cpuinfo/CpuInfo.cpp
index 93f51e599a..d46d8d7773 100644
--- a/src/common/cpuinfo/CpuInfo.cpp
+++ b/src/common/cpuinfo/CpuInfo.cpp
@@ -29,6 +29,7 @@
#include "support/StringSupport.h"
#include "support/ToolchainSupport.h"
+#include <map>
#include <sstream>
#if !defined(BARE_METAL)
@@ -269,6 +270,53 @@
}
return max_cpus;
}
+#if defined(__ANDROID__)
+std::vector<uint32_t> get_cpu_capacities()
+{
+ std::vector<uint32_t> cpu_capacities;
+ for (int i = 0; i < get_max_cpus(); ++i)
+ {
+ std::stringstream str;
+ str << "/sys/devices/system/cpu/cpu" << i << "/cpu_capacity";
+ std::ifstream file(str.str(), std::ios::in);
+ if (file.is_open())
+ {
+ std::string line;
+ if (bool(getline(file, line)))
+ {
+ cpu_capacities.emplace_back(support::cpp11::stoul(line));
+ }
+ }
+ }
+
+ return cpu_capacities;
+}
+
+uint32_t not_little_num_cpus_internal()
+{
+ std::vector<uint32_t> cpus_all = get_cpu_capacities();
+ std::vector<uint32_t> cpus_not_little;
+
+    // Platforms that do not expose cpu_capacity produce an empty list; fall back
+    // to counting every CPU rather than dereferencing max_element's end iterator.
+    if (cpus_all.empty())
+    {
+        return static_cast<uint32_t>(get_max_cpus());
+    }
+
+    std::vector<uint32_t>::iterator result = std::max_element(cpus_all.begin(), cpus_all.end());
+    uint32_t max_capacity = *result;
+    uint32_t threshold = max_capacity / 2;
+ for (unsigned int i = 0; i < cpus_all.size(); i++)
+ {
+        if (cpus_all[i] >= threshold)
+ {
+ cpus_not_little.emplace_back(cpus_all[i]);
+ }
+ }
+ return cpus_not_little.size();
+}
+#endif /* defined(__ANDROID__) */
#elif defined(__aarch64__) && \
defined(__APPLE__) /* !defined(BARE_METAL) && !defined(__APPLE__) && (defined(__arm__) || defined(__aarch64__)) */
/** Query features through sysctlbyname
@@ -363,6 +404,8 @@ CpuInfo CpuInfo::build()
isainfo.neon = get_hw_capability("hw.optional.neon");
isainfo.fp16 = get_hw_capability("hw.optional.neon_fp16");
isainfo.dot = get_hw_capability("hw.optional.arm.FEAT_DotProd");
+ isainfo.bf16 = get_hw_capability("hw.optional.arm.FEAT_BF16");
+ isainfo.i8mm = get_hw_capability("hw.optional.arm.FEAT_I8MM");
CpuInfo info(isainfo, cpus_model);
return info;
#elif defined(__aarch64__) && defined(_WIN64) /* #elif defined(__aarch64__) && defined(__APPLE__) */
@@ -400,6 +443,15 @@ uint32_t CpuInfo::num_cpus() const
return _cpus.size();
}
+uint32_t CpuInfo::not_little_num_cpus() const
+{
+#if defined(__ANDROID__)
+ return not_little_num_cpus_internal();
+#else /* defined(__ANDROID__) */
+ return num_cpus();
+#endif /* defined(__ANDROID__) */
+}
+
uint32_t num_threads_hint()
{
unsigned int num_threads_hint = 1;
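To make the selection rule added above concrete: a core counts as not-little when its cpu_capacity is at least half of the largest capacity present. A minimal standalone sketch of the same threshold logic, using hypothetical capacity values:

    // Standalone model of not_little_num_cpus_internal's threshold rule.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        // Hypothetical cpu_capacity readings for a 4+2+2 big.LITTLE system.
        const std::vector<uint32_t> capacities = {160, 160, 160, 160, 480, 480, 800, 800};

        const uint32_t max_capacity = *std::max_element(capacities.begin(), capacities.end());
        const uint32_t threshold    = max_capacity / 2; // 400
        const long     not_little   = std::count_if(capacities.begin(), capacities.end(),
                                                    [threshold](uint32_t c) { return c >= threshold; });

        std::cout << not_little << std::endl; // prints 4: the 480- and 800-capacity cores
        return 0;
    }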
diff --git a/src/common/cpuinfo/CpuInfo.h b/src/common/cpuinfo/CpuInfo.h
index 953e4883c3..78d11e9610 100644
--- a/src/common/cpuinfo/CpuInfo.h
+++ b/src/common/cpuinfo/CpuInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2022 Arm Limited.
+ * Copyright (c) 2021-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef SRC_COMMON_CPUINFO_H
-#define SRC_COMMON_CPUINFO_H
+#ifndef ACL_SRC_COMMON_CPUINFO_CPUINFO_H
+#define ACL_SRC_COMMON_CPUINFO_CPUINFO_H
#include "src/common/cpuinfo/CpuIsaInfo.h"
#include "src/common/cpuinfo/CpuModel.h"
@@ -120,6 +120,7 @@ public:
CpuModel cpu_model(uint32_t cpuid) const;
CpuModel cpu_model() const;
uint32_t num_cpus() const;
+ uint32_t not_little_num_cpus() const;
private:
CpuIsaInfo _isa{};
@@ -135,4 +136,4 @@ private:
uint32_t num_threads_hint();
} // namespace cpuinfo
} // namespace arm_compute
-#endif /* SRC_COMMON_CPUINFO_H */
+#endif // ACL_SRC_COMMON_CPUINFO_CPUINFO_H
diff --git a/src/common/cpuinfo/CpuIsaInfo.h b/src/common/cpuinfo/CpuIsaInfo.h
index 9d6bc07b67..b1f5d220a4 100644
--- a/src/common/cpuinfo/CpuIsaInfo.h
+++ b/src/common/cpuinfo/CpuIsaInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2022 Arm Limited.
+ * Copyright (c) 2021-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef SRC_COMMON_CPUINFO_CPUISAINFO_H
-#define SRC_COMMON_CPUINFO_CPUISAINFO_H
+#ifndef ACL_SRC_COMMON_CPUINFO_CPUISAINFO_H
+#define ACL_SRC_COMMON_CPUINFO_CPUISAINFO_H
#include <cstdint>
@@ -81,4 +81,4 @@ init_cpu_isa_from_regs(uint64_t isar0, uint64_t isar1, uint64_t pfr0, uint64_t p
} // namespace cpuinfo
} // namespace arm_compute
-#endif /* SRC_COMMON_CPUINFO_CPUISAINFO_H */
+#endif // ACL_SRC_COMMON_CPUINFO_CPUISAINFO_H
diff --git a/src/core/CL/cl_kernels/common/scatter.cl b/src/core/CL/cl_kernels/common/scatter.cl
index 73b714e042..e3ec9cc98e 100644
--- a/src/core/CL/cl_kernels/common/scatter.cl
+++ b/src/core/CL/cl_kernels/common/scatter.cl
@@ -22,75 +22,131 @@
* SOFTWARE.
*/
#include "helpers.h"
-
-#if defined(INDICES_SHAPE_Y) && defined(DATA_TYPE) && defined(OUT_SHAPE_X) && defined(SCATTER_FUNCTION)
+#include "tile_helpers.h"
// The below defines the various reduce operations for our purposes.
// Where a corresponds to the existing value, and b the new value.
#define ADD_OP(a, b) ((a) + (b))
#define SUB_OP(a, b) ((a) - (b))
+
+#ifdef IS_FLOAT
#define MAX_OP(a, b) fmax(a, b)
#define MIN_OP(a, b) fmin(a, b)
+#else // ifdef IS_FLOAT
+#define MAX_OP(a, b) max(a, b)
+#define MIN_OP(a, b) min(a, b)
+#endif // ifdef IS_FLOAT
+
#define UPDATE_OP(a, b) (b)
-/** Performs the ScatterND operation
- * @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note the size of the dst tensor in the "x" dimension should be passed using -DOUT_SHAPE_X at compile time.
- * @note the number of values in the indices tensor in the y-dim should be passed with -DINDICES_SHAPE_Y at compile time.
- * @note Negative indices are treated as out of bounds.
+#ifdef SCATTER_MP1D_2D_MPND
+
+/** This kernel performs scatter operation
+ *
+ * @note Datatype should be given as a compile-time argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Number of indices should be given as a compile-time argument using -DNUM_INDICES, e.g. -DNUM_INDICES=3
+ * @note Index length should be given as a compile-time argument using -DINDEX_LENGTH, e.g. -DINDEX_LENGTH=2
+ * @note Outermost output shapes should be given as a compile-time argument using -DOUT_SHAPE_N_MINUS_X, where
+ * X must be 1,2,3,4,5, e.g. -DOUT_SHAPE_N_MINUS_1=3, ...
+ * @note Number of elements to copy in a row should be given as a compile-time argument using -DN0, e.g. -DN0=4
+ * @note Number of partial elements at the edge to copy in a row should be given as a compile-time argument using
+ * -DPARTIAL_N0, e.g. -DPARTIAL_N0=2
+ * @note Scatter function should be given as a compile-time argument using -DSCATTER_FUNCTION, e.g. -DSCATTER_FUNCTION=ADD
+ * @note If the kernel should skip reading the output tensor, -DSKIP_OUTPUT_READ option should be provided.
+ * @note Kernel name in uppercase letters should be provided as a compile-time argument, e.g. -DSCATTER_MP1D_2D_MPND
*
- * @param[in] updates_ptr Pointer to the source tensor. Supported data types: All
- * @param[in] updates_stride_x Stride of the source tensor in X dimension (in bytes)
- * @param[in] updates_step_x updates_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] updates_stride_y Stride of the source tensor in Y dimension (in bytes)
- * @param[in] updates_step_y updates_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] updates_stride_z Stride of the source tensor in Y dimension (in bytes)
- * @param[in] updates_step_z updates_stride_z * number of elements along Z processed per work item (in bytes)
- * @param[in] updates_stride_w Stride of the source tensor in Z dimension (in bytes)
- * @param[in] updates_step_w updates_stride_w * number of elements along W processed per work item (in bytes)
- * @param[in] updates_offset_first_element_in_bytes Offset of the first element in the source tensor
- * @param[in] indices_ptr Pointer to the indices vector. Supported data types: S32.
- * @param[in] indices_stride_x Stride of the indices vector in X dimension (in bytes)
- * @param[in] indices_step_x updates_stride_x * number of elements along X processed per work item (in bytes)
- * @param[in] indices_offset_first_element_in_bytes Offset of the first element in the indices vector
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p updates_ptr
+ * @param[in] updates_ptr Pointer to the updates tensor. Data Types: F32
+ * @param[in] updates_stride_x Stride of the updates tensor in X dimension (in bytes)
+ * @param[in] updates_step_x updates_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] updates_stride_y Stride of the updates tensor in Y dimension (in bytes)
+ * @param[in] updates_step_y updates_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] updates_offset_first_element_in_bytes The offset of the first element in the updates tensor
+ * @param[in] indices_ptr Pointer to the indices tensor. Data Types: S32
+ * @param[in] indices_stride_x Stride of the indices tensor in X dimension (in bytes)
+ * @param[in] indices_step_x indices_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] indices_stride_y Stride of the indices tensor in Y dimension (in bytes)
+ * @param[in] indices_step_y indices_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] indices_offset_first_element_in_bytes The offset of the first element in the indices tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Same as @p updates_ptr
* @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per work item (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
* @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per work item (in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per work item (in bytes)
- * @param[in] output_stride_w Stride of the destination tensor in W dimension (in bytes)
- * @param[in] output_step_w output_stride_w * number of elements along W processed per work item (in bytes)
- * @param[in] output_offset_first_element_in_bytes Offset of the first element in the destination tensor
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] upt_block_stride Update tensor data block stride in bytes
+ * @param[in] out_block_stride Output tensor data block stride in bytes
*/
-// The below kernel code is expected to be excecuted sequentially with a single thread to ensure a deterministic outcome.
-__kernel void scatter1D(
- TENSOR4D_DECLARATION(updates),
- TENSOR4D_DECLARATION(indices),
- TENSOR4D_DECLARATION(output))
+__kernel void scatter_mp1d_2d_mpnd(
+ IMAGE_DECLARATION(updates),
+ IMAGE_DECLARATION(indices),
+ IMAGE_DECLARATION(output),
+ int upt_block_stride,
+ int out_block_stride
+ )
{
- // Currently 1D - only iterate through y dimension of indices.
- unsigned int* indices_start_offset = (unsigned int*)(indices_ptr + indices_offset_first_element_in_bytes);
- DATA_TYPE* updates_start_offset = (DATA_TYPE*)(updates_ptr + updates_offset_first_element_in_bytes);
- DATA_TYPE* out_start_offset = (DATA_TYPE*)(output_ptr + output_offset_first_element_in_bytes);
- for (int px = 0; px < INDICES_SHAPE_Y; px++)
+ const int out_shape[5] = {OUT_SHAPE_N_MINUS_1, OUT_SHAPE_N_MINUS_2, OUT_SHAPE_N_MINUS_3,
+ OUT_SHAPE_N_MINUS_4, OUT_SHAPE_N_MINUS_5};
+
+ const int x = GET_SPATIAL_IDX(0, N0, PARTIAL_N0); // x-coordinate in the tensor
+ const int y = get_global_id(1); // collapsed y-coordinate (ignoring the outermost dimensions)
+
+ const bool x_cond = (PARTIAL_N0 != 0 && get_global_id(0) == 0);
+
+ uchar *ind_ptr_raw = indices_ptr + indices_offset_first_element_in_bytes;
+ const uchar *out_ptr_raw = output_ptr + output_offset_first_element_in_bytes
+ + x * sizeof(DATA_TYPE) + y * output_stride_y;
+
+ const uchar *upt_ptr_raw = updates_ptr + updates_offset_first_element_in_bytes
+ + x * sizeof(DATA_TYPE) + y * updates_stride_y;
+
+ for(int index_element = 0; index_element < NUM_INDICES; ++index_element)
{
- const int index_value = *(indices_start_offset);
- DATA_TYPE* out_addr = out_start_offset + index_value;
- if((index_value < OUT_SHAPE_X) && (index_value >= 0))
+ const int *ind_ptr = (const int *) (ind_ptr_raw);
+
+ // Out of bounds check
+ bool out_of_bounds = false;
+ LOOP_UNROLLING(int, i, 0, 1, INDEX_LENGTH,
+ {
+ if(ind_ptr[i] >= out_shape[i] || ind_ptr[i] < 0)
+ {
+ out_of_bounds = true;
+ }
+ });
+
+ ind_ptr_raw += indices_stride_y;
+
+ if(out_of_bounds)
{
- *(__global DATA_TYPE *)(out_addr) = SCATTER_FUNCTION(*(out_addr), *updates_start_offset);
+ continue;
}
- // Increment pointers.
- indices_start_offset++;
- updates_start_offset++;
+
+ // Index calculation
+ int index = 0;
+ LOOP_UNROLLING(int, i, 0, 1, INDEX_LENGTH,
+ {
+ index = index * out_shape[i] + ind_ptr[i];
+ });
+
+ DATA_TYPE *out_ptr = (DATA_TYPE *) (out_ptr_raw + index * out_block_stride);
+
+ const DATA_TYPE *upt_ptr = (const DATA_TYPE *) (upt_ptr_raw + index_element * upt_block_stride);
+
+ VEC_DATA_TYPE(DATA_TYPE, N0) data_in0 = VLOAD(N0)(0, (__global DATA_TYPE *) upt_ptr);
+
+#ifdef SKIP_OUTPUT_READ
+ STORE_VECTOR_SELECT(data_in, DATA_TYPE, (__global DATA_TYPE *) out_ptr, N0, PARTIAL_N0, x_cond);
+#else // ifdef SKIP_OUTPUT_READ
+ VEC_DATA_TYPE(DATA_TYPE, N0) data_out0 = VLOAD(N0)(0, (__global DATA_TYPE *) out_ptr);
+ data_out0 = SCATTER_FUNCTION(data_out0, data_in0);
+
+ STORE_VECTOR_SELECT(data_out, DATA_TYPE, (__global DATA_TYPE *) out_ptr, N0, PARTIAL_N0, x_cond);
+#endif // ifdef SKIP_OUTPUT_READ
}
}
-#endif //defined(DATA_TYPE) && defined(SCATTER_FUNCTION) && defined(OUT_SHAPE_X) && defined(INDICES_SHAPE_Y)
+#endif // SCATTER_MP1D_2D_MPND
-#if defined(DATA_TYPE) && defined(SCATTER_FUNCTION) && defined(OUT_SHAPE_X) && !defined(INDICES_SHAPE_Y)
+#ifdef SCATTER1D_PARALLEL
// NOTE : This code is non-deterministic and can only be executed with the "update" ScatterFunction
// This code is currently unused as it requires changes to the existing test suite.
@@ -114,4 +170,4 @@ __kernel void scatter1D_parallel(
}
}
-#endif //defined(DATA_TYPE) && defined(SCATTER_FUNCTION) && defined(OUT_SHAPE_X) && !defined(INDICES_SHAPE_Y)
+#endif // SCATTER1D_PARALLEL
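The new kernel linearizes each multi-dimensional index against the outermost output shape with Horner's rule before scaling by out_block_stride. A minimal sketch of that index calculation, with hypothetical shape and index values:

    // Model of the "Index calculation" loop in scatter_mp1d_2d_mpnd.
    #include <iostream>

    int main()
    {
        const int index_length = 3;         // INDEX_LENGTH
        const int out_shape[3] = {4, 5, 6}; // OUT_SHAPE_N_MINUS_1..3 (outermost first), hypothetical
        const int ind[3]       = {2, 3, 1}; // one row of the indices tensor, hypothetical

        int index = 0;
        for (int i = 0; i < index_length; ++i)
        {
            index = index * out_shape[i] + ind[i]; // Horner's rule over the index digits
        }

        std::cout << index << std::endl; // (2 * 5 + 3) * 6 + 1 = 79
        return 0;
    }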
diff --git a/src/core/CPP/CPPTypes.cpp b/src/core/CPP/CPPTypes.cpp
index 9980db42f3..a6d08e5bad 100644
--- a/src/core/CPP/CPPTypes.cpp
+++ b/src/core/CPP/CPPTypes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022 Arm Limited.
+ * Copyright (c) 2018-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,6 +28,7 @@
#include "src/common/cpuinfo/CpuInfo.h"
#include "src/common/cpuinfo/CpuIsaInfo.h"
+#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
namespace arm_compute
{
@@ -135,4 +136,24 @@ unsigned int CPUInfo::get_L2_cache_size() const
{
return _impl->L2_cache_size;
}
+
+uint64_t CPUInfo::get_sme2_vector_length() const
+{
+#ifdef ARM_COMPUTE_ENABLE_SME2
+ if (this->has_sme2())
+ return arm_gemm::utils::sme::get_vector_length<int8_t>();
+ else
+ return 0;
+#else // ARM_COMPUTE_ENABLE_SME2
+ return 0;
+#endif // ARM_COMPUTE_ENABLE_SME2
+}
+unsigned int CPUInfo::get_cpu_num_excluding_little() const
+{
+#if defined(__ANDROID__)
+ return _impl->info.not_little_num_cpus();
+#else /* defined(__ANDROID__) */
+ return get_cpu_num();
+#endif /* defined(__ANDROID__) */
+}
} // namespace arm_compute
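A brief usage sketch of the two queries added above, assuming the library's usual CPUInfo::get() singleton accessor; the surrounding program is illustrative only, not library code:

    #include "arm_compute/core/CPP/CPPTypes.h"

    #include <cstdint>
    #include <iostream>

    int main()
    {
        const arm_compute::CPUInfo &info = arm_compute::CPUInfo::get();

        // Candidate worker count that ignores LITTLE cores (Android only; elsewhere
        // this is simply the total CPU count).
        const unsigned int workers = info.get_cpu_num_excluding_little();

        // Streaming SVE vector length in int8 lanes, or 0 when SME2 is unavailable.
        const uint64_t svl = info.get_sme2_vector_length();

        std::cout << "workers: " << workers << ", SME2 VL: " << svl << std::endl;
        return 0;
    }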
diff --git a/src/core/NEON/NEAsymm.h b/src/core/NEON/NEAsymm.h
index 5f4d08d0f6..b93e64a0ef 100644
--- a/src/core/NEON/NEAsymm.h
+++ b/src/core/NEON/NEAsymm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_NEASYMM_H
-#define ARM_COMPUTE_NEASYMM_H
+#ifndef ACL_SRC_CORE_NEON_NEASYMM_H
+#define ACL_SRC_CORE_NEON_NEASYMM_H
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
@@ -637,10 +637,10 @@ inline int32x4x4_t vquantize_internal(const float32x4x4_t &qv, float scale, int3
const float32x4_t vinvscale = vdupq_n_f32(1.f / scale);
const int32x4x4_t rf = {{
#ifdef __aarch64__
- vaddq_s32(vcvtaq_s32_f32(vmulq_f32(qv.val[0], vinvscale)), voffset),
- vaddq_s32(vcvtaq_s32_f32(vmulq_f32(qv.val[1], vinvscale)), voffset),
- vaddq_s32(vcvtaq_s32_f32(vmulq_f32(qv.val[2], vinvscale)), voffset),
- vaddq_s32(vcvtaq_s32_f32(vmulq_f32(qv.val[3], vinvscale)), voffset),
+ vaddq_s32(vcvtnq_s32_f32(vmulq_f32(qv.val[0], vinvscale)), voffset),
+ vaddq_s32(vcvtnq_s32_f32(vmulq_f32(qv.val[1], vinvscale)), voffset),
+ vaddq_s32(vcvtnq_s32_f32(vmulq_f32(qv.val[2], vinvscale)), voffset),
+ vaddq_s32(vcvtnq_s32_f32(vmulq_f32(qv.val[3], vinvscale)), voffset),
#else //__aarch64__
vaddq_s32(vcvtq_s32_f32(vmulq_f32(qv.val[0], vinvscale)), voffset),
vaddq_s32(vcvtq_s32_f32(vmulq_f32(qv.val[1], vinvscale)), voffset),
@@ -698,4 +698,4 @@ inline uint16x8x2_t vquantize_qasymm16(const float32x4x4_t &qv, const UniformQua
} // namespace arm_compute
#include "src/core/NEON/NEAsymm.inl"
-#endif // ARM_COMPUTE_NEASYMM_H
+#endif // ACL_SRC_CORE_NEON_NEASYMM_H
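The only behavioural change in this hunk is how halfway cases round: vcvtaq_s32_f32 rounds to nearest with ties away from zero, while vcvtnq_s32_f32 rounds to nearest with ties to even. A scalar illustration of the difference, assuming the default FE_TONEAREST floating-point environment:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        const float values[] = {0.5f, 1.5f, 2.5f, -0.5f, -1.5f};
        for (float v : values)
        {
            const long ties_away = std::lround(v);                       // vcvtaq: 1, 2, 3, -1, -2
            const long ties_even = static_cast<long>(std::nearbyint(v)); // vcvtnq: 0, 2, 2, 0, -2
            std::printf("%5.1f -> away: %2ld, even: %2ld\n", v, ties_away, ties_even);
        }
        return 0;
    }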
diff --git a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
index 717fd11485..153c36052a 100644
--- a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -78,11 +78,11 @@ static const BatchNormalizationKernel available_kernels[] = {
REGISTER_FP32_SVE(arm_compute::cpu::fp32_sve_batch_normalization)},
#endif /* !defined(ARM_COMPUTE_ENABLE_SVE) */
#if defined(ARM_COMPUTE_ENABLE_NEON)
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#ifdef ARM_COMPUTE_ENABLE_FP16
{"neon_fp16_batch_normalization",
[](const BatchNormalizationSelectorData &data) { return data.dt == DataType::F16; },
REGISTER_FP16_NEON(arm_compute::cpu::fp16_neon_batch_normalization)},
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
{"neon_fp32_batch_normalization",
[](const BatchNormalizationSelectorData &data) { return data.dt == DataType::F32; },
REGISTER_FP32_NEON(arm_compute::cpu::fp32_neon_batch_normalization)},
diff --git a/src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp b/src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp
index cb869838e2..694def1a3a 100644
--- a/src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp
+++ b/src/core/NEON/kernels/NEBoundingBoxTransformKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022 Arm Limited.
+ * Copyright (c) 2019-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,11 +63,11 @@ static const BoundingBoxTransformKernel available_kernels[] = {
{"fp32_neon_boundingboxtransform",
[](const BoundingBoxTransformSelectorData &data) { return data.dt == DataType::F32; },
REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_boundingboxtransform)},
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
{"fp16_neon_boundingboxtransform",
[](const BoundingBoxTransformSelectorData &data) { return data.dt == DataType::F16; },
REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_boundingboxtransform)},
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
#if defined(ARM_COMPUTE_ENABLE_NEON)
{"qu16_neon_boundingboxtransform",
[](const BoundingBoxTransformSelectorData &data) { return data.dt == DataType::QASYMM16; },
diff --git a/src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp b/src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp
index 549319e49f..e23e3d020f 100644
--- a/src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEGenerateProposalsLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022 Arm Limited.
+ * Copyright (c) 2019-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,10 +61,10 @@ static const ComputeAllAnchorsKernel available_kernels[] = {
{"neon_qu16_computeallanchors", [](const ComputeAllAnchorsData &data) { return data.dt == DataType::QSYMM16; },
REGISTER_QSYMM16_NEON(arm_compute::cpu::neon_qu16_computeallanchors)},
#endif //defined(ARM_COMPUTE_ENABLE_NEON)
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
{"neon_fp16_computeallanchors", [](const ComputeAllAnchorsData &data) { return data.dt == DataType::F16; },
REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_computeallanchors)},
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
{"neon_fp32_computeallanchors", [](const ComputeAllAnchorsData &data) { return data.dt == DataType::F32; },
REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_computeallanchors)},
};
diff --git a/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
index 0a1780f6ee..5883731088 100644
--- a/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022 Arm Limited.
+ * Copyright (c) 2019-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,10 +70,10 @@ struct InstanceNormKernel
static const InstanceNormKernel available_kernels[] = {
{"fp32_neon_instancenorm", [](const InstanceNormSelectorData &data) { return data.dt == DataType::F32; },
REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_instancenorm)},
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
{"fp16_neon_instancenorm", [](const InstanceNormSelectorData &data) { return data.dt == DataType::F16; },
REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_instancenorm)},
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
};
/** Micro-kernel selector
diff --git a/src/core/NEON/kernels/NEMeanStdDevNormalizationKernel.cpp b/src/core/NEON/kernels/NEMeanStdDevNormalizationKernel.cpp
index 451031d696..cfe4ac9a4c 100644
--- a/src/core/NEON/kernels/NEMeanStdDevNormalizationKernel.cpp
+++ b/src/core/NEON/kernels/NEMeanStdDevNormalizationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022 Arm Limited.
+ * Copyright (c) 2019-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -60,10 +60,10 @@ struct MeanStdDevNormKernel
static const std::vector<MeanStdDevNormKernel> available_kernels = {
{"fp32_neon_meanstddevnorm", [](const MeanStdDevNormSelectorData &data) { return data.dt == DataType::F32; },
REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_meanstddevnorm)},
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
{"fp16_neon_meanstddevnorm", [](const MeanStdDevNormSelectorData &data) { return data.dt == DataType::F16; },
REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_meanstddevnorm)},
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // ARM_COMPUTE_ENABLE_FP16
{"qasymm8_neon_meanstddevnorm", [](const MeanStdDevNormSelectorData &data) { return data.dt == DataType::QASYMM8; },
REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_meanstddevnorm)},
};
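The kernels touched in this group all share the same table-driven micro-kernel selection: available_kernels is scanned in order and the first entry whose predicate accepts the selector data wins. A simplified, self-contained model of the pattern (types and names are stand-ins, not library code):

    #include <cstdio>
    #include <functional>
    #include <vector>

    enum class DataType { F16, F32 };

    struct SelectorData { DataType dt; };

    struct KernelEntry
    {
        const char *name;
        std::function<bool(const SelectorData &)> is_selected;
    };

    // Return the first entry whose predicate matches, or nullptr.
    const KernelEntry *select(const std::vector<KernelEntry> &kernels, const SelectorData &data)
    {
        for (const auto &k : kernels)
        {
            if (k.is_selected(data))
            {
                return &k;
            }
        }
        return nullptr;
    }

    int main()
    {
        const std::vector<KernelEntry> kernels = {
            {"fp16_kernel", [](const SelectorData &d) { return d.dt == DataType::F16; }},
            {"fp32_kernel", [](const SelectorData &d) { return d.dt == DataType::F32; }},
        };
        const KernelEntry *k = select(kernels, SelectorData{DataType::F32});
        std::printf("%s\n", k ? k->name : "none"); // prints fp32_kernel
        return 0;
    }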
diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.cpp b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
index 455d604b3b..5380e6ccce 100644
--- a/src/core/NEON/kernels/NEReductionOperationKernel.cpp
+++ b/src/core/NEON/kernels/NEReductionOperationKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -31,1747 +31,221 @@
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/Validate.h"
+#include "src/core/common/Registrars.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/core/NEON/INEKernel.h"
-#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
-#include "support/SaturateCast.h"
-
-#include <arm_neon.h>
+#include "src/cpu/kernels/reduction_layer/generic/neon/list.h"
namespace arm_compute
{
-namespace
-{
-// Helper function that calls vqmovun/vqmvn, vcombine and vstore, allows templating of RedOpYZW_quantized
-template <typename T>
-void combine_and_store(int16x8_t t1, int16x8_t t2, Iterator &output, int offset = 0)
-{
- if (std::is_same<T, uint8_t>::value)
- {
- auto res = wrapper::vcombine(wrapper::vqmovun(t1), wrapper::vqmovun(t2));
- wrapper::vstore(output.ptr() + offset, res);
- }
- else
- {
- auto res = wrapper::vcombine(wrapper::vqmovn(t1), wrapper::vqmovn(t2));
- wrapper::vstore(reinterpret_cast<int8_t *>(output.ptr() + offset), res);
- }
-}
-
-template <typename T>
-uint32x4x4_t calculate_index(uint32_t idx, T a, T b, uint32x4x4_t c, ReductionOperation op, int axis)
-{
- uint32x4_t mask{0};
- if (op == ReductionOperation::ARG_IDX_MIN)
- {
- mask = wrapper::vcgt(b, a);
- }
- else
- {
- mask = wrapper::vclt(b, a);
- }
-
- uint32x4_t vec_idx = {idx, idx + 1, idx + 2, idx + 3};
- if (axis != 0)
- {
- vec_idx = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
- }
- uint32x4x4_t res = {{wrapper::vbsl(mask, vec_idx, c.val[0]), 0, 0, 0}};
-
- return res;
-}
-
-template <typename T>
-uint32x4x4_t calculate_index_quantized(uint32_t idx, T a, T b, uint32x4x4_t c, ReductionOperation op, int axis)
-{
- uint32x4x4_t mask{{0}};
- uint8x16_t mask_u8{0};
- if (op == ReductionOperation::ARG_IDX_MIN)
- {
- mask_u8 = wrapper::vcgt(b, a);
- }
- else
- {
- mask_u8 = wrapper::vclt(b, a);
- }
- auto wide_u16_1 =
- wrapper::vorr(vshll_n_u8(wrapper::vgetlow(mask_u8), 8), wrapper::vmovl(wrapper::vgetlow(mask_u8)));
- auto wide_u16_2 =
- wrapper::vorr(vshll_n_u8(wrapper::vgethigh(mask_u8), 8), wrapper::vmovl(wrapper::vgethigh(mask_u8)));
- mask.val[0] =
- wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_1), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_1)));
- mask.val[1] =
- wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_1), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_1)));
- mask.val[2] =
- wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_2), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_2)));
- mask.val[3] =
- wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_2), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_2)));
-
- uint32x4x4_t vec_idx = {{{idx + 0, idx + 1, idx + 2, idx + 3},
- {idx + 4, idx + 5, idx + 6, idx + 7},
- {idx + 8, idx + 9, idx + 10, idx + 11},
- {idx + 12, idx + 13, idx + 14, idx + 15}}};
- if (axis != 0)
- {
- vec_idx.val[0] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
- vec_idx.val[1] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
- vec_idx.val[2] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
- vec_idx.val[3] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
- }
- uint32x4x4_t res = {
- {vbslq_u32(mask.val[0], vec_idx.val[0], c.val[0]), vbslq_u32(mask.val[1], vec_idx.val[1], c.val[1]),
- vbslq_u32(mask.val[2], vec_idx.val[2], c.val[2]), vbslq_u32(mask.val[3], vec_idx.val[3], c.val[3])}};
-
- return res;
-}
-
-// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
-template <typename T>
-inline typename std::enable_if<
- std::is_same<T, float32x4_t>::value || std::is_same<T, int32x4_t>::value,
- typename std::conditional<std::is_same<T, float32x4_t>::value, float32x2_t, int32x2_t>::type>::type
-calculate_min(T in)
-{
- auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
- return wrapper::vpmin(pmin, pmin);
-}
-
-// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
-template <typename T>
-inline typename std::enable_if<
- std::is_same<T, uint8x16_t>::value || std::is_same<T, int8x16_t>::value,
- typename std::conditional<std::is_same<T, uint8x16_t>::value, uint8x8_t, int8x8_t>::type>::type
-calculate_min(T in)
-{
- auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
- pmin = wrapper::vpmin(pmin, pmin);
- pmin = wrapper::vpmin(pmin, pmin);
- return wrapper::vpmin(pmin, pmin);
-}
-
-// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
-template <typename T>
-inline typename std::enable_if<
- std::is_same<T, float32x4_t>::value || std::is_same<T, int32x4_t>::value,
- typename std::conditional<std::is_same<T, float32x4_t>::value, float32x2_t, int32x2_t>::type>::type
-calculate_max(T in)
-{
- auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
- return wrapper::vpmax(pmax, pmax);
-}
-
-// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
-template <typename T>
-inline typename std::enable_if<
- std::is_same<T, uint8x16_t>::value || std::is_same<T, int8x16_t>::value,
- typename std::conditional<std::is_same<T, uint8x16_t>::value, uint8x8_t, int8x8_t>::type>::type
-calculate_max(T in)
-{
- auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
- pmax = wrapper::vpmax(pmax, pmax);
- pmax = wrapper::vpmax(pmax, pmax);
- return wrapper::vpmax(pmax, pmax);
-}
-
-template <typename T>
-uint32_t calculate_vector_index(uint32x4x4_t vec_res_idx, T vec_res_value, ReductionOperation op)
-{
- uint32x4_t res_idx_mask{0};
- uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);
-
- if (op == ReductionOperation::ARG_IDX_MIN)
- {
- auto pmin = calculate_min(vec_res_value);
- auto mask = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
- res_idx_mask = wrapper::vand(vec_res_idx.val[0], mask);
- }
- else
- {
- auto pmax = calculate_max(vec_res_value);
- auto mask = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
- res_idx_mask = wrapper::vand(vec_res_idx.val[0], mask);
- }
-
- res_idx_mask = wrapper::vadd(res_idx_mask, mask_ones);
- auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask), wrapper::vgetlow(res_idx_mask));
- pmin = wrapper::vpmin(pmin, pmin);
- uint32_t res = wrapper::vgetlane(pmin, 0);
-
- return (res - 0xFFFFFFFF);
-}
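The index-selection trick in the helper above (which, per the diffstat, appears to move into src/cpu/kernels/reduction_layer/generic/neon/impl.h) deserves spelling out: lanes that do not match the reduced value are zeroed by the mask, adding 0xFFFFFFFF sends them to UINT32_MAX while matching lanes become idx - 1, and a min-reduction followed by res - 0xFFFFFFFF (that is, res + 1 modulo 2^32) recovers the smallest matching lane index. A scalar sketch with hypothetical lane values:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t idx[4]   = {0, 1, 2, 3};
        const bool     match[4] = {false, true, false, true}; // lanes equal to the min/max

        uint32_t res = 0xFFFFFFFFu;
        for (int i = 0; i < 4; ++i)
        {
            const uint32_t masked = match[i] ? idx[i] : 0u; // vand with the compare mask
            res = std::min(res, masked + 0xFFFFFFFFu);      // vadd with mask_ones, then vpmin
        }

        std::printf("%u\n", res - 0xFFFFFFFFu); // prints 1, the first matching lane
        return 0;
    }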
-
-template <typename T>
-uint32_t calculate_vector_index_quantized(uint32x4x4_t vec_res_idx, T vec_res_value, ReductionOperation op)
-{
- uint32x4x4_t res_idx_mask{{0}};
- uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);
- uint8x16_t mask_u8{0};
- if (op == ReductionOperation::ARG_IDX_MIN)
- {
- auto pmin = calculate_min(vec_res_value);
- mask_u8 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
- }
- else
- {
- auto pmax = calculate_max(vec_res_value);
- mask_u8 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
- }
-
- // Widen vectors
- auto wide_u16_1 =
- wrapper::vorr(vshll_n_u8(wrapper::vgetlow(mask_u8), 8), wrapper::vmovl(wrapper::vgetlow(mask_u8)));
- auto wide_u16_2 =
- wrapper::vorr(vshll_n_u8(wrapper::vgethigh(mask_u8), 8), wrapper::vmovl(wrapper::vgethigh(mask_u8)));
- auto wide_u32_1 =
- wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_1), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_1)));
- auto wide_u32_2 =
- wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_1), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_1)));
- auto wide_u32_3 =
- wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_2), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_2)));
- auto wide_u32_4 =
- wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_2), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_2)));
- res_idx_mask.val[0] = wrapper::vand(vec_res_idx.val[0], wide_u32_1);
- res_idx_mask.val[1] = wrapper::vand(vec_res_idx.val[1], wide_u32_2);
- res_idx_mask.val[2] = wrapper::vand(vec_res_idx.val[2], wide_u32_3);
- res_idx_mask.val[3] = wrapper::vand(vec_res_idx.val[3], wide_u32_4);
- res_idx_mask.val[0] = wrapper::vadd(res_idx_mask.val[0], mask_ones);
- res_idx_mask.val[1] = wrapper::vadd(res_idx_mask.val[1], mask_ones);
- res_idx_mask.val[2] = wrapper::vadd(res_idx_mask.val[2], mask_ones);
- res_idx_mask.val[3] = wrapper::vadd(res_idx_mask.val[3], mask_ones);
-
- uint32_t res = 0xFFFFFFFF;
- int iter = 0;
- do
- {
- auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask.val[iter]), wrapper::vgetlow(res_idx_mask.val[iter]));
- pmin = wrapper::vpmin(pmin, pmin);
- res = std::min(wrapper::vgetlane(pmin, 0), res);
- iter++;
- } while (iter < 4);
-
- return (res - 0xFFFFFFFF);
-}
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-template <>
-uint32x4x4_t
-calculate_index(uint32_t idx, float16x8_t a, float16x8_t b, uint32x4x4_t c, ReductionOperation op, int axis)
-{
- uint32x4x2_t mask{0};
- uint16x8_t mask_u16{0};
- if (op == ReductionOperation::ARG_IDX_MIN)
- {
- mask_u16 = wrapper::vcgt(b, a);
- }
- else
- {
- mask_u16 = wrapper::vclt(b, a);
- }
- mask.val[0] = wrapper::vmovl(wrapper::vgetlow(mask_u16));
- mask.val[1] = wrapper::vmovl(wrapper::vgethigh(mask_u16));
- uint32x4x2_t vec_idx = {{{idx + 0, idx + 1, idx + 2, idx + 3}, {idx + 4, idx + 5, idx + 6, idx + 7}}};
- if (axis != 0)
- {
- vec_idx.val[0] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
- vec_idx.val[1] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
- }
- uint32x4x4_t res = {wrapper::vbsl(mask.val[0], vec_idx.val[0], c.val[0]),
- wrapper::vbsl(mask.val[1], vec_idx.val[1], c.val[1]), 0, 0};
-
- return res;
-}
-
-// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
-inline float16x4_t calculate_min(float16x8_t in)
-{
- auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
- pmin = wrapper::vpmin(pmin, pmin);
- return wrapper::vpmin(pmin, pmin);
-}
-// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
-inline float16x4_t calculate_max(float16x8_t in)
-{
- auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
- pmax = wrapper::vpmax(pmax, pmax);
- return wrapper::vpmax(pmax, pmax);
-}
-
-template <>
-uint32_t calculate_vector_index(uint32x4x4_t vec_res_idx, float16x8_t vec_res_value, ReductionOperation op)
-{
- uint32x4x2_t res_idx_mask{0};
- uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);
- uint16x8_t mask_u16;
- if (op == ReductionOperation::ARG_IDX_MIN)
- {
- auto pmin = calculate_min(vec_res_value);
- mask_u16 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
- }
- else
- {
- auto pmax = calculate_max(vec_res_value);
- mask_u16 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
- }
-
- // Widen vectors
- auto wide_u32_1 =
- wrapper::vorr(vshll_n_u16(wrapper::vgetlow(mask_u16), 8), wrapper::vmovl(wrapper::vgetlow(mask_u16)));
- auto wide_u32_2 =
- wrapper::vorr(vshll_n_u16(wrapper::vgethigh(mask_u16), 8), wrapper::vmovl(wrapper::vgethigh(mask_u16)));
- res_idx_mask.val[0] = wrapper::vand(vec_res_idx.val[0], wide_u32_1);
- res_idx_mask.val[1] = wrapper::vand(vec_res_idx.val[1], wide_u32_2);
- res_idx_mask.val[0] = wrapper::vadd(res_idx_mask.val[0], mask_ones);
- res_idx_mask.val[1] = wrapper::vadd(res_idx_mask.val[1], mask_ones);
-
- uint32_t res = 0xFFFFFFFF;
- uint32_t iter = 0;
- do
- {
- auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask.val[iter]), wrapper::vgetlow(res_idx_mask.val[iter]));
- pmin = wrapper::vpmin(pmin, pmin);
- res = std::min(wrapper::vgetlane(pmin, 0), res);
- iter++;
- } while (iter < 2);
-
- return (res - 0xFFFFFFFF);
-}
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
-template <class F>
-class Reducer
-{
-public:
- static void reduceX(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
- {
- // Set out window
- Window out_window(window);
- out_window.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- f(window, out_window, input, output, op);
- }
- static void reduceY(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
- {
- // Set in window
- Window in_window(window);
- Window out_window(window);
-
- in_window.set(Window::DimY, Window::Dimension(0, 1, 1));
- out_window.set(Window::DimY, Window::Dimension(0, output->info()->dimension(1), output->info()->dimension(1)));
-
- f(in_window, out_window, input, output, 1, op);
- }
- static void reduceZ(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
- {
- // Set in window
- Window in_window(window);
- Window out_window(window);
-
- in_window.set(Window::DimZ, Window::Dimension(0, 1, 1));
- out_window.set(Window::DimZ, Window::Dimension(0, output->info()->dimension(2), output->info()->dimension(2)));
-
- f(in_window, out_window, input, output, 2, op);
- }
- static void reduceW(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
- {
- // Set in/out window
- Window in_window(window);
- Window out_window(window);
-
- in_window.set(3, Window::Dimension(0, 1, 1));
- out_window.set(3, Window::Dimension(0, 1, 1));
-
- f(in_window, out_window, input, output, 3, op);
- }
-};
-
-template <typename T, int S>
-struct RedOpX
-{
- /** SIMD vector tag type. */
- using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
-
- inline void operator()(
- const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, const ReductionOperation op)
- {
- const size_t input_dim_0 = in->info()->dimension(0);
- const int window_step_x = 16 / sizeof(T);
- const auto window_start_x = static_cast<int>(in_window.x().start());
- const auto window_end_x = static_cast<int>(in_window.x().end());
-
- Window in_win_no_pad = in_window;
- in_win_no_pad.set(Window::DimX, Window::Dimension(0, 1, 1));
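-        // Collapse the window's X dimension: each iteration of the loop below
-        // reduces one whole row by hand, stepping window_step_x elements at a time.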
-
- Iterator input(in, in_win_no_pad);
- Iterator output(out, out_window);
-
- execute_window_loop(
- in_win_no_pad,
- [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<const T *>(input.ptr());
-
- auto init_res_value = static_cast<T>(0.f);
- switch (op)
- {
- case ReductionOperation::ARG_IDX_MAX:
- case ReductionOperation::ARG_IDX_MIN:
- case ReductionOperation::MIN:
- case ReductionOperation::MAX:
- {
- init_res_value = static_cast<T>(*input_ptr);
- break;
- }
- case ReductionOperation::PROD:
- {
- init_res_value = static_cast<T>(1.f);
- break;
- }
- default:
- break;
- }
- auto vec_res_value = wrapper::vdup_n(init_res_value, ExactTagType{});
- uint32x4x4_t vec_res_idx{{0}};
-
- // Compute window_step_x elements per iteration
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto vec_elements = wrapper::vloadq(input_ptr + x);
- switch (op)
- {
- case ReductionOperation::SUM_SQUARE:
- vec_res_value = wrapper::vadd(wrapper::vmul(vec_elements, vec_elements), vec_res_value);
- break;
- case ReductionOperation::MEAN_SUM:
- case ReductionOperation::SUM:
- vec_res_value = wrapper::vadd(vec_elements, vec_res_value);
- break;
- case ReductionOperation::PROD:
- vec_res_value = wrapper::vmul(vec_elements, vec_res_value);
- break;
- case ReductionOperation::ARG_IDX_MIN:
- {
- auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
- vec_res_idx = calculate_index<decltype(vec_res_value)>(x, temp_vec_res_value, vec_res_value,
- vec_res_idx, op, 0);
- vec_res_value = temp_vec_res_value;
- break;
- }
- case ReductionOperation::ARG_IDX_MAX:
- {
- auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
- vec_res_idx = calculate_index<decltype(vec_res_value)>(x, temp_vec_res_value, vec_res_value,
- vec_res_idx, op, 0);
- vec_res_value = temp_vec_res_value;
- break;
- }
- case ReductionOperation::MIN:
- {
- vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
- break;
- }
- case ReductionOperation::MAX:
- {
- vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
-
- switch (op)
- {
- case ReductionOperation::SUM:
- case ReductionOperation::MEAN_SUM:
- case ReductionOperation::SUM_SQUARE:
- {
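-                        // Horizontal add of the accumulator: the debug build sums
-                        // the lanes in order, the release build uses pairwise adds;
-                        // float results may differ in the last bit since addition
-                        // is not associative.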
-#ifdef ARM_COMPUTE_DEBUG_ENABLED
- auto res = static_cast<T>(0.f);
- for (int i = 0; i < S; ++i)
- {
- res += wrapper::vgetlane(vec_res_value, i);
- }
-#else // ARM_COMPUTE_DEBUG_ENABLED
- auto carry_res =
- wrapper::vpadd(wrapper::vgethigh(vec_res_value), wrapper::vgetlow(vec_res_value));
- for (int i = 0; i < S / 4; ++i)
- {
- carry_res = wrapper::vpadd(carry_res, carry_res);
- }
- auto res = wrapper::vgetlane(carry_res, 0);
-#endif // ARM_COMPUTE_DEBUG_ENABLED
- if (op == ReductionOperation::SUM_SQUARE)
- {
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- res += (*(input_ptr + x)) * (*(input_ptr + x));
- }
- }
- else
- {
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- res += *(input_ptr + x);
- }
- }
-
- if (op == ReductionOperation::MEAN_SUM)
- {
- res /= input_dim_0;
- }
-
- *(reinterpret_cast<T *>(output.ptr())) = res;
- break;
- }
- case ReductionOperation::PROD:
- {
- auto carry_res =
- wrapper::vmul(wrapper::vgethigh(vec_res_value), wrapper::vgetlow(vec_res_value));
- T res = 1;
- for (int i = 0; i < S / 2; ++i)
- {
- res *= wrapper::vgetlane(carry_res, i);
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- res *= *(input_ptr + x);
- }
-
- *(reinterpret_cast<T *>(output.ptr())) = res;
- break;
- }
- case ReductionOperation::ARG_IDX_MIN:
- {
- auto idx = calculate_vector_index<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
- auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- if (*(input_ptr + x) < res)
- {
- idx = x;
- res = *(input_ptr + x);
- }
- }
- *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
- break;
- }
- case ReductionOperation::ARG_IDX_MAX:
- {
- auto idx = calculate_vector_index<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
- auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- if (*(input_ptr + x) > res)
- {
- idx = x;
- res = *(input_ptr + x);
- }
- }
- *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
- break;
- }
- case ReductionOperation::MIN:
- {
- auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- res = *(input_ptr + x) < res ? *(input_ptr + x) : res;
- }
- *(reinterpret_cast<T *>(output.ptr())) = res;
- break;
- }
- case ReductionOperation::MAX:
- {
- auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- res = *(input_ptr + x) > res ? *(input_ptr + x) : res;
- }
- *(reinterpret_cast<T *>(output.ptr())) = res;
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- },
- input, output);
- }
-};
-
-template <typename T>
-struct RedOpX_quantized
-{
- inline void operator()(
- const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, const ReductionOperation op)
- {
- using PromotedType = typename wrapper::traits::promote<typename wrapper::traits::promote<T>::type>::type;
-
- const auto oq_info = out->info()->quantization_info().uniform();
-
- const TensorInfo in_info = *(in->info());
- const UniformQuantizationInfo iq_info = in_info.quantization_info().uniform();
-
- const int window_step_x = 16 / sizeof(T);
- const auto window_start_x = static_cast<int>(in_window.x().start());
- const auto window_end_x = static_cast<int>(in_window.x().end());
-
- Window in_win_no_pad = in_window;
- in_win_no_pad.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input(in, in_win_no_pad);
- Iterator output(out, out_window);
-
- const auto in_offset = static_cast<float>(iq_info.offset);
- const float in_scale = iq_info.scale;
-
- const auto out_offset = static_cast<float>(oq_info.offset);
- const float out_scale = oq_info.scale;
-
- const auto num_elements = static_cast<float>(in_info.dimension(0));
-
- const float A = in_scale / (out_scale * num_elements);
- const float B = out_offset - (in_scale * in_offset) / (out_scale);
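-        // Folded dequantize + mean + requantize: mean = in_scale * (sum / N - in_offset),
-        // and the quantized output is mean / out_scale + out_offset, i.e. A * sum + B.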
-
- execute_window_loop(
- in_win_no_pad,
- [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<T *>(input.ptr());
-
- auto vec_res_value1 =
- wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
- auto vec_res_value2 =
- wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
- auto vec_res_value3 =
- wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
- auto vec_res_value4 =
- wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
-
- auto vec_res_value1_f = vdupq_n_f32(static_cast<float>(1.f));
- auto vec_res_value2_f = vdupq_n_f32(static_cast<float>(1.f));
- auto vec_res_value3_f = vdupq_n_f32(static_cast<float>(1.f));
- auto vec_res_value4_f = vdupq_n_f32(static_cast<float>(1.f));
-
- typename wrapper::traits::neon_vector<T, 16>::type vec_res_value = {0};
-
- if (op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN ||
- op == ReductionOperation::MIN || op == ReductionOperation::MAX)
- {
- vec_res_value = wrapper::vdup_n(*input_ptr, wrapper::traits::vector_128_tag{});
- }
-
- uint32x4x4_t vec_res_idx{{0}};
- // Compute window_step_x elements per iteration
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto vec_elements = wrapper::vloadq(input_ptr + x);
- switch (op)
- {
- case ReductionOperation::SUM:
- case ReductionOperation::MEAN_SUM:
- {
- const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
- const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
-
- const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
- const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
- const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
- const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
-
- vec_res_value1 = wrapper::vadd(temp32x4t_1, vec_res_value1);
- vec_res_value2 = wrapper::vadd(temp32x4t_2, vec_res_value2);
- vec_res_value3 = wrapper::vadd(temp32x4t_3, vec_res_value3);
- vec_res_value4 = wrapper::vadd(temp32x4t_4, vec_res_value4);
- break;
- }
- case ReductionOperation::PROD:
- {
- const auto offset32x4f_4 = vdupq_n_f32(iq_info.offset);
- const auto scale32x4f_4 = vdupq_n_f32(iq_info.scale);
-
- const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
- const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
-
- const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
- const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
- const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
- const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
-
- auto temp32x4f_1 = wrapper::vcvt<float>(temp32x4t_1);
- auto temp32x4f_2 = wrapper::vcvt<float>(temp32x4t_2);
- auto temp32x4f_3 = wrapper::vcvt<float>(temp32x4t_3);
- auto temp32x4f_4 = wrapper::vcvt<float>(temp32x4t_4);
-
- //de-quantize vec_elements
- temp32x4f_1 = vmulq_f32(vsubq_f32(temp32x4f_1, offset32x4f_4), scale32x4f_4);
- temp32x4f_2 = vmulq_f32(vsubq_f32(temp32x4f_2, offset32x4f_4), scale32x4f_4);
- temp32x4f_3 = vmulq_f32(vsubq_f32(temp32x4f_3, offset32x4f_4), scale32x4f_4);
- temp32x4f_4 = vmulq_f32(vsubq_f32(temp32x4f_4, offset32x4f_4), scale32x4f_4);
-
- vec_res_value1_f = vmulq_f32(temp32x4f_1, vec_res_value1_f);
- vec_res_value2_f = vmulq_f32(temp32x4f_2, vec_res_value2_f);
- vec_res_value3_f = vmulq_f32(temp32x4f_3, vec_res_value3_f);
- vec_res_value4_f = vmulq_f32(temp32x4f_4, vec_res_value4_f);
- break;
- }
- case ReductionOperation::ARG_IDX_MIN:
- {
- auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
- vec_res_idx = calculate_index_quantized<decltype(vec_res_value)>(
- x, temp_vec_res_value, vec_res_value, vec_res_idx, op, 0);
- vec_res_value = temp_vec_res_value;
- break;
- }
- case ReductionOperation::ARG_IDX_MAX:
- {
- auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
- vec_res_idx = calculate_index_quantized<decltype(vec_res_value)>(
- x, temp_vec_res_value, vec_res_value, vec_res_idx, op, 0);
- vec_res_value = temp_vec_res_value;
- break;
- }
- case ReductionOperation::MIN:
- {
- vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
- break;
- }
- case ReductionOperation::MAX:
- {
- vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
-
- switch (op)
- {
- case ReductionOperation::ARG_IDX_MIN:
- {
- auto idx =
- calculate_vector_index_quantized<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
- auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- if (*(input_ptr + x) < res)
- {
- idx = x;
- res = *(input_ptr + x);
- }
- }
- *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
- break;
- }
- case ReductionOperation::ARG_IDX_MAX:
- {
- auto idx =
- calculate_vector_index_quantized<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
- auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- if (*(input_ptr + x) > res)
- {
- idx = x;
- res = *(input_ptr + x);
- }
- }
- *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
- break;
- }
- case ReductionOperation::MIN:
- {
- auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- res = *(input_ptr + x) < res ? *(input_ptr + x) : res;
- }
- *(reinterpret_cast<T *>(output.ptr())) = res;
- break;
- }
- case ReductionOperation::MAX:
- {
- auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- res = *(input_ptr + x) > res ? *(input_ptr + x) : res;
- }
- *(reinterpret_cast<T *>(output.ptr())) = res;
- break;
- }
- case ReductionOperation::PROD:
- {
- auto carry_res = wrapper::vmul(vec_res_value1_f, vec_res_value2_f);
- carry_res = wrapper::vmul(carry_res, vec_res_value3_f);
- carry_res = wrapper::vmul(carry_res, vec_res_value4_f);
-
- float res = wrapper::vgetlane(carry_res, 0);
- res *= wrapper::vgetlane(carry_res, 1);
- res *= wrapper::vgetlane(carry_res, 2);
- res *= wrapper::vgetlane(carry_res, 3);
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- //de-quantize input
- if (std::is_same<T, uint8_t>::value)
- {
- res *= dequantize_qasymm8(*(input_ptr + x), iq_info);
- }
- else
- {
- res *= dequantize_qasymm8_signed(*(input_ptr + x), iq_info);
- }
- }
-
- //re-quantize result
- if (std::is_same<T, uint8_t>::value)
- {
- res = quantize_qasymm8(res, iq_info);
- }
- else
- {
- res = quantize_qasymm8_signed(res, iq_info);
- }
-
- *reinterpret_cast<T *>(output.ptr()) = static_cast<T>(res);
- break;
- }
- case ReductionOperation::SUM:
- case ReductionOperation::MEAN_SUM:
- {
- auto carry_res = wrapper::vadd(vec_res_value1, vec_res_value2);
- carry_res = wrapper::vadd(carry_res, vec_res_value3);
- carry_res = wrapper::vadd(carry_res, vec_res_value4);
-
- auto carry_paddition =
- wrapper::vpadd(wrapper::vgethigh(carry_res), wrapper::vgetlow(carry_res));
- carry_paddition = wrapper::vpadd(carry_paddition, carry_paddition);
- auto res = static_cast<int32_t>(wrapper::vgetlane(carry_paddition, 0));
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- res += *(input_ptr + x);
- }
-
- if (op == ReductionOperation::MEAN_SUM)
- {
- const int32_t resFinal = A * (static_cast<float>(res)) + B;
-
- *reinterpret_cast<T *>(output.ptr()) = utils::cast::saturate_cast<T>(resFinal);
- }
- else
- {
- // Subtract accumulated offsets
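-                                // (a sum of N quantized values accumulates N
-                                // zero-points; the result must carry exactly one)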
- res -= (in_info.dimension(0) - 1) * iq_info.offset;
- *reinterpret_cast<T *>(output.ptr()) = utils::cast::saturate_cast<T>(res);
- }
-
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- },
- input, output);
- }
-};
-
-template <typename T, int S>
-struct RedOpYZW
-{
- /** SIMD vector tag type. */
- using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
- using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;
-
- inline void operator()(const Window &in_window,
- Window &out_window,
- const ITensor *in,
- ITensor *out,
- int axis,
- const ReductionOperation op)
- {
- const TensorInfo in_info = *(in->info());
- const int window_step_x = 16 / sizeof(T);
- const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
- const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
-        // As the work is split over the x-axis, set the correct split window start and end.
- const auto window_start_x = static_cast<int>(0);
- const auto window_end_x = static_cast<int>(in_window.shape().x());
-
- Window in_win_no_pad = in_window;
- in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
- Window out_win_no_pad = out_window;
- out_win_no_pad.set(Window::DimX,
- Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));
-
- Iterator input(in, in_win_no_pad);
- Iterator output(out, out_win_no_pad);
-
- execute_window_loop(
- in_win_no_pad,
- [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<T *>(input.ptr());
-
- // Compute window_step_x elements per iteration
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- neon_vector vec_res_value = {0};
- switch (op)
- {
- case ReductionOperation::ARG_IDX_MAX:
- case ReductionOperation::ARG_IDX_MIN:
- case ReductionOperation::MIN:
- case ReductionOperation::MAX:
- {
- vec_res_value = wrapper::vloadq(input_ptr + x);
- break;
- }
- case ReductionOperation::PROD:
- {
- vec_res_value = wrapper::vdup_n(static_cast<T>(1.f), ExactTagType{});
- break;
- }
- default:
- {
- vec_res_value = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
- break;
- }
- }
- uint32x4x4_t vec_res_idx{{0}};
-
- for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
- {
- const T *in_ptr =
- reinterpret_cast<T *>(input.ptr() + x * sizeof(T) + in_info.strides_in_bytes()[axis] * dim);
- const auto vec_elements = wrapper::vloadq(in_ptr);
- switch (op)
- {
- case ReductionOperation::SUM:
- case ReductionOperation::MEAN_SUM:
- vec_res_value = wrapper::vadd(vec_elements, vec_res_value);
- break;
- case ReductionOperation::SUM_SQUARE:
- vec_res_value = wrapper::vadd(wrapper::vmul(vec_elements, vec_elements), vec_res_value);
- break;
- case ReductionOperation::PROD:
- vec_res_value = wrapper::vmul(vec_elements, vec_res_value);
- break;
- case ReductionOperation::ARG_IDX_MIN:
- {
- auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
- vec_res_idx =
- calculate_index(dim, temp_vec_res_value, vec_res_value, vec_res_idx, op, axis);
- vec_res_value = temp_vec_res_value;
- break;
- }
- case ReductionOperation::ARG_IDX_MAX:
- {
- auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
- vec_res_idx =
- calculate_index(dim, temp_vec_res_value, vec_res_value, vec_res_idx, op, axis);
- vec_res_value = temp_vec_res_value;
- break;
- }
- case ReductionOperation::MIN:
- {
- vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
- break;
- }
- case ReductionOperation::MAX:
- {
- vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
-
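-                    // For MEAN_SUM, multiply by the reciprocal of the reduced
-                    // dimension rather than dividing each lane.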
- if (op == ReductionOperation::MEAN_SUM)
- {
- auto vec_width_inv =
- wrapper::vinv(wrapper::vdup_n(static_cast<T>(in_info.dimension(axis)), ExactTagType{}));
- vec_res_value = wrapper::vmul(vec_res_value, vec_width_inv);
- }
-
- if (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX)
- {
- wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr()) + x, vec_res_idx.val[0]);
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- if (std::is_same<T, float16_t>::value)
- {
- wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr()) + x + 4, vec_res_idx.val[1]);
- }
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- }
- else
- {
- wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x * sizeof(T)), vec_res_value);
- }
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- auto res_value = 0.f;
- switch (op)
- {
- case ReductionOperation::ARG_IDX_MAX:
- case ReductionOperation::ARG_IDX_MIN:
- case ReductionOperation::MIN:
- case ReductionOperation::MAX:
- {
- res_value = *(input_ptr + x);
- break;
- }
- case ReductionOperation::PROD:
- {
- res_value = static_cast<T>(1.f);
- break;
- }
- default:
- {
- res_value = static_cast<T>(0.f);
- break;
- }
- }
-
- uint32_t res_idx = 0;
- for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
- {
- const T *in_ptr =
- reinterpret_cast<T *>(input.ptr() + x * sizeof(T) + in_info.strides_in_bytes()[axis] * dim);
-
- switch (op)
- {
- case ReductionOperation::SUM:
- case ReductionOperation::MEAN_SUM:
- res_value += *in_ptr;
- break;
- case ReductionOperation::SUM_SQUARE:
- res_value += *in_ptr * *in_ptr;
- break;
- case ReductionOperation::PROD:
- res_value *= *in_ptr;
- break;
- case ReductionOperation::ARG_IDX_MIN:
- {
- if (*in_ptr < res_value)
- {
- res_value = *in_ptr;
- res_idx = dim;
- }
- break;
- }
- case ReductionOperation::ARG_IDX_MAX:
- {
- if (*in_ptr > res_value)
- {
- res_value = *in_ptr;
- res_idx = dim;
- }
- break;
- }
- case ReductionOperation::MIN:
- {
- res_value = *in_ptr < res_value ? *in_ptr : res_value;
- break;
- }
- case ReductionOperation::MAX:
- {
- res_value = *in_ptr > res_value ? *in_ptr : res_value;
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
-
- if (op == ReductionOperation::MEAN_SUM)
- {
- res_value /= in_info.dimension(axis);
- }
-
- if (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX)
- {
- *(reinterpret_cast<uint32_t *>(output.ptr()) + x) = res_idx;
- }
- else
- {
- *(reinterpret_cast<T *>(output.ptr() + x * sizeof(T))) = res_value;
- }
- }
- },
- input, output);
- }
-};
-
-template <typename T, int S, int axis, ReductionOperation op>
-struct RedOpYZW_complex
-{
- /** SIMD vector tag type. */
- using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
- using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;
-
- inline void operator()(
- const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, int, const ReductionOperation)
- {
- ARM_COMPUTE_ERROR_ON(axis != 2);
- ARM_COMPUTE_ERROR_ON(op != ReductionOperation::SUM);
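-        // The two channels are interleaved as (real, imaginary) pairs, hence the
-        // 2 * x element offsets and the pair of accumulators below.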
-
- const TensorInfo in_info = *(in->info());
- const size_t stride_z = in_info.strides_in_bytes()[axis];
- const int window_step_x = 16 / sizeof(T);
- const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
- const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
-        // As the work is split over the x-axis, set the correct split window start and end.
- const auto window_start_x = static_cast<int>(0);
- const auto window_end_x = static_cast<int>(in_window.shape().x());
-
- Window in_win_no_pad = in_window;
- in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
- Window out_win_no_pad = out_window;
- out_win_no_pad.set(Window::DimX,
- Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));
-
- Iterator input(in, in_win_no_pad);
- Iterator output(out, out_win_no_pad);
-
- execute_window_loop(
- in_win_no_pad,
- [&](const Coordinates &)
- {
- // Compute window_step_x elements per iteration
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- neon_vector vec_res_value_0 = {0};
- neon_vector vec_res_value_1 = {0};
-
- vec_res_value_0 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
- vec_res_value_1 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
-
- T *out_ptr = reinterpret_cast<T *>(output.ptr() + 2 * x * sizeof(T));
- for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
- {
- T *in_ptr_0 = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + stride_z * dim);
- T *in_ptr_1 = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + 16 + stride_z * dim);
-
- const auto vec_elements_0 = wrapper::vloadq(in_ptr_0);
- const auto vec_elements_1 = wrapper::vloadq(in_ptr_1);
-
- vec_res_value_0 = wrapper::vadd(vec_elements_0, vec_res_value_0);
- vec_res_value_1 = wrapper::vadd(vec_elements_1, vec_res_value_1);
- }
-
- wrapper::vstore(out_ptr, vec_res_value_0);
- wrapper::vstore(out_ptr + 4, vec_res_value_1);
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- auto res_value_0 = 0.f;
- auto res_value_1 = 0.f;
-
- T *out_ptr = reinterpret_cast<T *>(output.ptr() + 2 * x * sizeof(T));
- for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
- {
- T *in_ptr = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + stride_z * dim);
- res_value_0 += *in_ptr;
- res_value_1 += *(in_ptr + 1);
- }
- *out_ptr = res_value_0;
- *(out_ptr + 1) = res_value_1;
- }
- },
- input, output);
- }
-};
-
-template <typename T>
-struct RedOpYZW_quantized
-{
- inline void operator()(const Window &in_window,
- Window &out_window,
- const ITensor *in,
- ITensor *out,
- int axis,
- const ReductionOperation op)
- {
- const TensorInfo in_info = *(in->info());
- const UniformQuantizationInfo iq_info = in_info.quantization_info().uniform();
- using PromotedType = typename wrapper::traits::promote<typename wrapper::traits::promote<T>::type>::type;
-
- const auto oq_info = out->info()->quantization_info().uniform();
-
- const int window_step_x = 16 / sizeof(T);
- const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
- const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
-        // As the work is split over the x-axis, set the correct split window start and end.
- const auto window_start_x = static_cast<int>(0);
- const auto window_end_x = static_cast<int>(in_window.shape().x());
-
- Window in_win_no_pad = in_window;
- in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
- Window out_win_no_pad = out_window;
- out_win_no_pad.set(Window::DimX,
- Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));
-
- Iterator input(in, in_win_no_pad);
- Iterator output(out, out_win_no_pad);
-
- using vector_type =
- typename wrapper::traits::neon_bitvector<PromotedType, wrapper::traits::BitWidth::W128>::type;
- using vector_type_f = typename wrapper::traits::neon_vector<float, 4>::type;
-
- vector_type vec_res_value1{};
- vector_type vec_res_value2{};
- vector_type vec_res_value3{};
- vector_type vec_res_value4{};
-
- vector_type_f vec_res_value1_f{};
- vector_type_f vec_res_value2_f{};
- vector_type_f vec_res_value3_f{};
- vector_type_f vec_res_value4_f{};
-
- const float in_offset = static_cast<float>(iq_info.offset);
- const float in_scale = iq_info.scale;
-
- const float out_offset = static_cast<float>(oq_info.offset);
- const float out_scale = oq_info.scale;
-
- const float num_elements = static_cast<float>(in_info.dimension(axis));
-
- const float A = in_scale / (out_scale * num_elements);
- const float B = out_offset - (in_scale * in_offset) / (out_scale);
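-        // Same A * sum + B fold as in RedOpX_quantized, applied per lane via vmla below.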
-
- const auto vec_A = wrapper::vdup_n(static_cast<float>(A), wrapper::traits::vector_128_tag{});
- const auto vec_B = wrapper::vdup_n(static_cast<float>(B), wrapper::traits::vector_128_tag{});
-
- execute_window_loop(
- in_win_no_pad,
- [&](const Coordinates &)
- {
- const auto input_ptr = reinterpret_cast<T *>(input.ptr());
-
- // Compute window_step_x elements per iteration
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- uint32x4x4_t vec_res_idx{{0}};
- vec_res_value1 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
- vec_res_value2 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
- vec_res_value3 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
- vec_res_value4 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
-
- vec_res_value1_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
- vec_res_value2_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
- vec_res_value3_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
- vec_res_value4_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
-
- auto vec_res_value = wrapper::vloadq(input_ptr + x);
-
- for (unsigned int index_dim = 0; index_dim < in_info.dimension(axis); ++index_dim)
- {
- const T *in_ptr = input_ptr + x + in_info.strides_in_bytes()[axis] * index_dim;
- const auto vec_elements = wrapper::vloadq(in_ptr);
- switch (op)
- {
- case ReductionOperation::SUM:
- case ReductionOperation::MEAN_SUM:
- {
- const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
- const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
-
- const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
- const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
- const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
- const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
-
- vec_res_value1 = wrapper::vadd(temp32x4t_1, vec_res_value1);
- vec_res_value2 = wrapper::vadd(temp32x4t_2, vec_res_value2);
- vec_res_value3 = wrapper::vadd(temp32x4t_3, vec_res_value3);
- vec_res_value4 = wrapper::vadd(temp32x4t_4, vec_res_value4);
- break;
- }
- case ReductionOperation::PROD:
- {
- const auto offset32x4f_4 = wrapper::vdup_n(static_cast<float>(iq_info.offset),
- wrapper::traits::vector_128_tag{});
- const auto scale32x4f_4 =
- wrapper::vdup_n(iq_info.scale, wrapper::traits::vector_128_tag{});
-
- const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
- const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
-
- const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
- const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
- const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
- const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
-
- auto temp32x4f_1 = wrapper::vcvt<float>(temp32x4t_1);
- auto temp32x4f_2 = wrapper::vcvt<float>(temp32x4t_2);
- auto temp32x4f_3 = wrapper::vcvt<float>(temp32x4t_3);
- auto temp32x4f_4 = wrapper::vcvt<float>(temp32x4t_4);
-
- //de-quantize vec_elements
- temp32x4f_1 = wrapper::vmul(wrapper::vsub(temp32x4f_1, offset32x4f_4), scale32x4f_4);
- temp32x4f_2 = wrapper::vmul(wrapper::vsub(temp32x4f_2, offset32x4f_4), scale32x4f_4);
- temp32x4f_3 = wrapper::vmul(wrapper::vsub(temp32x4f_3, offset32x4f_4), scale32x4f_4);
- temp32x4f_4 = wrapper::vmul(wrapper::vsub(temp32x4f_4, offset32x4f_4), scale32x4f_4);
-
- vec_res_value1_f = wrapper::vmul(temp32x4f_1, vec_res_value1_f);
- vec_res_value2_f = wrapper::vmul(temp32x4f_2, vec_res_value2_f);
- vec_res_value3_f = wrapper::vmul(temp32x4f_3, vec_res_value3_f);
- vec_res_value4_f = wrapper::vmul(temp32x4f_4, vec_res_value4_f);
- break;
- }
- case ReductionOperation::ARG_IDX_MIN:
- {
- auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
- vec_res_idx = calculate_index_quantized(index_dim, temp_vec_res_value, vec_res_value,
- vec_res_idx, op, axis);
- vec_res_value = temp_vec_res_value;
- break;
- }
- case ReductionOperation::ARG_IDX_MAX:
- {
- auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
- vec_res_idx = calculate_index_quantized(index_dim, temp_vec_res_value, vec_res_value,
- vec_res_idx, op, axis);
- vec_res_value = temp_vec_res_value;
- break;
- }
- case ReductionOperation::MIN:
- {
- vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
- break;
- }
- case ReductionOperation::MAX:
- {
- vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
-
- switch (op)
- {
- case ReductionOperation::ARG_IDX_MIN:
- case ReductionOperation::ARG_IDX_MAX:
- {
- wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x), vec_res_idx.val[0]);
- wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 4, vec_res_idx.val[1]);
- wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 8, vec_res_idx.val[2]);
- wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 12,
- vec_res_idx.val[3]);
- break;
- }
- case ReductionOperation::MIN:
- case ReductionOperation::MAX:
- {
- wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), vec_res_value);
- break;
- }
- case ReductionOperation::SUM:
- {
- // Subtract offsets
- auto offsets = vdupq_n_s32((in_info.dimension(axis) - 1) * iq_info.offset);
-
- auto vec_res_s_value1 = wrapper::vreinterpret(vec_res_value1);
- auto vec_res_s_value2 = wrapper::vreinterpret(vec_res_value2);
- auto vec_res_s_value3 = wrapper::vreinterpret(vec_res_value3);
- auto vec_res_s_value4 = wrapper::vreinterpret(vec_res_value4);
- vec_res_s_value1 = wrapper::vsub(vec_res_s_value1, offsets);
- vec_res_s_value2 = wrapper::vsub(vec_res_s_value2, offsets);
- vec_res_s_value3 = wrapper::vsub(vec_res_s_value3, offsets);
- vec_res_s_value4 = wrapper::vsub(vec_res_s_value4, offsets);
-
- const auto temp16x8t_1 =
- wrapper::vcombine(wrapper::vqmovn(vec_res_s_value1), wrapper::vqmovn(vec_res_s_value2));
- const auto temp16x8t_2 =
- wrapper::vcombine(wrapper::vqmovn(vec_res_s_value3), wrapper::vqmovn(vec_res_s_value4));
-
- combine_and_store<T>(temp16x8t_1, temp16x8t_2, output, x);
- break;
- }
- case ReductionOperation::MEAN_SUM:
- {
- vec_res_value1_f = wrapper::vmla(vec_B, wrapper::vcvt<float>(vec_res_value1), vec_A);
- vec_res_value2_f = wrapper::vmla(vec_B, wrapper::vcvt<float>(vec_res_value2), vec_A);
- vec_res_value3_f = wrapper::vmla(vec_B, wrapper::vcvt<float>(vec_res_value3), vec_A);
- vec_res_value4_f = wrapper::vmla(vec_B, wrapper::vcvt<float>(vec_res_value4), vec_A);
-
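-                            // vcvta rounds to nearest (ties away from zero); the
-                            // fallback vcvt truncates, so results may differ by one.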
-#ifdef __aarch64__
- vec_res_value1 = wrapper::vcvta<PromotedType>(vec_res_value1_f);
- vec_res_value2 = wrapper::vcvta<PromotedType>(vec_res_value2_f);
- vec_res_value3 = wrapper::vcvta<PromotedType>(vec_res_value3_f);
- vec_res_value4 = wrapper::vcvta<PromotedType>(vec_res_value4_f);
-#else // defined(__aarch64__)
- vec_res_value1 = wrapper::vcvt<PromotedType>(vec_res_value1_f);
- vec_res_value2 = wrapper::vcvt<PromotedType>(vec_res_value2_f);
- vec_res_value3 = wrapper::vcvt<PromotedType>(vec_res_value3_f);
- vec_res_value4 = wrapper::vcvt<PromotedType>(vec_res_value4_f);
-#endif // __aarch64__
-
- const auto temp16x8t_1 =
- wrapper::vcombine(wrapper::vqmovn(vec_res_value1), wrapper::vqmovn(vec_res_value2));
- const auto temp16x8t_2 =
- wrapper::vcombine(wrapper::vqmovn(vec_res_value3), wrapper::vqmovn(vec_res_value4));
- auto res = wrapper::vcombine(wrapper::vqmovn(temp16x8t_1), wrapper::vqmovn(temp16x8t_2));
-
- wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), res);
- break;
- }
- case ReductionOperation::PROD:
- {
- const auto offset32x4f_4 =
- wrapper::vdup_n(static_cast<float>(iq_info.offset), wrapper::traits::vector_128_tag{});
- const auto iscale32x4f_4 = vinvq_f32(vdupq_n_f32(iq_info.scale));
-
- //re-quantize
- vec_res_value1_f =
- wrapper::vadd(wrapper::vmul(vec_res_value1_f, iscale32x4f_4), offset32x4f_4);
- vec_res_value2_f =
- wrapper::vadd(wrapper::vmul(vec_res_value2_f, iscale32x4f_4), offset32x4f_4);
- vec_res_value3_f =
- wrapper::vadd(wrapper::vmul(vec_res_value3_f, iscale32x4f_4), offset32x4f_4);
- vec_res_value4_f =
- wrapper::vadd(wrapper::vmul(vec_res_value4_f, iscale32x4f_4), offset32x4f_4);
-
- vec_res_value1 = wrapper::vcvt<T>(vec_res_value1_f);
- vec_res_value2 = wrapper::vcvt<T>(vec_res_value2_f);
- vec_res_value3 = wrapper::vcvt<T>(vec_res_value3_f);
- vec_res_value4 = wrapper::vcvt<T>(vec_res_value4_f);
-
- const auto temp16x8t_1 =
- wrapper::vcombine(wrapper::vqmovn(vec_res_value1), wrapper::vqmovn(vec_res_value2));
- const auto temp16x8t_2 =
- wrapper::vcombine(wrapper::vqmovn(vec_res_value3), wrapper::vqmovn(vec_res_value4));
- auto res = wrapper::vcombine(wrapper::vqmovn(temp16x8t_1), wrapper::vqmovn(temp16x8t_2));
-
- wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), res);
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- float res_value = 0.f;
- int32_t res_value_q = 0;
-
- switch (op)
- {
- case ReductionOperation::ARG_IDX_MAX:
- case ReductionOperation::ARG_IDX_MIN:
- case ReductionOperation::MIN:
- case ReductionOperation::MAX:
- {
- res_value = *(input_ptr + x);
- break;
- }
- case ReductionOperation::PROD:
- {
- res_value = static_cast<T>(1.0f);
- break;
- }
- default:
- {
- res_value = static_cast<T>(0.0f);
- break;
- }
- }
- uint32_t res_idx = 0;
-
- for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
- {
- const T *in_ptr =
- reinterpret_cast<T *>(input.ptr() + x + in_info.strides_in_bytes()[axis] * dim);
- switch (op)
- {
- case ReductionOperation::SUM:
- {
- res_value += *in_ptr;
- break;
- }
- case ReductionOperation::MEAN_SUM:
- {
- res_value_q += *in_ptr;
- break;
- }
- case ReductionOperation::SUM_SQUARE:
- {
- res_value += *in_ptr * *in_ptr;
- break;
- }
- case ReductionOperation::PROD:
- {
- //de-quantize input
- if (std::is_same<T, uint8_t>::value)
- {
- res_value *= dequantize_qasymm8(*in_ptr, iq_info);
- }
- else
- {
- res_value *= dequantize_qasymm8_signed(*in_ptr, iq_info);
- }
- break;
- }
- case ReductionOperation::ARG_IDX_MIN:
- {
- if (*in_ptr < res_value)
- {
- res_value = *in_ptr;
- res_idx = dim;
- }
- break;
- }
- case ReductionOperation::ARG_IDX_MAX:
- {
- if (*in_ptr > res_value)
- {
- res_value = *in_ptr;
- res_idx = dim;
- }
- break;
- }
- case ReductionOperation::MIN:
- {
- res_value = *in_ptr < res_value ? *in_ptr : res_value;
- break;
- }
- case ReductionOperation::MAX:
- {
- res_value = *in_ptr > res_value ? *in_ptr : res_value;
- break;
- }
- default:
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
-
- switch (op)
- {
- case ReductionOperation::MEAN_SUM:
- {
- // Apply previously calculated coefficients (with rounding on aarch64)
-#ifdef __aarch64__
- const int32_t res =
- arm_compute::support::cpp11::round(A * (static_cast<float>(res_value_q)) + B);
-#else // defined(__aarch64__)
- const int32_t res = A * (static_cast<float>(res_value_q)) + B;
-#endif // __aarch64__
- *reinterpret_cast<T *>(output.ptr() + x) = utils::cast::saturate_cast<T>(res);
- break;
- }
- case ReductionOperation::SUM:
- {
- // Subtract accumulated offsets
- res_value -= (in_info.dimension(axis) - 1) * iq_info.offset;
- *reinterpret_cast<T *>(output.ptr() + x) = utils::cast::saturate_cast<T>(res_value);
- break;
- }
- case ReductionOperation::PROD:
- {
- //re-quantize result
- T res = 0;
- if (std::is_same<T, uint8_t>::value)
- {
- res = quantize_qasymm8(res_value, iq_info);
- }
- else
- {
- res = quantize_qasymm8_signed(res_value, iq_info);
- }
- *(reinterpret_cast<T *>(output.ptr() + x)) = res;
- break;
- }
- case ReductionOperation::ARG_IDX_MIN:
- case ReductionOperation::ARG_IDX_MAX:
- {
- *(reinterpret_cast<uint32_t *>(output.ptr() + x * 4)) = res_idx;
- break;
- }
- default:
- *(reinterpret_cast<T *>(output.ptr() + x)) = res_value;
- }
- }
- },
- input, output);
- }
-};
-
-void reduce_op(
- const Window &window, const ITensor *input, ITensor *output, unsigned int axis, const ReductionOperation op)
+void NEReductionOperationKernel::reduce_op()
{
- const bool is_complex = (input->info()->num_channels() == 2);
+ const bool is_complex = (_input->info()->num_channels() == 2);
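+    // reduce_op() runs at configure() time: it selects the specialized reduction
+    // routine and caches it in _func; run() then just invokes the stored pointer.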
if (is_complex)
{
- switch (axis)
+ switch (_reduction_axis)
{
case 2:
- switch (input->info()->data_type())
+ switch (_input->info()->data_type())
{
case DataType::F32:
- switch (op)
+ {
+ switch (_op)
{
case ReductionOperation::SUM:
- return Reducer<RedOpYZW_complex<float, 4, 2, ReductionOperation::SUM>>::reduceZ(
- window, input, output, RedOpYZW_complex<float, 4, 2, ReductionOperation::SUM>(),
- op);
+ _func = REGISTER_FP32_NEON(cpu::reduce_RedOpYZW_complex_reduceZ_float32_4_2_SUM);
+ break;
default:
ARM_COMPUTE_ERROR("Not supported");
+ break;
}
+ break;
+ }
default:
+ {
ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
}
+ break;
default:
+ {
ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
}
return;
}
- switch (axis)
+ switch (_reduction_axis)
{
case 0:
{
- switch (input->info()->data_type())
+ switch (_input->info()->data_type())
{
case DataType::QASYMM8:
{
- return Reducer<RedOpX_quantized<uint8_t>>::reduceX(window, input, output,
- RedOpX_quantized<uint8_t>(), op);
+ _func = REGISTER_QASYMM8_NEON(cpu::reduce_RedOpX_reduceX_qasymm8);
+ break;
}
case DataType::QASYMM8_SIGNED:
{
- return Reducer<RedOpX_quantized<int8_t>>::reduceX(window, input, output, RedOpX_quantized<int8_t>(),
- op);
+ _func = REGISTER_QASYMM8_SIGNED_NEON(cpu::reduce_RedOpX_reduceX_qasymm8_signed);
+ break;
}
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
- return Reducer<RedOpX<float16_t, 8>>::reduceX(window, input, output, RedOpX<float16_t, 8>(), op);
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ {
+ _func = REGISTER_FP16_NEON(cpu::reduce_RedOpX_reduceX_float16_8);
+ break;
+ }
+#endif // ARM_COMPUTE_ENABLE_FP16
case DataType::F32:
{
- return Reducer<RedOpX<float, 4>>::reduceX(window, input, output, RedOpX<float, 4>(), op);
+ _func = REGISTER_FP32_NEON(cpu::reduce_RedOpX_reduceX_float32_4);
+ break;
}
case DataType::S32:
{
- return Reducer<RedOpX<int32_t, 4>>::reduceX(window, input, output, RedOpX<int32_t, 4>(), op);
+ _func = REGISTER_INTEGER_NEON(cpu::reduce_RedOpX_reduceX_S32_4);
+ break;
}
default:
{
ARM_COMPUTE_ERROR("Not supported");
+ break;
}
}
+ break;
}
case 1:
- switch (input->info()->data_type())
+ {
+ switch (_input->info()->data_type())
{
case DataType::QASYMM8:
{
- return Reducer<RedOpYZW_quantized<uint8_t>>::reduceY(window, input, output,
- RedOpYZW_quantized<uint8_t>(), op);
+ _func = REGISTER_QASYMM8_NEON(cpu::reduce_RedOpYZW_reduceY_qasymm8);
+ break;
}
case DataType::QASYMM8_SIGNED:
{
- return Reducer<RedOpYZW_quantized<int8_t>>::reduceY(window, input, output,
- RedOpYZW_quantized<int8_t>(), op);
+ _func = REGISTER_QASYMM8_SIGNED_NEON(cpu::reduce_RedOpYZW_reduceY_qasymm8_signed);
+ break;
}
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
- return Reducer<RedOpYZW<float16_t, 8>>::reduceY(window, input, output, RedOpYZW<float16_t, 8>(),
- op);
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ {
+ _func = REGISTER_FP16_NEON(cpu::reduce_RedOpYZW_reduceY_float16_8);
+ break;
+ }
+#endif // ARM_COMPUTE_ENABLE_FP16
case DataType::F32:
- return Reducer<RedOpYZW<float, 4>>::reduceY(window, input, output, RedOpYZW<float, 4>(), op);
+ {
+ _func = REGISTER_FP32_NEON(cpu::reduce_RedOpYZW_reduceY_float32_4);
+ break;
+ }
case DataType::S32:
- return Reducer<RedOpYZW<int32_t, 4>>::reduceY(window, input, output, RedOpYZW<int32_t, 4>(), op);
+ {
+ _func = REGISTER_INTEGER_NEON(cpu::reduce_RedOpYZW_reduceY_S32_4);
+ break;
+ }
default:
+ {
ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
}
+ break;
+ }
case 2:
- switch (input->info()->data_type())
+ {
+ switch (_input->info()->data_type())
{
case DataType::QASYMM8:
- return Reducer<RedOpYZW_quantized<uint8_t>>::reduceZ(window, input, output,
- RedOpYZW_quantized<uint8_t>(), op);
+ {
+ _func = REGISTER_QASYMM8_NEON(cpu::reduce_RedOpYZW_reduceZ_qasymm8);
+ break;
+ }
case DataType::QASYMM8_SIGNED:
- return Reducer<RedOpYZW_quantized<int8_t>>::reduceZ(window, input, output,
- RedOpYZW_quantized<int8_t>(), op);
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ {
+ _func = REGISTER_QASYMM8_SIGNED_NEON(cpu::reduce_RedOpYZW_reduceZ_qasymm8_signed);
+ break;
+ }
+#ifdef ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
- return Reducer<RedOpYZW<float16_t, 8>>::reduceZ(window, input, output, RedOpYZW<float16_t, 8>(),
- op);
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ {
+ _func = REGISTER_FP16_NEON(cpu::reduce_RedOpYZW_reduceZ_float16_8);
+ break;
+ }
+#endif // ARM_COMPUTE_ENABLE_FP16
case DataType::F32:
- return Reducer<RedOpYZW<float, 4>>::reduceZ(window, input, output, RedOpYZW<float, 4>(), op);
+ {
+ _func = REGISTER_FP32_NEON(cpu::reduce_RedOpYZW_reduceZ_float32_4);
+ break;
+ }
case DataType::S32:
- return Reducer<RedOpYZW<int32_t, 4>>::reduceZ(window, input, output, RedOpYZW<int32_t, 4>(), op);
+ {
+ _func = REGISTER_INTEGER_NEON(cpu::reduce_RedOpYZW_reduceZ_S32_4);
+ break;
+ }
default:
+ {
ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
}
+ break;
+ }
case 3:
- switch (input->info()->data_type())
+ {
+ switch (_input->info()->data_type())
{
case DataType::QASYMM8:
- return Reducer<RedOpYZW_quantized<uint8_t>>::reduceW(window, input, output,
- RedOpYZW_quantized<uint8_t>(), op);
+ {
+ _func = REGISTER_QASYMM8_NEON(cpu::reduce_RedOpYZW_reduceW_qasymm8);
+ break;
+ }
case DataType::QASYMM8_SIGNED:
- return Reducer<RedOpYZW_quantized<int8_t>>::reduceW(window, input, output,
- RedOpYZW_quantized<int8_t>(), op);
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ {
+ _func = REGISTER_QASYMM8_SIGNED_NEON(cpu::reduce_RedOpYZW_reduceW_qasymm8_signed);
+ break;
+ }
+#ifdef ARM_COMPUTE_ENABLE_FP16
case DataType::F16:
- return Reducer<RedOpYZW<float16_t, 8>>::reduceW(window, input, output, RedOpYZW<float16_t, 8>(),
- op);
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ {
+ _func = REGISTER_FP16_NEON(cpu::reduce_RedOpYZW_reduceW_float16_8);
+ break;
+ }
+#endif // ARM_COMPUTE_ENABLE_FP16
case DataType::F32:
- return Reducer<RedOpYZW<float, 4>>::reduceW(window, input, output, RedOpYZW<float, 4>(), op);
+ {
+ _func = REGISTER_FP32_NEON(cpu::reduce_RedOpYZW_reduceW_float32_4);
+ break;
+ }
case DataType::S32:
- return Reducer<RedOpYZW<int32_t, 4>>::reduceW(window, input, output, RedOpYZW<int32_t, 4>(), op);
+ {
+ _func = REGISTER_INTEGER_NEON(cpu::reduce_RedOpYZW_reduceW_S32_4);
+ break;
+ }
default:
+ {
ARM_COMPUTE_ERROR("Not supported");
+ break;
+ }
}
+ break;
+ }
default:
+ {
ARM_COMPUTE_ERROR("Unsupported reduction axis");
+ break;
+ }
}
}
@@ -1819,10 +293,9 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, u
return Status{};
}
-} // namespace
NEReductionOperationKernel::NEReductionOperationKernel()
- : _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE)
+ : _func(nullptr), _input(nullptr), _output(nullptr), _reduction_axis(0), _op(ReductionOperation::SUM_SQUARE)
{
}
@@ -1856,6 +329,8 @@ void NEReductionOperationKernel::configure(const ITensor *input,
.set_data_type(output_data_type)
.reset_padding()
.set_is_resizable(true));
+ // Determine the reduction function
+ NEReductionOperationKernel::reduce_op();
}
Status NEReductionOperationKernel::validate(const ITensorInfo *input,
@@ -1874,6 +349,6 @@ void NEReductionOperationKernel::run(const Window &window, const ThreadInfo &inf
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
- reduce_op(window, _input, _output, _reduction_axis, _op);
+ (*_func)(window, _input, _output, _op);
}
} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEReductionOperationKernel.h b/src/core/NEON/kernels/NEReductionOperationKernel.h
index 78bec62c14..407e5de6d6 100644
--- a/src/core/NEON/kernels/NEReductionOperationKernel.h
+++ b/src/core/NEON/kernels/NEReductionOperationKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_NEREDUCTIONOPERATIONKERNEL_H
-#define ARM_COMPUTE_NEREDUCTIONOPERATIONKERNEL_H
+#ifndef ACL_SRC_CORE_NEON_KERNELS_NEREDUCTIONOPERATIONKERNEL_H
+#define ACL_SRC_CORE_NEON_KERNELS_NEREDUCTIONOPERATIONKERNEL_H
#include "src/core/NEON/INEKernel.h"
@@ -80,14 +80,24 @@ public:
static Status
validate(const ITensorInfo *input, const ITensorInfo *output, unsigned int axis, ReductionOperation op);
+private:
// Inherited methods overridden:
void run(const Window &window, const ThreadInfo &info) override;
+    /** Common signature for all the specialized Reduction functions
+     *
+     * @param[in]  window Region on which to execute the kernel.
+     * @param[in]  in     Input tensor to reduce.
+     * @param[out] out    Output tensor.
+     * @param[in]  op     Reduction operation to perform.
+     */
+ using ReductionFunction = void (*)(const Window &window, const ITensor *in, ITensor *out, ReductionOperation op);
-private:
+ /** Populate the _func with the right reduction operation handler
+ */
+ void reduce_op();
+
+ ReductionFunction _func;
const ITensor *_input;
ITensor *_output;
unsigned int _reduction_axis;
ReductionOperation _op;
};
} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEREDUCTIONOPERATIONKERNEL_H */
+#endif // ACL_SRC_CORE_NEON_KERNELS_NEREDUCTIONOPERATIONKERNEL_H
diff --git a/src/core/NEON/kernels/NEReorderKernel.cpp b/src/core/NEON/kernels/NEReorderKernel.cpp
index f5bea3e163..fe8882f59f 100644
--- a/src/core/NEON/kernels/NEReorderKernel.cpp
+++ b/src/core/NEON/kernels/NEReorderKernel.cpp
@@ -27,6 +27,7 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/Scheduler.h"
#include "src/common/utils/Log.h"
#include "src/core/NEON/kernels/arm_gemm/transform.hpp"
@@ -233,13 +234,20 @@ Status NEReorderKernel::validate(const ITensorInfo *input,
}
}
- int ksize;
+ int ksize = 0;
switch (output_wf)
{
#if defined(ARM_COMPUTE_ENABLE_SVE)
case WeightFormat::OHWIo8:
{
- ksize = 8;
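+            // OHWIo8 needs an SVE vector length of eight floats (256 bits).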
+ if (Scheduler::get().cpu_info().has_sve() && arm_gemm::utils::get_vector_length<float>() == 8)
+ {
+ ksize = 8;
+ }
+ else
+ {
+ ARM_COMPUTE_RETURN_ERROR_MSG("Unsupported weight format.");
+ }
break;
}
#endif /* ARM_COMPUTE_ENABLE_SVE */
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp
index 15064aeedc..52ecaff0a8 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_implementation_constraints.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -133,7 +133,7 @@ bool has_channel_multiplier(const DepthwiseArgs &args, const void *)
bool no_prime_right_pad(const DepthwiseArgs &args, const void *) __attribute__ ((unused));
bool no_prime_right_pad(const DepthwiseArgs &args, const void *)
{
- return (args.input_cols + args.padding.left) >= (args.kernel_cols - 1);
+ return ((args.input_cols + args.padding.left) / args.dilation_cols) >= (args.kernel_cols - 1);
}
bool qp_has_no_left_shift(const DepthwiseArgs &args, const void *_qp) __attribute__ ((unused));
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp
index c3daaf04fe..adcbedf4ce 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/depthwise_planar.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -210,13 +210,30 @@ struct OutputRowPtrsElement
T *output_padding_buffer;
};
+ // On some implementations there is a significant performance benefit to
+ // aligning the padding buffer to a 1024 byte boundary. This routine
+ // adds as much padding as needed to an arbitrary input pointer and
+ // returns an aligned void *.
+ static constexpr intptr_t BUFFER_ALIGNMENT=1024;
+
+ template <typename ptr_T>
+ static void *do_align(ptr_T in)
+ {
+ intptr_t v = reinterpret_cast<intptr_t>(in);
+ intptr_t odds = v & (BUFFER_ALIGNMENT-1);
+ intptr_t pad = odds ? (BUFFER_ALIGNMENT - odds) : 0;
+
+ return reinterpret_cast<void *>(v + pad);
+ }
+
template <typename OutputStage>
static size_t get_element_size(const WorkspaceArgs<IPlanarStrategy<OutputStage>, OutputStage> &args)
{
- // We need one pointer and stride for each row of output, and an additional
- // blob of memory into which padded stores can go.
+ // We need one pointer and stride for each row of output, and an
+ // additional blob of memory into which padded stores can go. Allow
+ // extra space so that this padding buffer can be aligned at both ends.
return args.strategy->get_output_rows() * (sizeof(T *) + 2*sizeof(size_t)) +
- get_vector_length<char>(args.strategy->get_vl_type());
+ get_vector_length<char>(args.strategy->get_vl_type()) + BUFFER_ALIGNMENT*2;
}
template <typename WorkspaceType, typename OutputStage>
@@ -227,8 +244,8 @@ struct OutputRowPtrsElement
ws->output_row_ptrs = reinterpret_cast<T **>(buffer);
ws->output_ld_cols = reinterpret_cast<size_t *>(ws->output_row_ptrs + n_rows);
ws->output_ld_vls = ws->output_ld_cols + n_rows;
- ws->output_padding_buffer = reinterpret_cast<T *>(ws->output_ld_vls + n_rows);
- return ws->output_padding_buffer + get_vector_length<T>(args.strategy->get_vl_type());
+ ws->output_padding_buffer = reinterpret_cast<T *>(do_align(ws->output_ld_vls + n_rows));
+ return do_align(ws->output_padding_buffer + get_vector_length<T>(args.strategy->get_vl_type()));
}
};
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
index 3de4bdc1fb..f18208d6c4 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_s8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,189 +52,189 @@ void interleave_a64_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *
{
__asm__ __volatile__(
"cmp %x[ld_weight_col], XZR\n"
- "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "mov x22, #0x3\n"
"movi v16.4s, #0x9\n"
- "movi v31.16b, #0x0\n"
- "mov x21, #0x3\n"
- "mul x21, %x[ld_weight_col], x21\n"
+ "movi v0.16b, #0x0\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
"add x20, %x[qp], %[offsetof_input_offset]\n"
+ "movi v31.16b, #0x1\n"
"ld1r { v30.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_weights_offset]\n"
- "ld1r { v29.4s }, [x20]\n"
"cmp %x[ld_weight_row], XZR\n"
- "mul v29.4s, v29.4s, v30.4s\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x21, NE\n"
+ "ld1r { v29.4s }, [x20]\n"
"lsr x21, %x[n_channels], #0x2\n"
- "movi v28.16b, #0x1\n"
- "mul v29.4s, v29.4s, v16.4s\n"
- "add x25, %x[weights], %x[ld_weight_row]\n"
"add x20, %x[qp], %[offsetof_per_layer_mul]\n"
- "ld1r { v27.4s }, [x20]\n"
+ "ld1r { v28.4s }, [x20]\n"
+ "mul x22, %x[ld_weight_col], x22\n"
"add x20, %x[qp], %[offsetof_per_layer_right_shift]\n"
- "ld1r { v26.4s }, [x20]\n"
- "add x24, x25, %x[ld_weight_row]\n"
- "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
- "mov x22, #0x0\n"
+ "add x25, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "ld1r { v27.4s }, [x20]\n"
+ "mul v29.4s, v29.4s, v30.4s\n"
+ "mov x24, #0x0\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x22, NE\n"
+ "mul v29.4s, v29.4s, v16.4s\n"
+ "add x23, %x[weights], %x[ld_weight_row]\n"
+ "add x22, x23, %x[ld_weight_row]\n"
"cbz x21, 4f\n"
"1:" // Loop
- "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "ldr q25, [%x[bias], x22]\n"
+ "ldr q26, [%x[bias], x24]\n"
"2:" // Loop: Skip bias load
- "ldr s19, [%x[weights], #0x0]\n"
- "ldr s16, [%x[weights], %x[ld_weight_col]]\n"
- "zip1 v17.16b, v16.16b, v31.16b\n"
- "movi v21.4s, #0x0\n"
- "ldr s16, [%x[weights], x23]\n"
- "ldr s18, [x25, #0x0]\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v20.16b, v16.16b, v17.16b\n"
- "ldr s17, [x25, %x[ld_weight_col]]\n"
- "ldr s16, [x25, x23]\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "zip1 v16.16b, v17.16b, v31.16b\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s19, [x24, %x[ld_weight_col]]\n"
- ".inst 0x4e949795 // sdot v21.4s, v28.16b, v20.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "ldr s16, [x24, x23]\n"
- "zip1 v17.16b, v17.16b, v16.16b\n"
- "zip1 v16.16b, v19.16b, v31.16b\n"
- ".inst 0x4e929795 // sdot v21.4s, v28.16b, v18.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- ".inst 0x4e909795 // sdot v21.4s, v28.16b, v16.16b\n"
+ "ldr s25, [%x[weights], #0x0]\n"
+ "ldr s18, [%x[weights], %x[ld_weight_col]]\n"
+ "movi v24.4s, #0x0\n"
+ "ldr s16, [%x[weights], x25]\n"
+ "ldr s20, [x23, #0x0]\n"
"add %x[weights], %x[weights], #0x4\n"
- "add x25, x25, #0x4\n"
- "mls v25.4s, v21.4s, v30.4s\n"
- "add x24, x24, #0x4\n"
- "add v25.4s, v25.4s, v29.4s\n"
- "str q25, [%x[outptr], #0x0]\n"
- "str q20, [%x[outptr], #0x10]\n"
- "str q18, [%x[outptr], #0x20]\n"
+ "ldr s23, [x23, %x[ld_weight_col]]\n"
+ "ldr s17, [x23, x25]\n"
+ "add x23, x23, #0x4\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s19, [x22, %x[ld_weight_col]]\n"
+ "zip1 v18.16b, v18.16b, v0.16b\n"
+ "ldr s21, [x22, x25]\n"
+ "zip1 v16.16b, v25.16b, v16.16b\n"
+ "add x22, x22, #0x4\n"
+ "zip1 v20.16b, v20.16b, v17.16b\n"
+ "zip1 v17.16b, v23.16b, v0.16b\n"
+ "zip1 v19.16b, v19.16b, v0.16b\n"
+ "zip1 v18.16b, v16.16b, v18.16b\n"
+ "zip1 v16.16b, v22.16b, v21.16b\n"
+ "zip1 v17.16b, v20.16b, v17.16b\n"
+ ".inst 0x4e9297f8 // sdot v24.4s, v31.16b, v18.16b\n"
+ "zip1 v16.16b, v16.16b, v19.16b\n"
+ ".inst 0x4e9197f8 // sdot v24.4s, v31.16b, v17.16b\n"
+ ".inst 0x4e9097f8 // sdot v24.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v24.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "str q26, [%x[outptr], #0x0]\n"
+ "str q18, [%x[outptr], #0x10]\n"
+ "str q17, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ldr q27, [%x[rq_mul_perchannel], x22]\n"
- "ldr q26, [%x[rq_shift_perchannel], x22]\n"
+ "ldr q28, [%x[rq_mul_perchannel], x24]\n"
+ "ldr q27, [%x[rq_shift_perchannel], x24]\n"
"3:" // Loop: Quantisation parameters: Store
"subs x21, x21, #0x1\n"
- "str q27, [%x[outptr], #0x0]\n"
- "add x22, x22, #0x10\n"
- "str q26, [%x[outptr], #0x10]\n"
+ "str q28, [%x[outptr], #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "str q27, [%x[outptr], #0x10]\n"
"add %x[outptr], %x[outptr], #0x20\n"
"bgt 1b\n"
"tst %x[n_channels], #0x3\n"
"beq 13f\n"
"4:" // Oddments
- "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
"cbz %x[bias], 7f\n"
- "add %x[bias], %x[bias], x22\n"
+ "add %x[bias], %x[bias], x24\n"
"tbz %x[n_channels], #1, 5f\n"
- "ld1 { v25.d }[0], [%x[bias]], #0x8\n"
+ "ld1 { v26.d }[0], [%x[bias]], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
- "ld1 { v25.s }[2], [%x[bias]], #0x4\n"
+ "ld1 { v26.s }[2], [%x[bias]], #0x4\n"
"b 6f\n"
"5:" // Oddments: Load bias: Bit 1: Unset
- "ld1 { v25.s }[0], [%x[bias]], #0x4\n"
+ "ld1 { v26.s }[0], [%x[bias]], #0x4\n"
"6:" // Oddments: Load bias: Bit 1: End
"7:" // Oddments: Skip bias load
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v17.h }[0], [%x[weights]]\n"
- "ld1 { v24.h }[0], [x25]\n"
+ "ld1 { v18.h }[0], [%x[weights]]\n"
+ "ld1 { v21.h }[0], [x23]\n"
"add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v20.h }[0], [x21]\n"
+ "add x20, %x[weights], x25\n"
+ "ld1 { v24.h }[0], [x21]\n"
"ld1 { v16.h }[0], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v19.h }[0], [x21]\n"
- "ld1 { v18.h }[0], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v23.h }[0], [x24]\n"
+ "add x21, x23, %x[ld_weight_col]\n"
+ "add x20, x23, x25\n"
+ "ld1 { v20.h }[0], [x21]\n"
+ "ld1 { v17.h }[0], [x20]\n"
+ "add x21, x22, %x[ld_weight_col]\n"
+ "add x20, x22, x25\n"
+ "ld1 { v23.h }[0], [x22]\n"
"ld1 { v22.h }[0], [x21]\n"
"add %x[weights], %x[weights], #0x2\n"
- "add x25, x25, #0x2\n"
- "ld1 { v21.h }[0], [x20]\n"
- "add x24, x24, #0x2\n"
+ "add x23, x23, #0x2\n"
+ "ld1 { v19.h }[0], [x20]\n"
+ "add x22, x22, #0x2\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v17.b }[2], [%x[weights]]\n"
- "ld1 { v24.b }[2], [x25]\n"
+ "ld1 { v18.b }[2], [%x[weights]]\n"
+ "ld1 { v21.b }[2], [x23]\n"
"add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v20.b }[2], [x21]\n"
+ "add x20, %x[weights], x25\n"
+ "ld1 { v24.b }[2], [x21]\n"
"ld1 { v16.b }[2], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v19.b }[2], [x21]\n"
- "ld1 { v18.b }[2], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v23.b }[2], [x24]\n"
+ "add x21, x23, %x[ld_weight_col]\n"
+ "add x20, x23, x25\n"
+ "ld1 { v20.b }[2], [x21]\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "add x21, x22, %x[ld_weight_col]\n"
+ "add x20, x22, x25\n"
+ "ld1 { v23.b }[2], [x22]\n"
"ld1 { v22.b }[2], [x21]\n"
"add %x[weights], %x[weights], #0x1\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 9f\n"
"8:" // Oddments: Load weights: Bit 1: Unset
- "ld1 { v17.b }[0], [%x[weights]]\n"
- "ld1 { v24.b }[0], [x25]\n"
+ "ld1 { v18.b }[0], [%x[weights]]\n"
+ "ld1 { v21.b }[0], [x23]\n"
"add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v20.b }[0], [x21]\n"
+ "add x20, %x[weights], x25\n"
+ "ld1 { v24.b }[0], [x21]\n"
"ld1 { v16.b }[0], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v19.b }[0], [x21]\n"
- "ld1 { v18.b }[0], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v23.b }[0], [x24]\n"
+ "add x21, x23, %x[ld_weight_col]\n"
+ "add x20, x23, x25\n"
+ "ld1 { v20.b }[0], [x21]\n"
+ "ld1 { v17.b }[0], [x20]\n"
+ "add x21, x22, %x[ld_weight_col]\n"
+ "add x20, x22, x25\n"
+ "ld1 { v23.b }[0], [x22]\n"
"ld1 { v22.b }[0], [x21]\n"
"add %x[weights], %x[weights], #0x1\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "ld1 { v19.b }[0], [x20]\n"
"9:" // Oddments: Load weights: Bit 1: End
- "zip1 v17.16b, v17.16b, v16.16b\n"
- "zip1 v16.16b, v20.16b, v31.16b\n"
- "zip1 v20.16b, v17.16b, v16.16b\n"
- "zip1 v17.16b, v24.16b, v18.16b\n"
- "zip1 v16.16b, v19.16b, v31.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4e949793 // sdot v19.4s, v28.16b, v20.16b\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "zip1 v17.16b, v23.16b, v21.16b\n"
- ".inst 0x4e929793 // sdot v19.4s, v28.16b, v18.16b\n"
- "zip1 v16.16b, v22.16b, v31.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- ".inst 0x4e909793 // sdot v19.4s, v28.16b, v16.16b\n"
- "mls v25.4s, v19.4s, v30.4s\n"
- "add v25.4s, v25.4s, v29.4s\n"
- "str q25, [%x[outptr], #0x0]\n"
- "str q20, [%x[outptr], #0x10]\n"
- "str q18, [%x[outptr], #0x20]\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "zip1 v16.16b, v24.16b, v0.16b\n"
+ "zip1 v21.16b, v21.16b, v17.16b\n"
+ "zip1 v17.16b, v20.16b, v0.16b\n"
+ "movi v20.4s, #0x0\n"
+ "zip1 v19.16b, v23.16b, v19.16b\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "zip1 v16.16b, v22.16b, v0.16b\n"
+ "zip1 v17.16b, v21.16b, v17.16b\n"
+ ".inst 0x4e9297f4 // sdot v20.4s, v31.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ ".inst 0x4e9197f4 // sdot v20.4s, v31.16b, v17.16b\n"
+ ".inst 0x4e9097f4 // sdot v20.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v20.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "str q26, [%x[outptr], #0x0]\n"
+ "str q18, [%x[outptr], #0x10]\n"
+ "str q17, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 12f\n"
- "add x21, %x[rq_mul_perchannel], x22\n"
- "add x20, %x[rq_shift_perchannel], x22\n"
+ "add x21, %x[rq_mul_perchannel], x24\n"
+ "add x20, %x[rq_shift_perchannel], x24\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v27.d }[0], [x21], #0x8\n"
- "ld1 { v26.d }[0], [x20], #0x8\n"
+ "ld1 { v28.d }[0], [x21], #0x8\n"
+ "ld1 { v27.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v27.s }[2], [x21], #0x4\n"
- "ld1 { v26.s }[2], [x20], #0x4\n"
+ "ld1 { v28.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x20], #0x4\n"
"b 11f\n"
"10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
- "ld1 { v27.s }[0], [x21], #0x4\n"
- "ld1 { v26.s }[0], [x20], #0x4\n"
+ "ld1 { v28.s }[0], [x21], #0x4\n"
+ "ld1 { v27.s }[0], [x20], #0x4\n"
"11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
"12:" // Oddments: Quantisation parameters: Store
- "str q27, [%x[outptr], #0x0]\n"
- "str q26, [%x[outptr], #0x10]\n"
+ "str q28, [%x[outptr], #0x0]\n"
+ "str q27, [%x[outptr], #0x10]\n"
"add %x[outptr], %x[outptr], #0x20\n"
"13:" // End
: [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
index 19264c9fce..0ebf6ac10f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/a64_u8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,189 +52,189 @@ void interleave_a64_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *
{
__asm__ __volatile__(
"cmp %x[ld_weight_col], XZR\n"
- "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "mov x22, #0x3\n"
"movi v16.4s, #0x9\n"
- "movi v31.16b, #0x0\n"
- "mov x21, #0x3\n"
- "mul x21, %x[ld_weight_col], x21\n"
+ "movi v0.16b, #0x0\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
"add x20, %x[qp], %[offsetof_input_offset]\n"
+ "movi v31.16b, #0x1\n"
"ld1r { v30.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_weights_offset]\n"
- "ld1r { v29.4s }, [x20]\n"
"cmp %x[ld_weight_row], XZR\n"
- "mul v29.4s, v29.4s, v30.4s\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x21, NE\n"
+ "ld1r { v29.4s }, [x20]\n"
"lsr x21, %x[n_channels], #0x2\n"
- "movi v28.16b, #0x1\n"
- "mul v29.4s, v29.4s, v16.4s\n"
- "add x25, %x[weights], %x[ld_weight_row]\n"
"add x20, %x[qp], %[offsetof_per_layer_mul]\n"
- "ld1r { v27.4s }, [x20]\n"
+ "ld1r { v28.4s }, [x20]\n"
+ "mul x22, %x[ld_weight_col], x22\n"
"add x20, %x[qp], %[offsetof_per_layer_right_shift]\n"
- "ld1r { v26.4s }, [x20]\n"
- "add x24, x25, %x[ld_weight_row]\n"
- "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
- "mov x22, #0x0\n"
+ "add x25, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "ld1r { v27.4s }, [x20]\n"
+ "mul v29.4s, v29.4s, v30.4s\n"
+ "mov x24, #0x0\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x22, NE\n"
+ "mul v29.4s, v29.4s, v16.4s\n"
+ "add x23, %x[weights], %x[ld_weight_row]\n"
+ "add x22, x23, %x[ld_weight_row]\n"
"cbz x21, 4f\n"
"1:" // Loop
- "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
"cbz %x[bias], 2f\n"
- "ldr q25, [%x[bias], x22]\n"
+ "ldr q26, [%x[bias], x24]\n"
"2:" // Loop: Skip bias load
- "ldr s19, [%x[weights], #0x0]\n"
- "ldr s16, [%x[weights], %x[ld_weight_col]]\n"
- "zip1 v17.16b, v16.16b, v31.16b\n"
- "movi v21.4s, #0x0\n"
- "ldr s16, [%x[weights], x23]\n"
- "ldr s18, [x25, #0x0]\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v20.16b, v16.16b, v17.16b\n"
- "ldr s17, [x25, %x[ld_weight_col]]\n"
- "ldr s16, [x25, x23]\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "zip1 v16.16b, v17.16b, v31.16b\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s19, [x24, %x[ld_weight_col]]\n"
- ".inst 0x6e949795 // udot v21.4s, v28.16b, v20.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "ldr s16, [x24, x23]\n"
- "zip1 v17.16b, v17.16b, v16.16b\n"
- "zip1 v16.16b, v19.16b, v31.16b\n"
- ".inst 0x6e929795 // udot v21.4s, v28.16b, v18.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- ".inst 0x6e909795 // udot v21.4s, v28.16b, v16.16b\n"
+ "ldr s25, [%x[weights], #0x0]\n"
+ "ldr s18, [%x[weights], %x[ld_weight_col]]\n"
+ "movi v24.4s, #0x0\n"
+ "ldr s16, [%x[weights], x25]\n"
+ "ldr s20, [x23, #0x0]\n"
"add %x[weights], %x[weights], #0x4\n"
- "add x25, x25, #0x4\n"
- "mls v25.4s, v21.4s, v30.4s\n"
- "add x24, x24, #0x4\n"
- "add v25.4s, v25.4s, v29.4s\n"
- "str q25, [%x[outptr], #0x0]\n"
- "str q20, [%x[outptr], #0x10]\n"
- "str q18, [%x[outptr], #0x20]\n"
+ "ldr s23, [x23, %x[ld_weight_col]]\n"
+ "ldr s17, [x23, x25]\n"
+ "add x23, x23, #0x4\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s19, [x22, %x[ld_weight_col]]\n"
+ "zip1 v18.16b, v18.16b, v0.16b\n"
+ "ldr s21, [x22, x25]\n"
+ "zip1 v16.16b, v25.16b, v16.16b\n"
+ "add x22, x22, #0x4\n"
+ "zip1 v20.16b, v20.16b, v17.16b\n"
+ "zip1 v17.16b, v23.16b, v0.16b\n"
+ "zip1 v19.16b, v19.16b, v0.16b\n"
+ "zip1 v18.16b, v16.16b, v18.16b\n"
+ "zip1 v16.16b, v22.16b, v21.16b\n"
+ "zip1 v17.16b, v20.16b, v17.16b\n"
+ ".inst 0x6e9297f8 // udot v24.4s, v31.16b, v18.16b\n"
+ "zip1 v16.16b, v16.16b, v19.16b\n"
+ ".inst 0x6e9197f8 // udot v24.4s, v31.16b, v17.16b\n"
+ ".inst 0x6e9097f8 // udot v24.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v24.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "str q26, [%x[outptr], #0x0]\n"
+ "str q18, [%x[outptr], #0x10]\n"
+ "str q17, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ldr q27, [%x[rq_mul_perchannel], x22]\n"
- "ldr q26, [%x[rq_shift_perchannel], x22]\n"
+ "ldr q28, [%x[rq_mul_perchannel], x24]\n"
+ "ldr q27, [%x[rq_shift_perchannel], x24]\n"
"3:" // Loop: Quantisation parameters: Store
"subs x21, x21, #0x1\n"
- "str q27, [%x[outptr], #0x0]\n"
- "add x22, x22, #0x10\n"
- "str q26, [%x[outptr], #0x10]\n"
+ "str q28, [%x[outptr], #0x0]\n"
+ "add x24, x24, #0x10\n"
+ "str q27, [%x[outptr], #0x10]\n"
"add %x[outptr], %x[outptr], #0x20\n"
"bgt 1b\n"
"tst %x[n_channels], #0x3\n"
"beq 13f\n"
"4:" // Oddments
- "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
"cbz %x[bias], 7f\n"
- "add %x[bias], %x[bias], x22\n"
+ "add %x[bias], %x[bias], x24\n"
"tbz %x[n_channels], #1, 5f\n"
- "ld1 { v25.d }[0], [%x[bias]], #0x8\n"
+ "ld1 { v26.d }[0], [%x[bias]], #0x8\n"
"tbz %x[n_channels], #0, 6f\n"
- "ld1 { v25.s }[2], [%x[bias]], #0x4\n"
+ "ld1 { v26.s }[2], [%x[bias]], #0x4\n"
"b 6f\n"
"5:" // Oddments: Load bias: Bit 1: Unset
- "ld1 { v25.s }[0], [%x[bias]], #0x4\n"
+ "ld1 { v26.s }[0], [%x[bias]], #0x4\n"
"6:" // Oddments: Load bias: Bit 1: End
"7:" // Oddments: Skip bias load
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v17.h }[0], [%x[weights]]\n"
- "ld1 { v24.h }[0], [x25]\n"
+ "ld1 { v18.h }[0], [%x[weights]]\n"
+ "ld1 { v21.h }[0], [x23]\n"
"add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v20.h }[0], [x21]\n"
+ "add x20, %x[weights], x25\n"
+ "ld1 { v24.h }[0], [x21]\n"
"ld1 { v16.h }[0], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v19.h }[0], [x21]\n"
- "ld1 { v18.h }[0], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v23.h }[0], [x24]\n"
+ "add x21, x23, %x[ld_weight_col]\n"
+ "add x20, x23, x25\n"
+ "ld1 { v20.h }[0], [x21]\n"
+ "ld1 { v17.h }[0], [x20]\n"
+ "add x21, x22, %x[ld_weight_col]\n"
+ "add x20, x22, x25\n"
+ "ld1 { v23.h }[0], [x22]\n"
"ld1 { v22.h }[0], [x21]\n"
"add %x[weights], %x[weights], #0x2\n"
- "add x25, x25, #0x2\n"
- "ld1 { v21.h }[0], [x20]\n"
- "add x24, x24, #0x2\n"
+ "add x23, x23, #0x2\n"
+ "ld1 { v19.h }[0], [x20]\n"
+ "add x22, x22, #0x2\n"
"tbz %x[n_channels], #0, 9f\n"
- "ld1 { v17.b }[2], [%x[weights]]\n"
- "ld1 { v24.b }[2], [x25]\n"
+ "ld1 { v18.b }[2], [%x[weights]]\n"
+ "ld1 { v21.b }[2], [x23]\n"
"add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v20.b }[2], [x21]\n"
+ "add x20, %x[weights], x25\n"
+ "ld1 { v24.b }[2], [x21]\n"
"ld1 { v16.b }[2], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v19.b }[2], [x21]\n"
- "ld1 { v18.b }[2], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v23.b }[2], [x24]\n"
+ "add x21, x23, %x[ld_weight_col]\n"
+ "add x20, x23, x25\n"
+ "ld1 { v20.b }[2], [x21]\n"
+ "ld1 { v17.b }[2], [x20]\n"
+ "add x21, x22, %x[ld_weight_col]\n"
+ "add x20, x22, x25\n"
+ "ld1 { v23.b }[2], [x22]\n"
"ld1 { v22.b }[2], [x21]\n"
"add %x[weights], %x[weights], #0x1\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 9f\n"
"8:" // Oddments: Load weights: Bit 1: Unset
- "ld1 { v17.b }[0], [%x[weights]]\n"
- "ld1 { v24.b }[0], [x25]\n"
+ "ld1 { v18.b }[0], [%x[weights]]\n"
+ "ld1 { v21.b }[0], [x23]\n"
"add x21, %x[weights], %x[ld_weight_col]\n"
- "add x20, %x[weights], x23\n"
- "ld1 { v20.b }[0], [x21]\n"
+ "add x20, %x[weights], x25\n"
+ "ld1 { v24.b }[0], [x21]\n"
"ld1 { v16.b }[0], [x20]\n"
- "add x21, x25, %x[ld_weight_col]\n"
- "add x20, x25, x23\n"
- "ld1 { v19.b }[0], [x21]\n"
- "ld1 { v18.b }[0], [x20]\n"
- "add x21, x24, %x[ld_weight_col]\n"
- "add x20, x24, x23\n"
- "ld1 { v23.b }[0], [x24]\n"
+ "add x21, x23, %x[ld_weight_col]\n"
+ "add x20, x23, x25\n"
+ "ld1 { v20.b }[0], [x21]\n"
+ "ld1 { v17.b }[0], [x20]\n"
+ "add x21, x22, %x[ld_weight_col]\n"
+ "add x20, x22, x25\n"
+ "ld1 { v23.b }[0], [x22]\n"
"ld1 { v22.b }[0], [x21]\n"
"add %x[weights], %x[weights], #0x1\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "ld1 { v19.b }[0], [x20]\n"
"9:" // Oddments: Load weights: Bit 1: End
- "zip1 v17.16b, v17.16b, v16.16b\n"
- "zip1 v16.16b, v20.16b, v31.16b\n"
- "zip1 v20.16b, v17.16b, v16.16b\n"
- "zip1 v17.16b, v24.16b, v18.16b\n"
- "zip1 v16.16b, v19.16b, v31.16b\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6e949793 // udot v19.4s, v28.16b, v20.16b\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "zip1 v17.16b, v23.16b, v21.16b\n"
- ".inst 0x6e929793 // udot v19.4s, v28.16b, v18.16b\n"
- "zip1 v16.16b, v22.16b, v31.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- ".inst 0x6e909793 // udot v19.4s, v28.16b, v16.16b\n"
- "mls v25.4s, v19.4s, v30.4s\n"
- "add v25.4s, v25.4s, v29.4s\n"
- "str q25, [%x[outptr], #0x0]\n"
- "str q20, [%x[outptr], #0x10]\n"
- "str q18, [%x[outptr], #0x20]\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "zip1 v16.16b, v24.16b, v0.16b\n"
+ "zip1 v21.16b, v21.16b, v17.16b\n"
+ "zip1 v17.16b, v20.16b, v0.16b\n"
+ "movi v20.4s, #0x0\n"
+ "zip1 v19.16b, v23.16b, v19.16b\n"
+ "zip1 v18.16b, v18.16b, v16.16b\n"
+ "zip1 v16.16b, v22.16b, v0.16b\n"
+ "zip1 v17.16b, v21.16b, v17.16b\n"
+ ".inst 0x6e9297f4 // udot v20.4s, v31.16b, v18.16b\n"
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ ".inst 0x6e9197f4 // udot v20.4s, v31.16b, v17.16b\n"
+ ".inst 0x6e9097f4 // udot v20.4s, v31.16b, v16.16b\n"
+ "mls v26.4s, v20.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "str q26, [%x[outptr], #0x0]\n"
+ "str q18, [%x[outptr], #0x10]\n"
+ "str q17, [%x[outptr], #0x20]\n"
"str q16, [%x[outptr], #0x30]\n"
"add %x[outptr], %x[outptr], #0x40\n"
"cbz %x[rq_mul_perchannel], 12f\n"
- "add x21, %x[rq_mul_perchannel], x22\n"
- "add x20, %x[rq_shift_perchannel], x22\n"
+ "add x21, %x[rq_mul_perchannel], x24\n"
+ "add x20, %x[rq_shift_perchannel], x24\n"
"tbz %x[n_channels], #1, 10f\n"
- "ld1 { v27.d }[0], [x21], #0x8\n"
- "ld1 { v26.d }[0], [x20], #0x8\n"
+ "ld1 { v28.d }[0], [x21], #0x8\n"
+ "ld1 { v27.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v27.s }[2], [x21], #0x4\n"
- "ld1 { v26.s }[2], [x20], #0x4\n"
+ "ld1 { v28.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x20], #0x4\n"
"b 11f\n"
"10:" // Oddments: Quantisation parameters: Load quant params: Bit 1: Unset
- "ld1 { v27.s }[0], [x21], #0x4\n"
- "ld1 { v26.s }[0], [x20], #0x4\n"
+ "ld1 { v28.s }[0], [x21], #0x4\n"
+ "ld1 { v27.s }[0], [x20], #0x4\n"
"11:" // Oddments: Quantisation parameters: Load quant params: Bit 1: End
"12:" // Oddments: Quantisation parameters: Store
- "str q27, [%x[outptr], #0x0]\n"
- "str q26, [%x[outptr], #0x10]\n"
+ "str q28, [%x[outptr], #0x0]\n"
+ "str q27, [%x[outptr], #0x10]\n"
"add %x[outptr], %x[outptr], #0x20\n"
"13:" // End
: [bias] "+&r" (bias), [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
index 5d7b54f235..7364963477 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_s8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,80 +52,80 @@ void interleave_sve_s8q_3x3_dot::pack_parameters(unsigned int n_channels, void *
{
__asm__ __volatile__(
"cmp %x[ld_weight_col], XZR\n"
- "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
- "mov z16.s, #0x9\n"
- "mov z28.b, #0x0\n"
"mov x20, #0x3\n"
- "ptrue p2.b\n"
- "mul x20, %x[ld_weight_col], x20\n"
- "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
- "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "mov z16.s, #0x9\n"
+ "mov z31.b, #0x0\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "ptrue p3.b\n"
+ "mov z30.b, #0x1\n"
"cmp %x[ld_weight_row], XZR\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
- "mov z25.b, #0x1\n"
- "mul z26.s, p2/M, z26.s, z27.s\n"
- "add x24, %x[weights], %x[ld_weight_row]\n"
- "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
- "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
- "add x23, x24, %x[ld_weight_row]\n"
- "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
- "whilelt p1.s, XZR, %x[n_channels]\n"
- "mov x21, #0x0\n"
- "mul z26.s, p2/M, z26.s, z16.s\n"
+ "mov x24, #0x0\n"
"pfalse p8.b\n"
+ "mul x20, %x[ld_weight_col], x20\n"
+ "ld1rw { z29.s }, p3/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "ld1rw { z28.s }, p3/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "ld1rw { z27.s }, p3/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "ld1rw { z26.s }, p3/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "mul z28.s, p3/M, z28.s, z29.s\n"
+ "add x22, %x[weights], %x[ld_weight_row]\n"
+ "add x21, x22, %x[ld_weight_row]\n"
+ "mul z28.s, p3/M, z28.s, z16.s\n"
"cbz %x[bias], 1f\n"
"ptrue p8.s\n"
"1:" // No bias
"2:" // Loop
- "cntp x20, p2, p1.s\n"
+ "cntp x20, p3, p2.s\n"
+ "mov z25.s, #0x0\n"
+ "and p1.b, p3/Z, p8.b, p2.b\n"
"whilelt p0.b, XZR, x20\n"
- "ld1b { z18.b }, p0/Z, [%x[weights]]\n"
- "ld1b { z17.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [%x[weights], x22]\n"
- "zip1 z20.b, z18.b, z16.b\n"
- "zip1 z19.b, z17.b, z28.b\n"
- "ld1b { z18.b }, p0/Z, [x24]\n"
- "ld1b { z17.b }, p0/Z, [x24, %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [x24, x22]\n"
- "zip1 z22.b, z20.b, z19.b\n"
- "zip1 z21.b, z18.b, z16.b\n"
- "zip1 z19.b, z17.b, z28.b\n"
- "mov z20.s, #0x0\n"
- "ld1b { z18.b }, p0/Z, [x23]\n"
- "ld1b { z17.b }, p0/Z, [x23, %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [x23, x22]\n"
- "sdot z20.s, z25.b, z22.b\n"
- "zip1 z19.b, z21.b, z19.b\n"
- "sdot z20.s, z25.b, z19.b\n"
- "zip1 z18.b, z18.b, z16.b\n"
- "zip1 z16.b, z17.b, z28.b\n"
- "and p0.b, p2/Z, p8.b, p1.b\n"
- "ld1w { z17.s }, p0/Z, [%x[bias], x21, LSL #2]\n"
- "zip1 z16.b, z18.b, z16.b\n"
- "sdot z20.s, z25.b, z16.b\n"
- "mls z17.s, p2/M, z20.s, z27.s\n"
+ "ld1w { z24.s }, p1/Z, [%x[bias], x24, LSL #2]\n"
+ "ld1b { z19.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z18.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x23]\n"
"add %x[weights], %x[weights], x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add z17.s, z17.s, z26.s\n"
- "st1w { z17.s }, p2, [%x[outptr]]\n"
- "st1b { z22.b }, p2, [%x[outptr], #1, MUL VL]\n"
- "st1b { z19.b }, p2, [%x[outptr], #2, MUL VL]\n"
- "st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
+ "ld1b { z23.b }, p0/Z, [x22]\n"
+ "ld1b { z20.b }, p0/Z, [x22, %x[ld_weight_col]]\n"
+ "ld1b { z17.b }, p0/Z, [x22, x23]\n"
+ "ld1b { z22.b }, p0/Z, [x21]\n"
+ "add x22, x22, x20\n"
+ "zip1 z19.b, z19.b, z16.b\n"
+ "zip1 z18.b, z18.b, z31.b\n"
+ "ld1b { z21.b }, p0/Z, [x21, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x21, x23]\n"
+ "add x21, x21, x20\n"
+ "zip1 z20.b, z20.b, z31.b\n"
+ "zip1 z17.b, z23.b, z17.b\n"
+ "zip1 z19.b, z19.b, z18.b\n"
+ "zip1 z18.b, z22.b, z16.b\n"
+ "zip1 z16.b, z21.b, z31.b\n"
+ "zip1 z17.b, z17.b, z20.b\n"
+ "sdot z25.s, z30.b, z19.b\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "sdot z25.s, z30.b, z17.b\n"
+ "sdot z25.s, z30.b, z16.b\n"
+ "mls z24.s, p3/M, z25.s, z29.s\n"
+ "add z24.s, z24.s, z28.s\n"
+ "st1w { z24.s }, p3, [%x[outptr]]\n"
+ "st1b { z19.b }, p3, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z17.b }, p3, [%x[outptr], #2, MUL VL]\n"
+ "st1b { z16.b }, p3, [%x[outptr], #3, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #4\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ld1w { z24.s }, p1/Z, [%x[rq_mul_perchannel], x21, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [%x[rq_shift_perchannel], x21, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [%x[rq_mul_perchannel], x24, LSL #2]\n"
+ "ld1w { z26.s }, p2/Z, [%x[rq_shift_perchannel], x24, LSL #2]\n"
"3:" // Loop: Quantisation parameters: Store
- "incw x21\n"
- "whilelt p1.s, x21, %x[n_channels]\n"
- "st1w { z24.s }, p2, [%x[outptr]]\n"
- "st1w { z23.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "incw x24\n"
+ "st1w { z27.s }, p3, [%x[outptr]]\n"
+ "st1w { z26.s }, p3, [%x[outptr], #1, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #2\n"
+ "whilelt p2.s, x24, %x[n_channels]\n"
"b.any 2b\n"
: [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "p0", "p1", "p2", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
index c3da81448b..e1b01663f6 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/interleaves/sve_u8q_3x3_dot.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,80 +52,80 @@ void interleave_sve_u8q_3x3_dot::pack_parameters(unsigned int n_channels, void *
{
__asm__ __volatile__(
"cmp %x[ld_weight_col], XZR\n"
- "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
- "mov z16.s, #0x9\n"
- "mov z28.b, #0x0\n"
"mov x20, #0x3\n"
- "ptrue p2.b\n"
- "mul x20, %x[ld_weight_col], x20\n"
- "ld1rw { z27.s }, p2/Z, [%x[qp], %[offsetof_input_offset]]\n"
- "ld1rw { z26.s }, p2/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "mov z16.s, #0x9\n"
+ "mov z31.b, #0x0\n"
+ "csel %x[ld_weight_col], %x[ld_weight_col], %x[n_channels], NE\n"
+ "ptrue p3.b\n"
+ "mov z30.b, #0x1\n"
"cmp %x[ld_weight_row], XZR\n"
- "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
- "mov z25.b, #0x1\n"
- "mul z26.s, p2/M, z26.s, z27.s\n"
- "add x24, %x[weights], %x[ld_weight_row]\n"
- "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
- "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
- "add x23, x24, %x[ld_weight_row]\n"
- "add x22, %x[ld_weight_col], %x[ld_weight_col]\n"
- "whilelt p1.s, XZR, %x[n_channels]\n"
- "mov x21, #0x0\n"
- "mul z26.s, p2/M, z26.s, z16.s\n"
+ "mov x24, #0x0\n"
"pfalse p8.b\n"
+ "mul x20, %x[ld_weight_col], x20\n"
+ "ld1rw { z29.s }, p3/Z, [%x[qp], %[offsetof_input_offset]]\n"
+ "ld1rw { z28.s }, p3/Z, [%x[qp], %[offsetof_weights_offset]]\n"
+ "add x23, %x[ld_weight_col], %x[ld_weight_col]\n"
+ "ld1rw { z27.s }, p3/Z, [%x[qp], %[offsetof_per_layer_mul]]\n"
+ "ld1rw { z26.s }, p3/Z, [%x[qp], %[offsetof_per_layer_right_shift]]\n"
+ "csel %x[ld_weight_row], %x[ld_weight_row], x20, NE\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "mul z28.s, p3/M, z28.s, z29.s\n"
+ "add x22, %x[weights], %x[ld_weight_row]\n"
+ "add x21, x22, %x[ld_weight_row]\n"
+ "mul z28.s, p3/M, z28.s, z16.s\n"
"cbz %x[bias], 1f\n"
"ptrue p8.s\n"
"1:" // No bias
"2:" // Loop
- "cntp x20, p2, p1.s\n"
+ "cntp x20, p3, p2.s\n"
+ "mov z25.s, #0x0\n"
+ "and p1.b, p3/Z, p8.b, p2.b\n"
"whilelt p0.b, XZR, x20\n"
- "ld1b { z18.b }, p0/Z, [%x[weights]]\n"
- "ld1b { z17.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [%x[weights], x22]\n"
- "zip1 z20.b, z18.b, z16.b\n"
- "zip1 z19.b, z17.b, z28.b\n"
- "ld1b { z18.b }, p0/Z, [x24]\n"
- "ld1b { z17.b }, p0/Z, [x24, %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [x24, x22]\n"
- "zip1 z22.b, z20.b, z19.b\n"
- "zip1 z21.b, z18.b, z16.b\n"
- "zip1 z19.b, z17.b, z28.b\n"
- "mov z20.s, #0x0\n"
- "ld1b { z18.b }, p0/Z, [x23]\n"
- "ld1b { z17.b }, p0/Z, [x23, %x[ld_weight_col]]\n"
- "ld1b { z16.b }, p0/Z, [x23, x22]\n"
- "udot z20.s, z25.b, z22.b\n"
- "zip1 z19.b, z21.b, z19.b\n"
- "udot z20.s, z25.b, z19.b\n"
- "zip1 z18.b, z18.b, z16.b\n"
- "zip1 z16.b, z17.b, z28.b\n"
- "and p0.b, p2/Z, p8.b, p1.b\n"
- "ld1w { z17.s }, p0/Z, [%x[bias], x21, LSL #2]\n"
- "zip1 z16.b, z18.b, z16.b\n"
- "udot z20.s, z25.b, z16.b\n"
- "mls z17.s, p2/M, z20.s, z27.s\n"
+ "ld1w { z24.s }, p1/Z, [%x[bias], x24, LSL #2]\n"
+ "ld1b { z19.b }, p0/Z, [%x[weights]]\n"
+ "ld1b { z18.b }, p0/Z, [%x[weights], %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [%x[weights], x23]\n"
"add %x[weights], %x[weights], x20\n"
- "add x24, x24, x20\n"
- "add x23, x23, x20\n"
- "add z17.s, z17.s, z26.s\n"
- "st1w { z17.s }, p2, [%x[outptr]]\n"
- "st1b { z22.b }, p2, [%x[outptr], #1, MUL VL]\n"
- "st1b { z19.b }, p2, [%x[outptr], #2, MUL VL]\n"
- "st1b { z16.b }, p2, [%x[outptr], #3, MUL VL]\n"
+ "ld1b { z23.b }, p0/Z, [x22]\n"
+ "ld1b { z20.b }, p0/Z, [x22, %x[ld_weight_col]]\n"
+ "ld1b { z17.b }, p0/Z, [x22, x23]\n"
+ "ld1b { z22.b }, p0/Z, [x21]\n"
+ "add x22, x22, x20\n"
+ "zip1 z19.b, z19.b, z16.b\n"
+ "zip1 z18.b, z18.b, z31.b\n"
+ "ld1b { z21.b }, p0/Z, [x21, %x[ld_weight_col]]\n"
+ "ld1b { z16.b }, p0/Z, [x21, x23]\n"
+ "add x21, x21, x20\n"
+ "zip1 z20.b, z20.b, z31.b\n"
+ "zip1 z17.b, z23.b, z17.b\n"
+ "zip1 z19.b, z19.b, z18.b\n"
+ "zip1 z18.b, z22.b, z16.b\n"
+ "zip1 z16.b, z21.b, z31.b\n"
+ "zip1 z17.b, z17.b, z20.b\n"
+ "udot z25.s, z30.b, z19.b\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "udot z25.s, z30.b, z17.b\n"
+ "udot z25.s, z30.b, z16.b\n"
+ "mls z24.s, p3/M, z25.s, z29.s\n"
+ "add z24.s, z24.s, z28.s\n"
+ "st1w { z24.s }, p3, [%x[outptr]]\n"
+ "st1b { z19.b }, p3, [%x[outptr], #1, MUL VL]\n"
+ "st1b { z17.b }, p3, [%x[outptr], #2, MUL VL]\n"
+ "st1b { z16.b }, p3, [%x[outptr], #3, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #4\n"
"cbz %x[rq_mul_perchannel], 3f\n"
- "ld1w { z24.s }, p1/Z, [%x[rq_mul_perchannel], x21, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [%x[rq_shift_perchannel], x21, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [%x[rq_mul_perchannel], x24, LSL #2]\n"
+ "ld1w { z26.s }, p2/Z, [%x[rq_shift_perchannel], x24, LSL #2]\n"
"3:" // Loop: Quantisation parameters: Store
- "incw x21\n"
- "whilelt p1.s, x21, %x[n_channels]\n"
- "st1w { z24.s }, p2, [%x[outptr]]\n"
- "st1w { z23.s }, p2, [%x[outptr], #1, MUL VL]\n"
+ "incw x24\n"
+ "st1w { z27.s }, p3, [%x[outptr]]\n"
+ "st1w { z26.s }, p3, [%x[outptr], #1, MUL VL]\n"
"addvl %x[outptr], %x[outptr], #2\n"
+ "whilelt p2.s, x24, %x[n_channels]\n"
"b.any 2b\n"
: [ld_weight_col] "+&r" (ld_weight_col), [ld_weight_row] "+&r" (ld_weight_row), [outptr] "+&r" (outptr), [weights] "+&r" (weights)
: [bias] "r" (bias), [n_channels] "r" (n_channels), [offsetof_input_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [offsetof_weights_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [qp] "r" (&qp), [rq_mul_perchannel] "r" (qp.per_channel_muls), [rq_shift_perchannel] "r" (qp.per_channel_right_shifts)
- : "cc", "memory", "p0", "p1", "p2", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p8", "x20", "x21", "x22", "x23", "x24", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index d8ca3d7437..2ef4639e18 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,144 +87,144 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x23, #0x0\n"
- "mov x22, #0x0\n"
+ "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"1:" // Tile loop
- "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x28, #0x2\n"
"mov x27, #0x2\n"
- "mov x26, #0x2\n"
- "str x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x23, x25\n" // offset = tile_i * ld_input_row
+ "str x9, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mov x16, #0x10\n" // cntb _, ALL, #1
"ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
"ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x23, x24\n" // offset = tile_i * ld_output_row
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "madd x21, x22, x15, x21\n" // offset += tile_j * ld_input_col
- "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x15, x15, #0x1\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x22, x14, x20\n" // offset += tile_j * ld_output_col
- "lsr x22, %x[n_channels], #0x3\n"
- "add x11, x15, x15\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x27\n" // offset *= kernel_stride * output_size
- "add x13, x13, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x9, x13, x25, LSL #1\n"
- "mul x20, x20, x26\n" // offset *= output_tile_size
- "add x28, x9, x25, LSL #1\n"
- "add x12, x12, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "lsr x24, %x[n_channels], #0x3\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
"ld1r { v27.8h }, [x20]\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x23, #0x0\n"
"ld1r { v26.8h }, [x20]\n"
- "add x27, x28, x25, LSL #1\n"
- "add x26, x11, x15\n"
- "add x25, x12, x24, LSL #1\n"
+ "mul x22, x10, x26\n" // offset = tile_i * ld_input_row
+ "ldr x12, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x21, XZR, x16\n"
+ "mul x20, x10, x25\n" // offset = tile_i * ld_output_row
+ "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x22, x9, x15, x22\n" // offset += tile_j * ld_input_col
+ "lsl x15, x15, #0x1\n"
+ "madd x20, x9, x14, x20\n" // offset += tile_j * ld_output_col
"lsl x14, x14, #0x1\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q25, [x10, #0x0]\n"
- "ldr q0, [x10, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x10, #0x20]\n"
- "ldr q2, [x10, #0x30]\n"
- "ldr q3, [x10, #0x40]\n"
- "ldr q4, [x10, #0x50]\n"
- "ldr q5, [x10, #0x60]\n"
- "ldr q6, [x10, #0x70]\n"
- "ldr q7, [x10, #0x80]\n"
- "ldr q8, [x10, #0x90]\n"
- "add x10, x10, #0xa0\n"
- "ldr q9, [x9, x15]\n"
+ "mul x22, x22, x28\n" // offset *= kernel_stride * output_size
+ "add x10, x15, x15\n"
+ "add x9, x10, x15\n"
+ "mul x20, x20, x27\n" // offset *= output_tile_size
+ "add x13, x13, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x28, x13, x26, LSL #1\n"
+ "add x27, x28, x26, LSL #1\n"
+ "add x26, x27, x26, LSL #1\n"
+ "add x12, x12, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x25, x12, x25, LSL #1\n"
+ "cbz x24, 4f\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q0, [x11, #0x10]\n"
+ "cmp x16, x24, LSL #4\n"
+ "ldr q1, [x11, #0x20]\n"
+ "ldr q2, [x11, #0x30]\n"
+ "ldr q3, [x11, #0x40]\n"
+ "ldr q4, [x11, #0x50]\n"
+ "ldr q5, [x11, #0x60]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "ldr q7, [x11, #0x80]\n"
+ "ldr q8, [x11, #0x90]\n"
+ "add x11, x11, #0xa0\n"
+ "ldr q9, [x28, x15]\n"
"ld1 { v10.8h }, [x13]\n"
- "ldr q11, [x13, x26]\n"
- "ldr q12, [x9, x11]\n"
- "ldr q13, [x28, x15]\n"
+ "ldr q11, [x13, x9]\n"
+ "ldr q12, [x28, x10]\n"
+ "ldr q13, [x27, x15]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v24.16b, v25.16b\n fmla v24.8h, v4.8h, v9.8h\n"
"mov v23.16b, v25.16b\n fmla v23.8h, v3.8h, v9.8h\n"
- "add x23, x23, #0x10\n"
- "cmp x23, x22, LSL #4\n"
+ "add x16, x16, #0x10\n"
+ "add x21, x21, #0x10\n"
"mov v22.16b, v25.16b\n fmla v22.8h, v1.8h, v9.8h\n"
"mov v21.16b, v25.16b\n fmla v21.8h, v0.8h, v9.8h\n"
- "ld1 { v18.8h }, [x27]\n"
- "ldr q25, [x10, #0x0]\n"
+ "ld1 { v18.8h }, [x26]\n"
+ "ldr q25, [x11, #0x0]\n"
+ "cmp x16, x24, LSL #4\n"
+ "add x23, x23, #0x10\n"
"fmla v24.8h, v0.8h, v10.8h\n"
- "ldr q20, [x28, x11]\n"
+ "ldr q20, [x27, x10]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
- "ldr q17, [x27, x26]\n"
+ "ldr q17, [x26, x9]\n"
"fmla v22.8h, v2.8h, v12.8h\n"
"fmla v21.8h, v1.8h, v12.8h\n"
- "add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
"fmla v24.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"ldr q16, [x13, x15]\n"
"fmla v22.8h, v6.8h, v18.8h\n"
- "ldr q18, [x13, x11]\n"
- "fmla v21.8h, v3.8h, v13.8h\n"
+ "ldr q18, [x13, x10]\n"
"add x13, x13, #0x10\n"
+ "fmla v21.8h, v3.8h, v13.8h\n"
"fmla v24.8h, v7.8h, v13.8h\n"
"fmla v23.8h, v6.8h, v13.8h\n"
"fmla v22.8h, v4.8h, v13.8h\n"
"fmla v21.8h, v8.8h, v17.8h\n"
- "ld1 { v17.8h }, [x9]\n"
+ "ld1 { v17.8h }, [x28]\n"
"fmla v24.8h, v1.8h, v16.8h\n"
"fmla v23.8h, v0.8h, v16.8h\n"
- "ldr q16, [x9, x26]\n"
- "add x9, x9, #0x10\n"
+ "ldr q16, [x28, x9]\n"
+ "add x28, x28, #0x10\n"
"fmla v22.8h, v5.8h, v20.8h\n"
"fmla v21.8h, v4.8h, v20.8h\n"
- "ldr q4, [x10, #0x50]\n"
+ "ldr q4, [x11, #0x50]\n"
"fmla v24.8h, v2.8h, v18.8h\n"
"fmla v23.8h, v1.8h, v18.8h\n"
- "ld1 { v19.8h }, [x28]\n"
- "ldr q1, [x10, #0x20]\n"
+ "ld1 { v19.8h }, [x27]\n"
+ "ldr q1, [x11, #0x20]\n"
"fmla v22.8h, v0.8h, v17.8h\n"
- "ldr q0, [x10, #0x10]\n"
+ "ldr q0, [x11, #0x10]\n"
"fmla v21.8h, v2.8h, v16.8h\n"
- "ldr q2, [x10, #0x30]\n"
+ "ldr q2, [x11, #0x30]\n"
"fmla v24.8h, v8.8h, v20.8h\n"
"fmla v23.8h, v7.8h, v20.8h\n"
- "ldr q18, [x28, x26]\n"
- "add x28, x28, #0x10\n"
- "ldr q13, [x28, x15]\n"
+ "ldr q18, [x27, x9]\n"
+ "add x27, x27, #0x10\n"
+ "ldr q13, [x27, x15]\n"
"fmla v22.8h, v3.8h, v19.8h\n"
"fmla v21.8h, v5.8h, v18.8h\n"
"fmla v24.8h, v3.8h, v17.8h\n"
- "ldr q17, [x27, x15]\n"
- "ldr q3, [x10, #0x40]\n"
+ "ldr q17, [x26, x15]\n"
+ "ldr q3, [x11, #0x40]\n"
"fmla v23.8h, v5.8h, v16.8h\n"
- "ldr q16, [x27, x11]\n"
- "ldr q5, [x10, #0x60]\n"
+ "ldr q16, [x26, x10]\n"
+ "ldr q5, [x11, #0x60]\n"
+ "add x26, x26, #0x10\n"
"fmla v22.8h, v7.8h, v17.8h\n"
"fmla v21.8h, v6.8h, v17.8h\n"
- "ldr q11, [x13, x26]\n"
+ "ldr q11, [x13, x9]\n"
"fmla v24.8h, v6.8h, v19.8h\n"
- "ldr q9, [x9, x15]\n"
+ "ldr q9, [x28, x15]\n"
+ "ldr q6, [x11, #0x70]\n"
"fmla v23.8h, v8.8h, v18.8h\n"
"ld1 { v10.8h }, [x13]\n"
- "ldr q6, [x10, #0x70]\n"
"fmla v22.8h, v8.8h, v16.8h\n"
+ "ldr q8, [x11, #0x90]\n"
"fmla v21.8h, v7.8h, v16.8h\n"
- "ldr q12, [x9, x11]\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q12, [x28, x10]\n"
+ "ldr q7, [x11, #0x80]\n"
+ "add x11, x11, #0xa0\n"
"fmax v24.8h, v24.8h, v27.8h\n"
"fmax v23.8h, v23.8h, v27.8h\n"
- "ldr q8, [x10, #0x90]\n"
"fmax v22.8h, v22.8h, v27.8h\n"
"fmax v21.8h, v21.8h, v27.8h\n"
- "add x27, x27, #0x10\n"
"fmin v24.8h, v24.8h, v26.8h\n"
"fmin v23.8h, v23.8h, v26.8h\n"
- "st1 { v24.8h }, [x12]\n"
- "add x10, x10, #0xa0\n"
"fmin v22.8h, v22.8h, v26.8h\n"
"fmin v21.8h, v21.8h, v26.8h\n"
+ "st1 { v24.8h }, [x12]\n"
"str q23, [x12, x14]\n"
"add x12, x12, #0x10\n"
"st1 { v22.8h }, [x25]\n"
@@ -236,58 +236,58 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"mov v23.16b, v25.16b\n fmla v23.8h, v3.8h, v9.8h\n"
"mov v22.16b, v25.16b\n fmla v22.8h, v1.8h, v9.8h\n"
"mov v21.16b, v25.16b\n fmla v21.8h, v0.8h, v9.8h\n"
- "ld1 { v18.8h }, [x27]\n"
+ "ld1 { v18.8h }, [x26]\n"
"fmla v24.8h, v0.8h, v10.8h\n"
- "ldr q20, [x28, x11]\n"
+ "ldr q20, [x27, x10]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
- "ldr q17, [x27, x26]\n"
+ "ldr q17, [x26, x9]\n"
"fmla v22.8h, v2.8h, v12.8h\n"
"fmla v21.8h, v1.8h, v12.8h\n"
"fmla v24.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"ldr q16, [x13, x15]\n"
"fmla v22.8h, v6.8h, v18.8h\n"
- "ldr q18, [x13, x11]\n"
- "fmla v21.8h, v3.8h, v13.8h\n"
+ "ldr q18, [x13, x10]\n"
"add x13, x13, #0x10\n"
+ "fmla v21.8h, v3.8h, v13.8h\n"
"fmla v24.8h, v7.8h, v13.8h\n"
"fmla v23.8h, v6.8h, v13.8h\n"
"fmla v22.8h, v4.8h, v13.8h\n"
"fmla v21.8h, v8.8h, v17.8h\n"
- "ld1 { v17.8h }, [x9]\n"
+ "ld1 { v17.8h }, [x28]\n"
"fmla v24.8h, v1.8h, v16.8h\n"
"fmla v23.8h, v0.8h, v16.8h\n"
- "ldr q16, [x9, x26]\n"
- "add x9, x9, #0x10\n"
+ "ldr q16, [x28, x9]\n"
+ "add x28, x28, #0x10\n"
"fmla v22.8h, v5.8h, v20.8h\n"
"fmla v21.8h, v4.8h, v20.8h\n"
"fmla v24.8h, v2.8h, v18.8h\n"
"fmla v23.8h, v1.8h, v18.8h\n"
- "ld1 { v19.8h }, [x28]\n"
+ "ld1 { v19.8h }, [x27]\n"
"fmla v22.8h, v0.8h, v17.8h\n"
"fmla v21.8h, v2.8h, v16.8h\n"
"fmla v24.8h, v8.8h, v20.8h\n"
"fmla v23.8h, v7.8h, v20.8h\n"
- "ldr q18, [x28, x26]\n"
- "add x28, x28, #0x10\n"
+ "ldr q18, [x27, x9]\n"
+ "add x27, x27, #0x10\n"
"fmla v22.8h, v3.8h, v19.8h\n"
"fmla v21.8h, v5.8h, v18.8h\n"
"fmla v24.8h, v3.8h, v17.8h\n"
- "ldr q17, [x27, x15]\n"
+ "ldr q17, [x26, x15]\n"
"fmla v23.8h, v5.8h, v16.8h\n"
- "ldr q16, [x27, x11]\n"
+ "ldr q16, [x26, x10]\n"
+ "add x26, x26, #0x10\n"
"fmla v22.8h, v7.8h, v17.8h\n"
"fmla v21.8h, v6.8h, v17.8h\n"
- "add x27, x27, #0x10\n"
"fmla v24.8h, v6.8h, v19.8h\n"
"fmla v23.8h, v8.8h, v18.8h\n"
- "fmax v24.8h, v24.8h, v27.8h\n"
"fmla v22.8h, v8.8h, v16.8h\n"
"fmla v21.8h, v7.8h, v16.8h\n"
+ "fmax v24.8h, v24.8h, v27.8h\n"
"fmax v23.8h, v23.8h, v27.8h\n"
+ "fmin v24.8h, v24.8h, v26.8h\n"
"fmax v22.8h, v22.8h, v27.8h\n"
"fmax v21.8h, v21.8h, v27.8h\n"
- "fmin v24.8h, v24.8h, v26.8h\n"
"fmin v23.8h, v23.8h, v26.8h\n"
"st1 { v24.8h }, [x12]\n"
"fmin v22.8h, v22.8h, v26.8h\n"
@@ -300,21 +300,21 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 57f\n"
- "ldr q25, [x10, #0x0]\n"
- "ldr q0, [x10, #0x10]\n"
- "add x24, x9, x15\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q0, [x11, #0x10]\n"
+ "add x24, x28, x15\n"
"add x23, x13, XZR\n"
- "ldr q1, [x10, #0x20]\n"
- "ldr q2, [x10, #0x30]\n"
- "add x22, x13, x26\n"
- "add x21, x9, x11\n"
- "ldr q3, [x10, #0x40]\n"
- "ldr q4, [x10, #0x50]\n"
- "add x20, x28, x15\n"
- "ldr q5, [x10, #0x60]\n"
- "ldr q6, [x10, #0x70]\n"
- "ldr q7, [x10, #0x80]\n"
- "ldr q8, [x10, #0x90]\n"
+ "ldr q1, [x11, #0x20]\n"
+ "ldr q2, [x11, #0x30]\n"
+ "add x22, x13, x9\n"
+ "add x21, x28, x10\n"
+ "ldr q3, [x11, #0x40]\n"
+ "ldr q4, [x11, #0x50]\n"
+ "add x20, x27, x15\n"
+ "ldr q5, [x11, #0x60]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "ldr q7, [x11, #0x80]\n"
+ "ldr q8, [x11, #0x90]\n"
"tbz %x[n_channels], #2, 6f\n"
"ldr d9, [x24], #0x8\n"
"ldr d10, [x23], #0x8\n"
@@ -365,15 +365,15 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"8:" // Tile loop: Oddments: Load inputs: (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: End
"mov v28.16b, v25.16b\n fmla v28.8h, v4.8h, v9.8h\n"
"mov v29.16b, v25.16b\n fmla v29.8h, v3.8h, v9.8h\n"
- "add x20, x27, XZR\n"
+ "add x20, x26, XZR\n"
"mov v30.16b, v25.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"mov v31.16b, v25.16b\n fmla v31.8h, v0.8h, v9.8h\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "fmla v28.8h, v5.8h, v12.8h\n"
- "fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
+ "fmla v28.8h, v5.8h, v12.8h\n"
+ "fmla v29.8h, v4.8h, v12.8h\n"
"tbz %x[n_channels], #2, 10f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
@@ -396,10 +396,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"12:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v30.8h, v6.8h, v9.8h\n"
"fmla v28.8h, v7.8h, v13.8h\n"
- "add x20, x27, x26\n"
+ "add x20, x26, x9\n"
"fmla v29.8h, v6.8h, v13.8h\n"
- "fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v3.8h, v13.8h\n"
+ "fmla v30.8h, v4.8h, v13.8h\n"
"tbz %x[n_channels], #2, 14f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
@@ -444,7 +444,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"20:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: End
"fmla v28.8h, v1.8h, v12.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "add x20, x13, x11\n"
+ "add x20, x13, x10\n"
"tbz %x[n_channels], #2, 22f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
@@ -467,7 +467,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"24:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: End
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "add x20, x28, x11\n"
+ "add x20, x27, x10\n"
"tbz %x[n_channels], #2, 26f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
@@ -490,7 +490,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"28:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: End
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "add x20, x9, XZR\n"
+ "add x20, x28, XZR\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
"tbz %x[n_channels], #2, 30f\n"
@@ -515,7 +515,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"32:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: End
"fmla v28.8h, v3.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "add x20, x9, x26\n"
+ "add x20, x28, x9\n"
"tbz %x[n_channels], #2, 34f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
@@ -538,7 +538,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"36:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v29.8h, v5.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v12.8h\n"
- "add x20, x28, XZR\n"
+ "add x20, x27, XZR\n"
"tbz %x[n_channels], #2, 38f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
@@ -561,7 +561,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"40:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: End
"fmla v28.8h, v6.8h, v9.8h\n"
"fmla v30.8h, v3.8h, v9.8h\n"
- "add x20, x28, x26\n"
+ "add x20, x27, x9\n"
"tbz %x[n_channels], #2, 42f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
@@ -584,7 +584,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"44:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: End
"fmla v29.8h, v8.8h, v10.8h\n"
"fmla v31.8h, v5.8h, v10.8h\n"
- "add x20, x27, x15\n"
+ "add x20, x26, x15\n"
"tbz %x[n_channels], #2, 46f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
@@ -607,7 +607,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"48:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
"fmla v30.8h, v7.8h, v11.8h\n"
"fmla v31.8h, v6.8h, v11.8h\n"
- "add x20, x27, x11\n"
+ "add x20, x26, x10\n"
"tbz %x[n_channels], #2, 50f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
@@ -632,28 +632,28 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"fmla v31.8h, v7.8h, v12.8h\n"
"fmax v28.8h, v28.8h, v27.8h\n"
"fmax v29.8h, v29.8h, v27.8h\n"
+ "fmin v28.8h, v28.8h, v26.8h\n"
"fmax v30.8h, v30.8h, v27.8h\n"
"fmax v31.8h, v31.8h, v27.8h\n"
- "fmin v28.8h, v28.8h, v26.8h\n"
"fmin v29.8h, v29.8h, v26.8h\n"
"fmin v30.8h, v30.8h, v26.8h\n"
"fmin v31.8h, v31.8h, v26.8h\n"
"tbz %x[n_channels], #2, 54f\n"
"mov x21, x12\n"
"mov x20, x25\n"
- "st1 { v28.d }[0], [x21], x14\n"
- "st1 { v30.d }[0], [x20], x14\n"
"add x12, x12, #0x8\n"
"add x25, x25, #0x8\n"
+ "st1 { v28.d }[0], [x21], x14\n"
+ "st1 { v30.d }[0], [x20], x14\n"
"st1 { v29.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #1, 53f\n"
"mov x21, x12\n"
"mov x20, x25\n"
- "st1 { v28.s }[2], [x21], x14\n"
- "st1 { v30.s }[2], [x20], x14\n"
"add x12, x12, #0x4\n"
"add x25, x25, #0x4\n"
+ "st1 { v28.s }[2], [x21], x14\n"
+ "st1 { v30.s }[2], [x20], x14\n"
"st1 { v29.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"tbz %x[n_channels], #0, 56f\n"
@@ -677,10 +677,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"tbz %x[n_channels], #1, 55f\n"
"mov x21, x12\n"
"mov x20, x25\n"
- "st1 { v28.s }[0], [x21], x14\n"
- "st1 { v30.s }[0], [x20], x14\n"
"add x12, x12, #0x4\n"
"add x25, x25, #0x4\n"
+ "st1 { v28.s }[0], [x21], x14\n"
+ "st1 { v30.s }[0], [x20], x14\n"
"st1 { v29.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"tbz %x[n_channels], #0, 56f\n"
@@ -700,20 +700,20 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"st1 { v31.h }[0], [x20]\n"
"56:" // Tile loop: Oddments: Store: Bit 2: End
"57:" // Tile loop: End
- "ldr x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x22, x22, #0x1\n"
- "add x21, x23, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x22, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x23, x23, x21, LT\n"
- "csel x22, x22, XZR, LT\n"
- "cmp x23, x20\n"
+ "ldr x9, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x9, x9, #0x1\n"
+ "add x20, x10, #0x1\n"
+ "cmp x9, x22\n"
+ "csel x10, x10, x20, LT\n"
+ "csel x9, x9, XZR, LT\n"
+ "cmp x10, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index c9a554e9ad..90da1a803e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -78,237 +78,237 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x16, #0x10\n" // cntb _, ALL, #1
- "lsr x15, %x[n_channels], #0x3\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v27.8h }, [x20]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "mov x17, #0x10\n" // cntb _, ALL, #1
+ "lsr x16, %x[n_channels], #0x3\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v27.8h }, [x21]\n"
"ld1r { v26.8h }, [x20]\n"
- "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "mov x28, #0x0\n"
- "sub x27, XZR, x16\n"
- "cbz x15, 3f\n"
- "ldr q25, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "cmp x16, x15, LSL #4\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "add x14, x14, #0xa0\n"
- "ldp x21, x20, [x13, #0x0]\n"
- "ldr q9, [x21, x28]\n"
- "ldr q10, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "ldr q11, [x21, x28]\n"
- "ldr q12, [x20, x28]\n"
- "ldr x20, [x13, #0x20]\n"
- "ldr q13, [x20, x28]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [x22, #0x0]\n"
+ "ldp x10, x9, [x22, #0x10]\n"
+ "sub x28, XZR, x17\n"
+ "cbz x16, 3f\n"
+ "ldr q25, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "cmp x17, x16, LSL #4\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q4, [x15, #0x50]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
+ "add x15, x15, #0xa0\n"
+ "ldp x24, x23, [x14, #0x0]\n"
+ "ldp x22, x21, [x14, #0x10]\n"
+ "ldr x20, [x14, #0x20]\n"
+ "ldr q9, [x24, x13]\n"
+ "ldr q10, [x23, x13]\n"
+ "ldr q11, [x22, x13]\n"
+ "ldr q12, [x21, x13]\n"
+ "ldr q13, [x20, x13]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v24.16b, v25.16b\n fmla v24.8h, v4.8h, v9.8h\n"
"mov v23.16b, v25.16b\n fmla v23.8h, v3.8h, v9.8h\n"
- "ldr x21, [x13, #0x28]\n"
- "ldr x20, [x13, #0x30]\n"
+ "ldr x22, [x14, #0x28]\n"
+ "ldr x21, [x14, #0x30]\n"
"mov v22.16b, v25.16b\n fmla v22.8h, v1.8h, v9.8h\n"
"mov v21.16b, v25.16b\n fmla v21.8h, v0.8h, v9.8h\n"
- "ldr q18, [x21, x28]\n"
- "ldr q25, [x14, #0x0]\n"
+ "ldr q25, [x15, #0x0]\n"
+ "ldr x24, [x14, #0x38]\n"
+ "ldr x20, [x14, #0x48]\n"
+ "ldr x23, [x14, #0x40]\n"
+ "add x28, x28, #0x10\n"
+ "ldr q18, [x22, x13]\n"
+ "ldr x22, [x14, #0x50]\n"
"fmla v24.8h, v0.8h, v10.8h\n"
"fmla v23.8h, v2.8h, v11.8h\n"
- "ldr q17, [x20, x28]\n"
- "ldr x21, [x13, #0x38]\n"
+ "ldr q17, [x21, x13]\n"
+ "ldr x21, [x14, #0x58]\n"
+ "ldr q20, [x20, x13]\n"
"fmla v22.8h, v2.8h, v12.8h\n"
"fmla v21.8h, v1.8h, v12.8h\n"
- "ldr x20, [x13, #0x48]\n"
- "ldr q20, [x20, x28]\n"
+ "ldr x20, [x14, #0x60]\n"
+ "ldr x27, [x14, #0x68]\n"
+ "ldr x26, [x14, #0x70]\n"
"fmla v24.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
- "ldr q16, [x21, x28]\n"
- "ldr x20, [x13, #0x40]\n"
+ "ldr q16, [x24, x13]\n"
+ "ldr x25, [x14, #0x78]\n"
"fmla v22.8h, v6.8h, v18.8h\n"
- "ldr q18, [x20, x28]\n"
+ "ldr q18, [x23, x13]\n"
+ "ldp x24, x23, [x14, #0x0]\n"
"fmla v21.8h, v3.8h, v13.8h\n"
- "ldr x20, [x13, #0x50]\n"
"fmla v24.8h, v7.8h, v13.8h\n"
"fmla v23.8h, v6.8h, v13.8h\n"
- "ldr x22, [x13, #0x58]\n"
- "ldr x21, [x13, #0x60]\n"
"fmla v22.8h, v4.8h, v13.8h\n"
"fmla v21.8h, v8.8h, v17.8h\n"
- "ldr q17, [x20, x28]\n"
- "ldr x20, [x13, #0x68]\n"
+ "ldr q17, [x22, x13]\n"
"fmla v24.8h, v1.8h, v16.8h\n"
"fmla v23.8h, v0.8h, v16.8h\n"
- "ldr q16, [x22, x28]\n"
- "ldr x26, [x13, #0x70]\n"
+ "ldr q16, [x21, x13]\n"
+ "ldp x22, x21, [x14, #0x10]\n"
"fmla v22.8h, v5.8h, v20.8h\n"
"fmla v21.8h, v4.8h, v20.8h\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr x25, [x13, #0x78]\n"
+ "ldr q4, [x15, #0x50]\n"
"fmla v24.8h, v2.8h, v18.8h\n"
"fmla v23.8h, v1.8h, v18.8h\n"
- "ldr q19, [x21, x28]\n"
- "ldr q1, [x14, #0x20]\n"
+ "ldr q19, [x20, x13]\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr x20, [x14, #0x20]\n"
"fmla v22.8h, v0.8h, v17.8h\n"
- "ldr q0, [x14, #0x10]\n"
+ "ldr q0, [x15, #0x10]\n"
"fmla v21.8h, v2.8h, v16.8h\n"
- "ldr q2, [x14, #0x30]\n"
+ "ldr q2, [x15, #0x30]\n"
"fmla v24.8h, v8.8h, v20.8h\n"
+ "ldr q13, [x20, x17]\n"
"fmla v23.8h, v7.8h, v20.8h\n"
- "ldr q18, [x20, x28]\n"
- "ldp x24, x23, [x13, #0x0]\n"
+ "ldr q18, [x27, x13]\n"
"fmla v22.8h, v3.8h, v19.8h\n"
"fmla v21.8h, v5.8h, v18.8h\n"
- "ldp x22, x21, [x13, #0x10]\n"
- "ldr x20, [x13, #0x20]\n"
- "ldr q13, [x20, x16]\n"
"fmla v24.8h, v3.8h, v17.8h\n"
- "ldr q17, [x26, x28]\n"
+ "ldr q17, [x26, x13]\n"
+ "ldr q3, [x15, #0x40]\n"
"fmla v23.8h, v5.8h, v16.8h\n"
- "ldr q16, [x25, x28]\n"
- "ldr q3, [x14, #0x40]\n"
+ "ldr q16, [x25, x13]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "add x13, x13, #0x10\n"
"fmla v22.8h, v7.8h, v17.8h\n"
"fmla v21.8h, v6.8h, v17.8h\n"
- "ldr q11, [x22, x16]\n"
- "ldr q5, [x14, #0x60]\n"
+ "ldr q11, [x22, x17]\n"
"fmla v24.8h, v6.8h, v19.8h\n"
+ "ldr q9, [x24, x17]\n"
+ "ldr q6, [x15, #0x70]\n"
"fmla v23.8h, v8.8h, v18.8h\n"
- "ldr q9, [x24, x16]\n"
- "ldr q10, [x23, x16]\n"
+ "ldr q10, [x23, x17]\n"
"fmla v22.8h, v8.8h, v16.8h\n"
+ "ldr q8, [x15, #0x90]\n"
"fmla v21.8h, v7.8h, v16.8h\n"
- "ldr q12, [x21, x16]\n"
- "ldr q6, [x14, #0x70]\n"
+ "ldr q12, [x21, x17]\n"
+ "add x17, x17, #0x10\n"
+ "ldr q7, [x15, #0x80]\n"
+ "cmp x17, x16, LSL #4\n"
+ "add x15, x15, #0xa0\n"
"fmax v24.8h, v24.8h, v27.8h\n"
"fmax v23.8h, v23.8h, v27.8h\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
"fmax v22.8h, v22.8h, v27.8h\n"
"fmax v21.8h, v21.8h, v27.8h\n"
- "add x16, x16, #0x10\n"
- "add x27, x27, #0x10\n"
"fmin v24.8h, v24.8h, v26.8h\n"
"fmin v23.8h, v23.8h, v26.8h\n"
- "cmp x16, x15, LSL #4\n"
"fmin v22.8h, v22.8h, v26.8h\n"
"fmin v21.8h, v21.8h, v26.8h\n"
- "add x28, x28, #0x10\n"
- "str q24, [x12, x27]\n"
- "add x14, x14, #0xa0\n"
- "str q23, [x11, x27]\n"
- "str q22, [x10, x27]\n"
- "str q21, [x9, x27]\n"
+ "str q24, [x12, x28]\n"
+ "str q23, [x11, x28]\n"
+ "str q22, [x10, x28]\n"
+ "str q21, [x9, x28]\n"
"blt 1b\n"
"2:" // Channel tail
"mov v24.16b, v25.16b\n fmla v24.8h, v4.8h, v9.8h\n"
"mov v23.16b, v25.16b\n fmla v23.8h, v3.8h, v9.8h\n"
- "ldr x21, [x13, #0x28]\n"
- "ldr x20, [x13, #0x30]\n"
+ "ldr x22, [x14, #0x28]\n"
+ "ldr x21, [x14, #0x30]\n"
"mov v22.16b, v25.16b\n fmla v22.8h, v1.8h, v9.8h\n"
"mov v21.16b, v25.16b\n fmla v21.8h, v0.8h, v9.8h\n"
- "ldr q18, [x21, x28]\n"
- "ldr x21, [x13, #0x38]\n"
+ "ldr x27, [x14, #0x38]\n"
+ "ldr x20, [x14, #0x48]\n"
+ "ldr x26, [x14, #0x40]\n"
+ "ldr x25, [x14, #0x50]\n"
+ "add x28, x28, #0x10\n"
+ "ldr q18, [x22, x13]\n"
+ "ldr x24, [x14, #0x58]\n"
"fmla v24.8h, v0.8h, v10.8h\n"
+ "ldr q20, [x20, x13]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
- "ldr q17, [x20, x28]\n"
- "ldr x20, [x13, #0x48]\n"
- "ldr q20, [x20, x28]\n"
+ "ldr q17, [x21, x13]\n"
"fmla v22.8h, v2.8h, v12.8h\n"
"fmla v21.8h, v1.8h, v12.8h\n"
- "ldr x20, [x13, #0x40]\n"
+ "ldr x23, [x14, #0x60]\n"
+ "ldr x22, [x14, #0x68]\n"
+ "ldr x21, [x14, #0x70]\n"
"fmla v24.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
- "ldr q16, [x21, x28]\n"
- "ldr x21, [x13, #0x50]\n"
+ "ldr q16, [x27, x13]\n"
+ "ldr x20, [x14, #0x78]\n"
"fmla v22.8h, v6.8h, v18.8h\n"
- "ldr q18, [x20, x28]\n"
+ "ldr q18, [x26, x13]\n"
"fmla v21.8h, v3.8h, v13.8h\n"
- "ldr x20, [x13, #0x58]\n"
"fmla v24.8h, v7.8h, v13.8h\n"
"fmla v23.8h, v6.8h, v13.8h\n"
- "ldr x23, [x13, #0x60]\n"
- "ldr x22, [x13, #0x68]\n"
"fmla v22.8h, v4.8h, v13.8h\n"
"fmla v21.8h, v8.8h, v17.8h\n"
- "ldr q17, [x21, x28]\n"
- "ldr x21, [x13, #0x70]\n"
+ "ldr q17, [x25, x13]\n"
"fmla v24.8h, v1.8h, v16.8h\n"
"fmla v23.8h, v0.8h, v16.8h\n"
- "ldr q16, [x20, x28]\n"
- "ldr x20, [x13, #0x78]\n"
+ "ldr q16, [x24, x13]\n"
"fmla v22.8h, v5.8h, v20.8h\n"
"fmla v21.8h, v4.8h, v20.8h\n"
- "add x27, x27, #0x10\n"
"fmla v24.8h, v2.8h, v18.8h\n"
"fmla v23.8h, v1.8h, v18.8h\n"
- "ldr q19, [x23, x28]\n"
+ "ldr q19, [x23, x13]\n"
"fmla v22.8h, v0.8h, v17.8h\n"
"fmla v21.8h, v2.8h, v16.8h\n"
"fmla v24.8h, v8.8h, v20.8h\n"
"fmla v23.8h, v7.8h, v20.8h\n"
- "ldr q18, [x22, x28]\n"
+ "ldr q18, [x22, x13]\n"
"fmla v22.8h, v3.8h, v19.8h\n"
"fmla v21.8h, v5.8h, v18.8h\n"
"fmla v24.8h, v3.8h, v17.8h\n"
- "ldr q17, [x21, x28]\n"
+ "ldr q17, [x21, x13]\n"
"fmla v23.8h, v5.8h, v16.8h\n"
- "ldr q16, [x20, x28]\n"
+ "ldr q16, [x20, x13]\n"
+ "add x13, x13, #0x10\n"
"fmla v22.8h, v7.8h, v17.8h\n"
"fmla v21.8h, v6.8h, v17.8h\n"
- "add x28, x28, #0x10\n"
"fmla v24.8h, v6.8h, v19.8h\n"
"fmla v23.8h, v8.8h, v18.8h\n"
- "fmax v24.8h, v24.8h, v27.8h\n"
"fmla v22.8h, v8.8h, v16.8h\n"
"fmla v21.8h, v7.8h, v16.8h\n"
+ "fmax v24.8h, v24.8h, v27.8h\n"
"fmax v23.8h, v23.8h, v27.8h\n"
+ "fmin v24.8h, v24.8h, v26.8h\n"
"fmax v22.8h, v22.8h, v27.8h\n"
"fmax v21.8h, v21.8h, v27.8h\n"
- "fmin v24.8h, v24.8h, v26.8h\n"
"fmin v23.8h, v23.8h, v26.8h\n"
- "str q24, [x12, x27]\n"
+ "str q24, [x12, x28]\n"
"fmin v22.8h, v22.8h, v26.8h\n"
"fmin v21.8h, v21.8h, v26.8h\n"
- "str q23, [x11, x27]\n"
- "str q22, [x10, x27]\n"
- "str q21, [x9, x27]\n"
+ "str q23, [x11, x28]\n"
+ "str q22, [x10, x28]\n"
+ "str q21, [x9, x28]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 56f\n"
- "ldr q25, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "mov x20, x28\n"
+ "ldr q25, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "mov x20, x13\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q4, [x15, #0x50]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
"add x12, x12, x20\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
"add x11, x11, x20\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
"add x10, x10, x20\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
"add x9, x9, x20\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "ldr x24, [x13, #0x0]\n"
- "ldr x23, [x13, #0x8]\n"
- "add x24, x24, x28\n"
- "add x23, x23, x28\n"
- "ldr x22, [x13, #0x10]\n"
- "ldr x21, [x13, #0x18]\n"
- "add x22, x22, x28\n"
- "add x21, x21, x28\n"
- "ldr x20, [x13, #0x20]\n"
- "add x20, x20, x28\n"
+ "ldr x24, [x14, #0x0]\n"
+ "ldr x23, [x14, #0x8]\n"
+ "ldr x22, [x14, #0x10]\n"
+ "ldr x21, [x14, #0x18]\n"
+ "ldr x20, [x14, #0x20]\n"
+ "add x24, x24, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
+ "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 5f\n"
"ld1 { v9.d }[0], [x24], #0x8\n"
"ld1 { v10.d }[0], [x23], #0x8\n"
@@ -359,16 +359,16 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"7:" // Oddments: Load inputs (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 2: End
"mov v28.16b, v25.16b\n fmla v28.8h, v4.8h, v9.8h\n"
"mov v29.16b, v25.16b\n fmla v29.8h, v3.8h, v9.8h\n"
- "ldr x20, [x13, #0x28]\n"
- "add x20, x20, x28\n"
+ "ldr x20, [x14, #0x28]\n"
"mov v30.16b, v25.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"mov v31.16b, v25.16b\n fmla v31.8h, v0.8h, v9.8h\n"
+ "add x20, x20, x13\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v2.8h, v11.8h\n"
- "fmla v28.8h, v5.8h, v12.8h\n"
- "fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v2.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
+ "fmla v28.8h, v5.8h, v12.8h\n"
+ "fmla v29.8h, v4.8h, v12.8h\n"
"tbz %x[n_channels], #2, 9f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 8f\n"
@@ -390,12 +390,12 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"ld1 { v9.h }[0], [x20], #0x2\n"
"11:" // Oddments: Load input (3, 0): Bit 2: End
"fmla v30.8h, v6.8h, v9.8h\n"
- "ldr x20, [x13, #0x30]\n"
+ "ldr x20, [x14, #0x30]\n"
"fmla v28.8h, v7.8h, v13.8h\n"
- "add x20, x20, x28\n"
"fmla v29.8h, v6.8h, v13.8h\n"
- "fmla v30.8h, v4.8h, v13.8h\n"
"fmla v31.8h, v3.8h, v13.8h\n"
+ "add x20, x20, x13\n"
+ "fmla v30.8h, v4.8h, v13.8h\n"
"tbz %x[n_channels], #2, 13f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 12f\n"
@@ -416,9 +416,9 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"14:" // Oddments: Load input (3, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v11.h }[0], [x20], #0x2\n"
"15:" // Oddments: Load input (3, 3): Bit 2: End
- "ldr x20, [x13, #0x38]\n"
+ "ldr x20, [x14, #0x38]\n"
"fmla v31.8h, v8.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 17f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
@@ -439,10 +439,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"18:" // Oddments: Load input (0, 1): Bit 2: Unset: Bit 1: Unset
"ld1 { v12.h }[0], [x20], #0x2\n"
"19:" // Oddments: Load input (0, 1): Bit 2: End
- "ldr x20, [x13, #0x40]\n"
+ "ldr x20, [x14, #0x40]\n"
"fmla v28.8h, v1.8h, v12.8h\n"
"fmla v29.8h, v0.8h, v12.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 21f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 20f\n"
@@ -463,10 +463,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"22:" // Oddments: Load input (0, 2): Bit 2: Unset: Bit 1: Unset
"ld1 { v9.h }[0], [x20], #0x2\n"
"23:" // Oddments: Load input (0, 2): Bit 2: End
- "ldr x20, [x13, #0x48]\n"
+ "ldr x20, [x14, #0x48]\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 25f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 24f\n"
@@ -487,12 +487,12 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"26:" // Oddments: Load input (2, 2): Bit 2: Unset: Bit 1: Unset
"ld1 { v10.h }[0], [x20], #0x2\n"
"27:" // Oddments: Load input (2, 2): Bit 2: End
- "ldr x20, [x13, #0x50]\n"
+ "ldr x20, [x14, #0x50]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "add x20, x20, x28\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 29f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
@@ -513,10 +513,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"30:" // Oddments: Load input (1, 0): Bit 2: Unset: Bit 1: Unset
"ld1 { v11.h }[0], [x20], #0x2\n"
"31:" // Oddments: Load input (1, 0): Bit 2: End
- "ldr x20, [x13, #0x58]\n"
+ "ldr x20, [x14, #0x58]\n"
"fmla v28.8h, v3.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 33f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 32f\n"
@@ -537,10 +537,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"34:" // Oddments: Load input (1, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v12.h }[0], [x20], #0x2\n"
"35:" // Oddments: Load input (1, 3): Bit 2: End
- "ldr x20, [x13, #0x60]\n"
+ "ldr x20, [x14, #0x60]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
"fmla v31.8h, v2.8h, v12.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 37f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 36f\n"
@@ -561,10 +561,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"38:" // Oddments: Load input (2, 0): Bit 2: Unset: Bit 1: Unset
"ld1 { v9.h }[0], [x20], #0x2\n"
"39:" // Oddments: Load input (2, 0): Bit 2: End
- "ldr x20, [x13, #0x68]\n"
+ "ldr x20, [x14, #0x68]\n"
"fmla v28.8h, v6.8h, v9.8h\n"
"fmla v30.8h, v3.8h, v9.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 41f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 40f\n"
@@ -585,10 +585,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"42:" // Oddments: Load input (2, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v10.h }[0], [x20], #0x2\n"
"43:" // Oddments: Load input (2, 3): Bit 2: End
- "ldr x20, [x13, #0x70]\n"
+ "ldr x20, [x14, #0x70]\n"
"fmla v29.8h, v8.8h, v10.8h\n"
"fmla v31.8h, v5.8h, v10.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 45f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 44f\n"
@@ -609,10 +609,10 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"46:" // Oddments: Load input (3, 1): Bit 2: Unset: Bit 1: Unset
"ld1 { v11.h }[0], [x20], #0x2\n"
"47:" // Oddments: Load input (3, 1): Bit 2: End
- "ldr x20, [x13, #0x78]\n"
+ "ldr x20, [x14, #0x78]\n"
"fmla v30.8h, v7.8h, v11.8h\n"
"fmla v31.8h, v6.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #2, 49f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
@@ -637,9 +637,9 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v31.8h, v7.8h, v12.8h\n"
"fmax v28.8h, v28.8h, v27.8h\n"
"fmax v29.8h, v29.8h, v27.8h\n"
+ "fmin v28.8h, v28.8h, v26.8h\n"
"fmax v30.8h, v30.8h, v27.8h\n"
"fmax v31.8h, v31.8h, v27.8h\n"
- "fmin v28.8h, v28.8h, v26.8h\n"
"fmin v29.8h, v29.8h, v26.8h\n"
"fmin v30.8h, v30.8h, v26.8h\n"
"fmin v31.8h, v31.8h, v26.8h\n"
@@ -687,7 +687,7 @@ void a64_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"56:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index 4e64a2bf2b..778a95072a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,52 +87,52 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x24, #0x0\n"
- "mov x23, #0x0\n"
+ "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"1:" // Tile loop
- "str x24, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x28, #0x3\n"
"mov x27, #0x3\n"
- "mov x26, #0x3\n"
- "str x23, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x24, x25\n" // offset = tile_i * ld_input_row
- "ldr x8, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x24, x22\n" // offset = tile_i * ld_output_row
- "mov x24, #0x10\n" // cntb _, ALL, #1
- "madd x21, x23, x8, x21\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x8, x8, #0x1\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x23, x17, x20\n" // offset += tile_j * ld_output_col
- "lsl x17, x17, #0x1\n"
- "lsr x23, %x[n_channels], #0x3\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x27\n" // offset *= kernel_stride * output_size
- "add x16, x16, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x13, x16, x25, LSL #1\n"
- "mul x20, x20, x26\n" // offset *= output_tile_size
- "add x12, x13, x25, LSL #1\n"
- "add x11, x8, x8\n"
- "add x15, x15, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "add x10, x12, x25, LSL #1\n"
- "add x9, x11, x8\n"
- "add x28, x15, x22, LSL #1\n"
+ "str x9, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mov x6, #0x10\n" // cntb _, ALL, #1
+ "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "lsr x17, %x[n_channels], #0x3\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
"ld1r { v15.8h }, [x20]\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x24, #0x0\n"
"ld1r { v14.8h }, [x20]\n"
- "add x27, x10, x25, LSL #1\n"
- "add x26, x9, x8\n"
- "add x25, x28, x22, LSL #1\n"
- "add x22, x17, x17\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x24\n"
- "cbz x23, 4f\n"
+ "mul x23, x10, x26\n" // offset = tile_i * ld_input_row
+ "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x22, XZR, x6\n"
+ "mul x21, x10, x25\n" // offset = tile_i * ld_output_row
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x23, x9, x7, x23\n" // offset += tile_j * ld_input_col
+ "lsl x7, x7, #0x1\n"
+ "madd x21, x9, x8, x21\n" // offset += tile_j * ld_output_col
+ "lsl x8, x8, #0x1\n"
+ "mul x23, x23, x28\n" // offset *= kernel_stride * output_size
+ "add x13, x7, x7\n"
+ "add x12, x13, x7\n"
+ "add x11, x12, x7\n"
+ "mul x21, x21, x27\n" // offset *= output_tile_size
+ "add x20, x8, x8\n"
+ "add x16, x16, x23, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x10, x16, x26, LSL #1\n"
+ "add x9, x10, x26, LSL #1\n"
+ "add x15, x15, x21, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x28, x9, x26, LSL #1\n"
+ "add x27, x15, x25, LSL #1\n"
+ "add x26, x28, x26, LSL #1\n"
+ "add x25, x27, x25, LSL #1\n"
+ "cbz x17, 4f\n"
"ldr q31, [x14, #0x0]\n"
"ldr q0, [x14, #0x10]\n"
- "cmp x24, x23, LSL #4\n"
+ "cmp x6, x17, LSL #4\n"
"ldr q1, [x14, #0x20]\n"
"ldr q2, [x14, #0x30]\n"
"ldr q3, [x14, #0x40]\n"
@@ -142,321 +142,321 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"ldr q7, [x14, #0x80]\n"
"ldr q8, [x14, #0x90]\n"
"add x14, x14, #0xa0\n"
- "ldr q9, [x12, x11]\n"
+ "ldr q9, [x9, x13]\n"
"ld1 { v10.8h }, [x16]\n"
- "ldr q11, [x16, x26]\n"
- "ld1 { v12.8h }, [x27]\n"
- "ldr q13, [x13, x11]\n"
+ "ldr q11, [x16, x11]\n"
+ "ld1 { v12.8h }, [x26]\n"
+ "ldr q13, [x10, x13]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "mov v29.16b, v31.16b\n fmla v29.8h, v7.8h, v9.8h\n"
- "mov v28.16b, v31.16b\n fmla v28.8h, v8.8h, v9.8h\n"
+ "mov v30.16b, v31.16b\n fmla v30.8h, v7.8h, v9.8h\n"
+ "mov v29.16b, v31.16b\n fmla v29.8h, v8.8h, v9.8h\n"
+ "add x6, x6, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "mov v28.16b, v31.16b\n fmla v28.8h, v6.8h, v9.8h\n"
+ "mov v27.16b, v31.16b\n fmla v27.8h, v5.8h, v9.8h\n"
+ "cmp x6, x17, LSL #4\n"
"add x24, x24, #0x10\n"
- "cmp x24, x23, LSL #4\n"
- "mov v27.16b, v31.16b\n fmla v27.8h, v6.8h, v9.8h\n"
- "fmla v29.8h, v4.8h, v13.8h\n"
- "add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
- "mov v26.16b, v31.16b\n fmla v26.8h, v5.8h, v9.8h\n"
- "mov v25.16b, v31.16b\n fmla v25.8h, v4.8h, v9.8h\n"
- "mov v24.16b, v31.16b\n fmla v24.8h, v3.8h, v9.8h\n"
- "fmla v28.8h, v0.8h, v10.8h\n"
- "ldr q23, [x12, x9]\n"
- "fmla v27.8h, v2.8h, v11.8h\n"
- "ldr q18, [x12, x8]\n"
- "mov v22.16b, v31.16b\n fmla v22.8h, v2.8h, v9.8h\n"
- "fmla v29.8h, v6.8h, v18.8h\n"
- "mov v21.16b, v31.16b\n fmla v21.8h, v0.8h, v9.8h\n"
- "fmla v28.8h, v5.8h, v13.8h\n"
- "fmla v27.8h, v3.8h, v13.8h\n"
- "fmla v26.8h, v2.8h, v13.8h\n"
- "fmla v25.8h, v1.8h, v13.8h\n"
- "fmla v24.8h, v0.8h, v13.8h\n"
- "ldr q17, [x16, x8]\n"
- "fmla v22.8h, v6.8h, v12.8h\n"
- "ldr q16, [x27, x26]\n"
- "mov v20.16b, v31.16b\n fmla v20.8h, v1.8h, v9.8h\n"
+ "mov v26.16b, v31.16b\n fmla v26.8h, v4.8h, v9.8h\n"
+ "mov v25.16b, v31.16b\n fmla v25.8h, v3.8h, v9.8h\n"
+ "mov v24.16b, v31.16b\n fmla v24.8h, v2.8h, v9.8h\n"
+ "mov v23.16b, v31.16b\n fmla v23.8h, v0.8h, v9.8h\n"
+ "fmla v30.8h, v4.8h, v13.8h\n"
+ "fmla v29.8h, v0.8h, v10.8h\n"
+ "ldr q22, [x9, x12]\n"
+ "fmla v28.8h, v2.8h, v11.8h\n"
+ "ldr q17, [x9, x7]\n"
+ "fmla v27.8h, v2.8h, v13.8h\n"
+ "fmla v26.8h, v1.8h, v13.8h\n"
+ "fmla v25.8h, v0.8h, v13.8h\n"
+ "fmla v24.8h, v6.8h, v12.8h\n"
+ "ldr q16, [x26, x11]\n"
+ "mov v21.16b, v31.16b\n fmla v21.8h, v1.8h, v9.8h\n"
"ldr q31, [x14, #0x0]\n"
- "fmla v29.8h, v0.8h, v17.8h\n"
- "fmla v21.8h, v8.8h, v16.8h\n"
- "ldr q16, [x16, x9]\n"
- "fmla v28.8h, v7.8h, v18.8h\n"
- "fmla v20.8h, v0.8h, v18.8h\n"
- "fmla v26.8h, v4.8h, v18.8h\n"
- "fmla v25.8h, v3.8h, v18.8h\n"
- "fmla v22.8h, v1.8h, v18.8h\n"
- "ld1 { v19.8h }, [x13]\n"
- "fmla v29.8h, v2.8h, v16.8h\n"
- "fmla v27.8h, v1.8h, v16.8h\n"
- "ld1 { v18.8h }, [x10]\n"
- "fmla v24.8h, v4.8h, v23.8h\n"
- "fmla v28.8h, v1.8h, v17.8h\n"
- "ldr q16, [x13, x26]\n"
- "fmla v20.8h, v2.8h, v23.8h\n"
- "fmla v21.8h, v1.8h, v23.8h\n"
- "fmla v29.8h, v8.8h, v23.8h\n"
- "fmla v27.8h, v7.8h, v23.8h\n"
- "fmla v25.8h, v5.8h, v23.8h\n"
- "ldr q17, [x10, x11]\n"
+ "fmla v30.8h, v6.8h, v17.8h\n"
+ "fmla v29.8h, v5.8h, v13.8h\n"
+ "fmla v28.8h, v3.8h, v13.8h\n"
+ "ldr q18, [x16, x7]\n"
+ "fmla v27.8h, v4.8h, v17.8h\n"
+ "fmla v23.8h, v8.8h, v16.8h\n"
+ "ldr q16, [x16, x12]\n"
+ "fmla v26.8h, v3.8h, v17.8h\n"
+ "fmla v21.8h, v0.8h, v17.8h\n"
+ "fmla v24.8h, v1.8h, v17.8h\n"
+ "fmla v30.8h, v0.8h, v18.8h\n"
+ "fmla v29.8h, v7.8h, v17.8h\n"
+ "ld1 { v20.8h }, [x10]\n"
+ "fmla v28.8h, v1.8h, v16.8h\n"
+ "fmla v25.8h, v4.8h, v22.8h\n"
+ "fmla v23.8h, v1.8h, v22.8h\n"
+ "fmla v26.8h, v5.8h, v22.8h\n"
+ "fmla v21.8h, v2.8h, v22.8h\n"
+ "fmla v27.8h, v0.8h, v20.8h\n"
+ "fmla v30.8h, v2.8h, v16.8h\n"
+ "ld1 { v17.8h }, [x28]\n"
+ "fmla v29.8h, v1.8h, v18.8h\n"
+ "ldr q16, [x10, x11]\n"
+ "fmla v28.8h, v7.8h, v22.8h\n"
+ "fmla v24.8h, v3.8h, v17.8h\n"
+ "fmla v25.8h, v2.8h, v16.8h\n"
+ "fmla v27.8h, v6.8h, v17.8h\n"
+ "ldr q19, [x10, x7]\n"
+ "fmla v30.8h, v8.8h, v22.8h\n"
+ "ldr q18, [x28, x13]\n"
+ "fmla v29.8h, v3.8h, v20.8h\n"
+ "ldr q17, [x28, x11]\n"
+ "fmla v28.8h, v5.8h, v16.8h\n"
+ "ldr q16, [x26, x7]\n"
+ "fmla v21.8h, v4.8h, v18.8h\n"
+ "fmla v23.8h, v3.8h, v18.8h\n"
+ "fmla v26.8h, v7.8h, v18.8h\n"
+ "fmla v24.8h, v5.8h, v18.8h\n"
+ "fmla v25.8h, v6.8h, v18.8h\n"
+ "fmla v27.8h, v8.8h, v18.8h\n"
+ "fmla v30.8h, v3.8h, v19.8h\n"
+ "fmla v21.8h, v6.8h, v16.8h\n"
+ "fmla v29.8h, v4.8h, v19.8h\n"
+ "fmla v23.8h, v5.8h, v17.8h\n"
"fmla v26.8h, v0.8h, v19.8h\n"
- "fmla v22.8h, v3.8h, v18.8h\n"
- "fmla v24.8h, v2.8h, v16.8h\n"
- "fmla v20.8h, v4.8h, v17.8h\n"
- "fmla v21.8h, v3.8h, v17.8h\n"
- "fmla v28.8h, v3.8h, v19.8h\n"
- "ldr q19, [x10, x26]\n"
- "fmla v27.8h, v5.8h, v16.8h\n"
- "ldr q16, [x27, x8]\n"
- "fmla v26.8h, v6.8h, v18.8h\n"
- "ldr q18, [x13, x8]\n"
- "fmla v25.8h, v7.8h, v17.8h\n"
- "fmla v22.8h, v5.8h, v17.8h\n"
- "fmla v24.8h, v6.8h, v17.8h\n"
- "fmla v21.8h, v5.8h, v19.8h\n"
- "fmla v20.8h, v6.8h, v16.8h\n"
- "fmla v26.8h, v8.8h, v17.8h\n"
- "fmla v22.8h, v7.8h, v16.8h\n"
- "ldr q17, [x27, x9]\n"
- "fmla v29.8h, v3.8h, v18.8h\n"
- "fmla v25.8h, v0.8h, v18.8h\n"
- "fmla v24.8h, v8.8h, v19.8h\n"
- "ldr q16, [x13, x9]\n"
- "fmla v20.8h, v8.8h, v17.8h\n"
- "add x13, x13, #0x10\n"
- "fmla v21.8h, v7.8h, v17.8h\n"
- "ldr q19, [x10, x9]\n"
- "fmla v28.8h, v4.8h, v18.8h\n"
- "fmla v26.8h, v1.8h, v18.8h\n"
- "ldr q17, [x10, x8]\n"
- "fmla v29.8h, v5.8h, v16.8h\n"
+ "fmla v24.8h, v7.8h, v16.8h\n"
+ "ldr q18, [x26, x12]\n"
+ "fmla v25.8h, v8.8h, v17.8h\n"
+ "ldr q16, [x10, x12]\n"
+ "fmla v27.8h, v1.8h, v19.8h\n"
+ "ldr q17, [x28, x7]\n"
"add x10, x10, #0x10\n"
- "fmla v27.8h, v4.8h, v16.8h\n"
- "fmla v25.8h, v2.8h, v16.8h\n"
- "fmla v24.8h, v1.8h, v16.8h\n"
- "ldr q16, [x16, x11]\n"
- "fmla v22.8h, v4.8h, v17.8h\n"
+ "fmla v21.8h, v8.8h, v18.8h\n"
+ "fmla v23.8h, v7.8h, v18.8h\n"
+ "ldr q19, [x28, x12]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v30.8h, v5.8h, v16.8h\n"
+ "fmla v28.8h, v4.8h, v16.8h\n"
+ "fmla v26.8h, v2.8h, v16.8h\n"
+ "fmla v25.8h, v1.8h, v16.8h\n"
+ "ldr q16, [x16, x13]\n"
+ "fmla v24.8h, v4.8h, v17.8h\n"
"add x16, x16, #0x10\n"
"ld1 { v10.8h }, [x16]\n"
- "fmla v20.8h, v3.8h, v17.8h\n"
- "fmla v21.8h, v4.8h, v19.8h\n"
+ "fmla v21.8h, v3.8h, v17.8h\n"
+ "fmla v27.8h, v7.8h, v17.8h\n"
+ "fmla v23.8h, v4.8h, v19.8h\n"
"ldr q4, [x14, #0x50]\n"
- "fmla v26.8h, v7.8h, v17.8h\n"
- "fmla v25.8h, v6.8h, v17.8h\n"
- "ld1 { v18.8h }, [x12]\n"
- "fmla v28.8h, v2.8h, v16.8h\n"
- "fmla v29.8h, v1.8h, v16.8h\n"
+ "fmla v26.8h, v6.8h, v17.8h\n"
+ "ld1 { v18.8h }, [x9]\n"
+ "fmla v29.8h, v2.8h, v16.8h\n"
+ "fmla v30.8h, v1.8h, v16.8h\n"
"ldr q1, [x14, #0x20]\n"
- "fmax v29.8h, v29.8h, v15.8h\n"
- "fmla v27.8h, v0.8h, v16.8h\n"
- "ldr q17, [x12, x26]\n"
- "fmla v24.8h, v7.8h, v19.8h\n"
- "add x12, x12, #0x10\n"
- "ldr q9, [x12, x11]\n"
- "fmla v20.8h, v5.8h, v19.8h\n"
- "fmla v22.8h, v0.8h, v18.8h\n"
+ "fmla v28.8h, v0.8h, v16.8h\n"
+ "ldr q17, [x9, x11]\n"
+ "fmla v25.8h, v7.8h, v19.8h\n"
+ "add x9, x9, #0x10\n"
+ "ldr q9, [x9, x13]\n"
+ "fmla v21.8h, v5.8h, v19.8h\n"
+ "fmla v24.8h, v0.8h, v18.8h\n"
"ldr q0, [x14, #0x10]\n"
- "fmla v21.8h, v2.8h, v17.8h\n"
- "ldr q2, [x14, #0x30]\n"
- "fmla v25.8h, v8.8h, v19.8h\n"
- "ldr q16, [x27, x11]\n"
- "fmla v28.8h, v6.8h, v18.8h\n"
- "fmla v26.8h, v3.8h, v18.8h\n"
+ "fmla v26.8h, v8.8h, v19.8h\n"
+ "ldr q16, [x26, x13]\n"
+ "fmla v27.8h, v3.8h, v18.8h\n"
"ldr q3, [x14, #0x40]\n"
- "fmax v28.8h, v28.8h, v15.8h\n"
- "fmla v27.8h, v8.8h, v17.8h\n"
- "fmla v24.8h, v5.8h, v17.8h\n"
- "ldr q11, [x16, x26]\n"
+ "fmla v23.8h, v2.8h, v17.8h\n"
+ "ldr q2, [x14, #0x30]\n"
+ "fmla v29.8h, v6.8h, v18.8h\n"
+ "fmax v30.8h, v30.8h, v15.8h\n"
+ "fmla v28.8h, v8.8h, v17.8h\n"
+ "fmla v25.8h, v5.8h, v17.8h\n"
+ "ldr q11, [x16, x11]\n"
"ldr q5, [x14, #0x60]\n"
- "fmla v22.8h, v8.8h, v16.8h\n"
+ "fmla v24.8h, v8.8h, v16.8h\n"
"ldr q8, [x14, #0x90]\n"
- "fmla v20.8h, v7.8h, v16.8h\n"
+ "fmla v21.8h, v7.8h, v16.8h\n"
"ldr q7, [x14, #0x80]\n"
- "fmla v21.8h, v6.8h, v16.8h\n"
- "ldr q13, [x13, x11]\n"
- "ldr q6, [x14, #0x70]\n"
"fmax v27.8h, v27.8h, v15.8h\n"
"fmax v26.8h, v26.8h, v15.8h\n"
+ "add x26, x26, #0x10\n"
+ "ld1 { v12.8h }, [x26]\n"
+ "fmla v23.8h, v6.8h, v16.8h\n"
+ "ldr q13, [x10, x13]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "fmax v29.8h, v29.8h, v15.8h\n"
+ "fmax v28.8h, v28.8h, v15.8h\n"
"fmax v25.8h, v25.8h, v15.8h\n"
- "add x27, x27, #0x10\n"
- "ld1 { v12.8h }, [x27]\n"
- "fmax v24.8h, v24.8h, v15.8h\n"
- "fmax v22.8h, v22.8h, v15.8h\n"
"add x14, x14, #0xa0\n"
- "fmax v20.8h, v20.8h, v15.8h\n"
+ "fmax v24.8h, v24.8h, v15.8h\n"
"fmax v21.8h, v21.8h, v15.8h\n"
- "fmin v28.8h, v28.8h, v14.8h\n"
"fmin v29.8h, v29.8h, v14.8h\n"
- "st1 { v28.8h }, [x15]\n"
+ "fmin v30.8h, v30.8h, v14.8h\n"
+ "fmax v23.8h, v23.8h, v15.8h\n"
+ "fmin v28.8h, v28.8h, v14.8h\n"
"fmin v27.8h, v27.8h, v14.8h\n"
"fmin v26.8h, v26.8h, v14.8h\n"
- "str q29, [x15, x17]\n"
"fmin v25.8h, v25.8h, v14.8h\n"
"fmin v24.8h, v24.8h, v14.8h\n"
- "str q27, [x15, x22]\n"
- "add x15, x15, #0x10\n"
- "fmin v22.8h, v22.8h, v14.8h\n"
- "fmin v20.8h, v20.8h, v14.8h\n"
- "st1 { v26.8h }, [x28]\n"
+ "st1 { v29.8h }, [x15]\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "str q25, [x28, x17]\n"
- "str q24, [x28, x22]\n"
- "add x28, x28, #0x10\n"
- "st1 { v22.8h }, [x25]\n"
- "str q20, [x25, x17]\n"
- "str q21, [x25, x22]\n"
+ "fmin v23.8h, v23.8h, v14.8h\n"
+ "str q30, [x15, x8]\n"
+ "str q28, [x15, x20]\n"
+ "add x15, x15, #0x10\n"
+ "st1 { v27.8h }, [x27]\n"
+ "str q26, [x27, x8]\n"
+ "str q25, [x27, x20]\n"
+ "add x27, x27, #0x10\n"
+ "st1 { v24.8h }, [x25]\n"
+ "str q21, [x25, x8]\n"
+ "str q23, [x25, x20]\n"
"add x25, x25, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v29.16b, v31.16b\n fmla v29.8h, v7.8h, v9.8h\n"
- "mov v28.16b, v31.16b\n fmla v28.8h, v8.8h, v9.8h\n"
- "mov v27.16b, v31.16b\n fmla v27.8h, v6.8h, v9.8h\n"
- "fmla v29.8h, v4.8h, v13.8h\n"
- "mov v26.16b, v31.16b\n fmla v26.8h, v5.8h, v9.8h\n"
- "mov v25.16b, v31.16b\n fmla v25.8h, v4.8h, v9.8h\n"
- "mov v24.16b, v31.16b\n fmla v24.8h, v3.8h, v9.8h\n"
- "fmla v28.8h, v0.8h, v10.8h\n"
- "ldr q23, [x12, x9]\n"
- "fmla v27.8h, v2.8h, v11.8h\n"
- "ldr q18, [x12, x8]\n"
- "mov v22.16b, v31.16b\n fmla v22.8h, v2.8h, v9.8h\n"
- "fmla v29.8h, v6.8h, v18.8h\n"
- "mov v21.16b, v31.16b\n fmla v21.8h, v0.8h, v9.8h\n"
- "fmla v28.8h, v5.8h, v13.8h\n"
- "fmla v27.8h, v3.8h, v13.8h\n"
- "fmla v26.8h, v2.8h, v13.8h\n"
- "fmla v25.8h, v1.8h, v13.8h\n"
- "fmla v24.8h, v0.8h, v13.8h\n"
- "ldr q17, [x16, x8]\n"
- "fmla v22.8h, v6.8h, v12.8h\n"
- "ldr q16, [x27, x26]\n"
- "mov v20.16b, v31.16b\n fmla v20.8h, v1.8h, v9.8h\n"
- "fmla v29.8h, v0.8h, v17.8h\n"
- "fmla v21.8h, v8.8h, v16.8h\n"
- "ldr q16, [x16, x9]\n"
- "fmla v28.8h, v7.8h, v18.8h\n"
- "fmla v20.8h, v0.8h, v18.8h\n"
- "fmla v26.8h, v4.8h, v18.8h\n"
- "fmla v25.8h, v3.8h, v18.8h\n"
- "fmla v22.8h, v1.8h, v18.8h\n"
- "ld1 { v19.8h }, [x13]\n"
- "fmla v29.8h, v2.8h, v16.8h\n"
- "fmla v27.8h, v1.8h, v16.8h\n"
- "ld1 { v18.8h }, [x10]\n"
- "fmla v24.8h, v4.8h, v23.8h\n"
- "fmla v28.8h, v1.8h, v17.8h\n"
- "ldr q16, [x13, x26]\n"
- "fmla v20.8h, v2.8h, v23.8h\n"
- "fmla v21.8h, v1.8h, v23.8h\n"
- "fmla v29.8h, v8.8h, v23.8h\n"
- "fmla v27.8h, v7.8h, v23.8h\n"
- "fmla v25.8h, v5.8h, v23.8h\n"
- "ldr q17, [x10, x11]\n"
+ "mov v30.16b, v31.16b\n fmla v30.8h, v7.8h, v9.8h\n"
+ "mov v29.16b, v31.16b\n fmla v29.8h, v8.8h, v9.8h\n"
+ "mov v28.16b, v31.16b\n fmla v28.8h, v6.8h, v9.8h\n"
+ "mov v27.16b, v31.16b\n fmla v27.8h, v5.8h, v9.8h\n"
+ "mov v26.16b, v31.16b\n fmla v26.8h, v4.8h, v9.8h\n"
+ "mov v25.16b, v31.16b\n fmla v25.8h, v3.8h, v9.8h\n"
+ "mov v24.16b, v31.16b\n fmla v24.8h, v2.8h, v9.8h\n"
+ "mov v23.16b, v31.16b\n fmla v23.8h, v0.8h, v9.8h\n"
+ "fmla v30.8h, v4.8h, v13.8h\n"
+ "fmla v29.8h, v0.8h, v10.8h\n"
+ "ldr q22, [x9, x12]\n"
+ "fmla v28.8h, v2.8h, v11.8h\n"
+ "ldr q17, [x9, x7]\n"
+ "fmla v27.8h, v2.8h, v13.8h\n"
+ "fmla v26.8h, v1.8h, v13.8h\n"
+ "fmla v25.8h, v0.8h, v13.8h\n"
+ "fmla v24.8h, v6.8h, v12.8h\n"
+ "ldr q16, [x26, x11]\n"
+ "mov v21.16b, v31.16b\n fmla v21.8h, v1.8h, v9.8h\n"
+ "fmla v30.8h, v6.8h, v17.8h\n"
+ "fmla v29.8h, v5.8h, v13.8h\n"
+ "fmla v28.8h, v3.8h, v13.8h\n"
+ "ldr q18, [x16, x7]\n"
+ "fmla v27.8h, v4.8h, v17.8h\n"
+ "fmla v23.8h, v8.8h, v16.8h\n"
+ "ldr q16, [x16, x12]\n"
+ "fmla v26.8h, v3.8h, v17.8h\n"
+ "fmla v21.8h, v0.8h, v17.8h\n"
+ "fmla v24.8h, v1.8h, v17.8h\n"
+ "fmla v30.8h, v0.8h, v18.8h\n"
+ "fmla v29.8h, v7.8h, v17.8h\n"
+ "ld1 { v20.8h }, [x10]\n"
+ "fmla v28.8h, v1.8h, v16.8h\n"
+ "fmla v25.8h, v4.8h, v22.8h\n"
+ "fmla v23.8h, v1.8h, v22.8h\n"
+ "fmla v26.8h, v5.8h, v22.8h\n"
+ "fmla v21.8h, v2.8h, v22.8h\n"
+ "fmla v27.8h, v0.8h, v20.8h\n"
+ "fmla v30.8h, v2.8h, v16.8h\n"
+ "ld1 { v17.8h }, [x28]\n"
+ "fmla v29.8h, v1.8h, v18.8h\n"
+ "ldr q16, [x10, x11]\n"
+ "fmla v28.8h, v7.8h, v22.8h\n"
+ "fmla v24.8h, v3.8h, v17.8h\n"
+ "fmla v25.8h, v2.8h, v16.8h\n"
+ "fmla v27.8h, v6.8h, v17.8h\n"
+ "ldr q19, [x10, x7]\n"
+ "fmla v30.8h, v8.8h, v22.8h\n"
+ "ldr q18, [x28, x13]\n"
+ "fmla v29.8h, v3.8h, v20.8h\n"
+ "ldr q17, [x28, x11]\n"
+ "fmla v28.8h, v5.8h, v16.8h\n"
+ "ldr q16, [x26, x7]\n"
+ "fmla v21.8h, v4.8h, v18.8h\n"
+ "fmla v23.8h, v3.8h, v18.8h\n"
+ "fmla v26.8h, v7.8h, v18.8h\n"
+ "fmla v24.8h, v5.8h, v18.8h\n"
+ "fmla v25.8h, v6.8h, v18.8h\n"
+ "fmla v27.8h, v8.8h, v18.8h\n"
+ "fmla v30.8h, v3.8h, v19.8h\n"
+ "fmla v21.8h, v6.8h, v16.8h\n"
+ "fmla v29.8h, v4.8h, v19.8h\n"
+ "fmla v23.8h, v5.8h, v17.8h\n"
"fmla v26.8h, v0.8h, v19.8h\n"
- "fmla v22.8h, v3.8h, v18.8h\n"
- "fmla v24.8h, v2.8h, v16.8h\n"
- "fmla v20.8h, v4.8h, v17.8h\n"
- "fmla v21.8h, v3.8h, v17.8h\n"
- "fmla v28.8h, v3.8h, v19.8h\n"
- "ldr q19, [x10, x26]\n"
- "fmla v27.8h, v5.8h, v16.8h\n"
- "ldr q16, [x27, x8]\n"
- "fmla v26.8h, v6.8h, v18.8h\n"
- "ldr q18, [x13, x8]\n"
- "fmla v25.8h, v7.8h, v17.8h\n"
- "fmla v22.8h, v5.8h, v17.8h\n"
- "fmla v24.8h, v6.8h, v17.8h\n"
- "fmla v21.8h, v5.8h, v19.8h\n"
- "fmla v20.8h, v6.8h, v16.8h\n"
- "fmla v26.8h, v8.8h, v17.8h\n"
- "fmla v22.8h, v7.8h, v16.8h\n"
- "ldr q17, [x27, x9]\n"
- "fmla v29.8h, v3.8h, v18.8h\n"
- "fmla v25.8h, v0.8h, v18.8h\n"
- "fmla v24.8h, v8.8h, v19.8h\n"
- "ldr q16, [x13, x9]\n"
- "fmla v20.8h, v8.8h, v17.8h\n"
- "add x13, x13, #0x10\n"
- "fmla v21.8h, v7.8h, v17.8h\n"
- "ldr q19, [x10, x9]\n"
- "fmla v28.8h, v4.8h, v18.8h\n"
- "fmla v26.8h, v1.8h, v18.8h\n"
- "ldr q17, [x10, x8]\n"
- "fmla v29.8h, v5.8h, v16.8h\n"
+ "fmla v24.8h, v7.8h, v16.8h\n"
+ "ldr q18, [x26, x12]\n"
+ "fmla v25.8h, v8.8h, v17.8h\n"
+ "ldr q16, [x10, x12]\n"
+ "fmla v27.8h, v1.8h, v19.8h\n"
+ "ldr q17, [x28, x7]\n"
"add x10, x10, #0x10\n"
- "fmla v27.8h, v4.8h, v16.8h\n"
- "fmla v25.8h, v2.8h, v16.8h\n"
- "fmla v24.8h, v1.8h, v16.8h\n"
- "ldr q16, [x16, x11]\n"
- "fmla v22.8h, v4.8h, v17.8h\n"
+ "fmla v21.8h, v8.8h, v18.8h\n"
+ "fmla v23.8h, v7.8h, v18.8h\n"
+ "ldr q19, [x28, x12]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v30.8h, v5.8h, v16.8h\n"
+ "fmla v28.8h, v4.8h, v16.8h\n"
+ "fmla v26.8h, v2.8h, v16.8h\n"
+ "fmla v25.8h, v1.8h, v16.8h\n"
+ "ldr q16, [x16, x13]\n"
+ "fmla v24.8h, v4.8h, v17.8h\n"
"add x16, x16, #0x10\n"
- "fmla v20.8h, v3.8h, v17.8h\n"
- "fmla v21.8h, v4.8h, v19.8h\n"
- "fmla v26.8h, v7.8h, v17.8h\n"
- "fmla v25.8h, v6.8h, v17.8h\n"
- "ld1 { v18.8h }, [x12]\n"
- "fmla v28.8h, v2.8h, v16.8h\n"
- "fmla v29.8h, v1.8h, v16.8h\n"
- "fmax v29.8h, v29.8h, v15.8h\n"
- "fmla v27.8h, v0.8h, v16.8h\n"
- "ldr q17, [x12, x26]\n"
- "fmla v24.8h, v7.8h, v19.8h\n"
- "fmin v29.8h, v29.8h, v14.8h\n"
- "fmla v20.8h, v5.8h, v19.8h\n"
- "fmla v22.8h, v0.8h, v18.8h\n"
- "add x12, x12, #0x10\n"
- "fmla v21.8h, v2.8h, v17.8h\n"
- "fmla v25.8h, v8.8h, v19.8h\n"
- "ldr q16, [x27, x11]\n"
- "fmax v25.8h, v25.8h, v15.8h\n"
- "fmla v28.8h, v6.8h, v18.8h\n"
- "fmla v26.8h, v3.8h, v18.8h\n"
- "fmax v28.8h, v28.8h, v15.8h\n"
- "add x27, x27, #0x10\n"
- "fmla v27.8h, v8.8h, v17.8h\n"
- "fmla v24.8h, v5.8h, v17.8h\n"
+ "fmla v21.8h, v3.8h, v17.8h\n"
+ "fmla v27.8h, v7.8h, v17.8h\n"
+ "fmla v23.8h, v4.8h, v19.8h\n"
+ "fmla v26.8h, v6.8h, v17.8h\n"
+ "ld1 { v18.8h }, [x9]\n"
+ "fmla v29.8h, v2.8h, v16.8h\n"
+ "fmla v30.8h, v1.8h, v16.8h\n"
+ "fmla v28.8h, v0.8h, v16.8h\n"
+ "ldr q17, [x9, x11]\n"
+ "fmla v25.8h, v7.8h, v19.8h\n"
+ "add x9, x9, #0x10\n"
+ "fmla v21.8h, v5.8h, v19.8h\n"
+ "fmla v24.8h, v0.8h, v18.8h\n"
+ "fmla v26.8h, v8.8h, v19.8h\n"
+ "ldr q16, [x26, x13]\n"
+ "fmla v27.8h, v3.8h, v18.8h\n"
+ "add x26, x26, #0x10\n"
+ "fmla v23.8h, v2.8h, v17.8h\n"
+ "fmla v29.8h, v6.8h, v18.8h\n"
+ "fmax v30.8h, v30.8h, v15.8h\n"
+ "fmla v28.8h, v8.8h, v17.8h\n"
+ "fmla v25.8h, v5.8h, v17.8h\n"
+ "fmla v24.8h, v8.8h, v16.8h\n"
+ "fmla v21.8h, v7.8h, v16.8h\n"
"fmax v27.8h, v27.8h, v15.8h\n"
- "fmla v22.8h, v8.8h, v16.8h\n"
- "fmla v20.8h, v7.8h, v16.8h\n"
"fmax v26.8h, v26.8h, v15.8h\n"
- "fmla v21.8h, v6.8h, v16.8h\n"
+ "fmin v30.8h, v30.8h, v14.8h\n"
+ "fmla v23.8h, v6.8h, v16.8h\n"
+ "fmax v29.8h, v29.8h, v15.8h\n"
+ "fmax v28.8h, v28.8h, v15.8h\n"
+ "fmax v25.8h, v25.8h, v15.8h\n"
+ "fmin v27.8h, v27.8h, v14.8h\n"
"fmax v24.8h, v24.8h, v15.8h\n"
- "fmax v22.8h, v22.8h, v15.8h\n"
- "fmax v20.8h, v20.8h, v15.8h\n"
"fmax v21.8h, v21.8h, v15.8h\n"
+ "fmax v23.8h, v23.8h, v15.8h\n"
+ "fmin v29.8h, v29.8h, v14.8h\n"
"fmin v28.8h, v28.8h, v14.8h\n"
- "st1 { v28.8h }, [x15]\n"
- "fmin v27.8h, v27.8h, v14.8h\n"
"fmin v26.8h, v26.8h, v14.8h\n"
- "str q29, [x15, x17]\n"
+ "st1 { v27.8h }, [x27]\n"
"fmin v25.8h, v25.8h, v14.8h\n"
"fmin v24.8h, v24.8h, v14.8h\n"
- "str q27, [x15, x22]\n"
- "add x15, x15, #0x10\n"
- "fmin v22.8h, v22.8h, v14.8h\n"
- "fmin v20.8h, v20.8h, v14.8h\n"
- "st1 { v26.8h }, [x28]\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "str q25, [x28, x17]\n"
- "str q24, [x28, x22]\n"
- "add x28, x28, #0x10\n"
- "st1 { v22.8h }, [x25]\n"
- "str q20, [x25, x17]\n"
- "str q21, [x25, x22]\n"
+ "fmin v23.8h, v23.8h, v14.8h\n"
+ "st1 { v29.8h }, [x15]\n"
+ "str q30, [x15, x8]\n"
+ "str q28, [x15, x20]\n"
+ "add x15, x15, #0x10\n"
+ "str q26, [x27, x8]\n"
+ "str q25, [x27, x20]\n"
+ "add x27, x27, #0x10\n"
+ "st1 { v24.8h }, [x25]\n"
+ "str q21, [x25, x8]\n"
+ "str q23, [x25, x20]\n"
"add x25, x25, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 93f\n"
"ldr q31, [x14, #0x0]\n"
"ldr q0, [x14, #0x10]\n"
- "add x24, x12, x11\n"
+ "add x24, x9, x13\n"
"add x23, x16, XZR\n"
"ldr q1, [x14, #0x20]\n"
"ldr q2, [x14, #0x30]\n"
- "add x22, x16, x26\n"
- "add x21, x27, XZR\n"
+ "add x22, x16, x11\n"
+ "add x21, x26, XZR\n"
"ldr q3, [x14, #0x40]\n"
"ldr q4, [x14, #0x50]\n"
- "add x20, x13, x11\n"
+ "add x20, x10, x13\n"
"ldr q5, [x14, #0x60]\n"
"ldr q6, [x14, #0x70]\n"
"ldr q7, [x14, #0x80]\n"
@@ -511,23 +511,23 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"8:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 2: End
"mov v23.16b, v31.16b\n fmla v23.8h, v8.8h, v9.8h\n"
"mov v25.16b, v31.16b\n fmla v25.8h, v6.8h, v9.8h\n"
- "add x20, x27, x26\n"
+ "add x20, x26, x11\n"
"mov v24.16b, v31.16b\n fmla v24.8h, v7.8h, v9.8h\n"
"mov v26.16b, v31.16b\n fmla v26.8h, v5.8h, v9.8h\n"
"mov v27.16b, v31.16b\n fmla v27.8h, v4.8h, v9.8h\n"
"mov v28.16b, v31.16b\n fmla v28.8h, v3.8h, v9.8h\n"
"mov v29.16b, v31.16b\n fmla v29.8h, v2.8h, v9.8h\n"
+ "mov v30.16b, v31.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"fmla v23.8h, v0.8h, v10.8h\n"
"fmla v25.8h, v2.8h, v11.8h\n"
- "mov v30.16b, v31.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"fmla v31.8h, v0.8h, v9.8h\n"
- "fmla v29.8h, v6.8h, v12.8h\n"
- "fmla v23.8h, v5.8h, v13.8h\n"
"fmla v24.8h, v4.8h, v13.8h\n"
- "fmla v25.8h, v3.8h, v13.8h\n"
"fmla v26.8h, v2.8h, v13.8h\n"
"fmla v27.8h, v1.8h, v13.8h\n"
+ "fmla v29.8h, v6.8h, v12.8h\n"
"fmla v28.8h, v0.8h, v13.8h\n"
+ "fmla v23.8h, v5.8h, v13.8h\n"
+ "fmla v25.8h, v3.8h, v13.8h\n"
"tbz %x[n_channels], #2, 10f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
@@ -549,7 +549,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"ldr h12, [x20, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: End
"fmla v31.8h, v8.8h, v12.8h\n"
- "add x20, x12, x8\n"
+ "add x20, x9, x7\n"
"tbz %x[n_channels], #2, 14f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
@@ -572,7 +572,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"16:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: End
"fmla v23.8h, v7.8h, v11.8h\n"
"fmla v24.8h, v6.8h, v11.8h\n"
- "add x20, x16, x8\n"
+ "add x20, x16, x7\n"
"fmla v26.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v3.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v11.8h\n"
@@ -599,7 +599,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"20:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: End
"fmla v23.8h, v1.8h, v13.8h\n"
"fmla v24.8h, v0.8h, v13.8h\n"
- "add x20, x16, x9\n"
+ "add x20, x16, x12\n"
"tbz %x[n_channels], #2, 22f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
@@ -622,7 +622,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"24:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: End
"fmla v24.8h, v2.8h, v12.8h\n"
"fmla v25.8h, v1.8h, v12.8h\n"
- "add x20, x12, x9\n"
+ "add x20, x9, x12\n"
"tbz %x[n_channels], #2, 26f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
@@ -645,7 +645,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"28:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: End
"fmla v24.8h, v8.8h, v10.8h\n"
"fmla v25.8h, v7.8h, v10.8h\n"
- "add x20, x13, XZR\n"
+ "add x20, x10, XZR\n"
"fmla v27.8h, v5.8h, v10.8h\n"
"fmla v28.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
@@ -672,7 +672,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"32:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: End
"fmla v23.8h, v3.8h, v11.8h\n"
"fmla v26.8h, v0.8h, v11.8h\n"
- "add x20, x13, x26\n"
+ "add x20, x10, x11\n"
"tbz %x[n_channels], #2, 34f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
@@ -695,7 +695,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"36:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: End
"fmla v25.8h, v5.8h, v13.8h\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "add x20, x10, XZR\n"
+ "add x20, x28, XZR\n"
"tbz %x[n_channels], #2, 38f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
@@ -718,7 +718,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"40:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v26.8h, v6.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
- "add x20, x10, x11\n"
+ "add x20, x28, x13\n"
"tbz %x[n_channels], #2, 42f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
@@ -741,7 +741,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"44:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: End
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "add x20, x10, x26\n"
+ "add x20, x28, x11\n"
"fmla v28.8h, v6.8h, v10.8h\n"
"fmla v29.8h, v5.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v10.8h\n"
@@ -768,7 +768,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"48:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: End
"fmla v28.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
- "add x20, x27, x8\n"
+ "add x20, x26, x7\n"
"tbz %x[n_channels], #2, 50f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
@@ -791,7 +791,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"52:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: End
"fmla v29.8h, v7.8h, v13.8h\n"
"fmla v30.8h, v6.8h, v13.8h\n"
- "add x20, x13, x8\n"
+ "add x20, x10, x7\n"
"tbz %x[n_channels], #2, 54f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 53f\n"
@@ -814,7 +814,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"56:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: End
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v24.8h, v3.8h, v12.8h\n"
- "add x20, x13, x9\n"
+ "add x20, x10, x12\n"
"fmla v26.8h, v1.8h, v12.8h\n"
"fmla v27.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 58f\n"
@@ -839,7 +839,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"60:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v24.8h, v5.8h, v11.8h\n"
"fmla v25.8h, v4.8h, v11.8h\n"
- "add x20, x27, x9\n"
+ "add x20, x26, x12\n"
"fmla v27.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"tbz %x[n_channels], #2, 62f\n"
@@ -864,7 +864,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"64:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: End
"fmla v30.8h, v8.8h, v13.8h\n"
"fmla v31.8h, v7.8h, v13.8h\n"
- "add x20, x10, x8\n"
+ "add x20, x28, x7\n"
"tbz %x[n_channels], #2, 66f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 65f\n"
@@ -887,7 +887,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"68:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
"fmla v26.8h, v7.8h, v12.8h\n"
"fmla v27.8h, v6.8h, v12.8h\n"
- "add x20, x16, x11\n"
+ "add x20, x16, x13\n"
"fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v12.8h\n"
"tbz %x[n_channels], #2, 70f\n"
@@ -912,7 +912,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"72:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: End
"fmla v23.8h, v2.8h, v11.8h\n"
"fmla v24.8h, v1.8h, v11.8h\n"
- "add x20, x10, x9\n"
+ "add x20, x28, x12\n"
"fmla v25.8h, v0.8h, v11.8h\n"
"tbz %x[n_channels], #2, 74f\n"
"ldr d13, [x20], #0x8\n"
@@ -936,7 +936,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"76:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
"fmla v27.8h, v8.8h, v13.8h\n"
"fmla v28.8h, v7.8h, v13.8h\n"
- "add x20, x12, XZR\n"
+ "add x20, x9, XZR\n"
"fmla v30.8h, v5.8h, v13.8h\n"
"fmla v31.8h, v4.8h, v13.8h\n"
"tbz %x[n_channels], #2, 78f\n"
@@ -961,7 +961,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"80:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: End
"fmla v23.8h, v6.8h, v12.8h\n"
"fmla v26.8h, v3.8h, v12.8h\n"
- "add x20, x12, x26\n"
+ "add x20, x9, x11\n"
"fmla v29.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 82f\n"
"ldr d11, [x20], #0x8\n"
@@ -985,7 +985,7 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"84:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: End
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
- "add x20, x27, x11\n"
+ "add x20, x26, x13\n"
"fmla v31.8h, v2.8h, v11.8h\n"
"tbz %x[n_channels], #2, 86f\n"
"ldr d13, [x20], #0x8\n"
@@ -1030,46 +1030,46 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmin v31.8h, v31.8h, v14.8h\n"
"tbz %x[n_channels], #2, 90f\n"
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.d }[0], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.d }[0], [x21], x17\n"
"add x15, x15, #0x8\n"
- "st1 { v29.d }[0], [x20], x17\n"
- "add x28, x28, #0x8\n"
+ "add x27, x27, #0x8\n"
"add x25, x25, #0x8\n"
- "st1 { v24.d }[0], [x22], x17\n"
- "st1 { v27.d }[0], [x21], x17\n"
- "st1 { v30.d }[0], [x20], x17\n"
+ "st1 { v23.d }[0], [x22], x8\n"
+ "st1 { v26.d }[0], [x21], x8\n"
+ "st1 { v29.d }[0], [x20], x8\n"
+ "st1 { v24.d }[0], [x22], x8\n"
+ "st1 { v27.d }[0], [x21], x8\n"
+ "st1 { v30.d }[0], [x20], x8\n"
"st1 { v25.d }[0], [x22]\n"
"st1 { v28.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #1, 89f\n"
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.s }[2], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.s }[2], [x21], x17\n"
"add x15, x15, #0x4\n"
- "st1 { v29.s }[2], [x20], x17\n"
- "add x28, x28, #0x4\n"
+ "add x27, x27, #0x4\n"
"add x25, x25, #0x4\n"
- "st1 { v24.s }[2], [x22], x17\n"
- "st1 { v27.s }[2], [x21], x17\n"
- "st1 { v30.s }[2], [x20], x17\n"
+ "st1 { v23.s }[2], [x22], x8\n"
+ "st1 { v26.s }[2], [x21], x8\n"
+ "st1 { v29.s }[2], [x20], x8\n"
+ "st1 { v24.s }[2], [x22], x8\n"
+ "st1 { v27.s }[2], [x21], x8\n"
+ "st1 { v30.s }[2], [x20], x8\n"
"st1 { v25.s }[2], [x22]\n"
"st1 { v28.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"tbz %x[n_channels], #0, 92f\n"
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.h }[6], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.h }[6], [x21], x17\n"
- "st1 { v29.h }[6], [x20], x17\n"
- "st1 { v24.h }[6], [x22], x17\n"
- "st1 { v27.h }[6], [x21], x17\n"
- "st1 { v30.h }[6], [x20], x17\n"
+ "st1 { v23.h }[6], [x22], x8\n"
+ "st1 { v24.h }[6], [x22], x8\n"
+ "st1 { v26.h }[6], [x21], x8\n"
+ "st1 { v29.h }[6], [x20], x8\n"
+ "st1 { v27.h }[6], [x21], x8\n"
+ "st1 { v30.h }[6], [x20], x8\n"
"st1 { v25.h }[6], [x22]\n"
"st1 { v28.h }[6], [x21]\n"
"st1 { v31.h }[6], [x20]\n"
@@ -1077,14 +1077,14 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"89:" // Tile loop: Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 92f\n"
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.h }[4], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.h }[4], [x21], x17\n"
- "st1 { v29.h }[4], [x20], x17\n"
- "st1 { v24.h }[4], [x22], x17\n"
- "st1 { v27.h }[4], [x21], x17\n"
- "st1 { v30.h }[4], [x20], x17\n"
+ "st1 { v23.h }[4], [x22], x8\n"
+ "st1 { v24.h }[4], [x22], x8\n"
+ "st1 { v26.h }[4], [x21], x8\n"
+ "st1 { v29.h }[4], [x20], x8\n"
+ "st1 { v27.h }[4], [x21], x8\n"
+ "st1 { v30.h }[4], [x20], x8\n"
"st1 { v25.h }[4], [x22]\n"
"st1 { v28.h }[4], [x21]\n"
"st1 { v31.h }[4], [x20]\n"
@@ -1092,63 +1092,63 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"90:" // Tile loop: Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 91f\n"
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.s }[0], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.s }[0], [x21], x17\n"
"add x15, x15, #0x4\n"
- "st1 { v29.s }[0], [x20], x17\n"
- "add x28, x28, #0x4\n"
+ "add x27, x27, #0x4\n"
"add x25, x25, #0x4\n"
- "st1 { v24.s }[0], [x22], x17\n"
- "st1 { v27.s }[0], [x21], x17\n"
- "st1 { v30.s }[0], [x20], x17\n"
+ "st1 { v23.s }[0], [x22], x8\n"
+ "st1 { v26.s }[0], [x21], x8\n"
+ "st1 { v29.s }[0], [x20], x8\n"
+ "st1 { v24.s }[0], [x22], x8\n"
+ "st1 { v27.s }[0], [x21], x8\n"
+ "st1 { v30.s }[0], [x20], x8\n"
"st1 { v25.s }[0], [x22]\n"
"st1 { v28.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"tbz %x[n_channels], #0, 92f\n"
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.h }[2], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.h }[2], [x21], x17\n"
- "st1 { v29.h }[2], [x20], x17\n"
- "st1 { v24.h }[2], [x22], x17\n"
- "st1 { v27.h }[2], [x21], x17\n"
- "st1 { v30.h }[2], [x20], x17\n"
+ "st1 { v23.h }[2], [x22], x8\n"
+ "st1 { v24.h }[2], [x22], x8\n"
+ "st1 { v26.h }[2], [x21], x8\n"
+ "st1 { v29.h }[2], [x20], x8\n"
+ "st1 { v27.h }[2], [x21], x8\n"
+ "st1 { v30.h }[2], [x20], x8\n"
"st1 { v25.h }[2], [x22]\n"
"st1 { v28.h }[2], [x21]\n"
"st1 { v31.h }[2], [x20]\n"
"b 92f\n"
"91:" // Tile loop: Oddments: Store: Bit 2: Unset: Bit 1: Unset
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.h }[0], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.h }[0], [x21], x17\n"
- "st1 { v29.h }[0], [x20], x17\n"
- "st1 { v24.h }[0], [x22], x17\n"
- "st1 { v27.h }[0], [x21], x17\n"
- "st1 { v30.h }[0], [x20], x17\n"
+ "st1 { v23.h }[0], [x22], x8\n"
+ "st1 { v24.h }[0], [x22], x8\n"
+ "st1 { v26.h }[0], [x21], x8\n"
+ "st1 { v29.h }[0], [x20], x8\n"
+ "st1 { v27.h }[0], [x21], x8\n"
+ "st1 { v30.h }[0], [x20], x8\n"
"st1 { v25.h }[0], [x22]\n"
"st1 { v28.h }[0], [x21]\n"
"st1 { v31.h }[0], [x20]\n"
"92:" // Tile loop: Oddments: Store: Bit 2: End
"93:" // Tile loop: End
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x23, x23, #0x1\n"
- "add x21, x24, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x23, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x24, x24, x21, LT\n"
- "csel x23, x23, XZR, LT\n"
- "cmp x24, x20\n"
+ "ldr x9, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x9, x9, #0x1\n"
+ "add x20, x10, #0x1\n"
+ "cmp x9, x22\n"
+ "csel x10, x10, x20, LT\n"
+ "csel x9, x9, XZR, LT\n"
+ "cmp x10, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 72e68482c6..c4b0c721cc 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,9 +91,9 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"lsr x8, %x[n_channels], #0x3\n"
"ldr x17, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v15.8h }, [x20]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v15.8h }, [x21]\n"
"ld1r { v14.8h }, [x20]\n"
"add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
"mov x14, #0x0\n"
@@ -111,357 +111,357 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr q7, [x16, #0x80]\n"
"ldr q8, [x16, #0x90]\n"
"add x16, x16, #0xa0\n"
- "ldp x21, x20, [x15, #0x0]\n"
- "ldr q9, [x21, x14]\n"
- "ldr q10, [x20, x14]\n"
- "ldp x21, x20, [x15, #0x10]\n"
- "ldr q11, [x21, x14]\n"
- "ldr q12, [x20, x14]\n"
+ "ldp x24, x23, [x15, #0x0]\n"
+ "ldp x22, x21, [x15, #0x10]\n"
"ldr x20, [x15, #0x20]\n"
+ "ldr q9, [x24, x14]\n"
+ "ldr q10, [x23, x14]\n"
+ "ldr q11, [x22, x14]\n"
+ "ldr q12, [x21, x14]\n"
"ldr q13, [x20, x14]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v29.16b, v31.16b\n fmla v29.8h, v8.8h, v9.8h\n"
- "mov v28.16b, v31.16b\n fmla v28.8h, v7.8h, v9.8h\n"
- "ldr x26, [x15, #0x30]\n"
- "ldr x23, [x15, #0x38]\n"
- "mov v27.16b, v31.16b\n fmla v27.8h, v6.8h, v9.8h\n"
- "fmla v29.8h, v0.8h, v10.8h\n"
- "ldr x22, [x15, #0x28]\n"
- "ldr x20, [x15, #0x48]\n"
- "ldr q19, [x20, x14]\n"
- "fmla v28.8h, v4.8h, v13.8h\n"
- "mov v26.16b, v31.16b\n fmla v26.8h, v5.8h, v9.8h\n"
- "ldr x21, [x15, #0x40]\n"
- "mov v25.16b, v31.16b\n fmla v25.8h, v4.8h, v9.8h\n"
- "mov v24.16b, v31.16b\n fmla v24.8h, v3.8h, v9.8h\n"
+ "mov v30.16b, v31.16b\n fmla v30.8h, v8.8h, v9.8h\n"
+ "mov v29.16b, v31.16b\n fmla v29.8h, v7.8h, v9.8h\n"
+ "ldr x22, [x15, #0x30]\n"
+ "ldr x27, [x15, #0x38]\n"
+ "mov v28.16b, v31.16b\n fmla v28.8h, v6.8h, v9.8h\n"
+ "mov v27.16b, v31.16b\n fmla v27.8h, v5.8h, v9.8h\n"
+ "ldr x26, [x15, #0x28]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "mov v26.16b, v31.16b\n fmla v26.8h, v4.8h, v9.8h\n"
+ "mov v25.16b, v31.16b\n fmla v25.8h, v3.8h, v9.8h\n"
+ "ldr x20, [x15, #0x40]\n"
"ldr x25, [x15, #0x50]\n"
+ "mov v24.16b, v31.16b\n fmla v24.8h, v2.8h, v9.8h\n"
+ "mov v23.16b, v31.16b\n fmla v23.8h, v0.8h, v9.8h\n"
"ldr x24, [x15, #0x58]\n"
- "fmla v27.8h, v2.8h, v11.8h\n"
- "ldr q17, [x26, x14]\n"
- "mov v23.16b, v31.16b\n fmla v23.8h, v2.8h, v9.8h\n"
- "ldr x20, [x15, #0x60]\n"
- "fmla v29.8h, v5.8h, v13.8h\n"
- "fmla v28.8h, v6.8h, v17.8h\n"
- "ldr x12, [x15, #0x70]\n"
- "ldr x11, [x15, #0x88]\n"
- "mov v22.16b, v31.16b\n fmla v22.8h, v0.8h, v9.8h\n"
- "fmla v27.8h, v3.8h, v13.8h\n"
- "ldr x10, [x17, #0x0]\n"
+ "ldr x23, [x15, #0x60]\n"
+ "fmla v30.8h, v0.8h, v10.8h\n"
+ "ldr q22, [x21, x14]\n"
+ "fmla v29.8h, v4.8h, v13.8h\n"
+ "ldr x12, [x15, #0x88]\n"
+ "fmla v28.8h, v2.8h, v11.8h\n"
+ "ldr q17, [x22, x14]\n"
+ "fmla v27.8h, v2.8h, v13.8h\n"
+ "ldr x22, [x15, #0x70]\n"
+ "fmla v26.8h, v1.8h, v13.8h\n"
+ "fmla v25.8h, v0.8h, v13.8h\n"
+ "ldr x11, [x17, #0x0]\n"
"add x13, x13, #0x10\n"
- "fmla v26.8h, v2.8h, v13.8h\n"
- "fmla v25.8h, v1.8h, v13.8h\n"
- "ldr x9, [x17, #0x8]\n"
- "ldr x28, [x17, #0x10]\n"
- "fmla v24.8h, v0.8h, v13.8h\n"
- "ldr q18, [x23, x14]\n"
- "fmla v23.8h, v6.8h, v12.8h\n"
- "ldr q16, [x22, x14]\n"
+ "fmla v24.8h, v6.8h, v12.8h\n"
+ "ldr q16, [x26, x14]\n"
"mov v21.16b, v31.16b\n fmla v21.8h, v1.8h, v9.8h\n"
"ldr q31, [x16, #0x0]\n"
- "fmla v29.8h, v7.8h, v17.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "fmla v28.8h, v0.8h, v18.8h\n"
- "fmla v22.8h, v8.8h, v16.8h\n"
- "ldr q16, [x21, x14]\n"
- "ldr x22, [x15, #0x78]\n"
- "fmla v26.8h, v4.8h, v17.8h\n"
- "fmla v25.8h, v3.8h, v17.8h\n"
- "ldr x21, [x15, #0x80]\n"
- "ldr x27, [x17, #0x18]\n"
+ "fmla v30.8h, v5.8h, v13.8h\n"
+ "fmla v29.8h, v6.8h, v17.8h\n"
+ "ldr x21, [x15, #0x68]\n"
+ "ldr x10, [x17, #0x8]\n"
+ "fmla v28.8h, v3.8h, v13.8h\n"
+ "ldr q18, [x27, x14]\n"
+ "fmla v27.8h, v4.8h, v17.8h\n"
+ "ldr x9, [x15, #0x78]\n"
+ "fmla v23.8h, v8.8h, v16.8h\n"
+ "ldr q16, [x20, x14]\n"
+ "fmla v26.8h, v3.8h, v17.8h\n"
+ "ldr x20, [x15, #0x80]\n"
"fmla v21.8h, v0.8h, v17.8h\n"
- "fmla v24.8h, v4.8h, v19.8h\n"
- "fmla v23.8h, v1.8h, v17.8h\n"
+ "fmla v25.8h, v4.8h, v22.8h\n"
+ "ldr x28, [x17, #0x10]\n"
+ "ldr x27, [x17, #0x18]\n"
+ "fmla v30.8h, v7.8h, v17.8h\n"
+ "fmla v29.8h, v0.8h, v18.8h\n"
+ "fmla v24.8h, v1.8h, v17.8h\n"
"ldr q17, [x25, x14]\n"
- "fmla v29.8h, v1.8h, v18.8h\n"
- "ldr q20, [x24, x14]\n"
- "fmla v28.8h, v2.8h, v16.8h\n"
- "fmla v27.8h, v1.8h, v16.8h\n"
- "ldr q16, [x20, x14]\n"
+ "fmla v28.8h, v1.8h, v16.8h\n"
"ldr x26, [x15, #0x90]\n"
- "fmla v25.8h, v5.8h, v19.8h\n"
- "fmla v21.8h, v2.8h, v19.8h\n"
- "ldr x25, [x15, #0xa0]\n"
- "ldr x20, [x15, #0x98]\n"
- "fmla v26.8h, v0.8h, v17.8h\n"
- "fmla v24.8h, v2.8h, v20.8h\n"
- "fmla v28.8h, v8.8h, v19.8h\n"
- "fmla v27.8h, v7.8h, v19.8h\n"
- "fmla v22.8h, v1.8h, v19.8h\n"
- "ldr q19, [x23, x14]\n"
- "fmla v23.8h, v3.8h, v16.8h\n"
- "ldr x24, [x15, #0xa8]\n"
- "fmla v26.8h, v6.8h, v16.8h\n"
- "ldr q18, [x21, x14]\n"
- "fmla v25.8h, v7.8h, v19.8h\n"
- "ldr x23, [x15, #0xc0]\n"
- "fmla v24.8h, v6.8h, v19.8h\n"
- "fmla v21.8h, v4.8h, v19.8h\n"
- "fmla v29.8h, v3.8h, v17.8h\n"
- "ldr q17, [x12, x14]\n"
- "fmla v27.8h, v5.8h, v20.8h\n"
- "ldr q16, [x22, x14]\n"
- "fmla v23.8h, v5.8h, v19.8h\n"
- "fmla v22.8h, v3.8h, v19.8h\n"
- "ldr x22, [x15, #0xb0]\n"
- "ldr x21, [x15, #0xb8]\n"
- "fmla v26.8h, v8.8h, v19.8h\n"
- "fmla v24.8h, v8.8h, v17.8h\n"
- "fmla v21.8h, v6.8h, v16.8h\n"
- "fmla v28.8h, v3.8h, v18.8h\n"
- "fmla v25.8h, v0.8h, v18.8h\n"
- "fmla v22.8h, v5.8h, v17.8h\n"
- "ldr q17, [x11, x14]\n"
- "fmla v23.8h, v7.8h, v16.8h\n"
- "ldr q16, [x26, x14]\n"
- "fmla v29.8h, v4.8h, v18.8h\n"
- "fmla v26.8h, v1.8h, v18.8h\n"
+ "fmla v26.8h, v5.8h, v22.8h\n"
+ "fmla v23.8h, v1.8h, v22.8h\n"
+ "fmla v21.8h, v2.8h, v22.8h\n"
+ "fmla v30.8h, v1.8h, v18.8h\n"
+ "ldr q20, [x24, x14]\n"
+ "ldr x25, [x15, #0x98]\n"
+ "fmla v29.8h, v2.8h, v16.8h\n"
+ "ldr q16, [x23, x14]\n"
+ "fmla v27.8h, v0.8h, v17.8h\n"
+ "ldr x24, [x15, #0xa0]\n"
+ "fmla v28.8h, v7.8h, v22.8h\n"
+ "fmla v25.8h, v2.8h, v20.8h\n"
+ "fmla v24.8h, v3.8h, v16.8h\n"
+ "fmla v30.8h, v3.8h, v17.8h\n"
+ "ldr q19, [x22, x14]\n"
+ "ldr x23, [x15, #0xb0]\n"
+ "fmla v29.8h, v8.8h, v22.8h\n"
+ "ldr q17, [x21, x14]\n"
+ "ldr x22, [x15, #0xa8]\n"
+ "fmla v27.8h, v6.8h, v16.8h\n"
"ldr q18, [x20, x14]\n"
- "fmla v28.8h, v5.8h, v17.8h\n"
- "fmla v27.8h, v4.8h, v17.8h\n"
- "fmla v25.8h, v2.8h, v17.8h\n"
- "fmla v24.8h, v1.8h, v17.8h\n"
- "ldr q17, [x25, x14]\n"
- "fmla v21.8h, v8.8h, v16.8h\n"
- "ldr x20, [x15, #0x20]\n"
- "fmla v22.8h, v7.8h, v16.8h\n"
+ "ldr x21, [x15, #0xc0]\n"
+ "fmla v28.8h, v5.8h, v20.8h\n"
+ "ldr q16, [x9, x14]\n"
+ "ldr x20, [x15, #0xb8]\n"
+ "fmla v26.8h, v7.8h, v17.8h\n"
+ "fmla v25.8h, v6.8h, v17.8h\n"
+ "fmla v21.8h, v4.8h, v17.8h\n"
+ "fmla v24.8h, v5.8h, v17.8h\n"
+ "fmla v23.8h, v3.8h, v17.8h\n"
+ "fmla v27.8h, v8.8h, v17.8h\n"
+ "fmla v29.8h, v3.8h, v18.8h\n"
+ "fmla v30.8h, v4.8h, v18.8h\n"
+ "fmla v25.8h, v8.8h, v19.8h\n"
+ "fmla v26.8h, v0.8h, v18.8h\n"
+ "fmla v21.8h, v6.8h, v16.8h\n"
+ "fmla v24.8h, v7.8h, v16.8h\n"
+ "ldr q17, [x26, x14]\n"
+ "fmla v23.8h, v5.8h, v19.8h\n"
+ "ldr q16, [x12, x14]\n"
+ "fmla v27.8h, v1.8h, v18.8h\n"
+ "ldr q19, [x25, x14]\n"
+ "fmla v29.8h, v5.8h, v16.8h\n"
+ "fmla v28.8h, v4.8h, v16.8h\n"
+ "fmla v26.8h, v2.8h, v16.8h\n"
+ "fmla v25.8h, v1.8h, v16.8h\n"
"ldr q16, [x24, x14]\n"
- "fmla v29.8h, v2.8h, v17.8h\n"
- "fmla v26.8h, v7.8h, v18.8h\n"
- "fmla v25.8h, v6.8h, v18.8h\n"
- "fmla v23.8h, v4.8h, v18.8h\n"
- "fmla v21.8h, v3.8h, v18.8h\n"
+ "ldr x24, [x15, #0x20]\n"
+ "fmla v21.8h, v8.8h, v17.8h\n"
+ "fmla v24.8h, v4.8h, v19.8h\n"
+ "fmla v23.8h, v7.8h, v17.8h\n"
"ldr q18, [x22, x14]\n"
- "fmla v22.8h, v4.8h, v16.8h\n"
- "ldr q4, [x16, #0x50]\n"
- "fmla v28.8h, v1.8h, v17.8h\n"
+ "fmla v27.8h, v7.8h, v19.8h\n"
+ "fmla v30.8h, v2.8h, v16.8h\n"
+ "fmla v29.8h, v1.8h, v16.8h\n"
"ldr q1, [x16, #0x20]\n"
- "fmla v27.8h, v0.8h, v17.8h\n"
- "ldr q17, [x21, x14]\n"
- "fmla v29.8h, v6.8h, v18.8h\n"
+ "fmla v26.8h, v6.8h, v19.8h\n"
+ "fmla v28.8h, v0.8h, v16.8h\n"
+ "ldr q17, [x20, x14]\n"
+ "fmla v21.8h, v3.8h, v19.8h\n"
+ "ldr q16, [x23, x14]\n"
+ "fmla v25.8h, v7.8h, v18.8h\n"
+ "fmla v23.8h, v4.8h, v18.8h\n"
+ "ldr q4, [x16, #0x50]\n"
"fmax v29.8h, v29.8h, v15.8h\n"
- "fmla v24.8h, v7.8h, v16.8h\n"
- "fmla v21.8h, v5.8h, v16.8h\n"
- "fmin v29.8h, v29.8h, v14.8h\n"
- "str q29, [x10, x13]\n"
- "fmla v23.8h, v0.8h, v18.8h\n"
+ "fmla v30.8h, v6.8h, v16.8h\n"
+ "fmla v24.8h, v0.8h, v16.8h\n"
"ldr q0, [x16, #0x10]\n"
- "fmla v22.8h, v2.8h, v17.8h\n"
- "ldr q2, [x16, #0x30]\n"
- "fmla v25.8h, v8.8h, v16.8h\n"
- "ldr q16, [x23, x14]\n"
- "fmla v26.8h, v3.8h, v18.8h\n"
+ "fmla v26.8h, v8.8h, v18.8h\n"
+ "fmla v27.8h, v3.8h, v16.8h\n"
"ldr q3, [x16, #0x40]\n"
- "fmla v27.8h, v8.8h, v17.8h\n"
- "fmla v24.8h, v5.8h, v17.8h\n"
+ "fmla v28.8h, v8.8h, v17.8h\n"
+ "fmla v21.8h, v5.8h, v18.8h\n"
+ "ldr q16, [x21, x14]\n"
+ "fmla v25.8h, v5.8h, v17.8h\n"
"ldr q5, [x16, #0x60]\n"
- "fmax v28.8h, v28.8h, v15.8h\n"
- "fmla v23.8h, v8.8h, v16.8h\n"
+ "fmla v23.8h, v2.8h, v17.8h\n"
+ "ldr q2, [x16, #0x30]\n"
+ "fmax v30.8h, v30.8h, v15.8h\n"
+ "ldp x23, x22, [x15, #0x0]\n"
+ "fmax v26.8h, v26.8h, v15.8h\n"
+ "ldp x21, x20, [x15, #0x10]\n"
+ "fmin v29.8h, v29.8h, v14.8h\n"
+ "add x14, x14, #0x10\n"
+ "fmla v24.8h, v8.8h, v16.8h\n"
"ldr q8, [x16, #0x90]\n"
+ "fmax v28.8h, v28.8h, v15.8h\n"
+ "ldr q9, [x23, x7]\n"
+ "ldr q10, [x22, x7]\n"
"fmla v21.8h, v7.8h, v16.8h\n"
"ldr q7, [x16, #0x80]\n"
- "fmla v22.8h, v6.8h, v16.8h\n"
- "ldr q13, [x20, x7]\n"
- "ldr q6, [x16, #0x70]\n"
+ "fmin v30.8h, v30.8h, v14.8h\n"
+ "ldr q11, [x21, x7]\n"
+ "ldr q12, [x20, x7]\n"
+ "fmla v23.8h, v6.8h, v16.8h\n"
"fmax v27.8h, v27.8h, v15.8h\n"
- "fmax v26.8h, v26.8h, v15.8h\n"
- "fmax v25.8h, v25.8h, v15.8h\n"
- "ldr x24, [x17, #0x20]\n"
- "ldp x21, x20, [x15, #0x0]\n"
- "ldr q9, [x21, x7]\n"
- "ldr q10, [x20, x7]\n"
+ "ldr q13, [x24, x7]\n"
+ "ldr q6, [x16, #0x70]\n"
"fmin v28.8h, v28.8h, v14.8h\n"
- "fmin v27.8h, v27.8h, v14.8h\n"
- "ldp x21, x20, [x15, #0x10]\n"
- "ldr q11, [x21, x7]\n"
"fmin v26.8h, v26.8h, v14.8h\n"
- "fmin v25.8h, v25.8h, v14.8h\n"
- "ldr q12, [x20, x7]\n"
+ "str q30, [x11, x13]\n"
+ "ldr x20, [x17, #0x20]\n"
+ "fmax v25.8h, v25.8h, v15.8h\n"
"fmax v24.8h, v24.8h, v15.8h\n"
- "fmax v23.8h, v23.8h, v15.8h\n"
- "str q28, [x9, x13]\n"
+ "fmin v27.8h, v27.8h, v14.8h\n"
"fmax v21.8h, v21.8h, v15.8h\n"
- "fmax v22.8h, v22.8h, v15.8h\n"
- "str q27, [x28, x13]\n"
+ "str q29, [x10, x13]\n"
"ldr x23, [x17, #0x28]\n"
- "str q26, [x27, x13]\n"
+ "fmax v23.8h, v23.8h, v15.8h\n"
+ "str q28, [x28, x13]\n"
"ldr x22, [x17, #0x30]\n"
- "ldr x21, [x17, #0x38]\n"
"add x7, x7, #0x10\n"
- "str q25, [x24, x13]\n"
- "ldr x20, [x17, #0x40]\n"
+ "str q26, [x20, x13]\n"
+ "ldr x21, [x17, #0x40]\n"
"cmp x7, x8, LSL #4\n"
+ "fmin v25.8h, v25.8h, v14.8h\n"
+ "str q27, [x27, x13]\n"
+ "ldr x20, [x17, #0x38]\n"
"fmin v24.8h, v24.8h, v14.8h\n"
- "fmin v23.8h, v23.8h, v14.8h\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "add x14, x14, #0x10\n"
- "str q24, [x23, x13]\n"
- "fmin v22.8h, v22.8h, v14.8h\n"
- "str q23, [x22, x13]\n"
+ "fmin v23.8h, v23.8h, v14.8h\n"
"add x16, x16, #0xa0\n"
- "str q21, [x21, x13]\n"
- "str q22, [x20, x13]\n"
+ "str q25, [x23, x13]\n"
+ "str q24, [x22, x13]\n"
+ "str q21, [x20, x13]\n"
+ "str q23, [x21, x13]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v29.16b, v31.16b\n fmla v29.8h, v8.8h, v9.8h\n"
- "mov v28.16b, v31.16b\n fmla v28.8h, v7.8h, v9.8h\n"
- "ldr x23, [x15, #0x30]\n"
- "ldr x22, [x15, #0x38]\n"
- "mov v27.16b, v31.16b\n fmla v27.8h, v6.8h, v9.8h\n"
- "fmla v29.8h, v0.8h, v10.8h\n"
- "ldr x21, [x15, #0x28]\n"
- "ldr x20, [x15, #0x48]\n"
- "ldr q19, [x20, x14]\n"
- "fmla v28.8h, v4.8h, v13.8h\n"
- "mov v26.16b, v31.16b\n fmla v26.8h, v5.8h, v9.8h\n"
+ "mov v30.16b, v31.16b\n fmla v30.8h, v8.8h, v9.8h\n"
+ "mov v29.16b, v31.16b\n fmla v29.8h, v7.8h, v9.8h\n"
+ "ldr x22, [x15, #0x30]\n"
+ "ldr x27, [x15, #0x38]\n"
+ "mov v28.16b, v31.16b\n fmla v28.8h, v6.8h, v9.8h\n"
+ "mov v27.16b, v31.16b\n fmla v27.8h, v5.8h, v9.8h\n"
+ "ldr x26, [x15, #0x28]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "mov v26.16b, v31.16b\n fmla v26.8h, v4.8h, v9.8h\n"
+ "mov v25.16b, v31.16b\n fmla v25.8h, v3.8h, v9.8h\n"
"ldr x20, [x15, #0x40]\n"
- "mov v25.16b, v31.16b\n fmla v25.8h, v4.8h, v9.8h\n"
- "mov v24.16b, v31.16b\n fmla v24.8h, v3.8h, v9.8h\n"
"ldr x25, [x15, #0x50]\n"
+ "mov v24.16b, v31.16b\n fmla v24.8h, v2.8h, v9.8h\n"
+ "mov v23.16b, v31.16b\n fmla v23.8h, v0.8h, v9.8h\n"
"ldr x24, [x15, #0x58]\n"
- "fmla v27.8h, v2.8h, v11.8h\n"
- "ldr q17, [x23, x14]\n"
- "mov v23.16b, v31.16b\n fmla v23.8h, v2.8h, v9.8h\n"
"ldr x23, [x15, #0x60]\n"
- "fmla v29.8h, v5.8h, v13.8h\n"
- "fmla v28.8h, v6.8h, v17.8h\n"
- "ldr x12, [x15, #0x70]\n"
- "ldr x11, [x15, #0x88]\n"
- "mov v22.16b, v31.16b\n fmla v22.8h, v0.8h, v9.8h\n"
- "fmla v27.8h, v3.8h, v13.8h\n"
- "ldr x10, [x17, #0x0]\n"
+ "fmla v30.8h, v0.8h, v10.8h\n"
+ "ldr q22, [x21, x14]\n"
+ "fmla v29.8h, v4.8h, v13.8h\n"
+ "ldr x12, [x15, #0x88]\n"
+ "fmla v28.8h, v2.8h, v11.8h\n"
+ "ldr q17, [x22, x14]\n"
+ "fmla v27.8h, v2.8h, v13.8h\n"
+ "ldr x22, [x15, #0x70]\n"
+ "fmla v26.8h, v1.8h, v13.8h\n"
+ "fmla v25.8h, v0.8h, v13.8h\n"
+ "ldr x11, [x17, #0x0]\n"
"add x13, x13, #0x10\n"
- "fmla v26.8h, v2.8h, v13.8h\n"
- "fmla v25.8h, v1.8h, v13.8h\n"
- "ldr x9, [x17, #0x8]\n"
- "ldr x28, [x17, #0x10]\n"
- "fmla v24.8h, v0.8h, v13.8h\n"
- "ldr q18, [x22, x14]\n"
- "fmla v23.8h, v6.8h, v12.8h\n"
- "ldr q16, [x21, x14]\n"
+ "fmla v24.8h, v6.8h, v12.8h\n"
+ "ldr q16, [x26, x14]\n"
"mov v21.16b, v31.16b\n fmla v21.8h, v1.8h, v9.8h\n"
- "fmla v29.8h, v7.8h, v17.8h\n"
- "ldr x22, [x15, #0x68]\n"
- "ldr x21, [x15, #0x78]\n"
- "fmla v28.8h, v0.8h, v18.8h\n"
- "fmla v22.8h, v8.8h, v16.8h\n"
+ "ldr x21, [x15, #0x68]\n"
+ "fmla v30.8h, v5.8h, v13.8h\n"
+ "fmla v29.8h, v6.8h, v17.8h\n"
+ "ldr x10, [x17, #0x8]\n"
+ "ldr x9, [x17, #0x10]\n"
+ "fmla v28.8h, v3.8h, v13.8h\n"
+ "ldr q18, [x27, x14]\n"
+ "fmla v27.8h, v4.8h, v17.8h\n"
+ "ldr x28, [x15, #0x78]\n"
+ "fmla v23.8h, v8.8h, v16.8h\n"
"ldr q16, [x20, x14]\n"
+ "fmla v26.8h, v3.8h, v17.8h\n"
"ldr x20, [x15, #0x80]\n"
- "fmla v26.8h, v4.8h, v17.8h\n"
- "fmla v25.8h, v3.8h, v17.8h\n"
- "ldr x27, [x17, #0x18]\n"
"fmla v21.8h, v0.8h, v17.8h\n"
- "fmla v24.8h, v4.8h, v19.8h\n"
- "fmla v23.8h, v1.8h, v17.8h\n"
+ "fmla v25.8h, v4.8h, v22.8h\n"
+ "ldr x27, [x17, #0x18]\n"
+ "fmla v30.8h, v7.8h, v17.8h\n"
+ "fmla v29.8h, v0.8h, v18.8h\n"
+ "fmla v24.8h, v1.8h, v17.8h\n"
"ldr q17, [x25, x14]\n"
- "fmla v29.8h, v1.8h, v18.8h\n"
+ "fmla v28.8h, v1.8h, v16.8h\n"
+ "ldr x26, [x15, #0x90]\n"
+ "fmla v26.8h, v5.8h, v22.8h\n"
+ "fmla v23.8h, v1.8h, v22.8h\n"
+ "fmla v21.8h, v2.8h, v22.8h\n"
+ "fmla v30.8h, v1.8h, v18.8h\n"
"ldr q20, [x24, x14]\n"
- "fmla v28.8h, v2.8h, v16.8h\n"
- "fmla v27.8h, v1.8h, v16.8h\n"
+ "ldr x25, [x15, #0x98]\n"
+ "fmla v29.8h, v2.8h, v16.8h\n"
"ldr q16, [x23, x14]\n"
- "ldr x26, [x15, #0x90]\n"
- "fmla v25.8h, v5.8h, v19.8h\n"
- "fmla v21.8h, v2.8h, v19.8h\n"
- "ldr x25, [x15, #0xa0]\n"
- "ldr x24, [x15, #0x98]\n"
- "fmla v26.8h, v0.8h, v17.8h\n"
- "fmla v24.8h, v2.8h, v20.8h\n"
- "fmla v28.8h, v8.8h, v19.8h\n"
- "fmla v27.8h, v7.8h, v19.8h\n"
- "fmla v22.8h, v1.8h, v19.8h\n"
+ "fmla v27.8h, v0.8h, v17.8h\n"
+ "ldr x24, [x15, #0xa0]\n"
+ "fmla v28.8h, v7.8h, v22.8h\n"
+ "fmla v25.8h, v2.8h, v20.8h\n"
+ "fmla v24.8h, v3.8h, v16.8h\n"
+ "fmla v30.8h, v3.8h, v17.8h\n"
"ldr q19, [x22, x14]\n"
- "fmla v23.8h, v3.8h, v16.8h\n"
- "ldr x23, [x15, #0xa8]\n"
- "fmla v26.8h, v6.8h, v16.8h\n"
+ "ldr x23, [x15, #0xb0]\n"
+ "fmla v29.8h, v8.8h, v22.8h\n"
+ "ldr q17, [x21, x14]\n"
+ "ldr x22, [x15, #0xa8]\n"
+ "fmla v27.8h, v6.8h, v16.8h\n"
"ldr q18, [x20, x14]\n"
- "fmla v25.8h, v7.8h, v19.8h\n"
- "ldr x22, [x15, #0xc0]\n"
- "fmla v24.8h, v6.8h, v19.8h\n"
- "fmla v21.8h, v4.8h, v19.8h\n"
- "fmla v29.8h, v3.8h, v17.8h\n"
- "ldr q17, [x12, x14]\n"
- "fmla v27.8h, v5.8h, v20.8h\n"
- "ldr q16, [x21, x14]\n"
- "fmla v23.8h, v5.8h, v19.8h\n"
- "fmla v22.8h, v3.8h, v19.8h\n"
- "ldr x21, [x15, #0xb0]\n"
+ "ldr x21, [x15, #0xc0]\n"
+ "fmla v28.8h, v5.8h, v20.8h\n"
+ "ldr q16, [x28, x14]\n"
"ldr x20, [x15, #0xb8]\n"
- "fmla v26.8h, v8.8h, v19.8h\n"
- "fmla v24.8h, v8.8h, v17.8h\n"
+ "fmla v26.8h, v7.8h, v17.8h\n"
+ "fmla v25.8h, v6.8h, v17.8h\n"
+ "fmla v21.8h, v4.8h, v17.8h\n"
+ "fmla v24.8h, v5.8h, v17.8h\n"
+ "fmla v23.8h, v3.8h, v17.8h\n"
+ "fmla v27.8h, v8.8h, v17.8h\n"
+ "fmla v29.8h, v3.8h, v18.8h\n"
+ "fmla v30.8h, v4.8h, v18.8h\n"
+ "fmla v25.8h, v8.8h, v19.8h\n"
+ "fmla v26.8h, v0.8h, v18.8h\n"
"fmla v21.8h, v6.8h, v16.8h\n"
- "fmla v28.8h, v3.8h, v18.8h\n"
- "fmla v25.8h, v0.8h, v18.8h\n"
- "fmla v22.8h, v5.8h, v17.8h\n"
- "ldr q17, [x11, x14]\n"
- "fmla v23.8h, v7.8h, v16.8h\n"
- "ldr q16, [x26, x14]\n"
- "fmla v29.8h, v4.8h, v18.8h\n"
- "fmla v26.8h, v1.8h, v18.8h\n"
- "ldr q18, [x24, x14]\n"
- "fmla v28.8h, v5.8h, v17.8h\n"
- "fmla v27.8h, v4.8h, v17.8h\n"
- "fmla v25.8h, v2.8h, v17.8h\n"
- "fmla v24.8h, v1.8h, v17.8h\n"
- "ldr q17, [x25, x14]\n"
- "fmla v21.8h, v8.8h, v16.8h\n"
- "fmla v22.8h, v7.8h, v16.8h\n"
+ "fmla v24.8h, v7.8h, v16.8h\n"
+ "ldr q17, [x26, x14]\n"
+ "fmla v23.8h, v5.8h, v19.8h\n"
+ "ldr q16, [x12, x14]\n"
+ "fmla v27.8h, v1.8h, v18.8h\n"
+ "ldr q19, [x25, x14]\n"
+ "fmla v29.8h, v5.8h, v16.8h\n"
+ "fmla v28.8h, v4.8h, v16.8h\n"
+ "fmla v26.8h, v2.8h, v16.8h\n"
+ "fmla v25.8h, v1.8h, v16.8h\n"
+ "ldr q16, [x24, x14]\n"
+ "fmla v21.8h, v8.8h, v17.8h\n"
+ "fmla v24.8h, v4.8h, v19.8h\n"
+ "fmla v23.8h, v7.8h, v17.8h\n"
+ "ldr q18, [x22, x14]\n"
+ "fmla v27.8h, v7.8h, v19.8h\n"
+ "fmla v30.8h, v2.8h, v16.8h\n"
+ "fmla v29.8h, v1.8h, v16.8h\n"
+ "fmla v26.8h, v6.8h, v19.8h\n"
+ "fmla v28.8h, v0.8h, v16.8h\n"
+ "ldr q17, [x20, x14]\n"
+ "fmla v21.8h, v3.8h, v19.8h\n"
"ldr q16, [x23, x14]\n"
- "fmla v29.8h, v2.8h, v17.8h\n"
- "fmla v26.8h, v7.8h, v18.8h\n"
- "fmla v25.8h, v6.8h, v18.8h\n"
+ "fmla v25.8h, v7.8h, v18.8h\n"
"fmla v23.8h, v4.8h, v18.8h\n"
- "fmla v21.8h, v3.8h, v18.8h\n"
- "ldr q18, [x21, x14]\n"
- "fmla v22.8h, v4.8h, v16.8h\n"
- "fmla v28.8h, v1.8h, v17.8h\n"
- "fmax v28.8h, v28.8h, v15.8h\n"
- "fmla v27.8h, v0.8h, v17.8h\n"
- "ldr q17, [x20, x14]\n"
- "fmla v29.8h, v6.8h, v18.8h\n"
"fmax v29.8h, v29.8h, v15.8h\n"
- "fmla v24.8h, v7.8h, v16.8h\n"
- "fmla v21.8h, v5.8h, v16.8h\n"
+ "fmla v30.8h, v6.8h, v16.8h\n"
+ "fmla v24.8h, v0.8h, v16.8h\n"
+ "fmla v26.8h, v8.8h, v18.8h\n"
+ "fmla v27.8h, v3.8h, v16.8h\n"
+ "fmla v28.8h, v8.8h, v17.8h\n"
+ "fmla v21.8h, v5.8h, v18.8h\n"
+ "ldr q16, [x21, x14]\n"
+ "fmla v25.8h, v5.8h, v17.8h\n"
"fmin v29.8h, v29.8h, v14.8h\n"
- "str q29, [x10, x13]\n"
- "fmla v23.8h, v0.8h, v18.8h\n"
- "fmla v22.8h, v2.8h, v17.8h\n"
- "ldr x20, [x17, #0x20]\n"
- "fmin v28.8h, v28.8h, v14.8h\n"
- "fmla v25.8h, v8.8h, v16.8h\n"
- "ldr q16, [x22, x14]\n"
- "fmla v26.8h, v3.8h, v18.8h\n"
+ "fmla v23.8h, v2.8h, v17.8h\n"
+ "fmax v30.8h, v30.8h, v15.8h\n"
+ "add x14, x14, #0x10\n"
"fmax v26.8h, v26.8h, v15.8h\n"
- "fmla v27.8h, v8.8h, v17.8h\n"
- "fmla v24.8h, v5.8h, v17.8h\n"
+ "fmla v24.8h, v8.8h, v16.8h\n"
+ "fmax v28.8h, v28.8h, v15.8h\n"
"fmax v27.8h, v27.8h, v15.8h\n"
- "str q28, [x9, x13]\n"
- "fmla v23.8h, v8.8h, v16.8h\n"
+ "str q29, [x10, x13]\n"
+ "ldr x23, [x17, #0x28]\n"
"fmla v21.8h, v7.8h, v16.8h\n"
+ "fmin v30.8h, v30.8h, v14.8h\n"
+ "fmla v23.8h, v6.8h, v16.8h\n"
+ "fmin v26.8h, v26.8h, v14.8h\n"
"fmax v25.8h, v25.8h, v15.8h\n"
- "ldr x23, [x17, #0x28]\n"
- "fmla v22.8h, v6.8h, v16.8h\n"
+ "fmin v28.8h, v28.8h, v14.8h\n"
"fmin v27.8h, v27.8h, v14.8h\n"
- "str q27, [x28, x13]\n"
- "ldr x22, [x17, #0x30]\n"
- "fmin v26.8h, v26.8h, v14.8h\n"
- "fmin v25.8h, v25.8h, v14.8h\n"
- "str q26, [x27, x13]\n"
- "ldr x21, [x17, #0x38]\n"
+ "str q30, [x11, x13]\n"
+ "ldr x20, [x17, #0x20]\n"
"fmax v24.8h, v24.8h, v15.8h\n"
+ "fmax v21.8h, v21.8h, v15.8h\n"
"fmax v23.8h, v23.8h, v15.8h\n"
- "str q25, [x20, x13]\n"
+ "fmin v25.8h, v25.8h, v14.8h\n"
+ "str q28, [x9, x13]\n"
+ "ldr x22, [x17, #0x30]\n"
+ "str q27, [x27, x13]\n"
+ "ldr x21, [x17, #0x38]\n"
+ "str q26, [x20, x13]\n"
"ldr x20, [x17, #0x40]\n"
- "fmax v21.8h, v21.8h, v15.8h\n"
- "fmax v22.8h, v22.8h, v15.8h\n"
- "add x14, x14, #0x10\n"
"fmin v24.8h, v24.8h, v14.8h\n"
- "fmin v23.8h, v23.8h, v14.8h\n"
- "str q24, [x23, x13]\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "fmin v22.8h, v22.8h, v14.8h\n"
- "str q23, [x22, x13]\n"
+ "fmin v23.8h, v23.8h, v14.8h\n"
+ "str q25, [x23, x13]\n"
+ "str q24, [x22, x13]\n"
"str q21, [x21, x13]\n"
- "str q22, [x20, x13]\n"
+ "str q23, [x20, x13]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 92f\n"
@@ -478,13 +478,13 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr q8, [x16, #0x90]\n"
"ldr x24, [x15, #0x0]\n"
"ldr x23, [x15, #0x8]\n"
- "add x24, x24, x14\n"
- "add x23, x23, x14\n"
"ldr x22, [x15, #0x10]\n"
"ldr x21, [x15, #0x18]\n"
+ "ldr x20, [x15, #0x20]\n"
+ "add x24, x24, x14\n"
+ "add x23, x23, x14\n"
"add x22, x22, x14\n"
"add x21, x21, x14\n"
- "ldr x20, [x15, #0x20]\n"
"add x20, x20, x14\n"
"tbz %x[n_channels], #2, 5f\n"
"ld1 { v9.d }[0], [x24], #0x8\n"
@@ -537,23 +537,23 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"mov v23.16b, v31.16b\n fmla v23.8h, v8.8h, v9.8h\n"
"mov v25.16b, v31.16b\n fmla v25.8h, v6.8h, v9.8h\n"
"ldr x20, [x15, #0x28]\n"
- "add x20, x20, x14\n"
"mov v24.16b, v31.16b\n fmla v24.8h, v7.8h, v9.8h\n"
"mov v26.16b, v31.16b\n fmla v26.8h, v5.8h, v9.8h\n"
"mov v27.16b, v31.16b\n fmla v27.8h, v4.8h, v9.8h\n"
"mov v28.16b, v31.16b\n fmla v28.8h, v3.8h, v9.8h\n"
"mov v29.16b, v31.16b\n fmla v29.8h, v2.8h, v9.8h\n"
+ "mov v30.16b, v31.16b\n fmla v30.8h, v1.8h, v9.8h\n"
+ "add x20, x20, x14\n"
"fmla v23.8h, v0.8h, v10.8h\n"
"fmla v25.8h, v2.8h, v11.8h\n"
- "mov v30.16b, v31.16b\n fmla v30.8h, v1.8h, v9.8h\n"
"fmla v31.8h, v0.8h, v9.8h\n"
- "fmla v29.8h, v6.8h, v12.8h\n"
- "fmla v23.8h, v5.8h, v13.8h\n"
"fmla v24.8h, v4.8h, v13.8h\n"
- "fmla v25.8h, v3.8h, v13.8h\n"
"fmla v26.8h, v2.8h, v13.8h\n"
"fmla v27.8h, v1.8h, v13.8h\n"
+ "fmla v29.8h, v6.8h, v12.8h\n"
"fmla v28.8h, v0.8h, v13.8h\n"
+ "fmla v23.8h, v5.8h, v13.8h\n"
+ "fmla v25.8h, v3.8h, v13.8h\n"
"tbz %x[n_channels], #2, 9f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 8f\n"
@@ -600,11 +600,11 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x38]\n"
"fmla v23.8h, v7.8h, v11.8h\n"
"fmla v24.8h, v6.8h, v11.8h\n"
- "add x20, x20, x14\n"
"fmla v26.8h, v4.8h, v11.8h\n"
"fmla v27.8h, v3.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v11.8h\n"
"fmla v30.8h, v0.8h, v11.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 17f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
@@ -676,11 +676,11 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x50]\n"
"fmla v24.8h, v8.8h, v10.8h\n"
"fmla v25.8h, v7.8h, v10.8h\n"
- "add x20, x20, x14\n"
"fmla v27.8h, v5.8h, v10.8h\n"
"fmla v28.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
"fmla v31.8h, v1.8h, v10.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 29f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
@@ -776,11 +776,11 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x70]\n"
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "add x20, x20, x14\n"
"fmla v28.8h, v6.8h, v10.8h\n"
"fmla v29.8h, v5.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v10.8h\n"
"fmla v31.8h, v3.8h, v10.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 45f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 44f\n"
@@ -852,9 +852,9 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x88]\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v24.8h, v3.8h, v12.8h\n"
- "add x20, x20, x14\n"
"fmla v26.8h, v1.8h, v12.8h\n"
"fmla v27.8h, v0.8h, v12.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 57f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 56f\n"
@@ -878,9 +878,9 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x90]\n"
"fmla v24.8h, v5.8h, v11.8h\n"
"fmla v25.8h, v4.8h, v11.8h\n"
- "add x20, x20, x14\n"
"fmla v27.8h, v2.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 61f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 60f\n"
@@ -928,9 +928,9 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xa0]\n"
"fmla v26.8h, v7.8h, v12.8h\n"
"fmla v27.8h, v6.8h, v12.8h\n"
- "add x20, x20, x14\n"
"fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v12.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 69f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 68f\n"
@@ -954,8 +954,8 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xa8]\n"
"fmla v23.8h, v2.8h, v11.8h\n"
"fmla v24.8h, v1.8h, v11.8h\n"
- "add x20, x20, x14\n"
"fmla v25.8h, v0.8h, v11.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 73f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 72f\n"
@@ -979,9 +979,9 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xb0]\n"
"fmla v27.8h, v8.8h, v13.8h\n"
"fmla v28.8h, v7.8h, v13.8h\n"
- "add x20, x20, x14\n"
"fmla v30.8h, v5.8h, v13.8h\n"
"fmla v31.8h, v4.8h, v13.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 77f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 76f\n"
@@ -1005,8 +1005,8 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xb8]\n"
"fmla v23.8h, v6.8h, v12.8h\n"
"fmla v26.8h, v3.8h, v12.8h\n"
- "add x20, x20, x14\n"
"fmla v29.8h, v0.8h, v12.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 81f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 80f\n"
@@ -1030,8 +1030,8 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xc0]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
- "add x20, x20, x14\n"
"fmla v31.8h, v2.8h, v11.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 85f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 84f\n"
@@ -1075,206 +1075,206 @@ void a64_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"fmin v31.8h, v31.8h, v14.8h\n"
"tbz %x[n_channels], #2, 89f\n"
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.d }[0], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.d }[0], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.d }[0], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.d }[0], [x23]\n"
"st1 { v25.d }[0], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.d }[0], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.d }[0], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
- "add x13, x13, #0x8\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.d }[0], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.d }[0], [x22]\n"
+ "add x20, x20, x13\n"
+ "add x13, x13, #0x8\n"
"st1 { v30.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #1, 88f\n"
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.s }[2], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.s }[2], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.s }[2], [x23]\n"
"st1 { v25.s }[2], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.s }[2], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.s }[2], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
- "add x13, x13, #0x4\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.s }[2], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.s }[2], [x22]\n"
+ "add x20, x20, x13\n"
+ "add x13, x13, #0x4\n"
"st1 { v30.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"tbz %x[n_channels], #0, 91f\n"
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.h }[6], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.h }[6], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.h }[6], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.h }[6], [x23]\n"
"st1 { v25.h }[6], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.h }[6], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.h }[6], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.h }[6], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.h }[6], [x22]\n"
+ "add x20, x20, x13\n"
"st1 { v30.h }[6], [x21]\n"
"st1 { v31.h }[6], [x20]\n"
"b 91f\n"
"88:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 91f\n"
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.h }[4], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.h }[4], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.h }[4], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.h }[4], [x23]\n"
"st1 { v25.h }[4], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.h }[4], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.h }[4], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.h }[4], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.h }[4], [x22]\n"
+ "add x20, x20, x13\n"
"st1 { v30.h }[4], [x21]\n"
"st1 { v31.h }[4], [x20]\n"
"b 91f\n"
"89:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 90f\n"
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.s }[0], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.s }[0], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.s }[0], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.s }[0], [x23]\n"
"st1 { v25.s }[0], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.s }[0], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.s }[0], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
- "add x13, x13, #0x4\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.s }[0], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.s }[0], [x22]\n"
+ "add x20, x20, x13\n"
+ "add x13, x13, #0x4\n"
"st1 { v30.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"tbz %x[n_channels], #0, 91f\n"
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.h }[2], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.h }[2], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.h }[2], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.h }[2], [x23]\n"
"st1 { v25.h }[2], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.h }[2], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.h }[2], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.h }[2], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.h }[2], [x22]\n"
+ "add x20, x20, x13\n"
"st1 { v30.h }[2], [x21]\n"
"st1 { v31.h }[2], [x20]\n"
"b 91f\n"
"90:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.h }[0], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.h }[0], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.h }[0], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.h }[0], [x23]\n"
"st1 { v25.h }[0], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.h }[0], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.h }[0], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.h }[0], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.h }[0], [x22]\n"
+ "add x20, x20, x13\n"
"st1 { v30.h }[0], [x21]\n"
"st1 { v31.h }[0], [x20]\n"
"91:" // Oddments: Store: Bit 2: End
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index a1e1dd0e99..e88bdcc5be 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,56 +87,56 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
+ "mov x28, #0x0\n"
"mov x27, #0x0\n"
- "mov x26, #0x0\n"
"1:" // Tile loop
- "str x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
- "mov x23, #0x4\n"
- "str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x27, x24\n" // offset = tile_i * ld_input_row
+ "str x28, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x4\n"
+ "mov x21, #0x4\n"
+ "str x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x2, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mov x3, #0x10\n" // cntb _, ALL, #1
"ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
"ldr x5, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x27, x22\n" // offset = tile_i * ld_output_row
- "mov x6, #0x10\n" // cntb _, ALL, #1
- "madd x21, x26, x4, x21\n" // offset += tile_j * ld_input_col
- "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x4, x4, #0x1\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x26, x5, x20\n" // offset += tile_j * ld_output_col
- "lsl x5, x5, #0x1\n"
- "add x17, x4, x4\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x7, x7, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x15, x7, x24, LSL #1\n"
- "mul x20, x20, x23\n" // offset *= output_tile_size
- "add x14, x15, x24, LSL #1\n"
- "add x8, x8, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "lsr x13, %x[n_channels], #0x3\n"
- "add x12, x14, x24, LSL #1\n"
- "add x11, x17, x4\n"
- "add x10, x8, x22, LSL #1\n"
- "add x9, x12, x24, LSL #1\n"
- "add x28, x11, x4\n"
- "add x27, x10, x22, LSL #1\n"
- "add x23, x5, x5\n"
+ "lsr x6, %x[n_channels], #0x3\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v13.8h }, [x20]\n"
+ "ld1r { v14.8h }, [x20]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x8, #0x0\n"
"ld1r { v15.8h }, [x20]\n"
- "add x26, x9, x24, LSL #1\n"
- "add x25, x28, x4\n"
- "add x24, x27, x22, LSL #1\n"
- "add x22, x23, x5\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x6\n"
- "cbz x13, 4f\n"
- "ldr q14, [x16, #0x0]\n"
+ "mul x24, x28, x25\n" // offset = tile_i * ld_input_row
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x23, XZR, x3\n"
+ "mul x22, x28, x2\n" // offset = tile_i * ld_output_row
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x24, x27, x4, x24\n" // offset += tile_j * ld_input_col
+ "lsl x4, x4, #0x1\n"
+ "madd x22, x27, x5, x22\n" // offset += tile_j * ld_output_col
+ "lsl x5, x5, #0x1\n"
+ "mul x24, x24, x26\n" // offset *= kernel_stride * output_size
+ "add x15, x4, x4\n"
+ "add x14, x15, x4\n"
+ "add x13, x14, x4\n"
+ "mul x22, x22, x21\n" // offset *= output_tile_size
+ "add x21, x5, x5\n"
+ "add x12, x13, x4\n"
+ "add x20, x21, x5\n"
+ "add x7, x7, x24, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x11, x7, x25, LSL #1\n"
+ "add x10, x11, x25, LSL #1\n"
+ "add x17, x17, x22, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x9, x10, x25, LSL #1\n"
+ "add x28, x17, x2, LSL #1\n"
+ "add x27, x9, x25, LSL #1\n"
+ "add x26, x28, x2, LSL #1\n"
+ "add x25, x27, x25, LSL #1\n"
+ "add x24, x26, x2, LSL #1\n"
+ "cbz x6, 4f\n"
+ "ldr q13, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "cmp x6, x13, LSL #4\n"
+ "cmp x3, x6, LSL #4\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
"ldr q3, [x16, #0x40]\n"
@@ -146,512 +146,512 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ldr q7, [x16, #0x80]\n"
"ldr q8, [x16, #0x90]\n"
"add x16, x16, #0xa0\n"
- "ldr q9, [x14, x17]\n"
+ "ldr q9, [x10, x15]\n"
"ld1 { v10.8h }, [x7]\n"
- "ldr q11, [x7, x25]\n"
- "ldr q12, [x14, x11]\n"
+ "ldr q11, [x7, x12]\n"
+ "ldr q12, [x10, x14]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "mov v26.16b, v14.16b\n fmla v26.8h, v4.8h, v9.8h\n"
- "mov v28.16b, v14.16b\n fmla v28.8h, v8.8h, v9.8h\n"
- "add x6, x6, #0x10\n"
- "cmp x6, x13, LSL #4\n"
- "mov v16.16b, v14.16b\n fmla v16.8h, v3.8h, v9.8h\n"
- "mov v22.16b, v14.16b\n fmla v22.8h, v1.8h, v9.8h\n"
- "add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
- "mov v23.16b, v14.16b\n fmla v23.8h, v0.8h, v9.8h\n"
- "fmla v26.8h, v5.8h, v12.8h\n"
- "mov v25.16b, v14.16b\n fmla v25.8h, v7.8h, v9.8h\n"
- "mov v17.16b, v14.16b\n fmla v17.8h, v6.8h, v9.8h\n"
- "mov v31.16b, v14.16b\n fmla v31.8h, v5.8h, v9.8h\n"
- "mov v20.16b, v14.16b\n fmla v20.8h, v2.8h, v9.8h\n"
- "ldr q9, [x12, x17]\n"
- "fmla v28.8h, v0.8h, v10.8h\n"
- "ld1 { v30.8h }, [x26]\n"
- "mov v29.16b, v14.16b\n fmla v29.8h, v2.8h, v11.8h\n"
- "ldr q27, [x26, x25]\n"
- "fmla v16.8h, v4.8h, v12.8h\n"
- "fmla v22.8h, v2.8h, v12.8h\n"
- "fmla v23.8h, v1.8h, v12.8h\n"
- "mov v21.16b, v14.16b\n fmla v21.8h, v6.8h, v30.8h\n"
- "ldr q10, [x12, x11]\n"
- "fmla v26.8h, v7.8h, v9.8h\n"
- "fmla v25.8h, v8.8h, v12.8h\n"
- "fmla v17.8h, v7.8h, v12.8h\n"
- "fmla v29.8h, v6.8h, v12.8h\n"
- "mov v24.16b, v14.16b\n fmla v24.8h, v3.8h, v12.8h\n"
- "mov v19.16b, v14.16b\n fmla v19.8h, v0.8h, v12.8h\n"
- "ldr q11, [x7, x4]\n"
- "mov v30.16b, v14.16b\n fmla v30.8h, v8.8h, v27.8h\n"
- "ldr q12, [x7, x28]\n"
- "fmla v16.8h, v6.8h, v9.8h\n"
- "fmla v22.8h, v4.8h, v9.8h\n"
- "fmla v23.8h, v3.8h, v9.8h\n"
- "mov v27.16b, v14.16b\n fmla v27.8h, v1.8h, v9.8h\n"
- "mov v18.16b, v14.16b\n fmla v18.8h, v0.8h, v9.8h\n"
- "ldr q14, [x16, #0x0]\n"
- "fmla v31.8h, v8.8h, v9.8h\n"
- "fmla v20.8h, v5.8h, v9.8h\n"
- "fmla v21.8h, v2.8h, v9.8h\n"
- "ld1 { v9.8h }, [x15]\n"
- "fmla v26.8h, v8.8h, v10.8h\n"
- "fmla v28.8h, v1.8h, v11.8h\n"
- "fmla v25.8h, v0.8h, v11.8h\n"
- "ldr q11, [x15, x25]\n"
- "fmla v17.8h, v2.8h, v12.8h\n"
- "fmla v29.8h, v1.8h, v12.8h\n"
- "ld1 { v12.8h }, [x9]\n"
- "fmla v16.8h, v7.8h, v10.8h\n"
- "fmla v24.8h, v6.8h, v10.8h\n"
- "fmla v22.8h, v5.8h, v10.8h\n"
- "fmla v23.8h, v4.8h, v10.8h\n"
- "fmla v19.8h, v3.8h, v10.8h\n"
- "fmla v27.8h, v2.8h, v10.8h\n"
- "fmla v18.8h, v1.8h, v10.8h\n"
- "fmla v30.8h, v0.8h, v10.8h\n"
- "ldr q10, [x15, x17]\n"
- "fmla v31.8h, v0.8h, v9.8h\n"
- "fmla v20.8h, v6.8h, v12.8h\n"
- "fmla v21.8h, v3.8h, v12.8h\n"
- "ldr q12, [x9, x25]\n"
- "fmla v26.8h, v1.8h, v10.8h\n"
- "fmla v28.8h, v3.8h, v9.8h\n"
- "fmla v29.8h, v5.8h, v11.8h\n"
- "fmla v24.8h, v2.8h, v11.8h\n"
- "ldr q11, [x15, x11]\n"
- "fmla v25.8h, v4.8h, v10.8h\n"
- "fmla v17.8h, v3.8h, v10.8h\n"
- "fmla v16.8h, v0.8h, v10.8h\n"
- "fmla v19.8h, v8.8h, v12.8h\n"
- "fmla v30.8h, v5.8h, v12.8h\n"
- "ldr q9, [x26, x4]\n"
- "fmla v31.8h, v2.8h, v10.8h\n"
- "fmla v26.8h, v2.8h, v11.8h\n"
- "fmla v28.8h, v5.8h, v10.8h\n"
- "ldr q10, [x14, x4]\n"
- "fmla v25.8h, v5.8h, v11.8h\n"
- "fmla v17.8h, v4.8h, v11.8h\n"
- "fmla v29.8h, v3.8h, v11.8h\n"
- "fmla v16.8h, v1.8h, v11.8h\n"
- "fmla v24.8h, v0.8h, v11.8h\n"
- "ldr q11, [x14, x28]\n"
- "fmla v21.8h, v7.8h, v9.8h\n"
- "fmla v27.8h, v6.8h, v9.8h\n"
- "ldr q12, [x26, x28]\n"
- "fmla v31.8h, v4.8h, v10.8h\n"
- "fmla v26.8h, v3.8h, v10.8h\n"
- "fmla v20.8h, v1.8h, v10.8h\n"
- "fmla v22.8h, v0.8h, v10.8h\n"
- "fmla v28.8h, v7.8h, v10.8h\n"
- "fmla v25.8h, v6.8h, v10.8h\n"
- "ldr q10, [x7, x17]\n"
- "fmla v18.8h, v8.8h, v12.8h\n"
- "fmla v30.8h, v7.8h, v12.8h\n"
- "ldr q9, [x12, x4]\n"
- "fmla v17.8h, v8.8h, v11.8h\n"
- "fmla v29.8h, v7.8h, v11.8h\n"
- "fmla v16.8h, v5.8h, v11.8h\n"
- "fmla v24.8h, v4.8h, v11.8h\n"
- "fmla v23.8h, v2.8h, v11.8h\n"
- "fmla v19.8h, v1.8h, v11.8h\n"
- "ldr q12, [x7, x11]\n"
- "add x7, x7, #0x10\n"
+ "mov v31.16b, v13.16b\n fmla v31.8h, v4.8h, v9.8h\n"
+ "mov v24.16b, v13.16b\n fmla v24.8h, v8.8h, v9.8h\n"
+ "add x3, x3, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "mov v23.16b, v13.16b\n fmla v23.8h, v3.8h, v9.8h\n"
+ "mov v28.16b, v13.16b\n fmla v28.8h, v1.8h, v9.8h\n"
+ "cmp x3, x6, LSL #4\n"
+ "add x8, x8, #0x10\n"
+ "mov v21.16b, v13.16b\n fmla v21.8h, v0.8h, v9.8h\n"
+ "mov v22.16b, v13.16b\n fmla v22.8h, v7.8h, v9.8h\n"
+ "mov v19.16b, v13.16b\n fmla v19.8h, v6.8h, v9.8h\n"
+ "mov v29.16b, v13.16b\n fmla v29.8h, v5.8h, v9.8h\n"
+ "fmla v31.8h, v5.8h, v12.8h\n"
+ "mov v30.16b, v13.16b\n fmla v30.8h, v2.8h, v9.8h\n"
+ "ldr q9, [x9, x15]\n"
+ "fmla v24.8h, v0.8h, v10.8h\n"
+ "ld1 { v26.8h }, [x25]\n"
+ "mov v18.16b, v13.16b\n fmla v18.8h, v2.8h, v11.8h\n"
+ "ldr q17, [x25, x12]\n"
+ "fmla v23.8h, v4.8h, v12.8h\n"
+ "fmla v28.8h, v2.8h, v12.8h\n"
+ "fmla v21.8h, v1.8h, v12.8h\n"
+ "fmla v22.8h, v8.8h, v12.8h\n"
+ "mov v20.16b, v13.16b\n fmla v20.8h, v6.8h, v26.8h\n"
+ "ldr q11, [x9, x14]\n"
"fmla v31.8h, v7.8h, v9.8h\n"
- "fmla v26.8h, v6.8h, v9.8h\n"
- "fmla v20.8h, v4.8h, v9.8h\n"
- "fmla v22.8h, v3.8h, v9.8h\n"
- "fmla v21.8h, v1.8h, v9.8h\n"
- "fmla v27.8h, v0.8h, v9.8h\n"
- "ldr q9, [x12, x28]\n"
- "fmla v28.8h, v2.8h, v10.8h\n"
- "fmla v25.8h, v1.8h, v10.8h\n"
- "fmla v17.8h, v0.8h, v10.8h\n"
- "ld1 { v10.8h }, [x14]\n"
- "fmla v18.8h, v2.8h, v9.8h\n"
- "fmla v29.8h, v0.8h, v12.8h\n"
- "fmla v31.8h, v3.8h, v10.8h\n"
- "fmla v20.8h, v0.8h, v10.8h\n"
- "fmla v16.8h, v8.8h, v9.8h\n"
- "fmla v24.8h, v7.8h, v9.8h\n"
- "fmla v23.8h, v5.8h, v9.8h\n"
- "fmla v19.8h, v4.8h, v9.8h\n"
- "fmla v30.8h, v1.8h, v9.8h\n"
- "ldr q11, [x9, x17]\n"
- "fmla v25.8h, v2.8h, v12.8h\n"
- "fmla v17.8h, v1.8h, v12.8h\n"
- "ldr q12, [x14, x25]\n"
- "add x14, x14, #0x10\n"
- "ldr q9, [x14, x17]\n"
- "fmla v28.8h, v6.8h, v10.8h\n"
- "ld1 { v10.8h }, [x12]\n"
- "fmla v27.8h, v4.8h, v11.8h\n"
- "fmla v18.8h, v3.8h, v11.8h\n"
- "fmla v29.8h, v8.8h, v12.8h\n"
- "fmla v24.8h, v5.8h, v12.8h\n"
+ "fmla v19.8h, v7.8h, v12.8h\n"
+ "fmla v18.8h, v6.8h, v12.8h\n"
+ "mov v26.16b, v13.16b\n fmla v26.8h, v3.8h, v12.8h\n"
+ "mov v27.16b, v13.16b\n fmla v27.8h, v0.8h, v12.8h\n"
+ "ldr q10, [x7, x4]\n"
+ "mov v16.16b, v13.16b\n fmla v16.8h, v8.8h, v17.8h\n"
+ "ldr q12, [x7, x13]\n"
+ "fmla v23.8h, v6.8h, v9.8h\n"
+ "fmla v28.8h, v4.8h, v9.8h\n"
+ "fmla v21.8h, v3.8h, v9.8h\n"
+ "mov v25.16b, v13.16b\n fmla v25.8h, v1.8h, v9.8h\n"
+ "mov v17.16b, v13.16b\n fmla v17.8h, v0.8h, v9.8h\n"
+ "ldr q13, [x16, #0x0]\n"
+ "fmla v29.8h, v8.8h, v9.8h\n"
+ "fmla v30.8h, v5.8h, v9.8h\n"
+ "fmla v20.8h, v2.8h, v9.8h\n"
+ "ld1 { v9.8h }, [x11]\n"
+ "fmla v31.8h, v8.8h, v11.8h\n"
+ "fmla v24.8h, v1.8h, v10.8h\n"
+ "fmla v22.8h, v0.8h, v10.8h\n"
+ "ldr q10, [x11, x12]\n"
"fmla v19.8h, v2.8h, v12.8h\n"
- "ldr q12, [x12, x25]\n"
- "add x12, x12, #0x10\n"
- "fmla v31.8h, v6.8h, v10.8h\n"
- "fmla v20.8h, v3.8h, v10.8h\n"
- "fmla v21.8h, v0.8h, v10.8h\n"
- "ldr q10, [x26, x17]\n"
- "fmla v30.8h, v2.8h, v12.8h\n"
- "fmla v27.8h, v7.8h, v10.8h\n"
- "fmla v18.8h, v6.8h, v10.8h\n"
- "fmla v20.8h, v8.8h, v11.8h\n"
- "fmla v22.8h, v7.8h, v11.8h\n"
- "fmla v23.8h, v6.8h, v11.8h\n"
- "fmla v21.8h, v5.8h, v11.8h\n"
- "ldr q11, [x9, x11]\n"
- "fmla v19.8h, v5.8h, v12.8h\n"
- "fmla v27.8h, v5.8h, v11.8h\n"
- "fmla v18.8h, v4.8h, v11.8h\n"
- "fmla v30.8h, v3.8h, v11.8h\n"
- "fmla v24.8h, v8.8h, v12.8h\n"
- "ldr q12, [x26, x11]\n"
- "fmla v21.8h, v8.8h, v10.8h\n"
- "ldr q10, [x15, x4]\n"
- "fmla v22.8h, v8.8h, v11.8h\n"
+ "fmla v18.8h, v1.8h, v12.8h\n"
+ "ld1 { v12.8h }, [x27]\n"
"fmla v23.8h, v7.8h, v11.8h\n"
- "add x26, x26, #0x10\n"
- "fmla v19.8h, v6.8h, v11.8h\n"
- "ldr q11, [x15, x28]\n"
- "fmla v27.8h, v8.8h, v12.8h\n"
- "add x15, x15, #0x10\n"
- "fmla v18.8h, v7.8h, v12.8h\n"
+ "fmla v26.8h, v6.8h, v11.8h\n"
+ "fmla v28.8h, v5.8h, v11.8h\n"
+ "fmla v21.8h, v4.8h, v11.8h\n"
+ "fmla v27.8h, v3.8h, v11.8h\n"
+ "fmla v25.8h, v2.8h, v11.8h\n"
+ "fmla v17.8h, v1.8h, v11.8h\n"
+ "fmla v16.8h, v0.8h, v11.8h\n"
+ "ldr q11, [x11, x15]\n"
+ "fmla v29.8h, v0.8h, v9.8h\n"
"fmla v30.8h, v6.8h, v12.8h\n"
- "ldr q12, [x9, x4]\n"
- "fmla v28.8h, v4.8h, v10.8h\n"
- "fmla v25.8h, v3.8h, v10.8h\n"
- "fmax v28.8h, v28.8h, v13.8h\n"
- "fmla v31.8h, v1.8h, v10.8h\n"
+ "fmla v20.8h, v3.8h, v12.8h\n"
+ "ldr q12, [x27, x12]\n"
+ "fmla v24.8h, v3.8h, v9.8h\n"
+ "fmla v31.8h, v1.8h, v11.8h\n"
+ "fmla v18.8h, v5.8h, v10.8h\n"
+ "fmla v26.8h, v2.8h, v10.8h\n"
+ "ldr q10, [x11, x14]\n"
+ "fmla v22.8h, v4.8h, v11.8h\n"
+ "fmla v19.8h, v3.8h, v11.8h\n"
+ "fmla v23.8h, v0.8h, v11.8h\n"
+ "fmla v27.8h, v8.8h, v12.8h\n"
+ "fmla v16.8h, v5.8h, v12.8h\n"
+ "ldr q9, [x25, x4]\n"
+ "fmla v29.8h, v2.8h, v11.8h\n"
+ "fmla v31.8h, v2.8h, v10.8h\n"
+ "fmla v24.8h, v5.8h, v11.8h\n"
+ "ldr q12, [x10, x4]\n"
+ "fmla v22.8h, v5.8h, v10.8h\n"
+ "fmla v19.8h, v4.8h, v10.8h\n"
+ "fmla v18.8h, v3.8h, v10.8h\n"
+ "fmla v23.8h, v1.8h, v10.8h\n"
"fmla v26.8h, v0.8h, v10.8h\n"
- "ldr q10, [x9, x28]\n"
- "ldr q0, [x16, #0x10]\n"
- "fmla v17.8h, v5.8h, v11.8h\n"
- "fmla v29.8h, v4.8h, v11.8h\n"
- "fmax v25.8h, v25.8h, v13.8h\n"
+ "ldr q11, [x10, x13]\n"
+ "fmla v20.8h, v7.8h, v9.8h\n"
+ "fmla v25.8h, v6.8h, v9.8h\n"
+ "ldr q9, [x25, x13]\n"
+ "fmla v29.8h, v4.8h, v12.8h\n"
+ "fmla v31.8h, v3.8h, v12.8h\n"
+ "fmla v30.8h, v1.8h, v12.8h\n"
+ "fmla v28.8h, v0.8h, v12.8h\n"
+ "fmla v24.8h, v7.8h, v12.8h\n"
+ "fmla v22.8h, v6.8h, v12.8h\n"
+ "ldr q10, [x7, x15]\n"
+ "fmla v17.8h, v8.8h, v9.8h\n"
+ "fmla v16.8h, v7.8h, v9.8h\n"
+ "ldr q9, [x9, x4]\n"
+ "fmla v19.8h, v8.8h, v11.8h\n"
+ "fmla v18.8h, v7.8h, v11.8h\n"
+ "fmla v23.8h, v5.8h, v11.8h\n"
+ "fmla v26.8h, v4.8h, v11.8h\n"
+ "fmla v21.8h, v2.8h, v11.8h\n"
+ "fmla v27.8h, v1.8h, v11.8h\n"
+ "ldr q12, [x7, x14]\n"
+ "add x7, x7, #0x10\n"
+ "fmla v29.8h, v7.8h, v9.8h\n"
+ "fmla v31.8h, v6.8h, v9.8h\n"
+ "fmla v30.8h, v4.8h, v9.8h\n"
+ "fmla v28.8h, v3.8h, v9.8h\n"
+ "fmla v20.8h, v1.8h, v9.8h\n"
+ "fmla v25.8h, v0.8h, v9.8h\n"
+ "ldr q9, [x9, x13]\n"
+ "fmla v24.8h, v2.8h, v10.8h\n"
+ "fmla v22.8h, v1.8h, v10.8h\n"
+ "fmla v19.8h, v0.8h, v10.8h\n"
+ "ld1 { v10.8h }, [x10]\n"
+ "fmla v18.8h, v0.8h, v12.8h\n"
+ "fmla v17.8h, v2.8h, v9.8h\n"
+ "fmla v23.8h, v8.8h, v9.8h\n"
+ "fmla v26.8h, v7.8h, v9.8h\n"
+ "fmla v21.8h, v5.8h, v9.8h\n"
+ "fmla v29.8h, v3.8h, v10.8h\n"
+ "fmla v30.8h, v0.8h, v10.8h\n"
+ "fmla v27.8h, v4.8h, v9.8h\n"
+ "fmla v16.8h, v1.8h, v9.8h\n"
+ "ldr q11, [x27, x15]\n"
+ "fmla v22.8h, v2.8h, v12.8h\n"
+ "fmla v19.8h, v1.8h, v12.8h\n"
+ "ldr q12, [x10, x12]\n"
+ "add x10, x10, #0x10\n"
+ "ldr q9, [x10, x15]\n"
+ "fmla v24.8h, v6.8h, v10.8h\n"
+ "ld1 { v10.8h }, [x9]\n"
+ "fmla v25.8h, v4.8h, v11.8h\n"
+ "fmla v17.8h, v3.8h, v11.8h\n"
+ "fmla v28.8h, v7.8h, v11.8h\n"
+ "fmla v18.8h, v8.8h, v12.8h\n"
+ "fmla v26.8h, v5.8h, v12.8h\n"
+ "fmla v27.8h, v2.8h, v12.8h\n"
+ "ldr q12, [x9, x12]\n"
+ "fmla v29.8h, v6.8h, v10.8h\n"
"add x9, x9, #0x10\n"
- "fmla v16.8h, v2.8h, v11.8h\n"
+ "fmla v30.8h, v3.8h, v10.8h\n"
+ "fmla v20.8h, v0.8h, v10.8h\n"
+ "ldr q10, [x25, x15]\n"
+ "fmla v21.8h, v6.8h, v11.8h\n"
+ "fmla v16.8h, v2.8h, v12.8h\n"
+ "fmla v26.8h, v8.8h, v12.8h\n"
+ "fmla v25.8h, v7.8h, v10.8h\n"
+ "fmla v17.8h, v6.8h, v10.8h\n"
+ "fmla v27.8h, v5.8h, v12.8h\n"
+ "ldr q12, [x25, x14]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v30.8h, v8.8h, v11.8h\n"
+ "fmla v20.8h, v5.8h, v11.8h\n"
+ "ldr q11, [x27, x14]\n"
+ "fmla v25.8h, v5.8h, v11.8h\n"
+ "fmla v17.8h, v4.8h, v11.8h\n"
+ "fmla v16.8h, v3.8h, v11.8h\n"
+ "fmla v28.8h, v8.8h, v11.8h\n"
+ "fmla v21.8h, v7.8h, v11.8h\n"
+ "fmla v27.8h, v6.8h, v11.8h\n"
+ "ldr q11, [x11, x13]\n"
+ "fmla v20.8h, v8.8h, v10.8h\n"
+ "ldr q10, [x11, x4]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v25.8h, v8.8h, v12.8h\n"
+ "fmla v17.8h, v7.8h, v12.8h\n"
+ "fmla v16.8h, v6.8h, v12.8h\n"
+ "ldr q12, [x27, x4]\n"
+ "fmla v19.8h, v5.8h, v11.8h\n"
+ "fmla v18.8h, v4.8h, v11.8h\n"
+ "fmla v24.8h, v4.8h, v10.8h\n"
+ "fmla v22.8h, v3.8h, v10.8h\n"
+ "fmla v29.8h, v1.8h, v10.8h\n"
+ "fmla v31.8h, v0.8h, v10.8h\n"
+ "ldr q10, [x27, x13]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "fmla v23.8h, v2.8h, v11.8h\n"
"ldr q2, [x16, #0x30]\n"
- "fmla v24.8h, v1.8h, v11.8h\n"
- "ldr q11, [x7, x25]\n"
+ "fmla v26.8h, v1.8h, v11.8h\n"
+ "ldr q11, [x7, x12]\n"
"ldr q1, [x16, #0x20]\n"
- "fmla v20.8h, v7.8h, v12.8h\n"
- "fmla v22.8h, v6.8h, v12.8h\n"
+ "fmla v30.8h, v7.8h, v12.8h\n"
+ "fmla v28.8h, v6.8h, v12.8h\n"
"ldr q6, [x16, #0x70]\n"
- "fmla v21.8h, v4.8h, v12.8h\n"
- "fmla v27.8h, v3.8h, v12.8h\n"
- "ldr q12, [x14, x11]\n"
+ "fmla v20.8h, v4.8h, v12.8h\n"
+ "fmla v25.8h, v3.8h, v12.8h\n"
+ "ldr q12, [x10, x14]\n"
"ldr q3, [x16, #0x40]\n"
- "fmla v23.8h, v8.8h, v10.8h\n"
+ "fmla v21.8h, v8.8h, v10.8h\n"
"ldr q8, [x16, #0x90]\n"
- "fmla v19.8h, v7.8h, v10.8h\n"
+ "fmla v27.8h, v7.8h, v10.8h\n"
"ldr q7, [x16, #0x80]\n"
- "fmla v18.8h, v5.8h, v10.8h\n"
+ "fmla v17.8h, v5.8h, v10.8h\n"
"ldr q5, [x16, #0x60]\n"
- "fmla v30.8h, v4.8h, v10.8h\n"
+ "fmla v16.8h, v4.8h, v10.8h\n"
"ld1 { v10.8h }, [x7]\n"
"ldr q4, [x16, #0x50]\n"
- "fmax v17.8h, v17.8h, v13.8h\n"
- "fmax v29.8h, v29.8h, v13.8h\n"
+ "fmax v24.8h, v24.8h, v14.8h\n"
+ "fmax v22.8h, v22.8h, v14.8h\n"
+ "add x27, x27, #0x10\n"
+ "fmax v19.8h, v19.8h, v14.8h\n"
+ "fmax v18.8h, v18.8h, v14.8h\n"
"add x16, x16, #0xa0\n"
- "fmax v31.8h, v31.8h, v13.8h\n"
- "fmax v26.8h, v26.8h, v13.8h\n"
- "fmax v16.8h, v16.8h, v13.8h\n"
- "fmax v24.8h, v24.8h, v13.8h\n"
- "fmax v20.8h, v20.8h, v13.8h\n"
- "fmax v22.8h, v22.8h, v13.8h\n"
- "fmax v23.8h, v23.8h, v13.8h\n"
- "fmax v19.8h, v19.8h, v13.8h\n"
- "fmax v21.8h, v21.8h, v13.8h\n"
- "fmax v27.8h, v27.8h, v13.8h\n"
- "fmax v18.8h, v18.8h, v13.8h\n"
- "fmax v30.8h, v30.8h, v13.8h\n"
- "fmin v28.8h, v28.8h, v15.8h\n"
- "fmin v25.8h, v25.8h, v15.8h\n"
- "st1 { v28.8h }, [x8]\n"
- "fmin v17.8h, v17.8h, v15.8h\n"
- "fmin v29.8h, v29.8h, v15.8h\n"
- "str q25, [x8, x5]\n"
- "fmin v31.8h, v31.8h, v15.8h\n"
- "fmin v26.8h, v26.8h, v15.8h\n"
- "str q17, [x8, x23]\n"
- "fmin v16.8h, v16.8h, v15.8h\n"
+ "fmax v29.8h, v29.8h, v14.8h\n"
+ "fmax v31.8h, v31.8h, v14.8h\n"
+ "fmax v23.8h, v23.8h, v14.8h\n"
+ "fmax v26.8h, v26.8h, v14.8h\n"
+ "fmax v30.8h, v30.8h, v14.8h\n"
+ "fmax v28.8h, v28.8h, v14.8h\n"
+ "fmax v21.8h, v21.8h, v14.8h\n"
+ "fmax v27.8h, v27.8h, v14.8h\n"
+ "fmax v20.8h, v20.8h, v14.8h\n"
+ "fmax v25.8h, v25.8h, v14.8h\n"
+ "fmax v17.8h, v17.8h, v14.8h\n"
+ "fmax v16.8h, v16.8h, v14.8h\n"
"fmin v24.8h, v24.8h, v15.8h\n"
- "str q29, [x8, x22]\n"
- "add x8, x8, #0x10\n"
- "fmin v20.8h, v20.8h, v15.8h\n"
"fmin v22.8h, v22.8h, v15.8h\n"
- "st1 { v31.8h }, [x10]\n"
- "fmin v23.8h, v23.8h, v15.8h\n"
"fmin v19.8h, v19.8h, v15.8h\n"
- "str q26, [x10, x5]\n"
- "fmin v21.8h, v21.8h, v15.8h\n"
- "fmin v27.8h, v27.8h, v15.8h\n"
- "str q16, [x10, x23]\n"
"fmin v18.8h, v18.8h, v15.8h\n"
+ "fmin v29.8h, v29.8h, v15.8h\n"
+ "fmin v31.8h, v31.8h, v15.8h\n"
+ "fmin v23.8h, v23.8h, v15.8h\n"
+ "fmin v26.8h, v26.8h, v15.8h\n"
+ "st1 { v24.8h }, [x17]\n"
"fmin v30.8h, v30.8h, v15.8h\n"
- "str q24, [x10, x22]\n"
- "add x10, x10, #0x10\n"
- "st1 { v20.8h }, [x27]\n"
- "str q22, [x27, x5]\n"
- "str q23, [x27, x23]\n"
- "str q19, [x27, x22]\n"
- "add x27, x27, #0x10\n"
- "st1 { v21.8h }, [x24]\n"
- "str q27, [x24, x5]\n"
- "str q18, [x24, x23]\n"
- "str q30, [x24, x22]\n"
+ "fmin v28.8h, v28.8h, v15.8h\n"
+ "str q22, [x17, x5]\n"
+ "fmin v21.8h, v21.8h, v15.8h\n"
+ "fmin v27.8h, v27.8h, v15.8h\n"
+ "str q19, [x17, x21]\n"
+ "fmin v20.8h, v20.8h, v15.8h\n"
+ "fmin v25.8h, v25.8h, v15.8h\n"
+ "str q18, [x17, x20]\n"
+ "add x17, x17, #0x10\n"
+ "fmin v17.8h, v17.8h, v15.8h\n"
+ "fmin v16.8h, v16.8h, v15.8h\n"
+ "st1 { v29.8h }, [x28]\n"
+ "str q31, [x28, x5]\n"
+ "str q23, [x28, x21]\n"
+ "str q26, [x28, x20]\n"
+ "add x28, x28, #0x10\n"
+ "st1 { v30.8h }, [x26]\n"
+ "str q28, [x26, x5]\n"
+ "str q21, [x26, x21]\n"
+ "str q27, [x26, x20]\n"
+ "add x26, x26, #0x10\n"
+ "st1 { v20.8h }, [x24]\n"
+ "str q25, [x24, x5]\n"
+ "str q17, [x24, x21]\n"
+ "str q16, [x24, x20]\n"
"add x24, x24, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v16.16b, v14.16b\n fmla v16.8h, v4.8h, v9.8h\n"
- "mov v23.16b, v14.16b\n fmla v23.8h, v8.8h, v9.8h\n"
- "mov v31.16b, v14.16b\n fmla v31.8h, v3.8h, v9.8h\n"
- "mov v30.16b, v14.16b\n fmla v30.8h, v1.8h, v9.8h\n"
- "mov v18.16b, v14.16b\n fmla v18.8h, v0.8h, v9.8h\n"
- "fmla v16.8h, v5.8h, v12.8h\n"
- "mov v17.16b, v14.16b\n fmla v17.8h, v7.8h, v9.8h\n"
- "mov v19.16b, v14.16b\n fmla v19.8h, v6.8h, v9.8h\n"
- "mov v28.16b, v14.16b\n fmla v28.8h, v5.8h, v9.8h\n"
- "mov v27.16b, v14.16b\n fmla v27.8h, v2.8h, v9.8h\n"
- "ldr q24, [x12, x17]\n"
- "fmla v23.8h, v0.8h, v10.8h\n"
- "ld1 { v21.8h }, [x26]\n"
- "mov v29.16b, v14.16b\n fmla v29.8h, v2.8h, v11.8h\n"
- "ldr q20, [x26, x25]\n"
- "fmla v31.8h, v4.8h, v12.8h\n"
+ "mov v31.16b, v13.16b\n fmla v31.8h, v4.8h, v9.8h\n"
+ "mov v17.16b, v13.16b\n fmla v17.8h, v8.8h, v9.8h\n"
+ "mov v29.16b, v13.16b\n fmla v29.8h, v3.8h, v9.8h\n"
+ "mov v30.16b, v13.16b\n fmla v30.8h, v1.8h, v9.8h\n"
+ "mov v19.16b, v13.16b\n fmla v19.8h, v0.8h, v9.8h\n"
+ "mov v20.16b, v13.16b\n fmla v20.8h, v7.8h, v9.8h\n"
+ "mov v21.16b, v13.16b\n fmla v21.8h, v6.8h, v9.8h\n"
+ "mov v18.16b, v13.16b\n fmla v18.8h, v5.8h, v9.8h\n"
+ "fmla v31.8h, v5.8h, v12.8h\n"
+ "mov v27.16b, v13.16b\n fmla v27.8h, v2.8h, v9.8h\n"
+ "ldr q24, [x9, x15]\n"
+ "fmla v17.8h, v0.8h, v10.8h\n"
+ "ld1 { v22.8h }, [x25]\n"
+ "mov v10.16b, v13.16b\n fmla v10.8h, v2.8h, v11.8h\n"
+ "ldr q16, [x25, x12]\n"
+ "fmla v29.8h, v4.8h, v12.8h\n"
"fmla v30.8h, v2.8h, v12.8h\n"
- "fmla v18.8h, v1.8h, v12.8h\n"
- "mov v26.16b, v14.16b\n fmla v26.8h, v6.8h, v21.8h\n"
- "ldr q9, [x12, x11]\n"
- "fmla v16.8h, v7.8h, v24.8h\n"
- "fmla v17.8h, v8.8h, v12.8h\n"
- "fmla v19.8h, v7.8h, v12.8h\n"
- "fmla v29.8h, v6.8h, v12.8h\n"
- "mov v11.16b, v14.16b\n fmla v11.8h, v3.8h, v12.8h\n"
- "mov v10.16b, v14.16b\n fmla v10.8h, v0.8h, v12.8h\n"
- "ldr q22, [x7, x4]\n"
- "mov v25.16b, v14.16b\n fmla v25.8h, v8.8h, v20.8h\n"
- "ldr q21, [x7, x28]\n"
- "fmla v31.8h, v6.8h, v24.8h\n"
+ "fmla v19.8h, v1.8h, v12.8h\n"
+ "fmla v20.8h, v8.8h, v12.8h\n"
+ "mov v9.16b, v13.16b\n fmla v9.8h, v6.8h, v22.8h\n"
+ "ldr q22, [x9, x14]\n"
+ "fmla v31.8h, v7.8h, v24.8h\n"
+ "fmla v21.8h, v7.8h, v12.8h\n"
+ "fmla v10.8h, v6.8h, v12.8h\n"
+ "mov v28.16b, v13.16b\n fmla v28.8h, v3.8h, v12.8h\n"
+ "mov v11.16b, v13.16b\n fmla v11.8h, v0.8h, v12.8h\n"
+ "ldr q23, [x7, x4]\n"
+ "mov v26.16b, v13.16b\n fmla v26.8h, v8.8h, v16.8h\n"
+ "ldr q16, [x7, x13]\n"
+ "fmla v29.8h, v6.8h, v24.8h\n"
"fmla v30.8h, v4.8h, v24.8h\n"
- "fmla v18.8h, v3.8h, v24.8h\n"
- "mov v12.16b, v14.16b\n fmla v12.8h, v1.8h, v24.8h\n"
- "fmla v14.8h, v0.8h, v24.8h\n"
- "fmla v28.8h, v8.8h, v24.8h\n"
+ "fmla v19.8h, v3.8h, v24.8h\n"
+ "mov v12.16b, v13.16b\n fmla v12.8h, v1.8h, v24.8h\n"
+ "mov v25.16b, v13.16b\n fmla v25.8h, v0.8h, v24.8h\n"
+ "fmla v18.8h, v8.8h, v24.8h\n"
"fmla v27.8h, v5.8h, v24.8h\n"
- "fmla v26.8h, v2.8h, v24.8h\n"
- "ld1 { v24.8h }, [x15]\n"
- "fmla v16.8h, v8.8h, v9.8h\n"
- "fmla v23.8h, v1.8h, v22.8h\n"
- "fmla v17.8h, v0.8h, v22.8h\n"
- "ldr q22, [x15, x25]\n"
- "fmla v19.8h, v2.8h, v21.8h\n"
- "fmla v29.8h, v1.8h, v21.8h\n"
- "ld1 { v20.8h }, [x9]\n"
- "fmla v31.8h, v7.8h, v9.8h\n"
- "fmla v11.8h, v6.8h, v9.8h\n"
- "fmla v30.8h, v5.8h, v9.8h\n"
- "fmla v18.8h, v4.8h, v9.8h\n"
- "fmla v10.8h, v3.8h, v9.8h\n"
- "fmla v12.8h, v2.8h, v9.8h\n"
- "fmla v14.8h, v1.8h, v9.8h\n"
- "fmla v25.8h, v0.8h, v9.8h\n"
- "ldr q21, [x15, x17]\n"
- "fmla v28.8h, v0.8h, v24.8h\n"
- "fmla v27.8h, v6.8h, v20.8h\n"
- "fmla v26.8h, v3.8h, v20.8h\n"
- "ldr q20, [x9, x25]\n"
- "fmla v16.8h, v1.8h, v21.8h\n"
- "fmla v23.8h, v3.8h, v24.8h\n"
- "fmla v29.8h, v5.8h, v22.8h\n"
- "fmla v11.8h, v2.8h, v22.8h\n"
- "ldr q22, [x15, x11]\n"
- "fmla v17.8h, v4.8h, v21.8h\n"
- "fmla v19.8h, v3.8h, v21.8h\n"
- "fmla v31.8h, v0.8h, v21.8h\n"
- "fmla v10.8h, v8.8h, v20.8h\n"
- "fmla v25.8h, v5.8h, v20.8h\n"
- "ldr q20, [x26, x4]\n"
- "fmla v28.8h, v2.8h, v21.8h\n"
- "fmla v16.8h, v2.8h, v22.8h\n"
- "fmla v23.8h, v5.8h, v21.8h\n"
- "ldr q21, [x14, x4]\n"
- "fmla v17.8h, v5.8h, v22.8h\n"
+ "fmla v9.8h, v2.8h, v24.8h\n"
+ "ld1 { v24.8h }, [x11]\n"
+ "fmla v31.8h, v8.8h, v22.8h\n"
+ "fmla v17.8h, v1.8h, v23.8h\n"
+ "fmla v20.8h, v0.8h, v23.8h\n"
+ "ldr q23, [x11, x12]\n"
+ "fmla v21.8h, v2.8h, v16.8h\n"
+ "fmla v10.8h, v1.8h, v16.8h\n"
+ "ld1 { v16.8h }, [x27]\n"
+ "fmla v29.8h, v7.8h, v22.8h\n"
+ "fmla v28.8h, v6.8h, v22.8h\n"
+ "fmla v30.8h, v5.8h, v22.8h\n"
"fmla v19.8h, v4.8h, v22.8h\n"
- "fmla v29.8h, v3.8h, v22.8h\n"
+ "fmla v11.8h, v3.8h, v22.8h\n"
+ "fmla v12.8h, v2.8h, v22.8h\n"
+ "fmla v25.8h, v1.8h, v22.8h\n"
+ "fmla v26.8h, v0.8h, v22.8h\n"
+ "ldr q22, [x11, x15]\n"
+ "fmla v18.8h, v0.8h, v24.8h\n"
+ "fmla v27.8h, v6.8h, v16.8h\n"
+ "fmla v9.8h, v3.8h, v16.8h\n"
+ "ldr q16, [x27, x12]\n"
+ "fmla v17.8h, v3.8h, v24.8h\n"
"fmla v31.8h, v1.8h, v22.8h\n"
- "fmla v11.8h, v0.8h, v22.8h\n"
- "ldr q22, [x14, x28]\n"
- "fmla v26.8h, v7.8h, v20.8h\n"
- "fmla v12.8h, v6.8h, v20.8h\n"
- "ldr q20, [x26, x28]\n"
- "fmla v28.8h, v4.8h, v21.8h\n"
- "fmla v16.8h, v3.8h, v21.8h\n"
- "fmla v27.8h, v1.8h, v21.8h\n"
- "fmla v30.8h, v0.8h, v21.8h\n"
- "fmla v23.8h, v7.8h, v21.8h\n"
- "fmla v17.8h, v6.8h, v21.8h\n"
- "ldr q21, [x7, x17]\n"
- "fmla v14.8h, v8.8h, v20.8h\n"
- "fmla v25.8h, v7.8h, v20.8h\n"
- "ldr q20, [x12, x4]\n"
- "fmla v19.8h, v8.8h, v22.8h\n"
- "fmla v29.8h, v7.8h, v22.8h\n"
- "fmla v31.8h, v5.8h, v22.8h\n"
- "fmla v11.8h, v4.8h, v22.8h\n"
+ "fmla v10.8h, v5.8h, v23.8h\n"
+ "fmla v28.8h, v2.8h, v23.8h\n"
+ "ldr q23, [x11, x14]\n"
+ "fmla v20.8h, v4.8h, v22.8h\n"
+ "fmla v21.8h, v3.8h, v22.8h\n"
+ "fmla v29.8h, v0.8h, v22.8h\n"
+ "fmla v11.8h, v8.8h, v16.8h\n"
+ "fmla v26.8h, v5.8h, v16.8h\n"
+ "ldr q16, [x25, x4]\n"
"fmla v18.8h, v2.8h, v22.8h\n"
- "fmla v10.8h, v1.8h, v22.8h\n"
- "ldr q22, [x7, x11]\n"
+ "fmla v31.8h, v2.8h, v23.8h\n"
+ "fmla v17.8h, v5.8h, v22.8h\n"
+ "ldr q22, [x10, x4]\n"
+ "fmla v20.8h, v5.8h, v23.8h\n"
+ "fmla v21.8h, v4.8h, v23.8h\n"
+ "fmla v10.8h, v3.8h, v23.8h\n"
+ "fmla v29.8h, v1.8h, v23.8h\n"
+ "fmla v28.8h, v0.8h, v23.8h\n"
+ "ldr q23, [x10, x13]\n"
+ "fmla v9.8h, v7.8h, v16.8h\n"
+ "fmla v12.8h, v6.8h, v16.8h\n"
+ "ldr q16, [x25, x13]\n"
+ "fmla v18.8h, v4.8h, v22.8h\n"
+ "fmla v31.8h, v3.8h, v22.8h\n"
+ "fmla v27.8h, v1.8h, v22.8h\n"
+ "fmla v30.8h, v0.8h, v22.8h\n"
+ "fmla v17.8h, v7.8h, v22.8h\n"
+ "fmla v20.8h, v6.8h, v22.8h\n"
+ "ldr q22, [x7, x15]\n"
+ "fmla v25.8h, v8.8h, v16.8h\n"
+ "fmla v26.8h, v7.8h, v16.8h\n"
+ "ldr q16, [x9, x4]\n"
+ "fmla v21.8h, v8.8h, v23.8h\n"
+ "fmla v10.8h, v7.8h, v23.8h\n"
+ "fmla v29.8h, v5.8h, v23.8h\n"
+ "fmla v28.8h, v4.8h, v23.8h\n"
+ "fmla v19.8h, v2.8h, v23.8h\n"
+ "fmla v11.8h, v1.8h, v23.8h\n"
+ "ldr q23, [x7, x14]\n"
"add x7, x7, #0x10\n"
- "fmla v28.8h, v7.8h, v20.8h\n"
- "fmla v16.8h, v6.8h, v20.8h\n"
- "fmla v27.8h, v4.8h, v20.8h\n"
- "fmla v30.8h, v3.8h, v20.8h\n"
- "fmla v26.8h, v1.8h, v20.8h\n"
- "fmla v12.8h, v0.8h, v20.8h\n"
- "ldr q20, [x12, x28]\n"
- "fmla v23.8h, v2.8h, v21.8h\n"
- "fmla v17.8h, v1.8h, v21.8h\n"
- "fmla v19.8h, v0.8h, v21.8h\n"
- "ld1 { v21.8h }, [x14]\n"
- "fmla v14.8h, v2.8h, v20.8h\n"
- "fmla v29.8h, v0.8h, v22.8h\n"
- "fmla v28.8h, v3.8h, v21.8h\n"
- "fmla v27.8h, v0.8h, v21.8h\n"
- "fmla v31.8h, v8.8h, v20.8h\n"
- "fmla v11.8h, v7.8h, v20.8h\n"
- "fmla v18.8h, v5.8h, v20.8h\n"
- "fmla v10.8h, v4.8h, v20.8h\n"
- "fmla v25.8h, v1.8h, v20.8h\n"
- "ldr q24, [x9, x17]\n"
+ "fmla v18.8h, v7.8h, v16.8h\n"
+ "fmla v31.8h, v6.8h, v16.8h\n"
+ "fmla v27.8h, v4.8h, v16.8h\n"
+ "fmla v30.8h, v3.8h, v16.8h\n"
+ "fmla v9.8h, v1.8h, v16.8h\n"
+ "fmla v12.8h, v0.8h, v16.8h\n"
+ "ldr q16, [x9, x13]\n"
"fmla v17.8h, v2.8h, v22.8h\n"
- "fmla v19.8h, v1.8h, v22.8h\n"
- "ldr q20, [x14, x25]\n"
- "add x14, x14, #0x10\n"
- "fmla v23.8h, v6.8h, v21.8h\n"
- "ld1 { v21.8h }, [x12]\n"
+ "fmla v20.8h, v1.8h, v22.8h\n"
+ "fmla v21.8h, v0.8h, v22.8h\n"
+ "ld1 { v22.8h }, [x10]\n"
+ "fmla v10.8h, v0.8h, v23.8h\n"
+ "fmla v25.8h, v2.8h, v16.8h\n"
+ "fmla v29.8h, v8.8h, v16.8h\n"
+ "fmla v28.8h, v7.8h, v16.8h\n"
+ "fmla v19.8h, v5.8h, v16.8h\n"
+ "fmla v18.8h, v3.8h, v22.8h\n"
+ "fmla v27.8h, v0.8h, v22.8h\n"
+ "fmla v11.8h, v4.8h, v16.8h\n"
+ "fmla v26.8h, v1.8h, v16.8h\n"
+ "ldr q24, [x27, x15]\n"
+ "fmla v20.8h, v2.8h, v23.8h\n"
+ "fmla v21.8h, v1.8h, v23.8h\n"
+ "ldr q16, [x10, x12]\n"
+ "add x10, x10, #0x10\n"
+ "fmla v17.8h, v6.8h, v22.8h\n"
+ "ld1 { v22.8h }, [x9]\n"
"fmla v12.8h, v4.8h, v24.8h\n"
- "fmla v14.8h, v3.8h, v24.8h\n"
- "fmla v29.8h, v8.8h, v20.8h\n"
- "fmla v11.8h, v5.8h, v20.8h\n"
- "fmla v10.8h, v2.8h, v20.8h\n"
- "ldr q20, [x12, x25]\n"
- "add x12, x12, #0x10\n"
- "fmla v28.8h, v6.8h, v21.8h\n"
- "fmla v27.8h, v3.8h, v21.8h\n"
- "fmla v26.8h, v0.8h, v21.8h\n"
- "ldr q22, [x26, x17]\n"
- "fmla v25.8h, v2.8h, v20.8h\n"
- "fmla v12.8h, v7.8h, v22.8h\n"
- "fmla v14.8h, v6.8h, v22.8h\n"
- "fmla v27.8h, v8.8h, v24.8h\n"
+ "fmla v25.8h, v3.8h, v24.8h\n"
"fmla v30.8h, v7.8h, v24.8h\n"
- "fmla v18.8h, v6.8h, v24.8h\n"
- "fmla v26.8h, v5.8h, v24.8h\n"
- "ldr q21, [x9, x11]\n"
- "fmla v10.8h, v5.8h, v20.8h\n"
- "fmla v12.8h, v5.8h, v21.8h\n"
- "fmla v14.8h, v4.8h, v21.8h\n"
- "fmla v25.8h, v3.8h, v21.8h\n"
- "fmla v11.8h, v8.8h, v20.8h\n"
- "ldr q20, [x26, x11]\n"
- "fmla v26.8h, v8.8h, v22.8h\n"
- "ldr q9, [x15, x4]\n"
- "fmla v30.8h, v8.8h, v21.8h\n"
- "fmla v18.8h, v7.8h, v21.8h\n"
- "add x26, x26, #0x10\n"
- "fmla v10.8h, v6.8h, v21.8h\n"
- "ldr q21, [x15, x28]\n"
- "fmla v12.8h, v8.8h, v20.8h\n"
- "add x15, x15, #0x10\n"
- "fmla v14.8h, v7.8h, v20.8h\n"
- "fmla v25.8h, v6.8h, v20.8h\n"
- "ldr q24, [x9, x4]\n"
- "fmla v23.8h, v4.8h, v9.8h\n"
- "fmla v17.8h, v3.8h, v9.8h\n"
- "fmax v23.8h, v23.8h, v13.8h\n"
- "fmla v28.8h, v1.8h, v9.8h\n"
- "fmla v16.8h, v0.8h, v9.8h\n"
- "ldr q0, [x9, x28]\n"
- "fmax v17.8h, v17.8h, v13.8h\n"
- "fmla v19.8h, v5.8h, v21.8h\n"
- "fmla v29.8h, v4.8h, v21.8h\n"
- "fmax v19.8h, v19.8h, v13.8h\n"
+ "fmla v10.8h, v8.8h, v16.8h\n"
+ "fmla v28.8h, v5.8h, v16.8h\n"
+ "fmla v11.8h, v2.8h, v16.8h\n"
+ "ldr q16, [x9, x12]\n"
+ "fmla v18.8h, v6.8h, v22.8h\n"
"add x9, x9, #0x10\n"
- "fmla v31.8h, v2.8h, v21.8h\n"
- "fmla v11.8h, v1.8h, v21.8h\n"
- "fmax v29.8h, v29.8h, v13.8h\n"
- "fmla v27.8h, v7.8h, v24.8h\n"
- "fmla v30.8h, v6.8h, v24.8h\n"
- "fmax v28.8h, v28.8h, v13.8h\n"
- "fmla v26.8h, v4.8h, v24.8h\n"
- "fmla v12.8h, v3.8h, v24.8h\n"
- "fmax v16.8h, v16.8h, v13.8h\n"
- "fmla v18.8h, v8.8h, v0.8h\n"
- "fmla v10.8h, v7.8h, v0.8h\n"
- "fmax v31.8h, v31.8h, v13.8h\n"
- "fmla v14.8h, v5.8h, v0.8h\n"
- "fmla v25.8h, v4.8h, v0.8h\n"
- "fmax v11.8h, v11.8h, v13.8h\n"
- "fmax v27.8h, v27.8h, v13.8h\n"
- "fmax v30.8h, v30.8h, v13.8h\n"
- "fmax v18.8h, v18.8h, v13.8h\n"
- "fmax v10.8h, v10.8h, v13.8h\n"
- "fmax v26.8h, v26.8h, v13.8h\n"
- "fmax v12.8h, v12.8h, v13.8h\n"
- "fmax v14.8h, v14.8h, v13.8h\n"
- "fmax v25.8h, v25.8h, v13.8h\n"
- "fmin v23.8h, v23.8h, v15.8h\n"
+ "fmla v27.8h, v3.8h, v22.8h\n"
+ "fmla v9.8h, v0.8h, v22.8h\n"
+ "ldr q23, [x25, x15]\n"
+ "fmla v19.8h, v6.8h, v24.8h\n"
+ "fmla v26.8h, v2.8h, v16.8h\n"
+ "fmla v28.8h, v8.8h, v16.8h\n"
+ "fmla v12.8h, v7.8h, v23.8h\n"
+ "fmla v25.8h, v6.8h, v23.8h\n"
+ "fmla v11.8h, v5.8h, v16.8h\n"
+ "ldr q22, [x25, x14]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v27.8h, v8.8h, v24.8h\n"
+ "fmla v9.8h, v5.8h, v24.8h\n"
+ "ldr q16, [x27, x14]\n"
+ "fmla v12.8h, v5.8h, v16.8h\n"
+ "fmla v25.8h, v4.8h, v16.8h\n"
+ "fmla v26.8h, v3.8h, v16.8h\n"
+ "fmla v30.8h, v8.8h, v16.8h\n"
+ "fmla v19.8h, v7.8h, v16.8h\n"
+ "fmla v11.8h, v6.8h, v16.8h\n"
+ "ldr q24, [x11, x13]\n"
+ "fmla v9.8h, v8.8h, v23.8h\n"
+ "ldr q16, [x11, x4]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v12.8h, v8.8h, v22.8h\n"
+ "fmla v25.8h, v7.8h, v22.8h\n"
+ "fmla v26.8h, v6.8h, v22.8h\n"
+ "ldr q23, [x27, x4]\n"
+ "fmla v21.8h, v5.8h, v24.8h\n"
+ "fmla v10.8h, v4.8h, v24.8h\n"
+ "fmla v17.8h, v4.8h, v16.8h\n"
+ "fmla v20.8h, v3.8h, v16.8h\n"
+ "fmla v18.8h, v1.8h, v16.8h\n"
+ "fmla v31.8h, v0.8h, v16.8h\n"
+ "ldr q16, [x27, x13]\n"
+ "add x27, x27, #0x10\n"
+ "fmla v29.8h, v2.8h, v24.8h\n"
+ "fmla v28.8h, v1.8h, v24.8h\n"
+ "fmla v27.8h, v7.8h, v23.8h\n"
+ "fmla v30.8h, v6.8h, v23.8h\n"
+ "fmax v21.8h, v21.8h, v14.8h\n"
+ "fmla v9.8h, v4.8h, v23.8h\n"
+ "fmla v12.8h, v3.8h, v23.8h\n"
+ "fmax v17.8h, v17.8h, v14.8h\n"
+ "fmla v19.8h, v8.8h, v16.8h\n"
+ "fmla v11.8h, v7.8h, v16.8h\n"
+ "fmax v20.8h, v20.8h, v14.8h\n"
+ "fmla v25.8h, v5.8h, v16.8h\n"
+ "fmla v26.8h, v4.8h, v16.8h\n"
+ "fmax v10.8h, v10.8h, v14.8h\n"
+ "fmax v18.8h, v18.8h, v14.8h\n"
+ "fmax v31.8h, v31.8h, v14.8h\n"
+ "fmax v29.8h, v29.8h, v14.8h\n"
+ "fmax v28.8h, v28.8h, v14.8h\n"
+ "fmax v27.8h, v27.8h, v14.8h\n"
+ "fmax v30.8h, v30.8h, v14.8h\n"
+ "fmax v19.8h, v19.8h, v14.8h\n"
+ "fmax v11.8h, v11.8h, v14.8h\n"
+ "fmax v9.8h, v9.8h, v14.8h\n"
+ "fmax v12.8h, v12.8h, v14.8h\n"
+ "fmax v25.8h, v25.8h, v14.8h\n"
+ "fmax v26.8h, v26.8h, v14.8h\n"
"fmin v17.8h, v17.8h, v15.8h\n"
- "st1 { v23.8h }, [x8]\n"
- "fmin v19.8h, v19.8h, v15.8h\n"
+ "fmin v20.8h, v20.8h, v15.8h\n"
+ "fmin v21.8h, v21.8h, v15.8h\n"
+ "fmin v10.8h, v10.8h, v15.8h\n"
+ "fmin v18.8h, v18.8h, v15.8h\n"
+ "fmin v31.8h, v31.8h, v15.8h\n"
"fmin v29.8h, v29.8h, v15.8h\n"
- "str q17, [x8, x5]\n"
"fmin v28.8h, v28.8h, v15.8h\n"
- "fmin v16.8h, v16.8h, v15.8h\n"
- "str q19, [x8, x23]\n"
- "fmin v31.8h, v31.8h, v15.8h\n"
- "fmin v11.8h, v11.8h, v15.8h\n"
- "str q29, [x8, x22]\n"
- "add x8, x8, #0x10\n"
+ "st1 { v17.8h }, [x17]\n"
"fmin v27.8h, v27.8h, v15.8h\n"
"fmin v30.8h, v30.8h, v15.8h\n"
- "st1 { v28.8h }, [x10]\n"
- "fmin v18.8h, v18.8h, v15.8h\n"
- "fmin v10.8h, v10.8h, v15.8h\n"
- "str q16, [x10, x5]\n"
- "fmin v26.8h, v26.8h, v15.8h\n"
+ "str q20, [x17, x5]\n"
+ "fmin v19.8h, v19.8h, v15.8h\n"
+ "fmin v11.8h, v11.8h, v15.8h\n"
+ "str q21, [x17, x21]\n"
+ "fmin v9.8h, v9.8h, v15.8h\n"
"fmin v12.8h, v12.8h, v15.8h\n"
- "str q31, [x10, x23]\n"
- "fmin v14.8h, v14.8h, v15.8h\n"
+ "str q10, [x17, x20]\n"
+ "add x17, x17, #0x10\n"
"fmin v25.8h, v25.8h, v15.8h\n"
- "str q11, [x10, x22]\n"
- "add x10, x10, #0x10\n"
- "st1 { v27.8h }, [x27]\n"
- "str q30, [x27, x5]\n"
- "str q18, [x27, x23]\n"
- "str q10, [x27, x22]\n"
- "add x27, x27, #0x10\n"
- "st1 { v26.8h }, [x24]\n"
+ "fmin v26.8h, v26.8h, v15.8h\n"
+ "st1 { v18.8h }, [x28]\n"
+ "str q31, [x28, x5]\n"
+ "str q29, [x28, x21]\n"
+ "str q28, [x28, x20]\n"
+ "add x28, x28, #0x10\n"
+ "st1 { v27.8h }, [x26]\n"
+ "str q30, [x26, x5]\n"
+ "str q19, [x26, x21]\n"
+ "str q11, [x26, x20]\n"
+ "add x26, x26, #0x10\n"
+ "st1 { v9.8h }, [x24]\n"
"str q12, [x24, x5]\n"
- "str q14, [x24, x23]\n"
- "str q25, [x24, x22]\n"
+ "str q25, [x24, x21]\n"
+ "str q26, [x24, x20]\n"
"add x24, x24, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 141f\n"
- "ldr q14, [x16, #0x0]\n"
+ "ldr q13, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "add x23, x14, x17\n"
+ "add x23, x10, x15\n"
"add x22, x7, XZR\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
- "add x21, x7, x25\n"
- "add x20, x14, x11\n"
+ "add x21, x7, x12\n"
+ "add x20, x10, x14\n"
"ldr q3, [x16, #0x40]\n"
"ldr q4, [x16, #0x50]\n"
"ldr q5, [x16, #0x60]\n"
@@ -699,27 +699,27 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ldr h11, [x21, #0x0]\n"
"ldr h12, [x20, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 5), (2, 3): Bit 2: End
- "mov v16.16b, v14.16b\n fmla v16.8h, v8.8h, v9.8h\n"
- "mov v17.16b, v14.16b\n fmla v17.8h, v7.8h, v9.8h\n"
- "add x20, x26, XZR\n"
- "mov v18.16b, v14.16b\n fmla v18.8h, v6.8h, v9.8h\n"
- "mov v21.16b, v14.16b\n fmla v21.8h, v4.8h, v9.8h\n"
- "mov v22.16b, v14.16b\n fmla v22.8h, v3.8h, v9.8h\n"
- "mov v25.16b, v14.16b\n fmla v25.8h, v1.8h, v9.8h\n"
- "mov v26.16b, v14.16b\n fmla v26.8h, v0.8h, v9.8h\n"
- "mov v19.16b, v14.16b\n fmla v19.8h, v2.8h, v11.8h\n"
- "mov v20.16b, v14.16b\n fmla v20.8h, v5.8h, v9.8h\n"
- "mov v24.16b, v14.16b\n fmla v24.8h, v2.8h, v9.8h\n"
+ "mov v16.16b, v13.16b\n fmla v16.8h, v8.8h, v9.8h\n"
+ "mov v17.16b, v13.16b\n fmla v17.8h, v7.8h, v9.8h\n"
+ "add x20, x25, XZR\n"
+ "mov v18.16b, v13.16b\n fmla v18.8h, v6.8h, v9.8h\n"
+ "mov v21.16b, v13.16b\n fmla v21.8h, v4.8h, v9.8h\n"
+ "mov v22.16b, v13.16b\n fmla v22.8h, v3.8h, v9.8h\n"
+ "mov v25.16b, v13.16b\n fmla v25.8h, v1.8h, v9.8h\n"
+ "mov v26.16b, v13.16b\n fmla v26.8h, v0.8h, v9.8h\n"
+ "mov v19.16b, v13.16b\n fmla v19.8h, v2.8h, v11.8h\n"
+ "mov v20.16b, v13.16b\n fmla v20.8h, v5.8h, v9.8h\n"
+ "mov v24.16b, v13.16b\n fmla v24.8h, v2.8h, v9.8h\n"
"fmla v16.8h, v0.8h, v10.8h\n"
"fmla v17.8h, v8.8h, v12.8h\n"
"fmla v18.8h, v7.8h, v12.8h\n"
- "fmla v19.8h, v6.8h, v12.8h\n"
"fmla v21.8h, v5.8h, v12.8h\n"
+ "fmla v19.8h, v6.8h, v12.8h\n"
"fmla v22.8h, v4.8h, v12.8h\n"
- "mov v23.16b, v14.16b\n fmla v23.8h, v3.8h, v12.8h\n"
+ "mov v23.16b, v13.16b\n fmla v23.8h, v3.8h, v12.8h\n"
"fmla v25.8h, v2.8h, v12.8h\n"
"fmla v26.8h, v1.8h, v12.8h\n"
- "mov v27.16b, v14.16b\n fmla v27.8h, v0.8h, v12.8h\n"
+ "mov v27.16b, v13.16b\n fmla v27.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 10f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
@@ -740,8 +740,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"11:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: Unset: Bit 1: Unset
"ldr h10, [x20, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: End
- "mov v28.16b, v14.16b\n fmla v28.8h, v6.8h, v10.8h\n"
- "add x20, x26, x25\n"
+ "mov v28.16b, v13.16b\n fmla v28.8h, v6.8h, v10.8h\n"
+ "add x20, x25, x12\n"
"tbz %x[n_channels], #2, 14f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
@@ -762,8 +762,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"15:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: Unset: Bit 1: Unset
"ldr h11, [x20, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 2: End
- "mov v31.16b, v14.16b\n fmla v31.8h, v8.8h, v11.8h\n"
- "add x20, x12, x17\n"
+ "mov v31.16b, v13.16b\n fmla v31.8h, v8.8h, v11.8h\n"
+ "add x20, x9, x15\n"
"tbz %x[n_channels], #2, 18f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #1, 17f\n"
@@ -792,8 +792,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla v25.8h, v4.8h, v9.8h\n"
"fmla v26.8h, v3.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v9.8h\n"
- "mov v29.16b, v14.16b\n fmla v29.8h, v1.8h, v9.8h\n"
- "mov v30.16b, v14.16b\n fmla v30.8h, v0.8h, v9.8h\n"
+ "mov v29.16b, v13.16b\n fmla v29.8h, v1.8h, v9.8h\n"
+ "mov v30.16b, v13.16b\n fmla v30.8h, v0.8h, v9.8h\n"
"tbz %x[n_channels], #2, 22f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
@@ -816,7 +816,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"24:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 2: End
"fmla v16.8h, v1.8h, v12.8h\n"
"fmla v17.8h, v0.8h, v12.8h\n"
- "add x20, x7, x28\n"
+ "add x20, x7, x13\n"
"tbz %x[n_channels], #2, 26f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
@@ -839,7 +839,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"28:" // Tile loop: Oddments: Load inputs: (0, 4): Bit 2: End
"fmla v18.8h, v2.8h, v11.8h\n"
"fmla v19.8h, v1.8h, v11.8h\n"
- "add x20, x12, x11\n"
+ "add x20, x9, x14\n"
"tbz %x[n_channels], #2, 30f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 29f\n"
@@ -862,7 +862,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"32:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
"fmla v21.8h, v8.8h, v10.8h\n"
"fmla v22.8h, v7.8h, v10.8h\n"
- "add x20, x15, XZR\n"
+ "add x20, x11, XZR\n"
"fmla v23.8h, v6.8h, v10.8h\n"
"fmla v25.8h, v5.8h, v10.8h\n"
"fmla v26.8h, v4.8h, v10.8h\n"
@@ -892,7 +892,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"36:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 2: End
"fmla v16.8h, v3.8h, v9.8h\n"
"fmla v20.8h, v0.8h, v9.8h\n"
- "add x20, x15, x25\n"
+ "add x20, x11, x12\n"
"tbz %x[n_channels], #2, 38f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
@@ -915,7 +915,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"40:" // Tile loop: Oddments: Load inputs: (1, 5): Bit 2: End
"fmla v19.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v2.8h, v12.8h\n"
- "add x20, x9, XZR\n"
+ "add x20, x27, XZR\n"
"tbz %x[n_channels], #2, 42f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
@@ -938,7 +938,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"44:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: End
"fmla v24.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x15, x17\n"
+ "add x20, x11, x15\n"
"tbz %x[n_channels], #2, 46f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
@@ -961,7 +961,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"48:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: End
"fmla v16.8h, v5.8h, v10.8h\n"
"fmla v17.8h, v4.8h, v10.8h\n"
- "add x20, x9, x25\n"
+ "add x20, x27, x12\n"
"fmla v18.8h, v3.8h, v10.8h\n"
"fmla v20.8h, v2.8h, v10.8h\n"
"fmla v21.8h, v1.8h, v10.8h\n"
@@ -988,7 +988,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"52:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: End
"fmla v27.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v5.8h, v11.8h\n"
- "add x20, x15, x11\n"
+ "add x20, x11, x14\n"
"tbz %x[n_channels], #2, 54f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 53f\n"
@@ -1011,7 +1011,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"56:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v17.8h, v5.8h, v12.8h\n"
"fmla v18.8h, v4.8h, v12.8h\n"
- "add x20, x26, x4\n"
+ "add x20, x25, x4\n"
"fmla v19.8h, v3.8h, v12.8h\n"
"fmla v21.8h, v2.8h, v12.8h\n"
"fmla v22.8h, v1.8h, v12.8h\n"
@@ -1038,7 +1038,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"60:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: End
"fmla v28.8h, v7.8h, v11.8h\n"
"fmla v29.8h, v6.8h, v11.8h\n"
- "add x20, x14, x4\n"
+ "add x20, x10, x4\n"
"tbz %x[n_channels], #2, 62f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 61f\n"
@@ -1061,7 +1061,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"64:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: End
"fmla v16.8h, v7.8h, v10.8h\n"
"fmla v17.8h, v6.8h, v10.8h\n"
- "add x20, x26, x28\n"
+ "add x20, x25, x13\n"
"fmla v20.8h, v4.8h, v10.8h\n"
"fmla v21.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v1.8h, v10.8h\n"
@@ -1088,7 +1088,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"68:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: End
"fmla v30.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v7.8h, v11.8h\n"
- "add x20, x14, x28\n"
+ "add x20, x10, x13\n"
"tbz %x[n_channels], #2, 70f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 69f\n"
@@ -1111,7 +1111,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"72:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: End
"fmla v18.8h, v8.8h, v12.8h\n"
"fmla v19.8h, v7.8h, v12.8h\n"
- "add x20, x7, x17\n"
+ "add x20, x7, x15\n"
"fmla v22.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v2.8h, v12.8h\n"
@@ -1138,7 +1138,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"76:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 2: End
"fmla v16.8h, v2.8h, v10.8h\n"
"fmla v17.8h, v1.8h, v10.8h\n"
- "add x20, x12, x4\n"
+ "add x20, x9, x4\n"
"fmla v18.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 78f\n"
"ldr d11, [x20], #0x8\n"
@@ -1162,7 +1162,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"80:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
"fmla v20.8h, v7.8h, v11.8h\n"
"fmla v21.8h, v6.8h, v11.8h\n"
- "add x20, x7, x11\n"
+ "add x20, x7, x14\n"
"fmla v24.8h, v4.8h, v11.8h\n"
"fmla v25.8h, v3.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
@@ -1189,7 +1189,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"84:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 2: End
"fmla v17.8h, v2.8h, v12.8h\n"
"fmla v18.8h, v1.8h, v12.8h\n"
- "add x20, x14, XZR\n"
+ "add x20, x10, XZR\n"
"fmla v19.8h, v0.8h, v12.8h\n"
"tbz %x[n_channels], #2, 86f\n"
"ldr d10, [x20], #0x8\n"
@@ -1213,7 +1213,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"88:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: End
"fmla v16.8h, v6.8h, v10.8h\n"
"fmla v20.8h, v3.8h, v10.8h\n"
- "add x20, x12, x28\n"
+ "add x20, x9, x13\n"
"fmla v24.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 90f\n"
"ldr d11, [x20], #0x8\n"
@@ -1237,7 +1237,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"92:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: End
"fmla v22.8h, v8.8h, v11.8h\n"
"fmla v23.8h, v7.8h, v11.8h\n"
- "add x20, x14, x25\n"
+ "add x20, x10, x12\n"
"fmla v26.8h, v5.8h, v11.8h\n"
"fmla v27.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v11.8h\n"
@@ -1264,7 +1264,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"96:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: End
"fmla v19.8h, v8.8h, v12.8h\n"
"fmla v23.8h, v5.8h, v12.8h\n"
- "add x20, x12, XZR\n"
+ "add x20, x9, XZR\n"
"fmla v27.8h, v2.8h, v12.8h\n"
"tbz %x[n_channels], #2, 98f\n"
"ldr d10, [x20], #0x8\n"
@@ -1288,7 +1288,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"100:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v20.8h, v6.8h, v10.8h\n"
"fmla v24.8h, v3.8h, v10.8h\n"
- "add x20, x9, x17\n"
+ "add x20, x27, x15\n"
"fmla v28.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 102f\n"
"ldr d11, [x20], #0x8\n"
@@ -1312,7 +1312,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"104:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: End
"fmla v24.8h, v8.8h, v11.8h\n"
"fmla v25.8h, v7.8h, v11.8h\n"
- "add x20, x12, x25\n"
+ "add x20, x9, x12\n"
"fmla v26.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
"fmla v29.8h, v4.8h, v11.8h\n"
@@ -1339,7 +1339,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"108:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: End
"fmla v23.8h, v8.8h, v12.8h\n"
"fmla v27.8h, v5.8h, v12.8h\n"
- "add x20, x26, x17\n"
+ "add x20, x25, x15\n"
"fmla v31.8h, v2.8h, v12.8h\n"
"tbz %x[n_channels], #2, 110f\n"
"ldr d10, [x20], #0x8\n"
@@ -1363,7 +1363,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"112:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: End
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "add x20, x9, x11\n"
+ "add x20, x27, x14\n"
"fmla v30.8h, v6.8h, v10.8h\n"
"tbz %x[n_channels], #2, 114f\n"
"ldr d11, [x20], #0x8\n"
@@ -1387,7 +1387,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"116:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: End
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v11.8h\n"
- "add x20, x26, x11\n"
+ "add x20, x25, x14\n"
"fmla v27.8h, v6.8h, v11.8h\n"
"fmla v29.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
@@ -1414,7 +1414,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"120:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: End
"fmla v29.8h, v8.8h, v12.8h\n"
"fmla v30.8h, v7.8h, v12.8h\n"
- "add x20, x15, x4\n"
+ "add x20, x11, x4\n"
"fmla v31.8h, v6.8h, v12.8h\n"
"tbz %x[n_channels], #2, 122f\n"
"ldr d10, [x20], #0x8\n"
@@ -1438,7 +1438,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"124:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 2: End
"fmla v16.8h, v4.8h, v10.8h\n"
"fmla v17.8h, v3.8h, v10.8h\n"
- "add x20, x15, x28\n"
+ "add x20, x11, x13\n"
"fmla v20.8h, v1.8h, v10.8h\n"
"fmla v21.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 126f\n"
@@ -1463,7 +1463,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"128:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: End
"fmla v18.8h, v5.8h, v11.8h\n"
"fmla v19.8h, v4.8h, v11.8h\n"
- "add x20, x9, x4\n"
+ "add x20, x27, x4\n"
"fmla v22.8h, v2.8h, v11.8h\n"
"fmla v23.8h, v1.8h, v11.8h\n"
"tbz %x[n_channels], #2, 130f\n"
@@ -1488,7 +1488,7 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"132:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: End
"fmla v24.8h, v7.8h, v12.8h\n"
"fmla v25.8h, v6.8h, v12.8h\n"
- "add x20, x9, x28\n"
+ "add x20, x27, x13\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"tbz %x[n_channels], #2, 134f\n"
@@ -1513,24 +1513,24 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"136:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: End
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "fmax v16.8h, v16.8h, v13.8h\n"
+ "fmax v16.8h, v16.8h, v14.8h\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "fmax v17.8h, v17.8h, v13.8h\n"
- "fmax v18.8h, v18.8h, v13.8h\n"
- "fmax v19.8h, v19.8h, v13.8h\n"
- "fmax v20.8h, v20.8h, v13.8h\n"
- "fmax v21.8h, v21.8h, v13.8h\n"
- "fmax v22.8h, v22.8h, v13.8h\n"
- "fmax v23.8h, v23.8h, v13.8h\n"
- "fmax v24.8h, v24.8h, v13.8h\n"
- "fmax v25.8h, v25.8h, v13.8h\n"
- "fmax v26.8h, v26.8h, v13.8h\n"
- "fmax v27.8h, v27.8h, v13.8h\n"
- "fmax v28.8h, v28.8h, v13.8h\n"
- "fmax v29.8h, v29.8h, v13.8h\n"
- "fmax v30.8h, v30.8h, v13.8h\n"
- "fmax v31.8h, v31.8h, v13.8h\n"
+ "fmax v17.8h, v17.8h, v14.8h\n"
+ "fmax v18.8h, v18.8h, v14.8h\n"
+ "fmax v19.8h, v19.8h, v14.8h\n"
+ "fmax v20.8h, v20.8h, v14.8h\n"
+ "fmax v21.8h, v21.8h, v14.8h\n"
+ "fmax v22.8h, v22.8h, v14.8h\n"
+ "fmax v23.8h, v23.8h, v14.8h\n"
+ "fmax v24.8h, v24.8h, v14.8h\n"
+ "fmax v25.8h, v25.8h, v14.8h\n"
+ "fmax v26.8h, v26.8h, v14.8h\n"
+ "fmax v27.8h, v27.8h, v14.8h\n"
+ "fmax v28.8h, v28.8h, v14.8h\n"
+ "fmax v29.8h, v29.8h, v14.8h\n"
+ "fmax v30.8h, v30.8h, v14.8h\n"
+ "fmax v31.8h, v31.8h, v14.8h\n"
"fmin v16.8h, v16.8h, v15.8h\n"
"fmin v17.8h, v17.8h, v15.8h\n"
"fmin v18.8h, v18.8h, v15.8h\n"
@@ -1548,18 +1548,18 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmin v30.8h, v30.8h, v15.8h\n"
"fmin v31.8h, v31.8h, v15.8h\n"
"tbz %x[n_channels], #2, 138f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.d }[0], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "add x17, x17, #0x8\n"
+ "add x28, x28, #0x8\n"
+ "st1 { v16.d }[0], [x23], x5\n"
"st1 { v20.d }[0], [x22], x5\n"
+ "add x26, x26, #0x8\n"
+ "add x24, x24, #0x8\n"
"st1 { v24.d }[0], [x21], x5\n"
- "add x8, x8, #0x8\n"
- "add x10, x10, #0x8\n"
"st1 { v28.d }[0], [x20], x5\n"
- "add x27, x27, #0x8\n"
- "add x24, x24, #0x8\n"
"st1 { v17.d }[0], [x23], x5\n"
"st1 { v21.d }[0], [x22], x5\n"
"st1 { v25.d }[0], [x21], x5\n"
@@ -1573,18 +1573,18 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"st1 { v27.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #1, 137f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.s }[2], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "add x17, x17, #0x4\n"
+ "add x28, x28, #0x4\n"
+ "st1 { v16.s }[2], [x23], x5\n"
"st1 { v20.s }[2], [x22], x5\n"
+ "add x26, x26, #0x4\n"
+ "add x24, x24, #0x4\n"
"st1 { v24.s }[2], [x21], x5\n"
- "add x8, x8, #0x4\n"
- "add x10, x10, #0x4\n"
"st1 { v28.s }[2], [x20], x5\n"
- "add x27, x27, #0x4\n"
- "add x24, x24, #0x4\n"
"st1 { v17.s }[2], [x23], x5\n"
"st1 { v21.s }[2], [x22], x5\n"
"st1 { v25.s }[2], [x21], x5\n"
@@ -1598,15 +1598,15 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"st1 { v27.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"tbz %x[n_channels], #0, 140f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.h }[6], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "st1 { v16.h }[6], [x23], x5\n"
+ "st1 { v17.h }[6], [x23], x5\n"
"st1 { v20.h }[6], [x22], x5\n"
"st1 { v24.h }[6], [x21], x5\n"
"st1 { v28.h }[6], [x20], x5\n"
- "st1 { v17.h }[6], [x23], x5\n"
"st1 { v21.h }[6], [x22], x5\n"
"st1 { v25.h }[6], [x21], x5\n"
"st1 { v29.h }[6], [x20], x5\n"
@@ -1621,15 +1621,15 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"b 140f\n"
"137:" // Tile loop: Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 140f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.h }[4], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "st1 { v16.h }[4], [x23], x5\n"
+ "st1 { v17.h }[4], [x23], x5\n"
"st1 { v20.h }[4], [x22], x5\n"
"st1 { v24.h }[4], [x21], x5\n"
"st1 { v28.h }[4], [x20], x5\n"
- "st1 { v17.h }[4], [x23], x5\n"
"st1 { v21.h }[4], [x22], x5\n"
"st1 { v25.h }[4], [x21], x5\n"
"st1 { v29.h }[4], [x20], x5\n"
@@ -1644,18 +1644,18 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"b 140f\n"
"138:" // Tile loop: Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 139f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.s }[0], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "add x17, x17, #0x4\n"
+ "add x28, x28, #0x4\n"
+ "st1 { v16.s }[0], [x23], x5\n"
"st1 { v20.s }[0], [x22], x5\n"
+ "add x26, x26, #0x4\n"
+ "add x24, x24, #0x4\n"
"st1 { v24.s }[0], [x21], x5\n"
- "add x8, x8, #0x4\n"
- "add x10, x10, #0x4\n"
"st1 { v28.s }[0], [x20], x5\n"
- "add x27, x27, #0x4\n"
- "add x24, x24, #0x4\n"
"st1 { v17.s }[0], [x23], x5\n"
"st1 { v21.s }[0], [x22], x5\n"
"st1 { v25.s }[0], [x21], x5\n"
@@ -1669,15 +1669,15 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"st1 { v27.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"tbz %x[n_channels], #0, 140f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.h }[2], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "st1 { v16.h }[2], [x23], x5\n"
+ "st1 { v17.h }[2], [x23], x5\n"
"st1 { v20.h }[2], [x22], x5\n"
"st1 { v24.h }[2], [x21], x5\n"
"st1 { v28.h }[2], [x20], x5\n"
- "st1 { v17.h }[2], [x23], x5\n"
"st1 { v21.h }[2], [x22], x5\n"
"st1 { v25.h }[2], [x21], x5\n"
"st1 { v29.h }[2], [x20], x5\n"
@@ -1691,15 +1691,15 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"st1 { v31.h }[2], [x20]\n"
"b 140f\n"
"139:" // Tile loop: Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.h }[0], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "st1 { v16.h }[0], [x23], x5\n"
+ "st1 { v17.h }[0], [x23], x5\n"
"st1 { v20.h }[0], [x22], x5\n"
"st1 { v24.h }[0], [x21], x5\n"
"st1 { v28.h }[0], [x20], x5\n"
- "st1 { v17.h }[0], [x23], x5\n"
"st1 { v21.h }[0], [x22], x5\n"
"st1 { v25.h }[0], [x21], x5\n"
"st1 { v29.h }[0], [x20], x5\n"
@@ -1713,20 +1713,20 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"st1 { v31.h }[0], [x20]\n"
"140:" // Tile loop: Oddments: Store: Bit 2: End
"141:" // Tile loop: End
- "ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x26, x26, #0x1\n"
- "add x21, x27, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x26, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x27, x27, x21, LT\n"
- "csel x26, x26, XZR, LT\n"
- "cmp x27, x20\n"
+ "ldr x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x28, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x27, x27, #0x1\n"
+ "add x20, x28, #0x1\n"
+ "cmp x27, x22\n"
+ "csel x28, x28, x20, LT\n"
+ "csel x27, x27, XZR, LT\n"
+ "cmp x28, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 96feeeeece..2a5656a9b3 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -102,9 +102,9 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"lsr x7, %x[n_channels], #0x3\n"
"ldr x8, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v13.8h }, [x20]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v15.8h }, [x21]\n"
"ld1r { v14.8h }, [x20]\n"
"add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
"mov x15, #0x0\n"
@@ -122,583 +122,583 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr q7, [x17, #0x80]\n"
"ldr q8, [x17, #0x90]\n"
"add x17, x17, #0xa0\n"
- "ldp x21, x20, [x16, #0x0]\n"
- "ldr q9, [x21, x15]\n"
- "ldr q10, [x20, x15]\n"
+ "ldp x23, x22, [x16, #0x0]\n"
"ldp x21, x20, [x16, #0x10]\n"
+ "ldr q9, [x23, x15]\n"
+ "ldr q10, [x22, x15]\n"
"ldr q11, [x21, x15]\n"
"ldr q12, [x20, x15]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v23.16b, v30.16b\n fmla v23.8h, v4.8h, v9.8h\n"
- "mov v17.16b, v30.16b\n fmla v17.8h, v8.8h, v9.8h\n"
+ "mov v21.16b, v30.16b\n fmla v21.8h, v4.8h, v9.8h\n"
+ "mov v26.16b, v30.16b\n fmla v26.8h, v8.8h, v9.8h\n"
"ldr x27, [x16, #0x20]\n"
"ldr x24, [x16, #0x30]\n"
- "mov v25.16b, v30.16b\n fmla v25.8h, v3.8h, v9.8h\n"
- "mov v28.16b, v30.16b\n fmla v28.8h, v1.8h, v9.8h\n"
+ "mov v27.16b, v30.16b\n fmla v27.8h, v3.8h, v9.8h\n"
+ "mov v31.16b, v30.16b\n fmla v31.8h, v1.8h, v9.8h\n"
"ldr x23, [x16, #0x28]\n"
"ldr x22, [x16, #0x38]\n"
- "mov v20.16b, v30.16b\n fmla v20.8h, v0.8h, v9.8h\n"
- "mov v16.16b, v30.16b\n fmla v16.8h, v7.8h, v9.8h\n"
+ "mov v28.16b, v30.16b\n fmla v28.8h, v0.8h, v9.8h\n"
+ "mov v18.16b, v30.16b\n fmla v18.8h, v7.8h, v9.8h\n"
"ldr x26, [x16, #0x40]\n"
"ldr x20, [x16, #0x48]\n"
- "mov v15.16b, v30.16b\n fmla v15.8h, v6.8h, v9.8h\n"
- "fmla v23.8h, v5.8h, v12.8h\n"
+ "mov v20.16b, v30.16b\n fmla v20.8h, v6.8h, v9.8h\n"
+ "mov v13.16b, v30.16b\n fmla v13.8h, v5.8h, v9.8h\n"
"ldr x25, [x16, #0x50]\n"
"ldr x21, [x16, #0x58]\n"
- "mov v27.16b, v30.16b\n fmla v27.8h, v5.8h, v9.8h\n"
- "mov v31.16b, v30.16b\n fmla v31.8h, v2.8h, v9.8h\n"
+ "fmla v21.8h, v5.8h, v12.8h\n"
+ "mov v16.16b, v30.16b\n fmla v16.8h, v2.8h, v9.8h\n"
"ldr q9, [x24, x15]\n"
"ldr x13, [x16, #0x70]\n"
- "fmla v17.8h, v0.8h, v10.8h\n"
- "ldr q22, [x27, x15]\n"
- "mov v10.16b, v30.16b\n fmla v10.8h, v2.8h, v11.8h\n"
- "ldr q18, [x23, x15]\n"
- "fmla v25.8h, v4.8h, v12.8h\n"
- "fmla v28.8h, v2.8h, v12.8h\n"
+ "fmla v26.8h, v0.8h, v10.8h\n"
+ "ldr q17, [x27, x15]\n"
+ "mov v25.16b, v30.16b\n fmla v25.8h, v2.8h, v11.8h\n"
+ "ldr q29, [x23, x15]\n"
+ "fmla v27.8h, v4.8h, v12.8h\n"
+ "fmla v31.8h, v2.8h, v12.8h\n"
"ldr x24, [x16, #0x60]\n"
"ldr x23, [x16, #0x68]\n"
- "fmla v20.8h, v1.8h, v12.8h\n"
- "fmla v16.8h, v8.8h, v12.8h\n"
+ "fmla v28.8h, v1.8h, v12.8h\n"
+ "fmla v18.8h, v8.8h, v12.8h\n"
"ldr x12, [x8, #0x0]\n"
"ldr x11, [x8, #0x8]\n"
- "fmla v15.8h, v7.8h, v12.8h\n"
- "mov v29.16b, v30.16b\n fmla v29.8h, v6.8h, v22.8h\n"
- "ldr q22, [x20, x15]\n"
+ "fmla v20.8h, v7.8h, v12.8h\n"
+ "mov v24.16b, v30.16b\n fmla v24.8h, v6.8h, v17.8h\n"
+ "ldr q10, [x20, x15]\n"
"ldr x28, [x16, #0x88]\n"
- "fmla v23.8h, v7.8h, v9.8h\n"
- "fmla v10.8h, v6.8h, v12.8h\n"
+ "fmla v21.8h, v7.8h, v9.8h\n"
+ "fmla v25.8h, v6.8h, v12.8h\n"
"ldr x10, [x8, #0x10]\n"
"ldr x9, [x8, #0x18]\n"
- "mov v21.16b, v30.16b\n fmla v21.8h, v3.8h, v12.8h\n"
+ "mov v22.16b, v30.16b\n fmla v22.8h, v3.8h, v12.8h\n"
"mov v19.16b, v30.16b\n fmla v19.8h, v0.8h, v12.8h\n"
- "ldr q11, [x22, x15]\n"
+ "ldr q12, [x22, x15]\n"
"ldr x22, [x16, #0x78]\n"
- "mov v24.16b, v30.16b\n fmla v24.8h, v8.8h, v18.8h\n"
- "ldr q12, [x26, x15]\n"
- "fmla v25.8h, v6.8h, v9.8h\n"
+ "mov v17.16b, v30.16b\n fmla v17.8h, v8.8h, v29.8h\n"
+ "ldr q11, [x26, x15]\n"
+ "fmla v27.8h, v6.8h, v9.8h\n"
"ldr x20, [x16, #0x80]\n"
- "fmla v28.8h, v4.8h, v9.8h\n"
- "fmla v20.8h, v3.8h, v9.8h\n"
+ "fmla v31.8h, v4.8h, v9.8h\n"
+ "fmla v28.8h, v3.8h, v9.8h\n"
"add x14, x14, #0x10\n"
- "mov v26.16b, v30.16b\n fmla v26.8h, v1.8h, v9.8h\n"
- "mov v18.16b, v30.16b\n fmla v18.8h, v0.8h, v9.8h\n"
+ "mov v29.16b, v30.16b\n fmla v29.8h, v1.8h, v9.8h\n"
+ "mov v23.16b, v30.16b\n fmla v23.8h, v0.8h, v9.8h\n"
"ldr q30, [x17, #0x0]\n"
- "fmla v27.8h, v8.8h, v9.8h\n"
- "fmla v31.8h, v5.8h, v9.8h\n"
- "fmla v29.8h, v2.8h, v9.8h\n"
+ "fmla v13.8h, v8.8h, v9.8h\n"
+ "fmla v16.8h, v5.8h, v9.8h\n"
+ "fmla v24.8h, v2.8h, v9.8h\n"
"ldr q9, [x25, x15]\n"
- "fmla v17.8h, v1.8h, v11.8h\n"
+ "fmla v26.8h, v1.8h, v12.8h\n"
"ldr x27, [x16, #0x90]\n"
- "fmla v16.8h, v0.8h, v11.8h\n"
- "ldr q11, [x21, x15]\n"
- "fmla v15.8h, v2.8h, v12.8h\n"
+ "fmla v18.8h, v0.8h, v12.8h\n"
+ "ldr q12, [x21, x15]\n"
+ "fmla v20.8h, v2.8h, v11.8h\n"
"ldr x21, [x16, #0x98]\n"
- "fmla v23.8h, v8.8h, v22.8h\n"
- "fmla v10.8h, v1.8h, v12.8h\n"
- "ldr q12, [x24, x15]\n"
+ "fmla v21.8h, v8.8h, v10.8h\n"
+ "fmla v25.8h, v1.8h, v11.8h\n"
+ "ldr q11, [x24, x15]\n"
"ldr x26, [x16, #0xa0]\n"
- "fmla v25.8h, v7.8h, v22.8h\n"
- "fmla v21.8h, v6.8h, v22.8h\n"
- "fmla v28.8h, v5.8h, v22.8h\n"
- "fmla v20.8h, v4.8h, v22.8h\n"
- "fmla v19.8h, v3.8h, v22.8h\n"
- "fmla v26.8h, v2.8h, v22.8h\n"
- "fmla v18.8h, v1.8h, v22.8h\n"
- "fmla v24.8h, v0.8h, v22.8h\n"
- "ldr q22, [x23, x15]\n"
+ "fmla v27.8h, v7.8h, v10.8h\n"
+ "fmla v22.8h, v6.8h, v10.8h\n"
+ "fmla v31.8h, v5.8h, v10.8h\n"
+ "fmla v28.8h, v4.8h, v10.8h\n"
+ "fmla v19.8h, v3.8h, v10.8h\n"
+ "fmla v29.8h, v2.8h, v10.8h\n"
+ "fmla v23.8h, v1.8h, v10.8h\n"
+ "fmla v17.8h, v0.8h, v10.8h\n"
+ "ldr q10, [x23, x15]\n"
"ldr x25, [x16, #0xa8]\n"
- "fmla v17.8h, v3.8h, v9.8h\n"
- "fmla v27.8h, v0.8h, v9.8h\n"
- "fmla v31.8h, v6.8h, v12.8h\n"
- "fmla v29.8h, v3.8h, v12.8h\n"
+ "fmla v26.8h, v3.8h, v9.8h\n"
+ "fmla v13.8h, v0.8h, v9.8h\n"
+ "fmla v16.8h, v6.8h, v11.8h\n"
+ "fmla v24.8h, v3.8h, v11.8h\n"
"ldr q9, [x13, x15]\n"
"ldr x24, [x16, #0xb0]\n"
- "fmla v16.8h, v4.8h, v22.8h\n"
- "fmla v15.8h, v3.8h, v22.8h\n"
- "fmla v23.8h, v1.8h, v22.8h\n"
- "fmla v10.8h, v5.8h, v11.8h\n"
- "fmla v21.8h, v2.8h, v11.8h\n"
+ "fmla v18.8h, v4.8h, v10.8h\n"
+ "fmla v20.8h, v3.8h, v10.8h\n"
+ "fmla v21.8h, v1.8h, v10.8h\n"
+ "fmla v25.8h, v5.8h, v12.8h\n"
+ "fmla v22.8h, v2.8h, v12.8h\n"
"ldr q12, [x22, x15]\n"
- "fmla v25.8h, v0.8h, v22.8h\n"
+ "fmla v27.8h, v0.8h, v10.8h\n"
"ldr x23, [x16, #0xb8]\n"
"fmla v19.8h, v8.8h, v9.8h\n"
- "fmla v24.8h, v5.8h, v9.8h\n"
- "ldr q11, [x20, x15]\n"
+ "fmla v17.8h, v5.8h, v9.8h\n"
+ "ldr q9, [x20, x15]\n"
"ldr x22, [x16, #0xc0]\n"
- "fmla v17.8h, v5.8h, v22.8h\n"
- "fmla v27.8h, v2.8h, v22.8h\n"
- "ldr q22, [x28, x15]\n"
+ "fmla v26.8h, v5.8h, v10.8h\n"
+ "fmla v13.8h, v2.8h, v10.8h\n"
+ "ldr q11, [x28, x15]\n"
"ldr x20, [x16, #0xc8]\n"
- "fmla v16.8h, v5.8h, v12.8h\n"
- "fmla v15.8h, v4.8h, v12.8h\n"
- "fmla v23.8h, v2.8h, v12.8h\n"
- "fmla v10.8h, v3.8h, v12.8h\n"
- "fmla v25.8h, v1.8h, v12.8h\n"
- "fmla v21.8h, v0.8h, v12.8h\n"
- "ldr q9, [x21, x15]\n"
+ "fmla v18.8h, v5.8h, v12.8h\n"
+ "fmla v20.8h, v4.8h, v12.8h\n"
+ "fmla v21.8h, v2.8h, v12.8h\n"
+ "fmla v25.8h, v3.8h, v12.8h\n"
+ "fmla v27.8h, v1.8h, v12.8h\n"
+ "fmla v22.8h, v0.8h, v12.8h\n"
+ "ldr q10, [x21, x15]\n"
"ldr x28, [x16, #0xd8]\n"
- "fmla v29.8h, v7.8h, v11.8h\n"
- "fmla v26.8h, v6.8h, v11.8h\n"
- "ldr q12, [x27, x15]\n"
+ "fmla v24.8h, v7.8h, v9.8h\n"
+ "fmla v29.8h, v6.8h, v9.8h\n"
+ "ldr q9, [x27, x15]\n"
"ldr x21, [x16, #0xd0]\n"
- "fmla v17.8h, v7.8h, v22.8h\n"
- "fmla v16.8h, v6.8h, v22.8h\n"
- "fmla v27.8h, v4.8h, v22.8h\n"
- "fmla v23.8h, v3.8h, v22.8h\n"
- "fmla v31.8h, v1.8h, v22.8h\n"
- "fmla v28.8h, v0.8h, v22.8h\n"
- "ldr q11, [x26, x15]\n"
+ "fmla v26.8h, v7.8h, v11.8h\n"
+ "fmla v18.8h, v6.8h, v11.8h\n"
+ "fmla v13.8h, v4.8h, v11.8h\n"
+ "fmla v21.8h, v3.8h, v11.8h\n"
+ "fmla v16.8h, v1.8h, v11.8h\n"
+ "fmla v31.8h, v0.8h, v11.8h\n"
+ "ldr q12, [x26, x15]\n"
"ldr x27, [x16, #0xe0]\n"
- "fmla v15.8h, v8.8h, v9.8h\n"
- "fmla v18.8h, v8.8h, v12.8h\n"
- "fmla v24.8h, v7.8h, v12.8h\n"
- "ldr q12, [x25, x15]\n"
- "fmla v19.8h, v1.8h, v9.8h\n"
+ "fmla v20.8h, v8.8h, v10.8h\n"
+ "fmla v23.8h, v8.8h, v9.8h\n"
+ "fmla v17.8h, v7.8h, v9.8h\n"
+ "ldr q11, [x25, x15]\n"
+ "fmla v19.8h, v1.8h, v10.8h\n"
"ldr x26, [x16, #0xe8]\n"
- "fmla v10.8h, v7.8h, v9.8h\n"
- "fmla v25.8h, v5.8h, v9.8h\n"
- "fmla v21.8h, v4.8h, v9.8h\n"
- "fmla v20.8h, v2.8h, v9.8h\n"
+ "fmla v25.8h, v7.8h, v10.8h\n"
+ "fmla v27.8h, v5.8h, v10.8h\n"
+ "fmla v22.8h, v4.8h, v10.8h\n"
+ "fmla v28.8h, v2.8h, v10.8h\n"
"ldr q9, [x24, x15]\n"
- "ldr x24, [x16, #0xf0]\n"
- "fmla v17.8h, v2.8h, v11.8h\n"
- "fmla v16.8h, v1.8h, v11.8h\n"
- "fmla v15.8h, v0.8h, v11.8h\n"
- "ldr q22, [x23, x15]\n"
- "fmla v27.8h, v7.8h, v12.8h\n"
- "ldr x25, [x16, #0xf8]\n"
- "fmla v23.8h, v6.8h, v12.8h\n"
- "fmla v31.8h, v4.8h, v12.8h\n"
- "fmla v28.8h, v3.8h, v12.8h\n"
- "fmla v29.8h, v1.8h, v12.8h\n"
- "fmla v26.8h, v0.8h, v12.8h\n"
- "ldr q11, [x22, x15]\n"
- "fmla v19.8h, v4.8h, v11.8h\n"
- "ldr x23, [x16, #0x100]\n"
- "fmla v18.8h, v2.8h, v11.8h\n"
- "fmla v16.8h, v2.8h, v9.8h\n"
- "fmla v15.8h, v1.8h, v9.8h\n"
- "fmla v10.8h, v0.8h, v9.8h\n"
- "ldr q9, [x20, x15]\n"
+ "ldr x25, [x16, #0xf0]\n"
+ "fmla v26.8h, v2.8h, v12.8h\n"
+ "fmla v18.8h, v1.8h, v12.8h\n"
+ "fmla v20.8h, v0.8h, v12.8h\n"
+ "ldr q12, [x23, x15]\n"
+ "fmla v13.8h, v7.8h, v11.8h\n"
+ "ldr x24, [x16, #0xf8]\n"
+ "fmla v21.8h, v6.8h, v11.8h\n"
+ "fmla v16.8h, v4.8h, v11.8h\n"
+ "fmla v31.8h, v3.8h, v11.8h\n"
+ "fmla v24.8h, v1.8h, v11.8h\n"
+ "fmla v29.8h, v0.8h, v11.8h\n"
+ "ldr q10, [x22, x15]\n"
+ "fmla v18.8h, v2.8h, v9.8h\n"
+ "ldr x22, [x16, #0x100]\n"
+ "fmla v20.8h, v1.8h, v9.8h\n"
+ "fmla v25.8h, v0.8h, v9.8h\n"
+ "ldr q11, [x20, x15]\n"
"ldr x20, [x16, #0x108]\n"
- "fmla v17.8h, v6.8h, v22.8h\n"
- "fmla v27.8h, v3.8h, v22.8h\n"
- "fmla v31.8h, v0.8h, v22.8h\n"
- "ldr q22, [x21, x15]\n"
+ "fmla v26.8h, v6.8h, v12.8h\n"
+ "fmla v13.8h, v3.8h, v12.8h\n"
+ "fmla v19.8h, v4.8h, v10.8h\n"
+ "fmla v23.8h, v2.8h, v10.8h\n"
+ "fmla v16.8h, v0.8h, v12.8h\n"
+ "ldr q9, [x21, x15]\n"
+ "fmla v27.8h, v8.8h, v10.8h\n"
+ "ldr x23, [x16, #0x110]\n"
+ "fmla v22.8h, v7.8h, v10.8h\n"
+ "fmla v28.8h, v5.8h, v10.8h\n"
+ "fmla v17.8h, v1.8h, v10.8h\n"
+ "ldr q10, [x28, x15]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
- "ldr x22, [x16, #0x110]\n"
- "fmla v21.8h, v7.8h, v11.8h\n"
- "fmla v20.8h, v5.8h, v11.8h\n"
- "fmla v24.8h, v1.8h, v11.8h\n"
- "ldr q12, [x28, x15]\n"
- "fmla v19.8h, v2.8h, v9.8h\n"
"ldr x21, [x16, #0x118]\n"
- "fmla v29.8h, v0.8h, v22.8h\n"
- "fmla v26.8h, v4.8h, v12.8h\n"
- "fmla v18.8h, v3.8h, v12.8h\n"
- "fmla v10.8h, v8.8h, v9.8h\n"
- "fmla v21.8h, v5.8h, v9.8h\n"
- "ldr q11, [x27, x15]\n"
- "fmla v27.8h, v6.8h, v22.8h\n"
- "fmla v31.8h, v3.8h, v22.8h\n"
- "ldr q22, [x26, x15]\n"
- "fmla v28.8h, v7.8h, v12.8h\n"
- "fmla v20.8h, v6.8h, v12.8h\n"
- "fmla v29.8h, v5.8h, v12.8h\n"
- "fmla v19.8h, v5.8h, v11.8h\n"
- "fmla v24.8h, v2.8h, v11.8h\n"
- "fmla v26.8h, v7.8h, v22.8h\n"
- "fmla v18.8h, v6.8h, v22.8h\n"
- "fmla v31.8h, v8.8h, v12.8h\n"
+ "fmla v19.8h, v2.8h, v11.8h\n"
+ "fmla v24.8h, v0.8h, v9.8h\n"
+ "fmla v13.8h, v6.8h, v9.8h\n"
+ "fmla v16.8h, v3.8h, v9.8h\n"
+ "ldr q9, [x26, x15]\n"
+ "fmla v29.8h, v4.8h, v10.8h\n"
+ "fmla v23.8h, v3.8h, v10.8h\n"
+ "fmla v22.8h, v5.8h, v11.8h\n"
+ "ldr q12, [x27, x15]\n"
+ "fmla v31.8h, v7.8h, v10.8h\n"
+ "fmla v28.8h, v6.8h, v10.8h\n"
+ "fmla v24.8h, v5.8h, v10.8h\n"
+ "fmla v16.8h, v8.8h, v10.8h\n"
+ "ldr q10, [x25, x15]\n"
+ "fmla v19.8h, v5.8h, v12.8h\n"
+ "fmla v17.8h, v2.8h, v12.8h\n"
+ "fmla v29.8h, v7.8h, v9.8h\n"
+ "fmla v23.8h, v6.8h, v9.8h\n"
+ "fmla v22.8h, v8.8h, v12.8h\n"
"ldr q12, [x24, x15]\n"
- "fmla v29.8h, v8.8h, v22.8h\n"
- "ldr q22, [x23, x15]\n"
- "fmla v28.8h, v8.8h, v12.8h\n"
- "fmla v20.8h, v7.8h, v12.8h\n"
- "fmla v19.8h, v6.8h, v12.8h\n"
- "fmla v26.8h, v5.8h, v12.8h\n"
- "fmla v18.8h, v4.8h, v12.8h\n"
- "fmla v24.8h, v3.8h, v12.8h\n"
- "ldr q12, [x20, x15]\n"
- "ldp x20, x24, [x16, #0x0]\n"
- "ldr q9, [x20, x6]\n"
- "fmla v21.8h, v8.8h, v11.8h\n"
- "ldr q11, [x25, x15]\n"
- "fmla v17.8h, v4.8h, v22.8h\n"
- "fmla v16.8h, v3.8h, v22.8h\n"
- "fmla v15.8h, v5.8h, v12.8h\n"
- "fmax v17.8h, v17.8h, v13.8h\n"
- "fmla v10.8h, v4.8h, v12.8h\n"
- "fmla v26.8h, v8.8h, v11.8h\n"
- "fmax v16.8h, v16.8h, v13.8h\n"
- "fmla v18.8h, v7.8h, v11.8h\n"
- "fmla v24.8h, v6.8h, v11.8h\n"
- "ldr q11, [x22, x15]\n"
- "fmax v15.8h, v15.8h, v13.8h\n"
- "fmla v27.8h, v1.8h, v22.8h\n"
- "fmla v23.8h, v0.8h, v22.8h\n"
- "ldr q22, [x21, x15]\n"
+ "fmla v24.8h, v8.8h, v9.8h\n"
+ "ldr q9, [x22, x15]\n"
+ "fmla v31.8h, v8.8h, v10.8h\n"
+ "fmla v28.8h, v7.8h, v10.8h\n"
+ "fmla v19.8h, v6.8h, v10.8h\n"
+ "fmla v29.8h, v5.8h, v10.8h\n"
+ "fmla v17.8h, v3.8h, v10.8h\n"
+ "fmla v23.8h, v4.8h, v10.8h\n"
+ "ldr q11, [x20, x15]\n"
+ "fmla v26.8h, v4.8h, v9.8h\n"
+ "ldp x20, x22, [x16, #0x0]\n"
+ "fmla v18.8h, v3.8h, v9.8h\n"
+ "fmla v13.8h, v1.8h, v9.8h\n"
+ "fmla v21.8h, v0.8h, v9.8h\n"
+ "ldr q10, [x21, x15]\n"
"ldr q0, [x17, #0x10]\n"
- "fmla v25.8h, v2.8h, v12.8h\n"
+ "ldr q9, [x20, x6]\n"
+ "fmla v20.8h, v5.8h, v11.8h\n"
+ "fmla v25.8h, v4.8h, v11.8h\n"
+ "fmla v29.8h, v8.8h, v12.8h\n"
+ "fmla v23.8h, v7.8h, v12.8h\n"
+ "fmax v26.8h, v26.8h, v15.8h\n"
+ "fmla v17.8h, v6.8h, v12.8h\n"
+ "ldr q12, [x23, x15]\n"
+ "fmla v27.8h, v2.8h, v11.8h\n"
"ldr q2, [x17, #0x30]\n"
- "fmla v21.8h, v1.8h, v12.8h\n"
+ "fmla v22.8h, v1.8h, v11.8h\n"
"ldr q1, [x17, #0x20]\n"
- "fmax v10.8h, v10.8h, v13.8h\n"
- "fmla v31.8h, v7.8h, v11.8h\n"
- "fmla v28.8h, v6.8h, v11.8h\n"
- "ldr q6, [x17, #0x70]\n"
- "fmla v20.8h, v8.8h, v22.8h\n"
+ "fmax v18.8h, v18.8h, v15.8h\n"
+ "fmla v28.8h, v8.8h, v10.8h\n"
"ldr q8, [x17, #0x90]\n"
- "fmla v19.8h, v7.8h, v22.8h\n"
+ "fmax v20.8h, v20.8h, v15.8h\n"
+ "fmax v25.8h, v25.8h, v15.8h\n"
+ "fmla v19.8h, v7.8h, v10.8h\n"
+ "fmla v16.8h, v7.8h, v12.8h\n"
"ldr q7, [x17, #0x80]\n"
- "fmin v17.8h, v17.8h, v14.8h\n"
- "fmin v16.8h, v16.8h, v14.8h\n"
- "str q17, [x12, x14]\n"
- "ldr x23, [x8, #0x20]\n"
- "fmin v15.8h, v15.8h, v14.8h\n"
- "fmin v10.8h, v10.8h, v14.8h\n"
- "str q16, [x11, x14]\n"
- "ldr x22, [x8, #0x28]\n"
- "fmax v27.8h, v27.8h, v13.8h\n"
- "fmax v23.8h, v23.8h, v13.8h\n"
- "str q15, [x10, x14]\n"
- "ldr x21, [x8, #0x30]\n"
- "fmax v25.8h, v25.8h, v13.8h\n"
- "fmax v21.8h, v21.8h, v13.8h\n"
- "str q10, [x9, x14]\n"
- "ldr x20, [x8, #0x38]\n"
- "fmla v29.8h, v4.8h, v11.8h\n"
- "fmla v26.8h, v3.8h, v11.8h\n"
+ "fmla v31.8h, v6.8h, v12.8h\n"
+ "ldr q6, [x17, #0x70]\n"
+ "fmin v26.8h, v26.8h, v14.8h\n"
+ "fmin v18.8h, v18.8h, v14.8h\n"
+ "fmla v24.8h, v4.8h, v12.8h\n"
+ "ldp x21, x20, [x16, #0x10]\n"
+ "fmin v20.8h, v20.8h, v14.8h\n"
+ "fmin v25.8h, v25.8h, v14.8h\n"
+ "fmla v29.8h, v3.8h, v12.8h\n"
"ldr q3, [x17, #0x40]\n"
- "fmin v27.8h, v27.8h, v14.8h\n"
- "fmla v18.8h, v5.8h, v22.8h\n"
+ "fmax v13.8h, v13.8h, v15.8h\n"
+ "fmax v21.8h, v21.8h, v15.8h\n"
+ "fmla v23.8h, v5.8h, v10.8h\n"
"ldr q5, [x17, #0x60]\n"
- "fmla v24.8h, v4.8h, v22.8h\n"
- "ldr q10, [x24, x6]\n"
+ "ldr q11, [x21, x6]\n"
+ "ldr q12, [x20, x6]\n"
+ "fmax v27.8h, v27.8h, v15.8h\n"
+ "fmax v22.8h, v22.8h, v15.8h\n"
+ "str q26, [x12, x14]\n"
+ "ldr x23, [x8, #0x20]\n"
+ "fmla v17.8h, v4.8h, v10.8h\n"
+ "ldr q10, [x22, x6]\n"
"ldr q4, [x17, #0x50]\n"
- "fmin v23.8h, v23.8h, v14.8h\n"
- "fmin v25.8h, v25.8h, v14.8h\n"
- "str q27, [x23, x14]\n"
+ "str q18, [x11, x14]\n"
+ "ldr x22, [x8, #0x28]\n"
+ "fmin v13.8h, v13.8h, v14.8h\n"
+ "str q20, [x10, x14]\n"
+ "ldr x21, [x8, #0x30]\n"
"fmin v21.8h, v21.8h, v14.8h\n"
- "fmax v31.8h, v31.8h, v13.8h\n"
- "str q23, [x22, x14]\n"
- "ldr x25, [x8, #0x40]\n"
- "fmax v28.8h, v28.8h, v13.8h\n"
- "fmax v20.8h, v20.8h, v13.8h\n"
- "str q25, [x21, x14]\n"
- "ldr x23, [x8, #0x48]\n"
- "fmax v19.8h, v19.8h, v13.8h\n"
- "str q21, [x20, x14]\n"
- "ldr x22, [x8, #0x50]\n"
- "ldr x24, [x8, #0x58]\n"
- "ldp x21, x20, [x16, #0x10]\n"
- "ldr q11, [x21, x6]\n"
+ "fmin v27.8h, v27.8h, v14.8h\n"
+ "str q25, [x9, x14]\n"
+ "ldr x20, [x8, #0x38]\n"
+ "fmin v22.8h, v22.8h, v14.8h\n"
+ "fmax v16.8h, v16.8h, v15.8h\n"
+ "fmax v31.8h, v31.8h, v15.8h\n"
+ "fmax v28.8h, v28.8h, v15.8h\n"
+ "str q13, [x23, x14]\n"
+ "ldr x23, [x8, #0x40]\n"
+ "fmax v19.8h, v19.8h, v15.8h\n"
+ "str q21, [x22, x14]\n"
+ "ldr x22, [x8, #0x48]\n"
+ "fmax v24.8h, v24.8h, v15.8h\n"
+ "str q27, [x21, x14]\n"
+ "ldr x21, [x8, #0x50]\n"
+ "fmin v16.8h, v16.8h, v14.8h\n"
+ "fmax v29.8h, v29.8h, v15.8h\n"
+ "str q22, [x20, x14]\n"
+ "ldr x20, [x8, #0x58]\n"
"fmin v31.8h, v31.8h, v14.8h\n"
"fmin v28.8h, v28.8h, v14.8h\n"
- "ldr q12, [x20, x6]\n"
- "fmin v20.8h, v20.8h, v14.8h\n"
"fmin v19.8h, v19.8h, v14.8h\n"
- "str q31, [x25, x14]\n"
- "fmax v29.8h, v29.8h, v13.8h\n"
- "fmax v26.8h, v26.8h, v13.8h\n"
- "str q28, [x23, x14]\n"
- "ldr x23, [x8, #0x60]\n"
- "fmax v18.8h, v18.8h, v13.8h\n"
- "fmax v24.8h, v24.8h, v13.8h\n"
- "str q20, [x22, x14]\n"
- "ldr x22, [x8, #0x68]\n"
- "str q19, [x24, x14]\n"
- "ldr x21, [x8, #0x70]\n"
- "ldr x20, [x8, #0x78]\n"
+ "fmax v23.8h, v23.8h, v15.8h\n"
"add x6, x6, #0x10\n"
- "cmp x6, x7, LSL #4\n"
- "fmin v29.8h, v29.8h, v14.8h\n"
- "fmin v26.8h, v26.8h, v14.8h\n"
"add x15, x15, #0x10\n"
- "fmin v18.8h, v18.8h, v14.8h\n"
+ "fmax v17.8h, v17.8h, v15.8h\n"
+ "str q16, [x23, x14]\n"
+ "ldr x23, [x8, #0x60]\n"
+ "cmp x6, x7, LSL #4\n"
+ "str q31, [x22, x14]\n"
+ "ldr x22, [x8, #0x68]\n"
"fmin v24.8h, v24.8h, v14.8h\n"
- "str q29, [x23, x14]\n"
+ "fmin v29.8h, v29.8h, v14.8h\n"
+ "str q28, [x21, x14]\n"
+ "ldr x21, [x8, #0x70]\n"
+ "fmin v23.8h, v23.8h, v14.8h\n"
"add x17, x17, #0xa0\n"
- "str q26, [x22, x14]\n"
- "str q18, [x21, x14]\n"
- "str q24, [x20, x14]\n"
+ "str q19, [x20, x14]\n"
+ "ldr x20, [x8, #0x78]\n"
+ "fmin v17.8h, v17.8h, v14.8h\n"
+ "str q24, [x23, x14]\n"
+ "str q29, [x22, x14]\n"
+ "str q23, [x21, x14]\n"
+ "str q17, [x20, x14]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v31.16b, v30.16b\n fmla v31.8h, v4.8h, v9.8h\n"
- "mov v17.16b, v30.16b\n fmla v17.8h, v8.8h, v9.8h\n"
+ "mov v16.16b, v30.16b\n fmla v16.8h, v4.8h, v9.8h\n"
+ "mov v19.16b, v30.16b\n fmla v19.8h, v8.8h, v9.8h\n"
"ldr x27, [x16, #0x20]\n"
"ldr x24, [x16, #0x30]\n"
- "mov v15.16b, v30.16b\n fmla v15.8h, v3.8h, v9.8h\n"
- "mov v29.16b, v30.16b\n fmla v29.8h, v1.8h, v9.8h\n"
+ "mov v13.16b, v30.16b\n fmla v13.8h, v3.8h, v9.8h\n"
+ "mov v31.16b, v30.16b\n fmla v31.8h, v1.8h, v9.8h\n"
"ldr x23, [x16, #0x28]\n"
"ldr x22, [x16, #0x38]\n"
- "mov v19.16b, v30.16b\n fmla v19.8h, v0.8h, v9.8h\n"
- "mov v20.16b, v30.16b\n fmla v20.8h, v7.8h, v9.8h\n"
+ "mov v17.16b, v30.16b\n fmla v17.8h, v0.8h, v9.8h\n"
+ "mov v18.16b, v30.16b\n fmla v18.8h, v7.8h, v9.8h\n"
"ldr x26, [x16, #0x40]\n"
"ldr x21, [x16, #0x48]\n"
- "mov v21.16b, v30.16b\n fmla v21.8h, v6.8h, v9.8h\n"
- "fmla v31.8h, v5.8h, v12.8h\n"
+ "mov v25.16b, v30.16b\n fmla v25.8h, v6.8h, v9.8h\n"
+ "mov v28.16b, v30.16b\n fmla v28.8h, v5.8h, v9.8h\n"
"ldr x25, [x16, #0x50]\n"
"ldr x20, [x16, #0x58]\n"
- "mov v18.16b, v30.16b\n fmla v18.8h, v5.8h, v9.8h\n"
- "mov v27.16b, v30.16b\n fmla v27.8h, v2.8h, v9.8h\n"
- "ldr q24, [x24, x15]\n"
+ "fmla v16.8h, v5.8h, v12.8h\n"
+ "mov v29.16b, v30.16b\n fmla v29.8h, v2.8h, v9.8h\n"
+ "ldr q22, [x24, x15]\n"
"ldr x13, [x16, #0x70]\n"
- "fmla v17.8h, v0.8h, v10.8h\n"
- "ldr q22, [x27, x15]\n"
- "mov v28.16b, v30.16b\n fmla v28.8h, v2.8h, v11.8h\n"
- "ldr q16, [x23, x15]\n"
- "fmla v15.8h, v4.8h, v12.8h\n"
- "fmla v29.8h, v2.8h, v12.8h\n"
+ "fmla v19.8h, v0.8h, v10.8h\n"
+ "ldr q20, [x27, x15]\n"
+ "mov v27.16b, v30.16b\n fmla v27.8h, v2.8h, v11.8h\n"
+ "ldr q23, [x23, x15]\n"
+ "fmla v13.8h, v4.8h, v12.8h\n"
+ "fmla v31.8h, v2.8h, v12.8h\n"
"ldr x24, [x16, #0x60]\n"
"ldr x23, [x16, #0x68]\n"
- "fmla v19.8h, v1.8h, v12.8h\n"
- "fmla v20.8h, v8.8h, v12.8h\n"
+ "fmla v17.8h, v1.8h, v12.8h\n"
+ "fmla v18.8h, v8.8h, v12.8h\n"
"ldr x12, [x8, #0x0]\n"
"ldr x11, [x8, #0x8]\n"
- "fmla v21.8h, v7.8h, v12.8h\n"
- "mov v10.16b, v30.16b\n fmla v10.8h, v6.8h, v22.8h\n"
- "ldr q22, [x21, x15]\n"
+ "fmla v25.8h, v7.8h, v12.8h\n"
+ "mov v11.16b, v30.16b\n fmla v11.8h, v6.8h, v20.8h\n"
+ "ldr q9, [x21, x15]\n"
"ldr x28, [x16, #0x88]\n"
- "fmla v31.8h, v7.8h, v24.8h\n"
- "fmla v28.8h, v6.8h, v12.8h\n"
+ "fmla v16.8h, v7.8h, v22.8h\n"
+ "fmla v27.8h, v6.8h, v12.8h\n"
"ldr x10, [x8, #0x10]\n"
"ldr x9, [x8, #0x18]\n"
- "mov v9.16b, v30.16b\n fmla v9.8h, v3.8h, v12.8h\n"
- "mov v11.16b, v30.16b\n fmla v11.8h, v0.8h, v12.8h\n"
- "ldr q23, [x22, x15]\n"
+ "mov v10.16b, v30.16b\n fmla v10.8h, v3.8h, v12.8h\n"
+ "mov v26.16b, v30.16b\n fmla v26.8h, v0.8h, v12.8h\n"
+ "ldr q21, [x22, x15]\n"
"ldr x22, [x16, #0x78]\n"
- "mov v12.16b, v30.16b\n fmla v12.8h, v8.8h, v16.8h\n"
- "ldr q16, [x26, x15]\n"
- "fmla v15.8h, v6.8h, v24.8h\n"
+ "mov v24.16b, v30.16b\n fmla v24.8h, v8.8h, v23.8h\n"
+ "ldr q23, [x26, x15]\n"
+ "fmla v13.8h, v6.8h, v22.8h\n"
"ldr x21, [x16, #0x80]\n"
- "fmla v29.8h, v4.8h, v24.8h\n"
- "fmla v19.8h, v3.8h, v24.8h\n"
+ "fmla v31.8h, v4.8h, v22.8h\n"
+ "fmla v17.8h, v3.8h, v22.8h\n"
"add x14, x14, #0x10\n"
- "mov v26.16b, v30.16b\n fmla v26.8h, v1.8h, v24.8h\n"
- "mov v25.16b, v30.16b\n fmla v25.8h, v0.8h, v24.8h\n"
- "fmla v18.8h, v8.8h, v24.8h\n"
- "fmla v27.8h, v5.8h, v24.8h\n"
- "fmla v10.8h, v2.8h, v24.8h\n"
- "ldr q24, [x25, x15]\n"
- "fmla v17.8h, v1.8h, v23.8h\n"
+ "mov v12.16b, v30.16b\n fmla v12.8h, v1.8h, v22.8h\n"
+ "fmla v30.8h, v0.8h, v22.8h\n"
+ "fmla v28.8h, v8.8h, v22.8h\n"
+ "fmla v29.8h, v5.8h, v22.8h\n"
+ "fmla v11.8h, v2.8h, v22.8h\n"
+ "ldr q22, [x25, x15]\n"
+ "fmla v19.8h, v1.8h, v21.8h\n"
"ldr x27, [x16, #0x90]\n"
- "fmla v20.8h, v0.8h, v23.8h\n"
- "ldr q23, [x20, x15]\n"
- "fmla v21.8h, v2.8h, v16.8h\n"
+ "fmla v18.8h, v0.8h, v21.8h\n"
+ "ldr q21, [x20, x15]\n"
+ "fmla v25.8h, v2.8h, v23.8h\n"
"ldr x20, [x16, #0x98]\n"
- "fmla v31.8h, v8.8h, v22.8h\n"
- "fmla v28.8h, v1.8h, v16.8h\n"
- "ldr q16, [x24, x15]\n"
+ "fmla v16.8h, v8.8h, v9.8h\n"
+ "fmla v27.8h, v1.8h, v23.8h\n"
+ "ldr q20, [x24, x15]\n"
"ldr x26, [x16, #0xa0]\n"
- "fmla v15.8h, v7.8h, v22.8h\n"
- "fmla v9.8h, v6.8h, v22.8h\n"
- "fmla v29.8h, v5.8h, v22.8h\n"
- "fmla v19.8h, v4.8h, v22.8h\n"
- "fmla v11.8h, v3.8h, v22.8h\n"
- "fmla v26.8h, v2.8h, v22.8h\n"
- "fmla v25.8h, v1.8h, v22.8h\n"
- "fmla v12.8h, v0.8h, v22.8h\n"
- "ldr q22, [x23, x15]\n"
+ "fmla v13.8h, v7.8h, v9.8h\n"
+ "fmla v10.8h, v6.8h, v9.8h\n"
+ "fmla v31.8h, v5.8h, v9.8h\n"
+ "fmla v17.8h, v4.8h, v9.8h\n"
+ "fmla v26.8h, v3.8h, v9.8h\n"
+ "fmla v12.8h, v2.8h, v9.8h\n"
+ "fmla v30.8h, v1.8h, v9.8h\n"
+ "fmla v24.8h, v0.8h, v9.8h\n"
+ "ldr q23, [x23, x15]\n"
"ldr x25, [x16, #0xa8]\n"
- "fmla v17.8h, v3.8h, v24.8h\n"
- "fmla v18.8h, v0.8h, v24.8h\n"
- "fmla v27.8h, v6.8h, v16.8h\n"
- "fmla v10.8h, v3.8h, v16.8h\n"
- "ldr q16, [x13, x15]\n"
+ "fmla v19.8h, v3.8h, v22.8h\n"
+ "fmla v28.8h, v0.8h, v22.8h\n"
+ "fmla v29.8h, v6.8h, v20.8h\n"
+ "fmla v11.8h, v3.8h, v20.8h\n"
+ "ldr q20, [x13, x15]\n"
"ldr x24, [x16, #0xb0]\n"
- "fmla v20.8h, v4.8h, v22.8h\n"
- "fmla v21.8h, v3.8h, v22.8h\n"
- "fmla v31.8h, v1.8h, v22.8h\n"
- "fmla v28.8h, v5.8h, v23.8h\n"
- "fmla v9.8h, v2.8h, v23.8h\n"
- "ldr q23, [x22, x15]\n"
- "fmla v15.8h, v0.8h, v22.8h\n"
+ "fmla v18.8h, v4.8h, v23.8h\n"
+ "fmla v25.8h, v3.8h, v23.8h\n"
+ "fmla v16.8h, v1.8h, v23.8h\n"
+ "fmla v27.8h, v5.8h, v21.8h\n"
+ "fmla v10.8h, v2.8h, v21.8h\n"
+ "ldr q22, [x22, x15]\n"
+ "fmla v13.8h, v0.8h, v23.8h\n"
"ldr x23, [x16, #0xb8]\n"
- "fmla v11.8h, v8.8h, v16.8h\n"
- "fmla v12.8h, v5.8h, v16.8h\n"
- "ldr q16, [x21, x15]\n"
+ "fmla v26.8h, v8.8h, v20.8h\n"
+ "fmla v24.8h, v5.8h, v20.8h\n"
+ "ldr q21, [x21, x15]\n"
"ldr x22, [x16, #0xc0]\n"
- "fmla v17.8h, v5.8h, v22.8h\n"
- "fmla v18.8h, v2.8h, v22.8h\n"
- "ldr q22, [x28, x15]\n"
+ "fmla v19.8h, v5.8h, v23.8h\n"
+ "fmla v28.8h, v2.8h, v23.8h\n"
+ "ldr q20, [x28, x15]\n"
"ldr x21, [x16, #0xc8]\n"
- "fmla v20.8h, v5.8h, v23.8h\n"
- "fmla v21.8h, v4.8h, v23.8h\n"
- "fmla v31.8h, v2.8h, v23.8h\n"
- "fmla v28.8h, v3.8h, v23.8h\n"
- "fmla v15.8h, v1.8h, v23.8h\n"
- "fmla v9.8h, v0.8h, v23.8h\n"
- "ldr q23, [x20, x15]\n"
+ "fmla v18.8h, v5.8h, v22.8h\n"
+ "fmla v25.8h, v4.8h, v22.8h\n"
+ "fmla v16.8h, v2.8h, v22.8h\n"
+ "fmla v27.8h, v3.8h, v22.8h\n"
+ "fmla v13.8h, v1.8h, v22.8h\n"
+ "fmla v10.8h, v0.8h, v22.8h\n"
+ "ldr q22, [x20, x15]\n"
"ldr x28, [x16, #0xd8]\n"
- "fmla v10.8h, v7.8h, v16.8h\n"
- "fmla v26.8h, v6.8h, v16.8h\n"
- "ldr q16, [x27, x15]\n"
+ "fmla v11.8h, v7.8h, v21.8h\n"
+ "fmla v12.8h, v6.8h, v21.8h\n"
+ "ldr q21, [x27, x15]\n"
"ldr x20, [x16, #0xd0]\n"
- "fmla v17.8h, v7.8h, v22.8h\n"
- "fmla v20.8h, v6.8h, v22.8h\n"
- "fmla v18.8h, v4.8h, v22.8h\n"
- "fmla v31.8h, v3.8h, v22.8h\n"
- "fmla v27.8h, v1.8h, v22.8h\n"
- "fmla v29.8h, v0.8h, v22.8h\n"
- "ldr q22, [x26, x15]\n"
+ "fmla v19.8h, v7.8h, v20.8h\n"
+ "fmla v18.8h, v6.8h, v20.8h\n"
+ "fmla v28.8h, v4.8h, v20.8h\n"
+ "fmla v16.8h, v3.8h, v20.8h\n"
+ "fmla v29.8h, v1.8h, v20.8h\n"
+ "fmla v31.8h, v0.8h, v20.8h\n"
+ "ldr q20, [x26, x15]\n"
"ldr x27, [x16, #0xe0]\n"
- "fmla v21.8h, v8.8h, v23.8h\n"
- "fmla v25.8h, v8.8h, v16.8h\n"
- "fmla v12.8h, v7.8h, v16.8h\n"
- "ldr q16, [x25, x15]\n"
- "fmla v11.8h, v1.8h, v23.8h\n"
+ "fmla v25.8h, v8.8h, v22.8h\n"
+ "fmla v30.8h, v8.8h, v21.8h\n"
+ "fmla v24.8h, v7.8h, v21.8h\n"
+ "ldr q21, [x25, x15]\n"
+ "fmla v26.8h, v1.8h, v22.8h\n"
"ldr x26, [x16, #0xe8]\n"
- "fmla v28.8h, v7.8h, v23.8h\n"
- "fmla v15.8h, v5.8h, v23.8h\n"
- "fmla v9.8h, v4.8h, v23.8h\n"
- "fmla v19.8h, v2.8h, v23.8h\n"
- "ldr q23, [x24, x15]\n"
- "ldr x25, [x16, #0xf0]\n"
+ "fmla v27.8h, v7.8h, v22.8h\n"
+ "fmla v13.8h, v5.8h, v22.8h\n"
+ "fmla v10.8h, v4.8h, v22.8h\n"
"fmla v17.8h, v2.8h, v22.8h\n"
- "fmla v20.8h, v1.8h, v22.8h\n"
- "fmla v21.8h, v0.8h, v22.8h\n"
- "ldr q22, [x23, x15]\n"
- "fmla v18.8h, v7.8h, v16.8h\n"
+ "ldr q22, [x24, x15]\n"
+ "ldr x25, [x16, #0xf0]\n"
+ "fmla v19.8h, v2.8h, v20.8h\n"
+ "fmla v18.8h, v1.8h, v20.8h\n"
+ "fmla v25.8h, v0.8h, v20.8h\n"
+ "ldr q20, [x23, x15]\n"
+ "fmla v28.8h, v7.8h, v21.8h\n"
"ldr x24, [x16, #0xf8]\n"
- "fmla v31.8h, v6.8h, v16.8h\n"
- "fmla v27.8h, v4.8h, v16.8h\n"
- "fmla v29.8h, v3.8h, v16.8h\n"
- "fmla v10.8h, v1.8h, v16.8h\n"
- "fmla v26.8h, v0.8h, v16.8h\n"
- "ldr q16, [x22, x15]\n"
- "fmla v11.8h, v4.8h, v16.8h\n"
+ "fmla v16.8h, v6.8h, v21.8h\n"
+ "fmla v29.8h, v4.8h, v21.8h\n"
+ "fmla v31.8h, v3.8h, v21.8h\n"
+ "fmla v11.8h, v1.8h, v21.8h\n"
+ "fmla v12.8h, v0.8h, v21.8h\n"
+ "ldr q21, [x22, x15]\n"
+ "fmla v18.8h, v2.8h, v22.8h\n"
"ldr x23, [x16, #0x100]\n"
- "fmla v25.8h, v2.8h, v16.8h\n"
- "fmla v20.8h, v2.8h, v23.8h\n"
- "fmla v21.8h, v1.8h, v23.8h\n"
- "fmla v28.8h, v0.8h, v23.8h\n"
+ "fmla v25.8h, v1.8h, v22.8h\n"
+ "fmla v27.8h, v0.8h, v22.8h\n"
"ldr q23, [x21, x15]\n"
"ldr x22, [x16, #0x108]\n"
- "fmla v17.8h, v6.8h, v22.8h\n"
- "fmla v18.8h, v3.8h, v22.8h\n"
- "fmla v27.8h, v0.8h, v22.8h\n"
- "ldr q22, [x20, x15]\n"
- "fmla v15.8h, v8.8h, v16.8h\n"
+ "fmla v19.8h, v6.8h, v20.8h\n"
+ "fmla v28.8h, v3.8h, v20.8h\n"
+ "fmla v26.8h, v4.8h, v21.8h\n"
+ "fmla v30.8h, v2.8h, v21.8h\n"
+ "fmla v29.8h, v0.8h, v20.8h\n"
+ "ldr q20, [x20, x15]\n"
+ "fmla v13.8h, v8.8h, v21.8h\n"
"ldr x21, [x16, #0x110]\n"
- "fmla v9.8h, v7.8h, v16.8h\n"
- "fmla v19.8h, v5.8h, v16.8h\n"
- "fmla v12.8h, v1.8h, v16.8h\n"
- "ldr q16, [x28, x15]\n"
- "fmla v11.8h, v2.8h, v23.8h\n"
+ "fmla v10.8h, v7.8h, v21.8h\n"
+ "fmla v17.8h, v5.8h, v21.8h\n"
+ "fmla v24.8h, v1.8h, v21.8h\n"
+ "ldr q21, [x28, x15]\n"
+ "fmla v27.8h, v8.8h, v23.8h\n"
"ldr x20, [x16, #0x118]\n"
- "fmla v10.8h, v0.8h, v22.8h\n"
- "fmla v26.8h, v4.8h, v16.8h\n"
- "fmla v25.8h, v3.8h, v16.8h\n"
- "fmla v28.8h, v8.8h, v23.8h\n"
- "fmla v9.8h, v5.8h, v23.8h\n"
- "ldr q23, [x27, x15]\n"
- "fmla v18.8h, v6.8h, v22.8h\n"
- "fmla v27.8h, v3.8h, v22.8h\n"
+ "fmla v26.8h, v2.8h, v23.8h\n"
+ "fmla v11.8h, v0.8h, v20.8h\n"
+ "fmla v28.8h, v6.8h, v20.8h\n"
+ "fmla v29.8h, v3.8h, v20.8h\n"
"ldr q22, [x26, x15]\n"
- "fmla v29.8h, v7.8h, v16.8h\n"
- "fmla v19.8h, v6.8h, v16.8h\n"
- "fmla v10.8h, v5.8h, v16.8h\n"
- "fmla v11.8h, v5.8h, v23.8h\n"
- "fmla v12.8h, v2.8h, v23.8h\n"
- "fmla v26.8h, v7.8h, v22.8h\n"
- "fmla v25.8h, v6.8h, v22.8h\n"
- "fmla v27.8h, v8.8h, v16.8h\n"
- "ldr q16, [x25, x15]\n"
- "fmla v10.8h, v8.8h, v22.8h\n"
- "ldr q30, [x23, x15]\n"
- "fmla v29.8h, v8.8h, v16.8h\n"
- "fmla v19.8h, v7.8h, v16.8h\n"
- "fmla v11.8h, v6.8h, v16.8h\n"
- "fmla v26.8h, v5.8h, v16.8h\n"
- "fmla v25.8h, v4.8h, v16.8h\n"
- "fmla v12.8h, v3.8h, v16.8h\n"
- "ldr q24, [x22, x15]\n"
- "fmla v9.8h, v8.8h, v23.8h\n"
- "ldr q16, [x24, x15]\n"
- "fmla v17.8h, v4.8h, v30.8h\n"
- "fmax v17.8h, v17.8h, v13.8h\n"
- "fmla v20.8h, v3.8h, v30.8h\n"
- "fmla v21.8h, v5.8h, v24.8h\n"
- "fmax v20.8h, v20.8h, v13.8h\n"
- "fmla v28.8h, v4.8h, v24.8h\n"
- "fmla v26.8h, v8.8h, v16.8h\n"
- "fmax v21.8h, v21.8h, v13.8h\n"
- "fmla v25.8h, v7.8h, v16.8h\n"
- "fmla v12.8h, v6.8h, v16.8h\n"
- "ldr q23, [x21, x15]\n"
- "fmax v28.8h, v28.8h, v13.8h\n"
- "fmla v18.8h, v1.8h, v30.8h\n"
- "fmla v31.8h, v0.8h, v30.8h\n"
- "ldr q16, [x20, x15]\n"
- "fmin v17.8h, v17.8h, v14.8h\n"
- "fmla v15.8h, v2.8h, v24.8h\n"
- "fmla v9.8h, v1.8h, v24.8h\n"
- "fmin v20.8h, v20.8h, v14.8h\n"
- "str q17, [x12, x14]\n"
- "fmla v27.8h, v7.8h, v23.8h\n"
- "fmla v29.8h, v6.8h, v23.8h\n"
- "fmin v21.8h, v21.8h, v14.8h\n"
- "str q20, [x11, x14]\n"
- "fmla v19.8h, v8.8h, v16.8h\n"
- "fmla v11.8h, v7.8h, v16.8h\n"
- "fmin v28.8h, v28.8h, v14.8h\n"
- "str q21, [x10, x14]\n"
- "fmax v18.8h, v18.8h, v13.8h\n"
- "fmax v31.8h, v31.8h, v13.8h\n"
- "str q28, [x9, x14]\n"
+ "fmla v12.8h, v4.8h, v21.8h\n"
+ "fmla v30.8h, v3.8h, v21.8h\n"
+ "fmla v10.8h, v5.8h, v23.8h\n"
+ "ldr q20, [x27, x15]\n"
+ "fmla v31.8h, v7.8h, v21.8h\n"
+ "fmla v17.8h, v6.8h, v21.8h\n"
+ "fmla v11.8h, v5.8h, v21.8h\n"
+ "fmla v29.8h, v8.8h, v21.8h\n"
+ "ldr q21, [x25, x15]\n"
+ "fmla v26.8h, v5.8h, v20.8h\n"
+ "fmla v24.8h, v2.8h, v20.8h\n"
+ "fmla v12.8h, v7.8h, v22.8h\n"
+ "fmla v30.8h, v6.8h, v22.8h\n"
+ "fmla v10.8h, v8.8h, v20.8h\n"
+ "ldr q20, [x24, x15]\n"
+ "fmla v11.8h, v8.8h, v22.8h\n"
+ "ldr q22, [x23, x15]\n"
+ "fmla v31.8h, v8.8h, v21.8h\n"
+ "fmla v17.8h, v7.8h, v21.8h\n"
+ "fmla v26.8h, v6.8h, v21.8h\n"
+ "fmla v12.8h, v5.8h, v21.8h\n"
+ "fmla v24.8h, v3.8h, v21.8h\n"
+ "fmla v30.8h, v4.8h, v21.8h\n"
+ "ldr q21, [x22, x15]\n"
+ "fmla v19.8h, v4.8h, v22.8h\n"
+ "fmla v18.8h, v3.8h, v22.8h\n"
+ "fmla v28.8h, v1.8h, v22.8h\n"
+ "fmla v16.8h, v0.8h, v22.8h\n"
+ "ldr q23, [x20, x15]\n"
+ "fmla v25.8h, v5.8h, v21.8h\n"
+ "fmla v27.8h, v4.8h, v21.8h\n"
+ "fmla v12.8h, v8.8h, v20.8h\n"
+ "fmla v30.8h, v7.8h, v20.8h\n"
+ "fmla v24.8h, v6.8h, v20.8h\n"
+ "ldr q0, [x21, x15]\n"
+ "fmax v19.8h, v19.8h, v15.8h\n"
+ "fmla v13.8h, v2.8h, v21.8h\n"
+ "fmla v10.8h, v1.8h, v21.8h\n"
+ "fmax v18.8h, v18.8h, v15.8h\n"
+ "add x15, x15, #0x10\n"
+ "fmax v25.8h, v25.8h, v15.8h\n"
+ "fmla v17.8h, v8.8h, v23.8h\n"
+ "fmla v26.8h, v7.8h, v23.8h\n"
+ "fmax v27.8h, v27.8h, v15.8h\n"
+ "fmla v29.8h, v7.8h, v0.8h\n"
+ "fmla v31.8h, v6.8h, v0.8h\n"
+ "fmin v19.8h, v19.8h, v14.8h\n"
+ "fmin v18.8h, v18.8h, v14.8h\n"
+ "fmla v11.8h, v4.8h, v0.8h\n"
+ "fmin v25.8h, v25.8h, v14.8h\n"
+ "fmax v28.8h, v28.8h, v15.8h\n"
+ "fmla v12.8h, v3.8h, v0.8h\n"
+ "fmin v27.8h, v27.8h, v14.8h\n"
+ "fmax v16.8h, v16.8h, v15.8h\n"
+ "fmla v30.8h, v5.8h, v23.8h\n"
+ "fmax v13.8h, v13.8h, v15.8h\n"
+ "fmax v10.8h, v10.8h, v15.8h\n"
+ "str q19, [x12, x14]\n"
"ldr x23, [x8, #0x20]\n"
- "fmax v15.8h, v15.8h, v13.8h\n"
- "fmax v9.8h, v9.8h, v13.8h\n"
+ "str q18, [x11, x14]\n"
"ldr x22, [x8, #0x28]\n"
+ "fmla v24.8h, v4.8h, v23.8h\n"
+ "fmin v28.8h, v28.8h, v14.8h\n"
+ "str q25, [x10, x14]\n"
"ldr x21, [x8, #0x30]\n"
+ "fmin v16.8h, v16.8h, v14.8h\n"
+ "fmax v29.8h, v29.8h, v15.8h\n"
+ "str q27, [x9, x14]\n"
"ldr x20, [x8, #0x38]\n"
- "fmla v10.8h, v4.8h, v23.8h\n"
- "fmla v26.8h, v3.8h, v23.8h\n"
- "fmin v18.8h, v18.8h, v14.8h\n"
- "fmla v25.8h, v5.8h, v16.8h\n"
- "fmla v12.8h, v4.8h, v16.8h\n"
- "fmin v31.8h, v31.8h, v14.8h\n"
- "str q18, [x23, x14]\n"
- "fmin v15.8h, v15.8h, v14.8h\n"
- "fmin v9.8h, v9.8h, v14.8h\n"
- "str q31, [x22, x14]\n"
+ "fmin v13.8h, v13.8h, v14.8h\n"
+ "fmin v10.8h, v10.8h, v14.8h\n"
+ "fmax v31.8h, v31.8h, v15.8h\n"
+ "fmax v17.8h, v17.8h, v15.8h\n"
+ "str q28, [x23, x14]\n"
"ldr x23, [x8, #0x40]\n"
- "fmax v27.8h, v27.8h, v13.8h\n"
- "fmax v29.8h, v29.8h, v13.8h\n"
- "str q15, [x21, x14]\n"
+ "fmax v26.8h, v26.8h, v15.8h\n"
+ "str q16, [x22, x14]\n"
"ldr x22, [x8, #0x48]\n"
- "fmax v19.8h, v19.8h, v13.8h\n"
- "fmax v11.8h, v11.8h, v13.8h\n"
- "str q9, [x20, x14]\n"
+ "fmin v29.8h, v29.8h, v14.8h\n"
+ "str q13, [x21, x14]\n"
"ldr x21, [x8, #0x50]\n"
+ "fmax v11.8h, v11.8h, v15.8h\n"
+ "fmax v12.8h, v12.8h, v15.8h\n"
+ "str q10, [x20, x14]\n"
"ldr x20, [x8, #0x58]\n"
- "fmin v27.8h, v27.8h, v14.8h\n"
- "fmin v29.8h, v29.8h, v14.8h\n"
- "str q27, [x23, x14]\n"
- "fmin v19.8h, v19.8h, v14.8h\n"
- "fmin v11.8h, v11.8h, v14.8h\n"
- "str q29, [x22, x14]\n"
+ "fmin v31.8h, v31.8h, v14.8h\n"
+ "fmin v17.8h, v17.8h, v14.8h\n"
+ "fmin v26.8h, v26.8h, v14.8h\n"
+ "fmax v30.8h, v30.8h, v15.8h\n"
+ "str q29, [x23, x14]\n"
"ldr x23, [x8, #0x60]\n"
- "fmax v10.8h, v10.8h, v13.8h\n"
- "fmax v26.8h, v26.8h, v13.8h\n"
- "str q19, [x21, x14]\n"
+ "fmax v24.8h, v24.8h, v15.8h\n"
+ "fmin v11.8h, v11.8h, v14.8h\n"
+ "str q31, [x22, x14]\n"
"ldr x22, [x8, #0x68]\n"
- "fmax v25.8h, v25.8h, v13.8h\n"
- "fmax v12.8h, v12.8h, v13.8h\n"
- "str q11, [x20, x14]\n"
+ "str q17, [x21, x14]\n"
"ldr x21, [x8, #0x70]\n"
- "ldr x20, [x8, #0x78]\n"
- "fmin v10.8h, v10.8h, v14.8h\n"
- "fmin v26.8h, v26.8h, v14.8h\n"
- "str q10, [x23, x14]\n"
- "fmin v25.8h, v25.8h, v14.8h\n"
"fmin v12.8h, v12.8h, v14.8h\n"
- "str q26, [x22, x14]\n"
- "add x15, x15, #0x10\n"
- "str q25, [x21, x14]\n"
- "str q12, [x20, x14]\n"
+ "str q26, [x20, x14]\n"
+ "ldr x20, [x8, #0x78]\n"
+ "fmin v30.8h, v30.8h, v14.8h\n"
+ "fmin v24.8h, v24.8h, v14.8h\n"
+ "str q11, [x23, x14]\n"
+ "str q12, [x22, x14]\n"
+ "str q30, [x21, x14]\n"
+ "str q24, [x20, x14]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 140f\n"
@@ -715,10 +715,10 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr q8, [x17, #0x90]\n"
"ldr x23, [x16, #0x0]\n"
"ldr x22, [x16, #0x8]\n"
- "add x23, x23, x15\n"
- "add x22, x22, x15\n"
"ldr x21, [x16, #0x10]\n"
"ldr x20, [x16, #0x18]\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
"add x21, x21, x15\n"
"add x20, x20, x15\n"
"tbz %x[n_channels], #2, 5f\n"
@@ -765,20 +765,20 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"mov v16.16b, v30.16b\n fmla v16.8h, v8.8h, v9.8h\n"
"mov v17.16b, v30.16b\n fmla v17.8h, v7.8h, v9.8h\n"
"ldr x20, [x16, #0x20]\n"
- "add x20, x20, x15\n"
"mov v18.16b, v30.16b\n fmla v18.8h, v6.8h, v9.8h\n"
"mov v21.16b, v30.16b\n fmla v21.8h, v4.8h, v9.8h\n"
"mov v22.16b, v30.16b\n fmla v22.8h, v3.8h, v9.8h\n"
"mov v25.16b, v30.16b\n fmla v25.8h, v1.8h, v9.8h\n"
"mov v26.16b, v30.16b\n fmla v26.8h, v0.8h, v9.8h\n"
"mov v19.16b, v30.16b\n fmla v19.8h, v2.8h, v11.8h\n"
+ "add x20, x20, x15\n"
"mov v20.16b, v30.16b\n fmla v20.8h, v5.8h, v9.8h\n"
"mov v24.16b, v30.16b\n fmla v24.8h, v2.8h, v9.8h\n"
"fmla v16.8h, v0.8h, v10.8h\n"
"fmla v17.8h, v8.8h, v12.8h\n"
"fmla v18.8h, v7.8h, v12.8h\n"
- "fmla v19.8h, v6.8h, v12.8h\n"
"fmla v21.8h, v5.8h, v12.8h\n"
+ "fmla v19.8h, v6.8h, v12.8h\n"
"fmla v22.8h, v4.8h, v12.8h\n"
"mov v23.16b, v30.16b\n fmla v23.8h, v3.8h, v12.8h\n"
"fmla v25.8h, v2.8h, v12.8h\n"
@@ -853,13 +853,13 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x38]\n"
"fmla v20.8h, v8.8h, v9.8h\n"
"fmla v21.8h, v7.8h, v9.8h\n"
- "add x20, x20, x15\n"
"fmla v22.8h, v6.8h, v9.8h\n"
"fmla v24.8h, v5.8h, v9.8h\n"
"fmla v25.8h, v4.8h, v9.8h\n"
"fmla v26.8h, v3.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v9.8h\n"
"mov v29.16b, v30.16b\n fmla v29.8h, v1.8h, v9.8h\n"
+ "add x20, x20, x15\n"
"fmla v30.8h, v0.8h, v9.8h\n"
"tbz %x[n_channels], #2, 21f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
@@ -932,13 +932,13 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x50]\n"
"fmla v21.8h, v8.8h, v10.8h\n"
"fmla v22.8h, v7.8h, v10.8h\n"
- "add x20, x20, x15\n"
"fmla v23.8h, v6.8h, v10.8h\n"
"fmla v25.8h, v5.8h, v10.8h\n"
"fmla v26.8h, v4.8h, v10.8h\n"
"fmla v27.8h, v3.8h, v10.8h\n"
"fmla v29.8h, v2.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v10.8h\n"
+ "add x20, x20, x15\n"
"fmla v31.8h, v0.8h, v10.8h\n"
"tbz %x[n_channels], #2, 33f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
@@ -1035,11 +1035,11 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x70]\n"
"fmla v16.8h, v5.8h, v10.8h\n"
"fmla v17.8h, v4.8h, v10.8h\n"
- "add x20, x20, x15\n"
"fmla v18.8h, v3.8h, v10.8h\n"
"fmla v20.8h, v2.8h, v10.8h\n"
"fmla v21.8h, v1.8h, v10.8h\n"
"fmla v22.8h, v0.8h, v10.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 49f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
@@ -1087,11 +1087,11 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x80]\n"
"fmla v17.8h, v5.8h, v12.8h\n"
"fmla v18.8h, v4.8h, v12.8h\n"
- "add x20, x20, x15\n"
"fmla v19.8h, v3.8h, v12.8h\n"
"fmla v21.8h, v2.8h, v12.8h\n"
"fmla v22.8h, v1.8h, v12.8h\n"
"fmla v23.8h, v0.8h, v12.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 57f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 56f\n"
@@ -1139,11 +1139,11 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x90]\n"
"fmla v16.8h, v7.8h, v10.8h\n"
"fmla v17.8h, v6.8h, v10.8h\n"
- "add x20, x20, x15\n"
"fmla v20.8h, v4.8h, v10.8h\n"
"fmla v21.8h, v3.8h, v10.8h\n"
"fmla v24.8h, v1.8h, v10.8h\n"
"fmla v25.8h, v0.8h, v10.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 65f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 64f\n"
@@ -1191,11 +1191,11 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xa0]\n"
"fmla v18.8h, v8.8h, v12.8h\n"
"fmla v19.8h, v7.8h, v12.8h\n"
- "add x20, x20, x15\n"
"fmla v22.8h, v5.8h, v12.8h\n"
"fmla v23.8h, v4.8h, v12.8h\n"
"fmla v26.8h, v2.8h, v12.8h\n"
"fmla v27.8h, v1.8h, v12.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 73f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 72f\n"
@@ -1219,8 +1219,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xa8]\n"
"fmla v16.8h, v2.8h, v10.8h\n"
"fmla v17.8h, v1.8h, v10.8h\n"
- "add x20, x20, x15\n"
"fmla v18.8h, v0.8h, v10.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 77f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 76f\n"
@@ -1244,11 +1244,11 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xb0]\n"
"fmla v20.8h, v7.8h, v11.8h\n"
"fmla v21.8h, v6.8h, v11.8h\n"
- "add x20, x20, x15\n"
"fmla v24.8h, v4.8h, v11.8h\n"
"fmla v25.8h, v3.8h, v11.8h\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 81f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 80f\n"
@@ -1272,8 +1272,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xb8]\n"
"fmla v17.8h, v2.8h, v12.8h\n"
"fmla v18.8h, v1.8h, v12.8h\n"
- "add x20, x20, x15\n"
"fmla v19.8h, v0.8h, v12.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 85f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 84f\n"
@@ -1297,8 +1297,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xc0]\n"
"fmla v16.8h, v6.8h, v10.8h\n"
"fmla v20.8h, v3.8h, v10.8h\n"
- "add x20, x20, x15\n"
"fmla v24.8h, v0.8h, v10.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 89f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 88f\n"
@@ -1322,11 +1322,11 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xc8]\n"
"fmla v22.8h, v8.8h, v11.8h\n"
"fmla v23.8h, v7.8h, v11.8h\n"
- "add x20, x20, x15\n"
"fmla v26.8h, v5.8h, v11.8h\n"
"fmla v27.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v11.8h\n"
"fmla v31.8h, v1.8h, v11.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 93f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 92f\n"
@@ -1350,8 +1350,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xd0]\n"
"fmla v19.8h, v8.8h, v12.8h\n"
"fmla v23.8h, v5.8h, v12.8h\n"
- "add x20, x20, x15\n"
"fmla v27.8h, v2.8h, v12.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 97f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 96f\n"
@@ -1375,8 +1375,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xd8]\n"
"fmla v20.8h, v6.8h, v10.8h\n"
"fmla v24.8h, v3.8h, v10.8h\n"
- "add x20, x20, x15\n"
"fmla v28.8h, v0.8h, v10.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 101f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 100f\n"
@@ -1400,11 +1400,11 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xe0]\n"
"fmla v24.8h, v8.8h, v11.8h\n"
"fmla v25.8h, v7.8h, v11.8h\n"
- "add x20, x20, x15\n"
"fmla v26.8h, v6.8h, v11.8h\n"
"fmla v28.8h, v5.8h, v11.8h\n"
"fmla v29.8h, v4.8h, v11.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 105f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 104f\n"
@@ -1428,8 +1428,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xe8]\n"
"fmla v23.8h, v8.8h, v12.8h\n"
"fmla v27.8h, v5.8h, v12.8h\n"
- "add x20, x20, x15\n"
"fmla v31.8h, v2.8h, v12.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 109f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 108f\n"
@@ -1453,8 +1453,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xf0]\n"
"fmla v28.8h, v8.8h, v10.8h\n"
"fmla v29.8h, v7.8h, v10.8h\n"
- "add x20, x20, x15\n"
"fmla v30.8h, v6.8h, v10.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 113f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 112f\n"
@@ -1478,11 +1478,11 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xf8]\n"
"fmla v25.8h, v8.8h, v11.8h\n"
"fmla v26.8h, v7.8h, v11.8h\n"
- "add x20, x20, x15\n"
"fmla v27.8h, v6.8h, v11.8h\n"
"fmla v29.8h, v5.8h, v11.8h\n"
"fmla v30.8h, v4.8h, v11.8h\n"
"fmla v31.8h, v3.8h, v11.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 117f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 116f\n"
@@ -1506,8 +1506,8 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x100]\n"
"fmla v29.8h, v8.8h, v12.8h\n"
"fmla v30.8h, v7.8h, v12.8h\n"
- "add x20, x20, x15\n"
"fmla v31.8h, v6.8h, v12.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 121f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 120f\n"
@@ -1531,9 +1531,9 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x108]\n"
"fmla v16.8h, v4.8h, v10.8h\n"
"fmla v17.8h, v3.8h, v10.8h\n"
- "add x20, x20, x15\n"
"fmla v20.8h, v1.8h, v10.8h\n"
"fmla v21.8h, v0.8h, v10.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 125f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 124f\n"
@@ -1557,9 +1557,9 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x110]\n"
"fmla v18.8h, v5.8h, v11.8h\n"
"fmla v19.8h, v4.8h, v11.8h\n"
- "add x20, x20, x15\n"
"fmla v22.8h, v2.8h, v11.8h\n"
"fmla v23.8h, v1.8h, v11.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 129f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 128f\n"
@@ -1583,9 +1583,9 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x118]\n"
"fmla v24.8h, v7.8h, v12.8h\n"
"fmla v25.8h, v6.8h, v12.8h\n"
- "add x20, x20, x15\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #2, 133f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 132f\n"
@@ -1608,24 +1608,24 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"135:" // Oddments: Load input (4, 4): Bit 2: End
"fmla v26.8h, v8.8h, v10.8h\n"
"fmla v27.8h, v7.8h, v10.8h\n"
- "fmax v16.8h, v16.8h, v13.8h\n"
+ "fmax v16.8h, v16.8h, v15.8h\n"
"fmla v30.8h, v5.8h, v10.8h\n"
"fmla v31.8h, v4.8h, v10.8h\n"
- "fmax v17.8h, v17.8h, v13.8h\n"
- "fmax v18.8h, v18.8h, v13.8h\n"
- "fmax v19.8h, v19.8h, v13.8h\n"
- "fmax v20.8h, v20.8h, v13.8h\n"
- "fmax v21.8h, v21.8h, v13.8h\n"
- "fmax v22.8h, v22.8h, v13.8h\n"
- "fmax v23.8h, v23.8h, v13.8h\n"
- "fmax v24.8h, v24.8h, v13.8h\n"
- "fmax v25.8h, v25.8h, v13.8h\n"
- "fmax v26.8h, v26.8h, v13.8h\n"
- "fmax v27.8h, v27.8h, v13.8h\n"
- "fmax v28.8h, v28.8h, v13.8h\n"
- "fmax v29.8h, v29.8h, v13.8h\n"
- "fmax v30.8h, v30.8h, v13.8h\n"
- "fmax v31.8h, v31.8h, v13.8h\n"
+ "fmax v17.8h, v17.8h, v15.8h\n"
+ "fmax v18.8h, v18.8h, v15.8h\n"
+ "fmax v19.8h, v19.8h, v15.8h\n"
+ "fmax v20.8h, v20.8h, v15.8h\n"
+ "fmax v21.8h, v21.8h, v15.8h\n"
+ "fmax v22.8h, v22.8h, v15.8h\n"
+ "fmax v23.8h, v23.8h, v15.8h\n"
+ "fmax v24.8h, v24.8h, v15.8h\n"
+ "fmax v25.8h, v25.8h, v15.8h\n"
+ "fmax v26.8h, v26.8h, v15.8h\n"
+ "fmax v27.8h, v27.8h, v15.8h\n"
+ "fmax v28.8h, v28.8h, v15.8h\n"
+ "fmax v29.8h, v29.8h, v15.8h\n"
+ "fmax v30.8h, v30.8h, v15.8h\n"
+ "fmax v31.8h, v31.8h, v15.8h\n"
"fmin v16.8h, v16.8h, v14.8h\n"
"fmin v17.8h, v17.8h, v14.8h\n"
"fmin v18.8h, v18.8h, v14.8h\n"
@@ -1645,150 +1645,150 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"tbz %x[n_channels], #2, 137f\n"
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.d }[0], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.d }[0], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.d }[0], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.d }[0], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.d }[0], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.d }[0], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.d }[0], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.d }[0], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.d }[0], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.d }[0], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.d }[0], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.d }[0], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
- "add x14, x14, #0x8\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.d }[0], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.d }[0], [x22]\n"
+ "add x20, x20, x14\n"
+ "add x14, x14, #0x8\n"
"st1 { v30.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #1, 136f\n"
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.s }[2], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.s }[2], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.s }[2], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.s }[2], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.s }[2], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.s }[2], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.s }[2], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.s }[2], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.s }[2], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.s }[2], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.s }[2], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.s }[2], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
- "add x14, x14, #0x4\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.s }[2], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.s }[2], [x22]\n"
+ "add x20, x20, x14\n"
+ "add x14, x14, #0x4\n"
"st1 { v30.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"tbz %x[n_channels], #0, 139f\n"
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.h }[6], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.h }[6], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.h }[6], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.h }[6], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.h }[6], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.h }[6], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.h }[6], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.h }[6], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.h }[6], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.h }[6], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.h }[6], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.h }[6], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.h }[6], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.h }[6], [x22]\n"
+ "add x20, x20, x14\n"
"st1 { v30.h }[6], [x21]\n"
"st1 { v31.h }[6], [x20]\n"
"b 139f\n"
@@ -1796,50 +1796,50 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"tbz %x[n_channels], #0, 139f\n"
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.h }[4], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.h }[4], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.h }[4], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.h }[4], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.h }[4], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.h }[4], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.h }[4], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.h }[4], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.h }[4], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.h }[4], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.h }[4], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.h }[4], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.h }[4], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.h }[4], [x22]\n"
+ "add x20, x20, x14\n"
"st1 { v30.h }[4], [x21]\n"
"st1 { v31.h }[4], [x20]\n"
"b 139f\n"
@@ -1847,150 +1847,150 @@ void a64_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"tbz %x[n_channels], #1, 138f\n"
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.s }[0], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.s }[0], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.s }[0], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.s }[0], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.s }[0], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.s }[0], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.s }[0], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.s }[0], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.s }[0], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.s }[0], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.s }[0], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.s }[0], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
- "add x14, x14, #0x4\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.s }[0], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.s }[0], [x22]\n"
+ "add x20, x20, x14\n"
+ "add x14, x14, #0x4\n"
"st1 { v30.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"tbz %x[n_channels], #0, 139f\n"
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.h }[2], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.h }[2], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.h }[2], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.h }[2], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.h }[2], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.h }[2], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.h }[2], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.h }[2], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.h }[2], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.h }[2], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.h }[2], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.h }[2], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.h }[2], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.h }[2], [x22]\n"
+ "add x20, x20, x14\n"
"st1 { v30.h }[2], [x21]\n"
"st1 { v31.h }[2], [x20]\n"
"b 139f\n"
"138:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.h }[0], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.h }[0], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.h }[0], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.h }[0], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.h }[0], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.h }[0], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.h }[0], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.h }[0], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.h }[0], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.h }[0], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.h }[0], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.h }[0], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.h }[0], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.h }[0], [x22]\n"
+ "add x20, x20, x14\n"
"st1 { v30.h }[0], [x21]\n"
"st1 { v31.h }[0], [x20]\n"
"139:" // Oddments: Store: Bit 2: End
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index 8954999990..badc0ddf36 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,259 +87,259 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x23, #0x0\n"
- "mov x27, #0x0\n"
+ "mov x11, #0x0\n"
+ "mov x10, #0x0\n"
"1:" // Tile loop
- "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x26, #0x4\n"
- "mov x25, #0x2\n"
- "str x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "str x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x9, #0x4\n"
+ "mov x28, #0x2\n"
+ "str x10, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x27, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
"ldr x6, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x23, x24\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x27, x6, x22\n" // offset += tile_j * ld_input_col
+ "mov x26, #0x10\n" // cntb _, ALL, #1
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"ldr x7, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "lsl x6, x6, #0x1\n"
- "mul x20, x23, x21\n" // offset = tile_i * ld_output_row
- "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
- "add x8, x8, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x16, x8, x24, LSL #1\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x27, x7, x20\n" // offset += tile_j * ld_output_col
- "lsr x22, %x[n_channels], #0x3\n"
- "add x14, x16, x24, LSL #1\n"
- "mul x20, x20, x25\n" // offset *= output_tile_size
- "add x13, x6, x6\n"
- "add x12, x14, x24, LSL #1\n"
- "add x11, x13, x6\n"
- "add x17, x17, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "lsr x24, %x[n_channels], #0x3\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
"ld1r { v26.8h }, [x20]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x23, #0x0\n"
"ld1r { v27.8h }, [x20]\n"
- "add x10, x12, x24, LSL #1\n"
- "add x9, x11, x6\n"
- "add x28, x17, x21, LSL #1\n"
+ "mul x22, x11, x27\n" // offset = tile_i * ld_input_row
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x21, XZR, x26\n"
+ "mul x20, x11, x25\n" // offset = tile_i * ld_output_row
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x22, x10, x6, x22\n" // offset += tile_j * ld_input_col
+ "lsl x6, x6, #0x1\n"
+ "madd x20, x10, x7, x20\n" // offset += tile_j * ld_output_col
"lsl x7, x7, #0x1\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q31, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "add x15, x15, #0xa0\n"
- "ldr q9, [x14, x13]\n"
+ "mul x22, x22, x9\n" // offset *= kernel_stride * output_size
+ "add x15, x6, x6\n"
+ "add x14, x15, x6\n"
+ "add x13, x14, x6\n"
+ "mul x20, x20, x28\n" // offset *= output_tile_size
+ "add x8, x8, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x12, x8, x27, LSL #1\n"
+ "add x11, x12, x27, LSL #1\n"
+ "add x10, x11, x27, LSL #1\n"
+ "add x17, x17, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x9, x10, x27, LSL #1\n"
+ "add x28, x17, x25, LSL #1\n"
+ "cbz x24, 4f\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "cmp x26, x24, LSL #4\n"
+ "ldr q1, [x16, #0x20]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldr q3, [x16, #0x40]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "ldr q5, [x16, #0x60]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "ldr q7, [x16, #0x80]\n"
+ "ldr q8, [x16, #0x90]\n"
+ "add x16, x16, #0xa0\n"
+ "ldr q9, [x11, x15]\n"
"ld1 { v10.8h }, [x8]\n"
"ldr q11, [x8, x6]\n"
- "ldr q12, [x8, x11]\n"
- "ldr q13, [x8, x9]\n"
- "ld1 { v14.8h }, [x16]\n"
- "ldr q15, [x16, x6]\n"
- "ldr q16, [x8, x13]\n"
+ "ldr q12, [x8, x14]\n"
+ "ldr q13, [x8, x13]\n"
+ "ld1 { v14.8h }, [x12]\n"
+ "ldr q15, [x12, x6]\n"
+ "ldr q16, [x8, x15]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v29.16b, v31.16b\n fmla v29.8h, v8.8h, v9.8h\n"
"mov v28.16b, v31.16b\n fmla v28.8h, v6.8h, v9.8h\n"
- "add x23, x23, #0x10\n"
+ "add x26, x26, #0x10\n"
"add x8, x8, #0x10\n"
+ "mov v25.16b, v31.16b\n fmla v25.8h, v2.8h, v9.8h\n"
+ "mov v24.16b, v31.16b\n fmla v24.8h, v0.8h, v9.8h\n"
+ "ldr q31, [x16, #0x0]\n"
+ "cmp x26, x24, LSL #4\n"
+ "add x21, x21, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v29.8h, v0.8h, v10.8h\n"
"ld1 { v10.8h }, [x8]\n"
"fmla v28.8h, v1.8h, v12.8h\n"
- "ldr q21, [x16, x9]\n"
+ "ldr q21, [x12, x13]\n"
"fmla v29.8h, v1.8h, v11.8h\n"
- "ldr q18, [x16, x11]\n"
+ "ldr q18, [x12, x14]\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "ldr q17, [x16, x13]\n"
+ "ldr q20, [x12, x15]\n"
+ "add x12, x12, #0x10\n"
"fmla v29.8h, v3.8h, v14.8h\n"
- "ld1 { v20.8h }, [x12]\n"
+ "ld1 { v17.8h }, [x10]\n"
"fmla v28.8h, v0.8h, v16.8h\n"
- "add x16, x16, #0x10\n"
"fmla v29.8h, v4.8h, v15.8h\n"
- "ld1 { v25.8h }, [x14]\n"
+ "ld1 { v23.8h }, [x11]\n"
+ "fmla v25.8h, v3.8h, v17.8h\n"
+ "ldr q19, [x10, x13]\n"
"fmla v28.8h, v4.8h, v18.8h\n"
- "ldr q19, [x12, x6]\n"
+ "ldr q17, [x10, x6]\n"
"fmla v29.8h, v2.8h, v16.8h\n"
- "ldr q18, [x14, x6]\n"
+ "ldr q22, [x11, x6]\n"
"fmla v28.8h, v5.8h, v21.8h\n"
- "ldr q24, [x14, x11]\n"
- "mov v23.16b, v31.16b\n fmla v23.8h, v2.8h, v9.8h\n"
- "mov v22.16b, v31.16b\n fmla v22.8h, v0.8h, v9.8h\n"
- "ldr q31, [x15, #0x0]\n"
- "cmp x23, x22, LSL #4\n"
- "fmla v29.8h, v5.8h, v17.8h\n"
- "fmla v28.8h, v3.8h, v17.8h\n"
- "ldr q17, [x12, x11]\n"
- "add x20, x20, #0x10\n"
- "fmla v23.8h, v3.8h, v20.8h\n"
- "ldr q16, [x12, x9]\n"
- "fmla v22.8h, v4.8h, v17.8h\n"
- "ldr q21, [x10, x6]\n"
- "fmla v23.8h, v0.8h, v25.8h\n"
- "ldr q0, [x15, #0x10]\n"
- "fmla v22.8h, v1.8h, v24.8h\n"
- "add x21, x21, #0x10\n"
- "fmla v23.8h, v4.8h, v19.8h\n"
- "ldr q20, [x14, x9]\n"
- "ldr q4, [x15, #0x50]\n"
- "fmla v22.8h, v5.8h, v16.8h\n"
- "ldr q19, [x10, x11]\n"
- "fmla v29.8h, v6.8h, v25.8h\n"
- "ld1 { v17.8h }, [x10]\n"
- "fmla v23.8h, v1.8h, v18.8h\n"
- "ldr q1, [x15, #0x20]\n"
- "fmla v22.8h, v2.8h, v20.8h\n"
- "ldr q2, [x15, #0x30]\n"
- "fmla v29.8h, v7.8h, v18.8h\n"
- "ldr q16, [x12, x13]\n"
- "fmla v23.8h, v6.8h, v17.8h\n"
- "ldr q18, [x10, x13]\n"
- "fmla v22.8h, v3.8h, v16.8h\n"
- "ldr q3, [x15, #0x40]\n"
- "fmla v23.8h, v7.8h, v21.8h\n"
- "ldr q13, [x8, x9]\n"
- "fmla v22.8h, v7.8h, v19.8h\n"
- "ld1 { v14.8h }, [x16]\n"
- "fmla v28.8h, v7.8h, v24.8h\n"
- "ldr q12, [x8, x11]\n"
- "fmla v23.8h, v5.8h, v16.8h\n"
- "ldr q16, [x8, x13]\n"
- "ldr q5, [x15, #0x60]\n"
- "fmla v22.8h, v6.8h, v18.8h\n"
+ "ldr q18, [x11, x14]\n"
+ "fmla v25.8h, v0.8h, v23.8h\n"
+ "ldr q0, [x16, #0x10]\n"
+ "fmla v29.8h, v5.8h, v20.8h\n"
+ "fmla v28.8h, v3.8h, v20.8h\n"
+ "ldr q16, [x10, x14]\n"
+ "fmla v24.8h, v4.8h, v16.8h\n"
+ "ldr q21, [x9, x6]\n"
+ "fmla v25.8h, v4.8h, v17.8h\n"
+ "ldr q20, [x11, x13]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "add x11, x11, #0x10\n"
+ "ldr q9, [x11, x15]\n"
+ "fmla v29.8h, v6.8h, v23.8h\n"
+ "ld1 { v17.8h }, [x9]\n"
+ "fmla v24.8h, v1.8h, v18.8h\n"
+ "fmla v28.8h, v7.8h, v18.8h\n"
+ "ldr q12, [x8, x14]\n"
+ "fmla v25.8h, v1.8h, v22.8h\n"
+ "ldr q1, [x16, #0x20]\n"
+ "fmla v24.8h, v5.8h, v19.8h\n"
+ "ldr q19, [x9, x14]\n"
+ "fmla v29.8h, v7.8h, v22.8h\n"
+ "ldr q16, [x10, x15]\n"
+ "add x10, x10, #0x10\n"
+ "fmla v25.8h, v6.8h, v17.8h\n"
+ "ldr q18, [x9, x15]\n"
"fmla v28.8h, v8.8h, v20.8h\n"
- "ldr q17, [x10, x9]\n"
- "ldr q6, [x15, #0x70]\n"
- "fmla v23.8h, v8.8h, v18.8h\n"
- "fmla v22.8h, v8.8h, v17.8h\n"
- "ldr q11, [x8, x6]\n"
- "ldr q15, [x16, x6]\n"
+ "fmla v24.8h, v2.8h, v20.8h\n"
+ "ldr q17, [x9, x13]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "add x9, x9, #0x10\n"
"fmax v29.8h, v29.8h, v26.8h\n"
+ "fmla v25.8h, v7.8h, v21.8h\n"
+ "ldr q13, [x8, x13]\n"
"fmax v28.8h, v28.8h, v26.8h\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "fmax v23.8h, v23.8h, v26.8h\n"
- "fmax v22.8h, v22.8h, v26.8h\n"
- "add x14, x14, #0x10\n"
- "ldr q9, [x14, x13]\n"
+ "fmla v24.8h, v3.8h, v16.8h\n"
+ "ldr q3, [x16, #0x40]\n"
"fmin v29.8h, v29.8h, v27.8h\n"
+ "fmla v25.8h, v5.8h, v16.8h\n"
+ "ldr q16, [x8, x15]\n"
+ "ldr q5, [x16, #0x60]\n"
"fmin v28.8h, v28.8h, v27.8h\n"
- "fmin v23.8h, v23.8h, v27.8h\n"
- "fmin v22.8h, v22.8h, v27.8h\n"
- "add x12, x12, #0x10\n"
- "add x10, x10, #0x10\n"
+ "fmla v24.8h, v7.8h, v19.8h\n"
+ "ld1 { v14.8h }, [x12]\n"
+ "ldr q7, [x16, #0x80]\n"
"st1 { v29.8h }, [x17]\n"
- "add x15, x15, #0xa0\n"
+ "fmla v25.8h, v8.8h, v18.8h\n"
"str q28, [x17, x7]\n"
"add x17, x17, #0x10\n"
- "st1 { v23.8h }, [x28]\n"
- "str q22, [x28, x7]\n"
+ "fmla v24.8h, v6.8h, v18.8h\n"
+ "ldr q15, [x12, x6]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "fmax v25.8h, v25.8h, v26.8h\n"
+ "fmla v24.8h, v8.8h, v17.8h\n"
+ "ldr q11, [x8, x6]\n"
+ "ldr q8, [x16, #0x90]\n"
+ "add x16, x16, #0xa0\n"
+ "fmin v25.8h, v25.8h, v27.8h\n"
+ "fmax v24.8h, v24.8h, v26.8h\n"
+ "fmin v24.8h, v24.8h, v27.8h\n"
+ "st1 { v25.8h }, [x28]\n"
+ "str q24, [x28, x7]\n"
"add x28, x28, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v29.16b, v31.16b\n fmla v29.8h, v8.8h, v9.8h\n"
- "mov v28.16b, v31.16b\n fmla v28.8h, v6.8h, v9.8h\n"
+ "mov v28.16b, v31.16b\n fmla v28.8h, v8.8h, v9.8h\n"
+ "mov v29.16b, v31.16b\n fmla v29.8h, v6.8h, v9.8h\n"
"add x8, x8, #0x10\n"
- "fmla v29.8h, v0.8h, v10.8h\n"
- "fmla v28.8h, v1.8h, v12.8h\n"
- "ldr q20, [x16, x9]\n"
- "fmla v29.8h, v1.8h, v11.8h\n"
- "ldr q18, [x16, x11]\n"
- "fmla v28.8h, v2.8h, v13.8h\n"
- "ldr q17, [x16, x13]\n"
- "fmla v29.8h, v3.8h, v14.8h\n"
- "ld1 { v19.8h }, [x12]\n"
- "fmla v28.8h, v0.8h, v16.8h\n"
- "add x16, x16, #0x10\n"
- "fmla v29.8h, v4.8h, v15.8h\n"
- "ld1 { v25.8h }, [x14]\n"
- "fmla v28.8h, v4.8h, v18.8h\n"
- "ldr q18, [x12, x6]\n"
- "fmla v29.8h, v2.8h, v16.8h\n"
- "ldr q24, [x14, x6]\n"
- "fmla v28.8h, v5.8h, v20.8h\n"
- "ldr q23, [x14, x11]\n"
- "mov v22.16b, v31.16b\n fmla v22.8h, v2.8h, v9.8h\n"
- "mov v21.16b, v31.16b\n fmla v21.8h, v0.8h, v9.8h\n"
- "fmla v29.8h, v5.8h, v17.8h\n"
- "fmla v28.8h, v3.8h, v17.8h\n"
- "ldr q17, [x12, x11]\n"
- "fmla v22.8h, v3.8h, v19.8h\n"
- "ldr q16, [x12, x9]\n"
- "fmla v21.8h, v4.8h, v17.8h\n"
- "ldr q20, [x10, x6]\n"
- "fmla v22.8h, v0.8h, v25.8h\n"
- "fmla v21.8h, v1.8h, v23.8h\n"
- "fmla v22.8h, v4.8h, v18.8h\n"
- "ldr q19, [x14, x9]\n"
- "fmla v21.8h, v5.8h, v16.8h\n"
- "ldr q18, [x10, x11]\n"
- "fmla v29.8h, v6.8h, v25.8h\n"
- "ld1 { v17.8h }, [x10]\n"
- "fmla v22.8h, v1.8h, v24.8h\n"
- "add x14, x14, #0x10\n"
- "fmla v21.8h, v2.8h, v19.8h\n"
- "fmla v29.8h, v7.8h, v24.8h\n"
- "ldr q16, [x12, x13]\n"
- "fmax v29.8h, v29.8h, v26.8h\n"
- "fmla v22.8h, v6.8h, v17.8h\n"
- "ldr q17, [x10, x13]\n"
- "fmla v21.8h, v3.8h, v16.8h\n"
- "fmin v29.8h, v29.8h, v27.8h\n"
- "fmla v22.8h, v7.8h, v20.8h\n"
- "fmla v21.8h, v7.8h, v18.8h\n"
- "st1 { v29.8h }, [x17]\n"
+ "mov v25.16b, v31.16b\n fmla v25.8h, v2.8h, v9.8h\n"
+ "mov v24.16b, v31.16b\n fmla v24.8h, v0.8h, v9.8h\n"
+ "fmla v28.8h, v0.8h, v10.8h\n"
+ "fmla v29.8h, v1.8h, v12.8h\n"
+ "ldr q21, [x12, x13]\n"
+ "fmla v28.8h, v1.8h, v11.8h\n"
+ "ldr q18, [x12, x14]\n"
+ "fmla v29.8h, v2.8h, v13.8h\n"
+ "ldr q20, [x12, x15]\n"
"add x12, x12, #0x10\n"
- "fmla v28.8h, v7.8h, v23.8h\n"
- "fmla v22.8h, v5.8h, v16.8h\n"
- "fmla v21.8h, v6.8h, v17.8h\n"
- "fmla v28.8h, v8.8h, v19.8h\n"
- "ldr q16, [x10, x9]\n"
- "fmax v28.8h, v28.8h, v26.8h\n"
- "fmla v22.8h, v8.8h, v17.8h\n"
- "fmla v21.8h, v8.8h, v16.8h\n"
- "fmax v22.8h, v22.8h, v26.8h\n"
+ "fmla v28.8h, v3.8h, v14.8h\n"
+ "ld1 { v17.8h }, [x10]\n"
+ "fmla v29.8h, v0.8h, v16.8h\n"
+ "fmla v25.8h, v3.8h, v17.8h\n"
+ "ldr q23, [x10, x13]\n"
+ "fmla v28.8h, v4.8h, v15.8h\n"
+ "ld1 { v22.8h }, [x11]\n"
+ "fmla v29.8h, v4.8h, v18.8h\n"
+ "ldr q19, [x10, x6]\n"
+ "fmla v28.8h, v2.8h, v16.8h\n"
+ "ldr q18, [x11, x6]\n"
+ "fmla v25.8h, v0.8h, v22.8h\n"
+ "fmla v29.8h, v5.8h, v21.8h\n"
+ "ldr q17, [x11, x14]\n"
+ "fmla v28.8h, v5.8h, v20.8h\n"
+ "fmla v29.8h, v3.8h, v20.8h\n"
+ "ldr q16, [x10, x14]\n"
+ "fmla v24.8h, v4.8h, v16.8h\n"
+ "ldr q21, [x9, x6]\n"
+ "fmla v25.8h, v4.8h, v19.8h\n"
+ "ldr q20, [x11, x13]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v28.8h, v6.8h, v22.8h\n"
+ "ld1 { v16.8h }, [x9]\n"
+ "fmla v29.8h, v7.8h, v17.8h\n"
+ "fmla v24.8h, v1.8h, v17.8h\n"
+ "fmla v25.8h, v1.8h, v18.8h\n"
+ "fmla v28.8h, v7.8h, v18.8h\n"
+ "ldr q19, [x10, x15]\n"
"add x10, x10, #0x10\n"
- "fmax v21.8h, v21.8h, v26.8h\n"
+ "fmla v29.8h, v8.8h, v20.8h\n"
+ "fmla v24.8h, v5.8h, v23.8h\n"
+ "ldr q18, [x9, x14]\n"
+ "fmla v25.8h, v6.8h, v16.8h\n"
+ "ldr q17, [x9, x15]\n"
+ "fmax v28.8h, v28.8h, v26.8h\n"
+ "fmax v29.8h, v29.8h, v26.8h\n"
+ "fmla v24.8h, v2.8h, v20.8h\n"
+ "ldr q16, [x9, x13]\n"
+ "add x9, x9, #0x10\n"
+ "fmla v25.8h, v7.8h, v21.8h\n"
"fmin v28.8h, v28.8h, v27.8h\n"
- "str q28, [x17, x7]\n"
+ "fmin v29.8h, v29.8h, v27.8h\n"
+ "fmla v24.8h, v3.8h, v19.8h\n"
+ "st1 { v28.8h }, [x17]\n"
+ "fmla v25.8h, v5.8h, v19.8h\n"
+ "str q29, [x17, x7]\n"
"add x17, x17, #0x10\n"
- "fmin v22.8h, v22.8h, v27.8h\n"
- "fmin v21.8h, v21.8h, v27.8h\n"
- "st1 { v22.8h }, [x28]\n"
- "str q21, [x28, x7]\n"
+ "fmla v24.8h, v7.8h, v18.8h\n"
+ "fmla v25.8h, v8.8h, v17.8h\n"
+ "fmla v24.8h, v6.8h, v17.8h\n"
+ "fmax v25.8h, v25.8h, v26.8h\n"
+ "fmin v25.8h, v25.8h, v27.8h\n"
+ "fmla v24.8h, v8.8h, v16.8h\n"
+ "st1 { v25.8h }, [x28]\n"
+ "fmax v24.8h, v24.8h, v26.8h\n"
+ "fmin v24.8h, v24.8h, v27.8h\n"
+ "str q24, [x28, x7]\n"
"add x28, x28, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 81f\n"
- "ldr q31, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "add x27, x14, x13\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "add x27, x11, x15\n"
"add x26, x8, XZR\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
+ "ldr q1, [x16, #0x20]\n"
+ "ldr q2, [x16, #0x30]\n"
"add x25, x8, x6\n"
- "add x24, x8, x11\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "add x23, x8, x9\n"
- "add x22, x16, XZR\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "add x21, x16, x6\n"
- "add x20, x8, x13\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
+ "add x24, x8, x14\n"
+ "ldr q3, [x16, #0x40]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "add x23, x8, x13\n"
+ "add x22, x12, XZR\n"
+ "ldr q5, [x16, #0x60]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "add x21, x12, x6\n"
+ "add x20, x8, x15\n"
+ "ldr q7, [x16, #0x80]\n"
+ "ldr q8, [x16, #0x90]\n"
"tbz %x[n_channels], #2, 6f\n"
"ldr d9, [x27], #0x8\n"
"ldr d10, [x26], #0x8\n"
@@ -410,18 +410,18 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h16, [x20, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: End
"mov v28.16b, v31.16b\n fmla v28.8h, v8.8h, v9.8h\n"
- "fmla v28.8h, v0.8h, v10.8h\n"
- "add x20, x16, x11\n"
"mov v29.16b, v31.16b\n fmla v29.8h, v6.8h, v9.8h\n"
- "fmla v28.8h, v1.8h, v11.8h\n"
+ "add x20, x12, x14\n"
+ "mov v30.16b, v31.16b\n fmla v30.8h, v2.8h, v9.8h\n"
+ "fmla v31.8h, v0.8h, v9.8h\n"
+ "fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
- "fmla v28.8h, v3.8h, v14.8h\n"
+ "fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v2.8h, v13.8h\n"
+ "fmla v28.8h, v3.8h, v14.8h\n"
+ "fmla v29.8h, v0.8h, v16.8h\n"
"fmla v28.8h, v4.8h, v15.8h\n"
- "mov v30.16b, v31.16b\n fmla v30.8h, v2.8h, v9.8h\n"
- "fmla v31.8h, v0.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v16.8h\n"
- "fmla v29.8h, v0.8h, v16.8h\n"
"tbz %x[n_channels], #2, 10f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 9f\n"
@@ -443,7 +443,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h11, [x20, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v29.8h, v4.8h, v11.8h\n"
- "add x20, x16, x9\n"
+ "add x20, x12, x13\n"
"tbz %x[n_channels], #2, 14f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 13f\n"
@@ -465,7 +465,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h12, [x20, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: End
"fmla v29.8h, v5.8h, v12.8h\n"
- "add x20, x16, x13\n"
+ "add x20, x12, x15\n"
"tbz %x[n_channels], #2, 18f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #1, 17f\n"
@@ -488,7 +488,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"20:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 2: End
"fmla v28.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "add x20, x12, XZR\n"
+ "add x20, x10, XZR\n"
"tbz %x[n_channels], #2, 22f\n"
"ldr d14, [x20], #0x8\n"
"tbz %x[n_channels], #1, 21f\n"
@@ -510,7 +510,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h14, [x20, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v30.8h, v3.8h, v14.8h\n"
- "add x20, x14, XZR\n"
+ "add x20, x11, XZR\n"
"tbz %x[n_channels], #2, 26f\n"
"ldr d15, [x20], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
@@ -533,7 +533,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"28:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 2: End
"fmla v28.8h, v6.8h, v15.8h\n"
"fmla v30.8h, v0.8h, v15.8h\n"
- "add x20, x12, x6\n"
+ "add x20, x10, x6\n"
"tbz %x[n_channels], #2, 30f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 29f\n"
@@ -555,7 +555,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h11, [x20, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
"fmla v30.8h, v4.8h, v11.8h\n"
- "add x20, x14, x6\n"
+ "add x20, x11, x6\n"
"tbz %x[n_channels], #2, 34f\n"
"ldr d16, [x20], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
@@ -578,7 +578,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"36:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: End
"fmla v28.8h, v7.8h, v16.8h\n"
"fmla v30.8h, v1.8h, v16.8h\n"
- "add x20, x12, x11\n"
+ "add x20, x10, x14\n"
"tbz %x[n_channels], #2, 38f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
@@ -600,7 +600,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h13, [x20, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
"fmla v31.8h, v4.8h, v13.8h\n"
- "add x20, x14, x11\n"
+ "add x20, x11, x14\n"
"tbz %x[n_channels], #2, 42f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
@@ -623,7 +623,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"44:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: End
"fmla v29.8h, v7.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "add x20, x12, x9\n"
+ "add x20, x10, x13\n"
"tbz %x[n_channels], #2, 46f\n"
"ldr d14, [x20], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
@@ -645,7 +645,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h14, [x20, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: End
"fmla v31.8h, v5.8h, v14.8h\n"
- "add x20, x10, XZR\n"
+ "add x20, x9, XZR\n"
"tbz %x[n_channels], #2, 50f\n"
"ldr d15, [x20], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
@@ -667,7 +667,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h15, [x20, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: End
"fmla v30.8h, v6.8h, v15.8h\n"
- "add x20, x14, x9\n"
+ "add x20, x11, x13\n"
"tbz %x[n_channels], #2, 54f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 53f\n"
@@ -690,7 +690,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"56:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: End
"fmla v29.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "add x20, x10, x6\n"
+ "add x20, x9, x6\n"
"tbz %x[n_channels], #2, 58f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #1, 57f\n"
@@ -712,7 +712,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h13, [x20, #0x0]\n"
"60:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: End
"fmla v30.8h, v7.8h, v13.8h\n"
- "add x20, x12, x13\n"
+ "add x20, x10, x15\n"
"tbz %x[n_channels], #2, 62f\n"
"ldr d16, [x20], #0x8\n"
"tbz %x[n_channels], #1, 61f\n"
@@ -735,7 +735,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"64:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: End
"fmla v30.8h, v5.8h, v16.8h\n"
"fmla v31.8h, v3.8h, v16.8h\n"
- "add x20, x10, x11\n"
+ "add x20, x9, x14\n"
"tbz %x[n_channels], #2, 66f\n"
"ldr d14, [x20], #0x8\n"
"tbz %x[n_channels], #1, 65f\n"
@@ -757,7 +757,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr h14, [x20, #0x0]\n"
"68:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: End
"fmla v31.8h, v7.8h, v14.8h\n"
- "add x20, x10, x13\n"
+ "add x20, x9, x15\n"
"tbz %x[n_channels], #2, 70f\n"
"ldr d15, [x20], #0x8\n"
"tbz %x[n_channels], #1, 69f\n"
@@ -780,7 +780,7 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"72:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: End
"fmla v30.8h, v8.8h, v15.8h\n"
"fmla v31.8h, v6.8h, v15.8h\n"
- "add x20, x10, x9\n"
+ "add x20, x9, x13\n"
"tbz %x[n_channels], #2, 74f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 73f\n"
@@ -805,27 +805,27 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"fmax v28.8h, v28.8h, v26.8h\n"
"fmax v29.8h, v29.8h, v26.8h\n"
"fmax v30.8h, v30.8h, v26.8h\n"
- "fmax v31.8h, v31.8h, v26.8h\n"
"fmin v28.8h, v28.8h, v27.8h\n"
+ "fmax v31.8h, v31.8h, v26.8h\n"
"fmin v29.8h, v29.8h, v27.8h\n"
"fmin v30.8h, v30.8h, v27.8h\n"
"fmin v31.8h, v31.8h, v27.8h\n"
"tbz %x[n_channels], #2, 78f\n"
"mov x21, x17\n"
"mov x20, x28\n"
- "st1 { v28.d }[0], [x21], x7\n"
- "st1 { v30.d }[0], [x20], x7\n"
"add x17, x17, #0x8\n"
"add x28, x28, #0x8\n"
+ "st1 { v28.d }[0], [x21], x7\n"
+ "st1 { v30.d }[0], [x20], x7\n"
"st1 { v29.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #1, 77f\n"
"mov x21, x17\n"
"mov x20, x28\n"
- "st1 { v28.s }[2], [x21], x7\n"
- "st1 { v30.s }[2], [x20], x7\n"
"add x17, x17, #0x4\n"
"add x28, x28, #0x4\n"
+ "st1 { v28.s }[2], [x21], x7\n"
+ "st1 { v30.s }[2], [x20], x7\n"
"st1 { v29.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"tbz %x[n_channels], #0, 80f\n"
@@ -849,10 +849,10 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"tbz %x[n_channels], #1, 79f\n"
"mov x21, x17\n"
"mov x20, x28\n"
- "st1 { v28.s }[0], [x21], x7\n"
- "st1 { v30.s }[0], [x20], x7\n"
"add x17, x17, #0x4\n"
"add x28, x28, #0x4\n"
+ "st1 { v28.s }[0], [x21], x7\n"
+ "st1 { v30.s }[0], [x20], x7\n"
"st1 { v29.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"tbz %x[n_channels], #0, 80f\n"
@@ -872,16 +872,16 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"st1 { v31.h }[0], [x20]\n"
"80:" // Tile loop: Oddments: Store: Bit 2: End
"81:" // Tile loop: End
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x27, x27, #0x1\n"
- "add x21, x23, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x27, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x23, x23, x21, LT\n"
- "csel x27, x27, XZR, LT\n"
- "cmp x23, x20\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x10, x10, #0x1\n"
+ "add x20, x11, #0x1\n"
+ "cmp x10, x22\n"
+ "csel x11, x11, x20, LT\n"
+ "csel x10, x10, XZR, LT\n"
+ "cmp x11, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index 6ae0b30afd..87a75b1026 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,275 +87,275 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x25, #0x10\n" // cntb _, ALL, #1
- "lsr x24, %x[n_channels], #0x3\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v26.8h }, [x20]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "mov x8, #0x10\n" // cntb _, ALL, #1
+ "lsr x17, %x[n_channels], #0x3\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v26.8h }, [x21]\n"
"ld1r { v27.8h }, [x20]\n"
- "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "mov x28, #0x0\n"
- "sub x22, XZR, x25\n"
- "cbz x24, 3f\n"
- "ldr q31, [x23, #0x0]\n"
- "ldr q0, [x23, #0x10]\n"
- "cmp x25, x24, LSL #4\n"
- "ldr q1, [x23, #0x20]\n"
- "ldr q2, [x23, #0x30]\n"
- "ldr q3, [x23, #0x40]\n"
- "ldr q4, [x23, #0x50]\n"
- "ldr q5, [x23, #0x60]\n"
- "ldr q6, [x23, #0x70]\n"
- "ldr q7, [x23, #0x80]\n"
- "ldr q8, [x23, #0x90]\n"
- "add x23, x23, #0xa0\n"
- "ldp x21, x20, [x13, #0x0]\n"
- "ldr q9, [x21, x28]\n"
- "ldr q10, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "ldr q11, [x21, x28]\n"
- "ldr q12, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x20]\n"
- "ldr q13, [x21, x28]\n"
- "ldr q14, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x30]\n"
- "ldr q15, [x21, x28]\n"
- "ldr q16, [x20, x28]\n"
+ "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x14, #0x0\n"
+ "ldp x13, x12, [x22, #0x0]\n"
+ "ldp x11, x10, [x22, #0x10]\n"
+ "sub x9, XZR, x8\n"
+ "cbz x17, 3f\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "cmp x8, x17, LSL #4\n"
+ "ldr q1, [x16, #0x20]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldr q3, [x16, #0x40]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "ldr q5, [x16, #0x60]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "ldr q7, [x16, #0x80]\n"
+ "ldr q8, [x16, #0x90]\n"
+ "add x16, x16, #0xa0\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "ldr q9, [x27, x14]\n"
+ "ldr q10, [x26, x14]\n"
+ "ldr q11, [x25, x14]\n"
+ "ldr q12, [x24, x14]\n"
+ "ldr q13, [x23, x14]\n"
+ "ldr q14, [x22, x14]\n"
+ "ldr q15, [x21, x14]\n"
+ "ldr q16, [x20, x14]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v24.16b, v31.16b\n fmla v24.8h, v8.8h, v9.8h\n"
- "mov v23.16b, v31.16b\n fmla v23.8h, v6.8h, v9.8h\n"
- "ldr x21, [x13, #0x40]\n"
- "ldr x20, [x13, #0x48]\n"
- "fmla v24.8h, v0.8h, v10.8h\n"
- "fmla v23.8h, v1.8h, v12.8h\n"
- "ldr q20, [x20, x28]\n"
- "ldr x20, [x13, #0x50]\n"
- "fmla v24.8h, v1.8h, v11.8h\n"
- "ldr q19, [x21, x28]\n"
- "fmla v23.8h, v2.8h, v13.8h\n"
- "ldr q18, [x20, x28]\n"
- "fmla v24.8h, v3.8h, v14.8h\n"
- "fmla v23.8h, v0.8h, v16.8h\n"
- "ldr x20, [x13, #0x58]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v24.8h, v4.8h, v15.8h\n"
- "fmla v23.8h, v4.8h, v19.8h\n"
- "ldr x21, [x13, #0x78]\n"
- "ldr x20, [x13, #0x60]\n"
- "ldr q22, [x20, x28]\n"
- "fmla v24.8h, v2.8h, v16.8h\n"
- "fmla v23.8h, v5.8h, v20.8h\n"
- "ldr x20, [x13, #0x80]\n"
- "ldr q21, [x20, x28]\n"
- "mov v20.16b, v31.16b\n fmla v20.8h, v2.8h, v9.8h\n"
- "mov v19.16b, v31.16b\n fmla v19.8h, v0.8h, v9.8h\n"
- "ldr q31, [x23, #0x0]\n"
- "fmla v24.8h, v5.8h, v18.8h\n"
- "fmla v23.8h, v3.8h, v18.8h\n"
- "ldr q16, [x21, x28]\n"
- "ldr x20, [x13, #0x68]\n"
- "ldr q18, [x20, x28]\n"
- "fmla v20.8h, v3.8h, v17.8h\n"
- "fmla v19.8h, v4.8h, v16.8h\n"
- "ldr x20, [x13, #0x88]\n"
- "ldr q16, [x20, x28]\n"
- "fmla v20.8h, v0.8h, v22.8h\n"
- "ldr q0, [x23, #0x10]\n"
- "fmla v19.8h, v1.8h, v21.8h\n"
- "ldr x20, [x13, #0x70]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v20.8h, v4.8h, v18.8h\n"
- "fmla v19.8h, v5.8h, v16.8h\n"
- "ldr q4, [x23, #0x50]\n"
- "ldr x20, [x13, #0x98]\n"
- "fmla v24.8h, v6.8h, v22.8h\n"
- "fmla v20.8h, v1.8h, v17.8h\n"
- "ldr q16, [x20, x28]\n"
- "ldr q1, [x23, #0x20]\n"
- "fmla v19.8h, v2.8h, v16.8h\n"
- "fmla v24.8h, v7.8h, v17.8h\n"
- "ldr q2, [x23, #0x30]\n"
- "ldr x20, [x13, #0x90]\n"
- "fmla v23.8h, v7.8h, v21.8h\n"
- "fmla v23.8h, v8.8h, v16.8h\n"
- "ldr q16, [x20, x28]\n"
- "ldr x20, [x13, #0xa8]\n"
- "fmla v20.8h, v6.8h, v16.8h\n"
+ "mov v29.16b, v31.16b\n fmla v29.8h, v8.8h, v9.8h\n"
+ "mov v28.16b, v31.16b\n fmla v28.8h, v6.8h, v9.8h\n"
+ "ldr x28, [x15, #0x40]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "ldr x25, [x15, #0x50]\n"
+ "ldr x20, [x15, #0x58]\n"
+ "mov v25.16b, v31.16b\n fmla v25.8h, v2.8h, v9.8h\n"
+ "mov v24.16b, v31.16b\n fmla v24.8h, v0.8h, v9.8h\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr x27, [x15, #0x78]\n"
+ "add x9, x9, #0x10\n"
+ "ldr x24, [x15, #0x60]\n"
+ "ldr x26, [x15, #0x68]\n"
+ "fmla v29.8h, v0.8h, v10.8h\n"
+ "fmla v28.8h, v1.8h, v12.8h\n"
+ "ldr q21, [x21, x14]\n"
+ "ldr x23, [x15, #0x88]\n"
+ "ldr x22, [x15, #0x70]\n"
+ "fmla v29.8h, v1.8h, v11.8h\n"
+ "ldr q18, [x28, x14]\n"
+ "ldr x21, [x15, #0x80]\n"
+ "fmla v28.8h, v2.8h, v13.8h\n"
+ "ldr q20, [x25, x14]\n"
+ "ldr x25, [x15, #0x90]\n"
+ "fmla v29.8h, v3.8h, v14.8h\n"
+ "ldr q17, [x20, x14]\n"
+ "ldr x20, [x15, #0x98]\n"
+ "fmla v28.8h, v0.8h, v16.8h\n"
+ "fmla v25.8h, v3.8h, v17.8h\n"
+ "ldr q23, [x23, x14]\n"
+ "fmla v29.8h, v4.8h, v15.8h\n"
+ "ldr q22, [x24, x14]\n"
+ "ldr x24, [x15, #0xa0]\n"
+ "fmla v28.8h, v4.8h, v18.8h\n"
+ "ldr q17, [x26, x14]\n"
+ "ldr x23, [x15, #0xa8]\n"
+ "fmla v25.8h, v0.8h, v22.8h\n"
+ "ldr q0, [x16, #0x10]\n"
+ "fmla v29.8h, v2.8h, v16.8h\n"
+ "ldr q19, [x22, x14]\n"
+ "ldr x22, [x15, #0xb0]\n"
+ "fmla v28.8h, v5.8h, v21.8h\n"
+ "ldr q18, [x21, x14]\n"
+ "ldr x21, [x15, #0xc0]\n"
+ "fmla v25.8h, v4.8h, v17.8h\n"
+ "ldr q21, [x20, x14]\n"
+ "fmla v29.8h, v5.8h, v20.8h\n"
+ "fmla v28.8h, v3.8h, v20.8h\n"
+ "ldr q16, [x27, x14]\n"
+ "ldr x20, [x15, #0xb8]\n"
+ "fmla v24.8h, v4.8h, v16.8h\n"
+ "ldr q20, [x24, x14]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "fmla v29.8h, v6.8h, v22.8h\n"
+ "ldr q17, [x25, x14]\n"
+ "fmla v25.8h, v1.8h, v19.8h\n"
+ "fmla v24.8h, v1.8h, v18.8h\n"
+ "ldr q1, [x16, #0x20]\n"
+ "fmla v28.8h, v7.8h, v18.8h\n"
+ "fmla v29.8h, v7.8h, v19.8h\n"
+ "ldr q16, [x23, x14]\n"
+ "fmla v24.8h, v5.8h, v23.8h\n"
+ "ldr q19, [x22, x14]\n"
+ "fmla v25.8h, v6.8h, v17.8h\n"
+ "ldr q18, [x20, x14]\n"
+ "fmla v28.8h, v8.8h, v21.8h\n"
+ "fmax v29.8h, v29.8h, v26.8h\n"
+ "fmla v24.8h, v2.8h, v21.8h\n"
+ "ldr q17, [x21, x14]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ "add x14, x14, #0x10\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "fmla v25.8h, v7.8h, v20.8h\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "fmin v29.8h, v29.8h, v27.8h\n"
+ "ldr q9, [x27, x8]\n"
+ "ldr q10, [x26, x8]\n"
+ "fmla v24.8h, v3.8h, v16.8h\n"
+ "ldr q3, [x16, #0x40]\n"
+ "fmax v28.8h, v28.8h, v26.8h\n"
+ "ldr q12, [x24, x8]\n"
+ "ldr q13, [x23, x8]\n"
+ "fmla v25.8h, v5.8h, v16.8h\n"
+ "ldr q16, [x20, x8]\n"
+ "ldr q5, [x16, #0x60]\n"
+ "str q29, [x13, x9]\n"
+ "fmin v28.8h, v28.8h, v27.8h\n"
+ "fmla v24.8h, v7.8h, v19.8h\n"
+ "ldr q14, [x22, x8]\n"
+ "ldr q7, [x16, #0x80]\n"
+ "fmla v25.8h, v8.8h, v18.8h\n"
+ "str q28, [x12, x9]\n"
+ "fmla v24.8h, v6.8h, v18.8h\n"
+ "ldr q15, [x21, x8]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "fmax v25.8h, v25.8h, v26.8h\n"
+ "fmla v24.8h, v8.8h, v17.8h\n"
+ "ldr q11, [x25, x8]\n"
+ "ldr q8, [x16, #0x90]\n"
+ "add x8, x8, #0x10\n"
+ "add x16, x16, #0xa0\n"
+ "cmp x8, x17, LSL #4\n"
+ "fmin v25.8h, v25.8h, v27.8h\n"
"fmax v24.8h, v24.8h, v26.8h\n"
- "ldr q17, [x20, x28]\n"
- "ldr x20, [x13, #0xa0]\n"
- "fmla v19.8h, v3.8h, v17.8h\n"
- "fmax v23.8h, v23.8h, v26.8h\n"
- "ldr q16, [x20, x28]\n"
- "ldr q3, [x23, #0x40]\n"
- "fmla v20.8h, v7.8h, v16.8h\n"
- "fmla v20.8h, v5.8h, v17.8h\n"
- "ldr q5, [x23, #0x60]\n"
- "ldr x20, [x13, #0xb0]\n"
- "add x22, x22, #0x10\n"
"fmin v24.8h, v24.8h, v27.8h\n"
- "ldr q16, [x20, x28]\n"
- "ldr x20, [x13, #0xb8]\n"
- "fmla v19.8h, v7.8h, v16.8h\n"
- "fmin v23.8h, v23.8h, v27.8h\n"
- "ldr q16, [x20, x28]\n"
- "ldr q7, [x23, #0x80]\n"
- "fmla v19.8h, v6.8h, v16.8h\n"
- "fmla v20.8h, v8.8h, v16.8h\n"
- "ldr q6, [x23, #0x70]\n"
- "ldr x20, [x13, #0xc0]\n"
- "fmax v20.8h, v20.8h, v26.8h\n"
- "fmin v20.8h, v20.8h, v27.8h\n"
- "ldr q16, [x20, x28]\n"
- "fmla v19.8h, v8.8h, v16.8h\n"
- "ldr q8, [x23, #0x90]\n"
- "fmax v19.8h, v19.8h, v26.8h\n"
- "ldp x21, x20, [x13, #0x0]\n"
- "ldr q9, [x21, x25]\n"
- "fmin v19.8h, v19.8h, v27.8h\n"
- "add x28, x28, #0x10\n"
- "ldr q10, [x20, x25]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "str q24, [x12, x22]\n"
- "add x23, x23, #0xa0\n"
- "ldr q11, [x21, x25]\n"
- "ldr q12, [x20, x25]\n"
- "str q23, [x11, x22]\n"
- "ldp x21, x20, [x13, #0x20]\n"
- "ldr q13, [x21, x25]\n"
- "str q20, [x10, x22]\n"
- "ldr q14, [x20, x25]\n"
- "ldp x21, x20, [x13, #0x30]\n"
- "str q19, [x9, x22]\n"
- "ldr q15, [x21, x25]\n"
- "ldr q16, [x20, x25]\n"
- "add x25, x25, #0x10\n"
- "cmp x25, x24, LSL #4\n"
+ "str q25, [x11, x9]\n"
+ "str q24, [x10, x9]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v25.16b, v31.16b\n fmla v25.8h, v8.8h, v9.8h\n"
- "mov v24.16b, v31.16b\n fmla v24.8h, v6.8h, v9.8h\n"
- "ldr x21, [x13, #0x40]\n"
- "ldr x20, [x13, #0x48]\n"
- "fmla v25.8h, v0.8h, v10.8h\n"
- "fmla v24.8h, v1.8h, v12.8h\n"
- "ldr q20, [x20, x28]\n"
- "ldr x20, [x13, #0x50]\n"
- "fmla v25.8h, v1.8h, v11.8h\n"
- "ldr q18, [x21, x28]\n"
- "fmla v24.8h, v2.8h, v13.8h\n"
- "ldr q19, [x20, x28]\n"
- "fmla v25.8h, v3.8h, v14.8h\n"
- "fmla v24.8h, v0.8h, v16.8h\n"
- "ldr x20, [x13, #0x58]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v25.8h, v4.8h, v15.8h\n"
- "fmla v24.8h, v4.8h, v18.8h\n"
- "ldr x21, [x13, #0x78]\n"
- "ldr x20, [x13, #0x60]\n"
- "ldr q23, [x20, x28]\n"
- "fmla v25.8h, v2.8h, v16.8h\n"
- "fmla v24.8h, v5.8h, v20.8h\n"
- "ldr x20, [x13, #0x80]\n"
- "ldr q22, [x20, x28]\n"
- "mov v21.16b, v31.16b\n fmla v21.8h, v2.8h, v9.8h\n"
- "mov v20.16b, v31.16b\n fmla v20.8h, v0.8h, v9.8h\n"
- "ldr x20, [x13, #0x68]\n"
- "ldr q18, [x20, x28]\n"
- "fmla v25.8h, v5.8h, v19.8h\n"
+ "mov v28.16b, v31.16b\n fmla v28.8h, v8.8h, v9.8h\n"
+ "mov v29.16b, v31.16b\n fmla v29.8h, v6.8h, v9.8h\n"
+ "ldr x28, [x15, #0x40]\n"
+ "ldr x20, [x15, #0x48]\n"
+ "ldr x26, [x15, #0x50]\n"
+ "ldr x25, [x15, #0x58]\n"
+ "mov v25.16b, v31.16b\n fmla v25.8h, v2.8h, v9.8h\n"
+ "mov v24.16b, v31.16b\n fmla v24.8h, v0.8h, v9.8h\n"
+ "ldr x27, [x15, #0x78]\n"
+ "ldr x24, [x15, #0x60]\n"
+ "add x9, x9, #0x10\n"
+ "ldr x23, [x15, #0x68]\n"
+ "ldr x22, [x15, #0x70]\n"
+ "fmla v28.8h, v0.8h, v10.8h\n"
+ "fmla v29.8h, v1.8h, v12.8h\n"
+ "ldr q21, [x20, x14]\n"
+ "ldr x21, [x15, #0x88]\n"
+ "fmla v28.8h, v1.8h, v11.8h\n"
+ "ldr q18, [x28, x14]\n"
+ "ldr x20, [x15, #0x80]\n"
+ "fmla v29.8h, v2.8h, v13.8h\n"
+ "ldr q20, [x26, x14]\n"
+ "ldr x26, [x15, #0x90]\n"
+ "fmla v28.8h, v3.8h, v14.8h\n"
+ "ldr q17, [x25, x14]\n"
+ "ldr x25, [x15, #0x98]\n"
+ "fmla v29.8h, v0.8h, v16.8h\n"
+ "fmla v28.8h, v4.8h, v15.8h\n"
+ "ldr q23, [x24, x14]\n"
+ "ldr x24, [x15, #0xa0]\n"
+ "fmla v25.8h, v3.8h, v17.8h\n"
+ "ldr q22, [x21, x14]\n"
+ "fmla v29.8h, v4.8h, v18.8h\n"
+ "ldr q19, [x23, x14]\n"
+ "ldr x23, [x15, #0xa8]\n"
+ "fmla v28.8h, v2.8h, v16.8h\n"
+ "ldr q18, [x22, x14]\n"
+ "ldr x22, [x15, #0xb0]\n"
+ "fmla v25.8h, v0.8h, v23.8h\n"
+ "fmla v29.8h, v5.8h, v21.8h\n"
+ "ldr q17, [x20, x14]\n"
+ "ldr x21, [x15, #0xc0]\n"
+ "fmla v28.8h, v5.8h, v20.8h\n"
+ "fmla v29.8h, v3.8h, v20.8h\n"
+ "ldr q16, [x27, x14]\n"
+ "ldr x20, [x15, #0xb8]\n"
+ "fmla v24.8h, v4.8h, v16.8h\n"
+ "ldr q21, [x24, x14]\n"
+ "fmla v25.8h, v4.8h, v19.8h\n"
+ "ldr q20, [x25, x14]\n"
+ "fmla v28.8h, v6.8h, v23.8h\n"
+ "ldr q16, [x26, x14]\n"
+ "fmla v29.8h, v7.8h, v17.8h\n"
+ "fmla v24.8h, v1.8h, v17.8h\n"
+ "fmla v25.8h, v1.8h, v18.8h\n"
+ "fmla v28.8h, v7.8h, v18.8h\n"
+ "ldr q19, [x23, x14]\n"
+ "fmla v29.8h, v8.8h, v20.8h\n"
+ "fmla v24.8h, v5.8h, v22.8h\n"
+ "ldr q18, [x22, x14]\n"
+ "fmla v25.8h, v6.8h, v16.8h\n"
+ "ldr q17, [x20, x14]\n"
+ "fmax v28.8h, v28.8h, v26.8h\n"
+ "fmax v29.8h, v29.8h, v26.8h\n"
+ "fmla v24.8h, v2.8h, v20.8h\n"
+ "ldr q16, [x21, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v25.8h, v7.8h, v21.8h\n"
+ "fmin v28.8h, v28.8h, v27.8h\n"
+ "fmin v29.8h, v29.8h, v27.8h\n"
"fmla v24.8h, v3.8h, v19.8h\n"
- "ldr q16, [x21, x28]\n"
- "fmla v21.8h, v3.8h, v17.8h\n"
- "fmla v20.8h, v4.8h, v16.8h\n"
- "ldr x20, [x13, #0x88]\n"
- "ldr q16, [x20, x28]\n"
- "fmla v21.8h, v0.8h, v23.8h\n"
- "fmla v20.8h, v1.8h, v22.8h\n"
- "ldr x20, [x13, #0x70]\n"
- "ldr q17, [x20, x28]\n"
- "ldr x20, [x13, #0x98]\n"
- "fmla v21.8h, v4.8h, v18.8h\n"
- "ldr q19, [x20, x28]\n"
- "fmla v20.8h, v5.8h, v16.8h\n"
- "fmla v25.8h, v6.8h, v23.8h\n"
- "ldr x20, [x13, #0x90]\n"
- "ldr q16, [x20, x28]\n"
- "fmla v21.8h, v1.8h, v17.8h\n"
- "ldr x20, [x13, #0xa8]\n"
- "fmla v20.8h, v2.8h, v19.8h\n"
- "fmla v25.8h, v7.8h, v17.8h\n"
- "ldr q18, [x20, x28]\n"
- "ldr x20, [x13, #0xa0]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v21.8h, v6.8h, v16.8h\n"
- "fmla v20.8h, v3.8h, v18.8h\n"
- "ldr x20, [x13, #0xb0]\n"
- "ldr q16, [x20, x28]\n"
- "fmla v21.8h, v7.8h, v17.8h\n"
- "fmla v20.8h, v7.8h, v16.8h\n"
- "ldr x20, [x13, #0xb8]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v24.8h, v7.8h, v22.8h\n"
- "fmla v21.8h, v5.8h, v18.8h\n"
- "ldr x20, [x13, #0xc0]\n"
- "fmla v20.8h, v6.8h, v17.8h\n"
- "fmla v24.8h, v8.8h, v19.8h\n"
- "ldr q16, [x20, x28]\n"
- "fmla v21.8h, v8.8h, v17.8h\n"
- "fmla v20.8h, v8.8h, v16.8h\n"
+ "str q28, [x13, x9]\n"
+ "fmla v25.8h, v5.8h, v19.8h\n"
+ "str q29, [x12, x9]\n"
+ "fmla v24.8h, v7.8h, v18.8h\n"
+ "fmla v25.8h, v8.8h, v17.8h\n"
+ "fmla v24.8h, v6.8h, v17.8h\n"
"fmax v25.8h, v25.8h, v26.8h\n"
- "add x22, x22, #0x10\n"
- "fmax v24.8h, v24.8h, v26.8h\n"
- "fmax v21.8h, v21.8h, v26.8h\n"
- "add x28, x28, #0x10\n"
- "fmax v20.8h, v20.8h, v26.8h\n"
"fmin v25.8h, v25.8h, v27.8h\n"
- "str q25, [x12, x22]\n"
+ "fmla v24.8h, v8.8h, v16.8h\n"
+ "str q25, [x11, x9]\n"
+ "fmax v24.8h, v24.8h, v26.8h\n"
"fmin v24.8h, v24.8h, v27.8h\n"
- "fmin v21.8h, v21.8h, v27.8h\n"
- "str q24, [x11, x22]\n"
- "fmin v20.8h, v20.8h, v27.8h\n"
- "str q21, [x10, x22]\n"
- "str q20, [x9, x22]\n"
+ "str q24, [x10, x9]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 80f\n"
- "ldr q31, [x23, #0x0]\n"
- "ldr q0, [x23, #0x10]\n"
- "mov x20, x28\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "mov x20, x14\n"
+ "ldr q1, [x16, #0x20]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldr q3, [x16, #0x40]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "ldr q5, [x16, #0x60]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "add x13, x13, x20\n"
"add x12, x12, x20\n"
- "ldr q1, [x23, #0x20]\n"
- "ldr q2, [x23, #0x30]\n"
+ "ldr q7, [x16, #0x80]\n"
+ "ldr q8, [x16, #0x90]\n"
"add x11, x11, x20\n"
"add x10, x10, x20\n"
- "ldr q3, [x23, #0x40]\n"
- "ldr q4, [x23, #0x50]\n"
- "add x9, x9, x20\n"
- "ldr q5, [x23, #0x60]\n"
- "ldr q6, [x23, #0x70]\n"
- "ldr q7, [x23, #0x80]\n"
- "ldr q8, [x23, #0x90]\n"
- "ldr x27, [x13, #0x0]\n"
- "ldr x26, [x13, #0x8]\n"
- "add x27, x27, x28\n"
- "add x26, x26, x28\n"
- "ldr x25, [x13, #0x10]\n"
- "ldr x24, [x13, #0x18]\n"
- "add x25, x25, x28\n"
- "add x24, x24, x28\n"
- "ldr x23, [x13, #0x20]\n"
- "ldr x22, [x13, #0x28]\n"
- "add x23, x23, x28\n"
- "add x22, x22, x28\n"
- "ldr x21, [x13, #0x30]\n"
- "ldr x20, [x13, #0x38]\n"
- "add x21, x21, x28\n"
- "add x20, x20, x28\n"
+ "ldr x27, [x15, #0x0]\n"
+ "ldr x26, [x15, #0x8]\n"
+ "ldr x25, [x15, #0x10]\n"
+ "ldr x24, [x15, #0x18]\n"
+ "ldr x23, [x15, #0x20]\n"
+ "ldr x22, [x15, #0x28]\n"
+ "ldr x21, [x15, #0x30]\n"
+ "ldr x20, [x15, #0x38]\n"
+ "add x27, x27, x14\n"
+ "add x26, x26, x14\n"
+ "add x25, x25, x14\n"
+ "add x24, x24, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 5f\n"
"ld1 { v9.d }[0], [x27], #0x8\n"
"ld1 { v10.d }[0], [x26], #0x8\n"
@@ -426,19 +426,19 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"ld1 { v16.h }[0], [x20], #0x2\n"
"7:" // Oddments: Load inputs (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 2: End
"mov v28.16b, v31.16b\n fmla v28.8h, v8.8h, v9.8h\n"
- "fmla v28.8h, v0.8h, v10.8h\n"
- "ldr x20, [x13, #0x40]\n"
- "add x20, x20, x28\n"
"mov v29.16b, v31.16b\n fmla v29.8h, v6.8h, v9.8h\n"
- "fmla v28.8h, v1.8h, v11.8h\n"
+ "ldr x20, [x15, #0x40]\n"
+ "mov v30.16b, v31.16b\n fmla v30.8h, v2.8h, v9.8h\n"
+ "fmla v31.8h, v0.8h, v9.8h\n"
+ "add x20, x20, x14\n"
+ "fmla v28.8h, v0.8h, v10.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
- "fmla v28.8h, v3.8h, v14.8h\n"
+ "fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v2.8h, v13.8h\n"
+ "fmla v28.8h, v3.8h, v14.8h\n"
+ "fmla v29.8h, v0.8h, v16.8h\n"
"fmla v28.8h, v4.8h, v15.8h\n"
- "mov v30.16b, v31.16b\n fmla v30.8h, v2.8h, v9.8h\n"
- "fmla v31.8h, v0.8h, v9.8h\n"
"fmla v28.8h, v2.8h, v16.8h\n"
- "fmla v29.8h, v0.8h, v16.8h\n"
"tbz %x[n_channels], #2, 9f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 8f\n"
@@ -459,9 +459,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"10:" // Oddments: Load input (1, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v11.h }[0], [x20], #0x2\n"
"11:" // Oddments: Load input (1, 3): Bit 2: End
- "ldr x20, [x13, #0x48]\n"
+ "ldr x20, [x15, #0x48]\n"
"fmla v29.8h, v4.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 13f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 12f\n"
@@ -482,9 +482,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"14:" // Oddments: Load input (1, 4): Bit 2: Unset: Bit 1: Unset
"ld1 { v12.h }[0], [x20], #0x2\n"
"15:" // Oddments: Load input (1, 4): Bit 2: End
- "ldr x20, [x13, #0x50]\n"
+ "ldr x20, [x15, #0x50]\n"
"fmla v29.8h, v5.8h, v12.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 17f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
@@ -505,10 +505,10 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"18:" // Oddments: Load input (1, 2): Bit 2: Unset: Bit 1: Unset
"ld1 { v13.h }[0], [x20], #0x2\n"
"19:" // Oddments: Load input (1, 2): Bit 2: End
- "ldr x20, [x13, #0x58]\n"
+ "ldr x20, [x15, #0x58]\n"
"fmla v28.8h, v5.8h, v13.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 21f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 20f\n"
@@ -529,9 +529,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"22:" // Oddments: Load input (3, 0): Bit 2: Unset: Bit 1: Unset
"ld1 { v14.h }[0], [x20], #0x2\n"
"23:" // Oddments: Load input (3, 0): Bit 2: End
- "ldr x20, [x13, #0x60]\n"
+ "ldr x20, [x15, #0x60]\n"
"fmla v30.8h, v3.8h, v14.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 25f\n"
"ld1 { v15.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 24f\n"
@@ -552,10 +552,10 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"26:" // Oddments: Load input (2, 0): Bit 2: Unset: Bit 1: Unset
"ld1 { v15.h }[0], [x20], #0x2\n"
"27:" // Oddments: Load input (2, 0): Bit 2: End
- "ldr x20, [x13, #0x68]\n"
+ "ldr x20, [x15, #0x68]\n"
"fmla v28.8h, v6.8h, v15.8h\n"
"fmla v30.8h, v0.8h, v15.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 29f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
@@ -576,9 +576,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"30:" // Oddments: Load input (3, 1): Bit 2: Unset: Bit 1: Unset
"ld1 { v11.h }[0], [x20], #0x2\n"
"31:" // Oddments: Load input (3, 1): Bit 2: End
- "ldr x20, [x13, #0x70]\n"
+ "ldr x20, [x15, #0x70]\n"
"fmla v30.8h, v4.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 33f\n"
"ld1 { v16.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 32f\n"
@@ -599,10 +599,10 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"34:" // Oddments: Load input (2, 1): Bit 2: Unset: Bit 1: Unset
"ld1 { v16.h }[0], [x20], #0x2\n"
"35:" // Oddments: Load input (2, 1): Bit 2: End
- "ldr x20, [x13, #0x78]\n"
+ "ldr x20, [x15, #0x78]\n"
"fmla v28.8h, v7.8h, v16.8h\n"
"fmla v30.8h, v1.8h, v16.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 37f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 36f\n"
@@ -623,9 +623,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"38:" // Oddments: Load input (3, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v13.h }[0], [x20], #0x2\n"
"39:" // Oddments: Load input (3, 3): Bit 2: End
- "ldr x20, [x13, #0x80]\n"
+ "ldr x20, [x15, #0x80]\n"
"fmla v31.8h, v4.8h, v13.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 41f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 40f\n"
@@ -646,10 +646,10 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"42:" // Oddments: Load input (2, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v12.h }[0], [x20], #0x2\n"
"43:" // Oddments: Load input (2, 3): Bit 2: End
- "ldr x20, [x13, #0x88]\n"
+ "ldr x20, [x15, #0x88]\n"
"fmla v29.8h, v7.8h, v12.8h\n"
"fmla v31.8h, v1.8h, v12.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 45f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 44f\n"
@@ -670,9 +670,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"46:" // Oddments: Load input (3, 4): Bit 2: Unset: Bit 1: Unset
"ld1 { v14.h }[0], [x20], #0x2\n"
"47:" // Oddments: Load input (3, 4): Bit 2: End
- "ldr x20, [x13, #0x90]\n"
+ "ldr x20, [x15, #0x90]\n"
"fmla v31.8h, v5.8h, v14.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 49f\n"
"ld1 { v15.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
@@ -693,9 +693,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"50:" // Oddments: Load input (4, 0): Bit 2: Unset: Bit 1: Unset
"ld1 { v15.h }[0], [x20], #0x2\n"
"51:" // Oddments: Load input (4, 0): Bit 2: End
- "ldr x20, [x13, #0x98]\n"
+ "ldr x20, [x15, #0x98]\n"
"fmla v30.8h, v6.8h, v15.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 53f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 52f\n"
@@ -716,10 +716,10 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"54:" // Oddments: Load input (2, 4): Bit 2: Unset: Bit 1: Unset
"ld1 { v11.h }[0], [x20], #0x2\n"
"55:" // Oddments: Load input (2, 4): Bit 2: End
- "ldr x20, [x13, #0xa0]\n"
+ "ldr x20, [x15, #0xa0]\n"
"fmla v29.8h, v8.8h, v11.8h\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 57f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 56f\n"
@@ -740,9 +740,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"58:" // Oddments: Load input (4, 1): Bit 2: Unset: Bit 1: Unset
"ld1 { v13.h }[0], [x20], #0x2\n"
"59:" // Oddments: Load input (4, 1): Bit 2: End
- "ldr x20, [x13, #0xa8]\n"
+ "ldr x20, [x15, #0xa8]\n"
"fmla v30.8h, v7.8h, v13.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 61f\n"
"ld1 { v16.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 60f\n"
@@ -763,10 +763,10 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"62:" // Oddments: Load input (3, 2): Bit 2: Unset: Bit 1: Unset
"ld1 { v16.h }[0], [x20], #0x2\n"
"63:" // Oddments: Load input (3, 2): Bit 2: End
- "ldr x20, [x13, #0xb0]\n"
+ "ldr x20, [x15, #0xb0]\n"
"fmla v30.8h, v5.8h, v16.8h\n"
"fmla v31.8h, v3.8h, v16.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 65f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 64f\n"
@@ -787,9 +787,9 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"66:" // Oddments: Load input (4, 3): Bit 2: Unset: Bit 1: Unset
"ld1 { v14.h }[0], [x20], #0x2\n"
"67:" // Oddments: Load input (4, 3): Bit 2: End
- "ldr x20, [x13, #0xb8]\n"
+ "ldr x20, [x15, #0xb8]\n"
"fmla v31.8h, v7.8h, v14.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 69f\n"
"ld1 { v15.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 68f\n"
@@ -810,10 +810,10 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"70:" // Oddments: Load input (4, 2): Bit 2: Unset: Bit 1: Unset
"ld1 { v15.h }[0], [x20], #0x2\n"
"71:" // Oddments: Load input (4, 2): Bit 2: End
- "ldr x20, [x13, #0xc0]\n"
+ "ldr x20, [x15, #0xc0]\n"
"fmla v30.8h, v8.8h, v15.8h\n"
"fmla v31.8h, v6.8h, v15.8h\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 73f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 72f\n"
@@ -838,56 +838,56 @@ void a64_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"fmax v28.8h, v28.8h, v26.8h\n"
"fmax v29.8h, v29.8h, v26.8h\n"
"fmax v30.8h, v30.8h, v26.8h\n"
- "fmax v31.8h, v31.8h, v26.8h\n"
"fmin v28.8h, v28.8h, v27.8h\n"
+ "fmax v31.8h, v31.8h, v26.8h\n"
"fmin v29.8h, v29.8h, v27.8h\n"
"fmin v30.8h, v30.8h, v27.8h\n"
"fmin v31.8h, v31.8h, v27.8h\n"
"tbz %x[n_channels], #2, 77f\n"
- "st1 { v28.d }[0], [x12], #0x8\n"
- "st1 { v29.d }[0], [x11], #0x8\n"
- "st1 { v30.d }[0], [x10], #0x8\n"
- "st1 { v31.d }[0], [x9], #0x8\n"
+ "st1 { v28.d }[0], [x13], #0x8\n"
+ "st1 { v29.d }[0], [x12], #0x8\n"
+ "st1 { v30.d }[0], [x11], #0x8\n"
+ "st1 { v31.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 76f\n"
- "st1 { v28.s }[2], [x12], #0x4\n"
- "st1 { v29.s }[2], [x11], #0x4\n"
- "st1 { v30.s }[2], [x10], #0x4\n"
- "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v28.s }[2], [x13], #0x4\n"
+ "st1 { v29.s }[2], [x12], #0x4\n"
+ "st1 { v30.s }[2], [x11], #0x4\n"
+ "st1 { v31.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "st1 { v28.h }[6], [x12], #0x2\n"
- "st1 { v29.h }[6], [x11], #0x2\n"
- "st1 { v30.h }[6], [x10], #0x2\n"
- "st1 { v31.h }[6], [x9], #0x2\n"
+ "st1 { v28.h }[6], [x13], #0x2\n"
+ "st1 { v29.h }[6], [x12], #0x2\n"
+ "st1 { v30.h }[6], [x11], #0x2\n"
+ "st1 { v31.h }[6], [x10], #0x2\n"
"b 79f\n"
"76:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 79f\n"
- "st1 { v28.h }[4], [x12], #0x2\n"
- "st1 { v29.h }[4], [x11], #0x2\n"
- "st1 { v30.h }[4], [x10], #0x2\n"
- "st1 { v31.h }[4], [x9], #0x2\n"
+ "st1 { v28.h }[4], [x13], #0x2\n"
+ "st1 { v29.h }[4], [x12], #0x2\n"
+ "st1 { v30.h }[4], [x11], #0x2\n"
+ "st1 { v31.h }[4], [x10], #0x2\n"
"b 79f\n"
"77:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 78f\n"
- "st1 { v28.s }[0], [x12], #0x4\n"
- "st1 { v29.s }[0], [x11], #0x4\n"
- "st1 { v30.s }[0], [x10], #0x4\n"
- "st1 { v31.s }[0], [x9], #0x4\n"
+ "st1 { v28.s }[0], [x13], #0x4\n"
+ "st1 { v29.s }[0], [x12], #0x4\n"
+ "st1 { v30.s }[0], [x11], #0x4\n"
+ "st1 { v31.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 79f\n"
- "st1 { v28.h }[2], [x12], #0x2\n"
- "st1 { v29.h }[2], [x11], #0x2\n"
- "st1 { v30.h }[2], [x10], #0x2\n"
- "st1 { v31.h }[2], [x9], #0x2\n"
+ "st1 { v28.h }[2], [x13], #0x2\n"
+ "st1 { v29.h }[2], [x12], #0x2\n"
+ "st1 { v30.h }[2], [x11], #0x2\n"
+ "st1 { v31.h }[2], [x10], #0x2\n"
"b 79f\n"
"78:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "st1 { v28.h }[0], [x12], #0x2\n"
- "st1 { v29.h }[0], [x11], #0x2\n"
- "st1 { v30.h }[0], [x10], #0x2\n"
- "st1 { v31.h }[0], [x9], #0x2\n"
+ "st1 { v28.h }[0], [x13], #0x2\n"
+ "st1 { v29.h }[0], [x12], #0x2\n"
+ "st1 { v30.h }[0], [x11], #0x2\n"
+ "st1 { v31.h }[0], [x10], #0x2\n"
"79:" // Oddments: Store: Bit 2: End
"80:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index cecaf79704..f17beef55e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,251 +87,251 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x27, #0x0\n"
- "mov x26, #0x0\n"
+ "mov x11, #0x0\n"
+ "mov x10, #0x0\n"
"1:" // Tile loop
- "str x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x23, #0x2\n"
- "mov x25, #0x2\n"
- "str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "str x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x9, #0x2\n"
+ "mov x28, #0x2\n"
+ "str x10, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x27, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
"ldr x2, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x27, x24\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x26, x2, x22\n" // offset += tile_j * ld_input_col
+ "mov x26, #0x10\n" // cntb _, ALL, #1
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"ldr x3, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "lsl x2, x2, #0x1\n"
- "mul x20, x27, x21\n" // offset = tile_i * ld_output_row
- "ldr x4, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x5, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x6, x2, x2\n"
- "mul x22, x22, x23\n" // offset *= kernel_stride * output_size
- "add x4, x4, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x7, x4, x24, LSL #1\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x26, x3, x20\n" // offset += tile_j * ld_output_col
- "add x17, x7, x24, LSL #1\n"
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "mul x20, x20, x25\n" // offset *= output_tile_size
- "lsr x22, %x[n_channels], #0x3\n"
- "add x16, x17, x24, LSL #1\n"
- "add x15, x6, x2\n"
- "add x14, x16, x24, LSL #1\n"
- "add x13, x15, x2\n"
- "add x5, x5, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "lsr x24, %x[n_channels], #0x3\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
"ld1r { v27.8h }, [x20]\n"
+ "ldr x4, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x23, #0x0\n"
"ld1r { v15.8h }, [x20]\n"
- "add x12, x14, x24, LSL #1\n"
- "add x11, x13, x2\n"
- "add x10, x5, x21, LSL #1\n"
+ "mul x22, x11, x27\n" // offset = tile_i * ld_input_row
+ "ldr x5, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x21, XZR, x26\n"
+ "mul x20, x11, x25\n" // offset = tile_i * ld_output_row
+ "ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x22, x10, x2, x22\n" // offset += tile_j * ld_input_col
+ "lsl x2, x2, #0x1\n"
+ "madd x20, x10, x3, x20\n" // offset += tile_j * ld_output_col
"lsl x3, x3, #0x1\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q25, [x8, #0x0]\n"
- "ldr q0, [x8, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x8, #0x20]\n"
- "ldr q2, [x8, #0x30]\n"
- "ldr q3, [x8, #0x40]\n"
- "ldr q4, [x8, #0x50]\n"
- "add x8, x8, #0x60\n"
+ "mul x22, x22, x9\n" // offset *= kernel_stride * output_size
+ "add x7, x2, x2\n"
+ "add x8, x7, x2\n"
+ "add x17, x8, x2\n"
+ "mul x20, x20, x28\n" // offset *= output_tile_size
+ "add x16, x17, x2\n"
+ "add x4, x4, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x15, x4, x27, LSL #1\n"
+ "add x14, x15, x27, LSL #1\n"
+ "add x13, x14, x27, LSL #1\n"
+ "add x12, x13, x27, LSL #1\n"
+ "add x5, x5, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x11, x12, x27, LSL #1\n"
+ "add x10, x5, x25, LSL #1\n"
+ "cbz x24, 4f\n"
+ "ldr q25, [x6, #0x0]\n"
+ "ldr q0, [x6, #0x10]\n"
+ "cmp x26, x24, LSL #4\n"
+ "ldr q1, [x6, #0x20]\n"
+ "ldr q2, [x6, #0x30]\n"
+ "ldr q3, [x6, #0x40]\n"
+ "ldr q4, [x6, #0x50]\n"
+ "add x6, x6, #0x60\n"
"ld1 { v5.8h }, [x4]\n"
"ldr q6, [x4, x2]\n"
- "ld1 { v7.8h }, [x7]\n"
- "ldr q8, [x7, x2]\n"
- "ldr q9, [x4, x6]\n"
- "ldr q13, [x7, x6]\n"
- "ldr q11, [x4, x15]\n"
- "ldr q12, [x4, x13]\n"
- "ldr q10, [x7, x11]\n"
- "ld1 { v14.8h }, [x17]\n"
+ "ld1 { v7.8h }, [x15]\n"
+ "ldr q8, [x15, x2]\n"
+ "ldr q9, [x4, x7]\n"
+ "ldr q13, [x15, x7]\n"
+ "ldr q11, [x4, x8]\n"
+ "ldr q12, [x4, x17]\n"
+ "ldr q10, [x15, x16]\n"
+ "ld1 { v14.8h }, [x14]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v30.16b, v25.16b\n fmla v30.8h, v0.8h, v5.8h\n"
- "ldr q23, [x7, x15]\n"
+ "ldr q23, [x15, x8]\n"
"mov v31.16b, v25.16b\n fmla v31.8h, v0.8h, v6.8h\n"
- "add x23, x23, #0x10\n"
+ "add x26, x26, #0x10\n"
"mov v29.16b, v25.16b\n fmla v29.8h, v0.8h, v7.8h\n"
"mov v28.16b, v25.16b\n fmla v28.8h, v0.8h, v8.8h\n"
- "ldr q19, [x8, #0x0]\n"
- "ldr q25, [x8, #0x140]\n"
+ "ldr q19, [x6, #0x0]\n"
+ "ldr q25, [x6, #0x140]\n"
+ "cmp x26, x24, LSL #4\n"
+ "add x21, x21, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v30.8h, v1.8h, v6.8h\n"
- "ldr q21, [x7, x13]\n"
+ "ldr q21, [x15, x17]\n"
+ "add x15, x15, #0x10\n"
"fmla v31.8h, v1.8h, v9.8h\n"
- "add x7, x7, #0x10\n"
"fmla v29.8h, v1.8h, v8.8h\n"
"fmla v28.8h, v1.8h, v13.8h\n"
- "ldr q1, [x8, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
+ "ldr q1, [x6, #0x10]\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "ldr q18, [x4, x11]\n"
- "fmla v31.8h, v2.8h, v11.8h\n"
+ "ldr q18, [x4, x16]\n"
"add x4, x4, #0x10\n"
+ "fmla v31.8h, v2.8h, v11.8h\n"
"fmla v29.8h, v2.8h, v13.8h\n"
"fmla v28.8h, v2.8h, v23.8h\n"
- "ldr q17, [x8, #0x20]\n"
- "add x20, x20, #0x10\n"
+ "ldr q17, [x6, #0x20]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "ldr q6, [x17, x2]\n"
+ "ldr q6, [x14, x2]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
- "add x21, x21, #0x10\n"
"fmla v29.8h, v3.8h, v23.8h\n"
"fmla v28.8h, v3.8h, v21.8h\n"
- "ldr q16, [x8, #0x30]\n"
+ "ldr q16, [x6, #0x30]\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "ldr q2, [x17, x6]\n"
+ "ldr q2, [x14, x7]\n"
"fmla v31.8h, v4.8h, v18.8h\n"
- "ldr q0, [x17, x15]\n"
+ "ldr q0, [x14, x8]\n"
"fmla v29.8h, v4.8h, v21.8h\n"
"fmla v28.8h, v4.8h, v10.8h\n"
- "ldr q20, [x8, #0x40]\n"
+ "ldr q20, [x6, #0x40]\n"
"fmla v30.8h, v19.8h, v7.8h\n"
- "ld1 { v7.8h }, [x7]\n"
+ "ld1 { v7.8h }, [x15]\n"
"fmla v31.8h, v19.8h, v8.8h\n"
"fmla v29.8h, v19.8h, v14.8h\n"
"fmla v28.8h, v19.8h, v6.8h\n"
- "ldr q19, [x8, #0x50]\n"
+ "ldr q19, [x6, #0x50]\n"
"fmla v30.8h, v1.8h, v8.8h\n"
- "ldr q26, [x17, x11]\n"
+ "ldr q26, [x14, x16]\n"
"fmla v31.8h, v1.8h, v13.8h\n"
"fmla v29.8h, v1.8h, v6.8h\n"
"fmla v28.8h, v1.8h, v2.8h\n"
- "ldr q18, [x8, #0x60]\n"
+ "ldr q18, [x6, #0x60]\n"
"fmla v30.8h, v17.8h, v13.8h\n"
- "ldr q1, [x17, x13]\n"
+ "ldr q1, [x14, x17]\n"
+ "add x14, x14, #0x10\n"
"fmla v31.8h, v17.8h, v23.8h\n"
- "add x17, x17, #0x10\n"
"fmla v29.8h, v17.8h, v2.8h\n"
"fmla v28.8h, v17.8h, v0.8h\n"
- "ldr q17, [x8, #0x70]\n"
+ "ldr q17, [x6, #0x70]\n"
"fmla v30.8h, v16.8h, v23.8h\n"
- "ld1 { v24.8h }, [x16]\n"
+ "ld1 { v24.8h }, [x13]\n"
"fmla v31.8h, v16.8h, v21.8h\n"
"fmla v29.8h, v16.8h, v0.8h\n"
"fmla v28.8h, v16.8h, v1.8h\n"
- "ldr q16, [x8, #0x80]\n"
+ "ldr q16, [x6, #0x80]\n"
"fmla v30.8h, v20.8h, v21.8h\n"
- "ldr q23, [x16, x2]\n"
+ "ldr q23, [x13, x2]\n"
"fmla v31.8h, v20.8h, v10.8h\n"
- "ldr q22, [x16, x6]\n"
+ "ldr q22, [x13, x7]\n"
"fmla v29.8h, v20.8h, v1.8h\n"
"fmla v28.8h, v20.8h, v26.8h\n"
- "ldr q21, [x8, #0x90]\n"
+ "ldr q21, [x6, #0x90]\n"
"fmla v30.8h, v19.8h, v14.8h\n"
- "ldr q5, [x16, x11]\n"
+ "ldr q5, [x13, x16]\n"
"fmla v31.8h, v19.8h, v6.8h\n"
"fmla v29.8h, v19.8h, v24.8h\n"
"fmla v28.8h, v19.8h, v23.8h\n"
- "ldr q11, [x8, #0xa0]\n"
+ "ldr q11, [x6, #0xa0]\n"
"fmla v30.8h, v18.8h, v6.8h\n"
- "ldr q20, [x16, x15]\n"
+ "ldr q20, [x13, x8]\n"
"fmla v31.8h, v18.8h, v2.8h\n"
"fmla v29.8h, v18.8h, v23.8h\n"
"fmla v28.8h, v18.8h, v22.8h\n"
- "ldr q18, [x8, #0xb0]\n"
+ "ldr q18, [x6, #0xb0]\n"
"fmla v30.8h, v17.8h, v2.8h\n"
- "ldr q19, [x16, x13]\n"
+ "ldr q19, [x13, x17]\n"
+ "add x13, x13, #0x10\n"
"fmla v31.8h, v17.8h, v0.8h\n"
- "add x16, x16, #0x10\n"
"fmla v29.8h, v17.8h, v22.8h\n"
"fmla v28.8h, v17.8h, v20.8h\n"
- "ldr q17, [x8, #0xc0]\n"
+ "ldr q17, [x6, #0xc0]\n"
"fmla v30.8h, v16.8h, v0.8h\n"
- "ld1 { v0.8h }, [x14]\n"
+ "ld1 { v0.8h }, [x12]\n"
"fmla v31.8h, v16.8h, v1.8h\n"
"fmla v29.8h, v16.8h, v20.8h\n"
"fmla v28.8h, v16.8h, v19.8h\n"
- "ldr q16, [x8, #0xd0]\n"
+ "ldr q16, [x6, #0xd0]\n"
"fmla v30.8h, v21.8h, v1.8h\n"
- "ldr q4, [x14, x2]\n"
+ "ldr q4, [x12, x2]\n"
"fmla v31.8h, v21.8h, v26.8h\n"
- "ldr q12, [x14, x13]\n"
+ "ldr q12, [x12, x17]\n"
"fmla v29.8h, v21.8h, v19.8h\n"
"fmla v28.8h, v21.8h, v5.8h\n"
- "ldr q13, [x8, #0xe0]\n"
+ "ldr q13, [x6, #0xe0]\n"
"fmla v30.8h, v11.8h, v24.8h\n"
- "ldr q6, [x14, x6]\n"
+ "ldr q6, [x12, x7]\n"
"fmla v31.8h, v11.8h, v23.8h\n"
"fmla v29.8h, v11.8h, v0.8h\n"
"fmla v28.8h, v11.8h, v4.8h\n"
- "ldr q24, [x8, #0xf0]\n"
+ "ldr q24, [x6, #0xf0]\n"
"fmla v30.8h, v18.8h, v23.8h\n"
- "ldr q26, [x14, x15]\n"
+ "ldr q26, [x12, x8]\n"
"fmla v31.8h, v18.8h, v22.8h\n"
"fmla v29.8h, v18.8h, v4.8h\n"
"fmla v28.8h, v18.8h, v6.8h\n"
- "ldr q23, [x8, #0x100]\n"
+ "ldr q23, [x6, #0x100]\n"
"fmla v30.8h, v17.8h, v22.8h\n"
- "ldr q22, [x14, x11]\n"
+ "ldr q22, [x12, x16]\n"
+ "add x12, x12, #0x10\n"
"fmla v31.8h, v17.8h, v20.8h\n"
- "add x14, x14, #0x10\n"
"fmla v29.8h, v17.8h, v6.8h\n"
"fmla v28.8h, v17.8h, v26.8h\n"
- "ldr q21, [x8, #0x110]\n"
+ "ldr q21, [x6, #0x110]\n"
"fmla v30.8h, v16.8h, v20.8h\n"
- "ld1 { v18.8h }, [x12]\n"
+ "ld1 { v18.8h }, [x11]\n"
"fmla v31.8h, v16.8h, v19.8h\n"
"fmla v29.8h, v16.8h, v26.8h\n"
"fmla v28.8h, v16.8h, v12.8h\n"
- "ldr q20, [x8, #0x120]\n"
+ "ldr q20, [x6, #0x120]\n"
"fmla v30.8h, v13.8h, v19.8h\n"
- "ldr q17, [x12, x2]\n"
+ "ldr q17, [x11, x2]\n"
"fmla v31.8h, v13.8h, v5.8h\n"
- "ld1 { v14.8h }, [x17]\n"
+ "ld1 { v14.8h }, [x14]\n"
"fmla v29.8h, v13.8h, v12.8h\n"
"fmla v28.8h, v13.8h, v22.8h\n"
- "ldr q19, [x8, #0x130]\n"
+ "ldr q19, [x6, #0x130]\n"
"fmla v30.8h, v24.8h, v0.8h\n"
- "ldr q16, [x12, x6]\n"
+ "ldr q16, [x11, x7]\n"
"fmla v31.8h, v24.8h, v4.8h\n"
"fmla v29.8h, v24.8h, v18.8h\n"
- "ldr q18, [x12, x15]\n"
+ "ldr q18, [x11, x8]\n"
"fmla v28.8h, v24.8h, v17.8h\n"
- "ldr q0, [x8, #0x150]\n"
+ "ldr q0, [x6, #0x150]\n"
"fmla v30.8h, v23.8h, v4.8h\n"
- "ldr q13, [x7, x6]\n"
+ "ldr q13, [x15, x7]\n"
"fmla v31.8h, v23.8h, v6.8h\n"
"fmla v29.8h, v23.8h, v17.8h\n"
- "ldr q17, [x12, x13]\n"
+ "ldr q17, [x11, x17]\n"
"fmla v28.8h, v23.8h, v16.8h\n"
- "ldr q1, [x8, #0x160]\n"
+ "ldr q1, [x6, #0x160]\n"
"fmla v30.8h, v21.8h, v6.8h\n"
"ld1 { v5.8h }, [x4]\n"
"fmla v31.8h, v21.8h, v26.8h\n"
"fmla v29.8h, v21.8h, v16.8h\n"
- "ldr q16, [x12, x11]\n"
+ "ldr q16, [x11, x16]\n"
+ "add x11, x11, #0x10\n"
"fmla v28.8h, v21.8h, v18.8h\n"
- "ldr q2, [x8, #0x170]\n"
+ "ldr q2, [x6, #0x170]\n"
"fmla v30.8h, v20.8h, v26.8h\n"
"ldr q6, [x4, x2]\n"
"fmla v31.8h, v20.8h, v12.8h\n"
- "add x12, x12, #0x10\n"
"fmla v29.8h, v20.8h, v18.8h\n"
- "ldr q11, [x4, x15]\n"
+ "ldr q11, [x4, x8]\n"
"fmla v28.8h, v20.8h, v17.8h\n"
- "ldr q3, [x8, #0x180]\n"
+ "ldr q3, [x6, #0x180]\n"
"fmla v30.8h, v19.8h, v12.8h\n"
- "ldr q8, [x7, x2]\n"
+ "ldr q8, [x15, x2]\n"
"fmla v31.8h, v19.8h, v22.8h\n"
- "ldr q10, [x7, x11]\n"
+ "ldr q10, [x15, x16]\n"
"fmla v29.8h, v19.8h, v17.8h\n"
- "ldr q12, [x4, x13]\n"
+ "ldr q12, [x4, x17]\n"
"fmla v28.8h, v19.8h, v16.8h\n"
- "ldr q9, [x4, x6]\n"
- "ldr q4, [x8, #0x190]\n"
+ "ldr q9, [x4, x7]\n"
+ "ldr q4, [x6, #0x190]\n"
+ "add x6, x6, #0x1a0\n"
"fmax v30.8h, v30.8h, v27.8h\n"
"fmax v31.8h, v31.8h, v27.8h\n"
- "add x8, x8, #0x1a0\n"
"fmax v29.8h, v29.8h, v27.8h\n"
"fmax v28.8h, v28.8h, v27.8h\n"
"fmin v30.8h, v30.8h, v15.8h\n"
"fmin v31.8h, v31.8h, v15.8h\n"
- "st1 { v30.8h }, [x5]\n"
"fmin v29.8h, v29.8h, v15.8h\n"
"fmin v28.8h, v28.8h, v15.8h\n"
+ "st1 { v30.8h }, [x5]\n"
"str q31, [x5, x3]\n"
"add x5, x5, #0x10\n"
"st1 { v29.8h }, [x10]\n"
@@ -340,163 +340,163 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"blt 2b\n"
"3:" // Tile loop: Channel tail
"mov v31.16b, v25.16b\n fmla v31.8h, v0.8h, v5.8h\n"
- "ldr q22, [x7, x15]\n"
+ "ldr q22, [x15, x8]\n"
"mov v5.16b, v25.16b\n fmla v5.8h, v0.8h, v6.8h\n"
"mov v30.16b, v25.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v29.16b, v25.16b\n fmla v29.8h, v0.8h, v8.8h\n"
- "ldr q19, [x8, #0x0]\n"
+ "ldr q19, [x6, #0x0]\n"
"fmla v31.8h, v1.8h, v6.8h\n"
- "ldr q21, [x7, x13]\n"
+ "ldr q21, [x15, x17]\n"
+ "add x15, x15, #0x10\n"
"fmla v5.8h, v1.8h, v9.8h\n"
- "add x7, x7, #0x10\n"
"fmla v30.8h, v1.8h, v8.8h\n"
"fmla v29.8h, v1.8h, v13.8h\n"
- "ldr q18, [x8, #0x10]\n"
+ "ldr q18, [x6, #0x10]\n"
"fmla v31.8h, v2.8h, v9.8h\n"
- "ldr q16, [x4, x11]\n"
- "fmla v5.8h, v2.8h, v11.8h\n"
+ "ldr q16, [x4, x16]\n"
"add x4, x4, #0x10\n"
+ "fmla v5.8h, v2.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v13.8h\n"
"fmla v29.8h, v2.8h, v22.8h\n"
- "ldr q17, [x8, #0x20]\n"
+ "ldr q17, [x6, #0x20]\n"
"fmla v31.8h, v3.8h, v11.8h\n"
- "ldr q6, [x17, x2]\n"
+ "ldr q6, [x14, x2]\n"
"fmla v5.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v22.8h\n"
"fmla v29.8h, v3.8h, v21.8h\n"
- "ldr q20, [x8, #0x30]\n"
+ "ldr q20, [x6, #0x30]\n"
"fmla v31.8h, v4.8h, v12.8h\n"
- "ldr q2, [x17, x6]\n"
+ "ldr q2, [x14, x7]\n"
"fmla v5.8h, v4.8h, v16.8h\n"
- "ldr q28, [x17, x15]\n"
+ "ldr q28, [x14, x8]\n"
"fmla v30.8h, v4.8h, v21.8h\n"
"fmla v29.8h, v4.8h, v10.8h\n"
- "ldr q16, [x8, #0x40]\n"
+ "ldr q16, [x6, #0x40]\n"
"fmla v31.8h, v19.8h, v7.8h\n"
"fmla v5.8h, v19.8h, v8.8h\n"
"fmla v30.8h, v19.8h, v14.8h\n"
"fmla v29.8h, v19.8h, v6.8h\n"
- "ldr q19, [x8, #0x50]\n"
+ "ldr q19, [x6, #0x50]\n"
"fmla v31.8h, v18.8h, v8.8h\n"
- "ldr q1, [x17, x11]\n"
+ "ldr q1, [x14, x16]\n"
"fmla v5.8h, v18.8h, v13.8h\n"
"fmla v30.8h, v18.8h, v6.8h\n"
"fmla v29.8h, v18.8h, v2.8h\n"
- "ldr q18, [x8, #0x60]\n"
+ "ldr q18, [x6, #0x60]\n"
"fmla v31.8h, v17.8h, v13.8h\n"
- "ldr q26, [x17, x13]\n"
+ "ldr q26, [x14, x17]\n"
+ "add x14, x14, #0x10\n"
"fmla v5.8h, v17.8h, v22.8h\n"
- "add x17, x17, #0x10\n"
"fmla v30.8h, v17.8h, v2.8h\n"
"fmla v29.8h, v17.8h, v28.8h\n"
- "ldr q17, [x8, #0x70]\n"
+ "ldr q17, [x6, #0x70]\n"
"fmla v31.8h, v20.8h, v22.8h\n"
- "ld1 { v25.8h }, [x16]\n"
+ "ld1 { v25.8h }, [x13]\n"
"fmla v5.8h, v20.8h, v21.8h\n"
"fmla v30.8h, v20.8h, v28.8h\n"
"fmla v29.8h, v20.8h, v26.8h\n"
- "ldr q24, [x8, #0x80]\n"
+ "ldr q24, [x6, #0x80]\n"
"fmla v31.8h, v16.8h, v21.8h\n"
- "ldr q23, [x16, x2]\n"
+ "ldr q23, [x13, x2]\n"
"fmla v5.8h, v16.8h, v10.8h\n"
- "ldr q0, [x16, x6]\n"
+ "ldr q0, [x13, x7]\n"
"fmla v30.8h, v16.8h, v26.8h\n"
"fmla v29.8h, v16.8h, v1.8h\n"
- "ldr q22, [x8, #0x90]\n"
+ "ldr q22, [x6, #0x90]\n"
"fmla v31.8h, v19.8h, v14.8h\n"
- "ldr q16, [x16, x11]\n"
+ "ldr q16, [x13, x16]\n"
"fmla v5.8h, v19.8h, v6.8h\n"
"fmla v30.8h, v19.8h, v25.8h\n"
"fmla v29.8h, v19.8h, v23.8h\n"
- "ldr q21, [x8, #0xa0]\n"
+ "ldr q21, [x6, #0xa0]\n"
"fmla v31.8h, v18.8h, v6.8h\n"
- "ldr q20, [x16, x15]\n"
+ "ldr q20, [x13, x8]\n"
"fmla v5.8h, v18.8h, v2.8h\n"
"fmla v30.8h, v18.8h, v23.8h\n"
"fmla v29.8h, v18.8h, v0.8h\n"
- "ldr q18, [x8, #0xb0]\n"
+ "ldr q18, [x6, #0xb0]\n"
"fmla v31.8h, v17.8h, v2.8h\n"
- "ldr q19, [x16, x13]\n"
+ "ldr q19, [x13, x17]\n"
+ "add x13, x13, #0x10\n"
"fmla v5.8h, v17.8h, v28.8h\n"
- "add x16, x16, #0x10\n"
"fmla v30.8h, v17.8h, v0.8h\n"
"fmla v29.8h, v17.8h, v20.8h\n"
- "ldr q17, [x8, #0xc0]\n"
+ "ldr q17, [x6, #0xc0]\n"
"fmla v31.8h, v24.8h, v28.8h\n"
- "ld1 { v7.8h }, [x14]\n"
+ "ld1 { v7.8h }, [x12]\n"
"fmla v5.8h, v24.8h, v26.8h\n"
"fmla v30.8h, v24.8h, v20.8h\n"
"fmla v29.8h, v24.8h, v19.8h\n"
- "ldr q2, [x8, #0xd0]\n"
+ "ldr q2, [x6, #0xd0]\n"
"fmla v31.8h, v22.8h, v26.8h\n"
- "ldr q28, [x14, x2]\n"
+ "ldr q28, [x12, x2]\n"
"fmla v5.8h, v22.8h, v1.8h\n"
- "ldr q13, [x14, x13]\n"
+ "ldr q13, [x12, x17]\n"
"fmla v30.8h, v22.8h, v19.8h\n"
"fmla v29.8h, v22.8h, v16.8h\n"
- "ldr q14, [x8, #0xe0]\n"
+ "ldr q14, [x6, #0xe0]\n"
"fmla v31.8h, v21.8h, v25.8h\n"
- "ldr q26, [x14, x6]\n"
+ "ldr q26, [x12, x7]\n"
"fmla v5.8h, v21.8h, v23.8h\n"
"fmla v30.8h, v21.8h, v7.8h\n"
"fmla v29.8h, v21.8h, v28.8h\n"
- "ldr q25, [x8, #0xf0]\n"
+ "ldr q25, [x6, #0xf0]\n"
"fmla v31.8h, v18.8h, v23.8h\n"
- "ldr q24, [x14, x15]\n"
+ "ldr q24, [x12, x8]\n"
"fmla v5.8h, v18.8h, v0.8h\n"
"fmla v30.8h, v18.8h, v28.8h\n"
"fmla v29.8h, v18.8h, v26.8h\n"
- "ldr q23, [x8, #0x100]\n"
+ "ldr q23, [x6, #0x100]\n"
"fmla v31.8h, v17.8h, v0.8h\n"
- "ldr q22, [x14, x11]\n"
+ "ldr q22, [x12, x16]\n"
+ "add x12, x12, #0x10\n"
"fmla v5.8h, v17.8h, v20.8h\n"
- "add x14, x14, #0x10\n"
"fmla v30.8h, v17.8h, v26.8h\n"
"fmla v29.8h, v17.8h, v24.8h\n"
- "ldr q21, [x8, #0x110]\n"
+ "ldr q21, [x6, #0x110]\n"
"fmla v31.8h, v2.8h, v20.8h\n"
- "ld1 { v18.8h }, [x12]\n"
+ "ld1 { v18.8h }, [x11]\n"
"fmla v5.8h, v2.8h, v19.8h\n"
"fmla v30.8h, v2.8h, v24.8h\n"
"fmla v29.8h, v2.8h, v13.8h\n"
- "ldr q20, [x8, #0x120]\n"
+ "ldr q20, [x6, #0x120]\n"
"fmla v31.8h, v14.8h, v19.8h\n"
- "ldr q17, [x12, x2]\n"
+ "ldr q17, [x11, x2]\n"
"fmla v5.8h, v14.8h, v16.8h\n"
"fmla v30.8h, v14.8h, v13.8h\n"
"fmla v29.8h, v14.8h, v22.8h\n"
- "ldr q19, [x8, #0x130]\n"
- "add x8, x8, #0x140\n"
+ "ldr q19, [x6, #0x130]\n"
+ "add x6, x6, #0x140\n"
"fmla v31.8h, v25.8h, v7.8h\n"
- "ldr q16, [x12, x6]\n"
+ "ldr q16, [x11, x7]\n"
"fmla v5.8h, v25.8h, v28.8h\n"
"fmla v30.8h, v25.8h, v18.8h\n"
- "ldr q18, [x12, x15]\n"
+ "ldr q18, [x11, x8]\n"
"fmla v29.8h, v25.8h, v17.8h\n"
"fmla v31.8h, v23.8h, v28.8h\n"
"fmla v5.8h, v23.8h, v26.8h\n"
"fmla v30.8h, v23.8h, v17.8h\n"
- "ldr q17, [x12, x13]\n"
+ "ldr q17, [x11, x17]\n"
"fmla v29.8h, v23.8h, v16.8h\n"
"fmla v31.8h, v21.8h, v26.8h\n"
"fmla v5.8h, v21.8h, v24.8h\n"
"fmla v30.8h, v21.8h, v16.8h\n"
- "ldr q16, [x12, x11]\n"
+ "ldr q16, [x11, x16]\n"
+ "add x11, x11, #0x10\n"
"fmla v29.8h, v21.8h, v18.8h\n"
- "add x12, x12, #0x10\n"
"fmla v31.8h, v20.8h, v24.8h\n"
"fmla v5.8h, v20.8h, v13.8h\n"
"fmla v30.8h, v20.8h, v18.8h\n"
"fmla v29.8h, v20.8h, v17.8h\n"
"fmla v31.8h, v19.8h, v13.8h\n"
"fmla v5.8h, v19.8h, v22.8h\n"
- "fmax v31.8h, v31.8h, v27.8h\n"
"fmla v30.8h, v19.8h, v17.8h\n"
"fmla v29.8h, v19.8h, v16.8h\n"
+ "fmax v31.8h, v31.8h, v27.8h\n"
"fmax v5.8h, v5.8h, v27.8h\n"
+ "fmin v31.8h, v31.8h, v15.8h\n"
"fmax v30.8h, v30.8h, v27.8h\n"
"fmax v29.8h, v29.8h, v27.8h\n"
- "fmin v31.8h, v31.8h, v15.8h\n"
"fmin v5.8h, v5.8h, v15.8h\n"
"st1 { v31.8h }, [x5]\n"
"fmin v30.8h, v30.8h, v15.8h\n"
@@ -509,23 +509,23 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x7\n"
"beq 117f\n"
- "ldr q25, [x8, #0x0]\n"
- "ldr q0, [x8, #0x10]\n"
+ "ldr q25, [x6, #0x0]\n"
+ "ldr q0, [x6, #0x10]\n"
"add x9, x4, XZR\n"
"add x28, x4, x2\n"
- "ldr q1, [x8, #0x20]\n"
- "ldr q2, [x8, #0x30]\n"
- "add x27, x7, XZR\n"
- "add x26, x7, x2\n"
- "ldr q3, [x8, #0x40]\n"
- "ldr q4, [x8, #0x50]\n"
- "add x25, x4, x6\n"
- "add x24, x7, x6\n"
- "add x23, x4, x15\n"
- "add x22, x4, x13\n"
- "add x21, x7, x11\n"
- "add x20, x17, XZR\n"
- "add x8, x8, #0x60\n"
+ "ldr q1, [x6, #0x20]\n"
+ "ldr q2, [x6, #0x30]\n"
+ "add x27, x15, XZR\n"
+ "add x26, x15, x2\n"
+ "ldr q3, [x6, #0x40]\n"
+ "ldr q4, [x6, #0x50]\n"
+ "add x25, x4, x7\n"
+ "add x24, x15, x7\n"
+ "add x23, x4, x8\n"
+ "add x22, x4, x17\n"
+ "add x21, x15, x16\n"
+ "add x20, x14, XZR\n"
+ "add x6, x6, #0x60\n"
"tbz %x[n_channels], #2, 6f\n"
"ldr d5, [x9], #0x8\n"
"ldr d6, [x28], #0x8\n"
@@ -611,7 +611,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"8:" // Tile loop: Oddments: Load inputs: (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 2: End
"mov v28.16b, v25.16b\n fmla v28.8h, v0.8h, v5.8h\n"
"mov v29.16b, v25.16b\n fmla v29.8h, v0.8h, v6.8h\n"
- "add x20, x7, x15\n"
+ "add x20, x15, x8\n"
"mov v30.16b, v25.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v31.16b, v25.16b\n fmla v31.8h, v0.8h, v8.8h\n"
"fmla v28.8h, v1.8h, v6.8h\n"
@@ -643,7 +643,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"12:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 2: End
"fmla v31.8h, v2.8h, v5.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x7, x13\n"
+ "add x20, x15, x17\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v5.8h\n"
"tbz %x[n_channels], #2, 14f\n"
@@ -668,7 +668,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"16:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 2: End
"fmla v31.8h, v3.8h, v6.8h\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "add x20, x4, x11\n"
+ "add x20, x4, x16\n"
"tbz %x[n_channels], #2, 18f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #1, 17f\n"
@@ -689,13 +689,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"19:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 2: Unset: Bit 1: Unset
"ldr h9, [x20, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 2: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x6, #0x0]\n"
"fmla v29.8h, v4.8h, v9.8h\n"
"fmla v30.8h, v4.8h, v6.8h\n"
- "add x20, x17, x2\n"
+ "add x20, x14, x2\n"
"fmla v31.8h, v4.8h, v10.8h\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v0.8h, v7.8h\n"
- "add x8, x8, #0x10\n"
"fmla v29.8h, v0.8h, v8.8h\n"
"fmla v30.8h, v0.8h, v14.8h\n"
"tbz %x[n_channels], #2, 22f\n"
@@ -718,13 +718,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"23:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: Unset: Bit 1: Unset
"ldr h11, [x20, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 2: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x6, #0x0]\n"
"fmla v31.8h, v0.8h, v11.8h\n"
+ "add x20, x14, x7\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v1.8h, v8.8h\n"
- "add x20, x17, x6\n"
"fmla v29.8h, v1.8h, v13.8h\n"
"fmla v30.8h, v1.8h, v11.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 26f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 25f\n"
@@ -745,13 +745,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"27:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: Unset: Bit 1: Unset
"ldr h12, [x20, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 2: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x6, #0x0]\n"
"fmla v31.8h, v1.8h, v12.8h\n"
+ "add x20, x14, x8\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v2.8h, v13.8h\n"
- "add x20, x17, x15\n"
"fmla v29.8h, v2.8h, v5.8h\n"
"fmla v30.8h, v2.8h, v12.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 30f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #1, 29f\n"
@@ -772,13 +772,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"31:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: Unset: Bit 1: Unset
"ldr h9, [x20, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 2: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x6, #0x0]\n"
"fmla v31.8h, v2.8h, v9.8h\n"
+ "add x20, x14, x17\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v3.8h, v5.8h\n"
- "add x20, x17, x13\n"
"fmla v29.8h, v3.8h, v6.8h\n"
"fmla v30.8h, v3.8h, v9.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 34f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #1, 33f\n"
@@ -799,13 +799,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"35:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: Unset: Bit 1: Unset
"ldr h13, [x20, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 2: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
"fmla v31.8h, v3.8h, v13.8h\n"
+ "add x20, x14, x16\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v4.8h, v6.8h\n"
- "add x20, x17, x11\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v13.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 38f\n"
"ldr d8, [x20], #0x8\n"
"tbz %x[n_channels], #1, 37f\n"
@@ -826,12 +826,12 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"39:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: Unset: Bit 1: Unset
"ldr h8, [x20, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 2: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x6, #0x0]\n"
"fmla v31.8h, v4.8h, v8.8h\n"
+ "add x20, x13, XZR\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v0.8h, v14.8h\n"
- "add x20, x16, XZR\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 42f\n"
"ldr d5, [x20], #0x8\n"
"tbz %x[n_channels], #1, 41f\n"
@@ -853,7 +853,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"ldr h5, [x20, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 2: End
"fmla v30.8h, v0.8h, v5.8h\n"
- "add x20, x16, x2\n"
+ "add x20, x13, x2\n"
"tbz %x[n_channels], #2, 46f\n"
"ldr d6, [x20], #0x8\n"
"tbz %x[n_channels], #1, 45f\n"
@@ -874,13 +874,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"47:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: Unset: Bit 1: Unset
"ldr h6, [x20, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 2: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x6, #0x0]\n"
"fmla v31.8h, v0.8h, v6.8h\n"
+ "add x20, x13, x7\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v1.8h, v11.8h\n"
- "add x20, x16, x6\n"
"fmla v29.8h, v1.8h, v12.8h\n"
"fmla v30.8h, v1.8h, v6.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 50f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 49f\n"
@@ -901,13 +901,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"51:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: Unset: Bit 1: Unset
"ldr h10, [x20, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 2: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x6, #0x0]\n"
"fmla v31.8h, v1.8h, v10.8h\n"
+ "add x20, x13, x8\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v2.8h, v12.8h\n"
- "add x20, x16, x15\n"
"fmla v29.8h, v2.8h, v9.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 54f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 53f\n"
@@ -928,13 +928,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"55:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: Unset: Bit 1: Unset
"ldr h11, [x20, #0x0]\n"
"56:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 2: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x6, #0x0]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
+ "add x20, x13, x17\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v3.8h, v9.8h\n"
- "add x20, x16, x13\n"
"fmla v29.8h, v3.8h, v13.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 58f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 57f\n"
@@ -955,13 +955,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"59:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: Unset: Bit 1: Unset
"ldr h12, [x20, #0x0]\n"
"60:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 2: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
+ "add x20, x13, x16\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v4.8h, v13.8h\n"
- "add x20, x16, x11\n"
"fmla v29.8h, v4.8h, v8.8h\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 62f\n"
"ldr d14, [x20], #0x8\n"
"tbz %x[n_channels], #1, 61f\n"
@@ -982,12 +982,12 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"63:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: Unset: Bit 1: Unset
"ldr h14, [x20, #0x0]\n"
"64:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 2: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x6, #0x0]\n"
"fmla v31.8h, v4.8h, v14.8h\n"
+ "add x20, x12, XZR\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v0.8h, v5.8h\n"
- "add x20, x14, XZR\n"
"fmla v29.8h, v0.8h, v6.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 66f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #1, 65f\n"
@@ -1009,7 +1009,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"ldr h9, [x20, #0x0]\n"
"68:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 2: End
"fmla v30.8h, v0.8h, v9.8h\n"
- "add x20, x14, x2\n"
+ "add x20, x12, x2\n"
"tbz %x[n_channels], #2, 70f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #1, 69f\n"
@@ -1030,13 +1030,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"71:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: Unset: Bit 1: Unset
"ldr h13, [x20, #0x0]\n"
"72:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 2: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x6, #0x0]\n"
"fmla v31.8h, v0.8h, v13.8h\n"
+ "add x20, x12, x7\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v1.8h, v6.8h\n"
- "add x20, x14, x6\n"
"fmla v29.8h, v1.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v13.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 74f\n"
"ldr d5, [x20], #0x8\n"
"tbz %x[n_channels], #1, 73f\n"
@@ -1057,13 +1057,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"75:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: Unset: Bit 1: Unset
"ldr h5, [x20, #0x0]\n"
"76:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 2: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x6, #0x0]\n"
"fmla v31.8h, v1.8h, v5.8h\n"
+ "add x20, x12, x8\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v2.8h, v10.8h\n"
- "add x20, x14, x15\n"
"fmla v29.8h, v2.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v5.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 78f\n"
"ldr d6, [x20], #0x8\n"
"tbz %x[n_channels], #1, 77f\n"
@@ -1084,13 +1084,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"79:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: Unset: Bit 1: Unset
"ldr h6, [x20, #0x0]\n"
"80:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 2: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x6, #0x0]\n"
"fmla v31.8h, v2.8h, v6.8h\n"
+ "add x20, x12, x17\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x14, x13\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v6.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 82f\n"
"ldr d8, [x20], #0x8\n"
"tbz %x[n_channels], #1, 81f\n"
@@ -1111,13 +1111,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"83:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: Unset: Bit 1: Unset
"ldr h8, [x20, #0x0]\n"
"84:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 2: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
"fmla v31.8h, v3.8h, v8.8h\n"
+ "add x20, x12, x16\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "add x20, x14, x11\n"
"fmla v29.8h, v4.8h, v14.8h\n"
"fmla v30.8h, v4.8h, v8.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 86f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #1, 85f\n"
@@ -1138,12 +1138,12 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"87:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: Unset: Bit 1: Unset
"ldr h10, [x20, #0x0]\n"
"88:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 2: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x6, #0x0]\n"
"fmla v31.8h, v4.8h, v10.8h\n"
+ "add x20, x11, XZR\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v0.8h, v9.8h\n"
- "add x20, x12, XZR\n"
"fmla v29.8h, v0.8h, v13.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 90f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 89f\n"
@@ -1165,7 +1165,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"ldr h11, [x20, #0x0]\n"
"92:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 2: End
"fmla v30.8h, v0.8h, v11.8h\n"
- "add x20, x12, x2\n"
+ "add x20, x11, x2\n"
"tbz %x[n_channels], #2, 94f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 93f\n"
@@ -1186,13 +1186,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"95:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: Unset: Bit 1: Unset
"ldr h12, [x20, #0x0]\n"
"96:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 2: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x6, #0x0]\n"
"fmla v31.8h, v0.8h, v12.8h\n"
+ "add x20, x11, x7\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v1.8h, v13.8h\n"
- "add x20, x12, x6\n"
"fmla v29.8h, v1.8h, v5.8h\n"
"fmla v30.8h, v1.8h, v12.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 98f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #1, 97f\n"
@@ -1213,13 +1213,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"99:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: Unset: Bit 1: Unset
"ldr h9, [x20, #0x0]\n"
"100:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 2: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x6, #0x0]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
+ "add x20, x11, x8\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v2.8h, v5.8h\n"
- "add x20, x12, x15\n"
"fmla v29.8h, v2.8h, v6.8h\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 102f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #1, 101f\n"
@@ -1240,13 +1240,13 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"103:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: Unset: Bit 1: Unset
"ldr h11, [x20, #0x0]\n"
"104:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 2: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x6, #0x0]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
+ "add x20, x11, x17\n"
+ "add x6, x6, #0x10\n"
"fmla v28.8h, v3.8h, v6.8h\n"
- "add x20, x12, x13\n"
"fmla v29.8h, v3.8h, v8.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #2, 106f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #1, 105f\n"
@@ -1267,10 +1267,10 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"107:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: Unset: Bit 1: Unset
"ldr h12, [x20, #0x0]\n"
"108:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 2: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
+ "add x20, x11, x16\n"
"fmla v28.8h, v4.8h, v8.8h\n"
- "add x20, x12, x11\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v12.8h\n"
"tbz %x[n_channels], #2, 110f\n"
@@ -1297,27 +1297,27 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"fmax v28.8h, v28.8h, v27.8h\n"
"fmax v29.8h, v29.8h, v27.8h\n"
"fmax v30.8h, v30.8h, v27.8h\n"
- "fmax v31.8h, v31.8h, v27.8h\n"
"fmin v28.8h, v28.8h, v15.8h\n"
+ "fmax v31.8h, v31.8h, v27.8h\n"
"fmin v29.8h, v29.8h, v15.8h\n"
"fmin v30.8h, v30.8h, v15.8h\n"
"fmin v31.8h, v31.8h, v15.8h\n"
"tbz %x[n_channels], #2, 114f\n"
"mov x21, x5\n"
"mov x20, x10\n"
- "st1 { v28.d }[0], [x21], x3\n"
- "st1 { v30.d }[0], [x20], x3\n"
"add x5, x5, #0x8\n"
"add x10, x10, #0x8\n"
+ "st1 { v28.d }[0], [x21], x3\n"
+ "st1 { v30.d }[0], [x20], x3\n"
"st1 { v29.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #1, 113f\n"
"mov x21, x5\n"
"mov x20, x10\n"
- "st1 { v28.s }[2], [x21], x3\n"
- "st1 { v30.s }[2], [x20], x3\n"
"add x5, x5, #0x4\n"
"add x10, x10, #0x4\n"
+ "st1 { v28.s }[2], [x21], x3\n"
+ "st1 { v30.s }[2], [x20], x3\n"
"st1 { v29.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"tbz %x[n_channels], #0, 116f\n"
@@ -1341,10 +1341,10 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"tbz %x[n_channels], #1, 115f\n"
"mov x21, x5\n"
"mov x20, x10\n"
- "st1 { v28.s }[0], [x21], x3\n"
- "st1 { v30.s }[0], [x20], x3\n"
"add x5, x5, #0x4\n"
"add x10, x10, #0x4\n"
+ "st1 { v28.s }[0], [x21], x3\n"
+ "st1 { v30.s }[0], [x20], x3\n"
"st1 { v29.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"tbz %x[n_channels], #0, 116f\n"
@@ -1364,16 +1364,16 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"st1 { v31.h }[0], [x20]\n"
"116:" // Tile loop: Oddments: Store: Bit 2: End
"117:" // Tile loop: End
- "ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x26, x26, #0x1\n"
- "add x21, x27, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x26, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x27, x27, x21, LT\n"
- "csel x26, x26, XZR, LT\n"
- "cmp x27, x20\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x10, x10, #0x1\n"
+ "add x20, x11, #0x1\n"
+ "cmp x10, x22\n"
+ "csel x11, x11, x20, LT\n"
+ "csel x10, x10, XZR, LT\n"
+ "cmp x11, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 4913340c4c..c8a599b0a9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,478 +98,478 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x17, #0x10\n" // cntb _, ALL, #1
- "lsr x9, %x[n_channels], #0x3\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "mov x8, #0x10\n" // cntb _, ALL, #1
+ "lsr x17, %x[n_channels], #0x3\n"
"ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v27.8h }, [x20]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v27.8h }, [x21]\n"
"ld1r { v15.8h }, [x20]\n"
"add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "ldp x12, x11, [x21, #0x10]\n"
- "mov x10, #0x0\n"
- "sub x28, XZR, x17\n"
- "cbz x9, 3f\n"
+ "mov x14, #0x0\n"
+ "ldp x13, x12, [x22, #0x0]\n"
+ "ldp x11, x10, [x22, #0x10]\n"
+ "sub x9, XZR, x8\n"
+ "cbz x17, 3f\n"
"ldr q26, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "cmp x17, x9, LSL #4\n"
+ "cmp x8, x17, LSL #4\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
"ldr q3, [x16, #0x40]\n"
"ldr q4, [x16, #0x50]\n"
"add x16, x16, #0x60\n"
- "ldp x21, x20, [x15, #0x0]\n"
- "ldr q5, [x21, x10]\n"
- "ldr q6, [x20, x10]\n"
- "ldp x21, x20, [x15, #0x10]\n"
- "ldr q7, [x21, x10]\n"
- "ldr q8, [x20, x10]\n"
- "ldp x21, x20, [x15, #0x20]\n"
- "ldr q9, [x21, x10]\n"
- "ldr q13, [x20, x10]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ "ldp x23, x22, [x15, #0x20]\n"
"ldp x21, x20, [x15, #0x30]\n"
- "ldr q11, [x21, x10]\n"
- "ldr q12, [x20, x10]\n"
+ "ldr q5, [x27, x14]\n"
+ "ldr q6, [x26, x14]\n"
+ "ldr q7, [x25, x14]\n"
+ "ldr q8, [x24, x14]\n"
+ "ldr q9, [x23, x14]\n"
+ "ldr q13, [x22, x14]\n"
+ "ldr q11, [x21, x14]\n"
+ "ldr q12, [x20, x14]\n"
"ldp x21, x20, [x15, #0x40]\n"
- "ldr q10, [x21, x10]\n"
- "ldr q14, [x20, x10]\n"
+ "ldr q10, [x21, x14]\n"
+ "ldr q14, [x20, x14]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v30.16b, v26.16b\n fmla v30.8h, v0.8h, v5.8h\n"
"mov v31.16b, v26.16b\n fmla v31.8h, v0.8h, v6.8h\n"
- "ldr x20, [x15, #0x50]\n"
- "ldr q24, [x20, x10]\n"
- "mov v28.16b, v26.16b\n fmla v28.8h, v0.8h, v7.8h\n"
- "mov v29.16b, v26.16b\n fmla v29.8h, v0.8h, v8.8h\n"
+ "ldr x21, [x15, #0x50]\n"
+ "ldr x20, [x15, #0x58]\n"
+ "mov v29.16b, v26.16b\n fmla v29.8h, v0.8h, v7.8h\n"
+ "mov v28.16b, v26.16b\n fmla v28.8h, v0.8h, v8.8h\n"
"ldr q23, [x16, #0x0]\n"
"ldr q26, [x16, #0x140]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "ldr x25, [x15, #0x68]\n"
+ "add x9, x9, #0x10\n"
+ "ldr q22, [x21, x14]\n"
+ "ldr x24, [x15, #0x70]\n"
"fmla v30.8h, v1.8h, v6.8h\n"
+ "ldr q21, [x20, x14]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
- "ldr x20, [x15, #0x58]\n"
- "ldr q22, [x20, x10]\n"
- "fmla v28.8h, v1.8h, v8.8h\n"
- "fmla v29.8h, v1.8h, v13.8h\n"
- "ldr q21, [x16, #0x10]\n"
- "ldr x20, [x15, #0x60]\n"
+ "ldr x21, [x15, #0x78]\n"
+ "fmla v29.8h, v1.8h, v8.8h\n"
+ "fmla v28.8h, v1.8h, v13.8h\n"
+ "ldr q0, [x16, #0x10]\n"
+ "ldr x27, [x15, #0x80]\n"
+ "ldr x20, [x15, #0x88]\n"
+ "ldr x23, [x15, #0x90]\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "ldr q17, [x20, x10]\n"
+ "ldr q18, [x22, x14]\n"
+ "ldr x26, [x15, #0x98]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
- "ldr x20, [x15, #0x68]\n"
- "fmla v28.8h, v2.8h, v13.8h\n"
- "fmla v29.8h, v2.8h, v24.8h\n"
+ "fmla v29.8h, v2.8h, v13.8h\n"
+ "ldr x22, [x15, #0xa0]\n"
+ "fmla v28.8h, v2.8h, v22.8h\n"
"ldr q16, [x16, #0x20]\n"
- "ldr x22, [x15, #0x70]\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "ldr q5, [x20, x10]\n"
+ "ldr q20, [x25, x14]\n"
+ "ldr x25, [x15, #0xa8]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla v28.8h, v3.8h, v24.8h\n"
"fmla v29.8h, v3.8h, v22.8h\n"
- "ldr q20, [x16, #0x30]\n"
- "ldr x21, [x15, #0x80]\n"
+ "fmla v28.8h, v3.8h, v21.8h\n"
+ "ldr q17, [x16, #0x30]\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "ldr q19, [x22, x10]\n"
- "fmla v31.8h, v4.8h, v17.8h\n"
- "ldr q2, [x20, x10]\n"
- "fmla v28.8h, v4.8h, v22.8h\n"
- "fmla v29.8h, v4.8h, v10.8h\n"
- "ldr q18, [x16, #0x40]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr q3, [x24, x14]\n"
+ "ldr x24, [x15, #0xb0]\n"
+ "fmla v31.8h, v4.8h, v18.8h\n"
+ "ldr q2, [x21, x14]\n"
+ "ldr x21, [x15, #0xb8]\n"
+ "fmla v29.8h, v4.8h, v21.8h\n"
+ "fmla v28.8h, v4.8h, v10.8h\n"
+ "ldr q19, [x16, #0x40]\n"
"fmla v30.8h, v23.8h, v7.8h\n"
"fmla v31.8h, v23.8h, v8.8h\n"
- "ldr x23, [x15, #0x90]\n"
- "ldr x26, [x15, #0x98]\n"
- "fmla v28.8h, v23.8h, v14.8h\n"
- "fmla v29.8h, v23.8h, v5.8h\n"
- "ldr q1, [x16, #0x50]\n"
- "ldr x22, [x15, #0xa0]\n"
- "fmla v30.8h, v21.8h, v8.8h\n"
- "ldr q25, [x20, x10]\n"
- "fmla v31.8h, v21.8h, v13.8h\n"
- "ldr x25, [x15, #0xa8]\n"
- "fmla v28.8h, v21.8h, v5.8h\n"
- "fmla v29.8h, v21.8h, v19.8h\n"
- "ldr q17, [x16, #0x60]\n"
- "ldr x24, [x15, #0xb0]\n"
+ "fmla v29.8h, v23.8h, v14.8h\n"
+ "fmla v28.8h, v23.8h, v20.8h\n"
+ "ldr q18, [x16, #0x50]\n"
+ "fmla v30.8h, v0.8h, v8.8h\n"
+ "ldr q25, [x20, x14]\n"
+ "ldr x28, [x15, #0xc8]\n"
+ "fmla v31.8h, v0.8h, v13.8h\n"
+ "fmla v29.8h, v0.8h, v20.8h\n"
+ "fmla v28.8h, v0.8h, v3.8h\n"
+ "ldr q11, [x16, #0x60]\n"
"fmla v30.8h, v16.8h, v13.8h\n"
- "ldr q8, [x21, x10]\n"
- "fmla v31.8h, v16.8h, v24.8h\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla v28.8h, v16.8h, v19.8h\n"
- "fmla v29.8h, v16.8h, v2.8h\n"
+ "ldr q24, [x27, x14]\n"
+ "ldr x20, [x15, #0xc0]\n"
+ "fmla v31.8h, v16.8h, v22.8h\n"
+ "fmla v29.8h, v16.8h, v3.8h\n"
+ "fmla v28.8h, v16.8h, v2.8h\n"
"ldr q16, [x16, #0x70]\n"
- "ldr x21, [x15, #0xc0]\n"
- "fmla v30.8h, v20.8h, v24.8h\n"
- "ldr q24, [x23, x10]\n"
- "fmla v31.8h, v20.8h, v22.8h\n"
- "ldr x27, [x15, #0xc8]\n"
- "fmla v28.8h, v20.8h, v2.8h\n"
- "fmla v29.8h, v20.8h, v8.8h\n"
- "ldr q23, [x16, #0x80]\n"
+ "fmla v30.8h, v17.8h, v22.8h\n"
+ "ldr q5, [x23, x14]\n"
"ldr x23, [x15, #0xd0]\n"
- "fmla v30.8h, v18.8h, v22.8h\n"
- "ldr q22, [x26, x10]\n"
- "fmla v31.8h, v18.8h, v10.8h\n"
- "ldr q21, [x22, x10]\n"
- "fmla v28.8h, v18.8h, v8.8h\n"
- "fmla v29.8h, v18.8h, v25.8h\n"
- "ldr q20, [x16, #0x90]\n"
- "ldr x22, [x15, #0xd8]\n"
- "fmla v30.8h, v1.8h, v14.8h\n"
- "ldr q0, [x20, x10]\n"
- "fmla v31.8h, v1.8h, v5.8h\n"
- "ldr x20, [x15, #0xe0]\n"
- "fmla v28.8h, v1.8h, v24.8h\n"
- "fmla v29.8h, v1.8h, v22.8h\n"
- "ldr q6, [x16, #0xa0]\n"
+ "fmla v31.8h, v17.8h, v21.8h\n"
+ "fmla v29.8h, v17.8h, v2.8h\n"
+ "fmla v28.8h, v17.8h, v24.8h\n"
+ "ldr q17, [x16, #0x80]\n"
+ "fmla v30.8h, v19.8h, v21.8h\n"
+ "ldr q23, [x26, x14]\n"
+ "ldr x27, [x15, #0xd8]\n"
+ "fmla v31.8h, v19.8h, v10.8h\n"
+ "ldr q22, [x22, x14]\n"
+ "ldr x22, [x15, #0xe0]\n"
+ "fmla v29.8h, v19.8h, v24.8h\n"
+ "fmla v28.8h, v19.8h, v25.8h\n"
+ "ldr q21, [x16, #0x90]\n"
+ "fmla v30.8h, v18.8h, v14.8h\n"
+ "ldr q1, [x21, x14]\n"
"ldr x26, [x15, #0xf8]\n"
- "fmla v30.8h, v17.8h, v5.8h\n"
- "ldr q1, [x25, x10]\n"
- "fmla v31.8h, v17.8h, v19.8h\n"
+ "fmla v31.8h, v18.8h, v20.8h\n"
+ "fmla v29.8h, v18.8h, v5.8h\n"
+ "fmla v28.8h, v18.8h, v23.8h\n"
+ "ldr q12, [x16, #0xa0]\n"
+ "fmla v30.8h, v11.8h, v20.8h\n"
+ "ldr q0, [x25, x14]\n"
"ldr x25, [x15, #0xe8]\n"
- "fmla v28.8h, v17.8h, v22.8h\n"
- "fmla v29.8h, v17.8h, v21.8h\n"
- "ldr q18, [x16, #0xb0]\n"
- "add x28, x28, #0x10\n"
- "fmla v30.8h, v16.8h, v19.8h\n"
- "ldr q19, [x24, x10]\n"
- "fmla v31.8h, v16.8h, v2.8h\n"
+ "fmla v31.8h, v11.8h, v3.8h\n"
+ "fmla v29.8h, v11.8h, v23.8h\n"
+ "fmla v28.8h, v11.8h, v22.8h\n"
+ "ldr q20, [x16, #0xb0]\n"
+ "fmla v30.8h, v16.8h, v3.8h\n"
+ "ldr q19, [x24, x14]\n"
"ldr x24, [x15, #0xf0]\n"
- "fmla v28.8h, v16.8h, v21.8h\n"
- "fmla v29.8h, v16.8h, v1.8h\n"
- "ldr q17, [x16, #0xc0]\n"
- "fmla v30.8h, v23.8h, v2.8h\n"
- "ldr q16, [x21, x10]\n"
- "fmla v31.8h, v23.8h, v8.8h\n"
+ "fmla v31.8h, v16.8h, v2.8h\n"
+ "fmla v29.8h, v16.8h, v22.8h\n"
+ "fmla v28.8h, v16.8h, v0.8h\n"
+ "ldr q18, [x16, #0xc0]\n"
+ "fmla v30.8h, v17.8h, v2.8h\n"
+ "ldr q16, [x20, x14]\n"
"ldr x21, [x15, #0x100]\n"
- "fmla v28.8h, v23.8h, v1.8h\n"
- "fmla v29.8h, v23.8h, v19.8h\n"
- "ldr q13, [x16, #0xd0]\n"
- "fmla v30.8h, v20.8h, v8.8h\n"
- "ldr q2, [x27, x10]\n"
- "fmla v31.8h, v20.8h, v25.8h\n"
- "ldr q10, [x20, x10]\n"
- "fmla v28.8h, v20.8h, v19.8h\n"
- "fmla v29.8h, v20.8h, v0.8h\n"
- "ldr q9, [x16, #0xe0]\n"
+ "fmla v31.8h, v17.8h, v24.8h\n"
+ "fmla v29.8h, v17.8h, v0.8h\n"
+ "fmla v28.8h, v17.8h, v19.8h\n"
+ "ldr q17, [x16, #0xd0]\n"
+ "fmla v30.8h, v21.8h, v24.8h\n"
+ "ldr q14, [x28, x14]\n"
"ldr x20, [x15, #0x108]\n"
- "fmla v30.8h, v6.8h, v24.8h\n"
- "ldr q5, [x23, x10]\n"
- "fmla v31.8h, v6.8h, v22.8h\n"
+ "fmla v31.8h, v21.8h, v25.8h\n"
+ "ldr q4, [x22, x14]\n"
+ "fmla v29.8h, v21.8h, v19.8h\n"
+ "fmla v28.8h, v21.8h, v1.8h\n"
+ "ldr q7, [x16, #0xe0]\n"
+ "fmla v30.8h, v12.8h, v5.8h\n"
+ "ldr q25, [x23, x14]\n"
"ldr x23, [x15, #0x110]\n"
- "fmla v28.8h, v6.8h, v16.8h\n"
- "fmla v29.8h, v6.8h, v2.8h\n"
- "ldr q24, [x16, #0xf0]\n"
- "fmla v30.8h, v18.8h, v22.8h\n"
- "ldr q25, [x22, x10]\n"
- "fmla v31.8h, v18.8h, v21.8h\n"
+ "fmla v31.8h, v12.8h, v23.8h\n"
+ "fmla v29.8h, v12.8h, v16.8h\n"
+ "fmla v28.8h, v12.8h, v14.8h\n"
+ "ldr q11, [x16, #0xf0]\n"
+ "fmla v30.8h, v20.8h, v23.8h\n"
+ "ldr q24, [x27, x14]\n"
"ldr x22, [x15, #0x118]\n"
- "fmla v28.8h, v18.8h, v2.8h\n"
- "fmla v29.8h, v18.8h, v5.8h\n"
+ "fmla v31.8h, v20.8h, v22.8h\n"
+ "fmla v29.8h, v20.8h, v14.8h\n"
+ "fmla v28.8h, v20.8h, v25.8h\n"
"ldr q23, [x16, #0x100]\n"
- "fmla v30.8h, v17.8h, v21.8h\n"
- "ldr q22, [x25, x10]\n"
- "fmla v31.8h, v17.8h, v1.8h\n"
- "fmla v28.8h, v17.8h, v5.8h\n"
- "fmla v29.8h, v17.8h, v25.8h\n"
+ "fmla v30.8h, v18.8h, v22.8h\n"
+ "ldr q22, [x25, x14]\n"
+ "fmla v31.8h, v18.8h, v0.8h\n"
+ "fmla v29.8h, v18.8h, v25.8h\n"
+ "fmla v28.8h, v18.8h, v24.8h\n"
"ldr q21, [x16, #0x110]\n"
- "fmla v30.8h, v13.8h, v1.8h\n"
- "ldr q18, [x24, x10]\n"
- "fmla v31.8h, v13.8h, v19.8h\n"
- "fmla v28.8h, v13.8h, v25.8h\n"
- "fmla v29.8h, v13.8h, v10.8h\n"
+ "fmla v30.8h, v17.8h, v0.8h\n"
+ "ldr q18, [x24, x14]\n"
+ "fmla v31.8h, v17.8h, v19.8h\n"
+ "fmla v29.8h, v17.8h, v24.8h\n"
+ "fmla v28.8h, v17.8h, v4.8h\n"
"ldr q20, [x16, #0x120]\n"
- "fmla v30.8h, v9.8h, v19.8h\n"
- "ldr q17, [x26, x10]\n"
- "fmla v31.8h, v9.8h, v0.8h\n"
- "fmla v28.8h, v9.8h, v10.8h\n"
- "fmla v29.8h, v9.8h, v22.8h\n"
+ "fmla v30.8h, v7.8h, v19.8h\n"
+ "ldr q17, [x26, x14]\n"
+ "fmla v31.8h, v7.8h, v1.8h\n"
+ "fmla v29.8h, v7.8h, v4.8h\n"
+ "fmla v28.8h, v7.8h, v22.8h\n"
"ldr q19, [x16, #0x130]\n"
- "fmla v30.8h, v24.8h, v16.8h\n"
- "ldr q16, [x21, x10]\n"
- "fmla v31.8h, v24.8h, v2.8h\n"
- "fmla v28.8h, v24.8h, v18.8h\n"
- "ldr q18, [x20, x10]\n"
- "fmla v29.8h, v24.8h, v17.8h\n"
+ "fmla v30.8h, v11.8h, v16.8h\n"
+ "ldr q16, [x21, x14]\n"
+ "fmla v31.8h, v11.8h, v14.8h\n"
+ "fmla v29.8h, v11.8h, v18.8h\n"
+ "ldr q18, [x20, x14]\n"
+ "ldp x20, x21, [x15, #0x0]\n"
+ "fmla v28.8h, v11.8h, v17.8h\n"
"ldr q0, [x16, #0x150]\n"
- "fmla v30.8h, v23.8h, v2.8h\n"
- "fmla v31.8h, v23.8h, v5.8h\n"
- "ldp x21, x20, [x15, #0x0]\n"
- "fmla v28.8h, v23.8h, v17.8h\n"
- "ldr q17, [x23, x10]\n"
- "fmla v29.8h, v23.8h, v16.8h\n"
+ "fmla v30.8h, v23.8h, v14.8h\n"
+ "fmla v31.8h, v23.8h, v25.8h\n"
+ "fmla v29.8h, v23.8h, v17.8h\n"
+ "ldr q17, [x23, x14]\n"
+ "fmla v28.8h, v23.8h, v16.8h\n"
"ldr q1, [x16, #0x160]\n"
- "fmla v30.8h, v21.8h, v5.8h\n"
- "ldr q5, [x21, x17]\n"
- "fmla v31.8h, v21.8h, v25.8h\n"
- "fmla v28.8h, v21.8h, v16.8h\n"
- "ldr q16, [x22, x10]\n"
- "fmla v29.8h, v21.8h, v18.8h\n"
+ "fmla v30.8h, v21.8h, v25.8h\n"
+ "ldr q5, [x20, x8]\n"
+ "fmla v31.8h, v21.8h, v24.8h\n"
+ "fmla v29.8h, v21.8h, v16.8h\n"
+ "ldr q16, [x22, x14]\n"
+ "ldp x20, x26, [x15, #0x10]\n"
+ "ldp x25, x24, [x15, #0x20]\n"
+ "ldp x23, x22, [x15, #0x30]\n"
+ "add x14, x14, #0x10\n"
+ "ldr q7, [x20, x8]\n"
+ "fmla v28.8h, v21.8h, v18.8h\n"
"ldr q2, [x16, #0x170]\n"
- "fmla v30.8h, v20.8h, v25.8h\n"
- "ldr q6, [x20, x17]\n"
- "fmla v31.8h, v20.8h, v10.8h\n"
- "ldp x21, x20, [x15, #0x10]\n"
- "ldr q7, [x21, x17]\n"
- "fmla v28.8h, v20.8h, v18.8h\n"
- "fmla v29.8h, v20.8h, v17.8h\n"
+ "fmla v30.8h, v20.8h, v24.8h\n"
+ "ldr q6, [x21, x8]\n"
+ "ldp x21, x20, [x15, #0x40]\n"
+ "ldr q13, [x24, x8]\n"
+ "fmla v31.8h, v20.8h, v4.8h\n"
+ "fmla v29.8h, v20.8h, v18.8h\n"
+ "ldr q11, [x23, x8]\n"
+ "ldr q14, [x20, x8]\n"
+ "fmla v28.8h, v20.8h, v17.8h\n"
"ldr q3, [x16, #0x180]\n"
- "fmla v30.8h, v19.8h, v10.8h\n"
- "ldr q8, [x20, x17]\n"
+ "fmla v30.8h, v19.8h, v4.8h\n"
+ "ldr q8, [x26, x8]\n"
"fmla v31.8h, v19.8h, v22.8h\n"
- "ldp x21, x20, [x15, #0x20]\n"
- "ldr q13, [x20, x17]\n"
- "fmla v28.8h, v19.8h, v17.8h\n"
- "fmla v29.8h, v19.8h, v16.8h\n"
- "ldr q9, [x21, x17]\n"
+ "ldr q10, [x21, x8]\n"
+ "fmla v29.8h, v19.8h, v17.8h\n"
+ "ldr q12, [x22, x8]\n"
+ "fmla v28.8h, v19.8h, v16.8h\n"
+ "ldr q9, [x25, x8]\n"
+ "add x8, x8, #0x10\n"
"ldr q4, [x16, #0x190]\n"
- "ldp x21, x20, [x15, #0x30]\n"
+ "cmp x8, x17, LSL #4\n"
+ "add x16, x16, #0x1a0\n"
"fmax v30.8h, v30.8h, v27.8h\n"
"fmax v31.8h, v31.8h, v27.8h\n"
- "ldr q11, [x21, x17]\n"
- "ldr q12, [x20, x17]\n"
- "fmax v28.8h, v28.8h, v27.8h\n"
"fmax v29.8h, v29.8h, v27.8h\n"
- "ldp x21, x20, [x15, #0x40]\n"
- "ldr q10, [x21, x17]\n"
+ "fmax v28.8h, v28.8h, v27.8h\n"
"fmin v30.8h, v30.8h, v15.8h\n"
"fmin v31.8h, v31.8h, v15.8h\n"
- "ldr q14, [x20, x17]\n"
- "add x17, x17, #0x10\n"
- "cmp x17, x9, LSL #4\n"
- "fmin v28.8h, v28.8h, v15.8h\n"
"fmin v29.8h, v29.8h, v15.8h\n"
- "add x10, x10, #0x10\n"
- "str q30, [x14, x28]\n"
- "add x16, x16, #0x1a0\n"
- "str q31, [x13, x28]\n"
- "str q28, [x12, x28]\n"
- "str q29, [x11, x28]\n"
+ "fmin v28.8h, v28.8h, v15.8h\n"
+ "str q30, [x13, x9]\n"
+ "str q31, [x12, x9]\n"
+ "str q29, [x11, x9]\n"
+ "str q28, [x10, x9]\n"
"blt 1b\n"
"2:" // Channel tail
"mov v31.16b, v26.16b\n fmla v31.8h, v0.8h, v5.8h\n"
"mov v5.16b, v26.16b\n fmla v5.8h, v0.8h, v6.8h\n"
- "ldr x20, [x15, #0x50]\n"
- "ldr q22, [x20, x10]\n"
+ "ldr x22, [x15, #0x50]\n"
+ "ldr x21, [x15, #0x58]\n"
"mov v30.16b, v26.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v29.16b, v26.16b\n fmla v29.8h, v0.8h, v8.8h\n"
"ldr q19, [x16, #0x0]\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x20, [x15, #0x60]\n"
+ "ldr x25, [x15, #0x68]\n"
+ "ldr x24, [x15, #0x70]\n"
+ "add x9, x9, #0x10\n"
+ "ldr q22, [x22, x14]\n"
+ "ldr x23, [x15, #0x78]\n"
"fmla v31.8h, v1.8h, v6.8h\n"
- "ldr q21, [x20, x10]\n"
+ "ldr q21, [x21, x14]\n"
"fmla v5.8h, v1.8h, v9.8h\n"
- "ldr x21, [x15, #0x60]\n"
+ "ldr x27, [x15, #0x80]\n"
"fmla v30.8h, v1.8h, v8.8h\n"
"fmla v29.8h, v1.8h, v13.8h\n"
"ldr q18, [x16, #0x10]\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x22, [x15, #0x88]\n"
+ "ldr x21, [x15, #0x90]\n"
+ "ldr x26, [x15, #0x98]\n"
"fmla v31.8h, v2.8h, v9.8h\n"
- "ldr q16, [x21, x10]\n"
+ "ldr q16, [x20, x14]\n"
+ "ldr x20, [x15, #0xa0]\n"
"fmla v5.8h, v2.8h, v11.8h\n"
- "ldr x23, [x15, #0x70]\n"
"fmla v30.8h, v2.8h, v13.8h\n"
"fmla v29.8h, v2.8h, v22.8h\n"
"ldr q17, [x16, #0x20]\n"
- "ldr x21, [x15, #0x78]\n"
"fmla v31.8h, v3.8h, v11.8h\n"
- "ldr q6, [x20, x10]\n"
+ "ldr q6, [x25, x14]\n"
+ "ldr x25, [x15, #0xa8]\n"
"fmla v5.8h, v3.8h, v12.8h\n"
- "ldr x22, [x15, #0x80]\n"
"fmla v30.8h, v3.8h, v22.8h\n"
"fmla v29.8h, v3.8h, v21.8h\n"
"ldr q20, [x16, #0x30]\n"
- "ldr x20, [x15, #0x88]\n"
"fmla v31.8h, v4.8h, v12.8h\n"
- "ldr q2, [x23, x10]\n"
+ "ldr q2, [x24, x14]\n"
+ "ldr x24, [x15, #0xb0]\n"
"fmla v5.8h, v4.8h, v16.8h\n"
- "ldr q28, [x21, x10]\n"
+ "ldr q28, [x23, x14]\n"
+ "ldr x23, [x15, #0xb8]\n"
"fmla v30.8h, v4.8h, v21.8h\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"ldr q16, [x16, #0x40]\n"
- "ldr x21, [x15, #0x90]\n"
"fmla v31.8h, v19.8h, v7.8h\n"
"fmla v5.8h, v19.8h, v8.8h\n"
- "ldr x27, [x15, #0x98]\n"
- "ldr x26, [x15, #0xa0]\n"
"fmla v30.8h, v19.8h, v14.8h\n"
"fmla v29.8h, v19.8h, v6.8h\n"
"ldr q19, [x16, #0x50]\n"
- "ldr x25, [x15, #0xa8]\n"
"fmla v31.8h, v18.8h, v8.8h\n"
- "ldr q1, [x20, x10]\n"
+ "ldr q1, [x22, x14]\n"
+ "ldr x28, [x15, #0xc8]\n"
"fmla v5.8h, v18.8h, v13.8h\n"
- "ldr x24, [x15, #0xb0]\n"
"fmla v30.8h, v18.8h, v6.8h\n"
"fmla v29.8h, v18.8h, v2.8h\n"
"ldr q18, [x16, #0x60]\n"
- "ldr x20, [x15, #0xb8]\n"
"fmla v31.8h, v17.8h, v13.8h\n"
- "ldr q26, [x22, x10]\n"
+ "ldr q26, [x27, x14]\n"
+ "ldr x22, [x15, #0xc0]\n"
"fmla v5.8h, v17.8h, v22.8h\n"
- "ldr x23, [x15, #0xc0]\n"
"fmla v30.8h, v17.8h, v2.8h\n"
"fmla v29.8h, v17.8h, v28.8h\n"
"ldr q17, [x16, #0x70]\n"
- "ldr x22, [x15, #0xc8]\n"
"fmla v31.8h, v20.8h, v22.8h\n"
- "ldr q25, [x21, x10]\n"
- "fmla v5.8h, v20.8h, v21.8h\n"
+ "ldr q25, [x21, x14]\n"
"ldr x21, [x15, #0xd0]\n"
+ "fmla v5.8h, v20.8h, v21.8h\n"
"fmla v30.8h, v20.8h, v28.8h\n"
"fmla v29.8h, v20.8h, v26.8h\n"
"ldr q24, [x16, #0x80]\n"
- "add x28, x28, #0x10\n"
"fmla v31.8h, v16.8h, v21.8h\n"
- "ldr q23, [x27, x10]\n"
+ "ldr q23, [x26, x14]\n"
+ "ldr x27, [x15, #0xd8]\n"
"fmla v5.8h, v16.8h, v10.8h\n"
- "ldr q0, [x26, x10]\n"
+ "ldr q0, [x20, x14]\n"
+ "ldr x20, [x15, #0xe0]\n"
"fmla v30.8h, v16.8h, v26.8h\n"
"fmla v29.8h, v16.8h, v1.8h\n"
"ldr q22, [x16, #0x90]\n"
- "ldr x27, [x15, #0xd8]\n"
"fmla v31.8h, v19.8h, v14.8h\n"
- "ldr q16, [x20, x10]\n"
+ "ldr q16, [x23, x14]\n"
+ "ldr x26, [x15, #0xf8]\n"
"fmla v5.8h, v19.8h, v6.8h\n"
- "ldr x20, [x15, #0xe0]\n"
"fmla v30.8h, v19.8h, v25.8h\n"
"fmla v29.8h, v19.8h, v23.8h\n"
"ldr q21, [x16, #0xa0]\n"
- "ldr x26, [x15, #0xf8]\n"
"fmla v31.8h, v18.8h, v6.8h\n"
- "ldr q20, [x25, x10]\n"
- "fmla v5.8h, v18.8h, v2.8h\n"
+ "ldr q20, [x25, x14]\n"
"ldr x25, [x15, #0xe8]\n"
+ "fmla v5.8h, v18.8h, v2.8h\n"
"fmla v30.8h, v18.8h, v23.8h\n"
"fmla v29.8h, v18.8h, v0.8h\n"
"ldr q18, [x16, #0xb0]\n"
"fmla v31.8h, v17.8h, v2.8h\n"
- "ldr q19, [x24, x10]\n"
- "fmla v5.8h, v17.8h, v28.8h\n"
+ "ldr q19, [x24, x14]\n"
"ldr x24, [x15, #0xf0]\n"
+ "fmla v5.8h, v17.8h, v28.8h\n"
"fmla v30.8h, v17.8h, v0.8h\n"
"fmla v29.8h, v17.8h, v20.8h\n"
"ldr q17, [x16, #0xc0]\n"
"fmla v31.8h, v24.8h, v28.8h\n"
- "ldr q7, [x23, x10]\n"
- "fmla v5.8h, v24.8h, v26.8h\n"
+ "ldr q10, [x22, x14]\n"
"ldr x23, [x15, #0x100]\n"
+ "fmla v5.8h, v24.8h, v26.8h\n"
"fmla v30.8h, v24.8h, v20.8h\n"
"fmla v29.8h, v24.8h, v19.8h\n"
- "ldr q3, [x16, #0xd0]\n"
+ "ldr q13, [x16, #0xd0]\n"
"fmla v31.8h, v22.8h, v26.8h\n"
- "ldr q28, [x22, x10]\n"
+ "ldr q28, [x28, x14]\n"
+ "ldr x22, [x15, #0x108]\n"
"fmla v5.8h, v22.8h, v1.8h\n"
- "ldr q13, [x20, x10]\n"
+ "ldr q14, [x20, x14]\n"
"fmla v30.8h, v22.8h, v19.8h\n"
"fmla v29.8h, v22.8h, v16.8h\n"
- "ldr q11, [x16, #0xe0]\n"
- "ldr x22, [x15, #0x108]\n"
+ "ldr q12, [x16, #0xe0]\n"
"fmla v31.8h, v21.8h, v25.8h\n"
- "ldr q26, [x21, x10]\n"
- "fmla v5.8h, v21.8h, v23.8h\n"
+ "ldr q26, [x21, x14]\n"
"ldr x21, [x15, #0x110]\n"
- "fmla v30.8h, v21.8h, v7.8h\n"
+ "fmla v5.8h, v21.8h, v23.8h\n"
+ "fmla v30.8h, v21.8h, v10.8h\n"
"fmla v29.8h, v21.8h, v28.8h\n"
"ldr q25, [x16, #0xf0]\n"
"fmla v31.8h, v18.8h, v23.8h\n"
- "ldr q24, [x27, x10]\n"
- "fmla v5.8h, v18.8h, v0.8h\n"
+ "ldr q24, [x27, x14]\n"
"ldr x20, [x15, #0x118]\n"
+ "fmla v5.8h, v18.8h, v0.8h\n"
"fmla v30.8h, v18.8h, v28.8h\n"
"fmla v29.8h, v18.8h, v26.8h\n"
"ldr q23, [x16, #0x100]\n"
"fmla v31.8h, v17.8h, v0.8h\n"
- "ldr q22, [x25, x10]\n"
+ "ldr q22, [x25, x14]\n"
"fmla v5.8h, v17.8h, v20.8h\n"
"fmla v30.8h, v17.8h, v26.8h\n"
"fmla v29.8h, v17.8h, v24.8h\n"
"ldr q21, [x16, #0x110]\n"
- "fmla v31.8h, v3.8h, v20.8h\n"
- "ldr q18, [x24, x10]\n"
- "fmla v5.8h, v3.8h, v19.8h\n"
- "fmla v30.8h, v3.8h, v24.8h\n"
- "fmla v29.8h, v3.8h, v13.8h\n"
+ "fmla v31.8h, v13.8h, v20.8h\n"
+ "ldr q18, [x24, x14]\n"
+ "fmla v5.8h, v13.8h, v19.8h\n"
+ "fmla v30.8h, v13.8h, v24.8h\n"
+ "fmla v29.8h, v13.8h, v14.8h\n"
"ldr q20, [x16, #0x120]\n"
- "fmla v31.8h, v11.8h, v19.8h\n"
- "ldr q17, [x26, x10]\n"
- "fmla v5.8h, v11.8h, v16.8h\n"
- "fmla v30.8h, v11.8h, v13.8h\n"
- "fmla v29.8h, v11.8h, v22.8h\n"
+ "fmla v31.8h, v12.8h, v19.8h\n"
+ "ldr q17, [x26, x14]\n"
+ "fmla v5.8h, v12.8h, v16.8h\n"
+ "fmla v30.8h, v12.8h, v14.8h\n"
+ "fmla v29.8h, v12.8h, v22.8h\n"
"ldr q19, [x16, #0x130]\n"
"add x16, x16, #0x140\n"
- "fmla v31.8h, v25.8h, v7.8h\n"
- "ldr q16, [x23, x10]\n"
+ "fmla v31.8h, v25.8h, v10.8h\n"
+ "ldr q16, [x23, x14]\n"
"fmla v5.8h, v25.8h, v28.8h\n"
"fmla v30.8h, v25.8h, v18.8h\n"
- "ldr q18, [x22, x10]\n"
+ "ldr q18, [x22, x14]\n"
"fmla v29.8h, v25.8h, v17.8h\n"
"fmla v31.8h, v23.8h, v28.8h\n"
"fmla v5.8h, v23.8h, v26.8h\n"
"fmla v30.8h, v23.8h, v17.8h\n"
- "ldr q17, [x21, x10]\n"
+ "ldr q17, [x21, x14]\n"
"fmla v29.8h, v23.8h, v16.8h\n"
"fmla v31.8h, v21.8h, v26.8h\n"
"fmla v5.8h, v21.8h, v24.8h\n"
"fmla v30.8h, v21.8h, v16.8h\n"
- "ldr q16, [x20, x10]\n"
+ "ldr q16, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
"fmla v29.8h, v21.8h, v18.8h\n"
- "add x10, x10, #0x10\n"
"fmla v31.8h, v20.8h, v24.8h\n"
- "fmla v5.8h, v20.8h, v13.8h\n"
+ "fmla v5.8h, v20.8h, v14.8h\n"
"fmla v30.8h, v20.8h, v18.8h\n"
"fmla v29.8h, v20.8h, v17.8h\n"
- "fmla v31.8h, v19.8h, v13.8h\n"
+ "fmla v31.8h, v19.8h, v14.8h\n"
"fmla v5.8h, v19.8h, v22.8h\n"
- "fmax v31.8h, v31.8h, v27.8h\n"
"fmla v30.8h, v19.8h, v17.8h\n"
"fmla v29.8h, v19.8h, v16.8h\n"
+ "fmax v31.8h, v31.8h, v27.8h\n"
"fmax v5.8h, v5.8h, v27.8h\n"
"fmax v30.8h, v30.8h, v27.8h\n"
- "fmax v29.8h, v29.8h, v27.8h\n"
"fmin v31.8h, v31.8h, v15.8h\n"
+ "fmax v29.8h, v29.8h, v27.8h\n"
"fmin v5.8h, v5.8h, v15.8h\n"
- "str q31, [x14, x28]\n"
"fmin v30.8h, v30.8h, v15.8h\n"
"fmin v29.8h, v29.8h, v15.8h\n"
- "str q5, [x13, x28]\n"
- "str q30, [x12, x28]\n"
- "str q29, [x11, x28]\n"
+ "str q31, [x13, x9]\n"
+ "str q5, [x12, x9]\n"
+ "str q30, [x11, x9]\n"
+ "str q29, [x10, x9]\n"
"3:" // Oddments
"tst %x[n_channels], #0x7\n"
"beq 116f\n"
"ldr q26, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "mov x20, x10\n"
- "add x14, x14, x20\n"
+ "mov x20, x14\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
- "add x13, x13, x20\n"
- "add x12, x12, x20\n"
"ldr q3, [x16, #0x40]\n"
"ldr q4, [x16, #0x50]\n"
- "add x11, x11, x20\n"
"ldr x9, [x15, #0x0]\n"
"ldr x28, [x15, #0x8]\n"
- "add x9, x9, x10\n"
- "add x28, x28, x10\n"
+ "add x13, x13, x20\n"
+ "add x12, x12, x20\n"
"ldr x27, [x15, #0x10]\n"
"ldr x26, [x15, #0x18]\n"
- "add x27, x27, x10\n"
- "add x26, x26, x10\n"
+ "add x11, x11, x20\n"
+ "add x10, x10, x20\n"
"ldr x25, [x15, #0x20]\n"
"ldr x24, [x15, #0x28]\n"
- "add x25, x25, x10\n"
- "add x24, x24, x10\n"
"ldr x23, [x15, #0x30]\n"
"ldr x22, [x15, #0x38]\n"
- "add x23, x23, x10\n"
- "add x22, x22, x10\n"
+ "add x9, x9, x14\n"
+ "add x28, x28, x14\n"
"ldr x21, [x15, #0x40]\n"
"ldr x20, [x15, #0x48]\n"
- "add x21, x21, x10\n"
- "add x20, x20, x10\n"
+ "add x27, x27, x14\n"
+ "add x26, x26, x14\n"
+ "add x25, x25, x14\n"
+ "add x24, x24, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"add x16, x16, #0x60\n"
"tbz %x[n_channels], #2, 5f\n"
"ld1 { v5.d }[0], [x9], #0x8\n"
@@ -657,9 +657,9 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"mov v28.16b, v26.16b\n fmla v28.8h, v0.8h, v5.8h\n"
"mov v29.16b, v26.16b\n fmla v29.8h, v0.8h, v6.8h\n"
"ldr x20, [x15, #0x50]\n"
- "add x20, x20, x10\n"
"mov v30.16b, v26.16b\n fmla v30.8h, v0.8h, v7.8h\n"
"mov v31.16b, v26.16b\n fmla v31.8h, v0.8h, v8.8h\n"
+ "add x20, x20, x14\n"
"fmla v28.8h, v1.8h, v6.8h\n"
"fmla v29.8h, v1.8h, v9.8h\n"
"fmla v30.8h, v1.8h, v8.8h\n"
@@ -690,9 +690,9 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x58]\n"
"fmla v31.8h, v2.8h, v5.8h\n"
"fmla v28.8h, v3.8h, v11.8h\n"
- "add x20, x20, x10\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v5.8h\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 13f\n"
"ld1 { v6.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 12f\n"
@@ -716,7 +716,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x60]\n"
"fmla v31.8h, v3.8h, v6.8h\n"
"fmla v28.8h, v4.8h, v12.8h\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 17f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
@@ -742,11 +742,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v30.8h, v4.8h, v6.8h\n"
"ldr x20, [x15, #0x68]\n"
"fmla v31.8h, v4.8h, v10.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v0.8h, v7.8h\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"fmla v29.8h, v0.8h, v8.8h\n"
"fmla v30.8h, v0.8h, v14.8h\n"
- "add x16, x16, #0x10\n"
"tbz %x[n_channels], #2, 21f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 20f\n"
@@ -770,11 +770,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q1, [x16, #0x0]\n"
"ldr x20, [x15, #0x70]\n"
"fmla v31.8h, v0.8h, v11.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v1.8h, v8.8h\n"
"fmla v29.8h, v1.8h, v13.8h\n"
"fmla v30.8h, v1.8h, v11.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 25f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 24f\n"
@@ -798,11 +798,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q2, [x16, #0x0]\n"
"ldr x20, [x15, #0x78]\n"
"fmla v31.8h, v1.8h, v12.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v2.8h, v13.8h\n"
"fmla v29.8h, v2.8h, v5.8h\n"
"fmla v30.8h, v2.8h, v12.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 29f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 28f\n"
@@ -826,11 +826,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q3, [x16, #0x0]\n"
"ldr x20, [x15, #0x80]\n"
"fmla v31.8h, v2.8h, v9.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v3.8h, v5.8h\n"
"fmla v29.8h, v3.8h, v6.8h\n"
"fmla v30.8h, v3.8h, v9.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 33f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 32f\n"
@@ -854,11 +854,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q4, [x16, #0x0]\n"
"ldr x20, [x15, #0x88]\n"
"fmla v31.8h, v3.8h, v13.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v4.8h, v6.8h\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v13.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 37f\n"
"ld1 { v8.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 36f\n"
@@ -882,10 +882,10 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q0, [x16, #0x0]\n"
"ldr x20, [x15, #0x90]\n"
"fmla v31.8h, v4.8h, v8.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v0.8h, v14.8h\n"
"fmla v29.8h, v0.8h, v11.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 41f\n"
"ld1 { v5.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 40f\n"
@@ -908,7 +908,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"43:" // Oddments: Load input (3, 0): Bit 2: End
"ldr x20, [x15, #0x98]\n"
"fmla v30.8h, v0.8h, v5.8h\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 45f\n"
"ld1 { v6.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 44f\n"
@@ -932,11 +932,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q1, [x16, #0x0]\n"
"ldr x20, [x15, #0xa0]\n"
"fmla v31.8h, v0.8h, v6.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v1.8h, v11.8h\n"
"fmla v29.8h, v1.8h, v12.8h\n"
"fmla v30.8h, v1.8h, v6.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 49f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 48f\n"
@@ -960,11 +960,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q2, [x16, #0x0]\n"
"ldr x20, [x15, #0xa8]\n"
"fmla v31.8h, v1.8h, v10.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v2.8h, v12.8h\n"
"fmla v29.8h, v2.8h, v9.8h\n"
"fmla v30.8h, v2.8h, v10.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 53f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 52f\n"
@@ -988,11 +988,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q3, [x16, #0x0]\n"
"ldr x20, [x15, #0xb0]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v3.8h, v9.8h\n"
"fmla v29.8h, v3.8h, v13.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 57f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 56f\n"
@@ -1016,11 +1016,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q4, [x16, #0x0]\n"
"ldr x20, [x15, #0xb8]\n"
"fmla v31.8h, v3.8h, v12.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v4.8h, v13.8h\n"
"fmla v29.8h, v4.8h, v8.8h\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 61f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 60f\n"
@@ -1044,10 +1044,10 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q0, [x16, #0x0]\n"
"ldr x20, [x15, #0xc0]\n"
"fmla v31.8h, v4.8h, v14.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v0.8h, v5.8h\n"
"fmla v29.8h, v0.8h, v6.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 65f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 64f\n"
@@ -1070,7 +1070,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"67:" // Oddments: Load input (4, 0): Bit 2: End
"ldr x20, [x15, #0xc8]\n"
"fmla v30.8h, v0.8h, v9.8h\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 69f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 68f\n"
@@ -1094,11 +1094,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q1, [x16, #0x0]\n"
"ldr x20, [x15, #0xd0]\n"
"fmla v31.8h, v0.8h, v13.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v1.8h, v6.8h\n"
"fmla v29.8h, v1.8h, v10.8h\n"
"fmla v30.8h, v1.8h, v13.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 73f\n"
"ld1 { v5.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 72f\n"
@@ -1122,11 +1122,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q2, [x16, #0x0]\n"
"ldr x20, [x15, #0xd8]\n"
"fmla v31.8h, v1.8h, v5.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v2.8h, v10.8h\n"
"fmla v29.8h, v2.8h, v11.8h\n"
"fmla v30.8h, v2.8h, v5.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 77f\n"
"ld1 { v6.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 76f\n"
@@ -1150,11 +1150,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q3, [x16, #0x0]\n"
"ldr x20, [x15, #0xe0]\n"
"fmla v31.8h, v2.8h, v6.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v3.8h, v11.8h\n"
"fmla v29.8h, v3.8h, v12.8h\n"
"fmla v30.8h, v3.8h, v6.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 81f\n"
"ld1 { v8.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 80f\n"
@@ -1178,11 +1178,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q4, [x16, #0x0]\n"
"ldr x20, [x15, #0xe8]\n"
"fmla v31.8h, v3.8h, v8.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v4.8h, v12.8h\n"
"fmla v29.8h, v4.8h, v14.8h\n"
"fmla v30.8h, v4.8h, v8.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 85f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 84f\n"
@@ -1206,10 +1206,10 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q0, [x16, #0x0]\n"
"ldr x20, [x15, #0xf0]\n"
"fmla v31.8h, v4.8h, v10.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v0.8h, v9.8h\n"
"fmla v29.8h, v0.8h, v13.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 89f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 88f\n"
@@ -1232,7 +1232,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"91:" // Oddments: Load input (5, 0): Bit 2: End
"ldr x20, [x15, #0xf8]\n"
"fmla v30.8h, v0.8h, v11.8h\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 93f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 92f\n"
@@ -1256,11 +1256,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q1, [x16, #0x0]\n"
"ldr x20, [x15, #0x100]\n"
"fmla v31.8h, v0.8h, v12.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v1.8h, v13.8h\n"
"fmla v29.8h, v1.8h, v5.8h\n"
"fmla v30.8h, v1.8h, v12.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 97f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 96f\n"
@@ -1284,11 +1284,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q2, [x16, #0x0]\n"
"ldr x20, [x15, #0x108]\n"
"fmla v31.8h, v1.8h, v9.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v2.8h, v5.8h\n"
"fmla v29.8h, v2.8h, v6.8h\n"
"fmla v30.8h, v2.8h, v9.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 101f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 100f\n"
@@ -1312,11 +1312,11 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q3, [x16, #0x0]\n"
"ldr x20, [x15, #0x110]\n"
"fmla v31.8h, v2.8h, v11.8h\n"
+ "add x16, x16, #0x10\n"
"fmla v28.8h, v3.8h, v6.8h\n"
"fmla v29.8h, v3.8h, v8.8h\n"
"fmla v30.8h, v3.8h, v11.8h\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 105f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 104f\n"
@@ -1343,7 +1343,7 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v28.8h, v4.8h, v8.8h\n"
"fmla v29.8h, v4.8h, v10.8h\n"
"fmla v30.8h, v4.8h, v12.8h\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #2, 109f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #1, 108f\n"
@@ -1368,56 +1368,56 @@ void a64_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmax v28.8h, v28.8h, v27.8h\n"
"fmax v29.8h, v29.8h, v27.8h\n"
"fmax v30.8h, v30.8h, v27.8h\n"
- "fmax v31.8h, v31.8h, v27.8h\n"
"fmin v28.8h, v28.8h, v15.8h\n"
+ "fmax v31.8h, v31.8h, v27.8h\n"
"fmin v29.8h, v29.8h, v15.8h\n"
"fmin v30.8h, v30.8h, v15.8h\n"
"fmin v31.8h, v31.8h, v15.8h\n"
"tbz %x[n_channels], #2, 113f\n"
- "st1 { v28.d }[0], [x14], #0x8\n"
- "st1 { v29.d }[0], [x13], #0x8\n"
- "st1 { v30.d }[0], [x12], #0x8\n"
- "st1 { v31.d }[0], [x11], #0x8\n"
+ "st1 { v28.d }[0], [x13], #0x8\n"
+ "st1 { v29.d }[0], [x12], #0x8\n"
+ "st1 { v30.d }[0], [x11], #0x8\n"
+ "st1 { v31.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #1, 112f\n"
- "st1 { v28.s }[2], [x14], #0x4\n"
- "st1 { v29.s }[2], [x13], #0x4\n"
- "st1 { v30.s }[2], [x12], #0x4\n"
- "st1 { v31.s }[2], [x11], #0x4\n"
+ "st1 { v28.s }[2], [x13], #0x4\n"
+ "st1 { v29.s }[2], [x12], #0x4\n"
+ "st1 { v30.s }[2], [x11], #0x4\n"
+ "st1 { v31.s }[2], [x10], #0x4\n"
"tbz %x[n_channels], #0, 115f\n"
- "st1 { v28.h }[6], [x14], #0x2\n"
- "st1 { v29.h }[6], [x13], #0x2\n"
- "st1 { v30.h }[6], [x12], #0x2\n"
- "st1 { v31.h }[6], [x11], #0x2\n"
+ "st1 { v28.h }[6], [x13], #0x2\n"
+ "st1 { v29.h }[6], [x12], #0x2\n"
+ "st1 { v30.h }[6], [x11], #0x2\n"
+ "st1 { v31.h }[6], [x10], #0x2\n"
"b 115f\n"
"112:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 115f\n"
- "st1 { v28.h }[4], [x14], #0x2\n"
- "st1 { v29.h }[4], [x13], #0x2\n"
- "st1 { v30.h }[4], [x12], #0x2\n"
- "st1 { v31.h }[4], [x11], #0x2\n"
+ "st1 { v28.h }[4], [x13], #0x2\n"
+ "st1 { v29.h }[4], [x12], #0x2\n"
+ "st1 { v30.h }[4], [x11], #0x2\n"
+ "st1 { v31.h }[4], [x10], #0x2\n"
"b 115f\n"
"113:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 114f\n"
- "st1 { v28.s }[0], [x14], #0x4\n"
- "st1 { v29.s }[0], [x13], #0x4\n"
- "st1 { v30.s }[0], [x12], #0x4\n"
- "st1 { v31.s }[0], [x11], #0x4\n"
+ "st1 { v28.s }[0], [x13], #0x4\n"
+ "st1 { v29.s }[0], [x12], #0x4\n"
+ "st1 { v30.s }[0], [x11], #0x4\n"
+ "st1 { v31.s }[0], [x10], #0x4\n"
"tbz %x[n_channels], #0, 115f\n"
- "st1 { v28.h }[2], [x14], #0x2\n"
- "st1 { v29.h }[2], [x13], #0x2\n"
- "st1 { v30.h }[2], [x12], #0x2\n"
- "st1 { v31.h }[2], [x11], #0x2\n"
+ "st1 { v28.h }[2], [x13], #0x2\n"
+ "st1 { v29.h }[2], [x12], #0x2\n"
+ "st1 { v30.h }[2], [x11], #0x2\n"
+ "st1 { v31.h }[2], [x10], #0x2\n"
"b 115f\n"
"114:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
- "st1 { v28.h }[0], [x14], #0x2\n"
- "st1 { v29.h }[0], [x13], #0x2\n"
- "st1 { v30.h }[0], [x12], #0x2\n"
- "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v28.h }[0], [x13], #0x2\n"
+ "st1 { v29.h }[0], [x12], #0x2\n"
+ "st1 { v30.h }[0], [x11], #0x2\n"
+ "st1 { v31.h }[0], [x10], #0x2\n"
"115:" // Oddments: Store: Bit 2: End
"116:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp
index 08f40b785f..e4c8793b75 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,97 +56,97 @@ void a64_fp16_nhwc_generic_output9_mla_depthfirst_impl(
"ldr q23, [%x[bias], x11]\n"
"2:" // Channel loop: Load bias: Done
"ldr q0, [%x[params], #0x0]\n"
- "mov x26, %x[inptrs]\n"
- "ldp x21, x20, [x26], #0x10\n"
- "subs x25, %x[n_points], #0x1\n"
- "ldr q14, [x21, x11]\n"
- "ldr q15, [x20, x11]\n"
+ "mov x23, %x[inptrs]\n"
+ "subs x22, %x[n_points], #0x1\n"
"mov v24.16b, v23.16b\n"
"mov v25.16b, v23.16b\n"
- "ldp x21, x20, [x26], #0x10\n"
- "ldr q16, [x21, x11]\n"
"mov v26.16b, v23.16b\n"
+ "add %x[params], %x[params], #0x10\n"
"mov v27.16b, v23.16b\n"
- "ldr q17, [x20, x11]\n"
- "ldp x21, x20, [x26], #0x10\n"
"mov v28.16b, v23.16b\n"
+ "ldp x21, x20, [x23], #0x10\n"
"mov v29.16b, v23.16b\n"
- "ldr q18, [x21, x11]\n"
- "ldr q19, [x20, x11]\n"
"mov v30.16b, v23.16b\n"
"mov v31.16b, v23.16b\n"
- "ldp x21, x20, [x26], #0x10\n"
+ "ldr q14, [x21, x11]\n"
+ "ldr q15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ldr q16, [x21, x11]\n"
+ "ldr q17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ldr q18, [x21, x11]\n"
+ "ldr q19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"ldr q20, [x21, x11]\n"
- "add %x[params], %x[params], #0x10\n"
"ldr q21, [x20, x11]\n"
- "ldr x20, [x26], #0x8\n"
+ "ldr x20, [x23], #0x8\n"
"ldr q22, [x20, x11]\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x20, x24, [x26], #0x10\n"
- "ldp x23, x22, [x26], #0x10\n"
- "subs x25, x25, #0x1\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "subs x22, x22, #0x1\n"
"fmla v23.8h, v14.8h, v0.8h\n"
- "ldr q14, [x20, x11]\n"
- "ldp x21, x20, [x26], #0x10\n"
"fmla v24.8h, v15.8h, v0.8h\n"
"fmla v25.8h, v16.8h, v0.8h\n"
- "ldr q15, [x24, x11]\n"
- "ldr q16, [x23, x11]\n"
"fmla v26.8h, v17.8h, v0.8h\n"
"fmla v27.8h, v18.8h, v0.8h\n"
- "ldr q17, [x22, x11]\n"
- "ldr q18, [x21, x11]\n"
"fmla v28.8h, v19.8h, v0.8h\n"
+ "ldr q14, [x21, x11]\n"
+ "ldr q15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"fmla v29.8h, v20.8h, v0.8h\n"
- "ldr q19, [x20, x11]\n"
- "ldp x21, x20, [x26], #0x10\n"
"fmla v30.8h, v21.8h, v0.8h\n"
"fmla v31.8h, v22.8h, v0.8h\n"
"ldr q0, [%x[params], #0x0]\n"
- "ldr q20, [x21, x11]\n"
"add %x[params], %x[params], #0x10\n"
+ "ldr q16, [x21, x11]\n"
+ "ldr q17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ldr q18, [x21, x11]\n"
+ "ldr q19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ldr q20, [x21, x11]\n"
"ldr q21, [x20, x11]\n"
- "ldr x20, [x26], #0x8\n"
+ "ldr x20, [x23], #0x8\n"
"ldr q22, [x20, x11]\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
"fmla v23.8h, v14.8h, v0.8h\n"
"fmla v24.8h, v15.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v2.8h\n"
"ldp x28, x27, [%x[outptrs], #0x0]\n"
+ "ldp x26, x25, [%x[outptrs], #0x10]\n"
"fmla v25.8h, v16.8h, v0.8h\n"
"fmla v26.8h, v17.8h, v0.8h\n"
- "fmax v24.8h, v24.8h, v2.8h\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
+ "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldp x22, x21, [%x[outptrs], #0x30]\n"
"fmla v27.8h, v18.8h, v0.8h\n"
"fmla v28.8h, v19.8h, v0.8h\n"
- "fmax v25.8h, v25.8h, v2.8h\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
"fmla v29.8h, v20.8h, v0.8h\n"
"fmla v30.8h, v21.8h, v0.8h\n"
- "fmax v26.8h, v26.8h, v2.8h\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
"fmla v31.8h, v22.8h, v0.8h\n"
+ "fmax v23.8h, v23.8h, v2.8h\n"
+ "fmax v24.8h, v24.8h, v2.8h\n"
+ "fmax v25.8h, v25.8h, v2.8h\n"
+ "fmax v26.8h, v26.8h, v2.8h\n"
"fmax v27.8h, v27.8h, v2.8h\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
"fmax v28.8h, v28.8h, v2.8h\n"
"fmax v29.8h, v29.8h, v2.8h\n"
"fmax v30.8h, v30.8h, v2.8h\n"
"fmax v31.8h, v31.8h, v2.8h\n"
"fmin v23.8h, v23.8h, v1.8h\n"
"fmin v24.8h, v24.8h, v1.8h\n"
- "str q23, [x28, x11]\n"
"fmin v25.8h, v25.8h, v1.8h\n"
"fmin v26.8h, v26.8h, v1.8h\n"
- "str q24, [x27, x11]\n"
"fmin v27.8h, v27.8h, v1.8h\n"
"fmin v28.8h, v28.8h, v1.8h\n"
- "str q25, [x26, x11]\n"
"fmin v29.8h, v29.8h, v1.8h\n"
"fmin v30.8h, v30.8h, v1.8h\n"
- "str q26, [x25, x11]\n"
+ "str q23, [x28, x11]\n"
"fmin v31.8h, v31.8h, v1.8h\n"
+ "str q24, [x27, x11]\n"
+ "str q25, [x26, x11]\n"
+ "str q26, [x25, x11]\n"
"str q27, [x24, x11]\n"
"str q28, [x23, x11]\n"
"str q29, [x22, x11]\n"
@@ -184,29 +184,29 @@ void a64_fp16_nhwc_generic_output9_mla_depthfirst_impl(
"10:" // Oddments: Load bias: Done
"ldr q0, [%x[params], #0x0]\n"
"mov x10, %x[inptrs]\n"
- "ldp x9, x28, [x10], #0x10\n"
"mov v24.16b, v23.16b\n"
- "ldp x27, x26, [x10], #0x10\n"
- "ldp x25, x24, [x10], #0x10\n"
"mov v25.16b, v23.16b\n"
"mov v26.16b, v23.16b\n"
- "ldp x23, x22, [x10], #0x10\n"
- "ldr x21, [x10], #0x8\n"
"mov v27.16b, v23.16b\n"
+ "add %x[params], %x[params], #0x10\n"
"mov v28.16b, v23.16b\n"
"mov v29.16b, v23.16b\n"
+ "ldp x9, x28, [x10], #0x10\n"
"mov v30.16b, v23.16b\n"
+ "mov v31.16b, v23.16b\n"
+ "ldp x27, x26, [x10], #0x10\n"
"add x9, x9, x11\n"
"add x28, x28, x11\n"
- "mov v31.16b, v23.16b\n"
+ "ldp x25, x24, [x10], #0x10\n"
"add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
+ "ldr x21, [x10], #0x8\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
"add x21, x21, x11\n"
- "add %x[params], %x[params], #0x10\n"
"tbz %x[n_channels], #2, 12f\n"
"ldr d14, [x9], #0x8\n"
"ldr d15, [x28], #0x8\n"
@@ -287,30 +287,30 @@ void a64_fp16_nhwc_generic_output9_mla_depthfirst_impl(
"ble 20f\n"
"15:" // Oddments: Planar loop
"ldp x9, x28, [x10], #0x10\n"
- "ldp x27, x26, [x10], #0x10\n"
"fmla v23.8h, v14.8h, v0.8h\n"
"fmla v24.8h, v15.8h, v0.8h\n"
- "ldp x25, x24, [x10], #0x10\n"
- "ldp x23, x22, [x10], #0x10\n"
"fmla v25.8h, v16.8h, v0.8h\n"
"fmla v26.8h, v17.8h, v0.8h\n"
- "ldr x21, [x10], #0x8\n"
"fmla v27.8h, v18.8h, v0.8h\n"
"fmla v28.8h, v19.8h, v0.8h\n"
- "add x9, x9, x11\n"
+ "ldp x27, x26, [x10], #0x10\n"
"fmla v29.8h, v20.8h, v0.8h\n"
"fmla v30.8h, v21.8h, v0.8h\n"
+ "add x9, x9, x11\n"
"add x28, x28, x11\n"
- "add x27, x27, x11\n"
"fmla v31.8h, v22.8h, v0.8h\n"
"ldr q0, [%x[params], #0x0]\n"
+ "add %x[params], %x[params], #0x10\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
+ "ldr x21, [x10], #0x8\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
"add x21, x21, x11\n"
- "add %x[params], %x[params], #0x10\n"
"tbz %x[n_channels], #2, 17f\n"
"ldr d14, [x9], #0x8\n"
"ldr d15, [x28], #0x8\n"
@@ -392,40 +392,40 @@ void a64_fp16_nhwc_generic_output9_mla_depthfirst_impl(
"20:" // Oddments: Planar tail
"fmla v23.8h, v14.8h, v0.8h\n"
"fmla v24.8h, v15.8h, v0.8h\n"
- "fmax v23.8h, v23.8h, v2.8h\n"
"ldp x28, x27, [%x[outptrs], #0x0]\n"
+ "ldp x26, x25, [%x[outptrs], #0x10]\n"
"fmla v25.8h, v16.8h, v0.8h\n"
"fmla v26.8h, v17.8h, v0.8h\n"
- "fmax v24.8h, v24.8h, v2.8h\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
+ "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldp x22, x21, [%x[outptrs], #0x30]\n"
"fmla v27.8h, v18.8h, v0.8h\n"
"fmla v28.8h, v19.8h, v0.8h\n"
- "fmax v25.8h, v25.8h, v2.8h\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
"fmla v29.8h, v20.8h, v0.8h\n"
"fmla v30.8h, v21.8h, v0.8h\n"
- "fmax v26.8h, v26.8h, v2.8h\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
+ "add x28, x28, x11\n"
+ "add x27, x27, x11\n"
"fmla v31.8h, v22.8h, v0.8h\n"
+ "fmax v23.8h, v23.8h, v2.8h\n"
+ "add x26, x26, x11\n"
+ "add x25, x25, x11\n"
+ "fmax v24.8h, v24.8h, v2.8h\n"
+ "fmax v25.8h, v25.8h, v2.8h\n"
+ "add x24, x24, x11\n"
+ "add x23, x23, x11\n"
+ "fmax v26.8h, v26.8h, v2.8h\n"
"fmax v27.8h, v27.8h, v2.8h\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "add x28, x28, x11\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"fmax v28.8h, v28.8h, v2.8h\n"
"fmax v29.8h, v29.8h, v2.8h\n"
- "add x27, x27, x11\n"
- "add x26, x26, x11\n"
+ "add x20, x20, x11\n"
"fmax v30.8h, v30.8h, v2.8h\n"
"fmax v31.8h, v31.8h, v2.8h\n"
- "add x25, x25, x11\n"
- "add x24, x24, x11\n"
"fmin v23.8h, v23.8h, v1.8h\n"
"fmin v24.8h, v24.8h, v1.8h\n"
- "add x23, x23, x11\n"
- "add x22, x22, x11\n"
"fmin v25.8h, v25.8h, v1.8h\n"
"fmin v26.8h, v26.8h, v1.8h\n"
- "add x21, x21, x11\n"
- "add x20, x20, x11\n"
"fmin v27.8h, v27.8h, v1.8h\n"
"fmin v28.8h, v28.8h, v1.8h\n"
"fmin v29.8h, v29.8h, v1.8h\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index cee3fb59c5..d3a2e06453 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,20 +58,20 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"2:" // Output channel loop: Load bias: Done
"ldr q6, [%x[weights], #0x0]\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
- "ldr q1, [x21, #0x0]\n"
- "ldr q0, [x20, #0x0]\n"
"mov v16.16b, v31.16b\n"
"mov v17.16b, v31.16b\n"
"mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
"add %x[weights], %x[weights], #0x10\n"
+ "mov v19.16b, v31.16b\n"
"mov v20.16b, v31.16b\n"
+ "ldp x21, x20, [x22], #0x10\n"
"mov v21.16b, v31.16b\n"
"mov v22.16b, v31.16b\n"
"mov v23.16b, v31.16b\n"
"mov v24.16b, v31.16b\n"
+ "ldr q1, [x21, #0x0]\n"
+ "ldr q0, [x20, #0x0]\n"
"mov v25.16b, v31.16b\n"
"mov v26.16b, v31.16b\n"
"mov v27.16b, v31.16b\n"
@@ -160,71 +160,71 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"fmla v31.8h, v6.8h, v0.h[7]\n"
"fmla v16.8h, v5.8h, v4.h[0]\n"
"fmla v17.8h, v5.8h, v4.h[1]\n"
- "fmin v16.8h, v16.8h, v7.8h\n"
"fmla v18.8h, v5.8h, v4.h[2]\n"
"fmla v19.8h, v5.8h, v4.h[3]\n"
- "fmin v17.8h, v17.8h, v7.8h\n"
"fmla v20.8h, v5.8h, v4.h[4]\n"
"fmla v21.8h, v5.8h, v4.h[5]\n"
- "fmin v18.8h, v18.8h, v7.8h\n"
"fmla v22.8h, v5.8h, v4.h[6]\n"
"fmla v23.8h, v5.8h, v4.h[7]\n"
- "fmin v19.8h, v19.8h, v7.8h\n"
"fmla v24.8h, v5.8h, v3.h[0]\n"
"fmla v25.8h, v5.8h, v3.h[1]\n"
- "fmin v20.8h, v20.8h, v7.8h\n"
+ "fmin v16.8h, v16.8h, v7.8h\n"
"fmla v26.8h, v5.8h, v3.h[2]\n"
"fmla v27.8h, v5.8h, v3.h[3]\n"
- "fmin v21.8h, v21.8h, v7.8h\n"
+ "fmin v17.8h, v17.8h, v7.8h\n"
"fmla v28.8h, v5.8h, v3.h[4]\n"
"fmla v29.8h, v5.8h, v3.h[5]\n"
- "fmin v22.8h, v22.8h, v7.8h\n"
+ "fmin v18.8h, v18.8h, v7.8h\n"
"fmla v30.8h, v5.8h, v3.h[6]\n"
"fmla v31.8h, v5.8h, v3.h[7]\n"
+ "fmin v19.8h, v19.8h, v7.8h\n"
+ "fmin v20.8h, v20.8h, v7.8h\n"
+ "fmin v21.8h, v21.8h, v7.8h\n"
+ "fmin v22.8h, v22.8h, v7.8h\n"
"fmin v23.8h, v23.8h, v7.8h\n"
"fmax v16.8h, v16.8h, v8.8h\n"
"fmax v17.8h, v17.8h, v8.8h\n"
- "str q16, [x27, x28]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"fmax v18.8h, v18.8h, v8.8h\n"
"fmax v19.8h, v19.8h, v8.8h\n"
- "str q17, [x26, x28]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"fmax v20.8h, v20.8h, v8.8h\n"
"fmax v21.8h, v21.8h, v8.8h\n"
- "str q18, [x25, x28]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"fmax v22.8h, v22.8h, v8.8h\n"
"fmax v23.8h, v23.8h, v8.8h\n"
- "str q19, [x24, x28]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
+ "str q16, [x27, x28]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"fmin v24.8h, v24.8h, v7.8h\n"
"fmin v25.8h, v25.8h, v7.8h\n"
- "str q20, [x23, x28]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q17, [x26, x28]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"fmin v26.8h, v26.8h, v7.8h\n"
"fmin v27.8h, v27.8h, v7.8h\n"
- "str q21, [x22, x28]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
+ "str q18, [x25, x28]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"fmin v28.8h, v28.8h, v7.8h\n"
"fmin v29.8h, v29.8h, v7.8h\n"
- "str q22, [x21, x28]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
+ "str q19, [x24, x28]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"fmin v30.8h, v30.8h, v7.8h\n"
"fmin v31.8h, v31.8h, v7.8h\n"
- "str q23, [x20, x28]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
+ "str q20, [x23, x28]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q21, [x22, x28]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"fmax v24.8h, v24.8h, v8.8h\n"
"fmax v25.8h, v25.8h, v8.8h\n"
- "str q24, [x27, x28]\n"
+ "str q22, [x21, x28]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"fmax v26.8h, v26.8h, v8.8h\n"
"fmax v27.8h, v27.8h, v8.8h\n"
- "str q25, [x26, x28]\n"
+ "str q23, [x20, x28]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"fmax v28.8h, v28.8h, v8.8h\n"
"fmax v29.8h, v29.8h, v8.8h\n"
- "str q26, [x25, x28]\n"
"fmax v30.8h, v30.8h, v8.8h\n"
"fmax v31.8h, v31.8h, v8.8h\n"
+ "str q24, [x27, x28]\n"
+ "str q25, [x26, x28]\n"
+ "str q26, [x25, x28]\n"
"str q27, [x24, x28]\n"
"str q28, [x23, x28]\n"
"str q29, [x22, x28]\n"
@@ -280,71 +280,71 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"fmla v31.8h, v5.8h, v3.h[7]\n"
"fmla v16.8h, v1.8h, v2.h[0]\n"
"fmla v17.8h, v1.8h, v2.h[1]\n"
- "fmin v16.8h, v16.8h, v7.8h\n"
"fmla v18.8h, v1.8h, v2.h[2]\n"
"fmla v19.8h, v1.8h, v2.h[3]\n"
- "fmin v17.8h, v17.8h, v7.8h\n"
"fmla v20.8h, v1.8h, v2.h[4]\n"
"fmla v21.8h, v1.8h, v2.h[5]\n"
- "fmin v18.8h, v18.8h, v7.8h\n"
"fmla v22.8h, v1.8h, v2.h[6]\n"
"fmla v23.8h, v1.8h, v2.h[7]\n"
- "fmin v19.8h, v19.8h, v7.8h\n"
"fmla v24.8h, v1.8h, v0.h[0]\n"
"fmla v25.8h, v1.8h, v0.h[1]\n"
- "fmin v20.8h, v20.8h, v7.8h\n"
+ "fmin v16.8h, v16.8h, v7.8h\n"
"fmla v26.8h, v1.8h, v0.h[2]\n"
"fmla v27.8h, v1.8h, v0.h[3]\n"
- "fmin v21.8h, v21.8h, v7.8h\n"
+ "fmin v17.8h, v17.8h, v7.8h\n"
"fmla v28.8h, v1.8h, v0.h[4]\n"
"fmla v29.8h, v1.8h, v0.h[5]\n"
- "fmin v22.8h, v22.8h, v7.8h\n"
+ "fmin v18.8h, v18.8h, v7.8h\n"
"fmla v30.8h, v1.8h, v0.h[6]\n"
"fmla v31.8h, v1.8h, v0.h[7]\n"
+ "fmin v19.8h, v19.8h, v7.8h\n"
+ "fmin v20.8h, v20.8h, v7.8h\n"
+ "fmin v21.8h, v21.8h, v7.8h\n"
+ "fmin v22.8h, v22.8h, v7.8h\n"
"fmin v23.8h, v23.8h, v7.8h\n"
"fmax v16.8h, v16.8h, v8.8h\n"
"fmax v17.8h, v17.8h, v8.8h\n"
- "str q16, [x27, x28]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"fmax v18.8h, v18.8h, v8.8h\n"
"fmax v19.8h, v19.8h, v8.8h\n"
- "str q17, [x26, x28]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"fmax v20.8h, v20.8h, v8.8h\n"
"fmax v21.8h, v21.8h, v8.8h\n"
- "str q18, [x25, x28]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"fmax v22.8h, v22.8h, v8.8h\n"
"fmax v23.8h, v23.8h, v8.8h\n"
- "str q19, [x24, x28]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
+ "str q16, [x27, x28]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"fmin v24.8h, v24.8h, v7.8h\n"
"fmin v25.8h, v25.8h, v7.8h\n"
- "str q20, [x23, x28]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q17, [x26, x28]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"fmin v26.8h, v26.8h, v7.8h\n"
"fmin v27.8h, v27.8h, v7.8h\n"
- "str q21, [x22, x28]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
+ "str q18, [x25, x28]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"fmin v28.8h, v28.8h, v7.8h\n"
"fmin v29.8h, v29.8h, v7.8h\n"
- "str q22, [x21, x28]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
+ "str q19, [x24, x28]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"fmin v30.8h, v30.8h, v7.8h\n"
"fmin v31.8h, v31.8h, v7.8h\n"
- "str q23, [x20, x28]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
+ "str q20, [x23, x28]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q21, [x22, x28]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"fmax v24.8h, v24.8h, v8.8h\n"
"fmax v25.8h, v25.8h, v8.8h\n"
- "str q24, [x27, x28]\n"
+ "str q22, [x21, x28]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"fmax v26.8h, v26.8h, v8.8h\n"
"fmax v27.8h, v27.8h, v8.8h\n"
- "str q25, [x26, x28]\n"
+ "str q23, [x20, x28]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"fmax v28.8h, v28.8h, v8.8h\n"
"fmax v29.8h, v29.8h, v8.8h\n"
- "str q26, [x25, x28]\n"
"fmax v30.8h, v30.8h, v8.8h\n"
"fmax v31.8h, v31.8h, v8.8h\n"
+ "str q24, [x27, x28]\n"
+ "str q25, [x26, x28]\n"
+ "str q26, [x25, x28]\n"
"str q27, [x24, x28]\n"
"str q28, [x23, x28]\n"
"str q29, [x22, x28]\n"
@@ -354,80 +354,80 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"6:" // Output channel loop: Single kernel point
"fmla v16.8h, v6.8h, v1.h[0]\n"
"fmla v17.8h, v6.8h, v1.h[1]\n"
- "fmin v16.8h, v16.8h, v7.8h\n"
"lsl x28, x10, #0x1\n"
+ "ldr x27, [%x[outptrs], #0x0]\n"
"fmla v18.8h, v6.8h, v1.h[2]\n"
"fmla v19.8h, v6.8h, v1.h[3]\n"
- "fmin v17.8h, v17.8h, v7.8h\n"
- "ldr x27, [%x[outptrs], #0x0]\n"
+ "ldr x26, [%x[outptrs], #0x8]\n"
+ "ldr x25, [%x[outptrs], #0x10]\n"
"fmla v20.8h, v6.8h, v1.h[4]\n"
"fmla v21.8h, v6.8h, v1.h[5]\n"
- "fmin v18.8h, v18.8h, v7.8h\n"
- "ldr x26, [%x[outptrs], #0x8]\n"
+ "ldr x24, [%x[outptrs], #0x18]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"fmla v22.8h, v6.8h, v1.h[6]\n"
"fmla v23.8h, v6.8h, v1.h[7]\n"
- "fmin v19.8h, v19.8h, v7.8h\n"
- "ldr x25, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
+ "ldr x21, [%x[outptrs], #0x30]\n"
"fmla v24.8h, v6.8h, v0.h[0]\n"
"fmla v25.8h, v6.8h, v0.h[1]\n"
- "fmin v20.8h, v20.8h, v7.8h\n"
- "ldr x24, [%x[outptrs], #0x18]\n"
+ "fmin v16.8h, v16.8h, v7.8h\n"
+ "ldr x20, [%x[outptrs], #0x38]\n"
"fmla v26.8h, v6.8h, v0.h[2]\n"
"fmla v27.8h, v6.8h, v0.h[3]\n"
- "fmin v21.8h, v21.8h, v7.8h\n"
- "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmin v17.8h, v17.8h, v7.8h\n"
"fmla v28.8h, v6.8h, v0.h[4]\n"
"fmla v29.8h, v6.8h, v0.h[5]\n"
- "fmin v22.8h, v22.8h, v7.8h\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
+ "fmin v18.8h, v18.8h, v7.8h\n"
"fmla v30.8h, v6.8h, v0.h[6]\n"
"fmla v31.8h, v6.8h, v0.h[7]\n"
+ "fmin v19.8h, v19.8h, v7.8h\n"
+ "fmin v20.8h, v20.8h, v7.8h\n"
+ "fmin v21.8h, v21.8h, v7.8h\n"
+ "fmin v22.8h, v22.8h, v7.8h\n"
"fmin v23.8h, v23.8h, v7.8h\n"
- "ldr x21, [%x[outptrs], #0x30]\n"
- "ldr x20, [%x[outptrs], #0x38]\n"
"fmax v16.8h, v16.8h, v8.8h\n"
"fmax v17.8h, v17.8h, v8.8h\n"
- "str q16, [x27, x28]\n"
"fmax v18.8h, v18.8h, v8.8h\n"
"fmax v19.8h, v19.8h, v8.8h\n"
- "str q17, [x26, x28]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"fmax v20.8h, v20.8h, v8.8h\n"
"fmax v21.8h, v21.8h, v8.8h\n"
- "str q18, [x25, x28]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"fmax v22.8h, v22.8h, v8.8h\n"
"fmax v23.8h, v23.8h, v8.8h\n"
- "str q19, [x24, x28]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
+ "str q16, [x27, x28]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"fmin v24.8h, v24.8h, v7.8h\n"
"fmin v25.8h, v25.8h, v7.8h\n"
- "str q20, [x23, x28]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
+ "str q17, [x26, x28]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"fmin v26.8h, v26.8h, v7.8h\n"
"fmin v27.8h, v27.8h, v7.8h\n"
- "str q21, [x22, x28]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q18, [x25, x28]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"fmin v28.8h, v28.8h, v7.8h\n"
"fmin v29.8h, v29.8h, v7.8h\n"
- "str q22, [x21, x28]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
+ "str q19, [x24, x28]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"fmin v30.8h, v30.8h, v7.8h\n"
"fmin v31.8h, v31.8h, v7.8h\n"
- "str q23, [x20, x28]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
+ "str q20, [x23, x28]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q21, [x22, x28]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"fmax v24.8h, v24.8h, v8.8h\n"
"fmax v25.8h, v25.8h, v8.8h\n"
- "str q24, [x27, x28]\n"
+ "str q22, [x21, x28]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"fmax v26.8h, v26.8h, v8.8h\n"
"fmax v27.8h, v27.8h, v8.8h\n"
- "str q25, [x26, x28]\n"
+ "str q23, [x20, x28]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"fmax v28.8h, v28.8h, v8.8h\n"
"fmax v29.8h, v29.8h, v8.8h\n"
- "str q26, [x25, x28]\n"
"fmax v30.8h, v30.8h, v8.8h\n"
"fmax v31.8h, v31.8h, v8.8h\n"
+ "str q24, [x27, x28]\n"
+ "str q25, [x26, x28]\n"
+ "str q26, [x25, x28]\n"
"str q27, [x24, x28]\n"
"str q28, [x23, x28]\n"
"str q29, [x22, x28]\n"
@@ -466,20 +466,20 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"13:" // Output channel oddments: Load bias: Done
"ldr q6, [%x[weights], #0x0]\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
- "ldr q1, [x21, #0x0]\n"
- "ldr q0, [x20, #0x0]\n"
"mov v16.16b, v31.16b\n"
"mov v17.16b, v31.16b\n"
"mov v18.16b, v31.16b\n"
- "mov v19.16b, v31.16b\n"
"add %x[weights], %x[weights], #0x10\n"
+ "mov v19.16b, v31.16b\n"
"mov v20.16b, v31.16b\n"
+ "ldp x21, x20, [x22], #0x10\n"
"mov v21.16b, v31.16b\n"
"mov v22.16b, v31.16b\n"
"mov v23.16b, v31.16b\n"
"mov v24.16b, v31.16b\n"
+ "ldr q1, [x21, #0x0]\n"
+ "ldr q0, [x20, #0x0]\n"
"mov v25.16b, v31.16b\n"
"mov v26.16b, v31.16b\n"
"mov v27.16b, v31.16b\n"
@@ -682,47 +682,47 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"tbz %x[n_output_channels], #2, 20f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #1\n"
- "add x26, x26, x10, LSL #1\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #1\n"
- "add x24, x24, x10, LSL #1\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #1\n"
- "add x22, x22, x10, LSL #1\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #1\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v16.d }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v17.d }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #1\n"
+ "add x21, x21, x10, LSL #1\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v18.d }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #1\n"
"st1 { v19.d }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
"st1 { v20.d }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
"st1 { v21.d }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
"st1 { v22.d }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v23.d }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #1\n"
- "add x10, x10, #0x4\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v24.d }[0], [x27]\n"
+ "add x21, x21, x10, LSL #1\n"
"st1 { v25.d }[0], [x26]\n"
+ "add x20, x20, x10, LSL #1\n"
+ "add x10, x10, #0x4\n"
"st1 { v26.d }[0], [x25]\n"
"st1 { v27.d }[0], [x24]\n"
"st1 { v28.d }[0], [x23]\n"
@@ -732,47 +732,47 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"tbz %x[n_output_channels], #1, 19f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #1\n"
- "add x26, x26, x10, LSL #1\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #1\n"
- "add x24, x24, x10, LSL #1\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #1\n"
- "add x22, x22, x10, LSL #1\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #1\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v16.s }[2], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v17.s }[2], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #1\n"
+ "add x21, x21, x10, LSL #1\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v18.s }[2], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #1\n"
"st1 { v19.s }[2], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
"st1 { v20.s }[2], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
"st1 { v21.s }[2], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
"st1 { v22.s }[2], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v23.s }[2], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #1\n"
- "add x10, x10, #0x2\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v24.s }[2], [x27]\n"
+ "add x21, x21, x10, LSL #1\n"
"st1 { v25.s }[2], [x26]\n"
+ "add x20, x20, x10, LSL #1\n"
+ "add x10, x10, #0x2\n"
"st1 { v26.s }[2], [x25]\n"
"st1 { v27.s }[2], [x24]\n"
"st1 { v28.s }[2], [x23]\n"
@@ -782,46 +782,46 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"tbz %x[n_output_channels], #0, 22f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #1\n"
- "add x26, x26, x10, LSL #1\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #1\n"
- "add x24, x24, x10, LSL #1\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #1\n"
- "add x22, x22, x10, LSL #1\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #1\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v16.h }[6], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v17.h }[6], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #1\n"
+ "add x21, x21, x10, LSL #1\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v18.h }[6], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #1\n"
"st1 { v19.h }[6], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
"st1 { v20.h }[6], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
"st1 { v21.h }[6], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
"st1 { v22.h }[6], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v23.h }[6], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v24.h }[6], [x27]\n"
+ "add x21, x21, x10, LSL #1\n"
"st1 { v25.h }[6], [x26]\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v26.h }[6], [x25]\n"
"st1 { v27.h }[6], [x24]\n"
"st1 { v28.h }[6], [x23]\n"
@@ -833,46 +833,46 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"tbz %x[n_output_channels], #0, 22f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #1\n"
- "add x26, x26, x10, LSL #1\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #1\n"
- "add x24, x24, x10, LSL #1\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #1\n"
- "add x22, x22, x10, LSL #1\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #1\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v16.h }[4], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v17.h }[4], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #1\n"
+ "add x21, x21, x10, LSL #1\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v18.h }[4], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #1\n"
"st1 { v19.h }[4], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
"st1 { v20.h }[4], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
"st1 { v21.h }[4], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
"st1 { v22.h }[4], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v23.h }[4], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v24.h }[4], [x27]\n"
+ "add x21, x21, x10, LSL #1\n"
"st1 { v25.h }[4], [x26]\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v26.h }[4], [x25]\n"
"st1 { v27.h }[4], [x24]\n"
"st1 { v28.h }[4], [x23]\n"
@@ -884,47 +884,47 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"tbz %x[n_output_channels], #1, 21f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #1\n"
- "add x26, x26, x10, LSL #1\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #1\n"
- "add x24, x24, x10, LSL #1\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #1\n"
- "add x22, x22, x10, LSL #1\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #1\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v16.s }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v17.s }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #1\n"
+ "add x21, x21, x10, LSL #1\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v18.s }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #1\n"
"st1 { v19.s }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
"st1 { v20.s }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
"st1 { v21.s }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
"st1 { v22.s }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v23.s }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #1\n"
- "add x10, x10, #0x2\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v24.s }[0], [x27]\n"
+ "add x21, x21, x10, LSL #1\n"
"st1 { v25.s }[0], [x26]\n"
+ "add x20, x20, x10, LSL #1\n"
+ "add x10, x10, #0x2\n"
"st1 { v26.s }[0], [x25]\n"
"st1 { v27.s }[0], [x24]\n"
"st1 { v28.s }[0], [x23]\n"
@@ -934,46 +934,46 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"tbz %x[n_output_channels], #0, 22f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #1\n"
- "add x26, x26, x10, LSL #1\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #1\n"
- "add x24, x24, x10, LSL #1\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #1\n"
- "add x22, x22, x10, LSL #1\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #1\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v16.h }[2], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v17.h }[2], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #1\n"
+ "add x21, x21, x10, LSL #1\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v18.h }[2], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #1\n"
"st1 { v19.h }[2], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
"st1 { v20.h }[2], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
"st1 { v21.h }[2], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
"st1 { v22.h }[2], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v23.h }[2], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v24.h }[2], [x27]\n"
+ "add x21, x21, x10, LSL #1\n"
"st1 { v25.h }[2], [x26]\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v26.h }[2], [x25]\n"
"st1 { v27.h }[2], [x24]\n"
"st1 { v28.h }[2], [x23]\n"
@@ -984,46 +984,46 @@ void a64_fp16_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"21:" // Output channel oddments: Done: Store: Bit 2: Unset: Bit 1: Unset
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #1\n"
- "add x26, x26, x10, LSL #1\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #1\n"
- "add x24, x24, x10, LSL #1\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #1\n"
- "add x22, x22, x10, LSL #1\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #1\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v16.h }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v17.h }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #1\n"
+ "add x21, x21, x10, LSL #1\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v18.h }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #1\n"
"st1 { v19.h }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #1\n"
+ "add x27, x27, x10, LSL #1\n"
"st1 { v20.h }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #1\n"
+ "add x26, x26, x10, LSL #1\n"
"st1 { v21.h }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #1\n"
+ "add x25, x25, x10, LSL #1\n"
"st1 { v22.h }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #1\n"
+ "add x24, x24, x10, LSL #1\n"
"st1 { v23.h }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #1\n"
+ "add x23, x23, x10, LSL #1\n"
+ "add x22, x22, x10, LSL #1\n"
"st1 { v24.h }[0], [x27]\n"
+ "add x21, x21, x10, LSL #1\n"
"st1 { v25.h }[0], [x26]\n"
+ "add x20, x20, x10, LSL #1\n"
"st1 { v26.h }[0], [x25]\n"
"st1 { v27.h }[0], [x24]\n"
"st1 { v28.h }[0], [x23]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index fd8686c15e..1cd980a6b3 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,144 +87,144 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x23, #0x0\n"
- "mov x22, #0x0\n"
+ "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"1:" // Tile loop
- "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x28, #0x2\n"
"mov x27, #0x2\n"
- "mov x26, #0x2\n"
- "str x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x23, x25\n" // offset = tile_i * ld_input_row
+ "str x9, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mov x16, #0x10\n" // cntb _, ALL, #1
"ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
"ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x23, x24\n" // offset = tile_i * ld_output_row
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "madd x21, x22, x15, x21\n" // offset += tile_j * ld_input_col
- "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x15, x15, #0x2\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x22, x14, x20\n" // offset += tile_j * ld_output_col
- "lsr x22, %x[n_channels], #0x2\n"
- "add x11, x15, x15\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x27\n" // offset *= kernel_stride * output_size
- "add x13, x13, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x9, x13, x25, LSL #2\n"
- "mul x20, x20, x26\n" // offset *= output_tile_size
- "add x28, x9, x25, LSL #2\n"
- "add x12, x12, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "lsr x24, %x[n_channels], #0x2\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
"ld1r { v27.4s }, [x20]\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x23, #0x0\n"
"ld1r { v26.4s }, [x20]\n"
- "add x27, x28, x25, LSL #2\n"
- "add x26, x11, x15\n"
- "add x25, x12, x24, LSL #2\n"
+ "mul x22, x10, x26\n" // offset = tile_i * ld_input_row
+ "ldr x12, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x21, XZR, x16\n"
+ "mul x20, x10, x25\n" // offset = tile_i * ld_output_row
+ "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x22, x9, x15, x22\n" // offset += tile_j * ld_input_col
+ "lsl x15, x15, #0x2\n"
+ "madd x20, x9, x14, x20\n" // offset += tile_j * ld_output_col
"lsl x14, x14, #0x2\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q25, [x10, #0x0]\n"
- "ldr q0, [x10, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x10, #0x20]\n"
- "ldr q2, [x10, #0x30]\n"
- "ldr q3, [x10, #0x40]\n"
- "ldr q4, [x10, #0x50]\n"
- "ldr q5, [x10, #0x60]\n"
- "ldr q6, [x10, #0x70]\n"
- "ldr q7, [x10, #0x80]\n"
- "ldr q8, [x10, #0x90]\n"
- "add x10, x10, #0xa0\n"
- "ldr q9, [x9, x15]\n"
+ "mul x22, x22, x28\n" // offset *= kernel_stride * output_size
+ "add x10, x15, x15\n"
+ "add x9, x10, x15\n"
+ "mul x20, x20, x27\n" // offset *= output_tile_size
+ "add x13, x13, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x28, x13, x26, LSL #2\n"
+ "add x27, x28, x26, LSL #2\n"
+ "add x26, x27, x26, LSL #2\n"
+ "add x12, x12, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x25, x12, x25, LSL #2\n"
+ "cbz x24, 4f\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q0, [x11, #0x10]\n"
+ "cmp x16, x24, LSL #4\n"
+ "ldr q1, [x11, #0x20]\n"
+ "ldr q2, [x11, #0x30]\n"
+ "ldr q3, [x11, #0x40]\n"
+ "ldr q4, [x11, #0x50]\n"
+ "ldr q5, [x11, #0x60]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "ldr q7, [x11, #0x80]\n"
+ "ldr q8, [x11, #0x90]\n"
+ "add x11, x11, #0xa0\n"
+ "ldr q9, [x28, x15]\n"
"ld1 { v10.4s }, [x13]\n"
- "ldr q11, [x13, x26]\n"
- "ldr q12, [x9, x11]\n"
- "ldr q13, [x28, x15]\n"
+ "ldr q11, [x13, x9]\n"
+ "ldr q12, [x28, x10]\n"
+ "ldr q13, [x27, x15]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v24.16b, v25.16b\n fmla v24.4s, v4.4s, v9.4s\n"
"mov v23.16b, v25.16b\n fmla v23.4s, v3.4s, v9.4s\n"
- "add x23, x23, #0x10\n"
- "cmp x23, x22, LSL #4\n"
+ "add x16, x16, #0x10\n"
+ "add x21, x21, #0x10\n"
"mov v22.16b, v25.16b\n fmla v22.4s, v1.4s, v9.4s\n"
"mov v21.16b, v25.16b\n fmla v21.4s, v0.4s, v9.4s\n"
- "ld1 { v18.4s }, [x27]\n"
- "ldr q25, [x10, #0x0]\n"
+ "ld1 { v18.4s }, [x26]\n"
+ "ldr q25, [x11, #0x0]\n"
+ "cmp x16, x24, LSL #4\n"
+ "add x23, x23, #0x10\n"
"fmla v24.4s, v0.4s, v10.4s\n"
- "ldr q20, [x28, x11]\n"
+ "ldr q20, [x27, x10]\n"
"fmla v23.4s, v2.4s, v11.4s\n"
- "ldr q17, [x27, x26]\n"
+ "ldr q17, [x26, x9]\n"
"fmla v22.4s, v2.4s, v12.4s\n"
"fmla v21.4s, v1.4s, v12.4s\n"
- "add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
"fmla v24.4s, v5.4s, v12.4s\n"
"fmla v23.4s, v4.4s, v12.4s\n"
"ldr q16, [x13, x15]\n"
"fmla v22.4s, v6.4s, v18.4s\n"
- "ldr q18, [x13, x11]\n"
- "fmla v21.4s, v3.4s, v13.4s\n"
+ "ldr q18, [x13, x10]\n"
"add x13, x13, #0x10\n"
+ "fmla v21.4s, v3.4s, v13.4s\n"
"fmla v24.4s, v7.4s, v13.4s\n"
"fmla v23.4s, v6.4s, v13.4s\n"
"fmla v22.4s, v4.4s, v13.4s\n"
"fmla v21.4s, v8.4s, v17.4s\n"
- "ld1 { v17.4s }, [x9]\n"
+ "ld1 { v17.4s }, [x28]\n"
"fmla v24.4s, v1.4s, v16.4s\n"
"fmla v23.4s, v0.4s, v16.4s\n"
- "ldr q16, [x9, x26]\n"
- "add x9, x9, #0x10\n"
+ "ldr q16, [x28, x9]\n"
+ "add x28, x28, #0x10\n"
"fmla v22.4s, v5.4s, v20.4s\n"
"fmla v21.4s, v4.4s, v20.4s\n"
- "ldr q4, [x10, #0x50]\n"
+ "ldr q4, [x11, #0x50]\n"
"fmla v24.4s, v2.4s, v18.4s\n"
"fmla v23.4s, v1.4s, v18.4s\n"
- "ld1 { v19.4s }, [x28]\n"
- "ldr q1, [x10, #0x20]\n"
+ "ld1 { v19.4s }, [x27]\n"
+ "ldr q1, [x11, #0x20]\n"
"fmla v22.4s, v0.4s, v17.4s\n"
- "ldr q0, [x10, #0x10]\n"
+ "ldr q0, [x11, #0x10]\n"
"fmla v21.4s, v2.4s, v16.4s\n"
- "ldr q2, [x10, #0x30]\n"
+ "ldr q2, [x11, #0x30]\n"
"fmla v24.4s, v8.4s, v20.4s\n"
"fmla v23.4s, v7.4s, v20.4s\n"
- "ldr q18, [x28, x26]\n"
- "add x28, x28, #0x10\n"
- "ldr q13, [x28, x15]\n"
+ "ldr q18, [x27, x9]\n"
+ "add x27, x27, #0x10\n"
+ "ldr q13, [x27, x15]\n"
"fmla v22.4s, v3.4s, v19.4s\n"
"fmla v21.4s, v5.4s, v18.4s\n"
"fmla v24.4s, v3.4s, v17.4s\n"
- "ldr q17, [x27, x15]\n"
- "ldr q3, [x10, #0x40]\n"
+ "ldr q17, [x26, x15]\n"
+ "ldr q3, [x11, #0x40]\n"
"fmla v23.4s, v5.4s, v16.4s\n"
- "ldr q16, [x27, x11]\n"
- "ldr q5, [x10, #0x60]\n"
+ "ldr q16, [x26, x10]\n"
+ "ldr q5, [x11, #0x60]\n"
+ "add x26, x26, #0x10\n"
"fmla v22.4s, v7.4s, v17.4s\n"
"fmla v21.4s, v6.4s, v17.4s\n"
- "ldr q11, [x13, x26]\n"
+ "ldr q11, [x13, x9]\n"
"fmla v24.4s, v6.4s, v19.4s\n"
- "ldr q9, [x9, x15]\n"
+ "ldr q9, [x28, x15]\n"
+ "ldr q6, [x11, #0x70]\n"
"fmla v23.4s, v8.4s, v18.4s\n"
"ld1 { v10.4s }, [x13]\n"
- "ldr q6, [x10, #0x70]\n"
"fmla v22.4s, v8.4s, v16.4s\n"
+ "ldr q8, [x11, #0x90]\n"
"fmla v21.4s, v7.4s, v16.4s\n"
- "ldr q12, [x9, x11]\n"
- "ldr q7, [x10, #0x80]\n"
+ "ldr q12, [x28, x10]\n"
+ "ldr q7, [x11, #0x80]\n"
+ "add x11, x11, #0xa0\n"
"fmax v24.4s, v24.4s, v27.4s\n"
"fmax v23.4s, v23.4s, v27.4s\n"
- "ldr q8, [x10, #0x90]\n"
"fmax v22.4s, v22.4s, v27.4s\n"
"fmax v21.4s, v21.4s, v27.4s\n"
- "add x27, x27, #0x10\n"
"fmin v24.4s, v24.4s, v26.4s\n"
"fmin v23.4s, v23.4s, v26.4s\n"
- "st1 { v24.4s }, [x12]\n"
- "add x10, x10, #0xa0\n"
"fmin v22.4s, v22.4s, v26.4s\n"
"fmin v21.4s, v21.4s, v26.4s\n"
+ "st1 { v24.4s }, [x12]\n"
"str q23, [x12, x14]\n"
"add x12, x12, #0x10\n"
"st1 { v22.4s }, [x25]\n"
@@ -236,58 +236,58 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"mov v23.16b, v25.16b\n fmla v23.4s, v3.4s, v9.4s\n"
"mov v22.16b, v25.16b\n fmla v22.4s, v1.4s, v9.4s\n"
"mov v21.16b, v25.16b\n fmla v21.4s, v0.4s, v9.4s\n"
- "ld1 { v18.4s }, [x27]\n"
+ "ld1 { v18.4s }, [x26]\n"
"fmla v24.4s, v0.4s, v10.4s\n"
- "ldr q20, [x28, x11]\n"
+ "ldr q20, [x27, x10]\n"
"fmla v23.4s, v2.4s, v11.4s\n"
- "ldr q17, [x27, x26]\n"
+ "ldr q17, [x26, x9]\n"
"fmla v22.4s, v2.4s, v12.4s\n"
"fmla v21.4s, v1.4s, v12.4s\n"
"fmla v24.4s, v5.4s, v12.4s\n"
"fmla v23.4s, v4.4s, v12.4s\n"
"ldr q16, [x13, x15]\n"
"fmla v22.4s, v6.4s, v18.4s\n"
- "ldr q18, [x13, x11]\n"
- "fmla v21.4s, v3.4s, v13.4s\n"
+ "ldr q18, [x13, x10]\n"
"add x13, x13, #0x10\n"
+ "fmla v21.4s, v3.4s, v13.4s\n"
"fmla v24.4s, v7.4s, v13.4s\n"
"fmla v23.4s, v6.4s, v13.4s\n"
"fmla v22.4s, v4.4s, v13.4s\n"
"fmla v21.4s, v8.4s, v17.4s\n"
- "ld1 { v17.4s }, [x9]\n"
+ "ld1 { v17.4s }, [x28]\n"
"fmla v24.4s, v1.4s, v16.4s\n"
"fmla v23.4s, v0.4s, v16.4s\n"
- "ldr q16, [x9, x26]\n"
- "add x9, x9, #0x10\n"
+ "ldr q16, [x28, x9]\n"
+ "add x28, x28, #0x10\n"
"fmla v22.4s, v5.4s, v20.4s\n"
"fmla v21.4s, v4.4s, v20.4s\n"
"fmla v24.4s, v2.4s, v18.4s\n"
"fmla v23.4s, v1.4s, v18.4s\n"
- "ld1 { v19.4s }, [x28]\n"
+ "ld1 { v19.4s }, [x27]\n"
"fmla v22.4s, v0.4s, v17.4s\n"
"fmla v21.4s, v2.4s, v16.4s\n"
"fmla v24.4s, v8.4s, v20.4s\n"
"fmla v23.4s, v7.4s, v20.4s\n"
- "ldr q18, [x28, x26]\n"
- "add x28, x28, #0x10\n"
+ "ldr q18, [x27, x9]\n"
+ "add x27, x27, #0x10\n"
"fmla v22.4s, v3.4s, v19.4s\n"
"fmla v21.4s, v5.4s, v18.4s\n"
"fmla v24.4s, v3.4s, v17.4s\n"
- "ldr q17, [x27, x15]\n"
+ "ldr q17, [x26, x15]\n"
"fmla v23.4s, v5.4s, v16.4s\n"
- "ldr q16, [x27, x11]\n"
+ "ldr q16, [x26, x10]\n"
+ "add x26, x26, #0x10\n"
"fmla v22.4s, v7.4s, v17.4s\n"
"fmla v21.4s, v6.4s, v17.4s\n"
- "add x27, x27, #0x10\n"
"fmla v24.4s, v6.4s, v19.4s\n"
"fmla v23.4s, v8.4s, v18.4s\n"
- "fmax v24.4s, v24.4s, v27.4s\n"
"fmla v22.4s, v8.4s, v16.4s\n"
"fmla v21.4s, v7.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v27.4s\n"
"fmax v23.4s, v23.4s, v27.4s\n"
+ "fmin v24.4s, v24.4s, v26.4s\n"
"fmax v22.4s, v22.4s, v27.4s\n"
"fmax v21.4s, v21.4s, v27.4s\n"
- "fmin v24.4s, v24.4s, v26.4s\n"
"fmin v23.4s, v23.4s, v26.4s\n"
"st1 { v24.4s }, [x12]\n"
"fmin v22.4s, v22.4s, v26.4s\n"
@@ -300,21 +300,21 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 31f\n"
- "ldr q25, [x10, #0x0]\n"
- "ldr q0, [x10, #0x10]\n"
- "add x24, x9, x15\n"
+ "ldr q25, [x11, #0x0]\n"
+ "ldr q0, [x11, #0x10]\n"
+ "add x24, x28, x15\n"
"add x23, x13, XZR\n"
- "ldr q1, [x10, #0x20]\n"
- "ldr q2, [x10, #0x30]\n"
- "add x22, x13, x26\n"
- "add x21, x9, x11\n"
- "ldr q3, [x10, #0x40]\n"
- "ldr q4, [x10, #0x50]\n"
- "add x20, x28, x15\n"
- "ldr q5, [x10, #0x60]\n"
- "ldr q6, [x10, #0x70]\n"
- "ldr q7, [x10, #0x80]\n"
- "ldr q8, [x10, #0x90]\n"
+ "ldr q1, [x11, #0x20]\n"
+ "ldr q2, [x11, #0x30]\n"
+ "add x22, x13, x9\n"
+ "add x21, x28, x10\n"
+ "ldr q3, [x11, #0x40]\n"
+ "ldr q4, [x11, #0x50]\n"
+ "add x20, x27, x15\n"
+ "ldr q5, [x11, #0x60]\n"
+ "ldr q6, [x11, #0x70]\n"
+ "ldr q7, [x11, #0x80]\n"
+ "ldr q8, [x11, #0x90]\n"
"tbz %x[n_channels], #1, 5f\n"
"ldr d9, [x24], #0x8\n"
"ldr d10, [x23], #0x8\n"
@@ -337,15 +337,15 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"6:" // Tile loop: Oddments: Load inputs: (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 1: End
"mov v28.16b, v25.16b\n fmla v28.4s, v4.4s, v9.4s\n"
"mov v29.16b, v25.16b\n fmla v29.4s, v3.4s, v9.4s\n"
- "add x20, x27, XZR\n"
+ "add x20, x26, XZR\n"
"mov v30.16b, v25.16b\n fmla v30.4s, v1.4s, v9.4s\n"
"mov v31.16b, v25.16b\n fmla v31.4s, v0.4s, v9.4s\n"
"fmla v28.4s, v0.4s, v10.4s\n"
"fmla v29.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
"fmla v30.4s, v2.4s, v12.4s\n"
"fmla v31.4s, v1.4s, v12.4s\n"
+ "fmla v28.4s, v5.4s, v12.4s\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
"tbz %x[n_channels], #1, 7f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
@@ -356,10 +356,10 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"8:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
"fmla v30.4s, v6.4s, v9.4s\n"
"fmla v28.4s, v7.4s, v13.4s\n"
- "add x20, x27, x26\n"
+ "add x20, x26, x9\n"
"fmla v29.4s, v6.4s, v13.4s\n"
- "fmla v30.4s, v4.4s, v13.4s\n"
"fmla v31.4s, v3.4s, v13.4s\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
"tbz %x[n_channels], #1, 9f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
@@ -380,7 +380,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"12:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: End
"fmla v28.4s, v1.4s, v12.4s\n"
"fmla v29.4s, v0.4s, v12.4s\n"
- "add x20, x13, x11\n"
+ "add x20, x13, x10\n"
"tbz %x[n_channels], #1, 13f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
@@ -391,7 +391,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"14:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: End
"fmla v28.4s, v2.4s, v9.4s\n"
"fmla v29.4s, v1.4s, v9.4s\n"
- "add x20, x28, x11\n"
+ "add x20, x27, x10\n"
"tbz %x[n_channels], #1, 15f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
@@ -402,7 +402,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"16:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 1: End
"fmla v28.4s, v8.4s, v10.4s\n"
"fmla v29.4s, v7.4s, v10.4s\n"
- "add x20, x9, XZR\n"
+ "add x20, x28, XZR\n"
"fmla v30.4s, v5.4s, v10.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
"tbz %x[n_channels], #1, 17f\n"
@@ -415,7 +415,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"18:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: End
"fmla v28.4s, v3.4s, v11.4s\n"
"fmla v30.4s, v0.4s, v11.4s\n"
- "add x20, x9, x26\n"
+ "add x20, x28, x9\n"
"tbz %x[n_channels], #1, 19f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
@@ -426,7 +426,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"20:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
"fmla v29.4s, v5.4s, v12.4s\n"
"fmla v31.4s, v2.4s, v12.4s\n"
- "add x20, x28, XZR\n"
+ "add x20, x27, XZR\n"
"tbz %x[n_channels], #1, 21f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
@@ -437,7 +437,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"22:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: End
"fmla v28.4s, v6.4s, v9.4s\n"
"fmla v30.4s, v3.4s, v9.4s\n"
- "add x20, x28, x26\n"
+ "add x20, x27, x9\n"
"tbz %x[n_channels], #1, 23f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
@@ -448,7 +448,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"24:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: End
"fmla v29.4s, v8.4s, v10.4s\n"
"fmla v31.4s, v5.4s, v10.4s\n"
- "add x20, x27, x15\n"
+ "add x20, x26, x15\n"
"tbz %x[n_channels], #1, 25f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
@@ -459,7 +459,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"26:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
"fmla v30.4s, v7.4s, v11.4s\n"
"fmla v31.4s, v6.4s, v11.4s\n"
- "add x20, x27, x11\n"
+ "add x20, x26, x10\n"
"tbz %x[n_channels], #1, 27f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
@@ -472,19 +472,19 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"fmla v31.4s, v7.4s, v12.4s\n"
"fmax v28.4s, v28.4s, v27.4s\n"
"fmax v29.4s, v29.4s, v27.4s\n"
+ "fmin v28.4s, v28.4s, v26.4s\n"
"fmax v30.4s, v30.4s, v27.4s\n"
"fmax v31.4s, v31.4s, v27.4s\n"
- "fmin v28.4s, v28.4s, v26.4s\n"
"fmin v29.4s, v29.4s, v26.4s\n"
"fmin v30.4s, v30.4s, v26.4s\n"
"fmin v31.4s, v31.4s, v26.4s\n"
"tbz %x[n_channels], #1, 29f\n"
"mov x21, x12\n"
"mov x20, x25\n"
- "st1 { v28.d }[0], [x21], x14\n"
- "st1 { v30.d }[0], [x20], x14\n"
"add x12, x12, #0x8\n"
"add x25, x25, #0x8\n"
+ "st1 { v28.d }[0], [x21], x14\n"
+ "st1 { v30.d }[0], [x20], x14\n"
"st1 { v29.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #0, 30f\n"
@@ -504,20 +504,20 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"st1 { v31.s }[0], [x20]\n"
"30:" // Tile loop: Oddments: Store: Bit 1: End
"31:" // Tile loop: End
- "ldr x22, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x22, x22, #0x1\n"
- "add x21, x23, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x22, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x23, x23, x21, LT\n"
- "csel x22, x22, XZR, LT\n"
- "cmp x23, x20\n"
+ "ldr x9, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x9, x9, #0x1\n"
+ "add x20, x10, #0x1\n"
+ "cmp x9, x22\n"
+ "csel x10, x10, x20, LT\n"
+ "csel x9, x9, XZR, LT\n"
+ "cmp x10, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 7dedfd972a..abe586725b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -78,237 +78,237 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x16, #0x10\n" // cntb _, ALL, #1
- "lsr x15, %x[n_channels], #0x2\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v27.4s }, [x20]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "mov x17, #0x10\n" // cntb _, ALL, #1
+ "lsr x16, %x[n_channels], #0x2\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v27.4s }, [x21]\n"
"ld1r { v26.4s }, [x20]\n"
- "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "mov x28, #0x0\n"
- "sub x27, XZR, x16\n"
- "cbz x15, 3f\n"
- "ldr q25, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "cmp x16, x15, LSL #4\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "add x14, x14, #0xa0\n"
- "ldp x21, x20, [x13, #0x0]\n"
- "ldr q9, [x21, x28]\n"
- "ldr q10, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "ldr q11, [x21, x28]\n"
- "ldr q12, [x20, x28]\n"
- "ldr x20, [x13, #0x20]\n"
- "ldr q13, [x20, x28]\n"
+ "add x14, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [x22, #0x0]\n"
+ "ldp x10, x9, [x22, #0x10]\n"
+ "sub x28, XZR, x17\n"
+ "cbz x16, 3f\n"
+ "ldr q25, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "cmp x17, x16, LSL #4\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q4, [x15, #0x50]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
+ "add x15, x15, #0xa0\n"
+ "ldp x24, x23, [x14, #0x0]\n"
+ "ldp x22, x21, [x14, #0x10]\n"
+ "ldr x20, [x14, #0x20]\n"
+ "ldr q9, [x24, x13]\n"
+ "ldr q10, [x23, x13]\n"
+ "ldr q11, [x22, x13]\n"
+ "ldr q12, [x21, x13]\n"
+ "ldr q13, [x20, x13]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v24.16b, v25.16b\n fmla v24.4s, v4.4s, v9.4s\n"
"mov v23.16b, v25.16b\n fmla v23.4s, v3.4s, v9.4s\n"
- "ldr x21, [x13, #0x28]\n"
- "ldr x20, [x13, #0x30]\n"
+ "ldr x22, [x14, #0x28]\n"
+ "ldr x21, [x14, #0x30]\n"
"mov v22.16b, v25.16b\n fmla v22.4s, v1.4s, v9.4s\n"
"mov v21.16b, v25.16b\n fmla v21.4s, v0.4s, v9.4s\n"
- "ldr q18, [x21, x28]\n"
- "ldr q25, [x14, #0x0]\n"
+ "ldr q25, [x15, #0x0]\n"
+ "ldr x24, [x14, #0x38]\n"
+ "ldr x20, [x14, #0x48]\n"
+ "ldr x23, [x14, #0x40]\n"
+ "add x28, x28, #0x10\n"
+ "ldr q18, [x22, x13]\n"
+ "ldr x22, [x14, #0x50]\n"
"fmla v24.4s, v0.4s, v10.4s\n"
"fmla v23.4s, v2.4s, v11.4s\n"
- "ldr q17, [x20, x28]\n"
- "ldr x21, [x13, #0x38]\n"
+ "ldr q17, [x21, x13]\n"
+ "ldr x21, [x14, #0x58]\n"
+ "ldr q20, [x20, x13]\n"
"fmla v22.4s, v2.4s, v12.4s\n"
"fmla v21.4s, v1.4s, v12.4s\n"
- "ldr x20, [x13, #0x48]\n"
- "ldr q20, [x20, x28]\n"
+ "ldr x20, [x14, #0x60]\n"
+ "ldr x27, [x14, #0x68]\n"
+ "ldr x26, [x14, #0x70]\n"
"fmla v24.4s, v5.4s, v12.4s\n"
"fmla v23.4s, v4.4s, v12.4s\n"
- "ldr q16, [x21, x28]\n"
- "ldr x20, [x13, #0x40]\n"
+ "ldr q16, [x24, x13]\n"
+ "ldr x25, [x14, #0x78]\n"
"fmla v22.4s, v6.4s, v18.4s\n"
- "ldr q18, [x20, x28]\n"
+ "ldr q18, [x23, x13]\n"
+ "ldp x24, x23, [x14, #0x0]\n"
"fmla v21.4s, v3.4s, v13.4s\n"
- "ldr x20, [x13, #0x50]\n"
"fmla v24.4s, v7.4s, v13.4s\n"
"fmla v23.4s, v6.4s, v13.4s\n"
- "ldr x22, [x13, #0x58]\n"
- "ldr x21, [x13, #0x60]\n"
"fmla v22.4s, v4.4s, v13.4s\n"
"fmla v21.4s, v8.4s, v17.4s\n"
- "ldr q17, [x20, x28]\n"
- "ldr x20, [x13, #0x68]\n"
+ "ldr q17, [x22, x13]\n"
"fmla v24.4s, v1.4s, v16.4s\n"
"fmla v23.4s, v0.4s, v16.4s\n"
- "ldr q16, [x22, x28]\n"
- "ldr x26, [x13, #0x70]\n"
+ "ldr q16, [x21, x13]\n"
+ "ldp x22, x21, [x14, #0x10]\n"
"fmla v22.4s, v5.4s, v20.4s\n"
"fmla v21.4s, v4.4s, v20.4s\n"
- "ldr q4, [x14, #0x50]\n"
- "ldr x25, [x13, #0x78]\n"
+ "ldr q4, [x15, #0x50]\n"
"fmla v24.4s, v2.4s, v18.4s\n"
"fmla v23.4s, v1.4s, v18.4s\n"
- "ldr q19, [x21, x28]\n"
- "ldr q1, [x14, #0x20]\n"
+ "ldr q19, [x20, x13]\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr x20, [x14, #0x20]\n"
"fmla v22.4s, v0.4s, v17.4s\n"
- "ldr q0, [x14, #0x10]\n"
+ "ldr q0, [x15, #0x10]\n"
"fmla v21.4s, v2.4s, v16.4s\n"
- "ldr q2, [x14, #0x30]\n"
+ "ldr q2, [x15, #0x30]\n"
"fmla v24.4s, v8.4s, v20.4s\n"
+ "ldr q13, [x20, x17]\n"
"fmla v23.4s, v7.4s, v20.4s\n"
- "ldr q18, [x20, x28]\n"
- "ldp x24, x23, [x13, #0x0]\n"
+ "ldr q18, [x27, x13]\n"
"fmla v22.4s, v3.4s, v19.4s\n"
"fmla v21.4s, v5.4s, v18.4s\n"
- "ldp x22, x21, [x13, #0x10]\n"
- "ldr x20, [x13, #0x20]\n"
- "ldr q13, [x20, x16]\n"
"fmla v24.4s, v3.4s, v17.4s\n"
- "ldr q17, [x26, x28]\n"
+ "ldr q17, [x26, x13]\n"
+ "ldr q3, [x15, #0x40]\n"
"fmla v23.4s, v5.4s, v16.4s\n"
- "ldr q16, [x25, x28]\n"
- "ldr q3, [x14, #0x40]\n"
+ "ldr q16, [x25, x13]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "add x13, x13, #0x10\n"
"fmla v22.4s, v7.4s, v17.4s\n"
"fmla v21.4s, v6.4s, v17.4s\n"
- "ldr q11, [x22, x16]\n"
- "ldr q5, [x14, #0x60]\n"
+ "ldr q11, [x22, x17]\n"
"fmla v24.4s, v6.4s, v19.4s\n"
+ "ldr q9, [x24, x17]\n"
+ "ldr q6, [x15, #0x70]\n"
"fmla v23.4s, v8.4s, v18.4s\n"
- "ldr q9, [x24, x16]\n"
- "ldr q10, [x23, x16]\n"
+ "ldr q10, [x23, x17]\n"
"fmla v22.4s, v8.4s, v16.4s\n"
+ "ldr q8, [x15, #0x90]\n"
"fmla v21.4s, v7.4s, v16.4s\n"
- "ldr q12, [x21, x16]\n"
- "ldr q6, [x14, #0x70]\n"
+ "ldr q12, [x21, x17]\n"
+ "add x17, x17, #0x10\n"
+ "ldr q7, [x15, #0x80]\n"
+ "cmp x17, x16, LSL #4\n"
+ "add x15, x15, #0xa0\n"
"fmax v24.4s, v24.4s, v27.4s\n"
"fmax v23.4s, v23.4s, v27.4s\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
"fmax v22.4s, v22.4s, v27.4s\n"
"fmax v21.4s, v21.4s, v27.4s\n"
- "add x16, x16, #0x10\n"
- "add x27, x27, #0x10\n"
"fmin v24.4s, v24.4s, v26.4s\n"
"fmin v23.4s, v23.4s, v26.4s\n"
- "cmp x16, x15, LSL #4\n"
"fmin v22.4s, v22.4s, v26.4s\n"
"fmin v21.4s, v21.4s, v26.4s\n"
- "add x28, x28, #0x10\n"
- "str q24, [x12, x27]\n"
- "add x14, x14, #0xa0\n"
- "str q23, [x11, x27]\n"
- "str q22, [x10, x27]\n"
- "str q21, [x9, x27]\n"
+ "str q24, [x12, x28]\n"
+ "str q23, [x11, x28]\n"
+ "str q22, [x10, x28]\n"
+ "str q21, [x9, x28]\n"
"blt 1b\n"
"2:" // Channel tail
"mov v24.16b, v25.16b\n fmla v24.4s, v4.4s, v9.4s\n"
"mov v23.16b, v25.16b\n fmla v23.4s, v3.4s, v9.4s\n"
- "ldr x21, [x13, #0x28]\n"
- "ldr x20, [x13, #0x30]\n"
+ "ldr x22, [x14, #0x28]\n"
+ "ldr x21, [x14, #0x30]\n"
"mov v22.16b, v25.16b\n fmla v22.4s, v1.4s, v9.4s\n"
"mov v21.16b, v25.16b\n fmla v21.4s, v0.4s, v9.4s\n"
- "ldr q18, [x21, x28]\n"
- "ldr x21, [x13, #0x38]\n"
+ "ldr x27, [x14, #0x38]\n"
+ "ldr x20, [x14, #0x48]\n"
+ "ldr x26, [x14, #0x40]\n"
+ "ldr x25, [x14, #0x50]\n"
+ "add x28, x28, #0x10\n"
+ "ldr q18, [x22, x13]\n"
+ "ldr x24, [x14, #0x58]\n"
"fmla v24.4s, v0.4s, v10.4s\n"
+ "ldr q20, [x20, x13]\n"
"fmla v23.4s, v2.4s, v11.4s\n"
- "ldr q17, [x20, x28]\n"
- "ldr x20, [x13, #0x48]\n"
- "ldr q20, [x20, x28]\n"
+ "ldr q17, [x21, x13]\n"
"fmla v22.4s, v2.4s, v12.4s\n"
"fmla v21.4s, v1.4s, v12.4s\n"
- "ldr x20, [x13, #0x40]\n"
+ "ldr x23, [x14, #0x60]\n"
+ "ldr x22, [x14, #0x68]\n"
+ "ldr x21, [x14, #0x70]\n"
"fmla v24.4s, v5.4s, v12.4s\n"
"fmla v23.4s, v4.4s, v12.4s\n"
- "ldr q16, [x21, x28]\n"
- "ldr x21, [x13, #0x50]\n"
+ "ldr q16, [x27, x13]\n"
+ "ldr x20, [x14, #0x78]\n"
"fmla v22.4s, v6.4s, v18.4s\n"
- "ldr q18, [x20, x28]\n"
+ "ldr q18, [x26, x13]\n"
"fmla v21.4s, v3.4s, v13.4s\n"
- "ldr x20, [x13, #0x58]\n"
"fmla v24.4s, v7.4s, v13.4s\n"
"fmla v23.4s, v6.4s, v13.4s\n"
- "ldr x23, [x13, #0x60]\n"
- "ldr x22, [x13, #0x68]\n"
"fmla v22.4s, v4.4s, v13.4s\n"
"fmla v21.4s, v8.4s, v17.4s\n"
- "ldr q17, [x21, x28]\n"
- "ldr x21, [x13, #0x70]\n"
+ "ldr q17, [x25, x13]\n"
"fmla v24.4s, v1.4s, v16.4s\n"
"fmla v23.4s, v0.4s, v16.4s\n"
- "ldr q16, [x20, x28]\n"
- "ldr x20, [x13, #0x78]\n"
+ "ldr q16, [x24, x13]\n"
"fmla v22.4s, v5.4s, v20.4s\n"
"fmla v21.4s, v4.4s, v20.4s\n"
- "add x27, x27, #0x10\n"
"fmla v24.4s, v2.4s, v18.4s\n"
"fmla v23.4s, v1.4s, v18.4s\n"
- "ldr q19, [x23, x28]\n"
+ "ldr q19, [x23, x13]\n"
"fmla v22.4s, v0.4s, v17.4s\n"
"fmla v21.4s, v2.4s, v16.4s\n"
"fmla v24.4s, v8.4s, v20.4s\n"
"fmla v23.4s, v7.4s, v20.4s\n"
- "ldr q18, [x22, x28]\n"
+ "ldr q18, [x22, x13]\n"
"fmla v22.4s, v3.4s, v19.4s\n"
"fmla v21.4s, v5.4s, v18.4s\n"
"fmla v24.4s, v3.4s, v17.4s\n"
- "ldr q17, [x21, x28]\n"
+ "ldr q17, [x21, x13]\n"
"fmla v23.4s, v5.4s, v16.4s\n"
- "ldr q16, [x20, x28]\n"
+ "ldr q16, [x20, x13]\n"
+ "add x13, x13, #0x10\n"
"fmla v22.4s, v7.4s, v17.4s\n"
"fmla v21.4s, v6.4s, v17.4s\n"
- "add x28, x28, #0x10\n"
"fmla v24.4s, v6.4s, v19.4s\n"
"fmla v23.4s, v8.4s, v18.4s\n"
- "fmax v24.4s, v24.4s, v27.4s\n"
"fmla v22.4s, v8.4s, v16.4s\n"
"fmla v21.4s, v7.4s, v16.4s\n"
+ "fmax v24.4s, v24.4s, v27.4s\n"
"fmax v23.4s, v23.4s, v27.4s\n"
+ "fmin v24.4s, v24.4s, v26.4s\n"
"fmax v22.4s, v22.4s, v27.4s\n"
"fmax v21.4s, v21.4s, v27.4s\n"
- "fmin v24.4s, v24.4s, v26.4s\n"
"fmin v23.4s, v23.4s, v26.4s\n"
- "str q24, [x12, x27]\n"
+ "str q24, [x12, x28]\n"
"fmin v22.4s, v22.4s, v26.4s\n"
"fmin v21.4s, v21.4s, v26.4s\n"
- "str q23, [x11, x27]\n"
- "str q22, [x10, x27]\n"
- "str q21, [x9, x27]\n"
+ "str q23, [x11, x28]\n"
+ "str q22, [x10, x28]\n"
+ "str q21, [x9, x28]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 30f\n"
- "ldr q25, [x14, #0x0]\n"
- "ldr q0, [x14, #0x10]\n"
- "mov x20, x28\n"
+ "ldr q25, [x15, #0x0]\n"
+ "ldr q0, [x15, #0x10]\n"
+ "mov x20, x13\n"
+ "ldr q1, [x15, #0x20]\n"
+ "ldr q2, [x15, #0x30]\n"
+ "ldr q3, [x15, #0x40]\n"
+ "ldr q4, [x15, #0x50]\n"
+ "ldr q5, [x15, #0x60]\n"
+ "ldr q6, [x15, #0x70]\n"
"add x12, x12, x20\n"
- "ldr q1, [x14, #0x20]\n"
- "ldr q2, [x14, #0x30]\n"
"add x11, x11, x20\n"
+ "ldr q7, [x15, #0x80]\n"
+ "ldr q8, [x15, #0x90]\n"
"add x10, x10, x20\n"
- "ldr q3, [x14, #0x40]\n"
- "ldr q4, [x14, #0x50]\n"
"add x9, x9, x20\n"
- "ldr q5, [x14, #0x60]\n"
- "ldr q6, [x14, #0x70]\n"
- "ldr q7, [x14, #0x80]\n"
- "ldr q8, [x14, #0x90]\n"
- "ldr x24, [x13, #0x0]\n"
- "ldr x23, [x13, #0x8]\n"
- "add x24, x24, x28\n"
- "add x23, x23, x28\n"
- "ldr x22, [x13, #0x10]\n"
- "ldr x21, [x13, #0x18]\n"
- "add x22, x22, x28\n"
- "add x21, x21, x28\n"
- "ldr x20, [x13, #0x20]\n"
- "add x20, x20, x28\n"
+ "ldr x24, [x14, #0x0]\n"
+ "ldr x23, [x14, #0x8]\n"
+ "ldr x22, [x14, #0x10]\n"
+ "ldr x21, [x14, #0x18]\n"
+ "ldr x20, [x14, #0x20]\n"
+ "add x24, x24, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
+ "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 4f\n"
"ld1 { v9.d }[0], [x24], #0x8\n"
"ld1 { v10.d }[0], [x23], #0x8\n"
@@ -331,16 +331,16 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"5:" // Oddments: Load inputs (1, 1), (0, 0), (0, 3), (1, 2), (2, 1): Bit 1: End
"mov v28.16b, v25.16b\n fmla v28.4s, v4.4s, v9.4s\n"
"mov v29.16b, v25.16b\n fmla v29.4s, v3.4s, v9.4s\n"
- "ldr x20, [x13, #0x28]\n"
- "add x20, x20, x28\n"
+ "ldr x20, [x14, #0x28]\n"
"mov v30.16b, v25.16b\n fmla v30.4s, v1.4s, v9.4s\n"
"mov v31.16b, v25.16b\n fmla v31.4s, v0.4s, v9.4s\n"
+ "add x20, x20, x13\n"
"fmla v28.4s, v0.4s, v10.4s\n"
"fmla v29.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v12.4s\n"
- "fmla v29.4s, v4.4s, v12.4s\n"
"fmla v30.4s, v2.4s, v12.4s\n"
"fmla v31.4s, v1.4s, v12.4s\n"
+ "fmla v28.4s, v5.4s, v12.4s\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
"tbz %x[n_channels], #1, 6f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
@@ -350,12 +350,12 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"ld1 { v9.s }[0], [x20], #0x4\n"
"7:" // Oddments: Load input (3, 0): Bit 1: End
"fmla v30.4s, v6.4s, v9.4s\n"
- "ldr x20, [x13, #0x30]\n"
+ "ldr x20, [x14, #0x30]\n"
"fmla v28.4s, v7.4s, v13.4s\n"
- "add x20, x20, x28\n"
"fmla v29.4s, v6.4s, v13.4s\n"
- "fmla v30.4s, v4.4s, v13.4s\n"
"fmla v31.4s, v3.4s, v13.4s\n"
+ "add x20, x20, x13\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
"tbz %x[n_channels], #1, 8f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 9f\n"
@@ -364,9 +364,9 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"8:" // Oddments: Load input (3, 3): Bit 1: Unset
"ld1 { v11.s }[0], [x20], #0x4\n"
"9:" // Oddments: Load input (3, 3): Bit 1: End
- "ldr x20, [x13, #0x38]\n"
+ "ldr x20, [x14, #0x38]\n"
"fmla v31.4s, v8.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 10f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
@@ -375,10 +375,10 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"10:" // Oddments: Load input (0, 1): Bit 1: Unset
"ld1 { v12.s }[0], [x20], #0x4\n"
"11:" // Oddments: Load input (0, 1): Bit 1: End
- "ldr x20, [x13, #0x40]\n"
+ "ldr x20, [x14, #0x40]\n"
"fmla v28.4s, v1.4s, v12.4s\n"
"fmla v29.4s, v0.4s, v12.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 12f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
@@ -387,10 +387,10 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"12:" // Oddments: Load input (0, 2): Bit 1: Unset
"ld1 { v9.s }[0], [x20], #0x4\n"
"13:" // Oddments: Load input (0, 2): Bit 1: End
- "ldr x20, [x13, #0x48]\n"
+ "ldr x20, [x14, #0x48]\n"
"fmla v28.4s, v2.4s, v9.4s\n"
"fmla v29.4s, v1.4s, v9.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 14f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 15f\n"
@@ -399,12 +399,12 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"14:" // Oddments: Load input (2, 2): Bit 1: Unset
"ld1 { v10.s }[0], [x20], #0x4\n"
"15:" // Oddments: Load input (2, 2): Bit 1: End
- "ldr x20, [x13, #0x50]\n"
+ "ldr x20, [x14, #0x50]\n"
"fmla v28.4s, v8.4s, v10.4s\n"
"fmla v29.4s, v7.4s, v10.4s\n"
- "add x20, x20, x28\n"
"fmla v30.4s, v5.4s, v10.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 16f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
@@ -413,10 +413,10 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"16:" // Oddments: Load input (1, 0): Bit 1: Unset
"ld1 { v11.s }[0], [x20], #0x4\n"
"17:" // Oddments: Load input (1, 0): Bit 1: End
- "ldr x20, [x13, #0x58]\n"
+ "ldr x20, [x14, #0x58]\n"
"fmla v28.4s, v3.4s, v11.4s\n"
"fmla v30.4s, v0.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 18f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 19f\n"
@@ -425,10 +425,10 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"18:" // Oddments: Load input (1, 3): Bit 1: Unset
"ld1 { v12.s }[0], [x20], #0x4\n"
"19:" // Oddments: Load input (1, 3): Bit 1: End
- "ldr x20, [x13, #0x60]\n"
+ "ldr x20, [x14, #0x60]\n"
"fmla v29.4s, v5.4s, v12.4s\n"
"fmla v31.4s, v2.4s, v12.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 20f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
@@ -437,10 +437,10 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"20:" // Oddments: Load input (2, 0): Bit 1: Unset
"ld1 { v9.s }[0], [x20], #0x4\n"
"21:" // Oddments: Load input (2, 0): Bit 1: End
- "ldr x20, [x13, #0x68]\n"
+ "ldr x20, [x14, #0x68]\n"
"fmla v28.4s, v6.4s, v9.4s\n"
"fmla v30.4s, v3.4s, v9.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 22f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 23f\n"
@@ -449,10 +449,10 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"22:" // Oddments: Load input (2, 3): Bit 1: Unset
"ld1 { v10.s }[0], [x20], #0x4\n"
"23:" // Oddments: Load input (2, 3): Bit 1: End
- "ldr x20, [x13, #0x70]\n"
+ "ldr x20, [x14, #0x70]\n"
"fmla v29.4s, v8.4s, v10.4s\n"
"fmla v31.4s, v5.4s, v10.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 24f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
@@ -461,10 +461,10 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"24:" // Oddments: Load input (3, 1): Bit 1: Unset
"ld1 { v11.s }[0], [x20], #0x4\n"
"25:" // Oddments: Load input (3, 1): Bit 1: End
- "ldr x20, [x13, #0x78]\n"
+ "ldr x20, [x14, #0x78]\n"
"fmla v30.4s, v7.4s, v11.4s\n"
"fmla v31.4s, v6.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x13\n"
"tbz %x[n_channels], #1, 26f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
@@ -477,9 +477,9 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v31.4s, v7.4s, v12.4s\n"
"fmax v28.4s, v28.4s, v27.4s\n"
"fmax v29.4s, v29.4s, v27.4s\n"
+ "fmin v28.4s, v28.4s, v26.4s\n"
"fmax v30.4s, v30.4s, v27.4s\n"
"fmax v31.4s, v31.4s, v27.4s\n"
- "fmin v28.4s, v28.4s, v26.4s\n"
"fmin v29.4s, v29.4s, v26.4s\n"
"fmin v30.4s, v30.4s, v26.4s\n"
"fmin v31.4s, v31.4s, v26.4s\n"
@@ -503,7 +503,7 @@ void a64_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"30:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
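
The "Oddments" paths in the kernel above consume the n_channels % 4 remainder by bit-testing the channel count: "tbz %x[n_channels], #1, ..." guards a two-lane "ld1 { v.d }[0]" load and "tbz %x[n_channels], #0, ..." guards a single-lane "ld1 { v.s }" load. A minimal C++ sketch of that tail-handling pattern follows; load_tail is a hypothetical helper written for illustration, not part of the library.

    #include <cstdint>

    // Consume the n_channels % 4 leftover floats the way the "Oddments"
    // blocks do: an optional pair (bit 1 of the count), then an optional
    // single lane (bit 0). Returns the number of lanes filled.
    static inline int load_tail(const float *&src, float *dst, uint64_t n_channels)
    {
        int lanes = 0;
        if (n_channels & 2)            // mirrors: tbz %x[n_channels], #1, <skip>
        {
            dst[lanes++] = *src++;     // ld1 { v.d }[0] moves two floats
            dst[lanes++] = *src++;
        }
        if (n_channels & 1)            // mirrors: tbz %x[n_channels], #0, <skip>
        {
            dst[lanes++] = *src++;     // ld1 { v.s }[k] moves one float
        }
        return lanes;
    }
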
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index 9bfcd9cd3c..8a7542d3aa 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,52 +87,52 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x24, #0x0\n"
- "mov x23, #0x0\n"
+ "mov x10, #0x0\n"
+ "mov x9, #0x0\n"
"1:" // Tile loop
- "str x24, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x28, #0x3\n"
"mov x27, #0x3\n"
- "mov x26, #0x3\n"
- "str x23, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x24, x25\n" // offset = tile_i * ld_input_row
- "ldr x8, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x24, x22\n" // offset = tile_i * ld_output_row
- "mov x24, #0x10\n" // cntb _, ALL, #1
- "madd x21, x23, x8, x21\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x8, x8, #0x2\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x23, x17, x20\n" // offset += tile_j * ld_output_col
- "lsl x17, x17, #0x2\n"
- "lsr x23, %x[n_channels], #0x2\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x27\n" // offset *= kernel_stride * output_size
- "add x16, x16, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x13, x16, x25, LSL #2\n"
- "mul x20, x20, x26\n" // offset *= output_tile_size
- "add x12, x13, x25, LSL #2\n"
- "add x11, x8, x8\n"
- "add x15, x15, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "add x10, x12, x25, LSL #2\n"
- "add x9, x11, x8\n"
- "add x28, x15, x22, LSL #2\n"
+ "str x9, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mov x6, #0x10\n" // cntb _, ALL, #1
+ "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "lsr x17, %x[n_channels], #0x2\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
"ld1r { v15.4s }, [x20]\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x24, #0x0\n"
"ld1r { v14.4s }, [x20]\n"
- "add x27, x10, x25, LSL #2\n"
- "add x26, x9, x8\n"
- "add x25, x28, x22, LSL #2\n"
- "add x22, x17, x17\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x24\n"
- "cbz x23, 4f\n"
+ "mul x23, x10, x26\n" // offset = tile_i * ld_input_row
+ "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x22, XZR, x6\n"
+ "mul x21, x10, x25\n" // offset = tile_i * ld_output_row
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x23, x9, x7, x23\n" // offset += tile_j * ld_input_col
+ "lsl x7, x7, #0x2\n"
+ "madd x21, x9, x8, x21\n" // offset += tile_j * ld_output_col
+ "lsl x8, x8, #0x2\n"
+ "mul x23, x23, x28\n" // offset *= kernel_stride * output_size
+ "add x13, x7, x7\n"
+ "add x12, x13, x7\n"
+ "add x11, x12, x7\n"
+ "mul x21, x21, x27\n" // offset *= output_tile_size
+ "add x20, x8, x8\n"
+ "add x16, x16, x23, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x10, x16, x26, LSL #2\n"
+ "add x9, x10, x26, LSL #2\n"
+ "add x15, x15, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x28, x9, x26, LSL #2\n"
+ "add x27, x15, x25, LSL #2\n"
+ "add x26, x28, x26, LSL #2\n"
+ "add x25, x27, x25, LSL #2\n"
+ "cbz x17, 4f\n"
"ldr q31, [x14, #0x0]\n"
"ldr q0, [x14, #0x10]\n"
- "cmp x24, x23, LSL #4\n"
+ "cmp x6, x17, LSL #4\n"
"ldr q1, [x14, #0x20]\n"
"ldr q2, [x14, #0x30]\n"
"ldr q3, [x14, #0x40]\n"
@@ -142,321 +142,321 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"ldr q7, [x14, #0x80]\n"
"ldr q8, [x14, #0x90]\n"
"add x14, x14, #0xa0\n"
- "ldr q9, [x12, x11]\n"
+ "ldr q9, [x9, x13]\n"
"ld1 { v10.4s }, [x16]\n"
- "ldr q11, [x16, x26]\n"
- "ld1 { v12.4s }, [x27]\n"
- "ldr q13, [x13, x11]\n"
+ "ldr q11, [x16, x11]\n"
+ "ld1 { v12.4s }, [x26]\n"
+ "ldr q13, [x10, x13]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "mov v29.16b, v31.16b\n fmla v29.4s, v7.4s, v9.4s\n"
- "mov v28.16b, v31.16b\n fmla v28.4s, v8.4s, v9.4s\n"
+ "mov v30.16b, v31.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "mov v29.16b, v31.16b\n fmla v29.4s, v8.4s, v9.4s\n"
+ "add x6, x6, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "mov v28.16b, v31.16b\n fmla v28.4s, v6.4s, v9.4s\n"
+ "mov v27.16b, v31.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "cmp x6, x17, LSL #4\n"
"add x24, x24, #0x10\n"
- "cmp x24, x23, LSL #4\n"
- "mov v27.16b, v31.16b\n fmla v27.4s, v6.4s, v9.4s\n"
- "fmla v29.4s, v4.4s, v13.4s\n"
- "add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
- "mov v26.16b, v31.16b\n fmla v26.4s, v5.4s, v9.4s\n"
- "mov v25.16b, v31.16b\n fmla v25.4s, v4.4s, v9.4s\n"
- "mov v24.16b, v31.16b\n fmla v24.4s, v3.4s, v9.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ldr q23, [x12, x9]\n"
- "fmla v27.4s, v2.4s, v11.4s\n"
- "ldr q18, [x12, x8]\n"
- "mov v22.16b, v31.16b\n fmla v22.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v6.4s, v18.4s\n"
- "mov v21.16b, v31.16b\n fmla v21.4s, v0.4s, v9.4s\n"
- "fmla v28.4s, v5.4s, v13.4s\n"
- "fmla v27.4s, v3.4s, v13.4s\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
- "fmla v25.4s, v1.4s, v13.4s\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "ldr q17, [x16, x8]\n"
- "fmla v22.4s, v6.4s, v12.4s\n"
- "ldr q16, [x27, x26]\n"
- "mov v20.16b, v31.16b\n fmla v20.4s, v1.4s, v9.4s\n"
+ "mov v26.16b, v31.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "mov v25.16b, v31.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "mov v24.16b, v31.16b\n fmla v24.4s, v2.4s, v9.4s\n"
+ "mov v23.16b, v31.16b\n fmla v23.4s, v0.4s, v9.4s\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
+ "ldr q22, [x9, x12]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "ldr q17, [x9, x7]\n"
+ "fmla v27.4s, v2.4s, v13.4s\n"
+ "fmla v26.4s, v1.4s, v13.4s\n"
+ "fmla v25.4s, v0.4s, v13.4s\n"
+ "fmla v24.4s, v6.4s, v12.4s\n"
+ "ldr q16, [x26, x11]\n"
+ "mov v21.16b, v31.16b\n fmla v21.4s, v1.4s, v9.4s\n"
"ldr q31, [x14, #0x0]\n"
- "fmla v29.4s, v0.4s, v17.4s\n"
- "fmla v21.4s, v8.4s, v16.4s\n"
- "ldr q16, [x16, x9]\n"
- "fmla v28.4s, v7.4s, v18.4s\n"
- "fmla v20.4s, v0.4s, v18.4s\n"
- "fmla v26.4s, v4.4s, v18.4s\n"
- "fmla v25.4s, v3.4s, v18.4s\n"
- "fmla v22.4s, v1.4s, v18.4s\n"
- "ld1 { v19.4s }, [x13]\n"
- "fmla v29.4s, v2.4s, v16.4s\n"
- "fmla v27.4s, v1.4s, v16.4s\n"
- "ld1 { v18.4s }, [x10]\n"
- "fmla v24.4s, v4.4s, v23.4s\n"
- "fmla v28.4s, v1.4s, v17.4s\n"
- "ldr q16, [x13, x26]\n"
- "fmla v20.4s, v2.4s, v23.4s\n"
- "fmla v21.4s, v1.4s, v23.4s\n"
- "fmla v29.4s, v8.4s, v23.4s\n"
- "fmla v27.4s, v7.4s, v23.4s\n"
- "fmla v25.4s, v5.4s, v23.4s\n"
- "ldr q17, [x10, x11]\n"
+ "fmla v30.4s, v6.4s, v17.4s\n"
+ "fmla v29.4s, v5.4s, v13.4s\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "ldr q18, [x16, x7]\n"
+ "fmla v27.4s, v4.4s, v17.4s\n"
+ "fmla v23.4s, v8.4s, v16.4s\n"
+ "ldr q16, [x16, x12]\n"
+ "fmla v26.4s, v3.4s, v17.4s\n"
+ "fmla v21.4s, v0.4s, v17.4s\n"
+ "fmla v24.4s, v1.4s, v17.4s\n"
+ "fmla v30.4s, v0.4s, v18.4s\n"
+ "fmla v29.4s, v7.4s, v17.4s\n"
+ "ld1 { v20.4s }, [x10]\n"
+ "fmla v28.4s, v1.4s, v16.4s\n"
+ "fmla v25.4s, v4.4s, v22.4s\n"
+ "fmla v23.4s, v1.4s, v22.4s\n"
+ "fmla v26.4s, v5.4s, v22.4s\n"
+ "fmla v21.4s, v2.4s, v22.4s\n"
+ "fmla v27.4s, v0.4s, v20.4s\n"
+ "fmla v30.4s, v2.4s, v16.4s\n"
+ "ld1 { v17.4s }, [x28]\n"
+ "fmla v29.4s, v1.4s, v18.4s\n"
+ "ldr q16, [x10, x11]\n"
+ "fmla v28.4s, v7.4s, v22.4s\n"
+ "fmla v24.4s, v3.4s, v17.4s\n"
+ "fmla v25.4s, v2.4s, v16.4s\n"
+ "fmla v27.4s, v6.4s, v17.4s\n"
+ "ldr q19, [x10, x7]\n"
+ "fmla v30.4s, v8.4s, v22.4s\n"
+ "ldr q18, [x28, x13]\n"
+ "fmla v29.4s, v3.4s, v20.4s\n"
+ "ldr q17, [x28, x11]\n"
+ "fmla v28.4s, v5.4s, v16.4s\n"
+ "ldr q16, [x26, x7]\n"
+ "fmla v21.4s, v4.4s, v18.4s\n"
+ "fmla v23.4s, v3.4s, v18.4s\n"
+ "fmla v26.4s, v7.4s, v18.4s\n"
+ "fmla v24.4s, v5.4s, v18.4s\n"
+ "fmla v25.4s, v6.4s, v18.4s\n"
+ "fmla v27.4s, v8.4s, v18.4s\n"
+ "fmla v30.4s, v3.4s, v19.4s\n"
+ "fmla v21.4s, v6.4s, v16.4s\n"
+ "fmla v29.4s, v4.4s, v19.4s\n"
+ "fmla v23.4s, v5.4s, v17.4s\n"
"fmla v26.4s, v0.4s, v19.4s\n"
- "fmla v22.4s, v3.4s, v18.4s\n"
- "fmla v24.4s, v2.4s, v16.4s\n"
- "fmla v20.4s, v4.4s, v17.4s\n"
- "fmla v21.4s, v3.4s, v17.4s\n"
- "fmla v28.4s, v3.4s, v19.4s\n"
- "ldr q19, [x10, x26]\n"
- "fmla v27.4s, v5.4s, v16.4s\n"
- "ldr q16, [x27, x8]\n"
- "fmla v26.4s, v6.4s, v18.4s\n"
- "ldr q18, [x13, x8]\n"
- "fmla v25.4s, v7.4s, v17.4s\n"
- "fmla v22.4s, v5.4s, v17.4s\n"
- "fmla v24.4s, v6.4s, v17.4s\n"
- "fmla v21.4s, v5.4s, v19.4s\n"
- "fmla v20.4s, v6.4s, v16.4s\n"
- "fmla v26.4s, v8.4s, v17.4s\n"
- "fmla v22.4s, v7.4s, v16.4s\n"
- "ldr q17, [x27, x9]\n"
- "fmla v29.4s, v3.4s, v18.4s\n"
- "fmla v25.4s, v0.4s, v18.4s\n"
- "fmla v24.4s, v8.4s, v19.4s\n"
- "ldr q16, [x13, x9]\n"
- "fmla v20.4s, v8.4s, v17.4s\n"
- "add x13, x13, #0x10\n"
- "fmla v21.4s, v7.4s, v17.4s\n"
- "ldr q19, [x10, x9]\n"
- "fmla v28.4s, v4.4s, v18.4s\n"
- "fmla v26.4s, v1.4s, v18.4s\n"
- "ldr q17, [x10, x8]\n"
- "fmla v29.4s, v5.4s, v16.4s\n"
+ "fmla v24.4s, v7.4s, v16.4s\n"
+ "ldr q18, [x26, x12]\n"
+ "fmla v25.4s, v8.4s, v17.4s\n"
+ "ldr q16, [x10, x12]\n"
+ "fmla v27.4s, v1.4s, v19.4s\n"
+ "ldr q17, [x28, x7]\n"
"add x10, x10, #0x10\n"
- "fmla v27.4s, v4.4s, v16.4s\n"
- "fmla v25.4s, v2.4s, v16.4s\n"
- "fmla v24.4s, v1.4s, v16.4s\n"
- "ldr q16, [x16, x11]\n"
- "fmla v22.4s, v4.4s, v17.4s\n"
+ "fmla v21.4s, v8.4s, v18.4s\n"
+ "fmla v23.4s, v7.4s, v18.4s\n"
+ "ldr q19, [x28, x12]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v30.4s, v5.4s, v16.4s\n"
+ "fmla v28.4s, v4.4s, v16.4s\n"
+ "fmla v26.4s, v2.4s, v16.4s\n"
+ "fmla v25.4s, v1.4s, v16.4s\n"
+ "ldr q16, [x16, x13]\n"
+ "fmla v24.4s, v4.4s, v17.4s\n"
"add x16, x16, #0x10\n"
"ld1 { v10.4s }, [x16]\n"
- "fmla v20.4s, v3.4s, v17.4s\n"
- "fmla v21.4s, v4.4s, v19.4s\n"
+ "fmla v21.4s, v3.4s, v17.4s\n"
+ "fmla v27.4s, v7.4s, v17.4s\n"
+ "fmla v23.4s, v4.4s, v19.4s\n"
"ldr q4, [x14, #0x50]\n"
- "fmla v26.4s, v7.4s, v17.4s\n"
- "fmla v25.4s, v6.4s, v17.4s\n"
- "ld1 { v18.4s }, [x12]\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v29.4s, v1.4s, v16.4s\n"
+ "fmla v26.4s, v6.4s, v17.4s\n"
+ "ld1 { v18.4s }, [x9]\n"
+ "fmla v29.4s, v2.4s, v16.4s\n"
+ "fmla v30.4s, v1.4s, v16.4s\n"
"ldr q1, [x14, #0x20]\n"
- "fmax v29.4s, v29.4s, v15.4s\n"
- "fmla v27.4s, v0.4s, v16.4s\n"
- "ldr q17, [x12, x26]\n"
- "fmla v24.4s, v7.4s, v19.4s\n"
- "add x12, x12, #0x10\n"
- "ldr q9, [x12, x11]\n"
- "fmla v20.4s, v5.4s, v19.4s\n"
- "fmla v22.4s, v0.4s, v18.4s\n"
+ "fmla v28.4s, v0.4s, v16.4s\n"
+ "ldr q17, [x9, x11]\n"
+ "fmla v25.4s, v7.4s, v19.4s\n"
+ "add x9, x9, #0x10\n"
+ "ldr q9, [x9, x13]\n"
+ "fmla v21.4s, v5.4s, v19.4s\n"
+ "fmla v24.4s, v0.4s, v18.4s\n"
"ldr q0, [x14, #0x10]\n"
- "fmla v21.4s, v2.4s, v17.4s\n"
- "ldr q2, [x14, #0x30]\n"
- "fmla v25.4s, v8.4s, v19.4s\n"
- "ldr q16, [x27, x11]\n"
- "fmla v28.4s, v6.4s, v18.4s\n"
- "fmla v26.4s, v3.4s, v18.4s\n"
+ "fmla v26.4s, v8.4s, v19.4s\n"
+ "ldr q16, [x26, x13]\n"
+ "fmla v27.4s, v3.4s, v18.4s\n"
"ldr q3, [x14, #0x40]\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmla v27.4s, v8.4s, v17.4s\n"
- "fmla v24.4s, v5.4s, v17.4s\n"
- "ldr q11, [x16, x26]\n"
+ "fmla v23.4s, v2.4s, v17.4s\n"
+ "ldr q2, [x14, #0x30]\n"
+ "fmla v29.4s, v6.4s, v18.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmla v28.4s, v8.4s, v17.4s\n"
+ "fmla v25.4s, v5.4s, v17.4s\n"
+ "ldr q11, [x16, x11]\n"
"ldr q5, [x14, #0x60]\n"
- "fmla v22.4s, v8.4s, v16.4s\n"
+ "fmla v24.4s, v8.4s, v16.4s\n"
"ldr q8, [x14, #0x90]\n"
- "fmla v20.4s, v7.4s, v16.4s\n"
+ "fmla v21.4s, v7.4s, v16.4s\n"
"ldr q7, [x14, #0x80]\n"
- "fmla v21.4s, v6.4s, v16.4s\n"
- "ldr q13, [x13, x11]\n"
- "ldr q6, [x14, #0x70]\n"
"fmax v27.4s, v27.4s, v15.4s\n"
"fmax v26.4s, v26.4s, v15.4s\n"
+ "add x26, x26, #0x10\n"
+ "ld1 { v12.4s }, [x26]\n"
+ "fmla v23.4s, v6.4s, v16.4s\n"
+ "ldr q13, [x10, x13]\n"
+ "ldr q6, [x14, #0x70]\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
"fmax v25.4s, v25.4s, v15.4s\n"
- "add x27, x27, #0x10\n"
- "ld1 { v12.4s }, [x27]\n"
- "fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
"add x14, x14, #0xa0\n"
- "fmax v20.4s, v20.4s, v15.4s\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
"fmax v21.4s, v21.4s, v15.4s\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
"fmin v29.4s, v29.4s, v14.4s\n"
- "st1 { v28.4s }, [x15]\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
"fmin v27.4s, v27.4s, v14.4s\n"
"fmin v26.4s, v26.4s, v14.4s\n"
- "str q29, [x15, x17]\n"
"fmin v25.4s, v25.4s, v14.4s\n"
"fmin v24.4s, v24.4s, v14.4s\n"
- "str q27, [x15, x22]\n"
- "add x15, x15, #0x10\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "st1 { v26.4s }, [x28]\n"
+ "st1 { v29.4s }, [x15]\n"
"fmin v21.4s, v21.4s, v14.4s\n"
- "str q25, [x28, x17]\n"
- "str q24, [x28, x22]\n"
- "add x28, x28, #0x10\n"
- "st1 { v22.4s }, [x25]\n"
- "str q20, [x25, x17]\n"
- "str q21, [x25, x22]\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "str q30, [x15, x8]\n"
+ "str q28, [x15, x20]\n"
+ "add x15, x15, #0x10\n"
+ "st1 { v27.4s }, [x27]\n"
+ "str q26, [x27, x8]\n"
+ "str q25, [x27, x20]\n"
+ "add x27, x27, #0x10\n"
+ "st1 { v24.4s }, [x25]\n"
+ "str q21, [x25, x8]\n"
+ "str q23, [x25, x20]\n"
"add x25, x25, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v29.16b, v31.16b\n fmla v29.4s, v7.4s, v9.4s\n"
- "mov v28.16b, v31.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "mov v27.16b, v31.16b\n fmla v27.4s, v6.4s, v9.4s\n"
- "fmla v29.4s, v4.4s, v13.4s\n"
- "mov v26.16b, v31.16b\n fmla v26.4s, v5.4s, v9.4s\n"
- "mov v25.16b, v31.16b\n fmla v25.4s, v4.4s, v9.4s\n"
- "mov v24.16b, v31.16b\n fmla v24.4s, v3.4s, v9.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ldr q23, [x12, x9]\n"
- "fmla v27.4s, v2.4s, v11.4s\n"
- "ldr q18, [x12, x8]\n"
- "mov v22.16b, v31.16b\n fmla v22.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v6.4s, v18.4s\n"
- "mov v21.16b, v31.16b\n fmla v21.4s, v0.4s, v9.4s\n"
- "fmla v28.4s, v5.4s, v13.4s\n"
- "fmla v27.4s, v3.4s, v13.4s\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
- "fmla v25.4s, v1.4s, v13.4s\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "ldr q17, [x16, x8]\n"
- "fmla v22.4s, v6.4s, v12.4s\n"
- "ldr q16, [x27, x26]\n"
- "mov v20.16b, v31.16b\n fmla v20.4s, v1.4s, v9.4s\n"
- "fmla v29.4s, v0.4s, v17.4s\n"
- "fmla v21.4s, v8.4s, v16.4s\n"
- "ldr q16, [x16, x9]\n"
- "fmla v28.4s, v7.4s, v18.4s\n"
- "fmla v20.4s, v0.4s, v18.4s\n"
- "fmla v26.4s, v4.4s, v18.4s\n"
- "fmla v25.4s, v3.4s, v18.4s\n"
- "fmla v22.4s, v1.4s, v18.4s\n"
- "ld1 { v19.4s }, [x13]\n"
- "fmla v29.4s, v2.4s, v16.4s\n"
- "fmla v27.4s, v1.4s, v16.4s\n"
- "ld1 { v18.4s }, [x10]\n"
- "fmla v24.4s, v4.4s, v23.4s\n"
- "fmla v28.4s, v1.4s, v17.4s\n"
- "ldr q16, [x13, x26]\n"
- "fmla v20.4s, v2.4s, v23.4s\n"
- "fmla v21.4s, v1.4s, v23.4s\n"
- "fmla v29.4s, v8.4s, v23.4s\n"
- "fmla v27.4s, v7.4s, v23.4s\n"
- "fmla v25.4s, v5.4s, v23.4s\n"
- "ldr q17, [x10, x11]\n"
+ "mov v30.16b, v31.16b\n fmla v30.4s, v7.4s, v9.4s\n"
+ "mov v29.16b, v31.16b\n fmla v29.4s, v8.4s, v9.4s\n"
+ "mov v28.16b, v31.16b\n fmla v28.4s, v6.4s, v9.4s\n"
+ "mov v27.16b, v31.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "mov v26.16b, v31.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "mov v25.16b, v31.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "mov v24.16b, v31.16b\n fmla v24.4s, v2.4s, v9.4s\n"
+ "mov v23.16b, v31.16b\n fmla v23.4s, v0.4s, v9.4s\n"
+ "fmla v30.4s, v4.4s, v13.4s\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
+ "ldr q22, [x9, x12]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "ldr q17, [x9, x7]\n"
+ "fmla v27.4s, v2.4s, v13.4s\n"
+ "fmla v26.4s, v1.4s, v13.4s\n"
+ "fmla v25.4s, v0.4s, v13.4s\n"
+ "fmla v24.4s, v6.4s, v12.4s\n"
+ "ldr q16, [x26, x11]\n"
+ "mov v21.16b, v31.16b\n fmla v21.4s, v1.4s, v9.4s\n"
+ "fmla v30.4s, v6.4s, v17.4s\n"
+ "fmla v29.4s, v5.4s, v13.4s\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "ldr q18, [x16, x7]\n"
+ "fmla v27.4s, v4.4s, v17.4s\n"
+ "fmla v23.4s, v8.4s, v16.4s\n"
+ "ldr q16, [x16, x12]\n"
+ "fmla v26.4s, v3.4s, v17.4s\n"
+ "fmla v21.4s, v0.4s, v17.4s\n"
+ "fmla v24.4s, v1.4s, v17.4s\n"
+ "fmla v30.4s, v0.4s, v18.4s\n"
+ "fmla v29.4s, v7.4s, v17.4s\n"
+ "ld1 { v20.4s }, [x10]\n"
+ "fmla v28.4s, v1.4s, v16.4s\n"
+ "fmla v25.4s, v4.4s, v22.4s\n"
+ "fmla v23.4s, v1.4s, v22.4s\n"
+ "fmla v26.4s, v5.4s, v22.4s\n"
+ "fmla v21.4s, v2.4s, v22.4s\n"
+ "fmla v27.4s, v0.4s, v20.4s\n"
+ "fmla v30.4s, v2.4s, v16.4s\n"
+ "ld1 { v17.4s }, [x28]\n"
+ "fmla v29.4s, v1.4s, v18.4s\n"
+ "ldr q16, [x10, x11]\n"
+ "fmla v28.4s, v7.4s, v22.4s\n"
+ "fmla v24.4s, v3.4s, v17.4s\n"
+ "fmla v25.4s, v2.4s, v16.4s\n"
+ "fmla v27.4s, v6.4s, v17.4s\n"
+ "ldr q19, [x10, x7]\n"
+ "fmla v30.4s, v8.4s, v22.4s\n"
+ "ldr q18, [x28, x13]\n"
+ "fmla v29.4s, v3.4s, v20.4s\n"
+ "ldr q17, [x28, x11]\n"
+ "fmla v28.4s, v5.4s, v16.4s\n"
+ "ldr q16, [x26, x7]\n"
+ "fmla v21.4s, v4.4s, v18.4s\n"
+ "fmla v23.4s, v3.4s, v18.4s\n"
+ "fmla v26.4s, v7.4s, v18.4s\n"
+ "fmla v24.4s, v5.4s, v18.4s\n"
+ "fmla v25.4s, v6.4s, v18.4s\n"
+ "fmla v27.4s, v8.4s, v18.4s\n"
+ "fmla v30.4s, v3.4s, v19.4s\n"
+ "fmla v21.4s, v6.4s, v16.4s\n"
+ "fmla v29.4s, v4.4s, v19.4s\n"
+ "fmla v23.4s, v5.4s, v17.4s\n"
"fmla v26.4s, v0.4s, v19.4s\n"
- "fmla v22.4s, v3.4s, v18.4s\n"
- "fmla v24.4s, v2.4s, v16.4s\n"
- "fmla v20.4s, v4.4s, v17.4s\n"
- "fmla v21.4s, v3.4s, v17.4s\n"
- "fmla v28.4s, v3.4s, v19.4s\n"
- "ldr q19, [x10, x26]\n"
- "fmla v27.4s, v5.4s, v16.4s\n"
- "ldr q16, [x27, x8]\n"
- "fmla v26.4s, v6.4s, v18.4s\n"
- "ldr q18, [x13, x8]\n"
- "fmla v25.4s, v7.4s, v17.4s\n"
- "fmla v22.4s, v5.4s, v17.4s\n"
- "fmla v24.4s, v6.4s, v17.4s\n"
- "fmla v21.4s, v5.4s, v19.4s\n"
- "fmla v20.4s, v6.4s, v16.4s\n"
- "fmla v26.4s, v8.4s, v17.4s\n"
- "fmla v22.4s, v7.4s, v16.4s\n"
- "ldr q17, [x27, x9]\n"
- "fmla v29.4s, v3.4s, v18.4s\n"
- "fmla v25.4s, v0.4s, v18.4s\n"
- "fmla v24.4s, v8.4s, v19.4s\n"
- "ldr q16, [x13, x9]\n"
- "fmla v20.4s, v8.4s, v17.4s\n"
- "add x13, x13, #0x10\n"
- "fmla v21.4s, v7.4s, v17.4s\n"
- "ldr q19, [x10, x9]\n"
- "fmla v28.4s, v4.4s, v18.4s\n"
- "fmla v26.4s, v1.4s, v18.4s\n"
- "ldr q17, [x10, x8]\n"
- "fmla v29.4s, v5.4s, v16.4s\n"
+ "fmla v24.4s, v7.4s, v16.4s\n"
+ "ldr q18, [x26, x12]\n"
+ "fmla v25.4s, v8.4s, v17.4s\n"
+ "ldr q16, [x10, x12]\n"
+ "fmla v27.4s, v1.4s, v19.4s\n"
+ "ldr q17, [x28, x7]\n"
"add x10, x10, #0x10\n"
- "fmla v27.4s, v4.4s, v16.4s\n"
- "fmla v25.4s, v2.4s, v16.4s\n"
- "fmla v24.4s, v1.4s, v16.4s\n"
- "ldr q16, [x16, x11]\n"
- "fmla v22.4s, v4.4s, v17.4s\n"
+ "fmla v21.4s, v8.4s, v18.4s\n"
+ "fmla v23.4s, v7.4s, v18.4s\n"
+ "ldr q19, [x28, x12]\n"
+ "add x28, x28, #0x10\n"
+ "fmla v30.4s, v5.4s, v16.4s\n"
+ "fmla v28.4s, v4.4s, v16.4s\n"
+ "fmla v26.4s, v2.4s, v16.4s\n"
+ "fmla v25.4s, v1.4s, v16.4s\n"
+ "ldr q16, [x16, x13]\n"
+ "fmla v24.4s, v4.4s, v17.4s\n"
"add x16, x16, #0x10\n"
- "fmla v20.4s, v3.4s, v17.4s\n"
- "fmla v21.4s, v4.4s, v19.4s\n"
- "fmla v26.4s, v7.4s, v17.4s\n"
- "fmla v25.4s, v6.4s, v17.4s\n"
- "ld1 { v18.4s }, [x12]\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v29.4s, v1.4s, v16.4s\n"
- "fmax v29.4s, v29.4s, v15.4s\n"
- "fmla v27.4s, v0.4s, v16.4s\n"
- "ldr q17, [x12, x26]\n"
- "fmla v24.4s, v7.4s, v19.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "fmla v20.4s, v5.4s, v19.4s\n"
- "fmla v22.4s, v0.4s, v18.4s\n"
- "add x12, x12, #0x10\n"
- "fmla v21.4s, v2.4s, v17.4s\n"
- "fmla v25.4s, v8.4s, v19.4s\n"
- "ldr q16, [x27, x11]\n"
- "fmax v25.4s, v25.4s, v15.4s\n"
- "fmla v28.4s, v6.4s, v18.4s\n"
- "fmla v26.4s, v3.4s, v18.4s\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "add x27, x27, #0x10\n"
- "fmla v27.4s, v8.4s, v17.4s\n"
- "fmla v24.4s, v5.4s, v17.4s\n"
+ "fmla v21.4s, v3.4s, v17.4s\n"
+ "fmla v27.4s, v7.4s, v17.4s\n"
+ "fmla v23.4s, v4.4s, v19.4s\n"
+ "fmla v26.4s, v6.4s, v17.4s\n"
+ "ld1 { v18.4s }, [x9]\n"
+ "fmla v29.4s, v2.4s, v16.4s\n"
+ "fmla v30.4s, v1.4s, v16.4s\n"
+ "fmla v28.4s, v0.4s, v16.4s\n"
+ "ldr q17, [x9, x11]\n"
+ "fmla v25.4s, v7.4s, v19.4s\n"
+ "add x9, x9, #0x10\n"
+ "fmla v21.4s, v5.4s, v19.4s\n"
+ "fmla v24.4s, v0.4s, v18.4s\n"
+ "fmla v26.4s, v8.4s, v19.4s\n"
+ "ldr q16, [x26, x13]\n"
+ "fmla v27.4s, v3.4s, v18.4s\n"
+ "add x26, x26, #0x10\n"
+ "fmla v23.4s, v2.4s, v17.4s\n"
+ "fmla v29.4s, v6.4s, v18.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmla v28.4s, v8.4s, v17.4s\n"
+ "fmla v25.4s, v5.4s, v17.4s\n"
+ "fmla v24.4s, v8.4s, v16.4s\n"
+ "fmla v21.4s, v7.4s, v16.4s\n"
"fmax v27.4s, v27.4s, v15.4s\n"
- "fmla v22.4s, v8.4s, v16.4s\n"
- "fmla v20.4s, v7.4s, v16.4s\n"
"fmax v26.4s, v26.4s, v15.4s\n"
- "fmla v21.4s, v6.4s, v16.4s\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmla v23.4s, v6.4s, v16.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
"fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "fmax v20.4s, v20.4s, v15.4s\n"
"fmax v21.4s, v21.4s, v15.4s\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
"fmin v28.4s, v28.4s, v14.4s\n"
- "st1 { v28.4s }, [x15]\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
"fmin v26.4s, v26.4s, v14.4s\n"
- "str q29, [x15, x17]\n"
+ "st1 { v27.4s }, [x27]\n"
"fmin v25.4s, v25.4s, v14.4s\n"
"fmin v24.4s, v24.4s, v14.4s\n"
- "str q27, [x15, x22]\n"
- "add x15, x15, #0x10\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "st1 { v26.4s }, [x28]\n"
"fmin v21.4s, v21.4s, v14.4s\n"
- "str q25, [x28, x17]\n"
- "str q24, [x28, x22]\n"
- "add x28, x28, #0x10\n"
- "st1 { v22.4s }, [x25]\n"
- "str q20, [x25, x17]\n"
- "str q21, [x25, x22]\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "st1 { v29.4s }, [x15]\n"
+ "str q30, [x15, x8]\n"
+ "str q28, [x15, x20]\n"
+ "add x15, x15, #0x10\n"
+ "str q26, [x27, x8]\n"
+ "str q25, [x27, x20]\n"
+ "add x27, x27, #0x10\n"
+ "st1 { v24.4s }, [x25]\n"
+ "str q21, [x25, x8]\n"
+ "str q23, [x25, x20]\n"
"add x25, x25, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 49f\n"
"ldr q31, [x14, #0x0]\n"
"ldr q0, [x14, #0x10]\n"
- "add x24, x12, x11\n"
+ "add x24, x9, x13\n"
"add x23, x16, XZR\n"
"ldr q1, [x14, #0x20]\n"
"ldr q2, [x14, #0x30]\n"
- "add x22, x16, x26\n"
- "add x21, x27, XZR\n"
+ "add x22, x16, x11\n"
+ "add x21, x26, XZR\n"
"ldr q3, [x14, #0x40]\n"
"ldr q4, [x14, #0x50]\n"
- "add x20, x13, x11\n"
+ "add x20, x10, x13\n"
"ldr q5, [x14, #0x60]\n"
"ldr q6, [x14, #0x70]\n"
"ldr q7, [x14, #0x80]\n"
@@ -483,23 +483,23 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 4), (4, 0), (1, 2): Bit 1: End
"mov v23.16b, v31.16b\n fmla v23.4s, v8.4s, v9.4s\n"
"mov v25.16b, v31.16b\n fmla v25.4s, v6.4s, v9.4s\n"
- "add x20, x27, x26\n"
+ "add x20, x26, x11\n"
"mov v24.16b, v31.16b\n fmla v24.4s, v7.4s, v9.4s\n"
"mov v26.16b, v31.16b\n fmla v26.4s, v5.4s, v9.4s\n"
"mov v27.16b, v31.16b\n fmla v27.4s, v4.4s, v9.4s\n"
"mov v28.16b, v31.16b\n fmla v28.4s, v3.4s, v9.4s\n"
"mov v29.16b, v31.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "mov v30.16b, v31.16b\n fmla v30.4s, v1.4s, v9.4s\n"
"fmla v23.4s, v0.4s, v10.4s\n"
"fmla v25.4s, v2.4s, v11.4s\n"
- "mov v30.16b, v31.16b\n fmla v30.4s, v1.4s, v9.4s\n"
"fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v13.4s\n"
"fmla v24.4s, v4.4s, v13.4s\n"
- "fmla v25.4s, v3.4s, v13.4s\n"
"fmla v26.4s, v2.4s, v13.4s\n"
"fmla v27.4s, v1.4s, v13.4s\n"
+ "fmla v29.4s, v6.4s, v12.4s\n"
"fmla v28.4s, v0.4s, v13.4s\n"
+ "fmla v23.4s, v5.4s, v13.4s\n"
+ "fmla v25.4s, v3.4s, v13.4s\n"
"tbz %x[n_channels], #1, 7f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
@@ -509,7 +509,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"ldr s12, [x20, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: End
"fmla v31.4s, v8.4s, v12.4s\n"
- "add x20, x12, x8\n"
+ "add x20, x9, x7\n"
"tbz %x[n_channels], #1, 9f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
@@ -520,7 +520,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"10:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: End
"fmla v23.4s, v7.4s, v11.4s\n"
"fmla v24.4s, v6.4s, v11.4s\n"
- "add x20, x16, x8\n"
+ "add x20, x16, x7\n"
"fmla v26.4s, v4.4s, v11.4s\n"
"fmla v27.4s, v3.4s, v11.4s\n"
"fmla v29.4s, v1.4s, v11.4s\n"
@@ -535,7 +535,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"12:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: End
"fmla v23.4s, v1.4s, v13.4s\n"
"fmla v24.4s, v0.4s, v13.4s\n"
- "add x20, x16, x9\n"
+ "add x20, x16, x12\n"
"tbz %x[n_channels], #1, 13f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
@@ -546,7 +546,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"14:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 1: End
"fmla v24.4s, v2.4s, v12.4s\n"
"fmla v25.4s, v1.4s, v12.4s\n"
- "add x20, x12, x9\n"
+ "add x20, x9, x12\n"
"tbz %x[n_channels], #1, 15f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
@@ -557,7 +557,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"16:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: End
"fmla v24.4s, v8.4s, v10.4s\n"
"fmla v25.4s, v7.4s, v10.4s\n"
- "add x20, x13, XZR\n"
+ "add x20, x10, XZR\n"
"fmla v27.4s, v5.4s, v10.4s\n"
"fmla v28.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v2.4s, v10.4s\n"
@@ -572,7 +572,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"18:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: End
"fmla v23.4s, v3.4s, v11.4s\n"
"fmla v26.4s, v0.4s, v11.4s\n"
- "add x20, x13, x26\n"
+ "add x20, x10, x11\n"
"tbz %x[n_channels], #1, 19f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
@@ -583,7 +583,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"20:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: End
"fmla v25.4s, v5.4s, v13.4s\n"
"fmla v28.4s, v2.4s, v13.4s\n"
- "add x20, x10, XZR\n"
+ "add x20, x28, XZR\n"
"tbz %x[n_channels], #1, 21f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
@@ -594,7 +594,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"22:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
"fmla v26.4s, v6.4s, v12.4s\n"
"fmla v29.4s, v3.4s, v12.4s\n"
- "add x20, x10, x11\n"
+ "add x20, x28, x13\n"
"tbz %x[n_channels], #1, 23f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
@@ -605,7 +605,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"24:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: End
"fmla v26.4s, v8.4s, v10.4s\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "add x20, x10, x26\n"
+ "add x20, x28, x11\n"
"fmla v28.4s, v6.4s, v10.4s\n"
"fmla v29.4s, v5.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v10.4s\n"
@@ -620,7 +620,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"26:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: End
"fmla v28.4s, v8.4s, v11.4s\n"
"fmla v31.4s, v5.4s, v11.4s\n"
- "add x20, x27, x8\n"
+ "add x20, x26, x7\n"
"tbz %x[n_channels], #1, 27f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
@@ -631,7 +631,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"28:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: End
"fmla v29.4s, v7.4s, v13.4s\n"
"fmla v30.4s, v6.4s, v13.4s\n"
- "add x20, x13, x8\n"
+ "add x20, x10, x7\n"
"tbz %x[n_channels], #1, 29f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 30f\n"
@@ -642,7 +642,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"30:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 1: End
"fmla v23.4s, v4.4s, v12.4s\n"
"fmla v24.4s, v3.4s, v12.4s\n"
- "add x20, x13, x9\n"
+ "add x20, x10, x12\n"
"fmla v26.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 31f\n"
@@ -655,7 +655,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"32:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
"fmla v24.4s, v5.4s, v11.4s\n"
"fmla v25.4s, v4.4s, v11.4s\n"
- "add x20, x27, x9\n"
+ "add x20, x26, x12\n"
"fmla v27.4s, v2.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 33f\n"
@@ -668,7 +668,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"34:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: End
"fmla v30.4s, v8.4s, v13.4s\n"
"fmla v31.4s, v7.4s, v13.4s\n"
- "add x20, x10, x8\n"
+ "add x20, x28, x7\n"
"tbz %x[n_channels], #1, 35f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 36f\n"
@@ -679,7 +679,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"36:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
"fmla v26.4s, v7.4s, v12.4s\n"
"fmla v27.4s, v6.4s, v12.4s\n"
- "add x20, x16, x11\n"
+ "add x20, x16, x13\n"
"fmla v29.4s, v4.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v12.4s\n"
"tbz %x[n_channels], #1, 37f\n"
@@ -692,7 +692,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"38:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: End
"fmla v23.4s, v2.4s, v11.4s\n"
"fmla v24.4s, v1.4s, v11.4s\n"
- "add x20, x10, x9\n"
+ "add x20, x28, x12\n"
"fmla v25.4s, v0.4s, v11.4s\n"
"tbz %x[n_channels], #1, 39f\n"
"ldr d13, [x20], #0x8\n"
@@ -704,7 +704,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"40:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
"fmla v27.4s, v8.4s, v13.4s\n"
"fmla v28.4s, v7.4s, v13.4s\n"
- "add x20, x12, XZR\n"
+ "add x20, x9, XZR\n"
"fmla v30.4s, v5.4s, v13.4s\n"
"fmla v31.4s, v4.4s, v13.4s\n"
"tbz %x[n_channels], #1, 41f\n"
@@ -717,7 +717,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"42:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: End
"fmla v23.4s, v6.4s, v12.4s\n"
"fmla v26.4s, v3.4s, v12.4s\n"
- "add x20, x12, x26\n"
+ "add x20, x9, x11\n"
"fmla v29.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 43f\n"
"ldr d11, [x20], #0x8\n"
@@ -729,7 +729,7 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"44:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: End
"fmla v25.4s, v8.4s, v11.4s\n"
"fmla v28.4s, v5.4s, v11.4s\n"
- "add x20, x27, x11\n"
+ "add x20, x26, x13\n"
"fmla v31.4s, v2.4s, v11.4s\n"
"tbz %x[n_channels], #1, 45f\n"
"ldr d13, [x20], #0x8\n"
@@ -762,63 +762,63 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"fmin v31.4s, v31.4s, v14.4s\n"
"tbz %x[n_channels], #1, 47f\n"
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.d }[0], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.d }[0], [x21], x17\n"
"add x15, x15, #0x8\n"
- "st1 { v29.d }[0], [x20], x17\n"
- "add x28, x28, #0x8\n"
+ "add x27, x27, #0x8\n"
"add x25, x25, #0x8\n"
- "st1 { v24.d }[0], [x22], x17\n"
- "st1 { v27.d }[0], [x21], x17\n"
- "st1 { v30.d }[0], [x20], x17\n"
+ "st1 { v23.d }[0], [x22], x8\n"
+ "st1 { v26.d }[0], [x21], x8\n"
+ "st1 { v29.d }[0], [x20], x8\n"
+ "st1 { v24.d }[0], [x22], x8\n"
+ "st1 { v27.d }[0], [x21], x8\n"
+ "st1 { v30.d }[0], [x20], x8\n"
"st1 { v25.d }[0], [x22]\n"
"st1 { v28.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #0, 48f\n"
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.s }[2], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.s }[2], [x21], x17\n"
- "st1 { v29.s }[2], [x20], x17\n"
- "st1 { v24.s }[2], [x22], x17\n"
- "st1 { v27.s }[2], [x21], x17\n"
- "st1 { v30.s }[2], [x20], x17\n"
+ "st1 { v23.s }[2], [x22], x8\n"
+ "st1 { v24.s }[2], [x22], x8\n"
+ "st1 { v26.s }[2], [x21], x8\n"
+ "st1 { v29.s }[2], [x20], x8\n"
+ "st1 { v27.s }[2], [x21], x8\n"
+ "st1 { v30.s }[2], [x20], x8\n"
"st1 { v25.s }[2], [x22]\n"
"st1 { v28.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"b 48f\n"
"47:" // Tile loop: Oddments: Store: Bit 1: Unset
"mov x22, x15\n"
- "mov x21, x28\n"
- "st1 { v23.s }[0], [x22], x17\n"
+ "mov x21, x27\n"
"mov x20, x25\n"
- "st1 { v26.s }[0], [x21], x17\n"
- "st1 { v29.s }[0], [x20], x17\n"
- "st1 { v24.s }[0], [x22], x17\n"
- "st1 { v27.s }[0], [x21], x17\n"
- "st1 { v30.s }[0], [x20], x17\n"
+ "st1 { v23.s }[0], [x22], x8\n"
+ "st1 { v24.s }[0], [x22], x8\n"
+ "st1 { v26.s }[0], [x21], x8\n"
+ "st1 { v29.s }[0], [x20], x8\n"
+ "st1 { v27.s }[0], [x21], x8\n"
+ "st1 { v30.s }[0], [x20], x8\n"
"st1 { v25.s }[0], [x22]\n"
"st1 { v28.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"48:" // Tile loop: Oddments: Store: Bit 1: End
"49:" // Tile loop: End
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x23, x23, #0x1\n"
- "add x21, x24, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x23, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x24, x24, x21, LT\n"
- "csel x23, x23, XZR, LT\n"
- "cmp x24, x20\n"
+ "ldr x9, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x9, x9, #0x1\n"
+ "add x20, x10, #0x1\n"
+ "cmp x9, x22\n"
+ "csel x10, x10, x20, LT\n"
+ "csel x9, x9, XZR, LT\n"
+ "cmp x10, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
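
The tile-loop prologue above rebuilds the input and output base pointers from the tile indices, and its inline comments spell out the arithmetic. Below is a minimal C++ sketch of that address computation, assuming a pared-down Args struct with the field names the kernel's offsetof() references confirm (tile_i, ld_input_row, and so on); the field types and the helper itself are illustrative only. For this stride-1, 3x3-output variant both multipliers are the immediate 3 loaded into x28/x27.

    #include <cstddef>

    struct Args    // assumed subset of the kernel's real Args struct
    {
        long tile_i, tile_j;
        long ld_input_row, ld_input_col;     // strides in elements
        long ld_output_row, ld_output_col;
        const float *inptr;
        float *outptr;
    };

    static inline void tile_bases(const Args &a, const float *&in, float *&out)
    {
        long in_off = a.tile_i * a.ld_input_row;    // offset  = tile_i * ld_input_row
        in_off += a.tile_j * a.ld_input_col;        // offset += tile_j * ld_input_col
        in_off *= 3;                                // offset *= kernel_stride * output_size
        long out_off = a.tile_i * a.ld_output_row;  // offset  = tile_i * ld_output_row
        out_off += a.tile_j * a.ld_output_col;      // offset += tile_j * ld_output_col
        out_off *= 3;                               // offset *= output_tile_size
        in  = a.inptr + in_off;    // the asm's "LSL #2" is the * sizeof(float)
        out = a.outptr + out_off;
    }
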
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 972f7eb535..5efd35135b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,9 +91,9 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"lsr x8, %x[n_channels], #0x2\n"
"ldr x17, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v15.4s }, [x20]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v15.4s }, [x21]\n"
"ld1r { v14.4s }, [x20]\n"
"add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
"mov x14, #0x0\n"
@@ -111,357 +111,357 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr q7, [x16, #0x80]\n"
"ldr q8, [x16, #0x90]\n"
"add x16, x16, #0xa0\n"
- "ldp x21, x20, [x15, #0x0]\n"
- "ldr q9, [x21, x14]\n"
- "ldr q10, [x20, x14]\n"
- "ldp x21, x20, [x15, #0x10]\n"
- "ldr q11, [x21, x14]\n"
- "ldr q12, [x20, x14]\n"
+ "ldp x24, x23, [x15, #0x0]\n"
+ "ldp x22, x21, [x15, #0x10]\n"
"ldr x20, [x15, #0x20]\n"
+ "ldr q9, [x24, x14]\n"
+ "ldr q10, [x23, x14]\n"
+ "ldr q11, [x22, x14]\n"
+ "ldr q12, [x21, x14]\n"
"ldr q13, [x20, x14]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v29.16b, v31.16b\n fmla v29.4s, v8.4s, v9.4s\n"
- "mov v28.16b, v31.16b\n fmla v28.4s, v7.4s, v9.4s\n"
- "ldr x26, [x15, #0x30]\n"
- "ldr x23, [x15, #0x38]\n"
- "mov v27.16b, v31.16b\n fmla v27.4s, v6.4s, v9.4s\n"
- "fmla v29.4s, v0.4s, v10.4s\n"
- "ldr x22, [x15, #0x28]\n"
- "ldr x20, [x15, #0x48]\n"
- "ldr q19, [x20, x14]\n"
- "fmla v28.4s, v4.4s, v13.4s\n"
- "mov v26.16b, v31.16b\n fmla v26.4s, v5.4s, v9.4s\n"
- "ldr x21, [x15, #0x40]\n"
- "mov v25.16b, v31.16b\n fmla v25.4s, v4.4s, v9.4s\n"
- "mov v24.16b, v31.16b\n fmla v24.4s, v3.4s, v9.4s\n"
+ "mov v30.16b, v31.16b\n fmla v30.4s, v8.4s, v9.4s\n"
+ "mov v29.16b, v31.16b\n fmla v29.4s, v7.4s, v9.4s\n"
+ "ldr x22, [x15, #0x30]\n"
+ "ldr x27, [x15, #0x38]\n"
+ "mov v28.16b, v31.16b\n fmla v28.4s, v6.4s, v9.4s\n"
+ "mov v27.16b, v31.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "ldr x26, [x15, #0x28]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "mov v26.16b, v31.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "mov v25.16b, v31.16b\n fmla v25.4s, v3.4s, v9.4s\n"
+ "ldr x20, [x15, #0x40]\n"
"ldr x25, [x15, #0x50]\n"
+ "mov v24.16b, v31.16b\n fmla v24.4s, v2.4s, v9.4s\n"
+ "mov v23.16b, v31.16b\n fmla v23.4s, v0.4s, v9.4s\n"
"ldr x24, [x15, #0x58]\n"
- "fmla v27.4s, v2.4s, v11.4s\n"
- "ldr q17, [x26, x14]\n"
- "mov v23.16b, v31.16b\n fmla v23.4s, v2.4s, v9.4s\n"
- "ldr x20, [x15, #0x60]\n"
- "fmla v29.4s, v5.4s, v13.4s\n"
- "fmla v28.4s, v6.4s, v17.4s\n"
- "ldr x12, [x15, #0x70]\n"
- "ldr x11, [x15, #0x88]\n"
- "mov v22.16b, v31.16b\n fmla v22.4s, v0.4s, v9.4s\n"
- "fmla v27.4s, v3.4s, v13.4s\n"
- "ldr x10, [x17, #0x0]\n"
+ "ldr x23, [x15, #0x60]\n"
+ "fmla v30.4s, v0.4s, v10.4s\n"
+ "ldr q22, [x21, x14]\n"
+ "fmla v29.4s, v4.4s, v13.4s\n"
+ "ldr x12, [x15, #0x88]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "ldr q17, [x22, x14]\n"
+ "fmla v27.4s, v2.4s, v13.4s\n"
+ "ldr x22, [x15, #0x70]\n"
+ "fmla v26.4s, v1.4s, v13.4s\n"
+ "fmla v25.4s, v0.4s, v13.4s\n"
+ "ldr x11, [x17, #0x0]\n"
"add x13, x13, #0x10\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
- "fmla v25.4s, v1.4s, v13.4s\n"
- "ldr x9, [x17, #0x8]\n"
- "ldr x28, [x17, #0x10]\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "ldr q18, [x23, x14]\n"
- "fmla v23.4s, v6.4s, v12.4s\n"
- "ldr q16, [x22, x14]\n"
+ "fmla v24.4s, v6.4s, v12.4s\n"
+ "ldr q16, [x26, x14]\n"
"mov v21.16b, v31.16b\n fmla v21.4s, v1.4s, v9.4s\n"
"ldr q31, [x16, #0x0]\n"
- "fmla v29.4s, v7.4s, v17.4s\n"
- "ldr x23, [x15, #0x68]\n"
- "fmla v28.4s, v0.4s, v18.4s\n"
- "fmla v22.4s, v8.4s, v16.4s\n"
- "ldr q16, [x21, x14]\n"
- "ldr x22, [x15, #0x78]\n"
- "fmla v26.4s, v4.4s, v17.4s\n"
- "fmla v25.4s, v3.4s, v17.4s\n"
- "ldr x21, [x15, #0x80]\n"
- "ldr x27, [x17, #0x18]\n"
+ "fmla v30.4s, v5.4s, v13.4s\n"
+ "fmla v29.4s, v6.4s, v17.4s\n"
+ "ldr x21, [x15, #0x68]\n"
+ "ldr x10, [x17, #0x8]\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "ldr q18, [x27, x14]\n"
+ "fmla v27.4s, v4.4s, v17.4s\n"
+ "ldr x9, [x15, #0x78]\n"
+ "fmla v23.4s, v8.4s, v16.4s\n"
+ "ldr q16, [x20, x14]\n"
+ "fmla v26.4s, v3.4s, v17.4s\n"
+ "ldr x20, [x15, #0x80]\n"
"fmla v21.4s, v0.4s, v17.4s\n"
- "fmla v24.4s, v4.4s, v19.4s\n"
- "fmla v23.4s, v1.4s, v17.4s\n"
+ "fmla v25.4s, v4.4s, v22.4s\n"
+ "ldr x28, [x17, #0x10]\n"
+ "ldr x27, [x17, #0x18]\n"
+ "fmla v30.4s, v7.4s, v17.4s\n"
+ "fmla v29.4s, v0.4s, v18.4s\n"
+ "fmla v24.4s, v1.4s, v17.4s\n"
"ldr q17, [x25, x14]\n"
- "fmla v29.4s, v1.4s, v18.4s\n"
- "ldr q20, [x24, x14]\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v27.4s, v1.4s, v16.4s\n"
- "ldr q16, [x20, x14]\n"
+ "fmla v28.4s, v1.4s, v16.4s\n"
"ldr x26, [x15, #0x90]\n"
- "fmla v25.4s, v5.4s, v19.4s\n"
- "fmla v21.4s, v2.4s, v19.4s\n"
- "ldr x25, [x15, #0xa0]\n"
- "ldr x20, [x15, #0x98]\n"
- "fmla v26.4s, v0.4s, v17.4s\n"
- "fmla v24.4s, v2.4s, v20.4s\n"
- "fmla v28.4s, v8.4s, v19.4s\n"
- "fmla v27.4s, v7.4s, v19.4s\n"
- "fmla v22.4s, v1.4s, v19.4s\n"
- "ldr q19, [x23, x14]\n"
- "fmla v23.4s, v3.4s, v16.4s\n"
- "ldr x24, [x15, #0xa8]\n"
- "fmla v26.4s, v6.4s, v16.4s\n"
- "ldr q18, [x21, x14]\n"
- "fmla v25.4s, v7.4s, v19.4s\n"
- "ldr x23, [x15, #0xc0]\n"
- "fmla v24.4s, v6.4s, v19.4s\n"
- "fmla v21.4s, v4.4s, v19.4s\n"
- "fmla v29.4s, v3.4s, v17.4s\n"
- "ldr q17, [x12, x14]\n"
- "fmla v27.4s, v5.4s, v20.4s\n"
- "ldr q16, [x22, x14]\n"
- "fmla v23.4s, v5.4s, v19.4s\n"
- "fmla v22.4s, v3.4s, v19.4s\n"
- "ldr x22, [x15, #0xb0]\n"
- "ldr x21, [x15, #0xb8]\n"
- "fmla v26.4s, v8.4s, v19.4s\n"
- "fmla v24.4s, v8.4s, v17.4s\n"
- "fmla v21.4s, v6.4s, v16.4s\n"
- "fmla v28.4s, v3.4s, v18.4s\n"
- "fmla v25.4s, v0.4s, v18.4s\n"
- "fmla v22.4s, v5.4s, v17.4s\n"
- "ldr q17, [x11, x14]\n"
- "fmla v23.4s, v7.4s, v16.4s\n"
- "ldr q16, [x26, x14]\n"
- "fmla v29.4s, v4.4s, v18.4s\n"
- "fmla v26.4s, v1.4s, v18.4s\n"
+ "fmla v26.4s, v5.4s, v22.4s\n"
+ "fmla v23.4s, v1.4s, v22.4s\n"
+ "fmla v21.4s, v2.4s, v22.4s\n"
+ "fmla v30.4s, v1.4s, v18.4s\n"
+ "ldr q20, [x24, x14]\n"
+ "ldr x25, [x15, #0x98]\n"
+ "fmla v29.4s, v2.4s, v16.4s\n"
+ "ldr q16, [x23, x14]\n"
+ "fmla v27.4s, v0.4s, v17.4s\n"
+ "ldr x24, [x15, #0xa0]\n"
+ "fmla v28.4s, v7.4s, v22.4s\n"
+ "fmla v25.4s, v2.4s, v20.4s\n"
+ "fmla v24.4s, v3.4s, v16.4s\n"
+ "fmla v30.4s, v3.4s, v17.4s\n"
+ "ldr q19, [x22, x14]\n"
+ "ldr x23, [x15, #0xb0]\n"
+ "fmla v29.4s, v8.4s, v22.4s\n"
+ "ldr q17, [x21, x14]\n"
+ "ldr x22, [x15, #0xa8]\n"
+ "fmla v27.4s, v6.4s, v16.4s\n"
"ldr q18, [x20, x14]\n"
- "fmla v28.4s, v5.4s, v17.4s\n"
- "fmla v27.4s, v4.4s, v17.4s\n"
- "fmla v25.4s, v2.4s, v17.4s\n"
- "fmla v24.4s, v1.4s, v17.4s\n"
- "ldr q17, [x25, x14]\n"
- "fmla v21.4s, v8.4s, v16.4s\n"
- "ldr x20, [x15, #0x20]\n"
- "fmla v22.4s, v7.4s, v16.4s\n"
+ "ldr x21, [x15, #0xc0]\n"
+ "fmla v28.4s, v5.4s, v20.4s\n"
+ "ldr q16, [x9, x14]\n"
+ "ldr x20, [x15, #0xb8]\n"
+ "fmla v26.4s, v7.4s, v17.4s\n"
+ "fmla v25.4s, v6.4s, v17.4s\n"
+ "fmla v21.4s, v4.4s, v17.4s\n"
+ "fmla v24.4s, v5.4s, v17.4s\n"
+ "fmla v23.4s, v3.4s, v17.4s\n"
+ "fmla v27.4s, v8.4s, v17.4s\n"
+ "fmla v29.4s, v3.4s, v18.4s\n"
+ "fmla v30.4s, v4.4s, v18.4s\n"
+ "fmla v25.4s, v8.4s, v19.4s\n"
+ "fmla v26.4s, v0.4s, v18.4s\n"
+ "fmla v21.4s, v6.4s, v16.4s\n"
+ "fmla v24.4s, v7.4s, v16.4s\n"
+ "ldr q17, [x26, x14]\n"
+ "fmla v23.4s, v5.4s, v19.4s\n"
+ "ldr q16, [x12, x14]\n"
+ "fmla v27.4s, v1.4s, v18.4s\n"
+ "ldr q19, [x25, x14]\n"
+ "fmla v29.4s, v5.4s, v16.4s\n"
+ "fmla v28.4s, v4.4s, v16.4s\n"
+ "fmla v26.4s, v2.4s, v16.4s\n"
+ "fmla v25.4s, v1.4s, v16.4s\n"
"ldr q16, [x24, x14]\n"
- "fmla v29.4s, v2.4s, v17.4s\n"
- "fmla v26.4s, v7.4s, v18.4s\n"
- "fmla v25.4s, v6.4s, v18.4s\n"
- "fmla v23.4s, v4.4s, v18.4s\n"
- "fmla v21.4s, v3.4s, v18.4s\n"
+ "ldr x24, [x15, #0x20]\n"
+ "fmla v21.4s, v8.4s, v17.4s\n"
+ "fmla v24.4s, v4.4s, v19.4s\n"
+ "fmla v23.4s, v7.4s, v17.4s\n"
"ldr q18, [x22, x14]\n"
- "fmla v22.4s, v4.4s, v16.4s\n"
- "ldr q4, [x16, #0x50]\n"
- "fmla v28.4s, v1.4s, v17.4s\n"
+ "fmla v27.4s, v7.4s, v19.4s\n"
+ "fmla v30.4s, v2.4s, v16.4s\n"
+ "fmla v29.4s, v1.4s, v16.4s\n"
"ldr q1, [x16, #0x20]\n"
- "fmla v27.4s, v0.4s, v17.4s\n"
- "ldr q17, [x21, x14]\n"
- "fmla v29.4s, v6.4s, v18.4s\n"
+ "fmla v26.4s, v6.4s, v19.4s\n"
+ "fmla v28.4s, v0.4s, v16.4s\n"
+ "ldr q17, [x20, x14]\n"
+ "fmla v21.4s, v3.4s, v19.4s\n"
+ "ldr q16, [x23, x14]\n"
+ "fmla v25.4s, v7.4s, v18.4s\n"
+ "fmla v23.4s, v4.4s, v18.4s\n"
+ "ldr q4, [x16, #0x50]\n"
"fmax v29.4s, v29.4s, v15.4s\n"
- "fmla v24.4s, v7.4s, v16.4s\n"
- "fmla v21.4s, v5.4s, v16.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "str q29, [x10, x13]\n"
- "fmla v23.4s, v0.4s, v18.4s\n"
+ "fmla v30.4s, v6.4s, v16.4s\n"
+ "fmla v24.4s, v0.4s, v16.4s\n"
"ldr q0, [x16, #0x10]\n"
- "fmla v22.4s, v2.4s, v17.4s\n"
- "ldr q2, [x16, #0x30]\n"
- "fmla v25.4s, v8.4s, v16.4s\n"
- "ldr q16, [x23, x14]\n"
- "fmla v26.4s, v3.4s, v18.4s\n"
+ "fmla v26.4s, v8.4s, v18.4s\n"
+ "fmla v27.4s, v3.4s, v16.4s\n"
"ldr q3, [x16, #0x40]\n"
- "fmla v27.4s, v8.4s, v17.4s\n"
- "fmla v24.4s, v5.4s, v17.4s\n"
+ "fmla v28.4s, v8.4s, v17.4s\n"
+ "fmla v21.4s, v5.4s, v18.4s\n"
+ "ldr q16, [x21, x14]\n"
+ "fmla v25.4s, v5.4s, v17.4s\n"
"ldr q5, [x16, #0x60]\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmla v23.4s, v8.4s, v16.4s\n"
+ "fmla v23.4s, v2.4s, v17.4s\n"
+ "ldr q2, [x16, #0x30]\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "ldp x23, x22, [x15, #0x0]\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "ldp x21, x20, [x15, #0x10]\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "add x14, x14, #0x10\n"
+ "fmla v24.4s, v8.4s, v16.4s\n"
"ldr q8, [x16, #0x90]\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "ldr q9, [x23, x7]\n"
+ "ldr q10, [x22, x7]\n"
"fmla v21.4s, v7.4s, v16.4s\n"
"ldr q7, [x16, #0x80]\n"
- "fmla v22.4s, v6.4s, v16.4s\n"
- "ldr q13, [x20, x7]\n"
- "ldr q6, [x16, #0x70]\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "ldr q11, [x21, x7]\n"
+ "ldr q12, [x20, x7]\n"
+ "fmla v23.4s, v6.4s, v16.4s\n"
"fmax v27.4s, v27.4s, v15.4s\n"
- "fmax v26.4s, v26.4s, v15.4s\n"
- "fmax v25.4s, v25.4s, v15.4s\n"
- "ldr x24, [x17, #0x20]\n"
- "ldp x21, x20, [x15, #0x0]\n"
- "ldr q9, [x21, x7]\n"
- "ldr q10, [x20, x7]\n"
+ "ldr q13, [x24, x7]\n"
+ "ldr q6, [x16, #0x70]\n"
"fmin v28.4s, v28.4s, v14.4s\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "ldp x21, x20, [x15, #0x10]\n"
- "ldr q11, [x21, x7]\n"
"fmin v26.4s, v26.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "ldr q12, [x20, x7]\n"
+ "str q30, [x11, x13]\n"
+ "ldr x20, [x17, #0x20]\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
"fmax v24.4s, v24.4s, v15.4s\n"
- "fmax v23.4s, v23.4s, v15.4s\n"
- "str q28, [x9, x13]\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
"fmax v21.4s, v21.4s, v15.4s\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "str q27, [x28, x13]\n"
+ "str q29, [x10, x13]\n"
"ldr x23, [x17, #0x28]\n"
- "str q26, [x27, x13]\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "str q28, [x28, x13]\n"
"ldr x22, [x17, #0x30]\n"
- "ldr x21, [x17, #0x38]\n"
"add x7, x7, #0x10\n"
- "str q25, [x24, x13]\n"
- "ldr x20, [x17, #0x40]\n"
+ "str q26, [x20, x13]\n"
+ "ldr x21, [x17, #0x40]\n"
"cmp x7, x8, LSL #4\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "str q27, [x27, x13]\n"
+ "ldr x20, [x17, #0x38]\n"
"fmin v24.4s, v24.4s, v14.4s\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
"fmin v21.4s, v21.4s, v14.4s\n"
- "add x14, x14, #0x10\n"
- "str q24, [x23, x13]\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "str q23, [x22, x13]\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
"add x16, x16, #0xa0\n"
- "str q21, [x21, x13]\n"
- "str q22, [x20, x13]\n"
+ "str q25, [x23, x13]\n"
+ "str q24, [x22, x13]\n"
+ "str q21, [x20, x13]\n"
+ "str q23, [x21, x13]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v29.16b, v31.16b\n fmla v29.4s, v8.4s, v9.4s\n"
- "mov v28.16b, v31.16b\n fmla v28.4s, v7.4s, v9.4s\n"
- "ldr x23, [x15, #0x30]\n"
- "ldr x22, [x15, #0x38]\n"
- "mov v27.16b, v31.16b\n fmla v27.4s, v6.4s, v9.4s\n"
- "fmla v29.4s, v0.4s, v10.4s\n"
- "ldr x21, [x15, #0x28]\n"
- "ldr x20, [x15, #0x48]\n"
- "ldr q19, [x20, x14]\n"
- "fmla v28.4s, v4.4s, v13.4s\n"
- "mov v26.16b, v31.16b\n fmla v26.4s, v5.4s, v9.4s\n"
+ "mov v30.16b, v31.16b\n fmla v30.4s, v8.4s, v9.4s\n"
+ "mov v29.16b, v31.16b\n fmla v29.4s, v7.4s, v9.4s\n"
+ "ldr x22, [x15, #0x30]\n"
+ "ldr x27, [x15, #0x38]\n"
+ "mov v28.16b, v31.16b\n fmla v28.4s, v6.4s, v9.4s\n"
+ "mov v27.16b, v31.16b\n fmla v27.4s, v5.4s, v9.4s\n"
+ "ldr x26, [x15, #0x28]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "mov v26.16b, v31.16b\n fmla v26.4s, v4.4s, v9.4s\n"
+ "mov v25.16b, v31.16b\n fmla v25.4s, v3.4s, v9.4s\n"
"ldr x20, [x15, #0x40]\n"
- "mov v25.16b, v31.16b\n fmla v25.4s, v4.4s, v9.4s\n"
- "mov v24.16b, v31.16b\n fmla v24.4s, v3.4s, v9.4s\n"
"ldr x25, [x15, #0x50]\n"
+ "mov v24.16b, v31.16b\n fmla v24.4s, v2.4s, v9.4s\n"
+ "mov v23.16b, v31.16b\n fmla v23.4s, v0.4s, v9.4s\n"
"ldr x24, [x15, #0x58]\n"
- "fmla v27.4s, v2.4s, v11.4s\n"
- "ldr q17, [x23, x14]\n"
- "mov v23.16b, v31.16b\n fmla v23.4s, v2.4s, v9.4s\n"
"ldr x23, [x15, #0x60]\n"
- "fmla v29.4s, v5.4s, v13.4s\n"
- "fmla v28.4s, v6.4s, v17.4s\n"
- "ldr x12, [x15, #0x70]\n"
- "ldr x11, [x15, #0x88]\n"
- "mov v22.16b, v31.16b\n fmla v22.4s, v0.4s, v9.4s\n"
- "fmla v27.4s, v3.4s, v13.4s\n"
- "ldr x10, [x17, #0x0]\n"
+ "fmla v30.4s, v0.4s, v10.4s\n"
+ "ldr q22, [x21, x14]\n"
+ "fmla v29.4s, v4.4s, v13.4s\n"
+ "ldr x12, [x15, #0x88]\n"
+ "fmla v28.4s, v2.4s, v11.4s\n"
+ "ldr q17, [x22, x14]\n"
+ "fmla v27.4s, v2.4s, v13.4s\n"
+ "ldr x22, [x15, #0x70]\n"
+ "fmla v26.4s, v1.4s, v13.4s\n"
+ "fmla v25.4s, v0.4s, v13.4s\n"
+ "ldr x11, [x17, #0x0]\n"
"add x13, x13, #0x10\n"
- "fmla v26.4s, v2.4s, v13.4s\n"
- "fmla v25.4s, v1.4s, v13.4s\n"
- "ldr x9, [x17, #0x8]\n"
- "ldr x28, [x17, #0x10]\n"
- "fmla v24.4s, v0.4s, v13.4s\n"
- "ldr q18, [x22, x14]\n"
- "fmla v23.4s, v6.4s, v12.4s\n"
- "ldr q16, [x21, x14]\n"
+ "fmla v24.4s, v6.4s, v12.4s\n"
+ "ldr q16, [x26, x14]\n"
"mov v21.16b, v31.16b\n fmla v21.4s, v1.4s, v9.4s\n"
- "fmla v29.4s, v7.4s, v17.4s\n"
- "ldr x22, [x15, #0x68]\n"
- "ldr x21, [x15, #0x78]\n"
- "fmla v28.4s, v0.4s, v18.4s\n"
- "fmla v22.4s, v8.4s, v16.4s\n"
+ "ldr x21, [x15, #0x68]\n"
+ "fmla v30.4s, v5.4s, v13.4s\n"
+ "fmla v29.4s, v6.4s, v17.4s\n"
+ "ldr x10, [x17, #0x8]\n"
+ "ldr x9, [x17, #0x10]\n"
+ "fmla v28.4s, v3.4s, v13.4s\n"
+ "ldr q18, [x27, x14]\n"
+ "fmla v27.4s, v4.4s, v17.4s\n"
+ "ldr x28, [x15, #0x78]\n"
+ "fmla v23.4s, v8.4s, v16.4s\n"
"ldr q16, [x20, x14]\n"
+ "fmla v26.4s, v3.4s, v17.4s\n"
"ldr x20, [x15, #0x80]\n"
- "fmla v26.4s, v4.4s, v17.4s\n"
- "fmla v25.4s, v3.4s, v17.4s\n"
- "ldr x27, [x17, #0x18]\n"
"fmla v21.4s, v0.4s, v17.4s\n"
- "fmla v24.4s, v4.4s, v19.4s\n"
- "fmla v23.4s, v1.4s, v17.4s\n"
+ "fmla v25.4s, v4.4s, v22.4s\n"
+ "ldr x27, [x17, #0x18]\n"
+ "fmla v30.4s, v7.4s, v17.4s\n"
+ "fmla v29.4s, v0.4s, v18.4s\n"
+ "fmla v24.4s, v1.4s, v17.4s\n"
"ldr q17, [x25, x14]\n"
- "fmla v29.4s, v1.4s, v18.4s\n"
+ "fmla v28.4s, v1.4s, v16.4s\n"
+ "ldr x26, [x15, #0x90]\n"
+ "fmla v26.4s, v5.4s, v22.4s\n"
+ "fmla v23.4s, v1.4s, v22.4s\n"
+ "fmla v21.4s, v2.4s, v22.4s\n"
+ "fmla v30.4s, v1.4s, v18.4s\n"
"ldr q20, [x24, x14]\n"
- "fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v27.4s, v1.4s, v16.4s\n"
+ "ldr x25, [x15, #0x98]\n"
+ "fmla v29.4s, v2.4s, v16.4s\n"
"ldr q16, [x23, x14]\n"
- "ldr x26, [x15, #0x90]\n"
- "fmla v25.4s, v5.4s, v19.4s\n"
- "fmla v21.4s, v2.4s, v19.4s\n"
- "ldr x25, [x15, #0xa0]\n"
- "ldr x24, [x15, #0x98]\n"
- "fmla v26.4s, v0.4s, v17.4s\n"
- "fmla v24.4s, v2.4s, v20.4s\n"
- "fmla v28.4s, v8.4s, v19.4s\n"
- "fmla v27.4s, v7.4s, v19.4s\n"
- "fmla v22.4s, v1.4s, v19.4s\n"
+ "fmla v27.4s, v0.4s, v17.4s\n"
+ "ldr x24, [x15, #0xa0]\n"
+ "fmla v28.4s, v7.4s, v22.4s\n"
+ "fmla v25.4s, v2.4s, v20.4s\n"
+ "fmla v24.4s, v3.4s, v16.4s\n"
+ "fmla v30.4s, v3.4s, v17.4s\n"
"ldr q19, [x22, x14]\n"
- "fmla v23.4s, v3.4s, v16.4s\n"
- "ldr x23, [x15, #0xa8]\n"
- "fmla v26.4s, v6.4s, v16.4s\n"
+ "ldr x23, [x15, #0xb0]\n"
+ "fmla v29.4s, v8.4s, v22.4s\n"
+ "ldr q17, [x21, x14]\n"
+ "ldr x22, [x15, #0xa8]\n"
+ "fmla v27.4s, v6.4s, v16.4s\n"
"ldr q18, [x20, x14]\n"
- "fmla v25.4s, v7.4s, v19.4s\n"
- "ldr x22, [x15, #0xc0]\n"
- "fmla v24.4s, v6.4s, v19.4s\n"
- "fmla v21.4s, v4.4s, v19.4s\n"
- "fmla v29.4s, v3.4s, v17.4s\n"
- "ldr q17, [x12, x14]\n"
- "fmla v27.4s, v5.4s, v20.4s\n"
- "ldr q16, [x21, x14]\n"
- "fmla v23.4s, v5.4s, v19.4s\n"
- "fmla v22.4s, v3.4s, v19.4s\n"
- "ldr x21, [x15, #0xb0]\n"
+ "ldr x21, [x15, #0xc0]\n"
+ "fmla v28.4s, v5.4s, v20.4s\n"
+ "ldr q16, [x28, x14]\n"
"ldr x20, [x15, #0xb8]\n"
- "fmla v26.4s, v8.4s, v19.4s\n"
- "fmla v24.4s, v8.4s, v17.4s\n"
+ "fmla v26.4s, v7.4s, v17.4s\n"
+ "fmla v25.4s, v6.4s, v17.4s\n"
+ "fmla v21.4s, v4.4s, v17.4s\n"
+ "fmla v24.4s, v5.4s, v17.4s\n"
+ "fmla v23.4s, v3.4s, v17.4s\n"
+ "fmla v27.4s, v8.4s, v17.4s\n"
+ "fmla v29.4s, v3.4s, v18.4s\n"
+ "fmla v30.4s, v4.4s, v18.4s\n"
+ "fmla v25.4s, v8.4s, v19.4s\n"
+ "fmla v26.4s, v0.4s, v18.4s\n"
"fmla v21.4s, v6.4s, v16.4s\n"
- "fmla v28.4s, v3.4s, v18.4s\n"
- "fmla v25.4s, v0.4s, v18.4s\n"
- "fmla v22.4s, v5.4s, v17.4s\n"
- "ldr q17, [x11, x14]\n"
- "fmla v23.4s, v7.4s, v16.4s\n"
- "ldr q16, [x26, x14]\n"
- "fmla v29.4s, v4.4s, v18.4s\n"
- "fmla v26.4s, v1.4s, v18.4s\n"
- "ldr q18, [x24, x14]\n"
- "fmla v28.4s, v5.4s, v17.4s\n"
- "fmla v27.4s, v4.4s, v17.4s\n"
- "fmla v25.4s, v2.4s, v17.4s\n"
- "fmla v24.4s, v1.4s, v17.4s\n"
- "ldr q17, [x25, x14]\n"
- "fmla v21.4s, v8.4s, v16.4s\n"
- "fmla v22.4s, v7.4s, v16.4s\n"
+ "fmla v24.4s, v7.4s, v16.4s\n"
+ "ldr q17, [x26, x14]\n"
+ "fmla v23.4s, v5.4s, v19.4s\n"
+ "ldr q16, [x12, x14]\n"
+ "fmla v27.4s, v1.4s, v18.4s\n"
+ "ldr q19, [x25, x14]\n"
+ "fmla v29.4s, v5.4s, v16.4s\n"
+ "fmla v28.4s, v4.4s, v16.4s\n"
+ "fmla v26.4s, v2.4s, v16.4s\n"
+ "fmla v25.4s, v1.4s, v16.4s\n"
+ "ldr q16, [x24, x14]\n"
+ "fmla v21.4s, v8.4s, v17.4s\n"
+ "fmla v24.4s, v4.4s, v19.4s\n"
+ "fmla v23.4s, v7.4s, v17.4s\n"
+ "ldr q18, [x22, x14]\n"
+ "fmla v27.4s, v7.4s, v19.4s\n"
+ "fmla v30.4s, v2.4s, v16.4s\n"
+ "fmla v29.4s, v1.4s, v16.4s\n"
+ "fmla v26.4s, v6.4s, v19.4s\n"
+ "fmla v28.4s, v0.4s, v16.4s\n"
+ "ldr q17, [x20, x14]\n"
+ "fmla v21.4s, v3.4s, v19.4s\n"
"ldr q16, [x23, x14]\n"
- "fmla v29.4s, v2.4s, v17.4s\n"
- "fmla v26.4s, v7.4s, v18.4s\n"
- "fmla v25.4s, v6.4s, v18.4s\n"
+ "fmla v25.4s, v7.4s, v18.4s\n"
"fmla v23.4s, v4.4s, v18.4s\n"
- "fmla v21.4s, v3.4s, v18.4s\n"
- "ldr q18, [x21, x14]\n"
- "fmla v22.4s, v4.4s, v16.4s\n"
- "fmla v28.4s, v1.4s, v17.4s\n"
- "fmax v28.4s, v28.4s, v15.4s\n"
- "fmla v27.4s, v0.4s, v17.4s\n"
- "ldr q17, [x20, x14]\n"
- "fmla v29.4s, v6.4s, v18.4s\n"
"fmax v29.4s, v29.4s, v15.4s\n"
- "fmla v24.4s, v7.4s, v16.4s\n"
- "fmla v21.4s, v5.4s, v16.4s\n"
+ "fmla v30.4s, v6.4s, v16.4s\n"
+ "fmla v24.4s, v0.4s, v16.4s\n"
+ "fmla v26.4s, v8.4s, v18.4s\n"
+ "fmla v27.4s, v3.4s, v16.4s\n"
+ "fmla v28.4s, v8.4s, v17.4s\n"
+ "fmla v21.4s, v5.4s, v18.4s\n"
+ "ldr q16, [x21, x14]\n"
+ "fmla v25.4s, v5.4s, v17.4s\n"
"fmin v29.4s, v29.4s, v14.4s\n"
- "str q29, [x10, x13]\n"
- "fmla v23.4s, v0.4s, v18.4s\n"
- "fmla v22.4s, v2.4s, v17.4s\n"
- "ldr x20, [x17, #0x20]\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
- "fmla v25.4s, v8.4s, v16.4s\n"
- "ldr q16, [x22, x14]\n"
- "fmla v26.4s, v3.4s, v18.4s\n"
+ "fmla v23.4s, v2.4s, v17.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "add x14, x14, #0x10\n"
"fmax v26.4s, v26.4s, v15.4s\n"
- "fmla v27.4s, v8.4s, v17.4s\n"
- "fmla v24.4s, v5.4s, v17.4s\n"
+ "fmla v24.4s, v8.4s, v16.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
"fmax v27.4s, v27.4s, v15.4s\n"
- "str q28, [x9, x13]\n"
- "fmla v23.4s, v8.4s, v16.4s\n"
+ "str q29, [x10, x13]\n"
+ "ldr x23, [x17, #0x28]\n"
"fmla v21.4s, v7.4s, v16.4s\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmla v23.4s, v6.4s, v16.4s\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
"fmax v25.4s, v25.4s, v15.4s\n"
- "ldr x23, [x17, #0x28]\n"
- "fmla v22.4s, v6.4s, v16.4s\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
"fmin v27.4s, v27.4s, v14.4s\n"
- "str q27, [x28, x13]\n"
- "ldr x22, [x17, #0x30]\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "str q26, [x27, x13]\n"
- "ldr x21, [x17, #0x38]\n"
+ "str q30, [x11, x13]\n"
+ "ldr x20, [x17, #0x20]\n"
"fmax v24.4s, v24.4s, v15.4s\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
"fmax v23.4s, v23.4s, v15.4s\n"
- "str q25, [x20, x13]\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "str q28, [x9, x13]\n"
+ "ldr x22, [x17, #0x30]\n"
+ "str q27, [x27, x13]\n"
+ "ldr x21, [x17, #0x38]\n"
+ "str q26, [x20, x13]\n"
"ldr x20, [x17, #0x40]\n"
- "fmax v21.4s, v21.4s, v15.4s\n"
- "fmax v22.4s, v22.4s, v15.4s\n"
- "add x14, x14, #0x10\n"
"fmin v24.4s, v24.4s, v14.4s\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
- "str q24, [x23, x13]\n"
"fmin v21.4s, v21.4s, v14.4s\n"
- "fmin v22.4s, v22.4s, v14.4s\n"
- "str q23, [x22, x13]\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
+ "str q25, [x23, x13]\n"
+ "str q24, [x22, x13]\n"
"str q21, [x21, x13]\n"
- "str q22, [x20, x13]\n"
+ "str q23, [x20, x13]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 48f\n"
@@ -478,13 +478,13 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr q8, [x16, #0x90]\n"
"ldr x24, [x15, #0x0]\n"
"ldr x23, [x15, #0x8]\n"
- "add x24, x24, x14\n"
- "add x23, x23, x14\n"
"ldr x22, [x15, #0x10]\n"
"ldr x21, [x15, #0x18]\n"
+ "ldr x20, [x15, #0x20]\n"
+ "add x24, x24, x14\n"
+ "add x23, x23, x14\n"
"add x22, x22, x14\n"
"add x21, x21, x14\n"
- "ldr x20, [x15, #0x20]\n"
"add x20, x20, x14\n"
"tbz %x[n_channels], #1, 4f\n"
"ld1 { v9.d }[0], [x24], #0x8\n"
@@ -509,23 +509,23 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"mov v23.16b, v31.16b\n fmla v23.4s, v8.4s, v9.4s\n"
"mov v25.16b, v31.16b\n fmla v25.4s, v6.4s, v9.4s\n"
"ldr x20, [x15, #0x28]\n"
- "add x20, x20, x14\n"
"mov v24.16b, v31.16b\n fmla v24.4s, v7.4s, v9.4s\n"
"mov v26.16b, v31.16b\n fmla v26.4s, v5.4s, v9.4s\n"
"mov v27.16b, v31.16b\n fmla v27.4s, v4.4s, v9.4s\n"
"mov v28.16b, v31.16b\n fmla v28.4s, v3.4s, v9.4s\n"
"mov v29.16b, v31.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "mov v30.16b, v31.16b\n fmla v30.4s, v1.4s, v9.4s\n"
+ "add x20, x20, x14\n"
"fmla v23.4s, v0.4s, v10.4s\n"
"fmla v25.4s, v2.4s, v11.4s\n"
- "mov v30.16b, v31.16b\n fmla v30.4s, v1.4s, v9.4s\n"
"fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "fmla v23.4s, v5.4s, v13.4s\n"
"fmla v24.4s, v4.4s, v13.4s\n"
- "fmla v25.4s, v3.4s, v13.4s\n"
"fmla v26.4s, v2.4s, v13.4s\n"
"fmla v27.4s, v1.4s, v13.4s\n"
+ "fmla v29.4s, v6.4s, v12.4s\n"
"fmla v28.4s, v0.4s, v13.4s\n"
+ "fmla v23.4s, v5.4s, v13.4s\n"
+ "fmla v25.4s, v3.4s, v13.4s\n"
"tbz %x[n_channels], #1, 6f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
@@ -548,11 +548,11 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x38]\n"
"fmla v23.4s, v7.4s, v11.4s\n"
"fmla v24.4s, v6.4s, v11.4s\n"
- "add x20, x20, x14\n"
"fmla v26.4s, v4.4s, v11.4s\n"
"fmla v27.4s, v3.4s, v11.4s\n"
"fmla v29.4s, v1.4s, v11.4s\n"
"fmla v30.4s, v0.4s, v11.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 10f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
@@ -588,11 +588,11 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x50]\n"
"fmla v24.4s, v8.4s, v10.4s\n"
"fmla v25.4s, v7.4s, v10.4s\n"
- "add x20, x20, x14\n"
"fmla v27.4s, v5.4s, v10.4s\n"
"fmla v28.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v2.4s, v10.4s\n"
"fmla v31.4s, v1.4s, v10.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 16f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
@@ -640,11 +640,11 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x70]\n"
"fmla v26.4s, v8.4s, v10.4s\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "add x20, x20, x14\n"
"fmla v28.4s, v6.4s, v10.4s\n"
"fmla v29.4s, v5.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v10.4s\n"
"fmla v31.4s, v3.4s, v10.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 24f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
@@ -680,9 +680,9 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x88]\n"
"fmla v23.4s, v4.4s, v12.4s\n"
"fmla v24.4s, v3.4s, v12.4s\n"
- "add x20, x20, x14\n"
"fmla v26.4s, v1.4s, v12.4s\n"
"fmla v27.4s, v0.4s, v12.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 30f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 31f\n"
@@ -694,9 +694,9 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x90]\n"
"fmla v24.4s, v5.4s, v11.4s\n"
"fmla v25.4s, v4.4s, v11.4s\n"
- "add x20, x20, x14\n"
"fmla v27.4s, v2.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v11.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 32f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 33f\n"
@@ -720,9 +720,9 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xa0]\n"
"fmla v26.4s, v7.4s, v12.4s\n"
"fmla v27.4s, v6.4s, v12.4s\n"
- "add x20, x20, x14\n"
"fmla v29.4s, v4.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v12.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 36f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 37f\n"
@@ -734,8 +734,8 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xa8]\n"
"fmla v23.4s, v2.4s, v11.4s\n"
"fmla v24.4s, v1.4s, v11.4s\n"
- "add x20, x20, x14\n"
"fmla v25.4s, v0.4s, v11.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 38f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 39f\n"
@@ -747,9 +747,9 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xb0]\n"
"fmla v27.4s, v8.4s, v13.4s\n"
"fmla v28.4s, v7.4s, v13.4s\n"
- "add x20, x20, x14\n"
"fmla v30.4s, v5.4s, v13.4s\n"
"fmla v31.4s, v4.4s, v13.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 40f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 41f\n"
@@ -761,8 +761,8 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xb8]\n"
"fmla v23.4s, v6.4s, v12.4s\n"
"fmla v26.4s, v3.4s, v12.4s\n"
- "add x20, x20, x14\n"
"fmla v29.4s, v0.4s, v12.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 42f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 43f\n"
@@ -774,8 +774,8 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0xc0]\n"
"fmla v25.4s, v8.4s, v11.4s\n"
"fmla v28.4s, v5.4s, v11.4s\n"
- "add x20, x20, x14\n"
"fmla v31.4s, v2.4s, v11.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 44f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 45f\n"
@@ -807,88 +807,88 @@ void a64_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"fmin v31.4s, v31.4s, v14.4s\n"
"tbz %x[n_channels], #1, 46f\n"
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.d }[0], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.d }[0], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.d }[0], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.d }[0], [x23]\n"
"st1 { v25.d }[0], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.d }[0], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.d }[0], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
- "add x13, x13, #0x8\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.d }[0], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.d }[0], [x22]\n"
+ "add x20, x20, x13\n"
+ "add x13, x13, #0x8\n"
"st1 { v30.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #0, 47f\n"
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.s }[2], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.s }[2], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.s }[2], [x23]\n"
"st1 { v25.s }[2], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.s }[2], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.s }[2], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.s }[2], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.s }[2], [x22]\n"
+ "add x20, x20, x13\n"
"st1 { v30.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Store: Bit 1: Unset
"ldr x20, [x17, #0x0]\n"
- "add x20, x20, x13\n"
- "st1 { v23.s }[0], [x20]\n"
"ldr x23, [x17, #0x8]\n"
"ldr x22, [x17, #0x10]\n"
"ldr x21, [x17, #0x18]\n"
+ "add x20, x20, x13\n"
+ "st1 { v23.s }[0], [x20]\n"
+ "ldr x20, [x17, #0x20]\n"
"add x23, x23, x13\n"
+ "st1 { v24.s }[0], [x23]\n"
+ "ldr x23, [x17, #0x28]\n"
"add x22, x22, x13\n"
- "ldr x20, [x17, #0x20]\n"
"add x21, x21, x13\n"
- "add x20, x20, x13\n"
- "st1 { v24.s }[0], [x23]\n"
"st1 { v25.s }[0], [x22]\n"
- "ldr x23, [x17, #0x28]\n"
"ldr x22, [x17, #0x30]\n"
- "add x23, x23, x13\n"
"st1 { v26.s }[0], [x21]\n"
"ldr x21, [x17, #0x38]\n"
- "add x22, x22, x13\n"
- "add x21, x21, x13\n"
+ "add x20, x20, x13\n"
"st1 { v27.s }[0], [x20]\n"
"ldr x20, [x17, #0x40]\n"
- "add x20, x20, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"st1 { v28.s }[0], [x23]\n"
+ "add x21, x21, x13\n"
"st1 { v29.s }[0], [x22]\n"
+ "add x20, x20, x13\n"
"st1 { v30.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"47:" // Oddments: Store: Bit 1: End
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index 3adf8b0d9f..6de6c3658e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,56 +87,56 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
+ "mov x28, #0x0\n"
"mov x27, #0x0\n"
- "mov x26, #0x0\n"
"1:" // Tile loop
- "str x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
- "mov x23, #0x4\n"
- "str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x27, x24\n" // offset = tile_i * ld_input_row
+ "str x28, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x4\n"
+ "mov x21, #0x4\n"
+ "str x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x2, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mov x3, #0x10\n" // cntb _, ALL, #1
"ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
"ldr x5, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x27, x22\n" // offset = tile_i * ld_output_row
- "mov x6, #0x10\n" // cntb _, ALL, #1
- "madd x21, x26, x4, x21\n" // offset += tile_j * ld_input_col
- "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "lsl x4, x4, #0x2\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "madd x20, x26, x5, x20\n" // offset += tile_j * ld_output_col
- "lsl x5, x5, #0x2\n"
- "add x17, x4, x4\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x7, x7, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x15, x7, x24, LSL #2\n"
- "mul x20, x20, x23\n" // offset *= output_tile_size
- "add x14, x15, x24, LSL #2\n"
- "add x8, x8, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "lsr x13, %x[n_channels], #0x2\n"
- "add x12, x14, x24, LSL #2\n"
- "add x11, x17, x4\n"
- "add x10, x8, x22, LSL #2\n"
- "add x9, x12, x24, LSL #2\n"
- "add x28, x11, x4\n"
- "add x27, x10, x22, LSL #2\n"
- "add x23, x5, x5\n"
+ "lsr x6, %x[n_channels], #0x2\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v13.4s }, [x20]\n"
+ "ld1r { v14.4s }, [x20]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x8, #0x0\n"
"ld1r { v15.4s }, [x20]\n"
- "add x26, x9, x24, LSL #2\n"
- "add x25, x28, x4\n"
- "add x24, x27, x22, LSL #2\n"
- "add x22, x23, x5\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x6\n"
- "cbz x13, 4f\n"
- "ldr q14, [x16, #0x0]\n"
+ "mul x24, x28, x25\n" // offset = tile_i * ld_input_row
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x23, XZR, x3\n"
+ "mul x22, x28, x2\n" // offset = tile_i * ld_output_row
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x24, x27, x4, x24\n" // offset += tile_j * ld_input_col
+ "lsl x4, x4, #0x2\n"
+ "madd x22, x27, x5, x22\n" // offset += tile_j * ld_output_col
+ "lsl x5, x5, #0x2\n"
+ "mul x24, x24, x26\n" // offset *= kernel_stride * output_size
+ "add x15, x4, x4\n"
+ "add x14, x15, x4\n"
+ "add x13, x14, x4\n"
+ "mul x22, x22, x21\n" // offset *= output_tile_size
+ "add x21, x5, x5\n"
+ "add x12, x13, x4\n"
+ "add x20, x21, x5\n"
+ "add x7, x7, x24, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x11, x7, x25, LSL #2\n"
+ "add x10, x11, x25, LSL #2\n"
+ "add x17, x17, x22, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x9, x10, x25, LSL #2\n"
+ "add x28, x17, x2, LSL #2\n"
+ "add x27, x9, x25, LSL #2\n"
+ "add x26, x28, x2, LSL #2\n"
+ "add x25, x27, x25, LSL #2\n"
+ "add x24, x26, x2, LSL #2\n"
+ "cbz x6, 4f\n"
+ "ldr q13, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "cmp x6, x13, LSL #4\n"
+ "cmp x3, x6, LSL #4\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
"ldr q3, [x16, #0x40]\n"
@@ -146,512 +146,512 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ldr q7, [x16, #0x80]\n"
"ldr q8, [x16, #0x90]\n"
"add x16, x16, #0xa0\n"
- "ldr q9, [x14, x17]\n"
+ "ldr q9, [x10, x15]\n"
"ld1 { v10.4s }, [x7]\n"
- "ldr q11, [x7, x25]\n"
- "ldr q12, [x14, x11]\n"
+ "ldr q11, [x7, x12]\n"
+ "ldr q12, [x10, x14]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "mov v26.16b, v14.16b\n fmla v26.4s, v4.4s, v9.4s\n"
- "mov v28.16b, v14.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "add x6, x6, #0x10\n"
- "cmp x6, x13, LSL #4\n"
- "mov v16.16b, v14.16b\n fmla v16.4s, v3.4s, v9.4s\n"
- "mov v22.16b, v14.16b\n fmla v22.4s, v1.4s, v9.4s\n"
- "add x20, x20, #0x10\n"
- "add x21, x21, #0x10\n"
- "mov v23.16b, v14.16b\n fmla v23.4s, v0.4s, v9.4s\n"
- "fmla v26.4s, v5.4s, v12.4s\n"
- "mov v25.16b, v14.16b\n fmla v25.4s, v7.4s, v9.4s\n"
- "mov v17.16b, v14.16b\n fmla v17.4s, v6.4s, v9.4s\n"
- "mov v31.16b, v14.16b\n fmla v31.4s, v5.4s, v9.4s\n"
- "mov v20.16b, v14.16b\n fmla v20.4s, v2.4s, v9.4s\n"
- "ldr q9, [x12, x17]\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ld1 { v30.4s }, [x26]\n"
- "mov v29.16b, v14.16b\n fmla v29.4s, v2.4s, v11.4s\n"
- "ldr q27, [x26, x25]\n"
- "fmla v16.4s, v4.4s, v12.4s\n"
- "fmla v22.4s, v2.4s, v12.4s\n"
- "fmla v23.4s, v1.4s, v12.4s\n"
- "mov v21.16b, v14.16b\n fmla v21.4s, v6.4s, v30.4s\n"
- "ldr q10, [x12, x11]\n"
- "fmla v26.4s, v7.4s, v9.4s\n"
- "fmla v25.4s, v8.4s, v12.4s\n"
- "fmla v17.4s, v7.4s, v12.4s\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "mov v24.16b, v14.16b\n fmla v24.4s, v3.4s, v12.4s\n"
- "mov v19.16b, v14.16b\n fmla v19.4s, v0.4s, v12.4s\n"
- "ldr q11, [x7, x4]\n"
- "mov v30.16b, v14.16b\n fmla v30.4s, v8.4s, v27.4s\n"
- "ldr q12, [x7, x28]\n"
- "fmla v16.4s, v6.4s, v9.4s\n"
- "fmla v22.4s, v4.4s, v9.4s\n"
- "fmla v23.4s, v3.4s, v9.4s\n"
- "mov v27.16b, v14.16b\n fmla v27.4s, v1.4s, v9.4s\n"
- "mov v18.16b, v14.16b\n fmla v18.4s, v0.4s, v9.4s\n"
- "ldr q14, [x16, #0x0]\n"
- "fmla v31.4s, v8.4s, v9.4s\n"
- "fmla v20.4s, v5.4s, v9.4s\n"
- "fmla v21.4s, v2.4s, v9.4s\n"
- "ld1 { v9.4s }, [x15]\n"
- "fmla v26.4s, v8.4s, v10.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
- "fmla v25.4s, v0.4s, v11.4s\n"
- "ldr q11, [x15, x25]\n"
- "fmla v17.4s, v2.4s, v12.4s\n"
- "fmla v29.4s, v1.4s, v12.4s\n"
- "ld1 { v12.4s }, [x9]\n"
- "fmla v16.4s, v7.4s, v10.4s\n"
- "fmla v24.4s, v6.4s, v10.4s\n"
- "fmla v22.4s, v5.4s, v10.4s\n"
- "fmla v23.4s, v4.4s, v10.4s\n"
- "fmla v19.4s, v3.4s, v10.4s\n"
- "fmla v27.4s, v2.4s, v10.4s\n"
- "fmla v18.4s, v1.4s, v10.4s\n"
- "fmla v30.4s, v0.4s, v10.4s\n"
- "ldr q10, [x15, x17]\n"
- "fmla v31.4s, v0.4s, v9.4s\n"
- "fmla v20.4s, v6.4s, v12.4s\n"
- "fmla v21.4s, v3.4s, v12.4s\n"
- "ldr q12, [x9, x25]\n"
- "fmla v26.4s, v1.4s, v10.4s\n"
- "fmla v28.4s, v3.4s, v9.4s\n"
- "fmla v29.4s, v5.4s, v11.4s\n"
- "fmla v24.4s, v2.4s, v11.4s\n"
- "ldr q11, [x15, x11]\n"
- "fmla v25.4s, v4.4s, v10.4s\n"
- "fmla v17.4s, v3.4s, v10.4s\n"
- "fmla v16.4s, v0.4s, v10.4s\n"
- "fmla v19.4s, v8.4s, v12.4s\n"
- "fmla v30.4s, v5.4s, v12.4s\n"
- "ldr q9, [x26, x4]\n"
- "fmla v31.4s, v2.4s, v10.4s\n"
- "fmla v26.4s, v2.4s, v11.4s\n"
- "fmla v28.4s, v5.4s, v10.4s\n"
- "ldr q10, [x14, x4]\n"
- "fmla v25.4s, v5.4s, v11.4s\n"
- "fmla v17.4s, v4.4s, v11.4s\n"
- "fmla v29.4s, v3.4s, v11.4s\n"
- "fmla v16.4s, v1.4s, v11.4s\n"
- "fmla v24.4s, v0.4s, v11.4s\n"
- "ldr q11, [x14, x28]\n"
- "fmla v21.4s, v7.4s, v9.4s\n"
- "fmla v27.4s, v6.4s, v9.4s\n"
- "ldr q12, [x26, x28]\n"
- "fmla v31.4s, v4.4s, v10.4s\n"
- "fmla v26.4s, v3.4s, v10.4s\n"
- "fmla v20.4s, v1.4s, v10.4s\n"
- "fmla v22.4s, v0.4s, v10.4s\n"
- "fmla v28.4s, v7.4s, v10.4s\n"
- "fmla v25.4s, v6.4s, v10.4s\n"
- "ldr q10, [x7, x17]\n"
- "fmla v18.4s, v8.4s, v12.4s\n"
- "fmla v30.4s, v7.4s, v12.4s\n"
- "ldr q9, [x12, x4]\n"
- "fmla v17.4s, v8.4s, v11.4s\n"
- "fmla v29.4s, v7.4s, v11.4s\n"
- "fmla v16.4s, v5.4s, v11.4s\n"
- "fmla v24.4s, v4.4s, v11.4s\n"
- "fmla v23.4s, v2.4s, v11.4s\n"
- "fmla v19.4s, v1.4s, v11.4s\n"
- "ldr q12, [x7, x11]\n"
- "add x7, x7, #0x10\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v4.4s, v9.4s\n"
+ "mov v24.16b, v13.16b\n fmla v24.4s, v8.4s, v9.4s\n"
+ "add x3, x3, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "mov v23.16b, v13.16b\n fmla v23.4s, v3.4s, v9.4s\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v1.4s, v9.4s\n"
+ "cmp x3, x6, LSL #4\n"
+ "add x8, x8, #0x10\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v0.4s, v9.4s\n"
+ "mov v22.16b, v13.16b\n fmla v22.4s, v7.4s, v9.4s\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v6.4s, v9.4s\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v5.4s, v9.4s\n"
+ "fmla v31.4s, v5.4s, v12.4s\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v2.4s, v9.4s\n"
+ "ldr q9, [x9, x15]\n"
+ "fmla v24.4s, v0.4s, v10.4s\n"
+ "ld1 { v26.4s }, [x25]\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v2.4s, v11.4s\n"
+ "ldr q17, [x25, x12]\n"
+ "fmla v23.4s, v4.4s, v12.4s\n"
+ "fmla v28.4s, v2.4s, v12.4s\n"
+ "fmla v21.4s, v1.4s, v12.4s\n"
+ "fmla v22.4s, v8.4s, v12.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v6.4s, v26.4s\n"
+ "ldr q11, [x9, x14]\n"
"fmla v31.4s, v7.4s, v9.4s\n"
- "fmla v26.4s, v6.4s, v9.4s\n"
- "fmla v20.4s, v4.4s, v9.4s\n"
- "fmla v22.4s, v3.4s, v9.4s\n"
- "fmla v21.4s, v1.4s, v9.4s\n"
- "fmla v27.4s, v0.4s, v9.4s\n"
- "ldr q9, [x12, x28]\n"
- "fmla v28.4s, v2.4s, v10.4s\n"
- "fmla v25.4s, v1.4s, v10.4s\n"
- "fmla v17.4s, v0.4s, v10.4s\n"
- "ld1 { v10.4s }, [x14]\n"
- "fmla v18.4s, v2.4s, v9.4s\n"
- "fmla v29.4s, v0.4s, v12.4s\n"
- "fmla v31.4s, v3.4s, v10.4s\n"
- "fmla v20.4s, v0.4s, v10.4s\n"
- "fmla v16.4s, v8.4s, v9.4s\n"
- "fmla v24.4s, v7.4s, v9.4s\n"
- "fmla v23.4s, v5.4s, v9.4s\n"
- "fmla v19.4s, v4.4s, v9.4s\n"
- "fmla v30.4s, v1.4s, v9.4s\n"
- "ldr q11, [x9, x17]\n"
- "fmla v25.4s, v2.4s, v12.4s\n"
- "fmla v17.4s, v1.4s, v12.4s\n"
- "ldr q12, [x14, x25]\n"
- "add x14, x14, #0x10\n"
- "ldr q9, [x14, x17]\n"
- "fmla v28.4s, v6.4s, v10.4s\n"
- "ld1 { v10.4s }, [x12]\n"
- "fmla v27.4s, v4.4s, v11.4s\n"
- "fmla v18.4s, v3.4s, v11.4s\n"
- "fmla v29.4s, v8.4s, v12.4s\n"
- "fmla v24.4s, v5.4s, v12.4s\n"
+ "fmla v19.4s, v7.4s, v12.4s\n"
+ "fmla v18.4s, v6.4s, v12.4s\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v3.4s, v12.4s\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v0.4s, v12.4s\n"
+ "ldr q10, [x7, x4]\n"
+ "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v17.4s\n"
+ "ldr q12, [x7, x13]\n"
+ "fmla v23.4s, v6.4s, v9.4s\n"
+ "fmla v28.4s, v4.4s, v9.4s\n"
+ "fmla v21.4s, v3.4s, v9.4s\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v1.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v0.4s, v9.4s\n"
+ "ldr q13, [x16, #0x0]\n"
+ "fmla v29.4s, v8.4s, v9.4s\n"
+ "fmla v30.4s, v5.4s, v9.4s\n"
+ "fmla v20.4s, v2.4s, v9.4s\n"
+ "ld1 { v9.4s }, [x11]\n"
+ "fmla v31.4s, v8.4s, v11.4s\n"
+ "fmla v24.4s, v1.4s, v10.4s\n"
+ "fmla v22.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x11, x12]\n"
"fmla v19.4s, v2.4s, v12.4s\n"
- "ldr q12, [x12, x25]\n"
- "add x12, x12, #0x10\n"
- "fmla v31.4s, v6.4s, v10.4s\n"
- "fmla v20.4s, v3.4s, v10.4s\n"
- "fmla v21.4s, v0.4s, v10.4s\n"
- "ldr q10, [x26, x17]\n"
- "fmla v30.4s, v2.4s, v12.4s\n"
- "fmla v27.4s, v7.4s, v10.4s\n"
- "fmla v18.4s, v6.4s, v10.4s\n"
- "fmla v20.4s, v8.4s, v11.4s\n"
- "fmla v22.4s, v7.4s, v11.4s\n"
- "fmla v23.4s, v6.4s, v11.4s\n"
- "fmla v21.4s, v5.4s, v11.4s\n"
- "ldr q11, [x9, x11]\n"
- "fmla v19.4s, v5.4s, v12.4s\n"
- "fmla v27.4s, v5.4s, v11.4s\n"
- "fmla v18.4s, v4.4s, v11.4s\n"
- "fmla v30.4s, v3.4s, v11.4s\n"
- "fmla v24.4s, v8.4s, v12.4s\n"
- "ldr q12, [x26, x11]\n"
- "fmla v21.4s, v8.4s, v10.4s\n"
- "ldr q10, [x15, x4]\n"
- "fmla v22.4s, v8.4s, v11.4s\n"
+ "fmla v18.4s, v1.4s, v12.4s\n"
+ "ld1 { v12.4s }, [x27]\n"
"fmla v23.4s, v7.4s, v11.4s\n"
- "add x26, x26, #0x10\n"
- "fmla v19.4s, v6.4s, v11.4s\n"
- "ldr q11, [x15, x28]\n"
- "fmla v27.4s, v8.4s, v12.4s\n"
- "add x15, x15, #0x10\n"
- "fmla v18.4s, v7.4s, v12.4s\n"
+ "fmla v26.4s, v6.4s, v11.4s\n"
+ "fmla v28.4s, v5.4s, v11.4s\n"
+ "fmla v21.4s, v4.4s, v11.4s\n"
+ "fmla v27.4s, v3.4s, v11.4s\n"
+ "fmla v25.4s, v2.4s, v11.4s\n"
+ "fmla v17.4s, v1.4s, v11.4s\n"
+ "fmla v16.4s, v0.4s, v11.4s\n"
+ "ldr q11, [x11, x15]\n"
+ "fmla v29.4s, v0.4s, v9.4s\n"
"fmla v30.4s, v6.4s, v12.4s\n"
- "ldr q12, [x9, x4]\n"
- "fmla v28.4s, v4.4s, v10.4s\n"
- "fmla v25.4s, v3.4s, v10.4s\n"
- "fmax v28.4s, v28.4s, v13.4s\n"
- "fmla v31.4s, v1.4s, v10.4s\n"
+ "fmla v20.4s, v3.4s, v12.4s\n"
+ "ldr q12, [x27, x12]\n"
+ "fmla v24.4s, v3.4s, v9.4s\n"
+ "fmla v31.4s, v1.4s, v11.4s\n"
+ "fmla v18.4s, v5.4s, v10.4s\n"
+ "fmla v26.4s, v2.4s, v10.4s\n"
+ "ldr q10, [x11, x14]\n"
+ "fmla v22.4s, v4.4s, v11.4s\n"
+ "fmla v19.4s, v3.4s, v11.4s\n"
+ "fmla v23.4s, v0.4s, v11.4s\n"
+ "fmla v27.4s, v8.4s, v12.4s\n"
+ "fmla v16.4s, v5.4s, v12.4s\n"
+ "ldr q9, [x25, x4]\n"
+ "fmla v29.4s, v2.4s, v11.4s\n"
+ "fmla v31.4s, v2.4s, v10.4s\n"
+ "fmla v24.4s, v5.4s, v11.4s\n"
+ "ldr q12, [x10, x4]\n"
+ "fmla v22.4s, v5.4s, v10.4s\n"
+ "fmla v19.4s, v4.4s, v10.4s\n"
+ "fmla v18.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
"fmla v26.4s, v0.4s, v10.4s\n"
- "ldr q10, [x9, x28]\n"
- "ldr q0, [x16, #0x10]\n"
- "fmla v17.4s, v5.4s, v11.4s\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "fmax v25.4s, v25.4s, v13.4s\n"
+ "ldr q11, [x10, x13]\n"
+ "fmla v20.4s, v7.4s, v9.4s\n"
+ "fmla v25.4s, v6.4s, v9.4s\n"
+ "ldr q9, [x25, x13]\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
+ "fmla v31.4s, v3.4s, v12.4s\n"
+ "fmla v30.4s, v1.4s, v12.4s\n"
+ "fmla v28.4s, v0.4s, v12.4s\n"
+ "fmla v24.4s, v7.4s, v12.4s\n"
+ "fmla v22.4s, v6.4s, v12.4s\n"
+ "ldr q10, [x7, x15]\n"
+ "fmla v17.4s, v8.4s, v9.4s\n"
+ "fmla v16.4s, v7.4s, v9.4s\n"
+ "ldr q9, [x9, x4]\n"
+ "fmla v19.4s, v8.4s, v11.4s\n"
+ "fmla v18.4s, v7.4s, v11.4s\n"
+ "fmla v23.4s, v5.4s, v11.4s\n"
+ "fmla v26.4s, v4.4s, v11.4s\n"
+ "fmla v21.4s, v2.4s, v11.4s\n"
+ "fmla v27.4s, v1.4s, v11.4s\n"
+ "ldr q12, [x7, x14]\n"
+ "add x7, x7, #0x10\n"
+ "fmla v29.4s, v7.4s, v9.4s\n"
+ "fmla v31.4s, v6.4s, v9.4s\n"
+ "fmla v30.4s, v4.4s, v9.4s\n"
+ "fmla v28.4s, v3.4s, v9.4s\n"
+ "fmla v20.4s, v1.4s, v9.4s\n"
+ "fmla v25.4s, v0.4s, v9.4s\n"
+ "ldr q9, [x9, x13]\n"
+ "fmla v24.4s, v2.4s, v10.4s\n"
+ "fmla v22.4s, v1.4s, v10.4s\n"
+ "fmla v19.4s, v0.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x10]\n"
+ "fmla v18.4s, v0.4s, v12.4s\n"
+ "fmla v17.4s, v2.4s, v9.4s\n"
+ "fmla v23.4s, v8.4s, v9.4s\n"
+ "fmla v26.4s, v7.4s, v9.4s\n"
+ "fmla v21.4s, v5.4s, v9.4s\n"
+ "fmla v29.4s, v3.4s, v10.4s\n"
+ "fmla v30.4s, v0.4s, v10.4s\n"
+ "fmla v27.4s, v4.4s, v9.4s\n"
+ "fmla v16.4s, v1.4s, v9.4s\n"
+ "ldr q11, [x27, x15]\n"
+ "fmla v22.4s, v2.4s, v12.4s\n"
+ "fmla v19.4s, v1.4s, v12.4s\n"
+ "ldr q12, [x10, x12]\n"
+ "add x10, x10, #0x10\n"
+ "ldr q9, [x10, x15]\n"
+ "fmla v24.4s, v6.4s, v10.4s\n"
+ "ld1 { v10.4s }, [x9]\n"
+ "fmla v25.4s, v4.4s, v11.4s\n"
+ "fmla v17.4s, v3.4s, v11.4s\n"
+ "fmla v28.4s, v7.4s, v11.4s\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
+ "fmla v26.4s, v5.4s, v12.4s\n"
+ "fmla v27.4s, v2.4s, v12.4s\n"
+ "ldr q12, [x9, x12]\n"
+ "fmla v29.4s, v6.4s, v10.4s\n"
"add x9, x9, #0x10\n"
- "fmla v16.4s, v2.4s, v11.4s\n"
+ "fmla v30.4s, v3.4s, v10.4s\n"
+ "fmla v20.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x25, x15]\n"
+ "fmla v21.4s, v6.4s, v11.4s\n"
+ "fmla v16.4s, v2.4s, v12.4s\n"
+ "fmla v26.4s, v8.4s, v12.4s\n"
+ "fmla v25.4s, v7.4s, v10.4s\n"
+ "fmla v17.4s, v6.4s, v10.4s\n"
+ "fmla v27.4s, v5.4s, v12.4s\n"
+ "ldr q12, [x25, x14]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v30.4s, v8.4s, v11.4s\n"
+ "fmla v20.4s, v5.4s, v11.4s\n"
+ "ldr q11, [x27, x14]\n"
+ "fmla v25.4s, v5.4s, v11.4s\n"
+ "fmla v17.4s, v4.4s, v11.4s\n"
+ "fmla v16.4s, v3.4s, v11.4s\n"
+ "fmla v28.4s, v8.4s, v11.4s\n"
+ "fmla v21.4s, v7.4s, v11.4s\n"
+ "fmla v27.4s, v6.4s, v11.4s\n"
+ "ldr q11, [x11, x13]\n"
+ "fmla v20.4s, v8.4s, v10.4s\n"
+ "ldr q10, [x11, x4]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v25.4s, v8.4s, v12.4s\n"
+ "fmla v17.4s, v7.4s, v12.4s\n"
+ "fmla v16.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x27, x4]\n"
+ "fmla v19.4s, v5.4s, v11.4s\n"
+ "fmla v18.4s, v4.4s, v11.4s\n"
+ "fmla v24.4s, v4.4s, v10.4s\n"
+ "fmla v22.4s, v3.4s, v10.4s\n"
+ "fmla v29.4s, v1.4s, v10.4s\n"
+ "fmla v31.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x27, x13]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "fmla v23.4s, v2.4s, v11.4s\n"
"ldr q2, [x16, #0x30]\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
- "ldr q11, [x7, x25]\n"
+ "fmla v26.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x7, x12]\n"
"ldr q1, [x16, #0x20]\n"
- "fmla v20.4s, v7.4s, v12.4s\n"
- "fmla v22.4s, v6.4s, v12.4s\n"
+ "fmla v30.4s, v7.4s, v12.4s\n"
+ "fmla v28.4s, v6.4s, v12.4s\n"
"ldr q6, [x16, #0x70]\n"
- "fmla v21.4s, v4.4s, v12.4s\n"
- "fmla v27.4s, v3.4s, v12.4s\n"
- "ldr q12, [x14, x11]\n"
+ "fmla v20.4s, v4.4s, v12.4s\n"
+ "fmla v25.4s, v3.4s, v12.4s\n"
+ "ldr q12, [x10, x14]\n"
"ldr q3, [x16, #0x40]\n"
- "fmla v23.4s, v8.4s, v10.4s\n"
+ "fmla v21.4s, v8.4s, v10.4s\n"
"ldr q8, [x16, #0x90]\n"
- "fmla v19.4s, v7.4s, v10.4s\n"
+ "fmla v27.4s, v7.4s, v10.4s\n"
"ldr q7, [x16, #0x80]\n"
- "fmla v18.4s, v5.4s, v10.4s\n"
+ "fmla v17.4s, v5.4s, v10.4s\n"
"ldr q5, [x16, #0x60]\n"
- "fmla v30.4s, v4.4s, v10.4s\n"
+ "fmla v16.4s, v4.4s, v10.4s\n"
"ld1 { v10.4s }, [x7]\n"
"ldr q4, [x16, #0x50]\n"
- "fmax v17.4s, v17.4s, v13.4s\n"
- "fmax v29.4s, v29.4s, v13.4s\n"
+ "fmax v24.4s, v24.4s, v14.4s\n"
+ "fmax v22.4s, v22.4s, v14.4s\n"
+ "add x27, x27, #0x10\n"
+ "fmax v19.4s, v19.4s, v14.4s\n"
+ "fmax v18.4s, v18.4s, v14.4s\n"
"add x16, x16, #0xa0\n"
- "fmax v31.4s, v31.4s, v13.4s\n"
- "fmax v26.4s, v26.4s, v13.4s\n"
- "fmax v16.4s, v16.4s, v13.4s\n"
- "fmax v24.4s, v24.4s, v13.4s\n"
- "fmax v20.4s, v20.4s, v13.4s\n"
- "fmax v22.4s, v22.4s, v13.4s\n"
- "fmax v23.4s, v23.4s, v13.4s\n"
- "fmax v19.4s, v19.4s, v13.4s\n"
- "fmax v21.4s, v21.4s, v13.4s\n"
- "fmax v27.4s, v27.4s, v13.4s\n"
- "fmax v18.4s, v18.4s, v13.4s\n"
- "fmax v30.4s, v30.4s, v13.4s\n"
- "fmin v28.4s, v28.4s, v15.4s\n"
- "fmin v25.4s, v25.4s, v15.4s\n"
- "st1 { v28.4s }, [x8]\n"
- "fmin v17.4s, v17.4s, v15.4s\n"
- "fmin v29.4s, v29.4s, v15.4s\n"
- "str q25, [x8, x5]\n"
- "fmin v31.4s, v31.4s, v15.4s\n"
- "fmin v26.4s, v26.4s, v15.4s\n"
- "str q17, [x8, x23]\n"
- "fmin v16.4s, v16.4s, v15.4s\n"
+ "fmax v29.4s, v29.4s, v14.4s\n"
+ "fmax v31.4s, v31.4s, v14.4s\n"
+ "fmax v23.4s, v23.4s, v14.4s\n"
+ "fmax v26.4s, v26.4s, v14.4s\n"
+ "fmax v30.4s, v30.4s, v14.4s\n"
+ "fmax v28.4s, v28.4s, v14.4s\n"
+ "fmax v21.4s, v21.4s, v14.4s\n"
+ "fmax v27.4s, v27.4s, v14.4s\n"
+ "fmax v20.4s, v20.4s, v14.4s\n"
+ "fmax v25.4s, v25.4s, v14.4s\n"
+ "fmax v17.4s, v17.4s, v14.4s\n"
+ "fmax v16.4s, v16.4s, v14.4s\n"
"fmin v24.4s, v24.4s, v15.4s\n"
- "str q29, [x8, x22]\n"
- "add x8, x8, #0x10\n"
- "fmin v20.4s, v20.4s, v15.4s\n"
"fmin v22.4s, v22.4s, v15.4s\n"
- "st1 { v31.4s }, [x10]\n"
- "fmin v23.4s, v23.4s, v15.4s\n"
"fmin v19.4s, v19.4s, v15.4s\n"
- "str q26, [x10, x5]\n"
- "fmin v21.4s, v21.4s, v15.4s\n"
- "fmin v27.4s, v27.4s, v15.4s\n"
- "str q16, [x10, x23]\n"
"fmin v18.4s, v18.4s, v15.4s\n"
+ "fmin v29.4s, v29.4s, v15.4s\n"
+ "fmin v31.4s, v31.4s, v15.4s\n"
+ "fmin v23.4s, v23.4s, v15.4s\n"
+ "fmin v26.4s, v26.4s, v15.4s\n"
+ "st1 { v24.4s }, [x17]\n"
"fmin v30.4s, v30.4s, v15.4s\n"
- "str q24, [x10, x22]\n"
- "add x10, x10, #0x10\n"
- "st1 { v20.4s }, [x27]\n"
- "str q22, [x27, x5]\n"
- "str q23, [x27, x23]\n"
- "str q19, [x27, x22]\n"
- "add x27, x27, #0x10\n"
- "st1 { v21.4s }, [x24]\n"
- "str q27, [x24, x5]\n"
- "str q18, [x24, x23]\n"
- "str q30, [x24, x22]\n"
+ "fmin v28.4s, v28.4s, v15.4s\n"
+ "str q22, [x17, x5]\n"
+ "fmin v21.4s, v21.4s, v15.4s\n"
+ "fmin v27.4s, v27.4s, v15.4s\n"
+ "str q19, [x17, x21]\n"
+ "fmin v20.4s, v20.4s, v15.4s\n"
+ "fmin v25.4s, v25.4s, v15.4s\n"
+ "str q18, [x17, x20]\n"
+ "add x17, x17, #0x10\n"
+ "fmin v17.4s, v17.4s, v15.4s\n"
+ "fmin v16.4s, v16.4s, v15.4s\n"
+ "st1 { v29.4s }, [x28]\n"
+ "str q31, [x28, x5]\n"
+ "str q23, [x28, x21]\n"
+ "str q26, [x28, x20]\n"
+ "add x28, x28, #0x10\n"
+ "st1 { v30.4s }, [x26]\n"
+ "str q28, [x26, x5]\n"
+ "str q21, [x26, x21]\n"
+ "str q27, [x26, x20]\n"
+ "add x26, x26, #0x10\n"
+ "st1 { v20.4s }, [x24]\n"
+ "str q25, [x24, x5]\n"
+ "str q17, [x24, x21]\n"
+ "str q16, [x24, x20]\n"
"add x24, x24, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v16.16b, v14.16b\n fmla v16.4s, v4.4s, v9.4s\n"
- "mov v23.16b, v14.16b\n fmla v23.4s, v8.4s, v9.4s\n"
- "mov v31.16b, v14.16b\n fmla v31.4s, v3.4s, v9.4s\n"
- "mov v30.16b, v14.16b\n fmla v30.4s, v1.4s, v9.4s\n"
- "mov v18.16b, v14.16b\n fmla v18.4s, v0.4s, v9.4s\n"
- "fmla v16.4s, v5.4s, v12.4s\n"
- "mov v17.16b, v14.16b\n fmla v17.4s, v7.4s, v9.4s\n"
- "mov v19.16b, v14.16b\n fmla v19.4s, v6.4s, v9.4s\n"
- "mov v28.16b, v14.16b\n fmla v28.4s, v5.4s, v9.4s\n"
- "mov v27.16b, v14.16b\n fmla v27.4s, v2.4s, v9.4s\n"
- "ldr q24, [x12, x17]\n"
- "fmla v23.4s, v0.4s, v10.4s\n"
- "ld1 { v21.4s }, [x26]\n"
- "mov v29.16b, v14.16b\n fmla v29.4s, v2.4s, v11.4s\n"
- "ldr q20, [x26, x25]\n"
- "fmla v31.4s, v4.4s, v12.4s\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v4.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v8.4s, v9.4s\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v3.4s, v9.4s\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v1.4s, v9.4s\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v0.4s, v9.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v7.4s, v9.4s\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v6.4s, v9.4s\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v5.4s, v9.4s\n"
+ "fmla v31.4s, v5.4s, v12.4s\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v2.4s, v9.4s\n"
+ "ldr q24, [x9, x15]\n"
+ "fmla v17.4s, v0.4s, v10.4s\n"
+ "ld1 { v22.4s }, [x25]\n"
+ "mov v10.16b, v13.16b\n fmla v10.4s, v2.4s, v11.4s\n"
+ "ldr q16, [x25, x12]\n"
+ "fmla v29.4s, v4.4s, v12.4s\n"
"fmla v30.4s, v2.4s, v12.4s\n"
- "fmla v18.4s, v1.4s, v12.4s\n"
- "mov v26.16b, v14.16b\n fmla v26.4s, v6.4s, v21.4s\n"
- "ldr q9, [x12, x11]\n"
- "fmla v16.4s, v7.4s, v24.4s\n"
- "fmla v17.4s, v8.4s, v12.4s\n"
- "fmla v19.4s, v7.4s, v12.4s\n"
- "fmla v29.4s, v6.4s, v12.4s\n"
- "mov v11.16b, v14.16b\n fmla v11.4s, v3.4s, v12.4s\n"
- "mov v10.16b, v14.16b\n fmla v10.4s, v0.4s, v12.4s\n"
- "ldr q22, [x7, x4]\n"
- "mov v25.16b, v14.16b\n fmla v25.4s, v8.4s, v20.4s\n"
- "ldr q21, [x7, x28]\n"
- "fmla v31.4s, v6.4s, v24.4s\n"
+ "fmla v19.4s, v1.4s, v12.4s\n"
+ "fmla v20.4s, v8.4s, v12.4s\n"
+ "mov v9.16b, v13.16b\n fmla v9.4s, v6.4s, v22.4s\n"
+ "ldr q22, [x9, x14]\n"
+ "fmla v31.4s, v7.4s, v24.4s\n"
+ "fmla v21.4s, v7.4s, v12.4s\n"
+ "fmla v10.4s, v6.4s, v12.4s\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v3.4s, v12.4s\n"
+ "mov v11.16b, v13.16b\n fmla v11.4s, v0.4s, v12.4s\n"
+ "ldr q23, [x7, x4]\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v8.4s, v16.4s\n"
+ "ldr q16, [x7, x13]\n"
+ "fmla v29.4s, v6.4s, v24.4s\n"
"fmla v30.4s, v4.4s, v24.4s\n"
- "fmla v18.4s, v3.4s, v24.4s\n"
- "mov v12.16b, v14.16b\n fmla v12.4s, v1.4s, v24.4s\n"
- "fmla v14.4s, v0.4s, v24.4s\n"
- "fmla v28.4s, v8.4s, v24.4s\n"
+ "fmla v19.4s, v3.4s, v24.4s\n"
+ "mov v12.16b, v13.16b\n fmla v12.4s, v1.4s, v24.4s\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v0.4s, v24.4s\n"
+ "fmla v18.4s, v8.4s, v24.4s\n"
"fmla v27.4s, v5.4s, v24.4s\n"
- "fmla v26.4s, v2.4s, v24.4s\n"
- "ld1 { v24.4s }, [x15]\n"
- "fmla v16.4s, v8.4s, v9.4s\n"
- "fmla v23.4s, v1.4s, v22.4s\n"
- "fmla v17.4s, v0.4s, v22.4s\n"
- "ldr q22, [x15, x25]\n"
- "fmla v19.4s, v2.4s, v21.4s\n"
- "fmla v29.4s, v1.4s, v21.4s\n"
- "ld1 { v20.4s }, [x9]\n"
- "fmla v31.4s, v7.4s, v9.4s\n"
- "fmla v11.4s, v6.4s, v9.4s\n"
- "fmla v30.4s, v5.4s, v9.4s\n"
- "fmla v18.4s, v4.4s, v9.4s\n"
- "fmla v10.4s, v3.4s, v9.4s\n"
- "fmla v12.4s, v2.4s, v9.4s\n"
- "fmla v14.4s, v1.4s, v9.4s\n"
- "fmla v25.4s, v0.4s, v9.4s\n"
- "ldr q21, [x15, x17]\n"
- "fmla v28.4s, v0.4s, v24.4s\n"
- "fmla v27.4s, v6.4s, v20.4s\n"
- "fmla v26.4s, v3.4s, v20.4s\n"
- "ldr q20, [x9, x25]\n"
- "fmla v16.4s, v1.4s, v21.4s\n"
- "fmla v23.4s, v3.4s, v24.4s\n"
- "fmla v29.4s, v5.4s, v22.4s\n"
- "fmla v11.4s, v2.4s, v22.4s\n"
- "ldr q22, [x15, x11]\n"
- "fmla v17.4s, v4.4s, v21.4s\n"
- "fmla v19.4s, v3.4s, v21.4s\n"
- "fmla v31.4s, v0.4s, v21.4s\n"
- "fmla v10.4s, v8.4s, v20.4s\n"
- "fmla v25.4s, v5.4s, v20.4s\n"
- "ldr q20, [x26, x4]\n"
- "fmla v28.4s, v2.4s, v21.4s\n"
- "fmla v16.4s, v2.4s, v22.4s\n"
- "fmla v23.4s, v5.4s, v21.4s\n"
- "ldr q21, [x14, x4]\n"
- "fmla v17.4s, v5.4s, v22.4s\n"
+ "fmla v9.4s, v2.4s, v24.4s\n"
+ "ld1 { v24.4s }, [x11]\n"
+ "fmla v31.4s, v8.4s, v22.4s\n"
+ "fmla v17.4s, v1.4s, v23.4s\n"
+ "fmla v20.4s, v0.4s, v23.4s\n"
+ "ldr q23, [x11, x12]\n"
+ "fmla v21.4s, v2.4s, v16.4s\n"
+ "fmla v10.4s, v1.4s, v16.4s\n"
+ "ld1 { v16.4s }, [x27]\n"
+ "fmla v29.4s, v7.4s, v22.4s\n"
+ "fmla v28.4s, v6.4s, v22.4s\n"
+ "fmla v30.4s, v5.4s, v22.4s\n"
"fmla v19.4s, v4.4s, v22.4s\n"
- "fmla v29.4s, v3.4s, v22.4s\n"
+ "fmla v11.4s, v3.4s, v22.4s\n"
+ "fmla v12.4s, v2.4s, v22.4s\n"
+ "fmla v25.4s, v1.4s, v22.4s\n"
+ "fmla v26.4s, v0.4s, v22.4s\n"
+ "ldr q22, [x11, x15]\n"
+ "fmla v18.4s, v0.4s, v24.4s\n"
+ "fmla v27.4s, v6.4s, v16.4s\n"
+ "fmla v9.4s, v3.4s, v16.4s\n"
+ "ldr q16, [x27, x12]\n"
+ "fmla v17.4s, v3.4s, v24.4s\n"
"fmla v31.4s, v1.4s, v22.4s\n"
- "fmla v11.4s, v0.4s, v22.4s\n"
- "ldr q22, [x14, x28]\n"
- "fmla v26.4s, v7.4s, v20.4s\n"
- "fmla v12.4s, v6.4s, v20.4s\n"
- "ldr q20, [x26, x28]\n"
- "fmla v28.4s, v4.4s, v21.4s\n"
- "fmla v16.4s, v3.4s, v21.4s\n"
- "fmla v27.4s, v1.4s, v21.4s\n"
- "fmla v30.4s, v0.4s, v21.4s\n"
- "fmla v23.4s, v7.4s, v21.4s\n"
- "fmla v17.4s, v6.4s, v21.4s\n"
- "ldr q21, [x7, x17]\n"
- "fmla v14.4s, v8.4s, v20.4s\n"
- "fmla v25.4s, v7.4s, v20.4s\n"
- "ldr q20, [x12, x4]\n"
- "fmla v19.4s, v8.4s, v22.4s\n"
- "fmla v29.4s, v7.4s, v22.4s\n"
- "fmla v31.4s, v5.4s, v22.4s\n"
- "fmla v11.4s, v4.4s, v22.4s\n"
+ "fmla v10.4s, v5.4s, v23.4s\n"
+ "fmla v28.4s, v2.4s, v23.4s\n"
+ "ldr q23, [x11, x14]\n"
+ "fmla v20.4s, v4.4s, v22.4s\n"
+ "fmla v21.4s, v3.4s, v22.4s\n"
+ "fmla v29.4s, v0.4s, v22.4s\n"
+ "fmla v11.4s, v8.4s, v16.4s\n"
+ "fmla v26.4s, v5.4s, v16.4s\n"
+ "ldr q16, [x25, x4]\n"
"fmla v18.4s, v2.4s, v22.4s\n"
- "fmla v10.4s, v1.4s, v22.4s\n"
- "ldr q22, [x7, x11]\n"
+ "fmla v31.4s, v2.4s, v23.4s\n"
+ "fmla v17.4s, v5.4s, v22.4s\n"
+ "ldr q22, [x10, x4]\n"
+ "fmla v20.4s, v5.4s, v23.4s\n"
+ "fmla v21.4s, v4.4s, v23.4s\n"
+ "fmla v10.4s, v3.4s, v23.4s\n"
+ "fmla v29.4s, v1.4s, v23.4s\n"
+ "fmla v28.4s, v0.4s, v23.4s\n"
+ "ldr q23, [x10, x13]\n"
+ "fmla v9.4s, v7.4s, v16.4s\n"
+ "fmla v12.4s, v6.4s, v16.4s\n"
+ "ldr q16, [x25, x13]\n"
+ "fmla v18.4s, v4.4s, v22.4s\n"
+ "fmla v31.4s, v3.4s, v22.4s\n"
+ "fmla v27.4s, v1.4s, v22.4s\n"
+ "fmla v30.4s, v0.4s, v22.4s\n"
+ "fmla v17.4s, v7.4s, v22.4s\n"
+ "fmla v20.4s, v6.4s, v22.4s\n"
+ "ldr q22, [x7, x15]\n"
+ "fmla v25.4s, v8.4s, v16.4s\n"
+ "fmla v26.4s, v7.4s, v16.4s\n"
+ "ldr q16, [x9, x4]\n"
+ "fmla v21.4s, v8.4s, v23.4s\n"
+ "fmla v10.4s, v7.4s, v23.4s\n"
+ "fmla v29.4s, v5.4s, v23.4s\n"
+ "fmla v28.4s, v4.4s, v23.4s\n"
+ "fmla v19.4s, v2.4s, v23.4s\n"
+ "fmla v11.4s, v1.4s, v23.4s\n"
+ "ldr q23, [x7, x14]\n"
"add x7, x7, #0x10\n"
- "fmla v28.4s, v7.4s, v20.4s\n"
- "fmla v16.4s, v6.4s, v20.4s\n"
- "fmla v27.4s, v4.4s, v20.4s\n"
- "fmla v30.4s, v3.4s, v20.4s\n"
- "fmla v26.4s, v1.4s, v20.4s\n"
- "fmla v12.4s, v0.4s, v20.4s\n"
- "ldr q20, [x12, x28]\n"
- "fmla v23.4s, v2.4s, v21.4s\n"
- "fmla v17.4s, v1.4s, v21.4s\n"
- "fmla v19.4s, v0.4s, v21.4s\n"
- "ld1 { v21.4s }, [x14]\n"
- "fmla v14.4s, v2.4s, v20.4s\n"
- "fmla v29.4s, v0.4s, v22.4s\n"
- "fmla v28.4s, v3.4s, v21.4s\n"
- "fmla v27.4s, v0.4s, v21.4s\n"
- "fmla v31.4s, v8.4s, v20.4s\n"
- "fmla v11.4s, v7.4s, v20.4s\n"
- "fmla v18.4s, v5.4s, v20.4s\n"
- "fmla v10.4s, v4.4s, v20.4s\n"
- "fmla v25.4s, v1.4s, v20.4s\n"
- "ldr q24, [x9, x17]\n"
+ "fmla v18.4s, v7.4s, v16.4s\n"
+ "fmla v31.4s, v6.4s, v16.4s\n"
+ "fmla v27.4s, v4.4s, v16.4s\n"
+ "fmla v30.4s, v3.4s, v16.4s\n"
+ "fmla v9.4s, v1.4s, v16.4s\n"
+ "fmla v12.4s, v0.4s, v16.4s\n"
+ "ldr q16, [x9, x13]\n"
"fmla v17.4s, v2.4s, v22.4s\n"
- "fmla v19.4s, v1.4s, v22.4s\n"
- "ldr q20, [x14, x25]\n"
- "add x14, x14, #0x10\n"
- "fmla v23.4s, v6.4s, v21.4s\n"
- "ld1 { v21.4s }, [x12]\n"
+ "fmla v20.4s, v1.4s, v22.4s\n"
+ "fmla v21.4s, v0.4s, v22.4s\n"
+ "ld1 { v22.4s }, [x10]\n"
+ "fmla v10.4s, v0.4s, v23.4s\n"
+ "fmla v25.4s, v2.4s, v16.4s\n"
+ "fmla v29.4s, v8.4s, v16.4s\n"
+ "fmla v28.4s, v7.4s, v16.4s\n"
+ "fmla v19.4s, v5.4s, v16.4s\n"
+ "fmla v18.4s, v3.4s, v22.4s\n"
+ "fmla v27.4s, v0.4s, v22.4s\n"
+ "fmla v11.4s, v4.4s, v16.4s\n"
+ "fmla v26.4s, v1.4s, v16.4s\n"
+ "ldr q24, [x27, x15]\n"
+ "fmla v20.4s, v2.4s, v23.4s\n"
+ "fmla v21.4s, v1.4s, v23.4s\n"
+ "ldr q16, [x10, x12]\n"
+ "add x10, x10, #0x10\n"
+ "fmla v17.4s, v6.4s, v22.4s\n"
+ "ld1 { v22.4s }, [x9]\n"
"fmla v12.4s, v4.4s, v24.4s\n"
- "fmla v14.4s, v3.4s, v24.4s\n"
- "fmla v29.4s, v8.4s, v20.4s\n"
- "fmla v11.4s, v5.4s, v20.4s\n"
- "fmla v10.4s, v2.4s, v20.4s\n"
- "ldr q20, [x12, x25]\n"
- "add x12, x12, #0x10\n"
- "fmla v28.4s, v6.4s, v21.4s\n"
- "fmla v27.4s, v3.4s, v21.4s\n"
- "fmla v26.4s, v0.4s, v21.4s\n"
- "ldr q22, [x26, x17]\n"
- "fmla v25.4s, v2.4s, v20.4s\n"
- "fmla v12.4s, v7.4s, v22.4s\n"
- "fmla v14.4s, v6.4s, v22.4s\n"
- "fmla v27.4s, v8.4s, v24.4s\n"
+ "fmla v25.4s, v3.4s, v24.4s\n"
"fmla v30.4s, v7.4s, v24.4s\n"
- "fmla v18.4s, v6.4s, v24.4s\n"
- "fmla v26.4s, v5.4s, v24.4s\n"
- "ldr q21, [x9, x11]\n"
- "fmla v10.4s, v5.4s, v20.4s\n"
- "fmla v12.4s, v5.4s, v21.4s\n"
- "fmla v14.4s, v4.4s, v21.4s\n"
- "fmla v25.4s, v3.4s, v21.4s\n"
- "fmla v11.4s, v8.4s, v20.4s\n"
- "ldr q20, [x26, x11]\n"
- "fmla v26.4s, v8.4s, v22.4s\n"
- "ldr q9, [x15, x4]\n"
- "fmla v30.4s, v8.4s, v21.4s\n"
- "fmla v18.4s, v7.4s, v21.4s\n"
- "add x26, x26, #0x10\n"
- "fmla v10.4s, v6.4s, v21.4s\n"
- "ldr q21, [x15, x28]\n"
- "fmla v12.4s, v8.4s, v20.4s\n"
- "add x15, x15, #0x10\n"
- "fmla v14.4s, v7.4s, v20.4s\n"
- "fmla v25.4s, v6.4s, v20.4s\n"
- "ldr q24, [x9, x4]\n"
- "fmla v23.4s, v4.4s, v9.4s\n"
- "fmla v17.4s, v3.4s, v9.4s\n"
- "fmax v23.4s, v23.4s, v13.4s\n"
- "fmla v28.4s, v1.4s, v9.4s\n"
- "fmla v16.4s, v0.4s, v9.4s\n"
- "ldr q0, [x9, x28]\n"
- "fmax v17.4s, v17.4s, v13.4s\n"
- "fmla v19.4s, v5.4s, v21.4s\n"
- "fmla v29.4s, v4.4s, v21.4s\n"
- "fmax v19.4s, v19.4s, v13.4s\n"
+ "fmla v10.4s, v8.4s, v16.4s\n"
+ "fmla v28.4s, v5.4s, v16.4s\n"
+ "fmla v11.4s, v2.4s, v16.4s\n"
+ "ldr q16, [x9, x12]\n"
+ "fmla v18.4s, v6.4s, v22.4s\n"
"add x9, x9, #0x10\n"
- "fmla v31.4s, v2.4s, v21.4s\n"
- "fmla v11.4s, v1.4s, v21.4s\n"
- "fmax v29.4s, v29.4s, v13.4s\n"
- "fmla v27.4s, v7.4s, v24.4s\n"
- "fmla v30.4s, v6.4s, v24.4s\n"
- "fmax v28.4s, v28.4s, v13.4s\n"
- "fmla v26.4s, v4.4s, v24.4s\n"
- "fmla v12.4s, v3.4s, v24.4s\n"
- "fmax v16.4s, v16.4s, v13.4s\n"
- "fmla v18.4s, v8.4s, v0.4s\n"
- "fmla v10.4s, v7.4s, v0.4s\n"
- "fmax v31.4s, v31.4s, v13.4s\n"
- "fmla v14.4s, v5.4s, v0.4s\n"
- "fmla v25.4s, v4.4s, v0.4s\n"
- "fmax v11.4s, v11.4s, v13.4s\n"
- "fmax v27.4s, v27.4s, v13.4s\n"
- "fmax v30.4s, v30.4s, v13.4s\n"
- "fmax v18.4s, v18.4s, v13.4s\n"
- "fmax v10.4s, v10.4s, v13.4s\n"
- "fmax v26.4s, v26.4s, v13.4s\n"
- "fmax v12.4s, v12.4s, v13.4s\n"
- "fmax v14.4s, v14.4s, v13.4s\n"
- "fmax v25.4s, v25.4s, v13.4s\n"
- "fmin v23.4s, v23.4s, v15.4s\n"
+ "fmla v27.4s, v3.4s, v22.4s\n"
+ "fmla v9.4s, v0.4s, v22.4s\n"
+ "ldr q23, [x25, x15]\n"
+ "fmla v19.4s, v6.4s, v24.4s\n"
+ "fmla v26.4s, v2.4s, v16.4s\n"
+ "fmla v28.4s, v8.4s, v16.4s\n"
+ "fmla v12.4s, v7.4s, v23.4s\n"
+ "fmla v25.4s, v6.4s, v23.4s\n"
+ "fmla v11.4s, v5.4s, v16.4s\n"
+ "ldr q22, [x25, x14]\n"
+ "add x25, x25, #0x10\n"
+ "fmla v27.4s, v8.4s, v24.4s\n"
+ "fmla v9.4s, v5.4s, v24.4s\n"
+ "ldr q16, [x27, x14]\n"
+ "fmla v12.4s, v5.4s, v16.4s\n"
+ "fmla v25.4s, v4.4s, v16.4s\n"
+ "fmla v26.4s, v3.4s, v16.4s\n"
+ "fmla v30.4s, v8.4s, v16.4s\n"
+ "fmla v19.4s, v7.4s, v16.4s\n"
+ "fmla v11.4s, v6.4s, v16.4s\n"
+ "ldr q24, [x11, x13]\n"
+ "fmla v9.4s, v8.4s, v23.4s\n"
+ "ldr q16, [x11, x4]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v12.4s, v8.4s, v22.4s\n"
+ "fmla v25.4s, v7.4s, v22.4s\n"
+ "fmla v26.4s, v6.4s, v22.4s\n"
+ "ldr q23, [x27, x4]\n"
+ "fmla v21.4s, v5.4s, v24.4s\n"
+ "fmla v10.4s, v4.4s, v24.4s\n"
+ "fmla v17.4s, v4.4s, v16.4s\n"
+ "fmla v20.4s, v3.4s, v16.4s\n"
+ "fmla v18.4s, v1.4s, v16.4s\n"
+ "fmla v31.4s, v0.4s, v16.4s\n"
+ "ldr q16, [x27, x13]\n"
+ "add x27, x27, #0x10\n"
+ "fmla v29.4s, v2.4s, v24.4s\n"
+ "fmla v28.4s, v1.4s, v24.4s\n"
+ "fmla v27.4s, v7.4s, v23.4s\n"
+ "fmla v30.4s, v6.4s, v23.4s\n"
+ "fmax v21.4s, v21.4s, v14.4s\n"
+ "fmla v9.4s, v4.4s, v23.4s\n"
+ "fmla v12.4s, v3.4s, v23.4s\n"
+ "fmax v17.4s, v17.4s, v14.4s\n"
+ "fmla v19.4s, v8.4s, v16.4s\n"
+ "fmla v11.4s, v7.4s, v16.4s\n"
+ "fmax v20.4s, v20.4s, v14.4s\n"
+ "fmla v25.4s, v5.4s, v16.4s\n"
+ "fmla v26.4s, v4.4s, v16.4s\n"
+ "fmax v10.4s, v10.4s, v14.4s\n"
+ "fmax v18.4s, v18.4s, v14.4s\n"
+ "fmax v31.4s, v31.4s, v14.4s\n"
+ "fmax v29.4s, v29.4s, v14.4s\n"
+ "fmax v28.4s, v28.4s, v14.4s\n"
+ "fmax v27.4s, v27.4s, v14.4s\n"
+ "fmax v30.4s, v30.4s, v14.4s\n"
+ "fmax v19.4s, v19.4s, v14.4s\n"
+ "fmax v11.4s, v11.4s, v14.4s\n"
+ "fmax v9.4s, v9.4s, v14.4s\n"
+ "fmax v12.4s, v12.4s, v14.4s\n"
+ "fmax v25.4s, v25.4s, v14.4s\n"
+ "fmax v26.4s, v26.4s, v14.4s\n"
"fmin v17.4s, v17.4s, v15.4s\n"
- "st1 { v23.4s }, [x8]\n"
- "fmin v19.4s, v19.4s, v15.4s\n"
+ "fmin v20.4s, v20.4s, v15.4s\n"
+ "fmin v21.4s, v21.4s, v15.4s\n"
+ "fmin v10.4s, v10.4s, v15.4s\n"
+ "fmin v18.4s, v18.4s, v15.4s\n"
+ "fmin v31.4s, v31.4s, v15.4s\n"
"fmin v29.4s, v29.4s, v15.4s\n"
- "str q17, [x8, x5]\n"
"fmin v28.4s, v28.4s, v15.4s\n"
- "fmin v16.4s, v16.4s, v15.4s\n"
- "str q19, [x8, x23]\n"
- "fmin v31.4s, v31.4s, v15.4s\n"
- "fmin v11.4s, v11.4s, v15.4s\n"
- "str q29, [x8, x22]\n"
- "add x8, x8, #0x10\n"
+ "st1 { v17.4s }, [x17]\n"
"fmin v27.4s, v27.4s, v15.4s\n"
"fmin v30.4s, v30.4s, v15.4s\n"
- "st1 { v28.4s }, [x10]\n"
- "fmin v18.4s, v18.4s, v15.4s\n"
- "fmin v10.4s, v10.4s, v15.4s\n"
- "str q16, [x10, x5]\n"
- "fmin v26.4s, v26.4s, v15.4s\n"
+ "str q20, [x17, x5]\n"
+ "fmin v19.4s, v19.4s, v15.4s\n"
+ "fmin v11.4s, v11.4s, v15.4s\n"
+ "str q21, [x17, x21]\n"
+ "fmin v9.4s, v9.4s, v15.4s\n"
"fmin v12.4s, v12.4s, v15.4s\n"
- "str q31, [x10, x23]\n"
- "fmin v14.4s, v14.4s, v15.4s\n"
+ "str q10, [x17, x20]\n"
+ "add x17, x17, #0x10\n"
"fmin v25.4s, v25.4s, v15.4s\n"
- "str q11, [x10, x22]\n"
- "add x10, x10, #0x10\n"
- "st1 { v27.4s }, [x27]\n"
- "str q30, [x27, x5]\n"
- "str q18, [x27, x23]\n"
- "str q10, [x27, x22]\n"
- "add x27, x27, #0x10\n"
- "st1 { v26.4s }, [x24]\n"
+ "fmin v26.4s, v26.4s, v15.4s\n"
+ "st1 { v18.4s }, [x28]\n"
+ "str q31, [x28, x5]\n"
+ "str q29, [x28, x21]\n"
+ "str q28, [x28, x20]\n"
+ "add x28, x28, #0x10\n"
+ "st1 { v27.4s }, [x26]\n"
+ "str q30, [x26, x5]\n"
+ "str q19, [x26, x21]\n"
+ "str q11, [x26, x20]\n"
+ "add x26, x26, #0x10\n"
+ "st1 { v9.4s }, [x24]\n"
"str q12, [x24, x5]\n"
- "str q14, [x24, x23]\n"
- "str q25, [x24, x22]\n"
+ "str q25, [x24, x21]\n"
+ "str q26, [x24, x20]\n"
"add x24, x24, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 73f\n"
- "ldr q14, [x16, #0x0]\n"
+ "ldr q13, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "add x23, x14, x17\n"
+ "add x23, x10, x15\n"
"add x22, x7, XZR\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
- "add x21, x7, x25\n"
- "add x20, x14, x11\n"
+ "add x21, x7, x12\n"
+ "add x20, x10, x14\n"
"ldr q3, [x16, #0x40]\n"
"ldr q4, [x16, #0x50]\n"
"ldr q5, [x16, #0x60]\n"
@@ -675,27 +675,27 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ldr s11, [x21, #0x0]\n"
"ldr s12, [x20, #0x0]\n"
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 5), (2, 3): Bit 1: End
- "mov v16.16b, v14.16b\n fmla v16.4s, v8.4s, v9.4s\n"
- "mov v17.16b, v14.16b\n fmla v17.4s, v7.4s, v9.4s\n"
- "add x20, x26, XZR\n"
- "mov v18.16b, v14.16b\n fmla v18.4s, v6.4s, v9.4s\n"
- "mov v21.16b, v14.16b\n fmla v21.4s, v4.4s, v9.4s\n"
- "mov v22.16b, v14.16b\n fmla v22.4s, v3.4s, v9.4s\n"
- "mov v25.16b, v14.16b\n fmla v25.4s, v1.4s, v9.4s\n"
- "mov v26.16b, v14.16b\n fmla v26.4s, v0.4s, v9.4s\n"
- "mov v19.16b, v14.16b\n fmla v19.4s, v2.4s, v11.4s\n"
- "mov v20.16b, v14.16b\n fmla v20.4s, v5.4s, v9.4s\n"
- "mov v24.16b, v14.16b\n fmla v24.4s, v2.4s, v9.4s\n"
+ "mov v16.16b, v13.16b\n fmla v16.4s, v8.4s, v9.4s\n"
+ "mov v17.16b, v13.16b\n fmla v17.4s, v7.4s, v9.4s\n"
+ "add x20, x25, XZR\n"
+ "mov v18.16b, v13.16b\n fmla v18.4s, v6.4s, v9.4s\n"
+ "mov v21.16b, v13.16b\n fmla v21.4s, v4.4s, v9.4s\n"
+ "mov v22.16b, v13.16b\n fmla v22.4s, v3.4s, v9.4s\n"
+ "mov v25.16b, v13.16b\n fmla v25.4s, v1.4s, v9.4s\n"
+ "mov v26.16b, v13.16b\n fmla v26.4s, v0.4s, v9.4s\n"
+ "mov v19.16b, v13.16b\n fmla v19.4s, v2.4s, v11.4s\n"
+ "mov v20.16b, v13.16b\n fmla v20.4s, v5.4s, v9.4s\n"
+ "mov v24.16b, v13.16b\n fmla v24.4s, v2.4s, v9.4s\n"
"fmla v16.4s, v0.4s, v10.4s\n"
"fmla v17.4s, v8.4s, v12.4s\n"
"fmla v18.4s, v7.4s, v12.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
"fmla v21.4s, v5.4s, v12.4s\n"
+ "fmla v19.4s, v6.4s, v12.4s\n"
"fmla v22.4s, v4.4s, v12.4s\n"
- "mov v23.16b, v14.16b\n fmla v23.4s, v3.4s, v12.4s\n"
+ "mov v23.16b, v13.16b\n fmla v23.4s, v3.4s, v12.4s\n"
"fmla v25.4s, v2.4s, v12.4s\n"
"fmla v26.4s, v1.4s, v12.4s\n"
- "mov v27.16b, v14.16b\n fmla v27.4s, v0.4s, v12.4s\n"
+ "mov v27.16b, v13.16b\n fmla v27.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 7f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
@@ -704,8 +704,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"7:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 1: Unset
"ldr s10, [x20, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 1: End
- "mov v28.16b, v14.16b\n fmla v28.4s, v6.4s, v10.4s\n"
- "add x20, x26, x25\n"
+ "mov v28.16b, v13.16b\n fmla v28.4s, v6.4s, v10.4s\n"
+ "add x20, x25, x12\n"
"tbz %x[n_channels], #1, 9f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
@@ -714,8 +714,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"9:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 1: Unset
"ldr s11, [x20, #0x0]\n"
"10:" // Tile loop: Oddments: Load inputs: (5, 5): Bit 1: End
- "mov v31.16b, v14.16b\n fmla v31.4s, v8.4s, v11.4s\n"
- "add x20, x12, x17\n"
+ "mov v31.16b, v13.16b\n fmla v31.4s, v8.4s, v11.4s\n"
+ "add x20, x9, x15\n"
"tbz %x[n_channels], #1, 11f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #0, 12f\n"
@@ -732,8 +732,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla v25.4s, v4.4s, v9.4s\n"
"fmla v26.4s, v3.4s, v9.4s\n"
"fmla v28.4s, v2.4s, v9.4s\n"
- "mov v29.16b, v14.16b\n fmla v29.4s, v1.4s, v9.4s\n"
- "mov v30.16b, v14.16b\n fmla v30.4s, v0.4s, v9.4s\n"
+ "mov v29.16b, v13.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "mov v30.16b, v13.16b\n fmla v30.4s, v0.4s, v9.4s\n"
"tbz %x[n_channels], #1, 13f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
@@ -744,7 +744,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"14:" // Tile loop: Oddments: Load inputs: (0, 1): Bit 1: End
"fmla v16.4s, v1.4s, v12.4s\n"
"fmla v17.4s, v0.4s, v12.4s\n"
- "add x20, x7, x28\n"
+ "add x20, x7, x13\n"
"tbz %x[n_channels], #1, 15f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
@@ -755,7 +755,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"16:" // Tile loop: Oddments: Load inputs: (0, 4): Bit 1: End
"fmla v18.4s, v2.4s, v11.4s\n"
"fmla v19.4s, v1.4s, v11.4s\n"
- "add x20, x12, x11\n"
+ "add x20, x9, x14\n"
"tbz %x[n_channels], #1, 17f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 18f\n"
@@ -766,7 +766,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"18:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
"fmla v21.4s, v8.4s, v10.4s\n"
"fmla v22.4s, v7.4s, v10.4s\n"
- "add x20, x15, XZR\n"
+ "add x20, x11, XZR\n"
"fmla v23.4s, v6.4s, v10.4s\n"
"fmla v25.4s, v5.4s, v10.4s\n"
"fmla v26.4s, v4.4s, v10.4s\n"
@@ -784,7 +784,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"20:" // Tile loop: Oddments: Load inputs: (1, 0): Bit 1: End
"fmla v16.4s, v3.4s, v9.4s\n"
"fmla v20.4s, v0.4s, v9.4s\n"
- "add x20, x15, x25\n"
+ "add x20, x11, x12\n"
"tbz %x[n_channels], #1, 21f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
@@ -795,7 +795,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"22:" // Tile loop: Oddments: Load inputs: (1, 5): Bit 1: End
"fmla v19.4s, v5.4s, v12.4s\n"
"fmla v23.4s, v2.4s, v12.4s\n"
- "add x20, x9, XZR\n"
+ "add x20, x27, XZR\n"
"tbz %x[n_channels], #1, 23f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
@@ -806,7 +806,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"24:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: End
"fmla v24.4s, v6.4s, v11.4s\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x15, x17\n"
+ "add x20, x11, x15\n"
"tbz %x[n_channels], #1, 25f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
@@ -817,7 +817,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"26:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 1: End
"fmla v16.4s, v5.4s, v10.4s\n"
"fmla v17.4s, v4.4s, v10.4s\n"
- "add x20, x9, x25\n"
+ "add x20, x27, x12\n"
"fmla v18.4s, v3.4s, v10.4s\n"
"fmla v20.4s, v2.4s, v10.4s\n"
"fmla v21.4s, v1.4s, v10.4s\n"
@@ -832,7 +832,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"28:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 1: End
"fmla v27.4s, v8.4s, v11.4s\n"
"fmla v31.4s, v5.4s, v11.4s\n"
- "add x20, x15, x11\n"
+ "add x20, x11, x14\n"
"tbz %x[n_channels], #1, 29f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 30f\n"
@@ -843,7 +843,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"30:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
"fmla v17.4s, v5.4s, v12.4s\n"
"fmla v18.4s, v4.4s, v12.4s\n"
- "add x20, x26, x4\n"
+ "add x20, x25, x4\n"
"fmla v19.4s, v3.4s, v12.4s\n"
"fmla v21.4s, v2.4s, v12.4s\n"
"fmla v22.4s, v1.4s, v12.4s\n"
@@ -858,7 +858,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"32:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 1: End
"fmla v28.4s, v7.4s, v11.4s\n"
"fmla v29.4s, v6.4s, v11.4s\n"
- "add x20, x14, x4\n"
+ "add x20, x10, x4\n"
"tbz %x[n_channels], #1, 33f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 34f\n"
@@ -869,7 +869,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"34:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: End
"fmla v16.4s, v7.4s, v10.4s\n"
"fmla v17.4s, v6.4s, v10.4s\n"
- "add x20, x26, x28\n"
+ "add x20, x25, x13\n"
"fmla v20.4s, v4.4s, v10.4s\n"
"fmla v21.4s, v3.4s, v10.4s\n"
"fmla v24.4s, v1.4s, v10.4s\n"
@@ -884,7 +884,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"36:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 1: End
"fmla v30.4s, v8.4s, v11.4s\n"
"fmla v31.4s, v7.4s, v11.4s\n"
- "add x20, x14, x28\n"
+ "add x20, x10, x13\n"
"tbz %x[n_channels], #1, 37f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 38f\n"
@@ -895,7 +895,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"38:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: End
"fmla v18.4s, v8.4s, v12.4s\n"
"fmla v19.4s, v7.4s, v12.4s\n"
- "add x20, x7, x17\n"
+ "add x20, x7, x15\n"
"fmla v22.4s, v5.4s, v12.4s\n"
"fmla v23.4s, v4.4s, v12.4s\n"
"fmla v26.4s, v2.4s, v12.4s\n"
@@ -910,7 +910,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"40:" // Tile loop: Oddments: Load inputs: (0, 2): Bit 1: End
"fmla v16.4s, v2.4s, v10.4s\n"
"fmla v17.4s, v1.4s, v10.4s\n"
- "add x20, x12, x4\n"
+ "add x20, x9, x4\n"
"fmla v18.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 41f\n"
"ldr d11, [x20], #0x8\n"
@@ -922,7 +922,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"42:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
"fmla v20.4s, v7.4s, v11.4s\n"
"fmla v21.4s, v6.4s, v11.4s\n"
- "add x20, x7, x11\n"
+ "add x20, x7, x14\n"
"fmla v24.4s, v4.4s, v11.4s\n"
"fmla v25.4s, v3.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v11.4s\n"
@@ -937,7 +937,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"44:" // Tile loop: Oddments: Load inputs: (0, 3): Bit 1: End
"fmla v17.4s, v2.4s, v12.4s\n"
"fmla v18.4s, v1.4s, v12.4s\n"
- "add x20, x14, XZR\n"
+ "add x20, x10, XZR\n"
"fmla v19.4s, v0.4s, v12.4s\n"
"tbz %x[n_channels], #1, 45f\n"
"ldr d10, [x20], #0x8\n"
@@ -949,7 +949,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"46:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: End
"fmla v16.4s, v6.4s, v10.4s\n"
"fmla v20.4s, v3.4s, v10.4s\n"
- "add x20, x12, x28\n"
+ "add x20, x9, x13\n"
"fmla v24.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 47f\n"
"ldr d11, [x20], #0x8\n"
@@ -961,7 +961,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"48:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: End
"fmla v22.4s, v8.4s, v11.4s\n"
"fmla v23.4s, v7.4s, v11.4s\n"
- "add x20, x14, x25\n"
+ "add x20, x10, x12\n"
"fmla v26.4s, v5.4s, v11.4s\n"
"fmla v27.4s, v4.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v11.4s\n"
@@ -976,7 +976,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"50:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 1: End
"fmla v19.4s, v8.4s, v12.4s\n"
"fmla v23.4s, v5.4s, v12.4s\n"
- "add x20, x12, XZR\n"
+ "add x20, x9, XZR\n"
"fmla v27.4s, v2.4s, v12.4s\n"
"tbz %x[n_channels], #1, 51f\n"
"ldr d10, [x20], #0x8\n"
@@ -988,7 +988,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"52:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
"fmla v20.4s, v6.4s, v10.4s\n"
"fmla v24.4s, v3.4s, v10.4s\n"
- "add x20, x9, x17\n"
+ "add x20, x27, x15\n"
"fmla v28.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 53f\n"
"ldr d11, [x20], #0x8\n"
@@ -1000,7 +1000,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"54:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: End
"fmla v24.4s, v8.4s, v11.4s\n"
"fmla v25.4s, v7.4s, v11.4s\n"
- "add x20, x12, x25\n"
+ "add x20, x9, x12\n"
"fmla v26.4s, v6.4s, v11.4s\n"
"fmla v28.4s, v5.4s, v11.4s\n"
"fmla v29.4s, v4.4s, v11.4s\n"
@@ -1015,7 +1015,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"56:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 1: End
"fmla v23.4s, v8.4s, v12.4s\n"
"fmla v27.4s, v5.4s, v12.4s\n"
- "add x20, x26, x17\n"
+ "add x20, x25, x15\n"
"fmla v31.4s, v2.4s, v12.4s\n"
"tbz %x[n_channels], #1, 57f\n"
"ldr d10, [x20], #0x8\n"
@@ -1027,7 +1027,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"58:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 1: End
"fmla v28.4s, v8.4s, v10.4s\n"
"fmla v29.4s, v7.4s, v10.4s\n"
- "add x20, x9, x11\n"
+ "add x20, x27, x14\n"
"fmla v30.4s, v6.4s, v10.4s\n"
"tbz %x[n_channels], #1, 59f\n"
"ldr d11, [x20], #0x8\n"
@@ -1039,7 +1039,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"60:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: End
"fmla v25.4s, v8.4s, v11.4s\n"
"fmla v26.4s, v7.4s, v11.4s\n"
- "add x20, x26, x11\n"
+ "add x20, x25, x14\n"
"fmla v27.4s, v6.4s, v11.4s\n"
"fmla v29.4s, v5.4s, v11.4s\n"
"fmla v30.4s, v4.4s, v11.4s\n"
@@ -1054,7 +1054,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"62:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 1: End
"fmla v29.4s, v8.4s, v12.4s\n"
"fmla v30.4s, v7.4s, v12.4s\n"
- "add x20, x15, x4\n"
+ "add x20, x11, x4\n"
"fmla v31.4s, v6.4s, v12.4s\n"
"tbz %x[n_channels], #1, 63f\n"
"ldr d10, [x20], #0x8\n"
@@ -1066,7 +1066,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"64:" // Tile loop: Oddments: Load inputs: (1, 1): Bit 1: End
"fmla v16.4s, v4.4s, v10.4s\n"
"fmla v17.4s, v3.4s, v10.4s\n"
- "add x20, x15, x28\n"
+ "add x20, x11, x13\n"
"fmla v20.4s, v1.4s, v10.4s\n"
"fmla v21.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 65f\n"
@@ -1079,7 +1079,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"66:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: End
"fmla v18.4s, v5.4s, v11.4s\n"
"fmla v19.4s, v4.4s, v11.4s\n"
- "add x20, x9, x4\n"
+ "add x20, x27, x4\n"
"fmla v22.4s, v2.4s, v11.4s\n"
"fmla v23.4s, v1.4s, v11.4s\n"
"tbz %x[n_channels], #1, 67f\n"
@@ -1092,7 +1092,7 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"68:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: End
"fmla v24.4s, v7.4s, v12.4s\n"
"fmla v25.4s, v6.4s, v12.4s\n"
- "add x20, x9, x28\n"
+ "add x20, x27, x13\n"
"fmla v28.4s, v4.4s, v12.4s\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"tbz %x[n_channels], #1, 69f\n"
@@ -1105,24 +1105,24 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"70:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: End
"fmla v26.4s, v8.4s, v10.4s\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v13.4s\n"
+ "fmax v16.4s, v16.4s, v14.4s\n"
"fmla v30.4s, v5.4s, v10.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "fmax v17.4s, v17.4s, v13.4s\n"
- "fmax v18.4s, v18.4s, v13.4s\n"
- "fmax v19.4s, v19.4s, v13.4s\n"
- "fmax v20.4s, v20.4s, v13.4s\n"
- "fmax v21.4s, v21.4s, v13.4s\n"
- "fmax v22.4s, v22.4s, v13.4s\n"
- "fmax v23.4s, v23.4s, v13.4s\n"
- "fmax v24.4s, v24.4s, v13.4s\n"
- "fmax v25.4s, v25.4s, v13.4s\n"
- "fmax v26.4s, v26.4s, v13.4s\n"
- "fmax v27.4s, v27.4s, v13.4s\n"
- "fmax v28.4s, v28.4s, v13.4s\n"
- "fmax v29.4s, v29.4s, v13.4s\n"
- "fmax v30.4s, v30.4s, v13.4s\n"
- "fmax v31.4s, v31.4s, v13.4s\n"
+ "fmax v17.4s, v17.4s, v14.4s\n"
+ "fmax v18.4s, v18.4s, v14.4s\n"
+ "fmax v19.4s, v19.4s, v14.4s\n"
+ "fmax v20.4s, v20.4s, v14.4s\n"
+ "fmax v21.4s, v21.4s, v14.4s\n"
+ "fmax v22.4s, v22.4s, v14.4s\n"
+ "fmax v23.4s, v23.4s, v14.4s\n"
+ "fmax v24.4s, v24.4s, v14.4s\n"
+ "fmax v25.4s, v25.4s, v14.4s\n"
+ "fmax v26.4s, v26.4s, v14.4s\n"
+ "fmax v27.4s, v27.4s, v14.4s\n"
+ "fmax v28.4s, v28.4s, v14.4s\n"
+ "fmax v29.4s, v29.4s, v14.4s\n"
+ "fmax v30.4s, v30.4s, v14.4s\n"
+ "fmax v31.4s, v31.4s, v14.4s\n"
"fmin v16.4s, v16.4s, v15.4s\n"
"fmin v17.4s, v17.4s, v15.4s\n"
"fmin v18.4s, v18.4s, v15.4s\n"
@@ -1140,18 +1140,18 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmin v30.4s, v30.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v15.4s\n"
"tbz %x[n_channels], #1, 71f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.d }[0], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "add x17, x17, #0x8\n"
+ "add x28, x28, #0x8\n"
+ "st1 { v16.d }[0], [x23], x5\n"
"st1 { v20.d }[0], [x22], x5\n"
+ "add x26, x26, #0x8\n"
+ "add x24, x24, #0x8\n"
"st1 { v24.d }[0], [x21], x5\n"
- "add x8, x8, #0x8\n"
- "add x10, x10, #0x8\n"
"st1 { v28.d }[0], [x20], x5\n"
- "add x27, x27, #0x8\n"
- "add x24, x24, #0x8\n"
"st1 { v17.d }[0], [x23], x5\n"
"st1 { v21.d }[0], [x22], x5\n"
"st1 { v25.d }[0], [x21], x5\n"
@@ -1165,15 +1165,15 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"st1 { v27.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #0, 72f\n"
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.s }[2], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "st1 { v16.s }[2], [x23], x5\n"
+ "st1 { v17.s }[2], [x23], x5\n"
"st1 { v20.s }[2], [x22], x5\n"
"st1 { v24.s }[2], [x21], x5\n"
"st1 { v28.s }[2], [x20], x5\n"
- "st1 { v17.s }[2], [x23], x5\n"
"st1 { v21.s }[2], [x22], x5\n"
"st1 { v25.s }[2], [x21], x5\n"
"st1 { v29.s }[2], [x20], x5\n"
@@ -1187,15 +1187,15 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"st1 { v31.s }[2], [x20]\n"
"b 72f\n"
"71:" // Tile loop: Oddments: Store: Bit 1: Unset
- "mov x23, x8\n"
- "mov x22, x10\n"
- "st1 { v16.s }[0], [x23], x5\n"
- "mov x21, x27\n"
+ "mov x23, x17\n"
+ "mov x22, x28\n"
+ "mov x21, x26\n"
"mov x20, x24\n"
+ "st1 { v16.s }[0], [x23], x5\n"
+ "st1 { v17.s }[0], [x23], x5\n"
"st1 { v20.s }[0], [x22], x5\n"
"st1 { v24.s }[0], [x21], x5\n"
"st1 { v28.s }[0], [x20], x5\n"
- "st1 { v17.s }[0], [x23], x5\n"
"st1 { v21.s }[0], [x22], x5\n"
"st1 { v25.s }[0], [x21], x5\n"
"st1 { v29.s }[0], [x20], x5\n"
@@ -1209,20 +1209,20 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"st1 { v31.s }[0], [x20]\n"
"72:" // Tile loop: Oddments: Store: Bit 1: End
"73:" // Tile loop: End
- "ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x26, x26, #0x1\n"
- "add x21, x27, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x26, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x27, x27, x21, LT\n"
- "csel x26, x26, XZR, LT\n"
- "cmp x27, x20\n"
+ "ldr x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x28, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x27, x27, #0x1\n"
+ "add x20, x28, #0x1\n"
+ "cmp x27, x22\n"
+ "csel x28, x28, x20, LT\n"
+ "csel x27, x27, XZR, LT\n"
+ "cmp x28, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 76045f30d6..95ed57d48d 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -102,9 +102,9 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"lsr x7, %x[n_channels], #0x2\n"
"ldr x8, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v13.4s }, [x20]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v15.4s }, [x21]\n"
"ld1r { v14.4s }, [x20]\n"
"add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
"mov x15, #0x0\n"
@@ -122,583 +122,583 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr q7, [x17, #0x80]\n"
"ldr q8, [x17, #0x90]\n"
"add x17, x17, #0xa0\n"
- "ldp x21, x20, [x16, #0x0]\n"
- "ldr q9, [x21, x15]\n"
- "ldr q10, [x20, x15]\n"
+ "ldp x23, x22, [x16, #0x0]\n"
"ldp x21, x20, [x16, #0x10]\n"
+ "ldr q9, [x23, x15]\n"
+ "ldr q10, [x22, x15]\n"
"ldr q11, [x21, x15]\n"
"ldr q12, [x20, x15]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v23.16b, v30.16b\n fmla v23.4s, v4.4s, v9.4s\n"
- "mov v17.16b, v30.16b\n fmla v17.4s, v8.4s, v9.4s\n"
+ "mov v21.16b, v30.16b\n fmla v21.4s, v4.4s, v9.4s\n"
+ "mov v26.16b, v30.16b\n fmla v26.4s, v8.4s, v9.4s\n"
"ldr x27, [x16, #0x20]\n"
"ldr x24, [x16, #0x30]\n"
- "mov v25.16b, v30.16b\n fmla v25.4s, v3.4s, v9.4s\n"
- "mov v28.16b, v30.16b\n fmla v28.4s, v1.4s, v9.4s\n"
+ "mov v27.16b, v30.16b\n fmla v27.4s, v3.4s, v9.4s\n"
+ "mov v31.16b, v30.16b\n fmla v31.4s, v1.4s, v9.4s\n"
"ldr x23, [x16, #0x28]\n"
"ldr x22, [x16, #0x38]\n"
- "mov v20.16b, v30.16b\n fmla v20.4s, v0.4s, v9.4s\n"
- "mov v16.16b, v30.16b\n fmla v16.4s, v7.4s, v9.4s\n"
+ "mov v28.16b, v30.16b\n fmla v28.4s, v0.4s, v9.4s\n"
+ "mov v18.16b, v30.16b\n fmla v18.4s, v7.4s, v9.4s\n"
"ldr x26, [x16, #0x40]\n"
"ldr x20, [x16, #0x48]\n"
- "mov v15.16b, v30.16b\n fmla v15.4s, v6.4s, v9.4s\n"
- "fmla v23.4s, v5.4s, v12.4s\n"
+ "mov v20.16b, v30.16b\n fmla v20.4s, v6.4s, v9.4s\n"
+ "mov v13.16b, v30.16b\n fmla v13.4s, v5.4s, v9.4s\n"
"ldr x25, [x16, #0x50]\n"
"ldr x21, [x16, #0x58]\n"
- "mov v27.16b, v30.16b\n fmla v27.4s, v5.4s, v9.4s\n"
- "mov v31.16b, v30.16b\n fmla v31.4s, v2.4s, v9.4s\n"
+ "fmla v21.4s, v5.4s, v12.4s\n"
+ "mov v16.16b, v30.16b\n fmla v16.4s, v2.4s, v9.4s\n"
"ldr q9, [x24, x15]\n"
"ldr x13, [x16, #0x70]\n"
- "fmla v17.4s, v0.4s, v10.4s\n"
- "ldr q22, [x27, x15]\n"
- "mov v10.16b, v30.16b\n fmla v10.4s, v2.4s, v11.4s\n"
- "ldr q18, [x23, x15]\n"
- "fmla v25.4s, v4.4s, v12.4s\n"
- "fmla v28.4s, v2.4s, v12.4s\n"
+ "fmla v26.4s, v0.4s, v10.4s\n"
+ "ldr q17, [x27, x15]\n"
+ "mov v25.16b, v30.16b\n fmla v25.4s, v2.4s, v11.4s\n"
+ "ldr q29, [x23, x15]\n"
+ "fmla v27.4s, v4.4s, v12.4s\n"
+ "fmla v31.4s, v2.4s, v12.4s\n"
"ldr x24, [x16, #0x60]\n"
"ldr x23, [x16, #0x68]\n"
- "fmla v20.4s, v1.4s, v12.4s\n"
- "fmla v16.4s, v8.4s, v12.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
"ldr x12, [x8, #0x0]\n"
"ldr x11, [x8, #0x8]\n"
- "fmla v15.4s, v7.4s, v12.4s\n"
- "mov v29.16b, v30.16b\n fmla v29.4s, v6.4s, v22.4s\n"
- "ldr q22, [x20, x15]\n"
+ "fmla v20.4s, v7.4s, v12.4s\n"
+ "mov v24.16b, v30.16b\n fmla v24.4s, v6.4s, v17.4s\n"
+ "ldr q10, [x20, x15]\n"
"ldr x28, [x16, #0x88]\n"
- "fmla v23.4s, v7.4s, v9.4s\n"
- "fmla v10.4s, v6.4s, v12.4s\n"
+ "fmla v21.4s, v7.4s, v9.4s\n"
+ "fmla v25.4s, v6.4s, v12.4s\n"
"ldr x10, [x8, #0x10]\n"
"ldr x9, [x8, #0x18]\n"
- "mov v21.16b, v30.16b\n fmla v21.4s, v3.4s, v12.4s\n"
+ "mov v22.16b, v30.16b\n fmla v22.4s, v3.4s, v12.4s\n"
"mov v19.16b, v30.16b\n fmla v19.4s, v0.4s, v12.4s\n"
- "ldr q11, [x22, x15]\n"
+ "ldr q12, [x22, x15]\n"
"ldr x22, [x16, #0x78]\n"
- "mov v24.16b, v30.16b\n fmla v24.4s, v8.4s, v18.4s\n"
- "ldr q12, [x26, x15]\n"
- "fmla v25.4s, v6.4s, v9.4s\n"
+ "mov v17.16b, v30.16b\n fmla v17.4s, v8.4s, v29.4s\n"
+ "ldr q11, [x26, x15]\n"
+ "fmla v27.4s, v6.4s, v9.4s\n"
"ldr x20, [x16, #0x80]\n"
- "fmla v28.4s, v4.4s, v9.4s\n"
- "fmla v20.4s, v3.4s, v9.4s\n"
+ "fmla v31.4s, v4.4s, v9.4s\n"
+ "fmla v28.4s, v3.4s, v9.4s\n"
"add x14, x14, #0x10\n"
- "mov v26.16b, v30.16b\n fmla v26.4s, v1.4s, v9.4s\n"
- "mov v18.16b, v30.16b\n fmla v18.4s, v0.4s, v9.4s\n"
+ "mov v29.16b, v30.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "mov v23.16b, v30.16b\n fmla v23.4s, v0.4s, v9.4s\n"
"ldr q30, [x17, #0x0]\n"
- "fmla v27.4s, v8.4s, v9.4s\n"
- "fmla v31.4s, v5.4s, v9.4s\n"
- "fmla v29.4s, v2.4s, v9.4s\n"
+ "fmla v13.4s, v8.4s, v9.4s\n"
+ "fmla v16.4s, v5.4s, v9.4s\n"
+ "fmla v24.4s, v2.4s, v9.4s\n"
"ldr q9, [x25, x15]\n"
- "fmla v17.4s, v1.4s, v11.4s\n"
+ "fmla v26.4s, v1.4s, v12.4s\n"
"ldr x27, [x16, #0x90]\n"
- "fmla v16.4s, v0.4s, v11.4s\n"
- "ldr q11, [x21, x15]\n"
- "fmla v15.4s, v2.4s, v12.4s\n"
+ "fmla v18.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x21, x15]\n"
+ "fmla v20.4s, v2.4s, v11.4s\n"
"ldr x21, [x16, #0x98]\n"
- "fmla v23.4s, v8.4s, v22.4s\n"
- "fmla v10.4s, v1.4s, v12.4s\n"
- "ldr q12, [x24, x15]\n"
+ "fmla v21.4s, v8.4s, v10.4s\n"
+ "fmla v25.4s, v1.4s, v11.4s\n"
+ "ldr q11, [x24, x15]\n"
"ldr x26, [x16, #0xa0]\n"
- "fmla v25.4s, v7.4s, v22.4s\n"
- "fmla v21.4s, v6.4s, v22.4s\n"
- "fmla v28.4s, v5.4s, v22.4s\n"
- "fmla v20.4s, v4.4s, v22.4s\n"
- "fmla v19.4s, v3.4s, v22.4s\n"
- "fmla v26.4s, v2.4s, v22.4s\n"
- "fmla v18.4s, v1.4s, v22.4s\n"
- "fmla v24.4s, v0.4s, v22.4s\n"
- "ldr q22, [x23, x15]\n"
+ "fmla v27.4s, v7.4s, v10.4s\n"
+ "fmla v22.4s, v6.4s, v10.4s\n"
+ "fmla v31.4s, v5.4s, v10.4s\n"
+ "fmla v28.4s, v4.4s, v10.4s\n"
+ "fmla v19.4s, v3.4s, v10.4s\n"
+ "fmla v29.4s, v2.4s, v10.4s\n"
+ "fmla v23.4s, v1.4s, v10.4s\n"
+ "fmla v17.4s, v0.4s, v10.4s\n"
+ "ldr q10, [x23, x15]\n"
"ldr x25, [x16, #0xa8]\n"
- "fmla v17.4s, v3.4s, v9.4s\n"
- "fmla v27.4s, v0.4s, v9.4s\n"
- "fmla v31.4s, v6.4s, v12.4s\n"
- "fmla v29.4s, v3.4s, v12.4s\n"
+ "fmla v26.4s, v3.4s, v9.4s\n"
+ "fmla v13.4s, v0.4s, v9.4s\n"
+ "fmla v16.4s, v6.4s, v11.4s\n"
+ "fmla v24.4s, v3.4s, v11.4s\n"
"ldr q9, [x13, x15]\n"
"ldr x24, [x16, #0xb0]\n"
- "fmla v16.4s, v4.4s, v22.4s\n"
- "fmla v15.4s, v3.4s, v22.4s\n"
- "fmla v23.4s, v1.4s, v22.4s\n"
- "fmla v10.4s, v5.4s, v11.4s\n"
- "fmla v21.4s, v2.4s, v11.4s\n"
+ "fmla v18.4s, v4.4s, v10.4s\n"
+ "fmla v20.4s, v3.4s, v10.4s\n"
+ "fmla v21.4s, v1.4s, v10.4s\n"
+ "fmla v25.4s, v5.4s, v12.4s\n"
+ "fmla v22.4s, v2.4s, v12.4s\n"
"ldr q12, [x22, x15]\n"
- "fmla v25.4s, v0.4s, v22.4s\n"
+ "fmla v27.4s, v0.4s, v10.4s\n"
"ldr x23, [x16, #0xb8]\n"
"fmla v19.4s, v8.4s, v9.4s\n"
- "fmla v24.4s, v5.4s, v9.4s\n"
- "ldr q11, [x20, x15]\n"
+ "fmla v17.4s, v5.4s, v9.4s\n"
+ "ldr q9, [x20, x15]\n"
"ldr x22, [x16, #0xc0]\n"
- "fmla v17.4s, v5.4s, v22.4s\n"
- "fmla v27.4s, v2.4s, v22.4s\n"
- "ldr q22, [x28, x15]\n"
+ "fmla v26.4s, v5.4s, v10.4s\n"
+ "fmla v13.4s, v2.4s, v10.4s\n"
+ "ldr q11, [x28, x15]\n"
"ldr x20, [x16, #0xc8]\n"
- "fmla v16.4s, v5.4s, v12.4s\n"
- "fmla v15.4s, v4.4s, v12.4s\n"
- "fmla v23.4s, v2.4s, v12.4s\n"
- "fmla v10.4s, v3.4s, v12.4s\n"
- "fmla v25.4s, v1.4s, v12.4s\n"
- "fmla v21.4s, v0.4s, v12.4s\n"
- "ldr q9, [x21, x15]\n"
+ "fmla v18.4s, v5.4s, v12.4s\n"
+ "fmla v20.4s, v4.4s, v12.4s\n"
+ "fmla v21.4s, v2.4s, v12.4s\n"
+ "fmla v25.4s, v3.4s, v12.4s\n"
+ "fmla v27.4s, v1.4s, v12.4s\n"
+ "fmla v22.4s, v0.4s, v12.4s\n"
+ "ldr q10, [x21, x15]\n"
"ldr x28, [x16, #0xd8]\n"
- "fmla v29.4s, v7.4s, v11.4s\n"
- "fmla v26.4s, v6.4s, v11.4s\n"
- "ldr q12, [x27, x15]\n"
+ "fmla v24.4s, v7.4s, v9.4s\n"
+ "fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr q9, [x27, x15]\n"
"ldr x21, [x16, #0xd0]\n"
- "fmla v17.4s, v7.4s, v22.4s\n"
- "fmla v16.4s, v6.4s, v22.4s\n"
- "fmla v27.4s, v4.4s, v22.4s\n"
- "fmla v23.4s, v3.4s, v22.4s\n"
- "fmla v31.4s, v1.4s, v22.4s\n"
- "fmla v28.4s, v0.4s, v22.4s\n"
- "ldr q11, [x26, x15]\n"
+ "fmla v26.4s, v7.4s, v11.4s\n"
+ "fmla v18.4s, v6.4s, v11.4s\n"
+ "fmla v13.4s, v4.4s, v11.4s\n"
+ "fmla v21.4s, v3.4s, v11.4s\n"
+ "fmla v16.4s, v1.4s, v11.4s\n"
+ "fmla v31.4s, v0.4s, v11.4s\n"
+ "ldr q12, [x26, x15]\n"
"ldr x27, [x16, #0xe0]\n"
- "fmla v15.4s, v8.4s, v9.4s\n"
- "fmla v18.4s, v8.4s, v12.4s\n"
- "fmla v24.4s, v7.4s, v12.4s\n"
- "ldr q12, [x25, x15]\n"
- "fmla v19.4s, v1.4s, v9.4s\n"
+ "fmla v20.4s, v8.4s, v10.4s\n"
+ "fmla v23.4s, v8.4s, v9.4s\n"
+ "fmla v17.4s, v7.4s, v9.4s\n"
+ "ldr q11, [x25, x15]\n"
+ "fmla v19.4s, v1.4s, v10.4s\n"
"ldr x26, [x16, #0xe8]\n"
- "fmla v10.4s, v7.4s, v9.4s\n"
- "fmla v25.4s, v5.4s, v9.4s\n"
- "fmla v21.4s, v4.4s, v9.4s\n"
- "fmla v20.4s, v2.4s, v9.4s\n"
+ "fmla v25.4s, v7.4s, v10.4s\n"
+ "fmla v27.4s, v5.4s, v10.4s\n"
+ "fmla v22.4s, v4.4s, v10.4s\n"
+ "fmla v28.4s, v2.4s, v10.4s\n"
"ldr q9, [x24, x15]\n"
- "ldr x24, [x16, #0xf0]\n"
- "fmla v17.4s, v2.4s, v11.4s\n"
- "fmla v16.4s, v1.4s, v11.4s\n"
- "fmla v15.4s, v0.4s, v11.4s\n"
- "ldr q22, [x23, x15]\n"
- "fmla v27.4s, v7.4s, v12.4s\n"
- "ldr x25, [x16, #0xf8]\n"
- "fmla v23.4s, v6.4s, v12.4s\n"
- "fmla v31.4s, v4.4s, v12.4s\n"
- "fmla v28.4s, v3.4s, v12.4s\n"
- "fmla v29.4s, v1.4s, v12.4s\n"
- "fmla v26.4s, v0.4s, v12.4s\n"
- "ldr q11, [x22, x15]\n"
- "fmla v19.4s, v4.4s, v11.4s\n"
- "ldr x23, [x16, #0x100]\n"
- "fmla v18.4s, v2.4s, v11.4s\n"
- "fmla v16.4s, v2.4s, v9.4s\n"
- "fmla v15.4s, v1.4s, v9.4s\n"
- "fmla v10.4s, v0.4s, v9.4s\n"
- "ldr q9, [x20, x15]\n"
+ "ldr x25, [x16, #0xf0]\n"
+ "fmla v26.4s, v2.4s, v12.4s\n"
+ "fmla v18.4s, v1.4s, v12.4s\n"
+ "fmla v20.4s, v0.4s, v12.4s\n"
+ "ldr q12, [x23, x15]\n"
+ "fmla v13.4s, v7.4s, v11.4s\n"
+ "ldr x24, [x16, #0xf8]\n"
+ "fmla v21.4s, v6.4s, v11.4s\n"
+ "fmla v16.4s, v4.4s, v11.4s\n"
+ "fmla v31.4s, v3.4s, v11.4s\n"
+ "fmla v24.4s, v1.4s, v11.4s\n"
+ "fmla v29.4s, v0.4s, v11.4s\n"
+ "ldr q10, [x22, x15]\n"
+ "fmla v18.4s, v2.4s, v9.4s\n"
+ "ldr x22, [x16, #0x100]\n"
+ "fmla v20.4s, v1.4s, v9.4s\n"
+ "fmla v25.4s, v0.4s, v9.4s\n"
+ "ldr q11, [x20, x15]\n"
"ldr x20, [x16, #0x108]\n"
- "fmla v17.4s, v6.4s, v22.4s\n"
- "fmla v27.4s, v3.4s, v22.4s\n"
- "fmla v31.4s, v0.4s, v22.4s\n"
- "ldr q22, [x21, x15]\n"
+ "fmla v26.4s, v6.4s, v12.4s\n"
+ "fmla v13.4s, v3.4s, v12.4s\n"
+ "fmla v19.4s, v4.4s, v10.4s\n"
+ "fmla v23.4s, v2.4s, v10.4s\n"
+ "fmla v16.4s, v0.4s, v12.4s\n"
+ "ldr q9, [x21, x15]\n"
+ "fmla v27.4s, v8.4s, v10.4s\n"
+ "ldr x23, [x16, #0x110]\n"
+ "fmla v22.4s, v7.4s, v10.4s\n"
+ "fmla v28.4s, v5.4s, v10.4s\n"
+ "fmla v17.4s, v1.4s, v10.4s\n"
+ "ldr q10, [x28, x15]\n"
"fmla v25.4s, v8.4s, v11.4s\n"
- "ldr x22, [x16, #0x110]\n"
- "fmla v21.4s, v7.4s, v11.4s\n"
- "fmla v20.4s, v5.4s, v11.4s\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
- "ldr q12, [x28, x15]\n"
- "fmla v19.4s, v2.4s, v9.4s\n"
"ldr x21, [x16, #0x118]\n"
- "fmla v29.4s, v0.4s, v22.4s\n"
- "fmla v26.4s, v4.4s, v12.4s\n"
- "fmla v18.4s, v3.4s, v12.4s\n"
- "fmla v10.4s, v8.4s, v9.4s\n"
- "fmla v21.4s, v5.4s, v9.4s\n"
- "ldr q11, [x27, x15]\n"
- "fmla v27.4s, v6.4s, v22.4s\n"
- "fmla v31.4s, v3.4s, v22.4s\n"
- "ldr q22, [x26, x15]\n"
- "fmla v28.4s, v7.4s, v12.4s\n"
- "fmla v20.4s, v6.4s, v12.4s\n"
- "fmla v29.4s, v5.4s, v12.4s\n"
- "fmla v19.4s, v5.4s, v11.4s\n"
- "fmla v24.4s, v2.4s, v11.4s\n"
- "fmla v26.4s, v7.4s, v22.4s\n"
- "fmla v18.4s, v6.4s, v22.4s\n"
- "fmla v31.4s, v8.4s, v12.4s\n"
+ "fmla v19.4s, v2.4s, v11.4s\n"
+ "fmla v24.4s, v0.4s, v9.4s\n"
+ "fmla v13.4s, v6.4s, v9.4s\n"
+ "fmla v16.4s, v3.4s, v9.4s\n"
+ "ldr q9, [x26, x15]\n"
+ "fmla v29.4s, v4.4s, v10.4s\n"
+ "fmla v23.4s, v3.4s, v10.4s\n"
+ "fmla v22.4s, v5.4s, v11.4s\n"
+ "ldr q12, [x27, x15]\n"
+ "fmla v31.4s, v7.4s, v10.4s\n"
+ "fmla v28.4s, v6.4s, v10.4s\n"
+ "fmla v24.4s, v5.4s, v10.4s\n"
+ "fmla v16.4s, v8.4s, v10.4s\n"
+ "ldr q10, [x25, x15]\n"
+ "fmla v19.4s, v5.4s, v12.4s\n"
+ "fmla v17.4s, v2.4s, v12.4s\n"
+ "fmla v29.4s, v7.4s, v9.4s\n"
+ "fmla v23.4s, v6.4s, v9.4s\n"
+ "fmla v22.4s, v8.4s, v12.4s\n"
"ldr q12, [x24, x15]\n"
- "fmla v29.4s, v8.4s, v22.4s\n"
- "ldr q22, [x23, x15]\n"
- "fmla v28.4s, v8.4s, v12.4s\n"
- "fmla v20.4s, v7.4s, v12.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
- "fmla v26.4s, v5.4s, v12.4s\n"
- "fmla v18.4s, v4.4s, v12.4s\n"
- "fmla v24.4s, v3.4s, v12.4s\n"
- "ldr q12, [x20, x15]\n"
- "ldp x20, x24, [x16, #0x0]\n"
- "ldr q9, [x20, x6]\n"
- "fmla v21.4s, v8.4s, v11.4s\n"
- "ldr q11, [x25, x15]\n"
- "fmla v17.4s, v4.4s, v22.4s\n"
- "fmla v16.4s, v3.4s, v22.4s\n"
- "fmla v15.4s, v5.4s, v12.4s\n"
- "fmax v17.4s, v17.4s, v13.4s\n"
- "fmla v10.4s, v4.4s, v12.4s\n"
- "fmla v26.4s, v8.4s, v11.4s\n"
- "fmax v16.4s, v16.4s, v13.4s\n"
- "fmla v18.4s, v7.4s, v11.4s\n"
- "fmla v24.4s, v6.4s, v11.4s\n"
- "ldr q11, [x22, x15]\n"
- "fmax v15.4s, v15.4s, v13.4s\n"
- "fmla v27.4s, v1.4s, v22.4s\n"
- "fmla v23.4s, v0.4s, v22.4s\n"
- "ldr q22, [x21, x15]\n"
+ "fmla v24.4s, v8.4s, v9.4s\n"
+ "ldr q9, [x22, x15]\n"
+ "fmla v31.4s, v8.4s, v10.4s\n"
+ "fmla v28.4s, v7.4s, v10.4s\n"
+ "fmla v19.4s, v6.4s, v10.4s\n"
+ "fmla v29.4s, v5.4s, v10.4s\n"
+ "fmla v17.4s, v3.4s, v10.4s\n"
+ "fmla v23.4s, v4.4s, v10.4s\n"
+ "ldr q11, [x20, x15]\n"
+ "fmla v26.4s, v4.4s, v9.4s\n"
+ "ldp x20, x22, [x16, #0x0]\n"
+ "fmla v18.4s, v3.4s, v9.4s\n"
+ "fmla v13.4s, v1.4s, v9.4s\n"
+ "fmla v21.4s, v0.4s, v9.4s\n"
+ "ldr q10, [x21, x15]\n"
"ldr q0, [x17, #0x10]\n"
- "fmla v25.4s, v2.4s, v12.4s\n"
+ "ldr q9, [x20, x6]\n"
+ "fmla v20.4s, v5.4s, v11.4s\n"
+ "fmla v25.4s, v4.4s, v11.4s\n"
+ "fmla v29.4s, v8.4s, v12.4s\n"
+ "fmla v23.4s, v7.4s, v12.4s\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "fmla v17.4s, v6.4s, v12.4s\n"
+ "ldr q12, [x23, x15]\n"
+ "fmla v27.4s, v2.4s, v11.4s\n"
"ldr q2, [x17, #0x30]\n"
- "fmla v21.4s, v1.4s, v12.4s\n"
+ "fmla v22.4s, v1.4s, v11.4s\n"
"ldr q1, [x17, #0x20]\n"
- "fmax v10.4s, v10.4s, v13.4s\n"
- "fmla v31.4s, v7.4s, v11.4s\n"
- "fmla v28.4s, v6.4s, v11.4s\n"
- "ldr q6, [x17, #0x70]\n"
- "fmla v20.4s, v8.4s, v22.4s\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "fmla v28.4s, v8.4s, v10.4s\n"
"ldr q8, [x17, #0x90]\n"
- "fmla v19.4s, v7.4s, v22.4s\n"
+ "fmax v20.4s, v20.4s, v15.4s\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "fmla v19.4s, v7.4s, v10.4s\n"
+ "fmla v16.4s, v7.4s, v12.4s\n"
"ldr q7, [x17, #0x80]\n"
- "fmin v17.4s, v17.4s, v14.4s\n"
- "fmin v16.4s, v16.4s, v14.4s\n"
- "str q17, [x12, x14]\n"
- "ldr x23, [x8, #0x20]\n"
- "fmin v15.4s, v15.4s, v14.4s\n"
- "fmin v10.4s, v10.4s, v14.4s\n"
- "str q16, [x11, x14]\n"
- "ldr x22, [x8, #0x28]\n"
- "fmax v27.4s, v27.4s, v13.4s\n"
- "fmax v23.4s, v23.4s, v13.4s\n"
- "str q15, [x10, x14]\n"
- "ldr x21, [x8, #0x30]\n"
- "fmax v25.4s, v25.4s, v13.4s\n"
- "fmax v21.4s, v21.4s, v13.4s\n"
- "str q10, [x9, x14]\n"
- "ldr x20, [x8, #0x38]\n"
- "fmla v29.4s, v4.4s, v11.4s\n"
- "fmla v26.4s, v3.4s, v11.4s\n"
+ "fmla v31.4s, v6.4s, v12.4s\n"
+ "ldr q6, [x17, #0x70]\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
+ "fmin v18.4s, v18.4s, v14.4s\n"
+ "fmla v24.4s, v4.4s, v12.4s\n"
+ "ldp x21, x20, [x16, #0x10]\n"
+ "fmin v20.4s, v20.4s, v14.4s\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "fmla v29.4s, v3.4s, v12.4s\n"
"ldr q3, [x17, #0x40]\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "fmla v18.4s, v5.4s, v22.4s\n"
+ "fmax v13.4s, v13.4s, v15.4s\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
+ "fmla v23.4s, v5.4s, v10.4s\n"
"ldr q5, [x17, #0x60]\n"
- "fmla v24.4s, v4.4s, v22.4s\n"
- "ldr q10, [x24, x6]\n"
+ "ldr q11, [x21, x6]\n"
+ "ldr q12, [x20, x6]\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "fmax v22.4s, v22.4s, v15.4s\n"
+ "str q26, [x12, x14]\n"
+ "ldr x23, [x8, #0x20]\n"
+ "fmla v17.4s, v4.4s, v10.4s\n"
+ "ldr q10, [x22, x6]\n"
"ldr q4, [x17, #0x50]\n"
- "fmin v23.4s, v23.4s, v14.4s\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
- "str q27, [x23, x14]\n"
+ "str q18, [x11, x14]\n"
+ "ldr x22, [x8, #0x28]\n"
+ "fmin v13.4s, v13.4s, v14.4s\n"
+ "str q20, [x10, x14]\n"
+ "ldr x21, [x8, #0x30]\n"
"fmin v21.4s, v21.4s, v14.4s\n"
- "fmax v31.4s, v31.4s, v13.4s\n"
- "str q23, [x22, x14]\n"
- "ldr x25, [x8, #0x40]\n"
- "fmax v28.4s, v28.4s, v13.4s\n"
- "fmax v20.4s, v20.4s, v13.4s\n"
- "str q25, [x21, x14]\n"
- "ldr x23, [x8, #0x48]\n"
- "fmax v19.4s, v19.4s, v13.4s\n"
- "str q21, [x20, x14]\n"
- "ldr x22, [x8, #0x50]\n"
- "ldr x24, [x8, #0x58]\n"
- "ldp x21, x20, [x16, #0x10]\n"
- "ldr q11, [x21, x6]\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
+ "str q25, [x9, x14]\n"
+ "ldr x20, [x8, #0x38]\n"
+ "fmin v22.4s, v22.4s, v14.4s\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
+ "fmax v31.4s, v31.4s, v15.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "str q13, [x23, x14]\n"
+ "ldr x23, [x8, #0x40]\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "str q21, [x22, x14]\n"
+ "ldr x22, [x8, #0x48]\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "str q27, [x21, x14]\n"
+ "ldr x21, [x8, #0x50]\n"
+ "fmin v16.4s, v16.4s, v14.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
+ "str q22, [x20, x14]\n"
+ "ldr x20, [x8, #0x58]\n"
"fmin v31.4s, v31.4s, v14.4s\n"
"fmin v28.4s, v28.4s, v14.4s\n"
- "ldr q12, [x20, x6]\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
"fmin v19.4s, v19.4s, v14.4s\n"
- "str q31, [x25, x14]\n"
- "fmax v29.4s, v29.4s, v13.4s\n"
- "fmax v26.4s, v26.4s, v13.4s\n"
- "str q28, [x23, x14]\n"
- "ldr x23, [x8, #0x60]\n"
- "fmax v18.4s, v18.4s, v13.4s\n"
- "fmax v24.4s, v24.4s, v13.4s\n"
- "str q20, [x22, x14]\n"
- "ldr x22, [x8, #0x68]\n"
- "str q19, [x24, x14]\n"
- "ldr x21, [x8, #0x70]\n"
- "ldr x20, [x8, #0x78]\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
"add x6, x6, #0x10\n"
- "cmp x6, x7, LSL #4\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
"add x15, x15, #0x10\n"
- "fmin v18.4s, v18.4s, v14.4s\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "str q16, [x23, x14]\n"
+ "ldr x23, [x8, #0x60]\n"
+ "cmp x6, x7, LSL #4\n"
+ "str q31, [x22, x14]\n"
+ "ldr x22, [x8, #0x68]\n"
"fmin v24.4s, v24.4s, v14.4s\n"
- "str q29, [x23, x14]\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "str q28, [x21, x14]\n"
+ "ldr x21, [x8, #0x70]\n"
+ "fmin v23.4s, v23.4s, v14.4s\n"
"add x17, x17, #0xa0\n"
- "str q26, [x22, x14]\n"
- "str q18, [x21, x14]\n"
- "str q24, [x20, x14]\n"
+ "str q19, [x20, x14]\n"
+ "ldr x20, [x8, #0x78]\n"
+ "fmin v17.4s, v17.4s, v14.4s\n"
+ "str q24, [x23, x14]\n"
+ "str q29, [x22, x14]\n"
+ "str q23, [x21, x14]\n"
+ "str q17, [x20, x14]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v31.16b, v30.16b\n fmla v31.4s, v4.4s, v9.4s\n"
- "mov v17.16b, v30.16b\n fmla v17.4s, v8.4s, v9.4s\n"
+ "mov v16.16b, v30.16b\n fmla v16.4s, v4.4s, v9.4s\n"
+ "mov v19.16b, v30.16b\n fmla v19.4s, v8.4s, v9.4s\n"
"ldr x27, [x16, #0x20]\n"
"ldr x24, [x16, #0x30]\n"
- "mov v15.16b, v30.16b\n fmla v15.4s, v3.4s, v9.4s\n"
- "mov v29.16b, v30.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "mov v13.16b, v30.16b\n fmla v13.4s, v3.4s, v9.4s\n"
+ "mov v31.16b, v30.16b\n fmla v31.4s, v1.4s, v9.4s\n"
"ldr x23, [x16, #0x28]\n"
"ldr x22, [x16, #0x38]\n"
- "mov v19.16b, v30.16b\n fmla v19.4s, v0.4s, v9.4s\n"
- "mov v20.16b, v30.16b\n fmla v20.4s, v7.4s, v9.4s\n"
+ "mov v17.16b, v30.16b\n fmla v17.4s, v0.4s, v9.4s\n"
+ "mov v18.16b, v30.16b\n fmla v18.4s, v7.4s, v9.4s\n"
"ldr x26, [x16, #0x40]\n"
"ldr x21, [x16, #0x48]\n"
- "mov v21.16b, v30.16b\n fmla v21.4s, v6.4s, v9.4s\n"
- "fmla v31.4s, v5.4s, v12.4s\n"
+ "mov v25.16b, v30.16b\n fmla v25.4s, v6.4s, v9.4s\n"
+ "mov v28.16b, v30.16b\n fmla v28.4s, v5.4s, v9.4s\n"
"ldr x25, [x16, #0x50]\n"
"ldr x20, [x16, #0x58]\n"
- "mov v18.16b, v30.16b\n fmla v18.4s, v5.4s, v9.4s\n"
- "mov v27.16b, v30.16b\n fmla v27.4s, v2.4s, v9.4s\n"
- "ldr q24, [x24, x15]\n"
+ "fmla v16.4s, v5.4s, v12.4s\n"
+ "mov v29.16b, v30.16b\n fmla v29.4s, v2.4s, v9.4s\n"
+ "ldr q22, [x24, x15]\n"
"ldr x13, [x16, #0x70]\n"
- "fmla v17.4s, v0.4s, v10.4s\n"
- "ldr q22, [x27, x15]\n"
- "mov v28.16b, v30.16b\n fmla v28.4s, v2.4s, v11.4s\n"
- "ldr q16, [x23, x15]\n"
- "fmla v15.4s, v4.4s, v12.4s\n"
- "fmla v29.4s, v2.4s, v12.4s\n"
+ "fmla v19.4s, v0.4s, v10.4s\n"
+ "ldr q20, [x27, x15]\n"
+ "mov v27.16b, v30.16b\n fmla v27.4s, v2.4s, v11.4s\n"
+ "ldr q23, [x23, x15]\n"
+ "fmla v13.4s, v4.4s, v12.4s\n"
+ "fmla v31.4s, v2.4s, v12.4s\n"
"ldr x24, [x16, #0x60]\n"
"ldr x23, [x16, #0x68]\n"
- "fmla v19.4s, v1.4s, v12.4s\n"
- "fmla v20.4s, v8.4s, v12.4s\n"
+ "fmla v17.4s, v1.4s, v12.4s\n"
+ "fmla v18.4s, v8.4s, v12.4s\n"
"ldr x12, [x8, #0x0]\n"
"ldr x11, [x8, #0x8]\n"
- "fmla v21.4s, v7.4s, v12.4s\n"
- "mov v10.16b, v30.16b\n fmla v10.4s, v6.4s, v22.4s\n"
- "ldr q22, [x21, x15]\n"
+ "fmla v25.4s, v7.4s, v12.4s\n"
+ "mov v11.16b, v30.16b\n fmla v11.4s, v6.4s, v20.4s\n"
+ "ldr q9, [x21, x15]\n"
"ldr x28, [x16, #0x88]\n"
- "fmla v31.4s, v7.4s, v24.4s\n"
- "fmla v28.4s, v6.4s, v12.4s\n"
+ "fmla v16.4s, v7.4s, v22.4s\n"
+ "fmla v27.4s, v6.4s, v12.4s\n"
"ldr x10, [x8, #0x10]\n"
"ldr x9, [x8, #0x18]\n"
- "mov v9.16b, v30.16b\n fmla v9.4s, v3.4s, v12.4s\n"
- "mov v11.16b, v30.16b\n fmla v11.4s, v0.4s, v12.4s\n"
- "ldr q23, [x22, x15]\n"
+ "mov v10.16b, v30.16b\n fmla v10.4s, v3.4s, v12.4s\n"
+ "mov v26.16b, v30.16b\n fmla v26.4s, v0.4s, v12.4s\n"
+ "ldr q21, [x22, x15]\n"
"ldr x22, [x16, #0x78]\n"
- "mov v12.16b, v30.16b\n fmla v12.4s, v8.4s, v16.4s\n"
- "ldr q16, [x26, x15]\n"
- "fmla v15.4s, v6.4s, v24.4s\n"
+ "mov v24.16b, v30.16b\n fmla v24.4s, v8.4s, v23.4s\n"
+ "ldr q23, [x26, x15]\n"
+ "fmla v13.4s, v6.4s, v22.4s\n"
"ldr x21, [x16, #0x80]\n"
- "fmla v29.4s, v4.4s, v24.4s\n"
- "fmla v19.4s, v3.4s, v24.4s\n"
+ "fmla v31.4s, v4.4s, v22.4s\n"
+ "fmla v17.4s, v3.4s, v22.4s\n"
"add x14, x14, #0x10\n"
- "mov v26.16b, v30.16b\n fmla v26.4s, v1.4s, v24.4s\n"
- "mov v25.16b, v30.16b\n fmla v25.4s, v0.4s, v24.4s\n"
- "fmla v18.4s, v8.4s, v24.4s\n"
- "fmla v27.4s, v5.4s, v24.4s\n"
- "fmla v10.4s, v2.4s, v24.4s\n"
- "ldr q24, [x25, x15]\n"
- "fmla v17.4s, v1.4s, v23.4s\n"
+ "mov v12.16b, v30.16b\n fmla v12.4s, v1.4s, v22.4s\n"
+ "fmla v30.4s, v0.4s, v22.4s\n"
+ "fmla v28.4s, v8.4s, v22.4s\n"
+ "fmla v29.4s, v5.4s, v22.4s\n"
+ "fmla v11.4s, v2.4s, v22.4s\n"
+ "ldr q22, [x25, x15]\n"
+ "fmla v19.4s, v1.4s, v21.4s\n"
"ldr x27, [x16, #0x90]\n"
- "fmla v20.4s, v0.4s, v23.4s\n"
- "ldr q23, [x20, x15]\n"
- "fmla v21.4s, v2.4s, v16.4s\n"
+ "fmla v18.4s, v0.4s, v21.4s\n"
+ "ldr q21, [x20, x15]\n"
+ "fmla v25.4s, v2.4s, v23.4s\n"
"ldr x20, [x16, #0x98]\n"
- "fmla v31.4s, v8.4s, v22.4s\n"
- "fmla v28.4s, v1.4s, v16.4s\n"
- "ldr q16, [x24, x15]\n"
+ "fmla v16.4s, v8.4s, v9.4s\n"
+ "fmla v27.4s, v1.4s, v23.4s\n"
+ "ldr q20, [x24, x15]\n"
"ldr x26, [x16, #0xa0]\n"
- "fmla v15.4s, v7.4s, v22.4s\n"
- "fmla v9.4s, v6.4s, v22.4s\n"
- "fmla v29.4s, v5.4s, v22.4s\n"
- "fmla v19.4s, v4.4s, v22.4s\n"
- "fmla v11.4s, v3.4s, v22.4s\n"
- "fmla v26.4s, v2.4s, v22.4s\n"
- "fmla v25.4s, v1.4s, v22.4s\n"
- "fmla v12.4s, v0.4s, v22.4s\n"
- "ldr q22, [x23, x15]\n"
+ "fmla v13.4s, v7.4s, v9.4s\n"
+ "fmla v10.4s, v6.4s, v9.4s\n"
+ "fmla v31.4s, v5.4s, v9.4s\n"
+ "fmla v17.4s, v4.4s, v9.4s\n"
+ "fmla v26.4s, v3.4s, v9.4s\n"
+ "fmla v12.4s, v2.4s, v9.4s\n"
+ "fmla v30.4s, v1.4s, v9.4s\n"
+ "fmla v24.4s, v0.4s, v9.4s\n"
+ "ldr q23, [x23, x15]\n"
"ldr x25, [x16, #0xa8]\n"
- "fmla v17.4s, v3.4s, v24.4s\n"
- "fmla v18.4s, v0.4s, v24.4s\n"
- "fmla v27.4s, v6.4s, v16.4s\n"
- "fmla v10.4s, v3.4s, v16.4s\n"
- "ldr q16, [x13, x15]\n"
+ "fmla v19.4s, v3.4s, v22.4s\n"
+ "fmla v28.4s, v0.4s, v22.4s\n"
+ "fmla v29.4s, v6.4s, v20.4s\n"
+ "fmla v11.4s, v3.4s, v20.4s\n"
+ "ldr q20, [x13, x15]\n"
"ldr x24, [x16, #0xb0]\n"
- "fmla v20.4s, v4.4s, v22.4s\n"
- "fmla v21.4s, v3.4s, v22.4s\n"
- "fmla v31.4s, v1.4s, v22.4s\n"
- "fmla v28.4s, v5.4s, v23.4s\n"
- "fmla v9.4s, v2.4s, v23.4s\n"
- "ldr q23, [x22, x15]\n"
- "fmla v15.4s, v0.4s, v22.4s\n"
+ "fmla v18.4s, v4.4s, v23.4s\n"
+ "fmla v25.4s, v3.4s, v23.4s\n"
+ "fmla v16.4s, v1.4s, v23.4s\n"
+ "fmla v27.4s, v5.4s, v21.4s\n"
+ "fmla v10.4s, v2.4s, v21.4s\n"
+ "ldr q22, [x22, x15]\n"
+ "fmla v13.4s, v0.4s, v23.4s\n"
"ldr x23, [x16, #0xb8]\n"
- "fmla v11.4s, v8.4s, v16.4s\n"
- "fmla v12.4s, v5.4s, v16.4s\n"
- "ldr q16, [x21, x15]\n"
+ "fmla v26.4s, v8.4s, v20.4s\n"
+ "fmla v24.4s, v5.4s, v20.4s\n"
+ "ldr q21, [x21, x15]\n"
"ldr x22, [x16, #0xc0]\n"
- "fmla v17.4s, v5.4s, v22.4s\n"
- "fmla v18.4s, v2.4s, v22.4s\n"
- "ldr q22, [x28, x15]\n"
+ "fmla v19.4s, v5.4s, v23.4s\n"
+ "fmla v28.4s, v2.4s, v23.4s\n"
+ "ldr q20, [x28, x15]\n"
"ldr x21, [x16, #0xc8]\n"
- "fmla v20.4s, v5.4s, v23.4s\n"
- "fmla v21.4s, v4.4s, v23.4s\n"
- "fmla v31.4s, v2.4s, v23.4s\n"
- "fmla v28.4s, v3.4s, v23.4s\n"
- "fmla v15.4s, v1.4s, v23.4s\n"
- "fmla v9.4s, v0.4s, v23.4s\n"
- "ldr q23, [x20, x15]\n"
+ "fmla v18.4s, v5.4s, v22.4s\n"
+ "fmla v25.4s, v4.4s, v22.4s\n"
+ "fmla v16.4s, v2.4s, v22.4s\n"
+ "fmla v27.4s, v3.4s, v22.4s\n"
+ "fmla v13.4s, v1.4s, v22.4s\n"
+ "fmla v10.4s, v0.4s, v22.4s\n"
+ "ldr q22, [x20, x15]\n"
"ldr x28, [x16, #0xd8]\n"
- "fmla v10.4s, v7.4s, v16.4s\n"
- "fmla v26.4s, v6.4s, v16.4s\n"
- "ldr q16, [x27, x15]\n"
+ "fmla v11.4s, v7.4s, v21.4s\n"
+ "fmla v12.4s, v6.4s, v21.4s\n"
+ "ldr q21, [x27, x15]\n"
"ldr x20, [x16, #0xd0]\n"
- "fmla v17.4s, v7.4s, v22.4s\n"
- "fmla v20.4s, v6.4s, v22.4s\n"
- "fmla v18.4s, v4.4s, v22.4s\n"
- "fmla v31.4s, v3.4s, v22.4s\n"
- "fmla v27.4s, v1.4s, v22.4s\n"
- "fmla v29.4s, v0.4s, v22.4s\n"
- "ldr q22, [x26, x15]\n"
+ "fmla v19.4s, v7.4s, v20.4s\n"
+ "fmla v18.4s, v6.4s, v20.4s\n"
+ "fmla v28.4s, v4.4s, v20.4s\n"
+ "fmla v16.4s, v3.4s, v20.4s\n"
+ "fmla v29.4s, v1.4s, v20.4s\n"
+ "fmla v31.4s, v0.4s, v20.4s\n"
+ "ldr q20, [x26, x15]\n"
"ldr x27, [x16, #0xe0]\n"
- "fmla v21.4s, v8.4s, v23.4s\n"
- "fmla v25.4s, v8.4s, v16.4s\n"
- "fmla v12.4s, v7.4s, v16.4s\n"
- "ldr q16, [x25, x15]\n"
- "fmla v11.4s, v1.4s, v23.4s\n"
+ "fmla v25.4s, v8.4s, v22.4s\n"
+ "fmla v30.4s, v8.4s, v21.4s\n"
+ "fmla v24.4s, v7.4s, v21.4s\n"
+ "ldr q21, [x25, x15]\n"
+ "fmla v26.4s, v1.4s, v22.4s\n"
"ldr x26, [x16, #0xe8]\n"
- "fmla v28.4s, v7.4s, v23.4s\n"
- "fmla v15.4s, v5.4s, v23.4s\n"
- "fmla v9.4s, v4.4s, v23.4s\n"
- "fmla v19.4s, v2.4s, v23.4s\n"
- "ldr q23, [x24, x15]\n"
- "ldr x25, [x16, #0xf0]\n"
+ "fmla v27.4s, v7.4s, v22.4s\n"
+ "fmla v13.4s, v5.4s, v22.4s\n"
+ "fmla v10.4s, v4.4s, v22.4s\n"
"fmla v17.4s, v2.4s, v22.4s\n"
- "fmla v20.4s, v1.4s, v22.4s\n"
- "fmla v21.4s, v0.4s, v22.4s\n"
- "ldr q22, [x23, x15]\n"
- "fmla v18.4s, v7.4s, v16.4s\n"
+ "ldr q22, [x24, x15]\n"
+ "ldr x25, [x16, #0xf0]\n"
+ "fmla v19.4s, v2.4s, v20.4s\n"
+ "fmla v18.4s, v1.4s, v20.4s\n"
+ "fmla v25.4s, v0.4s, v20.4s\n"
+ "ldr q20, [x23, x15]\n"
+ "fmla v28.4s, v7.4s, v21.4s\n"
"ldr x24, [x16, #0xf8]\n"
- "fmla v31.4s, v6.4s, v16.4s\n"
- "fmla v27.4s, v4.4s, v16.4s\n"
- "fmla v29.4s, v3.4s, v16.4s\n"
- "fmla v10.4s, v1.4s, v16.4s\n"
- "fmla v26.4s, v0.4s, v16.4s\n"
- "ldr q16, [x22, x15]\n"
- "fmla v11.4s, v4.4s, v16.4s\n"
+ "fmla v16.4s, v6.4s, v21.4s\n"
+ "fmla v29.4s, v4.4s, v21.4s\n"
+ "fmla v31.4s, v3.4s, v21.4s\n"
+ "fmla v11.4s, v1.4s, v21.4s\n"
+ "fmla v12.4s, v0.4s, v21.4s\n"
+ "ldr q21, [x22, x15]\n"
+ "fmla v18.4s, v2.4s, v22.4s\n"
"ldr x23, [x16, #0x100]\n"
- "fmla v25.4s, v2.4s, v16.4s\n"
- "fmla v20.4s, v2.4s, v23.4s\n"
- "fmla v21.4s, v1.4s, v23.4s\n"
- "fmla v28.4s, v0.4s, v23.4s\n"
+ "fmla v25.4s, v1.4s, v22.4s\n"
+ "fmla v27.4s, v0.4s, v22.4s\n"
"ldr q23, [x21, x15]\n"
"ldr x22, [x16, #0x108]\n"
- "fmla v17.4s, v6.4s, v22.4s\n"
- "fmla v18.4s, v3.4s, v22.4s\n"
- "fmla v27.4s, v0.4s, v22.4s\n"
- "ldr q22, [x20, x15]\n"
- "fmla v15.4s, v8.4s, v16.4s\n"
+ "fmla v19.4s, v6.4s, v20.4s\n"
+ "fmla v28.4s, v3.4s, v20.4s\n"
+ "fmla v26.4s, v4.4s, v21.4s\n"
+ "fmla v30.4s, v2.4s, v21.4s\n"
+ "fmla v29.4s, v0.4s, v20.4s\n"
+ "ldr q20, [x20, x15]\n"
+ "fmla v13.4s, v8.4s, v21.4s\n"
"ldr x21, [x16, #0x110]\n"
- "fmla v9.4s, v7.4s, v16.4s\n"
- "fmla v19.4s, v5.4s, v16.4s\n"
- "fmla v12.4s, v1.4s, v16.4s\n"
- "ldr q16, [x28, x15]\n"
- "fmla v11.4s, v2.4s, v23.4s\n"
+ "fmla v10.4s, v7.4s, v21.4s\n"
+ "fmla v17.4s, v5.4s, v21.4s\n"
+ "fmla v24.4s, v1.4s, v21.4s\n"
+ "ldr q21, [x28, x15]\n"
+ "fmla v27.4s, v8.4s, v23.4s\n"
"ldr x20, [x16, #0x118]\n"
- "fmla v10.4s, v0.4s, v22.4s\n"
- "fmla v26.4s, v4.4s, v16.4s\n"
- "fmla v25.4s, v3.4s, v16.4s\n"
- "fmla v28.4s, v8.4s, v23.4s\n"
- "fmla v9.4s, v5.4s, v23.4s\n"
- "ldr q23, [x27, x15]\n"
- "fmla v18.4s, v6.4s, v22.4s\n"
- "fmla v27.4s, v3.4s, v22.4s\n"
+ "fmla v26.4s, v2.4s, v23.4s\n"
+ "fmla v11.4s, v0.4s, v20.4s\n"
+ "fmla v28.4s, v6.4s, v20.4s\n"
+ "fmla v29.4s, v3.4s, v20.4s\n"
"ldr q22, [x26, x15]\n"
- "fmla v29.4s, v7.4s, v16.4s\n"
- "fmla v19.4s, v6.4s, v16.4s\n"
- "fmla v10.4s, v5.4s, v16.4s\n"
- "fmla v11.4s, v5.4s, v23.4s\n"
- "fmla v12.4s, v2.4s, v23.4s\n"
- "fmla v26.4s, v7.4s, v22.4s\n"
- "fmla v25.4s, v6.4s, v22.4s\n"
- "fmla v27.4s, v8.4s, v16.4s\n"
- "ldr q16, [x25, x15]\n"
- "fmla v10.4s, v8.4s, v22.4s\n"
- "ldr q30, [x23, x15]\n"
- "fmla v29.4s, v8.4s, v16.4s\n"
- "fmla v19.4s, v7.4s, v16.4s\n"
- "fmla v11.4s, v6.4s, v16.4s\n"
- "fmla v26.4s, v5.4s, v16.4s\n"
- "fmla v25.4s, v4.4s, v16.4s\n"
- "fmla v12.4s, v3.4s, v16.4s\n"
- "ldr q24, [x22, x15]\n"
- "fmla v9.4s, v8.4s, v23.4s\n"
- "ldr q16, [x24, x15]\n"
- "fmla v17.4s, v4.4s, v30.4s\n"
- "fmax v17.4s, v17.4s, v13.4s\n"
- "fmla v20.4s, v3.4s, v30.4s\n"
- "fmla v21.4s, v5.4s, v24.4s\n"
- "fmax v20.4s, v20.4s, v13.4s\n"
- "fmla v28.4s, v4.4s, v24.4s\n"
- "fmla v26.4s, v8.4s, v16.4s\n"
- "fmax v21.4s, v21.4s, v13.4s\n"
- "fmla v25.4s, v7.4s, v16.4s\n"
- "fmla v12.4s, v6.4s, v16.4s\n"
- "ldr q23, [x21, x15]\n"
- "fmax v28.4s, v28.4s, v13.4s\n"
- "fmla v18.4s, v1.4s, v30.4s\n"
- "fmla v31.4s, v0.4s, v30.4s\n"
- "ldr q16, [x20, x15]\n"
- "fmin v17.4s, v17.4s, v14.4s\n"
- "fmla v15.4s, v2.4s, v24.4s\n"
- "fmla v9.4s, v1.4s, v24.4s\n"
- "fmin v20.4s, v20.4s, v14.4s\n"
- "str q17, [x12, x14]\n"
- "fmla v27.4s, v7.4s, v23.4s\n"
- "fmla v29.4s, v6.4s, v23.4s\n"
- "fmin v21.4s, v21.4s, v14.4s\n"
- "str q20, [x11, x14]\n"
- "fmla v19.4s, v8.4s, v16.4s\n"
- "fmla v11.4s, v7.4s, v16.4s\n"
- "fmin v28.4s, v28.4s, v14.4s\n"
- "str q21, [x10, x14]\n"
- "fmax v18.4s, v18.4s, v13.4s\n"
- "fmax v31.4s, v31.4s, v13.4s\n"
- "str q28, [x9, x14]\n"
+ "fmla v12.4s, v4.4s, v21.4s\n"
+ "fmla v30.4s, v3.4s, v21.4s\n"
+ "fmla v10.4s, v5.4s, v23.4s\n"
+ "ldr q20, [x27, x15]\n"
+ "fmla v31.4s, v7.4s, v21.4s\n"
+ "fmla v17.4s, v6.4s, v21.4s\n"
+ "fmla v11.4s, v5.4s, v21.4s\n"
+ "fmla v29.4s, v8.4s, v21.4s\n"
+ "ldr q21, [x25, x15]\n"
+ "fmla v26.4s, v5.4s, v20.4s\n"
+ "fmla v24.4s, v2.4s, v20.4s\n"
+ "fmla v12.4s, v7.4s, v22.4s\n"
+ "fmla v30.4s, v6.4s, v22.4s\n"
+ "fmla v10.4s, v8.4s, v20.4s\n"
+ "ldr q20, [x24, x15]\n"
+ "fmla v11.4s, v8.4s, v22.4s\n"
+ "ldr q22, [x23, x15]\n"
+ "fmla v31.4s, v8.4s, v21.4s\n"
+ "fmla v17.4s, v7.4s, v21.4s\n"
+ "fmla v26.4s, v6.4s, v21.4s\n"
+ "fmla v12.4s, v5.4s, v21.4s\n"
+ "fmla v24.4s, v3.4s, v21.4s\n"
+ "fmla v30.4s, v4.4s, v21.4s\n"
+ "ldr q21, [x22, x15]\n"
+ "fmla v19.4s, v4.4s, v22.4s\n"
+ "fmla v18.4s, v3.4s, v22.4s\n"
+ "fmla v28.4s, v1.4s, v22.4s\n"
+ "fmla v16.4s, v0.4s, v22.4s\n"
+ "ldr q23, [x20, x15]\n"
+ "fmla v25.4s, v5.4s, v21.4s\n"
+ "fmla v27.4s, v4.4s, v21.4s\n"
+ "fmla v12.4s, v8.4s, v20.4s\n"
+ "fmla v30.4s, v7.4s, v20.4s\n"
+ "fmla v24.4s, v6.4s, v20.4s\n"
+ "ldr q0, [x21, x15]\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "fmla v13.4s, v2.4s, v21.4s\n"
+ "fmla v10.4s, v1.4s, v21.4s\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "add x15, x15, #0x10\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "fmla v17.4s, v8.4s, v23.4s\n"
+ "fmla v26.4s, v7.4s, v23.4s\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "fmla v29.4s, v7.4s, v0.4s\n"
+ "fmla v31.4s, v6.4s, v0.4s\n"
+ "fmin v19.4s, v19.4s, v14.4s\n"
+ "fmin v18.4s, v18.4s, v14.4s\n"
+ "fmla v11.4s, v4.4s, v0.4s\n"
+ "fmin v25.4s, v25.4s, v14.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "fmla v12.4s, v3.4s, v0.4s\n"
+ "fmin v27.4s, v27.4s, v14.4s\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
+ "fmla v30.4s, v5.4s, v23.4s\n"
+ "fmax v13.4s, v13.4s, v15.4s\n"
+ "fmax v10.4s, v10.4s, v15.4s\n"
+ "str q19, [x12, x14]\n"
"ldr x23, [x8, #0x20]\n"
- "fmax v15.4s, v15.4s, v13.4s\n"
- "fmax v9.4s, v9.4s, v13.4s\n"
+ "str q18, [x11, x14]\n"
"ldr x22, [x8, #0x28]\n"
+ "fmla v24.4s, v4.4s, v23.4s\n"
+ "fmin v28.4s, v28.4s, v14.4s\n"
+ "str q25, [x10, x14]\n"
"ldr x21, [x8, #0x30]\n"
+ "fmin v16.4s, v16.4s, v14.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
+ "str q27, [x9, x14]\n"
"ldr x20, [x8, #0x38]\n"
- "fmla v10.4s, v4.4s, v23.4s\n"
- "fmla v26.4s, v3.4s, v23.4s\n"
- "fmin v18.4s, v18.4s, v14.4s\n"
- "fmla v25.4s, v5.4s, v16.4s\n"
- "fmla v12.4s, v4.4s, v16.4s\n"
- "fmin v31.4s, v31.4s, v14.4s\n"
- "str q18, [x23, x14]\n"
- "fmin v15.4s, v15.4s, v14.4s\n"
- "fmin v9.4s, v9.4s, v14.4s\n"
- "str q31, [x22, x14]\n"
+ "fmin v13.4s, v13.4s, v14.4s\n"
+ "fmin v10.4s, v10.4s, v14.4s\n"
+ "fmax v31.4s, v31.4s, v15.4s\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "str q28, [x23, x14]\n"
"ldr x23, [x8, #0x40]\n"
- "fmax v27.4s, v27.4s, v13.4s\n"
- "fmax v29.4s, v29.4s, v13.4s\n"
- "str q15, [x21, x14]\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "str q16, [x22, x14]\n"
"ldr x22, [x8, #0x48]\n"
- "fmax v19.4s, v19.4s, v13.4s\n"
- "fmax v11.4s, v11.4s, v13.4s\n"
- "str q9, [x20, x14]\n"
+ "fmin v29.4s, v29.4s, v14.4s\n"
+ "str q13, [x21, x14]\n"
"ldr x21, [x8, #0x50]\n"
+ "fmax v11.4s, v11.4s, v15.4s\n"
+ "fmax v12.4s, v12.4s, v15.4s\n"
+ "str q10, [x20, x14]\n"
"ldr x20, [x8, #0x58]\n"
- "fmin v27.4s, v27.4s, v14.4s\n"
- "fmin v29.4s, v29.4s, v14.4s\n"
- "str q27, [x23, x14]\n"
- "fmin v19.4s, v19.4s, v14.4s\n"
- "fmin v11.4s, v11.4s, v14.4s\n"
- "str q29, [x22, x14]\n"
+ "fmin v31.4s, v31.4s, v14.4s\n"
+ "fmin v17.4s, v17.4s, v14.4s\n"
+ "fmin v26.4s, v26.4s, v14.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "str q29, [x23, x14]\n"
"ldr x23, [x8, #0x60]\n"
- "fmax v10.4s, v10.4s, v13.4s\n"
- "fmax v26.4s, v26.4s, v13.4s\n"
- "str q19, [x21, x14]\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "fmin v11.4s, v11.4s, v14.4s\n"
+ "str q31, [x22, x14]\n"
"ldr x22, [x8, #0x68]\n"
- "fmax v25.4s, v25.4s, v13.4s\n"
- "fmax v12.4s, v12.4s, v13.4s\n"
- "str q11, [x20, x14]\n"
+ "str q17, [x21, x14]\n"
"ldr x21, [x8, #0x70]\n"
- "ldr x20, [x8, #0x78]\n"
- "fmin v10.4s, v10.4s, v14.4s\n"
- "fmin v26.4s, v26.4s, v14.4s\n"
- "str q10, [x23, x14]\n"
- "fmin v25.4s, v25.4s, v14.4s\n"
"fmin v12.4s, v12.4s, v14.4s\n"
- "str q26, [x22, x14]\n"
- "add x15, x15, #0x10\n"
- "str q25, [x21, x14]\n"
- "str q12, [x20, x14]\n"
+ "str q26, [x20, x14]\n"
+ "ldr x20, [x8, #0x78]\n"
+ "fmin v30.4s, v30.4s, v14.4s\n"
+ "fmin v24.4s, v24.4s, v14.4s\n"
+ "str q11, [x23, x14]\n"
+ "str q12, [x22, x14]\n"
+ "str q30, [x21, x14]\n"
+ "str q24, [x20, x14]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 72f\n"
@@ -715,10 +715,10 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr q8, [x17, #0x90]\n"
"ldr x23, [x16, #0x0]\n"
"ldr x22, [x16, #0x8]\n"
- "add x23, x23, x15\n"
- "add x22, x22, x15\n"
"ldr x21, [x16, #0x10]\n"
"ldr x20, [x16, #0x18]\n"
+ "add x23, x23, x15\n"
+ "add x22, x22, x15\n"
"add x21, x21, x15\n"
"add x20, x20, x15\n"
"tbz %x[n_channels], #1, 4f\n"
@@ -741,20 +741,20 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"mov v16.16b, v30.16b\n fmla v16.4s, v8.4s, v9.4s\n"
"mov v17.16b, v30.16b\n fmla v17.4s, v7.4s, v9.4s\n"
"ldr x20, [x16, #0x20]\n"
- "add x20, x20, x15\n"
"mov v18.16b, v30.16b\n fmla v18.4s, v6.4s, v9.4s\n"
"mov v21.16b, v30.16b\n fmla v21.4s, v4.4s, v9.4s\n"
"mov v22.16b, v30.16b\n fmla v22.4s, v3.4s, v9.4s\n"
"mov v25.16b, v30.16b\n fmla v25.4s, v1.4s, v9.4s\n"
"mov v26.16b, v30.16b\n fmla v26.4s, v0.4s, v9.4s\n"
"mov v19.16b, v30.16b\n fmla v19.4s, v2.4s, v11.4s\n"
+ "add x20, x20, x15\n"
"mov v20.16b, v30.16b\n fmla v20.4s, v5.4s, v9.4s\n"
"mov v24.16b, v30.16b\n fmla v24.4s, v2.4s, v9.4s\n"
"fmla v16.4s, v0.4s, v10.4s\n"
"fmla v17.4s, v8.4s, v12.4s\n"
"fmla v18.4s, v7.4s, v12.4s\n"
- "fmla v19.4s, v6.4s, v12.4s\n"
"fmla v21.4s, v5.4s, v12.4s\n"
+ "fmla v19.4s, v6.4s, v12.4s\n"
"fmla v22.4s, v4.4s, v12.4s\n"
"mov v23.16b, v30.16b\n fmla v23.4s, v3.4s, v12.4s\n"
"fmla v25.4s, v2.4s, v12.4s\n"
@@ -793,13 +793,13 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x38]\n"
"fmla v20.4s, v8.4s, v9.4s\n"
"fmla v21.4s, v7.4s, v9.4s\n"
- "add x20, x20, x15\n"
"fmla v22.4s, v6.4s, v9.4s\n"
"fmla v24.4s, v5.4s, v9.4s\n"
"fmla v25.4s, v4.4s, v9.4s\n"
"fmla v26.4s, v3.4s, v9.4s\n"
"fmla v28.4s, v2.4s, v9.4s\n"
"mov v29.16b, v30.16b\n fmla v29.4s, v1.4s, v9.4s\n"
+ "add x20, x20, x15\n"
"fmla v30.4s, v0.4s, v9.4s\n"
"tbz %x[n_channels], #1, 12f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
@@ -836,13 +836,13 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x50]\n"
"fmla v21.4s, v8.4s, v10.4s\n"
"fmla v22.4s, v7.4s, v10.4s\n"
- "add x20, x20, x15\n"
"fmla v23.4s, v6.4s, v10.4s\n"
"fmla v25.4s, v5.4s, v10.4s\n"
"fmla v26.4s, v4.4s, v10.4s\n"
"fmla v27.4s, v3.4s, v10.4s\n"
"fmla v29.4s, v2.4s, v10.4s\n"
"fmla v30.4s, v1.4s, v10.4s\n"
+ "add x20, x20, x15\n"
"fmla v31.4s, v0.4s, v10.4s\n"
"tbz %x[n_channels], #1, 18f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
@@ -891,11 +891,11 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x70]\n"
"fmla v16.4s, v5.4s, v10.4s\n"
"fmla v17.4s, v4.4s, v10.4s\n"
- "add x20, x20, x15\n"
"fmla v18.4s, v3.4s, v10.4s\n"
"fmla v20.4s, v2.4s, v10.4s\n"
"fmla v21.4s, v1.4s, v10.4s\n"
"fmla v22.4s, v0.4s, v10.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 26f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
@@ -919,11 +919,11 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x80]\n"
"fmla v17.4s, v5.4s, v12.4s\n"
"fmla v18.4s, v4.4s, v12.4s\n"
- "add x20, x20, x15\n"
"fmla v19.4s, v3.4s, v12.4s\n"
"fmla v21.4s, v2.4s, v12.4s\n"
"fmla v22.4s, v1.4s, v12.4s\n"
"fmla v23.4s, v0.4s, v12.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 30f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 31f\n"
@@ -947,11 +947,11 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x90]\n"
"fmla v16.4s, v7.4s, v10.4s\n"
"fmla v17.4s, v6.4s, v10.4s\n"
- "add x20, x20, x15\n"
"fmla v20.4s, v4.4s, v10.4s\n"
"fmla v21.4s, v3.4s, v10.4s\n"
"fmla v24.4s, v1.4s, v10.4s\n"
"fmla v25.4s, v0.4s, v10.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 34f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 35f\n"
@@ -975,11 +975,11 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xa0]\n"
"fmla v18.4s, v8.4s, v12.4s\n"
"fmla v19.4s, v7.4s, v12.4s\n"
- "add x20, x20, x15\n"
"fmla v22.4s, v5.4s, v12.4s\n"
"fmla v23.4s, v4.4s, v12.4s\n"
"fmla v26.4s, v2.4s, v12.4s\n"
"fmla v27.4s, v1.4s, v12.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 38f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 39f\n"
@@ -991,8 +991,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xa8]\n"
"fmla v16.4s, v2.4s, v10.4s\n"
"fmla v17.4s, v1.4s, v10.4s\n"
- "add x20, x20, x15\n"
"fmla v18.4s, v0.4s, v10.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 40f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 41f\n"
@@ -1004,11 +1004,11 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xb0]\n"
"fmla v20.4s, v7.4s, v11.4s\n"
"fmla v21.4s, v6.4s, v11.4s\n"
- "add x20, x20, x15\n"
"fmla v24.4s, v4.4s, v11.4s\n"
"fmla v25.4s, v3.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v11.4s\n"
"fmla v29.4s, v0.4s, v11.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 42f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 43f\n"
@@ -1020,8 +1020,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xb8]\n"
"fmla v17.4s, v2.4s, v12.4s\n"
"fmla v18.4s, v1.4s, v12.4s\n"
- "add x20, x20, x15\n"
"fmla v19.4s, v0.4s, v12.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 44f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 45f\n"
@@ -1033,8 +1033,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xc0]\n"
"fmla v16.4s, v6.4s, v10.4s\n"
"fmla v20.4s, v3.4s, v10.4s\n"
- "add x20, x20, x15\n"
"fmla v24.4s, v0.4s, v10.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 46f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 47f\n"
@@ -1046,11 +1046,11 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xc8]\n"
"fmla v22.4s, v8.4s, v11.4s\n"
"fmla v23.4s, v7.4s, v11.4s\n"
- "add x20, x20, x15\n"
"fmla v26.4s, v5.4s, v11.4s\n"
"fmla v27.4s, v4.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v11.4s\n"
"fmla v31.4s, v1.4s, v11.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 48f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 49f\n"
@@ -1062,8 +1062,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xd0]\n"
"fmla v19.4s, v8.4s, v12.4s\n"
"fmla v23.4s, v5.4s, v12.4s\n"
- "add x20, x20, x15\n"
"fmla v27.4s, v2.4s, v12.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 50f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 51f\n"
@@ -1075,8 +1075,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xd8]\n"
"fmla v20.4s, v6.4s, v10.4s\n"
"fmla v24.4s, v3.4s, v10.4s\n"
- "add x20, x20, x15\n"
"fmla v28.4s, v0.4s, v10.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 52f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 53f\n"
@@ -1088,11 +1088,11 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xe0]\n"
"fmla v24.4s, v8.4s, v11.4s\n"
"fmla v25.4s, v7.4s, v11.4s\n"
- "add x20, x20, x15\n"
"fmla v26.4s, v6.4s, v11.4s\n"
"fmla v28.4s, v5.4s, v11.4s\n"
"fmla v29.4s, v4.4s, v11.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 54f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 55f\n"
@@ -1104,8 +1104,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xe8]\n"
"fmla v23.4s, v8.4s, v12.4s\n"
"fmla v27.4s, v5.4s, v12.4s\n"
- "add x20, x20, x15\n"
"fmla v31.4s, v2.4s, v12.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 56f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 57f\n"
@@ -1117,8 +1117,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xf0]\n"
"fmla v28.4s, v8.4s, v10.4s\n"
"fmla v29.4s, v7.4s, v10.4s\n"
- "add x20, x20, x15\n"
"fmla v30.4s, v6.4s, v10.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 58f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 59f\n"
@@ -1130,11 +1130,11 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0xf8]\n"
"fmla v25.4s, v8.4s, v11.4s\n"
"fmla v26.4s, v7.4s, v11.4s\n"
- "add x20, x20, x15\n"
"fmla v27.4s, v6.4s, v11.4s\n"
"fmla v29.4s, v5.4s, v11.4s\n"
"fmla v30.4s, v4.4s, v11.4s\n"
"fmla v31.4s, v3.4s, v11.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 60f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 61f\n"
@@ -1146,8 +1146,8 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x100]\n"
"fmla v29.4s, v8.4s, v12.4s\n"
"fmla v30.4s, v7.4s, v12.4s\n"
- "add x20, x20, x15\n"
"fmla v31.4s, v6.4s, v12.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 62f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 63f\n"
@@ -1159,9 +1159,9 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x108]\n"
"fmla v16.4s, v4.4s, v10.4s\n"
"fmla v17.4s, v3.4s, v10.4s\n"
- "add x20, x20, x15\n"
"fmla v20.4s, v1.4s, v10.4s\n"
"fmla v21.4s, v0.4s, v10.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 64f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 65f\n"
@@ -1173,9 +1173,9 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x110]\n"
"fmla v18.4s, v5.4s, v11.4s\n"
"fmla v19.4s, v4.4s, v11.4s\n"
- "add x20, x20, x15\n"
"fmla v22.4s, v2.4s, v11.4s\n"
"fmla v23.4s, v1.4s, v11.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 66f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 67f\n"
@@ -1187,9 +1187,9 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x20, [x16, #0x118]\n"
"fmla v24.4s, v7.4s, v12.4s\n"
"fmla v25.4s, v6.4s, v12.4s\n"
- "add x20, x20, x15\n"
"fmla v28.4s, v4.4s, v12.4s\n"
"fmla v29.4s, v3.4s, v12.4s\n"
+ "add x20, x20, x15\n"
"tbz %x[n_channels], #1, 68f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 69f\n"
@@ -1200,24 +1200,24 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"69:" // Oddments: Load input (4, 4): Bit 1: End
"fmla v26.4s, v8.4s, v10.4s\n"
"fmla v27.4s, v7.4s, v10.4s\n"
- "fmax v16.4s, v16.4s, v13.4s\n"
+ "fmax v16.4s, v16.4s, v15.4s\n"
"fmla v30.4s, v5.4s, v10.4s\n"
"fmla v31.4s, v4.4s, v10.4s\n"
- "fmax v17.4s, v17.4s, v13.4s\n"
- "fmax v18.4s, v18.4s, v13.4s\n"
- "fmax v19.4s, v19.4s, v13.4s\n"
- "fmax v20.4s, v20.4s, v13.4s\n"
- "fmax v21.4s, v21.4s, v13.4s\n"
- "fmax v22.4s, v22.4s, v13.4s\n"
- "fmax v23.4s, v23.4s, v13.4s\n"
- "fmax v24.4s, v24.4s, v13.4s\n"
- "fmax v25.4s, v25.4s, v13.4s\n"
- "fmax v26.4s, v26.4s, v13.4s\n"
- "fmax v27.4s, v27.4s, v13.4s\n"
- "fmax v28.4s, v28.4s, v13.4s\n"
- "fmax v29.4s, v29.4s, v13.4s\n"
- "fmax v30.4s, v30.4s, v13.4s\n"
- "fmax v31.4s, v31.4s, v13.4s\n"
+ "fmax v17.4s, v17.4s, v15.4s\n"
+ "fmax v18.4s, v18.4s, v15.4s\n"
+ "fmax v19.4s, v19.4s, v15.4s\n"
+ "fmax v20.4s, v20.4s, v15.4s\n"
+ "fmax v21.4s, v21.4s, v15.4s\n"
+ "fmax v22.4s, v22.4s, v15.4s\n"
+ "fmax v23.4s, v23.4s, v15.4s\n"
+ "fmax v24.4s, v24.4s, v15.4s\n"
+ "fmax v25.4s, v25.4s, v15.4s\n"
+ "fmax v26.4s, v26.4s, v15.4s\n"
+ "fmax v27.4s, v27.4s, v15.4s\n"
+ "fmax v28.4s, v28.4s, v15.4s\n"
+ "fmax v29.4s, v29.4s, v15.4s\n"
+ "fmax v30.4s, v30.4s, v15.4s\n"
+ "fmax v31.4s, v31.4s, v15.4s\n"
"fmin v16.4s, v16.4s, v14.4s\n"
"fmin v17.4s, v17.4s, v14.4s\n"
"fmin v18.4s, v18.4s, v14.4s\n"
@@ -1237,150 +1237,150 @@ void a64_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"tbz %x[n_channels], #1, 70f\n"
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.d }[0], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.d }[0], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.d }[0], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.d }[0], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.d }[0], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.d }[0], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.d }[0], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.d }[0], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.d }[0], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.d }[0], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.d }[0], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.d }[0], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
- "add x14, x14, #0x8\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.d }[0], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.d }[0], [x22]\n"
+ "add x20, x20, x14\n"
+ "add x14, x14, #0x8\n"
"st1 { v30.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #0, 71f\n"
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.s }[2], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.s }[2], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.s }[2], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.s }[2], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.s }[2], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.s }[2], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.s }[2], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.s }[2], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.s }[2], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.s }[2], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.s }[2], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.s }[2], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.s }[2], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.s }[2], [x22]\n"
+ "add x20, x20, x14\n"
"st1 { v30.s }[2], [x21]\n"
"st1 { v31.s }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Store: Bit 1: Unset
"ldr x23, [x8, #0x0]\n"
"ldr x22, [x8, #0x8]\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
"ldr x21, [x8, #0x10]\n"
"ldr x20, [x8, #0x18]\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
"st1 { v16.s }[0], [x23]\n"
"ldr x23, [x8, #0x20]\n"
- "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v17.s }[0], [x22]\n"
"ldr x22, [x8, #0x28]\n"
- "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"st1 { v18.s }[0], [x21]\n"
"ldr x21, [x8, #0x30]\n"
- "add x21, x21, x14\n"
"st1 { v19.s }[0], [x20]\n"
"ldr x20, [x8, #0x38]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v20.s }[0], [x23]\n"
"ldr x23, [x8, #0x40]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v21.s }[0], [x22]\n"
"ldr x22, [x8, #0x48]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v22.s }[0], [x21]\n"
"ldr x21, [x8, #0x50]\n"
- "add x21, x21, x14\n"
"st1 { v23.s }[0], [x20]\n"
"ldr x20, [x8, #0x58]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v24.s }[0], [x23]\n"
"ldr x23, [x8, #0x60]\n"
- "add x23, x23, x14\n"
+ "add x21, x21, x14\n"
"st1 { v25.s }[0], [x22]\n"
"ldr x22, [x8, #0x68]\n"
- "add x22, x22, x14\n"
+ "add x20, x20, x14\n"
"st1 { v26.s }[0], [x21]\n"
"ldr x21, [x8, #0x70]\n"
- "add x21, x21, x14\n"
"st1 { v27.s }[0], [x20]\n"
"ldr x20, [x8, #0x78]\n"
- "add x20, x20, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
"st1 { v28.s }[0], [x23]\n"
+ "add x21, x21, x14\n"
"st1 { v29.s }[0], [x22]\n"
+ "add x20, x20, x14\n"
"st1 { v30.s }[0], [x21]\n"
"st1 { v31.s }[0], [x20]\n"
"71:" // Oddments: Store: Bit 1: End
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index 5ab61fad4c..ca61372e1c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,259 +87,259 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x23, #0x0\n"
- "mov x27, #0x0\n"
+ "mov x11, #0x0\n"
+ "mov x10, #0x0\n"
"1:" // Tile loop
- "str x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x26, #0x4\n"
- "mov x25, #0x2\n"
- "str x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "str x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x9, #0x4\n"
+ "mov x28, #0x2\n"
+ "str x10, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x27, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
"ldr x6, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x23, x24\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x27, x6, x22\n" // offset += tile_j * ld_input_col
+ "mov x26, #0x10\n" // cntb _, ALL, #1
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"ldr x7, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "lsl x6, x6, #0x2\n"
- "mul x20, x23, x21\n" // offset = tile_i * ld_output_row
- "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
- "add x8, x8, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x16, x8, x24, LSL #2\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x27, x7, x20\n" // offset += tile_j * ld_output_col
- "lsr x22, %x[n_channels], #0x2\n"
- "add x14, x16, x24, LSL #2\n"
- "mul x20, x20, x25\n" // offset *= output_tile_size
- "add x13, x6, x6\n"
- "add x12, x14, x24, LSL #2\n"
- "add x11, x13, x6\n"
- "add x17, x17, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "lsr x24, %x[n_channels], #0x2\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
"ld1r { v26.4s }, [x20]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x23, #0x0\n"
"ld1r { v27.4s }, [x20]\n"
- "add x10, x12, x24, LSL #2\n"
- "add x9, x11, x6\n"
- "add x28, x17, x21, LSL #2\n"
+ "mul x22, x11, x27\n" // offset = tile_i * ld_input_row
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x21, XZR, x26\n"
+ "mul x20, x11, x25\n" // offset = tile_i * ld_output_row
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x22, x10, x6, x22\n" // offset += tile_j * ld_input_col
+ "lsl x6, x6, #0x2\n"
+ "madd x20, x10, x7, x20\n" // offset += tile_j * ld_output_col
"lsl x7, x7, #0x2\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q31, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "add x15, x15, #0xa0\n"
- "ldr q9, [x14, x13]\n"
+ "mul x22, x22, x9\n" // offset *= kernel_stride * output_size
+ "add x15, x6, x6\n"
+ "add x14, x15, x6\n"
+ "add x13, x14, x6\n"
+ "mul x20, x20, x28\n" // offset *= output_tile_size
+ "add x8, x8, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x12, x8, x27, LSL #2\n"
+ "add x11, x12, x27, LSL #2\n"
+ "add x10, x11, x27, LSL #2\n"
+ "add x17, x17, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x9, x10, x27, LSL #2\n"
+ "add x28, x17, x25, LSL #2\n"
+ "cbz x24, 4f\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "cmp x26, x24, LSL #4\n"
+ "ldr q1, [x16, #0x20]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldr q3, [x16, #0x40]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "ldr q5, [x16, #0x60]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "ldr q7, [x16, #0x80]\n"
+ "ldr q8, [x16, #0x90]\n"
+ "add x16, x16, #0xa0\n"
+ "ldr q9, [x11, x15]\n"
"ld1 { v10.4s }, [x8]\n"
"ldr q11, [x8, x6]\n"
- "ldr q12, [x8, x11]\n"
- "ldr q13, [x8, x9]\n"
- "ld1 { v14.4s }, [x16]\n"
- "ldr q15, [x16, x6]\n"
- "ldr q16, [x8, x13]\n"
+ "ldr q12, [x8, x14]\n"
+ "ldr q13, [x8, x13]\n"
+ "ld1 { v14.4s }, [x12]\n"
+ "ldr q15, [x12, x6]\n"
+ "ldr q16, [x8, x15]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v29.16b, v31.16b\n fmla v29.4s, v8.4s, v9.4s\n"
"mov v28.16b, v31.16b\n fmla v28.4s, v6.4s, v9.4s\n"
- "add x23, x23, #0x10\n"
+ "add x26, x26, #0x10\n"
"add x8, x8, #0x10\n"
+ "mov v25.16b, v31.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "mov v24.16b, v31.16b\n fmla v24.4s, v0.4s, v9.4s\n"
+ "ldr q31, [x16, #0x0]\n"
+ "cmp x26, x24, LSL #4\n"
+ "add x21, x21, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v29.4s, v0.4s, v10.4s\n"
"ld1 { v10.4s }, [x8]\n"
"fmla v28.4s, v1.4s, v12.4s\n"
- "ldr q21, [x16, x9]\n"
+ "ldr q21, [x12, x13]\n"
"fmla v29.4s, v1.4s, v11.4s\n"
- "ldr q18, [x16, x11]\n"
+ "ldr q18, [x12, x14]\n"
"fmla v28.4s, v2.4s, v13.4s\n"
- "ldr q17, [x16, x13]\n"
+ "ldr q20, [x12, x15]\n"
+ "add x12, x12, #0x10\n"
"fmla v29.4s, v3.4s, v14.4s\n"
- "ld1 { v20.4s }, [x12]\n"
+ "ld1 { v17.4s }, [x10]\n"
"fmla v28.4s, v0.4s, v16.4s\n"
- "add x16, x16, #0x10\n"
"fmla v29.4s, v4.4s, v15.4s\n"
- "ld1 { v25.4s }, [x14]\n"
+ "ld1 { v23.4s }, [x11]\n"
+ "fmla v25.4s, v3.4s, v17.4s\n"
+ "ldr q19, [x10, x13]\n"
"fmla v28.4s, v4.4s, v18.4s\n"
- "ldr q19, [x12, x6]\n"
+ "ldr q17, [x10, x6]\n"
"fmla v29.4s, v2.4s, v16.4s\n"
- "ldr q18, [x14, x6]\n"
+ "ldr q22, [x11, x6]\n"
"fmla v28.4s, v5.4s, v21.4s\n"
- "ldr q24, [x14, x11]\n"
- "mov v23.16b, v31.16b\n fmla v23.4s, v2.4s, v9.4s\n"
- "mov v22.16b, v31.16b\n fmla v22.4s, v0.4s, v9.4s\n"
- "ldr q31, [x15, #0x0]\n"
- "cmp x23, x22, LSL #4\n"
- "fmla v29.4s, v5.4s, v17.4s\n"
- "fmla v28.4s, v3.4s, v17.4s\n"
- "ldr q17, [x12, x11]\n"
- "add x20, x20, #0x10\n"
- "fmla v23.4s, v3.4s, v20.4s\n"
- "ldr q16, [x12, x9]\n"
- "fmla v22.4s, v4.4s, v17.4s\n"
- "ldr q21, [x10, x6]\n"
- "fmla v23.4s, v0.4s, v25.4s\n"
- "ldr q0, [x15, #0x10]\n"
- "fmla v22.4s, v1.4s, v24.4s\n"
- "add x21, x21, #0x10\n"
- "fmla v23.4s, v4.4s, v19.4s\n"
- "ldr q20, [x14, x9]\n"
- "ldr q4, [x15, #0x50]\n"
- "fmla v22.4s, v5.4s, v16.4s\n"
- "ldr q19, [x10, x11]\n"
- "fmla v29.4s, v6.4s, v25.4s\n"
- "ld1 { v17.4s }, [x10]\n"
- "fmla v23.4s, v1.4s, v18.4s\n"
- "ldr q1, [x15, #0x20]\n"
- "fmla v22.4s, v2.4s, v20.4s\n"
- "ldr q2, [x15, #0x30]\n"
- "fmla v29.4s, v7.4s, v18.4s\n"
- "ldr q16, [x12, x13]\n"
- "fmla v23.4s, v6.4s, v17.4s\n"
- "ldr q18, [x10, x13]\n"
- "fmla v22.4s, v3.4s, v16.4s\n"
- "ldr q3, [x15, #0x40]\n"
- "fmla v23.4s, v7.4s, v21.4s\n"
- "ldr q13, [x8, x9]\n"
- "fmla v22.4s, v7.4s, v19.4s\n"
- "ld1 { v14.4s }, [x16]\n"
- "fmla v28.4s, v7.4s, v24.4s\n"
- "ldr q12, [x8, x11]\n"
- "fmla v23.4s, v5.4s, v16.4s\n"
- "ldr q16, [x8, x13]\n"
- "ldr q5, [x15, #0x60]\n"
- "fmla v22.4s, v6.4s, v18.4s\n"
+ "ldr q18, [x11, x14]\n"
+ "fmla v25.4s, v0.4s, v23.4s\n"
+ "ldr q0, [x16, #0x10]\n"
+ "fmla v29.4s, v5.4s, v20.4s\n"
+ "fmla v28.4s, v3.4s, v20.4s\n"
+ "ldr q16, [x10, x14]\n"
+ "fmla v24.4s, v4.4s, v16.4s\n"
+ "ldr q21, [x9, x6]\n"
+ "fmla v25.4s, v4.4s, v17.4s\n"
+ "ldr q20, [x11, x13]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "add x11, x11, #0x10\n"
+ "ldr q9, [x11, x15]\n"
+ "fmla v29.4s, v6.4s, v23.4s\n"
+ "ld1 { v17.4s }, [x9]\n"
+ "fmla v24.4s, v1.4s, v18.4s\n"
+ "fmla v28.4s, v7.4s, v18.4s\n"
+ "ldr q12, [x8, x14]\n"
+ "fmla v25.4s, v1.4s, v22.4s\n"
+ "ldr q1, [x16, #0x20]\n"
+ "fmla v24.4s, v5.4s, v19.4s\n"
+ "ldr q19, [x9, x14]\n"
+ "fmla v29.4s, v7.4s, v22.4s\n"
+ "ldr q16, [x10, x15]\n"
+ "add x10, x10, #0x10\n"
+ "fmla v25.4s, v6.4s, v17.4s\n"
+ "ldr q18, [x9, x15]\n"
"fmla v28.4s, v8.4s, v20.4s\n"
- "ldr q17, [x10, x9]\n"
- "ldr q6, [x15, #0x70]\n"
- "fmla v23.4s, v8.4s, v18.4s\n"
- "fmla v22.4s, v8.4s, v17.4s\n"
- "ldr q11, [x8, x6]\n"
- "ldr q15, [x16, x6]\n"
+ "fmla v24.4s, v2.4s, v20.4s\n"
+ "ldr q17, [x9, x13]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "add x9, x9, #0x10\n"
"fmax v29.4s, v29.4s, v26.4s\n"
+ "fmla v25.4s, v7.4s, v21.4s\n"
+ "ldr q13, [x8, x13]\n"
"fmax v28.4s, v28.4s, v26.4s\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
- "fmax v23.4s, v23.4s, v26.4s\n"
- "fmax v22.4s, v22.4s, v26.4s\n"
- "add x14, x14, #0x10\n"
- "ldr q9, [x14, x13]\n"
+ "fmla v24.4s, v3.4s, v16.4s\n"
+ "ldr q3, [x16, #0x40]\n"
"fmin v29.4s, v29.4s, v27.4s\n"
+ "fmla v25.4s, v5.4s, v16.4s\n"
+ "ldr q16, [x8, x15]\n"
+ "ldr q5, [x16, #0x60]\n"
"fmin v28.4s, v28.4s, v27.4s\n"
- "fmin v23.4s, v23.4s, v27.4s\n"
- "fmin v22.4s, v22.4s, v27.4s\n"
- "add x12, x12, #0x10\n"
- "add x10, x10, #0x10\n"
+ "fmla v24.4s, v7.4s, v19.4s\n"
+ "ld1 { v14.4s }, [x12]\n"
+ "ldr q7, [x16, #0x80]\n"
"st1 { v29.4s }, [x17]\n"
- "add x15, x15, #0xa0\n"
+ "fmla v25.4s, v8.4s, v18.4s\n"
"str q28, [x17, x7]\n"
"add x17, x17, #0x10\n"
- "st1 { v23.4s }, [x28]\n"
- "str q22, [x28, x7]\n"
+ "fmla v24.4s, v6.4s, v18.4s\n"
+ "ldr q15, [x12, x6]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "fmax v25.4s, v25.4s, v26.4s\n"
+ "fmla v24.4s, v8.4s, v17.4s\n"
+ "ldr q11, [x8, x6]\n"
+ "ldr q8, [x16, #0x90]\n"
+ "add x16, x16, #0xa0\n"
+ "fmin v25.4s, v25.4s, v27.4s\n"
+ "fmax v24.4s, v24.4s, v26.4s\n"
+ "fmin v24.4s, v24.4s, v27.4s\n"
+ "st1 { v25.4s }, [x28]\n"
+ "str q24, [x28, x7]\n"
"add x28, x28, #0x10\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "mov v29.16b, v31.16b\n fmla v29.4s, v8.4s, v9.4s\n"
- "mov v28.16b, v31.16b\n fmla v28.4s, v6.4s, v9.4s\n"
+ "mov v28.16b, v31.16b\n fmla v28.4s, v8.4s, v9.4s\n"
+ "mov v29.16b, v31.16b\n fmla v29.4s, v6.4s, v9.4s\n"
"add x8, x8, #0x10\n"
- "fmla v29.4s, v0.4s, v10.4s\n"
- "fmla v28.4s, v1.4s, v12.4s\n"
- "ldr q20, [x16, x9]\n"
- "fmla v29.4s, v1.4s, v11.4s\n"
- "ldr q18, [x16, x11]\n"
- "fmla v28.4s, v2.4s, v13.4s\n"
- "ldr q17, [x16, x13]\n"
- "fmla v29.4s, v3.4s, v14.4s\n"
- "ld1 { v19.4s }, [x12]\n"
- "fmla v28.4s, v0.4s, v16.4s\n"
- "add x16, x16, #0x10\n"
- "fmla v29.4s, v4.4s, v15.4s\n"
- "ld1 { v25.4s }, [x14]\n"
- "fmla v28.4s, v4.4s, v18.4s\n"
- "ldr q18, [x12, x6]\n"
- "fmla v29.4s, v2.4s, v16.4s\n"
- "ldr q24, [x14, x6]\n"
- "fmla v28.4s, v5.4s, v20.4s\n"
- "ldr q23, [x14, x11]\n"
- "mov v22.16b, v31.16b\n fmla v22.4s, v2.4s, v9.4s\n"
- "mov v21.16b, v31.16b\n fmla v21.4s, v0.4s, v9.4s\n"
- "fmla v29.4s, v5.4s, v17.4s\n"
- "fmla v28.4s, v3.4s, v17.4s\n"
- "ldr q17, [x12, x11]\n"
- "fmla v22.4s, v3.4s, v19.4s\n"
- "ldr q16, [x12, x9]\n"
- "fmla v21.4s, v4.4s, v17.4s\n"
- "ldr q20, [x10, x6]\n"
- "fmla v22.4s, v0.4s, v25.4s\n"
- "fmla v21.4s, v1.4s, v23.4s\n"
- "fmla v22.4s, v4.4s, v18.4s\n"
- "ldr q19, [x14, x9]\n"
- "fmla v21.4s, v5.4s, v16.4s\n"
- "ldr q18, [x10, x11]\n"
- "fmla v29.4s, v6.4s, v25.4s\n"
- "ld1 { v17.4s }, [x10]\n"
- "fmla v22.4s, v1.4s, v24.4s\n"
- "add x14, x14, #0x10\n"
- "fmla v21.4s, v2.4s, v19.4s\n"
- "fmla v29.4s, v7.4s, v24.4s\n"
- "ldr q16, [x12, x13]\n"
- "fmax v29.4s, v29.4s, v26.4s\n"
- "fmla v22.4s, v6.4s, v17.4s\n"
- "ldr q17, [x10, x13]\n"
- "fmla v21.4s, v3.4s, v16.4s\n"
- "fmin v29.4s, v29.4s, v27.4s\n"
- "fmla v22.4s, v7.4s, v20.4s\n"
- "fmla v21.4s, v7.4s, v18.4s\n"
- "st1 { v29.4s }, [x17]\n"
+ "mov v25.16b, v31.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "mov v24.16b, v31.16b\n fmla v24.4s, v0.4s, v9.4s\n"
+ "fmla v28.4s, v0.4s, v10.4s\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "ldr q21, [x12, x13]\n"
+ "fmla v28.4s, v1.4s, v11.4s\n"
+ "ldr q18, [x12, x14]\n"
+ "fmla v29.4s, v2.4s, v13.4s\n"
+ "ldr q20, [x12, x15]\n"
"add x12, x12, #0x10\n"
- "fmla v28.4s, v7.4s, v23.4s\n"
- "fmla v22.4s, v5.4s, v16.4s\n"
- "fmla v21.4s, v6.4s, v17.4s\n"
- "fmla v28.4s, v8.4s, v19.4s\n"
- "ldr q16, [x10, x9]\n"
- "fmax v28.4s, v28.4s, v26.4s\n"
- "fmla v22.4s, v8.4s, v17.4s\n"
- "fmla v21.4s, v8.4s, v16.4s\n"
- "fmax v22.4s, v22.4s, v26.4s\n"
+ "fmla v28.4s, v3.4s, v14.4s\n"
+ "ld1 { v17.4s }, [x10]\n"
+ "fmla v29.4s, v0.4s, v16.4s\n"
+ "fmla v25.4s, v3.4s, v17.4s\n"
+ "ldr q23, [x10, x13]\n"
+ "fmla v28.4s, v4.4s, v15.4s\n"
+ "ld1 { v22.4s }, [x11]\n"
+ "fmla v29.4s, v4.4s, v18.4s\n"
+ "ldr q19, [x10, x6]\n"
+ "fmla v28.4s, v2.4s, v16.4s\n"
+ "ldr q18, [x11, x6]\n"
+ "fmla v25.4s, v0.4s, v22.4s\n"
+ "fmla v29.4s, v5.4s, v21.4s\n"
+ "ldr q17, [x11, x14]\n"
+ "fmla v28.4s, v5.4s, v20.4s\n"
+ "fmla v29.4s, v3.4s, v20.4s\n"
+ "ldr q16, [x10, x14]\n"
+ "fmla v24.4s, v4.4s, v16.4s\n"
+ "ldr q21, [x9, x6]\n"
+ "fmla v25.4s, v4.4s, v19.4s\n"
+ "ldr q20, [x11, x13]\n"
+ "add x11, x11, #0x10\n"
+ "fmla v28.4s, v6.4s, v22.4s\n"
+ "ld1 { v16.4s }, [x9]\n"
+ "fmla v29.4s, v7.4s, v17.4s\n"
+ "fmla v24.4s, v1.4s, v17.4s\n"
+ "fmla v25.4s, v1.4s, v18.4s\n"
+ "fmla v28.4s, v7.4s, v18.4s\n"
+ "ldr q19, [x10, x15]\n"
"add x10, x10, #0x10\n"
- "fmax v21.4s, v21.4s, v26.4s\n"
+ "fmla v29.4s, v8.4s, v20.4s\n"
+ "fmla v24.4s, v5.4s, v23.4s\n"
+ "ldr q18, [x9, x14]\n"
+ "fmla v25.4s, v6.4s, v16.4s\n"
+ "ldr q17, [x9, x15]\n"
+ "fmax v28.4s, v28.4s, v26.4s\n"
+ "fmax v29.4s, v29.4s, v26.4s\n"
+ "fmla v24.4s, v2.4s, v20.4s\n"
+ "ldr q16, [x9, x13]\n"
+ "add x9, x9, #0x10\n"
+ "fmla v25.4s, v7.4s, v21.4s\n"
"fmin v28.4s, v28.4s, v27.4s\n"
- "str q28, [x17, x7]\n"
+ "fmin v29.4s, v29.4s, v27.4s\n"
+ "fmla v24.4s, v3.4s, v19.4s\n"
+ "st1 { v28.4s }, [x17]\n"
+ "fmla v25.4s, v5.4s, v19.4s\n"
+ "str q29, [x17, x7]\n"
"add x17, x17, #0x10\n"
- "fmin v22.4s, v22.4s, v27.4s\n"
- "fmin v21.4s, v21.4s, v27.4s\n"
- "st1 { v22.4s }, [x28]\n"
- "str q21, [x28, x7]\n"
+ "fmla v24.4s, v7.4s, v18.4s\n"
+ "fmla v25.4s, v8.4s, v17.4s\n"
+ "fmla v24.4s, v6.4s, v17.4s\n"
+ "fmax v25.4s, v25.4s, v26.4s\n"
+ "fmin v25.4s, v25.4s, v27.4s\n"
+ "fmla v24.4s, v8.4s, v16.4s\n"
+ "st1 { v25.4s }, [x28]\n"
+ "fmax v24.4s, v24.4s, v26.4s\n"
+ "fmin v24.4s, v24.4s, v27.4s\n"
+ "str q24, [x28, x7]\n"
"add x28, x28, #0x10\n"
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 43f\n"
- "ldr q31, [x15, #0x0]\n"
- "ldr q0, [x15, #0x10]\n"
- "add x27, x14, x13\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "add x27, x11, x15\n"
"add x26, x8, XZR\n"
- "ldr q1, [x15, #0x20]\n"
- "ldr q2, [x15, #0x30]\n"
+ "ldr q1, [x16, #0x20]\n"
+ "ldr q2, [x16, #0x30]\n"
"add x25, x8, x6\n"
- "add x24, x8, x11\n"
- "ldr q3, [x15, #0x40]\n"
- "ldr q4, [x15, #0x50]\n"
- "add x23, x8, x9\n"
- "add x22, x16, XZR\n"
- "ldr q5, [x15, #0x60]\n"
- "ldr q6, [x15, #0x70]\n"
- "add x21, x16, x6\n"
- "add x20, x8, x13\n"
- "ldr q7, [x15, #0x80]\n"
- "ldr q8, [x15, #0x90]\n"
+ "add x24, x8, x14\n"
+ "ldr q3, [x16, #0x40]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "add x23, x8, x13\n"
+ "add x22, x12, XZR\n"
+ "ldr q5, [x16, #0x60]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "add x21, x12, x6\n"
+ "add x20, x8, x15\n"
+ "ldr q7, [x16, #0x80]\n"
+ "ldr q8, [x16, #0x90]\n"
"tbz %x[n_channels], #1, 5f\n"
"ldr d9, [x27], #0x8\n"
"ldr d10, [x26], #0x8\n"
@@ -370,18 +370,18 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s16, [x20, #0x0]\n"
"6:" // Tile loop: Oddments: Load inputs: (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 1: End
"mov v28.16b, v31.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "add x20, x16, x11\n"
"mov v29.16b, v31.16b\n fmla v29.4s, v6.4s, v9.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
+ "add x20, x12, x14\n"
+ "mov v30.16b, v31.16b\n fmla v30.4s, v2.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v9.4s\n"
+ "fmla v28.4s, v0.4s, v10.4s\n"
"fmla v29.4s, v1.4s, v12.4s\n"
- "fmla v28.4s, v3.4s, v14.4s\n"
+ "fmla v28.4s, v1.4s, v11.4s\n"
"fmla v29.4s, v2.4s, v13.4s\n"
+ "fmla v28.4s, v3.4s, v14.4s\n"
+ "fmla v29.4s, v0.4s, v16.4s\n"
"fmla v28.4s, v4.4s, v15.4s\n"
- "mov v30.16b, v31.16b\n fmla v30.4s, v2.4s, v9.4s\n"
- "fmla v31.4s, v0.4s, v9.4s\n"
"fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v29.4s, v0.4s, v16.4s\n"
"tbz %x[n_channels], #1, 7f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 8f\n"
@@ -391,7 +391,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s11, [x20, #0x0]\n"
"8:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
"fmla v29.4s, v4.4s, v11.4s\n"
- "add x20, x16, x9\n"
+ "add x20, x12, x13\n"
"tbz %x[n_channels], #1, 9f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 10f\n"
@@ -401,7 +401,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s12, [x20, #0x0]\n"
"10:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: End
"fmla v29.4s, v5.4s, v12.4s\n"
- "add x20, x16, x13\n"
+ "add x20, x12, x15\n"
"tbz %x[n_channels], #1, 11f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #0, 12f\n"
@@ -412,7 +412,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"12:" // Tile loop: Oddments: Load inputs: (1, 2): Bit 1: End
"fmla v28.4s, v5.4s, v13.4s\n"
"fmla v29.4s, v3.4s, v13.4s\n"
- "add x20, x12, XZR\n"
+ "add x20, x10, XZR\n"
"tbz %x[n_channels], #1, 13f\n"
"ldr d14, [x20], #0x8\n"
"tbz %x[n_channels], #0, 14f\n"
@@ -422,7 +422,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s14, [x20, #0x0]\n"
"14:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
"fmla v30.4s, v3.4s, v14.4s\n"
- "add x20, x14, XZR\n"
+ "add x20, x11, XZR\n"
"tbz %x[n_channels], #1, 15f\n"
"ldr d15, [x20], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
@@ -433,7 +433,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"16:" // Tile loop: Oddments: Load inputs: (2, 0): Bit 1: End
"fmla v28.4s, v6.4s, v15.4s\n"
"fmla v30.4s, v0.4s, v15.4s\n"
- "add x20, x12, x6\n"
+ "add x20, x10, x6\n"
"tbz %x[n_channels], #1, 17f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 18f\n"
@@ -443,7 +443,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s11, [x20, #0x0]\n"
"18:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
"fmla v30.4s, v4.4s, v11.4s\n"
- "add x20, x14, x6\n"
+ "add x20, x11, x6\n"
"tbz %x[n_channels], #1, 19f\n"
"ldr d16, [x20], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
@@ -454,7 +454,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"20:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: End
"fmla v28.4s, v7.4s, v16.4s\n"
"fmla v30.4s, v1.4s, v16.4s\n"
- "add x20, x12, x11\n"
+ "add x20, x10, x14\n"
"tbz %x[n_channels], #1, 21f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
@@ -464,7 +464,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s13, [x20, #0x0]\n"
"22:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
"fmla v31.4s, v4.4s, v13.4s\n"
- "add x20, x14, x11\n"
+ "add x20, x11, x14\n"
"tbz %x[n_channels], #1, 23f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
@@ -475,7 +475,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"24:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: End
"fmla v29.4s, v7.4s, v12.4s\n"
"fmla v31.4s, v1.4s, v12.4s\n"
- "add x20, x12, x9\n"
+ "add x20, x10, x13\n"
"tbz %x[n_channels], #1, 25f\n"
"ldr d14, [x20], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
@@ -485,7 +485,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s14, [x20, #0x0]\n"
"26:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: End
"fmla v31.4s, v5.4s, v14.4s\n"
- "add x20, x10, XZR\n"
+ "add x20, x9, XZR\n"
"tbz %x[n_channels], #1, 27f\n"
"ldr d15, [x20], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
@@ -495,7 +495,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s15, [x20, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: End
"fmla v30.4s, v6.4s, v15.4s\n"
- "add x20, x14, x9\n"
+ "add x20, x11, x13\n"
"tbz %x[n_channels], #1, 29f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 30f\n"
@@ -506,7 +506,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"30:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: End
"fmla v29.4s, v8.4s, v11.4s\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "add x20, x10, x6\n"
+ "add x20, x9, x6\n"
"tbz %x[n_channels], #1, 31f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #0, 32f\n"
@@ -516,7 +516,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s13, [x20, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: End
"fmla v30.4s, v7.4s, v13.4s\n"
- "add x20, x12, x13\n"
+ "add x20, x10, x15\n"
"tbz %x[n_channels], #1, 33f\n"
"ldr d16, [x20], #0x8\n"
"tbz %x[n_channels], #0, 34f\n"
@@ -527,7 +527,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"34:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: End
"fmla v30.4s, v5.4s, v16.4s\n"
"fmla v31.4s, v3.4s, v16.4s\n"
- "add x20, x10, x11\n"
+ "add x20, x9, x14\n"
"tbz %x[n_channels], #1, 35f\n"
"ldr d14, [x20], #0x8\n"
"tbz %x[n_channels], #0, 36f\n"
@@ -537,7 +537,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"ldr s14, [x20, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: End
"fmla v31.4s, v7.4s, v14.4s\n"
- "add x20, x10, x13\n"
+ "add x20, x9, x15\n"
"tbz %x[n_channels], #1, 37f\n"
"ldr d15, [x20], #0x8\n"
"tbz %x[n_channels], #0, 38f\n"
@@ -548,7 +548,7 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"38:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: End
"fmla v30.4s, v8.4s, v15.4s\n"
"fmla v31.4s, v6.4s, v15.4s\n"
- "add x20, x10, x9\n"
+ "add x20, x9, x13\n"
"tbz %x[n_channels], #1, 39f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 40f\n"
@@ -561,18 +561,18 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"fmax v28.4s, v28.4s, v26.4s\n"
"fmax v29.4s, v29.4s, v26.4s\n"
"fmax v30.4s, v30.4s, v26.4s\n"
- "fmax v31.4s, v31.4s, v26.4s\n"
"fmin v28.4s, v28.4s, v27.4s\n"
+ "fmax v31.4s, v31.4s, v26.4s\n"
"fmin v29.4s, v29.4s, v27.4s\n"
"fmin v30.4s, v30.4s, v27.4s\n"
"fmin v31.4s, v31.4s, v27.4s\n"
"tbz %x[n_channels], #1, 41f\n"
"mov x21, x17\n"
"mov x20, x28\n"
- "st1 { v28.d }[0], [x21], x7\n"
- "st1 { v30.d }[0], [x20], x7\n"
"add x17, x17, #0x8\n"
"add x28, x28, #0x8\n"
+ "st1 { v28.d }[0], [x21], x7\n"
+ "st1 { v30.d }[0], [x20], x7\n"
"st1 { v29.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #0, 42f\n"
@@ -592,16 +592,16 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"st1 { v31.s }[0], [x20]\n"
"42:" // Tile loop: Oddments: Store: Bit 1: End
"43:" // Tile loop: End
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x27, x27, #0x1\n"
- "add x21, x23, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x27, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x23, x23, x21, LT\n"
- "csel x27, x27, XZR, LT\n"
- "cmp x23, x20\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x10, x10, #0x1\n"
+ "add x20, x11, #0x1\n"
+ "cmp x10, x22\n"
+ "csel x11, x11, x20, LT\n"
+ "csel x10, x10, XZR, LT\n"
+ "cmp x11, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index 24fe255dfb..3fc1899921 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,275 +87,275 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x25, #0x10\n" // cntb _, ALL, #1
- "lsr x24, %x[n_channels], #0x2\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "mov x8, #0x10\n" // cntb _, ALL, #1
+ "lsr x17, %x[n_channels], #0x2\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v27.4s }, [x20]\n"
- "add x13, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x12, x11, [x21, #0x0]\n"
- "ldp x10, x9, [x21, #0x10]\n"
- "mov x28, #0x0\n"
- "sub x22, XZR, x25\n"
- "cbz x24, 3f\n"
- "ldr q31, [x23, #0x0]\n"
- "ldr q0, [x23, #0x10]\n"
- "cmp x25, x24, LSL #4\n"
- "ldr q1, [x23, #0x20]\n"
- "ldr q2, [x23, #0x30]\n"
- "ldr q3, [x23, #0x40]\n"
- "ldr q4, [x23, #0x50]\n"
- "ldr q5, [x23, #0x60]\n"
- "ldr q6, [x23, #0x70]\n"
- "ldr q7, [x23, #0x80]\n"
- "ldr q8, [x23, #0x90]\n"
- "add x23, x23, #0xa0\n"
- "ldp x21, x20, [x13, #0x0]\n"
- "ldr q9, [x21, x28]\n"
- "ldr q10, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "ldr q11, [x21, x28]\n"
- "ldr q12, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x20]\n"
- "ldr q13, [x21, x28]\n"
- "ldr q14, [x20, x28]\n"
- "ldp x21, x20, [x13, #0x30]\n"
- "ldr q15, [x21, x28]\n"
- "ldr q16, [x20, x28]\n"
+ "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x14, #0x0\n"
+ "ldp x13, x12, [x22, #0x0]\n"
+ "ldp x11, x10, [x22, #0x10]\n"
+ "sub x9, XZR, x8\n"
+ "cbz x17, 3f\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "cmp x8, x17, LSL #4\n"
+ "ldr q1, [x16, #0x20]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldr q3, [x16, #0x40]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "ldr q5, [x16, #0x60]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "ldr q7, [x16, #0x80]\n"
+ "ldr q8, [x16, #0x90]\n"
+ "add x16, x16, #0xa0\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "ldr q9, [x27, x14]\n"
+ "ldr q10, [x26, x14]\n"
+ "ldr q11, [x25, x14]\n"
+ "ldr q12, [x24, x14]\n"
+ "ldr q13, [x23, x14]\n"
+ "ldr q14, [x22, x14]\n"
+ "ldr q15, [x21, x14]\n"
+ "ldr q16, [x20, x14]\n"
"bge 2f\n"
"1:" // Channel loop
- "mov v24.16b, v31.16b\n fmla v24.4s, v8.4s, v9.4s\n"
- "mov v23.16b, v31.16b\n fmla v23.4s, v6.4s, v9.4s\n"
- "ldr x21, [x13, #0x40]\n"
- "ldr x20, [x13, #0x48]\n"
- "fmla v24.4s, v0.4s, v10.4s\n"
- "fmla v23.4s, v1.4s, v12.4s\n"
- "ldr q20, [x20, x28]\n"
- "ldr x20, [x13, #0x50]\n"
- "fmla v24.4s, v1.4s, v11.4s\n"
- "ldr q19, [x21, x28]\n"
- "fmla v23.4s, v2.4s, v13.4s\n"
- "ldr q18, [x20, x28]\n"
- "fmla v24.4s, v3.4s, v14.4s\n"
- "fmla v23.4s, v0.4s, v16.4s\n"
- "ldr x20, [x13, #0x58]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v24.4s, v4.4s, v15.4s\n"
- "fmla v23.4s, v4.4s, v19.4s\n"
- "ldr x21, [x13, #0x78]\n"
- "ldr x20, [x13, #0x60]\n"
- "ldr q22, [x20, x28]\n"
- "fmla v24.4s, v2.4s, v16.4s\n"
- "fmla v23.4s, v5.4s, v20.4s\n"
- "ldr x20, [x13, #0x80]\n"
- "ldr q21, [x20, x28]\n"
- "mov v20.16b, v31.16b\n fmla v20.4s, v2.4s, v9.4s\n"
- "mov v19.16b, v31.16b\n fmla v19.4s, v0.4s, v9.4s\n"
- "ldr q31, [x23, #0x0]\n"
- "fmla v24.4s, v5.4s, v18.4s\n"
- "fmla v23.4s, v3.4s, v18.4s\n"
- "ldr q16, [x21, x28]\n"
- "ldr x20, [x13, #0x68]\n"
- "ldr q18, [x20, x28]\n"
- "fmla v20.4s, v3.4s, v17.4s\n"
- "fmla v19.4s, v4.4s, v16.4s\n"
- "ldr x20, [x13, #0x88]\n"
- "ldr q16, [x20, x28]\n"
- "fmla v20.4s, v0.4s, v22.4s\n"
- "ldr q0, [x23, #0x10]\n"
- "fmla v19.4s, v1.4s, v21.4s\n"
- "ldr x20, [x13, #0x70]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v20.4s, v4.4s, v18.4s\n"
- "fmla v19.4s, v5.4s, v16.4s\n"
- "ldr q4, [x23, #0x50]\n"
- "ldr x20, [x13, #0x98]\n"
- "fmla v24.4s, v6.4s, v22.4s\n"
- "fmla v20.4s, v1.4s, v17.4s\n"
- "ldr q16, [x20, x28]\n"
- "ldr q1, [x23, #0x20]\n"
- "fmla v19.4s, v2.4s, v16.4s\n"
- "fmla v24.4s, v7.4s, v17.4s\n"
- "ldr q2, [x23, #0x30]\n"
- "ldr x20, [x13, #0x90]\n"
- "fmla v23.4s, v7.4s, v21.4s\n"
- "fmla v23.4s, v8.4s, v16.4s\n"
- "ldr q16, [x20, x28]\n"
- "ldr x20, [x13, #0xa8]\n"
- "fmla v20.4s, v6.4s, v16.4s\n"
+ "mov v29.16b, v31.16b\n fmla v29.4s, v8.4s, v9.4s\n"
+ "mov v28.16b, v31.16b\n fmla v28.4s, v6.4s, v9.4s\n"
+ "ldr x28, [x15, #0x40]\n"
+ "ldr x21, [x15, #0x48]\n"
+ "ldr x25, [x15, #0x50]\n"
+ "ldr x20, [x15, #0x58]\n"
+ "mov v25.16b, v31.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "mov v24.16b, v31.16b\n fmla v24.4s, v0.4s, v9.4s\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr x27, [x15, #0x78]\n"
+ "add x9, x9, #0x10\n"
+ "ldr x24, [x15, #0x60]\n"
+ "ldr x26, [x15, #0x68]\n"
+ "fmla v29.4s, v0.4s, v10.4s\n"
+ "fmla v28.4s, v1.4s, v12.4s\n"
+ "ldr q21, [x21, x14]\n"
+ "ldr x23, [x15, #0x88]\n"
+ "ldr x22, [x15, #0x70]\n"
+ "fmla v29.4s, v1.4s, v11.4s\n"
+ "ldr q18, [x28, x14]\n"
+ "ldr x21, [x15, #0x80]\n"
+ "fmla v28.4s, v2.4s, v13.4s\n"
+ "ldr q20, [x25, x14]\n"
+ "ldr x25, [x15, #0x90]\n"
+ "fmla v29.4s, v3.4s, v14.4s\n"
+ "ldr q17, [x20, x14]\n"
+ "ldr x20, [x15, #0x98]\n"
+ "fmla v28.4s, v0.4s, v16.4s\n"
+ "fmla v25.4s, v3.4s, v17.4s\n"
+ "ldr q23, [x23, x14]\n"
+ "fmla v29.4s, v4.4s, v15.4s\n"
+ "ldr q22, [x24, x14]\n"
+ "ldr x24, [x15, #0xa0]\n"
+ "fmla v28.4s, v4.4s, v18.4s\n"
+ "ldr q17, [x26, x14]\n"
+ "ldr x23, [x15, #0xa8]\n"
+ "fmla v25.4s, v0.4s, v22.4s\n"
+ "ldr q0, [x16, #0x10]\n"
+ "fmla v29.4s, v2.4s, v16.4s\n"
+ "ldr q19, [x22, x14]\n"
+ "ldr x22, [x15, #0xb0]\n"
+ "fmla v28.4s, v5.4s, v21.4s\n"
+ "ldr q18, [x21, x14]\n"
+ "ldr x21, [x15, #0xc0]\n"
+ "fmla v25.4s, v4.4s, v17.4s\n"
+ "ldr q21, [x20, x14]\n"
+ "fmla v29.4s, v5.4s, v20.4s\n"
+ "fmla v28.4s, v3.4s, v20.4s\n"
+ "ldr q16, [x27, x14]\n"
+ "ldr x20, [x15, #0xb8]\n"
+ "fmla v24.4s, v4.4s, v16.4s\n"
+ "ldr q20, [x24, x14]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "fmla v29.4s, v6.4s, v22.4s\n"
+ "ldr q17, [x25, x14]\n"
+ "fmla v25.4s, v1.4s, v19.4s\n"
+ "fmla v24.4s, v1.4s, v18.4s\n"
+ "ldr q1, [x16, #0x20]\n"
+ "fmla v28.4s, v7.4s, v18.4s\n"
+ "fmla v29.4s, v7.4s, v19.4s\n"
+ "ldr q16, [x23, x14]\n"
+ "fmla v24.4s, v5.4s, v23.4s\n"
+ "ldr q19, [x22, x14]\n"
+ "fmla v25.4s, v6.4s, v17.4s\n"
+ "ldr q18, [x20, x14]\n"
+ "fmla v28.4s, v8.4s, v21.4s\n"
+ "fmax v29.4s, v29.4s, v26.4s\n"
+ "fmla v24.4s, v2.4s, v21.4s\n"
+ "ldr q17, [x21, x14]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ "add x14, x14, #0x10\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "fmla v25.4s, v7.4s, v20.4s\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "fmin v29.4s, v29.4s, v27.4s\n"
+ "ldr q9, [x27, x8]\n"
+ "ldr q10, [x26, x8]\n"
+ "fmla v24.4s, v3.4s, v16.4s\n"
+ "ldr q3, [x16, #0x40]\n"
+ "fmax v28.4s, v28.4s, v26.4s\n"
+ "ldr q12, [x24, x8]\n"
+ "ldr q13, [x23, x8]\n"
+ "fmla v25.4s, v5.4s, v16.4s\n"
+ "ldr q16, [x20, x8]\n"
+ "ldr q5, [x16, #0x60]\n"
+ "str q29, [x13, x9]\n"
+ "fmin v28.4s, v28.4s, v27.4s\n"
+ "fmla v24.4s, v7.4s, v19.4s\n"
+ "ldr q14, [x22, x8]\n"
+ "ldr q7, [x16, #0x80]\n"
+ "fmla v25.4s, v8.4s, v18.4s\n"
+ "str q28, [x12, x9]\n"
+ "fmla v24.4s, v6.4s, v18.4s\n"
+ "ldr q15, [x21, x8]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "fmax v25.4s, v25.4s, v26.4s\n"
+ "fmla v24.4s, v8.4s, v17.4s\n"
+ "ldr q11, [x25, x8]\n"
+ "ldr q8, [x16, #0x90]\n"
+ "add x8, x8, #0x10\n"
+ "add x16, x16, #0xa0\n"
+ "cmp x8, x17, LSL #4\n"
+ "fmin v25.4s, v25.4s, v27.4s\n"
"fmax v24.4s, v24.4s, v26.4s\n"
- "ldr q17, [x20, x28]\n"
- "ldr x20, [x13, #0xa0]\n"
- "fmla v19.4s, v3.4s, v17.4s\n"
- "fmax v23.4s, v23.4s, v26.4s\n"
- "ldr q16, [x20, x28]\n"
- "ldr q3, [x23, #0x40]\n"
- "fmla v20.4s, v7.4s, v16.4s\n"
- "fmla v20.4s, v5.4s, v17.4s\n"
- "ldr q5, [x23, #0x60]\n"
- "ldr x20, [x13, #0xb0]\n"
- "add x22, x22, #0x10\n"
"fmin v24.4s, v24.4s, v27.4s\n"
- "ldr q16, [x20, x28]\n"
- "ldr x20, [x13, #0xb8]\n"
- "fmla v19.4s, v7.4s, v16.4s\n"
- "fmin v23.4s, v23.4s, v27.4s\n"
- "ldr q16, [x20, x28]\n"
- "ldr q7, [x23, #0x80]\n"
- "fmla v19.4s, v6.4s, v16.4s\n"
- "fmla v20.4s, v8.4s, v16.4s\n"
- "ldr q6, [x23, #0x70]\n"
- "ldr x20, [x13, #0xc0]\n"
- "fmax v20.4s, v20.4s, v26.4s\n"
- "fmin v20.4s, v20.4s, v27.4s\n"
- "ldr q16, [x20, x28]\n"
- "fmla v19.4s, v8.4s, v16.4s\n"
- "ldr q8, [x23, #0x90]\n"
- "fmax v19.4s, v19.4s, v26.4s\n"
- "ldp x21, x20, [x13, #0x0]\n"
- "ldr q9, [x21, x25]\n"
- "fmin v19.4s, v19.4s, v27.4s\n"
- "add x28, x28, #0x10\n"
- "ldr q10, [x20, x25]\n"
- "ldp x21, x20, [x13, #0x10]\n"
- "str q24, [x12, x22]\n"
- "add x23, x23, #0xa0\n"
- "ldr q11, [x21, x25]\n"
- "ldr q12, [x20, x25]\n"
- "str q23, [x11, x22]\n"
- "ldp x21, x20, [x13, #0x20]\n"
- "ldr q13, [x21, x25]\n"
- "str q20, [x10, x22]\n"
- "ldr q14, [x20, x25]\n"
- "ldp x21, x20, [x13, #0x30]\n"
- "str q19, [x9, x22]\n"
- "ldr q15, [x21, x25]\n"
- "ldr q16, [x20, x25]\n"
- "add x25, x25, #0x10\n"
- "cmp x25, x24, LSL #4\n"
+ "str q25, [x11, x9]\n"
+ "str q24, [x10, x9]\n"
"blt 1b\n"
"2:" // Channel tail
- "mov v25.16b, v31.16b\n fmla v25.4s, v8.4s, v9.4s\n"
- "mov v24.16b, v31.16b\n fmla v24.4s, v6.4s, v9.4s\n"
- "ldr x21, [x13, #0x40]\n"
- "ldr x20, [x13, #0x48]\n"
- "fmla v25.4s, v0.4s, v10.4s\n"
- "fmla v24.4s, v1.4s, v12.4s\n"
- "ldr q20, [x20, x28]\n"
- "ldr x20, [x13, #0x50]\n"
- "fmla v25.4s, v1.4s, v11.4s\n"
- "ldr q18, [x21, x28]\n"
- "fmla v24.4s, v2.4s, v13.4s\n"
- "ldr q19, [x20, x28]\n"
- "fmla v25.4s, v3.4s, v14.4s\n"
- "fmla v24.4s, v0.4s, v16.4s\n"
- "ldr x20, [x13, #0x58]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v25.4s, v4.4s, v15.4s\n"
- "fmla v24.4s, v4.4s, v18.4s\n"
- "ldr x21, [x13, #0x78]\n"
- "ldr x20, [x13, #0x60]\n"
- "ldr q23, [x20, x28]\n"
- "fmla v25.4s, v2.4s, v16.4s\n"
- "fmla v24.4s, v5.4s, v20.4s\n"
- "ldr x20, [x13, #0x80]\n"
- "ldr q22, [x20, x28]\n"
- "mov v21.16b, v31.16b\n fmla v21.4s, v2.4s, v9.4s\n"
- "mov v20.16b, v31.16b\n fmla v20.4s, v0.4s, v9.4s\n"
- "ldr x20, [x13, #0x68]\n"
- "ldr q18, [x20, x28]\n"
- "fmla v25.4s, v5.4s, v19.4s\n"
+ "mov v28.16b, v31.16b\n fmla v28.4s, v8.4s, v9.4s\n"
+ "mov v29.16b, v31.16b\n fmla v29.4s, v6.4s, v9.4s\n"
+ "ldr x28, [x15, #0x40]\n"
+ "ldr x20, [x15, #0x48]\n"
+ "ldr x26, [x15, #0x50]\n"
+ "ldr x25, [x15, #0x58]\n"
+ "mov v25.16b, v31.16b\n fmla v25.4s, v2.4s, v9.4s\n"
+ "mov v24.16b, v31.16b\n fmla v24.4s, v0.4s, v9.4s\n"
+ "ldr x27, [x15, #0x78]\n"
+ "ldr x24, [x15, #0x60]\n"
+ "add x9, x9, #0x10\n"
+ "ldr x23, [x15, #0x68]\n"
+ "ldr x22, [x15, #0x70]\n"
+ "fmla v28.4s, v0.4s, v10.4s\n"
+ "fmla v29.4s, v1.4s, v12.4s\n"
+ "ldr q21, [x20, x14]\n"
+ "ldr x21, [x15, #0x88]\n"
+ "fmla v28.4s, v1.4s, v11.4s\n"
+ "ldr q18, [x28, x14]\n"
+ "ldr x20, [x15, #0x80]\n"
+ "fmla v29.4s, v2.4s, v13.4s\n"
+ "ldr q20, [x26, x14]\n"
+ "ldr x26, [x15, #0x90]\n"
+ "fmla v28.4s, v3.4s, v14.4s\n"
+ "ldr q17, [x25, x14]\n"
+ "ldr x25, [x15, #0x98]\n"
+ "fmla v29.4s, v0.4s, v16.4s\n"
+ "fmla v28.4s, v4.4s, v15.4s\n"
+ "ldr q23, [x24, x14]\n"
+ "ldr x24, [x15, #0xa0]\n"
+ "fmla v25.4s, v3.4s, v17.4s\n"
+ "ldr q22, [x21, x14]\n"
+ "fmla v29.4s, v4.4s, v18.4s\n"
+ "ldr q19, [x23, x14]\n"
+ "ldr x23, [x15, #0xa8]\n"
+ "fmla v28.4s, v2.4s, v16.4s\n"
+ "ldr q18, [x22, x14]\n"
+ "ldr x22, [x15, #0xb0]\n"
+ "fmla v25.4s, v0.4s, v23.4s\n"
+ "fmla v29.4s, v5.4s, v21.4s\n"
+ "ldr q17, [x20, x14]\n"
+ "ldr x21, [x15, #0xc0]\n"
+ "fmla v28.4s, v5.4s, v20.4s\n"
+ "fmla v29.4s, v3.4s, v20.4s\n"
+ "ldr q16, [x27, x14]\n"
+ "ldr x20, [x15, #0xb8]\n"
+ "fmla v24.4s, v4.4s, v16.4s\n"
+ "ldr q21, [x24, x14]\n"
+ "fmla v25.4s, v4.4s, v19.4s\n"
+ "ldr q20, [x25, x14]\n"
+ "fmla v28.4s, v6.4s, v23.4s\n"
+ "ldr q16, [x26, x14]\n"
+ "fmla v29.4s, v7.4s, v17.4s\n"
+ "fmla v24.4s, v1.4s, v17.4s\n"
+ "fmla v25.4s, v1.4s, v18.4s\n"
+ "fmla v28.4s, v7.4s, v18.4s\n"
+ "ldr q19, [x23, x14]\n"
+ "fmla v29.4s, v8.4s, v20.4s\n"
+ "fmla v24.4s, v5.4s, v22.4s\n"
+ "ldr q18, [x22, x14]\n"
+ "fmla v25.4s, v6.4s, v16.4s\n"
+ "ldr q17, [x20, x14]\n"
+ "fmax v28.4s, v28.4s, v26.4s\n"
+ "fmax v29.4s, v29.4s, v26.4s\n"
+ "fmla v24.4s, v2.4s, v20.4s\n"
+ "ldr q16, [x21, x14]\n"
+ "add x14, x14, #0x10\n"
+ "fmla v25.4s, v7.4s, v21.4s\n"
+ "fmin v28.4s, v28.4s, v27.4s\n"
+ "fmin v29.4s, v29.4s, v27.4s\n"
"fmla v24.4s, v3.4s, v19.4s\n"
- "ldr q16, [x21, x28]\n"
- "fmla v21.4s, v3.4s, v17.4s\n"
- "fmla v20.4s, v4.4s, v16.4s\n"
- "ldr x20, [x13, #0x88]\n"
- "ldr q16, [x20, x28]\n"
- "fmla v21.4s, v0.4s, v23.4s\n"
- "fmla v20.4s, v1.4s, v22.4s\n"
- "ldr x20, [x13, #0x70]\n"
- "ldr q17, [x20, x28]\n"
- "ldr x20, [x13, #0x98]\n"
- "fmla v21.4s, v4.4s, v18.4s\n"
- "ldr q19, [x20, x28]\n"
- "fmla v20.4s, v5.4s, v16.4s\n"
- "fmla v25.4s, v6.4s, v23.4s\n"
- "ldr x20, [x13, #0x90]\n"
- "ldr q16, [x20, x28]\n"
- "fmla v21.4s, v1.4s, v17.4s\n"
- "ldr x20, [x13, #0xa8]\n"
- "fmla v20.4s, v2.4s, v19.4s\n"
- "fmla v25.4s, v7.4s, v17.4s\n"
- "ldr q18, [x20, x28]\n"
- "ldr x20, [x13, #0xa0]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v21.4s, v6.4s, v16.4s\n"
- "fmla v20.4s, v3.4s, v18.4s\n"
- "ldr x20, [x13, #0xb0]\n"
- "ldr q16, [x20, x28]\n"
- "fmla v21.4s, v7.4s, v17.4s\n"
- "fmla v20.4s, v7.4s, v16.4s\n"
- "ldr x20, [x13, #0xb8]\n"
- "ldr q17, [x20, x28]\n"
- "fmla v24.4s, v7.4s, v22.4s\n"
- "fmla v21.4s, v5.4s, v18.4s\n"
- "ldr x20, [x13, #0xc0]\n"
- "fmla v20.4s, v6.4s, v17.4s\n"
- "fmla v24.4s, v8.4s, v19.4s\n"
- "ldr q16, [x20, x28]\n"
- "fmla v21.4s, v8.4s, v17.4s\n"
- "fmla v20.4s, v8.4s, v16.4s\n"
+ "str q28, [x13, x9]\n"
+ "fmla v25.4s, v5.4s, v19.4s\n"
+ "str q29, [x12, x9]\n"
+ "fmla v24.4s, v7.4s, v18.4s\n"
+ "fmla v25.4s, v8.4s, v17.4s\n"
+ "fmla v24.4s, v6.4s, v17.4s\n"
"fmax v25.4s, v25.4s, v26.4s\n"
- "add x22, x22, #0x10\n"
- "fmax v24.4s, v24.4s, v26.4s\n"
- "fmax v21.4s, v21.4s, v26.4s\n"
- "add x28, x28, #0x10\n"
- "fmax v20.4s, v20.4s, v26.4s\n"
"fmin v25.4s, v25.4s, v27.4s\n"
- "str q25, [x12, x22]\n"
+ "fmla v24.4s, v8.4s, v16.4s\n"
+ "str q25, [x11, x9]\n"
+ "fmax v24.4s, v24.4s, v26.4s\n"
"fmin v24.4s, v24.4s, v27.4s\n"
- "fmin v21.4s, v21.4s, v27.4s\n"
- "str q24, [x11, x22]\n"
- "fmin v20.4s, v20.4s, v27.4s\n"
- "str q21, [x10, x22]\n"
- "str q20, [x9, x22]\n"
+ "str q24, [x10, x9]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 42f\n"
- "ldr q31, [x23, #0x0]\n"
- "ldr q0, [x23, #0x10]\n"
- "mov x20, x28\n"
+ "ldr q31, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
+ "mov x20, x14\n"
+ "ldr q1, [x16, #0x20]\n"
+ "ldr q2, [x16, #0x30]\n"
+ "ldr q3, [x16, #0x40]\n"
+ "ldr q4, [x16, #0x50]\n"
+ "ldr q5, [x16, #0x60]\n"
+ "ldr q6, [x16, #0x70]\n"
+ "add x13, x13, x20\n"
"add x12, x12, x20\n"
- "ldr q1, [x23, #0x20]\n"
- "ldr q2, [x23, #0x30]\n"
+ "ldr q7, [x16, #0x80]\n"
+ "ldr q8, [x16, #0x90]\n"
"add x11, x11, x20\n"
"add x10, x10, x20\n"
- "ldr q3, [x23, #0x40]\n"
- "ldr q4, [x23, #0x50]\n"
- "add x9, x9, x20\n"
- "ldr q5, [x23, #0x60]\n"
- "ldr q6, [x23, #0x70]\n"
- "ldr q7, [x23, #0x80]\n"
- "ldr q8, [x23, #0x90]\n"
- "ldr x27, [x13, #0x0]\n"
- "ldr x26, [x13, #0x8]\n"
- "add x27, x27, x28\n"
- "add x26, x26, x28\n"
- "ldr x25, [x13, #0x10]\n"
- "ldr x24, [x13, #0x18]\n"
- "add x25, x25, x28\n"
- "add x24, x24, x28\n"
- "ldr x23, [x13, #0x20]\n"
- "ldr x22, [x13, #0x28]\n"
- "add x23, x23, x28\n"
- "add x22, x22, x28\n"
- "ldr x21, [x13, #0x30]\n"
- "ldr x20, [x13, #0x38]\n"
- "add x21, x21, x28\n"
- "add x20, x20, x28\n"
+ "ldr x27, [x15, #0x0]\n"
+ "ldr x26, [x15, #0x8]\n"
+ "ldr x25, [x15, #0x10]\n"
+ "ldr x24, [x15, #0x18]\n"
+ "ldr x23, [x15, #0x20]\n"
+ "ldr x22, [x15, #0x28]\n"
+ "ldr x21, [x15, #0x30]\n"
+ "ldr x20, [x15, #0x38]\n"
+ "add x27, x27, x14\n"
+ "add x26, x26, x14\n"
+ "add x25, x25, x14\n"
+ "add x24, x24, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 4f\n"
"ld1 { v9.d }[0], [x27], #0x8\n"
"ld1 { v10.d }[0], [x26], #0x8\n"
@@ -386,19 +386,19 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"ld1 { v16.s }[0], [x20], #0x4\n"
"5:" // Oddments: Load inputs (2, 2), (0, 0), (0, 1), (0, 3), (0, 4), (1, 0), (1, 1), (0, 2): Bit 1: End
"mov v28.16b, v31.16b\n fmla v28.4s, v8.4s, v9.4s\n"
- "fmla v28.4s, v0.4s, v10.4s\n"
- "ldr x20, [x13, #0x40]\n"
- "add x20, x20, x28\n"
"mov v29.16b, v31.16b\n fmla v29.4s, v6.4s, v9.4s\n"
- "fmla v28.4s, v1.4s, v11.4s\n"
+ "ldr x20, [x15, #0x40]\n"
+ "mov v30.16b, v31.16b\n fmla v30.4s, v2.4s, v9.4s\n"
+ "fmla v31.4s, v0.4s, v9.4s\n"
+ "add x20, x20, x14\n"
+ "fmla v28.4s, v0.4s, v10.4s\n"
"fmla v29.4s, v1.4s, v12.4s\n"
- "fmla v28.4s, v3.4s, v14.4s\n"
+ "fmla v28.4s, v1.4s, v11.4s\n"
"fmla v29.4s, v2.4s, v13.4s\n"
+ "fmla v28.4s, v3.4s, v14.4s\n"
+ "fmla v29.4s, v0.4s, v16.4s\n"
"fmla v28.4s, v4.4s, v15.4s\n"
- "mov v30.16b, v31.16b\n fmla v30.4s, v2.4s, v9.4s\n"
- "fmla v31.4s, v0.4s, v9.4s\n"
"fmla v28.4s, v2.4s, v16.4s\n"
- "fmla v29.4s, v0.4s, v16.4s\n"
"tbz %x[n_channels], #1, 6f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 7f\n"
@@ -407,9 +407,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"6:" // Oddments: Load input (1, 3): Bit 1: Unset
"ld1 { v11.s }[0], [x20], #0x4\n"
"7:" // Oddments: Load input (1, 3): Bit 1: End
- "ldr x20, [x13, #0x48]\n"
+ "ldr x20, [x15, #0x48]\n"
"fmla v29.4s, v4.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 8f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 9f\n"
@@ -418,9 +418,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"8:" // Oddments: Load input (1, 4): Bit 1: Unset
"ld1 { v12.s }[0], [x20], #0x4\n"
"9:" // Oddments: Load input (1, 4): Bit 1: End
- "ldr x20, [x13, #0x50]\n"
+ "ldr x20, [x15, #0x50]\n"
"fmla v29.4s, v5.4s, v12.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 10f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
@@ -429,10 +429,10 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"10:" // Oddments: Load input (1, 2): Bit 1: Unset
"ld1 { v13.s }[0], [x20], #0x4\n"
"11:" // Oddments: Load input (1, 2): Bit 1: End
- "ldr x20, [x13, #0x58]\n"
+ "ldr x20, [x15, #0x58]\n"
"fmla v28.4s, v5.4s, v13.4s\n"
"fmla v29.4s, v3.4s, v13.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 12f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
@@ -441,9 +441,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"12:" // Oddments: Load input (3, 0): Bit 1: Unset
"ld1 { v14.s }[0], [x20], #0x4\n"
"13:" // Oddments: Load input (3, 0): Bit 1: End
- "ldr x20, [x13, #0x60]\n"
+ "ldr x20, [x15, #0x60]\n"
"fmla v30.4s, v3.4s, v14.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 14f\n"
"ld1 { v15.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 15f\n"
@@ -452,10 +452,10 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"14:" // Oddments: Load input (2, 0): Bit 1: Unset
"ld1 { v15.s }[0], [x20], #0x4\n"
"15:" // Oddments: Load input (2, 0): Bit 1: End
- "ldr x20, [x13, #0x68]\n"
+ "ldr x20, [x15, #0x68]\n"
"fmla v28.4s, v6.4s, v15.4s\n"
"fmla v30.4s, v0.4s, v15.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 16f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
@@ -464,9 +464,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"16:" // Oddments: Load input (3, 1): Bit 1: Unset
"ld1 { v11.s }[0], [x20], #0x4\n"
"17:" // Oddments: Load input (3, 1): Bit 1: End
- "ldr x20, [x13, #0x70]\n"
+ "ldr x20, [x15, #0x70]\n"
"fmla v30.4s, v4.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 18f\n"
"ld1 { v16.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 19f\n"
@@ -475,10 +475,10 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"18:" // Oddments: Load input (2, 1): Bit 1: Unset
"ld1 { v16.s }[0], [x20], #0x4\n"
"19:" // Oddments: Load input (2, 1): Bit 1: End
- "ldr x20, [x13, #0x78]\n"
+ "ldr x20, [x15, #0x78]\n"
"fmla v28.4s, v7.4s, v16.4s\n"
"fmla v30.4s, v1.4s, v16.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 20f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
@@ -487,9 +487,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"20:" // Oddments: Load input (3, 3): Bit 1: Unset
"ld1 { v13.s }[0], [x20], #0x4\n"
"21:" // Oddments: Load input (3, 3): Bit 1: End
- "ldr x20, [x13, #0x80]\n"
+ "ldr x20, [x15, #0x80]\n"
"fmla v31.4s, v4.4s, v13.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 22f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 23f\n"
@@ -498,10 +498,10 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"22:" // Oddments: Load input (2, 3): Bit 1: Unset
"ld1 { v12.s }[0], [x20], #0x4\n"
"23:" // Oddments: Load input (2, 3): Bit 1: End
- "ldr x20, [x13, #0x88]\n"
+ "ldr x20, [x15, #0x88]\n"
"fmla v29.4s, v7.4s, v12.4s\n"
"fmla v31.4s, v1.4s, v12.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 24f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
@@ -510,9 +510,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"24:" // Oddments: Load input (3, 4): Bit 1: Unset
"ld1 { v14.s }[0], [x20], #0x4\n"
"25:" // Oddments: Load input (3, 4): Bit 1: End
- "ldr x20, [x13, #0x90]\n"
+ "ldr x20, [x15, #0x90]\n"
"fmla v31.4s, v5.4s, v14.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 26f\n"
"ld1 { v15.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
@@ -521,9 +521,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"26:" // Oddments: Load input (4, 0): Bit 1: Unset
"ld1 { v15.s }[0], [x20], #0x4\n"
"27:" // Oddments: Load input (4, 0): Bit 1: End
- "ldr x20, [x13, #0x98]\n"
+ "ldr x20, [x15, #0x98]\n"
"fmla v30.4s, v6.4s, v15.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 28f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 29f\n"
@@ -532,10 +532,10 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"28:" // Oddments: Load input (2, 4): Bit 1: Unset
"ld1 { v11.s }[0], [x20], #0x4\n"
"29:" // Oddments: Load input (2, 4): Bit 1: End
- "ldr x20, [x13, #0xa0]\n"
+ "ldr x20, [x15, #0xa0]\n"
"fmla v29.4s, v8.4s, v11.4s\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 30f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 31f\n"
@@ -544,9 +544,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"30:" // Oddments: Load input (4, 1): Bit 1: Unset
"ld1 { v13.s }[0], [x20], #0x4\n"
"31:" // Oddments: Load input (4, 1): Bit 1: End
- "ldr x20, [x13, #0xa8]\n"
+ "ldr x20, [x15, #0xa8]\n"
"fmla v30.4s, v7.4s, v13.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 32f\n"
"ld1 { v16.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 33f\n"
@@ -555,10 +555,10 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"32:" // Oddments: Load input (3, 2): Bit 1: Unset
"ld1 { v16.s }[0], [x20], #0x4\n"
"33:" // Oddments: Load input (3, 2): Bit 1: End
- "ldr x20, [x13, #0xb0]\n"
+ "ldr x20, [x15, #0xb0]\n"
"fmla v30.4s, v5.4s, v16.4s\n"
"fmla v31.4s, v3.4s, v16.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 34f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 35f\n"
@@ -567,9 +567,9 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"34:" // Oddments: Load input (4, 3): Bit 1: Unset
"ld1 { v14.s }[0], [x20], #0x4\n"
"35:" // Oddments: Load input (4, 3): Bit 1: End
- "ldr x20, [x13, #0xb8]\n"
+ "ldr x20, [x15, #0xb8]\n"
"fmla v31.4s, v7.4s, v14.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 36f\n"
"ld1 { v15.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 37f\n"
@@ -578,10 +578,10 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"36:" // Oddments: Load input (4, 2): Bit 1: Unset
"ld1 { v15.s }[0], [x20], #0x4\n"
"37:" // Oddments: Load input (4, 2): Bit 1: End
- "ldr x20, [x13, #0xc0]\n"
+ "ldr x20, [x15, #0xc0]\n"
"fmla v30.4s, v8.4s, v15.4s\n"
"fmla v31.4s, v6.4s, v15.4s\n"
- "add x20, x20, x28\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 38f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 39f\n"
@@ -594,32 +594,32 @@ void a64_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"fmax v28.4s, v28.4s, v26.4s\n"
"fmax v29.4s, v29.4s, v26.4s\n"
"fmax v30.4s, v30.4s, v26.4s\n"
- "fmax v31.4s, v31.4s, v26.4s\n"
"fmin v28.4s, v28.4s, v27.4s\n"
+ "fmax v31.4s, v31.4s, v26.4s\n"
"fmin v29.4s, v29.4s, v27.4s\n"
"fmin v30.4s, v30.4s, v27.4s\n"
"fmin v31.4s, v31.4s, v27.4s\n"
"tbz %x[n_channels], #1, 40f\n"
- "st1 { v28.d }[0], [x12], #0x8\n"
- "st1 { v29.d }[0], [x11], #0x8\n"
- "st1 { v30.d }[0], [x10], #0x8\n"
- "st1 { v31.d }[0], [x9], #0x8\n"
+ "st1 { v28.d }[0], [x13], #0x8\n"
+ "st1 { v29.d }[0], [x12], #0x8\n"
+ "st1 { v30.d }[0], [x11], #0x8\n"
+ "st1 { v31.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 41f\n"
- "st1 { v28.s }[2], [x12], #0x4\n"
- "st1 { v29.s }[2], [x11], #0x4\n"
- "st1 { v30.s }[2], [x10], #0x4\n"
- "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v28.s }[2], [x13], #0x4\n"
+ "st1 { v29.s }[2], [x12], #0x4\n"
+ "st1 { v30.s }[2], [x11], #0x4\n"
+ "st1 { v31.s }[2], [x10], #0x4\n"
"b 41f\n"
"40:" // Oddments: Store: Bit 1: Unset
- "st1 { v28.s }[0], [x12], #0x4\n"
- "st1 { v29.s }[0], [x11], #0x4\n"
- "st1 { v30.s }[0], [x10], #0x4\n"
- "st1 { v31.s }[0], [x9], #0x4\n"
+ "st1 { v28.s }[0], [x13], #0x4\n"
+ "st1 { v29.s }[0], [x12], #0x4\n"
+ "st1 { v30.s }[0], [x11], #0x4\n"
+ "st1 { v31.s }[0], [x10], #0x4\n"
"41:" // Oddments: Store: Bit 1: End
"42:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 3426fbc3f9..e35f4fdf4e 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,251 +87,251 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
);
__asm__ __volatile__(
- "mov x27, #0x0\n"
- "mov x26, #0x0\n"
+ "mov x11, #0x0\n"
+ "mov x10, #0x0\n"
"1:" // Tile loop
- "str x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x23, #0x2\n"
- "mov x25, #0x2\n"
- "str x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "str x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x9, #0x2\n"
+ "mov x28, #0x2\n"
+ "str x10, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x27, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
"ldr x2, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x27, x24\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x26, x2, x22\n" // offset += tile_j * ld_input_col
+ "mov x26, #0x10\n" // cntb _, ALL, #1
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"ldr x3, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "lsl x2, x2, #0x2\n"
- "mul x20, x27, x21\n" // offset = tile_i * ld_output_row
- "ldr x4, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x5, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x6, x2, x2\n"
- "mul x22, x22, x23\n" // offset *= kernel_stride * output_size
- "add x4, x4, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x7, x4, x24, LSL #2\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x26, x3, x20\n" // offset += tile_j * ld_output_col
- "add x17, x7, x24, LSL #2\n"
- "mov x23, #0x10\n" // cntb _, ALL, #1
- "mul x20, x20, x25\n" // offset *= output_tile_size
- "lsr x22, %x[n_channels], #0x2\n"
- "add x16, x17, x24, LSL #2\n"
- "add x15, x6, x2\n"
- "add x14, x16, x24, LSL #2\n"
- "add x13, x15, x2\n"
- "add x5, x5, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "lsr x24, %x[n_channels], #0x2\n"
"add x20, %x[params_struct], %[offsetof_args_min]\n"
"ld1r { v27.4s }, [x20]\n"
+ "ldr x4, [%x[params_struct], %[offsetof_args_inptr]]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "mov x23, #0x0\n"
"ld1r { v15.4s }, [x20]\n"
- "add x12, x14, x24, LSL #2\n"
- "add x11, x13, x2\n"
- "add x10, x5, x21, LSL #2\n"
+ "mul x22, x11, x27\n" // offset = tile_i * ld_input_row
+ "ldr x5, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "sub x21, XZR, x26\n"
+ "mul x20, x11, x25\n" // offset = tile_i * ld_output_row
+ "ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x22, x10, x2, x22\n" // offset += tile_j * ld_input_col
+ "lsl x2, x2, #0x2\n"
+ "madd x20, x10, x3, x20\n" // offset += tile_j * ld_output_col
"lsl x3, x3, #0x2\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "cbz x22, 4f\n"
- "ldr q25, [x8, #0x0]\n"
- "ldr q0, [x8, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
- "ldr q1, [x8, #0x20]\n"
- "ldr q2, [x8, #0x30]\n"
- "ldr q3, [x8, #0x40]\n"
- "ldr q4, [x8, #0x50]\n"
- "add x8, x8, #0x60\n"
+ "mul x22, x22, x9\n" // offset *= kernel_stride * output_size
+ "add x7, x2, x2\n"
+ "add x8, x7, x2\n"
+ "add x17, x8, x2\n"
+ "mul x20, x20, x28\n" // offset *= output_tile_size
+ "add x16, x17, x2\n"
+ "add x4, x4, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x15, x4, x27, LSL #2\n"
+ "add x14, x15, x27, LSL #2\n"
+ "add x13, x14, x27, LSL #2\n"
+ "add x12, x13, x27, LSL #2\n"
+ "add x5, x5, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x11, x12, x27, LSL #2\n"
+ "add x10, x5, x25, LSL #2\n"
+ "cbz x24, 4f\n"
+ "ldr q25, [x6, #0x0]\n"
+ "ldr q0, [x6, #0x10]\n"
+ "cmp x26, x24, LSL #4\n"
+ "ldr q1, [x6, #0x20]\n"
+ "ldr q2, [x6, #0x30]\n"
+ "ldr q3, [x6, #0x40]\n"
+ "ldr q4, [x6, #0x50]\n"
+ "add x6, x6, #0x60\n"
"ld1 { v5.4s }, [x4]\n"
"ldr q6, [x4, x2]\n"
- "ld1 { v7.4s }, [x7]\n"
- "ldr q8, [x7, x2]\n"
- "ldr q9, [x4, x6]\n"
- "ldr q13, [x7, x6]\n"
- "ldr q11, [x4, x15]\n"
- "ldr q12, [x4, x13]\n"
- "ldr q10, [x7, x11]\n"
- "ld1 { v14.4s }, [x17]\n"
+ "ld1 { v7.4s }, [x15]\n"
+ "ldr q8, [x15, x2]\n"
+ "ldr q9, [x4, x7]\n"
+ "ldr q13, [x15, x7]\n"
+ "ldr q11, [x4, x8]\n"
+ "ldr q12, [x4, x17]\n"
+ "ldr q10, [x15, x16]\n"
+ "ld1 { v14.4s }, [x14]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
"mov v30.16b, v25.16b\n fmla v30.4s, v0.4s, v5.4s\n"
- "ldr q23, [x7, x15]\n"
+ "ldr q23, [x15, x8]\n"
"mov v31.16b, v25.16b\n fmla v31.4s, v0.4s, v6.4s\n"
- "add x23, x23, #0x10\n"
+ "add x26, x26, #0x10\n"
"mov v29.16b, v25.16b\n fmla v29.4s, v0.4s, v7.4s\n"
"mov v28.16b, v25.16b\n fmla v28.4s, v0.4s, v8.4s\n"
- "ldr q19, [x8, #0x0]\n"
- "ldr q25, [x8, #0x140]\n"
+ "ldr q19, [x6, #0x0]\n"
+ "ldr q25, [x6, #0x140]\n"
+ "cmp x26, x24, LSL #4\n"
+ "add x21, x21, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v30.4s, v1.4s, v6.4s\n"
- "ldr q21, [x7, x13]\n"
+ "ldr q21, [x15, x17]\n"
+ "add x15, x15, #0x10\n"
"fmla v31.4s, v1.4s, v9.4s\n"
- "add x7, x7, #0x10\n"
"fmla v29.4s, v1.4s, v8.4s\n"
"fmla v28.4s, v1.4s, v13.4s\n"
- "ldr q1, [x8, #0x10]\n"
- "cmp x23, x22, LSL #4\n"
+ "ldr q1, [x6, #0x10]\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "ldr q18, [x4, x11]\n"
- "fmla v31.4s, v2.4s, v11.4s\n"
+ "ldr q18, [x4, x16]\n"
"add x4, x4, #0x10\n"
+ "fmla v31.4s, v2.4s, v11.4s\n"
"fmla v29.4s, v2.4s, v13.4s\n"
"fmla v28.4s, v2.4s, v23.4s\n"
- "ldr q17, [x8, #0x20]\n"
- "add x20, x20, #0x10\n"
+ "ldr q17, [x6, #0x20]\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "ldr q6, [x17, x2]\n"
+ "ldr q6, [x14, x2]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
- "add x21, x21, #0x10\n"
"fmla v29.4s, v3.4s, v23.4s\n"
"fmla v28.4s, v3.4s, v21.4s\n"
- "ldr q16, [x8, #0x30]\n"
+ "ldr q16, [x6, #0x30]\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "ldr q2, [x17, x6]\n"
+ "ldr q2, [x14, x7]\n"
"fmla v31.4s, v4.4s, v18.4s\n"
- "ldr q0, [x17, x15]\n"
+ "ldr q0, [x14, x8]\n"
"fmla v29.4s, v4.4s, v21.4s\n"
"fmla v28.4s, v4.4s, v10.4s\n"
- "ldr q20, [x8, #0x40]\n"
+ "ldr q20, [x6, #0x40]\n"
"fmla v30.4s, v19.4s, v7.4s\n"
- "ld1 { v7.4s }, [x7]\n"
+ "ld1 { v7.4s }, [x15]\n"
"fmla v31.4s, v19.4s, v8.4s\n"
"fmla v29.4s, v19.4s, v14.4s\n"
"fmla v28.4s, v19.4s, v6.4s\n"
- "ldr q19, [x8, #0x50]\n"
+ "ldr q19, [x6, #0x50]\n"
"fmla v30.4s, v1.4s, v8.4s\n"
- "ldr q26, [x17, x11]\n"
+ "ldr q26, [x14, x16]\n"
"fmla v31.4s, v1.4s, v13.4s\n"
"fmla v29.4s, v1.4s, v6.4s\n"
"fmla v28.4s, v1.4s, v2.4s\n"
- "ldr q18, [x8, #0x60]\n"
+ "ldr q18, [x6, #0x60]\n"
"fmla v30.4s, v17.4s, v13.4s\n"
- "ldr q1, [x17, x13]\n"
+ "ldr q1, [x14, x17]\n"
+ "add x14, x14, #0x10\n"
"fmla v31.4s, v17.4s, v23.4s\n"
- "add x17, x17, #0x10\n"
"fmla v29.4s, v17.4s, v2.4s\n"
"fmla v28.4s, v17.4s, v0.4s\n"
- "ldr q17, [x8, #0x70]\n"
+ "ldr q17, [x6, #0x70]\n"
"fmla v30.4s, v16.4s, v23.4s\n"
- "ld1 { v24.4s }, [x16]\n"
+ "ld1 { v24.4s }, [x13]\n"
"fmla v31.4s, v16.4s, v21.4s\n"
"fmla v29.4s, v16.4s, v0.4s\n"
"fmla v28.4s, v16.4s, v1.4s\n"
- "ldr q16, [x8, #0x80]\n"
+ "ldr q16, [x6, #0x80]\n"
"fmla v30.4s, v20.4s, v21.4s\n"
- "ldr q23, [x16, x2]\n"
+ "ldr q23, [x13, x2]\n"
"fmla v31.4s, v20.4s, v10.4s\n"
- "ldr q22, [x16, x6]\n"
+ "ldr q22, [x13, x7]\n"
"fmla v29.4s, v20.4s, v1.4s\n"
"fmla v28.4s, v20.4s, v26.4s\n"
- "ldr q21, [x8, #0x90]\n"
+ "ldr q21, [x6, #0x90]\n"
"fmla v30.4s, v19.4s, v14.4s\n"
- "ldr q5, [x16, x11]\n"
+ "ldr q5, [x13, x16]\n"
"fmla v31.4s, v19.4s, v6.4s\n"
"fmla v29.4s, v19.4s, v24.4s\n"
"fmla v28.4s, v19.4s, v23.4s\n"
- "ldr q11, [x8, #0xa0]\n"
+ "ldr q11, [x6, #0xa0]\n"
"fmla v30.4s, v18.4s, v6.4s\n"
- "ldr q20, [x16, x15]\n"
+ "ldr q20, [x13, x8]\n"
"fmla v31.4s, v18.4s, v2.4s\n"
"fmla v29.4s, v18.4s, v23.4s\n"
"fmla v28.4s, v18.4s, v22.4s\n"
- "ldr q18, [x8, #0xb0]\n"
+ "ldr q18, [x6, #0xb0]\n"
"fmla v30.4s, v17.4s, v2.4s\n"
- "ldr q19, [x16, x13]\n"
+ "ldr q19, [x13, x17]\n"
+ "add x13, x13, #0x10\n"
"fmla v31.4s, v17.4s, v0.4s\n"
- "add x16, x16, #0x10\n"
"fmla v29.4s, v17.4s, v22.4s\n"
"fmla v28.4s, v17.4s, v20.4s\n"
- "ldr q17, [x8, #0xc0]\n"
+ "ldr q17, [x6, #0xc0]\n"
"fmla v30.4s, v16.4s, v0.4s\n"
- "ld1 { v0.4s }, [x14]\n"
+ "ld1 { v0.4s }, [x12]\n"
"fmla v31.4s, v16.4s, v1.4s\n"
"fmla v29.4s, v16.4s, v20.4s\n"
"fmla v28.4s, v16.4s, v19.4s\n"
- "ldr q16, [x8, #0xd0]\n"
+ "ldr q16, [x6, #0xd0]\n"
"fmla v30.4s, v21.4s, v1.4s\n"
- "ldr q4, [x14, x2]\n"
+ "ldr q4, [x12, x2]\n"
"fmla v31.4s, v21.4s, v26.4s\n"
- "ldr q12, [x14, x13]\n"
+ "ldr q12, [x12, x17]\n"
"fmla v29.4s, v21.4s, v19.4s\n"
"fmla v28.4s, v21.4s, v5.4s\n"
- "ldr q13, [x8, #0xe0]\n"
+ "ldr q13, [x6, #0xe0]\n"
"fmla v30.4s, v11.4s, v24.4s\n"
- "ldr q6, [x14, x6]\n"
+ "ldr q6, [x12, x7]\n"
"fmla v31.4s, v11.4s, v23.4s\n"
"fmla v29.4s, v11.4s, v0.4s\n"
"fmla v28.4s, v11.4s, v4.4s\n"
- "ldr q24, [x8, #0xf0]\n"
+ "ldr q24, [x6, #0xf0]\n"
"fmla v30.4s, v18.4s, v23.4s\n"
- "ldr q26, [x14, x15]\n"
+ "ldr q26, [x12, x8]\n"
"fmla v31.4s, v18.4s, v22.4s\n"
"fmla v29.4s, v18.4s, v4.4s\n"
"fmla v28.4s, v18.4s, v6.4s\n"
- "ldr q23, [x8, #0x100]\n"
+ "ldr q23, [x6, #0x100]\n"
"fmla v30.4s, v17.4s, v22.4s\n"
- "ldr q22, [x14, x11]\n"
+ "ldr q22, [x12, x16]\n"
+ "add x12, x12, #0x10\n"
"fmla v31.4s, v17.4s, v20.4s\n"
- "add x14, x14, #0x10\n"
"fmla v29.4s, v17.4s, v6.4s\n"
"fmla v28.4s, v17.4s, v26.4s\n"
- "ldr q21, [x8, #0x110]\n"
+ "ldr q21, [x6, #0x110]\n"
"fmla v30.4s, v16.4s, v20.4s\n"
- "ld1 { v18.4s }, [x12]\n"
+ "ld1 { v18.4s }, [x11]\n"
"fmla v31.4s, v16.4s, v19.4s\n"
"fmla v29.4s, v16.4s, v26.4s\n"
"fmla v28.4s, v16.4s, v12.4s\n"
- "ldr q20, [x8, #0x120]\n"
+ "ldr q20, [x6, #0x120]\n"
"fmla v30.4s, v13.4s, v19.4s\n"
- "ldr q17, [x12, x2]\n"
+ "ldr q17, [x11, x2]\n"
"fmla v31.4s, v13.4s, v5.4s\n"
- "ld1 { v14.4s }, [x17]\n"
+ "ld1 { v14.4s }, [x14]\n"
"fmla v29.4s, v13.4s, v12.4s\n"
"fmla v28.4s, v13.4s, v22.4s\n"
- "ldr q19, [x8, #0x130]\n"
+ "ldr q19, [x6, #0x130]\n"
"fmla v30.4s, v24.4s, v0.4s\n"
- "ldr q16, [x12, x6]\n"
+ "ldr q16, [x11, x7]\n"
"fmla v31.4s, v24.4s, v4.4s\n"
"fmla v29.4s, v24.4s, v18.4s\n"
- "ldr q18, [x12, x15]\n"
+ "ldr q18, [x11, x8]\n"
"fmla v28.4s, v24.4s, v17.4s\n"
- "ldr q0, [x8, #0x150]\n"
+ "ldr q0, [x6, #0x150]\n"
"fmla v30.4s, v23.4s, v4.4s\n"
- "ldr q13, [x7, x6]\n"
+ "ldr q13, [x15, x7]\n"
"fmla v31.4s, v23.4s, v6.4s\n"
"fmla v29.4s, v23.4s, v17.4s\n"
- "ldr q17, [x12, x13]\n"
+ "ldr q17, [x11, x17]\n"
"fmla v28.4s, v23.4s, v16.4s\n"
- "ldr q1, [x8, #0x160]\n"
+ "ldr q1, [x6, #0x160]\n"
"fmla v30.4s, v21.4s, v6.4s\n"
"ld1 { v5.4s }, [x4]\n"
"fmla v31.4s, v21.4s, v26.4s\n"
"fmla v29.4s, v21.4s, v16.4s\n"
- "ldr q16, [x12, x11]\n"
+ "ldr q16, [x11, x16]\n"
+ "add x11, x11, #0x10\n"
"fmla v28.4s, v21.4s, v18.4s\n"
- "ldr q2, [x8, #0x170]\n"
+ "ldr q2, [x6, #0x170]\n"
"fmla v30.4s, v20.4s, v26.4s\n"
"ldr q6, [x4, x2]\n"
"fmla v31.4s, v20.4s, v12.4s\n"
- "add x12, x12, #0x10\n"
"fmla v29.4s, v20.4s, v18.4s\n"
- "ldr q11, [x4, x15]\n"
+ "ldr q11, [x4, x8]\n"
"fmla v28.4s, v20.4s, v17.4s\n"
- "ldr q3, [x8, #0x180]\n"
+ "ldr q3, [x6, #0x180]\n"
"fmla v30.4s, v19.4s, v12.4s\n"
- "ldr q8, [x7, x2]\n"
+ "ldr q8, [x15, x2]\n"
"fmla v31.4s, v19.4s, v22.4s\n"
- "ldr q10, [x7, x11]\n"
+ "ldr q10, [x15, x16]\n"
"fmla v29.4s, v19.4s, v17.4s\n"
- "ldr q12, [x4, x13]\n"
+ "ldr q12, [x4, x17]\n"
"fmla v28.4s, v19.4s, v16.4s\n"
- "ldr q9, [x4, x6]\n"
- "ldr q4, [x8, #0x190]\n"
+ "ldr q9, [x4, x7]\n"
+ "ldr q4, [x6, #0x190]\n"
+ "add x6, x6, #0x1a0\n"
"fmax v30.4s, v30.4s, v27.4s\n"
"fmax v31.4s, v31.4s, v27.4s\n"
- "add x8, x8, #0x1a0\n"
"fmax v29.4s, v29.4s, v27.4s\n"
"fmax v28.4s, v28.4s, v27.4s\n"
"fmin v30.4s, v30.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v15.4s\n"
- "st1 { v30.4s }, [x5]\n"
"fmin v29.4s, v29.4s, v15.4s\n"
"fmin v28.4s, v28.4s, v15.4s\n"
+ "st1 { v30.4s }, [x5]\n"
"str q31, [x5, x3]\n"
"add x5, x5, #0x10\n"
"st1 { v29.4s }, [x10]\n"
@@ -340,163 +340,163 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"blt 2b\n"
"3:" // Tile loop: Channel tail
"mov v31.16b, v25.16b\n fmla v31.4s, v0.4s, v5.4s\n"
- "ldr q22, [x7, x15]\n"
+ "ldr q22, [x15, x8]\n"
"mov v5.16b, v25.16b\n fmla v5.4s, v0.4s, v6.4s\n"
"mov v30.16b, v25.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v29.16b, v25.16b\n fmla v29.4s, v0.4s, v8.4s\n"
- "ldr q19, [x8, #0x0]\n"
+ "ldr q19, [x6, #0x0]\n"
"fmla v31.4s, v1.4s, v6.4s\n"
- "ldr q21, [x7, x13]\n"
+ "ldr q21, [x15, x17]\n"
+ "add x15, x15, #0x10\n"
"fmla v5.4s, v1.4s, v9.4s\n"
- "add x7, x7, #0x10\n"
"fmla v30.4s, v1.4s, v8.4s\n"
"fmla v29.4s, v1.4s, v13.4s\n"
- "ldr q18, [x8, #0x10]\n"
+ "ldr q18, [x6, #0x10]\n"
"fmla v31.4s, v2.4s, v9.4s\n"
- "ldr q16, [x4, x11]\n"
- "fmla v5.4s, v2.4s, v11.4s\n"
+ "ldr q16, [x4, x16]\n"
"add x4, x4, #0x10\n"
+ "fmla v5.4s, v2.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v13.4s\n"
"fmla v29.4s, v2.4s, v22.4s\n"
- "ldr q17, [x8, #0x20]\n"
+ "ldr q17, [x6, #0x20]\n"
"fmla v31.4s, v3.4s, v11.4s\n"
- "ldr q6, [x17, x2]\n"
+ "ldr q6, [x14, x2]\n"
"fmla v5.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v22.4s\n"
"fmla v29.4s, v3.4s, v21.4s\n"
- "ldr q20, [x8, #0x30]\n"
+ "ldr q20, [x6, #0x30]\n"
"fmla v31.4s, v4.4s, v12.4s\n"
- "ldr q2, [x17, x6]\n"
+ "ldr q2, [x14, x7]\n"
"fmla v5.4s, v4.4s, v16.4s\n"
- "ldr q28, [x17, x15]\n"
+ "ldr q28, [x14, x8]\n"
"fmla v30.4s, v4.4s, v21.4s\n"
"fmla v29.4s, v4.4s, v10.4s\n"
- "ldr q16, [x8, #0x40]\n"
+ "ldr q16, [x6, #0x40]\n"
"fmla v31.4s, v19.4s, v7.4s\n"
"fmla v5.4s, v19.4s, v8.4s\n"
"fmla v30.4s, v19.4s, v14.4s\n"
"fmla v29.4s, v19.4s, v6.4s\n"
- "ldr q19, [x8, #0x50]\n"
+ "ldr q19, [x6, #0x50]\n"
"fmla v31.4s, v18.4s, v8.4s\n"
- "ldr q1, [x17, x11]\n"
+ "ldr q1, [x14, x16]\n"
"fmla v5.4s, v18.4s, v13.4s\n"
"fmla v30.4s, v18.4s, v6.4s\n"
"fmla v29.4s, v18.4s, v2.4s\n"
- "ldr q18, [x8, #0x60]\n"
+ "ldr q18, [x6, #0x60]\n"
"fmla v31.4s, v17.4s, v13.4s\n"
- "ldr q26, [x17, x13]\n"
+ "ldr q26, [x14, x17]\n"
+ "add x14, x14, #0x10\n"
"fmla v5.4s, v17.4s, v22.4s\n"
- "add x17, x17, #0x10\n"
"fmla v30.4s, v17.4s, v2.4s\n"
"fmla v29.4s, v17.4s, v28.4s\n"
- "ldr q17, [x8, #0x70]\n"
+ "ldr q17, [x6, #0x70]\n"
"fmla v31.4s, v20.4s, v22.4s\n"
- "ld1 { v25.4s }, [x16]\n"
+ "ld1 { v25.4s }, [x13]\n"
"fmla v5.4s, v20.4s, v21.4s\n"
"fmla v30.4s, v20.4s, v28.4s\n"
"fmla v29.4s, v20.4s, v26.4s\n"
- "ldr q24, [x8, #0x80]\n"
+ "ldr q24, [x6, #0x80]\n"
"fmla v31.4s, v16.4s, v21.4s\n"
- "ldr q23, [x16, x2]\n"
+ "ldr q23, [x13, x2]\n"
"fmla v5.4s, v16.4s, v10.4s\n"
- "ldr q0, [x16, x6]\n"
+ "ldr q0, [x13, x7]\n"
"fmla v30.4s, v16.4s, v26.4s\n"
"fmla v29.4s, v16.4s, v1.4s\n"
- "ldr q22, [x8, #0x90]\n"
+ "ldr q22, [x6, #0x90]\n"
"fmla v31.4s, v19.4s, v14.4s\n"
- "ldr q16, [x16, x11]\n"
+ "ldr q16, [x13, x16]\n"
"fmla v5.4s, v19.4s, v6.4s\n"
"fmla v30.4s, v19.4s, v25.4s\n"
"fmla v29.4s, v19.4s, v23.4s\n"
- "ldr q21, [x8, #0xa0]\n"
+ "ldr q21, [x6, #0xa0]\n"
"fmla v31.4s, v18.4s, v6.4s\n"
- "ldr q20, [x16, x15]\n"
+ "ldr q20, [x13, x8]\n"
"fmla v5.4s, v18.4s, v2.4s\n"
"fmla v30.4s, v18.4s, v23.4s\n"
"fmla v29.4s, v18.4s, v0.4s\n"
- "ldr q18, [x8, #0xb0]\n"
+ "ldr q18, [x6, #0xb0]\n"
"fmla v31.4s, v17.4s, v2.4s\n"
- "ldr q19, [x16, x13]\n"
+ "ldr q19, [x13, x17]\n"
+ "add x13, x13, #0x10\n"
"fmla v5.4s, v17.4s, v28.4s\n"
- "add x16, x16, #0x10\n"
"fmla v30.4s, v17.4s, v0.4s\n"
"fmla v29.4s, v17.4s, v20.4s\n"
- "ldr q17, [x8, #0xc0]\n"
+ "ldr q17, [x6, #0xc0]\n"
"fmla v31.4s, v24.4s, v28.4s\n"
- "ld1 { v7.4s }, [x14]\n"
+ "ld1 { v7.4s }, [x12]\n"
"fmla v5.4s, v24.4s, v26.4s\n"
"fmla v30.4s, v24.4s, v20.4s\n"
"fmla v29.4s, v24.4s, v19.4s\n"
- "ldr q2, [x8, #0xd0]\n"
+ "ldr q2, [x6, #0xd0]\n"
"fmla v31.4s, v22.4s, v26.4s\n"
- "ldr q28, [x14, x2]\n"
+ "ldr q28, [x12, x2]\n"
"fmla v5.4s, v22.4s, v1.4s\n"
- "ldr q13, [x14, x13]\n"
+ "ldr q13, [x12, x17]\n"
"fmla v30.4s, v22.4s, v19.4s\n"
"fmla v29.4s, v22.4s, v16.4s\n"
- "ldr q14, [x8, #0xe0]\n"
+ "ldr q14, [x6, #0xe0]\n"
"fmla v31.4s, v21.4s, v25.4s\n"
- "ldr q26, [x14, x6]\n"
+ "ldr q26, [x12, x7]\n"
"fmla v5.4s, v21.4s, v23.4s\n"
"fmla v30.4s, v21.4s, v7.4s\n"
"fmla v29.4s, v21.4s, v28.4s\n"
- "ldr q25, [x8, #0xf0]\n"
+ "ldr q25, [x6, #0xf0]\n"
"fmla v31.4s, v18.4s, v23.4s\n"
- "ldr q24, [x14, x15]\n"
+ "ldr q24, [x12, x8]\n"
"fmla v5.4s, v18.4s, v0.4s\n"
"fmla v30.4s, v18.4s, v28.4s\n"
"fmla v29.4s, v18.4s, v26.4s\n"
- "ldr q23, [x8, #0x100]\n"
+ "ldr q23, [x6, #0x100]\n"
"fmla v31.4s, v17.4s, v0.4s\n"
- "ldr q22, [x14, x11]\n"
+ "ldr q22, [x12, x16]\n"
+ "add x12, x12, #0x10\n"
"fmla v5.4s, v17.4s, v20.4s\n"
- "add x14, x14, #0x10\n"
"fmla v30.4s, v17.4s, v26.4s\n"
"fmla v29.4s, v17.4s, v24.4s\n"
- "ldr q21, [x8, #0x110]\n"
+ "ldr q21, [x6, #0x110]\n"
"fmla v31.4s, v2.4s, v20.4s\n"
- "ld1 { v18.4s }, [x12]\n"
+ "ld1 { v18.4s }, [x11]\n"
"fmla v5.4s, v2.4s, v19.4s\n"
"fmla v30.4s, v2.4s, v24.4s\n"
"fmla v29.4s, v2.4s, v13.4s\n"
- "ldr q20, [x8, #0x120]\n"
+ "ldr q20, [x6, #0x120]\n"
"fmla v31.4s, v14.4s, v19.4s\n"
- "ldr q17, [x12, x2]\n"
+ "ldr q17, [x11, x2]\n"
"fmla v5.4s, v14.4s, v16.4s\n"
"fmla v30.4s, v14.4s, v13.4s\n"
"fmla v29.4s, v14.4s, v22.4s\n"
- "ldr q19, [x8, #0x130]\n"
- "add x8, x8, #0x140\n"
+ "ldr q19, [x6, #0x130]\n"
+ "add x6, x6, #0x140\n"
"fmla v31.4s, v25.4s, v7.4s\n"
- "ldr q16, [x12, x6]\n"
+ "ldr q16, [x11, x7]\n"
"fmla v5.4s, v25.4s, v28.4s\n"
"fmla v30.4s, v25.4s, v18.4s\n"
- "ldr q18, [x12, x15]\n"
+ "ldr q18, [x11, x8]\n"
"fmla v29.4s, v25.4s, v17.4s\n"
"fmla v31.4s, v23.4s, v28.4s\n"
"fmla v5.4s, v23.4s, v26.4s\n"
"fmla v30.4s, v23.4s, v17.4s\n"
- "ldr q17, [x12, x13]\n"
+ "ldr q17, [x11, x17]\n"
"fmla v29.4s, v23.4s, v16.4s\n"
"fmla v31.4s, v21.4s, v26.4s\n"
"fmla v5.4s, v21.4s, v24.4s\n"
"fmla v30.4s, v21.4s, v16.4s\n"
- "ldr q16, [x12, x11]\n"
+ "ldr q16, [x11, x16]\n"
+ "add x11, x11, #0x10\n"
"fmla v29.4s, v21.4s, v18.4s\n"
- "add x12, x12, #0x10\n"
"fmla v31.4s, v20.4s, v24.4s\n"
"fmla v5.4s, v20.4s, v13.4s\n"
"fmla v30.4s, v20.4s, v18.4s\n"
"fmla v29.4s, v20.4s, v17.4s\n"
"fmla v31.4s, v19.4s, v13.4s\n"
"fmla v5.4s, v19.4s, v22.4s\n"
- "fmax v31.4s, v31.4s, v27.4s\n"
"fmla v30.4s, v19.4s, v17.4s\n"
"fmla v29.4s, v19.4s, v16.4s\n"
+ "fmax v31.4s, v31.4s, v27.4s\n"
"fmax v5.4s, v5.4s, v27.4s\n"
+ "fmin v31.4s, v31.4s, v15.4s\n"
"fmax v30.4s, v30.4s, v27.4s\n"
"fmax v29.4s, v29.4s, v27.4s\n"
- "fmin v31.4s, v31.4s, v15.4s\n"
"fmin v5.4s, v5.4s, v15.4s\n"
"st1 { v31.4s }, [x5]\n"
"fmin v30.4s, v30.4s, v15.4s\n"
@@ -509,23 +509,23 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"4:" // Tile loop: Oddments
"tst %x[n_channels], #0x3\n"
"beq 61f\n"
- "ldr q25, [x8, #0x0]\n"
- "ldr q0, [x8, #0x10]\n"
+ "ldr q25, [x6, #0x0]\n"
+ "ldr q0, [x6, #0x10]\n"
"add x9, x4, XZR\n"
"add x28, x4, x2\n"
- "ldr q1, [x8, #0x20]\n"
- "ldr q2, [x8, #0x30]\n"
- "add x27, x7, XZR\n"
- "add x26, x7, x2\n"
- "ldr q3, [x8, #0x40]\n"
- "ldr q4, [x8, #0x50]\n"
- "add x25, x4, x6\n"
- "add x24, x7, x6\n"
- "add x23, x4, x15\n"
- "add x22, x4, x13\n"
- "add x21, x7, x11\n"
- "add x20, x17, XZR\n"
- "add x8, x8, #0x60\n"
+ "ldr q1, [x6, #0x20]\n"
+ "ldr q2, [x6, #0x30]\n"
+ "add x27, x15, XZR\n"
+ "add x26, x15, x2\n"
+ "ldr q3, [x6, #0x40]\n"
+ "ldr q4, [x6, #0x50]\n"
+ "add x25, x4, x7\n"
+ "add x24, x15, x7\n"
+ "add x23, x4, x8\n"
+ "add x22, x4, x17\n"
+ "add x21, x15, x16\n"
+ "add x20, x14, XZR\n"
+ "add x6, x6, #0x60\n"
"tbz %x[n_channels], #1, 5f\n"
"ldr d5, [x9], #0x8\n"
"ldr d6, [x28], #0x8\n"
@@ -563,7 +563,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"6:" // Tile loop: Oddments: Load inputs: (0, 0), (0, 1), (1, 0), (1, 1), (0, 2), (1, 2), (0, 3), (0, 4), (1, 5), (2, 0): Bit 1: End
"mov v28.16b, v25.16b\n fmla v28.4s, v0.4s, v5.4s\n"
"mov v29.16b, v25.16b\n fmla v29.4s, v0.4s, v6.4s\n"
- "add x20, x7, x15\n"
+ "add x20, x15, x8\n"
"mov v30.16b, v25.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v31.16b, v25.16b\n fmla v31.4s, v0.4s, v8.4s\n"
"fmla v28.4s, v1.4s, v6.4s\n"
@@ -583,7 +583,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"8:" // Tile loop: Oddments: Load inputs: (1, 3): Bit 1: End
"fmla v31.4s, v2.4s, v5.4s\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x7, x13\n"
+ "add x20, x15, x17\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v5.4s\n"
"tbz %x[n_channels], #1, 9f\n"
@@ -596,7 +596,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"10:" // Tile loop: Oddments: Load inputs: (1, 4): Bit 1: End
"fmla v31.4s, v3.4s, v6.4s\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "add x20, x4, x11\n"
+ "add x20, x4, x16\n"
"tbz %x[n_channels], #1, 11f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #0, 12f\n"
@@ -605,13 +605,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"11:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 1: Unset
"ldr s9, [x20, #0x0]\n"
"12:" // Tile loop: Oddments: Load inputs: (0, 5): Bit 1: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x6, #0x0]\n"
"fmla v29.4s, v4.4s, v9.4s\n"
"fmla v30.4s, v4.4s, v6.4s\n"
- "add x20, x17, x2\n"
+ "add x20, x14, x2\n"
"fmla v31.4s, v4.4s, v10.4s\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v0.4s, v7.4s\n"
- "add x8, x8, #0x10\n"
"fmla v29.4s, v0.4s, v8.4s\n"
"fmla v30.4s, v0.4s, v14.4s\n"
"tbz %x[n_channels], #1, 13f\n"
@@ -622,13 +622,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"13:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: Unset
"ldr s11, [x20, #0x0]\n"
"14:" // Tile loop: Oddments: Load inputs: (2, 1): Bit 1: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x6, #0x0]\n"
"fmla v31.4s, v0.4s, v11.4s\n"
+ "add x20, x14, x7\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v1.4s, v8.4s\n"
- "add x20, x17, x6\n"
"fmla v29.4s, v1.4s, v13.4s\n"
"fmla v30.4s, v1.4s, v11.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 15f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 16f\n"
@@ -637,13 +637,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"15:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 1: Unset
"ldr s12, [x20, #0x0]\n"
"16:" // Tile loop: Oddments: Load inputs: (2, 2): Bit 1: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x6, #0x0]\n"
"fmla v31.4s, v1.4s, v12.4s\n"
+ "add x20, x14, x8\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v2.4s, v13.4s\n"
- "add x20, x17, x15\n"
"fmla v29.4s, v2.4s, v5.4s\n"
"fmla v30.4s, v2.4s, v12.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 17f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #0, 18f\n"
@@ -652,13 +652,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"17:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: Unset
"ldr s9, [x20, #0x0]\n"
"18:" // Tile loop: Oddments: Load inputs: (2, 3): Bit 1: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x6, #0x0]\n"
"fmla v31.4s, v2.4s, v9.4s\n"
+ "add x20, x14, x17\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v3.4s, v5.4s\n"
- "add x20, x17, x13\n"
"fmla v29.4s, v3.4s, v6.4s\n"
"fmla v30.4s, v3.4s, v9.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 19f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #0, 20f\n"
@@ -667,13 +667,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"19:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: Unset
"ldr s13, [x20, #0x0]\n"
"20:" // Tile loop: Oddments: Load inputs: (2, 4): Bit 1: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
"fmla v31.4s, v3.4s, v13.4s\n"
+ "add x20, x14, x16\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v4.4s, v6.4s\n"
- "add x20, x17, x11\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v13.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 21f\n"
"ldr d8, [x20], #0x8\n"
"tbz %x[n_channels], #0, 22f\n"
@@ -682,12 +682,12 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"21:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 1: Unset
"ldr s8, [x20, #0x0]\n"
"22:" // Tile loop: Oddments: Load inputs: (2, 5): Bit 1: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x6, #0x0]\n"
"fmla v31.4s, v4.4s, v8.4s\n"
+ "add x20, x13, XZR\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v0.4s, v14.4s\n"
- "add x20, x16, XZR\n"
"fmla v29.4s, v0.4s, v11.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 23f\n"
"ldr d5, [x20], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
@@ -697,7 +697,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"ldr s5, [x20, #0x0]\n"
"24:" // Tile loop: Oddments: Load inputs: (3, 0): Bit 1: End
"fmla v30.4s, v0.4s, v5.4s\n"
- "add x20, x16, x2\n"
+ "add x20, x13, x2\n"
"tbz %x[n_channels], #1, 25f\n"
"ldr d6, [x20], #0x8\n"
"tbz %x[n_channels], #0, 26f\n"
@@ -706,13 +706,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"25:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: Unset
"ldr s6, [x20, #0x0]\n"
"26:" // Tile loop: Oddments: Load inputs: (3, 1): Bit 1: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x6, #0x0]\n"
"fmla v31.4s, v0.4s, v6.4s\n"
+ "add x20, x13, x7\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v1.4s, v11.4s\n"
- "add x20, x16, x6\n"
"fmla v29.4s, v1.4s, v12.4s\n"
"fmla v30.4s, v1.4s, v6.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 27f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 28f\n"
@@ -721,13 +721,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"27:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: Unset
"ldr s10, [x20, #0x0]\n"
"28:" // Tile loop: Oddments: Load inputs: (3, 2): Bit 1: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x6, #0x0]\n"
"fmla v31.4s, v1.4s, v10.4s\n"
+ "add x20, x13, x8\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v2.4s, v12.4s\n"
- "add x20, x16, x15\n"
"fmla v29.4s, v2.4s, v9.4s\n"
"fmla v30.4s, v2.4s, v10.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 29f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 30f\n"
@@ -736,13 +736,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"29:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: Unset
"ldr s11, [x20, #0x0]\n"
"30:" // Tile loop: Oddments: Load inputs: (3, 3): Bit 1: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x6, #0x0]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
+ "add x20, x13, x17\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v3.4s, v9.4s\n"
- "add x20, x16, x13\n"
"fmla v29.4s, v3.4s, v13.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 31f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 32f\n"
@@ -751,13 +751,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"31:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: Unset
"ldr s12, [x20, #0x0]\n"
"32:" // Tile loop: Oddments: Load inputs: (3, 4): Bit 1: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
+ "add x20, x13, x16\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v4.4s, v13.4s\n"
- "add x20, x16, x11\n"
"fmla v29.4s, v4.4s, v8.4s\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 33f\n"
"ldr d14, [x20], #0x8\n"
"tbz %x[n_channels], #0, 34f\n"
@@ -766,12 +766,12 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"33:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 1: Unset
"ldr s14, [x20, #0x0]\n"
"34:" // Tile loop: Oddments: Load inputs: (3, 5): Bit 1: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x6, #0x0]\n"
"fmla v31.4s, v4.4s, v14.4s\n"
+ "add x20, x12, XZR\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v0.4s, v5.4s\n"
- "add x20, x14, XZR\n"
"fmla v29.4s, v0.4s, v6.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 35f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #0, 36f\n"
@@ -781,7 +781,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"ldr s9, [x20, #0x0]\n"
"36:" // Tile loop: Oddments: Load inputs: (4, 0): Bit 1: End
"fmla v30.4s, v0.4s, v9.4s\n"
- "add x20, x14, x2\n"
+ "add x20, x12, x2\n"
"tbz %x[n_channels], #1, 37f\n"
"ldr d13, [x20], #0x8\n"
"tbz %x[n_channels], #0, 38f\n"
@@ -790,13 +790,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"37:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: Unset
"ldr s13, [x20, #0x0]\n"
"38:" // Tile loop: Oddments: Load inputs: (4, 1): Bit 1: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x6, #0x0]\n"
"fmla v31.4s, v0.4s, v13.4s\n"
+ "add x20, x12, x7\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v1.4s, v6.4s\n"
- "add x20, x14, x6\n"
"fmla v29.4s, v1.4s, v10.4s\n"
"fmla v30.4s, v1.4s, v13.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 39f\n"
"ldr d5, [x20], #0x8\n"
"tbz %x[n_channels], #0, 40f\n"
@@ -805,13 +805,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"39:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: Unset
"ldr s5, [x20, #0x0]\n"
"40:" // Tile loop: Oddments: Load inputs: (4, 2): Bit 1: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x6, #0x0]\n"
"fmla v31.4s, v1.4s, v5.4s\n"
+ "add x20, x12, x8\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v2.4s, v10.4s\n"
- "add x20, x14, x15\n"
"fmla v29.4s, v2.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v5.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 41f\n"
"ldr d6, [x20], #0x8\n"
"tbz %x[n_channels], #0, 42f\n"
@@ -820,13 +820,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"41:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: Unset
"ldr s6, [x20, #0x0]\n"
"42:" // Tile loop: Oddments: Load inputs: (4, 3): Bit 1: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x6, #0x0]\n"
"fmla v31.4s, v2.4s, v6.4s\n"
+ "add x20, x12, x17\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x14, x13\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v6.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 43f\n"
"ldr d8, [x20], #0x8\n"
"tbz %x[n_channels], #0, 44f\n"
@@ -835,13 +835,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"43:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: Unset
"ldr s8, [x20, #0x0]\n"
"44:" // Tile loop: Oddments: Load inputs: (4, 4): Bit 1: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
"fmla v31.4s, v3.4s, v8.4s\n"
+ "add x20, x12, x16\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "add x20, x14, x11\n"
"fmla v29.4s, v4.4s, v14.4s\n"
"fmla v30.4s, v4.4s, v8.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 45f\n"
"ldr d10, [x20], #0x8\n"
"tbz %x[n_channels], #0, 46f\n"
@@ -850,12 +850,12 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"45:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 1: Unset
"ldr s10, [x20, #0x0]\n"
"46:" // Tile loop: Oddments: Load inputs: (4, 5): Bit 1: End
- "ldr q0, [x8, #0x0]\n"
+ "ldr q0, [x6, #0x0]\n"
"fmla v31.4s, v4.4s, v10.4s\n"
+ "add x20, x11, XZR\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v0.4s, v9.4s\n"
- "add x20, x12, XZR\n"
"fmla v29.4s, v0.4s, v13.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 47f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 48f\n"
@@ -865,7 +865,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"ldr s11, [x20, #0x0]\n"
"48:" // Tile loop: Oddments: Load inputs: (5, 0): Bit 1: End
"fmla v30.4s, v0.4s, v11.4s\n"
- "add x20, x12, x2\n"
+ "add x20, x11, x2\n"
"tbz %x[n_channels], #1, 49f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 50f\n"
@@ -874,13 +874,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"49:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 1: Unset
"ldr s12, [x20, #0x0]\n"
"50:" // Tile loop: Oddments: Load inputs: (5, 1): Bit 1: End
- "ldr q1, [x8, #0x0]\n"
+ "ldr q1, [x6, #0x0]\n"
"fmla v31.4s, v0.4s, v12.4s\n"
+ "add x20, x11, x7\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v1.4s, v13.4s\n"
- "add x20, x12, x6\n"
"fmla v29.4s, v1.4s, v5.4s\n"
"fmla v30.4s, v1.4s, v12.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 51f\n"
"ldr d9, [x20], #0x8\n"
"tbz %x[n_channels], #0, 52f\n"
@@ -889,13 +889,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"51:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 1: Unset
"ldr s9, [x20, #0x0]\n"
"52:" // Tile loop: Oddments: Load inputs: (5, 2): Bit 1: End
- "ldr q2, [x8, #0x0]\n"
+ "ldr q2, [x6, #0x0]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
+ "add x20, x11, x8\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v2.4s, v5.4s\n"
- "add x20, x12, x15\n"
"fmla v29.4s, v2.4s, v6.4s\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 53f\n"
"ldr d11, [x20], #0x8\n"
"tbz %x[n_channels], #0, 54f\n"
@@ -904,13 +904,13 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"53:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 1: Unset
"ldr s11, [x20, #0x0]\n"
"54:" // Tile loop: Oddments: Load inputs: (5, 3): Bit 1: End
- "ldr q3, [x8, #0x0]\n"
+ "ldr q3, [x6, #0x0]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
+ "add x20, x11, x17\n"
+ "add x6, x6, #0x10\n"
"fmla v28.4s, v3.4s, v6.4s\n"
- "add x20, x12, x13\n"
"fmla v29.4s, v3.4s, v8.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "add x8, x8, #0x10\n"
"tbz %x[n_channels], #1, 55f\n"
"ldr d12, [x20], #0x8\n"
"tbz %x[n_channels], #0, 56f\n"
@@ -919,10 +919,10 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"55:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 1: Unset
"ldr s12, [x20, #0x0]\n"
"56:" // Tile loop: Oddments: Load inputs: (5, 4): Bit 1: End
- "ldr q4, [x8, #0x0]\n"
+ "ldr q4, [x6, #0x0]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
+ "add x20, x11, x16\n"
"fmla v28.4s, v4.4s, v8.4s\n"
- "add x20, x12, x11\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v12.4s\n"
"tbz %x[n_channels], #1, 57f\n"
@@ -937,18 +937,18 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"fmax v28.4s, v28.4s, v27.4s\n"
"fmax v29.4s, v29.4s, v27.4s\n"
"fmax v30.4s, v30.4s, v27.4s\n"
- "fmax v31.4s, v31.4s, v27.4s\n"
"fmin v28.4s, v28.4s, v15.4s\n"
+ "fmax v31.4s, v31.4s, v27.4s\n"
"fmin v29.4s, v29.4s, v15.4s\n"
"fmin v30.4s, v30.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v15.4s\n"
"tbz %x[n_channels], #1, 59f\n"
"mov x21, x5\n"
"mov x20, x10\n"
- "st1 { v28.d }[0], [x21], x3\n"
- "st1 { v30.d }[0], [x20], x3\n"
"add x5, x5, #0x8\n"
"add x10, x10, #0x8\n"
+ "st1 { v28.d }[0], [x21], x3\n"
+ "st1 { v30.d }[0], [x20], x3\n"
"st1 { v29.d }[0], [x21]\n"
"st1 { v31.d }[0], [x20]\n"
"tbz %x[n_channels], #0, 60f\n"
@@ -968,16 +968,16 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"st1 { v31.s }[0], [x20]\n"
"60:" // Tile loop: Oddments: Store: Bit 1: End
"61:" // Tile loop: End
- "ldr x26, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x26, x26, #0x1\n"
- "add x21, x27, #0x1\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x26, x20\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x27, x27, x21, LT\n"
- "csel x26, x26, XZR, LT\n"
- "cmp x27, x20\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x10, x10, #0x1\n"
+ "add x20, x11, #0x1\n"
+ "cmp x10, x22\n"
+ "csel x11, x11, x20, LT\n"
+ "csel x10, x10, XZR, LT\n"
+ "cmp x11, x21\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 32939eb6dc..d50b396261 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -98,478 +98,478 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x21, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "mov x17, #0x10\n" // cntb _, ALL, #1
- "lsr x9, %x[n_channels], #0x2\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "mov x8, #0x10\n" // cntb _, ALL, #1
+ "lsr x17, %x[n_channels], #0x2\n"
"ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x20, %x[params_struct], %[offsetof_args_min]\n"
- "ld1r { v27.4s }, [x20]\n"
+ "add x21, %x[params_struct], %[offsetof_args_min]\n"
"add x20, %x[params_struct], %[offsetof_args_max]\n"
+ "ld1r { v27.4s }, [x21]\n"
"ld1r { v15.4s }, [x20]\n"
"add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "ldp x12, x11, [x21, #0x10]\n"
- "mov x10, #0x0\n"
- "sub x28, XZR, x17\n"
- "cbz x9, 3f\n"
+ "mov x14, #0x0\n"
+ "ldp x13, x12, [x22, #0x0]\n"
+ "ldp x11, x10, [x22, #0x10]\n"
+ "sub x9, XZR, x8\n"
+ "cbz x17, 3f\n"
"ldr q26, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "cmp x17, x9, LSL #4\n"
+ "cmp x8, x17, LSL #4\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
"ldr q3, [x16, #0x40]\n"
"ldr q4, [x16, #0x50]\n"
"add x16, x16, #0x60\n"
- "ldp x21, x20, [x15, #0x0]\n"
- "ldr q5, [x21, x10]\n"
- "ldr q6, [x20, x10]\n"
- "ldp x21, x20, [x15, #0x10]\n"
- "ldr q7, [x21, x10]\n"
- "ldr q8, [x20, x10]\n"
- "ldp x21, x20, [x15, #0x20]\n"
- "ldr q9, [x21, x10]\n"
- "ldr q13, [x20, x10]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ "ldp x23, x22, [x15, #0x20]\n"
"ldp x21, x20, [x15, #0x30]\n"
- "ldr q11, [x21, x10]\n"
- "ldr q12, [x20, x10]\n"
+ "ldr q5, [x27, x14]\n"
+ "ldr q6, [x26, x14]\n"
+ "ldr q7, [x25, x14]\n"
+ "ldr q8, [x24, x14]\n"
+ "ldr q9, [x23, x14]\n"
+ "ldr q13, [x22, x14]\n"
+ "ldr q11, [x21, x14]\n"
+ "ldr q12, [x20, x14]\n"
"ldp x21, x20, [x15, #0x40]\n"
- "ldr q10, [x21, x10]\n"
- "ldr q14, [x20, x10]\n"
+ "ldr q10, [x21, x14]\n"
+ "ldr q14, [x20, x14]\n"
"bge 2f\n"
"1:" // Channel loop
"mov v30.16b, v26.16b\n fmla v30.4s, v0.4s, v5.4s\n"
"mov v31.16b, v26.16b\n fmla v31.4s, v0.4s, v6.4s\n"
- "ldr x20, [x15, #0x50]\n"
- "ldr q24, [x20, x10]\n"
- "mov v28.16b, v26.16b\n fmla v28.4s, v0.4s, v7.4s\n"
- "mov v29.16b, v26.16b\n fmla v29.4s, v0.4s, v8.4s\n"
+ "ldr x21, [x15, #0x50]\n"
+ "ldr x20, [x15, #0x58]\n"
+ "mov v29.16b, v26.16b\n fmla v29.4s, v0.4s, v7.4s\n"
+ "mov v28.16b, v26.16b\n fmla v28.4s, v0.4s, v8.4s\n"
"ldr q23, [x16, #0x0]\n"
"ldr q26, [x16, #0x140]\n"
+ "ldr x22, [x15, #0x60]\n"
+ "ldr x25, [x15, #0x68]\n"
+ "add x9, x9, #0x10\n"
+ "ldr q22, [x21, x14]\n"
+ "ldr x24, [x15, #0x70]\n"
"fmla v30.4s, v1.4s, v6.4s\n"
+ "ldr q21, [x20, x14]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
- "ldr x20, [x15, #0x58]\n"
- "ldr q22, [x20, x10]\n"
- "fmla v28.4s, v1.4s, v8.4s\n"
- "fmla v29.4s, v1.4s, v13.4s\n"
- "ldr q21, [x16, #0x10]\n"
- "ldr x20, [x15, #0x60]\n"
+ "ldr x21, [x15, #0x78]\n"
+ "fmla v29.4s, v1.4s, v8.4s\n"
+ "fmla v28.4s, v1.4s, v13.4s\n"
+ "ldr q0, [x16, #0x10]\n"
+ "ldr x27, [x15, #0x80]\n"
+ "ldr x20, [x15, #0x88]\n"
+ "ldr x23, [x15, #0x90]\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "ldr q17, [x20, x10]\n"
+ "ldr q18, [x22, x14]\n"
+ "ldr x26, [x15, #0x98]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
- "ldr x20, [x15, #0x68]\n"
- "fmla v28.4s, v2.4s, v13.4s\n"
- "fmla v29.4s, v2.4s, v24.4s\n"
+ "fmla v29.4s, v2.4s, v13.4s\n"
+ "ldr x22, [x15, #0xa0]\n"
+ "fmla v28.4s, v2.4s, v22.4s\n"
"ldr q16, [x16, #0x20]\n"
- "ldr x22, [x15, #0x70]\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "ldr q5, [x20, x10]\n"
+ "ldr q20, [x25, x14]\n"
+ "ldr x25, [x15, #0xa8]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla v28.4s, v3.4s, v24.4s\n"
"fmla v29.4s, v3.4s, v22.4s\n"
- "ldr q20, [x16, #0x30]\n"
- "ldr x21, [x15, #0x80]\n"
+ "fmla v28.4s, v3.4s, v21.4s\n"
+ "ldr q17, [x16, #0x30]\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "ldr q19, [x22, x10]\n"
- "fmla v31.4s, v4.4s, v17.4s\n"
- "ldr q2, [x20, x10]\n"
- "fmla v28.4s, v4.4s, v22.4s\n"
- "fmla v29.4s, v4.4s, v10.4s\n"
- "ldr q18, [x16, #0x40]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr q3, [x24, x14]\n"
+ "ldr x24, [x15, #0xb0]\n"
+ "fmla v31.4s, v4.4s, v18.4s\n"
+ "ldr q2, [x21, x14]\n"
+ "ldr x21, [x15, #0xb8]\n"
+ "fmla v29.4s, v4.4s, v21.4s\n"
+ "fmla v28.4s, v4.4s, v10.4s\n"
+ "ldr q19, [x16, #0x40]\n"
"fmla v30.4s, v23.4s, v7.4s\n"
"fmla v31.4s, v23.4s, v8.4s\n"
- "ldr x23, [x15, #0x90]\n"
- "ldr x26, [x15, #0x98]\n"
- "fmla v28.4s, v23.4s, v14.4s\n"
- "fmla v29.4s, v23.4s, v5.4s\n"
- "ldr q1, [x16, #0x50]\n"
- "ldr x22, [x15, #0xa0]\n"
- "fmla v30.4s, v21.4s, v8.4s\n"
- "ldr q25, [x20, x10]\n"
- "fmla v31.4s, v21.4s, v13.4s\n"
- "ldr x25, [x15, #0xa8]\n"
- "fmla v28.4s, v21.4s, v5.4s\n"
- "fmla v29.4s, v21.4s, v19.4s\n"
- "ldr q17, [x16, #0x60]\n"
- "ldr x24, [x15, #0xb0]\n"
+ "fmla v29.4s, v23.4s, v14.4s\n"
+ "fmla v28.4s, v23.4s, v20.4s\n"
+ "ldr q18, [x16, #0x50]\n"
+ "fmla v30.4s, v0.4s, v8.4s\n"
+ "ldr q25, [x20, x14]\n"
+ "ldr x28, [x15, #0xc8]\n"
+ "fmla v31.4s, v0.4s, v13.4s\n"
+ "fmla v29.4s, v0.4s, v20.4s\n"
+ "fmla v28.4s, v0.4s, v3.4s\n"
+ "ldr q11, [x16, #0x60]\n"
"fmla v30.4s, v16.4s, v13.4s\n"
- "ldr q8, [x21, x10]\n"
- "fmla v31.4s, v16.4s, v24.4s\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla v28.4s, v16.4s, v19.4s\n"
- "fmla v29.4s, v16.4s, v2.4s\n"
+ "ldr q24, [x27, x14]\n"
+ "ldr x20, [x15, #0xc0]\n"
+ "fmla v31.4s, v16.4s, v22.4s\n"
+ "fmla v29.4s, v16.4s, v3.4s\n"
+ "fmla v28.4s, v16.4s, v2.4s\n"
"ldr q16, [x16, #0x70]\n"
- "ldr x21, [x15, #0xc0]\n"
- "fmla v30.4s, v20.4s, v24.4s\n"
- "ldr q24, [x23, x10]\n"
- "fmla v31.4s, v20.4s, v22.4s\n"
- "ldr x27, [x15, #0xc8]\n"
- "fmla v28.4s, v20.4s, v2.4s\n"
- "fmla v29.4s, v20.4s, v8.4s\n"
- "ldr q23, [x16, #0x80]\n"
+ "fmla v30.4s, v17.4s, v22.4s\n"
+ "ldr q5, [x23, x14]\n"
"ldr x23, [x15, #0xd0]\n"
- "fmla v30.4s, v18.4s, v22.4s\n"
- "ldr q22, [x26, x10]\n"
- "fmla v31.4s, v18.4s, v10.4s\n"
- "ldr q21, [x22, x10]\n"
- "fmla v28.4s, v18.4s, v8.4s\n"
- "fmla v29.4s, v18.4s, v25.4s\n"
- "ldr q20, [x16, #0x90]\n"
- "ldr x22, [x15, #0xd8]\n"
- "fmla v30.4s, v1.4s, v14.4s\n"
- "ldr q0, [x20, x10]\n"
- "fmla v31.4s, v1.4s, v5.4s\n"
- "ldr x20, [x15, #0xe0]\n"
- "fmla v28.4s, v1.4s, v24.4s\n"
- "fmla v29.4s, v1.4s, v22.4s\n"
- "ldr q6, [x16, #0xa0]\n"
+ "fmla v31.4s, v17.4s, v21.4s\n"
+ "fmla v29.4s, v17.4s, v2.4s\n"
+ "fmla v28.4s, v17.4s, v24.4s\n"
+ "ldr q17, [x16, #0x80]\n"
+ "fmla v30.4s, v19.4s, v21.4s\n"
+ "ldr q23, [x26, x14]\n"
+ "ldr x27, [x15, #0xd8]\n"
+ "fmla v31.4s, v19.4s, v10.4s\n"
+ "ldr q22, [x22, x14]\n"
+ "ldr x22, [x15, #0xe0]\n"
+ "fmla v29.4s, v19.4s, v24.4s\n"
+ "fmla v28.4s, v19.4s, v25.4s\n"
+ "ldr q21, [x16, #0x90]\n"
+ "fmla v30.4s, v18.4s, v14.4s\n"
+ "ldr q1, [x21, x14]\n"
"ldr x26, [x15, #0xf8]\n"
- "fmla v30.4s, v17.4s, v5.4s\n"
- "ldr q1, [x25, x10]\n"
- "fmla v31.4s, v17.4s, v19.4s\n"
+ "fmla v31.4s, v18.4s, v20.4s\n"
+ "fmla v29.4s, v18.4s, v5.4s\n"
+ "fmla v28.4s, v18.4s, v23.4s\n"
+ "ldr q12, [x16, #0xa0]\n"
+ "fmla v30.4s, v11.4s, v20.4s\n"
+ "ldr q0, [x25, x14]\n"
"ldr x25, [x15, #0xe8]\n"
- "fmla v28.4s, v17.4s, v22.4s\n"
- "fmla v29.4s, v17.4s, v21.4s\n"
- "ldr q18, [x16, #0xb0]\n"
- "add x28, x28, #0x10\n"
- "fmla v30.4s, v16.4s, v19.4s\n"
- "ldr q19, [x24, x10]\n"
- "fmla v31.4s, v16.4s, v2.4s\n"
+ "fmla v31.4s, v11.4s, v3.4s\n"
+ "fmla v29.4s, v11.4s, v23.4s\n"
+ "fmla v28.4s, v11.4s, v22.4s\n"
+ "ldr q20, [x16, #0xb0]\n"
+ "fmla v30.4s, v16.4s, v3.4s\n"
+ "ldr q19, [x24, x14]\n"
"ldr x24, [x15, #0xf0]\n"
- "fmla v28.4s, v16.4s, v21.4s\n"
- "fmla v29.4s, v16.4s, v1.4s\n"
- "ldr q17, [x16, #0xc0]\n"
- "fmla v30.4s, v23.4s, v2.4s\n"
- "ldr q16, [x21, x10]\n"
- "fmla v31.4s, v23.4s, v8.4s\n"
+ "fmla v31.4s, v16.4s, v2.4s\n"
+ "fmla v29.4s, v16.4s, v22.4s\n"
+ "fmla v28.4s, v16.4s, v0.4s\n"
+ "ldr q18, [x16, #0xc0]\n"
+ "fmla v30.4s, v17.4s, v2.4s\n"
+ "ldr q16, [x20, x14]\n"
"ldr x21, [x15, #0x100]\n"
- "fmla v28.4s, v23.4s, v1.4s\n"
- "fmla v29.4s, v23.4s, v19.4s\n"
- "ldr q13, [x16, #0xd0]\n"
- "fmla v30.4s, v20.4s, v8.4s\n"
- "ldr q2, [x27, x10]\n"
- "fmla v31.4s, v20.4s, v25.4s\n"
- "ldr q10, [x20, x10]\n"
- "fmla v28.4s, v20.4s, v19.4s\n"
- "fmla v29.4s, v20.4s, v0.4s\n"
- "ldr q9, [x16, #0xe0]\n"
+ "fmla v31.4s, v17.4s, v24.4s\n"
+ "fmla v29.4s, v17.4s, v0.4s\n"
+ "fmla v28.4s, v17.4s, v19.4s\n"
+ "ldr q17, [x16, #0xd0]\n"
+ "fmla v30.4s, v21.4s, v24.4s\n"
+ "ldr q14, [x28, x14]\n"
"ldr x20, [x15, #0x108]\n"
- "fmla v30.4s, v6.4s, v24.4s\n"
- "ldr q5, [x23, x10]\n"
- "fmla v31.4s, v6.4s, v22.4s\n"
+ "fmla v31.4s, v21.4s, v25.4s\n"
+ "ldr q4, [x22, x14]\n"
+ "fmla v29.4s, v21.4s, v19.4s\n"
+ "fmla v28.4s, v21.4s, v1.4s\n"
+ "ldr q7, [x16, #0xe0]\n"
+ "fmla v30.4s, v12.4s, v5.4s\n"
+ "ldr q25, [x23, x14]\n"
"ldr x23, [x15, #0x110]\n"
- "fmla v28.4s, v6.4s, v16.4s\n"
- "fmla v29.4s, v6.4s, v2.4s\n"
- "ldr q24, [x16, #0xf0]\n"
- "fmla v30.4s, v18.4s, v22.4s\n"
- "ldr q25, [x22, x10]\n"
- "fmla v31.4s, v18.4s, v21.4s\n"
+ "fmla v31.4s, v12.4s, v23.4s\n"
+ "fmla v29.4s, v12.4s, v16.4s\n"
+ "fmla v28.4s, v12.4s, v14.4s\n"
+ "ldr q11, [x16, #0xf0]\n"
+ "fmla v30.4s, v20.4s, v23.4s\n"
+ "ldr q24, [x27, x14]\n"
"ldr x22, [x15, #0x118]\n"
- "fmla v28.4s, v18.4s, v2.4s\n"
- "fmla v29.4s, v18.4s, v5.4s\n"
+ "fmla v31.4s, v20.4s, v22.4s\n"
+ "fmla v29.4s, v20.4s, v14.4s\n"
+ "fmla v28.4s, v20.4s, v25.4s\n"
"ldr q23, [x16, #0x100]\n"
- "fmla v30.4s, v17.4s, v21.4s\n"
- "ldr q22, [x25, x10]\n"
- "fmla v31.4s, v17.4s, v1.4s\n"
- "fmla v28.4s, v17.4s, v5.4s\n"
- "fmla v29.4s, v17.4s, v25.4s\n"
+ "fmla v30.4s, v18.4s, v22.4s\n"
+ "ldr q22, [x25, x14]\n"
+ "fmla v31.4s, v18.4s, v0.4s\n"
+ "fmla v29.4s, v18.4s, v25.4s\n"
+ "fmla v28.4s, v18.4s, v24.4s\n"
"ldr q21, [x16, #0x110]\n"
- "fmla v30.4s, v13.4s, v1.4s\n"
- "ldr q18, [x24, x10]\n"
- "fmla v31.4s, v13.4s, v19.4s\n"
- "fmla v28.4s, v13.4s, v25.4s\n"
- "fmla v29.4s, v13.4s, v10.4s\n"
+ "fmla v30.4s, v17.4s, v0.4s\n"
+ "ldr q18, [x24, x14]\n"
+ "fmla v31.4s, v17.4s, v19.4s\n"
+ "fmla v29.4s, v17.4s, v24.4s\n"
+ "fmla v28.4s, v17.4s, v4.4s\n"
"ldr q20, [x16, #0x120]\n"
- "fmla v30.4s, v9.4s, v19.4s\n"
- "ldr q17, [x26, x10]\n"
- "fmla v31.4s, v9.4s, v0.4s\n"
- "fmla v28.4s, v9.4s, v10.4s\n"
- "fmla v29.4s, v9.4s, v22.4s\n"
+ "fmla v30.4s, v7.4s, v19.4s\n"
+ "ldr q17, [x26, x14]\n"
+ "fmla v31.4s, v7.4s, v1.4s\n"
+ "fmla v29.4s, v7.4s, v4.4s\n"
+ "fmla v28.4s, v7.4s, v22.4s\n"
"ldr q19, [x16, #0x130]\n"
- "fmla v30.4s, v24.4s, v16.4s\n"
- "ldr q16, [x21, x10]\n"
- "fmla v31.4s, v24.4s, v2.4s\n"
- "fmla v28.4s, v24.4s, v18.4s\n"
- "ldr q18, [x20, x10]\n"
- "fmla v29.4s, v24.4s, v17.4s\n"
+ "fmla v30.4s, v11.4s, v16.4s\n"
+ "ldr q16, [x21, x14]\n"
+ "fmla v31.4s, v11.4s, v14.4s\n"
+ "fmla v29.4s, v11.4s, v18.4s\n"
+ "ldr q18, [x20, x14]\n"
+ "ldp x20, x21, [x15, #0x0]\n"
+ "fmla v28.4s, v11.4s, v17.4s\n"
"ldr q0, [x16, #0x150]\n"
- "fmla v30.4s, v23.4s, v2.4s\n"
- "fmla v31.4s, v23.4s, v5.4s\n"
- "ldp x21, x20, [x15, #0x0]\n"
- "fmla v28.4s, v23.4s, v17.4s\n"
- "ldr q17, [x23, x10]\n"
- "fmla v29.4s, v23.4s, v16.4s\n"
+ "fmla v30.4s, v23.4s, v14.4s\n"
+ "fmla v31.4s, v23.4s, v25.4s\n"
+ "fmla v29.4s, v23.4s, v17.4s\n"
+ "ldr q17, [x23, x14]\n"
+ "fmla v28.4s, v23.4s, v16.4s\n"
"ldr q1, [x16, #0x160]\n"
- "fmla v30.4s, v21.4s, v5.4s\n"
- "ldr q5, [x21, x17]\n"
- "fmla v31.4s, v21.4s, v25.4s\n"
- "fmla v28.4s, v21.4s, v16.4s\n"
- "ldr q16, [x22, x10]\n"
- "fmla v29.4s, v21.4s, v18.4s\n"
+ "fmla v30.4s, v21.4s, v25.4s\n"
+ "ldr q5, [x20, x8]\n"
+ "fmla v31.4s, v21.4s, v24.4s\n"
+ "fmla v29.4s, v21.4s, v16.4s\n"
+ "ldr q16, [x22, x14]\n"
+ "ldp x20, x26, [x15, #0x10]\n"
+ "ldp x25, x24, [x15, #0x20]\n"
+ "ldp x23, x22, [x15, #0x30]\n"
+ "add x14, x14, #0x10\n"
+ "ldr q7, [x20, x8]\n"
+ "fmla v28.4s, v21.4s, v18.4s\n"
"ldr q2, [x16, #0x170]\n"
- "fmla v30.4s, v20.4s, v25.4s\n"
- "ldr q6, [x20, x17]\n"
- "fmla v31.4s, v20.4s, v10.4s\n"
- "ldp x21, x20, [x15, #0x10]\n"
- "ldr q7, [x21, x17]\n"
- "fmla v28.4s, v20.4s, v18.4s\n"
- "fmla v29.4s, v20.4s, v17.4s\n"
+ "fmla v30.4s, v20.4s, v24.4s\n"
+ "ldr q6, [x21, x8]\n"
+ "ldp x21, x20, [x15, #0x40]\n"
+ "ldr q13, [x24, x8]\n"
+ "fmla v31.4s, v20.4s, v4.4s\n"
+ "fmla v29.4s, v20.4s, v18.4s\n"
+ "ldr q11, [x23, x8]\n"
+ "ldr q14, [x20, x8]\n"
+ "fmla v28.4s, v20.4s, v17.4s\n"
"ldr q3, [x16, #0x180]\n"
- "fmla v30.4s, v19.4s, v10.4s\n"
- "ldr q8, [x20, x17]\n"
+ "fmla v30.4s, v19.4s, v4.4s\n"
+ "ldr q8, [x26, x8]\n"
"fmla v31.4s, v19.4s, v22.4s\n"
- "ldp x21, x20, [x15, #0x20]\n"
- "ldr q13, [x20, x17]\n"
- "fmla v28.4s, v19.4s, v17.4s\n"
- "fmla v29.4s, v19.4s, v16.4s\n"
- "ldr q9, [x21, x17]\n"
+ "ldr q10, [x21, x8]\n"
+ "fmla v29.4s, v19.4s, v17.4s\n"
+ "ldr q12, [x22, x8]\n"
+ "fmla v28.4s, v19.4s, v16.4s\n"
+ "ldr q9, [x25, x8]\n"
+ "add x8, x8, #0x10\n"
"ldr q4, [x16, #0x190]\n"
- "ldp x21, x20, [x15, #0x30]\n"
+ "cmp x8, x17, LSL #4\n"
+ "add x16, x16, #0x1a0\n"
"fmax v30.4s, v30.4s, v27.4s\n"
"fmax v31.4s, v31.4s, v27.4s\n"
- "ldr q11, [x21, x17]\n"
- "ldr q12, [x20, x17]\n"
- "fmax v28.4s, v28.4s, v27.4s\n"
"fmax v29.4s, v29.4s, v27.4s\n"
- "ldp x21, x20, [x15, #0x40]\n"
- "ldr q10, [x21, x17]\n"
+ "fmax v28.4s, v28.4s, v27.4s\n"
"fmin v30.4s, v30.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v15.4s\n"
- "ldr q14, [x20, x17]\n"
- "add x17, x17, #0x10\n"
- "cmp x17, x9, LSL #4\n"
- "fmin v28.4s, v28.4s, v15.4s\n"
"fmin v29.4s, v29.4s, v15.4s\n"
- "add x10, x10, #0x10\n"
- "str q30, [x14, x28]\n"
- "add x16, x16, #0x1a0\n"
- "str q31, [x13, x28]\n"
- "str q28, [x12, x28]\n"
- "str q29, [x11, x28]\n"
+ "fmin v28.4s, v28.4s, v15.4s\n"
+ "str q30, [x13, x9]\n"
+ "str q31, [x12, x9]\n"
+ "str q29, [x11, x9]\n"
+ "str q28, [x10, x9]\n"
"blt 1b\n"
"2:" // Channel tail
"mov v31.16b, v26.16b\n fmla v31.4s, v0.4s, v5.4s\n"
"mov v5.16b, v26.16b\n fmla v5.4s, v0.4s, v6.4s\n"
- "ldr x20, [x15, #0x50]\n"
- "ldr q22, [x20, x10]\n"
+ "ldr x22, [x15, #0x50]\n"
+ "ldr x21, [x15, #0x58]\n"
"mov v30.16b, v26.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v29.16b, v26.16b\n fmla v29.4s, v0.4s, v8.4s\n"
"ldr q19, [x16, #0x0]\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x20, [x15, #0x60]\n"
+ "ldr x25, [x15, #0x68]\n"
+ "ldr x24, [x15, #0x70]\n"
+ "add x9, x9, #0x10\n"
+ "ldr q22, [x22, x14]\n"
+ "ldr x23, [x15, #0x78]\n"
"fmla v31.4s, v1.4s, v6.4s\n"
- "ldr q21, [x20, x10]\n"
+ "ldr q21, [x21, x14]\n"
"fmla v5.4s, v1.4s, v9.4s\n"
- "ldr x21, [x15, #0x60]\n"
+ "ldr x27, [x15, #0x80]\n"
"fmla v30.4s, v1.4s, v8.4s\n"
"fmla v29.4s, v1.4s, v13.4s\n"
"ldr q18, [x16, #0x10]\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr x22, [x15, #0x88]\n"
+ "ldr x21, [x15, #0x90]\n"
+ "ldr x26, [x15, #0x98]\n"
"fmla v31.4s, v2.4s, v9.4s\n"
- "ldr q16, [x21, x10]\n"
+ "ldr q16, [x20, x14]\n"
+ "ldr x20, [x15, #0xa0]\n"
"fmla v5.4s, v2.4s, v11.4s\n"
- "ldr x23, [x15, #0x70]\n"
"fmla v30.4s, v2.4s, v13.4s\n"
"fmla v29.4s, v2.4s, v22.4s\n"
"ldr q17, [x16, #0x20]\n"
- "ldr x21, [x15, #0x78]\n"
"fmla v31.4s, v3.4s, v11.4s\n"
- "ldr q6, [x20, x10]\n"
+ "ldr q6, [x25, x14]\n"
+ "ldr x25, [x15, #0xa8]\n"
"fmla v5.4s, v3.4s, v12.4s\n"
- "ldr x22, [x15, #0x80]\n"
"fmla v30.4s, v3.4s, v22.4s\n"
"fmla v29.4s, v3.4s, v21.4s\n"
"ldr q20, [x16, #0x30]\n"
- "ldr x20, [x15, #0x88]\n"
"fmla v31.4s, v4.4s, v12.4s\n"
- "ldr q2, [x23, x10]\n"
+ "ldr q2, [x24, x14]\n"
+ "ldr x24, [x15, #0xb0]\n"
"fmla v5.4s, v4.4s, v16.4s\n"
- "ldr q28, [x21, x10]\n"
+ "ldr q28, [x23, x14]\n"
+ "ldr x23, [x15, #0xb8]\n"
"fmla v30.4s, v4.4s, v21.4s\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"ldr q16, [x16, #0x40]\n"
- "ldr x21, [x15, #0x90]\n"
"fmla v31.4s, v19.4s, v7.4s\n"
"fmla v5.4s, v19.4s, v8.4s\n"
- "ldr x27, [x15, #0x98]\n"
- "ldr x26, [x15, #0xa0]\n"
"fmla v30.4s, v19.4s, v14.4s\n"
"fmla v29.4s, v19.4s, v6.4s\n"
"ldr q19, [x16, #0x50]\n"
- "ldr x25, [x15, #0xa8]\n"
"fmla v31.4s, v18.4s, v8.4s\n"
- "ldr q1, [x20, x10]\n"
+ "ldr q1, [x22, x14]\n"
+ "ldr x28, [x15, #0xc8]\n"
"fmla v5.4s, v18.4s, v13.4s\n"
- "ldr x24, [x15, #0xb0]\n"
"fmla v30.4s, v18.4s, v6.4s\n"
"fmla v29.4s, v18.4s, v2.4s\n"
"ldr q18, [x16, #0x60]\n"
- "ldr x20, [x15, #0xb8]\n"
"fmla v31.4s, v17.4s, v13.4s\n"
- "ldr q26, [x22, x10]\n"
+ "ldr q26, [x27, x14]\n"
+ "ldr x22, [x15, #0xc0]\n"
"fmla v5.4s, v17.4s, v22.4s\n"
- "ldr x23, [x15, #0xc0]\n"
"fmla v30.4s, v17.4s, v2.4s\n"
"fmla v29.4s, v17.4s, v28.4s\n"
"ldr q17, [x16, #0x70]\n"
- "ldr x22, [x15, #0xc8]\n"
"fmla v31.4s, v20.4s, v22.4s\n"
- "ldr q25, [x21, x10]\n"
- "fmla v5.4s, v20.4s, v21.4s\n"
+ "ldr q25, [x21, x14]\n"
"ldr x21, [x15, #0xd0]\n"
+ "fmla v5.4s, v20.4s, v21.4s\n"
"fmla v30.4s, v20.4s, v28.4s\n"
"fmla v29.4s, v20.4s, v26.4s\n"
"ldr q24, [x16, #0x80]\n"
- "add x28, x28, #0x10\n"
"fmla v31.4s, v16.4s, v21.4s\n"
- "ldr q23, [x27, x10]\n"
+ "ldr q23, [x26, x14]\n"
+ "ldr x27, [x15, #0xd8]\n"
"fmla v5.4s, v16.4s, v10.4s\n"
- "ldr q0, [x26, x10]\n"
+ "ldr q0, [x20, x14]\n"
+ "ldr x20, [x15, #0xe0]\n"
"fmla v30.4s, v16.4s, v26.4s\n"
"fmla v29.4s, v16.4s, v1.4s\n"
"ldr q22, [x16, #0x90]\n"
- "ldr x27, [x15, #0xd8]\n"
"fmla v31.4s, v19.4s, v14.4s\n"
- "ldr q16, [x20, x10]\n"
+ "ldr q16, [x23, x14]\n"
+ "ldr x26, [x15, #0xf8]\n"
"fmla v5.4s, v19.4s, v6.4s\n"
- "ldr x20, [x15, #0xe0]\n"
"fmla v30.4s, v19.4s, v25.4s\n"
"fmla v29.4s, v19.4s, v23.4s\n"
"ldr q21, [x16, #0xa0]\n"
- "ldr x26, [x15, #0xf8]\n"
"fmla v31.4s, v18.4s, v6.4s\n"
- "ldr q20, [x25, x10]\n"
- "fmla v5.4s, v18.4s, v2.4s\n"
+ "ldr q20, [x25, x14]\n"
"ldr x25, [x15, #0xe8]\n"
+ "fmla v5.4s, v18.4s, v2.4s\n"
"fmla v30.4s, v18.4s, v23.4s\n"
"fmla v29.4s, v18.4s, v0.4s\n"
"ldr q18, [x16, #0xb0]\n"
"fmla v31.4s, v17.4s, v2.4s\n"
- "ldr q19, [x24, x10]\n"
- "fmla v5.4s, v17.4s, v28.4s\n"
+ "ldr q19, [x24, x14]\n"
"ldr x24, [x15, #0xf0]\n"
+ "fmla v5.4s, v17.4s, v28.4s\n"
"fmla v30.4s, v17.4s, v0.4s\n"
"fmla v29.4s, v17.4s, v20.4s\n"
"ldr q17, [x16, #0xc0]\n"
"fmla v31.4s, v24.4s, v28.4s\n"
- "ldr q7, [x23, x10]\n"
- "fmla v5.4s, v24.4s, v26.4s\n"
+ "ldr q10, [x22, x14]\n"
"ldr x23, [x15, #0x100]\n"
+ "fmla v5.4s, v24.4s, v26.4s\n"
"fmla v30.4s, v24.4s, v20.4s\n"
"fmla v29.4s, v24.4s, v19.4s\n"
- "ldr q3, [x16, #0xd0]\n"
+ "ldr q13, [x16, #0xd0]\n"
"fmla v31.4s, v22.4s, v26.4s\n"
- "ldr q28, [x22, x10]\n"
+ "ldr q28, [x28, x14]\n"
+ "ldr x22, [x15, #0x108]\n"
"fmla v5.4s, v22.4s, v1.4s\n"
- "ldr q13, [x20, x10]\n"
+ "ldr q14, [x20, x14]\n"
"fmla v30.4s, v22.4s, v19.4s\n"
"fmla v29.4s, v22.4s, v16.4s\n"
- "ldr q11, [x16, #0xe0]\n"
- "ldr x22, [x15, #0x108]\n"
+ "ldr q12, [x16, #0xe0]\n"
"fmla v31.4s, v21.4s, v25.4s\n"
- "ldr q26, [x21, x10]\n"
- "fmla v5.4s, v21.4s, v23.4s\n"
+ "ldr q26, [x21, x14]\n"
"ldr x21, [x15, #0x110]\n"
- "fmla v30.4s, v21.4s, v7.4s\n"
+ "fmla v5.4s, v21.4s, v23.4s\n"
+ "fmla v30.4s, v21.4s, v10.4s\n"
"fmla v29.4s, v21.4s, v28.4s\n"
"ldr q25, [x16, #0xf0]\n"
"fmla v31.4s, v18.4s, v23.4s\n"
- "ldr q24, [x27, x10]\n"
- "fmla v5.4s, v18.4s, v0.4s\n"
+ "ldr q24, [x27, x14]\n"
"ldr x20, [x15, #0x118]\n"
+ "fmla v5.4s, v18.4s, v0.4s\n"
"fmla v30.4s, v18.4s, v28.4s\n"
"fmla v29.4s, v18.4s, v26.4s\n"
"ldr q23, [x16, #0x100]\n"
"fmla v31.4s, v17.4s, v0.4s\n"
- "ldr q22, [x25, x10]\n"
+ "ldr q22, [x25, x14]\n"
"fmla v5.4s, v17.4s, v20.4s\n"
"fmla v30.4s, v17.4s, v26.4s\n"
"fmla v29.4s, v17.4s, v24.4s\n"
"ldr q21, [x16, #0x110]\n"
- "fmla v31.4s, v3.4s, v20.4s\n"
- "ldr q18, [x24, x10]\n"
- "fmla v5.4s, v3.4s, v19.4s\n"
- "fmla v30.4s, v3.4s, v24.4s\n"
- "fmla v29.4s, v3.4s, v13.4s\n"
+ "fmla v31.4s, v13.4s, v20.4s\n"
+ "ldr q18, [x24, x14]\n"
+ "fmla v5.4s, v13.4s, v19.4s\n"
+ "fmla v30.4s, v13.4s, v24.4s\n"
+ "fmla v29.4s, v13.4s, v14.4s\n"
"ldr q20, [x16, #0x120]\n"
- "fmla v31.4s, v11.4s, v19.4s\n"
- "ldr q17, [x26, x10]\n"
- "fmla v5.4s, v11.4s, v16.4s\n"
- "fmla v30.4s, v11.4s, v13.4s\n"
- "fmla v29.4s, v11.4s, v22.4s\n"
+ "fmla v31.4s, v12.4s, v19.4s\n"
+ "ldr q17, [x26, x14]\n"
+ "fmla v5.4s, v12.4s, v16.4s\n"
+ "fmla v30.4s, v12.4s, v14.4s\n"
+ "fmla v29.4s, v12.4s, v22.4s\n"
"ldr q19, [x16, #0x130]\n"
"add x16, x16, #0x140\n"
- "fmla v31.4s, v25.4s, v7.4s\n"
- "ldr q16, [x23, x10]\n"
+ "fmla v31.4s, v25.4s, v10.4s\n"
+ "ldr q16, [x23, x14]\n"
"fmla v5.4s, v25.4s, v28.4s\n"
"fmla v30.4s, v25.4s, v18.4s\n"
- "ldr q18, [x22, x10]\n"
+ "ldr q18, [x22, x14]\n"
"fmla v29.4s, v25.4s, v17.4s\n"
"fmla v31.4s, v23.4s, v28.4s\n"
"fmla v5.4s, v23.4s, v26.4s\n"
"fmla v30.4s, v23.4s, v17.4s\n"
- "ldr q17, [x21, x10]\n"
+ "ldr q17, [x21, x14]\n"
"fmla v29.4s, v23.4s, v16.4s\n"
"fmla v31.4s, v21.4s, v26.4s\n"
"fmla v5.4s, v21.4s, v24.4s\n"
"fmla v30.4s, v21.4s, v16.4s\n"
- "ldr q16, [x20, x10]\n"
+ "ldr q16, [x20, x14]\n"
+ "add x14, x14, #0x10\n"
"fmla v29.4s, v21.4s, v18.4s\n"
- "add x10, x10, #0x10\n"
"fmla v31.4s, v20.4s, v24.4s\n"
- "fmla v5.4s, v20.4s, v13.4s\n"
+ "fmla v5.4s, v20.4s, v14.4s\n"
"fmla v30.4s, v20.4s, v18.4s\n"
"fmla v29.4s, v20.4s, v17.4s\n"
- "fmla v31.4s, v19.4s, v13.4s\n"
+ "fmla v31.4s, v19.4s, v14.4s\n"
"fmla v5.4s, v19.4s, v22.4s\n"
- "fmax v31.4s, v31.4s, v27.4s\n"
"fmla v30.4s, v19.4s, v17.4s\n"
"fmla v29.4s, v19.4s, v16.4s\n"
+ "fmax v31.4s, v31.4s, v27.4s\n"
"fmax v5.4s, v5.4s, v27.4s\n"
"fmax v30.4s, v30.4s, v27.4s\n"
- "fmax v29.4s, v29.4s, v27.4s\n"
"fmin v31.4s, v31.4s, v15.4s\n"
+ "fmax v29.4s, v29.4s, v27.4s\n"
"fmin v5.4s, v5.4s, v15.4s\n"
- "str q31, [x14, x28]\n"
"fmin v30.4s, v30.4s, v15.4s\n"
"fmin v29.4s, v29.4s, v15.4s\n"
- "str q5, [x13, x28]\n"
- "str q30, [x12, x28]\n"
- "str q29, [x11, x28]\n"
+ "str q31, [x13, x9]\n"
+ "str q5, [x12, x9]\n"
+ "str q30, [x11, x9]\n"
+ "str q29, [x10, x9]\n"
"3:" // Oddments
"tst %x[n_channels], #0x3\n"
"beq 60f\n"
"ldr q26, [x16, #0x0]\n"
"ldr q0, [x16, #0x10]\n"
- "mov x20, x10\n"
- "add x14, x14, x20\n"
+ "mov x20, x14\n"
"ldr q1, [x16, #0x20]\n"
"ldr q2, [x16, #0x30]\n"
- "add x13, x13, x20\n"
- "add x12, x12, x20\n"
"ldr q3, [x16, #0x40]\n"
"ldr q4, [x16, #0x50]\n"
- "add x11, x11, x20\n"
"ldr x9, [x15, #0x0]\n"
"ldr x28, [x15, #0x8]\n"
- "add x9, x9, x10\n"
- "add x28, x28, x10\n"
+ "add x13, x13, x20\n"
+ "add x12, x12, x20\n"
"ldr x27, [x15, #0x10]\n"
"ldr x26, [x15, #0x18]\n"
- "add x27, x27, x10\n"
- "add x26, x26, x10\n"
+ "add x11, x11, x20\n"
+ "add x10, x10, x20\n"
"ldr x25, [x15, #0x20]\n"
"ldr x24, [x15, #0x28]\n"
- "add x25, x25, x10\n"
- "add x24, x24, x10\n"
"ldr x23, [x15, #0x30]\n"
"ldr x22, [x15, #0x38]\n"
- "add x23, x23, x10\n"
- "add x22, x22, x10\n"
+ "add x9, x9, x14\n"
+ "add x28, x28, x14\n"
"ldr x21, [x15, #0x40]\n"
"ldr x20, [x15, #0x48]\n"
- "add x21, x21, x10\n"
- "add x20, x20, x10\n"
+ "add x27, x27, x14\n"
+ "add x26, x26, x14\n"
+ "add x25, x25, x14\n"
+ "add x24, x24, x14\n"
+ "add x23, x23, x14\n"
+ "add x22, x22, x14\n"
+ "add x21, x21, x14\n"
+ "add x20, x20, x14\n"
"add x16, x16, #0x60\n"
"tbz %x[n_channels], #1, 4f\n"
"ld1 { v5.d }[0], [x9], #0x8\n"
@@ -609,9 +609,9 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"mov v28.16b, v26.16b\n fmla v28.4s, v0.4s, v5.4s\n"
"mov v29.16b, v26.16b\n fmla v29.4s, v0.4s, v6.4s\n"
"ldr x20, [x15, #0x50]\n"
- "add x20, x20, x10\n"
"mov v30.16b, v26.16b\n fmla v30.4s, v0.4s, v7.4s\n"
"mov v31.16b, v26.16b\n fmla v31.4s, v0.4s, v8.4s\n"
+ "add x20, x20, x14\n"
"fmla v28.4s, v1.4s, v6.4s\n"
"fmla v29.4s, v1.4s, v9.4s\n"
"fmla v30.4s, v1.4s, v8.4s\n"
@@ -630,9 +630,9 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x58]\n"
"fmla v31.4s, v2.4s, v5.4s\n"
"fmla v28.4s, v3.4s, v11.4s\n"
- "add x20, x20, x10\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v5.4s\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 8f\n"
"ld1 { v6.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 9f\n"
@@ -644,7 +644,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr x20, [x15, #0x60]\n"
"fmla v31.4s, v3.4s, v6.4s\n"
"fmla v28.4s, v4.4s, v12.4s\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 10f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 11f\n"
@@ -658,11 +658,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v30.4s, v4.4s, v6.4s\n"
"ldr x20, [x15, #0x68]\n"
"fmla v31.4s, v4.4s, v10.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v0.4s, v7.4s\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"fmla v29.4s, v0.4s, v8.4s\n"
"fmla v30.4s, v0.4s, v14.4s\n"
- "add x16, x16, #0x10\n"
"tbz %x[n_channels], #1, 12f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 13f\n"
@@ -674,11 +674,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q1, [x16, #0x0]\n"
"ldr x20, [x15, #0x70]\n"
"fmla v31.4s, v0.4s, v11.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v1.4s, v8.4s\n"
"fmla v29.4s, v1.4s, v13.4s\n"
"fmla v30.4s, v1.4s, v11.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 14f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 15f\n"
@@ -690,11 +690,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q2, [x16, #0x0]\n"
"ldr x20, [x15, #0x78]\n"
"fmla v31.4s, v1.4s, v12.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v2.4s, v13.4s\n"
"fmla v29.4s, v2.4s, v5.4s\n"
"fmla v30.4s, v2.4s, v12.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 16f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
@@ -706,11 +706,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q3, [x16, #0x0]\n"
"ldr x20, [x15, #0x80]\n"
"fmla v31.4s, v2.4s, v9.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v3.4s, v5.4s\n"
"fmla v29.4s, v3.4s, v6.4s\n"
"fmla v30.4s, v3.4s, v9.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 18f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 19f\n"
@@ -722,11 +722,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q4, [x16, #0x0]\n"
"ldr x20, [x15, #0x88]\n"
"fmla v31.4s, v3.4s, v13.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v4.4s, v6.4s\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v13.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 20f\n"
"ld1 { v8.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
@@ -738,10 +738,10 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q0, [x16, #0x0]\n"
"ldr x20, [x15, #0x90]\n"
"fmla v31.4s, v4.4s, v8.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v0.4s, v14.4s\n"
"fmla v29.4s, v0.4s, v11.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 22f\n"
"ld1 { v5.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 23f\n"
@@ -752,7 +752,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"23:" // Oddments: Load input (3, 0): Bit 1: End
"ldr x20, [x15, #0x98]\n"
"fmla v30.4s, v0.4s, v5.4s\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 24f\n"
"ld1 { v6.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 25f\n"
@@ -764,11 +764,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q1, [x16, #0x0]\n"
"ldr x20, [x15, #0xa0]\n"
"fmla v31.4s, v0.4s, v6.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v1.4s, v11.4s\n"
"fmla v29.4s, v1.4s, v12.4s\n"
"fmla v30.4s, v1.4s, v6.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 26f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 27f\n"
@@ -780,11 +780,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q2, [x16, #0x0]\n"
"ldr x20, [x15, #0xa8]\n"
"fmla v31.4s, v1.4s, v10.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v2.4s, v12.4s\n"
"fmla v29.4s, v2.4s, v9.4s\n"
"fmla v30.4s, v2.4s, v10.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 28f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 29f\n"
@@ -796,11 +796,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q3, [x16, #0x0]\n"
"ldr x20, [x15, #0xb0]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v3.4s, v9.4s\n"
"fmla v29.4s, v3.4s, v13.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 30f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 31f\n"
@@ -812,11 +812,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q4, [x16, #0x0]\n"
"ldr x20, [x15, #0xb8]\n"
"fmla v31.4s, v3.4s, v12.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v4.4s, v13.4s\n"
"fmla v29.4s, v4.4s, v8.4s\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 32f\n"
"ld1 { v14.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 33f\n"
@@ -828,10 +828,10 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q0, [x16, #0x0]\n"
"ldr x20, [x15, #0xc0]\n"
"fmla v31.4s, v4.4s, v14.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v0.4s, v5.4s\n"
"fmla v29.4s, v0.4s, v6.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 34f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 35f\n"
@@ -842,7 +842,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"35:" // Oddments: Load input (4, 0): Bit 1: End
"ldr x20, [x15, #0xc8]\n"
"fmla v30.4s, v0.4s, v9.4s\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 36f\n"
"ld1 { v13.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 37f\n"
@@ -854,11 +854,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q1, [x16, #0x0]\n"
"ldr x20, [x15, #0xd0]\n"
"fmla v31.4s, v0.4s, v13.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v1.4s, v6.4s\n"
"fmla v29.4s, v1.4s, v10.4s\n"
"fmla v30.4s, v1.4s, v13.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 38f\n"
"ld1 { v5.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 39f\n"
@@ -870,11 +870,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q2, [x16, #0x0]\n"
"ldr x20, [x15, #0xd8]\n"
"fmla v31.4s, v1.4s, v5.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v2.4s, v10.4s\n"
"fmla v29.4s, v2.4s, v11.4s\n"
"fmla v30.4s, v2.4s, v5.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 40f\n"
"ld1 { v6.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 41f\n"
@@ -886,11 +886,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q3, [x16, #0x0]\n"
"ldr x20, [x15, #0xe0]\n"
"fmla v31.4s, v2.4s, v6.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v3.4s, v11.4s\n"
"fmla v29.4s, v3.4s, v12.4s\n"
"fmla v30.4s, v3.4s, v6.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 42f\n"
"ld1 { v8.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 43f\n"
@@ -902,11 +902,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q4, [x16, #0x0]\n"
"ldr x20, [x15, #0xe8]\n"
"fmla v31.4s, v3.4s, v8.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v4.4s, v12.4s\n"
"fmla v29.4s, v4.4s, v14.4s\n"
"fmla v30.4s, v4.4s, v8.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 44f\n"
"ld1 { v10.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 45f\n"
@@ -918,10 +918,10 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q0, [x16, #0x0]\n"
"ldr x20, [x15, #0xf0]\n"
"fmla v31.4s, v4.4s, v10.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v0.4s, v9.4s\n"
"fmla v29.4s, v0.4s, v13.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 46f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 47f\n"
@@ -932,7 +932,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"47:" // Oddments: Load input (5, 0): Bit 1: End
"ldr x20, [x15, #0xf8]\n"
"fmla v30.4s, v0.4s, v11.4s\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 48f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 49f\n"
@@ -944,11 +944,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q1, [x16, #0x0]\n"
"ldr x20, [x15, #0x100]\n"
"fmla v31.4s, v0.4s, v12.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v1.4s, v13.4s\n"
"fmla v29.4s, v1.4s, v5.4s\n"
"fmla v30.4s, v1.4s, v12.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 50f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 51f\n"
@@ -960,11 +960,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q2, [x16, #0x0]\n"
"ldr x20, [x15, #0x108]\n"
"fmla v31.4s, v1.4s, v9.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v2.4s, v5.4s\n"
"fmla v29.4s, v2.4s, v6.4s\n"
"fmla v30.4s, v2.4s, v9.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 52f\n"
"ld1 { v11.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 53f\n"
@@ -976,11 +976,11 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr q3, [x16, #0x0]\n"
"ldr x20, [x15, #0x110]\n"
"fmla v31.4s, v2.4s, v11.4s\n"
+ "add x16, x16, #0x10\n"
"fmla v28.4s, v3.4s, v6.4s\n"
"fmla v29.4s, v3.4s, v8.4s\n"
"fmla v30.4s, v3.4s, v11.4s\n"
- "add x20, x20, x10\n"
- "add x16, x16, #0x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 54f\n"
"ld1 { v12.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 55f\n"
@@ -995,7 +995,7 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmla v28.4s, v4.4s, v8.4s\n"
"fmla v29.4s, v4.4s, v10.4s\n"
"fmla v30.4s, v4.4s, v12.4s\n"
- "add x20, x20, x10\n"
+ "add x20, x20, x14\n"
"tbz %x[n_channels], #1, 56f\n"
"ld1 { v9.d }[0], [x20], #0x8\n"
"tbz %x[n_channels], #0, 57f\n"
@@ -1008,32 +1008,32 @@ void a64_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
"fmax v28.4s, v28.4s, v27.4s\n"
"fmax v29.4s, v29.4s, v27.4s\n"
"fmax v30.4s, v30.4s, v27.4s\n"
- "fmax v31.4s, v31.4s, v27.4s\n"
"fmin v28.4s, v28.4s, v15.4s\n"
+ "fmax v31.4s, v31.4s, v27.4s\n"
"fmin v29.4s, v29.4s, v15.4s\n"
"fmin v30.4s, v30.4s, v15.4s\n"
"fmin v31.4s, v31.4s, v15.4s\n"
"tbz %x[n_channels], #1, 58f\n"
- "st1 { v28.d }[0], [x14], #0x8\n"
- "st1 { v29.d }[0], [x13], #0x8\n"
- "st1 { v30.d }[0], [x12], #0x8\n"
- "st1 { v31.d }[0], [x11], #0x8\n"
+ "st1 { v28.d }[0], [x13], #0x8\n"
+ "st1 { v29.d }[0], [x12], #0x8\n"
+ "st1 { v30.d }[0], [x11], #0x8\n"
+ "st1 { v31.d }[0], [x10], #0x8\n"
"tbz %x[n_channels], #0, 59f\n"
- "st1 { v28.s }[2], [x14], #0x4\n"
- "st1 { v29.s }[2], [x13], #0x4\n"
- "st1 { v30.s }[2], [x12], #0x4\n"
- "st1 { v31.s }[2], [x11], #0x4\n"
+ "st1 { v28.s }[2], [x13], #0x4\n"
+ "st1 { v29.s }[2], [x12], #0x4\n"
+ "st1 { v30.s }[2], [x11], #0x4\n"
+ "st1 { v31.s }[2], [x10], #0x4\n"
"b 59f\n"
"58:" // Oddments: Store: Bit 1: Unset
- "st1 { v28.s }[0], [x14], #0x4\n"
- "st1 { v29.s }[0], [x13], #0x4\n"
- "st1 { v30.s }[0], [x12], #0x4\n"
- "st1 { v31.s }[0], [x11], #0x4\n"
+ "st1 { v28.s }[0], [x13], #0x4\n"
+ "st1 { v29.s }[0], [x12], #0x4\n"
+ "st1 { v30.s }[0], [x11], #0x4\n"
+ "st1 { v31.s }[0], [x10], #0x4\n"
"59:" // Oddments: Store: Bit 1: End
"60:" // End
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
index a2f577784f..6fb4ce79f0 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,97 +56,97 @@ void a64_fp32_nhwc_generic_output9_mla_depthfirst_impl(
"ldr q23, [%x[bias], x11]\n"
"2:" // Channel loop: Load bias: Done
"ldr q0, [%x[params], #0x0]\n"
- "mov x26, %x[inptrs]\n"
- "ldp x21, x20, [x26], #0x10\n"
- "subs x25, %x[n_points], #0x1\n"
- "ldr q14, [x21, x11]\n"
- "ldr q15, [x20, x11]\n"
+ "mov x23, %x[inptrs]\n"
+ "subs x22, %x[n_points], #0x1\n"
"mov v24.16b, v23.16b\n"
"mov v25.16b, v23.16b\n"
- "ldp x21, x20, [x26], #0x10\n"
- "ldr q16, [x21, x11]\n"
"mov v26.16b, v23.16b\n"
+ "add %x[params], %x[params], #0x10\n"
"mov v27.16b, v23.16b\n"
- "ldr q17, [x20, x11]\n"
- "ldp x21, x20, [x26], #0x10\n"
"mov v28.16b, v23.16b\n"
+ "ldp x21, x20, [x23], #0x10\n"
"mov v29.16b, v23.16b\n"
- "ldr q18, [x21, x11]\n"
- "ldr q19, [x20, x11]\n"
"mov v30.16b, v23.16b\n"
"mov v31.16b, v23.16b\n"
- "ldp x21, x20, [x26], #0x10\n"
+ "ldr q14, [x21, x11]\n"
+ "ldr q15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ldr q16, [x21, x11]\n"
+ "ldr q17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ldr q18, [x21, x11]\n"
+ "ldr q19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"ldr q20, [x21, x11]\n"
- "add %x[params], %x[params], #0x10\n"
"ldr q21, [x20, x11]\n"
- "ldr x20, [x26], #0x8\n"
+ "ldr x20, [x23], #0x8\n"
"ldr q22, [x20, x11]\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x20, x24, [x26], #0x10\n"
- "ldp x23, x22, [x26], #0x10\n"
- "subs x25, x25, #0x1\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "subs x22, x22, #0x1\n"
"fmla v23.4s, v14.4s, v0.4s\n"
- "ldr q14, [x20, x11]\n"
- "ldp x21, x20, [x26], #0x10\n"
"fmla v24.4s, v15.4s, v0.4s\n"
"fmla v25.4s, v16.4s, v0.4s\n"
- "ldr q15, [x24, x11]\n"
- "ldr q16, [x23, x11]\n"
"fmla v26.4s, v17.4s, v0.4s\n"
"fmla v27.4s, v18.4s, v0.4s\n"
- "ldr q17, [x22, x11]\n"
- "ldr q18, [x21, x11]\n"
"fmla v28.4s, v19.4s, v0.4s\n"
+ "ldr q14, [x21, x11]\n"
+ "ldr q15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"fmla v29.4s, v20.4s, v0.4s\n"
- "ldr q19, [x20, x11]\n"
- "ldp x21, x20, [x26], #0x10\n"
"fmla v30.4s, v21.4s, v0.4s\n"
"fmla v31.4s, v22.4s, v0.4s\n"
"ldr q0, [%x[params], #0x0]\n"
- "ldr q20, [x21, x11]\n"
"add %x[params], %x[params], #0x10\n"
+ "ldr q16, [x21, x11]\n"
+ "ldr q17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ldr q18, [x21, x11]\n"
+ "ldr q19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ldr q20, [x21, x11]\n"
"ldr q21, [x20, x11]\n"
- "ldr x20, [x26], #0x8\n"
+ "ldr x20, [x23], #0x8\n"
"ldr q22, [x20, x11]\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
"fmla v23.4s, v14.4s, v0.4s\n"
"fmla v24.4s, v15.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v2.4s\n"
"ldp x28, x27, [%x[outptrs], #0x0]\n"
+ "ldp x26, x25, [%x[outptrs], #0x10]\n"
"fmla v25.4s, v16.4s, v0.4s\n"
"fmla v26.4s, v17.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v2.4s\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
+ "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldp x22, x21, [%x[outptrs], #0x30]\n"
"fmla v27.4s, v18.4s, v0.4s\n"
"fmla v28.4s, v19.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
"fmla v29.4s, v20.4s, v0.4s\n"
"fmla v30.4s, v21.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v2.4s\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
"fmla v31.4s, v22.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v2.4s\n"
+ "fmax v24.4s, v24.4s, v2.4s\n"
+ "fmax v25.4s, v25.4s, v2.4s\n"
+ "fmax v26.4s, v26.4s, v2.4s\n"
"fmax v27.4s, v27.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
"fmax v28.4s, v28.4s, v2.4s\n"
"fmax v29.4s, v29.4s, v2.4s\n"
"fmax v30.4s, v30.4s, v2.4s\n"
"fmax v31.4s, v31.4s, v2.4s\n"
"fmin v23.4s, v23.4s, v1.4s\n"
"fmin v24.4s, v24.4s, v1.4s\n"
- "str q23, [x28, x11]\n"
"fmin v25.4s, v25.4s, v1.4s\n"
"fmin v26.4s, v26.4s, v1.4s\n"
- "str q24, [x27, x11]\n"
"fmin v27.4s, v27.4s, v1.4s\n"
"fmin v28.4s, v28.4s, v1.4s\n"
- "str q25, [x26, x11]\n"
"fmin v29.4s, v29.4s, v1.4s\n"
"fmin v30.4s, v30.4s, v1.4s\n"
- "str q26, [x25, x11]\n"
+ "str q23, [x28, x11]\n"
"fmin v31.4s, v31.4s, v1.4s\n"
+ "str q24, [x27, x11]\n"
+ "str q25, [x26, x11]\n"
+ "str q26, [x25, x11]\n"
"str q27, [x24, x11]\n"
"str q28, [x23, x11]\n"
"str q29, [x22, x11]\n"
@@ -172,29 +172,29 @@ void a64_fp32_nhwc_generic_output9_mla_depthfirst_impl(
"8:" // Oddments: Load bias: Done
"ldr q0, [%x[params], #0x0]\n"
"mov x10, %x[inptrs]\n"
- "ldp x9, x28, [x10], #0x10\n"
"mov v24.16b, v23.16b\n"
- "ldp x27, x26, [x10], #0x10\n"
- "ldp x25, x24, [x10], #0x10\n"
"mov v25.16b, v23.16b\n"
"mov v26.16b, v23.16b\n"
- "ldp x23, x22, [x10], #0x10\n"
- "ldr x21, [x10], #0x8\n"
"mov v27.16b, v23.16b\n"
+ "add %x[params], %x[params], #0x10\n"
"mov v28.16b, v23.16b\n"
"mov v29.16b, v23.16b\n"
+ "ldp x9, x28, [x10], #0x10\n"
"mov v30.16b, v23.16b\n"
+ "mov v31.16b, v23.16b\n"
+ "ldp x27, x26, [x10], #0x10\n"
"add x9, x9, x11\n"
"add x28, x28, x11\n"
- "mov v31.16b, v23.16b\n"
+ "ldp x25, x24, [x10], #0x10\n"
"add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
+ "ldr x21, [x10], #0x8\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
"add x21, x21, x11\n"
- "add %x[params], %x[params], #0x10\n"
"tbz %x[n_channels], #1, 9f\n"
"ldr d14, [x9], #0x8\n"
"ldr d15, [x28], #0x8\n"
@@ -231,30 +231,30 @@ void a64_fp32_nhwc_generic_output9_mla_depthfirst_impl(
"ble 14f\n"
"11:" // Oddments: Planar loop
"ldp x9, x28, [x10], #0x10\n"
- "ldp x27, x26, [x10], #0x10\n"
"fmla v23.4s, v14.4s, v0.4s\n"
"fmla v24.4s, v15.4s, v0.4s\n"
- "ldp x25, x24, [x10], #0x10\n"
- "ldp x23, x22, [x10], #0x10\n"
"fmla v25.4s, v16.4s, v0.4s\n"
"fmla v26.4s, v17.4s, v0.4s\n"
- "ldr x21, [x10], #0x8\n"
"fmla v27.4s, v18.4s, v0.4s\n"
"fmla v28.4s, v19.4s, v0.4s\n"
- "add x9, x9, x11\n"
+ "ldp x27, x26, [x10], #0x10\n"
"fmla v29.4s, v20.4s, v0.4s\n"
"fmla v30.4s, v21.4s, v0.4s\n"
+ "add x9, x9, x11\n"
"add x28, x28, x11\n"
- "add x27, x27, x11\n"
"fmla v31.4s, v22.4s, v0.4s\n"
"ldr q0, [%x[params], #0x0]\n"
+ "add %x[params], %x[params], #0x10\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
+ "ldr x21, [x10], #0x8\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
"add x21, x21, x11\n"
- "add %x[params], %x[params], #0x10\n"
"tbz %x[n_channels], #1, 12f\n"
"ldr d14, [x9], #0x8\n"
"ldr d15, [x28], #0x8\n"
@@ -292,40 +292,40 @@ void a64_fp32_nhwc_generic_output9_mla_depthfirst_impl(
"14:" // Oddments: Planar tail
"fmla v23.4s, v14.4s, v0.4s\n"
"fmla v24.4s, v15.4s, v0.4s\n"
- "fmax v23.4s, v23.4s, v2.4s\n"
"ldp x28, x27, [%x[outptrs], #0x0]\n"
+ "ldp x26, x25, [%x[outptrs], #0x10]\n"
"fmla v25.4s, v16.4s, v0.4s\n"
"fmla v26.4s, v17.4s, v0.4s\n"
- "fmax v24.4s, v24.4s, v2.4s\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
+ "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldp x22, x21, [%x[outptrs], #0x30]\n"
"fmla v27.4s, v18.4s, v0.4s\n"
"fmla v28.4s, v19.4s, v0.4s\n"
- "fmax v25.4s, v25.4s, v2.4s\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
"fmla v29.4s, v20.4s, v0.4s\n"
"fmla v30.4s, v21.4s, v0.4s\n"
- "fmax v26.4s, v26.4s, v2.4s\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
+ "add x28, x28, x11\n"
+ "add x27, x27, x11\n"
"fmla v31.4s, v22.4s, v0.4s\n"
+ "fmax v23.4s, v23.4s, v2.4s\n"
+ "add x26, x26, x11\n"
+ "add x25, x25, x11\n"
+ "fmax v24.4s, v24.4s, v2.4s\n"
+ "fmax v25.4s, v25.4s, v2.4s\n"
+ "add x24, x24, x11\n"
+ "add x23, x23, x11\n"
+ "fmax v26.4s, v26.4s, v2.4s\n"
"fmax v27.4s, v27.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "add x28, x28, x11\n"
+ "add x22, x22, x11\n"
+ "add x21, x21, x11\n"
"fmax v28.4s, v28.4s, v2.4s\n"
"fmax v29.4s, v29.4s, v2.4s\n"
- "add x27, x27, x11\n"
- "add x26, x26, x11\n"
+ "add x20, x20, x11\n"
"fmax v30.4s, v30.4s, v2.4s\n"
"fmax v31.4s, v31.4s, v2.4s\n"
- "add x25, x25, x11\n"
- "add x24, x24, x11\n"
"fmin v23.4s, v23.4s, v1.4s\n"
"fmin v24.4s, v24.4s, v1.4s\n"
- "add x23, x23, x11\n"
- "add x22, x22, x11\n"
"fmin v25.4s, v25.4s, v1.4s\n"
"fmin v26.4s, v26.4s, v1.4s\n"
- "add x21, x21, x11\n"
- "add x20, x20, x11\n"
"fmin v27.4s, v27.4s, v1.4s\n"
"fmin v28.4s, v28.4s, v1.4s\n"
"fmin v29.4s, v29.4s, v1.4s\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
index 9cafd23fb8..ac255d149f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,49 +43,49 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
__asm__ __volatile__(
"ld1r { v27.4s }, [%x[clamps]]\n"
- "ldr x21, [%x[inptrs], #0x0]\n"
- "lsr x22, %x[channel_multiplier], #0x2\n"
+ "ldr x25, [%x[inptrs], #0x0]\n"
+ "lsr x24, %x[channel_multiplier], #0x2\n"
"add x20, %x[clamps], #0x4\n"
- "ldr q0, [x21, #0x0]\n"
- "ldr q1, [x21, #0x10]\n"
- "mov x21, #0x0\n"
- "mov x14, #0x0\n"
"ld1r { v26.4s }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
- "ldr q2, [x20, #0x0]\n"
- "ldr q3, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ldr q4, [x20, #0x0]\n"
- "ldr q5, [x20, #0x10]\n"
+ "ldr x22, [%x[inptrs], #0x8]\n"
+ "mov x23, #0x0\n"
+ "mov x15, #0x0\n"
+ "ldr x21, [%x[inptrs], #0x10]\n"
"ldr x20, [%x[inptrs], #0x18]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x25, #0x10]\n"
+ "ldr q2, [x22, #0x0]\n"
+ "ldr q3, [x22, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x21, #0x10]\n"
"ldr q6, [x20, #0x0]\n"
"ldr q7, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x20]\n"
- "ldr q8, [x20, #0x0]\n"
- "ldr q9, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x28]\n"
- "ldr q10, [x20, #0x0]\n"
- "ldr q11, [x20, #0x10]\n"
+ "ldr x22, [%x[inptrs], #0x20]\n"
+ "ldr x21, [%x[inptrs], #0x28]\n"
"ldr x20, [%x[inptrs], #0x30]\n"
+ "ldp x14, x13, [%x[outptrs], #0x0]\n"
+ "ldp x12, x11, [%x[outptrs], #0x10]\n"
+ "ldp x10, x9, [%x[outptrs], #0x20]\n"
+ "ldr q8, [x22, #0x0]\n"
+ "ldr q9, [x22, #0x10]\n"
+ "ldr q10, [x21, #0x0]\n"
+ "ldr q11, [x21, #0x10]\n"
"ldr q12, [x20, #0x0]\n"
"ldr q13, [x20, #0x10]\n"
- "ldp x13, x12, [%x[outptrs], #0x0]\n"
- "ldp x11, x10, [%x[outptrs], #0x10]\n"
- "ldp x9, x28, [%x[outptrs], #0x20]\n"
- "ldp x27, x26, [%x[outptrs], #0x30]\n"
- "ldr x25, [%x[outptrs], #0x40]\n"
- "cbz x22, 3f\n"
+ "ldp x28, x27, [%x[outptrs], #0x30]\n"
+ "ldr x26, [%x[outptrs], #0x40]\n"
+ "cbz x24, 3f\n"
"ldr q14, [%x[params], #0x0]\n"
"ldr q31, [%x[params], #0x10]\n"
- "subs x22, x22, #0x1\n"
- "mov v15.16b, v14.16b\n"
+ "subs x24, x24, #0x1\n"
"ldr q30, [%x[params], #0x20]\n"
"ldr q29, [%x[params], #0x30]\n"
+ "add %x[params], %x[params], #0x40\n"
+ "mov v15.16b, v14.16b\n"
"mov v16.16b, v14.16b\n"
"mov v17.16b, v14.16b\n"
"mov v18.16b, v14.16b\n"
"mov v19.16b, v14.16b\n"
- "add %x[params], %x[params], #0x40\n"
"mov v20.16b, v14.16b\n"
"mov v21.16b, v14.16b\n"
"mov v22.16b, v14.16b\n"
@@ -93,8 +93,8 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"1:" // Output channel complete vector loop
"fmla v14.4s, v31.4s, v0.s[0]\n"
"fmla v15.4s, v31.4s, v0.s[2]\n"
- "subs x22, x22, #0x1\n"
- "add x21, x21, #0x4\n"
+ "subs x24, x24, #0x1\n"
+ "add x23, x23, #0x4\n"
"fmla v16.4s, v31.4s, v1.s[0]\n"
"fmla v17.4s, v31.4s, v4.s[0]\n"
"fmla v18.4s, v31.4s, v4.s[2]\n"
@@ -175,52 +175,52 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"ldr q30, [%x[params], #0x80]\n"
"fmla v14.4s, v23.4s, v4.s[2]\n"
"fmla v15.4s, v23.4s, v5.s[0]\n"
- "fmin v14.4s, v14.4s, v26.4s\n"
"fmla v16.4s, v23.4s, v5.s[2]\n"
"fmla v17.4s, v23.4s, v8.s[2]\n"
- "fmax v14.4s, v14.4s, v27.4s\n"
- "str q14, [x13, x14]\n"
- "ldr q14, [%x[params], #0x60]\n"
"fmla v18.4s, v23.4s, v9.s[0]\n"
"fmla v19.4s, v23.4s, v9.s[2]\n"
- "fmin v15.4s, v15.4s, v26.4s\n"
"fmla v20.4s, v23.4s, v12.s[2]\n"
"fmla v21.4s, v23.4s, v13.s[0]\n"
- "fmin v16.4s, v16.4s, v26.4s\n"
"fmla v22.4s, v23.4s, v13.s[2]\n"
"ldr q29, [%x[params], #0x90]\n"
+ "fmin v14.4s, v14.4s, v26.4s\n"
+ "fmin v15.4s, v15.4s, v26.4s\n"
+ "fmin v16.4s, v16.4s, v26.4s\n"
"fmin v17.4s, v17.4s, v26.4s\n"
- "add %x[params], %x[params], #0xa0\n"
"fmin v18.4s, v18.4s, v26.4s\n"
+ "fmax v14.4s, v14.4s, v27.4s\n"
"fmin v19.4s, v19.4s, v26.4s\n"
"fmin v20.4s, v20.4s, v26.4s\n"
"fmin v21.4s, v21.4s, v26.4s\n"
"fmin v22.4s, v22.4s, v26.4s\n"
"fmax v15.4s, v15.4s, v27.4s\n"
- "str q15, [x12, x14]\n"
+ "str q14, [x14, x15]\n"
+ "ldr q14, [%x[params], #0x60]\n"
"fmax v16.4s, v16.4s, v27.4s\n"
"fmax v17.4s, v17.4s, v27.4s\n"
- "str q16, [x11, x14]\n"
+ "add %x[params], %x[params], #0xa0\n"
"fmax v18.4s, v18.4s, v27.4s\n"
"fmax v19.4s, v19.4s, v27.4s\n"
- "str q17, [x10, x14]\n"
"fmax v20.4s, v20.4s, v27.4s\n"
"fmax v21.4s, v21.4s, v27.4s\n"
- "str q18, [x9, x14]\n"
+ "str q15, [x13, x15]\n"
"fmax v22.4s, v22.4s, v27.4s\n"
- "str q19, [x28, x14]\n"
+ "str q16, [x12, x15]\n"
"mov v15.16b, v14.16b\n"
- "str q20, [x27, x14]\n"
+ "str q17, [x11, x15]\n"
"mov v16.16b, v14.16b\n"
"mov v17.16b, v14.16b\n"
- "str q21, [x26, x14]\n"
+ "str q18, [x10, x15]\n"
"mov v18.16b, v14.16b\n"
+ "str q19, [x9, x15]\n"
"mov v19.16b, v14.16b\n"
- "str q22, [x25, x14]\n"
+ "str q20, [x28, x15]\n"
"mov v20.16b, v14.16b\n"
+ "str q21, [x27, x15]\n"
"mov v21.16b, v14.16b\n"
- "add x14, x14, #0x10\n"
+ "str q22, [x26, x15]\n"
"mov v22.16b, v14.16b\n"
+ "add x15, x15, #0x10\n"
"bgt 1b\n"
"2:" // Output channel complete vector tail
"fmla v14.4s, v31.4s, v0.s[0]\n"
@@ -304,17 +304,17 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"fmla v22.4s, v24.4s, v13.s[1]\n"
"fmla v14.4s, v23.4s, v4.s[2]\n"
"fmla v15.4s, v23.4s, v5.s[0]\n"
- "fmin v14.4s, v14.4s, v26.4s\n"
"fmla v16.4s, v23.4s, v5.s[2]\n"
"fmla v17.4s, v23.4s, v8.s[2]\n"
- "fmin v15.4s, v15.4s, v26.4s\n"
"fmla v18.4s, v23.4s, v9.s[0]\n"
"fmla v19.4s, v23.4s, v9.s[2]\n"
- "fmin v16.4s, v16.4s, v26.4s\n"
"fmla v20.4s, v23.4s, v12.s[2]\n"
"fmla v21.4s, v23.4s, v13.s[0]\n"
- "fmin v17.4s, v17.4s, v26.4s\n"
"fmla v22.4s, v23.4s, v13.s[2]\n"
+ "fmin v14.4s, v14.4s, v26.4s\n"
+ "fmin v15.4s, v15.4s, v26.4s\n"
+ "fmin v16.4s, v16.4s, v26.4s\n"
+ "fmin v17.4s, v17.4s, v26.4s\n"
"fmin v18.4s, v18.4s, v26.4s\n"
"fmin v19.4s, v19.4s, v26.4s\n"
"fmin v20.4s, v20.4s, v26.4s\n"
@@ -322,32 +322,32 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"fmin v22.4s, v22.4s, v26.4s\n"
"fmax v14.4s, v14.4s, v27.4s\n"
"fmax v15.4s, v15.4s, v27.4s\n"
- "str q14, [x13, x14]\n"
"fmax v16.4s, v16.4s, v27.4s\n"
"fmax v17.4s, v17.4s, v27.4s\n"
- "str q15, [x12, x14]\n"
"fmax v18.4s, v18.4s, v27.4s\n"
"fmax v19.4s, v19.4s, v27.4s\n"
- "str q16, [x11, x14]\n"
"fmax v20.4s, v20.4s, v27.4s\n"
"fmax v21.4s, v21.4s, v27.4s\n"
- "str q17, [x10, x14]\n"
+ "str q14, [x14, x15]\n"
"fmax v22.4s, v22.4s, v27.4s\n"
- "str q18, [x9, x14]\n"
- "str q19, [x28, x14]\n"
- "str q20, [x27, x14]\n"
- "str q21, [x26, x14]\n"
- "str q22, [x25, x14]\n"
- "add x14, x14, #0x10\n"
+ "str q15, [x13, x15]\n"
+ "str q16, [x12, x15]\n"
+ "str q17, [x11, x15]\n"
+ "str q18, [x10, x15]\n"
+ "str q19, [x9, x15]\n"
+ "str q20, [x28, x15]\n"
+ "str q21, [x27, x15]\n"
+ "str q22, [x26, x15]\n"
+ "add x15, x15, #0x10\n"
"3:" // Output channel oddments
"tst %x[channel_multiplier], #0x3\n"
"beq 6f\n"
"ldr q14, [%x[params], #0x0]\n"
"ldr q25, [%x[params], #0x10]\n"
- "mov v15.16b, v14.16b\n"
- "mov v16.16b, v14.16b\n"
"ldr q24, [%x[params], #0x20]\n"
"ldr q23, [%x[params], #0x30]\n"
+ "mov v15.16b, v14.16b\n"
+ "mov v16.16b, v14.16b\n"
"mov v17.16b, v14.16b\n"
"mov v18.16b, v14.16b\n"
"mov v19.16b, v14.16b\n"
@@ -435,17 +435,17 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"fmla v22.4s, v24.4s, v13.s[1]\n"
"fmla v14.4s, v23.4s, v4.s[2]\n"
"fmla v15.4s, v23.4s, v5.s[0]\n"
- "fmin v14.4s, v14.4s, v26.4s\n"
"fmla v16.4s, v23.4s, v5.s[2]\n"
"fmla v17.4s, v23.4s, v8.s[2]\n"
- "fmin v15.4s, v15.4s, v26.4s\n"
"fmla v18.4s, v23.4s, v9.s[0]\n"
"fmla v19.4s, v23.4s, v9.s[2]\n"
- "fmin v16.4s, v16.4s, v26.4s\n"
"fmla v20.4s, v23.4s, v12.s[2]\n"
"fmla v21.4s, v23.4s, v13.s[0]\n"
- "fmin v17.4s, v17.4s, v26.4s\n"
"fmla v22.4s, v23.4s, v13.s[2]\n"
+ "fmin v14.4s, v14.4s, v26.4s\n"
+ "fmin v15.4s, v15.4s, v26.4s\n"
+ "fmin v16.4s, v16.4s, v26.4s\n"
+ "fmin v17.4s, v17.4s, v26.4s\n"
"fmin v18.4s, v18.4s, v26.4s\n"
"fmin v19.4s, v19.4s, v26.4s\n"
"fmin v20.4s, v20.4s, v26.4s\n"
@@ -461,39 +461,39 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"fmax v21.4s, v21.4s, v27.4s\n"
"fmax v22.4s, v22.4s, v27.4s\n"
"tbz %x[channel_multiplier], #1, 4f\n"
- "add x20, x13, x14\n"
- "add x22, x12, x14\n"
- "st1 { v14.d }[0], [x20]\n"
- "add x21, x11, x14\n"
- "add x20, x10, x14\n"
- "st1 { v15.d }[0], [x22]\n"
- "add x24, x9, x14\n"
- "add x23, x28, x14\n"
- "st1 { v16.d }[0], [x21]\n"
- "add x22, x27, x14\n"
- "add x21, x26, x14\n"
- "st1 { v17.d }[0], [x20]\n"
- "add x20, x25, x14\n"
+ "add x22, x14, x15\n"
+ "add x21, x13, x15\n"
+ "add x20, x12, x15\n"
+ "add x25, x11, x15\n"
+ "st1 { v14.d }[0], [x22]\n"
+ "add x24, x10, x15\n"
+ "add x23, x9, x15\n"
+ "st1 { v15.d }[0], [x21]\n"
+ "add x22, x28, x15\n"
+ "add x21, x27, x15\n"
+ "st1 { v16.d }[0], [x20]\n"
+ "add x20, x26, x15\n"
+ "st1 { v17.d }[0], [x25]\n"
+ "add x15, x15, #0x8\n"
"st1 { v18.d }[0], [x24]\n"
- "add x14, x14, #0x8\n"
"st1 { v19.d }[0], [x23]\n"
"st1 { v20.d }[0], [x22]\n"
"st1 { v21.d }[0], [x21]\n"
"st1 { v22.d }[0], [x20]\n"
"tbz %x[channel_multiplier], #0, 5f\n"
- "add x20, x13, x14\n"
- "add x22, x12, x14\n"
- "st1 { v14.s }[2], [x20]\n"
- "add x21, x11, x14\n"
- "add x20, x10, x14\n"
- "st1 { v15.s }[2], [x22]\n"
- "add x24, x9, x14\n"
- "add x23, x28, x14\n"
- "st1 { v16.s }[2], [x21]\n"
- "add x22, x27, x14\n"
- "add x21, x26, x14\n"
- "st1 { v17.s }[2], [x20]\n"
- "add x20, x25, x14\n"
+ "add x22, x14, x15\n"
+ "add x21, x13, x15\n"
+ "add x20, x12, x15\n"
+ "add x25, x11, x15\n"
+ "st1 { v14.s }[2], [x22]\n"
+ "add x24, x10, x15\n"
+ "add x23, x9, x15\n"
+ "st1 { v15.s }[2], [x21]\n"
+ "add x22, x28, x15\n"
+ "add x21, x27, x15\n"
+ "st1 { v16.s }[2], [x20]\n"
+ "add x20, x26, x15\n"
+ "st1 { v17.s }[2], [x25]\n"
"st1 { v18.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
@@ -501,19 +501,19 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"st1 { v22.s }[2], [x20]\n"
"b 5f\n"
"4:" // Output channel oddments: Store: Bit 1: Unset
- "add x20, x13, x14\n"
- "add x22, x12, x14\n"
- "st1 { v14.s }[0], [x20]\n"
- "add x21, x11, x14\n"
- "add x20, x10, x14\n"
- "st1 { v15.s }[0], [x22]\n"
- "add x24, x9, x14\n"
- "add x23, x28, x14\n"
- "st1 { v16.s }[0], [x21]\n"
- "add x22, x27, x14\n"
- "add x21, x26, x14\n"
- "st1 { v17.s }[0], [x20]\n"
- "add x20, x25, x14\n"
+ "add x22, x14, x15\n"
+ "add x21, x13, x15\n"
+ "add x20, x12, x15\n"
+ "add x25, x11, x15\n"
+ "st1 { v14.s }[0], [x22]\n"
+ "add x24, x10, x15\n"
+ "add x23, x9, x15\n"
+ "st1 { v15.s }[0], [x21]\n"
+ "add x22, x28, x15\n"
+ "add x21, x27, x15\n"
+ "st1 { v16.s }[0], [x20]\n"
+ "add x20, x26, x15\n"
+ "st1 { v17.s }[0], [x25]\n"
"st1 { v18.s }[0], [x24]\n"
"st1 { v19.s }[0], [x23]\n"
"st1 { v20.s }[0], [x22]\n"
@@ -523,7 +523,7 @@ void a64_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"6:" // End
: [params] "+&r" (params)
: [channel_multiplier] "r" (n_output_channels), [clamps] "r" (minmax_vals), [inptrs] "r" (inptrs), [outptrs] "r" (outptrs)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
index c9bb1f41da..2f6a399d67 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,55 +43,55 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
__asm__ __volatile__(
"ld1r { v26.4s }, [%x[clamps]]\n"
- "ldr x21, [%x[inptrs], #0x0]\n"
- "lsr x22, %x[channel_multiplier], #0x2\n"
+ "ldr x25, [%x[inptrs], #0x0]\n"
+ "lsr x24, %x[channel_multiplier], #0x2\n"
"add x20, %x[clamps], #0x4\n"
- "ldr q0, [x21, #0x0]\n"
- "ldr q1, [x21, #0x10]\n"
- "mov x21, #0x0\n"
- "mov x13, #0x0\n"
"ld1r { v25.4s }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
- "ldr q2, [x20, #0x0]\n"
- "ldr q3, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ldr q4, [x20, #0x0]\n"
- "ldr q5, [x20, #0x10]\n"
+ "ldr x23, [%x[inptrs], #0x8]\n"
+ "mov x22, #0x0\n"
+ "mov x14, #0x0\n"
+ "ldr x21, [%x[inptrs], #0x10]\n"
"ldr x20, [%x[inptrs], #0x18]\n"
+ "ldr q0, [x25, #0x0]\n"
+ "ldr q1, [x25, #0x10]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "ldr q3, [x23, #0x10]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x21, #0x10]\n"
"ldr q6, [x20, #0x0]\n"
"ldr q7, [x20, #0x10]\n"
- "ldr x20, [%x[inptrs], #0x20]\n"
- "ldr q8, [x20, #0x0]\n"
- "ldr q9, [x20, #0x10]\n"
+ "ldr x21, [%x[inptrs], #0x20]\n"
"ldr x20, [%x[inptrs], #0x28]\n"
+ "ldp x13, x12, [%x[outptrs], #0x0]\n"
+ "ldp x11, x10, [%x[outptrs], #0x10]\n"
+ "ldp x9, x28, [%x[outptrs], #0x20]\n"
+ "ldp x27, x26, [%x[outptrs], #0x30]\n"
+ "ldr q8, [x21, #0x0]\n"
+ "ldr q9, [x21, #0x10]\n"
"ldr q10, [x20, #0x0]\n"
"ldr q11, [x20, #0x10]\n"
- "ldp x12, x11, [%x[outptrs], #0x0]\n"
- "ldp x10, x9, [%x[outptrs], #0x10]\n"
- "ldp x28, x27, [%x[outptrs], #0x20]\n"
- "ldp x26, x25, [%x[outptrs], #0x30]\n"
- "cbz x22, 3f\n"
+ "cbz x24, 3f\n"
"ldr q12, [%x[params], #0x0]\n"
"ldr q31, [%x[params], #0x10]\n"
- "subs x22, x22, #0x1\n"
- "mov v13.16b, v12.16b\n"
+ "subs x24, x24, #0x1\n"
"ldr q30, [%x[params], #0x20]\n"
"ldr q29, [%x[params], #0x30]\n"
- "mov v14.16b, v12.16b\n"
- "mov v15.16b, v12.16b\n"
"ldr q28, [%x[params], #0x40]\n"
"ldr q27, [%x[params], #0x50]\n"
+ "add %x[params], %x[params], #0x60\n"
+ "mov v13.16b, v12.16b\n"
+ "mov v14.16b, v12.16b\n"
+ "mov v15.16b, v12.16b\n"
"mov v16.16b, v12.16b\n"
"mov v17.16b, v12.16b\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v12.16b\n"
- "add %x[params], %x[params], #0x60\n"
"beq 2f\n"
"1:" // Output channel complete vector loop
"fmla v12.4s, v31.4s, v0.s[0]\n"
"fmla v13.4s, v31.4s, v0.s[1]\n"
- "subs x22, x22, #0x1\n"
- "add x21, x21, #0x4\n"
+ "subs x24, x24, #0x1\n"
+ "add x22, x22, #0x4\n"
"fmla v14.4s, v31.4s, v0.s[2]\n"
"fmla v15.4s, v31.4s, v0.s[3]\n"
"fmla v16.4s, v31.4s, v2.s[0]\n"
@@ -308,47 +308,47 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"ldr q28, [%x[params], #0x180]\n"
"fmla v12.4s, v20.4s, v9.s[0]\n"
"fmla v13.4s, v20.4s, v9.s[1]\n"
- "fmin v12.4s, v12.4s, v25.4s\n"
"fmla v14.4s, v20.4s, v9.s[2]\n"
"fmla v15.4s, v20.4s, v9.s[3]\n"
- "fmax v12.4s, v12.4s, v26.4s\n"
- "str q12, [x12, x13]\n"
- "ldr q12, [%x[params], #0x140]\n"
"fmla v16.4s, v20.4s, v11.s[0]\n"
"fmla v17.4s, v20.4s, v11.s[1]\n"
- "fmin v13.4s, v13.4s, v25.4s\n"
"fmla v18.4s, v20.4s, v11.s[2]\n"
"fmla v19.4s, v20.4s, v11.s[3]\n"
"ldr q27, [%x[params], #0x190]\n"
+ "fmin v12.4s, v12.4s, v25.4s\n"
+ "fmin v13.4s, v13.4s, v25.4s\n"
"fmin v14.4s, v14.4s, v25.4s\n"
"fmin v15.4s, v15.4s, v25.4s\n"
+ "fmax v12.4s, v12.4s, v26.4s\n"
"fmin v16.4s, v16.4s, v25.4s\n"
- "add %x[params], %x[params], #0x1a0\n"
"fmin v17.4s, v17.4s, v25.4s\n"
"fmin v18.4s, v18.4s, v25.4s\n"
"fmin v19.4s, v19.4s, v25.4s\n"
+ "str q12, [x13, x14]\n"
+ "ldr q12, [%x[params], #0x140]\n"
"fmax v13.4s, v13.4s, v26.4s\n"
- "str q13, [x11, x13]\n"
"fmax v14.4s, v14.4s, v26.4s\n"
+ "add %x[params], %x[params], #0x1a0\n"
"fmax v15.4s, v15.4s, v26.4s\n"
- "str q14, [x10, x13]\n"
"fmax v16.4s, v16.4s, v26.4s\n"
"fmax v17.4s, v17.4s, v26.4s\n"
- "str q15, [x9, x13]\n"
"fmax v18.4s, v18.4s, v26.4s\n"
"fmax v19.4s, v19.4s, v26.4s\n"
- "str q16, [x28, x13]\n"
- "str q17, [x27, x13]\n"
+ "str q13, [x12, x14]\n"
"mov v13.16b, v12.16b\n"
+ "str q14, [x11, x14]\n"
"mov v14.16b, v12.16b\n"
- "str q18, [x26, x13]\n"
+ "str q15, [x10, x14]\n"
"mov v15.16b, v12.16b\n"
+ "str q16, [x9, x14]\n"
"mov v16.16b, v12.16b\n"
- "str q19, [x25, x13]\n"
+ "str q17, [x28, x14]\n"
"mov v17.16b, v12.16b\n"
+ "str q18, [x27, x14]\n"
"mov v18.16b, v12.16b\n"
- "add x13, x13, #0x10\n"
+ "str q19, [x26, x14]\n"
"mov v19.16b, v12.16b\n"
+ "add x14, x14, #0x10\n"
"bgt 1b\n"
"2:" // Output channel complete vector tail
"fmla v12.4s, v31.4s, v0.s[0]\n"
@@ -566,55 +566,55 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v19.4s, v21.4s, v11.s[2]\n"
"fmla v12.4s, v20.4s, v9.s[0]\n"
"fmla v13.4s, v20.4s, v9.s[1]\n"
- "fmin v12.4s, v12.4s, v25.4s\n"
"fmla v14.4s, v20.4s, v9.s[2]\n"
"fmla v15.4s, v20.4s, v9.s[3]\n"
- "fmin v13.4s, v13.4s, v25.4s\n"
"fmla v16.4s, v20.4s, v11.s[0]\n"
"fmla v17.4s, v20.4s, v11.s[1]\n"
- "fmin v14.4s, v14.4s, v25.4s\n"
"fmla v18.4s, v20.4s, v11.s[2]\n"
"fmla v19.4s, v20.4s, v11.s[3]\n"
+ "fmin v12.4s, v12.4s, v25.4s\n"
+ "fmin v13.4s, v13.4s, v25.4s\n"
+ "fmin v14.4s, v14.4s, v25.4s\n"
"fmin v15.4s, v15.4s, v25.4s\n"
"fmin v16.4s, v16.4s, v25.4s\n"
"fmin v17.4s, v17.4s, v25.4s\n"
+ "fmax v12.4s, v12.4s, v26.4s\n"
"fmin v18.4s, v18.4s, v25.4s\n"
"fmin v19.4s, v19.4s, v25.4s\n"
- "fmax v12.4s, v12.4s, v26.4s\n"
"fmax v13.4s, v13.4s, v26.4s\n"
- "str q12, [x12, x13]\n"
"fmax v14.4s, v14.4s, v26.4s\n"
"fmax v15.4s, v15.4s, v26.4s\n"
- "str q13, [x11, x13]\n"
"fmax v16.4s, v16.4s, v26.4s\n"
+ "str q12, [x13, x14]\n"
"fmax v17.4s, v17.4s, v26.4s\n"
- "str q14, [x10, x13]\n"
"fmax v18.4s, v18.4s, v26.4s\n"
"fmax v19.4s, v19.4s, v26.4s\n"
- "str q15, [x9, x13]\n"
- "str q16, [x28, x13]\n"
- "str q17, [x27, x13]\n"
- "str q18, [x26, x13]\n"
- "str q19, [x25, x13]\n"
- "add x13, x13, #0x10\n"
+ "str q13, [x12, x14]\n"
+ "str q14, [x11, x14]\n"
+ "str q15, [x10, x14]\n"
+ "str q16, [x9, x14]\n"
+ "str q17, [x28, x14]\n"
+ "str q18, [x27, x14]\n"
+ "str q19, [x26, x14]\n"
+ "add x14, x14, #0x10\n"
"3:" // Output channel oddments
"tst %x[channel_multiplier], #0x3\n"
"beq 6f\n"
"ldr q12, [%x[params], #0x0]\n"
"ldr q24, [%x[params], #0x10]\n"
- "mov v13.16b, v12.16b\n"
- "mov v14.16b, v12.16b\n"
"ldr q23, [%x[params], #0x20]\n"
"ldr q22, [%x[params], #0x30]\n"
- "mov v15.16b, v12.16b\n"
- "mov v16.16b, v12.16b\n"
"ldr q21, [%x[params], #0x40]\n"
"ldr q20, [%x[params], #0x50]\n"
+ "mov v13.16b, v12.16b\n"
+ "mov v14.16b, v12.16b\n"
+ "mov v15.16b, v12.16b\n"
+ "mov v16.16b, v12.16b\n"
"mov v17.16b, v12.16b\n"
"mov v18.16b, v12.16b\n"
+ "fmla v13.4s, v24.4s, v0.s[1]\n"
"mov v19.16b, v12.16b\n"
"fmla v12.4s, v24.4s, v0.s[0]\n"
- "fmla v13.4s, v24.4s, v0.s[1]\n"
"fmla v14.4s, v24.4s, v0.s[2]\n"
"fmla v15.4s, v24.4s, v0.s[3]\n"
"fmla v16.4s, v24.4s, v2.s[0]\n"
@@ -622,8 +622,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v24.4s, v2.s[2]\n"
"fmla v19.4s, v24.4s, v2.s[3]\n"
"ldr q24, [%x[params], #0x60]\n"
- "fmla v12.4s, v23.4s, v0.s[1]\n"
"fmla v13.4s, v23.4s, v0.s[2]\n"
+ "fmla v12.4s, v23.4s, v0.s[1]\n"
"fmla v14.4s, v23.4s, v0.s[3]\n"
"fmla v15.4s, v23.4s, v1.s[0]\n"
"fmla v16.4s, v23.4s, v2.s[1]\n"
@@ -631,8 +631,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v23.4s, v2.s[3]\n"
"fmla v19.4s, v23.4s, v3.s[0]\n"
"ldr q23, [%x[params], #0x70]\n"
- "fmla v12.4s, v22.4s, v0.s[2]\n"
"fmla v13.4s, v22.4s, v0.s[3]\n"
+ "fmla v12.4s, v22.4s, v0.s[2]\n"
"fmla v14.4s, v22.4s, v1.s[0]\n"
"fmla v15.4s, v22.4s, v1.s[1]\n"
"fmla v16.4s, v22.4s, v2.s[2]\n"
@@ -640,8 +640,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v22.4s, v3.s[0]\n"
"fmla v19.4s, v22.4s, v3.s[1]\n"
"ldr q22, [%x[params], #0x80]\n"
- "fmla v12.4s, v21.4s, v0.s[3]\n"
"fmla v13.4s, v21.4s, v1.s[0]\n"
+ "fmla v12.4s, v21.4s, v0.s[3]\n"
"fmla v14.4s, v21.4s, v1.s[1]\n"
"fmla v15.4s, v21.4s, v1.s[2]\n"
"fmla v16.4s, v21.4s, v2.s[3]\n"
@@ -649,8 +649,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v21.4s, v3.s[1]\n"
"fmla v19.4s, v21.4s, v3.s[2]\n"
"ldr q21, [%x[params], #0x90]\n"
- "fmla v12.4s, v20.4s, v1.s[0]\n"
"fmla v13.4s, v20.4s, v1.s[1]\n"
+ "fmla v12.4s, v20.4s, v1.s[0]\n"
"fmla v14.4s, v20.4s, v1.s[2]\n"
"fmla v15.4s, v20.4s, v1.s[3]\n"
"fmla v16.4s, v20.4s, v3.s[0]\n"
@@ -658,8 +658,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v20.4s, v3.s[2]\n"
"fmla v19.4s, v20.4s, v3.s[3]\n"
"ldr q20, [%x[params], #0xa0]\n"
- "fmla v12.4s, v24.4s, v2.s[0]\n"
"fmla v13.4s, v24.4s, v2.s[1]\n"
+ "fmla v12.4s, v24.4s, v2.s[0]\n"
"fmla v14.4s, v24.4s, v2.s[2]\n"
"fmla v15.4s, v24.4s, v2.s[3]\n"
"fmla v16.4s, v24.4s, v4.s[0]\n"
@@ -667,8 +667,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v24.4s, v4.s[2]\n"
"fmla v19.4s, v24.4s, v4.s[3]\n"
"ldr q24, [%x[params], #0xb0]\n"
- "fmla v12.4s, v23.4s, v2.s[1]\n"
"fmla v13.4s, v23.4s, v2.s[2]\n"
+ "fmla v12.4s, v23.4s, v2.s[1]\n"
"fmla v14.4s, v23.4s, v2.s[3]\n"
"fmla v15.4s, v23.4s, v3.s[0]\n"
"fmla v16.4s, v23.4s, v4.s[1]\n"
@@ -676,8 +676,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v23.4s, v4.s[3]\n"
"fmla v19.4s, v23.4s, v5.s[0]\n"
"ldr q23, [%x[params], #0xc0]\n"
- "fmla v12.4s, v22.4s, v2.s[2]\n"
"fmla v13.4s, v22.4s, v2.s[3]\n"
+ "fmla v12.4s, v22.4s, v2.s[2]\n"
"fmla v14.4s, v22.4s, v3.s[0]\n"
"fmla v15.4s, v22.4s, v3.s[1]\n"
"fmla v16.4s, v22.4s, v4.s[2]\n"
@@ -685,8 +685,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v22.4s, v5.s[0]\n"
"fmla v19.4s, v22.4s, v5.s[1]\n"
"ldr q22, [%x[params], #0xd0]\n"
- "fmla v12.4s, v21.4s, v2.s[3]\n"
"fmla v13.4s, v21.4s, v3.s[0]\n"
+ "fmla v12.4s, v21.4s, v2.s[3]\n"
"fmla v14.4s, v21.4s, v3.s[1]\n"
"fmla v15.4s, v21.4s, v3.s[2]\n"
"fmla v16.4s, v21.4s, v4.s[3]\n"
@@ -694,8 +694,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v21.4s, v5.s[1]\n"
"fmla v19.4s, v21.4s, v5.s[2]\n"
"ldr q21, [%x[params], #0xe0]\n"
- "fmla v12.4s, v20.4s, v3.s[0]\n"
"fmla v13.4s, v20.4s, v3.s[1]\n"
+ "fmla v12.4s, v20.4s, v3.s[0]\n"
"fmla v14.4s, v20.4s, v3.s[2]\n"
"fmla v15.4s, v20.4s, v3.s[3]\n"
"fmla v16.4s, v20.4s, v5.s[0]\n"
@@ -703,8 +703,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v20.4s, v5.s[2]\n"
"fmla v19.4s, v20.4s, v5.s[3]\n"
"ldr q20, [%x[params], #0xf0]\n"
- "fmla v12.4s, v24.4s, v4.s[0]\n"
"fmla v13.4s, v24.4s, v4.s[1]\n"
+ "fmla v12.4s, v24.4s, v4.s[0]\n"
"fmla v14.4s, v24.4s, v4.s[2]\n"
"fmla v15.4s, v24.4s, v4.s[3]\n"
"fmla v16.4s, v24.4s, v6.s[0]\n"
@@ -712,8 +712,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v24.4s, v6.s[2]\n"
"fmla v19.4s, v24.4s, v6.s[3]\n"
"ldr q24, [%x[params], #0x100]\n"
- "fmla v12.4s, v23.4s, v4.s[1]\n"
"fmla v13.4s, v23.4s, v4.s[2]\n"
+ "fmla v12.4s, v23.4s, v4.s[1]\n"
"fmla v14.4s, v23.4s, v4.s[3]\n"
"fmla v15.4s, v23.4s, v5.s[0]\n"
"fmla v16.4s, v23.4s, v6.s[1]\n"
@@ -721,8 +721,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v23.4s, v6.s[3]\n"
"fmla v19.4s, v23.4s, v7.s[0]\n"
"ldr q23, [%x[params], #0x110]\n"
- "fmla v12.4s, v22.4s, v4.s[2]\n"
"fmla v13.4s, v22.4s, v4.s[3]\n"
+ "fmla v12.4s, v22.4s, v4.s[2]\n"
"fmla v14.4s, v22.4s, v5.s[0]\n"
"fmla v15.4s, v22.4s, v5.s[1]\n"
"fmla v16.4s, v22.4s, v6.s[2]\n"
@@ -730,8 +730,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v22.4s, v7.s[0]\n"
"fmla v19.4s, v22.4s, v7.s[1]\n"
"ldr q22, [%x[params], #0x120]\n"
- "fmla v12.4s, v21.4s, v4.s[3]\n"
"fmla v13.4s, v21.4s, v5.s[0]\n"
+ "fmla v12.4s, v21.4s, v4.s[3]\n"
"fmla v14.4s, v21.4s, v5.s[1]\n"
"fmla v15.4s, v21.4s, v5.s[2]\n"
"fmla v16.4s, v21.4s, v6.s[3]\n"
@@ -739,8 +739,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v21.4s, v7.s[1]\n"
"fmla v19.4s, v21.4s, v7.s[2]\n"
"ldr q21, [%x[params], #0x130]\n"
- "fmla v12.4s, v20.4s, v5.s[0]\n"
"fmla v13.4s, v20.4s, v5.s[1]\n"
+ "fmla v12.4s, v20.4s, v5.s[0]\n"
"fmla v14.4s, v20.4s, v5.s[2]\n"
"fmla v15.4s, v20.4s, v5.s[3]\n"
"fmla v16.4s, v20.4s, v7.s[0]\n"
@@ -748,8 +748,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v20.4s, v7.s[2]\n"
"fmla v19.4s, v20.4s, v7.s[3]\n"
"ldr q20, [%x[params], #0x140]\n"
- "fmla v12.4s, v24.4s, v6.s[0]\n"
"fmla v13.4s, v24.4s, v6.s[1]\n"
+ "fmla v12.4s, v24.4s, v6.s[0]\n"
"fmla v14.4s, v24.4s, v6.s[2]\n"
"fmla v15.4s, v24.4s, v6.s[3]\n"
"fmla v16.4s, v24.4s, v8.s[0]\n"
@@ -757,8 +757,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v24.4s, v8.s[2]\n"
"fmla v19.4s, v24.4s, v8.s[3]\n"
"ldr q24, [%x[params], #0x150]\n"
- "fmla v12.4s, v23.4s, v6.s[1]\n"
"fmla v13.4s, v23.4s, v6.s[2]\n"
+ "fmla v12.4s, v23.4s, v6.s[1]\n"
"fmla v14.4s, v23.4s, v6.s[3]\n"
"fmla v15.4s, v23.4s, v7.s[0]\n"
"fmla v16.4s, v23.4s, v8.s[1]\n"
@@ -766,8 +766,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v23.4s, v8.s[3]\n"
"fmla v19.4s, v23.4s, v9.s[0]\n"
"ldr q23, [%x[params], #0x160]\n"
- "fmla v12.4s, v22.4s, v6.s[2]\n"
"fmla v13.4s, v22.4s, v6.s[3]\n"
+ "fmla v12.4s, v22.4s, v6.s[2]\n"
"fmla v14.4s, v22.4s, v7.s[0]\n"
"fmla v15.4s, v22.4s, v7.s[1]\n"
"fmla v16.4s, v22.4s, v8.s[2]\n"
@@ -775,8 +775,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v22.4s, v9.s[0]\n"
"fmla v19.4s, v22.4s, v9.s[1]\n"
"ldr q22, [%x[params], #0x170]\n"
- "fmla v12.4s, v21.4s, v6.s[3]\n"
"fmla v13.4s, v21.4s, v7.s[0]\n"
+ "fmla v12.4s, v21.4s, v6.s[3]\n"
"fmla v14.4s, v21.4s, v7.s[1]\n"
"fmla v15.4s, v21.4s, v7.s[2]\n"
"fmla v16.4s, v21.4s, v8.s[3]\n"
@@ -784,8 +784,8 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v21.4s, v9.s[1]\n"
"fmla v19.4s, v21.4s, v9.s[2]\n"
"ldr q21, [%x[params], #0x180]\n"
- "fmla v12.4s, v20.4s, v7.s[0]\n"
"fmla v13.4s, v20.4s, v7.s[1]\n"
+ "fmla v12.4s, v20.4s, v7.s[0]\n"
"fmla v14.4s, v20.4s, v7.s[2]\n"
"fmla v15.4s, v20.4s, v7.s[3]\n"
"fmla v16.4s, v20.4s, v9.s[0]\n"
@@ -793,50 +793,50 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla v18.4s, v20.4s, v9.s[2]\n"
"fmla v19.4s, v20.4s, v9.s[3]\n"
"ldr q20, [%x[params], #0x190]\n"
+ "fmla v13.4s, v24.4s, v8.s[1]\n"
"add %x[params], %x[params], #0x1a0\n"
"fmla v12.4s, v24.4s, v8.s[0]\n"
- "fmla v13.4s, v24.4s, v8.s[1]\n"
"fmla v14.4s, v24.4s, v8.s[2]\n"
"fmla v15.4s, v24.4s, v8.s[3]\n"
"fmla v16.4s, v24.4s, v10.s[0]\n"
"fmla v17.4s, v24.4s, v10.s[1]\n"
"fmla v18.4s, v24.4s, v10.s[2]\n"
"fmla v19.4s, v24.4s, v10.s[3]\n"
- "fmla v12.4s, v23.4s, v8.s[1]\n"
"fmla v13.4s, v23.4s, v8.s[2]\n"
+ "fmla v12.4s, v23.4s, v8.s[1]\n"
"fmla v14.4s, v23.4s, v8.s[3]\n"
"fmla v15.4s, v23.4s, v9.s[0]\n"
"fmla v16.4s, v23.4s, v10.s[1]\n"
"fmla v17.4s, v23.4s, v10.s[2]\n"
"fmla v18.4s, v23.4s, v10.s[3]\n"
"fmla v19.4s, v23.4s, v11.s[0]\n"
- "fmla v12.4s, v22.4s, v8.s[2]\n"
"fmla v13.4s, v22.4s, v8.s[3]\n"
+ "fmla v12.4s, v22.4s, v8.s[2]\n"
"fmla v14.4s, v22.4s, v9.s[0]\n"
"fmla v15.4s, v22.4s, v9.s[1]\n"
"fmla v16.4s, v22.4s, v10.s[2]\n"
"fmla v17.4s, v22.4s, v10.s[3]\n"
"fmla v18.4s, v22.4s, v11.s[0]\n"
"fmla v19.4s, v22.4s, v11.s[1]\n"
- "fmla v12.4s, v21.4s, v8.s[3]\n"
"fmla v13.4s, v21.4s, v9.s[0]\n"
+ "fmla v12.4s, v21.4s, v8.s[3]\n"
"fmla v14.4s, v21.4s, v9.s[1]\n"
"fmla v15.4s, v21.4s, v9.s[2]\n"
"fmla v16.4s, v21.4s, v10.s[3]\n"
"fmla v17.4s, v21.4s, v11.s[0]\n"
"fmla v18.4s, v21.4s, v11.s[1]\n"
"fmla v19.4s, v21.4s, v11.s[2]\n"
- "fmla v12.4s, v20.4s, v9.s[0]\n"
"fmla v13.4s, v20.4s, v9.s[1]\n"
- "fmin v12.4s, v12.4s, v25.4s\n"
+ "fmla v12.4s, v20.4s, v9.s[0]\n"
"fmla v14.4s, v20.4s, v9.s[2]\n"
"fmla v15.4s, v20.4s, v9.s[3]\n"
- "fmin v13.4s, v13.4s, v25.4s\n"
"fmla v16.4s, v20.4s, v11.s[0]\n"
"fmla v17.4s, v20.4s, v11.s[1]\n"
- "fmin v14.4s, v14.4s, v25.4s\n"
"fmla v18.4s, v20.4s, v11.s[2]\n"
"fmla v19.4s, v20.4s, v11.s[3]\n"
+ "fmin v13.4s, v13.4s, v25.4s\n"
+ "fmin v12.4s, v12.4s, v25.4s\n"
+ "fmin v14.4s, v14.4s, v25.4s\n"
"fmin v15.4s, v15.4s, v25.4s\n"
"fmin v16.4s, v16.4s, v25.4s\n"
"fmin v17.4s, v17.4s, v25.4s\n"
@@ -851,35 +851,35 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmax v18.4s, v18.4s, v26.4s\n"
"fmax v19.4s, v19.4s, v26.4s\n"
"tbz %x[channel_multiplier], #1, 4f\n"
- "add x20, x12, x13\n"
- "add x21, x11, x13\n"
- "st1 { v12.d }[0], [x20]\n"
- "add x20, x10, x13\n"
- "add x24, x9, x13\n"
- "st1 { v13.d }[0], [x21]\n"
- "add x23, x28, x13\n"
- "add x22, x27, x13\n"
- "st1 { v14.d }[0], [x20]\n"
- "add x21, x26, x13\n"
- "add x20, x25, x13\n"
+ "add x21, x13, x14\n"
+ "add x20, x12, x14\n"
+ "add x25, x11, x14\n"
+ "add x24, x10, x14\n"
+ "st1 { v12.d }[0], [x21]\n"
+ "add x23, x9, x14\n"
+ "add x22, x28, x14\n"
+ "st1 { v13.d }[0], [x20]\n"
+ "add x21, x27, x14\n"
+ "add x20, x26, x14\n"
+ "st1 { v14.d }[0], [x25]\n"
"st1 { v15.d }[0], [x24]\n"
+ "add x14, x14, #0x8\n"
"st1 { v16.d }[0], [x23]\n"
- "add x13, x13, #0x8\n"
"st1 { v17.d }[0], [x22]\n"
"st1 { v18.d }[0], [x21]\n"
"st1 { v19.d }[0], [x20]\n"
"tbz %x[channel_multiplier], #0, 5f\n"
- "add x20, x12, x13\n"
- "add x21, x11, x13\n"
- "st1 { v12.s }[2], [x20]\n"
- "add x20, x10, x13\n"
- "add x24, x9, x13\n"
- "st1 { v13.s }[2], [x21]\n"
- "add x23, x28, x13\n"
- "add x22, x27, x13\n"
- "st1 { v14.s }[2], [x20]\n"
- "add x21, x26, x13\n"
- "add x20, x25, x13\n"
+ "add x21, x13, x14\n"
+ "add x20, x12, x14\n"
+ "add x25, x11, x14\n"
+ "add x24, x10, x14\n"
+ "st1 { v12.s }[2], [x21]\n"
+ "add x23, x9, x14\n"
+ "add x22, x28, x14\n"
+ "st1 { v13.s }[2], [x20]\n"
+ "add x21, x27, x14\n"
+ "add x20, x26, x14\n"
+ "st1 { v14.s }[2], [x25]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v17.s }[2], [x22]\n"
@@ -887,17 +887,17 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"st1 { v19.s }[2], [x20]\n"
"b 5f\n"
"4:" // Output channel oddments: Store: Bit 1: Unset
- "add x20, x12, x13\n"
- "add x21, x11, x13\n"
- "st1 { v12.s }[0], [x20]\n"
- "add x20, x10, x13\n"
- "add x24, x9, x13\n"
- "st1 { v13.s }[0], [x21]\n"
- "add x23, x28, x13\n"
- "add x22, x27, x13\n"
- "st1 { v14.s }[0], [x20]\n"
- "add x21, x26, x13\n"
- "add x20, x25, x13\n"
+ "add x21, x13, x14\n"
+ "add x20, x12, x14\n"
+ "add x25, x11, x14\n"
+ "add x24, x10, x14\n"
+ "st1 { v12.s }[0], [x21]\n"
+ "add x23, x9, x14\n"
+ "add x22, x28, x14\n"
+ "st1 { v13.s }[0], [x20]\n"
+ "add x21, x27, x14\n"
+ "add x20, x26, x14\n"
+ "st1 { v14.s }[0], [x25]\n"
"st1 { v15.s }[0], [x24]\n"
"st1 { v16.s }[0], [x23]\n"
"st1 { v17.s }[0], [x22]\n"
@@ -907,7 +907,7 @@ void a64_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"6:" // End
: [params] "+&r" (params)
: [channel_multiplier] "r" (n_output_channels), [clamps] "r" (minmax_vals), [inptrs] "r" (inptrs), [outptrs] "r" (outptrs)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
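The hunks above are a pure instruction reschedule: the fmla/fmin/fmax and st1 operations move relative to each other to hide latency, with no change to the arithmetic. For orientation, a minimal Arm NEON intrinsics sketch (illustrative only, not library code; the function and parameter names are assumptions) of the pattern this fp32 tail computes -- a lane-indexed multiply-accumulate followed by the fmin/fmax clamp against the activation bounds held in v25/v26:

#include <arm_neon.h>

// One accumulator's worth of the fp32 depthwise tail:
// fmla by lane, then clamp into [act_min, act_max].
static inline float32x4_t fmla_lane_then_clamp(
    float32x4_t acc,     // running accumulator (v12..v19 in the asm)
    float32x4_t weights, // one packed weight vector (v20..v24)
    float32x4_t inputs,  // one input vector (v6..v11)
    float32x4_t act_min, // lower clamp bound (v26)
    float32x4_t act_max) // upper clamp bound (v25)
{
    acc = vfmaq_laneq_f32(acc, weights, inputs, 0); // "fmla vA.4s, vW.4s, vI.s[0]"
    acc = vminq_f32(acc, act_max);                  // "fmin vA.4s, vA.4s, v25.4s"
    return vmaxq_f32(acc, act_min);                 // "fmax vA.4s, vA.4s, v26.4s"
}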
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index cc18dd4bb4..83f3528286 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,22 +58,22 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"2:" // Output channel loop: Load bias: Done
"ldr q10, [%x[weights], #0x0]\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q2, [x21, #0x10]\n"
"mov v16.16b, v31.16b\n"
"mov v17.16b, v31.16b\n"
- "ldr q1, [x20, #0x0]\n"
- "ldr q0, [x20, #0x10]\n"
"mov v18.16b, v31.16b\n"
+ "add %x[weights], %x[weights], #0x10\n"
"mov v19.16b, v31.16b\n"
"mov v20.16b, v31.16b\n"
+ "ldp x21, x20, [x22], #0x10\n"
"mov v21.16b, v31.16b\n"
- "add %x[weights], %x[weights], #0x10\n"
"mov v22.16b, v31.16b\n"
"mov v23.16b, v31.16b\n"
"mov v24.16b, v31.16b\n"
+ "ldr q3, [x21, #0x0]\n"
+ "ldr q2, [x21, #0x10]\n"
+ "ldr q1, [x20, #0x0]\n"
+ "ldr q0, [x20, #0x10]\n"
"mov v25.16b, v31.16b\n"
"mov v26.16b, v31.16b\n"
"mov v27.16b, v31.16b\n"
@@ -98,9 +98,9 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"subs x23, x23, #0x1\n"
"fmla v18.4s, v10.4s, v3.s[2]\n"
"fmla v19.4s, v10.4s, v3.s[3]\n"
- "ldr q3, [x21, #0x0]\n"
"fmla v20.4s, v10.4s, v2.s[0]\n"
"fmla v21.4s, v10.4s, v2.s[1]\n"
+ "ldr q3, [x21, #0x0]\n"
"fmla v22.4s, v10.4s, v2.s[2]\n"
"fmla v23.4s, v10.4s, v2.s[3]\n"
"ldr q2, [x21, #0x10]\n"
@@ -120,9 +120,9 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"fmla v17.4s, v9.4s, v8.s[1]\n"
"fmla v18.4s, v9.4s, v8.s[2]\n"
"fmla v19.4s, v9.4s, v8.s[3]\n"
- "ldr q8, [x21, #0x0]\n"
"fmla v20.4s, v9.4s, v7.s[0]\n"
"fmla v21.4s, v9.4s, v7.s[1]\n"
+ "ldr q8, [x21, #0x0]\n"
"fmla v22.4s, v9.4s, v7.s[2]\n"
"fmla v23.4s, v9.4s, v7.s[3]\n"
"ldr q7, [x21, #0x10]\n"
@@ -168,71 +168,71 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"fmla v31.4s, v10.4s, v0.s[3]\n"
"fmla v16.4s, v9.4s, v8.s[0]\n"
"fmla v17.4s, v9.4s, v8.s[1]\n"
- "fmin v16.4s, v16.4s, v11.4s\n"
"fmla v18.4s, v9.4s, v8.s[2]\n"
"fmla v19.4s, v9.4s, v8.s[3]\n"
- "fmin v17.4s, v17.4s, v11.4s\n"
"fmla v20.4s, v9.4s, v7.s[0]\n"
"fmla v21.4s, v9.4s, v7.s[1]\n"
- "fmin v18.4s, v18.4s, v11.4s\n"
"fmla v22.4s, v9.4s, v7.s[2]\n"
"fmla v23.4s, v9.4s, v7.s[3]\n"
- "fmin v19.4s, v19.4s, v11.4s\n"
"fmla v24.4s, v9.4s, v6.s[0]\n"
"fmla v25.4s, v9.4s, v6.s[1]\n"
- "fmin v20.4s, v20.4s, v11.4s\n"
+ "fmin v16.4s, v16.4s, v11.4s\n"
"fmla v26.4s, v9.4s, v6.s[2]\n"
"fmla v27.4s, v9.4s, v6.s[3]\n"
- "fmin v21.4s, v21.4s, v11.4s\n"
+ "fmin v17.4s, v17.4s, v11.4s\n"
"fmla v28.4s, v9.4s, v5.s[0]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmin v22.4s, v22.4s, v11.4s\n"
+ "fmin v18.4s, v18.4s, v11.4s\n"
"fmla v30.4s, v9.4s, v5.s[2]\n"
"fmla v31.4s, v9.4s, v5.s[3]\n"
+ "fmin v19.4s, v19.4s, v11.4s\n"
+ "fmin v20.4s, v20.4s, v11.4s\n"
+ "fmin v21.4s, v21.4s, v11.4s\n"
+ "fmin v22.4s, v22.4s, v11.4s\n"
"fmin v23.4s, v23.4s, v11.4s\n"
"fmax v16.4s, v16.4s, v12.4s\n"
"fmax v17.4s, v17.4s, v12.4s\n"
- "str q16, [x27, x28]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"fmax v18.4s, v18.4s, v12.4s\n"
"fmax v19.4s, v19.4s, v12.4s\n"
- "str q17, [x26, x28]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"fmax v20.4s, v20.4s, v12.4s\n"
"fmax v21.4s, v21.4s, v12.4s\n"
- "str q18, [x25, x28]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"fmax v22.4s, v22.4s, v12.4s\n"
"fmax v23.4s, v23.4s, v12.4s\n"
- "str q19, [x24, x28]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
+ "str q16, [x27, x28]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"fmin v24.4s, v24.4s, v11.4s\n"
"fmin v25.4s, v25.4s, v11.4s\n"
- "str q20, [x23, x28]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q17, [x26, x28]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"fmin v26.4s, v26.4s, v11.4s\n"
"fmin v27.4s, v27.4s, v11.4s\n"
- "str q21, [x22, x28]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
+ "str q18, [x25, x28]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"fmin v28.4s, v28.4s, v11.4s\n"
"fmin v29.4s, v29.4s, v11.4s\n"
- "str q22, [x21, x28]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
+ "str q19, [x24, x28]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"fmin v30.4s, v30.4s, v11.4s\n"
"fmin v31.4s, v31.4s, v11.4s\n"
- "str q23, [x20, x28]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
+ "str q20, [x23, x28]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q21, [x22, x28]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"fmax v24.4s, v24.4s, v12.4s\n"
"fmax v25.4s, v25.4s, v12.4s\n"
- "str q24, [x27, x28]\n"
+ "str q22, [x21, x28]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"fmax v26.4s, v26.4s, v12.4s\n"
"fmax v27.4s, v27.4s, v12.4s\n"
- "str q25, [x26, x28]\n"
+ "str q23, [x20, x28]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"fmax v28.4s, v28.4s, v12.4s\n"
"fmax v29.4s, v29.4s, v12.4s\n"
- "str q26, [x25, x28]\n"
"fmax v30.4s, v30.4s, v12.4s\n"
"fmax v31.4s, v31.4s, v12.4s\n"
+ "str q24, [x27, x28]\n"
+ "str q25, [x26, x28]\n"
+ "str q26, [x25, x28]\n"
"str q27, [x24, x28]\n"
"str q28, [x23, x28]\n"
"str q29, [x22, x28]\n"
@@ -246,16 +246,16 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"lsl x28, x10, #0x2\n"
"fmla v18.4s, v10.4s, v3.s[2]\n"
"fmla v19.4s, v10.4s, v3.s[3]\n"
- "ldr q4, [x20, #0x0]\n"
"ldr x27, [%x[outptrs], #0x0]\n"
+ "ldr x26, [%x[outptrs], #0x8]\n"
"fmla v20.4s, v10.4s, v2.s[0]\n"
"fmla v21.4s, v10.4s, v2.s[1]\n"
- "ldr x26, [%x[outptrs], #0x8]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
+ "ldr x24, [%x[outptrs], #0x18]\n"
+ "ldr q4, [x20, #0x0]\n"
"fmla v22.4s, v10.4s, v2.s[2]\n"
"fmla v23.4s, v10.4s, v2.s[3]\n"
"ldr q3, [x20, #0x10]\n"
- "ldr x24, [%x[outptrs], #0x18]\n"
"fmla v24.4s, v10.4s, v1.s[0]\n"
"fmla v25.4s, v10.4s, v1.s[1]\n"
"ldr x23, [%x[outptrs], #0x20]\n"
@@ -290,71 +290,71 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"fmla v31.4s, v9.4s, v5.s[3]\n"
"fmla v16.4s, v1.4s, v4.s[0]\n"
"fmla v17.4s, v1.4s, v4.s[1]\n"
- "fmin v16.4s, v16.4s, v11.4s\n"
"fmla v18.4s, v1.4s, v4.s[2]\n"
"fmla v19.4s, v1.4s, v4.s[3]\n"
- "fmin v17.4s, v17.4s, v11.4s\n"
"fmla v20.4s, v1.4s, v3.s[0]\n"
"fmla v21.4s, v1.4s, v3.s[1]\n"
- "fmin v18.4s, v18.4s, v11.4s\n"
"fmla v22.4s, v1.4s, v3.s[2]\n"
"fmla v23.4s, v1.4s, v3.s[3]\n"
- "fmin v19.4s, v19.4s, v11.4s\n"
"fmla v24.4s, v1.4s, v2.s[0]\n"
"fmla v25.4s, v1.4s, v2.s[1]\n"
- "fmin v20.4s, v20.4s, v11.4s\n"
+ "fmin v16.4s, v16.4s, v11.4s\n"
"fmla v26.4s, v1.4s, v2.s[2]\n"
"fmla v27.4s, v1.4s, v2.s[3]\n"
- "fmin v21.4s, v21.4s, v11.4s\n"
+ "fmin v17.4s, v17.4s, v11.4s\n"
"fmla v28.4s, v1.4s, v0.s[0]\n"
"fmla v29.4s, v1.4s, v0.s[1]\n"
- "fmin v22.4s, v22.4s, v11.4s\n"
+ "fmin v18.4s, v18.4s, v11.4s\n"
"fmla v30.4s, v1.4s, v0.s[2]\n"
"fmla v31.4s, v1.4s, v0.s[3]\n"
+ "fmin v19.4s, v19.4s, v11.4s\n"
+ "fmin v20.4s, v20.4s, v11.4s\n"
+ "fmin v21.4s, v21.4s, v11.4s\n"
+ "fmin v22.4s, v22.4s, v11.4s\n"
"fmin v23.4s, v23.4s, v11.4s\n"
"fmax v16.4s, v16.4s, v12.4s\n"
"fmax v17.4s, v17.4s, v12.4s\n"
- "str q16, [x27, x28]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"fmax v18.4s, v18.4s, v12.4s\n"
"fmax v19.4s, v19.4s, v12.4s\n"
- "str q17, [x26, x28]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"fmax v20.4s, v20.4s, v12.4s\n"
"fmax v21.4s, v21.4s, v12.4s\n"
- "str q18, [x25, x28]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"fmax v22.4s, v22.4s, v12.4s\n"
"fmax v23.4s, v23.4s, v12.4s\n"
- "str q19, [x24, x28]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
+ "str q16, [x27, x28]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"fmin v24.4s, v24.4s, v11.4s\n"
"fmin v25.4s, v25.4s, v11.4s\n"
- "str q20, [x23, x28]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q17, [x26, x28]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"fmin v26.4s, v26.4s, v11.4s\n"
"fmin v27.4s, v27.4s, v11.4s\n"
- "str q21, [x22, x28]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
+ "str q18, [x25, x28]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"fmin v28.4s, v28.4s, v11.4s\n"
"fmin v29.4s, v29.4s, v11.4s\n"
- "str q22, [x21, x28]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
+ "str q19, [x24, x28]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"fmin v30.4s, v30.4s, v11.4s\n"
"fmin v31.4s, v31.4s, v11.4s\n"
- "str q23, [x20, x28]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
+ "str q20, [x23, x28]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q21, [x22, x28]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"fmax v24.4s, v24.4s, v12.4s\n"
"fmax v25.4s, v25.4s, v12.4s\n"
- "str q24, [x27, x28]\n"
+ "str q22, [x21, x28]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"fmax v26.4s, v26.4s, v12.4s\n"
"fmax v27.4s, v27.4s, v12.4s\n"
- "str q25, [x26, x28]\n"
+ "str q23, [x20, x28]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"fmax v28.4s, v28.4s, v12.4s\n"
"fmax v29.4s, v29.4s, v12.4s\n"
- "str q26, [x25, x28]\n"
"fmax v30.4s, v30.4s, v12.4s\n"
"fmax v31.4s, v31.4s, v12.4s\n"
+ "str q24, [x27, x28]\n"
+ "str q25, [x26, x28]\n"
+ "str q26, [x25, x28]\n"
"str q27, [x24, x28]\n"
"str q28, [x23, x28]\n"
"str q29, [x22, x28]\n"
@@ -364,80 +364,80 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"6:" // Output channel loop: Single kernel point
"fmla v16.4s, v10.4s, v3.s[0]\n"
"fmla v17.4s, v10.4s, v3.s[1]\n"
- "fmin v16.4s, v16.4s, v11.4s\n"
"lsl x28, x10, #0x2\n"
+ "ldr x27, [%x[outptrs], #0x0]\n"
"fmla v18.4s, v10.4s, v3.s[2]\n"
"fmla v19.4s, v10.4s, v3.s[3]\n"
- "fmin v17.4s, v17.4s, v11.4s\n"
- "ldr x27, [%x[outptrs], #0x0]\n"
+ "ldr x26, [%x[outptrs], #0x8]\n"
+ "ldr x25, [%x[outptrs], #0x10]\n"
"fmla v20.4s, v10.4s, v2.s[0]\n"
"fmla v21.4s, v10.4s, v2.s[1]\n"
- "fmin v18.4s, v18.4s, v11.4s\n"
- "ldr x26, [%x[outptrs], #0x8]\n"
+ "ldr x24, [%x[outptrs], #0x18]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
"fmla v22.4s, v10.4s, v2.s[2]\n"
"fmla v23.4s, v10.4s, v2.s[3]\n"
- "fmin v19.4s, v19.4s, v11.4s\n"
- "ldr x25, [%x[outptrs], #0x10]\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
+ "ldr x21, [%x[outptrs], #0x30]\n"
"fmla v24.4s, v10.4s, v1.s[0]\n"
"fmla v25.4s, v10.4s, v1.s[1]\n"
- "fmin v20.4s, v20.4s, v11.4s\n"
- "ldr x24, [%x[outptrs], #0x18]\n"
+ "fmin v16.4s, v16.4s, v11.4s\n"
+ "ldr x20, [%x[outptrs], #0x38]\n"
"fmla v26.4s, v10.4s, v1.s[2]\n"
"fmla v27.4s, v10.4s, v1.s[3]\n"
- "fmin v21.4s, v21.4s, v11.4s\n"
- "ldr x23, [%x[outptrs], #0x20]\n"
+ "fmin v17.4s, v17.4s, v11.4s\n"
"fmla v28.4s, v10.4s, v0.s[0]\n"
"fmla v29.4s, v10.4s, v0.s[1]\n"
- "fmin v22.4s, v22.4s, v11.4s\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
+ "fmin v18.4s, v18.4s, v11.4s\n"
"fmla v30.4s, v10.4s, v0.s[2]\n"
"fmla v31.4s, v10.4s, v0.s[3]\n"
+ "fmin v19.4s, v19.4s, v11.4s\n"
+ "fmin v20.4s, v20.4s, v11.4s\n"
+ "fmin v21.4s, v21.4s, v11.4s\n"
+ "fmin v22.4s, v22.4s, v11.4s\n"
"fmin v23.4s, v23.4s, v11.4s\n"
- "ldr x21, [%x[outptrs], #0x30]\n"
- "ldr x20, [%x[outptrs], #0x38]\n"
"fmax v16.4s, v16.4s, v12.4s\n"
"fmax v17.4s, v17.4s, v12.4s\n"
- "str q16, [x27, x28]\n"
"fmax v18.4s, v18.4s, v12.4s\n"
"fmax v19.4s, v19.4s, v12.4s\n"
- "str q17, [x26, x28]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"fmax v20.4s, v20.4s, v12.4s\n"
"fmax v21.4s, v21.4s, v12.4s\n"
- "str q18, [x25, x28]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"fmax v22.4s, v22.4s, v12.4s\n"
"fmax v23.4s, v23.4s, v12.4s\n"
- "str q19, [x24, x28]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
+ "str q16, [x27, x28]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"fmin v24.4s, v24.4s, v11.4s\n"
"fmin v25.4s, v25.4s, v11.4s\n"
- "str q20, [x23, x28]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
+ "str q17, [x26, x28]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"fmin v26.4s, v26.4s, v11.4s\n"
"fmin v27.4s, v27.4s, v11.4s\n"
- "str q21, [x22, x28]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q18, [x25, x28]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"fmin v28.4s, v28.4s, v11.4s\n"
"fmin v29.4s, v29.4s, v11.4s\n"
- "str q22, [x21, x28]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
+ "str q19, [x24, x28]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"fmin v30.4s, v30.4s, v11.4s\n"
"fmin v31.4s, v31.4s, v11.4s\n"
- "str q23, [x20, x28]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
+ "str q20, [x23, x28]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str q21, [x22, x28]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"fmax v24.4s, v24.4s, v12.4s\n"
"fmax v25.4s, v25.4s, v12.4s\n"
- "str q24, [x27, x28]\n"
+ "str q22, [x21, x28]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"fmax v26.4s, v26.4s, v12.4s\n"
"fmax v27.4s, v27.4s, v12.4s\n"
- "str q25, [x26, x28]\n"
+ "str q23, [x20, x28]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"fmax v28.4s, v28.4s, v12.4s\n"
"fmax v29.4s, v29.4s, v12.4s\n"
- "str q26, [x25, x28]\n"
"fmax v30.4s, v30.4s, v12.4s\n"
"fmax v31.4s, v31.4s, v12.4s\n"
+ "str q24, [x27, x28]\n"
+ "str q25, [x26, x28]\n"
+ "str q26, [x25, x28]\n"
"str q27, [x24, x28]\n"
"str q28, [x23, x28]\n"
"str q29, [x22, x28]\n"
@@ -464,22 +464,22 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"11:" // Output channel oddments: Load bias: Done
"ldr q10, [%x[weights], #0x0]\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
- "ldr q3, [x21, #0x0]\n"
- "ldr q2, [x21, #0x10]\n"
"mov v16.16b, v31.16b\n"
"mov v17.16b, v31.16b\n"
- "ldr q1, [x20, #0x0]\n"
- "ldr q0, [x20, #0x10]\n"
"mov v18.16b, v31.16b\n"
+ "add %x[weights], %x[weights], #0x10\n"
"mov v19.16b, v31.16b\n"
"mov v20.16b, v31.16b\n"
+ "ldp x21, x20, [x22], #0x10\n"
"mov v21.16b, v31.16b\n"
- "add %x[weights], %x[weights], #0x10\n"
"mov v22.16b, v31.16b\n"
"mov v23.16b, v31.16b\n"
"mov v24.16b, v31.16b\n"
+ "ldr q3, [x21, #0x0]\n"
+ "ldr q2, [x21, #0x10]\n"
+ "ldr q1, [x20, #0x0]\n"
+ "ldr q0, [x20, #0x10]\n"
"mov v25.16b, v31.16b\n"
"mov v26.16b, v31.16b\n"
"mov v27.16b, v31.16b\n"
@@ -504,9 +504,9 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"subs x23, x23, #0x1\n"
"fmla v18.4s, v10.4s, v3.s[2]\n"
"fmla v19.4s, v10.4s, v3.s[3]\n"
- "ldr q3, [x21, #0x0]\n"
"fmla v20.4s, v10.4s, v2.s[0]\n"
"fmla v21.4s, v10.4s, v2.s[1]\n"
+ "ldr q3, [x21, #0x0]\n"
"fmla v22.4s, v10.4s, v2.s[2]\n"
"fmla v23.4s, v10.4s, v2.s[3]\n"
"ldr q2, [x21, #0x10]\n"
@@ -526,9 +526,9 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"fmla v17.4s, v9.4s, v8.s[1]\n"
"fmla v18.4s, v9.4s, v8.s[2]\n"
"fmla v19.4s, v9.4s, v8.s[3]\n"
- "ldr q8, [x21, #0x0]\n"
"fmla v20.4s, v9.4s, v7.s[0]\n"
"fmla v21.4s, v9.4s, v7.s[1]\n"
+ "ldr q8, [x21, #0x0]\n"
"fmla v22.4s, v9.4s, v7.s[2]\n"
"fmla v23.4s, v9.4s, v7.s[3]\n"
"ldr q7, [x21, #0x10]\n"
@@ -586,9 +586,9 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"ldp x21, x20, [x22], #0x10\n"
"fmla v18.4s, v10.4s, v3.s[2]\n"
"fmla v19.4s, v10.4s, v3.s[3]\n"
- "ldr q4, [x21, #0x0]\n"
"fmla v20.4s, v10.4s, v2.s[0]\n"
"fmla v21.4s, v10.4s, v2.s[1]\n"
+ "ldr q4, [x21, #0x0]\n"
"fmla v22.4s, v10.4s, v2.s[2]\n"
"fmla v23.4s, v10.4s, v2.s[3]\n"
"ldr q3, [x21, #0x10]\n"
@@ -690,47 +690,47 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"tbz %x[n_output_channels], #1, 17f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #2\n"
- "add x26, x26, x10, LSL #2\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #2\n"
- "add x24, x24, x10, LSL #2\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #2\n"
- "add x22, x22, x10, LSL #2\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #2\n"
- "add x20, x20, x10, LSL #2\n"
+ "add x27, x27, x10, LSL #2\n"
+ "add x26, x26, x10, LSL #2\n"
+ "add x25, x25, x10, LSL #2\n"
+ "add x24, x24, x10, LSL #2\n"
"st1 { v16.d }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #2\n"
+ "add x23, x23, x10, LSL #2\n"
+ "add x22, x22, x10, LSL #2\n"
"st1 { v17.d }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #2\n"
+ "add x21, x21, x10, LSL #2\n"
+ "add x20, x20, x10, LSL #2\n"
"st1 { v18.d }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #2\n"
"st1 { v19.d }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #2\n"
+ "add x27, x27, x10, LSL #2\n"
"st1 { v20.d }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #2\n"
+ "add x26, x26, x10, LSL #2\n"
"st1 { v21.d }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #2\n"
+ "add x25, x25, x10, LSL #2\n"
"st1 { v22.d }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #2\n"
+ "add x24, x24, x10, LSL #2\n"
"st1 { v23.d }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #2\n"
- "add x10, x10, #0x2\n"
+ "add x23, x23, x10, LSL #2\n"
+ "add x22, x22, x10, LSL #2\n"
"st1 { v24.d }[0], [x27]\n"
+ "add x21, x21, x10, LSL #2\n"
"st1 { v25.d }[0], [x26]\n"
+ "add x20, x20, x10, LSL #2\n"
+ "add x10, x10, #0x2\n"
"st1 { v26.d }[0], [x25]\n"
"st1 { v27.d }[0], [x24]\n"
"st1 { v28.d }[0], [x23]\n"
@@ -740,46 +740,46 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"tbz %x[n_output_channels], #0, 18f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #2\n"
- "add x26, x26, x10, LSL #2\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #2\n"
- "add x24, x24, x10, LSL #2\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #2\n"
- "add x22, x22, x10, LSL #2\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #2\n"
- "add x20, x20, x10, LSL #2\n"
+ "add x27, x27, x10, LSL #2\n"
+ "add x26, x26, x10, LSL #2\n"
+ "add x25, x25, x10, LSL #2\n"
+ "add x24, x24, x10, LSL #2\n"
"st1 { v16.s }[2], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #2\n"
+ "add x23, x23, x10, LSL #2\n"
+ "add x22, x22, x10, LSL #2\n"
"st1 { v17.s }[2], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #2\n"
+ "add x21, x21, x10, LSL #2\n"
+ "add x20, x20, x10, LSL #2\n"
"st1 { v18.s }[2], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #2\n"
"st1 { v19.s }[2], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #2\n"
+ "add x27, x27, x10, LSL #2\n"
"st1 { v20.s }[2], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #2\n"
+ "add x26, x26, x10, LSL #2\n"
"st1 { v21.s }[2], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #2\n"
+ "add x25, x25, x10, LSL #2\n"
"st1 { v22.s }[2], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #2\n"
+ "add x24, x24, x10, LSL #2\n"
"st1 { v23.s }[2], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #2\n"
+ "add x23, x23, x10, LSL #2\n"
+ "add x22, x22, x10, LSL #2\n"
"st1 { v24.s }[2], [x27]\n"
+ "add x21, x21, x10, LSL #2\n"
"st1 { v25.s }[2], [x26]\n"
+ "add x20, x20, x10, LSL #2\n"
"st1 { v26.s }[2], [x25]\n"
"st1 { v27.s }[2], [x24]\n"
"st1 { v28.s }[2], [x23]\n"
@@ -790,46 +790,46 @@ void a64_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"17:" // Output channel oddments: Done: Store: Bit 1: Unset
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x10, LSL #2\n"
- "add x26, x26, x10, LSL #2\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x10, LSL #2\n"
- "add x24, x24, x10, LSL #2\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x10, LSL #2\n"
- "add x22, x22, x10, LSL #2\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x10, LSL #2\n"
- "add x20, x20, x10, LSL #2\n"
+ "add x27, x27, x10, LSL #2\n"
+ "add x26, x26, x10, LSL #2\n"
+ "add x25, x25, x10, LSL #2\n"
+ "add x24, x24, x10, LSL #2\n"
"st1 { v16.s }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x10, LSL #2\n"
+ "add x23, x23, x10, LSL #2\n"
+ "add x22, x22, x10, LSL #2\n"
"st1 { v17.s }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x10, LSL #2\n"
+ "add x21, x21, x10, LSL #2\n"
+ "add x20, x20, x10, LSL #2\n"
"st1 { v18.s }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x10, LSL #2\n"
"st1 { v19.s }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x10, LSL #2\n"
+ "add x27, x27, x10, LSL #2\n"
"st1 { v20.s }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x10, LSL #2\n"
+ "add x26, x26, x10, LSL #2\n"
"st1 { v21.s }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x10, LSL #2\n"
+ "add x25, x25, x10, LSL #2\n"
"st1 { v22.s }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x10, LSL #2\n"
+ "add x24, x24, x10, LSL #2\n"
"st1 { v23.s }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x10, LSL #2\n"
+ "add x23, x23, x10, LSL #2\n"
+ "add x22, x22, x10, LSL #2\n"
"st1 { v24.s }[0], [x27]\n"
+ "add x21, x21, x10, LSL #2\n"
"st1 { v25.s }[0], [x26]\n"
+ "add x20, x20, x10, LSL #2\n"
"st1 { v26.s }[0], [x25]\n"
"st1 { v27.s }[0], [x24]\n"
"st1 { v28.s }[0], [x23]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index 916c8a4afe..8af5e63a4b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,1622 +33,1622 @@ namespace depthwise {
void a64_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const unsigned int n_channels, const int8_t *const *const inptrs, const int8_t *params, const int32_t *, const arm_gemm::Requantize32& qp, const int32_t *, const int32_t *, int8_t *const *const outptrs)
{
__asm__ __volatile__(
- "mov x20, #0x1\n"
- "orr x20, x20, #0x100\n"
+ "mov x17, #0x1\n"
+ "lsr x16, %x[n_channels], #0x4\n"
"ldp x15, x14, [%x[inptrs], #0x0]\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "orr x20, x20, #0x10000\n"
- "lsr x11, %x[n_channels], #0x4\n"
- "dup v12.4s, w20\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
+ "ldp x27, x26, [%x[inptrs], #0x10]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_minval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "ldp x25, x24, [%x[inptrs], #0x20]\n"
+ "ldp x23, x22, [%x[inptrs], #0x30]\n"
+ "ld1r { v7.4s }, [x21]\n"
"ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
- "mov x28, #0x0\n"
- "mov x27, #0x0\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "ldp x25, x24, [%x[outptrs], #0x0]\n"
- "ldp x23, x22, [%x[outptrs], #0x10]\n"
- "cbz x11, 3f\n"
- "ldr q15, [x15, x28]\n"
- "ldr q28, [x14, x28]\n"
- "subs x11, x11, #0x1\n"
- "ldr q30, [x13, x28]\n"
- "ldr q8, [x12, x28]\n"
- "zip2 v19.16b, v15.16b, v30.16b\n"
- "zip1 v15.16b, v15.16b, v30.16b\n"
- "ldr q26, [x10, x28]\n"
- "ldr q0, [x9, x28]\n"
- "zip1 v7.16b, v28.16b, v8.16b\n"
- "zip2 v8.16b, v28.16b, v8.16b\n"
- "ldr q29, [x26, x28]\n"
- "ldr q10, [x21, x28]\n"
- "zip2 v25.16b, v15.16b, v7.16b\n"
- "zip1 v15.16b, v15.16b, v7.16b\n"
- "ldr q1, [%x[params], #0x10]\n"
- "ldr q6, [%x[params], #0x20]\n"
- "zip1 v7.16b, v19.16b, v8.16b\n"
- "zip2 v8.16b, v19.16b, v8.16b\n"
- "ldr q31, [%x[params], #0x0]\n"
- "ldr q20, [%x[params], #0x30]\n"
- "zip2 v21.16b, v26.16b, v29.16b\n"
- "zip1 v26.16b, v26.16b, v29.16b\n"
- "ldp x21, x20, [%x[inptrs], #0x40]\n"
- "ldr q22, [x21, x28]\n"
- "zip1 v27.16b, v0.16b, v10.16b\n"
- "zip2 v10.16b, v0.16b, v10.16b\n"
- "ldr q17, [x20, x28]\n"
- "ldp x21, x20, [%x[inptrs], #0x50]\n"
- "zip2 v23.16b, v26.16b, v27.16b\n"
- "zip1 v26.16b, v26.16b, v27.16b\n"
- "ldr q9, [x21, x28]\n"
- "ldr q5, [x20, x28]\n"
- "zip2 v28.16b, v22.16b, v9.16b\n"
- "zip1 v22.16b, v22.16b, v9.16b\n"
- "ldp x21, x20, [%x[inptrs], #0x60]\n"
- "ldr q27, [x21, x28]\n"
- "zip1 v24.16b, v17.16b, v5.16b\n"
- "zip2 v5.16b, v17.16b, v5.16b\n"
- "ldr q18, [x20, x28]\n"
+ "ld1r { v24.4s }, [x21]\n"
+ "ld1r { v12.4s }, [x20]\n"
+ "orr x17, x17, #0x100\n"
+ "mov x13, #0x0\n"
+ "mov x12, #0x0\n"
+ "ldp x11, x10, [%x[outptrs], #0x0]\n"
+ "ldp x9, x28, [%x[outptrs], #0x10]\n"
+ "orr x17, x17, #0x10000\n"
+ "dup v15.4s, w17\n"
+ "cbz x16, 3f\n"
+ "ldr q13, [x15, x13]\n"
+ "ldr q5, [x14, x13]\n"
+ "subs x16, x16, #0x1\n"
+ "ldr q27, [x27, x13]\n"
+ "ldr q9, [x26, x13]\n"
+ "ldr q1, [x25, x13]\n"
+ "ldr q28, [x24, x13]\n"
+ "ldr q26, [x23, x13]\n"
+ "ldr q4, [x22, x13]\n"
+ "ldr q30, [%x[params], #0x10]\n"
+ "ldr q8, [%x[params], #0x20]\n"
+ "zip2 v19.16b, v13.16b, v27.16b\n"
+ "zip1 v13.16b, v13.16b, v27.16b\n"
+ "ldr q17, [%x[params], #0x30]\n"
+ "ldp x27, x26, [%x[inptrs], #0x40]\n"
+ "zip1 v3.16b, v5.16b, v9.16b\n"
+ "zip2 v9.16b, v5.16b, v9.16b\n"
+ "ldp x25, x24, [%x[inptrs], #0x50]\n"
+ "ldp x23, x22, [%x[inptrs], #0x60]\n"
+ "zip2 v18.16b, v1.16b, v26.16b\n"
+ "zip1 v1.16b, v1.16b, v26.16b\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "zip1 v3.16b, v21.16b, v10.16b\n"
- "zip2 v10.16b, v21.16b, v10.16b\n"
- "ldr q4, [x21, x28]\n"
- "ldr q9, [x20, x28]\n"
- "zip2 v17.16b, v27.16b, v4.16b\n"
- "zip1 v27.16b, v27.16b, v4.16b\n"
- "zip1 v4.16b, v18.16b, v9.16b\n"
- "zip2 v9.16b, v18.16b, v9.16b\n"
+ "zip1 v16.16b, v28.16b, v4.16b\n"
+ "zip2 v4.16b, v28.16b, v4.16b\n"
+ "ldr q10, [x27, x13]\n"
+ "ldr q14, [x26, x13]\n"
+ "zip2 v2.16b, v13.16b, v3.16b\n"
+ "zip1 v13.16b, v13.16b, v3.16b\n"
"ldp x15, x14, [%x[inptrs], #0x0]\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "zip2 v19.16b, v22.16b, v24.16b\n"
- "zip1 v22.16b, v22.16b, v24.16b\n"
- "zip1 v0.16b, v28.16b, v5.16b\n"
- "zip2 v5.16b, v28.16b, v5.16b\n"
+ "ldr q3, [x25, x13]\n"
+ "ldr q6, [x24, x13]\n"
+ "zip1 v0.16b, v19.16b, v9.16b\n"
+ "zip2 v9.16b, v19.16b, v9.16b\n"
+ "ldr q5, [x23, x13]\n"
+ "ldr q20, [x22, x13]\n"
+ "zip2 v21.16b, v1.16b, v16.16b\n"
+ "zip1 v1.16b, v1.16b, v16.16b\n"
+ "ldr q16, [x21, x13]\n"
+ "ldr q25, [x20, x13]\n"
+ "zip1 v28.16b, v18.16b, v4.16b\n"
+ "zip2 v4.16b, v18.16b, v4.16b\n"
+ "ldr q31, [%x[params], #0x0]\n"
+ "zip2 v19.16b, v10.16b, v3.16b\n"
+ "zip1 v10.16b, v10.16b, v3.16b\n"
+ "ldp x27, x26, [%x[inptrs], #0x10]\n"
+ "zip1 v18.16b, v14.16b, v6.16b\n"
+ "zip2 v6.16b, v14.16b, v6.16b\n"
+ "ldp x25, x24, [%x[inptrs], #0x20]\n"
+ "ldp x23, x22, [%x[inptrs], #0x30]\n"
+ "zip2 v23.16b, v5.16b, v16.16b\n"
+ "zip1 v5.16b, v5.16b, v16.16b\n"
"add %x[params], %x[params], #0x40\n"
- "zip2 v24.16b, v27.16b, v4.16b\n"
- "zip1 v27.16b, v27.16b, v4.16b\n"
- "zip1 v2.16b, v17.16b, v9.16b\n"
- "zip2 v9.16b, v17.16b, v9.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
+ "zip1 v16.16b, v20.16b, v25.16b\n"
+ "zip2 v25.16b, v20.16b, v25.16b\n"
+ "zip2 v29.16b, v10.16b, v18.16b\n"
+ "zip1 v10.16b, v10.16b, v18.16b\n"
+ "zip1 v27.16b, v19.16b, v6.16b\n"
+ "zip2 v6.16b, v19.16b, v6.16b\n"
+ "zip2 v18.16b, v5.16b, v16.16b\n"
+ "zip1 v5.16b, v5.16b, v16.16b\n"
+ "zip1 v14.16b, v23.16b, v25.16b\n"
+ "zip2 v25.16b, v23.16b, v25.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
"beq 2f\n"
"1:" // Loop
- "movi v21.4s, #0x0\n"
- ".inst 0x4e9a9595 // sdot v21.4s, v12.16b, v26.16b\n"
- ".inst 0x4e8f943f // sdot v31.4s, v1.16b, v15.16b\n"
- "add x28, x28, #0x10\n"
- ".inst 0x4e969595 // sdot v21.4s, v12.16b, v22.16b\n"
- ".inst 0x4e9a943d // sdot v29.4s, v1.16b, v26.16b\n"
- "movi v18.4s, #0x0\n"
- "subs x11, x11, #0x1\n"
- ".inst 0x4e9a94df // sdot v31.4s, v6.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "mov v17.16b, v21.16b\n .inst 0x4e9b9591 // sdot v17.4s, v12.16b, v27.16b\n"
- ".inst 0x4e8f9595 // sdot v21.4s, v12.16b, v15.16b\n"
- "ext v15.16b, v15.16b, v15.16b, #0x1\n"
- ".inst 0x4e9a9592 // sdot v18.4s, v12.16b, v26.16b\n"
- ".inst 0x4e9694dd // sdot v29.4s, v6.16b, v22.16b\n"
- ".inst 0x4e96969f // sdot v31.4s, v20.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e8f943e // sdot v30.4s, v1.16b, v15.16b\n"
- ".inst 0x4e9a943c // sdot v28.4s, v1.16b, v26.16b\n"
- "mls v31.4s, v21.4s, v16.4s\n"
- ".inst 0x4e969592 // sdot v18.4s, v12.16b, v22.16b\n"
- ".inst 0x4e9b969d // sdot v29.4s, v20.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x4e9a94de // sdot v30.4s, v6.16b, v26.16b\n"
- "ldr q26, [%x[params], #0x10]\n"
- ".inst 0x4e9694dc // sdot v28.4s, v6.16b, v22.16b\n"
- "mls v29.4s, v17.4s, v16.4s\n"
- "mov v21.16b, v18.16b\n .inst 0x4e9b9595 // sdot v21.4s, v12.16b, v27.16b\n"
- ".inst 0x4e8f9592 // sdot v18.4s, v12.16b, v15.16b\n"
- "ldr q17, [%x[params], #0x0]\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x4e96969e // sdot v30.4s, v20.16b, v22.16b\n"
- ".inst 0x4e9b969c // sdot v28.4s, v20.16b, v27.16b\n"
- "mls v30.4s, v18.4s, v16.4s\n"
- "mls v28.4s, v21.4s, v16.4s\n"
- "and v15.16b, v31.16b, v26.16b\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v17.4s\n"
- "sqrdmulh v29.4s, v29.4s, v17.4s\n"
- "sqrdmulh v28.4s, v28.4s, v17.4s\n"
- "ldr q1, [%x[params], #0x60]\n"
- "sqadd v31.4s, v31.4s, v15.4s\n"
- "and v18.16b, v30.16b, v26.16b\n"
- "and v21.16b, v29.16b, v26.16b\n"
- "and v17.16b, v28.16b, v26.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x4e8d97df // sdot v31.4s, v30.16b, v13.16b\n"
+ ".inst 0x4e8197c3 // sdot v3.4s, v30.16b, v1.16b\n"
+ "add x13, x13, #0x10\n"
+ "movi v22.4s, #0x0\n"
+ "subs x16, x16, #0x1\n"
+ ".inst 0x4e8195f3 // sdot v19.4s, v15.16b, v1.16b\n"
+ ".inst 0x4e81951f // sdot v31.4s, v8.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ ".inst 0x4e8a9503 // sdot v3.4s, v8.16b, v10.16b\n"
+ ".inst 0x4e8a95f3 // sdot v19.4s, v15.16b, v10.16b\n"
+ ".inst 0x4e8195f6 // sdot v22.4s, v15.16b, v1.16b\n"
+ ".inst 0x4e8a963f // sdot v31.4s, v17.16b, v10.16b\n"
+ "ext v10.16b, v10.16b, v10.16b, #0x1\n"
+ ".inst 0x4e8197d7 // sdot v23.4s, v30.16b, v1.16b\n"
+ "mov v16.16b, v19.16b\n .inst 0x4e8595f0 // sdot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x4e8d95f3 // sdot v19.4s, v15.16b, v13.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x4e859623 // sdot v3.4s, v17.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x4e8a95f6 // sdot v22.4s, v15.16b, v10.16b\n"
+ ".inst 0x4e8d97da // sdot v26.4s, v30.16b, v13.16b\n"
+ ".inst 0x4e8a9517 // sdot v23.4s, v8.16b, v10.16b\n"
+ "mls v31.4s, v19.4s, v24.4s\n"
+ "movi v19.4s, #0x0\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x4e81951a // sdot v26.4s, v8.16b, v1.16b\n"
+ "ldr q8, [%x[params], #0x10]\n"
+ "mov v16.16b, v22.16b\n .inst 0x4e8595f0 // sdot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x4e8d95f6 // sdot v22.4s, v15.16b, v13.16b\n"
+ "ldr q1, [%x[params], #0x0]\n"
+ ".inst 0x4e9595f3 // sdot v19.4s, v15.16b, v21.16b\n"
+ ".inst 0x4e859637 // sdot v23.4s, v17.16b, v5.16b\n"
+ ".inst 0x4e8a963a // sdot v26.4s, v17.16b, v10.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v1.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v1.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v8.16b\n"
+ ".inst 0x4e9d95f3 // sdot v19.4s, v15.16b, v29.16b\n"
+ "mls v26.4s, v22.4s, v24.4s\n"
+ "movi v20.4s, #0x0\n"
+ "sqrdmulh v23.4s, v23.4s, v1.4s\n"
+ "and v30.16b, v3.16b, v8.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v1.4s\n"
+ "ldr q10, [%x[params], #0x60]\n"
+ "mov v22.16b, v19.16b\n .inst 0x4e9295f6 // sdot v22.4s, v15.16b, v18.16b\n"
+ ".inst 0x4e8295f3 // sdot v19.4s, v15.16b, v2.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v8.16b\n"
+ "and v16.16b, v26.16b, v8.16b\n"
+ "sqadd v3.4s, v3.4s, v30.4s\n"
+ "ldr q5, [%x[params], #0x50]\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v26.4s\n"
- "sqadd v30.4s, v30.4s, v18.4s\n"
- "ldr q18, [%x[params], #0x40]\n"
- "sqadd v29.4s, v29.4s, v21.4s\n"
- "ldr q27, [%x[params], #0x50]\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "ldr q15, [%x[params], #0x30]\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "srshl v30.4s, v30.4s, v26.4s\n"
- "srshl v29.4s, v29.4s, v26.4s\n"
- "srshl v28.4s, v28.4s, v26.4s\n"
- "ldr q20, [%x[params], #0x70]\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
+ "srshl v31.4s, v31.4s, v8.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v8.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0x30]\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "ldr q30, [%x[params], #0x40]\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "srshl v23.4s, v23.4s, v8.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v8.4s\n"
+ "ldr q1, [%x[params], #0x70]\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "movi v22.4s, #0x0\n"
- ".inst 0x4e979596 // sdot v22.4s, v12.16b, v23.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s31, [x25, x27]\n"
- "ldr q26, [%x[params], #0x20]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- ".inst 0x4e939596 // sdot v22.4s, v12.16b, v19.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s30, [x24, x27]\n"
- "mov v6.16b, v22.16b\n .inst 0x4e989586 // sdot v6.4s, v12.16b, v24.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s29, [x23, x27]\n"
- "mov v30.16b, v26.16b\n"
- ".inst 0x4e999596 // sdot v22.4s, v12.16b, v25.16b\n"
- "str s28, [x22, x27]\n"
- "mov v29.16b, v26.16b\n"
- "mov v21.16b, v26.16b\n"
- ".inst 0x4e9995fa // sdot v26.4s, v15.16b, v25.16b\n"
- ".inst 0x4e9795fd // sdot v29.4s, v15.16b, v23.16b\n"
- ".inst 0x4e97965a // sdot v26.4s, v18.16b, v23.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- "movi v28.4s, #0x0\n"
- ".inst 0x4e9995fe // sdot v30.4s, v15.16b, v25.16b\n"
- ".inst 0x4e9795f5 // sdot v21.4s, v15.16b, v23.16b\n"
- ".inst 0x4e97959c // sdot v28.4s, v12.16b, v23.16b\n"
- ".inst 0x4e93965d // sdot v29.4s, v18.16b, v19.16b\n"
- ".inst 0x4e93977a // sdot v26.4s, v27.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x4e97965e // sdot v30.4s, v18.16b, v23.16b\n"
- "ldr q4, [x9, x28]\n"
- ".inst 0x4e939655 // sdot v21.4s, v18.16b, v19.16b\n"
- "mls v26.4s, v22.4s, v16.4s\n"
- ".inst 0x4e93959c // sdot v28.4s, v12.16b, v19.16b\n"
- ".inst 0x4e98977d // sdot v29.4s, v27.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e93977e // sdot v30.4s, v27.16b, v19.16b\n"
- ".inst 0x4e989775 // sdot v21.4s, v27.16b, v24.16b\n"
- "sqrdmulh v26.4s, v26.4s, v1.4s\n"
- "mov v17.16b, v28.16b\n .inst 0x4e989591 // sdot v17.4s, v12.16b, v24.16b\n"
- ".inst 0x4e99959c // sdot v28.4s, v12.16b, v25.16b\n"
- "ldr q31, [x14, x28]\n"
- "mls v30.4s, v28.4s, v16.4s\n"
- "mls v29.4s, v6.4s, v16.4s\n"
- "mls v21.4s, v17.4s, v16.4s\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v1.4s\n"
- "sqrdmulh v29.4s, v29.4s, v1.4s\n"
- "sqrdmulh v21.4s, v21.4s, v1.4s\n"
- "ldr q27, [%x[params], #0xc0]\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "and v18.16b, v30.16b, v20.16b\n"
- "and v6.16b, v29.16b, v20.16b\n"
- "and v17.16b, v21.16b, v20.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "sqadd v30.4s, v30.4s, v18.4s\n"
- "ldr q28, [%x[params], #0xa0]\n"
- "sqadd v29.4s, v29.4s, v6.4s\n"
- "ldr q24, [%x[params], #0xb0]\n"
- "sqadd v21.4s, v21.4s, v17.4s\n"
- "ldr q15, [%x[params], #0x90]\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v21.4s, v21.4s, v20.4s\n"
- "ldr q1, [%x[params], #0xd0]\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
"smin v26.4s, v26.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s31, [x11, x12]\n"
+ "ldr q31, [%x[params], #0x20]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "movi v22.4s, #0x0\n"
- ".inst 0x4e839596 // sdot v22.4s, v12.16b, v3.16b\n"
- ".inst 0x4e809596 // sdot v22.4s, v12.16b, v0.16b\n"
+ "str s3, [x9, x12]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x27]\n"
- "ldr q26, [%x[params], #0x80]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "mov v18.16b, v22.16b\n .inst 0x4e829592 // sdot v18.4s, v12.16b, v2.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s30, [x24, x27]\n"
- ".inst 0x4e879596 // sdot v22.4s, v12.16b, v7.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s29, [x23, x27]\n"
- "mov v6.16b, v26.16b\n"
- "str s21, [x22, x27]\n"
- "mov v25.16b, v26.16b\n"
- "mov v20.16b, v26.16b\n"
- ".inst 0x4e8795fa // sdot v26.4s, v15.16b, v7.16b\n"
- ".inst 0x4e8395f9 // sdot v25.4s, v15.16b, v3.16b\n"
- ".inst 0x4e83979a // sdot v26.4s, v28.16b, v3.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "movi v23.4s, #0x0\n"
- ".inst 0x4e8795e6 // sdot v6.4s, v15.16b, v7.16b\n"
- ".inst 0x4e8395f4 // sdot v20.4s, v15.16b, v3.16b\n"
- ".inst 0x4e839597 // sdot v23.4s, v12.16b, v3.16b\n"
- ".inst 0x4e809799 // sdot v25.4s, v28.16b, v0.16b\n"
- ".inst 0x4e80971a // sdot v26.4s, v24.16b, v0.16b\n"
- "ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e839786 // sdot v6.4s, v28.16b, v3.16b\n"
- "ldr q19, [x26, x28]\n"
- ".inst 0x4e809794 // sdot v20.4s, v28.16b, v0.16b\n"
- "mls v26.4s, v22.4s, v16.4s\n"
- ".inst 0x4e809597 // sdot v23.4s, v12.16b, v0.16b\n"
- ".inst 0x4e829719 // sdot v25.4s, v24.16b, v2.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "mov v8.16b, v31.16b\n"
+ "str s26, [x10, x12]\n"
+ "mov v16.16b, v31.16b\n"
+ "str s23, [x28, x12]\n"
+ "mov v26.16b, v31.16b\n"
+ ".inst 0x4e82963f // sdot v31.4s, v17.16b, v2.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e959628 // sdot v8.4s, v17.16b, v21.16b\n"
"ext v2.16b, v2.16b, v2.16b, #0x1\n"
- ".inst 0x4e809706 // sdot v6.4s, v24.16b, v0.16b\n"
- ".inst 0x4e829714 // sdot v20.4s, v24.16b, v2.16b\n"
- "sqrdmulh v26.4s, v26.4s, v27.4s\n"
- "mov v17.16b, v23.16b\n .inst 0x4e829591 // sdot v17.4s, v12.16b, v2.16b\n"
- ".inst 0x4e879597 // sdot v23.4s, v12.16b, v7.16b\n"
- "ldr q21, [x13, x28]\n"
- "mls v6.4s, v23.4s, v16.4s\n"
- "mls v25.4s, v18.4s, v16.4s\n"
- "mls v20.4s, v17.4s, v16.4s\n"
- "and v17.16b, v26.16b, v1.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v6.4s, v6.4s, v27.4s\n"
- "sqrdmulh v25.4s, v25.4s, v27.4s\n"
- "sqrdmulh v20.4s, v20.4s, v27.4s\n"
- "ldr q15, [%x[params], #0x120]\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "and v18.16b, v6.16b, v1.16b\n"
- "and v22.16b, v25.16b, v1.16b\n"
- "and v17.16b, v20.16b, v1.16b\n"
+ ".inst 0x4e9597df // sdot v31.4s, v30.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x4e829630 // sdot v16.4s, v17.16b, v2.16b\n"
+ ".inst 0x4e95963a // sdot v26.4s, v17.16b, v21.16b\n"
+ ".inst 0x4e9595f4 // sdot v20.4s, v15.16b, v21.16b\n"
+ ".inst 0x4e9d97c8 // sdot v8.4s, v30.16b, v29.16b\n"
+ ".inst 0x4e9d94bf // sdot v31.4s, v5.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x4e9597d0 // sdot v16.4s, v30.16b, v21.16b\n"
+ "ldr q3, [x24, x13]\n"
+ ".inst 0x4e9d97da // sdot v26.4s, v30.16b, v29.16b\n"
+ ".inst 0x4e9d95f4 // sdot v20.4s, v15.16b, v29.16b\n"
+ ".inst 0x4e9294a8 // sdot v8.4s, v5.16b, v18.16b\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ "mls v31.4s, v19.4s, v24.4s\n"
+ "movi v23.4s, #0x0\n"
+ ".inst 0x4e9d94b0 // sdot v16.4s, v5.16b, v29.16b\n"
+ ".inst 0x4e9294ba // sdot v26.4s, v5.16b, v18.16b\n"
+ "mov v17.16b, v20.16b\n .inst 0x4e9295f1 // sdot v17.4s, v15.16b, v18.16b\n"
+ ".inst 0x4e8295f4 // sdot v20.4s, v15.16b, v2.16b\n"
+ "ldr q2, [x14, x13]\n"
+ ".inst 0x4e9c95f7 // sdot v23.4s, v15.16b, v28.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v10.4s\n"
+ "mls v8.4s, v22.4s, v24.4s\n"
+ "mls v26.4s, v17.4s, v24.4s\n"
+ "and v18.16b, v31.16b, v1.16b\n"
+ "mls v16.4s, v20.4s, v24.4s\n"
+ "movi v21.4s, #0x0\n"
+ "sqrdmulh v8.4s, v8.4s, v10.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ ".inst 0x4e9b95f7 // sdot v23.4s, v15.16b, v27.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v22.4s, v22.4s, #0x1f\n"
+ "sqrdmulh v16.4s, v16.4s, v10.4s\n"
+ "ldr q13, [%x[params], #0xc0]\n"
+ "and v17.16b, v8.16b, v1.16b\n"
+ "sqadd v31.4s, v31.4s, v18.4s\n"
+ "and v20.16b, v26.16b, v1.16b\n"
+ "and v10.16b, v16.16b, v1.16b\n"
+ "mov v19.16b, v23.16b\n .inst 0x4e8e95f3 // sdot v19.4s, v15.16b, v14.16b\n"
+ ".inst 0x4e8095f7 // sdot v23.4s, v15.16b, v0.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v31.4s, v31.4s, v1.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "ldr q30, [%x[params], #0xb0]\n"
+ "sqadd v16.4s, v16.4s, v10.4s\n"
+ "ldr q17, [%x[params], #0xa0]\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v20.4s\n"
+ "ldr q20, [%x[params], #0x90]\n"
+ "srshl v8.4s, v8.4s, v1.4s\n"
+ "srshl v16.4s, v16.4s, v1.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
"srshl v26.4s, v26.4s, v1.4s\n"
- "sqadd v6.4s, v6.4s, v18.4s\n"
- "ldr q30, [%x[params], #0x100]\n"
- "sqadd v25.4s, v25.4s, v22.4s\n"
+ "ldr q22, [%x[params], #0xd0]\n"
+ "add v8.4s, v8.4s, v12.4s\n"
+ "add v16.4s, v16.4s, v12.4s\n"
+ "smin v31.4s, v31.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smax v8.4s, v8.4s, v7.4s\n"
+ "smax v16.4s, v16.4s, v7.4s\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smin v8.4s, v8.4s, v11.4s\n"
+ "smin v16.4s, v16.4s, v11.4s\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v16.16b, v16.16b, v16.16b\n"
+ "str s31, [x11, x12]\n"
+ "ldr q10, [%x[params], #0x80]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v16.16b, v16.16b, v16.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s16, [x10, x12]\n"
+ "mov v18.16b, v10.16b\n"
+ "str s8, [x9, x12]\n"
+ "mov v8.16b, v10.16b\n"
+ "str s26, [x28, x12]\n"
+ "mov v26.16b, v10.16b\n"
+ ".inst 0x4e80968a // sdot v10.4s, v20.16b, v0.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e9c9688 // sdot v8.4s, v20.16b, v28.16b\n"
+ "ext v0.16b, v0.16b, v0.16b, #0x1\n"
+ ".inst 0x4e9c962a // sdot v10.4s, v17.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e809692 // sdot v18.4s, v20.16b, v0.16b\n"
+ ".inst 0x4e9c969a // sdot v26.4s, v20.16b, v28.16b\n"
+ ".inst 0x4e9c95f5 // sdot v21.4s, v15.16b, v28.16b\n"
+ ".inst 0x4e9b9628 // sdot v8.4s, v17.16b, v27.16b\n"
+ ".inst 0x4e9b97ca // sdot v10.4s, v30.16b, v27.16b\n"
+ "ext v27.16b, v27.16b, v27.16b, #0x1\n"
+ ".inst 0x4e9c9632 // sdot v18.4s, v17.16b, v28.16b\n"
+ "ldr q28, [x23, x13]\n"
+ ".inst 0x4e9b963a // sdot v26.4s, v17.16b, v27.16b\n"
+ ".inst 0x4e9b95f5 // sdot v21.4s, v15.16b, v27.16b\n"
+ ".inst 0x4e8e97c8 // sdot v8.4s, v30.16b, v14.16b\n"
+ "ext v14.16b, v14.16b, v14.16b, #0x1\n"
+ "mls v10.4s, v23.4s, v24.4s\n"
+ "movi v1.4s, #0x0\n"
+ ".inst 0x4e9b97d2 // sdot v18.4s, v30.16b, v27.16b\n"
+ ".inst 0x4e8e97da // sdot v26.4s, v30.16b, v14.16b\n"
+ "mov v16.16b, v21.16b\n .inst 0x4e8e95f0 // sdot v16.4s, v15.16b, v14.16b\n"
+ ".inst 0x4e8095f5 // sdot v21.4s, v15.16b, v0.16b\n"
+ "ldr q29, [x27, x13]\n"
+ ".inst 0x4e8495e1 // sdot v1.4s, v15.16b, v4.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v13.4s\n"
+ "mls v8.4s, v19.4s, v24.4s\n"
+ "mls v26.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v10.16b, v22.16b\n"
+ "mls v18.4s, v21.4s, v24.4s\n"
+ "movi v5.4s, #0x0\n"
+ "sqrdmulh v8.4s, v8.4s, v13.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v13.4s\n"
+ ".inst 0x4e8695e1 // sdot v1.4s, v15.16b, v6.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v18.4s, v18.4s, v13.4s\n"
+ "ldr q30, [%x[params], #0x120]\n"
+ "and v17.16b, v8.16b, v22.16b\n"
+ "sqadd v10.4s, v10.4s, v16.4s\n"
+ "and v20.16b, v26.16b, v22.16b\n"
+ "and v16.16b, v18.16b, v22.16b\n"
+ "mov v19.16b, v1.16b\n .inst 0x4e9995f3 // sdot v19.4s, v15.16b, v25.16b\n"
+ ".inst 0x4e8995e1 // sdot v1.4s, v15.16b, v9.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v10.4s, v10.4s, v22.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
"ldr q27, [%x[params], #0x110]\n"
- "sqadd v20.4s, v20.4s, v17.4s\n"
- "ldr q24, [%x[params], #0xf0]\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "srshl v6.4s, v6.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v20.4s, v20.4s, v1.4s\n"
- "ldr q23, [%x[params], #0x130]\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "add v6.4s, v6.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
+ "sqadd v18.4s, v18.4s, v16.4s\n"
+ "ldr q17, [%x[params], #0x100]\n"
+ "add v10.4s, v10.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v20.4s\n"
+ "ldr q16, [%x[params], #0xf0]\n"
+ "srshl v8.4s, v8.4s, v22.4s\n"
+ "srshl v18.4s, v18.4s, v22.4s\n"
+ "smax v10.4s, v10.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v22.4s\n"
+ "ldr q31, [%x[params], #0x130]\n"
+ "add v8.4s, v8.4s, v12.4s\n"
+ "add v18.4s, v18.4s, v12.4s\n"
+ "smin v10.4s, v10.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smax v8.4s, v8.4s, v7.4s\n"
+ "smax v18.4s, v18.4s, v7.4s\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smin v8.4s, v8.4s, v11.4s\n"
+ "smin v18.4s, v18.4s, v11.4s\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
"smin v26.4s, v26.4s, v11.4s\n"
- "smax v6.4s, v6.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v6.4s, v6.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
+ "str s10, [x11, x12]\n"
+ "ldr q0, [%x[params], #0xe0]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "movi v0.4s, #0x0\n"
- ".inst 0x4e8a9580 // sdot v0.4s, v12.16b, v10.16b\n"
- ".inst 0x4e859580 // sdot v0.4s, v12.16b, v5.16b\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "str s26, [x25, x27]\n"
- "ldr q28, [%x[params], #0xe0]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v22.16b, v0.16b\n .inst 0x4e899596 // sdot v22.4s, v12.16b, v9.16b\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s6, [x24, x27]\n"
- ".inst 0x4e889580 // sdot v0.4s, v12.16b, v8.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x23, x27]\n"
- "mov v29.16b, v28.16b\n"
- "str s20, [x22, x27]\n"
- "mov v25.16b, v28.16b\n"
- "mov v7.16b, v28.16b\n"
- ".inst 0x4e88971c // sdot v28.4s, v24.16b, v8.16b\n"
- ".inst 0x4e8a9719 // sdot v25.4s, v24.16b, v10.16b\n"
- ".inst 0x4e8a97dc // sdot v28.4s, v30.16b, v10.16b\n"
- "ext v8.16b, v8.16b, v8.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e88971d // sdot v29.4s, v24.16b, v8.16b\n"
- ".inst 0x4e8a9707 // sdot v7.4s, v24.16b, v10.16b\n"
- ".inst 0x4e8a9591 // sdot v17.4s, v12.16b, v10.16b\n"
- ".inst 0x4e8597d9 // sdot v25.4s, v30.16b, v5.16b\n"
- ".inst 0x4e85977c // sdot v28.4s, v27.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x4e8a97dd // sdot v29.4s, v30.16b, v10.16b\n"
- "ldr q10, [x21, x28]\n"
- ".inst 0x4e8597c7 // sdot v7.4s, v30.16b, v5.16b\n"
- "mls v28.4s, v0.4s, v16.4s\n"
- ".inst 0x4e859591 // sdot v17.4s, v12.16b, v5.16b\n"
- ".inst 0x4e899779 // sdot v25.4s, v27.16b, v9.16b\n"
+ "str s18, [x10, x12]\n"
+ "mov v22.16b, v0.16b\n"
+ "str s8, [x9, x12]\n"
+ "mov v23.16b, v0.16b\n"
+ "str s26, [x28, x12]\n"
+ "mov v14.16b, v0.16b\n"
+ ".inst 0x4e899600 // sdot v0.4s, v16.16b, v9.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e849617 // sdot v23.4s, v16.16b, v4.16b\n"
"ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x4e85977d // sdot v29.4s, v27.16b, v5.16b\n"
- ".inst 0x4e899767 // sdot v7.4s, v27.16b, v9.16b\n"
- "sqrdmulh v28.4s, v28.4s, v15.4s\n"
- "mov v18.16b, v17.16b\n .inst 0x4e899592 // sdot v18.4s, v12.16b, v9.16b\n"
- ".inst 0x4e889591 // sdot v17.4s, v12.16b, v8.16b\n"
- "ldr q8, [x12, x28]\n"
- "mls v29.4s, v17.4s, v16.4s\n"
- "mls v25.4s, v22.4s, v16.4s\n"
- "mls v7.4s, v18.4s, v16.4s\n"
- "and v17.16b, v28.16b, v23.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v29.4s, v29.4s, v15.4s\n"
- "sqrdmulh v25.4s, v25.4s, v15.4s\n"
- "sqrdmulh v7.4s, v7.4s, v15.4s\n"
- "ldr q15, [x15, x28]\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "ldp x21, x20, [%x[inptrs], #0x40]\n"
- "ldr q22, [x21, x28]\n"
- "ldr q3, [x20, x28]\n"
- "and v24.16b, v29.16b, v23.16b\n"
- "and v20.16b, v25.16b, v23.16b\n"
- "and v17.16b, v7.16b, v23.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
+ ".inst 0x4e849620 // sdot v0.4s, v17.16b, v4.16b\n"
+ "ext v4.16b, v4.16b, v4.16b, #0x1\n"
+ ".inst 0x4e899616 // sdot v22.4s, v16.16b, v9.16b\n"
+ ".inst 0x4e84960e // sdot v14.4s, v16.16b, v4.16b\n"
+ ".inst 0x4e8495e5 // sdot v5.4s, v15.16b, v4.16b\n"
+ ".inst 0x4e869637 // sdot v23.4s, v17.16b, v6.16b\n"
+ ".inst 0x4e869760 // sdot v0.4s, v27.16b, v6.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ ".inst 0x4e849636 // sdot v22.4s, v17.16b, v4.16b\n"
+ "ldr q4, [x22, x13]\n"
+ ".inst 0x4e86962e // sdot v14.4s, v17.16b, v6.16b\n"
+ ".inst 0x4e8695e5 // sdot v5.4s, v15.16b, v6.16b\n"
+ ".inst 0x4e999777 // sdot v23.4s, v27.16b, v25.16b\n"
+ "ext v25.16b, v25.16b, v25.16b, #0x1\n"
+ "mls v0.4s, v1.4s, v24.4s\n"
+ ".inst 0x4e869776 // sdot v22.4s, v27.16b, v6.16b\n"
+ ".inst 0x4e99976e // sdot v14.4s, v27.16b, v25.16b\n"
+ "mov v17.16b, v5.16b\n .inst 0x4e9995f1 // sdot v17.4s, v15.16b, v25.16b\n"
+ ".inst 0x4e8995e5 // sdot v5.4s, v15.16b, v9.16b\n"
+ "ldr q9, [x26, x13]\n"
+ "sqrdmulh v0.4s, v0.4s, v30.4s\n"
+ "mls v23.4s, v19.4s, v24.4s\n"
+ "and v16.16b, v0.16b, v31.16b\n"
+ "mls v22.4s, v5.4s, v24.4s\n"
+ "mls v14.4s, v17.4s, v24.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v23.4s, v23.4s, v30.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v30.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v30.4s\n"
+ "ldr q13, [x15, x13]\n"
+ "ldp x23, x22, [%x[inptrs], #0x40]\n"
"ldp x21, x20, [%x[inptrs], #0x50]\n"
- "ldr q2, [x21, x28]\n"
- "ldr q5, [x20, x28]\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v28.4s, v28.4s, v23.4s\n"
- "sqadd v29.4s, v29.4s, v24.4s\n"
- "ldr q6, [%x[params], #0x160]\n"
- "sqadd v25.4s, v25.4s, v20.4s\n"
- "ldr q20, [%x[params], #0x170]\n"
- "sqadd v7.4s, v7.4s, v17.4s\n"
- "ldr q1, [%x[params], #0x150]\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "srshl v29.4s, v29.4s, v23.4s\n"
- "srshl v25.4s, v25.4s, v23.4s\n"
- "srshl v7.4s, v7.4s, v23.4s\n"
- "ldr q26, [x10, x28]\n"
- "ldp x21, x20, [%x[inptrs], #0x60]\n"
- "ldr q27, [x21, x28]\n"
- "ldr q30, [x20, x28]\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v7.4s, v7.4s, v14.4s\n"
+ "sqadd v0.4s, v0.4s, v16.4s\n"
+ "and v19.16b, v23.16b, v31.16b\n"
+ "ldr q10, [x23, x13]\n"
+ "ldr q26, [x22, x13]\n"
+ "and v21.16b, v22.16b, v31.16b\n"
+ "and v16.16b, v14.16b, v31.16b\n"
+ "ldr q20, [x21, x13]\n"
+ "ldr q6, [x20, x13]\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "srshl v0.4s, v0.4s, v31.4s\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v19.4s\n"
+ "ldr q17, [%x[params], #0x170]\n"
+ "add v0.4s, v0.4s, v12.4s\n"
+ "sqadd v22.4s, v22.4s, v21.4s\n"
+ "ldr q8, [%x[params], #0x160]\n"
+ "sqadd v14.4s, v14.4s, v16.4s\n"
+ "ldr q30, [%x[params], #0x150]\n"
+ "srshl v23.4s, v23.4s, v31.4s\n"
+ "smax v0.4s, v0.4s, v7.4s\n"
+ "srshl v22.4s, v22.4s, v31.4s\n"
+ "srshl v14.4s, v14.4s, v31.4s\n"
+ "ldr q1, [x25, x13]\n"
+ "ldp x23, x22, [%x[inptrs], #0x60]\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "ldr q23, [x21, x28]\n"
- "ldr q9, [x20, x28]\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
"ldp x15, x14, [%x[inptrs], #0x0]\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v7.4s, v7.4s, v13.4s\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "smin v7.4s, v7.4s, v11.4s\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s28, [x25, x27]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "zip2 v17.16b, v15.16b, v21.16b\n"
- "zip1 v15.16b, v15.16b, v21.16b\n"
- "zip1 v18.16b, v31.16b, v8.16b\n"
- "zip2 v8.16b, v31.16b, v8.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s29, [x24, x27]\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "str s25, [x23, x27]\n"
- "zip2 v25.16b, v15.16b, v18.16b\n"
- "str s7, [x22, x27]\n"
- "zip1 v15.16b, v15.16b, v18.16b\n"
- "zip1 v7.16b, v17.16b, v8.16b\n"
- "add x27, x27, #0x4\n"
- "zip2 v8.16b, v17.16b, v8.16b\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
+ "ldp x27, x26, [%x[inptrs], #0x10]\n"
+ "ldr q5, [x23, x13]\n"
+ "ldr q27, [x22, x13]\n"
+ "add v22.4s, v22.4s, v12.4s\n"
+ "add v14.4s, v14.4s, v12.4s\n"
+ "ldp x25, x24, [%x[inptrs], #0x20]\n"
+ "ldr q16, [x21, x13]\n"
+ "ldr q25, [x20, x13]\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "ldp x23, x22, [%x[inptrs], #0x30]\n"
+ "smax v22.4s, v22.4s, v7.4s\n"
+ "smax v14.4s, v14.4s, v7.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "smin v14.4s, v14.4s, v11.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s0, [x11, x12]\n"
+ "zip2 v18.16b, v13.16b, v29.16b\n"
+ "zip1 v13.16b, v13.16b, v29.16b\n"
+ "zip1 v0.16b, v2.16b, v9.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v14.16b, v14.16b, v14.16b\n"
+ "zip2 v9.16b, v2.16b, v9.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "zip2 v2.16b, v13.16b, v0.16b\n"
+ "zip1 v13.16b, v13.16b, v0.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v14.16b, v14.16b, v14.16b\n"
+ "str s23, [x9, x12]\n"
+ "zip1 v0.16b, v18.16b, v9.16b\n"
+ "zip2 v9.16b, v18.16b, v9.16b\n"
"ldr q31, [%x[params], #0x140]\n"
- "zip2 v29.16b, v26.16b, v19.16b\n"
"add %x[params], %x[params], #0x180\n"
- "zip1 v26.16b, v26.16b, v19.16b\n"
- "zip1 v28.16b, v4.16b, v10.16b\n"
- "zip2 v10.16b, v4.16b, v10.16b\n"
- "zip2 v24.16b, v22.16b, v2.16b\n"
- "zip1 v22.16b, v22.16b, v2.16b\n"
- "zip1 v21.16b, v3.16b, v5.16b\n"
- "zip2 v5.16b, v3.16b, v5.16b\n"
- "zip2 v18.16b, v27.16b, v23.16b\n"
- "zip1 v27.16b, v27.16b, v23.16b\n"
- "zip1 v17.16b, v30.16b, v9.16b\n"
- "zip2 v9.16b, v30.16b, v9.16b\n"
- "zip2 v23.16b, v26.16b, v28.16b\n"
- "zip1 v26.16b, v26.16b, v28.16b\n"
- "zip1 v3.16b, v29.16b, v10.16b\n"
- "zip2 v10.16b, v29.16b, v10.16b\n"
- "zip2 v19.16b, v22.16b, v21.16b\n"
- "zip1 v22.16b, v22.16b, v21.16b\n"
- "zip1 v0.16b, v24.16b, v5.16b\n"
- "zip2 v5.16b, v24.16b, v5.16b\n"
- "zip2 v24.16b, v27.16b, v17.16b\n"
- "zip1 v27.16b, v27.16b, v17.16b\n"
- "zip1 v2.16b, v18.16b, v9.16b\n"
- "zip2 v9.16b, v18.16b, v9.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
+ "zip2 v23.16b, v10.16b, v20.16b\n"
+ "zip1 v10.16b, v10.16b, v20.16b\n"
+ "str s22, [x10, x12]\n"
+ "str s14, [x28, x12]\n"
+ "zip2 v22.16b, v1.16b, v28.16b\n"
+ "zip1 v1.16b, v1.16b, v28.16b\n"
+ "add x12, x12, #0x4\n"
+ "zip1 v20.16b, v3.16b, v4.16b\n"
+ "zip2 v4.16b, v3.16b, v4.16b\n"
+ "zip1 v14.16b, v26.16b, v6.16b\n"
+ "zip2 v6.16b, v26.16b, v6.16b\n"
+ "zip2 v19.16b, v5.16b, v16.16b\n"
+ "zip1 v5.16b, v5.16b, v16.16b\n"
+ "zip1 v16.16b, v27.16b, v25.16b\n"
+ "zip2 v25.16b, v27.16b, v25.16b\n"
+ "zip2 v21.16b, v1.16b, v20.16b\n"
+ "zip1 v1.16b, v1.16b, v20.16b\n"
+ "zip1 v28.16b, v22.16b, v4.16b\n"
+ "zip2 v4.16b, v22.16b, v4.16b\n"
+ "zip2 v29.16b, v10.16b, v14.16b\n"
+ "zip1 v10.16b, v10.16b, v14.16b\n"
+ "zip1 v27.16b, v23.16b, v6.16b\n"
+ "zip2 v6.16b, v23.16b, v6.16b\n"
+ "zip2 v18.16b, v5.16b, v16.16b\n"
+ "zip1 v5.16b, v5.16b, v16.16b\n"
+ "zip1 v14.16b, v19.16b, v25.16b\n"
+ "zip2 v25.16b, v19.16b, v25.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
"bgt 1b\n"
"2:" // Detached iteration
- "movi v21.4s, #0x0\n"
- ".inst 0x4e9a9595 // sdot v21.4s, v12.16b, v26.16b\n"
- ".inst 0x4e8f943f // sdot v31.4s, v1.16b, v15.16b\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x4e8d97df // sdot v31.4s, v30.16b, v13.16b\n"
+ ".inst 0x4e8197c3 // sdot v3.4s, v30.16b, v1.16b\n"
"tst %x[n_channels], #0xf\n"
- ".inst 0x4e969595 // sdot v21.4s, v12.16b, v22.16b\n"
- ".inst 0x4e9a943d // sdot v29.4s, v1.16b, v26.16b\n"
- "movi v18.4s, #0x0\n"
- "add x28, x28, #0x10\n"
- ".inst 0x4e9a94df // sdot v31.4s, v6.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "mov v17.16b, v21.16b\n .inst 0x4e9b9591 // sdot v17.4s, v12.16b, v27.16b\n"
- ".inst 0x4e8f9595 // sdot v21.4s, v12.16b, v15.16b\n"
- "ext v15.16b, v15.16b, v15.16b, #0x1\n"
- ".inst 0x4e9a9592 // sdot v18.4s, v12.16b, v26.16b\n"
- ".inst 0x4e9694dd // sdot v29.4s, v6.16b, v22.16b\n"
- ".inst 0x4e96969f // sdot v31.4s, v20.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e8f943e // sdot v30.4s, v1.16b, v15.16b\n"
- ".inst 0x4e9a943c // sdot v28.4s, v1.16b, v26.16b\n"
- "mls v31.4s, v21.4s, v16.4s\n"
- ".inst 0x4e969592 // sdot v18.4s, v12.16b, v22.16b\n"
- ".inst 0x4e9b969d // sdot v29.4s, v20.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x4e9a94de // sdot v30.4s, v6.16b, v26.16b\n"
- "ldr q4, [%x[params], #0x10]\n"
- ".inst 0x4e9694dc // sdot v28.4s, v6.16b, v22.16b\n"
- "mls v29.4s, v17.4s, v16.4s\n"
- "mov v21.16b, v18.16b\n .inst 0x4e9b9595 // sdot v21.4s, v12.16b, v27.16b\n"
- ".inst 0x4e8f9592 // sdot v18.4s, v12.16b, v15.16b\n"
- "ldr q17, [%x[params], #0x0]\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x4e96969e // sdot v30.4s, v20.16b, v22.16b\n"
- ".inst 0x4e9b969c // sdot v28.4s, v20.16b, v27.16b\n"
- "mls v30.4s, v18.4s, v16.4s\n"
- "mls v28.4s, v21.4s, v16.4s\n"
- "and v27.16b, v31.16b, v4.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v17.4s\n"
- "sqrdmulh v29.4s, v29.4s, v17.4s\n"
- "sqrdmulh v28.4s, v28.4s, v17.4s\n"
- "ldr q15, [%x[params], #0x60]\n"
- "sqadd v31.4s, v31.4s, v27.4s\n"
- "and v20.16b, v30.16b, v4.16b\n"
- "and v18.16b, v29.16b, v4.16b\n"
- "and v17.16b, v28.16b, v4.16b\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ "movi v20.4s, #0x0\n"
+ "add x13, x13, #0x10\n"
+ ".inst 0x4e8195f3 // sdot v19.4s, v15.16b, v1.16b\n"
+ ".inst 0x4e81951f // sdot v31.4s, v8.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ ".inst 0x4e8a9503 // sdot v3.4s, v8.16b, v10.16b\n"
+ ".inst 0x4e8a95f3 // sdot v19.4s, v15.16b, v10.16b\n"
+ ".inst 0x4e8195f4 // sdot v20.4s, v15.16b, v1.16b\n"
+ ".inst 0x4e8a963f // sdot v31.4s, v17.16b, v10.16b\n"
+ "ext v10.16b, v10.16b, v10.16b, #0x1\n"
+ ".inst 0x4e8197d7 // sdot v23.4s, v30.16b, v1.16b\n"
+ "mov v16.16b, v19.16b\n .inst 0x4e8595f0 // sdot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x4e8d95f3 // sdot v19.4s, v15.16b, v13.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x4e859623 // sdot v3.4s, v17.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x4e8a95f4 // sdot v20.4s, v15.16b, v10.16b\n"
+ ".inst 0x4e8d97da // sdot v26.4s, v30.16b, v13.16b\n"
+ ".inst 0x4e8a9517 // sdot v23.4s, v8.16b, v10.16b\n"
+ "mls v31.4s, v19.4s, v24.4s\n"
+ "movi v30.4s, #0x0\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x4e81951a // sdot v26.4s, v8.16b, v1.16b\n"
+ "ldr q1, [%x[params], #0x10]\n"
+ "mov v16.16b, v20.16b\n .inst 0x4e8595f0 // sdot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x4e8d95f4 // sdot v20.4s, v15.16b, v13.16b\n"
+ "ldr q8, [%x[params], #0x0]\n"
+ ".inst 0x4e9595fe // sdot v30.4s, v15.16b, v21.16b\n"
+ ".inst 0x4e859637 // sdot v23.4s, v17.16b, v5.16b\n"
+ ".inst 0x4e8a963a // sdot v26.4s, v17.16b, v10.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v8.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v1.16b\n"
+ ".inst 0x4e9d95fe // sdot v30.4s, v15.16b, v29.16b\n"
+ "mls v26.4s, v20.4s, v24.4s\n"
+ "movi v5.4s, #0x0\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "and v22.16b, v3.16b, v1.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "ldr q20, [%x[params], #0x60]\n"
+ "mov v19.16b, v30.16b\n .inst 0x4e9295f3 // sdot v19.4s, v15.16b, v18.16b\n"
+ ".inst 0x4e8295fe // sdot v30.4s, v15.16b, v2.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v1.16b\n"
+ "and v16.16b, v26.16b, v1.16b\n"
+ "sqadd v3.4s, v3.4s, v22.4s\n"
+ "ldr q8, [%x[params], #0x50]\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v20.4s\n"
- "ldr q27, [%x[params], #0x40]\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "ldr q26, [%x[params], #0x50]\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "ldr q6, [%x[params], #0x30]\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v29.4s, v29.4s, v4.4s\n"
- "srshl v28.4s, v28.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x70]\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
+ "srshl v31.4s, v31.4s, v1.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v1.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0x30]\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "ldr q16, [%x[params], #0x40]\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "srshl v23.4s, v23.4s, v1.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v1.4s\n"
+ "ldr q22, [%x[params], #0x70]\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "movi v1.4s, #0x0\n"
- ".inst 0x4e979581 // sdot v1.4s, v12.16b, v23.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s31, [x25, x27]\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s31, [x11, x12]\n"
"ldr q31, [%x[params], #0x20]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- ".inst 0x4e939581 // sdot v1.4s, v12.16b, v19.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s30, [x24, x27]\n"
- "mov v22.16b, v1.16b\n .inst 0x4e989596 // sdot v22.4s, v12.16b, v24.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s29, [x23, x27]\n"
- "mov v29.16b, v31.16b\n"
- ".inst 0x4e999581 // sdot v1.4s, v12.16b, v25.16b\n"
- "str s28, [x22, x27]\n"
- "mov v21.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- ".inst 0x4e9994df // sdot v31.4s, v6.16b, v25.16b\n"
- ".inst 0x4e9794d5 // sdot v21.4s, v6.16b, v23.16b\n"
- ".inst 0x4e97977f // sdot v31.4s, v27.16b, v23.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x4e9994dd // sdot v29.4s, v6.16b, v25.16b\n"
- ".inst 0x4e9794d4 // sdot v20.4s, v6.16b, v23.16b\n"
- ".inst 0x4e979592 // sdot v18.4s, v12.16b, v23.16b\n"
- ".inst 0x4e939775 // sdot v21.4s, v27.16b, v19.16b\n"
- ".inst 0x4e93975f // sdot v31.4s, v26.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x4e97977d // sdot v29.4s, v27.16b, v23.16b\n"
- ".inst 0x4e939774 // sdot v20.4s, v27.16b, v19.16b\n"
- "mls v31.4s, v1.4s, v16.4s\n"
- ".inst 0x4e939592 // sdot v18.4s, v12.16b, v19.16b\n"
- ".inst 0x4e989755 // sdot v21.4s, v26.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e93975d // sdot v29.4s, v26.16b, v19.16b\n"
- ".inst 0x4e989754 // sdot v20.4s, v26.16b, v24.16b\n"
- "sqrdmulh v31.4s, v31.4s, v15.4s\n"
- "mov v17.16b, v18.16b\n .inst 0x4e989591 // sdot v17.4s, v12.16b, v24.16b\n"
- ".inst 0x4e999592 // sdot v18.4s, v12.16b, v25.16b\n"
- "mls v29.4s, v18.4s, v16.4s\n"
- "mls v21.4s, v22.4s, v16.4s\n"
- "mls v20.4s, v17.4s, v16.4s\n"
- "and v17.16b, v31.16b, v4.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v29.4s, v29.4s, v15.4s\n"
- "sqrdmulh v21.4s, v21.4s, v15.4s\n"
- "sqrdmulh v20.4s, v20.4s, v15.4s\n"
- "ldr q27, [%x[params], #0xc0]\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v19.16b, v29.16b, v4.16b\n"
- "and v18.16b, v21.16b, v4.16b\n"
- "and v17.16b, v20.16b, v4.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s3, [x9, x12]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "mov v10.16b, v31.16b\n"
+ "str s26, [x10, x12]\n"
+ "mov v1.16b, v31.16b\n"
+ "str s23, [x28, x12]\n"
+ "mov v26.16b, v31.16b\n"
+ ".inst 0x4e82963f // sdot v31.4s, v17.16b, v2.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e95962a // sdot v10.4s, v17.16b, v21.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x4e95961f // sdot v31.4s, v16.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x4e829621 // sdot v1.4s, v17.16b, v2.16b\n"
+ ".inst 0x4e95963a // sdot v26.4s, v17.16b, v21.16b\n"
+ ".inst 0x4e9595e5 // sdot v5.4s, v15.16b, v21.16b\n"
+ ".inst 0x4e9d960a // sdot v10.4s, v16.16b, v29.16b\n"
+ ".inst 0x4e9d951f // sdot v31.4s, v8.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x4e959601 // sdot v1.4s, v16.16b, v21.16b\n"
+ ".inst 0x4e9d961a // sdot v26.4s, v16.16b, v29.16b\n"
+ ".inst 0x4e9d95e5 // sdot v5.4s, v15.16b, v29.16b\n"
+ ".inst 0x4e92950a // sdot v10.4s, v8.16b, v18.16b\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ "mls v31.4s, v30.4s, v24.4s\n"
+ "movi v3.4s, #0x0\n"
+ ".inst 0x4e9d9501 // sdot v1.4s, v8.16b, v29.16b\n"
+ ".inst 0x4e92951a // sdot v26.4s, v8.16b, v18.16b\n"
+ "mov v16.16b, v5.16b\n .inst 0x4e9295f0 // sdot v16.4s, v15.16b, v18.16b\n"
+ ".inst 0x4e8295e5 // sdot v5.4s, v15.16b, v2.16b\n"
+ ".inst 0x4e9c95e3 // sdot v3.4s, v15.16b, v28.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v20.4s\n"
+ "mls v10.4s, v19.4s, v24.4s\n"
+ "mls v26.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v22.16b\n"
+ "mls v1.4s, v5.4s, v24.4s\n"
+ "movi v2.4s, #0x0\n"
+ "sqrdmulh v10.4s, v10.4s, v20.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ ".inst 0x4e9b95e3 // sdot v3.4s, v15.16b, v27.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v1.4s, v1.4s, v20.4s\n"
+ "ldr q23, [%x[params], #0xc0]\n"
+ "and v17.16b, v10.16b, v22.16b\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v20.16b, v26.16b, v22.16b\n"
+ "and v16.16b, v1.16b, v22.16b\n"
+ "mov v19.16b, v3.16b\n .inst 0x4e8e95f3 // sdot v19.4s, v15.16b, v14.16b\n"
+ ".inst 0x4e8095e3 // sdot v3.4s, v15.16b, v0.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v4.4s\n"
- "sqadd v29.4s, v29.4s, v19.4s\n"
- "ldr q26, [%x[params], #0xa0]\n"
- "sqadd v21.4s, v21.4s, v18.4s\n"
- "ldr q25, [%x[params], #0xb0]\n"
- "sqadd v20.4s, v20.4s, v17.4s\n"
- "ldr q24, [%x[params], #0x90]\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "srshl v29.4s, v29.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q1, [%x[params], #0xd0]\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
+ "srshl v31.4s, v31.4s, v22.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v17.4s\n"
+ "ldr q18, [%x[params], #0xb0]\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "ldr q17, [%x[params], #0xa0]\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v20.4s\n"
+ "ldr q16, [%x[params], #0x90]\n"
+ "srshl v10.4s, v10.4s, v22.4s\n"
+ "srshl v1.4s, v1.4s, v22.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v22.4s\n"
+ "ldr q22, [%x[params], #0xd0]\n"
+ "add v10.4s, v10.4s, v12.4s\n"
+ "add v1.4s, v1.4s, v12.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smax v10.4s, v10.4s, v7.4s\n"
+ "smax v1.4s, v1.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "movi v23.4s, #0x0\n"
- ".inst 0x4e839597 // sdot v23.4s, v12.16b, v3.16b\n"
- ".inst 0x4e809597 // sdot v23.4s, v12.16b, v0.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smin v10.4s, v10.4s, v11.4s\n"
+ "smin v1.4s, v1.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s31, [x25, x27]\n"
- "ldr q31, [%x[params], #0x80]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v22.16b, v23.16b\n .inst 0x4e829596 // sdot v22.4s, v12.16b, v2.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s29, [x24, x27]\n"
- ".inst 0x4e879597 // sdot v23.4s, v12.16b, v7.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s21, [x23, x27]\n"
- "mov v21.16b, v31.16b\n"
- "str s20, [x22, x27]\n"
- "mov v4.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- ".inst 0x4e87971f // sdot v31.4s, v24.16b, v7.16b\n"
- ".inst 0x4e839704 // sdot v4.4s, v24.16b, v3.16b\n"
- ".inst 0x4e83975f // sdot v31.4s, v26.16b, v3.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x4e879715 // sdot v21.4s, v24.16b, v7.16b\n"
- ".inst 0x4e839714 // sdot v20.4s, v24.16b, v3.16b\n"
- ".inst 0x4e839592 // sdot v18.4s, v12.16b, v3.16b\n"
- ".inst 0x4e809744 // sdot v4.4s, v26.16b, v0.16b\n"
- ".inst 0x4e80973f // sdot v31.4s, v25.16b, v0.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "str s31, [x11, x12]\n"
+ "ldr q21, [%x[params], #0x80]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s1, [x10, x12]\n"
+ "mov v30.16b, v21.16b\n"
+ "str s10, [x9, x12]\n"
+ "mov v20.16b, v21.16b\n"
+ "str s26, [x28, x12]\n"
+ "mov v29.16b, v21.16b\n"
+ ".inst 0x4e809615 // sdot v21.4s, v16.16b, v0.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e9c9614 // sdot v20.4s, v16.16b, v28.16b\n"
"ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e839755 // sdot v21.4s, v26.16b, v3.16b\n"
- ".inst 0x4e809754 // sdot v20.4s, v26.16b, v0.16b\n"
- "mls v31.4s, v23.4s, v16.4s\n"
- ".inst 0x4e809592 // sdot v18.4s, v12.16b, v0.16b\n"
- ".inst 0x4e829724 // sdot v4.4s, v25.16b, v2.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- ".inst 0x4e809735 // sdot v21.4s, v25.16b, v0.16b\n"
- ".inst 0x4e829734 // sdot v20.4s, v25.16b, v2.16b\n"
- "sqrdmulh v31.4s, v31.4s, v27.4s\n"
- "mov v17.16b, v18.16b\n .inst 0x4e829591 // sdot v17.4s, v12.16b, v2.16b\n"
- ".inst 0x4e879592 // sdot v18.4s, v12.16b, v7.16b\n"
- "mls v21.4s, v18.4s, v16.4s\n"
- "mls v4.4s, v22.4s, v16.4s\n"
- "mls v20.4s, v17.4s, v16.4s\n"
- "and v17.16b, v31.16b, v1.16b\n"
+ ".inst 0x4e9c9635 // sdot v21.4s, v17.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e80961e // sdot v30.4s, v16.16b, v0.16b\n"
+ ".inst 0x4e9c961d // sdot v29.4s, v16.16b, v28.16b\n"
+ ".inst 0x4e9c95e2 // sdot v2.4s, v15.16b, v28.16b\n"
+ ".inst 0x4e9b9634 // sdot v20.4s, v17.16b, v27.16b\n"
+ ".inst 0x4e9b9655 // sdot v21.4s, v18.16b, v27.16b\n"
+ "ext v27.16b, v27.16b, v27.16b, #0x1\n"
+ ".inst 0x4e9c963e // sdot v30.4s, v17.16b, v28.16b\n"
+ ".inst 0x4e9b963d // sdot v29.4s, v17.16b, v27.16b\n"
+ ".inst 0x4e9b95e2 // sdot v2.4s, v15.16b, v27.16b\n"
+ ".inst 0x4e8e9654 // sdot v20.4s, v18.16b, v14.16b\n"
+ "ext v14.16b, v14.16b, v14.16b, #0x1\n"
+ "mls v21.4s, v3.4s, v24.4s\n"
+ "movi v5.4s, #0x0\n"
+ ".inst 0x4e9b965e // sdot v30.4s, v18.16b, v27.16b\n"
+ ".inst 0x4e8e965d // sdot v29.4s, v18.16b, v14.16b\n"
+ "mov v16.16b, v2.16b\n .inst 0x4e8e95f0 // sdot v16.4s, v15.16b, v14.16b\n"
+ ".inst 0x4e8095e2 // sdot v2.4s, v15.16b, v0.16b\n"
+ ".inst 0x4e8495e5 // sdot v5.4s, v15.16b, v4.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v23.4s\n"
+ "mls v20.4s, v19.4s, v24.4s\n"
+ "mls v29.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v21.16b, v22.16b\n"
+ "mls v30.4s, v2.4s, v24.4s\n"
+ "movi v27.4s, #0x0\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v23.4s\n"
+ ".inst 0x4e8695e5 // sdot v5.4s, v15.16b, v6.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "ldr q26, [%x[params], #0x120]\n"
+ "and v17.16b, v20.16b, v22.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "and v19.16b, v29.16b, v22.16b\n"
+ "and v16.16b, v30.16b, v22.16b\n"
+ "mov v14.16b, v5.16b\n .inst 0x4e9995ee // sdot v14.4s, v15.16b, v25.16b\n"
+ ".inst 0x4e8995e5 // sdot v5.4s, v15.16b, v9.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v21.4s, v21.4s, v27.4s\n"
- "sqrdmulh v4.4s, v4.4s, v27.4s\n"
- "sqrdmulh v20.4s, v20.4s, v27.4s\n"
- "ldr q30, [%x[params], #0x120]\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v19.16b, v21.16b, v1.16b\n"
- "and v18.16b, v4.16b, v1.16b\n"
- "and v17.16b, v20.16b, v1.16b\n"
+ "srshl v21.4s, v21.4s, v22.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "sqadd v21.4s, v21.4s, v19.4s\n"
- "ldr q29, [%x[params], #0x100]\n"
- "sqadd v4.4s, v4.4s, v18.4s\n"
- "ldr q28, [%x[params], #0x110]\n"
"sqadd v20.4s, v20.4s, v17.4s\n"
- "ldr q27, [%x[params], #0xf0]\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v4.4s, v4.4s, v1.4s\n"
- "srshl v20.4s, v20.4s, v1.4s\n"
- "ldr q26, [%x[params], #0x130]\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v4.4s, v4.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v4.4s, v4.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
+ "ldr q18, [%x[params], #0x110]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "ldr q17, [%x[params], #0x100]\n"
+ "add v21.4s, v21.4s, v12.4s\n"
+ "sqadd v29.4s, v29.4s, v19.4s\n"
+ "ldr q16, [%x[params], #0xf0]\n"
+ "srshl v20.4s, v20.4s, v22.4s\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "smax v21.4s, v21.4s, v7.4s\n"
+ "srshl v29.4s, v29.4s, v22.4s\n"
+ "ldr q23, [%x[params], #0x130]\n"
+ "add v20.4s, v20.4s, v12.4s\n"
+ "add v30.4s, v30.4s, v12.4s\n"
"smin v21.4s, v21.4s, v11.4s\n"
- "smin v4.4s, v4.4s, v11.4s\n"
+ "add v29.4s, v29.4s, v12.4s\n"
+ "smax v20.4s, v20.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v7.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smax v29.4s, v29.4s, v7.4s\n"
"smin v20.4s, v20.4s, v11.4s\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "movi v25.4s, #0x0\n"
- ".inst 0x4e8a9599 // sdot v25.4s, v12.16b, v10.16b\n"
- ".inst 0x4e859599 // sdot v25.4s, v12.16b, v5.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s31, [x25, x27]\n"
- "ldr q24, [%x[params], #0xe0]\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "smin v29.4s, v29.4s, v11.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v23.16b, v25.16b\n .inst 0x4e899597 // sdot v23.4s, v12.16b, v9.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s21, [x11, x12]\n"
+ "ldr q22, [%x[params], #0xe0]\n"
"add %x[params], %x[params], #0x140\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
- "str s21, [x24, x27]\n"
- ".inst 0x4e889599 // sdot v25.4s, v12.16b, v8.16b\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s4, [x23, x27]\n"
- "mov v22.16b, v24.16b\n"
- "str s20, [x22, x27]\n"
- "mov v21.16b, v24.16b\n"
- "mov v20.16b, v24.16b\n"
- ".inst 0x4e889778 // sdot v24.4s, v27.16b, v8.16b\n"
- ".inst 0x4e8a9775 // sdot v21.4s, v27.16b, v10.16b\n"
- ".inst 0x4e8a97b8 // sdot v24.4s, v29.16b, v10.16b\n"
- "ext v8.16b, v8.16b, v8.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x4e889776 // sdot v22.4s, v27.16b, v8.16b\n"
- ".inst 0x4e8a9774 // sdot v20.4s, v27.16b, v10.16b\n"
- ".inst 0x4e8a9592 // sdot v18.4s, v12.16b, v10.16b\n"
- ".inst 0x4e8597b5 // sdot v21.4s, v29.16b, v5.16b\n"
- ".inst 0x4e859798 // sdot v24.4s, v28.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x4e8a97b6 // sdot v22.4s, v29.16b, v10.16b\n"
- ".inst 0x4e8597b4 // sdot v20.4s, v29.16b, v5.16b\n"
- "mls v24.4s, v25.4s, v16.4s\n"
- ".inst 0x4e859592 // sdot v18.4s, v12.16b, v5.16b\n"
- ".inst 0x4e899795 // sdot v21.4s, v28.16b, v9.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s20, [x9, x12]\n"
+ "mov v21.16b, v22.16b\n"
+ "str s30, [x10, x12]\n"
+ "mov v20.16b, v22.16b\n"
+ "str s29, [x28, x12]\n"
+ "mov v19.16b, v22.16b\n"
+ ".inst 0x4e899616 // sdot v22.4s, v16.16b, v9.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e849615 // sdot v21.4s, v16.16b, v4.16b\n"
"ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x4e859796 // sdot v22.4s, v28.16b, v5.16b\n"
- ".inst 0x4e899794 // sdot v20.4s, v28.16b, v9.16b\n"
- "sqrdmulh v24.4s, v24.4s, v30.4s\n"
- "mov v17.16b, v18.16b\n .inst 0x4e899591 // sdot v17.4s, v12.16b, v9.16b\n"
- ".inst 0x4e889592 // sdot v18.4s, v12.16b, v8.16b\n"
- "mls v22.4s, v18.4s, v16.4s\n"
- "mls v21.4s, v23.4s, v16.4s\n"
- "mls v20.4s, v17.4s, v16.4s\n"
- "and v17.16b, v24.16b, v26.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqrdmulh v21.4s, v21.4s, v30.4s\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "sqadd v24.4s, v24.4s, v17.4s\n"
- "and v19.16b, v22.16b, v26.16b\n"
- "and v18.16b, v21.16b, v26.16b\n"
- "and v17.16b, v20.16b, v26.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ ".inst 0x4e849636 // sdot v22.4s, v17.16b, v4.16b\n"
+ "ext v4.16b, v4.16b, v4.16b, #0x1\n"
+ ".inst 0x4e899614 // sdot v20.4s, v16.16b, v9.16b\n"
+ ".inst 0x4e849613 // sdot v19.4s, v16.16b, v4.16b\n"
+ ".inst 0x4e8495fb // sdot v27.4s, v15.16b, v4.16b\n"
+ ".inst 0x4e869635 // sdot v21.4s, v17.16b, v6.16b\n"
+ ".inst 0x4e869656 // sdot v22.4s, v18.16b, v6.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ ".inst 0x4e849634 // sdot v20.4s, v17.16b, v4.16b\n"
+ ".inst 0x4e869633 // sdot v19.4s, v17.16b, v6.16b\n"
+ ".inst 0x4e8695fb // sdot v27.4s, v15.16b, v6.16b\n"
+ ".inst 0x4e999655 // sdot v21.4s, v18.16b, v25.16b\n"
+ "ext v25.16b, v25.16b, v25.16b, #0x1\n"
+ "mls v22.4s, v5.4s, v24.4s\n"
+ ".inst 0x4e869654 // sdot v20.4s, v18.16b, v6.16b\n"
+ ".inst 0x4e999653 // sdot v19.4s, v18.16b, v25.16b\n"
+ "mov v17.16b, v27.16b\n .inst 0x4e9995f1 // sdot v17.4s, v15.16b, v25.16b\n"
+ ".inst 0x4e8995fb // sdot v27.4s, v15.16b, v9.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v26.4s\n"
+ "mls v21.4s, v14.4s, v24.4s\n"
+ "and v16.16b, v22.16b, v23.16b\n"
+ "mls v20.4s, v27.4s, v24.4s\n"
+ "mls v19.4s, v17.4s, v24.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v26.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v26.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v18.16b, v21.16b, v23.16b\n"
+ "and v17.16b, v20.16b, v23.16b\n"
+ "and v16.16b, v19.16b, v23.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v22.4s, v22.4s, v23.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v22.4s, v22.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v21.4s, v21.4s, v18.4s\n"
+ "add v22.4s, v22.4s, v12.4s\n"
"sqadd v20.4s, v20.4s, v17.4s\n"
- "srshl v24.4s, v24.4s, v26.4s\n"
- "srshl v22.4s, v22.4s, v26.4s\n"
- "srshl v21.4s, v21.4s, v26.4s\n"
- "srshl v20.4s, v20.4s, v26.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v21.4s, v21.4s, v23.4s\n"
+ "smax v22.4s, v22.4s, v7.4s\n"
+ "srshl v20.4s, v20.4s, v23.4s\n"
+ "srshl v19.4s, v19.4s, v23.4s\n"
+ "add v21.4s, v21.4s, v12.4s\n"
"smin v22.4s, v22.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v12.4s\n"
+ "add v19.4s, v19.4s, v12.4s\n"
+ "smax v21.4s, v21.4s, v7.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smax v20.4s, v20.4s, v7.4s\n"
+ "smax v19.4s, v19.4s, v7.4s\n"
"smin v21.4s, v21.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s22, [x11, x12]\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x25, x27]\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s22, [x24, x27]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s21, [x23, x27]\n"
- "str s20, [x22, x27]\n"
- "add x27, x27, #0x4\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s20, [x10, x12]\n"
+ "str s21, [x9, x12]\n"
+ "str s19, [x28, x12]\n"
+ "add x12, x12, #0x4\n"
"beq 35f\n"
"3:" // Oddments
"and x20, %x[n_channels], #0xf\n"
- "add x15, x15, x28\n"
- "add x14, x14, x28\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "add x10, x10, x28\n"
- "add x9, x9, x28\n"
- "add x26, x26, x28\n"
- "add x21, x21, x28\n"
+ "add x15, x15, x13\n"
+ "add x14, x14, x13\n"
+ "add x27, x27, x13\n"
+ "add x26, x26, x13\n"
+ "add x25, x25, x13\n"
+ "add x24, x24, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"tbz %x[n_channels], #3, 7f\n"
- "ldr d15, [x15], #0x8\n"
- "ldr d25, [x14], #0x8\n"
- "ldr d7, [x13], #0x8\n"
- "ldr d8, [x12], #0x8\n"
- "ldr d26, [x10], #0x8\n"
- "ldr d23, [x9], #0x8\n"
- "ldr d3, [x26], #0x8\n"
- "ldr d10, [x21], #0x8\n"
+ "ldr d13, [x15], #0x8\n"
+ "ldr d2, [x14], #0x8\n"
+ "ldr d0, [x27], #0x8\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"tbz %x[n_channels], #2, 5f\n"
- "ld1 { v15.s }[2], [x15], #0x4\n"
- "ld1 { v25.s }[2], [x14], #0x4\n"
- "ld1 { v7.s }[2], [x13], #0x4\n"
- "ld1 { v8.s }[2], [x12], #0x4\n"
- "ld1 { v26.s }[2], [x10], #0x4\n"
- "ld1 { v23.s }[2], [x9], #0x4\n"
- "ld1 { v3.s }[2], [x26], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
+ "ld1 { v13.s }[2], [x15], #0x4\n"
+ "ld1 { v2.s }[2], [x14], #0x4\n"
+ "ld1 { v0.s }[2], [x27], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
+ "ld1 { v1.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v28.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v15.h }[6], [x15], #0x2\n"
- "ld1 { v25.h }[6], [x14], #0x2\n"
- "ld1 { v7.h }[6], [x13], #0x2\n"
- "ld1 { v8.h }[6], [x12], #0x2\n"
- "ld1 { v26.h }[6], [x10], #0x2\n"
- "ld1 { v23.h }[6], [x9], #0x2\n"
- "ld1 { v3.h }[6], [x26], #0x2\n"
- "ld1 { v10.h }[6], [x21], #0x2\n"
+ "ld1 { v13.h }[6], [x15], #0x2\n"
+ "ld1 { v2.h }[6], [x14], #0x2\n"
+ "ld1 { v0.h }[6], [x27], #0x2\n"
+ "ld1 { v9.h }[6], [x26], #0x2\n"
+ "ld1 { v1.h }[6], [x25], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v28.h }[6], [x23], #0x2\n"
+ "ld1 { v4.h }[6], [x22], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[14], [x15], #0x1\n"
- "ld1 { v25.b }[14], [x14], #0x1\n"
- "ld1 { v7.b }[14], [x13], #0x1\n"
- "ld1 { v8.b }[14], [x12], #0x1\n"
- "ld1 { v26.b }[14], [x10], #0x1\n"
- "ld1 { v23.b }[14], [x9], #0x1\n"
- "ld1 { v3.b }[14], [x26], #0x1\n"
- "ld1 { v10.b }[14], [x21], #0x1\n"
+ "ld1 { v13.b }[14], [x15], #0x1\n"
+ "ld1 { v2.b }[14], [x14], #0x1\n"
+ "ld1 { v0.b }[14], [x27], #0x1\n"
+ "ld1 { v9.b }[14], [x26], #0x1\n"
+ "ld1 { v1.b }[14], [x25], #0x1\n"
+ "ld1 { v21.b }[14], [x24], #0x1\n"
+ "ld1 { v28.b }[14], [x23], #0x1\n"
+ "ld1 { v4.b }[14], [x22], #0x1\n"
"b 11f\n"
"4:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[12], [x15], #0x1\n"
- "ld1 { v25.b }[12], [x14], #0x1\n"
- "ld1 { v7.b }[12], [x13], #0x1\n"
- "ld1 { v8.b }[12], [x12], #0x1\n"
- "ld1 { v26.b }[12], [x10], #0x1\n"
- "ld1 { v23.b }[12], [x9], #0x1\n"
- "ld1 { v3.b }[12], [x26], #0x1\n"
- "ld1 { v10.b }[12], [x21], #0x1\n"
+ "ld1 { v13.b }[12], [x15], #0x1\n"
+ "ld1 { v2.b }[12], [x14], #0x1\n"
+ "ld1 { v0.b }[12], [x27], #0x1\n"
+ "ld1 { v9.b }[12], [x26], #0x1\n"
+ "ld1 { v1.b }[12], [x25], #0x1\n"
+ "ld1 { v21.b }[12], [x24], #0x1\n"
+ "ld1 { v28.b }[12], [x23], #0x1\n"
+ "ld1 { v4.b }[12], [x22], #0x1\n"
"b 11f\n"
"5:" // Oddments: Load (A): Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v15.h }[4], [x15], #0x2\n"
- "ld1 { v25.h }[4], [x14], #0x2\n"
- "ld1 { v7.h }[4], [x13], #0x2\n"
- "ld1 { v8.h }[4], [x12], #0x2\n"
- "ld1 { v26.h }[4], [x10], #0x2\n"
- "ld1 { v23.h }[4], [x9], #0x2\n"
- "ld1 { v3.h }[4], [x26], #0x2\n"
- "ld1 { v10.h }[4], [x21], #0x2\n"
+ "ld1 { v13.h }[4], [x15], #0x2\n"
+ "ld1 { v2.h }[4], [x14], #0x2\n"
+ "ld1 { v0.h }[4], [x27], #0x2\n"
+ "ld1 { v9.h }[4], [x26], #0x2\n"
+ "ld1 { v1.h }[4], [x25], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v28.h }[4], [x23], #0x2\n"
+ "ld1 { v4.h }[4], [x22], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[10], [x15], #0x1\n"
- "ld1 { v25.b }[10], [x14], #0x1\n"
- "ld1 { v7.b }[10], [x13], #0x1\n"
- "ld1 { v8.b }[10], [x12], #0x1\n"
- "ld1 { v26.b }[10], [x10], #0x1\n"
- "ld1 { v23.b }[10], [x9], #0x1\n"
- "ld1 { v3.b }[10], [x26], #0x1\n"
- "ld1 { v10.b }[10], [x21], #0x1\n"
+ "ld1 { v13.b }[10], [x15], #0x1\n"
+ "ld1 { v2.b }[10], [x14], #0x1\n"
+ "ld1 { v0.b }[10], [x27], #0x1\n"
+ "ld1 { v9.b }[10], [x26], #0x1\n"
+ "ld1 { v1.b }[10], [x25], #0x1\n"
+ "ld1 { v21.b }[10], [x24], #0x1\n"
+ "ld1 { v28.b }[10], [x23], #0x1\n"
+ "ld1 { v4.b }[10], [x22], #0x1\n"
"b 11f\n"
"6:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[8], [x15], #0x1\n"
- "ld1 { v25.b }[8], [x14], #0x1\n"
- "ld1 { v7.b }[8], [x13], #0x1\n"
- "ld1 { v8.b }[8], [x12], #0x1\n"
- "ld1 { v26.b }[8], [x10], #0x1\n"
- "ld1 { v23.b }[8], [x9], #0x1\n"
- "ld1 { v3.b }[8], [x26], #0x1\n"
- "ld1 { v10.b }[8], [x21], #0x1\n"
+ "ld1 { v13.b }[8], [x15], #0x1\n"
+ "ld1 { v2.b }[8], [x14], #0x1\n"
+ "ld1 { v0.b }[8], [x27], #0x1\n"
+ "ld1 { v9.b }[8], [x26], #0x1\n"
+ "ld1 { v1.b }[8], [x25], #0x1\n"
+ "ld1 { v21.b }[8], [x24], #0x1\n"
+ "ld1 { v28.b }[8], [x23], #0x1\n"
+ "ld1 { v4.b }[8], [x22], #0x1\n"
"b 11f\n"
"7:" // Oddments: Load (A): Bit 3: Unset
"tbz %x[n_channels], #2, 9f\n"
- "ldr s15, [x15], #0x4\n"
- "ldr s25, [x14], #0x4\n"
- "ldr s7, [x13], #0x4\n"
- "ldr s8, [x12], #0x4\n"
- "ldr s26, [x10], #0x4\n"
- "ldr s23, [x9], #0x4\n"
- "ldr s3, [x26], #0x4\n"
- "ldr s10, [x21], #0x4\n"
+ "ldr s13, [x15], #0x4\n"
+ "ldr s2, [x14], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "ldr s9, [x26], #0x4\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s28, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v15.h }[2], [x15], #0x2\n"
- "ld1 { v25.h }[2], [x14], #0x2\n"
- "ld1 { v7.h }[2], [x13], #0x2\n"
- "ld1 { v8.h }[2], [x12], #0x2\n"
- "ld1 { v26.h }[2], [x10], #0x2\n"
- "ld1 { v23.h }[2], [x9], #0x2\n"
- "ld1 { v3.h }[2], [x26], #0x2\n"
- "ld1 { v10.h }[2], [x21], #0x2\n"
+ "ld1 { v13.h }[2], [x15], #0x2\n"
+ "ld1 { v2.h }[2], [x14], #0x2\n"
+ "ld1 { v0.h }[2], [x27], #0x2\n"
+ "ld1 { v9.h }[2], [x26], #0x2\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[6], [x15], #0x1\n"
- "ld1 { v25.b }[6], [x14], #0x1\n"
- "ld1 { v7.b }[6], [x13], #0x1\n"
- "ld1 { v8.b }[6], [x12], #0x1\n"
- "ld1 { v26.b }[6], [x10], #0x1\n"
- "ld1 { v23.b }[6], [x9], #0x1\n"
- "ld1 { v3.b }[6], [x26], #0x1\n"
- "ld1 { v10.b }[6], [x21], #0x1\n"
+ "ld1 { v13.b }[6], [x15], #0x1\n"
+ "ld1 { v2.b }[6], [x14], #0x1\n"
+ "ld1 { v0.b }[6], [x27], #0x1\n"
+ "ld1 { v9.b }[6], [x26], #0x1\n"
+ "ld1 { v1.b }[6], [x25], #0x1\n"
+ "ld1 { v21.b }[6], [x24], #0x1\n"
+ "ld1 { v28.b }[6], [x23], #0x1\n"
+ "ld1 { v4.b }[6], [x22], #0x1\n"
"b 11f\n"
"8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[4], [x15], #0x1\n"
- "ld1 { v25.b }[4], [x14], #0x1\n"
- "ld1 { v7.b }[4], [x13], #0x1\n"
- "ld1 { v8.b }[4], [x12], #0x1\n"
- "ld1 { v26.b }[4], [x10], #0x1\n"
- "ld1 { v23.b }[4], [x9], #0x1\n"
- "ld1 { v3.b }[4], [x26], #0x1\n"
- "ld1 { v10.b }[4], [x21], #0x1\n"
+ "ld1 { v13.b }[4], [x15], #0x1\n"
+ "ld1 { v2.b }[4], [x14], #0x1\n"
+ "ld1 { v0.b }[4], [x27], #0x1\n"
+ "ld1 { v9.b }[4], [x26], #0x1\n"
+ "ld1 { v1.b }[4], [x25], #0x1\n"
+ "ld1 { v21.b }[4], [x24], #0x1\n"
+ "ld1 { v28.b }[4], [x23], #0x1\n"
+ "ld1 { v4.b }[4], [x22], #0x1\n"
"b 11f\n"
"9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 10f\n"
- "ldr h15, [x15], #0x2\n"
- "ldr h25, [x14], #0x2\n"
- "ldr h7, [x13], #0x2\n"
- "ldr h8, [x12], #0x2\n"
- "ldr h26, [x10], #0x2\n"
- "ldr h23, [x9], #0x2\n"
- "ldr h3, [x26], #0x2\n"
- "ldr h10, [x21], #0x2\n"
+ "ldr h13, [x15], #0x2\n"
+ "ldr h2, [x14], #0x2\n"
+ "ldr h0, [x27], #0x2\n"
+ "ldr h9, [x26], #0x2\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h28, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[2], [x15], #0x1\n"
- "ld1 { v25.b }[2], [x14], #0x1\n"
- "ld1 { v7.b }[2], [x13], #0x1\n"
- "ld1 { v8.b }[2], [x12], #0x1\n"
- "ld1 { v26.b }[2], [x10], #0x1\n"
- "ld1 { v23.b }[2], [x9], #0x1\n"
- "ld1 { v3.b }[2], [x26], #0x1\n"
- "ld1 { v10.b }[2], [x21], #0x1\n"
+ "ld1 { v13.b }[2], [x15], #0x1\n"
+ "ld1 { v2.b }[2], [x14], #0x1\n"
+ "ld1 { v0.b }[2], [x27], #0x1\n"
+ "ld1 { v9.b }[2], [x26], #0x1\n"
+ "ld1 { v1.b }[2], [x25], #0x1\n"
+ "ld1 { v21.b }[2], [x24], #0x1\n"
+ "ld1 { v28.b }[2], [x23], #0x1\n"
+ "ld1 { v4.b }[2], [x22], #0x1\n"
"b 11f\n"
"10:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b15, [x15], #0x1\n"
- "ldr b25, [x14], #0x1\n"
- "ldr b7, [x13], #0x1\n"
- "ldr b8, [x12], #0x1\n"
- "ldr b26, [x10], #0x1\n"
- "ldr b23, [x9], #0x1\n"
- "ldr b3, [x26], #0x1\n"
- "ldr b10, [x21], #0x1\n"
+ "ldr b13, [x15], #0x1\n"
+ "ldr b2, [x14], #0x1\n"
+ "ldr b0, [x27], #0x1\n"
+ "ldr b9, [x26], #0x1\n"
+ "ldr b1, [x25], #0x1\n"
+ "ldr b21, [x24], #0x1\n"
+ "ldr b28, [x23], #0x1\n"
+ "ldr b4, [x22], #0x1\n"
"11:" // Oddments: Load (A): Bit 3: End
"ldp x15, x14, [%x[inptrs], #0x40]\n"
- "ldp x13, x12, [%x[inptrs], #0x50]\n"
- "add x15, x15, x28\n"
- "add x14, x14, x28\n"
- "ldp x10, x9, [%x[inptrs], #0x60]\n"
- "ldp x26, x21, [%x[inptrs], #0x70]\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "add x10, x10, x28\n"
- "add x9, x9, x28\n"
- "add x26, x26, x28\n"
- "add x21, x21, x28\n"
+ "ldp x27, x26, [%x[inptrs], #0x50]\n"
+ "ldp x25, x24, [%x[inptrs], #0x60]\n"
+ "ldp x23, x22, [%x[inptrs], #0x70]\n"
+ "add x15, x15, x13\n"
+ "add x14, x14, x13\n"
+ "add x27, x27, x13\n"
+ "add x26, x26, x13\n"
+ "add x25, x25, x13\n"
+ "add x24, x24, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"tbz %x[n_channels], #3, 15f\n"
- "ldr d22, [x15], #0x8\n"
- "ldr d19, [x14], #0x8\n"
- "ldr d0, [x13], #0x8\n"
- "ldr d5, [x12], #0x8\n"
- "ldr d27, [x10], #0x8\n"
- "ldr d24, [x9], #0x8\n"
- "ldr d2, [x26], #0x8\n"
- "ldr d9, [x21], #0x8\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d29, [x14], #0x8\n"
+ "ldr d27, [x27], #0x8\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d5, [x25], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "ldr d25, [x22], #0x8\n"
"tbz %x[n_channels], #2, 13f\n"
- "ld1 { v22.s }[2], [x15], #0x4\n"
- "ld1 { v19.s }[2], [x14], #0x4\n"
- "ld1 { v0.s }[2], [x13], #0x4\n"
- "ld1 { v5.s }[2], [x12], #0x4\n"
- "ld1 { v27.s }[2], [x10], #0x4\n"
- "ld1 { v24.s }[2], [x9], #0x4\n"
- "ld1 { v2.s }[2], [x26], #0x4\n"
- "ld1 { v9.s }[2], [x21], #0x4\n"
+ "ld1 { v10.s }[2], [x15], #0x4\n"
+ "ld1 { v29.s }[2], [x14], #0x4\n"
+ "ld1 { v27.s }[2], [x27], #0x4\n"
+ "ld1 { v6.s }[2], [x26], #0x4\n"
+ "ld1 { v5.s }[2], [x25], #0x4\n"
+ "ld1 { v18.s }[2], [x24], #0x4\n"
+ "ld1 { v14.s }[2], [x23], #0x4\n"
+ "ld1 { v25.s }[2], [x22], #0x4\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v22.h }[6], [x15], #0x2\n"
- "ld1 { v19.h }[6], [x14], #0x2\n"
- "ld1 { v0.h }[6], [x13], #0x2\n"
- "ld1 { v5.h }[6], [x12], #0x2\n"
- "ld1 { v27.h }[6], [x10], #0x2\n"
- "ld1 { v24.h }[6], [x9], #0x2\n"
- "ld1 { v2.h }[6], [x26], #0x2\n"
- "ld1 { v9.h }[6], [x21], #0x2\n"
+ "ld1 { v10.h }[6], [x15], #0x2\n"
+ "ld1 { v29.h }[6], [x14], #0x2\n"
+ "ld1 { v27.h }[6], [x27], #0x2\n"
+ "ld1 { v6.h }[6], [x26], #0x2\n"
+ "ld1 { v5.h }[6], [x25], #0x2\n"
+ "ld1 { v18.h }[6], [x24], #0x2\n"
+ "ld1 { v14.h }[6], [x23], #0x2\n"
+ "ld1 { v25.h }[6], [x22], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[14], [x15], #0x1\n"
- "ld1 { v19.b }[14], [x14], #0x1\n"
- "ld1 { v0.b }[14], [x13], #0x1\n"
- "ld1 { v5.b }[14], [x12], #0x1\n"
- "ld1 { v27.b }[14], [x10], #0x1\n"
- "ld1 { v24.b }[14], [x9], #0x1\n"
- "ld1 { v2.b }[14], [x26], #0x1\n"
- "ld1 { v9.b }[14], [x21], #0x1\n"
+ "ld1 { v10.b }[14], [x15], #0x1\n"
+ "ld1 { v29.b }[14], [x14], #0x1\n"
+ "ld1 { v27.b }[14], [x27], #0x1\n"
+ "ld1 { v6.b }[14], [x26], #0x1\n"
+ "ld1 { v5.b }[14], [x25], #0x1\n"
+ "ld1 { v18.b }[14], [x24], #0x1\n"
+ "ld1 { v14.b }[14], [x23], #0x1\n"
+ "ld1 { v25.b }[14], [x22], #0x1\n"
"b 19f\n"
"12:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[12], [x15], #0x1\n"
- "ld1 { v19.b }[12], [x14], #0x1\n"
- "ld1 { v0.b }[12], [x13], #0x1\n"
- "ld1 { v5.b }[12], [x12], #0x1\n"
- "ld1 { v27.b }[12], [x10], #0x1\n"
- "ld1 { v24.b }[12], [x9], #0x1\n"
- "ld1 { v2.b }[12], [x26], #0x1\n"
- "ld1 { v9.b }[12], [x21], #0x1\n"
+ "ld1 { v10.b }[12], [x15], #0x1\n"
+ "ld1 { v29.b }[12], [x14], #0x1\n"
+ "ld1 { v27.b }[12], [x27], #0x1\n"
+ "ld1 { v6.b }[12], [x26], #0x1\n"
+ "ld1 { v5.b }[12], [x25], #0x1\n"
+ "ld1 { v18.b }[12], [x24], #0x1\n"
+ "ld1 { v14.b }[12], [x23], #0x1\n"
+ "ld1 { v25.b }[12], [x22], #0x1\n"
"b 19f\n"
"13:" // Oddments: Load (B): Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v22.h }[4], [x15], #0x2\n"
- "ld1 { v19.h }[4], [x14], #0x2\n"
- "ld1 { v0.h }[4], [x13], #0x2\n"
- "ld1 { v5.h }[4], [x12], #0x2\n"
- "ld1 { v27.h }[4], [x10], #0x2\n"
- "ld1 { v24.h }[4], [x9], #0x2\n"
- "ld1 { v2.h }[4], [x26], #0x2\n"
- "ld1 { v9.h }[4], [x21], #0x2\n"
+ "ld1 { v10.h }[4], [x15], #0x2\n"
+ "ld1 { v29.h }[4], [x14], #0x2\n"
+ "ld1 { v27.h }[4], [x27], #0x2\n"
+ "ld1 { v6.h }[4], [x26], #0x2\n"
+ "ld1 { v5.h }[4], [x25], #0x2\n"
+ "ld1 { v18.h }[4], [x24], #0x2\n"
+ "ld1 { v14.h }[4], [x23], #0x2\n"
+ "ld1 { v25.h }[4], [x22], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[10], [x15], #0x1\n"
- "ld1 { v19.b }[10], [x14], #0x1\n"
- "ld1 { v0.b }[10], [x13], #0x1\n"
- "ld1 { v5.b }[10], [x12], #0x1\n"
- "ld1 { v27.b }[10], [x10], #0x1\n"
- "ld1 { v24.b }[10], [x9], #0x1\n"
- "ld1 { v2.b }[10], [x26], #0x1\n"
- "ld1 { v9.b }[10], [x21], #0x1\n"
+ "ld1 { v10.b }[10], [x15], #0x1\n"
+ "ld1 { v29.b }[10], [x14], #0x1\n"
+ "ld1 { v27.b }[10], [x27], #0x1\n"
+ "ld1 { v6.b }[10], [x26], #0x1\n"
+ "ld1 { v5.b }[10], [x25], #0x1\n"
+ "ld1 { v18.b }[10], [x24], #0x1\n"
+ "ld1 { v14.b }[10], [x23], #0x1\n"
+ "ld1 { v25.b }[10], [x22], #0x1\n"
"b 19f\n"
"14:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[8], [x15], #0x1\n"
- "ld1 { v19.b }[8], [x14], #0x1\n"
- "ld1 { v0.b }[8], [x13], #0x1\n"
- "ld1 { v5.b }[8], [x12], #0x1\n"
- "ld1 { v27.b }[8], [x10], #0x1\n"
- "ld1 { v24.b }[8], [x9], #0x1\n"
- "ld1 { v2.b }[8], [x26], #0x1\n"
- "ld1 { v9.b }[8], [x21], #0x1\n"
+ "ld1 { v10.b }[8], [x15], #0x1\n"
+ "ld1 { v29.b }[8], [x14], #0x1\n"
+ "ld1 { v27.b }[8], [x27], #0x1\n"
+ "ld1 { v6.b }[8], [x26], #0x1\n"
+ "ld1 { v5.b }[8], [x25], #0x1\n"
+ "ld1 { v18.b }[8], [x24], #0x1\n"
+ "ld1 { v14.b }[8], [x23], #0x1\n"
+ "ld1 { v25.b }[8], [x22], #0x1\n"
"b 19f\n"
"15:" // Oddments: Load (B): Bit 3: Unset
"tbz %x[n_channels], #2, 17f\n"
- "ldr s22, [x15], #0x4\n"
- "ldr s19, [x14], #0x4\n"
- "ldr s0, [x13], #0x4\n"
- "ldr s5, [x12], #0x4\n"
- "ldr s27, [x10], #0x4\n"
- "ldr s24, [x9], #0x4\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s9, [x21], #0x4\n"
+ "ldr s10, [x15], #0x4\n"
+ "ldr s29, [x14], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s6, [x26], #0x4\n"
+ "ldr s5, [x25], #0x4\n"
+ "ldr s18, [x24], #0x4\n"
+ "ldr s14, [x23], #0x4\n"
+ "ldr s25, [x22], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v22.h }[2], [x15], #0x2\n"
- "ld1 { v19.h }[2], [x14], #0x2\n"
- "ld1 { v0.h }[2], [x13], #0x2\n"
- "ld1 { v5.h }[2], [x12], #0x2\n"
- "ld1 { v27.h }[2], [x10], #0x2\n"
- "ld1 { v24.h }[2], [x9], #0x2\n"
- "ld1 { v2.h }[2], [x26], #0x2\n"
- "ld1 { v9.h }[2], [x21], #0x2\n"
+ "ld1 { v10.h }[2], [x15], #0x2\n"
+ "ld1 { v29.h }[2], [x14], #0x2\n"
+ "ld1 { v27.h }[2], [x27], #0x2\n"
+ "ld1 { v6.h }[2], [x26], #0x2\n"
+ "ld1 { v5.h }[2], [x25], #0x2\n"
+ "ld1 { v18.h }[2], [x24], #0x2\n"
+ "ld1 { v14.h }[2], [x23], #0x2\n"
+ "ld1 { v25.h }[2], [x22], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[6], [x15], #0x1\n"
- "ld1 { v19.b }[6], [x14], #0x1\n"
- "ld1 { v0.b }[6], [x13], #0x1\n"
- "ld1 { v5.b }[6], [x12], #0x1\n"
- "ld1 { v27.b }[6], [x10], #0x1\n"
- "ld1 { v24.b }[6], [x9], #0x1\n"
- "ld1 { v2.b }[6], [x26], #0x1\n"
- "ld1 { v9.b }[6], [x21], #0x1\n"
+ "ld1 { v10.b }[6], [x15], #0x1\n"
+ "ld1 { v29.b }[6], [x14], #0x1\n"
+ "ld1 { v27.b }[6], [x27], #0x1\n"
+ "ld1 { v6.b }[6], [x26], #0x1\n"
+ "ld1 { v5.b }[6], [x25], #0x1\n"
+ "ld1 { v18.b }[6], [x24], #0x1\n"
+ "ld1 { v14.b }[6], [x23], #0x1\n"
+ "ld1 { v25.b }[6], [x22], #0x1\n"
"b 19f\n"
"16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[4], [x15], #0x1\n"
- "ld1 { v19.b }[4], [x14], #0x1\n"
- "ld1 { v0.b }[4], [x13], #0x1\n"
- "ld1 { v5.b }[4], [x12], #0x1\n"
- "ld1 { v27.b }[4], [x10], #0x1\n"
- "ld1 { v24.b }[4], [x9], #0x1\n"
- "ld1 { v2.b }[4], [x26], #0x1\n"
- "ld1 { v9.b }[4], [x21], #0x1\n"
+ "ld1 { v10.b }[4], [x15], #0x1\n"
+ "ld1 { v29.b }[4], [x14], #0x1\n"
+ "ld1 { v27.b }[4], [x27], #0x1\n"
+ "ld1 { v6.b }[4], [x26], #0x1\n"
+ "ld1 { v5.b }[4], [x25], #0x1\n"
+ "ld1 { v18.b }[4], [x24], #0x1\n"
+ "ld1 { v14.b }[4], [x23], #0x1\n"
+ "ld1 { v25.b }[4], [x22], #0x1\n"
"b 19f\n"
"17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ldr h22, [x15], #0x2\n"
- "ldr h19, [x14], #0x2\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h5, [x12], #0x2\n"
- "ldr h27, [x10], #0x2\n"
- "ldr h24, [x9], #0x2\n"
- "ldr h2, [x26], #0x2\n"
- "ldr h9, [x21], #0x2\n"
+ "ldr h10, [x15], #0x2\n"
+ "ldr h29, [x14], #0x2\n"
+ "ldr h27, [x27], #0x2\n"
+ "ldr h6, [x26], #0x2\n"
+ "ldr h5, [x25], #0x2\n"
+ "ldr h18, [x24], #0x2\n"
+ "ldr h14, [x23], #0x2\n"
+ "ldr h25, [x22], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[2], [x15], #0x1\n"
- "ld1 { v19.b }[2], [x14], #0x1\n"
- "ld1 { v0.b }[2], [x13], #0x1\n"
- "ld1 { v5.b }[2], [x12], #0x1\n"
- "ld1 { v27.b }[2], [x10], #0x1\n"
- "ld1 { v24.b }[2], [x9], #0x1\n"
- "ld1 { v2.b }[2], [x26], #0x1\n"
- "ld1 { v9.b }[2], [x21], #0x1\n"
+ "ld1 { v10.b }[2], [x15], #0x1\n"
+ "ld1 { v29.b }[2], [x14], #0x1\n"
+ "ld1 { v27.b }[2], [x27], #0x1\n"
+ "ld1 { v6.b }[2], [x26], #0x1\n"
+ "ld1 { v5.b }[2], [x25], #0x1\n"
+ "ld1 { v18.b }[2], [x24], #0x1\n"
+ "ld1 { v14.b }[2], [x23], #0x1\n"
+ "ld1 { v25.b }[2], [x22], #0x1\n"
"b 19f\n"
"18:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b22, [x15], #0x1\n"
- "ldr b19, [x14], #0x1\n"
- "ldr b0, [x13], #0x1\n"
- "ldr b5, [x12], #0x1\n"
- "ldr b27, [x10], #0x1\n"
- "ldr b24, [x9], #0x1\n"
- "ldr b2, [x26], #0x1\n"
- "ldr b9, [x21], #0x1\n"
+ "ldr b10, [x15], #0x1\n"
+ "ldr b29, [x14], #0x1\n"
+ "ldr b27, [x27], #0x1\n"
+ "ldr b6, [x26], #0x1\n"
+ "ldr b5, [x25], #0x1\n"
+ "ldr b18, [x24], #0x1\n"
+ "ldr b14, [x23], #0x1\n"
+ "ldr b25, [x22], #0x1\n"
"19:" // Oddments: Load (B): Bit 3: End
"ldr q20, [%x[params], #0x10]\n"
- "ldr q6, [%x[params], #0x20]\n"
- "zip2 v1.16b, v26.16b, v3.16b\n"
- "zip1 v26.16b, v26.16b, v3.16b\n"
- "ldr q4, [%x[params], #0x30]\n"
- "zip1 v18.16b, v23.16b, v10.16b\n"
- "zip2 v30.16b, v15.16b, v7.16b\n"
+ "ldr q17, [%x[params], #0x20]\n"
+ "zip2 v26.16b, v1.16b, v28.16b\n"
+ "zip1 v1.16b, v1.16b, v28.16b\n"
+ "ldr q30, [%x[params], #0x30]\n"
+ "zip1 v19.16b, v21.16b, v4.16b\n"
+ "zip2 v23.16b, v13.16b, v0.16b\n"
"cmp x20, #0x4\n"
- "zip1 v15.16b, v15.16b, v7.16b\n"
- "zip1 v29.16b, v25.16b, v8.16b\n"
- "zip2 v8.16b, v25.16b, v8.16b\n"
- "zip2 v10.16b, v23.16b, v10.16b\n"
- "zip2 v23.16b, v26.16b, v18.16b\n"
- "zip1 v26.16b, v26.16b, v18.16b\n"
- "zip2 v28.16b, v22.16b, v0.16b\n"
- "zip1 v22.16b, v22.16b, v0.16b\n"
- "zip1 v21.16b, v19.16b, v5.16b\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4e9a9591 // sdot v17.4s, v12.16b, v26.16b\n"
- "zip2 v25.16b, v15.16b, v29.16b\n"
- "zip1 v15.16b, v15.16b, v29.16b\n"
- "zip1 v7.16b, v30.16b, v8.16b\n"
- "zip2 v8.16b, v30.16b, v8.16b\n"
+ "zip1 v13.16b, v13.16b, v0.16b\n"
+ "zip1 v22.16b, v2.16b, v9.16b\n"
+ "zip2 v9.16b, v2.16b, v9.16b\n"
+ "zip2 v4.16b, v21.16b, v4.16b\n"
+ "zip2 v21.16b, v1.16b, v19.16b\n"
+ "zip1 v1.16b, v1.16b, v19.16b\n"
+ "zip2 v16.16b, v10.16b, v27.16b\n"
+ "zip1 v10.16b, v10.16b, v27.16b\n"
+ "zip1 v19.16b, v29.16b, v6.16b\n"
+ "movi v8.4s, #0x0\n"
+ "zip2 v2.16b, v13.16b, v22.16b\n"
+ "zip1 v13.16b, v13.16b, v22.16b\n"
+ "zip1 v0.16b, v23.16b, v9.16b\n"
+ "zip2 v9.16b, v23.16b, v9.16b\n"
"ldr q31, [%x[params], #0x0]\n"
- "zip2 v5.16b, v19.16b, v5.16b\n"
- "zip2 v30.16b, v27.16b, v2.16b\n"
- "zip1 v27.16b, v27.16b, v2.16b\n"
- "zip1 v18.16b, v24.16b, v9.16b\n"
- "zip2 v9.16b, v24.16b, v9.16b\n"
- "zip2 v19.16b, v22.16b, v21.16b\n"
- "zip1 v22.16b, v22.16b, v21.16b\n"
- "zip1 v3.16b, v1.16b, v10.16b\n"
- ".inst 0x4e969591 // sdot v17.4s, v12.16b, v22.16b\n"
- "zip2 v10.16b, v1.16b, v10.16b\n"
- "zip1 v0.16b, v28.16b, v5.16b\n"
- "zip2 v5.16b, v28.16b, v5.16b\n"
- "zip2 v24.16b, v27.16b, v18.16b\n"
- "zip1 v27.16b, v27.16b, v18.16b\n"
- "zip1 v2.16b, v30.16b, v9.16b\n"
- "mov v18.16b, v17.16b\n .inst 0x4e9b9592 // sdot v18.4s, v12.16b, v27.16b\n"
- "zip2 v9.16b, v30.16b, v9.16b\n"
- "mov v30.16b, v31.16b\n"
- ".inst 0x4e8f9591 // sdot v17.4s, v12.16b, v15.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- ".inst 0x4e8f969f // sdot v31.4s, v20.16b, v15.16b\n"
- ".inst 0x4e9a969d // sdot v29.4s, v20.16b, v26.16b\n"
- ".inst 0x4e9a94df // sdot v31.4s, v6.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "movi v1.4s, #0x0\n"
- "ext v15.16b, v15.16b, v15.16b, #0x1\n"
- ".inst 0x4e9a9581 // sdot v1.4s, v12.16b, v26.16b\n"
- ".inst 0x4e9694dd // sdot v29.4s, v6.16b, v22.16b\n"
- ".inst 0x4e96949f // sdot v31.4s, v4.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x4e8f969e // sdot v30.4s, v20.16b, v15.16b\n"
- ".inst 0x4e9a969c // sdot v28.4s, v20.16b, v26.16b\n"
- "mls v31.4s, v17.4s, v16.4s\n"
- ".inst 0x4e969581 // sdot v1.4s, v12.16b, v22.16b\n"
- ".inst 0x4e9b949d // sdot v29.4s, v4.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x4e9a94de // sdot v30.4s, v6.16b, v26.16b\n"
- "ldr q21, [%x[params], #0x50]\n"
- ".inst 0x4e9694dc // sdot v28.4s, v6.16b, v22.16b\n"
- "mls v29.4s, v18.4s, v16.4s\n"
- "mov v20.16b, v1.16b\n .inst 0x4e9b9594 // sdot v20.4s, v12.16b, v27.16b\n"
- ".inst 0x4e8f9581 // sdot v1.4s, v12.16b, v15.16b\n"
- "ldr q18, [%x[params], #0x40]\n"
- "sqrdmulh v31.4s, v31.4s, v18.4s\n"
- ".inst 0x4e96949e // sdot v30.4s, v4.16b, v22.16b\n"
- ".inst 0x4e9b949c // sdot v28.4s, v4.16b, v27.16b\n"
- "mls v30.4s, v1.4s, v16.4s\n"
+ ".inst 0x4e8195e8 // sdot v8.4s, v15.16b, v1.16b\n"
+ "zip2 v6.16b, v29.16b, v6.16b\n"
+ "zip2 v22.16b, v5.16b, v14.16b\n"
+ "zip1 v5.16b, v5.16b, v14.16b\n"
+ "zip1 v3.16b, v18.16b, v25.16b\n"
+ "zip2 v25.16b, v18.16b, v25.16b\n"
+ "zip2 v29.16b, v10.16b, v19.16b\n"
+ "zip1 v10.16b, v10.16b, v19.16b\n"
+ "zip1 v28.16b, v26.16b, v4.16b\n"
+ "zip2 v4.16b, v26.16b, v4.16b\n"
+ "zip1 v27.16b, v16.16b, v6.16b\n"
+ "zip2 v6.16b, v16.16b, v6.16b\n"
+ "zip2 v18.16b, v5.16b, v3.16b\n"
+ "zip1 v5.16b, v5.16b, v3.16b\n"
+ "zip1 v14.16b, v22.16b, v25.16b\n"
+ ".inst 0x4e8a95e8 // sdot v8.4s, v15.16b, v10.16b\n"
+ "zip2 v25.16b, v22.16b, v25.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
+ ".inst 0x4e8d969f // sdot v31.4s, v20.16b, v13.16b\n"
+ "movi v22.4s, #0x0\n"
+ ".inst 0x4e819683 // sdot v3.4s, v20.16b, v1.16b\n"
+ "mov v16.16b, v8.16b\n .inst 0x4e8595f0 // sdot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x4e8d95e8 // sdot v8.4s, v15.16b, v13.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x4e81963f // sdot v31.4s, v17.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ ".inst 0x4e8a9623 // sdot v3.4s, v17.16b, v10.16b\n"
+ ".inst 0x4e8d969a // sdot v26.4s, v20.16b, v13.16b\n"
+ ".inst 0x4e8195f6 // sdot v22.4s, v15.16b, v1.16b\n"
+ ".inst 0x4e8a97df // sdot v31.4s, v30.16b, v10.16b\n"
+ "ext v10.16b, v10.16b, v10.16b, #0x1\n"
+ ".inst 0x4e819697 // sdot v23.4s, v20.16b, v1.16b\n"
+ ".inst 0x4e8597c3 // sdot v3.4s, v30.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x4e81963a // sdot v26.4s, v17.16b, v1.16b\n"
+ "ldr q20, [%x[params], #0x50]\n"
+ ".inst 0x4e8a95f6 // sdot v22.4s, v15.16b, v10.16b\n"
+ "mls v31.4s, v8.4s, v24.4s\n"
+ ".inst 0x4e8a9637 // sdot v23.4s, v17.16b, v10.16b\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ "mov v19.16b, v22.16b\n .inst 0x4e8595f3 // sdot v19.4s, v15.16b, v5.16b\n"
+ ".inst 0x4e8d95f6 // sdot v22.4s, v15.16b, v13.16b\n"
+ "ldr q17, [%x[params], #0x40]\n"
"add %x[params], %x[params], #0x60\n"
- "mls v28.4s, v20.4s, v16.4s\n"
- "and v17.16b, v31.16b, v21.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v18.4s\n"
- "sqrdmulh v29.4s, v29.4s, v18.4s\n"
- "sqrdmulh v28.4s, v28.4s, v18.4s\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v17.16b, v30.16b, v21.16b\n"
- "and v18.16b, v29.16b, v21.16b\n"
- "and v26.16b, v28.16b, v21.16b\n"
+ ".inst 0x4e8a97da // sdot v26.4s, v30.16b, v10.16b\n"
+ ".inst 0x4e8597d7 // sdot v23.4s, v30.16b, v5.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "mls v26.4s, v22.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v20.16b\n"
+ "mls v23.4s, v19.4s, v24.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v17.4s\n"
+ "and v19.16b, v3.16b, v20.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v23.4s, v23.4s, v17.4s\n"
+ "and v17.16b, v26.16b, v20.16b\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v16.16b, v23.16b, v20.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v28.4s, v28.4s, v26.4s\n"
- "srshl v31.4s, v31.4s, v21.4s\n"
- "srshl v30.4s, v30.4s, v21.4s\n"
- "srshl v29.4s, v29.4s, v21.4s\n"
- "srshl v28.4s, v28.4s, v21.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
+ "sqadd v3.4s, v3.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v31.4s, v31.4s, v20.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "srshl v3.4s, v3.4s, v20.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "srshl v26.4s, v26.4s, v20.4s\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "srshl v23.4s, v23.4s, v20.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
"blt 20f\n"
- "str s31, [x25, x27]\n"
- "str s30, [x24, x27]\n"
- "str s29, [x23, x27]\n"
- "str s28, [x22, x27]\n"
+ "str s31, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s3, [x9, x12]\n"
+ "str s23, [x28, x12]\n"
"b 23f\n"
"20:" // Oddments: Unroll 0: Oddment store
- "add x25, x25, x27\n"
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 21f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v30.h }[0], [x24], #0x2\n"
- "st1 { v29.h }[0], [x23], #0x2\n"
- "st1 { v28.h }[0], [x22], #0x2\n"
+ "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v3.h }[0], [x9], #0x2\n"
+ "st1 { v23.h }[0], [x28], #0x2\n"
"tbz x20, #0, 22f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v30.b }[2], [x24], #0x1\n"
- "st1 { v29.b }[2], [x23], #0x1\n"
- "st1 { v28.b }[2], [x22], #0x1\n"
+ "st1 { v31.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v3.b }[2], [x9], #0x1\n"
+ "st1 { v23.b }[2], [x28], #0x1\n"
"b 22f\n"
"21:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v30.b }[0], [x24], #0x1\n"
- "st1 { v29.b }[0], [x23], #0x1\n"
- "st1 { v28.b }[0], [x22], #0x1\n"
+ "st1 { v31.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v3.b }[0], [x9], #0x1\n"
+ "st1 { v23.b }[0], [x28], #0x1\n"
"22:" // Oddments: Unroll 0: Oddment store: Bit 1: End
"23:" // Oddments: Unroll 0: After oddment store
"subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
"ldr q31, [%x[params], #0x0]\n"
- "ldr q27, [%x[params], #0x10]\n"
- "movi v1.4s, #0x0\n"
- ".inst 0x4e979581 // sdot v1.4s, v12.16b, v23.16b\n"
- "ldr q26, [%x[params], #0x20]\n"
- "ldr q22, [%x[params], #0x30]\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "ldr q4, [%x[params], #0x40]\n"
- "ldr q21, [%x[params], #0x50]\n"
- "mov v28.16b, v31.16b\n"
- ".inst 0x4e99977f // sdot v31.4s, v27.16b, v25.16b\n"
- ".inst 0x4e939581 // sdot v1.4s, v12.16b, v19.16b\n"
- ".inst 0x4e97977d // sdot v29.4s, v27.16b, v23.16b\n"
- "movi v20.4s, #0x0\n"
+ "ldr q5, [%x[params], #0x10]\n"
+ "movi v8.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "ldr q22, [%x[params], #0x20]\n"
+ "ldr q20, [%x[params], #0x30]\n"
"cmp x20, #0x4\n"
- ".inst 0x4e97975f // sdot v31.4s, v26.16b, v23.16b\n"
- "mov v18.16b, v1.16b\n .inst 0x4e989592 // sdot v18.4s, v12.16b, v24.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
+ "ldr q17, [%x[params], #0x40]\n"
+ "ldr q19, [%x[params], #0x50]\n"
+ ".inst 0x4e9595e8 // sdot v8.4s, v15.16b, v21.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e999581 // sdot v1.4s, v12.16b, v25.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- ".inst 0x4e99977e // sdot v30.4s, v27.16b, v25.16b\n"
- ".inst 0x4e97977c // sdot v28.4s, v27.16b, v23.16b\n"
- ".inst 0x4e979594 // sdot v20.4s, v12.16b, v23.16b\n"
- ".inst 0x4e93975d // sdot v29.4s, v26.16b, v19.16b\n"
- ".inst 0x4e9396df // sdot v31.4s, v22.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x4e97975e // sdot v30.4s, v26.16b, v23.16b\n"
- ".inst 0x4e93975c // sdot v28.4s, v26.16b, v19.16b\n"
- "mls v31.4s, v1.4s, v16.4s\n"
- ".inst 0x4e939594 // sdot v20.4s, v12.16b, v19.16b\n"
- ".inst 0x4e9896dd // sdot v29.4s, v22.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x4e9396de // sdot v30.4s, v22.16b, v19.16b\n"
- ".inst 0x4e9896dc // sdot v28.4s, v22.16b, v24.16b\n"
- "sqrdmulh v31.4s, v31.4s, v4.4s\n"
- "mov v17.16b, v20.16b\n .inst 0x4e989591 // sdot v17.4s, v12.16b, v24.16b\n"
- ".inst 0x4e999594 // sdot v20.4s, v12.16b, v25.16b\n"
- "mls v30.4s, v20.4s, v16.4s\n"
- "mls v29.4s, v18.4s, v16.4s\n"
- "mls v28.4s, v17.4s, v16.4s\n"
- "and v17.16b, v31.16b, v21.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v4.4s\n"
- "sqrdmulh v29.4s, v29.4s, v4.4s\n"
- "sqrdmulh v28.4s, v28.4s, v4.4s\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v19.16b, v30.16b, v21.16b\n"
- "and v18.16b, v29.16b, v21.16b\n"
- "and v17.16b, v28.16b, v21.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
+ ".inst 0x4e8294bf // sdot v31.4s, v5.16b, v2.16b\n"
+ ".inst 0x4e9594a3 // sdot v3.4s, v5.16b, v21.16b\n"
+ ".inst 0x4e9d95e8 // sdot v8.4s, v15.16b, v29.16b\n"
+ ".inst 0x4e9596df // sdot v31.4s, v22.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x4e9594b7 // sdot v23.4s, v5.16b, v21.16b\n"
+ ".inst 0x4e9595fe // sdot v30.4s, v15.16b, v21.16b\n"
+ ".inst 0x4e9d96c3 // sdot v3.4s, v22.16b, v29.16b\n"
+ "mov v16.16b, v8.16b\n .inst 0x4e9295f0 // sdot v16.4s, v15.16b, v18.16b\n"
+ ".inst 0x4e8295e8 // sdot v8.4s, v15.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x4e9d969f // sdot v31.4s, v20.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x4e8294ba // sdot v26.4s, v5.16b, v2.16b\n"
+ ".inst 0x4e929683 // sdot v3.4s, v20.16b, v18.16b\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ ".inst 0x4e9d96d7 // sdot v23.4s, v22.16b, v29.16b\n"
+ ".inst 0x4e9d95fe // sdot v30.4s, v15.16b, v29.16b\n"
+ "mls v31.4s, v8.4s, v24.4s\n"
+ ".inst 0x4e9596da // sdot v26.4s, v22.16b, v21.16b\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x4e929697 // sdot v23.4s, v20.16b, v18.16b\n"
+ "mov v16.16b, v30.16b\n .inst 0x4e9295f0 // sdot v16.4s, v15.16b, v18.16b\n"
+ ".inst 0x4e8295fe // sdot v30.4s, v15.16b, v2.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ ".inst 0x4e9d969a // sdot v26.4s, v20.16b, v29.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v19.16b\n"
+ "mls v26.4s, v30.4s, v24.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v17.4s\n"
+ "and v18.16b, v3.16b, v19.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
+ "sqadd v3.4s, v3.4s, v18.4s\n"
+ "srshl v31.4s, v31.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "srshl v31.4s, v31.4s, v21.4s\n"
- "srshl v30.4s, v30.4s, v21.4s\n"
- "srshl v29.4s, v29.4s, v21.4s\n"
- "srshl v28.4s, v28.4s, v21.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
"blt 24f\n"
- "str s31, [x25, x27]\n"
- "str s30, [x24, x27]\n"
- "str s29, [x23, x27]\n"
- "str s28, [x22, x27]\n"
+ "str s31, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s3, [x9, x12]\n"
+ "str s23, [x28, x12]\n"
"b 27f\n"
"24:" // Oddments: Unroll 1: Oddment store
- "add x25, x25, x27\n"
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 25f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v30.h }[0], [x24], #0x2\n"
- "st1 { v29.h }[0], [x23], #0x2\n"
- "st1 { v28.h }[0], [x22], #0x2\n"
+ "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v3.h }[0], [x9], #0x2\n"
+ "st1 { v23.h }[0], [x28], #0x2\n"
"tbz x20, #0, 26f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v30.b }[2], [x24], #0x1\n"
- "st1 { v29.b }[2], [x23], #0x1\n"
- "st1 { v28.b }[2], [x22], #0x1\n"
+ "st1 { v31.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v3.b }[2], [x9], #0x1\n"
+ "st1 { v23.b }[2], [x28], #0x1\n"
"b 26f\n"
"25:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v30.b }[0], [x24], #0x1\n"
- "st1 { v29.b }[0], [x23], #0x1\n"
- "st1 { v28.b }[0], [x22], #0x1\n"
+ "st1 { v31.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v3.b }[0], [x9], #0x1\n"
+ "st1 { v23.b }[0], [x28], #0x1\n"
"26:" // Oddments: Unroll 1: Oddment store: Bit 1: End
"27:" // Oddments: Unroll 1: After oddment store
"subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
"ldr q31, [%x[params], #0x0]\n"
- "ldr q25, [%x[params], #0x10]\n"
- "movi v24.4s, #0x0\n"
- ".inst 0x4e839598 // sdot v24.4s, v12.16b, v3.16b\n"
- "ldr q23, [%x[params], #0x20]\n"
- "ldr q22, [%x[params], #0x30]\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "ldr q21, [%x[params], #0x40]\n"
- "ldr q20, [%x[params], #0x50]\n"
- "mov v28.16b, v31.16b\n"
- ".inst 0x4e87973f // sdot v31.4s, v25.16b, v7.16b\n"
- ".inst 0x4e809598 // sdot v24.4s, v12.16b, v0.16b\n"
- ".inst 0x4e83973d // sdot v29.4s, v25.16b, v3.16b\n"
- "movi v19.4s, #0x0\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "movi v22.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "ldr q20, [%x[params], #0x20]\n"
+ "ldr q18, [%x[params], #0x30]\n"
"cmp x20, #0x4\n"
- ".inst 0x4e8396ff // sdot v31.4s, v23.16b, v3.16b\n"
- "mov v18.16b, v24.16b\n .inst 0x4e829592 // sdot v18.4s, v12.16b, v2.16b\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ "ldr q17, [%x[params], #0x40]\n"
+ "ldr q19, [%x[params], #0x50]\n"
+ ".inst 0x4e9c95f6 // sdot v22.4s, v15.16b, v28.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e879598 // sdot v24.4s, v12.16b, v7.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- ".inst 0x4e87973e // sdot v30.4s, v25.16b, v7.16b\n"
- ".inst 0x4e83973c // sdot v28.4s, v25.16b, v3.16b\n"
- ".inst 0x4e839593 // sdot v19.4s, v12.16b, v3.16b\n"
- ".inst 0x4e8096fd // sdot v29.4s, v23.16b, v0.16b\n"
- ".inst 0x4e8096df // sdot v31.4s, v22.16b, v0.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
+ ".inst 0x4e8097bf // sdot v31.4s, v29.16b, v0.16b\n"
+ ".inst 0x4e9c97a3 // sdot v3.4s, v29.16b, v28.16b\n"
+ ".inst 0x4e9b95f6 // sdot v22.4s, v15.16b, v27.16b\n"
+ ".inst 0x4e9c969f // sdot v31.4s, v20.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e9c97b7 // sdot v23.4s, v29.16b, v28.16b\n"
+ ".inst 0x4e9c95f5 // sdot v21.4s, v15.16b, v28.16b\n"
+ ".inst 0x4e9b9683 // sdot v3.4s, v20.16b, v27.16b\n"
+ "mov v16.16b, v22.16b\n .inst 0x4e8e95f0 // sdot v16.4s, v15.16b, v14.16b\n"
+ ".inst 0x4e8095f6 // sdot v22.4s, v15.16b, v0.16b\n"
"ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e8396fe // sdot v30.4s, v23.16b, v3.16b\n"
- ".inst 0x4e8096fc // sdot v28.4s, v23.16b, v0.16b\n"
- "mls v31.4s, v24.4s, v16.4s\n"
- ".inst 0x4e809593 // sdot v19.4s, v12.16b, v0.16b\n"
- ".inst 0x4e8296dd // sdot v29.4s, v22.16b, v2.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- ".inst 0x4e8096de // sdot v30.4s, v22.16b, v0.16b\n"
- ".inst 0x4e8296dc // sdot v28.4s, v22.16b, v2.16b\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "mov v17.16b, v19.16b\n .inst 0x4e829591 // sdot v17.4s, v12.16b, v2.16b\n"
- ".inst 0x4e879593 // sdot v19.4s, v12.16b, v7.16b\n"
- "mls v30.4s, v19.4s, v16.4s\n"
- "mls v29.4s, v18.4s, v16.4s\n"
- "mls v28.4s, v17.4s, v16.4s\n"
- "and v17.16b, v31.16b, v20.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v19.16b, v30.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v28.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ ".inst 0x4e9b965f // sdot v31.4s, v18.16b, v27.16b\n"
+ "ext v27.16b, v27.16b, v27.16b, #0x1\n"
+ ".inst 0x4e8097ba // sdot v26.4s, v29.16b, v0.16b\n"
+ ".inst 0x4e8e9643 // sdot v3.4s, v18.16b, v14.16b\n"
+ "ext v14.16b, v14.16b, v14.16b, #0x1\n"
+ ".inst 0x4e9b9697 // sdot v23.4s, v20.16b, v27.16b\n"
+ ".inst 0x4e9b95f5 // sdot v21.4s, v15.16b, v27.16b\n"
+ "mls v31.4s, v22.4s, v24.4s\n"
+ ".inst 0x4e9c969a // sdot v26.4s, v20.16b, v28.16b\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x4e8e9657 // sdot v23.4s, v18.16b, v14.16b\n"
+ "mov v16.16b, v21.16b\n .inst 0x4e8e95f0 // sdot v16.4s, v15.16b, v14.16b\n"
+ ".inst 0x4e8095f5 // sdot v21.4s, v15.16b, v0.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ ".inst 0x4e9b965a // sdot v26.4s, v18.16b, v27.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v19.16b\n"
+ "mls v26.4s, v21.4s, v24.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v17.4s\n"
+ "and v18.16b, v3.16b, v19.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
+ "sqadd v3.4s, v3.4s, v18.4s\n"
+ "srshl v31.4s, v31.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
"blt 28f\n"
- "str s31, [x25, x27]\n"
- "str s30, [x24, x27]\n"
- "str s29, [x23, x27]\n"
- "str s28, [x22, x27]\n"
+ "str s31, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s3, [x9, x12]\n"
+ "str s23, [x28, x12]\n"
"b 31f\n"
"28:" // Oddments: Unroll 2: Oddment store
- "add x25, x25, x27\n"
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 29f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v30.h }[0], [x24], #0x2\n"
- "st1 { v29.h }[0], [x23], #0x2\n"
- "st1 { v28.h }[0], [x22], #0x2\n"
+ "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v3.h }[0], [x9], #0x2\n"
+ "st1 { v23.h }[0], [x28], #0x2\n"
"tbz x20, #0, 30f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v30.b }[2], [x24], #0x1\n"
- "st1 { v29.b }[2], [x23], #0x1\n"
- "st1 { v28.b }[2], [x22], #0x1\n"
+ "st1 { v31.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v3.b }[2], [x9], #0x1\n"
+ "st1 { v23.b }[2], [x28], #0x1\n"
"b 30f\n"
"29:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v30.b }[0], [x24], #0x1\n"
- "st1 { v29.b }[0], [x23], #0x1\n"
- "st1 { v28.b }[0], [x22], #0x1\n"
+ "st1 { v31.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v3.b }[0], [x9], #0x1\n"
+ "st1 { v23.b }[0], [x28], #0x1\n"
"30:" // Oddments: Unroll 2: Oddment store: Bit 1: End
"31:" // Oddments: Unroll 2: After oddment store
"subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
"ldr q31, [%x[params], #0x0]\n"
- "ldr q23, [%x[params], #0x10]\n"
+ "ldr q1, [%x[params], #0x10]\n"
"movi v22.4s, #0x0\n"
- ".inst 0x4e8a9596 // sdot v22.4s, v12.16b, v10.16b\n"
- "ldr q21, [%x[params], #0x20]\n"
- "ldr q19, [%x[params], #0x30]\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "ldr q20, [%x[params], #0x40]\n"
- "ldr q26, [%x[params], #0x50]\n"
- "mov v28.16b, v31.16b\n"
- ".inst 0x4e8896ff // sdot v31.4s, v23.16b, v8.16b\n"
- ".inst 0x4e859596 // sdot v22.4s, v12.16b, v5.16b\n"
- ".inst 0x4e8a96fd // sdot v29.4s, v23.16b, v10.16b\n"
- "movi v18.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "ldr q20, [%x[params], #0x20]\n"
+ "ldr q18, [%x[params], #0x30]\n"
+ "ldr q17, [%x[params], #0x40]\n"
+ "ldr q19, [%x[params], #0x50]\n"
+ ".inst 0x4e8495f6 // sdot v22.4s, v15.16b, v4.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e8a96bf // sdot v31.4s, v21.16b, v10.16b\n"
- "mov v17.16b, v22.16b\n .inst 0x4e899591 // sdot v17.4s, v12.16b, v9.16b\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- ".inst 0x4e889596 // sdot v22.4s, v12.16b, v8.16b\n"
- "ext v8.16b, v8.16b, v8.16b, #0x1\n"
- ".inst 0x4e8896fe // sdot v30.4s, v23.16b, v8.16b\n"
- ".inst 0x4e8a96fc // sdot v28.4s, v23.16b, v10.16b\n"
- ".inst 0x4e8a9592 // sdot v18.4s, v12.16b, v10.16b\n"
- ".inst 0x4e8596bd // sdot v29.4s, v21.16b, v5.16b\n"
- ".inst 0x4e85967f // sdot v31.4s, v19.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x4e8a96be // sdot v30.4s, v21.16b, v10.16b\n"
- ".inst 0x4e8596bc // sdot v28.4s, v21.16b, v5.16b\n"
- "mls v31.4s, v22.4s, v16.4s\n"
- ".inst 0x4e859592 // sdot v18.4s, v12.16b, v5.16b\n"
- ".inst 0x4e89967d // sdot v29.4s, v19.16b, v9.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
+ ".inst 0x4e89943f // sdot v31.4s, v1.16b, v9.16b\n"
+ ".inst 0x4e849423 // sdot v3.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e8695f6 // sdot v22.4s, v15.16b, v6.16b\n"
+ ".inst 0x4e84969f // sdot v31.4s, v20.16b, v4.16b\n"
+ "ext v4.16b, v4.16b, v4.16b, #0x1\n"
+ ".inst 0x4e849437 // sdot v23.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e8495f5 // sdot v21.4s, v15.16b, v4.16b\n"
+ ".inst 0x4e869683 // sdot v3.4s, v20.16b, v6.16b\n"
+ "mov v16.16b, v22.16b\n .inst 0x4e9995f0 // sdot v16.4s, v15.16b, v25.16b\n"
+ ".inst 0x4e8995f6 // sdot v22.4s, v15.16b, v9.16b\n"
"ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x4e85967e // sdot v30.4s, v19.16b, v5.16b\n"
- ".inst 0x4e89967c // sdot v28.4s, v19.16b, v9.16b\n"
- "sqrdmulh v31.4s, v31.4s, v20.4s\n"
- "mov v7.16b, v18.16b\n .inst 0x4e899587 // sdot v7.4s, v12.16b, v9.16b\n"
- ".inst 0x4e889592 // sdot v18.4s, v12.16b, v8.16b\n"
- "mls v30.4s, v18.4s, v16.4s\n"
- "mls v29.4s, v17.4s, v16.4s\n"
- "mls v28.4s, v7.4s, v16.4s\n"
- "and v16.16b, v31.16b, v26.16b\n"
+ ".inst 0x4e86965f // sdot v31.4s, v18.16b, v6.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ ".inst 0x4e89943a // sdot v26.4s, v1.16b, v9.16b\n"
+ ".inst 0x4e999643 // sdot v3.4s, v18.16b, v25.16b\n"
+ "ext v25.16b, v25.16b, v25.16b, #0x1\n"
+ ".inst 0x4e869697 // sdot v23.4s, v20.16b, v6.16b\n"
+ ".inst 0x4e8695f5 // sdot v21.4s, v15.16b, v6.16b\n"
+ "mls v31.4s, v22.4s, v24.4s\n"
+ ".inst 0x4e84969a // sdot v26.4s, v20.16b, v4.16b\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x4e999657 // sdot v23.4s, v18.16b, v25.16b\n"
+ "mov v16.16b, v21.16b\n .inst 0x4e9995f0 // sdot v16.4s, v15.16b, v25.16b\n"
+ ".inst 0x4e8995f5 // sdot v21.4s, v15.16b, v9.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ ".inst 0x4e86965a // sdot v26.4s, v18.16b, v6.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v19.16b\n"
+ "mls v26.4s, v21.4s, v24.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v17.4s\n"
+ "and v18.16b, v3.16b, v19.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v20.4s\n"
- "sqrdmulh v29.4s, v29.4s, v20.4s\n"
- "sqrdmulh v28.4s, v28.4s, v20.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "and v18.16b, v30.16b, v26.16b\n"
- "and v17.16b, v29.16b, v26.16b\n"
- "and v16.16b, v28.16b, v26.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
+ "sqadd v3.4s, v3.4s, v18.4s\n"
+ "srshl v31.4s, v31.4s, v19.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v18.4s\n"
- "sqadd v29.4s, v29.4s, v17.4s\n"
- "sqadd v28.4s, v28.4s, v16.4s\n"
- "srshl v31.4s, v31.4s, v26.4s\n"
- "srshl v30.4s, v30.4s, v26.4s\n"
- "srshl v29.4s, v29.4s, v26.4s\n"
- "srshl v28.4s, v28.4s, v26.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
"32:" // Oddments: Unroll 3: Oddment store
- "add x25, x25, x27\n"
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 33f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v30.h }[0], [x24], #0x2\n"
- "st1 { v29.h }[0], [x23], #0x2\n"
- "st1 { v28.h }[0], [x22], #0x2\n"
+ "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v3.h }[0], [x9], #0x2\n"
+ "st1 { v23.h }[0], [x28], #0x2\n"
"tbz x20, #0, 34f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v30.b }[2], [x24], #0x1\n"
- "st1 { v29.b }[2], [x23], #0x1\n"
- "st1 { v28.b }[2], [x22], #0x1\n"
+ "st1 { v31.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v3.b }[2], [x9], #0x1\n"
+ "st1 { v23.b }[2], [x28], #0x1\n"
"b 34f\n"
"33:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v30.b }[0], [x24], #0x1\n"
- "st1 { v29.b }[0], [x23], #0x1\n"
- "st1 { v28.b }[0], [x22], #0x1\n"
+ "st1 { v31.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v3.b }[0], [x9], #0x1\n"
+ "st1 { v23.b }[0], [x28], #0x1\n"
"34:" // Oddments: Unroll 3: Oddment store: Bit 1: End
"35:" // End
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
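[Editor's note] The requantization epilogue that recurs in every unroll above (sqrdmulh, and/sshr/sqadd, srshl, add, smax/smin, uzp1) is the usual fixed-point output stage for these quantized kernels. As a reading aid, here is a minimal scalar sketch of what one 32-bit accumulator lane goes through, assuming a per-layer multiplier/shift pair and the Requantize32 c_offset/minval/maxval fields named in the asm operand list; requantize_lane is an illustrative helper, not a symbol from the library, and the sketch is not claimed bit-exact with the saturating vector instructions.

    // Scalar model of the vector requantization tail:
    //   SQRDMULH       - rounding doubling high multiply by the fixed-point multiplier
    //   AND/SSHR/SQADD - sign-dependent rounding fixup applied before the shift
    //   SRSHL          - rounding arithmetic shift by the (negated) exponent
    //   ADD/SMAX/SMIN  - add the output offset, clamp to [minval, maxval]
    //   UZP1           - narrow the 32-bit lanes back to bytes
    #include <algorithm>
    #include <cstdint>

    static inline int8_t requantize_lane(int32_t acc, int32_t multiplier, int right_shift,
                                         int32_t c_offset, int32_t minval, int32_t maxval)
    {
        // Rounding doubling high multiply (SQRDMULH), ignoring the saturating corner case.
        const int64_t prod = static_cast<int64_t>(acc) * multiplier;
        int32_t v = static_cast<int32_t>((prod + (INT64_C(1) << 30)) >> 31);
        // Rounding divide by 2^right_shift; the -1 nudge for negative values is what
        // the AND/SSHR #31/SQADD sequence contributes before SRSHL rounds half up.
        if (right_shift > 0)
        {
            const int32_t nudge = (v < 0) ? -1 : 0;
            v = (v + nudge + (1 << (right_shift - 1))) >> right_shift;
        }
        // Output zero point and activation clamp (ADD, SMAX, SMIN).
        v = std::min(maxval, std::max(minval, v + c_offset));
        return static_cast<int8_t>(v);
    }

In the generated code the shift vector holds the negated shift amount, so a single SRSHL serves every lane; the scalar form above makes the rounding direction explicit instead.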
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 4626007afa..5db236747a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
const int8_t *inptrs[16];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const int8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -91,1072 +91,1072 @@ void a64_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v14.16b }, [x20]\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v19.16b }, [x21]\n"
- "ld1r { v13.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v29.8h }, [x21]\n"
- "ld1r { v12.8h }, [x20]\n"
"mov x17, #0x0\n"
"mov x16, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
"add x15, %x[params], %[offsetof_Params_inptrs]\n"
"ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
"ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
"ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "ssubl v23.8h, v23.8b, v19.8b\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "ssubl v16.8h, v16.8b, v19.8b\n"
- "ssubl v1.8h, v1.8b, v19.8b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "ssubl v5.8h, v5.8b, v19.8b\n"
- "ssubl v26.8h, v26.8b, v19.8b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "ssubl v18.8h, v18.8b, v19.8b\n"
- "ssubl v31.8h, v31.8b, v19.8b\n"
- "ldr d20, [x14, #0x40]\n"
+ "lsr x11, x8, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v11.16b }, [x20]\n"
+ "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
+ "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v16.16b }, [x21]\n"
+ "ld1r { v12.8h }, [x20]\n"
+ "add x21, x23, %[offsetof_Requantize32_minval]\n"
+ "add x20, x23, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v14.8h }, [x21]\n"
+ "ld1r { v13.8h }, [x20]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x11, 3f\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "subs x11, x11, #0x1\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "ssubl v15.8h, v15.8b, v16.8b\n"
+ "ssubl v4.8h, v4.8b, v16.8b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v25.8h, v25.8b, v19.8b\n"
- "ssubl v20.8h, v20.8b, v19.8b\n"
- "ldr q9, [x20, #0x0]\n"
- "ldr q24, [x20, #0x10]\n"
+ "ssubl v5.8h, v5.8b, v16.8b\n"
+ "ssubl v3.8h, v3.8b, v16.8b\n"
+ "ssubl v25.8h, v25.8b, v16.8b\n"
+ "ssubl v10.8h, v10.8b, v16.8b\n"
+ "ssubl v6.8h, v6.8b, v16.8b\n"
+ "ssubl v7.8h, v7.8b, v16.8b\n"
+ "ldr q2, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "ldp x23, x22, [x15, #0x0]\n"
"add x20, x20, #0x20\n"
+ "ssubl v9.8h, v9.8b, v16.8b\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x23, x22, [x15, #0x0]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
"ldp x21, x20, [x15, #0x10]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d22, [x23, x17]\n"
- "ldr d4, [x22, x17]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d8, [x21, x17]\n"
- "ldr d27, [x20, x17]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ldr d19, [x23, x17]\n"
+ "ldr d21, [x22, x17]\n"
+ "ldr d29, [x21, x17]\n"
+ "ldr d22, [x20, x17]\n"
"ldr x20, [x15, #0x20]\n"
- "ldr d15, [x20, x17]\n"
- "ssubl v22.8h, v22.8b, v14.8b\n"
- "ssubl v4.8h, v4.8b, v14.8b\n"
- "ssubl v8.8h, v8.8b, v14.8b\n"
- "ssubl v27.8h, v27.8b, v14.8b\n"
- "ssubl v15.8h, v15.8b, v14.8b\n"
+ "ssubl v19.8h, v19.8b, v11.8b\n"
+ "ssubl v21.8h, v21.8b, v11.8b\n"
+ "ssubl v29.8h, v29.8b, v11.8b\n"
+ "ssubl v22.8h, v22.8b, v11.8b\n"
+ "ldr d20, [x20, x17]\n"
+ "ssubl v20.8h, v20.8b, v11.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q3, [x13, #0x0]\n"
- "ldr q17, [x12, #0x0]\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
- "ldr q21, [x13, #0x10]\n"
- "ldr q28, [x12, #0x10]\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "ldr x20, [x15, #0x28]\n"
- "ldr d11, [x20, x17]\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "ldr x20, [x15, #0x38]\n"
- "ldr d4, [x20, x17]\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "ssubl v11.8h, v11.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "ldr x20, [x15, #0x40]\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "ssubl v4.8h, v4.8b, v14.8b\n"
- "ldr x27, [x15, #0x48]\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "ssubl v22.8h, v22.8b, v14.8b\n"
- "ldr x26, [x15, #0x50]\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "ldr d8, [x20, x17]\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "ssubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
+ "ldr q17, [x13, #0x0]\n"
+ "ldr q26, [x12, #0x0]\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "ldr q28, [x13, #0x10]\n"
+ "ldr q23, [x12, #0x10]\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "ldr x24, [x15, #0x28]\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "ldr x23, [x15, #0x38]\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "ldr x22, [x15, #0x30]\n"
+ "ldr x21, [x15, #0x40]\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
+ "ldr x26, [x15, #0x48]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "ldr d21, [x24, x17]\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "ldr d29, [x21, x17]\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
"ldr x25, [x15, #0x58]\n"
"ldr x24, [x15, #0x60]\n"
- "smlal v2.4s, v11.4h, v31.4h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "ssubl v21.8h, v21.8b, v11.8b\n"
"ldr x23, [x15, #0x68]\n"
+ "ssubl v18.8h, v18.8b, v11.8b\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
"ldr x22, [x15, #0x70]\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
- "smlal v9.4s, v4.4h, v16.4h\n"
"ldr x21, [x15, #0x78]\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "ssubl v19.8h, v19.8b, v11.8b\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "ldr d22, [x26, x17]\n"
+ "smlal v0.4s, v21.4h, v6.4h\n"
+ "smlal2 v24.4s, v21.8h, v6.8h\n"
+ "ldr d21, [x20, x17]\n"
+ "ssubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "ldr d27, [x27, x17]\n"
- "smlal2 v30.4s, v11.8h, v31.8h\n"
- "ldr d11, [x26, x17]\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "ssubl v27.8h, v27.8b, v14.8b\n"
"add x14, x14, #0x48\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal v10.4s, v22.4h, v20.4h\n"
- "ssubl v11.8h, v11.8b, v14.8b\n"
- "subs x8, x8, #0x1\n"
- "smlal2 v24.4s, v4.8h, v16.8h\n"
- "smlal v9.4s, v8.4h, v1.4h\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "ssubl v22.8h, v22.8b, v11.8b\n"
+ "subs x11, x11, #0x1\n"
+ "smlal v31.4s, v19.4h, v9.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "ssubl v21.8h, v21.8b, v11.8b\n"
"add x13, x13, #0x20\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "ldr d20, [x25, x17]\n"
"add x12, x12, #0x20\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "ldr d15, [x25, x17]\n"
- "ssubl v15.8h, v15.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v20.8h\n"
- "ldr d22, [x24, x17]\n"
- "smlal v7.4s, v4.4h, v23.4h\n"
- "ssubl v22.8h, v22.8b, v14.8b\n"
- "smlal v2.4s, v27.4h, v18.4h\n"
- "smlal v10.4s, v27.4h, v26.4h\n"
- "smlal2 v24.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v20.4h\n"
- "smlal2 v0.4s, v4.8h, v23.8h\n"
- "ldr d4, [x23, x17]\n"
- "smlal2 v30.4s, v27.8h, v18.8h\n"
- "ssubl v4.8h, v4.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v26.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "ssubl v26.8h, v26.8b, v14.8b\n"
- "smlal v2.4s, v11.4h, v23.4h\n"
- "smlal v10.4s, v15.4h, v1.4h\n"
- "smlal2 v24.4s, v27.8h, v20.8h\n"
- "smlal v9.4s, v11.4h, v5.4h\n"
- "smlal2 v0.4s, v8.8h, v16.8h\n"
- "ldr d8, [x21, x17]\n"
- "smlal2 v30.4s, v11.8h, v23.8h\n"
- "ssubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v15.8h, v1.8h\n"
- "smlal v7.4s, v27.4h, v25.4h\n"
+ "smlal v2.4s, v18.4h, v4.4h\n"
+ "smlal2 v1.4s, v18.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v9.8h\n"
+ "ldr d19, [x24, x17]\n"
+ "smlal v8.4s, v18.4h, v15.4h\n"
+ "smlal v31.4s, v22.4h, v25.4h\n"
+ "ssubl v20.8h, v20.8b, v11.8b\n"
+ "smlal2 v30.4s, v18.8h, v15.8h\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v0.4s, v22.4h, v10.4h\n"
+ "smlal2 v24.4s, v22.8h, v10.8h\n"
+ "smlal v2.4s, v29.4h, v5.4h\n"
+ "smlal2 v1.4s, v29.8h, v5.8h\n"
+ "ssubl v19.8h, v19.8b, v11.8b\n"
+ "smlal2 v27.4s, v22.8h, v25.8h\n"
+ "ldr d25, [x22, x17]\n"
+ "smlal v8.4s, v29.4h, v4.4h\n"
+ "ssubl v18.8h, v18.8b, v11.8b\n"
+ "smlal v31.4s, v20.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d4, [x21, x17]\n"
"add x17, x17, #0x8\n"
- "smlal v2.4s, v22.4h, v5.4h\n"
- "smlal v10.4s, v4.4h, v18.4h\n"
- "smlal2 v24.4s, v11.8h, v5.8h\n"
- "smlal v9.4s, v22.4h, v31.4h\n"
- "sqrdmulh v9.4s, v9.4s, v3.4s\n"
- "smlal2 v0.4s, v27.8h, v25.8h\n"
- "smlal2 v30.4s, v22.8h, v5.8h\n"
- "and v27.16b, v9.16b, v17.16b\n"
- "smlal2 v6.4s, v4.8h, v18.8h\n"
- "smlal v7.4s, v15.4h, v18.4h\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "smlal v2.4s, v26.4h, v25.4h\n"
- "smlal v10.4s, v26.4h, v31.4h\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
- "smlal2 v24.4s, v22.8h, v31.8h\n"
- "smlal2 v0.4s, v15.8h, v18.8h\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- "smlal2 v30.4s, v26.8h, v25.8h\n"
- "smlal2 v6.4s, v26.8h, v31.8h\n"
- "and v31.16b, v24.16b, v28.16b\n"
- "smlal v7.4s, v4.4h, v20.4h\n"
- "smlal v2.4s, v8.4h, v20.4h\n"
- "sqrdmulh v7.4s, v7.4s, v3.4s\n"
- "smlal v10.4s, v8.4h, v25.4h\n"
- "smlal2 v0.4s, v4.8h, v20.8h\n"
- "sqrdmulh v2.4s, v2.4s, v3.4s\n"
- "smlal2 v30.4s, v8.8h, v20.8h\n"
- "smlal2 v6.4s, v8.8h, v25.8h\n"
- "sqrdmulh v10.4s, v10.4s, v3.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v22.16b, v7.16b, v17.16b\n"
- "sqrdmulh v0.4s, v0.4s, v21.4s\n"
- "and v3.16b, v2.16b, v17.16b\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "and v11.16b, v10.16b, v17.16b\n"
- "sqrdmulh v6.4s, v6.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v31.4s\n"
- "sshr v22.4s, v22.4s, #0x1f\n"
- "and v20.16b, v0.16b, v28.16b\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "and v31.16b, v30.16b, v28.16b\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "and v18.16b, v6.16b, v28.16b\n"
- "sqadd v7.4s, v7.4s, v22.4s\n"
+ "smlal v0.4s, v21.4h, v15.4h\n"
+ "smlal2 v24.4s, v21.8h, v15.8h\n"
+ "smlal v2.4s, v22.4h, v9.4h\n"
+ "smlal2 v1.4s, v22.8h, v9.8h\n"
+ "ssubl v25.8h, v25.8b, v11.8b\n"
+ "smlal2 v27.4s, v20.8h, v5.8h\n"
+ "smlal v8.4s, v22.4h, v7.4h\n"
+ "ssubl v4.8h, v4.8b, v11.8b\n"
+ "smlal v31.4s, v18.4h, v10.4h\n"
+ "smlal2 v30.4s, v22.8h, v7.8h\n"
+ "smlal v0.4s, v19.4h, v3.4h\n"
+ "smlal2 v24.4s, v19.8h, v3.8h\n"
+ "smlal v2.4s, v21.4h, v3.4h\n"
+ "smlal2 v1.4s, v21.8h, v3.8h\n"
+ "smlal2 v27.4s, v18.8h, v10.8h\n"
+ "smlal v8.4s, v20.4h, v10.4h\n"
+ "smlal v31.4s, v25.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v10.8h\n"
+ "smlal v0.4s, v25.4h, v7.4h\n"
+ "smlal2 v24.4s, v25.8h, v7.8h\n"
+ "smlal v2.4s, v19.4h, v6.4h\n"
+ "smlal2 v1.4s, v19.8h, v6.8h\n"
+ "smlal2 v27.4s, v25.8h, v6.8h\n"
+ "smlal v8.4s, v18.4h, v9.4h\n"
+ "smlal v31.4s, v4.4h, v7.4h\n"
+ "smlal2 v30.4s, v18.8h, v9.8h\n"
+ "smlal v0.4s, v4.4h, v9.4h\n"
+ "smlal2 v24.4s, v4.8h, v9.8h\n"
+ "sqrdmulh v2.4s, v2.4s, v17.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v28.4s\n"
+ "smlal2 v27.4s, v4.8h, v7.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v17.4s\n"
+ "and v18.16b, v2.16b, v26.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v28.4s\n"
+ "and v4.16b, v1.16b, v23.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v17.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v21.16b, v8.16b, v26.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v28.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v28.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v20.16b, v0.16b, v26.16b\n"
+ "sqadd v2.4s, v2.4s, v18.4s\n"
+ "and v19.16b, v31.16b, v26.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v18.16b, v30.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v4.4s\n"
"sshr v20.4s, v20.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v3.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v11.4s\n"
+ "and v17.16b, v24.16b, v23.16b\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v3.16b, v27.16b, v23.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v17.4s\n"
- "srshl v7.4s, v7.4s, v17.4s\n"
"sqadd v0.4s, v0.4s, v20.4s\n"
- "srshl v2.4s, v2.4s, v17.4s\n"
- "sqadd v30.4s, v30.4s, v31.4s\n"
- "srshl v10.4s, v10.4s, v17.4s\n"
- "sqadd v6.4s, v6.4s, v18.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v28.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v28.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v19.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v26.4s\n"
+ "srshl v8.4s, v8.4s, v26.4s\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "srshl v0.4s, v0.4s, v26.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "srshl v31.4s, v31.4s, v26.4s\n"
+ "sqadd v27.4s, v27.4s, v3.4s\n"
+ "srshl v1.4s, v1.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v28.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d9, [x11, x16]\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v23.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v23.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v23.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "str d7, [x10, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "str d2, [x9, x16]\n"
- "str d10, [x28, x16]\n"
- "ldr q9, [x20, #0x0]\n"
- "ldr q24, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str d2, [x10, x16]\n"
+ "str d8, [x9, x16]\n"
+ "str d0, [x28, x16]\n"
+ "str d31, [x27, x16]\n"
"add x16, x16, #0x8\n"
+ "ldr q2, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "add x20, x20, #0x20\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
- "ldr d20, [x14, #0x40]\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldp x23, x22, [x15, #0x0]\n"
- "ssubl v23.8h, v23.8b, v19.8b\n"
- "ssubl v16.8h, v16.8b, v19.8b\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ssubl v15.8h, v15.8b, v16.8b\n"
+ "ssubl v4.8h, v4.8b, v16.8b\n"
+ "ssubl v5.8h, v5.8b, v16.8b\n"
+ "ssubl v3.8h, v3.8b, v16.8b\n"
"ldp x21, x20, [x15, #0x10]\n"
- "ldr d22, [x23, x17]\n"
- "ssubl v1.8h, v1.8b, v19.8b\n"
- "ssubl v5.8h, v5.8b, v19.8b\n"
- "ldr d4, [x22, x17]\n"
- "ldr d8, [x21, x17]\n"
- "ssubl v26.8h, v26.8b, v19.8b\n"
- "ssubl v18.8h, v18.8b, v19.8b\n"
- "ldr d27, [x20, x17]\n"
+ "ssubl v25.8h, v25.8b, v16.8b\n"
+ "ssubl v10.8h, v10.8b, v16.8b\n"
+ "ssubl v6.8h, v6.8b, v16.8b\n"
+ "ssubl v7.8h, v7.8b, v16.8b\n"
+ "ldr d19, [x23, x17]\n"
+ "ldr d21, [x22, x17]\n"
+ "ldr d29, [x21, x17]\n"
+ "ldr d22, [x20, x17]\n"
+ "ssubl v9.8h, v9.8b, v16.8b\n"
"ldr x20, [x15, #0x20]\n"
- "ssubl v31.8h, v31.8b, v19.8b\n"
- "ssubl v25.8h, v25.8b, v19.8b\n"
- "ldr d15, [x20, x17]\n"
- "ssubl v20.8h, v20.8b, v19.8b\n"
- "ssubl v22.8h, v22.8b, v14.8b\n"
- "ssubl v4.8h, v4.8b, v14.8b\n"
- "ssubl v8.8h, v8.8b, v14.8b\n"
- "ssubl v27.8h, v27.8b, v14.8b\n"
- "ssubl v15.8h, v15.8b, v14.8b\n"
+ "ssubl v19.8h, v19.8b, v11.8b\n"
+ "ssubl v21.8h, v21.8b, v11.8b\n"
+ "ssubl v29.8h, v29.8b, v11.8b\n"
+ "ssubl v22.8h, v22.8b, v11.8b\n"
+ "ldr d20, [x20, x17]\n"
+ "ssubl v20.8h, v20.8b, v11.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q28, [x13, #0x0]\n"
- "ldr q17, [x12, #0x0]\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
- "ldr q21, [x13, #0x10]\n"
- "ldr q3, [x12, #0x10]\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "ldr x20, [x15, #0x28]\n"
- "ldr d11, [x20, x17]\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "ldr x20, [x15, #0x38]\n"
- "ldr d4, [x20, x17]\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "ssubl v11.8h, v11.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
+ "ldr q26, [x13, #0x0]\n"
+ "ldr q28, [x12, #0x0]\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "ldr q17, [x13, #0x10]\n"
+ "ldr q23, [x12, #0x10]\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "ldr x23, [x15, #0x28]\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "ldr x22, [x15, #0x38]\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "ldr x21, [x15, #0x30]\n"
"ldr x20, [x15, #0x40]\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "ssubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
"ldr x26, [x15, #0x48]\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "ssubl v22.8h, v22.8b, v14.8b\n"
"ldr x25, [x15, #0x50]\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "ldr d8, [x20, x17]\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "ssubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
+ "ldr d21, [x23, x17]\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "ldr d18, [x21, x17]\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "ldr d29, [x20, x17]\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
"ldr x24, [x15, #0x58]\n"
"ldr x23, [x15, #0x60]\n"
- "smlal v2.4s, v11.4h, v31.4h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "ssubl v21.8h, v21.8b, v11.8b\n"
"ldr x22, [x15, #0x68]\n"
+ "ssubl v19.8h, v19.8b, v11.8b\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
"ldr x21, [x15, #0x70]\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
- "smlal v9.4s, v4.4h, v16.4h\n"
"ldr x20, [x15, #0x78]\n"
- "tst x7, #0x7\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "ldr d27, [x26, x17]\n"
- "smlal2 v30.4s, v11.8h, v31.8h\n"
- "ldr d11, [x25, x17]\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "ssubl v27.8h, v27.8b, v14.8b\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "ssubl v18.8h, v18.8b, v11.8b\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "ldr d22, [x26, x17]\n"
+ "smlal v0.4s, v21.4h, v6.4h\n"
+ "smlal2 v24.4s, v21.8h, v6.8h\n"
+ "ldr d21, [x25, x17]\n"
+ "ssubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
+ "tst x8, #0x7\n"
"add x13, x13, #0x20\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal v10.4s, v22.4h, v20.4h\n"
- "ssubl v11.8h, v11.8b, v14.8b\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "ssubl v22.8h, v22.8b, v11.8b\n"
"add x12, x12, #0x20\n"
- "smlal2 v24.4s, v4.8h, v16.8h\n"
- "smlal v9.4s, v8.4h, v1.4h\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "ldr d15, [x24, x17]\n"
- "ssubl v15.8h, v15.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v20.8h\n"
- "ldr d22, [x23, x17]\n"
- "smlal v7.4s, v4.4h, v23.4h\n"
- "ssubl v22.8h, v22.8b, v14.8b\n"
- "smlal v2.4s, v27.4h, v18.4h\n"
- "smlal v10.4s, v27.4h, v26.4h\n"
- "smlal2 v24.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v20.4h\n"
- "smlal2 v0.4s, v4.8h, v23.8h\n"
- "ldr d4, [x22, x17]\n"
- "smlal2 v30.4s, v27.8h, v18.8h\n"
- "ssubl v4.8h, v4.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v26.8h\n"
- "ldr d26, [x21, x17]\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "ssubl v26.8h, v26.8b, v14.8b\n"
- "smlal v2.4s, v11.4h, v23.4h\n"
- "smlal v10.4s, v15.4h, v1.4h\n"
- "smlal2 v24.4s, v27.8h, v20.8h\n"
- "smlal v9.4s, v11.4h, v5.4h\n"
- "smlal2 v0.4s, v8.8h, v16.8h\n"
- "ldr d16, [x20, x17]\n"
- "smlal2 v30.4s, v11.8h, v23.8h\n"
- "ssubl v16.8h, v16.8b, v14.8b\n"
- "smlal2 v6.4s, v15.8h, v1.8h\n"
- "smlal v7.4s, v27.4h, v25.4h\n"
+ "smlal v31.4s, v18.4h, v9.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "ssubl v21.8h, v21.8b, v11.8b\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "ldr d20, [x24, x17]\n"
+ "smlal v2.4s, v19.4h, v4.4h\n"
+ "smlal2 v1.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v18.8h, v9.8h\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v8.4s, v19.4h, v15.4h\n"
+ "smlal v31.4s, v22.4h, v25.4h\n"
+ "ssubl v20.8h, v20.8b, v11.8b\n"
+ "smlal2 v30.4s, v19.8h, v15.8h\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v0.4s, v22.4h, v10.4h\n"
+ "smlal2 v24.4s, v22.8h, v10.8h\n"
+ "smlal v2.4s, v29.4h, v5.4h\n"
+ "smlal2 v1.4s, v29.8h, v5.8h\n"
+ "ssubl v18.8h, v18.8b, v11.8b\n"
+ "smlal2 v27.4s, v22.8h, v25.8h\n"
+ "ldr d25, [x21, x17]\n"
+ "smlal v8.4s, v29.4h, v4.4h\n"
+ "ssubl v19.8h, v19.8b, v11.8b\n"
+ "smlal v31.4s, v20.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x20, x17]\n"
"add x17, x17, #0x8\n"
- "smlal v2.4s, v22.4h, v5.4h\n"
- "smlal v10.4s, v4.4h, v18.4h\n"
- "smlal2 v24.4s, v11.8h, v5.8h\n"
- "smlal v9.4s, v22.4h, v31.4h\n"
- "sqrdmulh v9.4s, v9.4s, v28.4s\n"
- "smlal2 v0.4s, v27.8h, v25.8h\n"
- "smlal2 v30.4s, v22.8h, v5.8h\n"
- "and v1.16b, v9.16b, v17.16b\n"
- "smlal2 v6.4s, v4.8h, v18.8h\n"
- "smlal v7.4s, v15.4h, v18.4h\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "smlal v2.4s, v26.4h, v25.4h\n"
- "smlal v10.4s, v26.4h, v31.4h\n"
- "sqadd v9.4s, v9.4s, v1.4s\n"
- "smlal2 v24.4s, v22.8h, v31.8h\n"
- "smlal2 v0.4s, v15.8h, v18.8h\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- "smlal2 v30.4s, v26.8h, v25.8h\n"
- "smlal2 v6.4s, v26.8h, v31.8h\n"
- "and v31.16b, v24.16b, v3.16b\n"
- "smlal v7.4s, v4.4h, v20.4h\n"
- "smlal v2.4s, v16.4h, v20.4h\n"
- "sqrdmulh v7.4s, v7.4s, v28.4s\n"
- "smlal v10.4s, v16.4h, v25.4h\n"
- "smlal2 v0.4s, v4.8h, v20.8h\n"
- "sqrdmulh v2.4s, v2.4s, v28.4s\n"
- "smlal2 v30.4s, v16.8h, v20.8h\n"
- "smlal2 v6.4s, v16.8h, v25.8h\n"
- "sqrdmulh v10.4s, v10.4s, v28.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v22.16b, v7.16b, v17.16b\n"
- "sqrdmulh v0.4s, v0.4s, v21.4s\n"
- "and v15.16b, v2.16b, v17.16b\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "and v11.16b, v10.16b, v17.16b\n"
- "sqrdmulh v6.4s, v6.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v31.4s\n"
+ "smlal v0.4s, v21.4h, v15.4h\n"
+ "smlal2 v24.4s, v21.8h, v15.8h\n"
+ "smlal v2.4s, v22.4h, v9.4h\n"
+ "smlal2 v1.4s, v22.8h, v9.8h\n"
+ "ssubl v25.8h, v25.8b, v11.8b\n"
+ "smlal2 v27.4s, v20.8h, v5.8h\n"
+ "smlal v8.4s, v22.4h, v7.4h\n"
+ "ssubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v31.4s, v19.4h, v10.4h\n"
+ "smlal2 v30.4s, v22.8h, v7.8h\n"
+ "smlal v0.4s, v18.4h, v3.4h\n"
+ "smlal2 v24.4s, v18.8h, v3.8h\n"
+ "smlal v2.4s, v21.4h, v3.4h\n"
+ "smlal2 v1.4s, v21.8h, v3.8h\n"
+ "smlal2 v27.4s, v19.8h, v10.8h\n"
+ "smlal v8.4s, v20.4h, v10.4h\n"
+ "smlal v31.4s, v25.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v10.8h\n"
+ "smlal v0.4s, v25.4h, v7.4h\n"
+ "smlal2 v24.4s, v25.8h, v7.8h\n"
+ "smlal v2.4s, v18.4h, v6.4h\n"
+ "smlal2 v1.4s, v18.8h, v6.8h\n"
+ "smlal2 v27.4s, v25.8h, v6.8h\n"
+ "smlal v8.4s, v19.4h, v9.4h\n"
+ "smlal v31.4s, v29.4h, v7.4h\n"
+ "smlal2 v30.4s, v19.8h, v9.8h\n"
+ "smlal v0.4s, v29.4h, v9.4h\n"
+ "smlal2 v24.4s, v29.8h, v9.8h\n"
+ "sqrdmulh v2.4s, v2.4s, v26.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v17.4s\n"
+ "smlal2 v27.4s, v29.8h, v7.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v26.4s\n"
+ "and v25.16b, v2.16b, v28.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v26.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v17.4s\n"
+ "and v22.16b, v1.16b, v23.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "and v21.16b, v8.16b, v28.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v17.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v17.4s\n"
"sshr v22.4s, v22.4s, #0x1f\n"
- "and v18.16b, v0.16b, v3.16b\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "and v23.16b, v30.16b, v3.16b\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "and v21.16b, v6.16b, v3.16b\n"
- "sqadd v7.4s, v7.4s, v22.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v15.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v11.4s\n"
+ "and v20.16b, v0.16b, v28.16b\n"
+ "sqadd v2.4s, v2.4s, v25.4s\n"
+ "and v19.16b, v31.16b, v28.16b\n"
"sshr v21.4s, v21.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v17.4s\n"
- "srshl v7.4s, v7.4s, v17.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v2.4s, v2.4s, v17.4s\n"
- "sqadd v30.4s, v30.4s, v23.4s\n"
- "srshl v10.4s, v10.4s, v17.4s\n"
- "sqadd v6.4s, v6.4s, v21.4s\n"
- "srshl v24.4s, v24.4s, v3.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v3.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v3.4s\n"
+ "and v10.16b, v30.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v22.4s\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v17.16b, v24.16b, v23.16b\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v22.16b, v27.16b, v23.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v0.4s, v0.4s, v20.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v19.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v28.4s\n"
+ "srshl v8.4s, v8.4s, v28.4s\n"
+ "sqadd v30.4s, v30.4s, v10.4s\n"
+ "srshl v0.4s, v0.4s, v28.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "srshl v31.4s, v31.4s, v28.4s\n"
+ "sqadd v27.4s, v27.4s, v22.4s\n"
+ "srshl v1.4s, v1.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v3.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d9, [x11, x16]\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v23.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v23.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v23.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "str d7, [x10, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "str d2, [x9, x16]\n"
- "str d10, [x28, x16]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str d2, [x10, x16]\n"
+ "str d8, [x9, x16]\n"
+ "str d0, [x28, x16]\n"
+ "str d31, [x27, x16]\n"
"add x16, x16, #0x8\n"
"beq 64f\n"
"add x14, x14, #0x48\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v9.4s }, [x20], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v24.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v24.s }[2], [x20]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v2.4s }, [x20], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v1.d }[0], [x20], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v1.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v24.s }[0], [x20]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v1.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v2.d }[0], [x20], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v2.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v9.s }[0], [x20]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v2.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "ssubl v23.8h, v23.8b, v19.8b\n"
- "ssubl v16.8h, v16.8b, v19.8b\n"
- "ldr d20, [x14, #0x40]\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "ssubl v15.8h, v15.8b, v16.8b\n"
+ "ssubl v4.8h, v4.8b, v16.8b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldp x24, x23, [x15, #0x0]\n"
- "ssubl v1.8h, v1.8b, v19.8b\n"
- "ssubl v5.8h, v5.8b, v19.8b\n"
+ "ssubl v5.8h, v5.8b, v16.8b\n"
+ "ssubl v3.8h, v3.8b, v16.8b\n"
+ "ssubl v25.8h, v25.8b, v16.8b\n"
+ "ssubl v10.8h, v10.8b, v16.8b\n"
+ "ssubl v6.8h, v6.8b, v16.8b\n"
+ "ssubl v7.8h, v7.8b, v16.8b\n"
"ldp x22, x21, [x15, #0x10]\n"
- "ldr x20, [x15, #0x20]\n"
- "ssubl v26.8h, v26.8b, v19.8b\n"
- "ssubl v18.8h, v18.8b, v19.8b\n"
- "ssubl v31.8h, v31.8b, v19.8b\n"
- "ssubl v25.8h, v25.8b, v19.8b\n"
- "ssubl v20.8h, v20.8b, v19.8b\n"
+ "ssubl v9.8h, v9.8b, v16.8b\n"
"add x24, x24, x17\n"
"add x23, x23, x17\n"
+ "ldr x20, [x15, #0x20]\n"
"add x22, x22, x17\n"
"add x21, x21, x17\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v22.s }[0], [x24], #0x4\n"
- "ld1 { v4.s }[0], [x23], #0x4\n"
- "ld1 { v8.s }[0], [x22], #0x4\n"
- "ld1 { v27.s }[0], [x21], #0x4\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v8.h }[2], [x22], #0x2\n"
- "ld1 { v27.h }[2], [x21], #0x2\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v8.b }[6], [x22]\n"
- "ld1 { v27.b }[6], [x21]\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v19.s }[0], [x24], #0x4\n"
+ "ld1 { v21.s }[0], [x23], #0x4\n"
+ "ld1 { v29.s }[0], [x22], #0x4\n"
+ "ld1 { v22.s }[0], [x21], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v19.h }[2], [x24], #0x2\n"
+ "ld1 { v21.h }[2], [x23], #0x2\n"
+ "ld1 { v29.h }[2], [x22], #0x2\n"
+ "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[6], [x24]\n"
+ "ld1 { v21.b }[6], [x23]\n"
+ "ld1 { v29.b }[6], [x22]\n"
+ "ld1 { v22.b }[6], [x21]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v8.b }[4], [x22]\n"
- "ld1 { v27.b }[4], [x21]\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[4], [x24]\n"
+ "ld1 { v21.b }[4], [x23]\n"
+ "ld1 { v29.b }[4], [x22]\n"
+ "ld1 { v22.b }[4], [x21]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v22.h }[0], [x24], #0x2\n"
- "ld1 { v4.h }[0], [x23], #0x2\n"
- "ld1 { v8.h }[0], [x22], #0x2\n"
- "ld1 { v27.h }[0], [x21], #0x2\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v8.b }[2], [x22]\n"
- "ld1 { v27.b }[2], [x21]\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v19.h }[0], [x24], #0x2\n"
+ "ld1 { v21.h }[0], [x23], #0x2\n"
+ "ld1 { v29.h }[0], [x22], #0x2\n"
+ "ld1 { v22.h }[0], [x21], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[2], [x24]\n"
+ "ld1 { v21.b }[2], [x23]\n"
+ "ld1 { v29.b }[2], [x22]\n"
+ "ld1 { v22.b }[2], [x21]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[0], [x24]\n"
- "ld1 { v4.b }[0], [x23]\n"
- "ld1 { v8.b }[0], [x22]\n"
- "ld1 { v27.b }[0], [x21]\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[0], [x24]\n"
+ "ld1 { v21.b }[0], [x23]\n"
+ "ld1 { v29.b }[0], [x22]\n"
+ "ld1 { v22.b }[0], [x21]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "ssubl v22.8h, v22.8b, v14.8b\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
+ "ssubl v19.8h, v19.8b, v11.8b\n"
+ "ssubl v21.8h, v21.8b, v11.8b\n"
"ldr x20, [x15, #0x28]\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "ssubl v4.8h, v4.8b, v14.8b\n"
- "ssubl v8.8h, v8.8b, v14.8b\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
+ "ssubl v29.8h, v29.8b, v11.8b\n"
+ "ssubl v22.8h, v22.8b, v11.8b\n"
+ "ssubl v20.8h, v20.8b, v11.8b\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
"add x20, x20, x17\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "ssubl v27.8h, v27.8b, v14.8b\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "ssubl v15.8h, v15.8b, v14.8b\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "tbz x8, #2, 13f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 12f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x8, #1, 14f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"15:" // Oddments: Load (3, 0): Bit 2: End
- "ssubl v21.8h, v21.8b, v14.8b\n"
- "smlal v2.4s, v21.4h, v31.4h\n"
- "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x30]\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v0.4s, v17.4h, v6.4h\n"
+ "smlal2 v24.4s, v17.8h, v6.8h\n"
"add x20, x20, x17\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "tbz x8, #2, 17f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 16f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 18f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"19:" // Oddments: Load (3, 3): Bit 2: End
- "ssubl v28.8h, v28.8b, v14.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x38]\n"
- "smlal v10.4s, v28.4h, v20.4h\n"
- "smlal2 v6.4s, v28.8h, v20.8h\n"
+ "smlal v31.4s, v16.4h, v9.4h\n"
+ "smlal2 v27.4s, v16.8h, v9.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 21f\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 20f\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "tbz x8, #2, 21f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 20f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 1): Bit 2: Unset
- "tbz x7, #1, 22f\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x8, #1, 22f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"23:" // Oddments: Load (0, 1): Bit 2: End
- "ssubl v22.8h, v22.8b, v14.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x40]\n"
- "smlal v9.4s, v22.4h, v16.4h\n"
- "smlal2 v24.4s, v22.8h, v16.8h\n"
- "smlal v7.4s, v22.4h, v23.4h\n"
- "smlal2 v0.4s, v22.8h, v23.8h\n"
+ "smlal v2.4s, v17.4h, v4.4h\n"
+ "smlal2 v1.4s, v17.8h, v4.8h\n"
+ "smlal v8.4s, v17.4h, v15.4h\n"
+ "smlal2 v30.4s, v17.8h, v15.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (0, 2): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"27:" // Oddments: Load (0, 2): Bit 2: End
- "ssubl v21.8h, v21.8b, v14.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x48]\n"
- "smlal v9.4s, v21.4h, v1.4h\n"
- "smlal2 v24.4s, v21.8h, v1.8h\n"
- "smlal v7.4s, v21.4h, v16.4h\n"
- "smlal2 v0.4s, v21.8h, v16.8h\n"
+ "smlal v2.4s, v16.4h, v5.4h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal v8.4s, v16.4h, v4.4h\n"
+ "smlal2 v30.4s, v16.8h, v4.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "tbz x8, #2, 29f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 28f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x7, #1, 30f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 30f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ssubl v28.8h, v28.8b, v14.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x50]\n"
- "smlal v9.4s, v28.4h, v20.4h\n"
- "smlal2 v24.4s, v28.8h, v20.8h\n"
- "smlal v7.4s, v28.4h, v25.4h\n"
- "smlal2 v0.4s, v28.8h, v25.8h\n"
+ "smlal v2.4s, v17.4h, v9.4h\n"
+ "smlal2 v1.4s, v17.8h, v9.8h\n"
+ "smlal v8.4s, v17.4h, v7.4h\n"
+ "smlal2 v30.4s, v17.8h, v7.8h\n"
+ "smlal v0.4s, v17.4h, v10.4h\n"
+ "smlal2 v24.4s, v17.8h, v10.8h\n"
+ "smlal v31.4s, v17.4h, v25.4h\n"
"add x20, x20, x17\n"
- "smlal v2.4s, v28.4h, v18.4h\n"
- "smlal2 v30.4s, v28.8h, v18.8h\n"
- "smlal v10.4s, v28.4h, v26.4h\n"
- "smlal2 v6.4s, v28.8h, v26.8h\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "smlal2 v27.4s, v17.8h, v25.8h\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (1, 0): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"35:" // Oddments: Load (1, 0): Bit 2: End
- "ssubl v8.8h, v8.8b, v14.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x58]\n"
- "smlal v9.4s, v8.4h, v5.4h\n"
- "smlal2 v24.4s, v8.8h, v5.8h\n"
- "smlal v2.4s, v8.4h, v23.4h\n"
- "smlal2 v30.4s, v8.8h, v23.8h\n"
+ "smlal v2.4s, v16.4h, v3.4h\n"
+ "smlal2 v1.4s, v16.8h, v3.8h\n"
+ "smlal v0.4s, v16.4h, v15.4h\n"
+ "smlal2 v24.4s, v16.8h, v15.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"39:" // Oddments: Load (1, 3): Bit 2: End
- "ssubl v8.8h, v8.8b, v14.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x60]\n"
- "smlal v7.4s, v8.4h, v18.4h\n"
- "smlal2 v0.4s, v8.8h, v18.8h\n"
- "smlal v10.4s, v8.4h, v1.4h\n"
- "smlal2 v6.4s, v8.8h, v1.8h\n"
+ "smlal v8.4s, v17.4h, v10.4h\n"
+ "smlal2 v30.4s, v17.8h, v10.8h\n"
+ "smlal v31.4s, v17.4h, v5.4h\n"
+ "smlal2 v27.4s, v17.8h, v5.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v17.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v17.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[6], [x20]\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[4], [x20]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v17.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[2], [x20]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[0], [x20]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"43:" // Oddments: Load (2, 0): Bit 2: End
- "ssubl v17.8h, v17.8b, v14.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x68]\n"
- "smlal v9.4s, v17.4h, v31.4h\n"
- "smlal2 v24.4s, v17.8h, v31.8h\n"
- "smlal v2.4s, v17.4h, v5.4h\n"
- "smlal2 v30.4s, v17.8h, v5.8h\n"
+ "smlal v2.4s, v16.4h, v6.4h\n"
+ "smlal2 v1.4s, v16.8h, v6.8h\n"
+ "smlal v0.4s, v16.4h, v3.4h\n"
+ "smlal2 v24.4s, v16.8h, v3.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 45f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 44f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "tbz x8, #2, 45f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 44f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x8, #1, 46f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "ssubl v23.8h, v23.8b, v14.8b\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x70]\n"
- "smlal v7.4s, v23.4h, v20.4h\n"
- "smlal2 v0.4s, v23.8h, v20.8h\n"
- "smlal v10.4s, v23.4h, v18.4h\n"
- "smlal2 v6.4s, v23.8h, v18.8h\n"
+ "smlal v8.4s, v17.4h, v9.4h\n"
+ "smlal2 v30.4s, v17.8h, v9.8h\n"
+ "smlal v31.4s, v17.4h, v10.4h\n"
+ "smlal2 v27.4s, v17.8h, v10.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[6], [x20]\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[4], [x20]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v5.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[2], [x20]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[0], [x20]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ssubl v5.8h, v5.8b, v14.8b\n"
+ "ssubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x78]\n"
- "smlal v2.4s, v5.4h, v25.4h\n"
- "smlal2 v30.4s, v5.8h, v25.8h\n"
- "smlal v10.4s, v5.4h, v31.4h\n"
- "smlal2 v6.4s, v5.8h, v31.8h\n"
+ "smlal v0.4s, v16.4h, v7.4h\n"
+ "smlal2 v24.4s, v16.8h, v7.8h\n"
+ "smlal v31.4s, v16.4h, v6.4h\n"
+ "smlal2 v27.4s, v16.8h, v6.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ssubl v23.8h, v23.8b, v14.8b\n"
- "smlal v2.4s, v23.4h, v20.4h\n"
- "smlal2 v30.4s, v23.8h, v20.8h\n"
- "smlal v10.4s, v23.4h, v25.4h\n"
- "smlal2 v6.4s, v23.8h, v25.8h\n"
- "tbz x7, #2, 57f\n"
- "ld1 { v15.4s }, [x13], #0x10\n"
- "ld1 { v19.4s }, [x12], #0x10\n"
- "tbz x7, #1, 56f\n"
+ "ssubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v0.4s, v17.4h, v9.4h\n"
+ "smlal2 v24.4s, v17.8h, v9.8h\n"
+ "smlal v31.4s, v17.4h, v7.4h\n"
+ "smlal2 v27.4s, v17.8h, v7.8h\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v16.4s }, [x13], #0x10\n"
+ "ld1 { v23.4s }, [x12], #0x10\n"
+ "tbz x8, #1, 56f\n"
"ld1 { v18.d }[0], [x13], #0x8\n"
"ld1 { v22.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v18.s }[2], [x13]\n"
"ld1 { v22.s }[2], [x12]\n"
"b 59f\n"
"56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v18.s }[0], [x13]\n"
"ld1 { v22.s }[0], [x12]\n"
"b 59f\n"
"57:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 58f\n"
- "ld1 { v15.d }[0], [x13], #0x8\n"
- "ld1 { v19.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v15.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x12]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v16.d }[0], [x13], #0x8\n"
+ "ld1 { v23.d }[0], [x12], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v16.s }[2], [x13]\n"
+ "ld1 { v23.s }[2], [x12]\n"
"b 59f\n"
"58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v15.s }[0], [x13]\n"
- "ld1 { v19.s }[0], [x12]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v16.s }[0], [x13]\n"
+ "ld1 { v23.s }[0], [x12]\n"
"59:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v9.4s, v9.4s, v15.4s\n"
- "and v17.16b, v9.16b, v19.16b\n"
- "add x11, x11, x16\n"
+ "sqrdmulh v2.4s, v2.4s, v16.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v18.4s\n"
"add x10, x10, x16\n"
- "sqrdmulh v24.4s, v24.4s, v18.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
"add x9, x9, x16\n"
+ "sqrdmulh v8.4s, v8.4s, v16.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v16.4s\n"
"add x28, x28, x16\n"
- "and v20.16b, v24.16b, v22.16b\n"
- "sqrdmulh v7.4s, v7.4s, v15.4s\n"
- "sqrdmulh v2.4s, v2.4s, v15.4s\n"
- "sqrdmulh v10.4s, v10.4s, v15.4s\n"
- "sqadd v9.4s, v9.4s, v17.4s\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "and v21.16b, v7.16b, v19.16b\n"
- "sqrdmulh v0.4s, v0.4s, v18.4s\n"
- "and v15.16b, v2.16b, v19.16b\n"
+ "add x27, x27, x16\n"
+ "sqrdmulh v31.4s, v31.4s, v16.4s\n"
"sqrdmulh v30.4s, v30.4s, v18.4s\n"
- "and v23.16b, v10.16b, v19.16b\n"
- "sqrdmulh v6.4s, v6.4s, v18.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
+ "and v17.16b, v2.16b, v23.16b\n"
+ "and v16.16b, v1.16b, v22.16b\n"
+ "and v21.16b, v8.16b, v23.16b\n"
+ "and v20.16b, v0.16b, v23.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v18.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v18.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v21.4s, v21.4s, #0x1f\n"
- "and v18.16b, v0.16b, v22.16b\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "and v17.16b, v30.16b, v22.16b\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v28.16b, v6.16b, v22.16b\n"
- "sqadd v7.4s, v7.4s, v21.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v15.4s\n"
+ "and v19.16b, v30.16b, v22.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v18.16b, v24.16b, v22.16b\n"
+ "sqadd v2.4s, v2.4s, v17.4s\n"
+ "and v17.16b, v31.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v22.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v23.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v19.4s\n"
- "srshl v7.4s, v7.4s, v19.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v2.4s, v2.4s, v19.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqadd v6.4s, v6.4s, v28.4s\n"
- "srshl v24.4s, v24.4s, v22.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v22.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqadd v0.4s, v0.4s, v20.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v23.4s\n"
+ "srshl v8.4s, v8.4s, v23.4s\n"
+ "sqadd v31.4s, v31.4s, v17.4s\n"
+ "sqadd v30.4s, v30.4s, v19.4s\n"
+ "srshl v0.4s, v0.4s, v23.4s\n"
+ "sqadd v24.4s, v24.4s, v18.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v22.4s\n"
+ "srshl v31.4s, v31.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v22.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v22.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v22.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "tbz x7, #2, 61f\n"
- "st1 { v9.s }[0], [x11], #0x4\n"
- "st1 { v7.s }[0], [x10], #0x4\n"
- "st1 { v2.s }[0], [x9], #0x4\n"
- "st1 { v10.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 60f\n"
- "st1 { v9.h }[2], [x11], #0x2\n"
- "st1 { v7.h }[2], [x10], #0x2\n"
- "st1 { v2.h }[2], [x9], #0x2\n"
- "st1 { v10.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[6], [x11], #0x1\n"
- "st1 { v7.b }[6], [x10], #0x1\n"
- "st1 { v2.b }[6], [x9], #0x1\n"
- "st1 { v10.b }[6], [x28], #0x1\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "tbz x8, #2, 61f\n"
+ "st1 { v2.s }[0], [x10], #0x4\n"
+ "st1 { v8.s }[0], [x9], #0x4\n"
+ "st1 { v0.s }[0], [x28], #0x4\n"
+ "st1 { v31.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "st1 { v2.h }[2], [x10], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v0.h }[2], [x28], #0x2\n"
+ "st1 { v31.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[6], [x10], #0x1\n"
+ "st1 { v8.b }[6], [x9], #0x1\n"
+ "st1 { v0.b }[6], [x28], #0x1\n"
+ "st1 { v31.b }[6], [x27], #0x1\n"
"b 63f\n"
"60:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[4], [x11], #0x1\n"
- "st1 { v7.b }[4], [x10], #0x1\n"
- "st1 { v2.b }[4], [x9], #0x1\n"
- "st1 { v10.b }[4], [x28], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[4], [x10], #0x1\n"
+ "st1 { v8.b }[4], [x9], #0x1\n"
+ "st1 { v0.b }[4], [x28], #0x1\n"
+ "st1 { v31.b }[4], [x27], #0x1\n"
"b 63f\n"
"61:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "st1 { v9.h }[0], [x11], #0x2\n"
- "st1 { v7.h }[0], [x10], #0x2\n"
- "st1 { v2.h }[0], [x9], #0x2\n"
- "st1 { v10.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[2], [x11], #0x1\n"
- "st1 { v7.b }[2], [x10], #0x1\n"
- "st1 { v2.b }[2], [x9], #0x1\n"
- "st1 { v10.b }[2], [x28], #0x1\n"
+ "tbz x8, #1, 62f\n"
+ "st1 { v2.h }[0], [x10], #0x2\n"
+ "st1 { v8.h }[0], [x9], #0x2\n"
+ "st1 { v0.h }[0], [x28], #0x2\n"
+ "st1 { v31.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[2], [x10], #0x1\n"
+ "st1 { v8.b }[2], [x9], #0x1\n"
+ "st1 { v0.b }[2], [x28], #0x1\n"
+ "st1 { v31.b }[2], [x27], #0x1\n"
"b 63f\n"
"62:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[0], [x11], #0x1\n"
- "st1 { v7.b }[0], [x10], #0x1\n"
- "st1 { v2.b }[0], [x9], #0x1\n"
- "st1 { v10.b }[0], [x28], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[0], [x10], #0x1\n"
+ "st1 { v8.b }[0], [x9], #0x1\n"
+ "st1 { v0.b }[0], [x28], #0x1\n"
+ "st1 { v31.b }[0], [x27], #0x1\n"
"63:" // Oddments: Bit 2: End
"64:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index d98ab71cb8..d26a37c654 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
const int8_t *inptrs[25];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const int8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -100,1294 +100,1294 @@ void a64_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x2, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v6.16b }, [x20]\n"
+ "mov x3, #0x0\n"
+ "mov x4, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x17, x2, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v13.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_b_offset]\n"
"add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x21]\n"
- "ld1r { v13.8h }, [x20]\n"
+ "ld1r { v14.16b }, [x21]\n"
+ "ld1r { v25.8h }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_minval]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v17.8h }, [x21]\n"
- "ld1r { v24.8h }, [x20]\n"
- "mov x17, #0x0\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "ssubl v11.8h, v11.8b, v15.8b\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "ssubl v22.8h, v22.8b, v15.8b\n"
- "ssubl v14.8h, v14.8b, v15.8b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "ssubl v28.8h, v28.8b, v15.8b\n"
- "ssubl v18.8h, v18.8b, v15.8b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "ssubl v9.8h, v9.8b, v15.8b\n"
- "ssubl v26.8h, v26.8b, v15.8b\n"
- "ldr d4, [x14, #0x40]\n"
+ "ld1r { v23.8h }, [x21]\n"
+ "ld1r { v12.8h }, [x20]\n"
+ "ldp x16, x15, [x22, #0x0]\n"
+ "ldp x14, x13, [x22, #0x10]\n"
+ "cbz x17, 3f\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "subs x17, x17, #0x1\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "ssubl v16.8h, v16.8b, v14.8b\n"
+ "ssubl v11.8h, v11.8b, v14.8b\n"
+ "ldr d7, [x6, #0x40]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr q5, [x20, #0x0]\n"
- "ldr q3, [x20, #0x10]\n"
+ "ssubl v29.8h, v29.8b, v14.8b\n"
+ "ssubl v15.8h, v15.8b, v14.8b\n"
+ "ssubl v27.8h, v27.8b, v14.8b\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v22.8h, v22.8b, v14.8b\n"
+ "ssubl v5.8h, v5.8b, v14.8b\n"
+ "ldr q19, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
"add x20, x20, #0x20\n"
+ "ssubl v7.8h, v7.8b, v14.8b\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d25, [x27, x17]\n"
- "ldr d27, [x26, x17]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d1, [x25, x17]\n"
- "ldr d2, [x24, x17]\n"
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "ldr d12, [x23, x17]\n"
- "ldr d16, [x22, x17]\n"
- "ssubl v1.8h, v1.8b, v6.8b\n"
- "ssubl v2.8h, v2.8b, v6.8b\n"
- "ldr d23, [x21, x17]\n"
- "ldr d10, [x20, x17]\n"
- "ssubl v12.8h, v12.8b, v6.8b\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ssubl v23.8h, v23.8b, v6.8b\n"
- "ssubl v10.8h, v10.8b, v6.8b\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "ldr d26, [x27, x3]\n"
+ "ldr d31, [x26, x3]\n"
+ "ldr d20, [x25, x3]\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr d6, [x23, x3]\n"
+ "ldr d9, [x22, x3]\n"
+ "ldr d0, [x21, x3]\n"
+ "ldr d18, [x20, x3]\n"
+ "ssubl v26.8h, v26.8b, v13.8b\n"
+ "ssubl v31.8h, v31.8b, v13.8b\n"
+ "ssubl v20.8h, v20.8b, v13.8b\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "ssubl v6.8h, v6.8b, v13.8b\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "ssubl v0.8h, v0.8b, v13.8b\n"
+ "ssubl v18.8h, v18.8b, v13.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q30, [x13, #0x0]\n"
- "ldr q29, [x12, #0x0]\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x21, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "ldr x25, [x15, #0x60]\n"
- "ldr x24, [x15, #0x80]\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "ldr d27, [x21, x17]\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "ldr d25, [x20, x17]\n"
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal v20.4s, v27.4h, v28.4h\n"
- "smlal v19.4s, v25.4h, v18.4h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "ldr d1, [x25, x17]\n"
- "ssubl v1.8h, v1.8b, v6.8b\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "ldr d2, [x24, x17]\n"
- "ssubl v2.8h, v2.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v28.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v18.8h\n"
- "ldr d25, [x22, x17]\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v20.4s, v1.4h, v11.4h\n"
- "smlal v19.4s, v2.4h, v22.4h\n"
- "ldr x24, [x15, #0x50]\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "ldr d16, [x21, x17]\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "ldr d12, [x20, x17]\n"
- "ldr x23, [x15, #0x48]\n"
- "smlal2 v0.4s, v1.8h, v11.8h\n"
- "smlal2 v31.4s, v2.8h, v22.8h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal v20.4s, v27.4h, v18.4h\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x22, [x15, #0xa0]\n"
- "smlal v19.4s, v25.4h, v9.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "ldr d23, [x25, x17]\n"
- "ssubl v12.8h, v12.8b, v6.8b\n"
- "ssubl v23.8h, v23.8b, v6.8b\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "ldr d11, [x24, x17]\n"
- "ssubl v11.8h, v11.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v18.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v9.8h\n"
- "ldr d25, [x21, x17]\n"
- "ldr x21, [x15, #0xb0]\n"
- "smlal v21.4s, v16.4h, v18.4h\n"
- "smlal v20.4s, v12.4h, v22.4h\n"
- "smlal v19.4s, v23.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "ldr d10, [x20, x17]\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "ssubl v10.8h, v10.8b, v6.8b\n"
- "smlal v5.4s, v11.4h, v9.4h\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal2 v8.4s, v16.8h, v18.8h\n"
- "ldr d18, [x22, x17]\n"
- "ldr d16, [x21, x17]\n"
- "smlal2 v0.4s, v12.8h, v22.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal2 v31.4s, v23.8h, v14.8h\n"
- "ldr q14, [x13, #0x10]\n"
- "smlal v21.4s, v27.4h, v9.4h\n"
- "smlal v20.4s, v25.4h, v26.4h\n"
- "smlal v19.4s, v10.4h, v28.4h\n"
- "ssubl v18.8h, v18.8b, v6.8b\n"
- "ldr x21, [x15, #0xc0]\n"
- "smlal2 v3.4s, v11.8h, v9.8h\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v26.4h\n"
+ "ldr q17, [x7, #0x0]\n"
+ "ldr q30, [x8, #0x0]\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "ldr x24, [x5, #0x58]\n"
+ "ldr x23, [x5, #0x78]\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "ldr x22, [x5, #0x60]\n"
+ "ldr x21, [x5, #0x80]\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "ldr q26, [x7, #0x10]\n"
+ "ldr x20, [x5, #0x68]\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "ldr d31, [x24, x3]\n"
+ "ldr x12, [x5, #0x88]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "ldr x11, [x5, #0x40]\n"
+ "ldr x10, [x5, #0x70]\n"
+ "add x6, x6, #0x48\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x23, x3]\n"
+ "ldr x9, [x5, #0x98]\n"
+ "subs x17, x17, #0x1\n"
+ "ssubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x28, [x5, #0x50]\n"
+ "ldr x27, [x5, #0x48]\n"
+ "add x7, x7, #0x20\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "ldr d20, [x22, x3]\n"
+ "ldr x26, [x5, #0x90]\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "ldr x25, [x5, #0xa8]\n"
+ "ldr x24, [x5, #0xa0]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "ldr d31, [x21, x3]\n"
+ "ldr x23, [x5, #0xb0]\n"
+ "ssubl v20.8h, v20.8b, v13.8b\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "ldr d6, [x20, x3]\n"
+ "ldr x22, [x5, #0xb8]\n"
+ "smlal v3.4s, v28.4h, v27.4h\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "ldr x21, [x5, #0xc0]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v8.4s, v27.8h, v9.8h\n"
- "ldr d27, [x21, x17]\n"
- "smlal2 v0.4s, v25.8h, v26.8h\n"
- "ldr q25, [x12, #0x10]\n"
- "smlal2 v31.4s, v10.8h, v28.8h\n"
- "smlal v21.4s, v11.4h, v28.4h\n"
- "ssubl v22.8h, v22.8b, v6.8b\n"
- "add x14, x14, #0x48\n"
- "smlal v20.4s, v18.4h, v7.4h\n"
- "smlal v19.4s, v16.4h, v7.4h\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "add x17, x17, #0x8\n"
- "smlal2 v3.4s, v1.8h, v26.8h\n"
- "smlal v5.4s, v12.4h, v7.4h\n"
- "sqrdmulh v5.4s, v5.4s, v30.4s\n"
- "subs x8, x8, #0x1\n"
- "smlal2 v8.4s, v11.8h, v28.8h\n"
- "smlal2 v0.4s, v18.8h, v7.8h\n"
- "and v28.16b, v5.16b, v29.16b\n"
- "add x13, x13, #0x20\n"
- "smlal2 v31.4s, v16.8h, v7.8h\n"
- "smlal v21.4s, v2.4h, v7.4h\n"
+ "ssubl v31.8h, v31.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v27.8h\n"
+ "ldr d28, [x12, x3]\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "ldr d9, [x11, x3]\n"
+ "smlal v10.4s, v20.4h, v16.4h\n"
+ "ssubl v6.8h, v6.8b, v13.8b\n"
+ "smlal2 v21.4s, v20.8h, v16.8h\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "ldr d16, [x10, x3]\n"
+ "smlal v3.4s, v31.4h, v11.4h\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "smlal2 v24.4s, v31.8h, v11.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "ldr d0, [x9, x3]\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "smlal v10.4s, v6.4h, v27.4h\n"
+ "ssubl v16.8h, v16.8b, v13.8b\n"
+ "smlal2 v21.4s, v6.8h, v27.8h\n"
+ "ldr d6, [x28, x3]\n"
+ "smlal v3.4s, v28.4h, v2.4h\n"
+ "ssubl v0.8h, v0.8b, v13.8b\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal v8.4s, v9.4h, v27.4h\n"
+ "smlal2 v4.4s, v9.8h, v27.8h\n"
+ "ldr d9, [x27, x3]\n"
+ "ldr d27, [x26, x3]\n"
+ "ssubl v6.8h, v6.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr d28, [x25, x3]\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "ldr d18, [x24, x3]\n"
+ "smlal v10.4s, v16.4h, v11.4h\n"
+ "smlal2 v21.4s, v16.8h, v11.8h\n"
+ "ldr d11, [x23, x3]\n"
+ "smlal v3.4s, v0.4h, v29.4h\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "ssubl v27.8h, v27.8b, v13.8b\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v6.4h, v2.4h\n"
+ "smlal2 v24.4s, v0.8h, v29.8h\n"
+ "ldr d29, [x22, x3]\n"
+ "smlal2 v1.4s, v6.8h, v2.8h\n"
+ "ssubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "ssubl v11.8h, v11.8b, v13.8b\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "ldr d9, [x21, x3]\n"
+ "smlal v10.4s, v27.4h, v22.4h\n"
+ "smlal v3.4s, v28.4h, v15.4h\n"
+ "add x3, x3, #0x8\n"
+ "smlal v19.4s, v20.4h, v22.4h\n"
+ "smlal2 v21.4s, v27.8h, v22.8h\n"
+ "ldr q27, [x8, #0x10]\n"
+ "ssubl v29.8h, v29.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v15.8h\n"
+ "smlal2 v1.4s, v20.8h, v22.8h\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "add x8, x8, #0x20\n"
+ "smlal v8.4s, v6.4h, v15.4h\n"
+ "smlal2 v4.4s, v6.8h, v15.8h\n"
+ "smlal v10.4s, v18.4h, v5.4h\n"
+ "smlal v3.4s, v11.4h, v5.4h\n"
+ "smlal v19.4s, v16.4h, v5.4h\n"
+ "smlal2 v21.4s, v18.8h, v5.8h\n"
+ "smlal2 v24.4s, v11.8h, v5.8h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal v8.4s, v31.4h, v5.4h\n"
+ "smlal2 v4.4s, v31.8h, v5.8h\n"
+ "smlal v10.4s, v28.4h, v2.4h\n"
+ "smlal v3.4s, v29.4h, v22.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v17.4s\n"
+ "smlal2 v21.4s, v28.8h, v2.8h\n"
+ "smlal2 v24.4s, v29.8h, v22.8h\n"
+ "sqrdmulh v1.4s, v1.4s, v26.4s\n"
+ "smlal v8.4s, v0.4h, v7.4h\n"
+ "and v2.16b, v19.16b, v30.16b\n"
+ "smlal2 v4.4s, v0.8h, v7.8h\n"
+ "smlal v10.4s, v29.4h, v7.4h\n"
+ "smlal v3.4s, v9.4h, v7.4h\n"
+ "and v11.16b, v1.16b, v27.16b\n"
+ "smlal2 v21.4s, v29.8h, v7.8h\n"
+ "smlal2 v24.4s, v9.8h, v7.8h\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqrdmulh v8.4s, v8.4s, v17.4s\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "sqrdmulh v4.4s, v4.4s, v26.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v17.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "sqadd v19.4s, v19.4s, v2.4s\n"
+ "and v29.16b, v8.16b, v30.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "and v20.16b, v10.16b, v30.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v26.4s\n"
+ "and v28.16b, v3.16b, v30.16b\n"
+ "sqadd v1.4s, v1.4s, v11.4s\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v7.16b, v4.16b, v27.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v2.16b, v21.16b, v27.16b\n"
"sshr v28.4s, v28.4s, #0x1f\n"
- "add x12, x12, #0x20\n"
- "smlal v20.4s, v10.4h, v9.4h\n"
- "smlal v19.4s, v22.4h, v26.4h\n"
- "sqadd v5.4s, v5.4s, v28.4s\n"
- "smlal2 v3.4s, v12.8h, v7.8h\n"
- "smlal2 v8.4s, v2.8h, v7.8h\n"
- "sqrdmulh v3.4s, v3.4s, v14.4s\n"
- "smlal2 v0.4s, v10.8h, v9.8h\n"
- "smlal2 v31.4s, v22.8h, v26.8h\n"
- "and v16.16b, v3.16b, v25.16b\n"
- "smlal v21.4s, v23.4h, v4.4h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "sqrdmulh v21.4s, v21.4s, v30.4s\n"
- "smlal v19.4s, v27.4h, v4.4h\n"
- "smlal2 v8.4s, v23.8h, v4.8h\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "smlal2 v0.4s, v22.8h, v4.8h\n"
- "smlal2 v31.4s, v27.8h, v4.8h\n"
- "sqrdmulh v19.4s, v19.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v12.16b, v21.16b, v29.16b\n"
- "sqrdmulh v8.4s, v8.4s, v14.4s\n"
- "and v23.16b, v20.16b, v29.16b\n"
- "sqrdmulh v0.4s, v0.4s, v14.4s\n"
- "and v9.16b, v19.16b, v29.16b\n"
- "sqrdmulh v31.4s, v31.4s, v14.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v12.4s, v12.4s, #0x1f\n"
- "and v18.16b, v8.16b, v25.16b\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v22.16b, v0.16b, v25.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "and v16.16b, v31.16b, v25.16b\n"
- "sqadd v21.4s, v21.4s, v12.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v23.4s\n"
+ "and v22.16b, v24.16b, v27.16b\n"
+ "sqadd v8.4s, v8.4s, v29.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v3.4s, v3.4s, v28.4s\n"
"sshr v22.4s, v22.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v9.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v29.4s\n"
- "srshl v21.4s, v21.4s, v29.4s\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "srshl v20.4s, v20.4s, v29.4s\n"
- "sqadd v0.4s, v0.4s, v22.4s\n"
- "srshl v19.4s, v19.4s, v29.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v25.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v25.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v25.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v25.4s\n"
+ "srshl v19.4s, v19.4s, v30.4s\n"
+ "srshl v8.4s, v8.4s, v30.4s\n"
+ "sqadd v4.4s, v4.4s, v7.4s\n"
+ "srshl v10.4s, v10.4s, v30.4s\n"
+ "sqadd v21.4s, v21.4s, v2.4s\n"
+ "srshl v3.4s, v3.4s, v30.4s\n"
+ "sqadd v24.4s, v24.4s, v22.4s\n"
+ "srshl v1.4s, v1.4s, v27.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "str d5, [x11, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d21, [x10, x16]\n"
+ "srshl v4.4s, v4.4s, v27.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v27.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v27.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "str d20, [x9, x16]\n"
- "str d19, [x28, x16]\n"
- "ldr q5, [x20, #0x0]\n"
- "ldr q3, [x20, #0x10]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str d19, [x16, x4]\n"
+ "str d8, [x15, x4]\n"
+ "str d10, [x14, x4]\n"
+ "str d3, [x13, x4]\n"
+ "add x4, x4, #0x8\n"
+ "ldr q19, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d4, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ssubl v11.8h, v11.8b, v15.8b\n"
- "ssubl v22.8h, v22.8b, v15.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ssubl v14.8h, v14.8b, v15.8b\n"
- "ssubl v28.8h, v28.8b, v15.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr d25, [x27, x17]\n"
- "ssubl v18.8h, v18.8b, v15.8b\n"
- "ssubl v9.8h, v9.8b, v15.8b\n"
- "ldr d27, [x26, x17]\n"
- "ldr d1, [x25, x17]\n"
- "ssubl v26.8h, v26.8b, v15.8b\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ldr d2, [x24, x17]\n"
- "ldr d12, [x23, x17]\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "ldr d16, [x22, x17]\n"
- "ldr d23, [x21, x17]\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "ssubl v1.8h, v1.8b, v6.8b\n"
- "ldr d10, [x20, x17]\n"
- "ssubl v2.8h, v2.8b, v6.8b\n"
- "ssubl v12.8h, v12.8b, v6.8b\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ssubl v23.8h, v23.8b, v6.8b\n"
- "ssubl v10.8h, v10.8b, v6.8b\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldr d7, [x6, #0x40]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ssubl v16.8h, v16.8b, v14.8b\n"
+ "ssubl v11.8h, v11.8b, v14.8b\n"
+ "ssubl v29.8h, v29.8b, v14.8b\n"
+ "ssubl v15.8h, v15.8b, v14.8b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "ssubl v27.8h, v27.8b, v14.8b\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v22.8h, v22.8b, v14.8b\n"
+ "ssubl v5.8h, v5.8b, v14.8b\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "ssubl v7.8h, v7.8b, v14.8b\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "ldr d26, [x27, x3]\n"
+ "ldr d31, [x26, x3]\n"
+ "ldr d20, [x25, x3]\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr d6, [x23, x3]\n"
+ "ldr d9, [x22, x3]\n"
+ "ldr d0, [x21, x3]\n"
+ "ssubl v26.8h, v26.8b, v13.8b\n"
+ "ldr d18, [x20, x3]\n"
+ "ssubl v31.8h, v31.8b, v13.8b\n"
+ "ssubl v20.8h, v20.8b, v13.8b\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "ssubl v6.8h, v6.8b, v13.8b\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "ssubl v0.8h, v0.8b, v13.8b\n"
+ "ssubl v18.8h, v18.8b, v13.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q29, [x13, #0x0]\n"
- "ldr q30, [x12, #0x0]\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x21, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "ldr x25, [x15, #0x60]\n"
- "ldr x24, [x15, #0x80]\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "ldr d27, [x21, x17]\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "ldr d25, [x20, x17]\n"
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal v20.4s, v27.4h, v28.4h\n"
- "smlal v19.4s, v25.4h, v18.4h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "ldr d1, [x25, x17]\n"
- "ssubl v1.8h, v1.8b, v6.8b\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "ldr d2, [x24, x17]\n"
- "ssubl v2.8h, v2.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v28.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v18.8h\n"
- "ldr d25, [x22, x17]\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v20.4s, v1.4h, v11.4h\n"
- "smlal v19.4s, v2.4h, v22.4h\n"
- "ldr x24, [x15, #0x50]\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "ldr d16, [x21, x17]\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "ldr d12, [x20, x17]\n"
- "ldr x23, [x15, #0x48]\n"
- "smlal2 v0.4s, v1.8h, v11.8h\n"
- "smlal2 v31.4s, v2.8h, v22.8h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal v20.4s, v27.4h, v18.4h\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x22, [x15, #0xa0]\n"
- "smlal v19.4s, v25.4h, v9.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "ldr d23, [x25, x17]\n"
- "ssubl v12.8h, v12.8b, v6.8b\n"
- "ssubl v23.8h, v23.8b, v6.8b\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "ldr d11, [x24, x17]\n"
- "ssubl v11.8h, v11.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v18.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v9.8h\n"
- "ldr d25, [x21, x17]\n"
- "ldr x21, [x15, #0xb0]\n"
- "smlal v21.4s, v16.4h, v18.4h\n"
- "smlal v20.4s, v12.4h, v22.4h\n"
- "smlal v19.4s, v23.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "ldr d10, [x20, x17]\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "ssubl v10.8h, v10.8b, v6.8b\n"
- "smlal v5.4s, v11.4h, v9.4h\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal2 v8.4s, v16.8h, v18.8h\n"
- "ldr d16, [x22, x17]\n"
- "ldr d18, [x21, x17]\n"
- "smlal2 v0.4s, v12.8h, v22.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal2 v31.4s, v23.8h, v14.8h\n"
- "ldr q14, [x13, #0x10]\n"
- "smlal v21.4s, v27.4h, v9.4h\n"
- "smlal v20.4s, v25.4h, v26.4h\n"
- "smlal v19.4s, v10.4h, v28.4h\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v3.4s, v11.8h, v9.8h\n"
- "ssubl v18.8h, v18.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v26.4h\n"
- "tst x7, #0x7\n"
- "smlal2 v8.4s, v27.8h, v9.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal2 v0.4s, v25.8h, v26.8h\n"
- "ldr q25, [x12, #0x10]\n"
- "smlal2 v31.4s, v10.8h, v28.8h\n"
- "smlal v21.4s, v11.4h, v28.4h\n"
- "ssubl v22.8h, v22.8b, v6.8b\n"
- "add x17, x17, #0x8\n"
- "smlal v20.4s, v16.4h, v7.4h\n"
- "smlal v19.4s, v18.4h, v7.4h\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "add x13, x13, #0x20\n"
- "smlal2 v3.4s, v1.8h, v26.8h\n"
- "smlal v5.4s, v12.4h, v7.4h\n"
- "sqrdmulh v5.4s, v5.4s, v29.4s\n"
- "add x12, x12, #0x20\n"
- "smlal2 v8.4s, v11.8h, v28.8h\n"
- "smlal2 v0.4s, v16.8h, v7.8h\n"
- "and v16.16b, v5.16b, v30.16b\n"
- "smlal2 v31.4s, v18.8h, v7.8h\n"
- "smlal v21.4s, v2.4h, v7.4h\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "smlal v20.4s, v10.4h, v9.4h\n"
- "smlal v19.4s, v22.4h, v26.4h\n"
- "sqadd v5.4s, v5.4s, v16.4s\n"
- "smlal2 v3.4s, v12.8h, v7.8h\n"
- "smlal2 v8.4s, v2.8h, v7.8h\n"
- "sqrdmulh v3.4s, v3.4s, v14.4s\n"
- "smlal2 v0.4s, v10.8h, v9.8h\n"
- "smlal2 v31.4s, v22.8h, v26.8h\n"
- "and v16.16b, v3.16b, v25.16b\n"
- "smlal v21.4s, v23.4h, v4.4h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "sqrdmulh v21.4s, v21.4s, v29.4s\n"
- "smlal v19.4s, v27.4h, v4.4h\n"
- "smlal2 v8.4s, v23.8h, v4.8h\n"
- "sqrdmulh v20.4s, v20.4s, v29.4s\n"
- "smlal2 v0.4s, v22.8h, v4.8h\n"
- "smlal2 v31.4s, v27.8h, v4.8h\n"
- "sqrdmulh v19.4s, v19.4s, v29.4s\n"
+ "ldr q30, [x7, #0x0]\n"
+ "ldr q17, [x8, #0x0]\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "ldr x20, [x5, #0x58]\n"
+ "ldr x24, [x5, #0x78]\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "ldr x23, [x5, #0x60]\n"
+ "ldr x10, [x5, #0x80]\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "ldr q26, [x7, #0x10]\n"
+ "ldr x22, [x5, #0x68]\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "ldr d31, [x20, x3]\n"
+ "ldr x21, [x5, #0x88]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "ldr x20, [x5, #0x40]\n"
+ "ldr x9, [x5, #0x70]\n"
+ "tst x2, #0x7\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr x28, [x5, #0x98]\n"
+ "add x7, x7, #0x20\n"
+ "ssubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x27, [x5, #0x50]\n"
+ "ldr x26, [x5, #0x48]\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "ldr d20, [x23, x3]\n"
+ "ldr x25, [x5, #0x90]\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "ldr x24, [x5, #0xa8]\n"
+ "ldr x23, [x5, #0xa0]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "ldr d31, [x10, x3]\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "ldr d6, [x22, x3]\n"
+ "smlal v3.4s, v28.4h, v27.4h\n"
+ "ssubl v20.8h, v20.8b, v13.8b\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "ldr x22, [x5, #0xb0]\n"
+ "smlal2 v24.4s, v28.8h, v27.8h\n"
+ "ldr d28, [x21, x3]\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "ldr d9, [x20, x3]\n"
+ "ssubl v31.8h, v31.8b, v13.8b\n"
+ "ssubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "ldr x21, [x5, #0xb8]\n"
+ "smlal v10.4s, v20.4h, v16.4h\n"
+ "smlal2 v21.4s, v20.8h, v16.8h\n"
+ "ldr x20, [x5, #0xc0]\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "ldr d16, [x9, x3]\n"
+ "smlal v3.4s, v31.4h, v11.4h\n"
+ "smlal2 v24.4s, v31.8h, v11.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "ldr d0, [x28, x3]\n"
+ "smlal v10.4s, v6.4h, v27.4h\n"
+ "ssubl v16.8h, v16.8b, v13.8b\n"
+ "smlal2 v21.4s, v6.8h, v27.8h\n"
+ "ldr d6, [x27, x3]\n"
+ "smlal v8.4s, v9.4h, v27.4h\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal2 v4.4s, v9.8h, v27.8h\n"
+ "ldr d9, [x26, x3]\n"
+ "ldr d27, [x25, x3]\n"
+ "smlal v3.4s, v28.4h, v2.4h\n"
+ "ssubl v0.8h, v0.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr d28, [x24, x3]\n"
+ "ssubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v10.4s, v16.4h, v11.4h\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "ldr d18, [x23, x3]\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "ssubl v27.8h, v27.8b, v13.8b\n"
+ "smlal2 v21.4s, v16.8h, v11.8h\n"
+ "ldr d11, [x22, x3]\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v3.4s, v0.4h, v29.4h\n"
+ "smlal v19.4s, v6.4h, v2.4h\n"
+ "smlal2 v24.4s, v0.8h, v29.8h\n"
+ "ldr d29, [x21, x3]\n"
+ "ssubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v22.4h\n"
+ "smlal2 v1.4s, v6.8h, v2.8h\n"
+ "ssubl v11.8h, v11.8b, v13.8b\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "ldr d9, [x20, x3]\n"
+ "smlal2 v21.4s, v27.8h, v22.8h\n"
+ "ldr q27, [x8, #0x10]\n"
+ "smlal v3.4s, v28.4h, v15.4h\n"
+ "smlal v19.4s, v20.4h, v22.4h\n"
+ "ssubl v29.8h, v29.8b, v13.8b\n"
+ "add x3, x3, #0x8\n"
+ "smlal2 v24.4s, v28.8h, v15.8h\n"
+ "smlal v8.4s, v6.4h, v15.4h\n"
+ "add x8, x8, #0x20\n"
+ "smlal v10.4s, v18.4h, v5.4h\n"
+ "smlal2 v1.4s, v20.8h, v22.8h\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "smlal2 v4.4s, v6.8h, v15.8h\n"
+ "smlal2 v21.4s, v18.8h, v5.8h\n"
+ "smlal v3.4s, v11.4h, v5.4h\n"
+ "smlal v19.4s, v16.4h, v5.4h\n"
+ "smlal2 v24.4s, v11.8h, v5.8h\n"
+ "smlal v8.4s, v31.4h, v5.4h\n"
+ "smlal v10.4s, v28.4h, v2.4h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal2 v4.4s, v31.8h, v5.8h\n"
+ "smlal2 v21.4s, v28.8h, v2.8h\n"
+ "smlal v3.4s, v29.4h, v22.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v30.4s\n"
+ "smlal2 v24.4s, v29.8h, v22.8h\n"
+ "smlal v8.4s, v0.4h, v7.4h\n"
+ "smlal v10.4s, v29.4h, v7.4h\n"
+ "sqrdmulh v1.4s, v1.4s, v26.4s\n"
+ "and v5.16b, v19.16b, v17.16b\n"
+ "smlal2 v4.4s, v0.8h, v7.8h\n"
+ "smlal2 v21.4s, v29.8h, v7.8h\n"
+ "smlal v3.4s, v9.4h, v7.4h\n"
+ "smlal2 v24.4s, v9.8h, v7.8h\n"
+ "and v16.16b, v1.16b, v27.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqrdmulh v8.4s, v8.4s, v30.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v30.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v23.16b, v21.16b, v30.16b\n"
- "sqrdmulh v8.4s, v8.4s, v14.4s\n"
- "and v27.16b, v20.16b, v30.16b\n"
- "sqrdmulh v0.4s, v0.4s, v14.4s\n"
- "and v22.16b, v19.16b, v30.16b\n"
- "sqrdmulh v31.4s, v31.4s, v14.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v14.16b, v8.16b, v25.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v18.16b, v0.16b, v25.16b\n"
- "sshr v22.4s, v22.4s, #0x1f\n"
- "and v16.16b, v31.16b, v25.16b\n"
- "sqadd v21.4s, v21.4s, v23.4s\n"
- "sshr v14.4s, v14.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v27.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v22.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v26.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v30.4s\n"
+ "sqadd v19.4s, v19.4s, v5.4s\n"
+ "and v30.16b, v8.16b, v17.16b\n"
+ "and v20.16b, v10.16b, v17.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v26.4s\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v2.16b, v3.16b, v17.16b\n"
+ "and v11.16b, v4.16b, v27.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v9.16b, v21.16b, v27.16b\n"
+ "and v16.16b, v24.16b, v27.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "sqadd v8.4s, v8.4s, v30.4s\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v30.4s\n"
- "srshl v21.4s, v21.4s, v30.4s\n"
- "sqadd v8.4s, v8.4s, v14.4s\n"
- "srshl v20.4s, v20.4s, v30.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v19.4s, v19.4s, v30.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v25.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v25.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v25.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v25.4s\n"
+ "sqadd v3.4s, v3.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v17.4s\n"
+ "srshl v8.4s, v8.4s, v17.4s\n"
+ "sqadd v4.4s, v4.4s, v11.4s\n"
+ "srshl v10.4s, v10.4s, v17.4s\n"
+ "sqadd v21.4s, v21.4s, v9.4s\n"
+ "srshl v3.4s, v3.4s, v17.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v27.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "str d5, [x11, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d21, [x10, x16]\n"
+ "srshl v4.4s, v4.4s, v27.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v27.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v27.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "str d20, [x9, x16]\n"
- "str d19, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str d19, [x16, x4]\n"
+ "str d8, [x15, x4]\n"
+ "str d10, [x14, x4]\n"
+ "str d3, [x13, x4]\n"
+ "add x4, x4, #0x8\n"
"beq 88f\n"
- "add x14, x14, #0x48\n"
+ "add x6, x6, #0x48\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v5.4s }, [x20], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v3.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v3.s }[2], [x20]\n"
+ "tbz x2, #2, 5f\n"
+ "ld1 { v19.4s }, [x20], #0x10\n"
+ "tbz x2, #1, 4f\n"
+ "ld1 { v1.d }[0], [x20], #0x8\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v1.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v3.s }[0], [x20]\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v1.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v5.s }[2], [x20]\n"
+ "tbz x2, #1, 6f\n"
+ "ld1 { v19.d }[0], [x20], #0x8\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v19.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v5.s }[0], [x20]\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v19.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "ssubl v11.8h, v11.8b, v15.8b\n"
- "ssubl v22.8h, v22.8b, v15.8b\n"
- "ldr d4, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ssubl v14.8h, v14.8b, v15.8b\n"
- "ssubl v28.8h, v28.8b, v15.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ssubl v18.8h, v18.8b, v15.8b\n"
- "ssubl v9.8h, v9.8b, v15.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ssubl v26.8h, v26.8b, v15.8b\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "add x27, x27, x17\n"
- "add x26, x26, x17\n"
- "add x25, x25, x17\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v25.s }[0], [x27], #0x4\n"
- "ld1 { v27.s }[0], [x26], #0x4\n"
- "ld1 { v1.s }[0], [x25], #0x4\n"
- "ld1 { v2.s }[0], [x24], #0x4\n"
- "ld1 { v12.s }[0], [x23], #0x4\n"
- "ld1 { v16.s }[0], [x22], #0x4\n"
- "ld1 { v23.s }[0], [x21], #0x4\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v25.h }[2], [x27], #0x2\n"
- "ld1 { v27.h }[2], [x26], #0x2\n"
- "ld1 { v1.h }[2], [x25], #0x2\n"
- "ld1 { v2.h }[2], [x24], #0x2\n"
- "ld1 { v12.h }[2], [x23], #0x2\n"
- "ld1 { v16.h }[2], [x22], #0x2\n"
- "ld1 { v23.h }[2], [x21], #0x2\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[6], [x27]\n"
- "ld1 { v27.b }[6], [x26]\n"
- "ld1 { v1.b }[6], [x25]\n"
- "ld1 { v2.b }[6], [x24]\n"
- "ld1 { v12.b }[6], [x23]\n"
- "ld1 { v16.b }[6], [x22]\n"
- "ld1 { v23.b }[6], [x21]\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "ssubl v16.8h, v16.8b, v14.8b\n"
+ "ssubl v11.8h, v11.8b, v14.8b\n"
+ "ldr d7, [x6, #0x40]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
+ "ssubl v29.8h, v29.8b, v14.8b\n"
+ "ssubl v15.8h, v15.8b, v14.8b\n"
+ "ssubl v27.8h, v27.8b, v14.8b\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v22.8h, v22.8b, v14.8b\n"
+ "ssubl v5.8h, v5.8b, v14.8b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "ssubl v7.8h, v7.8b, v14.8b\n"
+ "add x27, x27, x3\n"
+ "add x26, x26, x3\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "add x25, x25, x3\n"
+ "add x24, x24, x3\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "add x23, x23, x3\n"
+ "add x22, x22, x3\n"
+ "add x21, x21, x3\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 9f\n"
+ "ld1 { v26.s }[0], [x27], #0x4\n"
+ "ld1 { v31.s }[0], [x26], #0x4\n"
+ "ld1 { v20.s }[0], [x25], #0x4\n"
+ "ld1 { v28.s }[0], [x24], #0x4\n"
+ "ld1 { v6.s }[0], [x23], #0x4\n"
+ "ld1 { v9.s }[0], [x22], #0x4\n"
+ "ld1 { v0.s }[0], [x21], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 8f\n"
+ "ld1 { v26.h }[2], [x27], #0x2\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v20.h }[2], [x25], #0x2\n"
+ "ld1 { v28.h }[2], [x24], #0x2\n"
+ "ld1 { v6.h }[2], [x23], #0x2\n"
+ "ld1 { v9.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[6], [x27]\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v20.b }[6], [x25]\n"
+ "ld1 { v28.b }[6], [x24]\n"
+ "ld1 { v6.b }[6], [x23]\n"
+ "ld1 { v9.b }[6], [x22]\n"
+ "ld1 { v0.b }[6], [x21]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[4], [x27]\n"
- "ld1 { v27.b }[4], [x26]\n"
- "ld1 { v1.b }[4], [x25]\n"
- "ld1 { v2.b }[4], [x24]\n"
- "ld1 { v12.b }[4], [x23]\n"
- "ld1 { v16.b }[4], [x22]\n"
- "ld1 { v23.b }[4], [x21]\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[4], [x27]\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v20.b }[4], [x25]\n"
+ "ld1 { v28.b }[4], [x24]\n"
+ "ld1 { v6.b }[4], [x23]\n"
+ "ld1 { v9.b }[4], [x22]\n"
+ "ld1 { v0.b }[4], [x21]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v25.h }[0], [x27], #0x2\n"
- "ld1 { v27.h }[0], [x26], #0x2\n"
- "ld1 { v1.h }[0], [x25], #0x2\n"
- "ld1 { v2.h }[0], [x24], #0x2\n"
- "ld1 { v12.h }[0], [x23], #0x2\n"
- "ld1 { v16.h }[0], [x22], #0x2\n"
- "ld1 { v23.h }[0], [x21], #0x2\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[2], [x27]\n"
- "ld1 { v27.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v12.b }[2], [x23]\n"
- "ld1 { v16.b }[2], [x22]\n"
- "ld1 { v23.b }[2], [x21]\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "tbz x2, #1, 10f\n"
+ "ld1 { v26.h }[0], [x27], #0x2\n"
+ "ld1 { v31.h }[0], [x26], #0x2\n"
+ "ld1 { v20.h }[0], [x25], #0x2\n"
+ "ld1 { v28.h }[0], [x24], #0x2\n"
+ "ld1 { v6.h }[0], [x23], #0x2\n"
+ "ld1 { v9.h }[0], [x22], #0x2\n"
+ "ld1 { v0.h }[0], [x21], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[2], [x27]\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v20.b }[2], [x25]\n"
+ "ld1 { v28.b }[2], [x24]\n"
+ "ld1 { v6.b }[2], [x23]\n"
+ "ld1 { v9.b }[2], [x22]\n"
+ "ld1 { v0.b }[2], [x21]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[0], [x27]\n"
- "ld1 { v27.b }[0], [x26]\n"
- "ld1 { v1.b }[0], [x25]\n"
- "ld1 { v2.b }[0], [x24]\n"
- "ld1 { v12.b }[0], [x23]\n"
- "ld1 { v16.b }[0], [x22]\n"
- "ld1 { v23.b }[0], [x21]\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[0], [x27]\n"
+ "ld1 { v31.b }[0], [x26]\n"
+ "ld1 { v20.b }[0], [x25]\n"
+ "ld1 { v28.b }[0], [x24]\n"
+ "ld1 { v6.b }[0], [x23]\n"
+ "ld1 { v9.b }[0], [x22]\n"
+ "ld1 { v0.b }[0], [x21]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "ssubl v25.8h, v25.8b, v6.8b\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x20, [x15, #0x40]\n"
- "ssubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "ssubl v1.8h, v1.8b, v6.8b\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "ssubl v2.8h, v2.8b, v6.8b\n"
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "ssubl v12.8h, v12.8b, v6.8b\n"
- "ssubl v23.8h, v23.8b, v6.8b\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "ssubl v10.8h, v10.8b, v6.8b\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "ssubl v26.8h, v26.8b, v13.8b\n"
+ "ssubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x20, [x5, #0x40]\n"
+ "ssubl v20.8h, v20.8b, v13.8b\n"
+ "ssubl v28.8h, v28.8b, v13.8b\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "ssubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "ssubl v0.8h, v0.8b, v13.8b\n"
+ "add x20, x20, x3\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "ssubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "tbz x2, #2, 13f\n"
+ "ld1 { v30.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 12f\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x2, #1, 14f\n"
+ "ld1 { v30.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[0], [x20]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
- "ssubl v15.8h, v15.8b, v6.8b\n"
- "ldr x20, [x15, #0x48]\n"
- "smlal v21.4s, v15.4h, v18.4h\n"
- "smlal2 v8.4s, v15.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v30.8h, v30.8b, v13.8b\n"
+ "ldr x20, [x5, #0x48]\n"
+ "smlal v8.4s, v30.4h, v27.4h\n"
+ "smlal2 v4.4s, v30.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 17f\n"
+ "ld1 { v9.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 16f\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 18f\n"
+ "ld1 { v9.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x50]\n"
- "smlal v21.4s, v16.4h, v9.4h\n"
- "smlal2 v8.4s, v16.8h, v9.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 21f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 20f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v9.8h, v9.8b, v13.8b\n"
+ "ldr x20, [x5, #0x50]\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 21f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 20f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (1, 2): Bit 2: Unset
- "tbz x7, #1, 22f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 22f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"23:" // Oddments: Load (1, 2): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x58]\n"
- "smlal v5.4s, v16.4h, v9.4h\n"
- "smlal2 v3.4s, v16.8h, v9.8h\n"
- "smlal v21.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x58]\n"
+ "smlal v19.4s, v17.4h, v2.4h\n"
+ "smlal2 v1.4s, v17.8h, v2.8h\n"
+ "smlal v8.4s, v17.4h, v15.4h\n"
+ "smlal2 v4.4s, v17.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 25f\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 24f\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 26f\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[0], [x20]\n"
"27:" // Oddments: Load (3, 0): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v20.4s, v16.4h, v28.4h\n"
- "smlal2 v0.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x20, [x5, #0x60]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 29f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 28f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 30f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 30f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 0): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v5.4s, v16.4h, v26.4h\n"
- "smlal2 v3.4s, v16.8h, v26.8h\n"
- "smlal v20.4s, v16.4h, v11.4h\n"
- "smlal2 v0.4s, v16.8h, v11.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x68]\n"
+ "smlal v19.4s, v17.4h, v22.4h\n"
+ "smlal2 v1.4s, v17.8h, v22.8h\n"
+ "smlal v10.4s, v17.4h, v16.4h\n"
+ "smlal2 v21.4s, v17.8h, v16.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 33f\n"
+ "ld1 { v30.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 32f\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 34f\n"
+ "ld1 { v30.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[0], [x20]\n"
"35:" // Oddments: Load (3, 1): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal v20.4s, v16.4h, v18.4h\n"
- "smlal2 v0.4s, v16.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v30.8h, v30.8b, v13.8b\n"
+ "ldr x20, [x5, #0x70]\n"
+ "smlal v10.4s, v30.4h, v27.4h\n"
+ "smlal2 v21.4s, v30.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 37f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 36f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 38f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"39:" // Oddments: Load (2, 1): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v16.4h, v7.4h\n"
- "smlal2 v3.4s, v16.8h, v7.8h\n"
- "smlal v20.4s, v16.4h, v22.4h\n"
- "smlal2 v0.4s, v16.8h, v22.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 41f\n"
+ "ssubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x78]\n"
+ "smlal v19.4s, v17.4h, v5.4h\n"
+ "smlal2 v1.4s, v17.8h, v5.8h\n"
+ "smlal v10.4s, v17.4h, v11.4h\n"
+ "smlal2 v21.4s, v17.8h, v11.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 41f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 40f\n"
+ "tbz x2, #1, 40f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 42f\n"
+ "tbz x2, #1, 42f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[0], [x20]\n"
"43:" // Oddments: Load (3, 3): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x80]\n"
- "smlal v19.4s, v16.4h, v18.4h\n"
- "smlal2 v31.4s, v16.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 45f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 44f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0x80]\n"
+ "smlal v3.4s, v16.4h, v27.4h\n"
+ "smlal2 v24.4s, v16.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 45f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 44f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 46f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x88]\n"
- "smlal v21.4s, v16.4h, v7.4h\n"
- "smlal2 v8.4s, v16.8h, v7.8h\n"
- "smlal v19.4s, v16.4h, v22.4h\n"
- "smlal2 v31.4s, v16.8h, v22.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 49f\n"
+ "ssubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x88]\n"
+ "smlal v8.4s, v17.4h, v5.4h\n"
+ "smlal2 v4.4s, v17.8h, v5.8h\n"
+ "smlal v3.4s, v17.4h, v11.4h\n"
+ "smlal2 v24.4s, v17.8h, v11.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 49f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 48f\n"
+ "tbz x2, #1, 48f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x7, #1, 50f\n"
+ "tbz x2, #1, 50f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 4): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x90]\n"
- "smlal v19.4s, v16.4h, v9.4h\n"
- "smlal2 v31.4s, v16.8h, v9.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0x90]\n"
+ "smlal v3.4s, v16.4h, v2.4h\n"
+ "smlal2 v24.4s, v16.8h, v2.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 53f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 52f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 54f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"55:" // Oddments: Load (4, 0): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x98]\n"
- "smlal v20.4s, v16.4h, v26.4h\n"
- "smlal2 v0.4s, v16.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 57f\n"
+ "ssubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x98]\n"
+ "smlal v10.4s, v17.4h, v22.4h\n"
+ "smlal2 v21.4s, v17.8h, v22.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 57f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 56f\n"
+ "tbz x2, #1, 56f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 59f\n"
"56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 59f\n"
"57:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x7, #1, 58f\n"
+ "tbz x2, #1, 58f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 59f\n"
"58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[0], [x20]\n"
"59:" // Oddments: Load (2, 4): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal v21.4s, v16.4h, v4.4h\n"
- "smlal2 v8.4s, v16.8h, v4.8h\n"
- "smlal v19.4s, v16.4h, v14.4h\n"
- "smlal2 v31.4s, v16.8h, v14.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 61f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 60f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xa0]\n"
+ "smlal v8.4s, v16.4h, v7.4h\n"
+ "smlal2 v4.4s, v16.8h, v7.8h\n"
+ "smlal v3.4s, v16.4h, v29.4h\n"
+ "smlal2 v24.4s, v16.8h, v29.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 61f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 60f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 63f\n"
"60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 63f\n"
"61:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 62f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 63f\n"
"62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"63:" // Oddments: Load (4, 1): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v20.4s, v16.4h, v7.4h\n"
- "smlal2 v0.4s, v16.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 65f\n"
+ "ssubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0xa8]\n"
+ "smlal v10.4s, v17.4h, v5.4h\n"
+ "smlal2 v21.4s, v17.8h, v5.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 65f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 64f\n"
+ "tbz x2, #1, 64f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 66f\n"
+ "tbz x2, #1, 66f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[0], [x20]\n"
"67:" // Oddments: Load (3, 2): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xb0]\n"
- "smlal v20.4s, v16.4h, v9.4h\n"
- "smlal2 v0.4s, v16.8h, v9.8h\n"
- "smlal v19.4s, v16.4h, v28.4h\n"
- "smlal2 v31.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 69f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 68f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xb0]\n"
+ "smlal v10.4s, v16.4h, v2.4h\n"
+ "smlal2 v21.4s, v16.8h, v2.8h\n"
+ "smlal v3.4s, v16.4h, v15.4h\n"
+ "smlal2 v24.4s, v16.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 69f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 68f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x7, #1, 70f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 70f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"71:" // Oddments: Load (4, 3): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal v19.4s, v16.4h, v7.4h\n"
- "smlal2 v31.4s, v16.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 73f\n"
+ "ssubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0xb8]\n"
+ "smlal v3.4s, v17.4h, v5.4h\n"
+ "smlal2 v24.4s, v17.8h, v5.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 73f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 72f\n"
+ "tbz x2, #1, 72f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x7, #1, 74f\n"
+ "tbz x2, #1, 74f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 2): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal v20.4s, v16.4h, v4.4h\n"
- "smlal2 v0.4s, v16.8h, v4.8h\n"
- "smlal v19.4s, v16.4h, v26.4h\n"
- "smlal2 v31.4s, v16.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 77f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 76f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ssubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xc0]\n"
+ "smlal v10.4s, v16.4h, v7.4h\n"
+ "smlal2 v21.4s, v16.8h, v7.8h\n"
+ "smlal v3.4s, v16.4h, v22.4h\n"
+ "smlal2 v24.4s, v16.8h, v22.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 77f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 76f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x7, #1, 78f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 78f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"79:" // Oddments: Load (4, 4): Bit 2: End
- "ssubl v16.8h, v16.8b, v6.8b\n"
- "smlal v19.4s, v16.4h, v4.4h\n"
- "smlal2 v31.4s, v16.8h, v4.8h\n"
- "tbz x7, #2, 81f\n"
- "ld1 { v14.4s }, [x13], #0x10\n"
- "ld1 { v25.4s }, [x12], #0x10\n"
- "tbz x7, #1, 80f\n"
- "ld1 { v18.d }[0], [x13], #0x8\n"
- "ld1 { v12.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x12]\n"
+ "ssubl v17.8h, v17.8b, v13.8b\n"
+ "smlal v3.4s, v17.4h, v7.4h\n"
+ "smlal2 v24.4s, v17.8h, v7.8h\n"
+ "tbz x2, #2, 81f\n"
+ "ld1 { v16.4s }, [x7], #0x10\n"
+ "ld1 { v22.4s }, [x8], #0x10\n"
+ "tbz x2, #1, 80f\n"
+ "ld1 { v0.d }[0], [x7], #0x8\n"
+ "ld1 { v31.d }[0], [x8], #0x8\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v0.s }[2], [x7]\n"
+ "ld1 { v31.s }[2], [x8]\n"
"b 83f\n"
"80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[0], [x13]\n"
- "ld1 { v12.s }[0], [x12]\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v0.s }[0], [x7]\n"
+ "ld1 { v31.s }[0], [x8]\n"
"b 83f\n"
"81:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 82f\n"
- "ld1 { v14.d }[0], [x13], #0x8\n"
- "ld1 { v25.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v14.s }[2], [x13]\n"
- "ld1 { v25.s }[2], [x12]\n"
+ "tbz x2, #1, 82f\n"
+ "ld1 { v16.d }[0], [x7], #0x8\n"
+ "ld1 { v22.d }[0], [x8], #0x8\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v16.s }[2], [x7]\n"
+ "ld1 { v22.s }[2], [x8]\n"
"b 83f\n"
"82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v14.s }[0], [x13]\n"
- "ld1 { v25.s }[0], [x12]\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v16.s }[0], [x7]\n"
+ "ld1 { v22.s }[0], [x8]\n"
"83:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v5.4s, v5.4s, v14.4s\n"
- "and v28.16b, v5.16b, v25.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v3.4s, v3.4s, v18.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v16.16b, v3.16b, v12.16b\n"
- "sqrdmulh v21.4s, v21.4s, v14.4s\n"
- "sqrdmulh v20.4s, v20.4s, v14.4s\n"
- "sqrdmulh v19.4s, v19.4s, v14.4s\n"
- "sqadd v5.4s, v5.4s, v28.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v16.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v0.4s\n"
+ "add x16, x16, x4\n"
+ "add x15, x15, x4\n"
+ "sqrdmulh v8.4s, v8.4s, v16.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v16.4s\n"
+ "add x14, x14, x4\n"
+ "add x13, x13, x4\n"
+ "sqrdmulh v3.4s, v3.4s, v16.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v0.4s\n"
+ "and v17.16b, v19.16b, v22.16b\n"
+ "and v16.16b, v1.16b, v31.16b\n"
+ "and v15.16b, v8.16b, v22.16b\n"
+ "and v20.16b, v10.16b, v22.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v0.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v0.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v14.16b, v21.16b, v25.16b\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "and v6.16b, v20.16b, v25.16b\n"
- "sqrdmulh v0.4s, v0.4s, v18.4s\n"
- "and v4.16b, v19.16b, v25.16b\n"
- "sqrdmulh v31.4s, v31.4s, v18.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v14.4s, v14.4s, #0x1f\n"
- "and v18.16b, v8.16b, v12.16b\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "and v7.16b, v0.16b, v12.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "and v16.16b, v31.16b, v12.16b\n"
- "sqadd v21.4s, v21.4s, v14.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v6.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v15.4s, v15.4s, #0x1f\n"
+ "and v26.16b, v4.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v0.16b, v21.16b, v31.16b\n"
+ "sqadd v19.4s, v19.4s, v17.4s\n"
+ "and v17.16b, v3.16b, v22.16b\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v16.16b, v24.16b, v31.16b\n"
+ "sqadd v8.4s, v8.4s, v15.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v25.4s\n"
- "srshl v21.4s, v21.4s, v25.4s\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "srshl v20.4s, v20.4s, v25.4s\n"
- "sqadd v0.4s, v0.4s, v7.4s\n"
- "srshl v19.4s, v19.4s, v25.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v12.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v12.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v12.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v22.4s\n"
+ "srshl v8.4s, v8.4s, v22.4s\n"
+ "sqadd v3.4s, v3.4s, v17.4s\n"
+ "sqadd v4.4s, v4.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v22.4s\n"
+ "sqadd v21.4s, v21.4s, v0.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v31.4s\n"
+ "srshl v3.4s, v3.4s, v22.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "srshl v4.4s, v4.4s, v31.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v31.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "tbz x7, #2, 85f\n"
- "st1 { v5.s }[0], [x11], #0x4\n"
- "st1 { v21.s }[0], [x10], #0x4\n"
- "st1 { v20.s }[0], [x9], #0x4\n"
- "st1 { v19.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 84f\n"
- "st1 { v5.h }[2], [x11], #0x2\n"
- "st1 { v21.h }[2], [x10], #0x2\n"
- "st1 { v20.h }[2], [x9], #0x2\n"
- "st1 { v19.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[6], [x11], #0x1\n"
- "st1 { v21.b }[6], [x10], #0x1\n"
- "st1 { v20.b }[6], [x9], #0x1\n"
- "st1 { v19.b }[6], [x28], #0x1\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "tbz x2, #2, 85f\n"
+ "st1 { v19.s }[0], [x16], #0x4\n"
+ "st1 { v8.s }[0], [x15], #0x4\n"
+ "st1 { v10.s }[0], [x14], #0x4\n"
+ "st1 { v3.s }[0], [x13], #0x4\n"
+ "tbz x2, #1, 84f\n"
+ "st1 { v19.h }[2], [x16], #0x2\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "st1 { v10.h }[2], [x14], #0x2\n"
+ "st1 { v3.h }[2], [x13], #0x2\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[6], [x16], #0x1\n"
+ "st1 { v8.b }[6], [x15], #0x1\n"
+ "st1 { v10.b }[6], [x14], #0x1\n"
+ "st1 { v3.b }[6], [x13], #0x1\n"
"b 87f\n"
"84:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[4], [x11], #0x1\n"
- "st1 { v21.b }[4], [x10], #0x1\n"
- "st1 { v20.b }[4], [x9], #0x1\n"
- "st1 { v19.b }[4], [x28], #0x1\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[4], [x16], #0x1\n"
+ "st1 { v8.b }[4], [x15], #0x1\n"
+ "st1 { v10.b }[4], [x14], #0x1\n"
+ "st1 { v3.b }[4], [x13], #0x1\n"
"b 87f\n"
"85:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 86f\n"
- "st1 { v5.h }[0], [x11], #0x2\n"
- "st1 { v21.h }[0], [x10], #0x2\n"
- "st1 { v20.h }[0], [x9], #0x2\n"
- "st1 { v19.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[2], [x11], #0x1\n"
- "st1 { v21.b }[2], [x10], #0x1\n"
- "st1 { v20.b }[2], [x9], #0x1\n"
- "st1 { v19.b }[2], [x28], #0x1\n"
+ "tbz x2, #1, 86f\n"
+ "st1 { v19.h }[0], [x16], #0x2\n"
+ "st1 { v8.h }[0], [x15], #0x2\n"
+ "st1 { v10.h }[0], [x14], #0x2\n"
+ "st1 { v3.h }[0], [x13], #0x2\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[2], [x16], #0x1\n"
+ "st1 { v8.b }[2], [x15], #0x1\n"
+ "st1 { v10.b }[2], [x14], #0x1\n"
+ "st1 { v3.b }[2], [x13], #0x1\n"
"b 87f\n"
"86:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[0], [x11], #0x1\n"
- "st1 { v21.b }[0], [x10], #0x1\n"
- "st1 { v20.b }[0], [x9], #0x1\n"
- "st1 { v19.b }[0], [x28], #0x1\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[0], [x16], #0x1\n"
+ "st1 { v8.b }[0], [x15], #0x1\n"
+ "st1 { v10.b }[0], [x14], #0x1\n"
+ "st1 { v3.b }[0], [x13], #0x1\n"
"87:" // Oddments: Bit 2: End
"88:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index b1648bae14..bd85c150ef 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
const int8_t *inptrs[36];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const int8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -113,1743 +113,1743 @@ void a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
__asm__ __volatile__(
"ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x2, x1, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v18.16b }, [x20]\n"
+ "mov x2, #0x0\n"
+ "mov x3, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x4, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x5, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x14, x1, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v15.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_b_offset]\n"
"add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v13.16b }, [x21]\n"
- "ld1r { v26.8h }, [x20]\n"
+ "ld1r { v9.16b }, [x21]\n"
+ "ld1r { v13.8h }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_minval]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v11.8h }, [x21]\n"
- "ld1r { v0.8h }, [x20]\n"
- "mov x3, #0x0\n"
- "mov x4, #0x0\n"
- "add x5, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x6, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x17, x16, [x22, #0x0]\n"
- "ldp x15, x14, [x22, #0x10]\n"
- "cbz x2, 3f\n"
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "subs x2, x2, #0x1\n"
- "ssubl v6.8h, v6.8b, v13.8b\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "ssubl v14.8h, v14.8b, v13.8b\n"
- "ssubl v10.8h, v10.8b, v13.8b\n"
- "ldr d12, [x6, #0x20]\n"
+ "ld1r { v10.8h }, [x21]\n"
+ "ld1r { v14.8h }, [x20]\n"
+ "ldp x8, x17, [x22, #0x0]\n"
+ "ldp x16, x15, [x22, #0x10]\n"
+ "cbz x14, 3f\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "subs x14, x14, #0x1\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "ldr d23, [x5, #0x20]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "ldr q7, [x20, #0x0]\n"
- "ldr q15, [x20, #0x10]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "ssubl v12.8h, v12.8b, v9.8b\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "ssubl v11.8h, v11.8b, v9.8b\n"
+ "ldr q8, [x20, #0x0]\n"
+ "ldr q0, [x20, #0x10]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
"add x20, x20, #0x20\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "ldr d31, [x9, x3]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldr d17, [x28, x3]\n"
- "ldr d30, [x27, x3]\n"
- "ssubl v31.8h, v31.8b, v18.8b\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "ldr d16, [x26, x3]\n"
- "ldr d3, [x25, x3]\n"
- "ssubl v30.8h, v30.8b, v18.8b\n"
- "ssubl v16.8h, v16.8b, v18.8b\n"
- "ldr d4, [x24, x3]\n"
- "ldr d25, [x23, x3]\n"
- "ssubl v3.8h, v3.8b, v18.8b\n"
- "ssubl v4.8h, v4.8b, v18.8b\n"
- "ldr d9, [x22, x3]\n"
- "ldr d29, [x21, x3]\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "ldr d28, [x20, x3]\n"
- "ssubl v29.8h, v29.8b, v18.8b\n"
- "ssubl v28.8h, v28.8b, v18.8b\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "ldr d24, [x9, x2]\n"
+ "ldr d21, [x28, x2]\n"
+ "ldr d16, [x27, x2]\n"
+ "ldr d20, [x26, x2]\n"
+ "ldr d7, [x25, x2]\n"
+ "ldr d19, [x24, x2]\n"
+ "ldr d28, [x23, x2]\n"
+ "ssubl v24.8h, v24.8b, v15.8b\n"
+ "ldr d26, [x22, x2]\n"
+ "ldr d29, [x21, x2]\n"
+ "ssubl v21.8h, v21.8b, v15.8b\n"
+ "ssubl v16.8h, v16.8b, v15.8b\n"
+ "ldr d18, [x20, x2]\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "ssubl v29.8h, v29.8b, v15.8b\n"
+ "ssubl v18.8h, v18.8b, v15.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr d2, [x6, #0x28]\n"
- "ldr d27, [x6, #0x30]\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "ldr d1, [x6, #0x38]\n"
- "ldr d31, [x6, #0x40]\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "ldr d8, [x6, #0x48]\n"
- "ldr x22, [x5, #0x50]\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "ldr x20, [x5, #0x58]\n"
- "ldr x21, [x5, #0x60]\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "ssubl v6.8h, v6.8b, v18.8b\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "ldr x22, [x5, #0x70]\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "ssubl v3.8h, v3.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "ldr d14, [x20, x3]\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "ssubl v14.8h, v14.8b, v18.8b\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal v23.4s, v17.4h, v10.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "ldr x20, [x5, #0x78]\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "ldr x21, [x5, #0x80]\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "ldr d25, [x22, x3]\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v10.8h\n"
- "ldr d10, [x20, x3]\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "ssubl v10.8h, v10.8b, v18.8b\n"
- "smlal v24.4s, v17.4h, v21.4h\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr x24, [x5, #0x88]\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "smlal v7.4s, v30.4h, v2.4h\n"
- "ldr x20, [x5, #0x90]\n"
- "ldr x23, [x5, #0x98]\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "ldr d9, [x21, x3]\n"
- "smlal2 v22.4s, v17.8h, v21.8h\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "ldr d21, [x6, #0x50]\n"
- "smlal v20.4s, v3.4h, v12.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "ldr x22, [x5, #0xa0]\n"
- "ldr x21, [x5, #0xa8]\n"
- "smlal2 v15.4s, v30.8h, v2.8h\n"
- "ldr d30, [x24, x3]\n"
- "smlal v7.4s, v16.4h, v27.4h\n"
- "ssubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v12.8h\n"
- "ldr d3, [x6, #0x58]\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "ldr d12, [x20, x3]\n"
- "smlal v20.4s, v16.4h, v2.4h\n"
- "ssubl v12.8h, v12.8b, v18.8b\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal v23.4s, v14.4h, v2.4h\n"
- "ldr x20, [x5, #0xb0]\n"
- "ldr x13, [x5, #0xb8]\n"
- "smlal2 v15.4s, v16.8h, v27.8h\n"
- "smlal v7.4s, v4.4h, v1.4h\n"
- "ldr x12, [x5, #0xc0]\n"
- "ldr x11, [x5, #0xc8]\n"
- "smlal2 v5.4s, v16.8h, v2.8h\n"
- "ldr d16, [x23, x3]\n"
- "smlal2 v22.4s, v28.8h, v2.8h\n"
- "ssubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v2.8h\n"
- "ldr d2, [x6, #0x60]\n"
- "smlal v20.4s, v4.4h, v27.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v27.4h\n"
- "smlal v23.4s, v25.4h, v27.4h\n"
- "ldr x10, [x5, #0xd0]\n"
- "ldr x9, [x5, #0xd8]\n"
- "smlal2 v15.4s, v4.8h, v1.8h\n"
- "smlal v7.4s, v17.4h, v31.4h\n"
- "ldr x28, [x5, #0xe0]\n"
- "ldr x27, [x5, #0xe8]\n"
- "smlal2 v5.4s, v4.8h, v27.8h\n"
- "ldr d4, [x22, x3]\n"
- "smlal2 v22.4s, v14.8h, v27.8h\n"
- "ssubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v27.8h\n"
- "ldr d27, [x6, #0x68]\n"
- "smlal v20.4s, v17.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v1.4h\n"
- "smlal v23.4s, v10.4h, v1.4h\n"
- "ldr x26, [x5, #0xf0]\n"
- "ldr x25, [x5, #0xf8]\n"
- "smlal2 v15.4s, v17.8h, v31.8h\n"
- "smlal v7.4s, v6.4h, v8.4h\n"
- "ldr x24, [x5, #0x100]\n"
- "ldr x23, [x5, #0x108]\n"
- "smlal2 v5.4s, v17.8h, v1.8h\n"
- "ldr d17, [x21, x3]\n"
- "smlal2 v22.4s, v25.8h, v1.8h\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v1.8h\n"
- "ldr d1, [x6, #0x70]\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v31.4h\n"
- "smlal v23.4s, v9.4h, v31.4h\n"
- "ldr x22, [x5, #0x110]\n"
- "ldr x21, [x5, #0x118]\n"
- "smlal2 v15.4s, v6.8h, v8.8h\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
- "subs x2, x2, #0x1\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal2 v22.4s, v10.8h, v31.8h\n"
- "ssubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v31.8h\n"
- "ldr d31, [x6, #0x78]\n"
- "smlal v20.4s, v29.4h, v8.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v9.4h, v8.4h\n"
- "smlal v23.4s, v30.4h, v8.4h\n"
+ "ldr d3, [x5, #0x28]\n"
+ "ldr d2, [x5, #0x30]\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "ldr d4, [x5, #0x38]\n"
+ "ldr d22, [x5, #0x40]\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "ldr d24, [x5, #0x48]\n"
+ "ldr x23, [x4, #0x50]\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x50]\n"
+ "ldr x22, [x4, #0x58]\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "ldr d21, [x5, #0x58]\n"
+ "ldr x21, [x4, #0x60]\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "ldr x20, [x4, #0x68]\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "ldr x28, [x4, #0x70]\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "ldr d12, [x23, x2]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "ldr d7, [x22, x2]\n"
+ "ldr x27, [x4, #0x78]\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "ldr x26, [x4, #0x80]\n"
+ "ssubl v12.8h, v12.8b, v15.8b\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "ldr x25, [x4, #0x88]\n"
+ "ldr x24, [x4, #0x90]\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
+ "ldr x23, [x4, #0x98]\n"
+ "ldr x22, [x4, #0xa0]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x21, x2]\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v17.4h\n"
+ "smlal2 v30.4s, v12.8h, v17.8h\n"
+ "ldr d17, [x20, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal v1.4s, v12.4h, v11.4h\n"
+ "ssubl v21.8h, v21.8b, v9.8b\n"
+ "ldr x21, [x4, #0xa8]\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal2 v25.4s, v12.8h, v11.8h\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "ldr d26, [x28, x2]\n"
+ "ssubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v7.4h, v11.4h\n"
+ "smlal2 v30.4s, v7.8h, v11.8h\n"
+ "ldr d11, [x27, x2]\n"
+ "ldr x13, [x4, #0xb8]\n"
+ "smlal v27.4s, v28.4h, v23.4h\n"
+ "smlal v1.4s, v7.4h, v23.4h\n"
+ "ldr x12, [x4, #0xc0]\n"
+ "ldr x11, [x4, #0xc8]\n"
+ "smlal2 v6.4s, v28.8h, v23.8h\n"
+ "ldr d28, [x26, x2]\n"
+ "smlal2 v25.4s, v7.8h, v23.8h\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v16.4h, v3.4h\n"
+ "smlal2 v0.4s, v16.8h, v3.8h\n"
+ "ldr d16, [x25, x2]\n"
+ "ssubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "ldr d23, [x24, x2]\n"
+ "ldr x10, [x4, #0xd0]\n"
+ "smlal v27.4s, v20.4h, v3.4h\n"
+ "smlal v1.4s, v18.4h, v3.4h\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x9, [x4, #0xd8]\n"
+ "smlal2 v6.4s, v20.8h, v3.8h\n"
+ "smlal2 v25.4s, v18.8h, v3.8h\n"
+ "ssubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x28, [x4, #0xe0]\n"
+ "smlal v8.4s, v20.4h, v2.4h\n"
+ "smlal2 v0.4s, v20.8h, v2.8h\n"
+ "ldr d20, [x23, x2]\n"
+ "ssubl v23.8h, v23.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v3.4h\n"
+ "smlal2 v30.4s, v17.8h, v3.8h\n"
+ "ldr d3, [x5, #0x60]\n"
+ "ldr x27, [x4, #0xe8]\n"
+ "smlal v27.4s, v19.4h, v2.4h\n"
+ "smlal v1.4s, v17.4h, v2.4h\n"
+ "ldr x26, [x4, #0xf0]\n"
+ "ldr x25, [x4, #0xf8]\n"
+ "smlal2 v6.4s, v19.8h, v2.8h\n"
+ "smlal2 v25.4s, v17.8h, v2.8h\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x24, [x4, #0x100]\n"
+ "smlal v8.4s, v19.4h, v4.4h\n"
+ "smlal2 v0.4s, v19.8h, v4.8h\n"
+ "ldr d19, [x22, x2]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v2.4h\n"
+ "smlal2 v30.4s, v26.8h, v2.8h\n"
+ "ldr d2, [x5, #0x68]\n"
+ "ldr x23, [x4, #0x108]\n"
+ "smlal v27.4s, v12.4h, v4.4h\n"
+ "smlal v1.4s, v26.4h, v4.4h\n"
+ "ldr x22, [x4, #0x110]\n"
+ "subs x14, x14, #0x1\n"
+ "smlal2 v6.4s, v12.8h, v4.8h\n"
+ "smlal2 v25.4s, v26.8h, v4.8h\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v22.4h\n"
+ "smlal2 v0.4s, v12.8h, v22.8h\n"
+ "ldr d12, [x21, x2]\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v4.4h\n"
+ "smlal2 v30.4s, v11.8h, v4.8h\n"
+ "ldr d4, [x5, #0x70]\n"
+ "ldr x21, [x4, #0x118]\n"
+ "smlal v27.4s, v7.4h, v22.4h\n"
+ "smlal v1.4s, v11.4h, v22.4h\n"
+ "smlal2 v6.4s, v7.8h, v22.8h\n"
+ "smlal2 v25.4s, v11.8h, v22.8h\n"
+ "ssubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v24.4h\n"
+ "smlal2 v0.4s, v7.8h, v24.8h\n"
+ "ldr d7, [x20, x2]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v22.4h\n"
+ "smlal2 v30.4s, v28.8h, v22.8h\n"
+ "ldr d22, [x5, #0x78]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "ldr d28, [x13, x3]\n"
- "smlal v7.4s, v14.4h, v3.4h\n"
- "ssubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v5.4s, v29.8h, v8.8h\n"
- "ldr d29, [x6, #0x80]\n"
- "smlal2 v22.4s, v9.8h, v8.8h\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "smlal2 v19.4s, v30.8h, v8.8h\n"
- "ldr d8, [x12, x3]\n"
- "smlal v20.4s, v14.4h, v21.4h\n"
- "ssubl v8.8h, v8.8b, v18.8b\n"
- "smlal v24.4s, v12.4h, v21.4h\n"
- "smlal v23.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v14.8h, v3.8h\n"
- "smlal v7.4s, v25.4h, v2.4h\n"
- "smlal2 v5.4s, v14.8h, v21.8h\n"
- "ldr d14, [x11, x3]\n"
- "smlal2 v22.4s, v12.8h, v21.8h\n"
- "ssubl v14.8h, v14.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v21.8h\n"
- "ldr d21, [x6, #0x88]\n"
- "smlal v20.4s, v25.4h, v3.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v16.4h, v3.4h\n"
- "smlal v23.4s, v4.4h, v3.4h\n"
- "smlal2 v15.4s, v25.8h, v2.8h\n"
- "smlal v7.4s, v10.4h, v27.4h\n"
- "smlal2 v5.4s, v25.8h, v3.8h\n"
- "ldr d25, [x10, x3]\n"
- "smlal2 v22.4s, v16.8h, v3.8h\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v3.8h\n"
- "ldr d3, [x6, #0x90]\n"
- "smlal v20.4s, v10.4h, v2.4h\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
- "smlal v24.4s, v4.4h, v2.4h\n"
- "smlal v23.4s, v17.4h, v2.4h\n"
- "smlal2 v15.4s, v10.8h, v27.8h\n"
- "smlal v7.4s, v9.4h, v1.4h\n"
- "smlal2 v5.4s, v10.8h, v2.8h\n"
- "ldr d10, [x9, x3]\n"
- "smlal2 v22.4s, v4.8h, v2.8h\n"
- "ssubl v10.8h, v10.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v2.8h\n"
- "ldr d2, [x6, #0x98]\n"
- "smlal v20.4s, v9.4h, v27.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v17.4h, v27.4h\n"
- "smlal v23.4s, v6.4h, v27.4h\n"
- "smlal2 v15.4s, v9.8h, v1.8h\n"
- "smlal v7.4s, v12.4h, v31.4h\n"
- "smlal2 v5.4s, v9.8h, v27.8h\n"
- "ldr d9, [x28, x3]\n"
- "smlal2 v22.4s, v17.8h, v27.8h\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v27.8h\n"
- "ldr d27, [x6, #0xa0]\n"
- "smlal v20.4s, v30.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v1.4h\n"
- "smlal v23.4s, v28.4h, v1.4h\n"
- "smlal2 v15.4s, v12.8h, v31.8h\n"
- "ldr d12, [x27, x3]\n"
- "smlal v7.4s, v16.4h, v29.4h\n"
- "ssubl v12.8h, v12.8b, v18.8b\n"
- "smlal2 v5.4s, v30.8h, v1.8h\n"
- "ldr d30, [x6, #0xa8]\n"
- "smlal2 v22.4s, v6.8h, v1.8h\n"
- "ssubl v30.8h, v30.8b, v13.8b\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "ldr d1, [x26, x3]\n"
- "smlal v20.4s, v16.4h, v31.4h\n"
- "ssubl v1.8h, v1.8b, v18.8b\n"
- "smlal v24.4s, v8.4h, v31.4h\n"
- "smlal v23.4s, v14.4h, v31.4h\n"
- "smlal2 v15.4s, v16.8h, v29.8h\n"
- "smlal v7.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v16.8h, v31.8h\n"
- "ldr d16, [x25, x3]\n"
- "smlal2 v22.4s, v8.8h, v31.8h\n"
- "ssubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v31.8h\n"
- "ldr d31, [x6, #0xb0]\n"
- "smlal v20.4s, v4.4h, v29.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v29.4h\n"
- "smlal v23.4s, v25.4h, v29.4h\n"
- "smlal2 v15.4s, v4.8h, v21.8h\n"
- "smlal v7.4s, v17.4h, v3.4h\n"
- "smlal2 v5.4s, v4.8h, v29.8h\n"
- "ldr d4, [x24, x3]\n"
- "smlal2 v22.4s, v14.8h, v29.8h\n"
- "ssubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v29.8h\n"
- "ldr d29, [x6, #0xb8]\n"
- "smlal v20.4s, v17.4h, v21.4h\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v15.4s, v17.8h, v3.8h\n"
- "smlal v7.4s, v6.4h, v2.4h\n"
- "smlal2 v5.4s, v17.8h, v21.8h\n"
- "ldr d17, [x23, x3]\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "ldr d21, [x6, #0xc0]\n"
- "smlal v20.4s, v6.4h, v3.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v3.4h\n"
- "smlal v23.4s, v9.4h, v3.4h\n"
- "add x6, x6, #0xc8\n"
- "smlal2 v15.4s, v6.8h, v2.8h\n"
- "smlal v7.4s, v8.4h, v27.4h\n"
- "smlal2 v5.4s, v6.8h, v3.8h\n"
- "ldr d6, [x22, x3]\n"
- "smlal2 v22.4s, v10.8h, v3.8h\n"
- "ssubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v3.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal v20.4s, v28.4h, v2.4h\n"
- "ssubl v3.8h, v3.8b, v18.8b\n"
- "smlal v24.4s, v9.4h, v2.4h\n"
- "smlal v23.4s, v12.4h, v2.4h\n"
- "add x3, x3, #0x8\n"
- "smlal2 v15.4s, v8.8h, v27.8h\n"
- "ldr q8, [x7, #0x0]\n"
- "smlal v7.4s, v14.4h, v30.4h\n"
- "smlal2 v5.4s, v28.8h, v2.8h\n"
- "ldr q28, [x8, #0x0]\n"
- "smlal2 v22.4s, v9.8h, v2.8h\n"
- "smlal2 v19.4s, v12.8h, v2.8h\n"
- "ldr q2, [x7, #0x10]\n"
- "smlal v20.4s, v14.4h, v27.4h\n"
+ "smlal v27.4s, v29.4h, v24.4h\n"
+ "smlal v1.4s, v28.4h, v24.4h\n"
+ "smlal2 v6.4s, v29.8h, v24.8h\n"
+ "ldr d29, [x13, x2]\n"
+ "smlal2 v25.4s, v28.8h, v24.8h\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v18.4h, v31.4h\n"
+ "smlal2 v0.4s, v18.8h, v31.8h\n"
+ "ldr d18, [x5, #0x80]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v24.4h\n"
+ "smlal2 v30.4s, v16.8h, v24.8h\n"
+ "ldr d24, [x12, x2]\n"
+ "smlal v27.4s, v17.4h, v31.4h\n"
+ "smlal v1.4s, v23.4h, v31.4h\n"
+ "ssubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v6.4s, v17.8h, v31.8h\n"
+ "smlal2 v25.4s, v23.8h, v31.8h\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v8.4s, v17.4h, v21.4h\n"
+ "smlal2 v0.4s, v17.8h, v21.8h\n"
+ "ldr d17, [x11, x2]\n"
+ "ssubl v24.8h, v24.8b, v15.8b\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x88]\n"
+ "smlal v27.4s, v26.4h, v21.4h\n"
+ "smlal v1.4s, v20.4h, v21.4h\n"
+ "smlal2 v6.4s, v26.8h, v21.8h\n"
+ "smlal2 v25.4s, v20.8h, v21.8h\n"
+ "ssubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v8.4s, v26.4h, v3.4h\n"
+ "smlal2 v0.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x10, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v21.4h\n"
+ "smlal2 v30.4s, v19.8h, v21.8h\n"
+ "ldr d21, [x5, #0x90]\n"
+ "smlal v27.4s, v11.4h, v3.4h\n"
+ "smlal v1.4s, v19.4h, v3.4h\n"
+ "smlal2 v6.4s, v11.8h, v3.8h\n"
+ "smlal2 v25.4s, v19.8h, v3.8h\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v11.4h, v2.4h\n"
+ "smlal2 v0.4s, v11.8h, v2.8h\n"
+ "ldr d11, [x9, x2]\n"
+ "ssubl v21.8h, v21.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v3.4h\n"
+ "smlal2 v30.4s, v12.8h, v3.8h\n"
+ "ldr d3, [x5, #0x98]\n"
+ "smlal v27.4s, v28.4h, v2.4h\n"
+ "smlal v1.4s, v12.4h, v2.4h\n"
+ "smlal2 v6.4s, v28.8h, v2.8h\n"
+ "smlal2 v25.4s, v12.8h, v2.8h\n"
+ "ssubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v8.4s, v28.4h, v4.4h\n"
+ "smlal2 v0.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x28, x2]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v2.4h\n"
+ "smlal2 v30.4s, v7.8h, v2.8h\n"
+ "ldr d2, [x5, #0xa0]\n"
+ "smlal v27.4s, v16.4h, v4.4h\n"
+ "smlal v1.4s, v7.4h, v4.4h\n"
+ "smlal2 v6.4s, v16.8h, v4.8h\n"
+ "ldr d16, [x27, x2]\n"
+ "smlal2 v25.4s, v7.8h, v4.8h\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "smlal v8.4s, v23.4h, v22.4h\n"
+ "smlal2 v0.4s, v23.8h, v22.8h\n"
+ "ldr d23, [x5, #0xa8]\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d4, [x26, x2]\n"
+ "smlal v27.4s, v20.4h, v22.4h\n"
+ "smlal v1.4s, v24.4h, v22.4h\n"
+ "ssubl v16.8h, v16.8b, v15.8b\n"
+ "smlal2 v6.4s, v20.8h, v22.8h\n"
+ "smlal2 v25.4s, v24.8h, v22.8h\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "ldr d20, [x25, x2]\n"
+ "ssubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v22.4h\n"
+ "smlal2 v30.4s, v17.8h, v22.8h\n"
+ "ldr d22, [x5, #0xb0]\n"
+ "smlal v27.4s, v19.4h, v18.4h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v6.4s, v19.8h, v18.8h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "smlal v8.4s, v19.4h, v31.4h\n"
+ "smlal2 v0.4s, v19.8h, v31.8h\n"
+ "ldr d19, [x24, x2]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v18.4h\n"
+ "smlal2 v30.4s, v26.8h, v18.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "smlal v27.4s, v12.4h, v31.4h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v6.4s, v12.8h, v31.8h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v21.4h\n"
+ "smlal2 v0.4s, v12.8h, v21.8h\n"
+ "ldr d12, [x23, x2]\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v31.4h\n"
+ "smlal2 v30.4s, v11.8h, v31.8h\n"
+ "ldr d31, [x5, #0xc0]\n"
+ "add x5, x5, #0xc8\n"
+ "smlal v27.4s, v7.4h, v21.4h\n"
+ "smlal v1.4s, v11.4h, v21.4h\n"
+ "smlal2 v6.4s, v7.8h, v21.8h\n"
+ "smlal2 v25.4s, v11.8h, v21.8h\n"
+ "ssubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v3.4h\n"
+ "smlal2 v0.4s, v7.8h, v3.8h\n"
+ "ldr d7, [x22, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v21.4h\n"
+ "smlal2 v30.4s, v28.8h, v21.8h\n"
+ "ldr d21, [x21, x2]\n"
+ "add x2, x2, #0x8\n"
+ "smlal v27.4s, v29.4h, v3.4h\n"
+ "smlal v1.4s, v28.4h, v3.4h\n"
+ "smlal2 v6.4s, v29.8h, v3.8h\n"
+ "ldr q29, [x6, #0x0]\n"
+ "smlal2 v25.4s, v28.8h, v3.8h\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v2.4h\n"
+ "smlal2 v0.4s, v24.8h, v2.8h\n"
+ "ldr q24, [x7, #0x0]\n"
+ "ssubl v21.8h, v21.8b, v15.8b\n"
+ "smlal v5.4s, v16.4h, v3.4h\n"
+ "smlal2 v30.4s, v16.8h, v3.8h\n"
+ "ldr q3, [x6, #0x10]\n"
+ "add x6, x6, #0x20\n"
+ "smlal v27.4s, v17.4h, v2.4h\n"
+ "smlal v1.4s, v4.4h, v2.4h\n"
+ "smlal2 v6.4s, v17.8h, v2.8h\n"
+ "smlal2 v25.4s, v4.8h, v2.8h\n"
+ "ldr q4, [x7, #0x10]\n"
"add x7, x7, #0x20\n"
- "smlal v24.4s, v1.4h, v27.4h\n"
- "smlal v23.4s, v16.4h, v27.4h\n"
- "smlal2 v15.4s, v14.8h, v30.8h\n"
- "smlal v7.4s, v25.4h, v31.4h\n"
- "smlal2 v5.4s, v14.8h, v27.8h\n"
- "ldr q14, [x8, #0x10]\n"
- "smlal2 v22.4s, v1.8h, v27.8h\n"
- "add x8, x8, #0x20\n"
- "smlal2 v19.4s, v16.8h, v27.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal v24.4s, v16.4h, v30.4h\n"
- "smlal v23.4s, v4.4h, v30.4h\n"
- "smlal2 v15.4s, v25.8h, v31.8h\n"
- "smlal v7.4s, v10.4h, v29.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal2 v22.4s, v16.8h, v30.8h\n"
- "smlal2 v19.4s, v4.8h, v30.8h\n"
- "smlal v20.4s, v10.4h, v31.4h\n"
- "smlal v24.4s, v4.4h, v31.4h\n"
- "smlal v23.4s, v17.4h, v31.4h\n"
- "smlal2 v15.4s, v10.8h, v29.8h\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "sqrdmulh v7.4s, v7.4s, v8.4s\n"
- "smlal2 v5.4s, v10.8h, v31.8h\n"
- "smlal2 v22.4s, v4.8h, v31.8h\n"
- "and v27.16b, v7.16b, v28.16b\n"
- "smlal2 v19.4s, v17.8h, v31.8h\n"
- "smlal v20.4s, v9.4h, v29.4h\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "smlal v24.4s, v17.4h, v29.4h\n"
- "smlal v23.4s, v6.4h, v29.4h\n"
- "sqadd v7.4s, v7.4s, v27.4s\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal2 v5.4s, v9.8h, v29.8h\n"
- "sqrdmulh v15.4s, v15.4s, v2.4s\n"
- "smlal2 v22.4s, v17.8h, v29.8h\n"
- "smlal2 v19.4s, v6.8h, v29.8h\n"
- "and v9.16b, v15.16b, v14.16b\n"
- "smlal v20.4s, v12.4h, v21.4h\n"
- "smlal v24.4s, v6.4h, v21.4h\n"
- "sqrdmulh v20.4s, v20.4s, v8.4s\n"
- "smlal v23.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v12.8h, v21.8h\n"
- "sqrdmulh v24.4s, v24.4s, v8.4s\n"
- "smlal2 v22.4s, v6.8h, v21.8h\n"
- "smlal2 v19.4s, v3.8h, v21.8h\n"
- "sqrdmulh v23.4s, v23.4s, v8.4s\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "and v25.16b, v20.16b, v28.16b\n"
- "sqrdmulh v5.4s, v5.4s, v2.4s\n"
- "and v10.16b, v24.16b, v28.16b\n"
- "sqrdmulh v22.4s, v22.4s, v2.4s\n"
- "and v21.16b, v23.16b, v28.16b\n"
- "sqrdmulh v19.4s, v19.4s, v2.4s\n"
- "sqadd v15.4s, v15.4s, v9.4s\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "and v9.16b, v5.16b, v14.16b\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "and v12.16b, v22.16b, v14.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v17.16b, v19.16b, v14.16b\n"
- "sqadd v20.4s, v20.4s, v25.4s\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v10.4s\n"
+ "smlal v8.4s, v17.4h, v23.4h\n"
+ "smlal2 v0.4s, v17.8h, v23.8h\n"
+ "smlal v5.4s, v20.4h, v2.4h\n"
+ "smlal2 v30.4s, v20.8h, v2.8h\n"
+ "smlal v27.4s, v26.4h, v23.4h\n"
+ "smlal v1.4s, v20.4h, v23.4h\n"
+ "smlal2 v6.4s, v26.8h, v23.8h\n"
+ "smlal2 v25.4s, v20.8h, v23.8h\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal2 v0.4s, v26.8h, v22.8h\n"
+ "smlal v5.4s, v19.4h, v23.4h\n"
+ "smlal2 v30.4s, v19.8h, v23.8h\n"
+ "smlal v27.4s, v11.4h, v22.4h\n"
+ "smlal v1.4s, v19.4h, v22.4h\n"
+ "smlal2 v6.4s, v11.8h, v22.8h\n"
+ "smlal2 v25.4s, v19.8h, v22.8h\n"
+ "smlal v8.4s, v11.4h, v18.4h\n"
+ "smlal2 v0.4s, v11.8h, v18.8h\n"
+ "smlal v5.4s, v12.4h, v22.4h\n"
+ "smlal2 v30.4s, v12.8h, v22.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal v1.4s, v12.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal2 v25.4s, v12.8h, v18.8h\n"
+ "smlal v8.4s, v28.4h, v31.4h\n"
+ "smlal2 v0.4s, v28.8h, v31.8h\n"
+ "smlal v5.4s, v7.4h, v18.4h\n"
+ "smlal2 v30.4s, v7.8h, v18.8h\n"
+ "smlal v27.4s, v16.4h, v31.4h\n"
+ "smlal v1.4s, v7.4h, v31.4h\n"
+ "smlal2 v6.4s, v16.8h, v31.8h\n"
+ "smlal2 v25.4s, v7.8h, v31.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v29.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v3.4s\n"
+ "smlal v5.4s, v21.4h, v31.4h\n"
+ "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "and v17.16b, v8.16b, v24.16b\n"
+ "sqrdmulh v27.4s, v27.4s, v29.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v29.4s\n"
+ "and v12.16b, v0.16b, v4.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v3.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v3.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v5.4s, v5.4s, v29.4s\n"
"sshr v12.4s, v12.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
+ "and v21.16b, v27.16b, v24.16b\n"
+ "and v16.16b, v1.16b, v24.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v3.4s\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v28.16b, v5.16b, v24.16b\n"
+ "sqadd v0.4s, v0.4s, v12.4s\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v18.16b, v6.16b, v4.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v17.16b, v25.16b, v4.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v3.16b, v30.16b, v4.16b\n"
+ "sqadd v27.4s, v27.4s, v21.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v28.4s\n"
- "srshl v20.4s, v20.4s, v28.4s\n"
- "sqadd v5.4s, v5.4s, v9.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqadd v22.4s, v22.4s, v12.4s\n"
- "srshl v23.4s, v23.4s, v28.4s\n"
- "sqadd v19.4s, v19.4s, v17.4s\n"
- "srshl v15.4s, v15.4s, v14.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v14.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v14.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v14.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "str d7, [x17, x4]\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str d20, [x16, x4]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str d24, [x15, x4]\n"
- "str d23, [x14, x4]\n"
- "ldr q7, [x20, #0x0]\n"
- "ldr q15, [x20, #0x10]\n"
+ "sqadd v5.4s, v5.4s, v28.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v24.4s\n"
+ "srshl v27.4s, v27.4s, v24.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v24.4s\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "srshl v5.4s, v5.4s, v24.4s\n"
+ "sqadd v30.4s, v30.4s, v3.4s\n"
+ "srshl v0.4s, v0.4s, v4.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v4.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v4.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v4.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "str d8, [x8, x3]\n"
+ "str d27, [x17, x3]\n"
+ "str d1, [x16, x3]\n"
+ "str d5, [x15, x3]\n"
+ "add x3, x3, #0x8\n"
+ "ldr q8, [x20, #0x0]\n"
+ "ldr q0, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "add x4, x4, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldr d12, [x6, #0x20]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "ssubl v6.8h, v6.8b, v13.8b\n"
- "ssubl v14.8h, v14.8b, v13.8b\n"
- "ldr d31, [x9, x3]\n"
- "ldr d17, [x28, x3]\n"
- "ssubl v10.8h, v10.8b, v13.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ldr d30, [x27, x3]\n"
- "ldr d16, [x26, x3]\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "ssubl v31.8h, v31.8b, v18.8b\n"
- "ldr d3, [x25, x3]\n"
- "ldr d4, [x24, x3]\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "ssubl v30.8h, v30.8b, v18.8b\n"
- "ldr d25, [x23, x3]\n"
- "ldr d9, [x22, x3]\n"
- "ssubl v16.8h, v16.8b, v18.8b\n"
- "ssubl v3.8h, v3.8b, v18.8b\n"
- "ldr d29, [x21, x3]\n"
- "ldr d28, [x20, x3]\n"
- "ssubl v4.8h, v4.8b, v18.8b\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "ssubl v29.8h, v29.8b, v18.8b\n"
- "ssubl v28.8h, v28.8b, v18.8b\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "ldr d23, [x5, #0x20]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "ssubl v12.8h, v12.8b, v9.8b\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "ssubl v11.8h, v11.8b, v9.8b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "ldr d24, [x9, x2]\n"
+ "ldr d21, [x28, x2]\n"
+ "ldr d16, [x27, x2]\n"
+ "ldr d20, [x26, x2]\n"
+ "ldr d7, [x25, x2]\n"
+ "ldr d19, [x24, x2]\n"
+ "ldr d28, [x23, x2]\n"
+ "ldr d26, [x22, x2]\n"
+ "ssubl v24.8h, v24.8b, v15.8b\n"
+ "ssubl v21.8h, v21.8b, v15.8b\n"
+ "ldr d29, [x21, x2]\n"
+ "ldr d18, [x20, x2]\n"
+ "ssubl v16.8h, v16.8b, v15.8b\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "ssubl v29.8h, v29.8b, v15.8b\n"
+ "ssubl v18.8h, v18.8b, v15.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr d27, [x6, #0x28]\n"
- "ldr d1, [x6, #0x30]\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "ldr d2, [x6, #0x38]\n"
- "ldr d31, [x6, #0x40]\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "ldr d8, [x6, #0x48]\n"
- "ldr x22, [x5, #0x50]\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "ldr x20, [x5, #0x58]\n"
- "ldr x21, [x5, #0x60]\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "ssubl v6.8h, v6.8b, v18.8b\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "ldr x22, [x5, #0x70]\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "ssubl v3.8h, v3.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "ldr d14, [x20, x3]\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "ssubl v14.8h, v14.8b, v18.8b\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal v23.4s, v17.4h, v10.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "ldr x21, [x5, #0x78]\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "ldr x20, [x5, #0x80]\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "ldr d25, [x22, x3]\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v10.8h\n"
- "ldr d10, [x21, x3]\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "ssubl v10.8h, v10.8b, v18.8b\n"
- "smlal v24.4s, v17.4h, v21.4h\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr x24, [x5, #0x88]\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "smlal v7.4s, v30.4h, v27.4h\n"
- "ldr x23, [x5, #0x90]\n"
- "ldr x22, [x5, #0x98]\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "ldr d9, [x20, x3]\n"
- "smlal2 v22.4s, v17.8h, v21.8h\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "ldr d21, [x6, #0x50]\n"
- "smlal v20.4s, v3.4h, v12.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "ldr x21, [x5, #0xa0]\n"
- "ldr x20, [x5, #0xa8]\n"
- "smlal2 v15.4s, v30.8h, v27.8h\n"
- "ldr d30, [x24, x3]\n"
- "smlal v7.4s, v16.4h, v1.4h\n"
- "ssubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v12.8h\n"
- "ldr d3, [x6, #0x58]\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "ldr d12, [x23, x3]\n"
- "smlal v20.4s, v16.4h, v27.4h\n"
- "ssubl v12.8h, v12.8b, v18.8b\n"
- "smlal v24.4s, v28.4h, v27.4h\n"
- "smlal v23.4s, v14.4h, v27.4h\n"
- "ldr x13, [x5, #0xb0]\n"
- "ldr x12, [x5, #0xb8]\n"
- "smlal2 v15.4s, v16.8h, v1.8h\n"
- "smlal v7.4s, v4.4h, v2.4h\n"
- "ldr x11, [x5, #0xc0]\n"
- "ldr x10, [x5, #0xc8]\n"
- "smlal2 v5.4s, v16.8h, v27.8h\n"
- "ldr d16, [x22, x3]\n"
- "smlal2 v22.4s, v28.8h, v27.8h\n"
- "ssubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v27.8h\n"
- "ldr d27, [x6, #0x60]\n"
- "smlal v20.4s, v4.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v1.4h\n"
- "smlal v23.4s, v25.4h, v1.4h\n"
- "ldr x9, [x5, #0xd0]\n"
- "ldr x28, [x5, #0xd8]\n"
- "smlal2 v15.4s, v4.8h, v2.8h\n"
- "smlal v7.4s, v17.4h, v31.4h\n"
- "ldr x27, [x5, #0xe0]\n"
- "ldr x26, [x5, #0xe8]\n"
- "smlal2 v5.4s, v4.8h, v1.8h\n"
- "ldr d4, [x21, x3]\n"
- "smlal2 v22.4s, v14.8h, v1.8h\n"
- "ssubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "ldr d1, [x6, #0x68]\n"
- "smlal v20.4s, v17.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v2.4h\n"
- "smlal v23.4s, v10.4h, v2.4h\n"
- "ldr x25, [x5, #0xf0]\n"
- "ldr x24, [x5, #0xf8]\n"
- "smlal2 v15.4s, v17.8h, v31.8h\n"
- "smlal v7.4s, v6.4h, v8.4h\n"
- "ldr x23, [x5, #0x100]\n"
- "ldr x22, [x5, #0x108]\n"
- "smlal2 v5.4s, v17.8h, v2.8h\n"
- "ldr d17, [x20, x3]\n"
- "smlal2 v22.4s, v25.8h, v2.8h\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v2.8h\n"
- "ldr d2, [x6, #0x70]\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v31.4h\n"
- "smlal v23.4s, v9.4h, v31.4h\n"
- "ldr x21, [x5, #0x110]\n"
- "ldr x20, [x5, #0x118]\n"
- "smlal2 v15.4s, v6.8h, v8.8h\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
+ "ldr d4, [x5, #0x28]\n"
+ "ldr d3, [x5, #0x30]\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "ldr d22, [x5, #0x38]\n"
+ "ldr d2, [x5, #0x40]\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "ldr d24, [x5, #0x48]\n"
+ "ldr x21, [x4, #0x50]\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x50]\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "ldr d21, [x5, #0x58]\n"
+ "ldr x28, [x4, #0x60]\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "ldr x27, [x4, #0x68]\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "ldr x26, [x4, #0x70]\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "ldr d12, [x21, x2]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "ldr d7, [x20, x2]\n"
+ "ldr x25, [x4, #0x78]\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "ldr x24, [x4, #0x80]\n"
+ "ssubl v12.8h, v12.8b, v15.8b\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "ldr x23, [x4, #0x88]\n"
+ "ldr x22, [x4, #0x90]\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
+ "ldr x21, [x4, #0x98]\n"
+ "ldr x20, [x4, #0xa0]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x28, x2]\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v17.4h\n"
+ "smlal2 v30.4s, v12.8h, v17.8h\n"
+ "ldr d17, [x27, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal v1.4s, v12.4h, v11.4h\n"
+ "ssubl v21.8h, v21.8b, v9.8b\n"
+ "ldr x14, [x4, #0xa8]\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal2 v25.4s, v12.8h, v11.8h\n"
+ "ldr x13, [x4, #0xb0]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "ldr d26, [x26, x2]\n"
+ "ssubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v7.4h, v11.4h\n"
+ "smlal2 v30.4s, v7.8h, v11.8h\n"
+ "ldr d11, [x25, x2]\n"
+ "ldr x12, [x4, #0xb8]\n"
+ "smlal v27.4s, v28.4h, v23.4h\n"
+ "smlal v1.4s, v7.4h, v23.4h\n"
+ "ldr x11, [x4, #0xc0]\n"
+ "ldr x10, [x4, #0xc8]\n"
+ "smlal2 v6.4s, v28.8h, v23.8h\n"
+ "ldr d28, [x24, x2]\n"
+ "smlal2 v25.4s, v7.8h, v23.8h\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v16.4h, v4.4h\n"
+ "smlal2 v0.4s, v16.8h, v4.8h\n"
+ "ldr d16, [x23, x2]\n"
+ "ssubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "ldr d23, [x22, x2]\n"
+ "ldr x9, [x4, #0xd0]\n"
+ "smlal v27.4s, v20.4h, v4.4h\n"
+ "smlal v1.4s, v18.4h, v4.4h\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x28, [x4, #0xd8]\n"
+ "smlal2 v6.4s, v20.8h, v4.8h\n"
+ "smlal2 v25.4s, v18.8h, v4.8h\n"
+ "ssubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x27, [x4, #0xe0]\n"
+ "smlal v8.4s, v20.4h, v3.4h\n"
+ "smlal2 v0.4s, v20.8h, v3.8h\n"
+ "ldr d20, [x21, x2]\n"
+ "ssubl v23.8h, v23.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v4.4h\n"
+ "smlal2 v30.4s, v17.8h, v4.8h\n"
+ "ldr d4, [x5, #0x60]\n"
+ "ldr x26, [x4, #0xe8]\n"
+ "smlal v27.4s, v19.4h, v3.4h\n"
+ "smlal v1.4s, v17.4h, v3.4h\n"
+ "ldr x25, [x4, #0xf0]\n"
+ "ldr x24, [x4, #0xf8]\n"
+ "smlal2 v6.4s, v19.8h, v3.8h\n"
+ "smlal2 v25.4s, v17.8h, v3.8h\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x23, [x4, #0x100]\n"
+ "smlal v8.4s, v19.4h, v22.4h\n"
+ "smlal2 v0.4s, v19.8h, v22.8h\n"
+ "ldr d19, [x20, x2]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v3.4h\n"
+ "smlal2 v30.4s, v26.8h, v3.8h\n"
+ "ldr d3, [x5, #0x68]\n"
+ "ldr x22, [x4, #0x108]\n"
+ "smlal v27.4s, v12.4h, v22.4h\n"
+ "smlal v1.4s, v26.4h, v22.4h\n"
+ "ldr x21, [x4, #0x110]\n"
+ "ldr x20, [x4, #0x118]\n"
+ "smlal2 v6.4s, v12.8h, v22.8h\n"
+ "smlal2 v25.4s, v26.8h, v22.8h\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
"tst x1, #0x7\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "ldr d6, [x13, x3]\n"
- "smlal2 v22.4s, v10.8h, v31.8h\n"
- "ssubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v31.8h\n"
- "ldr d31, [x6, #0x78]\n"
- "smlal v20.4s, v29.4h, v8.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v9.4h, v8.4h\n"
- "smlal v23.4s, v30.4h, v8.4h\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "ldr d28, [x12, x3]\n"
- "smlal v7.4s, v14.4h, v3.4h\n"
- "ssubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v5.4s, v29.8h, v8.8h\n"
- "ldr d29, [x6, #0x80]\n"
- "smlal2 v22.4s, v9.8h, v8.8h\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "smlal2 v19.4s, v30.8h, v8.8h\n"
- "ldr d8, [x11, x3]\n"
- "smlal v20.4s, v14.4h, v21.4h\n"
- "ssubl v8.8h, v8.8b, v18.8b\n"
- "smlal v24.4s, v12.4h, v21.4h\n"
- "smlal v23.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v14.8h, v3.8h\n"
- "smlal v7.4s, v25.4h, v27.4h\n"
- "smlal2 v5.4s, v14.8h, v21.8h\n"
- "ldr d14, [x10, x3]\n"
- "smlal2 v22.4s, v12.8h, v21.8h\n"
- "ssubl v14.8h, v14.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v21.8h\n"
- "ldr d21, [x6, #0x88]\n"
- "smlal v20.4s, v25.4h, v3.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v16.4h, v3.4h\n"
- "smlal v23.4s, v4.4h, v3.4h\n"
- "smlal2 v15.4s, v25.8h, v27.8h\n"
- "smlal v7.4s, v10.4h, v1.4h\n"
- "smlal2 v5.4s, v25.8h, v3.8h\n"
- "ldr d25, [x9, x3]\n"
- "smlal2 v22.4s, v16.8h, v3.8h\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v3.8h\n"
- "ldr d3, [x6, #0x90]\n"
- "smlal v20.4s, v10.4h, v27.4h\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
- "smlal v24.4s, v4.4h, v27.4h\n"
- "smlal v23.4s, v17.4h, v27.4h\n"
- "smlal2 v15.4s, v10.8h, v1.8h\n"
- "smlal v7.4s, v9.4h, v2.4h\n"
- "smlal2 v5.4s, v10.8h, v27.8h\n"
- "ldr d10, [x28, x3]\n"
- "smlal2 v22.4s, v4.8h, v27.8h\n"
- "ssubl v10.8h, v10.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v27.8h\n"
- "ldr d27, [x6, #0x98]\n"
- "smlal v20.4s, v9.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v17.4h, v1.4h\n"
- "smlal v23.4s, v6.4h, v1.4h\n"
- "smlal2 v15.4s, v9.8h, v2.8h\n"
- "smlal v7.4s, v12.4h, v31.4h\n"
- "smlal2 v5.4s, v9.8h, v1.8h\n"
- "ldr d9, [x27, x3]\n"
- "smlal2 v22.4s, v17.8h, v1.8h\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v1.8h\n"
- "ldr d1, [x6, #0xa0]\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v2.4h\n"
- "smlal v23.4s, v28.4h, v2.4h\n"
- "smlal2 v15.4s, v12.8h, v31.8h\n"
- "ldr d12, [x26, x3]\n"
- "smlal v7.4s, v16.4h, v29.4h\n"
- "ssubl v12.8h, v12.8b, v18.8b\n"
- "smlal2 v5.4s, v30.8h, v2.8h\n"
- "ldr d30, [x6, #0xa8]\n"
- "smlal2 v22.4s, v6.8h, v2.8h\n"
- "ssubl v30.8h, v30.8b, v13.8b\n"
- "smlal2 v19.4s, v28.8h, v2.8h\n"
- "ldr d2, [x25, x3]\n"
- "smlal v20.4s, v16.4h, v31.4h\n"
- "ssubl v2.8h, v2.8b, v18.8b\n"
- "smlal v24.4s, v8.4h, v31.4h\n"
- "smlal v23.4s, v14.4h, v31.4h\n"
- "smlal2 v15.4s, v16.8h, v29.8h\n"
- "smlal v7.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v16.8h, v31.8h\n"
- "ldr d16, [x24, x3]\n"
- "smlal2 v22.4s, v8.8h, v31.8h\n"
- "ssubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v31.8h\n"
- "ldr d31, [x6, #0xb0]\n"
- "smlal v20.4s, v4.4h, v29.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v29.4h\n"
- "smlal v23.4s, v25.4h, v29.4h\n"
- "smlal2 v15.4s, v4.8h, v21.8h\n"
- "smlal v7.4s, v17.4h, v3.4h\n"
- "smlal2 v5.4s, v4.8h, v29.8h\n"
- "ldr d4, [x23, x3]\n"
- "smlal2 v22.4s, v14.8h, v29.8h\n"
- "ssubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v29.8h\n"
- "ldr d29, [x6, #0xb8]\n"
- "smlal v20.4s, v17.4h, v21.4h\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v15.4s, v17.8h, v3.8h\n"
- "smlal v7.4s, v6.4h, v27.4h\n"
- "smlal2 v5.4s, v17.8h, v21.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "ldr d21, [x6, #0xc0]\n"
- "smlal v20.4s, v6.4h, v3.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v3.4h\n"
- "smlal v23.4s, v9.4h, v3.4h\n"
- "smlal2 v15.4s, v6.8h, v27.8h\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "smlal2 v5.4s, v6.8h, v3.8h\n"
- "ldr d6, [x21, x3]\n"
- "smlal2 v22.4s, v10.8h, v3.8h\n"
- "ssubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v3.8h\n"
- "ldr d3, [x20, x3]\n"
- "smlal v20.4s, v28.4h, v27.4h\n"
- "ssubl v3.8h, v3.8b, v18.8b\n"
- "smlal v24.4s, v9.4h, v27.4h\n"
- "smlal v23.4s, v12.4h, v27.4h\n"
- "add x3, x3, #0x8\n"
- "smlal2 v15.4s, v8.8h, v1.8h\n"
- "ldr q8, [x7, #0x0]\n"
- "smlal v7.4s, v14.4h, v30.4h\n"
- "smlal2 v5.4s, v28.8h, v27.8h\n"
- "ldr q28, [x8, #0x0]\n"
- "smlal2 v22.4s, v9.8h, v27.8h\n"
- "smlal2 v19.4s, v12.8h, v27.8h\n"
- "ldr q27, [x7, #0x10]\n"
- "smlal v20.4s, v14.4h, v1.4h\n"
+ "smlal v8.4s, v12.4h, v2.4h\n"
+ "smlal2 v0.4s, v12.8h, v2.8h\n"
+ "ldr d12, [x14, x2]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v22.4h\n"
+ "smlal2 v30.4s, v11.8h, v22.8h\n"
+ "ldr d22, [x5, #0x70]\n"
+ "smlal v27.4s, v7.4h, v2.4h\n"
+ "smlal v1.4s, v11.4h, v2.4h\n"
+ "smlal2 v6.4s, v7.8h, v2.8h\n"
+ "smlal2 v25.4s, v11.8h, v2.8h\n"
+ "ssubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v24.4h\n"
+ "smlal2 v0.4s, v7.8h, v24.8h\n"
+ "ldr d7, [x13, x2]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v2.4h\n"
+ "smlal2 v30.4s, v28.8h, v2.8h\n"
+ "ldr d2, [x5, #0x78]\n"
+ "smlal v27.4s, v29.4h, v24.4h\n"
+ "smlal v1.4s, v28.4h, v24.4h\n"
+ "smlal2 v6.4s, v29.8h, v24.8h\n"
+ "ldr d29, [x12, x2]\n"
+ "smlal2 v25.4s, v28.8h, v24.8h\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v18.4h, v31.4h\n"
+ "smlal2 v0.4s, v18.8h, v31.8h\n"
+ "ldr d18, [x5, #0x80]\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v24.4h\n"
+ "smlal2 v30.4s, v16.8h, v24.8h\n"
+ "ldr d24, [x11, x2]\n"
+ "smlal v27.4s, v17.4h, v31.4h\n"
+ "smlal v1.4s, v23.4h, v31.4h\n"
+ "ssubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v6.4s, v17.8h, v31.8h\n"
+ "smlal2 v25.4s, v23.8h, v31.8h\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v8.4s, v17.4h, v21.4h\n"
+ "smlal2 v0.4s, v17.8h, v21.8h\n"
+ "ldr d17, [x10, x2]\n"
+ "ssubl v24.8h, v24.8b, v15.8b\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x88]\n"
+ "smlal v27.4s, v26.4h, v21.4h\n"
+ "smlal v1.4s, v20.4h, v21.4h\n"
+ "smlal2 v6.4s, v26.8h, v21.8h\n"
+ "smlal2 v25.4s, v20.8h, v21.8h\n"
+ "ssubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v8.4s, v26.4h, v4.4h\n"
+ "smlal2 v0.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x9, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v21.4h\n"
+ "smlal2 v30.4s, v19.8h, v21.8h\n"
+ "ldr d21, [x5, #0x90]\n"
+ "smlal v27.4s, v11.4h, v4.4h\n"
+ "smlal v1.4s, v19.4h, v4.4h\n"
+ "smlal2 v6.4s, v11.8h, v4.8h\n"
+ "smlal2 v25.4s, v19.8h, v4.8h\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v11.4h, v3.4h\n"
+ "smlal2 v0.4s, v11.8h, v3.8h\n"
+ "ldr d11, [x28, x2]\n"
+ "ssubl v21.8h, v21.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v4.4h\n"
+ "smlal2 v30.4s, v12.8h, v4.8h\n"
+ "ldr d4, [x5, #0x98]\n"
+ "smlal v27.4s, v28.4h, v3.4h\n"
+ "smlal v1.4s, v12.4h, v3.4h\n"
+ "smlal2 v6.4s, v28.8h, v3.8h\n"
+ "smlal2 v25.4s, v12.8h, v3.8h\n"
+ "ssubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v8.4s, v28.4h, v22.4h\n"
+ "smlal2 v0.4s, v28.8h, v22.8h\n"
+ "ldr d28, [x27, x2]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v3.4h\n"
+ "smlal2 v30.4s, v7.8h, v3.8h\n"
+ "ldr d3, [x5, #0xa0]\n"
+ "smlal v27.4s, v16.4h, v22.4h\n"
+ "smlal v1.4s, v7.4h, v22.4h\n"
+ "smlal2 v6.4s, v16.8h, v22.8h\n"
+ "ldr d16, [x26, x2]\n"
+ "smlal2 v25.4s, v7.8h, v22.8h\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "smlal v8.4s, v23.4h, v2.4h\n"
+ "smlal2 v0.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x5, #0xa8]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v22.4h\n"
+ "smlal2 v30.4s, v29.8h, v22.8h\n"
+ "ldr d22, [x25, x2]\n"
+ "smlal v27.4s, v20.4h, v2.4h\n"
+ "smlal v1.4s, v24.4h, v2.4h\n"
+ "ssubl v16.8h, v16.8b, v15.8b\n"
+ "smlal2 v6.4s, v20.8h, v2.8h\n"
+ "smlal2 v25.4s, v24.8h, v2.8h\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "ldr d20, [x24, x2]\n"
+ "ssubl v22.8h, v22.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v2.4h\n"
+ "smlal2 v30.4s, v17.8h, v2.8h\n"
+ "ldr d2, [x5, #0xb0]\n"
+ "smlal v27.4s, v19.4h, v18.4h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v6.4s, v19.8h, v18.8h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "smlal v8.4s, v19.4h, v31.4h\n"
+ "smlal2 v0.4s, v19.8h, v31.8h\n"
+ "ldr d19, [x23, x2]\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v18.4h\n"
+ "smlal2 v30.4s, v26.8h, v18.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "smlal v27.4s, v12.4h, v31.4h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v6.4s, v12.8h, v31.8h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v21.4h\n"
+ "smlal2 v0.4s, v12.8h, v21.8h\n"
+ "ldr d12, [x22, x2]\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v31.4h\n"
+ "smlal2 v30.4s, v11.8h, v31.8h\n"
+ "ldr d31, [x5, #0xc0]\n"
+ "smlal v27.4s, v7.4h, v21.4h\n"
+ "smlal v1.4s, v11.4h, v21.4h\n"
+ "smlal2 v6.4s, v7.8h, v21.8h\n"
+ "smlal2 v25.4s, v11.8h, v21.8h\n"
+ "ssubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v4.4h\n"
+ "smlal2 v0.4s, v7.8h, v4.8h\n"
+ "ldr d7, [x21, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v21.4h\n"
+ "smlal2 v30.4s, v28.8h, v21.8h\n"
+ "ldr d21, [x20, x2]\n"
+ "add x2, x2, #0x8\n"
+ "smlal v27.4s, v29.4h, v4.4h\n"
+ "smlal v1.4s, v28.4h, v4.4h\n"
+ "smlal2 v6.4s, v29.8h, v4.8h\n"
+ "ldr q29, [x6, #0x0]\n"
+ "smlal2 v25.4s, v28.8h, v4.8h\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v3.4h\n"
+ "smlal2 v0.4s, v24.8h, v3.8h\n"
+ "ldr q24, [x7, #0x0]\n"
+ "ssubl v21.8h, v21.8b, v15.8b\n"
+ "smlal v5.4s, v16.4h, v4.4h\n"
+ "smlal2 v30.4s, v16.8h, v4.8h\n"
+ "ldr q4, [x6, #0x10]\n"
+ "add x6, x6, #0x20\n"
+ "smlal v27.4s, v17.4h, v3.4h\n"
+ "smlal v1.4s, v22.4h, v3.4h\n"
+ "smlal2 v6.4s, v17.8h, v3.8h\n"
+ "smlal2 v25.4s, v22.8h, v3.8h\n"
+ "ldr q22, [x7, #0x10]\n"
"add x7, x7, #0x20\n"
- "smlal v24.4s, v2.4h, v1.4h\n"
- "smlal v23.4s, v16.4h, v1.4h\n"
- "smlal2 v15.4s, v14.8h, v30.8h\n"
- "smlal v7.4s, v25.4h, v31.4h\n"
- "smlal2 v5.4s, v14.8h, v1.8h\n"
- "ldr q14, [x8, #0x10]\n"
- "smlal2 v22.4s, v2.8h, v1.8h\n"
- "add x8, x8, #0x20\n"
- "smlal2 v19.4s, v16.8h, v1.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal v24.4s, v16.4h, v30.4h\n"
- "smlal v23.4s, v4.4h, v30.4h\n"
- "smlal2 v15.4s, v25.8h, v31.8h\n"
- "smlal v7.4s, v10.4h, v29.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal2 v22.4s, v16.8h, v30.8h\n"
- "smlal2 v19.4s, v4.8h, v30.8h\n"
- "smlal v20.4s, v10.4h, v31.4h\n"
- "smlal v24.4s, v4.4h, v31.4h\n"
- "smlal v23.4s, v17.4h, v31.4h\n"
- "smlal2 v15.4s, v10.8h, v29.8h\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "sqrdmulh v7.4s, v7.4s, v8.4s\n"
- "smlal2 v5.4s, v10.8h, v31.8h\n"
- "smlal2 v22.4s, v4.8h, v31.8h\n"
- "and v4.16b, v7.16b, v28.16b\n"
- "smlal2 v19.4s, v17.8h, v31.8h\n"
- "smlal v20.4s, v9.4h, v29.4h\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "smlal v24.4s, v17.4h, v29.4h\n"
- "smlal v23.4s, v6.4h, v29.4h\n"
- "sqadd v7.4s, v7.4s, v4.4s\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal2 v5.4s, v9.8h, v29.8h\n"
- "sqrdmulh v15.4s, v15.4s, v27.4s\n"
- "smlal2 v22.4s, v17.8h, v29.8h\n"
- "smlal2 v19.4s, v6.8h, v29.8h\n"
- "and v30.16b, v15.16b, v14.16b\n"
- "smlal v20.4s, v12.4h, v21.4h\n"
- "smlal v24.4s, v6.4h, v21.4h\n"
- "sqrdmulh v20.4s, v20.4s, v8.4s\n"
- "smlal v23.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v12.8h, v21.8h\n"
- "sqrdmulh v24.4s, v24.4s, v8.4s\n"
- "smlal2 v22.4s, v6.8h, v21.8h\n"
- "smlal2 v19.4s, v3.8h, v21.8h\n"
- "sqrdmulh v23.4s, v23.4s, v8.4s\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v3.16b, v20.16b, v28.16b\n"
- "sqrdmulh v5.4s, v5.4s, v27.4s\n"
- "and v25.16b, v24.16b, v28.16b\n"
- "sqrdmulh v22.4s, v22.4s, v27.4s\n"
- "and v16.16b, v23.16b, v28.16b\n"
- "sqrdmulh v19.4s, v19.4s, v27.4s\n"
- "sqadd v15.4s, v15.4s, v30.4s\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "and v4.16b, v5.16b, v14.16b\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "and v10.16b, v22.16b, v14.16b\n"
+ "smlal v8.4s, v17.4h, v23.4h\n"
+ "smlal2 v0.4s, v17.8h, v23.8h\n"
+ "smlal v5.4s, v20.4h, v3.4h\n"
+ "smlal2 v30.4s, v20.8h, v3.8h\n"
+ "smlal v27.4s, v26.4h, v23.4h\n"
+ "smlal v1.4s, v20.4h, v23.4h\n"
+ "smlal2 v6.4s, v26.8h, v23.8h\n"
+ "smlal2 v25.4s, v20.8h, v23.8h\n"
+ "smlal v8.4s, v26.4h, v2.4h\n"
+ "smlal2 v0.4s, v26.8h, v2.8h\n"
+ "smlal v5.4s, v19.4h, v23.4h\n"
+ "smlal2 v30.4s, v19.8h, v23.8h\n"
+ "smlal v27.4s, v11.4h, v2.4h\n"
+ "smlal v1.4s, v19.4h, v2.4h\n"
+ "smlal2 v6.4s, v11.8h, v2.8h\n"
+ "smlal2 v25.4s, v19.8h, v2.8h\n"
+ "smlal v8.4s, v11.4h, v18.4h\n"
+ "smlal2 v0.4s, v11.8h, v18.8h\n"
+ "smlal v5.4s, v12.4h, v2.4h\n"
+ "smlal2 v30.4s, v12.8h, v2.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal v1.4s, v12.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal2 v25.4s, v12.8h, v18.8h\n"
+ "smlal v8.4s, v28.4h, v31.4h\n"
+ "smlal2 v0.4s, v28.8h, v31.8h\n"
+ "smlal v5.4s, v7.4h, v18.4h\n"
+ "smlal2 v30.4s, v7.8h, v18.8h\n"
+ "smlal v27.4s, v16.4h, v31.4h\n"
+ "smlal v1.4s, v7.4h, v31.4h\n"
+ "smlal2 v6.4s, v16.8h, v31.8h\n"
+ "smlal2 v25.4s, v7.8h, v31.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v29.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v4.4s\n"
+ "smlal v5.4s, v21.4h, v31.4h\n"
+ "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "and v17.16b, v8.16b, v24.16b\n"
+ "sqrdmulh v27.4s, v27.4s, v29.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v29.4s\n"
+ "and v28.16b, v0.16b, v22.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v4.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v4.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v5.4s, v5.4s, v29.4s\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v16.16b, v27.16b, v24.16b\n"
+ "and v12.16b, v1.16b, v24.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v4.4s\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v11.16b, v5.16b, v24.16b\n"
+ "sqadd v0.4s, v0.4s, v28.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v12.16b, v19.16b, v14.16b\n"
- "sqadd v20.4s, v20.4s, v3.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v25.4s\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v16.4s\n"
+ "and v18.16b, v6.16b, v22.16b\n"
"sshr v12.4s, v12.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v28.4s\n"
- "srshl v20.4s, v20.4s, v28.4s\n"
- "sqadd v5.4s, v5.4s, v4.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqadd v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v28.4s\n"
- "sqadd v19.4s, v19.4s, v12.4s\n"
- "srshl v15.4s, v15.4s, v14.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v14.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v14.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v14.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "str d7, [x17, x4]\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str d20, [x16, x4]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str d24, [x15, x4]\n"
- "str d23, [x14, x4]\n"
- "add x4, x4, #0x8\n"
+ "and v17.16b, v25.16b, v22.16b\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "and v19.16b, v30.16b, v22.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v12.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v5.4s, v5.4s, v11.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v24.4s\n"
+ "srshl v27.4s, v27.4s, v24.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v24.4s\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "srshl v5.4s, v5.4s, v24.4s\n"
+ "sqadd v30.4s, v30.4s, v19.4s\n"
+ "srshl v0.4s, v0.4s, v22.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v22.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v22.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "str d8, [x8, x3]\n"
+ "str d27, [x17, x3]\n"
+ "str d1, [x16, x3]\n"
+ "str d5, [x15, x3]\n"
+ "add x3, x3, #0x8\n"
"beq 124f\n"
- "add x6, x6, #0xc8\n"
+ "add x5, x5, #0xc8\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
"tbz x1, #2, 5f\n"
- "ld1 { v7.4s }, [x20], #0x10\n"
+ "ld1 { v8.4s }, [x20], #0x10\n"
"tbz x1, #1, 4f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v0.d }[0], [x20], #0x8\n"
"tbz x1, #0, 7f\n"
- "ld1 { v15.s }[2], [x20]\n"
+ "ld1 { v0.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
"tbz x1, #0, 7f\n"
- "ld1 { v15.s }[0], [x20]\n"
+ "ld1 { v0.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
"tbz x1, #1, 6f\n"
- "ld1 { v7.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x20], #0x8\n"
"tbz x1, #0, 7f\n"
- "ld1 { v7.s }[2], [x20]\n"
+ "ld1 { v8.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 7f\n"
- "ld1 { v7.s }[0], [x20]\n"
+ "ld1 { v8.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldr d12, [x6, #0x20]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "ssubl v6.8h, v6.8b, v13.8b\n"
- "ssubl v14.8h, v14.8b, v13.8b\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "ssubl v10.8h, v10.8b, v13.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "add x9, x9, x3\n"
- "add x28, x28, x3\n"
- "add x27, x27, x3\n"
- "add x26, x26, x3\n"
- "add x25, x25, x3\n"
- "add x24, x24, x3\n"
- "add x23, x23, x3\n"
- "add x22, x22, x3\n"
- "add x21, x21, x3\n"
- "add x20, x20, x3\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "ldr d23, [x5, #0x20]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "ssubl v12.8h, v12.8b, v9.8b\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "ssubl v11.8h, v11.8b, v9.8b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "add x9, x9, x2\n"
+ "add x28, x28, x2\n"
+ "add x27, x27, x2\n"
+ "add x26, x26, x2\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "add x25, x25, x2\n"
+ "add x24, x24, x2\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "add x23, x23, x2\n"
+ "add x22, x22, x2\n"
+ "add x21, x21, x2\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 9f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "ld1 { v17.s }[0], [x28], #0x4\n"
- "ld1 { v30.s }[0], [x27], #0x4\n"
- "ld1 { v16.s }[0], [x26], #0x4\n"
- "ld1 { v3.s }[0], [x25], #0x4\n"
- "ld1 { v4.s }[0], [x24], #0x4\n"
- "ld1 { v25.s }[0], [x23], #0x4\n"
- "ld1 { v9.s }[0], [x22], #0x4\n"
+ "ld1 { v24.s }[0], [x9], #0x4\n"
+ "ld1 { v21.s }[0], [x28], #0x4\n"
+ "ld1 { v16.s }[0], [x27], #0x4\n"
+ "ld1 { v20.s }[0], [x26], #0x4\n"
+ "ld1 { v7.s }[0], [x25], #0x4\n"
+ "ld1 { v19.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "ld1 { v26.s }[0], [x22], #0x4\n"
"ld1 { v29.s }[0], [x21], #0x4\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
"tbz x1, #1, 8f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "ld1 { v17.h }[2], [x28], #0x2\n"
- "ld1 { v30.h }[2], [x27], #0x2\n"
- "ld1 { v16.h }[2], [x26], #0x2\n"
- "ld1 { v3.h }[2], [x25], #0x2\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v25.h }[2], [x23], #0x2\n"
- "ld1 { v9.h }[2], [x22], #0x2\n"
+ "ld1 { v24.h }[2], [x9], #0x2\n"
+ "ld1 { v21.h }[2], [x28], #0x2\n"
+ "ld1 { v16.h }[2], [x27], #0x2\n"
+ "ld1 { v20.h }[2], [x26], #0x2\n"
+ "ld1 { v7.h }[2], [x25], #0x2\n"
+ "ld1 { v19.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v26.h }[2], [x22], #0x2\n"
"ld1 { v29.h }[2], [x21], #0x2\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[6], [x9]\n"
- "ld1 { v17.b }[6], [x28]\n"
- "ld1 { v30.b }[6], [x27]\n"
- "ld1 { v16.b }[6], [x26]\n"
- "ld1 { v3.b }[6], [x25]\n"
- "ld1 { v4.b }[6], [x24]\n"
- "ld1 { v25.b }[6], [x23]\n"
- "ld1 { v9.b }[6], [x22]\n"
+ "ld1 { v24.b }[6], [x9]\n"
+ "ld1 { v21.b }[6], [x28]\n"
+ "ld1 { v16.b }[6], [x27]\n"
+ "ld1 { v20.b }[6], [x26]\n"
+ "ld1 { v7.b }[6], [x25]\n"
+ "ld1 { v19.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x23]\n"
+ "ld1 { v26.b }[6], [x22]\n"
"ld1 { v29.b }[6], [x21]\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[4], [x9]\n"
- "ld1 { v17.b }[4], [x28]\n"
- "ld1 { v30.b }[4], [x27]\n"
- "ld1 { v16.b }[4], [x26]\n"
- "ld1 { v3.b }[4], [x25]\n"
- "ld1 { v4.b }[4], [x24]\n"
- "ld1 { v25.b }[4], [x23]\n"
- "ld1 { v9.b }[4], [x22]\n"
+ "ld1 { v24.b }[4], [x9]\n"
+ "ld1 { v21.b }[4], [x28]\n"
+ "ld1 { v16.b }[4], [x27]\n"
+ "ld1 { v20.b }[4], [x26]\n"
+ "ld1 { v7.b }[4], [x25]\n"
+ "ld1 { v19.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x23]\n"
+ "ld1 { v26.b }[4], [x22]\n"
"ld1 { v29.b }[4], [x21]\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
"tbz x1, #1, 10f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "ld1 { v17.h }[0], [x28], #0x2\n"
- "ld1 { v30.h }[0], [x27], #0x2\n"
- "ld1 { v16.h }[0], [x26], #0x2\n"
- "ld1 { v3.h }[0], [x25], #0x2\n"
- "ld1 { v4.h }[0], [x24], #0x2\n"
- "ld1 { v25.h }[0], [x23], #0x2\n"
- "ld1 { v9.h }[0], [x22], #0x2\n"
+ "ld1 { v24.h }[0], [x9], #0x2\n"
+ "ld1 { v21.h }[0], [x28], #0x2\n"
+ "ld1 { v16.h }[0], [x27], #0x2\n"
+ "ld1 { v20.h }[0], [x26], #0x2\n"
+ "ld1 { v7.h }[0], [x25], #0x2\n"
+ "ld1 { v19.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "ld1 { v26.h }[0], [x22], #0x2\n"
"ld1 { v29.h }[0], [x21], #0x2\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[2], [x9]\n"
- "ld1 { v17.b }[2], [x28]\n"
- "ld1 { v30.b }[2], [x27]\n"
- "ld1 { v16.b }[2], [x26]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x24]\n"
- "ld1 { v25.b }[2], [x23]\n"
- "ld1 { v9.b }[2], [x22]\n"
+ "ld1 { v24.b }[2], [x9]\n"
+ "ld1 { v21.b }[2], [x28]\n"
+ "ld1 { v16.b }[2], [x27]\n"
+ "ld1 { v20.b }[2], [x26]\n"
+ "ld1 { v7.b }[2], [x25]\n"
+ "ld1 { v19.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x23]\n"
+ "ld1 { v26.b }[2], [x22]\n"
"ld1 { v29.b }[2], [x21]\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[0], [x9]\n"
- "ld1 { v17.b }[0], [x28]\n"
- "ld1 { v30.b }[0], [x27]\n"
- "ld1 { v16.b }[0], [x26]\n"
- "ld1 { v3.b }[0], [x25]\n"
- "ld1 { v4.b }[0], [x24]\n"
- "ld1 { v25.b }[0], [x23]\n"
- "ld1 { v9.b }[0], [x22]\n"
+ "ld1 { v24.b }[0], [x9]\n"
+ "ld1 { v21.b }[0], [x28]\n"
+ "ld1 { v16.b }[0], [x27]\n"
+ "ld1 { v20.b }[0], [x26]\n"
+ "ld1 { v7.b }[0], [x25]\n"
+ "ld1 { v19.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x23]\n"
+ "ld1 { v26.b }[0], [x22]\n"
"ld1 { v29.b }[0], [x21]\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "ssubl v31.8h, v31.8b, v18.8b\n"
- "ssubl v17.8h, v17.8b, v18.8b\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "ldr x20, [x5, #0x50]\n"
- "ssubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "ssubl v16.8h, v16.8b, v18.8b\n"
- "add x20, x20, x3\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "ssubl v3.8h, v3.8b, v18.8b\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "ssubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "ssubl v29.8h, v29.8b, v18.8b\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "ssubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
+ "ssubl v24.8h, v24.8b, v15.8b\n"
+ "ssubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0x50]\n"
+ "ssubl v16.8h, v16.8b, v15.8b\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "add x20, x20, x2\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "ssubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "ssubl v18.8h, v18.8b, v15.8b\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 13f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v4.s }[0], [x20], #0x4\n"
"tbz x1, #1, 12f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v4.h }[2], [x20], #0x2\n"
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v4.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v4.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
"tbz x1, #1, 14f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v4.h }[0], [x20], #0x2\n"
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v4.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v4.b }[0], [x20]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
- "ssubl v27.8h, v27.8b, v18.8b\n"
- "ldr x20, [x5, #0x58]\n"
- "smlal v23.4s, v27.4h, v10.4h\n"
- "smlal2 v19.4s, v27.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "smlal v24.4s, v27.4h, v21.4h\n"
- "smlal2 v22.4s, v27.8h, v21.8h\n"
+ "ssubl v4.8h, v4.8b, v15.8b\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal v5.4s, v4.4h, v17.4h\n"
+ "smlal2 v30.4s, v4.8h, v17.8h\n"
+ "smlal v1.4s, v4.4h, v11.4h\n"
+ "smlal2 v25.4s, v4.8h, v11.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 17f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
"tbz x1, #1, 16f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[6], [x20]\n"
+ "ld1 { v21.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[4], [x20]\n"
+ "ld1 { v21.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
"tbz x1, #1, 18f\n"
- "ld1 { v6.h }[0], [x20], #0x2\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[2], [x20]\n"
+ "ld1 { v21.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[0], [x20]\n"
+ "ld1 { v21.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
- "ssubl v6.8h, v6.8b, v18.8b\n"
- "ldr x20, [x5, #0x60]\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "add x20, x20, x3\n"
+ "ssubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0x60]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "smlal v5.4s, v21.4h, v11.4h\n"
+ "smlal2 v30.4s, v21.8h, v11.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 21f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
"tbz x1, #1, 20f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 5): Bit 2: Unset
"tbz x1, #1, 22f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v31.b }[0], [x20]\n"
"23:" // Oddments: Load (0, 5): Bit 2: End
- "ldr d14, [x6, #0x28]\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "smlal v20.4s, v9.4h, v12.4h\n"
- "smlal2 v5.4s, v9.8h, v12.8h\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "ssubl v14.8h, v14.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v30.4h, v14.4h\n"
- "smlal2 v15.4s, v30.8h, v14.8h\n"
- "smlal v20.4s, v16.4h, v14.4h\n"
- "smlal2 v5.4s, v16.8h, v14.8h\n"
- "smlal v24.4s, v28.4h, v14.4h\n"
- "smlal2 v22.4s, v28.8h, v14.8h\n"
+ "ldr d11, [x5, #0x28]\n"
+ "ssubl v31.8h, v31.8b, v15.8b\n"
+ "smlal v1.4s, v21.4h, v23.4h\n"
+ "smlal2 v25.4s, v21.8h, v23.8h\n"
+ "ldr x20, [x4, #0x68]\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "smlal v27.4s, v31.4h, v23.4h\n"
+ "smlal2 v6.4s, v31.8h, v23.8h\n"
+ "ssubl v11.8h, v11.8b, v9.8b\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v16.4h, v11.4h\n"
+ "smlal2 v0.4s, v16.8h, v11.8h\n"
+ "smlal v1.4s, v18.4h, v11.4h\n"
+ "smlal2 v25.4s, v18.8h, v11.8h\n"
+ "smlal v27.4s, v20.4h, v11.4h\n"
+ "smlal2 v6.4s, v20.8h, v11.8h\n"
"tbz x1, #2, 25f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v24.s }[0], [x20], #0x4\n"
"tbz x1, #1, 24f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v24.h }[2], [x20], #0x2\n"
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v24.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v24.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (2, 1): Bit 2: Unset
"tbz x1, #1, 26f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v24.h }[0], [x20], #0x2\n"
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v24.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v24.b }[0], [x20]\n"
"27:" // Oddments: Load (2, 1): Bit 2: End
- "ldr d21, [x6, #0x30]\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0x70]\n"
- "smlal v23.4s, v25.4h, v14.4h\n"
- "smlal2 v19.4s, v25.8h, v14.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v16.8h, v21.8h\n"
- "smlal v20.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v4.8h, v21.8h\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
+ "ldr d3, [x5, #0x30]\n"
+ "ssubl v24.8h, v24.8b, v15.8b\n"
+ "ldr x20, [x4, #0x70]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v24.4h, v11.4h\n"
+ "smlal2 v30.4s, v24.8h, v11.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v3.4h\n"
+ "smlal2 v0.4s, v20.8h, v3.8h\n"
+ "smlal v27.4s, v19.4h, v3.4h\n"
+ "smlal2 v6.4s, v19.8h, v3.8h\n"
+ "smlal v1.4s, v24.4h, v3.4h\n"
+ "smlal2 v25.4s, v24.8h, v3.8h\n"
"tbz x1, #2, 29f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v2.s }[0], [x20], #0x4\n"
"tbz x1, #1, 28f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ld1 { v2.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "ld1 { v2.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
"tbz x1, #1, 30f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v2.h }[0], [x20], #0x2\n"
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "ld1 { v2.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "ld1 { v2.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ldr d9, [x6, #0x38]\n"
- "ssubl v10.8h, v10.8b, v18.8b\n"
- "ssubl v9.8h, v9.8b, v13.8b\n"
- "ldr x20, [x5, #0x78]\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v4.4h, v9.4h\n"
- "smlal2 v15.4s, v4.8h, v9.8h\n"
- "smlal v20.4s, v27.4h, v9.4h\n"
- "smlal2 v5.4s, v27.8h, v9.8h\n"
- "smlal v24.4s, v10.4h, v9.4h\n"
- "smlal2 v22.4s, v10.8h, v9.8h\n"
+ "ldr d22, [x5, #0x38]\n"
+ "ssubl v2.8h, v2.8b, v15.8b\n"
+ "ldr x20, [x4, #0x78]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v2.4h, v3.4h\n"
+ "smlal2 v30.4s, v2.8h, v3.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v19.4h, v22.4h\n"
+ "smlal2 v0.4s, v19.8h, v22.8h\n"
+ "smlal v27.4s, v4.4h, v22.4h\n"
+ "smlal2 v6.4s, v4.8h, v22.8h\n"
+ "smlal v1.4s, v2.4h, v22.4h\n"
+ "smlal2 v25.4s, v2.8h, v22.8h\n"
"tbz x1, #2, 33f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
"tbz x1, #1, 32f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[6], [x20]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[4], [x20]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (2, 3): Bit 2: Unset
"tbz x1, #1, 34f\n"
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[2], [x20]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[0], [x20]\n"
+ "ld1 { v26.b }[0], [x20]\n"
"35:" // Oddments: Load (2, 3): Bit 2: End
- "ldr d31, [x6, #0x40]\n"
- "ssubl v12.8h, v12.8b, v18.8b\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "ldr x20, [x5, #0x80]\n"
- "smlal v23.4s, v12.4h, v9.4h\n"
- "smlal2 v19.4s, v12.8h, v9.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v27.4h, v31.4h\n"
- "smlal2 v15.4s, v27.8h, v31.8h\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "smlal v24.4s, v12.4h, v31.4h\n"
- "smlal2 v22.4s, v12.8h, v31.8h\n"
+ "ldr d31, [x5, #0x40]\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "ldr x20, [x4, #0x80]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v22.4h\n"
+ "smlal2 v30.4s, v26.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v4.4h, v31.4h\n"
+ "smlal2 v0.4s, v4.8h, v31.8h\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
"tbz x1, #2, 37f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
"tbz x1, #1, 36f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "ld1 { v28.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "ld1 { v28.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 4): Bit 2: Unset
"tbz x1, #1, 38f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "ld1 { v28.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "ld1 { v28.b }[0], [x20]\n"
"39:" // Oddments: Load (2, 4): Bit 2: End
- "ldr d16, [x6, #0x48]\n"
- "ssubl v8.8h, v8.8b, v18.8b\n"
- "ssubl v16.8h, v16.8b, v13.8b\n"
- "ldr x20, [x5, #0x88]\n"
- "smlal v23.4s, v8.4h, v31.4h\n"
- "smlal2 v19.4s, v8.8h, v31.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v6.4h, v16.4h\n"
- "smlal2 v15.4s, v6.8h, v16.8h\n"
- "smlal v20.4s, v29.4h, v16.4h\n"
- "smlal2 v5.4s, v29.8h, v16.8h\n"
- "smlal v24.4s, v8.4h, v16.4h\n"
- "smlal2 v22.4s, v8.8h, v16.8h\n"
+ "ldr d17, [x5, #0x48]\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x20, [x4, #0x88]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v31.4h\n"
+ "smlal2 v30.4s, v28.8h, v31.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v21.4h, v17.4h\n"
+ "smlal2 v0.4s, v21.8h, v17.8h\n"
+ "smlal v27.4s, v29.4h, v17.4h\n"
+ "smlal2 v6.4s, v29.8h, v17.8h\n"
+ "smlal v1.4s, v28.4h, v17.4h\n"
+ "smlal2 v25.4s, v28.8h, v17.8h\n"
"tbz x1, #2, 41f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
"tbz x1, #1, 40f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v7.h }[2], [x20], #0x2\n"
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v7.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v7.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 5): Bit 2: Unset
"tbz x1, #1, 42f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v7.h }[0], [x20], #0x2\n"
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v7.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v7.b }[0], [x20]\n"
"43:" // Oddments: Load (2, 5): Bit 2: End
- "ldr d21, [x6, #0x50]\n"
- "ssubl v27.8h, v27.8b, v18.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0x90]\n"
- "smlal v23.4s, v27.4h, v16.4h\n"
- "smlal2 v19.4s, v27.8h, v16.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "smlal v20.4s, v25.4h, v21.4h\n"
- "smlal2 v5.4s, v25.8h, v21.8h\n"
+ "ldr d22, [x5, #0x50]\n"
+ "ssubl v7.8h, v7.8b, v15.8b\n"
+ "ldr x20, [x4, #0x90]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v17.4h\n"
+ "smlal2 v30.4s, v7.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v18.4h, v22.4h\n"
+ "smlal2 v0.4s, v18.8h, v22.8h\n"
+ "smlal v27.4s, v24.4h, v22.4h\n"
+ "smlal2 v6.4s, v24.8h, v22.8h\n"
"tbz x1, #2, 45f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
"tbz x1, #1, 44f\n"
- "ld1 { v31.h }[2], [x20], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[6], [x20]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[4], [x20]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (3, 0): Bit 2: Unset
"tbz x1, #1, 46f\n"
- "ld1 { v31.h }[0], [x20], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[2], [x20]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[0], [x20]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"47:" // Oddments: Load (3, 0): Bit 2: End
- "ssubl v31.8h, v31.8b, v18.8b\n"
- "ldr x20, [x5, #0x98]\n"
- "smlal v24.4s, v31.4h, v21.4h\n"
- "smlal2 v22.4s, v31.8h, v21.8h\n"
- "add x20, x20, x3\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x20, [x4, #0x98]\n"
+ "smlal v1.4s, v20.4h, v22.4h\n"
+ "smlal2 v25.4s, v20.8h, v22.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 49f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v19.s }[0], [x20], #0x4\n"
"tbz x1, #1, 48f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x20], #0x2\n"
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
"tbz x1, #1, 50f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v19.h }[0], [x20], #0x2\n"
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v19.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ldr d2, [x6, #0x58]\n"
- "ssubl v28.8h, v28.8b, v18.8b\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "ldr x20, [x5, #0xa0]\n"
- "smlal v23.4s, v28.4h, v21.4h\n"
- "smlal2 v19.4s, v28.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v25.4h, v2.4h\n"
- "smlal2 v15.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v10.4h, v2.4h\n"
- "smlal2 v5.4s, v10.8h, v2.8h\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal2 v22.4s, v28.8h, v2.8h\n"
+ "ldr d17, [x5, #0x58]\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
+ "ldr x20, [x4, #0xa0]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v22.4h\n"
+ "smlal2 v30.4s, v19.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v24.4h, v17.4h\n"
+ "smlal2 v0.4s, v24.8h, v17.8h\n"
+ "smlal v27.4s, v2.4h, v17.4h\n"
+ "smlal2 v6.4s, v2.8h, v17.8h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 53f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
"tbz x1, #1, 52f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
"tbz x1, #1, 54f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "ld1 { v29.b }[0], [x20]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ldr d25, [x6, #0x60]\n"
- "ssubl v21.8h, v21.8b, v18.8b\n"
- "ssubl v25.8h, v25.8b, v13.8b\n"
- "ldr x20, [x5, #0xa8]\n"
- "smlal v23.4s, v21.4h, v2.4h\n"
- "smlal2 v19.4s, v21.8h, v2.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v10.4h, v25.4h\n"
- "smlal2 v15.4s, v10.8h, v25.8h\n"
- "smlal v20.4s, v12.4h, v25.4h\n"
- "smlal2 v5.4s, v12.8h, v25.8h\n"
- "smlal v24.4s, v21.4h, v25.4h\n"
- "smlal2 v22.4s, v21.8h, v25.8h\n"
+ "ldr d24, [x5, #0x60]\n"
+ "ssubl v29.8h, v29.8b, v15.8b\n"
+ "ldr x20, [x4, #0xa8]\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v17.4h\n"
+ "smlal2 v30.4s, v29.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v2.4h, v24.4h\n"
+ "smlal2 v0.4s, v2.8h, v24.8h\n"
+ "smlal v27.4s, v26.4h, v24.4h\n"
+ "smlal2 v6.4s, v26.8h, v24.8h\n"
+ "smlal v1.4s, v29.4h, v24.4h\n"
+ "smlal2 v25.4s, v29.8h, v24.8h\n"
"tbz x1, #2, 57f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
"tbz x1, #1, 56f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 59f\n"
"56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 59f\n"
"57:" // Oddments: Load (3, 3): Bit 2: Unset
"tbz x1, #1, 58f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 59f\n"
"58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v31.b }[0], [x20]\n"
"59:" // Oddments: Load (3, 3): Bit 2: End
- "ldr d1, [x6, #0x68]\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "ldr x20, [x5, #0xb0]\n"
- "smlal v23.4s, v9.4h, v25.4h\n"
- "smlal2 v19.4s, v9.8h, v25.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v12.4h, v1.4h\n"
- "smlal2 v15.4s, v12.8h, v1.8h\n"
- "smlal v20.4s, v8.4h, v1.4h\n"
- "smlal2 v5.4s, v8.8h, v1.8h\n"
- "smlal v24.4s, v9.4h, v1.4h\n"
- "smlal2 v22.4s, v9.8h, v1.8h\n"
+ "ldr d17, [x5, #0x68]\n"
+ "ssubl v31.8h, v31.8b, v15.8b\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v31.4h, v24.4h\n"
+ "smlal2 v30.4s, v31.8h, v24.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v26.4h, v17.4h\n"
+ "smlal2 v0.4s, v26.8h, v17.8h\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "smlal v1.4s, v31.4h, v17.4h\n"
+ "smlal2 v25.4s, v31.8h, v17.8h\n"
"tbz x1, #2, 61f\n"
- "ld1 { v3.s }[0], [x20], #0x4\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
"tbz x1, #1, 60f\n"
- "ld1 { v3.h }[2], [x20], #0x2\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[6], [x20]\n"
+ "ld1 { v21.b }[6], [x20]\n"
"b 63f\n"
"60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[4], [x20]\n"
+ "ld1 { v21.b }[4], [x20]\n"
"b 63f\n"
"61:" // Oddments: Load (3, 4): Bit 2: Unset
"tbz x1, #1, 62f\n"
- "ld1 { v3.h }[0], [x20], #0x2\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[2], [x20]\n"
+ "ld1 { v21.b }[2], [x20]\n"
"b 63f\n"
"62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[0], [x20]\n"
+ "ld1 { v21.b }[0], [x20]\n"
"63:" // Oddments: Load (3, 4): Bit 2: End
- "ldr d16, [x6, #0x70]\n"
- "ssubl v3.8h, v3.8b, v18.8b\n"
- "ssubl v16.8h, v16.8b, v13.8b\n"
- "ldr x20, [x5, #0xb8]\n"
- "smlal v23.4s, v3.4h, v1.4h\n"
- "smlal2 v19.4s, v3.8h, v1.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "smlal2 v15.4s, v8.8h, v16.8h\n"
- "smlal v20.4s, v27.4h, v16.4h\n"
- "smlal2 v5.4s, v27.8h, v16.8h\n"
- "smlal v24.4s, v3.4h, v16.4h\n"
- "smlal2 v22.4s, v3.8h, v16.8h\n"
+ "ldr d22, [x5, #0x70]\n"
+ "ssubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0xb8]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v21.4h, v17.4h\n"
+ "smlal2 v30.4s, v21.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v28.4h, v22.4h\n"
+ "smlal2 v0.4s, v28.8h, v22.8h\n"
+ "smlal v27.4s, v7.4h, v22.4h\n"
+ "smlal2 v6.4s, v7.8h, v22.8h\n"
+ "smlal v1.4s, v21.4h, v22.4h\n"
+ "smlal2 v25.4s, v21.8h, v22.8h\n"
"tbz x1, #2, 65f\n"
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x20], #0x4\n"
"tbz x1, #1, 64f\n"
- "ld1 { v14.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x20], #0x2\n"
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[6], [x20]\n"
+ "ld1 { v11.b }[6], [x20]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[4], [x20]\n"
+ "ld1 { v11.b }[4], [x20]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 5): Bit 2: Unset
"tbz x1, #1, 66f\n"
- "ld1 { v14.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x20], #0x2\n"
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[2], [x20]\n"
+ "ld1 { v11.b }[2], [x20]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[0], [x20]\n"
+ "ld1 { v11.b }[0], [x20]\n"
"67:" // Oddments: Load (3, 5): Bit 2: End
- "ldr d17, [x6, #0x78]\n"
- "ssubl v14.8h, v14.8b, v18.8b\n"
- "ssubl v17.8h, v17.8b, v13.8b\n"
- "ldr x20, [x5, #0xc0]\n"
- "smlal v23.4s, v14.4h, v16.4h\n"
- "smlal2 v19.4s, v14.8h, v16.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v31.4h, v17.4h\n"
- "smlal2 v15.4s, v31.8h, v17.8h\n"
- "smlal v20.4s, v28.4h, v17.4h\n"
- "smlal2 v5.4s, v28.8h, v17.8h\n"
+ "ldr d17, [x5, #0x78]\n"
+ "ssubl v11.8h, v11.8b, v15.8b\n"
+ "ldr x20, [x4, #0xc0]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v22.4h\n"
+ "smlal2 v30.4s, v11.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v17.4h\n"
+ "smlal2 v0.4s, v20.8h, v17.8h\n"
+ "smlal v27.4s, v19.4h, v17.4h\n"
+ "smlal2 v6.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 69f\n"
- "ld1 { v1.s }[0], [x20], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
"tbz x1, #1, 68f\n"
- "ld1 { v1.h }[2], [x20], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[6], [x20]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[4], [x20]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 0): Bit 2: Unset
"tbz x1, #1, 70f\n"
- "ld1 { v1.h }[0], [x20], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"71:" // Oddments: Load (4, 0): Bit 2: End
- "ssubl v1.8h, v1.8b, v18.8b\n"
- "ldr x20, [x5, #0xc8]\n"
- "smlal v24.4s, v1.4h, v17.4h\n"
- "smlal2 v22.4s, v1.8h, v17.8h\n"
- "add x20, x20, x3\n"
+ "ssubl v18.8h, v18.8b, v15.8b\n"
+ "ldr x20, [x4, #0xc8]\n"
+ "smlal v1.4s, v18.4h, v17.4h\n"
+ "smlal2 v25.4s, v18.8h, v17.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 73f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
"tbz x1, #1, 72f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 1): Bit 2: Unset
"tbz x1, #1, 74f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 1): Bit 2: End
- "ldr d29, [x6, #0x80]\n"
- "ssubl v16.8h, v16.8b, v18.8b\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "ldr x20, [x5, #0xd0]\n"
- "smlal v23.4s, v16.4h, v17.4h\n"
- "smlal2 v19.4s, v16.8h, v17.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v28.4h, v29.4h\n"
- "smlal2 v15.4s, v28.8h, v29.8h\n"
- "smlal v20.4s, v21.4h, v29.4h\n"
- "smlal2 v5.4s, v21.8h, v29.8h\n"
- "smlal v24.4s, v16.4h, v29.4h\n"
- "smlal2 v22.4s, v16.8h, v29.8h\n"
+ "ldr d4, [x5, #0x80]\n"
+ "ssubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x20, [x4, #0xd0]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v20.4h, v17.4h\n"
+ "smlal2 v30.4s, v20.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v19.4h, v4.4h\n"
+ "smlal2 v0.4s, v19.8h, v4.8h\n"
+ "smlal v27.4s, v29.4h, v4.4h\n"
+ "smlal2 v6.4s, v29.8h, v4.8h\n"
+ "smlal v1.4s, v20.4h, v4.4h\n"
+ "smlal2 v25.4s, v20.8h, v4.8h\n"
"tbz x1, #2, 77f\n"
- "ld1 { v30.s }[0], [x20], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
"tbz x1, #1, 76f\n"
- "ld1 { v30.h }[2], [x20], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[6], [x20]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[4], [x20]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 2): Bit 2: Unset
"tbz x1, #1, 78f\n"
- "ld1 { v30.h }[0], [x20], #0x2\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[2], [x20]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[0], [x20]\n"
+ "ld1 { v26.b }[0], [x20]\n"
"79:" // Oddments: Load (4, 2): Bit 2: End
- "ldr d12, [x6, #0x88]\n"
- "ssubl v30.8h, v30.8b, v18.8b\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "ldr x20, [x5, #0xd8]\n"
- "smlal v23.4s, v30.4h, v29.4h\n"
- "smlal2 v19.4s, v30.8h, v29.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v21.4h, v12.4h\n"
- "smlal2 v15.4s, v21.8h, v12.8h\n"
- "smlal v20.4s, v9.4h, v12.4h\n"
- "smlal2 v5.4s, v9.8h, v12.8h\n"
- "smlal v24.4s, v30.4h, v12.4h\n"
- "smlal2 v22.4s, v30.8h, v12.8h\n"
+ "ldr d17, [x5, #0x88]\n"
+ "ssubl v26.8h, v26.8b, v15.8b\n"
+ "ldr x20, [x4, #0xd8]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v4.4h\n"
+ "smlal2 v30.4s, v26.8h, v4.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v29.4h, v17.4h\n"
+ "smlal2 v0.4s, v29.8h, v17.8h\n"
+ "smlal v27.4s, v31.4h, v17.4h\n"
+ "smlal2 v6.4s, v31.8h, v17.8h\n"
+ "smlal v1.4s, v26.4h, v17.4h\n"
+ "smlal2 v25.4s, v26.8h, v17.8h\n"
"tbz x1, #2, 81f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
+ "ld1 { v23.s }[0], [x20], #0x4\n"
"tbz x1, #1, 80f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
+ "ld1 { v23.h }[2], [x20], #0x2\n"
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "ld1 { v23.b }[6], [x20]\n"
"b 83f\n"
"80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "ld1 { v23.b }[4], [x20]\n"
"b 83f\n"
"81:" // Oddments: Load (4, 3): Bit 2: Unset
"tbz x1, #1, 82f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
+ "ld1 { v23.h }[0], [x20], #0x2\n"
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "ld1 { v23.b }[2], [x20]\n"
"b 83f\n"
"82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "ld1 { v23.b }[0], [x20]\n"
"83:" // Oddments: Load (4, 3): Bit 2: End
- "ldr d21, [x6, #0x90]\n"
- "ssubl v29.8h, v29.8b, v18.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0xe0]\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal v20.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v3.8h, v21.8h\n"
- "smlal v24.4s, v29.4h, v21.4h\n"
- "smlal2 v22.4s, v29.8h, v21.8h\n"
+ "ldr d22, [x5, #0x90]\n"
+ "ssubl v23.8h, v23.8b, v15.8b\n"
+ "ldr x20, [x4, #0xe0]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v23.4h, v17.4h\n"
+ "smlal2 v30.4s, v23.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v31.4h, v22.4h\n"
+ "smlal2 v0.4s, v31.8h, v22.8h\n"
+ "smlal v27.4s, v21.4h, v22.4h\n"
+ "smlal2 v6.4s, v21.8h, v22.8h\n"
+ "smlal v1.4s, v23.4h, v22.4h\n"
+ "smlal2 v25.4s, v23.8h, v22.8h\n"
"tbz x1, #2, 85f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
"tbz x1, #1, 84f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v28.b }[6], [x20]\n"
"b 87f\n"
"84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v28.b }[4], [x20]\n"
"b 87f\n"
"85:" // Oddments: Load (4, 4): Bit 2: Unset
"tbz x1, #1, 86f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v28.b }[2], [x20]\n"
"b 87f\n"
"86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v28.b }[0], [x20]\n"
"87:" // Oddments: Load (4, 4): Bit 2: End
- "ldr d8, [x6, #0x98]\n"
- "ssubl v25.8h, v25.8b, v18.8b\n"
- "ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr x20, [x5, #0xe8]\n"
- "smlal v23.4s, v25.4h, v21.4h\n"
- "smlal2 v19.4s, v25.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v3.4h, v8.4h\n"
- "smlal2 v15.4s, v3.8h, v8.8h\n"
- "smlal v20.4s, v14.4h, v8.4h\n"
- "smlal2 v5.4s, v14.8h, v8.8h\n"
- "smlal v24.4s, v25.4h, v8.4h\n"
- "smlal2 v22.4s, v25.8h, v8.8h\n"
+ "ldr d17, [x5, #0x98]\n"
+ "ssubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x20, [x4, #0xe8]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v22.4h\n"
+ "smlal2 v30.4s, v28.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v21.4h, v17.4h\n"
+ "smlal2 v0.4s, v21.8h, v17.8h\n"
+ "smlal v27.4s, v11.4h, v17.4h\n"
+ "smlal2 v6.4s, v11.8h, v17.8h\n"
+ "smlal v1.4s, v28.4h, v17.4h\n"
+ "smlal2 v25.4s, v28.8h, v17.8h\n"
"tbz x1, #2, 89f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
"tbz x1, #1, 88f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 91f\n"
"88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 91f\n"
"89:" // Oddments: Load (4, 5): Bit 2: Unset
"tbz x1, #1, 90f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 91f\n"
"90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "ld1 { v16.b }[0], [x20]\n"
"91:" // Oddments: Load (4, 5): Bit 2: End
- "ldr d9, [x6, #0xa0]\n"
- "ssubl v21.8h, v21.8b, v18.8b\n"
- "ssubl v9.8h, v9.8b, v13.8b\n"
- "ldr x20, [x5, #0xf0]\n"
- "smlal v23.4s, v21.4h, v8.4h\n"
- "smlal2 v19.4s, v21.8h, v8.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v1.4h, v9.4h\n"
- "smlal2 v15.4s, v1.8h, v9.8h\n"
- "smlal v20.4s, v16.4h, v9.4h\n"
- "smlal2 v5.4s, v16.8h, v9.8h\n"
+ "ldr d3, [x5, #0xa0]\n"
+ "ssubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x20, [x4, #0xf0]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v17.4h\n"
+ "smlal2 v30.4s, v16.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v18.4h, v3.4h\n"
+ "smlal2 v0.4s, v18.8h, v3.8h\n"
+ "smlal v27.4s, v20.4h, v3.4h\n"
+ "smlal2 v6.4s, v20.8h, v3.8h\n"
"tbz x1, #2, 93f\n"
"ld1 { v12.s }[0], [x20], #0x4\n"
"tbz x1, #1, 92f\n"
@@ -1871,308 +1871,308 @@ void a64_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
"tbz x1, #0, 95f\n"
"ld1 { v12.b }[0], [x20]\n"
"95:" // Oddments: Load (5, 0): Bit 2: End
- "ssubl v12.8h, v12.8b, v18.8b\n"
- "ldr x20, [x5, #0xf8]\n"
- "smlal v24.4s, v12.4h, v9.4h\n"
- "smlal2 v22.4s, v12.8h, v9.8h\n"
- "add x20, x20, x3\n"
+ "ssubl v12.8h, v12.8b, v15.8b\n"
+ "ldr x20, [x4, #0xf8]\n"
+ "smlal v1.4s, v12.4h, v3.4h\n"
+ "smlal2 v25.4s, v12.8h, v3.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 97f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 96f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 99f\n"
"96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 99f\n"
"97:" // Oddments: Load (5, 1): Bit 2: Unset
"tbz x1, #1, 98f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 99f\n"
"98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"99:" // Oddments: Load (5, 1): Bit 2: End
- "ldr d12, [x6, #0xa8]\n"
- "ssubl v10.8h, v10.8b, v18.8b\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "ldr x20, [x5, #0x100]\n"
- "smlal v23.4s, v10.4h, v9.4h\n"
- "smlal2 v19.4s, v10.8h, v9.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v16.4h, v12.4h\n"
- "smlal2 v15.4s, v16.8h, v12.8h\n"
- "smlal v20.4s, v30.4h, v12.4h\n"
- "smlal2 v5.4s, v30.8h, v12.8h\n"
- "smlal v24.4s, v10.4h, v12.4h\n"
- "smlal2 v22.4s, v10.8h, v12.8h\n"
+ "ldr d18, [x5, #0xa8]\n"
+ "ssubl v17.8h, v17.8b, v15.8b\n"
+ "ldr x20, [x4, #0x100]\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v17.4h, v3.4h\n"
+ "smlal2 v30.4s, v17.8h, v3.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "smlal v27.4s, v26.4h, v18.4h\n"
+ "smlal2 v6.4s, v26.8h, v18.8h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
"tbz x1, #2, 101f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v19.s }[0], [x20], #0x4\n"
"tbz x1, #1, 100f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x20], #0x2\n"
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x20]\n"
"b 103f\n"
"100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x20]\n"
"b 103f\n"
"101:" // Oddments: Load (5, 2): Bit 2: Unset
"tbz x1, #1, 102f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v19.h }[0], [x20], #0x2\n"
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 103f\n"
"102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v19.b }[0], [x20]\n"
"103:" // Oddments: Load (5, 2): Bit 2: End
- "ldr d28, [x6, #0xb0]\n"
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "ssubl v28.8h, v28.8b, v13.8b\n"
- "ldr x20, [x5, #0x108]\n"
- "smlal v23.4s, v9.4h, v12.4h\n"
- "smlal2 v19.4s, v9.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v30.4h, v28.4h\n"
- "smlal2 v15.4s, v30.8h, v28.8h\n"
- "smlal v20.4s, v29.4h, v28.4h\n"
- "smlal2 v5.4s, v29.8h, v28.8h\n"
- "smlal v24.4s, v9.4h, v28.4h\n"
- "smlal2 v22.4s, v9.8h, v28.8h\n"
+ "ldr d12, [x5, #0xb0]\n"
+ "ssubl v19.8h, v19.8b, v15.8b\n"
+ "ldr x20, [x4, #0x108]\n"
+ "ssubl v12.8h, v12.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v18.4h\n"
+ "smlal2 v30.4s, v19.8h, v18.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v26.4h, v12.4h\n"
+ "smlal2 v0.4s, v26.8h, v12.8h\n"
+ "smlal v27.4s, v23.4h, v12.4h\n"
+ "smlal2 v6.4s, v23.8h, v12.8h\n"
+ "smlal v1.4s, v19.4h, v12.4h\n"
+ "smlal2 v25.4s, v19.8h, v12.8h\n"
"tbz x1, #2, 105f\n"
- "ld1 { v2.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 104f\n"
- "ld1 { v2.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 107f\n"
"104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 107f\n"
"105:" // Oddments: Load (5, 3): Bit 2: Unset
"tbz x1, #1, 106f\n"
- "ld1 { v2.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 107f\n"
"106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"107:" // Oddments: Load (5, 3): Bit 2: End
- "ldr d30, [x6, #0xb8]\n"
- "ssubl v2.8h, v2.8b, v18.8b\n"
- "ssubl v30.8h, v30.8b, v13.8b\n"
- "ldr x20, [x5, #0x110]\n"
- "smlal v23.4s, v2.4h, v28.4h\n"
- "smlal2 v19.4s, v2.8h, v28.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v29.4h, v30.4h\n"
- "smlal2 v15.4s, v29.8h, v30.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal v24.4s, v2.4h, v30.4h\n"
- "smlal2 v22.4s, v2.8h, v30.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "ssubl v17.8h, v17.8b, v15.8b\n"
+ "ldr x20, [x4, #0x110]\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v17.4h, v12.4h\n"
+ "smlal2 v30.4s, v17.8h, v12.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v23.4h, v18.4h\n"
+ "smlal2 v0.4s, v23.8h, v18.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
"tbz x1, #2, 109f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v3.s }[0], [x20], #0x4\n"
"tbz x1, #1, 108f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v3.h }[2], [x20], #0x2\n"
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v3.b }[6], [x20]\n"
"b 111f\n"
"108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v3.b }[4], [x20]\n"
"b 111f\n"
"109:" // Oddments: Load (5, 4): Bit 2: Unset
"tbz x1, #1, 110f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v3.h }[0], [x20], #0x2\n"
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v3.b }[2], [x20]\n"
"b 111f\n"
"110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v3.b }[0], [x20]\n"
"111:" // Oddments: Load (5, 4): Bit 2: End
- "ldr d8, [x6, #0xc0]\n"
- "ssubl v27.8h, v27.8b, v18.8b\n"
- "ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr x20, [x5, #0x118]\n"
- "smlal v23.4s, v27.4h, v30.4h\n"
- "smlal2 v19.4s, v27.8h, v30.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v25.4h, v8.4h\n"
- "smlal2 v15.4s, v25.8h, v8.8h\n"
- "smlal v20.4s, v21.4h, v8.4h\n"
- "smlal2 v5.4s, v21.8h, v8.8h\n"
- "smlal v24.4s, v27.4h, v8.4h\n"
- "smlal2 v22.4s, v27.8h, v8.8h\n"
+ "ldr d26, [x5, #0xc0]\n"
+ "ssubl v3.8h, v3.8b, v15.8b\n"
+ "ldr x20, [x4, #0x118]\n"
+ "ssubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v5.4s, v3.4h, v18.4h\n"
+ "smlal2 v30.4s, v3.8h, v18.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v28.4h, v26.4h\n"
+ "smlal2 v0.4s, v28.8h, v26.8h\n"
+ "smlal v27.4s, v16.4h, v26.4h\n"
+ "smlal2 v6.4s, v16.8h, v26.8h\n"
+ "smlal v1.4s, v3.4h, v26.4h\n"
+ "smlal2 v25.4s, v3.8h, v26.8h\n"
"tbz x1, #2, 113f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 112f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 115f\n"
"112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 115f\n"
"113:" // Oddments: Load (5, 5): Bit 2: Unset
"tbz x1, #1, 114f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 115f\n"
"114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"115:" // Oddments: Load (5, 5): Bit 2: End
- "ssubl v9.8h, v9.8b, v18.8b\n"
- "smlal v23.4s, v9.4h, v8.4h\n"
- "smlal2 v19.4s, v9.8h, v8.8h\n"
+ "ssubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v26.4h\n"
+ "smlal2 v30.4s, v17.8h, v26.8h\n"
"tbz x1, #2, 117f\n"
- "ld1 { v30.4s }, [x7], #0x10\n"
- "ld1 { v12.4s }, [x8], #0x10\n"
+ "ld1 { v9.4s }, [x6], #0x10\n"
+ "ld1 { v20.4s }, [x7], #0x10\n"
"tbz x1, #1, 116f\n"
- "ld1 { v14.d }[0], [x7], #0x8\n"
- "ld1 { v27.d }[0], [x8], #0x8\n"
+ "ld1 { v18.d }[0], [x6], #0x8\n"
+ "ld1 { v3.d }[0], [x7], #0x8\n"
"tbz x1, #0, 119f\n"
- "ld1 { v14.s }[2], [x7]\n"
- "ld1 { v27.s }[2], [x8]\n"
+ "ld1 { v18.s }[2], [x6]\n"
+ "ld1 { v3.s }[2], [x7]\n"
"b 119f\n"
"116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
"tbz x1, #0, 119f\n"
- "ld1 { v14.s }[0], [x7]\n"
- "ld1 { v27.s }[0], [x8]\n"
+ "ld1 { v18.s }[0], [x6]\n"
+ "ld1 { v3.s }[0], [x7]\n"
"b 119f\n"
"117:" // Oddments: Load requant params: Bit 2: Unset
"tbz x1, #1, 118f\n"
- "ld1 { v30.d }[0], [x7], #0x8\n"
- "ld1 { v12.d }[0], [x8], #0x8\n"
+ "ld1 { v9.d }[0], [x6], #0x8\n"
+ "ld1 { v20.d }[0], [x7], #0x8\n"
"tbz x1, #0, 119f\n"
- "ld1 { v30.s }[2], [x7]\n"
- "ld1 { v12.s }[2], [x8]\n"
+ "ld1 { v9.s }[2], [x6]\n"
+ "ld1 { v20.s }[2], [x7]\n"
"b 119f\n"
"118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 119f\n"
- "ld1 { v30.s }[0], [x7]\n"
- "ld1 { v12.s }[0], [x8]\n"
+ "ld1 { v9.s }[0], [x6]\n"
+ "ld1 { v20.s }[0], [x7]\n"
"119:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v7.4s, v7.4s, v30.4s\n"
- "and v16.16b, v7.16b, v12.16b\n"
- "add x17, x17, x4\n"
- "add x16, x16, x4\n"
- "sqrdmulh v15.4s, v15.4s, v14.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "add x15, x15, x4\n"
- "add x14, x14, x4\n"
- "and v2.16b, v15.16b, v27.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "sqrdmulh v24.4s, v24.4s, v30.4s\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "sqadd v7.4s, v7.4s, v16.4s\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "and v21.16b, v20.16b, v12.16b\n"
- "sqrdmulh v5.4s, v5.4s, v14.4s\n"
- "and v18.16b, v24.16b, v12.16b\n"
- "sqrdmulh v22.4s, v22.4s, v14.4s\n"
- "and v31.16b, v23.16b, v12.16b\n"
- "sqrdmulh v19.4s, v19.4s, v14.4s\n"
- "sqadd v15.4s, v15.4s, v2.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v9.16b, v5.16b, v27.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v4.16b, v22.16b, v27.16b\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v28.16b, v19.16b, v27.16b\n"
- "sqadd v20.4s, v20.4s, v21.4s\n"
+ "sqrdmulh v8.4s, v8.4s, v9.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v18.4s\n"
+ "add x8, x8, x3\n"
+ "add x17, x17, x3\n"
+ "sqrdmulh v27.4s, v27.4s, v9.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v9.4s\n"
+ "add x16, x16, x3\n"
+ "add x15, x15, x3\n"
+ "sqrdmulh v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v18.4s\n"
+ "and v17.16b, v8.16b, v20.16b\n"
+ "and v23.16b, v0.16b, v3.16b\n"
+ "and v9.16b, v27.16b, v20.16b\n"
+ "and v26.16b, v1.16b, v20.16b\n"
+ "sqrdmulh v25.4s, v25.4s, v18.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v18.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v18.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v31.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v12.4s\n"
- "srshl v20.4s, v20.4s, v12.4s\n"
- "sqadd v5.4s, v5.4s, v9.4s\n"
- "srshl v24.4s, v24.4s, v12.4s\n"
- "sqadd v22.4s, v22.4s, v4.4s\n"
- "srshl v23.4s, v23.4s, v12.4s\n"
- "sqadd v19.4s, v19.4s, v28.4s\n"
- "srshl v15.4s, v15.4s, v27.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v27.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v27.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v27.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "and v24.16b, v6.16b, v3.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v18.16b, v25.16b, v3.16b\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v17.16b, v5.16b, v20.16b\n"
+ "sqadd v0.4s, v0.4s, v23.4s\n"
+ "and v16.16b, v30.16b, v3.16b\n"
+ "sqadd v27.4s, v27.4s, v9.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v26.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v20.4s\n"
+ "srshl v27.4s, v27.4s, v20.4s\n"
+ "sqadd v5.4s, v5.4s, v17.4s\n"
+ "sqadd v6.4s, v6.4s, v24.4s\n"
+ "srshl v1.4s, v1.4s, v20.4s\n"
+ "sqadd v25.4s, v25.4s, v18.4s\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "srshl v0.4s, v0.4s, v3.4s\n"
+ "srshl v5.4s, v5.4s, v20.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v3.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v3.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v3.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
"tbz x1, #2, 121f\n"
- "st1 { v7.s }[0], [x17], #0x4\n"
- "st1 { v20.s }[0], [x16], #0x4\n"
- "st1 { v24.s }[0], [x15], #0x4\n"
- "st1 { v23.s }[0], [x14], #0x4\n"
+ "st1 { v8.s }[0], [x8], #0x4\n"
+ "st1 { v27.s }[0], [x17], #0x4\n"
+ "st1 { v1.s }[0], [x16], #0x4\n"
+ "st1 { v5.s }[0], [x15], #0x4\n"
"tbz x1, #1, 120f\n"
- "st1 { v7.h }[2], [x17], #0x2\n"
- "st1 { v20.h }[2], [x16], #0x2\n"
- "st1 { v24.h }[2], [x15], #0x2\n"
- "st1 { v23.h }[2], [x14], #0x2\n"
+ "st1 { v8.h }[2], [x8], #0x2\n"
+ "st1 { v27.h }[2], [x17], #0x2\n"
+ "st1 { v1.h }[2], [x16], #0x2\n"
+ "st1 { v5.h }[2], [x15], #0x2\n"
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[6], [x17], #0x1\n"
- "st1 { v20.b }[6], [x16], #0x1\n"
- "st1 { v24.b }[6], [x15], #0x1\n"
- "st1 { v23.b }[6], [x14], #0x1\n"
+ "st1 { v8.b }[6], [x8], #0x1\n"
+ "st1 { v27.b }[6], [x17], #0x1\n"
+ "st1 { v1.b }[6], [x16], #0x1\n"
+ "st1 { v5.b }[6], [x15], #0x1\n"
"b 123f\n"
"120:" // Oddments: Bit 2: Bit 1: Unset
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[4], [x17], #0x1\n"
- "st1 { v20.b }[4], [x16], #0x1\n"
- "st1 { v24.b }[4], [x15], #0x1\n"
- "st1 { v23.b }[4], [x14], #0x1\n"
+ "st1 { v8.b }[4], [x8], #0x1\n"
+ "st1 { v27.b }[4], [x17], #0x1\n"
+ "st1 { v1.b }[4], [x16], #0x1\n"
+ "st1 { v5.b }[4], [x15], #0x1\n"
"b 123f\n"
"121:" // Oddments: Bit 2: Unset
"tbz x1, #1, 122f\n"
- "st1 { v7.h }[0], [x17], #0x2\n"
- "st1 { v20.h }[0], [x16], #0x2\n"
- "st1 { v24.h }[0], [x15], #0x2\n"
- "st1 { v23.h }[0], [x14], #0x2\n"
+ "st1 { v8.h }[0], [x8], #0x2\n"
+ "st1 { v27.h }[0], [x17], #0x2\n"
+ "st1 { v1.h }[0], [x16], #0x2\n"
+ "st1 { v5.h }[0], [x15], #0x2\n"
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[2], [x17], #0x1\n"
- "st1 { v20.b }[2], [x16], #0x1\n"
- "st1 { v24.b }[2], [x15], #0x1\n"
- "st1 { v23.b }[2], [x14], #0x1\n"
+ "st1 { v8.b }[2], [x8], #0x1\n"
+ "st1 { v27.b }[2], [x17], #0x1\n"
+ "st1 { v1.b }[2], [x16], #0x1\n"
+ "st1 { v5.b }[2], [x15], #0x1\n"
"b 123f\n"
"122:" // Oddments: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[0], [x17], #0x1\n"
- "st1 { v20.b }[0], [x16], #0x1\n"
- "st1 { v24.b }[0], [x15], #0x1\n"
- "st1 { v23.b }[0], [x14], #0x1\n"
+ "st1 { v8.b }[0], [x8], #0x1\n"
+ "st1 { v27.b }[0], [x17], #0x1\n"
+ "st1 { v1.b }[0], [x16], #0x1\n"
+ "st1 { v5.b }[0], [x15], #0x1\n"
"123:" // Oddments: Bit 2: End
"124:" // End
:
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
index 77b7d231e0..dbdcedccf3 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,21 +45,21 @@ void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(
"lsr x9, %x[n_channels], #0x2\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
"ld1r { v8.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v7.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_maxval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v7.4s }, [x21]\n"
"ld1r { v6.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v5.16b }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v5.16b }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v3.4s }, [x21]\n"
"ld1r { v2.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
"mov x11, #0x0\n"
+ "ld1r { v1.4s }, [x20]\n"
"cbz x9, 6f\n"
"1:" // Channel loop
"movi v23.4s, #0x0\n"
@@ -68,75 +68,75 @@ void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldr q23, [%x[bias], x20]\n"
"2:" // Channel loop: Load bias: Done
"ldr s0, [%x[params]], #0x4\n"
- "mov x25, %x[inptrs]\n"
- "ldp x21, x20, [x25], #0x10\n"
- "subs x24, %x[n_points], #0x1\n"
- "ldr s14, [x21, x11]\n"
- "ldr s15, [x20, x11]\n"
+ "mov x23, %x[inptrs]\n"
+ "subs x22, %x[n_points], #0x1\n"
"mov v24.16b, v23.16b\n"
"mov v25.16b, v23.16b\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s16, [x21, x11]\n"
"mov v26.16b, v23.16b\n"
"mov v27.16b, v23.16b\n"
- "ldr s17, [x20, x11]\n"
- "ldp x21, x20, [x25], #0x10\n"
"mov v28.16b, v23.16b\n"
+ "ldp x21, x20, [x23], #0x10\n"
"mov v29.16b, v23.16b\n"
- "ldr s18, [x21, x11]\n"
- "ldr s19, [x20, x11]\n"
"mov v30.16b, v23.16b\n"
"mov v31.16b, v23.16b\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s20, [x21, x11]\n"
"ssubl v0.8h, v0.8b, v5.8b\n"
+ "ldr s14, [x21, x11]\n"
+ "ldr s15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"ssubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x20, x11]\n"
- "ldr x20, [x25], #0x8\n"
"ssubl v15.8h, v15.8b, v6.8b\n"
+ "ldr s16, [x21, x11]\n"
+ "ldr s17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x20, x11]\n"
+ "ldr s18, [x21, x11]\n"
+ "ldr s19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"ssubl v17.8h, v17.8b, v6.8b\n"
"ssubl v18.8h, v18.8b, v6.8b\n"
"ssubl v19.8h, v19.8b, v6.8b\n"
+ "ldr s20, [x21, x11]\n"
+ "ldr s21, [x20, x11]\n"
+ "ldr x20, [x23], #0x8\n"
"ssubl v20.8h, v20.8b, v6.8b\n"
"ssubl v21.8h, v21.8b, v6.8b\n"
+ "ldr s22, [x20, x11]\n"
"ssubl v22.8h, v22.8b, v6.8b\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x23, x22, [x25], #0x10\n"
- "ldp x21, x20, [x25], #0x10\n"
+ "ldp x21, x20, [x23], #0x10\n"
"smlal v23.4s, v14.4h, v0.4h\n"
"smlal v24.4s, v15.4h, v0.4h\n"
- "ldr s14, [x23, x11]\n"
- "ldr s15, [x22, x11]\n"
+ "subs x22, x22, #0x1\n"
"smlal v25.4s, v16.4h, v0.4h\n"
"smlal v26.4s, v17.4h, v0.4h\n"
- "ldr s16, [x21, x11]\n"
- "ldr s17, [x20, x11]\n"
"smlal v27.4s, v18.4h, v0.4h\n"
"smlal v28.4s, v19.4h, v0.4h\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s18, [x21, x11]\n"
+ "ldr s14, [x21, x11]\n"
+ "ldr s15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"smlal v29.4s, v20.4h, v0.4h\n"
"smlal v30.4s, v21.4h, v0.4h\n"
- "ldr s19, [x20, x11]\n"
- "ldp x21, x20, [x25], #0x10\n"
"smlal v31.4s, v22.4h, v0.4h\n"
- "subs x24, x24, #0x1\n"
"ldr s0, [%x[params]], #0x4\n"
- "ldr s20, [x21, x11]\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
"ssubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x20, x11]\n"
- "ldr x20, [x25], #0x8\n"
"ssubl v15.8h, v15.8b, v6.8b\n"
+ "ldr s16, [x21, x11]\n"
+ "ldr s17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ssubl v0.8h, v0.8b, v5.8b\n"
"ssubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x20, x11]\n"
"ssubl v17.8h, v17.8b, v6.8b\n"
+ "ldr s18, [x21, x11]\n"
+ "ldr s19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"ssubl v18.8h, v18.8b, v6.8b\n"
"ssubl v19.8h, v19.8b, v6.8b\n"
+ "ldr s20, [x21, x11]\n"
+ "ldr s21, [x20, x11]\n"
+ "ldr x20, [x23], #0x8\n"
"ssubl v20.8h, v20.8b, v6.8b\n"
+ "ldr s22, [x20, x11]\n"
"ssubl v21.8h, v21.8b, v6.8b\n"
"ssubl v22.8h, v22.8b, v6.8b\n"
"bgt 3b\n"
@@ -162,27 +162,27 @@ void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
"sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sshl v26.4s, v26.4s, v3.4s\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "and v18.16b, v23.16b, v1.16b\n"
- "and v17.16b, v24.16b, v1.16b\n"
- "and v16.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
"sshl v27.4s, v27.4s, v3.4s\n"
"sshl v28.4s, v28.4s, v3.4s\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v2.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v2.4s\n"
"sshl v29.4s, v29.4s, v3.4s\n"
"sshl v30.4s, v30.4s, v3.4s\n"
"sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v18.16b, v23.16b, v1.16b\n"
+ "and v17.16b, v24.16b, v1.16b\n"
+ "and v16.16b, v25.16b, v1.16b\n"
"sqrdmulh v26.4s, v26.4s, v2.4s\n"
"sqrdmulh v27.4s, v27.4s, v2.4s\n"
"sqrdmulh v28.4s, v28.4s, v2.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v2.4s\n"
@@ -254,17 +254,17 @@ void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(
"uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s23, [x28, x11]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s24, [x27, x11]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s25, [x26, x11]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x11]\n"
+ "str s23, [x28, x11]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x11]\n"
+ "str s25, [x26, x11]\n"
+ "str s26, [x25, x11]\n"
"str s27, [x24, x11]\n"
"str s28, [x23, x11]\n"
"str s29, [x22, x11]\n"
@@ -290,24 +290,24 @@ void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(
"9:" // Oddments: Load bias: Done
"ldr s0, [%x[params]], #0x4\n"
"mov x10, %x[inptrs]\n"
- "ldp x9, x28, [x10], #0x10\n"
"mov v24.16b, v23.16b\n"
- "ldp x27, x26, [x10], #0x10\n"
- "ldp x25, x24, [x10], #0x10\n"
"mov v25.16b, v23.16b\n"
"mov v26.16b, v23.16b\n"
- "ldp x23, x22, [x10], #0x10\n"
- "ldr x21, [x10], #0x8\n"
"mov v27.16b, v23.16b\n"
"mov v28.16b, v23.16b\n"
"mov v29.16b, v23.16b\n"
+ "ldp x9, x28, [x10], #0x10\n"
"mov v30.16b, v23.16b\n"
- "add x9, x9, x11\n"
- "add x28, x28, x11\n"
"mov v31.16b, v23.16b\n"
"ssubl v0.8h, v0.8b, v5.8b\n"
+ "ldp x27, x26, [x10], #0x10\n"
+ "add x9, x9, x11\n"
+ "add x28, x28, x11\n"
+ "ldp x25, x24, [x10], #0x10\n"
"add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
+ "ldr x21, [x10], #0x8\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
@@ -358,27 +358,27 @@ void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(
"ble 15f\n"
"12:" // Oddments: Planar loop
"ldp x9, x28, [x10], #0x10\n"
- "ldp x27, x26, [x10], #0x10\n"
"smlal v23.4s, v14.4h, v0.4h\n"
"smlal v24.4s, v15.4h, v0.4h\n"
- "ldp x25, x24, [x10], #0x10\n"
- "ldp x23, x22, [x10], #0x10\n"
"smlal v25.4s, v16.4h, v0.4h\n"
"smlal v26.4s, v17.4h, v0.4h\n"
"smlal v27.4s, v18.4h, v0.4h\n"
"smlal v28.4s, v19.4h, v0.4h\n"
- "ldr x21, [x10], #0x8\n"
- "add x9, x9, x11\n"
+ "ldp x27, x26, [x10], #0x10\n"
"smlal v29.4s, v20.4h, v0.4h\n"
"smlal v30.4s, v21.4h, v0.4h\n"
+ "add x9, x9, x11\n"
"add x28, x28, x11\n"
- "add x27, x27, x11\n"
"smlal v31.4s, v22.4h, v0.4h\n"
"ldr s0, [%x[params]], #0x4\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
+ "ssubl v0.8h, v0.8b, v5.8b\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
+ "ldr x21, [x10], #0x8\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
"add x21, x21, x11\n"
@@ -465,36 +465,36 @@ void a64_s8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
"sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sshl v26.4s, v26.4s, v3.4s\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+ "sshl v27.4s, v27.4s, v3.4s\n"
+ "sshl v28.4s, v28.4s, v3.4s\n"
"ldr x20, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v2.4s\n"
"add x28, x28, x11\n"
- "and v18.16b, v23.16b, v1.16b\n"
- "and v17.16b, v24.16b, v1.16b\n"
"add x27, x27, x11\n"
+ "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+ "sshl v29.4s, v29.4s, v3.4s\n"
"add x26, x26, x11\n"
- "and v16.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
"add x25, x25, x11\n"
+ "sshl v30.4s, v30.4s, v3.4s\n"
+ "sshl v31.4s, v31.4s, v3.4s\n"
"add x24, x24, x11\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
"add x23, x23, x11\n"
+ "and v18.16b, v23.16b, v1.16b\n"
+ "and v17.16b, v24.16b, v1.16b\n"
"add x22, x22, x11\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
"add x21, x21, x11\n"
+ "and v16.16b, v25.16b, v1.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v2.4s\n"
"add x20, x20, x11\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v2.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v2.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
"sqrdmulh v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v2.4s\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
index be8fbfa0e2..ff03a6e340 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,162 +41,162 @@ void a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "ldr q11, [%x[params], #0x0]\n"
+ "ldr q14, [%x[params], #0x0]\n"
"ldr q5, [%x[params], #0x10]\n"
- "movi v8.16b, #0x1\n"
- "ushr v8.4s, v8.4s, #0x8\n"
+ "movi v18.16b, #0x1\n"
+ "movi v24.4s, #0x0\n"
"ldr q6, [%x[params], #0x20]\n"
"ldr q7, [%x[params], #0x30]\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
- "ld1 { v1.16b }, [x20]\n"
- "mov v28.16b, v1.16b\n"
- "mov v23.16b, v1.16b\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ld1 { v2.16b }, [x20]\n"
- "mov v30.16b, v1.16b\n"
- "mov v21.16b, v2.16b\n"
- "ldr x20, [%x[inptrs], #0x20]\n"
- "ld1 { v4.16b }, [x20]\n"
- "mov v20.16b, v2.16b\n"
- "mov v29.16b, v2.16b\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "ld1 { v0.16b }, [x20]\n"
- "mov v9.16b, v4.16b\n"
- "mov v22.16b, v4.16b\n"
- "ldr x20, [%x[inptrs], #0x18]\n"
- "ld1 { v3.16b }, [x20]\n"
- "mov v31.16b, v4.16b\n"
- "ext v28.16b, v28.16b, v28.16b, #0x2\n"
- "ext v23.16b, v23.16b, v23.16b, #0x4\n"
- "ext v30.16b, v30.16b, v30.16b, #0x6\n"
+ "movi v28.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "ldr x25, [%x[inptrs], #0x8]\n"
+ "ldr x24, [%x[inptrs], #0x10]\n"
+ "ushr v18.4s, v18.4s, #0x8\n"
+ "movi v27.4s, #0x0\n"
+ "ldr x23, [%x[inptrs], #0x20]\n"
+ "ldr x22, [%x[inptrs], #0x0]\n"
+ "movi v21.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
+ "movi v13.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
"add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.4s }, [x20]\n"
- "ext v21.16b, v21.16b, v21.16b, #0x2\n"
- "ext v20.16b, v20.16b, v20.16b, #0x4\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
- "ext v29.16b, v29.16b, v29.16b, #0x6\n"
- "ext v9.16b, v9.16b, v9.16b, #0x2\n"
+ "ld1 { v1.16b }, [x25]\n"
+ "ld1 { v2.16b }, [x24]\n"
+ "movi v23.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ld1 { v4.16b }, [x23]\n"
+ "ld1 { v0.16b }, [x22]\n"
+ "movi v20.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "ld1 { v3.16b }, [x21]\n"
+ "ld1r { v19.4s }, [x20]\n"
+ "movi v22.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "mov v31.16b, v1.16b\n"
+ "mov v9.16b, v1.16b\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_c_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "ext v22.16b, v22.16b, v22.16b, #0x4\n"
- "ext v31.16b, v31.16b, v31.16b, #0x6\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v15.4s }, [x20]\n"
- "mov v27.16b, v0.16b\n"
- "mov v19.16b, v0.16b\n"
+ "ld1r { v11.4s }, [x21]\n"
+ "ld1r { v10.4s }, [x20]\n"
+ "mov v16.16b, v1.16b\n"
+ "mov v30.16b, v2.16b\n"
+ "mov v29.16b, v2.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x2\n"
+ "add x10, %x[qp], %[offsetof_Requantize32_maxval]\n"
"cmp %x[n_channels], #0x4\n"
+ "ext v9.16b, v9.16b, v9.16b, #0x4\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x6\n"
"mov x9, #0x0\n"
- "mov v18.16b, v0.16b\n"
- "mov v26.16b, v3.16b\n"
"mov x28, #0x0\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x2\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x4\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
- "mov v17.16b, v3.16b\n"
- "mov v16.16b, v3.16b\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "neg v19.4s, v19.4s\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
- "ext v27.16b, v27.16b, v27.16b, #0x2\n"
- "ext v19.16b, v19.16b, v19.16b, #0x4\n"
"ldp x21, x20, [%x[outptrs], #0x30]\n"
"add %x[params], %x[params], #0x40\n"
- "ext v18.16b, v18.16b, v18.16b, #0x6\n"
- "zip1 v1.4s, v1.4s, v23.4s\n"
- "zip1 v28.4s, v28.4s, v30.4s\n"
- "zip1 v2.4s, v2.4s, v20.4s\n"
- "zip1 v21.4s, v21.4s, v29.4s\n"
- "ext v26.16b, v26.16b, v26.16b, #0x2\n"
- "ext v17.16b, v17.16b, v17.16b, #0x4\n"
+ "zip1 v1.4s, v1.4s, v9.4s\n"
+ "ld1r { v9.4s }, [x10]\n"
+ "zip1 v31.4s, v31.4s, v16.4s\n"
+ "mov v16.16b, v2.16b\n"
+ "zip1 v2.4s, v2.4s, v29.4s\n"
+ "mov v29.16b, v4.16b\n"
"ext v16.16b, v16.16b, v16.16b, #0x6\n"
- "zip1 v4.4s, v4.4s, v22.4s\n"
- "zip1 v9.4s, v9.4s, v31.4s\n"
- "zip1 v0.4s, v0.4s, v19.4s\n"
- "zip1 v27.4s, v27.4s, v18.4s\n"
- "zip1 v1.4s, v1.4s, v28.4s\n"
- "zip1 v2.4s, v2.4s, v21.4s\n"
- ".inst 0x4f81e118 // sdot v24.4s, v8.16b, v1.4b[0]\n"
- "zip1 v3.4s, v3.4s, v17.4s\n"
- "zip1 v26.4s, v26.4s, v16.4s\n"
- ".inst 0x4fa1e119 // sdot v25.4s, v8.16b, v1.4b[1]\n"
- "zip1 v4.4s, v4.4s, v9.4s\n"
- "movi v23.4s, #0x0\n"
- ".inst 0x4f81e917 // sdot v23.4s, v8.16b, v1.4b[2]\n"
- "movi v22.4s, #0x0\n"
- "movi v21.4s, #0x0\n"
- ".inst 0x4fa1e916 // sdot v22.4s, v8.16b, v1.4b[3]\n"
- "movi v19.4s, #0x0\n"
- "movi v9.4s, #0x0\n"
- ".inst 0x4f82e115 // sdot v21.4s, v8.16b, v2.4b[0]\n"
- "movi v10.4s, #0x0\n"
- "movi v20.4s, #0x0\n"
- ".inst 0x4fa2e113 // sdot v19.4s, v8.16b, v2.4b[1]\n"
- "movi v18.4s, #0x0\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4f82e909 // sdot v9.4s, v8.16b, v2.4b[2]\n"
- "movi v16.4s, #0x0\n"
- "zip1 v0.4s, v0.4s, v27.4s\n"
- ".inst 0x4fa2e90a // sdot v10.4s, v8.16b, v2.4b[3]\n"
- "zip1 v3.4s, v3.4s, v26.4s\n"
- ".inst 0x4f84e114 // sdot v20.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x4fa4e112 // sdot v18.4s, v8.16b, v4.4b[1]\n"
- ".inst 0x4f84e911 // sdot v17.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x4fa4e910 // sdot v16.4s, v8.16b, v4.4b[3]\n"
- "movi v31.4s, #0x0\n"
+ "zip1 v1.4s, v1.4s, v31.4s\n"
+ "mov v31.16b, v4.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x2\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x4\n"
+ "zip1 v30.4s, v30.4s, v16.4s\n"
+ "mov v16.16b, v4.16b\n"
+ ".inst 0x4f81e258 // sdot v24.4s, v18.16b, v1.4b[0]\n"
+ ".inst 0x4fa1e25c // sdot v28.4s, v18.16b, v1.4b[1]\n"
+ ".inst 0x4f81ea5a // sdot v26.4s, v18.16b, v1.4b[2]\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x6\n"
+ "zip1 v4.4s, v4.4s, v31.4s\n"
+ "mov v31.16b, v0.16b\n"
+ ".inst 0x4fa1ea5b // sdot v27.4s, v18.16b, v1.4b[3]\n"
+ "zip1 v2.4s, v2.4s, v30.4s\n"
+ "mov v30.16b, v0.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x2\n"
+ "zip1 v29.4s, v29.4s, v16.4s\n"
+ "mov v16.16b, v0.16b\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x4\n"
+ ".inst 0x4f82e255 // sdot v21.4s, v18.16b, v2.4b[0]\n"
+ ".inst 0x4fa2e24c // sdot v12.4s, v18.16b, v2.4b[1]\n"
+ ".inst 0x4f82ea4d // sdot v13.4s, v18.16b, v2.4b[2]\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x6\n"
+ "zip1 v4.4s, v4.4s, v29.4s\n"
+ "mov v29.16b, v3.16b\n"
+ ".inst 0x4fa2ea4f // sdot v15.4s, v18.16b, v2.4b[3]\n"
+ "zip1 v0.4s, v0.4s, v30.4s\n"
+ "mov v30.16b, v3.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x2\n"
+ "zip1 v31.4s, v31.4s, v16.4s\n"
+ "mov v16.16b, v3.16b\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x4\n"
+ ".inst 0x4f84e257 // sdot v23.4s, v18.16b, v4.4b[0]\n"
+ ".inst 0x4fa4e248 // sdot v8.4s, v18.16b, v4.4b[1]\n"
+ ".inst 0x4f84ea54 // sdot v20.4s, v18.16b, v4.4b[2]\n"
+ "add v24.4s, v24.4s, v21.4s\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x6\n"
+ "zip1 v0.4s, v0.4s, v31.4s\n"
+ ".inst 0x4fa4ea51 // sdot v17.4s, v18.16b, v4.4b[3]\n"
+ "zip1 v3.4s, v3.4s, v30.4s\n"
"movi v30.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- ".inst 0x4f80e11f // sdot v31.4s, v8.16b, v0.4b[0]\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- ".inst 0x4fa0e11e // sdot v30.4s, v8.16b, v0.4b[1]\n"
+ "movi v31.4s, #0x0\n"
+ "add v28.4s, v28.4s, v12.4s\n"
+ "zip1 v29.4s, v29.4s, v16.4s\n"
+ "movi v16.4s, #0x0\n"
+ ".inst 0x4f80e256 // sdot v22.4s, v18.16b, v0.4b[0]\n"
+ ".inst 0x4fa0e259 // sdot v25.4s, v18.16b, v0.4b[1]\n"
+ ".inst 0x4f80ea5e // sdot v30.4s, v18.16b, v0.4b[2]\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ ".inst 0x4fa0ea5f // sdot v31.4s, v18.16b, v0.4b[3]\n"
+ "add v27.4s, v27.4s, v15.4s\n"
+ "zip1 v3.4s, v3.4s, v29.4s\n"
"movi v29.4s, #0x0\n"
- ".inst 0x4f80e91a // sdot v26.4s, v8.16b, v0.4b[2]\n"
- ".inst 0x4fa0e91b // sdot v27.4s, v8.16b, v0.4b[3]\n"
- ".inst 0x4f83e11c // sdot v28.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x4fa3e11d // sdot v29.4s, v8.16b, v3.4b[1]\n"
- "add v24.4s, v24.4s, v21.4s\n"
- "add v25.4s, v25.4s, v19.4s\n"
- "add v23.4s, v23.4s, v9.4s\n"
- "add v22.4s, v22.4s, v10.4s\n"
- "add v21.4s, v20.4s, v21.4s\n"
- "movi v20.4s, #0x0\n"
- ".inst 0x4f83e914 // sdot v20.4s, v8.16b, v3.4b[2]\n"
- "add v19.4s, v18.4s, v19.4s\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x4fa3e912 // sdot v18.4s, v8.16b, v3.4b[3]\n"
- "add v17.4s, v17.4s, v9.4s\n"
- "add v16.4s, v16.4s, v10.4s\n"
- "add v24.4s, v24.4s, v31.4s\n"
- "add v25.4s, v25.4s, v30.4s\n"
- "add v26.4s, v23.4s, v26.4s\n"
- "add v27.4s, v22.4s, v27.4s\n"
- "add v28.4s, v21.4s, v28.4s\n"
- "add v29.4s, v19.4s, v29.4s\n"
- "add v30.4s, v17.4s, v20.4s\n"
- "add v31.4s, v16.4s, v18.4s\n"
- "neg v12.4s, v12.4s\n"
- "mul v24.4s, v24.4s, v12.4s\n"
- "mul v25.4s, v25.4s, v12.4s\n"
- "mul v26.4s, v26.4s, v12.4s\n"
- "mul v27.4s, v27.4s, v12.4s\n"
- "mul v28.4s, v28.4s, v12.4s\n"
- "mul v29.4s, v29.4s, v12.4s\n"
- "mul v30.4s, v30.4s, v12.4s\n"
- "mul v31.4s, v31.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v21.4s\n"
+ "movi v21.4s, #0x0\n"
+ "add v12.4s, v8.4s, v12.4s\n"
+ "movi v8.4s, #0x0\n"
+ ".inst 0x4f83e250 // sdot v16.4s, v18.16b, v3.4b[0]\n"
+ ".inst 0x4fa3e25d // sdot v29.4s, v18.16b, v3.4b[1]\n"
+ ".inst 0x4f83ea55 // sdot v21.4s, v18.16b, v3.4b[2]\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ ".inst 0x4fa3ea48 // sdot v8.4s, v18.16b, v3.4b[3]\n"
+ "add v17.4s, v17.4s, v15.4s\n"
+ "add v24.4s, v24.4s, v22.4s\n"
+ "add v25.4s, v28.4s, v25.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v31.4s\n"
+ "add v28.4s, v23.4s, v16.4s\n"
+ "add v29.4s, v12.4s, v29.4s\n"
+ "add v30.4s, v20.4s, v21.4s\n"
+ "add v31.4s, v17.4s, v8.4s\n"
+ "mul v24.4s, v24.4s, v19.4s\n"
+ "mul v25.4s, v25.4s, v19.4s\n"
+ "mul v26.4s, v26.4s, v19.4s\n"
+ "mul v27.4s, v27.4s, v19.4s\n"
+ "mul v28.4s, v28.4s, v19.4s\n"
+ "mul v29.4s, v29.4s, v19.4s\n"
+ "mul v30.4s, v30.4s, v19.4s\n"
+ "mul v31.4s, v31.4s, v19.4s\n"
"zip1 v19.4s, v24.4s, v26.4s\n"
"zip1 v18.4s, v25.4s, v27.4s\n"
+ "add v24.4s, v24.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v14.4s\n"
"zip1 v17.4s, v28.4s, v30.4s\n"
"zip1 v16.4s, v29.4s, v31.4s\n"
"zip1 v22.4s, v19.4s, v18.4s\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v27.4s, v27.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v14.4s\n"
"zip1 v23.4s, v17.4s, v16.4s\n"
- "add v24.4s, v24.4s, v11.4s\n"
- "add v25.4s, v25.4s, v11.4s\n"
- "add v26.4s, v26.4s, v11.4s\n"
- "add v27.4s, v27.4s, v11.4s\n"
- "add v28.4s, v28.4s, v11.4s\n"
- "add v29.4s, v29.4s, v11.4s\n"
- "add v30.4s, v30.4s, v11.4s\n"
- "add v31.4s, v31.4s, v11.4s\n"
+ "add v29.4s, v29.4s, v14.4s\n"
+ "add v30.4s, v30.4s, v14.4s\n"
+ "add v31.4s, v31.4s, v14.4s\n"
"ble 2f\n"
"1:" // Loop
"ldr q8, [%x[params], #0x0]\n"
@@ -207,96 +207,96 @@ void a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
".inst 0x4f80e8ba // sdot v26.4s, v5.16b, v0.4b[2]\n"
".inst 0x4fa0e8bb // sdot v27.4s, v5.16b, v0.4b[3]\n"
"sub %x[n_channels], %x[n_channels], #0x4\n"
- ".inst 0x4f81e0d8 // sdot v24.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x4fa1e0d9 // sdot v25.4s, v6.16b, v1.4b[1]\n"
- "cmp %x[n_channels], #0x4\n"
- "add x9, x9, #0x10\n"
- ".inst 0x4f81e8da // sdot v26.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x4fa1e8db // sdot v27.4s, v6.16b, v1.4b[3]\n"
".inst 0x4f82e0bc // sdot v28.4s, v5.16b, v2.4b[0]\n"
".inst 0x4fa2e0bd // sdot v29.4s, v5.16b, v2.4b[1]\n"
+ "cmp %x[n_channels], #0x4\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f82e8be // sdot v30.4s, v5.16b, v2.4b[2]\n"
".inst 0x4fa2e8bf // sdot v31.4s, v5.16b, v2.4b[3]\n"
"ldr q5, [%x[params], #0x30]\n"
- ".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x4fa2e0f9 // sdot v25.4s, v7.16b, v2.4b[1]\n"
- "sqrdmulh v24.4s, v24.4s, v8.4s\n"
- ".inst 0x4f82e8fa // sdot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x4fa2e8fb // sdot v27.4s, v7.16b, v2.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ ".inst 0x4f81e0d8 // sdot v24.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4fa1e0d9 // sdot v25.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x4f81e8da // sdot v26.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x4fa1e8db // sdot v27.4s, v6.16b, v1.4b[3]\n"
".inst 0x4f83e0dc // sdot v28.4s, v6.16b, v3.4b[0]\n"
".inst 0x4fa3e0dd // sdot v29.4s, v6.16b, v3.4b[1]\n"
- "sqrdmulh v26.4s, v26.4s, v8.4s\n"
".inst 0x4f83e8de // sdot v30.4s, v6.16b, v3.4b[2]\n"
".inst 0x4fa3e8df // sdot v31.4s, v6.16b, v3.4b[3]\n"
"ldr q6, [%x[params], #0x40]\n"
- "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ ".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4fa2e0f9 // sdot v25.4s, v7.16b, v2.4b[1]\n"
+ ".inst 0x4f82e8fa // sdot v26.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x4fa2e8fb // sdot v27.4s, v7.16b, v2.4b[3]\n"
".inst 0x4f84e0fc // sdot v28.4s, v7.16b, v4.4b[0]\n"
".inst 0x4fa4e0fd // sdot v29.4s, v7.16b, v4.4b[1]\n"
- "and v19.16b, v24.16b, v21.16b\n"
".inst 0x4f84e8fe // sdot v30.4s, v7.16b, v4.4b[2]\n"
".inst 0x4fa4e8ff // sdot v31.4s, v7.16b, v4.4b[3]\n"
"ldr q7, [%x[params], #0x50]\n"
+ "add %x[params], %x[params], #0x60\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "and v19.16b, v24.16b, v21.16b\n"
"and v18.16b, v25.16b, v21.16b\n"
"and v17.16b, v26.16b, v21.16b\n"
"and v16.16b, v27.16b, v21.16b\n"
- "add %x[params], %x[params], #0x60\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v8.4s\n"
- "sqrdmulh v29.4s, v29.4s, v8.4s\n"
- "sqrdmulh v30.4s, v30.4s, v8.4s\n"
- "sqrdmulh v31.4s, v31.4s, v8.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
+ "and v19.16b, v28.16b, v21.16b\n"
"sqadd v25.4s, v25.4s, v18.4s\n"
+ "and v18.16b, v29.16b, v21.16b\n"
"sqadd v26.4s, v26.4s, v17.4s\n"
"sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v21.16b\n"
- "and v18.16b, v29.16b, v21.16b\n"
"and v17.16b, v30.16b, v21.16b\n"
"and v16.16b, v31.16b, v21.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v21.4s\n"
+ "srshl v25.4s, v25.4s, v21.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v19.4s\n"
"sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v21.4s\n"
- "srshl v25.4s, v25.4s, v21.4s\n"
"srshl v26.4s, v26.4s, v21.4s\n"
"srshl v27.4s, v27.4s, v21.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
"srshl v28.4s, v28.4s, v21.4s\n"
"srshl v29.4s, v29.4s, v21.4s\n"
+ "add v24.4s, v24.4s, v11.4s\n"
+ "add v25.4s, v25.4s, v11.4s\n"
"srshl v30.4s, v30.4s, v21.4s\n"
"srshl v31.4s, v31.4s, v21.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v11.4s\n"
+ "add v27.4s, v27.4s, v11.4s\n"
+ "add v28.4s, v28.4s, v11.4s\n"
+ "add v29.4s, v29.4s, v11.4s\n"
+ "add v30.4s, v30.4s, v11.4s\n"
+ "add v31.4s, v31.4s, v11.4s\n"
+ "smin v24.4s, v24.4s, v9.4s\n"
+ "smin v25.4s, v25.4s, v9.4s\n"
+ "smin v26.4s, v26.4s, v9.4s\n"
+ "smin v27.4s, v27.4s, v9.4s\n"
+ "smin v28.4s, v28.4s, v9.4s\n"
+ "smin v29.4s, v29.4s, v9.4s\n"
+ "smin v30.4s, v30.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v10.4s\n"
+ "smax v25.4s, v25.4s, v10.4s\n"
+ "smax v26.4s, v26.4s, v10.4s\n"
+ "smax v27.4s, v27.4s, v10.4s\n"
+ "smax v28.4s, v28.4s, v10.4s\n"
+ "smax v29.4s, v29.4s, v10.4s\n"
+ "smax v30.4s, v30.4s, v10.4s\n"
+ "smax v31.4s, v31.4s, v10.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
@@ -307,33 +307,33 @@ void a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
"uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s24, [x27, x28]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "str s25, [x26, x28]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s26, [x25, x28]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x28]\n"
- "str s28, [x23, x28]\n"
+ "str s24, [x27, x28]\n"
+ "str s25, [x26, x28]\n"
"dup v24.4s, v22.s[0]\n"
"dup v25.4s, v22.s[1]\n"
- "str s29, [x22, x28]\n"
+ "str s26, [x25, x28]\n"
"dup v26.4s, v22.s[2]\n"
+ "str s27, [x24, x28]\n"
"dup v27.4s, v22.s[3]\n"
- "str s30, [x21, x28]\n"
+ "add v24.4s, v24.4s, v20.4s\n"
+ "str s28, [x23, x28]\n"
"dup v28.4s, v23.s[0]\n"
+ "add v25.4s, v25.4s, v20.4s\n"
+ "str s29, [x22, x28]\n"
"dup v29.4s, v23.s[1]\n"
- "str s31, [x20, x28]\n"
+ "add v26.4s, v26.4s, v20.4s\n"
+ "str s30, [x21, x28]\n"
"dup v30.4s, v23.s[2]\n"
+ "add v27.4s, v27.4s, v20.4s\n"
+ "str s31, [x20, x28]\n"
"dup v31.4s, v23.s[3]\n"
"add x28, x28, #0x4\n"
- "add v24.4s, v24.4s, v20.4s\n"
- "add v25.4s, v25.4s, v20.4s\n"
- "add v26.4s, v26.4s, v20.4s\n"
- "add v27.4s, v27.4s, v20.4s\n"
"add v28.4s, v28.4s, v20.4s\n"
"add v29.4s, v29.4s, v20.4s\n"
"add v30.4s, v30.4s, v20.4s\n"
@@ -348,98 +348,98 @@ void a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
".inst 0x4fa0e8bb // sdot v27.4s, v5.16b, v0.4b[3]\n"
"cmp %x[n_channels], #0x4\n"
"add x27, x27, x28\n"
- ".inst 0x4f81e0d8 // sdot v24.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x4fa1e0d9 // sdot v25.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x4f82e0bc // sdot v28.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x4fa2e0bd // sdot v29.4s, v5.16b, v2.4b[1]\n"
"add x26, x26, x28\n"
"add x25, x25, x28\n"
- ".inst 0x4f81e8da // sdot v26.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x4fa1e8db // sdot v27.4s, v6.16b, v1.4b[3]\n"
+ ".inst 0x4f82e8be // sdot v30.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x4fa2e8bf // sdot v31.4s, v5.16b, v2.4b[3]\n"
"add x24, x24, x28\n"
"add x23, x23, x28\n"
- ".inst 0x4f82e0bc // sdot v28.4s, v5.16b, v2.4b[0]\n"
- ".inst 0x4fa2e0bd // sdot v29.4s, v5.16b, v2.4b[1]\n"
+ ".inst 0x4f81e0d8 // sdot v24.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4fa1e0d9 // sdot v25.4s, v6.16b, v1.4b[1]\n"
"add x22, x22, x28\n"
"add x21, x21, x28\n"
- ".inst 0x4f82e8be // sdot v30.4s, v5.16b, v2.4b[2]\n"
- ".inst 0x4fa2e8bf // sdot v31.4s, v5.16b, v2.4b[3]\n"
+ ".inst 0x4f81e8da // sdot v26.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x4fa1e8db // sdot v27.4s, v6.16b, v1.4b[3]\n"
"add x20, x20, x28\n"
"add %x[params], %x[params], #0x20\n"
- ".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x4fa2e0f9 // sdot v25.4s, v7.16b, v2.4b[1]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x4f82e8fa // sdot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x4fa2e8fb // sdot v27.4s, v7.16b, v2.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
".inst 0x4f83e0dc // sdot v28.4s, v6.16b, v3.4b[0]\n"
".inst 0x4fa3e0dd // sdot v29.4s, v6.16b, v3.4b[1]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
".inst 0x4f83e8de // sdot v30.4s, v6.16b, v3.4b[2]\n"
".inst 0x4fa3e8df // sdot v31.4s, v6.16b, v3.4b[3]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
+ ".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4fa2e0f9 // sdot v25.4s, v7.16b, v2.4b[1]\n"
+ ".inst 0x4f82e8fa // sdot v26.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x4fa2e8fb // sdot v27.4s, v7.16b, v2.4b[3]\n"
".inst 0x4f84e0fc // sdot v28.4s, v7.16b, v4.4b[0]\n"
".inst 0x4fa4e0fd // sdot v29.4s, v7.16b, v4.4b[1]\n"
- "and v19.16b, v24.16b, v20.16b\n"
".inst 0x4f84e8fe // sdot v30.4s, v7.16b, v4.4b[2]\n"
".inst 0x4fa4e8ff // sdot v31.4s, v7.16b, v4.4b[3]\n"
+ "sqrdmulh v24.4s, v24.4s, v21.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v21.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v21.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v21.4s\n"
+ "and v19.16b, v24.16b, v20.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v21.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v21.4s\n"
"and v18.16b, v25.16b, v20.16b\n"
"and v17.16b, v26.16b, v20.16b\n"
"and v16.16b, v27.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
+ "sqrdmulh v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
+ "and v19.16b, v28.16b, v20.16b\n"
"sqadd v25.4s, v25.4s, v18.4s\n"
"sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
"and v18.16b, v29.16b, v20.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"and v17.16b, v30.16b, v20.16b\n"
"and v16.16b, v31.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v20.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v25.4s, v25.4s, v20.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v19.4s\n"
"sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
"srshl v26.4s, v26.4s, v20.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
"srshl v27.4s, v27.4s, v20.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
"srshl v28.4s, v28.4s, v20.4s\n"
"srshl v29.4s, v29.4s, v20.4s\n"
+ "add v24.4s, v24.4s, v11.4s\n"
"srshl v30.4s, v30.4s, v20.4s\n"
+ "add v25.4s, v25.4s, v11.4s\n"
"srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v11.4s\n"
+ "add v27.4s, v27.4s, v11.4s\n"
+ "add v28.4s, v28.4s, v11.4s\n"
+ "add v29.4s, v29.4s, v11.4s\n"
+ "add v30.4s, v30.4s, v11.4s\n"
+ "add v31.4s, v31.4s, v11.4s\n"
+ "smin v24.4s, v24.4s, v9.4s\n"
+ "smin v25.4s, v25.4s, v9.4s\n"
+ "smin v26.4s, v26.4s, v9.4s\n"
+ "smin v27.4s, v27.4s, v9.4s\n"
+ "smin v28.4s, v28.4s, v9.4s\n"
+ "smin v29.4s, v29.4s, v9.4s\n"
+ "smin v30.4s, v30.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v10.4s\n"
+ "smax v25.4s, v25.4s, v10.4s\n"
+ "smax v26.4s, v26.4s, v10.4s\n"
+ "smax v27.4s, v27.4s, v10.4s\n"
+ "smax v28.4s, v28.4s, v10.4s\n"
+ "smax v29.4s, v29.4s, v10.4s\n"
+ "smax v30.4s, v30.4s, v10.4s\n"
+ "smax v31.4s, v31.4s, v10.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
@@ -509,7 +509,7 @@ void a64_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
"4:" // Tail: End
: [n_channels] "+&r" (n_output_channels), [params] "+&r" (params)
: [inptrs] "r" (inptrs), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
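/*
 * Reading aid (illustrative, not part of the patch): in the prologue of
 * the kernel above, "movi v18.16b, #0x1" followed by "ushr v18.4s, #0x8"
 * leaves each 32-bit lane with the bytes {1, 1, 1, 0}, so an SDOT against
 * it sums three consecutive signed input bytes -- one 3-tap row of the
 * 3x3 kernel. (The 5x5 variant below pairs an all-ones vector with a
 * 0x00000001 vector to sum five bytes.) The sums are multiplied by the
 * negated b_offset and folded into the per-channel value loaded from the
 * start of the parameter block (presumably the bias). A scalar C++ sketch
 * of that seeding step, with hypothetical names:
 */
#include <cstdint>

// Sum of `taps` consecutive signed input bytes, as the SDOT-with-ones
// idiom produces (taps = 3 here, taps = 5 in the 5x5 kernel).
static int32_t input_sum(const int8_t *in, int taps)
{
    int32_t s = 0;
    for (int t = 0; t < taps; ++t)
        s += in[t];
    return s;
}

// Accumulator seed: bias + (-b_offset) * sum(inputs), matching the
// "neg v19 / mul vN, vN, v19 / add vN, vN, v14" instructions above.
static int32_t seed_accumulator(int32_t bias, int32_t b_offset,
                                const int8_t *in, int taps)
{
    return bias + (-b_offset) * input_sum(in, taps);
}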
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
index 17afc92e30..83962606c8 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,158 +41,158 @@ void a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "ldr q12, [%x[params], #0x0]\n"
+ "ldr q22, [%x[params], #0x0]\n"
"ldr q8, [%x[params], #0x10]\n"
- "movi v30.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
+ "movi v23.16b, #0x1\n"
+ "movi v19.4s, #0x0\n"
"ldr q9, [%x[params], #0x20]\n"
"ldr q10, [%x[params], #0x30]\n"
- "movi v16.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
"ldr q11, [%x[params], #0x40]\n"
"ldr x20, [%x[inptrs], #0x18]\n"
- "movi v24.4s, #0x0\n"
"movi v31.4s, #0x0\n"
- "ld1 { v3.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x20]\n"
- "mov v26.16b, v3.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "ld1 { v4.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "mov v21.16b, v4.16b\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- "ld1 { v2.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
- "mov v27.16b, v2.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- "ld1 { v1.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x28]\n"
- "zip1 v3.2d, v3.2d, v26.2d\n"
- "zip1 v4.2d, v4.2d, v21.2d\n"
- "ld1 { v5.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x30]\n"
- "mov v26.16b, v1.16b\n"
- "mov v22.16b, v5.16b\n"
- "ld1 { v6.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x38]\n"
- "mov v19.16b, v6.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "ld1 { v7.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "mov v21.16b, v7.16b\n"
- "zip1 v2.2d, v2.2d, v27.2d\n"
- "ld1 { v0.16b }, [x20]\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x4f83e3d1 // sdot v17.4s, v30.16b, v3.4b[0]\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x4f83ebd0 // sdot v16.4s, v30.16b, v3.4b[2]\n"
- ".inst 0x4f84e3d9 // sdot v25.4s, v30.16b, v4.4b[0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v23.4s }, [x20]\n"
- ".inst 0x4f84ebd8 // sdot v24.4s, v30.16b, v4.4b[2]\n"
- "mov v18.16b, v0.16b\n"
- ".inst 0x4f82e3df // sdot v31.4s, v30.16b, v2.4b[0]\n"
+ "movi v28.4s, #0x0\n"
+ "ldr x24, [%x[inptrs], #0x20]\n"
+ "ldr x23, [%x[inptrs], #0x10]\n"
"movi v29.4s, #0x0\n"
- "movi v28.4s, #0x1\n"
- ".inst 0x4f82ebdd // sdot v29.4s, v30.16b, v2.4b[2]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v13.4s }, [x20]\n"
- "ext v18.16b, v18.16b, v18.16b, #0x1\n"
- "zip1 v1.2d, v1.2d, v26.2d\n"
- ".inst 0x4fa3e391 // sdot v17.4s, v28.16b, v3.4b[1]\n"
- "zip1 v5.2d, v5.2d, v22.2d\n"
- "zip1 v6.2d, v6.2d, v19.2d\n"
- ".inst 0x4fa3eb90 // sdot v16.4s, v28.16b, v3.4b[3]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v14.4s }, [x20]\n"
- "zip1 v7.2d, v7.2d, v21.2d\n"
- "movi v22.4s, #0x0\n"
- ".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n"
+ "movi v25.4s, #0x1\n"
+ "ldr x22, [%x[inptrs], #0x8]\n"
+ "ldr x21, [%x[inptrs], #0x28]\n"
"movi v21.4s, #0x0\n"
- ".inst 0x4fa4eb98 // sdot v24.4s, v28.16b, v4.4b[3]\n"
- ".inst 0x4f81e3d6 // sdot v22.4s, v30.16b, v1.4b[0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v15.4s }, [x20]\n"
+ "movi v16.4s, #0x0\n"
+ "ld1 { v3.16b }, [x20]\n"
+ "ldr x20, [%x[inptrs], #0x30]\n"
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
- ".inst 0x4f81ebd5 // sdot v21.4s, v30.16b, v1.4b[2]\n"
+ "ld1 { v4.16b }, [x24]\n"
+ "ld1 { v2.16b }, [x23]\n"
+ "movi v30.4s, #0x0\n"
"movi v20.4s, #0x0\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x4f85e3da // sdot v26.4s, v30.16b, v5.4b[0]\n"
- "cmp %x[n_channels], #0x4\n"
- "zip1 v0.2d, v0.2d, v18.2d\n"
+ "ld1 { v1.16b }, [x22]\n"
+ "ld1 { v5.16b }, [x21]\n"
+ "movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
- ".inst 0x4f85ebdb // sdot v27.4s, v30.16b, v5.4b[2]\n"
+ "ld1 { v6.16b }, [x20]\n"
+ "mov v7.16b, v3.16b\n"
+ "ldr x22, [%x[inptrs], #0x38]\n"
+ "movi v24.4s, #0x0\n"
+ "mov v0.16b, v4.16b\n"
+ "ldr x21, [%x[inptrs], #0x0]\n"
+ "mov v14.16b, v2.16b\n"
+ "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v12.4s }, [x20]\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "add x11, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ext v0.16b, v0.16b, v0.16b, #0x1\n"
+ "ext v14.16b, v14.16b, v14.16b, #0x1\n"
+ "add x10, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "cmp %x[n_channels], #0x4\n"
"mov x9, #0x0\n"
- ".inst 0x4f86e3d4 // sdot v20.4s, v30.16b, v6.4b[0]\n"
- ".inst 0x4f86ebd3 // sdot v19.4s, v30.16b, v6.4b[2]\n"
- "add v17.4s, v17.4s, v25.4s\n"
"mov x28, #0x0\n"
- "movi v25.4s, #0x0\n"
- ".inst 0x4f87e3d2 // sdot v18.4s, v30.16b, v7.4b[0]\n"
- ".inst 0x4f87ebd9 // sdot v25.4s, v30.16b, v7.4b[2]\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
- ".inst 0x4fa2e39f // sdot v31.4s, v28.16b, v2.4b[1]\n"
- ".inst 0x4fa2eb9d // sdot v29.4s, v28.16b, v2.4b[3]\n"
- "add v16.4s, v16.4s, v24.4s\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
- "movi v24.4s, #0x0\n"
- ".inst 0x4f80e3d8 // sdot v24.4s, v30.16b, v0.4b[0]\n"
- ".inst 0x4fa1e396 // sdot v22.4s, v28.16b, v1.4b[1]\n"
+ "zip1 v3.2d, v3.2d, v7.2d\n"
+ "ld1 { v7.16b }, [x22]\n"
+ "neg v12.4s, v12.4s\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
- ".inst 0x4fa1eb95 // sdot v21.4s, v28.16b, v1.4b[3]\n"
- ".inst 0x4fa5e39a // sdot v26.4s, v28.16b, v5.4b[1]\n"
- "add v31.4s, v31.4s, v17.4s\n"
+ "zip1 v4.2d, v4.2d, v0.2d\n"
+ "ld1 { v0.16b }, [x21]\n"
+ "zip1 v2.2d, v2.2d, v14.2d\n"
+ "ld1r { v14.4s }, [x20]\n"
"ldp x21, x20, [%x[outptrs], #0x30]\n"
- ".inst 0x4fa5eb9b // sdot v27.4s, v28.16b, v5.4b[3]\n"
- ".inst 0x4fa6e394 // sdot v20.4s, v28.16b, v6.4b[1]\n"
- "add v29.4s, v29.4s, v16.4s\n"
"add %x[params], %x[params], #0x50\n"
- ".inst 0x4fa6eb93 // sdot v19.4s, v28.16b, v6.4b[3]\n"
- ".inst 0x4fa7e392 // sdot v18.4s, v28.16b, v7.4b[1]\n"
- "add v22.4s, v22.4s, v31.4s\n"
- ".inst 0x4fa7eb99 // sdot v25.4s, v28.16b, v7.4b[3]\n"
- ".inst 0x4fa0e398 // sdot v24.4s, v28.16b, v0.4b[1]\n"
- "add v21.4s, v21.4s, v29.4s\n"
- "add v20.4s, v26.4s, v20.4s\n"
- "add v19.4s, v27.4s, v19.4s\n"
- "add v18.4s, v18.4s, v17.4s\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x4f80ebd1 // sdot v17.4s, v30.16b, v0.4b[2]\n"
- ".inst 0x4fa0eb91 // sdot v17.4s, v28.16b, v0.4b[3]\n"
- "add v16.4s, v25.4s, v16.4s\n"
- "add v24.4s, v22.4s, v24.4s\n"
- "add v25.4s, v21.4s, v17.4s\n"
- "add v26.4s, v26.4s, v22.4s\n"
- "add v27.4s, v27.4s, v21.4s\n"
- "add v28.4s, v20.4s, v31.4s\n"
- "add v29.4s, v19.4s, v29.4s\n"
- "add v30.4s, v20.4s, v18.4s\n"
- "add v31.4s, v19.4s, v16.4s\n"
- "neg v23.4s, v23.4s\n"
- "mul v24.4s, v24.4s, v23.4s\n"
- "mul v25.4s, v25.4s, v23.4s\n"
- "mul v26.4s, v26.4s, v23.4s\n"
- "mul v27.4s, v27.4s, v23.4s\n"
- "mul v28.4s, v28.4s, v23.4s\n"
- "mul v29.4s, v29.4s, v23.4s\n"
- "mul v30.4s, v30.4s, v23.4s\n"
- "mul v31.4s, v31.4s, v23.4s\n"
- "zip1 v19.4s, v24.4s, v26.4s\n"
- "zip1 v18.4s, v25.4s, v27.4s\n"
+ ".inst 0x4f83e2f3 // sdot v19.4s, v23.16b, v3.4b[0]\n"
+ ".inst 0x4f83eaed // sdot v13.4s, v23.16b, v3.4b[2]\n"
+ ".inst 0x4f84e2ef // sdot v15.4s, v23.16b, v4.4b[0]\n"
+ ".inst 0x4f84eaff // sdot v31.4s, v23.16b, v4.4b[2]\n"
+ ".inst 0x4f82e2fc // sdot v28.4s, v23.16b, v2.4b[0]\n"
+ ".inst 0x4f82eafd // sdot v29.4s, v23.16b, v2.4b[2]\n"
+ ".inst 0x4fa3e333 // sdot v19.4s, v25.16b, v3.4b[1]\n"
+ ".inst 0x4fa3eb2d // sdot v13.4s, v25.16b, v3.4b[3]\n"
+ ".inst 0x4fa4e32f // sdot v15.4s, v25.16b, v4.4b[1]\n"
+ ".inst 0x4fa4eb3f // sdot v31.4s, v25.16b, v4.4b[3]\n"
+ ".inst 0x4fa2e33c // sdot v28.4s, v25.16b, v2.4b[1]\n"
+ ".inst 0x4fa2eb3d // sdot v29.4s, v25.16b, v2.4b[3]\n"
+ "add v19.4s, v19.4s, v15.4s\n"
+ "ld1r { v15.4s }, [x11]\n"
+ "add v31.4s, v13.4s, v31.4s\n"
+ "mov v13.16b, v1.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ "add v28.4s, v28.4s, v19.4s\n"
+ "add v29.4s, v29.4s, v31.4s\n"
+ "zip1 v1.2d, v1.2d, v13.2d\n"
+ "mov v13.16b, v5.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x4f81e2f5 // sdot v21.4s, v23.16b, v1.4b[0]\n"
+ ".inst 0x4f81eaf0 // sdot v16.4s, v23.16b, v1.4b[2]\n"
+ "zip1 v5.2d, v5.2d, v13.2d\n"
+ "mov v13.16b, v6.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x4f85e2fa // sdot v26.4s, v23.16b, v5.4b[0]\n"
+ ".inst 0x4f85eafb // sdot v27.4s, v23.16b, v5.4b[2]\n"
+ ".inst 0x4fa1e335 // sdot v21.4s, v25.16b, v1.4b[1]\n"
+ "zip1 v6.2d, v6.2d, v13.2d\n"
+ "mov v13.16b, v7.16b\n"
+ ".inst 0x4fa1eb30 // sdot v16.4s, v25.16b, v1.4b[3]\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x4f86e2fe // sdot v30.4s, v23.16b, v6.4b[0]\n"
+ ".inst 0x4f86eaf4 // sdot v20.4s, v23.16b, v6.4b[2]\n"
+ ".inst 0x4fa5e33a // sdot v26.4s, v25.16b, v5.4b[1]\n"
+ ".inst 0x4fa5eb3b // sdot v27.4s, v25.16b, v5.4b[3]\n"
+ "add v21.4s, v21.4s, v28.4s\n"
+ "zip1 v7.2d, v7.2d, v13.2d\n"
+ "ld1r { v13.4s }, [x10]\n"
+ "add v16.4s, v16.4s, v29.4s\n"
+ ".inst 0x4fa6e33e // sdot v30.4s, v25.16b, v6.4b[1]\n"
+ ".inst 0x4fa6eb34 // sdot v20.4s, v25.16b, v6.4b[3]\n"
+ ".inst 0x4f87e2f1 // sdot v17.4s, v23.16b, v7.4b[0]\n"
+ ".inst 0x4f87eaf2 // sdot v18.4s, v23.16b, v7.4b[2]\n"
+ "add v30.4s, v26.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v21.4s\n"
+ ".inst 0x4fa7e331 // sdot v17.4s, v25.16b, v7.4b[1]\n"
+ "add v20.4s, v27.4s, v20.4s\n"
+ "add v27.4s, v27.4s, v16.4s\n"
+ "add v28.4s, v30.4s, v28.4s\n"
+ ".inst 0x4fa7eb32 // sdot v18.4s, v25.16b, v7.4b[3]\n"
+ "mul v26.4s, v26.4s, v12.4s\n"
+ "add v19.4s, v17.4s, v19.4s\n"
+ "mov v17.16b, v0.16b\n"
+ "add v29.4s, v20.4s, v29.4s\n"
+ "mul v27.4s, v27.4s, v12.4s\n"
+ "mul v28.4s, v28.4s, v12.4s\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x1\n"
+ "add v31.4s, v18.4s, v31.4s\n"
+ "movi v18.4s, #0x0\n"
+ "add v30.4s, v30.4s, v19.4s\n"
+ "mul v29.4s, v29.4s, v12.4s\n"
+ "zip1 v0.2d, v0.2d, v17.2d\n"
+ "add v31.4s, v20.4s, v31.4s\n"
+ "mul v30.4s, v30.4s, v12.4s\n"
+ ".inst 0x4f80e2f8 // sdot v24.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x4f80eaf2 // sdot v18.4s, v23.16b, v0.4b[2]\n"
+ "mul v31.4s, v31.4s, v12.4s\n"
"zip1 v17.4s, v28.4s, v30.4s\n"
- "zip1 v16.4s, v29.4s, v31.4s\n"
- "zip1 v22.4s, v19.4s, v18.4s\n"
- "zip1 v23.4s, v17.4s, v16.4s\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
+ "add v28.4s, v28.4s, v22.4s\n"
+ "add v30.4s, v30.4s, v22.4s\n"
+ ".inst 0x4fa0e338 // sdot v24.4s, v25.16b, v0.4b[1]\n"
+ "zip1 v19.4s, v29.4s, v31.4s\n"
+ "add v29.4s, v29.4s, v22.4s\n"
+ ".inst 0x4fa0eb32 // sdot v18.4s, v25.16b, v0.4b[3]\n"
+ "add v31.4s, v31.4s, v22.4s\n"
+ "add v24.4s, v21.4s, v24.4s\n"
+ "zip1 v23.4s, v17.4s, v19.4s\n"
+ "add v25.4s, v16.4s, v18.4s\n"
+ "mul v24.4s, v24.4s, v12.4s\n"
+ "mul v25.4s, v25.4s, v12.4s\n"
+ "zip1 v17.4s, v24.4s, v26.4s\n"
+ "add v26.4s, v26.4s, v22.4s\n"
+ "zip1 v16.4s, v25.4s, v27.4s\n"
+ "add v27.4s, v27.4s, v22.4s\n"
+ "add v24.4s, v24.4s, v22.4s\n"
+ "add v25.4s, v25.4s, v22.4s\n"
+ "zip1 v22.4s, v17.4s, v16.4s\n"
"ble 2f\n"
"1:" // Loop
"ldr q12, [%x[params], #0x60]\n"
@@ -203,159 +203,159 @@ void a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
".inst 0x4f81e11a // sdot v26.4s, v8.16b, v1.4b[0]\n"
".inst 0x4f81e91b // sdot v27.4s, v8.16b, v1.4b[2]\n"
"sub %x[n_channels], %x[n_channels], #0x4\n"
- ".inst 0x4fa0e138 // sdot v24.4s, v9.16b, v0.4b[1]\n"
- ".inst 0x4fa0e939 // sdot v25.4s, v9.16b, v0.4b[3]\n"
- "cmp %x[n_channels], #0x4\n"
- "add x9, x9, #0x10\n"
- ".inst 0x4fa1e13a // sdot v26.4s, v9.16b, v1.4b[1]\n"
- ".inst 0x4fa1e93b // sdot v27.4s, v9.16b, v1.4b[3]\n"
".inst 0x4f82e11c // sdot v28.4s, v8.16b, v2.4b[0]\n"
".inst 0x4f82e91d // sdot v29.4s, v8.16b, v2.4b[2]\n"
+ "cmp %x[n_channels], #0x4\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f83e11e // sdot v30.4s, v8.16b, v3.4b[0]\n"
".inst 0x4f83e91f // sdot v31.4s, v8.16b, v3.4b[2]\n"
"ldr q17, [%x[params], #0x0]\n"
- ".inst 0x4f81e158 // sdot v24.4s, v10.16b, v1.4b[0]\n"
- ".inst 0x4f81e959 // sdot v25.4s, v10.16b, v1.4b[2]\n"
- ".inst 0x4f82e15a // sdot v26.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x4f82e95b // sdot v27.4s, v10.16b, v2.4b[2]\n"
+ ".inst 0x4fa0e138 // sdot v24.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4fa0e939 // sdot v25.4s, v9.16b, v0.4b[3]\n"
+ ".inst 0x4fa1e13a // sdot v26.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x4fa1e93b // sdot v27.4s, v9.16b, v1.4b[3]\n"
".inst 0x4fa2e13c // sdot v28.4s, v9.16b, v2.4b[1]\n"
".inst 0x4fa2e93d // sdot v29.4s, v9.16b, v2.4b[3]\n"
".inst 0x4fa3e13e // sdot v30.4s, v9.16b, v3.4b[1]\n"
".inst 0x4fa3e93f // sdot v31.4s, v9.16b, v3.4b[3]\n"
"ldr q16, [%x[params], #0x10]\n"
- ".inst 0x4fa1e178 // sdot v24.4s, v11.16b, v1.4b[1]\n"
- ".inst 0x4fa1e979 // sdot v25.4s, v11.16b, v1.4b[3]\n"
- ".inst 0x4fa2e17a // sdot v26.4s, v11.16b, v2.4b[1]\n"
- ".inst 0x4fa2e97b // sdot v27.4s, v11.16b, v2.4b[3]\n"
+ ".inst 0x4f81e158 // sdot v24.4s, v10.16b, v1.4b[0]\n"
+ ".inst 0x4f81e959 // sdot v25.4s, v10.16b, v1.4b[2]\n"
+ ".inst 0x4f82e15a // sdot v26.4s, v10.16b, v2.4b[0]\n"
+ ".inst 0x4f82e95b // sdot v27.4s, v10.16b, v2.4b[2]\n"
".inst 0x4f83e15c // sdot v28.4s, v10.16b, v3.4b[0]\n"
".inst 0x4f83e95d // sdot v29.4s, v10.16b, v3.4b[2]\n"
".inst 0x4f84e15e // sdot v30.4s, v10.16b, v4.4b[0]\n"
".inst 0x4f84e95f // sdot v31.4s, v10.16b, v4.4b[2]\n"
"ldr q19, [%x[params], #0x20]\n"
- ".inst 0x4f82e238 // sdot v24.4s, v17.16b, v2.4b[0]\n"
- ".inst 0x4f82ea39 // sdot v25.4s, v17.16b, v2.4b[2]\n"
- ".inst 0x4f83e23a // sdot v26.4s, v17.16b, v3.4b[0]\n"
- ".inst 0x4f83ea3b // sdot v27.4s, v17.16b, v3.4b[2]\n"
+ ".inst 0x4fa1e178 // sdot v24.4s, v11.16b, v1.4b[1]\n"
+ ".inst 0x4fa1e979 // sdot v25.4s, v11.16b, v1.4b[3]\n"
+ ".inst 0x4fa2e17a // sdot v26.4s, v11.16b, v2.4b[1]\n"
+ ".inst 0x4fa2e97b // sdot v27.4s, v11.16b, v2.4b[3]\n"
".inst 0x4fa3e17c // sdot v28.4s, v11.16b, v3.4b[1]\n"
".inst 0x4fa3e97d // sdot v29.4s, v11.16b, v3.4b[3]\n"
".inst 0x4fa4e17e // sdot v30.4s, v11.16b, v4.4b[1]\n"
".inst 0x4fa4e97f // sdot v31.4s, v11.16b, v4.4b[3]\n"
"ldr q18, [%x[params], #0x30]\n"
- ".inst 0x4fa2e218 // sdot v24.4s, v16.16b, v2.4b[1]\n"
- ".inst 0x4fa2ea19 // sdot v25.4s, v16.16b, v2.4b[3]\n"
- ".inst 0x4fa3e21a // sdot v26.4s, v16.16b, v3.4b[1]\n"
- ".inst 0x4fa3ea1b // sdot v27.4s, v16.16b, v3.4b[3]\n"
+ ".inst 0x4f82e238 // sdot v24.4s, v17.16b, v2.4b[0]\n"
+ ".inst 0x4f82ea39 // sdot v25.4s, v17.16b, v2.4b[2]\n"
+ ".inst 0x4f83e23a // sdot v26.4s, v17.16b, v3.4b[0]\n"
+ ".inst 0x4f83ea3b // sdot v27.4s, v17.16b, v3.4b[2]\n"
".inst 0x4f84e23c // sdot v28.4s, v17.16b, v4.4b[0]\n"
".inst 0x4f84ea3d // sdot v29.4s, v17.16b, v4.4b[2]\n"
".inst 0x4f85e23e // sdot v30.4s, v17.16b, v5.4b[0]\n"
".inst 0x4f85ea3f // sdot v31.4s, v17.16b, v5.4b[2]\n"
"ldr q17, [%x[params], #0x40]\n"
- ".inst 0x4f83e278 // sdot v24.4s, v19.16b, v3.4b[0]\n"
- ".inst 0x4f83ea79 // sdot v25.4s, v19.16b, v3.4b[2]\n"
- ".inst 0x4f84e27a // sdot v26.4s, v19.16b, v4.4b[0]\n"
- ".inst 0x4f84ea7b // sdot v27.4s, v19.16b, v4.4b[2]\n"
+ ".inst 0x4fa2e218 // sdot v24.4s, v16.16b, v2.4b[1]\n"
+ ".inst 0x4fa2ea19 // sdot v25.4s, v16.16b, v2.4b[3]\n"
+ ".inst 0x4fa3e21a // sdot v26.4s, v16.16b, v3.4b[1]\n"
+ ".inst 0x4fa3ea1b // sdot v27.4s, v16.16b, v3.4b[3]\n"
".inst 0x4fa4e21c // sdot v28.4s, v16.16b, v4.4b[1]\n"
".inst 0x4fa4ea1d // sdot v29.4s, v16.16b, v4.4b[3]\n"
".inst 0x4fa5e21e // sdot v30.4s, v16.16b, v5.4b[1]\n"
".inst 0x4fa5ea1f // sdot v31.4s, v16.16b, v5.4b[3]\n"
"ldr q16, [%x[params], #0x50]\n"
- ".inst 0x4fa3e258 // sdot v24.4s, v18.16b, v3.4b[1]\n"
- ".inst 0x4fa3ea59 // sdot v25.4s, v18.16b, v3.4b[3]\n"
- ".inst 0x4fa4e25a // sdot v26.4s, v18.16b, v4.4b[1]\n"
- ".inst 0x4fa4ea5b // sdot v27.4s, v18.16b, v4.4b[3]\n"
+ ".inst 0x4f83e278 // sdot v24.4s, v19.16b, v3.4b[0]\n"
+ ".inst 0x4f83ea79 // sdot v25.4s, v19.16b, v3.4b[2]\n"
+ ".inst 0x4f84e27a // sdot v26.4s, v19.16b, v4.4b[0]\n"
+ ".inst 0x4f84ea7b // sdot v27.4s, v19.16b, v4.4b[2]\n"
".inst 0x4f85e27c // sdot v28.4s, v19.16b, v5.4b[0]\n"
".inst 0x4f85ea7d // sdot v29.4s, v19.16b, v5.4b[2]\n"
".inst 0x4f86e27e // sdot v30.4s, v19.16b, v6.4b[0]\n"
".inst 0x4f86ea7f // sdot v31.4s, v19.16b, v6.4b[2]\n"
"ldr q10, [%x[params], #0xb0]\n"
- ".inst 0x4f84e238 // sdot v24.4s, v17.16b, v4.4b[0]\n"
- ".inst 0x4f84ea39 // sdot v25.4s, v17.16b, v4.4b[2]\n"
- ".inst 0x4f85e23a // sdot v26.4s, v17.16b, v5.4b[0]\n"
- ".inst 0x4f85ea3b // sdot v27.4s, v17.16b, v5.4b[2]\n"
+ ".inst 0x4fa3e258 // sdot v24.4s, v18.16b, v3.4b[1]\n"
+ ".inst 0x4fa3ea59 // sdot v25.4s, v18.16b, v3.4b[3]\n"
+ ".inst 0x4fa4e25a // sdot v26.4s, v18.16b, v4.4b[1]\n"
+ ".inst 0x4fa4ea5b // sdot v27.4s, v18.16b, v4.4b[3]\n"
".inst 0x4fa5e25c // sdot v28.4s, v18.16b, v5.4b[1]\n"
".inst 0x4fa5ea5d // sdot v29.4s, v18.16b, v5.4b[3]\n"
".inst 0x4fa6e25e // sdot v30.4s, v18.16b, v6.4b[1]\n"
".inst 0x4fa6ea5f // sdot v31.4s, v18.16b, v6.4b[3]\n"
"ldr q11, [%x[params], #0xc0]\n"
- ".inst 0x4fa4e218 // sdot v24.4s, v16.16b, v4.4b[1]\n"
- ".inst 0x4fa4ea19 // sdot v25.4s, v16.16b, v4.4b[3]\n"
- "sqrdmulh v24.4s, v24.4s, v12.4s\n"
- ".inst 0x4fa5e21a // sdot v26.4s, v16.16b, v5.4b[1]\n"
- ".inst 0x4fa5ea1b // sdot v27.4s, v16.16b, v5.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v12.4s\n"
+ ".inst 0x4f84e238 // sdot v24.4s, v17.16b, v4.4b[0]\n"
+ ".inst 0x4f84ea39 // sdot v25.4s, v17.16b, v4.4b[2]\n"
+ ".inst 0x4f85e23a // sdot v26.4s, v17.16b, v5.4b[0]\n"
+ ".inst 0x4f85ea3b // sdot v27.4s, v17.16b, v5.4b[2]\n"
".inst 0x4f86e23c // sdot v28.4s, v17.16b, v6.4b[0]\n"
".inst 0x4f86ea3d // sdot v29.4s, v17.16b, v6.4b[2]\n"
- "sqrdmulh v26.4s, v26.4s, v12.4s\n"
".inst 0x4f87e23e // sdot v30.4s, v17.16b, v7.4b[0]\n"
".inst 0x4f87ea3f // sdot v31.4s, v17.16b, v7.4b[2]\n"
"ldr q8, [%x[params], #0x90]\n"
- "sqrdmulh v27.4s, v27.4s, v12.4s\n"
+ ".inst 0x4fa4e218 // sdot v24.4s, v16.16b, v4.4b[1]\n"
+ ".inst 0x4fa4ea19 // sdot v25.4s, v16.16b, v4.4b[3]\n"
+ ".inst 0x4fa5e21a // sdot v26.4s, v16.16b, v5.4b[1]\n"
+ ".inst 0x4fa5ea1b // sdot v27.4s, v16.16b, v5.4b[3]\n"
".inst 0x4fa6e21c // sdot v28.4s, v16.16b, v6.4b[1]\n"
".inst 0x4fa6ea1d // sdot v29.4s, v16.16b, v6.4b[3]\n"
- "and v19.16b, v24.16b, v21.16b\n"
".inst 0x4fa7e21e // sdot v30.4s, v16.16b, v7.4b[1]\n"
".inst 0x4fa7ea1f // sdot v31.4s, v16.16b, v7.4b[3]\n"
"ldr q9, [%x[params], #0xa0]\n"
+ "add %x[params], %x[params], #0xd0\n"
+ "sqrdmulh v24.4s, v24.4s, v12.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v12.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v12.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v12.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v12.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v12.4s\n"
+ "and v19.16b, v24.16b, v21.16b\n"
"and v18.16b, v25.16b, v21.16b\n"
"and v17.16b, v26.16b, v21.16b\n"
"and v16.16b, v27.16b, v21.16b\n"
- "add %x[params], %x[params], #0xd0\n"
+ "sqrdmulh v30.4s, v30.4s, v12.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v12.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v12.4s\n"
- "sqrdmulh v29.4s, v29.4s, v12.4s\n"
- "sqrdmulh v30.4s, v30.4s, v12.4s\n"
- "sqrdmulh v31.4s, v31.4s, v12.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
+ "and v19.16b, v28.16b, v21.16b\n"
"sqadd v25.4s, v25.4s, v18.4s\n"
+ "and v18.16b, v29.16b, v21.16b\n"
"sqadd v26.4s, v26.4s, v17.4s\n"
"sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v21.16b\n"
- "and v18.16b, v29.16b, v21.16b\n"
"and v17.16b, v30.16b, v21.16b\n"
"and v16.16b, v31.16b, v21.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v21.4s\n"
+ "srshl v25.4s, v25.4s, v21.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v19.4s\n"
"sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v21.4s\n"
- "srshl v25.4s, v25.4s, v21.4s\n"
"srshl v26.4s, v26.4s, v21.4s\n"
"srshl v27.4s, v27.4s, v21.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
"srshl v28.4s, v28.4s, v21.4s\n"
"srshl v29.4s, v29.4s, v21.4s\n"
+ "add v24.4s, v24.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v14.4s\n"
"srshl v30.4s, v30.4s, v21.4s\n"
"srshl v31.4s, v31.4s, v21.4s\n"
- "add v24.4s, v24.4s, v13.4s\n"
- "add v25.4s, v25.4s, v13.4s\n"
- "add v26.4s, v26.4s, v13.4s\n"
- "add v27.4s, v27.4s, v13.4s\n"
- "add v28.4s, v28.4s, v13.4s\n"
- "add v29.4s, v29.4s, v13.4s\n"
- "add v30.4s, v30.4s, v13.4s\n"
- "add v31.4s, v31.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v14.4s\n"
- "smax v25.4s, v25.4s, v14.4s\n"
- "smax v26.4s, v26.4s, v14.4s\n"
- "smax v27.4s, v27.4s, v14.4s\n"
- "smax v28.4s, v28.4s, v14.4s\n"
- "smax v29.4s, v29.4s, v14.4s\n"
- "smax v30.4s, v30.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v14.4s\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v27.4s, v27.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v14.4s\n"
+ "add v29.4s, v29.4s, v14.4s\n"
+ "add v30.4s, v30.4s, v14.4s\n"
+ "add v31.4s, v31.4s, v14.4s\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "smax v24.4s, v24.4s, v15.4s\n"
+ "smax v25.4s, v25.4s, v15.4s\n"
+ "smax v26.4s, v26.4s, v15.4s\n"
+ "smax v27.4s, v27.4s, v15.4s\n"
+ "smax v28.4s, v28.4s, v15.4s\n"
+ "smax v29.4s, v29.4s, v15.4s\n"
+ "smax v30.4s, v30.4s, v15.4s\n"
+ "smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
@@ -366,33 +366,33 @@ void a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
"uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s24, [x27, x28]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "str s25, [x26, x28]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s26, [x25, x28]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x28]\n"
- "str s28, [x23, x28]\n"
+ "str s24, [x27, x28]\n"
+ "str s25, [x26, x28]\n"
"dup v24.4s, v22.s[0]\n"
"dup v25.4s, v22.s[1]\n"
- "str s29, [x22, x28]\n"
+ "str s26, [x25, x28]\n"
"dup v26.4s, v22.s[2]\n"
+ "str s27, [x24, x28]\n"
"dup v27.4s, v22.s[3]\n"
- "str s30, [x21, x28]\n"
+ "add v24.4s, v24.4s, v20.4s\n"
+ "str s28, [x23, x28]\n"
"dup v28.4s, v23.s[0]\n"
+ "add v25.4s, v25.4s, v20.4s\n"
+ "str s29, [x22, x28]\n"
"dup v29.4s, v23.s[1]\n"
- "str s31, [x20, x28]\n"
+ "add v26.4s, v26.4s, v20.4s\n"
+ "str s30, [x21, x28]\n"
"dup v30.4s, v23.s[2]\n"
+ "add v27.4s, v27.4s, v20.4s\n"
+ "str s31, [x20, x28]\n"
"dup v31.4s, v23.s[3]\n"
"add x28, x28, #0x4\n"
- "add v24.4s, v24.4s, v20.4s\n"
- "add v25.4s, v25.4s, v20.4s\n"
- "add v26.4s, v26.4s, v20.4s\n"
- "add v27.4s, v27.4s, v20.4s\n"
"add v28.4s, v28.4s, v20.4s\n"
"add v29.4s, v29.4s, v20.4s\n"
"add v30.4s, v30.4s, v20.4s\n"
@@ -407,160 +407,160 @@ void a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
".inst 0x4f81e91b // sdot v27.4s, v8.16b, v1.4b[2]\n"
"cmp %x[n_channels], #0x4\n"
"add x27, x27, x28\n"
- ".inst 0x4fa0e138 // sdot v24.4s, v9.16b, v0.4b[1]\n"
- ".inst 0x4fa0e939 // sdot v25.4s, v9.16b, v0.4b[3]\n"
+ ".inst 0x4f82e11c // sdot v28.4s, v8.16b, v2.4b[0]\n"
+ ".inst 0x4f82e91d // sdot v29.4s, v8.16b, v2.4b[2]\n"
"add x26, x26, x28\n"
"add x25, x25, x28\n"
- ".inst 0x4fa1e13a // sdot v26.4s, v9.16b, v1.4b[1]\n"
- ".inst 0x4fa1e93b // sdot v27.4s, v9.16b, v1.4b[3]\n"
+ ".inst 0x4f83e11e // sdot v30.4s, v8.16b, v3.4b[0]\n"
+ ".inst 0x4f83e91f // sdot v31.4s, v8.16b, v3.4b[2]\n"
+ "ldr q17, [%x[params], #0x0]\n"
"add x24, x24, x28\n"
+ ".inst 0x4fa0e138 // sdot v24.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4fa0e939 // sdot v25.4s, v9.16b, v0.4b[3]\n"
"add x23, x23, x28\n"
- ".inst 0x4f82e11c // sdot v28.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x4f82e91d // sdot v29.4s, v8.16b, v2.4b[2]\n"
"add x22, x22, x28\n"
+ ".inst 0x4fa1e13a // sdot v26.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x4fa1e93b // sdot v27.4s, v9.16b, v1.4b[3]\n"
"add x21, x21, x28\n"
- ".inst 0x4f83e11e // sdot v30.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x4f83e91f // sdot v31.4s, v8.16b, v3.4b[2]\n"
- "ldr q17, [%x[params], #0x0]\n"
"add x20, x20, x28\n"
- ".inst 0x4f81e158 // sdot v24.4s, v10.16b, v1.4b[0]\n"
- ".inst 0x4f81e959 // sdot v25.4s, v10.16b, v1.4b[2]\n"
- ".inst 0x4f82e15a // sdot v26.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x4f82e95b // sdot v27.4s, v10.16b, v2.4b[2]\n"
".inst 0x4fa2e13c // sdot v28.4s, v9.16b, v2.4b[1]\n"
".inst 0x4fa2e93d // sdot v29.4s, v9.16b, v2.4b[3]\n"
".inst 0x4fa3e13e // sdot v30.4s, v9.16b, v3.4b[1]\n"
".inst 0x4fa3e93f // sdot v31.4s, v9.16b, v3.4b[3]\n"
"ldr q16, [%x[params], #0x10]\n"
- ".inst 0x4fa1e178 // sdot v24.4s, v11.16b, v1.4b[1]\n"
- ".inst 0x4fa1e979 // sdot v25.4s, v11.16b, v1.4b[3]\n"
- ".inst 0x4fa2e17a // sdot v26.4s, v11.16b, v2.4b[1]\n"
- ".inst 0x4fa2e97b // sdot v27.4s, v11.16b, v2.4b[3]\n"
+ ".inst 0x4f81e158 // sdot v24.4s, v10.16b, v1.4b[0]\n"
+ ".inst 0x4f81e959 // sdot v25.4s, v10.16b, v1.4b[2]\n"
+ ".inst 0x4f82e15a // sdot v26.4s, v10.16b, v2.4b[0]\n"
+ ".inst 0x4f82e95b // sdot v27.4s, v10.16b, v2.4b[2]\n"
".inst 0x4f83e15c // sdot v28.4s, v10.16b, v3.4b[0]\n"
".inst 0x4f83e95d // sdot v29.4s, v10.16b, v3.4b[2]\n"
".inst 0x4f84e15e // sdot v30.4s, v10.16b, v4.4b[0]\n"
".inst 0x4f84e95f // sdot v31.4s, v10.16b, v4.4b[2]\n"
"ldr q19, [%x[params], #0x20]\n"
- ".inst 0x4f82e238 // sdot v24.4s, v17.16b, v2.4b[0]\n"
- ".inst 0x4f82ea39 // sdot v25.4s, v17.16b, v2.4b[2]\n"
- ".inst 0x4f83e23a // sdot v26.4s, v17.16b, v3.4b[0]\n"
- ".inst 0x4f83ea3b // sdot v27.4s, v17.16b, v3.4b[2]\n"
+ ".inst 0x4fa1e178 // sdot v24.4s, v11.16b, v1.4b[1]\n"
+ ".inst 0x4fa1e979 // sdot v25.4s, v11.16b, v1.4b[3]\n"
+ ".inst 0x4fa2e17a // sdot v26.4s, v11.16b, v2.4b[1]\n"
+ ".inst 0x4fa2e97b // sdot v27.4s, v11.16b, v2.4b[3]\n"
".inst 0x4fa3e17c // sdot v28.4s, v11.16b, v3.4b[1]\n"
".inst 0x4fa3e97d // sdot v29.4s, v11.16b, v3.4b[3]\n"
".inst 0x4fa4e17e // sdot v30.4s, v11.16b, v4.4b[1]\n"
".inst 0x4fa4e97f // sdot v31.4s, v11.16b, v4.4b[3]\n"
"ldr q18, [%x[params], #0x30]\n"
- ".inst 0x4fa2e218 // sdot v24.4s, v16.16b, v2.4b[1]\n"
- ".inst 0x4fa2ea19 // sdot v25.4s, v16.16b, v2.4b[3]\n"
- ".inst 0x4fa3e21a // sdot v26.4s, v16.16b, v3.4b[1]\n"
- ".inst 0x4fa3ea1b // sdot v27.4s, v16.16b, v3.4b[3]\n"
+ ".inst 0x4f82e238 // sdot v24.4s, v17.16b, v2.4b[0]\n"
+ ".inst 0x4f82ea39 // sdot v25.4s, v17.16b, v2.4b[2]\n"
+ ".inst 0x4f83e23a // sdot v26.4s, v17.16b, v3.4b[0]\n"
+ ".inst 0x4f83ea3b // sdot v27.4s, v17.16b, v3.4b[2]\n"
".inst 0x4f84e23c // sdot v28.4s, v17.16b, v4.4b[0]\n"
".inst 0x4f84ea3d // sdot v29.4s, v17.16b, v4.4b[2]\n"
".inst 0x4f85e23e // sdot v30.4s, v17.16b, v5.4b[0]\n"
".inst 0x4f85ea3f // sdot v31.4s, v17.16b, v5.4b[2]\n"
"ldr q17, [%x[params], #0x40]\n"
- ".inst 0x4f83e278 // sdot v24.4s, v19.16b, v3.4b[0]\n"
- ".inst 0x4f83ea79 // sdot v25.4s, v19.16b, v3.4b[2]\n"
- ".inst 0x4f84e27a // sdot v26.4s, v19.16b, v4.4b[0]\n"
- ".inst 0x4f84ea7b // sdot v27.4s, v19.16b, v4.4b[2]\n"
+ ".inst 0x4fa2e218 // sdot v24.4s, v16.16b, v2.4b[1]\n"
+ ".inst 0x4fa2ea19 // sdot v25.4s, v16.16b, v2.4b[3]\n"
+ ".inst 0x4fa3e21a // sdot v26.4s, v16.16b, v3.4b[1]\n"
+ ".inst 0x4fa3ea1b // sdot v27.4s, v16.16b, v3.4b[3]\n"
".inst 0x4fa4e21c // sdot v28.4s, v16.16b, v4.4b[1]\n"
".inst 0x4fa4ea1d // sdot v29.4s, v16.16b, v4.4b[3]\n"
".inst 0x4fa5e21e // sdot v30.4s, v16.16b, v5.4b[1]\n"
".inst 0x4fa5ea1f // sdot v31.4s, v16.16b, v5.4b[3]\n"
"ldr q16, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x80\n"
- ".inst 0x4fa3e258 // sdot v24.4s, v18.16b, v3.4b[1]\n"
- ".inst 0x4fa3ea59 // sdot v25.4s, v18.16b, v3.4b[3]\n"
- ".inst 0x4fa4e25a // sdot v26.4s, v18.16b, v4.4b[1]\n"
- ".inst 0x4fa4ea5b // sdot v27.4s, v18.16b, v4.4b[3]\n"
+ ".inst 0x4f83e278 // sdot v24.4s, v19.16b, v3.4b[0]\n"
+ ".inst 0x4f83ea79 // sdot v25.4s, v19.16b, v3.4b[2]\n"
+ ".inst 0x4f84e27a // sdot v26.4s, v19.16b, v4.4b[0]\n"
+ ".inst 0x4f84ea7b // sdot v27.4s, v19.16b, v4.4b[2]\n"
".inst 0x4f85e27c // sdot v28.4s, v19.16b, v5.4b[0]\n"
".inst 0x4f85ea7d // sdot v29.4s, v19.16b, v5.4b[2]\n"
".inst 0x4f86e27e // sdot v30.4s, v19.16b, v6.4b[0]\n"
".inst 0x4f86ea7f // sdot v31.4s, v19.16b, v6.4b[2]\n"
- ".inst 0x4f84e238 // sdot v24.4s, v17.16b, v4.4b[0]\n"
- ".inst 0x4f84ea39 // sdot v25.4s, v17.16b, v4.4b[2]\n"
- ".inst 0x4f85e23a // sdot v26.4s, v17.16b, v5.4b[0]\n"
- ".inst 0x4f85ea3b // sdot v27.4s, v17.16b, v5.4b[2]\n"
+ ".inst 0x4fa3e258 // sdot v24.4s, v18.16b, v3.4b[1]\n"
+ ".inst 0x4fa3ea59 // sdot v25.4s, v18.16b, v3.4b[3]\n"
+ ".inst 0x4fa4e25a // sdot v26.4s, v18.16b, v4.4b[1]\n"
+ ".inst 0x4fa4ea5b // sdot v27.4s, v18.16b, v4.4b[3]\n"
".inst 0x4fa5e25c // sdot v28.4s, v18.16b, v5.4b[1]\n"
".inst 0x4fa5ea5d // sdot v29.4s, v18.16b, v5.4b[3]\n"
".inst 0x4fa6e25e // sdot v30.4s, v18.16b, v6.4b[1]\n"
".inst 0x4fa6ea5f // sdot v31.4s, v18.16b, v6.4b[3]\n"
- ".inst 0x4fa4e218 // sdot v24.4s, v16.16b, v4.4b[1]\n"
- ".inst 0x4fa4ea19 // sdot v25.4s, v16.16b, v4.4b[3]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x4fa5e21a // sdot v26.4s, v16.16b, v5.4b[1]\n"
- ".inst 0x4fa5ea1b // sdot v27.4s, v16.16b, v5.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
+ ".inst 0x4f84e238 // sdot v24.4s, v17.16b, v4.4b[0]\n"
+ ".inst 0x4f84ea39 // sdot v25.4s, v17.16b, v4.4b[2]\n"
+ ".inst 0x4f85e23a // sdot v26.4s, v17.16b, v5.4b[0]\n"
+ ".inst 0x4f85ea3b // sdot v27.4s, v17.16b, v5.4b[2]\n"
".inst 0x4f86e23c // sdot v28.4s, v17.16b, v6.4b[0]\n"
".inst 0x4f86ea3d // sdot v29.4s, v17.16b, v6.4b[2]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
".inst 0x4f87e23e // sdot v30.4s, v17.16b, v7.4b[0]\n"
".inst 0x4f87ea3f // sdot v31.4s, v17.16b, v7.4b[2]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
+ ".inst 0x4fa4e218 // sdot v24.4s, v16.16b, v4.4b[1]\n"
+ ".inst 0x4fa4ea19 // sdot v25.4s, v16.16b, v4.4b[3]\n"
+ ".inst 0x4fa5e21a // sdot v26.4s, v16.16b, v5.4b[1]\n"
+ ".inst 0x4fa5ea1b // sdot v27.4s, v16.16b, v5.4b[3]\n"
".inst 0x4fa6e21c // sdot v28.4s, v16.16b, v6.4b[1]\n"
".inst 0x4fa6ea1d // sdot v29.4s, v16.16b, v6.4b[3]\n"
- "and v19.16b, v24.16b, v20.16b\n"
".inst 0x4fa7e21e // sdot v30.4s, v16.16b, v7.4b[1]\n"
".inst 0x4fa7ea1f // sdot v31.4s, v16.16b, v7.4b[3]\n"
+ "sqrdmulh v24.4s, v24.4s, v21.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v21.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v21.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v21.4s\n"
+ "and v19.16b, v24.16b, v20.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v21.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v21.4s\n"
"and v18.16b, v25.16b, v20.16b\n"
"and v17.16b, v26.16b, v20.16b\n"
"and v16.16b, v27.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
+ "sqrdmulh v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
+ "and v19.16b, v28.16b, v20.16b\n"
"sqadd v25.4s, v25.4s, v18.4s\n"
"sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
"and v18.16b, v29.16b, v20.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"and v17.16b, v30.16b, v20.16b\n"
"and v16.16b, v31.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v20.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v25.4s, v25.4s, v20.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v19.4s\n"
"sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
"srshl v26.4s, v26.4s, v20.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
"srshl v27.4s, v27.4s, v20.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
"srshl v28.4s, v28.4s, v20.4s\n"
"srshl v29.4s, v29.4s, v20.4s\n"
+ "add v24.4s, v24.4s, v14.4s\n"
"srshl v30.4s, v30.4s, v20.4s\n"
+ "add v25.4s, v25.4s, v14.4s\n"
"srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v13.4s\n"
- "add v25.4s, v25.4s, v13.4s\n"
- "add v26.4s, v26.4s, v13.4s\n"
- "add v27.4s, v27.4s, v13.4s\n"
- "add v28.4s, v28.4s, v13.4s\n"
- "add v29.4s, v29.4s, v13.4s\n"
- "add v30.4s, v30.4s, v13.4s\n"
- "add v31.4s, v31.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v14.4s\n"
- "smax v25.4s, v25.4s, v14.4s\n"
- "smax v26.4s, v26.4s, v14.4s\n"
- "smax v27.4s, v27.4s, v14.4s\n"
- "smax v28.4s, v28.4s, v14.4s\n"
- "smax v29.4s, v29.4s, v14.4s\n"
- "smax v30.4s, v30.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v14.4s\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v27.4s, v27.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v14.4s\n"
+ "add v29.4s, v29.4s, v14.4s\n"
+ "add v30.4s, v30.4s, v14.4s\n"
+ "add v31.4s, v31.4s, v14.4s\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "smax v24.4s, v24.4s, v15.4s\n"
+ "smax v25.4s, v25.4s, v15.4s\n"
+ "smax v26.4s, v26.4s, v15.4s\n"
+ "smax v27.4s, v27.4s, v15.4s\n"
+ "smax v28.4s, v28.4s, v15.4s\n"
+ "smax v29.4s, v29.4s, v15.4s\n"
+ "smax v30.4s, v30.4s, v15.4s\n"
+ "smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
@@ -630,7 +630,7 @@ void a64_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
"4:" // Tail: End
: [n_channels] "+&r" (n_output_channels), [params] "+&r" (params)
: [inptrs] "r" (inptrs), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
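
Note: the hunks above only reorder instructions (interleaving the SDOT accumulation with the store/requantize tail for better scheduling) and re-allocate the constant registers; the requantization math itself is unchanged. For reference, below is a minimal scalar C++ model of the NEON tail visible in these hunks — SQRDMULH, the AND/SSHR/SQADD sign fixup, SRSHL, offset add, and clamp. The register roles (v21 = per_layer_mul, v20 = per_layer_right_shift kept negated for SRSHL, and v14/v13/v15 = c_offset/maxval/minval after this patch's re-allocation) are inferred from the hunks, and the function itself is an illustration, not library code.

#include <algorithm>
#include <cstdint>

// Scalar sketch of the per-layer requantize tail (assumed names, not ACL API).
static int32_t requantize(int32_t acc, int32_t mul, int shift, // shift >= 0
                          int32_t c_offset, int32_t minval, int32_t maxval)
{
    // SQRDMULH: saturating rounding doubling high multiply.
    int64_t high = ((int64_t)acc * (int64_t)mul + (INT64_C(1) << 30)) >> 31;
    int32_t v    = high > INT32_MAX ? INT32_MAX : (int32_t)high;

    // AND/SSHR/SQADD then SRSHL: rounding right shift, with a -1 fixup on
    // negative accumulators so that ties round away from zero.
    if (shift > 0)
    {
        if (v < 0 && v != INT32_MIN)
            --v; // SQADD of the fixup
        v = (int32_t)(((int64_t)v + (INT64_C(1) << (shift - 1))) >> shift); // SRSHL
    }

    // ADD c_offset, then SMIN/SMAX clamp; UZP1 subsequently narrows to bytes.
    return std::max(minval, std::min(maxval, v + c_offset));
}

The reordering in this patch moves the DUP/ADD bias setup and the SQRDMULH/fixup chain in between the "str s…" stores, which hides store latency without altering any of the arithmetic modeled above.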
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index b21ad484e5..be3c8cf9f8 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,21 +49,21 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"lsr x10, %x[n_output_channels], #0x2\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
"ld1r { v15.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v14.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_maxval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v14.4s }, [x21]\n"
"ld1r { v13.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v12.16b }, [x21]\n"
"ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v10.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v10.4s }, [x21]\n"
"ld1r { v9.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v8.4s }, [x20]\n"
"mov x9, #0x0\n"
+ "ld1r { v8.4s }, [x20]\n"
"cbz x10, 9f\n"
"1:" // Output channel loop
"movi v31.4s, #0x0\n"
@@ -96,20 +96,20 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"3:" // Output channel loop: Load quantization parameters: Done
"ldr s5, [%x[weights]], #0x4\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
+ "ldp x21, x20, [x22], #0x10\n"
"ldr d0, [x21, #0x0]\n"
"ldr d4, [x20, #0x0]\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
"ssubl v0.8h, v0.8b, v13.8b\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"cbz x23, 7f\n"
"ldr s7, [%x[weights]], #0x4\n"
"ldp x21, x20, [x22], #0x10\n"
"subs x23, x23, #0x1\n"
- "ssubl v7.8h, v7.8b, v12.8b\n"
"ldr d3, [x21, #0x0]\n"
"ldr d6, [x20, #0x0]\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
"ssubl v3.8h, v3.8b, v13.8b\n"
"ssubl v6.8h, v6.8b, v13.8b\n"
"beq 5f\n"
@@ -125,13 +125,13 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x21, #0x0]\n"
- "ssubl v0.8h, v0.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "ssubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d4, [x20, #0x0]\n"
@@ -139,22 +139,22 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"ldp x21, x20, [x22], #0x10\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "ssubl v4.8h, v4.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "ssubl v4.8h, v4.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
"ldr d3, [x21, #0x0]\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "ssubl v3.8h, v3.8b, v13.8b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"ldr d6, [x20, #0x0]\n"
@@ -172,54 +172,54 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v19.4s, v5.4h, v0.h[3]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "smlal v16.4s, v7.4h, v3.h[0]\n"
- "smlal v17.4s, v7.4h, v3.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
- "ldr x23, [%x[outptrs], #0x20]\n"
- "smlal v18.4s, v7.4h, v3.h[2]\n"
- "smlal v19.4s, v7.4h, v3.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
+ "ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
+ "smlal v16.4s, v7.4h, v3.h[0]\n"
+ "smlal v17.4s, v7.4h, v3.h[1]\n"
+ "smlal v18.4s, v7.4h, v3.h[2]\n"
+ "smlal v19.4s, v7.4h, v3.h[3]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
+ "smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v5.4h, v4.h[2]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
- "and v3.16b, v16.16b, v8.16b\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v9.4s\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
- "and v2.16b, v17.16b, v8.16b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
- "and v1.16b, v18.16b, v8.16b\n"
- "and v0.16b, v19.16b, v8.16b\n"
+ "and v3.16b, v16.16b, v8.16b\n"
+ "and v2.16b, v17.16b, v8.16b\n"
"sshl v20.4s, v20.4s, v10.4s\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
+ "and v1.16b, v18.16b, v8.16b\n"
+ "and v0.16b, v19.16b, v8.16b\n"
+ "smlal v27.4s, v7.4h, v6.h[3]\n"
"sshl v21.4s, v21.4s, v10.4s\n"
"sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v27.4s, v7.4h, v6.h[3]\n"
+ "smlal v28.4s, v7.4h, v6.h[4]\n"
"sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
- "smlal v28.4s, v7.4h, v6.h[4]\n"
- "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -357,49 +357,49 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -421,70 +421,70 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x20, #0x0]\n"
- "ssubl v0.8h, v0.8b, v13.8b\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
"ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x20, [%x[outptrs], #0x38]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
- "ldr x20, [%x[outptrs], #0x38]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "ssubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr s5, [%x[weights]], #0x4\n"
"ldr d4, [x28, #0x0]\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
+ "smlal v20.4s, v7.4h, v3.h[4]\n"
+ "smlal v21.4s, v7.4h, v3.h[5]\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
+ "smlal v22.4s, v7.4h, v3.h[6]\n"
+ "smlal v23.4s, v7.4h, v3.h[7]\n"
"smlal v16.4s, v5.4h, v0.h[0]\n"
"smlal v17.4s, v5.4h, v0.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
"smlal v18.4s, v5.4h, v0.h[2]\n"
"smlal v19.4s, v5.4h, v0.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "smlal v20.4s, v7.4h, v3.h[4]\n"
- "smlal v21.4s, v7.4h, v3.h[5]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "smlal v22.4s, v7.4h, v3.h[6]\n"
- "smlal v23.4s, v7.4h, v3.h[7]\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
"smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
+ "smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v7.4h, v6.h[2]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
- "and v3.16b, v16.16b, v8.16b\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v9.4s\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
- "and v2.16b, v17.16b, v8.16b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
- "and v1.16b, v18.16b, v8.16b\n"
- "and v0.16b, v19.16b, v8.16b\n"
+ "and v3.16b, v16.16b, v8.16b\n"
+ "and v2.16b, v17.16b, v8.16b\n"
"sshl v20.4s, v20.4s, v10.4s\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
+ "and v1.16b, v18.16b, v8.16b\n"
+ "and v0.16b, v19.16b, v8.16b\n"
+ "smlal v27.4s, v5.4h, v4.h[3]\n"
"sshl v21.4s, v21.4s, v10.4s\n"
"sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v27.4s, v5.4h, v4.h[3]\n"
+ "smlal v28.4s, v5.4h, v4.h[4]\n"
"sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
- "smlal v28.4s, v5.4h, v4.h[4]\n"
- "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -622,49 +622,49 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -673,45 +673,45 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"7:" // Output channel loop: Single kernel point
"smlal v16.4s, v5.4h, v0.h[0]\n"
"smlal v17.4s, v5.4h, v0.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
"ldr x27, [%x[outptrs], #0x0]\n"
+ "ldr x26, [%x[outptrs], #0x8]\n"
"smlal v18.4s, v5.4h, v0.h[2]\n"
"smlal v19.4s, v5.4h, v0.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "ldr x26, [%x[outptrs], #0x8]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
- "smlal v20.4s, v5.4h, v0.h[4]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
+ "ldr x24, [%x[outptrs], #0x18]\n"
+ "smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "ldr x24, [%x[outptrs], #0x18]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
+ "ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x20, [%x[outptrs], #0x38]\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "ldr x23, [%x[outptrs], #0x20]\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v20.4s, v20.4s, v10.4s\n"
+ "sshl v21.4s, v21.4s, v10.4s\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "smlal v27.4s, v5.4h, v4.h[3]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
"sqrdmulh v19.4s, v19.4s, v9.4s\n"
+ "smlal v28.4s, v5.4h, v4.h[4]\n"
+ "sshl v22.4s, v22.4s, v10.4s\n"
+ "sshl v23.4s, v23.4s, v10.4s\n"
+ "smlal v29.4s, v5.4h, v4.h[5]\n"
"and v3.16b, v16.16b, v8.16b\n"
- "smlal v27.4s, v5.4h, v4.h[3]\n"
- "ldr x21, [%x[outptrs], #0x30]\n"
"and v2.16b, v17.16b, v8.16b\n"
+ "smlal v30.4s, v5.4h, v4.h[6]\n"
"and v1.16b, v18.16b, v8.16b\n"
- "smlal v28.4s, v5.4h, v4.h[4]\n"
- "ldr x20, [%x[outptrs], #0x38]\n"
"and v0.16b, v19.16b, v8.16b\n"
- "sshl v20.4s, v20.4s, v10.4s\n"
- "smlal v29.4s, v5.4h, v4.h[5]\n"
- "sshl v21.4s, v21.4s, v10.4s\n"
- "sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v30.4s, v5.4h, v4.h[6]\n"
- "sshl v23.4s, v23.4s, v10.4s\n"
- "sshl v24.4s, v24.4s, v10.4s\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
+ "sshl v24.4s, v24.4s, v10.4s\n"
"sshl v25.4s, v25.4s, v10.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
"sshr v2.4s, v2.4s, #0x1f\n"
@@ -848,49 +848,49 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -965,20 +965,20 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"18:" // Output channel oddments: Load quantization parameters: Done
"ldr s5, [%x[weights]], #0x4\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
+ "ldp x21, x20, [x22], #0x10\n"
"ldr d0, [x21, #0x0]\n"
"ldr d4, [x20, #0x0]\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
"ssubl v0.8h, v0.8b, v13.8b\n"
"ssubl v4.8h, v4.8b, v13.8b\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"cbz x23, 22f\n"
"ldr s7, [%x[weights]], #0x4\n"
"ldp x21, x20, [x22], #0x10\n"
"subs x23, x23, #0x1\n"
- "ssubl v7.8h, v7.8b, v12.8b\n"
"ldr d3, [x21, #0x0]\n"
"ldr d6, [x20, #0x0]\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
"ssubl v3.8h, v3.8b, v13.8b\n"
"ssubl v6.8h, v6.8b, v13.8b\n"
"beq 20f\n"
@@ -994,13 +994,13 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x21, #0x0]\n"
- "ssubl v0.8h, v0.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "ssubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d4, [x20, #0x0]\n"
@@ -1008,22 +1008,22 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"ldp x21, x20, [x22], #0x10\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "ssubl v4.8h, v4.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "ssubl v4.8h, v4.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
"ldr d3, [x21, #0x0]\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "ssubl v3.8h, v3.8b, v13.8b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"ldr d6, [x20, #0x0]\n"
@@ -1077,27 +1077,27 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d2, [x21, #0x0]\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "ssubl v2.8h, v2.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d1, [x20, #0x0]\n"
"ldr s0, [%x[weights]], #0x4\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "ssubl v0.8h, v0.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "ssubl v1.8h, v1.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
@@ -1145,18 +1145,18 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"sshl v17.4s, v17.4s, v10.4s\n"
"sshl v18.4s, v18.4s, v10.4s\n"
"sshl v19.4s, v19.4s, v10.4s\n"
+ "sshl v20.4s, v20.4s, v10.4s\n"
+ "sshl v21.4s, v21.4s, v10.4s\n"
"sqrdmulh v16.4s, v16.4s, v9.4s\n"
"sqrdmulh v17.4s, v17.4s, v9.4s\n"
"sqrdmulh v18.4s, v18.4s, v9.4s\n"
"sqrdmulh v19.4s, v19.4s, v9.4s\n"
+ "sshl v22.4s, v22.4s, v10.4s\n"
+ "sshl v23.4s, v23.4s, v10.4s\n"
"and v3.16b, v16.16b, v8.16b\n"
"and v2.16b, v17.16b, v8.16b\n"
"and v1.16b, v18.16b, v8.16b\n"
"and v0.16b, v19.16b, v8.16b\n"
- "sshl v20.4s, v20.4s, v10.4s\n"
- "sshl v21.4s, v21.4s, v10.4s\n"
- "sshl v22.4s, v22.4s, v10.4s\n"
- "sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
"sshl v25.4s, v25.4s, v10.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -1320,47 +1320,47 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"tbz %x[n_output_channels], #1, 24f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.h }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.h }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.h }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.h }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.h }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.h }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.h }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.h }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
- "add x9, x9, #0x2\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.h }[0], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.h }[0], [x26]\n"
+ "add x20, x20, x9\n"
+ "add x9, x9, #0x2\n"
"st1 { v26.h }[0], [x25]\n"
"st1 { v27.h }[0], [x24]\n"
"st1 { v28.h }[0], [x23]\n"
@@ -1370,46 +1370,46 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"tbz %x[n_output_channels], #0, 25f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.b }[2], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.b }[2], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.b }[2], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.b }[2], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.b }[2], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.b }[2], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.b }[2], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.b }[2], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.b }[2], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.b }[2], [x26]\n"
+ "add x20, x20, x9\n"
"st1 { v26.b }[2], [x25]\n"
"st1 { v27.b }[2], [x24]\n"
"st1 { v28.b }[2], [x23]\n"
@@ -1420,46 +1420,46 @@ void a64_s8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"24:" // Output channel oddments: Done: Store: Bit 1: Unset
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.b }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.b }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.b }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.b }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.b }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.b }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.b }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.b }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.b }[0], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.b }[0], [x26]\n"
+ "add x20, x20, x9\n"
"st1 { v26.b }[0], [x25]\n"
"st1 { v27.b }[0], [x24]\n"
"st1 { v28.b }[0], [x23]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index aad34c4c25..80a2deae4a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,1441 +35,1441 @@ void a64_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const unsigned int n_cha
__asm__ __volatile__(
"lsr x15, %x[n_channels], #0x4\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v8.4s }, [x20]\n"
- "ldp x14, x13, [%x[inptrs], #0x0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v12.4s }, [x20]\n"
+ "ldp x14, x27, [%x[inptrs], #0x0]\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
+ "ld1r { v27.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_maxval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.4s }, [x20]\n"
+ "ldp x24, x23, [%x[inptrs], #0x20]\n"
+ "ld1r { v11.4s }, [x21]\n"
+ "ld1r { v13.4s }, [x20]\n"
+ "mov x13, #0x0\n"
"mov x12, #0x0\n"
- "mov x11, #0x0\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "ldp x25, x24, [%x[outptrs], #0x0]\n"
- "ldp x23, x22, [%x[outptrs], #0x10]\n"
+ "ldp x22, x21, [%x[inptrs], #0x30]\n"
+ "ldp x11, x10, [%x[outptrs], #0x0]\n"
+ "ldp x9, x28, [%x[outptrs], #0x10]\n"
"cbz x15, 3f\n"
- "ldr q11, [x14, x12]\n"
- "ldr q20, [x13, x12]\n"
+ "ldr q12, [x14, x13]\n"
+ "ldr q24, [x27, x13]\n"
"subs x15, x15, #0x1\n"
- "ldr q16, [x10, x12]\n"
- "ldr q14, [x9, x12]\n"
- "zip2 v19.16b, v11.16b, v16.16b\n"
- "zip1 v11.16b, v11.16b, v16.16b\n"
- "ldr q13, [x28, x12]\n"
- "ldr q18, [x27, x12]\n"
- "zip1 v17.16b, v20.16b, v14.16b\n"
- "zip2 v14.16b, v20.16b, v14.16b\n"
- "ldr q16, [x26, x12]\n"
- "ldr q27, [x21, x12]\n"
- "zip2 v10.16b, v11.16b, v17.16b\n"
- "zip1 v11.16b, v11.16b, v17.16b\n"
- "ldr q24, [%x[params], #0x10]\n"
- "ldr q9, [%x[params], #0x20]\n"
- "zip1 v3.16b, v19.16b, v14.16b\n"
- "zip2 v14.16b, v19.16b, v14.16b\n"
- "ldr q31, [%x[params], #0x0]\n"
- "ldr q6, [%x[params], #0x30]\n"
- "zip2 v30.16b, v13.16b, v16.16b\n"
- "zip1 v13.16b, v13.16b, v16.16b\n"
- "ldp x21, x20, [%x[inptrs], #0x40]\n"
- "ldr q5, [x21, x12]\n"
- "zip1 v16.16b, v18.16b, v27.16b\n"
- "zip2 v27.16b, v18.16b, v27.16b\n"
- "ldr q17, [x20, x12]\n"
- "ldp x21, x20, [%x[inptrs], #0x50]\n"
- "zip2 v28.16b, v13.16b, v16.16b\n"
- "zip1 v13.16b, v13.16b, v16.16b\n"
- "ldr q16, [x21, x12]\n"
- "ldr q7, [x20, x12]\n"
- "zip2 v20.16b, v5.16b, v16.16b\n"
- "zip1 v5.16b, v5.16b, v16.16b\n"
- "ldp x21, x20, [%x[inptrs], #0x60]\n"
- "ldr q16, [x21, x12]\n"
- "zip1 v22.16b, v17.16b, v7.16b\n"
- "zip2 v7.16b, v17.16b, v7.16b\n"
- "ldr q19, [x20, x12]\n"
+ "ldr q10, [x26, x13]\n"
+ "ldr q14, [x25, x13]\n"
+ "ldr q15, [x24, x13]\n"
+ "ldr q20, [x23, x13]\n"
+ "ldr q16, [x22, x13]\n"
+ "ldr q28, [x21, x13]\n"
+ "ldr q4, [%x[params], #0x10]\n"
+ "ldr q6, [%x[params], #0x20]\n"
+ "zip2 v19.16b, v12.16b, v10.16b\n"
+ "zip1 v12.16b, v12.16b, v10.16b\n"
+ "ldr q10, [%x[params], #0x30]\n"
+ "ldp x27, x26, [%x[inptrs], #0x40]\n"
+ "zip1 v17.16b, v24.16b, v14.16b\n"
+ "zip2 v14.16b, v24.16b, v14.16b\n"
+ "ldp x25, x24, [%x[inptrs], #0x50]\n"
+ "ldp x23, x22, [%x[inptrs], #0x60]\n"
+ "zip2 v18.16b, v15.16b, v16.16b\n"
+ "zip1 v15.16b, v15.16b, v16.16b\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "zip1 v21.16b, v30.16b, v27.16b\n"
- "zip2 v27.16b, v30.16b, v27.16b\n"
- "ldr q30, [x21, x12]\n"
- "ldr q1, [x20, x12]\n"
- "zip2 v17.16b, v16.16b, v30.16b\n"
- "zip1 v16.16b, v16.16b, v30.16b\n"
- "zip1 v18.16b, v19.16b, v1.16b\n"
- "zip2 v1.16b, v19.16b, v1.16b\n"
- "ldp x14, x13, [%x[inptrs], #0x0]\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "zip2 v29.16b, v5.16b, v22.16b\n"
- "zip1 v5.16b, v5.16b, v22.16b\n"
- "zip1 v0.16b, v20.16b, v7.16b\n"
- "zip2 v7.16b, v20.16b, v7.16b\n"
+ "zip1 v16.16b, v20.16b, v28.16b\n"
+ "zip2 v28.16b, v20.16b, v28.16b\n"
+ "ldr q8, [x27, x13]\n"
+ "ldr q21, [x26, x13]\n"
+ "zip2 v23.16b, v12.16b, v17.16b\n"
+ "zip1 v12.16b, v12.16b, v17.16b\n"
+ "ldp x14, x27, [%x[inptrs], #0x0]\n"
+ "ldr q17, [x25, x13]\n"
+ "ldr q22, [x24, x13]\n"
+ "zip1 v30.16b, v19.16b, v14.16b\n"
+ "zip2 v14.16b, v19.16b, v14.16b\n"
+ "ldr q9, [x23, x13]\n"
+ "ldr q20, [x22, x13]\n"
+ "zip2 v5.16b, v15.16b, v16.16b\n"
+ "zip1 v15.16b, v15.16b, v16.16b\n"
+ "ldr q16, [x21, x13]\n"
+ "ldr q2, [x20, x13]\n"
+ "zip1 v7.16b, v18.16b, v28.16b\n"
+ "zip2 v28.16b, v18.16b, v28.16b\n"
+ "ldr q3, [%x[params], #0x0]\n"
+ "zip2 v19.16b, v8.16b, v17.16b\n"
+ "zip1 v8.16b, v8.16b, v17.16b\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
+ "zip1 v18.16b, v21.16b, v22.16b\n"
+ "zip2 v22.16b, v21.16b, v22.16b\n"
+ "ldp x24, x23, [%x[inptrs], #0x20]\n"
+ "ldp x22, x21, [%x[inptrs], #0x30]\n"
+ "zip2 v17.16b, v9.16b, v16.16b\n"
+ "zip1 v9.16b, v9.16b, v16.16b\n"
"add %x[params], %x[params], #0x40\n"
- "zip2 v30.16b, v16.16b, v18.16b\n"
- "zip1 v16.16b, v16.16b, v18.16b\n"
- "zip1 v2.16b, v17.16b, v1.16b\n"
- "zip2 v1.16b, v17.16b, v1.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v4.16b, v31.16b\n"
+ "zip1 v16.16b, v20.16b, v2.16b\n"
+ "zip2 v2.16b, v20.16b, v2.16b\n"
+ "zip2 v21.16b, v8.16b, v18.16b\n"
+ "zip1 v8.16b, v8.16b, v18.16b\n"
+ "zip1 v29.16b, v19.16b, v22.16b\n"
+ "zip2 v22.16b, v19.16b, v22.16b\n"
+ "zip2 v1.16b, v9.16b, v16.16b\n"
+ "zip1 v9.16b, v9.16b, v16.16b\n"
+ "zip1 v31.16b, v17.16b, v2.16b\n"
+ "zip2 v2.16b, v17.16b, v2.16b\n"
+ "mov v26.16b, v3.16b\n"
+ "mov v0.16b, v3.16b\n"
+ "mov v18.16b, v3.16b\n"
"beq 2f\n"
"1:" // Loop
- ".inst 0x4e8b971f // sdot v31.4s, v24.16b, v11.16b\n"
- ".inst 0x4e8d9712 // sdot v18.4s, v24.16b, v13.16b\n"
- "ext v11.16b, v11.16b, v11.16b, #0x1\n"
- "add x12, x12, #0x10\n"
- ".inst 0x4e8d953f // sdot v31.4s, v9.16b, v13.16b\n"
- "ext v13.16b, v13.16b, v13.16b, #0x1\n"
- ".inst 0x4e8b971a // sdot v26.4s, v24.16b, v11.16b\n"
- "ldr q17, [%x[params], #0x0]\n"
- ".inst 0x4e8d9704 // sdot v4.4s, v24.16b, v13.16b\n"
- ".inst 0x4e859532 // sdot v18.4s, v9.16b, v5.16b\n"
+ ".inst 0x4e8c9483 // sdot v3.4s, v4.16b, v12.16b\n"
+ ".inst 0x4e8f9480 // sdot v0.4s, v4.16b, v15.16b\n"
+ "ext v12.16b, v12.16b, v12.16b, #0x1\n"
+ "add x13, x13, #0x10\n"
"subs x15, x15, #0x1\n"
- ".inst 0x4e8594df // sdot v31.4s, v6.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x4e8d953a // sdot v26.4s, v9.16b, v13.16b\n"
+ ".inst 0x4e8c949a // sdot v26.4s, v4.16b, v12.16b\n"
+ "ldr q17, [%x[params], #0x0]\n"
+ ".inst 0x4e8f94c3 // sdot v3.4s, v6.16b, v15.16b\n"
+ "ext v15.16b, v15.16b, v15.16b, #0x1\n"
+ ".inst 0x4e8894c0 // sdot v0.4s, v6.16b, v8.16b\n"
+ ".inst 0x4e8f9492 // sdot v18.4s, v4.16b, v15.16b\n"
+ ".inst 0x4e889543 // sdot v3.4s, v10.16b, v8.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ ".inst 0x4e8f94da // sdot v26.4s, v6.16b, v15.16b\n"
"ldr q20, [%x[params], #0x10]\n"
- ".inst 0x4e859524 // sdot v4.4s, v9.16b, v5.16b\n"
- ".inst 0x4e9094d2 // sdot v18.4s, v6.16b, v16.16b\n"
- "ext v16.16b, v16.16b, v16.16b, #0x1\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x4e8594da // sdot v26.4s, v6.16b, v5.16b\n"
- ".inst 0x4e9094c4 // sdot v4.4s, v6.16b, v16.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
+ ".inst 0x4e8894d2 // sdot v18.4s, v6.16b, v8.16b\n"
+ ".inst 0x4e899540 // sdot v0.4s, v10.16b, v9.16b\n"
+ "ext v9.16b, v9.16b, v9.16b, #0x1\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ ".inst 0x4e88955a // sdot v26.4s, v10.16b, v8.16b\n"
+ ".inst 0x4e899552 // sdot v18.4s, v10.16b, v9.16b\n"
+ "and v16.16b, v3.16b, v20.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v17.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sqrdmulh v18.4s, v18.4s, v17.4s\n"
- "sqrdmulh v4.4s, v4.4s, v17.4s\n"
- "ldr q5, [%x[params], #0x60]\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "and v19.16b, v26.16b, v20.16b\n"
- "and v17.16b, v18.16b, v20.16b\n"
- "and v16.16b, v4.16b, v20.16b\n"
+ "ldr q6, [%x[params], #0x60]\n"
+ "and v19.16b, v0.16b, v20.16b\n"
+ "sqadd v3.4s, v3.4s, v16.4s\n"
+ "and v17.16b, v26.16b, v20.16b\n"
+ "and v16.16b, v18.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v20.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "sqadd v26.4s, v26.4s, v19.4s\n"
- "ldr q13, [%x[params], #0x40]\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "ldr q17, [%x[params], #0x50]\n"
- "sqadd v4.4s, v4.4s, v16.4s\n"
+ "sqadd v0.4s, v0.4s, v19.4s\n"
+ "ldr q8, [%x[params], #0x50]\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0x40]\n"
+ "add v3.4s, v3.4s, v13.4s\n"
+ "sqadd v18.4s, v18.4s, v16.4s\n"
"ldr q16, [%x[params], #0x30]\n"
- "add v31.4s, v31.4s, v15.4s\n"
+ "srshl v0.4s, v0.4s, v20.4s\n"
"srshl v26.4s, v26.4s, v20.4s\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
"srshl v18.4s, v18.4s, v20.4s\n"
- "srshl v4.4s, v4.4s, v20.4s\n"
- "ldr q22, [%x[params], #0x70]\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "add v26.4s, v26.4s, v15.4s\n"
- "add v18.4s, v18.4s, v15.4s\n"
- "add v4.4s, v4.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v12.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v18.4s, v18.4s, v8.4s\n"
- "smax v4.4s, v4.4s, v8.4s\n"
- "smin v26.4s, v26.4s, v12.4s\n"
- "smin v18.4s, v18.4s, v12.4s\n"
- "smin v4.4s, v4.4s, v12.4s\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "ldr q12, [%x[params], #0x70]\n"
+ "add v0.4s, v0.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "add v18.4s, v18.4s, v13.4s\n"
+ "smax v0.4s, v0.4s, v27.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smax v18.4s, v18.4s, v27.4s\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v18.4s, v18.4s, v11.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s31, [x25, x11]\n"
+ "str s3, [x11, x12]\n"
"ldr q24, [%x[params], #0x20]\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s26, [x24, x11]\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
- "str s18, [x23, x11]\n"
- "mov v26.16b, v24.16b\n"
- "str s4, [x22, x11]\n"
- "mov v25.16b, v24.16b\n"
- "mov v23.16b, v24.16b\n"
- ".inst 0x4e8a9618 // sdot v24.4s, v16.16b, v10.16b\n"
- ".inst 0x4e9c9619 // sdot v25.4s, v16.16b, v28.16b\n"
- ".inst 0x4e9c95b8 // sdot v24.4s, v13.16b, v28.16b\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- "add x11, x11, #0x4\n"
- "ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x4e8a961a // sdot v26.4s, v16.16b, v10.16b\n"
- "ldr q10, [x13, x12]\n"
- ".inst 0x4e9c9617 // sdot v23.4s, v16.16b, v28.16b\n"
- ".inst 0x4e9d95b9 // sdot v25.4s, v13.16b, v29.16b\n"
- ".inst 0x4e9d9638 // sdot v24.4s, v17.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x4e9c95ba // sdot v26.4s, v13.16b, v28.16b\n"
- "ldr q20, [x27, x12]\n"
- ".inst 0x4e9d95b7 // sdot v23.4s, v13.16b, v29.16b\n"
- "sqrdmulh v24.4s, v24.4s, v5.4s\n"
- ".inst 0x4e9e9639 // sdot v25.4s, v17.16b, v30.16b\n"
- "ext v30.16b, v30.16b, v30.16b, #0x1\n"
- ".inst 0x4e9d963a // sdot v26.4s, v17.16b, v29.16b\n"
- ".inst 0x4e9e9637 // sdot v23.4s, v17.16b, v30.16b\n"
- "and v16.16b, v24.16b, v22.16b\n"
+ "str s26, [x10, x12]\n"
+ "mov v15.16b, v24.16b\n"
+ "str s0, [x9, x12]\n"
+ "mov v20.16b, v24.16b\n"
+ "str s18, [x28, x12]\n"
+ "mov v4.16b, v24.16b\n"
+ ".inst 0x4e979618 // sdot v24.4s, v16.16b, v23.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e859614 // sdot v20.4s, v16.16b, v5.16b\n"
+ "ext v23.16b, v23.16b, v23.16b, #0x1\n"
+ ".inst 0x4e859638 // sdot v24.4s, v17.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x4e97960f // sdot v15.4s, v16.16b, v23.16b\n"
+ "ldr q3, [x27, x13]\n"
+ ".inst 0x4e859604 // sdot v4.4s, v16.16b, v5.16b\n"
+ ".inst 0x4e959634 // sdot v20.4s, v17.16b, v21.16b\n"
+ ".inst 0x4e959518 // sdot v24.4s, v8.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x4e85962f // sdot v15.4s, v17.16b, v5.16b\n"
+ "ldr q19, [x23, x13]\n"
+ ".inst 0x4e959624 // sdot v4.4s, v17.16b, v21.16b\n"
+ ".inst 0x4e819514 // sdot v20.4s, v8.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ "sqrdmulh v24.4s, v24.4s, v6.4s\n"
+ ".inst 0x4e95950f // sdot v15.4s, v8.16b, v21.16b\n"
+ ".inst 0x4e819504 // sdot v4.4s, v8.16b, v1.16b\n"
+ "and v16.16b, v24.16b, v12.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v6.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v5.4s\n"
- "sqrdmulh v25.4s, v25.4s, v5.4s\n"
- "sqrdmulh v23.4s, v23.4s, v5.4s\n"
- "ldr q19, [%x[params], #0xc0]\n"
+ "sqrdmulh v15.4s, v15.4s, v6.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v6.4s\n"
+ "ldr q25, [%x[params], #0xc0]\n"
+ "and v18.16b, v20.16b, v12.16b\n"
"sqadd v24.4s, v24.4s, v16.4s\n"
- "and v18.16b, v26.16b, v22.16b\n"
- "and v17.16b, v25.16b, v22.16b\n"
- "and v16.16b, v23.16b, v22.16b\n"
+ "and v17.16b, v15.16b, v12.16b\n"
+ "and v16.16b, v4.16b, v12.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v12.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v24.4s, v24.4s, v22.4s\n"
- "sqadd v26.4s, v26.4s, v18.4s\n"
- "ldr q18, [%x[params], #0xa0]\n"
- "sqadd v25.4s, v25.4s, v17.4s\n"
- "ldr q17, [%x[params], #0xb0]\n"
- "sqadd v23.4s, v23.4s, v16.4s\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "ldr q18, [%x[params], #0xb0]\n"
+ "sqadd v15.4s, v15.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0xa0]\n"
+ "add v24.4s, v24.4s, v13.4s\n"
+ "sqadd v4.4s, v4.4s, v16.4s\n"
"ldr q16, [%x[params], #0x90]\n"
- "add v24.4s, v24.4s, v15.4s\n"
- "srshl v26.4s, v26.4s, v22.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "srshl v23.4s, v23.4s, v22.4s\n"
- "ldr q22, [%x[params], #0xd0]\n"
- "smax v24.4s, v24.4s, v8.4s\n"
- "add v26.4s, v26.4s, v15.4s\n"
- "add v25.4s, v25.4s, v15.4s\n"
- "add v23.4s, v23.4s, v15.4s\n"
- "smin v24.4s, v24.4s, v12.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "smax v23.4s, v23.4s, v8.4s\n"
- "smin v26.4s, v26.4s, v12.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smin v23.4s, v23.4s, v12.4s\n"
+ "srshl v20.4s, v20.4s, v12.4s\n"
+ "srshl v15.4s, v15.4s, v12.4s\n"
+ "smax v24.4s, v24.4s, v27.4s\n"
+ "srshl v4.4s, v4.4s, v12.4s\n"
+ "ldr q8, [%x[params], #0xd0]\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "add v15.4s, v15.4s, v13.4s\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
+ "add v4.4s, v4.4s, v13.4s\n"
+ "smax v20.4s, v20.4s, v27.4s\n"
+ "smax v15.4s, v15.4s, v27.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
+ "smax v4.4s, v4.4s, v27.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v15.4s, v15.4s, v11.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x25, x11]\n"
+ "smin v4.4s, v4.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
+ "str s24, [x11, x12]\n"
"ldr q24, [%x[params], #0x80]\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s26, [x24, x11]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str s25, [x23, x11]\n"
- "str s23, [x22, x11]\n"
- "mov v23.16b, v24.16b\n"
- "mov v31.16b, v24.16b\n"
- ".inst 0x4e95961f // sdot v31.4s, v16.16b, v21.16b\n"
- "mov v13.16b, v24.16b\n"
- ".inst 0x4e839618 // sdot v24.4s, v16.16b, v3.16b\n"
- ".inst 0x4e959658 // sdot v24.4s, v18.16b, v21.16b\n"
- "add x11, x11, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x4e839617 // sdot v23.4s, v16.16b, v3.16b\n"
- "ldr q3, [x10, x12]\n"
- ".inst 0x4e95960d // sdot v13.4s, v16.16b, v21.16b\n"
- ".inst 0x4e80965f // sdot v31.4s, v18.16b, v0.16b\n"
- ".inst 0x4e809638 // sdot v24.4s, v17.16b, v0.16b\n"
- "ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e959657 // sdot v23.4s, v18.16b, v21.16b\n"
- "ldr q4, [x26, x12]\n"
- ".inst 0x4e80964d // sdot v13.4s, v18.16b, v0.16b\n"
- ".inst 0x4e82963f // sdot v31.4s, v17.16b, v2.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- "sqrdmulh v24.4s, v24.4s, v19.4s\n"
- ".inst 0x4e809637 // sdot v23.4s, v17.16b, v0.16b\n"
- ".inst 0x4e82962d // sdot v13.4s, v17.16b, v2.16b\n"
- "and v16.16b, v24.16b, v22.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v15.16b, v15.16b, v15.16b\n"
+ "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "str s15, [x10, x12]\n"
+ "mov v21.16b, v24.16b\n"
+ "str s20, [x9, x12]\n"
+ "mov v20.16b, v24.16b\n"
+ "str s4, [x28, x12]\n"
+ "mov v12.16b, v24.16b\n"
+ ".inst 0x4e9e9618 // sdot v24.4s, v16.16b, v30.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e879614 // sdot v20.4s, v16.16b, v7.16b\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x1\n"
+ ".inst 0x4e879638 // sdot v24.4s, v17.16b, v7.16b\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ ".inst 0x4e9e9615 // sdot v21.4s, v16.16b, v30.16b\n"
+ "ldr q30, [x26, x13]\n"
+ ".inst 0x4e87960c // sdot v12.4s, v16.16b, v7.16b\n"
+ ".inst 0x4e9d9634 // sdot v20.4s, v17.16b, v29.16b\n"
+ ".inst 0x4e9d9658 // sdot v24.4s, v18.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x4e879635 // sdot v21.4s, v17.16b, v7.16b\n"
+ "ldr q1, [x22, x13]\n"
+ ".inst 0x4e9d962c // sdot v12.4s, v17.16b, v29.16b\n"
+ ".inst 0x4e9f9654 // sdot v20.4s, v18.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ "sqrdmulh v24.4s, v24.4s, v25.4s\n"
+ ".inst 0x4e9d9655 // sdot v21.4s, v18.16b, v29.16b\n"
+ ".inst 0x4e9f964c // sdot v12.4s, v18.16b, v31.16b\n"
+ "and v16.16b, v24.16b, v8.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v25.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v23.4s, v23.4s, v19.4s\n"
- "sqrdmulh v31.4s, v31.4s, v19.4s\n"
- "sqrdmulh v13.4s, v13.4s, v19.4s\n"
- "ldr q19, [%x[params], #0x120]\n"
+ "sqrdmulh v21.4s, v21.4s, v25.4s\n"
+ "sqrdmulh v12.4s, v12.4s, v25.4s\n"
+ "ldr q29, [%x[params], #0x120]\n"
+ "and v18.16b, v20.16b, v8.16b\n"
"sqadd v24.4s, v24.4s, v16.4s\n"
- "and v18.16b, v23.16b, v22.16b\n"
- "and v17.16b, v31.16b, v22.16b\n"
- "and v16.16b, v13.16b, v22.16b\n"
+ "and v17.16b, v21.16b, v8.16b\n"
+ "and v16.16b, v12.16b, v8.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v8.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v24.4s, v24.4s, v22.4s\n"
- "sqadd v23.4s, v23.4s, v18.4s\n"
- "ldr q18, [%x[params], #0x100]\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "ldr q17, [%x[params], #0x110]\n"
- "sqadd v13.4s, v13.4s, v16.4s\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "ldr q18, [%x[params], #0x110]\n"
+ "sqadd v21.4s, v21.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0x100]\n"
+ "add v24.4s, v24.4s, v13.4s\n"
+ "sqadd v12.4s, v12.4s, v16.4s\n"
"ldr q16, [%x[params], #0xf0]\n"
- "add v24.4s, v24.4s, v15.4s\n"
- "srshl v23.4s, v23.4s, v22.4s\n"
- "srshl v31.4s, v31.4s, v22.4s\n"
- "srshl v13.4s, v13.4s, v22.4s\n"
- "ldr q22, [%x[params], #0x130]\n"
- "smax v24.4s, v24.4s, v8.4s\n"
- "add v23.4s, v23.4s, v15.4s\n"
- "add v31.4s, v31.4s, v15.4s\n"
- "add v13.4s, v13.4s, v15.4s\n"
- "smin v24.4s, v24.4s, v12.4s\n"
- "smax v23.4s, v23.4s, v8.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smax v13.4s, v13.4s, v8.4s\n"
- "smin v23.4s, v23.4s, v12.4s\n"
- "smin v31.4s, v31.4s, v12.4s\n"
- "smin v13.4s, v13.4s, v12.4s\n"
+ "srshl v20.4s, v20.4s, v8.4s\n"
+ "srshl v21.4s, v21.4s, v8.4s\n"
+ "smax v24.4s, v24.4s, v27.4s\n"
+ "srshl v12.4s, v12.4s, v8.4s\n"
+ "ldr q23, [%x[params], #0x130]\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "add v21.4s, v21.4s, v13.4s\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
+ "add v12.4s, v12.4s, v13.4s\n"
+ "smax v20.4s, v20.4s, v27.4s\n"
+ "smax v21.4s, v21.4s, v27.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
+ "smax v12.4s, v12.4s, v27.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str s24, [x25, x11]\n"
- "ldr q2, [%x[params], #0xe0]\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s23, [x24, x11]\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "str s31, [x23, x11]\n"
- "mov v25.16b, v2.16b\n"
- "str s13, [x22, x11]\n"
- "mov v21.16b, v2.16b\n"
- "mov v30.16b, v2.16b\n"
- ".inst 0x4e8e9602 // sdot v2.4s, v16.16b, v14.16b\n"
- ".inst 0x4e9b9615 // sdot v21.4s, v16.16b, v27.16b\n"
- ".inst 0x4e9b9642 // sdot v2.4s, v18.16b, v27.16b\n"
+ "smin v12.4s, v12.4s, v11.4s\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s24, [x11, x12]\n"
+ "ldr q26, [%x[params], #0xe0]\n"
+ "uzp1 v12.16b, v12.16b, v12.16b\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v12.16b, v12.16b, v12.16b\n"
+ "str s21, [x10, x12]\n"
+ "mov v5.16b, v26.16b\n"
+ "str s20, [x9, x12]\n"
+ "mov v21.16b, v26.16b\n"
+ "str s12, [x28, x12]\n"
+ "mov v0.16b, v26.16b\n"
+ ".inst 0x4e8e961a // sdot v26.4s, v16.16b, v14.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e9c9615 // sdot v21.4s, v16.16b, v28.16b\n"
"ext v14.16b, v14.16b, v14.16b, #0x1\n"
- "add x11, x11, #0x4\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x4e8e9619 // sdot v25.4s, v16.16b, v14.16b\n"
- "ldr q14, [x9, x12]\n"
- ".inst 0x4e9b961e // sdot v30.4s, v16.16b, v27.16b\n"
- ".inst 0x4e879655 // sdot v21.4s, v18.16b, v7.16b\n"
- ".inst 0x4e879622 // sdot v2.4s, v17.16b, v7.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- ".inst 0x4e9b9659 // sdot v25.4s, v18.16b, v27.16b\n"
- "ldr q27, [x21, x12]\n"
- ".inst 0x4e87965e // sdot v30.4s, v18.16b, v7.16b\n"
- "sqrdmulh v2.4s, v2.4s, v19.4s\n"
- ".inst 0x4e819635 // sdot v21.4s, v17.16b, v1.16b\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- ".inst 0x4e879639 // sdot v25.4s, v17.16b, v7.16b\n"
- ".inst 0x4e81963e // sdot v30.4s, v17.16b, v1.16b\n"
- "and v16.16b, v2.16b, v22.16b\n"
+ ".inst 0x4e9c963a // sdot v26.4s, v17.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e8e9605 // sdot v5.4s, v16.16b, v14.16b\n"
+ "ldr q14, [x25, x13]\n"
+ ".inst 0x4e9c9600 // sdot v0.4s, v16.16b, v28.16b\n"
+ ".inst 0x4e969635 // sdot v21.4s, v17.16b, v22.16b\n"
+ ".inst 0x4e96965a // sdot v26.4s, v18.16b, v22.16b\n"
+ "ext v22.16b, v22.16b, v22.16b, #0x1\n"
+ ".inst 0x4e9c9625 // sdot v5.4s, v17.16b, v28.16b\n"
+ "ldr q28, [x21, x13]\n"
+ ".inst 0x4e969620 // sdot v0.4s, v17.16b, v22.16b\n"
+ ".inst 0x4e829655 // sdot v21.4s, v18.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ "sqrdmulh v26.4s, v26.4s, v29.4s\n"
+ ".inst 0x4e969645 // sdot v5.4s, v18.16b, v22.16b\n"
+ ".inst 0x4e829640 // sdot v0.4s, v18.16b, v2.16b\n"
+ "and v16.16b, v26.16b, v23.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v29.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v25.4s, v25.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqrdmulh v30.4s, v30.4s, v19.4s\n"
- "ldr q11, [x14, x12]\n"
- "ldp x21, x20, [%x[inptrs], #0x40]\n"
- "ldr q5, [x21, x12]\n"
- "ldr q29, [x20, x12]\n"
- "sqadd v2.4s, v2.4s, v16.4s\n"
- "and v19.16b, v25.16b, v22.16b\n"
- "and v17.16b, v21.16b, v22.16b\n"
- "and v16.16b, v30.16b, v22.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v29.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v29.4s\n"
+ "ldr q12, [x14, x13]\n"
+ "ldp x23, x22, [%x[inptrs], #0x40]\n"
+ "and v20.16b, v21.16b, v23.16b\n"
"ldp x21, x20, [%x[inptrs], #0x50]\n"
- "ldr q26, [x21, x12]\n"
- "ldr q7, [x20, x12]\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "and v17.16b, v5.16b, v23.16b\n"
+ "ldr q8, [x23, x13]\n"
+ "ldr q29, [x22, x13]\n"
+ "and v16.16b, v0.16b, v23.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "ldr q18, [x21, x13]\n"
+ "ldr q22, [x20, x13]\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v26.4s, v26.4s, v23.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v2.4s, v2.4s, v22.4s\n"
- "sqadd v25.4s, v25.4s, v19.4s\n"
- "ldr q9, [%x[params], #0x160]\n"
- "sqadd v21.4s, v21.4s, v17.4s\n"
- "ldr q6, [%x[params], #0x170]\n"
- "sqadd v30.4s, v30.4s, v16.4s\n"
- "ldr q24, [%x[params], #0x150]\n"
- "add v2.4s, v2.4s, v15.4s\n"
- "srshl v25.4s, v25.4s, v22.4s\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
- "srshl v30.4s, v30.4s, v22.4s\n"
- "ldr q13, [x28, x12]\n"
- "smax v2.4s, v2.4s, v8.4s\n"
- "ldp x21, x20, [%x[inptrs], #0x60]\n"
- "ldr q16, [x21, x12]\n"
- "ldr q28, [x20, x12]\n"
- "add v25.4s, v25.4s, v15.4s\n"
- "add v21.4s, v21.4s, v15.4s\n"
- "add v30.4s, v30.4s, v15.4s\n"
- "smin v2.4s, v2.4s, v12.4s\n"
+ "sqadd v21.4s, v21.4s, v20.4s\n"
+ "ldr q10, [%x[params], #0x170]\n"
+ "sqadd v5.4s, v5.4s, v17.4s\n"
+ "ldr q6, [%x[params], #0x160]\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "sqadd v0.4s, v0.4s, v16.4s\n"
+ "ldr q4, [%x[params], #0x150]\n"
+ "srshl v21.4s, v21.4s, v23.4s\n"
+ "srshl v5.4s, v5.4s, v23.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "srshl v0.4s, v0.4s, v23.4s\n"
+ "ldr q15, [x24, x13]\n"
+ "ldp x23, x22, [%x[inptrs], #0x60]\n"
+ "add v21.4s, v21.4s, v13.4s\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "ldr q23, [x21, x12]\n"
- "ldr q1, [x20, x12]\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "smax v21.4s, v21.4s, v8.4s\n"
- "ldp x14, x13, [%x[inptrs], #0x0]\n"
- "smax v30.4s, v30.4s, v8.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "ldp x10, x9, [%x[inptrs], #0x10]\n"
- "ldp x28, x27, [%x[inptrs], #0x20]\n"
- "smin v21.4s, v21.4s, v12.4s\n"
- "smin v30.4s, v30.4s, v12.4s\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v2.16b, v2.16b, v2.16b\n"
- "str s2, [x25, x11]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
+ "ldp x14, x27, [%x[inptrs], #0x0]\n"
+ "add v5.4s, v5.4s, v13.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
+ "ldr q9, [x23, x13]\n"
+ "ldr q24, [x22, x13]\n"
+ "add v0.4s, v0.4s, v13.4s\n"
+ "smax v21.4s, v21.4s, v27.4s\n"
+ "ldp x24, x23, [%x[inptrs], #0x20]\n"
+ "ldr q25, [x21, x13]\n"
+ "ldr q2, [x20, x13]\n"
+ "smax v5.4s, v5.4s, v27.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "ldp x22, x21, [%x[inptrs], #0x30]\n"
+ "smax v0.4s, v0.4s, v27.4s\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "smin v5.4s, v5.4s, v11.4s\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "zip2 v18.16b, v11.16b, v3.16b\n"
- "zip1 v11.16b, v11.16b, v3.16b\n"
- "zip1 v17.16b, v10.16b, v14.16b\n"
- "zip2 v14.16b, v10.16b, v14.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s25, [x24, x11]\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "str s26, [x11, x12]\n"
+ "zip2 v17.16b, v12.16b, v30.16b\n"
+ "zip1 v12.16b, v12.16b, v30.16b\n"
+ "zip1 v16.16b, v3.16b, v14.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "zip2 v14.16b, v3.16b, v14.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s21, [x23, x11]\n"
- "str s30, [x22, x11]\n"
- "zip2 v10.16b, v11.16b, v17.16b\n"
- "zip1 v11.16b, v11.16b, v17.16b\n"
- "add x11, x11, #0x4\n"
- "zip1 v3.16b, v18.16b, v14.16b\n"
- "zip2 v14.16b, v18.16b, v14.16b\n"
- "ldr q31, [%x[params], #0x140]\n"
+ "zip2 v23.16b, v12.16b, v16.16b\n"
+ "zip1 v12.16b, v12.16b, v16.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "zip1 v30.16b, v17.16b, v14.16b\n"
+ "str s5, [x10, x12]\n"
+ "str s21, [x9, x12]\n"
+ "zip2 v14.16b, v17.16b, v14.16b\n"
+ "ldr q3, [%x[params], #0x140]\n"
+ "zip2 v21.16b, v15.16b, v1.16b\n"
+ "zip1 v15.16b, v15.16b, v1.16b\n"
+ "zip1 v20.16b, v19.16b, v28.16b\n"
"add %x[params], %x[params], #0x180\n"
- "zip2 v22.16b, v13.16b, v4.16b\n"
- "zip1 v13.16b, v13.16b, v4.16b\n"
- "zip1 v2.16b, v20.16b, v27.16b\n"
- "zip2 v27.16b, v20.16b, v27.16b\n"
- "zip2 v19.16b, v5.16b, v26.16b\n"
- "zip1 v5.16b, v5.16b, v26.16b\n"
- "zip1 v18.16b, v29.16b, v7.16b\n"
- "zip2 v7.16b, v29.16b, v7.16b\n"
- "zip2 v4.16b, v16.16b, v23.16b\n"
- "zip1 v16.16b, v16.16b, v23.16b\n"
- "zip1 v17.16b, v28.16b, v1.16b\n"
- "zip2 v1.16b, v28.16b, v1.16b\n"
- "zip2 v28.16b, v13.16b, v2.16b\n"
- "zip1 v13.16b, v13.16b, v2.16b\n"
- "zip1 v21.16b, v22.16b, v27.16b\n"
- "zip2 v27.16b, v22.16b, v27.16b\n"
- "zip2 v29.16b, v5.16b, v18.16b\n"
- "zip1 v5.16b, v5.16b, v18.16b\n"
- "zip1 v0.16b, v19.16b, v7.16b\n"
- "zip2 v7.16b, v19.16b, v7.16b\n"
- "zip2 v30.16b, v16.16b, v17.16b\n"
- "zip1 v16.16b, v16.16b, v17.16b\n"
- "zip1 v2.16b, v4.16b, v1.16b\n"
- "zip2 v1.16b, v4.16b, v1.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "mov v4.16b, v31.16b\n"
+ "str s0, [x28, x12]\n"
+ "zip2 v28.16b, v19.16b, v28.16b\n"
+ "zip2 v19.16b, v8.16b, v18.16b\n"
+ "add x12, x12, #0x4\n"
+ "zip1 v8.16b, v8.16b, v18.16b\n"
+ "zip1 v18.16b, v29.16b, v22.16b\n"
+ "zip2 v22.16b, v29.16b, v22.16b\n"
+ "zip2 v17.16b, v9.16b, v25.16b\n"
+ "zip1 v9.16b, v9.16b, v25.16b\n"
+ "zip1 v16.16b, v24.16b, v2.16b\n"
+ "zip2 v2.16b, v24.16b, v2.16b\n"
+ "zip2 v5.16b, v15.16b, v20.16b\n"
+ "zip1 v15.16b, v15.16b, v20.16b\n"
+ "zip1 v7.16b, v21.16b, v28.16b\n"
+ "zip2 v28.16b, v21.16b, v28.16b\n"
+ "zip2 v21.16b, v8.16b, v18.16b\n"
+ "zip1 v8.16b, v8.16b, v18.16b\n"
+ "zip1 v29.16b, v19.16b, v22.16b\n"
+ "zip2 v22.16b, v19.16b, v22.16b\n"
+ "zip2 v1.16b, v9.16b, v16.16b\n"
+ "zip1 v9.16b, v9.16b, v16.16b\n"
+ "zip1 v31.16b, v17.16b, v2.16b\n"
+ "zip2 v2.16b, v17.16b, v2.16b\n"
+ "mov v26.16b, v3.16b\n"
+ "mov v0.16b, v3.16b\n"
+ "mov v18.16b, v3.16b\n"
"bgt 1b\n"
"2:" // Detached iteration
- ".inst 0x4e8b971f // sdot v31.4s, v24.16b, v11.16b\n"
- ".inst 0x4e8d9712 // sdot v18.4s, v24.16b, v13.16b\n"
- "ext v11.16b, v11.16b, v11.16b, #0x1\n"
+ ".inst 0x4e8c9483 // sdot v3.4s, v4.16b, v12.16b\n"
+ ".inst 0x4e8f9480 // sdot v0.4s, v4.16b, v15.16b\n"
+ "ext v12.16b, v12.16b, v12.16b, #0x1\n"
"tst %x[n_channels], #0xf\n"
- ".inst 0x4e8d953f // sdot v31.4s, v9.16b, v13.16b\n"
- "ext v13.16b, v13.16b, v13.16b, #0x1\n"
- ".inst 0x4e8b971a // sdot v26.4s, v24.16b, v11.16b\n"
+ "add x13, x13, #0x10\n"
+ ".inst 0x4e8c949a // sdot v26.4s, v4.16b, v12.16b\n"
"ldr q17, [%x[params], #0x0]\n"
- ".inst 0x4e8d9704 // sdot v4.4s, v24.16b, v13.16b\n"
- ".inst 0x4e859532 // sdot v18.4s, v9.16b, v5.16b\n"
- "add x12, x12, #0x10\n"
- ".inst 0x4e8594df // sdot v31.4s, v6.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x4e8d953a // sdot v26.4s, v9.16b, v13.16b\n"
+ ".inst 0x4e8f94c3 // sdot v3.4s, v6.16b, v15.16b\n"
+ "ext v15.16b, v15.16b, v15.16b, #0x1\n"
+ ".inst 0x4e8894c0 // sdot v0.4s, v6.16b, v8.16b\n"
+ ".inst 0x4e8f9492 // sdot v18.4s, v4.16b, v15.16b\n"
+ ".inst 0x4e889543 // sdot v3.4s, v10.16b, v8.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ ".inst 0x4e8f94da // sdot v26.4s, v6.16b, v15.16b\n"
"ldr q19, [%x[params], #0x10]\n"
- ".inst 0x4e859524 // sdot v4.4s, v9.16b, v5.16b\n"
- ".inst 0x4e9094d2 // sdot v18.4s, v6.16b, v16.16b\n"
- "ext v16.16b, v16.16b, v16.16b, #0x1\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x4e8594da // sdot v26.4s, v6.16b, v5.16b\n"
- ".inst 0x4e9094c4 // sdot v4.4s, v6.16b, v16.16b\n"
- "and v16.16b, v31.16b, v19.16b\n"
+ ".inst 0x4e8894d2 // sdot v18.4s, v6.16b, v8.16b\n"
+ ".inst 0x4e899540 // sdot v0.4s, v10.16b, v9.16b\n"
+ "ext v9.16b, v9.16b, v9.16b, #0x1\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ ".inst 0x4e88955a // sdot v26.4s, v10.16b, v8.16b\n"
+ ".inst 0x4e899552 // sdot v18.4s, v10.16b, v9.16b\n"
+ "and v16.16b, v3.16b, v19.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v17.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sqrdmulh v18.4s, v18.4s, v17.4s\n"
- "sqrdmulh v4.4s, v4.4s, v17.4s\n"
- "ldr q24, [%x[params], #0x60]\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "and v20.16b, v26.16b, v19.16b\n"
- "and v17.16b, v18.16b, v19.16b\n"
- "and v16.16b, v4.16b, v19.16b\n"
+ "ldr q4, [%x[params], #0x60]\n"
+ "and v20.16b, v0.16b, v19.16b\n"
+ "sqadd v3.4s, v3.4s, v16.4s\n"
+ "and v17.16b, v26.16b, v19.16b\n"
+ "and v16.16b, v18.16b, v19.16b\n"
"sshr v20.4s, v20.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v19.4s\n"
- "sqadd v26.4s, v26.4s, v20.4s\n"
- "ldr q5, [%x[params], #0x40]\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "ldr q17, [%x[params], #0x50]\n"
- "sqadd v4.4s, v4.4s, v16.4s\n"
+ "sqadd v0.4s, v0.4s, v20.4s\n"
+ "ldr q6, [%x[params], #0x50]\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0x40]\n"
+ "add v3.4s, v3.4s, v13.4s\n"
+ "sqadd v18.4s, v18.4s, v16.4s\n"
"ldr q16, [%x[params], #0x30]\n"
- "add v31.4s, v31.4s, v15.4s\n"
+ "srshl v0.4s, v0.4s, v19.4s\n"
"srshl v26.4s, v26.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
"srshl v18.4s, v18.4s, v19.4s\n"
- "srshl v4.4s, v4.4s, v19.4s\n"
- "ldr q23, [%x[params], #0x70]\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "add v26.4s, v26.4s, v15.4s\n"
- "add v18.4s, v18.4s, v15.4s\n"
- "add v4.4s, v4.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v12.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v18.4s, v18.4s, v8.4s\n"
- "smax v4.4s, v4.4s, v8.4s\n"
- "smin v26.4s, v26.4s, v12.4s\n"
- "smin v18.4s, v18.4s, v12.4s\n"
- "smin v4.4s, v4.4s, v12.4s\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "ldr q19, [%x[params], #0x70]\n"
+ "add v0.4s, v0.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "add v18.4s, v18.4s, v13.4s\n"
+ "smax v0.4s, v0.4s, v27.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smax v18.4s, v18.4s, v27.4s\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v18.4s, v18.4s, v11.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s31, [x25, x11]\n"
+ "str s3, [x11, x12]\n"
"ldr q25, [%x[params], #0x20]\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s26, [x24, x11]\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
- "str s18, [x23, x11]\n"
- "mov v22.16b, v25.16b\n"
- "str s4, [x22, x11]\n"
+ "str s26, [x10, x12]\n"
+ "mov v24.16b, v25.16b\n"
+ "str s0, [x9, x12]\n"
"mov v20.16b, v25.16b\n"
- "mov v19.16b, v25.16b\n"
- ".inst 0x4e8a9619 // sdot v25.4s, v16.16b, v10.16b\n"
- ".inst 0x4e9c9614 // sdot v20.4s, v16.16b, v28.16b\n"
- ".inst 0x4e9c94b9 // sdot v25.4s, v5.16b, v28.16b\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- "add x11, x11, #0x4\n"
- "ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x4e8a9616 // sdot v22.4s, v16.16b, v10.16b\n"
- ".inst 0x4e9c9613 // sdot v19.4s, v16.16b, v28.16b\n"
- ".inst 0x4e9d94b4 // sdot v20.4s, v5.16b, v29.16b\n"
- ".inst 0x4e9d9639 // sdot v25.4s, v17.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x4e9c94b6 // sdot v22.4s, v5.16b, v28.16b\n"
- ".inst 0x4e9d94b3 // sdot v19.4s, v5.16b, v29.16b\n"
- "sqrdmulh v25.4s, v25.4s, v24.4s\n"
- ".inst 0x4e9e9634 // sdot v20.4s, v17.16b, v30.16b\n"
- "ext v30.16b, v30.16b, v30.16b, #0x1\n"
- ".inst 0x4e9d9636 // sdot v22.4s, v17.16b, v29.16b\n"
- ".inst 0x4e9e9633 // sdot v19.4s, v17.16b, v30.16b\n"
- "and v16.16b, v25.16b, v23.16b\n"
+ "str s18, [x28, x12]\n"
+ "mov v3.16b, v25.16b\n"
+ ".inst 0x4e979619 // sdot v25.4s, v16.16b, v23.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e859614 // sdot v20.4s, v16.16b, v5.16b\n"
+ "ext v23.16b, v23.16b, v23.16b, #0x1\n"
+ ".inst 0x4e859639 // sdot v25.4s, v17.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x4e979618 // sdot v24.4s, v16.16b, v23.16b\n"
+ ".inst 0x4e859603 // sdot v3.4s, v16.16b, v5.16b\n"
+ ".inst 0x4e959634 // sdot v20.4s, v17.16b, v21.16b\n"
+ ".inst 0x4e9594d9 // sdot v25.4s, v6.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x4e859638 // sdot v24.4s, v17.16b, v5.16b\n"
+ ".inst 0x4e959623 // sdot v3.4s, v17.16b, v21.16b\n"
+ ".inst 0x4e8194d4 // sdot v20.4s, v6.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ "sqrdmulh v25.4s, v25.4s, v4.4s\n"
+ ".inst 0x4e9594d8 // sdot v24.4s, v6.16b, v21.16b\n"
+ ".inst 0x4e8194c3 // sdot v3.4s, v6.16b, v1.16b\n"
+ "and v16.16b, v25.16b, v19.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v4.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v22.4s, v22.4s, v24.4s\n"
- "sqrdmulh v20.4s, v20.4s, v24.4s\n"
- "sqrdmulh v19.4s, v19.4s, v24.4s\n"
- "ldr q24, [%x[params], #0xc0]\n"
+ "sqrdmulh v24.4s, v24.4s, v4.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v4.4s\n"
+ "ldr q23, [%x[params], #0xc0]\n"
+ "and v18.16b, v20.16b, v19.16b\n"
"sqadd v25.4s, v25.4s, v16.4s\n"
- "and v18.16b, v22.16b, v23.16b\n"
- "and v17.16b, v20.16b, v23.16b\n"
- "and v16.16b, v19.16b, v23.16b\n"
+ "and v17.16b, v24.16b, v19.16b\n"
+ "and v16.16b, v3.16b, v19.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v25.4s, v25.4s, v19.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v25.4s, v25.4s, v23.4s\n"
- "sqadd v22.4s, v22.4s, v18.4s\n"
- "ldr q18, [%x[params], #0xa0]\n"
- "sqadd v20.4s, v20.4s, v17.4s\n"
- "ldr q17, [%x[params], #0xb0]\n"
- "sqadd v19.4s, v19.4s, v16.4s\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "ldr q18, [%x[params], #0xb0]\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0xa0]\n"
+ "add v25.4s, v25.4s, v13.4s\n"
+ "sqadd v3.4s, v3.4s, v16.4s\n"
"ldr q16, [%x[params], #0x90]\n"
- "add v25.4s, v25.4s, v15.4s\n"
- "srshl v22.4s, v22.4s, v23.4s\n"
- "srshl v20.4s, v20.4s, v23.4s\n"
- "srshl v19.4s, v19.4s, v23.4s\n"
- "ldr q23, [%x[params], #0xd0]\n"
- "smax v25.4s, v25.4s, v8.4s\n"
- "add v22.4s, v22.4s, v15.4s\n"
- "add v20.4s, v20.4s, v15.4s\n"
- "add v19.4s, v19.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v12.4s\n"
- "smax v22.4s, v22.4s, v8.4s\n"
- "smax v20.4s, v20.4s, v8.4s\n"
- "smax v19.4s, v19.4s, v8.4s\n"
- "smin v22.4s, v22.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "smin v19.4s, v19.4s, v12.4s\n"
+ "srshl v20.4s, v20.4s, v19.4s\n"
+ "srshl v24.4s, v24.4s, v19.4s\n"
+ "smax v25.4s, v25.4s, v27.4s\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
+ "ldr q5, [%x[params], #0xd0]\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "add v24.4s, v24.4s, v13.4s\n"
+ "smin v25.4s, v25.4s, v11.4s\n"
+ "add v3.4s, v3.4s, v13.4s\n"
+ "smax v20.4s, v20.4s, v27.4s\n"
+ "smax v24.4s, v24.4s, v27.4s\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v24.4s, v24.4s, v11.4s\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s25, [x25, x11]\n"
- "ldr q10, [%x[params], #0x80]\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s22, [x24, x11]\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "str s25, [x11, x12]\n"
+ "ldr q21, [%x[params], #0x80]\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
- "str s20, [x23, x11]\n"
- "str s19, [x22, x11]\n"
- "mov v28.16b, v10.16b\n"
- "mov v20.16b, v10.16b\n"
- ".inst 0x4e959614 // sdot v20.4s, v16.16b, v21.16b\n"
- "mov v19.16b, v10.16b\n"
- ".inst 0x4e83960a // sdot v10.4s, v16.16b, v3.16b\n"
- ".inst 0x4e95964a // sdot v10.4s, v18.16b, v21.16b\n"
- "add x11, x11, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x4e83961c // sdot v28.4s, v16.16b, v3.16b\n"
- ".inst 0x4e959613 // sdot v19.4s, v16.16b, v21.16b\n"
- ".inst 0x4e809654 // sdot v20.4s, v18.16b, v0.16b\n"
- ".inst 0x4e80962a // sdot v10.4s, v17.16b, v0.16b\n"
- "ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e95965c // sdot v28.4s, v18.16b, v21.16b\n"
- ".inst 0x4e809653 // sdot v19.4s, v18.16b, v0.16b\n"
- ".inst 0x4e829634 // sdot v20.4s, v17.16b, v2.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- "sqrdmulh v10.4s, v10.4s, v24.4s\n"
- ".inst 0x4e80963c // sdot v28.4s, v17.16b, v0.16b\n"
- ".inst 0x4e829633 // sdot v19.4s, v17.16b, v2.16b\n"
- "and v16.16b, v10.16b, v23.16b\n"
+ "uzp1 v24.16b, v24.16b, v24.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str s24, [x10, x12]\n"
+ "mov v26.16b, v21.16b\n"
+ "str s20, [x9, x12]\n"
+ "mov v20.16b, v21.16b\n"
+ "str s3, [x28, x12]\n"
+ "mov v19.16b, v21.16b\n"
+ ".inst 0x4e9e9615 // sdot v21.4s, v16.16b, v30.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e879614 // sdot v20.4s, v16.16b, v7.16b\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x1\n"
+ ".inst 0x4e879635 // sdot v21.4s, v17.16b, v7.16b\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ ".inst 0x4e9e961a // sdot v26.4s, v16.16b, v30.16b\n"
+ ".inst 0x4e879613 // sdot v19.4s, v16.16b, v7.16b\n"
+ ".inst 0x4e9d9634 // sdot v20.4s, v17.16b, v29.16b\n"
+ ".inst 0x4e9d9655 // sdot v21.4s, v18.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x4e87963a // sdot v26.4s, v17.16b, v7.16b\n"
+ ".inst 0x4e9d9633 // sdot v19.4s, v17.16b, v29.16b\n"
+ ".inst 0x4e9f9654 // sdot v20.4s, v18.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ "sqrdmulh v21.4s, v21.4s, v23.4s\n"
+ ".inst 0x4e9d965a // sdot v26.4s, v18.16b, v29.16b\n"
+ ".inst 0x4e9f9653 // sdot v19.4s, v18.16b, v31.16b\n"
+ "and v16.16b, v21.16b, v5.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v24.4s\n"
- "sqrdmulh v20.4s, v20.4s, v24.4s\n"
- "sqrdmulh v19.4s, v19.4s, v24.4s\n"
- "ldr q24, [%x[params], #0x120]\n"
- "sqadd v10.4s, v10.4s, v16.4s\n"
- "and v18.16b, v28.16b, v23.16b\n"
- "and v17.16b, v20.16b, v23.16b\n"
- "and v16.16b, v19.16b, v23.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v23.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "ldr q25, [%x[params], #0x120]\n"
+ "and v18.16b, v20.16b, v5.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "and v17.16b, v26.16b, v5.16b\n"
+ "and v16.16b, v19.16b, v5.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v10.4s, v10.4s, v23.4s\n"
- "sqadd v28.4s, v28.4s, v18.4s\n"
- "ldr q18, [%x[params], #0x100]\n"
- "sqadd v20.4s, v20.4s, v17.4s\n"
- "ldr q17, [%x[params], #0x110]\n"
+ "sqadd v20.4s, v20.4s, v18.4s\n"
+ "ldr q18, [%x[params], #0x110]\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0x100]\n"
+ "add v21.4s, v21.4s, v13.4s\n"
"sqadd v19.4s, v19.4s, v16.4s\n"
"ldr q16, [%x[params], #0xf0]\n"
- "add v10.4s, v10.4s, v15.4s\n"
- "srshl v28.4s, v28.4s, v23.4s\n"
- "srshl v20.4s, v20.4s, v23.4s\n"
- "srshl v19.4s, v19.4s, v23.4s\n"
- "ldr q23, [%x[params], #0x130]\n"
- "smax v10.4s, v10.4s, v8.4s\n"
- "add v28.4s, v28.4s, v15.4s\n"
- "add v20.4s, v20.4s, v15.4s\n"
- "add v19.4s, v19.4s, v15.4s\n"
- "smin v10.4s, v10.4s, v12.4s\n"
- "smax v28.4s, v28.4s, v8.4s\n"
- "smax v20.4s, v20.4s, v8.4s\n"
- "smax v19.4s, v19.4s, v8.4s\n"
- "smin v28.4s, v28.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "smin v19.4s, v19.4s, v12.4s\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s10, [x25, x11]\n"
- "ldr q22, [%x[params], #0xe0]\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "srshl v26.4s, v26.4s, v5.4s\n"
+ "smax v21.4s, v21.4s, v27.4s\n"
+ "srshl v19.4s, v19.4s, v5.4s\n"
+ "ldr q24, [%x[params], #0x130]\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "smax v20.4s, v20.4s, v27.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smax v19.4s, v19.4s, v27.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s21, [x11, x12]\n"
+ "ldr q23, [%x[params], #0xe0]\n"
"add %x[params], %x[params], #0x140\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s28, [x24, x11]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "str s20, [x23, x11]\n"
- "mov v21.16b, v22.16b\n"
- "str s19, [x22, x11]\n"
- "mov v20.16b, v22.16b\n"
- "mov v19.16b, v22.16b\n"
- ".inst 0x4e8e9616 // sdot v22.4s, v16.16b, v14.16b\n"
- ".inst 0x4e9b9614 // sdot v20.4s, v16.16b, v27.16b\n"
- ".inst 0x4e9b9656 // sdot v22.4s, v18.16b, v27.16b\n"
+ "str s20, [x9, x12]\n"
+ "mov v21.16b, v23.16b\n"
+ "str s26, [x10, x12]\n"
+ "mov v20.16b, v23.16b\n"
+ "str s19, [x28, x12]\n"
+ "mov v19.16b, v23.16b\n"
+ ".inst 0x4e8e9617 // sdot v23.4s, v16.16b, v14.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x4e9c9615 // sdot v21.4s, v16.16b, v28.16b\n"
"ext v14.16b, v14.16b, v14.16b, #0x1\n"
- "add x11, x11, #0x4\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x4e8e9615 // sdot v21.4s, v16.16b, v14.16b\n"
- ".inst 0x4e9b9613 // sdot v19.4s, v16.16b, v27.16b\n"
- ".inst 0x4e879654 // sdot v20.4s, v18.16b, v7.16b\n"
- ".inst 0x4e879636 // sdot v22.4s, v17.16b, v7.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- ".inst 0x4e9b9655 // sdot v21.4s, v18.16b, v27.16b\n"
- ".inst 0x4e879653 // sdot v19.4s, v18.16b, v7.16b\n"
- "sqrdmulh v22.4s, v22.4s, v24.4s\n"
- ".inst 0x4e819634 // sdot v20.4s, v17.16b, v1.16b\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- ".inst 0x4e879635 // sdot v21.4s, v17.16b, v7.16b\n"
- ".inst 0x4e819633 // sdot v19.4s, v17.16b, v1.16b\n"
- "and v16.16b, v22.16b, v23.16b\n"
+ ".inst 0x4e9c9637 // sdot v23.4s, v17.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x4e8e9614 // sdot v20.4s, v16.16b, v14.16b\n"
+ ".inst 0x4e9c9613 // sdot v19.4s, v16.16b, v28.16b\n"
+ ".inst 0x4e969635 // sdot v21.4s, v17.16b, v22.16b\n"
+ ".inst 0x4e969657 // sdot v23.4s, v18.16b, v22.16b\n"
+ "ext v22.16b, v22.16b, v22.16b, #0x1\n"
+ ".inst 0x4e9c9634 // sdot v20.4s, v17.16b, v28.16b\n"
+ ".inst 0x4e969633 // sdot v19.4s, v17.16b, v22.16b\n"
+ ".inst 0x4e829655 // sdot v21.4s, v18.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ "sqrdmulh v23.4s, v23.4s, v25.4s\n"
+ ".inst 0x4e969654 // sdot v20.4s, v18.16b, v22.16b\n"
+ ".inst 0x4e829653 // sdot v19.4s, v18.16b, v2.16b\n"
+ "and v16.16b, v23.16b, v24.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v25.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v21.4s, v21.4s, v24.4s\n"
- "sqrdmulh v20.4s, v20.4s, v24.4s\n"
- "sqrdmulh v19.4s, v19.4s, v24.4s\n"
- "sqadd v22.4s, v22.4s, v16.4s\n"
- "and v18.16b, v21.16b, v23.16b\n"
- "and v17.16b, v20.16b, v23.16b\n"
- "and v16.16b, v19.16b, v23.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v25.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v25.4s\n"
+ "and v18.16b, v21.16b, v24.16b\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "and v17.16b, v20.16b, v24.16b\n"
+ "and v16.16b, v19.16b, v24.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v23.4s, v23.4s, v24.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v21.4s, v21.4s, v18.4s\n"
"sqadd v20.4s, v20.4s, v17.4s\n"
+ "add v23.4s, v23.4s, v13.4s\n"
"sqadd v19.4s, v19.4s, v16.4s\n"
- "srshl v22.4s, v22.4s, v23.4s\n"
- "srshl v21.4s, v21.4s, v23.4s\n"
- "srshl v20.4s, v20.4s, v23.4s\n"
- "srshl v19.4s, v19.4s, v23.4s\n"
- "add v22.4s, v22.4s, v15.4s\n"
- "add v21.4s, v21.4s, v15.4s\n"
- "add v20.4s, v20.4s, v15.4s\n"
- "add v19.4s, v19.4s, v15.4s\n"
- "smax v22.4s, v22.4s, v8.4s\n"
- "smax v21.4s, v21.4s, v8.4s\n"
- "smax v20.4s, v20.4s, v8.4s\n"
- "smax v19.4s, v19.4s, v8.4s\n"
- "smin v22.4s, v22.4s, v12.4s\n"
- "smin v21.4s, v21.4s, v12.4s\n"
- "smin v20.4s, v20.4s, v12.4s\n"
- "smin v19.4s, v19.4s, v12.4s\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "srshl v21.4s, v21.4s, v24.4s\n"
+ "srshl v20.4s, v20.4s, v24.4s\n"
+ "smax v23.4s, v23.4s, v27.4s\n"
+ "srshl v19.4s, v19.4s, v24.4s\n"
+ "add v21.4s, v21.4s, v13.4s\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v13.4s\n"
+ "smax v21.4s, v21.4s, v27.4s\n"
+ "smax v20.4s, v20.4s, v27.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smax v19.4s, v19.4s, v27.4s\n"
+ "smin v21.4s, v21.4s, v11.4s\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
+ "str s23, [x11, x12]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s22, [x25, x11]\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "str s21, [x24, x11]\n"
- "str s20, [x23, x11]\n"
- "str s19, [x22, x11]\n"
- "add x11, x11, #0x4\n"
+ "str s20, [x10, x12]\n"
+ "str s21, [x9, x12]\n"
+ "str s19, [x28, x12]\n"
+ "add x12, x12, #0x4\n"
"beq 35f\n"
"3:" // Oddments
"and x20, %x[n_channels], #0xf\n"
- "add x14, x14, x12\n"
- "add x13, x13, x12\n"
- "add x10, x10, x12\n"
- "add x9, x9, x12\n"
- "add x28, x28, x12\n"
- "add x27, x27, x12\n"
- "add x26, x26, x12\n"
- "add x21, x21, x12\n"
+ "add x14, x14, x13\n"
+ "add x27, x27, x13\n"
+ "add x26, x26, x13\n"
+ "add x25, x25, x13\n"
+ "add x24, x24, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
+ "add x21, x21, x13\n"
"tbz %x[n_channels], #3, 7f\n"
- "ldr d11, [x14], #0x8\n"
- "ldr d10, [x13], #0x8\n"
- "ldr d3, [x10], #0x8\n"
- "ldr d14, [x9], #0x8\n"
- "ldr d13, [x28], #0x8\n"
- "ldr d28, [x27], #0x8\n"
- "ldr d21, [x26], #0x8\n"
- "ldr d27, [x21], #0x8\n"
+ "ldr d12, [x14], #0x8\n"
+ "ldr d23, [x27], #0x8\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d14, [x25], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d7, [x22], #0x8\n"
+ "ldr d28, [x21], #0x8\n"
"tbz %x[n_channels], #2, 5f\n"
- "ld1 { v11.s }[2], [x14], #0x4\n"
- "ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v3.s }[2], [x10], #0x4\n"
- "ld1 { v14.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x28], #0x4\n"
- "ld1 { v28.s }[2], [x27], #0x4\n"
- "ld1 { v21.s }[2], [x26], #0x4\n"
- "ld1 { v27.s }[2], [x21], #0x4\n"
+ "ld1 { v12.s }[2], [x14], #0x4\n"
+ "ld1 { v23.s }[2], [x27], #0x4\n"
+ "ld1 { v30.s }[2], [x26], #0x4\n"
+ "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x24], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v7.s }[2], [x22], #0x4\n"
+ "ld1 { v28.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v11.h }[6], [x14], #0x2\n"
- "ld1 { v10.h }[6], [x13], #0x2\n"
- "ld1 { v3.h }[6], [x10], #0x2\n"
- "ld1 { v14.h }[6], [x9], #0x2\n"
- "ld1 { v13.h }[6], [x28], #0x2\n"
- "ld1 { v28.h }[6], [x27], #0x2\n"
- "ld1 { v21.h }[6], [x26], #0x2\n"
- "ld1 { v27.h }[6], [x21], #0x2\n"
+ "ld1 { v12.h }[6], [x14], #0x2\n"
+ "ld1 { v23.h }[6], [x27], #0x2\n"
+ "ld1 { v30.h }[6], [x26], #0x2\n"
+ "ld1 { v14.h }[6], [x25], #0x2\n"
+ "ld1 { v15.h }[6], [x24], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
+ "ld1 { v7.h }[6], [x22], #0x2\n"
+ "ld1 { v28.h }[6], [x21], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.b }[14], [x14], #0x1\n"
- "ld1 { v10.b }[14], [x13], #0x1\n"
- "ld1 { v3.b }[14], [x10], #0x1\n"
- "ld1 { v14.b }[14], [x9], #0x1\n"
- "ld1 { v13.b }[14], [x28], #0x1\n"
- "ld1 { v28.b }[14], [x27], #0x1\n"
- "ld1 { v21.b }[14], [x26], #0x1\n"
- "ld1 { v27.b }[14], [x21], #0x1\n"
+ "ld1 { v12.b }[14], [x14], #0x1\n"
+ "ld1 { v23.b }[14], [x27], #0x1\n"
+ "ld1 { v30.b }[14], [x26], #0x1\n"
+ "ld1 { v14.b }[14], [x25], #0x1\n"
+ "ld1 { v15.b }[14], [x24], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
+ "ld1 { v7.b }[14], [x22], #0x1\n"
+ "ld1 { v28.b }[14], [x21], #0x1\n"
"b 11f\n"
"4:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.b }[12], [x14], #0x1\n"
- "ld1 { v10.b }[12], [x13], #0x1\n"
- "ld1 { v3.b }[12], [x10], #0x1\n"
- "ld1 { v14.b }[12], [x9], #0x1\n"
- "ld1 { v13.b }[12], [x28], #0x1\n"
- "ld1 { v28.b }[12], [x27], #0x1\n"
- "ld1 { v21.b }[12], [x26], #0x1\n"
- "ld1 { v27.b }[12], [x21], #0x1\n"
+ "ld1 { v12.b }[12], [x14], #0x1\n"
+ "ld1 { v23.b }[12], [x27], #0x1\n"
+ "ld1 { v30.b }[12], [x26], #0x1\n"
+ "ld1 { v14.b }[12], [x25], #0x1\n"
+ "ld1 { v15.b }[12], [x24], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
+ "ld1 { v7.b }[12], [x22], #0x1\n"
+ "ld1 { v28.b }[12], [x21], #0x1\n"
"b 11f\n"
"5:" // Oddments: Load (A): Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v11.h }[4], [x14], #0x2\n"
- "ld1 { v10.h }[4], [x13], #0x2\n"
- "ld1 { v3.h }[4], [x10], #0x2\n"
- "ld1 { v14.h }[4], [x9], #0x2\n"
- "ld1 { v13.h }[4], [x28], #0x2\n"
- "ld1 { v28.h }[4], [x27], #0x2\n"
- "ld1 { v21.h }[4], [x26], #0x2\n"
- "ld1 { v27.h }[4], [x21], #0x2\n"
+ "ld1 { v12.h }[4], [x14], #0x2\n"
+ "ld1 { v23.h }[4], [x27], #0x2\n"
+ "ld1 { v30.h }[4], [x26], #0x2\n"
+ "ld1 { v14.h }[4], [x25], #0x2\n"
+ "ld1 { v15.h }[4], [x24], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
+ "ld1 { v7.h }[4], [x22], #0x2\n"
+ "ld1 { v28.h }[4], [x21], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.b }[10], [x14], #0x1\n"
- "ld1 { v10.b }[10], [x13], #0x1\n"
- "ld1 { v3.b }[10], [x10], #0x1\n"
- "ld1 { v14.b }[10], [x9], #0x1\n"
- "ld1 { v13.b }[10], [x28], #0x1\n"
- "ld1 { v28.b }[10], [x27], #0x1\n"
- "ld1 { v21.b }[10], [x26], #0x1\n"
- "ld1 { v27.b }[10], [x21], #0x1\n"
+ "ld1 { v12.b }[10], [x14], #0x1\n"
+ "ld1 { v23.b }[10], [x27], #0x1\n"
+ "ld1 { v30.b }[10], [x26], #0x1\n"
+ "ld1 { v14.b }[10], [x25], #0x1\n"
+ "ld1 { v15.b }[10], [x24], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
+ "ld1 { v7.b }[10], [x22], #0x1\n"
+ "ld1 { v28.b }[10], [x21], #0x1\n"
"b 11f\n"
"6:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.b }[8], [x14], #0x1\n"
- "ld1 { v10.b }[8], [x13], #0x1\n"
- "ld1 { v3.b }[8], [x10], #0x1\n"
- "ld1 { v14.b }[8], [x9], #0x1\n"
- "ld1 { v13.b }[8], [x28], #0x1\n"
- "ld1 { v28.b }[8], [x27], #0x1\n"
- "ld1 { v21.b }[8], [x26], #0x1\n"
- "ld1 { v27.b }[8], [x21], #0x1\n"
+ "ld1 { v12.b }[8], [x14], #0x1\n"
+ "ld1 { v23.b }[8], [x27], #0x1\n"
+ "ld1 { v30.b }[8], [x26], #0x1\n"
+ "ld1 { v14.b }[8], [x25], #0x1\n"
+ "ld1 { v15.b }[8], [x24], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
+ "ld1 { v7.b }[8], [x22], #0x1\n"
+ "ld1 { v28.b }[8], [x21], #0x1\n"
"b 11f\n"
"7:" // Oddments: Load (A): Bit 3: Unset
"tbz %x[n_channels], #2, 9f\n"
- "ldr s11, [x14], #0x4\n"
- "ldr s10, [x13], #0x4\n"
- "ldr s3, [x10], #0x4\n"
- "ldr s14, [x9], #0x4\n"
- "ldr s13, [x28], #0x4\n"
- "ldr s28, [x27], #0x4\n"
- "ldr s21, [x26], #0x4\n"
- "ldr s27, [x21], #0x4\n"
+ "ldr s12, [x14], #0x4\n"
+ "ldr s23, [x27], #0x4\n"
+ "ldr s30, [x26], #0x4\n"
+ "ldr s14, [x25], #0x4\n"
+ "ldr s15, [x24], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s7, [x22], #0x4\n"
+ "ldr s28, [x21], #0x4\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v11.h }[2], [x14], #0x2\n"
- "ld1 { v10.h }[2], [x13], #0x2\n"
- "ld1 { v3.h }[2], [x10], #0x2\n"
- "ld1 { v14.h }[2], [x9], #0x2\n"
- "ld1 { v13.h }[2], [x28], #0x2\n"
- "ld1 { v28.h }[2], [x27], #0x2\n"
- "ld1 { v21.h }[2], [x26], #0x2\n"
- "ld1 { v27.h }[2], [x21], #0x2\n"
+ "ld1 { v12.h }[2], [x14], #0x2\n"
+ "ld1 { v23.h }[2], [x27], #0x2\n"
+ "ld1 { v30.h }[2], [x26], #0x2\n"
+ "ld1 { v14.h }[2], [x25], #0x2\n"
+ "ld1 { v15.h }[2], [x24], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
+ "ld1 { v7.h }[2], [x22], #0x2\n"
+ "ld1 { v28.h }[2], [x21], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.b }[6], [x14], #0x1\n"
- "ld1 { v10.b }[6], [x13], #0x1\n"
- "ld1 { v3.b }[6], [x10], #0x1\n"
- "ld1 { v14.b }[6], [x9], #0x1\n"
- "ld1 { v13.b }[6], [x28], #0x1\n"
- "ld1 { v28.b }[6], [x27], #0x1\n"
- "ld1 { v21.b }[6], [x26], #0x1\n"
- "ld1 { v27.b }[6], [x21], #0x1\n"
+ "ld1 { v12.b }[6], [x14], #0x1\n"
+ "ld1 { v23.b }[6], [x27], #0x1\n"
+ "ld1 { v30.b }[6], [x26], #0x1\n"
+ "ld1 { v14.b }[6], [x25], #0x1\n"
+ "ld1 { v15.b }[6], [x24], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
+ "ld1 { v7.b }[6], [x22], #0x1\n"
+ "ld1 { v28.b }[6], [x21], #0x1\n"
"b 11f\n"
"8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.b }[4], [x14], #0x1\n"
- "ld1 { v10.b }[4], [x13], #0x1\n"
- "ld1 { v3.b }[4], [x10], #0x1\n"
- "ld1 { v14.b }[4], [x9], #0x1\n"
- "ld1 { v13.b }[4], [x28], #0x1\n"
- "ld1 { v28.b }[4], [x27], #0x1\n"
- "ld1 { v21.b }[4], [x26], #0x1\n"
- "ld1 { v27.b }[4], [x21], #0x1\n"
+ "ld1 { v12.b }[4], [x14], #0x1\n"
+ "ld1 { v23.b }[4], [x27], #0x1\n"
+ "ld1 { v30.b }[4], [x26], #0x1\n"
+ "ld1 { v14.b }[4], [x25], #0x1\n"
+ "ld1 { v15.b }[4], [x24], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
+ "ld1 { v7.b }[4], [x22], #0x1\n"
+ "ld1 { v28.b }[4], [x21], #0x1\n"
"b 11f\n"
"9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 10f\n"
- "ldr h11, [x14], #0x2\n"
- "ldr h10, [x13], #0x2\n"
- "ldr h3, [x10], #0x2\n"
- "ldr h14, [x9], #0x2\n"
- "ldr h13, [x28], #0x2\n"
- "ldr h28, [x27], #0x2\n"
- "ldr h21, [x26], #0x2\n"
- "ldr h27, [x21], #0x2\n"
+ "ldr h12, [x14], #0x2\n"
+ "ldr h23, [x27], #0x2\n"
+ "ldr h30, [x26], #0x2\n"
+ "ldr h14, [x25], #0x2\n"
+ "ldr h15, [x24], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
+ "ldr h7, [x22], #0x2\n"
+ "ldr h28, [x21], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v11.b }[2], [x14], #0x1\n"
- "ld1 { v10.b }[2], [x13], #0x1\n"
- "ld1 { v3.b }[2], [x10], #0x1\n"
- "ld1 { v14.b }[2], [x9], #0x1\n"
- "ld1 { v13.b }[2], [x28], #0x1\n"
- "ld1 { v28.b }[2], [x27], #0x1\n"
- "ld1 { v21.b }[2], [x26], #0x1\n"
- "ld1 { v27.b }[2], [x21], #0x1\n"
+ "ld1 { v12.b }[2], [x14], #0x1\n"
+ "ld1 { v23.b }[2], [x27], #0x1\n"
+ "ld1 { v30.b }[2], [x26], #0x1\n"
+ "ld1 { v14.b }[2], [x25], #0x1\n"
+ "ld1 { v15.b }[2], [x24], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
+ "ld1 { v7.b }[2], [x22], #0x1\n"
+ "ld1 { v28.b }[2], [x21], #0x1\n"
"b 11f\n"
"10:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b11, [x14], #0x1\n"
- "ldr b10, [x13], #0x1\n"
- "ldr b3, [x10], #0x1\n"
- "ldr b14, [x9], #0x1\n"
- "ldr b13, [x28], #0x1\n"
- "ldr b28, [x27], #0x1\n"
- "ldr b21, [x26], #0x1\n"
- "ldr b27, [x21], #0x1\n"
+ "ldr b12, [x14], #0x1\n"
+ "ldr b23, [x27], #0x1\n"
+ "ldr b30, [x26], #0x1\n"
+ "ldr b14, [x25], #0x1\n"
+ "ldr b15, [x24], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
+ "ldr b7, [x22], #0x1\n"
+ "ldr b28, [x21], #0x1\n"
"11:" // Oddments: Load (A): Bit 3: End
- "ldp x14, x13, [%x[inptrs], #0x40]\n"
- "ldp x10, x9, [%x[inptrs], #0x50]\n"
- "add x14, x14, x12\n"
- "add x13, x13, x12\n"
- "ldp x28, x27, [%x[inptrs], #0x60]\n"
- "ldp x26, x21, [%x[inptrs], #0x70]\n"
- "add x10, x10, x12\n"
- "add x9, x9, x12\n"
- "add x28, x28, x12\n"
- "add x27, x27, x12\n"
- "add x26, x26, x12\n"
- "add x21, x21, x12\n"
+ "ldp x14, x27, [%x[inptrs], #0x40]\n"
+ "ldp x26, x25, [%x[inptrs], #0x50]\n"
+ "ldp x24, x23, [%x[inptrs], #0x60]\n"
+ "ldp x22, x21, [%x[inptrs], #0x70]\n"
+ "add x14, x14, x13\n"
+ "add x27, x27, x13\n"
+ "add x26, x26, x13\n"
+ "add x25, x25, x13\n"
+ "add x24, x24, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
+ "add x21, x21, x13\n"
"tbz %x[n_channels], #3, 15f\n"
- "ldr d5, [x14], #0x8\n"
- "ldr d29, [x13], #0x8\n"
- "ldr d0, [x10], #0x8\n"
- "ldr d7, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "ldr d30, [x27], #0x8\n"
- "ldr d2, [x26], #0x8\n"
- "ldr d1, [x21], #0x8\n"
+ "ldr d8, [x14], #0x8\n"
+ "ldr d21, [x27], #0x8\n"
+ "ldr d29, [x26], #0x8\n"
+ "ldr d22, [x25], #0x8\n"
+ "ldr d9, [x24], #0x8\n"
+ "ldr d1, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
+ "ldr d2, [x21], #0x8\n"
"tbz %x[n_channels], #2, 13f\n"
- "ld1 { v5.s }[2], [x14], #0x4\n"
- "ld1 { v29.s }[2], [x13], #0x4\n"
- "ld1 { v0.s }[2], [x10], #0x4\n"
- "ld1 { v7.s }[2], [x9], #0x4\n"
- "ld1 { v16.s }[2], [x28], #0x4\n"
- "ld1 { v30.s }[2], [x27], #0x4\n"
- "ld1 { v2.s }[2], [x26], #0x4\n"
- "ld1 { v1.s }[2], [x21], #0x4\n"
+ "ld1 { v8.s }[2], [x14], #0x4\n"
+ "ld1 { v21.s }[2], [x27], #0x4\n"
+ "ld1 { v29.s }[2], [x26], #0x4\n"
+ "ld1 { v22.s }[2], [x25], #0x4\n"
+ "ld1 { v9.s }[2], [x24], #0x4\n"
+ "ld1 { v1.s }[2], [x23], #0x4\n"
+ "ld1 { v31.s }[2], [x22], #0x4\n"
+ "ld1 { v2.s }[2], [x21], #0x4\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v5.h }[6], [x14], #0x2\n"
- "ld1 { v29.h }[6], [x13], #0x2\n"
- "ld1 { v0.h }[6], [x10], #0x2\n"
- "ld1 { v7.h }[6], [x9], #0x2\n"
- "ld1 { v16.h }[6], [x28], #0x2\n"
- "ld1 { v30.h }[6], [x27], #0x2\n"
- "ld1 { v2.h }[6], [x26], #0x2\n"
- "ld1 { v1.h }[6], [x21], #0x2\n"
+ "ld1 { v8.h }[6], [x14], #0x2\n"
+ "ld1 { v21.h }[6], [x27], #0x2\n"
+ "ld1 { v29.h }[6], [x26], #0x2\n"
+ "ld1 { v22.h }[6], [x25], #0x2\n"
+ "ld1 { v9.h }[6], [x24], #0x2\n"
+ "ld1 { v1.h }[6], [x23], #0x2\n"
+ "ld1 { v31.h }[6], [x22], #0x2\n"
+ "ld1 { v2.h }[6], [x21], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v5.b }[14], [x14], #0x1\n"
- "ld1 { v29.b }[14], [x13], #0x1\n"
- "ld1 { v0.b }[14], [x10], #0x1\n"
- "ld1 { v7.b }[14], [x9], #0x1\n"
- "ld1 { v16.b }[14], [x28], #0x1\n"
- "ld1 { v30.b }[14], [x27], #0x1\n"
- "ld1 { v2.b }[14], [x26], #0x1\n"
- "ld1 { v1.b }[14], [x21], #0x1\n"
+ "ld1 { v8.b }[14], [x14], #0x1\n"
+ "ld1 { v21.b }[14], [x27], #0x1\n"
+ "ld1 { v29.b }[14], [x26], #0x1\n"
+ "ld1 { v22.b }[14], [x25], #0x1\n"
+ "ld1 { v9.b }[14], [x24], #0x1\n"
+ "ld1 { v1.b }[14], [x23], #0x1\n"
+ "ld1 { v31.b }[14], [x22], #0x1\n"
+ "ld1 { v2.b }[14], [x21], #0x1\n"
"b 19f\n"
"12:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v5.b }[12], [x14], #0x1\n"
- "ld1 { v29.b }[12], [x13], #0x1\n"
- "ld1 { v0.b }[12], [x10], #0x1\n"
- "ld1 { v7.b }[12], [x9], #0x1\n"
- "ld1 { v16.b }[12], [x28], #0x1\n"
- "ld1 { v30.b }[12], [x27], #0x1\n"
- "ld1 { v2.b }[12], [x26], #0x1\n"
- "ld1 { v1.b }[12], [x21], #0x1\n"
+ "ld1 { v8.b }[12], [x14], #0x1\n"
+ "ld1 { v21.b }[12], [x27], #0x1\n"
+ "ld1 { v29.b }[12], [x26], #0x1\n"
+ "ld1 { v22.b }[12], [x25], #0x1\n"
+ "ld1 { v9.b }[12], [x24], #0x1\n"
+ "ld1 { v1.b }[12], [x23], #0x1\n"
+ "ld1 { v31.b }[12], [x22], #0x1\n"
+ "ld1 { v2.b }[12], [x21], #0x1\n"
"b 19f\n"
"13:" // Oddments: Load (B): Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v5.h }[4], [x14], #0x2\n"
- "ld1 { v29.h }[4], [x13], #0x2\n"
- "ld1 { v0.h }[4], [x10], #0x2\n"
- "ld1 { v7.h }[4], [x9], #0x2\n"
- "ld1 { v16.h }[4], [x28], #0x2\n"
- "ld1 { v30.h }[4], [x27], #0x2\n"
- "ld1 { v2.h }[4], [x26], #0x2\n"
- "ld1 { v1.h }[4], [x21], #0x2\n"
+ "ld1 { v8.h }[4], [x14], #0x2\n"
+ "ld1 { v21.h }[4], [x27], #0x2\n"
+ "ld1 { v29.h }[4], [x26], #0x2\n"
+ "ld1 { v22.h }[4], [x25], #0x2\n"
+ "ld1 { v9.h }[4], [x24], #0x2\n"
+ "ld1 { v1.h }[4], [x23], #0x2\n"
+ "ld1 { v31.h }[4], [x22], #0x2\n"
+ "ld1 { v2.h }[4], [x21], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v5.b }[10], [x14], #0x1\n"
- "ld1 { v29.b }[10], [x13], #0x1\n"
- "ld1 { v0.b }[10], [x10], #0x1\n"
- "ld1 { v7.b }[10], [x9], #0x1\n"
- "ld1 { v16.b }[10], [x28], #0x1\n"
- "ld1 { v30.b }[10], [x27], #0x1\n"
- "ld1 { v2.b }[10], [x26], #0x1\n"
- "ld1 { v1.b }[10], [x21], #0x1\n"
+ "ld1 { v8.b }[10], [x14], #0x1\n"
+ "ld1 { v21.b }[10], [x27], #0x1\n"
+ "ld1 { v29.b }[10], [x26], #0x1\n"
+ "ld1 { v22.b }[10], [x25], #0x1\n"
+ "ld1 { v9.b }[10], [x24], #0x1\n"
+ "ld1 { v1.b }[10], [x23], #0x1\n"
+ "ld1 { v31.b }[10], [x22], #0x1\n"
+ "ld1 { v2.b }[10], [x21], #0x1\n"
"b 19f\n"
"14:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v5.b }[8], [x14], #0x1\n"
- "ld1 { v29.b }[8], [x13], #0x1\n"
- "ld1 { v0.b }[8], [x10], #0x1\n"
- "ld1 { v7.b }[8], [x9], #0x1\n"
- "ld1 { v16.b }[8], [x28], #0x1\n"
- "ld1 { v30.b }[8], [x27], #0x1\n"
- "ld1 { v2.b }[8], [x26], #0x1\n"
- "ld1 { v1.b }[8], [x21], #0x1\n"
+ "ld1 { v8.b }[8], [x14], #0x1\n"
+ "ld1 { v21.b }[8], [x27], #0x1\n"
+ "ld1 { v29.b }[8], [x26], #0x1\n"
+ "ld1 { v22.b }[8], [x25], #0x1\n"
+ "ld1 { v9.b }[8], [x24], #0x1\n"
+ "ld1 { v1.b }[8], [x23], #0x1\n"
+ "ld1 { v31.b }[8], [x22], #0x1\n"
+ "ld1 { v2.b }[8], [x21], #0x1\n"
"b 19f\n"
"15:" // Oddments: Load (B): Bit 3: Unset
"tbz %x[n_channels], #2, 17f\n"
- "ldr s5, [x14], #0x4\n"
- "ldr s29, [x13], #0x4\n"
- "ldr s0, [x10], #0x4\n"
- "ldr s7, [x9], #0x4\n"
- "ldr s16, [x28], #0x4\n"
- "ldr s30, [x27], #0x4\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s1, [x21], #0x4\n"
+ "ldr s8, [x14], #0x4\n"
+ "ldr s21, [x27], #0x4\n"
+ "ldr s29, [x26], #0x4\n"
+ "ldr s22, [x25], #0x4\n"
+ "ldr s9, [x24], #0x4\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s31, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v5.h }[2], [x14], #0x2\n"
- "ld1 { v29.h }[2], [x13], #0x2\n"
- "ld1 { v0.h }[2], [x10], #0x2\n"
- "ld1 { v7.h }[2], [x9], #0x2\n"
- "ld1 { v16.h }[2], [x28], #0x2\n"
- "ld1 { v30.h }[2], [x27], #0x2\n"
- "ld1 { v2.h }[2], [x26], #0x2\n"
- "ld1 { v1.h }[2], [x21], #0x2\n"
+ "ld1 { v8.h }[2], [x14], #0x2\n"
+ "ld1 { v21.h }[2], [x27], #0x2\n"
+ "ld1 { v29.h }[2], [x26], #0x2\n"
+ "ld1 { v22.h }[2], [x25], #0x2\n"
+ "ld1 { v9.h }[2], [x24], #0x2\n"
+ "ld1 { v1.h }[2], [x23], #0x2\n"
+ "ld1 { v31.h }[2], [x22], #0x2\n"
+ "ld1 { v2.h }[2], [x21], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v5.b }[6], [x14], #0x1\n"
- "ld1 { v29.b }[6], [x13], #0x1\n"
- "ld1 { v0.b }[6], [x10], #0x1\n"
- "ld1 { v7.b }[6], [x9], #0x1\n"
- "ld1 { v16.b }[6], [x28], #0x1\n"
- "ld1 { v30.b }[6], [x27], #0x1\n"
- "ld1 { v2.b }[6], [x26], #0x1\n"
- "ld1 { v1.b }[6], [x21], #0x1\n"
+ "ld1 { v8.b }[6], [x14], #0x1\n"
+ "ld1 { v21.b }[6], [x27], #0x1\n"
+ "ld1 { v29.b }[6], [x26], #0x1\n"
+ "ld1 { v22.b }[6], [x25], #0x1\n"
+ "ld1 { v9.b }[6], [x24], #0x1\n"
+ "ld1 { v1.b }[6], [x23], #0x1\n"
+ "ld1 { v31.b }[6], [x22], #0x1\n"
+ "ld1 { v2.b }[6], [x21], #0x1\n"
"b 19f\n"
"16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v5.b }[4], [x14], #0x1\n"
- "ld1 { v29.b }[4], [x13], #0x1\n"
- "ld1 { v0.b }[4], [x10], #0x1\n"
- "ld1 { v7.b }[4], [x9], #0x1\n"
- "ld1 { v16.b }[4], [x28], #0x1\n"
- "ld1 { v30.b }[4], [x27], #0x1\n"
- "ld1 { v2.b }[4], [x26], #0x1\n"
- "ld1 { v1.b }[4], [x21], #0x1\n"
+ "ld1 { v8.b }[4], [x14], #0x1\n"
+ "ld1 { v21.b }[4], [x27], #0x1\n"
+ "ld1 { v29.b }[4], [x26], #0x1\n"
+ "ld1 { v22.b }[4], [x25], #0x1\n"
+ "ld1 { v9.b }[4], [x24], #0x1\n"
+ "ld1 { v1.b }[4], [x23], #0x1\n"
+ "ld1 { v31.b }[4], [x22], #0x1\n"
+ "ld1 { v2.b }[4], [x21], #0x1\n"
"b 19f\n"
"17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ldr h5, [x14], #0x2\n"
- "ldr h29, [x13], #0x2\n"
- "ldr h0, [x10], #0x2\n"
- "ldr h7, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
- "ldr h30, [x27], #0x2\n"
- "ldr h2, [x26], #0x2\n"
- "ldr h1, [x21], #0x2\n"
+ "ldr h8, [x14], #0x2\n"
+ "ldr h21, [x27], #0x2\n"
+ "ldr h29, [x26], #0x2\n"
+ "ldr h22, [x25], #0x2\n"
+ "ldr h9, [x24], #0x2\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h31, [x22], #0x2\n"
+ "ldr h2, [x21], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v5.b }[2], [x14], #0x1\n"
- "ld1 { v29.b }[2], [x13], #0x1\n"
- "ld1 { v0.b }[2], [x10], #0x1\n"
- "ld1 { v7.b }[2], [x9], #0x1\n"
- "ld1 { v16.b }[2], [x28], #0x1\n"
- "ld1 { v30.b }[2], [x27], #0x1\n"
- "ld1 { v2.b }[2], [x26], #0x1\n"
- "ld1 { v1.b }[2], [x21], #0x1\n"
+ "ld1 { v8.b }[2], [x14], #0x1\n"
+ "ld1 { v21.b }[2], [x27], #0x1\n"
+ "ld1 { v29.b }[2], [x26], #0x1\n"
+ "ld1 { v22.b }[2], [x25], #0x1\n"
+ "ld1 { v9.b }[2], [x24], #0x1\n"
+ "ld1 { v1.b }[2], [x23], #0x1\n"
+ "ld1 { v31.b }[2], [x22], #0x1\n"
+ "ld1 { v2.b }[2], [x21], #0x1\n"
"b 19f\n"
"18:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b5, [x14], #0x1\n"
- "ldr b29, [x13], #0x1\n"
- "ldr b0, [x10], #0x1\n"
- "ldr b7, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "ldr b30, [x27], #0x1\n"
- "ldr b2, [x26], #0x1\n"
- "ldr b1, [x21], #0x1\n"
+ "ldr b8, [x14], #0x1\n"
+ "ldr b21, [x27], #0x1\n"
+ "ldr b29, [x26], #0x1\n"
+ "ldr b22, [x25], #0x1\n"
+ "ldr b9, [x24], #0x1\n"
+ "ldr b1, [x23], #0x1\n"
+ "ldr b31, [x22], #0x1\n"
+ "ldr b2, [x21], #0x1\n"
"19:" // Oddments: Load (B): Bit 3: End
- "ldr q25, [%x[params], #0x10]\n"
- "ldr q24, [%x[params], #0x20]\n"
- "zip2 v18.16b, v11.16b, v3.16b\n"
- "zip1 v11.16b, v11.16b, v3.16b\n"
- "ldr q23, [%x[params], #0x30]\n"
- "zip1 v17.16b, v10.16b, v14.16b\n"
- "zip2 v14.16b, v10.16b, v14.16b\n"
+ "ldr q10, [%x[params], #0x10]\n"
+ "ldr q4, [%x[params], #0x20]\n"
+ "zip2 v17.16b, v12.16b, v30.16b\n"
+ "zip1 v12.16b, v12.16b, v30.16b\n"
+ "ldr q25, [%x[params], #0x30]\n"
+ "zip1 v16.16b, v23.16b, v14.16b\n"
+ "zip2 v14.16b, v23.16b, v14.16b\n"
"cmp x20, #0x4\n"
- "zip2 v10.16b, v11.16b, v17.16b\n"
- "zip1 v11.16b, v11.16b, v17.16b\n"
- "zip1 v3.16b, v18.16b, v14.16b\n"
- "zip2 v14.16b, v18.16b, v14.16b\n"
- "ldr q31, [%x[params], #0x0]\n"
- "zip2 v22.16b, v13.16b, v21.16b\n"
- "zip1 v13.16b, v13.16b, v21.16b\n"
- "zip1 v21.16b, v28.16b, v27.16b\n"
- "zip2 v27.16b, v28.16b, v27.16b\n"
- "zip2 v20.16b, v5.16b, v0.16b\n"
- "zip1 v5.16b, v5.16b, v0.16b\n"
- "zip1 v19.16b, v29.16b, v7.16b\n"
- "zip2 v7.16b, v29.16b, v7.16b\n"
- "zip2 v18.16b, v16.16b, v2.16b\n"
- "zip1 v16.16b, v16.16b, v2.16b\n"
- "zip1 v17.16b, v30.16b, v1.16b\n"
- "zip2 v1.16b, v30.16b, v1.16b\n"
- "zip2 v28.16b, v13.16b, v21.16b\n"
- "zip1 v13.16b, v13.16b, v21.16b\n"
- "zip1 v21.16b, v22.16b, v27.16b\n"
- "zip2 v27.16b, v22.16b, v27.16b\n"
- "zip2 v29.16b, v5.16b, v19.16b\n"
- "zip1 v5.16b, v5.16b, v19.16b\n"
- "zip1 v0.16b, v20.16b, v7.16b\n"
- "zip2 v7.16b, v20.16b, v7.16b\n"
- "zip2 v30.16b, v16.16b, v17.16b\n"
- "zip1 v16.16b, v16.16b, v17.16b\n"
- "zip1 v2.16b, v18.16b, v1.16b\n"
- "zip2 v1.16b, v18.16b, v1.16b\n"
- "mov v26.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- ".inst 0x4e8d9732 // sdot v18.4s, v25.16b, v13.16b\n"
- "mov v4.16b, v31.16b\n"
- ".inst 0x4e8b973f // sdot v31.4s, v25.16b, v11.16b\n"
- ".inst 0x4e8d971f // sdot v31.4s, v24.16b, v13.16b\n"
- "ext v11.16b, v11.16b, v11.16b, #0x1\n"
- "ext v13.16b, v13.16b, v13.16b, #0x1\n"
- ".inst 0x4e8b973a // sdot v26.4s, v25.16b, v11.16b\n"
+ "zip2 v24.16b, v15.16b, v7.16b\n"
+ "zip1 v15.16b, v15.16b, v7.16b\n"
+ "zip1 v20.16b, v5.16b, v28.16b\n"
+ "zip2 v28.16b, v5.16b, v28.16b\n"
+ "zip2 v23.16b, v12.16b, v16.16b\n"
+ "zip1 v12.16b, v12.16b, v16.16b\n"
+ "zip1 v30.16b, v17.16b, v14.16b\n"
+ "zip2 v14.16b, v17.16b, v14.16b\n"
+ "ldr q3, [%x[params], #0x0]\n"
+ "zip2 v19.16b, v8.16b, v29.16b\n"
+ "zip1 v8.16b, v8.16b, v29.16b\n"
+ "zip1 v18.16b, v21.16b, v22.16b\n"
+ "zip2 v22.16b, v21.16b, v22.16b\n"
+ "zip2 v17.16b, v9.16b, v31.16b\n"
+ "zip1 v9.16b, v9.16b, v31.16b\n"
+ "zip1 v16.16b, v1.16b, v2.16b\n"
+ "zip2 v2.16b, v1.16b, v2.16b\n"
+ "zip2 v5.16b, v15.16b, v20.16b\n"
+ "zip1 v15.16b, v15.16b, v20.16b\n"
+ "zip1 v7.16b, v24.16b, v28.16b\n"
+ "zip2 v28.16b, v24.16b, v28.16b\n"
+ "zip2 v21.16b, v8.16b, v18.16b\n"
+ "zip1 v8.16b, v8.16b, v18.16b\n"
+ "zip1 v29.16b, v19.16b, v22.16b\n"
+ "zip2 v22.16b, v19.16b, v22.16b\n"
+ "zip2 v1.16b, v9.16b, v16.16b\n"
+ "zip1 v9.16b, v9.16b, v16.16b\n"
+ "zip1 v31.16b, v17.16b, v2.16b\n"
+ "zip2 v2.16b, v17.16b, v2.16b\n"
+ "mov v26.16b, v3.16b\n"
+ "mov v0.16b, v3.16b\n"
+ "mov v18.16b, v3.16b\n"
+ ".inst 0x4e8c9543 // sdot v3.4s, v10.16b, v12.16b\n"
+ ".inst 0x4e8f9540 // sdot v0.4s, v10.16b, v15.16b\n"
+ "ext v12.16b, v12.16b, v12.16b, #0x1\n"
+ ".inst 0x4e8f9483 // sdot v3.4s, v4.16b, v15.16b\n"
+ "ext v15.16b, v15.16b, v15.16b, #0x1\n"
+ ".inst 0x4e8c955a // sdot v26.4s, v10.16b, v12.16b\n"
"ldr q17, [%x[params], #0x40]\n"
- ".inst 0x4e8d9724 // sdot v4.4s, v25.16b, v13.16b\n"
- ".inst 0x4e859712 // sdot v18.4s, v24.16b, v5.16b\n"
- ".inst 0x4e8596ff // sdot v31.4s, v23.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x4e8d971a // sdot v26.4s, v24.16b, v13.16b\n"
+ ".inst 0x4e889480 // sdot v0.4s, v4.16b, v8.16b\n"
+ ".inst 0x4e8f9552 // sdot v18.4s, v10.16b, v15.16b\n"
+ ".inst 0x4e889723 // sdot v3.4s, v25.16b, v8.16b\n"
+ "ext v8.16b, v8.16b, v8.16b, #0x1\n"
+ ".inst 0x4e8f949a // sdot v26.4s, v4.16b, v15.16b\n"
"ldr q20, [%x[params], #0x50]\n"
- ".inst 0x4e859704 // sdot v4.4s, v24.16b, v5.16b\n"
- ".inst 0x4e9096f2 // sdot v18.4s, v23.16b, v16.16b\n"
- "ext v16.16b, v16.16b, v16.16b, #0x1\n"
"add %x[params], %x[params], #0x60\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x4e8596fa // sdot v26.4s, v23.16b, v5.16b\n"
- ".inst 0x4e9096e4 // sdot v4.4s, v23.16b, v16.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
+ ".inst 0x4e899720 // sdot v0.4s, v25.16b, v9.16b\n"
+ "ext v9.16b, v9.16b, v9.16b, #0x1\n"
+ ".inst 0x4e889492 // sdot v18.4s, v4.16b, v8.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ ".inst 0x4e88973a // sdot v26.4s, v25.16b, v8.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v17.4s\n"
+ ".inst 0x4e899732 // sdot v18.4s, v25.16b, v9.16b\n"
+ "and v16.16b, v3.16b, v20.16b\n"
"sqrdmulh v26.4s, v26.4s, v17.4s\n"
+ "and v19.16b, v0.16b, v20.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v18.4s, v18.4s, v17.4s\n"
- "sqrdmulh v4.4s, v4.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "and v19.16b, v26.16b, v20.16b\n"
- "and v17.16b, v18.16b, v20.16b\n"
- "and v16.16b, v4.16b, v20.16b\n"
+ "and v17.16b, v26.16b, v20.16b\n"
+ "sqadd v3.4s, v3.4s, v16.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
+ "and v16.16b, v18.16b, v20.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v0.4s, v0.4s, v19.4s\n"
+ "srshl v3.4s, v3.4s, v20.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v19.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "sqadd v4.4s, v4.4s, v16.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "srshl v0.4s, v0.4s, v20.4s\n"
+ "add v3.4s, v3.4s, v13.4s\n"
+ "sqadd v18.4s, v18.4s, v16.4s\n"
"srshl v26.4s, v26.4s, v20.4s\n"
+ "add v0.4s, v0.4s, v13.4s\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
"srshl v18.4s, v18.4s, v20.4s\n"
- "srshl v4.4s, v4.4s, v20.4s\n"
- "add v31.4s, v31.4s, v15.4s\n"
- "add v26.4s, v26.4s, v15.4s\n"
- "add v18.4s, v18.4s, v15.4s\n"
- "add v4.4s, v4.4s, v15.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v18.4s, v18.4s, v8.4s\n"
- "smax v4.4s, v4.4s, v8.4s\n"
- "smin v31.4s, v31.4s, v12.4s\n"
- "smin v26.4s, v26.4s, v12.4s\n"
- "smin v18.4s, v18.4s, v12.4s\n"
- "smin v4.4s, v4.4s, v12.4s\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "smax v0.4s, v0.4s, v27.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "add v18.4s, v18.4s, v13.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smax v18.4s, v18.4s, v27.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v18.4s, v18.4s, v11.4s\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
"blt 20f\n"
- "str s31, [x25, x11]\n"
- "str s26, [x24, x11]\n"
- "str s18, [x23, x11]\n"
- "str s4, [x22, x11]\n"
+ "str s3, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s0, [x9, x12]\n"
+ "str s18, [x28, x12]\n"
"b 23f\n"
"20:" // Oddments: Unroll 0: Oddment store
- "add x25, x25, x11\n"
- "add x24, x24, x11\n"
- "add x23, x23, x11\n"
- "add x22, x22, x11\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 21f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v26.h }[0], [x24], #0x2\n"
- "st1 { v18.h }[0], [x23], #0x2\n"
- "st1 { v4.h }[0], [x22], #0x2\n"
+ "st1 { v3.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v0.h }[0], [x9], #0x2\n"
+ "st1 { v18.h }[0], [x28], #0x2\n"
"tbz x20, #0, 22f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v26.b }[2], [x24], #0x1\n"
- "st1 { v18.b }[2], [x23], #0x1\n"
- "st1 { v4.b }[2], [x22], #0x1\n"
+ "st1 { v3.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v0.b }[2], [x9], #0x1\n"
+ "st1 { v18.b }[2], [x28], #0x1\n"
"b 22f\n"
"21:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v26.b }[0], [x24], #0x1\n"
- "st1 { v18.b }[0], [x23], #0x1\n"
- "st1 { v4.b }[0], [x22], #0x1\n"
+ "st1 { v3.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v0.b }[0], [x9], #0x1\n"
+ "st1 { v18.b }[0], [x28], #0x1\n"
"22:" // Oddments: Unroll 0: Oddment store: Bit 1: End
"23:" // Oddments: Unroll 0: After oddment store
"subs x20, x20, #0x4\n"
- "add x11, x11, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
- "ldr q31, [%x[params], #0x0]\n"
- "ldr q23, [%x[params], #0x10]\n"
- "mov v26.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "ldr q22, [%x[params], #0x20]\n"
+ "ldr q3, [%x[params], #0x0]\n"
+ "ldr q25, [%x[params], #0x10]\n"
+ "cmp x20, #0x4\n"
+ "ldr q24, [%x[params], #0x20]\n"
"ldr q16, [%x[params], #0x30]\n"
- "mov v4.16b, v31.16b\n"
- ".inst 0x4e8a96ff // sdot v31.4s, v23.16b, v10.16b\n"
"ldr q17, [%x[params], #0x40]\n"
"ldr q20, [%x[params], #0x50]\n"
- ".inst 0x4e9c96f2 // sdot v18.4s, v23.16b, v28.16b\n"
- ".inst 0x4e9c96df // sdot v31.4s, v22.16b, v28.16b\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- "ext v28.16b, v28.16b, v28.16b, #0x1\n"
- ".inst 0x4e8a96fa // sdot v26.4s, v23.16b, v10.16b\n"
- "cmp x20, #0x4\n"
- ".inst 0x4e9c96e4 // sdot v4.4s, v23.16b, v28.16b\n"
- ".inst 0x4e9d96d2 // sdot v18.4s, v22.16b, v29.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e9d961f // sdot v31.4s, v16.16b, v29.16b\n"
- "ext v29.16b, v29.16b, v29.16b, #0x1\n"
- ".inst 0x4e9c96da // sdot v26.4s, v22.16b, v28.16b\n"
- ".inst 0x4e9d96c4 // sdot v4.4s, v22.16b, v29.16b\n"
- ".inst 0x4e9e9612 // sdot v18.4s, v16.16b, v30.16b\n"
- "ext v30.16b, v30.16b, v30.16b, #0x1\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x4e9d961a // sdot v26.4s, v16.16b, v29.16b\n"
- ".inst 0x4e9e9604 // sdot v4.4s, v16.16b, v30.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
+ "mov v26.16b, v3.16b\n"
+ "mov v0.16b, v3.16b\n"
+ "mov v18.16b, v3.16b\n"
+ ".inst 0x4e979723 // sdot v3.4s, v25.16b, v23.16b\n"
+ "ext v23.16b, v23.16b, v23.16b, #0x1\n"
+ ".inst 0x4e859720 // sdot v0.4s, v25.16b, v5.16b\n"
+ ".inst 0x4e859703 // sdot v3.4s, v24.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x4e97973a // sdot v26.4s, v25.16b, v23.16b\n"
+ ".inst 0x4e859732 // sdot v18.4s, v25.16b, v5.16b\n"
+ ".inst 0x4e959700 // sdot v0.4s, v24.16b, v21.16b\n"
+ ".inst 0x4e959603 // sdot v3.4s, v16.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x4e85971a // sdot v26.4s, v24.16b, v5.16b\n"
+ ".inst 0x4e959712 // sdot v18.4s, v24.16b, v21.16b\n"
+ ".inst 0x4e819600 // sdot v0.4s, v16.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ ".inst 0x4e95961a // sdot v26.4s, v16.16b, v21.16b\n"
+ ".inst 0x4e819612 // sdot v18.4s, v16.16b, v1.16b\n"
+ "and v16.16b, v3.16b, v20.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v17.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sqrdmulh v18.4s, v18.4s, v17.4s\n"
- "sqrdmulh v4.4s, v4.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "and v19.16b, v26.16b, v20.16b\n"
- "and v17.16b, v18.16b, v20.16b\n"
- "and v16.16b, v4.16b, v20.16b\n"
+ "and v19.16b, v0.16b, v20.16b\n"
+ "sqadd v3.4s, v3.4s, v16.4s\n"
+ "and v17.16b, v26.16b, v20.16b\n"
+ "and v16.16b, v18.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v20.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v19.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "sqadd v4.4s, v4.4s, v16.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
+ "sqadd v0.4s, v0.4s, v19.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v13.4s\n"
+ "sqadd v18.4s, v18.4s, v16.4s\n"
+ "srshl v0.4s, v0.4s, v20.4s\n"
"srshl v26.4s, v26.4s, v20.4s\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
"srshl v18.4s, v18.4s, v20.4s\n"
- "srshl v4.4s, v4.4s, v20.4s\n"
- "add v31.4s, v31.4s, v15.4s\n"
- "add v26.4s, v26.4s, v15.4s\n"
- "add v18.4s, v18.4s, v15.4s\n"
- "add v4.4s, v4.4s, v15.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v18.4s, v18.4s, v8.4s\n"
- "smax v4.4s, v4.4s, v8.4s\n"
- "smin v31.4s, v31.4s, v12.4s\n"
- "smin v26.4s, v26.4s, v12.4s\n"
- "smin v18.4s, v18.4s, v12.4s\n"
- "smin v4.4s, v4.4s, v12.4s\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "add v0.4s, v0.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "add v18.4s, v18.4s, v13.4s\n"
+ "smax v0.4s, v0.4s, v27.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smax v18.4s, v18.4s, v27.4s\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v18.4s, v18.4s, v11.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
"blt 24f\n"
- "str s31, [x25, x11]\n"
- "str s26, [x24, x11]\n"
- "str s18, [x23, x11]\n"
- "str s4, [x22, x11]\n"
+ "str s3, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s0, [x9, x12]\n"
+ "str s18, [x28, x12]\n"
"b 27f\n"
"24:" // Oddments: Unroll 1: Oddment store
- "add x25, x25, x11\n"
- "add x24, x24, x11\n"
- "add x23, x23, x11\n"
- "add x22, x22, x11\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 25f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v26.h }[0], [x24], #0x2\n"
- "st1 { v18.h }[0], [x23], #0x2\n"
- "st1 { v4.h }[0], [x22], #0x2\n"
+ "st1 { v3.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v0.h }[0], [x9], #0x2\n"
+ "st1 { v18.h }[0], [x28], #0x2\n"
"tbz x20, #0, 26f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v26.b }[2], [x24], #0x1\n"
- "st1 { v18.b }[2], [x23], #0x1\n"
- "st1 { v4.b }[2], [x22], #0x1\n"
+ "st1 { v3.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v0.b }[2], [x9], #0x1\n"
+ "st1 { v18.b }[2], [x28], #0x1\n"
"b 26f\n"
"25:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v26.b }[0], [x24], #0x1\n"
- "st1 { v18.b }[0], [x23], #0x1\n"
- "st1 { v4.b }[0], [x22], #0x1\n"
+ "st1 { v3.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v0.b }[0], [x9], #0x1\n"
+ "st1 { v18.b }[0], [x28], #0x1\n"
"26:" // Oddments: Unroll 1: Oddment store: Bit 1: End
"27:" // Oddments: Unroll 1: After oddment store
"subs x20, x20, #0x4\n"
- "add x11, x11, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
- "ldr q31, [%x[params], #0x0]\n"
+ "ldr q3, [%x[params], #0x0]\n"
"ldr q23, [%x[params], #0x10]\n"
- "mov v26.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "ldr q22, [%x[params], #0x20]\n"
+ "cmp x20, #0x4\n"
+ "ldr q21, [%x[params], #0x20]\n"
"ldr q16, [%x[params], #0x30]\n"
- "mov v4.16b, v31.16b\n"
- ".inst 0x4e8396ff // sdot v31.4s, v23.16b, v3.16b\n"
"ldr q17, [%x[params], #0x40]\n"
"ldr q20, [%x[params], #0x50]\n"
- ".inst 0x4e9596f2 // sdot v18.4s, v23.16b, v21.16b\n"
- ".inst 0x4e9596df // sdot v31.4s, v22.16b, v21.16b\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x4e8396fa // sdot v26.4s, v23.16b, v3.16b\n"
- "cmp x20, #0x4\n"
- ".inst 0x4e9596e4 // sdot v4.4s, v23.16b, v21.16b\n"
- ".inst 0x4e8096d2 // sdot v18.4s, v22.16b, v0.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x4e80961f // sdot v31.4s, v16.16b, v0.16b\n"
- "ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x4e9596da // sdot v26.4s, v22.16b, v21.16b\n"
- ".inst 0x4e8096c4 // sdot v4.4s, v22.16b, v0.16b\n"
- ".inst 0x4e829612 // sdot v18.4s, v16.16b, v2.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x4e80961a // sdot v26.4s, v16.16b, v0.16b\n"
- ".inst 0x4e829604 // sdot v4.4s, v16.16b, v2.16b\n"
- "and v16.16b, v31.16b, v20.16b\n"
+ "mov v26.16b, v3.16b\n"
+ "mov v0.16b, v3.16b\n"
+ "mov v18.16b, v3.16b\n"
+ ".inst 0x4e9e96e3 // sdot v3.4s, v23.16b, v30.16b\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x1\n"
+ ".inst 0x4e8796e0 // sdot v0.4s, v23.16b, v7.16b\n"
+ ".inst 0x4e8796a3 // sdot v3.4s, v21.16b, v7.16b\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ ".inst 0x4e9e96fa // sdot v26.4s, v23.16b, v30.16b\n"
+ ".inst 0x4e8796f2 // sdot v18.4s, v23.16b, v7.16b\n"
+ ".inst 0x4e9d96a0 // sdot v0.4s, v21.16b, v29.16b\n"
+ ".inst 0x4e9d9603 // sdot v3.4s, v16.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x4e8796ba // sdot v26.4s, v21.16b, v7.16b\n"
+ ".inst 0x4e9d96b2 // sdot v18.4s, v21.16b, v29.16b\n"
+ ".inst 0x4e9f9600 // sdot v0.4s, v16.16b, v31.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x1\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ ".inst 0x4e9d961a // sdot v26.4s, v16.16b, v29.16b\n"
+ ".inst 0x4e9f9612 // sdot v18.4s, v16.16b, v31.16b\n"
+ "and v16.16b, v3.16b, v20.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v17.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sqrdmulh v18.4s, v18.4s, v17.4s\n"
- "sqrdmulh v4.4s, v4.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "and v19.16b, v26.16b, v20.16b\n"
- "and v17.16b, v18.16b, v20.16b\n"
- "and v16.16b, v4.16b, v20.16b\n"
+ "and v19.16b, v0.16b, v20.16b\n"
+ "sqadd v3.4s, v3.4s, v16.4s\n"
+ "and v17.16b, v26.16b, v20.16b\n"
+ "and v16.16b, v18.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v20.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v19.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "sqadd v4.4s, v4.4s, v16.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
+ "sqadd v0.4s, v0.4s, v19.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v13.4s\n"
+ "sqadd v18.4s, v18.4s, v16.4s\n"
+ "srshl v0.4s, v0.4s, v20.4s\n"
"srshl v26.4s, v26.4s, v20.4s\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
"srshl v18.4s, v18.4s, v20.4s\n"
- "srshl v4.4s, v4.4s, v20.4s\n"
- "add v31.4s, v31.4s, v15.4s\n"
- "add v26.4s, v26.4s, v15.4s\n"
- "add v18.4s, v18.4s, v15.4s\n"
- "add v4.4s, v4.4s, v15.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v18.4s, v18.4s, v8.4s\n"
- "smax v4.4s, v4.4s, v8.4s\n"
- "smin v31.4s, v31.4s, v12.4s\n"
- "smin v26.4s, v26.4s, v12.4s\n"
- "smin v18.4s, v18.4s, v12.4s\n"
- "smin v4.4s, v4.4s, v12.4s\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "add v0.4s, v0.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "add v18.4s, v18.4s, v13.4s\n"
+ "smax v0.4s, v0.4s, v27.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smax v18.4s, v18.4s, v27.4s\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v18.4s, v18.4s, v11.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
"blt 28f\n"
- "str s31, [x25, x11]\n"
- "str s26, [x24, x11]\n"
- "str s18, [x23, x11]\n"
- "str s4, [x22, x11]\n"
+ "str s3, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s0, [x9, x12]\n"
+ "str s18, [x28, x12]\n"
"b 31f\n"
"28:" // Oddments: Unroll 2: Oddment store
- "add x25, x25, x11\n"
- "add x24, x24, x11\n"
- "add x23, x23, x11\n"
- "add x22, x22, x11\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 29f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v26.h }[0], [x24], #0x2\n"
- "st1 { v18.h }[0], [x23], #0x2\n"
- "st1 { v4.h }[0], [x22], #0x2\n"
+ "st1 { v3.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v0.h }[0], [x9], #0x2\n"
+ "st1 { v18.h }[0], [x28], #0x2\n"
"tbz x20, #0, 30f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v26.b }[2], [x24], #0x1\n"
- "st1 { v18.b }[2], [x23], #0x1\n"
- "st1 { v4.b }[2], [x22], #0x1\n"
+ "st1 { v3.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v0.b }[2], [x9], #0x1\n"
+ "st1 { v18.b }[2], [x28], #0x1\n"
"b 30f\n"
"29:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v26.b }[0], [x24], #0x1\n"
- "st1 { v18.b }[0], [x23], #0x1\n"
- "st1 { v4.b }[0], [x22], #0x1\n"
+ "st1 { v3.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v0.b }[0], [x9], #0x1\n"
+ "st1 { v18.b }[0], [x28], #0x1\n"
"30:" // Oddments: Unroll 2: Oddment store: Bit 1: End
"31:" // Oddments: Unroll 2: After oddment store
"subs x20, x20, #0x4\n"
- "add x11, x11, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
- "ldr q31, [%x[params], #0x0]\n"
+ "ldr q3, [%x[params], #0x0]\n"
"ldr q20, [%x[params], #0x10]\n"
- "mov v26.16b, v31.16b\n"
- "mov v18.16b, v31.16b\n"
- "ldr q19, [%x[params], #0x20]\n"
+ "ldr q17, [%x[params], #0x20]\n"
"ldr q16, [%x[params], #0x30]\n"
- "mov v4.16b, v31.16b\n"
- ".inst 0x4e8e969f // sdot v31.4s, v20.16b, v14.16b\n"
- "ldr q17, [%x[params], #0x40]\n"
- "ldr q22, [%x[params], #0x50]\n"
- ".inst 0x4e9b9692 // sdot v18.4s, v20.16b, v27.16b\n"
- ".inst 0x4e9b967f // sdot v31.4s, v19.16b, v27.16b\n"
+ "ldr q1, [%x[params], #0x40]\n"
+ "ldr q19, [%x[params], #0x50]\n"
+ "add %x[params], %x[params], #0x60\n"
+ "mov v26.16b, v3.16b\n"
+ "mov v0.16b, v3.16b\n"
+ "mov v18.16b, v3.16b\n"
+ ".inst 0x4e8e9683 // sdot v3.4s, v20.16b, v14.16b\n"
"ext v14.16b, v14.16b, v14.16b, #0x1\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
+ ".inst 0x4e9c9680 // sdot v0.4s, v20.16b, v28.16b\n"
+ ".inst 0x4e9c9623 // sdot v3.4s, v17.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
".inst 0x4e8e969a // sdot v26.4s, v20.16b, v14.16b\n"
- "add %x[params], %x[params], #0x60\n"
- ".inst 0x4e9b9684 // sdot v4.4s, v20.16b, v27.16b\n"
- ".inst 0x4e879672 // sdot v18.4s, v19.16b, v7.16b\n"
- ".inst 0x4e87961f // sdot v31.4s, v16.16b, v7.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- ".inst 0x4e9b967a // sdot v26.4s, v19.16b, v27.16b\n"
- ".inst 0x4e879664 // sdot v4.4s, v19.16b, v7.16b\n"
- ".inst 0x4e819612 // sdot v18.4s, v16.16b, v1.16b\n"
- "ext v1.16b, v1.16b, v1.16b, #0x1\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x4e87961a // sdot v26.4s, v16.16b, v7.16b\n"
- ".inst 0x4e819604 // sdot v4.4s, v16.16b, v1.16b\n"
- "and v16.16b, v31.16b, v22.16b\n"
+ ".inst 0x4e9c9692 // sdot v18.4s, v20.16b, v28.16b\n"
+ ".inst 0x4e969620 // sdot v0.4s, v17.16b, v22.16b\n"
+ ".inst 0x4e969603 // sdot v3.4s, v16.16b, v22.16b\n"
+ "ext v22.16b, v22.16b, v22.16b, #0x1\n"
+ ".inst 0x4e9c963a // sdot v26.4s, v17.16b, v28.16b\n"
+ ".inst 0x4e969632 // sdot v18.4s, v17.16b, v22.16b\n"
+ ".inst 0x4e829600 // sdot v0.4s, v16.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ "sqrdmulh v3.4s, v3.4s, v1.4s\n"
+ ".inst 0x4e96961a // sdot v26.4s, v16.16b, v22.16b\n"
+ ".inst 0x4e829612 // sdot v18.4s, v16.16b, v2.16b\n"
+ "and v16.16b, v3.16b, v19.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v1.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v17.4s\n"
- "sqrdmulh v18.4s, v18.4s, v17.4s\n"
- "sqrdmulh v4.4s, v4.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "and v23.16b, v26.16b, v22.16b\n"
- "and v17.16b, v18.16b, v22.16b\n"
- "and v16.16b, v4.16b, v22.16b\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v1.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v1.4s\n"
+ "and v29.16b, v0.16b, v19.16b\n"
+ "sqadd v3.4s, v3.4s, v16.4s\n"
+ "and v17.16b, v26.16b, v19.16b\n"
+ "and v16.16b, v18.16b, v19.16b\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v26.4s, v26.4s, v23.4s\n"
- "sqadd v18.4s, v18.4s, v17.4s\n"
- "sqadd v4.4s, v4.4s, v16.4s\n"
- "srshl v31.4s, v31.4s, v22.4s\n"
- "srshl v26.4s, v26.4s, v22.4s\n"
- "srshl v18.4s, v18.4s, v22.4s\n"
- "srshl v4.4s, v4.4s, v22.4s\n"
- "add v31.4s, v31.4s, v15.4s\n"
- "add v26.4s, v26.4s, v15.4s\n"
- "add v18.4s, v18.4s, v15.4s\n"
- "add v4.4s, v4.4s, v15.4s\n"
- "smax v31.4s, v31.4s, v8.4s\n"
- "smax v26.4s, v26.4s, v8.4s\n"
- "smax v18.4s, v18.4s, v8.4s\n"
- "smax v4.4s, v4.4s, v8.4s\n"
- "smin v31.4s, v31.4s, v12.4s\n"
- "smin v26.4s, v26.4s, v12.4s\n"
- "smin v18.4s, v18.4s, v12.4s\n"
- "smin v4.4s, v4.4s, v12.4s\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "sqadd v0.4s, v0.4s, v29.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v13.4s\n"
+ "sqadd v18.4s, v18.4s, v16.4s\n"
+ "srshl v0.4s, v0.4s, v19.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v27.4s\n"
+ "srshl v18.4s, v18.4s, v19.4s\n"
+ "add v0.4s, v0.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "add v18.4s, v18.4s, v13.4s\n"
+ "smax v0.4s, v0.4s, v27.4s\n"
+ "smax v26.4s, v26.4s, v27.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smax v18.4s, v18.4s, v27.4s\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v18.4s, v18.4s, v11.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
"32:" // Oddments: Unroll 3: Oddment store
- "add x25, x25, x11\n"
- "add x24, x24, x11\n"
- "add x23, x23, x11\n"
- "add x22, x22, x11\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 33f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v26.h }[0], [x24], #0x2\n"
- "st1 { v18.h }[0], [x23], #0x2\n"
- "st1 { v4.h }[0], [x22], #0x2\n"
+ "st1 { v3.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v0.h }[0], [x9], #0x2\n"
+ "st1 { v18.h }[0], [x28], #0x2\n"
"tbz x20, #0, 34f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v26.b }[2], [x24], #0x1\n"
- "st1 { v18.b }[2], [x23], #0x1\n"
- "st1 { v4.b }[2], [x22], #0x1\n"
+ "st1 { v3.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v0.b }[2], [x9], #0x1\n"
+ "st1 { v18.b }[2], [x28], #0x1\n"
"b 34f\n"
"33:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v26.b }[0], [x24], #0x1\n"
- "st1 { v18.b }[0], [x23], #0x1\n"
- "st1 { v4.b }[0], [x22], #0x1\n"
+ "st1 { v3.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v0.b }[0], [x9], #0x1\n"
+ "st1 { v18.b }[0], [x28], #0x1\n"
"34:" // Oddments: Unroll 3: Oddment store: Bit 1: End
"35:" // End
: [params] "+&r" (params)
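
The register renumbering throughout the hunks above leaves the requantization sequence itself unchanged: each 32-bit accumulator lane is scaled with SQRDMULH against the per-layer multiplier, nudged with the AND/SSHR/SQADD fixup, rounding-right-shifted with SRSHL by the (negative) per-layer shift, offset by the Requantize32 c_offset, clamped between minval and maxval, and narrowed through paired UZP1 instructions before the 4-byte stores. What follows is a minimal scalar C++ model of that per-lane arithmetic, assuming gemmlowp-style rounding semantics; the variable and function names are illustrative and do not come from the kernel source.

#include <cstdint>
#include <algorithm>

// SQRDMULH: saturating rounding doubling multiply, returning the high 32 bits.
static int32_t sqrdmulh(int32_t a, int32_t b) {
    if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;  // the only saturating case
    int64_t prod = static_cast<int64_t>(a) * b;
    return static_cast<int32_t>((2 * prod + (INT64_C(1) << 31)) >> 32);
}

// SRSHL with a non-positive shift, i.e. a rounding arithmetic right shift.
static int32_t srshl(int32_t x, int32_t shift) {
    if (shift >= 0) return x << shift;
    int32_t s = -shift;
    return static_cast<int32_t>((static_cast<int64_t>(x) + (INT64_C(1) << (s - 1))) >> s);
}

// One output byte from one int32 accumulator lane. `right_shift` is the
// negative shift held in the broadcast shift register (v20/v19/v22 above);
// `c_offset`, `minval`, `maxval` correspond to the Requantize32 fields loaded
// with ld1r in the surrounding code.
static uint8_t requantize_lane(int32_t acc, int32_t multiplier, int32_t right_shift,
                               int32_t c_offset, int32_t minval, int32_t maxval) {
    int32_t v = sqrdmulh(acc, multiplier);
    // AND/SSHR/SQADD fixup: (v & right_shift) keeps sign(v) & sign(shift) in
    // bit 31, so the arithmetic shift yields -1 for negative v (shift being
    // negative) and 0 otherwise. The asm uses a saturating add; a plain add
    // only differs at INT32_MIN. Assumes arithmetic >> on negative int32.
    v += (v & right_shift) >> 31;
    v = srshl(v, right_shift);
    v += c_offset;                          // add v13/v12/v14 in the hunks above
    v = std::min(std::max(v, minval), maxval);  // smax/smin clamp
    return static_cast<uint8_t>(v);         // paired UZP1s keep the low byte of each lane
}

Compiled with any C++11 compiler, requantize_lane should reproduce, lane for lane, what the vector sequence computes, which can be handy when sanity-checking a regenerated kernel against reference output; it is a sketch of the rounding arithmetic, not a drop-in replacement for the kernel.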
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index 5a28daffbf..ceb2693550 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,1622 +33,1622 @@ namespace depthwise {
void a64_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const unsigned int n_channels, const uint8_t *const *const inptrs, const uint8_t *params, const int32_t *, const arm_gemm::Requantize32& qp, const int32_t *, const int32_t *, uint8_t *const *const outptrs)
{
__asm__ __volatile__(
- "mov x20, #0x1\n"
- "orr x20, x20, #0x100\n"
+ "mov x17, #0x1\n"
+ "lsr x16, %x[n_channels], #0x4\n"
"ldp x15, x14, [%x[inptrs], #0x0]\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "orr x20, x20, #0x10000\n"
- "lsr x11, %x[n_channels], #0x4\n"
- "dup v12.4s, w20\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
+ "ldp x27, x26, [%x[inptrs], #0x10]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_minval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "ldp x25, x24, [%x[inptrs], #0x20]\n"
+ "ldp x23, x22, [%x[inptrs], #0x30]\n"
+ "ld1r { v7.4s }, [x21]\n"
"ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
- "mov x28, #0x0\n"
- "mov x27, #0x0\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "ldp x25, x24, [%x[outptrs], #0x0]\n"
- "ldp x23, x22, [%x[outptrs], #0x10]\n"
- "cbz x11, 3f\n"
- "ldr q15, [x15, x28]\n"
- "ldr q28, [x14, x28]\n"
- "subs x11, x11, #0x1\n"
- "ldr q30, [x13, x28]\n"
- "ldr q8, [x12, x28]\n"
- "zip2 v19.16b, v15.16b, v30.16b\n"
- "zip1 v15.16b, v15.16b, v30.16b\n"
- "ldr q26, [x10, x28]\n"
- "ldr q0, [x9, x28]\n"
- "zip1 v7.16b, v28.16b, v8.16b\n"
- "zip2 v8.16b, v28.16b, v8.16b\n"
- "ldr q29, [x26, x28]\n"
- "ldr q10, [x21, x28]\n"
- "zip2 v25.16b, v15.16b, v7.16b\n"
- "zip1 v15.16b, v15.16b, v7.16b\n"
- "ldr q1, [%x[params], #0x10]\n"
- "ldr q6, [%x[params], #0x20]\n"
- "zip1 v7.16b, v19.16b, v8.16b\n"
- "zip2 v8.16b, v19.16b, v8.16b\n"
- "ldr q31, [%x[params], #0x0]\n"
- "ldr q20, [%x[params], #0x30]\n"
- "zip2 v21.16b, v26.16b, v29.16b\n"
- "zip1 v26.16b, v26.16b, v29.16b\n"
- "ldp x21, x20, [%x[inptrs], #0x40]\n"
- "ldr q22, [x21, x28]\n"
- "zip1 v27.16b, v0.16b, v10.16b\n"
- "zip2 v10.16b, v0.16b, v10.16b\n"
- "ldr q17, [x20, x28]\n"
- "ldp x21, x20, [%x[inptrs], #0x50]\n"
- "zip2 v23.16b, v26.16b, v27.16b\n"
- "zip1 v26.16b, v26.16b, v27.16b\n"
- "ldr q9, [x21, x28]\n"
- "ldr q5, [x20, x28]\n"
- "zip2 v28.16b, v22.16b, v9.16b\n"
- "zip1 v22.16b, v22.16b, v9.16b\n"
- "ldp x21, x20, [%x[inptrs], #0x60]\n"
- "ldr q27, [x21, x28]\n"
- "zip1 v24.16b, v17.16b, v5.16b\n"
- "zip2 v5.16b, v17.16b, v5.16b\n"
- "ldr q18, [x20, x28]\n"
+ "ld1r { v24.4s }, [x21]\n"
+ "ld1r { v12.4s }, [x20]\n"
+ "orr x17, x17, #0x100\n"
+ "mov x13, #0x0\n"
+ "mov x12, #0x0\n"
+ "ldp x11, x10, [%x[outptrs], #0x0]\n"
+ "ldp x9, x28, [%x[outptrs], #0x10]\n"
+ "orr x17, x17, #0x10000\n"
+ "dup v15.4s, w17\n"
+ "cbz x16, 3f\n"
+ "ldr q13, [x15, x13]\n"
+ "ldr q5, [x14, x13]\n"
+ "subs x16, x16, #0x1\n"
+ "ldr q27, [x27, x13]\n"
+ "ldr q9, [x26, x13]\n"
+ "ldr q1, [x25, x13]\n"
+ "ldr q28, [x24, x13]\n"
+ "ldr q26, [x23, x13]\n"
+ "ldr q4, [x22, x13]\n"
+ "ldr q30, [%x[params], #0x10]\n"
+ "ldr q8, [%x[params], #0x20]\n"
+ "zip2 v19.16b, v13.16b, v27.16b\n"
+ "zip1 v13.16b, v13.16b, v27.16b\n"
+ "ldr q17, [%x[params], #0x30]\n"
+ "ldp x27, x26, [%x[inptrs], #0x40]\n"
+ "zip1 v3.16b, v5.16b, v9.16b\n"
+ "zip2 v9.16b, v5.16b, v9.16b\n"
+ "ldp x25, x24, [%x[inptrs], #0x50]\n"
+ "ldp x23, x22, [%x[inptrs], #0x60]\n"
+ "zip2 v18.16b, v1.16b, v26.16b\n"
+ "zip1 v1.16b, v1.16b, v26.16b\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "zip1 v3.16b, v21.16b, v10.16b\n"
- "zip2 v10.16b, v21.16b, v10.16b\n"
- "ldr q4, [x21, x28]\n"
- "ldr q9, [x20, x28]\n"
- "zip2 v17.16b, v27.16b, v4.16b\n"
- "zip1 v27.16b, v27.16b, v4.16b\n"
- "zip1 v4.16b, v18.16b, v9.16b\n"
- "zip2 v9.16b, v18.16b, v9.16b\n"
+ "zip1 v16.16b, v28.16b, v4.16b\n"
+ "zip2 v4.16b, v28.16b, v4.16b\n"
+ "ldr q10, [x27, x13]\n"
+ "ldr q14, [x26, x13]\n"
+ "zip2 v2.16b, v13.16b, v3.16b\n"
+ "zip1 v13.16b, v13.16b, v3.16b\n"
"ldp x15, x14, [%x[inptrs], #0x0]\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "zip2 v19.16b, v22.16b, v24.16b\n"
- "zip1 v22.16b, v22.16b, v24.16b\n"
- "zip1 v0.16b, v28.16b, v5.16b\n"
- "zip2 v5.16b, v28.16b, v5.16b\n"
+ "ldr q3, [x25, x13]\n"
+ "ldr q6, [x24, x13]\n"
+ "zip1 v0.16b, v19.16b, v9.16b\n"
+ "zip2 v9.16b, v19.16b, v9.16b\n"
+ "ldr q5, [x23, x13]\n"
+ "ldr q20, [x22, x13]\n"
+ "zip2 v21.16b, v1.16b, v16.16b\n"
+ "zip1 v1.16b, v1.16b, v16.16b\n"
+ "ldr q16, [x21, x13]\n"
+ "ldr q25, [x20, x13]\n"
+ "zip1 v28.16b, v18.16b, v4.16b\n"
+ "zip2 v4.16b, v18.16b, v4.16b\n"
+ "ldr q31, [%x[params], #0x0]\n"
+ "zip2 v19.16b, v10.16b, v3.16b\n"
+ "zip1 v10.16b, v10.16b, v3.16b\n"
+ "ldp x27, x26, [%x[inptrs], #0x10]\n"
+ "zip1 v18.16b, v14.16b, v6.16b\n"
+ "zip2 v6.16b, v14.16b, v6.16b\n"
+ "ldp x25, x24, [%x[inptrs], #0x20]\n"
+ "ldp x23, x22, [%x[inptrs], #0x30]\n"
+ "zip2 v23.16b, v5.16b, v16.16b\n"
+ "zip1 v5.16b, v5.16b, v16.16b\n"
"add %x[params], %x[params], #0x40\n"
- "zip2 v24.16b, v27.16b, v4.16b\n"
- "zip1 v27.16b, v27.16b, v4.16b\n"
- "zip1 v2.16b, v17.16b, v9.16b\n"
- "zip2 v9.16b, v17.16b, v9.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
+ "zip1 v16.16b, v20.16b, v25.16b\n"
+ "zip2 v25.16b, v20.16b, v25.16b\n"
+ "zip2 v29.16b, v10.16b, v18.16b\n"
+ "zip1 v10.16b, v10.16b, v18.16b\n"
+ "zip1 v27.16b, v19.16b, v6.16b\n"
+ "zip2 v6.16b, v19.16b, v6.16b\n"
+ "zip2 v18.16b, v5.16b, v16.16b\n"
+ "zip1 v5.16b, v5.16b, v16.16b\n"
+ "zip1 v14.16b, v23.16b, v25.16b\n"
+ "zip2 v25.16b, v23.16b, v25.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
"beq 2f\n"
"1:" // Loop
- "movi v21.4s, #0x0\n"
- ".inst 0x6e9a9595 // udot v21.4s, v12.16b, v26.16b\n"
- ".inst 0x6e8f943f // udot v31.4s, v1.16b, v15.16b\n"
- "add x28, x28, #0x10\n"
- ".inst 0x6e969595 // udot v21.4s, v12.16b, v22.16b\n"
- ".inst 0x6e9a943d // udot v29.4s, v1.16b, v26.16b\n"
- "movi v18.4s, #0x0\n"
- "subs x11, x11, #0x1\n"
- ".inst 0x6e9a94df // udot v31.4s, v6.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "mov v17.16b, v21.16b\n .inst 0x6e9b9591 // udot v17.4s, v12.16b, v27.16b\n"
- ".inst 0x6e8f9595 // udot v21.4s, v12.16b, v15.16b\n"
- "ext v15.16b, v15.16b, v15.16b, #0x1\n"
- ".inst 0x6e9a9592 // udot v18.4s, v12.16b, v26.16b\n"
- ".inst 0x6e9694dd // udot v29.4s, v6.16b, v22.16b\n"
- ".inst 0x6e96969f // udot v31.4s, v20.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x6e8f943e // udot v30.4s, v1.16b, v15.16b\n"
- ".inst 0x6e9a943c // udot v28.4s, v1.16b, v26.16b\n"
- "mls v31.4s, v21.4s, v16.4s\n"
- ".inst 0x6e969592 // udot v18.4s, v12.16b, v22.16b\n"
- ".inst 0x6e9b969d // udot v29.4s, v20.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x6e9a94de // udot v30.4s, v6.16b, v26.16b\n"
- "ldr q26, [%x[params], #0x10]\n"
- ".inst 0x6e9694dc // udot v28.4s, v6.16b, v22.16b\n"
- "mls v29.4s, v17.4s, v16.4s\n"
- "mov v21.16b, v18.16b\n .inst 0x6e9b9595 // udot v21.4s, v12.16b, v27.16b\n"
- ".inst 0x6e8f9592 // udot v18.4s, v12.16b, v15.16b\n"
- "ldr q17, [%x[params], #0x0]\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x6e96969e // udot v30.4s, v20.16b, v22.16b\n"
- ".inst 0x6e9b969c // udot v28.4s, v20.16b, v27.16b\n"
- "mls v30.4s, v18.4s, v16.4s\n"
- "mls v28.4s, v21.4s, v16.4s\n"
- "and v15.16b, v31.16b, v26.16b\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v17.4s\n"
- "sqrdmulh v29.4s, v29.4s, v17.4s\n"
- "sqrdmulh v28.4s, v28.4s, v17.4s\n"
- "ldr q1, [%x[params], #0x60]\n"
- "sqadd v31.4s, v31.4s, v15.4s\n"
- "and v18.16b, v30.16b, v26.16b\n"
- "and v21.16b, v29.16b, v26.16b\n"
- "and v17.16b, v28.16b, v26.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x6e8d97df // udot v31.4s, v30.16b, v13.16b\n"
+ ".inst 0x6e8197c3 // udot v3.4s, v30.16b, v1.16b\n"
+ "add x13, x13, #0x10\n"
+ "movi v22.4s, #0x0\n"
+ "subs x16, x16, #0x1\n"
+ ".inst 0x6e8195f3 // udot v19.4s, v15.16b, v1.16b\n"
+ ".inst 0x6e81951f // udot v31.4s, v8.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ ".inst 0x6e8a9503 // udot v3.4s, v8.16b, v10.16b\n"
+ ".inst 0x6e8a95f3 // udot v19.4s, v15.16b, v10.16b\n"
+ ".inst 0x6e8195f6 // udot v22.4s, v15.16b, v1.16b\n"
+ ".inst 0x6e8a963f // udot v31.4s, v17.16b, v10.16b\n"
+ "ext v10.16b, v10.16b, v10.16b, #0x1\n"
+ ".inst 0x6e8197d7 // udot v23.4s, v30.16b, v1.16b\n"
+ "mov v16.16b, v19.16b\n .inst 0x6e8595f0 // udot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x6e8d95f3 // udot v19.4s, v15.16b, v13.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x6e859623 // udot v3.4s, v17.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x6e8a95f6 // udot v22.4s, v15.16b, v10.16b\n"
+ ".inst 0x6e8d97da // udot v26.4s, v30.16b, v13.16b\n"
+ ".inst 0x6e8a9517 // udot v23.4s, v8.16b, v10.16b\n"
+ "mls v31.4s, v19.4s, v24.4s\n"
+ "movi v19.4s, #0x0\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x6e81951a // udot v26.4s, v8.16b, v1.16b\n"
+ "ldr q8, [%x[params], #0x10]\n"
+ "mov v16.16b, v22.16b\n .inst 0x6e8595f0 // udot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x6e8d95f6 // udot v22.4s, v15.16b, v13.16b\n"
+ "ldr q1, [%x[params], #0x0]\n"
+ ".inst 0x6e9595f3 // udot v19.4s, v15.16b, v21.16b\n"
+ ".inst 0x6e859637 // udot v23.4s, v17.16b, v5.16b\n"
+ ".inst 0x6e8a963a // udot v26.4s, v17.16b, v10.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v1.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v1.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v8.16b\n"
+ ".inst 0x6e9d95f3 // udot v19.4s, v15.16b, v29.16b\n"
+ "mls v26.4s, v22.4s, v24.4s\n"
+ "movi v20.4s, #0x0\n"
+ "sqrdmulh v23.4s, v23.4s, v1.4s\n"
+ "and v30.16b, v3.16b, v8.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v1.4s\n"
+ "ldr q10, [%x[params], #0x60]\n"
+ "mov v22.16b, v19.16b\n .inst 0x6e9295f6 // udot v22.4s, v15.16b, v18.16b\n"
+ ".inst 0x6e8295f3 // udot v19.4s, v15.16b, v2.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v8.16b\n"
+ "and v16.16b, v26.16b, v8.16b\n"
+ "sqadd v3.4s, v3.4s, v30.4s\n"
+ "ldr q5, [%x[params], #0x50]\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v26.4s\n"
- "sqadd v30.4s, v30.4s, v18.4s\n"
- "ldr q18, [%x[params], #0x40]\n"
- "sqadd v29.4s, v29.4s, v21.4s\n"
- "ldr q27, [%x[params], #0x50]\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "ldr q15, [%x[params], #0x30]\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "srshl v30.4s, v30.4s, v26.4s\n"
- "srshl v29.4s, v29.4s, v26.4s\n"
- "srshl v28.4s, v28.4s, v26.4s\n"
- "ldr q20, [%x[params], #0x70]\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
+ "srshl v31.4s, v31.4s, v8.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v8.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0x30]\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "ldr q30, [%x[params], #0x40]\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "srshl v23.4s, v23.4s, v8.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v8.4s\n"
+ "ldr q1, [%x[params], #0x70]\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "movi v22.4s, #0x0\n"
- ".inst 0x6e979596 // udot v22.4s, v12.16b, v23.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s31, [x25, x27]\n"
- "ldr q26, [%x[params], #0x20]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- ".inst 0x6e939596 // udot v22.4s, v12.16b, v19.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s30, [x24, x27]\n"
- "mov v6.16b, v22.16b\n .inst 0x6e989586 // udot v6.4s, v12.16b, v24.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s29, [x23, x27]\n"
- "mov v30.16b, v26.16b\n"
- ".inst 0x6e999596 // udot v22.4s, v12.16b, v25.16b\n"
- "str s28, [x22, x27]\n"
- "mov v29.16b, v26.16b\n"
- "mov v21.16b, v26.16b\n"
- ".inst 0x6e9995fa // udot v26.4s, v15.16b, v25.16b\n"
- ".inst 0x6e9795fd // udot v29.4s, v15.16b, v23.16b\n"
- ".inst 0x6e97965a // udot v26.4s, v18.16b, v23.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- "movi v28.4s, #0x0\n"
- ".inst 0x6e9995fe // udot v30.4s, v15.16b, v25.16b\n"
- ".inst 0x6e9795f5 // udot v21.4s, v15.16b, v23.16b\n"
- ".inst 0x6e97959c // udot v28.4s, v12.16b, v23.16b\n"
- ".inst 0x6e93965d // udot v29.4s, v18.16b, v19.16b\n"
- ".inst 0x6e93977a // udot v26.4s, v27.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x6e97965e // udot v30.4s, v18.16b, v23.16b\n"
- "ldr q4, [x9, x28]\n"
- ".inst 0x6e939655 // udot v21.4s, v18.16b, v19.16b\n"
- "mls v26.4s, v22.4s, v16.4s\n"
- ".inst 0x6e93959c // udot v28.4s, v12.16b, v19.16b\n"
- ".inst 0x6e98977d // udot v29.4s, v27.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x6e93977e // udot v30.4s, v27.16b, v19.16b\n"
- ".inst 0x6e989775 // udot v21.4s, v27.16b, v24.16b\n"
- "sqrdmulh v26.4s, v26.4s, v1.4s\n"
- "mov v17.16b, v28.16b\n .inst 0x6e989591 // udot v17.4s, v12.16b, v24.16b\n"
- ".inst 0x6e99959c // udot v28.4s, v12.16b, v25.16b\n"
- "ldr q31, [x14, x28]\n"
- "mls v30.4s, v28.4s, v16.4s\n"
- "mls v29.4s, v6.4s, v16.4s\n"
- "mls v21.4s, v17.4s, v16.4s\n"
- "and v17.16b, v26.16b, v20.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v1.4s\n"
- "sqrdmulh v29.4s, v29.4s, v1.4s\n"
- "sqrdmulh v21.4s, v21.4s, v1.4s\n"
- "ldr q27, [%x[params], #0xc0]\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "and v18.16b, v30.16b, v20.16b\n"
- "and v6.16b, v29.16b, v20.16b\n"
- "and v17.16b, v21.16b, v20.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v26.4s, v26.4s, v20.4s\n"
- "sqadd v30.4s, v30.4s, v18.4s\n"
- "ldr q28, [%x[params], #0xa0]\n"
- "sqadd v29.4s, v29.4s, v6.4s\n"
- "ldr q24, [%x[params], #0xb0]\n"
- "sqadd v21.4s, v21.4s, v17.4s\n"
- "ldr q15, [%x[params], #0x90]\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v21.4s, v21.4s, v20.4s\n"
- "ldr q1, [%x[params], #0xd0]\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
"smin v26.4s, v26.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s31, [x11, x12]\n"
+ "ldr q31, [%x[params], #0x20]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "movi v22.4s, #0x0\n"
- ".inst 0x6e839596 // udot v22.4s, v12.16b, v3.16b\n"
- ".inst 0x6e809596 // udot v22.4s, v12.16b, v0.16b\n"
+ "str s3, [x9, x12]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x27]\n"
- "ldr q26, [%x[params], #0x80]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "mov v18.16b, v22.16b\n .inst 0x6e829592 // udot v18.4s, v12.16b, v2.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s30, [x24, x27]\n"
- ".inst 0x6e879596 // udot v22.4s, v12.16b, v7.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s29, [x23, x27]\n"
- "mov v6.16b, v26.16b\n"
- "str s21, [x22, x27]\n"
- "mov v25.16b, v26.16b\n"
- "mov v20.16b, v26.16b\n"
- ".inst 0x6e8795fa // udot v26.4s, v15.16b, v7.16b\n"
- ".inst 0x6e8395f9 // udot v25.4s, v15.16b, v3.16b\n"
- ".inst 0x6e83979a // udot v26.4s, v28.16b, v3.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "movi v23.4s, #0x0\n"
- ".inst 0x6e8795e6 // udot v6.4s, v15.16b, v7.16b\n"
- ".inst 0x6e8395f4 // udot v20.4s, v15.16b, v3.16b\n"
- ".inst 0x6e839597 // udot v23.4s, v12.16b, v3.16b\n"
- ".inst 0x6e809799 // udot v25.4s, v28.16b, v0.16b\n"
- ".inst 0x6e80971a // udot v26.4s, v24.16b, v0.16b\n"
- "ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x6e839786 // udot v6.4s, v28.16b, v3.16b\n"
- "ldr q19, [x26, x28]\n"
- ".inst 0x6e809794 // udot v20.4s, v28.16b, v0.16b\n"
- "mls v26.4s, v22.4s, v16.4s\n"
- ".inst 0x6e809597 // udot v23.4s, v12.16b, v0.16b\n"
- ".inst 0x6e829719 // udot v25.4s, v24.16b, v2.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "mov v8.16b, v31.16b\n"
+ "str s26, [x10, x12]\n"
+ "mov v16.16b, v31.16b\n"
+ "str s23, [x28, x12]\n"
+ "mov v26.16b, v31.16b\n"
+ ".inst 0x6e82963f // udot v31.4s, v17.16b, v2.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x6e959628 // udot v8.4s, v17.16b, v21.16b\n"
"ext v2.16b, v2.16b, v2.16b, #0x1\n"
- ".inst 0x6e809706 // udot v6.4s, v24.16b, v0.16b\n"
- ".inst 0x6e829714 // udot v20.4s, v24.16b, v2.16b\n"
- "sqrdmulh v26.4s, v26.4s, v27.4s\n"
- "mov v17.16b, v23.16b\n .inst 0x6e829591 // udot v17.4s, v12.16b, v2.16b\n"
- ".inst 0x6e879597 // udot v23.4s, v12.16b, v7.16b\n"
- "ldr q21, [x13, x28]\n"
- "mls v6.4s, v23.4s, v16.4s\n"
- "mls v25.4s, v18.4s, v16.4s\n"
- "mls v20.4s, v17.4s, v16.4s\n"
- "and v17.16b, v26.16b, v1.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v6.4s, v6.4s, v27.4s\n"
- "sqrdmulh v25.4s, v25.4s, v27.4s\n"
- "sqrdmulh v20.4s, v20.4s, v27.4s\n"
- "ldr q15, [%x[params], #0x120]\n"
- "sqadd v26.4s, v26.4s, v17.4s\n"
- "and v18.16b, v6.16b, v1.16b\n"
- "and v22.16b, v25.16b, v1.16b\n"
- "and v17.16b, v20.16b, v1.16b\n"
+ ".inst 0x6e9597df // udot v31.4s, v30.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x6e829630 // udot v16.4s, v17.16b, v2.16b\n"
+ ".inst 0x6e95963a // udot v26.4s, v17.16b, v21.16b\n"
+ ".inst 0x6e9595f4 // udot v20.4s, v15.16b, v21.16b\n"
+ ".inst 0x6e9d97c8 // udot v8.4s, v30.16b, v29.16b\n"
+ ".inst 0x6e9d94bf // udot v31.4s, v5.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x6e9597d0 // udot v16.4s, v30.16b, v21.16b\n"
+ "ldr q3, [x24, x13]\n"
+ ".inst 0x6e9d97da // udot v26.4s, v30.16b, v29.16b\n"
+ ".inst 0x6e9d95f4 // udot v20.4s, v15.16b, v29.16b\n"
+ ".inst 0x6e9294a8 // udot v8.4s, v5.16b, v18.16b\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ "mls v31.4s, v19.4s, v24.4s\n"
+ "movi v23.4s, #0x0\n"
+ ".inst 0x6e9d94b0 // udot v16.4s, v5.16b, v29.16b\n"
+ ".inst 0x6e9294ba // udot v26.4s, v5.16b, v18.16b\n"
+ "mov v17.16b, v20.16b\n .inst 0x6e9295f1 // udot v17.4s, v15.16b, v18.16b\n"
+ ".inst 0x6e8295f4 // udot v20.4s, v15.16b, v2.16b\n"
+ "ldr q2, [x14, x13]\n"
+ ".inst 0x6e9c95f7 // udot v23.4s, v15.16b, v28.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v10.4s\n"
+ "mls v8.4s, v22.4s, v24.4s\n"
+ "mls v26.4s, v17.4s, v24.4s\n"
+ "and v18.16b, v31.16b, v1.16b\n"
+ "mls v16.4s, v20.4s, v24.4s\n"
+ "movi v21.4s, #0x0\n"
+ "sqrdmulh v8.4s, v8.4s, v10.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v10.4s\n"
+ ".inst 0x6e9b95f7 // udot v23.4s, v15.16b, v27.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v22.4s, v22.4s, #0x1f\n"
+ "sqrdmulh v16.4s, v16.4s, v10.4s\n"
+ "ldr q13, [%x[params], #0xc0]\n"
+ "and v17.16b, v8.16b, v1.16b\n"
+ "sqadd v31.4s, v31.4s, v18.4s\n"
+ "and v20.16b, v26.16b, v1.16b\n"
+ "and v10.16b, v16.16b, v1.16b\n"
+ "mov v19.16b, v23.16b\n .inst 0x6e8e95f3 // udot v19.4s, v15.16b, v14.16b\n"
+ ".inst 0x6e8095f7 // udot v23.4s, v15.16b, v0.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v31.4s, v31.4s, v1.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "ldr q30, [%x[params], #0xb0]\n"
+ "sqadd v16.4s, v16.4s, v10.4s\n"
+ "ldr q17, [%x[params], #0xa0]\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v20.4s\n"
+ "ldr q20, [%x[params], #0x90]\n"
+ "srshl v8.4s, v8.4s, v1.4s\n"
+ "srshl v16.4s, v16.4s, v1.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
"srshl v26.4s, v26.4s, v1.4s\n"
- "sqadd v6.4s, v6.4s, v18.4s\n"
- "ldr q30, [%x[params], #0x100]\n"
- "sqadd v25.4s, v25.4s, v22.4s\n"
+ "ldr q22, [%x[params], #0xd0]\n"
+ "add v8.4s, v8.4s, v12.4s\n"
+ "add v16.4s, v16.4s, v12.4s\n"
+ "smin v31.4s, v31.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smax v8.4s, v8.4s, v7.4s\n"
+ "smax v16.4s, v16.4s, v7.4s\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smin v8.4s, v8.4s, v11.4s\n"
+ "smin v16.4s, v16.4s, v11.4s\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v16.16b, v16.16b, v16.16b\n"
+ "str s31, [x11, x12]\n"
+ "ldr q10, [%x[params], #0x80]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v16.16b, v16.16b, v16.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s16, [x10, x12]\n"
+ "mov v18.16b, v10.16b\n"
+ "str s8, [x9, x12]\n"
+ "mov v8.16b, v10.16b\n"
+ "str s26, [x28, x12]\n"
+ "mov v26.16b, v10.16b\n"
+ ".inst 0x6e80968a // udot v10.4s, v20.16b, v0.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x6e9c9688 // udot v8.4s, v20.16b, v28.16b\n"
+ "ext v0.16b, v0.16b, v0.16b, #0x1\n"
+ ".inst 0x6e9c962a // udot v10.4s, v17.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x6e809692 // udot v18.4s, v20.16b, v0.16b\n"
+ ".inst 0x6e9c969a // udot v26.4s, v20.16b, v28.16b\n"
+ ".inst 0x6e9c95f5 // udot v21.4s, v15.16b, v28.16b\n"
+ ".inst 0x6e9b9628 // udot v8.4s, v17.16b, v27.16b\n"
+ ".inst 0x6e9b97ca // udot v10.4s, v30.16b, v27.16b\n"
+ "ext v27.16b, v27.16b, v27.16b, #0x1\n"
+ ".inst 0x6e9c9632 // udot v18.4s, v17.16b, v28.16b\n"
+ "ldr q28, [x23, x13]\n"
+ ".inst 0x6e9b963a // udot v26.4s, v17.16b, v27.16b\n"
+ ".inst 0x6e9b95f5 // udot v21.4s, v15.16b, v27.16b\n"
+ ".inst 0x6e8e97c8 // udot v8.4s, v30.16b, v14.16b\n"
+ "ext v14.16b, v14.16b, v14.16b, #0x1\n"
+ "mls v10.4s, v23.4s, v24.4s\n"
+ "movi v1.4s, #0x0\n"
+ ".inst 0x6e9b97d2 // udot v18.4s, v30.16b, v27.16b\n"
+ ".inst 0x6e8e97da // udot v26.4s, v30.16b, v14.16b\n"
+ "mov v16.16b, v21.16b\n .inst 0x6e8e95f0 // udot v16.4s, v15.16b, v14.16b\n"
+ ".inst 0x6e8095f5 // udot v21.4s, v15.16b, v0.16b\n"
+ "ldr q29, [x27, x13]\n"
+ ".inst 0x6e8495e1 // udot v1.4s, v15.16b, v4.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v13.4s\n"
+ "mls v8.4s, v19.4s, v24.4s\n"
+ "mls v26.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v10.16b, v22.16b\n"
+ "mls v18.4s, v21.4s, v24.4s\n"
+ "movi v5.4s, #0x0\n"
+ "sqrdmulh v8.4s, v8.4s, v13.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v13.4s\n"
+ ".inst 0x6e8695e1 // udot v1.4s, v15.16b, v6.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v18.4s, v18.4s, v13.4s\n"
+ "ldr q30, [%x[params], #0x120]\n"
+ "and v17.16b, v8.16b, v22.16b\n"
+ "sqadd v10.4s, v10.4s, v16.4s\n"
+ "and v20.16b, v26.16b, v22.16b\n"
+ "and v16.16b, v18.16b, v22.16b\n"
+ "mov v19.16b, v1.16b\n .inst 0x6e9995f3 // udot v19.4s, v15.16b, v25.16b\n"
+ ".inst 0x6e8995e1 // udot v1.4s, v15.16b, v9.16b\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v10.4s, v10.4s, v22.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
"ldr q27, [%x[params], #0x110]\n"
- "sqadd v20.4s, v20.4s, v17.4s\n"
- "ldr q24, [%x[params], #0xf0]\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "srshl v6.4s, v6.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v20.4s, v20.4s, v1.4s\n"
- "ldr q23, [%x[params], #0x130]\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "add v6.4s, v6.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
+ "sqadd v18.4s, v18.4s, v16.4s\n"
+ "ldr q17, [%x[params], #0x100]\n"
+ "add v10.4s, v10.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v20.4s\n"
+ "ldr q16, [%x[params], #0xf0]\n"
+ "srshl v8.4s, v8.4s, v22.4s\n"
+ "srshl v18.4s, v18.4s, v22.4s\n"
+ "smax v10.4s, v10.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v22.4s\n"
+ "ldr q31, [%x[params], #0x130]\n"
+ "add v8.4s, v8.4s, v12.4s\n"
+ "add v18.4s, v18.4s, v12.4s\n"
+ "smin v10.4s, v10.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smax v8.4s, v8.4s, v7.4s\n"
+ "smax v18.4s, v18.4s, v7.4s\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smin v8.4s, v8.4s, v11.4s\n"
+ "smin v18.4s, v18.4s, v11.4s\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
"smin v26.4s, v26.4s, v11.4s\n"
- "smax v6.4s, v6.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v6.4s, v6.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
+ "str s10, [x11, x12]\n"
+ "ldr q0, [%x[params], #0xe0]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "movi v0.4s, #0x0\n"
- ".inst 0x6e8a9580 // udot v0.4s, v12.16b, v10.16b\n"
- ".inst 0x6e859580 // udot v0.4s, v12.16b, v5.16b\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "str s26, [x25, x27]\n"
- "ldr q28, [%x[params], #0xe0]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v22.16b, v0.16b\n .inst 0x6e899596 // udot v22.4s, v12.16b, v9.16b\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s6, [x24, x27]\n"
- ".inst 0x6e889580 // udot v0.4s, v12.16b, v8.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s25, [x23, x27]\n"
- "mov v29.16b, v28.16b\n"
- "str s20, [x22, x27]\n"
- "mov v25.16b, v28.16b\n"
- "mov v7.16b, v28.16b\n"
- ".inst 0x6e88971c // udot v28.4s, v24.16b, v8.16b\n"
- ".inst 0x6e8a9719 // udot v25.4s, v24.16b, v10.16b\n"
- ".inst 0x6e8a97dc // udot v28.4s, v30.16b, v10.16b\n"
- "ext v8.16b, v8.16b, v8.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e88971d // udot v29.4s, v24.16b, v8.16b\n"
- ".inst 0x6e8a9707 // udot v7.4s, v24.16b, v10.16b\n"
- ".inst 0x6e8a9591 // udot v17.4s, v12.16b, v10.16b\n"
- ".inst 0x6e8597d9 // udot v25.4s, v30.16b, v5.16b\n"
- ".inst 0x6e85977c // udot v28.4s, v27.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x6e8a97dd // udot v29.4s, v30.16b, v10.16b\n"
- "ldr q10, [x21, x28]\n"
- ".inst 0x6e8597c7 // udot v7.4s, v30.16b, v5.16b\n"
- "mls v28.4s, v0.4s, v16.4s\n"
- ".inst 0x6e859591 // udot v17.4s, v12.16b, v5.16b\n"
- ".inst 0x6e899779 // udot v25.4s, v27.16b, v9.16b\n"
+ "str s18, [x10, x12]\n"
+ "mov v22.16b, v0.16b\n"
+ "str s8, [x9, x12]\n"
+ "mov v23.16b, v0.16b\n"
+ "str s26, [x28, x12]\n"
+ "mov v14.16b, v0.16b\n"
+ ".inst 0x6e899600 // udot v0.4s, v16.16b, v9.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x6e849617 // udot v23.4s, v16.16b, v4.16b\n"
"ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x6e85977d // udot v29.4s, v27.16b, v5.16b\n"
- ".inst 0x6e899767 // udot v7.4s, v27.16b, v9.16b\n"
- "sqrdmulh v28.4s, v28.4s, v15.4s\n"
- "mov v18.16b, v17.16b\n .inst 0x6e899592 // udot v18.4s, v12.16b, v9.16b\n"
- ".inst 0x6e889591 // udot v17.4s, v12.16b, v8.16b\n"
- "ldr q8, [x12, x28]\n"
- "mls v29.4s, v17.4s, v16.4s\n"
- "mls v25.4s, v22.4s, v16.4s\n"
- "mls v7.4s, v18.4s, v16.4s\n"
- "and v17.16b, v28.16b, v23.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v29.4s, v29.4s, v15.4s\n"
- "sqrdmulh v25.4s, v25.4s, v15.4s\n"
- "sqrdmulh v7.4s, v7.4s, v15.4s\n"
- "ldr q15, [x15, x28]\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "ldp x21, x20, [%x[inptrs], #0x40]\n"
- "ldr q22, [x21, x28]\n"
- "ldr q3, [x20, x28]\n"
- "and v24.16b, v29.16b, v23.16b\n"
- "and v20.16b, v25.16b, v23.16b\n"
- "and v17.16b, v7.16b, v23.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
+ ".inst 0x6e849620 // udot v0.4s, v17.16b, v4.16b\n"
+ "ext v4.16b, v4.16b, v4.16b, #0x1\n"
+ ".inst 0x6e899616 // udot v22.4s, v16.16b, v9.16b\n"
+ ".inst 0x6e84960e // udot v14.4s, v16.16b, v4.16b\n"
+ ".inst 0x6e8495e5 // udot v5.4s, v15.16b, v4.16b\n"
+ ".inst 0x6e869637 // udot v23.4s, v17.16b, v6.16b\n"
+ ".inst 0x6e869760 // udot v0.4s, v27.16b, v6.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ ".inst 0x6e849636 // udot v22.4s, v17.16b, v4.16b\n"
+ "ldr q4, [x22, x13]\n"
+ ".inst 0x6e86962e // udot v14.4s, v17.16b, v6.16b\n"
+ ".inst 0x6e8695e5 // udot v5.4s, v15.16b, v6.16b\n"
+ ".inst 0x6e999777 // udot v23.4s, v27.16b, v25.16b\n"
+ "ext v25.16b, v25.16b, v25.16b, #0x1\n"
+ "mls v0.4s, v1.4s, v24.4s\n"
+ ".inst 0x6e869776 // udot v22.4s, v27.16b, v6.16b\n"
+ ".inst 0x6e99976e // udot v14.4s, v27.16b, v25.16b\n"
+ "mov v17.16b, v5.16b\n .inst 0x6e9995f1 // udot v17.4s, v15.16b, v25.16b\n"
+ ".inst 0x6e8995e5 // udot v5.4s, v15.16b, v9.16b\n"
+ "ldr q9, [x26, x13]\n"
+ "sqrdmulh v0.4s, v0.4s, v30.4s\n"
+ "mls v23.4s, v19.4s, v24.4s\n"
+ "and v16.16b, v0.16b, v31.16b\n"
+ "mls v22.4s, v5.4s, v24.4s\n"
+ "mls v14.4s, v17.4s, v24.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v23.4s, v23.4s, v30.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v30.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v30.4s\n"
+ "ldr q13, [x15, x13]\n"
+ "ldp x23, x22, [%x[inptrs], #0x40]\n"
"ldp x21, x20, [%x[inptrs], #0x50]\n"
- "ldr q2, [x21, x28]\n"
- "ldr q5, [x20, x28]\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v28.4s, v28.4s, v23.4s\n"
- "sqadd v29.4s, v29.4s, v24.4s\n"
- "ldr q6, [%x[params], #0x160]\n"
- "sqadd v25.4s, v25.4s, v20.4s\n"
- "ldr q20, [%x[params], #0x170]\n"
- "sqadd v7.4s, v7.4s, v17.4s\n"
- "ldr q1, [%x[params], #0x150]\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "srshl v29.4s, v29.4s, v23.4s\n"
- "srshl v25.4s, v25.4s, v23.4s\n"
- "srshl v7.4s, v7.4s, v23.4s\n"
- "ldr q26, [x10, x28]\n"
- "ldp x21, x20, [%x[inptrs], #0x60]\n"
- "ldr q27, [x21, x28]\n"
- "ldr q30, [x20, x28]\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v7.4s, v7.4s, v14.4s\n"
+ "sqadd v0.4s, v0.4s, v16.4s\n"
+ "and v19.16b, v23.16b, v31.16b\n"
+ "ldr q10, [x23, x13]\n"
+ "ldr q26, [x22, x13]\n"
+ "and v21.16b, v22.16b, v31.16b\n"
+ "and v16.16b, v14.16b, v31.16b\n"
+ "ldr q20, [x21, x13]\n"
+ "ldr q6, [x20, x13]\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "srshl v0.4s, v0.4s, v31.4s\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v19.4s\n"
+ "ldr q17, [%x[params], #0x170]\n"
+ "add v0.4s, v0.4s, v12.4s\n"
+ "sqadd v22.4s, v22.4s, v21.4s\n"
+ "ldr q8, [%x[params], #0x160]\n"
+ "sqadd v14.4s, v14.4s, v16.4s\n"
+ "ldr q30, [%x[params], #0x150]\n"
+ "srshl v23.4s, v23.4s, v31.4s\n"
+ "smax v0.4s, v0.4s, v7.4s\n"
+ "srshl v22.4s, v22.4s, v31.4s\n"
+ "srshl v14.4s, v14.4s, v31.4s\n"
+ "ldr q1, [x25, x13]\n"
+ "ldp x23, x22, [%x[inptrs], #0x60]\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "ldr q23, [x21, x28]\n"
- "ldr q9, [x20, x28]\n"
- "smin v28.4s, v28.4s, v11.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
"ldp x15, x14, [%x[inptrs], #0x0]\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v7.4s, v7.4s, v13.4s\n"
- "ldp x13, x12, [%x[inptrs], #0x10]\n"
- "ldp x10, x9, [%x[inptrs], #0x20]\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v25.4s, v25.4s, v11.4s\n"
- "ldp x26, x21, [%x[inptrs], #0x30]\n"
- "smin v7.4s, v7.4s, v11.4s\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s28, [x25, x27]\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "zip2 v17.16b, v15.16b, v21.16b\n"
- "zip1 v15.16b, v15.16b, v21.16b\n"
- "zip1 v18.16b, v31.16b, v8.16b\n"
- "zip2 v8.16b, v31.16b, v8.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s29, [x24, x27]\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "str s25, [x23, x27]\n"
- "zip2 v25.16b, v15.16b, v18.16b\n"
- "str s7, [x22, x27]\n"
- "zip1 v15.16b, v15.16b, v18.16b\n"
- "zip1 v7.16b, v17.16b, v8.16b\n"
- "add x27, x27, #0x4\n"
- "zip2 v8.16b, v17.16b, v8.16b\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "smin v0.4s, v0.4s, v11.4s\n"
+ "ldp x27, x26, [%x[inptrs], #0x10]\n"
+ "ldr q5, [x23, x13]\n"
+ "ldr q27, [x22, x13]\n"
+ "add v22.4s, v22.4s, v12.4s\n"
+ "add v14.4s, v14.4s, v12.4s\n"
+ "ldp x25, x24, [%x[inptrs], #0x20]\n"
+ "ldr q16, [x21, x13]\n"
+ "ldr q25, [x20, x13]\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "ldp x23, x22, [%x[inptrs], #0x30]\n"
+ "smax v22.4s, v22.4s, v7.4s\n"
+ "smax v14.4s, v14.4s, v7.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "smin v22.4s, v22.4s, v11.4s\n"
+ "smin v14.4s, v14.4s, v11.4s\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s0, [x11, x12]\n"
+ "zip2 v18.16b, v13.16b, v29.16b\n"
+ "zip1 v13.16b, v13.16b, v29.16b\n"
+ "zip1 v0.16b, v2.16b, v9.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v14.16b, v14.16b, v14.16b\n"
+ "zip2 v9.16b, v2.16b, v9.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "zip2 v2.16b, v13.16b, v0.16b\n"
+ "zip1 v13.16b, v13.16b, v0.16b\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v14.16b, v14.16b, v14.16b\n"
+ "str s23, [x9, x12]\n"
+ "zip1 v0.16b, v18.16b, v9.16b\n"
+ "zip2 v9.16b, v18.16b, v9.16b\n"
"ldr q31, [%x[params], #0x140]\n"
- "zip2 v29.16b, v26.16b, v19.16b\n"
"add %x[params], %x[params], #0x180\n"
- "zip1 v26.16b, v26.16b, v19.16b\n"
- "zip1 v28.16b, v4.16b, v10.16b\n"
- "zip2 v10.16b, v4.16b, v10.16b\n"
- "zip2 v24.16b, v22.16b, v2.16b\n"
- "zip1 v22.16b, v22.16b, v2.16b\n"
- "zip1 v21.16b, v3.16b, v5.16b\n"
- "zip2 v5.16b, v3.16b, v5.16b\n"
- "zip2 v18.16b, v27.16b, v23.16b\n"
- "zip1 v27.16b, v27.16b, v23.16b\n"
- "zip1 v17.16b, v30.16b, v9.16b\n"
- "zip2 v9.16b, v30.16b, v9.16b\n"
- "zip2 v23.16b, v26.16b, v28.16b\n"
- "zip1 v26.16b, v26.16b, v28.16b\n"
- "zip1 v3.16b, v29.16b, v10.16b\n"
- "zip2 v10.16b, v29.16b, v10.16b\n"
- "zip2 v19.16b, v22.16b, v21.16b\n"
- "zip1 v22.16b, v22.16b, v21.16b\n"
- "zip1 v0.16b, v24.16b, v5.16b\n"
- "zip2 v5.16b, v24.16b, v5.16b\n"
- "zip2 v24.16b, v27.16b, v17.16b\n"
- "zip1 v27.16b, v27.16b, v17.16b\n"
- "zip1 v2.16b, v18.16b, v9.16b\n"
- "zip2 v9.16b, v18.16b, v9.16b\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
+ "zip2 v23.16b, v10.16b, v20.16b\n"
+ "zip1 v10.16b, v10.16b, v20.16b\n"
+ "str s22, [x10, x12]\n"
+ "str s14, [x28, x12]\n"
+ "zip2 v22.16b, v1.16b, v28.16b\n"
+ "zip1 v1.16b, v1.16b, v28.16b\n"
+ "add x12, x12, #0x4\n"
+ "zip1 v20.16b, v3.16b, v4.16b\n"
+ "zip2 v4.16b, v3.16b, v4.16b\n"
+ "zip1 v14.16b, v26.16b, v6.16b\n"
+ "zip2 v6.16b, v26.16b, v6.16b\n"
+ "zip2 v19.16b, v5.16b, v16.16b\n"
+ "zip1 v5.16b, v5.16b, v16.16b\n"
+ "zip1 v16.16b, v27.16b, v25.16b\n"
+ "zip2 v25.16b, v27.16b, v25.16b\n"
+ "zip2 v21.16b, v1.16b, v20.16b\n"
+ "zip1 v1.16b, v1.16b, v20.16b\n"
+ "zip1 v28.16b, v22.16b, v4.16b\n"
+ "zip2 v4.16b, v22.16b, v4.16b\n"
+ "zip2 v29.16b, v10.16b, v14.16b\n"
+ "zip1 v10.16b, v10.16b, v14.16b\n"
+ "zip1 v27.16b, v23.16b, v6.16b\n"
+ "zip2 v6.16b, v23.16b, v6.16b\n"
+ "zip2 v18.16b, v5.16b, v16.16b\n"
+ "zip1 v5.16b, v5.16b, v16.16b\n"
+ "zip1 v14.16b, v19.16b, v25.16b\n"
+ "zip2 v25.16b, v19.16b, v25.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
"bgt 1b\n"
"2:" // Detached iteration
- "movi v21.4s, #0x0\n"
- ".inst 0x6e9a9595 // udot v21.4s, v12.16b, v26.16b\n"
- ".inst 0x6e8f943f // udot v31.4s, v1.16b, v15.16b\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x6e8d97df // udot v31.4s, v30.16b, v13.16b\n"
+ ".inst 0x6e8197c3 // udot v3.4s, v30.16b, v1.16b\n"
"tst %x[n_channels], #0xf\n"
- ".inst 0x6e969595 // udot v21.4s, v12.16b, v22.16b\n"
- ".inst 0x6e9a943d // udot v29.4s, v1.16b, v26.16b\n"
- "movi v18.4s, #0x0\n"
- "add x28, x28, #0x10\n"
- ".inst 0x6e9a94df // udot v31.4s, v6.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "mov v17.16b, v21.16b\n .inst 0x6e9b9591 // udot v17.4s, v12.16b, v27.16b\n"
- ".inst 0x6e8f9595 // udot v21.4s, v12.16b, v15.16b\n"
- "ext v15.16b, v15.16b, v15.16b, #0x1\n"
- ".inst 0x6e9a9592 // udot v18.4s, v12.16b, v26.16b\n"
- ".inst 0x6e9694dd // udot v29.4s, v6.16b, v22.16b\n"
- ".inst 0x6e96969f // udot v31.4s, v20.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x6e8f943e // udot v30.4s, v1.16b, v15.16b\n"
- ".inst 0x6e9a943c // udot v28.4s, v1.16b, v26.16b\n"
- "mls v31.4s, v21.4s, v16.4s\n"
- ".inst 0x6e969592 // udot v18.4s, v12.16b, v22.16b\n"
- ".inst 0x6e9b969d // udot v29.4s, v20.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x6e9a94de // udot v30.4s, v6.16b, v26.16b\n"
- "ldr q4, [%x[params], #0x10]\n"
- ".inst 0x6e9694dc // udot v28.4s, v6.16b, v22.16b\n"
- "mls v29.4s, v17.4s, v16.4s\n"
- "mov v21.16b, v18.16b\n .inst 0x6e9b9595 // udot v21.4s, v12.16b, v27.16b\n"
- ".inst 0x6e8f9592 // udot v18.4s, v12.16b, v15.16b\n"
- "ldr q17, [%x[params], #0x0]\n"
- "sqrdmulh v31.4s, v31.4s, v17.4s\n"
- ".inst 0x6e96969e // udot v30.4s, v20.16b, v22.16b\n"
- ".inst 0x6e9b969c // udot v28.4s, v20.16b, v27.16b\n"
- "mls v30.4s, v18.4s, v16.4s\n"
- "mls v28.4s, v21.4s, v16.4s\n"
- "and v27.16b, v31.16b, v4.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v17.4s\n"
- "sqrdmulh v29.4s, v29.4s, v17.4s\n"
- "sqrdmulh v28.4s, v28.4s, v17.4s\n"
- "ldr q15, [%x[params], #0x60]\n"
- "sqadd v31.4s, v31.4s, v27.4s\n"
- "and v20.16b, v30.16b, v4.16b\n"
- "and v18.16b, v29.16b, v4.16b\n"
- "and v17.16b, v28.16b, v4.16b\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ "movi v20.4s, #0x0\n"
+ "add x13, x13, #0x10\n"
+ ".inst 0x6e8195f3 // udot v19.4s, v15.16b, v1.16b\n"
+ ".inst 0x6e81951f // udot v31.4s, v8.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ ".inst 0x6e8a9503 // udot v3.4s, v8.16b, v10.16b\n"
+ ".inst 0x6e8a95f3 // udot v19.4s, v15.16b, v10.16b\n"
+ ".inst 0x6e8195f4 // udot v20.4s, v15.16b, v1.16b\n"
+ ".inst 0x6e8a963f // udot v31.4s, v17.16b, v10.16b\n"
+ "ext v10.16b, v10.16b, v10.16b, #0x1\n"
+ ".inst 0x6e8197d7 // udot v23.4s, v30.16b, v1.16b\n"
+ "mov v16.16b, v19.16b\n .inst 0x6e8595f0 // udot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x6e8d95f3 // udot v19.4s, v15.16b, v13.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x6e859623 // udot v3.4s, v17.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x6e8a95f4 // udot v20.4s, v15.16b, v10.16b\n"
+ ".inst 0x6e8d97da // udot v26.4s, v30.16b, v13.16b\n"
+ ".inst 0x6e8a9517 // udot v23.4s, v8.16b, v10.16b\n"
+ "mls v31.4s, v19.4s, v24.4s\n"
+ "movi v30.4s, #0x0\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x6e81951a // udot v26.4s, v8.16b, v1.16b\n"
+ "ldr q1, [%x[params], #0x10]\n"
+ "mov v16.16b, v20.16b\n .inst 0x6e8595f0 // udot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x6e8d95f4 // udot v20.4s, v15.16b, v13.16b\n"
+ "ldr q8, [%x[params], #0x0]\n"
+ ".inst 0x6e9595fe // udot v30.4s, v15.16b, v21.16b\n"
+ ".inst 0x6e859637 // udot v23.4s, v17.16b, v5.16b\n"
+ ".inst 0x6e8a963a // udot v26.4s, v17.16b, v10.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v8.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v1.16b\n"
+ ".inst 0x6e9d95fe // udot v30.4s, v15.16b, v29.16b\n"
+ "mls v26.4s, v20.4s, v24.4s\n"
+ "movi v5.4s, #0x0\n"
+ "sqrdmulh v23.4s, v23.4s, v8.4s\n"
+ "and v22.16b, v3.16b, v1.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "ldr q20, [%x[params], #0x60]\n"
+ "mov v19.16b, v30.16b\n .inst 0x6e9295f3 // udot v19.4s, v15.16b, v18.16b\n"
+ ".inst 0x6e8295fe // udot v30.4s, v15.16b, v2.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v1.16b\n"
+ "and v16.16b, v26.16b, v1.16b\n"
+ "sqadd v3.4s, v3.4s, v22.4s\n"
+ "ldr q8, [%x[params], #0x50]\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v4.4s\n"
- "sqadd v30.4s, v30.4s, v20.4s\n"
- "ldr q27, [%x[params], #0x40]\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "ldr q26, [%x[params], #0x50]\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "ldr q6, [%x[params], #0x30]\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
- "srshl v29.4s, v29.4s, v4.4s\n"
- "srshl v28.4s, v28.4s, v4.4s\n"
- "ldr q4, [%x[params], #0x70]\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
+ "srshl v31.4s, v31.4s, v1.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v1.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "ldr q17, [%x[params], #0x30]\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "ldr q16, [%x[params], #0x40]\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "srshl v23.4s, v23.4s, v1.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v1.4s\n"
+ "ldr q22, [%x[params], #0x70]\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "movi v1.4s, #0x0\n"
- ".inst 0x6e979581 // udot v1.4s, v12.16b, v23.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s31, [x25, x27]\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s31, [x11, x12]\n"
"ldr q31, [%x[params], #0x20]\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- ".inst 0x6e939581 // udot v1.4s, v12.16b, v19.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s30, [x24, x27]\n"
- "mov v22.16b, v1.16b\n .inst 0x6e989596 // udot v22.4s, v12.16b, v24.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s29, [x23, x27]\n"
- "mov v29.16b, v31.16b\n"
- ".inst 0x6e999581 // udot v1.4s, v12.16b, v25.16b\n"
- "str s28, [x22, x27]\n"
- "mov v21.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- ".inst 0x6e9994df // udot v31.4s, v6.16b, v25.16b\n"
- ".inst 0x6e9794d5 // udot v21.4s, v6.16b, v23.16b\n"
- ".inst 0x6e97977f // udot v31.4s, v27.16b, v23.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x6e9994dd // udot v29.4s, v6.16b, v25.16b\n"
- ".inst 0x6e9794d4 // udot v20.4s, v6.16b, v23.16b\n"
- ".inst 0x6e979592 // udot v18.4s, v12.16b, v23.16b\n"
- ".inst 0x6e939775 // udot v21.4s, v27.16b, v19.16b\n"
- ".inst 0x6e93975f // udot v31.4s, v26.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x6e97977d // udot v29.4s, v27.16b, v23.16b\n"
- ".inst 0x6e939774 // udot v20.4s, v27.16b, v19.16b\n"
- "mls v31.4s, v1.4s, v16.4s\n"
- ".inst 0x6e939592 // udot v18.4s, v12.16b, v19.16b\n"
- ".inst 0x6e989755 // udot v21.4s, v26.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x6e93975d // udot v29.4s, v26.16b, v19.16b\n"
- ".inst 0x6e989754 // udot v20.4s, v26.16b, v24.16b\n"
- "sqrdmulh v31.4s, v31.4s, v15.4s\n"
- "mov v17.16b, v18.16b\n .inst 0x6e989591 // udot v17.4s, v12.16b, v24.16b\n"
- ".inst 0x6e999592 // udot v18.4s, v12.16b, v25.16b\n"
- "mls v29.4s, v18.4s, v16.4s\n"
- "mls v21.4s, v22.4s, v16.4s\n"
- "mls v20.4s, v17.4s, v16.4s\n"
- "and v17.16b, v31.16b, v4.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v29.4s, v29.4s, v15.4s\n"
- "sqrdmulh v21.4s, v21.4s, v15.4s\n"
- "sqrdmulh v20.4s, v20.4s, v15.4s\n"
- "ldr q27, [%x[params], #0xc0]\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v19.16b, v29.16b, v4.16b\n"
- "and v18.16b, v21.16b, v4.16b\n"
- "and v17.16b, v20.16b, v4.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s3, [x9, x12]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "mov v10.16b, v31.16b\n"
+ "str s26, [x10, x12]\n"
+ "mov v1.16b, v31.16b\n"
+ "str s23, [x28, x12]\n"
+ "mov v26.16b, v31.16b\n"
+ ".inst 0x6e82963f // udot v31.4s, v17.16b, v2.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x6e95962a // udot v10.4s, v17.16b, v21.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x6e95961f // udot v31.4s, v16.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x6e829621 // udot v1.4s, v17.16b, v2.16b\n"
+ ".inst 0x6e95963a // udot v26.4s, v17.16b, v21.16b\n"
+ ".inst 0x6e9595e5 // udot v5.4s, v15.16b, v21.16b\n"
+ ".inst 0x6e9d960a // udot v10.4s, v16.16b, v29.16b\n"
+ ".inst 0x6e9d951f // udot v31.4s, v8.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x6e959601 // udot v1.4s, v16.16b, v21.16b\n"
+ ".inst 0x6e9d961a // udot v26.4s, v16.16b, v29.16b\n"
+ ".inst 0x6e9d95e5 // udot v5.4s, v15.16b, v29.16b\n"
+ ".inst 0x6e92950a // udot v10.4s, v8.16b, v18.16b\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ "mls v31.4s, v30.4s, v24.4s\n"
+ "movi v3.4s, #0x0\n"
+ ".inst 0x6e9d9501 // udot v1.4s, v8.16b, v29.16b\n"
+ ".inst 0x6e92951a // udot v26.4s, v8.16b, v18.16b\n"
+ "mov v16.16b, v5.16b\n .inst 0x6e9295f0 // udot v16.4s, v15.16b, v18.16b\n"
+ ".inst 0x6e8295e5 // udot v5.4s, v15.16b, v2.16b\n"
+ ".inst 0x6e9c95e3 // udot v3.4s, v15.16b, v28.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v20.4s\n"
+ "mls v10.4s, v19.4s, v24.4s\n"
+ "mls v26.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v22.16b\n"
+ "mls v1.4s, v5.4s, v24.4s\n"
+ "movi v2.4s, #0x0\n"
+ "sqrdmulh v10.4s, v10.4s, v20.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v20.4s\n"
+ ".inst 0x6e9b95e3 // udot v3.4s, v15.16b, v27.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v1.4s, v1.4s, v20.4s\n"
+ "ldr q23, [%x[params], #0xc0]\n"
+ "and v17.16b, v10.16b, v22.16b\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v20.16b, v26.16b, v22.16b\n"
+ "and v16.16b, v1.16b, v22.16b\n"
+ "mov v19.16b, v3.16b\n .inst 0x6e8e95f3 // udot v19.4s, v15.16b, v14.16b\n"
+ ".inst 0x6e8095e3 // udot v3.4s, v15.16b, v0.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v4.4s\n"
- "sqadd v29.4s, v29.4s, v19.4s\n"
- "ldr q26, [%x[params], #0xa0]\n"
- "sqadd v21.4s, v21.4s, v18.4s\n"
- "ldr q25, [%x[params], #0xb0]\n"
- "sqadd v20.4s, v20.4s, v17.4s\n"
- "ldr q24, [%x[params], #0x90]\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "srshl v29.4s, v29.4s, v4.4s\n"
- "srshl v21.4s, v21.4s, v4.4s\n"
- "srshl v20.4s, v20.4s, v4.4s\n"
- "ldr q1, [%x[params], #0xd0]\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
+ "srshl v31.4s, v31.4s, v22.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v17.4s\n"
+ "ldr q18, [%x[params], #0xb0]\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "ldr q17, [%x[params], #0xa0]\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v20.4s\n"
+ "ldr q16, [%x[params], #0x90]\n"
+ "srshl v10.4s, v10.4s, v22.4s\n"
+ "srshl v1.4s, v1.4s, v22.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v22.4s\n"
+ "ldr q22, [%x[params], #0xd0]\n"
+ "add v10.4s, v10.4s, v12.4s\n"
+ "add v1.4s, v1.4s, v12.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v21.4s, v21.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smax v10.4s, v10.4s, v7.4s\n"
+ "smax v1.4s, v1.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "movi v23.4s, #0x0\n"
- ".inst 0x6e839597 // udot v23.4s, v12.16b, v3.16b\n"
- ".inst 0x6e809597 // udot v23.4s, v12.16b, v0.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smin v10.4s, v10.4s, v11.4s\n"
+ "smin v1.4s, v1.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s31, [x25, x27]\n"
- "ldr q31, [%x[params], #0x80]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v22.16b, v23.16b\n .inst 0x6e829596 // udot v22.4s, v12.16b, v2.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s29, [x24, x27]\n"
- ".inst 0x6e879597 // udot v23.4s, v12.16b, v7.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s21, [x23, x27]\n"
- "mov v21.16b, v31.16b\n"
- "str s20, [x22, x27]\n"
- "mov v4.16b, v31.16b\n"
- "mov v20.16b, v31.16b\n"
- ".inst 0x6e87971f // udot v31.4s, v24.16b, v7.16b\n"
- ".inst 0x6e839704 // udot v4.4s, v24.16b, v3.16b\n"
- ".inst 0x6e83975f // udot v31.4s, v26.16b, v3.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x6e879715 // udot v21.4s, v24.16b, v7.16b\n"
- ".inst 0x6e839714 // udot v20.4s, v24.16b, v3.16b\n"
- ".inst 0x6e839592 // udot v18.4s, v12.16b, v3.16b\n"
- ".inst 0x6e809744 // udot v4.4s, v26.16b, v0.16b\n"
- ".inst 0x6e80973f // udot v31.4s, v25.16b, v0.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "str s31, [x11, x12]\n"
+ "ldr q21, [%x[params], #0x80]\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "str s1, [x10, x12]\n"
+ "mov v30.16b, v21.16b\n"
+ "str s10, [x9, x12]\n"
+ "mov v20.16b, v21.16b\n"
+ "str s26, [x28, x12]\n"
+ "mov v29.16b, v21.16b\n"
+ ".inst 0x6e809615 // udot v21.4s, v16.16b, v0.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x6e9c9614 // udot v20.4s, v16.16b, v28.16b\n"
"ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x6e839755 // udot v21.4s, v26.16b, v3.16b\n"
- ".inst 0x6e809754 // udot v20.4s, v26.16b, v0.16b\n"
- "mls v31.4s, v23.4s, v16.4s\n"
- ".inst 0x6e809592 // udot v18.4s, v12.16b, v0.16b\n"
- ".inst 0x6e829724 // udot v4.4s, v25.16b, v2.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- ".inst 0x6e809735 // udot v21.4s, v25.16b, v0.16b\n"
- ".inst 0x6e829734 // udot v20.4s, v25.16b, v2.16b\n"
- "sqrdmulh v31.4s, v31.4s, v27.4s\n"
- "mov v17.16b, v18.16b\n .inst 0x6e829591 // udot v17.4s, v12.16b, v2.16b\n"
- ".inst 0x6e879592 // udot v18.4s, v12.16b, v7.16b\n"
- "mls v21.4s, v18.4s, v16.4s\n"
- "mls v4.4s, v22.4s, v16.4s\n"
- "mls v20.4s, v17.4s, v16.4s\n"
- "and v17.16b, v31.16b, v1.16b\n"
+ ".inst 0x6e9c9635 // udot v21.4s, v17.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x6e80961e // udot v30.4s, v16.16b, v0.16b\n"
+ ".inst 0x6e9c961d // udot v29.4s, v16.16b, v28.16b\n"
+ ".inst 0x6e9c95e2 // udot v2.4s, v15.16b, v28.16b\n"
+ ".inst 0x6e9b9634 // udot v20.4s, v17.16b, v27.16b\n"
+ ".inst 0x6e9b9655 // udot v21.4s, v18.16b, v27.16b\n"
+ "ext v27.16b, v27.16b, v27.16b, #0x1\n"
+ ".inst 0x6e9c963e // udot v30.4s, v17.16b, v28.16b\n"
+ ".inst 0x6e9b963d // udot v29.4s, v17.16b, v27.16b\n"
+ ".inst 0x6e9b95e2 // udot v2.4s, v15.16b, v27.16b\n"
+ ".inst 0x6e8e9654 // udot v20.4s, v18.16b, v14.16b\n"
+ "ext v14.16b, v14.16b, v14.16b, #0x1\n"
+ "mls v21.4s, v3.4s, v24.4s\n"
+ "movi v5.4s, #0x0\n"
+ ".inst 0x6e9b965e // udot v30.4s, v18.16b, v27.16b\n"
+ ".inst 0x6e8e965d // udot v29.4s, v18.16b, v14.16b\n"
+ "mov v16.16b, v2.16b\n .inst 0x6e8e95f0 // udot v16.4s, v15.16b, v14.16b\n"
+ ".inst 0x6e8095e2 // udot v2.4s, v15.16b, v0.16b\n"
+ ".inst 0x6e8495e5 // udot v5.4s, v15.16b, v4.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v23.4s\n"
+ "mls v20.4s, v19.4s, v24.4s\n"
+ "mls v29.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v21.16b, v22.16b\n"
+ "mls v30.4s, v2.4s, v24.4s\n"
+ "movi v27.4s, #0x0\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v23.4s\n"
+ ".inst 0x6e8695e5 // udot v5.4s, v15.16b, v6.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v30.4s, v30.4s, v23.4s\n"
+ "ldr q26, [%x[params], #0x120]\n"
+ "and v17.16b, v20.16b, v22.16b\n"
+ "sqadd v21.4s, v21.4s, v16.4s\n"
+ "and v19.16b, v29.16b, v22.16b\n"
+ "and v16.16b, v30.16b, v22.16b\n"
+ "mov v14.16b, v5.16b\n .inst 0x6e9995ee // udot v14.4s, v15.16b, v25.16b\n"
+ ".inst 0x6e8995e5 // udot v5.4s, v15.16b, v9.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v21.4s, v21.4s, v27.4s\n"
- "sqrdmulh v4.4s, v4.4s, v27.4s\n"
- "sqrdmulh v20.4s, v20.4s, v27.4s\n"
- "ldr q30, [%x[params], #0x120]\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v19.16b, v21.16b, v1.16b\n"
- "and v18.16b, v4.16b, v1.16b\n"
- "and v17.16b, v20.16b, v1.16b\n"
+ "srshl v21.4s, v21.4s, v22.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v19.4s, v19.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "sqadd v21.4s, v21.4s, v19.4s\n"
- "ldr q29, [%x[params], #0x100]\n"
- "sqadd v4.4s, v4.4s, v18.4s\n"
- "ldr q28, [%x[params], #0x110]\n"
"sqadd v20.4s, v20.4s, v17.4s\n"
- "ldr q27, [%x[params], #0xf0]\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v4.4s, v4.4s, v1.4s\n"
- "srshl v20.4s, v20.4s, v1.4s\n"
- "ldr q26, [%x[params], #0x130]\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v4.4s, v4.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "smin v31.4s, v31.4s, v11.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v4.4s, v4.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
+ "ldr q18, [%x[params], #0x110]\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "ldr q17, [%x[params], #0x100]\n"
+ "add v21.4s, v21.4s, v12.4s\n"
+ "sqadd v29.4s, v29.4s, v19.4s\n"
+ "ldr q16, [%x[params], #0xf0]\n"
+ "srshl v20.4s, v20.4s, v22.4s\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "smax v21.4s, v21.4s, v7.4s\n"
+ "srshl v29.4s, v29.4s, v22.4s\n"
+ "ldr q23, [%x[params], #0x130]\n"
+ "add v20.4s, v20.4s, v12.4s\n"
+ "add v30.4s, v30.4s, v12.4s\n"
"smin v21.4s, v21.4s, v11.4s\n"
- "smin v4.4s, v4.4s, v11.4s\n"
+ "add v29.4s, v29.4s, v12.4s\n"
+ "smax v20.4s, v20.4s, v7.4s\n"
+ "smax v30.4s, v30.4s, v7.4s\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "smax v29.4s, v29.4s, v7.4s\n"
"smin v20.4s, v20.4s, v11.4s\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
- "movi v25.4s, #0x0\n"
- ".inst 0x6e8a9599 // udot v25.4s, v12.16b, v10.16b\n"
- ".inst 0x6e859599 // udot v25.4s, v12.16b, v5.16b\n"
- "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "smin v30.4s, v30.4s, v11.4s\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s31, [x25, x27]\n"
- "ldr q24, [%x[params], #0xe0]\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
+ "smin v29.4s, v29.4s, v11.4s\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "mov v23.16b, v25.16b\n .inst 0x6e899597 // udot v23.4s, v12.16b, v9.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "str s21, [x11, x12]\n"
+ "ldr q22, [%x[params], #0xe0]\n"
"add %x[params], %x[params], #0x140\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v4.16b, v4.16b, v4.16b\n"
- "str s21, [x24, x27]\n"
- ".inst 0x6e889599 // udot v25.4s, v12.16b, v8.16b\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s4, [x23, x27]\n"
- "mov v22.16b, v24.16b\n"
- "str s20, [x22, x27]\n"
- "mov v21.16b, v24.16b\n"
- "mov v20.16b, v24.16b\n"
- ".inst 0x6e889778 // udot v24.4s, v27.16b, v8.16b\n"
- ".inst 0x6e8a9775 // udot v21.4s, v27.16b, v10.16b\n"
- ".inst 0x6e8a97b8 // udot v24.4s, v29.16b, v10.16b\n"
- "ext v8.16b, v8.16b, v8.16b, #0x1\n"
- "add x27, x27, #0x4\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x6e889776 // udot v22.4s, v27.16b, v8.16b\n"
- ".inst 0x6e8a9774 // udot v20.4s, v27.16b, v10.16b\n"
- ".inst 0x6e8a9592 // udot v18.4s, v12.16b, v10.16b\n"
- ".inst 0x6e8597b5 // udot v21.4s, v29.16b, v5.16b\n"
- ".inst 0x6e859798 // udot v24.4s, v28.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x6e8a97b6 // udot v22.4s, v29.16b, v10.16b\n"
- ".inst 0x6e8597b4 // udot v20.4s, v29.16b, v5.16b\n"
- "mls v24.4s, v25.4s, v16.4s\n"
- ".inst 0x6e859592 // udot v18.4s, v12.16b, v5.16b\n"
- ".inst 0x6e899795 // udot v21.4s, v28.16b, v9.16b\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s20, [x9, x12]\n"
+ "mov v21.16b, v22.16b\n"
+ "str s30, [x10, x12]\n"
+ "mov v20.16b, v22.16b\n"
+ "str s29, [x28, x12]\n"
+ "mov v19.16b, v22.16b\n"
+ ".inst 0x6e899616 // udot v22.4s, v16.16b, v9.16b\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0x6e849615 // udot v21.4s, v16.16b, v4.16b\n"
"ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x6e859796 // udot v22.4s, v28.16b, v5.16b\n"
- ".inst 0x6e899794 // udot v20.4s, v28.16b, v9.16b\n"
- "sqrdmulh v24.4s, v24.4s, v30.4s\n"
- "mov v17.16b, v18.16b\n .inst 0x6e899591 // udot v17.4s, v12.16b, v9.16b\n"
- ".inst 0x6e889592 // udot v18.4s, v12.16b, v8.16b\n"
- "mls v22.4s, v18.4s, v16.4s\n"
- "mls v21.4s, v23.4s, v16.4s\n"
- "mls v20.4s, v17.4s, v16.4s\n"
- "and v17.16b, v24.16b, v26.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v22.4s, v22.4s, v30.4s\n"
- "sqrdmulh v21.4s, v21.4s, v30.4s\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "sqadd v24.4s, v24.4s, v17.4s\n"
- "and v19.16b, v22.16b, v26.16b\n"
- "and v18.16b, v21.16b, v26.16b\n"
- "and v17.16b, v20.16b, v26.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ ".inst 0x6e849636 // udot v22.4s, v17.16b, v4.16b\n"
+ "ext v4.16b, v4.16b, v4.16b, #0x1\n"
+ ".inst 0x6e899614 // udot v20.4s, v16.16b, v9.16b\n"
+ ".inst 0x6e849613 // udot v19.4s, v16.16b, v4.16b\n"
+ ".inst 0x6e8495fb // udot v27.4s, v15.16b, v4.16b\n"
+ ".inst 0x6e869635 // udot v21.4s, v17.16b, v6.16b\n"
+ ".inst 0x6e869656 // udot v22.4s, v18.16b, v6.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ ".inst 0x6e849634 // udot v20.4s, v17.16b, v4.16b\n"
+ ".inst 0x6e869633 // udot v19.4s, v17.16b, v6.16b\n"
+ ".inst 0x6e8695fb // udot v27.4s, v15.16b, v6.16b\n"
+ ".inst 0x6e999655 // udot v21.4s, v18.16b, v25.16b\n"
+ "ext v25.16b, v25.16b, v25.16b, #0x1\n"
+ "mls v22.4s, v5.4s, v24.4s\n"
+ ".inst 0x6e869654 // udot v20.4s, v18.16b, v6.16b\n"
+ ".inst 0x6e999653 // udot v19.4s, v18.16b, v25.16b\n"
+ "mov v17.16b, v27.16b\n .inst 0x6e9995f1 // udot v17.4s, v15.16b, v25.16b\n"
+ ".inst 0x6e8995fb // udot v27.4s, v15.16b, v9.16b\n"
+ "sqrdmulh v22.4s, v22.4s, v26.4s\n"
+ "mls v21.4s, v14.4s, v24.4s\n"
+ "and v16.16b, v22.16b, v23.16b\n"
+ "mls v20.4s, v27.4s, v24.4s\n"
+ "mls v19.4s, v17.4s, v24.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v26.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v26.4s\n"
+ "sqadd v22.4s, v22.4s, v16.4s\n"
+ "and v18.16b, v21.16b, v23.16b\n"
+ "and v17.16b, v20.16b, v23.16b\n"
+ "and v16.16b, v19.16b, v23.16b\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v22.4s, v22.4s, v23.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v22.4s, v22.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v21.4s, v21.4s, v18.4s\n"
+ "add v22.4s, v22.4s, v12.4s\n"
"sqadd v20.4s, v20.4s, v17.4s\n"
- "srshl v24.4s, v24.4s, v26.4s\n"
- "srshl v22.4s, v22.4s, v26.4s\n"
- "srshl v21.4s, v21.4s, v26.4s\n"
- "srshl v20.4s, v20.4s, v26.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v22.4s, v22.4s, v14.4s\n"
- "add v21.4s, v21.4s, v14.4s\n"
- "add v20.4s, v20.4s, v14.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v22.4s, v22.4s, v13.4s\n"
- "smax v21.4s, v21.4s, v13.4s\n"
- "smax v20.4s, v20.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v11.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v21.4s, v21.4s, v23.4s\n"
+ "smax v22.4s, v22.4s, v7.4s\n"
+ "srshl v20.4s, v20.4s, v23.4s\n"
+ "srshl v19.4s, v19.4s, v23.4s\n"
+ "add v21.4s, v21.4s, v12.4s\n"
"smin v22.4s, v22.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v12.4s\n"
+ "add v19.4s, v19.4s, v12.4s\n"
+ "smax v21.4s, v21.4s, v7.4s\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smax v20.4s, v20.4s, v7.4s\n"
+ "smax v19.4s, v19.4s, v7.4s\n"
"smin v21.4s, v21.4s, v11.4s\n"
- "smin v20.4s, v20.4s, v11.4s\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
+ "smin v20.4s, v20.4s, v11.4s\n"
+ "smin v19.4s, v19.4s, v11.4s\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
+ "str s22, [x11, x12]\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x25, x27]\n"
- "uzp1 v22.16b, v22.16b, v22.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "str s22, [x24, x27]\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s21, [x23, x27]\n"
- "str s20, [x22, x27]\n"
- "add x27, x27, #0x4\n"
+ "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "uzp1 v19.16b, v19.16b, v19.16b\n"
+ "str s20, [x10, x12]\n"
+ "str s21, [x9, x12]\n"
+ "str s19, [x28, x12]\n"
+ "add x12, x12, #0x4\n"
"beq 35f\n"
"3:" // Oddments
"and x20, %x[n_channels], #0xf\n"
- "add x15, x15, x28\n"
- "add x14, x14, x28\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "add x10, x10, x28\n"
- "add x9, x9, x28\n"
- "add x26, x26, x28\n"
- "add x21, x21, x28\n"
+ "add x15, x15, x13\n"
+ "add x14, x14, x13\n"
+ "add x27, x27, x13\n"
+ "add x26, x26, x13\n"
+ "add x25, x25, x13\n"
+ "add x24, x24, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"tbz %x[n_channels], #3, 7f\n"
- "ldr d15, [x15], #0x8\n"
- "ldr d25, [x14], #0x8\n"
- "ldr d7, [x13], #0x8\n"
- "ldr d8, [x12], #0x8\n"
- "ldr d26, [x10], #0x8\n"
- "ldr d23, [x9], #0x8\n"
- "ldr d3, [x26], #0x8\n"
- "ldr d10, [x21], #0x8\n"
+ "ldr d13, [x15], #0x8\n"
+ "ldr d2, [x14], #0x8\n"
+ "ldr d0, [x27], #0x8\n"
+ "ldr d9, [x26], #0x8\n"
+ "ldr d1, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d28, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
"tbz %x[n_channels], #2, 5f\n"
- "ld1 { v15.s }[2], [x15], #0x4\n"
- "ld1 { v25.s }[2], [x14], #0x4\n"
- "ld1 { v7.s }[2], [x13], #0x4\n"
- "ld1 { v8.s }[2], [x12], #0x4\n"
- "ld1 { v26.s }[2], [x10], #0x4\n"
- "ld1 { v23.s }[2], [x9], #0x4\n"
- "ld1 { v3.s }[2], [x26], #0x4\n"
- "ld1 { v10.s }[2], [x21], #0x4\n"
+ "ld1 { v13.s }[2], [x15], #0x4\n"
+ "ld1 { v2.s }[2], [x14], #0x4\n"
+ "ld1 { v0.s }[2], [x27], #0x4\n"
+ "ld1 { v9.s }[2], [x26], #0x4\n"
+ "ld1 { v1.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v28.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
"tbz %x[n_channels], #1, 4f\n"
- "ld1 { v15.h }[6], [x15], #0x2\n"
- "ld1 { v25.h }[6], [x14], #0x2\n"
- "ld1 { v7.h }[6], [x13], #0x2\n"
- "ld1 { v8.h }[6], [x12], #0x2\n"
- "ld1 { v26.h }[6], [x10], #0x2\n"
- "ld1 { v23.h }[6], [x9], #0x2\n"
- "ld1 { v3.h }[6], [x26], #0x2\n"
- "ld1 { v10.h }[6], [x21], #0x2\n"
+ "ld1 { v13.h }[6], [x15], #0x2\n"
+ "ld1 { v2.h }[6], [x14], #0x2\n"
+ "ld1 { v0.h }[6], [x27], #0x2\n"
+ "ld1 { v9.h }[6], [x26], #0x2\n"
+ "ld1 { v1.h }[6], [x25], #0x2\n"
+ "ld1 { v21.h }[6], [x24], #0x2\n"
+ "ld1 { v28.h }[6], [x23], #0x2\n"
+ "ld1 { v4.h }[6], [x22], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[14], [x15], #0x1\n"
- "ld1 { v25.b }[14], [x14], #0x1\n"
- "ld1 { v7.b }[14], [x13], #0x1\n"
- "ld1 { v8.b }[14], [x12], #0x1\n"
- "ld1 { v26.b }[14], [x10], #0x1\n"
- "ld1 { v23.b }[14], [x9], #0x1\n"
- "ld1 { v3.b }[14], [x26], #0x1\n"
- "ld1 { v10.b }[14], [x21], #0x1\n"
+ "ld1 { v13.b }[14], [x15], #0x1\n"
+ "ld1 { v2.b }[14], [x14], #0x1\n"
+ "ld1 { v0.b }[14], [x27], #0x1\n"
+ "ld1 { v9.b }[14], [x26], #0x1\n"
+ "ld1 { v1.b }[14], [x25], #0x1\n"
+ "ld1 { v21.b }[14], [x24], #0x1\n"
+ "ld1 { v28.b }[14], [x23], #0x1\n"
+ "ld1 { v4.b }[14], [x22], #0x1\n"
"b 11f\n"
"4:" // Oddments: Load (A): Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[12], [x15], #0x1\n"
- "ld1 { v25.b }[12], [x14], #0x1\n"
- "ld1 { v7.b }[12], [x13], #0x1\n"
- "ld1 { v8.b }[12], [x12], #0x1\n"
- "ld1 { v26.b }[12], [x10], #0x1\n"
- "ld1 { v23.b }[12], [x9], #0x1\n"
- "ld1 { v3.b }[12], [x26], #0x1\n"
- "ld1 { v10.b }[12], [x21], #0x1\n"
+ "ld1 { v13.b }[12], [x15], #0x1\n"
+ "ld1 { v2.b }[12], [x14], #0x1\n"
+ "ld1 { v0.b }[12], [x27], #0x1\n"
+ "ld1 { v9.b }[12], [x26], #0x1\n"
+ "ld1 { v1.b }[12], [x25], #0x1\n"
+ "ld1 { v21.b }[12], [x24], #0x1\n"
+ "ld1 { v28.b }[12], [x23], #0x1\n"
+ "ld1 { v4.b }[12], [x22], #0x1\n"
"b 11f\n"
"5:" // Oddments: Load (A): Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 6f\n"
- "ld1 { v15.h }[4], [x15], #0x2\n"
- "ld1 { v25.h }[4], [x14], #0x2\n"
- "ld1 { v7.h }[4], [x13], #0x2\n"
- "ld1 { v8.h }[4], [x12], #0x2\n"
- "ld1 { v26.h }[4], [x10], #0x2\n"
- "ld1 { v23.h }[4], [x9], #0x2\n"
- "ld1 { v3.h }[4], [x26], #0x2\n"
- "ld1 { v10.h }[4], [x21], #0x2\n"
+ "ld1 { v13.h }[4], [x15], #0x2\n"
+ "ld1 { v2.h }[4], [x14], #0x2\n"
+ "ld1 { v0.h }[4], [x27], #0x2\n"
+ "ld1 { v9.h }[4], [x26], #0x2\n"
+ "ld1 { v1.h }[4], [x25], #0x2\n"
+ "ld1 { v21.h }[4], [x24], #0x2\n"
+ "ld1 { v28.h }[4], [x23], #0x2\n"
+ "ld1 { v4.h }[4], [x22], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[10], [x15], #0x1\n"
- "ld1 { v25.b }[10], [x14], #0x1\n"
- "ld1 { v7.b }[10], [x13], #0x1\n"
- "ld1 { v8.b }[10], [x12], #0x1\n"
- "ld1 { v26.b }[10], [x10], #0x1\n"
- "ld1 { v23.b }[10], [x9], #0x1\n"
- "ld1 { v3.b }[10], [x26], #0x1\n"
- "ld1 { v10.b }[10], [x21], #0x1\n"
+ "ld1 { v13.b }[10], [x15], #0x1\n"
+ "ld1 { v2.b }[10], [x14], #0x1\n"
+ "ld1 { v0.b }[10], [x27], #0x1\n"
+ "ld1 { v9.b }[10], [x26], #0x1\n"
+ "ld1 { v1.b }[10], [x25], #0x1\n"
+ "ld1 { v21.b }[10], [x24], #0x1\n"
+ "ld1 { v28.b }[10], [x23], #0x1\n"
+ "ld1 { v4.b }[10], [x22], #0x1\n"
"b 11f\n"
"6:" // Oddments: Load (A): Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[8], [x15], #0x1\n"
- "ld1 { v25.b }[8], [x14], #0x1\n"
- "ld1 { v7.b }[8], [x13], #0x1\n"
- "ld1 { v8.b }[8], [x12], #0x1\n"
- "ld1 { v26.b }[8], [x10], #0x1\n"
- "ld1 { v23.b }[8], [x9], #0x1\n"
- "ld1 { v3.b }[8], [x26], #0x1\n"
- "ld1 { v10.b }[8], [x21], #0x1\n"
+ "ld1 { v13.b }[8], [x15], #0x1\n"
+ "ld1 { v2.b }[8], [x14], #0x1\n"
+ "ld1 { v0.b }[8], [x27], #0x1\n"
+ "ld1 { v9.b }[8], [x26], #0x1\n"
+ "ld1 { v1.b }[8], [x25], #0x1\n"
+ "ld1 { v21.b }[8], [x24], #0x1\n"
+ "ld1 { v28.b }[8], [x23], #0x1\n"
+ "ld1 { v4.b }[8], [x22], #0x1\n"
"b 11f\n"
"7:" // Oddments: Load (A): Bit 3: Unset
"tbz %x[n_channels], #2, 9f\n"
- "ldr s15, [x15], #0x4\n"
- "ldr s25, [x14], #0x4\n"
- "ldr s7, [x13], #0x4\n"
- "ldr s8, [x12], #0x4\n"
- "ldr s26, [x10], #0x4\n"
- "ldr s23, [x9], #0x4\n"
- "ldr s3, [x26], #0x4\n"
- "ldr s10, [x21], #0x4\n"
+ "ldr s13, [x15], #0x4\n"
+ "ldr s2, [x14], #0x4\n"
+ "ldr s0, [x27], #0x4\n"
+ "ldr s9, [x26], #0x4\n"
+ "ldr s1, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s28, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
"tbz %x[n_channels], #1, 8f\n"
- "ld1 { v15.h }[2], [x15], #0x2\n"
- "ld1 { v25.h }[2], [x14], #0x2\n"
- "ld1 { v7.h }[2], [x13], #0x2\n"
- "ld1 { v8.h }[2], [x12], #0x2\n"
- "ld1 { v26.h }[2], [x10], #0x2\n"
- "ld1 { v23.h }[2], [x9], #0x2\n"
- "ld1 { v3.h }[2], [x26], #0x2\n"
- "ld1 { v10.h }[2], [x21], #0x2\n"
+ "ld1 { v13.h }[2], [x15], #0x2\n"
+ "ld1 { v2.h }[2], [x14], #0x2\n"
+ "ld1 { v0.h }[2], [x27], #0x2\n"
+ "ld1 { v9.h }[2], [x26], #0x2\n"
+ "ld1 { v1.h }[2], [x25], #0x2\n"
+ "ld1 { v21.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[6], [x15], #0x1\n"
- "ld1 { v25.b }[6], [x14], #0x1\n"
- "ld1 { v7.b }[6], [x13], #0x1\n"
- "ld1 { v8.b }[6], [x12], #0x1\n"
- "ld1 { v26.b }[6], [x10], #0x1\n"
- "ld1 { v23.b }[6], [x9], #0x1\n"
- "ld1 { v3.b }[6], [x26], #0x1\n"
- "ld1 { v10.b }[6], [x21], #0x1\n"
+ "ld1 { v13.b }[6], [x15], #0x1\n"
+ "ld1 { v2.b }[6], [x14], #0x1\n"
+ "ld1 { v0.b }[6], [x27], #0x1\n"
+ "ld1 { v9.b }[6], [x26], #0x1\n"
+ "ld1 { v1.b }[6], [x25], #0x1\n"
+ "ld1 { v21.b }[6], [x24], #0x1\n"
+ "ld1 { v28.b }[6], [x23], #0x1\n"
+ "ld1 { v4.b }[6], [x22], #0x1\n"
"b 11f\n"
"8:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[4], [x15], #0x1\n"
- "ld1 { v25.b }[4], [x14], #0x1\n"
- "ld1 { v7.b }[4], [x13], #0x1\n"
- "ld1 { v8.b }[4], [x12], #0x1\n"
- "ld1 { v26.b }[4], [x10], #0x1\n"
- "ld1 { v23.b }[4], [x9], #0x1\n"
- "ld1 { v3.b }[4], [x26], #0x1\n"
- "ld1 { v10.b }[4], [x21], #0x1\n"
+ "ld1 { v13.b }[4], [x15], #0x1\n"
+ "ld1 { v2.b }[4], [x14], #0x1\n"
+ "ld1 { v0.b }[4], [x27], #0x1\n"
+ "ld1 { v9.b }[4], [x26], #0x1\n"
+ "ld1 { v1.b }[4], [x25], #0x1\n"
+ "ld1 { v21.b }[4], [x24], #0x1\n"
+ "ld1 { v28.b }[4], [x23], #0x1\n"
+ "ld1 { v4.b }[4], [x22], #0x1\n"
"b 11f\n"
"9:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 10f\n"
- "ldr h15, [x15], #0x2\n"
- "ldr h25, [x14], #0x2\n"
- "ldr h7, [x13], #0x2\n"
- "ldr h8, [x12], #0x2\n"
- "ldr h26, [x10], #0x2\n"
- "ldr h23, [x9], #0x2\n"
- "ldr h3, [x26], #0x2\n"
- "ldr h10, [x21], #0x2\n"
+ "ldr h13, [x15], #0x2\n"
+ "ldr h2, [x14], #0x2\n"
+ "ldr h0, [x27], #0x2\n"
+ "ldr h9, [x26], #0x2\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h21, [x24], #0x2\n"
+ "ldr h28, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
"tbz %x[n_channels], #0, 11f\n"
- "ld1 { v15.b }[2], [x15], #0x1\n"
- "ld1 { v25.b }[2], [x14], #0x1\n"
- "ld1 { v7.b }[2], [x13], #0x1\n"
- "ld1 { v8.b }[2], [x12], #0x1\n"
- "ld1 { v26.b }[2], [x10], #0x1\n"
- "ld1 { v23.b }[2], [x9], #0x1\n"
- "ld1 { v3.b }[2], [x26], #0x1\n"
- "ld1 { v10.b }[2], [x21], #0x1\n"
+ "ld1 { v13.b }[2], [x15], #0x1\n"
+ "ld1 { v2.b }[2], [x14], #0x1\n"
+ "ld1 { v0.b }[2], [x27], #0x1\n"
+ "ld1 { v9.b }[2], [x26], #0x1\n"
+ "ld1 { v1.b }[2], [x25], #0x1\n"
+ "ld1 { v21.b }[2], [x24], #0x1\n"
+ "ld1 { v28.b }[2], [x23], #0x1\n"
+ "ld1 { v4.b }[2], [x22], #0x1\n"
"b 11f\n"
"10:" // Oddments: Load (A): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b15, [x15], #0x1\n"
- "ldr b25, [x14], #0x1\n"
- "ldr b7, [x13], #0x1\n"
- "ldr b8, [x12], #0x1\n"
- "ldr b26, [x10], #0x1\n"
- "ldr b23, [x9], #0x1\n"
- "ldr b3, [x26], #0x1\n"
- "ldr b10, [x21], #0x1\n"
+ "ldr b13, [x15], #0x1\n"
+ "ldr b2, [x14], #0x1\n"
+ "ldr b0, [x27], #0x1\n"
+ "ldr b9, [x26], #0x1\n"
+ "ldr b1, [x25], #0x1\n"
+ "ldr b21, [x24], #0x1\n"
+ "ldr b28, [x23], #0x1\n"
+ "ldr b4, [x22], #0x1\n"
"11:" // Oddments: Load (A): Bit 3: End
"ldp x15, x14, [%x[inptrs], #0x40]\n"
- "ldp x13, x12, [%x[inptrs], #0x50]\n"
- "add x15, x15, x28\n"
- "add x14, x14, x28\n"
- "ldp x10, x9, [%x[inptrs], #0x60]\n"
- "ldp x26, x21, [%x[inptrs], #0x70]\n"
- "add x13, x13, x28\n"
- "add x12, x12, x28\n"
- "add x10, x10, x28\n"
- "add x9, x9, x28\n"
- "add x26, x26, x28\n"
- "add x21, x21, x28\n"
+ "ldp x27, x26, [%x[inptrs], #0x50]\n"
+ "ldp x25, x24, [%x[inptrs], #0x60]\n"
+ "ldp x23, x22, [%x[inptrs], #0x70]\n"
+ "add x15, x15, x13\n"
+ "add x14, x14, x13\n"
+ "add x27, x27, x13\n"
+ "add x26, x26, x13\n"
+ "add x25, x25, x13\n"
+ "add x24, x24, x13\n"
+ "add x23, x23, x13\n"
+ "add x22, x22, x13\n"
"tbz %x[n_channels], #3, 15f\n"
- "ldr d22, [x15], #0x8\n"
- "ldr d19, [x14], #0x8\n"
- "ldr d0, [x13], #0x8\n"
- "ldr d5, [x12], #0x8\n"
- "ldr d27, [x10], #0x8\n"
- "ldr d24, [x9], #0x8\n"
- "ldr d2, [x26], #0x8\n"
- "ldr d9, [x21], #0x8\n"
+ "ldr d10, [x15], #0x8\n"
+ "ldr d29, [x14], #0x8\n"
+ "ldr d27, [x27], #0x8\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d5, [x25], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
+ "ldr d14, [x23], #0x8\n"
+ "ldr d25, [x22], #0x8\n"
"tbz %x[n_channels], #2, 13f\n"
- "ld1 { v22.s }[2], [x15], #0x4\n"
- "ld1 { v19.s }[2], [x14], #0x4\n"
- "ld1 { v0.s }[2], [x13], #0x4\n"
- "ld1 { v5.s }[2], [x12], #0x4\n"
- "ld1 { v27.s }[2], [x10], #0x4\n"
- "ld1 { v24.s }[2], [x9], #0x4\n"
- "ld1 { v2.s }[2], [x26], #0x4\n"
- "ld1 { v9.s }[2], [x21], #0x4\n"
+ "ld1 { v10.s }[2], [x15], #0x4\n"
+ "ld1 { v29.s }[2], [x14], #0x4\n"
+ "ld1 { v27.s }[2], [x27], #0x4\n"
+ "ld1 { v6.s }[2], [x26], #0x4\n"
+ "ld1 { v5.s }[2], [x25], #0x4\n"
+ "ld1 { v18.s }[2], [x24], #0x4\n"
+ "ld1 { v14.s }[2], [x23], #0x4\n"
+ "ld1 { v25.s }[2], [x22], #0x4\n"
"tbz %x[n_channels], #1, 12f\n"
- "ld1 { v22.h }[6], [x15], #0x2\n"
- "ld1 { v19.h }[6], [x14], #0x2\n"
- "ld1 { v0.h }[6], [x13], #0x2\n"
- "ld1 { v5.h }[6], [x12], #0x2\n"
- "ld1 { v27.h }[6], [x10], #0x2\n"
- "ld1 { v24.h }[6], [x9], #0x2\n"
- "ld1 { v2.h }[6], [x26], #0x2\n"
- "ld1 { v9.h }[6], [x21], #0x2\n"
+ "ld1 { v10.h }[6], [x15], #0x2\n"
+ "ld1 { v29.h }[6], [x14], #0x2\n"
+ "ld1 { v27.h }[6], [x27], #0x2\n"
+ "ld1 { v6.h }[6], [x26], #0x2\n"
+ "ld1 { v5.h }[6], [x25], #0x2\n"
+ "ld1 { v18.h }[6], [x24], #0x2\n"
+ "ld1 { v14.h }[6], [x23], #0x2\n"
+ "ld1 { v25.h }[6], [x22], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[14], [x15], #0x1\n"
- "ld1 { v19.b }[14], [x14], #0x1\n"
- "ld1 { v0.b }[14], [x13], #0x1\n"
- "ld1 { v5.b }[14], [x12], #0x1\n"
- "ld1 { v27.b }[14], [x10], #0x1\n"
- "ld1 { v24.b }[14], [x9], #0x1\n"
- "ld1 { v2.b }[14], [x26], #0x1\n"
- "ld1 { v9.b }[14], [x21], #0x1\n"
+ "ld1 { v10.b }[14], [x15], #0x1\n"
+ "ld1 { v29.b }[14], [x14], #0x1\n"
+ "ld1 { v27.b }[14], [x27], #0x1\n"
+ "ld1 { v6.b }[14], [x26], #0x1\n"
+ "ld1 { v5.b }[14], [x25], #0x1\n"
+ "ld1 { v18.b }[14], [x24], #0x1\n"
+ "ld1 { v14.b }[14], [x23], #0x1\n"
+ "ld1 { v25.b }[14], [x22], #0x1\n"
"b 19f\n"
"12:" // Oddments: Load (B): Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[12], [x15], #0x1\n"
- "ld1 { v19.b }[12], [x14], #0x1\n"
- "ld1 { v0.b }[12], [x13], #0x1\n"
- "ld1 { v5.b }[12], [x12], #0x1\n"
- "ld1 { v27.b }[12], [x10], #0x1\n"
- "ld1 { v24.b }[12], [x9], #0x1\n"
- "ld1 { v2.b }[12], [x26], #0x1\n"
- "ld1 { v9.b }[12], [x21], #0x1\n"
+ "ld1 { v10.b }[12], [x15], #0x1\n"
+ "ld1 { v29.b }[12], [x14], #0x1\n"
+ "ld1 { v27.b }[12], [x27], #0x1\n"
+ "ld1 { v6.b }[12], [x26], #0x1\n"
+ "ld1 { v5.b }[12], [x25], #0x1\n"
+ "ld1 { v18.b }[12], [x24], #0x1\n"
+ "ld1 { v14.b }[12], [x23], #0x1\n"
+ "ld1 { v25.b }[12], [x22], #0x1\n"
"b 19f\n"
"13:" // Oddments: Load (B): Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 14f\n"
- "ld1 { v22.h }[4], [x15], #0x2\n"
- "ld1 { v19.h }[4], [x14], #0x2\n"
- "ld1 { v0.h }[4], [x13], #0x2\n"
- "ld1 { v5.h }[4], [x12], #0x2\n"
- "ld1 { v27.h }[4], [x10], #0x2\n"
- "ld1 { v24.h }[4], [x9], #0x2\n"
- "ld1 { v2.h }[4], [x26], #0x2\n"
- "ld1 { v9.h }[4], [x21], #0x2\n"
+ "ld1 { v10.h }[4], [x15], #0x2\n"
+ "ld1 { v29.h }[4], [x14], #0x2\n"
+ "ld1 { v27.h }[4], [x27], #0x2\n"
+ "ld1 { v6.h }[4], [x26], #0x2\n"
+ "ld1 { v5.h }[4], [x25], #0x2\n"
+ "ld1 { v18.h }[4], [x24], #0x2\n"
+ "ld1 { v14.h }[4], [x23], #0x2\n"
+ "ld1 { v25.h }[4], [x22], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[10], [x15], #0x1\n"
- "ld1 { v19.b }[10], [x14], #0x1\n"
- "ld1 { v0.b }[10], [x13], #0x1\n"
- "ld1 { v5.b }[10], [x12], #0x1\n"
- "ld1 { v27.b }[10], [x10], #0x1\n"
- "ld1 { v24.b }[10], [x9], #0x1\n"
- "ld1 { v2.b }[10], [x26], #0x1\n"
- "ld1 { v9.b }[10], [x21], #0x1\n"
+ "ld1 { v10.b }[10], [x15], #0x1\n"
+ "ld1 { v29.b }[10], [x14], #0x1\n"
+ "ld1 { v27.b }[10], [x27], #0x1\n"
+ "ld1 { v6.b }[10], [x26], #0x1\n"
+ "ld1 { v5.b }[10], [x25], #0x1\n"
+ "ld1 { v18.b }[10], [x24], #0x1\n"
+ "ld1 { v14.b }[10], [x23], #0x1\n"
+ "ld1 { v25.b }[10], [x22], #0x1\n"
"b 19f\n"
"14:" // Oddments: Load (B): Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[8], [x15], #0x1\n"
- "ld1 { v19.b }[8], [x14], #0x1\n"
- "ld1 { v0.b }[8], [x13], #0x1\n"
- "ld1 { v5.b }[8], [x12], #0x1\n"
- "ld1 { v27.b }[8], [x10], #0x1\n"
- "ld1 { v24.b }[8], [x9], #0x1\n"
- "ld1 { v2.b }[8], [x26], #0x1\n"
- "ld1 { v9.b }[8], [x21], #0x1\n"
+ "ld1 { v10.b }[8], [x15], #0x1\n"
+ "ld1 { v29.b }[8], [x14], #0x1\n"
+ "ld1 { v27.b }[8], [x27], #0x1\n"
+ "ld1 { v6.b }[8], [x26], #0x1\n"
+ "ld1 { v5.b }[8], [x25], #0x1\n"
+ "ld1 { v18.b }[8], [x24], #0x1\n"
+ "ld1 { v14.b }[8], [x23], #0x1\n"
+ "ld1 { v25.b }[8], [x22], #0x1\n"
"b 19f\n"
"15:" // Oddments: Load (B): Bit 3: Unset
"tbz %x[n_channels], #2, 17f\n"
- "ldr s22, [x15], #0x4\n"
- "ldr s19, [x14], #0x4\n"
- "ldr s0, [x13], #0x4\n"
- "ldr s5, [x12], #0x4\n"
- "ldr s27, [x10], #0x4\n"
- "ldr s24, [x9], #0x4\n"
- "ldr s2, [x26], #0x4\n"
- "ldr s9, [x21], #0x4\n"
+ "ldr s10, [x15], #0x4\n"
+ "ldr s29, [x14], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s6, [x26], #0x4\n"
+ "ldr s5, [x25], #0x4\n"
+ "ldr s18, [x24], #0x4\n"
+ "ldr s14, [x23], #0x4\n"
+ "ldr s25, [x22], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v22.h }[2], [x15], #0x2\n"
- "ld1 { v19.h }[2], [x14], #0x2\n"
- "ld1 { v0.h }[2], [x13], #0x2\n"
- "ld1 { v5.h }[2], [x12], #0x2\n"
- "ld1 { v27.h }[2], [x10], #0x2\n"
- "ld1 { v24.h }[2], [x9], #0x2\n"
- "ld1 { v2.h }[2], [x26], #0x2\n"
- "ld1 { v9.h }[2], [x21], #0x2\n"
+ "ld1 { v10.h }[2], [x15], #0x2\n"
+ "ld1 { v29.h }[2], [x14], #0x2\n"
+ "ld1 { v27.h }[2], [x27], #0x2\n"
+ "ld1 { v6.h }[2], [x26], #0x2\n"
+ "ld1 { v5.h }[2], [x25], #0x2\n"
+ "ld1 { v18.h }[2], [x24], #0x2\n"
+ "ld1 { v14.h }[2], [x23], #0x2\n"
+ "ld1 { v25.h }[2], [x22], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[6], [x15], #0x1\n"
- "ld1 { v19.b }[6], [x14], #0x1\n"
- "ld1 { v0.b }[6], [x13], #0x1\n"
- "ld1 { v5.b }[6], [x12], #0x1\n"
- "ld1 { v27.b }[6], [x10], #0x1\n"
- "ld1 { v24.b }[6], [x9], #0x1\n"
- "ld1 { v2.b }[6], [x26], #0x1\n"
- "ld1 { v9.b }[6], [x21], #0x1\n"
+ "ld1 { v10.b }[6], [x15], #0x1\n"
+ "ld1 { v29.b }[6], [x14], #0x1\n"
+ "ld1 { v27.b }[6], [x27], #0x1\n"
+ "ld1 { v6.b }[6], [x26], #0x1\n"
+ "ld1 { v5.b }[6], [x25], #0x1\n"
+ "ld1 { v18.b }[6], [x24], #0x1\n"
+ "ld1 { v14.b }[6], [x23], #0x1\n"
+ "ld1 { v25.b }[6], [x22], #0x1\n"
"b 19f\n"
"16:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[4], [x15], #0x1\n"
- "ld1 { v19.b }[4], [x14], #0x1\n"
- "ld1 { v0.b }[4], [x13], #0x1\n"
- "ld1 { v5.b }[4], [x12], #0x1\n"
- "ld1 { v27.b }[4], [x10], #0x1\n"
- "ld1 { v24.b }[4], [x9], #0x1\n"
- "ld1 { v2.b }[4], [x26], #0x1\n"
- "ld1 { v9.b }[4], [x21], #0x1\n"
+ "ld1 { v10.b }[4], [x15], #0x1\n"
+ "ld1 { v29.b }[4], [x14], #0x1\n"
+ "ld1 { v27.b }[4], [x27], #0x1\n"
+ "ld1 { v6.b }[4], [x26], #0x1\n"
+ "ld1 { v5.b }[4], [x25], #0x1\n"
+ "ld1 { v18.b }[4], [x24], #0x1\n"
+ "ld1 { v14.b }[4], [x23], #0x1\n"
+ "ld1 { v25.b }[4], [x22], #0x1\n"
"b 19f\n"
"17:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ldr h22, [x15], #0x2\n"
- "ldr h19, [x14], #0x2\n"
- "ldr h0, [x13], #0x2\n"
- "ldr h5, [x12], #0x2\n"
- "ldr h27, [x10], #0x2\n"
- "ldr h24, [x9], #0x2\n"
- "ldr h2, [x26], #0x2\n"
- "ldr h9, [x21], #0x2\n"
+ "ldr h10, [x15], #0x2\n"
+ "ldr h29, [x14], #0x2\n"
+ "ldr h27, [x27], #0x2\n"
+ "ldr h6, [x26], #0x2\n"
+ "ldr h5, [x25], #0x2\n"
+ "ldr h18, [x24], #0x2\n"
+ "ldr h14, [x23], #0x2\n"
+ "ldr h25, [x22], #0x2\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v22.b }[2], [x15], #0x1\n"
- "ld1 { v19.b }[2], [x14], #0x1\n"
- "ld1 { v0.b }[2], [x13], #0x1\n"
- "ld1 { v5.b }[2], [x12], #0x1\n"
- "ld1 { v27.b }[2], [x10], #0x1\n"
- "ld1 { v24.b }[2], [x9], #0x1\n"
- "ld1 { v2.b }[2], [x26], #0x1\n"
- "ld1 { v9.b }[2], [x21], #0x1\n"
+ "ld1 { v10.b }[2], [x15], #0x1\n"
+ "ld1 { v29.b }[2], [x14], #0x1\n"
+ "ld1 { v27.b }[2], [x27], #0x1\n"
+ "ld1 { v6.b }[2], [x26], #0x1\n"
+ "ld1 { v5.b }[2], [x25], #0x1\n"
+ "ld1 { v18.b }[2], [x24], #0x1\n"
+ "ld1 { v14.b }[2], [x23], #0x1\n"
+ "ld1 { v25.b }[2], [x22], #0x1\n"
"b 19f\n"
"18:" // Oddments: Load (B): Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
- "ldr b22, [x15], #0x1\n"
- "ldr b19, [x14], #0x1\n"
- "ldr b0, [x13], #0x1\n"
- "ldr b5, [x12], #0x1\n"
- "ldr b27, [x10], #0x1\n"
- "ldr b24, [x9], #0x1\n"
- "ldr b2, [x26], #0x1\n"
- "ldr b9, [x21], #0x1\n"
+ "ldr b10, [x15], #0x1\n"
+ "ldr b29, [x14], #0x1\n"
+ "ldr b27, [x27], #0x1\n"
+ "ldr b6, [x26], #0x1\n"
+ "ldr b5, [x25], #0x1\n"
+ "ldr b18, [x24], #0x1\n"
+ "ldr b14, [x23], #0x1\n"
+ "ldr b25, [x22], #0x1\n"
"19:" // Oddments: Load (B): Bit 3: End
"ldr q20, [%x[params], #0x10]\n"
- "ldr q6, [%x[params], #0x20]\n"
- "zip2 v1.16b, v26.16b, v3.16b\n"
- "zip1 v26.16b, v26.16b, v3.16b\n"
- "ldr q4, [%x[params], #0x30]\n"
- "zip1 v18.16b, v23.16b, v10.16b\n"
- "zip2 v30.16b, v15.16b, v7.16b\n"
+ "ldr q17, [%x[params], #0x20]\n"
+ "zip2 v26.16b, v1.16b, v28.16b\n"
+ "zip1 v1.16b, v1.16b, v28.16b\n"
+ "ldr q30, [%x[params], #0x30]\n"
+ "zip1 v19.16b, v21.16b, v4.16b\n"
+ "zip2 v23.16b, v13.16b, v0.16b\n"
"cmp x20, #0x4\n"
- "zip1 v15.16b, v15.16b, v7.16b\n"
- "zip1 v29.16b, v25.16b, v8.16b\n"
- "zip2 v8.16b, v25.16b, v8.16b\n"
- "zip2 v10.16b, v23.16b, v10.16b\n"
- "zip2 v23.16b, v26.16b, v18.16b\n"
- "zip1 v26.16b, v26.16b, v18.16b\n"
- "zip2 v28.16b, v22.16b, v0.16b\n"
- "zip1 v22.16b, v22.16b, v0.16b\n"
- "zip1 v21.16b, v19.16b, v5.16b\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6e9a9591 // udot v17.4s, v12.16b, v26.16b\n"
- "zip2 v25.16b, v15.16b, v29.16b\n"
- "zip1 v15.16b, v15.16b, v29.16b\n"
- "zip1 v7.16b, v30.16b, v8.16b\n"
- "zip2 v8.16b, v30.16b, v8.16b\n"
+ "zip1 v13.16b, v13.16b, v0.16b\n"
+ "zip1 v22.16b, v2.16b, v9.16b\n"
+ "zip2 v9.16b, v2.16b, v9.16b\n"
+ "zip2 v4.16b, v21.16b, v4.16b\n"
+ "zip2 v21.16b, v1.16b, v19.16b\n"
+ "zip1 v1.16b, v1.16b, v19.16b\n"
+ "zip2 v16.16b, v10.16b, v27.16b\n"
+ "zip1 v10.16b, v10.16b, v27.16b\n"
+ "zip1 v19.16b, v29.16b, v6.16b\n"
+ "movi v8.4s, #0x0\n"
+ "zip2 v2.16b, v13.16b, v22.16b\n"
+ "zip1 v13.16b, v13.16b, v22.16b\n"
+ "zip1 v0.16b, v23.16b, v9.16b\n"
+ "zip2 v9.16b, v23.16b, v9.16b\n"
"ldr q31, [%x[params], #0x0]\n"
- "zip2 v5.16b, v19.16b, v5.16b\n"
- "zip2 v30.16b, v27.16b, v2.16b\n"
- "zip1 v27.16b, v27.16b, v2.16b\n"
- "zip1 v18.16b, v24.16b, v9.16b\n"
- "zip2 v9.16b, v24.16b, v9.16b\n"
- "zip2 v19.16b, v22.16b, v21.16b\n"
- "zip1 v22.16b, v22.16b, v21.16b\n"
- "zip1 v3.16b, v1.16b, v10.16b\n"
- ".inst 0x6e969591 // udot v17.4s, v12.16b, v22.16b\n"
- "zip2 v10.16b, v1.16b, v10.16b\n"
- "zip1 v0.16b, v28.16b, v5.16b\n"
- "zip2 v5.16b, v28.16b, v5.16b\n"
- "zip2 v24.16b, v27.16b, v18.16b\n"
- "zip1 v27.16b, v27.16b, v18.16b\n"
- "zip1 v2.16b, v30.16b, v9.16b\n"
- "mov v18.16b, v17.16b\n .inst 0x6e9b9592 // udot v18.4s, v12.16b, v27.16b\n"
- "zip2 v9.16b, v30.16b, v9.16b\n"
- "mov v30.16b, v31.16b\n"
- ".inst 0x6e8f9591 // udot v17.4s, v12.16b, v15.16b\n"
- "mov v29.16b, v31.16b\n"
- "mov v28.16b, v31.16b\n"
- ".inst 0x6e8f969f // udot v31.4s, v20.16b, v15.16b\n"
- ".inst 0x6e9a969d // udot v29.4s, v20.16b, v26.16b\n"
- ".inst 0x6e9a94df // udot v31.4s, v6.16b, v26.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "movi v1.4s, #0x0\n"
- "ext v15.16b, v15.16b, v15.16b, #0x1\n"
- ".inst 0x6e9a9581 // udot v1.4s, v12.16b, v26.16b\n"
- ".inst 0x6e9694dd // udot v29.4s, v6.16b, v22.16b\n"
- ".inst 0x6e96949f // udot v31.4s, v4.16b, v22.16b\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- ".inst 0x6e8f969e // udot v30.4s, v20.16b, v15.16b\n"
- ".inst 0x6e9a969c // udot v28.4s, v20.16b, v26.16b\n"
- "mls v31.4s, v17.4s, v16.4s\n"
- ".inst 0x6e969581 // udot v1.4s, v12.16b, v22.16b\n"
- ".inst 0x6e9b949d // udot v29.4s, v4.16b, v27.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- ".inst 0x6e9a94de // udot v30.4s, v6.16b, v26.16b\n"
- "ldr q21, [%x[params], #0x50]\n"
- ".inst 0x6e9694dc // udot v28.4s, v6.16b, v22.16b\n"
- "mls v29.4s, v18.4s, v16.4s\n"
- "mov v20.16b, v1.16b\n .inst 0x6e9b9594 // udot v20.4s, v12.16b, v27.16b\n"
- ".inst 0x6e8f9581 // udot v1.4s, v12.16b, v15.16b\n"
- "ldr q18, [%x[params], #0x40]\n"
- "sqrdmulh v31.4s, v31.4s, v18.4s\n"
- ".inst 0x6e96949e // udot v30.4s, v4.16b, v22.16b\n"
- ".inst 0x6e9b949c // udot v28.4s, v4.16b, v27.16b\n"
- "mls v30.4s, v1.4s, v16.4s\n"
+ ".inst 0x6e8195e8 // udot v8.4s, v15.16b, v1.16b\n"
+ "zip2 v6.16b, v29.16b, v6.16b\n"
+ "zip2 v22.16b, v5.16b, v14.16b\n"
+ "zip1 v5.16b, v5.16b, v14.16b\n"
+ "zip1 v3.16b, v18.16b, v25.16b\n"
+ "zip2 v25.16b, v18.16b, v25.16b\n"
+ "zip2 v29.16b, v10.16b, v19.16b\n"
+ "zip1 v10.16b, v10.16b, v19.16b\n"
+ "zip1 v28.16b, v26.16b, v4.16b\n"
+ "zip2 v4.16b, v26.16b, v4.16b\n"
+ "zip1 v27.16b, v16.16b, v6.16b\n"
+ "zip2 v6.16b, v16.16b, v6.16b\n"
+ "zip2 v18.16b, v5.16b, v3.16b\n"
+ "zip1 v5.16b, v5.16b, v3.16b\n"
+ "zip1 v14.16b, v22.16b, v25.16b\n"
+ ".inst 0x6e8a95e8 // udot v8.4s, v15.16b, v10.16b\n"
+ "zip2 v25.16b, v22.16b, v25.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
+ ".inst 0x6e8d969f // udot v31.4s, v20.16b, v13.16b\n"
+ "movi v22.4s, #0x0\n"
+ ".inst 0x6e819683 // udot v3.4s, v20.16b, v1.16b\n"
+ "mov v16.16b, v8.16b\n .inst 0x6e8595f0 // udot v16.4s, v15.16b, v5.16b\n"
+ ".inst 0x6e8d95e8 // udot v8.4s, v15.16b, v13.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x6e81963f // udot v31.4s, v17.16b, v1.16b\n"
+ "ext v1.16b, v1.16b, v1.16b, #0x1\n"
+ ".inst 0x6e8a9623 // udot v3.4s, v17.16b, v10.16b\n"
+ ".inst 0x6e8d969a // udot v26.4s, v20.16b, v13.16b\n"
+ ".inst 0x6e8195f6 // udot v22.4s, v15.16b, v1.16b\n"
+ ".inst 0x6e8a97df // udot v31.4s, v30.16b, v10.16b\n"
+ "ext v10.16b, v10.16b, v10.16b, #0x1\n"
+ ".inst 0x6e819697 // udot v23.4s, v20.16b, v1.16b\n"
+ ".inst 0x6e8597c3 // udot v3.4s, v30.16b, v5.16b\n"
+ "ext v5.16b, v5.16b, v5.16b, #0x1\n"
+ ".inst 0x6e81963a // udot v26.4s, v17.16b, v1.16b\n"
+ "ldr q20, [%x[params], #0x50]\n"
+ ".inst 0x6e8a95f6 // udot v22.4s, v15.16b, v10.16b\n"
+ "mls v31.4s, v8.4s, v24.4s\n"
+ ".inst 0x6e8a9637 // udot v23.4s, v17.16b, v10.16b\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ "mov v19.16b, v22.16b\n .inst 0x6e8595f3 // udot v19.4s, v15.16b, v5.16b\n"
+ ".inst 0x6e8d95f6 // udot v22.4s, v15.16b, v13.16b\n"
+ "ldr q17, [%x[params], #0x40]\n"
"add %x[params], %x[params], #0x60\n"
- "mls v28.4s, v20.4s, v16.4s\n"
- "and v17.16b, v31.16b, v21.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v18.4s\n"
- "sqrdmulh v29.4s, v29.4s, v18.4s\n"
- "sqrdmulh v28.4s, v28.4s, v18.4s\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v17.16b, v30.16b, v21.16b\n"
- "and v18.16b, v29.16b, v21.16b\n"
- "and v26.16b, v28.16b, v21.16b\n"
+ ".inst 0x6e8a97da // udot v26.4s, v30.16b, v10.16b\n"
+ ".inst 0x6e8597d7 // udot v23.4s, v30.16b, v5.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "mls v26.4s, v22.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v20.16b\n"
+ "mls v23.4s, v19.4s, v24.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v17.4s\n"
+ "and v19.16b, v3.16b, v20.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v23.4s, v23.4s, v17.4s\n"
+ "and v17.16b, v26.16b, v20.16b\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v16.16b, v23.16b, v20.16b\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v28.4s, v28.4s, v26.4s\n"
- "srshl v31.4s, v31.4s, v21.4s\n"
- "srshl v30.4s, v30.4s, v21.4s\n"
- "srshl v29.4s, v29.4s, v21.4s\n"
- "srshl v28.4s, v28.4s, v21.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
+ "sqadd v3.4s, v3.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v31.4s, v31.4s, v20.4s\n"
+ "sqadd v26.4s, v26.4s, v17.4s\n"
+ "srshl v3.4s, v3.4s, v20.4s\n"
+ "sqadd v23.4s, v23.4s, v16.4s\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "srshl v26.4s, v26.4s, v20.4s\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "srshl v23.4s, v23.4s, v20.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
"blt 20f\n"
- "str s31, [x25, x27]\n"
- "str s30, [x24, x27]\n"
- "str s29, [x23, x27]\n"
- "str s28, [x22, x27]\n"
+ "str s31, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s3, [x9, x12]\n"
+ "str s23, [x28, x12]\n"
"b 23f\n"
"20:" // Oddments: Unroll 0: Oddment store
- "add x25, x25, x27\n"
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 21f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v30.h }[0], [x24], #0x2\n"
- "st1 { v29.h }[0], [x23], #0x2\n"
- "st1 { v28.h }[0], [x22], #0x2\n"
+ "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v3.h }[0], [x9], #0x2\n"
+ "st1 { v23.h }[0], [x28], #0x2\n"
"tbz x20, #0, 22f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v30.b }[2], [x24], #0x1\n"
- "st1 { v29.b }[2], [x23], #0x1\n"
- "st1 { v28.b }[2], [x22], #0x1\n"
+ "st1 { v31.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v3.b }[2], [x9], #0x1\n"
+ "st1 { v23.b }[2], [x28], #0x1\n"
"b 22f\n"
"21:" // Oddments: Unroll 0: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v30.b }[0], [x24], #0x1\n"
- "st1 { v29.b }[0], [x23], #0x1\n"
- "st1 { v28.b }[0], [x22], #0x1\n"
+ "st1 { v31.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v3.b }[0], [x9], #0x1\n"
+ "st1 { v23.b }[0], [x28], #0x1\n"
"22:" // Oddments: Unroll 0: Oddment store: Bit 1: End
"23:" // Oddments: Unroll 0: After oddment store
"subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
"ldr q31, [%x[params], #0x0]\n"
- "ldr q27, [%x[params], #0x10]\n"
- "movi v1.4s, #0x0\n"
- ".inst 0x6e979581 // udot v1.4s, v12.16b, v23.16b\n"
- "ldr q26, [%x[params], #0x20]\n"
- "ldr q22, [%x[params], #0x30]\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "ldr q4, [%x[params], #0x40]\n"
- "ldr q21, [%x[params], #0x50]\n"
- "mov v28.16b, v31.16b\n"
- ".inst 0x6e99977f // udot v31.4s, v27.16b, v25.16b\n"
- ".inst 0x6e939581 // udot v1.4s, v12.16b, v19.16b\n"
- ".inst 0x6e97977d // udot v29.4s, v27.16b, v23.16b\n"
- "movi v20.4s, #0x0\n"
+ "ldr q5, [%x[params], #0x10]\n"
+ "movi v8.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "ldr q22, [%x[params], #0x20]\n"
+ "ldr q20, [%x[params], #0x30]\n"
"cmp x20, #0x4\n"
- ".inst 0x6e97975f // udot v31.4s, v26.16b, v23.16b\n"
- "mov v18.16b, v1.16b\n .inst 0x6e989592 // udot v18.4s, v12.16b, v24.16b\n"
- "ext v23.16b, v23.16b, v23.16b, #0x1\n"
+ "ldr q17, [%x[params], #0x40]\n"
+ "ldr q19, [%x[params], #0x50]\n"
+ ".inst 0x6e9595e8 // udot v8.4s, v15.16b, v21.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x6e999581 // udot v1.4s, v12.16b, v25.16b\n"
- "ext v25.16b, v25.16b, v25.16b, #0x1\n"
- ".inst 0x6e99977e // udot v30.4s, v27.16b, v25.16b\n"
- ".inst 0x6e97977c // udot v28.4s, v27.16b, v23.16b\n"
- ".inst 0x6e979594 // udot v20.4s, v12.16b, v23.16b\n"
- ".inst 0x6e93975d // udot v29.4s, v26.16b, v19.16b\n"
- ".inst 0x6e9396df // udot v31.4s, v22.16b, v19.16b\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x6e97975e // udot v30.4s, v26.16b, v23.16b\n"
- ".inst 0x6e93975c // udot v28.4s, v26.16b, v19.16b\n"
- "mls v31.4s, v1.4s, v16.4s\n"
- ".inst 0x6e939594 // udot v20.4s, v12.16b, v19.16b\n"
- ".inst 0x6e9896dd // udot v29.4s, v22.16b, v24.16b\n"
- "ext v24.16b, v24.16b, v24.16b, #0x1\n"
- ".inst 0x6e9396de // udot v30.4s, v22.16b, v19.16b\n"
- ".inst 0x6e9896dc // udot v28.4s, v22.16b, v24.16b\n"
- "sqrdmulh v31.4s, v31.4s, v4.4s\n"
- "mov v17.16b, v20.16b\n .inst 0x6e989591 // udot v17.4s, v12.16b, v24.16b\n"
- ".inst 0x6e999594 // udot v20.4s, v12.16b, v25.16b\n"
- "mls v30.4s, v20.4s, v16.4s\n"
- "mls v29.4s, v18.4s, v16.4s\n"
- "mls v28.4s, v17.4s, v16.4s\n"
- "and v17.16b, v31.16b, v21.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v4.4s\n"
- "sqrdmulh v29.4s, v29.4s, v4.4s\n"
- "sqrdmulh v28.4s, v28.4s, v4.4s\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v19.16b, v30.16b, v21.16b\n"
- "and v18.16b, v29.16b, v21.16b\n"
- "and v17.16b, v28.16b, v21.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
+ ".inst 0x6e8294bf // udot v31.4s, v5.16b, v2.16b\n"
+ ".inst 0x6e9594a3 // udot v3.4s, v5.16b, v21.16b\n"
+ ".inst 0x6e9d95e8 // udot v8.4s, v15.16b, v29.16b\n"
+ ".inst 0x6e9596df // udot v31.4s, v22.16b, v21.16b\n"
+ "ext v21.16b, v21.16b, v21.16b, #0x1\n"
+ ".inst 0x6e9594b7 // udot v23.4s, v5.16b, v21.16b\n"
+ ".inst 0x6e9595fe // udot v30.4s, v15.16b, v21.16b\n"
+ ".inst 0x6e9d96c3 // udot v3.4s, v22.16b, v29.16b\n"
+ "mov v16.16b, v8.16b\n .inst 0x6e9295f0 // udot v16.4s, v15.16b, v18.16b\n"
+ ".inst 0x6e8295e8 // udot v8.4s, v15.16b, v2.16b\n"
+ "ext v2.16b, v2.16b, v2.16b, #0x1\n"
+ ".inst 0x6e9d969f // udot v31.4s, v20.16b, v29.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x1\n"
+ ".inst 0x6e8294ba // udot v26.4s, v5.16b, v2.16b\n"
+ ".inst 0x6e929683 // udot v3.4s, v20.16b, v18.16b\n"
+ "ext v18.16b, v18.16b, v18.16b, #0x1\n"
+ ".inst 0x6e9d96d7 // udot v23.4s, v22.16b, v29.16b\n"
+ ".inst 0x6e9d95fe // udot v30.4s, v15.16b, v29.16b\n"
+ "mls v31.4s, v8.4s, v24.4s\n"
+ ".inst 0x6e9596da // udot v26.4s, v22.16b, v21.16b\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x6e929697 // udot v23.4s, v20.16b, v18.16b\n"
+ "mov v16.16b, v30.16b\n .inst 0x6e9295f0 // udot v16.4s, v15.16b, v18.16b\n"
+ ".inst 0x6e8295fe // udot v30.4s, v15.16b, v2.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ ".inst 0x6e9d969a // udot v26.4s, v20.16b, v29.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v19.16b\n"
+ "mls v26.4s, v30.4s, v24.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v17.4s\n"
+ "and v18.16b, v3.16b, v19.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
+ "sqadd v3.4s, v3.4s, v18.4s\n"
+ "srshl v31.4s, v31.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "srshl v31.4s, v31.4s, v21.4s\n"
- "srshl v30.4s, v30.4s, v21.4s\n"
- "srshl v29.4s, v29.4s, v21.4s\n"
- "srshl v28.4s, v28.4s, v21.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
"blt 24f\n"
- "str s31, [x25, x27]\n"
- "str s30, [x24, x27]\n"
- "str s29, [x23, x27]\n"
- "str s28, [x22, x27]\n"
+ "str s31, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s3, [x9, x12]\n"
+ "str s23, [x28, x12]\n"
"b 27f\n"
"24:" // Oddments: Unroll 1: Oddment store
- "add x25, x25, x27\n"
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 25f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v30.h }[0], [x24], #0x2\n"
- "st1 { v29.h }[0], [x23], #0x2\n"
- "st1 { v28.h }[0], [x22], #0x2\n"
+ "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v3.h }[0], [x9], #0x2\n"
+ "st1 { v23.h }[0], [x28], #0x2\n"
"tbz x20, #0, 26f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v30.b }[2], [x24], #0x1\n"
- "st1 { v29.b }[2], [x23], #0x1\n"
- "st1 { v28.b }[2], [x22], #0x1\n"
+ "st1 { v31.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v3.b }[2], [x9], #0x1\n"
+ "st1 { v23.b }[2], [x28], #0x1\n"
"b 26f\n"
"25:" // Oddments: Unroll 1: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v30.b }[0], [x24], #0x1\n"
- "st1 { v29.b }[0], [x23], #0x1\n"
- "st1 { v28.b }[0], [x22], #0x1\n"
+ "st1 { v31.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v3.b }[0], [x9], #0x1\n"
+ "st1 { v23.b }[0], [x28], #0x1\n"
"26:" // Oddments: Unroll 1: Oddment store: Bit 1: End
"27:" // Oddments: Unroll 1: After oddment store
"subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
"ldr q31, [%x[params], #0x0]\n"
- "ldr q25, [%x[params], #0x10]\n"
- "movi v24.4s, #0x0\n"
- ".inst 0x6e839598 // udot v24.4s, v12.16b, v3.16b\n"
- "ldr q23, [%x[params], #0x20]\n"
- "ldr q22, [%x[params], #0x30]\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "ldr q21, [%x[params], #0x40]\n"
- "ldr q20, [%x[params], #0x50]\n"
- "mov v28.16b, v31.16b\n"
- ".inst 0x6e87973f // udot v31.4s, v25.16b, v7.16b\n"
- ".inst 0x6e809598 // udot v24.4s, v12.16b, v0.16b\n"
- ".inst 0x6e83973d // udot v29.4s, v25.16b, v3.16b\n"
- "movi v19.4s, #0x0\n"
+ "ldr q29, [%x[params], #0x10]\n"
+ "movi v22.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "ldr q20, [%x[params], #0x20]\n"
+ "ldr q18, [%x[params], #0x30]\n"
"cmp x20, #0x4\n"
- ".inst 0x6e8396ff // udot v31.4s, v23.16b, v3.16b\n"
- "mov v18.16b, v24.16b\n .inst 0x6e829592 // udot v18.4s, v12.16b, v2.16b\n"
- "ext v3.16b, v3.16b, v3.16b, #0x1\n"
+ "ldr q17, [%x[params], #0x40]\n"
+ "ldr q19, [%x[params], #0x50]\n"
+ ".inst 0x6e9c95f6 // udot v22.4s, v15.16b, v28.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x6e879598 // udot v24.4s, v12.16b, v7.16b\n"
- "ext v7.16b, v7.16b, v7.16b, #0x1\n"
- ".inst 0x6e87973e // udot v30.4s, v25.16b, v7.16b\n"
- ".inst 0x6e83973c // udot v28.4s, v25.16b, v3.16b\n"
- ".inst 0x6e839593 // udot v19.4s, v12.16b, v3.16b\n"
- ".inst 0x6e8096fd // udot v29.4s, v23.16b, v0.16b\n"
- ".inst 0x6e8096df // udot v31.4s, v22.16b, v0.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
+ ".inst 0x6e8097bf // udot v31.4s, v29.16b, v0.16b\n"
+ ".inst 0x6e9c97a3 // udot v3.4s, v29.16b, v28.16b\n"
+ ".inst 0x6e9b95f6 // udot v22.4s, v15.16b, v27.16b\n"
+ ".inst 0x6e9c969f // udot v31.4s, v20.16b, v28.16b\n"
+ "ext v28.16b, v28.16b, v28.16b, #0x1\n"
+ ".inst 0x6e9c97b7 // udot v23.4s, v29.16b, v28.16b\n"
+ ".inst 0x6e9c95f5 // udot v21.4s, v15.16b, v28.16b\n"
+ ".inst 0x6e9b9683 // udot v3.4s, v20.16b, v27.16b\n"
+ "mov v16.16b, v22.16b\n .inst 0x6e8e95f0 // udot v16.4s, v15.16b, v14.16b\n"
+ ".inst 0x6e8095f6 // udot v22.4s, v15.16b, v0.16b\n"
"ext v0.16b, v0.16b, v0.16b, #0x1\n"
- ".inst 0x6e8396fe // udot v30.4s, v23.16b, v3.16b\n"
- ".inst 0x6e8096fc // udot v28.4s, v23.16b, v0.16b\n"
- "mls v31.4s, v24.4s, v16.4s\n"
- ".inst 0x6e809593 // udot v19.4s, v12.16b, v0.16b\n"
- ".inst 0x6e8296dd // udot v29.4s, v22.16b, v2.16b\n"
- "ext v2.16b, v2.16b, v2.16b, #0x1\n"
- ".inst 0x6e8096de // udot v30.4s, v22.16b, v0.16b\n"
- ".inst 0x6e8296dc // udot v28.4s, v22.16b, v2.16b\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
- "mov v17.16b, v19.16b\n .inst 0x6e829591 // udot v17.4s, v12.16b, v2.16b\n"
- ".inst 0x6e879593 // udot v19.4s, v12.16b, v7.16b\n"
- "mls v30.4s, v19.4s, v16.4s\n"
- "mls v29.4s, v18.4s, v16.4s\n"
- "mls v28.4s, v17.4s, v16.4s\n"
- "and v17.16b, v31.16b, v20.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqadd v31.4s, v31.4s, v17.4s\n"
- "and v19.16b, v30.16b, v20.16b\n"
- "and v18.16b, v29.16b, v20.16b\n"
- "and v17.16b, v28.16b, v20.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
+ ".inst 0x6e9b965f // udot v31.4s, v18.16b, v27.16b\n"
+ "ext v27.16b, v27.16b, v27.16b, #0x1\n"
+ ".inst 0x6e8097ba // udot v26.4s, v29.16b, v0.16b\n"
+ ".inst 0x6e8e9643 // udot v3.4s, v18.16b, v14.16b\n"
+ "ext v14.16b, v14.16b, v14.16b, #0x1\n"
+ ".inst 0x6e9b9697 // udot v23.4s, v20.16b, v27.16b\n"
+ ".inst 0x6e9b95f5 // udot v21.4s, v15.16b, v27.16b\n"
+ "mls v31.4s, v22.4s, v24.4s\n"
+ ".inst 0x6e9c969a // udot v26.4s, v20.16b, v28.16b\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x6e8e9657 // udot v23.4s, v18.16b, v14.16b\n"
+ "mov v16.16b, v21.16b\n .inst 0x6e8e95f0 // udot v16.4s, v15.16b, v14.16b\n"
+ ".inst 0x6e8095f5 // udot v21.4s, v15.16b, v0.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ ".inst 0x6e9b965a // udot v26.4s, v18.16b, v27.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v19.16b\n"
+ "mls v26.4s, v21.4s, v24.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v17.4s\n"
+ "and v18.16b, v3.16b, v19.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
+ "sqadd v3.4s, v3.4s, v18.4s\n"
+ "srshl v31.4s, v31.4s, v19.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v19.4s\n"
- "sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "srshl v31.4s, v31.4s, v20.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
- "srshl v29.4s, v29.4s, v20.4s\n"
- "srshl v28.4s, v28.4s, v20.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
"blt 28f\n"
- "str s31, [x25, x27]\n"
- "str s30, [x24, x27]\n"
- "str s29, [x23, x27]\n"
- "str s28, [x22, x27]\n"
+ "str s31, [x11, x12]\n"
+ "str s26, [x10, x12]\n"
+ "str s3, [x9, x12]\n"
+ "str s23, [x28, x12]\n"
"b 31f\n"
"28:" // Oddments: Unroll 2: Oddment store
- "add x25, x25, x27\n"
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 29f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v30.h }[0], [x24], #0x2\n"
- "st1 { v29.h }[0], [x23], #0x2\n"
- "st1 { v28.h }[0], [x22], #0x2\n"
+ "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v3.h }[0], [x9], #0x2\n"
+ "st1 { v23.h }[0], [x28], #0x2\n"
"tbz x20, #0, 30f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v30.b }[2], [x24], #0x1\n"
- "st1 { v29.b }[2], [x23], #0x1\n"
- "st1 { v28.b }[2], [x22], #0x1\n"
+ "st1 { v31.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v3.b }[2], [x9], #0x1\n"
+ "st1 { v23.b }[2], [x28], #0x1\n"
"b 30f\n"
"29:" // Oddments: Unroll 2: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v30.b }[0], [x24], #0x1\n"
- "st1 { v29.b }[0], [x23], #0x1\n"
- "st1 { v28.b }[0], [x22], #0x1\n"
+ "st1 { v31.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v3.b }[0], [x9], #0x1\n"
+ "st1 { v23.b }[0], [x28], #0x1\n"
"30:" // Oddments: Unroll 2: Oddment store: Bit 1: End
"31:" // Oddments: Unroll 2: After oddment store
"subs x20, x20, #0x4\n"
- "add x27, x27, #0x4\n"
+ "add x12, x12, #0x4\n"
"ble 35f\n"
"ldr q31, [%x[params], #0x0]\n"
- "ldr q23, [%x[params], #0x10]\n"
+ "ldr q1, [%x[params], #0x10]\n"
"movi v22.4s, #0x0\n"
- ".inst 0x6e8a9596 // udot v22.4s, v12.16b, v10.16b\n"
- "ldr q21, [%x[params], #0x20]\n"
- "ldr q19, [%x[params], #0x30]\n"
- "mov v30.16b, v31.16b\n"
- "mov v29.16b, v31.16b\n"
- "ldr q20, [%x[params], #0x40]\n"
- "ldr q26, [%x[params], #0x50]\n"
- "mov v28.16b, v31.16b\n"
- ".inst 0x6e8896ff // udot v31.4s, v23.16b, v8.16b\n"
- ".inst 0x6e859596 // udot v22.4s, v12.16b, v5.16b\n"
- ".inst 0x6e8a96fd // udot v29.4s, v23.16b, v10.16b\n"
- "movi v18.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "ldr q20, [%x[params], #0x20]\n"
+ "ldr q18, [%x[params], #0x30]\n"
+ "ldr q17, [%x[params], #0x40]\n"
+ "ldr q19, [%x[params], #0x50]\n"
+ ".inst 0x6e8495f6 // udot v22.4s, v15.16b, v4.16b\n"
"add %x[params], %x[params], #0x60\n"
- ".inst 0x6e8a96bf // udot v31.4s, v21.16b, v10.16b\n"
- "mov v17.16b, v22.16b\n .inst 0x6e899591 // udot v17.4s, v12.16b, v9.16b\n"
- "ext v10.16b, v10.16b, v10.16b, #0x1\n"
- ".inst 0x6e889596 // udot v22.4s, v12.16b, v8.16b\n"
- "ext v8.16b, v8.16b, v8.16b, #0x1\n"
- ".inst 0x6e8896fe // udot v30.4s, v23.16b, v8.16b\n"
- ".inst 0x6e8a96fc // udot v28.4s, v23.16b, v10.16b\n"
- ".inst 0x6e8a9592 // udot v18.4s, v12.16b, v10.16b\n"
- ".inst 0x6e8596bd // udot v29.4s, v21.16b, v5.16b\n"
- ".inst 0x6e85967f // udot v31.4s, v19.16b, v5.16b\n"
- "ext v5.16b, v5.16b, v5.16b, #0x1\n"
- ".inst 0x6e8a96be // udot v30.4s, v21.16b, v10.16b\n"
- ".inst 0x6e8596bc // udot v28.4s, v21.16b, v5.16b\n"
- "mls v31.4s, v22.4s, v16.4s\n"
- ".inst 0x6e859592 // udot v18.4s, v12.16b, v5.16b\n"
- ".inst 0x6e89967d // udot v29.4s, v19.16b, v9.16b\n"
+ "mov v26.16b, v31.16b\n"
+ "mov v3.16b, v31.16b\n"
+ "mov v23.16b, v31.16b\n"
+ ".inst 0x6e89943f // udot v31.4s, v1.16b, v9.16b\n"
+ ".inst 0x6e849423 // udot v3.4s, v1.16b, v4.16b\n"
+ ".inst 0x6e8695f6 // udot v22.4s, v15.16b, v6.16b\n"
+ ".inst 0x6e84969f // udot v31.4s, v20.16b, v4.16b\n"
+ "ext v4.16b, v4.16b, v4.16b, #0x1\n"
+ ".inst 0x6e849437 // udot v23.4s, v1.16b, v4.16b\n"
+ ".inst 0x6e8495f5 // udot v21.4s, v15.16b, v4.16b\n"
+ ".inst 0x6e869683 // udot v3.4s, v20.16b, v6.16b\n"
+ "mov v16.16b, v22.16b\n .inst 0x6e9995f0 // udot v16.4s, v15.16b, v25.16b\n"
+ ".inst 0x6e8995f6 // udot v22.4s, v15.16b, v9.16b\n"
"ext v9.16b, v9.16b, v9.16b, #0x1\n"
- ".inst 0x6e85967e // udot v30.4s, v19.16b, v5.16b\n"
- ".inst 0x6e89967c // udot v28.4s, v19.16b, v9.16b\n"
- "sqrdmulh v31.4s, v31.4s, v20.4s\n"
- "mov v7.16b, v18.16b\n .inst 0x6e899587 // udot v7.4s, v12.16b, v9.16b\n"
- ".inst 0x6e889592 // udot v18.4s, v12.16b, v8.16b\n"
- "mls v30.4s, v18.4s, v16.4s\n"
- "mls v29.4s, v17.4s, v16.4s\n"
- "mls v28.4s, v7.4s, v16.4s\n"
- "and v16.16b, v31.16b, v26.16b\n"
+ ".inst 0x6e86965f // udot v31.4s, v18.16b, v6.16b\n"
+ "ext v6.16b, v6.16b, v6.16b, #0x1\n"
+ ".inst 0x6e89943a // udot v26.4s, v1.16b, v9.16b\n"
+ ".inst 0x6e999643 // udot v3.4s, v18.16b, v25.16b\n"
+ "ext v25.16b, v25.16b, v25.16b, #0x1\n"
+ ".inst 0x6e869697 // udot v23.4s, v20.16b, v6.16b\n"
+ ".inst 0x6e8695f5 // udot v21.4s, v15.16b, v6.16b\n"
+ "mls v31.4s, v22.4s, v24.4s\n"
+ ".inst 0x6e84969a // udot v26.4s, v20.16b, v4.16b\n"
+ "mls v3.4s, v16.4s, v24.4s\n"
+ ".inst 0x6e999657 // udot v23.4s, v18.16b, v25.16b\n"
+ "mov v16.16b, v21.16b\n .inst 0x6e9995f0 // udot v16.4s, v15.16b, v25.16b\n"
+ ".inst 0x6e8995f5 // udot v21.4s, v15.16b, v9.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ ".inst 0x6e86965a // udot v26.4s, v18.16b, v6.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "mls v23.4s, v16.4s, v24.4s\n"
+ "and v16.16b, v31.16b, v19.16b\n"
+ "mls v26.4s, v21.4s, v24.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v17.4s\n"
+ "and v18.16b, v3.16b, v19.16b\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v30.4s, v30.4s, v20.4s\n"
- "sqrdmulh v29.4s, v29.4s, v20.4s\n"
- "sqrdmulh v28.4s, v28.4s, v20.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "and v18.16b, v30.16b, v26.16b\n"
- "and v17.16b, v29.16b, v26.16b\n"
- "and v16.16b, v28.16b, v26.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v17.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
+ "and v17.16b, v23.16b, v19.16b\n"
+ "and v16.16b, v26.16b, v19.16b\n"
+ "sqadd v3.4s, v3.4s, v18.4s\n"
+ "srshl v31.4s, v31.4s, v19.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqadd v30.4s, v30.4s, v18.4s\n"
- "sqadd v29.4s, v29.4s, v17.4s\n"
- "sqadd v28.4s, v28.4s, v16.4s\n"
- "srshl v31.4s, v31.4s, v26.4s\n"
- "srshl v30.4s, v30.4s, v26.4s\n"
- "srshl v29.4s, v29.4s, v26.4s\n"
- "srshl v28.4s, v28.4s, v26.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
+ "add v31.4s, v31.4s, v12.4s\n"
+ "sqadd v26.4s, v26.4s, v16.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v12.4s\n"
+ "smax v31.4s, v31.4s, v7.4s\n"
+ "srshl v26.4s, v26.4s, v19.4s\n"
+ "srshl v23.4s, v23.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v7.4s\n"
"smin v31.4s, v31.4s, v11.4s\n"
- "smin v30.4s, v30.4s, v11.4s\n"
- "smin v29.4s, v29.4s, v11.4s\n"
- "smin v28.4s, v28.4s, v11.4s\n"
+ "add v26.4s, v26.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "smin v3.4s, v3.4s, v11.4s\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smax v26.4s, v26.4s, v7.4s\n"
+ "smax v23.4s, v23.4s, v7.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "uzp1 v30.16b, v30.16b, v30.16b\n"
- "uzp1 v29.16b, v29.16b, v29.16b\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "smin v26.4s, v26.4s, v11.4s\n"
+ "smin v23.4s, v23.4s, v11.4s\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "uzp1 v26.16b, v26.16b, v26.16b\n"
+ "uzp1 v23.16b, v23.16b, v23.16b\n"
"32:" // Oddments: Unroll 3: Oddment store
- "add x25, x25, x27\n"
- "add x24, x24, x27\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
+ "add x11, x11, x12\n"
+ "add x10, x10, x12\n"
+ "add x9, x9, x12\n"
+ "add x28, x28, x12\n"
"tbz x20, #1, 33f\n"
- "st1 { v31.h }[0], [x25], #0x2\n"
- "st1 { v30.h }[0], [x24], #0x2\n"
- "st1 { v29.h }[0], [x23], #0x2\n"
- "st1 { v28.h }[0], [x22], #0x2\n"
+ "st1 { v31.h }[0], [x11], #0x2\n"
+ "st1 { v26.h }[0], [x10], #0x2\n"
+ "st1 { v3.h }[0], [x9], #0x2\n"
+ "st1 { v23.h }[0], [x28], #0x2\n"
"tbz x20, #0, 34f\n"
- "st1 { v31.b }[2], [x25], #0x1\n"
- "st1 { v30.b }[2], [x24], #0x1\n"
- "st1 { v29.b }[2], [x23], #0x1\n"
- "st1 { v28.b }[2], [x22], #0x1\n"
+ "st1 { v31.b }[2], [x11], #0x1\n"
+ "st1 { v26.b }[2], [x10], #0x1\n"
+ "st1 { v3.b }[2], [x9], #0x1\n"
+ "st1 { v23.b }[2], [x28], #0x1\n"
"b 34f\n"
"33:" // Oddments: Unroll 3: Oddment store: Bit 1: Unset
- "st1 { v31.b }[0], [x25], #0x1\n"
- "st1 { v30.b }[0], [x24], #0x1\n"
- "st1 { v29.b }[0], [x23], #0x1\n"
- "st1 { v28.b }[0], [x22], #0x1\n"
+ "st1 { v31.b }[0], [x11], #0x1\n"
+ "st1 { v26.b }[0], [x10], #0x1\n"
+ "st1 { v3.b }[0], [x9], #0x1\n"
+ "st1 { v23.b }[0], [x28], #0x1\n"
"34:" // Oddments: Unroll 3: Oddment store: Bit 1: End
"35:" // End
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index d5b55cb9c5..56a81849ee 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[16];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -91,1072 +91,1072 @@ void a64_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v14.16b }, [x20]\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v19.16b }, [x21]\n"
- "ld1r { v13.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v29.8h }, [x21]\n"
- "ld1r { v12.8h }, [x20]\n"
"mov x17, #0x0\n"
"mov x16, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
"add x15, %x[params], %[offsetof_Params_inptrs]\n"
"ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
"ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
"ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "usubl v23.8h, v23.8b, v19.8b\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "usubl v16.8h, v16.8b, v19.8b\n"
- "usubl v1.8h, v1.8b, v19.8b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "usubl v5.8h, v5.8b, v19.8b\n"
- "usubl v26.8h, v26.8b, v19.8b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "usubl v18.8h, v18.8b, v19.8b\n"
- "usubl v31.8h, v31.8b, v19.8b\n"
- "ldr d20, [x14, #0x40]\n"
+ "lsr x11, x8, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v11.16b }, [x20]\n"
+ "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
+ "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v16.16b }, [x21]\n"
+ "ld1r { v12.8h }, [x20]\n"
+ "add x21, x23, %[offsetof_Requantize32_minval]\n"
+ "add x20, x23, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v14.8h }, [x21]\n"
+ "ld1r { v13.8h }, [x20]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x11, 3f\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "subs x11, x11, #0x1\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "usubl v15.8h, v15.8b, v16.8b\n"
+ "usubl v4.8h, v4.8b, v16.8b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v25.8h, v25.8b, v19.8b\n"
- "usubl v20.8h, v20.8b, v19.8b\n"
- "ldr q9, [x20, #0x0]\n"
- "ldr q24, [x20, #0x10]\n"
+ "usubl v5.8h, v5.8b, v16.8b\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
+ "usubl v25.8h, v25.8b, v16.8b\n"
+ "usubl v10.8h, v10.8b, v16.8b\n"
+ "usubl v6.8h, v6.8b, v16.8b\n"
+ "usubl v7.8h, v7.8b, v16.8b\n"
+ "ldr q2, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "ldp x23, x22, [x15, #0x0]\n"
"add x20, x20, #0x20\n"
+ "usubl v9.8h, v9.8b, v16.8b\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x23, x22, [x15, #0x0]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
"ldp x21, x20, [x15, #0x10]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d22, [x23, x17]\n"
- "ldr d4, [x22, x17]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d8, [x21, x17]\n"
- "ldr d27, [x20, x17]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ldr d19, [x23, x17]\n"
+ "ldr d21, [x22, x17]\n"
+ "ldr d29, [x21, x17]\n"
+ "ldr d22, [x20, x17]\n"
"ldr x20, [x15, #0x20]\n"
- "ldr d15, [x20, x17]\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
+ "ldr d20, [x20, x17]\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q3, [x13, #0x0]\n"
- "ldr q17, [x12, #0x0]\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
- "ldr q21, [x13, #0x10]\n"
- "ldr q28, [x12, #0x10]\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "ldr x20, [x15, #0x28]\n"
- "ldr d11, [x20, x17]\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "ldr x20, [x15, #0x38]\n"
- "ldr d4, [x20, x17]\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "usubl v11.8h, v11.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "ldr x20, [x15, #0x40]\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "ldr x27, [x15, #0x48]\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "ldr x26, [x15, #0x50]\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "ldr d8, [x20, x17]\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
+ "ldr q17, [x13, #0x0]\n"
+ "ldr q26, [x12, #0x0]\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "ldr q28, [x13, #0x10]\n"
+ "ldr q23, [x12, #0x10]\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "ldr x24, [x15, #0x28]\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "ldr x23, [x15, #0x38]\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "ldr x22, [x15, #0x30]\n"
+ "ldr x21, [x15, #0x40]\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
+ "ldr x26, [x15, #0x48]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "ldr d21, [x24, x17]\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "ldr d29, [x21, x17]\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
"ldr x25, [x15, #0x58]\n"
"ldr x24, [x15, #0x60]\n"
- "smlal v2.4s, v11.4h, v31.4h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
"ldr x23, [x15, #0x68]\n"
+ "usubl v18.8h, v18.8b, v11.8b\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
"ldr x22, [x15, #0x70]\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
- "smlal v9.4s, v4.4h, v16.4h\n"
"ldr x21, [x15, #0x78]\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "ldr d22, [x26, x17]\n"
+ "smlal v0.4s, v21.4h, v6.4h\n"
+ "smlal2 v24.4s, v21.8h, v6.8h\n"
+ "ldr d21, [x20, x17]\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "ldr d27, [x27, x17]\n"
- "smlal2 v30.4s, v11.8h, v31.8h\n"
- "ldr d11, [x26, x17]\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
"add x14, x14, #0x48\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal v10.4s, v22.4h, v20.4h\n"
- "usubl v11.8h, v11.8b, v14.8b\n"
- "subs x8, x8, #0x1\n"
- "smlal2 v24.4s, v4.8h, v16.8h\n"
- "smlal v9.4s, v8.4h, v1.4h\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
+ "subs x11, x11, #0x1\n"
+ "smlal v31.4s, v19.4h, v9.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
"add x13, x13, #0x20\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "ldr d20, [x25, x17]\n"
"add x12, x12, #0x20\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "ldr d15, [x25, x17]\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v20.8h\n"
- "ldr d22, [x24, x17]\n"
- "smlal v7.4s, v4.4h, v23.4h\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "smlal v2.4s, v27.4h, v18.4h\n"
- "smlal v10.4s, v27.4h, v26.4h\n"
- "smlal2 v24.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v20.4h\n"
- "smlal2 v0.4s, v4.8h, v23.8h\n"
- "ldr d4, [x23, x17]\n"
- "smlal2 v30.4s, v27.8h, v18.8h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v26.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "usubl v26.8h, v26.8b, v14.8b\n"
- "smlal v2.4s, v11.4h, v23.4h\n"
- "smlal v10.4s, v15.4h, v1.4h\n"
- "smlal2 v24.4s, v27.8h, v20.8h\n"
- "smlal v9.4s, v11.4h, v5.4h\n"
- "smlal2 v0.4s, v8.8h, v16.8h\n"
- "ldr d8, [x21, x17]\n"
- "smlal2 v30.4s, v11.8h, v23.8h\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v15.8h, v1.8h\n"
- "smlal v7.4s, v27.4h, v25.4h\n"
+ "smlal v2.4s, v18.4h, v4.4h\n"
+ "smlal2 v1.4s, v18.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v9.8h\n"
+ "ldr d19, [x24, x17]\n"
+ "smlal v8.4s, v18.4h, v15.4h\n"
+ "smlal v31.4s, v22.4h, v25.4h\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
+ "smlal2 v30.4s, v18.8h, v15.8h\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v0.4s, v22.4h, v10.4h\n"
+ "smlal2 v24.4s, v22.8h, v10.8h\n"
+ "smlal v2.4s, v29.4h, v5.4h\n"
+ "smlal2 v1.4s, v29.8h, v5.8h\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "smlal2 v27.4s, v22.8h, v25.8h\n"
+ "ldr d25, [x22, x17]\n"
+ "smlal v8.4s, v29.4h, v4.4h\n"
+ "usubl v18.8h, v18.8b, v11.8b\n"
+ "smlal v31.4s, v20.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d4, [x21, x17]\n"
"add x17, x17, #0x8\n"
- "smlal v2.4s, v22.4h, v5.4h\n"
- "smlal v10.4s, v4.4h, v18.4h\n"
- "smlal2 v24.4s, v11.8h, v5.8h\n"
- "smlal v9.4s, v22.4h, v31.4h\n"
- "sqrdmulh v9.4s, v9.4s, v3.4s\n"
- "smlal2 v0.4s, v27.8h, v25.8h\n"
- "smlal2 v30.4s, v22.8h, v5.8h\n"
- "and v27.16b, v9.16b, v17.16b\n"
- "smlal2 v6.4s, v4.8h, v18.8h\n"
- "smlal v7.4s, v15.4h, v18.4h\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "smlal v2.4s, v26.4h, v25.4h\n"
- "smlal v10.4s, v26.4h, v31.4h\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
- "smlal2 v24.4s, v22.8h, v31.8h\n"
- "smlal2 v0.4s, v15.8h, v18.8h\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- "smlal2 v30.4s, v26.8h, v25.8h\n"
- "smlal2 v6.4s, v26.8h, v31.8h\n"
- "and v31.16b, v24.16b, v28.16b\n"
- "smlal v7.4s, v4.4h, v20.4h\n"
- "smlal v2.4s, v8.4h, v20.4h\n"
- "sqrdmulh v7.4s, v7.4s, v3.4s\n"
- "smlal v10.4s, v8.4h, v25.4h\n"
- "smlal2 v0.4s, v4.8h, v20.8h\n"
- "sqrdmulh v2.4s, v2.4s, v3.4s\n"
- "smlal2 v30.4s, v8.8h, v20.8h\n"
- "smlal2 v6.4s, v8.8h, v25.8h\n"
- "sqrdmulh v10.4s, v10.4s, v3.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v22.16b, v7.16b, v17.16b\n"
- "sqrdmulh v0.4s, v0.4s, v21.4s\n"
- "and v3.16b, v2.16b, v17.16b\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "and v11.16b, v10.16b, v17.16b\n"
- "sqrdmulh v6.4s, v6.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v31.4s\n"
- "sshr v22.4s, v22.4s, #0x1f\n"
- "and v20.16b, v0.16b, v28.16b\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "and v31.16b, v30.16b, v28.16b\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "and v18.16b, v6.16b, v28.16b\n"
- "sqadd v7.4s, v7.4s, v22.4s\n"
+ "smlal v0.4s, v21.4h, v15.4h\n"
+ "smlal2 v24.4s, v21.8h, v15.8h\n"
+ "smlal v2.4s, v22.4h, v9.4h\n"
+ "smlal2 v1.4s, v22.8h, v9.8h\n"
+ "usubl v25.8h, v25.8b, v11.8b\n"
+ "smlal2 v27.4s, v20.8h, v5.8h\n"
+ "smlal v8.4s, v22.4h, v7.4h\n"
+ "usubl v4.8h, v4.8b, v11.8b\n"
+ "smlal v31.4s, v18.4h, v10.4h\n"
+ "smlal2 v30.4s, v22.8h, v7.8h\n"
+ "smlal v0.4s, v19.4h, v3.4h\n"
+ "smlal2 v24.4s, v19.8h, v3.8h\n"
+ "smlal v2.4s, v21.4h, v3.4h\n"
+ "smlal2 v1.4s, v21.8h, v3.8h\n"
+ "smlal2 v27.4s, v18.8h, v10.8h\n"
+ "smlal v8.4s, v20.4h, v10.4h\n"
+ "smlal v31.4s, v25.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v10.8h\n"
+ "smlal v0.4s, v25.4h, v7.4h\n"
+ "smlal2 v24.4s, v25.8h, v7.8h\n"
+ "smlal v2.4s, v19.4h, v6.4h\n"
+ "smlal2 v1.4s, v19.8h, v6.8h\n"
+ "smlal2 v27.4s, v25.8h, v6.8h\n"
+ "smlal v8.4s, v18.4h, v9.4h\n"
+ "smlal v31.4s, v4.4h, v7.4h\n"
+ "smlal2 v30.4s, v18.8h, v9.8h\n"
+ "smlal v0.4s, v4.4h, v9.4h\n"
+ "smlal2 v24.4s, v4.8h, v9.8h\n"
+ "sqrdmulh v2.4s, v2.4s, v17.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v28.4s\n"
+ "smlal2 v27.4s, v4.8h, v7.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v17.4s\n"
+ "and v18.16b, v2.16b, v26.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v28.4s\n"
+ "and v4.16b, v1.16b, v23.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v17.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v21.16b, v8.16b, v26.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v28.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v28.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v20.16b, v0.16b, v26.16b\n"
+ "sqadd v2.4s, v2.4s, v18.4s\n"
+ "and v19.16b, v31.16b, v26.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v18.16b, v30.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v4.4s\n"
"sshr v20.4s, v20.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v3.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v11.4s\n"
+ "and v17.16b, v24.16b, v23.16b\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v3.16b, v27.16b, v23.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v17.4s\n"
- "srshl v7.4s, v7.4s, v17.4s\n"
"sqadd v0.4s, v0.4s, v20.4s\n"
- "srshl v2.4s, v2.4s, v17.4s\n"
- "sqadd v30.4s, v30.4s, v31.4s\n"
- "srshl v10.4s, v10.4s, v17.4s\n"
- "sqadd v6.4s, v6.4s, v18.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v28.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v28.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v19.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v26.4s\n"
+ "srshl v8.4s, v8.4s, v26.4s\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "srshl v0.4s, v0.4s, v26.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "srshl v31.4s, v31.4s, v26.4s\n"
+ "sqadd v27.4s, v27.4s, v3.4s\n"
+ "srshl v1.4s, v1.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v28.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d9, [x11, x16]\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v23.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v23.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v23.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "str d7, [x10, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "str d2, [x9, x16]\n"
- "str d10, [x28, x16]\n"
- "ldr q9, [x20, #0x0]\n"
- "ldr q24, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str d2, [x10, x16]\n"
+ "str d8, [x9, x16]\n"
+ "str d0, [x28, x16]\n"
+ "str d31, [x27, x16]\n"
"add x16, x16, #0x8\n"
+ "ldr q2, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "add x20, x20, #0x20\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
- "ldr d20, [x14, #0x40]\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldp x23, x22, [x15, #0x0]\n"
- "usubl v23.8h, v23.8b, v19.8b\n"
- "usubl v16.8h, v16.8b, v19.8b\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "usubl v15.8h, v15.8b, v16.8b\n"
+ "usubl v4.8h, v4.8b, v16.8b\n"
+ "usubl v5.8h, v5.8b, v16.8b\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
"ldp x21, x20, [x15, #0x10]\n"
- "ldr d22, [x23, x17]\n"
- "usubl v1.8h, v1.8b, v19.8b\n"
- "usubl v5.8h, v5.8b, v19.8b\n"
- "ldr d4, [x22, x17]\n"
- "ldr d8, [x21, x17]\n"
- "usubl v26.8h, v26.8b, v19.8b\n"
- "usubl v18.8h, v18.8b, v19.8b\n"
- "ldr d27, [x20, x17]\n"
+ "usubl v25.8h, v25.8b, v16.8b\n"
+ "usubl v10.8h, v10.8b, v16.8b\n"
+ "usubl v6.8h, v6.8b, v16.8b\n"
+ "usubl v7.8h, v7.8b, v16.8b\n"
+ "ldr d19, [x23, x17]\n"
+ "ldr d21, [x22, x17]\n"
+ "ldr d29, [x21, x17]\n"
+ "ldr d22, [x20, x17]\n"
+ "usubl v9.8h, v9.8b, v16.8b\n"
"ldr x20, [x15, #0x20]\n"
- "usubl v31.8h, v31.8b, v19.8b\n"
- "usubl v25.8h, v25.8b, v19.8b\n"
- "ldr d15, [x20, x17]\n"
- "usubl v20.8h, v20.8b, v19.8b\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
+ "ldr d20, [x20, x17]\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q28, [x13, #0x0]\n"
- "ldr q17, [x12, #0x0]\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
- "ldr q21, [x13, #0x10]\n"
- "ldr q3, [x12, #0x10]\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "ldr x20, [x15, #0x28]\n"
- "ldr d11, [x20, x17]\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "ldr x20, [x15, #0x38]\n"
- "ldr d4, [x20, x17]\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "usubl v11.8h, v11.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
+ "ldr q26, [x13, #0x0]\n"
+ "ldr q28, [x12, #0x0]\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "ldr q17, [x13, #0x10]\n"
+ "ldr q23, [x12, #0x10]\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "ldr x23, [x15, #0x28]\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "ldr x22, [x15, #0x38]\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "ldr x21, [x15, #0x30]\n"
"ldr x20, [x15, #0x40]\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
"ldr x26, [x15, #0x48]\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
"ldr x25, [x15, #0x50]\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "ldr d8, [x20, x17]\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
+ "ldr d21, [x23, x17]\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "ldr d18, [x21, x17]\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "ldr d29, [x20, x17]\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
"ldr x24, [x15, #0x58]\n"
"ldr x23, [x15, #0x60]\n"
- "smlal v2.4s, v11.4h, v31.4h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
"ldr x22, [x15, #0x68]\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
"ldr x21, [x15, #0x70]\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
- "smlal v9.4s, v4.4h, v16.4h\n"
"ldr x20, [x15, #0x78]\n"
- "tst x7, #0x7\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "ldr d27, [x26, x17]\n"
- "smlal2 v30.4s, v11.8h, v31.8h\n"
- "ldr d11, [x25, x17]\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "usubl v18.8h, v18.8b, v11.8b\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "ldr d22, [x26, x17]\n"
+ "smlal v0.4s, v21.4h, v6.4h\n"
+ "smlal2 v24.4s, v21.8h, v6.8h\n"
+ "ldr d21, [x25, x17]\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
+ "tst x8, #0x7\n"
"add x13, x13, #0x20\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal v10.4s, v22.4h, v20.4h\n"
- "usubl v11.8h, v11.8b, v14.8b\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
"add x12, x12, #0x20\n"
- "smlal2 v24.4s, v4.8h, v16.8h\n"
- "smlal v9.4s, v8.4h, v1.4h\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "ldr d15, [x24, x17]\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v20.8h\n"
- "ldr d22, [x23, x17]\n"
- "smlal v7.4s, v4.4h, v23.4h\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "smlal v2.4s, v27.4h, v18.4h\n"
- "smlal v10.4s, v27.4h, v26.4h\n"
- "smlal2 v24.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v20.4h\n"
- "smlal2 v0.4s, v4.8h, v23.8h\n"
- "ldr d4, [x22, x17]\n"
- "smlal2 v30.4s, v27.8h, v18.8h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v26.8h\n"
- "ldr d26, [x21, x17]\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "usubl v26.8h, v26.8b, v14.8b\n"
- "smlal v2.4s, v11.4h, v23.4h\n"
- "smlal v10.4s, v15.4h, v1.4h\n"
- "smlal2 v24.4s, v27.8h, v20.8h\n"
- "smlal v9.4s, v11.4h, v5.4h\n"
- "smlal2 v0.4s, v8.8h, v16.8h\n"
- "ldr d16, [x20, x17]\n"
- "smlal2 v30.4s, v11.8h, v23.8h\n"
- "usubl v16.8h, v16.8b, v14.8b\n"
- "smlal2 v6.4s, v15.8h, v1.8h\n"
- "smlal v7.4s, v27.4h, v25.4h\n"
+ "smlal v31.4s, v18.4h, v9.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "ldr d20, [x24, x17]\n"
+ "smlal v2.4s, v19.4h, v4.4h\n"
+ "smlal2 v1.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v18.8h, v9.8h\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v8.4s, v19.4h, v15.4h\n"
+ "smlal v31.4s, v22.4h, v25.4h\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
+ "smlal2 v30.4s, v19.8h, v15.8h\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v0.4s, v22.4h, v10.4h\n"
+ "smlal2 v24.4s, v22.8h, v10.8h\n"
+ "smlal v2.4s, v29.4h, v5.4h\n"
+ "smlal2 v1.4s, v29.8h, v5.8h\n"
+ "usubl v18.8h, v18.8b, v11.8b\n"
+ "smlal2 v27.4s, v22.8h, v25.8h\n"
+ "ldr d25, [x21, x17]\n"
+ "smlal v8.4s, v29.4h, v4.4h\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "smlal v31.4s, v20.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x20, x17]\n"
"add x17, x17, #0x8\n"
- "smlal v2.4s, v22.4h, v5.4h\n"
- "smlal v10.4s, v4.4h, v18.4h\n"
- "smlal2 v24.4s, v11.8h, v5.8h\n"
- "smlal v9.4s, v22.4h, v31.4h\n"
- "sqrdmulh v9.4s, v9.4s, v28.4s\n"
- "smlal2 v0.4s, v27.8h, v25.8h\n"
- "smlal2 v30.4s, v22.8h, v5.8h\n"
- "and v1.16b, v9.16b, v17.16b\n"
- "smlal2 v6.4s, v4.8h, v18.8h\n"
- "smlal v7.4s, v15.4h, v18.4h\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "smlal v2.4s, v26.4h, v25.4h\n"
- "smlal v10.4s, v26.4h, v31.4h\n"
- "sqadd v9.4s, v9.4s, v1.4s\n"
- "smlal2 v24.4s, v22.8h, v31.8h\n"
- "smlal2 v0.4s, v15.8h, v18.8h\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- "smlal2 v30.4s, v26.8h, v25.8h\n"
- "smlal2 v6.4s, v26.8h, v31.8h\n"
- "and v31.16b, v24.16b, v3.16b\n"
- "smlal v7.4s, v4.4h, v20.4h\n"
- "smlal v2.4s, v16.4h, v20.4h\n"
- "sqrdmulh v7.4s, v7.4s, v28.4s\n"
- "smlal v10.4s, v16.4h, v25.4h\n"
- "smlal2 v0.4s, v4.8h, v20.8h\n"
- "sqrdmulh v2.4s, v2.4s, v28.4s\n"
- "smlal2 v30.4s, v16.8h, v20.8h\n"
- "smlal2 v6.4s, v16.8h, v25.8h\n"
- "sqrdmulh v10.4s, v10.4s, v28.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v22.16b, v7.16b, v17.16b\n"
- "sqrdmulh v0.4s, v0.4s, v21.4s\n"
- "and v15.16b, v2.16b, v17.16b\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "and v11.16b, v10.16b, v17.16b\n"
- "sqrdmulh v6.4s, v6.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v31.4s\n"
+ "smlal v0.4s, v21.4h, v15.4h\n"
+ "smlal2 v24.4s, v21.8h, v15.8h\n"
+ "smlal v2.4s, v22.4h, v9.4h\n"
+ "smlal2 v1.4s, v22.8h, v9.8h\n"
+ "usubl v25.8h, v25.8b, v11.8b\n"
+ "smlal2 v27.4s, v20.8h, v5.8h\n"
+ "smlal v8.4s, v22.4h, v7.4h\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v31.4s, v19.4h, v10.4h\n"
+ "smlal2 v30.4s, v22.8h, v7.8h\n"
+ "smlal v0.4s, v18.4h, v3.4h\n"
+ "smlal2 v24.4s, v18.8h, v3.8h\n"
+ "smlal v2.4s, v21.4h, v3.4h\n"
+ "smlal2 v1.4s, v21.8h, v3.8h\n"
+ "smlal2 v27.4s, v19.8h, v10.8h\n"
+ "smlal v8.4s, v20.4h, v10.4h\n"
+ "smlal v31.4s, v25.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v10.8h\n"
+ "smlal v0.4s, v25.4h, v7.4h\n"
+ "smlal2 v24.4s, v25.8h, v7.8h\n"
+ "smlal v2.4s, v18.4h, v6.4h\n"
+ "smlal2 v1.4s, v18.8h, v6.8h\n"
+ "smlal2 v27.4s, v25.8h, v6.8h\n"
+ "smlal v8.4s, v19.4h, v9.4h\n"
+ "smlal v31.4s, v29.4h, v7.4h\n"
+ "smlal2 v30.4s, v19.8h, v9.8h\n"
+ "smlal v0.4s, v29.4h, v9.4h\n"
+ "smlal2 v24.4s, v29.8h, v9.8h\n"
+ "sqrdmulh v2.4s, v2.4s, v26.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v17.4s\n"
+ "smlal2 v27.4s, v29.8h, v7.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v26.4s\n"
+ "and v25.16b, v2.16b, v28.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v26.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v17.4s\n"
+ "and v22.16b, v1.16b, v23.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "and v21.16b, v8.16b, v28.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v17.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v17.4s\n"
"sshr v22.4s, v22.4s, #0x1f\n"
- "and v18.16b, v0.16b, v3.16b\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "and v23.16b, v30.16b, v3.16b\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "and v21.16b, v6.16b, v3.16b\n"
- "sqadd v7.4s, v7.4s, v22.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v15.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v11.4s\n"
+ "and v20.16b, v0.16b, v28.16b\n"
+ "sqadd v2.4s, v2.4s, v25.4s\n"
+ "and v19.16b, v31.16b, v28.16b\n"
"sshr v21.4s, v21.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v17.4s\n"
- "srshl v7.4s, v7.4s, v17.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v2.4s, v2.4s, v17.4s\n"
- "sqadd v30.4s, v30.4s, v23.4s\n"
- "srshl v10.4s, v10.4s, v17.4s\n"
- "sqadd v6.4s, v6.4s, v21.4s\n"
- "srshl v24.4s, v24.4s, v3.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v3.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v3.4s\n"
+ "and v10.16b, v30.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v22.4s\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v17.16b, v24.16b, v23.16b\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v22.16b, v27.16b, v23.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v0.4s, v0.4s, v20.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v19.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v28.4s\n"
+ "srshl v8.4s, v8.4s, v28.4s\n"
+ "sqadd v30.4s, v30.4s, v10.4s\n"
+ "srshl v0.4s, v0.4s, v28.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "srshl v31.4s, v31.4s, v28.4s\n"
+ "sqadd v27.4s, v27.4s, v22.4s\n"
+ "srshl v1.4s, v1.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v3.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d9, [x11, x16]\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v23.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v23.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v23.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "str d7, [x10, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "str d2, [x9, x16]\n"
- "str d10, [x28, x16]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str d2, [x10, x16]\n"
+ "str d8, [x9, x16]\n"
+ "str d0, [x28, x16]\n"
+ "str d31, [x27, x16]\n"
"add x16, x16, #0x8\n"
"beq 64f\n"
"add x14, x14, #0x48\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v9.4s }, [x20], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v24.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v24.s }[2], [x20]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v2.4s }, [x20], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v1.d }[0], [x20], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v1.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v24.s }[0], [x20]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v1.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v2.d }[0], [x20], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v2.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v9.s }[0], [x20]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v2.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "usubl v23.8h, v23.8b, v19.8b\n"
- "usubl v16.8h, v16.8b, v19.8b\n"
- "ldr d20, [x14, #0x40]\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "usubl v15.8h, v15.8b, v16.8b\n"
+ "usubl v4.8h, v4.8b, v16.8b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldp x24, x23, [x15, #0x0]\n"
- "usubl v1.8h, v1.8b, v19.8b\n"
- "usubl v5.8h, v5.8b, v19.8b\n"
+ "usubl v5.8h, v5.8b, v16.8b\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
+ "usubl v25.8h, v25.8b, v16.8b\n"
+ "usubl v10.8h, v10.8b, v16.8b\n"
+ "usubl v6.8h, v6.8b, v16.8b\n"
+ "usubl v7.8h, v7.8b, v16.8b\n"
"ldp x22, x21, [x15, #0x10]\n"
- "ldr x20, [x15, #0x20]\n"
- "usubl v26.8h, v26.8b, v19.8b\n"
- "usubl v18.8h, v18.8b, v19.8b\n"
- "usubl v31.8h, v31.8b, v19.8b\n"
- "usubl v25.8h, v25.8b, v19.8b\n"
- "usubl v20.8h, v20.8b, v19.8b\n"
+ "usubl v9.8h, v9.8b, v16.8b\n"
"add x24, x24, x17\n"
"add x23, x23, x17\n"
+ "ldr x20, [x15, #0x20]\n"
"add x22, x22, x17\n"
"add x21, x21, x17\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v22.s }[0], [x24], #0x4\n"
- "ld1 { v4.s }[0], [x23], #0x4\n"
- "ld1 { v8.s }[0], [x22], #0x4\n"
- "ld1 { v27.s }[0], [x21], #0x4\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v8.h }[2], [x22], #0x2\n"
- "ld1 { v27.h }[2], [x21], #0x2\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v8.b }[6], [x22]\n"
- "ld1 { v27.b }[6], [x21]\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v19.s }[0], [x24], #0x4\n"
+ "ld1 { v21.s }[0], [x23], #0x4\n"
+ "ld1 { v29.s }[0], [x22], #0x4\n"
+ "ld1 { v22.s }[0], [x21], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v19.h }[2], [x24], #0x2\n"
+ "ld1 { v21.h }[2], [x23], #0x2\n"
+ "ld1 { v29.h }[2], [x22], #0x2\n"
+ "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[6], [x24]\n"
+ "ld1 { v21.b }[6], [x23]\n"
+ "ld1 { v29.b }[6], [x22]\n"
+ "ld1 { v22.b }[6], [x21]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v8.b }[4], [x22]\n"
- "ld1 { v27.b }[4], [x21]\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[4], [x24]\n"
+ "ld1 { v21.b }[4], [x23]\n"
+ "ld1 { v29.b }[4], [x22]\n"
+ "ld1 { v22.b }[4], [x21]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v22.h }[0], [x24], #0x2\n"
- "ld1 { v4.h }[0], [x23], #0x2\n"
- "ld1 { v8.h }[0], [x22], #0x2\n"
- "ld1 { v27.h }[0], [x21], #0x2\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v8.b }[2], [x22]\n"
- "ld1 { v27.b }[2], [x21]\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v19.h }[0], [x24], #0x2\n"
+ "ld1 { v21.h }[0], [x23], #0x2\n"
+ "ld1 { v29.h }[0], [x22], #0x2\n"
+ "ld1 { v22.h }[0], [x21], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[2], [x24]\n"
+ "ld1 { v21.b }[2], [x23]\n"
+ "ld1 { v29.b }[2], [x22]\n"
+ "ld1 { v22.b }[2], [x21]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[0], [x24]\n"
- "ld1 { v4.b }[0], [x23]\n"
- "ld1 { v8.b }[0], [x22]\n"
- "ld1 { v27.b }[0], [x21]\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[0], [x24]\n"
+ "ld1 { v21.b }[0], [x23]\n"
+ "ld1 { v29.b }[0], [x22]\n"
+ "ld1 { v22.b }[0], [x21]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "usubl v22.8h, v22.8b, v14.8b\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
"ldr x20, [x15, #0x28]\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
"add x20, x20, x17\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "tbz x8, #2, 13f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 12f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x8, #1, 14f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"15:" // Oddments: Load (3, 0): Bit 2: End
- "usubl v21.8h, v21.8b, v14.8b\n"
- "smlal v2.4s, v21.4h, v31.4h\n"
- "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x30]\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v0.4s, v17.4h, v6.4h\n"
+ "smlal2 v24.4s, v17.8h, v6.8h\n"
"add x20, x20, x17\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "tbz x8, #2, 17f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 16f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 18f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"19:" // Oddments: Load (3, 3): Bit 2: End
- "usubl v28.8h, v28.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x38]\n"
- "smlal v10.4s, v28.4h, v20.4h\n"
- "smlal2 v6.4s, v28.8h, v20.8h\n"
+ "smlal v31.4s, v16.4h, v9.4h\n"
+ "smlal2 v27.4s, v16.8h, v9.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 21f\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 20f\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "tbz x8, #2, 21f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 20f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 1): Bit 2: Unset
- "tbz x7, #1, 22f\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x8, #1, 22f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"23:" // Oddments: Load (0, 1): Bit 2: End
- "usubl v22.8h, v22.8b, v14.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x40]\n"
- "smlal v9.4s, v22.4h, v16.4h\n"
- "smlal2 v24.4s, v22.8h, v16.8h\n"
- "smlal v7.4s, v22.4h, v23.4h\n"
- "smlal2 v0.4s, v22.8h, v23.8h\n"
+ "smlal v2.4s, v17.4h, v4.4h\n"
+ "smlal2 v1.4s, v17.8h, v4.8h\n"
+ "smlal v8.4s, v17.4h, v15.4h\n"
+ "smlal2 v30.4s, v17.8h, v15.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (0, 2): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"27:" // Oddments: Load (0, 2): Bit 2: End
- "usubl v21.8h, v21.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x48]\n"
- "smlal v9.4s, v21.4h, v1.4h\n"
- "smlal2 v24.4s, v21.8h, v1.8h\n"
- "smlal v7.4s, v21.4h, v16.4h\n"
- "smlal2 v0.4s, v21.8h, v16.8h\n"
+ "smlal v2.4s, v16.4h, v5.4h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal v8.4s, v16.4h, v4.4h\n"
+ "smlal2 v30.4s, v16.8h, v4.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "tbz x8, #2, 29f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 28f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x7, #1, 30f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 30f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "usubl v28.8h, v28.8b, v14.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x50]\n"
- "smlal v9.4s, v28.4h, v20.4h\n"
- "smlal2 v24.4s, v28.8h, v20.8h\n"
- "smlal v7.4s, v28.4h, v25.4h\n"
- "smlal2 v0.4s, v28.8h, v25.8h\n"
+ "smlal v2.4s, v17.4h, v9.4h\n"
+ "smlal2 v1.4s, v17.8h, v9.8h\n"
+ "smlal v8.4s, v17.4h, v7.4h\n"
+ "smlal2 v30.4s, v17.8h, v7.8h\n"
+ "smlal v0.4s, v17.4h, v10.4h\n"
+ "smlal2 v24.4s, v17.8h, v10.8h\n"
+ "smlal v31.4s, v17.4h, v25.4h\n"
"add x20, x20, x17\n"
- "smlal v2.4s, v28.4h, v18.4h\n"
- "smlal2 v30.4s, v28.8h, v18.8h\n"
- "smlal v10.4s, v28.4h, v26.4h\n"
- "smlal2 v6.4s, v28.8h, v26.8h\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "smlal2 v27.4s, v17.8h, v25.8h\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (1, 0): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"35:" // Oddments: Load (1, 0): Bit 2: End
- "usubl v8.8h, v8.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x58]\n"
- "smlal v9.4s, v8.4h, v5.4h\n"
- "smlal2 v24.4s, v8.8h, v5.8h\n"
- "smlal v2.4s, v8.4h, v23.4h\n"
- "smlal2 v30.4s, v8.8h, v23.8h\n"
+ "smlal v2.4s, v16.4h, v3.4h\n"
+ "smlal2 v1.4s, v16.8h, v3.8h\n"
+ "smlal v0.4s, v16.4h, v15.4h\n"
+ "smlal2 v24.4s, v16.8h, v15.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"39:" // Oddments: Load (1, 3): Bit 2: End
- "usubl v8.8h, v8.8b, v14.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x60]\n"
- "smlal v7.4s, v8.4h, v18.4h\n"
- "smlal2 v0.4s, v8.8h, v18.8h\n"
- "smlal v10.4s, v8.4h, v1.4h\n"
- "smlal2 v6.4s, v8.8h, v1.8h\n"
+ "smlal v8.4s, v17.4h, v10.4h\n"
+ "smlal2 v30.4s, v17.8h, v10.8h\n"
+ "smlal v31.4s, v17.4h, v5.4h\n"
+ "smlal2 v27.4s, v17.8h, v5.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v17.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v17.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[6], [x20]\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[4], [x20]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v17.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[2], [x20]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[0], [x20]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"43:" // Oddments: Load (2, 0): Bit 2: End
- "usubl v17.8h, v17.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x68]\n"
- "smlal v9.4s, v17.4h, v31.4h\n"
- "smlal2 v24.4s, v17.8h, v31.8h\n"
- "smlal v2.4s, v17.4h, v5.4h\n"
- "smlal2 v30.4s, v17.8h, v5.8h\n"
+ "smlal v2.4s, v16.4h, v6.4h\n"
+ "smlal2 v1.4s, v16.8h, v6.8h\n"
+ "smlal v0.4s, v16.4h, v3.4h\n"
+ "smlal2 v24.4s, v16.8h, v3.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 45f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 44f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "tbz x8, #2, 45f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 44f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x8, #1, 46f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "usubl v23.8h, v23.8b, v14.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x70]\n"
- "smlal v7.4s, v23.4h, v20.4h\n"
- "smlal2 v0.4s, v23.8h, v20.8h\n"
- "smlal v10.4s, v23.4h, v18.4h\n"
- "smlal2 v6.4s, v23.8h, v18.8h\n"
+ "smlal v8.4s, v17.4h, v9.4h\n"
+ "smlal2 v30.4s, v17.8h, v9.8h\n"
+ "smlal v31.4s, v17.4h, v10.4h\n"
+ "smlal2 v27.4s, v17.8h, v10.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[6], [x20]\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[4], [x20]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v5.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[2], [x20]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[0], [x20]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "usubl v5.8h, v5.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x78]\n"
- "smlal v2.4s, v5.4h, v25.4h\n"
- "smlal2 v30.4s, v5.8h, v25.8h\n"
- "smlal v10.4s, v5.4h, v31.4h\n"
- "smlal2 v6.4s, v5.8h, v31.8h\n"
+ "smlal v0.4s, v16.4h, v7.4h\n"
+ "smlal2 v24.4s, v16.8h, v7.8h\n"
+ "smlal v31.4s, v16.4h, v6.4h\n"
+ "smlal2 v27.4s, v16.8h, v6.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "usubl v23.8h, v23.8b, v14.8b\n"
- "smlal v2.4s, v23.4h, v20.4h\n"
- "smlal2 v30.4s, v23.8h, v20.8h\n"
- "smlal v10.4s, v23.4h, v25.4h\n"
- "smlal2 v6.4s, v23.8h, v25.8h\n"
- "tbz x7, #2, 57f\n"
- "ld1 { v15.4s }, [x13], #0x10\n"
- "ld1 { v19.4s }, [x12], #0x10\n"
- "tbz x7, #1, 56f\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v0.4s, v17.4h, v9.4h\n"
+ "smlal2 v24.4s, v17.8h, v9.8h\n"
+ "smlal v31.4s, v17.4h, v7.4h\n"
+ "smlal2 v27.4s, v17.8h, v7.8h\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v16.4s }, [x13], #0x10\n"
+ "ld1 { v23.4s }, [x12], #0x10\n"
+ "tbz x8, #1, 56f\n"
"ld1 { v18.d }[0], [x13], #0x8\n"
"ld1 { v22.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v18.s }[2], [x13]\n"
"ld1 { v22.s }[2], [x12]\n"
"b 59f\n"
"56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v18.s }[0], [x13]\n"
"ld1 { v22.s }[0], [x12]\n"
"b 59f\n"
"57:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 58f\n"
- "ld1 { v15.d }[0], [x13], #0x8\n"
- "ld1 { v19.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v15.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x12]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v16.d }[0], [x13], #0x8\n"
+ "ld1 { v23.d }[0], [x12], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v16.s }[2], [x13]\n"
+ "ld1 { v23.s }[2], [x12]\n"
"b 59f\n"
"58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v15.s }[0], [x13]\n"
- "ld1 { v19.s }[0], [x12]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v16.s }[0], [x13]\n"
+ "ld1 { v23.s }[0], [x12]\n"
"59:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v9.4s, v9.4s, v15.4s\n"
- "and v17.16b, v9.16b, v19.16b\n"
- "add x11, x11, x16\n"
+ "sqrdmulh v2.4s, v2.4s, v16.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v18.4s\n"
"add x10, x10, x16\n"
- "sqrdmulh v24.4s, v24.4s, v18.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
"add x9, x9, x16\n"
+ "sqrdmulh v8.4s, v8.4s, v16.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v16.4s\n"
"add x28, x28, x16\n"
- "and v20.16b, v24.16b, v22.16b\n"
- "sqrdmulh v7.4s, v7.4s, v15.4s\n"
- "sqrdmulh v2.4s, v2.4s, v15.4s\n"
- "sqrdmulh v10.4s, v10.4s, v15.4s\n"
- "sqadd v9.4s, v9.4s, v17.4s\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "and v21.16b, v7.16b, v19.16b\n"
- "sqrdmulh v0.4s, v0.4s, v18.4s\n"
- "and v15.16b, v2.16b, v19.16b\n"
+ "add x27, x27, x16\n"
+ "sqrdmulh v31.4s, v31.4s, v16.4s\n"
"sqrdmulh v30.4s, v30.4s, v18.4s\n"
- "and v23.16b, v10.16b, v19.16b\n"
- "sqrdmulh v6.4s, v6.4s, v18.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
+ "and v17.16b, v2.16b, v23.16b\n"
+ "and v16.16b, v1.16b, v22.16b\n"
+ "and v21.16b, v8.16b, v23.16b\n"
+ "and v20.16b, v0.16b, v23.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v18.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v18.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v21.4s, v21.4s, #0x1f\n"
- "and v18.16b, v0.16b, v22.16b\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "and v17.16b, v30.16b, v22.16b\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v28.16b, v6.16b, v22.16b\n"
- "sqadd v7.4s, v7.4s, v21.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v15.4s\n"
+ "and v19.16b, v30.16b, v22.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v18.16b, v24.16b, v22.16b\n"
+ "sqadd v2.4s, v2.4s, v17.4s\n"
+ "and v17.16b, v31.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v22.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v23.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v19.4s\n"
- "srshl v7.4s, v7.4s, v19.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v2.4s, v2.4s, v19.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqadd v6.4s, v6.4s, v28.4s\n"
- "srshl v24.4s, v24.4s, v22.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v22.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqadd v0.4s, v0.4s, v20.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v23.4s\n"
+ "srshl v8.4s, v8.4s, v23.4s\n"
+ "sqadd v31.4s, v31.4s, v17.4s\n"
+ "sqadd v30.4s, v30.4s, v19.4s\n"
+ "srshl v0.4s, v0.4s, v23.4s\n"
+ "sqadd v24.4s, v24.4s, v18.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v22.4s\n"
+ "srshl v31.4s, v31.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v22.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v22.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v22.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "tbz x7, #2, 61f\n"
- "st1 { v9.s }[0], [x11], #0x4\n"
- "st1 { v7.s }[0], [x10], #0x4\n"
- "st1 { v2.s }[0], [x9], #0x4\n"
- "st1 { v10.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 60f\n"
- "st1 { v9.h }[2], [x11], #0x2\n"
- "st1 { v7.h }[2], [x10], #0x2\n"
- "st1 { v2.h }[2], [x9], #0x2\n"
- "st1 { v10.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[6], [x11], #0x1\n"
- "st1 { v7.b }[6], [x10], #0x1\n"
- "st1 { v2.b }[6], [x9], #0x1\n"
- "st1 { v10.b }[6], [x28], #0x1\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "tbz x8, #2, 61f\n"
+ "st1 { v2.s }[0], [x10], #0x4\n"
+ "st1 { v8.s }[0], [x9], #0x4\n"
+ "st1 { v0.s }[0], [x28], #0x4\n"
+ "st1 { v31.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "st1 { v2.h }[2], [x10], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v0.h }[2], [x28], #0x2\n"
+ "st1 { v31.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[6], [x10], #0x1\n"
+ "st1 { v8.b }[6], [x9], #0x1\n"
+ "st1 { v0.b }[6], [x28], #0x1\n"
+ "st1 { v31.b }[6], [x27], #0x1\n"
"b 63f\n"
"60:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[4], [x11], #0x1\n"
- "st1 { v7.b }[4], [x10], #0x1\n"
- "st1 { v2.b }[4], [x9], #0x1\n"
- "st1 { v10.b }[4], [x28], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[4], [x10], #0x1\n"
+ "st1 { v8.b }[4], [x9], #0x1\n"
+ "st1 { v0.b }[4], [x28], #0x1\n"
+ "st1 { v31.b }[4], [x27], #0x1\n"
"b 63f\n"
"61:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "st1 { v9.h }[0], [x11], #0x2\n"
- "st1 { v7.h }[0], [x10], #0x2\n"
- "st1 { v2.h }[0], [x9], #0x2\n"
- "st1 { v10.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[2], [x11], #0x1\n"
- "st1 { v7.b }[2], [x10], #0x1\n"
- "st1 { v2.b }[2], [x9], #0x1\n"
- "st1 { v10.b }[2], [x28], #0x1\n"
+ "tbz x8, #1, 62f\n"
+ "st1 { v2.h }[0], [x10], #0x2\n"
+ "st1 { v8.h }[0], [x9], #0x2\n"
+ "st1 { v0.h }[0], [x28], #0x2\n"
+ "st1 { v31.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[2], [x10], #0x1\n"
+ "st1 { v8.b }[2], [x9], #0x1\n"
+ "st1 { v0.b }[2], [x28], #0x1\n"
+ "st1 { v31.b }[2], [x27], #0x1\n"
"b 63f\n"
"62:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[0], [x11], #0x1\n"
- "st1 { v7.b }[0], [x10], #0x1\n"
- "st1 { v2.b }[0], [x9], #0x1\n"
- "st1 { v10.b }[0], [x28], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[0], [x10], #0x1\n"
+ "st1 { v8.b }[0], [x9], #0x1\n"
+ "st1 { v0.b }[0], [x28], #0x1\n"
+ "st1 { v31.b }[0], [x27], #0x1\n"
"63:" // Oddments: Bit 2: End
"64:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index c4184622b0..4d56009adc 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[25];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -100,1294 +100,1294 @@ void a64_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x2, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v6.16b }, [x20]\n"
+ "mov x3, #0x0\n"
+ "mov x4, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x17, x2, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v13.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_b_offset]\n"
"add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x21]\n"
- "ld1r { v13.8h }, [x20]\n"
+ "ld1r { v14.16b }, [x21]\n"
+ "ld1r { v25.8h }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_minval]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v17.8h }, [x21]\n"
- "ld1r { v24.8h }, [x20]\n"
- "mov x17, #0x0\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "usubl v11.8h, v11.8b, v15.8b\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "usubl v22.8h, v22.8b, v15.8b\n"
- "usubl v14.8h, v14.8b, v15.8b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "usubl v28.8h, v28.8b, v15.8b\n"
- "usubl v18.8h, v18.8b, v15.8b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "usubl v9.8h, v9.8b, v15.8b\n"
- "usubl v26.8h, v26.8b, v15.8b\n"
- "ldr d4, [x14, #0x40]\n"
+ "ld1r { v23.8h }, [x21]\n"
+ "ld1r { v12.8h }, [x20]\n"
+ "ldp x16, x15, [x22, #0x0]\n"
+ "ldp x14, x13, [x22, #0x10]\n"
+ "cbz x17, 3f\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "subs x17, x17, #0x1\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "ldr d7, [x6, #0x40]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v7.8h, v7.8b, v15.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "ldr q5, [x20, #0x0]\n"
- "ldr q3, [x20, #0x10]\n"
+ "usubl v29.8h, v29.8b, v14.8b\n"
+ "usubl v15.8h, v15.8b, v14.8b\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "usubl v5.8h, v5.8b, v14.8b\n"
+ "ldr q19, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
"add x20, x20, #0x20\n"
+ "usubl v7.8h, v7.8b, v14.8b\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d25, [x27, x17]\n"
- "ldr d27, [x26, x17]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d1, [x25, x17]\n"
- "ldr d2, [x24, x17]\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "ldr d12, [x23, x17]\n"
- "ldr d16, [x22, x17]\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "ldr d23, [x21, x17]\n"
- "ldr d10, [x20, x17]\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "ldr d26, [x27, x3]\n"
+ "ldr d31, [x26, x3]\n"
+ "ldr d20, [x25, x3]\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr d6, [x23, x3]\n"
+ "ldr d9, [x22, x3]\n"
+ "ldr d0, [x21, x3]\n"
+ "ldr d18, [x20, x3]\n"
+ "usubl v26.8h, v26.8b, v13.8b\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q30, [x13, #0x0]\n"
- "ldr q29, [x12, #0x0]\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x21, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "ldr x25, [x15, #0x60]\n"
- "ldr x24, [x15, #0x80]\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "ldr d27, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "ldr d25, [x20, x17]\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal v20.4s, v27.4h, v28.4h\n"
- "smlal v19.4s, v25.4h, v18.4h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "ldr d1, [x25, x17]\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "ldr d2, [x24, x17]\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v28.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v18.8h\n"
- "ldr d25, [x22, x17]\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v20.4s, v1.4h, v11.4h\n"
- "smlal v19.4s, v2.4h, v22.4h\n"
- "ldr x24, [x15, #0x50]\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "ldr d16, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "ldr d12, [x20, x17]\n"
- "ldr x23, [x15, #0x48]\n"
- "smlal2 v0.4s, v1.8h, v11.8h\n"
- "smlal2 v31.4s, v2.8h, v22.8h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal v20.4s, v27.4h, v18.4h\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x22, [x15, #0xa0]\n"
- "smlal v19.4s, v25.4h, v9.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "ldr d23, [x25, x17]\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "ldr d11, [x24, x17]\n"
- "usubl v11.8h, v11.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v18.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v9.8h\n"
- "ldr d25, [x21, x17]\n"
- "ldr x21, [x15, #0xb0]\n"
- "smlal v21.4s, v16.4h, v18.4h\n"
- "smlal v20.4s, v12.4h, v22.4h\n"
- "smlal v19.4s, v23.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "ldr d10, [x20, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
- "smlal v5.4s, v11.4h, v9.4h\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal2 v8.4s, v16.8h, v18.8h\n"
- "ldr d18, [x22, x17]\n"
- "ldr d16, [x21, x17]\n"
- "smlal2 v0.4s, v12.8h, v22.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal2 v31.4s, v23.8h, v14.8h\n"
- "ldr q14, [x13, #0x10]\n"
- "smlal v21.4s, v27.4h, v9.4h\n"
- "smlal v20.4s, v25.4h, v26.4h\n"
- "smlal v19.4s, v10.4h, v28.4h\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "ldr x21, [x15, #0xc0]\n"
- "smlal2 v3.4s, v11.8h, v9.8h\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v26.4h\n"
+ "ldr q17, [x7, #0x0]\n"
+ "ldr q30, [x8, #0x0]\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "ldr x24, [x5, #0x58]\n"
+ "ldr x23, [x5, #0x78]\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "ldr x22, [x5, #0x60]\n"
+ "ldr x21, [x5, #0x80]\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "ldr q26, [x7, #0x10]\n"
+ "ldr x20, [x5, #0x68]\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "ldr d31, [x24, x3]\n"
+ "ldr x12, [x5, #0x88]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "ldr x11, [x5, #0x40]\n"
+ "ldr x10, [x5, #0x70]\n"
+ "add x6, x6, #0x48\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x23, x3]\n"
+ "ldr x9, [x5, #0x98]\n"
+ "subs x17, x17, #0x1\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x28, [x5, #0x50]\n"
+ "ldr x27, [x5, #0x48]\n"
+ "add x7, x7, #0x20\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "ldr d20, [x22, x3]\n"
+ "ldr x26, [x5, #0x90]\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "ldr x25, [x5, #0xa8]\n"
+ "ldr x24, [x5, #0xa0]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "ldr d31, [x21, x3]\n"
+ "ldr x23, [x5, #0xb0]\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "ldr d6, [x20, x3]\n"
+ "ldr x22, [x5, #0xb8]\n"
+ "smlal v3.4s, v28.4h, v27.4h\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "ldr x21, [x5, #0xc0]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v8.4s, v27.8h, v9.8h\n"
- "ldr d27, [x21, x17]\n"
- "smlal2 v0.4s, v25.8h, v26.8h\n"
- "ldr q25, [x12, #0x10]\n"
- "smlal2 v31.4s, v10.8h, v28.8h\n"
- "smlal v21.4s, v11.4h, v28.4h\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
- "add x14, x14, #0x48\n"
- "smlal v20.4s, v18.4h, v7.4h\n"
- "smlal v19.4s, v16.4h, v7.4h\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "add x17, x17, #0x8\n"
- "smlal2 v3.4s, v1.8h, v26.8h\n"
- "smlal v5.4s, v12.4h, v7.4h\n"
- "sqrdmulh v5.4s, v5.4s, v30.4s\n"
- "subs x8, x8, #0x1\n"
- "smlal2 v8.4s, v11.8h, v28.8h\n"
- "smlal2 v0.4s, v18.8h, v7.8h\n"
- "and v28.16b, v5.16b, v29.16b\n"
- "add x13, x13, #0x20\n"
- "smlal2 v31.4s, v16.8h, v7.8h\n"
- "smlal v21.4s, v2.4h, v7.4h\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v27.8h\n"
+ "ldr d28, [x12, x3]\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "ldr d9, [x11, x3]\n"
+ "smlal v10.4s, v20.4h, v16.4h\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal2 v21.4s, v20.8h, v16.8h\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "ldr d16, [x10, x3]\n"
+ "smlal v3.4s, v31.4h, v11.4h\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "smlal2 v24.4s, v31.8h, v11.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "ldr d0, [x9, x3]\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "smlal v10.4s, v6.4h, v27.4h\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "smlal2 v21.4s, v6.8h, v27.8h\n"
+ "ldr d6, [x28, x3]\n"
+ "smlal v3.4s, v28.4h, v2.4h\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal v8.4s, v9.4h, v27.4h\n"
+ "smlal2 v4.4s, v9.8h, v27.8h\n"
+ "ldr d9, [x27, x3]\n"
+ "ldr d27, [x26, x3]\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr d28, [x25, x3]\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "ldr d18, [x24, x3]\n"
+ "smlal v10.4s, v16.4h, v11.4h\n"
+ "smlal2 v21.4s, v16.8h, v11.8h\n"
+ "ldr d11, [x23, x3]\n"
+ "smlal v3.4s, v0.4h, v29.4h\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v27.8h, v27.8b, v13.8b\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v6.4h, v2.4h\n"
+ "smlal2 v24.4s, v0.8h, v29.8h\n"
+ "ldr d29, [x22, x3]\n"
+ "smlal2 v1.4s, v6.8h, v2.8h\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "usubl v11.8h, v11.8b, v13.8b\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "ldr d9, [x21, x3]\n"
+ "smlal v10.4s, v27.4h, v22.4h\n"
+ "smlal v3.4s, v28.4h, v15.4h\n"
+ "add x3, x3, #0x8\n"
+ "smlal v19.4s, v20.4h, v22.4h\n"
+ "smlal2 v21.4s, v27.8h, v22.8h\n"
+ "ldr q27, [x8, #0x10]\n"
+ "usubl v29.8h, v29.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v15.8h\n"
+ "smlal2 v1.4s, v20.8h, v22.8h\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "add x8, x8, #0x20\n"
+ "smlal v8.4s, v6.4h, v15.4h\n"
+ "smlal2 v4.4s, v6.8h, v15.8h\n"
+ "smlal v10.4s, v18.4h, v5.4h\n"
+ "smlal v3.4s, v11.4h, v5.4h\n"
+ "smlal v19.4s, v16.4h, v5.4h\n"
+ "smlal2 v21.4s, v18.8h, v5.8h\n"
+ "smlal2 v24.4s, v11.8h, v5.8h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal v8.4s, v31.4h, v5.4h\n"
+ "smlal2 v4.4s, v31.8h, v5.8h\n"
+ "smlal v10.4s, v28.4h, v2.4h\n"
+ "smlal v3.4s, v29.4h, v22.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v17.4s\n"
+ "smlal2 v21.4s, v28.8h, v2.8h\n"
+ "smlal2 v24.4s, v29.8h, v22.8h\n"
+ "sqrdmulh v1.4s, v1.4s, v26.4s\n"
+ "smlal v8.4s, v0.4h, v7.4h\n"
+ "and v2.16b, v19.16b, v30.16b\n"
+ "smlal2 v4.4s, v0.8h, v7.8h\n"
+ "smlal v10.4s, v29.4h, v7.4h\n"
+ "smlal v3.4s, v9.4h, v7.4h\n"
+ "and v11.16b, v1.16b, v27.16b\n"
+ "smlal2 v21.4s, v29.8h, v7.8h\n"
+ "smlal2 v24.4s, v9.8h, v7.8h\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqrdmulh v8.4s, v8.4s, v17.4s\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "sqrdmulh v4.4s, v4.4s, v26.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v17.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "sqadd v19.4s, v19.4s, v2.4s\n"
+ "and v29.16b, v8.16b, v30.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "and v20.16b, v10.16b, v30.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v26.4s\n"
+ "and v28.16b, v3.16b, v30.16b\n"
+ "sqadd v1.4s, v1.4s, v11.4s\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v7.16b, v4.16b, v27.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v2.16b, v21.16b, v27.16b\n"
"sshr v28.4s, v28.4s, #0x1f\n"
- "add x12, x12, #0x20\n"
- "smlal v20.4s, v10.4h, v9.4h\n"
- "smlal v19.4s, v22.4h, v26.4h\n"
- "sqadd v5.4s, v5.4s, v28.4s\n"
- "smlal2 v3.4s, v12.8h, v7.8h\n"
- "smlal2 v8.4s, v2.8h, v7.8h\n"
- "sqrdmulh v3.4s, v3.4s, v14.4s\n"
- "smlal2 v0.4s, v10.8h, v9.8h\n"
- "smlal2 v31.4s, v22.8h, v26.8h\n"
- "and v16.16b, v3.16b, v25.16b\n"
- "smlal v21.4s, v23.4h, v4.4h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "sqrdmulh v21.4s, v21.4s, v30.4s\n"
- "smlal v19.4s, v27.4h, v4.4h\n"
- "smlal2 v8.4s, v23.8h, v4.8h\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "smlal2 v0.4s, v22.8h, v4.8h\n"
- "smlal2 v31.4s, v27.8h, v4.8h\n"
- "sqrdmulh v19.4s, v19.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v12.16b, v21.16b, v29.16b\n"
- "sqrdmulh v8.4s, v8.4s, v14.4s\n"
- "and v23.16b, v20.16b, v29.16b\n"
- "sqrdmulh v0.4s, v0.4s, v14.4s\n"
- "and v9.16b, v19.16b, v29.16b\n"
- "sqrdmulh v31.4s, v31.4s, v14.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v12.4s, v12.4s, #0x1f\n"
- "and v18.16b, v8.16b, v25.16b\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v22.16b, v0.16b, v25.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "and v16.16b, v31.16b, v25.16b\n"
- "sqadd v21.4s, v21.4s, v12.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v23.4s\n"
+ "and v22.16b, v24.16b, v27.16b\n"
+ "sqadd v8.4s, v8.4s, v29.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v3.4s, v3.4s, v28.4s\n"
"sshr v22.4s, v22.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v9.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v29.4s\n"
- "srshl v21.4s, v21.4s, v29.4s\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "srshl v20.4s, v20.4s, v29.4s\n"
- "sqadd v0.4s, v0.4s, v22.4s\n"
- "srshl v19.4s, v19.4s, v29.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v25.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v25.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v25.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v25.4s\n"
+ "srshl v19.4s, v19.4s, v30.4s\n"
+ "srshl v8.4s, v8.4s, v30.4s\n"
+ "sqadd v4.4s, v4.4s, v7.4s\n"
+ "srshl v10.4s, v10.4s, v30.4s\n"
+ "sqadd v21.4s, v21.4s, v2.4s\n"
+ "srshl v3.4s, v3.4s, v30.4s\n"
+ "sqadd v24.4s, v24.4s, v22.4s\n"
+ "srshl v1.4s, v1.4s, v27.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "str d5, [x11, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d21, [x10, x16]\n"
+ "srshl v4.4s, v4.4s, v27.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v27.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v27.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "str d20, [x9, x16]\n"
- "str d19, [x28, x16]\n"
- "ldr q5, [x20, #0x0]\n"
- "ldr q3, [x20, #0x10]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str d19, [x16, x4]\n"
+ "str d8, [x15, x4]\n"
+ "str d10, [x14, x4]\n"
+ "str d3, [x13, x4]\n"
+ "add x4, x4, #0x8\n"
+ "ldr q19, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d4, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "usubl v11.8h, v11.8b, v15.8b\n"
- "usubl v22.8h, v22.8b, v15.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "usubl v14.8h, v14.8b, v15.8b\n"
- "usubl v28.8h, v28.8b, v15.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr d25, [x27, x17]\n"
- "usubl v18.8h, v18.8b, v15.8b\n"
- "usubl v9.8h, v9.8b, v15.8b\n"
- "ldr d27, [x26, x17]\n"
- "ldr d1, [x25, x17]\n"
- "usubl v26.8h, v26.8b, v15.8b\n"
- "usubl v7.8h, v7.8b, v15.8b\n"
- "ldr d2, [x24, x17]\n"
- "ldr d12, [x23, x17]\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "ldr d16, [x22, x17]\n"
- "ldr d23, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "ldr d10, [x20, x17]\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldr d7, [x6, #0x40]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "usubl v29.8h, v29.8b, v14.8b\n"
+ "usubl v15.8h, v15.8b, v14.8b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "usubl v5.8h, v5.8b, v14.8b\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "usubl v7.8h, v7.8b, v14.8b\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "ldr d26, [x27, x3]\n"
+ "ldr d31, [x26, x3]\n"
+ "ldr d20, [x25, x3]\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr d6, [x23, x3]\n"
+ "ldr d9, [x22, x3]\n"
+ "ldr d0, [x21, x3]\n"
+ "usubl v26.8h, v26.8b, v13.8b\n"
+ "ldr d18, [x20, x3]\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q29, [x13, #0x0]\n"
- "ldr q30, [x12, #0x0]\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x21, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "ldr x25, [x15, #0x60]\n"
- "ldr x24, [x15, #0x80]\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "ldr d27, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "ldr d25, [x20, x17]\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal v20.4s, v27.4h, v28.4h\n"
- "smlal v19.4s, v25.4h, v18.4h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "ldr d1, [x25, x17]\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "ldr d2, [x24, x17]\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v28.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v18.8h\n"
- "ldr d25, [x22, x17]\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v20.4s, v1.4h, v11.4h\n"
- "smlal v19.4s, v2.4h, v22.4h\n"
- "ldr x24, [x15, #0x50]\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "ldr d16, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "ldr d12, [x20, x17]\n"
- "ldr x23, [x15, #0x48]\n"
- "smlal2 v0.4s, v1.8h, v11.8h\n"
- "smlal2 v31.4s, v2.8h, v22.8h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal v20.4s, v27.4h, v18.4h\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x22, [x15, #0xa0]\n"
- "smlal v19.4s, v25.4h, v9.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "ldr d23, [x25, x17]\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "ldr d11, [x24, x17]\n"
- "usubl v11.8h, v11.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v18.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v9.8h\n"
- "ldr d25, [x21, x17]\n"
- "ldr x21, [x15, #0xb0]\n"
- "smlal v21.4s, v16.4h, v18.4h\n"
- "smlal v20.4s, v12.4h, v22.4h\n"
- "smlal v19.4s, v23.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "ldr d10, [x20, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
- "smlal v5.4s, v11.4h, v9.4h\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal2 v8.4s, v16.8h, v18.8h\n"
- "ldr d16, [x22, x17]\n"
- "ldr d18, [x21, x17]\n"
- "smlal2 v0.4s, v12.8h, v22.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal2 v31.4s, v23.8h, v14.8h\n"
- "ldr q14, [x13, #0x10]\n"
- "smlal v21.4s, v27.4h, v9.4h\n"
- "smlal v20.4s, v25.4h, v26.4h\n"
- "smlal v19.4s, v10.4h, v28.4h\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v3.4s, v11.8h, v9.8h\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v26.4h\n"
- "tst x7, #0x7\n"
- "smlal2 v8.4s, v27.8h, v9.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal2 v0.4s, v25.8h, v26.8h\n"
- "ldr q25, [x12, #0x10]\n"
- "smlal2 v31.4s, v10.8h, v28.8h\n"
- "smlal v21.4s, v11.4h, v28.4h\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
- "add x17, x17, #0x8\n"
- "smlal v20.4s, v16.4h, v7.4h\n"
- "smlal v19.4s, v18.4h, v7.4h\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "add x13, x13, #0x20\n"
- "smlal2 v3.4s, v1.8h, v26.8h\n"
- "smlal v5.4s, v12.4h, v7.4h\n"
- "sqrdmulh v5.4s, v5.4s, v29.4s\n"
- "add x12, x12, #0x20\n"
- "smlal2 v8.4s, v11.8h, v28.8h\n"
- "smlal2 v0.4s, v16.8h, v7.8h\n"
- "and v16.16b, v5.16b, v30.16b\n"
- "smlal2 v31.4s, v18.8h, v7.8h\n"
- "smlal v21.4s, v2.4h, v7.4h\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "smlal v20.4s, v10.4h, v9.4h\n"
- "smlal v19.4s, v22.4h, v26.4h\n"
- "sqadd v5.4s, v5.4s, v16.4s\n"
- "smlal2 v3.4s, v12.8h, v7.8h\n"
- "smlal2 v8.4s, v2.8h, v7.8h\n"
- "sqrdmulh v3.4s, v3.4s, v14.4s\n"
- "smlal2 v0.4s, v10.8h, v9.8h\n"
- "smlal2 v31.4s, v22.8h, v26.8h\n"
- "and v16.16b, v3.16b, v25.16b\n"
- "smlal v21.4s, v23.4h, v4.4h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "sqrdmulh v21.4s, v21.4s, v29.4s\n"
- "smlal v19.4s, v27.4h, v4.4h\n"
- "smlal2 v8.4s, v23.8h, v4.8h\n"
- "sqrdmulh v20.4s, v20.4s, v29.4s\n"
- "smlal2 v0.4s, v22.8h, v4.8h\n"
- "smlal2 v31.4s, v27.8h, v4.8h\n"
- "sqrdmulh v19.4s, v19.4s, v29.4s\n"
+ "ldr q30, [x7, #0x0]\n"
+ "ldr q17, [x8, #0x0]\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "ldr x20, [x5, #0x58]\n"
+ "ldr x24, [x5, #0x78]\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "ldr x23, [x5, #0x60]\n"
+ "ldr x10, [x5, #0x80]\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "ldr q26, [x7, #0x10]\n"
+ "ldr x22, [x5, #0x68]\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "ldr d31, [x20, x3]\n"
+ "ldr x21, [x5, #0x88]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "ldr x20, [x5, #0x40]\n"
+ "ldr x9, [x5, #0x70]\n"
+ "tst x2, #0x7\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr x28, [x5, #0x98]\n"
+ "add x7, x7, #0x20\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x27, [x5, #0x50]\n"
+ "ldr x26, [x5, #0x48]\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "ldr d20, [x23, x3]\n"
+ "ldr x25, [x5, #0x90]\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "ldr x24, [x5, #0xa8]\n"
+ "ldr x23, [x5, #0xa0]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "ldr d31, [x10, x3]\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "ldr d6, [x22, x3]\n"
+ "smlal v3.4s, v28.4h, v27.4h\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "ldr x22, [x5, #0xb0]\n"
+ "smlal2 v24.4s, v28.8h, v27.8h\n"
+ "ldr d28, [x21, x3]\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "ldr d9, [x20, x3]\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "ldr x21, [x5, #0xb8]\n"
+ "smlal v10.4s, v20.4h, v16.4h\n"
+ "smlal2 v21.4s, v20.8h, v16.8h\n"
+ "ldr x20, [x5, #0xc0]\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "ldr d16, [x9, x3]\n"
+ "smlal v3.4s, v31.4h, v11.4h\n"
+ "smlal2 v24.4s, v31.8h, v11.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "ldr d0, [x28, x3]\n"
+ "smlal v10.4s, v6.4h, v27.4h\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "smlal2 v21.4s, v6.8h, v27.8h\n"
+ "ldr d6, [x27, x3]\n"
+ "smlal v8.4s, v9.4h, v27.4h\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal2 v4.4s, v9.8h, v27.8h\n"
+ "ldr d9, [x26, x3]\n"
+ "ldr d27, [x25, x3]\n"
+ "smlal v3.4s, v28.4h, v2.4h\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr d28, [x24, x3]\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v10.4s, v16.4h, v11.4h\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "ldr d18, [x23, x3]\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v27.8h, v27.8b, v13.8b\n"
+ "smlal2 v21.4s, v16.8h, v11.8h\n"
+ "ldr d11, [x22, x3]\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v3.4s, v0.4h, v29.4h\n"
+ "smlal v19.4s, v6.4h, v2.4h\n"
+ "smlal2 v24.4s, v0.8h, v29.8h\n"
+ "ldr d29, [x21, x3]\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v22.4h\n"
+ "smlal2 v1.4s, v6.8h, v2.8h\n"
+ "usubl v11.8h, v11.8b, v13.8b\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "ldr d9, [x20, x3]\n"
+ "smlal2 v21.4s, v27.8h, v22.8h\n"
+ "ldr q27, [x8, #0x10]\n"
+ "smlal v3.4s, v28.4h, v15.4h\n"
+ "smlal v19.4s, v20.4h, v22.4h\n"
+ "usubl v29.8h, v29.8b, v13.8b\n"
+ "add x3, x3, #0x8\n"
+ "smlal2 v24.4s, v28.8h, v15.8h\n"
+ "smlal v8.4s, v6.4h, v15.4h\n"
+ "add x8, x8, #0x20\n"
+ "smlal v10.4s, v18.4h, v5.4h\n"
+ "smlal2 v1.4s, v20.8h, v22.8h\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "smlal2 v4.4s, v6.8h, v15.8h\n"
+ "smlal2 v21.4s, v18.8h, v5.8h\n"
+ "smlal v3.4s, v11.4h, v5.4h\n"
+ "smlal v19.4s, v16.4h, v5.4h\n"
+ "smlal2 v24.4s, v11.8h, v5.8h\n"
+ "smlal v8.4s, v31.4h, v5.4h\n"
+ "smlal v10.4s, v28.4h, v2.4h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal2 v4.4s, v31.8h, v5.8h\n"
+ "smlal2 v21.4s, v28.8h, v2.8h\n"
+ "smlal v3.4s, v29.4h, v22.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v30.4s\n"
+ "smlal2 v24.4s, v29.8h, v22.8h\n"
+ "smlal v8.4s, v0.4h, v7.4h\n"
+ "smlal v10.4s, v29.4h, v7.4h\n"
+ "sqrdmulh v1.4s, v1.4s, v26.4s\n"
+ "and v5.16b, v19.16b, v17.16b\n"
+ "smlal2 v4.4s, v0.8h, v7.8h\n"
+ "smlal2 v21.4s, v29.8h, v7.8h\n"
+ "smlal v3.4s, v9.4h, v7.4h\n"
+ "smlal2 v24.4s, v9.8h, v7.8h\n"
+ "and v16.16b, v1.16b, v27.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqrdmulh v8.4s, v8.4s, v30.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v30.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v23.16b, v21.16b, v30.16b\n"
- "sqrdmulh v8.4s, v8.4s, v14.4s\n"
- "and v27.16b, v20.16b, v30.16b\n"
- "sqrdmulh v0.4s, v0.4s, v14.4s\n"
- "and v22.16b, v19.16b, v30.16b\n"
- "sqrdmulh v31.4s, v31.4s, v14.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v14.16b, v8.16b, v25.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v18.16b, v0.16b, v25.16b\n"
- "sshr v22.4s, v22.4s, #0x1f\n"
- "and v16.16b, v31.16b, v25.16b\n"
- "sqadd v21.4s, v21.4s, v23.4s\n"
- "sshr v14.4s, v14.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v27.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v22.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v26.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v30.4s\n"
+ "sqadd v19.4s, v19.4s, v5.4s\n"
+ "and v30.16b, v8.16b, v17.16b\n"
+ "and v20.16b, v10.16b, v17.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v26.4s\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v2.16b, v3.16b, v17.16b\n"
+ "and v11.16b, v4.16b, v27.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v9.16b, v21.16b, v27.16b\n"
+ "and v16.16b, v24.16b, v27.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "sqadd v8.4s, v8.4s, v30.4s\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v30.4s\n"
- "srshl v21.4s, v21.4s, v30.4s\n"
- "sqadd v8.4s, v8.4s, v14.4s\n"
- "srshl v20.4s, v20.4s, v30.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v19.4s, v19.4s, v30.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v25.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v25.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v25.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v25.4s\n"
+ "sqadd v3.4s, v3.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v17.4s\n"
+ "srshl v8.4s, v8.4s, v17.4s\n"
+ "sqadd v4.4s, v4.4s, v11.4s\n"
+ "srshl v10.4s, v10.4s, v17.4s\n"
+ "sqadd v21.4s, v21.4s, v9.4s\n"
+ "srshl v3.4s, v3.4s, v17.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v27.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "str d5, [x11, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d21, [x10, x16]\n"
+ "srshl v4.4s, v4.4s, v27.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v27.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v27.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "str d20, [x9, x16]\n"
- "str d19, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str d19, [x16, x4]\n"
+ "str d8, [x15, x4]\n"
+ "str d10, [x14, x4]\n"
+ "str d3, [x13, x4]\n"
+ "add x4, x4, #0x8\n"
"beq 88f\n"
- "add x14, x14, #0x48\n"
+ "add x6, x6, #0x48\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v5.4s }, [x20], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v3.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v3.s }[2], [x20]\n"
+ "tbz x2, #2, 5f\n"
+ "ld1 { v19.4s }, [x20], #0x10\n"
+ "tbz x2, #1, 4f\n"
+ "ld1 { v1.d }[0], [x20], #0x8\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v1.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v3.s }[0], [x20]\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v1.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v5.s }[2], [x20]\n"
+ "tbz x2, #1, 6f\n"
+ "ld1 { v19.d }[0], [x20], #0x8\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v19.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v5.s }[0], [x20]\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v19.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "usubl v11.8h, v11.8b, v15.8b\n"
- "usubl v22.8h, v22.8b, v15.8b\n"
- "ldr d4, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "usubl v14.8h, v14.8b, v15.8b\n"
- "usubl v28.8h, v28.8b, v15.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "usubl v18.8h, v18.8b, v15.8b\n"
- "usubl v9.8h, v9.8b, v15.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "usubl v26.8h, v26.8b, v15.8b\n"
- "usubl v7.8h, v7.8b, v15.8b\n"
- "usubl v4.8h, v4.8b, v15.8b\n"
- "add x27, x27, x17\n"
- "add x26, x26, x17\n"
- "add x25, x25, x17\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v25.s }[0], [x27], #0x4\n"
- "ld1 { v27.s }[0], [x26], #0x4\n"
- "ld1 { v1.s }[0], [x25], #0x4\n"
- "ld1 { v2.s }[0], [x24], #0x4\n"
- "ld1 { v12.s }[0], [x23], #0x4\n"
- "ld1 { v16.s }[0], [x22], #0x4\n"
- "ld1 { v23.s }[0], [x21], #0x4\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v25.h }[2], [x27], #0x2\n"
- "ld1 { v27.h }[2], [x26], #0x2\n"
- "ld1 { v1.h }[2], [x25], #0x2\n"
- "ld1 { v2.h }[2], [x24], #0x2\n"
- "ld1 { v12.h }[2], [x23], #0x2\n"
- "ld1 { v16.h }[2], [x22], #0x2\n"
- "ld1 { v23.h }[2], [x21], #0x2\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[6], [x27]\n"
- "ld1 { v27.b }[6], [x26]\n"
- "ld1 { v1.b }[6], [x25]\n"
- "ld1 { v2.b }[6], [x24]\n"
- "ld1 { v12.b }[6], [x23]\n"
- "ld1 { v16.b }[6], [x22]\n"
- "ld1 { v23.b }[6], [x21]\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "ldr d7, [x6, #0x40]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
+ "usubl v29.8h, v29.8b, v14.8b\n"
+ "usubl v15.8h, v15.8b, v14.8b\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "usubl v5.8h, v5.8b, v14.8b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "usubl v7.8h, v7.8b, v14.8b\n"
+ "add x27, x27, x3\n"
+ "add x26, x26, x3\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "add x25, x25, x3\n"
+ "add x24, x24, x3\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "add x23, x23, x3\n"
+ "add x22, x22, x3\n"
+ "add x21, x21, x3\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 9f\n"
+ "ld1 { v26.s }[0], [x27], #0x4\n"
+ "ld1 { v31.s }[0], [x26], #0x4\n"
+ "ld1 { v20.s }[0], [x25], #0x4\n"
+ "ld1 { v28.s }[0], [x24], #0x4\n"
+ "ld1 { v6.s }[0], [x23], #0x4\n"
+ "ld1 { v9.s }[0], [x22], #0x4\n"
+ "ld1 { v0.s }[0], [x21], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 8f\n"
+ "ld1 { v26.h }[2], [x27], #0x2\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v20.h }[2], [x25], #0x2\n"
+ "ld1 { v28.h }[2], [x24], #0x2\n"
+ "ld1 { v6.h }[2], [x23], #0x2\n"
+ "ld1 { v9.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[6], [x27]\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v20.b }[6], [x25]\n"
+ "ld1 { v28.b }[6], [x24]\n"
+ "ld1 { v6.b }[6], [x23]\n"
+ "ld1 { v9.b }[6], [x22]\n"
+ "ld1 { v0.b }[6], [x21]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[4], [x27]\n"
- "ld1 { v27.b }[4], [x26]\n"
- "ld1 { v1.b }[4], [x25]\n"
- "ld1 { v2.b }[4], [x24]\n"
- "ld1 { v12.b }[4], [x23]\n"
- "ld1 { v16.b }[4], [x22]\n"
- "ld1 { v23.b }[4], [x21]\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[4], [x27]\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v20.b }[4], [x25]\n"
+ "ld1 { v28.b }[4], [x24]\n"
+ "ld1 { v6.b }[4], [x23]\n"
+ "ld1 { v9.b }[4], [x22]\n"
+ "ld1 { v0.b }[4], [x21]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v25.h }[0], [x27], #0x2\n"
- "ld1 { v27.h }[0], [x26], #0x2\n"
- "ld1 { v1.h }[0], [x25], #0x2\n"
- "ld1 { v2.h }[0], [x24], #0x2\n"
- "ld1 { v12.h }[0], [x23], #0x2\n"
- "ld1 { v16.h }[0], [x22], #0x2\n"
- "ld1 { v23.h }[0], [x21], #0x2\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[2], [x27]\n"
- "ld1 { v27.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v12.b }[2], [x23]\n"
- "ld1 { v16.b }[2], [x22]\n"
- "ld1 { v23.b }[2], [x21]\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "tbz x2, #1, 10f\n"
+ "ld1 { v26.h }[0], [x27], #0x2\n"
+ "ld1 { v31.h }[0], [x26], #0x2\n"
+ "ld1 { v20.h }[0], [x25], #0x2\n"
+ "ld1 { v28.h }[0], [x24], #0x2\n"
+ "ld1 { v6.h }[0], [x23], #0x2\n"
+ "ld1 { v9.h }[0], [x22], #0x2\n"
+ "ld1 { v0.h }[0], [x21], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[2], [x27]\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v20.b }[2], [x25]\n"
+ "ld1 { v28.b }[2], [x24]\n"
+ "ld1 { v6.b }[2], [x23]\n"
+ "ld1 { v9.b }[2], [x22]\n"
+ "ld1 { v0.b }[2], [x21]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[0], [x27]\n"
- "ld1 { v27.b }[0], [x26]\n"
- "ld1 { v1.b }[0], [x25]\n"
- "ld1 { v2.b }[0], [x24]\n"
- "ld1 { v12.b }[0], [x23]\n"
- "ld1 { v16.b }[0], [x22]\n"
- "ld1 { v23.b }[0], [x21]\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[0], [x27]\n"
+ "ld1 { v31.b }[0], [x26]\n"
+ "ld1 { v20.b }[0], [x25]\n"
+ "ld1 { v28.b }[0], [x24]\n"
+ "ld1 { v6.b }[0], [x23]\n"
+ "ld1 { v9.b }[0], [x22]\n"
+ "ld1 { v0.b }[0], [x21]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x20, [x15, #0x40]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "usubl v26.8h, v26.8b, v13.8b\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x20, [x5, #0x40]\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "add x20, x20, x3\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "tbz x2, #2, 13f\n"
+ "ld1 { v30.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 12f\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x2, #1, 14f\n"
+ "ld1 { v30.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[0], [x20]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
- "usubl v15.8h, v15.8b, v6.8b\n"
- "ldr x20, [x15, #0x48]\n"
- "smlal v21.4s, v15.4h, v18.4h\n"
- "smlal2 v8.4s, v15.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v30.8h, v30.8b, v13.8b\n"
+ "ldr x20, [x5, #0x48]\n"
+ "smlal v8.4s, v30.4h, v27.4h\n"
+ "smlal2 v4.4s, v30.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 17f\n"
+ "ld1 { v9.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 16f\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 18f\n"
+ "ld1 { v9.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x50]\n"
- "smlal v21.4s, v16.4h, v9.4h\n"
- "smlal2 v8.4s, v16.8h, v9.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 21f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 20f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "ldr x20, [x5, #0x50]\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 21f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 20f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (1, 2): Bit 2: Unset
- "tbz x7, #1, 22f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 22f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"23:" // Oddments: Load (1, 2): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x58]\n"
- "smlal v5.4s, v16.4h, v9.4h\n"
- "smlal2 v3.4s, v16.8h, v9.8h\n"
- "smlal v21.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x58]\n"
+ "smlal v19.4s, v17.4h, v2.4h\n"
+ "smlal2 v1.4s, v17.8h, v2.8h\n"
+ "smlal v8.4s, v17.4h, v15.4h\n"
+ "smlal2 v4.4s, v17.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 25f\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 24f\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 26f\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[0], [x20]\n"
"27:" // Oddments: Load (3, 0): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v20.4s, v16.4h, v28.4h\n"
- "smlal2 v0.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x20, [x5, #0x60]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 29f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 28f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 30f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 30f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 0): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v5.4s, v16.4h, v26.4h\n"
- "smlal2 v3.4s, v16.8h, v26.8h\n"
- "smlal v20.4s, v16.4h, v11.4h\n"
- "smlal2 v0.4s, v16.8h, v11.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x68]\n"
+ "smlal v19.4s, v17.4h, v22.4h\n"
+ "smlal2 v1.4s, v17.8h, v22.8h\n"
+ "smlal v10.4s, v17.4h, v16.4h\n"
+ "smlal2 v21.4s, v17.8h, v16.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 33f\n"
+ "ld1 { v30.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 32f\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 34f\n"
+ "ld1 { v30.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[0], [x20]\n"
"35:" // Oddments: Load (3, 1): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal v20.4s, v16.4h, v18.4h\n"
- "smlal2 v0.4s, v16.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v30.8h, v30.8b, v13.8b\n"
+ "ldr x20, [x5, #0x70]\n"
+ "smlal v10.4s, v30.4h, v27.4h\n"
+ "smlal2 v21.4s, v30.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 37f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 36f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 38f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"39:" // Oddments: Load (2, 1): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v16.4h, v7.4h\n"
- "smlal2 v3.4s, v16.8h, v7.8h\n"
- "smlal v20.4s, v16.4h, v22.4h\n"
- "smlal2 v0.4s, v16.8h, v22.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 41f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x78]\n"
+ "smlal v19.4s, v17.4h, v5.4h\n"
+ "smlal2 v1.4s, v17.8h, v5.8h\n"
+ "smlal v10.4s, v17.4h, v11.4h\n"
+ "smlal2 v21.4s, v17.8h, v11.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 41f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 40f\n"
+ "tbz x2, #1, 40f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 42f\n"
+ "tbz x2, #1, 42f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[0], [x20]\n"
"43:" // Oddments: Load (3, 3): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x80]\n"
- "smlal v19.4s, v16.4h, v18.4h\n"
- "smlal2 v31.4s, v16.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 45f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 44f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0x80]\n"
+ "smlal v3.4s, v16.4h, v27.4h\n"
+ "smlal2 v24.4s, v16.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 45f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 44f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 46f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x88]\n"
- "smlal v21.4s, v16.4h, v7.4h\n"
- "smlal2 v8.4s, v16.8h, v7.8h\n"
- "smlal v19.4s, v16.4h, v22.4h\n"
- "smlal2 v31.4s, v16.8h, v22.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 49f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x88]\n"
+ "smlal v8.4s, v17.4h, v5.4h\n"
+ "smlal2 v4.4s, v17.8h, v5.8h\n"
+ "smlal v3.4s, v17.4h, v11.4h\n"
+ "smlal2 v24.4s, v17.8h, v11.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 49f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 48f\n"
+ "tbz x2, #1, 48f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x7, #1, 50f\n"
+ "tbz x2, #1, 50f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 4): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x90]\n"
- "smlal v19.4s, v16.4h, v9.4h\n"
- "smlal2 v31.4s, v16.8h, v9.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0x90]\n"
+ "smlal v3.4s, v16.4h, v2.4h\n"
+ "smlal2 v24.4s, v16.8h, v2.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 53f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 52f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 54f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"55:" // Oddments: Load (4, 0): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x98]\n"
- "smlal v20.4s, v16.4h, v26.4h\n"
- "smlal2 v0.4s, v16.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 57f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x98]\n"
+ "smlal v10.4s, v17.4h, v22.4h\n"
+ "smlal2 v21.4s, v17.8h, v22.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 57f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 56f\n"
+ "tbz x2, #1, 56f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 59f\n"
"56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 59f\n"
"57:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x7, #1, 58f\n"
+ "tbz x2, #1, 58f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 59f\n"
"58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[0], [x20]\n"
"59:" // Oddments: Load (2, 4): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal v21.4s, v16.4h, v4.4h\n"
- "smlal2 v8.4s, v16.8h, v4.8h\n"
- "smlal v19.4s, v16.4h, v14.4h\n"
- "smlal2 v31.4s, v16.8h, v14.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 61f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 60f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xa0]\n"
+ "smlal v8.4s, v16.4h, v7.4h\n"
+ "smlal2 v4.4s, v16.8h, v7.8h\n"
+ "smlal v3.4s, v16.4h, v29.4h\n"
+ "smlal2 v24.4s, v16.8h, v29.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 61f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 60f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 63f\n"
"60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 63f\n"
"61:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 62f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 63f\n"
"62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"63:" // Oddments: Load (4, 1): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v20.4s, v16.4h, v7.4h\n"
- "smlal2 v0.4s, v16.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 65f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0xa8]\n"
+ "smlal v10.4s, v17.4h, v5.4h\n"
+ "smlal2 v21.4s, v17.8h, v5.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 65f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 64f\n"
+ "tbz x2, #1, 64f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 66f\n"
+ "tbz x2, #1, 66f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[0], [x20]\n"
"67:" // Oddments: Load (3, 2): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xb0]\n"
- "smlal v20.4s, v16.4h, v9.4h\n"
- "smlal2 v0.4s, v16.8h, v9.8h\n"
- "smlal v19.4s, v16.4h, v28.4h\n"
- "smlal2 v31.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 69f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 68f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xb0]\n"
+ "smlal v10.4s, v16.4h, v2.4h\n"
+ "smlal2 v21.4s, v16.8h, v2.8h\n"
+ "smlal v3.4s, v16.4h, v15.4h\n"
+ "smlal2 v24.4s, v16.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 69f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 68f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x7, #1, 70f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 70f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"71:" // Oddments: Load (4, 3): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal v19.4s, v16.4h, v7.4h\n"
- "smlal2 v31.4s, v16.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 73f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0xb8]\n"
+ "smlal v3.4s, v17.4h, v5.4h\n"
+ "smlal2 v24.4s, v17.8h, v5.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 73f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 72f\n"
+ "tbz x2, #1, 72f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x7, #1, 74f\n"
+ "tbz x2, #1, 74f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 2): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal v20.4s, v16.4h, v4.4h\n"
- "smlal2 v0.4s, v16.8h, v4.8h\n"
- "smlal v19.4s, v16.4h, v26.4h\n"
- "smlal2 v31.4s, v16.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 77f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 76f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xc0]\n"
+ "smlal v10.4s, v16.4h, v7.4h\n"
+ "smlal2 v21.4s, v16.8h, v7.8h\n"
+ "smlal v3.4s, v16.4h, v22.4h\n"
+ "smlal2 v24.4s, v16.8h, v22.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 77f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 76f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x7, #1, 78f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 78f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"79:" // Oddments: Load (4, 4): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "smlal v19.4s, v16.4h, v4.4h\n"
- "smlal2 v31.4s, v16.8h, v4.8h\n"
- "tbz x7, #2, 81f\n"
- "ld1 { v14.4s }, [x13], #0x10\n"
- "ld1 { v25.4s }, [x12], #0x10\n"
- "tbz x7, #1, 80f\n"
- "ld1 { v18.d }[0], [x13], #0x8\n"
- "ld1 { v12.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x12]\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "smlal v3.4s, v17.4h, v7.4h\n"
+ "smlal2 v24.4s, v17.8h, v7.8h\n"
+ "tbz x2, #2, 81f\n"
+ "ld1 { v16.4s }, [x7], #0x10\n"
+ "ld1 { v22.4s }, [x8], #0x10\n"
+ "tbz x2, #1, 80f\n"
+ "ld1 { v0.d }[0], [x7], #0x8\n"
+ "ld1 { v31.d }[0], [x8], #0x8\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v0.s }[2], [x7]\n"
+ "ld1 { v31.s }[2], [x8]\n"
"b 83f\n"
"80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[0], [x13]\n"
- "ld1 { v12.s }[0], [x12]\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v0.s }[0], [x7]\n"
+ "ld1 { v31.s }[0], [x8]\n"
"b 83f\n"
"81:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 82f\n"
- "ld1 { v14.d }[0], [x13], #0x8\n"
- "ld1 { v25.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v14.s }[2], [x13]\n"
- "ld1 { v25.s }[2], [x12]\n"
+ "tbz x2, #1, 82f\n"
+ "ld1 { v16.d }[0], [x7], #0x8\n"
+ "ld1 { v22.d }[0], [x8], #0x8\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v16.s }[2], [x7]\n"
+ "ld1 { v22.s }[2], [x8]\n"
"b 83f\n"
"82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v14.s }[0], [x13]\n"
- "ld1 { v25.s }[0], [x12]\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v16.s }[0], [x7]\n"
+ "ld1 { v22.s }[0], [x8]\n"
"83:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v5.4s, v5.4s, v14.4s\n"
- "and v28.16b, v5.16b, v25.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v3.4s, v3.4s, v18.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v16.16b, v3.16b, v12.16b\n"
- "sqrdmulh v21.4s, v21.4s, v14.4s\n"
- "sqrdmulh v20.4s, v20.4s, v14.4s\n"
- "sqrdmulh v19.4s, v19.4s, v14.4s\n"
- "sqadd v5.4s, v5.4s, v28.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v16.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v0.4s\n"
+ "add x16, x16, x4\n"
+ "add x15, x15, x4\n"
+ "sqrdmulh v8.4s, v8.4s, v16.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v16.4s\n"
+ "add x14, x14, x4\n"
+ "add x13, x13, x4\n"
+ "sqrdmulh v3.4s, v3.4s, v16.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v0.4s\n"
+ "and v17.16b, v19.16b, v22.16b\n"
+ "and v16.16b, v1.16b, v31.16b\n"
+ "and v15.16b, v8.16b, v22.16b\n"
+ "and v20.16b, v10.16b, v22.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v0.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v0.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v14.16b, v21.16b, v25.16b\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "and v6.16b, v20.16b, v25.16b\n"
- "sqrdmulh v0.4s, v0.4s, v18.4s\n"
- "and v4.16b, v19.16b, v25.16b\n"
- "sqrdmulh v31.4s, v31.4s, v18.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v14.4s, v14.4s, #0x1f\n"
- "and v18.16b, v8.16b, v12.16b\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "and v7.16b, v0.16b, v12.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "and v16.16b, v31.16b, v12.16b\n"
- "sqadd v21.4s, v21.4s, v14.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v6.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v15.4s, v15.4s, #0x1f\n"
+ "and v26.16b, v4.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v0.16b, v21.16b, v31.16b\n"
+ "sqadd v19.4s, v19.4s, v17.4s\n"
+ "and v17.16b, v3.16b, v22.16b\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v16.16b, v24.16b, v31.16b\n"
+ "sqadd v8.4s, v8.4s, v15.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v25.4s\n"
- "srshl v21.4s, v21.4s, v25.4s\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "srshl v20.4s, v20.4s, v25.4s\n"
- "sqadd v0.4s, v0.4s, v7.4s\n"
- "srshl v19.4s, v19.4s, v25.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v12.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v12.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v12.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v22.4s\n"
+ "srshl v8.4s, v8.4s, v22.4s\n"
+ "sqadd v3.4s, v3.4s, v17.4s\n"
+ "sqadd v4.4s, v4.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v22.4s\n"
+ "sqadd v21.4s, v21.4s, v0.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v31.4s\n"
+ "srshl v3.4s, v3.4s, v22.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "srshl v4.4s, v4.4s, v31.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v31.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "tbz x7, #2, 85f\n"
- "st1 { v5.s }[0], [x11], #0x4\n"
- "st1 { v21.s }[0], [x10], #0x4\n"
- "st1 { v20.s }[0], [x9], #0x4\n"
- "st1 { v19.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 84f\n"
- "st1 { v5.h }[2], [x11], #0x2\n"
- "st1 { v21.h }[2], [x10], #0x2\n"
- "st1 { v20.h }[2], [x9], #0x2\n"
- "st1 { v19.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[6], [x11], #0x1\n"
- "st1 { v21.b }[6], [x10], #0x1\n"
- "st1 { v20.b }[6], [x9], #0x1\n"
- "st1 { v19.b }[6], [x28], #0x1\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "tbz x2, #2, 85f\n"
+ "st1 { v19.s }[0], [x16], #0x4\n"
+ "st1 { v8.s }[0], [x15], #0x4\n"
+ "st1 { v10.s }[0], [x14], #0x4\n"
+ "st1 { v3.s }[0], [x13], #0x4\n"
+ "tbz x2, #1, 84f\n"
+ "st1 { v19.h }[2], [x16], #0x2\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "st1 { v10.h }[2], [x14], #0x2\n"
+ "st1 { v3.h }[2], [x13], #0x2\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[6], [x16], #0x1\n"
+ "st1 { v8.b }[6], [x15], #0x1\n"
+ "st1 { v10.b }[6], [x14], #0x1\n"
+ "st1 { v3.b }[6], [x13], #0x1\n"
"b 87f\n"
"84:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[4], [x11], #0x1\n"
- "st1 { v21.b }[4], [x10], #0x1\n"
- "st1 { v20.b }[4], [x9], #0x1\n"
- "st1 { v19.b }[4], [x28], #0x1\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[4], [x16], #0x1\n"
+ "st1 { v8.b }[4], [x15], #0x1\n"
+ "st1 { v10.b }[4], [x14], #0x1\n"
+ "st1 { v3.b }[4], [x13], #0x1\n"
"b 87f\n"
"85:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 86f\n"
- "st1 { v5.h }[0], [x11], #0x2\n"
- "st1 { v21.h }[0], [x10], #0x2\n"
- "st1 { v20.h }[0], [x9], #0x2\n"
- "st1 { v19.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[2], [x11], #0x1\n"
- "st1 { v21.b }[2], [x10], #0x1\n"
- "st1 { v20.b }[2], [x9], #0x1\n"
- "st1 { v19.b }[2], [x28], #0x1\n"
+ "tbz x2, #1, 86f\n"
+ "st1 { v19.h }[0], [x16], #0x2\n"
+ "st1 { v8.h }[0], [x15], #0x2\n"
+ "st1 { v10.h }[0], [x14], #0x2\n"
+ "st1 { v3.h }[0], [x13], #0x2\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[2], [x16], #0x1\n"
+ "st1 { v8.b }[2], [x15], #0x1\n"
+ "st1 { v10.b }[2], [x14], #0x1\n"
+ "st1 { v3.b }[2], [x13], #0x1\n"
"b 87f\n"
"86:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[0], [x11], #0x1\n"
- "st1 { v21.b }[0], [x10], #0x1\n"
- "st1 { v20.b }[0], [x9], #0x1\n"
- "st1 { v19.b }[0], [x28], #0x1\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[0], [x16], #0x1\n"
+ "st1 { v8.b }[0], [x15], #0x1\n"
+ "st1 { v10.b }[0], [x14], #0x1\n"
+ "st1 { v3.b }[0], [x13], #0x1\n"
"87:" // Oddments: Bit 2: End
"88:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
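
Editor's note for readers of the hunk above: the bulk of it appears to be regenerated output with re-allocated NEON and general-purpose registers and rescheduled instructions. Two recurring patterns help when tracing it: the "Oddments" paths use tbz tests on bits 2/1/0 of the remaining channel count to load and store 4-, 2- and 1-element tails, and the epilogue is a fixed-point requantization (sqrdmulh by the per-channel multiplier, an and/sshr/sqadd rounding fixup, srshl by the non-positive per-channel shift, then the c_offset addition, min/max clamping and narrowing to bytes via sqxtn/uzp1). The following is a minimal scalar sketch of one requantized output lane, assuming the usual Requantize32 field semantics visible in the asm operand list; requantize_lane and its sample arguments are illustrative, not library API.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical scalar model of one lane of the requantize epilogue above.
static uint8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                               int32_t c_offset, int32_t minval, int32_t maxval)
{
    // SQRDMULH: rounded high half of the doubled product, saturating
    // (the only overflow case is INT32_MIN * INT32_MIN).
    int32_t high = (acc == INT32_MIN && mul == INT32_MIN)
                       ? INT32_MAX
                       : static_cast<int32_t>(
                             (static_cast<int64_t>(acc) * mul + (1LL << 30)) >> 31);

    // Shifts are stored as non-positive values; SRSHL by a negative amount
    // is a rounding arithmetic right shift. The AND/SSHR/SQADD triple before
    // it subtracts 1 from negative lanes so that ties round half away from
    // zero (the saturation of SQADD is ignored here for clarity).
    const int32_t rshift = -shift;
    if (rshift > 0)
    {
        if (high < 0) --high;
        high = static_cast<int32_t>(
            (static_cast<int64_t>(high) + (1LL << (rshift - 1))) >> rshift);
    }

    // SQXTN, SQADD c_offset, SMAX minval, SMIN maxval, UZP1 to bytes.
    return static_cast<uint8_t>(std::min(std::max(high + c_offset, minval), maxval));
}

int main()
{
    // Illustrative parameters only.
    std::printf("%u\n", static_cast<unsigned>(
                            requantize_lane(12345, 1305531, -8, 128, 0, 255)));
    return 0;
}
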
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index a3fa93df9c..5798451720 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[36];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -113,1743 +113,1743 @@ void a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
__asm__ __volatile__(
"ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x2, x1, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v18.16b }, [x20]\n"
+ "mov x2, #0x0\n"
+ "mov x3, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x4, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x5, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x14, x1, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v15.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_b_offset]\n"
"add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v13.16b }, [x21]\n"
- "ld1r { v26.8h }, [x20]\n"
+ "ld1r { v9.16b }, [x21]\n"
+ "ld1r { v13.8h }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_minval]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v11.8h }, [x21]\n"
- "ld1r { v0.8h }, [x20]\n"
- "mov x3, #0x0\n"
- "mov x4, #0x0\n"
- "add x5, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x6, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x17, x16, [x22, #0x0]\n"
- "ldp x15, x14, [x22, #0x10]\n"
- "cbz x2, 3f\n"
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "subs x2, x2, #0x1\n"
- "usubl v6.8h, v6.8b, v13.8b\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "usubl v14.8h, v14.8b, v13.8b\n"
- "usubl v10.8h, v10.8b, v13.8b\n"
- "ldr d12, [x6, #0x20]\n"
+ "ld1r { v10.8h }, [x21]\n"
+ "ld1r { v14.8h }, [x20]\n"
+ "ldp x8, x17, [x22, #0x0]\n"
+ "ldp x16, x15, [x22, #0x10]\n"
+ "cbz x14, 3f\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "subs x14, x14, #0x1\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "ldr d23, [x5, #0x20]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "usubl v12.8h, v12.8b, v13.8b\n"
- "ldr q7, [x20, #0x0]\n"
- "ldr q15, [x20, #0x10]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "usubl v12.8h, v12.8b, v9.8b\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "usubl v11.8h, v11.8b, v9.8b\n"
+ "ldr q8, [x20, #0x0]\n"
+ "ldr q0, [x20, #0x10]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
"add x20, x20, #0x20\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "ldr d31, [x9, x3]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldr d17, [x28, x3]\n"
- "ldr d30, [x27, x3]\n"
- "usubl v31.8h, v31.8b, v18.8b\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "ldr d16, [x26, x3]\n"
- "ldr d3, [x25, x3]\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "ldr d4, [x24, x3]\n"
- "ldr d25, [x23, x3]\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "ldr d9, [x22, x3]\n"
- "ldr d29, [x21, x3]\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "ldr d28, [x20, x3]\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "ldr d24, [x9, x2]\n"
+ "ldr d21, [x28, x2]\n"
+ "ldr d16, [x27, x2]\n"
+ "ldr d20, [x26, x2]\n"
+ "ldr d7, [x25, x2]\n"
+ "ldr d19, [x24, x2]\n"
+ "ldr d28, [x23, x2]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "ldr d26, [x22, x2]\n"
+ "ldr d29, [x21, x2]\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "ldr d18, [x20, x2]\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "usubl v18.8h, v18.8b, v15.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr d2, [x6, #0x28]\n"
- "ldr d27, [x6, #0x30]\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "ldr d1, [x6, #0x38]\n"
- "ldr d31, [x6, #0x40]\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "ldr d8, [x6, #0x48]\n"
- "ldr x22, [x5, #0x50]\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "ldr x20, [x5, #0x58]\n"
- "ldr x21, [x5, #0x60]\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "usubl v2.8h, v2.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "usubl v27.8h, v27.8b, v13.8b\n"
- "ldr x22, [x5, #0x70]\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "ldr d14, [x20, x3]\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal v23.4s, v17.4h, v10.4h\n"
- "usubl v1.8h, v1.8b, v13.8b\n"
- "ldr x20, [x5, #0x78]\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "usubl v31.8h, v31.8b, v13.8b\n"
- "ldr x21, [x5, #0x80]\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "ldr d25, [x22, x3]\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v10.8h\n"
- "ldr d10, [x20, x3]\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "smlal v24.4s, v17.4h, v21.4h\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "usubl v8.8h, v8.8b, v13.8b\n"
- "ldr x24, [x5, #0x88]\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "smlal v7.4s, v30.4h, v2.4h\n"
- "ldr x20, [x5, #0x90]\n"
- "ldr x23, [x5, #0x98]\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "ldr d9, [x21, x3]\n"
- "smlal2 v22.4s, v17.8h, v21.8h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "ldr d21, [x6, #0x50]\n"
- "smlal v20.4s, v3.4h, v12.4h\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "ldr x22, [x5, #0xa0]\n"
- "ldr x21, [x5, #0xa8]\n"
- "smlal2 v15.4s, v30.8h, v2.8h\n"
- "ldr d30, [x24, x3]\n"
- "smlal v7.4s, v16.4h, v27.4h\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v12.8h\n"
- "ldr d3, [x6, #0x58]\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "usubl v3.8h, v3.8b, v13.8b\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "ldr d12, [x20, x3]\n"
- "smlal v20.4s, v16.4h, v2.4h\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal v23.4s, v14.4h, v2.4h\n"
- "ldr x20, [x5, #0xb0]\n"
- "ldr x13, [x5, #0xb8]\n"
- "smlal2 v15.4s, v16.8h, v27.8h\n"
- "smlal v7.4s, v4.4h, v1.4h\n"
- "ldr x12, [x5, #0xc0]\n"
- "ldr x11, [x5, #0xc8]\n"
- "smlal2 v5.4s, v16.8h, v2.8h\n"
- "ldr d16, [x23, x3]\n"
- "smlal2 v22.4s, v28.8h, v2.8h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v2.8h\n"
- "ldr d2, [x6, #0x60]\n"
- "smlal v20.4s, v4.4h, v27.4h\n"
- "usubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v27.4h\n"
- "smlal v23.4s, v25.4h, v27.4h\n"
- "ldr x10, [x5, #0xd0]\n"
- "ldr x9, [x5, #0xd8]\n"
- "smlal2 v15.4s, v4.8h, v1.8h\n"
- "smlal v7.4s, v17.4h, v31.4h\n"
- "ldr x28, [x5, #0xe0]\n"
- "ldr x27, [x5, #0xe8]\n"
- "smlal2 v5.4s, v4.8h, v27.8h\n"
- "ldr d4, [x22, x3]\n"
- "smlal2 v22.4s, v14.8h, v27.8h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v27.8h\n"
- "ldr d27, [x6, #0x68]\n"
- "smlal v20.4s, v17.4h, v1.4h\n"
- "usubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v1.4h\n"
- "smlal v23.4s, v10.4h, v1.4h\n"
- "ldr x26, [x5, #0xf0]\n"
- "ldr x25, [x5, #0xf8]\n"
- "smlal2 v15.4s, v17.8h, v31.8h\n"
- "smlal v7.4s, v6.4h, v8.4h\n"
- "ldr x24, [x5, #0x100]\n"
- "ldr x23, [x5, #0x108]\n"
- "smlal2 v5.4s, v17.8h, v1.8h\n"
- "ldr d17, [x21, x3]\n"
- "smlal2 v22.4s, v25.8h, v1.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v1.8h\n"
- "ldr d1, [x6, #0x70]\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "usubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v31.4h\n"
- "smlal v23.4s, v9.4h, v31.4h\n"
- "ldr x22, [x5, #0x110]\n"
- "ldr x21, [x5, #0x118]\n"
- "smlal2 v15.4s, v6.8h, v8.8h\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
- "subs x2, x2, #0x1\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal2 v22.4s, v10.8h, v31.8h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v31.8h\n"
- "ldr d31, [x6, #0x78]\n"
- "smlal v20.4s, v29.4h, v8.4h\n"
- "usubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v9.4h, v8.4h\n"
- "smlal v23.4s, v30.4h, v8.4h\n"
+ "ldr d3, [x5, #0x28]\n"
+ "ldr d2, [x5, #0x30]\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "ldr d4, [x5, #0x38]\n"
+ "ldr d22, [x5, #0x40]\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "ldr d24, [x5, #0x48]\n"
+ "ldr x23, [x4, #0x50]\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x50]\n"
+ "ldr x22, [x4, #0x58]\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "ldr d21, [x5, #0x58]\n"
+ "ldr x21, [x4, #0x60]\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "usubl v3.8h, v3.8b, v9.8b\n"
+ "ldr x20, [x4, #0x68]\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "usubl v2.8h, v2.8b, v9.8b\n"
+ "ldr x28, [x4, #0x70]\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "ldr d12, [x23, x2]\n"
+ "usubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "ldr d7, [x22, x2]\n"
+ "ldr x27, [x4, #0x78]\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "ldr x26, [x4, #0x80]\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "ldr x25, [x4, #0x88]\n"
+ "ldr x24, [x4, #0x90]\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
+ "ldr x23, [x4, #0x98]\n"
+ "ldr x22, [x4, #0xa0]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x21, x2]\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v17.4h\n"
+ "smlal2 v30.4s, v12.8h, v17.8h\n"
+ "ldr d17, [x20, x2]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal v1.4s, v12.4h, v11.4h\n"
+ "usubl v21.8h, v21.8b, v9.8b\n"
+ "ldr x21, [x4, #0xa8]\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal2 v25.4s, v12.8h, v11.8h\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "ldr d26, [x28, x2]\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v7.4h, v11.4h\n"
+ "smlal2 v30.4s, v7.8h, v11.8h\n"
+ "ldr d11, [x27, x2]\n"
+ "ldr x13, [x4, #0xb8]\n"
+ "smlal v27.4s, v28.4h, v23.4h\n"
+ "smlal v1.4s, v7.4h, v23.4h\n"
+ "ldr x12, [x4, #0xc0]\n"
+ "ldr x11, [x4, #0xc8]\n"
+ "smlal2 v6.4s, v28.8h, v23.8h\n"
+ "ldr d28, [x26, x2]\n"
+ "smlal2 v25.4s, v7.8h, v23.8h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v16.4h, v3.4h\n"
+ "smlal2 v0.4s, v16.8h, v3.8h\n"
+ "ldr d16, [x25, x2]\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "ldr d23, [x24, x2]\n"
+ "ldr x10, [x4, #0xd0]\n"
+ "smlal v27.4s, v20.4h, v3.4h\n"
+ "smlal v1.4s, v18.4h, v3.4h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x9, [x4, #0xd8]\n"
+ "smlal2 v6.4s, v20.8h, v3.8h\n"
+ "smlal2 v25.4s, v18.8h, v3.8h\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x28, [x4, #0xe0]\n"
+ "smlal v8.4s, v20.4h, v2.4h\n"
+ "smlal2 v0.4s, v20.8h, v2.8h\n"
+ "ldr d20, [x23, x2]\n"
+ "usubl v23.8h, v23.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v3.4h\n"
+ "smlal2 v30.4s, v17.8h, v3.8h\n"
+ "ldr d3, [x5, #0x60]\n"
+ "ldr x27, [x4, #0xe8]\n"
+ "smlal v27.4s, v19.4h, v2.4h\n"
+ "smlal v1.4s, v17.4h, v2.4h\n"
+ "ldr x26, [x4, #0xf0]\n"
+ "ldr x25, [x4, #0xf8]\n"
+ "smlal2 v6.4s, v19.8h, v2.8h\n"
+ "smlal2 v25.4s, v17.8h, v2.8h\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x24, [x4, #0x100]\n"
+ "smlal v8.4s, v19.4h, v4.4h\n"
+ "smlal2 v0.4s, v19.8h, v4.8h\n"
+ "ldr d19, [x22, x2]\n"
+ "usubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v2.4h\n"
+ "smlal2 v30.4s, v26.8h, v2.8h\n"
+ "ldr d2, [x5, #0x68]\n"
+ "ldr x23, [x4, #0x108]\n"
+ "smlal v27.4s, v12.4h, v4.4h\n"
+ "smlal v1.4s, v26.4h, v4.4h\n"
+ "ldr x22, [x4, #0x110]\n"
+ "subs x14, x14, #0x1\n"
+ "smlal2 v6.4s, v12.8h, v4.8h\n"
+ "smlal2 v25.4s, v26.8h, v4.8h\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v22.4h\n"
+ "smlal2 v0.4s, v12.8h, v22.8h\n"
+ "ldr d12, [x21, x2]\n"
+ "usubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v4.4h\n"
+ "smlal2 v30.4s, v11.8h, v4.8h\n"
+ "ldr d4, [x5, #0x70]\n"
+ "ldr x21, [x4, #0x118]\n"
+ "smlal v27.4s, v7.4h, v22.4h\n"
+ "smlal v1.4s, v11.4h, v22.4h\n"
+ "smlal2 v6.4s, v7.8h, v22.8h\n"
+ "smlal2 v25.4s, v11.8h, v22.8h\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v24.4h\n"
+ "smlal2 v0.4s, v7.8h, v24.8h\n"
+ "ldr d7, [x20, x2]\n"
+ "usubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v22.4h\n"
+ "smlal2 v30.4s, v28.8h, v22.8h\n"
+ "ldr d22, [x5, #0x78]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "ldr d28, [x13, x3]\n"
- "smlal v7.4s, v14.4h, v3.4h\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v5.4s, v29.8h, v8.8h\n"
- "ldr d29, [x6, #0x80]\n"
- "smlal2 v22.4s, v9.8h, v8.8h\n"
- "usubl v29.8h, v29.8b, v13.8b\n"
- "smlal2 v19.4s, v30.8h, v8.8h\n"
- "ldr d8, [x12, x3]\n"
- "smlal v20.4s, v14.4h, v21.4h\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "smlal v24.4s, v12.4h, v21.4h\n"
- "smlal v23.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v14.8h, v3.8h\n"
- "smlal v7.4s, v25.4h, v2.4h\n"
- "smlal2 v5.4s, v14.8h, v21.8h\n"
- "ldr d14, [x11, x3]\n"
- "smlal2 v22.4s, v12.8h, v21.8h\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v21.8h\n"
- "ldr d21, [x6, #0x88]\n"
- "smlal v20.4s, v25.4h, v3.4h\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v16.4h, v3.4h\n"
- "smlal v23.4s, v4.4h, v3.4h\n"
- "smlal2 v15.4s, v25.8h, v2.8h\n"
- "smlal v7.4s, v10.4h, v27.4h\n"
- "smlal2 v5.4s, v25.8h, v3.8h\n"
- "ldr d25, [x10, x3]\n"
- "smlal2 v22.4s, v16.8h, v3.8h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v3.8h\n"
- "ldr d3, [x6, #0x90]\n"
- "smlal v20.4s, v10.4h, v2.4h\n"
- "usubl v3.8h, v3.8b, v13.8b\n"
- "smlal v24.4s, v4.4h, v2.4h\n"
- "smlal v23.4s, v17.4h, v2.4h\n"
- "smlal2 v15.4s, v10.8h, v27.8h\n"
- "smlal v7.4s, v9.4h, v1.4h\n"
- "smlal2 v5.4s, v10.8h, v2.8h\n"
- "ldr d10, [x9, x3]\n"
- "smlal2 v22.4s, v4.8h, v2.8h\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v2.8h\n"
- "ldr d2, [x6, #0x98]\n"
- "smlal v20.4s, v9.4h, v27.4h\n"
- "usubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v17.4h, v27.4h\n"
- "smlal v23.4s, v6.4h, v27.4h\n"
- "smlal2 v15.4s, v9.8h, v1.8h\n"
- "smlal v7.4s, v12.4h, v31.4h\n"
- "smlal2 v5.4s, v9.8h, v27.8h\n"
- "ldr d9, [x28, x3]\n"
- "smlal2 v22.4s, v17.8h, v27.8h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v27.8h\n"
- "ldr d27, [x6, #0xa0]\n"
- "smlal v20.4s, v30.4h, v1.4h\n"
- "usubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v1.4h\n"
- "smlal v23.4s, v28.4h, v1.4h\n"
- "smlal2 v15.4s, v12.8h, v31.8h\n"
- "ldr d12, [x27, x3]\n"
- "smlal v7.4s, v16.4h, v29.4h\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "smlal2 v5.4s, v30.8h, v1.8h\n"
- "ldr d30, [x6, #0xa8]\n"
- "smlal2 v22.4s, v6.8h, v1.8h\n"
- "usubl v30.8h, v30.8b, v13.8b\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "ldr d1, [x26, x3]\n"
- "smlal v20.4s, v16.4h, v31.4h\n"
- "usubl v1.8h, v1.8b, v18.8b\n"
- "smlal v24.4s, v8.4h, v31.4h\n"
- "smlal v23.4s, v14.4h, v31.4h\n"
- "smlal2 v15.4s, v16.8h, v29.8h\n"
- "smlal v7.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v16.8h, v31.8h\n"
- "ldr d16, [x25, x3]\n"
- "smlal2 v22.4s, v8.8h, v31.8h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v31.8h\n"
- "ldr d31, [x6, #0xb0]\n"
- "smlal v20.4s, v4.4h, v29.4h\n"
- "usubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v29.4h\n"
- "smlal v23.4s, v25.4h, v29.4h\n"
- "smlal2 v15.4s, v4.8h, v21.8h\n"
- "smlal v7.4s, v17.4h, v3.4h\n"
- "smlal2 v5.4s, v4.8h, v29.8h\n"
- "ldr d4, [x24, x3]\n"
- "smlal2 v22.4s, v14.8h, v29.8h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v29.8h\n"
- "ldr d29, [x6, #0xb8]\n"
- "smlal v20.4s, v17.4h, v21.4h\n"
- "usubl v29.8h, v29.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v15.4s, v17.8h, v3.8h\n"
- "smlal v7.4s, v6.4h, v2.4h\n"
- "smlal2 v5.4s, v17.8h, v21.8h\n"
- "ldr d17, [x23, x3]\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "ldr d21, [x6, #0xc0]\n"
- "smlal v20.4s, v6.4h, v3.4h\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v3.4h\n"
- "smlal v23.4s, v9.4h, v3.4h\n"
- "add x6, x6, #0xc8\n"
- "smlal2 v15.4s, v6.8h, v2.8h\n"
- "smlal v7.4s, v8.4h, v27.4h\n"
- "smlal2 v5.4s, v6.8h, v3.8h\n"
- "ldr d6, [x22, x3]\n"
- "smlal2 v22.4s, v10.8h, v3.8h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v3.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal v20.4s, v28.4h, v2.4h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal v24.4s, v9.4h, v2.4h\n"
- "smlal v23.4s, v12.4h, v2.4h\n"
- "add x3, x3, #0x8\n"
- "smlal2 v15.4s, v8.8h, v27.8h\n"
- "ldr q8, [x7, #0x0]\n"
- "smlal v7.4s, v14.4h, v30.4h\n"
- "smlal2 v5.4s, v28.8h, v2.8h\n"
- "ldr q28, [x8, #0x0]\n"
- "smlal2 v22.4s, v9.8h, v2.8h\n"
- "smlal2 v19.4s, v12.8h, v2.8h\n"
- "ldr q2, [x7, #0x10]\n"
- "smlal v20.4s, v14.4h, v27.4h\n"
+ "smlal v27.4s, v29.4h, v24.4h\n"
+ "smlal v1.4s, v28.4h, v24.4h\n"
+ "smlal2 v6.4s, v29.8h, v24.8h\n"
+ "ldr d29, [x13, x2]\n"
+ "smlal2 v25.4s, v28.8h, v24.8h\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v18.4h, v31.4h\n"
+ "smlal2 v0.4s, v18.8h, v31.8h\n"
+ "ldr d18, [x5, #0x80]\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v24.4h\n"
+ "smlal2 v30.4s, v16.8h, v24.8h\n"
+ "ldr d24, [x12, x2]\n"
+ "smlal v27.4s, v17.4h, v31.4h\n"
+ "smlal v1.4s, v23.4h, v31.4h\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v6.4s, v17.8h, v31.8h\n"
+ "smlal2 v25.4s, v23.8h, v31.8h\n"
+ "usubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v8.4s, v17.4h, v21.4h\n"
+ "smlal2 v0.4s, v17.8h, v21.8h\n"
+ "ldr d17, [x11, x2]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x88]\n"
+ "smlal v27.4s, v26.4h, v21.4h\n"
+ "smlal v1.4s, v20.4h, v21.4h\n"
+ "smlal2 v6.4s, v26.8h, v21.8h\n"
+ "smlal2 v25.4s, v20.8h, v21.8h\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v8.4s, v26.4h, v3.4h\n"
+ "smlal2 v0.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x10, x2]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v21.4h\n"
+ "smlal2 v30.4s, v19.8h, v21.8h\n"
+ "ldr d21, [x5, #0x90]\n"
+ "smlal v27.4s, v11.4h, v3.4h\n"
+ "smlal v1.4s, v19.4h, v3.4h\n"
+ "smlal2 v6.4s, v11.8h, v3.8h\n"
+ "smlal2 v25.4s, v19.8h, v3.8h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v11.4h, v2.4h\n"
+ "smlal2 v0.4s, v11.8h, v2.8h\n"
+ "ldr d11, [x9, x2]\n"
+ "usubl v21.8h, v21.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v3.4h\n"
+ "smlal2 v30.4s, v12.8h, v3.8h\n"
+ "ldr d3, [x5, #0x98]\n"
+ "smlal v27.4s, v28.4h, v2.4h\n"
+ "smlal v1.4s, v12.4h, v2.4h\n"
+ "smlal2 v6.4s, v28.8h, v2.8h\n"
+ "smlal2 v25.4s, v12.8h, v2.8h\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v8.4s, v28.4h, v4.4h\n"
+ "smlal2 v0.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x28, x2]\n"
+ "usubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v2.4h\n"
+ "smlal2 v30.4s, v7.8h, v2.8h\n"
+ "ldr d2, [x5, #0xa0]\n"
+ "smlal v27.4s, v16.4h, v4.4h\n"
+ "smlal v1.4s, v7.4h, v4.4h\n"
+ "smlal2 v6.4s, v16.8h, v4.8h\n"
+ "ldr d16, [x27, x2]\n"
+ "smlal2 v25.4s, v7.8h, v4.8h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "smlal v8.4s, v23.4h, v22.4h\n"
+ "smlal2 v0.4s, v23.8h, v22.8h\n"
+ "ldr d23, [x5, #0xa8]\n"
+ "usubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d4, [x26, x2]\n"
+ "smlal v27.4s, v20.4h, v22.4h\n"
+ "smlal v1.4s, v24.4h, v22.4h\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "smlal2 v6.4s, v20.8h, v22.8h\n"
+ "smlal2 v25.4s, v24.8h, v22.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "ldr d20, [x25, x2]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v22.4h\n"
+ "smlal2 v30.4s, v17.8h, v22.8h\n"
+ "ldr d22, [x5, #0xb0]\n"
+ "smlal v27.4s, v19.4h, v18.4h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v6.4s, v19.8h, v18.8h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "smlal v8.4s, v19.4h, v31.4h\n"
+ "smlal2 v0.4s, v19.8h, v31.8h\n"
+ "ldr d19, [x24, x2]\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v18.4h\n"
+ "smlal2 v30.4s, v26.8h, v18.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "smlal v27.4s, v12.4h, v31.4h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v6.4s, v12.8h, v31.8h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v21.4h\n"
+ "smlal2 v0.4s, v12.8h, v21.8h\n"
+ "ldr d12, [x23, x2]\n"
+ "usubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v31.4h\n"
+ "smlal2 v30.4s, v11.8h, v31.8h\n"
+ "ldr d31, [x5, #0xc0]\n"
+ "add x5, x5, #0xc8\n"
+ "smlal v27.4s, v7.4h, v21.4h\n"
+ "smlal v1.4s, v11.4h, v21.4h\n"
+ "smlal2 v6.4s, v7.8h, v21.8h\n"
+ "smlal2 v25.4s, v11.8h, v21.8h\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v3.4h\n"
+ "smlal2 v0.4s, v7.8h, v3.8h\n"
+ "ldr d7, [x22, x2]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v21.4h\n"
+ "smlal2 v30.4s, v28.8h, v21.8h\n"
+ "ldr d21, [x21, x2]\n"
+ "add x2, x2, #0x8\n"
+ "smlal v27.4s, v29.4h, v3.4h\n"
+ "smlal v1.4s, v28.4h, v3.4h\n"
+ "smlal2 v6.4s, v29.8h, v3.8h\n"
+ "ldr q29, [x6, #0x0]\n"
+ "smlal2 v25.4s, v28.8h, v3.8h\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v2.4h\n"
+ "smlal2 v0.4s, v24.8h, v2.8h\n"
+ "ldr q24, [x7, #0x0]\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "smlal v5.4s, v16.4h, v3.4h\n"
+ "smlal2 v30.4s, v16.8h, v3.8h\n"
+ "ldr q3, [x6, #0x10]\n"
+ "add x6, x6, #0x20\n"
+ "smlal v27.4s, v17.4h, v2.4h\n"
+ "smlal v1.4s, v4.4h, v2.4h\n"
+ "smlal2 v6.4s, v17.8h, v2.8h\n"
+ "smlal2 v25.4s, v4.8h, v2.8h\n"
+ "ldr q4, [x7, #0x10]\n"
"add x7, x7, #0x20\n"
- "smlal v24.4s, v1.4h, v27.4h\n"
- "smlal v23.4s, v16.4h, v27.4h\n"
- "smlal2 v15.4s, v14.8h, v30.8h\n"
- "smlal v7.4s, v25.4h, v31.4h\n"
- "smlal2 v5.4s, v14.8h, v27.8h\n"
- "ldr q14, [x8, #0x10]\n"
- "smlal2 v22.4s, v1.8h, v27.8h\n"
- "add x8, x8, #0x20\n"
- "smlal2 v19.4s, v16.8h, v27.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal v24.4s, v16.4h, v30.4h\n"
- "smlal v23.4s, v4.4h, v30.4h\n"
- "smlal2 v15.4s, v25.8h, v31.8h\n"
- "smlal v7.4s, v10.4h, v29.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal2 v22.4s, v16.8h, v30.8h\n"
- "smlal2 v19.4s, v4.8h, v30.8h\n"
- "smlal v20.4s, v10.4h, v31.4h\n"
- "smlal v24.4s, v4.4h, v31.4h\n"
- "smlal v23.4s, v17.4h, v31.4h\n"
- "smlal2 v15.4s, v10.8h, v29.8h\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "sqrdmulh v7.4s, v7.4s, v8.4s\n"
- "smlal2 v5.4s, v10.8h, v31.8h\n"
- "smlal2 v22.4s, v4.8h, v31.8h\n"
- "and v27.16b, v7.16b, v28.16b\n"
- "smlal2 v19.4s, v17.8h, v31.8h\n"
- "smlal v20.4s, v9.4h, v29.4h\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "smlal v24.4s, v17.4h, v29.4h\n"
- "smlal v23.4s, v6.4h, v29.4h\n"
- "sqadd v7.4s, v7.4s, v27.4s\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal2 v5.4s, v9.8h, v29.8h\n"
- "sqrdmulh v15.4s, v15.4s, v2.4s\n"
- "smlal2 v22.4s, v17.8h, v29.8h\n"
- "smlal2 v19.4s, v6.8h, v29.8h\n"
- "and v9.16b, v15.16b, v14.16b\n"
- "smlal v20.4s, v12.4h, v21.4h\n"
- "smlal v24.4s, v6.4h, v21.4h\n"
- "sqrdmulh v20.4s, v20.4s, v8.4s\n"
- "smlal v23.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v12.8h, v21.8h\n"
- "sqrdmulh v24.4s, v24.4s, v8.4s\n"
- "smlal2 v22.4s, v6.8h, v21.8h\n"
- "smlal2 v19.4s, v3.8h, v21.8h\n"
- "sqrdmulh v23.4s, v23.4s, v8.4s\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "and v25.16b, v20.16b, v28.16b\n"
- "sqrdmulh v5.4s, v5.4s, v2.4s\n"
- "and v10.16b, v24.16b, v28.16b\n"
- "sqrdmulh v22.4s, v22.4s, v2.4s\n"
- "and v21.16b, v23.16b, v28.16b\n"
- "sqrdmulh v19.4s, v19.4s, v2.4s\n"
- "sqadd v15.4s, v15.4s, v9.4s\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "and v9.16b, v5.16b, v14.16b\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "and v12.16b, v22.16b, v14.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v17.16b, v19.16b, v14.16b\n"
- "sqadd v20.4s, v20.4s, v25.4s\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v10.4s\n"
+ "smlal v8.4s, v17.4h, v23.4h\n"
+ "smlal2 v0.4s, v17.8h, v23.8h\n"
+ "smlal v5.4s, v20.4h, v2.4h\n"
+ "smlal2 v30.4s, v20.8h, v2.8h\n"
+ "smlal v27.4s, v26.4h, v23.4h\n"
+ "smlal v1.4s, v20.4h, v23.4h\n"
+ "smlal2 v6.4s, v26.8h, v23.8h\n"
+ "smlal2 v25.4s, v20.8h, v23.8h\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal2 v0.4s, v26.8h, v22.8h\n"
+ "smlal v5.4s, v19.4h, v23.4h\n"
+ "smlal2 v30.4s, v19.8h, v23.8h\n"
+ "smlal v27.4s, v11.4h, v22.4h\n"
+ "smlal v1.4s, v19.4h, v22.4h\n"
+ "smlal2 v6.4s, v11.8h, v22.8h\n"
+ "smlal2 v25.4s, v19.8h, v22.8h\n"
+ "smlal v8.4s, v11.4h, v18.4h\n"
+ "smlal2 v0.4s, v11.8h, v18.8h\n"
+ "smlal v5.4s, v12.4h, v22.4h\n"
+ "smlal2 v30.4s, v12.8h, v22.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal v1.4s, v12.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal2 v25.4s, v12.8h, v18.8h\n"
+ "smlal v8.4s, v28.4h, v31.4h\n"
+ "smlal2 v0.4s, v28.8h, v31.8h\n"
+ "smlal v5.4s, v7.4h, v18.4h\n"
+ "smlal2 v30.4s, v7.8h, v18.8h\n"
+ "smlal v27.4s, v16.4h, v31.4h\n"
+ "smlal v1.4s, v7.4h, v31.4h\n"
+ "smlal2 v6.4s, v16.8h, v31.8h\n"
+ "smlal2 v25.4s, v7.8h, v31.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v29.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v3.4s\n"
+ "smlal v5.4s, v21.4h, v31.4h\n"
+ "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "and v17.16b, v8.16b, v24.16b\n"
+ "sqrdmulh v27.4s, v27.4s, v29.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v29.4s\n"
+ "and v12.16b, v0.16b, v4.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v3.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v3.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v5.4s, v5.4s, v29.4s\n"
"sshr v12.4s, v12.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
+ "and v21.16b, v27.16b, v24.16b\n"
+ "and v16.16b, v1.16b, v24.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v3.4s\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v28.16b, v5.16b, v24.16b\n"
+ "sqadd v0.4s, v0.4s, v12.4s\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v18.16b, v6.16b, v4.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v17.16b, v25.16b, v4.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v3.16b, v30.16b, v4.16b\n"
+ "sqadd v27.4s, v27.4s, v21.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v28.4s\n"
- "srshl v20.4s, v20.4s, v28.4s\n"
- "sqadd v5.4s, v5.4s, v9.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqadd v22.4s, v22.4s, v12.4s\n"
- "srshl v23.4s, v23.4s, v28.4s\n"
- "sqadd v19.4s, v19.4s, v17.4s\n"
- "srshl v15.4s, v15.4s, v14.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v14.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v14.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v14.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "str d7, [x17, x4]\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str d20, [x16, x4]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str d24, [x15, x4]\n"
- "str d23, [x14, x4]\n"
- "ldr q7, [x20, #0x0]\n"
- "ldr q15, [x20, #0x10]\n"
+ "sqadd v5.4s, v5.4s, v28.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v24.4s\n"
+ "srshl v27.4s, v27.4s, v24.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v24.4s\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "srshl v5.4s, v5.4s, v24.4s\n"
+ "sqadd v30.4s, v30.4s, v3.4s\n"
+ "srshl v0.4s, v0.4s, v4.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v4.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v4.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v4.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "str d8, [x8, x3]\n"
+ "str d27, [x17, x3]\n"
+ "str d1, [x16, x3]\n"
+ "str d5, [x15, x3]\n"
+ "add x3, x3, #0x8\n"
+ "ldr q8, [x20, #0x0]\n"
+ "ldr q0, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "add x4, x4, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldr d12, [x6, #0x20]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "usubl v6.8h, v6.8b, v13.8b\n"
- "usubl v14.8h, v14.8b, v13.8b\n"
- "ldr d31, [x9, x3]\n"
- "ldr d17, [x28, x3]\n"
- "usubl v10.8h, v10.8b, v13.8b\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "ldr d30, [x27, x3]\n"
- "ldr d16, [x26, x3]\n"
- "usubl v12.8h, v12.8b, v13.8b\n"
- "usubl v31.8h, v31.8b, v18.8b\n"
- "ldr d3, [x25, x3]\n"
- "ldr d4, [x24, x3]\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "ldr d25, [x23, x3]\n"
- "ldr d9, [x22, x3]\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "ldr d29, [x21, x3]\n"
- "ldr d28, [x20, x3]\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "ldr d23, [x5, #0x20]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "usubl v12.8h, v12.8b, v9.8b\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "usubl v11.8h, v11.8b, v9.8b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "ldr d24, [x9, x2]\n"
+ "ldr d21, [x28, x2]\n"
+ "ldr d16, [x27, x2]\n"
+ "ldr d20, [x26, x2]\n"
+ "ldr d7, [x25, x2]\n"
+ "ldr d19, [x24, x2]\n"
+ "ldr d28, [x23, x2]\n"
+ "ldr d26, [x22, x2]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "ldr d29, [x21, x2]\n"
+ "ldr d18, [x20, x2]\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "usubl v18.8h, v18.8b, v15.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr d27, [x6, #0x28]\n"
- "ldr d1, [x6, #0x30]\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "ldr d2, [x6, #0x38]\n"
- "ldr d31, [x6, #0x40]\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "ldr d8, [x6, #0x48]\n"
- "ldr x22, [x5, #0x50]\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "ldr x20, [x5, #0x58]\n"
- "ldr x21, [x5, #0x60]\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "usubl v27.8h, v27.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "usubl v1.8h, v1.8b, v13.8b\n"
- "ldr x22, [x5, #0x70]\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "ldr d14, [x20, x3]\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal v23.4s, v17.4h, v10.4h\n"
- "usubl v2.8h, v2.8b, v13.8b\n"
- "ldr x21, [x5, #0x78]\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "usubl v31.8h, v31.8b, v13.8b\n"
- "ldr x20, [x5, #0x80]\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "ldr d25, [x22, x3]\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v10.8h\n"
- "ldr d10, [x21, x3]\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "smlal v24.4s, v17.4h, v21.4h\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "usubl v8.8h, v8.8b, v13.8b\n"
- "ldr x24, [x5, #0x88]\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "smlal v7.4s, v30.4h, v27.4h\n"
- "ldr x23, [x5, #0x90]\n"
- "ldr x22, [x5, #0x98]\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "ldr d9, [x20, x3]\n"
- "smlal2 v22.4s, v17.8h, v21.8h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "ldr d21, [x6, #0x50]\n"
- "smlal v20.4s, v3.4h, v12.4h\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "ldr x21, [x5, #0xa0]\n"
- "ldr x20, [x5, #0xa8]\n"
- "smlal2 v15.4s, v30.8h, v27.8h\n"
- "ldr d30, [x24, x3]\n"
- "smlal v7.4s, v16.4h, v1.4h\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v12.8h\n"
- "ldr d3, [x6, #0x58]\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "usubl v3.8h, v3.8b, v13.8b\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "ldr d12, [x23, x3]\n"
- "smlal v20.4s, v16.4h, v27.4h\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "smlal v24.4s, v28.4h, v27.4h\n"
- "smlal v23.4s, v14.4h, v27.4h\n"
- "ldr x13, [x5, #0xb0]\n"
- "ldr x12, [x5, #0xb8]\n"
- "smlal2 v15.4s, v16.8h, v1.8h\n"
- "smlal v7.4s, v4.4h, v2.4h\n"
- "ldr x11, [x5, #0xc0]\n"
- "ldr x10, [x5, #0xc8]\n"
- "smlal2 v5.4s, v16.8h, v27.8h\n"
- "ldr d16, [x22, x3]\n"
- "smlal2 v22.4s, v28.8h, v27.8h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v27.8h\n"
- "ldr d27, [x6, #0x60]\n"
- "smlal v20.4s, v4.4h, v1.4h\n"
- "usubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v1.4h\n"
- "smlal v23.4s, v25.4h, v1.4h\n"
- "ldr x9, [x5, #0xd0]\n"
- "ldr x28, [x5, #0xd8]\n"
- "smlal2 v15.4s, v4.8h, v2.8h\n"
- "smlal v7.4s, v17.4h, v31.4h\n"
- "ldr x27, [x5, #0xe0]\n"
- "ldr x26, [x5, #0xe8]\n"
- "smlal2 v5.4s, v4.8h, v1.8h\n"
- "ldr d4, [x21, x3]\n"
- "smlal2 v22.4s, v14.8h, v1.8h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "ldr d1, [x6, #0x68]\n"
- "smlal v20.4s, v17.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v2.4h\n"
- "smlal v23.4s, v10.4h, v2.4h\n"
- "ldr x25, [x5, #0xf0]\n"
- "ldr x24, [x5, #0xf8]\n"
- "smlal2 v15.4s, v17.8h, v31.8h\n"
- "smlal v7.4s, v6.4h, v8.4h\n"
- "ldr x23, [x5, #0x100]\n"
- "ldr x22, [x5, #0x108]\n"
- "smlal2 v5.4s, v17.8h, v2.8h\n"
- "ldr d17, [x20, x3]\n"
- "smlal2 v22.4s, v25.8h, v2.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v2.8h\n"
- "ldr d2, [x6, #0x70]\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "usubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v31.4h\n"
- "smlal v23.4s, v9.4h, v31.4h\n"
- "ldr x21, [x5, #0x110]\n"
- "ldr x20, [x5, #0x118]\n"
- "smlal2 v15.4s, v6.8h, v8.8h\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
+ "ldr d4, [x5, #0x28]\n"
+ "ldr d3, [x5, #0x30]\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "ldr d22, [x5, #0x38]\n"
+ "ldr d2, [x5, #0x40]\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "ldr d24, [x5, #0x48]\n"
+ "ldr x21, [x4, #0x50]\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x50]\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "ldr d21, [x5, #0x58]\n"
+ "ldr x28, [x4, #0x60]\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "usubl v4.8h, v4.8b, v9.8b\n"
+ "ldr x27, [x4, #0x68]\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "usubl v3.8h, v3.8b, v9.8b\n"
+ "ldr x26, [x4, #0x70]\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "ldr d12, [x21, x2]\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "ldr d7, [x20, x2]\n"
+ "ldr x25, [x4, #0x78]\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "usubl v2.8h, v2.8b, v9.8b\n"
+ "ldr x24, [x4, #0x80]\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "ldr x23, [x4, #0x88]\n"
+ "ldr x22, [x4, #0x90]\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
+ "ldr x21, [x4, #0x98]\n"
+ "ldr x20, [x4, #0xa0]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x28, x2]\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v17.4h\n"
+ "smlal2 v30.4s, v12.8h, v17.8h\n"
+ "ldr d17, [x27, x2]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal v1.4s, v12.4h, v11.4h\n"
+ "usubl v21.8h, v21.8b, v9.8b\n"
+ "ldr x14, [x4, #0xa8]\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal2 v25.4s, v12.8h, v11.8h\n"
+ "ldr x13, [x4, #0xb0]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "ldr d26, [x26, x2]\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v7.4h, v11.4h\n"
+ "smlal2 v30.4s, v7.8h, v11.8h\n"
+ "ldr d11, [x25, x2]\n"
+ "ldr x12, [x4, #0xb8]\n"
+ "smlal v27.4s, v28.4h, v23.4h\n"
+ "smlal v1.4s, v7.4h, v23.4h\n"
+ "ldr x11, [x4, #0xc0]\n"
+ "ldr x10, [x4, #0xc8]\n"
+ "smlal2 v6.4s, v28.8h, v23.8h\n"
+ "ldr d28, [x24, x2]\n"
+ "smlal2 v25.4s, v7.8h, v23.8h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v16.4h, v4.4h\n"
+ "smlal2 v0.4s, v16.8h, v4.8h\n"
+ "ldr d16, [x23, x2]\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "ldr d23, [x22, x2]\n"
+ "ldr x9, [x4, #0xd0]\n"
+ "smlal v27.4s, v20.4h, v4.4h\n"
+ "smlal v1.4s, v18.4h, v4.4h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x28, [x4, #0xd8]\n"
+ "smlal2 v6.4s, v20.8h, v4.8h\n"
+ "smlal2 v25.4s, v18.8h, v4.8h\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x27, [x4, #0xe0]\n"
+ "smlal v8.4s, v20.4h, v3.4h\n"
+ "smlal2 v0.4s, v20.8h, v3.8h\n"
+ "ldr d20, [x21, x2]\n"
+ "usubl v23.8h, v23.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v4.4h\n"
+ "smlal2 v30.4s, v17.8h, v4.8h\n"
+ "ldr d4, [x5, #0x60]\n"
+ "ldr x26, [x4, #0xe8]\n"
+ "smlal v27.4s, v19.4h, v3.4h\n"
+ "smlal v1.4s, v17.4h, v3.4h\n"
+ "ldr x25, [x4, #0xf0]\n"
+ "ldr x24, [x4, #0xf8]\n"
+ "smlal2 v6.4s, v19.8h, v3.8h\n"
+ "smlal2 v25.4s, v17.8h, v3.8h\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x23, [x4, #0x100]\n"
+ "smlal v8.4s, v19.4h, v22.4h\n"
+ "smlal2 v0.4s, v19.8h, v22.8h\n"
+ "ldr d19, [x20, x2]\n"
+ "usubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v3.4h\n"
+ "smlal2 v30.4s, v26.8h, v3.8h\n"
+ "ldr d3, [x5, #0x68]\n"
+ "ldr x22, [x4, #0x108]\n"
+ "smlal v27.4s, v12.4h, v22.4h\n"
+ "smlal v1.4s, v26.4h, v22.4h\n"
+ "ldr x21, [x4, #0x110]\n"
+ "ldr x20, [x4, #0x118]\n"
+ "smlal2 v6.4s, v12.8h, v22.8h\n"
+ "smlal2 v25.4s, v26.8h, v22.8h\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
"tst x1, #0x7\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "ldr d6, [x13, x3]\n"
- "smlal2 v22.4s, v10.8h, v31.8h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v31.8h\n"
- "ldr d31, [x6, #0x78]\n"
- "smlal v20.4s, v29.4h, v8.4h\n"
- "usubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v9.4h, v8.4h\n"
- "smlal v23.4s, v30.4h, v8.4h\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "ldr d28, [x12, x3]\n"
- "smlal v7.4s, v14.4h, v3.4h\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v5.4s, v29.8h, v8.8h\n"
- "ldr d29, [x6, #0x80]\n"
- "smlal2 v22.4s, v9.8h, v8.8h\n"
- "usubl v29.8h, v29.8b, v13.8b\n"
- "smlal2 v19.4s, v30.8h, v8.8h\n"
- "ldr d8, [x11, x3]\n"
- "smlal v20.4s, v14.4h, v21.4h\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "smlal v24.4s, v12.4h, v21.4h\n"
- "smlal v23.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v14.8h, v3.8h\n"
- "smlal v7.4s, v25.4h, v27.4h\n"
- "smlal2 v5.4s, v14.8h, v21.8h\n"
- "ldr d14, [x10, x3]\n"
- "smlal2 v22.4s, v12.8h, v21.8h\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v21.8h\n"
- "ldr d21, [x6, #0x88]\n"
- "smlal v20.4s, v25.4h, v3.4h\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v16.4h, v3.4h\n"
- "smlal v23.4s, v4.4h, v3.4h\n"
- "smlal2 v15.4s, v25.8h, v27.8h\n"
- "smlal v7.4s, v10.4h, v1.4h\n"
- "smlal2 v5.4s, v25.8h, v3.8h\n"
- "ldr d25, [x9, x3]\n"
- "smlal2 v22.4s, v16.8h, v3.8h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v3.8h\n"
- "ldr d3, [x6, #0x90]\n"
- "smlal v20.4s, v10.4h, v27.4h\n"
- "usubl v3.8h, v3.8b, v13.8b\n"
- "smlal v24.4s, v4.4h, v27.4h\n"
- "smlal v23.4s, v17.4h, v27.4h\n"
- "smlal2 v15.4s, v10.8h, v1.8h\n"
- "smlal v7.4s, v9.4h, v2.4h\n"
- "smlal2 v5.4s, v10.8h, v27.8h\n"
- "ldr d10, [x28, x3]\n"
- "smlal2 v22.4s, v4.8h, v27.8h\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v27.8h\n"
- "ldr d27, [x6, #0x98]\n"
- "smlal v20.4s, v9.4h, v1.4h\n"
- "usubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v17.4h, v1.4h\n"
- "smlal v23.4s, v6.4h, v1.4h\n"
- "smlal2 v15.4s, v9.8h, v2.8h\n"
- "smlal v7.4s, v12.4h, v31.4h\n"
- "smlal2 v5.4s, v9.8h, v1.8h\n"
- "ldr d9, [x27, x3]\n"
- "smlal2 v22.4s, v17.8h, v1.8h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v1.8h\n"
- "ldr d1, [x6, #0xa0]\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "usubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v2.4h\n"
- "smlal v23.4s, v28.4h, v2.4h\n"
- "smlal2 v15.4s, v12.8h, v31.8h\n"
- "ldr d12, [x26, x3]\n"
- "smlal v7.4s, v16.4h, v29.4h\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "smlal2 v5.4s, v30.8h, v2.8h\n"
- "ldr d30, [x6, #0xa8]\n"
- "smlal2 v22.4s, v6.8h, v2.8h\n"
- "usubl v30.8h, v30.8b, v13.8b\n"
- "smlal2 v19.4s, v28.8h, v2.8h\n"
- "ldr d2, [x25, x3]\n"
- "smlal v20.4s, v16.4h, v31.4h\n"
- "usubl v2.8h, v2.8b, v18.8b\n"
- "smlal v24.4s, v8.4h, v31.4h\n"
- "smlal v23.4s, v14.4h, v31.4h\n"
- "smlal2 v15.4s, v16.8h, v29.8h\n"
- "smlal v7.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v16.8h, v31.8h\n"
- "ldr d16, [x24, x3]\n"
- "smlal2 v22.4s, v8.8h, v31.8h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v31.8h\n"
- "ldr d31, [x6, #0xb0]\n"
- "smlal v20.4s, v4.4h, v29.4h\n"
- "usubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v29.4h\n"
- "smlal v23.4s, v25.4h, v29.4h\n"
- "smlal2 v15.4s, v4.8h, v21.8h\n"
- "smlal v7.4s, v17.4h, v3.4h\n"
- "smlal2 v5.4s, v4.8h, v29.8h\n"
- "ldr d4, [x23, x3]\n"
- "smlal2 v22.4s, v14.8h, v29.8h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v29.8h\n"
- "ldr d29, [x6, #0xb8]\n"
- "smlal v20.4s, v17.4h, v21.4h\n"
- "usubl v29.8h, v29.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v15.4s, v17.8h, v3.8h\n"
- "smlal v7.4s, v6.4h, v27.4h\n"
- "smlal2 v5.4s, v17.8h, v21.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "ldr d21, [x6, #0xc0]\n"
- "smlal v20.4s, v6.4h, v3.4h\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v3.4h\n"
- "smlal v23.4s, v9.4h, v3.4h\n"
- "smlal2 v15.4s, v6.8h, v27.8h\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "smlal2 v5.4s, v6.8h, v3.8h\n"
- "ldr d6, [x21, x3]\n"
- "smlal2 v22.4s, v10.8h, v3.8h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v3.8h\n"
- "ldr d3, [x20, x3]\n"
- "smlal v20.4s, v28.4h, v27.4h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal v24.4s, v9.4h, v27.4h\n"
- "smlal v23.4s, v12.4h, v27.4h\n"
- "add x3, x3, #0x8\n"
- "smlal2 v15.4s, v8.8h, v1.8h\n"
- "ldr q8, [x7, #0x0]\n"
- "smlal v7.4s, v14.4h, v30.4h\n"
- "smlal2 v5.4s, v28.8h, v27.8h\n"
- "ldr q28, [x8, #0x0]\n"
- "smlal2 v22.4s, v9.8h, v27.8h\n"
- "smlal2 v19.4s, v12.8h, v27.8h\n"
- "ldr q27, [x7, #0x10]\n"
- "smlal v20.4s, v14.4h, v1.4h\n"
+ "smlal v8.4s, v12.4h, v2.4h\n"
+ "smlal2 v0.4s, v12.8h, v2.8h\n"
+ "ldr d12, [x14, x2]\n"
+ "usubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v22.4h\n"
+ "smlal2 v30.4s, v11.8h, v22.8h\n"
+ "ldr d22, [x5, #0x70]\n"
+ "smlal v27.4s, v7.4h, v2.4h\n"
+ "smlal v1.4s, v11.4h, v2.4h\n"
+ "smlal2 v6.4s, v7.8h, v2.8h\n"
+ "smlal2 v25.4s, v11.8h, v2.8h\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v24.4h\n"
+ "smlal2 v0.4s, v7.8h, v24.8h\n"
+ "ldr d7, [x13, x2]\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v2.4h\n"
+ "smlal2 v30.4s, v28.8h, v2.8h\n"
+ "ldr d2, [x5, #0x78]\n"
+ "smlal v27.4s, v29.4h, v24.4h\n"
+ "smlal v1.4s, v28.4h, v24.4h\n"
+ "smlal2 v6.4s, v29.8h, v24.8h\n"
+ "ldr d29, [x12, x2]\n"
+ "smlal2 v25.4s, v28.8h, v24.8h\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v18.4h, v31.4h\n"
+ "smlal2 v0.4s, v18.8h, v31.8h\n"
+ "ldr d18, [x5, #0x80]\n"
+ "usubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v24.4h\n"
+ "smlal2 v30.4s, v16.8h, v24.8h\n"
+ "ldr d24, [x11, x2]\n"
+ "smlal v27.4s, v17.4h, v31.4h\n"
+ "smlal v1.4s, v23.4h, v31.4h\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v6.4s, v17.8h, v31.8h\n"
+ "smlal2 v25.4s, v23.8h, v31.8h\n"
+ "usubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v8.4s, v17.4h, v21.4h\n"
+ "smlal2 v0.4s, v17.8h, v21.8h\n"
+ "ldr d17, [x10, x2]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x88]\n"
+ "smlal v27.4s, v26.4h, v21.4h\n"
+ "smlal v1.4s, v20.4h, v21.4h\n"
+ "smlal2 v6.4s, v26.8h, v21.8h\n"
+ "smlal2 v25.4s, v20.8h, v21.8h\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v8.4s, v26.4h, v4.4h\n"
+ "smlal2 v0.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x9, x2]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v21.4h\n"
+ "smlal2 v30.4s, v19.8h, v21.8h\n"
+ "ldr d21, [x5, #0x90]\n"
+ "smlal v27.4s, v11.4h, v4.4h\n"
+ "smlal v1.4s, v19.4h, v4.4h\n"
+ "smlal2 v6.4s, v11.8h, v4.8h\n"
+ "smlal2 v25.4s, v19.8h, v4.8h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v11.4h, v3.4h\n"
+ "smlal2 v0.4s, v11.8h, v3.8h\n"
+ "ldr d11, [x28, x2]\n"
+ "usubl v21.8h, v21.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v4.4h\n"
+ "smlal2 v30.4s, v12.8h, v4.8h\n"
+ "ldr d4, [x5, #0x98]\n"
+ "smlal v27.4s, v28.4h, v3.4h\n"
+ "smlal v1.4s, v12.4h, v3.4h\n"
+ "smlal2 v6.4s, v28.8h, v3.8h\n"
+ "smlal2 v25.4s, v12.8h, v3.8h\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v8.4s, v28.4h, v22.4h\n"
+ "smlal2 v0.4s, v28.8h, v22.8h\n"
+ "ldr d28, [x27, x2]\n"
+ "usubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v3.4h\n"
+ "smlal2 v30.4s, v7.8h, v3.8h\n"
+ "ldr d3, [x5, #0xa0]\n"
+ "smlal v27.4s, v16.4h, v22.4h\n"
+ "smlal v1.4s, v7.4h, v22.4h\n"
+ "smlal2 v6.4s, v16.8h, v22.8h\n"
+ "ldr d16, [x26, x2]\n"
+ "smlal2 v25.4s, v7.8h, v22.8h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "smlal v8.4s, v23.4h, v2.4h\n"
+ "smlal2 v0.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x5, #0xa8]\n"
+ "usubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v22.4h\n"
+ "smlal2 v30.4s, v29.8h, v22.8h\n"
+ "ldr d22, [x25, x2]\n"
+ "smlal v27.4s, v20.4h, v2.4h\n"
+ "smlal v1.4s, v24.4h, v2.4h\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "smlal2 v6.4s, v20.8h, v2.8h\n"
+ "smlal2 v25.4s, v24.8h, v2.8h\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "ldr d20, [x24, x2]\n"
+ "usubl v22.8h, v22.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v2.4h\n"
+ "smlal2 v30.4s, v17.8h, v2.8h\n"
+ "ldr d2, [x5, #0xb0]\n"
+ "smlal v27.4s, v19.4h, v18.4h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v6.4s, v19.8h, v18.8h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "smlal v8.4s, v19.4h, v31.4h\n"
+ "smlal2 v0.4s, v19.8h, v31.8h\n"
+ "ldr d19, [x23, x2]\n"
+ "usubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v18.4h\n"
+ "smlal2 v30.4s, v26.8h, v18.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "smlal v27.4s, v12.4h, v31.4h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v6.4s, v12.8h, v31.8h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v21.4h\n"
+ "smlal2 v0.4s, v12.8h, v21.8h\n"
+ "ldr d12, [x22, x2]\n"
+ "usubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v31.4h\n"
+ "smlal2 v30.4s, v11.8h, v31.8h\n"
+ "ldr d31, [x5, #0xc0]\n"
+ "smlal v27.4s, v7.4h, v21.4h\n"
+ "smlal v1.4s, v11.4h, v21.4h\n"
+ "smlal2 v6.4s, v7.8h, v21.8h\n"
+ "smlal2 v25.4s, v11.8h, v21.8h\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v4.4h\n"
+ "smlal2 v0.4s, v7.8h, v4.8h\n"
+ "ldr d7, [x21, x2]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v21.4h\n"
+ "smlal2 v30.4s, v28.8h, v21.8h\n"
+ "ldr d21, [x20, x2]\n"
+ "add x2, x2, #0x8\n"
+ "smlal v27.4s, v29.4h, v4.4h\n"
+ "smlal v1.4s, v28.4h, v4.4h\n"
+ "smlal2 v6.4s, v29.8h, v4.8h\n"
+ "ldr q29, [x6, #0x0]\n"
+ "smlal2 v25.4s, v28.8h, v4.8h\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v3.4h\n"
+ "smlal2 v0.4s, v24.8h, v3.8h\n"
+ "ldr q24, [x7, #0x0]\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "smlal v5.4s, v16.4h, v4.4h\n"
+ "smlal2 v30.4s, v16.8h, v4.8h\n"
+ "ldr q4, [x6, #0x10]\n"
+ "add x6, x6, #0x20\n"
+ "smlal v27.4s, v17.4h, v3.4h\n"
+ "smlal v1.4s, v22.4h, v3.4h\n"
+ "smlal2 v6.4s, v17.8h, v3.8h\n"
+ "smlal2 v25.4s, v22.8h, v3.8h\n"
+ "ldr q22, [x7, #0x10]\n"
"add x7, x7, #0x20\n"
- "smlal v24.4s, v2.4h, v1.4h\n"
- "smlal v23.4s, v16.4h, v1.4h\n"
- "smlal2 v15.4s, v14.8h, v30.8h\n"
- "smlal v7.4s, v25.4h, v31.4h\n"
- "smlal2 v5.4s, v14.8h, v1.8h\n"
- "ldr q14, [x8, #0x10]\n"
- "smlal2 v22.4s, v2.8h, v1.8h\n"
- "add x8, x8, #0x20\n"
- "smlal2 v19.4s, v16.8h, v1.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal v24.4s, v16.4h, v30.4h\n"
- "smlal v23.4s, v4.4h, v30.4h\n"
- "smlal2 v15.4s, v25.8h, v31.8h\n"
- "smlal v7.4s, v10.4h, v29.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal2 v22.4s, v16.8h, v30.8h\n"
- "smlal2 v19.4s, v4.8h, v30.8h\n"
- "smlal v20.4s, v10.4h, v31.4h\n"
- "smlal v24.4s, v4.4h, v31.4h\n"
- "smlal v23.4s, v17.4h, v31.4h\n"
- "smlal2 v15.4s, v10.8h, v29.8h\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "sqrdmulh v7.4s, v7.4s, v8.4s\n"
- "smlal2 v5.4s, v10.8h, v31.8h\n"
- "smlal2 v22.4s, v4.8h, v31.8h\n"
- "and v4.16b, v7.16b, v28.16b\n"
- "smlal2 v19.4s, v17.8h, v31.8h\n"
- "smlal v20.4s, v9.4h, v29.4h\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "smlal v24.4s, v17.4h, v29.4h\n"
- "smlal v23.4s, v6.4h, v29.4h\n"
- "sqadd v7.4s, v7.4s, v4.4s\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal2 v5.4s, v9.8h, v29.8h\n"
- "sqrdmulh v15.4s, v15.4s, v27.4s\n"
- "smlal2 v22.4s, v17.8h, v29.8h\n"
- "smlal2 v19.4s, v6.8h, v29.8h\n"
- "and v30.16b, v15.16b, v14.16b\n"
- "smlal v20.4s, v12.4h, v21.4h\n"
- "smlal v24.4s, v6.4h, v21.4h\n"
- "sqrdmulh v20.4s, v20.4s, v8.4s\n"
- "smlal v23.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v12.8h, v21.8h\n"
- "sqrdmulh v24.4s, v24.4s, v8.4s\n"
- "smlal2 v22.4s, v6.8h, v21.8h\n"
- "smlal2 v19.4s, v3.8h, v21.8h\n"
- "sqrdmulh v23.4s, v23.4s, v8.4s\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v3.16b, v20.16b, v28.16b\n"
- "sqrdmulh v5.4s, v5.4s, v27.4s\n"
- "and v25.16b, v24.16b, v28.16b\n"
- "sqrdmulh v22.4s, v22.4s, v27.4s\n"
- "and v16.16b, v23.16b, v28.16b\n"
- "sqrdmulh v19.4s, v19.4s, v27.4s\n"
- "sqadd v15.4s, v15.4s, v30.4s\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "and v4.16b, v5.16b, v14.16b\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "and v10.16b, v22.16b, v14.16b\n"
+ "smlal v8.4s, v17.4h, v23.4h\n"
+ "smlal2 v0.4s, v17.8h, v23.8h\n"
+ "smlal v5.4s, v20.4h, v3.4h\n"
+ "smlal2 v30.4s, v20.8h, v3.8h\n"
+ "smlal v27.4s, v26.4h, v23.4h\n"
+ "smlal v1.4s, v20.4h, v23.4h\n"
+ "smlal2 v6.4s, v26.8h, v23.8h\n"
+ "smlal2 v25.4s, v20.8h, v23.8h\n"
+ "smlal v8.4s, v26.4h, v2.4h\n"
+ "smlal2 v0.4s, v26.8h, v2.8h\n"
+ "smlal v5.4s, v19.4h, v23.4h\n"
+ "smlal2 v30.4s, v19.8h, v23.8h\n"
+ "smlal v27.4s, v11.4h, v2.4h\n"
+ "smlal v1.4s, v19.4h, v2.4h\n"
+ "smlal2 v6.4s, v11.8h, v2.8h\n"
+ "smlal2 v25.4s, v19.8h, v2.8h\n"
+ "smlal v8.4s, v11.4h, v18.4h\n"
+ "smlal2 v0.4s, v11.8h, v18.8h\n"
+ "smlal v5.4s, v12.4h, v2.4h\n"
+ "smlal2 v30.4s, v12.8h, v2.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal v1.4s, v12.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal2 v25.4s, v12.8h, v18.8h\n"
+ "smlal v8.4s, v28.4h, v31.4h\n"
+ "smlal2 v0.4s, v28.8h, v31.8h\n"
+ "smlal v5.4s, v7.4h, v18.4h\n"
+ "smlal2 v30.4s, v7.8h, v18.8h\n"
+ "smlal v27.4s, v16.4h, v31.4h\n"
+ "smlal v1.4s, v7.4h, v31.4h\n"
+ "smlal2 v6.4s, v16.8h, v31.8h\n"
+ "smlal2 v25.4s, v7.8h, v31.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v29.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v4.4s\n"
+ "smlal v5.4s, v21.4h, v31.4h\n"
+ "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "and v17.16b, v8.16b, v24.16b\n"
+ "sqrdmulh v27.4s, v27.4s, v29.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v29.4s\n"
+ "and v28.16b, v0.16b, v22.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v4.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v4.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v5.4s, v5.4s, v29.4s\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v16.16b, v27.16b, v24.16b\n"
+ "and v12.16b, v1.16b, v24.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v4.4s\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v11.16b, v5.16b, v24.16b\n"
+ "sqadd v0.4s, v0.4s, v28.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v12.16b, v19.16b, v14.16b\n"
- "sqadd v20.4s, v20.4s, v3.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v25.4s\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v16.4s\n"
+ "and v18.16b, v6.16b, v22.16b\n"
"sshr v12.4s, v12.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v28.4s\n"
- "srshl v20.4s, v20.4s, v28.4s\n"
- "sqadd v5.4s, v5.4s, v4.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqadd v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v28.4s\n"
- "sqadd v19.4s, v19.4s, v12.4s\n"
- "srshl v15.4s, v15.4s, v14.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v14.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v14.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v14.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "str d7, [x17, x4]\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str d20, [x16, x4]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str d24, [x15, x4]\n"
- "str d23, [x14, x4]\n"
- "add x4, x4, #0x8\n"
+ "and v17.16b, v25.16b, v22.16b\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "and v19.16b, v30.16b, v22.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v12.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v5.4s, v5.4s, v11.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v24.4s\n"
+ "srshl v27.4s, v27.4s, v24.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v24.4s\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "srshl v5.4s, v5.4s, v24.4s\n"
+ "sqadd v30.4s, v30.4s, v19.4s\n"
+ "srshl v0.4s, v0.4s, v22.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v22.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v22.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "str d8, [x8, x3]\n"
+ "str d27, [x17, x3]\n"
+ "str d1, [x16, x3]\n"
+ "str d5, [x15, x3]\n"
+ "add x3, x3, #0x8\n"
"beq 124f\n"
- "add x6, x6, #0xc8\n"
+ "add x5, x5, #0xc8\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
"tbz x1, #2, 5f\n"
- "ld1 { v7.4s }, [x20], #0x10\n"
+ "ld1 { v8.4s }, [x20], #0x10\n"
"tbz x1, #1, 4f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v0.d }[0], [x20], #0x8\n"
"tbz x1, #0, 7f\n"
- "ld1 { v15.s }[2], [x20]\n"
+ "ld1 { v0.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
"tbz x1, #0, 7f\n"
- "ld1 { v15.s }[0], [x20]\n"
+ "ld1 { v0.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
"tbz x1, #1, 6f\n"
- "ld1 { v7.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x20], #0x8\n"
"tbz x1, #0, 7f\n"
- "ld1 { v7.s }[2], [x20]\n"
+ "ld1 { v8.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 7f\n"
- "ld1 { v7.s }[0], [x20]\n"
+ "ld1 { v8.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldr d12, [x6, #0x20]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "usubl v6.8h, v6.8b, v13.8b\n"
- "usubl v14.8h, v14.8b, v13.8b\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "usubl v10.8h, v10.8b, v13.8b\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "usubl v12.8h, v12.8b, v13.8b\n"
- "add x9, x9, x3\n"
- "add x28, x28, x3\n"
- "add x27, x27, x3\n"
- "add x26, x26, x3\n"
- "add x25, x25, x3\n"
- "add x24, x24, x3\n"
- "add x23, x23, x3\n"
- "add x22, x22, x3\n"
- "add x21, x21, x3\n"
- "add x20, x20, x3\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "ldr d23, [x5, #0x20]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "usubl v12.8h, v12.8b, v9.8b\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "usubl v11.8h, v11.8b, v9.8b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "usubl v23.8h, v23.8b, v9.8b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "add x9, x9, x2\n"
+ "add x28, x28, x2\n"
+ "add x27, x27, x2\n"
+ "add x26, x26, x2\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "add x25, x25, x2\n"
+ "add x24, x24, x2\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "add x23, x23, x2\n"
+ "add x22, x22, x2\n"
+ "add x21, x21, x2\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 9f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "ld1 { v17.s }[0], [x28], #0x4\n"
- "ld1 { v30.s }[0], [x27], #0x4\n"
- "ld1 { v16.s }[0], [x26], #0x4\n"
- "ld1 { v3.s }[0], [x25], #0x4\n"
- "ld1 { v4.s }[0], [x24], #0x4\n"
- "ld1 { v25.s }[0], [x23], #0x4\n"
- "ld1 { v9.s }[0], [x22], #0x4\n"
+ "ld1 { v24.s }[0], [x9], #0x4\n"
+ "ld1 { v21.s }[0], [x28], #0x4\n"
+ "ld1 { v16.s }[0], [x27], #0x4\n"
+ "ld1 { v20.s }[0], [x26], #0x4\n"
+ "ld1 { v7.s }[0], [x25], #0x4\n"
+ "ld1 { v19.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "ld1 { v26.s }[0], [x22], #0x4\n"
"ld1 { v29.s }[0], [x21], #0x4\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
"tbz x1, #1, 8f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "ld1 { v17.h }[2], [x28], #0x2\n"
- "ld1 { v30.h }[2], [x27], #0x2\n"
- "ld1 { v16.h }[2], [x26], #0x2\n"
- "ld1 { v3.h }[2], [x25], #0x2\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v25.h }[2], [x23], #0x2\n"
- "ld1 { v9.h }[2], [x22], #0x2\n"
+ "ld1 { v24.h }[2], [x9], #0x2\n"
+ "ld1 { v21.h }[2], [x28], #0x2\n"
+ "ld1 { v16.h }[2], [x27], #0x2\n"
+ "ld1 { v20.h }[2], [x26], #0x2\n"
+ "ld1 { v7.h }[2], [x25], #0x2\n"
+ "ld1 { v19.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v26.h }[2], [x22], #0x2\n"
"ld1 { v29.h }[2], [x21], #0x2\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[6], [x9]\n"
- "ld1 { v17.b }[6], [x28]\n"
- "ld1 { v30.b }[6], [x27]\n"
- "ld1 { v16.b }[6], [x26]\n"
- "ld1 { v3.b }[6], [x25]\n"
- "ld1 { v4.b }[6], [x24]\n"
- "ld1 { v25.b }[6], [x23]\n"
- "ld1 { v9.b }[6], [x22]\n"
+ "ld1 { v24.b }[6], [x9]\n"
+ "ld1 { v21.b }[6], [x28]\n"
+ "ld1 { v16.b }[6], [x27]\n"
+ "ld1 { v20.b }[6], [x26]\n"
+ "ld1 { v7.b }[6], [x25]\n"
+ "ld1 { v19.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x23]\n"
+ "ld1 { v26.b }[6], [x22]\n"
"ld1 { v29.b }[6], [x21]\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[4], [x9]\n"
- "ld1 { v17.b }[4], [x28]\n"
- "ld1 { v30.b }[4], [x27]\n"
- "ld1 { v16.b }[4], [x26]\n"
- "ld1 { v3.b }[4], [x25]\n"
- "ld1 { v4.b }[4], [x24]\n"
- "ld1 { v25.b }[4], [x23]\n"
- "ld1 { v9.b }[4], [x22]\n"
+ "ld1 { v24.b }[4], [x9]\n"
+ "ld1 { v21.b }[4], [x28]\n"
+ "ld1 { v16.b }[4], [x27]\n"
+ "ld1 { v20.b }[4], [x26]\n"
+ "ld1 { v7.b }[4], [x25]\n"
+ "ld1 { v19.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x23]\n"
+ "ld1 { v26.b }[4], [x22]\n"
"ld1 { v29.b }[4], [x21]\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
"tbz x1, #1, 10f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "ld1 { v17.h }[0], [x28], #0x2\n"
- "ld1 { v30.h }[0], [x27], #0x2\n"
- "ld1 { v16.h }[0], [x26], #0x2\n"
- "ld1 { v3.h }[0], [x25], #0x2\n"
- "ld1 { v4.h }[0], [x24], #0x2\n"
- "ld1 { v25.h }[0], [x23], #0x2\n"
- "ld1 { v9.h }[0], [x22], #0x2\n"
+ "ld1 { v24.h }[0], [x9], #0x2\n"
+ "ld1 { v21.h }[0], [x28], #0x2\n"
+ "ld1 { v16.h }[0], [x27], #0x2\n"
+ "ld1 { v20.h }[0], [x26], #0x2\n"
+ "ld1 { v7.h }[0], [x25], #0x2\n"
+ "ld1 { v19.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "ld1 { v26.h }[0], [x22], #0x2\n"
"ld1 { v29.h }[0], [x21], #0x2\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[2], [x9]\n"
- "ld1 { v17.b }[2], [x28]\n"
- "ld1 { v30.b }[2], [x27]\n"
- "ld1 { v16.b }[2], [x26]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x24]\n"
- "ld1 { v25.b }[2], [x23]\n"
- "ld1 { v9.b }[2], [x22]\n"
+ "ld1 { v24.b }[2], [x9]\n"
+ "ld1 { v21.b }[2], [x28]\n"
+ "ld1 { v16.b }[2], [x27]\n"
+ "ld1 { v20.b }[2], [x26]\n"
+ "ld1 { v7.b }[2], [x25]\n"
+ "ld1 { v19.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x23]\n"
+ "ld1 { v26.b }[2], [x22]\n"
"ld1 { v29.b }[2], [x21]\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[0], [x9]\n"
- "ld1 { v17.b }[0], [x28]\n"
- "ld1 { v30.b }[0], [x27]\n"
- "ld1 { v16.b }[0], [x26]\n"
- "ld1 { v3.b }[0], [x25]\n"
- "ld1 { v4.b }[0], [x24]\n"
- "ld1 { v25.b }[0], [x23]\n"
- "ld1 { v9.b }[0], [x22]\n"
+ "ld1 { v24.b }[0], [x9]\n"
+ "ld1 { v21.b }[0], [x28]\n"
+ "ld1 { v16.b }[0], [x27]\n"
+ "ld1 { v20.b }[0], [x26]\n"
+ "ld1 { v7.b }[0], [x25]\n"
+ "ld1 { v19.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x23]\n"
+ "ld1 { v26.b }[0], [x22]\n"
"ld1 { v29.b }[0], [x21]\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "usubl v31.8h, v31.8b, v18.8b\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "ldr x20, [x5, #0x50]\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "add x20, x20, x3\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0x50]\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "add x20, x20, x2\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "usubl v18.8h, v18.8b, v15.8b\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 13f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v4.s }[0], [x20], #0x4\n"
"tbz x1, #1, 12f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v4.h }[2], [x20], #0x2\n"
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v4.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v4.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
"tbz x1, #1, 14f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v4.h }[0], [x20], #0x2\n"
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v4.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v4.b }[0], [x20]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
- "usubl v27.8h, v27.8b, v18.8b\n"
- "ldr x20, [x5, #0x58]\n"
- "smlal v23.4s, v27.4h, v10.4h\n"
- "smlal2 v19.4s, v27.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "smlal v24.4s, v27.4h, v21.4h\n"
- "smlal2 v22.4s, v27.8h, v21.8h\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal v5.4s, v4.4h, v17.4h\n"
+ "smlal2 v30.4s, v4.8h, v17.8h\n"
+ "smlal v1.4s, v4.4h, v11.4h\n"
+ "smlal2 v25.4s, v4.8h, v11.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 17f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
"tbz x1, #1, 16f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[6], [x20]\n"
+ "ld1 { v21.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[4], [x20]\n"
+ "ld1 { v21.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
"tbz x1, #1, 18f\n"
- "ld1 { v6.h }[0], [x20], #0x2\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[2], [x20]\n"
+ "ld1 { v21.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[0], [x20]\n"
+ "ld1 { v21.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
- "usubl v6.8h, v6.8b, v18.8b\n"
- "ldr x20, [x5, #0x60]\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "add x20, x20, x3\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0x60]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "smlal v5.4s, v21.4h, v11.4h\n"
+ "smlal2 v30.4s, v21.8h, v11.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 21f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
"tbz x1, #1, 20f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 5): Bit 2: Unset
"tbz x1, #1, 22f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v31.b }[0], [x20]\n"
"23:" // Oddments: Load (0, 5): Bit 2: End
- "ldr d14, [x6, #0x28]\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal v20.4s, v9.4h, v12.4h\n"
- "smlal2 v5.4s, v9.8h, v12.8h\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "usubl v14.8h, v14.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v30.4h, v14.4h\n"
- "smlal2 v15.4s, v30.8h, v14.8h\n"
- "smlal v20.4s, v16.4h, v14.4h\n"
- "smlal2 v5.4s, v16.8h, v14.8h\n"
- "smlal v24.4s, v28.4h, v14.4h\n"
- "smlal2 v22.4s, v28.8h, v14.8h\n"
+ "ldr d11, [x5, #0x28]\n"
+ "usubl v31.8h, v31.8b, v15.8b\n"
+ "smlal v1.4s, v21.4h, v23.4h\n"
+ "smlal2 v25.4s, v21.8h, v23.8h\n"
+ "ldr x20, [x4, #0x68]\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "smlal v27.4s, v31.4h, v23.4h\n"
+ "smlal2 v6.4s, v31.8h, v23.8h\n"
+ "usubl v11.8h, v11.8b, v9.8b\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v16.4h, v11.4h\n"
+ "smlal2 v0.4s, v16.8h, v11.8h\n"
+ "smlal v1.4s, v18.4h, v11.4h\n"
+ "smlal2 v25.4s, v18.8h, v11.8h\n"
+ "smlal v27.4s, v20.4h, v11.4h\n"
+ "smlal2 v6.4s, v20.8h, v11.8h\n"
"tbz x1, #2, 25f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v24.s }[0], [x20], #0x4\n"
"tbz x1, #1, 24f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v24.h }[2], [x20], #0x2\n"
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v24.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v24.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (2, 1): Bit 2: Unset
"tbz x1, #1, 26f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v24.h }[0], [x20], #0x2\n"
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v24.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v24.b }[0], [x20]\n"
"27:" // Oddments: Load (2, 1): Bit 2: End
- "ldr d21, [x6, #0x30]\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0x70]\n"
- "smlal v23.4s, v25.4h, v14.4h\n"
- "smlal2 v19.4s, v25.8h, v14.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v16.8h, v21.8h\n"
- "smlal v20.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v4.8h, v21.8h\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
+ "ldr d3, [x5, #0x30]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "ldr x20, [x4, #0x70]\n"
+ "usubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v24.4h, v11.4h\n"
+ "smlal2 v30.4s, v24.8h, v11.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v3.4h\n"
+ "smlal2 v0.4s, v20.8h, v3.8h\n"
+ "smlal v27.4s, v19.4h, v3.4h\n"
+ "smlal2 v6.4s, v19.8h, v3.8h\n"
+ "smlal v1.4s, v24.4h, v3.4h\n"
+ "smlal2 v25.4s, v24.8h, v3.8h\n"
"tbz x1, #2, 29f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v2.s }[0], [x20], #0x4\n"
"tbz x1, #1, 28f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ld1 { v2.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "ld1 { v2.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
"tbz x1, #1, 30f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v2.h }[0], [x20], #0x2\n"
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "ld1 { v2.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "ld1 { v2.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ldr d9, [x6, #0x38]\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "usubl v9.8h, v9.8b, v13.8b\n"
- "ldr x20, [x5, #0x78]\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v4.4h, v9.4h\n"
- "smlal2 v15.4s, v4.8h, v9.8h\n"
- "smlal v20.4s, v27.4h, v9.4h\n"
- "smlal2 v5.4s, v27.8h, v9.8h\n"
- "smlal v24.4s, v10.4h, v9.4h\n"
- "smlal2 v22.4s, v10.8h, v9.8h\n"
+ "ldr d22, [x5, #0x38]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "ldr x20, [x4, #0x78]\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v2.4h, v3.4h\n"
+ "smlal2 v30.4s, v2.8h, v3.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v19.4h, v22.4h\n"
+ "smlal2 v0.4s, v19.8h, v22.8h\n"
+ "smlal v27.4s, v4.4h, v22.4h\n"
+ "smlal2 v6.4s, v4.8h, v22.8h\n"
+ "smlal v1.4s, v2.4h, v22.4h\n"
+ "smlal2 v25.4s, v2.8h, v22.8h\n"
"tbz x1, #2, 33f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
"tbz x1, #1, 32f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[6], [x20]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[4], [x20]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (2, 3): Bit 2: Unset
"tbz x1, #1, 34f\n"
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[2], [x20]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[0], [x20]\n"
+ "ld1 { v26.b }[0], [x20]\n"
"35:" // Oddments: Load (2, 3): Bit 2: End
- "ldr d31, [x6, #0x40]\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "usubl v31.8h, v31.8b, v13.8b\n"
- "ldr x20, [x5, #0x80]\n"
- "smlal v23.4s, v12.4h, v9.4h\n"
- "smlal2 v19.4s, v12.8h, v9.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v27.4h, v31.4h\n"
- "smlal2 v15.4s, v27.8h, v31.8h\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "smlal v24.4s, v12.4h, v31.4h\n"
- "smlal2 v22.4s, v12.8h, v31.8h\n"
+ "ldr d31, [x5, #0x40]\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "ldr x20, [x4, #0x80]\n"
+ "usubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v22.4h\n"
+ "smlal2 v30.4s, v26.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v4.4h, v31.4h\n"
+ "smlal2 v0.4s, v4.8h, v31.8h\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
"tbz x1, #2, 37f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
"tbz x1, #1, 36f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "ld1 { v28.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "ld1 { v28.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 4): Bit 2: Unset
"tbz x1, #1, 38f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "ld1 { v28.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "ld1 { v28.b }[0], [x20]\n"
"39:" // Oddments: Load (2, 4): Bit 2: End
- "ldr d16, [x6, #0x48]\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "usubl v16.8h, v16.8b, v13.8b\n"
- "ldr x20, [x5, #0x88]\n"
- "smlal v23.4s, v8.4h, v31.4h\n"
- "smlal2 v19.4s, v8.8h, v31.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v6.4h, v16.4h\n"
- "smlal2 v15.4s, v6.8h, v16.8h\n"
- "smlal v20.4s, v29.4h, v16.4h\n"
- "smlal2 v5.4s, v29.8h, v16.8h\n"
- "smlal v24.4s, v8.4h, v16.4h\n"
- "smlal2 v22.4s, v8.8h, v16.8h\n"
+ "ldr d17, [x5, #0x48]\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x20, [x4, #0x88]\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v31.4h\n"
+ "smlal2 v30.4s, v28.8h, v31.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v21.4h, v17.4h\n"
+ "smlal2 v0.4s, v21.8h, v17.8h\n"
+ "smlal v27.4s, v29.4h, v17.4h\n"
+ "smlal2 v6.4s, v29.8h, v17.8h\n"
+ "smlal v1.4s, v28.4h, v17.4h\n"
+ "smlal2 v25.4s, v28.8h, v17.8h\n"
"tbz x1, #2, 41f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
"tbz x1, #1, 40f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v7.h }[2], [x20], #0x2\n"
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v7.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v7.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 5): Bit 2: Unset
"tbz x1, #1, 42f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v7.h }[0], [x20], #0x2\n"
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v7.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v7.b }[0], [x20]\n"
"43:" // Oddments: Load (2, 5): Bit 2: End
- "ldr d21, [x6, #0x50]\n"
- "usubl v27.8h, v27.8b, v18.8b\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0x90]\n"
- "smlal v23.4s, v27.4h, v16.4h\n"
- "smlal2 v19.4s, v27.8h, v16.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "smlal v20.4s, v25.4h, v21.4h\n"
- "smlal2 v5.4s, v25.8h, v21.8h\n"
+ "ldr d22, [x5, #0x50]\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "ldr x20, [x4, #0x90]\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v17.4h\n"
+ "smlal2 v30.4s, v7.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v18.4h, v22.4h\n"
+ "smlal2 v0.4s, v18.8h, v22.8h\n"
+ "smlal v27.4s, v24.4h, v22.4h\n"
+ "smlal2 v6.4s, v24.8h, v22.8h\n"
"tbz x1, #2, 45f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
"tbz x1, #1, 44f\n"
- "ld1 { v31.h }[2], [x20], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[6], [x20]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[4], [x20]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (3, 0): Bit 2: Unset
"tbz x1, #1, 46f\n"
- "ld1 { v31.h }[0], [x20], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[2], [x20]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[0], [x20]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"47:" // Oddments: Load (3, 0): Bit 2: End
- "usubl v31.8h, v31.8b, v18.8b\n"
- "ldr x20, [x5, #0x98]\n"
- "smlal v24.4s, v31.4h, v21.4h\n"
- "smlal2 v22.4s, v31.8h, v21.8h\n"
- "add x20, x20, x3\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x20, [x4, #0x98]\n"
+ "smlal v1.4s, v20.4h, v22.4h\n"
+ "smlal2 v25.4s, v20.8h, v22.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 49f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v19.s }[0], [x20], #0x4\n"
"tbz x1, #1, 48f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x20], #0x2\n"
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
"tbz x1, #1, 50f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v19.h }[0], [x20], #0x2\n"
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v19.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ldr d2, [x6, #0x58]\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
- "usubl v2.8h, v2.8b, v13.8b\n"
- "ldr x20, [x5, #0xa0]\n"
- "smlal v23.4s, v28.4h, v21.4h\n"
- "smlal2 v19.4s, v28.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v25.4h, v2.4h\n"
- "smlal2 v15.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v10.4h, v2.4h\n"
- "smlal2 v5.4s, v10.8h, v2.8h\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal2 v22.4s, v28.8h, v2.8h\n"
+ "ldr d17, [x5, #0x58]\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "ldr x20, [x4, #0xa0]\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v22.4h\n"
+ "smlal2 v30.4s, v19.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v24.4h, v17.4h\n"
+ "smlal2 v0.4s, v24.8h, v17.8h\n"
+ "smlal v27.4s, v2.4h, v17.4h\n"
+ "smlal2 v6.4s, v2.8h, v17.8h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 53f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
"tbz x1, #1, 52f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
"tbz x1, #1, 54f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "ld1 { v29.b }[0], [x20]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ldr d25, [x6, #0x60]\n"
- "usubl v21.8h, v21.8b, v18.8b\n"
- "usubl v25.8h, v25.8b, v13.8b\n"
- "ldr x20, [x5, #0xa8]\n"
- "smlal v23.4s, v21.4h, v2.4h\n"
- "smlal2 v19.4s, v21.8h, v2.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v10.4h, v25.4h\n"
- "smlal2 v15.4s, v10.8h, v25.8h\n"
- "smlal v20.4s, v12.4h, v25.4h\n"
- "smlal2 v5.4s, v12.8h, v25.8h\n"
- "smlal v24.4s, v21.4h, v25.4h\n"
- "smlal2 v22.4s, v21.8h, v25.8h\n"
+ "ldr d24, [x5, #0x60]\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "ldr x20, [x4, #0xa8]\n"
+ "usubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v17.4h\n"
+ "smlal2 v30.4s, v29.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v2.4h, v24.4h\n"
+ "smlal2 v0.4s, v2.8h, v24.8h\n"
+ "smlal v27.4s, v26.4h, v24.4h\n"
+ "smlal2 v6.4s, v26.8h, v24.8h\n"
+ "smlal v1.4s, v29.4h, v24.4h\n"
+ "smlal2 v25.4s, v29.8h, v24.8h\n"
"tbz x1, #2, 57f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
"tbz x1, #1, 56f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 59f\n"
"56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 59f\n"
"57:" // Oddments: Load (3, 3): Bit 2: Unset
"tbz x1, #1, 58f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 59f\n"
"58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v31.b }[0], [x20]\n"
"59:" // Oddments: Load (3, 3): Bit 2: End
- "ldr d1, [x6, #0x68]\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "usubl v1.8h, v1.8b, v13.8b\n"
- "ldr x20, [x5, #0xb0]\n"
- "smlal v23.4s, v9.4h, v25.4h\n"
- "smlal2 v19.4s, v9.8h, v25.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v12.4h, v1.4h\n"
- "smlal2 v15.4s, v12.8h, v1.8h\n"
- "smlal v20.4s, v8.4h, v1.4h\n"
- "smlal2 v5.4s, v8.8h, v1.8h\n"
- "smlal v24.4s, v9.4h, v1.4h\n"
- "smlal2 v22.4s, v9.8h, v1.8h\n"
+ "ldr d17, [x5, #0x68]\n"
+ "usubl v31.8h, v31.8b, v15.8b\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v31.4h, v24.4h\n"
+ "smlal2 v30.4s, v31.8h, v24.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v26.4h, v17.4h\n"
+ "smlal2 v0.4s, v26.8h, v17.8h\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "smlal v1.4s, v31.4h, v17.4h\n"
+ "smlal2 v25.4s, v31.8h, v17.8h\n"
"tbz x1, #2, 61f\n"
- "ld1 { v3.s }[0], [x20], #0x4\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
"tbz x1, #1, 60f\n"
- "ld1 { v3.h }[2], [x20], #0x2\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[6], [x20]\n"
+ "ld1 { v21.b }[6], [x20]\n"
"b 63f\n"
"60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[4], [x20]\n"
+ "ld1 { v21.b }[4], [x20]\n"
"b 63f\n"
"61:" // Oddments: Load (3, 4): Bit 2: Unset
"tbz x1, #1, 62f\n"
- "ld1 { v3.h }[0], [x20], #0x2\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[2], [x20]\n"
+ "ld1 { v21.b }[2], [x20]\n"
"b 63f\n"
"62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[0], [x20]\n"
+ "ld1 { v21.b }[0], [x20]\n"
"63:" // Oddments: Load (3, 4): Bit 2: End
- "ldr d16, [x6, #0x70]\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "usubl v16.8h, v16.8b, v13.8b\n"
- "ldr x20, [x5, #0xb8]\n"
- "smlal v23.4s, v3.4h, v1.4h\n"
- "smlal2 v19.4s, v3.8h, v1.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "smlal2 v15.4s, v8.8h, v16.8h\n"
- "smlal v20.4s, v27.4h, v16.4h\n"
- "smlal2 v5.4s, v27.8h, v16.8h\n"
- "smlal v24.4s, v3.4h, v16.4h\n"
- "smlal2 v22.4s, v3.8h, v16.8h\n"
+ "ldr d22, [x5, #0x70]\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0xb8]\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v21.4h, v17.4h\n"
+ "smlal2 v30.4s, v21.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v28.4h, v22.4h\n"
+ "smlal2 v0.4s, v28.8h, v22.8h\n"
+ "smlal v27.4s, v7.4h, v22.4h\n"
+ "smlal2 v6.4s, v7.8h, v22.8h\n"
+ "smlal v1.4s, v21.4h, v22.4h\n"
+ "smlal2 v25.4s, v21.8h, v22.8h\n"
"tbz x1, #2, 65f\n"
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x20], #0x4\n"
"tbz x1, #1, 64f\n"
- "ld1 { v14.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x20], #0x2\n"
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[6], [x20]\n"
+ "ld1 { v11.b }[6], [x20]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[4], [x20]\n"
+ "ld1 { v11.b }[4], [x20]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 5): Bit 2: Unset
"tbz x1, #1, 66f\n"
- "ld1 { v14.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x20], #0x2\n"
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[2], [x20]\n"
+ "ld1 { v11.b }[2], [x20]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[0], [x20]\n"
+ "ld1 { v11.b }[0], [x20]\n"
"67:" // Oddments: Load (3, 5): Bit 2: End
- "ldr d17, [x6, #0x78]\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "usubl v17.8h, v17.8b, v13.8b\n"
- "ldr x20, [x5, #0xc0]\n"
- "smlal v23.4s, v14.4h, v16.4h\n"
- "smlal2 v19.4s, v14.8h, v16.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v31.4h, v17.4h\n"
- "smlal2 v15.4s, v31.8h, v17.8h\n"
- "smlal v20.4s, v28.4h, v17.4h\n"
- "smlal2 v5.4s, v28.8h, v17.8h\n"
+ "ldr d17, [x5, #0x78]\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "ldr x20, [x4, #0xc0]\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v22.4h\n"
+ "smlal2 v30.4s, v11.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v17.4h\n"
+ "smlal2 v0.4s, v20.8h, v17.8h\n"
+ "smlal v27.4s, v19.4h, v17.4h\n"
+ "smlal2 v6.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 69f\n"
- "ld1 { v1.s }[0], [x20], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
"tbz x1, #1, 68f\n"
- "ld1 { v1.h }[2], [x20], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[6], [x20]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[4], [x20]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 0): Bit 2: Unset
"tbz x1, #1, 70f\n"
- "ld1 { v1.h }[0], [x20], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"71:" // Oddments: Load (4, 0): Bit 2: End
- "usubl v1.8h, v1.8b, v18.8b\n"
- "ldr x20, [x5, #0xc8]\n"
- "smlal v24.4s, v1.4h, v17.4h\n"
- "smlal2 v22.4s, v1.8h, v17.8h\n"
- "add x20, x20, x3\n"
+ "usubl v18.8h, v18.8b, v15.8b\n"
+ "ldr x20, [x4, #0xc8]\n"
+ "smlal v1.4s, v18.4h, v17.4h\n"
+ "smlal2 v25.4s, v18.8h, v17.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 73f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
"tbz x1, #1, 72f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 1): Bit 2: Unset
"tbz x1, #1, 74f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 1): Bit 2: End
- "ldr d29, [x6, #0x80]\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "usubl v29.8h, v29.8b, v13.8b\n"
- "ldr x20, [x5, #0xd0]\n"
- "smlal v23.4s, v16.4h, v17.4h\n"
- "smlal2 v19.4s, v16.8h, v17.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v28.4h, v29.4h\n"
- "smlal2 v15.4s, v28.8h, v29.8h\n"
- "smlal v20.4s, v21.4h, v29.4h\n"
- "smlal2 v5.4s, v21.8h, v29.8h\n"
- "smlal v24.4s, v16.4h, v29.4h\n"
- "smlal2 v22.4s, v16.8h, v29.8h\n"
+ "ldr d4, [x5, #0x80]\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x20, [x4, #0xd0]\n"
+ "usubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v20.4h, v17.4h\n"
+ "smlal2 v30.4s, v20.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v19.4h, v4.4h\n"
+ "smlal2 v0.4s, v19.8h, v4.8h\n"
+ "smlal v27.4s, v29.4h, v4.4h\n"
+ "smlal2 v6.4s, v29.8h, v4.8h\n"
+ "smlal v1.4s, v20.4h, v4.4h\n"
+ "smlal2 v25.4s, v20.8h, v4.8h\n"
"tbz x1, #2, 77f\n"
- "ld1 { v30.s }[0], [x20], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
"tbz x1, #1, 76f\n"
- "ld1 { v30.h }[2], [x20], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[6], [x20]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[4], [x20]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 2): Bit 2: Unset
"tbz x1, #1, 78f\n"
- "ld1 { v30.h }[0], [x20], #0x2\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[2], [x20]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[0], [x20]\n"
+ "ld1 { v26.b }[0], [x20]\n"
"79:" // Oddments: Load (4, 2): Bit 2: End
- "ldr d12, [x6, #0x88]\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "usubl v12.8h, v12.8b, v13.8b\n"
- "ldr x20, [x5, #0xd8]\n"
- "smlal v23.4s, v30.4h, v29.4h\n"
- "smlal2 v19.4s, v30.8h, v29.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v21.4h, v12.4h\n"
- "smlal2 v15.4s, v21.8h, v12.8h\n"
- "smlal v20.4s, v9.4h, v12.4h\n"
- "smlal2 v5.4s, v9.8h, v12.8h\n"
- "smlal v24.4s, v30.4h, v12.4h\n"
- "smlal2 v22.4s, v30.8h, v12.8h\n"
+ "ldr d17, [x5, #0x88]\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "ldr x20, [x4, #0xd8]\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v4.4h\n"
+ "smlal2 v30.4s, v26.8h, v4.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v29.4h, v17.4h\n"
+ "smlal2 v0.4s, v29.8h, v17.8h\n"
+ "smlal v27.4s, v31.4h, v17.4h\n"
+ "smlal2 v6.4s, v31.8h, v17.8h\n"
+ "smlal v1.4s, v26.4h, v17.4h\n"
+ "smlal2 v25.4s, v26.8h, v17.8h\n"
"tbz x1, #2, 81f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
+ "ld1 { v23.s }[0], [x20], #0x4\n"
"tbz x1, #1, 80f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
+ "ld1 { v23.h }[2], [x20], #0x2\n"
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "ld1 { v23.b }[6], [x20]\n"
"b 83f\n"
"80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "ld1 { v23.b }[4], [x20]\n"
"b 83f\n"
"81:" // Oddments: Load (4, 3): Bit 2: Unset
"tbz x1, #1, 82f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
+ "ld1 { v23.h }[0], [x20], #0x2\n"
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "ld1 { v23.b }[2], [x20]\n"
"b 83f\n"
"82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "ld1 { v23.b }[0], [x20]\n"
"83:" // Oddments: Load (4, 3): Bit 2: End
- "ldr d21, [x6, #0x90]\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "usubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0xe0]\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal v20.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v3.8h, v21.8h\n"
- "smlal v24.4s, v29.4h, v21.4h\n"
- "smlal2 v22.4s, v29.8h, v21.8h\n"
+ "ldr d22, [x5, #0x90]\n"
+ "usubl v23.8h, v23.8b, v15.8b\n"
+ "ldr x20, [x4, #0xe0]\n"
+ "usubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v23.4h, v17.4h\n"
+ "smlal2 v30.4s, v23.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v31.4h, v22.4h\n"
+ "smlal2 v0.4s, v31.8h, v22.8h\n"
+ "smlal v27.4s, v21.4h, v22.4h\n"
+ "smlal2 v6.4s, v21.8h, v22.8h\n"
+ "smlal v1.4s, v23.4h, v22.4h\n"
+ "smlal2 v25.4s, v23.8h, v22.8h\n"
"tbz x1, #2, 85f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
"tbz x1, #1, 84f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v28.b }[6], [x20]\n"
"b 87f\n"
"84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v28.b }[4], [x20]\n"
"b 87f\n"
"85:" // Oddments: Load (4, 4): Bit 2: Unset
"tbz x1, #1, 86f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v28.b }[2], [x20]\n"
"b 87f\n"
"86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v28.b }[0], [x20]\n"
"87:" // Oddments: Load (4, 4): Bit 2: End
- "ldr d8, [x6, #0x98]\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "usubl v8.8h, v8.8b, v13.8b\n"
- "ldr x20, [x5, #0xe8]\n"
- "smlal v23.4s, v25.4h, v21.4h\n"
- "smlal2 v19.4s, v25.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v3.4h, v8.4h\n"
- "smlal2 v15.4s, v3.8h, v8.8h\n"
- "smlal v20.4s, v14.4h, v8.4h\n"
- "smlal2 v5.4s, v14.8h, v8.8h\n"
- "smlal v24.4s, v25.4h, v8.4h\n"
- "smlal2 v22.4s, v25.8h, v8.8h\n"
+ "ldr d17, [x5, #0x98]\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x20, [x4, #0xe8]\n"
+ "usubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v22.4h\n"
+ "smlal2 v30.4s, v28.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v21.4h, v17.4h\n"
+ "smlal2 v0.4s, v21.8h, v17.8h\n"
+ "smlal v27.4s, v11.4h, v17.4h\n"
+ "smlal2 v6.4s, v11.8h, v17.8h\n"
+ "smlal v1.4s, v28.4h, v17.4h\n"
+ "smlal2 v25.4s, v28.8h, v17.8h\n"
"tbz x1, #2, 89f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
"tbz x1, #1, 88f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 91f\n"
"88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 91f\n"
"89:" // Oddments: Load (4, 5): Bit 2: Unset
"tbz x1, #1, 90f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 91f\n"
"90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "ld1 { v16.b }[0], [x20]\n"
"91:" // Oddments: Load (4, 5): Bit 2: End
- "ldr d9, [x6, #0xa0]\n"
- "usubl v21.8h, v21.8b, v18.8b\n"
- "usubl v9.8h, v9.8b, v13.8b\n"
- "ldr x20, [x5, #0xf0]\n"
- "smlal v23.4s, v21.4h, v8.4h\n"
- "smlal2 v19.4s, v21.8h, v8.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v1.4h, v9.4h\n"
- "smlal2 v15.4s, v1.8h, v9.8h\n"
- "smlal v20.4s, v16.4h, v9.4h\n"
- "smlal2 v5.4s, v16.8h, v9.8h\n"
+ "ldr d3, [x5, #0xa0]\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x20, [x4, #0xf0]\n"
+ "usubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v17.4h\n"
+ "smlal2 v30.4s, v16.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v18.4h, v3.4h\n"
+ "smlal2 v0.4s, v18.8h, v3.8h\n"
+ "smlal v27.4s, v20.4h, v3.4h\n"
+ "smlal2 v6.4s, v20.8h, v3.8h\n"
"tbz x1, #2, 93f\n"
"ld1 { v12.s }[0], [x20], #0x4\n"
"tbz x1, #1, 92f\n"
@@ -1871,308 +1871,308 @@ void a64_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
"tbz x1, #0, 95f\n"
"ld1 { v12.b }[0], [x20]\n"
"95:" // Oddments: Load (5, 0): Bit 2: End
- "usubl v12.8h, v12.8b, v18.8b\n"
- "ldr x20, [x5, #0xf8]\n"
- "smlal v24.4s, v12.4h, v9.4h\n"
- "smlal2 v22.4s, v12.8h, v9.8h\n"
- "add x20, x20, x3\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "ldr x20, [x4, #0xf8]\n"
+ "smlal v1.4s, v12.4h, v3.4h\n"
+ "smlal2 v25.4s, v12.8h, v3.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 97f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 96f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 99f\n"
"96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 99f\n"
"97:" // Oddments: Load (5, 1): Bit 2: Unset
"tbz x1, #1, 98f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 99f\n"
"98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"99:" // Oddments: Load (5, 1): Bit 2: End
- "ldr d12, [x6, #0xa8]\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "usubl v12.8h, v12.8b, v13.8b\n"
- "ldr x20, [x5, #0x100]\n"
- "smlal v23.4s, v10.4h, v9.4h\n"
- "smlal2 v19.4s, v10.8h, v9.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v16.4h, v12.4h\n"
- "smlal2 v15.4s, v16.8h, v12.8h\n"
- "smlal v20.4s, v30.4h, v12.4h\n"
- "smlal2 v5.4s, v30.8h, v12.8h\n"
- "smlal v24.4s, v10.4h, v12.4h\n"
- "smlal2 v22.4s, v10.8h, v12.8h\n"
+ "ldr d18, [x5, #0xa8]\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "ldr x20, [x4, #0x100]\n"
+ "usubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v17.4h, v3.4h\n"
+ "smlal2 v30.4s, v17.8h, v3.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "smlal v27.4s, v26.4h, v18.4h\n"
+ "smlal2 v6.4s, v26.8h, v18.8h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
"tbz x1, #2, 101f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v19.s }[0], [x20], #0x4\n"
"tbz x1, #1, 100f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x20], #0x2\n"
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x20]\n"
"b 103f\n"
"100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x20]\n"
"b 103f\n"
"101:" // Oddments: Load (5, 2): Bit 2: Unset
"tbz x1, #1, 102f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v19.h }[0], [x20], #0x2\n"
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 103f\n"
"102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v19.b }[0], [x20]\n"
"103:" // Oddments: Load (5, 2): Bit 2: End
- "ldr d28, [x6, #0xb0]\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "usubl v28.8h, v28.8b, v13.8b\n"
- "ldr x20, [x5, #0x108]\n"
- "smlal v23.4s, v9.4h, v12.4h\n"
- "smlal2 v19.4s, v9.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v30.4h, v28.4h\n"
- "smlal2 v15.4s, v30.8h, v28.8h\n"
- "smlal v20.4s, v29.4h, v28.4h\n"
- "smlal2 v5.4s, v29.8h, v28.8h\n"
- "smlal v24.4s, v9.4h, v28.4h\n"
- "smlal2 v22.4s, v9.8h, v28.8h\n"
+ "ldr d12, [x5, #0xb0]\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "ldr x20, [x4, #0x108]\n"
+ "usubl v12.8h, v12.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v18.4h\n"
+ "smlal2 v30.4s, v19.8h, v18.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v26.4h, v12.4h\n"
+ "smlal2 v0.4s, v26.8h, v12.8h\n"
+ "smlal v27.4s, v23.4h, v12.4h\n"
+ "smlal2 v6.4s, v23.8h, v12.8h\n"
+ "smlal v1.4s, v19.4h, v12.4h\n"
+ "smlal2 v25.4s, v19.8h, v12.8h\n"
"tbz x1, #2, 105f\n"
- "ld1 { v2.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 104f\n"
- "ld1 { v2.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 107f\n"
"104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 107f\n"
"105:" // Oddments: Load (5, 3): Bit 2: Unset
"tbz x1, #1, 106f\n"
- "ld1 { v2.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 107f\n"
"106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"107:" // Oddments: Load (5, 3): Bit 2: End
- "ldr d30, [x6, #0xb8]\n"
- "usubl v2.8h, v2.8b, v18.8b\n"
- "usubl v30.8h, v30.8b, v13.8b\n"
- "ldr x20, [x5, #0x110]\n"
- "smlal v23.4s, v2.4h, v28.4h\n"
- "smlal2 v19.4s, v2.8h, v28.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v29.4h, v30.4h\n"
- "smlal2 v15.4s, v29.8h, v30.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal v24.4s, v2.4h, v30.4h\n"
- "smlal2 v22.4s, v2.8h, v30.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "ldr x20, [x4, #0x110]\n"
+ "usubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v17.4h, v12.4h\n"
+ "smlal2 v30.4s, v17.8h, v12.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v23.4h, v18.4h\n"
+ "smlal2 v0.4s, v23.8h, v18.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
"tbz x1, #2, 109f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v3.s }[0], [x20], #0x4\n"
"tbz x1, #1, 108f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v3.h }[2], [x20], #0x2\n"
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v3.b }[6], [x20]\n"
"b 111f\n"
"108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v3.b }[4], [x20]\n"
"b 111f\n"
"109:" // Oddments: Load (5, 4): Bit 2: Unset
"tbz x1, #1, 110f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v3.h }[0], [x20], #0x2\n"
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v3.b }[2], [x20]\n"
"b 111f\n"
"110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v3.b }[0], [x20]\n"
"111:" // Oddments: Load (5, 4): Bit 2: End
- "ldr d8, [x6, #0xc0]\n"
- "usubl v27.8h, v27.8b, v18.8b\n"
- "usubl v8.8h, v8.8b, v13.8b\n"
- "ldr x20, [x5, #0x118]\n"
- "smlal v23.4s, v27.4h, v30.4h\n"
- "smlal2 v19.4s, v27.8h, v30.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v25.4h, v8.4h\n"
- "smlal2 v15.4s, v25.8h, v8.8h\n"
- "smlal v20.4s, v21.4h, v8.4h\n"
- "smlal2 v5.4s, v21.8h, v8.8h\n"
- "smlal v24.4s, v27.4h, v8.4h\n"
- "smlal2 v22.4s, v27.8h, v8.8h\n"
+ "ldr d26, [x5, #0xc0]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldr x20, [x4, #0x118]\n"
+ "usubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v5.4s, v3.4h, v18.4h\n"
+ "smlal2 v30.4s, v3.8h, v18.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v28.4h, v26.4h\n"
+ "smlal2 v0.4s, v28.8h, v26.8h\n"
+ "smlal v27.4s, v16.4h, v26.4h\n"
+ "smlal2 v6.4s, v16.8h, v26.8h\n"
+ "smlal v1.4s, v3.4h, v26.4h\n"
+ "smlal2 v25.4s, v3.8h, v26.8h\n"
"tbz x1, #2, 113f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 112f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 115f\n"
"112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 115f\n"
"113:" // Oddments: Load (5, 5): Bit 2: Unset
"tbz x1, #1, 114f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 115f\n"
"114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"115:" // Oddments: Load (5, 5): Bit 2: End
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal v23.4s, v9.4h, v8.4h\n"
- "smlal2 v19.4s, v9.8h, v8.8h\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v26.4h\n"
+ "smlal2 v30.4s, v17.8h, v26.8h\n"
"tbz x1, #2, 117f\n"
- "ld1 { v30.4s }, [x7], #0x10\n"
- "ld1 { v12.4s }, [x8], #0x10\n"
+ "ld1 { v9.4s }, [x6], #0x10\n"
+ "ld1 { v20.4s }, [x7], #0x10\n"
"tbz x1, #1, 116f\n"
- "ld1 { v14.d }[0], [x7], #0x8\n"
- "ld1 { v27.d }[0], [x8], #0x8\n"
+ "ld1 { v18.d }[0], [x6], #0x8\n"
+ "ld1 { v3.d }[0], [x7], #0x8\n"
"tbz x1, #0, 119f\n"
- "ld1 { v14.s }[2], [x7]\n"
- "ld1 { v27.s }[2], [x8]\n"
+ "ld1 { v18.s }[2], [x6]\n"
+ "ld1 { v3.s }[2], [x7]\n"
"b 119f\n"
"116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
"tbz x1, #0, 119f\n"
- "ld1 { v14.s }[0], [x7]\n"
- "ld1 { v27.s }[0], [x8]\n"
+ "ld1 { v18.s }[0], [x6]\n"
+ "ld1 { v3.s }[0], [x7]\n"
"b 119f\n"
"117:" // Oddments: Load requant params: Bit 2: Unset
"tbz x1, #1, 118f\n"
- "ld1 { v30.d }[0], [x7], #0x8\n"
- "ld1 { v12.d }[0], [x8], #0x8\n"
+ "ld1 { v9.d }[0], [x6], #0x8\n"
+ "ld1 { v20.d }[0], [x7], #0x8\n"
"tbz x1, #0, 119f\n"
- "ld1 { v30.s }[2], [x7]\n"
- "ld1 { v12.s }[2], [x8]\n"
+ "ld1 { v9.s }[2], [x6]\n"
+ "ld1 { v20.s }[2], [x7]\n"
"b 119f\n"
"118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 119f\n"
- "ld1 { v30.s }[0], [x7]\n"
- "ld1 { v12.s }[0], [x8]\n"
+ "ld1 { v9.s }[0], [x6]\n"
+ "ld1 { v20.s }[0], [x7]\n"
"119:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v7.4s, v7.4s, v30.4s\n"
- "and v16.16b, v7.16b, v12.16b\n"
- "add x17, x17, x4\n"
- "add x16, x16, x4\n"
- "sqrdmulh v15.4s, v15.4s, v14.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "add x15, x15, x4\n"
- "add x14, x14, x4\n"
- "and v2.16b, v15.16b, v27.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "sqrdmulh v24.4s, v24.4s, v30.4s\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "sqadd v7.4s, v7.4s, v16.4s\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "and v21.16b, v20.16b, v12.16b\n"
- "sqrdmulh v5.4s, v5.4s, v14.4s\n"
- "and v18.16b, v24.16b, v12.16b\n"
- "sqrdmulh v22.4s, v22.4s, v14.4s\n"
- "and v31.16b, v23.16b, v12.16b\n"
- "sqrdmulh v19.4s, v19.4s, v14.4s\n"
- "sqadd v15.4s, v15.4s, v2.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v9.16b, v5.16b, v27.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v4.16b, v22.16b, v27.16b\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v28.16b, v19.16b, v27.16b\n"
- "sqadd v20.4s, v20.4s, v21.4s\n"
+ "sqrdmulh v8.4s, v8.4s, v9.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v18.4s\n"
+ "add x8, x8, x3\n"
+ "add x17, x17, x3\n"
+ "sqrdmulh v27.4s, v27.4s, v9.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v9.4s\n"
+ "add x16, x16, x3\n"
+ "add x15, x15, x3\n"
+ "sqrdmulh v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v18.4s\n"
+ "and v17.16b, v8.16b, v20.16b\n"
+ "and v23.16b, v0.16b, v3.16b\n"
+ "and v9.16b, v27.16b, v20.16b\n"
+ "and v26.16b, v1.16b, v20.16b\n"
+ "sqrdmulh v25.4s, v25.4s, v18.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v18.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v18.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v31.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v12.4s\n"
- "srshl v20.4s, v20.4s, v12.4s\n"
- "sqadd v5.4s, v5.4s, v9.4s\n"
- "srshl v24.4s, v24.4s, v12.4s\n"
- "sqadd v22.4s, v22.4s, v4.4s\n"
- "srshl v23.4s, v23.4s, v12.4s\n"
- "sqadd v19.4s, v19.4s, v28.4s\n"
- "srshl v15.4s, v15.4s, v27.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v27.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v27.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v27.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "and v24.16b, v6.16b, v3.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v18.16b, v25.16b, v3.16b\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v17.16b, v5.16b, v20.16b\n"
+ "sqadd v0.4s, v0.4s, v23.4s\n"
+ "and v16.16b, v30.16b, v3.16b\n"
+ "sqadd v27.4s, v27.4s, v9.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v26.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v20.4s\n"
+ "srshl v27.4s, v27.4s, v20.4s\n"
+ "sqadd v5.4s, v5.4s, v17.4s\n"
+ "sqadd v6.4s, v6.4s, v24.4s\n"
+ "srshl v1.4s, v1.4s, v20.4s\n"
+ "sqadd v25.4s, v25.4s, v18.4s\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "srshl v0.4s, v0.4s, v3.4s\n"
+ "srshl v5.4s, v5.4s, v20.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v3.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v3.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v3.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
"tbz x1, #2, 121f\n"
- "st1 { v7.s }[0], [x17], #0x4\n"
- "st1 { v20.s }[0], [x16], #0x4\n"
- "st1 { v24.s }[0], [x15], #0x4\n"
- "st1 { v23.s }[0], [x14], #0x4\n"
+ "st1 { v8.s }[0], [x8], #0x4\n"
+ "st1 { v27.s }[0], [x17], #0x4\n"
+ "st1 { v1.s }[0], [x16], #0x4\n"
+ "st1 { v5.s }[0], [x15], #0x4\n"
"tbz x1, #1, 120f\n"
- "st1 { v7.h }[2], [x17], #0x2\n"
- "st1 { v20.h }[2], [x16], #0x2\n"
- "st1 { v24.h }[2], [x15], #0x2\n"
- "st1 { v23.h }[2], [x14], #0x2\n"
+ "st1 { v8.h }[2], [x8], #0x2\n"
+ "st1 { v27.h }[2], [x17], #0x2\n"
+ "st1 { v1.h }[2], [x16], #0x2\n"
+ "st1 { v5.h }[2], [x15], #0x2\n"
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[6], [x17], #0x1\n"
- "st1 { v20.b }[6], [x16], #0x1\n"
- "st1 { v24.b }[6], [x15], #0x1\n"
- "st1 { v23.b }[6], [x14], #0x1\n"
+ "st1 { v8.b }[6], [x8], #0x1\n"
+ "st1 { v27.b }[6], [x17], #0x1\n"
+ "st1 { v1.b }[6], [x16], #0x1\n"
+ "st1 { v5.b }[6], [x15], #0x1\n"
"b 123f\n"
"120:" // Oddments: Bit 2: Bit 1: Unset
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[4], [x17], #0x1\n"
- "st1 { v20.b }[4], [x16], #0x1\n"
- "st1 { v24.b }[4], [x15], #0x1\n"
- "st1 { v23.b }[4], [x14], #0x1\n"
+ "st1 { v8.b }[4], [x8], #0x1\n"
+ "st1 { v27.b }[4], [x17], #0x1\n"
+ "st1 { v1.b }[4], [x16], #0x1\n"
+ "st1 { v5.b }[4], [x15], #0x1\n"
"b 123f\n"
"121:" // Oddments: Bit 2: Unset
"tbz x1, #1, 122f\n"
- "st1 { v7.h }[0], [x17], #0x2\n"
- "st1 { v20.h }[0], [x16], #0x2\n"
- "st1 { v24.h }[0], [x15], #0x2\n"
- "st1 { v23.h }[0], [x14], #0x2\n"
+ "st1 { v8.h }[0], [x8], #0x2\n"
+ "st1 { v27.h }[0], [x17], #0x2\n"
+ "st1 { v1.h }[0], [x16], #0x2\n"
+ "st1 { v5.h }[0], [x15], #0x2\n"
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[2], [x17], #0x1\n"
- "st1 { v20.b }[2], [x16], #0x1\n"
- "st1 { v24.b }[2], [x15], #0x1\n"
- "st1 { v23.b }[2], [x14], #0x1\n"
+ "st1 { v8.b }[2], [x8], #0x1\n"
+ "st1 { v27.b }[2], [x17], #0x1\n"
+ "st1 { v1.b }[2], [x16], #0x1\n"
+ "st1 { v5.b }[2], [x15], #0x1\n"
"b 123f\n"
"122:" // Oddments: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[0], [x17], #0x1\n"
- "st1 { v20.b }[0], [x16], #0x1\n"
- "st1 { v24.b }[0], [x15], #0x1\n"
- "st1 { v23.b }[0], [x14], #0x1\n"
+ "st1 { v8.b }[0], [x8], #0x1\n"
+ "st1 { v27.b }[0], [x17], #0x1\n"
+ "st1 { v1.b }[0], [x16], #0x1\n"
+ "st1 { v5.b }[0], [x15], #0x1\n"
"123:" // Oddments: Bit 2: End
"124:" // End
:
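
Reading aid for the hunk above: the change appears to be a register reallocation and rescheduling of machine-generated assembly, and the requantization epilogue (sqrdmulh, and/sshr/sqadd, srshl, sqxtn/sqxtn2, sqadd, smax/smin, uzp1) computes the same values before and after the rename. A minimal C++ sketch of that epilogue in ACLE NEON intrinsics follows; the function and parameter names are illustrative, not taken from the kernel, and the multiplier/shift vectors stand in for the requant parameters the kernel loads.

#include <arm_neon.h>

// Sketch of the per-lane requantization performed by the
// sqrdmulh / and+sshr+sqadd / srshl / sqxtn / sqadd / smax / smin / uzp1
// sequence above. 'mul' and 'rshift' correspond to the multiplier and the
// (negative) rounding right-shift loaded from the requant parameters.
static inline uint8x8_t requantize_u8(int32x4_t acc_lo, int32x4_t acc_hi,
                                      int32x4_t mul, int32x4_t rshift,
                                      int16x8_t c_offset,
                                      int16x8_t minval, int16x8_t maxval)
{
    // Saturating rounding-doubling high multiply (sqrdmulh).
    acc_lo = vqrdmulhq_s32(acc_lo, mul);
    acc_hi = vqrdmulhq_s32(acc_hi, mul);
    // Sign-based fixup (and + sshr #31 + sqadd) so the rounding shift that
    // follows rounds negative lanes consistently with the reference math.
    acc_lo = vqaddq_s32(acc_lo, vshrq_n_s32(vandq_s32(acc_lo, rshift), 31));
    acc_hi = vqaddq_s32(acc_hi, vshrq_n_s32(vandq_s32(acc_hi, rshift), 31));
    // Rounding right shift (srshl with a negative per-lane shift count).
    acc_lo = vrshlq_s32(acc_lo, rshift);
    acc_hi = vrshlq_s32(acc_hi, rshift);
    // Saturating narrow to 16 bits (sqxtn/sqxtn2), add the output offset
    // (sqadd) and clamp to the quantized range (smax/smin).
    int16x8_t h = vcombine_s16(vqmovn_s32(acc_lo), vqmovn_s32(acc_hi));
    h = vqaddq_s16(h, c_offset);
    h = vmaxq_s16(h, minval);
    h = vminq_s16(h, maxval);
    // Keep the low byte of each halfword (the uzp1 on the .16b arrangement).
    return vmovn_u16(vreinterpretq_u16_s16(h));
}
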
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
index f7aa889b56..0641563b63 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,21 +45,21 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"lsr x9, %x[n_channels], #0x2\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
"ld1r { v8.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v7.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_maxval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v7.4s }, [x21]\n"
"ld1r { v6.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v5.16b }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v5.16b }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v3.4s }, [x21]\n"
"ld1r { v2.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
"mov x11, #0x0\n"
+ "ld1r { v1.4s }, [x20]\n"
"cbz x9, 6f\n"
"1:" // Channel loop
"movi v23.4s, #0x0\n"
@@ -68,75 +68,75 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldr q23, [%x[bias], x20]\n"
"2:" // Channel loop: Load bias: Done
"ldr s0, [%x[params]], #0x4\n"
- "mov x25, %x[inptrs]\n"
- "ldp x21, x20, [x25], #0x10\n"
- "subs x24, %x[n_points], #0x1\n"
- "ldr s14, [x21, x11]\n"
- "ldr s15, [x20, x11]\n"
+ "mov x23, %x[inptrs]\n"
+ "subs x22, %x[n_points], #0x1\n"
"mov v24.16b, v23.16b\n"
"mov v25.16b, v23.16b\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s16, [x21, x11]\n"
"mov v26.16b, v23.16b\n"
"mov v27.16b, v23.16b\n"
- "ldr s17, [x20, x11]\n"
- "ldp x21, x20, [x25], #0x10\n"
"mov v28.16b, v23.16b\n"
+ "ldp x21, x20, [x23], #0x10\n"
"mov v29.16b, v23.16b\n"
- "ldr s18, [x21, x11]\n"
- "ldr s19, [x20, x11]\n"
"mov v30.16b, v23.16b\n"
"mov v31.16b, v23.16b\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s20, [x21, x11]\n"
"usubl v0.8h, v0.8b, v5.8b\n"
+ "ldr s14, [x21, x11]\n"
+ "ldr s15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x20, x11]\n"
- "ldr x20, [x25], #0x8\n"
"usubl v15.8h, v15.8b, v6.8b\n"
+ "ldr s16, [x21, x11]\n"
+ "ldr s17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x20, x11]\n"
+ "ldr s18, [x21, x11]\n"
+ "ldr s19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v17.8h, v17.8b, v6.8b\n"
"usubl v18.8h, v18.8b, v6.8b\n"
"usubl v19.8h, v19.8b, v6.8b\n"
+ "ldr s20, [x21, x11]\n"
+ "ldr s21, [x20, x11]\n"
+ "ldr x20, [x23], #0x8\n"
"usubl v20.8h, v20.8b, v6.8b\n"
"usubl v21.8h, v21.8b, v6.8b\n"
+ "ldr s22, [x20, x11]\n"
"usubl v22.8h, v22.8b, v6.8b\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x23, x22, [x25], #0x10\n"
- "ldp x21, x20, [x25], #0x10\n"
+ "ldp x21, x20, [x23], #0x10\n"
"smlal v23.4s, v14.4h, v0.4h\n"
"smlal v24.4s, v15.4h, v0.4h\n"
- "ldr s14, [x23, x11]\n"
- "ldr s15, [x22, x11]\n"
+ "subs x22, x22, #0x1\n"
"smlal v25.4s, v16.4h, v0.4h\n"
"smlal v26.4s, v17.4h, v0.4h\n"
- "ldr s16, [x21, x11]\n"
- "ldr s17, [x20, x11]\n"
"smlal v27.4s, v18.4h, v0.4h\n"
"smlal v28.4s, v19.4h, v0.4h\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s18, [x21, x11]\n"
+ "ldr s14, [x21, x11]\n"
+ "ldr s15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"smlal v29.4s, v20.4h, v0.4h\n"
"smlal v30.4s, v21.4h, v0.4h\n"
- "ldr s19, [x20, x11]\n"
- "ldp x21, x20, [x25], #0x10\n"
"smlal v31.4s, v22.4h, v0.4h\n"
- "subs x24, x24, #0x1\n"
"ldr s0, [%x[params]], #0x4\n"
- "ldr s20, [x21, x11]\n"
- "usubl v0.8h, v0.8b, v5.8b\n"
"usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x20, x11]\n"
- "ldr x20, [x25], #0x8\n"
"usubl v15.8h, v15.8b, v6.8b\n"
+ "ldr s16, [x21, x11]\n"
+ "ldr s17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "usubl v0.8h, v0.8b, v5.8b\n"
"usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x20, x11]\n"
"usubl v17.8h, v17.8b, v6.8b\n"
+ "ldr s18, [x21, x11]\n"
+ "ldr s19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v18.8h, v18.8b, v6.8b\n"
"usubl v19.8h, v19.8b, v6.8b\n"
+ "ldr s20, [x21, x11]\n"
+ "ldr s21, [x20, x11]\n"
+ "ldr x20, [x23], #0x8\n"
"usubl v20.8h, v20.8b, v6.8b\n"
+ "ldr s22, [x20, x11]\n"
"usubl v21.8h, v21.8b, v6.8b\n"
"usubl v22.8h, v22.8b, v6.8b\n"
"bgt 3b\n"
@@ -162,27 +162,27 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
"sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sshl v26.4s, v26.4s, v3.4s\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "and v18.16b, v23.16b, v1.16b\n"
- "and v17.16b, v24.16b, v1.16b\n"
- "and v16.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
"sshl v27.4s, v27.4s, v3.4s\n"
"sshl v28.4s, v28.4s, v3.4s\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v2.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v2.4s\n"
"sshl v29.4s, v29.4s, v3.4s\n"
"sshl v30.4s, v30.4s, v3.4s\n"
"sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v18.16b, v23.16b, v1.16b\n"
+ "and v17.16b, v24.16b, v1.16b\n"
+ "and v16.16b, v25.16b, v1.16b\n"
"sqrdmulh v26.4s, v26.4s, v2.4s\n"
"sqrdmulh v27.4s, v27.4s, v2.4s\n"
"sqrdmulh v28.4s, v28.4s, v2.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v2.4s\n"
@@ -254,17 +254,17 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s23, [x28, x11]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s24, [x27, x11]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s25, [x26, x11]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x11]\n"
+ "str s23, [x28, x11]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x11]\n"
+ "str s25, [x26, x11]\n"
+ "str s26, [x25, x11]\n"
"str s27, [x24, x11]\n"
"str s28, [x23, x11]\n"
"str s29, [x22, x11]\n"
@@ -290,24 +290,24 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"9:" // Oddments: Load bias: Done
"ldr s0, [%x[params]], #0x4\n"
"mov x10, %x[inptrs]\n"
- "ldp x9, x28, [x10], #0x10\n"
"mov v24.16b, v23.16b\n"
- "ldp x27, x26, [x10], #0x10\n"
- "ldp x25, x24, [x10], #0x10\n"
"mov v25.16b, v23.16b\n"
"mov v26.16b, v23.16b\n"
- "ldp x23, x22, [x10], #0x10\n"
- "ldr x21, [x10], #0x8\n"
"mov v27.16b, v23.16b\n"
"mov v28.16b, v23.16b\n"
"mov v29.16b, v23.16b\n"
+ "ldp x9, x28, [x10], #0x10\n"
"mov v30.16b, v23.16b\n"
- "add x9, x9, x11\n"
- "add x28, x28, x11\n"
"mov v31.16b, v23.16b\n"
"usubl v0.8h, v0.8b, v5.8b\n"
+ "ldp x27, x26, [x10], #0x10\n"
+ "add x9, x9, x11\n"
+ "add x28, x28, x11\n"
+ "ldp x25, x24, [x10], #0x10\n"
"add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
+ "ldr x21, [x10], #0x8\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
@@ -358,27 +358,27 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ble 15f\n"
"12:" // Oddments: Planar loop
"ldp x9, x28, [x10], #0x10\n"
- "ldp x27, x26, [x10], #0x10\n"
"smlal v23.4s, v14.4h, v0.4h\n"
"smlal v24.4s, v15.4h, v0.4h\n"
- "ldp x25, x24, [x10], #0x10\n"
- "ldp x23, x22, [x10], #0x10\n"
"smlal v25.4s, v16.4h, v0.4h\n"
"smlal v26.4s, v17.4h, v0.4h\n"
"smlal v27.4s, v18.4h, v0.4h\n"
"smlal v28.4s, v19.4h, v0.4h\n"
- "ldr x21, [x10], #0x8\n"
- "add x9, x9, x11\n"
+ "ldp x27, x26, [x10], #0x10\n"
"smlal v29.4s, v20.4h, v0.4h\n"
"smlal v30.4s, v21.4h, v0.4h\n"
+ "add x9, x9, x11\n"
"add x28, x28, x11\n"
- "add x27, x27, x11\n"
"smlal v31.4s, v22.4h, v0.4h\n"
"ldr s0, [%x[params]], #0x4\n"
- "usubl v0.8h, v0.8b, v5.8b\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
+ "usubl v0.8h, v0.8b, v5.8b\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
+ "ldr x21, [x10], #0x8\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
"add x21, x21, x11\n"
@@ -465,36 +465,36 @@ void a64_u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
"sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sshl v26.4s, v26.4s, v3.4s\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+ "sshl v27.4s, v27.4s, v3.4s\n"
+ "sshl v28.4s, v28.4s, v3.4s\n"
"ldr x20, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v2.4s\n"
"add x28, x28, x11\n"
- "and v18.16b, v23.16b, v1.16b\n"
- "and v17.16b, v24.16b, v1.16b\n"
"add x27, x27, x11\n"
+ "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+ "sshl v29.4s, v29.4s, v3.4s\n"
"add x26, x26, x11\n"
- "and v16.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
"add x25, x25, x11\n"
+ "sshl v30.4s, v30.4s, v3.4s\n"
+ "sshl v31.4s, v31.4s, v3.4s\n"
"add x24, x24, x11\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
"add x23, x23, x11\n"
+ "and v18.16b, v23.16b, v1.16b\n"
+ "and v17.16b, v24.16b, v1.16b\n"
"add x22, x22, x11\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
"add x21, x21, x11\n"
+ "and v16.16b, v25.16b, v1.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v2.4s\n"
"add x20, x20, x11\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v2.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v2.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
"sqrdmulh v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v2.4s\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
index d69f391514..24831a7153 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,162 +41,162 @@ void a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "ldr q11, [%x[params], #0x0]\n"
+ "ldr q14, [%x[params], #0x0]\n"
"ldr q5, [%x[params], #0x10]\n"
- "movi v8.16b, #0x1\n"
- "ushr v8.4s, v8.4s, #0x8\n"
+ "movi v18.16b, #0x1\n"
+ "movi v24.4s, #0x0\n"
"ldr q6, [%x[params], #0x20]\n"
"ldr q7, [%x[params], #0x30]\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
- "ld1 { v1.16b }, [x20]\n"
- "mov v28.16b, v1.16b\n"
- "mov v23.16b, v1.16b\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ld1 { v2.16b }, [x20]\n"
- "mov v30.16b, v1.16b\n"
- "mov v21.16b, v2.16b\n"
- "ldr x20, [%x[inptrs], #0x20]\n"
- "ld1 { v4.16b }, [x20]\n"
- "mov v20.16b, v2.16b\n"
- "mov v29.16b, v2.16b\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "ld1 { v0.16b }, [x20]\n"
- "mov v9.16b, v4.16b\n"
- "mov v22.16b, v4.16b\n"
- "ldr x20, [%x[inptrs], #0x18]\n"
- "ld1 { v3.16b }, [x20]\n"
- "mov v31.16b, v4.16b\n"
- "ext v28.16b, v28.16b, v28.16b, #0x2\n"
- "ext v23.16b, v23.16b, v23.16b, #0x4\n"
- "ext v30.16b, v30.16b, v30.16b, #0x6\n"
+ "movi v28.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "ldr x25, [%x[inptrs], #0x8]\n"
+ "ldr x24, [%x[inptrs], #0x10]\n"
+ "ushr v18.4s, v18.4s, #0x8\n"
+ "movi v27.4s, #0x0\n"
+ "ldr x23, [%x[inptrs], #0x20]\n"
+ "ldr x22, [%x[inptrs], #0x0]\n"
+ "movi v21.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "ldr x21, [%x[inptrs], #0x18]\n"
+ "movi v13.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
"add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.4s }, [x20]\n"
- "ext v21.16b, v21.16b, v21.16b, #0x2\n"
- "ext v20.16b, v20.16b, v20.16b, #0x4\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v14.4s }, [x20]\n"
- "ext v29.16b, v29.16b, v29.16b, #0x6\n"
- "ext v9.16b, v9.16b, v9.16b, #0x2\n"
+ "ld1 { v1.16b }, [x25]\n"
+ "ld1 { v2.16b }, [x24]\n"
+ "movi v23.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ld1 { v4.16b }, [x23]\n"
+ "ld1 { v0.16b }, [x22]\n"
+ "movi v20.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "ld1 { v3.16b }, [x21]\n"
+ "ld1r { v19.4s }, [x20]\n"
+ "movi v22.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "mov v31.16b, v1.16b\n"
+ "mov v9.16b, v1.16b\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_c_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v13.4s }, [x20]\n"
- "ext v22.16b, v22.16b, v22.16b, #0x4\n"
- "ext v31.16b, v31.16b, v31.16b, #0x6\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v15.4s }, [x20]\n"
- "mov v27.16b, v0.16b\n"
- "mov v19.16b, v0.16b\n"
+ "ld1r { v11.4s }, [x21]\n"
+ "ld1r { v10.4s }, [x20]\n"
+ "mov v16.16b, v1.16b\n"
+ "mov v30.16b, v2.16b\n"
+ "mov v29.16b, v2.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x2\n"
+ "add x10, %x[qp], %[offsetof_Requantize32_maxval]\n"
"cmp %x[n_channels], #0x4\n"
+ "ext v9.16b, v9.16b, v9.16b, #0x4\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x6\n"
"mov x9, #0x0\n"
- "mov v18.16b, v0.16b\n"
- "mov v26.16b, v3.16b\n"
"mov x28, #0x0\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x2\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x4\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
- "mov v17.16b, v3.16b\n"
- "mov v16.16b, v3.16b\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "neg v19.4s, v19.4s\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
- "ext v27.16b, v27.16b, v27.16b, #0x2\n"
- "ext v19.16b, v19.16b, v19.16b, #0x4\n"
"ldp x21, x20, [%x[outptrs], #0x30]\n"
"add %x[params], %x[params], #0x40\n"
- "ext v18.16b, v18.16b, v18.16b, #0x6\n"
- "zip1 v1.4s, v1.4s, v23.4s\n"
- "zip1 v28.4s, v28.4s, v30.4s\n"
- "zip1 v2.4s, v2.4s, v20.4s\n"
- "zip1 v21.4s, v21.4s, v29.4s\n"
- "ext v26.16b, v26.16b, v26.16b, #0x2\n"
- "ext v17.16b, v17.16b, v17.16b, #0x4\n"
+ "zip1 v1.4s, v1.4s, v9.4s\n"
+ "ld1r { v9.4s }, [x10]\n"
+ "zip1 v31.4s, v31.4s, v16.4s\n"
+ "mov v16.16b, v2.16b\n"
+ "zip1 v2.4s, v2.4s, v29.4s\n"
+ "mov v29.16b, v4.16b\n"
"ext v16.16b, v16.16b, v16.16b, #0x6\n"
- "zip1 v4.4s, v4.4s, v22.4s\n"
- "zip1 v9.4s, v9.4s, v31.4s\n"
- "zip1 v0.4s, v0.4s, v19.4s\n"
- "zip1 v27.4s, v27.4s, v18.4s\n"
- "zip1 v1.4s, v1.4s, v28.4s\n"
- "zip1 v2.4s, v2.4s, v21.4s\n"
- ".inst 0x6f81e118 // udot v24.4s, v8.16b, v1.4b[0]\n"
- "zip1 v3.4s, v3.4s, v17.4s\n"
- "zip1 v26.4s, v26.4s, v16.4s\n"
- ".inst 0x6fa1e119 // udot v25.4s, v8.16b, v1.4b[1]\n"
- "zip1 v4.4s, v4.4s, v9.4s\n"
- "movi v23.4s, #0x0\n"
- ".inst 0x6f81e917 // udot v23.4s, v8.16b, v1.4b[2]\n"
- "movi v22.4s, #0x0\n"
- "movi v21.4s, #0x0\n"
- ".inst 0x6fa1e916 // udot v22.4s, v8.16b, v1.4b[3]\n"
- "movi v19.4s, #0x0\n"
- "movi v9.4s, #0x0\n"
- ".inst 0x6f82e115 // udot v21.4s, v8.16b, v2.4b[0]\n"
- "movi v10.4s, #0x0\n"
- "movi v20.4s, #0x0\n"
- ".inst 0x6fa2e113 // udot v19.4s, v8.16b, v2.4b[1]\n"
- "movi v18.4s, #0x0\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6f82e909 // udot v9.4s, v8.16b, v2.4b[2]\n"
- "movi v16.4s, #0x0\n"
- "zip1 v0.4s, v0.4s, v27.4s\n"
- ".inst 0x6fa2e90a // udot v10.4s, v8.16b, v2.4b[3]\n"
- "zip1 v3.4s, v3.4s, v26.4s\n"
- ".inst 0x6f84e114 // udot v20.4s, v8.16b, v4.4b[0]\n"
- ".inst 0x6fa4e112 // udot v18.4s, v8.16b, v4.4b[1]\n"
- ".inst 0x6f84e911 // udot v17.4s, v8.16b, v4.4b[2]\n"
- ".inst 0x6fa4e910 // udot v16.4s, v8.16b, v4.4b[3]\n"
- "movi v31.4s, #0x0\n"
+ "zip1 v1.4s, v1.4s, v31.4s\n"
+ "mov v31.16b, v4.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x2\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x4\n"
+ "zip1 v30.4s, v30.4s, v16.4s\n"
+ "mov v16.16b, v4.16b\n"
+ ".inst 0x6f81e258 // udot v24.4s, v18.16b, v1.4b[0]\n"
+ ".inst 0x6fa1e25c // udot v28.4s, v18.16b, v1.4b[1]\n"
+ ".inst 0x6f81ea5a // udot v26.4s, v18.16b, v1.4b[2]\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x6\n"
+ "zip1 v4.4s, v4.4s, v31.4s\n"
+ "mov v31.16b, v0.16b\n"
+ ".inst 0x6fa1ea5b // udot v27.4s, v18.16b, v1.4b[3]\n"
+ "zip1 v2.4s, v2.4s, v30.4s\n"
+ "mov v30.16b, v0.16b\n"
+ "ext v31.16b, v31.16b, v31.16b, #0x2\n"
+ "zip1 v29.4s, v29.4s, v16.4s\n"
+ "mov v16.16b, v0.16b\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x4\n"
+ ".inst 0x6f82e255 // udot v21.4s, v18.16b, v2.4b[0]\n"
+ ".inst 0x6fa2e24c // udot v12.4s, v18.16b, v2.4b[1]\n"
+ ".inst 0x6f82ea4d // udot v13.4s, v18.16b, v2.4b[2]\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x6\n"
+ "zip1 v4.4s, v4.4s, v29.4s\n"
+ "mov v29.16b, v3.16b\n"
+ ".inst 0x6fa2ea4f // udot v15.4s, v18.16b, v2.4b[3]\n"
+ "zip1 v0.4s, v0.4s, v30.4s\n"
+ "mov v30.16b, v3.16b\n"
+ "ext v29.16b, v29.16b, v29.16b, #0x2\n"
+ "zip1 v31.4s, v31.4s, v16.4s\n"
+ "mov v16.16b, v3.16b\n"
+ "ext v30.16b, v30.16b, v30.16b, #0x4\n"
+ ".inst 0x6f84e257 // udot v23.4s, v18.16b, v4.4b[0]\n"
+ ".inst 0x6fa4e248 // udot v8.4s, v18.16b, v4.4b[1]\n"
+ ".inst 0x6f84ea54 // udot v20.4s, v18.16b, v4.4b[2]\n"
+ "add v24.4s, v24.4s, v21.4s\n"
+ "ext v16.16b, v16.16b, v16.16b, #0x6\n"
+ "zip1 v0.4s, v0.4s, v31.4s\n"
+ ".inst 0x6fa4ea51 // udot v17.4s, v18.16b, v4.4b[3]\n"
+ "zip1 v3.4s, v3.4s, v30.4s\n"
"movi v30.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- ".inst 0x6f80e11f // udot v31.4s, v8.16b, v0.4b[0]\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- ".inst 0x6fa0e11e // udot v30.4s, v8.16b, v0.4b[1]\n"
+ "movi v31.4s, #0x0\n"
+ "add v28.4s, v28.4s, v12.4s\n"
+ "zip1 v29.4s, v29.4s, v16.4s\n"
+ "movi v16.4s, #0x0\n"
+ ".inst 0x6f80e256 // udot v22.4s, v18.16b, v0.4b[0]\n"
+ ".inst 0x6fa0e259 // udot v25.4s, v18.16b, v0.4b[1]\n"
+ ".inst 0x6f80ea5e // udot v30.4s, v18.16b, v0.4b[2]\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ ".inst 0x6fa0ea5f // udot v31.4s, v18.16b, v0.4b[3]\n"
+ "add v27.4s, v27.4s, v15.4s\n"
+ "zip1 v3.4s, v3.4s, v29.4s\n"
"movi v29.4s, #0x0\n"
- ".inst 0x6f80e91a // udot v26.4s, v8.16b, v0.4b[2]\n"
- ".inst 0x6fa0e91b // udot v27.4s, v8.16b, v0.4b[3]\n"
- ".inst 0x6f83e11c // udot v28.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x6fa3e11d // udot v29.4s, v8.16b, v3.4b[1]\n"
- "add v24.4s, v24.4s, v21.4s\n"
- "add v25.4s, v25.4s, v19.4s\n"
- "add v23.4s, v23.4s, v9.4s\n"
- "add v22.4s, v22.4s, v10.4s\n"
- "add v21.4s, v20.4s, v21.4s\n"
- "movi v20.4s, #0x0\n"
- ".inst 0x6f83e914 // udot v20.4s, v8.16b, v3.4b[2]\n"
- "add v19.4s, v18.4s, v19.4s\n"
- "movi v18.4s, #0x0\n"
- ".inst 0x6fa3e912 // udot v18.4s, v8.16b, v3.4b[3]\n"
- "add v17.4s, v17.4s, v9.4s\n"
- "add v16.4s, v16.4s, v10.4s\n"
- "add v24.4s, v24.4s, v31.4s\n"
- "add v25.4s, v25.4s, v30.4s\n"
- "add v26.4s, v23.4s, v26.4s\n"
- "add v27.4s, v22.4s, v27.4s\n"
- "add v28.4s, v21.4s, v28.4s\n"
- "add v29.4s, v19.4s, v29.4s\n"
- "add v30.4s, v17.4s, v20.4s\n"
- "add v31.4s, v16.4s, v18.4s\n"
- "neg v12.4s, v12.4s\n"
- "mul v24.4s, v24.4s, v12.4s\n"
- "mul v25.4s, v25.4s, v12.4s\n"
- "mul v26.4s, v26.4s, v12.4s\n"
- "mul v27.4s, v27.4s, v12.4s\n"
- "mul v28.4s, v28.4s, v12.4s\n"
- "mul v29.4s, v29.4s, v12.4s\n"
- "mul v30.4s, v30.4s, v12.4s\n"
- "mul v31.4s, v31.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v21.4s\n"
+ "movi v21.4s, #0x0\n"
+ "add v12.4s, v8.4s, v12.4s\n"
+ "movi v8.4s, #0x0\n"
+ ".inst 0x6f83e250 // udot v16.4s, v18.16b, v3.4b[0]\n"
+ ".inst 0x6fa3e25d // udot v29.4s, v18.16b, v3.4b[1]\n"
+ ".inst 0x6f83ea55 // udot v21.4s, v18.16b, v3.4b[2]\n"
+ "add v20.4s, v20.4s, v13.4s\n"
+ ".inst 0x6fa3ea48 // udot v8.4s, v18.16b, v3.4b[3]\n"
+ "add v17.4s, v17.4s, v15.4s\n"
+ "add v24.4s, v24.4s, v22.4s\n"
+ "add v25.4s, v28.4s, v25.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v31.4s\n"
+ "add v28.4s, v23.4s, v16.4s\n"
+ "add v29.4s, v12.4s, v29.4s\n"
+ "add v30.4s, v20.4s, v21.4s\n"
+ "add v31.4s, v17.4s, v8.4s\n"
+ "mul v24.4s, v24.4s, v19.4s\n"
+ "mul v25.4s, v25.4s, v19.4s\n"
+ "mul v26.4s, v26.4s, v19.4s\n"
+ "mul v27.4s, v27.4s, v19.4s\n"
+ "mul v28.4s, v28.4s, v19.4s\n"
+ "mul v29.4s, v29.4s, v19.4s\n"
+ "mul v30.4s, v30.4s, v19.4s\n"
+ "mul v31.4s, v31.4s, v19.4s\n"
"zip1 v19.4s, v24.4s, v26.4s\n"
"zip1 v18.4s, v25.4s, v27.4s\n"
+ "add v24.4s, v24.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v14.4s\n"
"zip1 v17.4s, v28.4s, v30.4s\n"
"zip1 v16.4s, v29.4s, v31.4s\n"
"zip1 v22.4s, v19.4s, v18.4s\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v27.4s, v27.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v14.4s\n"
"zip1 v23.4s, v17.4s, v16.4s\n"
- "add v24.4s, v24.4s, v11.4s\n"
- "add v25.4s, v25.4s, v11.4s\n"
- "add v26.4s, v26.4s, v11.4s\n"
- "add v27.4s, v27.4s, v11.4s\n"
- "add v28.4s, v28.4s, v11.4s\n"
- "add v29.4s, v29.4s, v11.4s\n"
- "add v30.4s, v30.4s, v11.4s\n"
- "add v31.4s, v31.4s, v11.4s\n"
+ "add v29.4s, v29.4s, v14.4s\n"
+ "add v30.4s, v30.4s, v14.4s\n"
+ "add v31.4s, v31.4s, v14.4s\n"
"ble 2f\n"
"1:" // Loop
"ldr q8, [%x[params], #0x0]\n"
@@ -207,96 +207,96 @@ void a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
".inst 0x6f80e8ba // udot v26.4s, v5.16b, v0.4b[2]\n"
".inst 0x6fa0e8bb // udot v27.4s, v5.16b, v0.4b[3]\n"
"sub %x[n_channels], %x[n_channels], #0x4\n"
- ".inst 0x6f81e0d8 // udot v24.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6fa1e0d9 // udot v25.4s, v6.16b, v1.4b[1]\n"
- "cmp %x[n_channels], #0x4\n"
- "add x9, x9, #0x10\n"
- ".inst 0x6f81e8da // udot v26.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x6fa1e8db // udot v27.4s, v6.16b, v1.4b[3]\n"
".inst 0x6f82e0bc // udot v28.4s, v5.16b, v2.4b[0]\n"
".inst 0x6fa2e0bd // udot v29.4s, v5.16b, v2.4b[1]\n"
+ "cmp %x[n_channels], #0x4\n"
+ "add x9, x9, #0x10\n"
".inst 0x6f82e8be // udot v30.4s, v5.16b, v2.4b[2]\n"
".inst 0x6fa2e8bf // udot v31.4s, v5.16b, v2.4b[3]\n"
"ldr q5, [%x[params], #0x30]\n"
- ".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x6fa2e0f9 // udot v25.4s, v7.16b, v2.4b[1]\n"
- "sqrdmulh v24.4s, v24.4s, v8.4s\n"
- ".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x6fa2e8fb // udot v27.4s, v7.16b, v2.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ ".inst 0x6f81e0d8 // udot v24.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x6fa1e0d9 // udot v25.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x6f81e8da // udot v26.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x6fa1e8db // udot v27.4s, v6.16b, v1.4b[3]\n"
".inst 0x6f83e0dc // udot v28.4s, v6.16b, v3.4b[0]\n"
".inst 0x6fa3e0dd // udot v29.4s, v6.16b, v3.4b[1]\n"
- "sqrdmulh v26.4s, v26.4s, v8.4s\n"
".inst 0x6f83e8de // udot v30.4s, v6.16b, v3.4b[2]\n"
".inst 0x6fa3e8df // udot v31.4s, v6.16b, v3.4b[3]\n"
"ldr q6, [%x[params], #0x40]\n"
- "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ ".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x6fa2e0f9 // udot v25.4s, v7.16b, v2.4b[1]\n"
+ ".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x6fa2e8fb // udot v27.4s, v7.16b, v2.4b[3]\n"
".inst 0x6f84e0fc // udot v28.4s, v7.16b, v4.4b[0]\n"
".inst 0x6fa4e0fd // udot v29.4s, v7.16b, v4.4b[1]\n"
- "and v19.16b, v24.16b, v21.16b\n"
".inst 0x6f84e8fe // udot v30.4s, v7.16b, v4.4b[2]\n"
".inst 0x6fa4e8ff // udot v31.4s, v7.16b, v4.4b[3]\n"
"ldr q7, [%x[params], #0x50]\n"
+ "add %x[params], %x[params], #0x60\n"
+ "sqrdmulh v24.4s, v24.4s, v8.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v8.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v8.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v8.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v8.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v8.4s\n"
+ "and v19.16b, v24.16b, v21.16b\n"
"and v18.16b, v25.16b, v21.16b\n"
"and v17.16b, v26.16b, v21.16b\n"
"and v16.16b, v27.16b, v21.16b\n"
- "add %x[params], %x[params], #0x60\n"
+ "sqrdmulh v30.4s, v30.4s, v8.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v8.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v8.4s\n"
- "sqrdmulh v29.4s, v29.4s, v8.4s\n"
- "sqrdmulh v30.4s, v30.4s, v8.4s\n"
- "sqrdmulh v31.4s, v31.4s, v8.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
+ "and v19.16b, v28.16b, v21.16b\n"
"sqadd v25.4s, v25.4s, v18.4s\n"
+ "and v18.16b, v29.16b, v21.16b\n"
"sqadd v26.4s, v26.4s, v17.4s\n"
"sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v21.16b\n"
- "and v18.16b, v29.16b, v21.16b\n"
"and v17.16b, v30.16b, v21.16b\n"
"and v16.16b, v31.16b, v21.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v21.4s\n"
+ "srshl v25.4s, v25.4s, v21.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v19.4s\n"
"sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v21.4s\n"
- "srshl v25.4s, v25.4s, v21.4s\n"
"srshl v26.4s, v26.4s, v21.4s\n"
"srshl v27.4s, v27.4s, v21.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
"srshl v28.4s, v28.4s, v21.4s\n"
"srshl v29.4s, v29.4s, v21.4s\n"
+ "add v24.4s, v24.4s, v11.4s\n"
+ "add v25.4s, v25.4s, v11.4s\n"
"srshl v30.4s, v30.4s, v21.4s\n"
"srshl v31.4s, v31.4s, v21.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v11.4s\n"
+ "add v27.4s, v27.4s, v11.4s\n"
+ "add v28.4s, v28.4s, v11.4s\n"
+ "add v29.4s, v29.4s, v11.4s\n"
+ "add v30.4s, v30.4s, v11.4s\n"
+ "add v31.4s, v31.4s, v11.4s\n"
+ "smin v24.4s, v24.4s, v9.4s\n"
+ "smin v25.4s, v25.4s, v9.4s\n"
+ "smin v26.4s, v26.4s, v9.4s\n"
+ "smin v27.4s, v27.4s, v9.4s\n"
+ "smin v28.4s, v28.4s, v9.4s\n"
+ "smin v29.4s, v29.4s, v9.4s\n"
+ "smin v30.4s, v30.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v10.4s\n"
+ "smax v25.4s, v25.4s, v10.4s\n"
+ "smax v26.4s, v26.4s, v10.4s\n"
+ "smax v27.4s, v27.4s, v10.4s\n"
+ "smax v28.4s, v28.4s, v10.4s\n"
+ "smax v29.4s, v29.4s, v10.4s\n"
+ "smax v30.4s, v30.4s, v10.4s\n"
+ "smax v31.4s, v31.4s, v10.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
@@ -307,33 +307,33 @@ void a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
"uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s24, [x27, x28]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "str s25, [x26, x28]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s26, [x25, x28]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x28]\n"
- "str s28, [x23, x28]\n"
+ "str s24, [x27, x28]\n"
+ "str s25, [x26, x28]\n"
"dup v24.4s, v22.s[0]\n"
"dup v25.4s, v22.s[1]\n"
- "str s29, [x22, x28]\n"
+ "str s26, [x25, x28]\n"
"dup v26.4s, v22.s[2]\n"
+ "str s27, [x24, x28]\n"
"dup v27.4s, v22.s[3]\n"
- "str s30, [x21, x28]\n"
+ "add v24.4s, v24.4s, v20.4s\n"
+ "str s28, [x23, x28]\n"
"dup v28.4s, v23.s[0]\n"
+ "add v25.4s, v25.4s, v20.4s\n"
+ "str s29, [x22, x28]\n"
"dup v29.4s, v23.s[1]\n"
- "str s31, [x20, x28]\n"
+ "add v26.4s, v26.4s, v20.4s\n"
+ "str s30, [x21, x28]\n"
"dup v30.4s, v23.s[2]\n"
+ "add v27.4s, v27.4s, v20.4s\n"
+ "str s31, [x20, x28]\n"
"dup v31.4s, v23.s[3]\n"
"add x28, x28, #0x4\n"
- "add v24.4s, v24.4s, v20.4s\n"
- "add v25.4s, v25.4s, v20.4s\n"
- "add v26.4s, v26.4s, v20.4s\n"
- "add v27.4s, v27.4s, v20.4s\n"
"add v28.4s, v28.4s, v20.4s\n"
"add v29.4s, v29.4s, v20.4s\n"
"add v30.4s, v30.4s, v20.4s\n"
@@ -348,98 +348,98 @@ void a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
".inst 0x6fa0e8bb // udot v27.4s, v5.16b, v0.4b[3]\n"
"cmp %x[n_channels], #0x4\n"
"add x27, x27, x28\n"
- ".inst 0x6f81e0d8 // udot v24.4s, v6.16b, v1.4b[0]\n"
- ".inst 0x6fa1e0d9 // udot v25.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x6f82e0bc // udot v28.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x6fa2e0bd // udot v29.4s, v5.16b, v2.4b[1]\n"
"add x26, x26, x28\n"
"add x25, x25, x28\n"
- ".inst 0x6f81e8da // udot v26.4s, v6.16b, v1.4b[2]\n"
- ".inst 0x6fa1e8db // udot v27.4s, v6.16b, v1.4b[3]\n"
+ ".inst 0x6f82e8be // udot v30.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x6fa2e8bf // udot v31.4s, v5.16b, v2.4b[3]\n"
"add x24, x24, x28\n"
"add x23, x23, x28\n"
- ".inst 0x6f82e0bc // udot v28.4s, v5.16b, v2.4b[0]\n"
- ".inst 0x6fa2e0bd // udot v29.4s, v5.16b, v2.4b[1]\n"
+ ".inst 0x6f81e0d8 // udot v24.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x6fa1e0d9 // udot v25.4s, v6.16b, v1.4b[1]\n"
"add x22, x22, x28\n"
"add x21, x21, x28\n"
- ".inst 0x6f82e8be // udot v30.4s, v5.16b, v2.4b[2]\n"
- ".inst 0x6fa2e8bf // udot v31.4s, v5.16b, v2.4b[3]\n"
+ ".inst 0x6f81e8da // udot v26.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x6fa1e8db // udot v27.4s, v6.16b, v1.4b[3]\n"
"add x20, x20, x28\n"
"add %x[params], %x[params], #0x20\n"
- ".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
- ".inst 0x6fa2e0f9 // udot v25.4s, v7.16b, v2.4b[1]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
- ".inst 0x6fa2e8fb // udot v27.4s, v7.16b, v2.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
".inst 0x6f83e0dc // udot v28.4s, v6.16b, v3.4b[0]\n"
".inst 0x6fa3e0dd // udot v29.4s, v6.16b, v3.4b[1]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
".inst 0x6f83e8de // udot v30.4s, v6.16b, v3.4b[2]\n"
".inst 0x6fa3e8df // udot v31.4s, v6.16b, v3.4b[3]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
+ ".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x6fa2e0f9 // udot v25.4s, v7.16b, v2.4b[1]\n"
+ ".inst 0x6f82e8fa // udot v26.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x6fa2e8fb // udot v27.4s, v7.16b, v2.4b[3]\n"
".inst 0x6f84e0fc // udot v28.4s, v7.16b, v4.4b[0]\n"
".inst 0x6fa4e0fd // udot v29.4s, v7.16b, v4.4b[1]\n"
- "and v19.16b, v24.16b, v20.16b\n"
".inst 0x6f84e8fe // udot v30.4s, v7.16b, v4.4b[2]\n"
".inst 0x6fa4e8ff // udot v31.4s, v7.16b, v4.4b[3]\n"
+ "sqrdmulh v24.4s, v24.4s, v21.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v21.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v21.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v21.4s\n"
+ "and v19.16b, v24.16b, v20.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v21.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v21.4s\n"
"and v18.16b, v25.16b, v20.16b\n"
"and v17.16b, v26.16b, v20.16b\n"
"and v16.16b, v27.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
+ "sqrdmulh v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
+ "and v19.16b, v28.16b, v20.16b\n"
"sqadd v25.4s, v25.4s, v18.4s\n"
"sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
"and v18.16b, v29.16b, v20.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"and v17.16b, v30.16b, v20.16b\n"
"and v16.16b, v31.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v20.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v25.4s, v25.4s, v20.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v19.4s\n"
"sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
"srshl v26.4s, v26.4s, v20.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
"srshl v27.4s, v27.4s, v20.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
"srshl v28.4s, v28.4s, v20.4s\n"
"srshl v29.4s, v29.4s, v20.4s\n"
+ "add v24.4s, v24.4s, v11.4s\n"
"srshl v30.4s, v30.4s, v20.4s\n"
+ "add v25.4s, v25.4s, v11.4s\n"
"srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v14.4s\n"
- "add v25.4s, v25.4s, v14.4s\n"
- "add v26.4s, v26.4s, v14.4s\n"
- "add v27.4s, v27.4s, v14.4s\n"
- "add v28.4s, v28.4s, v14.4s\n"
- "add v29.4s, v29.4s, v14.4s\n"
- "add v30.4s, v30.4s, v14.4s\n"
- "add v31.4s, v31.4s, v14.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v13.4s\n"
- "smax v25.4s, v25.4s, v13.4s\n"
- "smax v26.4s, v26.4s, v13.4s\n"
- "smax v27.4s, v27.4s, v13.4s\n"
- "smax v28.4s, v28.4s, v13.4s\n"
- "smax v29.4s, v29.4s, v13.4s\n"
- "smax v30.4s, v30.4s, v13.4s\n"
- "smax v31.4s, v31.4s, v13.4s\n"
+ "add v26.4s, v26.4s, v11.4s\n"
+ "add v27.4s, v27.4s, v11.4s\n"
+ "add v28.4s, v28.4s, v11.4s\n"
+ "add v29.4s, v29.4s, v11.4s\n"
+ "add v30.4s, v30.4s, v11.4s\n"
+ "add v31.4s, v31.4s, v11.4s\n"
+ "smin v24.4s, v24.4s, v9.4s\n"
+ "smin v25.4s, v25.4s, v9.4s\n"
+ "smin v26.4s, v26.4s, v9.4s\n"
+ "smin v27.4s, v27.4s, v9.4s\n"
+ "smin v28.4s, v28.4s, v9.4s\n"
+ "smin v29.4s, v29.4s, v9.4s\n"
+ "smin v30.4s, v30.4s, v9.4s\n"
+ "smin v31.4s, v31.4s, v9.4s\n"
+ "smax v24.4s, v24.4s, v10.4s\n"
+ "smax v25.4s, v25.4s, v10.4s\n"
+ "smax v26.4s, v26.4s, v10.4s\n"
+ "smax v27.4s, v27.4s, v10.4s\n"
+ "smax v28.4s, v28.4s, v10.4s\n"
+ "smax v29.4s, v29.4s, v10.4s\n"
+ "smax v30.4s, v30.4s, v10.4s\n"
+ "smax v31.4s, v31.4s, v10.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
@@ -509,7 +509,7 @@ void a64_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
"4:" // Tail: End
: [n_channels] "+&r" (n_output_channels), [params] "+&r" (params)
: [inptrs] "r" (inptrs), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
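The long prologue rewritten in the file above computes the input zero-point correction: movi/ushr builds lanes of 0x00010101, so each lane-indexed udot against that register sums the three payload bytes of a packed 4-byte input group, and those sums are multiplied by the negated b_offset (neg/mul) and folded into the initial accumulators. A rough scalar equivalent, with illustrative names and an assumed three-taps-per-group packing:

#include <cstdint>

// One "udot vacc.4s, v_ones.16b, vin.4b[i]" with v_ones lanes of 0x00010101:
// byte 3 multiplies by zero, so only the three packed taps contribute.
static int32_t dot_ones3(const uint8_t g[4])
{
    return g[0] + g[1] + g[2];
}

// Correction folded into each accumulator before requantisation:
// acc += -b_offset * sum(window inputs).
static int32_t b_offset_correction(const uint8_t *groups, int n_groups,
                                   int32_t b_offset)
{
    int32_t sum = 0;
    for (int i = 0; i < n_groups; ++i)
        sum += dot_ones3(&groups[4 * i]);
    return -b_offset * sum;   // "neg v19.4s" then "mul v24.4s, v24.4s, v19.4s"
}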
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
index 61cec2b66d..4558812cbb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,158 +41,158 @@ void a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "ldr q12, [%x[params], #0x0]\n"
+ "ldr q22, [%x[params], #0x0]\n"
"ldr q8, [%x[params], #0x10]\n"
- "movi v30.16b, #0x1\n"
- "movi v17.4s, #0x0\n"
+ "movi v23.16b, #0x1\n"
+ "movi v19.4s, #0x0\n"
"ldr q9, [%x[params], #0x20]\n"
"ldr q10, [%x[params], #0x30]\n"
- "movi v16.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
"ldr q11, [%x[params], #0x40]\n"
"ldr x20, [%x[inptrs], #0x18]\n"
- "movi v24.4s, #0x0\n"
"movi v31.4s, #0x0\n"
- "ld1 { v3.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x20]\n"
- "mov v26.16b, v3.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "ld1 { v4.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "mov v21.16b, v4.16b\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- "ld1 { v2.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x8]\n"
- "mov v27.16b, v2.16b\n"
- "ext v27.16b, v27.16b, v27.16b, #0x1\n"
- "ld1 { v1.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x28]\n"
- "zip1 v3.2d, v3.2d, v26.2d\n"
- "zip1 v4.2d, v4.2d, v21.2d\n"
- "ld1 { v5.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x30]\n"
- "mov v26.16b, v1.16b\n"
- "mov v22.16b, v5.16b\n"
- "ld1 { v6.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x38]\n"
- "mov v19.16b, v6.16b\n"
- "ext v26.16b, v26.16b, v26.16b, #0x1\n"
- "ld1 { v7.16b }, [x20]\n"
- "ldr x20, [%x[inptrs], #0x0]\n"
- "mov v21.16b, v7.16b\n"
- "zip1 v2.2d, v2.2d, v27.2d\n"
- "ld1 { v0.16b }, [x20]\n"
- "ext v22.16b, v22.16b, v22.16b, #0x1\n"
- "ext v19.16b, v19.16b, v19.16b, #0x1\n"
- ".inst 0x6f83e3d1 // udot v17.4s, v30.16b, v3.4b[0]\n"
- "ext v21.16b, v21.16b, v21.16b, #0x1\n"
- ".inst 0x6f83ebd0 // udot v16.4s, v30.16b, v3.4b[2]\n"
- ".inst 0x6f84e3d9 // udot v25.4s, v30.16b, v4.4b[0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v23.4s }, [x20]\n"
- ".inst 0x6f84ebd8 // udot v24.4s, v30.16b, v4.4b[2]\n"
- "mov v18.16b, v0.16b\n"
- ".inst 0x6f82e3df // udot v31.4s, v30.16b, v2.4b[0]\n"
+ "movi v28.4s, #0x0\n"
+ "ldr x24, [%x[inptrs], #0x20]\n"
+ "ldr x23, [%x[inptrs], #0x10]\n"
"movi v29.4s, #0x0\n"
- "movi v28.4s, #0x1\n"
- ".inst 0x6f82ebdd // udot v29.4s, v30.16b, v2.4b[2]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v13.4s }, [x20]\n"
- "ext v18.16b, v18.16b, v18.16b, #0x1\n"
- "zip1 v1.2d, v1.2d, v26.2d\n"
- ".inst 0x6fa3e391 // udot v17.4s, v28.16b, v3.4b[1]\n"
- "zip1 v5.2d, v5.2d, v22.2d\n"
- "zip1 v6.2d, v6.2d, v19.2d\n"
- ".inst 0x6fa3eb90 // udot v16.4s, v28.16b, v3.4b[3]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
- "ld1r { v14.4s }, [x20]\n"
- "zip1 v7.2d, v7.2d, v21.2d\n"
- "movi v22.4s, #0x0\n"
- ".inst 0x6fa4e399 // udot v25.4s, v28.16b, v4.4b[1]\n"
+ "movi v25.4s, #0x1\n"
+ "ldr x22, [%x[inptrs], #0x8]\n"
+ "ldr x21, [%x[inptrs], #0x28]\n"
"movi v21.4s, #0x0\n"
- ".inst 0x6fa4eb98 // udot v24.4s, v28.16b, v4.4b[3]\n"
- ".inst 0x6f81e3d6 // udot v22.4s, v30.16b, v1.4b[0]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v15.4s }, [x20]\n"
+ "movi v16.4s, #0x0\n"
+ "ld1 { v3.16b }, [x20]\n"
+ "ldr x20, [%x[inptrs], #0x30]\n"
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
- ".inst 0x6f81ebd5 // udot v21.4s, v30.16b, v1.4b[2]\n"
+ "ld1 { v4.16b }, [x24]\n"
+ "ld1 { v2.16b }, [x23]\n"
+ "movi v30.4s, #0x0\n"
"movi v20.4s, #0x0\n"
- "movi v19.4s, #0x0\n"
- ".inst 0x6f85e3da // udot v26.4s, v30.16b, v5.4b[0]\n"
- "cmp %x[n_channels], #0x4\n"
- "zip1 v0.2d, v0.2d, v18.2d\n"
+ "ld1 { v1.16b }, [x22]\n"
+ "ld1 { v5.16b }, [x21]\n"
+ "movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
- ".inst 0x6f85ebdb // udot v27.4s, v30.16b, v5.4b[2]\n"
+ "ld1 { v6.16b }, [x20]\n"
+ "mov v7.16b, v3.16b\n"
+ "ldr x22, [%x[inptrs], #0x38]\n"
+ "movi v24.4s, #0x0\n"
+ "mov v0.16b, v4.16b\n"
+ "ldr x21, [%x[inptrs], #0x0]\n"
+ "mov v14.16b, v2.16b\n"
+ "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v12.4s }, [x20]\n"
+ "ext v7.16b, v7.16b, v7.16b, #0x1\n"
+ "add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "add x11, %x[qp], %[offsetof_Requantize32_minval]\n"
+ "ext v0.16b, v0.16b, v0.16b, #0x1\n"
+ "ext v14.16b, v14.16b, v14.16b, #0x1\n"
+ "add x10, %x[qp], %[offsetof_Requantize32_maxval]\n"
+ "cmp %x[n_channels], #0x4\n"
"mov x9, #0x0\n"
- ".inst 0x6f86e3d4 // udot v20.4s, v30.16b, v6.4b[0]\n"
- ".inst 0x6f86ebd3 // udot v19.4s, v30.16b, v6.4b[2]\n"
- "add v17.4s, v17.4s, v25.4s\n"
"mov x28, #0x0\n"
- "movi v25.4s, #0x0\n"
- ".inst 0x6f87e3d2 // udot v18.4s, v30.16b, v7.4b[0]\n"
- ".inst 0x6f87ebd9 // udot v25.4s, v30.16b, v7.4b[2]\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
- ".inst 0x6fa2e39f // udot v31.4s, v28.16b, v2.4b[1]\n"
- ".inst 0x6fa2eb9d // udot v29.4s, v28.16b, v2.4b[3]\n"
- "add v16.4s, v16.4s, v24.4s\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
- "movi v24.4s, #0x0\n"
- ".inst 0x6f80e3d8 // udot v24.4s, v30.16b, v0.4b[0]\n"
- ".inst 0x6fa1e396 // udot v22.4s, v28.16b, v1.4b[1]\n"
+ "zip1 v3.2d, v3.2d, v7.2d\n"
+ "ld1 { v7.16b }, [x22]\n"
+ "neg v12.4s, v12.4s\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
- ".inst 0x6fa1eb95 // udot v21.4s, v28.16b, v1.4b[3]\n"
- ".inst 0x6fa5e39a // udot v26.4s, v28.16b, v5.4b[1]\n"
- "add v31.4s, v31.4s, v17.4s\n"
+ "zip1 v4.2d, v4.2d, v0.2d\n"
+ "ld1 { v0.16b }, [x21]\n"
+ "zip1 v2.2d, v2.2d, v14.2d\n"
+ "ld1r { v14.4s }, [x20]\n"
"ldp x21, x20, [%x[outptrs], #0x30]\n"
- ".inst 0x6fa5eb9b // udot v27.4s, v28.16b, v5.4b[3]\n"
- ".inst 0x6fa6e394 // udot v20.4s, v28.16b, v6.4b[1]\n"
- "add v29.4s, v29.4s, v16.4s\n"
"add %x[params], %x[params], #0x50\n"
- ".inst 0x6fa6eb93 // udot v19.4s, v28.16b, v6.4b[3]\n"
- ".inst 0x6fa7e392 // udot v18.4s, v28.16b, v7.4b[1]\n"
- "add v22.4s, v22.4s, v31.4s\n"
- ".inst 0x6fa7eb99 // udot v25.4s, v28.16b, v7.4b[3]\n"
- ".inst 0x6fa0e398 // udot v24.4s, v28.16b, v0.4b[1]\n"
- "add v21.4s, v21.4s, v29.4s\n"
- "add v20.4s, v26.4s, v20.4s\n"
- "add v19.4s, v27.4s, v19.4s\n"
- "add v18.4s, v18.4s, v17.4s\n"
- "movi v17.4s, #0x0\n"
- ".inst 0x6f80ebd1 // udot v17.4s, v30.16b, v0.4b[2]\n"
- ".inst 0x6fa0eb91 // udot v17.4s, v28.16b, v0.4b[3]\n"
- "add v16.4s, v25.4s, v16.4s\n"
- "add v24.4s, v22.4s, v24.4s\n"
- "add v25.4s, v21.4s, v17.4s\n"
- "add v26.4s, v26.4s, v22.4s\n"
- "add v27.4s, v27.4s, v21.4s\n"
- "add v28.4s, v20.4s, v31.4s\n"
- "add v29.4s, v19.4s, v29.4s\n"
- "add v30.4s, v20.4s, v18.4s\n"
- "add v31.4s, v19.4s, v16.4s\n"
- "neg v23.4s, v23.4s\n"
- "mul v24.4s, v24.4s, v23.4s\n"
- "mul v25.4s, v25.4s, v23.4s\n"
- "mul v26.4s, v26.4s, v23.4s\n"
- "mul v27.4s, v27.4s, v23.4s\n"
- "mul v28.4s, v28.4s, v23.4s\n"
- "mul v29.4s, v29.4s, v23.4s\n"
- "mul v30.4s, v30.4s, v23.4s\n"
- "mul v31.4s, v31.4s, v23.4s\n"
- "zip1 v19.4s, v24.4s, v26.4s\n"
- "zip1 v18.4s, v25.4s, v27.4s\n"
+ ".inst 0x6f83e2f3 // udot v19.4s, v23.16b, v3.4b[0]\n"
+ ".inst 0x6f83eaed // udot v13.4s, v23.16b, v3.4b[2]\n"
+ ".inst 0x6f84e2ef // udot v15.4s, v23.16b, v4.4b[0]\n"
+ ".inst 0x6f84eaff // udot v31.4s, v23.16b, v4.4b[2]\n"
+ ".inst 0x6f82e2fc // udot v28.4s, v23.16b, v2.4b[0]\n"
+ ".inst 0x6f82eafd // udot v29.4s, v23.16b, v2.4b[2]\n"
+ ".inst 0x6fa3e333 // udot v19.4s, v25.16b, v3.4b[1]\n"
+ ".inst 0x6fa3eb2d // udot v13.4s, v25.16b, v3.4b[3]\n"
+ ".inst 0x6fa4e32f // udot v15.4s, v25.16b, v4.4b[1]\n"
+ ".inst 0x6fa4eb3f // udot v31.4s, v25.16b, v4.4b[3]\n"
+ ".inst 0x6fa2e33c // udot v28.4s, v25.16b, v2.4b[1]\n"
+ ".inst 0x6fa2eb3d // udot v29.4s, v25.16b, v2.4b[3]\n"
+ "add v19.4s, v19.4s, v15.4s\n"
+ "ld1r { v15.4s }, [x11]\n"
+ "add v31.4s, v13.4s, v31.4s\n"
+ "mov v13.16b, v1.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ "add v28.4s, v28.4s, v19.4s\n"
+ "add v29.4s, v29.4s, v31.4s\n"
+ "zip1 v1.2d, v1.2d, v13.2d\n"
+ "mov v13.16b, v5.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x6f81e2f5 // udot v21.4s, v23.16b, v1.4b[0]\n"
+ ".inst 0x6f81eaf0 // udot v16.4s, v23.16b, v1.4b[2]\n"
+ "zip1 v5.2d, v5.2d, v13.2d\n"
+ "mov v13.16b, v6.16b\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x6f85e2fa // udot v26.4s, v23.16b, v5.4b[0]\n"
+ ".inst 0x6f85eafb // udot v27.4s, v23.16b, v5.4b[2]\n"
+ ".inst 0x6fa1e335 // udot v21.4s, v25.16b, v1.4b[1]\n"
+ "zip1 v6.2d, v6.2d, v13.2d\n"
+ "mov v13.16b, v7.16b\n"
+ ".inst 0x6fa1eb30 // udot v16.4s, v25.16b, v1.4b[3]\n"
+ "ext v13.16b, v13.16b, v13.16b, #0x1\n"
+ ".inst 0x6f86e2fe // udot v30.4s, v23.16b, v6.4b[0]\n"
+ ".inst 0x6f86eaf4 // udot v20.4s, v23.16b, v6.4b[2]\n"
+ ".inst 0x6fa5e33a // udot v26.4s, v25.16b, v5.4b[1]\n"
+ ".inst 0x6fa5eb3b // udot v27.4s, v25.16b, v5.4b[3]\n"
+ "add v21.4s, v21.4s, v28.4s\n"
+ "zip1 v7.2d, v7.2d, v13.2d\n"
+ "ld1r { v13.4s }, [x10]\n"
+ "add v16.4s, v16.4s, v29.4s\n"
+ ".inst 0x6fa6e33e // udot v30.4s, v25.16b, v6.4b[1]\n"
+ ".inst 0x6fa6eb34 // udot v20.4s, v25.16b, v6.4b[3]\n"
+ ".inst 0x6f87e2f1 // udot v17.4s, v23.16b, v7.4b[0]\n"
+ ".inst 0x6f87eaf2 // udot v18.4s, v23.16b, v7.4b[2]\n"
+ "add v30.4s, v26.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v21.4s\n"
+ ".inst 0x6fa7e331 // udot v17.4s, v25.16b, v7.4b[1]\n"
+ "add v20.4s, v27.4s, v20.4s\n"
+ "add v27.4s, v27.4s, v16.4s\n"
+ "add v28.4s, v30.4s, v28.4s\n"
+ ".inst 0x6fa7eb32 // udot v18.4s, v25.16b, v7.4b[3]\n"
+ "mul v26.4s, v26.4s, v12.4s\n"
+ "add v19.4s, v17.4s, v19.4s\n"
+ "mov v17.16b, v0.16b\n"
+ "add v29.4s, v20.4s, v29.4s\n"
+ "mul v27.4s, v27.4s, v12.4s\n"
+ "mul v28.4s, v28.4s, v12.4s\n"
+ "ext v17.16b, v17.16b, v17.16b, #0x1\n"
+ "add v31.4s, v18.4s, v31.4s\n"
+ "movi v18.4s, #0x0\n"
+ "add v30.4s, v30.4s, v19.4s\n"
+ "mul v29.4s, v29.4s, v12.4s\n"
+ "zip1 v0.2d, v0.2d, v17.2d\n"
+ "add v31.4s, v20.4s, v31.4s\n"
+ "mul v30.4s, v30.4s, v12.4s\n"
+ ".inst 0x6f80e2f8 // udot v24.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x6f80eaf2 // udot v18.4s, v23.16b, v0.4b[2]\n"
+ "mul v31.4s, v31.4s, v12.4s\n"
"zip1 v17.4s, v28.4s, v30.4s\n"
- "zip1 v16.4s, v29.4s, v31.4s\n"
- "zip1 v22.4s, v19.4s, v18.4s\n"
- "zip1 v23.4s, v17.4s, v16.4s\n"
- "add v24.4s, v24.4s, v12.4s\n"
- "add v25.4s, v25.4s, v12.4s\n"
- "add v26.4s, v26.4s, v12.4s\n"
- "add v27.4s, v27.4s, v12.4s\n"
- "add v28.4s, v28.4s, v12.4s\n"
- "add v29.4s, v29.4s, v12.4s\n"
- "add v30.4s, v30.4s, v12.4s\n"
- "add v31.4s, v31.4s, v12.4s\n"
+ "add v28.4s, v28.4s, v22.4s\n"
+ "add v30.4s, v30.4s, v22.4s\n"
+ ".inst 0x6fa0e338 // udot v24.4s, v25.16b, v0.4b[1]\n"
+ "zip1 v19.4s, v29.4s, v31.4s\n"
+ "add v29.4s, v29.4s, v22.4s\n"
+ ".inst 0x6fa0eb32 // udot v18.4s, v25.16b, v0.4b[3]\n"
+ "add v31.4s, v31.4s, v22.4s\n"
+ "add v24.4s, v21.4s, v24.4s\n"
+ "zip1 v23.4s, v17.4s, v19.4s\n"
+ "add v25.4s, v16.4s, v18.4s\n"
+ "mul v24.4s, v24.4s, v12.4s\n"
+ "mul v25.4s, v25.4s, v12.4s\n"
+ "zip1 v17.4s, v24.4s, v26.4s\n"
+ "add v26.4s, v26.4s, v22.4s\n"
+ "zip1 v16.4s, v25.4s, v27.4s\n"
+ "add v27.4s, v27.4s, v22.4s\n"
+ "add v24.4s, v24.4s, v22.4s\n"
+ "add v25.4s, v25.4s, v22.4s\n"
+ "zip1 v22.4s, v17.4s, v16.4s\n"
"ble 2f\n"
"1:" // Loop
"ldr q12, [%x[params], #0x60]\n"
@@ -203,159 +203,159 @@ void a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
".inst 0x6f81e11a // udot v26.4s, v8.16b, v1.4b[0]\n"
".inst 0x6f81e91b // udot v27.4s, v8.16b, v1.4b[2]\n"
"sub %x[n_channels], %x[n_channels], #0x4\n"
- ".inst 0x6fa0e138 // udot v24.4s, v9.16b, v0.4b[1]\n"
- ".inst 0x6fa0e939 // udot v25.4s, v9.16b, v0.4b[3]\n"
- "cmp %x[n_channels], #0x4\n"
- "add x9, x9, #0x10\n"
- ".inst 0x6fa1e13a // udot v26.4s, v9.16b, v1.4b[1]\n"
- ".inst 0x6fa1e93b // udot v27.4s, v9.16b, v1.4b[3]\n"
".inst 0x6f82e11c // udot v28.4s, v8.16b, v2.4b[0]\n"
".inst 0x6f82e91d // udot v29.4s, v8.16b, v2.4b[2]\n"
+ "cmp %x[n_channels], #0x4\n"
+ "add x9, x9, #0x10\n"
".inst 0x6f83e11e // udot v30.4s, v8.16b, v3.4b[0]\n"
".inst 0x6f83e91f // udot v31.4s, v8.16b, v3.4b[2]\n"
"ldr q17, [%x[params], #0x0]\n"
- ".inst 0x6f81e158 // udot v24.4s, v10.16b, v1.4b[0]\n"
- ".inst 0x6f81e959 // udot v25.4s, v10.16b, v1.4b[2]\n"
- ".inst 0x6f82e15a // udot v26.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x6f82e95b // udot v27.4s, v10.16b, v2.4b[2]\n"
+ ".inst 0x6fa0e138 // udot v24.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x6fa0e939 // udot v25.4s, v9.16b, v0.4b[3]\n"
+ ".inst 0x6fa1e13a // udot v26.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x6fa1e93b // udot v27.4s, v9.16b, v1.4b[3]\n"
".inst 0x6fa2e13c // udot v28.4s, v9.16b, v2.4b[1]\n"
".inst 0x6fa2e93d // udot v29.4s, v9.16b, v2.4b[3]\n"
".inst 0x6fa3e13e // udot v30.4s, v9.16b, v3.4b[1]\n"
".inst 0x6fa3e93f // udot v31.4s, v9.16b, v3.4b[3]\n"
"ldr q16, [%x[params], #0x10]\n"
- ".inst 0x6fa1e178 // udot v24.4s, v11.16b, v1.4b[1]\n"
- ".inst 0x6fa1e979 // udot v25.4s, v11.16b, v1.4b[3]\n"
- ".inst 0x6fa2e17a // udot v26.4s, v11.16b, v2.4b[1]\n"
- ".inst 0x6fa2e97b // udot v27.4s, v11.16b, v2.4b[3]\n"
+ ".inst 0x6f81e158 // udot v24.4s, v10.16b, v1.4b[0]\n"
+ ".inst 0x6f81e959 // udot v25.4s, v10.16b, v1.4b[2]\n"
+ ".inst 0x6f82e15a // udot v26.4s, v10.16b, v2.4b[0]\n"
+ ".inst 0x6f82e95b // udot v27.4s, v10.16b, v2.4b[2]\n"
".inst 0x6f83e15c // udot v28.4s, v10.16b, v3.4b[0]\n"
".inst 0x6f83e95d // udot v29.4s, v10.16b, v3.4b[2]\n"
".inst 0x6f84e15e // udot v30.4s, v10.16b, v4.4b[0]\n"
".inst 0x6f84e95f // udot v31.4s, v10.16b, v4.4b[2]\n"
"ldr q19, [%x[params], #0x20]\n"
- ".inst 0x6f82e238 // udot v24.4s, v17.16b, v2.4b[0]\n"
- ".inst 0x6f82ea39 // udot v25.4s, v17.16b, v2.4b[2]\n"
- ".inst 0x6f83e23a // udot v26.4s, v17.16b, v3.4b[0]\n"
- ".inst 0x6f83ea3b // udot v27.4s, v17.16b, v3.4b[2]\n"
+ ".inst 0x6fa1e178 // udot v24.4s, v11.16b, v1.4b[1]\n"
+ ".inst 0x6fa1e979 // udot v25.4s, v11.16b, v1.4b[3]\n"
+ ".inst 0x6fa2e17a // udot v26.4s, v11.16b, v2.4b[1]\n"
+ ".inst 0x6fa2e97b // udot v27.4s, v11.16b, v2.4b[3]\n"
".inst 0x6fa3e17c // udot v28.4s, v11.16b, v3.4b[1]\n"
".inst 0x6fa3e97d // udot v29.4s, v11.16b, v3.4b[3]\n"
".inst 0x6fa4e17e // udot v30.4s, v11.16b, v4.4b[1]\n"
".inst 0x6fa4e97f // udot v31.4s, v11.16b, v4.4b[3]\n"
"ldr q18, [%x[params], #0x30]\n"
- ".inst 0x6fa2e218 // udot v24.4s, v16.16b, v2.4b[1]\n"
- ".inst 0x6fa2ea19 // udot v25.4s, v16.16b, v2.4b[3]\n"
- ".inst 0x6fa3e21a // udot v26.4s, v16.16b, v3.4b[1]\n"
- ".inst 0x6fa3ea1b // udot v27.4s, v16.16b, v3.4b[3]\n"
+ ".inst 0x6f82e238 // udot v24.4s, v17.16b, v2.4b[0]\n"
+ ".inst 0x6f82ea39 // udot v25.4s, v17.16b, v2.4b[2]\n"
+ ".inst 0x6f83e23a // udot v26.4s, v17.16b, v3.4b[0]\n"
+ ".inst 0x6f83ea3b // udot v27.4s, v17.16b, v3.4b[2]\n"
".inst 0x6f84e23c // udot v28.4s, v17.16b, v4.4b[0]\n"
".inst 0x6f84ea3d // udot v29.4s, v17.16b, v4.4b[2]\n"
".inst 0x6f85e23e // udot v30.4s, v17.16b, v5.4b[0]\n"
".inst 0x6f85ea3f // udot v31.4s, v17.16b, v5.4b[2]\n"
"ldr q17, [%x[params], #0x40]\n"
- ".inst 0x6f83e278 // udot v24.4s, v19.16b, v3.4b[0]\n"
- ".inst 0x6f83ea79 // udot v25.4s, v19.16b, v3.4b[2]\n"
- ".inst 0x6f84e27a // udot v26.4s, v19.16b, v4.4b[0]\n"
- ".inst 0x6f84ea7b // udot v27.4s, v19.16b, v4.4b[2]\n"
+ ".inst 0x6fa2e218 // udot v24.4s, v16.16b, v2.4b[1]\n"
+ ".inst 0x6fa2ea19 // udot v25.4s, v16.16b, v2.4b[3]\n"
+ ".inst 0x6fa3e21a // udot v26.4s, v16.16b, v3.4b[1]\n"
+ ".inst 0x6fa3ea1b // udot v27.4s, v16.16b, v3.4b[3]\n"
".inst 0x6fa4e21c // udot v28.4s, v16.16b, v4.4b[1]\n"
".inst 0x6fa4ea1d // udot v29.4s, v16.16b, v4.4b[3]\n"
".inst 0x6fa5e21e // udot v30.4s, v16.16b, v5.4b[1]\n"
".inst 0x6fa5ea1f // udot v31.4s, v16.16b, v5.4b[3]\n"
"ldr q16, [%x[params], #0x50]\n"
- ".inst 0x6fa3e258 // udot v24.4s, v18.16b, v3.4b[1]\n"
- ".inst 0x6fa3ea59 // udot v25.4s, v18.16b, v3.4b[3]\n"
- ".inst 0x6fa4e25a // udot v26.4s, v18.16b, v4.4b[1]\n"
- ".inst 0x6fa4ea5b // udot v27.4s, v18.16b, v4.4b[3]\n"
+ ".inst 0x6f83e278 // udot v24.4s, v19.16b, v3.4b[0]\n"
+ ".inst 0x6f83ea79 // udot v25.4s, v19.16b, v3.4b[2]\n"
+ ".inst 0x6f84e27a // udot v26.4s, v19.16b, v4.4b[0]\n"
+ ".inst 0x6f84ea7b // udot v27.4s, v19.16b, v4.4b[2]\n"
".inst 0x6f85e27c // udot v28.4s, v19.16b, v5.4b[0]\n"
".inst 0x6f85ea7d // udot v29.4s, v19.16b, v5.4b[2]\n"
".inst 0x6f86e27e // udot v30.4s, v19.16b, v6.4b[0]\n"
".inst 0x6f86ea7f // udot v31.4s, v19.16b, v6.4b[2]\n"
"ldr q10, [%x[params], #0xb0]\n"
- ".inst 0x6f84e238 // udot v24.4s, v17.16b, v4.4b[0]\n"
- ".inst 0x6f84ea39 // udot v25.4s, v17.16b, v4.4b[2]\n"
- ".inst 0x6f85e23a // udot v26.4s, v17.16b, v5.4b[0]\n"
- ".inst 0x6f85ea3b // udot v27.4s, v17.16b, v5.4b[2]\n"
+ ".inst 0x6fa3e258 // udot v24.4s, v18.16b, v3.4b[1]\n"
+ ".inst 0x6fa3ea59 // udot v25.4s, v18.16b, v3.4b[3]\n"
+ ".inst 0x6fa4e25a // udot v26.4s, v18.16b, v4.4b[1]\n"
+ ".inst 0x6fa4ea5b // udot v27.4s, v18.16b, v4.4b[3]\n"
".inst 0x6fa5e25c // udot v28.4s, v18.16b, v5.4b[1]\n"
".inst 0x6fa5ea5d // udot v29.4s, v18.16b, v5.4b[3]\n"
".inst 0x6fa6e25e // udot v30.4s, v18.16b, v6.4b[1]\n"
".inst 0x6fa6ea5f // udot v31.4s, v18.16b, v6.4b[3]\n"
"ldr q11, [%x[params], #0xc0]\n"
- ".inst 0x6fa4e218 // udot v24.4s, v16.16b, v4.4b[1]\n"
- ".inst 0x6fa4ea19 // udot v25.4s, v16.16b, v4.4b[3]\n"
- "sqrdmulh v24.4s, v24.4s, v12.4s\n"
- ".inst 0x6fa5e21a // udot v26.4s, v16.16b, v5.4b[1]\n"
- ".inst 0x6fa5ea1b // udot v27.4s, v16.16b, v5.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v12.4s\n"
+ ".inst 0x6f84e238 // udot v24.4s, v17.16b, v4.4b[0]\n"
+ ".inst 0x6f84ea39 // udot v25.4s, v17.16b, v4.4b[2]\n"
+ ".inst 0x6f85e23a // udot v26.4s, v17.16b, v5.4b[0]\n"
+ ".inst 0x6f85ea3b // udot v27.4s, v17.16b, v5.4b[2]\n"
".inst 0x6f86e23c // udot v28.4s, v17.16b, v6.4b[0]\n"
".inst 0x6f86ea3d // udot v29.4s, v17.16b, v6.4b[2]\n"
- "sqrdmulh v26.4s, v26.4s, v12.4s\n"
".inst 0x6f87e23e // udot v30.4s, v17.16b, v7.4b[0]\n"
".inst 0x6f87ea3f // udot v31.4s, v17.16b, v7.4b[2]\n"
"ldr q8, [%x[params], #0x90]\n"
- "sqrdmulh v27.4s, v27.4s, v12.4s\n"
+ ".inst 0x6fa4e218 // udot v24.4s, v16.16b, v4.4b[1]\n"
+ ".inst 0x6fa4ea19 // udot v25.4s, v16.16b, v4.4b[3]\n"
+ ".inst 0x6fa5e21a // udot v26.4s, v16.16b, v5.4b[1]\n"
+ ".inst 0x6fa5ea1b // udot v27.4s, v16.16b, v5.4b[3]\n"
".inst 0x6fa6e21c // udot v28.4s, v16.16b, v6.4b[1]\n"
".inst 0x6fa6ea1d // udot v29.4s, v16.16b, v6.4b[3]\n"
- "and v19.16b, v24.16b, v21.16b\n"
".inst 0x6fa7e21e // udot v30.4s, v16.16b, v7.4b[1]\n"
".inst 0x6fa7ea1f // udot v31.4s, v16.16b, v7.4b[3]\n"
"ldr q9, [%x[params], #0xa0]\n"
+ "add %x[params], %x[params], #0xd0\n"
+ "sqrdmulh v24.4s, v24.4s, v12.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v12.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v12.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v12.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v12.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v12.4s\n"
+ "and v19.16b, v24.16b, v21.16b\n"
"and v18.16b, v25.16b, v21.16b\n"
"and v17.16b, v26.16b, v21.16b\n"
"and v16.16b, v27.16b, v21.16b\n"
- "add %x[params], %x[params], #0xd0\n"
+ "sqrdmulh v30.4s, v30.4s, v12.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v12.4s\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v12.4s\n"
- "sqrdmulh v29.4s, v29.4s, v12.4s\n"
- "sqrdmulh v30.4s, v30.4s, v12.4s\n"
- "sqrdmulh v31.4s, v31.4s, v12.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
+ "and v19.16b, v28.16b, v21.16b\n"
"sqadd v25.4s, v25.4s, v18.4s\n"
+ "and v18.16b, v29.16b, v21.16b\n"
"sqadd v26.4s, v26.4s, v17.4s\n"
"sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v21.16b\n"
- "and v18.16b, v29.16b, v21.16b\n"
"and v17.16b, v30.16b, v21.16b\n"
"and v16.16b, v31.16b, v21.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v21.4s\n"
+ "srshl v25.4s, v25.4s, v21.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v19.4s\n"
"sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v21.4s\n"
- "srshl v25.4s, v25.4s, v21.4s\n"
"srshl v26.4s, v26.4s, v21.4s\n"
"srshl v27.4s, v27.4s, v21.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
"srshl v28.4s, v28.4s, v21.4s\n"
"srshl v29.4s, v29.4s, v21.4s\n"
+ "add v24.4s, v24.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v14.4s\n"
"srshl v30.4s, v30.4s, v21.4s\n"
"srshl v31.4s, v31.4s, v21.4s\n"
- "add v24.4s, v24.4s, v13.4s\n"
- "add v25.4s, v25.4s, v13.4s\n"
- "add v26.4s, v26.4s, v13.4s\n"
- "add v27.4s, v27.4s, v13.4s\n"
- "add v28.4s, v28.4s, v13.4s\n"
- "add v29.4s, v29.4s, v13.4s\n"
- "add v30.4s, v30.4s, v13.4s\n"
- "add v31.4s, v31.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v14.4s\n"
- "smax v25.4s, v25.4s, v14.4s\n"
- "smax v26.4s, v26.4s, v14.4s\n"
- "smax v27.4s, v27.4s, v14.4s\n"
- "smax v28.4s, v28.4s, v14.4s\n"
- "smax v29.4s, v29.4s, v14.4s\n"
- "smax v30.4s, v30.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v14.4s\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v27.4s, v27.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v14.4s\n"
+ "add v29.4s, v29.4s, v14.4s\n"
+ "add v30.4s, v30.4s, v14.4s\n"
+ "add v31.4s, v31.4s, v14.4s\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "smax v24.4s, v24.4s, v15.4s\n"
+ "smax v25.4s, v25.4s, v15.4s\n"
+ "smax v26.4s, v26.4s, v15.4s\n"
+ "smax v27.4s, v27.4s, v15.4s\n"
+ "smax v28.4s, v28.4s, v15.4s\n"
+ "smax v29.4s, v29.4s, v15.4s\n"
+ "smax v30.4s, v30.4s, v15.4s\n"
+ "smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
@@ -366,33 +366,33 @@ void a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
"uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
- "str s24, [x27, x28]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
- "str s25, [x26, x28]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
- "str s26, [x25, x28]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
- "str s27, [x24, x28]\n"
- "str s28, [x23, x28]\n"
+ "str s24, [x27, x28]\n"
+ "str s25, [x26, x28]\n"
"dup v24.4s, v22.s[0]\n"
"dup v25.4s, v22.s[1]\n"
- "str s29, [x22, x28]\n"
+ "str s26, [x25, x28]\n"
"dup v26.4s, v22.s[2]\n"
+ "str s27, [x24, x28]\n"
"dup v27.4s, v22.s[3]\n"
- "str s30, [x21, x28]\n"
+ "add v24.4s, v24.4s, v20.4s\n"
+ "str s28, [x23, x28]\n"
"dup v28.4s, v23.s[0]\n"
+ "add v25.4s, v25.4s, v20.4s\n"
+ "str s29, [x22, x28]\n"
"dup v29.4s, v23.s[1]\n"
- "str s31, [x20, x28]\n"
+ "add v26.4s, v26.4s, v20.4s\n"
+ "str s30, [x21, x28]\n"
"dup v30.4s, v23.s[2]\n"
+ "add v27.4s, v27.4s, v20.4s\n"
+ "str s31, [x20, x28]\n"
"dup v31.4s, v23.s[3]\n"
"add x28, x28, #0x4\n"
- "add v24.4s, v24.4s, v20.4s\n"
- "add v25.4s, v25.4s, v20.4s\n"
- "add v26.4s, v26.4s, v20.4s\n"
- "add v27.4s, v27.4s, v20.4s\n"
"add v28.4s, v28.4s, v20.4s\n"
"add v29.4s, v29.4s, v20.4s\n"
"add v30.4s, v30.4s, v20.4s\n"
@@ -407,160 +407,160 @@ void a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
".inst 0x6f81e91b // udot v27.4s, v8.16b, v1.4b[2]\n"
"cmp %x[n_channels], #0x4\n"
"add x27, x27, x28\n"
- ".inst 0x6fa0e138 // udot v24.4s, v9.16b, v0.4b[1]\n"
- ".inst 0x6fa0e939 // udot v25.4s, v9.16b, v0.4b[3]\n"
+ ".inst 0x6f82e11c // udot v28.4s, v8.16b, v2.4b[0]\n"
+ ".inst 0x6f82e91d // udot v29.4s, v8.16b, v2.4b[2]\n"
"add x26, x26, x28\n"
"add x25, x25, x28\n"
- ".inst 0x6fa1e13a // udot v26.4s, v9.16b, v1.4b[1]\n"
- ".inst 0x6fa1e93b // udot v27.4s, v9.16b, v1.4b[3]\n"
+ ".inst 0x6f83e11e // udot v30.4s, v8.16b, v3.4b[0]\n"
+ ".inst 0x6f83e91f // udot v31.4s, v8.16b, v3.4b[2]\n"
+ "ldr q17, [%x[params], #0x0]\n"
"add x24, x24, x28\n"
+ ".inst 0x6fa0e138 // udot v24.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x6fa0e939 // udot v25.4s, v9.16b, v0.4b[3]\n"
"add x23, x23, x28\n"
- ".inst 0x6f82e11c // udot v28.4s, v8.16b, v2.4b[0]\n"
- ".inst 0x6f82e91d // udot v29.4s, v8.16b, v2.4b[2]\n"
"add x22, x22, x28\n"
+ ".inst 0x6fa1e13a // udot v26.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x6fa1e93b // udot v27.4s, v9.16b, v1.4b[3]\n"
"add x21, x21, x28\n"
- ".inst 0x6f83e11e // udot v30.4s, v8.16b, v3.4b[0]\n"
- ".inst 0x6f83e91f // udot v31.4s, v8.16b, v3.4b[2]\n"
- "ldr q17, [%x[params], #0x0]\n"
"add x20, x20, x28\n"
- ".inst 0x6f81e158 // udot v24.4s, v10.16b, v1.4b[0]\n"
- ".inst 0x6f81e959 // udot v25.4s, v10.16b, v1.4b[2]\n"
- ".inst 0x6f82e15a // udot v26.4s, v10.16b, v2.4b[0]\n"
- ".inst 0x6f82e95b // udot v27.4s, v10.16b, v2.4b[2]\n"
".inst 0x6fa2e13c // udot v28.4s, v9.16b, v2.4b[1]\n"
".inst 0x6fa2e93d // udot v29.4s, v9.16b, v2.4b[3]\n"
".inst 0x6fa3e13e // udot v30.4s, v9.16b, v3.4b[1]\n"
".inst 0x6fa3e93f // udot v31.4s, v9.16b, v3.4b[3]\n"
"ldr q16, [%x[params], #0x10]\n"
- ".inst 0x6fa1e178 // udot v24.4s, v11.16b, v1.4b[1]\n"
- ".inst 0x6fa1e979 // udot v25.4s, v11.16b, v1.4b[3]\n"
- ".inst 0x6fa2e17a // udot v26.4s, v11.16b, v2.4b[1]\n"
- ".inst 0x6fa2e97b // udot v27.4s, v11.16b, v2.4b[3]\n"
+ ".inst 0x6f81e158 // udot v24.4s, v10.16b, v1.4b[0]\n"
+ ".inst 0x6f81e959 // udot v25.4s, v10.16b, v1.4b[2]\n"
+ ".inst 0x6f82e15a // udot v26.4s, v10.16b, v2.4b[0]\n"
+ ".inst 0x6f82e95b // udot v27.4s, v10.16b, v2.4b[2]\n"
".inst 0x6f83e15c // udot v28.4s, v10.16b, v3.4b[0]\n"
".inst 0x6f83e95d // udot v29.4s, v10.16b, v3.4b[2]\n"
".inst 0x6f84e15e // udot v30.4s, v10.16b, v4.4b[0]\n"
".inst 0x6f84e95f // udot v31.4s, v10.16b, v4.4b[2]\n"
"ldr q19, [%x[params], #0x20]\n"
- ".inst 0x6f82e238 // udot v24.4s, v17.16b, v2.4b[0]\n"
- ".inst 0x6f82ea39 // udot v25.4s, v17.16b, v2.4b[2]\n"
- ".inst 0x6f83e23a // udot v26.4s, v17.16b, v3.4b[0]\n"
- ".inst 0x6f83ea3b // udot v27.4s, v17.16b, v3.4b[2]\n"
+ ".inst 0x6fa1e178 // udot v24.4s, v11.16b, v1.4b[1]\n"
+ ".inst 0x6fa1e979 // udot v25.4s, v11.16b, v1.4b[3]\n"
+ ".inst 0x6fa2e17a // udot v26.4s, v11.16b, v2.4b[1]\n"
+ ".inst 0x6fa2e97b // udot v27.4s, v11.16b, v2.4b[3]\n"
".inst 0x6fa3e17c // udot v28.4s, v11.16b, v3.4b[1]\n"
".inst 0x6fa3e97d // udot v29.4s, v11.16b, v3.4b[3]\n"
".inst 0x6fa4e17e // udot v30.4s, v11.16b, v4.4b[1]\n"
".inst 0x6fa4e97f // udot v31.4s, v11.16b, v4.4b[3]\n"
"ldr q18, [%x[params], #0x30]\n"
- ".inst 0x6fa2e218 // udot v24.4s, v16.16b, v2.4b[1]\n"
- ".inst 0x6fa2ea19 // udot v25.4s, v16.16b, v2.4b[3]\n"
- ".inst 0x6fa3e21a // udot v26.4s, v16.16b, v3.4b[1]\n"
- ".inst 0x6fa3ea1b // udot v27.4s, v16.16b, v3.4b[3]\n"
+ ".inst 0x6f82e238 // udot v24.4s, v17.16b, v2.4b[0]\n"
+ ".inst 0x6f82ea39 // udot v25.4s, v17.16b, v2.4b[2]\n"
+ ".inst 0x6f83e23a // udot v26.4s, v17.16b, v3.4b[0]\n"
+ ".inst 0x6f83ea3b // udot v27.4s, v17.16b, v3.4b[2]\n"
".inst 0x6f84e23c // udot v28.4s, v17.16b, v4.4b[0]\n"
".inst 0x6f84ea3d // udot v29.4s, v17.16b, v4.4b[2]\n"
".inst 0x6f85e23e // udot v30.4s, v17.16b, v5.4b[0]\n"
".inst 0x6f85ea3f // udot v31.4s, v17.16b, v5.4b[2]\n"
"ldr q17, [%x[params], #0x40]\n"
- ".inst 0x6f83e278 // udot v24.4s, v19.16b, v3.4b[0]\n"
- ".inst 0x6f83ea79 // udot v25.4s, v19.16b, v3.4b[2]\n"
- ".inst 0x6f84e27a // udot v26.4s, v19.16b, v4.4b[0]\n"
- ".inst 0x6f84ea7b // udot v27.4s, v19.16b, v4.4b[2]\n"
+ ".inst 0x6fa2e218 // udot v24.4s, v16.16b, v2.4b[1]\n"
+ ".inst 0x6fa2ea19 // udot v25.4s, v16.16b, v2.4b[3]\n"
+ ".inst 0x6fa3e21a // udot v26.4s, v16.16b, v3.4b[1]\n"
+ ".inst 0x6fa3ea1b // udot v27.4s, v16.16b, v3.4b[3]\n"
".inst 0x6fa4e21c // udot v28.4s, v16.16b, v4.4b[1]\n"
".inst 0x6fa4ea1d // udot v29.4s, v16.16b, v4.4b[3]\n"
".inst 0x6fa5e21e // udot v30.4s, v16.16b, v5.4b[1]\n"
".inst 0x6fa5ea1f // udot v31.4s, v16.16b, v5.4b[3]\n"
"ldr q16, [%x[params], #0x50]\n"
"add %x[params], %x[params], #0x80\n"
- ".inst 0x6fa3e258 // udot v24.4s, v18.16b, v3.4b[1]\n"
- ".inst 0x6fa3ea59 // udot v25.4s, v18.16b, v3.4b[3]\n"
- ".inst 0x6fa4e25a // udot v26.4s, v18.16b, v4.4b[1]\n"
- ".inst 0x6fa4ea5b // udot v27.4s, v18.16b, v4.4b[3]\n"
+ ".inst 0x6f83e278 // udot v24.4s, v19.16b, v3.4b[0]\n"
+ ".inst 0x6f83ea79 // udot v25.4s, v19.16b, v3.4b[2]\n"
+ ".inst 0x6f84e27a // udot v26.4s, v19.16b, v4.4b[0]\n"
+ ".inst 0x6f84ea7b // udot v27.4s, v19.16b, v4.4b[2]\n"
".inst 0x6f85e27c // udot v28.4s, v19.16b, v5.4b[0]\n"
".inst 0x6f85ea7d // udot v29.4s, v19.16b, v5.4b[2]\n"
".inst 0x6f86e27e // udot v30.4s, v19.16b, v6.4b[0]\n"
".inst 0x6f86ea7f // udot v31.4s, v19.16b, v6.4b[2]\n"
- ".inst 0x6f84e238 // udot v24.4s, v17.16b, v4.4b[0]\n"
- ".inst 0x6f84ea39 // udot v25.4s, v17.16b, v4.4b[2]\n"
- ".inst 0x6f85e23a // udot v26.4s, v17.16b, v5.4b[0]\n"
- ".inst 0x6f85ea3b // udot v27.4s, v17.16b, v5.4b[2]\n"
+ ".inst 0x6fa3e258 // udot v24.4s, v18.16b, v3.4b[1]\n"
+ ".inst 0x6fa3ea59 // udot v25.4s, v18.16b, v3.4b[3]\n"
+ ".inst 0x6fa4e25a // udot v26.4s, v18.16b, v4.4b[1]\n"
+ ".inst 0x6fa4ea5b // udot v27.4s, v18.16b, v4.4b[3]\n"
".inst 0x6fa5e25c // udot v28.4s, v18.16b, v5.4b[1]\n"
".inst 0x6fa5ea5d // udot v29.4s, v18.16b, v5.4b[3]\n"
".inst 0x6fa6e25e // udot v30.4s, v18.16b, v6.4b[1]\n"
".inst 0x6fa6ea5f // udot v31.4s, v18.16b, v6.4b[3]\n"
- ".inst 0x6fa4e218 // udot v24.4s, v16.16b, v4.4b[1]\n"
- ".inst 0x6fa4ea19 // udot v25.4s, v16.16b, v4.4b[3]\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- ".inst 0x6fa5e21a // udot v26.4s, v16.16b, v5.4b[1]\n"
- ".inst 0x6fa5ea1b // udot v27.4s, v16.16b, v5.4b[3]\n"
- "sqrdmulh v25.4s, v25.4s, v21.4s\n"
+ ".inst 0x6f84e238 // udot v24.4s, v17.16b, v4.4b[0]\n"
+ ".inst 0x6f84ea39 // udot v25.4s, v17.16b, v4.4b[2]\n"
+ ".inst 0x6f85e23a // udot v26.4s, v17.16b, v5.4b[0]\n"
+ ".inst 0x6f85ea3b // udot v27.4s, v17.16b, v5.4b[2]\n"
".inst 0x6f86e23c // udot v28.4s, v17.16b, v6.4b[0]\n"
".inst 0x6f86ea3d // udot v29.4s, v17.16b, v6.4b[2]\n"
- "sqrdmulh v26.4s, v26.4s, v21.4s\n"
".inst 0x6f87e23e // udot v30.4s, v17.16b, v7.4b[0]\n"
".inst 0x6f87ea3f // udot v31.4s, v17.16b, v7.4b[2]\n"
- "sqrdmulh v27.4s, v27.4s, v21.4s\n"
+ ".inst 0x6fa4e218 // udot v24.4s, v16.16b, v4.4b[1]\n"
+ ".inst 0x6fa4ea19 // udot v25.4s, v16.16b, v4.4b[3]\n"
+ ".inst 0x6fa5e21a // udot v26.4s, v16.16b, v5.4b[1]\n"
+ ".inst 0x6fa5ea1b // udot v27.4s, v16.16b, v5.4b[3]\n"
".inst 0x6fa6e21c // udot v28.4s, v16.16b, v6.4b[1]\n"
".inst 0x6fa6ea1d // udot v29.4s, v16.16b, v6.4b[3]\n"
- "and v19.16b, v24.16b, v20.16b\n"
".inst 0x6fa7e21e // udot v30.4s, v16.16b, v7.4b[1]\n"
".inst 0x6fa7ea1f // udot v31.4s, v16.16b, v7.4b[3]\n"
+ "sqrdmulh v24.4s, v24.4s, v21.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v21.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v21.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v21.4s\n"
+ "and v19.16b, v24.16b, v20.16b\n"
+ "sqrdmulh v28.4s, v28.4s, v21.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v21.4s\n"
"and v18.16b, v25.16b, v20.16b\n"
"and v17.16b, v26.16b, v20.16b\n"
"and v16.16b, v27.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
+ "sqrdmulh v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v28.4s, v28.4s, v21.4s\n"
- "sqrdmulh v29.4s, v29.4s, v21.4s\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
+ "and v19.16b, v28.16b, v20.16b\n"
"sqadd v25.4s, v25.4s, v18.4s\n"
"sqadd v26.4s, v26.4s, v17.4s\n"
- "sqadd v27.4s, v27.4s, v16.4s\n"
- "and v19.16b, v28.16b, v20.16b\n"
"and v18.16b, v29.16b, v20.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
"and v17.16b, v30.16b, v20.16b\n"
"and v16.16b, v31.16b, v20.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
"sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v24.4s, v24.4s, v20.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "srshl v25.4s, v25.4s, v20.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v19.4s\n"
"sqadd v29.4s, v29.4s, v18.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v24.4s, v24.4s, v20.4s\n"
- "srshl v25.4s, v25.4s, v20.4s\n"
"srshl v26.4s, v26.4s, v20.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
"srshl v27.4s, v27.4s, v20.4s\n"
+ "sqadd v31.4s, v31.4s, v16.4s\n"
"srshl v28.4s, v28.4s, v20.4s\n"
"srshl v29.4s, v29.4s, v20.4s\n"
+ "add v24.4s, v24.4s, v14.4s\n"
"srshl v30.4s, v30.4s, v20.4s\n"
+ "add v25.4s, v25.4s, v14.4s\n"
"srshl v31.4s, v31.4s, v20.4s\n"
- "add v24.4s, v24.4s, v13.4s\n"
- "add v25.4s, v25.4s, v13.4s\n"
- "add v26.4s, v26.4s, v13.4s\n"
- "add v27.4s, v27.4s, v13.4s\n"
- "add v28.4s, v28.4s, v13.4s\n"
- "add v29.4s, v29.4s, v13.4s\n"
- "add v30.4s, v30.4s, v13.4s\n"
- "add v31.4s, v31.4s, v13.4s\n"
- "smin v24.4s, v24.4s, v15.4s\n"
- "smin v25.4s, v25.4s, v15.4s\n"
- "smin v26.4s, v26.4s, v15.4s\n"
- "smin v27.4s, v27.4s, v15.4s\n"
- "smin v28.4s, v28.4s, v15.4s\n"
- "smin v29.4s, v29.4s, v15.4s\n"
- "smin v30.4s, v30.4s, v15.4s\n"
- "smin v31.4s, v31.4s, v15.4s\n"
- "smax v24.4s, v24.4s, v14.4s\n"
- "smax v25.4s, v25.4s, v14.4s\n"
- "smax v26.4s, v26.4s, v14.4s\n"
- "smax v27.4s, v27.4s, v14.4s\n"
- "smax v28.4s, v28.4s, v14.4s\n"
- "smax v29.4s, v29.4s, v14.4s\n"
- "smax v30.4s, v30.4s, v14.4s\n"
- "smax v31.4s, v31.4s, v14.4s\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v27.4s, v27.4s, v14.4s\n"
+ "add v28.4s, v28.4s, v14.4s\n"
+ "add v29.4s, v29.4s, v14.4s\n"
+ "add v30.4s, v30.4s, v14.4s\n"
+ "add v31.4s, v31.4s, v14.4s\n"
+ "smin v24.4s, v24.4s, v13.4s\n"
+ "smin v25.4s, v25.4s, v13.4s\n"
+ "smin v26.4s, v26.4s, v13.4s\n"
+ "smin v27.4s, v27.4s, v13.4s\n"
+ "smin v28.4s, v28.4s, v13.4s\n"
+ "smin v29.4s, v29.4s, v13.4s\n"
+ "smin v30.4s, v30.4s, v13.4s\n"
+ "smin v31.4s, v31.4s, v13.4s\n"
+ "smax v24.4s, v24.4s, v15.4s\n"
+ "smax v25.4s, v25.4s, v15.4s\n"
+ "smax v26.4s, v26.4s, v15.4s\n"
+ "smax v27.4s, v27.4s, v15.4s\n"
+ "smax v28.4s, v28.4s, v15.4s\n"
+ "smax v29.4s, v29.4s, v15.4s\n"
+ "smax v30.4s, v30.4s, v15.4s\n"
+ "smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
@@ -630,7 +630,7 @@ void a64_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
"4:" // Tail: End
: [n_channels] "+&r" (n_output_channels), [params] "+&r" (params)
: [inptrs] "r" (inptrs), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
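
The requantization epilogues in these u8q kernels differ only in instruction scheduling; the per-lane arithmetic is fixed by arm_gemm::Requantize32. As a reference, here is a minimal scalar C++ sketch of what each SQRDMULH / AND+SSHR+SQADD / SRSHL / ADD / SMIN+SMAX chain above computes. The helper names are illustrative, not from the source; only the Requantize32 fields (per_layer_mul, per_layer_right_shift, c_offset, minval, maxval) are assumed from it.

#include <algorithm>
#include <cstdint>

// SQRDMULH: saturating rounding doubling high multiply.
// Equivalent to sat(((int64_t)a * b + (1 << 30)) >> 31).
static inline int32_t rdmulh(int32_t a, int32_t b)
{
    int64_t p = ((int64_t)a * b + (1ll << 30)) >> 31;
    return p > INT32_MAX ? INT32_MAX : (int32_t)p; // only a == b == INT32_MIN overflows
}

// AND + SSHR #31 + SQADD, then SRSHL by -n: a rounding right shift where
// exact ties on negative inputs round away from zero instead of upwards.
static inline int32_t rshift_round(int32_t x, int n)
{
    if (n <= 0) return x;                      // SRSHL with a zero shift is a no-op
    int64_t v = (int64_t)x - (x < 0 ? 1 : 0);  // the sign-dependent fixup
    return (int32_t)((v + (1ll << (n - 1))) >> n);
}

// One lane of the epilogue: scale, shift, re-centre, clamp, narrow.
static inline uint8_t requantize_lane(int32_t acc, int32_t per_layer_mul,
                                      int per_layer_right_shift, int32_t c_offset,
                                      int32_t minval, int32_t maxval)
{
    int32_t v = rshift_round(rdmulh(acc, per_layer_mul), per_layer_right_shift);
    v += c_offset;                             // ADD vN.4s, vN.4s, <c_offset>
    v = std::max(std::min(v, maxval), minval); // SMIN then SMAX, as in the asm
    return (uint8_t)v;                         // UZP1 keeps the low byte of each lane
}

The sign-dependent fixup before SRSHL is what makes negative ties round away from zero, matching gemmlowp-style rounding; the reordered schedules above leave this arithmetic untouched.
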
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index 0770c126ec..82d7f407e2 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,21 +49,21 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"lsr x10, %x[n_output_channels], #0x2\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
"ld1r { v15.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v14.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_maxval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v14.4s }, [x21]\n"
"ld1r { v13.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v12.16b }, [x21]\n"
"ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v10.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v10.4s }, [x21]\n"
"ld1r { v9.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v8.4s }, [x20]\n"
"mov x9, #0x0\n"
+ "ld1r { v8.4s }, [x20]\n"
"cbz x10, 9f\n"
"1:" // Output channel loop
"movi v31.4s, #0x0\n"
@@ -96,20 +96,20 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"3:" // Output channel loop: Load quantization parameters: Done
"ldr s5, [%x[weights]], #0x4\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
+ "ldp x21, x20, [x22], #0x10\n"
"ldr d0, [x21, #0x0]\n"
"ldr d4, [x20, #0x0]\n"
+ "usubl v5.8h, v5.8b, v12.8b\n"
"usubl v0.8h, v0.8b, v13.8b\n"
"usubl v4.8h, v4.8b, v13.8b\n"
- "usubl v5.8h, v5.8b, v12.8b\n"
"cbz x23, 7f\n"
"ldr s7, [%x[weights]], #0x4\n"
"ldp x21, x20, [x22], #0x10\n"
"subs x23, x23, #0x1\n"
- "usubl v7.8h, v7.8b, v12.8b\n"
"ldr d3, [x21, #0x0]\n"
"ldr d6, [x20, #0x0]\n"
+ "usubl v7.8h, v7.8b, v12.8b\n"
"usubl v3.8h, v3.8b, v13.8b\n"
"usubl v6.8h, v6.8b, v13.8b\n"
"beq 5f\n"
@@ -125,13 +125,13 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x21, #0x0]\n"
- "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d4, [x20, #0x0]\n"
@@ -139,22 +139,22 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"ldp x21, x20, [x22], #0x10\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "usubl v4.8h, v4.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "usubl v5.8h, v5.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "usubl v4.8h, v4.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
"ldr d3, [x21, #0x0]\n"
- "usubl v3.8h, v3.8b, v13.8b\n"
+ "usubl v5.8h, v5.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "usubl v3.8h, v3.8b, v13.8b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"ldr d6, [x20, #0x0]\n"
@@ -172,54 +172,54 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v19.4s, v5.4h, v0.h[3]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "smlal v16.4s, v7.4h, v3.h[0]\n"
- "smlal v17.4s, v7.4h, v3.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
- "ldr x23, [%x[outptrs], #0x20]\n"
- "smlal v18.4s, v7.4h, v3.h[2]\n"
- "smlal v19.4s, v7.4h, v3.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
+ "ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
+ "smlal v16.4s, v7.4h, v3.h[0]\n"
+ "smlal v17.4s, v7.4h, v3.h[1]\n"
+ "smlal v18.4s, v7.4h, v3.h[2]\n"
+ "smlal v19.4s, v7.4h, v3.h[3]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
+ "smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v5.4h, v4.h[2]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
- "and v3.16b, v16.16b, v8.16b\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v9.4s\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
- "and v2.16b, v17.16b, v8.16b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
- "and v1.16b, v18.16b, v8.16b\n"
- "and v0.16b, v19.16b, v8.16b\n"
+ "and v3.16b, v16.16b, v8.16b\n"
+ "and v2.16b, v17.16b, v8.16b\n"
"sshl v20.4s, v20.4s, v10.4s\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
+ "and v1.16b, v18.16b, v8.16b\n"
+ "and v0.16b, v19.16b, v8.16b\n"
+ "smlal v27.4s, v7.4h, v6.h[3]\n"
"sshl v21.4s, v21.4s, v10.4s\n"
"sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v27.4s, v7.4h, v6.h[3]\n"
+ "smlal v28.4s, v7.4h, v6.h[4]\n"
"sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
- "smlal v28.4s, v7.4h, v6.h[4]\n"
- "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -357,49 +357,49 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -421,70 +421,70 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x20, #0x0]\n"
- "usubl v0.8h, v0.8b, v13.8b\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
"ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x20, [%x[outptrs], #0x38]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
- "ldr x20, [%x[outptrs], #0x38]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr s5, [%x[weights]], #0x4\n"
"ldr d4, [x28, #0x0]\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "usubl v5.8h, v5.8b, v12.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
+ "usubl v5.8h, v5.8b, v12.8b\n"
+ "smlal v20.4s, v7.4h, v3.h[4]\n"
+ "smlal v21.4s, v7.4h, v3.h[5]\n"
"usubl v4.8h, v4.8b, v13.8b\n"
+ "smlal v22.4s, v7.4h, v3.h[6]\n"
+ "smlal v23.4s, v7.4h, v3.h[7]\n"
"smlal v16.4s, v5.4h, v0.h[0]\n"
"smlal v17.4s, v5.4h, v0.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
"smlal v18.4s, v5.4h, v0.h[2]\n"
"smlal v19.4s, v5.4h, v0.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "smlal v20.4s, v7.4h, v3.h[4]\n"
- "smlal v21.4s, v7.4h, v3.h[5]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "smlal v22.4s, v7.4h, v3.h[6]\n"
- "smlal v23.4s, v7.4h, v3.h[7]\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
"smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
+ "smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v7.4h, v6.h[2]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
- "and v3.16b, v16.16b, v8.16b\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v9.4s\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
- "and v2.16b, v17.16b, v8.16b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
- "and v1.16b, v18.16b, v8.16b\n"
- "and v0.16b, v19.16b, v8.16b\n"
+ "and v3.16b, v16.16b, v8.16b\n"
+ "and v2.16b, v17.16b, v8.16b\n"
"sshl v20.4s, v20.4s, v10.4s\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
+ "and v1.16b, v18.16b, v8.16b\n"
+ "and v0.16b, v19.16b, v8.16b\n"
+ "smlal v27.4s, v5.4h, v4.h[3]\n"
"sshl v21.4s, v21.4s, v10.4s\n"
"sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v27.4s, v5.4h, v4.h[3]\n"
+ "smlal v28.4s, v5.4h, v4.h[4]\n"
"sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
- "smlal v28.4s, v5.4h, v4.h[4]\n"
- "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -622,49 +622,49 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -673,45 +673,45 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"7:" // Output channel loop: Single kernel point
"smlal v16.4s, v5.4h, v0.h[0]\n"
"smlal v17.4s, v5.4h, v0.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
"ldr x27, [%x[outptrs], #0x0]\n"
+ "ldr x26, [%x[outptrs], #0x8]\n"
"smlal v18.4s, v5.4h, v0.h[2]\n"
"smlal v19.4s, v5.4h, v0.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "ldr x26, [%x[outptrs], #0x8]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
- "smlal v20.4s, v5.4h, v0.h[4]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
+ "ldr x24, [%x[outptrs], #0x18]\n"
+ "smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "ldr x24, [%x[outptrs], #0x18]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
+ "ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x20, [%x[outptrs], #0x38]\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "ldr x23, [%x[outptrs], #0x20]\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v20.4s, v20.4s, v10.4s\n"
+ "sshl v21.4s, v21.4s, v10.4s\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "smlal v27.4s, v5.4h, v4.h[3]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
"sqrdmulh v19.4s, v19.4s, v9.4s\n"
+ "smlal v28.4s, v5.4h, v4.h[4]\n"
+ "sshl v22.4s, v22.4s, v10.4s\n"
+ "sshl v23.4s, v23.4s, v10.4s\n"
+ "smlal v29.4s, v5.4h, v4.h[5]\n"
"and v3.16b, v16.16b, v8.16b\n"
- "smlal v27.4s, v5.4h, v4.h[3]\n"
- "ldr x21, [%x[outptrs], #0x30]\n"
"and v2.16b, v17.16b, v8.16b\n"
+ "smlal v30.4s, v5.4h, v4.h[6]\n"
"and v1.16b, v18.16b, v8.16b\n"
- "smlal v28.4s, v5.4h, v4.h[4]\n"
- "ldr x20, [%x[outptrs], #0x38]\n"
"and v0.16b, v19.16b, v8.16b\n"
- "sshl v20.4s, v20.4s, v10.4s\n"
- "smlal v29.4s, v5.4h, v4.h[5]\n"
- "sshl v21.4s, v21.4s, v10.4s\n"
- "sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v30.4s, v5.4h, v4.h[6]\n"
- "sshl v23.4s, v23.4s, v10.4s\n"
- "sshl v24.4s, v24.4s, v10.4s\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
+ "sshl v24.4s, v24.4s, v10.4s\n"
"sshl v25.4s, v25.4s, v10.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
"sshr v2.4s, v2.4s, #0x1f\n"
@@ -848,49 +848,49 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -965,20 +965,20 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"18:" // Output channel oddments: Load quantization parameters: Done
"ldr s5, [%x[weights]], #0x4\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
+ "ldp x21, x20, [x22], #0x10\n"
"ldr d0, [x21, #0x0]\n"
"ldr d4, [x20, #0x0]\n"
+ "usubl v5.8h, v5.8b, v12.8b\n"
"usubl v0.8h, v0.8b, v13.8b\n"
"usubl v4.8h, v4.8b, v13.8b\n"
- "usubl v5.8h, v5.8b, v12.8b\n"
"cbz x23, 22f\n"
"ldr s7, [%x[weights]], #0x4\n"
"ldp x21, x20, [x22], #0x10\n"
"subs x23, x23, #0x1\n"
- "usubl v7.8h, v7.8b, v12.8b\n"
"ldr d3, [x21, #0x0]\n"
"ldr d6, [x20, #0x0]\n"
+ "usubl v7.8h, v7.8b, v12.8b\n"
"usubl v3.8h, v3.8b, v13.8b\n"
"usubl v6.8h, v6.8b, v13.8b\n"
"beq 20f\n"
@@ -994,13 +994,13 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x21, #0x0]\n"
- "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d4, [x20, #0x0]\n"
@@ -1008,22 +1008,22 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"ldp x21, x20, [x22], #0x10\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "usubl v4.8h, v4.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "usubl v5.8h, v5.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "usubl v4.8h, v4.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
"ldr d3, [x21, #0x0]\n"
- "usubl v3.8h, v3.8b, v13.8b\n"
+ "usubl v5.8h, v5.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "usubl v3.8h, v3.8b, v13.8b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"ldr d6, [x20, #0x0]\n"
@@ -1077,27 +1077,27 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d2, [x21, #0x0]\n"
- "usubl v2.8h, v2.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "usubl v2.8h, v2.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d1, [x20, #0x0]\n"
"ldr s0, [%x[weights]], #0x4\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "usubl v1.8h, v1.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "usubl v0.8h, v0.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "usubl v1.8h, v1.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
+ "usubl v0.8h, v0.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
@@ -1145,18 +1145,18 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"sshl v17.4s, v17.4s, v10.4s\n"
"sshl v18.4s, v18.4s, v10.4s\n"
"sshl v19.4s, v19.4s, v10.4s\n"
+ "sshl v20.4s, v20.4s, v10.4s\n"
+ "sshl v21.4s, v21.4s, v10.4s\n"
"sqrdmulh v16.4s, v16.4s, v9.4s\n"
"sqrdmulh v17.4s, v17.4s, v9.4s\n"
"sqrdmulh v18.4s, v18.4s, v9.4s\n"
"sqrdmulh v19.4s, v19.4s, v9.4s\n"
+ "sshl v22.4s, v22.4s, v10.4s\n"
+ "sshl v23.4s, v23.4s, v10.4s\n"
"and v3.16b, v16.16b, v8.16b\n"
"and v2.16b, v17.16b, v8.16b\n"
"and v1.16b, v18.16b, v8.16b\n"
"and v0.16b, v19.16b, v8.16b\n"
- "sshl v20.4s, v20.4s, v10.4s\n"
- "sshl v21.4s, v21.4s, v10.4s\n"
- "sshl v22.4s, v22.4s, v10.4s\n"
- "sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
"sshl v25.4s, v25.4s, v10.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -1320,47 +1320,47 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"tbz %x[n_output_channels], #1, 24f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.h }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.h }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.h }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.h }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.h }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.h }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.h }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.h }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
- "add x9, x9, #0x2\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.h }[0], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.h }[0], [x26]\n"
+ "add x20, x20, x9\n"
+ "add x9, x9, #0x2\n"
"st1 { v26.h }[0], [x25]\n"
"st1 { v27.h }[0], [x24]\n"
"st1 { v28.h }[0], [x23]\n"
@@ -1370,46 +1370,46 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"tbz %x[n_output_channels], #0, 25f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.b }[2], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.b }[2], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.b }[2], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.b }[2], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.b }[2], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.b }[2], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.b }[2], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.b }[2], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.b }[2], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.b }[2], [x26]\n"
+ "add x20, x20, x9\n"
"st1 { v26.b }[2], [x25]\n"
"st1 { v27.b }[2], [x24]\n"
"st1 { v28.b }[2], [x23]\n"
@@ -1420,46 +1420,46 @@ void a64_u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_imp
"24:" // Output channel oddments: Done: Store: Bit 1: Unset
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.b }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.b }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.b }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.b }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.b }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.b }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.b }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.b }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.b }[0], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.b }[0], [x26]\n"
+ "add x20, x20, x9\n"
"st1 { v26.b }[0], [x25]\n"
"st1 { v27.b }[0], [x24]\n"
"st1 { v28.b }[0], [x23]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index d1872c90f8..62ad1fc0f5 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[16];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -91,1070 +91,1070 @@ void a64_u8qa_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x16, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x15, x16, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v18.16b }, [x20]\n"
+ "mov x16, #0x0\n"
+ "mov x15, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x14, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x13, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x12, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x10, x17, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v29.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_c_offset]\n"
"add x20, x23, %[offsetof_Requantize32_minval]\n"
- "ld1r { v5.8h }, [x21]\n"
- "ld1r { v14.8h }, [x20]\n"
+ "ld1r { v12.8h }, [x21]\n"
+ "ld1r { v15.8h }, [x20]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "mov x14, #0x0\n"
- "ld1r { v12.8h }, [x20]\n"
- "mov x13, #0x0\n"
- "add x12, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x11, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x10, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x9, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x28, x27, [x22, #0x0]\n"
- "ldp x26, x25, [x22, #0x10]\n"
- "cbz x15, 3f\n"
- "ldr d19, [x11, #0x0]\n"
- "ldr d7, [x11, #0x8]\n"
- "subs x15, x15, #0x1\n"
- "usubl v19.8h, v19.8b, v18.8b\n"
- "ldr d1, [x11, #0x10]\n"
- "ldr d17, [x11, #0x18]\n"
- "usubl v7.8h, v7.8b, v18.8b\n"
- "usubl v1.8h, v1.8b, v18.8b\n"
- "ldr d8, [x11, #0x20]\n"
- "ldr d31, [x11, #0x28]\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "ldr d29, [x11, #0x30]\n"
- "ldr d16, [x11, #0x38]\n"
- "usubl v31.8h, v31.8b, v18.8b\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "ldr d4, [x11, #0x40]\n"
+ "ld1r { v17.8h }, [x20]\n"
+ "ldp x9, x28, [x22, #0x0]\n"
+ "ldp x27, x26, [x22, #0x10]\n"
+ "cbz x10, 3f\n"
+ "ldr d22, [x13, #0x0]\n"
+ "ldr d24, [x13, #0x8]\n"
+ "subs x10, x10, #0x1\n"
+ "ldr d9, [x13, #0x10]\n"
+ "ldr d7, [x13, #0x18]\n"
+ "ldr d25, [x13, #0x20]\n"
+ "ldr d4, [x13, #0x28]\n"
+ "ldr d13, [x13, #0x30]\n"
+ "ldr d14, [x13, #0x38]\n"
+ "usubl v22.8h, v22.8b, v29.8b\n"
+ "usubl v24.8h, v24.8b, v29.8b\n"
+ "ldr d2, [x13, #0x40]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "ldr q28, [x20, #0x0]\n"
- "ldr q9, [x20, #0x10]\n"
+ "usubl v9.8h, v9.8b, v29.8b\n"
+ "usubl v7.8h, v7.8b, v29.8b\n"
+ "usubl v25.8h, v25.8b, v29.8b\n"
+ "usubl v4.8h, v4.8b, v29.8b\n"
+ "usubl v13.8h, v13.8b, v29.8b\n"
+ "usubl v14.8h, v14.8b, v29.8b\n"
+ "ldr q20, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "ldp x23, x22, [x14, #0x0]\n"
"add x20, x20, #0x20\n"
+ "usubl v2.8h, v2.8b, v29.8b\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x23, x22, [x12, #0x0]\n"
- "ldp x21, x20, [x12, #0x10]\n"
- "mov v3.16b, v28.16b\n"
- "mov v30.16b, v9.16b\n"
- "ldr d23, [x23, x14]\n"
- "ldr d10, [x22, x14]\n"
- "mov v0.16b, v28.16b\n"
- "mov v22.16b, v9.16b\n"
- "ldr d11, [x21, x14]\n"
- "ldr d13, [x20, x14]\n"
- "mov v6.16b, v28.16b\n"
- "mov v2.16b, v9.16b\n"
- "ldr x20, [x12, #0x20]\n"
- "ldr d27, [x20, x14]\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "ushll v10.8h, v10.8b, #0x0\n"
+ "mov v8.16b, v20.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "mov v3.16b, v20.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldp x21, x20, [x14, #0x10]\n"
+ "mov v10.16b, v20.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ldr d11, [x23, x16]\n"
+ "ldr d28, [x22, x16]\n"
+ "ldr d18, [x21, x16]\n"
+ "ldr d19, [x20, x16]\n"
+ "ldr x20, [x14, #0x20]\n"
"ushll v11.8h, v11.8b, #0x0\n"
- "ushll v13.8h, v13.8b, #0x0\n"
- "ushll v27.8h, v27.8b, #0x0\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
+ "ushll v19.8h, v19.8b, #0x0\n"
+ "ldr d23, [x20, x16]\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
"beq 2f\n"
"1:" // Loop
- "ldr q24, [x10, #0x0]\n"
- "ldr q25, [x9, #0x0]\n"
- "smlal v28.4s, v23.4h, v8.4h\n"
- "smlal2 v9.4s, v23.8h, v8.8h\n"
- "ldr q20, [x10, #0x10]\n"
- "ldr q26, [x9, #0x10]\n"
- "smlal v28.4s, v10.4h, v19.4h\n"
- "smlal v3.4s, v23.4h, v17.4h\n"
- "ldr x20, [x12, #0x28]\n"
- "ldr d21, [x20, x14]\n"
- "smlal v0.4s, v23.4h, v7.4h\n"
- "smlal v6.4s, v23.4h, v19.4h\n"
- "smlal2 v9.4s, v10.8h, v19.8h\n"
- "ldr x20, [x12, #0x38]\n"
- "ldr d10, [x20, x14]\n"
- "smlal v28.4s, v13.4h, v31.4h\n"
- "smlal2 v30.4s, v23.8h, v17.8h\n"
- "smlal2 v22.4s, v23.8h, v7.8h\n"
- "ldr x20, [x12, #0x30]\n"
- "ldr d15, [x20, x14]\n"
- "smlal2 v2.4s, v23.8h, v19.8h\n"
- "smlal v3.4s, v11.4h, v1.4h\n"
- "ushll v21.8h, v21.8b, #0x0\n"
- "ldr x20, [x12, #0x40]\n"
- "ldr d23, [x20, x14]\n"
- "smlal v0.4s, v13.4h, v1.4h\n"
- "smlal v6.4s, v13.4h, v7.4h\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "smlal2 v9.4s, v13.8h, v31.8h\n"
- "smlal v28.4s, v27.4h, v16.4h\n"
- "ldr x20, [x12, #0x48]\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "smlal2 v30.4s, v11.8h, v1.8h\n"
- "ldr d11, [x20, x14]\n"
- "smlal2 v22.4s, v13.8h, v1.8h\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "smlal2 v2.4s, v13.8h, v7.8h\n"
- "smlal v3.4s, v13.4h, v8.4h\n"
- "ldr x21, [x12, #0x50]\n"
- "ldr x20, [x12, #0x58]\n"
- "smlal v0.4s, v21.4h, v29.4h\n"
- "smlal v6.4s, v27.4h, v17.4h\n"
+ "ldr q26, [x12, #0x0]\n"
+ "ldr q6, [x11, #0x0]\n"
+ "smlal v20.4s, v11.4h, v25.4h\n"
+ "smlal2 v1.4s, v11.8h, v25.8h\n"
+ "ldr q0, [x12, #0x10]\n"
+ "ldr q5, [x11, #0x10]\n"
+ "smlal v8.4s, v11.4h, v7.4h\n"
+ "smlal v3.4s, v11.4h, v24.4h\n"
+ "ldr x25, [x14, #0x28]\n"
+ "smlal v10.4s, v11.4h, v22.4h\n"
+ "ldr x24, [x14, #0x38]\n"
+ "smlal2 v21.4s, v11.8h, v7.8h\n"
+ "smlal2 v30.4s, v11.8h, v24.8h\n"
+ "smlal2 v27.4s, v11.8h, v22.8h\n"
+ "ldr x23, [x14, #0x30]\n"
+ "ldr x22, [x14, #0x40]\n"
+ "smlal v20.4s, v28.4h, v22.4h\n"
+ "smlal2 v1.4s, v28.8h, v22.8h\n"
+ "ldr x20, [x14, #0x48]\n"
+ "ldr x21, [x14, #0x50]\n"
+ "ldr d16, [x25, x16]\n"
+ "ldr d28, [x24, x16]\n"
+ "smlal v8.4s, v18.4h, v9.4h\n"
+ "smlal v3.4s, v19.4h, v9.4h\n"
+ "ldr d31, [x23, x16]\n"
+ "ldr d11, [x22, x16]\n"
+ "smlal v10.4s, v19.4h, v24.4h\n"
+ "smlal2 v21.4s, v18.8h, v9.8h\n"
+ "ldr d18, [x20, x16]\n"
+ "smlal2 v30.4s, v19.8h, v9.8h\n"
+ "smlal2 v27.4s, v19.8h, v24.8h\n"
+ "ldr x20, [x14, #0x58]\n"
+ "smlal v20.4s, v19.4h, v4.4h\n"
+ "smlal2 v1.4s, v19.8h, v4.8h\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x24, [x14, #0x60]\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "smlal v8.4s, v19.4h, v25.4h\n"
+ "ldr x23, [x14, #0x68]\n"
+ "ldr x22, [x14, #0x70]\n"
+ "smlal v10.4s, v23.4h, v7.4h\n"
+ "ushll v31.8h, v31.8b, #0x0\n"
+ "smlal2 v21.4s, v19.8h, v25.8h\n"
+ "ldr d19, [x21, x16]\n"
+ "smlal v3.4s, v16.4h, v13.4h\n"
+ "smlal2 v30.4s, v16.8h, v13.8h\n"
+ "ldr d16, [x20, x16]\n"
"ushll v11.8h, v11.8b, #0x0\n"
- "ldr x24, [x12, #0x60]\n"
- "smlal2 v9.4s, v27.8h, v16.8h\n"
- "smlal v28.4s, v10.4h, v7.4h\n"
- "ldr x23, [x12, #0x68]\n"
- "ldr x22, [x12, #0x70]\n"
- "smlal2 v30.4s, v13.8h, v8.8h\n"
- "ldr d13, [x21, x14]\n"
- "smlal2 v22.4s, v21.8h, v29.8h\n"
- "ldr d21, [x20, x14]\n"
- "smlal2 v2.4s, v27.8h, v17.8h\n"
- "smlal v3.4s, v27.4h, v29.4h\n"
- "ushll v13.8h, v13.8b, #0x0\n"
- "ldr x21, [x12, #0x78]\n"
- "smlal v0.4s, v27.4h, v8.4h\n"
- "smlal v6.4s, v15.4h, v4.4h\n"
- "ushll v21.8h, v21.8b, #0x0\n"
+ "smlal v20.4s, v23.4h, v14.4h\n"
+ "smlal2 v1.4s, v23.8h, v14.8h\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
+ "ldr x21, [x14, #0x78]\n"
+ "smlal2 v27.4s, v23.8h, v7.8h\n"
+ "smlal v8.4s, v23.4h, v13.4h\n"
+ "ushll v19.8h, v19.8b, #0x0\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v9.4s, v10.8h, v7.8h\n"
- "smlal v28.4s, v23.4h, v1.4h\n"
- "add x11, x11, #0x48\n"
- "subs x15, x15, #0x1\n"
- "smlal2 v30.4s, v27.8h, v29.8h\n"
- "smlal2 v22.4s, v27.8h, v8.8h\n"
- "ldr d27, [x24, x14]\n"
- "ushll v27.8h, v27.8b, #0x0\n"
- "smlal2 v2.4s, v15.8h, v4.8h\n"
- "ldr d15, [x23, x14]\n"
- "smlal v3.4s, v10.4h, v19.4h\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "smlal v0.4s, v11.4h, v31.4h\n"
- "smlal v6.4s, v11.4h, v8.4h\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
- "smlal2 v9.4s, v23.8h, v1.8h\n"
- "smlal v28.4s, v11.4h, v4.4h\n"
- "smlal2 v30.4s, v10.8h, v19.8h\n"
- "ldr d10, [x22, x14]\n"
- "smlal2 v22.4s, v11.8h, v31.8h\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "smlal2 v2.4s, v11.8h, v8.8h\n"
- "ldr d8, [x21, x14]\n"
+ "smlal v10.4s, v31.4h, v2.4h\n"
+ "smlal2 v21.4s, v23.8h, v13.8h\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "add x13, x13, #0x48\n"
+ "smlal v3.4s, v23.4h, v25.4h\n"
+ "smlal2 v30.4s, v23.8h, v25.8h\n"
+ "ldr d23, [x24, x16]\n"
+ "subs x10, x10, #0x1\n"
+ "smlal v20.4s, v28.4h, v24.4h\n"
+ "smlal2 v1.4s, v28.8h, v24.8h\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
+ "smlal2 v27.4s, v31.8h, v2.8h\n"
+ "ldr d31, [x23, x16]\n"
+ "smlal v8.4s, v28.4h, v22.4h\n"
+ "smlal v10.4s, v18.4h, v25.4h\n"
+ "smlal2 v21.4s, v28.8h, v22.8h\n"
+ "ldr d28, [x22, x16]\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
+ "smlal v3.4s, v18.4h, v4.4h\n"
+ "smlal2 v30.4s, v18.8h, v4.8h\n"
+ "smlal v20.4s, v11.4h, v9.4h\n"
+ "smlal2 v1.4s, v11.8h, v9.8h\n"
+ "ushll v31.8h, v31.8b, #0x0\n"
+ "smlal2 v27.4s, v18.8h, v25.8h\n"
+ "ldr d25, [x21, x16]\n"
+ "smlal v8.4s, v11.4h, v24.4h\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "smlal v10.4s, v16.4h, v9.4h\n"
+ "smlal2 v21.4s, v11.8h, v24.8h\n"
+ "add x16, x16, #0x8\n"
+ "smlal v3.4s, v19.4h, v22.4h\n"
+ "smlal2 v30.4s, v19.8h, v22.8h\n"
+ "smlal v20.4s, v18.4h, v2.4h\n"
+ "smlal2 v1.4s, v18.8h, v2.8h\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "smlal2 v27.4s, v16.8h, v9.8h\n"
+ "smlal v8.4s, v18.4h, v14.4h\n"
+ "smlal v10.4s, v31.4h, v4.4h\n"
+ "smlal2 v21.4s, v18.8h, v14.8h\n"
"smlal v3.4s, v23.4h, v7.4h\n"
- "ushll v8.8h, v8.8b, #0x0\n"
- "smlal v0.4s, v13.4h, v19.4h\n"
- "smlal v6.4s, v21.4h, v1.4h\n"
- "add x14, x14, #0x8\n"
- "smlal2 v9.4s, v11.8h, v4.8h\n"
- "smlal v28.4s, v13.4h, v17.4h\n"
"smlal2 v30.4s, v23.8h, v7.8h\n"
- "smlal2 v22.4s, v13.8h, v19.8h\n"
- "smlal2 v2.4s, v21.8h, v1.8h\n"
- "smlal v3.4s, v11.4h, v16.4h\n"
- "smlal v0.4s, v27.4h, v17.4h\n"
- "smlal v6.4s, v15.4h, v31.4h\n"
- "smlal2 v9.4s, v13.8h, v17.8h\n"
- "smlal v28.4s, v27.4h, v29.4h\n"
- "sqrdmulh v28.4s, v28.4s, v24.4s\n"
- "smlal2 v30.4s, v11.8h, v16.8h\n"
- "smlal2 v22.4s, v27.8h, v17.8h\n"
- "and v17.16b, v28.16b, v25.16b\n"
- "smlal2 v2.4s, v15.8h, v31.8h\n"
- "smlal v3.4s, v21.4h, v31.4h\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "smlal v0.4s, v10.4h, v16.4h\n"
- "smlal v6.4s, v10.4h, v29.4h\n"
- "sqadd v28.4s, v28.4s, v17.4s\n"
- "smlal2 v9.4s, v27.8h, v29.8h\n"
- "smlal2 v30.4s, v21.8h, v31.8h\n"
- "sqrdmulh v9.4s, v9.4s, v20.4s\n"
- "smlal2 v22.4s, v10.8h, v16.8h\n"
- "smlal2 v2.4s, v10.8h, v29.8h\n"
- "and v23.16b, v9.16b, v26.16b\n"
- "smlal v3.4s, v15.4h, v4.4h\n"
- "smlal v0.4s, v8.4h, v4.4h\n"
- "sqrdmulh v3.4s, v3.4s, v24.4s\n"
- "smlal v6.4s, v8.4h, v16.4h\n"
- "smlal2 v30.4s, v15.8h, v4.8h\n"
- "sqrdmulh v0.4s, v0.4s, v24.4s\n"
- "smlal2 v22.4s, v8.8h, v4.8h\n"
- "smlal2 v2.4s, v8.8h, v16.8h\n"
- "sqrdmulh v6.4s, v6.4s, v24.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v8.16b, v3.16b, v25.16b\n"
- "sqrdmulh v30.4s, v30.4s, v20.4s\n"
- "and v11.16b, v0.16b, v25.16b\n"
- "sqrdmulh v22.4s, v22.4s, v20.4s\n"
- "and v29.16b, v6.16b, v25.16b\n"
- "sqrdmulh v2.4s, v2.4s, v20.4s\n"
- "sqadd v9.4s, v9.4s, v23.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "and v13.16b, v30.16b, v26.16b\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "and v21.16b, v22.16b, v26.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v23.16b, v2.16b, v26.16b\n"
- "sqadd v3.4s, v3.4s, v8.4s\n"
+ "smlal v20.4s, v19.4h, v7.4h\n"
+ "smlal2 v1.4s, v19.8h, v7.8h\n"
+ "smlal2 v27.4s, v31.8h, v4.8h\n"
+ "smlal v8.4s, v16.4h, v4.4h\n"
+ "smlal v10.4s, v28.4h, v13.4h\n"
+ "smlal2 v21.4s, v16.8h, v4.8h\n"
+ "smlal v3.4s, v28.4h, v14.4h\n"
+ "smlal2 v30.4s, v28.8h, v14.8h\n"
+ "smlal v20.4s, v23.4h, v13.4h\n"
+ "smlal2 v1.4s, v23.8h, v13.8h\n"
+ "smlal2 v27.4s, v28.8h, v13.8h\n"
+ "smlal v8.4s, v31.4h, v2.4h\n"
+ "smlal v10.4s, v25.4h, v14.4h\n"
+ "smlal2 v21.4s, v31.8h, v2.8h\n"
+ "smlal v3.4s, v25.4h, v2.4h\n"
+ "smlal2 v30.4s, v25.8h, v2.8h\n"
+ "sqrdmulh v20.4s, v20.4s, v26.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v0.4s\n"
+ "smlal2 v27.4s, v25.8h, v14.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v26.4s\n"
+ "and v19.16b, v20.16b, v6.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v26.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v0.4s\n"
+ "and v9.16b, v1.16b, v5.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v26.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v16.16b, v8.16b, v6.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v0.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v0.4s\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
+ "and v22.16b, v3.16b, v6.16b\n"
+ "sqadd v20.4s, v20.4s, v19.4s\n"
+ "and v13.16b, v10.16b, v6.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v28.16b, v21.16b, v5.16b\n"
+ "sqadd v1.4s, v1.4s, v9.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v0.16b, v30.16b, v5.16b\n"
"sshr v13.4s, v13.4s, #0x1f\n"
- "sqadd v0.4s, v0.4s, v11.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "sqadd v6.4s, v6.4s, v29.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "srshl v28.4s, v28.4s, v25.4s\n"
- "srshl v3.4s, v3.4s, v25.4s\n"
- "sqadd v30.4s, v30.4s, v13.4s\n"
- "srshl v0.4s, v0.4s, v25.4s\n"
- "sqadd v22.4s, v22.4s, v21.4s\n"
- "srshl v6.4s, v6.4s, v25.4s\n"
- "sqadd v2.4s, v2.4s, v23.4s\n"
- "srshl v9.4s, v9.4s, v26.4s\n"
- "sqxtn v28.4h, v28.4s\n"
- "srshl v30.4s, v30.4s, v26.4s\n"
+ "and v18.16b, v27.16b, v5.16b\n"
+ "sqadd v8.4s, v8.4s, v16.4s\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sqadd v3.4s, v3.4s, v22.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v13.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "srshl v20.4s, v20.4s, v6.4s\n"
+ "srshl v8.4s, v8.4s, v6.4s\n"
+ "sqadd v21.4s, v21.4s, v28.4s\n"
+ "srshl v3.4s, v3.4s, v6.4s\n"
+ "sqadd v30.4s, v30.4s, v0.4s\n"
+ "srshl v10.4s, v10.4s, v6.4s\n"
+ "sqadd v27.4s, v27.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v5.4s\n"
+ "sqxtn v20.4h, v20.4s\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v30.4s, v30.4s, v5.4s\n"
"sqxtn v3.4h, v3.4s\n"
- "srshl v22.4s, v22.4s, v26.4s\n"
- "sqxtn v0.4h, v0.4s\n"
- "srshl v2.4s, v2.4s, v26.4s\n"
- "sqxtn v6.4h, v6.4s\n"
- "sqxtn2 v28.8h, v9.4s\n"
+ "srshl v27.4s, v27.4s, v5.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "sqxtn2 v20.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v21.4s\n"
"sqxtn2 v3.8h, v30.4s\n"
- "sqxtn2 v0.8h, v22.4s\n"
- "sqxtn2 v6.8h, v2.4s\n"
- "sqadd v28.8h, v28.8h, v5.8h\n"
- "sqadd v3.8h, v3.8h, v5.8h\n"
- "sqadd v0.8h, v0.8h, v5.8h\n"
- "sqadd v6.8h, v6.8h, v5.8h\n"
- "smax v28.8h, v28.8h, v14.8h\n"
- "smax v3.8h, v3.8h, v14.8h\n"
- "smax v0.8h, v0.8h, v14.8h\n"
- "smax v6.8h, v6.8h, v14.8h\n"
- "smin v28.8h, v28.8h, v12.8h\n"
- "smin v3.8h, v3.8h, v12.8h\n"
- "smin v0.8h, v0.8h, v12.8h\n"
- "smin v6.8h, v6.8h, v12.8h\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str d28, [x28, x13]\n"
+ "sqxtn2 v10.8h, v27.4s\n"
+ "sqadd v20.8h, v20.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v3.8h, v3.8h, v12.8h\n"
+ "sqadd v10.8h, v10.8h, v12.8h\n"
+ "smax v20.8h, v20.8h, v15.8h\n"
+ "smax v8.8h, v8.8h, v15.8h\n"
+ "smax v3.8h, v3.8h, v15.8h\n"
+ "smax v10.8h, v10.8h, v15.8h\n"
+ "smin v20.8h, v20.8h, v17.8h\n"
+ "smin v8.8h, v8.8h, v17.8h\n"
+ "smin v3.8h, v3.8h, v17.8h\n"
+ "smin v10.8h, v10.8h, v17.8h\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
"uzp1 v3.16b, v3.16b, v3.16b\n"
- "uzp1 v0.16b, v0.16b, v0.16b\n"
- "str d3, [x27, x13]\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "str d0, [x26, x13]\n"
- "str d6, [x25, x13]\n"
- "ldr q28, [x20, #0x0]\n"
- "ldr q9, [x20, #0x10]\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d20, [x9, x15]\n"
+ "str d8, [x28, x15]\n"
+ "str d3, [x27, x15]\n"
+ "str d10, [x26, x15]\n"
+ "add x15, x15, #0x8\n"
+ "ldr q20, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d19, [x11, #0x0]\n"
- "ldr d7, [x11, #0x8]\n"
- "add x13, x13, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d1, [x11, #0x10]\n"
- "ldr d17, [x11, #0x18]\n"
- "mov v3.16b, v28.16b\n"
- "mov v30.16b, v9.16b\n"
- "ldr d8, [x11, #0x20]\n"
- "ldr d31, [x11, #0x28]\n"
- "mov v0.16b, v28.16b\n"
- "mov v22.16b, v9.16b\n"
- "ldr d29, [x11, #0x30]\n"
- "ldr d16, [x11, #0x38]\n"
- "mov v6.16b, v28.16b\n"
- "mov v2.16b, v9.16b\n"
- "ldr d4, [x11, #0x40]\n"
- "ldp x23, x22, [x12, #0x0]\n"
- "usubl v19.8h, v19.8b, v18.8b\n"
- "usubl v7.8h, v7.8b, v18.8b\n"
- "ldp x21, x20, [x12, #0x10]\n"
- "ldr d23, [x23, x14]\n"
- "usubl v1.8h, v1.8b, v18.8b\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "ldr d10, [x22, x14]\n"
- "ldr d11, [x21, x14]\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "usubl v31.8h, v31.8b, v18.8b\n"
- "ldr d13, [x20, x14]\n"
- "ldr x20, [x12, #0x20]\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "ldr d27, [x20, x14]\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "ushll v10.8h, v10.8b, #0x0\n"
+ "ldr d22, [x13, #0x0]\n"
+ "ldr d24, [x13, #0x8]\n"
+ "ldr d9, [x13, #0x10]\n"
+ "ldr d7, [x13, #0x18]\n"
+ "ldr d25, [x13, #0x20]\n"
+ "ldr d4, [x13, #0x28]\n"
+ "mov v8.16b, v20.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldr d13, [x13, #0x30]\n"
+ "ldr d14, [x13, #0x38]\n"
+ "mov v3.16b, v20.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldr d2, [x13, #0x40]\n"
+ "ldp x23, x22, [x14, #0x0]\n"
+ "mov v10.16b, v20.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "usubl v22.8h, v22.8b, v29.8b\n"
+ "usubl v24.8h, v24.8b, v29.8b\n"
+ "usubl v9.8h, v9.8b, v29.8b\n"
+ "usubl v7.8h, v7.8b, v29.8b\n"
+ "ldp x21, x20, [x14, #0x10]\n"
+ "usubl v25.8h, v25.8b, v29.8b\n"
+ "usubl v4.8h, v4.8b, v29.8b\n"
+ "usubl v13.8h, v13.8b, v29.8b\n"
+ "usubl v14.8h, v14.8b, v29.8b\n"
+ "ldr d11, [x23, x16]\n"
+ "ldr d28, [x22, x16]\n"
+ "ldr d18, [x21, x16]\n"
+ "ldr d19, [x20, x16]\n"
+ "usubl v2.8h, v2.8b, v29.8b\n"
+ "ldr x20, [x14, #0x20]\n"
"ushll v11.8h, v11.8b, #0x0\n"
- "ushll v13.8h, v13.8b, #0x0\n"
- "ushll v27.8h, v27.8b, #0x0\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
+ "ushll v19.8h, v19.8b, #0x0\n"
+ "ldr d23, [x20, x16]\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q26, [x10, #0x0]\n"
- "ldr q25, [x9, #0x0]\n"
- "smlal v28.4s, v23.4h, v8.4h\n"
- "smlal2 v9.4s, v23.8h, v8.8h\n"
- "ldr q24, [x10, #0x10]\n"
- "ldr q20, [x9, #0x10]\n"
- "smlal v28.4s, v10.4h, v19.4h\n"
- "smlal v3.4s, v23.4h, v17.4h\n"
- "ldr x20, [x12, #0x28]\n"
- "ldr d21, [x20, x14]\n"
- "smlal v0.4s, v23.4h, v7.4h\n"
- "smlal v6.4s, v23.4h, v19.4h\n"
- "smlal2 v9.4s, v10.8h, v19.8h\n"
- "ldr x20, [x12, #0x38]\n"
- "ldr d15, [x20, x14]\n"
- "smlal v28.4s, v13.4h, v31.4h\n"
- "smlal2 v30.4s, v23.8h, v17.8h\n"
- "smlal2 v22.4s, v23.8h, v7.8h\n"
- "ldr x20, [x12, #0x30]\n"
- "ldr d10, [x20, x14]\n"
- "smlal2 v2.4s, v23.8h, v19.8h\n"
- "smlal v3.4s, v11.4h, v1.4h\n"
- "ushll v21.8h, v21.8b, #0x0\n"
- "ldr x20, [x12, #0x40]\n"
- "ldr d23, [x20, x14]\n"
- "smlal v0.4s, v13.4h, v1.4h\n"
- "smlal v6.4s, v13.4h, v7.4h\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "smlal2 v9.4s, v13.8h, v31.8h\n"
- "smlal v28.4s, v27.4h, v16.4h\n"
- "ldr x20, [x12, #0x48]\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "smlal2 v30.4s, v11.8h, v1.8h\n"
- "ldr d11, [x20, x14]\n"
- "smlal2 v22.4s, v13.8h, v1.8h\n"
+ "ldr q16, [x12, #0x0]\n"
+ "ldr q5, [x11, #0x0]\n"
+ "smlal v20.4s, v11.4h, v25.4h\n"
+ "smlal2 v1.4s, v11.8h, v25.8h\n"
+ "ldr q6, [x12, #0x10]\n"
+ "ldr q31, [x11, #0x10]\n"
+ "smlal v8.4s, v11.4h, v7.4h\n"
+ "smlal v3.4s, v11.4h, v24.4h\n"
+ "ldr x25, [x14, #0x28]\n"
+ "smlal v10.4s, v11.4h, v22.4h\n"
+ "ldr x23, [x14, #0x38]\n"
+ "smlal2 v21.4s, v11.8h, v7.8h\n"
+ "smlal2 v30.4s, v11.8h, v24.8h\n"
+ "smlal2 v27.4s, v11.8h, v22.8h\n"
+ "ldr x22, [x14, #0x30]\n"
+ "ldr x21, [x14, #0x40]\n"
+ "smlal v20.4s, v28.4h, v22.4h\n"
+ "smlal2 v1.4s, v28.8h, v22.8h\n"
+ "ldr x20, [x14, #0x48]\n"
+ "ldr x24, [x14, #0x50]\n"
+ "ldr d28, [x25, x16]\n"
+ "ldr d26, [x23, x16]\n"
+ "smlal v8.4s, v18.4h, v9.4h\n"
+ "smlal v3.4s, v19.4h, v9.4h\n"
+ "ldr d11, [x22, x16]\n"
+ "ldr d0, [x21, x16]\n"
+ "smlal v10.4s, v19.4h, v24.4h\n"
+ "smlal2 v21.4s, v18.8h, v9.8h\n"
+ "ldr d18, [x20, x16]\n"
+ "smlal2 v30.4s, v19.8h, v9.8h\n"
+ "smlal2 v27.4s, v19.8h, v24.8h\n"
+ "ldr x20, [x14, #0x58]\n"
+ "smlal v20.4s, v19.4h, v4.4h\n"
+ "smlal2 v1.4s, v19.8h, v4.8h\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "ldr x23, [x14, #0x60]\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "smlal v8.4s, v19.4h, v25.4h\n"
+ "ldr x22, [x14, #0x68]\n"
+ "ldr x21, [x14, #0x70]\n"
+ "smlal v10.4s, v23.4h, v7.4h\n"
+ "ushll v11.8h, v11.8b, #0x0\n"
+ "smlal2 v21.4s, v19.8h, v25.8h\n"
+ "ldr d19, [x24, x16]\n"
+ "smlal v3.4s, v28.4h, v13.4h\n"
+ "smlal2 v30.4s, v28.8h, v13.8h\n"
+ "ldr d28, [x20, x16]\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "smlal v20.4s, v23.4h, v14.4h\n"
+ "smlal2 v1.4s, v23.8h, v14.8h\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
+ "ldr x20, [x14, #0x78]\n"
+ "smlal2 v27.4s, v23.8h, v7.8h\n"
+ "smlal v8.4s, v23.4h, v13.4h\n"
+ "ushll v19.8h, v19.8b, #0x0\n"
+ "tst x17, #0x7\n"
+ "smlal v10.4s, v11.4h, v2.4h\n"
+ "smlal2 v21.4s, v23.8h, v13.8h\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "add x12, x12, #0x20\n"
+ "smlal v3.4s, v23.4h, v25.4h\n"
+ "smlal2 v30.4s, v23.8h, v25.8h\n"
+ "ldr d23, [x23, x16]\n"
+ "add x11, x11, #0x20\n"
+ "smlal v20.4s, v26.4h, v24.4h\n"
+ "smlal2 v1.4s, v26.8h, v24.8h\n"
+ "smlal2 v27.4s, v11.8h, v2.8h\n"
+ "ldr d11, [x22, x16]\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal v10.4s, v18.4h, v25.4h\n"
+ "smlal2 v21.4s, v26.8h, v22.8h\n"
+ "ldr d26, [x21, x16]\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "smlal2 v2.4s, v13.8h, v7.8h\n"
- "smlal v3.4s, v13.4h, v8.4h\n"
- "ldr x24, [x12, #0x50]\n"
- "ldr x20, [x12, #0x58]\n"
- "smlal v0.4s, v21.4h, v29.4h\n"
- "smlal v6.4s, v27.4h, v17.4h\n"
+ "smlal v3.4s, v18.4h, v4.4h\n"
+ "smlal2 v30.4s, v18.8h, v4.8h\n"
+ "smlal v20.4s, v0.4h, v9.4h\n"
+ "smlal2 v1.4s, v0.8h, v9.8h\n"
"ushll v11.8h, v11.8b, #0x0\n"
- "ldr x23, [x12, #0x60]\n"
- "smlal2 v9.4s, v27.8h, v16.8h\n"
- "smlal v28.4s, v15.4h, v7.4h\n"
- "ldr x22, [x12, #0x68]\n"
- "ldr x21, [x12, #0x70]\n"
- "smlal2 v30.4s, v13.8h, v8.8h\n"
- "ldr d13, [x24, x14]\n"
- "smlal2 v22.4s, v21.8h, v29.8h\n"
- "ldr d21, [x20, x14]\n"
- "smlal2 v2.4s, v27.8h, v17.8h\n"
- "smlal v3.4s, v27.4h, v29.4h\n"
- "ushll v13.8h, v13.8b, #0x0\n"
- "ldr x20, [x12, #0x78]\n"
- "smlal v0.4s, v27.4h, v8.4h\n"
- "smlal v6.4s, v10.4h, v4.4h\n"
- "ushll v21.8h, v21.8b, #0x0\n"
- "tst x16, #0x7\n"
- "smlal2 v9.4s, v15.8h, v7.8h\n"
- "smlal v28.4s, v23.4h, v1.4h\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
- "smlal2 v30.4s, v27.8h, v29.8h\n"
- "smlal2 v22.4s, v27.8h, v8.8h\n"
- "ldr d27, [x23, x14]\n"
- "ushll v27.8h, v27.8b, #0x0\n"
- "smlal2 v2.4s, v10.8h, v4.8h\n"
- "ldr d10, [x22, x14]\n"
- "smlal v3.4s, v15.4h, v19.4h\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "smlal v0.4s, v11.4h, v31.4h\n"
- "smlal v6.4s, v11.4h, v8.4h\n"
- "smlal2 v9.4s, v23.8h, v1.8h\n"
- "smlal v28.4s, v11.4h, v4.4h\n"
- "smlal2 v30.4s, v15.8h, v19.8h\n"
- "ldr d15, [x21, x14]\n"
- "smlal2 v22.4s, v11.8h, v31.8h\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "smlal2 v2.4s, v11.8h, v8.8h\n"
- "ldr d8, [x20, x14]\n"
+ "smlal2 v27.4s, v18.8h, v25.8h\n"
+ "ldr d25, [x20, x16]\n"
+ "smlal v8.4s, v0.4h, v24.4h\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "smlal v10.4s, v28.4h, v9.4h\n"
+ "smlal2 v21.4s, v0.8h, v24.8h\n"
+ "add x16, x16, #0x8\n"
+ "smlal v3.4s, v19.4h, v22.4h\n"
+ "smlal2 v30.4s, v19.8h, v22.8h\n"
+ "smlal v20.4s, v18.4h, v2.4h\n"
+ "smlal2 v1.4s, v18.8h, v2.8h\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "smlal2 v27.4s, v28.8h, v9.8h\n"
+ "smlal v8.4s, v18.4h, v14.4h\n"
+ "smlal v10.4s, v11.4h, v4.4h\n"
+ "smlal2 v21.4s, v18.8h, v14.8h\n"
"smlal v3.4s, v23.4h, v7.4h\n"
- "ushll v8.8h, v8.8b, #0x0\n"
- "smlal v0.4s, v13.4h, v19.4h\n"
- "smlal v6.4s, v21.4h, v1.4h\n"
- "add x14, x14, #0x8\n"
- "smlal2 v9.4s, v11.8h, v4.8h\n"
- "smlal v28.4s, v13.4h, v17.4h\n"
"smlal2 v30.4s, v23.8h, v7.8h\n"
- "smlal2 v22.4s, v13.8h, v19.8h\n"
- "smlal2 v2.4s, v21.8h, v1.8h\n"
- "smlal v3.4s, v11.4h, v16.4h\n"
- "smlal v0.4s, v27.4h, v17.4h\n"
- "smlal v6.4s, v10.4h, v31.4h\n"
- "smlal2 v9.4s, v13.8h, v17.8h\n"
- "smlal v28.4s, v27.4h, v29.4h\n"
- "sqrdmulh v28.4s, v28.4s, v26.4s\n"
- "smlal2 v30.4s, v11.8h, v16.8h\n"
- "smlal2 v22.4s, v27.8h, v17.8h\n"
- "and v1.16b, v28.16b, v25.16b\n"
- "smlal2 v2.4s, v10.8h, v31.8h\n"
- "smlal v3.4s, v21.4h, v31.4h\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "smlal v0.4s, v15.4h, v16.4h\n"
- "smlal v6.4s, v15.4h, v29.4h\n"
- "sqadd v28.4s, v28.4s, v1.4s\n"
- "smlal2 v9.4s, v27.8h, v29.8h\n"
- "smlal2 v30.4s, v21.8h, v31.8h\n"
- "sqrdmulh v9.4s, v9.4s, v24.4s\n"
- "smlal2 v22.4s, v15.8h, v16.8h\n"
- "smlal2 v2.4s, v15.8h, v29.8h\n"
- "and v27.16b, v9.16b, v20.16b\n"
- "smlal v3.4s, v10.4h, v4.4h\n"
- "smlal v0.4s, v8.4h, v4.4h\n"
- "sqrdmulh v3.4s, v3.4s, v26.4s\n"
- "smlal v6.4s, v8.4h, v16.4h\n"
- "smlal2 v30.4s, v10.8h, v4.8h\n"
- "sqrdmulh v0.4s, v0.4s, v26.4s\n"
- "smlal2 v22.4s, v8.8h, v4.8h\n"
- "smlal2 v2.4s, v8.8h, v16.8h\n"
- "sqrdmulh v6.4s, v6.4s, v26.4s\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v16.16b, v3.16b, v25.16b\n"
- "sqrdmulh v30.4s, v30.4s, v24.4s\n"
- "and v4.16b, v0.16b, v25.16b\n"
- "sqrdmulh v22.4s, v22.4s, v24.4s\n"
- "and v17.16b, v6.16b, v25.16b\n"
- "sqrdmulh v2.4s, v2.4s, v24.4s\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v8.16b, v30.16b, v20.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "and v26.16b, v22.16b, v20.16b\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "and v11.16b, v2.16b, v20.16b\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v0.4s, v0.4s, v4.4s\n"
+ "smlal v20.4s, v19.4h, v7.4h\n"
+ "smlal2 v1.4s, v19.8h, v7.8h\n"
+ "smlal2 v27.4s, v11.8h, v4.8h\n"
+ "smlal v8.4s, v28.4h, v4.4h\n"
+ "smlal v10.4s, v26.4h, v13.4h\n"
+ "smlal2 v21.4s, v28.8h, v4.8h\n"
+ "smlal v3.4s, v26.4h, v14.4h\n"
+ "smlal2 v30.4s, v26.8h, v14.8h\n"
+ "smlal v20.4s, v23.4h, v13.4h\n"
+ "smlal2 v1.4s, v23.8h, v13.8h\n"
+ "smlal2 v27.4s, v26.8h, v13.8h\n"
+ "smlal v8.4s, v11.4h, v2.4h\n"
+ "smlal v10.4s, v25.4h, v14.4h\n"
+ "smlal2 v21.4s, v11.8h, v2.8h\n"
+ "smlal v3.4s, v25.4h, v2.4h\n"
+ "smlal2 v30.4s, v25.8h, v2.8h\n"
+ "sqrdmulh v20.4s, v20.4s, v16.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v6.4s\n"
+ "smlal2 v27.4s, v25.8h, v14.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v16.4s\n"
+ "and v0.16b, v20.16b, v5.16b\n"
+ "sqrdmulh v10.4s, v10.4s, v16.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v6.4s\n"
+ "and v24.16b, v1.16b, v31.16b\n"
+ "sqrdmulh v3.4s, v3.4s, v16.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "and v22.16b, v8.16b, v5.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v6.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v6.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "and v26.16b, v3.16b, v5.16b\n"
+ "sqadd v20.4s, v20.4s, v0.4s\n"
+ "and v16.16b, v10.16b, v5.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v23.16b, v21.16b, v31.16b\n"
+ "sqadd v1.4s, v1.4s, v24.4s\n"
"sshr v26.4s, v26.4s, #0x1f\n"
- "sqadd v6.4s, v6.4s, v17.4s\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "srshl v28.4s, v28.4s, v25.4s\n"
- "srshl v3.4s, v3.4s, v25.4s\n"
- "sqadd v30.4s, v30.4s, v8.4s\n"
- "srshl v0.4s, v0.4s, v25.4s\n"
- "sqadd v22.4s, v22.4s, v26.4s\n"
- "srshl v6.4s, v6.4s, v25.4s\n"
- "sqadd v2.4s, v2.4s, v11.4s\n"
- "srshl v9.4s, v9.4s, v20.4s\n"
- "sqxtn v28.4h, v28.4s\n"
- "srshl v30.4s, v30.4s, v20.4s\n"
+ "and v7.16b, v30.16b, v31.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v9.16b, v27.16b, v31.16b\n"
+ "sqadd v8.4s, v8.4s, v22.4s\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "sqadd v3.4s, v3.4s, v26.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v16.4s\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "srshl v8.4s, v8.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v23.4s\n"
+ "srshl v3.4s, v3.4s, v5.4s\n"
+ "sqadd v30.4s, v30.4s, v7.4s\n"
+ "srshl v10.4s, v10.4s, v5.4s\n"
+ "sqadd v27.4s, v27.4s, v9.4s\n"
+ "srshl v1.4s, v1.4s, v31.4s\n"
+ "sqxtn v20.4h, v20.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v30.4s, v30.4s, v31.4s\n"
"sqxtn v3.4h, v3.4s\n"
- "srshl v22.4s, v22.4s, v20.4s\n"
- "sqxtn v0.4h, v0.4s\n"
- "srshl v2.4s, v2.4s, v20.4s\n"
- "sqxtn v6.4h, v6.4s\n"
- "sqxtn2 v28.8h, v9.4s\n"
+ "srshl v27.4s, v27.4s, v31.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "sqxtn2 v20.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v21.4s\n"
"sqxtn2 v3.8h, v30.4s\n"
- "sqxtn2 v0.8h, v22.4s\n"
- "sqxtn2 v6.8h, v2.4s\n"
- "sqadd v28.8h, v28.8h, v5.8h\n"
- "sqadd v3.8h, v3.8h, v5.8h\n"
- "sqadd v0.8h, v0.8h, v5.8h\n"
- "sqadd v6.8h, v6.8h, v5.8h\n"
- "smax v28.8h, v28.8h, v14.8h\n"
- "smax v3.8h, v3.8h, v14.8h\n"
- "smax v0.8h, v0.8h, v14.8h\n"
- "smax v6.8h, v6.8h, v14.8h\n"
- "smin v28.8h, v28.8h, v12.8h\n"
- "smin v3.8h, v3.8h, v12.8h\n"
- "smin v0.8h, v0.8h, v12.8h\n"
- "smin v6.8h, v6.8h, v12.8h\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
- "str d28, [x28, x13]\n"
+ "sqxtn2 v10.8h, v27.4s\n"
+ "sqadd v20.8h, v20.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v3.8h, v3.8h, v12.8h\n"
+ "sqadd v10.8h, v10.8h, v12.8h\n"
+ "smax v20.8h, v20.8h, v15.8h\n"
+ "smax v8.8h, v8.8h, v15.8h\n"
+ "smax v3.8h, v3.8h, v15.8h\n"
+ "smax v10.8h, v10.8h, v15.8h\n"
+ "smin v20.8h, v20.8h, v17.8h\n"
+ "smin v8.8h, v8.8h, v17.8h\n"
+ "smin v3.8h, v3.8h, v17.8h\n"
+ "smin v10.8h, v10.8h, v17.8h\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
"uzp1 v3.16b, v3.16b, v3.16b\n"
- "uzp1 v0.16b, v0.16b, v0.16b\n"
- "str d3, [x27, x13]\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "str d0, [x26, x13]\n"
- "str d6, [x25, x13]\n"
- "add x13, x13, #0x8\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "str d20, [x9, x15]\n"
+ "str d8, [x28, x15]\n"
+ "str d3, [x27, x15]\n"
+ "str d10, [x26, x15]\n"
+ "add x15, x15, #0x8\n"
"beq 64f\n"
- "add x11, x11, #0x48\n"
+ "add x13, x13, #0x48\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x16, #2, 5f\n"
- "ld1 { v28.4s }, [x20], #0x10\n"
- "tbz x16, #1, 4f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
- "tbz x16, #0, 7f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "tbz x17, #2, 5f\n"
+ "ld1 { v20.4s }, [x20], #0x10\n"
+ "tbz x17, #1, 4f\n"
+ "ld1 { v1.d }[0], [x20], #0x8\n"
+ "tbz x17, #0, 7f\n"
+ "ld1 { v1.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x16, #0, 7f\n"
- "ld1 { v9.s }[0], [x20]\n"
+ "tbz x17, #0, 7f\n"
+ "ld1 { v1.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x16, #1, 6f\n"
- "ld1 { v28.d }[0], [x20], #0x8\n"
- "tbz x16, #0, 7f\n"
- "ld1 { v28.s }[2], [x20]\n"
+ "tbz x17, #1, 6f\n"
+ "ld1 { v20.d }[0], [x20], #0x8\n"
+ "tbz x17, #0, 7f\n"
+ "ld1 { v20.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 7f\n"
- "ld1 { v28.s }[0], [x20]\n"
+ "tbz x17, #0, 7f\n"
+ "ld1 { v20.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d19, [x11, #0x0]\n"
- "ldr d7, [x11, #0x8]\n"
- "mov v3.16b, v28.16b\n"
- "mov v30.16b, v9.16b\n"
- "ldr d1, [x11, #0x10]\n"
- "ldr d17, [x11, #0x18]\n"
- "mov v0.16b, v28.16b\n"
- "mov v22.16b, v9.16b\n"
- "ldr d8, [x11, #0x20]\n"
- "ldr d31, [x11, #0x28]\n"
- "mov v6.16b, v28.16b\n"
- "mov v2.16b, v9.16b\n"
- "ldr d29, [x11, #0x30]\n"
- "ldr d16, [x11, #0x38]\n"
- "usubl v19.8h, v19.8b, v18.8b\n"
- "usubl v7.8h, v7.8b, v18.8b\n"
- "ldr d4, [x11, #0x40]\n"
- "ldp x24, x23, [x12, #0x0]\n"
- "usubl v1.8h, v1.8b, v18.8b\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "ldp x22, x21, [x12, #0x10]\n"
- "ldr x20, [x12, #0x20]\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "usubl v31.8h, v31.8b, v18.8b\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "add x24, x24, x14\n"
- "add x23, x23, x14\n"
- "add x22, x22, x14\n"
- "add x21, x21, x14\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 9f\n"
- "ld1 { v23.s }[0], [x24], #0x4\n"
- "ld1 { v10.s }[0], [x23], #0x4\n"
- "ld1 { v11.s }[0], [x22], #0x4\n"
- "ld1 { v13.s }[0], [x21], #0x4\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 8f\n"
- "ld1 { v23.h }[2], [x24], #0x2\n"
- "ld1 { v10.h }[2], [x23], #0x2\n"
- "ld1 { v11.h }[2], [x22], #0x2\n"
- "ld1 { v13.h }[2], [x21], #0x2\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 11f\n"
- "ld1 { v23.b }[6], [x24]\n"
- "ld1 { v10.b }[6], [x23]\n"
- "ld1 { v11.b }[6], [x22]\n"
- "ld1 { v13.b }[6], [x21]\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ldr d22, [x13, #0x0]\n"
+ "ldr d24, [x13, #0x8]\n"
+ "mov v8.16b, v20.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldr d9, [x13, #0x10]\n"
+ "ldr d7, [x13, #0x18]\n"
+ "mov v3.16b, v20.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldr d25, [x13, #0x20]\n"
+ "ldr d4, [x13, #0x28]\n"
+ "mov v10.16b, v20.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ldr d13, [x13, #0x30]\n"
+ "ldr d14, [x13, #0x38]\n"
+ "usubl v22.8h, v22.8b, v29.8b\n"
+ "usubl v24.8h, v24.8b, v29.8b\n"
+ "ldr d2, [x13, #0x40]\n"
+ "ldp x24, x23, [x14, #0x0]\n"
+ "usubl v9.8h, v9.8b, v29.8b\n"
+ "usubl v7.8h, v7.8b, v29.8b\n"
+ "usubl v25.8h, v25.8b, v29.8b\n"
+ "usubl v4.8h, v4.8b, v29.8b\n"
+ "usubl v13.8h, v13.8b, v29.8b\n"
+ "usubl v14.8h, v14.8b, v29.8b\n"
+ "ldp x22, x21, [x14, #0x10]\n"
+ "usubl v2.8h, v2.8b, v29.8b\n"
+ "add x24, x24, x16\n"
+ "add x23, x23, x16\n"
+ "ldr x20, [x14, #0x20]\n"
+ "add x22, x22, x16\n"
+ "add x21, x21, x16\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 9f\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "ld1 { v18.s }[0], [x22], #0x4\n"
+ "ld1 { v19.s }[0], [x21], #0x4\n"
+ "ld1 { v23.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 8f\n"
+ "ld1 { v11.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v18.h }[2], [x22], #0x2\n"
+ "ld1 { v19.h }[2], [x21], #0x2\n"
+ "ld1 { v23.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 11f\n"
+ "ld1 { v11.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x23]\n"
+ "ld1 { v18.b }[6], [x22]\n"
+ "ld1 { v19.b }[6], [x21]\n"
+ "ld1 { v23.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x16, #0, 11f\n"
- "ld1 { v23.b }[4], [x24]\n"
- "ld1 { v10.b }[4], [x23]\n"
- "ld1 { v11.b }[4], [x22]\n"
- "ld1 { v13.b }[4], [x21]\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x17, #0, 11f\n"
+ "ld1 { v11.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x23]\n"
+ "ld1 { v18.b }[4], [x22]\n"
+ "ld1 { v19.b }[4], [x21]\n"
+ "ld1 { v23.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x16, #1, 10f\n"
- "ld1 { v23.h }[0], [x24], #0x2\n"
- "ld1 { v10.h }[0], [x23], #0x2\n"
- "ld1 { v11.h }[0], [x22], #0x2\n"
- "ld1 { v13.h }[0], [x21], #0x2\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 11f\n"
- "ld1 { v23.b }[2], [x24]\n"
- "ld1 { v10.b }[2], [x23]\n"
- "ld1 { v11.b }[2], [x22]\n"
- "ld1 { v13.b }[2], [x21]\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x17, #1, 10f\n"
+ "ld1 { v11.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "ld1 { v18.h }[0], [x22], #0x2\n"
+ "ld1 { v19.h }[0], [x21], #0x2\n"
+ "ld1 { v23.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 11f\n"
+ "ld1 { v11.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x23]\n"
+ "ld1 { v18.b }[2], [x22]\n"
+ "ld1 { v19.b }[2], [x21]\n"
+ "ld1 { v23.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 11f\n"
- "ld1 { v23.b }[0], [x24]\n"
- "ld1 { v10.b }[0], [x23]\n"
- "ld1 { v11.b }[0], [x22]\n"
- "ld1 { v13.b }[0], [x21]\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x17, #0, 11f\n"
+ "ld1 { v11.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x23]\n"
+ "ld1 { v18.b }[0], [x22]\n"
+ "ld1 { v19.b }[0], [x21]\n"
+ "ld1 { v23.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "ushll v23.8h, v23.8b, #0x0\n"
- "smlal v28.4s, v23.4h, v8.4h\n"
- "smlal2 v9.4s, v23.8h, v8.8h\n"
- "ldr x20, [x12, #0x28]\n"
- "smlal v3.4s, v23.4h, v17.4h\n"
- "smlal2 v30.4s, v23.8h, v17.8h\n"
- "ushll v10.8h, v10.8b, #0x0\n"
"ushll v11.8h, v11.8b, #0x0\n"
- "smlal v0.4s, v23.4h, v7.4h\n"
- "smlal2 v22.4s, v23.8h, v7.8h\n"
- "add x20, x20, x14\n"
- "smlal v6.4s, v23.4h, v19.4h\n"
- "smlal2 v2.4s, v23.8h, v19.8h\n"
- "ushll v13.8h, v13.8b, #0x0\n"
- "smlal v28.4s, v10.4h, v19.4h\n"
- "smlal2 v9.4s, v10.8h, v19.8h\n"
- "ushll v27.8h, v27.8b, #0x0\n"
- "smlal v3.4s, v11.4h, v1.4h\n"
- "smlal2 v30.4s, v11.8h, v1.8h\n"
- "smlal v28.4s, v13.4h, v31.4h\n"
- "smlal2 v9.4s, v13.8h, v31.8h\n"
- "smlal v3.4s, v13.4h, v8.4h\n"
- "smlal2 v30.4s, v13.8h, v8.8h\n"
- "smlal v0.4s, v13.4h, v1.4h\n"
- "smlal2 v22.4s, v13.8h, v1.8h\n"
- "smlal v6.4s, v13.4h, v7.4h\n"
- "smlal2 v2.4s, v13.8h, v7.8h\n"
- "tbz x16, #2, 13f\n"
- "ld1 { v26.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 12f\n"
- "ld1 { v26.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 15f\n"
- "ld1 { v26.b }[6], [x20]\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "ldr x20, [x14, #0x28]\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
+ "ushll v19.8h, v19.8b, #0x0\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
+ "smlal v20.4s, v11.4h, v25.4h\n"
+ "smlal2 v1.4s, v11.8h, v25.8h\n"
+ "smlal v8.4s, v11.4h, v7.4h\n"
+ "smlal2 v21.4s, v11.8h, v7.8h\n"
+ "add x20, x20, x16\n"
+ "smlal v3.4s, v11.4h, v24.4h\n"
+ "smlal2 v30.4s, v11.8h, v24.8h\n"
+ "smlal v10.4s, v11.4h, v22.4h\n"
+ "smlal2 v27.4s, v11.8h, v22.8h\n"
+ "smlal v20.4s, v28.4h, v22.4h\n"
+ "smlal2 v1.4s, v28.8h, v22.8h\n"
+ "smlal v8.4s, v18.4h, v9.4h\n"
+ "smlal2 v21.4s, v18.8h, v9.8h\n"
+ "smlal v3.4s, v19.4h, v9.4h\n"
+ "smlal2 v30.4s, v19.8h, v9.8h\n"
+ "smlal v10.4s, v19.4h, v24.4h\n"
+ "smlal2 v27.4s, v19.8h, v24.8h\n"
+ "smlal v20.4s, v19.4h, v4.4h\n"
+ "smlal2 v1.4s, v19.8h, v4.8h\n"
+ "smlal v8.4s, v19.4h, v25.4h\n"
+ "smlal2 v21.4s, v19.8h, v25.8h\n"
+ "tbz x17, #2, 13f\n"
+ "ld1 { v6.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 12f\n"
+ "ld1 { v6.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 15f\n"
+ "ld1 { v6.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x16, #0, 15f\n"
- "ld1 { v26.b }[4], [x20]\n"
+ "tbz x17, #0, 15f\n"
+ "ld1 { v6.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x16, #1, 14f\n"
- "ld1 { v26.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 15f\n"
- "ld1 { v26.b }[2], [x20]\n"
+ "tbz x17, #1, 14f\n"
+ "ld1 { v6.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 15f\n"
+ "ld1 { v6.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 15f\n"
- "ld1 { v26.b }[0], [x20]\n"
+ "tbz x17, #0, 15f\n"
+ "ld1 { v6.b }[0], [x20]\n"
"15:" // Oddments: Load (3, 0): Bit 2: End
- "ushll v26.8h, v26.8b, #0x0\n"
- "smlal v0.4s, v26.4h, v29.4h\n"
- "smlal2 v22.4s, v26.8h, v29.8h\n"
- "ldr x20, [x12, #0x30]\n"
- "smlal v28.4s, v27.4h, v16.4h\n"
- "smlal2 v9.4s, v27.8h, v16.8h\n"
- "add x20, x20, x14\n"
- "smlal v3.4s, v27.4h, v29.4h\n"
- "smlal2 v30.4s, v27.8h, v29.8h\n"
- "smlal v0.4s, v27.4h, v8.4h\n"
- "smlal2 v22.4s, v27.8h, v8.8h\n"
- "smlal v6.4s, v27.4h, v17.4h\n"
- "smlal2 v2.4s, v27.8h, v17.8h\n"
- "tbz x16, #2, 17f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 16f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 19f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "ushll v6.8h, v6.8b, #0x0\n"
+ "ldr x20, [x14, #0x30]\n"
+ "smlal v20.4s, v23.4h, v14.4h\n"
+ "smlal2 v1.4s, v23.8h, v14.8h\n"
+ "smlal v8.4s, v23.4h, v13.4h\n"
+ "smlal2 v21.4s, v23.8h, v13.8h\n"
+ "smlal v10.4s, v23.4h, v7.4h\n"
+ "smlal2 v27.4s, v23.8h, v7.8h\n"
+ "smlal v3.4s, v6.4h, v13.4h\n"
+ "smlal2 v30.4s, v6.8h, v13.8h\n"
+ "add x20, x20, x16\n"
+ "smlal v3.4s, v23.4h, v25.4h\n"
+ "smlal2 v30.4s, v23.8h, v25.8h\n"
+ "tbz x17, #2, 17f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 16f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 19f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x16, #0, 19f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x17, #0, 19f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x16, #1, 18f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 19f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x17, #1, 18f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 19f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 19f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x17, #0, 19f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"19:" // Oddments: Load (3, 3): Bit 2: End
- "ushll v23.8h, v23.8b, #0x0\n"
- "ldr x20, [x12, #0x38]\n"
- "smlal v6.4s, v23.4h, v4.4h\n"
- "smlal2 v2.4s, v23.8h, v4.8h\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 21f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 20f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 23f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "ldr x20, [x14, #0x38]\n"
+ "smlal v10.4s, v26.4h, v2.4h\n"
+ "smlal2 v27.4s, v26.8h, v2.8h\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 21f\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 20f\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 23f\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
- "tbz x16, #0, 23f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x17, #0, 23f\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 1): Bit 2: Unset
- "tbz x16, #1, 22f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 23f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x17, #1, 22f\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 23f\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 23f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x17, #0, 23f\n"
+ "ld1 { v31.b }[0], [x20]\n"
"23:" // Oddments: Load (0, 1): Bit 2: End
- "ushll v21.8h, v21.8b, #0x0\n"
- "ldr x20, [x12, #0x40]\n"
- "smlal v28.4s, v21.4h, v7.4h\n"
- "smlal2 v9.4s, v21.8h, v7.8h\n"
- "smlal v3.4s, v21.4h, v19.4h\n"
- "smlal2 v30.4s, v21.8h, v19.8h\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 25f\n"
- "ld1 { v18.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 24f\n"
- "ld1 { v18.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 27f\n"
- "ld1 { v18.b }[6], [x20]\n"
+ "ushll v31.8h, v31.8b, #0x0\n"
+ "ldr x20, [x14, #0x40]\n"
+ "smlal v20.4s, v31.4h, v24.4h\n"
+ "smlal2 v1.4s, v31.8h, v24.8h\n"
+ "smlal v8.4s, v31.4h, v22.4h\n"
+ "smlal2 v21.4s, v31.8h, v22.8h\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 25f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 24f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 27f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
- "tbz x16, #0, 27f\n"
- "ld1 { v18.b }[4], [x20]\n"
+ "tbz x17, #0, 27f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (0, 2): Bit 2: Unset
- "tbz x16, #1, 26f\n"
- "ld1 { v18.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 27f\n"
- "ld1 { v18.b }[2], [x20]\n"
+ "tbz x17, #1, 26f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 27f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 27f\n"
- "ld1 { v18.b }[0], [x20]\n"
+ "tbz x17, #0, 27f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"27:" // Oddments: Load (0, 2): Bit 2: End
- "ushll v18.8h, v18.8b, #0x0\n"
- "ldr x20, [x12, #0x48]\n"
- "smlal v28.4s, v18.4h, v1.4h\n"
- "smlal2 v9.4s, v18.8h, v1.8h\n"
- "smlal v3.4s, v18.4h, v7.4h\n"
- "smlal2 v30.4s, v18.8h, v7.8h\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 29f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 28f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 31f\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "ldr x20, [x14, #0x48]\n"
+ "smlal v20.4s, v26.4h, v9.4h\n"
+ "smlal2 v1.4s, v26.8h, v9.8h\n"
+ "smlal v8.4s, v26.4h, v24.4h\n"
+ "smlal2 v21.4s, v26.8h, v24.8h\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 29f\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 28f\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 31f\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x16, #0, 31f\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x17, #0, 31f\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x16, #1, 30f\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 31f\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x17, #1, 30f\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 31f\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 31f\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x17, #0, 31f\n"
+ "ld1 { v18.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ushll v15.8h, v15.8b, #0x0\n"
- "ldr x20, [x12, #0x50]\n"
- "smlal v28.4s, v15.4h, v4.4h\n"
- "smlal2 v9.4s, v15.8h, v4.8h\n"
- "smlal v3.4s, v15.4h, v16.4h\n"
- "smlal2 v30.4s, v15.8h, v16.8h\n"
- "add x20, x20, x14\n"
- "smlal v0.4s, v15.4h, v31.4h\n"
- "smlal2 v22.4s, v15.8h, v31.8h\n"
- "smlal v6.4s, v15.4h, v8.4h\n"
- "smlal2 v2.4s, v15.8h, v8.8h\n"
- "tbz x16, #2, 33f\n"
- "ld1 { v20.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 32f\n"
- "ld1 { v20.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 35f\n"
- "ld1 { v20.b }[6], [x20]\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
+ "ldr x20, [x14, #0x50]\n"
+ "smlal v20.4s, v18.4h, v2.4h\n"
+ "smlal2 v1.4s, v18.8h, v2.8h\n"
+ "smlal v8.4s, v18.4h, v14.4h\n"
+ "smlal2 v21.4s, v18.8h, v14.8h\n"
+ "smlal v3.4s, v18.4h, v4.4h\n"
+ "smlal2 v30.4s, v18.8h, v4.8h\n"
+ "smlal v10.4s, v18.4h, v25.4h\n"
+ "add x20, x20, x16\n"
+ "smlal2 v27.4s, v18.8h, v25.8h\n"
+ "tbz x17, #2, 33f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 32f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 35f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
- "tbz x16, #0, 35f\n"
- "ld1 { v20.b }[4], [x20]\n"
+ "tbz x17, #0, 35f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (1, 0): Bit 2: Unset
- "tbz x16, #1, 34f\n"
- "ld1 { v20.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 35f\n"
- "ld1 { v20.b }[2], [x20]\n"
+ "tbz x17, #1, 34f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 35f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 35f\n"
- "ld1 { v20.b }[0], [x20]\n"
+ "tbz x17, #0, 35f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"35:" // Oddments: Load (1, 0): Bit 2: End
- "ushll v20.8h, v20.8b, #0x0\n"
- "ldr x20, [x12, #0x58]\n"
- "smlal v28.4s, v20.4h, v17.4h\n"
- "smlal2 v9.4s, v20.8h, v17.8h\n"
- "smlal v0.4s, v20.4h, v19.4h\n"
- "smlal2 v22.4s, v20.8h, v19.8h\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 37f\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 36f\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 39f\n"
- "ld1 { v11.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x14, #0x58]\n"
+ "smlal v20.4s, v16.4h, v7.4h\n"
+ "smlal2 v1.4s, v16.8h, v7.8h\n"
+ "smlal v3.4s, v16.4h, v22.4h\n"
+ "smlal2 v30.4s, v16.8h, v22.8h\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 37f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 36f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 39f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x16, #0, 39f\n"
- "ld1 { v11.b }[4], [x20]\n"
+ "tbz x17, #0, 39f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x16, #1, 38f\n"
- "ld1 { v11.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 39f\n"
- "ld1 { v11.b }[2], [x20]\n"
+ "tbz x17, #1, 38f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 39f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 39f\n"
- "ld1 { v11.b }[0], [x20]\n"
+ "tbz x17, #0, 39f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"39:" // Oddments: Load (1, 3): Bit 2: End
- "ushll v11.8h, v11.8b, #0x0\n"
- "ldr x20, [x12, #0x60]\n"
- "smlal v3.4s, v11.4h, v31.4h\n"
- "smlal2 v30.4s, v11.8h, v31.8h\n"
- "smlal v6.4s, v11.4h, v1.4h\n"
- "smlal2 v2.4s, v11.8h, v1.8h\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 41f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 40f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 43f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "ldr x20, [x14, #0x60]\n"
+ "smlal v8.4s, v26.4h, v4.4h\n"
+ "smlal2 v21.4s, v26.8h, v4.8h\n"
+ "smlal v10.4s, v26.4h, v9.4h\n"
+ "smlal2 v27.4s, v26.8h, v9.8h\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 41f\n"
+ "ld1 { v5.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 40f\n"
+ "ld1 { v5.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 43f\n"
+ "ld1 { v5.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x16, #0, 43f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x17, #0, 43f\n"
+ "ld1 { v5.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x16, #1, 42f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 43f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x17, #1, 42f\n"
+ "ld1 { v5.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 43f\n"
+ "ld1 { v5.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 43f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x17, #0, 43f\n"
+ "ld1 { v5.b }[0], [x20]\n"
"43:" // Oddments: Load (2, 0): Bit 2: End
- "ushll v23.8h, v23.8b, #0x0\n"
- "ldr x20, [x12, #0x68]\n"
- "smlal v28.4s, v23.4h, v29.4h\n"
- "smlal2 v9.4s, v23.8h, v29.8h\n"
- "smlal v0.4s, v23.4h, v17.4h\n"
- "smlal2 v22.4s, v23.8h, v17.8h\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 45f\n"
- "ld1 { v20.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 44f\n"
- "ld1 { v20.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 47f\n"
- "ld1 { v20.b }[6], [x20]\n"
+ "ushll v5.8h, v5.8b, #0x0\n"
+ "ldr x20, [x14, #0x68]\n"
+ "smlal v20.4s, v5.4h, v13.4h\n"
+ "smlal2 v1.4s, v5.8h, v13.8h\n"
+ "smlal v3.4s, v5.4h, v7.4h\n"
+ "smlal2 v30.4s, v5.8h, v7.8h\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 45f\n"
+ "ld1 { v19.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 44f\n"
+ "ld1 { v19.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 47f\n"
+ "ld1 { v19.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x16, #0, 47f\n"
- "ld1 { v20.b }[4], [x20]\n"
+ "tbz x17, #0, 47f\n"
+ "ld1 { v19.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x16, #1, 46f\n"
- "ld1 { v20.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 47f\n"
- "ld1 { v20.b }[2], [x20]\n"
+ "tbz x17, #1, 46f\n"
+ "ld1 { v19.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 47f\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 47f\n"
- "ld1 { v20.b }[0], [x20]\n"
+ "tbz x17, #0, 47f\n"
+ "ld1 { v19.b }[0], [x20]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "ushll v20.8h, v20.8b, #0x0\n"
- "ldr x20, [x12, #0x70]\n"
- "smlal v3.4s, v20.4h, v4.4h\n"
- "smlal2 v30.4s, v20.8h, v4.8h\n"
- "smlal v6.4s, v20.4h, v31.4h\n"
- "smlal2 v2.4s, v20.8h, v31.8h\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 49f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 48f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 51f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "ushll v19.8h, v19.8b, #0x0\n"
+ "ldr x20, [x14, #0x70]\n"
+ "smlal v8.4s, v19.4h, v2.4h\n"
+ "smlal2 v21.4s, v19.8h, v2.8h\n"
+ "smlal v10.4s, v19.4h, v4.4h\n"
+ "smlal2 v27.4s, v19.8h, v4.8h\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 49f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 48f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 51f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x16, #0, 51f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "tbz x17, #0, 51f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x16, #1, 50f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 51f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "tbz x17, #1, 50f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 51f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 51f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "tbz x17, #0, 51f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ushll v8.8h, v8.8b, #0x0\n"
- "ldr x20, [x12, #0x78]\n"
- "smlal v0.4s, v8.4h, v16.4h\n"
- "smlal2 v22.4s, v8.8h, v16.8h\n"
- "smlal v6.4s, v8.4h, v29.4h\n"
- "smlal2 v2.4s, v8.8h, v29.8h\n"
- "add x20, x20, x14\n"
- "tbz x16, #2, 53f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
- "tbz x16, #1, 52f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
- "tbz x16, #0, 55f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x14, #0x78]\n"
+ "smlal v3.4s, v16.4h, v14.4h\n"
+ "smlal2 v30.4s, v16.8h, v14.8h\n"
+ "smlal v10.4s, v16.4h, v13.4h\n"
+ "smlal2 v27.4s, v16.8h, v13.8h\n"
+ "add x20, x20, x16\n"
+ "tbz x17, #2, 53f\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
+ "tbz x17, #1, 52f\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
+ "tbz x17, #0, 55f\n"
+ "ld1 { v28.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x16, #0, 55f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "tbz x17, #0, 55f\n"
+ "ld1 { v28.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x16, #1, 54f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
- "tbz x16, #0, 55f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "tbz x17, #1, 54f\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
+ "tbz x17, #0, 55f\n"
+ "ld1 { v28.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 55f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "tbz x17, #0, 55f\n"
+ "ld1 { v28.b }[0], [x20]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ushll v8.8h, v8.8b, #0x0\n"
- "smlal v0.4s, v8.4h, v4.4h\n"
- "smlal2 v22.4s, v8.8h, v4.8h\n"
- "smlal v6.4s, v8.4h, v16.4h\n"
- "smlal2 v2.4s, v8.8h, v16.8h\n"
- "tbz x16, #2, 57f\n"
- "ld1 { v7.4s }, [x10], #0x10\n"
- "ld1 { v23.4s }, [x9], #0x10\n"
- "tbz x16, #1, 56f\n"
- "ld1 { v11.d }[0], [x10], #0x8\n"
- "ld1 { v27.d }[0], [x9], #0x8\n"
- "tbz x16, #0, 59f\n"
- "ld1 { v11.s }[2], [x10]\n"
- "ld1 { v27.s }[2], [x9]\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "smlal v3.4s, v28.4h, v2.4h\n"
+ "smlal2 v30.4s, v28.8h, v2.8h\n"
+ "smlal v10.4s, v28.4h, v14.4h\n"
+ "smlal2 v27.4s, v28.8h, v14.8h\n"
+ "tbz x17, #2, 57f\n"
+ "ld1 { v29.4s }, [x12], #0x10\n"
+ "ld1 { v19.4s }, [x11], #0x10\n"
+ "tbz x17, #1, 56f\n"
+ "ld1 { v7.d }[0], [x12], #0x8\n"
+ "ld1 { v18.d }[0], [x11], #0x8\n"
+ "tbz x17, #0, 59f\n"
+ "ld1 { v7.s }[2], [x12]\n"
+ "ld1 { v18.s }[2], [x11]\n"
"b 59f\n"
"56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x16, #0, 59f\n"
- "ld1 { v11.s }[0], [x10]\n"
- "ld1 { v27.s }[0], [x9]\n"
+ "tbz x17, #0, 59f\n"
+ "ld1 { v7.s }[0], [x12]\n"
+ "ld1 { v18.s }[0], [x11]\n"
"b 59f\n"
"57:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x16, #1, 58f\n"
- "ld1 { v7.d }[0], [x10], #0x8\n"
- "ld1 { v23.d }[0], [x9], #0x8\n"
- "tbz x16, #0, 59f\n"
- "ld1 { v7.s }[2], [x10]\n"
- "ld1 { v23.s }[2], [x9]\n"
+ "tbz x17, #1, 58f\n"
+ "ld1 { v29.d }[0], [x12], #0x8\n"
+ "ld1 { v19.d }[0], [x11], #0x8\n"
+ "tbz x17, #0, 59f\n"
+ "ld1 { v29.s }[2], [x12]\n"
+ "ld1 { v19.s }[2], [x11]\n"
"b 59f\n"
"58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 59f\n"
- "ld1 { v7.s }[0], [x10]\n"
- "ld1 { v23.s }[0], [x9]\n"
+ "tbz x17, #0, 59f\n"
+ "ld1 { v29.s }[0], [x12]\n"
+ "ld1 { v19.s }[0], [x11]\n"
"59:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v28.4s, v28.4s, v7.4s\n"
- "and v20.16b, v28.16b, v23.16b\n"
- "add x28, x28, x13\n"
- "add x27, x27, x13\n"
- "sqrdmulh v9.4s, v9.4s, v11.4s\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "add x26, x26, x13\n"
- "add x25, x25, x13\n"
- "and v4.16b, v9.16b, v27.16b\n"
- "sqrdmulh v3.4s, v3.4s, v7.4s\n"
- "sqrdmulh v0.4s, v0.4s, v7.4s\n"
- "sqrdmulh v6.4s, v6.4s, v7.4s\n"
- "sqadd v28.4s, v28.4s, v20.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "and v19.16b, v3.16b, v23.16b\n"
- "sqrdmulh v30.4s, v30.4s, v11.4s\n"
- "and v29.16b, v0.16b, v23.16b\n"
- "sqrdmulh v22.4s, v22.4s, v11.4s\n"
- "and v26.16b, v6.16b, v23.16b\n"
- "sqrdmulh v2.4s, v2.4s, v11.4s\n"
- "sqadd v9.4s, v9.4s, v4.4s\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "and v17.16b, v30.16b, v27.16b\n"
- "sshr v29.4s, v29.4s, #0x1f\n"
- "and v8.16b, v22.16b, v27.16b\n"
+ "sqrdmulh v20.4s, v20.4s, v29.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v7.4s\n"
+ "add x9, x9, x15\n"
+ "add x28, x28, x15\n"
+ "sqrdmulh v8.4s, v8.4s, v29.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v29.4s\n"
+ "add x27, x27, x15\n"
+ "add x26, x26, x15\n"
+ "sqrdmulh v10.4s, v10.4s, v29.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v7.4s\n"
+ "and v24.16b, v20.16b, v19.16b\n"
+ "and v28.16b, v1.16b, v18.16b\n"
+ "and v26.16b, v8.16b, v19.16b\n"
+ "and v0.16b, v3.16b, v19.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v7.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v7.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
"sshr v26.4s, v26.4s, #0x1f\n"
- "and v13.16b, v2.16b, v27.16b\n"
- "sqadd v3.4s, v3.4s, v19.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v0.4s, v0.4s, v29.4s\n"
- "sshr v8.4s, v8.4s, #0x1f\n"
- "sqadd v6.4s, v6.4s, v26.4s\n"
- "sshr v13.4s, v13.4s, #0x1f\n"
- "srshl v28.4s, v28.4s, v23.4s\n"
- "srshl v3.4s, v3.4s, v23.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "srshl v0.4s, v0.4s, v23.4s\n"
- "sqadd v22.4s, v22.4s, v8.4s\n"
- "srshl v6.4s, v6.4s, v23.4s\n"
- "sqadd v2.4s, v2.4s, v13.4s\n"
- "srshl v9.4s, v9.4s, v27.4s\n"
- "sqxtn v28.4h, v28.4s\n"
- "srshl v30.4s, v30.4s, v27.4s\n"
+ "and v11.16b, v21.16b, v18.16b\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "and v25.16b, v30.16b, v18.16b\n"
+ "sqadd v20.4s, v20.4s, v24.4s\n"
+ "and v5.16b, v10.16b, v19.16b\n"
+ "sqadd v1.4s, v1.4s, v28.4s\n"
+ "and v4.16b, v27.16b, v18.16b\n"
+ "sqadd v8.4s, v8.4s, v26.4s\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v3.4s, v3.4s, v0.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "srshl v20.4s, v20.4s, v19.4s\n"
+ "srshl v8.4s, v8.4s, v19.4s\n"
+ "sqadd v10.4s, v10.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v11.4s\n"
+ "srshl v3.4s, v3.4s, v19.4s\n"
+ "sqadd v30.4s, v30.4s, v25.4s\n"
+ "sqadd v27.4s, v27.4s, v4.4s\n"
+ "srshl v1.4s, v1.4s, v18.4s\n"
+ "srshl v10.4s, v10.4s, v19.4s\n"
+ "sqxtn v20.4h, v20.4s\n"
+ "srshl v21.4s, v21.4s, v18.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v30.4s, v30.4s, v18.4s\n"
"sqxtn v3.4h, v3.4s\n"
- "srshl v22.4s, v22.4s, v27.4s\n"
- "sqxtn v0.4h, v0.4s\n"
- "srshl v2.4s, v2.4s, v27.4s\n"
- "sqxtn v6.4h, v6.4s\n"
- "sqxtn2 v28.8h, v9.4s\n"
+ "srshl v27.4s, v27.4s, v18.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "sqxtn2 v20.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v21.4s\n"
"sqxtn2 v3.8h, v30.4s\n"
- "sqxtn2 v0.8h, v22.4s\n"
- "sqxtn2 v6.8h, v2.4s\n"
- "sqadd v28.8h, v28.8h, v5.8h\n"
- "sqadd v3.8h, v3.8h, v5.8h\n"
- "sqadd v0.8h, v0.8h, v5.8h\n"
- "sqadd v6.8h, v6.8h, v5.8h\n"
- "smax v28.8h, v28.8h, v14.8h\n"
- "smax v3.8h, v3.8h, v14.8h\n"
- "smax v0.8h, v0.8h, v14.8h\n"
- "smax v6.8h, v6.8h, v14.8h\n"
- "smin v28.8h, v28.8h, v12.8h\n"
- "smin v3.8h, v3.8h, v12.8h\n"
- "smin v0.8h, v0.8h, v12.8h\n"
- "smin v6.8h, v6.8h, v12.8h\n"
- "uzp1 v28.16b, v28.16b, v28.16b\n"
+ "sqxtn2 v10.8h, v27.4s\n"
+ "sqadd v20.8h, v20.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v3.8h, v3.8h, v12.8h\n"
+ "sqadd v10.8h, v10.8h, v12.8h\n"
+ "smax v20.8h, v20.8h, v15.8h\n"
+ "smax v8.8h, v8.8h, v15.8h\n"
+ "smax v3.8h, v3.8h, v15.8h\n"
+ "smax v10.8h, v10.8h, v15.8h\n"
+ "smin v20.8h, v20.8h, v17.8h\n"
+ "smin v8.8h, v8.8h, v17.8h\n"
+ "smin v3.8h, v3.8h, v17.8h\n"
+ "smin v10.8h, v10.8h, v17.8h\n"
+ "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
"uzp1 v3.16b, v3.16b, v3.16b\n"
- "uzp1 v0.16b, v0.16b, v0.16b\n"
- "uzp1 v6.16b, v6.16b, v6.16b\n"
- "tbz x16, #2, 61f\n"
- "st1 { v28.s }[0], [x28], #0x4\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "tbz x17, #2, 61f\n"
+ "st1 { v20.s }[0], [x9], #0x4\n"
+ "st1 { v8.s }[0], [x28], #0x4\n"
"st1 { v3.s }[0], [x27], #0x4\n"
- "st1 { v0.s }[0], [x26], #0x4\n"
- "st1 { v6.s }[0], [x25], #0x4\n"
- "tbz x16, #1, 60f\n"
- "st1 { v28.h }[2], [x28], #0x2\n"
+ "st1 { v10.s }[0], [x26], #0x4\n"
+ "tbz x17, #1, 60f\n"
+ "st1 { v20.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x28], #0x2\n"
"st1 { v3.h }[2], [x27], #0x2\n"
- "st1 { v0.h }[2], [x26], #0x2\n"
- "st1 { v6.h }[2], [x25], #0x2\n"
- "tbz x16, #0, 63f\n"
- "st1 { v28.b }[6], [x28], #0x1\n"
+ "st1 { v10.h }[2], [x26], #0x2\n"
+ "tbz x17, #0, 63f\n"
+ "st1 { v20.b }[6], [x9], #0x1\n"
+ "st1 { v8.b }[6], [x28], #0x1\n"
"st1 { v3.b }[6], [x27], #0x1\n"
- "st1 { v0.b }[6], [x26], #0x1\n"
- "st1 { v6.b }[6], [x25], #0x1\n"
+ "st1 { v10.b }[6], [x26], #0x1\n"
"b 63f\n"
"60:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x16, #0, 63f\n"
- "st1 { v28.b }[4], [x28], #0x1\n"
+ "tbz x17, #0, 63f\n"
+ "st1 { v20.b }[4], [x9], #0x1\n"
+ "st1 { v8.b }[4], [x28], #0x1\n"
"st1 { v3.b }[4], [x27], #0x1\n"
- "st1 { v0.b }[4], [x26], #0x1\n"
- "st1 { v6.b }[4], [x25], #0x1\n"
+ "st1 { v10.b }[4], [x26], #0x1\n"
"b 63f\n"
"61:" // Oddments: Bit 2: Unset
- "tbz x16, #1, 62f\n"
- "st1 { v28.h }[0], [x28], #0x2\n"
+ "tbz x17, #1, 62f\n"
+ "st1 { v20.h }[0], [x9], #0x2\n"
+ "st1 { v8.h }[0], [x28], #0x2\n"
"st1 { v3.h }[0], [x27], #0x2\n"
- "st1 { v0.h }[0], [x26], #0x2\n"
- "st1 { v6.h }[0], [x25], #0x2\n"
- "tbz x16, #0, 63f\n"
- "st1 { v28.b }[2], [x28], #0x1\n"
+ "st1 { v10.h }[0], [x26], #0x2\n"
+ "tbz x17, #0, 63f\n"
+ "st1 { v20.b }[2], [x9], #0x1\n"
+ "st1 { v8.b }[2], [x28], #0x1\n"
"st1 { v3.b }[2], [x27], #0x1\n"
- "st1 { v0.b }[2], [x26], #0x1\n"
- "st1 { v6.b }[2], [x25], #0x1\n"
+ "st1 { v10.b }[2], [x26], #0x1\n"
"b 63f\n"
"62:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x16, #0, 63f\n"
- "st1 { v28.b }[0], [x28], #0x1\n"
+ "tbz x17, #0, 63f\n"
+ "st1 { v20.b }[0], [x9], #0x1\n"
+ "st1 { v8.b }[0], [x28], #0x1\n"
"st1 { v3.b }[0], [x27], #0x1\n"
- "st1 { v0.b }[0], [x26], #0x1\n"
- "st1 { v6.b }[0], [x25], #0x1\n"
+ "st1 { v10.b }[0], [x26], #0x1\n"
"63:" // Oddments: Bit 2: End
"64:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
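The sqrdmulh / and+sshr+sqadd / srshl / sqxtn / sqadd / smax+smin sequences that close both the main loop and the oddments path above are the usual gemmlowp-style fixed-point requantization of the int32 accumulators. The scalar C++ sketch below shows what one lane of that epilogue computes; it is illustrative only (requantize_lane is a hypothetical helper, not a library API), and it assumes the shift is stored negated, matching the vector loaded from requant_shifts and consumed by srshl.

#include <cstdint>

// Illustrative scalar equivalent of the NEON requantization epilogue
// (hypothetical helper, not an Arm Compute Library API).
static inline uint8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                                      int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: saturating rounding doubling multiply, keeping the high 32 bits
    // (the acc == mul == INT32_MIN saturation corner case is omitted here).
    int64_t prod = static_cast<int64_t>(acc) * mul;
    int32_t high = static_cast<int32_t>((2 * prod + (int64_t{1} << 31)) >> 32);

    // and + sshr #31 + sqadd build a -1 nudge for negative values so that the
    // rounding right shift performed by srshl (with a negative shift amount)
    // rounds ties away from zero rather than toward +infinity.
    int32_t rshift = -shift;
    if (rshift > 0)
    {
        int32_t nudge = (high < 0) ? -1 : 0;
        high = static_cast<int32_t>(
            (static_cast<int64_t>(high) + nudge + (int64_t{1} << (rshift - 1))) >> rshift);
    }

    // sqxtn narrows, sqadd adds the output offset (c_offset), smax/smin clamp
    // to [minval, maxval], and uzp1/str emit the final uint8 result.
    int64_t out = static_cast<int64_t>(high) + c_offset;
    out = out < minval ? minval : (out > maxval ? maxval : out);
    return static_cast<uint8_t>(out);
}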
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 6cb10a7bb2..7ebd3a5620 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[25];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -100,1292 +100,1292 @@ void a64_u8qa_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x3, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v6.16b }, [x20]\n"
+ "mov x4, #0x0\n"
+ "mov x5, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x6, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x16, x3, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v16.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_c_offset]\n"
"add x20, x23, %[offsetof_Requantize32_minval]\n"
- "ld1r { v22.8h }, [x21]\n"
- "ld1r { v13.8h }, [x20]\n"
+ "ld1r { v12.8h }, [x21]\n"
+ "ld1r { v14.8h }, [x20]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "mov x17, #0x0\n"
- "ld1r { v5.8h }, [x20]\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d12, [x14, #0x0]\n"
- "ldr d11, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "ldr d25, [x14, #0x10]\n"
- "ldr d24, [x14, #0x18]\n"
- "usubl v11.8h, v11.8b, v6.8b\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "ldr d23, [x14, #0x20]\n"
- "ldr d7, [x14, #0x28]\n"
- "usubl v24.8h, v24.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "ldr d3, [x14, #0x30]\n"
- "ldr d9, [x14, #0x38]\n"
- "usubl v7.8h, v7.8b, v6.8b\n"
- "usubl v3.8h, v3.8b, v6.8b\n"
- "ldr d30, [x14, #0x40]\n"
+ "ld1r { v6.8h }, [x20]\n"
+ "ldp x15, x14, [x22, #0x0]\n"
+ "ldp x13, x12, [x22, #0x10]\n"
+ "cbz x16, 3f\n"
+ "ldr d15, [x7, #0x0]\n"
+ "ldr d13, [x7, #0x8]\n"
+ "subs x16, x16, #0x1\n"
+ "ldr d28, [x7, #0x10]\n"
+ "ldr d11, [x7, #0x18]\n"
+ "ldr d23, [x7, #0x20]\n"
+ "ldr d17, [x7, #0x28]\n"
+ "ldr d10, [x7, #0x30]\n"
+ "ldr d2, [x7, #0x38]\n"
+ "usubl v15.8h, v15.8b, v16.8b\n"
+ "usubl v13.8h, v13.8b, v16.8b\n"
+ "ldr d3, [x7, #0x40]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v9.8h, v9.8b, v6.8b\n"
- "usubl v30.8h, v30.8b, v6.8b\n"
- "ldr q8, [x20, #0x0]\n"
- "ldr q2, [x20, #0x10]\n"
+ "usubl v28.8h, v28.8b, v16.8b\n"
+ "usubl v11.8h, v11.8b, v16.8b\n"
+ "usubl v23.8h, v23.8b, v16.8b\n"
+ "usubl v17.8h, v17.8b, v16.8b\n"
+ "usubl v10.8h, v10.8b, v16.8b\n"
+ "usubl v2.8h, v2.8b, v16.8b\n"
+ "ldr q22, [x20, #0x0]\n"
+ "ldr q8, [x20, #0x10]\n"
+ "ldp x27, x26, [x6, #0x0]\n"
"add x20, x20, #0x20\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
+ "mov v9.16b, v22.16b\n"
+ "mov v31.16b, v8.16b\n"
+ "mov v20.16b, v22.16b\n"
"mov v21.16b, v8.16b\n"
- "mov v4.16b, v2.16b\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "mov v20.16b, v8.16b\n"
- "mov v1.16b, v2.16b\n"
- "ldr d26, [x27, x17]\n"
- "ldr d18, [x26, x17]\n"
- "mov v16.16b, v8.16b\n"
- "mov v14.16b, v2.16b\n"
- "ldr d10, [x25, x17]\n"
- "ldr d27, [x24, x17]\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "ldr d17, [x23, x17]\n"
- "ldr d19, [x22, x17]\n"
- "ushll v10.8h, v10.8b, #0x0\n"
+ "ldp x25, x24, [x6, #0x10]\n"
+ "mov v18.16b, v22.16b\n"
+ "mov v5.16b, v8.16b\n"
+ "ldp x23, x22, [x6, #0x20]\n"
+ "ldp x21, x20, [x6, #0x30]\n"
+ "ldr d29, [x27, x4]\n"
+ "ldr d25, [x26, x4]\n"
+ "ldr d0, [x25, x4]\n"
+ "ldr d7, [x24, x4]\n"
+ "ldr d24, [x23, x4]\n"
+ "ldr d27, [x22, x4]\n"
+ "ldr d26, [x21, x4]\n"
+ "ldr d1, [x20, x4]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "ldr d15, [x21, x17]\n"
- "ldr d28, [x20, x17]\n"
- "ushll v17.8h, v17.8b, #0x0\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "ushll v28.8h, v28.8b, #0x0\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "ushll v1.8h, v1.8b, #0x0\n"
"beq 2f\n"
"1:" // Loop
- "ldr q31, [x13, #0x0]\n"
- "ldr q0, [x12, #0x0]\n"
- "smlal v8.4s, v26.4h, v30.4h\n"
- "smlal2 v2.4s, v26.8h, v30.8h\n"
- "ldr q29, [x13, #0x10]\n"
- "ldr x21, [x15, #0x58]\n"
- "smlal v8.4s, v18.4h, v12.4h\n"
- "smlal v21.4s, v26.4h, v3.4h\n"
- "ldr x20, [x15, #0x78]\n"
- "ldr x25, [x15, #0x60]\n"
- "smlal v20.4s, v26.4h, v25.4h\n"
- "smlal v16.4s, v26.4h, v12.4h\n"
- "ldr x24, [x15, #0x80]\n"
- "smlal2 v2.4s, v18.8h, v12.8h\n"
- "ldr d18, [x21, x17]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "smlal v8.4s, v10.4h, v11.4h\n"
- "smlal2 v4.4s, v26.8h, v3.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal2 v1.4s, v26.8h, v25.8h\n"
- "smlal2 v14.4s, v26.8h, v12.8h\n"
- "ldr d26, [x20, x17]\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "smlal v21.4s, v27.4h, v11.4h\n"
- "smlal v20.4s, v18.4h, v24.4h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal v16.4s, v26.4h, v23.4h\n"
- "smlal2 v2.4s, v10.8h, v11.8h\n"
- "ldr d10, [x25, x17]\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "smlal v8.4s, v19.4h, v24.4h\n"
- "smlal2 v4.4s, v27.8h, v11.8h\n"
- "ldr d27, [x24, x17]\n"
+ "ldr q30, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
+ "smlal v22.4s, v29.4h, v3.4h\n"
+ "smlal2 v8.4s, v29.8h, v3.8h\n"
+ "ldr q19, [x8, #0x10]\n"
+ "ldr x21, [x6, #0x58]\n"
+ "smlal v9.4s, v29.4h, v10.4h\n"
+ "smlal v20.4s, v29.4h, v28.4h\n"
+ "ldr x20, [x6, #0x78]\n"
+ "ldr x23, [x6, #0x60]\n"
+ "smlal v18.4s, v29.4h, v15.4h\n"
+ "smlal2 v31.4s, v29.8h, v10.8h\n"
+ "ldr x22, [x6, #0x80]\n"
+ "smlal2 v21.4s, v29.8h, v28.8h\n"
+ "smlal2 v5.4s, v29.8h, v15.8h\n"
+ "ldr q29, [x17, #0x10]\n"
+ "smlal v22.4s, v25.4h, v15.4h\n"
+ "smlal2 v8.4s, v25.8h, v15.8h\n"
+ "ldr d25, [x21, x4]\n"
+ "ldr x21, [x6, #0x68]\n"
+ "ldr x11, [x6, #0x88]\n"
+ "smlal v9.4s, v7.4h, v13.4h\n"
+ "ldr x10, [x6, #0x40]\n"
+ "add x7, x7, #0x48\n"
+ "smlal2 v31.4s, v7.8h, v13.8h\n"
+ "ldr d7, [x20, x4]\n"
+ "ldr x20, [x6, #0x70]\n"
+ "subs x16, x16, #0x1\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr x9, [x6, #0x98]\n"
+ "ldr x28, [x6, #0x50]\n"
+ "add x8, x8, #0x20\n"
+ "smlal v22.4s, v0.4h, v13.4h\n"
+ "smlal2 v8.4s, v0.8h, v13.8h\n"
+ "ldr d0, [x23, x4]\n"
+ "ldr x27, [x6, #0x48]\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
+ "smlal v9.4s, v24.4h, v28.4h\n"
+ "ldr x26, [x6, #0x90]\n"
+ "ldr x25, [x6, #0xa8]\n"
+ "smlal v20.4s, v25.4h, v11.4h\n"
+ "smlal2 v21.4s, v25.8h, v11.8h\n"
+ "ldr d25, [x22, x4]\n"
+ "ldr x24, [x6, #0xa0]\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "smlal2 v31.4s, v24.8h, v28.8h\n"
+ "ldr d24, [x21, x4]\n"
+ "ldr x23, [x6, #0xb0]\n"
+ "smlal v18.4s, v7.4h, v23.4h\n"
+ "smlal v22.4s, v27.4h, v11.4h\n"
+ "ldr x22, [x6, #0xb8]\n"
+ "ldr x21, [x6, #0xc0]\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "smlal2 v5.4s, v7.8h, v23.8h\n"
+ "ldr d7, [x11, x4]\n"
+ "smlal2 v8.4s, v27.8h, v11.8h\n"
+ "ldr d27, [x10, x4]\n"
+ "smlal v20.4s, v0.4h, v15.4h\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
+ "smlal2 v21.4s, v0.8h, v15.8h\n"
+ "smlal v9.4s, v1.4h, v15.4h\n"
+ "smlal2 v31.4s, v1.8h, v15.8h\n"
+ "ldr d15, [x20, x4]\n"
+ "ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
+ "smlal v18.4s, v25.4h, v13.4h\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
+ "smlal v22.4s, v26.4h, v23.4h\n"
+ "add x17, x17, #0x20\n"
+ "smlal2 v5.4s, v25.8h, v13.8h\n"
+ "smlal2 v8.4s, v26.8h, v23.8h\n"
+ "ldr d26, [x9, x4]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal2 v1.4s, v18.8h, v24.8h\n"
- "ldr d18, [x23, x17]\n"
- "smlal2 v14.4s, v26.8h, v23.8h\n"
- "ldr d26, [x22, x17]\n"
- "ldr x24, [x15, #0x98]\n"
- "smlal v21.4s, v17.4h, v25.4h\n"
- "smlal v20.4s, v10.4h, v12.4h\n"
- "ldr x23, [x15, #0x50]\n"
- "smlal v16.4s, v27.4h, v11.4h\n"
- "smlal2 v2.4s, v19.8h, v24.8h\n"
- "ldr d19, [x21, x17]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "smlal v8.4s, v15.4h, v23.4h\n"
- "smlal2 v4.4s, v17.8h, v25.8h\n"
- "ldr d17, [x20, x17]\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal2 v1.4s, v10.8h, v12.8h\n"
- "smlal2 v14.4s, v27.8h, v11.8h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v21.4s, v28.4h, v12.4h\n"
- "smlal v20.4s, v18.4h, v23.4h\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "smlal v16.4s, v26.4h, v7.4h\n"
- "smlal2 v2.4s, v15.8h, v23.8h\n"
- "ldr d15, [x24, x17]\n"
- "ushll v17.8h, v17.8b, #0x0\n"
+ "smlal v20.4s, v24.4h, v23.4h\n"
"ushll v15.8h, v15.8b, #0x0\n"
- "smlal v8.4s, v28.4h, v25.4h\n"
- "smlal2 v4.4s, v28.8h, v12.8h\n"
- "ldr d12, [x23, x17]\n"
- "ushll v12.8h, v12.8b, #0x0\n"
- "smlal2 v1.4s, v18.8h, v23.8h\n"
- "ldr d18, [x22, x17]\n"
- "smlal2 v14.4s, v26.8h, v7.8h\n"
- "ldr d26, [x21, x17]\n"
- "ldr x22, [x15, #0xa0]\n"
- "smlal v21.4s, v19.4h, v23.4h\n"
- "smlal v20.4s, v17.4h, v11.4h\n"
- "ldr x21, [x15, #0xb0]\n"
- "smlal v16.4s, v15.4h, v25.4h\n"
- "smlal2 v2.4s, v28.8h, v25.8h\n"
- "ldr d28, [x20, x17]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
+ "smlal2 v21.4s, v24.8h, v23.8h\n"
+ "ldr d24, [x28, x4]\n"
+ "smlal v18.4s, v7.4h, v17.4h\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "smlal v8.4s, v12.4h, v7.4h\n"
- "ldr x20, [x15, #0xb8]\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal2 v4.4s, v19.8h, v23.8h\n"
- "ldr d23, [x22, x17]\n"
- "ldr d19, [x21, x17]\n"
- "smlal2 v1.4s, v17.8h, v11.8h\n"
- "ldr d11, [x20, x17]\n"
- "smlal2 v14.4s, v15.8h, v25.8h\n"
- "ldr q25, [x12, #0x10]\n"
- "smlal v21.4s, v18.4h, v7.4h\n"
- "smlal v20.4s, v26.4h, v3.4h\n"
+ "smlal v22.4s, v1.4h, v28.4h\n"
+ "smlal v9.4s, v27.4h, v23.4h\n"
+ "smlal2 v31.4s, v27.8h, v23.8h\n"
+ "ldr d27, [x27, x4]\n"
+ "ldr d23, [x26, x4]\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
+ "smlal2 v5.4s, v7.8h, v17.8h\n"
+ "ldr d7, [x25, x4]\n"
+ "smlal2 v8.4s, v1.8h, v28.8h\n"
+ "ldr d1, [x24, x4]\n"
+ "smlal v20.4s, v15.4h, v13.4h\n"
+ "smlal2 v21.4s, v15.8h, v13.8h\n"
+ "ldr d13, [x23, x4]\n"
+ "smlal v18.4s, v26.4h, v28.4h\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "ldr x21, [x15, #0xc0]\n"
- "smlal v16.4s, v28.4h, v24.4h\n"
- "smlal2 v2.4s, v12.8h, v7.8h\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal v8.4s, v10.4h, v3.4h\n"
- "smlal2 v4.4s, v18.8h, v7.8h\n"
- "ldr d18, [x21, x17]\n"
- "ushll v11.8h, v11.8b, #0x0\n"
- "smlal2 v1.4s, v26.8h, v3.8h\n"
- "smlal2 v14.4s, v28.8h, v24.8h\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "add x14, x14, #0x48\n"
- "smlal v21.4s, v12.4h, v24.4h\n"
- "smlal v20.4s, v23.4h, v9.4h\n"
- "add x17, x17, #0x8\n"
- "subs x8, x8, #0x1\n"
- "smlal v16.4s, v19.4h, v9.4h\n"
- "smlal2 v2.4s, v10.8h, v3.8h\n"
- "add x13, x13, #0x20\n"
- "add x12, x12, #0x20\n"
- "smlal v8.4s, v17.4h, v9.4h\n"
- "smlal2 v4.4s, v12.8h, v24.8h\n"
- "sqrdmulh v8.4s, v8.4s, v31.4s\n"
- "smlal2 v1.4s, v23.8h, v9.8h\n"
- "smlal2 v14.4s, v19.8h, v9.8h\n"
- "and v10.16b, v8.16b, v0.16b\n"
- "smlal v21.4s, v27.4h, v9.4h\n"
- "smlal v20.4s, v28.4h, v7.4h\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "smlal v16.4s, v11.4h, v3.4h\n"
- "smlal2 v2.4s, v17.8h, v9.8h\n"
- "sqrdmulh v2.4s, v2.4s, v29.4s\n"
- "smlal2 v4.4s, v27.8h, v9.8h\n"
- "smlal2 v1.4s, v28.8h, v7.8h\n"
- "and v12.16b, v2.16b, v25.16b\n"
- "smlal2 v14.4s, v11.8h, v3.8h\n"
- "smlal v21.4s, v15.4h, v30.4h\n"
- "sqrdmulh v21.4s, v21.4s, v31.4s\n"
- "smlal v20.4s, v11.4h, v30.4h\n"
- "smlal v16.4s, v18.4h, v30.4h\n"
- "sqrdmulh v20.4s, v20.4s, v31.4s\n"
- "smlal2 v4.4s, v15.8h, v30.8h\n"
- "smlal2 v1.4s, v11.8h, v30.8h\n"
- "sqrdmulh v16.4s, v16.4s, v31.4s\n"
- "smlal2 v14.4s, v18.8h, v30.8h\n"
- "sqadd v8.4s, v8.4s, v10.4s\n"
- "sshr v12.4s, v12.4s, #0x1f\n"
- "and v27.16b, v21.16b, v0.16b\n"
- "sqrdmulh v4.4s, v4.4s, v29.4s\n"
- "and v24.16b, v20.16b, v0.16b\n"
- "sqrdmulh v1.4s, v1.4s, v29.4s\n"
- "and v19.16b, v16.16b, v0.16b\n"
- "sqrdmulh v14.4s, v14.4s, v29.4s\n"
- "sqadd v2.4s, v2.4s, v12.4s\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v18.16b, v4.16b, v25.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
- "and v17.16b, v1.16b, v25.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "and v15.16b, v14.16b, v25.16b\n"
- "sqadd v21.4s, v21.4s, v27.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v24.4s\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
+ "smlal v22.4s, v24.4h, v17.4h\n"
+ "smlal2 v5.4s, v26.8h, v28.8h\n"
+ "ldr d28, [x22, x4]\n"
+ "smlal2 v8.4s, v24.8h, v17.8h\n"
+ "ushll v1.8h, v1.8b, #0x0\n"
+ "smlal v9.4s, v27.4h, v17.4h\n"
+ "ushll v13.8h, v13.8b, #0x0\n"
+ "smlal2 v31.4s, v27.8h, v17.8h\n"
+ "ldr d27, [x21, x4]\n"
+ "smlal v20.4s, v23.4h, v10.4h\n"
+ "smlal v18.4s, v7.4h, v11.4h\n"
+ "add x4, x4, #0x8\n"
+ "smlal v22.4s, v0.4h, v10.4h\n"
+ "smlal2 v21.4s, v23.8h, v10.8h\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "smlal2 v5.4s, v7.8h, v11.8h\n"
+ "smlal2 v8.4s, v0.8h, v10.8h\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
+ "smlal v9.4s, v24.4h, v11.4h\n"
+ "smlal2 v31.4s, v24.8h, v11.8h\n"
+ "smlal v20.4s, v1.4h, v2.4h\n"
+ "smlal v18.4s, v13.4h, v2.4h\n"
+ "smlal v22.4s, v15.4h, v2.4h\n"
+ "smlal2 v21.4s, v1.8h, v2.8h\n"
+ "smlal2 v5.4s, v13.8h, v2.8h\n"
+ "smlal2 v8.4s, v15.8h, v2.8h\n"
+ "smlal v9.4s, v25.4h, v2.4h\n"
+ "smlal2 v31.4s, v25.8h, v2.8h\n"
+ "smlal v20.4s, v7.4h, v17.4h\n"
+ "smlal v18.4s, v28.4h, v10.4h\n"
+ "sqrdmulh v22.4s, v22.4s, v30.4s\n"
+ "smlal2 v21.4s, v7.8h, v17.8h\n"
+ "smlal2 v5.4s, v28.8h, v10.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v19.4s\n"
+ "smlal v9.4s, v26.4h, v3.4h\n"
+ "and v17.16b, v22.16b, v4.16b\n"
+ "smlal2 v31.4s, v26.8h, v3.8h\n"
+ "smlal v20.4s, v28.4h, v3.4h\n"
+ "smlal v18.4s, v27.4h, v3.4h\n"
+ "and v26.16b, v8.16b, v29.16b\n"
+ "smlal2 v21.4s, v28.8h, v3.8h\n"
+ "smlal2 v5.4s, v27.8h, v3.8h\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v19.4s\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "srshl v8.4s, v8.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v0.4s\n"
- "sqadd v4.4s, v4.4s, v18.4s\n"
- "srshl v20.4s, v20.4s, v0.4s\n"
- "sqadd v1.4s, v1.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v0.4s\n"
- "sqadd v14.4s, v14.4s, v15.4s\n"
- "srshl v2.4s, v2.4s, v25.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v4.4s, v4.4s, v25.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v1.4s, v1.4s, v25.4s\n"
+ "sqrdmulh v9.4s, v9.4s, v30.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v19.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v30.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v30.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "and v25.16b, v9.16b, v4.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v19.4s\n"
+ "and v0.16b, v20.16b, v4.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v19.4s\n"
+ "and v1.16b, v18.16b, v4.16b\n"
+ "sqadd v8.4s, v8.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "and v11.16b, v31.16b, v29.16b\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "and v17.16b, v21.16b, v29.16b\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "and v30.16b, v5.16b, v29.16b\n"
+ "sqadd v9.4s, v9.4s, v25.4s\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v0.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v1.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "srshl v22.4s, v22.4s, v4.4s\n"
+ "srshl v9.4s, v9.4s, v4.4s\n"
+ "sqadd v31.4s, v31.4s, v11.4s\n"
+ "srshl v20.4s, v20.4s, v4.4s\n"
+ "sqadd v21.4s, v21.4s, v17.4s\n"
+ "srshl v18.4s, v18.4s, v4.4s\n"
+ "sqadd v5.4s, v5.4s, v30.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v31.4s, v31.4s, v29.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v21.4s, v21.4s, v29.4s\n"
"sqxtn v20.4h, v20.4s\n"
- "srshl v14.4s, v14.4s, v25.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "sqxtn2 v8.8h, v2.4s\n"
- "sqxtn2 v21.8h, v4.4s\n"
- "sqxtn2 v20.8h, v1.4s\n"
- "sqxtn2 v16.8h, v14.4s\n"
- "sqadd v8.8h, v8.8h, v22.8h\n"
- "sqadd v21.8h, v21.8h, v22.8h\n"
- "sqadd v20.8h, v20.8h, v22.8h\n"
- "sqadd v16.8h, v16.8h, v22.8h\n"
- "smax v8.8h, v8.8h, v13.8h\n"
- "smax v21.8h, v21.8h, v13.8h\n"
- "smax v20.8h, v20.8h, v13.8h\n"
- "smax v16.8h, v16.8h, v13.8h\n"
- "smin v8.8h, v8.8h, v5.8h\n"
- "smin v21.8h, v21.8h, v5.8h\n"
- "smin v20.8h, v20.8h, v5.8h\n"
- "smin v16.8h, v16.8h, v5.8h\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "str d8, [x11, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "sqxtn2 v22.8h, v8.4s\n"
+ "sqxtn2 v9.8h, v31.4s\n"
+ "sqxtn2 v20.8h, v21.4s\n"
+ "sqxtn2 v18.8h, v5.4s\n"
+ "sqadd v22.8h, v22.8h, v12.8h\n"
+ "sqadd v9.8h, v9.8h, v12.8h\n"
+ "sqadd v20.8h, v20.8h, v12.8h\n"
+ "sqadd v18.8h, v18.8h, v12.8h\n"
+ "smax v22.8h, v22.8h, v14.8h\n"
+ "smax v9.8h, v9.8h, v14.8h\n"
+ "smax v20.8h, v20.8h, v14.8h\n"
+ "smax v18.8h, v18.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v6.8h\n"
+ "smin v9.8h, v9.8h, v6.8h\n"
+ "smin v20.8h, v20.8h, v6.8h\n"
+ "smin v18.8h, v18.8h, v6.8h\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d21, [x10, x16]\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str d20, [x9, x16]\n"
- "str d16, [x28, x16]\n"
- "ldr q8, [x20, #0x0]\n"
- "ldr q2, [x20, #0x10]\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
+ "str d22, [x15, x5]\n"
+ "str d9, [x14, x5]\n"
+ "str d20, [x13, x5]\n"
+ "str d18, [x12, x5]\n"
+ "add x5, x5, #0x8\n"
+ "ldr q22, [x20, #0x0]\n"
+ "ldr q8, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d12, [x14, #0x0]\n"
- "ldr d11, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d25, [x14, #0x10]\n"
- "ldr d24, [x14, #0x18]\n"
+ "ldr d15, [x7, #0x0]\n"
+ "ldr d13, [x7, #0x8]\n"
+ "ldr d28, [x7, #0x10]\n"
+ "ldr d11, [x7, #0x18]\n"
+ "ldr d23, [x7, #0x20]\n"
+ "ldr d17, [x7, #0x28]\n"
+ "mov v9.16b, v22.16b\n"
+ "mov v31.16b, v8.16b\n"
+ "ldr d10, [x7, #0x30]\n"
+ "ldr d2, [x7, #0x38]\n"
+ "mov v20.16b, v22.16b\n"
"mov v21.16b, v8.16b\n"
- "mov v4.16b, v2.16b\n"
- "ldr d23, [x14, #0x20]\n"
- "ldr d7, [x14, #0x28]\n"
- "mov v20.16b, v8.16b\n"
- "mov v1.16b, v2.16b\n"
- "ldr d3, [x14, #0x30]\n"
- "ldr d9, [x14, #0x38]\n"
- "mov v16.16b, v8.16b\n"
- "mov v14.16b, v2.16b\n"
- "ldr d30, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v11.8h, v11.8b, v6.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "usubl v24.8h, v24.8b, v6.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr d26, [x27, x17]\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "usubl v7.8h, v7.8b, v6.8b\n"
- "ldr d18, [x26, x17]\n"
- "ldr d10, [x25, x17]\n"
- "usubl v3.8h, v3.8b, v6.8b\n"
- "usubl v9.8h, v9.8b, v6.8b\n"
- "ldr d27, [x24, x17]\n"
- "ldr d17, [x23, x17]\n"
- "usubl v30.8h, v30.8b, v6.8b\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "ldr d19, [x22, x17]\n"
- "ldr d15, [x21, x17]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "ldr d28, [x20, x17]\n"
+ "ldr d3, [x7, #0x40]\n"
+ "ldp x27, x26, [x6, #0x0]\n"
+ "mov v18.16b, v22.16b\n"
+ "mov v5.16b, v8.16b\n"
+ "usubl v15.8h, v15.8b, v16.8b\n"
+ "usubl v13.8h, v13.8b, v16.8b\n"
+ "usubl v28.8h, v28.8b, v16.8b\n"
+ "usubl v11.8h, v11.8b, v16.8b\n"
+ "ldp x25, x24, [x6, #0x10]\n"
+ "usubl v23.8h, v23.8b, v16.8b\n"
+ "usubl v17.8h, v17.8b, v16.8b\n"
+ "usubl v10.8h, v10.8b, v16.8b\n"
+ "usubl v2.8h, v2.8b, v16.8b\n"
+ "ldp x23, x22, [x6, #0x20]\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
+ "ldp x21, x20, [x6, #0x30]\n"
+ "ldr d29, [x27, x4]\n"
+ "ldr d25, [x26, x4]\n"
+ "ldr d0, [x25, x4]\n"
+ "ldr d7, [x24, x4]\n"
+ "ldr d24, [x23, x4]\n"
+ "ldr d27, [x22, x4]\n"
+ "ldr d26, [x21, x4]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "ldr d1, [x20, x4]\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "ushll v17.8h, v17.8b, #0x0\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "ushll v28.8h, v28.8b, #0x0\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "ushll v1.8h, v1.8b, #0x0\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q0, [x13, #0x0]\n"
- "ldr q31, [x12, #0x0]\n"
- "smlal v8.4s, v26.4h, v30.4h\n"
- "smlal2 v2.4s, v26.8h, v30.8h\n"
- "ldr q29, [x13, #0x10]\n"
- "ldr x21, [x15, #0x58]\n"
- "smlal v8.4s, v18.4h, v12.4h\n"
- "smlal v21.4s, v26.4h, v3.4h\n"
- "ldr x20, [x15, #0x78]\n"
- "ldr x25, [x15, #0x60]\n"
- "smlal v20.4s, v26.4h, v25.4h\n"
- "smlal v16.4s, v26.4h, v12.4h\n"
- "ldr x24, [x15, #0x80]\n"
- "smlal2 v2.4s, v18.8h, v12.8h\n"
- "ldr d18, [x21, x17]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "smlal v8.4s, v10.4h, v11.4h\n"
- "smlal2 v4.4s, v26.8h, v3.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal2 v1.4s, v26.8h, v25.8h\n"
- "smlal2 v14.4s, v26.8h, v12.8h\n"
- "ldr d26, [x20, x17]\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "smlal v21.4s, v27.4h, v11.4h\n"
- "smlal v20.4s, v18.4h, v24.4h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal v16.4s, v26.4h, v23.4h\n"
- "smlal2 v2.4s, v10.8h, v11.8h\n"
- "ldr d10, [x25, x17]\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "smlal v8.4s, v19.4h, v24.4h\n"
- "smlal2 v4.4s, v27.8h, v11.8h\n"
- "ldr d27, [x24, x17]\n"
+ "ldr q30, [x8, #0x0]\n"
+ "ldr q4, [x17, #0x0]\n"
+ "smlal v22.4s, v29.4h, v3.4h\n"
+ "smlal2 v8.4s, v29.8h, v3.8h\n"
+ "ldr q19, [x8, #0x10]\n"
+ "ldr x20, [x6, #0x58]\n"
+ "smlal v9.4s, v29.4h, v10.4h\n"
+ "smlal v20.4s, v29.4h, v28.4h\n"
+ "ldr x23, [x6, #0x78]\n"
+ "ldr x22, [x6, #0x60]\n"
+ "smlal v18.4s, v29.4h, v15.4h\n"
+ "smlal2 v31.4s, v29.8h, v10.8h\n"
+ "ldr x21, [x6, #0x80]\n"
+ "smlal2 v21.4s, v29.8h, v28.8h\n"
+ "smlal2 v5.4s, v29.8h, v15.8h\n"
+ "ldr q29, [x17, #0x10]\n"
+ "smlal v22.4s, v25.4h, v15.4h\n"
+ "smlal2 v8.4s, v25.8h, v15.8h\n"
+ "ldr d25, [x20, x4]\n"
+ "ldr x20, [x6, #0x68]\n"
+ "ldr x11, [x6, #0x88]\n"
+ "smlal v9.4s, v7.4h, v13.4h\n"
+ "ldr x10, [x6, #0x40]\n"
+ "tst x3, #0x7\n"
+ "smlal2 v31.4s, v7.8h, v13.8h\n"
+ "ldr d7, [x23, x4]\n"
+ "ldr x9, [x6, #0x70]\n"
+ "add x8, x8, #0x20\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr x28, [x6, #0x98]\n"
+ "ldr x27, [x6, #0x50]\n"
+ "add x17, x17, #0x20\n"
+ "smlal v22.4s, v0.4h, v13.4h\n"
+ "smlal2 v8.4s, v0.8h, v13.8h\n"
+ "ldr d0, [x22, x4]\n"
+ "ldr x26, [x6, #0x48]\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
+ "smlal v9.4s, v24.4h, v28.4h\n"
+ "ldr x25, [x6, #0x90]\n"
+ "ldr x24, [x6, #0xa8]\n"
+ "smlal v20.4s, v25.4h, v11.4h\n"
+ "smlal2 v21.4s, v25.8h, v11.8h\n"
+ "ldr d25, [x21, x4]\n"
+ "ldr x23, [x6, #0xa0]\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "smlal2 v31.4s, v24.8h, v28.8h\n"
+ "ldr d24, [x20, x4]\n"
+ "ldr x22, [x6, #0xb0]\n"
+ "smlal v18.4s, v7.4h, v23.4h\n"
+ "smlal v22.4s, v27.4h, v11.4h\n"
+ "ldr x21, [x6, #0xb8]\n"
+ "ldr x20, [x6, #0xc0]\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "smlal2 v5.4s, v7.8h, v23.8h\n"
+ "ldr d7, [x11, x4]\n"
+ "smlal2 v8.4s, v27.8h, v11.8h\n"
+ "ldr d27, [x10, x4]\n"
+ "smlal v20.4s, v0.4h, v15.4h\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
+ "smlal2 v21.4s, v0.8h, v15.8h\n"
+ "smlal v9.4s, v1.4h, v15.4h\n"
+ "smlal2 v31.4s, v1.8h, v15.8h\n"
+ "ldr d15, [x9, x4]\n"
+ "smlal v18.4s, v25.4h, v13.4h\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
+ "smlal v22.4s, v26.4h, v23.4h\n"
+ "smlal2 v5.4s, v25.8h, v13.8h\n"
+ "smlal2 v8.4s, v26.8h, v23.8h\n"
+ "ldr d26, [x28, x4]\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "smlal2 v1.4s, v18.8h, v24.8h\n"
- "ldr d18, [x23, x17]\n"
- "smlal2 v14.4s, v26.8h, v23.8h\n"
- "ldr d26, [x22, x17]\n"
- "ldr x24, [x15, #0x98]\n"
- "smlal v21.4s, v17.4h, v25.4h\n"
- "smlal v20.4s, v10.4h, v12.4h\n"
- "ldr x23, [x15, #0x50]\n"
- "smlal v16.4s, v27.4h, v11.4h\n"
- "smlal2 v2.4s, v19.8h, v24.8h\n"
- "ldr d19, [x21, x17]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "smlal v8.4s, v15.4h, v23.4h\n"
- "smlal2 v4.4s, v17.8h, v25.8h\n"
- "ldr d17, [x20, x17]\n"
- "ldr x22, [x15, #0x48]\n"
- "smlal2 v1.4s, v10.8h, v12.8h\n"
- "smlal2 v14.4s, v27.8h, v11.8h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v21.4s, v28.4h, v12.4h\n"
- "smlal v20.4s, v18.4h, v23.4h\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "smlal v16.4s, v26.4h, v7.4h\n"
- "smlal2 v2.4s, v15.8h, v23.8h\n"
- "ldr d15, [x24, x17]\n"
- "ushll v17.8h, v17.8b, #0x0\n"
+ "smlal v20.4s, v24.4h, v23.4h\n"
"ushll v15.8h, v15.8b, #0x0\n"
- "smlal v8.4s, v28.4h, v25.4h\n"
- "smlal2 v4.4s, v28.8h, v12.8h\n"
- "ldr d12, [x23, x17]\n"
- "ushll v12.8h, v12.8b, #0x0\n"
- "smlal2 v1.4s, v18.8h, v23.8h\n"
- "ldr d18, [x22, x17]\n"
- "smlal2 v14.4s, v26.8h, v7.8h\n"
- "ldr d26, [x21, x17]\n"
- "ldr x22, [x15, #0xa0]\n"
- "smlal v21.4s, v19.4h, v23.4h\n"
- "smlal v20.4s, v17.4h, v11.4h\n"
- "ldr x21, [x15, #0xb0]\n"
- "smlal v16.4s, v15.4h, v25.4h\n"
- "smlal2 v2.4s, v28.8h, v25.8h\n"
- "ldr d28, [x20, x17]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
+ "smlal2 v21.4s, v24.8h, v23.8h\n"
+ "ldr d24, [x27, x4]\n"
+ "smlal v18.4s, v7.4h, v17.4h\n"
"ushll v26.8h, v26.8b, #0x0\n"
- "smlal v8.4s, v12.4h, v7.4h\n"
- "ldr x20, [x15, #0xb8]\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal2 v4.4s, v19.8h, v23.8h\n"
- "ldr d23, [x22, x17]\n"
- "ldr d19, [x21, x17]\n"
- "smlal2 v1.4s, v17.8h, v11.8h\n"
- "ldr d11, [x20, x17]\n"
- "smlal2 v14.4s, v15.8h, v25.8h\n"
- "ldr q25, [x12, #0x10]\n"
- "smlal v21.4s, v18.4h, v7.4h\n"
- "smlal v20.4s, v26.4h, v3.4h\n"
+ "smlal v22.4s, v1.4h, v28.4h\n"
+ "smlal v9.4s, v27.4h, v23.4h\n"
+ "smlal2 v31.4s, v27.8h, v23.8h\n"
+ "ldr d27, [x26, x4]\n"
+ "ldr d23, [x25, x4]\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
+ "smlal2 v5.4s, v7.8h, v17.8h\n"
+ "ldr d7, [x24, x4]\n"
+ "smlal2 v8.4s, v1.8h, v28.8h\n"
+ "ldr d1, [x23, x4]\n"
+ "smlal v20.4s, v15.4h, v13.4h\n"
+ "smlal2 v21.4s, v15.8h, v13.8h\n"
+ "ldr d13, [x22, x4]\n"
+ "smlal v18.4s, v26.4h, v28.4h\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
"ushll v23.8h, v23.8b, #0x0\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal v16.4s, v28.4h, v24.4h\n"
- "smlal2 v2.4s, v12.8h, v7.8h\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "tst x7, #0x7\n"
- "smlal v8.4s, v10.4h, v3.4h\n"
- "smlal2 v4.4s, v18.8h, v7.8h\n"
- "ldr d18, [x20, x17]\n"
- "ushll v11.8h, v11.8b, #0x0\n"
- "smlal2 v1.4s, v26.8h, v3.8h\n"
- "smlal2 v14.4s, v28.8h, v24.8h\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "add x17, x17, #0x8\n"
- "smlal v21.4s, v12.4h, v24.4h\n"
- "smlal v20.4s, v23.4h, v9.4h\n"
- "add x13, x13, #0x20\n"
- "add x12, x12, #0x20\n"
- "smlal v16.4s, v19.4h, v9.4h\n"
- "smlal2 v2.4s, v10.8h, v3.8h\n"
- "smlal v8.4s, v17.4h, v9.4h\n"
- "smlal2 v4.4s, v12.8h, v24.8h\n"
- "sqrdmulh v8.4s, v8.4s, v0.4s\n"
- "smlal2 v1.4s, v23.8h, v9.8h\n"
- "smlal2 v14.4s, v19.8h, v9.8h\n"
- "and v23.16b, v8.16b, v31.16b\n"
- "smlal v21.4s, v27.4h, v9.4h\n"
- "smlal v20.4s, v28.4h, v7.4h\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "smlal v16.4s, v11.4h, v3.4h\n"
- "smlal2 v2.4s, v17.8h, v9.8h\n"
- "sqrdmulh v2.4s, v2.4s, v29.4s\n"
- "smlal2 v4.4s, v27.8h, v9.8h\n"
- "smlal2 v1.4s, v28.8h, v7.8h\n"
- "and v7.16b, v2.16b, v25.16b\n"
- "smlal2 v14.4s, v11.8h, v3.8h\n"
- "smlal v21.4s, v15.4h, v30.4h\n"
- "sqrdmulh v21.4s, v21.4s, v0.4s\n"
- "smlal v20.4s, v11.4h, v30.4h\n"
- "smlal v16.4s, v18.4h, v30.4h\n"
- "sqrdmulh v20.4s, v20.4s, v0.4s\n"
- "smlal2 v4.4s, v15.8h, v30.8h\n"
- "smlal2 v1.4s, v11.8h, v30.8h\n"
- "sqrdmulh v16.4s, v16.4s, v0.4s\n"
- "smlal2 v14.4s, v18.8h, v30.8h\n"
- "sqadd v8.4s, v8.4s, v23.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "and v23.16b, v21.16b, v31.16b\n"
- "sqrdmulh v4.4s, v4.4s, v29.4s\n"
- "and v24.16b, v20.16b, v31.16b\n"
- "sqrdmulh v1.4s, v1.4s, v29.4s\n"
- "and v19.16b, v16.16b, v31.16b\n"
- "sqrdmulh v14.4s, v14.4s, v29.4s\n"
- "sqadd v2.4s, v2.4s, v7.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v18.16b, v4.16b, v25.16b\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
+ "smlal v22.4s, v24.4h, v17.4h\n"
+ "smlal2 v5.4s, v26.8h, v28.8h\n"
+ "ldr d28, [x21, x4]\n"
+ "smlal2 v8.4s, v24.8h, v17.8h\n"
+ "ushll v1.8h, v1.8b, #0x0\n"
+ "smlal v9.4s, v27.4h, v17.4h\n"
+ "ushll v13.8h, v13.8b, #0x0\n"
+ "smlal2 v31.4s, v27.8h, v17.8h\n"
+ "ldr d27, [x20, x4]\n"
+ "smlal v20.4s, v23.4h, v10.4h\n"
+ "smlal v18.4s, v7.4h, v11.4h\n"
+ "add x4, x4, #0x8\n"
+ "smlal v22.4s, v0.4h, v10.4h\n"
+ "smlal2 v21.4s, v23.8h, v10.8h\n"
+ "ushll v28.8h, v28.8b, #0x0\n"
+ "smlal2 v5.4s, v7.8h, v11.8h\n"
+ "smlal2 v8.4s, v0.8h, v10.8h\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
+ "smlal v9.4s, v24.4h, v11.4h\n"
+ "smlal2 v31.4s, v24.8h, v11.8h\n"
+ "smlal v20.4s, v1.4h, v2.4h\n"
+ "smlal v18.4s, v13.4h, v2.4h\n"
+ "smlal v22.4s, v15.4h, v2.4h\n"
+ "smlal2 v21.4s, v1.8h, v2.8h\n"
+ "smlal2 v5.4s, v13.8h, v2.8h\n"
+ "smlal2 v8.4s, v15.8h, v2.8h\n"
+ "smlal v9.4s, v25.4h, v2.4h\n"
+ "smlal2 v31.4s, v25.8h, v2.8h\n"
+ "smlal v20.4s, v7.4h, v17.4h\n"
+ "smlal v18.4s, v28.4h, v10.4h\n"
+ "sqrdmulh v22.4s, v22.4s, v30.4s\n"
+ "smlal2 v21.4s, v7.8h, v17.8h\n"
+ "smlal2 v5.4s, v28.8h, v10.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v19.4s\n"
+ "smlal v9.4s, v26.4h, v3.4h\n"
+ "and v17.16b, v22.16b, v4.16b\n"
+ "smlal2 v31.4s, v26.8h, v3.8h\n"
+ "smlal v20.4s, v28.4h, v3.4h\n"
+ "smlal v18.4s, v27.4h, v3.4h\n"
+ "and v15.16b, v8.16b, v29.16b\n"
+ "smlal2 v21.4s, v28.8h, v3.8h\n"
+ "smlal2 v5.4s, v27.8h, v3.8h\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v9.4s, v9.4s, v30.4s\n"
+ "sshr v15.4s, v15.4s, #0x1f\n"
+ "sqrdmulh v31.4s, v31.4s, v19.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v30.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v30.4s\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "and v25.16b, v9.16b, v4.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v19.4s\n"
+ "and v24.16b, v20.16b, v4.16b\n"
+ "sqrdmulh v5.4s, v5.4s, v19.4s\n"
+ "and v23.16b, v18.16b, v4.16b\n"
+ "sqadd v8.4s, v8.4s, v15.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "and v30.16b, v31.16b, v29.16b\n"
"sshr v24.4s, v24.4s, #0x1f\n"
- "and v17.16b, v1.16b, v25.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "and v15.16b, v14.16b, v25.16b\n"
- "sqadd v21.4s, v21.4s, v23.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v17.16b, v21.16b, v29.16b\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "and v7.16b, v5.16b, v29.16b\n"
+ "sqadd v9.4s, v9.4s, v25.4s\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v24.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v19.4s\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "srshl v8.4s, v8.4s, v31.4s\n"
- "srshl v21.4s, v21.4s, v31.4s\n"
- "sqadd v4.4s, v4.4s, v18.4s\n"
- "srshl v20.4s, v20.4s, v31.4s\n"
- "sqadd v1.4s, v1.4s, v17.4s\n"
- "srshl v16.4s, v16.4s, v31.4s\n"
- "sqadd v14.4s, v14.4s, v15.4s\n"
- "srshl v2.4s, v2.4s, v25.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v4.4s, v4.4s, v25.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v1.4s, v1.4s, v25.4s\n"
+ "sqadd v18.4s, v18.4s, v23.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "srshl v22.4s, v22.4s, v4.4s\n"
+ "srshl v9.4s, v9.4s, v4.4s\n"
+ "sqadd v31.4s, v31.4s, v30.4s\n"
+ "srshl v20.4s, v20.4s, v4.4s\n"
+ "sqadd v21.4s, v21.4s, v17.4s\n"
+ "srshl v18.4s, v18.4s, v4.4s\n"
+ "sqadd v5.4s, v5.4s, v7.4s\n"
+ "srshl v8.4s, v8.4s, v29.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v31.4s, v31.4s, v29.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v21.4s, v21.4s, v29.4s\n"
"sqxtn v20.4h, v20.4s\n"
- "srshl v14.4s, v14.4s, v25.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "sqxtn2 v8.8h, v2.4s\n"
- "sqxtn2 v21.8h, v4.4s\n"
- "sqxtn2 v20.8h, v1.4s\n"
- "sqxtn2 v16.8h, v14.4s\n"
- "sqadd v8.8h, v8.8h, v22.8h\n"
- "sqadd v21.8h, v21.8h, v22.8h\n"
- "sqadd v20.8h, v20.8h, v22.8h\n"
- "sqadd v16.8h, v16.8h, v22.8h\n"
- "smax v8.8h, v8.8h, v13.8h\n"
- "smax v21.8h, v21.8h, v13.8h\n"
- "smax v20.8h, v20.8h, v13.8h\n"
- "smax v16.8h, v16.8h, v13.8h\n"
- "smin v8.8h, v8.8h, v5.8h\n"
- "smin v21.8h, v21.8h, v5.8h\n"
- "smin v20.8h, v20.8h, v5.8h\n"
- "smin v16.8h, v16.8h, v5.8h\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "str d8, [x11, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "srshl v5.4s, v5.4s, v29.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "sqxtn2 v22.8h, v8.4s\n"
+ "sqxtn2 v9.8h, v31.4s\n"
+ "sqxtn2 v20.8h, v21.4s\n"
+ "sqxtn2 v18.8h, v5.4s\n"
+ "sqadd v22.8h, v22.8h, v12.8h\n"
+ "sqadd v9.8h, v9.8h, v12.8h\n"
+ "sqadd v20.8h, v20.8h, v12.8h\n"
+ "sqadd v18.8h, v18.8h, v12.8h\n"
+ "smax v22.8h, v22.8h, v14.8h\n"
+ "smax v9.8h, v9.8h, v14.8h\n"
+ "smax v20.8h, v20.8h, v14.8h\n"
+ "smax v18.8h, v18.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v6.8h\n"
+ "smin v9.8h, v9.8h, v6.8h\n"
+ "smin v20.8h, v20.8h, v6.8h\n"
+ "smin v18.8h, v18.8h, v6.8h\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d21, [x10, x16]\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "str d20, [x9, x16]\n"
- "str d16, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
+ "str d22, [x15, x5]\n"
+ "str d9, [x14, x5]\n"
+ "str d20, [x13, x5]\n"
+ "str d18, [x12, x5]\n"
+ "add x5, x5, #0x8\n"
"beq 88f\n"
- "add x14, x14, #0x48\n"
+ "add x7, x7, #0x48\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v8.4s }, [x20], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v2.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v2.s }[2], [x20]\n"
+ "tbz x3, #2, 5f\n"
+ "ld1 { v22.4s }, [x20], #0x10\n"
+ "tbz x3, #1, 4f\n"
+ "ld1 { v8.d }[0], [x20], #0x8\n"
+ "tbz x3, #0, 7f\n"
+ "ld1 { v8.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v2.s }[0], [x20]\n"
+ "tbz x3, #0, 7f\n"
+ "ld1 { v8.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v8.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v8.s }[2], [x20]\n"
+ "tbz x3, #1, 6f\n"
+ "ld1 { v22.d }[0], [x20], #0x8\n"
+ "tbz x3, #0, 7f\n"
+ "ld1 { v22.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v8.s }[0], [x20]\n"
+ "tbz x3, #0, 7f\n"
+ "ld1 { v22.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d12, [x14, #0x0]\n"
- "ldr d11, [x14, #0x8]\n"
+ "ldr d15, [x7, #0x0]\n"
+ "ldr d13, [x7, #0x8]\n"
+ "mov v9.16b, v22.16b\n"
+ "mov v31.16b, v8.16b\n"
+ "ldr d28, [x7, #0x10]\n"
+ "ldr d11, [x7, #0x18]\n"
+ "mov v20.16b, v22.16b\n"
"mov v21.16b, v8.16b\n"
- "mov v4.16b, v2.16b\n"
- "ldr d25, [x14, #0x10]\n"
- "ldr d24, [x14, #0x18]\n"
- "mov v20.16b, v8.16b\n"
- "mov v1.16b, v2.16b\n"
- "ldr d23, [x14, #0x20]\n"
- "ldr d7, [x14, #0x28]\n"
- "mov v16.16b, v8.16b\n"
- "mov v14.16b, v2.16b\n"
- "ldr d3, [x14, #0x30]\n"
- "ldr d9, [x14, #0x38]\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v11.8h, v11.8b, v6.8b\n"
- "ldr d30, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "usubl v24.8h, v24.8b, v6.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "usubl v7.8h, v7.8b, v6.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "usubl v3.8h, v3.8b, v6.8b\n"
- "usubl v9.8h, v9.8b, v6.8b\n"
- "usubl v30.8h, v30.8b, v6.8b\n"
- "add x27, x27, x17\n"
- "add x26, x26, x17\n"
- "add x25, x25, x17\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v26.s }[0], [x27], #0x4\n"
- "ld1 { v18.s }[0], [x26], #0x4\n"
- "ld1 { v10.s }[0], [x25], #0x4\n"
- "ld1 { v27.s }[0], [x24], #0x4\n"
- "ld1 { v17.s }[0], [x23], #0x4\n"
- "ld1 { v19.s }[0], [x22], #0x4\n"
- "ld1 { v15.s }[0], [x21], #0x4\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v26.h }[2], [x27], #0x2\n"
- "ld1 { v18.h }[2], [x26], #0x2\n"
- "ld1 { v10.h }[2], [x25], #0x2\n"
- "ld1 { v27.h }[2], [x24], #0x2\n"
- "ld1 { v17.h }[2], [x23], #0x2\n"
- "ld1 { v19.h }[2], [x22], #0x2\n"
- "ld1 { v15.h }[2], [x21], #0x2\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v26.b }[6], [x27]\n"
- "ld1 { v18.b }[6], [x26]\n"
- "ld1 { v10.b }[6], [x25]\n"
- "ld1 { v27.b }[6], [x24]\n"
- "ld1 { v17.b }[6], [x23]\n"
- "ld1 { v19.b }[6], [x22]\n"
- "ld1 { v15.b }[6], [x21]\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ldr d23, [x7, #0x20]\n"
+ "ldr d17, [x7, #0x28]\n"
+ "mov v18.16b, v22.16b\n"
+ "mov v5.16b, v8.16b\n"
+ "ldr d10, [x7, #0x30]\n"
+ "ldr d2, [x7, #0x38]\n"
+ "usubl v15.8h, v15.8b, v16.8b\n"
+ "usubl v13.8h, v13.8b, v16.8b\n"
+ "ldr d3, [x7, #0x40]\n"
+ "ldp x27, x26, [x6, #0x0]\n"
+ "usubl v28.8h, v28.8b, v16.8b\n"
+ "usubl v11.8h, v11.8b, v16.8b\n"
+ "usubl v23.8h, v23.8b, v16.8b\n"
+ "usubl v17.8h, v17.8b, v16.8b\n"
+ "usubl v10.8h, v10.8b, v16.8b\n"
+ "usubl v2.8h, v2.8b, v16.8b\n"
+ "ldp x25, x24, [x6, #0x10]\n"
+ "usubl v3.8h, v3.8b, v16.8b\n"
+ "add x27, x27, x4\n"
+ "add x26, x26, x4\n"
+ "ldp x23, x22, [x6, #0x20]\n"
+ "add x25, x25, x4\n"
+ "add x24, x24, x4\n"
+ "ldp x21, x20, [x6, #0x30]\n"
+ "add x23, x23, x4\n"
+ "add x22, x22, x4\n"
+ "add x21, x21, x4\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 9f\n"
+ "ld1 { v29.s }[0], [x27], #0x4\n"
+ "ld1 { v25.s }[0], [x26], #0x4\n"
+ "ld1 { v0.s }[0], [x25], #0x4\n"
+ "ld1 { v7.s }[0], [x24], #0x4\n"
+ "ld1 { v24.s }[0], [x23], #0x4\n"
+ "ld1 { v27.s }[0], [x22], #0x4\n"
+ "ld1 { v26.s }[0], [x21], #0x4\n"
+ "ld1 { v1.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 8f\n"
+ "ld1 { v29.h }[2], [x27], #0x2\n"
+ "ld1 { v25.h }[2], [x26], #0x2\n"
+ "ld1 { v0.h }[2], [x25], #0x2\n"
+ "ld1 { v7.h }[2], [x24], #0x2\n"
+ "ld1 { v24.h }[2], [x23], #0x2\n"
+ "ld1 { v27.h }[2], [x22], #0x2\n"
+ "ld1 { v26.h }[2], [x21], #0x2\n"
+ "ld1 { v1.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 11f\n"
+ "ld1 { v29.b }[6], [x27]\n"
+ "ld1 { v25.b }[6], [x26]\n"
+ "ld1 { v0.b }[6], [x25]\n"
+ "ld1 { v7.b }[6], [x24]\n"
+ "ld1 { v24.b }[6], [x23]\n"
+ "ld1 { v27.b }[6], [x22]\n"
+ "ld1 { v26.b }[6], [x21]\n"
+ "ld1 { v1.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v26.b }[4], [x27]\n"
- "ld1 { v18.b }[4], [x26]\n"
- "ld1 { v10.b }[4], [x25]\n"
- "ld1 { v27.b }[4], [x24]\n"
- "ld1 { v17.b }[4], [x23]\n"
- "ld1 { v19.b }[4], [x22]\n"
- "ld1 { v15.b }[4], [x21]\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x3, #0, 11f\n"
+ "ld1 { v29.b }[4], [x27]\n"
+ "ld1 { v25.b }[4], [x26]\n"
+ "ld1 { v0.b }[4], [x25]\n"
+ "ld1 { v7.b }[4], [x24]\n"
+ "ld1 { v24.b }[4], [x23]\n"
+ "ld1 { v27.b }[4], [x22]\n"
+ "ld1 { v26.b }[4], [x21]\n"
+ "ld1 { v1.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v26.h }[0], [x27], #0x2\n"
- "ld1 { v18.h }[0], [x26], #0x2\n"
- "ld1 { v10.h }[0], [x25], #0x2\n"
- "ld1 { v27.h }[0], [x24], #0x2\n"
- "ld1 { v17.h }[0], [x23], #0x2\n"
- "ld1 { v19.h }[0], [x22], #0x2\n"
- "ld1 { v15.h }[0], [x21], #0x2\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v26.b }[2], [x27]\n"
- "ld1 { v18.b }[2], [x26]\n"
- "ld1 { v10.b }[2], [x25]\n"
- "ld1 { v27.b }[2], [x24]\n"
- "ld1 { v17.b }[2], [x23]\n"
- "ld1 { v19.b }[2], [x22]\n"
- "ld1 { v15.b }[2], [x21]\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x3, #1, 10f\n"
+ "ld1 { v29.h }[0], [x27], #0x2\n"
+ "ld1 { v25.h }[0], [x26], #0x2\n"
+ "ld1 { v0.h }[0], [x25], #0x2\n"
+ "ld1 { v7.h }[0], [x24], #0x2\n"
+ "ld1 { v24.h }[0], [x23], #0x2\n"
+ "ld1 { v27.h }[0], [x22], #0x2\n"
+ "ld1 { v26.h }[0], [x21], #0x2\n"
+ "ld1 { v1.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 11f\n"
+ "ld1 { v29.b }[2], [x27]\n"
+ "ld1 { v25.b }[2], [x26]\n"
+ "ld1 { v0.b }[2], [x25]\n"
+ "ld1 { v7.b }[2], [x24]\n"
+ "ld1 { v24.b }[2], [x23]\n"
+ "ld1 { v27.b }[2], [x22]\n"
+ "ld1 { v26.b }[2], [x21]\n"
+ "ld1 { v1.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v26.b }[0], [x27]\n"
- "ld1 { v18.b }[0], [x26]\n"
- "ld1 { v10.b }[0], [x25]\n"
- "ld1 { v27.b }[0], [x24]\n"
- "ld1 { v17.b }[0], [x23]\n"
- "ld1 { v19.b }[0], [x22]\n"
- "ld1 { v15.b }[0], [x21]\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x3, #0, 11f\n"
+ "ld1 { v29.b }[0], [x27]\n"
+ "ld1 { v25.b }[0], [x26]\n"
+ "ld1 { v0.b }[0], [x25]\n"
+ "ld1 { v7.b }[0], [x24]\n"
+ "ld1 { v24.b }[0], [x23]\n"
+ "ld1 { v27.b }[0], [x22]\n"
+ "ld1 { v26.b }[0], [x21]\n"
+ "ld1 { v1.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "ushll v26.8h, v26.8b, #0x0\n"
- "smlal v8.4s, v26.4h, v30.4h\n"
- "smlal2 v2.4s, v26.8h, v30.8h\n"
- "ldr x20, [x15, #0x40]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "smlal v8.4s, v18.4h, v12.4h\n"
- "smlal2 v2.4s, v18.8h, v12.8h\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "smlal v21.4s, v26.4h, v3.4h\n"
- "smlal2 v4.4s, v26.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "smlal v8.4s, v10.4h, v11.4h\n"
- "smlal2 v2.4s, v10.8h, v11.8h\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr x20, [x6, #0x40]\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "ushll v7.8h, v7.8b, #0x0\n"
"ushll v27.8h, v27.8b, #0x0\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "smlal v21.4s, v27.4h, v11.4h\n"
- "smlal2 v4.4s, v27.8h, v11.8h\n"
- "smlal v8.4s, v19.4h, v24.4h\n"
- "smlal2 v2.4s, v19.8h, v24.8h\n"
- "ushll v17.8h, v17.8b, #0x0\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "smlal v21.4s, v17.4h, v25.4h\n"
- "smlal2 v4.4s, v17.8h, v25.8h\n"
- "smlal v8.4s, v15.4h, v23.4h\n"
- "smlal2 v2.4s, v15.8h, v23.8h\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal v20.4s, v26.4h, v25.4h\n"
- "smlal2 v1.4s, v26.8h, v25.8h\n"
- "smlal v16.4s, v26.4h, v12.4h\n"
- "smlal2 v14.4s, v26.8h, v12.8h\n"
- "smlal v8.4s, v28.4h, v25.4h\n"
- "smlal2 v2.4s, v28.8h, v25.8h\n"
- "smlal v21.4s, v28.4h, v12.4h\n"
- "smlal2 v4.4s, v28.8h, v12.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v31.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v31.b }[6], [x20]\n"
+ "ushll v24.8h, v24.8b, #0x0\n"
+ "smlal v22.4s, v29.4h, v3.4h\n"
+ "smlal2 v8.4s, v29.8h, v3.8h\n"
+ "smlal v9.4s, v29.4h, v10.4h\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "add x20, x20, x4\n"
+ "smlal2 v31.4s, v29.8h, v10.8h\n"
+ "ushll v1.8h, v1.8b, #0x0\n"
+ "smlal v20.4s, v29.4h, v28.4h\n"
+ "smlal2 v21.4s, v29.8h, v28.8h\n"
+ "smlal v18.4s, v29.4h, v15.4h\n"
+ "smlal v22.4s, v25.4h, v15.4h\n"
+ "smlal2 v5.4s, v29.8h, v15.8h\n"
+ "smlal2 v8.4s, v25.8h, v15.8h\n"
+ "smlal v9.4s, v7.4h, v13.4h\n"
+ "smlal2 v31.4s, v7.8h, v13.8h\n"
+ "smlal v22.4s, v0.4h, v13.4h\n"
+ "smlal2 v8.4s, v0.8h, v13.8h\n"
+ "smlal v9.4s, v24.4h, v28.4h\n"
+ "smlal v22.4s, v27.4h, v11.4h\n"
+ "smlal2 v31.4s, v24.8h, v28.8h\n"
+ "smlal2 v8.4s, v27.8h, v11.8h\n"
+ "smlal v9.4s, v1.4h, v15.4h\n"
+ "smlal v22.4s, v26.4h, v23.4h\n"
+ "smlal2 v31.4s, v1.8h, v15.8h\n"
+ "smlal2 v8.4s, v26.8h, v23.8h\n"
+ "smlal v22.4s, v1.4h, v28.4h\n"
+ "smlal2 v8.4s, v1.8h, v28.8h\n"
+ "tbz x3, #2, 13f\n"
+ "ld1 { v30.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 12f\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 15f\n"
+ "ld1 { v30.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v31.b }[4], [x20]\n"
+ "tbz x3, #0, 15f\n"
+ "ld1 { v30.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v31.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v31.b }[2], [x20]\n"
+ "tbz x3, #1, 14f\n"
+ "ld1 { v30.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 15f\n"
+ "ld1 { v30.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v31.b }[0], [x20]\n"
+ "tbz x3, #0, 15f\n"
+ "ld1 { v30.b }[0], [x20]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
- "ushll v31.8h, v31.8b, #0x0\n"
- "ldr x20, [x15, #0x48]\n"
- "smlal v21.4s, v31.4h, v23.4h\n"
- "smlal2 v4.4s, v31.8h, v23.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ushll v30.8h, v30.8b, #0x0\n"
+ "ldr x20, [x6, #0x48]\n"
+ "smlal v9.4s, v30.4h, v23.4h\n"
+ "smlal2 v31.4s, v30.8h, v23.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 17f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 16f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 19f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x3, #0, 19f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x3, #1, 18f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 19f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x3, #0, 19f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
- "ushll v28.8h, v28.8b, #0x0\n"
- "ldr x20, [x15, #0x50]\n"
- "smlal v21.4s, v28.4h, v7.4h\n"
- "smlal2 v4.4s, v28.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 21f\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x6, #0x50]\n"
+ "smlal v9.4s, v16.4h, v17.4h\n"
+ "smlal2 v31.4s, v16.8h, v17.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 21f\n"
"ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 20f\n"
+ "tbz x3, #1, 20f\n"
"ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x3, #0, 23f\n"
"ld1 { v27.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x3, #0, 23f\n"
"ld1 { v27.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (1, 2): Bit 2: Unset
- "tbz x7, #1, 22f\n"
+ "tbz x3, #1, 22f\n"
"ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
+ "tbz x3, #0, 23f\n"
"ld1 { v27.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
+ "tbz x3, #0, 23f\n"
"ld1 { v27.b }[0], [x20]\n"
"23:" // Oddments: Load (1, 2): Bit 2: End
"ushll v27.8h, v27.8b, #0x0\n"
- "ldr x20, [x15, #0x58]\n"
- "smlal v8.4s, v27.4h, v7.4h\n"
- "smlal2 v2.4s, v27.8h, v7.8h\n"
- "smlal v21.4s, v27.4h, v24.4h\n"
- "smlal2 v4.4s, v27.8h, v24.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v0.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v0.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v0.b }[6], [x20]\n"
+ "ldr x20, [x6, #0x58]\n"
+ "smlal v22.4s, v27.4h, v17.4h\n"
+ "smlal2 v8.4s, v27.8h, v17.8h\n"
+ "smlal v9.4s, v27.4h, v11.4h\n"
+ "smlal2 v31.4s, v27.8h, v11.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 25f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 24f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 27f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v0.b }[4], [x20]\n"
+ "tbz x3, #0, 27f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v0.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v0.b }[2], [x20]\n"
+ "tbz x3, #1, 26f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 27f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v0.b }[0], [x20]\n"
+ "tbz x3, #0, 27f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"27:" // Oddments: Load (3, 0): Bit 2: End
- "ushll v0.8h, v0.8b, #0x0\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v20.4s, v0.4h, v24.4h\n"
- "smlal2 v1.4s, v0.8h, v24.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x6, #0x60]\n"
+ "smlal v20.4s, v16.4h, v11.4h\n"
+ "smlal2 v21.4s, v16.8h, v11.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 29f\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 28f\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 31f\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x3, #0, 31f\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 30f\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x3, #1, 30f\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 31f\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x3, #0, 31f\n"
+ "ld1 { v29.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 0): Bit 2: End
- "ushll v15.8h, v15.8b, #0x0\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v8.4s, v15.4h, v3.4h\n"
- "smlal2 v2.4s, v15.8h, v3.8h\n"
- "smlal v20.4s, v15.4h, v12.4h\n"
- "smlal2 v1.4s, v15.8h, v12.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v0.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v0.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v0.b }[6], [x20]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "ldr x20, [x6, #0x68]\n"
+ "smlal v22.4s, v29.4h, v10.4h\n"
+ "smlal2 v8.4s, v29.8h, v10.8h\n"
+ "smlal v20.4s, v29.4h, v15.4h\n"
+ "smlal2 v21.4s, v29.8h, v15.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 33f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 32f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 35f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v0.b }[4], [x20]\n"
+ "tbz x3, #0, 35f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v0.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v0.b }[2], [x20]\n"
+ "tbz x3, #1, 34f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 35f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v0.b }[0], [x20]\n"
+ "tbz x3, #0, 35f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"35:" // Oddments: Load (3, 1): Bit 2: End
- "ushll v0.8h, v0.8b, #0x0\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal v20.4s, v0.4h, v23.4h\n"
- "smlal2 v1.4s, v0.8h, v23.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v6.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x6, #0x70]\n"
+ "smlal v20.4s, v16.4h, v23.4h\n"
+ "smlal2 v21.4s, v16.8h, v23.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 37f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 36f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 39f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v6.b }[4], [x20]\n"
+ "tbz x3, #0, 39f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v6.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v6.b }[2], [x20]\n"
+ "tbz x3, #1, 38f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 39f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v6.b }[0], [x20]\n"
+ "tbz x3, #0, 39f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"39:" // Oddments: Load (2, 1): Bit 2: End
- "ushll v6.8h, v6.8b, #0x0\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v8.4s, v6.4h, v9.4h\n"
- "smlal2 v2.4s, v6.8h, v9.8h\n"
- "smlal v20.4s, v6.4h, v11.4h\n"
- "smlal2 v1.4s, v6.8h, v11.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "ldr x20, [x6, #0x78]\n"
+ "smlal v22.4s, v26.4h, v2.4h\n"
+ "smlal2 v8.4s, v26.8h, v2.8h\n"
+ "smlal v20.4s, v26.4h, v13.4h\n"
+ "smlal2 v21.4s, v26.8h, v13.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 41f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 40f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 43f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "tbz x3, #0, 43f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "tbz x3, #1, 42f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 43f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "tbz x3, #0, 43f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"43:" // Oddments: Load (3, 3): Bit 2: End
- "ushll v27.8h, v27.8b, #0x0\n"
- "ldr x20, [x15, #0x80]\n"
- "smlal v16.4s, v27.4h, v23.4h\n"
- "smlal2 v14.4s, v27.8h, v23.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 45f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 44f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x6, #0x80]\n"
+ "smlal v18.4s, v16.4h, v23.4h\n"
+ "smlal2 v5.4s, v16.8h, v23.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 45f\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 44f\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 47f\n"
+ "ld1 { v25.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "tbz x3, #0, 47f\n"
+ "ld1 { v25.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "tbz x3, #1, 46f\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 47f\n"
+ "ld1 { v25.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "tbz x3, #0, 47f\n"
+ "ld1 { v25.b }[0], [x20]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "ushll v10.8h, v10.8b, #0x0\n"
- "ldr x20, [x15, #0x88]\n"
- "smlal v21.4s, v10.4h, v9.4h\n"
- "smlal2 v4.4s, v10.8h, v9.8h\n"
- "smlal v16.4s, v10.4h, v11.4h\n"
- "smlal2 v14.4s, v10.8h, v11.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr x20, [x6, #0x88]\n"
+ "smlal v9.4s, v25.4h, v2.4h\n"
+ "smlal2 v31.4s, v25.8h, v2.8h\n"
+ "smlal v18.4s, v25.4h, v13.4h\n"
+ "smlal2 v5.4s, v25.8h, v13.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 49f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 48f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 51f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x3, #0, 51f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x3, #1, 50f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 51f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x3, #0, 51f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 4): Bit 2: End
- "ushll v28.8h, v28.8b, #0x0\n"
- "ldr x20, [x15, #0x90]\n"
- "smlal v16.4s, v28.4h, v7.4h\n"
- "smlal2 v14.4s, v28.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x6, #0x90]\n"
+ "smlal v18.4s, v16.4h, v17.4h\n"
+ "smlal2 v5.4s, v16.8h, v17.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 53f\n"
+ "ld1 { v1.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 52f\n"
+ "ld1 { v1.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 55f\n"
+ "ld1 { v1.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x3, #0, 55f\n"
+ "ld1 { v1.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x3, #1, 54f\n"
+ "ld1 { v1.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 55f\n"
+ "ld1 { v1.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x3, #0, 55f\n"
+ "ld1 { v1.b }[0], [x20]\n"
"55:" // Oddments: Load (4, 0): Bit 2: End
- "ushll v15.8h, v15.8b, #0x0\n"
- "ldr x20, [x15, #0x98]\n"
- "smlal v20.4s, v15.4h, v3.4h\n"
- "smlal2 v1.4s, v15.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 57f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 56f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v6.b }[6], [x20]\n"
+ "ushll v1.8h, v1.8b, #0x0\n"
+ "ldr x20, [x6, #0x98]\n"
+ "smlal v20.4s, v1.4h, v10.4h\n"
+ "smlal2 v21.4s, v1.8h, v10.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 57f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 56f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 59f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 59f\n"
"56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v6.b }[4], [x20]\n"
+ "tbz x3, #0, 59f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 59f\n"
"57:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x7, #1, 58f\n"
- "ld1 { v6.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v6.b }[2], [x20]\n"
+ "tbz x3, #1, 58f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 59f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 59f\n"
"58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v6.b }[0], [x20]\n"
+ "tbz x3, #0, 59f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"59:" // Oddments: Load (2, 4): Bit 2: End
- "ushll v6.8h, v6.8b, #0x0\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal v21.4s, v6.4h, v30.4h\n"
- "smlal2 v4.4s, v6.8h, v30.8h\n"
- "smlal v16.4s, v6.4h, v25.4h\n"
- "smlal2 v14.4s, v6.8h, v25.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 61f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 60f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x6, #0xa0]\n"
+ "smlal v9.4s, v16.4h, v3.4h\n"
+ "smlal2 v31.4s, v16.8h, v3.8h\n"
+ "smlal v18.4s, v16.4h, v28.4h\n"
+ "smlal2 v5.4s, v16.8h, v28.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 61f\n"
+ "ld1 { v19.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 60f\n"
+ "ld1 { v19.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 63f\n"
+ "ld1 { v19.b }[6], [x20]\n"
"b 63f\n"
"60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x3, #0, 63f\n"
+ "ld1 { v19.b }[4], [x20]\n"
"b 63f\n"
"61:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x3, #1, 62f\n"
+ "ld1 { v19.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 63f\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 63f\n"
"62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x3, #0, 63f\n"
+ "ld1 { v19.b }[0], [x20]\n"
"63:" // Oddments: Load (4, 1): Bit 2: End
- "ushll v23.8h, v23.8b, #0x0\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v20.4s, v23.4h, v9.4h\n"
- "smlal2 v1.4s, v23.8h, v9.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 65f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 64f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v12.b }[6], [x20]\n"
+ "ushll v19.8h, v19.8b, #0x0\n"
+ "ldr x20, [x6, #0xa8]\n"
+ "smlal v20.4s, v19.4h, v2.4h\n"
+ "smlal2 v21.4s, v19.8h, v2.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 65f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 64f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 67f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v12.b }[4], [x20]\n"
+ "tbz x3, #0, 67f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 66f\n"
- "ld1 { v12.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 67f\n"
- "ld1 { v12.b }[2], [x20]\n"
+ "tbz x3, #1, 66f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 67f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 67f\n"
- "ld1 { v12.b }[0], [x20]\n"
+ "tbz x3, #0, 67f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"67:" // Oddments: Load (3, 2): Bit 2: End
- "ushll v12.8h, v12.8b, #0x0\n"
- "ldr x20, [x15, #0xb0]\n"
- "smlal v20.4s, v12.4h, v7.4h\n"
- "smlal2 v1.4s, v12.8h, v7.8h\n"
- "smlal v16.4s, v12.4h, v24.4h\n"
- "smlal2 v14.4s, v12.8h, v24.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 69f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 68f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x6, #0xb0]\n"
+ "smlal v20.4s, v16.4h, v17.4h\n"
+ "smlal2 v21.4s, v16.8h, v17.8h\n"
+ "smlal v18.4s, v16.4h, v11.4h\n"
+ "smlal2 v5.4s, v16.8h, v11.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 69f\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 68f\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 71f\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "tbz x3, #0, 71f\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x7, #1, 70f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "tbz x3, #1, 70f\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 71f\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "tbz x3, #0, 71f\n"
+ "ld1 { v29.b }[0], [x20]\n"
"71:" // Oddments: Load (4, 3): Bit 2: End
- "ushll v10.8h, v10.8b, #0x0\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal v16.4s, v10.4h, v9.4h\n"
- "smlal2 v14.4s, v10.8h, v9.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 73f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 72f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "ldr x20, [x6, #0xb8]\n"
+ "smlal v18.4s, v29.4h, v2.4h\n"
+ "smlal2 v5.4s, v29.8h, v2.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 73f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 72f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 75f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x3, #0, 75f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x7, #1, 74f\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 75f\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x3, #1, 74f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 75f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 75f\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x3, #0, 75f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 2): Bit 2: End
- "ushll v15.8h, v15.8b, #0x0\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal v20.4s, v15.4h, v30.4h\n"
- "smlal2 v1.4s, v15.8h, v30.8h\n"
- "smlal v16.4s, v15.4h, v3.4h\n"
- "smlal2 v14.4s, v15.8h, v3.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 77f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 76f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x6, #0xc0]\n"
+ "smlal v20.4s, v16.4h, v3.4h\n"
+ "smlal2 v21.4s, v16.8h, v3.8h\n"
+ "smlal v18.4s, v16.4h, v10.4h\n"
+ "smlal2 v5.4s, v16.8h, v10.8h\n"
+ "add x20, x20, x4\n"
+ "tbz x3, #2, 77f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x3, #1, 76f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x3, #0, 79f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x3, #0, 79f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x7, #1, 78f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x3, #1, 78f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x3, #0, 79f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x3, #0, 79f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"79:" // Oddments: Load (4, 4): Bit 2: End
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal v16.4s, v28.4h, v30.4h\n"
- "smlal2 v14.4s, v28.8h, v30.8h\n"
- "tbz x7, #2, 81f\n"
- "ld1 { v19.4s }, [x13], #0x10\n"
- "ld1 { v23.4s }, [x12], #0x10\n"
- "tbz x7, #1, 80f\n"
- "ld1 { v18.d }[0], [x13], #0x8\n"
- "ld1 { v24.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[2], [x13]\n"
- "ld1 { v24.s }[2], [x12]\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "smlal v18.4s, v17.4h, v3.4h\n"
+ "smlal2 v5.4s, v17.8h, v3.8h\n"
+ "tbz x3, #2, 81f\n"
+ "ld1 { v16.4s }, [x8], #0x10\n"
+ "ld1 { v23.4s }, [x17], #0x10\n"
+ "tbz x3, #1, 80f\n"
+ "ld1 { v26.d }[0], [x8], #0x8\n"
+ "ld1 { v2.d }[0], [x17], #0x8\n"
+ "tbz x3, #0, 83f\n"
+ "ld1 { v26.s }[2], [x8]\n"
+ "ld1 { v2.s }[2], [x17]\n"
"b 83f\n"
"80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[0], [x13]\n"
- "ld1 { v24.s }[0], [x12]\n"
+ "tbz x3, #0, 83f\n"
+ "ld1 { v26.s }[0], [x8]\n"
+ "ld1 { v2.s }[0], [x17]\n"
"b 83f\n"
"81:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 82f\n"
- "ld1 { v19.d }[0], [x13], #0x8\n"
- "ld1 { v23.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v19.s }[2], [x13]\n"
- "ld1 { v23.s }[2], [x12]\n"
+ "tbz x3, #1, 82f\n"
+ "ld1 { v16.d }[0], [x8], #0x8\n"
+ "ld1 { v23.d }[0], [x17], #0x8\n"
+ "tbz x3, #0, 83f\n"
+ "ld1 { v16.s }[2], [x8]\n"
+ "ld1 { v23.s }[2], [x17]\n"
"b 83f\n"
"82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v19.s }[0], [x13]\n"
- "ld1 { v23.s }[0], [x12]\n"
+ "tbz x3, #0, 83f\n"
+ "ld1 { v16.s }[0], [x8]\n"
+ "ld1 { v23.s }[0], [x17]\n"
"83:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v8.4s, v8.4s, v19.4s\n"
- "and v17.16b, v8.16b, v23.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v2.4s, v2.4s, v18.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v16.4s\n"
+ "sqrdmulh v8.4s, v8.4s, v26.4s\n"
+ "add x15, x15, x5\n"
+ "add x14, x14, x5\n"
+ "sqrdmulh v9.4s, v9.4s, v16.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v16.4s\n"
+ "add x13, x13, x5\n"
+ "add x12, x12, x5\n"
+ "sqrdmulh v18.4s, v18.4s, v16.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v26.4s\n"
+ "and v17.16b, v22.16b, v23.16b\n"
+ "and v16.16b, v8.16b, v2.16b\n"
+ "and v19.16b, v9.16b, v23.16b\n"
+ "and v28.16b, v20.16b, v23.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v26.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v11.16b, v2.16b, v24.16b\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqrdmulh v20.4s, v20.4s, v19.4s\n"
- "sqrdmulh v16.4s, v16.4s, v19.4s\n"
- "sqadd v8.4s, v8.4s, v17.4s\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "and v28.16b, v21.16b, v23.16b\n"
- "sqrdmulh v4.4s, v4.4s, v18.4s\n"
- "and v17.16b, v20.16b, v23.16b\n"
- "sqrdmulh v1.4s, v1.4s, v18.4s\n"
- "and v19.16b, v16.16b, v23.16b\n"
- "sqrdmulh v14.4s, v14.4s, v18.4s\n"
- "sqadd v2.4s, v2.4s, v11.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v15.16b, v31.16b, v2.16b\n"
"sshr v28.4s, v28.4s, #0x1f\n"
- "and v18.16b, v4.16b, v24.16b\n"
+ "and v0.16b, v21.16b, v2.16b\n"
+ "sqadd v22.4s, v22.4s, v17.4s\n"
+ "and v17.16b, v18.16b, v23.16b\n"
+ "sqadd v8.4s, v8.4s, v16.4s\n"
+ "and v16.16b, v5.16b, v2.16b\n"
+ "sqadd v9.4s, v9.4s, v19.4s\n"
+ "sshr v15.4s, v15.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "and v12.16b, v1.16b, v24.16b\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "and v25.16b, v14.16b, v24.16b\n"
- "sqadd v21.4s, v21.4s, v28.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v17.4s\n"
- "sshr v12.4s, v12.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v19.4s\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "srshl v8.4s, v8.4s, v23.4s\n"
- "srshl v21.4s, v21.4s, v23.4s\n"
- "sqadd v4.4s, v4.4s, v18.4s\n"
+ "sqadd v20.4s, v20.4s, v28.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v22.4s, v22.4s, v23.4s\n"
+ "srshl v9.4s, v9.4s, v23.4s\n"
+ "sqadd v18.4s, v18.4s, v17.4s\n"
+ "sqadd v31.4s, v31.4s, v15.4s\n"
"srshl v20.4s, v20.4s, v23.4s\n"
- "sqadd v1.4s, v1.4s, v12.4s\n"
- "srshl v16.4s, v16.4s, v23.4s\n"
- "sqadd v14.4s, v14.4s, v25.4s\n"
- "srshl v2.4s, v2.4s, v24.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "srshl v4.4s, v4.4s, v24.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v1.4s, v1.4s, v24.4s\n"
+ "sqadd v21.4s, v21.4s, v0.4s\n"
+ "sqadd v5.4s, v5.4s, v16.4s\n"
+ "srshl v8.4s, v8.4s, v2.4s\n"
+ "srshl v18.4s, v18.4s, v23.4s\n"
+ "sqxtn v22.4h, v22.4s\n"
+ "srshl v31.4s, v31.4s, v2.4s\n"
+ "sqxtn v9.4h, v9.4s\n"
+ "srshl v21.4s, v21.4s, v2.4s\n"
"sqxtn v20.4h, v20.4s\n"
- "srshl v14.4s, v14.4s, v24.4s\n"
- "sqxtn v16.4h, v16.4s\n"
- "sqxtn2 v8.8h, v2.4s\n"
- "sqxtn2 v21.8h, v4.4s\n"
- "sqxtn2 v20.8h, v1.4s\n"
- "sqxtn2 v16.8h, v14.4s\n"
- "sqadd v8.8h, v8.8h, v22.8h\n"
- "sqadd v21.8h, v21.8h, v22.8h\n"
- "sqadd v20.8h, v20.8h, v22.8h\n"
- "sqadd v16.8h, v16.8h, v22.8h\n"
- "smax v8.8h, v8.8h, v13.8h\n"
- "smax v21.8h, v21.8h, v13.8h\n"
- "smax v20.8h, v20.8h, v13.8h\n"
- "smax v16.8h, v16.8h, v13.8h\n"
- "smin v8.8h, v8.8h, v5.8h\n"
- "smin v21.8h, v21.8h, v5.8h\n"
- "smin v20.8h, v20.8h, v5.8h\n"
- "smin v16.8h, v16.8h, v5.8h\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
+ "srshl v5.4s, v5.4s, v2.4s\n"
+ "sqxtn v18.4h, v18.4s\n"
+ "sqxtn2 v22.8h, v8.4s\n"
+ "sqxtn2 v9.8h, v31.4s\n"
+ "sqxtn2 v20.8h, v21.4s\n"
+ "sqxtn2 v18.8h, v5.4s\n"
+ "sqadd v22.8h, v22.8h, v12.8h\n"
+ "sqadd v9.8h, v9.8h, v12.8h\n"
+ "sqadd v20.8h, v20.8h, v12.8h\n"
+ "sqadd v18.8h, v18.8h, v12.8h\n"
+ "smax v22.8h, v22.8h, v14.8h\n"
+ "smax v9.8h, v9.8h, v14.8h\n"
+ "smax v20.8h, v20.8h, v14.8h\n"
+ "smax v18.8h, v18.8h, v14.8h\n"
+ "smin v22.8h, v22.8h, v6.8h\n"
+ "smin v9.8h, v9.8h, v6.8h\n"
+ "smin v20.8h, v20.8h, v6.8h\n"
+ "smin v18.8h, v18.8h, v6.8h\n"
+ "uzp1 v22.16b, v22.16b, v22.16b\n"
+ "uzp1 v9.16b, v9.16b, v9.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v16.16b, v16.16b, v16.16b\n"
- "tbz x7, #2, 85f\n"
- "st1 { v8.s }[0], [x11], #0x4\n"
- "st1 { v21.s }[0], [x10], #0x4\n"
- "st1 { v20.s }[0], [x9], #0x4\n"
- "st1 { v16.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 84f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v21.h }[2], [x10], #0x2\n"
- "st1 { v20.h }[2], [x9], #0x2\n"
- "st1 { v16.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v8.b }[6], [x11], #0x1\n"
- "st1 { v21.b }[6], [x10], #0x1\n"
- "st1 { v20.b }[6], [x9], #0x1\n"
- "st1 { v16.b }[6], [x28], #0x1\n"
+ "uzp1 v18.16b, v18.16b, v18.16b\n"
+ "tbz x3, #2, 85f\n"
+ "st1 { v22.s }[0], [x15], #0x4\n"
+ "st1 { v9.s }[0], [x14], #0x4\n"
+ "st1 { v20.s }[0], [x13], #0x4\n"
+ "st1 { v18.s }[0], [x12], #0x4\n"
+ "tbz x3, #1, 84f\n"
+ "st1 { v22.h }[2], [x15], #0x2\n"
+ "st1 { v9.h }[2], [x14], #0x2\n"
+ "st1 { v20.h }[2], [x13], #0x2\n"
+ "st1 { v18.h }[2], [x12], #0x2\n"
+ "tbz x3, #0, 87f\n"
+ "st1 { v22.b }[6], [x15], #0x1\n"
+ "st1 { v9.b }[6], [x14], #0x1\n"
+ "st1 { v20.b }[6], [x13], #0x1\n"
+ "st1 { v18.b }[6], [x12], #0x1\n"
"b 87f\n"
"84:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v8.b }[4], [x11], #0x1\n"
- "st1 { v21.b }[4], [x10], #0x1\n"
- "st1 { v20.b }[4], [x9], #0x1\n"
- "st1 { v16.b }[4], [x28], #0x1\n"
+ "tbz x3, #0, 87f\n"
+ "st1 { v22.b }[4], [x15], #0x1\n"
+ "st1 { v9.b }[4], [x14], #0x1\n"
+ "st1 { v20.b }[4], [x13], #0x1\n"
+ "st1 { v18.b }[4], [x12], #0x1\n"
"b 87f\n"
"85:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 86f\n"
- "st1 { v8.h }[0], [x11], #0x2\n"
- "st1 { v21.h }[0], [x10], #0x2\n"
- "st1 { v20.h }[0], [x9], #0x2\n"
- "st1 { v16.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v8.b }[2], [x11], #0x1\n"
- "st1 { v21.b }[2], [x10], #0x1\n"
- "st1 { v20.b }[2], [x9], #0x1\n"
- "st1 { v16.b }[2], [x28], #0x1\n"
+ "tbz x3, #1, 86f\n"
+ "st1 { v22.h }[0], [x15], #0x2\n"
+ "st1 { v9.h }[0], [x14], #0x2\n"
+ "st1 { v20.h }[0], [x13], #0x2\n"
+ "st1 { v18.h }[0], [x12], #0x2\n"
+ "tbz x3, #0, 87f\n"
+ "st1 { v22.b }[2], [x15], #0x1\n"
+ "st1 { v9.b }[2], [x14], #0x1\n"
+ "st1 { v20.b }[2], [x13], #0x1\n"
+ "st1 { v18.b }[2], [x12], #0x1\n"
"b 87f\n"
"86:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v8.b }[0], [x11], #0x1\n"
- "st1 { v21.b }[0], [x10], #0x1\n"
- "st1 { v20.b }[0], [x9], #0x1\n"
- "st1 { v16.b }[0], [x28], #0x1\n"
+ "tbz x3, #0, 87f\n"
+ "st1 { v22.b }[0], [x15], #0x1\n"
+ "st1 { v9.b }[0], [x14], #0x1\n"
+ "st1 { v20.b }[0], [x13], #0x1\n"
+ "st1 { v18.b }[0], [x12], #0x1\n"
"87:" // Oddments: Bit 2: End
"88:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
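
For readers tracing the store path of the kernel above, the requantization tail (sqrdmulh, then the and / sshr #31 / sqadd fixup feeding srshl, then sqxtn / sqadd / smax / smin / uzp1 before the st1 stores) maps onto a short scalar model. The sketch below is illustrative only: the function name and flat parameter list are invented for this note, it models a single lane with its own multiplier and non-positive shift as loaded from requant_muls/requant_shifts, and it uses the c_offset/minval/maxval fields visible in the arm_gemm::Requantize32 operand list above.

#include <algorithm>
#include <cstdint>

// Scalar model of the NEON requantize/store tail, one accumulator lane at a
// time. Assumption: shift <= 0, matching the values srshl consumes.
static inline uint8_t requantize_lane(int32_t acc, int32_t multiplier, int32_t shift,
                                      int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: saturating rounding doubling multiply, keeping the high half.
    // (2*a*b + 2^30) >> 31 is computed as (a*b + 2^29) >> 30 to stay inside
    // int64; the one saturating edge case (both operands INT32_MIN) is omitted.
    int64_t prod = static_cast<int64_t>(acc) * static_cast<int64_t>(multiplier);
    int32_t v    = static_cast<int32_t>((prod + (INT64_C(1) << 29)) >> 30);

    // The and / sshr #31 / sqadd trio nudges negative values down by one so
    // that the following srshl (a rounding right shift, since shift <= 0)
    // rounds half away from zero instead of half up.
    int n = -shift;
    if (n > 0)
    {
        if (v < 0)
        {
            v -= 1; // the sqadd(sign) fixup
        }
        v = static_cast<int32_t>((static_cast<int64_t>(v) + (INT64_C(1) << (n - 1))) >> n);
    }

    // sqxtn narrows to int16 with saturation, sqadd adds c_offset, smax/smin
    // clamp to [minval, maxval], and uzp1 keeps the low byte of each lane.
    // With the usual 8-bit bounds the final clamp subsumes the int16
    // saturation, so this sketch folds the two steps together.
    int32_t out = std::clamp(v + c_offset, minval, maxval);
    return static_cast<uint8_t>(out);
}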
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 9316732632..42de21c670 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[36];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -111,2071 +111,2071 @@ void a64_u8qa_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x2, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x3, x2, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v2.16b }, [x20]\n"
+ "mov x2, #0x0\n"
+ "mov x3, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x4, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x5, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x8, x1, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
+ "ld1r { v14.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_c_offset]\n"
"add x20, x23, %[offsetof_Requantize32_minval]\n"
- "ld1r { v25.8h }, [x21]\n"
- "ld1r { v12.8h }, [x20]\n"
+ "ld1r { v15.8h }, [x21]\n"
+ "ld1r { v31.8h }, [x20]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "mov x4, #0x0\n"
- "ld1r { v26.8h }, [x20]\n"
- "mov x5, #0x0\n"
- "add x6, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x7, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x8, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x17, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x16, x15, [x22, #0x0]\n"
- "ldp x14, x13, [x22, #0x10]\n"
- "cbz x3, 3f\n"
- "ldr d21, [x7, #0x0]\n"
- "ldr d15, [x7, #0x8]\n"
- "subs x3, x3, #0x1\n"
- "usubl v21.8h, v21.8b, v2.8b\n"
- "ldr d29, [x7, #0x10]\n"
- "ldr d18, [x7, #0x18]\n"
- "usubl v15.8h, v15.8b, v2.8b\n"
- "usubl v29.8h, v29.8b, v2.8b\n"
- "ldr d3, [x7, #0x20]\n"
+ "ld1r { v28.8h }, [x20]\n"
+ "ldp x17, x16, [x22, #0x0]\n"
+ "ldp x15, x14, [x22, #0x10]\n"
+ "cbz x8, 3f\n"
+ "ldr d6, [x5, #0x0]\n"
+ "ldr d20, [x5, #0x8]\n"
+ "subs x8, x8, #0x1\n"
+ "ldr d9, [x5, #0x10]\n"
+ "ldr d1, [x5, #0x18]\n"
+ "ldr d17, [x5, #0x20]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "usubl v18.8h, v18.8b, v2.8b\n"
- "usubl v3.8h, v3.8b, v2.8b\n"
- "ldr q13, [x20, #0x0]\n"
+ "usubl v6.8h, v6.8b, v14.8b\n"
+ "usubl v20.8h, v20.8b, v14.8b\n"
+ "usubl v9.8h, v9.8b, v14.8b\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldr q30, [x20, #0x0]\n"
"ldr q24, [x20, #0x10]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
"add x20, x20, #0x20\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x9, x28, [x6, #0x0]\n"
- "ldp x27, x26, [x6, #0x10]\n"
- "mov v7.16b, v13.16b\n"
- "mov v14.16b, v24.16b\n"
- "ldp x25, x24, [x6, #0x20]\n"
- "ldp x23, x22, [x6, #0x30]\n"
- "mov v27.16b, v13.16b\n"
- "mov v22.16b, v24.16b\n"
- "ldp x21, x20, [x6, #0x40]\n"
- "ldr d10, [x9, x4]\n"
- "mov v8.16b, v13.16b\n"
- "mov v17.16b, v24.16b\n"
- "ldr d16, [x28, x4]\n"
- "ldr d23, [x27, x4]\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "ushll v16.8h, v16.8b, #0x0\n"
- "ldr d30, [x26, x4]\n"
- "ldr d4, [x25, x4]\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "ushll v30.8h, v30.8b, #0x0\n"
- "ldr d28, [x24, x4]\n"
- "ldr d31, [x23, x4]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "mov v12.16b, v30.16b\n"
+ "mov v13.16b, v24.16b\n"
+ "mov v5.16b, v30.16b\n"
+ "mov v23.16b, v24.16b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "mov v7.16b, v30.16b\n"
+ "mov v19.16b, v24.16b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "ldr d18, [x9, x2]\n"
+ "ldr d4, [x28, x2]\n"
+ "ldr d0, [x27, x2]\n"
+ "ldr d25, [x26, x2]\n"
+ "ldr d10, [x25, x2]\n"
+ "ldr d11, [x24, x2]\n"
+ "ldr d22, [x23, x2]\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
+ "ldr d21, [x22, x2]\n"
+ "ldr d8, [x21, x2]\n"
"ushll v4.8h, v4.8b, #0x0\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "ldr d1, [x22, x4]\n"
- "ldr d9, [x21, x4]\n"
- "ushll v31.8h, v31.8b, #0x0\n"
- "ushll v1.8h, v1.8b, #0x0\n"
- "ldr d11, [x20, x4]\n"
- "ushll v9.8h, v9.8b, #0x0\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "ldr d26, [x20, x2]\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
"ushll v11.8h, v11.8b, #0x0\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "ushll v21.8h, v21.8b, #0x0\n"
+ "ushll v8.8h, v8.8b, #0x0\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
"beq 2f\n"
"1:" // Loop
- "ldr d5, [x7, #0x28]\n"
- "ldr d6, [x7, #0x30]\n"
- "smlal v13.4s, v10.4h, v21.4h\n"
- "smlal2 v24.4s, v10.8h, v21.8h\n"
- "ldr d19, [x7, #0x38]\n"
- "ldr d0, [x7, #0x40]\n"
- "smlal v13.4s, v16.4h, v15.4h\n"
- "smlal v7.4s, v16.4h, v21.4h\n"
- "ldr d10, [x7, #0x48]\n"
- "ldr d20, [x7, #0x50]\n"
- "smlal v27.4s, v23.4h, v21.4h\n"
- "smlal v8.4s, v30.4h, v21.4h\n"
- "ldr x21, [x6, #0x50]\n"
- "smlal2 v24.4s, v16.8h, v15.8h\n"
- "smlal v13.4s, v4.4h, v29.4h\n"
- "ldr x20, [x6, #0x58]\n"
- "smlal2 v14.4s, v16.8h, v21.8h\n"
- "ldr d16, [x21, x4]\n"
- "smlal2 v22.4s, v23.8h, v21.8h\n"
- "ushll v16.8h, v16.8b, #0x0\n"
- "smlal2 v17.4s, v30.8h, v21.8h\n"
- "ldr d21, [x20, x4]\n"
- "smlal v7.4s, v4.4h, v15.4h\n"
- "ldr x22, [x6, #0x60]\n"
- "smlal v27.4s, v30.4h, v15.4h\n"
- "smlal v8.4s, v28.4h, v15.4h\n"
+ "ldr d29, [x5, #0x28]\n"
+ "ldr d2, [x5, #0x30]\n"
+ "smlal v30.4s, v18.4h, v6.4h\n"
+ "smlal2 v24.4s, v18.8h, v6.8h\n"
+ "ldr d18, [x5, #0x38]\n"
+ "ldr d27, [x5, #0x40]\n"
+ "smlal v12.4s, v4.4h, v6.4h\n"
+ "smlal v5.4s, v0.4h, v6.4h\n"
+ "ldr d16, [x5, #0x48]\n"
+ "ldr d3, [x5, #0x50]\n"
+ "smlal v7.4s, v25.4h, v6.4h\n"
+ "smlal2 v13.4s, v4.8h, v6.8h\n"
+ "ldr x23, [x4, #0x50]\n"
+ "smlal2 v23.4s, v0.8h, v6.8h\n"
+ "smlal2 v19.4s, v25.8h, v6.8h\n"
+ "ldr d6, [x5, #0x58]\n"
+ "smlal v30.4s, v4.4h, v20.4h\n"
+ "smlal2 v24.4s, v4.8h, v20.8h\n"
+ "ldr d4, [x5, #0x60]\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v12.4s, v10.4h, v20.4h\n"
+ "smlal v5.4s, v25.4h, v20.4h\n"
+ "ldr x22, [x4, #0x60]\n"
+ "usubl v29.8h, v29.8b, v14.8b\n"
+ "smlal v7.4s, v11.4h, v20.4h\n"
+ "smlal2 v13.4s, v10.8h, v20.8h\n"
+ "ldr x21, [x4, #0x68]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal2 v23.4s, v25.8h, v20.8h\n"
+ "smlal2 v19.4s, v11.8h, v20.8h\n"
+ "ldr d20, [x23, x2]\n"
+ "ldr x27, [x4, #0x70]\n"
+ "smlal v30.4s, v10.4h, v9.4h\n"
+ "smlal2 v24.4s, v10.8h, v9.8h\n"
+ "ldr d10, [x20, x2]\n"
+ "usubl v18.8h, v18.8b, v14.8b\n"
+ "smlal v12.4s, v22.4h, v9.4h\n"
+ "smlal v5.4s, v11.4h, v9.4h\n"
+ "ldr x20, [x4, #0x78]\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "ushll v20.8h, v20.8b, #0x0\n"
+ "smlal2 v13.4s, v22.8h, v9.8h\n"
+ "ldr x26, [x4, #0x80]\n"
+ "ldr x25, [x4, #0x88]\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
+ "smlal2 v23.4s, v11.8h, v9.8h\n"
+ "ldr x24, [x4, #0x90]\n"
+ "ldr x23, [x4, #0x98]\n"
+ "smlal v30.4s, v22.4h, v1.4h\n"
+ "smlal2 v24.4s, v22.8h, v1.8h\n"
+ "ldr d22, [x22, x2]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v7.4s, v20.4h, v9.4h\n"
+ "smlal2 v19.4s, v20.8h, v9.8h\n"
+ "ldr d9, [x21, x2]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v12.4s, v21.4h, v1.4h\n"
+ "smlal v5.4s, v20.4h, v1.4h\n"
+ "usubl v6.8h, v6.8b, v14.8b\n"
+ "ldr x22, [x4, #0xa0]\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "smlal2 v13.4s, v21.8h, v1.8h\n"
+ "smlal2 v23.4s, v20.8h, v1.8h\n"
+ "ldr x21, [x4, #0xa8]\n"
+ "smlal v30.4s, v21.4h, v17.4h\n"
+ "smlal2 v24.4s, v21.8h, v17.8h\n"
+ "ldr d21, [x27, x2]\n"
+ "ushll v9.8h, v9.8b, #0x0\n"
+ "smlal v7.4s, v10.4h, v1.4h\n"
+ "smlal2 v19.4s, v10.8h, v1.8h\n"
+ "ldr d1, [x20, x2]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v12.4s, v22.4h, v17.4h\n"
+ "smlal v5.4s, v10.4h, v17.4h\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "ldr x13, [x4, #0xb8]\n"
+ "smlal2 v13.4s, v22.8h, v17.8h\n"
+ "ldr d22, [x26, x2]\n"
+ "smlal2 v23.4s, v10.8h, v17.8h\n"
"ushll v21.8h, v21.8b, #0x0\n"
- "ldr x20, [x6, #0x68]\n"
- "smlal2 v24.4s, v4.8h, v29.8h\n"
- "smlal v13.4s, v31.4h, v18.4h\n"
- "usubl v5.8h, v5.8b, v2.8b\n"
- "ldr x21, [x6, #0x70]\n"
- "smlal2 v14.4s, v4.8h, v15.8h\n"
- "ldr d4, [x22, x4]\n"
- "smlal2 v22.4s, v30.8h, v15.8h\n"
- "ushll v4.8h, v4.8b, #0x0\n"
- "smlal2 v17.4s, v28.8h, v15.8h\n"
- "ldr d15, [x20, x4]\n"
- "smlal v7.4s, v31.4h, v29.4h\n"
- "usubl v6.8h, v6.8b, v2.8b\n"
- "smlal v27.4s, v28.4h, v29.4h\n"
- "smlal v8.4s, v16.4h, v29.4h\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "ldr x20, [x6, #0x78]\n"
- "smlal2 v24.4s, v31.8h, v18.8h\n"
- "smlal v13.4s, v1.4h, v3.4h\n"
- "usubl v19.8h, v19.8b, v2.8b\n"
- "ldr x22, [x6, #0x80]\n"
- "smlal2 v14.4s, v31.8h, v29.8h\n"
- "ldr d31, [x21, x4]\n"
- "smlal2 v22.4s, v28.8h, v29.8h\n"
- "ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v17.4s, v16.8h, v29.8h\n"
- "ldr d29, [x20, x4]\n"
- "smlal v7.4s, v1.4h, v18.4h\n"
- "usubl v0.8h, v0.8b, v2.8b\n"
- "smlal v27.4s, v16.4h, v18.4h\n"
- "smlal v8.4s, v21.4h, v18.4h\n"
- "ushll v29.8h, v29.8b, #0x0\n"
- "ldr x20, [x6, #0x88]\n"
- "smlal2 v24.4s, v1.8h, v3.8h\n"
- "smlal v13.4s, v23.4h, v5.4h\n"
- "usubl v10.8h, v10.8b, v2.8b\n"
- "ldr x21, [x6, #0x90]\n"
- "smlal2 v14.4s, v1.8h, v18.8h\n"
- "ldr d1, [x22, x4]\n"
- "smlal2 v22.4s, v16.8h, v18.8h\n"
+ "smlal v30.4s, v0.4h, v29.4h\n"
+ "smlal2 v24.4s, v0.8h, v29.8h\n"
+ "ldr d0, [x25, x2]\n"
"ushll v1.8h, v1.8b, #0x0\n"
- "smlal2 v17.4s, v21.8h, v18.8h\n"
- "ldr d18, [x20, x4]\n"
- "smlal v7.4s, v4.4h, v3.4h\n"
- "usubl v20.8h, v20.8b, v2.8b\n"
- "smlal v27.4s, v21.4h, v3.4h\n"
- "smlal v8.4s, v9.4h, v3.4h\n"
- "ldr x20, [x6, #0x98]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "smlal2 v24.4s, v23.8h, v5.8h\n"
- "ldr d23, [x7, #0x58]\n"
- "smlal v13.4s, v30.4h, v6.4h\n"
- "usubl v23.8h, v23.8b, v2.8b\n"
- "smlal2 v14.4s, v4.8h, v3.8h\n"
- "ldr d4, [x21, x4]\n"
- "smlal2 v22.4s, v21.8h, v3.8h\n"
- "ldr x23, [x6, #0xa0]\n"
- "smlal2 v17.4s, v9.8h, v3.8h\n"
- "ldr d3, [x20, x4]\n"
- "smlal v7.4s, v30.4h, v5.4h\n"
- "ushll v4.8h, v4.8b, #0x0\n"
- "smlal v27.4s, v11.4h, v5.4h\n"
- "smlal v8.4s, v15.4h, v5.4h\n"
- "ushll v3.8h, v3.8b, #0x0\n"
- "ldr x22, [x6, #0xa8]\n"
- "smlal2 v24.4s, v30.8h, v6.8h\n"
- "smlal v13.4s, v28.4h, v19.4h\n"
- "ldr x21, [x6, #0xb0]\n"
- "ldr x20, [x6, #0xb8]\n"
- "smlal2 v14.4s, v30.8h, v5.8h\n"
- "ldr d30, [x7, #0x60]\n"
- "smlal2 v22.4s, v11.8h, v5.8h\n"
- "usubl v30.8h, v30.8b, v2.8b\n"
- "smlal2 v17.4s, v15.8h, v5.8h\n"
- "ldr d5, [x23, x4]\n"
- "smlal v7.4s, v28.4h, v6.4h\n"
- "ushll v5.8h, v5.8b, #0x0\n"
- "smlal v27.4s, v15.4h, v6.4h\n"
- "smlal v8.4s, v31.4h, v6.4h\n"
- "ldr x12, [x6, #0xc0]\n"
- "ldr x11, [x6, #0xc8]\n"
- "smlal2 v24.4s, v28.8h, v19.8h\n"
- "smlal v13.4s, v16.4h, v0.4h\n"
- "ldr x10, [x6, #0xd0]\n"
- "ldr x9, [x6, #0xd8]\n"
- "smlal2 v14.4s, v28.8h, v6.8h\n"
- "ldr d28, [x7, #0x68]\n"
- "smlal2 v22.4s, v15.8h, v6.8h\n"
- "usubl v28.8h, v28.8b, v2.8b\n"
- "smlal2 v17.4s, v31.8h, v6.8h\n"
- "ldr d6, [x22, x4]\n"
- "smlal v7.4s, v16.4h, v19.4h\n"
- "ushll v6.8h, v6.8b, #0x0\n"
- "smlal v27.4s, v31.4h, v19.4h\n"
- "smlal v8.4s, v29.4h, v19.4h\n"
- "ldr x28, [x6, #0xe0]\n"
- "ldr x27, [x6, #0xe8]\n"
- "smlal2 v24.4s, v16.8h, v0.8h\n"
- "smlal v13.4s, v21.4h, v10.4h\n"
- "ldr x26, [x6, #0xf0]\n"
- "ldr x25, [x6, #0xf8]\n"
- "smlal2 v14.4s, v16.8h, v19.8h\n"
- "ldr d16, [x7, #0x70]\n"
- "smlal2 v22.4s, v31.8h, v19.8h\n"
- "usubl v16.8h, v16.8b, v2.8b\n"
- "smlal2 v17.4s, v29.8h, v19.8h\n"
- "ldr d19, [x21, x4]\n"
- "smlal v7.4s, v21.4h, v0.4h\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "smlal v27.4s, v29.4h, v0.4h\n"
- "smlal v8.4s, v1.4h, v0.4h\n"
- "ldr x24, [x6, #0x100]\n"
- "ldr x23, [x6, #0x108]\n"
- "smlal2 v24.4s, v21.8h, v10.8h\n"
- "smlal v13.4s, v11.4h, v20.4h\n"
- "ldr x22, [x6, #0x110]\n"
- "ldr x21, [x6, #0x118]\n"
- "smlal2 v14.4s, v21.8h, v0.8h\n"
- "ldr d21, [x7, #0x78]\n"
- "smlal2 v22.4s, v29.8h, v0.8h\n"
- "usubl v21.8h, v21.8b, v2.8b\n"
- "smlal2 v17.4s, v1.8h, v0.8h\n"
- "ldr d0, [x20, x4]\n"
- "smlal v7.4s, v9.4h, v10.4h\n"
+ "smlal v7.4s, v8.4h, v17.4h\n"
+ "smlal2 v19.4s, v8.8h, v17.8h\n"
+ "ldr d17, [x24, x2]\n"
+ "ldr x12, [x4, #0xc0]\n"
+ "smlal v12.4s, v25.4h, v29.4h\n"
+ "smlal v5.4s, v26.4h, v29.4h\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "ldr x11, [x4, #0xc8]\n"
+ "smlal2 v13.4s, v25.8h, v29.8h\n"
+ "smlal2 v23.4s, v26.8h, v29.8h\n"
"ushll v0.8h, v0.8b, #0x0\n"
- "smlal v27.4s, v1.4h, v10.4h\n"
- "smlal v8.4s, v18.4h, v10.4h\n"
+ "ldr x10, [x4, #0xd0]\n"
+ "smlal v30.4s, v25.4h, v2.4h\n"
+ "smlal2 v24.4s, v25.8h, v2.8h\n"
+ "ldr d25, [x23, x2]\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "smlal v7.4s, v9.4h, v29.4h\n"
+ "smlal2 v19.4s, v9.8h, v29.8h\n"
+ "ldr d29, [x22, x2]\n"
+ "ldr x9, [x4, #0xd8]\n"
+ "smlal v12.4s, v11.4h, v2.4h\n"
+ "smlal v5.4s, v9.4h, v2.4h\n"
+ "ldr x28, [x4, #0xe0]\n"
+ "ldr x27, [x4, #0xe8]\n"
+ "smlal2 v13.4s, v11.8h, v2.8h\n"
+ "smlal2 v23.4s, v9.8h, v2.8h\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr x26, [x4, #0xf0]\n"
+ "smlal v30.4s, v11.4h, v18.4h\n"
+ "smlal2 v24.4s, v11.8h, v18.8h\n"
+ "ldr d11, [x5, #0x68]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "smlal v7.4s, v21.4h, v2.4h\n"
+ "smlal2 v19.4s, v21.8h, v2.8h\n"
+ "ldr d2, [x21, x2]\n"
+ "ldr x25, [x4, #0xf8]\n"
+ "smlal v12.4s, v20.4h, v18.4h\n"
+ "smlal v5.4s, v21.4h, v18.4h\n"
+ "ldr x24, [x4, #0x100]\n"
+ "ldr x23, [x4, #0x108]\n"
+ "smlal2 v13.4s, v20.8h, v18.8h\n"
+ "smlal2 v23.4s, v21.8h, v18.8h\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "ldr x22, [x4, #0x110]\n"
+ "smlal v30.4s, v20.4h, v27.4h\n"
+ "smlal2 v24.4s, v20.8h, v27.8h\n"
+ "ldr d20, [x5, #0x70]\n"
+ "ushll v2.8h, v2.8b, #0x0\n"
+ "smlal v7.4s, v1.4h, v18.4h\n"
+ "smlal2 v19.4s, v1.8h, v18.8h\n"
+ "ldr d18, [x20, x2]\n"
+ "ldr x21, [x4, #0x118]\n"
+ "smlal v12.4s, v10.4h, v27.4h\n"
+ "smlal v5.4s, v1.4h, v27.4h\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "subs x3, x3, #0x1\n"
- "smlal2 v24.4s, v11.8h, v20.8h\n"
- "ldr d11, [x7, #0x80]\n"
- "smlal v13.4s, v15.4h, v23.4h\n"
- "usubl v11.8h, v11.8b, v2.8b\n"
- "smlal2 v14.4s, v9.8h, v10.8h\n"
- "ldr d9, [x12, x4]\n"
- "smlal2 v22.4s, v1.8h, v10.8h\n"
- "ushll v9.8h, v9.8b, #0x0\n"
- "smlal2 v17.4s, v18.8h, v10.8h\n"
- "ldr d10, [x11, x4]\n"
- "smlal v7.4s, v15.4h, v20.4h\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "smlal v27.4s, v4.4h, v20.4h\n"
- "smlal v8.4s, v3.4h, v20.4h\n"
- "smlal2 v24.4s, v15.8h, v23.8h\n"
- "smlal v13.4s, v31.4h, v30.4h\n"
- "smlal2 v14.4s, v15.8h, v20.8h\n"
- "ldr d15, [x7, #0x88]\n"
- "smlal2 v22.4s, v4.8h, v20.8h\n"
- "usubl v15.8h, v15.8b, v2.8b\n"
- "smlal2 v17.4s, v3.8h, v20.8h\n"
- "ldr d20, [x10, x4]\n"
- "smlal v7.4s, v31.4h, v23.4h\n"
- "ushll v20.8h, v20.8b, #0x0\n"
- "smlal v27.4s, v3.4h, v23.4h\n"
- "smlal v8.4s, v5.4h, v23.4h\n"
- "smlal2 v24.4s, v31.8h, v30.8h\n"
- "smlal v13.4s, v29.4h, v28.4h\n"
- "smlal2 v14.4s, v31.8h, v23.8h\n"
- "ldr d31, [x7, #0x90]\n"
- "smlal2 v22.4s, v3.8h, v23.8h\n"
- "usubl v31.8h, v31.8b, v2.8b\n"
- "smlal2 v17.4s, v5.8h, v23.8h\n"
- "ldr d23, [x9, x4]\n"
- "smlal v7.4s, v29.4h, v30.4h\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "smlal v27.4s, v5.4h, v30.4h\n"
- "smlal v8.4s, v6.4h, v30.4h\n"
- "smlal2 v24.4s, v29.8h, v28.8h\n"
- "smlal v13.4s, v1.4h, v16.4h\n"
- "smlal2 v14.4s, v29.8h, v30.8h\n"
- "ldr d29, [x7, #0x98]\n"
- "smlal2 v22.4s, v5.8h, v30.8h\n"
- "usubl v29.8h, v29.8b, v2.8b\n"
- "smlal2 v17.4s, v6.8h, v30.8h\n"
- "ldr d30, [x28, x4]\n"
- "smlal v7.4s, v1.4h, v28.4h\n"
- "ushll v30.8h, v30.8b, #0x0\n"
- "smlal v27.4s, v6.4h, v28.4h\n"
- "smlal v8.4s, v19.4h, v28.4h\n"
- "smlal2 v24.4s, v1.8h, v16.8h\n"
- "smlal v13.4s, v4.4h, v21.4h\n"
- "smlal2 v14.4s, v1.8h, v28.8h\n"
- "ldr d1, [x7, #0xa0]\n"
- "smlal2 v22.4s, v6.8h, v28.8h\n"
- "usubl v1.8h, v1.8b, v2.8b\n"
- "smlal2 v17.4s, v19.8h, v28.8h\n"
- "ldr d28, [x27, x4]\n"
- "smlal v7.4s, v18.4h, v16.4h\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal v27.4s, v19.4h, v16.4h\n"
- "smlal v8.4s, v0.4h, v16.4h\n"
- "smlal2 v24.4s, v4.8h, v21.8h\n"
- "ldr d4, [x7, #0xa8]\n"
- "smlal v13.4s, v3.4h, v11.4h\n"
- "usubl v4.8h, v4.8b, v2.8b\n"
- "smlal2 v14.4s, v18.8h, v16.8h\n"
- "ldr d18, [x26, x4]\n"
- "smlal2 v22.4s, v19.8h, v16.8h\n"
+ "subs x8, x8, #0x1\n"
+ "smlal2 v13.4s, v10.8h, v27.8h\n"
+ "smlal2 v23.4s, v1.8h, v27.8h\n"
+ "usubl v20.8h, v20.8b, v14.8b\n"
+ "smlal v30.4s, v10.4h, v16.4h\n"
+ "smlal2 v24.4s, v10.8h, v16.8h\n"
+ "ldr d10, [x5, #0x78]\n"
"ushll v18.8h, v18.8b, #0x0\n"
- "smlal2 v17.4s, v0.8h, v16.8h\n"
- "ldr d16, [x25, x4]\n"
- "smlal v7.4s, v3.4h, v21.4h\n"
+ "smlal v7.4s, v22.4h, v27.4h\n"
+ "smlal2 v19.4s, v22.8h, v27.8h\n"
+ "ldr d27, [x13, x2]\n"
+ "smlal v12.4s, v8.4h, v16.4h\n"
+ "smlal v5.4s, v22.4h, v16.4h\n"
+ "smlal2 v13.4s, v8.8h, v16.8h\n"
+ "ldr d8, [x5, #0x80]\n"
+ "smlal2 v23.4s, v22.8h, v16.8h\n"
+ "usubl v10.8h, v10.8b, v14.8b\n"
+ "smlal v30.4s, v26.4h, v3.4h\n"
+ "smlal2 v24.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x12, x2]\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
+ "smlal v7.4s, v0.4h, v16.4h\n"
+ "smlal2 v19.4s, v0.8h, v16.8h\n"
+ "ldr d16, [x11, x2]\n"
+ "smlal v12.4s, v9.4h, v3.4h\n"
+ "smlal v5.4s, v17.4h, v3.4h\n"
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "smlal2 v13.4s, v9.8h, v3.8h\n"
+ "smlal2 v23.4s, v17.8h, v3.8h\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "smlal v30.4s, v9.4h, v6.4h\n"
+ "smlal2 v24.4s, v9.8h, v6.8h\n"
+ "ldr d9, [x5, #0x88]\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "smlal v27.4s, v9.4h, v21.4h\n"
- "smlal v8.4s, v10.4h, v21.4h\n"
- "smlal2 v24.4s, v3.8h, v11.8h\n"
- "smlal v13.4s, v5.4h, v15.4h\n"
- "smlal2 v14.4s, v3.8h, v21.8h\n"
- "ldr d3, [x7, #0xb0]\n"
- "smlal2 v22.4s, v9.8h, v21.8h\n"
- "usubl v3.8h, v3.8b, v2.8b\n"
- "smlal2 v17.4s, v10.8h, v21.8h\n"
- "ldr d21, [x24, x4]\n"
- "smlal v7.4s, v5.4h, v11.4h\n"
- "ushll v21.8h, v21.8b, #0x0\n"
- "smlal v27.4s, v10.4h, v11.4h\n"
- "smlal v8.4s, v20.4h, v11.4h\n"
- "smlal2 v24.4s, v5.8h, v15.8h\n"
- "smlal v13.4s, v6.4h, v31.4h\n"
- "smlal2 v14.4s, v5.8h, v11.8h\n"
- "ldr d5, [x7, #0xb8]\n"
- "smlal2 v22.4s, v10.8h, v11.8h\n"
- "usubl v5.8h, v5.8b, v2.8b\n"
- "smlal2 v17.4s, v20.8h, v11.8h\n"
- "ldr d11, [x23, x4]\n"
- "smlal v7.4s, v6.4h, v15.4h\n"
+ "smlal v7.4s, v25.4h, v3.4h\n"
+ "smlal2 v19.4s, v25.8h, v3.8h\n"
+ "ldr d3, [x10, x2]\n"
+ "smlal v12.4s, v21.4h, v6.4h\n"
+ "smlal v5.4s, v25.4h, v6.4h\n"
+ "smlal2 v13.4s, v21.8h, v6.8h\n"
+ "smlal2 v23.4s, v25.8h, v6.8h\n"
+ "usubl v9.8h, v9.8b, v14.8b\n"
+ "smlal v30.4s, v21.4h, v4.4h\n"
+ "smlal2 v24.4s, v21.8h, v4.8h\n"
+ "ldr d21, [x5, #0x90]\n"
+ "ushll v3.8h, v3.8b, #0x0\n"
+ "smlal v7.4s, v29.4h, v6.4h\n"
+ "smlal2 v19.4s, v29.8h, v6.8h\n"
+ "ldr d6, [x9, x2]\n"
+ "smlal v12.4s, v1.4h, v4.4h\n"
+ "smlal v5.4s, v29.4h, v4.4h\n"
+ "smlal2 v13.4s, v1.8h, v4.8h\n"
+ "smlal2 v23.4s, v29.8h, v4.8h\n"
+ "usubl v21.8h, v21.8b, v14.8b\n"
+ "smlal v30.4s, v1.4h, v11.4h\n"
+ "smlal2 v24.4s, v1.8h, v11.8h\n"
+ "ldr d1, [x5, #0x98]\n"
+ "ushll v6.8h, v6.8b, #0x0\n"
+ "smlal v7.4s, v2.4h, v4.4h\n"
+ "smlal2 v19.4s, v2.8h, v4.8h\n"
+ "ldr d4, [x28, x2]\n"
+ "smlal v12.4s, v22.4h, v11.4h\n"
+ "smlal v5.4s, v2.4h, v11.4h\n"
+ "smlal2 v13.4s, v22.8h, v11.8h\n"
+ "smlal2 v23.4s, v2.8h, v11.8h\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v30.4s, v22.4h, v20.4h\n"
+ "smlal2 v24.4s, v22.8h, v20.8h\n"
+ "ldr d22, [x5, #0xa0]\n"
+ "ushll v4.8h, v4.8b, #0x0\n"
+ "smlal v7.4s, v18.4h, v11.4h\n"
+ "smlal2 v19.4s, v18.8h, v11.8h\n"
+ "ldr d11, [x27, x2]\n"
+ "smlal v12.4s, v0.4h, v20.4h\n"
+ "smlal v5.4s, v18.4h, v20.4h\n"
+ "smlal2 v13.4s, v0.8h, v20.8h\n"
+ "ldr d0, [x5, #0xa8]\n"
+ "smlal2 v23.4s, v18.8h, v20.8h\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "smlal v30.4s, v17.4h, v10.4h\n"
+ "smlal2 v24.4s, v17.8h, v10.8h\n"
+ "ldr d17, [x26, x2]\n"
"ushll v11.8h, v11.8b, #0x0\n"
- "smlal v27.4s, v20.4h, v15.4h\n"
- "smlal v8.4s, v23.4h, v15.4h\n"
- "smlal2 v24.4s, v6.8h, v31.8h\n"
- "smlal v13.4s, v19.4h, v29.4h\n"
- "smlal2 v14.4s, v6.8h, v15.8h\n"
- "ldr d6, [x7, #0xc0]\n"
- "smlal2 v22.4s, v20.8h, v15.8h\n"
- "usubl v6.8h, v6.8b, v2.8b\n"
- "smlal2 v17.4s, v23.8h, v15.8h\n"
- "ldr d15, [x22, x4]\n"
- "smlal v7.4s, v19.4h, v31.4h\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "smlal v27.4s, v23.4h, v31.4h\n"
- "smlal v8.4s, v30.4h, v31.4h\n"
- "add x7, x7, #0xc8\n"
- "smlal2 v24.4s, v19.8h, v29.8h\n"
- "smlal v13.4s, v9.4h, v1.4h\n"
- "smlal2 v14.4s, v19.8h, v31.8h\n"
- "ldr d19, [x21, x4]\n"
- "smlal2 v22.4s, v23.8h, v31.8h\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "smlal2 v17.4s, v30.8h, v31.8h\n"
- "ldr q31, [x8, #0x0]\n"
- "smlal v7.4s, v0.4h, v29.4h\n"
- "add x4, x4, #0x8\n"
- "smlal v27.4s, v30.4h, v29.4h\n"
- "smlal v8.4s, v28.4h, v29.4h\n"
- "smlal2 v24.4s, v9.8h, v1.8h\n"
- "ldr q9, [x17, #0x0]\n"
- "smlal v13.4s, v10.4h, v4.4h\n"
- "smlal2 v14.4s, v0.8h, v29.8h\n"
- "ldr q0, [x8, #0x10]\n"
- "smlal2 v22.4s, v30.8h, v29.8h\n"
- "add x8, x8, #0x20\n"
- "smlal2 v17.4s, v28.8h, v29.8h\n"
- "ldr q29, [x17, #0x10]\n"
- "smlal v7.4s, v10.4h, v1.4h\n"
- "add x17, x17, #0x20\n"
- "smlal v27.4s, v18.4h, v1.4h\n"
- "smlal v8.4s, v16.4h, v1.4h\n"
- "smlal2 v24.4s, v10.8h, v4.8h\n"
- "smlal v13.4s, v20.4h, v3.4h\n"
- "smlal2 v14.4s, v10.8h, v1.8h\n"
- "smlal2 v22.4s, v18.8h, v1.8h\n"
- "smlal2 v17.4s, v16.8h, v1.8h\n"
- "smlal v7.4s, v20.4h, v4.4h\n"
- "smlal v27.4s, v16.4h, v4.4h\n"
- "smlal v8.4s, v21.4h, v4.4h\n"
- "smlal2 v24.4s, v20.8h, v3.8h\n"
- "smlal v13.4s, v23.4h, v5.4h\n"
- "smlal2 v14.4s, v20.8h, v4.8h\n"
- "smlal2 v22.4s, v16.8h, v4.8h\n"
- "smlal2 v17.4s, v21.8h, v4.8h\n"
- "smlal v7.4s, v23.4h, v3.4h\n"
- "smlal v27.4s, v21.4h, v3.4h\n"
- "smlal v8.4s, v11.4h, v3.4h\n"
- "smlal2 v24.4s, v23.8h, v5.8h\n"
- "smlal v13.4s, v30.4h, v6.4h\n"
- "sqrdmulh v13.4s, v13.4s, v31.4s\n"
- "smlal2 v14.4s, v23.8h, v3.8h\n"
- "smlal2 v22.4s, v21.8h, v3.8h\n"
- "and v23.16b, v13.16b, v9.16b\n"
- "smlal2 v17.4s, v11.8h, v3.8h\n"
- "smlal v7.4s, v30.4h, v5.4h\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "smlal v27.4s, v11.4h, v5.4h\n"
- "smlal v8.4s, v15.4h, v5.4h\n"
- "sqadd v13.4s, v13.4s, v23.4s\n"
- "smlal2 v24.4s, v30.8h, v6.8h\n"
- "smlal2 v14.4s, v30.8h, v5.8h\n"
- "sqrdmulh v24.4s, v24.4s, v0.4s\n"
- "smlal2 v22.4s, v11.8h, v5.8h\n"
- "smlal2 v17.4s, v15.8h, v5.8h\n"
- "and v10.16b, v24.16b, v29.16b\n"
- "smlal v7.4s, v28.4h, v6.4h\n"
- "smlal v27.4s, v15.4h, v6.4h\n"
- "sqrdmulh v7.4s, v7.4s, v31.4s\n"
- "smlal v8.4s, v19.4h, v6.4h\n"
- "smlal2 v14.4s, v28.8h, v6.8h\n"
- "sqrdmulh v27.4s, v27.4s, v31.4s\n"
- "smlal2 v22.4s, v15.8h, v6.8h\n"
- "smlal2 v17.4s, v19.8h, v6.8h\n"
- "sqrdmulh v8.4s, v8.4s, v31.4s\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "and v28.16b, v7.16b, v9.16b\n"
- "sqrdmulh v14.4s, v14.4s, v0.4s\n"
- "and v20.16b, v27.16b, v9.16b\n"
- "sqrdmulh v22.4s, v22.4s, v0.4s\n"
- "and v23.16b, v8.16b, v9.16b\n"
- "sqrdmulh v17.4s, v17.4s, v0.4s\n"
- "sqadd v24.4s, v24.4s, v10.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "and v18.16b, v14.16b, v29.16b\n"
+ "smlal v7.4s, v27.4h, v20.4h\n"
+ "smlal2 v19.4s, v27.8h, v20.8h\n"
+ "ldr d20, [x25, x2]\n"
+ "smlal v12.4s, v25.4h, v10.4h\n"
+ "smlal v5.4s, v26.4h, v10.4h\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "smlal2 v13.4s, v25.8h, v10.8h\n"
+ "smlal2 v23.4s, v26.8h, v10.8h\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "smlal v30.4s, v25.4h, v8.4h\n"
+ "smlal2 v24.4s, v25.8h, v8.8h\n"
+ "ldr d25, [x5, #0xb0]\n"
+ "ushll v20.8h, v20.8b, #0x0\n"
+ "smlal v7.4s, v16.4h, v10.4h\n"
+ "smlal2 v19.4s, v16.8h, v10.8h\n"
+ "ldr d10, [x24, x2]\n"
+ "smlal v12.4s, v29.4h, v8.4h\n"
+ "smlal v5.4s, v16.4h, v8.4h\n"
+ "smlal2 v13.4s, v29.8h, v8.8h\n"
+ "smlal2 v23.4s, v16.8h, v8.8h\n"
+ "usubl v25.8h, v25.8b, v14.8b\n"
+ "smlal v30.4s, v29.4h, v9.4h\n"
+ "smlal2 v24.4s, v29.8h, v9.8h\n"
+ "ldr d29, [x5, #0xb8]\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
+ "smlal v7.4s, v3.4h, v8.4h\n"
+ "smlal2 v19.4s, v3.8h, v8.8h\n"
+ "ldr d8, [x23, x2]\n"
+ "smlal v12.4s, v2.4h, v9.4h\n"
+ "smlal v5.4s, v3.4h, v9.4h\n"
+ "smlal2 v13.4s, v2.8h, v9.8h\n"
+ "smlal2 v23.4s, v3.8h, v9.8h\n"
+ "usubl v29.8h, v29.8b, v14.8b\n"
+ "smlal v30.4s, v2.4h, v21.4h\n"
+ "smlal2 v24.4s, v2.8h, v21.8h\n"
+ "ldr d2, [x5, #0xc0]\n"
+ "ushll v8.8h, v8.8b, #0x0\n"
+ "smlal v7.4s, v6.4h, v9.4h\n"
+ "smlal2 v19.4s, v6.8h, v9.8h\n"
+ "ldr d9, [x22, x2]\n"
+ "add x5, x5, #0xc8\n"
+ "smlal v12.4s, v18.4h, v21.4h\n"
+ "smlal v5.4s, v6.4h, v21.4h\n"
+ "smlal2 v13.4s, v18.8h, v21.8h\n"
+ "smlal2 v23.4s, v6.8h, v21.8h\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v30.4s, v18.4h, v1.4h\n"
+ "smlal2 v24.4s, v18.8h, v1.8h\n"
+ "ldr d18, [x21, x2]\n"
+ "ushll v9.8h, v9.8b, #0x0\n"
+ "smlal v7.4s, v4.4h, v21.4h\n"
+ "smlal2 v19.4s, v4.8h, v21.8h\n"
+ "ldr q21, [x6, #0x0]\n"
+ "add x2, x2, #0x8\n"
+ "smlal v12.4s, v27.4h, v1.4h\n"
+ "smlal v5.4s, v4.4h, v1.4h\n"
+ "smlal2 v13.4s, v27.8h, v1.8h\n"
+ "ldr q27, [x7, #0x0]\n"
+ "smlal2 v23.4s, v4.8h, v1.8h\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
+ "smlal v30.4s, v26.4h, v22.4h\n"
+ "smlal2 v24.4s, v26.8h, v22.8h\n"
+ "ldr q26, [x6, #0x10]\n"
+ "add x6, x6, #0x20\n"
+ "smlal v7.4s, v11.4h, v1.4h\n"
+ "smlal2 v19.4s, v11.8h, v1.8h\n"
+ "ldr q1, [x7, #0x10]\n"
+ "add x7, x7, #0x20\n"
+ "smlal v12.4s, v16.4h, v22.4h\n"
+ "smlal v5.4s, v17.4h, v22.4h\n"
+ "smlal2 v13.4s, v16.8h, v22.8h\n"
+ "smlal2 v23.4s, v17.8h, v22.8h\n"
+ "smlal v30.4s, v16.4h, v0.4h\n"
+ "smlal2 v24.4s, v16.8h, v0.8h\n"
+ "smlal v7.4s, v20.4h, v22.4h\n"
+ "smlal2 v19.4s, v20.8h, v22.8h\n"
+ "smlal v12.4s, v3.4h, v0.4h\n"
+ "smlal v5.4s, v20.4h, v0.4h\n"
+ "smlal2 v13.4s, v3.8h, v0.8h\n"
+ "smlal2 v23.4s, v20.8h, v0.8h\n"
+ "smlal v30.4s, v3.4h, v25.4h\n"
+ "smlal2 v24.4s, v3.8h, v25.8h\n"
+ "smlal v7.4s, v10.4h, v0.4h\n"
+ "smlal2 v19.4s, v10.8h, v0.8h\n"
+ "smlal v12.4s, v6.4h, v25.4h\n"
+ "smlal v5.4s, v10.4h, v25.4h\n"
+ "smlal2 v13.4s, v6.8h, v25.8h\n"
+ "smlal2 v23.4s, v10.8h, v25.8h\n"
+ "smlal v30.4s, v6.4h, v29.4h\n"
+ "smlal2 v24.4s, v6.8h, v29.8h\n"
+ "smlal v7.4s, v8.4h, v25.4h\n"
+ "smlal2 v19.4s, v8.8h, v25.8h\n"
+ "smlal v12.4s, v4.4h, v29.4h\n"
+ "smlal v5.4s, v8.4h, v29.4h\n"
+ "smlal2 v13.4s, v4.8h, v29.8h\n"
+ "smlal2 v23.4s, v8.8h, v29.8h\n"
+ "smlal v30.4s, v4.4h, v2.4h\n"
+ "smlal2 v24.4s, v4.8h, v2.8h\n"
+ "smlal v7.4s, v9.4h, v29.4h\n"
+ "smlal2 v19.4s, v9.8h, v29.8h\n"
+ "smlal v12.4s, v11.4h, v2.4h\n"
+ "smlal v5.4s, v9.4h, v2.4h\n"
+ "smlal2 v13.4s, v11.8h, v2.8h\n"
+ "smlal2 v23.4s, v9.8h, v2.8h\n"
+ "sqrdmulh v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v26.4s\n"
+ "smlal v7.4s, v18.4h, v2.4h\n"
+ "smlal2 v19.4s, v18.8h, v2.8h\n"
+ "and v17.16b, v30.16b, v27.16b\n"
+ "sqrdmulh v12.4s, v12.4s, v21.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v21.4s\n"
+ "and v16.16b, v24.16b, v1.16b\n"
+ "sqrdmulh v13.4s, v13.4s, v26.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v26.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v7.4s, v7.4s, v21.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v20.16b, v12.16b, v27.16b\n"
+ "and v0.16b, v5.16b, v27.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v26.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
+ "and v8.16b, v7.16b, v27.16b\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
"sshr v20.4s, v20.4s, #0x1f\n"
- "and v30.16b, v22.16b, v29.16b\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v5.16b, v17.16b, v29.16b\n"
- "sqadd v7.4s, v7.4s, v28.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v27.4s, v27.4s, v20.4s\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v23.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v9.4s\n"
- "srshl v7.4s, v7.4s, v9.4s\n"
- "sqadd v14.4s, v14.4s, v18.4s\n"
- "srshl v27.4s, v27.4s, v9.4s\n"
- "sqadd v22.4s, v22.4s, v30.4s\n"
- "srshl v8.4s, v8.4s, v9.4s\n"
- "sqadd v17.4s, v17.4s, v5.4s\n"
- "srshl v24.4s, v24.4s, v29.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v14.4s, v14.4s, v29.4s\n"
+ "and v22.16b, v13.16b, v1.16b\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
+ "and v17.16b, v23.16b, v1.16b\n"
+ "sshr v8.4s, v8.4s, #0x1f\n"
+ "and v16.16b, v19.16b, v1.16b\n"
+ "sqadd v12.4s, v12.4s, v20.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "sqadd v5.4s, v5.4s, v0.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v7.4s, v7.4s, v8.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v27.4s\n"
+ "srshl v12.4s, v12.4s, v27.4s\n"
+ "sqadd v13.4s, v13.4s, v22.4s\n"
+ "srshl v5.4s, v5.4s, v27.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "srshl v7.4s, v7.4s, v27.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v24.4s, v24.4s, v1.4s\n"
+ "sqxtn v30.4h, v30.4s\n"
+ "srshl v13.4s, v13.4s, v1.4s\n"
+ "sqxtn v12.4h, v12.4s\n"
+ "srshl v23.4s, v23.4s, v1.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "srshl v19.4s, v19.4s, v1.4s\n"
"sqxtn v7.4h, v7.4s\n"
- "srshl v22.4s, v22.4s, v29.4s\n"
- "sqxtn v27.4h, v27.4s\n"
- "srshl v17.4s, v17.4s, v29.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "sqxtn2 v13.8h, v24.4s\n"
- "sqxtn2 v7.8h, v14.4s\n"
- "sqxtn2 v27.8h, v22.4s\n"
- "sqxtn2 v8.8h, v17.4s\n"
- "sqadd v13.8h, v13.8h, v25.8h\n"
- "sqadd v7.8h, v7.8h, v25.8h\n"
- "sqadd v27.8h, v27.8h, v25.8h\n"
- "sqadd v8.8h, v8.8h, v25.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v7.8h, v7.8h, v12.8h\n"
- "smax v27.8h, v27.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v26.8h\n"
- "smin v7.8h, v7.8h, v26.8h\n"
- "smin v27.8h, v27.8h, v26.8h\n"
- "smin v8.8h, v8.8h, v26.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "str d13, [x16, x5]\n"
+ "sqxtn2 v30.8h, v24.4s\n"
+ "sqxtn2 v12.8h, v13.4s\n"
+ "sqxtn2 v5.8h, v23.4s\n"
+ "sqxtn2 v7.8h, v19.4s\n"
+ "sqadd v30.8h, v30.8h, v15.8h\n"
+ "sqadd v12.8h, v12.8h, v15.8h\n"
+ "sqadd v5.8h, v5.8h, v15.8h\n"
+ "sqadd v7.8h, v7.8h, v15.8h\n"
+ "smax v30.8h, v30.8h, v31.8h\n"
+ "smax v12.8h, v12.8h, v31.8h\n"
+ "smax v5.8h, v5.8h, v31.8h\n"
+ "smax v7.8h, v7.8h, v31.8h\n"
+ "smin v30.8h, v30.8h, v28.8h\n"
+ "smin v12.8h, v12.8h, v28.8h\n"
+ "smin v5.8h, v5.8h, v28.8h\n"
+ "smin v7.8h, v7.8h, v28.8h\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v12.16b, v12.16b, v12.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
"uzp1 v7.16b, v7.16b, v7.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "str d7, [x15, x5]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "str d27, [x14, x5]\n"
- "str d8, [x13, x5]\n"
- "ldr q13, [x20, #0x0]\n"
+ "str d30, [x17, x3]\n"
+ "str d12, [x16, x3]\n"
+ "str d5, [x15, x3]\n"
+ "str d7, [x14, x3]\n"
+ "add x3, x3, #0x8\n"
+ "ldr q30, [x20, #0x0]\n"
"ldr q24, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d21, [x7, #0x0]\n"
- "ldr d15, [x7, #0x8]\n"
- "add x5, x5, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d29, [x7, #0x10]\n"
- "ldr d18, [x7, #0x18]\n"
- "mov v7.16b, v13.16b\n"
- "mov v14.16b, v24.16b\n"
- "ldr d3, [x7, #0x20]\n"
- "ldp x9, x28, [x6, #0x0]\n"
- "mov v27.16b, v13.16b\n"
- "mov v22.16b, v24.16b\n"
- "ldp x27, x26, [x6, #0x10]\n"
- "ldp x25, x24, [x6, #0x20]\n"
- "mov v8.16b, v13.16b\n"
- "mov v17.16b, v24.16b\n"
- "ldp x23, x22, [x6, #0x30]\n"
- "ldp x21, x20, [x6, #0x40]\n"
- "usubl v21.8h, v21.8b, v2.8b\n"
- "usubl v15.8h, v15.8b, v2.8b\n"
- "ldr d10, [x9, x4]\n"
- "ldr d16, [x28, x4]\n"
- "usubl v29.8h, v29.8b, v2.8b\n"
- "usubl v18.8h, v18.8b, v2.8b\n"
- "ldr d23, [x27, x4]\n"
- "ldr d30, [x26, x4]\n"
- "usubl v3.8h, v3.8b, v2.8b\n"
- "ushll v10.8h, v10.8b, #0x0\n"
- "ldr d4, [x25, x4]\n"
- "ldr d28, [x24, x4]\n"
- "ushll v16.8h, v16.8b, #0x0\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "ldr d31, [x23, x4]\n"
- "ldr d1, [x22, x4]\n"
- "ushll v30.8h, v30.8b, #0x0\n"
+ "ldr d6, [x5, #0x0]\n"
+ "ldr d20, [x5, #0x8]\n"
+ "ldr d9, [x5, #0x10]\n"
+ "ldr d1, [x5, #0x18]\n"
+ "ldr d17, [x5, #0x20]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
+ "mov v12.16b, v30.16b\n"
+ "mov v13.16b, v24.16b\n"
+ "mov v5.16b, v30.16b\n"
+ "mov v23.16b, v24.16b\n"
+ "mov v7.16b, v30.16b\n"
+ "mov v19.16b, v24.16b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "usubl v6.8h, v6.8b, v14.8b\n"
+ "usubl v20.8h, v20.8b, v14.8b\n"
+ "usubl v9.8h, v9.8b, v14.8b\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "ldr d18, [x9, x2]\n"
+ "ldr d4, [x28, x2]\n"
+ "ldr d0, [x27, x2]\n"
+ "ldr d25, [x26, x2]\n"
+ "ldr d10, [x25, x2]\n"
+ "ldr d11, [x24, x2]\n"
+ "ldr d22, [x23, x2]\n"
+ "ldr d21, [x22, x2]\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
"ushll v4.8h, v4.8b, #0x0\n"
- "ldr d9, [x21, x4]\n"
- "ldr d11, [x20, x4]\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "ushll v31.8h, v31.8b, #0x0\n"
- "ushll v1.8h, v1.8b, #0x0\n"
- "ushll v9.8h, v9.8b, #0x0\n"
+ "ldr d8, [x21, x2]\n"
+ "ldr d26, [x20, x2]\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
"ushll v11.8h, v11.8b, #0x0\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "ushll v21.8h, v21.8b, #0x0\n"
+ "ushll v8.8h, v8.8b, #0x0\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
"bgt 1b\n"
"2:" // Tail
- "ldr d0, [x7, #0x28]\n"
- "ldr d20, [x7, #0x30]\n"
- "smlal v13.4s, v10.4h, v21.4h\n"
- "smlal2 v24.4s, v10.8h, v21.8h\n"
- "ldr d6, [x7, #0x38]\n"
- "ldr d19, [x7, #0x40]\n"
- "smlal v13.4s, v16.4h, v15.4h\n"
- "smlal v7.4s, v16.4h, v21.4h\n"
- "ldr d10, [x7, #0x48]\n"
- "ldr d5, [x7, #0x50]\n"
- "smlal v27.4s, v23.4h, v21.4h\n"
- "smlal v8.4s, v30.4h, v21.4h\n"
- "ldr x21, [x6, #0x50]\n"
- "smlal2 v24.4s, v16.8h, v15.8h\n"
- "smlal v13.4s, v4.4h, v29.4h\n"
- "ldr x20, [x6, #0x58]\n"
- "smlal2 v14.4s, v16.8h, v21.8h\n"
- "ldr d16, [x21, x4]\n"
- "smlal2 v22.4s, v23.8h, v21.8h\n"
- "ushll v16.8h, v16.8b, #0x0\n"
- "smlal2 v17.4s, v30.8h, v21.8h\n"
- "ldr d21, [x20, x4]\n"
- "smlal v7.4s, v4.4h, v15.4h\n"
- "ldr x22, [x6, #0x60]\n"
- "smlal v27.4s, v30.4h, v15.4h\n"
- "smlal v8.4s, v28.4h, v15.4h\n"
+ "ldr d3, [x5, #0x28]\n"
+ "ldr d27, [x5, #0x30]\n"
+ "smlal v30.4s, v18.4h, v6.4h\n"
+ "smlal2 v24.4s, v18.8h, v6.8h\n"
+ "ldr d16, [x5, #0x38]\n"
+ "ldr d18, [x5, #0x40]\n"
+ "smlal v12.4s, v4.4h, v6.4h\n"
+ "smlal v5.4s, v0.4h, v6.4h\n"
+ "ldr d2, [x5, #0x48]\n"
+ "ldr d29, [x5, #0x50]\n"
+ "smlal v7.4s, v25.4h, v6.4h\n"
+ "smlal2 v13.4s, v4.8h, v6.8h\n"
+ "ldr x23, [x4, #0x50]\n"
+ "smlal2 v23.4s, v0.8h, v6.8h\n"
+ "smlal2 v19.4s, v25.8h, v6.8h\n"
+ "ldr d6, [x5, #0x58]\n"
+ "smlal v30.4s, v4.4h, v20.4h\n"
+ "smlal2 v24.4s, v4.8h, v20.8h\n"
+ "ldr d4, [x5, #0x60]\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v12.4s, v10.4h, v20.4h\n"
+ "smlal v5.4s, v25.4h, v20.4h\n"
+ "ldr x22, [x4, #0x60]\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v7.4s, v11.4h, v20.4h\n"
+ "smlal2 v13.4s, v10.8h, v20.8h\n"
+ "ldr x21, [x4, #0x68]\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "smlal2 v23.4s, v25.8h, v20.8h\n"
+ "smlal2 v19.4s, v11.8h, v20.8h\n"
+ "ldr d20, [x23, x2]\n"
+ "ldr x27, [x4, #0x70]\n"
+ "smlal v30.4s, v10.4h, v9.4h\n"
+ "smlal2 v24.4s, v10.8h, v9.8h\n"
+ "ldr d10, [x20, x2]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v12.4s, v22.4h, v9.4h\n"
+ "smlal v5.4s, v11.4h, v9.4h\n"
+ "ldr x20, [x4, #0x78]\n"
+ "usubl v18.8h, v18.8b, v14.8b\n"
+ "ushll v20.8h, v20.8b, #0x0\n"
+ "smlal2 v13.4s, v22.8h, v9.8h\n"
+ "ldr x26, [x4, #0x80]\n"
+ "ldr x25, [x4, #0x88]\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
+ "smlal2 v23.4s, v11.8h, v9.8h\n"
+ "ldr x24, [x4, #0x90]\n"
+ "ldr x23, [x4, #0x98]\n"
+ "smlal v30.4s, v22.4h, v1.4h\n"
+ "smlal2 v24.4s, v22.8h, v1.8h\n"
+ "ldr d22, [x22, x2]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v7.4s, v20.4h, v9.4h\n"
+ "smlal2 v19.4s, v20.8h, v9.8h\n"
+ "ldr d9, [x21, x2]\n"
+ "usubl v29.8h, v29.8b, v14.8b\n"
+ "smlal v12.4s, v21.4h, v1.4h\n"
+ "smlal v5.4s, v20.4h, v1.4h\n"
+ "usubl v6.8h, v6.8b, v14.8b\n"
+ "ldr x22, [x4, #0xa0]\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "smlal2 v13.4s, v21.8h, v1.8h\n"
+ "smlal2 v23.4s, v20.8h, v1.8h\n"
+ "ldr x21, [x4, #0xa8]\n"
+ "smlal v30.4s, v21.4h, v17.4h\n"
+ "smlal2 v24.4s, v21.8h, v17.8h\n"
+ "ldr d21, [x27, x2]\n"
+ "ushll v9.8h, v9.8b, #0x0\n"
+ "smlal v7.4s, v10.4h, v1.4h\n"
+ "smlal2 v19.4s, v10.8h, v1.8h\n"
+ "ldr d1, [x20, x2]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v12.4s, v22.4h, v17.4h\n"
+ "smlal v5.4s, v10.4h, v17.4h\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "ldr x12, [x4, #0xb8]\n"
+ "smlal2 v13.4s, v22.8h, v17.8h\n"
+ "ldr d22, [x26, x2]\n"
+ "smlal2 v23.4s, v10.8h, v17.8h\n"
"ushll v21.8h, v21.8b, #0x0\n"
- "ldr x20, [x6, #0x68]\n"
- "smlal2 v24.4s, v4.8h, v29.8h\n"
- "smlal v13.4s, v31.4h, v18.4h\n"
- "usubl v0.8h, v0.8b, v2.8b\n"
- "ldr x21, [x6, #0x70]\n"
- "smlal2 v14.4s, v4.8h, v15.8h\n"
- "ldr d4, [x22, x4]\n"
- "smlal2 v22.4s, v30.8h, v15.8h\n"
- "ushll v4.8h, v4.8b, #0x0\n"
- "smlal2 v17.4s, v28.8h, v15.8h\n"
- "ldr d15, [x20, x4]\n"
- "smlal v7.4s, v31.4h, v29.4h\n"
- "usubl v20.8h, v20.8b, v2.8b\n"
- "smlal v27.4s, v28.4h, v29.4h\n"
- "smlal v8.4s, v16.4h, v29.4h\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "ldr x20, [x6, #0x78]\n"
- "smlal2 v24.4s, v31.8h, v18.8h\n"
- "smlal v13.4s, v1.4h, v3.4h\n"
- "usubl v6.8h, v6.8b, v2.8b\n"
- "ldr x22, [x6, #0x80]\n"
- "smlal2 v14.4s, v31.8h, v29.8h\n"
- "ldr d31, [x21, x4]\n"
- "smlal2 v22.4s, v28.8h, v29.8h\n"
- "ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v17.4s, v16.8h, v29.8h\n"
- "ldr d29, [x20, x4]\n"
- "smlal v7.4s, v1.4h, v18.4h\n"
- "usubl v19.8h, v19.8b, v2.8b\n"
- "smlal v27.4s, v16.4h, v18.4h\n"
- "smlal v8.4s, v21.4h, v18.4h\n"
- "ushll v29.8h, v29.8b, #0x0\n"
- "ldr x20, [x6, #0x88]\n"
- "smlal2 v24.4s, v1.8h, v3.8h\n"
- "smlal v13.4s, v23.4h, v0.4h\n"
- "usubl v10.8h, v10.8b, v2.8b\n"
- "ldr x21, [x6, #0x90]\n"
- "smlal2 v14.4s, v1.8h, v18.8h\n"
- "ldr d1, [x22, x4]\n"
- "smlal2 v22.4s, v16.8h, v18.8h\n"
+ "smlal v30.4s, v0.4h, v3.4h\n"
+ "smlal2 v24.4s, v0.8h, v3.8h\n"
+ "ldr d0, [x25, x2]\n"
"ushll v1.8h, v1.8b, #0x0\n"
- "smlal2 v17.4s, v21.8h, v18.8h\n"
- "ldr d18, [x20, x4]\n"
- "smlal v7.4s, v4.4h, v3.4h\n"
- "usubl v5.8h, v5.8b, v2.8b\n"
- "smlal v27.4s, v21.4h, v3.4h\n"
- "smlal v8.4s, v9.4h, v3.4h\n"
- "ldr x20, [x6, #0x98]\n"
+ "smlal v7.4s, v8.4h, v17.4h\n"
+ "smlal2 v19.4s, v8.8h, v17.8h\n"
+ "ldr d17, [x24, x2]\n"
+ "ldr x11, [x4, #0xc0]\n"
+ "smlal v12.4s, v25.4h, v3.4h\n"
+ "smlal v5.4s, v26.4h, v3.4h\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "ldr x10, [x4, #0xc8]\n"
+ "smlal2 v13.4s, v25.8h, v3.8h\n"
+ "smlal2 v23.4s, v26.8h, v3.8h\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "ldr x9, [x4, #0xd0]\n"
+ "smlal v30.4s, v25.4h, v27.4h\n"
+ "smlal2 v24.4s, v25.8h, v27.8h\n"
+ "ldr d25, [x23, x2]\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "smlal v7.4s, v9.4h, v3.4h\n"
+ "smlal2 v19.4s, v9.8h, v3.8h\n"
+ "ldr d3, [x22, x2]\n"
+ "ldr x28, [x4, #0xd8]\n"
+ "smlal v12.4s, v11.4h, v27.4h\n"
+ "smlal v5.4s, v9.4h, v27.4h\n"
+ "ldr x27, [x4, #0xe0]\n"
+ "ldr x26, [x4, #0xe8]\n"
+ "smlal2 v13.4s, v11.8h, v27.8h\n"
+ "smlal2 v23.4s, v9.8h, v27.8h\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr x25, [x4, #0xf0]\n"
+ "smlal v30.4s, v11.4h, v16.4h\n"
+ "smlal2 v24.4s, v11.8h, v16.8h\n"
+ "ldr d11, [x5, #0x68]\n"
+ "ushll v3.8h, v3.8b, #0x0\n"
+ "smlal v7.4s, v21.4h, v27.4h\n"
+ "smlal2 v19.4s, v21.8h, v27.8h\n"
+ "ldr d27, [x21, x2]\n"
+ "ldr x24, [x4, #0xf8]\n"
+ "smlal v12.4s, v20.4h, v16.4h\n"
+ "smlal v5.4s, v21.4h, v16.4h\n"
+ "ldr x23, [x4, #0x100]\n"
+ "ldr x22, [x4, #0x108]\n"
+ "smlal2 v13.4s, v20.8h, v16.8h\n"
+ "smlal2 v23.4s, v21.8h, v16.8h\n"
+ "usubl v11.8h, v11.8b, v14.8b\n"
+ "ldr x21, [x4, #0x110]\n"
+ "smlal v30.4s, v20.4h, v18.4h\n"
+ "smlal2 v24.4s, v20.8h, v18.8h\n"
+ "ldr d20, [x5, #0x70]\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
+ "smlal v7.4s, v1.4h, v16.4h\n"
+ "smlal2 v19.4s, v1.8h, v16.8h\n"
+ "ldr d16, [x20, x2]\n"
+ "ldr x20, [x4, #0x118]\n"
+ "smlal v12.4s, v10.4h, v18.4h\n"
+ "smlal v5.4s, v1.4h, v18.4h\n"
+ "tst x1, #0x7\n"
+ "smlal2 v13.4s, v10.8h, v18.8h\n"
+ "smlal2 v23.4s, v1.8h, v18.8h\n"
+ "usubl v20.8h, v20.8b, v14.8b\n"
+ "smlal v30.4s, v10.4h, v2.4h\n"
+ "smlal2 v24.4s, v10.8h, v2.8h\n"
+ "ldr d10, [x5, #0x78]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "smlal v7.4s, v22.4h, v18.4h\n"
+ "smlal2 v19.4s, v22.8h, v18.8h\n"
+ "ldr d18, [x12, x2]\n"
+ "smlal v12.4s, v8.4h, v2.4h\n"
+ "smlal v5.4s, v22.4h, v2.4h\n"
+ "smlal2 v13.4s, v8.8h, v2.8h\n"
+ "ldr d8, [x5, #0x80]\n"
+ "smlal2 v23.4s, v22.8h, v2.8h\n"
+ "usubl v10.8h, v10.8b, v14.8b\n"
+ "smlal v30.4s, v26.4h, v29.4h\n"
+ "smlal2 v24.4s, v26.8h, v29.8h\n"
+ "ldr d26, [x11, x2]\n"
"ushll v18.8h, v18.8b, #0x0\n"
- "smlal2 v24.4s, v23.8h, v0.8h\n"
- "ldr d23, [x7, #0x58]\n"
- "smlal v13.4s, v30.4h, v20.4h\n"
- "usubl v23.8h, v23.8b, v2.8b\n"
- "smlal2 v14.4s, v4.8h, v3.8h\n"
- "ldr d4, [x21, x4]\n"
- "smlal2 v22.4s, v21.8h, v3.8h\n"
- "ldr x22, [x6, #0xa0]\n"
- "smlal2 v17.4s, v9.8h, v3.8h\n"
- "ldr d3, [x20, x4]\n"
- "smlal v7.4s, v30.4h, v0.4h\n"
+ "smlal v7.4s, v0.4h, v2.4h\n"
+ "smlal2 v19.4s, v0.8h, v2.8h\n"
+ "ldr d2, [x10, x2]\n"
+ "smlal v12.4s, v9.4h, v29.4h\n"
+ "smlal v5.4s, v17.4h, v29.4h\n"
+ "usubl v8.8h, v8.8b, v14.8b\n"
+ "smlal2 v13.4s, v9.8h, v29.8h\n"
+ "smlal2 v23.4s, v17.8h, v29.8h\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "smlal v30.4s, v9.4h, v6.4h\n"
+ "smlal2 v24.4s, v9.8h, v6.8h\n"
+ "ldr d9, [x5, #0x88]\n"
+ "ushll v2.8h, v2.8b, #0x0\n"
+ "smlal v7.4s, v25.4h, v29.4h\n"
+ "smlal2 v19.4s, v25.8h, v29.8h\n"
+ "ldr d29, [x9, x2]\n"
+ "smlal v12.4s, v21.4h, v6.4h\n"
+ "smlal v5.4s, v25.4h, v6.4h\n"
+ "smlal2 v13.4s, v21.8h, v6.8h\n"
+ "smlal2 v23.4s, v25.8h, v6.8h\n"
+ "usubl v9.8h, v9.8b, v14.8b\n"
+ "smlal v30.4s, v21.4h, v4.4h\n"
+ "smlal2 v24.4s, v21.8h, v4.8h\n"
+ "ldr d21, [x5, #0x90]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "smlal v7.4s, v3.4h, v6.4h\n"
+ "smlal2 v19.4s, v3.8h, v6.8h\n"
+ "ldr d6, [x28, x2]\n"
+ "smlal v12.4s, v1.4h, v4.4h\n"
+ "smlal v5.4s, v3.4h, v4.4h\n"
+ "smlal2 v13.4s, v1.8h, v4.8h\n"
+ "smlal2 v23.4s, v3.8h, v4.8h\n"
+ "usubl v21.8h, v21.8b, v14.8b\n"
+ "smlal v30.4s, v1.4h, v11.4h\n"
+ "smlal2 v24.4s, v1.8h, v11.8h\n"
+ "ldr d1, [x5, #0x98]\n"
+ "ushll v6.8h, v6.8b, #0x0\n"
+ "smlal v7.4s, v27.4h, v4.4h\n"
+ "smlal2 v19.4s, v27.8h, v4.8h\n"
+ "ldr d4, [x27, x2]\n"
+ "smlal v12.4s, v22.4h, v11.4h\n"
+ "smlal v5.4s, v27.4h, v11.4h\n"
+ "smlal2 v13.4s, v22.8h, v11.8h\n"
+ "smlal2 v23.4s, v27.8h, v11.8h\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "smlal v30.4s, v22.4h, v20.4h\n"
+ "smlal2 v24.4s, v22.8h, v20.8h\n"
+ "ldr d22, [x5, #0xa0]\n"
"ushll v4.8h, v4.8b, #0x0\n"
- "smlal v27.4s, v11.4h, v0.4h\n"
- "smlal v8.4s, v15.4h, v0.4h\n"
- "ushll v3.8h, v3.8b, #0x0\n"
- "ldr x21, [x6, #0xa8]\n"
- "smlal2 v24.4s, v30.8h, v20.8h\n"
- "smlal v13.4s, v28.4h, v6.4h\n"
- "ldr x20, [x6, #0xb0]\n"
- "ldr x12, [x6, #0xb8]\n"
- "smlal2 v14.4s, v30.8h, v0.8h\n"
- "ldr d30, [x7, #0x60]\n"
- "smlal2 v22.4s, v11.8h, v0.8h\n"
- "usubl v30.8h, v30.8b, v2.8b\n"
- "smlal2 v17.4s, v15.8h, v0.8h\n"
- "ldr d0, [x22, x4]\n"
- "smlal v7.4s, v28.4h, v20.4h\n"
- "ushll v0.8h, v0.8b, #0x0\n"
- "smlal v27.4s, v15.4h, v20.4h\n"
- "smlal v8.4s, v31.4h, v20.4h\n"
- "ldr x11, [x6, #0xc0]\n"
- "ldr x10, [x6, #0xc8]\n"
- "smlal2 v24.4s, v28.8h, v6.8h\n"
- "smlal v13.4s, v16.4h, v19.4h\n"
- "ldr x9, [x6, #0xd0]\n"
- "ldr x28, [x6, #0xd8]\n"
- "smlal2 v14.4s, v28.8h, v20.8h\n"
- "ldr d28, [x7, #0x68]\n"
- "smlal2 v22.4s, v15.8h, v20.8h\n"
- "usubl v28.8h, v28.8b, v2.8b\n"
- "smlal2 v17.4s, v31.8h, v20.8h\n"
- "ldr d20, [x21, x4]\n"
- "smlal v7.4s, v16.4h, v6.4h\n"
+ "smlal v7.4s, v16.4h, v11.4h\n"
+ "smlal2 v19.4s, v16.8h, v11.8h\n"
+ "ldr d11, [x26, x2]\n"
+ "smlal v12.4s, v0.4h, v20.4h\n"
+ "smlal v5.4s, v16.4h, v20.4h\n"
+ "smlal2 v13.4s, v0.8h, v20.8h\n"
+ "ldr d0, [x5, #0xa8]\n"
+ "smlal2 v23.4s, v16.8h, v20.8h\n"
+ "usubl v22.8h, v22.8b, v14.8b\n"
+ "smlal v30.4s, v17.4h, v10.4h\n"
+ "smlal2 v24.4s, v17.8h, v10.8h\n"
+ "ldr d17, [x25, x2]\n"
+ "ushll v11.8h, v11.8b, #0x0\n"
+ "smlal v7.4s, v18.4h, v20.4h\n"
+ "smlal2 v19.4s, v18.8h, v20.8h\n"
+ "ldr d20, [x24, x2]\n"
+ "smlal v12.4s, v25.4h, v10.4h\n"
+ "smlal v5.4s, v26.4h, v10.4h\n"
+ "usubl v0.8h, v0.8b, v14.8b\n"
+ "smlal2 v13.4s, v25.8h, v10.8h\n"
+ "smlal2 v23.4s, v26.8h, v10.8h\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "smlal v30.4s, v25.4h, v8.4h\n"
+ "smlal2 v24.4s, v25.8h, v8.8h\n"
+ "ldr d25, [x5, #0xb0]\n"
"ushll v20.8h, v20.8b, #0x0\n"
- "smlal v27.4s, v31.4h, v6.4h\n"
- "smlal v8.4s, v29.4h, v6.4h\n"
- "ldr x27, [x6, #0xe0]\n"
- "ldr x26, [x6, #0xe8]\n"
- "smlal2 v24.4s, v16.8h, v19.8h\n"
- "smlal v13.4s, v21.4h, v10.4h\n"
- "ldr x25, [x6, #0xf0]\n"
- "ldr x24, [x6, #0xf8]\n"
- "smlal2 v14.4s, v16.8h, v6.8h\n"
- "ldr d16, [x7, #0x70]\n"
- "smlal2 v22.4s, v31.8h, v6.8h\n"
- "usubl v16.8h, v16.8b, v2.8b\n"
- "smlal2 v17.4s, v29.8h, v6.8h\n"
- "ldr d6, [x20, x4]\n"
- "smlal v7.4s, v21.4h, v19.4h\n"
- "ushll v6.8h, v6.8b, #0x0\n"
- "smlal v27.4s, v29.4h, v19.4h\n"
- "smlal v8.4s, v1.4h, v19.4h\n"
- "ldr x23, [x6, #0x100]\n"
- "ldr x22, [x6, #0x108]\n"
- "smlal2 v24.4s, v21.8h, v10.8h\n"
- "smlal v13.4s, v11.4h, v5.4h\n"
- "ldr x21, [x6, #0x110]\n"
- "ldr x20, [x6, #0x118]\n"
- "smlal2 v14.4s, v21.8h, v19.8h\n"
- "ldr d21, [x7, #0x78]\n"
- "smlal2 v22.4s, v29.8h, v19.8h\n"
- "usubl v21.8h, v21.8b, v2.8b\n"
- "smlal2 v17.4s, v1.8h, v19.8h\n"
- "ldr d19, [x12, x4]\n"
- "smlal v7.4s, v9.4h, v10.4h\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "smlal v27.4s, v1.4h, v10.4h\n"
- "smlal v8.4s, v18.4h, v10.4h\n"
- "tst x2, #0x7\n"
- "smlal2 v24.4s, v11.8h, v5.8h\n"
- "ldr d11, [x7, #0x80]\n"
- "smlal v13.4s, v15.4h, v23.4h\n"
- "usubl v11.8h, v11.8b, v2.8b\n"
- "smlal2 v14.4s, v9.8h, v10.8h\n"
- "ldr d9, [x11, x4]\n"
- "smlal2 v22.4s, v1.8h, v10.8h\n"
- "ushll v9.8h, v9.8b, #0x0\n"
- "smlal2 v17.4s, v18.8h, v10.8h\n"
- "ldr d10, [x10, x4]\n"
- "smlal v7.4s, v15.4h, v5.4h\n"
+ "smlal v7.4s, v2.4h, v10.4h\n"
+ "smlal2 v19.4s, v2.8h, v10.8h\n"
+ "ldr d10, [x23, x2]\n"
+ "smlal v12.4s, v3.4h, v8.4h\n"
+ "smlal v5.4s, v2.4h, v8.4h\n"
+ "smlal2 v13.4s, v3.8h, v8.8h\n"
+ "smlal2 v23.4s, v2.8h, v8.8h\n"
+ "usubl v25.8h, v25.8b, v14.8b\n"
+ "smlal v30.4s, v3.4h, v9.4h\n"
+ "smlal2 v24.4s, v3.8h, v9.8h\n"
+ "ldr d3, [x5, #0xb8]\n"
"ushll v10.8h, v10.8b, #0x0\n"
- "smlal v27.4s, v4.4h, v5.4h\n"
- "smlal v8.4s, v3.4h, v5.4h\n"
- "smlal2 v24.4s, v15.8h, v23.8h\n"
- "smlal v13.4s, v31.4h, v30.4h\n"
- "smlal2 v14.4s, v15.8h, v5.8h\n"
- "ldr d15, [x7, #0x88]\n"
- "smlal2 v22.4s, v4.8h, v5.8h\n"
- "usubl v15.8h, v15.8b, v2.8b\n"
- "smlal2 v17.4s, v3.8h, v5.8h\n"
- "ldr d5, [x9, x4]\n"
- "smlal v7.4s, v31.4h, v23.4h\n"
- "ushll v5.8h, v5.8b, #0x0\n"
- "smlal v27.4s, v3.4h, v23.4h\n"
- "smlal v8.4s, v0.4h, v23.4h\n"
- "smlal2 v24.4s, v31.8h, v30.8h\n"
- "smlal v13.4s, v29.4h, v28.4h\n"
- "smlal2 v14.4s, v31.8h, v23.8h\n"
- "ldr d31, [x7, #0x90]\n"
- "smlal2 v22.4s, v3.8h, v23.8h\n"
- "usubl v31.8h, v31.8b, v2.8b\n"
- "smlal2 v17.4s, v0.8h, v23.8h\n"
- "ldr d23, [x28, x4]\n"
- "smlal v7.4s, v29.4h, v30.4h\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "smlal v27.4s, v0.4h, v30.4h\n"
- "smlal v8.4s, v20.4h, v30.4h\n"
- "smlal2 v24.4s, v29.8h, v28.8h\n"
- "smlal v13.4s, v1.4h, v16.4h\n"
- "smlal2 v14.4s, v29.8h, v30.8h\n"
- "ldr d29, [x7, #0x98]\n"
- "smlal2 v22.4s, v0.8h, v30.8h\n"
- "usubl v29.8h, v29.8b, v2.8b\n"
- "smlal2 v17.4s, v20.8h, v30.8h\n"
- "ldr d30, [x27, x4]\n"
- "smlal v7.4s, v1.4h, v28.4h\n"
- "ushll v30.8h, v30.8b, #0x0\n"
- "smlal v27.4s, v20.4h, v28.4h\n"
- "smlal v8.4s, v6.4h, v28.4h\n"
- "smlal2 v24.4s, v1.8h, v16.8h\n"
- "smlal v13.4s, v4.4h, v21.4h\n"
- "smlal2 v14.4s, v1.8h, v28.8h\n"
- "ldr d1, [x7, #0xa0]\n"
- "smlal2 v22.4s, v20.8h, v28.8h\n"
- "usubl v1.8h, v1.8b, v2.8b\n"
- "smlal2 v17.4s, v6.8h, v28.8h\n"
- "ldr d28, [x26, x4]\n"
- "smlal v7.4s, v18.4h, v16.4h\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal v27.4s, v6.4h, v16.4h\n"
- "smlal v8.4s, v19.4h, v16.4h\n"
- "smlal2 v24.4s, v4.8h, v21.8h\n"
- "ldr d4, [x7, #0xa8]\n"
- "smlal v13.4s, v3.4h, v11.4h\n"
- "usubl v4.8h, v4.8b, v2.8b\n"
- "smlal2 v14.4s, v18.8h, v16.8h\n"
- "ldr d18, [x25, x4]\n"
- "smlal2 v22.4s, v6.8h, v16.8h\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "smlal2 v17.4s, v19.8h, v16.8h\n"
- "ldr d16, [x24, x4]\n"
- "smlal v7.4s, v3.4h, v21.4h\n"
+ "smlal v7.4s, v29.4h, v8.4h\n"
+ "smlal2 v19.4s, v29.8h, v8.8h\n"
+ "ldr d8, [x22, x2]\n"
+ "smlal v12.4s, v27.4h, v9.4h\n"
+ "smlal v5.4s, v29.4h, v9.4h\n"
+ "smlal2 v13.4s, v27.8h, v9.8h\n"
+ "smlal2 v23.4s, v29.8h, v9.8h\n"
+ "usubl v3.8h, v3.8b, v14.8b\n"
+ "smlal v30.4s, v27.4h, v21.4h\n"
+ "smlal2 v24.4s, v27.8h, v21.8h\n"
+ "ldr d27, [x5, #0xc0]\n"
+ "ushll v8.8h, v8.8b, #0x0\n"
+ "smlal v7.4s, v6.4h, v9.4h\n"
+ "smlal2 v19.4s, v6.8h, v9.8h\n"
+ "ldr d9, [x21, x2]\n"
+ "smlal v12.4s, v16.4h, v21.4h\n"
+ "smlal v5.4s, v6.4h, v21.4h\n"
+ "smlal2 v13.4s, v16.8h, v21.8h\n"
+ "smlal2 v23.4s, v6.8h, v21.8h\n"
+ "usubl v27.8h, v27.8b, v14.8b\n"
+ "smlal v30.4s, v16.4h, v1.4h\n"
+ "smlal2 v24.4s, v16.8h, v1.8h\n"
+ "ldr d16, [x20, x2]\n"
+ "ushll v9.8h, v9.8b, #0x0\n"
+ "smlal v7.4s, v4.4h, v21.4h\n"
+ "smlal2 v19.4s, v4.8h, v21.8h\n"
+ "ldr q21, [x6, #0x0]\n"
+ "add x2, x2, #0x8\n"
+ "smlal v12.4s, v18.4h, v1.4h\n"
+ "smlal v5.4s, v4.4h, v1.4h\n"
+ "smlal2 v13.4s, v18.8h, v1.8h\n"
+ "ldr q18, [x7, #0x0]\n"
+ "smlal2 v23.4s, v4.8h, v1.8h\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "smlal v27.4s, v9.4h, v21.4h\n"
- "smlal v8.4s, v10.4h, v21.4h\n"
- "smlal2 v24.4s, v3.8h, v11.8h\n"
- "smlal v13.4s, v0.4h, v15.4h\n"
- "smlal2 v14.4s, v3.8h, v21.8h\n"
- "ldr d3, [x7, #0xb0]\n"
- "smlal2 v22.4s, v9.8h, v21.8h\n"
- "usubl v3.8h, v3.8b, v2.8b\n"
- "smlal2 v17.4s, v10.8h, v21.8h\n"
- "ldr d21, [x23, x4]\n"
- "smlal v7.4s, v0.4h, v11.4h\n"
- "ushll v21.8h, v21.8b, #0x0\n"
- "smlal v27.4s, v10.4h, v11.4h\n"
- "smlal v8.4s, v5.4h, v11.4h\n"
- "smlal2 v24.4s, v0.8h, v15.8h\n"
- "smlal v13.4s, v20.4h, v31.4h\n"
- "smlal2 v14.4s, v0.8h, v11.8h\n"
- "ldr d0, [x7, #0xb8]\n"
- "smlal2 v22.4s, v10.8h, v11.8h\n"
- "usubl v0.8h, v0.8b, v2.8b\n"
- "smlal2 v17.4s, v5.8h, v11.8h\n"
- "ldr d11, [x22, x4]\n"
- "smlal v7.4s, v20.4h, v15.4h\n"
- "ushll v11.8h, v11.8b, #0x0\n"
- "smlal v27.4s, v5.4h, v15.4h\n"
- "smlal v8.4s, v23.4h, v15.4h\n"
- "smlal2 v24.4s, v20.8h, v31.8h\n"
- "smlal v13.4s, v6.4h, v29.4h\n"
- "smlal2 v14.4s, v20.8h, v15.8h\n"
- "ldr d20, [x7, #0xc0]\n"
- "smlal2 v22.4s, v5.8h, v15.8h\n"
- "usubl v20.8h, v20.8b, v2.8b\n"
- "smlal2 v17.4s, v23.8h, v15.8h\n"
- "ldr d15, [x21, x4]\n"
- "smlal v7.4s, v6.4h, v31.4h\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "smlal v27.4s, v23.4h, v31.4h\n"
- "smlal v8.4s, v30.4h, v31.4h\n"
- "smlal2 v24.4s, v6.8h, v29.8h\n"
- "smlal v13.4s, v9.4h, v1.4h\n"
- "smlal2 v14.4s, v6.8h, v31.8h\n"
- "ldr d6, [x20, x4]\n"
- "smlal2 v22.4s, v23.8h, v31.8h\n"
- "ushll v6.8h, v6.8b, #0x0\n"
- "smlal2 v17.4s, v30.8h, v31.8h\n"
- "ldr q31, [x8, #0x0]\n"
- "smlal v7.4s, v19.4h, v29.4h\n"
- "add x4, x4, #0x8\n"
- "smlal v27.4s, v30.4h, v29.4h\n"
- "smlal v8.4s, v28.4h, v29.4h\n"
- "smlal2 v24.4s, v9.8h, v1.8h\n"
- "ldr q9, [x17, #0x0]\n"
- "smlal v13.4s, v10.4h, v4.4h\n"
- "smlal2 v14.4s, v19.8h, v29.8h\n"
- "ldr q19, [x8, #0x10]\n"
- "smlal2 v22.4s, v30.8h, v29.8h\n"
- "add x8, x8, #0x20\n"
- "smlal2 v17.4s, v28.8h, v29.8h\n"
- "ldr q29, [x17, #0x10]\n"
- "smlal v7.4s, v10.4h, v1.4h\n"
- "add x17, x17, #0x20\n"
- "smlal v27.4s, v18.4h, v1.4h\n"
- "smlal v8.4s, v16.4h, v1.4h\n"
- "smlal2 v24.4s, v10.8h, v4.8h\n"
- "smlal v13.4s, v5.4h, v3.4h\n"
- "smlal2 v14.4s, v10.8h, v1.8h\n"
- "smlal2 v22.4s, v18.8h, v1.8h\n"
- "smlal2 v17.4s, v16.8h, v1.8h\n"
- "smlal v7.4s, v5.4h, v4.4h\n"
- "smlal v27.4s, v16.4h, v4.4h\n"
- "smlal v8.4s, v21.4h, v4.4h\n"
- "smlal2 v24.4s, v5.8h, v3.8h\n"
- "smlal v13.4s, v23.4h, v0.4h\n"
- "smlal2 v14.4s, v5.8h, v4.8h\n"
- "smlal2 v22.4s, v16.8h, v4.8h\n"
- "smlal2 v17.4s, v21.8h, v4.8h\n"
- "smlal v7.4s, v23.4h, v3.4h\n"
- "smlal v27.4s, v21.4h, v3.4h\n"
- "smlal v8.4s, v11.4h, v3.4h\n"
- "smlal2 v24.4s, v23.8h, v0.8h\n"
- "smlal v13.4s, v30.4h, v20.4h\n"
- "sqrdmulh v13.4s, v13.4s, v31.4s\n"
- "smlal2 v14.4s, v23.8h, v3.8h\n"
- "smlal2 v22.4s, v21.8h, v3.8h\n"
- "and v21.16b, v13.16b, v9.16b\n"
- "smlal2 v17.4s, v11.8h, v3.8h\n"
- "smlal v7.4s, v30.4h, v0.4h\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "smlal v27.4s, v11.4h, v0.4h\n"
- "smlal v8.4s, v15.4h, v0.4h\n"
- "sqadd v13.4s, v13.4s, v21.4s\n"
- "smlal2 v24.4s, v30.8h, v20.8h\n"
- "smlal2 v14.4s, v30.8h, v0.8h\n"
- "sqrdmulh v24.4s, v24.4s, v19.4s\n"
- "smlal2 v22.4s, v11.8h, v0.8h\n"
- "smlal2 v17.4s, v15.8h, v0.8h\n"
- "and v16.16b, v24.16b, v29.16b\n"
- "smlal v7.4s, v28.4h, v20.4h\n"
- "smlal v27.4s, v15.4h, v20.4h\n"
- "sqrdmulh v7.4s, v7.4s, v31.4s\n"
- "smlal v8.4s, v6.4h, v20.4h\n"
- "smlal2 v14.4s, v28.8h, v20.8h\n"
- "sqrdmulh v27.4s, v27.4s, v31.4s\n"
- "smlal2 v22.4s, v15.8h, v20.8h\n"
- "smlal2 v17.4s, v6.8h, v20.8h\n"
- "sqrdmulh v8.4s, v8.4s, v31.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v23.16b, v7.16b, v9.16b\n"
- "sqrdmulh v14.4s, v14.4s, v19.4s\n"
- "and v20.16b, v27.16b, v9.16b\n"
- "sqrdmulh v22.4s, v22.4s, v19.4s\n"
- "and v3.16b, v8.16b, v9.16b\n"
- "sqrdmulh v17.4s, v17.4s, v19.4s\n"
- "sqadd v24.4s, v24.4s, v16.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v18.16b, v14.16b, v29.16b\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "and v19.16b, v22.16b, v29.16b\n"
+ "smlal v30.4s, v26.4h, v22.4h\n"
+ "smlal2 v24.4s, v26.8h, v22.8h\n"
+ "ldr q26, [x6, #0x10]\n"
+ "add x6, x6, #0x20\n"
+ "smlal v7.4s, v11.4h, v1.4h\n"
+ "smlal2 v19.4s, v11.8h, v1.8h\n"
+ "ldr q1, [x7, #0x10]\n"
+ "add x7, x7, #0x20\n"
+ "smlal v12.4s, v2.4h, v22.4h\n"
+ "smlal v5.4s, v17.4h, v22.4h\n"
+ "smlal2 v13.4s, v2.8h, v22.8h\n"
+ "smlal2 v23.4s, v17.8h, v22.8h\n"
+ "smlal v30.4s, v2.4h, v0.4h\n"
+ "smlal2 v24.4s, v2.8h, v0.8h\n"
+ "smlal v7.4s, v20.4h, v22.4h\n"
+ "smlal2 v19.4s, v20.8h, v22.8h\n"
+ "smlal v12.4s, v29.4h, v0.4h\n"
+ "smlal v5.4s, v20.4h, v0.4h\n"
+ "smlal2 v13.4s, v29.8h, v0.8h\n"
+ "smlal2 v23.4s, v20.8h, v0.8h\n"
+ "smlal v30.4s, v29.4h, v25.4h\n"
+ "smlal2 v24.4s, v29.8h, v25.8h\n"
+ "smlal v7.4s, v10.4h, v0.4h\n"
+ "smlal2 v19.4s, v10.8h, v0.8h\n"
+ "smlal v12.4s, v6.4h, v25.4h\n"
+ "smlal v5.4s, v10.4h, v25.4h\n"
+ "smlal2 v13.4s, v6.8h, v25.8h\n"
+ "smlal2 v23.4s, v10.8h, v25.8h\n"
+ "smlal v30.4s, v6.4h, v3.4h\n"
+ "smlal2 v24.4s, v6.8h, v3.8h\n"
+ "smlal v7.4s, v8.4h, v25.4h\n"
+ "smlal2 v19.4s, v8.8h, v25.8h\n"
+ "smlal v12.4s, v4.4h, v3.4h\n"
+ "smlal v5.4s, v8.4h, v3.4h\n"
+ "smlal2 v13.4s, v4.8h, v3.8h\n"
+ "smlal2 v23.4s, v8.8h, v3.8h\n"
+ "smlal v30.4s, v4.4h, v27.4h\n"
+ "smlal2 v24.4s, v4.8h, v27.8h\n"
+ "smlal v7.4s, v9.4h, v3.4h\n"
+ "smlal2 v19.4s, v9.8h, v3.8h\n"
+ "smlal v12.4s, v11.4h, v27.4h\n"
+ "smlal v5.4s, v9.4h, v27.4h\n"
+ "smlal2 v13.4s, v11.8h, v27.8h\n"
+ "smlal2 v23.4s, v9.8h, v27.8h\n"
+ "sqrdmulh v30.4s, v30.4s, v21.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v26.4s\n"
+ "smlal v7.4s, v16.4h, v27.4h\n"
+ "smlal2 v19.4s, v16.8h, v27.8h\n"
+ "and v17.16b, v30.16b, v18.16b\n"
+ "sqrdmulh v12.4s, v12.4s, v21.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v21.4s\n"
+ "and v22.16b, v24.16b, v1.16b\n"
+ "sqrdmulh v13.4s, v13.4s, v26.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v26.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v7.4s, v7.4s, v21.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "and v6.16b, v12.16b, v18.16b\n"
+ "and v27.16b, v5.16b, v18.16b\n"
+ "sqrdmulh v19.4s, v19.4s, v26.4s\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
+ "and v3.16b, v7.16b, v18.16b\n"
+ "sqadd v24.4s, v24.4s, v22.4s\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "and v4.16b, v13.16b, v1.16b\n"
+ "sshr v27.4s, v27.4s, #0x1f\n"
+ "and v17.16b, v23.16b, v1.16b\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "and v30.16b, v17.16b, v29.16b\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v27.4s, v27.4s, v20.4s\n"
- "sshr v19.4s, v19.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v3.4s\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v9.4s\n"
- "srshl v7.4s, v7.4s, v9.4s\n"
- "sqadd v14.4s, v14.4s, v18.4s\n"
- "srshl v27.4s, v27.4s, v9.4s\n"
- "sqadd v22.4s, v22.4s, v19.4s\n"
- "srshl v8.4s, v8.4s, v9.4s\n"
- "sqadd v17.4s, v17.4s, v30.4s\n"
- "srshl v24.4s, v24.4s, v29.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v14.4s, v14.4s, v29.4s\n"
+ "and v16.16b, v19.16b, v1.16b\n"
+ "sqadd v12.4s, v12.4s, v6.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sqadd v5.4s, v5.4s, v27.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v7.4s, v7.4s, v3.4s\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
+ "sqadd v13.4s, v13.4s, v4.4s\n"
+ "srshl v5.4s, v5.4s, v18.4s\n"
+ "sqadd v23.4s, v23.4s, v17.4s\n"
+ "srshl v7.4s, v7.4s, v18.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v24.4s, v24.4s, v1.4s\n"
+ "sqxtn v30.4h, v30.4s\n"
+ "srshl v13.4s, v13.4s, v1.4s\n"
+ "sqxtn v12.4h, v12.4s\n"
+ "srshl v23.4s, v23.4s, v1.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "srshl v19.4s, v19.4s, v1.4s\n"
"sqxtn v7.4h, v7.4s\n"
- "srshl v22.4s, v22.4s, v29.4s\n"
- "sqxtn v27.4h, v27.4s\n"
- "srshl v17.4s, v17.4s, v29.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "sqxtn2 v13.8h, v24.4s\n"
- "sqxtn2 v7.8h, v14.4s\n"
- "sqxtn2 v27.8h, v22.4s\n"
- "sqxtn2 v8.8h, v17.4s\n"
- "sqadd v13.8h, v13.8h, v25.8h\n"
- "sqadd v7.8h, v7.8h, v25.8h\n"
- "sqadd v27.8h, v27.8h, v25.8h\n"
- "sqadd v8.8h, v8.8h, v25.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v7.8h, v7.8h, v12.8h\n"
- "smax v27.8h, v27.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v26.8h\n"
- "smin v7.8h, v7.8h, v26.8h\n"
- "smin v27.8h, v27.8h, v26.8h\n"
- "smin v8.8h, v8.8h, v26.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
- "str d13, [x16, x5]\n"
+ "sqxtn2 v30.8h, v24.4s\n"
+ "sqxtn2 v12.8h, v13.4s\n"
+ "sqxtn2 v5.8h, v23.4s\n"
+ "sqxtn2 v7.8h, v19.4s\n"
+ "sqadd v30.8h, v30.8h, v15.8h\n"
+ "sqadd v12.8h, v12.8h, v15.8h\n"
+ "sqadd v5.8h, v5.8h, v15.8h\n"
+ "sqadd v7.8h, v7.8h, v15.8h\n"
+ "smax v30.8h, v30.8h, v31.8h\n"
+ "smax v12.8h, v12.8h, v31.8h\n"
+ "smax v5.8h, v5.8h, v31.8h\n"
+ "smax v7.8h, v7.8h, v31.8h\n"
+ "smin v30.8h, v30.8h, v28.8h\n"
+ "smin v12.8h, v12.8h, v28.8h\n"
+ "smin v5.8h, v5.8h, v28.8h\n"
+ "smin v7.8h, v7.8h, v28.8h\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v12.16b, v12.16b, v12.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
"uzp1 v7.16b, v7.16b, v7.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "str d7, [x15, x5]\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "str d27, [x14, x5]\n"
- "str d8, [x13, x5]\n"
- "add x5, x5, #0x8\n"
+ "str d30, [x17, x3]\n"
+ "str d12, [x16, x3]\n"
+ "str d5, [x15, x3]\n"
+ "str d7, [x14, x3]\n"
+ "add x3, x3, #0x8\n"
"beq 124f\n"
- "add x7, x7, #0xc8\n"
+ "add x5, x5, #0xc8\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x2, #2, 5f\n"
- "ld1 { v13.4s }, [x20], #0x10\n"
- "tbz x2, #1, 4f\n"
+ "tbz x1, #2, 5f\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x1, #1, 4f\n"
"ld1 { v24.d }[0], [x20], #0x8\n"
- "tbz x2, #0, 7f\n"
+ "tbz x1, #0, 7f\n"
"ld1 { v24.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x2, #0, 7f\n"
+ "tbz x1, #0, 7f\n"
"ld1 { v24.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x2, #1, 6f\n"
- "ld1 { v13.d }[0], [x20], #0x8\n"
- "tbz x2, #0, 7f\n"
- "ld1 { v13.s }[2], [x20]\n"
+ "tbz x1, #1, 6f\n"
+ "ld1 { v30.d }[0], [x20], #0x8\n"
+ "tbz x1, #0, 7f\n"
+ "ld1 { v30.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 7f\n"
- "ld1 { v13.s }[0], [x20]\n"
+ "tbz x1, #0, 7f\n"
+ "ld1 { v30.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d21, [x7, #0x0]\n"
- "ldr d15, [x7, #0x8]\n"
- "mov v7.16b, v13.16b\n"
- "mov v14.16b, v24.16b\n"
- "ldr d29, [x7, #0x10]\n"
- "ldr d18, [x7, #0x18]\n"
- "mov v27.16b, v13.16b\n"
- "mov v22.16b, v24.16b\n"
- "ldr d3, [x7, #0x20]\n"
- "ldp x9, x28, [x6, #0x0]\n"
- "mov v8.16b, v13.16b\n"
- "mov v17.16b, v24.16b\n"
- "ldp x27, x26, [x6, #0x10]\n"
- "ldp x25, x24, [x6, #0x20]\n"
- "usubl v21.8h, v21.8b, v2.8b\n"
- "usubl v15.8h, v15.8b, v2.8b\n"
- "ldp x23, x22, [x6, #0x30]\n"
- "ldp x21, x20, [x6, #0x40]\n"
- "usubl v29.8h, v29.8b, v2.8b\n"
- "usubl v18.8h, v18.8b, v2.8b\n"
- "usubl v3.8h, v3.8b, v2.8b\n"
- "add x9, x9, x4\n"
- "add x28, x28, x4\n"
- "add x27, x27, x4\n"
- "add x26, x26, x4\n"
- "add x25, x25, x4\n"
- "add x24, x24, x4\n"
- "add x23, x23, x4\n"
- "add x22, x22, x4\n"
- "add x21, x21, x4\n"
- "add x20, x20, x4\n"
- "tbz x2, #2, 9f\n"
- "ld1 { v10.s }[0], [x9], #0x4\n"
- "ld1 { v16.s }[0], [x28], #0x4\n"
- "ld1 { v23.s }[0], [x27], #0x4\n"
- "ld1 { v30.s }[0], [x26], #0x4\n"
- "ld1 { v4.s }[0], [x25], #0x4\n"
- "ld1 { v28.s }[0], [x24], #0x4\n"
- "ld1 { v31.s }[0], [x23], #0x4\n"
- "ld1 { v1.s }[0], [x22], #0x4\n"
- "ld1 { v9.s }[0], [x21], #0x4\n"
- "ld1 { v11.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 8f\n"
- "ld1 { v10.h }[2], [x9], #0x2\n"
- "ld1 { v16.h }[2], [x28], #0x2\n"
- "ld1 { v23.h }[2], [x27], #0x2\n"
- "ld1 { v30.h }[2], [x26], #0x2\n"
- "ld1 { v4.h }[2], [x25], #0x2\n"
- "ld1 { v28.h }[2], [x24], #0x2\n"
- "ld1 { v31.h }[2], [x23], #0x2\n"
- "ld1 { v1.h }[2], [x22], #0x2\n"
- "ld1 { v9.h }[2], [x21], #0x2\n"
- "ld1 { v11.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 11f\n"
- "ld1 { v10.b }[6], [x9]\n"
- "ld1 { v16.b }[6], [x28]\n"
- "ld1 { v23.b }[6], [x27]\n"
- "ld1 { v30.b }[6], [x26]\n"
- "ld1 { v4.b }[6], [x25]\n"
- "ld1 { v28.b }[6], [x24]\n"
- "ld1 { v31.b }[6], [x23]\n"
- "ld1 { v1.b }[6], [x22]\n"
- "ld1 { v9.b }[6], [x21]\n"
- "ld1 { v11.b }[6], [x20]\n"
+ "ldr d6, [x5, #0x0]\n"
+ "ldr d20, [x5, #0x8]\n"
+ "mov v12.16b, v30.16b\n"
+ "mov v13.16b, v24.16b\n"
+ "ldr d9, [x5, #0x10]\n"
+ "ldr d1, [x5, #0x18]\n"
+ "mov v5.16b, v30.16b\n"
+ "mov v23.16b, v24.16b\n"
+ "ldr d17, [x5, #0x20]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
+ "mov v7.16b, v30.16b\n"
+ "mov v19.16b, v24.16b\n"
+ "usubl v6.8h, v6.8b, v14.8b\n"
+ "usubl v20.8h, v20.8b, v14.8b\n"
+ "usubl v9.8h, v9.8b, v14.8b\n"
+ "usubl v1.8h, v1.8b, v14.8b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "add x9, x9, x2\n"
+ "add x28, x28, x2\n"
+ "add x27, x27, x2\n"
+ "add x26, x26, x2\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "add x25, x25, x2\n"
+ "add x24, x24, x2\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "add x23, x23, x2\n"
+ "add x22, x22, x2\n"
+ "add x21, x21, x2\n"
+ "add x20, x20, x2\n"
+ "tbz x1, #2, 9f\n"
+ "ld1 { v18.s }[0], [x9], #0x4\n"
+ "ld1 { v4.s }[0], [x28], #0x4\n"
+ "ld1 { v0.s }[0], [x27], #0x4\n"
+ "ld1 { v25.s }[0], [x26], #0x4\n"
+ "ld1 { v10.s }[0], [x25], #0x4\n"
+ "ld1 { v11.s }[0], [x24], #0x4\n"
+ "ld1 { v22.s }[0], [x23], #0x4\n"
+ "ld1 { v21.s }[0], [x22], #0x4\n"
+ "ld1 { v8.s }[0], [x21], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 8f\n"
+ "ld1 { v18.h }[2], [x9], #0x2\n"
+ "ld1 { v4.h }[2], [x28], #0x2\n"
+ "ld1 { v0.h }[2], [x27], #0x2\n"
+ "ld1 { v25.h }[2], [x26], #0x2\n"
+ "ld1 { v10.h }[2], [x25], #0x2\n"
+ "ld1 { v11.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "ld1 { v21.h }[2], [x22], #0x2\n"
+ "ld1 { v8.h }[2], [x21], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 11f\n"
+ "ld1 { v18.b }[6], [x9]\n"
+ "ld1 { v4.b }[6], [x28]\n"
+ "ld1 { v0.b }[6], [x27]\n"
+ "ld1 { v25.b }[6], [x26]\n"
+ "ld1 { v10.b }[6], [x25]\n"
+ "ld1 { v11.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
+ "ld1 { v21.b }[6], [x22]\n"
+ "ld1 { v8.b }[6], [x21]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x2, #0, 11f\n"
- "ld1 { v10.b }[4], [x9]\n"
- "ld1 { v16.b }[4], [x28]\n"
- "ld1 { v23.b }[4], [x27]\n"
- "ld1 { v30.b }[4], [x26]\n"
- "ld1 { v4.b }[4], [x25]\n"
- "ld1 { v28.b }[4], [x24]\n"
- "ld1 { v31.b }[4], [x23]\n"
- "ld1 { v1.b }[4], [x22]\n"
- "ld1 { v9.b }[4], [x21]\n"
- "ld1 { v11.b }[4], [x20]\n"
+ "tbz x1, #0, 11f\n"
+ "ld1 { v18.b }[4], [x9]\n"
+ "ld1 { v4.b }[4], [x28]\n"
+ "ld1 { v0.b }[4], [x27]\n"
+ "ld1 { v25.b }[4], [x26]\n"
+ "ld1 { v10.b }[4], [x25]\n"
+ "ld1 { v11.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
+ "ld1 { v21.b }[4], [x22]\n"
+ "ld1 { v8.b }[4], [x21]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x2, #1, 10f\n"
- "ld1 { v10.h }[0], [x9], #0x2\n"
- "ld1 { v16.h }[0], [x28], #0x2\n"
- "ld1 { v23.h }[0], [x27], #0x2\n"
- "ld1 { v30.h }[0], [x26], #0x2\n"
- "ld1 { v4.h }[0], [x25], #0x2\n"
- "ld1 { v28.h }[0], [x24], #0x2\n"
- "ld1 { v31.h }[0], [x23], #0x2\n"
- "ld1 { v1.h }[0], [x22], #0x2\n"
- "ld1 { v9.h }[0], [x21], #0x2\n"
- "ld1 { v11.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 11f\n"
- "ld1 { v10.b }[2], [x9]\n"
- "ld1 { v16.b }[2], [x28]\n"
- "ld1 { v23.b }[2], [x27]\n"
- "ld1 { v30.b }[2], [x26]\n"
- "ld1 { v4.b }[2], [x25]\n"
- "ld1 { v28.b }[2], [x24]\n"
- "ld1 { v31.b }[2], [x23]\n"
- "ld1 { v1.b }[2], [x22]\n"
- "ld1 { v9.b }[2], [x21]\n"
- "ld1 { v11.b }[2], [x20]\n"
+ "tbz x1, #1, 10f\n"
+ "ld1 { v18.h }[0], [x9], #0x2\n"
+ "ld1 { v4.h }[0], [x28], #0x2\n"
+ "ld1 { v0.h }[0], [x27], #0x2\n"
+ "ld1 { v25.h }[0], [x26], #0x2\n"
+ "ld1 { v10.h }[0], [x25], #0x2\n"
+ "ld1 { v11.h }[0], [x24], #0x2\n"
+ "ld1 { v22.h }[0], [x23], #0x2\n"
+ "ld1 { v21.h }[0], [x22], #0x2\n"
+ "ld1 { v8.h }[0], [x21], #0x2\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 11f\n"
+ "ld1 { v18.b }[2], [x9]\n"
+ "ld1 { v4.b }[2], [x28]\n"
+ "ld1 { v0.b }[2], [x27]\n"
+ "ld1 { v25.b }[2], [x26]\n"
+ "ld1 { v10.b }[2], [x25]\n"
+ "ld1 { v11.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
+ "ld1 { v21.b }[2], [x22]\n"
+ "ld1 { v8.b }[2], [x21]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 11f\n"
- "ld1 { v10.b }[0], [x9]\n"
- "ld1 { v16.b }[0], [x28]\n"
- "ld1 { v23.b }[0], [x27]\n"
- "ld1 { v30.b }[0], [x26]\n"
- "ld1 { v4.b }[0], [x25]\n"
- "ld1 { v28.b }[0], [x24]\n"
- "ld1 { v31.b }[0], [x23]\n"
- "ld1 { v1.b }[0], [x22]\n"
- "ld1 { v9.b }[0], [x21]\n"
- "ld1 { v11.b }[0], [x20]\n"
+ "tbz x1, #0, 11f\n"
+ "ld1 { v18.b }[0], [x9]\n"
+ "ld1 { v4.b }[0], [x28]\n"
+ "ld1 { v0.b }[0], [x27]\n"
+ "ld1 { v25.b }[0], [x26]\n"
+ "ld1 { v10.b }[0], [x25]\n"
+ "ld1 { v11.b }[0], [x24]\n"
+ "ld1 { v22.b }[0], [x23]\n"
+ "ld1 { v21.b }[0], [x22]\n"
+ "ld1 { v8.b }[0], [x21]\n"
+ "ld1 { v26.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "ushll v10.8h, v10.8b, #0x0\n"
- "ushll v16.8h, v16.8b, #0x0\n"
- "smlal v13.4s, v10.4h, v21.4h\n"
- "ldr x20, [x6, #0x50]\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "smlal2 v24.4s, v10.8h, v21.8h\n"
- "smlal v7.4s, v16.4h, v21.4h\n"
- "smlal2 v14.4s, v16.8h, v21.8h\n"
- "smlal v27.4s, v23.4h, v21.4h\n"
- "ushll v30.8h, v30.8b, #0x0\n"
- "add x20, x20, x4\n"
- "smlal2 v22.4s, v23.8h, v21.8h\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
"ushll v4.8h, v4.8b, #0x0\n"
- "smlal v8.4s, v30.4h, v21.4h\n"
- "smlal2 v17.4s, v30.8h, v21.8h\n"
- "smlal v13.4s, v16.4h, v15.4h\n"
- "ushll v28.8h, v28.8b, #0x0\n"
- "smlal2 v24.4s, v16.8h, v15.8h\n"
- "smlal v7.4s, v4.4h, v15.4h\n"
- "ushll v31.8h, v31.8b, #0x0\n"
- "smlal2 v14.4s, v4.8h, v15.8h\n"
- "smlal v27.4s, v30.4h, v15.4h\n"
- "ushll v1.8h, v1.8b, #0x0\n"
- "smlal2 v22.4s, v30.8h, v15.8h\n"
- "ushll v9.8h, v9.8b, #0x0\n"
- "smlal v8.4s, v28.4h, v15.4h\n"
+ "ldr x20, [x4, #0x50]\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
"ushll v11.8h, v11.8b, #0x0\n"
- "smlal2 v17.4s, v28.8h, v15.8h\n"
- "smlal v13.4s, v4.4h, v29.4h\n"
- "smlal2 v24.4s, v4.8h, v29.8h\n"
- "smlal v7.4s, v31.4h, v29.4h\n"
- "smlal2 v14.4s, v31.8h, v29.8h\n"
- "smlal v27.4s, v28.4h, v29.4h\n"
- "smlal2 v22.4s, v28.8h, v29.8h\n"
- "tbz x2, #2, 13f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 12f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 15f\n"
- "ld1 { v5.b }[6], [x20]\n"
+ "smlal v30.4s, v18.4h, v6.4h\n"
+ "smlal2 v24.4s, v18.8h, v6.8h\n"
+ "smlal v12.4s, v4.4h, v6.4h\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "add x20, x20, x2\n"
+ "smlal2 v13.4s, v4.8h, v6.8h\n"
+ "smlal v5.4s, v0.4h, v6.4h\n"
+ "ushll v21.8h, v21.8b, #0x0\n"
+ "smlal2 v23.4s, v0.8h, v6.8h\n"
+ "smlal v7.4s, v25.4h, v6.4h\n"
+ "ushll v8.8h, v8.8b, #0x0\n"
+ "smlal2 v19.4s, v25.8h, v6.8h\n"
+ "smlal v30.4s, v4.4h, v20.4h\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "smlal2 v24.4s, v4.8h, v20.8h\n"
+ "smlal v12.4s, v10.4h, v20.4h\n"
+ "smlal2 v13.4s, v10.8h, v20.8h\n"
+ "smlal v5.4s, v25.4h, v20.4h\n"
+ "smlal2 v23.4s, v25.8h, v20.8h\n"
+ "smlal v7.4s, v11.4h, v20.4h\n"
+ "smlal2 v19.4s, v11.8h, v20.8h\n"
+ "smlal v30.4s, v10.4h, v9.4h\n"
+ "smlal2 v24.4s, v10.8h, v9.8h\n"
+ "smlal v12.4s, v22.4h, v9.4h\n"
+ "smlal2 v13.4s, v22.8h, v9.8h\n"
+ "smlal v5.4s, v11.4h, v9.4h\n"
+ "smlal2 v23.4s, v11.8h, v9.8h\n"
+ "tbz x1, #2, 13f\n"
+ "ld1 { v2.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 12f\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 15f\n"
+ "ld1 { v2.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x2, #0, 15f\n"
- "ld1 { v5.b }[4], [x20]\n"
+ "tbz x1, #0, 15f\n"
+ "ld1 { v2.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x2, #1, 14f\n"
- "ld1 { v5.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 15f\n"
- "ld1 { v5.b }[2], [x20]\n"
+ "tbz x1, #1, 14f\n"
+ "ld1 { v2.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 15f\n"
+ "ld1 { v2.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 15f\n"
- "ld1 { v5.b }[0], [x20]\n"
+ "tbz x1, #0, 15f\n"
+ "ld1 { v2.b }[0], [x20]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
- "ushll v5.8h, v5.8b, #0x0\n"
- "ldr x20, [x6, #0x58]\n"
- "smlal v8.4s, v5.4h, v29.4h\n"
- "smlal2 v17.4s, v5.8h, v29.8h\n"
- "smlal v13.4s, v31.4h, v18.4h\n"
- "smlal2 v24.4s, v31.8h, v18.8h\n"
- "add x20, x20, x4\n"
- "smlal v7.4s, v1.4h, v18.4h\n"
- "smlal2 v14.4s, v1.8h, v18.8h\n"
- "smlal v27.4s, v5.4h, v18.4h\n"
- "smlal2 v22.4s, v5.8h, v18.8h\n"
- "tbz x2, #2, 17f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 16f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 19f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ushll v2.8h, v2.8b, #0x0\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v30.4s, v22.4h, v1.4h\n"
+ "smlal2 v24.4s, v22.8h, v1.8h\n"
+ "smlal v12.4s, v21.4h, v1.4h\n"
+ "smlal2 v13.4s, v21.8h, v1.8h\n"
+ "smlal v7.4s, v2.4h, v9.4h\n"
+ "smlal2 v19.4s, v2.8h, v9.8h\n"
+ "smlal v5.4s, v2.4h, v1.4h\n"
+ "smlal2 v23.4s, v2.8h, v1.8h\n"
+ "add x20, x20, x2\n"
+ "tbz x1, #2, 17f\n"
+ "ld1 { v22.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 16f\n"
+ "ld1 { v22.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 19f\n"
+ "ld1 { v22.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x2, #0, 19f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "tbz x1, #0, 19f\n"
+ "ld1 { v22.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x2, #1, 18f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 19f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "tbz x1, #1, 18f\n"
+ "ld1 { v22.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 19f\n"
+ "ld1 { v22.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 19f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "tbz x1, #0, 19f\n"
+ "ld1 { v22.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
- "ushll v10.8h, v10.8b, #0x0\n"
- "ldr x20, [x6, #0x60]\n"
- "smlal v8.4s, v10.4h, v18.4h\n"
- "smlal2 v17.4s, v10.8h, v18.8h\n"
- "smlal v13.4s, v1.4h, v3.4h\n"
- "smlal2 v24.4s, v1.8h, v3.8h\n"
- "add x20, x20, x4\n"
- "tbz x2, #2, 21f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 20f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 23f\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "ldr x20, [x4, #0x60]\n"
+ "smlal v30.4s, v21.4h, v17.4h\n"
+ "smlal2 v24.4s, v21.8h, v17.8h\n"
+ "smlal v7.4s, v22.4h, v1.4h\n"
+ "smlal2 v19.4s, v22.8h, v1.8h\n"
+ "add x20, x20, x2\n"
+ "tbz x1, #2, 21f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 20f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 23f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset
- "tbz x2, #0, 23f\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x1, #0, 23f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 5): Bit 2: Unset
- "tbz x2, #1, 22f\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 23f\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x1, #1, 22f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 23f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 23f\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x1, #0, 23f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"23:" // Oddments: Load (0, 5): Bit 2: End
- "ldr d6, [x7, #0x28]\n"
- "ushll v15.8h, v15.8b, #0x0\n"
- "smlal v7.4s, v15.4h, v3.4h\n"
- "smlal2 v14.4s, v15.8h, v3.8h\n"
- "smlal v27.4s, v10.4h, v3.4h\n"
- "smlal2 v22.4s, v10.8h, v3.8h\n"
- "usubl v6.8h, v6.8b, v2.8b\n"
- "ldr x20, [x6, #0x68]\n"
- "smlal v8.4s, v9.4h, v3.4h\n"
- "smlal2 v17.4s, v9.8h, v3.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v23.4h, v6.4h\n"
- "smlal2 v24.4s, v23.8h, v6.8h\n"
- "smlal v7.4s, v30.4h, v6.4h\n"
- "smlal2 v14.4s, v30.8h, v6.8h\n"
- "smlal v27.4s, v11.4h, v6.4h\n"
- "smlal2 v22.4s, v11.8h, v6.8h\n"
- "tbz x2, #2, 25f\n"
- "ld1 { v20.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 24f\n"
- "ld1 { v20.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 27f\n"
- "ld1 { v20.b }[6], [x20]\n"
+ "ldr d18, [x5, #0x28]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "smlal v5.4s, v22.4h, v17.4h\n"
+ "smlal2 v23.4s, v22.8h, v17.8h\n"
+ "ldr x20, [x4, #0x68]\n"
+ "smlal v7.4s, v8.4h, v17.4h\n"
+ "smlal2 v19.4s, v8.8h, v17.8h\n"
+ "smlal v12.4s, v16.4h, v17.4h\n"
+ "smlal2 v13.4s, v16.8h, v17.8h\n"
+ "usubl v18.8h, v18.8b, v14.8b\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v0.4h, v18.4h\n"
+ "smlal2 v24.4s, v0.8h, v18.8h\n"
+ "smlal v5.4s, v26.4h, v18.4h\n"
+ "smlal2 v23.4s, v26.8h, v18.8h\n"
+ "smlal v12.4s, v25.4h, v18.4h\n"
+ "smlal2 v13.4s, v25.8h, v18.8h\n"
+ "tbz x1, #2, 25f\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 24f\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 27f\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x2, #0, 27f\n"
- "ld1 { v20.b }[4], [x20]\n"
+ "tbz x1, #0, 27f\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x2, #1, 26f\n"
- "ld1 { v20.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 27f\n"
- "ld1 { v20.b }[2], [x20]\n"
+ "tbz x1, #1, 26f\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 27f\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 27f\n"
- "ld1 { v20.b }[0], [x20]\n"
+ "tbz x1, #0, 27f\n"
+ "ld1 { v29.b }[0], [x20]\n"
"27:" // Oddments: Load (2, 1): Bit 2: End
- "ldr d4, [x7, #0x30]\n"
- "ushll v20.8h, v20.8b, #0x0\n"
- "usubl v4.8h, v4.8b, v2.8b\n"
- "ldr x20, [x6, #0x70]\n"
- "smlal v8.4s, v20.4h, v6.4h\n"
- "smlal2 v17.4s, v20.8h, v6.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "smlal2 v24.4s, v30.8h, v4.8h\n"
- "smlal v7.4s, v28.4h, v4.4h\n"
- "smlal2 v14.4s, v28.8h, v4.8h\n"
- "smlal v27.4s, v20.4h, v4.4h\n"
- "smlal2 v22.4s, v20.8h, v4.8h\n"
- "tbz x2, #2, 29f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 28f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 31f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "ldr d16, [x5, #0x30]\n"
+ "ushll v29.8h, v29.8b, #0x0\n"
+ "ldr x20, [x4, #0x70]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v7.4s, v29.4h, v18.4h\n"
+ "smlal2 v19.4s, v29.8h, v18.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v25.4h, v16.4h\n"
+ "smlal2 v24.4s, v25.8h, v16.8h\n"
+ "smlal v12.4s, v11.4h, v16.4h\n"
+ "smlal2 v13.4s, v11.8h, v16.8h\n"
+ "smlal v5.4s, v29.4h, v16.4h\n"
+ "smlal2 v23.4s, v29.8h, v16.8h\n"
+ "tbz x1, #2, 29f\n"
+ "ld1 { v9.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 28f\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 31f\n"
+ "ld1 { v9.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x2, #0, 31f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x1, #0, 31f\n"
+ "ld1 { v9.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x2, #1, 30f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 31f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x1, #1, 30f\n"
+ "ld1 { v9.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 31f\n"
+ "ld1 { v9.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 31f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x1, #0, 31f\n"
+ "ld1 { v9.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ldr d30, [x7, #0x38]\n"
- "ushll v23.8h, v23.8b, #0x0\n"
- "usubl v30.8h, v30.8b, v2.8b\n"
- "ldr x20, [x6, #0x78]\n"
- "smlal v8.4s, v23.4h, v4.4h\n"
- "smlal2 v17.4s, v23.8h, v4.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v28.4h, v30.4h\n"
- "smlal2 v24.4s, v28.8h, v30.8h\n"
- "smlal v7.4s, v5.4h, v30.4h\n"
- "smlal2 v14.4s, v5.8h, v30.8h\n"
- "smlal v27.4s, v23.4h, v30.4h\n"
- "smlal2 v22.4s, v23.8h, v30.8h\n"
- "tbz x2, #2, 33f\n"
- "ld1 { v3.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 32f\n"
- "ld1 { v3.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 35f\n"
- "ld1 { v3.b }[6], [x20]\n"
+ "ldr d17, [x5, #0x38]\n"
+ "ushll v9.8h, v9.8b, #0x0\n"
+ "ldr x20, [x4, #0x78]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "smlal v7.4s, v9.4h, v16.4h\n"
+ "smlal2 v19.4s, v9.8h, v16.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v11.4h, v17.4h\n"
+ "smlal2 v24.4s, v11.8h, v17.8h\n"
+ "smlal v12.4s, v2.4h, v17.4h\n"
+ "smlal2 v13.4s, v2.8h, v17.8h\n"
+ "smlal v5.4s, v9.4h, v17.4h\n"
+ "smlal2 v23.4s, v9.8h, v17.8h\n"
+ "tbz x1, #2, 33f\n"
+ "ld1 { v6.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 32f\n"
+ "ld1 { v6.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 35f\n"
+ "ld1 { v6.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x2, #0, 35f\n"
- "ld1 { v3.b }[4], [x20]\n"
+ "tbz x1, #0, 35f\n"
+ "ld1 { v6.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x2, #1, 34f\n"
- "ld1 { v3.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 35f\n"
- "ld1 { v3.b }[2], [x20]\n"
+ "tbz x1, #1, 34f\n"
+ "ld1 { v6.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 35f\n"
+ "ld1 { v6.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 35f\n"
- "ld1 { v3.b }[0], [x20]\n"
+ "tbz x1, #0, 35f\n"
+ "ld1 { v6.b }[0], [x20]\n"
"35:" // Oddments: Load (2, 3): Bit 2: End
- "ldr d16, [x7, #0x40]\n"
- "ushll v3.8h, v3.8b, #0x0\n"
- "usubl v16.8h, v16.8b, v2.8b\n"
- "ldr x20, [x6, #0x80]\n"
- "smlal v8.4s, v3.4h, v30.4h\n"
- "smlal2 v17.4s, v3.8h, v30.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v5.4h, v16.4h\n"
- "smlal2 v24.4s, v5.8h, v16.8h\n"
- "smlal v7.4s, v10.4h, v16.4h\n"
- "smlal2 v14.4s, v10.8h, v16.8h\n"
- "smlal v27.4s, v3.4h, v16.4h\n"
- "smlal2 v22.4s, v3.8h, v16.8h\n"
- "tbz x2, #2, 37f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 36f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 39f\n"
- "ld1 { v6.b }[6], [x20]\n"
+ "ldr d16, [x5, #0x40]\n"
+ "ushll v6.8h, v6.8b, #0x0\n"
+ "ldr x20, [x4, #0x80]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v7.4s, v6.4h, v17.4h\n"
+ "smlal2 v19.4s, v6.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v2.4h, v16.4h\n"
+ "smlal2 v24.4s, v2.8h, v16.8h\n"
+ "smlal v12.4s, v22.4h, v16.4h\n"
+ "smlal2 v13.4s, v22.8h, v16.8h\n"
+ "smlal v5.4s, v6.4h, v16.4h\n"
+ "smlal2 v23.4s, v6.8h, v16.8h\n"
+ "tbz x1, #2, 37f\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 36f\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 39f\n"
+ "ld1 { v25.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x2, #0, 39f\n"
- "ld1 { v6.b }[4], [x20]\n"
+ "tbz x1, #0, 39f\n"
+ "ld1 { v25.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x2, #1, 38f\n"
- "ld1 { v6.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 39f\n"
- "ld1 { v6.b }[2], [x20]\n"
+ "tbz x1, #1, 38f\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 39f\n"
+ "ld1 { v25.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 39f\n"
- "ld1 { v6.b }[0], [x20]\n"
+ "tbz x1, #0, 39f\n"
+ "ld1 { v25.b }[0], [x20]\n"
"39:" // Oddments: Load (2, 4): Bit 2: End
- "ldr d1, [x7, #0x48]\n"
- "ushll v6.8h, v6.8b, #0x0\n"
- "usubl v1.8h, v1.8b, v2.8b\n"
- "ldr x20, [x6, #0x88]\n"
- "smlal v8.4s, v6.4h, v16.4h\n"
- "smlal2 v17.4s, v6.8h, v16.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v10.4h, v1.4h\n"
- "smlal2 v24.4s, v10.8h, v1.8h\n"
- "smlal v7.4s, v9.4h, v1.4h\n"
- "smlal2 v14.4s, v9.8h, v1.8h\n"
- "smlal v27.4s, v6.4h, v1.4h\n"
- "smlal2 v22.4s, v6.8h, v1.8h\n"
- "tbz x2, #2, 41f\n"
- "ld1 { v18.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 40f\n"
- "ld1 { v18.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 43f\n"
- "ld1 { v18.b }[6], [x20]\n"
+ "ldr d4, [x5, #0x48]\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr x20, [x4, #0x88]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v7.4s, v25.4h, v16.4h\n"
+ "smlal2 v19.4s, v25.8h, v16.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v22.4h, v4.4h\n"
+ "smlal2 v24.4s, v22.8h, v4.8h\n"
+ "smlal v12.4s, v8.4h, v4.4h\n"
+ "smlal2 v13.4s, v8.8h, v4.8h\n"
+ "smlal v5.4s, v25.4h, v4.4h\n"
+ "smlal2 v23.4s, v25.8h, v4.8h\n"
+ "tbz x1, #2, 41f\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 40f\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 43f\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset
- "tbz x2, #0, 43f\n"
- "ld1 { v18.b }[4], [x20]\n"
+ "tbz x1, #0, 43f\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 5): Bit 2: Unset
- "tbz x2, #1, 42f\n"
- "ld1 { v18.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 43f\n"
- "ld1 { v18.b }[2], [x20]\n"
+ "tbz x1, #1, 42f\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 43f\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 43f\n"
- "ld1 { v18.b }[0], [x20]\n"
+ "tbz x1, #0, 43f\n"
+ "ld1 { v20.b }[0], [x20]\n"
"43:" // Oddments: Load (2, 5): Bit 2: End
- "ldr d28, [x7, #0x50]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "usubl v28.8h, v28.8b, v2.8b\n"
- "ldr x20, [x6, #0x90]\n"
- "smlal v8.4s, v18.4h, v1.4h\n"
- "smlal2 v17.4s, v18.8h, v1.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v11.4h, v28.4h\n"
- "smlal2 v24.4s, v11.8h, v28.8h\n"
- "smlal v7.4s, v20.4h, v28.4h\n"
- "smlal2 v14.4s, v20.8h, v28.8h\n"
- "tbz x2, #2, 45f\n"
- "ld1 { v30.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 44f\n"
- "ld1 { v30.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 47f\n"
- "ld1 { v30.b }[6], [x20]\n"
+ "ldr d16, [x5, #0x50]\n"
+ "ushll v20.8h, v20.8b, #0x0\n"
+ "ldr x20, [x4, #0x90]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v7.4s, v20.4h, v4.4h\n"
+ "smlal2 v19.4s, v20.8h, v4.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v26.4h, v16.4h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "smlal v12.4s, v29.4h, v16.4h\n"
+ "smlal2 v13.4s, v29.8h, v16.8h\n"
+ "tbz x1, #2, 45f\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 44f\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 47f\n"
+ "ld1 { v21.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x2, #0, 47f\n"
- "ld1 { v30.b }[4], [x20]\n"
+ "tbz x1, #0, 47f\n"
+ "ld1 { v21.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x2, #1, 46f\n"
- "ld1 { v30.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 47f\n"
- "ld1 { v30.b }[2], [x20]\n"
+ "tbz x1, #1, 46f\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 47f\n"
+ "ld1 { v21.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 47f\n"
- "ld1 { v30.b }[0], [x20]\n"
+ "tbz x1, #0, 47f\n"
+ "ld1 { v21.b }[0], [x20]\n"
"47:" // Oddments: Load (3, 0): Bit 2: End
- "ushll v30.8h, v30.8b, #0x0\n"
- "ldr x20, [x6, #0x98]\n"
- "smlal v27.4s, v30.4h, v28.4h\n"
- "smlal2 v22.4s, v30.8h, v28.8h\n"
- "add x20, x20, x4\n"
- "tbz x2, #2, 49f\n"
- "ld1 { v19.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 48f\n"
- "ld1 { v19.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 51f\n"
- "ld1 { v19.b }[6], [x20]\n"
+ "ushll v21.8h, v21.8b, #0x0\n"
+ "ldr x20, [x4, #0x98]\n"
+ "smlal v5.4s, v21.4h, v16.4h\n"
+ "smlal2 v23.4s, v21.8h, v16.8h\n"
+ "add x20, x20, x2\n"
+ "tbz x1, #2, 49f\n"
+ "ld1 { v27.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 48f\n"
+ "ld1 { v27.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 51f\n"
+ "ld1 { v27.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x2, #0, 51f\n"
- "ld1 { v19.b }[4], [x20]\n"
+ "tbz x1, #0, 51f\n"
+ "ld1 { v27.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x2, #1, 50f\n"
- "ld1 { v19.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 51f\n"
- "ld1 { v19.b }[2], [x20]\n"
+ "tbz x1, #1, 50f\n"
+ "ld1 { v27.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 51f\n"
+ "ld1 { v27.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 51f\n"
- "ld1 { v19.b }[0], [x20]\n"
+ "tbz x1, #0, 51f\n"
+ "ld1 { v27.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ldr d0, [x7, #0x58]\n"
- "ushll v19.8h, v19.8b, #0x0\n"
- "usubl v0.8h, v0.8b, v2.8b\n"
- "ldr x20, [x6, #0xa0]\n"
- "smlal v8.4s, v19.4h, v28.4h\n"
- "smlal2 v17.4s, v19.8h, v28.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v20.4h, v0.4h\n"
- "smlal2 v24.4s, v20.8h, v0.8h\n"
- "smlal v7.4s, v23.4h, v0.4h\n"
- "smlal2 v14.4s, v23.8h, v0.8h\n"
- "smlal v27.4s, v19.4h, v0.4h\n"
- "smlal2 v22.4s, v19.8h, v0.8h\n"
- "tbz x2, #2, 53f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 52f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 55f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ldr d17, [x5, #0x58]\n"
+ "ushll v27.8h, v27.8b, #0x0\n"
+ "ldr x20, [x4, #0xa0]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "smlal v7.4s, v27.4h, v16.4h\n"
+ "smlal2 v19.4s, v27.8h, v16.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v29.4h, v17.4h\n"
+ "smlal2 v24.4s, v29.8h, v17.8h\n"
+ "smlal v12.4s, v9.4h, v17.4h\n"
+ "smlal2 v13.4s, v9.8h, v17.8h\n"
+ "smlal v5.4s, v27.4h, v17.4h\n"
+ "smlal2 v23.4s, v27.8h, v17.8h\n"
+ "tbz x1, #2, 53f\n"
+ "ld1 { v0.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 52f\n"
+ "ld1 { v0.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 55f\n"
+ "ld1 { v0.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x2, #0, 55f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "tbz x1, #0, 55f\n"
+ "ld1 { v0.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x2, #1, 54f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 55f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "tbz x1, #1, 54f\n"
+ "ld1 { v0.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 55f\n"
+ "ld1 { v0.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 55f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "tbz x1, #0, 55f\n"
+ "ld1 { v0.b }[0], [x20]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ldr d10, [x7, #0x60]\n"
- "ushll v9.8h, v9.8b, #0x0\n"
- "usubl v10.8h, v10.8b, v2.8b\n"
- "ldr x20, [x6, #0xa8]\n"
- "smlal v8.4s, v9.4h, v0.4h\n"
- "smlal2 v17.4s, v9.8h, v0.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v23.4h, v10.4h\n"
- "smlal2 v24.4s, v23.8h, v10.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "smlal2 v14.4s, v3.8h, v10.8h\n"
- "smlal v27.4s, v9.4h, v10.4h\n"
- "smlal2 v22.4s, v9.8h, v10.8h\n"
- "tbz x2, #2, 57f\n"
- "ld1 { v20.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 56f\n"
- "ld1 { v20.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 59f\n"
- "ld1 { v20.b }[6], [x20]\n"
+ "ldr d16, [x5, #0x60]\n"
+ "ushll v0.8h, v0.8b, #0x0\n"
+ "ldr x20, [x4, #0xa8]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v7.4s, v0.4h, v17.4h\n"
+ "smlal2 v19.4s, v0.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v9.4h, v16.4h\n"
+ "smlal2 v24.4s, v9.8h, v16.8h\n"
+ "smlal v12.4s, v6.4h, v16.4h\n"
+ "smlal2 v13.4s, v6.8h, v16.8h\n"
+ "smlal v5.4s, v0.4h, v16.4h\n"
+ "smlal2 v23.4s, v0.8h, v16.8h\n"
+ "tbz x1, #2, 57f\n"
+ "ld1 { v3.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 56f\n"
+ "ld1 { v3.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 59f\n"
+ "ld1 { v3.b }[6], [x20]\n"
"b 59f\n"
"56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x2, #0, 59f\n"
- "ld1 { v20.b }[4], [x20]\n"
+ "tbz x1, #0, 59f\n"
+ "ld1 { v3.b }[4], [x20]\n"
"b 59f\n"
"57:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x2, #1, 58f\n"
- "ld1 { v20.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 59f\n"
- "ld1 { v20.b }[2], [x20]\n"
+ "tbz x1, #1, 58f\n"
+ "ld1 { v3.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 59f\n"
+ "ld1 { v3.b }[2], [x20]\n"
"b 59f\n"
"58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 59f\n"
- "ld1 { v20.b }[0], [x20]\n"
+ "tbz x1, #0, 59f\n"
+ "ld1 { v3.b }[0], [x20]\n"
"59:" // Oddments: Load (3, 3): Bit 2: End
- "ldr d28, [x7, #0x68]\n"
- "ushll v20.8h, v20.8b, #0x0\n"
- "usubl v28.8h, v28.8b, v2.8b\n"
- "ldr x20, [x6, #0xb0]\n"
- "smlal v8.4s, v20.4h, v10.4h\n"
- "smlal2 v17.4s, v20.8h, v10.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v3.4h, v28.4h\n"
- "smlal2 v24.4s, v3.8h, v28.8h\n"
- "smlal v7.4s, v6.4h, v28.4h\n"
- "smlal2 v14.4s, v6.8h, v28.8h\n"
- "smlal v27.4s, v20.4h, v28.4h\n"
- "smlal2 v22.4s, v20.8h, v28.8h\n"
- "tbz x2, #2, 61f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 60f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 63f\n"
- "ld1 { v5.b }[6], [x20]\n"
+ "ldr d17, [x5, #0x68]\n"
+ "ushll v3.8h, v3.8b, #0x0\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "smlal v7.4s, v3.4h, v16.4h\n"
+ "smlal2 v19.4s, v3.8h, v16.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v6.4h, v17.4h\n"
+ "smlal2 v24.4s, v6.8h, v17.8h\n"
+ "smlal v12.4s, v25.4h, v17.4h\n"
+ "smlal2 v13.4s, v25.8h, v17.8h\n"
+ "smlal v5.4s, v3.4h, v17.4h\n"
+ "smlal2 v23.4s, v3.8h, v17.8h\n"
+ "tbz x1, #2, 61f\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 60f\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 63f\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 63f\n"
"60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x2, #0, 63f\n"
- "ld1 { v5.b }[4], [x20]\n"
+ "tbz x1, #0, 63f\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 63f\n"
"61:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x2, #1, 62f\n"
- "ld1 { v5.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 63f\n"
- "ld1 { v5.b }[2], [x20]\n"
+ "tbz x1, #1, 62f\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 63f\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 63f\n"
"62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 63f\n"
- "ld1 { v5.b }[0], [x20]\n"
+ "tbz x1, #0, 63f\n"
+ "ld1 { v26.b }[0], [x20]\n"
"63:" // Oddments: Load (3, 4): Bit 2: End
- "ldr d23, [x7, #0x70]\n"
- "ushll v5.8h, v5.8b, #0x0\n"
- "usubl v23.8h, v23.8b, v2.8b\n"
- "ldr x20, [x6, #0xb8]\n"
- "smlal v8.4s, v5.4h, v28.4h\n"
- "smlal2 v17.4s, v5.8h, v28.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v6.4h, v23.4h\n"
- "smlal2 v24.4s, v6.8h, v23.8h\n"
- "smlal v7.4s, v18.4h, v23.4h\n"
- "smlal2 v14.4s, v18.8h, v23.8h\n"
- "smlal v27.4s, v5.4h, v23.4h\n"
- "smlal2 v22.4s, v5.8h, v23.8h\n"
- "tbz x2, #2, 65f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 64f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 67f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "ldr d16, [x5, #0x70]\n"
+ "ushll v26.8h, v26.8b, #0x0\n"
+ "ldr x20, [x4, #0xb8]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v7.4s, v26.4h, v17.4h\n"
+ "smlal2 v19.4s, v26.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v25.4h, v16.4h\n"
+ "smlal2 v24.4s, v25.8h, v16.8h\n"
+ "smlal v12.4s, v20.4h, v16.4h\n"
+ "smlal2 v13.4s, v20.8h, v16.8h\n"
+ "smlal v5.4s, v26.4h, v16.4h\n"
+ "smlal2 v23.4s, v26.8h, v16.8h\n"
+ "tbz x1, #2, 65f\n"
+ "ld1 { v2.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 64f\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 67f\n"
+ "ld1 { v2.b }[6], [x20]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset
- "tbz x2, #0, 67f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "tbz x1, #0, 67f\n"
+ "ld1 { v2.b }[4], [x20]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 5): Bit 2: Unset
- "tbz x2, #1, 66f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 67f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "tbz x1, #1, 66f\n"
+ "ld1 { v2.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 67f\n"
+ "ld1 { v2.b }[2], [x20]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 67f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "tbz x1, #0, 67f\n"
+ "ld1 { v2.b }[0], [x20]\n"
"67:" // Oddments: Load (3, 5): Bit 2: End
- "ldr d4, [x7, #0x78]\n"
- "ushll v29.8h, v29.8b, #0x0\n"
- "usubl v4.8h, v4.8b, v2.8b\n"
- "ldr x20, [x6, #0xc0]\n"
- "smlal v8.4s, v29.4h, v23.4h\n"
- "smlal2 v17.4s, v29.8h, v23.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v30.4h, v4.4h\n"
- "smlal2 v24.4s, v30.8h, v4.8h\n"
- "smlal v7.4s, v19.4h, v4.4h\n"
- "smlal2 v14.4s, v19.8h, v4.8h\n"
- "tbz x2, #2, 69f\n"
+ "ldr d17, [x5, #0x78]\n"
+ "ushll v2.8h, v2.8b, #0x0\n"
+ "ldr x20, [x4, #0xc0]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "smlal v7.4s, v2.4h, v16.4h\n"
+ "smlal2 v19.4s, v2.8h, v16.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v21.4h, v17.4h\n"
+ "smlal2 v24.4s, v21.8h, v17.8h\n"
+ "smlal v12.4s, v27.4h, v17.4h\n"
+ "smlal2 v13.4s, v27.8h, v17.8h\n"
+ "tbz x1, #2, 69f\n"
"ld1 { v18.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 68f\n"
+ "tbz x1, #1, 68f\n"
"ld1 { v18.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 71f\n"
+ "tbz x1, #0, 71f\n"
"ld1 { v18.b }[6], [x20]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x2, #0, 71f\n"
+ "tbz x1, #0, 71f\n"
"ld1 { v18.b }[4], [x20]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x2, #1, 70f\n"
+ "tbz x1, #1, 70f\n"
"ld1 { v18.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 71f\n"
+ "tbz x1, #0, 71f\n"
"ld1 { v18.b }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 71f\n"
+ "tbz x1, #0, 71f\n"
"ld1 { v18.b }[0], [x20]\n"
"71:" // Oddments: Load (4, 0): Bit 2: End
"ushll v18.8h, v18.8b, #0x0\n"
- "ldr x20, [x6, #0xc8]\n"
- "smlal v27.4s, v18.4h, v4.4h\n"
- "smlal2 v22.4s, v18.8h, v4.8h\n"
- "add x20, x20, x4\n"
- "tbz x2, #2, 73f\n"
- "ld1 { v1.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 72f\n"
- "ld1 { v1.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 75f\n"
- "ld1 { v1.b }[6], [x20]\n"
+ "ldr x20, [x4, #0xc8]\n"
+ "smlal v5.4s, v18.4h, v17.4h\n"
+ "smlal2 v23.4s, v18.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "tbz x1, #2, 73f\n"
+ "ld1 { v10.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 72f\n"
+ "ld1 { v10.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 75f\n"
+ "ld1 { v10.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x2, #0, 75f\n"
- "ld1 { v1.b }[4], [x20]\n"
+ "tbz x1, #0, 75f\n"
+ "ld1 { v10.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x2, #1, 74f\n"
- "ld1 { v1.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 75f\n"
- "ld1 { v1.b }[2], [x20]\n"
+ "tbz x1, #1, 74f\n"
+ "ld1 { v10.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 75f\n"
+ "ld1 { v10.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 75f\n"
- "ld1 { v1.b }[0], [x20]\n"
+ "tbz x1, #0, 75f\n"
+ "ld1 { v10.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 1): Bit 2: End
- "ldr d23, [x7, #0x80]\n"
- "ushll v1.8h, v1.8b, #0x0\n"
- "usubl v23.8h, v23.8b, v2.8b\n"
- "ldr x20, [x6, #0xd0]\n"
- "smlal v8.4s, v1.4h, v4.4h\n"
- "smlal2 v17.4s, v1.8h, v4.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v19.4h, v23.4h\n"
- "smlal2 v24.4s, v19.8h, v23.8h\n"
- "smlal v7.4s, v9.4h, v23.4h\n"
- "smlal2 v14.4s, v9.8h, v23.8h\n"
- "smlal v27.4s, v1.4h, v23.4h\n"
- "smlal2 v22.4s, v1.8h, v23.8h\n"
- "tbz x2, #2, 77f\n"
- "ld1 { v4.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 76f\n"
- "ld1 { v4.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 79f\n"
- "ld1 { v4.b }[6], [x20]\n"
+ "ldr d16, [x5, #0x80]\n"
+ "ushll v10.8h, v10.8b, #0x0\n"
+ "ldr x20, [x4, #0xd0]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v7.4s, v10.4h, v17.4h\n"
+ "smlal2 v19.4s, v10.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v27.4h, v16.4h\n"
+ "smlal2 v24.4s, v27.8h, v16.8h\n"
+ "smlal v12.4s, v0.4h, v16.4h\n"
+ "smlal2 v13.4s, v0.8h, v16.8h\n"
+ "smlal v5.4s, v10.4h, v16.4h\n"
+ "smlal2 v23.4s, v10.8h, v16.8h\n"
+ "tbz x1, #2, 77f\n"
+ "ld1 { v6.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 76f\n"
+ "ld1 { v6.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 79f\n"
+ "ld1 { v6.b }[6], [x20]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x2, #0, 79f\n"
- "ld1 { v4.b }[4], [x20]\n"
+ "tbz x1, #0, 79f\n"
+ "ld1 { v6.b }[4], [x20]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x2, #1, 78f\n"
- "ld1 { v4.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 79f\n"
- "ld1 { v4.b }[2], [x20]\n"
+ "tbz x1, #1, 78f\n"
+ "ld1 { v6.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 79f\n"
+ "ld1 { v6.b }[2], [x20]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 79f\n"
- "ld1 { v4.b }[0], [x20]\n"
+ "tbz x1, #0, 79f\n"
+ "ld1 { v6.b }[0], [x20]\n"
"79:" // Oddments: Load (4, 2): Bit 2: End
- "ldr d30, [x7, #0x88]\n"
- "ushll v4.8h, v4.8b, #0x0\n"
- "usubl v30.8h, v30.8b, v2.8b\n"
- "ldr x20, [x6, #0xd8]\n"
- "smlal v8.4s, v4.4h, v23.4h\n"
- "smlal2 v17.4s, v4.8h, v23.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v9.4h, v30.4h\n"
- "smlal2 v24.4s, v9.8h, v30.8h\n"
- "smlal v7.4s, v20.4h, v30.4h\n"
- "smlal2 v14.4s, v20.8h, v30.8h\n"
- "smlal v27.4s, v4.4h, v30.4h\n"
- "smlal2 v22.4s, v4.8h, v30.8h\n"
- "tbz x2, #2, 81f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 80f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 83f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ldr d17, [x5, #0x88]\n"
+ "ushll v6.8h, v6.8b, #0x0\n"
+ "ldr x20, [x4, #0xd8]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "smlal v7.4s, v6.4h, v16.4h\n"
+ "smlal2 v19.4s, v6.8h, v16.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v0.4h, v17.4h\n"
+ "smlal2 v24.4s, v0.8h, v17.8h\n"
+ "smlal v12.4s, v3.4h, v17.4h\n"
+ "smlal2 v13.4s, v3.8h, v17.8h\n"
+ "smlal v5.4s, v6.4h, v17.4h\n"
+ "smlal2 v23.4s, v6.8h, v17.8h\n"
+ "tbz x1, #2, 81f\n"
+ "ld1 { v11.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 80f\n"
+ "ld1 { v11.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 83f\n"
+ "ld1 { v11.b }[6], [x20]\n"
"b 83f\n"
"80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x2, #0, 83f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x1, #0, 83f\n"
+ "ld1 { v11.b }[4], [x20]\n"
"b 83f\n"
"81:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x2, #1, 82f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 83f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x1, #1, 82f\n"
+ "ld1 { v11.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 83f\n"
+ "ld1 { v11.b }[2], [x20]\n"
"b 83f\n"
"82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 83f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x1, #0, 83f\n"
+ "ld1 { v11.b }[0], [x20]\n"
"83:" // Oddments: Load (4, 3): Bit 2: End
- "ldr d3, [x7, #0x90]\n"
- "ushll v21.8h, v21.8b, #0x0\n"
- "usubl v3.8h, v3.8b, v2.8b\n"
- "ldr x20, [x6, #0xe0]\n"
- "smlal v8.4s, v21.4h, v30.4h\n"
- "smlal2 v17.4s, v21.8h, v30.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v20.4h, v3.4h\n"
- "smlal2 v24.4s, v20.8h, v3.8h\n"
- "smlal v7.4s, v5.4h, v3.4h\n"
- "smlal2 v14.4s, v5.8h, v3.8h\n"
- "smlal v27.4s, v21.4h, v3.4h\n"
- "smlal2 v22.4s, v21.8h, v3.8h\n"
- "tbz x2, #2, 85f\n"
- "ld1 { v30.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 84f\n"
- "ld1 { v30.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 87f\n"
- "ld1 { v30.b }[6], [x20]\n"
+ "ldr d16, [x5, #0x90]\n"
+ "ushll v11.8h, v11.8b, #0x0\n"
+ "ldr x20, [x4, #0xe0]\n"
+ "usubl v16.8h, v16.8b, v14.8b\n"
+ "smlal v7.4s, v11.4h, v17.4h\n"
+ "smlal2 v19.4s, v11.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v3.4h, v16.4h\n"
+ "smlal2 v24.4s, v3.8h, v16.8h\n"
+ "smlal v12.4s, v26.4h, v16.4h\n"
+ "smlal2 v13.4s, v26.8h, v16.8h\n"
+ "smlal v5.4s, v11.4h, v16.4h\n"
+ "smlal2 v23.4s, v11.8h, v16.8h\n"
+ "tbz x1, #2, 85f\n"
+ "ld1 { v25.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 84f\n"
+ "ld1 { v25.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 87f\n"
+ "ld1 { v25.b }[6], [x20]\n"
"b 87f\n"
"84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x2, #0, 87f\n"
- "ld1 { v30.b }[4], [x20]\n"
+ "tbz x1, #0, 87f\n"
+ "ld1 { v25.b }[4], [x20]\n"
"b 87f\n"
"85:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x2, #1, 86f\n"
- "ld1 { v30.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 87f\n"
- "ld1 { v30.b }[2], [x20]\n"
+ "tbz x1, #1, 86f\n"
+ "ld1 { v25.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 87f\n"
+ "ld1 { v25.b }[2], [x20]\n"
"b 87f\n"
"86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 87f\n"
- "ld1 { v30.b }[0], [x20]\n"
+ "tbz x1, #0, 87f\n"
+ "ld1 { v25.b }[0], [x20]\n"
"87:" // Oddments: Load (4, 4): Bit 2: End
- "ldr d19, [x7, #0x98]\n"
- "ushll v30.8h, v30.8b, #0x0\n"
- "usubl v19.8h, v19.8b, v2.8b\n"
- "ldr x20, [x6, #0xe8]\n"
- "smlal v8.4s, v30.4h, v3.4h\n"
- "smlal2 v17.4s, v30.8h, v3.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v5.4h, v19.4h\n"
- "smlal2 v24.4s, v5.8h, v19.8h\n"
- "smlal v7.4s, v29.4h, v19.4h\n"
- "smlal2 v14.4s, v29.8h, v19.8h\n"
- "smlal v27.4s, v30.4h, v19.4h\n"
- "smlal2 v22.4s, v30.8h, v19.8h\n"
- "tbz x2, #2, 89f\n"
- "ld1 { v20.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 88f\n"
- "ld1 { v20.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 91f\n"
- "ld1 { v20.b }[6], [x20]\n"
+ "ldr d17, [x5, #0x98]\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ldr x20, [x4, #0xe8]\n"
+ "usubl v17.8h, v17.8b, v14.8b\n"
+ "smlal v7.4s, v25.4h, v16.4h\n"
+ "smlal2 v19.4s, v25.8h, v16.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v26.4h, v17.4h\n"
+ "smlal2 v24.4s, v26.8h, v17.8h\n"
+ "smlal v12.4s, v2.4h, v17.4h\n"
+ "smlal2 v13.4s, v2.8h, v17.8h\n"
+ "smlal v5.4s, v25.4h, v17.4h\n"
+ "smlal2 v23.4s, v25.8h, v17.8h\n"
+ "tbz x1, #2, 89f\n"
+ "ld1 { v9.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 88f\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 91f\n"
+ "ld1 { v9.b }[6], [x20]\n"
"b 91f\n"
"88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset
- "tbz x2, #0, 91f\n"
- "ld1 { v20.b }[4], [x20]\n"
+ "tbz x1, #0, 91f\n"
+ "ld1 { v9.b }[4], [x20]\n"
"b 91f\n"
"89:" // Oddments: Load (4, 5): Bit 2: Unset
- "tbz x2, #1, 90f\n"
- "ld1 { v20.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 91f\n"
- "ld1 { v20.b }[2], [x20]\n"
+ "tbz x1, #1, 90f\n"
+ "ld1 { v9.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 91f\n"
+ "ld1 { v9.b }[2], [x20]\n"
"b 91f\n"
"90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 91f\n"
- "ld1 { v20.b }[0], [x20]\n"
+ "tbz x1, #0, 91f\n"
+ "ld1 { v9.b }[0], [x20]\n"
"91:" // Oddments: Load (4, 5): Bit 2: End
- "ldr d23, [x7, #0xa0]\n"
- "ushll v20.8h, v20.8b, #0x0\n"
- "usubl v23.8h, v23.8b, v2.8b\n"
- "ldr x20, [x6, #0xf0]\n"
- "smlal v8.4s, v20.4h, v19.4h\n"
- "smlal2 v17.4s, v20.8h, v19.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v18.4h, v23.4h\n"
- "smlal2 v24.4s, v18.8h, v23.8h\n"
- "smlal v7.4s, v1.4h, v23.4h\n"
- "smlal2 v14.4s, v1.8h, v23.8h\n"
- "tbz x2, #2, 93f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 92f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 95f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ldr d4, [x5, #0xa0]\n"
+ "ushll v9.8h, v9.8b, #0x0\n"
+ "ldr x20, [x4, #0xf0]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v7.4s, v9.4h, v17.4h\n"
+ "smlal2 v19.4s, v9.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v18.4h, v4.4h\n"
+ "smlal2 v24.4s, v18.8h, v4.8h\n"
+ "smlal v12.4s, v10.4h, v4.4h\n"
+ "smlal2 v13.4s, v10.8h, v4.8h\n"
+ "tbz x1, #2, 93f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 92f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 95f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 95f\n"
"92:" // Oddments: Load (5, 0): Bit 2: Bit 1: Unset
- "tbz x2, #0, 95f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "tbz x1, #0, 95f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 95f\n"
"93:" // Oddments: Load (5, 0): Bit 2: Unset
- "tbz x2, #1, 94f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 95f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "tbz x1, #1, 94f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 95f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 95f\n"
"94:" // Oddments: Load (5, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 95f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "tbz x1, #0, 95f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"95:" // Oddments: Load (5, 0): Bit 2: End
- "ushll v10.8h, v10.8b, #0x0\n"
- "ldr x20, [x6, #0xf8]\n"
- "smlal v27.4s, v10.4h, v23.4h\n"
- "smlal2 v22.4s, v10.8h, v23.8h\n"
- "add x20, x20, x4\n"
- "tbz x2, #2, 97f\n"
- "ld1 { v18.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 96f\n"
- "ld1 { v18.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 99f\n"
- "ld1 { v18.b }[6], [x20]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x4, #0xf8]\n"
+ "smlal v5.4s, v16.4h, v4.4h\n"
+ "smlal2 v23.4s, v16.8h, v4.8h\n"
+ "add x20, x20, x2\n"
+ "tbz x1, #2, 97f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 96f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 99f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 99f\n"
"96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset
- "tbz x2, #0, 99f\n"
- "ld1 { v18.b }[4], [x20]\n"
+ "tbz x1, #0, 99f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 99f\n"
"97:" // Oddments: Load (5, 1): Bit 2: Unset
- "tbz x2, #1, 98f\n"
- "ld1 { v18.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 99f\n"
- "ld1 { v18.b }[2], [x20]\n"
+ "tbz x1, #1, 98f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 99f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 99f\n"
"98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 99f\n"
- "ld1 { v18.b }[0], [x20]\n"
+ "tbz x1, #0, 99f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"99:" // Oddments: Load (5, 1): Bit 2: End
- "ldr d5, [x7, #0xa8]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "usubl v5.8h, v5.8b, v2.8b\n"
- "ldr x20, [x6, #0x100]\n"
- "smlal v8.4s, v18.4h, v23.4h\n"
- "smlal2 v17.4s, v18.8h, v23.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v1.4h, v5.4h\n"
- "smlal2 v24.4s, v1.8h, v5.8h\n"
- "smlal v7.4s, v4.4h, v5.4h\n"
- "smlal2 v14.4s, v4.8h, v5.8h\n"
- "smlal v27.4s, v18.4h, v5.4h\n"
- "smlal2 v22.4s, v18.8h, v5.8h\n"
- "tbz x2, #2, 101f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 100f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 103f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ldr d26, [x5, #0xa8]\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "ldr x20, [x4, #0x100]\n"
+ "usubl v26.8h, v26.8b, v14.8b\n"
+ "smlal v7.4s, v17.4h, v4.4h\n"
+ "smlal2 v19.4s, v17.8h, v4.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v10.4h, v26.4h\n"
+ "smlal2 v24.4s, v10.8h, v26.8h\n"
+ "smlal v12.4s, v6.4h, v26.4h\n"
+ "smlal2 v13.4s, v6.8h, v26.8h\n"
+ "smlal v5.4s, v17.4h, v26.4h\n"
+ "smlal2 v23.4s, v17.8h, v26.8h\n"
+ "tbz x1, #2, 101f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 100f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 103f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 103f\n"
"100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset
- "tbz x2, #0, 103f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "tbz x1, #0, 103f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 103f\n"
"101:" // Oddments: Load (5, 2): Bit 2: Unset
- "tbz x2, #1, 102f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 103f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "tbz x1, #1, 102f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 103f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 103f\n"
"102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 103f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "tbz x1, #0, 103f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"103:" // Oddments: Load (5, 2): Bit 2: End
- "ldr d18, [x7, #0xb0]\n"
- "ushll v9.8h, v9.8b, #0x0\n"
- "usubl v18.8h, v18.8b, v2.8b\n"
- "ldr x20, [x6, #0x108]\n"
- "smlal v8.4s, v9.4h, v5.4h\n"
- "smlal2 v17.4s, v9.8h, v5.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v4.4h, v18.4h\n"
- "smlal2 v24.4s, v4.8h, v18.8h\n"
- "smlal v7.4s, v21.4h, v18.4h\n"
- "smlal2 v14.4s, v21.8h, v18.8h\n"
- "smlal v27.4s, v9.4h, v18.4h\n"
- "smlal2 v22.4s, v9.8h, v18.8h\n"
- "tbz x2, #2, 105f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 104f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 107f\n"
- "ld1 { v5.b }[6], [x20]\n"
+ "ldr d4, [x5, #0xb0]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x4, #0x108]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v7.4s, v16.4h, v26.4h\n"
+ "smlal2 v19.4s, v16.8h, v26.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v6.4h, v4.4h\n"
+ "smlal2 v24.4s, v6.8h, v4.8h\n"
+ "smlal v12.4s, v11.4h, v4.4h\n"
+ "smlal2 v13.4s, v11.8h, v4.8h\n"
+ "smlal v5.4s, v16.4h, v4.4h\n"
+ "smlal2 v23.4s, v16.8h, v4.8h\n"
+ "tbz x1, #2, 105f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 104f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 107f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 107f\n"
"104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset
- "tbz x2, #0, 107f\n"
- "ld1 { v5.b }[4], [x20]\n"
+ "tbz x1, #0, 107f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 107f\n"
"105:" // Oddments: Load (5, 3): Bit 2: Unset
- "tbz x2, #1, 106f\n"
- "ld1 { v5.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 107f\n"
- "ld1 { v5.b }[2], [x20]\n"
+ "tbz x1, #1, 106f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 107f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 107f\n"
"106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 107f\n"
- "ld1 { v5.b }[0], [x20]\n"
+ "tbz x1, #0, 107f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"107:" // Oddments: Load (5, 3): Bit 2: End
- "ldr d11, [x7, #0xb8]\n"
- "ushll v5.8h, v5.8b, #0x0\n"
- "usubl v11.8h, v11.8b, v2.8b\n"
- "ldr x20, [x6, #0x110]\n"
- "smlal v8.4s, v5.4h, v18.4h\n"
- "smlal2 v17.4s, v5.8h, v18.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v21.4h, v11.4h\n"
- "smlal2 v24.4s, v21.8h, v11.8h\n"
- "smlal v7.4s, v30.4h, v11.4h\n"
- "smlal2 v14.4s, v30.8h, v11.8h\n"
- "smlal v27.4s, v5.4h, v11.4h\n"
- "smlal2 v22.4s, v5.8h, v11.8h\n"
- "tbz x2, #2, 109f\n"
- "ld1 { v18.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 108f\n"
- "ld1 { v18.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 111f\n"
- "ld1 { v18.b }[6], [x20]\n"
+ "ldr d2, [x5, #0xb8]\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "ldr x20, [x4, #0x110]\n"
+ "usubl v2.8h, v2.8b, v14.8b\n"
+ "smlal v7.4s, v17.4h, v4.4h\n"
+ "smlal2 v19.4s, v17.8h, v4.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v11.4h, v2.4h\n"
+ "smlal2 v24.4s, v11.8h, v2.8h\n"
+ "smlal v12.4s, v25.4h, v2.4h\n"
+ "smlal2 v13.4s, v25.8h, v2.8h\n"
+ "smlal v5.4s, v17.4h, v2.4h\n"
+ "smlal2 v23.4s, v17.8h, v2.8h\n"
+ "tbz x1, #2, 109f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 108f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 111f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 111f\n"
"108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset
- "tbz x2, #0, 111f\n"
- "ld1 { v18.b }[4], [x20]\n"
+ "tbz x1, #0, 111f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 111f\n"
"109:" // Oddments: Load (5, 4): Bit 2: Unset
- "tbz x2, #1, 110f\n"
- "ld1 { v18.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 111f\n"
- "ld1 { v18.b }[2], [x20]\n"
+ "tbz x1, #1, 110f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 111f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 111f\n"
"110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 111f\n"
- "ld1 { v18.b }[0], [x20]\n"
+ "tbz x1, #0, 111f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"111:" // Oddments: Load (5, 4): Bit 2: End
- "ldr d16, [x7, #0xc0]\n"
- "ushll v18.8h, v18.8b, #0x0\n"
- "usubl v16.8h, v16.8b, v2.8b\n"
- "ldr x20, [x6, #0x118]\n"
- "smlal v8.4s, v18.4h, v11.4h\n"
- "smlal2 v17.4s, v18.8h, v11.8h\n"
- "add x20, x20, x4\n"
- "smlal v13.4s, v30.4h, v16.4h\n"
- "smlal2 v24.4s, v30.8h, v16.8h\n"
- "smlal v7.4s, v20.4h, v16.4h\n"
- "smlal2 v14.4s, v20.8h, v16.8h\n"
- "smlal v27.4s, v18.4h, v16.4h\n"
- "smlal2 v22.4s, v18.8h, v16.8h\n"
- "tbz x2, #2, 113f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x2, #1, 112f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x2, #0, 115f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ldr d4, [x5, #0xc0]\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ldr x20, [x4, #0x118]\n"
+ "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v7.4s, v16.4h, v2.4h\n"
+ "smlal2 v19.4s, v16.8h, v2.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v30.4s, v25.4h, v4.4h\n"
+ "smlal2 v24.4s, v25.8h, v4.8h\n"
+ "smlal v12.4s, v9.4h, v4.4h\n"
+ "smlal2 v13.4s, v9.8h, v4.8h\n"
+ "smlal v5.4s, v16.4h, v4.4h\n"
+ "smlal2 v23.4s, v16.8h, v4.8h\n"
+ "tbz x1, #2, 113f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x1, #1, 112f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x1, #0, 115f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 115f\n"
"112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset
- "tbz x2, #0, 115f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x1, #0, 115f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 115f\n"
"113:" // Oddments: Load (5, 5): Bit 2: Unset
- "tbz x2, #1, 114f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x2, #0, 115f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x1, #1, 114f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x1, #0, 115f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 115f\n"
"114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 115f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x1, #0, 115f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"115:" // Oddments: Load (5, 5): Bit 2: End
- "ushll v21.8h, v21.8b, #0x0\n"
- "smlal v8.4s, v21.4h, v16.4h\n"
- "smlal2 v17.4s, v21.8h, v16.8h\n"
- "tbz x2, #2, 117f\n"
- "ld1 { v16.4s }, [x8], #0x10\n"
- "ld1 { v21.4s }, [x17], #0x10\n"
- "tbz x2, #1, 116f\n"
- "ld1 { v18.d }[0], [x8], #0x8\n"
- "ld1 { v0.d }[0], [x17], #0x8\n"
- "tbz x2, #0, 119f\n"
- "ld1 { v18.s }[2], [x8]\n"
- "ld1 { v0.s }[2], [x17]\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "smlal v7.4s, v17.4h, v4.4h\n"
+ "smlal2 v19.4s, v17.8h, v4.8h\n"
+ "tbz x1, #2, 117f\n"
+ "ld1 { v16.4s }, [x6], #0x10\n"
+ "ld1 { v27.4s }, [x7], #0x10\n"
+ "tbz x1, #1, 116f\n"
+ "ld1 { v11.d }[0], [x6], #0x8\n"
+ "ld1 { v18.d }[0], [x7], #0x8\n"
+ "tbz x1, #0, 119f\n"
+ "ld1 { v11.s }[2], [x6]\n"
+ "ld1 { v18.s }[2], [x7]\n"
"b 119f\n"
"116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x2, #0, 119f\n"
- "ld1 { v18.s }[0], [x8]\n"
- "ld1 { v0.s }[0], [x17]\n"
+ "tbz x1, #0, 119f\n"
+ "ld1 { v11.s }[0], [x6]\n"
+ "ld1 { v18.s }[0], [x7]\n"
"b 119f\n"
"117:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x2, #1, 118f\n"
- "ld1 { v16.d }[0], [x8], #0x8\n"
- "ld1 { v21.d }[0], [x17], #0x8\n"
- "tbz x2, #0, 119f\n"
- "ld1 { v16.s }[2], [x8]\n"
- "ld1 { v21.s }[2], [x17]\n"
+ "tbz x1, #1, 118f\n"
+ "ld1 { v16.d }[0], [x6], #0x8\n"
+ "ld1 { v27.d }[0], [x7], #0x8\n"
+ "tbz x1, #0, 119f\n"
+ "ld1 { v16.s }[2], [x6]\n"
+ "ld1 { v27.s }[2], [x7]\n"
"b 119f\n"
"118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 119f\n"
- "ld1 { v16.s }[0], [x8]\n"
- "ld1 { v21.s }[0], [x17]\n"
+ "tbz x1, #0, 119f\n"
+ "ld1 { v16.s }[0], [x6]\n"
+ "ld1 { v27.s }[0], [x7]\n"
"119:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v13.4s, v13.4s, v16.4s\n"
- "and v5.16b, v13.16b, v21.16b\n"
- "add x16, x16, x5\n"
- "add x15, x15, x5\n"
- "sqrdmulh v24.4s, v24.4s, v18.4s\n"
- "sshr v5.4s, v5.4s, #0x1f\n"
- "add x14, x14, x5\n"
- "add x13, x13, x5\n"
- "and v2.16b, v24.16b, v0.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v16.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v11.4s\n"
+ "add x17, x17, x3\n"
+ "add x16, x16, x3\n"
+ "sqrdmulh v12.4s, v12.4s, v16.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v16.4s\n"
+ "add x15, x15, x3\n"
+ "add x14, x14, x3\n"
"sqrdmulh v7.4s, v7.4s, v16.4s\n"
- "sqrdmulh v27.4s, v27.4s, v16.4s\n"
- "sqrdmulh v8.4s, v8.4s, v16.4s\n"
- "sqadd v13.4s, v13.4s, v5.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v11.4s\n"
+ "and v17.16b, v30.16b, v27.16b\n"
+ "and v16.16b, v24.16b, v18.16b\n"
+ "and v25.16b, v12.16b, v27.16b\n"
+ "and v2.16b, v5.16b, v27.16b\n"
+ "sqrdmulh v23.4s, v23.4s, v11.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v11.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "and v22.16b, v13.16b, v18.16b\n"
"sshr v2.4s, v2.4s, #0x1f\n"
- "and v23.16b, v7.16b, v21.16b\n"
- "sqrdmulh v14.4s, v14.4s, v18.4s\n"
- "and v20.16b, v27.16b, v21.16b\n"
- "sqrdmulh v22.4s, v22.4s, v18.4s\n"
- "and v31.16b, v8.16b, v21.16b\n"
- "sqrdmulh v17.4s, v17.4s, v18.4s\n"
- "sqadd v24.4s, v24.4s, v2.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v18.16b, v14.16b, v0.16b\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "and v11.16b, v22.16b, v0.16b\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v10.16b, v17.16b, v0.16b\n"
- "sqadd v7.4s, v7.4s, v23.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v27.4s, v27.4s, v20.4s\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "sqadd v8.4s, v8.4s, v31.4s\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "srshl v13.4s, v13.4s, v21.4s\n"
- "srshl v7.4s, v7.4s, v21.4s\n"
- "sqadd v14.4s, v14.4s, v18.4s\n"
- "srshl v27.4s, v27.4s, v21.4s\n"
- "sqadd v22.4s, v22.4s, v11.4s\n"
- "srshl v8.4s, v8.4s, v21.4s\n"
- "sqadd v17.4s, v17.4s, v10.4s\n"
- "srshl v24.4s, v24.4s, v0.4s\n"
- "sqxtn v13.4h, v13.4s\n"
- "srshl v14.4s, v14.4s, v0.4s\n"
+ "and v3.16b, v23.16b, v18.16b\n"
+ "sqadd v30.4s, v30.4s, v17.4s\n"
+ "and v17.16b, v7.16b, v27.16b\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "and v16.16b, v19.16b, v18.16b\n"
+ "sqadd v12.4s, v12.4s, v25.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v5.4s, v5.4s, v2.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v30.4s, v30.4s, v27.4s\n"
+ "srshl v12.4s, v12.4s, v27.4s\n"
+ "sqadd v7.4s, v7.4s, v17.4s\n"
+ "sqadd v13.4s, v13.4s, v22.4s\n"
+ "srshl v5.4s, v5.4s, v27.4s\n"
+ "sqadd v23.4s, v23.4s, v3.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "srshl v24.4s, v24.4s, v18.4s\n"
+ "srshl v7.4s, v7.4s, v27.4s\n"
+ "sqxtn v30.4h, v30.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "sqxtn v12.4h, v12.4s\n"
+ "srshl v23.4s, v23.4s, v18.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "srshl v19.4s, v19.4s, v18.4s\n"
"sqxtn v7.4h, v7.4s\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
- "sqxtn v27.4h, v27.4s\n"
- "srshl v17.4s, v17.4s, v0.4s\n"
- "sqxtn v8.4h, v8.4s\n"
- "sqxtn2 v13.8h, v24.4s\n"
- "sqxtn2 v7.8h, v14.4s\n"
- "sqxtn2 v27.8h, v22.4s\n"
- "sqxtn2 v8.8h, v17.4s\n"
- "sqadd v13.8h, v13.8h, v25.8h\n"
- "sqadd v7.8h, v7.8h, v25.8h\n"
- "sqadd v27.8h, v27.8h, v25.8h\n"
- "sqadd v8.8h, v8.8h, v25.8h\n"
- "smax v13.8h, v13.8h, v12.8h\n"
- "smax v7.8h, v7.8h, v12.8h\n"
- "smax v27.8h, v27.8h, v12.8h\n"
- "smax v8.8h, v8.8h, v12.8h\n"
- "smin v13.8h, v13.8h, v26.8h\n"
- "smin v7.8h, v7.8h, v26.8h\n"
- "smin v27.8h, v27.8h, v26.8h\n"
- "smin v8.8h, v8.8h, v26.8h\n"
- "uzp1 v13.16b, v13.16b, v13.16b\n"
+ "sqxtn2 v30.8h, v24.4s\n"
+ "sqxtn2 v12.8h, v13.4s\n"
+ "sqxtn2 v5.8h, v23.4s\n"
+ "sqxtn2 v7.8h, v19.4s\n"
+ "sqadd v30.8h, v30.8h, v15.8h\n"
+ "sqadd v12.8h, v12.8h, v15.8h\n"
+ "sqadd v5.8h, v5.8h, v15.8h\n"
+ "sqadd v7.8h, v7.8h, v15.8h\n"
+ "smax v30.8h, v30.8h, v31.8h\n"
+ "smax v12.8h, v12.8h, v31.8h\n"
+ "smax v5.8h, v5.8h, v31.8h\n"
+ "smax v7.8h, v7.8h, v31.8h\n"
+ "smin v30.8h, v30.8h, v28.8h\n"
+ "smin v12.8h, v12.8h, v28.8h\n"
+ "smin v5.8h, v5.8h, v28.8h\n"
+ "smin v7.8h, v7.8h, v28.8h\n"
+ "uzp1 v30.16b, v30.16b, v30.16b\n"
+ "uzp1 v12.16b, v12.16b, v12.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
"uzp1 v7.16b, v7.16b, v7.16b\n"
- "uzp1 v27.16b, v27.16b, v27.16b\n"
- "uzp1 v8.16b, v8.16b, v8.16b\n"
- "tbz x2, #2, 121f\n"
- "st1 { v13.s }[0], [x16], #0x4\n"
- "st1 { v7.s }[0], [x15], #0x4\n"
- "st1 { v27.s }[0], [x14], #0x4\n"
- "st1 { v8.s }[0], [x13], #0x4\n"
- "tbz x2, #1, 120f\n"
- "st1 { v13.h }[2], [x16], #0x2\n"
- "st1 { v7.h }[2], [x15], #0x2\n"
- "st1 { v27.h }[2], [x14], #0x2\n"
- "st1 { v8.h }[2], [x13], #0x2\n"
- "tbz x2, #0, 123f\n"
- "st1 { v13.b }[6], [x16], #0x1\n"
- "st1 { v7.b }[6], [x15], #0x1\n"
- "st1 { v27.b }[6], [x14], #0x1\n"
- "st1 { v8.b }[6], [x13], #0x1\n"
+ "tbz x1, #2, 121f\n"
+ "st1 { v30.s }[0], [x17], #0x4\n"
+ "st1 { v12.s }[0], [x16], #0x4\n"
+ "st1 { v5.s }[0], [x15], #0x4\n"
+ "st1 { v7.s }[0], [x14], #0x4\n"
+ "tbz x1, #1, 120f\n"
+ "st1 { v30.h }[2], [x17], #0x2\n"
+ "st1 { v12.h }[2], [x16], #0x2\n"
+ "st1 { v5.h }[2], [x15], #0x2\n"
+ "st1 { v7.h }[2], [x14], #0x2\n"
+ "tbz x1, #0, 123f\n"
+ "st1 { v30.b }[6], [x17], #0x1\n"
+ "st1 { v12.b }[6], [x16], #0x1\n"
+ "st1 { v5.b }[6], [x15], #0x1\n"
+ "st1 { v7.b }[6], [x14], #0x1\n"
"b 123f\n"
"120:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x2, #0, 123f\n"
- "st1 { v13.b }[4], [x16], #0x1\n"
- "st1 { v7.b }[4], [x15], #0x1\n"
- "st1 { v27.b }[4], [x14], #0x1\n"
- "st1 { v8.b }[4], [x13], #0x1\n"
+ "tbz x1, #0, 123f\n"
+ "st1 { v30.b }[4], [x17], #0x1\n"
+ "st1 { v12.b }[4], [x16], #0x1\n"
+ "st1 { v5.b }[4], [x15], #0x1\n"
+ "st1 { v7.b }[4], [x14], #0x1\n"
"b 123f\n"
"121:" // Oddments: Bit 2: Unset
- "tbz x2, #1, 122f\n"
- "st1 { v13.h }[0], [x16], #0x2\n"
- "st1 { v7.h }[0], [x15], #0x2\n"
- "st1 { v27.h }[0], [x14], #0x2\n"
- "st1 { v8.h }[0], [x13], #0x2\n"
- "tbz x2, #0, 123f\n"
- "st1 { v13.b }[2], [x16], #0x1\n"
- "st1 { v7.b }[2], [x15], #0x1\n"
- "st1 { v27.b }[2], [x14], #0x1\n"
- "st1 { v8.b }[2], [x13], #0x1\n"
+ "tbz x1, #1, 122f\n"
+ "st1 { v30.h }[0], [x17], #0x2\n"
+ "st1 { v12.h }[0], [x16], #0x2\n"
+ "st1 { v5.h }[0], [x15], #0x2\n"
+ "st1 { v7.h }[0], [x14], #0x2\n"
+ "tbz x1, #0, 123f\n"
+ "st1 { v30.b }[2], [x17], #0x1\n"
+ "st1 { v12.b }[2], [x16], #0x1\n"
+ "st1 { v5.b }[2], [x15], #0x1\n"
+ "st1 { v7.b }[2], [x14], #0x1\n"
"b 123f\n"
"122:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x2, #0, 123f\n"
- "st1 { v13.b }[0], [x16], #0x1\n"
- "st1 { v7.b }[0], [x15], #0x1\n"
- "st1 { v27.b }[0], [x14], #0x1\n"
- "st1 { v8.b }[0], [x13], #0x1\n"
+ "tbz x1, #0, 123f\n"
+ "st1 { v30.b }[0], [x17], #0x1\n"
+ "st1 { v12.b }[0], [x16], #0x1\n"
+ "st1 { v5.b }[0], [x15], #0x1\n"
+ "st1 { v7.b }[0], [x14], #0x1\n"
"123:" // Oddments: Bit 2: End
"124:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
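
The register renumbering in the hunk above does not change the requantization epilogue: each pair of 32-bit accumulators is still scaled with sqrdmulh against the per-channel multiplier, given the and/sshr/sqadd rounding fix-up, shifted with srshl, narrowed with sqxtn/sqxtn2, offset by c_offset, clamped to [minval, maxval] and finally narrowed to bytes with uzp1. A minimal NEON-intrinsics sketch of that sequence follows; the helper name and signature are illustrative only, not part of the library's API.

#include <arm_neon.h>

// Hypothetical helper mirroring the requantize-and-narrow epilogue of the
// generated assembly. 'mul' and 'shift' stand for the per-channel
// requant_muls/requant_shifts values (shift is <= 0); c_offset, minval and
// maxval come from the Requantize32 block named in the operand list.
static inline uint8x8_t requantize_u8(int32x4_t lo, int32x4_t hi,
                                      int32x4_t mul, int32x4_t shift,
                                      int16x8_t c_offset,
                                      int16x8_t minval, int16x8_t maxval)
{
    // sqrdmulh: saturating rounding doubling multiply, keeping the high half.
    lo = vqrdmulhq_s32(lo, mul);
    hi = vqrdmulhq_s32(hi, mul);
    // The and/sshr/sqadd triple nudges negative values down by one so the
    // rounding shift below rounds half away from zero.
    lo = vqaddq_s32(lo, vshrq_n_s32(vandq_s32(lo, shift), 31));
    hi = vqaddq_s32(hi, vshrq_n_s32(vandq_s32(hi, shift), 31));
    // srshl by a negative amount is a rounding right shift; sqxtn saturates
    // down to 16 bits (sqxtn/sqxtn2 in the assembly).
    int16x8_t r = vcombine_s16(vqmovn_s32(vrshlq_s32(lo, shift)),
                               vqmovn_s32(vrshlq_s32(hi, shift)));
    r = vqaddq_s16(r, c_offset);  // add the output zero point
    r = vmaxq_s16(r, minval);     // clamp to the activation range
    r = vminq_s16(r, maxval);
    // uzp1 on the byte view keeps the low byte of every halfword, i.e. a
    // plain 16 -> 8 narrow once the values are clamped.
    return vmovn_u16(vreinterpretq_u16_s16(r));
}

The tbz ladders in the "Oddments" blocks handle the final one to seven channels the same way on the load and store sides: the low three bits of the remaining channel count (x1 in the new register allocation) select a 4-, 2- and/or 1-byte partial transfer. A scalar sketch of the load side, again with illustrative names:

#include <cstdint>
#include <cstring>

// Hypothetical scalar equivalent of the tbz #2 / #1 / #0 partial-load ladder.
static inline void load_tail(uint8_t *dst, const uint8_t *src, uint64_t n)
{
    std::size_t i = 0;
    if (n & 4) { std::memcpy(dst + i, src + i, 4); i += 4; } // ld1 { v.s } lane
    if (n & 2) { std::memcpy(dst + i, src + i, 2); i += 2; } // ld1 { v.h } lane
    if (n & 1) { dst[i] = src[i]; }                          // ld1 { v.b } lane
}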
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index f1c1b2315c..7e1e00abcc 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[16];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
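
The Params change above swaps the platform-dependent long unsigned int for a fixed-width uint64_t: the kernel reads n_channels straight into a 64-bit x-register (ldr x8, [%x[params], %[offsetof_Params_n_channels]] in the hunk below), so the field's width should not vary with the ABI. A sketch of the changed field only, with the rest of the layout elided as in the diff:

#include <cstdint>

// Illustrative fragment; the full Params definition is in the source file.
struct Params {
    uint64_t n_channels;  // fixed 64-bit width, matching the ldr into x8
    // ... remaining pointer members unchanged ...
};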
@@ -91,1072 +91,1072 @@ void a64_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v14.16b }, [x20]\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v19.16b }, [x21]\n"
- "ld1r { v13.8h }, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_minval]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v29.8h }, [x21]\n"
- "ld1r { v12.8h }, [x20]\n"
"mov x17, #0x0\n"
"mov x16, #0x0\n"
+ "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
"add x15, %x[params], %[offsetof_Params_inptrs]\n"
"ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
"ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
"ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "ssubl v23.8h, v23.8b, v19.8b\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "ssubl v16.8h, v16.8b, v19.8b\n"
- "ssubl v1.8h, v1.8b, v19.8b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "ssubl v5.8h, v5.8b, v19.8b\n"
- "ssubl v26.8h, v26.8b, v19.8b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "ssubl v18.8h, v18.8b, v19.8b\n"
- "ssubl v31.8h, v31.8b, v19.8b\n"
- "ldr d20, [x14, #0x40]\n"
+ "lsr x11, x8, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v11.16b }, [x20]\n"
+ "add x21, x23, %[offsetof_Requantize32_b_offset]\n"
+ "add x20, x23, %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v16.16b }, [x21]\n"
+ "ld1r { v12.8h }, [x20]\n"
+ "add x21, x23, %[offsetof_Requantize32_minval]\n"
+ "add x20, x23, %[offsetof_Requantize32_maxval]\n"
+ "ld1r { v14.8h }, [x21]\n"
+ "ld1r { v13.8h }, [x20]\n"
+ "ldp x10, x9, [x22, #0x0]\n"
+ "ldp x28, x27, [x22, #0x10]\n"
+ "cbz x11, 3f\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "subs x11, x11, #0x1\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "ssubl v15.8h, v15.8b, v16.8b\n"
+ "ssubl v4.8h, v4.8b, v16.8b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v25.8h, v25.8b, v19.8b\n"
- "ssubl v20.8h, v20.8b, v19.8b\n"
- "ldr q9, [x20, #0x0]\n"
- "ldr q24, [x20, #0x10]\n"
+ "ssubl v5.8h, v5.8b, v16.8b\n"
+ "ssubl v3.8h, v3.8b, v16.8b\n"
+ "ssubl v25.8h, v25.8b, v16.8b\n"
+ "ssubl v10.8h, v10.8b, v16.8b\n"
+ "ssubl v6.8h, v6.8b, v16.8b\n"
+ "ssubl v7.8h, v7.8b, v16.8b\n"
+ "ldr q2, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "ldp x23, x22, [x15, #0x0]\n"
"add x20, x20, #0x20\n"
+ "ssubl v9.8h, v9.8b, v16.8b\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x23, x22, [x15, #0x0]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
"ldp x21, x20, [x15, #0x10]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d22, [x23, x17]\n"
- "ldr d4, [x22, x17]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d8, [x21, x17]\n"
- "ldr d27, [x20, x17]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ldr d19, [x23, x17]\n"
+ "ldr d21, [x22, x17]\n"
+ "ldr d29, [x21, x17]\n"
+ "ldr d22, [x20, x17]\n"
"ldr x20, [x15, #0x20]\n"
- "ldr d15, [x20, x17]\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
+ "ldr d20, [x20, x17]\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q3, [x13, #0x0]\n"
- "ldr q17, [x12, #0x0]\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
- "ldr q21, [x13, #0x10]\n"
- "ldr q28, [x12, #0x10]\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "ldr x20, [x15, #0x28]\n"
- "ldr d11, [x20, x17]\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "ldr x20, [x15, #0x38]\n"
- "ldr d4, [x20, x17]\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "usubl v11.8h, v11.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "ldr x20, [x15, #0x40]\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "ldr x27, [x15, #0x48]\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "ldr x26, [x15, #0x50]\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "ldr d8, [x20, x17]\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
+ "ldr q17, [x13, #0x0]\n"
+ "ldr q26, [x12, #0x0]\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "ldr q28, [x13, #0x10]\n"
+ "ldr q23, [x12, #0x10]\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "ldr x24, [x15, #0x28]\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "ldr x23, [x15, #0x38]\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "ldr x22, [x15, #0x30]\n"
+ "ldr x21, [x15, #0x40]\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
+ "ldr x26, [x15, #0x48]\n"
+ "ldr x20, [x15, #0x50]\n"
+ "ldr d21, [x24, x17]\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "ldr d29, [x21, x17]\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
"ldr x25, [x15, #0x58]\n"
"ldr x24, [x15, #0x60]\n"
- "smlal v2.4s, v11.4h, v31.4h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
"ldr x23, [x15, #0x68]\n"
+ "usubl v18.8h, v18.8b, v11.8b\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
"ldr x22, [x15, #0x70]\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
- "smlal v9.4s, v4.4h, v16.4h\n"
"ldr x21, [x15, #0x78]\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "ldr d22, [x26, x17]\n"
+ "smlal v0.4s, v21.4h, v6.4h\n"
+ "smlal2 v24.4s, v21.8h, v6.8h\n"
+ "ldr d21, [x20, x17]\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "ldr d27, [x27, x17]\n"
- "smlal2 v30.4s, v11.8h, v31.8h\n"
- "ldr d11, [x26, x17]\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
"add x14, x14, #0x48\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal v10.4s, v22.4h, v20.4h\n"
- "usubl v11.8h, v11.8b, v14.8b\n"
- "subs x8, x8, #0x1\n"
- "smlal2 v24.4s, v4.8h, v16.8h\n"
- "smlal v9.4s, v8.4h, v1.4h\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
+ "subs x11, x11, #0x1\n"
+ "smlal v31.4s, v19.4h, v9.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
"add x13, x13, #0x20\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "ldr d20, [x25, x17]\n"
"add x12, x12, #0x20\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "ldr d15, [x25, x17]\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v20.8h\n"
- "ldr d22, [x24, x17]\n"
- "smlal v7.4s, v4.4h, v23.4h\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "smlal v2.4s, v27.4h, v18.4h\n"
- "smlal v10.4s, v27.4h, v26.4h\n"
- "smlal2 v24.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v20.4h\n"
- "smlal2 v0.4s, v4.8h, v23.8h\n"
- "ldr d4, [x23, x17]\n"
- "smlal2 v30.4s, v27.8h, v18.8h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v26.8h\n"
- "ldr d26, [x22, x17]\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "usubl v26.8h, v26.8b, v14.8b\n"
- "smlal v2.4s, v11.4h, v23.4h\n"
- "smlal v10.4s, v15.4h, v1.4h\n"
- "smlal2 v24.4s, v27.8h, v20.8h\n"
- "smlal v9.4s, v11.4h, v5.4h\n"
- "smlal2 v0.4s, v8.8h, v16.8h\n"
- "ldr d8, [x21, x17]\n"
- "smlal2 v30.4s, v11.8h, v23.8h\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v15.8h, v1.8h\n"
- "smlal v7.4s, v27.4h, v25.4h\n"
+ "smlal v2.4s, v18.4h, v4.4h\n"
+ "smlal2 v1.4s, v18.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v9.8h\n"
+ "ldr d19, [x24, x17]\n"
+ "smlal v8.4s, v18.4h, v15.4h\n"
+ "smlal v31.4s, v22.4h, v25.4h\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
+ "smlal2 v30.4s, v18.8h, v15.8h\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v0.4s, v22.4h, v10.4h\n"
+ "smlal2 v24.4s, v22.8h, v10.8h\n"
+ "smlal v2.4s, v29.4h, v5.4h\n"
+ "smlal2 v1.4s, v29.8h, v5.8h\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "smlal2 v27.4s, v22.8h, v25.8h\n"
+ "ldr d25, [x22, x17]\n"
+ "smlal v8.4s, v29.4h, v4.4h\n"
+ "usubl v18.8h, v18.8b, v11.8b\n"
+ "smlal v31.4s, v20.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d4, [x21, x17]\n"
"add x17, x17, #0x8\n"
- "smlal v2.4s, v22.4h, v5.4h\n"
- "smlal v10.4s, v4.4h, v18.4h\n"
- "smlal2 v24.4s, v11.8h, v5.8h\n"
- "smlal v9.4s, v22.4h, v31.4h\n"
- "sqrdmulh v9.4s, v9.4s, v3.4s\n"
- "smlal2 v0.4s, v27.8h, v25.8h\n"
- "smlal2 v30.4s, v22.8h, v5.8h\n"
- "and v27.16b, v9.16b, v17.16b\n"
- "smlal2 v6.4s, v4.8h, v18.8h\n"
- "smlal v7.4s, v15.4h, v18.4h\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "smlal v2.4s, v26.4h, v25.4h\n"
- "smlal v10.4s, v26.4h, v31.4h\n"
- "sqadd v9.4s, v9.4s, v27.4s\n"
- "smlal2 v24.4s, v22.8h, v31.8h\n"
- "smlal2 v0.4s, v15.8h, v18.8h\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- "smlal2 v30.4s, v26.8h, v25.8h\n"
- "smlal2 v6.4s, v26.8h, v31.8h\n"
- "and v31.16b, v24.16b, v28.16b\n"
- "smlal v7.4s, v4.4h, v20.4h\n"
- "smlal v2.4s, v8.4h, v20.4h\n"
- "sqrdmulh v7.4s, v7.4s, v3.4s\n"
- "smlal v10.4s, v8.4h, v25.4h\n"
- "smlal2 v0.4s, v4.8h, v20.8h\n"
- "sqrdmulh v2.4s, v2.4s, v3.4s\n"
- "smlal2 v30.4s, v8.8h, v20.8h\n"
- "smlal2 v6.4s, v8.8h, v25.8h\n"
- "sqrdmulh v10.4s, v10.4s, v3.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v22.16b, v7.16b, v17.16b\n"
- "sqrdmulh v0.4s, v0.4s, v21.4s\n"
- "and v3.16b, v2.16b, v17.16b\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "and v11.16b, v10.16b, v17.16b\n"
- "sqrdmulh v6.4s, v6.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v31.4s\n"
- "sshr v22.4s, v22.4s, #0x1f\n"
- "and v20.16b, v0.16b, v28.16b\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "and v31.16b, v30.16b, v28.16b\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "and v18.16b, v6.16b, v28.16b\n"
- "sqadd v7.4s, v7.4s, v22.4s\n"
+ "smlal v0.4s, v21.4h, v15.4h\n"
+ "smlal2 v24.4s, v21.8h, v15.8h\n"
+ "smlal v2.4s, v22.4h, v9.4h\n"
+ "smlal2 v1.4s, v22.8h, v9.8h\n"
+ "usubl v25.8h, v25.8b, v11.8b\n"
+ "smlal2 v27.4s, v20.8h, v5.8h\n"
+ "smlal v8.4s, v22.4h, v7.4h\n"
+ "usubl v4.8h, v4.8b, v11.8b\n"
+ "smlal v31.4s, v18.4h, v10.4h\n"
+ "smlal2 v30.4s, v22.8h, v7.8h\n"
+ "smlal v0.4s, v19.4h, v3.4h\n"
+ "smlal2 v24.4s, v19.8h, v3.8h\n"
+ "smlal v2.4s, v21.4h, v3.4h\n"
+ "smlal2 v1.4s, v21.8h, v3.8h\n"
+ "smlal2 v27.4s, v18.8h, v10.8h\n"
+ "smlal v8.4s, v20.4h, v10.4h\n"
+ "smlal v31.4s, v25.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v10.8h\n"
+ "smlal v0.4s, v25.4h, v7.4h\n"
+ "smlal2 v24.4s, v25.8h, v7.8h\n"
+ "smlal v2.4s, v19.4h, v6.4h\n"
+ "smlal2 v1.4s, v19.8h, v6.8h\n"
+ "smlal2 v27.4s, v25.8h, v6.8h\n"
+ "smlal v8.4s, v18.4h, v9.4h\n"
+ "smlal v31.4s, v4.4h, v7.4h\n"
+ "smlal2 v30.4s, v18.8h, v9.8h\n"
+ "smlal v0.4s, v4.4h, v9.4h\n"
+ "smlal2 v24.4s, v4.8h, v9.8h\n"
+ "sqrdmulh v2.4s, v2.4s, v17.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v28.4s\n"
+ "smlal2 v27.4s, v4.8h, v7.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v17.4s\n"
+ "and v18.16b, v2.16b, v26.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v17.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v28.4s\n"
+ "and v4.16b, v1.16b, v23.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v17.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "and v21.16b, v8.16b, v26.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v28.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v28.4s\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "and v20.16b, v0.16b, v26.16b\n"
+ "sqadd v2.4s, v2.4s, v18.4s\n"
+ "and v19.16b, v31.16b, v26.16b\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v18.16b, v30.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v4.4s\n"
"sshr v20.4s, v20.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v3.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v11.4s\n"
+ "and v17.16b, v24.16b, v23.16b\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v3.16b, v27.16b, v23.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v17.4s\n"
- "srshl v7.4s, v7.4s, v17.4s\n"
"sqadd v0.4s, v0.4s, v20.4s\n"
- "srshl v2.4s, v2.4s, v17.4s\n"
- "sqadd v30.4s, v30.4s, v31.4s\n"
- "srshl v10.4s, v10.4s, v17.4s\n"
- "sqadd v6.4s, v6.4s, v18.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v28.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v28.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v19.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v26.4s\n"
+ "srshl v8.4s, v8.4s, v26.4s\n"
+ "sqadd v30.4s, v30.4s, v18.4s\n"
+ "srshl v0.4s, v0.4s, v26.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "srshl v31.4s, v31.4s, v26.4s\n"
+ "sqadd v27.4s, v27.4s, v3.4s\n"
+ "srshl v1.4s, v1.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v28.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d9, [x11, x16]\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v23.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v23.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v23.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "str d7, [x10, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "str d2, [x9, x16]\n"
- "str d10, [x28, x16]\n"
- "ldr q9, [x20, #0x0]\n"
- "ldr q24, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str d2, [x10, x16]\n"
+ "str d8, [x9, x16]\n"
+ "str d0, [x28, x16]\n"
+ "str d31, [x27, x16]\n"
"add x16, x16, #0x8\n"
+ "ldr q2, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "add x20, x20, #0x20\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
- "ldr d20, [x14, #0x40]\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldp x23, x22, [x15, #0x0]\n"
- "ssubl v23.8h, v23.8b, v19.8b\n"
- "ssubl v16.8h, v16.8b, v19.8b\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ssubl v15.8h, v15.8b, v16.8b\n"
+ "ssubl v4.8h, v4.8b, v16.8b\n"
+ "ssubl v5.8h, v5.8b, v16.8b\n"
+ "ssubl v3.8h, v3.8b, v16.8b\n"
"ldp x21, x20, [x15, #0x10]\n"
- "ldr d22, [x23, x17]\n"
- "ssubl v1.8h, v1.8b, v19.8b\n"
- "ssubl v5.8h, v5.8b, v19.8b\n"
- "ldr d4, [x22, x17]\n"
- "ldr d8, [x21, x17]\n"
- "ssubl v26.8h, v26.8b, v19.8b\n"
- "ssubl v18.8h, v18.8b, v19.8b\n"
- "ldr d27, [x20, x17]\n"
+ "ssubl v25.8h, v25.8b, v16.8b\n"
+ "ssubl v10.8h, v10.8b, v16.8b\n"
+ "ssubl v6.8h, v6.8b, v16.8b\n"
+ "ssubl v7.8h, v7.8b, v16.8b\n"
+ "ldr d19, [x23, x17]\n"
+ "ldr d21, [x22, x17]\n"
+ "ldr d29, [x21, x17]\n"
+ "ldr d22, [x20, x17]\n"
+ "ssubl v9.8h, v9.8b, v16.8b\n"
"ldr x20, [x15, #0x20]\n"
- "ssubl v31.8h, v31.8b, v19.8b\n"
- "ssubl v25.8h, v25.8b, v19.8b\n"
- "ldr d15, [x20, x17]\n"
- "ssubl v20.8h, v20.8b, v19.8b\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
+ "ldr d20, [x20, x17]\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q28, [x13, #0x0]\n"
- "ldr q17, [x12, #0x0]\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
- "ldr q21, [x13, #0x10]\n"
- "ldr q3, [x12, #0x10]\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "ldr x20, [x15, #0x28]\n"
- "ldr d11, [x20, x17]\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "ldr x20, [x15, #0x38]\n"
- "ldr d4, [x20, x17]\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
- "ldr x20, [x15, #0x30]\n"
- "usubl v11.8h, v11.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
+ "ldr q26, [x13, #0x0]\n"
+ "ldr q28, [x12, #0x0]\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "ldr q17, [x13, #0x10]\n"
+ "ldr q23, [x12, #0x10]\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "ldr x23, [x15, #0x28]\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "ldr x22, [x15, #0x38]\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "ldr x21, [x15, #0x30]\n"
"ldr x20, [x15, #0x40]\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
"ldr x26, [x15, #0x48]\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
"ldr x25, [x15, #0x50]\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "ldr d8, [x20, x17]\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
+ "ldr d21, [x23, x17]\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "ldr d18, [x21, x17]\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "ldr d29, [x20, x17]\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
"ldr x24, [x15, #0x58]\n"
"ldr x23, [x15, #0x60]\n"
- "smlal v2.4s, v11.4h, v31.4h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
"ldr x22, [x15, #0x68]\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
"ldr x21, [x15, #0x70]\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
- "smlal v9.4s, v4.4h, v16.4h\n"
"ldr x20, [x15, #0x78]\n"
- "tst x7, #0x7\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "ldr d27, [x26, x17]\n"
- "smlal2 v30.4s, v11.8h, v31.8h\n"
- "ldr d11, [x25, x17]\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "usubl v18.8h, v18.8b, v11.8b\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "ldr d22, [x26, x17]\n"
+ "smlal v0.4s, v21.4h, v6.4h\n"
+ "smlal2 v24.4s, v21.8h, v6.8h\n"
+ "ldr d21, [x25, x17]\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
+ "tst x8, #0x7\n"
"add x13, x13, #0x20\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal v10.4s, v22.4h, v20.4h\n"
- "usubl v11.8h, v11.8b, v14.8b\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
"add x12, x12, #0x20\n"
- "smlal2 v24.4s, v4.8h, v16.8h\n"
- "smlal v9.4s, v8.4h, v1.4h\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "ldr d15, [x24, x17]\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
- "smlal2 v6.4s, v22.8h, v20.8h\n"
- "ldr d22, [x23, x17]\n"
- "smlal v7.4s, v4.4h, v23.4h\n"
- "usubl v22.8h, v22.8b, v14.8b\n"
- "smlal v2.4s, v27.4h, v18.4h\n"
- "smlal v10.4s, v27.4h, v26.4h\n"
- "smlal2 v24.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v20.4h\n"
- "smlal2 v0.4s, v4.8h, v23.8h\n"
- "ldr d4, [x22, x17]\n"
- "smlal2 v30.4s, v27.8h, v18.8h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "smlal2 v6.4s, v27.8h, v26.8h\n"
- "ldr d26, [x21, x17]\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "usubl v26.8h, v26.8b, v14.8b\n"
- "smlal v2.4s, v11.4h, v23.4h\n"
- "smlal v10.4s, v15.4h, v1.4h\n"
- "smlal2 v24.4s, v27.8h, v20.8h\n"
- "smlal v9.4s, v11.4h, v5.4h\n"
- "smlal2 v0.4s, v8.8h, v16.8h\n"
- "ldr d16, [x20, x17]\n"
- "smlal2 v30.4s, v11.8h, v23.8h\n"
- "usubl v16.8h, v16.8b, v14.8b\n"
- "smlal2 v6.4s, v15.8h, v1.8h\n"
- "smlal v7.4s, v27.4h, v25.4h\n"
+ "smlal v31.4s, v18.4h, v9.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "ldr d20, [x24, x17]\n"
+ "smlal v2.4s, v19.4h, v4.4h\n"
+ "smlal2 v1.4s, v19.8h, v4.8h\n"
+ "smlal2 v27.4s, v18.8h, v9.8h\n"
+ "ldr d18, [x23, x17]\n"
+ "smlal v8.4s, v19.4h, v15.4h\n"
+ "smlal v31.4s, v22.4h, v25.4h\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
+ "smlal2 v30.4s, v19.8h, v15.8h\n"
+ "ldr d19, [x22, x17]\n"
+ "smlal v0.4s, v22.4h, v10.4h\n"
+ "smlal2 v24.4s, v22.8h, v10.8h\n"
+ "smlal v2.4s, v29.4h, v5.4h\n"
+ "smlal2 v1.4s, v29.8h, v5.8h\n"
+ "usubl v18.8h, v18.8b, v11.8b\n"
+ "smlal2 v27.4s, v22.8h, v25.8h\n"
+ "ldr d25, [x21, x17]\n"
+ "smlal v8.4s, v29.4h, v4.4h\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "smlal v31.4s, v20.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d29, [x20, x17]\n"
"add x17, x17, #0x8\n"
- "smlal v2.4s, v22.4h, v5.4h\n"
- "smlal v10.4s, v4.4h, v18.4h\n"
- "smlal2 v24.4s, v11.8h, v5.8h\n"
- "smlal v9.4s, v22.4h, v31.4h\n"
- "sqrdmulh v9.4s, v9.4s, v28.4s\n"
- "smlal2 v0.4s, v27.8h, v25.8h\n"
- "smlal2 v30.4s, v22.8h, v5.8h\n"
- "and v1.16b, v9.16b, v17.16b\n"
- "smlal2 v6.4s, v4.8h, v18.8h\n"
- "smlal v7.4s, v15.4h, v18.4h\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "smlal v2.4s, v26.4h, v25.4h\n"
- "smlal v10.4s, v26.4h, v31.4h\n"
- "sqadd v9.4s, v9.4s, v1.4s\n"
- "smlal2 v24.4s, v22.8h, v31.8h\n"
- "smlal2 v0.4s, v15.8h, v18.8h\n"
- "sqrdmulh v24.4s, v24.4s, v21.4s\n"
- "smlal2 v30.4s, v26.8h, v25.8h\n"
- "smlal2 v6.4s, v26.8h, v31.8h\n"
- "and v31.16b, v24.16b, v3.16b\n"
- "smlal v7.4s, v4.4h, v20.4h\n"
- "smlal v2.4s, v16.4h, v20.4h\n"
- "sqrdmulh v7.4s, v7.4s, v28.4s\n"
- "smlal v10.4s, v16.4h, v25.4h\n"
- "smlal2 v0.4s, v4.8h, v20.8h\n"
- "sqrdmulh v2.4s, v2.4s, v28.4s\n"
- "smlal2 v30.4s, v16.8h, v20.8h\n"
- "smlal2 v6.4s, v16.8h, v25.8h\n"
- "sqrdmulh v10.4s, v10.4s, v28.4s\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v22.16b, v7.16b, v17.16b\n"
- "sqrdmulh v0.4s, v0.4s, v21.4s\n"
- "and v15.16b, v2.16b, v17.16b\n"
- "sqrdmulh v30.4s, v30.4s, v21.4s\n"
- "and v11.16b, v10.16b, v17.16b\n"
- "sqrdmulh v6.4s, v6.4s, v21.4s\n"
- "sqadd v24.4s, v24.4s, v31.4s\n"
+ "smlal v0.4s, v21.4h, v15.4h\n"
+ "smlal2 v24.4s, v21.8h, v15.8h\n"
+ "smlal v2.4s, v22.4h, v9.4h\n"
+ "smlal2 v1.4s, v22.8h, v9.8h\n"
+ "usubl v25.8h, v25.8b, v11.8b\n"
+ "smlal2 v27.4s, v20.8h, v5.8h\n"
+ "smlal v8.4s, v22.4h, v7.4h\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "smlal v31.4s, v19.4h, v10.4h\n"
+ "smlal2 v30.4s, v22.8h, v7.8h\n"
+ "smlal v0.4s, v18.4h, v3.4h\n"
+ "smlal2 v24.4s, v18.8h, v3.8h\n"
+ "smlal v2.4s, v21.4h, v3.4h\n"
+ "smlal2 v1.4s, v21.8h, v3.8h\n"
+ "smlal2 v27.4s, v19.8h, v10.8h\n"
+ "smlal v8.4s, v20.4h, v10.4h\n"
+ "smlal v31.4s, v25.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v10.8h\n"
+ "smlal v0.4s, v25.4h, v7.4h\n"
+ "smlal2 v24.4s, v25.8h, v7.8h\n"
+ "smlal v2.4s, v18.4h, v6.4h\n"
+ "smlal2 v1.4s, v18.8h, v6.8h\n"
+ "smlal2 v27.4s, v25.8h, v6.8h\n"
+ "smlal v8.4s, v19.4h, v9.4h\n"
+ "smlal v31.4s, v29.4h, v7.4h\n"
+ "smlal2 v30.4s, v19.8h, v9.8h\n"
+ "smlal v0.4s, v29.4h, v9.4h\n"
+ "smlal2 v24.4s, v29.8h, v9.8h\n"
+ "sqrdmulh v2.4s, v2.4s, v26.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v17.4s\n"
+ "smlal2 v27.4s, v29.8h, v7.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v26.4s\n"
+ "and v25.16b, v2.16b, v28.16b\n"
+ "sqrdmulh v31.4s, v31.4s, v26.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v17.4s\n"
+ "and v22.16b, v1.16b, v23.16b\n"
+ "sqrdmulh v0.4s, v0.4s, v26.4s\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "and v21.16b, v8.16b, v28.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v17.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v17.4s\n"
"sshr v22.4s, v22.4s, #0x1f\n"
- "and v18.16b, v0.16b, v3.16b\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "and v23.16b, v30.16b, v3.16b\n"
- "sshr v11.4s, v11.4s, #0x1f\n"
- "and v21.16b, v6.16b, v3.16b\n"
- "sqadd v7.4s, v7.4s, v22.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v15.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v11.4s\n"
+ "and v20.16b, v0.16b, v28.16b\n"
+ "sqadd v2.4s, v2.4s, v25.4s\n"
+ "and v19.16b, v31.16b, v28.16b\n"
"sshr v21.4s, v21.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v17.4s\n"
- "srshl v7.4s, v7.4s, v17.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v2.4s, v2.4s, v17.4s\n"
- "sqadd v30.4s, v30.4s, v23.4s\n"
- "srshl v10.4s, v10.4s, v17.4s\n"
- "sqadd v6.4s, v6.4s, v21.4s\n"
- "srshl v24.4s, v24.4s, v3.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v3.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v3.4s\n"
+ "and v10.16b, v30.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v22.4s\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v17.16b, v24.16b, v23.16b\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "and v22.16b, v27.16b, v23.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
+ "sshr v10.4s, v10.4s, #0x1f\n"
+ "sqadd v0.4s, v0.4s, v20.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v19.4s\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v28.4s\n"
+ "srshl v8.4s, v8.4s, v28.4s\n"
+ "sqadd v30.4s, v30.4s, v10.4s\n"
+ "srshl v0.4s, v0.4s, v28.4s\n"
+ "sqadd v24.4s, v24.4s, v17.4s\n"
+ "srshl v31.4s, v31.4s, v28.4s\n"
+ "sqadd v27.4s, v27.4s, v22.4s\n"
+ "srshl v1.4s, v1.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v3.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "str d9, [x11, x16]\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v23.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v23.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v23.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "str d7, [x10, x16]\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "str d2, [x9, x16]\n"
- "str d10, [x28, x16]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str d2, [x10, x16]\n"
+ "str d8, [x9, x16]\n"
+ "str d0, [x28, x16]\n"
+ "str d31, [x27, x16]\n"
"add x16, x16, #0x8\n"
"beq 64f\n"
"add x14, x14, #0x48\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v9.4s }, [x20], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v24.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v24.s }[2], [x20]\n"
+ "tbz x8, #2, 5f\n"
+ "ld1 { v2.4s }, [x20], #0x10\n"
+ "tbz x8, #1, 4f\n"
+ "ld1 { v1.d }[0], [x20], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v1.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v24.s }[0], [x20]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v1.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v9.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v9.s }[2], [x20]\n"
+ "tbz x8, #1, 6f\n"
+ "ld1 { v2.d }[0], [x20], #0x8\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v2.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v9.s }[0], [x20]\n"
+ "tbz x8, #0, 7f\n"
+ "ld1 { v2.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d23, [x14, #0x0]\n"
- "ldr d16, [x14, #0x8]\n"
- "mov v7.16b, v9.16b\n"
- "mov v0.16b, v24.16b\n"
- "ldr d1, [x14, #0x10]\n"
- "ldr d5, [x14, #0x18]\n"
- "mov v2.16b, v9.16b\n"
- "mov v30.16b, v24.16b\n"
- "ldr d26, [x14, #0x20]\n"
- "ldr d18, [x14, #0x28]\n"
- "mov v10.16b, v9.16b\n"
- "mov v6.16b, v24.16b\n"
- "ldr d31, [x14, #0x30]\n"
- "ldr d25, [x14, #0x38]\n"
- "ssubl v23.8h, v23.8b, v19.8b\n"
- "ssubl v16.8h, v16.8b, v19.8b\n"
- "ldr d20, [x14, #0x40]\n"
+ "ldr d15, [x14, #0x0]\n"
+ "ldr d4, [x14, #0x8]\n"
+ "mov v8.16b, v2.16b\n"
+ "mov v30.16b, v1.16b\n"
+ "ldr d5, [x14, #0x10]\n"
+ "ldr d3, [x14, #0x18]\n"
+ "mov v0.16b, v2.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d25, [x14, #0x20]\n"
+ "ldr d10, [x14, #0x28]\n"
+ "mov v31.16b, v2.16b\n"
+ "mov v27.16b, v1.16b\n"
+ "ldr d6, [x14, #0x30]\n"
+ "ldr d7, [x14, #0x38]\n"
+ "ssubl v15.8h, v15.8b, v16.8b\n"
+ "ssubl v4.8h, v4.8b, v16.8b\n"
+ "ldr d9, [x14, #0x40]\n"
"ldp x24, x23, [x15, #0x0]\n"
- "ssubl v1.8h, v1.8b, v19.8b\n"
- "ssubl v5.8h, v5.8b, v19.8b\n"
+ "ssubl v5.8h, v5.8b, v16.8b\n"
+ "ssubl v3.8h, v3.8b, v16.8b\n"
+ "ssubl v25.8h, v25.8b, v16.8b\n"
+ "ssubl v10.8h, v10.8b, v16.8b\n"
+ "ssubl v6.8h, v6.8b, v16.8b\n"
+ "ssubl v7.8h, v7.8b, v16.8b\n"
"ldp x22, x21, [x15, #0x10]\n"
- "ldr x20, [x15, #0x20]\n"
- "ssubl v26.8h, v26.8b, v19.8b\n"
- "ssubl v18.8h, v18.8b, v19.8b\n"
- "ssubl v31.8h, v31.8b, v19.8b\n"
- "ssubl v25.8h, v25.8b, v19.8b\n"
- "ssubl v20.8h, v20.8b, v19.8b\n"
+ "ssubl v9.8h, v9.8b, v16.8b\n"
"add x24, x24, x17\n"
"add x23, x23, x17\n"
+ "ldr x20, [x15, #0x20]\n"
"add x22, x22, x17\n"
"add x21, x21, x17\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v22.s }[0], [x24], #0x4\n"
- "ld1 { v4.s }[0], [x23], #0x4\n"
- "ld1 { v8.s }[0], [x22], #0x4\n"
- "ld1 { v27.s }[0], [x21], #0x4\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v22.h }[2], [x24], #0x2\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v8.h }[2], [x22], #0x2\n"
- "ld1 { v27.h }[2], [x21], #0x2\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[6], [x24]\n"
- "ld1 { v4.b }[6], [x23]\n"
- "ld1 { v8.b }[6], [x22]\n"
- "ld1 { v27.b }[6], [x21]\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "tbz x8, #2, 9f\n"
+ "ld1 { v19.s }[0], [x24], #0x4\n"
+ "ld1 { v21.s }[0], [x23], #0x4\n"
+ "ld1 { v29.s }[0], [x22], #0x4\n"
+ "ld1 { v22.s }[0], [x21], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 8f\n"
+ "ld1 { v19.h }[2], [x24], #0x2\n"
+ "ld1 { v21.h }[2], [x23], #0x2\n"
+ "ld1 { v29.h }[2], [x22], #0x2\n"
+ "ld1 { v22.h }[2], [x21], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[6], [x24]\n"
+ "ld1 { v21.b }[6], [x23]\n"
+ "ld1 { v29.b }[6], [x22]\n"
+ "ld1 { v22.b }[6], [x21]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[4], [x24]\n"
- "ld1 { v4.b }[4], [x23]\n"
- "ld1 { v8.b }[4], [x22]\n"
- "ld1 { v27.b }[4], [x21]\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[4], [x24]\n"
+ "ld1 { v21.b }[4], [x23]\n"
+ "ld1 { v29.b }[4], [x22]\n"
+ "ld1 { v22.b }[4], [x21]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v22.h }[0], [x24], #0x2\n"
- "ld1 { v4.h }[0], [x23], #0x2\n"
- "ld1 { v8.h }[0], [x22], #0x2\n"
- "ld1 { v27.h }[0], [x21], #0x2\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[2], [x24]\n"
- "ld1 { v4.b }[2], [x23]\n"
- "ld1 { v8.b }[2], [x22]\n"
- "ld1 { v27.b }[2], [x21]\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x8, #1, 10f\n"
+ "ld1 { v19.h }[0], [x24], #0x2\n"
+ "ld1 { v21.h }[0], [x23], #0x2\n"
+ "ld1 { v29.h }[0], [x22], #0x2\n"
+ "ld1 { v22.h }[0], [x21], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[2], [x24]\n"
+ "ld1 { v21.b }[2], [x23]\n"
+ "ld1 { v29.b }[2], [x22]\n"
+ "ld1 { v22.b }[2], [x21]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v22.b }[0], [x24]\n"
- "ld1 { v4.b }[0], [x23]\n"
- "ld1 { v8.b }[0], [x22]\n"
- "ld1 { v27.b }[0], [x21]\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x8, #0, 11f\n"
+ "ld1 { v19.b }[0], [x24]\n"
+ "ld1 { v21.b }[0], [x23]\n"
+ "ld1 { v29.b }[0], [x22]\n"
+ "ld1 { v22.b }[0], [x21]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "usubl v22.8h, v22.8b, v14.8b\n"
- "smlal v9.4s, v22.4h, v26.4h\n"
- "smlal2 v24.4s, v22.8h, v26.8h\n"
+ "usubl v19.8h, v19.8b, v11.8b\n"
+ "usubl v21.8h, v21.8b, v11.8b\n"
"ldr x20, [x15, #0x28]\n"
- "smlal v7.4s, v22.4h, v5.4h\n"
- "smlal2 v0.4s, v22.8h, v5.8h\n"
- "usubl v4.8h, v4.8b, v14.8b\n"
- "usubl v8.8h, v8.8b, v14.8b\n"
- "smlal v2.4s, v22.4h, v16.4h\n"
- "smlal2 v30.4s, v22.8h, v16.8h\n"
+ "usubl v29.8h, v29.8b, v11.8b\n"
+ "usubl v22.8h, v22.8b, v11.8b\n"
+ "usubl v20.8h, v20.8b, v11.8b\n"
+ "smlal v2.4s, v19.4h, v25.4h\n"
+ "smlal2 v1.4s, v19.8h, v25.8h\n"
+ "smlal v8.4s, v19.4h, v3.4h\n"
+ "smlal2 v30.4s, v19.8h, v3.8h\n"
"add x20, x20, x17\n"
- "smlal v10.4s, v22.4h, v23.4h\n"
- "smlal2 v6.4s, v22.8h, v23.8h\n"
- "usubl v27.8h, v27.8b, v14.8b\n"
- "smlal v9.4s, v4.4h, v23.4h\n"
- "smlal2 v24.4s, v4.8h, v23.8h\n"
- "usubl v15.8h, v15.8b, v14.8b\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "smlal2 v0.4s, v8.8h, v1.8h\n"
- "smlal v9.4s, v27.4h, v18.4h\n"
- "smlal2 v24.4s, v27.8h, v18.8h\n"
- "smlal v7.4s, v27.4h, v26.4h\n"
- "smlal2 v0.4s, v27.8h, v26.8h\n"
- "smlal v2.4s, v27.4h, v1.4h\n"
- "smlal2 v30.4s, v27.8h, v1.8h\n"
- "smlal v10.4s, v27.4h, v16.4h\n"
- "smlal2 v6.4s, v27.8h, v16.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "smlal v0.4s, v19.4h, v4.4h\n"
+ "smlal2 v24.4s, v19.8h, v4.8h\n"
+ "smlal v31.4s, v19.4h, v15.4h\n"
+ "smlal2 v27.4s, v19.8h, v15.8h\n"
+ "smlal v2.4s, v21.4h, v15.4h\n"
+ "smlal2 v1.4s, v21.8h, v15.8h\n"
+ "smlal v8.4s, v29.4h, v5.4h\n"
+ "smlal2 v30.4s, v29.8h, v5.8h\n"
+ "smlal v0.4s, v22.4h, v5.4h\n"
+ "smlal2 v24.4s, v22.8h, v5.8h\n"
+ "smlal v31.4s, v22.4h, v4.4h\n"
+ "smlal2 v27.4s, v22.8h, v4.8h\n"
+ "smlal v2.4s, v22.4h, v10.4h\n"
+ "smlal2 v1.4s, v22.8h, v10.8h\n"
+ "smlal v8.4s, v22.4h, v25.4h\n"
+ "smlal2 v30.4s, v22.8h, v25.8h\n"
+ "tbz x8, #2, 13f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 12f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x8, #1, 14f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x8, #0, 15f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"15:" // Oddments: Load (3, 0): Bit 2: End
- "usubl v21.8h, v21.8b, v14.8b\n"
- "smlal v2.4s, v21.4h, v31.4h\n"
- "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x30]\n"
- "smlal v9.4s, v15.4h, v25.4h\n"
- "smlal2 v24.4s, v15.8h, v25.8h\n"
+ "smlal v2.4s, v20.4h, v7.4h\n"
+ "smlal2 v1.4s, v20.8h, v7.8h\n"
+ "smlal v8.4s, v20.4h, v6.4h\n"
+ "smlal2 v30.4s, v20.8h, v6.8h\n"
+ "smlal v31.4s, v20.4h, v3.4h\n"
+ "smlal2 v27.4s, v20.8h, v3.8h\n"
+ "smlal v0.4s, v17.4h, v6.4h\n"
+ "smlal2 v24.4s, v17.8h, v6.8h\n"
"add x20, x20, x17\n"
- "smlal v7.4s, v15.4h, v31.4h\n"
- "smlal2 v0.4s, v15.8h, v31.8h\n"
- "smlal v2.4s, v15.4h, v26.4h\n"
- "smlal2 v30.4s, v15.8h, v26.8h\n"
- "smlal v10.4s, v15.4h, v5.4h\n"
- "smlal2 v6.4s, v15.8h, v5.8h\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "smlal v0.4s, v20.4h, v25.4h\n"
+ "smlal2 v24.4s, v20.8h, v25.8h\n"
+ "tbz x8, #2, 17f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 16f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 18f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 19f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"19:" // Oddments: Load (3, 3): Bit 2: End
- "usubl v28.8h, v28.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x38]\n"
- "smlal v10.4s, v28.4h, v20.4h\n"
- "smlal2 v6.4s, v28.8h, v20.8h\n"
+ "smlal v31.4s, v16.4h, v9.4h\n"
+ "smlal2 v27.4s, v16.8h, v9.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 21f\n"
- "ld1 { v22.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 20f\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[6], [x20]\n"
+ "tbz x8, #2, 21f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 20f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[4], [x20]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 1): Bit 2: Unset
- "tbz x7, #1, 22f\n"
- "ld1 { v22.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[2], [x20]\n"
+ "tbz x8, #1, 22f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v22.b }[0], [x20]\n"
+ "tbz x8, #0, 23f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"23:" // Oddments: Load (0, 1): Bit 2: End
- "usubl v22.8h, v22.8b, v14.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x40]\n"
- "smlal v9.4s, v22.4h, v16.4h\n"
- "smlal2 v24.4s, v22.8h, v16.8h\n"
- "smlal v7.4s, v22.4h, v23.4h\n"
- "smlal2 v0.4s, v22.8h, v23.8h\n"
+ "smlal v2.4s, v17.4h, v4.4h\n"
+ "smlal2 v1.4s, v17.8h, v4.8h\n"
+ "smlal v8.4s, v17.4h, v15.4h\n"
+ "smlal2 v30.4s, v17.8h, v15.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "tbz x8, #2, 25f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 24f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (0, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (0, 2): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "tbz x8, #1, 26f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (0, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "tbz x8, #0, 27f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"27:" // Oddments: Load (0, 2): Bit 2: End
- "usubl v21.8h, v21.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x48]\n"
- "smlal v9.4s, v21.4h, v1.4h\n"
- "smlal2 v24.4s, v21.8h, v1.8h\n"
- "smlal v7.4s, v21.4h, v16.4h\n"
- "smlal2 v0.4s, v21.8h, v16.8h\n"
+ "smlal v2.4s, v16.4h, v5.4h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal v8.4s, v16.4h, v4.4h\n"
+ "smlal2 v30.4s, v16.8h, v4.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "tbz x8, #2, 29f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 28f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
- "tbz x7, #1, 30f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "tbz x8, #1, 30f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "tbz x8, #0, 31f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "usubl v28.8h, v28.8b, v14.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x50]\n"
- "smlal v9.4s, v28.4h, v20.4h\n"
- "smlal2 v24.4s, v28.8h, v20.8h\n"
- "smlal v7.4s, v28.4h, v25.4h\n"
- "smlal2 v0.4s, v28.8h, v25.8h\n"
+ "smlal v2.4s, v17.4h, v9.4h\n"
+ "smlal2 v1.4s, v17.8h, v9.8h\n"
+ "smlal v8.4s, v17.4h, v7.4h\n"
+ "smlal2 v30.4s, v17.8h, v7.8h\n"
+ "smlal v0.4s, v17.4h, v10.4h\n"
+ "smlal2 v24.4s, v17.8h, v10.8h\n"
+ "smlal v31.4s, v17.4h, v25.4h\n"
"add x20, x20, x17\n"
- "smlal v2.4s, v28.4h, v18.4h\n"
- "smlal2 v30.4s, v28.8h, v18.8h\n"
- "smlal v10.4s, v28.4h, v26.4h\n"
- "smlal2 v6.4s, v28.8h, v26.8h\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "smlal2 v27.4s, v17.8h, v25.8h\n"
+ "tbz x8, #2, 33f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 32f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (1, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (1, 0): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "tbz x8, #1, 34f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (1, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "tbz x8, #0, 35f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"35:" // Oddments: Load (1, 0): Bit 2: End
- "usubl v8.8h, v8.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x58]\n"
- "smlal v9.4s, v8.4h, v5.4h\n"
- "smlal2 v24.4s, v8.8h, v5.8h\n"
- "smlal v2.4s, v8.4h, v23.4h\n"
- "smlal2 v30.4s, v8.8h, v23.8h\n"
+ "smlal v2.4s, v16.4h, v3.4h\n"
+ "smlal2 v1.4s, v16.8h, v3.8h\n"
+ "smlal v0.4s, v16.4h, v15.4h\n"
+ "smlal2 v24.4s, v16.8h, v15.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "tbz x8, #2, 37f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 36f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "tbz x8, #1, 38f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "tbz x8, #0, 39f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"39:" // Oddments: Load (1, 3): Bit 2: End
- "usubl v8.8h, v8.8b, v14.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x60]\n"
- "smlal v7.4s, v8.4h, v18.4h\n"
- "smlal2 v0.4s, v8.8h, v18.8h\n"
- "smlal v10.4s, v8.4h, v1.4h\n"
- "smlal2 v6.4s, v8.8h, v1.8h\n"
+ "smlal v8.4s, v17.4h, v10.4h\n"
+ "smlal2 v30.4s, v17.8h, v10.8h\n"
+ "smlal v31.4s, v17.4h, v5.4h\n"
+ "smlal2 v27.4s, v17.8h, v5.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 41f\n"
- "ld1 { v17.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 40f\n"
- "ld1 { v17.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[6], [x20]\n"
+ "tbz x8, #2, 41f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 40f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[4], [x20]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 42f\n"
- "ld1 { v17.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[2], [x20]\n"
+ "tbz x8, #1, 42f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
- "ld1 { v17.b }[0], [x20]\n"
+ "tbz x8, #0, 43f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"43:" // Oddments: Load (2, 0): Bit 2: End
- "usubl v17.8h, v17.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x68]\n"
- "smlal v9.4s, v17.4h, v31.4h\n"
- "smlal2 v24.4s, v17.8h, v31.8h\n"
- "smlal v2.4s, v17.4h, v5.4h\n"
- "smlal2 v30.4s, v17.8h, v5.8h\n"
+ "smlal v2.4s, v16.4h, v6.4h\n"
+ "smlal2 v1.4s, v16.8h, v6.8h\n"
+ "smlal v0.4s, v16.4h, v3.4h\n"
+ "smlal2 v24.4s, v16.8h, v3.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 45f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 44f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "tbz x8, #2, 45f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 44f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x8, #1, 46f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x8, #0, 47f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "usubl v23.8h, v23.8b, v14.8b\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
"ldr x20, [x15, #0x70]\n"
- "smlal v7.4s, v23.4h, v20.4h\n"
- "smlal2 v0.4s, v23.8h, v20.8h\n"
- "smlal v10.4s, v23.4h, v18.4h\n"
- "smlal2 v6.4s, v23.8h, v18.8h\n"
+ "smlal v8.4s, v17.4h, v9.4h\n"
+ "smlal2 v30.4s, v17.8h, v9.8h\n"
+ "smlal v31.4s, v17.4h, v10.4h\n"
+ "smlal2 v27.4s, v17.8h, v10.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 49f\n"
- "ld1 { v5.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 48f\n"
- "ld1 { v5.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[6], [x20]\n"
+ "tbz x8, #2, 49f\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 48f\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[4], [x20]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 50f\n"
- "ld1 { v5.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[2], [x20]\n"
+ "tbz x8, #1, 50f\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
- "ld1 { v5.b }[0], [x20]\n"
+ "tbz x8, #0, 51f\n"
+ "ld1 { v16.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "usubl v5.8h, v5.8b, v14.8b\n"
+ "usubl v16.8h, v16.8b, v11.8b\n"
"ldr x20, [x15, #0x78]\n"
- "smlal v2.4s, v5.4h, v25.4h\n"
- "smlal2 v30.4s, v5.8h, v25.8h\n"
- "smlal v10.4s, v5.4h, v31.4h\n"
- "smlal2 v6.4s, v5.8h, v31.8h\n"
+ "smlal v0.4s, v16.4h, v7.4h\n"
+ "smlal2 v24.4s, v16.8h, v7.8h\n"
+ "smlal v31.4s, v16.4h, v6.4h\n"
+ "smlal2 v27.4s, v16.8h, v6.8h\n"
"add x20, x20, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v23.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v23.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[6], [x20]\n"
+ "tbz x8, #2, 53f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x8, #1, 52f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[4], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v23.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[2], [x20]\n"
+ "tbz x8, #1, 54f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v23.b }[0], [x20]\n"
+ "tbz x8, #0, 55f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "usubl v23.8h, v23.8b, v14.8b\n"
- "smlal v2.4s, v23.4h, v20.4h\n"
- "smlal2 v30.4s, v23.8h, v20.8h\n"
- "smlal v10.4s, v23.4h, v25.4h\n"
- "smlal2 v6.4s, v23.8h, v25.8h\n"
- "tbz x7, #2, 57f\n"
- "ld1 { v15.4s }, [x13], #0x10\n"
- "ld1 { v19.4s }, [x12], #0x10\n"
- "tbz x7, #1, 56f\n"
+ "usubl v17.8h, v17.8b, v11.8b\n"
+ "smlal v0.4s, v17.4h, v9.4h\n"
+ "smlal2 v24.4s, v17.8h, v9.8h\n"
+ "smlal v31.4s, v17.4h, v7.4h\n"
+ "smlal2 v27.4s, v17.8h, v7.8h\n"
+ "tbz x8, #2, 57f\n"
+ "ld1 { v16.4s }, [x13], #0x10\n"
+ "ld1 { v23.4s }, [x12], #0x10\n"
+ "tbz x8, #1, 56f\n"
"ld1 { v18.d }[0], [x13], #0x8\n"
"ld1 { v22.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v18.s }[2], [x13]\n"
"ld1 { v22.s }[2], [x12]\n"
"b 59f\n"
"56:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x8, #0, 59f\n"
"ld1 { v18.s }[0], [x13]\n"
"ld1 { v22.s }[0], [x12]\n"
"b 59f\n"
"57:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 58f\n"
- "ld1 { v15.d }[0], [x13], #0x8\n"
- "ld1 { v19.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 59f\n"
- "ld1 { v15.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x12]\n"
+ "tbz x8, #1, 58f\n"
+ "ld1 { v16.d }[0], [x13], #0x8\n"
+ "ld1 { v23.d }[0], [x12], #0x8\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v16.s }[2], [x13]\n"
+ "ld1 { v23.s }[2], [x12]\n"
"b 59f\n"
"58:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
- "ld1 { v15.s }[0], [x13]\n"
- "ld1 { v19.s }[0], [x12]\n"
+ "tbz x8, #0, 59f\n"
+ "ld1 { v16.s }[0], [x13]\n"
+ "ld1 { v23.s }[0], [x12]\n"
"59:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v9.4s, v9.4s, v15.4s\n"
- "and v17.16b, v9.16b, v19.16b\n"
- "add x11, x11, x16\n"
+ "sqrdmulh v2.4s, v2.4s, v16.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v18.4s\n"
"add x10, x10, x16\n"
- "sqrdmulh v24.4s, v24.4s, v18.4s\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
"add x9, x9, x16\n"
+ "sqrdmulh v8.4s, v8.4s, v16.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v16.4s\n"
"add x28, x28, x16\n"
- "and v20.16b, v24.16b, v22.16b\n"
- "sqrdmulh v7.4s, v7.4s, v15.4s\n"
- "sqrdmulh v2.4s, v2.4s, v15.4s\n"
- "sqrdmulh v10.4s, v10.4s, v15.4s\n"
- "sqadd v9.4s, v9.4s, v17.4s\n"
- "sshr v20.4s, v20.4s, #0x1f\n"
- "and v21.16b, v7.16b, v19.16b\n"
- "sqrdmulh v0.4s, v0.4s, v18.4s\n"
- "and v15.16b, v2.16b, v19.16b\n"
+ "add x27, x27, x16\n"
+ "sqrdmulh v31.4s, v31.4s, v16.4s\n"
"sqrdmulh v30.4s, v30.4s, v18.4s\n"
- "and v23.16b, v10.16b, v19.16b\n"
- "sqrdmulh v6.4s, v6.4s, v18.4s\n"
- "sqadd v24.4s, v24.4s, v20.4s\n"
+ "and v17.16b, v2.16b, v23.16b\n"
+ "and v16.16b, v1.16b, v22.16b\n"
+ "and v21.16b, v8.16b, v23.16b\n"
+ "and v20.16b, v0.16b, v23.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v18.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v18.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sshr v21.4s, v21.4s, #0x1f\n"
- "and v18.16b, v0.16b, v22.16b\n"
- "sshr v15.4s, v15.4s, #0x1f\n"
- "and v17.16b, v30.16b, v22.16b\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v28.16b, v6.16b, v22.16b\n"
- "sqadd v7.4s, v7.4s, v21.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v2.4s, v2.4s, v15.4s\n"
+ "and v19.16b, v30.16b, v22.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v18.16b, v24.16b, v22.16b\n"
+ "sqadd v2.4s, v2.4s, v17.4s\n"
+ "and v17.16b, v31.16b, v23.16b\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v16.16b, v27.16b, v22.16b\n"
+ "sqadd v8.4s, v8.4s, v21.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "sqadd v10.4s, v10.4s, v23.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "srshl v9.4s, v9.4s, v19.4s\n"
- "srshl v7.4s, v7.4s, v19.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v2.4s, v2.4s, v19.4s\n"
- "sqadd v30.4s, v30.4s, v17.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
- "sqadd v6.4s, v6.4s, v28.4s\n"
- "srshl v24.4s, v24.4s, v22.4s\n"
- "sqxtn v9.4h, v9.4s\n"
- "srshl v0.4s, v0.4s, v22.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqadd v0.4s, v0.4s, v20.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v2.4s, v2.4s, v23.4s\n"
+ "srshl v8.4s, v8.4s, v23.4s\n"
+ "sqadd v31.4s, v31.4s, v17.4s\n"
+ "sqadd v30.4s, v30.4s, v19.4s\n"
+ "srshl v0.4s, v0.4s, v23.4s\n"
+ "sqadd v24.4s, v24.4s, v18.4s\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v22.4s\n"
+ "srshl v31.4s, v31.4s, v23.4s\n"
"sqxtn v2.4h, v2.4s\n"
- "srshl v6.4s, v6.4s, v22.4s\n"
- "sqxtn v10.4h, v10.4s\n"
- "sqxtn2 v9.8h, v24.4s\n"
- "sqxtn2 v7.8h, v0.4s\n"
- "sqxtn2 v2.8h, v30.4s\n"
- "sqxtn2 v10.8h, v6.4s\n"
- "sqadd v9.8h, v9.8h, v13.8h\n"
- "sqadd v7.8h, v7.8h, v13.8h\n"
- "sqadd v2.8h, v2.8h, v13.8h\n"
- "sqadd v10.8h, v10.8h, v13.8h\n"
- "smax v9.8h, v9.8h, v29.8h\n"
- "smax v7.8h, v7.8h, v29.8h\n"
- "smax v2.8h, v2.8h, v29.8h\n"
- "smax v10.8h, v10.8h, v29.8h\n"
- "smin v9.8h, v9.8h, v12.8h\n"
- "smin v7.8h, v7.8h, v12.8h\n"
- "smin v2.8h, v2.8h, v12.8h\n"
- "smin v10.8h, v10.8h, v12.8h\n"
- "uzp1 v9.16b, v9.16b, v9.16b\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v24.4s, v24.4s, v22.4s\n"
+ "sqxtn v0.4h, v0.4s\n"
+ "srshl v27.4s, v27.4s, v22.4s\n"
+ "sqxtn v31.4h, v31.4s\n"
+ "sqxtn2 v2.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v30.4s\n"
+ "sqxtn2 v0.8h, v24.4s\n"
+ "sqxtn2 v31.8h, v27.4s\n"
+ "sqadd v2.8h, v2.8h, v12.8h\n"
+ "sqadd v8.8h, v8.8h, v12.8h\n"
+ "sqadd v0.8h, v0.8h, v12.8h\n"
+ "sqadd v31.8h, v31.8h, v12.8h\n"
+ "smax v2.8h, v2.8h, v14.8h\n"
+ "smax v8.8h, v8.8h, v14.8h\n"
+ "smax v0.8h, v0.8h, v14.8h\n"
+ "smax v31.8h, v31.8h, v14.8h\n"
+ "smin v2.8h, v2.8h, v13.8h\n"
+ "smin v8.8h, v8.8h, v13.8h\n"
+ "smin v0.8h, v0.8h, v13.8h\n"
+ "smin v31.8h, v31.8h, v13.8h\n"
"uzp1 v2.16b, v2.16b, v2.16b\n"
- "uzp1 v10.16b, v10.16b, v10.16b\n"
- "tbz x7, #2, 61f\n"
- "st1 { v9.s }[0], [x11], #0x4\n"
- "st1 { v7.s }[0], [x10], #0x4\n"
- "st1 { v2.s }[0], [x9], #0x4\n"
- "st1 { v10.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 60f\n"
- "st1 { v9.h }[2], [x11], #0x2\n"
- "st1 { v7.h }[2], [x10], #0x2\n"
- "st1 { v2.h }[2], [x9], #0x2\n"
- "st1 { v10.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[6], [x11], #0x1\n"
- "st1 { v7.b }[6], [x10], #0x1\n"
- "st1 { v2.b }[6], [x9], #0x1\n"
- "st1 { v10.b }[6], [x28], #0x1\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v0.16b, v0.16b, v0.16b\n"
+ "uzp1 v31.16b, v31.16b, v31.16b\n"
+ "tbz x8, #2, 61f\n"
+ "st1 { v2.s }[0], [x10], #0x4\n"
+ "st1 { v8.s }[0], [x9], #0x4\n"
+ "st1 { v0.s }[0], [x28], #0x4\n"
+ "st1 { v31.s }[0], [x27], #0x4\n"
+ "tbz x8, #1, 60f\n"
+ "st1 { v2.h }[2], [x10], #0x2\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v0.h }[2], [x28], #0x2\n"
+ "st1 { v31.h }[2], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[6], [x10], #0x1\n"
+ "st1 { v8.b }[6], [x9], #0x1\n"
+ "st1 { v0.b }[6], [x28], #0x1\n"
+ "st1 { v31.b }[6], [x27], #0x1\n"
"b 63f\n"
"60:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[4], [x11], #0x1\n"
- "st1 { v7.b }[4], [x10], #0x1\n"
- "st1 { v2.b }[4], [x9], #0x1\n"
- "st1 { v10.b }[4], [x28], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[4], [x10], #0x1\n"
+ "st1 { v8.b }[4], [x9], #0x1\n"
+ "st1 { v0.b }[4], [x28], #0x1\n"
+ "st1 { v31.b }[4], [x27], #0x1\n"
"b 63f\n"
"61:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "st1 { v9.h }[0], [x11], #0x2\n"
- "st1 { v7.h }[0], [x10], #0x2\n"
- "st1 { v2.h }[0], [x9], #0x2\n"
- "st1 { v10.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[2], [x11], #0x1\n"
- "st1 { v7.b }[2], [x10], #0x1\n"
- "st1 { v2.b }[2], [x9], #0x1\n"
- "st1 { v10.b }[2], [x28], #0x1\n"
+ "tbz x8, #1, 62f\n"
+ "st1 { v2.h }[0], [x10], #0x2\n"
+ "st1 { v8.h }[0], [x9], #0x2\n"
+ "st1 { v0.h }[0], [x28], #0x2\n"
+ "st1 { v31.h }[0], [x27], #0x2\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[2], [x10], #0x1\n"
+ "st1 { v8.b }[2], [x9], #0x1\n"
+ "st1 { v0.b }[2], [x28], #0x1\n"
+ "st1 { v31.b }[2], [x27], #0x1\n"
"b 63f\n"
"62:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "st1 { v9.b }[0], [x11], #0x1\n"
- "st1 { v7.b }[0], [x10], #0x1\n"
- "st1 { v2.b }[0], [x9], #0x1\n"
- "st1 { v10.b }[0], [x28], #0x1\n"
+ "tbz x8, #0, 63f\n"
+ "st1 { v2.b }[0], [x10], #0x1\n"
+ "st1 { v8.b }[0], [x9], #0x1\n"
+ "st1 { v0.b }[0], [x28], #0x1\n"
+ "st1 { v31.b }[0], [x27], #0x1\n"
"63:" // Oddments: Bit 2: End
"64:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index e9db8e1322..34758ed6a3 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -55,7 +55,7 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[25];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -100,1294 +100,1294 @@ void a64_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "ldr x7, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x2, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x8, x7, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v6.16b }, [x20]\n"
+ "mov x3, #0x0\n"
+ "mov x4, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x17, x2, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v13.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_b_offset]\n"
"add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v15.16b }, [x21]\n"
- "ld1r { v13.8h }, [x20]\n"
+ "ld1r { v14.16b }, [x21]\n"
+ "ld1r { v25.8h }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_minval]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v17.8h }, [x21]\n"
- "ld1r { v24.8h }, [x20]\n"
- "mov x17, #0x0\n"
- "mov x16, #0x0\n"
- "add x15, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x11, x10, [x22, #0x0]\n"
- "ldp x9, x28, [x22, #0x10]\n"
- "cbz x8, 3f\n"
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "subs x8, x8, #0x1\n"
- "ssubl v11.8h, v11.8b, v15.8b\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "ssubl v22.8h, v22.8b, v15.8b\n"
- "ssubl v14.8h, v14.8b, v15.8b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "ssubl v28.8h, v28.8b, v15.8b\n"
- "ssubl v18.8h, v18.8b, v15.8b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "ssubl v9.8h, v9.8b, v15.8b\n"
- "ssubl v26.8h, v26.8b, v15.8b\n"
- "ldr d4, [x14, #0x40]\n"
+ "ld1r { v23.8h }, [x21]\n"
+ "ld1r { v12.8h }, [x20]\n"
+ "ldp x16, x15, [x22, #0x0]\n"
+ "ldp x14, x13, [x22, #0x10]\n"
+ "cbz x17, 3f\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "subs x17, x17, #0x1\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "ssubl v16.8h, v16.8b, v14.8b\n"
+ "ssubl v11.8h, v11.8b, v14.8b\n"
+ "ldr d7, [x6, #0x40]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "ldr q5, [x20, #0x0]\n"
- "ldr q3, [x20, #0x10]\n"
+ "ssubl v29.8h, v29.8b, v14.8b\n"
+ "ssubl v15.8h, v15.8b, v14.8b\n"
+ "ssubl v27.8h, v27.8b, v14.8b\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v22.8h, v22.8b, v14.8b\n"
+ "ssubl v5.8h, v5.8b, v14.8b\n"
+ "ldr q19, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
"add x20, x20, #0x20\n"
+ "ssubl v7.8h, v7.8b, v14.8b\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d25, [x27, x17]\n"
- "ldr d27, [x26, x17]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d1, [x25, x17]\n"
- "ldr d2, [x24, x17]\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "ldr d12, [x23, x17]\n"
- "ldr d16, [x22, x17]\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "ldr d23, [x21, x17]\n"
- "ldr d10, [x20, x17]\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "ldr d26, [x27, x3]\n"
+ "ldr d31, [x26, x3]\n"
+ "ldr d20, [x25, x3]\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr d6, [x23, x3]\n"
+ "ldr d9, [x22, x3]\n"
+ "ldr d0, [x21, x3]\n"
+ "ldr d18, [x20, x3]\n"
+ "usubl v26.8h, v26.8b, v13.8b\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr q30, [x13, #0x0]\n"
- "ldr q29, [x12, #0x0]\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x21, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "ldr x25, [x15, #0x60]\n"
- "ldr x24, [x15, #0x80]\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "ldr d27, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "ldr d25, [x20, x17]\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal v20.4s, v27.4h, v28.4h\n"
- "smlal v19.4s, v25.4h, v18.4h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "ldr d1, [x25, x17]\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "ldr d2, [x24, x17]\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v28.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v18.8h\n"
- "ldr d25, [x22, x17]\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v20.4s, v1.4h, v11.4h\n"
- "smlal v19.4s, v2.4h, v22.4h\n"
- "ldr x24, [x15, #0x50]\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "ldr d16, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "ldr d12, [x20, x17]\n"
- "ldr x23, [x15, #0x48]\n"
- "smlal2 v0.4s, v1.8h, v11.8h\n"
- "smlal2 v31.4s, v2.8h, v22.8h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal v20.4s, v27.4h, v18.4h\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x22, [x15, #0xa0]\n"
- "smlal v19.4s, v25.4h, v9.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "ldr d23, [x25, x17]\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "ldr d11, [x24, x17]\n"
- "usubl v11.8h, v11.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v18.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v9.8h\n"
- "ldr d25, [x21, x17]\n"
- "ldr x21, [x15, #0xb0]\n"
- "smlal v21.4s, v16.4h, v18.4h\n"
- "smlal v20.4s, v12.4h, v22.4h\n"
- "smlal v19.4s, v23.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "ldr d10, [x20, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
- "smlal v5.4s, v11.4h, v9.4h\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal2 v8.4s, v16.8h, v18.8h\n"
- "ldr d18, [x22, x17]\n"
- "ldr d16, [x21, x17]\n"
- "smlal2 v0.4s, v12.8h, v22.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal2 v31.4s, v23.8h, v14.8h\n"
- "ldr q14, [x13, #0x10]\n"
- "smlal v21.4s, v27.4h, v9.4h\n"
- "smlal v20.4s, v25.4h, v26.4h\n"
- "smlal v19.4s, v10.4h, v28.4h\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "ldr x21, [x15, #0xc0]\n"
- "smlal2 v3.4s, v11.8h, v9.8h\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v26.4h\n"
+ "ldr q17, [x7, #0x0]\n"
+ "ldr q30, [x8, #0x0]\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "ldr x24, [x5, #0x58]\n"
+ "ldr x23, [x5, #0x78]\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "ldr x22, [x5, #0x60]\n"
+ "ldr x21, [x5, #0x80]\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "ldr q26, [x7, #0x10]\n"
+ "ldr x20, [x5, #0x68]\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "ldr d31, [x24, x3]\n"
+ "ldr x12, [x5, #0x88]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "ldr x11, [x5, #0x40]\n"
+ "ldr x10, [x5, #0x70]\n"
+ "add x6, x6, #0x48\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x23, x3]\n"
+ "ldr x9, [x5, #0x98]\n"
+ "subs x17, x17, #0x1\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x28, [x5, #0x50]\n"
+ "ldr x27, [x5, #0x48]\n"
+ "add x7, x7, #0x20\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "ldr d20, [x22, x3]\n"
+ "ldr x26, [x5, #0x90]\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "ldr x25, [x5, #0xa8]\n"
+ "ldr x24, [x5, #0xa0]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "ldr d31, [x21, x3]\n"
+ "ldr x23, [x5, #0xb0]\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "ldr d6, [x20, x3]\n"
+ "ldr x22, [x5, #0xb8]\n"
+ "smlal v3.4s, v28.4h, v27.4h\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "ldr x21, [x5, #0xc0]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v8.4s, v27.8h, v9.8h\n"
- "ldr d27, [x21, x17]\n"
- "smlal2 v0.4s, v25.8h, v26.8h\n"
- "ldr q25, [x12, #0x10]\n"
- "smlal2 v31.4s, v10.8h, v28.8h\n"
- "smlal v21.4s, v11.4h, v28.4h\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
- "add x14, x14, #0x48\n"
- "smlal v20.4s, v18.4h, v7.4h\n"
- "smlal v19.4s, v16.4h, v7.4h\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "add x17, x17, #0x8\n"
- "smlal2 v3.4s, v1.8h, v26.8h\n"
- "smlal v5.4s, v12.4h, v7.4h\n"
- "sqrdmulh v5.4s, v5.4s, v30.4s\n"
- "subs x8, x8, #0x1\n"
- "smlal2 v8.4s, v11.8h, v28.8h\n"
- "smlal2 v0.4s, v18.8h, v7.8h\n"
- "and v28.16b, v5.16b, v29.16b\n"
- "add x13, x13, #0x20\n"
- "smlal2 v31.4s, v16.8h, v7.8h\n"
- "smlal v21.4s, v2.4h, v7.4h\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v27.8h\n"
+ "ldr d28, [x12, x3]\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "ldr d9, [x11, x3]\n"
+ "smlal v10.4s, v20.4h, v16.4h\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal2 v21.4s, v20.8h, v16.8h\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "ldr d16, [x10, x3]\n"
+ "smlal v3.4s, v31.4h, v11.4h\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "smlal2 v24.4s, v31.8h, v11.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "ldr d0, [x9, x3]\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "smlal v10.4s, v6.4h, v27.4h\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "smlal2 v21.4s, v6.8h, v27.8h\n"
+ "ldr d6, [x28, x3]\n"
+ "smlal v3.4s, v28.4h, v2.4h\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal v8.4s, v9.4h, v27.4h\n"
+ "smlal2 v4.4s, v9.8h, v27.8h\n"
+ "ldr d9, [x27, x3]\n"
+ "ldr d27, [x26, x3]\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr d28, [x25, x3]\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "ldr d18, [x24, x3]\n"
+ "smlal v10.4s, v16.4h, v11.4h\n"
+ "smlal2 v21.4s, v16.8h, v11.8h\n"
+ "ldr d11, [x23, x3]\n"
+ "smlal v3.4s, v0.4h, v29.4h\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v27.8h, v27.8b, v13.8b\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v6.4h, v2.4h\n"
+ "smlal2 v24.4s, v0.8h, v29.8h\n"
+ "ldr d29, [x22, x3]\n"
+ "smlal2 v1.4s, v6.8h, v2.8h\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "usubl v11.8h, v11.8b, v13.8b\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "ldr d9, [x21, x3]\n"
+ "smlal v10.4s, v27.4h, v22.4h\n"
+ "smlal v3.4s, v28.4h, v15.4h\n"
+ "add x3, x3, #0x8\n"
+ "smlal v19.4s, v20.4h, v22.4h\n"
+ "smlal2 v21.4s, v27.8h, v22.8h\n"
+ "ldr q27, [x8, #0x10]\n"
+ "usubl v29.8h, v29.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v15.8h\n"
+ "smlal2 v1.4s, v20.8h, v22.8h\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "add x8, x8, #0x20\n"
+ "smlal v8.4s, v6.4h, v15.4h\n"
+ "smlal2 v4.4s, v6.8h, v15.8h\n"
+ "smlal v10.4s, v18.4h, v5.4h\n"
+ "smlal v3.4s, v11.4h, v5.4h\n"
+ "smlal v19.4s, v16.4h, v5.4h\n"
+ "smlal2 v21.4s, v18.8h, v5.8h\n"
+ "smlal2 v24.4s, v11.8h, v5.8h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal v8.4s, v31.4h, v5.4h\n"
+ "smlal2 v4.4s, v31.8h, v5.8h\n"
+ "smlal v10.4s, v28.4h, v2.4h\n"
+ "smlal v3.4s, v29.4h, v22.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v17.4s\n"
+ "smlal2 v21.4s, v28.8h, v2.8h\n"
+ "smlal2 v24.4s, v29.8h, v22.8h\n"
+ "sqrdmulh v1.4s, v1.4s, v26.4s\n"
+ "smlal v8.4s, v0.4h, v7.4h\n"
+ "and v2.16b, v19.16b, v30.16b\n"
+ "smlal2 v4.4s, v0.8h, v7.8h\n"
+ "smlal v10.4s, v29.4h, v7.4h\n"
+ "smlal v3.4s, v9.4h, v7.4h\n"
+ "and v11.16b, v1.16b, v27.16b\n"
+ "smlal2 v21.4s, v29.8h, v7.8h\n"
+ "smlal2 v24.4s, v9.8h, v7.8h\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqrdmulh v8.4s, v8.4s, v17.4s\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "sqrdmulh v4.4s, v4.4s, v26.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v17.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v17.4s\n"
+ "sqadd v19.4s, v19.4s, v2.4s\n"
+ "and v29.16b, v8.16b, v30.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "and v20.16b, v10.16b, v30.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v26.4s\n"
+ "and v28.16b, v3.16b, v30.16b\n"
+ "sqadd v1.4s, v1.4s, v11.4s\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "and v7.16b, v4.16b, v27.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v2.16b, v21.16b, v27.16b\n"
"sshr v28.4s, v28.4s, #0x1f\n"
- "add x12, x12, #0x20\n"
- "smlal v20.4s, v10.4h, v9.4h\n"
- "smlal v19.4s, v22.4h, v26.4h\n"
- "sqadd v5.4s, v5.4s, v28.4s\n"
- "smlal2 v3.4s, v12.8h, v7.8h\n"
- "smlal2 v8.4s, v2.8h, v7.8h\n"
- "sqrdmulh v3.4s, v3.4s, v14.4s\n"
- "smlal2 v0.4s, v10.8h, v9.8h\n"
- "smlal2 v31.4s, v22.8h, v26.8h\n"
- "and v16.16b, v3.16b, v25.16b\n"
- "smlal v21.4s, v23.4h, v4.4h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "sqrdmulh v21.4s, v21.4s, v30.4s\n"
- "smlal v19.4s, v27.4h, v4.4h\n"
- "smlal2 v8.4s, v23.8h, v4.8h\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "smlal2 v0.4s, v22.8h, v4.8h\n"
- "smlal2 v31.4s, v27.8h, v4.8h\n"
- "sqrdmulh v19.4s, v19.4s, v30.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "and v12.16b, v21.16b, v29.16b\n"
- "sqrdmulh v8.4s, v8.4s, v14.4s\n"
- "and v23.16b, v20.16b, v29.16b\n"
- "sqrdmulh v0.4s, v0.4s, v14.4s\n"
- "and v9.16b, v19.16b, v29.16b\n"
- "sqrdmulh v31.4s, v31.4s, v14.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v12.4s, v12.4s, #0x1f\n"
- "and v18.16b, v8.16b, v25.16b\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v22.16b, v0.16b, v25.16b\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "and v16.16b, v31.16b, v25.16b\n"
- "sqadd v21.4s, v21.4s, v12.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v23.4s\n"
+ "and v22.16b, v24.16b, v27.16b\n"
+ "sqadd v8.4s, v8.4s, v29.4s\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v3.4s, v3.4s, v28.4s\n"
"sshr v22.4s, v22.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v9.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v29.4s\n"
- "srshl v21.4s, v21.4s, v29.4s\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "srshl v20.4s, v20.4s, v29.4s\n"
- "sqadd v0.4s, v0.4s, v22.4s\n"
- "srshl v19.4s, v19.4s, v29.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v25.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v25.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v25.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v25.4s\n"
+ "srshl v19.4s, v19.4s, v30.4s\n"
+ "srshl v8.4s, v8.4s, v30.4s\n"
+ "sqadd v4.4s, v4.4s, v7.4s\n"
+ "srshl v10.4s, v10.4s, v30.4s\n"
+ "sqadd v21.4s, v21.4s, v2.4s\n"
+ "srshl v3.4s, v3.4s, v30.4s\n"
+ "sqadd v24.4s, v24.4s, v22.4s\n"
+ "srshl v1.4s, v1.4s, v27.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "str d5, [x11, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d21, [x10, x16]\n"
+ "srshl v4.4s, v4.4s, v27.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v27.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v27.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "str d20, [x9, x16]\n"
- "str d19, [x28, x16]\n"
- "ldr q5, [x20, #0x0]\n"
- "ldr q3, [x20, #0x10]\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str d19, [x16, x4]\n"
+ "str d8, [x15, x4]\n"
+ "str d10, [x14, x4]\n"
+ "str d3, [x13, x4]\n"
+ "add x4, x4, #0x8\n"
+ "ldr q19, [x20, #0x0]\n"
+ "ldr q1, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "add x16, x16, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d4, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ssubl v11.8h, v11.8b, v15.8b\n"
- "ssubl v22.8h, v22.8b, v15.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ssubl v14.8h, v14.8b, v15.8b\n"
- "ssubl v28.8h, v28.8b, v15.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ldr d25, [x27, x17]\n"
- "ssubl v18.8h, v18.8b, v15.8b\n"
- "ssubl v9.8h, v9.8b, v15.8b\n"
- "ldr d27, [x26, x17]\n"
- "ldr d1, [x25, x17]\n"
- "ssubl v26.8h, v26.8b, v15.8b\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ldr d2, [x24, x17]\n"
- "ldr d12, [x23, x17]\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "ldr d16, [x22, x17]\n"
- "ldr d23, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "ldr d10, [x20, x17]\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldr d7, [x6, #0x40]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ssubl v16.8h, v16.8b, v14.8b\n"
+ "ssubl v11.8h, v11.8b, v14.8b\n"
+ "ssubl v29.8h, v29.8b, v14.8b\n"
+ "ssubl v15.8h, v15.8b, v14.8b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "ssubl v27.8h, v27.8b, v14.8b\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v22.8h, v22.8b, v14.8b\n"
+ "ssubl v5.8h, v5.8b, v14.8b\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "ssubl v7.8h, v7.8b, v14.8b\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "ldr d26, [x27, x3]\n"
+ "ldr d31, [x26, x3]\n"
+ "ldr d20, [x25, x3]\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr d6, [x23, x3]\n"
+ "ldr d9, [x22, x3]\n"
+ "ldr d0, [x21, x3]\n"
+ "usubl v26.8h, v26.8b, v13.8b\n"
+ "ldr d18, [x20, x3]\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr q29, [x13, #0x0]\n"
- "ldr q30, [x12, #0x0]\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x21, [x15, #0x58]\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "ldr x25, [x15, #0x60]\n"
- "ldr x24, [x15, #0x80]\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "ldr d27, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "ldr x23, [x15, #0x68]\n"
- "ldr x22, [x15, #0x88]\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "ldr d25, [x20, x17]\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal v20.4s, v27.4h, v28.4h\n"
- "smlal v19.4s, v25.4h, v18.4h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "ldr d1, [x25, x17]\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "ldr d2, [x24, x17]\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v28.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v18.8h\n"
- "ldr d25, [x22, x17]\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "ldr x25, [x15, #0x98]\n"
- "smlal v20.4s, v1.4h, v11.4h\n"
- "smlal v19.4s, v2.4h, v22.4h\n"
- "ldr x24, [x15, #0x50]\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "ldr d16, [x21, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "ldr d12, [x20, x17]\n"
- "ldr x23, [x15, #0x48]\n"
- "smlal2 v0.4s, v1.8h, v11.8h\n"
- "smlal2 v31.4s, v2.8h, v22.8h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal v20.4s, v27.4h, v18.4h\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x22, [x15, #0xa0]\n"
- "smlal v19.4s, v25.4h, v9.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "ldr d23, [x25, x17]\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "ldr d11, [x24, x17]\n"
- "usubl v11.8h, v11.8b, v6.8b\n"
- "smlal2 v0.4s, v27.8h, v18.8h\n"
- "ldr d27, [x23, x17]\n"
- "smlal2 v31.4s, v25.8h, v9.8h\n"
- "ldr d25, [x21, x17]\n"
- "ldr x21, [x15, #0xb0]\n"
- "smlal v21.4s, v16.4h, v18.4h\n"
- "smlal v20.4s, v12.4h, v22.4h\n"
- "smlal v19.4s, v23.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "ldr d10, [x20, x17]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "usubl v25.8h, v25.8b, v6.8b\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
- "smlal v5.4s, v11.4h, v9.4h\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal2 v8.4s, v16.8h, v18.8h\n"
- "ldr d16, [x22, x17]\n"
- "ldr d18, [x21, x17]\n"
- "smlal2 v0.4s, v12.8h, v22.8h\n"
- "ldr d22, [x20, x17]\n"
- "smlal2 v31.4s, v23.8h, v14.8h\n"
- "ldr q14, [x13, #0x10]\n"
- "smlal v21.4s, v27.4h, v9.4h\n"
- "smlal v20.4s, v25.4h, v26.4h\n"
- "smlal v19.4s, v10.4h, v28.4h\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal2 v3.4s, v11.8h, v9.8h\n"
- "usubl v18.8h, v18.8b, v6.8b\n"
- "smlal v5.4s, v1.4h, v26.4h\n"
- "tst x7, #0x7\n"
- "smlal2 v8.4s, v27.8h, v9.8h\n"
- "ldr d27, [x20, x17]\n"
- "smlal2 v0.4s, v25.8h, v26.8h\n"
- "ldr q25, [x12, #0x10]\n"
- "smlal2 v31.4s, v10.8h, v28.8h\n"
- "smlal v21.4s, v11.4h, v28.4h\n"
- "usubl v22.8h, v22.8b, v6.8b\n"
- "add x17, x17, #0x8\n"
- "smlal v20.4s, v16.4h, v7.4h\n"
- "smlal v19.4s, v18.4h, v7.4h\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "add x13, x13, #0x20\n"
- "smlal2 v3.4s, v1.8h, v26.8h\n"
- "smlal v5.4s, v12.4h, v7.4h\n"
- "sqrdmulh v5.4s, v5.4s, v29.4s\n"
- "add x12, x12, #0x20\n"
- "smlal2 v8.4s, v11.8h, v28.8h\n"
- "smlal2 v0.4s, v16.8h, v7.8h\n"
- "and v16.16b, v5.16b, v30.16b\n"
- "smlal2 v31.4s, v18.8h, v7.8h\n"
- "smlal v21.4s, v2.4h, v7.4h\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "smlal v20.4s, v10.4h, v9.4h\n"
- "smlal v19.4s, v22.4h, v26.4h\n"
- "sqadd v5.4s, v5.4s, v16.4s\n"
- "smlal2 v3.4s, v12.8h, v7.8h\n"
- "smlal2 v8.4s, v2.8h, v7.8h\n"
- "sqrdmulh v3.4s, v3.4s, v14.4s\n"
- "smlal2 v0.4s, v10.8h, v9.8h\n"
- "smlal2 v31.4s, v22.8h, v26.8h\n"
- "and v16.16b, v3.16b, v25.16b\n"
- "smlal v21.4s, v23.4h, v4.4h\n"
- "smlal v20.4s, v22.4h, v4.4h\n"
- "sqrdmulh v21.4s, v21.4s, v29.4s\n"
- "smlal v19.4s, v27.4h, v4.4h\n"
- "smlal2 v8.4s, v23.8h, v4.8h\n"
- "sqrdmulh v20.4s, v20.4s, v29.4s\n"
- "smlal2 v0.4s, v22.8h, v4.8h\n"
- "smlal2 v31.4s, v27.8h, v4.8h\n"
- "sqrdmulh v19.4s, v19.4s, v29.4s\n"
+ "ldr q30, [x7, #0x0]\n"
+ "ldr q17, [x8, #0x0]\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "ldr x20, [x5, #0x58]\n"
+ "ldr x24, [x5, #0x78]\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "ldr x23, [x5, #0x60]\n"
+ "ldr x10, [x5, #0x80]\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "ldr q26, [x7, #0x10]\n"
+ "ldr x22, [x5, #0x68]\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "ldr d31, [x20, x3]\n"
+ "ldr x21, [x5, #0x88]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "ldr x20, [x5, #0x40]\n"
+ "ldr x9, [x5, #0x70]\n"
+ "tst x2, #0x7\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x24, x3]\n"
+ "ldr x28, [x5, #0x98]\n"
+ "add x7, x7, #0x20\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x27, [x5, #0x50]\n"
+ "ldr x26, [x5, #0x48]\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "ldr d20, [x23, x3]\n"
+ "ldr x25, [x5, #0x90]\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "ldr x24, [x5, #0xa8]\n"
+ "ldr x23, [x5, #0xa0]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "ldr d31, [x10, x3]\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "ldr d6, [x22, x3]\n"
+ "smlal v3.4s, v28.4h, v27.4h\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "ldr x22, [x5, #0xb0]\n"
+ "smlal2 v24.4s, v28.8h, v27.8h\n"
+ "ldr d28, [x21, x3]\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "ldr d9, [x20, x3]\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "ldr x21, [x5, #0xb8]\n"
+ "smlal v10.4s, v20.4h, v16.4h\n"
+ "smlal2 v21.4s, v20.8h, v16.8h\n"
+ "ldr x20, [x5, #0xc0]\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "ldr d16, [x9, x3]\n"
+ "smlal v3.4s, v31.4h, v11.4h\n"
+ "smlal2 v24.4s, v31.8h, v11.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "ldr d0, [x28, x3]\n"
+ "smlal v10.4s, v6.4h, v27.4h\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "smlal2 v21.4s, v6.8h, v27.8h\n"
+ "ldr d6, [x27, x3]\n"
+ "smlal v8.4s, v9.4h, v27.4h\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal2 v4.4s, v9.8h, v27.8h\n"
+ "ldr d9, [x26, x3]\n"
+ "ldr d27, [x25, x3]\n"
+ "smlal v3.4s, v28.4h, v2.4h\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "smlal2 v24.4s, v28.8h, v2.8h\n"
+ "ldr d28, [x24, x3]\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v10.4s, v16.4h, v11.4h\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "ldr d18, [x23, x3]\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v27.8h, v27.8b, v13.8b\n"
+ "smlal2 v21.4s, v16.8h, v11.8h\n"
+ "ldr d11, [x22, x3]\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "smlal v3.4s, v0.4h, v29.4h\n"
+ "smlal v19.4s, v6.4h, v2.4h\n"
+ "smlal2 v24.4s, v0.8h, v29.8h\n"
+ "ldr d29, [x21, x3]\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "smlal v10.4s, v27.4h, v22.4h\n"
+ "smlal2 v1.4s, v6.8h, v2.8h\n"
+ "usubl v11.8h, v11.8b, v13.8b\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "ldr d9, [x20, x3]\n"
+ "smlal2 v21.4s, v27.8h, v22.8h\n"
+ "ldr q27, [x8, #0x10]\n"
+ "smlal v3.4s, v28.4h, v15.4h\n"
+ "smlal v19.4s, v20.4h, v22.4h\n"
+ "usubl v29.8h, v29.8b, v13.8b\n"
+ "add x3, x3, #0x8\n"
+ "smlal2 v24.4s, v28.8h, v15.8h\n"
+ "smlal v8.4s, v6.4h, v15.4h\n"
+ "add x8, x8, #0x20\n"
+ "smlal v10.4s, v18.4h, v5.4h\n"
+ "smlal2 v1.4s, v20.8h, v22.8h\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "smlal2 v4.4s, v6.8h, v15.8h\n"
+ "smlal2 v21.4s, v18.8h, v5.8h\n"
+ "smlal v3.4s, v11.4h, v5.4h\n"
+ "smlal v19.4s, v16.4h, v5.4h\n"
+ "smlal2 v24.4s, v11.8h, v5.8h\n"
+ "smlal v8.4s, v31.4h, v5.4h\n"
+ "smlal v10.4s, v28.4h, v2.4h\n"
+ "smlal2 v1.4s, v16.8h, v5.8h\n"
+ "smlal2 v4.4s, v31.8h, v5.8h\n"
+ "smlal2 v21.4s, v28.8h, v2.8h\n"
+ "smlal v3.4s, v29.4h, v22.4h\n"
+ "sqrdmulh v19.4s, v19.4s, v30.4s\n"
+ "smlal2 v24.4s, v29.8h, v22.8h\n"
+ "smlal v8.4s, v0.4h, v7.4h\n"
+ "smlal v10.4s, v29.4h, v7.4h\n"
+ "sqrdmulh v1.4s, v1.4s, v26.4s\n"
+ "and v5.16b, v19.16b, v17.16b\n"
+ "smlal2 v4.4s, v0.8h, v7.8h\n"
+ "smlal2 v21.4s, v29.8h, v7.8h\n"
+ "smlal v3.4s, v9.4h, v7.4h\n"
+ "smlal2 v24.4s, v9.8h, v7.8h\n"
+ "and v16.16b, v1.16b, v27.16b\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqrdmulh v8.4s, v8.4s, v30.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v30.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v23.16b, v21.16b, v30.16b\n"
- "sqrdmulh v8.4s, v8.4s, v14.4s\n"
- "and v27.16b, v20.16b, v30.16b\n"
- "sqrdmulh v0.4s, v0.4s, v14.4s\n"
- "and v22.16b, v19.16b, v30.16b\n"
- "sqrdmulh v31.4s, v31.4s, v14.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v23.4s, v23.4s, #0x1f\n"
- "and v14.16b, v8.16b, v25.16b\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "and v18.16b, v0.16b, v25.16b\n"
- "sshr v22.4s, v22.4s, #0x1f\n"
- "and v16.16b, v31.16b, v25.16b\n"
- "sqadd v21.4s, v21.4s, v23.4s\n"
- "sshr v14.4s, v14.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v27.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v22.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v26.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v26.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v30.4s\n"
+ "sqadd v19.4s, v19.4s, v5.4s\n"
+ "and v30.16b, v8.16b, v17.16b\n"
+ "and v20.16b, v10.16b, v17.16b\n"
+ "sqrdmulh v24.4s, v24.4s, v26.4s\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v2.16b, v3.16b, v17.16b\n"
+ "and v11.16b, v4.16b, v27.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v9.16b, v21.16b, v27.16b\n"
+ "and v16.16b, v24.16b, v27.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "sqadd v8.4s, v8.4s, v30.4s\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v9.4s, v9.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v30.4s\n"
- "srshl v21.4s, v21.4s, v30.4s\n"
- "sqadd v8.4s, v8.4s, v14.4s\n"
- "srshl v20.4s, v20.4s, v30.4s\n"
- "sqadd v0.4s, v0.4s, v18.4s\n"
- "srshl v19.4s, v19.4s, v30.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v25.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v25.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v25.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v25.4s\n"
+ "sqadd v3.4s, v3.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v17.4s\n"
+ "srshl v8.4s, v8.4s, v17.4s\n"
+ "sqadd v4.4s, v4.4s, v11.4s\n"
+ "srshl v10.4s, v10.4s, v17.4s\n"
+ "sqadd v21.4s, v21.4s, v9.4s\n"
+ "srshl v3.4s, v3.4s, v17.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v27.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "str d5, [x11, x16]\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "str d21, [x10, x16]\n"
+ "srshl v4.4s, v4.4s, v27.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v27.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v27.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "str d20, [x9, x16]\n"
- "str d19, [x28, x16]\n"
- "add x16, x16, #0x8\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "str d19, [x16, x4]\n"
+ "str d8, [x15, x4]\n"
+ "str d10, [x14, x4]\n"
+ "str d3, [x13, x4]\n"
+ "add x4, x4, #0x8\n"
"beq 88f\n"
- "add x14, x14, #0x48\n"
+ "add x6, x6, #0x48\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "tbz x7, #2, 5f\n"
- "ld1 { v5.4s }, [x20], #0x10\n"
- "tbz x7, #1, 4f\n"
- "ld1 { v3.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v3.s }[2], [x20]\n"
+ "tbz x2, #2, 5f\n"
+ "ld1 { v19.4s }, [x20], #0x10\n"
+ "tbz x2, #1, 4f\n"
+ "ld1 { v1.d }[0], [x20], #0x8\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v1.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v3.s }[0], [x20]\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v1.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
- "tbz x7, #1, 6f\n"
- "ld1 { v5.d }[0], [x20], #0x8\n"
- "tbz x7, #0, 7f\n"
- "ld1 { v5.s }[2], [x20]\n"
+ "tbz x2, #1, 6f\n"
+ "ld1 { v19.d }[0], [x20], #0x8\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v19.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 7f\n"
- "ld1 { v5.s }[0], [x20]\n"
+ "tbz x2, #0, 7f\n"
+ "ld1 { v19.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d11, [x14, #0x0]\n"
- "ldr d22, [x14, #0x8]\n"
- "mov v21.16b, v5.16b\n"
- "mov v8.16b, v3.16b\n"
- "ldr d14, [x14, #0x10]\n"
- "ldr d28, [x14, #0x18]\n"
- "mov v20.16b, v5.16b\n"
- "mov v0.16b, v3.16b\n"
- "ldr d18, [x14, #0x20]\n"
- "ldr d9, [x14, #0x28]\n"
- "mov v19.16b, v5.16b\n"
- "mov v31.16b, v3.16b\n"
- "ldr d26, [x14, #0x30]\n"
- "ldr d7, [x14, #0x38]\n"
- "ssubl v11.8h, v11.8b, v15.8b\n"
- "ssubl v22.8h, v22.8b, v15.8b\n"
- "ldr d4, [x14, #0x40]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ssubl v14.8h, v14.8b, v15.8b\n"
- "ssubl v28.8h, v28.8b, v15.8b\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ssubl v18.8h, v18.8b, v15.8b\n"
- "ssubl v9.8h, v9.8b, v15.8b\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ssubl v26.8h, v26.8b, v15.8b\n"
- "ssubl v7.8h, v7.8b, v15.8b\n"
- "ssubl v4.8h, v4.8b, v15.8b\n"
- "add x27, x27, x17\n"
- "add x26, x26, x17\n"
- "add x25, x25, x17\n"
- "add x24, x24, x17\n"
- "add x23, x23, x17\n"
- "add x22, x22, x17\n"
- "add x21, x21, x17\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 9f\n"
- "ld1 { v25.s }[0], [x27], #0x4\n"
- "ld1 { v27.s }[0], [x26], #0x4\n"
- "ld1 { v1.s }[0], [x25], #0x4\n"
- "ld1 { v2.s }[0], [x24], #0x4\n"
- "ld1 { v12.s }[0], [x23], #0x4\n"
- "ld1 { v16.s }[0], [x22], #0x4\n"
- "ld1 { v23.s }[0], [x21], #0x4\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 8f\n"
- "ld1 { v25.h }[2], [x27], #0x2\n"
- "ld1 { v27.h }[2], [x26], #0x2\n"
- "ld1 { v1.h }[2], [x25], #0x2\n"
- "ld1 { v2.h }[2], [x24], #0x2\n"
- "ld1 { v12.h }[2], [x23], #0x2\n"
- "ld1 { v16.h }[2], [x22], #0x2\n"
- "ld1 { v23.h }[2], [x21], #0x2\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[6], [x27]\n"
- "ld1 { v27.b }[6], [x26]\n"
- "ld1 { v1.b }[6], [x25]\n"
- "ld1 { v2.b }[6], [x24]\n"
- "ld1 { v12.b }[6], [x23]\n"
- "ld1 { v16.b }[6], [x22]\n"
- "ld1 { v23.b }[6], [x21]\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ldr d16, [x6, #0x0]\n"
+ "ldr d11, [x6, #0x8]\n"
+ "mov v8.16b, v19.16b\n"
+ "mov v4.16b, v1.16b\n"
+ "ldr d29, [x6, #0x10]\n"
+ "ldr d15, [x6, #0x18]\n"
+ "mov v10.16b, v19.16b\n"
+ "mov v21.16b, v1.16b\n"
+ "ldr d27, [x6, #0x20]\n"
+ "ldr d2, [x6, #0x28]\n"
+ "mov v3.16b, v19.16b\n"
+ "mov v24.16b, v1.16b\n"
+ "ldr d22, [x6, #0x30]\n"
+ "ldr d5, [x6, #0x38]\n"
+ "ssubl v16.8h, v16.8b, v14.8b\n"
+ "ssubl v11.8h, v11.8b, v14.8b\n"
+ "ldr d7, [x6, #0x40]\n"
+ "ldp x27, x26, [x5, #0x0]\n"
+ "ssubl v29.8h, v29.8b, v14.8b\n"
+ "ssubl v15.8h, v15.8b, v14.8b\n"
+ "ssubl v27.8h, v27.8b, v14.8b\n"
+ "ssubl v2.8h, v2.8b, v14.8b\n"
+ "ssubl v22.8h, v22.8b, v14.8b\n"
+ "ssubl v5.8h, v5.8b, v14.8b\n"
+ "ldp x25, x24, [x5, #0x10]\n"
+ "ssubl v7.8h, v7.8b, v14.8b\n"
+ "add x27, x27, x3\n"
+ "add x26, x26, x3\n"
+ "ldp x23, x22, [x5, #0x20]\n"
+ "add x25, x25, x3\n"
+ "add x24, x24, x3\n"
+ "ldp x21, x20, [x5, #0x30]\n"
+ "add x23, x23, x3\n"
+ "add x22, x22, x3\n"
+ "add x21, x21, x3\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 9f\n"
+ "ld1 { v26.s }[0], [x27], #0x4\n"
+ "ld1 { v31.s }[0], [x26], #0x4\n"
+ "ld1 { v20.s }[0], [x25], #0x4\n"
+ "ld1 { v28.s }[0], [x24], #0x4\n"
+ "ld1 { v6.s }[0], [x23], #0x4\n"
+ "ld1 { v9.s }[0], [x22], #0x4\n"
+ "ld1 { v0.s }[0], [x21], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 8f\n"
+ "ld1 { v26.h }[2], [x27], #0x2\n"
+ "ld1 { v31.h }[2], [x26], #0x2\n"
+ "ld1 { v20.h }[2], [x25], #0x2\n"
+ "ld1 { v28.h }[2], [x24], #0x2\n"
+ "ld1 { v6.h }[2], [x23], #0x2\n"
+ "ld1 { v9.h }[2], [x22], #0x2\n"
+ "ld1 { v0.h }[2], [x21], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[6], [x27]\n"
+ "ld1 { v31.b }[6], [x26]\n"
+ "ld1 { v20.b }[6], [x25]\n"
+ "ld1 { v28.b }[6], [x24]\n"
+ "ld1 { v6.b }[6], [x23]\n"
+ "ld1 { v9.b }[6], [x22]\n"
+ "ld1 { v0.b }[6], [x21]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[4], [x27]\n"
- "ld1 { v27.b }[4], [x26]\n"
- "ld1 { v1.b }[4], [x25]\n"
- "ld1 { v2.b }[4], [x24]\n"
- "ld1 { v12.b }[4], [x23]\n"
- "ld1 { v16.b }[4], [x22]\n"
- "ld1 { v23.b }[4], [x21]\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[4], [x27]\n"
+ "ld1 { v31.b }[4], [x26]\n"
+ "ld1 { v20.b }[4], [x25]\n"
+ "ld1 { v28.b }[4], [x24]\n"
+ "ld1 { v6.b }[4], [x23]\n"
+ "ld1 { v9.b }[4], [x22]\n"
+ "ld1 { v0.b }[4], [x21]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
- "tbz x7, #1, 10f\n"
- "ld1 { v25.h }[0], [x27], #0x2\n"
- "ld1 { v27.h }[0], [x26], #0x2\n"
- "ld1 { v1.h }[0], [x25], #0x2\n"
- "ld1 { v2.h }[0], [x24], #0x2\n"
- "ld1 { v12.h }[0], [x23], #0x2\n"
- "ld1 { v16.h }[0], [x22], #0x2\n"
- "ld1 { v23.h }[0], [x21], #0x2\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[2], [x27]\n"
- "ld1 { v27.b }[2], [x26]\n"
- "ld1 { v1.b }[2], [x25]\n"
- "ld1 { v2.b }[2], [x24]\n"
- "ld1 { v12.b }[2], [x23]\n"
- "ld1 { v16.b }[2], [x22]\n"
- "ld1 { v23.b }[2], [x21]\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "tbz x2, #1, 10f\n"
+ "ld1 { v26.h }[0], [x27], #0x2\n"
+ "ld1 { v31.h }[0], [x26], #0x2\n"
+ "ld1 { v20.h }[0], [x25], #0x2\n"
+ "ld1 { v28.h }[0], [x24], #0x2\n"
+ "ld1 { v6.h }[0], [x23], #0x2\n"
+ "ld1 { v9.h }[0], [x22], #0x2\n"
+ "ld1 { v0.h }[0], [x21], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[2], [x27]\n"
+ "ld1 { v31.b }[2], [x26]\n"
+ "ld1 { v20.b }[2], [x25]\n"
+ "ld1 { v28.b }[2], [x24]\n"
+ "ld1 { v6.b }[2], [x23]\n"
+ "ld1 { v9.b }[2], [x22]\n"
+ "ld1 { v0.b }[2], [x21]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 11f\n"
- "ld1 { v25.b }[0], [x27]\n"
- "ld1 { v27.b }[0], [x26]\n"
- "ld1 { v1.b }[0], [x25]\n"
- "ld1 { v2.b }[0], [x24]\n"
- "ld1 { v12.b }[0], [x23]\n"
- "ld1 { v16.b }[0], [x22]\n"
- "ld1 { v23.b }[0], [x21]\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "tbz x2, #0, 11f\n"
+ "ld1 { v26.b }[0], [x27]\n"
+ "ld1 { v31.b }[0], [x26]\n"
+ "ld1 { v20.b }[0], [x25]\n"
+ "ld1 { v28.b }[0], [x24]\n"
+ "ld1 { v6.b }[0], [x23]\n"
+ "ld1 { v9.b }[0], [x22]\n"
+ "ld1 { v0.b }[0], [x21]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "usubl v25.8h, v25.8b, v6.8b\n"
- "smlal v5.4s, v25.4h, v4.4h\n"
- "smlal2 v3.4s, v25.8h, v4.8h\n"
- "ldr x20, [x15, #0x40]\n"
- "usubl v27.8h, v27.8b, v6.8b\n"
- "smlal v5.4s, v27.4h, v11.4h\n"
- "smlal2 v3.4s, v27.8h, v11.8h\n"
- "usubl v1.8h, v1.8b, v6.8b\n"
- "smlal v21.4s, v25.4h, v26.4h\n"
- "smlal2 v8.4s, v25.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "smlal v5.4s, v1.4h, v22.4h\n"
- "smlal2 v3.4s, v1.8h, v22.8h\n"
- "usubl v2.8h, v2.8b, v6.8b\n"
- "usubl v16.8h, v16.8b, v6.8b\n"
- "smlal v21.4s, v2.4h, v22.4h\n"
- "smlal2 v8.4s, v2.8h, v22.8h\n"
- "smlal v5.4s, v16.4h, v28.4h\n"
- "smlal2 v3.4s, v16.8h, v28.8h\n"
- "usubl v12.8h, v12.8b, v6.8b\n"
- "usubl v23.8h, v23.8b, v6.8b\n"
- "smlal v21.4s, v12.4h, v14.4h\n"
- "smlal2 v8.4s, v12.8h, v14.8h\n"
- "smlal v5.4s, v23.4h, v18.4h\n"
- "smlal2 v3.4s, v23.8h, v18.8h\n"
- "usubl v10.8h, v10.8b, v6.8b\n"
- "smlal v20.4s, v25.4h, v14.4h\n"
- "smlal2 v0.4s, v25.8h, v14.8h\n"
- "smlal v19.4s, v25.4h, v11.4h\n"
- "smlal2 v31.4s, v25.8h, v11.8h\n"
- "smlal v5.4s, v10.4h, v14.4h\n"
- "smlal2 v3.4s, v10.8h, v14.8h\n"
- "smlal v21.4s, v10.4h, v11.4h\n"
- "smlal2 v8.4s, v10.8h, v11.8h\n"
- "tbz x7, #2, 13f\n"
- "ld1 { v15.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 12f\n"
- "ld1 { v15.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[6], [x20]\n"
+ "usubl v26.8h, v26.8b, v13.8b\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x20, [x5, #0x40]\n"
+ "usubl v20.8h, v20.8b, v13.8b\n"
+ "usubl v28.8h, v28.8b, v13.8b\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "usubl v6.8h, v6.8b, v13.8b\n"
+ "smlal v19.4s, v26.4h, v7.4h\n"
+ "smlal2 v1.4s, v26.8h, v7.8h\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
+ "add x20, x20, x3\n"
+ "smlal2 v4.4s, v26.8h, v22.8h\n"
+ "usubl v18.8h, v18.8b, v13.8b\n"
+ "smlal v10.4s, v26.4h, v29.4h\n"
+ "smlal2 v21.4s, v26.8h, v29.8h\n"
+ "smlal v3.4s, v26.4h, v16.4h\n"
+ "smlal v19.4s, v31.4h, v16.4h\n"
+ "smlal2 v24.4s, v26.8h, v16.8h\n"
+ "smlal2 v1.4s, v31.8h, v16.8h\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v4.4s, v28.8h, v11.8h\n"
+ "smlal v19.4s, v20.4h, v11.4h\n"
+ "smlal2 v1.4s, v20.8h, v11.8h\n"
+ "smlal v8.4s, v6.4h, v29.4h\n"
+ "smlal v19.4s, v9.4h, v15.4h\n"
+ "smlal2 v4.4s, v6.8h, v29.8h\n"
+ "smlal2 v1.4s, v9.8h, v15.8h\n"
+ "smlal v8.4s, v18.4h, v16.4h\n"
+ "smlal v19.4s, v0.4h, v27.4h\n"
+ "smlal2 v4.4s, v18.8h, v16.8h\n"
+ "smlal2 v1.4s, v0.8h, v27.8h\n"
+ "smlal v19.4s, v18.4h, v29.4h\n"
+ "smlal2 v1.4s, v18.8h, v29.8h\n"
+ "tbz x2, #2, 13f\n"
+ "ld1 { v30.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 12f\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[4], [x20]\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
- "tbz x7, #1, 14f\n"
- "ld1 { v15.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[2], [x20]\n"
+ "tbz x2, #1, 14f\n"
+ "ld1 { v30.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 15f\n"
- "ld1 { v15.b }[0], [x20]\n"
+ "tbz x2, #0, 15f\n"
+ "ld1 { v30.b }[0], [x20]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
- "usubl v15.8h, v15.8b, v6.8b\n"
- "ldr x20, [x15, #0x48]\n"
- "smlal v21.4s, v15.4h, v18.4h\n"
- "smlal2 v8.4s, v15.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 17f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 16f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v30.8h, v30.8b, v13.8b\n"
+ "ldr x20, [x5, #0x48]\n"
+ "smlal v8.4s, v30.4h, v27.4h\n"
+ "smlal2 v4.4s, v30.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 17f\n"
+ "ld1 { v9.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 16f\n"
+ "ld1 { v9.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
- "tbz x7, #1, 18f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 18f\n"
+ "ld1 { v9.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 19f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 19f\n"
+ "ld1 { v9.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x50]\n"
- "smlal v21.4s, v16.4h, v9.4h\n"
- "smlal2 v8.4s, v16.8h, v9.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 21f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 20f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v9.8h, v9.8b, v13.8b\n"
+ "ldr x20, [x5, #0x50]\n"
+ "smlal v8.4s, v9.4h, v2.4h\n"
+ "smlal2 v4.4s, v9.8h, v2.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 21f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 20f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (1, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (1, 2): Bit 2: Unset
- "tbz x7, #1, 22f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 22f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (1, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 23f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 23f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"23:" // Oddments: Load (1, 2): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x58]\n"
- "smlal v5.4s, v16.4h, v9.4h\n"
- "smlal2 v3.4s, v16.8h, v9.8h\n"
- "smlal v21.4s, v16.4h, v28.4h\n"
- "smlal2 v8.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 25f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 24f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x58]\n"
+ "smlal v19.4s, v17.4h, v2.4h\n"
+ "smlal2 v1.4s, v17.8h, v2.8h\n"
+ "smlal v8.4s, v17.4h, v15.4h\n"
+ "smlal2 v4.4s, v17.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 25f\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 24f\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (3, 0): Bit 2: Unset
- "tbz x7, #1, 26f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 26f\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 27f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 27f\n"
+ "ld1 { v31.b }[0], [x20]\n"
"27:" // Oddments: Load (3, 0): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x60]\n"
- "smlal v20.4s, v16.4h, v28.4h\n"
- "smlal2 v0.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 29f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 28f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v31.8h, v31.8b, v13.8b\n"
+ "ldr x20, [x5, #0x60]\n"
+ "smlal v10.4s, v31.4h, v15.4h\n"
+ "smlal2 v21.4s, v31.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 29f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 28f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 0): Bit 2: Unset
- "tbz x7, #1, 30f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 30f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 31f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 31f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 0): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x68]\n"
- "smlal v5.4s, v16.4h, v26.4h\n"
- "smlal2 v3.4s, v16.8h, v26.8h\n"
- "smlal v20.4s, v16.4h, v11.4h\n"
- "smlal2 v0.4s, v16.8h, v11.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 33f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 32f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x68]\n"
+ "smlal v19.4s, v17.4h, v22.4h\n"
+ "smlal2 v1.4s, v17.8h, v22.8h\n"
+ "smlal v10.4s, v17.4h, v16.4h\n"
+ "smlal2 v21.4s, v17.8h, v16.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 33f\n"
+ "ld1 { v30.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 32f\n"
+ "ld1 { v30.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (3, 1): Bit 2: Unset
- "tbz x7, #1, 34f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 34f\n"
+ "ld1 { v30.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 35f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 35f\n"
+ "ld1 { v30.b }[0], [x20]\n"
"35:" // Oddments: Load (3, 1): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x70]\n"
- "smlal v20.4s, v16.4h, v18.4h\n"
- "smlal2 v0.4s, v16.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 37f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 36f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v30.8h, v30.8b, v13.8b\n"
+ "ldr x20, [x5, #0x70]\n"
+ "smlal v10.4s, v30.4h, v27.4h\n"
+ "smlal2 v21.4s, v30.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 37f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 36f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 1): Bit 2: Unset
- "tbz x7, #1, 38f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 38f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 39f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 39f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"39:" // Oddments: Load (2, 1): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x78]\n"
- "smlal v5.4s, v16.4h, v7.4h\n"
- "smlal2 v3.4s, v16.8h, v7.8h\n"
- "smlal v20.4s, v16.4h, v22.4h\n"
- "smlal2 v0.4s, v16.8h, v22.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 41f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x78]\n"
+ "smlal v19.4s, v17.4h, v5.4h\n"
+ "smlal2 v1.4s, v17.8h, v5.8h\n"
+ "smlal v10.4s, v17.4h, v11.4h\n"
+ "smlal2 v21.4s, v17.8h, v11.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 41f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 40f\n"
+ "tbz x2, #1, 40f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (3, 3): Bit 2: Unset
- "tbz x7, #1, 42f\n"
+ "tbz x2, #1, 42f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 43f\n"
+ "tbz x2, #0, 43f\n"
"ld1 { v16.b }[0], [x20]\n"
"43:" // Oddments: Load (3, 3): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x80]\n"
- "smlal v19.4s, v16.4h, v18.4h\n"
- "smlal2 v31.4s, v16.8h, v18.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 45f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 44f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0x80]\n"
+ "smlal v3.4s, v16.4h, v27.4h\n"
+ "smlal2 v24.4s, v16.8h, v27.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 45f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 44f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (2, 3): Bit 2: Unset
- "tbz x7, #1, 46f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 46f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 47f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 47f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"47:" // Oddments: Load (2, 3): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x88]\n"
- "smlal v21.4s, v16.4h, v7.4h\n"
- "smlal2 v8.4s, v16.8h, v7.8h\n"
- "smlal v19.4s, v16.4h, v22.4h\n"
- "smlal2 v31.4s, v16.8h, v22.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 49f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x88]\n"
+ "smlal v8.4s, v17.4h, v5.4h\n"
+ "smlal2 v4.4s, v17.8h, v5.8h\n"
+ "smlal v3.4s, v17.4h, v11.4h\n"
+ "smlal2 v24.4s, v17.8h, v11.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 49f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 48f\n"
+ "tbz x2, #1, 48f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 4): Bit 2: Unset
- "tbz x7, #1, 50f\n"
+ "tbz x2, #1, 50f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 51f\n"
+ "tbz x2, #0, 51f\n"
"ld1 { v16.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 4): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x90]\n"
- "smlal v19.4s, v16.4h, v9.4h\n"
- "smlal2 v31.4s, v16.8h, v9.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 53f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 52f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0x90]\n"
+ "smlal v3.4s, v16.4h, v2.4h\n"
+ "smlal2 v24.4s, v16.8h, v2.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 53f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 52f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (4, 0): Bit 2: Unset
- "tbz x7, #1, 54f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 54f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 55f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 55f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"55:" // Oddments: Load (4, 0): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0x98]\n"
- "smlal v20.4s, v16.4h, v26.4h\n"
- "smlal2 v0.4s, v16.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 57f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0x98]\n"
+ "smlal v10.4s, v17.4h, v22.4h\n"
+ "smlal2 v21.4s, v17.8h, v22.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 57f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 56f\n"
+ "tbz x2, #1, 56f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 59f\n"
"56:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 59f\n"
"57:" // Oddments: Load (2, 4): Bit 2: Unset
- "tbz x7, #1, 58f\n"
+ "tbz x2, #1, 58f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 59f\n"
"58:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 59f\n"
+ "tbz x2, #0, 59f\n"
"ld1 { v16.b }[0], [x20]\n"
"59:" // Oddments: Load (2, 4): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xa0]\n"
- "smlal v21.4s, v16.4h, v4.4h\n"
- "smlal2 v8.4s, v16.8h, v4.8h\n"
- "smlal v19.4s, v16.4h, v14.4h\n"
- "smlal2 v31.4s, v16.8h, v14.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 61f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 60f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xa0]\n"
+ "smlal v8.4s, v16.4h, v7.4h\n"
+ "smlal2 v4.4s, v16.8h, v7.8h\n"
+ "smlal v3.4s, v16.4h, v29.4h\n"
+ "smlal2 v24.4s, v16.8h, v29.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 61f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 60f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 63f\n"
"60:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 63f\n"
"61:" // Oddments: Load (4, 1): Bit 2: Unset
- "tbz x7, #1, 62f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 62f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 63f\n"
"62:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 63f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 63f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"63:" // Oddments: Load (4, 1): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xa8]\n"
- "smlal v20.4s, v16.4h, v7.4h\n"
- "smlal2 v0.4s, v16.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 65f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0xa8]\n"
+ "smlal v10.4s, v17.4h, v5.4h\n"
+ "smlal2 v21.4s, v17.8h, v5.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 65f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 64f\n"
+ "tbz x2, #1, 64f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 2): Bit 2: Unset
- "tbz x7, #1, 66f\n"
+ "tbz x2, #1, 66f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 67f\n"
+ "tbz x2, #0, 67f\n"
"ld1 { v16.b }[0], [x20]\n"
"67:" // Oddments: Load (3, 2): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xb0]\n"
- "smlal v20.4s, v16.4h, v9.4h\n"
- "smlal2 v0.4s, v16.8h, v9.8h\n"
- "smlal v19.4s, v16.4h, v28.4h\n"
- "smlal2 v31.4s, v16.8h, v28.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 69f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 68f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xb0]\n"
+ "smlal v10.4s, v16.4h, v2.4h\n"
+ "smlal2 v21.4s, v16.8h, v2.8h\n"
+ "smlal v3.4s, v16.4h, v15.4h\n"
+ "smlal2 v24.4s, v16.8h, v15.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 69f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 68f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 3): Bit 2: Unset
- "tbz x7, #1, 70f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 70f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 71f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 71f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"71:" // Oddments: Load (4, 3): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xb8]\n"
- "smlal v19.4s, v16.4h, v7.4h\n"
- "smlal2 v31.4s, v16.8h, v7.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 73f\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "ldr x20, [x5, #0xb8]\n"
+ "smlal v3.4s, v17.4h, v5.4h\n"
+ "smlal2 v24.4s, v17.8h, v5.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 73f\n"
"ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 72f\n"
+ "tbz x2, #1, 72f\n"
"ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 2): Bit 2: Unset
- "tbz x7, #1, 74f\n"
+ "tbz x2, #1, 74f\n"
"ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 75f\n"
+ "tbz x2, #0, 75f\n"
"ld1 { v16.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 2): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "ldr x20, [x15, #0xc0]\n"
- "smlal v20.4s, v16.4h, v4.4h\n"
- "smlal2 v0.4s, v16.8h, v4.8h\n"
- "smlal v19.4s, v16.4h, v26.4h\n"
- "smlal2 v31.4s, v16.8h, v26.8h\n"
- "add x20, x20, x17\n"
- "tbz x7, #2, 77f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
- "tbz x7, #1, 76f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "usubl v16.8h, v16.8b, v13.8b\n"
+ "ldr x20, [x5, #0xc0]\n"
+ "smlal v10.4s, v16.4h, v7.4h\n"
+ "smlal2 v21.4s, v16.8h, v7.8h\n"
+ "smlal v3.4s, v16.4h, v22.4h\n"
+ "smlal2 v24.4s, v16.8h, v22.8h\n"
+ "add x20, x20, x3\n"
+ "tbz x2, #2, 77f\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
+ "tbz x2, #1, 76f\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 4): Bit 2: Unset
- "tbz x7, #1, 78f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "tbz x2, #1, 78f\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 79f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "tbz x2, #0, 79f\n"
+ "ld1 { v17.b }[0], [x20]\n"
"79:" // Oddments: Load (4, 4): Bit 2: End
- "usubl v16.8h, v16.8b, v6.8b\n"
- "smlal v19.4s, v16.4h, v4.4h\n"
- "smlal2 v31.4s, v16.8h, v4.8h\n"
- "tbz x7, #2, 81f\n"
- "ld1 { v14.4s }, [x13], #0x10\n"
- "ld1 { v25.4s }, [x12], #0x10\n"
- "tbz x7, #1, 80f\n"
- "ld1 { v18.d }[0], [x13], #0x8\n"
- "ld1 { v12.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x12]\n"
+ "usubl v17.8h, v17.8b, v13.8b\n"
+ "smlal v3.4s, v17.4h, v7.4h\n"
+ "smlal2 v24.4s, v17.8h, v7.8h\n"
+ "tbz x2, #2, 81f\n"
+ "ld1 { v16.4s }, [x7], #0x10\n"
+ "ld1 { v22.4s }, [x8], #0x10\n"
+ "tbz x2, #1, 80f\n"
+ "ld1 { v0.d }[0], [x7], #0x8\n"
+ "ld1 { v31.d }[0], [x8], #0x8\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v0.s }[2], [x7]\n"
+ "ld1 { v31.s }[2], [x8]\n"
"b 83f\n"
"80:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v18.s }[0], [x13]\n"
- "ld1 { v12.s }[0], [x12]\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v0.s }[0], [x7]\n"
+ "ld1 { v31.s }[0], [x8]\n"
"b 83f\n"
"81:" // Oddments: Load requant params: Bit 2: Unset
- "tbz x7, #1, 82f\n"
- "ld1 { v14.d }[0], [x13], #0x8\n"
- "ld1 { v25.d }[0], [x12], #0x8\n"
- "tbz x7, #0, 83f\n"
- "ld1 { v14.s }[2], [x13]\n"
- "ld1 { v25.s }[2], [x12]\n"
+ "tbz x2, #1, 82f\n"
+ "ld1 { v16.d }[0], [x7], #0x8\n"
+ "ld1 { v22.d }[0], [x8], #0x8\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v16.s }[2], [x7]\n"
+ "ld1 { v22.s }[2], [x8]\n"
"b 83f\n"
"82:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 83f\n"
- "ld1 { v14.s }[0], [x13]\n"
- "ld1 { v25.s }[0], [x12]\n"
+ "tbz x2, #0, 83f\n"
+ "ld1 { v16.s }[0], [x7]\n"
+ "ld1 { v22.s }[0], [x8]\n"
"83:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v5.4s, v5.4s, v14.4s\n"
- "and v28.16b, v5.16b, v25.16b\n"
- "add x11, x11, x16\n"
- "add x10, x10, x16\n"
- "sqrdmulh v3.4s, v3.4s, v18.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "add x9, x9, x16\n"
- "add x28, x28, x16\n"
- "and v16.16b, v3.16b, v12.16b\n"
- "sqrdmulh v21.4s, v21.4s, v14.4s\n"
- "sqrdmulh v20.4s, v20.4s, v14.4s\n"
- "sqrdmulh v19.4s, v19.4s, v14.4s\n"
- "sqadd v5.4s, v5.4s, v28.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v16.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v0.4s\n"
+ "add x16, x16, x4\n"
+ "add x15, x15, x4\n"
+ "sqrdmulh v8.4s, v8.4s, v16.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v16.4s\n"
+ "add x14, x14, x4\n"
+ "add x13, x13, x4\n"
+ "sqrdmulh v3.4s, v3.4s, v16.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v0.4s\n"
+ "and v17.16b, v19.16b, v22.16b\n"
+ "and v16.16b, v1.16b, v31.16b\n"
+ "and v15.16b, v8.16b, v22.16b\n"
+ "and v20.16b, v10.16b, v22.16b\n"
+ "sqrdmulh v21.4s, v21.4s, v0.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v0.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v14.16b, v21.16b, v25.16b\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "and v6.16b, v20.16b, v25.16b\n"
- "sqrdmulh v0.4s, v0.4s, v18.4s\n"
- "and v4.16b, v19.16b, v25.16b\n"
- "sqrdmulh v31.4s, v31.4s, v18.4s\n"
- "sqadd v3.4s, v3.4s, v16.4s\n"
- "sshr v14.4s, v14.4s, #0x1f\n"
- "and v18.16b, v8.16b, v12.16b\n"
- "sshr v6.4s, v6.4s, #0x1f\n"
- "and v7.16b, v0.16b, v12.16b\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "and v16.16b, v31.16b, v12.16b\n"
- "sqadd v21.4s, v21.4s, v14.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v20.4s, v20.4s, v6.4s\n"
- "sshr v7.4s, v7.4s, #0x1f\n"
- "sqadd v19.4s, v19.4s, v4.4s\n"
+ "sshr v15.4s, v15.4s, #0x1f\n"
+ "and v26.16b, v4.16b, v31.16b\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "and v0.16b, v21.16b, v31.16b\n"
+ "sqadd v19.4s, v19.4s, v17.4s\n"
+ "and v17.16b, v3.16b, v22.16b\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
+ "and v16.16b, v24.16b, v31.16b\n"
+ "sqadd v8.4s, v8.4s, v15.4s\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v10.4s, v10.4s, v20.4s\n"
+ "sshr v0.4s, v0.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "srshl v5.4s, v5.4s, v25.4s\n"
- "srshl v21.4s, v21.4s, v25.4s\n"
- "sqadd v8.4s, v8.4s, v18.4s\n"
- "srshl v20.4s, v20.4s, v25.4s\n"
- "sqadd v0.4s, v0.4s, v7.4s\n"
- "srshl v19.4s, v19.4s, v25.4s\n"
- "sqadd v31.4s, v31.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v12.4s\n"
- "sqxtn v5.4h, v5.4s\n"
- "srshl v8.4s, v8.4s, v12.4s\n"
- "sqxtn v21.4h, v21.4s\n"
- "srshl v0.4s, v0.4s, v12.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v31.4s, v31.4s, v12.4s\n"
+ "srshl v19.4s, v19.4s, v22.4s\n"
+ "srshl v8.4s, v8.4s, v22.4s\n"
+ "sqadd v3.4s, v3.4s, v17.4s\n"
+ "sqadd v4.4s, v4.4s, v26.4s\n"
+ "srshl v10.4s, v10.4s, v22.4s\n"
+ "sqadd v21.4s, v21.4s, v0.4s\n"
+ "sqadd v24.4s, v24.4s, v16.4s\n"
+ "srshl v1.4s, v1.4s, v31.4s\n"
+ "srshl v3.4s, v3.4s, v22.4s\n"
"sqxtn v19.4h, v19.4s\n"
- "sqxtn2 v5.8h, v3.4s\n"
- "sqxtn2 v21.8h, v8.4s\n"
- "sqxtn2 v20.8h, v0.4s\n"
- "sqxtn2 v19.8h, v31.4s\n"
- "sqadd v5.8h, v5.8h, v13.8h\n"
- "sqadd v21.8h, v21.8h, v13.8h\n"
- "sqadd v20.8h, v20.8h, v13.8h\n"
- "sqadd v19.8h, v19.8h, v13.8h\n"
- "smax v5.8h, v5.8h, v17.8h\n"
- "smax v21.8h, v21.8h, v17.8h\n"
- "smax v20.8h, v20.8h, v17.8h\n"
- "smax v19.8h, v19.8h, v17.8h\n"
- "smin v5.8h, v5.8h, v24.8h\n"
- "smin v21.8h, v21.8h, v24.8h\n"
- "smin v20.8h, v20.8h, v24.8h\n"
- "smin v19.8h, v19.8h, v24.8h\n"
- "uzp1 v5.16b, v5.16b, v5.16b\n"
- "uzp1 v21.16b, v21.16b, v21.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
+ "srshl v4.4s, v4.4s, v31.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v21.4s, v21.4s, v31.4s\n"
+ "sqxtn v10.4h, v10.4s\n"
+ "srshl v24.4s, v24.4s, v31.4s\n"
+ "sqxtn v3.4h, v3.4s\n"
+ "sqxtn2 v19.8h, v1.4s\n"
+ "sqxtn2 v8.8h, v4.4s\n"
+ "sqxtn2 v10.8h, v21.4s\n"
+ "sqxtn2 v3.8h, v24.4s\n"
+ "sqadd v19.8h, v19.8h, v25.8h\n"
+ "sqadd v8.8h, v8.8h, v25.8h\n"
+ "sqadd v10.8h, v10.8h, v25.8h\n"
+ "sqadd v3.8h, v3.8h, v25.8h\n"
+ "smax v19.8h, v19.8h, v23.8h\n"
+ "smax v8.8h, v8.8h, v23.8h\n"
+ "smax v10.8h, v10.8h, v23.8h\n"
+ "smax v3.8h, v3.8h, v23.8h\n"
+ "smin v19.8h, v19.8h, v12.8h\n"
+ "smin v8.8h, v8.8h, v12.8h\n"
+ "smin v10.8h, v10.8h, v12.8h\n"
+ "smin v3.8h, v3.8h, v12.8h\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
- "tbz x7, #2, 85f\n"
- "st1 { v5.s }[0], [x11], #0x4\n"
- "st1 { v21.s }[0], [x10], #0x4\n"
- "st1 { v20.s }[0], [x9], #0x4\n"
- "st1 { v19.s }[0], [x28], #0x4\n"
- "tbz x7, #1, 84f\n"
- "st1 { v5.h }[2], [x11], #0x2\n"
- "st1 { v21.h }[2], [x10], #0x2\n"
- "st1 { v20.h }[2], [x9], #0x2\n"
- "st1 { v19.h }[2], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[6], [x11], #0x1\n"
- "st1 { v21.b }[6], [x10], #0x1\n"
- "st1 { v20.b }[6], [x9], #0x1\n"
- "st1 { v19.b }[6], [x28], #0x1\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v10.16b, v10.16b, v10.16b\n"
+ "uzp1 v3.16b, v3.16b, v3.16b\n"
+ "tbz x2, #2, 85f\n"
+ "st1 { v19.s }[0], [x16], #0x4\n"
+ "st1 { v8.s }[0], [x15], #0x4\n"
+ "st1 { v10.s }[0], [x14], #0x4\n"
+ "st1 { v3.s }[0], [x13], #0x4\n"
+ "tbz x2, #1, 84f\n"
+ "st1 { v19.h }[2], [x16], #0x2\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "st1 { v10.h }[2], [x14], #0x2\n"
+ "st1 { v3.h }[2], [x13], #0x2\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[6], [x16], #0x1\n"
+ "st1 { v8.b }[6], [x15], #0x1\n"
+ "st1 { v10.b }[6], [x14], #0x1\n"
+ "st1 { v3.b }[6], [x13], #0x1\n"
"b 87f\n"
"84:" // Oddments: Bit 2: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[4], [x11], #0x1\n"
- "st1 { v21.b }[4], [x10], #0x1\n"
- "st1 { v20.b }[4], [x9], #0x1\n"
- "st1 { v19.b }[4], [x28], #0x1\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[4], [x16], #0x1\n"
+ "st1 { v8.b }[4], [x15], #0x1\n"
+ "st1 { v10.b }[4], [x14], #0x1\n"
+ "st1 { v3.b }[4], [x13], #0x1\n"
"b 87f\n"
"85:" // Oddments: Bit 2: Unset
- "tbz x7, #1, 86f\n"
- "st1 { v5.h }[0], [x11], #0x2\n"
- "st1 { v21.h }[0], [x10], #0x2\n"
- "st1 { v20.h }[0], [x9], #0x2\n"
- "st1 { v19.h }[0], [x28], #0x2\n"
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[2], [x11], #0x1\n"
- "st1 { v21.b }[2], [x10], #0x1\n"
- "st1 { v20.b }[2], [x9], #0x1\n"
- "st1 { v19.b }[2], [x28], #0x1\n"
+ "tbz x2, #1, 86f\n"
+ "st1 { v19.h }[0], [x16], #0x2\n"
+ "st1 { v8.h }[0], [x15], #0x2\n"
+ "st1 { v10.h }[0], [x14], #0x2\n"
+ "st1 { v3.h }[0], [x13], #0x2\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[2], [x16], #0x1\n"
+ "st1 { v8.b }[2], [x15], #0x1\n"
+ "st1 { v10.b }[2], [x14], #0x1\n"
+ "st1 { v3.b }[2], [x13], #0x1\n"
"b 87f\n"
"86:" // Oddments: Bit 2: Unset: Bit 1: Unset
- "tbz x7, #0, 87f\n"
- "st1 { v5.b }[0], [x11], #0x1\n"
- "st1 { v21.b }[0], [x10], #0x1\n"
- "st1 { v20.b }[0], [x9], #0x1\n"
- "st1 { v19.b }[0], [x28], #0x1\n"
+ "tbz x2, #0, 87f\n"
+ "st1 { v19.b }[0], [x16], #0x1\n"
+ "st1 { v8.b }[0], [x15], #0x1\n"
+ "st1 { v10.b }[0], [x14], #0x1\n"
+ "st1 { v3.b }[0], [x13], #0x1\n"
"87:" // Oddments: Bit 2: End
"88:" // End
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
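For reference, the requantization tail that both u8s8u8q kernels in this patch emit (sqrdmulh, the and/sshr/sqadd nudge, srshl, then offset/clamp/narrow before the lane stores) corresponds to the scalar model below. This is a minimal illustrative sketch, not part of the patch or of the library's API: requantize_lane and its parameter names are hypothetical, SQADD saturation and the SQRDMULH INT32_MIN corner case are omitted, and the asm's sqxtn/16-bit offset-and-clamp steps are folded into 32-bit arithmetic (equivalent for in-range values).

#include <algorithm>
#include <cstdint>

// Scalar model of the per-lane requantize sequence in the asm above
// (illustrative only; the name and signature are hypothetical).
static inline uint8_t requantize_lane(int32_t acc,        // smlal/smlal2 accumulator
                                      int32_t multiplier, // requant_muls lane, Q0.31
                                      int32_t shift,      // requant_shifts lane, <= 0
                                      int32_t c_offset,   // Requantize32::c_offset
                                      int32_t minval, int32_t maxval)
{
    // sqrdmulh: rounding doubling multiply, keep the high 32 bits.
    // (Saturation at acc == multiplier == INT32_MIN is omitted here.)
    int64_t prod = (int64_t)acc * (int64_t)multiplier;
    int32_t high = (int32_t)((prod + (INT64_C(1) << 30)) >> 31);

    // and / sshr #31 / sqadd: subtract 1 from negative values when a
    // right shift follows, so srshl's round-half-up matches the
    // reference rounding. (high & shift) is negative exactly when both
    // high < 0 and shift < 0, so the arithmetic shift yields 0 or -1.
    int32_t nudge = (high & shift) >> 31;
    high += nudge; // sqadd (saturation omitted)

    // srshl by a negative amount = rounding arithmetic shift right.
    int32_t s = -shift;
    int32_t shifted = (s > 0) ? ((high + (1 << (s - 1))) >> s) : high;

    // sqxtn + sqadd c_offset + smax/smin clamp + uzp1 narrow to u8,
    // collapsed here into a 32-bit add and clamp.
    int32_t out = shifted + c_offset;
    return (uint8_t)std::min(std::max(out, minval), maxval);
}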
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index df955206e2..71622239b4 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[36];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -113,1743 +113,1743 @@ void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
__asm__ __volatile__(
"ldr x1, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "lsr x2, x1, #0x3\n"
- "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
- "ld1r { v18.16b }, [x20]\n"
+ "mov x2, #0x0\n"
+ "mov x3, #0x0\n"
"ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "add x4, %x[params], %[offsetof_Params_inptrs]\n"
+ "ldr x5, [%x[params], %[offsetof_Params_weights]]\n"
+ "ldr x6, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "lsr x14, x1, #0x3\n"
+ "add x20, x23, %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v15.16b }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_b_offset]\n"
"add x20, x23, %[offsetof_Requantize32_c_offset]\n"
- "ld1r { v13.16b }, [x21]\n"
- "ld1r { v26.8h }, [x20]\n"
+ "ld1r { v9.16b }, [x21]\n"
+ "ld1r { v13.8h }, [x20]\n"
"add x21, x23, %[offsetof_Requantize32_minval]\n"
"add x20, x23, %[offsetof_Requantize32_maxval]\n"
- "ld1r { v11.8h }, [x21]\n"
- "ld1r { v0.8h }, [x20]\n"
- "mov x3, #0x0\n"
- "mov x4, #0x0\n"
- "add x5, %x[params], %[offsetof_Params_inptrs]\n"
- "ldr x6, [%x[params], %[offsetof_Params_weights]]\n"
- "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "ldp x17, x16, [x22, #0x0]\n"
- "ldp x15, x14, [x22, #0x10]\n"
- "cbz x2, 3f\n"
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "subs x2, x2, #0x1\n"
- "ssubl v6.8h, v6.8b, v13.8b\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "ssubl v14.8h, v14.8b, v13.8b\n"
- "ssubl v10.8h, v10.8b, v13.8b\n"
- "ldr d12, [x6, #0x20]\n"
+ "ld1r { v10.8h }, [x21]\n"
+ "ld1r { v14.8h }, [x20]\n"
+ "ldp x8, x17, [x22, #0x0]\n"
+ "ldp x16, x15, [x22, #0x10]\n"
+ "cbz x14, 3f\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "subs x14, x14, #0x1\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "ldr d23, [x5, #0x20]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "ldr q7, [x20, #0x0]\n"
- "ldr q15, [x20, #0x10]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "ssubl v12.8h, v12.8b, v9.8b\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "ssubl v11.8h, v11.8b, v9.8b\n"
+ "ldr q8, [x20, #0x0]\n"
+ "ldr q0, [x20, #0x10]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
"add x20, x20, #0x20\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "ldr d31, [x9, x3]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldr d17, [x28, x3]\n"
- "ldr d30, [x27, x3]\n"
- "usubl v31.8h, v31.8b, v18.8b\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "ldr d16, [x26, x3]\n"
- "ldr d3, [x25, x3]\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "ldr d4, [x24, x3]\n"
- "ldr d25, [x23, x3]\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "ldr d9, [x22, x3]\n"
- "ldr d29, [x21, x3]\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "ldr d28, [x20, x3]\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "ldr d24, [x9, x2]\n"
+ "ldr d21, [x28, x2]\n"
+ "ldr d16, [x27, x2]\n"
+ "ldr d20, [x26, x2]\n"
+ "ldr d7, [x25, x2]\n"
+ "ldr d19, [x24, x2]\n"
+ "ldr d28, [x23, x2]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "ldr d26, [x22, x2]\n"
+ "ldr d29, [x21, x2]\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "ldr d18, [x20, x2]\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "usubl v18.8h, v18.8b, v15.8b\n"
"beq 2f\n"
"1:" // Loop
- "ldr d2, [x6, #0x28]\n"
- "ldr d27, [x6, #0x30]\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "ldr d1, [x6, #0x38]\n"
- "ldr d31, [x6, #0x40]\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "ldr d8, [x6, #0x48]\n"
- "ldr x22, [x5, #0x50]\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "ldr x20, [x5, #0x58]\n"
- "ldr x21, [x5, #0x60]\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "ldr x22, [x5, #0x70]\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "ldr d14, [x20, x3]\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal v23.4s, v17.4h, v10.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "ldr x20, [x5, #0x78]\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "ldr x21, [x5, #0x80]\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "ldr d25, [x22, x3]\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v10.8h\n"
- "ldr d10, [x20, x3]\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "smlal v24.4s, v17.4h, v21.4h\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr x24, [x5, #0x88]\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "smlal v7.4s, v30.4h, v2.4h\n"
- "ldr x20, [x5, #0x90]\n"
- "ldr x23, [x5, #0x98]\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "ldr d9, [x21, x3]\n"
- "smlal2 v22.4s, v17.8h, v21.8h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "ldr d21, [x6, #0x50]\n"
- "smlal v20.4s, v3.4h, v12.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "ldr x22, [x5, #0xa0]\n"
- "ldr x21, [x5, #0xa8]\n"
- "smlal2 v15.4s, v30.8h, v2.8h\n"
- "ldr d30, [x24, x3]\n"
- "smlal v7.4s, v16.4h, v27.4h\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v12.8h\n"
- "ldr d3, [x6, #0x58]\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "ldr d12, [x20, x3]\n"
- "smlal v20.4s, v16.4h, v2.4h\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal v23.4s, v14.4h, v2.4h\n"
- "ldr x20, [x5, #0xb0]\n"
- "ldr x13, [x5, #0xb8]\n"
- "smlal2 v15.4s, v16.8h, v27.8h\n"
- "smlal v7.4s, v4.4h, v1.4h\n"
- "ldr x12, [x5, #0xc0]\n"
- "ldr x11, [x5, #0xc8]\n"
- "smlal2 v5.4s, v16.8h, v2.8h\n"
- "ldr d16, [x23, x3]\n"
- "smlal2 v22.4s, v28.8h, v2.8h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v2.8h\n"
- "ldr d2, [x6, #0x60]\n"
- "smlal v20.4s, v4.4h, v27.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v27.4h\n"
- "smlal v23.4s, v25.4h, v27.4h\n"
- "ldr x10, [x5, #0xd0]\n"
- "ldr x9, [x5, #0xd8]\n"
- "smlal2 v15.4s, v4.8h, v1.8h\n"
- "smlal v7.4s, v17.4h, v31.4h\n"
- "ldr x28, [x5, #0xe0]\n"
- "ldr x27, [x5, #0xe8]\n"
- "smlal2 v5.4s, v4.8h, v27.8h\n"
- "ldr d4, [x22, x3]\n"
- "smlal2 v22.4s, v14.8h, v27.8h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v27.8h\n"
- "ldr d27, [x6, #0x68]\n"
- "smlal v20.4s, v17.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v1.4h\n"
- "smlal v23.4s, v10.4h, v1.4h\n"
- "ldr x26, [x5, #0xf0]\n"
- "ldr x25, [x5, #0xf8]\n"
- "smlal2 v15.4s, v17.8h, v31.8h\n"
- "smlal v7.4s, v6.4h, v8.4h\n"
- "ldr x24, [x5, #0x100]\n"
- "ldr x23, [x5, #0x108]\n"
- "smlal2 v5.4s, v17.8h, v1.8h\n"
- "ldr d17, [x21, x3]\n"
- "smlal2 v22.4s, v25.8h, v1.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v1.8h\n"
- "ldr d1, [x6, #0x70]\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v31.4h\n"
- "smlal v23.4s, v9.4h, v31.4h\n"
- "ldr x22, [x5, #0x110]\n"
- "ldr x21, [x5, #0x118]\n"
- "smlal2 v15.4s, v6.8h, v8.8h\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
- "subs x2, x2, #0x1\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal2 v22.4s, v10.8h, v31.8h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v31.8h\n"
- "ldr d31, [x6, #0x78]\n"
- "smlal v20.4s, v29.4h, v8.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v9.4h, v8.4h\n"
- "smlal v23.4s, v30.4h, v8.4h\n"
+ "ldr d3, [x5, #0x28]\n"
+ "ldr d2, [x5, #0x30]\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "ldr d4, [x5, #0x38]\n"
+ "ldr d22, [x5, #0x40]\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "ldr d24, [x5, #0x48]\n"
+ "ldr x23, [x4, #0x50]\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x50]\n"
+ "ldr x22, [x4, #0x58]\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "ldr d21, [x5, #0x58]\n"
+ "ldr x21, [x4, #0x60]\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "ldr x20, [x4, #0x68]\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "ldr x28, [x4, #0x70]\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "ldr d12, [x23, x2]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "ldr d7, [x22, x2]\n"
+ "ldr x27, [x4, #0x78]\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "ldr x26, [x4, #0x80]\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "ldr x25, [x4, #0x88]\n"
+ "ldr x24, [x4, #0x90]\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
+ "ldr x23, [x4, #0x98]\n"
+ "ldr x22, [x4, #0xa0]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x21, x2]\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v17.4h\n"
+ "smlal2 v30.4s, v12.8h, v17.8h\n"
+ "ldr d17, [x20, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal v1.4s, v12.4h, v11.4h\n"
+ "ssubl v21.8h, v21.8b, v9.8b\n"
+ "ldr x21, [x4, #0xa8]\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal2 v25.4s, v12.8h, v11.8h\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "ldr d26, [x28, x2]\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v7.4h, v11.4h\n"
+ "smlal2 v30.4s, v7.8h, v11.8h\n"
+ "ldr d11, [x27, x2]\n"
+ "ldr x13, [x4, #0xb8]\n"
+ "smlal v27.4s, v28.4h, v23.4h\n"
+ "smlal v1.4s, v7.4h, v23.4h\n"
+ "ldr x12, [x4, #0xc0]\n"
+ "ldr x11, [x4, #0xc8]\n"
+ "smlal2 v6.4s, v28.8h, v23.8h\n"
+ "ldr d28, [x26, x2]\n"
+ "smlal2 v25.4s, v7.8h, v23.8h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v16.4h, v3.4h\n"
+ "smlal2 v0.4s, v16.8h, v3.8h\n"
+ "ldr d16, [x25, x2]\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "ldr d23, [x24, x2]\n"
+ "ldr x10, [x4, #0xd0]\n"
+ "smlal v27.4s, v20.4h, v3.4h\n"
+ "smlal v1.4s, v18.4h, v3.4h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x9, [x4, #0xd8]\n"
+ "smlal2 v6.4s, v20.8h, v3.8h\n"
+ "smlal2 v25.4s, v18.8h, v3.8h\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x28, [x4, #0xe0]\n"
+ "smlal v8.4s, v20.4h, v2.4h\n"
+ "smlal2 v0.4s, v20.8h, v2.8h\n"
+ "ldr d20, [x23, x2]\n"
+ "usubl v23.8h, v23.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v3.4h\n"
+ "smlal2 v30.4s, v17.8h, v3.8h\n"
+ "ldr d3, [x5, #0x60]\n"
+ "ldr x27, [x4, #0xe8]\n"
+ "smlal v27.4s, v19.4h, v2.4h\n"
+ "smlal v1.4s, v17.4h, v2.4h\n"
+ "ldr x26, [x4, #0xf0]\n"
+ "ldr x25, [x4, #0xf8]\n"
+ "smlal2 v6.4s, v19.8h, v2.8h\n"
+ "smlal2 v25.4s, v17.8h, v2.8h\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x24, [x4, #0x100]\n"
+ "smlal v8.4s, v19.4h, v4.4h\n"
+ "smlal2 v0.4s, v19.8h, v4.8h\n"
+ "ldr d19, [x22, x2]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v2.4h\n"
+ "smlal2 v30.4s, v26.8h, v2.8h\n"
+ "ldr d2, [x5, #0x68]\n"
+ "ldr x23, [x4, #0x108]\n"
+ "smlal v27.4s, v12.4h, v4.4h\n"
+ "smlal v1.4s, v26.4h, v4.4h\n"
+ "ldr x22, [x4, #0x110]\n"
+ "subs x14, x14, #0x1\n"
+ "smlal2 v6.4s, v12.8h, v4.8h\n"
+ "smlal2 v25.4s, v26.8h, v4.8h\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v22.4h\n"
+ "smlal2 v0.4s, v12.8h, v22.8h\n"
+ "ldr d12, [x21, x2]\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v4.4h\n"
+ "smlal2 v30.4s, v11.8h, v4.8h\n"
+ "ldr d4, [x5, #0x70]\n"
+ "ldr x21, [x4, #0x118]\n"
+ "smlal v27.4s, v7.4h, v22.4h\n"
+ "smlal v1.4s, v11.4h, v22.4h\n"
+ "smlal2 v6.4s, v7.8h, v22.8h\n"
+ "smlal2 v25.4s, v11.8h, v22.8h\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v24.4h\n"
+ "smlal2 v0.4s, v7.8h, v24.8h\n"
+ "ldr d7, [x20, x2]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v22.4h\n"
+ "smlal2 v30.4s, v28.8h, v22.8h\n"
+ "ldr d22, [x5, #0x78]\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "ldr d28, [x13, x3]\n"
- "smlal v7.4s, v14.4h, v3.4h\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v5.4s, v29.8h, v8.8h\n"
- "ldr d29, [x6, #0x80]\n"
- "smlal2 v22.4s, v9.8h, v8.8h\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "smlal2 v19.4s, v30.8h, v8.8h\n"
- "ldr d8, [x12, x3]\n"
- "smlal v20.4s, v14.4h, v21.4h\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "smlal v24.4s, v12.4h, v21.4h\n"
- "smlal v23.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v14.8h, v3.8h\n"
- "smlal v7.4s, v25.4h, v2.4h\n"
- "smlal2 v5.4s, v14.8h, v21.8h\n"
- "ldr d14, [x11, x3]\n"
- "smlal2 v22.4s, v12.8h, v21.8h\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v21.8h\n"
- "ldr d21, [x6, #0x88]\n"
- "smlal v20.4s, v25.4h, v3.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v16.4h, v3.4h\n"
- "smlal v23.4s, v4.4h, v3.4h\n"
- "smlal2 v15.4s, v25.8h, v2.8h\n"
- "smlal v7.4s, v10.4h, v27.4h\n"
- "smlal2 v5.4s, v25.8h, v3.8h\n"
- "ldr d25, [x10, x3]\n"
- "smlal2 v22.4s, v16.8h, v3.8h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v3.8h\n"
- "ldr d3, [x6, #0x90]\n"
- "smlal v20.4s, v10.4h, v2.4h\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
- "smlal v24.4s, v4.4h, v2.4h\n"
- "smlal v23.4s, v17.4h, v2.4h\n"
- "smlal2 v15.4s, v10.8h, v27.8h\n"
- "smlal v7.4s, v9.4h, v1.4h\n"
- "smlal2 v5.4s, v10.8h, v2.8h\n"
- "ldr d10, [x9, x3]\n"
- "smlal2 v22.4s, v4.8h, v2.8h\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v2.8h\n"
- "ldr d2, [x6, #0x98]\n"
- "smlal v20.4s, v9.4h, v27.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v17.4h, v27.4h\n"
- "smlal v23.4s, v6.4h, v27.4h\n"
- "smlal2 v15.4s, v9.8h, v1.8h\n"
- "smlal v7.4s, v12.4h, v31.4h\n"
- "smlal2 v5.4s, v9.8h, v27.8h\n"
- "ldr d9, [x28, x3]\n"
- "smlal2 v22.4s, v17.8h, v27.8h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v27.8h\n"
- "ldr d27, [x6, #0xa0]\n"
- "smlal v20.4s, v30.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v1.4h\n"
- "smlal v23.4s, v28.4h, v1.4h\n"
- "smlal2 v15.4s, v12.8h, v31.8h\n"
- "ldr d12, [x27, x3]\n"
- "smlal v7.4s, v16.4h, v29.4h\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "smlal2 v5.4s, v30.8h, v1.8h\n"
- "ldr d30, [x6, #0xa8]\n"
- "smlal2 v22.4s, v6.8h, v1.8h\n"
- "ssubl v30.8h, v30.8b, v13.8b\n"
- "smlal2 v19.4s, v28.8h, v1.8h\n"
- "ldr d1, [x26, x3]\n"
- "smlal v20.4s, v16.4h, v31.4h\n"
- "usubl v1.8h, v1.8b, v18.8b\n"
- "smlal v24.4s, v8.4h, v31.4h\n"
- "smlal v23.4s, v14.4h, v31.4h\n"
- "smlal2 v15.4s, v16.8h, v29.8h\n"
- "smlal v7.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v16.8h, v31.8h\n"
- "ldr d16, [x25, x3]\n"
- "smlal2 v22.4s, v8.8h, v31.8h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v31.8h\n"
- "ldr d31, [x6, #0xb0]\n"
- "smlal v20.4s, v4.4h, v29.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v29.4h\n"
- "smlal v23.4s, v25.4h, v29.4h\n"
- "smlal2 v15.4s, v4.8h, v21.8h\n"
- "smlal v7.4s, v17.4h, v3.4h\n"
- "smlal2 v5.4s, v4.8h, v29.8h\n"
- "ldr d4, [x24, x3]\n"
- "smlal2 v22.4s, v14.8h, v29.8h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v29.8h\n"
- "ldr d29, [x6, #0xb8]\n"
- "smlal v20.4s, v17.4h, v21.4h\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v15.4s, v17.8h, v3.8h\n"
- "smlal v7.4s, v6.4h, v2.4h\n"
- "smlal2 v5.4s, v17.8h, v21.8h\n"
- "ldr d17, [x23, x3]\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "ldr d21, [x6, #0xc0]\n"
- "smlal v20.4s, v6.4h, v3.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v3.4h\n"
- "smlal v23.4s, v9.4h, v3.4h\n"
- "add x6, x6, #0xc8\n"
- "smlal2 v15.4s, v6.8h, v2.8h\n"
- "smlal v7.4s, v8.4h, v27.4h\n"
- "smlal2 v5.4s, v6.8h, v3.8h\n"
- "ldr d6, [x22, x3]\n"
- "smlal2 v22.4s, v10.8h, v3.8h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v3.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal v20.4s, v28.4h, v2.4h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal v24.4s, v9.4h, v2.4h\n"
- "smlal v23.4s, v12.4h, v2.4h\n"
- "add x3, x3, #0x8\n"
- "smlal2 v15.4s, v8.8h, v27.8h\n"
- "ldr q8, [x7, #0x0]\n"
- "smlal v7.4s, v14.4h, v30.4h\n"
- "smlal2 v5.4s, v28.8h, v2.8h\n"
- "ldr q28, [x8, #0x0]\n"
- "smlal2 v22.4s, v9.8h, v2.8h\n"
- "smlal2 v19.4s, v12.8h, v2.8h\n"
- "ldr q2, [x7, #0x10]\n"
- "smlal v20.4s, v14.4h, v27.4h\n"
+ "smlal v27.4s, v29.4h, v24.4h\n"
+ "smlal v1.4s, v28.4h, v24.4h\n"
+ "smlal2 v6.4s, v29.8h, v24.8h\n"
+ "ldr d29, [x13, x2]\n"
+ "smlal2 v25.4s, v28.8h, v24.8h\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v18.4h, v31.4h\n"
+ "smlal2 v0.4s, v18.8h, v31.8h\n"
+ "ldr d18, [x5, #0x80]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v24.4h\n"
+ "smlal2 v30.4s, v16.8h, v24.8h\n"
+ "ldr d24, [x12, x2]\n"
+ "smlal v27.4s, v17.4h, v31.4h\n"
+ "smlal v1.4s, v23.4h, v31.4h\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v6.4s, v17.8h, v31.8h\n"
+ "smlal2 v25.4s, v23.8h, v31.8h\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v8.4s, v17.4h, v21.4h\n"
+ "smlal2 v0.4s, v17.8h, v21.8h\n"
+ "ldr d17, [x11, x2]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x88]\n"
+ "smlal v27.4s, v26.4h, v21.4h\n"
+ "smlal v1.4s, v20.4h, v21.4h\n"
+ "smlal2 v6.4s, v26.8h, v21.8h\n"
+ "smlal2 v25.4s, v20.8h, v21.8h\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v8.4s, v26.4h, v3.4h\n"
+ "smlal2 v0.4s, v26.8h, v3.8h\n"
+ "ldr d26, [x10, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v21.4h\n"
+ "smlal2 v30.4s, v19.8h, v21.8h\n"
+ "ldr d21, [x5, #0x90]\n"
+ "smlal v27.4s, v11.4h, v3.4h\n"
+ "smlal v1.4s, v19.4h, v3.4h\n"
+ "smlal2 v6.4s, v11.8h, v3.8h\n"
+ "smlal2 v25.4s, v19.8h, v3.8h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v11.4h, v2.4h\n"
+ "smlal2 v0.4s, v11.8h, v2.8h\n"
+ "ldr d11, [x9, x2]\n"
+ "ssubl v21.8h, v21.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v3.4h\n"
+ "smlal2 v30.4s, v12.8h, v3.8h\n"
+ "ldr d3, [x5, #0x98]\n"
+ "smlal v27.4s, v28.4h, v2.4h\n"
+ "smlal v1.4s, v12.4h, v2.4h\n"
+ "smlal2 v6.4s, v28.8h, v2.8h\n"
+ "smlal2 v25.4s, v12.8h, v2.8h\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v8.4s, v28.4h, v4.4h\n"
+ "smlal2 v0.4s, v28.8h, v4.8h\n"
+ "ldr d28, [x28, x2]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v2.4h\n"
+ "smlal2 v30.4s, v7.8h, v2.8h\n"
+ "ldr d2, [x5, #0xa0]\n"
+ "smlal v27.4s, v16.4h, v4.4h\n"
+ "smlal v1.4s, v7.4h, v4.4h\n"
+ "smlal2 v6.4s, v16.8h, v4.8h\n"
+ "ldr d16, [x27, x2]\n"
+ "smlal2 v25.4s, v7.8h, v4.8h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "smlal v8.4s, v23.4h, v22.4h\n"
+ "smlal2 v0.4s, v23.8h, v22.8h\n"
+ "ldr d23, [x5, #0xa8]\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v4.4h\n"
+ "smlal2 v30.4s, v29.8h, v4.8h\n"
+ "ldr d4, [x26, x2]\n"
+ "smlal v27.4s, v20.4h, v22.4h\n"
+ "smlal v1.4s, v24.4h, v22.4h\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "smlal2 v6.4s, v20.8h, v22.8h\n"
+ "smlal2 v25.4s, v24.8h, v22.8h\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "ldr d20, [x25, x2]\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v22.4h\n"
+ "smlal2 v30.4s, v17.8h, v22.8h\n"
+ "ldr d22, [x5, #0xb0]\n"
+ "smlal v27.4s, v19.4h, v18.4h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v6.4s, v19.8h, v18.8h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "smlal v8.4s, v19.4h, v31.4h\n"
+ "smlal2 v0.4s, v19.8h, v31.8h\n"
+ "ldr d19, [x24, x2]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v18.4h\n"
+ "smlal2 v30.4s, v26.8h, v18.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "smlal v27.4s, v12.4h, v31.4h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v6.4s, v12.8h, v31.8h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v21.4h\n"
+ "smlal2 v0.4s, v12.8h, v21.8h\n"
+ "ldr d12, [x23, x2]\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v31.4h\n"
+ "smlal2 v30.4s, v11.8h, v31.8h\n"
+ "ldr d31, [x5, #0xc0]\n"
+ "add x5, x5, #0xc8\n"
+ "smlal v27.4s, v7.4h, v21.4h\n"
+ "smlal v1.4s, v11.4h, v21.4h\n"
+ "smlal2 v6.4s, v7.8h, v21.8h\n"
+ "smlal2 v25.4s, v11.8h, v21.8h\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v3.4h\n"
+ "smlal2 v0.4s, v7.8h, v3.8h\n"
+ "ldr d7, [x22, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v21.4h\n"
+ "smlal2 v30.4s, v28.8h, v21.8h\n"
+ "ldr d21, [x21, x2]\n"
+ "add x2, x2, #0x8\n"
+ "smlal v27.4s, v29.4h, v3.4h\n"
+ "smlal v1.4s, v28.4h, v3.4h\n"
+ "smlal2 v6.4s, v29.8h, v3.8h\n"
+ "ldr q29, [x6, #0x0]\n"
+ "smlal2 v25.4s, v28.8h, v3.8h\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v2.4h\n"
+ "smlal2 v0.4s, v24.8h, v2.8h\n"
+ "ldr q24, [x7, #0x0]\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "smlal v5.4s, v16.4h, v3.4h\n"
+ "smlal2 v30.4s, v16.8h, v3.8h\n"
+ "ldr q3, [x6, #0x10]\n"
+ "add x6, x6, #0x20\n"
+ "smlal v27.4s, v17.4h, v2.4h\n"
+ "smlal v1.4s, v4.4h, v2.4h\n"
+ "smlal2 v6.4s, v17.8h, v2.8h\n"
+ "smlal2 v25.4s, v4.8h, v2.8h\n"
+ "ldr q4, [x7, #0x10]\n"
"add x7, x7, #0x20\n"
- "smlal v24.4s, v1.4h, v27.4h\n"
- "smlal v23.4s, v16.4h, v27.4h\n"
- "smlal2 v15.4s, v14.8h, v30.8h\n"
- "smlal v7.4s, v25.4h, v31.4h\n"
- "smlal2 v5.4s, v14.8h, v27.8h\n"
- "ldr q14, [x8, #0x10]\n"
- "smlal2 v22.4s, v1.8h, v27.8h\n"
- "add x8, x8, #0x20\n"
- "smlal2 v19.4s, v16.8h, v27.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal v24.4s, v16.4h, v30.4h\n"
- "smlal v23.4s, v4.4h, v30.4h\n"
- "smlal2 v15.4s, v25.8h, v31.8h\n"
- "smlal v7.4s, v10.4h, v29.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal2 v22.4s, v16.8h, v30.8h\n"
- "smlal2 v19.4s, v4.8h, v30.8h\n"
- "smlal v20.4s, v10.4h, v31.4h\n"
- "smlal v24.4s, v4.4h, v31.4h\n"
- "smlal v23.4s, v17.4h, v31.4h\n"
- "smlal2 v15.4s, v10.8h, v29.8h\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "sqrdmulh v7.4s, v7.4s, v8.4s\n"
- "smlal2 v5.4s, v10.8h, v31.8h\n"
- "smlal2 v22.4s, v4.8h, v31.8h\n"
- "and v27.16b, v7.16b, v28.16b\n"
- "smlal2 v19.4s, v17.8h, v31.8h\n"
- "smlal v20.4s, v9.4h, v29.4h\n"
- "sshr v27.4s, v27.4s, #0x1f\n"
- "smlal v24.4s, v17.4h, v29.4h\n"
- "smlal v23.4s, v6.4h, v29.4h\n"
- "sqadd v7.4s, v7.4s, v27.4s\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal2 v5.4s, v9.8h, v29.8h\n"
- "sqrdmulh v15.4s, v15.4s, v2.4s\n"
- "smlal2 v22.4s, v17.8h, v29.8h\n"
- "smlal2 v19.4s, v6.8h, v29.8h\n"
- "and v9.16b, v15.16b, v14.16b\n"
- "smlal v20.4s, v12.4h, v21.4h\n"
- "smlal v24.4s, v6.4h, v21.4h\n"
- "sqrdmulh v20.4s, v20.4s, v8.4s\n"
- "smlal v23.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v12.8h, v21.8h\n"
- "sqrdmulh v24.4s, v24.4s, v8.4s\n"
- "smlal2 v22.4s, v6.8h, v21.8h\n"
- "smlal2 v19.4s, v3.8h, v21.8h\n"
- "sqrdmulh v23.4s, v23.4s, v8.4s\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "and v25.16b, v20.16b, v28.16b\n"
- "sqrdmulh v5.4s, v5.4s, v2.4s\n"
- "and v10.16b, v24.16b, v28.16b\n"
- "sqrdmulh v22.4s, v22.4s, v2.4s\n"
- "and v21.16b, v23.16b, v28.16b\n"
- "sqrdmulh v19.4s, v19.4s, v2.4s\n"
- "sqadd v15.4s, v15.4s, v9.4s\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "and v9.16b, v5.16b, v14.16b\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "and v12.16b, v22.16b, v14.16b\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v17.16b, v19.16b, v14.16b\n"
- "sqadd v20.4s, v20.4s, v25.4s\n"
- "sshr v9.4s, v9.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v10.4s\n"
+ "smlal v8.4s, v17.4h, v23.4h\n"
+ "smlal2 v0.4s, v17.8h, v23.8h\n"
+ "smlal v5.4s, v20.4h, v2.4h\n"
+ "smlal2 v30.4s, v20.8h, v2.8h\n"
+ "smlal v27.4s, v26.4h, v23.4h\n"
+ "smlal v1.4s, v20.4h, v23.4h\n"
+ "smlal2 v6.4s, v26.8h, v23.8h\n"
+ "smlal2 v25.4s, v20.8h, v23.8h\n"
+ "smlal v8.4s, v26.4h, v22.4h\n"
+ "smlal2 v0.4s, v26.8h, v22.8h\n"
+ "smlal v5.4s, v19.4h, v23.4h\n"
+ "smlal2 v30.4s, v19.8h, v23.8h\n"
+ "smlal v27.4s, v11.4h, v22.4h\n"
+ "smlal v1.4s, v19.4h, v22.4h\n"
+ "smlal2 v6.4s, v11.8h, v22.8h\n"
+ "smlal2 v25.4s, v19.8h, v22.8h\n"
+ "smlal v8.4s, v11.4h, v18.4h\n"
+ "smlal2 v0.4s, v11.8h, v18.8h\n"
+ "smlal v5.4s, v12.4h, v22.4h\n"
+ "smlal2 v30.4s, v12.8h, v22.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal v1.4s, v12.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal2 v25.4s, v12.8h, v18.8h\n"
+ "smlal v8.4s, v28.4h, v31.4h\n"
+ "smlal2 v0.4s, v28.8h, v31.8h\n"
+ "smlal v5.4s, v7.4h, v18.4h\n"
+ "smlal2 v30.4s, v7.8h, v18.8h\n"
+ "smlal v27.4s, v16.4h, v31.4h\n"
+ "smlal v1.4s, v7.4h, v31.4h\n"
+ "smlal2 v6.4s, v16.8h, v31.8h\n"
+ "smlal2 v25.4s, v7.8h, v31.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v29.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v3.4s\n"
+ "smlal v5.4s, v21.4h, v31.4h\n"
+ "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "and v17.16b, v8.16b, v24.16b\n"
+ "sqrdmulh v27.4s, v27.4s, v29.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v29.4s\n"
+ "and v12.16b, v0.16b, v4.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v3.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v3.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v5.4s, v5.4s, v29.4s\n"
"sshr v12.4s, v12.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v21.4s\n"
+ "and v21.16b, v27.16b, v24.16b\n"
+ "and v16.16b, v1.16b, v24.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v3.4s\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v28.16b, v5.16b, v24.16b\n"
+ "sqadd v0.4s, v0.4s, v12.4s\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "and v18.16b, v6.16b, v4.16b\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v17.16b, v25.16b, v4.16b\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v3.16b, v30.16b, v4.16b\n"
+ "sqadd v27.4s, v27.4s, v21.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v16.4s\n"
"sshr v17.4s, v17.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v28.4s\n"
- "srshl v20.4s, v20.4s, v28.4s\n"
- "sqadd v5.4s, v5.4s, v9.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqadd v22.4s, v22.4s, v12.4s\n"
- "srshl v23.4s, v23.4s, v28.4s\n"
- "sqadd v19.4s, v19.4s, v17.4s\n"
- "srshl v15.4s, v15.4s, v14.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v14.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v14.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v14.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "str d7, [x17, x4]\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str d20, [x16, x4]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str d24, [x15, x4]\n"
- "str d23, [x14, x4]\n"
- "ldr q7, [x20, #0x0]\n"
- "ldr q15, [x20, #0x10]\n"
+ "sqadd v5.4s, v5.4s, v28.4s\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v24.4s\n"
+ "srshl v27.4s, v27.4s, v24.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v24.4s\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "srshl v5.4s, v5.4s, v24.4s\n"
+ "sqadd v30.4s, v30.4s, v3.4s\n"
+ "srshl v0.4s, v0.4s, v4.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v4.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v4.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v4.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "str d8, [x8, x3]\n"
+ "str d27, [x17, x3]\n"
+ "str d1, [x16, x3]\n"
+ "str d5, [x15, x3]\n"
+ "add x3, x3, #0x8\n"
+ "ldr q8, [x20, #0x0]\n"
+ "ldr q0, [x20, #0x10]\n"
"add x20, x20, #0x20\n"
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "add x4, x4, #0x8\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldr d12, [x6, #0x20]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "ssubl v6.8h, v6.8b, v13.8b\n"
- "ssubl v14.8h, v14.8b, v13.8b\n"
- "ldr d31, [x9, x3]\n"
- "ldr d17, [x28, x3]\n"
- "ssubl v10.8h, v10.8b, v13.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ldr d30, [x27, x3]\n"
- "ldr d16, [x26, x3]\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "usubl v31.8h, v31.8b, v18.8b\n"
- "ldr d3, [x25, x3]\n"
- "ldr d4, [x24, x3]\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "ldr d25, [x23, x3]\n"
- "ldr d9, [x22, x3]\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "ldr d29, [x21, x3]\n"
- "ldr d28, [x20, x3]\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "ldr d23, [x5, #0x20]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "ssubl v12.8h, v12.8b, v9.8b\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "ssubl v11.8h, v11.8b, v9.8b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "ldr d24, [x9, x2]\n"
+ "ldr d21, [x28, x2]\n"
+ "ldr d16, [x27, x2]\n"
+ "ldr d20, [x26, x2]\n"
+ "ldr d7, [x25, x2]\n"
+ "ldr d19, [x24, x2]\n"
+ "ldr d28, [x23, x2]\n"
+ "ldr d26, [x22, x2]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "ldr d29, [x21, x2]\n"
+ "ldr d18, [x20, x2]\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "usubl v18.8h, v18.8b, v15.8b\n"
"bgt 1b\n"
"2:" // Tail
- "ldr d27, [x6, #0x28]\n"
- "ldr d1, [x6, #0x30]\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "ldr d2, [x6, #0x38]\n"
- "ldr d31, [x6, #0x40]\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "ldr d8, [x6, #0x48]\n"
- "ldr x22, [x5, #0x50]\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "ldr x20, [x5, #0x58]\n"
- "ldr x21, [x5, #0x60]\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "ldr d6, [x20, x3]\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "ldr x22, [x5, #0x70]\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "ldr d3, [x21, x3]\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "ldr d14, [x20, x3]\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal v23.4s, v17.4h, v10.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "ldr x21, [x5, #0x78]\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "ldr x20, [x5, #0x80]\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "ldr d25, [x22, x3]\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v10.8h\n"
- "ldr d10, [x21, x3]\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "smlal v24.4s, v17.4h, v21.4h\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr x24, [x5, #0x88]\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "smlal v7.4s, v30.4h, v27.4h\n"
- "ldr x23, [x5, #0x90]\n"
- "ldr x22, [x5, #0x98]\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "ldr d9, [x20, x3]\n"
- "smlal2 v22.4s, v17.8h, v21.8h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "ldr d21, [x6, #0x50]\n"
- "smlal v20.4s, v3.4h, v12.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "ldr x21, [x5, #0xa0]\n"
- "ldr x20, [x5, #0xa8]\n"
- "smlal2 v15.4s, v30.8h, v27.8h\n"
- "ldr d30, [x24, x3]\n"
- "smlal v7.4s, v16.4h, v1.4h\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v12.8h\n"
- "ldr d3, [x6, #0x58]\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "ldr d12, [x23, x3]\n"
- "smlal v20.4s, v16.4h, v27.4h\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "smlal v24.4s, v28.4h, v27.4h\n"
- "smlal v23.4s, v14.4h, v27.4h\n"
- "ldr x13, [x5, #0xb0]\n"
- "ldr x12, [x5, #0xb8]\n"
- "smlal2 v15.4s, v16.8h, v1.8h\n"
- "smlal v7.4s, v4.4h, v2.4h\n"
- "ldr x11, [x5, #0xc0]\n"
- "ldr x10, [x5, #0xc8]\n"
- "smlal2 v5.4s, v16.8h, v27.8h\n"
- "ldr d16, [x22, x3]\n"
- "smlal2 v22.4s, v28.8h, v27.8h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v27.8h\n"
- "ldr d27, [x6, #0x60]\n"
- "smlal v20.4s, v4.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v1.4h\n"
- "smlal v23.4s, v25.4h, v1.4h\n"
- "ldr x9, [x5, #0xd0]\n"
- "ldr x28, [x5, #0xd8]\n"
- "smlal2 v15.4s, v4.8h, v2.8h\n"
- "smlal v7.4s, v17.4h, v31.4h\n"
- "ldr x27, [x5, #0xe0]\n"
- "ldr x26, [x5, #0xe8]\n"
- "smlal2 v5.4s, v4.8h, v1.8h\n"
- "ldr d4, [x21, x3]\n"
- "smlal2 v22.4s, v14.8h, v1.8h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v1.8h\n"
- "ldr d1, [x6, #0x68]\n"
- "smlal v20.4s, v17.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v2.4h\n"
- "smlal v23.4s, v10.4h, v2.4h\n"
- "ldr x25, [x5, #0xf0]\n"
- "ldr x24, [x5, #0xf8]\n"
- "smlal2 v15.4s, v17.8h, v31.8h\n"
- "smlal v7.4s, v6.4h, v8.4h\n"
- "ldr x23, [x5, #0x100]\n"
- "ldr x22, [x5, #0x108]\n"
- "smlal2 v5.4s, v17.8h, v2.8h\n"
- "ldr d17, [x20, x3]\n"
- "smlal2 v22.4s, v25.8h, v2.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v2.8h\n"
- "ldr d2, [x6, #0x70]\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v31.4h\n"
- "smlal v23.4s, v9.4h, v31.4h\n"
- "ldr x21, [x5, #0x110]\n"
- "ldr x20, [x5, #0x118]\n"
- "smlal2 v15.4s, v6.8h, v8.8h\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
+ "ldr d4, [x5, #0x28]\n"
+ "ldr d3, [x5, #0x30]\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "ldr d22, [x5, #0x38]\n"
+ "ldr d2, [x5, #0x40]\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "ldr d24, [x5, #0x48]\n"
+ "ldr x21, [x4, #0x50]\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x50]\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "ldr d21, [x5, #0x58]\n"
+ "ldr x28, [x4, #0x60]\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "ldr x27, [x4, #0x68]\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "ldr x26, [x4, #0x70]\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "ldr d12, [x21, x2]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "ldr d7, [x20, x2]\n"
+ "ldr x25, [x4, #0x78]\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "ldr x24, [x4, #0x80]\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "ldr x23, [x4, #0x88]\n"
+ "ldr x22, [x4, #0x90]\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
+ "ldr x21, [x4, #0x98]\n"
+ "ldr x20, [x4, #0xa0]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "ldr d28, [x28, x2]\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v17.4h\n"
+ "smlal2 v30.4s, v12.8h, v17.8h\n"
+ "ldr d17, [x27, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal v1.4s, v12.4h, v11.4h\n"
+ "ssubl v21.8h, v21.8b, v9.8b\n"
+ "ldr x14, [x4, #0xa8]\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal2 v25.4s, v12.8h, v11.8h\n"
+ "ldr x13, [x4, #0xb0]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "ldr d26, [x26, x2]\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v7.4h, v11.4h\n"
+ "smlal2 v30.4s, v7.8h, v11.8h\n"
+ "ldr d11, [x25, x2]\n"
+ "ldr x12, [x4, #0xb8]\n"
+ "smlal v27.4s, v28.4h, v23.4h\n"
+ "smlal v1.4s, v7.4h, v23.4h\n"
+ "ldr x11, [x4, #0xc0]\n"
+ "ldr x10, [x4, #0xc8]\n"
+ "smlal2 v6.4s, v28.8h, v23.8h\n"
+ "ldr d28, [x24, x2]\n"
+ "smlal2 v25.4s, v7.8h, v23.8h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v16.4h, v4.4h\n"
+ "smlal2 v0.4s, v16.8h, v4.8h\n"
+ "ldr d16, [x23, x2]\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "ldr d23, [x22, x2]\n"
+ "ldr x9, [x4, #0xd0]\n"
+ "smlal v27.4s, v20.4h, v4.4h\n"
+ "smlal v1.4s, v18.4h, v4.4h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x28, [x4, #0xd8]\n"
+ "smlal2 v6.4s, v20.8h, v4.8h\n"
+ "smlal2 v25.4s, v18.8h, v4.8h\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x27, [x4, #0xe0]\n"
+ "smlal v8.4s, v20.4h, v3.4h\n"
+ "smlal2 v0.4s, v20.8h, v3.8h\n"
+ "ldr d20, [x21, x2]\n"
+ "usubl v23.8h, v23.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v4.4h\n"
+ "smlal2 v30.4s, v17.8h, v4.8h\n"
+ "ldr d4, [x5, #0x60]\n"
+ "ldr x26, [x4, #0xe8]\n"
+ "smlal v27.4s, v19.4h, v3.4h\n"
+ "smlal v1.4s, v17.4h, v3.4h\n"
+ "ldr x25, [x4, #0xf0]\n"
+ "ldr x24, [x4, #0xf8]\n"
+ "smlal2 v6.4s, v19.8h, v3.8h\n"
+ "smlal2 v25.4s, v17.8h, v3.8h\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x23, [x4, #0x100]\n"
+ "smlal v8.4s, v19.4h, v22.4h\n"
+ "smlal2 v0.4s, v19.8h, v22.8h\n"
+ "ldr d19, [x20, x2]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v3.4h\n"
+ "smlal2 v30.4s, v26.8h, v3.8h\n"
+ "ldr d3, [x5, #0x68]\n"
+ "ldr x22, [x4, #0x108]\n"
+ "smlal v27.4s, v12.4h, v22.4h\n"
+ "smlal v1.4s, v26.4h, v22.4h\n"
+ "ldr x21, [x4, #0x110]\n"
+ "ldr x20, [x4, #0x118]\n"
+ "smlal2 v6.4s, v12.8h, v22.8h\n"
+ "smlal2 v25.4s, v26.8h, v22.8h\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
"tst x1, #0x7\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "ldr d6, [x13, x3]\n"
- "smlal2 v22.4s, v10.8h, v31.8h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v31.8h\n"
- "ldr d31, [x6, #0x78]\n"
- "smlal v20.4s, v29.4h, v8.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v9.4h, v8.4h\n"
- "smlal v23.4s, v30.4h, v8.4h\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "ldr d28, [x12, x3]\n"
- "smlal v7.4s, v14.4h, v3.4h\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v5.4s, v29.8h, v8.8h\n"
- "ldr d29, [x6, #0x80]\n"
- "smlal2 v22.4s, v9.8h, v8.8h\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "smlal2 v19.4s, v30.8h, v8.8h\n"
- "ldr d8, [x11, x3]\n"
- "smlal v20.4s, v14.4h, v21.4h\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "smlal v24.4s, v12.4h, v21.4h\n"
- "smlal v23.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v14.8h, v3.8h\n"
- "smlal v7.4s, v25.4h, v27.4h\n"
- "smlal2 v5.4s, v14.8h, v21.8h\n"
- "ldr d14, [x10, x3]\n"
- "smlal2 v22.4s, v12.8h, v21.8h\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "smlal2 v19.4s, v16.8h, v21.8h\n"
- "ldr d21, [x6, #0x88]\n"
- "smlal v20.4s, v25.4h, v3.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v16.4h, v3.4h\n"
- "smlal v23.4s, v4.4h, v3.4h\n"
- "smlal2 v15.4s, v25.8h, v27.8h\n"
- "smlal v7.4s, v10.4h, v1.4h\n"
- "smlal2 v5.4s, v25.8h, v3.8h\n"
- "ldr d25, [x9, x3]\n"
- "smlal2 v22.4s, v16.8h, v3.8h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v3.8h\n"
- "ldr d3, [x6, #0x90]\n"
- "smlal v20.4s, v10.4h, v27.4h\n"
- "ssubl v3.8h, v3.8b, v13.8b\n"
- "smlal v24.4s, v4.4h, v27.4h\n"
- "smlal v23.4s, v17.4h, v27.4h\n"
- "smlal2 v15.4s, v10.8h, v1.8h\n"
- "smlal v7.4s, v9.4h, v2.4h\n"
- "smlal2 v5.4s, v10.8h, v27.8h\n"
- "ldr d10, [x28, x3]\n"
- "smlal2 v22.4s, v4.8h, v27.8h\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "smlal2 v19.4s, v17.8h, v27.8h\n"
- "ldr d27, [x6, #0x98]\n"
- "smlal v20.4s, v9.4h, v1.4h\n"
- "ssubl v27.8h, v27.8b, v13.8b\n"
- "smlal v24.4s, v17.4h, v1.4h\n"
- "smlal v23.4s, v6.4h, v1.4h\n"
- "smlal2 v15.4s, v9.8h, v2.8h\n"
- "smlal v7.4s, v12.4h, v31.4h\n"
- "smlal2 v5.4s, v9.8h, v1.8h\n"
- "ldr d9, [x27, x3]\n"
- "smlal2 v22.4s, v17.8h, v1.8h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v19.4s, v6.8h, v1.8h\n"
- "ldr d1, [x6, #0xa0]\n"
- "smlal v20.4s, v30.4h, v2.4h\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "smlal v24.4s, v6.4h, v2.4h\n"
- "smlal v23.4s, v28.4h, v2.4h\n"
- "smlal2 v15.4s, v12.8h, v31.8h\n"
- "ldr d12, [x26, x3]\n"
- "smlal v7.4s, v16.4h, v29.4h\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "smlal2 v5.4s, v30.8h, v2.8h\n"
- "ldr d30, [x6, #0xa8]\n"
- "smlal2 v22.4s, v6.8h, v2.8h\n"
- "ssubl v30.8h, v30.8b, v13.8b\n"
- "smlal2 v19.4s, v28.8h, v2.8h\n"
- "ldr d2, [x25, x3]\n"
- "smlal v20.4s, v16.4h, v31.4h\n"
- "usubl v2.8h, v2.8b, v18.8b\n"
- "smlal v24.4s, v8.4h, v31.4h\n"
- "smlal v23.4s, v14.4h, v31.4h\n"
- "smlal2 v15.4s, v16.8h, v29.8h\n"
- "smlal v7.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v16.8h, v31.8h\n"
- "ldr d16, [x24, x3]\n"
- "smlal2 v22.4s, v8.8h, v31.8h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "smlal2 v19.4s, v14.8h, v31.8h\n"
- "ldr d31, [x6, #0xb0]\n"
- "smlal v20.4s, v4.4h, v29.4h\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "smlal v24.4s, v14.4h, v29.4h\n"
- "smlal v23.4s, v25.4h, v29.4h\n"
- "smlal2 v15.4s, v4.8h, v21.8h\n"
- "smlal v7.4s, v17.4h, v3.4h\n"
- "smlal2 v5.4s, v4.8h, v29.8h\n"
- "ldr d4, [x23, x3]\n"
- "smlal2 v22.4s, v14.8h, v29.8h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v19.4s, v25.8h, v29.8h\n"
- "ldr d29, [x6, #0xb8]\n"
- "smlal v20.4s, v17.4h, v21.4h\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v15.4s, v17.8h, v3.8h\n"
- "smlal v7.4s, v6.4h, v27.4h\n"
- "smlal2 v5.4s, v17.8h, v21.8h\n"
- "ldr d17, [x22, x3]\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "ldr d21, [x6, #0xc0]\n"
- "smlal v20.4s, v6.4h, v3.4h\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "smlal v24.4s, v10.4h, v3.4h\n"
- "smlal v23.4s, v9.4h, v3.4h\n"
- "smlal2 v15.4s, v6.8h, v27.8h\n"
- "smlal v7.4s, v8.4h, v1.4h\n"
- "smlal2 v5.4s, v6.8h, v3.8h\n"
- "ldr d6, [x21, x3]\n"
- "smlal2 v22.4s, v10.8h, v3.8h\n"
- "usubl v6.8h, v6.8b, v18.8b\n"
- "smlal2 v19.4s, v9.8h, v3.8h\n"
- "ldr d3, [x20, x3]\n"
- "smlal v20.4s, v28.4h, v27.4h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal v24.4s, v9.4h, v27.4h\n"
- "smlal v23.4s, v12.4h, v27.4h\n"
- "add x3, x3, #0x8\n"
- "smlal2 v15.4s, v8.8h, v1.8h\n"
- "ldr q8, [x7, #0x0]\n"
- "smlal v7.4s, v14.4h, v30.4h\n"
- "smlal2 v5.4s, v28.8h, v27.8h\n"
- "ldr q28, [x8, #0x0]\n"
- "smlal2 v22.4s, v9.8h, v27.8h\n"
- "smlal2 v19.4s, v12.8h, v27.8h\n"
- "ldr q27, [x7, #0x10]\n"
- "smlal v20.4s, v14.4h, v1.4h\n"
+ "smlal v8.4s, v12.4h, v2.4h\n"
+ "smlal2 v0.4s, v12.8h, v2.8h\n"
+ "ldr d12, [x14, x2]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v22.4h\n"
+ "smlal2 v30.4s, v11.8h, v22.8h\n"
+ "ldr d22, [x5, #0x70]\n"
+ "smlal v27.4s, v7.4h, v2.4h\n"
+ "smlal v1.4s, v11.4h, v2.4h\n"
+ "smlal2 v6.4s, v7.8h, v2.8h\n"
+ "smlal2 v25.4s, v11.8h, v2.8h\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v24.4h\n"
+ "smlal2 v0.4s, v7.8h, v24.8h\n"
+ "ldr d7, [x13, x2]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v2.4h\n"
+ "smlal2 v30.4s, v28.8h, v2.8h\n"
+ "ldr d2, [x5, #0x78]\n"
+ "smlal v27.4s, v29.4h, v24.4h\n"
+ "smlal v1.4s, v28.4h, v24.4h\n"
+ "smlal2 v6.4s, v29.8h, v24.8h\n"
+ "ldr d29, [x12, x2]\n"
+ "smlal2 v25.4s, v28.8h, v24.8h\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v18.4h, v31.4h\n"
+ "smlal2 v0.4s, v18.8h, v31.8h\n"
+ "ldr d18, [x5, #0x80]\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v24.4h\n"
+ "smlal2 v30.4s, v16.8h, v24.8h\n"
+ "ldr d24, [x11, x2]\n"
+ "smlal v27.4s, v17.4h, v31.4h\n"
+ "smlal v1.4s, v23.4h, v31.4h\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v6.4s, v17.8h, v31.8h\n"
+ "smlal2 v25.4s, v23.8h, v31.8h\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v8.4s, v17.4h, v21.4h\n"
+ "smlal2 v0.4s, v17.8h, v21.8h\n"
+ "ldr d17, [x10, x2]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "ldr d31, [x5, #0x88]\n"
+ "smlal v27.4s, v26.4h, v21.4h\n"
+ "smlal v1.4s, v20.4h, v21.4h\n"
+ "smlal2 v6.4s, v26.8h, v21.8h\n"
+ "smlal2 v25.4s, v20.8h, v21.8h\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v8.4s, v26.4h, v4.4h\n"
+ "smlal2 v0.4s, v26.8h, v4.8h\n"
+ "ldr d26, [x9, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v21.4h\n"
+ "smlal2 v30.4s, v19.8h, v21.8h\n"
+ "ldr d21, [x5, #0x90]\n"
+ "smlal v27.4s, v11.4h, v4.4h\n"
+ "smlal v1.4s, v19.4h, v4.4h\n"
+ "smlal2 v6.4s, v11.8h, v4.8h\n"
+ "smlal2 v25.4s, v19.8h, v4.8h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal v8.4s, v11.4h, v3.4h\n"
+ "smlal2 v0.4s, v11.8h, v3.8h\n"
+ "ldr d11, [x28, x2]\n"
+ "ssubl v21.8h, v21.8b, v9.8b\n"
+ "smlal v5.4s, v12.4h, v4.4h\n"
+ "smlal2 v30.4s, v12.8h, v4.8h\n"
+ "ldr d4, [x5, #0x98]\n"
+ "smlal v27.4s, v28.4h, v3.4h\n"
+ "smlal v1.4s, v12.4h, v3.4h\n"
+ "smlal2 v6.4s, v28.8h, v3.8h\n"
+ "smlal2 v25.4s, v12.8h, v3.8h\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "smlal v8.4s, v28.4h, v22.4h\n"
+ "smlal2 v0.4s, v28.8h, v22.8h\n"
+ "ldr d28, [x27, x2]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v3.4h\n"
+ "smlal2 v30.4s, v7.8h, v3.8h\n"
+ "ldr d3, [x5, #0xa0]\n"
+ "smlal v27.4s, v16.4h, v22.4h\n"
+ "smlal v1.4s, v7.4h, v22.4h\n"
+ "smlal2 v6.4s, v16.8h, v22.8h\n"
+ "ldr d16, [x26, x2]\n"
+ "smlal2 v25.4s, v7.8h, v22.8h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "smlal v8.4s, v23.4h, v2.4h\n"
+ "smlal2 v0.4s, v23.8h, v2.8h\n"
+ "ldr d23, [x5, #0xa8]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v22.4h\n"
+ "smlal2 v30.4s, v29.8h, v22.8h\n"
+ "ldr d22, [x25, x2]\n"
+ "smlal v27.4s, v20.4h, v2.4h\n"
+ "smlal v1.4s, v24.4h, v2.4h\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "smlal2 v6.4s, v20.8h, v2.8h\n"
+ "smlal2 v25.4s, v24.8h, v2.8h\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "ldr d20, [x24, x2]\n"
+ "usubl v22.8h, v22.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v2.4h\n"
+ "smlal2 v30.4s, v17.8h, v2.8h\n"
+ "ldr d2, [x5, #0xb0]\n"
+ "smlal v27.4s, v19.4h, v18.4h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v6.4s, v19.8h, v18.8h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "smlal v8.4s, v19.4h, v31.4h\n"
+ "smlal2 v0.4s, v19.8h, v31.8h\n"
+ "ldr d19, [x23, x2]\n"
+ "ssubl v2.8h, v2.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v18.4h\n"
+ "smlal2 v30.4s, v26.8h, v18.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "smlal v27.4s, v12.4h, v31.4h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v6.4s, v12.8h, v31.8h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v12.4h, v21.4h\n"
+ "smlal2 v0.4s, v12.8h, v21.8h\n"
+ "ldr d12, [x22, x2]\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v31.4h\n"
+ "smlal2 v30.4s, v11.8h, v31.8h\n"
+ "ldr d31, [x5, #0xc0]\n"
+ "smlal v27.4s, v7.4h, v21.4h\n"
+ "smlal v1.4s, v11.4h, v21.4h\n"
+ "smlal2 v6.4s, v7.8h, v21.8h\n"
+ "smlal2 v25.4s, v11.8h, v21.8h\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "smlal v8.4s, v7.4h, v4.4h\n"
+ "smlal2 v0.4s, v7.8h, v4.8h\n"
+ "ldr d7, [x21, x2]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v21.4h\n"
+ "smlal2 v30.4s, v28.8h, v21.8h\n"
+ "ldr d21, [x20, x2]\n"
+ "add x2, x2, #0x8\n"
+ "smlal v27.4s, v29.4h, v4.4h\n"
+ "smlal v1.4s, v28.4h, v4.4h\n"
+ "smlal2 v6.4s, v29.8h, v4.8h\n"
+ "ldr q29, [x6, #0x0]\n"
+ "smlal2 v25.4s, v28.8h, v4.8h\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v3.4h\n"
+ "smlal2 v0.4s, v24.8h, v3.8h\n"
+ "ldr q24, [x7, #0x0]\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "smlal v5.4s, v16.4h, v4.4h\n"
+ "smlal2 v30.4s, v16.8h, v4.8h\n"
+ "ldr q4, [x6, #0x10]\n"
+ "add x6, x6, #0x20\n"
+ "smlal v27.4s, v17.4h, v3.4h\n"
+ "smlal v1.4s, v22.4h, v3.4h\n"
+ "smlal2 v6.4s, v17.8h, v3.8h\n"
+ "smlal2 v25.4s, v22.8h, v3.8h\n"
+ "ldr q22, [x7, #0x10]\n"
"add x7, x7, #0x20\n"
- "smlal v24.4s, v2.4h, v1.4h\n"
- "smlal v23.4s, v16.4h, v1.4h\n"
- "smlal2 v15.4s, v14.8h, v30.8h\n"
- "smlal v7.4s, v25.4h, v31.4h\n"
- "smlal2 v5.4s, v14.8h, v1.8h\n"
- "ldr q14, [x8, #0x10]\n"
- "smlal2 v22.4s, v2.8h, v1.8h\n"
- "add x8, x8, #0x20\n"
- "smlal2 v19.4s, v16.8h, v1.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal v24.4s, v16.4h, v30.4h\n"
- "smlal v23.4s, v4.4h, v30.4h\n"
- "smlal2 v15.4s, v25.8h, v31.8h\n"
- "smlal v7.4s, v10.4h, v29.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal2 v22.4s, v16.8h, v30.8h\n"
- "smlal2 v19.4s, v4.8h, v30.8h\n"
- "smlal v20.4s, v10.4h, v31.4h\n"
- "smlal v24.4s, v4.4h, v31.4h\n"
- "smlal v23.4s, v17.4h, v31.4h\n"
- "smlal2 v15.4s, v10.8h, v29.8h\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "sqrdmulh v7.4s, v7.4s, v8.4s\n"
- "smlal2 v5.4s, v10.8h, v31.8h\n"
- "smlal2 v22.4s, v4.8h, v31.8h\n"
- "and v4.16b, v7.16b, v28.16b\n"
- "smlal2 v19.4s, v17.8h, v31.8h\n"
- "smlal v20.4s, v9.4h, v29.4h\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "smlal v24.4s, v17.4h, v29.4h\n"
- "smlal v23.4s, v6.4h, v29.4h\n"
- "sqadd v7.4s, v7.4s, v4.4s\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal2 v5.4s, v9.8h, v29.8h\n"
- "sqrdmulh v15.4s, v15.4s, v27.4s\n"
- "smlal2 v22.4s, v17.8h, v29.8h\n"
- "smlal2 v19.4s, v6.8h, v29.8h\n"
- "and v30.16b, v15.16b, v14.16b\n"
- "smlal v20.4s, v12.4h, v21.4h\n"
- "smlal v24.4s, v6.4h, v21.4h\n"
- "sqrdmulh v20.4s, v20.4s, v8.4s\n"
- "smlal v23.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v12.8h, v21.8h\n"
- "sqrdmulh v24.4s, v24.4s, v8.4s\n"
- "smlal2 v22.4s, v6.8h, v21.8h\n"
- "smlal2 v19.4s, v3.8h, v21.8h\n"
- "sqrdmulh v23.4s, v23.4s, v8.4s\n"
- "sshr v30.4s, v30.4s, #0x1f\n"
- "and v3.16b, v20.16b, v28.16b\n"
- "sqrdmulh v5.4s, v5.4s, v27.4s\n"
- "and v25.16b, v24.16b, v28.16b\n"
- "sqrdmulh v22.4s, v22.4s, v27.4s\n"
- "and v16.16b, v23.16b, v28.16b\n"
- "sqrdmulh v19.4s, v19.4s, v27.4s\n"
- "sqadd v15.4s, v15.4s, v30.4s\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "and v4.16b, v5.16b, v14.16b\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "and v10.16b, v22.16b, v14.16b\n"
+ "smlal v8.4s, v17.4h, v23.4h\n"
+ "smlal2 v0.4s, v17.8h, v23.8h\n"
+ "smlal v5.4s, v20.4h, v3.4h\n"
+ "smlal2 v30.4s, v20.8h, v3.8h\n"
+ "smlal v27.4s, v26.4h, v23.4h\n"
+ "smlal v1.4s, v20.4h, v23.4h\n"
+ "smlal2 v6.4s, v26.8h, v23.8h\n"
+ "smlal2 v25.4s, v20.8h, v23.8h\n"
+ "smlal v8.4s, v26.4h, v2.4h\n"
+ "smlal2 v0.4s, v26.8h, v2.8h\n"
+ "smlal v5.4s, v19.4h, v23.4h\n"
+ "smlal2 v30.4s, v19.8h, v23.8h\n"
+ "smlal v27.4s, v11.4h, v2.4h\n"
+ "smlal v1.4s, v19.4h, v2.4h\n"
+ "smlal2 v6.4s, v11.8h, v2.8h\n"
+ "smlal2 v25.4s, v19.8h, v2.8h\n"
+ "smlal v8.4s, v11.4h, v18.4h\n"
+ "smlal2 v0.4s, v11.8h, v18.8h\n"
+ "smlal v5.4s, v12.4h, v2.4h\n"
+ "smlal2 v30.4s, v12.8h, v2.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal v1.4s, v12.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal2 v25.4s, v12.8h, v18.8h\n"
+ "smlal v8.4s, v28.4h, v31.4h\n"
+ "smlal2 v0.4s, v28.8h, v31.8h\n"
+ "smlal v5.4s, v7.4h, v18.4h\n"
+ "smlal2 v30.4s, v7.8h, v18.8h\n"
+ "smlal v27.4s, v16.4h, v31.4h\n"
+ "smlal v1.4s, v7.4h, v31.4h\n"
+ "smlal2 v6.4s, v16.8h, v31.8h\n"
+ "smlal2 v25.4s, v7.8h, v31.8h\n"
+ "sqrdmulh v8.4s, v8.4s, v29.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v4.4s\n"
+ "smlal v5.4s, v21.4h, v31.4h\n"
+ "smlal2 v30.4s, v21.8h, v31.8h\n"
+ "and v17.16b, v8.16b, v24.16b\n"
+ "sqrdmulh v27.4s, v27.4s, v29.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v29.4s\n"
+ "and v28.16b, v0.16b, v22.16b\n"
+ "sqrdmulh v6.4s, v6.4s, v4.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v4.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqrdmulh v5.4s, v5.4s, v29.4s\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v16.16b, v27.16b, v24.16b\n"
+ "and v12.16b, v1.16b, v24.16b\n"
+ "sqrdmulh v30.4s, v30.4s, v4.4s\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v11.16b, v5.16b, v24.16b\n"
+ "sqadd v0.4s, v0.4s, v28.4s\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "and v12.16b, v19.16b, v14.16b\n"
- "sqadd v20.4s, v20.4s, v3.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v25.4s\n"
- "sshr v10.4s, v10.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v16.4s\n"
+ "and v18.16b, v6.16b, v22.16b\n"
"sshr v12.4s, v12.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v28.4s\n"
- "srshl v20.4s, v20.4s, v28.4s\n"
- "sqadd v5.4s, v5.4s, v4.4s\n"
- "srshl v24.4s, v24.4s, v28.4s\n"
- "sqadd v22.4s, v22.4s, v10.4s\n"
- "srshl v23.4s, v23.4s, v28.4s\n"
- "sqadd v19.4s, v19.4s, v12.4s\n"
- "srshl v15.4s, v15.4s, v14.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v14.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v14.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v14.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "str d7, [x17, x4]\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "str d20, [x16, x4]\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
- "str d24, [x15, x4]\n"
- "str d23, [x14, x4]\n"
- "add x4, x4, #0x8\n"
+ "and v17.16b, v25.16b, v22.16b\n"
+ "sshr v11.4s, v11.4s, #0x1f\n"
+ "and v19.16b, v30.16b, v22.16b\n"
+ "sqadd v27.4s, v27.4s, v16.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v12.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v5.4s, v5.4s, v11.4s\n"
+ "sshr v19.4s, v19.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v24.4s\n"
+ "srshl v27.4s, v27.4s, v24.4s\n"
+ "sqadd v6.4s, v6.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v24.4s\n"
+ "sqadd v25.4s, v25.4s, v17.4s\n"
+ "srshl v5.4s, v5.4s, v24.4s\n"
+ "sqadd v30.4s, v30.4s, v19.4s\n"
+ "srshl v0.4s, v0.4s, v22.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v22.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v22.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v22.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
+ "str d8, [x8, x3]\n"
+ "str d27, [x17, x3]\n"
+ "str d1, [x16, x3]\n"
+ "str d5, [x15, x3]\n"
+ "add x3, x3, #0x8\n"
"beq 124f\n"
- "add x6, x6, #0xc8\n"
+ "add x5, x5, #0xc8\n"
"3:" // Oddments
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
"tbz x1, #2, 5f\n"
- "ld1 { v7.4s }, [x20], #0x10\n"
+ "ld1 { v8.4s }, [x20], #0x10\n"
"tbz x1, #1, 4f\n"
- "ld1 { v15.d }[0], [x20], #0x8\n"
+ "ld1 { v0.d }[0], [x20], #0x8\n"
"tbz x1, #0, 7f\n"
- "ld1 { v15.s }[2], [x20]\n"
+ "ld1 { v0.s }[2], [x20]\n"
"b 7f\n"
"4:" // Oddments: Load bias: Bit 2: Bit 1: Unset
"tbz x1, #0, 7f\n"
- "ld1 { v15.s }[0], [x20]\n"
+ "ld1 { v0.s }[0], [x20]\n"
"b 7f\n"
"5:" // Oddments: Load bias: Bit 2: Unset
"tbz x1, #1, 6f\n"
- "ld1 { v7.d }[0], [x20], #0x8\n"
+ "ld1 { v8.d }[0], [x20], #0x8\n"
"tbz x1, #0, 7f\n"
- "ld1 { v7.s }[2], [x20]\n"
+ "ld1 { v8.s }[2], [x20]\n"
"b 7f\n"
"6:" // Oddments: Load bias: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 7f\n"
- "ld1 { v7.s }[0], [x20]\n"
+ "ld1 { v8.s }[0], [x20]\n"
"7:" // Oddments: Load bias: Bit 2: End
- "ldr d6, [x6, #0x0]\n"
- "ldr d14, [x6, #0x8]\n"
- "mov v20.16b, v7.16b\n"
- "mov v5.16b, v15.16b\n"
- "ldr d10, [x6, #0x10]\n"
- "ldr d21, [x6, #0x18]\n"
- "mov v24.16b, v7.16b\n"
- "mov v22.16b, v15.16b\n"
- "ldr d12, [x6, #0x20]\n"
- "ldp x9, x28, [x5, #0x0]\n"
- "mov v23.16b, v7.16b\n"
- "mov v19.16b, v15.16b\n"
- "ldp x27, x26, [x5, #0x10]\n"
- "ldp x25, x24, [x5, #0x20]\n"
- "ssubl v6.8h, v6.8b, v13.8b\n"
- "ssubl v14.8h, v14.8b, v13.8b\n"
- "ldp x23, x22, [x5, #0x30]\n"
- "ldp x21, x20, [x5, #0x40]\n"
- "ssubl v10.8h, v10.8b, v13.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "add x9, x9, x3\n"
- "add x28, x28, x3\n"
- "add x27, x27, x3\n"
- "add x26, x26, x3\n"
- "add x25, x25, x3\n"
- "add x24, x24, x3\n"
- "add x23, x23, x3\n"
- "add x22, x22, x3\n"
- "add x21, x21, x3\n"
- "add x20, x20, x3\n"
+ "ldr d31, [x5, #0x0]\n"
+ "ldr d12, [x5, #0x8]\n"
+ "mov v27.16b, v8.16b\n"
+ "mov v6.16b, v0.16b\n"
+ "ldr d17, [x5, #0x10]\n"
+ "ldr d11, [x5, #0x18]\n"
+ "mov v1.16b, v8.16b\n"
+ "mov v25.16b, v0.16b\n"
+ "ldr d23, [x5, #0x20]\n"
+ "ldp x9, x28, [x4, #0x0]\n"
+ "mov v5.16b, v8.16b\n"
+ "mov v30.16b, v0.16b\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "ssubl v12.8h, v12.8b, v9.8b\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "ssubl v11.8h, v11.8b, v9.8b\n"
+ "ldp x27, x26, [x4, #0x10]\n"
+ "ssubl v23.8h, v23.8b, v9.8b\n"
+ "ldp x25, x24, [x4, #0x20]\n"
+ "add x9, x9, x2\n"
+ "add x28, x28, x2\n"
+ "add x27, x27, x2\n"
+ "add x26, x26, x2\n"
+ "ldp x23, x22, [x4, #0x30]\n"
+ "add x25, x25, x2\n"
+ "add x24, x24, x2\n"
+ "ldp x21, x20, [x4, #0x40]\n"
+ "add x23, x23, x2\n"
+ "add x22, x22, x2\n"
+ "add x21, x21, x2\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 9f\n"
- "ld1 { v31.s }[0], [x9], #0x4\n"
- "ld1 { v17.s }[0], [x28], #0x4\n"
- "ld1 { v30.s }[0], [x27], #0x4\n"
- "ld1 { v16.s }[0], [x26], #0x4\n"
- "ld1 { v3.s }[0], [x25], #0x4\n"
- "ld1 { v4.s }[0], [x24], #0x4\n"
- "ld1 { v25.s }[0], [x23], #0x4\n"
- "ld1 { v9.s }[0], [x22], #0x4\n"
+ "ld1 { v24.s }[0], [x9], #0x4\n"
+ "ld1 { v21.s }[0], [x28], #0x4\n"
+ "ld1 { v16.s }[0], [x27], #0x4\n"
+ "ld1 { v20.s }[0], [x26], #0x4\n"
+ "ld1 { v7.s }[0], [x25], #0x4\n"
+ "ld1 { v19.s }[0], [x24], #0x4\n"
+ "ld1 { v28.s }[0], [x23], #0x4\n"
+ "ld1 { v26.s }[0], [x22], #0x4\n"
"ld1 { v29.s }[0], [x21], #0x4\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
"tbz x1, #1, 8f\n"
- "ld1 { v31.h }[2], [x9], #0x2\n"
- "ld1 { v17.h }[2], [x28], #0x2\n"
- "ld1 { v30.h }[2], [x27], #0x2\n"
- "ld1 { v16.h }[2], [x26], #0x2\n"
- "ld1 { v3.h }[2], [x25], #0x2\n"
- "ld1 { v4.h }[2], [x24], #0x2\n"
- "ld1 { v25.h }[2], [x23], #0x2\n"
- "ld1 { v9.h }[2], [x22], #0x2\n"
+ "ld1 { v24.h }[2], [x9], #0x2\n"
+ "ld1 { v21.h }[2], [x28], #0x2\n"
+ "ld1 { v16.h }[2], [x27], #0x2\n"
+ "ld1 { v20.h }[2], [x26], #0x2\n"
+ "ld1 { v7.h }[2], [x25], #0x2\n"
+ "ld1 { v19.h }[2], [x24], #0x2\n"
+ "ld1 { v28.h }[2], [x23], #0x2\n"
+ "ld1 { v26.h }[2], [x22], #0x2\n"
"ld1 { v29.h }[2], [x21], #0x2\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[6], [x9]\n"
- "ld1 { v17.b }[6], [x28]\n"
- "ld1 { v30.b }[6], [x27]\n"
- "ld1 { v16.b }[6], [x26]\n"
- "ld1 { v3.b }[6], [x25]\n"
- "ld1 { v4.b }[6], [x24]\n"
- "ld1 { v25.b }[6], [x23]\n"
- "ld1 { v9.b }[6], [x22]\n"
+ "ld1 { v24.b }[6], [x9]\n"
+ "ld1 { v21.b }[6], [x28]\n"
+ "ld1 { v16.b }[6], [x27]\n"
+ "ld1 { v20.b }[6], [x26]\n"
+ "ld1 { v7.b }[6], [x25]\n"
+ "ld1 { v19.b }[6], [x24]\n"
+ "ld1 { v28.b }[6], [x23]\n"
+ "ld1 { v26.b }[6], [x22]\n"
"ld1 { v29.b }[6], [x21]\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 11f\n"
"8:" // Oddments: Initial loads: Bit 2: Bit 1: Unset
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[4], [x9]\n"
- "ld1 { v17.b }[4], [x28]\n"
- "ld1 { v30.b }[4], [x27]\n"
- "ld1 { v16.b }[4], [x26]\n"
- "ld1 { v3.b }[4], [x25]\n"
- "ld1 { v4.b }[4], [x24]\n"
- "ld1 { v25.b }[4], [x23]\n"
- "ld1 { v9.b }[4], [x22]\n"
+ "ld1 { v24.b }[4], [x9]\n"
+ "ld1 { v21.b }[4], [x28]\n"
+ "ld1 { v16.b }[4], [x27]\n"
+ "ld1 { v20.b }[4], [x26]\n"
+ "ld1 { v7.b }[4], [x25]\n"
+ "ld1 { v19.b }[4], [x24]\n"
+ "ld1 { v28.b }[4], [x23]\n"
+ "ld1 { v26.b }[4], [x22]\n"
"ld1 { v29.b }[4], [x21]\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 11f\n"
"9:" // Oddments: Initial loads: Bit 2: Unset
"tbz x1, #1, 10f\n"
- "ld1 { v31.h }[0], [x9], #0x2\n"
- "ld1 { v17.h }[0], [x28], #0x2\n"
- "ld1 { v30.h }[0], [x27], #0x2\n"
- "ld1 { v16.h }[0], [x26], #0x2\n"
- "ld1 { v3.h }[0], [x25], #0x2\n"
- "ld1 { v4.h }[0], [x24], #0x2\n"
- "ld1 { v25.h }[0], [x23], #0x2\n"
- "ld1 { v9.h }[0], [x22], #0x2\n"
+ "ld1 { v24.h }[0], [x9], #0x2\n"
+ "ld1 { v21.h }[0], [x28], #0x2\n"
+ "ld1 { v16.h }[0], [x27], #0x2\n"
+ "ld1 { v20.h }[0], [x26], #0x2\n"
+ "ld1 { v7.h }[0], [x25], #0x2\n"
+ "ld1 { v19.h }[0], [x24], #0x2\n"
+ "ld1 { v28.h }[0], [x23], #0x2\n"
+ "ld1 { v26.h }[0], [x22], #0x2\n"
"ld1 { v29.h }[0], [x21], #0x2\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[2], [x9]\n"
- "ld1 { v17.b }[2], [x28]\n"
- "ld1 { v30.b }[2], [x27]\n"
- "ld1 { v16.b }[2], [x26]\n"
- "ld1 { v3.b }[2], [x25]\n"
- "ld1 { v4.b }[2], [x24]\n"
- "ld1 { v25.b }[2], [x23]\n"
- "ld1 { v9.b }[2], [x22]\n"
+ "ld1 { v24.b }[2], [x9]\n"
+ "ld1 { v21.b }[2], [x28]\n"
+ "ld1 { v16.b }[2], [x27]\n"
+ "ld1 { v20.b }[2], [x26]\n"
+ "ld1 { v7.b }[2], [x25]\n"
+ "ld1 { v19.b }[2], [x24]\n"
+ "ld1 { v28.b }[2], [x23]\n"
+ "ld1 { v26.b }[2], [x22]\n"
"ld1 { v29.b }[2], [x21]\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 11f\n"
"10:" // Oddments: Initial loads: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 11f\n"
- "ld1 { v31.b }[0], [x9]\n"
- "ld1 { v17.b }[0], [x28]\n"
- "ld1 { v30.b }[0], [x27]\n"
- "ld1 { v16.b }[0], [x26]\n"
- "ld1 { v3.b }[0], [x25]\n"
- "ld1 { v4.b }[0], [x24]\n"
- "ld1 { v25.b }[0], [x23]\n"
- "ld1 { v9.b }[0], [x22]\n"
+ "ld1 { v24.b }[0], [x9]\n"
+ "ld1 { v21.b }[0], [x28]\n"
+ "ld1 { v16.b }[0], [x27]\n"
+ "ld1 { v20.b }[0], [x26]\n"
+ "ld1 { v7.b }[0], [x25]\n"
+ "ld1 { v19.b }[0], [x24]\n"
+ "ld1 { v28.b }[0], [x23]\n"
+ "ld1 { v26.b }[0], [x22]\n"
"ld1 { v29.b }[0], [x21]\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"11:" // Oddments: Initial loads: Bit 2: End
- "usubl v31.8h, v31.8b, v18.8b\n"
- "usubl v17.8h, v17.8b, v18.8b\n"
- "smlal v7.4s, v31.4h, v6.4h\n"
- "ldr x20, [x5, #0x50]\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "smlal2 v15.4s, v31.8h, v6.8h\n"
- "smlal v20.4s, v17.4h, v6.4h\n"
- "smlal2 v5.4s, v17.8h, v6.8h\n"
- "smlal v24.4s, v30.4h, v6.4h\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "add x20, x20, x3\n"
- "smlal2 v22.4s, v30.8h, v6.8h\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "smlal v23.4s, v16.4h, v6.4h\n"
- "smlal2 v19.4s, v16.8h, v6.8h\n"
- "smlal v7.4s, v17.4h, v14.4h\n"
- "usubl v4.8h, v4.8b, v18.8b\n"
- "smlal2 v15.4s, v17.8h, v14.8h\n"
- "smlal v20.4s, v3.4h, v14.4h\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "smlal2 v5.4s, v3.8h, v14.8h\n"
- "smlal v24.4s, v16.4h, v14.4h\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal2 v22.4s, v16.8h, v14.8h\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "smlal v23.4s, v4.4h, v14.4h\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
- "smlal2 v19.4s, v4.8h, v14.8h\n"
- "smlal v7.4s, v3.4h, v10.4h\n"
- "smlal2 v15.4s, v3.8h, v10.8h\n"
- "smlal v20.4s, v25.4h, v10.4h\n"
- "smlal2 v5.4s, v25.8h, v10.8h\n"
- "smlal v24.4s, v4.4h, v10.4h\n"
- "smlal2 v22.4s, v4.8h, v10.8h\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0x50]\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "smlal v8.4s, v24.4h, v31.4h\n"
+ "smlal2 v0.4s, v24.8h, v31.8h\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "add x20, x20, x2\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal v1.4s, v16.4h, v31.4h\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "smlal2 v25.4s, v16.8h, v31.8h\n"
+ "smlal v5.4s, v20.4h, v31.4h\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "smlal2 v30.4s, v20.8h, v31.8h\n"
+ "smlal v8.4s, v21.4h, v12.4h\n"
+ "usubl v18.8h, v18.8b, v15.8b\n"
+ "smlal2 v0.4s, v21.8h, v12.8h\n"
+ "smlal v27.4s, v7.4h, v12.4h\n"
+ "smlal2 v6.4s, v7.8h, v12.8h\n"
+ "smlal v1.4s, v20.4h, v12.4h\n"
+ "smlal2 v25.4s, v20.8h, v12.8h\n"
+ "smlal v5.4s, v19.4h, v12.4h\n"
+ "smlal2 v30.4s, v19.8h, v12.8h\n"
+ "smlal v8.4s, v7.4h, v17.4h\n"
+ "smlal2 v0.4s, v7.8h, v17.8h\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 13f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v4.s }[0], [x20], #0x4\n"
"tbz x1, #1, 12f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v4.h }[2], [x20], #0x2\n"
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v4.b }[6], [x20]\n"
"b 15f\n"
"12:" // Oddments: Load (1, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v4.b }[4], [x20]\n"
"b 15f\n"
"13:" // Oddments: Load (1, 3): Bit 2: Unset
"tbz x1, #1, 14f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v4.h }[0], [x20], #0x2\n"
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v4.b }[2], [x20]\n"
"b 15f\n"
"14:" // Oddments: Load (1, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 15f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v4.b }[0], [x20]\n"
"15:" // Oddments: Load (1, 3): Bit 2: End
- "usubl v27.8h, v27.8b, v18.8b\n"
- "ldr x20, [x5, #0x58]\n"
- "smlal v23.4s, v27.4h, v10.4h\n"
- "smlal2 v19.4s, v27.8h, v10.8h\n"
- "smlal v7.4s, v25.4h, v21.4h\n"
- "smlal2 v15.4s, v25.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v20.4s, v9.4h, v21.4h\n"
- "smlal2 v5.4s, v9.8h, v21.8h\n"
- "smlal v24.4s, v27.4h, v21.4h\n"
- "smlal2 v22.4s, v27.8h, v21.8h\n"
+ "usubl v4.8h, v4.8b, v15.8b\n"
+ "ldr x20, [x4, #0x58]\n"
+ "smlal v8.4s, v28.4h, v11.4h\n"
+ "smlal2 v0.4s, v28.8h, v11.8h\n"
+ "smlal v27.4s, v26.4h, v11.4h\n"
+ "smlal2 v6.4s, v26.8h, v11.8h\n"
+ "smlal v5.4s, v4.4h, v17.4h\n"
+ "smlal2 v30.4s, v4.8h, v17.8h\n"
+ "smlal v1.4s, v4.4h, v11.4h\n"
+ "smlal2 v25.4s, v4.8h, v11.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 17f\n"
- "ld1 { v6.s }[0], [x20], #0x4\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
"tbz x1, #1, 16f\n"
- "ld1 { v6.h }[2], [x20], #0x2\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[6], [x20]\n"
+ "ld1 { v21.b }[6], [x20]\n"
"b 19f\n"
"16:" // Oddments: Load (1, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[4], [x20]\n"
+ "ld1 { v21.b }[4], [x20]\n"
"b 19f\n"
"17:" // Oddments: Load (1, 4): Bit 2: Unset
"tbz x1, #1, 18f\n"
- "ld1 { v6.h }[0], [x20], #0x2\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[2], [x20]\n"
+ "ld1 { v21.b }[2], [x20]\n"
"b 19f\n"
"18:" // Oddments: Load (1, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 19f\n"
- "ld1 { v6.b }[0], [x20]\n"
+ "ld1 { v21.b }[0], [x20]\n"
"19:" // Oddments: Load (1, 4): Bit 2: End
- "usubl v6.8h, v6.8b, v18.8b\n"
- "ldr x20, [x5, #0x60]\n"
- "smlal v23.4s, v6.4h, v21.4h\n"
- "smlal2 v19.4s, v6.8h, v21.8h\n"
- "smlal v7.4s, v9.4h, v12.4h\n"
- "smlal2 v15.4s, v9.8h, v12.8h\n"
- "add x20, x20, x3\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0x60]\n"
+ "smlal v8.4s, v26.4h, v23.4h\n"
+ "smlal2 v0.4s, v26.8h, v23.8h\n"
+ "smlal v5.4s, v21.4h, v11.4h\n"
+ "smlal2 v30.4s, v21.8h, v11.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 21f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
"tbz x1, #1, 20f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 23f\n"
"20:" // Oddments: Load (0, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 23f\n"
"21:" // Oddments: Load (0, 5): Bit 2: Unset
"tbz x1, #1, 22f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 23f\n"
"22:" // Oddments: Load (0, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 23f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v31.b }[0], [x20]\n"
"23:" // Oddments: Load (0, 5): Bit 2: End
- "ldr d14, [x6, #0x28]\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal v20.4s, v9.4h, v12.4h\n"
- "smlal2 v5.4s, v9.8h, v12.8h\n"
- "smlal v24.4s, v6.4h, v12.4h\n"
- "smlal2 v22.4s, v6.8h, v12.8h\n"
- "ssubl v14.8h, v14.8b, v13.8b\n"
- "ldr x20, [x5, #0x68]\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v30.4h, v14.4h\n"
- "smlal2 v15.4s, v30.8h, v14.8h\n"
- "smlal v20.4s, v16.4h, v14.4h\n"
- "smlal2 v5.4s, v16.8h, v14.8h\n"
- "smlal v24.4s, v28.4h, v14.4h\n"
- "smlal2 v22.4s, v28.8h, v14.8h\n"
+ "ldr d11, [x5, #0x28]\n"
+ "usubl v31.8h, v31.8b, v15.8b\n"
+ "smlal v1.4s, v21.4h, v23.4h\n"
+ "smlal2 v25.4s, v21.8h, v23.8h\n"
+ "ldr x20, [x4, #0x68]\n"
+ "smlal v5.4s, v29.4h, v23.4h\n"
+ "smlal2 v30.4s, v29.8h, v23.8h\n"
+ "smlal v27.4s, v31.4h, v23.4h\n"
+ "smlal2 v6.4s, v31.8h, v23.8h\n"
+ "ssubl v11.8h, v11.8b, v9.8b\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v16.4h, v11.4h\n"
+ "smlal2 v0.4s, v16.8h, v11.8h\n"
+ "smlal v1.4s, v18.4h, v11.4h\n"
+ "smlal2 v25.4s, v18.8h, v11.8h\n"
+ "smlal v27.4s, v20.4h, v11.4h\n"
+ "smlal2 v6.4s, v20.8h, v11.8h\n"
"tbz x1, #2, 25f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v24.s }[0], [x20], #0x4\n"
"tbz x1, #1, 24f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v24.h }[2], [x20], #0x2\n"
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v24.b }[6], [x20]\n"
"b 27f\n"
"24:" // Oddments: Load (2, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v24.b }[4], [x20]\n"
"b 27f\n"
"25:" // Oddments: Load (2, 1): Bit 2: Unset
"tbz x1, #1, 26f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v24.h }[0], [x20], #0x2\n"
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v24.b }[2], [x20]\n"
"b 27f\n"
"26:" // Oddments: Load (2, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 27f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v24.b }[0], [x20]\n"
"27:" // Oddments: Load (2, 1): Bit 2: End
- "ldr d21, [x6, #0x30]\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0x70]\n"
- "smlal v23.4s, v25.4h, v14.4h\n"
- "smlal2 v19.4s, v25.8h, v14.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v16.4h, v21.4h\n"
- "smlal2 v15.4s, v16.8h, v21.8h\n"
- "smlal v20.4s, v4.4h, v21.4h\n"
- "smlal2 v5.4s, v4.8h, v21.8h\n"
- "smlal v24.4s, v25.4h, v21.4h\n"
- "smlal2 v22.4s, v25.8h, v21.8h\n"
+ "ldr d3, [x5, #0x30]\n"
+ "usubl v24.8h, v24.8b, v15.8b\n"
+ "ldr x20, [x4, #0x70]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v24.4h, v11.4h\n"
+ "smlal2 v30.4s, v24.8h, v11.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v3.4h\n"
+ "smlal2 v0.4s, v20.8h, v3.8h\n"
+ "smlal v27.4s, v19.4h, v3.4h\n"
+ "smlal2 v6.4s, v19.8h, v3.8h\n"
+ "smlal v1.4s, v24.4h, v3.4h\n"
+ "smlal2 v25.4s, v24.8h, v3.8h\n"
"tbz x1, #2, 29f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v2.s }[0], [x20], #0x4\n"
"tbz x1, #1, 28f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ld1 { v2.b }[6], [x20]\n"
"b 31f\n"
"28:" // Oddments: Load (2, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "ld1 { v2.b }[4], [x20]\n"
"b 31f\n"
"29:" // Oddments: Load (2, 2): Bit 2: Unset
"tbz x1, #1, 30f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v2.h }[0], [x20], #0x2\n"
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "ld1 { v2.b }[2], [x20]\n"
"b 31f\n"
"30:" // Oddments: Load (2, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 31f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "ld1 { v2.b }[0], [x20]\n"
"31:" // Oddments: Load (2, 2): Bit 2: End
- "ldr d9, [x6, #0x38]\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "ssubl v9.8h, v9.8b, v13.8b\n"
- "ldr x20, [x5, #0x78]\n"
- "smlal v23.4s, v10.4h, v21.4h\n"
- "smlal2 v19.4s, v10.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v4.4h, v9.4h\n"
- "smlal2 v15.4s, v4.8h, v9.8h\n"
- "smlal v20.4s, v27.4h, v9.4h\n"
- "smlal2 v5.4s, v27.8h, v9.8h\n"
- "smlal v24.4s, v10.4h, v9.4h\n"
- "smlal2 v22.4s, v10.8h, v9.8h\n"
+ "ldr d22, [x5, #0x38]\n"
+ "usubl v2.8h, v2.8b, v15.8b\n"
+ "ldr x20, [x4, #0x78]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v2.4h, v3.4h\n"
+ "smlal2 v30.4s, v2.8h, v3.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v19.4h, v22.4h\n"
+ "smlal2 v0.4s, v19.8h, v22.8h\n"
+ "smlal v27.4s, v4.4h, v22.4h\n"
+ "smlal2 v6.4s, v4.8h, v22.8h\n"
+ "smlal v1.4s, v2.4h, v22.4h\n"
+ "smlal2 v25.4s, v2.8h, v22.8h\n"
"tbz x1, #2, 33f\n"
- "ld1 { v12.s }[0], [x20], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
"tbz x1, #1, 32f\n"
- "ld1 { v12.h }[2], [x20], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[6], [x20]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 35f\n"
"32:" // Oddments: Load (2, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[4], [x20]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 35f\n"
"33:" // Oddments: Load (2, 3): Bit 2: Unset
"tbz x1, #1, 34f\n"
- "ld1 { v12.h }[0], [x20], #0x2\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[2], [x20]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 35f\n"
"34:" // Oddments: Load (2, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 35f\n"
- "ld1 { v12.b }[0], [x20]\n"
+ "ld1 { v26.b }[0], [x20]\n"
"35:" // Oddments: Load (2, 3): Bit 2: End
- "ldr d31, [x6, #0x40]\n"
- "usubl v12.8h, v12.8b, v18.8b\n"
- "ssubl v31.8h, v31.8b, v13.8b\n"
- "ldr x20, [x5, #0x80]\n"
- "smlal v23.4s, v12.4h, v9.4h\n"
- "smlal2 v19.4s, v12.8h, v9.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v27.4h, v31.4h\n"
- "smlal2 v15.4s, v27.8h, v31.8h\n"
- "smlal v20.4s, v6.4h, v31.4h\n"
- "smlal2 v5.4s, v6.8h, v31.8h\n"
- "smlal v24.4s, v12.4h, v31.4h\n"
- "smlal2 v22.4s, v12.8h, v31.8h\n"
+ "ldr d31, [x5, #0x40]\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "ldr x20, [x4, #0x80]\n"
+ "ssubl v31.8h, v31.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v22.4h\n"
+ "smlal2 v30.4s, v26.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v4.4h, v31.4h\n"
+ "smlal2 v0.4s, v4.8h, v31.8h\n"
+ "smlal v27.4s, v21.4h, v31.4h\n"
+ "smlal2 v6.4s, v21.8h, v31.8h\n"
+ "smlal v1.4s, v26.4h, v31.4h\n"
+ "smlal2 v25.4s, v26.8h, v31.8h\n"
"tbz x1, #2, 37f\n"
- "ld1 { v8.s }[0], [x20], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
"tbz x1, #1, 36f\n"
- "ld1 { v8.h }[2], [x20], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[6], [x20]\n"
+ "ld1 { v28.b }[6], [x20]\n"
"b 39f\n"
"36:" // Oddments: Load (2, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[4], [x20]\n"
+ "ld1 { v28.b }[4], [x20]\n"
"b 39f\n"
"37:" // Oddments: Load (2, 4): Bit 2: Unset
"tbz x1, #1, 38f\n"
- "ld1 { v8.h }[0], [x20], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[2], [x20]\n"
+ "ld1 { v28.b }[2], [x20]\n"
"b 39f\n"
"38:" // Oddments: Load (2, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 39f\n"
- "ld1 { v8.b }[0], [x20]\n"
+ "ld1 { v28.b }[0], [x20]\n"
"39:" // Oddments: Load (2, 4): Bit 2: End
- "ldr d16, [x6, #0x48]\n"
- "usubl v8.8h, v8.8b, v18.8b\n"
- "ssubl v16.8h, v16.8b, v13.8b\n"
- "ldr x20, [x5, #0x88]\n"
- "smlal v23.4s, v8.4h, v31.4h\n"
- "smlal2 v19.4s, v8.8h, v31.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v6.4h, v16.4h\n"
- "smlal2 v15.4s, v6.8h, v16.8h\n"
- "smlal v20.4s, v29.4h, v16.4h\n"
- "smlal2 v5.4s, v29.8h, v16.8h\n"
- "smlal v24.4s, v8.4h, v16.4h\n"
- "smlal2 v22.4s, v8.8h, v16.8h\n"
+ "ldr d17, [x5, #0x48]\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x20, [x4, #0x88]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v31.4h\n"
+ "smlal2 v30.4s, v28.8h, v31.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v21.4h, v17.4h\n"
+ "smlal2 v0.4s, v21.8h, v17.8h\n"
+ "smlal v27.4s, v29.4h, v17.4h\n"
+ "smlal2 v6.4s, v29.8h, v17.8h\n"
+ "smlal v1.4s, v28.4h, v17.4h\n"
+ "smlal2 v25.4s, v28.8h, v17.8h\n"
"tbz x1, #2, 41f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v7.s }[0], [x20], #0x4\n"
"tbz x1, #1, 40f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v7.h }[2], [x20], #0x2\n"
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v7.b }[6], [x20]\n"
"b 43f\n"
"40:" // Oddments: Load (2, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v7.b }[4], [x20]\n"
"b 43f\n"
"41:" // Oddments: Load (2, 5): Bit 2: Unset
"tbz x1, #1, 42f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v7.h }[0], [x20], #0x2\n"
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v7.b }[2], [x20]\n"
"b 43f\n"
"42:" // Oddments: Load (2, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 43f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v7.b }[0], [x20]\n"
"43:" // Oddments: Load (2, 5): Bit 2: End
- "ldr d21, [x6, #0x50]\n"
- "usubl v27.8h, v27.8b, v18.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0x90]\n"
- "smlal v23.4s, v27.4h, v16.4h\n"
- "smlal2 v19.4s, v27.8h, v16.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v28.4h, v21.4h\n"
- "smlal2 v15.4s, v28.8h, v21.8h\n"
- "smlal v20.4s, v25.4h, v21.4h\n"
- "smlal2 v5.4s, v25.8h, v21.8h\n"
+ "ldr d22, [x5, #0x50]\n"
+ "usubl v7.8h, v7.8b, v15.8b\n"
+ "ldr x20, [x4, #0x90]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v7.4h, v17.4h\n"
+ "smlal2 v30.4s, v7.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v18.4h, v22.4h\n"
+ "smlal2 v0.4s, v18.8h, v22.8h\n"
+ "smlal v27.4s, v24.4h, v22.4h\n"
+ "smlal2 v6.4s, v24.8h, v22.8h\n"
"tbz x1, #2, 45f\n"
- "ld1 { v31.s }[0], [x20], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
"tbz x1, #1, 44f\n"
- "ld1 { v31.h }[2], [x20], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[6], [x20]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 47f\n"
"44:" // Oddments: Load (3, 0): Bit 2: Bit 1: Unset
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[4], [x20]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 47f\n"
"45:" // Oddments: Load (3, 0): Bit 2: Unset
"tbz x1, #1, 46f\n"
- "ld1 { v31.h }[0], [x20], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[2], [x20]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 47f\n"
"46:" // Oddments: Load (3, 0): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 47f\n"
- "ld1 { v31.b }[0], [x20]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"47:" // Oddments: Load (3, 0): Bit 2: End
- "usubl v31.8h, v31.8b, v18.8b\n"
- "ldr x20, [x5, #0x98]\n"
- "smlal v24.4s, v31.4h, v21.4h\n"
- "smlal2 v22.4s, v31.8h, v21.8h\n"
- "add x20, x20, x3\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x20, [x4, #0x98]\n"
+ "smlal v1.4s, v20.4h, v22.4h\n"
+ "smlal2 v25.4s, v20.8h, v22.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 49f\n"
- "ld1 { v28.s }[0], [x20], #0x4\n"
+ "ld1 { v19.s }[0], [x20], #0x4\n"
"tbz x1, #1, 48f\n"
- "ld1 { v28.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x20], #0x2\n"
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x20]\n"
"b 51f\n"
"48:" // Oddments: Load (3, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x20]\n"
"b 51f\n"
"49:" // Oddments: Load (3, 1): Bit 2: Unset
"tbz x1, #1, 50f\n"
- "ld1 { v28.h }[0], [x20], #0x2\n"
+ "ld1 { v19.h }[0], [x20], #0x2\n"
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 51f\n"
"50:" // Oddments: Load (3, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 51f\n"
- "ld1 { v28.b }[0], [x20]\n"
+ "ld1 { v19.b }[0], [x20]\n"
"51:" // Oddments: Load (3, 1): Bit 2: End
- "ldr d2, [x6, #0x58]\n"
- "usubl v28.8h, v28.8b, v18.8b\n"
- "ssubl v2.8h, v2.8b, v13.8b\n"
- "ldr x20, [x5, #0xa0]\n"
- "smlal v23.4s, v28.4h, v21.4h\n"
- "smlal2 v19.4s, v28.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v25.4h, v2.4h\n"
- "smlal2 v15.4s, v25.8h, v2.8h\n"
- "smlal v20.4s, v10.4h, v2.4h\n"
- "smlal2 v5.4s, v10.8h, v2.8h\n"
- "smlal v24.4s, v28.4h, v2.4h\n"
- "smlal2 v22.4s, v28.8h, v2.8h\n"
+ "ldr d17, [x5, #0x58]\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "ldr x20, [x4, #0xa0]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v22.4h\n"
+ "smlal2 v30.4s, v19.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v24.4h, v17.4h\n"
+ "smlal2 v0.4s, v24.8h, v17.8h\n"
+ "smlal v27.4s, v2.4h, v17.4h\n"
+ "smlal2 v6.4s, v2.8h, v17.8h\n"
+ "smlal v1.4s, v19.4h, v17.4h\n"
+ "smlal2 v25.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 53f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
+ "ld1 { v29.s }[0], [x20], #0x4\n"
"tbz x1, #1, 52f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
+ "ld1 { v29.h }[2], [x20], #0x2\n"
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ld1 { v29.b }[6], [x20]\n"
"b 55f\n"
"52:" // Oddments: Load (3, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "ld1 { v29.b }[4], [x20]\n"
"b 55f\n"
"53:" // Oddments: Load (3, 2): Bit 2: Unset
"tbz x1, #1, 54f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
+ "ld1 { v29.h }[0], [x20], #0x2\n"
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "ld1 { v29.b }[2], [x20]\n"
"b 55f\n"
"54:" // Oddments: Load (3, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 55f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "ld1 { v29.b }[0], [x20]\n"
"55:" // Oddments: Load (3, 2): Bit 2: End
- "ldr d25, [x6, #0x60]\n"
- "usubl v21.8h, v21.8b, v18.8b\n"
- "ssubl v25.8h, v25.8b, v13.8b\n"
- "ldr x20, [x5, #0xa8]\n"
- "smlal v23.4s, v21.4h, v2.4h\n"
- "smlal2 v19.4s, v21.8h, v2.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v10.4h, v25.4h\n"
- "smlal2 v15.4s, v10.8h, v25.8h\n"
- "smlal v20.4s, v12.4h, v25.4h\n"
- "smlal2 v5.4s, v12.8h, v25.8h\n"
- "smlal v24.4s, v21.4h, v25.4h\n"
- "smlal2 v22.4s, v21.8h, v25.8h\n"
+ "ldr d24, [x5, #0x60]\n"
+ "usubl v29.8h, v29.8b, v15.8b\n"
+ "ldr x20, [x4, #0xa8]\n"
+ "ssubl v24.8h, v24.8b, v9.8b\n"
+ "smlal v5.4s, v29.4h, v17.4h\n"
+ "smlal2 v30.4s, v29.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v2.4h, v24.4h\n"
+ "smlal2 v0.4s, v2.8h, v24.8h\n"
+ "smlal v27.4s, v26.4h, v24.4h\n"
+ "smlal2 v6.4s, v26.8h, v24.8h\n"
+ "smlal v1.4s, v29.4h, v24.4h\n"
+ "smlal2 v25.4s, v29.8h, v24.8h\n"
"tbz x1, #2, 57f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v31.s }[0], [x20], #0x4\n"
"tbz x1, #1, 56f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v31.h }[2], [x20], #0x2\n"
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v31.b }[6], [x20]\n"
"b 59f\n"
"56:" // Oddments: Load (3, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v31.b }[4], [x20]\n"
"b 59f\n"
"57:" // Oddments: Load (3, 3): Bit 2: Unset
"tbz x1, #1, 58f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v31.h }[0], [x20], #0x2\n"
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v31.b }[2], [x20]\n"
"b 59f\n"
"58:" // Oddments: Load (3, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 59f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v31.b }[0], [x20]\n"
"59:" // Oddments: Load (3, 3): Bit 2: End
- "ldr d1, [x6, #0x68]\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "ssubl v1.8h, v1.8b, v13.8b\n"
- "ldr x20, [x5, #0xb0]\n"
- "smlal v23.4s, v9.4h, v25.4h\n"
- "smlal2 v19.4s, v9.8h, v25.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v12.4h, v1.4h\n"
- "smlal2 v15.4s, v12.8h, v1.8h\n"
- "smlal v20.4s, v8.4h, v1.4h\n"
- "smlal2 v5.4s, v8.8h, v1.8h\n"
- "smlal v24.4s, v9.4h, v1.4h\n"
- "smlal2 v22.4s, v9.8h, v1.8h\n"
+ "ldr d17, [x5, #0x68]\n"
+ "usubl v31.8h, v31.8b, v15.8b\n"
+ "ldr x20, [x4, #0xb0]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v31.4h, v24.4h\n"
+ "smlal2 v30.4s, v31.8h, v24.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v26.4h, v17.4h\n"
+ "smlal2 v0.4s, v26.8h, v17.8h\n"
+ "smlal v27.4s, v28.4h, v17.4h\n"
+ "smlal2 v6.4s, v28.8h, v17.8h\n"
+ "smlal v1.4s, v31.4h, v17.4h\n"
+ "smlal2 v25.4s, v31.8h, v17.8h\n"
"tbz x1, #2, 61f\n"
- "ld1 { v3.s }[0], [x20], #0x4\n"
+ "ld1 { v21.s }[0], [x20], #0x4\n"
"tbz x1, #1, 60f\n"
- "ld1 { v3.h }[2], [x20], #0x2\n"
+ "ld1 { v21.h }[2], [x20], #0x2\n"
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[6], [x20]\n"
+ "ld1 { v21.b }[6], [x20]\n"
"b 63f\n"
"60:" // Oddments: Load (3, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[4], [x20]\n"
+ "ld1 { v21.b }[4], [x20]\n"
"b 63f\n"
"61:" // Oddments: Load (3, 4): Bit 2: Unset
"tbz x1, #1, 62f\n"
- "ld1 { v3.h }[0], [x20], #0x2\n"
+ "ld1 { v21.h }[0], [x20], #0x2\n"
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[2], [x20]\n"
+ "ld1 { v21.b }[2], [x20]\n"
"b 63f\n"
"62:" // Oddments: Load (3, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 63f\n"
- "ld1 { v3.b }[0], [x20]\n"
+ "ld1 { v21.b }[0], [x20]\n"
"63:" // Oddments: Load (3, 4): Bit 2: End
- "ldr d16, [x6, #0x70]\n"
- "usubl v3.8h, v3.8b, v18.8b\n"
- "ssubl v16.8h, v16.8b, v13.8b\n"
- "ldr x20, [x5, #0xb8]\n"
- "smlal v23.4s, v3.4h, v1.4h\n"
- "smlal2 v19.4s, v3.8h, v1.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v8.4h, v16.4h\n"
- "smlal2 v15.4s, v8.8h, v16.8h\n"
- "smlal v20.4s, v27.4h, v16.4h\n"
- "smlal2 v5.4s, v27.8h, v16.8h\n"
- "smlal v24.4s, v3.4h, v16.4h\n"
- "smlal2 v22.4s, v3.8h, v16.8h\n"
+ "ldr d22, [x5, #0x70]\n"
+ "usubl v21.8h, v21.8b, v15.8b\n"
+ "ldr x20, [x4, #0xb8]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v21.4h, v17.4h\n"
+ "smlal2 v30.4s, v21.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v28.4h, v22.4h\n"
+ "smlal2 v0.4s, v28.8h, v22.8h\n"
+ "smlal v27.4s, v7.4h, v22.4h\n"
+ "smlal2 v6.4s, v7.8h, v22.8h\n"
+ "smlal v1.4s, v21.4h, v22.4h\n"
+ "smlal2 v25.4s, v21.8h, v22.8h\n"
"tbz x1, #2, 65f\n"
- "ld1 { v14.s }[0], [x20], #0x4\n"
+ "ld1 { v11.s }[0], [x20], #0x4\n"
"tbz x1, #1, 64f\n"
- "ld1 { v14.h }[2], [x20], #0x2\n"
+ "ld1 { v11.h }[2], [x20], #0x2\n"
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[6], [x20]\n"
+ "ld1 { v11.b }[6], [x20]\n"
"b 67f\n"
"64:" // Oddments: Load (3, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[4], [x20]\n"
+ "ld1 { v11.b }[4], [x20]\n"
"b 67f\n"
"65:" // Oddments: Load (3, 5): Bit 2: Unset
"tbz x1, #1, 66f\n"
- "ld1 { v14.h }[0], [x20], #0x2\n"
+ "ld1 { v11.h }[0], [x20], #0x2\n"
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[2], [x20]\n"
+ "ld1 { v11.b }[2], [x20]\n"
"b 67f\n"
"66:" // Oddments: Load (3, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 67f\n"
- "ld1 { v14.b }[0], [x20]\n"
+ "ld1 { v11.b }[0], [x20]\n"
"67:" // Oddments: Load (3, 5): Bit 2: End
- "ldr d17, [x6, #0x78]\n"
- "usubl v14.8h, v14.8b, v18.8b\n"
- "ssubl v17.8h, v17.8b, v13.8b\n"
- "ldr x20, [x5, #0xc0]\n"
- "smlal v23.4s, v14.4h, v16.4h\n"
- "smlal2 v19.4s, v14.8h, v16.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v31.4h, v17.4h\n"
- "smlal2 v15.4s, v31.8h, v17.8h\n"
- "smlal v20.4s, v28.4h, v17.4h\n"
- "smlal2 v5.4s, v28.8h, v17.8h\n"
+ "ldr d17, [x5, #0x78]\n"
+ "usubl v11.8h, v11.8b, v15.8b\n"
+ "ldr x20, [x4, #0xc0]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v11.4h, v22.4h\n"
+ "smlal2 v30.4s, v11.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v17.4h\n"
+ "smlal2 v0.4s, v20.8h, v17.8h\n"
+ "smlal v27.4s, v19.4h, v17.4h\n"
+ "smlal2 v6.4s, v19.8h, v17.8h\n"
"tbz x1, #2, 69f\n"
- "ld1 { v1.s }[0], [x20], #0x4\n"
+ "ld1 { v18.s }[0], [x20], #0x4\n"
"tbz x1, #1, 68f\n"
- "ld1 { v1.h }[2], [x20], #0x2\n"
+ "ld1 { v18.h }[2], [x20], #0x2\n"
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[6], [x20]\n"
+ "ld1 { v18.b }[6], [x20]\n"
"b 71f\n"
"68:" // Oddments: Load (4, 0): Bit 2: Bit 1: Unset
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[4], [x20]\n"
+ "ld1 { v18.b }[4], [x20]\n"
"b 71f\n"
"69:" // Oddments: Load (4, 0): Bit 2: Unset
"tbz x1, #1, 70f\n"
- "ld1 { v1.h }[0], [x20], #0x2\n"
+ "ld1 { v18.h }[0], [x20], #0x2\n"
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[2], [x20]\n"
+ "ld1 { v18.b }[2], [x20]\n"
"b 71f\n"
"70:" // Oddments: Load (4, 0): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 71f\n"
- "ld1 { v1.b }[0], [x20]\n"
+ "ld1 { v18.b }[0], [x20]\n"
"71:" // Oddments: Load (4, 0): Bit 2: End
- "usubl v1.8h, v1.8b, v18.8b\n"
- "ldr x20, [x5, #0xc8]\n"
- "smlal v24.4s, v1.4h, v17.4h\n"
- "smlal2 v22.4s, v1.8h, v17.8h\n"
- "add x20, x20, x3\n"
+ "usubl v18.8h, v18.8b, v15.8b\n"
+ "ldr x20, [x4, #0xc8]\n"
+ "smlal v1.4s, v18.4h, v17.4h\n"
+ "smlal2 v25.4s, v18.8h, v17.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 73f\n"
- "ld1 { v16.s }[0], [x20], #0x4\n"
+ "ld1 { v20.s }[0], [x20], #0x4\n"
"tbz x1, #1, 72f\n"
- "ld1 { v16.h }[2], [x20], #0x2\n"
+ "ld1 { v20.h }[2], [x20], #0x2\n"
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[6], [x20]\n"
+ "ld1 { v20.b }[6], [x20]\n"
"b 75f\n"
"72:" // Oddments: Load (4, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[4], [x20]\n"
+ "ld1 { v20.b }[4], [x20]\n"
"b 75f\n"
"73:" // Oddments: Load (4, 1): Bit 2: Unset
"tbz x1, #1, 74f\n"
- "ld1 { v16.h }[0], [x20], #0x2\n"
+ "ld1 { v20.h }[0], [x20], #0x2\n"
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[2], [x20]\n"
+ "ld1 { v20.b }[2], [x20]\n"
"b 75f\n"
"74:" // Oddments: Load (4, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 75f\n"
- "ld1 { v16.b }[0], [x20]\n"
+ "ld1 { v20.b }[0], [x20]\n"
"75:" // Oddments: Load (4, 1): Bit 2: End
- "ldr d29, [x6, #0x80]\n"
- "usubl v16.8h, v16.8b, v18.8b\n"
- "ssubl v29.8h, v29.8b, v13.8b\n"
- "ldr x20, [x5, #0xd0]\n"
- "smlal v23.4s, v16.4h, v17.4h\n"
- "smlal2 v19.4s, v16.8h, v17.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v28.4h, v29.4h\n"
- "smlal2 v15.4s, v28.8h, v29.8h\n"
- "smlal v20.4s, v21.4h, v29.4h\n"
- "smlal2 v5.4s, v21.8h, v29.8h\n"
- "smlal v24.4s, v16.4h, v29.4h\n"
- "smlal2 v22.4s, v16.8h, v29.8h\n"
+ "ldr d4, [x5, #0x80]\n"
+ "usubl v20.8h, v20.8b, v15.8b\n"
+ "ldr x20, [x4, #0xd0]\n"
+ "ssubl v4.8h, v4.8b, v9.8b\n"
+ "smlal v5.4s, v20.4h, v17.4h\n"
+ "smlal2 v30.4s, v20.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v19.4h, v4.4h\n"
+ "smlal2 v0.4s, v19.8h, v4.8h\n"
+ "smlal v27.4s, v29.4h, v4.4h\n"
+ "smlal2 v6.4s, v29.8h, v4.8h\n"
+ "smlal v1.4s, v20.4h, v4.4h\n"
+ "smlal2 v25.4s, v20.8h, v4.8h\n"
"tbz x1, #2, 77f\n"
- "ld1 { v30.s }[0], [x20], #0x4\n"
+ "ld1 { v26.s }[0], [x20], #0x4\n"
"tbz x1, #1, 76f\n"
- "ld1 { v30.h }[2], [x20], #0x2\n"
+ "ld1 { v26.h }[2], [x20], #0x2\n"
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[6], [x20]\n"
+ "ld1 { v26.b }[6], [x20]\n"
"b 79f\n"
"76:" // Oddments: Load (4, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[4], [x20]\n"
+ "ld1 { v26.b }[4], [x20]\n"
"b 79f\n"
"77:" // Oddments: Load (4, 2): Bit 2: Unset
"tbz x1, #1, 78f\n"
- "ld1 { v30.h }[0], [x20], #0x2\n"
+ "ld1 { v26.h }[0], [x20], #0x2\n"
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[2], [x20]\n"
+ "ld1 { v26.b }[2], [x20]\n"
"b 79f\n"
"78:" // Oddments: Load (4, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 79f\n"
- "ld1 { v30.b }[0], [x20]\n"
+ "ld1 { v26.b }[0], [x20]\n"
"79:" // Oddments: Load (4, 2): Bit 2: End
- "ldr d12, [x6, #0x88]\n"
- "usubl v30.8h, v30.8b, v18.8b\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "ldr x20, [x5, #0xd8]\n"
- "smlal v23.4s, v30.4h, v29.4h\n"
- "smlal2 v19.4s, v30.8h, v29.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v21.4h, v12.4h\n"
- "smlal2 v15.4s, v21.8h, v12.8h\n"
- "smlal v20.4s, v9.4h, v12.4h\n"
- "smlal2 v5.4s, v9.8h, v12.8h\n"
- "smlal v24.4s, v30.4h, v12.4h\n"
- "smlal2 v22.4s, v30.8h, v12.8h\n"
+ "ldr d17, [x5, #0x88]\n"
+ "usubl v26.8h, v26.8b, v15.8b\n"
+ "ldr x20, [x4, #0xd8]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v26.4h, v4.4h\n"
+ "smlal2 v30.4s, v26.8h, v4.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v29.4h, v17.4h\n"
+ "smlal2 v0.4s, v29.8h, v17.8h\n"
+ "smlal v27.4s, v31.4h, v17.4h\n"
+ "smlal2 v6.4s, v31.8h, v17.8h\n"
+ "smlal v1.4s, v26.4h, v17.4h\n"
+ "smlal2 v25.4s, v26.8h, v17.8h\n"
"tbz x1, #2, 81f\n"
- "ld1 { v29.s }[0], [x20], #0x4\n"
+ "ld1 { v23.s }[0], [x20], #0x4\n"
"tbz x1, #1, 80f\n"
- "ld1 { v29.h }[2], [x20], #0x2\n"
+ "ld1 { v23.h }[2], [x20], #0x2\n"
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[6], [x20]\n"
+ "ld1 { v23.b }[6], [x20]\n"
"b 83f\n"
"80:" // Oddments: Load (4, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[4], [x20]\n"
+ "ld1 { v23.b }[4], [x20]\n"
"b 83f\n"
"81:" // Oddments: Load (4, 3): Bit 2: Unset
"tbz x1, #1, 82f\n"
- "ld1 { v29.h }[0], [x20], #0x2\n"
+ "ld1 { v23.h }[0], [x20], #0x2\n"
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[2], [x20]\n"
+ "ld1 { v23.b }[2], [x20]\n"
"b 83f\n"
"82:" // Oddments: Load (4, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 83f\n"
- "ld1 { v29.b }[0], [x20]\n"
+ "ld1 { v23.b }[0], [x20]\n"
"83:" // Oddments: Load (4, 3): Bit 2: End
- "ldr d21, [x6, #0x90]\n"
- "usubl v29.8h, v29.8b, v18.8b\n"
- "ssubl v21.8h, v21.8b, v13.8b\n"
- "ldr x20, [x5, #0xe0]\n"
- "smlal v23.4s, v29.4h, v12.4h\n"
- "smlal2 v19.4s, v29.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v9.4h, v21.4h\n"
- "smlal2 v15.4s, v9.8h, v21.8h\n"
- "smlal v20.4s, v3.4h, v21.4h\n"
- "smlal2 v5.4s, v3.8h, v21.8h\n"
- "smlal v24.4s, v29.4h, v21.4h\n"
- "smlal2 v22.4s, v29.8h, v21.8h\n"
+ "ldr d22, [x5, #0x90]\n"
+ "usubl v23.8h, v23.8b, v15.8b\n"
+ "ldr x20, [x4, #0xe0]\n"
+ "ssubl v22.8h, v22.8b, v9.8b\n"
+ "smlal v5.4s, v23.4h, v17.4h\n"
+ "smlal2 v30.4s, v23.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v31.4h, v22.4h\n"
+ "smlal2 v0.4s, v31.8h, v22.8h\n"
+ "smlal v27.4s, v21.4h, v22.4h\n"
+ "smlal2 v6.4s, v21.8h, v22.8h\n"
+ "smlal v1.4s, v23.4h, v22.4h\n"
+ "smlal2 v25.4s, v23.8h, v22.8h\n"
"tbz x1, #2, 85f\n"
- "ld1 { v25.s }[0], [x20], #0x4\n"
+ "ld1 { v28.s }[0], [x20], #0x4\n"
"tbz x1, #1, 84f\n"
- "ld1 { v25.h }[2], [x20], #0x2\n"
+ "ld1 { v28.h }[2], [x20], #0x2\n"
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[6], [x20]\n"
+ "ld1 { v28.b }[6], [x20]\n"
"b 87f\n"
"84:" // Oddments: Load (4, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[4], [x20]\n"
+ "ld1 { v28.b }[4], [x20]\n"
"b 87f\n"
"85:" // Oddments: Load (4, 4): Bit 2: Unset
"tbz x1, #1, 86f\n"
- "ld1 { v25.h }[0], [x20], #0x2\n"
+ "ld1 { v28.h }[0], [x20], #0x2\n"
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[2], [x20]\n"
+ "ld1 { v28.b }[2], [x20]\n"
"b 87f\n"
"86:" // Oddments: Load (4, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 87f\n"
- "ld1 { v25.b }[0], [x20]\n"
+ "ld1 { v28.b }[0], [x20]\n"
"87:" // Oddments: Load (4, 4): Bit 2: End
- "ldr d8, [x6, #0x98]\n"
- "usubl v25.8h, v25.8b, v18.8b\n"
- "ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr x20, [x5, #0xe8]\n"
- "smlal v23.4s, v25.4h, v21.4h\n"
- "smlal2 v19.4s, v25.8h, v21.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v3.4h, v8.4h\n"
- "smlal2 v15.4s, v3.8h, v8.8h\n"
- "smlal v20.4s, v14.4h, v8.4h\n"
- "smlal2 v5.4s, v14.8h, v8.8h\n"
- "smlal v24.4s, v25.4h, v8.4h\n"
- "smlal2 v22.4s, v25.8h, v8.8h\n"
+ "ldr d17, [x5, #0x98]\n"
+ "usubl v28.8h, v28.8b, v15.8b\n"
+ "ldr x20, [x4, #0xe8]\n"
+ "ssubl v17.8h, v17.8b, v9.8b\n"
+ "smlal v5.4s, v28.4h, v22.4h\n"
+ "smlal2 v30.4s, v28.8h, v22.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v21.4h, v17.4h\n"
+ "smlal2 v0.4s, v21.8h, v17.8h\n"
+ "smlal v27.4s, v11.4h, v17.4h\n"
+ "smlal2 v6.4s, v11.8h, v17.8h\n"
+ "smlal v1.4s, v28.4h, v17.4h\n"
+ "smlal2 v25.4s, v28.8h, v17.8h\n"
"tbz x1, #2, 89f\n"
- "ld1 { v21.s }[0], [x20], #0x4\n"
+ "ld1 { v16.s }[0], [x20], #0x4\n"
"tbz x1, #1, 88f\n"
- "ld1 { v21.h }[2], [x20], #0x2\n"
+ "ld1 { v16.h }[2], [x20], #0x2\n"
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[6], [x20]\n"
+ "ld1 { v16.b }[6], [x20]\n"
"b 91f\n"
"88:" // Oddments: Load (4, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[4], [x20]\n"
+ "ld1 { v16.b }[4], [x20]\n"
"b 91f\n"
"89:" // Oddments: Load (4, 5): Bit 2: Unset
"tbz x1, #1, 90f\n"
- "ld1 { v21.h }[0], [x20], #0x2\n"
+ "ld1 { v16.h }[0], [x20], #0x2\n"
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[2], [x20]\n"
+ "ld1 { v16.b }[2], [x20]\n"
"b 91f\n"
"90:" // Oddments: Load (4, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 91f\n"
- "ld1 { v21.b }[0], [x20]\n"
+ "ld1 { v16.b }[0], [x20]\n"
"91:" // Oddments: Load (4, 5): Bit 2: End
- "ldr d9, [x6, #0xa0]\n"
- "usubl v21.8h, v21.8b, v18.8b\n"
- "ssubl v9.8h, v9.8b, v13.8b\n"
- "ldr x20, [x5, #0xf0]\n"
- "smlal v23.4s, v21.4h, v8.4h\n"
- "smlal2 v19.4s, v21.8h, v8.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v1.4h, v9.4h\n"
- "smlal2 v15.4s, v1.8h, v9.8h\n"
- "smlal v20.4s, v16.4h, v9.4h\n"
- "smlal2 v5.4s, v16.8h, v9.8h\n"
+ "ldr d3, [x5, #0xa0]\n"
+ "usubl v16.8h, v16.8b, v15.8b\n"
+ "ldr x20, [x4, #0xf0]\n"
+ "ssubl v3.8h, v3.8b, v9.8b\n"
+ "smlal v5.4s, v16.4h, v17.4h\n"
+ "smlal2 v30.4s, v16.8h, v17.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v18.4h, v3.4h\n"
+ "smlal2 v0.4s, v18.8h, v3.8h\n"
+ "smlal v27.4s, v20.4h, v3.4h\n"
+ "smlal2 v6.4s, v20.8h, v3.8h\n"
"tbz x1, #2, 93f\n"
"ld1 { v12.s }[0], [x20], #0x4\n"
"tbz x1, #1, 92f\n"
@@ -1871,308 +1871,308 @@ void a64_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
"tbz x1, #0, 95f\n"
"ld1 { v12.b }[0], [x20]\n"
"95:" // Oddments: Load (5, 0): Bit 2: End
- "usubl v12.8h, v12.8b, v18.8b\n"
- "ldr x20, [x5, #0xf8]\n"
- "smlal v24.4s, v12.4h, v9.4h\n"
- "smlal2 v22.4s, v12.8h, v9.8h\n"
- "add x20, x20, x3\n"
+ "usubl v12.8h, v12.8b, v15.8b\n"
+ "ldr x20, [x4, #0xf8]\n"
+ "smlal v1.4s, v12.4h, v3.4h\n"
+ "smlal2 v25.4s, v12.8h, v3.8h\n"
+ "add x20, x20, x2\n"
"tbz x1, #2, 97f\n"
- "ld1 { v10.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 96f\n"
- "ld1 { v10.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 99f\n"
"96:" // Oddments: Load (5, 1): Bit 2: Bit 1: Unset
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 99f\n"
"97:" // Oddments: Load (5, 1): Bit 2: Unset
"tbz x1, #1, 98f\n"
- "ld1 { v10.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 99f\n"
"98:" // Oddments: Load (5, 1): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 99f\n"
- "ld1 { v10.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"99:" // Oddments: Load (5, 1): Bit 2: End
- "ldr d12, [x6, #0xa8]\n"
- "usubl v10.8h, v10.8b, v18.8b\n"
- "ssubl v12.8h, v12.8b, v13.8b\n"
- "ldr x20, [x5, #0x100]\n"
- "smlal v23.4s, v10.4h, v9.4h\n"
- "smlal2 v19.4s, v10.8h, v9.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v16.4h, v12.4h\n"
- "smlal2 v15.4s, v16.8h, v12.8h\n"
- "smlal v20.4s, v30.4h, v12.4h\n"
- "smlal2 v5.4s, v30.8h, v12.8h\n"
- "smlal v24.4s, v10.4h, v12.4h\n"
- "smlal2 v22.4s, v10.8h, v12.8h\n"
+ "ldr d18, [x5, #0xa8]\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "ldr x20, [x4, #0x100]\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v17.4h, v3.4h\n"
+ "smlal2 v30.4s, v17.8h, v3.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v20.4h, v18.4h\n"
+ "smlal2 v0.4s, v20.8h, v18.8h\n"
+ "smlal v27.4s, v26.4h, v18.4h\n"
+ "smlal2 v6.4s, v26.8h, v18.8h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
"tbz x1, #2, 101f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v19.s }[0], [x20], #0x4\n"
"tbz x1, #1, 100f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v19.h }[2], [x20], #0x2\n"
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v19.b }[6], [x20]\n"
"b 103f\n"
"100:" // Oddments: Load (5, 2): Bit 2: Bit 1: Unset
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v19.b }[4], [x20]\n"
"b 103f\n"
"101:" // Oddments: Load (5, 2): Bit 2: Unset
"tbz x1, #1, 102f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v19.h }[0], [x20], #0x2\n"
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v19.b }[2], [x20]\n"
"b 103f\n"
"102:" // Oddments: Load (5, 2): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 103f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v19.b }[0], [x20]\n"
"103:" // Oddments: Load (5, 2): Bit 2: End
- "ldr d28, [x6, #0xb0]\n"
- "usubl v9.8h, v9.8b, v18.8b\n"
- "ssubl v28.8h, v28.8b, v13.8b\n"
- "ldr x20, [x5, #0x108]\n"
- "smlal v23.4s, v9.4h, v12.4h\n"
- "smlal2 v19.4s, v9.8h, v12.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v30.4h, v28.4h\n"
- "smlal2 v15.4s, v30.8h, v28.8h\n"
- "smlal v20.4s, v29.4h, v28.4h\n"
- "smlal2 v5.4s, v29.8h, v28.8h\n"
- "smlal v24.4s, v9.4h, v28.4h\n"
- "smlal2 v22.4s, v9.8h, v28.8h\n"
+ "ldr d12, [x5, #0xb0]\n"
+ "usubl v19.8h, v19.8b, v15.8b\n"
+ "ldr x20, [x4, #0x108]\n"
+ "ssubl v12.8h, v12.8b, v9.8b\n"
+ "smlal v5.4s, v19.4h, v18.4h\n"
+ "smlal2 v30.4s, v19.8h, v18.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v26.4h, v12.4h\n"
+ "smlal2 v0.4s, v26.8h, v12.8h\n"
+ "smlal v27.4s, v23.4h, v12.4h\n"
+ "smlal2 v6.4s, v23.8h, v12.8h\n"
+ "smlal v1.4s, v19.4h, v12.4h\n"
+ "smlal2 v25.4s, v19.8h, v12.8h\n"
"tbz x1, #2, 105f\n"
- "ld1 { v2.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 104f\n"
- "ld1 { v2.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 107f\n"
"104:" // Oddments: Load (5, 3): Bit 2: Bit 1: Unset
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 107f\n"
"105:" // Oddments: Load (5, 3): Bit 2: Unset
"tbz x1, #1, 106f\n"
- "ld1 { v2.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 107f\n"
"106:" // Oddments: Load (5, 3): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 107f\n"
- "ld1 { v2.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"107:" // Oddments: Load (5, 3): Bit 2: End
- "ldr d30, [x6, #0xb8]\n"
- "usubl v2.8h, v2.8b, v18.8b\n"
- "ssubl v30.8h, v30.8b, v13.8b\n"
- "ldr x20, [x5, #0x110]\n"
- "smlal v23.4s, v2.4h, v28.4h\n"
- "smlal2 v19.4s, v2.8h, v28.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v29.4h, v30.4h\n"
- "smlal2 v15.4s, v29.8h, v30.8h\n"
- "smlal v20.4s, v25.4h, v30.4h\n"
- "smlal2 v5.4s, v25.8h, v30.8h\n"
- "smlal v24.4s, v2.4h, v30.4h\n"
- "smlal2 v22.4s, v2.8h, v30.8h\n"
+ "ldr d18, [x5, #0xb8]\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "ldr x20, [x4, #0x110]\n"
+ "ssubl v18.8h, v18.8b, v9.8b\n"
+ "smlal v5.4s, v17.4h, v12.4h\n"
+ "smlal2 v30.4s, v17.8h, v12.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v23.4h, v18.4h\n"
+ "smlal2 v0.4s, v23.8h, v18.8h\n"
+ "smlal v27.4s, v28.4h, v18.4h\n"
+ "smlal2 v6.4s, v28.8h, v18.8h\n"
+ "smlal v1.4s, v17.4h, v18.4h\n"
+ "smlal2 v25.4s, v17.8h, v18.8h\n"
"tbz x1, #2, 109f\n"
- "ld1 { v27.s }[0], [x20], #0x4\n"
+ "ld1 { v3.s }[0], [x20], #0x4\n"
"tbz x1, #1, 108f\n"
- "ld1 { v27.h }[2], [x20], #0x2\n"
+ "ld1 { v3.h }[2], [x20], #0x2\n"
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[6], [x20]\n"
+ "ld1 { v3.b }[6], [x20]\n"
"b 111f\n"
"108:" // Oddments: Load (5, 4): Bit 2: Bit 1: Unset
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[4], [x20]\n"
+ "ld1 { v3.b }[4], [x20]\n"
"b 111f\n"
"109:" // Oddments: Load (5, 4): Bit 2: Unset
"tbz x1, #1, 110f\n"
- "ld1 { v27.h }[0], [x20], #0x2\n"
+ "ld1 { v3.h }[0], [x20], #0x2\n"
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[2], [x20]\n"
+ "ld1 { v3.b }[2], [x20]\n"
"b 111f\n"
"110:" // Oddments: Load (5, 4): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 111f\n"
- "ld1 { v27.b }[0], [x20]\n"
+ "ld1 { v3.b }[0], [x20]\n"
"111:" // Oddments: Load (5, 4): Bit 2: End
- "ldr d8, [x6, #0xc0]\n"
- "usubl v27.8h, v27.8b, v18.8b\n"
- "ssubl v8.8h, v8.8b, v13.8b\n"
- "ldr x20, [x5, #0x118]\n"
- "smlal v23.4s, v27.4h, v30.4h\n"
- "smlal2 v19.4s, v27.8h, v30.8h\n"
- "add x20, x20, x3\n"
- "smlal v7.4s, v25.4h, v8.4h\n"
- "smlal2 v15.4s, v25.8h, v8.8h\n"
- "smlal v20.4s, v21.4h, v8.4h\n"
- "smlal2 v5.4s, v21.8h, v8.8h\n"
- "smlal v24.4s, v27.4h, v8.4h\n"
- "smlal2 v22.4s, v27.8h, v8.8h\n"
+ "ldr d26, [x5, #0xc0]\n"
+ "usubl v3.8h, v3.8b, v15.8b\n"
+ "ldr x20, [x4, #0x118]\n"
+ "ssubl v26.8h, v26.8b, v9.8b\n"
+ "smlal v5.4s, v3.4h, v18.4h\n"
+ "smlal2 v30.4s, v3.8h, v18.8h\n"
+ "add x20, x20, x2\n"
+ "smlal v8.4s, v28.4h, v26.4h\n"
+ "smlal2 v0.4s, v28.8h, v26.8h\n"
+ "smlal v27.4s, v16.4h, v26.4h\n"
+ "smlal2 v6.4s, v16.8h, v26.8h\n"
+ "smlal v1.4s, v3.4h, v26.4h\n"
+ "smlal2 v25.4s, v3.8h, v26.8h\n"
"tbz x1, #2, 113f\n"
- "ld1 { v9.s }[0], [x20], #0x4\n"
+ "ld1 { v17.s }[0], [x20], #0x4\n"
"tbz x1, #1, 112f\n"
- "ld1 { v9.h }[2], [x20], #0x2\n"
+ "ld1 { v17.h }[2], [x20], #0x2\n"
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[6], [x20]\n"
+ "ld1 { v17.b }[6], [x20]\n"
"b 115f\n"
"112:" // Oddments: Load (5, 5): Bit 2: Bit 1: Unset
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[4], [x20]\n"
+ "ld1 { v17.b }[4], [x20]\n"
"b 115f\n"
"113:" // Oddments: Load (5, 5): Bit 2: Unset
"tbz x1, #1, 114f\n"
- "ld1 { v9.h }[0], [x20], #0x2\n"
+ "ld1 { v17.h }[0], [x20], #0x2\n"
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[2], [x20]\n"
+ "ld1 { v17.b }[2], [x20]\n"
"b 115f\n"
"114:" // Oddments: Load (5, 5): Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 115f\n"
- "ld1 { v9.b }[0], [x20]\n"
+ "ld1 { v17.b }[0], [x20]\n"
"115:" // Oddments: Load (5, 5): Bit 2: End
- "usubl v9.8h, v9.8b, v18.8b\n"
- "smlal v23.4s, v9.4h, v8.4h\n"
- "smlal2 v19.4s, v9.8h, v8.8h\n"
+ "usubl v17.8h, v17.8b, v15.8b\n"
+ "smlal v5.4s, v17.4h, v26.4h\n"
+ "smlal2 v30.4s, v17.8h, v26.8h\n"
"tbz x1, #2, 117f\n"
- "ld1 { v30.4s }, [x7], #0x10\n"
- "ld1 { v12.4s }, [x8], #0x10\n"
+ "ld1 { v9.4s }, [x6], #0x10\n"
+ "ld1 { v20.4s }, [x7], #0x10\n"
"tbz x1, #1, 116f\n"
- "ld1 { v14.d }[0], [x7], #0x8\n"
- "ld1 { v27.d }[0], [x8], #0x8\n"
+ "ld1 { v18.d }[0], [x6], #0x8\n"
+ "ld1 { v3.d }[0], [x7], #0x8\n"
"tbz x1, #0, 119f\n"
- "ld1 { v14.s }[2], [x7]\n"
- "ld1 { v27.s }[2], [x8]\n"
+ "ld1 { v18.s }[2], [x6]\n"
+ "ld1 { v3.s }[2], [x7]\n"
"b 119f\n"
"116:" // Oddments: Load requant params: Bit 2: Bit 1: Unset
"tbz x1, #0, 119f\n"
- "ld1 { v14.s }[0], [x7]\n"
- "ld1 { v27.s }[0], [x8]\n"
+ "ld1 { v18.s }[0], [x6]\n"
+ "ld1 { v3.s }[0], [x7]\n"
"b 119f\n"
"117:" // Oddments: Load requant params: Bit 2: Unset
"tbz x1, #1, 118f\n"
- "ld1 { v30.d }[0], [x7], #0x8\n"
- "ld1 { v12.d }[0], [x8], #0x8\n"
+ "ld1 { v9.d }[0], [x6], #0x8\n"
+ "ld1 { v20.d }[0], [x7], #0x8\n"
"tbz x1, #0, 119f\n"
- "ld1 { v30.s }[2], [x7]\n"
- "ld1 { v12.s }[2], [x8]\n"
+ "ld1 { v9.s }[2], [x6]\n"
+ "ld1 { v20.s }[2], [x7]\n"
"b 119f\n"
"118:" // Oddments: Load requant params: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 119f\n"
- "ld1 { v30.s }[0], [x7]\n"
- "ld1 { v12.s }[0], [x8]\n"
+ "ld1 { v9.s }[0], [x6]\n"
+ "ld1 { v20.s }[0], [x7]\n"
"119:" // Oddments: Load requant params: Bit 2: End
- "sqrdmulh v7.4s, v7.4s, v30.4s\n"
- "and v16.16b, v7.16b, v12.16b\n"
- "add x17, x17, x4\n"
- "add x16, x16, x4\n"
- "sqrdmulh v15.4s, v15.4s, v14.4s\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
- "add x15, x15, x4\n"
- "add x14, x14, x4\n"
- "and v2.16b, v15.16b, v27.16b\n"
- "sqrdmulh v20.4s, v20.4s, v30.4s\n"
- "sqrdmulh v24.4s, v24.4s, v30.4s\n"
- "sqrdmulh v23.4s, v23.4s, v30.4s\n"
- "sqadd v7.4s, v7.4s, v16.4s\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "and v21.16b, v20.16b, v12.16b\n"
- "sqrdmulh v5.4s, v5.4s, v14.4s\n"
- "and v18.16b, v24.16b, v12.16b\n"
- "sqrdmulh v22.4s, v22.4s, v14.4s\n"
- "and v31.16b, v23.16b, v12.16b\n"
- "sqrdmulh v19.4s, v19.4s, v14.4s\n"
- "sqadd v15.4s, v15.4s, v2.4s\n"
- "sshr v21.4s, v21.4s, #0x1f\n"
- "and v9.16b, v5.16b, v27.16b\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "and v4.16b, v22.16b, v27.16b\n"
- "sshr v31.4s, v31.4s, #0x1f\n"
- "and v28.16b, v19.16b, v27.16b\n"
- "sqadd v20.4s, v20.4s, v21.4s\n"
+ "sqrdmulh v8.4s, v8.4s, v9.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v18.4s\n"
+ "add x8, x8, x3\n"
+ "add x17, x17, x3\n"
+ "sqrdmulh v27.4s, v27.4s, v9.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v9.4s\n"
+ "add x16, x16, x3\n"
+ "add x15, x15, x3\n"
+ "sqrdmulh v5.4s, v5.4s, v9.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v18.4s\n"
+ "and v17.16b, v8.16b, v20.16b\n"
+ "and v23.16b, v0.16b, v3.16b\n"
+ "and v9.16b, v27.16b, v20.16b\n"
+ "and v26.16b, v1.16b, v20.16b\n"
+ "sqrdmulh v25.4s, v25.4s, v18.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v18.4s\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
"sshr v9.4s, v9.4s, #0x1f\n"
- "sqadd v24.4s, v24.4s, v18.4s\n"
- "sshr v4.4s, v4.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v31.4s\n"
- "sshr v28.4s, v28.4s, #0x1f\n"
- "srshl v7.4s, v7.4s, v12.4s\n"
- "srshl v20.4s, v20.4s, v12.4s\n"
- "sqadd v5.4s, v5.4s, v9.4s\n"
- "srshl v24.4s, v24.4s, v12.4s\n"
- "sqadd v22.4s, v22.4s, v4.4s\n"
- "srshl v23.4s, v23.4s, v12.4s\n"
- "sqadd v19.4s, v19.4s, v28.4s\n"
- "srshl v15.4s, v15.4s, v27.4s\n"
- "sqxtn v7.4h, v7.4s\n"
- "srshl v5.4s, v5.4s, v27.4s\n"
- "sqxtn v20.4h, v20.4s\n"
- "srshl v22.4s, v22.4s, v27.4s\n"
- "sqxtn v24.4h, v24.4s\n"
- "srshl v19.4s, v19.4s, v27.4s\n"
- "sqxtn v23.4h, v23.4s\n"
- "sqxtn2 v7.8h, v15.4s\n"
- "sqxtn2 v20.8h, v5.4s\n"
- "sqxtn2 v24.8h, v22.4s\n"
- "sqxtn2 v23.8h, v19.4s\n"
- "sqadd v7.8h, v7.8h, v26.8h\n"
- "sqadd v20.8h, v20.8h, v26.8h\n"
- "sqadd v24.8h, v24.8h, v26.8h\n"
- "sqadd v23.8h, v23.8h, v26.8h\n"
- "smax v7.8h, v7.8h, v11.8h\n"
- "smax v20.8h, v20.8h, v11.8h\n"
- "smax v24.8h, v24.8h, v11.8h\n"
- "smax v23.8h, v23.8h, v11.8h\n"
- "smin v7.8h, v7.8h, v0.8h\n"
- "smin v20.8h, v20.8h, v0.8h\n"
- "smin v24.8h, v24.8h, v0.8h\n"
- "smin v23.8h, v23.8h, v0.8h\n"
- "uzp1 v7.16b, v7.16b, v7.16b\n"
- "uzp1 v20.16b, v20.16b, v20.16b\n"
- "uzp1 v24.16b, v24.16b, v24.16b\n"
- "uzp1 v23.16b, v23.16b, v23.16b\n"
+ "and v24.16b, v6.16b, v3.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "and v18.16b, v25.16b, v3.16b\n"
+ "sqadd v8.4s, v8.4s, v17.4s\n"
+ "and v17.16b, v5.16b, v20.16b\n"
+ "sqadd v0.4s, v0.4s, v23.4s\n"
+ "and v16.16b, v30.16b, v3.16b\n"
+ "sqadd v27.4s, v27.4s, v9.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sqadd v1.4s, v1.4s, v26.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "srshl v8.4s, v8.4s, v20.4s\n"
+ "srshl v27.4s, v27.4s, v20.4s\n"
+ "sqadd v5.4s, v5.4s, v17.4s\n"
+ "sqadd v6.4s, v6.4s, v24.4s\n"
+ "srshl v1.4s, v1.4s, v20.4s\n"
+ "sqadd v25.4s, v25.4s, v18.4s\n"
+ "sqadd v30.4s, v30.4s, v16.4s\n"
+ "srshl v0.4s, v0.4s, v3.4s\n"
+ "srshl v5.4s, v5.4s, v20.4s\n"
+ "sqxtn v8.4h, v8.4s\n"
+ "srshl v6.4s, v6.4s, v3.4s\n"
+ "sqxtn v27.4h, v27.4s\n"
+ "srshl v25.4s, v25.4s, v3.4s\n"
+ "sqxtn v1.4h, v1.4s\n"
+ "srshl v30.4s, v30.4s, v3.4s\n"
+ "sqxtn v5.4h, v5.4s\n"
+ "sqxtn2 v8.8h, v0.4s\n"
+ "sqxtn2 v27.8h, v6.4s\n"
+ "sqxtn2 v1.8h, v25.4s\n"
+ "sqxtn2 v5.8h, v30.4s\n"
+ "sqadd v8.8h, v8.8h, v13.8h\n"
+ "sqadd v27.8h, v27.8h, v13.8h\n"
+ "sqadd v1.8h, v1.8h, v13.8h\n"
+ "sqadd v5.8h, v5.8h, v13.8h\n"
+ "smax v8.8h, v8.8h, v10.8h\n"
+ "smax v27.8h, v27.8h, v10.8h\n"
+ "smax v1.8h, v1.8h, v10.8h\n"
+ "smax v5.8h, v5.8h, v10.8h\n"
+ "smin v8.8h, v8.8h, v14.8h\n"
+ "smin v27.8h, v27.8h, v14.8h\n"
+ "smin v1.8h, v1.8h, v14.8h\n"
+ "smin v5.8h, v5.8h, v14.8h\n"
+ "uzp1 v8.16b, v8.16b, v8.16b\n"
+ "uzp1 v27.16b, v27.16b, v27.16b\n"
+ "uzp1 v1.16b, v1.16b, v1.16b\n"
+ "uzp1 v5.16b, v5.16b, v5.16b\n"
"tbz x1, #2, 121f\n"
- "st1 { v7.s }[0], [x17], #0x4\n"
- "st1 { v20.s }[0], [x16], #0x4\n"
- "st1 { v24.s }[0], [x15], #0x4\n"
- "st1 { v23.s }[0], [x14], #0x4\n"
+ "st1 { v8.s }[0], [x8], #0x4\n"
+ "st1 { v27.s }[0], [x17], #0x4\n"
+ "st1 { v1.s }[0], [x16], #0x4\n"
+ "st1 { v5.s }[0], [x15], #0x4\n"
"tbz x1, #1, 120f\n"
- "st1 { v7.h }[2], [x17], #0x2\n"
- "st1 { v20.h }[2], [x16], #0x2\n"
- "st1 { v24.h }[2], [x15], #0x2\n"
- "st1 { v23.h }[2], [x14], #0x2\n"
+ "st1 { v8.h }[2], [x8], #0x2\n"
+ "st1 { v27.h }[2], [x17], #0x2\n"
+ "st1 { v1.h }[2], [x16], #0x2\n"
+ "st1 { v5.h }[2], [x15], #0x2\n"
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[6], [x17], #0x1\n"
- "st1 { v20.b }[6], [x16], #0x1\n"
- "st1 { v24.b }[6], [x15], #0x1\n"
- "st1 { v23.b }[6], [x14], #0x1\n"
+ "st1 { v8.b }[6], [x8], #0x1\n"
+ "st1 { v27.b }[6], [x17], #0x1\n"
+ "st1 { v1.b }[6], [x16], #0x1\n"
+ "st1 { v5.b }[6], [x15], #0x1\n"
"b 123f\n"
"120:" // Oddments: Bit 2: Bit 1: Unset
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[4], [x17], #0x1\n"
- "st1 { v20.b }[4], [x16], #0x1\n"
- "st1 { v24.b }[4], [x15], #0x1\n"
- "st1 { v23.b }[4], [x14], #0x1\n"
+ "st1 { v8.b }[4], [x8], #0x1\n"
+ "st1 { v27.b }[4], [x17], #0x1\n"
+ "st1 { v1.b }[4], [x16], #0x1\n"
+ "st1 { v5.b }[4], [x15], #0x1\n"
"b 123f\n"
"121:" // Oddments: Bit 2: Unset
"tbz x1, #1, 122f\n"
- "st1 { v7.h }[0], [x17], #0x2\n"
- "st1 { v20.h }[0], [x16], #0x2\n"
- "st1 { v24.h }[0], [x15], #0x2\n"
- "st1 { v23.h }[0], [x14], #0x2\n"
+ "st1 { v8.h }[0], [x8], #0x2\n"
+ "st1 { v27.h }[0], [x17], #0x2\n"
+ "st1 { v1.h }[0], [x16], #0x2\n"
+ "st1 { v5.h }[0], [x15], #0x2\n"
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[2], [x17], #0x1\n"
- "st1 { v20.b }[2], [x16], #0x1\n"
- "st1 { v24.b }[2], [x15], #0x1\n"
- "st1 { v23.b }[2], [x14], #0x1\n"
+ "st1 { v8.b }[2], [x8], #0x1\n"
+ "st1 { v27.b }[2], [x17], #0x1\n"
+ "st1 { v1.b }[2], [x16], #0x1\n"
+ "st1 { v5.b }[2], [x15], #0x1\n"
"b 123f\n"
"122:" // Oddments: Bit 2: Unset: Bit 1: Unset
"tbz x1, #0, 123f\n"
- "st1 { v7.b }[0], [x17], #0x1\n"
- "st1 { v20.b }[0], [x16], #0x1\n"
- "st1 { v24.b }[0], [x15], #0x1\n"
- "st1 { v23.b }[0], [x14], #0x1\n"
+ "st1 { v8.b }[0], [x8], #0x1\n"
+ "st1 { v27.b }[0], [x17], #0x1\n"
+ "st1 { v1.b }[0], [x16], #0x1\n"
+ "st1 { v5.b }[0], [x15], #0x1\n"
"123:" // Oddments: Bit 2: End
"124:" // End
:
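The block above ends the oddments tail of this kernel: accumulators are requantized with SQRDMULH against the loaded multiplier vectors, corrected by the AND/SSHR/SQADD fixup, rounding-shifted with SRSHL, narrowed with SQXTN, offset by the c_offset vector, clamped with SMAX/SMIN against minval/maxval, and narrowed again with UZP1 before the partial byte stores. A minimal scalar sketch of that arithmetic, assuming the usual Requantize32 semantics; every identifier below is illustrative, not the library's API:

#include <algorithm>
#include <cstdint>

// SQRDMULH: saturating rounding doubling multiply, high 32 bits.
static int32_t sqrdmulh32(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN)
        return INT32_MAX;                        // the only saturating case
    int64_t p = 2 * (int64_t)a * (int64_t)b;     // doubled 64-bit product
    return (int32_t)((p + (1LL << 31)) >> 32);   // round, keep the high half
}

// One lane of the requantize tail (saturation of intermediates elided).
static uint8_t requantize(int32_t acc, int32_t mul, int rshift,
                          int32_t c_offset, int32_t minval, int32_t maxval)
{
    int32_t v = sqrdmulh32(acc, mul);
    if (rshift > 0)
    {
        if (v < 0)
            v -= 1;                  // AND/SSHR/SQADD fixup: rounds ties
                                     // away from zero instead of upward
        v = (v + (1 << (rshift - 1))) >> rshift; // SRSHL by a negative shift
    }
    v += c_offset;                               // SQADD after the SQXTN pair
    v = std::min(std::max(v, minval), maxval);   // SMAX/SMIN clamp
    return (uint8_t)v;                           // UZP1 keeps the low byte
}

The hunk itself only renames registers and reorders these steps for scheduling; each -/+ pair computes the same value throughout.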
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
index c2bec4cdab..bb3de6c865 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,21 +45,21 @@ void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(
"lsr x9, %x[n_channels], #0x2\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
"ld1r { v8.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v7.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_maxval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v7.4s }, [x21]\n"
"ld1r { v6.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v5.16b }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v5.16b }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v3.4s }, [x21]\n"
"ld1r { v2.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
"mov x11, #0x0\n"
+ "ld1r { v1.4s }, [x20]\n"
"cbz x9, 6f\n"
"1:" // Channel loop
"movi v23.4s, #0x0\n"
@@ -68,75 +68,75 @@ void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldr q23, [%x[bias], x20]\n"
"2:" // Channel loop: Load bias: Done
"ldr s0, [%x[params]], #0x4\n"
- "mov x25, %x[inptrs]\n"
- "ldp x21, x20, [x25], #0x10\n"
- "subs x24, %x[n_points], #0x1\n"
- "ldr s14, [x21, x11]\n"
- "ldr s15, [x20, x11]\n"
+ "mov x23, %x[inptrs]\n"
+ "subs x22, %x[n_points], #0x1\n"
"mov v24.16b, v23.16b\n"
"mov v25.16b, v23.16b\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s16, [x21, x11]\n"
"mov v26.16b, v23.16b\n"
"mov v27.16b, v23.16b\n"
- "ldr s17, [x20, x11]\n"
- "ldp x21, x20, [x25], #0x10\n"
"mov v28.16b, v23.16b\n"
+ "ldp x21, x20, [x23], #0x10\n"
"mov v29.16b, v23.16b\n"
- "ldr s18, [x21, x11]\n"
- "ldr s19, [x20, x11]\n"
"mov v30.16b, v23.16b\n"
"mov v31.16b, v23.16b\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s20, [x21, x11]\n"
"ssubl v0.8h, v0.8b, v5.8b\n"
+ "ldr s14, [x21, x11]\n"
+ "ldr s15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x20, x11]\n"
- "ldr x20, [x25], #0x8\n"
"usubl v15.8h, v15.8b, v6.8b\n"
+ "ldr s16, [x21, x11]\n"
+ "ldr s17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x20, x11]\n"
+ "ldr s18, [x21, x11]\n"
+ "ldr s19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v17.8h, v17.8b, v6.8b\n"
"usubl v18.8h, v18.8b, v6.8b\n"
"usubl v19.8h, v19.8b, v6.8b\n"
+ "ldr s20, [x21, x11]\n"
+ "ldr s21, [x20, x11]\n"
+ "ldr x20, [x23], #0x8\n"
"usubl v20.8h, v20.8b, v6.8b\n"
"usubl v21.8h, v21.8b, v6.8b\n"
+ "ldr s22, [x20, x11]\n"
"usubl v22.8h, v22.8b, v6.8b\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x23, x22, [x25], #0x10\n"
- "ldp x21, x20, [x25], #0x10\n"
+ "ldp x21, x20, [x23], #0x10\n"
"smlal v23.4s, v14.4h, v0.4h\n"
"smlal v24.4s, v15.4h, v0.4h\n"
- "ldr s14, [x23, x11]\n"
- "ldr s15, [x22, x11]\n"
+ "subs x22, x22, #0x1\n"
"smlal v25.4s, v16.4h, v0.4h\n"
"smlal v26.4s, v17.4h, v0.4h\n"
- "ldr s16, [x21, x11]\n"
- "ldr s17, [x20, x11]\n"
"smlal v27.4s, v18.4h, v0.4h\n"
"smlal v28.4s, v19.4h, v0.4h\n"
- "ldp x21, x20, [x25], #0x10\n"
- "ldr s18, [x21, x11]\n"
+ "ldr s14, [x21, x11]\n"
+ "ldr s15, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"smlal v29.4s, v20.4h, v0.4h\n"
"smlal v30.4s, v21.4h, v0.4h\n"
- "ldr s19, [x20, x11]\n"
- "ldp x21, x20, [x25], #0x10\n"
"smlal v31.4s, v22.4h, v0.4h\n"
- "subs x24, x24, #0x1\n"
"ldr s0, [%x[params]], #0x4\n"
- "ldr s20, [x21, x11]\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
"usubl v14.8h, v14.8b, v6.8b\n"
- "ldr s21, [x20, x11]\n"
- "ldr x20, [x25], #0x8\n"
"usubl v15.8h, v15.8b, v6.8b\n"
+ "ldr s16, [x21, x11]\n"
+ "ldr s17, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
+ "ssubl v0.8h, v0.8b, v5.8b\n"
"usubl v16.8h, v16.8b, v6.8b\n"
- "ldr s22, [x20, x11]\n"
"usubl v17.8h, v17.8b, v6.8b\n"
+ "ldr s18, [x21, x11]\n"
+ "ldr s19, [x20, x11]\n"
+ "ldp x21, x20, [x23], #0x10\n"
"usubl v18.8h, v18.8b, v6.8b\n"
"usubl v19.8h, v19.8b, v6.8b\n"
+ "ldr s20, [x21, x11]\n"
+ "ldr s21, [x20, x11]\n"
+ "ldr x20, [x23], #0x8\n"
"usubl v20.8h, v20.8b, v6.8b\n"
+ "ldr s22, [x20, x11]\n"
"usubl v21.8h, v21.8b, v6.8b\n"
"usubl v22.8h, v22.8b, v6.8b\n"
"bgt 3b\n"
@@ -162,27 +162,27 @@ void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
"sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sshl v26.4s, v26.4s, v3.4s\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "and v18.16b, v23.16b, v1.16b\n"
- "and v17.16b, v24.16b, v1.16b\n"
- "and v16.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
"sshl v27.4s, v27.4s, v3.4s\n"
"sshl v28.4s, v28.4s, v3.4s\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v2.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v2.4s\n"
"sshl v29.4s, v29.4s, v3.4s\n"
"sshl v30.4s, v30.4s, v3.4s\n"
"sshl v31.4s, v31.4s, v3.4s\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sshr v17.4s, v17.4s, #0x1f\n"
- "sshr v16.4s, v16.4s, #0x1f\n"
+ "and v18.16b, v23.16b, v1.16b\n"
+ "and v17.16b, v24.16b, v1.16b\n"
+ "and v16.16b, v25.16b, v1.16b\n"
"sqrdmulh v26.4s, v26.4s, v2.4s\n"
"sqrdmulh v27.4s, v27.4s, v2.4s\n"
"sqrdmulh v28.4s, v28.4s, v2.4s\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
+ "sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
"sqrdmulh v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v2.4s\n"
@@ -254,17 +254,17 @@ void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(
"uzp1 v31.16b, v31.16b, v31.16b\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s23, [x28, x11]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s24, [x27, x11]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s25, [x26, x11]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s26, [x25, x11]\n"
+ "str s23, [x28, x11]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x11]\n"
+ "str s25, [x26, x11]\n"
+ "str s26, [x25, x11]\n"
"str s27, [x24, x11]\n"
"str s28, [x23, x11]\n"
"str s29, [x22, x11]\n"
@@ -290,24 +290,24 @@ void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(
"9:" // Oddments: Load bias: Done
"ldr s0, [%x[params]], #0x4\n"
"mov x10, %x[inptrs]\n"
- "ldp x9, x28, [x10], #0x10\n"
"mov v24.16b, v23.16b\n"
- "ldp x27, x26, [x10], #0x10\n"
- "ldp x25, x24, [x10], #0x10\n"
"mov v25.16b, v23.16b\n"
"mov v26.16b, v23.16b\n"
- "ldp x23, x22, [x10], #0x10\n"
- "ldr x21, [x10], #0x8\n"
"mov v27.16b, v23.16b\n"
"mov v28.16b, v23.16b\n"
"mov v29.16b, v23.16b\n"
+ "ldp x9, x28, [x10], #0x10\n"
"mov v30.16b, v23.16b\n"
- "add x9, x9, x11\n"
- "add x28, x28, x11\n"
"mov v31.16b, v23.16b\n"
"ssubl v0.8h, v0.8b, v5.8b\n"
+ "ldp x27, x26, [x10], #0x10\n"
+ "add x9, x9, x11\n"
+ "add x28, x28, x11\n"
+ "ldp x25, x24, [x10], #0x10\n"
"add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
+ "ldr x21, [x10], #0x8\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
"add x23, x23, x11\n"
@@ -358,27 +358,27 @@ void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ble 15f\n"
"12:" // Oddments: Planar loop
"ldp x9, x28, [x10], #0x10\n"
- "ldp x27, x26, [x10], #0x10\n"
"smlal v23.4s, v14.4h, v0.4h\n"
"smlal v24.4s, v15.4h, v0.4h\n"
- "ldp x25, x24, [x10], #0x10\n"
- "ldp x23, x22, [x10], #0x10\n"
"smlal v25.4s, v16.4h, v0.4h\n"
"smlal v26.4s, v17.4h, v0.4h\n"
"smlal v27.4s, v18.4h, v0.4h\n"
"smlal v28.4s, v19.4h, v0.4h\n"
- "ldr x21, [x10], #0x8\n"
- "add x9, x9, x11\n"
+ "ldp x27, x26, [x10], #0x10\n"
"smlal v29.4s, v20.4h, v0.4h\n"
"smlal v30.4s, v21.4h, v0.4h\n"
+ "add x9, x9, x11\n"
"add x28, x28, x11\n"
- "add x27, x27, x11\n"
"smlal v31.4s, v22.4h, v0.4h\n"
"ldr s0, [%x[params]], #0x4\n"
- "ssubl v0.8h, v0.8b, v5.8b\n"
+ "ldp x25, x24, [x10], #0x10\n"
+ "add x27, x27, x11\n"
"add x26, x26, x11\n"
+ "ldp x23, x22, [x10], #0x10\n"
+ "ssubl v0.8h, v0.8b, v5.8b\n"
"add x25, x25, x11\n"
"add x24, x24, x11\n"
+ "ldr x21, [x10], #0x8\n"
"add x23, x23, x11\n"
"add x22, x22, x11\n"
"add x21, x21, x11\n"
@@ -465,36 +465,36 @@ void a64_u8s8u8q_nhwc_generic_output9_mla_depthfirst_impl(
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
"sshl v25.4s, v25.4s, v3.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sshl v26.4s, v26.4s, v3.4s\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+ "sshl v27.4s, v27.4s, v3.4s\n"
+ "sshl v28.4s, v28.4s, v3.4s\n"
"ldr x20, [%x[outptrs], #0x40]\n"
+ "sqrdmulh v23.4s, v23.4s, v2.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v2.4s\n"
"add x28, x28, x11\n"
- "and v18.16b, v23.16b, v1.16b\n"
- "and v17.16b, v24.16b, v1.16b\n"
"add x27, x27, x11\n"
+ "sqrdmulh v25.4s, v25.4s, v2.4s\n"
+ "sshl v29.4s, v29.4s, v3.4s\n"
"add x26, x26, x11\n"
- "and v16.16b, v25.16b, v1.16b\n"
- "sshl v26.4s, v26.4s, v3.4s\n"
"add x25, x25, x11\n"
+ "sshl v30.4s, v30.4s, v3.4s\n"
+ "sshl v31.4s, v31.4s, v3.4s\n"
"add x24, x24, x11\n"
- "sshl v27.4s, v27.4s, v3.4s\n"
- "sshl v28.4s, v28.4s, v3.4s\n"
"add x23, x23, x11\n"
+ "and v18.16b, v23.16b, v1.16b\n"
+ "and v17.16b, v24.16b, v1.16b\n"
"add x22, x22, x11\n"
- "sshl v29.4s, v29.4s, v3.4s\n"
- "sshl v30.4s, v30.4s, v3.4s\n"
"add x21, x21, x11\n"
+ "and v16.16b, v25.16b, v1.16b\n"
+ "sqrdmulh v26.4s, v26.4s, v2.4s\n"
"add x20, x20, x11\n"
- "sshl v31.4s, v31.4s, v3.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v2.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v2.4s\n"
"sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
"sqrdmulh v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v2.4s\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index ed99f1f642..2a65f9af21 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,21 +49,21 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"lsr x10, %x[n_output_channels], #0x2\n"
"add x20, %x[qp], %[offsetof_Requantize32_minval]\n"
"ld1r { v15.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_maxval]\n"
- "ld1r { v14.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_maxval]\n"
"add x20, %x[qp], %[offsetof_Requantize32_a_offset]\n"
+ "ld1r { v14.4s }, [x21]\n"
"ld1r { v13.16b }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_b_offset]\n"
- "ld1r { v12.16b }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_b_offset]\n"
"add x20, %x[qp], %[offsetof_Requantize32_c_offset]\n"
+ "ld1r { v12.16b }, [x21]\n"
"ld1r { v11.4s }, [x20]\n"
- "add x20, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
- "ld1r { v10.4s }, [x20]\n"
+ "add x21, %x[qp], %[offsetof_Requantize32_per_layer_left_shift]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_mul]\n"
+ "ld1r { v10.4s }, [x21]\n"
"ld1r { v9.4s }, [x20]\n"
"add x20, %x[qp], %[offsetof_Requantize32_per_layer_right_shift]\n"
- "ld1r { v8.4s }, [x20]\n"
"mov x9, #0x0\n"
+ "ld1r { v8.4s }, [x20]\n"
"cbz x10, 9f\n"
"1:" // Output channel loop
"movi v31.4s, #0x0\n"
@@ -96,20 +96,20 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"3:" // Output channel loop: Load quantization parameters: Done
"ldr s5, [%x[weights]], #0x4\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
+ "ldp x21, x20, [x22], #0x10\n"
"ldr d0, [x21, #0x0]\n"
"ldr d4, [x20, #0x0]\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
"usubl v0.8h, v0.8b, v13.8b\n"
"usubl v4.8h, v4.8b, v13.8b\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"cbz x23, 7f\n"
"ldr s7, [%x[weights]], #0x4\n"
"ldp x21, x20, [x22], #0x10\n"
"subs x23, x23, #0x1\n"
- "ssubl v7.8h, v7.8b, v12.8b\n"
"ldr d3, [x21, #0x0]\n"
"ldr d6, [x20, #0x0]\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
"usubl v3.8h, v3.8b, v13.8b\n"
"usubl v6.8h, v6.8b, v13.8b\n"
"beq 5f\n"
@@ -125,13 +125,13 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x21, #0x0]\n"
- "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d4, [x20, #0x0]\n"
@@ -139,22 +139,22 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"ldp x21, x20, [x22], #0x10\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "usubl v4.8h, v4.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "usubl v4.8h, v4.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
"ldr d3, [x21, #0x0]\n"
- "usubl v3.8h, v3.8b, v13.8b\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "usubl v3.8h, v3.8b, v13.8b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"ldr d6, [x20, #0x0]\n"
@@ -172,54 +172,54 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"smlal v19.4s, v5.4h, v0.h[3]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "smlal v16.4s, v7.4h, v3.h[0]\n"
- "smlal v17.4s, v7.4h, v3.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
- "ldr x23, [%x[outptrs], #0x20]\n"
- "smlal v18.4s, v7.4h, v3.h[2]\n"
- "smlal v19.4s, v7.4h, v3.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
+ "ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
+ "smlal v16.4s, v7.4h, v3.h[0]\n"
+ "smlal v17.4s, v7.4h, v3.h[1]\n"
+ "smlal v18.4s, v7.4h, v3.h[2]\n"
+ "smlal v19.4s, v7.4h, v3.h[3]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
+ "smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v5.4h, v4.h[2]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
- "and v3.16b, v16.16b, v8.16b\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v9.4s\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
- "and v2.16b, v17.16b, v8.16b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
- "and v1.16b, v18.16b, v8.16b\n"
- "and v0.16b, v19.16b, v8.16b\n"
+ "and v3.16b, v16.16b, v8.16b\n"
+ "and v2.16b, v17.16b, v8.16b\n"
"sshl v20.4s, v20.4s, v10.4s\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
+ "and v1.16b, v18.16b, v8.16b\n"
+ "and v0.16b, v19.16b, v8.16b\n"
+ "smlal v27.4s, v7.4h, v6.h[3]\n"
"sshl v21.4s, v21.4s, v10.4s\n"
"sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v27.4s, v7.4h, v6.h[3]\n"
+ "smlal v28.4s, v7.4h, v6.h[4]\n"
"sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
- "smlal v28.4s, v7.4h, v6.h[4]\n"
- "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -357,49 +357,49 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -421,70 +421,70 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x20, #0x0]\n"
- "usubl v0.8h, v0.8b, v13.8b\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
"ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x20, [%x[outptrs], #0x38]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
- "ldr x20, [%x[outptrs], #0x38]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr s5, [%x[weights]], #0x4\n"
"ldr d4, [x28, #0x0]\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
+ "smlal v20.4s, v7.4h, v3.h[4]\n"
+ "smlal v21.4s, v7.4h, v3.h[5]\n"
"usubl v4.8h, v4.8b, v13.8b\n"
+ "smlal v22.4s, v7.4h, v3.h[6]\n"
+ "smlal v23.4s, v7.4h, v3.h[7]\n"
"smlal v16.4s, v5.4h, v0.h[0]\n"
"smlal v17.4s, v5.4h, v0.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
"smlal v18.4s, v5.4h, v0.h[2]\n"
"smlal v19.4s, v5.4h, v0.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "smlal v20.4s, v7.4h, v3.h[4]\n"
- "smlal v21.4s, v7.4h, v3.h[5]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "smlal v22.4s, v7.4h, v3.h[6]\n"
- "smlal v23.4s, v7.4h, v3.h[7]\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
"smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
+ "smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v19.4s, v19.4s, v9.4s\n"
- "smlal v26.4s, v7.4h, v6.h[2]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
- "and v3.16b, v16.16b, v8.16b\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v9.4s\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
- "and v2.16b, v17.16b, v8.16b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
- "and v1.16b, v18.16b, v8.16b\n"
- "and v0.16b, v19.16b, v8.16b\n"
+ "and v3.16b, v16.16b, v8.16b\n"
+ "and v2.16b, v17.16b, v8.16b\n"
"sshl v20.4s, v20.4s, v10.4s\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
+ "and v1.16b, v18.16b, v8.16b\n"
+ "and v0.16b, v19.16b, v8.16b\n"
+ "smlal v27.4s, v5.4h, v4.h[3]\n"
"sshl v21.4s, v21.4s, v10.4s\n"
"sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v27.4s, v5.4h, v4.h[3]\n"
+ "smlal v28.4s, v5.4h, v4.h[4]\n"
"sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
- "smlal v28.4s, v5.4h, v4.h[4]\n"
- "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "sshl v25.4s, v25.4s, v10.4s\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -622,49 +622,49 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -673,45 +673,45 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"7:" // Output channel loop: Single kernel point
"smlal v16.4s, v5.4h, v0.h[0]\n"
"smlal v17.4s, v5.4h, v0.h[1]\n"
- "sshl v16.4s, v16.4s, v10.4s\n"
"ldr x27, [%x[outptrs], #0x0]\n"
+ "ldr x26, [%x[outptrs], #0x8]\n"
"smlal v18.4s, v5.4h, v0.h[2]\n"
"smlal v19.4s, v5.4h, v0.h[3]\n"
- "sshl v17.4s, v17.4s, v10.4s\n"
- "ldr x26, [%x[outptrs], #0x8]\n"
- "sshl v18.4s, v18.4s, v10.4s\n"
- "sshl v19.4s, v19.4s, v10.4s\n"
- "smlal v20.4s, v5.4h, v0.h[4]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
+ "ldr x24, [%x[outptrs], #0x18]\n"
+ "smlal v20.4s, v5.4h, v0.h[4]\n"
"smlal v21.4s, v5.4h, v0.h[5]\n"
+ "ldr x23, [%x[outptrs], #0x20]\n"
+ "ldr x22, [%x[outptrs], #0x28]\n"
"smlal v22.4s, v5.4h, v0.h[6]\n"
- "sqrdmulh v16.4s, v16.4s, v9.4s\n"
- "ldr x24, [%x[outptrs], #0x18]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
+ "ldr x21, [%x[outptrs], #0x30]\n"
+ "ldr x20, [%x[outptrs], #0x38]\n"
+ "sshl v16.4s, v16.4s, v10.4s\n"
+ "sshl v17.4s, v17.4s, v10.4s\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
- "sqrdmulh v17.4s, v17.4s, v9.4s\n"
- "ldr x23, [%x[outptrs], #0x20]\n"
+ "sshl v18.4s, v18.4s, v10.4s\n"
+ "sshl v19.4s, v19.4s, v10.4s\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
- "sqrdmulh v18.4s, v18.4s, v9.4s\n"
+ "sshl v20.4s, v20.4s, v10.4s\n"
+ "sshl v21.4s, v21.4s, v10.4s\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
- "ldr x22, [%x[outptrs], #0x28]\n"
+ "sqrdmulh v16.4s, v16.4s, v9.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v9.4s\n"
+ "smlal v27.4s, v5.4h, v4.h[3]\n"
+ "sqrdmulh v18.4s, v18.4s, v9.4s\n"
"sqrdmulh v19.4s, v19.4s, v9.4s\n"
+ "smlal v28.4s, v5.4h, v4.h[4]\n"
+ "sshl v22.4s, v22.4s, v10.4s\n"
+ "sshl v23.4s, v23.4s, v10.4s\n"
+ "smlal v29.4s, v5.4h, v4.h[5]\n"
"and v3.16b, v16.16b, v8.16b\n"
- "smlal v27.4s, v5.4h, v4.h[3]\n"
- "ldr x21, [%x[outptrs], #0x30]\n"
"and v2.16b, v17.16b, v8.16b\n"
+ "smlal v30.4s, v5.4h, v4.h[6]\n"
"and v1.16b, v18.16b, v8.16b\n"
- "smlal v28.4s, v5.4h, v4.h[4]\n"
- "ldr x20, [%x[outptrs], #0x38]\n"
"and v0.16b, v19.16b, v8.16b\n"
- "sshl v20.4s, v20.4s, v10.4s\n"
- "smlal v29.4s, v5.4h, v4.h[5]\n"
- "sshl v21.4s, v21.4s, v10.4s\n"
- "sshl v22.4s, v22.4s, v10.4s\n"
- "smlal v30.4s, v5.4h, v4.h[6]\n"
- "sshl v23.4s, v23.4s, v10.4s\n"
- "sshl v24.4s, v24.4s, v10.4s\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
+ "sshl v24.4s, v24.4s, v10.4s\n"
"sshl v25.4s, v25.4s, v10.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
"sshr v2.4s, v2.4s, #0x1f\n"
@@ -848,49 +848,49 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"smax v30.4s, v30.4s, v15.4s\n"
"smax v31.4s, v31.4s, v15.4s\n"
"uzp1 v16.16b, v16.16b, v16.16b\n"
- "str s16, [x27, x9]\n"
- "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v17.16b, v17.16b, v17.16b\n"
"uzp1 v18.16b, v18.16b, v18.16b\n"
- "str s17, [x26, x9]\n"
- "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v19.16b, v19.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v20.16b\n"
- "str s18, [x25, x9]\n"
- "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v21.16b, v21.16b, v21.16b\n"
"uzp1 v22.16b, v22.16b, v22.16b\n"
- "str s19, [x24, x9]\n"
- "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v23.16b, v23.16b, v23.16b\n"
+ "str s16, [x27, x9]\n"
+ "ldr x27, [%x[outptrs], #0x40]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s20, [x23, x9]\n"
- "ldr x23, [%x[outptrs], #0x60]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s17, [x26, x9]\n"
+ "ldr x26, [%x[outptrs], #0x48]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s21, [x22, x9]\n"
- "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s18, [x25, x9]\n"
+ "ldr x25, [%x[outptrs], #0x50]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s22, [x21, x9]\n"
- "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
+ "str s19, [x24, x9]\n"
+ "ldr x24, [%x[outptrs], #0x58]\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s23, [x20, x9]\n"
- "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s20, [x23, x9]\n"
+ "ldr x23, [%x[outptrs], #0x60]\n"
+ "str s21, [x22, x9]\n"
+ "ldr x22, [%x[outptrs], #0x68]\n"
"uzp1 v24.16b, v24.16b, v24.16b\n"
- "str s24, [x27, x9]\n"
"uzp1 v25.16b, v25.16b, v25.16b\n"
+ "str s22, [x21, x9]\n"
+ "ldr x21, [%x[outptrs], #0x70]\n"
"uzp1 v26.16b, v26.16b, v26.16b\n"
- "str s25, [x26, x9]\n"
"uzp1 v27.16b, v27.16b, v27.16b\n"
+ "str s23, [x20, x9]\n"
+ "ldr x20, [%x[outptrs], #0x78]\n"
"uzp1 v28.16b, v28.16b, v28.16b\n"
- "str s26, [x25, x9]\n"
"uzp1 v29.16b, v29.16b, v29.16b\n"
"uzp1 v30.16b, v30.16b, v30.16b\n"
- "str s27, [x24, x9]\n"
"uzp1 v31.16b, v31.16b, v31.16b\n"
+ "str s24, [x27, x9]\n"
+ "str s25, [x26, x9]\n"
+ "str s26, [x25, x9]\n"
+ "str s27, [x24, x9]\n"
"str s28, [x23, x9]\n"
"str s29, [x22, x9]\n"
"str s30, [x21, x9]\n"
@@ -965,20 +965,20 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"18:" // Output channel oddments: Load quantization parameters: Done
"ldr s5, [%x[weights]], #0x4\n"
"mov x22, %x[inptrs]\n"
- "ldp x21, x20, [x22], #0x10\n"
"lsr x23, %x[kernel_points], #0x1\n"
+ "ldp x21, x20, [x22], #0x10\n"
"ldr d0, [x21, #0x0]\n"
"ldr d4, [x20, #0x0]\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
"usubl v0.8h, v0.8b, v13.8b\n"
"usubl v4.8h, v4.8b, v13.8b\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"cbz x23, 22f\n"
"ldr s7, [%x[weights]], #0x4\n"
"ldp x21, x20, [x22], #0x10\n"
"subs x23, x23, #0x1\n"
- "ssubl v7.8h, v7.8b, v12.8b\n"
"ldr d3, [x21, #0x0]\n"
"ldr d6, [x20, #0x0]\n"
+ "ssubl v7.8h, v7.8b, v12.8b\n"
"usubl v3.8h, v3.8b, v13.8b\n"
"usubl v6.8h, v6.8b, v13.8b\n"
"beq 20f\n"
@@ -994,13 +994,13 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d0, [x21, #0x0]\n"
- "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "usubl v0.8h, v0.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d4, [x20, #0x0]\n"
@@ -1008,22 +1008,22 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"ldp x21, x20, [x22], #0x10\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "usubl v4.8h, v4.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "usubl v4.8h, v4.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
"ldr d3, [x21, #0x0]\n"
- "usubl v3.8h, v3.8b, v13.8b\n"
+ "ssubl v5.8h, v5.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
"smlal v27.4s, v7.4h, v6.h[3]\n"
"smlal v28.4s, v7.4h, v6.h[4]\n"
"smlal v29.4s, v7.4h, v6.h[5]\n"
+ "usubl v3.8h, v3.8b, v13.8b\n"
"smlal v30.4s, v7.4h, v6.h[6]\n"
"smlal v31.4s, v7.4h, v6.h[7]\n"
"ldr d6, [x20, #0x0]\n"
@@ -1077,27 +1077,27 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"smlal v22.4s, v5.4h, v0.h[6]\n"
"smlal v23.4s, v5.4h, v0.h[7]\n"
"ldr d2, [x21, #0x0]\n"
- "usubl v2.8h, v2.8b, v13.8b\n"
"smlal v24.4s, v5.4h, v4.h[0]\n"
"smlal v25.4s, v5.4h, v4.h[1]\n"
"smlal v26.4s, v5.4h, v4.h[2]\n"
"smlal v27.4s, v5.4h, v4.h[3]\n"
"smlal v28.4s, v5.4h, v4.h[4]\n"
"smlal v29.4s, v5.4h, v4.h[5]\n"
+ "usubl v2.8h, v2.8b, v13.8b\n"
"smlal v30.4s, v5.4h, v4.h[6]\n"
"smlal v31.4s, v5.4h, v4.h[7]\n"
"ldr d1, [x20, #0x0]\n"
"ldr s0, [%x[weights]], #0x4\n"
"smlal v16.4s, v7.4h, v3.h[0]\n"
"smlal v17.4s, v7.4h, v3.h[1]\n"
- "usubl v1.8h, v1.8b, v13.8b\n"
"smlal v18.4s, v7.4h, v3.h[2]\n"
"smlal v19.4s, v7.4h, v3.h[3]\n"
- "ssubl v0.8h, v0.8b, v12.8b\n"
"smlal v20.4s, v7.4h, v3.h[4]\n"
"smlal v21.4s, v7.4h, v3.h[5]\n"
+ "usubl v1.8h, v1.8b, v13.8b\n"
"smlal v22.4s, v7.4h, v3.h[6]\n"
"smlal v23.4s, v7.4h, v3.h[7]\n"
+ "ssubl v0.8h, v0.8b, v12.8b\n"
"smlal v24.4s, v7.4h, v6.h[0]\n"
"smlal v25.4s, v7.4h, v6.h[1]\n"
"smlal v26.4s, v7.4h, v6.h[2]\n"
@@ -1145,18 +1145,18 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"sshl v17.4s, v17.4s, v10.4s\n"
"sshl v18.4s, v18.4s, v10.4s\n"
"sshl v19.4s, v19.4s, v10.4s\n"
+ "sshl v20.4s, v20.4s, v10.4s\n"
+ "sshl v21.4s, v21.4s, v10.4s\n"
"sqrdmulh v16.4s, v16.4s, v9.4s\n"
"sqrdmulh v17.4s, v17.4s, v9.4s\n"
"sqrdmulh v18.4s, v18.4s, v9.4s\n"
"sqrdmulh v19.4s, v19.4s, v9.4s\n"
+ "sshl v22.4s, v22.4s, v10.4s\n"
+ "sshl v23.4s, v23.4s, v10.4s\n"
"and v3.16b, v16.16b, v8.16b\n"
"and v2.16b, v17.16b, v8.16b\n"
"and v1.16b, v18.16b, v8.16b\n"
"and v0.16b, v19.16b, v8.16b\n"
- "sshl v20.4s, v20.4s, v10.4s\n"
- "sshl v21.4s, v21.4s, v10.4s\n"
- "sshl v22.4s, v22.4s, v10.4s\n"
- "sshl v23.4s, v23.4s, v10.4s\n"
"sshl v24.4s, v24.4s, v10.4s\n"
"sshl v25.4s, v25.4s, v10.4s\n"
"sshr v3.4s, v3.4s, #0x1f\n"
@@ -1320,47 +1320,47 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"tbz %x[n_output_channels], #1, 24f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.h }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.h }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.h }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.h }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.h }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.h }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.h }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.h }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
- "add x9, x9, #0x2\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.h }[0], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.h }[0], [x26]\n"
+ "add x20, x20, x9\n"
+ "add x9, x9, #0x2\n"
"st1 { v26.h }[0], [x25]\n"
"st1 { v27.h }[0], [x24]\n"
"st1 { v28.h }[0], [x23]\n"
@@ -1370,46 +1370,46 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"tbz %x[n_output_channels], #0, 25f\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.b }[2], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.b }[2], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.b }[2], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.b }[2], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.b }[2], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.b }[2], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.b }[2], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.b }[2], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.b }[2], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.b }[2], [x26]\n"
+ "add x20, x20, x9\n"
"st1 { v26.b }[2], [x25]\n"
"st1 { v27.b }[2], [x24]\n"
"st1 { v28.b }[2], [x23]\n"
@@ -1420,46 +1420,46 @@ void a64_u8s8u8q_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst
"24:" // Output channel oddments: Done: Store: Bit 1: Unset
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "add x27, x27, x9\n"
- "add x26, x26, x9\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "add x25, x25, x9\n"
- "add x24, x24, x9\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "add x23, x23, x9\n"
- "add x22, x22, x9\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "add x21, x21, x9\n"
- "add x20, x20, x9\n"
+ "add x27, x27, x9\n"
+ "add x26, x26, x9\n"
+ "add x25, x25, x9\n"
+ "add x24, x24, x9\n"
"st1 { v16.b }[0], [x27]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "add x27, x27, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v17.b }[0], [x26]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "add x26, x26, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"st1 { v18.b }[0], [x25]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "add x25, x25, x9\n"
"st1 { v19.b }[0], [x24]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "add x24, x24, x9\n"
+ "add x27, x27, x9\n"
"st1 { v20.b }[0], [x23]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "add x23, x23, x9\n"
+ "add x26, x26, x9\n"
"st1 { v21.b }[0], [x22]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "add x22, x22, x9\n"
+ "add x25, x25, x9\n"
"st1 { v22.b }[0], [x21]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "add x21, x21, x9\n"
+ "add x24, x24, x9\n"
"st1 { v23.b }[0], [x20]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "add x20, x20, x9\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
"st1 { v24.b }[0], [x27]\n"
+ "add x21, x21, x9\n"
"st1 { v25.b }[0], [x26]\n"
+ "add x20, x20, x9\n"
"st1 { v26.b }[0], [x25]\n"
"st1 { v27.b }[0], [x24]\n"
"st1 { v28.b }[0], [x23]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
index 2b6f70c089..74a68d6929 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,12 +22,14 @@
* SOFTWARE.
*/
-#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "utils.hpp"
#include <cstdint>
#pragma once
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
namespace arm_conv {
namespace depthwise {
@@ -65,3 +67,5 @@ class sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirs
} // namespace depthwise
} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 2d558ade3f..ca58dbc10f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -102,33 +102,33 @@ void sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"mul x20, x4, x21\n" // offset = tile_i * ld_input_row
"ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
"madd x20, x5, x6, x20\n" // offset += tile_j * ld_input_col
- "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
"add x17, x6, x6\n"
+ "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "add x16, x17, x6\n"
"add x7, x7, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x16, x7, x21, LSL #1\n"
- "add x15, x17, x6\n"
- "add x14, x16, x21, LSL #1\n"
+ "add x15, x7, x21, LSL #1\n"
+ "add x14, x15, x21, LSL #1\n"
"add x13, x14, x21, LSL #1\n"
"cbnz x5, 2f\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"lsl x12, %x[n_channels], #0x1\n"
"mov x21, #0x4\n"
"mul x21, x21, x6\n"
- "add x11, x16, x6, LSL #1\n"
- "add x10, x7, x15, LSL #1\n"
- "add x9, x16, x17, LSL #1\n"
- "sub x20, x24, x5\n"
+ "add x11, x15, x6, LSL #1\n"
+ "add x10, x7, x16, LSL #1\n"
+ "add x9, x15, x17, LSL #1\n"
+ "sub x20, x20, x5\n"
"add x28, x14, x6, LSL #1\n"
"sub x20, x20, #0x1\n"
- "add x27, x13, x15, LSL #1\n"
+ "add x27, x13, x16, LSL #1\n"
"and x20, x20, #0x3fffff\n"
"add x26, x7, x6, LSL #1\n"
"orr x12, x12, x20, LSL #22\n"
"add x25, x7, x17, LSL #1\n"
"orr x12, x12, x21, LSL #38\n"
"add x24, x14, x17, LSL #1\n"
- "add x23, x16, x15, LSL #1\n"
- "add x22, x14, x15, LSL #1\n"
+ "add x23, x15, x16, LSL #1\n"
+ "add x22, x14, x16, LSL #1\n"
"add x21, x13, x6, LSL #1\n"
"add x20, x13, x17, LSL #1\n"
".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
@@ -141,187 +141,187 @@ void sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
+ ".inst 0xf8ac49fa // rprfm pldonce, x12, [x15]\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x26, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"mov x20, #0x2\n"
- "ld1h { z18.h }, p3/Z, [x8]\n"
+ "ld1h { z22.h }, p3/Z, [x8]\n"
"addvl x8, x8, #1\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cnth x24\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "cnth x25\n"
".inst 0xa040a100 // ld1h { z0.h-z3.h }, pn8.b/Z, [x8]\n"
"addvl x8, x8, #4\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_outptr]]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
".inst 0xa040a104 // ld1h { z4.h-z7.h }, pn8.b/Z, [x8]\n"
"addvl x8, x8, #4\n"
- "mul x22, x4, x26\n" // offset = tile_i * ld_output_row
- "cmp x24, %x[n_channels]\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "madd x22, x5, x25, x22\n" // offset += tile_j * ld_output_col
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "mul x22, x4, x23\n" // offset = tile_i * ld_output_row
+ "cmp x25, %x[n_channels]\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "madd x22, x5, x26, x22\n" // offset += tile_j * ld_output_col
+ "ld1rh { z21.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"mov x21, #0x0\n"
"mul x22, x22, x20\n" // offset *= output_tile_size
- "sub x20, XZR, x24\n"
+ "sub x20, XZR, x25\n"
"ld1h { z8.h }, p3/Z, [x8]\n"
- "add x23, x23, x22, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1h { z9.h }, p2/Z, [x16, x6, LSL #1]\n"
+ "add x24, x24, x22, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "ld1h { z9.h }, p2/Z, [x15, x6, LSL #1]\n"
"addvl x8, x8, #1\n"
- "add x22, x23, x26, LSL #1\n"
+ "add x23, x24, x23, LSL #1\n"
"ld1h { z10.h }, p2/Z, [x7]\n"
- "ld1h { z11.h }, p2/Z, [x7, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x16, x17, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x7, x16, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x15, x17, LSL #1]\n"
"ld1h { z13.h }, p2/Z, [x14, x6, LSL #1]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
- "movprfx z28, z18\n fmla z28.h, p3/M, z4.h, z9.h\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z3.h, z9.h\n"
- "whilelt p1.h, x24, %x[n_channels]\n"
+ "movprfx z24, z22\n fmla z24.h, p3/M, z4.h, z9.h\n"
+ "movprfx z25, z22\n fmla z25.h, p3/M, z3.h, z9.h\n"
+ "whilelt p1.h, x25, %x[n_channels]\n"
"inch x21\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x13]\n"
- "inch x24\n"
- "ld1h { z18.h }, p3/Z, [x8]\n"
+ "movprfx z26, z22\n fmla z26.h, p3/M, z1.h, z9.h\n"
+ "movprfx z27, z22\n fmla z27.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z17.h }, p2/Z, [x13]\n"
+ "inch x25\n"
+ "ld1h { z22.h }, p3/Z, [x8]\n"
"addvl x8, x8, #1\n"
"mov p0.b, p2.b\n"
"inch x20\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "fmla z28.h, p3/M, z5.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x7, x6, LSL #1]\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x7, x17, LSL #1]\n"
+ "fmla z24.h, p3/M, z0.h, z10.h\n"
+ "fmla z25.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z16.h }, p2/Z, [x13, x16, LSL #1]\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "fmla z27.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z18.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "fmla z24.h, p3/M, z5.h, z12.h\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z28.h }, p2/Z, [x7, x6, LSL #1]\n"
+ "fmla z26.h, p3/M, z6.h, z17.h\n"
+ "fmla z27.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z14.h }, p2/Z, [x7, x17, LSL #1]\n"
"addvl x7, x7, #1\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z29.h, p3/M, z6.h, z13.h\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x16]\n"
- "fmla z28.h, p3/M, z1.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x16, x15, LSL #1]\n"
- "addvl x16, x16, #1\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x14]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z13.h\n"
+ "fmla z25.h, p3/M, z6.h, z13.h\n"
+ "fmla z26.h, p3/M, z4.h, z13.h\n"
+ "fmla z27.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x15]\n"
+ "fmla z24.h, p3/M, z1.h, z28.h\n"
+ "fmla z25.h, p3/M, z0.h, z28.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, x16, LSL #1]\n"
+ "addvl x15, x15, #1\n"
+ "fmla z26.h, p3/M, z5.h, z18.h\n"
+ "fmla z27.h, p3/M, z4.h, z18.h\n"
+ "fmla z24.h, p3/M, z2.h, z14.h\n"
+ "fmla z25.h, p3/M, z1.h, z14.h\n"
+ "ld1h { z19.h }, p2/Z, [x14]\n"
+ "fmla z26.h, p3/M, z0.h, z17.h\n"
+ "fmla z27.h, p3/M, z2.h, z16.h\n"
+ "fmla z24.h, p3/M, z8.h, z18.h\n"
+ "fmla z25.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x14, x16, LSL #1]\n"
"addvl x14, x14, #1\n"
+ "fmla z26.h, p3/M, z3.h, z19.h\n"
"ld1h { z13.h }, p1/Z, [x14, x6, LSL #1]\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x6, LSL #1]\n"
+ "fmla z27.h, p3/M, z5.h, z18.h\n"
+ "fmla z24.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x13, x6, LSL #1]\n"
+ "fmla z25.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "whilelt p2.h, x21, %x[n_channels]\n"
+ "cmp x25, %x[n_channels]\n"
".inst 0xa040a100 // ld1h { z0.h-z3.h }, pn8.b/Z, [x8]\n"
"addvl x8, x8, #4\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x17, LSL #1]\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "cmp x24, %x[n_channels]\n"
- "fmla z30.h, p3/M, z7.h, z11.h\n"
- "fmla z31.h, p3/M, z6.h, z11.h\n"
"addvl x13, x13, #1\n"
- "ld1h { z11.h }, p1/Z, [x7, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z9.h\n"
- "ld1h { z9.h }, p1/Z, [x16, x6, LSL #1]\n"
- "fmla z29.h, p3/M, z8.h, z10.h\n"
+ "fmla z26.h, p3/M, z7.h, z17.h\n"
+ "fmla z27.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z11.h }, p1/Z, [x7, x16, LSL #1]\n"
+ "fmla z24.h, p3/M, z6.h, z19.h\n"
+ "fmla z25.h, p3/M, z8.h, z18.h\n"
+ "ld1h { z9.h }, p1/Z, [x15, x6, LSL #1]\n"
"ld1h { z10.h }, p1/Z, [x7]\n"
- "fmla z30.h, p3/M, z8.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z12.h\n"
+ "fmla z26.h, p3/M, z8.h, z16.h\n"
+ "fmla z27.h, p3/M, z7.h, z16.h\n"
".inst 0xa040a104 // ld1h { z4.h-z7.h }, pn8.b/Z, [x8]\n"
"addvl x8, x8, #4\n"
- "ld1h { z12.h }, p1/Z, [x16, x17, LSL #1]\n"
+ "ld1h { z12.h }, p1/Z, [x15, x17, LSL #1]\n"
"ld1h { z8.h }, p3/Z, [x8]\n"
"addvl x8, x8, #1\n"
- ".inst 0xc170ca3c // fclamp { z28.h-z31.h }, z17.h, z16.h\n"
- "st1h { z28.h }, p0, [x23]\n"
- "st1h { z29.h }, p0, [x23, x25, LSL #1]\n"
+ ".inst 0xc175c9f8 // fclamp { z24.h-z27.h }, z15.h, z21.h\n"
+ "st1h { z24.h }, p0, [x24]\n"
+ "st1h { z25.h }, p0, [x24, x26, LSL #1]\n"
+ "addvl x24, x24, #1\n"
+ "st1h { z26.h }, p0, [x23]\n"
+ "st1h { z27.h }, p0, [x23, x26, LSL #1]\n"
"addvl x23, x23, #1\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x25, LSL #1]\n"
- "addvl x22, x22, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
- "movprfx z28, z18\n fmla z28.h, p3/M, z4.h, z9.h\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z3.h, z9.h\n"
+ "movprfx z28, z22\n fmla z28.h, p3/M, z4.h, z9.h\n"
+ "movprfx z29, z22\n fmla z29.h, p3/M, z3.h, z9.h\n"
"ldr x5, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"mov p0.b, p2.b\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x13]\n"
+ "movprfx z30, z22\n fmla z30.h, p3/M, z1.h, z9.h\n"
+ "movprfx z31, z22\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z18.h }, p2/Z, [x13]\n"
"ldr x4, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"add x5, x5, #0x1\n"
"fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x17, LSL #1]\n"
"fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x15, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x13, x16, LSL #1]\n"
"add x20, x4, #0x1\n"
"fmla z30.h, p3/M, z2.h, z12.h\n"
"fmla z31.h, p3/M, z1.h, z12.h\n"
- "cmp x5, x24\n"
+ "ld1h { z20.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "cmp x5, x22\n"
"csel x4, x4, x20, LT\n"
"csel x5, x5, XZR, LT\n"
"cmp x4, x21\n"
"fmla z28.h, p3/M, z5.h, z12.h\n"
"fmla z29.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x7, x6, LSL #1]\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x7, x17, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x7, x6, LSL #1]\n"
+ "fmla z30.h, p3/M, z6.h, z18.h\n"
"fmla z31.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z18.h }, p2/Z, [x7, x17, LSL #1]\n"
"fmla z28.h, p3/M, z7.h, z13.h\n"
"fmla z29.h, p3/M, z6.h, z13.h\n"
"fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x16]\n"
- "fmla z28.h, p3/M, z1.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x16, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x14]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x6, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x17, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z11.h\n"
- "fmla z31.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z8.h, z10.h\n"
- "fmla z30.h, p3/M, z8.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z12.h\n"
- ".inst 0xc170ca3c // fclamp { z28.h-z31.h }, z17.h, z16.h\n"
- "st1h { z28.h }, p0, [x23]\n"
- "st1h { z29.h }, p0, [x23, x25, LSL #1]\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x25, LSL #1]\n"
+ "fmla z31.h, p3/M, z8.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x15]\n"
+ "fmla z28.h, p3/M, z1.h, z16.h\n"
+ "fmla z29.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z20.h\n"
+ "fmla z31.h, p3/M, z4.h, z20.h\n"
+ "fmla z28.h, p3/M, z2.h, z18.h\n"
+ "fmla z29.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x14]\n"
+ "fmla z30.h, p3/M, z0.h, z17.h\n"
+ "fmla z31.h, p3/M, z2.h, z16.h\n"
+ "fmla z28.h, p3/M, z8.h, z20.h\n"
+ "fmla z29.h, p3/M, z7.h, z20.h\n"
+ "ld1h { z18.h }, p2/Z, [x14, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z19.h\n"
+ "fmla z31.h, p3/M, z5.h, z18.h\n"
+ "fmla z28.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x13, x6, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z17.h\n"
+ "fmla z31.h, p3/M, z6.h, z17.h\n"
+ "fmla z28.h, p3/M, z6.h, z19.h\n"
+ "fmla z29.h, p3/M, z8.h, z18.h\n"
+ "fmla z30.h, p3/M, z8.h, z16.h\n"
+ "fmla z31.h, p3/M, z7.h, z16.h\n"
+ ".inst 0xc175c9fc // fclamp { z28.h-z31.h }, z15.h, z21.h\n"
+ "st1h { z28.h }, p0, [x24]\n"
+ "st1h { z29.h }, p0, [x24, x26, LSL #1]\n"
+ "st1h { z30.h }, p0, [x23]\n"
+ "st1h { z31.h }, p0, [x23, x26, LSL #1]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
@@ -333,4 +333,4 @@ void sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 415e344832..b4449ec76f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -85,185 +85,185 @@ void sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldr x13, [x16, #0x20]\n"
- "cnth x12\n"
+ "ldr x24, [x16, #0x20]\n"
+ "cnth x13\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ldp x11, x10, [x20, #0x0]\n"
- "cmp x12, %x[n_channels]\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x9, XZR, x12\n"
- "ldp x28, x27, [x20, #0x10]\n"
- "ld1h { z16.h }, p3/Z, [x14]\n"
+ "ld1rh { z22.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
+ "cmp x13, %x[n_channels]\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "sub x10, XZR, x13\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ld1h { z20.h }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
- "ldp x26, x25, [x16, #0x0]\n"
+ "ldp x23, x22, [x16, #0x0]\n"
".inst 0xa040a1c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "ldp x24, x23, [x16, #0x10]\n"
+ "ldp x21, x20, [x16, #0x10]\n"
".inst 0xa040a1c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
"ld1h { z8.h }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
- "ld1h { z9.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x24, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x23, x15, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x13, x15, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x24, x15, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z16\n fmla z28.h, p3/M, z4.h, z9.h\n"
- "movprfx z29, z16\n fmla z29.h, p3/M, z3.h, z9.h\n"
- "ldr x22, [x16, #0x28]\n"
- "whilelt p1.h, x12, %x[n_channels]\n"
- "movprfx z30, z16\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "movprfx z31, z16\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "movprfx z24, z20\n fmla z24.h, p3/M, z4.h, z9.h\n"
+ "movprfx z25, z20\n fmla z25.h, p3/M, z3.h, z9.h\n"
+ "ldr x20, [x16, #0x28]\n"
+ "whilelt p1.h, x13, %x[n_channels]\n"
+ "movprfx z26, z20\n fmla z26.h, p3/M, z1.h, z9.h\n"
+ "movprfx z27, z20\n fmla z27.h, p3/M, z0.h, z9.h\n"
"ldr x21, [x16, #0x30]\n"
- "ld1h { z16.h }, p3/Z, [x14]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ld1h { z20.h }, p3/Z, [x14]\n"
+ "ldr x24, [x16, #0x38]\n"
"addvl x14, x14, #1\n"
- "inch x9\n"
- "ld1h { z9.h }, p2/Z, [x22, x15, LSL #1]\n"
- "ldr x25, [x16, #0x48]\n"
+ "inch x10\n"
+ "ld1h { z17.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x20, [x16, #0x48]\n"
"mov p0.b, p2.b\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x21, x15, LSL #1]\n"
- "ldr x26, [x16, #0x40]\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ldr x24, [x16, #0x50]\n"
- "ld1h { z10.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x23, [x16, #0x58]\n"
- "fmla z28.h, p3/M, z5.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x15, LSL #1]\n"
- "ldr x13, [x16, #0x60]\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ldr x22, [x16, #0x68]\n"
- "ldr x21, [x16, #0x70]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z29.h, p3/M, z6.h, z13.h\n"
- "ldr x20, [x16, #0x78]\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x15, LSL #1]\n"
- "ldp x26, x25, [x16, #0x0]\n"
- "fmla z28.h, p3/M, z1.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x15, LSL #1]\n"
- "ldp x24, x23, [x16, #0x10]\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x13, [x16, #0x20]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "ld1h { z13.h }, p1/Z, [x13, x12, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x22, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z0.h, z10.h\n"
+ "fmla z25.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x23, [x16, #0x40]\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "fmla z27.h, p3/M, z1.h, z12.h\n"
+ "ldr x22, [x16, #0x50]\n"
+ "ld1h { z18.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x21, [x16, #0x58]\n"
+ "ldr x20, [x16, #0x60]\n"
+ "fmla z24.h, p3/M, z5.h, z12.h\n"
+ "fmla z25.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z28.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "ldr x27, [x16, #0x68]\n"
+ "fmla z26.h, p3/M, z6.h, z17.h\n"
+ "fmla z27.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z14.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "ldr x26, [x16, #0x70]\n"
+ "ldr x25, [x16, #0x78]\n"
+ "ldp x24, x23, [x16, #0x0]\n"
+ "fmla z24.h, p3/M, z7.h, z13.h\n"
+ "fmla z25.h, p3/M, z6.h, z13.h\n"
+ "fmla z26.h, p3/M, z4.h, z13.h\n"
+ "fmla z27.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z1.h, z28.h\n"
+ "fmla z25.h, p3/M, z0.h, z28.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldp x22, x21, [x16, #0x10]\n"
+ "fmla z26.h, p3/M, z5.h, z18.h\n"
+ "fmla z27.h, p3/M, z4.h, z18.h\n"
+ "fmla z24.h, p3/M, z2.h, z14.h\n"
+ "fmla z25.h, p3/M, z1.h, z14.h\n"
+ "ld1h { z19.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x20, [x16, #0x20]\n"
+ "fmla z26.h, p3/M, z0.h, z17.h\n"
+ "fmla z27.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z13.h }, p1/Z, [x20, x13, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z18.h\n"
+ "fmla z25.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z3.h, z19.h\n"
+ "fmla z27.h, p3/M, z5.h, z18.h\n"
+ "fmla z24.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "fmla z25.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "inch x15\n"
".inst 0xa040a1c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x15, LSL #1]\n"
- "inch x15\n"
- "fmla z30.h, p3/M, z7.h, z11.h\n"
- "fmla z31.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p1/Z, [x24, x12, LSL #1]\n"
"whilelt p2.h, x15, %x[n_channels]\n"
- "fmla z28.h, p3/M, z6.h, z9.h\n"
- "ld1h { z9.h }, p1/Z, [x26, x12, LSL #1]\n"
- "fmla z29.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p1/Z, [x25, x12, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z12.h\n"
- "ld1h { z12.h }, p1/Z, [x23, x12, LSL #1]\n"
- "inch x12\n"
+ "fmla z26.h, p3/M, z7.h, z17.h\n"
+ "fmla z27.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z11.h }, p1/Z, [x22, x13, LSL #1]\n"
+ "fmla z24.h, p3/M, z6.h, z19.h\n"
+ "fmla z25.h, p3/M, z8.h, z18.h\n"
+ "ld1h { z9.h }, p1/Z, [x24, x13, LSL #1]\n"
+ "ld1h { z10.h }, p1/Z, [x23, x13, LSL #1]\n"
+ "fmla z26.h, p3/M, z8.h, z16.h\n"
+ "fmla z27.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z12.h }, p1/Z, [x21, x13, LSL #1]\n"
+ "inch x13\n"
".inst 0xa040a1c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "cmp x12, %x[n_channels]\n"
+ "cmp x13, %x[n_channels]\n"
"ld1h { z8.h }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
- ".inst 0xc171ca5c // fclamp { z28.h-z31.h }, z18.h, z17.h\n"
- "st1h { z28.h }, p0, [x11, x9, LSL #1]\n"
- "st1h { z29.h }, p0, [x10, x9, LSL #1]\n"
- "st1h { z30.h }, p0, [x28, x9, LSL #1]\n"
- "st1h { z31.h }, p0, [x27, x9, LSL #1]\n"
+ ".inst 0xc16fcad8 // fclamp { z24.h-z27.h }, z22.h, z15.h\n"
+ "st1h { z24.h }, p0, [x12, x10, LSL #1]\n"
+ "st1h { z25.h }, p0, [x11, x10, LSL #1]\n"
+ "st1h { z26.h }, p0, [x9, x10, LSL #1]\n"
+ "st1h { z27.h }, p0, [x28, x10, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z16\n fmla z28.h, p3/M, z4.h, z9.h\n"
- "movprfx z29, z16\n fmla z29.h, p3/M, z3.h, z9.h\n"
- "ldr x22, [x16, #0x28]\n"
- "inch x9\n"
- "movprfx z30, z16\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "movprfx z31, z16\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ldr x21, [x16, #0x30]\n"
+ "movprfx z28, z20\n fmla z28.h, p3/M, z4.h, z9.h\n"
+ "movprfx z29, z20\n fmla z29.h, p3/M, z3.h, z9.h\n"
+ "ldr x21, [x16, #0x28]\n"
+ "inch x10\n"
+ "movprfx z30, z20\n fmla z30.h, p3/M, z1.h, z9.h\n"
+ "movprfx z31, z20\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "ldr x20, [x16, #0x30]\n"
"mov p0.b, p2.b\n"
- "ldr x20, [x16, #0x38]\n"
- "ld1h { z9.h }, p2/Z, [x22, x15, LSL #1]\n"
- "ldr x25, [x16, #0x48]\n"
+ "ldr x22, [x16, #0x38]\n"
+ "ld1h { z18.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x21, [x16, #0x48]\n"
"fmla z28.h, p3/M, z0.h, z10.h\n"
"fmla z29.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x21, x15, LSL #1]\n"
- "ldr x26, [x16, #0x40]\n"
+ "ld1h { z17.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x20, [x16, #0x40]\n"
"fmla z30.h, p3/M, z2.h, z12.h\n"
"fmla z31.h, p3/M, z1.h, z12.h\n"
- "ldr x24, [x16, #0x50]\n"
- "ld1h { z10.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x23, [x16, #0x58]\n"
+ "ldr x25, [x16, #0x50]\n"
+ "ld1h { z20.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x24, [x16, #0x58]\n"
+ "ldr x23, [x16, #0x60]\n"
"fmla z28.h, p3/M, z5.h, z12.h\n"
"fmla z29.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x15, LSL #1]\n"
- "ldr x13, [x16, #0x60]\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z16.h }, p2/Z, [x22, x15, LSL #1]\n"
"ldr x22, [x16, #0x68]\n"
+ "fmla z30.h, p3/M, z6.h, z18.h\n"
+ "fmla z31.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z18.h }, p2/Z, [x20, x15, LSL #1]\n"
"ldr x21, [x16, #0x70]\n"
+ "ldr x20, [x16, #0x78]\n"
"fmla z28.h, p3/M, z7.h, z13.h\n"
"fmla z29.h, p3/M, z6.h, z13.h\n"
- "ldr x20, [x16, #0x78]\n"
"fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z1.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x13, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x22, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x21, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z11.h\n"
- "fmla z31.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z8.h, z10.h\n"
- "fmla z30.h, p3/M, z8.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z12.h\n"
- ".inst 0xc171ca5c // fclamp { z28.h-z31.h }, z18.h, z17.h\n"
- "st1h { z28.h }, p0, [x11, x9, LSL #1]\n"
- "st1h { z29.h }, p0, [x10, x9, LSL #1]\n"
- "st1h { z30.h }, p0, [x28, x9, LSL #1]\n"
- "st1h { z31.h }, p0, [x27, x9, LSL #1]\n"
+ "fmla z31.h, p3/M, z8.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z1.h, z16.h\n"
+ "fmla z29.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z20.h\n"
+ "fmla z31.h, p3/M, z4.h, z20.h\n"
+ "fmla z28.h, p3/M, z2.h, z18.h\n"
+ "fmla z29.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z0.h, z17.h\n"
+ "fmla z31.h, p3/M, z2.h, z16.h\n"
+ "fmla z28.h, p3/M, z8.h, z20.h\n"
+ "fmla z29.h, p3/M, z7.h, z20.h\n"
+ "ld1h { z18.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z19.h\n"
+ "fmla z31.h, p3/M, z5.h, z18.h\n"
+ "fmla z28.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z17.h\n"
+ "fmla z31.h, p3/M, z6.h, z17.h\n"
+ "fmla z28.h, p3/M, z6.h, z19.h\n"
+ "fmla z29.h, p3/M, z8.h, z18.h\n"
+ "fmla z30.h, p3/M, z8.h, z16.h\n"
+ "fmla z31.h, p3/M, z7.h, z16.h\n"
+ ".inst 0xc16fcadc // fclamp { z28.h-z31.h }, z22.h, z15.h\n"
+ "st1h { z28.h }, p0, [x12, x10, LSL #1]\n"
+ "st1h { z29.h }, p0, [x11, x10, LSL #1]\n"
+ "st1h { z30.h }, p0, [x9, x10, LSL #1]\n"
+ "st1h { z31.h }, p0, [x28, x10, LSL #1]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
@@ -274,4 +274,4 @@ void sme2_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp
index f90fbc3906..9622603947 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,12 +22,14 @@
* SOFTWARE.
*/
-#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "utils.hpp"
#include <cstdint>
#pragma once
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
namespace arm_conv {
namespace depthwise {
@@ -65,3 +67,5 @@ class sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst : public DepthwiseDepthfirs
} // namespace depthwise
} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index 3a7d1cb0b4..a2fe312a1c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -102,56 +102,56 @@ void sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
"mul x20, x2, x21\n" // offset = tile_i * ld_input_row
"ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
"madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
- "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
"add x7, x4, x4\n"
+ "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "add x8, x7, x4\n"
"add x5, x5, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x8, x5, x21, LSL #1\n"
- "add x17, x7, x4\n"
- "add x16, x8, x21, LSL #1\n"
- "add x15, x17, x4\n"
- "add x14, x16, x21, LSL #1\n"
+ "add x17, x8, x4\n"
+ "add x16, x5, x21, LSL #1\n"
+ "add x15, x16, x21, LSL #1\n"
+ "add x14, x15, x21, LSL #1\n"
"add x13, x14, x21, LSL #1\n"
"cbnz x3, 2f\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"lsl x12, %x[n_channels], #0x1\n"
"mov x28, #0x6\n"
"mul x28, x28, x4\n"
- "add x27, x16, x7, LSL #1\n"
- "add x26, x5, x15, LSL #1\n"
- "add x25, x8, x7, LSL #1\n"
- "sub x20, x9, x3\n"
- "add x24, x13, x15, LSL #1\n"
+ "add x27, x15, x7, LSL #1\n"
+ "add x26, x5, x17, LSL #1\n"
+ "add x25, x16, x7, LSL #1\n"
+ "sub x20, x20, x3\n"
+ "add x24, x13, x17, LSL #1\n"
"sub x20, x20, #0x1\n"
- "add x23, x16, x4, LSL #1\n"
+ "add x23, x15, x4, LSL #1\n"
"and x20, x20, #0x3fffff\n"
"add x22, x5, x4, LSL #1\n"
"orr x12, x12, x20, LSL #22\n"
- "add x21, x5, x17, LSL #1\n"
+ "add x21, x5, x8, LSL #1\n"
"orr x12, x12, x28, LSL #38\n"
- "add x20, x16, x17, LSL #1\n"
- "add x11, x8, x15, LSL #1\n"
+ "add x20, x15, x8, LSL #1\n"
+ "add x11, x16, x17, LSL #1\n"
"add x10, x14, x7, LSL #1\n"
- "add x9, x14, x15, LSL #1\n"
+ "add x9, x14, x17, LSL #1\n"
"add x28, x13, x4, LSL #1\n"
".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- "add x27, x8, x4, LSL #1\n"
+ "add x27, x16, x4, LSL #1\n"
".inst 0xf8ac48ba // rprfm pldonce, x12, [x5]\n"
".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- "add x26, x8, x17, LSL #1\n"
+ "add x26, x16, x8, LSL #1\n"
".inst 0xf8ac49ba // rprfm pldonce, x12, [x13]\n"
".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- "add x25, x13, x17, LSL #1\n"
+ "add x25, x13, x8, LSL #1\n"
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
"add x24, x14, x4, LSL #1\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
"add x23, x5, x7, LSL #1\n"
".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- "add x22, x14, x17, LSL #1\n"
+ "add x22, x14, x8, LSL #1\n"
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
- "add x21, x16, x15, LSL #1\n"
+ "add x21, x15, x17, LSL #1\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
"add x20, x13, x7, LSL #1\n"
- ".inst 0xf8ac491a // rprfm pldonce, x12, [x8]\n"
+ ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
".inst 0xf8ac495a // rprfm pldonce, x12, [x10]\n"
@@ -163,312 +163,312 @@ void sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
+ ".inst 0xf8ac49fa // rprfm pldonce, x12, [x15]\n"
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"mov x21, #0x3\n"
- "ld1h { z18.h }, p3/Z, [x6]\n"
+ "ld1h { z25.h }, p3/Z, [x6]\n"
"addvl x6, x6, #1\n"
"ldr x27, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cnth x26\n"
+ "cnth x22\n"
".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_outptr]]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
".inst 0xa040a0c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
- "mul x20, x2, x22\n" // offset = tile_i * ld_output_row
- "cmp x26, %x[n_channels]\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mul x20, x2, x23\n" // offset = tile_i * ld_output_row
+ "cmp x22, %x[n_channels]\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"madd x20, x3, x27, x20\n" // offset += tile_j * ld_output_col
- "add x24, x27, x27\n"
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "add x25, x27, x27\n"
+ "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"mul x20, x20, x21\n" // offset *= output_tile_size
"mov x21, #0x0\n"
"ld1h { z8.h }, p3/Z, [x6]\n"
- "add x25, x25, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "sub x20, XZR, x26\n"
- "ld1h { z9.h }, p2/Z, [x16, x7, LSL #1]\n"
- "add x23, x25, x22, LSL #1\n"
+ "add x26, x26, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "sub x20, XZR, x22\n"
+ "ld1h { z9.h }, p2/Z, [x15, x7, LSL #1]\n"
+ "add x24, x26, x23, LSL #1\n"
"ld1h { z10.h }, p2/Z, [x5]\n"
"addvl x6, x6, #1\n"
- "add x22, x23, x22, LSL #1\n"
- "ld1h { z11.h }, p2/Z, [x5, x15, LSL #1]\n"
+ "add x23, x24, x23, LSL #1\n"
+ "ld1h { z11.h }, p2/Z, [x5, x17, LSL #1]\n"
"ld1h { z12.h }, p2/Z, [x13]\n"
- "ld1h { z13.h }, p2/Z, [x8, x7, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x16, x7, LSL #1]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
- "movprfx z24, z18\n fmla z24.h, p3/M, z7.h, z9.h\n"
- "movprfx z23, z18\n fmla z23.h, p3/M, z8.h, z9.h\n"
- "whilelt p1.h, x26, %x[n_channels]\n"
+ "movprfx z28, z25\n fmla z28.h, p3/M, z7.h, z9.h\n"
+ "movprfx z23, z25\n fmla z23.h, p3/M, z8.h, z9.h\n"
+ "whilelt p1.h, x22, %x[n_channels]\n"
"inch x21\n"
- "movprfx z25, z18\n fmla z25.h, p3/M, z6.h, z9.h\n"
- "movprfx z26, z18\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "inch x26\n"
+ "movprfx z29, z25\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "movprfx z30, z25\n fmla z30.h, p3/M, z5.h, z9.h\n"
+ "inch x22\n"
"mov p0.b, p2.b\n"
- "movprfx z27, z18\n fmla z27.h, p3/M, z4.h, z9.h\n"
- "movprfx z28, z18\n fmla z28.h, p3/M, z3.h, z9.h\n"
+ "movprfx z31, z25\n fmla z31.h, p3/M, z4.h, z9.h\n"
+ "movprfx z16, z25\n fmla z16.h, p3/M, z3.h, z9.h\n"
"inch x20\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "fmla z24.h, p3/M, z4.h, z13.h\n"
+ "movprfx z17, z25\n fmla z17.h, p3/M, z2.h, z9.h\n"
+ "movprfx z19, z25\n fmla z19.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z4.h, z13.h\n"
"fmla z23.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x16, x17, LSL #1]\n"
- "fmla z25.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x16, x4, LSL #1]\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
- "fmla z27.h, p3/M, z1.h, z13.h\n"
- "fmla z28.h, p3/M, z0.h, z13.h\n"
- "fmla z29.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x15, LSL #1]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x15, x8, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, x4, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z13.h\n"
+ "fmla z31.h, p3/M, z1.h, z13.h\n"
+ "fmla z16.h, p3/M, z0.h, z13.h\n"
+ "fmla z17.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z21.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "movprfx z18, z25\n fmla z18.h, p3/M, z1.h, z9.h\n"
+ "fmla z28.h, p3/M, z6.h, z20.h\n"
"fmla z23.h, p3/M, z5.h, z13.h\n"
- "ld1h { z18.h }, p3/Z, [x6]\n"
+ "ld1h { z25.h }, p3/Z, [x6]\n"
"addvl x6, x6, #1\n"
- "fmla z25.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x5, x4, LSL #1]\n"
- "fmla z26.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x5, x17, LSL #1]\n"
- "fmla z27.h, p3/M, z3.h, z11.h\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z29.h, p3/M, z1.h, z11.h\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x8]\n"
- "fmla z25.h, p3/M, z1.h, z12.h\n"
- "fmla z28.h, p3/M, z4.h, z10.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "fmla z27.h, p3/M, z5.h, z10.h\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "fmla z26.h, p3/M, z0.h, z11.h\n"
- "fmla z24.h, p3/M, z2.h, z12.h\n"
- "fmla z23.h, p3/M, z1.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x8, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x14]\n"
- "fmla z25.h, p3/M, z7.h, z10.h\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "fmla z24.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x4, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmla z31.h, p3/M, z3.h, z10.h\n"
- "fmla z25.h, p3/M, z5.h, z13.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "ld1h { z13.h }, p2/Z, [x13, x4, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z29.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z24.h, p3/M, z3.h, z12.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z6.h, z13.h\n"
- "fmla z27.h, p3/M, z0.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "fmla z29.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x13, x17, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x8, x17, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "addvl x8, x8, #1\n"
- "ld1h { z12.h }, p2/Z, [x14, x4, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z13.h\n"
- "fmla z31.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "fmla z29.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z27.h }, p2/Z, [x5, x4, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z20.h\n"
+ "fmla z19.h, p3/M, z8.h, z21.h\n"
+ "ld1h { z24.h }, p2/Z, [x5, x8, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z20.h\n"
+ "fmla z18.h, p3/M, z0.h, z20.h\n"
+ "fmla z17.h, p3/M, z1.h, z20.h\n"
+ "fmla z28.h, p3/M, z0.h, z27.h\n"
+ "fmla z23.h, p3/M, z7.h, z20.h\n"
+ "ld1h { z21.h }, p2/Z, [x16]\n"
+ "fmla z29.h, p3/M, z1.h, z24.h\n"
+ "fmla z16.h, p3/M, z4.h, z10.h\n"
+ "fmla z19.h, p3/M, z1.h, z10.h\n"
+ "fmla z31.h, p3/M, z5.h, z10.h\n"
+ "fmla z18.h, p3/M, z2.h, z10.h\n"
+ "fmla z30.h, p3/M, z0.h, z21.h\n"
+ "fmla z28.h, p3/M, z2.h, z24.h\n"
+ "fmla z23.h, p3/M, z1.h, z27.h\n"
+ "ld1h { z13.h }, p2/Z, [x16, x17, LSL #1]\n"
+ "ld1h { z20.h }, p2/Z, [x14]\n"
+ "fmla z29.h, p3/M, z7.h, z10.h\n"
+ "fmla z16.h, p3/M, z2.h, z13.h\n"
+ "fmla z28.h, p3/M, z8.h, z10.h\n"
+ "fmla z17.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z27.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "fmla z23.h, p3/M, z3.h, z21.h\n"
+ "fmla z29.h, p3/M, z5.h, z13.h\n"
+ "ld1h { z22.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "fmla z30.h, p3/M, z6.h, z20.h\n"
+ "ld1h { z20.h }, p2/Z, [x13, x4, LSL #1]\n"
+ "fmla z18.h, p3/M, z4.h, z27.h\n"
+ "fmla z19.h, p3/M, z3.h, z27.h\n"
+ "ld1h { z21.h }, p2/Z, [x16, x4, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z27.h\n"
+ "fmla z16.h, p3/M, z6.h, z27.h\n"
+ "fmla z17.h, p3/M, z5.h, z27.h\n"
+ "fmla z30.h, p3/M, z8.h, z27.h\n"
+ "fmla z28.h, p3/M, z3.h, z21.h\n"
+ "fmla z19.h, p3/M, z5.h, z22.h\n"
+ "fmla z18.h, p3/M, z6.h, z20.h\n"
+ "fmla z16.h, p3/M, z8.h, z22.h\n"
+ "fmla z31.h, p3/M, z0.h, z21.h\n"
+ "ld1h { z9.h }, p2/Z, [x16, x8, LSL #1]\n"
+ "addvl x16, x16, #1\n"
+ "fmla z17.h, p3/M, z7.h, z20.h\n"
+ "ld1h { z20.h }, p2/Z, [x13, x8, LSL #1]\n"
+ "fmla z23.h, p3/M, z4.h, z21.h\n"
+ "fmla z30.h, p3/M, z1.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x14, x4, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z9.h\n"
+ "fmla z29.h, p3/M, z4.h, z9.h\n"
+ "fmla z18.h, p3/M, z8.h, z20.h\n"
+ "fmla z19.h, p3/M, z7.h, z20.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x8, LSL #1]\n"
"addvl x14, x14, #1\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "fmla z25.h, p3/M, z4.h, z11.h\n"
- "fmla z27.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x5, x7, LSL #1]\n"
+ "fmla z31.h, p3/M, z2.h, z9.h\n"
+ "fmla z16.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z20.h }, p2/Z, [x5, x7, LSL #1]\n"
"addvl x5, x5, #1\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z7.h, z12.h\n"
+ "fmla z17.h, p3/M, z4.h, z21.h\n"
+ "fmla z30.h, p3/M, z7.h, z21.h\n"
"ld1h { z10.h }, p1/Z, [x5]\n"
- "fmla z30.h, p3/M, z3.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "fmla z23.h, p3/M, z2.h, z11.h\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z12.h\n"
- "fmla z25.h, p3/M, z0.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x16]\n"
- "ld1h { z11.h }, p2/Z, [x16, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "addvl x16, x16, #1\n"
- "fmla z30.h, p3/M, z5.h, z13.h\n"
- "ld1h { z9.h }, p1/Z, [x16, x7, LSL #1]\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "fmla z26.h, p3/M, z3.h, z12.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- ".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
- "addvl x6, x6, #4\n"
- "fmla z27.h, p3/M, z8.h, z13.h\n"
+ "fmla z18.h, p3/M, z3.h, z21.h\n"
+ "fmla z23.h, p3/M, z2.h, z20.h\n"
+ "fmla z19.h, p3/M, z4.h, z12.h\n"
+ "fmla z31.h, p3/M, z6.h, z21.h\n"
+ "ld1h { z11.h }, p2/Z, [x15]\n"
+ "fmla z28.h, p3/M, z1.h, z20.h\n"
+ "fmla z29.h, p3/M, z0.h, z20.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, x17, LSL #1]\n"
+ "addvl x15, x15, #1\n"
+ "fmla z16.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z9.h }, p1/Z, [x15, x7, LSL #1]\n"
+ "fmla z18.h, p3/M, z5.h, z12.h\n"
+ "fmla z23.h, p3/M, z6.h, z11.h\n"
+ "fmla z17.h, p3/M, z0.h, z11.h\n"
+ "fmla z19.h, p3/M, z2.h, z20.h\n"
+ "fmla z31.h, p3/M, z8.h, z12.h\n"
"ld1h { z13.h }, p2/Z, [x13, x7, LSL #1]\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
+ "fmla z30.h, p3/M, z3.h, z11.h\n"
"whilelt p2.h, x21, %x[n_channels]\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
+ "fmla z29.h, p3/M, z8.h, z20.h\n"
+ "fmla z16.h, p3/M, z5.h, z20.h\n"
+ ".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "fmax z23.h, p3/M, z23.h, z15.h\n"
"addvl x13, x13, #1\n"
- "cmp x26, %x[n_channels]\n"
- "ld1h { z11.h }, p1/Z, [x5, x15, LSL #1]\n"
- "fmax z23.h, p3/M, z23.h, z17.h\n"
+ "cmp x22, %x[n_channels]\n"
+ "ld1h { z11.h }, p1/Z, [x5, x17, LSL #1]\n"
+ "fmla z17.h, p3/M, z8.h, z13.h\n"
+ "fmla z18.h, p3/M, z7.h, z13.h\n"
"ld1h { z12.h }, p1/Z, [x13]\n"
- "fmla z29.h, p3/M, z8.h, z13.h\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z6.h, z13.h\n"
+ "fmla z19.h, p3/M, z6.h, z13.h\n"
".inst 0xa040a0c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
- ".inst 0xc170ca38 // fclamp { z24.h-z27.h }, z17.h, z16.h\n"
- "ld1h { z13.h }, p1/Z, [x8, x7, LSL #1]\n"
+ ".inst 0xc16ec9fc // fclamp { z28.h-z31.h }, z15.h, z14.h\n"
+ "ld1h { z13.h }, p1/Z, [x16, x7, LSL #1]\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
"ld1h { z8.h }, p3/Z, [x6]\n"
"addvl x6, x6, #1\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- ".inst 0xc170ca3c // fclamp { z28.h-z31.h }, z17.h, z16.h\n"
- "st1h { z26.h }, p0, [x23]\n"
- "st1h { z27.h }, p0, [x23, x27, LSL #1]\n"
- "st1h { z23.h }, p0, [x25]\n"
- "st1h { z24.h }, p0, [x25, x27, LSL #1]\n"
- "st1h { z25.h }, p0, [x25, x24, LSL #1]\n"
- "addvl x25, x25, #1\n"
- "st1h { z28.h }, p0, [x23, x24, LSL #1]\n"
+ ".inst 0xc16ec9f0 // fclamp { z16.h-z19.h }, z15.h, z14.h\n"
+ "st1h { z30.h }, p0, [x24]\n"
+ "st1h { z23.h }, p0, [x26]\n"
+ "st1h { z28.h }, p0, [x26, x27, LSL #1]\n"
+ "st1h { z29.h }, p0, [x26, x25, LSL #1]\n"
+ "addvl x26, x26, #1\n"
+ "st1h { z31.h }, p0, [x24, x27, LSL #1]\n"
+ "st1h { z16.h }, p0, [x24, x25, LSL #1]\n"
+ "addvl x24, x24, #1\n"
+ "st1h { z17.h }, p0, [x23]\n"
+ "st1h { z18.h }, p0, [x23, x27, LSL #1]\n"
+ "st1h { z19.h }, p0, [x23, x25, LSL #1]\n"
"addvl x23, x23, #1\n"
- "st1h { z29.h }, p0, [x22]\n"
- "st1h { z30.h }, p0, [x22, x27, LSL #1]\n"
- "st1h { z31.h }, p0, [x22, x24, LSL #1]\n"
- "addvl x22, x22, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
- "movprfx z24, z18\n fmla z24.h, p3/M, z7.h, z9.h\n"
- "movprfx z23, z18\n fmla z23.h, p3/M, z8.h, z9.h\n"
+ "movprfx z20, z25\n fmla z20.h, p3/M, z7.h, z9.h\n"
+ "movprfx z24, z25\n fmla z24.h, p3/M, z8.h, z9.h\n"
"ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"mov p0.b, p2.b\n"
- "movprfx z25, z18\n fmla z25.h, p3/M, z6.h, z9.h\n"
- "movprfx z26, z18\n fmla z26.h, p3/M, z5.h, z9.h\n"
+ "movprfx z21, z25\n fmla z21.h, p3/M, z6.h, z9.h\n"
+ "movprfx z22, z25\n fmla z22.h, p3/M, z5.h, z9.h\n"
"ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z27, z18\n fmla z27.h, p3/M, z4.h, z9.h\n"
- "movprfx z28, z18\n fmla z28.h, p3/M, z3.h, z9.h\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "movprfx z23, z25\n fmla z23.h, p3/M, z4.h, z9.h\n"
+ "movprfx z28, z25\n fmla z28.h, p3/M, z3.h, z9.h\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "movprfx z29, z25\n fmla z29.h, p3/M, z2.h, z9.h\n"
+ "movprfx z31, z25\n fmla z31.h, p3/M, z0.h, z9.h\n"
"ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"add x3, x3, #0x1\n"
- "fmla z24.h, p3/M, z4.h, z13.h\n"
- "fmla z23.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x16, x17, LSL #1]\n"
+ "fmla z20.h, p3/M, z4.h, z13.h\n"
+ "fmla z24.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, x8, LSL #1]\n"
"add x20, x2, #0x1\n"
- "fmla z25.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x16, x4, LSL #1]\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
- "cmp x3, x9\n"
- "fmla z27.h, p3/M, z1.h, z13.h\n"
+ "fmla z21.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, x4, LSL #1]\n"
+ "fmla z22.h, p3/M, z2.h, z13.h\n"
+ "cmp x3, x22\n"
+ "fmla z23.h, p3/M, z1.h, z13.h\n"
"fmla z28.h, p3/M, z0.h, z13.h\n"
"csel x2, x2, x20, LT\n"
"csel x3, x3, XZR, LT\n"
"fmla z29.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x15, LSL #1]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z16.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "movprfx z30, z25\n fmla z30.h, p3/M, z1.h, z9.h\n"
"cmp x2, x21\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "fmla z23.h, p3/M, z5.h, z13.h\n"
- "fmla z25.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x5, x4, LSL #1]\n"
- "fmla z26.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x5, x17, LSL #1]\n"
- "fmla z27.h, p3/M, z3.h, z11.h\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z29.h, p3/M, z1.h, z11.h\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x8]\n"
- "fmla z25.h, p3/M, z1.h, z12.h\n"
- "fmla z28.h, p3/M, z4.h, z10.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "fmla z27.h, p3/M, z5.h, z10.h\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "fmla z26.h, p3/M, z0.h, z11.h\n"
- "fmla z24.h, p3/M, z2.h, z12.h\n"
- "fmla z23.h, p3/M, z1.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x8, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x14]\n"
- "fmla z25.h, p3/M, z7.h, z10.h\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "fmla z24.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x4, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmla z31.h, p3/M, z3.h, z10.h\n"
- "fmla z25.h, p3/M, z5.h, z13.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "ld1h { z13.h }, p2/Z, [x13, x4, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z29.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z24.h, p3/M, z3.h, z12.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z6.h, z13.h\n"
- "fmla z27.h, p3/M, z0.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "fmla z29.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x13, x17, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x8, x17, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x4, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z13.h\n"
- "fmla z31.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "fmla z25.h, p3/M, z4.h, z11.h\n"
- "fmla z27.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x5, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z7.h, z12.h\n"
- "fmla z30.h, p3/M, z3.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "fmla z23.h, p3/M, z2.h, z11.h\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z12.h\n"
- "fmla z25.h, p3/M, z0.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x16]\n"
- "ld1h { z11.h }, p2/Z, [x16, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z30.h, p3/M, z5.h, z13.h\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "fmla z26.h, p3/M, z3.h, z12.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "fmla z27.h, p3/M, z8.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x13, x7, LSL #1]\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmax z23.h, p3/M, z23.h, z17.h\n"
- "fmla z29.h, p3/M, z8.h, z13.h\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z6.h, z13.h\n"
- ".inst 0xc170ca38 // fclamp { z24.h-z27.h }, z17.h, z16.h\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- ".inst 0xc170ca3c // fclamp { z28.h-z31.h }, z17.h, z16.h\n"
- "st1h { z26.h }, p0, [x23]\n"
- "st1h { z27.h }, p0, [x23, x27, LSL #1]\n"
- "st1h { z23.h }, p0, [x25]\n"
- "st1h { z24.h }, p0, [x25, x27, LSL #1]\n"
- "st1h { z25.h }, p0, [x25, x24, LSL #1]\n"
- "st1h { z28.h }, p0, [x23, x24, LSL #1]\n"
- "st1h { z29.h }, p0, [x22]\n"
- "st1h { z30.h }, p0, [x22, x27, LSL #1]\n"
- "st1h { z31.h }, p0, [x22, x24, LSL #1]\n"
+ "fmla z20.h, p3/M, z6.h, z18.h\n"
+ "fmla z24.h, p3/M, z5.h, z13.h\n"
+ "fmla z21.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z17.h }, p2/Z, [x5, x4, LSL #1]\n"
+ "fmla z22.h, p3/M, z4.h, z18.h\n"
+ "fmla z31.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x5, x8, LSL #1]\n"
+ "fmla z23.h, p3/M, z3.h, z18.h\n"
+ "fmla z30.h, p3/M, z0.h, z18.h\n"
+ "fmla z29.h, p3/M, z1.h, z18.h\n"
+ "fmla z20.h, p3/M, z0.h, z17.h\n"
+ "fmla z24.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x16]\n"
+ "fmla z21.h, p3/M, z1.h, z16.h\n"
+ "fmla z28.h, p3/M, z4.h, z19.h\n"
+ "fmla z31.h, p3/M, z1.h, z19.h\n"
+ "fmla z23.h, p3/M, z5.h, z19.h\n"
+ "fmla z30.h, p3/M, z2.h, z19.h\n"
+ "fmla z22.h, p3/M, z0.h, z18.h\n"
+ "fmla z20.h, p3/M, z2.h, z16.h\n"
+ "fmla z24.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x16, x17, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x14]\n"
+ "fmla z21.h, p3/M, z7.h, z19.h\n"
+ "fmla z28.h, p3/M, z2.h, z17.h\n"
+ "fmla z20.h, p3/M, z8.h, z19.h\n"
+ "fmla z29.h, p3/M, z3.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "fmla z24.h, p3/M, z3.h, z18.h\n"
+ "fmla z21.h, p3/M, z5.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "fmla z22.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x13, x4, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z19.h\n"
+ "fmla z31.h, p3/M, z3.h, z19.h\n"
+ "ld1h { z17.h }, p2/Z, [x16, x4, LSL #1]\n"
+ "fmla z23.h, p3/M, z7.h, z19.h\n"
+ "fmla z28.h, p3/M, z6.h, z19.h\n"
+ "fmla z29.h, p3/M, z5.h, z19.h\n"
+ "fmla z22.h, p3/M, z8.h, z19.h\n"
+ "fmla z20.h, p3/M, z3.h, z17.h\n"
+ "fmla z31.h, p3/M, z5.h, z18.h\n"
+ "fmla z30.h, p3/M, z6.h, z16.h\n"
+ "fmla z28.h, p3/M, z8.h, z18.h\n"
+ "fmla z23.h, p3/M, z0.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x16, x8, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x13, x8, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z17.h\n"
+ "fmla z22.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x14, x4, LSL #1]\n"
+ "fmla z20.h, p3/M, z5.h, z18.h\n"
+ "fmla z21.h, p3/M, z4.h, z18.h\n"
+ "fmla z30.h, p3/M, z8.h, z16.h\n"
+ "fmla z31.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x14, x8, LSL #1]\n"
+ "fmla z23.h, p3/M, z2.h, z18.h\n"
+ "fmla z28.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x5, x7, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z17.h\n"
+ "fmla z22.h, p3/M, z7.h, z17.h\n"
+ "fmla z30.h, p3/M, z3.h, z17.h\n"
+ "fmla z24.h, p3/M, z2.h, z16.h\n"
+ "fmla z31.h, p3/M, z4.h, z19.h\n"
+ "fmla z23.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x15]\n"
+ "fmla z20.h, p3/M, z1.h, z16.h\n"
+ "fmla z21.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x15, x17, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z19.h\n"
+ "fmla z30.h, p3/M, z5.h, z19.h\n"
+ "fmla z24.h, p3/M, z6.h, z18.h\n"
+ "fmla z29.h, p3/M, z0.h, z18.h\n"
+ "fmla z31.h, p3/M, z2.h, z17.h\n"
+ "fmla z23.h, p3/M, z8.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "fmla z22.h, p3/M, z3.h, z18.h\n"
+ "fmla z21.h, p3/M, z8.h, z17.h\n"
+ "fmla z28.h, p3/M, z5.h, z17.h\n"
+ "fmax z24.h, p3/M, z24.h, z15.h\n"
+ "fmla z29.h, p3/M, z8.h, z16.h\n"
+ "fmla z30.h, p3/M, z7.h, z16.h\n"
+ "fmla z31.h, p3/M, z6.h, z16.h\n"
+ ".inst 0xc16ec9f4 // fclamp { z20.h-z23.h }, z15.h, z14.h\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
+ ".inst 0xc16ec9fc // fclamp { z28.h-z31.h }, z15.h, z14.h\n"
+ "st1h { z22.h }, p0, [x24]\n"
+ "st1h { z24.h }, p0, [x26]\n"
+ "st1h { z20.h }, p0, [x26, x27, LSL #1]\n"
+ "st1h { z21.h }, p0, [x26, x25, LSL #1]\n"
+ "st1h { z23.h }, p0, [x24, x27, LSL #1]\n"
+ "st1h { z28.h }, p0, [x24, x25, LSL #1]\n"
+ "st1h { z29.h }, p0, [x23]\n"
+ "st1h { z30.h }, p0, [x23, x27, LSL #1]\n"
+ "st1h { z31.h }, p0, [x23, x25, LSL #1]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
@@ -480,4 +480,4 @@ void sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index e85cb9e017..acf66316ea 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -93,344 +93,344 @@ void sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"mov x15, #0x0\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldp x14, x13, [x16, #0x0]\n"
- "ldp x12, x11, [x16, #0x10]\n"
- "cnth x10\n"
+ "ldp x24, x23, [x16, #0x0]\n"
+ "ldp x22, x21, [x16, #0x10]\n"
+ "cnth x14\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1h { z17.h }, p3/Z, [x17]\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1h { z30.h }, p3/Z, [x17]\n"
"addvl x17, x17, #1\n"
- "ldr x9, [x16, #0x20]\n"
- "cmp x10, %x[n_channels]\n"
+ "ldr x20, [x16, #0x20]\n"
+ "cmp x14, %x[n_channels]\n"
".inst 0xa040a220 // ld1h { z0.h-z3.h }, pn8.b/Z, [x17]\n"
"addvl x17, x17, #4\n"
- "ldr x28, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "sub x27, XZR, x10\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "sub x12, XZR, x14\n"
".inst 0xa040a224 // ld1h { z4.h-z7.h }, pn8.b/Z, [x17]\n"
"addvl x17, x17, #4\n"
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"ld1h { z8.h }, p3/Z, [x17]\n"
"addvl x17, x17, #1\n"
- "ld1h { z9.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x9, x15, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x20, x15, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z23, z17\n fmla z23.h, p3/M, z8.h, z9.h\n"
- "movprfx z24, z17\n fmla z24.h, p3/M, z7.h, z9.h\n"
- "ldr x26, [x16, #0x30]\n"
- "inch x27\n"
- "movprfx z25, z17\n fmla z25.h, p3/M, z6.h, z9.h\n"
- "movprfx z26, z17\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "ldr x25, [x16, #0x38]\n"
+ "movprfx z31, z30\n fmla z31.h, p3/M, z8.h, z9.h\n"
+ "movprfx z24, z30\n fmla z24.h, p3/M, z7.h, z9.h\n"
+ "ldr x23, [x16, #0x30]\n"
+ "inch x12\n"
+ "movprfx z25, z30\n fmla z25.h, p3/M, z6.h, z9.h\n"
+ "movprfx z26, z30\n fmla z26.h, p3/M, z5.h, z9.h\n"
+ "ldr x27, [x16, #0x38]\n"
"mov p1.b, p2.b\n"
- "movprfx z27, z17\n fmla z27.h, p3/M, z4.h, z9.h\n"
- "movprfx z28, z17\n fmla z28.h, p3/M, z3.h, z9.h\n"
- "ldr x24, [x16, #0x28]\n"
- "whilelt p0.h, x10, %x[n_channels]\n"
- "movprfx z29, z17\n fmla z29.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z17\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ldr x13, [x16, #0x48]\n"
- "fmla z23.h, p3/M, z0.h, z10.h\n"
+ "movprfx z27, z30\n fmla z27.h, p3/M, z4.h, z9.h\n"
+ "movprfx z20, z30\n fmla z20.h, p3/M, z3.h, z9.h\n"
+ "ldr x22, [x16, #0x28]\n"
+ "whilelt p0.h, x14, %x[n_channels]\n"
+ "movprfx z21, z30\n fmla z21.h, p3/M, z2.h, z9.h\n"
+ "movprfx z23, z30\n fmla z23.h, p3/M, z0.h, z9.h\n"
+ "ldr x21, [x16, #0x48]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
"fmla z24.h, p3/M, z4.h, z13.h\n"
- "ldr x14, [x16, #0x40]\n"
+ "ldr x20, [x16, #0x40]\n"
"fmla z25.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x23, x15, LSL #1]\n"
"fmla z26.h, p3/M, z2.h, z13.h\n"
- "ldr x12, [x16, #0x50]\n"
+ "ldr x26, [x16, #0x50]\n"
"fmla z27.h, p3/M, z1.h, z13.h\n"
- "fmla z28.h, p3/M, z0.h, z13.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x11, [x16, #0x58]\n"
- "fmla z29.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
- "movprfx z30, z17\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "ldr x9, [x16, #0x60]\n"
- "fmla z23.h, p3/M, z5.h, z13.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "ldr x24, [x16, #0x68]\n"
- "ld1h { z17.h }, p3/Z, [x17]\n"
+ "fmla z20.h, p3/M, z0.h, z13.h\n"
+ "ld1h { z19.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x25, [x16, #0x58]\n"
+ "fmla z21.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "movprfx z22, z30\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "ldr x24, [x16, #0x60]\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "fmla z24.h, p3/M, z6.h, z17.h\n"
+ "ldr x23, [x16, #0x68]\n"
+ "ld1h { z30.h }, p3/Z, [x17]\n"
"fmla z25.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z4.h, z11.h\n"
- "ldr x26, [x16, #0x70]\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z27.h, p3/M, z3.h, z11.h\n"
- "ldr x25, [x16, #0x78]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z28.h, p3/M, z4.h, z10.h\n"
- "ldr x14, [x16, #0x80]\n"
+ "ld1h { z18.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z17.h\n"
+ "ldr x22, [x16, #0x70]\n"
+ "fmla z23.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z27.h, p3/M, z3.h, z17.h\n"
+ "ldr x21, [x16, #0x78]\n"
+ "fmla z22.h, p3/M, z0.h, z17.h\n"
+ "fmla z20.h, p3/M, z4.h, z19.h\n"
+ "ldr x20, [x16, #0x80]\n"
"addvl x17, x17, #1\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "ldr x13, [x16, #0x88]\n"
- "fmla z29.h, p3/M, z1.h, z11.h\n"
- "fmla z25.h, p3/M, z1.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldr x12, [x16, #0x90]\n"
- "fmla z27.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "ldr x23, [x28, #0x0]\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "ldr x22, [x28, #0x8]\n"
- "fmla z23.h, p3/M, z1.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z24.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z0.h, z11.h\n"
- "ldr x9, [x16, #0xa0]\n"
- "fmla z25.h, p3/M, z7.h, z10.h\n"
- "ldr x11, [x16, #0x98]\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "ldr x21, [x28, #0x10]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0xb0]\n"
- "fmla z24.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ldr x24, [x16, #0xa8]\n"
- "fmla z25.h, p3/M, z5.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x16, #0xb8]\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmla z29.h, p3/M, z5.h, z10.h\n"
- "ldr x14, [x16, #0xc0]\n"
- "fmla z31.h, p3/M, z3.h, z10.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "ldr x20, [x28, #0x18]\n"
- "fmla z24.h, p3/M, z3.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "fmla z27.h, p3/M, z0.h, z12.h\n"
- "fmla z30.h, p3/M, z6.h, z13.h\n"
- "fmla z29.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x12, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "fmla z25.h, p3/M, z4.h, z11.h\n"
- "fmla z27.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x15, LSL #1]\n"
- "ldr x9, [x16, #0x20]\n"
- "fmla z30.h, p3/M, z8.h, z13.h\n"
- "fmla z26.h, p3/M, z7.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z23.h, p3/M, z2.h, z11.h\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z12.h\n"
- "fmla z25.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z3.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "fmla z27.h, p3/M, z8.h, z13.h\n"
- "fmla z26.h, p3/M, z3.h, z12.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z30.h, p3/M, z5.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ldp x14, x13, [x16, #0x0]\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmax z23.h, p3/M, z23.h, z18.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ldp x12, x11, [x16, #0x10]\n"
+ "fmla z31.h, p3/M, z7.h, z17.h\n"
+ "fmla z24.h, p3/M, z0.h, z18.h\n"
+ "ldr x11, [x16, #0x88]\n"
+ "fmla z21.h, p3/M, z1.h, z17.h\n"
+ "fmla z25.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ldr x10, [x16, #0x90]\n"
+ "fmla z27.h, p3/M, z5.h, z19.h\n"
+ "fmla z23.h, p3/M, z1.h, z19.h\n"
+ "ldr x9, [x13, #0x0]\n"
+ "fmla z22.h, p3/M, z2.h, z19.h\n"
+ "ldr x28, [x13, #0x8]\n"
+ "fmla z31.h, p3/M, z1.h, z18.h\n"
+ "fmla z24.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z9.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "ldr x27, [x16, #0x98]\n"
+ "ld1h { z16.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z0.h, z17.h\n"
+ "fmla z25.h, p3/M, z7.h, z19.h\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "ldr x26, [x13, #0x10]\n"
+ "fmla z20.h, p3/M, z2.h, z9.h\n"
+ "ldr x25, [x13, #0x18]\n"
+ "fmla z24.h, p3/M, z8.h, z19.h\n"
+ "fmla z21.h, p3/M, z3.h, z16.h\n"
+ "ld1h { z29.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z26.h, p3/M, z6.h, z16.h\n"
+ "fmla z31.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ldr x22, [x16, #0xb0]\n"
+ "fmla z25.h, p3/M, z5.h, z9.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x21, [x16, #0xb8]\n"
+ "fmla z27.h, p3/M, z7.h, z29.h\n"
+ "fmla z20.h, p3/M, z6.h, z29.h\n"
+ "ld1h { z17.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x20, [x16, #0xc0]\n"
+ "fmla z22.h, p3/M, z4.h, z29.h\n"
+ "fmla z21.h, p3/M, z5.h, z29.h\n"
+ "fmla z23.h, p3/M, z3.h, z29.h\n"
+ "fmla z26.h, p3/M, z8.h, z29.h\n"
+ "fmla z24.h, p3/M, z3.h, z17.h\n"
+ "fmla z31.h, p3/M, z4.h, z17.h\n"
+ "fmla z20.h, p3/M, z8.h, z18.h\n"
+ "fmla z27.h, p3/M, z0.h, z17.h\n"
+ "fmla z22.h, p3/M, z6.h, z16.h\n"
+ "fmla z21.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z13.h }, p2/Z, [x10, x15, LSL #1]\n"
+ "fmla z23.h, p3/M, z5.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x11, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z5.h, z16.h\n"
+ "fmla z25.h, p3/M, z4.h, z16.h\n"
+ "fmla z27.h, p3/M, z2.h, z16.h\n"
+ "fmla z20.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z28.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "ldr x24, [x16, #0x20]\n"
+ "fmla z22.h, p3/M, z8.h, z13.h\n"
+ "fmla z26.h, p3/M, z7.h, z17.h\n"
+ "fmla z21.h, p3/M, z4.h, z17.h\n"
+ "fmla z23.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z31.h, p3/M, z2.h, z28.h\n"
+ "fmla z24.h, p3/M, z1.h, z28.h\n"
+ "fmla z27.h, p3/M, z6.h, z17.h\n"
+ "fmla z25.h, p3/M, z0.h, z28.h\n"
+ "ld1h { z18.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmla z22.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmla z20.h, p3/M, z7.h, z16.h\n"
+ "fmla z23.h, p3/M, z4.h, z16.h\n"
+ "fmla z31.h, p3/M, z6.h, z17.h\n"
+ "fmla z21.h, p3/M, z0.h, z17.h\n"
+ "fmla z22.h, p3/M, z5.h, z16.h\n"
+ "fmla z27.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldp x23, x22, [x16, #0x0]\n"
+ "fmla z23.h, p3/M, z2.h, z18.h\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
+ "ldp x21, x20, [x16, #0x10]\n"
"inch x15\n"
+ "fmla z25.h, p3/M, z8.h, z18.h\n"
+ "fmla z20.h, p3/M, z5.h, z18.h\n"
".inst 0xa040a220 // ld1h { z0.h-z3.h }, pn8.b/Z, [x17]\n"
"addvl x17, x17, #4\n"
+ "fmax z31.h, p3/M, z31.h, z15.h\n"
+ "fmla z21.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z9.h }, p0/Z, [x23, x14, LSL #1]\n"
"whilelt p2.h, x15, %x[n_channels]\n"
- "fmla z29.h, p3/M, z8.h, z13.h\n"
- ".inst 0xc170ca58 // fclamp { z24.h-z27.h }, z18.h, z16.h\n"
- "ld1h { z9.h }, p0/Z, [x14, x10, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "ld1h { z10.h }, p0/Z, [x13, x10, LSL #1]\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- "fmla z31.h, p3/M, z6.h, z13.h\n"
- "ld1h { z11.h }, p0/Z, [x12, x10, LSL #1]\n"
- "ld1h { z12.h }, p0/Z, [x11, x10, LSL #1]\n"
- "st1h { z24.h }, p1, [x22, x27, LSL #1]\n"
- "ldr x22, [x28, #0x28]\n"
- "st1h { z25.h }, p1, [x21, x27, LSL #1]\n"
- "ldr x21, [x28, #0x30]\n"
- "ld1h { z13.h }, p0/Z, [x9, x10, LSL #1]\n"
- "inch x10\n"
- "st1h { z23.h }, p1, [x23, x27, LSL #1]\n"
- "ldr x23, [x28, #0x20]\n"
+ "fmla z22.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z10.h }, p0/Z, [x22, x14, LSL #1]\n"
+ "fmla z23.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z11.h }, p0/Z, [x21, x14, LSL #1]\n"
+ ".inst 0xc16ec9f8 // fclamp { z24.h-z27.h }, z15.h, z14.h\n"
+ "ld1h { z12.h }, p0/Z, [x20, x14, LSL #1]\n"
+ "fmin z31.h, p3/M, z31.h, z14.h\n"
+ "ld1h { z13.h }, p0/Z, [x24, x14, LSL #1]\n"
+ "inch x14\n"
".inst 0xa040a224 // ld1h { z4.h-z7.h }, pn8.b/Z, [x17]\n"
"addvl x17, x17, #4\n"
- "st1h { z26.h }, p1, [x20, x27, LSL #1]\n"
- "ldr x20, [x28, #0x38]\n"
- "cmp x10, %x[n_channels]\n"
- ".inst 0xc170ca5c // fclamp { z28.h-z31.h }, z18.h, z16.h\n"
+ "cmp x14, %x[n_channels]\n"
+ ".inst 0xc16ec9f4 // fclamp { z20.h-z23.h }, z15.h, z14.h\n"
"ld1h { z8.h }, p3/Z, [x17]\n"
"addvl x17, x17, #1\n"
- "st1h { z27.h }, p1, [x23, x27, LSL #1]\n"
- "ldr x23, [x28, #0x40]\n"
- "st1h { z28.h }, p1, [x22, x27, LSL #1]\n"
- "st1h { z29.h }, p1, [x21, x27, LSL #1]\n"
- "st1h { z30.h }, p1, [x20, x27, LSL #1]\n"
- "st1h { z31.h }, p1, [x23, x27, LSL #1]\n"
+ "st1h { z24.h }, p1, [x28, x12, LSL #1]\n"
+ "ldr x23, [x13, #0x28]\n"
+ "st1h { z31.h }, p1, [x9, x12, LSL #1]\n"
+ "ldr x20, [x13, #0x20]\n"
+ "st1h { z25.h }, p1, [x26, x12, LSL #1]\n"
+ "ldr x22, [x13, #0x30]\n"
+ "st1h { z26.h }, p1, [x25, x12, LSL #1]\n"
+ "ldr x21, [x13, #0x38]\n"
+ "st1h { z27.h }, p1, [x20, x12, LSL #1]\n"
+ "ldr x20, [x13, #0x40]\n"
+ "st1h { z20.h }, p1, [x23, x12, LSL #1]\n"
+ "st1h { z21.h }, p1, [x22, x12, LSL #1]\n"
+ "st1h { z22.h }, p1, [x21, x12, LSL #1]\n"
+ "st1h { z23.h }, p1, [x20, x12, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z23, z17\n fmla z23.h, p3/M, z8.h, z9.h\n"
- "movprfx z24, z17\n fmla z24.h, p3/M, z7.h, z9.h\n"
- "ldr x26, [x16, #0x30]\n"
- "inch x27\n"
- "movprfx z25, z17\n fmla z25.h, p3/M, z6.h, z9.h\n"
- "movprfx z26, z17\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "ldr x25, [x16, #0x38]\n"
- "mov p1.b, p2.b\n"
- "movprfx z27, z17\n fmla z27.h, p3/M, z4.h, z9.h\n"
- "movprfx z28, z17\n fmla z28.h, p3/M, z3.h, z9.h\n"
- "ldr x24, [x16, #0x28]\n"
- "movprfx z29, z17\n fmla z29.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z17\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ldr x13, [x16, #0x48]\n"
- "fmla z23.h, p3/M, z0.h, z10.h\n"
+ "movprfx z20, z30\n fmla z20.h, p3/M, z8.h, z9.h\n"
+ "movprfx z24, z30\n fmla z24.h, p3/M, z7.h, z9.h\n"
+ "ldr x23, [x16, #0x30]\n"
+ "inch x12\n"
+ "movprfx z25, z30\n fmla z25.h, p3/M, z6.h, z9.h\n"
+ "movprfx z26, z30\n fmla z26.h, p3/M, z5.h, z9.h\n"
+ "ldr x27, [x16, #0x38]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z27, z30\n fmla z27.h, p3/M, z4.h, z9.h\n"
+ "movprfx z28, z30\n fmla z28.h, p3/M, z3.h, z9.h\n"
+ "ldr x22, [x16, #0x28]\n"
+ "movprfx z29, z30\n fmla z29.h, p3/M, z2.h, z9.h\n"
+ "movprfx z31, z30\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "ldr x21, [x16, #0x48]\n"
+ "fmla z20.h, p3/M, z0.h, z10.h\n"
"fmla z24.h, p3/M, z4.h, z13.h\n"
- "ldr x14, [x16, #0x40]\n"
+ "ldr x20, [x16, #0x40]\n"
"fmla z25.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ld1h { z19.h }, p2/Z, [x23, x15, LSL #1]\n"
"fmla z26.h, p3/M, z2.h, z13.h\n"
- "ldr x12, [x16, #0x50]\n"
+ "ldr x26, [x16, #0x50]\n"
"fmla z27.h, p3/M, z1.h, z13.h\n"
"fmla z28.h, p3/M, z0.h, z13.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x11, [x16, #0x58]\n"
+ "ld1h { z18.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x25, [x16, #0x58]\n"
"fmla z29.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
- "movprfx z30, z17\n fmla z30.h, p3/M, z1.h, z9.h\n"
- "ldr x9, [x16, #0x60]\n"
- "fmla z23.h, p3/M, z5.h, z13.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "ldr x24, [x16, #0x68]\n"
+ "ld1h { z16.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ldr x24, [x16, #0x60]\n"
+ "fmla z20.h, p3/M, z5.h, z13.h\n"
+ "fmla z24.h, p3/M, z6.h, z19.h\n"
+ "ldr x23, [x16, #0x68]\n"
"fmla z25.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z4.h, z11.h\n"
- "ldr x26, [x16, #0x70]\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z27.h, p3/M, z3.h, z11.h\n"
- "ldr x25, [x16, #0x78]\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z28.h, p3/M, z4.h, z10.h\n"
- "ldr x14, [x16, #0x80]\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "ldr x13, [x16, #0x88]\n"
- "fmla z29.h, p3/M, z1.h, z11.h\n"
- "fmla z25.h, p3/M, z1.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldr x12, [x16, #0x90]\n"
- "fmla z27.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "ldr x23, [x28, #0x0]\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "ldr x22, [x28, #0x8]\n"
- "fmla z23.h, p3/M, z1.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z24.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x9, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z0.h, z11.h\n"
- "ldr x9, [x16, #0xa0]\n"
- "fmla z25.h, p3/M, z7.h, z10.h\n"
- "ldr x11, [x16, #0x98]\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "ldr x21, [x28, #0x10]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0xb0]\n"
- "fmla z24.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ldr x24, [x16, #0xa8]\n"
- "fmla z25.h, p3/M, z5.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x16, #0xb8]\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmla z29.h, p3/M, z5.h, z10.h\n"
- "ldr x14, [x16, #0xc0]\n"
- "fmla z31.h, p3/M, z3.h, z10.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "ldr x20, [x28, #0x18]\n"
- "fmla z24.h, p3/M, z3.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "fmla z27.h, p3/M, z0.h, z12.h\n"
- "fmla z30.h, p3/M, z6.h, z13.h\n"
- "fmla z29.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x12, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "fmla z25.h, p3/M, z4.h, z11.h\n"
- "fmla z27.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x9, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z13.h\n"
- "fmla z26.h, p3/M, z7.h, z12.h\n"
- "fmla z29.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z23.h, p3/M, z2.h, z11.h\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z12.h\n"
- "fmla z25.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z3.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "fmla z29.h, p3/M, z0.h, z12.h\n"
- "fmla z27.h, p3/M, z8.h, z13.h\n"
- "fmla z26.h, p3/M, z3.h, z12.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z30.h, p3/M, z5.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmax z23.h, p3/M, z23.h, z18.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "fmla z29.h, p3/M, z8.h, z13.h\n"
- ".inst 0xc170ca58 // fclamp { z24.h-z27.h }, z18.h, z16.h\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- "fmla z31.h, p3/M, z6.h, z13.h\n"
- "st1h { z24.h }, p1, [x22, x27, LSL #1]\n"
- "ldr x22, [x28, #0x28]\n"
- "st1h { z25.h }, p1, [x21, x27, LSL #1]\n"
- "ldr x21, [x28, #0x30]\n"
- "st1h { z26.h }, p1, [x20, x27, LSL #1]\n"
- "ldr x20, [x28, #0x38]\n"
- "st1h { z23.h }, p1, [x23, x27, LSL #1]\n"
- "ldr x23, [x28, #0x20]\n"
- ".inst 0xc170ca5c // fclamp { z28.h-z31.h }, z18.h, z16.h\n"
- "st1h { z27.h }, p1, [x23, x27, LSL #1]\n"
- "ldr x23, [x28, #0x40]\n"
- "st1h { z28.h }, p1, [x22, x27, LSL #1]\n"
- "st1h { z29.h }, p1, [x21, x27, LSL #1]\n"
- "st1h { z30.h }, p1, [x20, x27, LSL #1]\n"
- "st1h { z31.h }, p1, [x23, x27, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z19.h\n"
+ "ldr x22, [x16, #0x70]\n"
+ "fmla z31.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z27.h, p3/M, z3.h, z19.h\n"
+ "ldr x21, [x16, #0x78]\n"
+ "fmla z30.h, p3/M, z0.h, z19.h\n"
+ "fmla z28.h, p3/M, z4.h, z18.h\n"
+ "ldr x20, [x16, #0x80]\n"
+ "fmla z20.h, p3/M, z7.h, z19.h\n"
+ "fmla z24.h, p3/M, z0.h, z17.h\n"
+ "ldr x11, [x16, #0x88]\n"
+ "fmla z29.h, p3/M, z1.h, z19.h\n"
+ "fmla z25.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ldr x10, [x16, #0x90]\n"
+ "fmla z27.h, p3/M, z5.h, z18.h\n"
+ "fmla z31.h, p3/M, z1.h, z18.h\n"
+ "ldr x9, [x13, #0x0]\n"
+ "fmla z30.h, p3/M, z2.h, z18.h\n"
+ "ldr x28, [x13, #0x8]\n"
+ "fmla z20.h, p3/M, z1.h, z17.h\n"
+ "fmla z24.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "ldr x27, [x16, #0x98]\n"
+ "ld1h { z16.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z0.h, z19.h\n"
+ "fmla z25.h, p3/M, z7.h, z18.h\n"
+ "ldr x26, [x16, #0xa0]\n"
+ "ldr x25, [x13, #0x10]\n"
+ "fmla z28.h, p3/M, z2.h, z17.h\n"
+ "ldr x24, [x13, #0x18]\n"
+ "fmla z24.h, p3/M, z8.h, z18.h\n"
+ "fmla z29.h, p3/M, z3.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z26.h, p3/M, z6.h, z16.h\n"
+ "fmla z20.h, p3/M, z3.h, z19.h\n"
+ "ld1h { z19.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ldr x22, [x16, #0xb0]\n"
+ "fmla z25.h, p3/M, z5.h, z17.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x21, [x16, #0xb8]\n"
+ "fmla z27.h, p3/M, z7.h, z18.h\n"
+ "fmla z28.h, p3/M, z6.h, z18.h\n"
+ "ld1h { z17.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x20, [x16, #0xc0]\n"
+ "fmla z30.h, p3/M, z4.h, z18.h\n"
+ "fmla z29.h, p3/M, z5.h, z18.h\n"
+ "fmla z31.h, p3/M, z3.h, z18.h\n"
+ "fmla z26.h, p3/M, z8.h, z18.h\n"
+ "fmla z24.h, p3/M, z3.h, z17.h\n"
+ "fmla z20.h, p3/M, z4.h, z17.h\n"
+ "fmla z28.h, p3/M, z8.h, z19.h\n"
+ "fmla z27.h, p3/M, z0.h, z17.h\n"
+ "fmla z30.h, p3/M, z6.h, z16.h\n"
+ "fmla z29.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x10, x15, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x11, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z5.h, z16.h\n"
+ "fmla z25.h, p3/M, z4.h, z16.h\n"
+ "fmla z27.h, p3/M, z2.h, z16.h\n"
+ "fmla z28.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z18.h\n"
+ "fmla z26.h, p3/M, z7.h, z19.h\n"
+ "fmla z29.h, p3/M, z4.h, z19.h\n"
+ "fmla z31.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z20.h, p3/M, z2.h, z17.h\n"
+ "fmla z24.h, p3/M, z1.h, z17.h\n"
+ "fmla z27.h, p3/M, z6.h, z19.h\n"
+ "fmla z25.h, p3/M, z0.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z19.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z16.h\n"
+ "fmla z31.h, p3/M, z4.h, z16.h\n"
+ "fmla z20.h, p3/M, z6.h, z17.h\n"
+ "fmla z29.h, p3/M, z0.h, z17.h\n"
+ "fmla z30.h, p3/M, z5.h, z16.h\n"
+ "fmla z27.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z31.h, p3/M, z2.h, z18.h\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
+ "fmla z25.h, p3/M, z8.h, z18.h\n"
+ "fmla z28.h, p3/M, z5.h, z18.h\n"
+ "fmax z20.h, p3/M, z20.h, z15.h\n"
+ "fmla z29.h, p3/M, z8.h, z16.h\n"
+ "fmla z30.h, p3/M, z7.h, z16.h\n"
+ "fmla z31.h, p3/M, z6.h, z16.h\n"
+ ".inst 0xc16ec9f8 // fclamp { z24.h-z27.h }, z15.h, z14.h\n"
+ "fmin z20.h, p3/M, z20.h, z14.h\n"
+ ".inst 0xc16ec9fc // fclamp { z28.h-z31.h }, z15.h, z14.h\n"
+ "st1h { z24.h }, p0, [x28, x12, LSL #1]\n"
+ "ldr x23, [x13, #0x28]\n"
+ "st1h { z20.h }, p0, [x9, x12, LSL #1]\n"
+ "ldr x20, [x13, #0x20]\n"
+ "st1h { z25.h }, p0, [x25, x12, LSL #1]\n"
+ "ldr x22, [x13, #0x30]\n"
+ "st1h { z26.h }, p0, [x24, x12, LSL #1]\n"
+ "ldr x21, [x13, #0x38]\n"
+ "st1h { z27.h }, p0, [x20, x12, LSL #1]\n"
+ "ldr x20, [x13, #0x40]\n"
+ "st1h { z28.h }, p0, [x23, x12, LSL #1]\n"
+ "st1h { z29.h }, p0, [x22, x12, LSL #1]\n"
+ "st1h { z30.h }, p0, [x21, x12, LSL #1]\n"
+ "st1h { z31.h }, p0, [x20, x12, LSL #1]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
@@ -441,4 +441,4 @@ void sme2_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp
index 6b75d12295..f06fb72d31 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,12 +22,14 @@
* SOFTWARE.
*/
-#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "utils.hpp"
#include <cstdint>
#pragma once
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
namespace arm_conv {
namespace depthwise {
@@ -65,3 +67,5 @@ class sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst : public DepthwiseDepthfirs
} // namespace depthwise
} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index 37a9febf47..0d1151ae6f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -88,98 +88,98 @@ void sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x1, #0x0\n"
"mov x2, #0x0\n"
+ "mov x3, #0x0\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
"1:" // Tile loop
- "str x1, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x22, #0x4\n"
- "str x2, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x3, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x4, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "mul x20, x1, x21\n" // offset = tile_i * ld_input_row
- "ldr x5, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x2, x3, x20\n" // offset += tile_j * ld_input_col
+ "ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
+ "ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
+ "madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
+ "add x7, x4, x4\n"
"mul x20, x20, x22\n" // offset *= kernel_stride * output_size
- "add x6, x3, x3\n"
- "add x4, x4, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x7, x4, x21, LSL #1\n"
- "add x8, x6, x3\n"
- "add x17, x7, x21, LSL #1\n"
- "add x16, x8, x3\n"
- "add x15, x17, x21, LSL #1\n"
- "add x14, x16, x3\n"
- "add x13, x15, x21, LSL #1\n"
+ "add x8, x7, x4\n"
+ "add x5, x5, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x17, x8, x4\n"
+ "add x16, x5, x21, LSL #1\n"
+ "add x15, x17, x4\n"
+ "add x14, x16, x21, LSL #1\n"
+ "add x13, x14, x21, LSL #1\n"
"add x12, x13, x21, LSL #1\n"
- "cbnz x2, 2f\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x11, x12, x21, LSL #1\n"
+ "cbnz x3, 2f\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"lsl x10, %x[n_channels], #0x1\n"
"mov x21, #0x8\n"
- "mul x21, x21, x3\n"
- "add x9, x17, x6, LSL #1\n"
- "add x28, x4, x14, LSL #1\n"
- "add x27, x17, x8, LSL #1\n"
- "sub x20, x11, x2\n"
- "add x26, x12, x14, LSL #1\n"
+ "mul x21, x21, x4\n"
+ "add x9, x14, x7, LSL #1\n"
+ "add x28, x5, x15, LSL #1\n"
+ "add x27, x14, x8, LSL #1\n"
+ "sub x20, x20, x3\n"
+ "add x26, x11, x15, LSL #1\n"
"sub x20, x20, #0x1\n"
- "add x25, x15, x6, LSL #1\n"
+ "add x25, x13, x7, LSL #1\n"
"and x20, x20, #0x3fffff\n"
- "add x24, x4, x3, LSL #1\n"
+ "add x24, x5, x4, LSL #1\n"
"orr x10, x10, x20, LSL #22\n"
- "add x23, x4, x16, LSL #1\n"
+ "add x23, x5, x17, LSL #1\n"
"orr x10, x10, x21, LSL #38\n"
- "add x22, x15, x8, LSL #1\n"
- "add x21, x7, x14, LSL #1\n"
- "add x20, x7, x6, LSL #1\n"
+ "add x22, x13, x8, LSL #1\n"
+ "add x21, x16, x15, LSL #1\n"
+ "add x20, x16, x7, LSL #1\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- "add x9, x13, x14, LSL #1\n"
- ".inst 0xf8aa489a // rprfm pldonce, x10, [x4]\n"
+ "add x9, x12, x15, LSL #1\n"
+ ".inst 0xf8aa48ba // rprfm pldonce, x10, [x5]\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
- "add x28, x7, x8, LSL #1\n"
+ "add x28, x16, x8, LSL #1\n"
".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- "add x27, x12, x3, LSL #1\n"
- ".inst 0xf8aa499a // rprfm pldonce, x10, [x12]\n"
+ "add x27, x11, x4, LSL #1\n"
+ ".inst 0xf8aa497a // rprfm pldonce, x10, [x11]\n"
".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- "add x26, x17, x3, LSL #1\n"
+ "add x26, x14, x4, LSL #1\n"
".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- "add x25, x12, x16, LSL #1\n"
+ "add x25, x11, x17, LSL #1\n"
".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
- "add x24, x17, x16, LSL #1\n"
+ "add x24, x14, x17, LSL #1\n"
".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
- "add x23, x4, x6, LSL #1\n"
+ "add x23, x5, x7, LSL #1\n"
".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- "add x22, x15, x3, LSL #1\n"
- ".inst 0xf8aa48fa // rprfm pldonce, x10, [x7]\n"
+ "add x22, x13, x4, LSL #1\n"
+ ".inst 0xf8aa4a1a // rprfm pldonce, x10, [x16]\n"
".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- "add x21, x4, x8, LSL #1\n"
- ".inst 0xf8aa49ba // rprfm pldonce, x10, [x13]\n"
+ "add x21, x5, x8, LSL #1\n"
+ ".inst 0xf8aa499a // rprfm pldonce, x10, [x12]\n"
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
- "add x20, x15, x16, LSL #1\n"
+ "add x20, x13, x17, LSL #1\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- "add x9, x17, x14, LSL #1\n"
+ "add x9, x14, x15, LSL #1\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
- "add x28, x13, x6, LSL #1\n"
+ "add x28, x12, x7, LSL #1\n"
".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- "add x27, x15, x14, LSL #1\n"
+ "add x27, x13, x15, LSL #1\n"
".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- "add x26, x12, x6, LSL #1\n"
+ "add x26, x11, x7, LSL #1\n"
".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- "add x25, x13, x8, LSL #1\n"
+ "add x25, x12, x8, LSL #1\n"
".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
- "add x24, x12, x8, LSL #1\n"
+ "add x24, x11, x8, LSL #1\n"
".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
- "add x23, x7, x3, LSL #1\n"
+ "add x23, x16, x4, LSL #1\n"
".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- "add x22, x7, x16, LSL #1\n"
+ "add x22, x16, x17, LSL #1\n"
".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- "add x21, x13, x3, LSL #1\n"
- ".inst 0xf8aa4a3a // rprfm pldonce, x10, [x17]\n"
+ "add x21, x12, x4, LSL #1\n"
+ ".inst 0xf8aa49da // rprfm pldonce, x10, [x14]\n"
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
- "add x20, x13, x16, LSL #1\n"
+ "add x20, x12, x17, LSL #1\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- ".inst 0xf8aa49fa // rprfm pldonce, x10, [x15]\n"
+ ".inst 0xf8aa49ba // rprfm pldonce, x10, [x13]\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
@@ -190,483 +190,483 @@ void sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"mov x21, #0x4\n"
- "ld1h { z15.h }, p3/Z, [x5]\n"
- "addvl x5, x5, #1\n"
+ "ld1h { z14.h }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
"ldr x9, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cnth x28\n"
- ".inst 0xa040a0a0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x5]\n"
- "addvl x5, x5, #4\n"
- "ldr x27, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "cnth x22\n"
+ ".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "ldr x28, [%x[params_struct], %[offsetof_args_outptr]]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- ".inst 0xa040a0a4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x5]\n"
- "addvl x5, x5, #4\n"
- "mul x20, x1, x22\n" // offset = tile_i * ld_output_row
- "cmp x28, %x[n_channels]\n"
- "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "madd x20, x2, x9, x20\n" // offset += tile_j * ld_output_col
- "add x26, x9, x9\n"
- "ld1rh { z13.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ ".inst 0xa040a0c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "mul x20, x2, x23\n" // offset = tile_i * ld_output_row
+ "cmp x22, %x[n_channels]\n"
+ "ld1rh { z13.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "madd x20, x3, x9, x20\n" // offset += tile_j * ld_output_col
+ "add x27, x9, x9\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"mul x20, x20, x21\n" // offset *= output_tile_size
- "add x25, x26, x9\n"
- "ld1h { z8.h }, p3/Z, [x5]\n"
- "add x27, x27, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x26, x27, x9\n"
+ "ld1h { z8.h }, p3/Z, [x6]\n"
+ "add x28, x28, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
"mov x21, #0x0\n"
- "ld1h { z9.h }, p2/Z, [x17, x6, LSL #1]\n"
- "add x24, x27, x22, LSL #1\n"
- "sub x20, XZR, x28\n"
- "ld1h { z10.h }, p2/Z, [x4]\n"
- "add x23, x24, x22, LSL #1\n"
- "ld1h { z11.h }, p2/Z, [x4, x14, LSL #1]\n"
- "addvl x5, x5, #1\n"
- "add x22, x23, x22, LSL #1\n"
- "ld1h { z12.h }, p2/Z, [x17, x8, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "add x25, x28, x23, LSL #1\n"
+ "sub x20, XZR, x22\n"
+ "ld1h { z10.h }, p2/Z, [x5]\n"
+ "add x24, x25, x23, LSL #1\n"
+ "ld1h { z11.h }, p2/Z, [x5, x15, LSL #1]\n"
+ "addvl x6, x6, #1\n"
+ "add x23, x24, x23, LSL #1\n"
+ "ld1h { z12.h }, p2/Z, [x14, x8, LSL #1]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
- "movprfx z21, z15\n fmla z21.h, p3/M, z4.h, z9.h\n"
- "movprfx z16, z15\n fmla z16.h, p3/M, z8.h, z9.h\n"
- "whilelt p1.h, x28, %x[n_channels]\n"
+ "movprfx z25, z14\n fmla z25.h, p3/M, z4.h, z9.h\n"
+ "movprfx z28, z14\n fmla z28.h, p3/M, z8.h, z9.h\n"
+ "whilelt p1.h, x22, %x[n_channels]\n"
"inch x21\n"
- "movprfx z22, z15\n fmla z22.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z15\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "inch x28\n"
+ "movprfx z26, z14\n fmla z26.h, p3/M, z3.h, z9.h\n"
+ "movprfx z17, z14\n fmla z17.h, p3/M, z1.h, z9.h\n"
+ "inch x22\n"
"mov p0.b, p2.b\n"
- "movprfx z26, z15\n fmla z26.h, p3/M, z0.h, z9.h\n"
- "movprfx z17, z15\n fmla z17.h, p3/M, z7.h, z9.h\n"
+ "movprfx z18, z14\n fmla z18.h, p3/M, z0.h, z9.h\n"
+ "movprfx z29, z14\n fmla z29.h, p3/M, z7.h, z9.h\n"
"inch x20\n"
- "movprfx z18, z15\n fmla z18.h, p3/M, z6.h, z9.h\n"
- "movprfx z20, z15\n fmla z20.h, p3/M, z5.h, z9.h\n"
- "fmla z21.h, p3/M, z5.h, z12.h\n"
- "movprfx z24, z15\n fmla z24.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x15, x6, LSL #1]\n"
- "fmla z16.h, p3/M, z0.h, z10.h\n"
- "movprfx z19, z15\n fmla z19.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x12]\n"
- "fmla z22.h, p3/M, z4.h, z12.h\n"
- "fmla z25.h, p3/M, z2.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x14, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z8.h, z12.h\n"
- "movprfx z28, z15\n fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z21.h, p3/M, z7.h, z9.h\n"
- "ld1h { z10.h }, p2/Z, [x15, x8, LSL #1]\n"
- "fmla z18.h, p3/M, z7.h, z12.h\n"
- "fmla z19.h, p3/M, z6.h, z12.h\n"
- "movprfx z23, z15\n fmla z23.h, p3/M, z3.h, z12.h\n"
- "movprfx z27, z15\n fmla z27.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x4, x3, LSL #1]\n"
- "movprfx z31, z15\n fmla z31.h, p3/M, z8.h, z11.h\n"
- "fmla z22.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x4, x16, LSL #1]\n"
- "fmla z25.h, p3/M, z4.h, z9.h\n"
- "fmla z26.h, p3/M, z3.h, z9.h\n"
- "movprfx z29, z15\n fmla z29.h, p3/M, z1.h, z9.h\n"
- "movprfx z30, z15\n fmla z30.h, p3/M, z0.h, z9.h\n"
- "ld1h { z15.h }, p3/Z, [x5]\n"
- "addvl x5, x5, #1\n"
- "fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z24.h, p3/M, z5.h, z9.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z21.h, p3/M, z8.h, z10.h\n"
- "ld1h { z9.h }, p2/Z, [x7]\n"
- "fmla z16.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x7, x14, LSL #1]\n"
- "fmla z18.h, p3/M, z2.h, z11.h\n"
- "fmla z19.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13]\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z23.h, p3/M, z6.h, z10.h\n"
- "fmla z25.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z4.h, z10.h\n"
- "fmla z27.h, p3/M, z3.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z10.h\n"
- "fmla z30.h, p3/M, z1.h, z10.h\n"
- "fmla z31.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x7, x6, LSL #1]\n"
- "fmla z20.h, p3/M, z0.h, z9.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x14, LSL #1]\n"
- "fmla z16.h, p3/M, z3.h, z9.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z19.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z2.h, z12.h\n"
- "fmla z17.h, p3/M, z4.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x7, x8, LSL #1]\n"
- "fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z22.h, p3/M, z0.h, z10.h\n"
- "fmla z27.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x3, LSL #1]\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
- "fmla z21.h, p3/M, z2.h, z12.h\n"
- "fmla z16.h, p3/M, z5.h, z10.h\n"
- "fmla z17.h, p3/M, z5.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x17, x3, LSL #1]\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z19.h, p3/M, z3.h, z12.h\n"
- "fmla z22.h, p3/M, z1.h, z12.h\n"
- "fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x17, x16, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z11.h\n"
- "fmla z29.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x16, LSL #1]\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z21.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z1.h, z10.h\n"
- "fmla z25.h, p3/M, z0.h, z10.h\n"
- "fmla z16.h, p3/M, z7.h, z10.h\n"
- "fmla z17.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x4, x6, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x15, x3, LSL #1]\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z19.h, p3/M, z7.h, z12.h\n"
- "fmla z22.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x4, x8, LSL #1]\n"
- "addvl x4, x4, #1\n"
- "fmla z20.h, p3/M, z7.h, z11.h\n"
- "fmla z21.h, p3/M, z6.h, z11.h\n"
- "fmla z24.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x15, x16, LSL #1]\n"
- "fmla z16.h, p3/M, z2.h, z10.h\n"
- "fmla z17.h, p3/M, z1.h, z10.h\n"
- "fmla z18.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x17]\n"
- "fmla z19.h, p3/M, z0.h, z12.h\n"
- "fmla z30.h, p3/M, z2.h, z11.h\n"
- "fmla z22.h, p3/M, z8.h, z11.h\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
- "fmla z20.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x6, LSL #1]\n"
+ "movprfx z30, z14\n fmla z30.h, p3/M, z6.h, z9.h\n"
+ "movprfx z24, z14\n fmla z24.h, p3/M, z5.h, z9.h\n"
+ "fmla z25.h, p3/M, z5.h, z12.h\n"
+ "movprfx z16, z14\n fmla z16.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "fmla z28.h, p3/M, z0.h, z10.h\n"
+ "movprfx z31, z14\n fmla z31.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z19.h }, p2/Z, [x11]\n"
+ "fmla z26.h, p3/M, z4.h, z12.h\n"
"fmla z17.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z22.h }, p2/Z, [x11, x15, LSL #1]\n"
"fmla z18.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x17, x14, LSL #1]\n"
- "addvl x17, x17, #1\n"
- "fmla z16.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x15]\n"
+ "fmla z29.h, p3/M, z8.h, z12.h\n"
+ "movprfx z20, z14\n fmla z20.h, p3/M, z6.h, z19.h\n"
+ "fmla z25.h, p3/M, z7.h, z9.h\n"
+ "ld1h { z11.h }, p2/Z, [x13, x8, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z12.h\n"
+ "fmla z31.h, p3/M, z6.h, z12.h\n"
+ "movprfx z27, z14\n fmla z27.h, p3/M, z3.h, z12.h\n"
+ "movprfx z19, z14\n fmla z19.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x5, x4, LSL #1]\n"
+ "movprfx z23, z14\n fmla z23.h, p3/M, z8.h, z22.h\n"
+ "fmla z26.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x5, x17, LSL #1]\n"
+ "fmla z17.h, p3/M, z4.h, z9.h\n"
+ "fmla z18.h, p3/M, z3.h, z9.h\n"
+ "movprfx z21, z14\n fmla z21.h, p3/M, z1.h, z9.h\n"
+ "movprfx z22, z14\n fmla z22.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z14.h }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
+ "fmla z24.h, p3/M, z8.h, z9.h\n"
+ "fmla z16.h, p3/M, z5.h, z9.h\n"
+ "fmla z20.h, p3/M, z2.h, z9.h\n"
+ "fmla z25.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z9.h }, p2/Z, [x16]\n"
+ "fmla z28.h, p3/M, z1.h, z10.h\n"
+ "fmla z29.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x16, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x12]\n"
+ "fmla z26.h, p3/M, z7.h, z11.h\n"
+ "fmla z27.h, p3/M, z6.h, z11.h\n"
+ "fmla z17.h, p3/M, z5.h, z11.h\n"
+ "fmla z18.h, p3/M, z4.h, z11.h\n"
+ "fmla z19.h, p3/M, z3.h, z11.h\n"
+ "fmla z21.h, p3/M, z2.h, z11.h\n"
+ "fmla z22.h, p3/M, z1.h, z11.h\n"
+ "fmla z23.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x16, x7, LSL #1]\n"
+ "fmla z24.h, p3/M, z0.h, z9.h\n"
+ "fmla z16.h, p3/M, z6.h, z12.h\n"
+ "fmla z20.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x12, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z3.h, z9.h\n"
+ "fmla z25.h, p3/M, z1.h, z11.h\n"
+ "fmla z31.h, p3/M, z5.h, z10.h\n"
+ "fmla z27.h, p3/M, z2.h, z10.h\n"
"fmla z29.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x16, x8, LSL #1]\n"
"fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z25.h, p3/M, z7.h, z11.h\n"
- "ld1h { z9.h }, p1/Z, [x17, x6, LSL #1]\n"
+ "fmla z26.h, p3/M, z0.h, z11.h\n"
"fmla z19.h, p3/M, z8.h, z12.h\n"
"fmla z23.h, p3/M, z5.h, z12.h\n"
- "fmla z27.h, p3/M, z2.h, z12.h\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x15, x14, LSL #1]\n"
- "addvl x15, x15, #1\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x6, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z11.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmla z27.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x8, LSL #1]\n"
- "addvl x12, x12, #1\n"
- "fmla z24.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x11, x4, LSL #1]\n"
+ "fmla z24.h, p3/M, z2.h, z11.h\n"
+ "fmla z25.h, p3/M, z2.h, z10.h\n"
"fmla z28.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x8, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z11.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z26.h, p3/M, z7.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x7, x16, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x7, x3, LSL #1]\n"
- "addvl x7, x7, #1\n"
- "fmla z29.h, p3/M, z8.h, z12.h\n"
- "fmla z30.h, p3/M, z7.h, z12.h\n"
- "fmla z31.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x3, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z10.h\n"
+ "ld1h { z9.h }, p2/Z, [x14, x4, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "fmla z31.h, p3/M, z3.h, z10.h\n"
+ "fmla z26.h, p3/M, z1.h, z10.h\n"
+ "fmla z27.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "fmla z20.h, p3/M, z7.h, z12.h\n"
+ "fmla z21.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x17, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z9.h\n"
+ "fmla z25.h, p3/M, z3.h, z9.h\n"
+ "fmla z16.h, p3/M, z1.h, z9.h\n"
+ "fmla z17.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z7.h, z9.h\n"
+ "fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x5, x7, LSL #1]\n"
+ "fmla z22.h, p3/M, z8.h, z11.h\n"
+ "fmla z23.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x13, x4, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "fmla z31.h, p3/M, z7.h, z10.h\n"
+ "fmla z26.h, p3/M, z5.h, z10.h\n"
+ "fmla z27.h, p3/M, z4.h, z10.h\n"
+ "fmla z18.h, p3/M, z2.h, z10.h\n"
+ "fmla z19.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z9.h }, p2/Z, [x5, x8, LSL #1]\n"
+ "addvl x5, x5, #1\n"
+ "fmla z24.h, p3/M, z7.h, z11.h\n"
+ "fmla z25.h, p3/M, z6.h, z11.h\n"
+ "fmla z16.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z3.h, z11.h\n"
+ "fmla z20.h, p3/M, z1.h, z11.h\n"
+ "fmla z21.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "fmla z28.h, p3/M, z2.h, z12.h\n"
+ "fmla z29.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x14]\n"
+ "fmla z31.h, p3/M, z0.h, z9.h\n"
+ "fmla z22.h, p3/M, z2.h, z11.h\n"
+ "fmla z26.h, p3/M, z8.h, z11.h\n"
+ "fmla z27.h, p3/M, z7.h, z11.h\n"
"fmla z18.h, p3/M, z5.h, z11.h\n"
+ "fmla z24.h, p3/M, z3.h, z10.h\n"
+ "fmla z16.h, p3/M, z0.h, z10.h\n"
"fmla z19.h, p3/M, z4.h, z11.h\n"
- "fmla z16.h, p3/M, z4.h, z10.h\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x16, LSL #1]\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "fmla z22.h, p3/M, z2.h, z11.h\n"
"fmla z23.h, p3/M, z1.h, z11.h\n"
- "cmp x28, %x[n_channels]\n"
+ "ld1h { z11.h }, p2/Z, [x12, x7, LSL #1]\n"
+ "fmla z29.h, p3/M, z2.h, z9.h\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x15, LSL #1]\n"
+ "addvl x14, x14, #1\n"
+ "fmla z28.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x13]\n"
+ "fmla z21.h, p3/M, z4.h, z11.h\n"
+ "fmla z22.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z9.h }, p1/Z, [x14, x7, LSL #1]\n"
+ "fmla z31.h, p3/M, z8.h, z12.h\n"
+ "fmla z27.h, p3/M, z5.h, z12.h\n"
+ "fmla z19.h, p3/M, z2.h, z12.h\n"
+ "fmla z24.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z12.h }, p2/Z, [x13, x15, LSL #1]\n"
"addvl x13, x13, #1\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "ld1h { z11.h }, p1/Z, [x4, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- ".inst 0xa040a0a0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x5]\n"
- "addvl x5, x5, #4\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "ld1h { z12.h }, p1/Z, [x17, x8, LSL #1]\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- ".inst 0xa040a0a4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x5]\n"
- "addvl x5, x5, #4\n"
- ".inst 0xc16dc9d0 // fclamp { z16.h-z19.h }, z14.h, z13.h\n"
- ".inst 0xc16dc9d4 // fclamp { z20.h-z23.h }, z14.h, z13.h\n"
- "ld1h { z10.h }, p1/Z, [x4]\n"
- "ld1h { z8.h }, p3/Z, [x5]\n"
- "addvl x5, x5, #1\n"
- ".inst 0xc16dc9d8 // fclamp { z24.h-z27.h }, z14.h, z13.h\n"
- ".inst 0xc16dc9dc // fclamp { z28.h-z31.h }, z14.h, z13.h\n"
- "st1h { z16.h }, p0, [x27]\n"
- "st1h { z17.h }, p0, [x27, x9, LSL #1]\n"
- "st1h { z18.h }, p0, [x27, x26, LSL #1]\n"
- "st1h { z19.h }, p0, [x27, x25, LSL #1]\n"
- "addvl x27, x27, #1\n"
- "st1h { z20.h }, p0, [x24]\n"
- "st1h { z21.h }, p0, [x24, x9, LSL #1]\n"
- "st1h { z22.h }, p0, [x24, x26, LSL #1]\n"
- "st1h { z23.h }, p0, [x24, x25, LSL #1]\n"
+ "fmla z16.h, p3/M, z3.h, z10.h\n"
+ "fmla z20.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x11, x7, LSL #1]\n"
+ "fmla z17.h, p3/M, z7.h, z11.h\n"
+ "fmla z18.h, p3/M, z6.h, z11.h\n"
+ "fmla z23.h, p3/M, z2.h, z12.h\n"
+ "fmla z27.h, p3/M, z8.h, z12.h\n"
+ "fmla z21.h, p3/M, z7.h, z10.h\n"
+ "fmla z22.h, p3/M, z6.h, z10.h\n"
+ "fmla z16.h, p3/M, z8.h, z11.h\n"
+ "fmla z20.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x8, LSL #1]\n"
+ "fmla z19.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x11, x8, LSL #1]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z21.h, p3/M, z5.h, z11.h\n"
+ "fmla z22.h, p3/M, z4.h, z11.h\n"
+ "fmla z23.h, p3/M, z3.h, z11.h\n"
+ "fmla z20.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x16, x4, LSL #1]\n"
+ "fmla z17.h, p3/M, z8.h, z11.h\n"
+ "fmla z18.h, p3/M, z7.h, z11.h\n"
+ "fmla z19.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x16, x17, LSL #1]\n"
+ "addvl x16, x16, #1\n"
+ "fmla z21.h, p3/M, z8.h, z12.h\n"
+ "fmla z22.h, p3/M, z7.h, z12.h\n"
+ "fmla z23.h, p3/M, z6.h, z12.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z12.h }, p2/Z, [x12, x4, LSL #1]\n"
+ "fmla z29.h, p3/M, z3.h, z10.h\n"
+ "fmla z24.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x12, x17, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z11.h\n"
+ "whilelt p2.h, x21, %x[n_channels]\n"
+ "fmla z31.h, p3/M, z4.h, z11.h\n"
+ "fmla z26.h, p3/M, z2.h, z11.h\n"
+ "cmp x22, %x[n_channels]\n"
+ "addvl x12, x12, #1\n"
+ "fmla z27.h, p3/M, z1.h, z11.h\n"
+ "fmla z16.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z11.h }, p1/Z, [x5, x15, LSL #1]\n"
+ "fmla z17.h, p3/M, z6.h, z12.h\n"
+ "fmla z20.h, p3/M, z4.h, z12.h\n"
+ "fmla z21.h, p3/M, z3.h, z12.h\n"
+ "fmla z18.h, p3/M, z8.h, z10.h\n"
+ ".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "fmla z19.h, p3/M, z7.h, z10.h\n"
+ "fmla z22.h, p3/M, z5.h, z10.h\n"
+ "ld1h { z12.h }, p1/Z, [x14, x8, LSL #1]\n"
+ "fmla z23.h, p3/M, z4.h, z10.h\n"
+ ".inst 0xa040a0c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ ".inst 0xc16fc9bc // fclamp { z28.h-z31.h }, z13.h, z15.h\n"
+ ".inst 0xc16fc9b8 // fclamp { z24.h-z27.h }, z13.h, z15.h\n"
+ "ld1h { z10.h }, p1/Z, [x5]\n"
+ "ld1h { z8.h }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
+ ".inst 0xc16fc9b0 // fclamp { z16.h-z19.h }, z13.h, z15.h\n"
+ ".inst 0xc16fc9b4 // fclamp { z20.h-z23.h }, z13.h, z15.h\n"
+ "st1h { z28.h }, p0, [x28]\n"
+ "st1h { z29.h }, p0, [x28, x9, LSL #1]\n"
+ "st1h { z30.h }, p0, [x28, x27, LSL #1]\n"
+ "st1h { z31.h }, p0, [x28, x26, LSL #1]\n"
+ "addvl x28, x28, #1\n"
+ "st1h { z24.h }, p0, [x25]\n"
+ "st1h { z25.h }, p0, [x25, x9, LSL #1]\n"
+ "st1h { z26.h }, p0, [x25, x27, LSL #1]\n"
+ "st1h { z27.h }, p0, [x25, x26, LSL #1]\n"
+ "addvl x25, x25, #1\n"
+ "st1h { z16.h }, p0, [x24]\n"
+ "st1h { z17.h }, p0, [x24, x9, LSL #1]\n"
+ "st1h { z18.h }, p0, [x24, x27, LSL #1]\n"
+ "st1h { z19.h }, p0, [x24, x26, LSL #1]\n"
"addvl x24, x24, #1\n"
- "st1h { z24.h }, p0, [x23]\n"
- "st1h { z25.h }, p0, [x23, x9, LSL #1]\n"
- "st1h { z26.h }, p0, [x23, x26, LSL #1]\n"
- "st1h { z27.h }, p0, [x23, x25, LSL #1]\n"
+ "st1h { z20.h }, p0, [x23]\n"
+ "st1h { z21.h }, p0, [x23, x9, LSL #1]\n"
+ "st1h { z22.h }, p0, [x23, x27, LSL #1]\n"
+ "st1h { z23.h }, p0, [x23, x26, LSL #1]\n"
"addvl x23, x23, #1\n"
- "st1h { z28.h }, p0, [x22]\n"
- "st1h { z29.h }, p0, [x22, x9, LSL #1]\n"
- "st1h { z30.h }, p0, [x22, x26, LSL #1]\n"
- "st1h { z31.h }, p0, [x22, x25, LSL #1]\n"
- "addvl x22, x22, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
- "movprfx z21, z15\n fmla z21.h, p3/M, z4.h, z9.h\n"
- "movprfx z16, z15\n fmla z16.h, p3/M, z8.h, z9.h\n"
- "ldr x2, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "movprfx z21, z14\n fmla z21.h, p3/M, z4.h, z9.h\n"
+ "movprfx z24, z14\n fmla z24.h, p3/M, z8.h, z9.h\n"
+ "ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"mov p0.b, p2.b\n"
- "movprfx z22, z15\n fmla z22.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z15\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "ldr x1, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z26, z15\n fmla z26.h, p3/M, z0.h, z9.h\n"
- "movprfx z17, z15\n fmla z17.h, p3/M, z7.h, z9.h\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "movprfx z18, z15\n fmla z18.h, p3/M, z6.h, z9.h\n"
- "movprfx z20, z15\n fmla z20.h, p3/M, z5.h, z9.h\n"
+ "movprfx z22, z14\n fmla z22.h, p3/M, z3.h, z9.h\n"
+ "movprfx z29, z14\n fmla z29.h, p3/M, z1.h, z9.h\n"
+ "ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z30, z14\n fmla z30.h, p3/M, z0.h, z9.h\n"
+ "movprfx z25, z14\n fmla z25.h, p3/M, z7.h, z9.h\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "movprfx z26, z14\n fmla z26.h, p3/M, z6.h, z9.h\n"
+ "movprfx z20, z14\n fmla z20.h, p3/M, z5.h, z9.h\n"
"ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "add x2, x2, #0x1\n"
+ "add x3, x3, #0x1\n"
"fmla z21.h, p3/M, z5.h, z12.h\n"
- "movprfx z24, z15\n fmla z24.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x15, x6, LSL #1]\n"
- "add x20, x1, #0x1\n"
- "fmla z16.h, p3/M, z0.h, z10.h\n"
- "movprfx z19, z15\n fmla z19.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x12]\n"
- "cmp x2, x11\n"
+ "movprfx z28, z14\n fmla z28.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "add x20, x2, #0x1\n"
+ "fmla z24.h, p3/M, z0.h, z10.h\n"
+ "movprfx z27, z14\n fmla z27.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x11]\n"
+ "cmp x3, x22\n"
"fmla z22.h, p3/M, z4.h, z12.h\n"
- "fmla z25.h, p3/M, z2.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x14, LSL #1]\n"
- "csel x1, x1, x20, LT\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z8.h, z12.h\n"
- "csel x2, x2, XZR, LT\n"
- "cmp x1, x21\n"
- "movprfx z28, z15\n fmla z28.h, p3/M, z6.h, z10.h\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z18.h }, p2/Z, [x11, x15, LSL #1]\n"
+ "csel x2, x2, x20, LT\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "fmla z25.h, p3/M, z8.h, z12.h\n"
+ "csel x3, x3, XZR, LT\n"
+ "cmp x2, x21\n"
+ "movprfx z16, z14\n fmla z16.h, p3/M, z6.h, z17.h\n"
"fmla z21.h, p3/M, z7.h, z9.h\n"
- "ld1h { z10.h }, p2/Z, [x15, x8, LSL #1]\n"
- "fmla z18.h, p3/M, z7.h, z12.h\n"
- "fmla z19.h, p3/M, z6.h, z12.h\n"
- "movprfx z23, z15\n fmla z23.h, p3/M, z3.h, z12.h\n"
- "movprfx z27, z15\n fmla z27.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x4, x3, LSL #1]\n"
- "movprfx z31, z15\n fmla z31.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x13, x8, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z12.h\n"
+ "fmla z27.h, p3/M, z6.h, z12.h\n"
+ "movprfx z23, z14\n fmla z23.h, p3/M, z3.h, z12.h\n"
+ "movprfx z31, z14\n fmla z31.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x5, x4, LSL #1]\n"
+ "movprfx z19, z14\n fmla z19.h, p3/M, z8.h, z18.h\n"
"fmla z22.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x4, x16, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x5, x17, LSL #1]\n"
+ "fmla z29.h, p3/M, z4.h, z9.h\n"
+ "fmla z30.h, p3/M, z3.h, z9.h\n"
+ "movprfx z17, z14\n fmla z17.h, p3/M, z1.h, z9.h\n"
+ "movprfx z18, z14\n fmla z18.h, p3/M, z0.h, z9.h\n"
+ "fmla z20.h, p3/M, z8.h, z9.h\n"
+ "fmla z28.h, p3/M, z5.h, z9.h\n"
+ "fmla z16.h, p3/M, z2.h, z9.h\n"
+ "fmla z21.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z14.h }, p2/Z, [x16]\n"
+ "fmla z24.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x16, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "fmla z27.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x12]\n"
+ "fmla z22.h, p3/M, z7.h, z11.h\n"
+ "fmla z23.h, p3/M, z6.h, z11.h\n"
+ "fmla z29.h, p3/M, z5.h, z11.h\n"
+ "fmla z30.h, p3/M, z4.h, z11.h\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "fmla z18.h, p3/M, z1.h, z11.h\n"
+ "fmla z19.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z9.h }, p2/Z, [x16, x7, LSL #1]\n"
+ "fmla z20.h, p3/M, z0.h, z14.h\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "fmla z16.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z3.h, z14.h\n"
+ "fmla z21.h, p3/M, z1.h, z9.h\n"
+ "fmla z27.h, p3/M, z5.h, z10.h\n"
+ "fmla z23.h, p3/M, z2.h, z10.h\n"
"fmla z25.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x16, x8, LSL #1]\n"
"fmla z26.h, p3/M, z3.h, z9.h\n"
- "movprfx z29, z15\n fmla z29.h, p3/M, z1.h, z9.h\n"
- "movprfx z30, z15\n fmla z30.h, p3/M, z0.h, z9.h\n"
- "fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z24.h, p3/M, z5.h, z9.h\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z21.h, p3/M, z8.h, z10.h\n"
- "ld1h { z9.h }, p2/Z, [x7]\n"
- "fmla z16.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x7, x14, LSL #1]\n"
- "fmla z18.h, p3/M, z2.h, z11.h\n"
- "fmla z19.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13]\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z23.h, p3/M, z6.h, z10.h\n"
- "fmla z25.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z4.h, z10.h\n"
- "fmla z27.h, p3/M, z3.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z10.h\n"
- "fmla z30.h, p3/M, z1.h, z10.h\n"
- "fmla z31.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x7, x6, LSL #1]\n"
- "fmla z20.h, p3/M, z0.h, z9.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x14, LSL #1]\n"
- "fmla z16.h, p3/M, z3.h, z9.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z19.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z2.h, z12.h\n"
- "fmla z17.h, p3/M, z4.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x7, x8, LSL #1]\n"
- "fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z22.h, p3/M, z0.h, z10.h\n"
- "fmla z27.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x3, LSL #1]\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
+ "fmla z22.h, p3/M, z0.h, z9.h\n"
+ "fmla z31.h, p3/M, z8.h, z11.h\n"
+ "fmla z19.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x11, x4, LSL #1]\n"
+ "fmla z20.h, p3/M, z2.h, z9.h\n"
"fmla z21.h, p3/M, z2.h, z12.h\n"
- "fmla z16.h, p3/M, z5.h, z10.h\n"
- "fmla z17.h, p3/M, z5.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x17, x3, LSL #1]\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z19.h, p3/M, z3.h, z12.h\n"
+ "fmla z24.h, p3/M, z5.h, z9.h\n"
+ "fmla z25.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x14, x4, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z12.h\n"
+ "fmla z27.h, p3/M, z3.h, z12.h\n"
"fmla z22.h, p3/M, z1.h, z12.h\n"
"fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x17, x16, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z11.h\n"
- "fmla z29.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x16, LSL #1]\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z21.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z1.h, z10.h\n"
- "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x17, LSL #1]\n"
"fmla z16.h, p3/M, z7.h, z10.h\n"
"fmla z17.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x4, x6, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x15, x3, LSL #1]\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z19.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x11, x17, LSL #1]\n"
+ "fmla z20.h, p3/M, z4.h, z9.h\n"
+ "fmla z21.h, p3/M, z3.h, z9.h\n"
+ "fmla z28.h, p3/M, z1.h, z9.h\n"
+ "fmla z29.h, p3/M, z0.h, z9.h\n"
+ "fmla z24.h, p3/M, z7.h, z9.h\n"
+ "fmla z25.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z10.h }, p2/Z, [x5, x7, LSL #1]\n"
+ "fmla z18.h, p3/M, z8.h, z11.h\n"
+ "fmla z19.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z14.h }, p2/Z, [x13, x4, LSL #1]\n"
+ "fmla z26.h, p3/M, z8.h, z12.h\n"
+ "fmla z27.h, p3/M, z7.h, z12.h\n"
"fmla z22.h, p3/M, z5.h, z12.h\n"
"fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x4, x8, LSL #1]\n"
- "fmla z20.h, p3/M, z7.h, z11.h\n"
- "fmla z21.h, p3/M, z6.h, z11.h\n"
- "fmla z24.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x15, x16, LSL #1]\n"
- "fmla z16.h, p3/M, z2.h, z10.h\n"
- "fmla z17.h, p3/M, z1.h, z10.h\n"
- "fmla z18.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x17]\n"
- "fmla z19.h, p3/M, z0.h, z12.h\n"
- "fmla z30.h, p3/M, z2.h, z11.h\n"
- "fmla z22.h, p3/M, z8.h, z11.h\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x5, x8, LSL #1]\n"
+ "fmla z20.h, p3/M, z7.h, z14.h\n"
+ "fmla z21.h, p3/M, z6.h, z14.h\n"
+ "fmla z28.h, p3/M, z4.h, z14.h\n"
+ "fmla z29.h, p3/M, z3.h, z14.h\n"
+ "fmla z16.h, p3/M, z1.h, z14.h\n"
+ "fmla z17.h, p3/M, z0.h, z14.h\n"
+ "ld1h { z14.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "fmla z24.h, p3/M, z2.h, z10.h\n"
+ "fmla z25.h, p3/M, z1.h, z10.h\n"
+ "fmla z26.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x14]\n"
+ "fmla z27.h, p3/M, z0.h, z9.h\n"
+ "fmla z18.h, p3/M, z2.h, z14.h\n"
+ "fmla z22.h, p3/M, z8.h, z14.h\n"
+ "fmla z23.h, p3/M, z7.h, z14.h\n"
+ "fmla z30.h, p3/M, z5.h, z14.h\n"
"fmla z20.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x6, LSL #1]\n"
- "fmla z17.h, p3/M, z2.h, z12.h\n"
- "fmla z18.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x17, x14, LSL #1]\n"
- "fmla z16.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x15]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z25.h, p3/M, z7.h, z11.h\n"
- "fmla z19.h, p3/M, z8.h, z12.h\n"
- "fmla z23.h, p3/M, z5.h, z12.h\n"
- "fmla z27.h, p3/M, z2.h, z12.h\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x15, x14, LSL #1]\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
"fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x6, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z11.h\n"
+ "fmla z31.h, p3/M, z4.h, z14.h\n"
+ "fmla z19.h, p3/M, z1.h, z14.h\n"
+ "ld1h { z11.h }, p2/Z, [x12, x7, LSL #1]\n"
+ "fmla z25.h, p3/M, z2.h, z9.h\n"
+ "fmla z26.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x14, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z14.h }, p2/Z, [x13]\n"
+ "fmla z17.h, p3/M, z4.h, z11.h\n"
+ "fmla z18.h, p3/M, z3.h, z11.h\n"
+ "fmla z27.h, p3/M, z8.h, z12.h\n"
+ "fmla z23.h, p3/M, z5.h, z12.h\n"
"fmla z31.h, p3/M, z2.h, z12.h\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "fmla z29.h, p3/M, z7.h, z10.h\n"
- "fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmla z27.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x8, LSL #1]\n"
- "fmla z24.h, p3/M, z8.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x8, LSL #1]\n"
- "fmla z29.h, p3/M, z5.h, z11.h\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z11.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "fmla z26.h, p3/M, z7.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x7, x16, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x7, x3, LSL #1]\n"
- "fmla z29.h, p3/M, z8.h, z12.h\n"
- "fmla z30.h, p3/M, z7.h, z12.h\n"
- "fmla z31.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x3, LSL #1]\n"
- "fmla z18.h, p3/M, z5.h, z11.h\n"
- "fmla z19.h, p3/M, z4.h, z11.h\n"
- "fmla z16.h, p3/M, z4.h, z10.h\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x16, LSL #1]\n"
+ "fmla z20.h, p3/M, z6.h, z14.h\n"
+ "ld1h { z9.h }, p2/Z, [x13, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z3.h, z14.h\n"
+ "fmla z16.h, p3/M, z0.h, z14.h\n"
+ "ld1h { z12.h }, p2/Z, [x11, x7, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z11.h\n"
+ "fmla z30.h, p3/M, z6.h, z11.h\n"
+ "fmla z19.h, p3/M, z2.h, z9.h\n"
+ "fmla z23.h, p3/M, z8.h, z9.h\n"
+ "fmla z17.h, p3/M, z7.h, z12.h\n"
+ "fmla z18.h, p3/M, z6.h, z12.h\n"
+ "fmla z28.h, p3/M, z8.h, z11.h\n"
+ "fmla z16.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x12, x8, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z9.h\n"
+ "ld1h { z14.h }, p2/Z, [x11, x8, LSL #1]\n"
+ "fmla z17.h, p3/M, z5.h, z10.h\n"
+ "fmla z18.h, p3/M, z4.h, z10.h\n"
+ "fmla z19.h, p3/M, z3.h, z10.h\n"
+ "fmla z16.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x16, x4, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z10.h\n"
+ "fmla z30.h, p3/M, z7.h, z10.h\n"
+ "fmla z31.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z11.h }, p2/Z, [x16, x17, LSL #1]\n"
+ "fmla z17.h, p3/M, z8.h, z14.h\n"
+ "fmla z18.h, p3/M, z7.h, z14.h\n"
+ "fmla z19.h, p3/M, z6.h, z14.h\n"
+ "fmla z24.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z10.h }, p2/Z, [x12, x4, LSL #1]\n"
+ "fmla z25.h, p3/M, z3.h, z9.h\n"
+ "fmla z20.h, p3/M, z1.h, z9.h\n"
+ "fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x12, x17, LSL #1]\n"
+ "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "fmla z27.h, p3/M, z4.h, z11.h\n"
"fmla z22.h, p3/M, z2.h, z11.h\n"
"fmla z23.h, p3/M, z1.h, z11.h\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- ".inst 0xc16dc9d0 // fclamp { z16.h-z19.h }, z14.h, z13.h\n"
- ".inst 0xc16dc9d4 // fclamp { z20.h-z23.h }, z14.h, z13.h\n"
- ".inst 0xc16dc9d8 // fclamp { z24.h-z27.h }, z14.h, z13.h\n"
- ".inst 0xc16dc9dc // fclamp { z28.h-z31.h }, z14.h, z13.h\n"
- "st1h { z16.h }, p0, [x27]\n"
- "st1h { z17.h }, p0, [x27, x9, LSL #1]\n"
- "st1h { z18.h }, p0, [x27, x26, LSL #1]\n"
- "st1h { z19.h }, p0, [x27, x25, LSL #1]\n"
- "st1h { z20.h }, p0, [x24]\n"
- "st1h { z21.h }, p0, [x24, x9, LSL #1]\n"
- "st1h { z22.h }, p0, [x24, x26, LSL #1]\n"
- "st1h { z23.h }, p0, [x24, x25, LSL #1]\n"
- "st1h { z24.h }, p0, [x23]\n"
- "st1h { z25.h }, p0, [x23, x9, LSL #1]\n"
- "st1h { z26.h }, p0, [x23, x26, LSL #1]\n"
- "st1h { z27.h }, p0, [x23, x25, LSL #1]\n"
- "st1h { z28.h }, p0, [x22]\n"
- "st1h { z29.h }, p0, [x22, x9, LSL #1]\n"
- "st1h { z30.h }, p0, [x22, x26, LSL #1]\n"
- "st1h { z31.h }, p0, [x22, x25, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z10.h\n"
+ "fmla z29.h, p3/M, z6.h, z10.h\n"
+ "fmla z16.h, p3/M, z4.h, z10.h\n"
+ "fmla z17.h, p3/M, z3.h, z10.h\n"
+ "fmla z30.h, p3/M, z8.h, z12.h\n"
+ "fmla z31.h, p3/M, z7.h, z12.h\n"
+ "fmla z18.h, p3/M, z5.h, z12.h\n"
+ "fmla z19.h, p3/M, z4.h, z12.h\n"
+ ".inst 0xc16fc9b8 // fclamp { z24.h-z27.h }, z13.h, z15.h\n"
+ ".inst 0xc16fc9b4 // fclamp { z20.h-z23.h }, z13.h, z15.h\n"
+ ".inst 0xc16fc9bc // fclamp { z28.h-z31.h }, z13.h, z15.h\n"
+ ".inst 0xc16fc9b0 // fclamp { z16.h-z19.h }, z13.h, z15.h\n"
+ "st1h { z24.h }, p0, [x28]\n"
+ "st1h { z25.h }, p0, [x28, x9, LSL #1]\n"
+ "st1h { z26.h }, p0, [x28, x27, LSL #1]\n"
+ "st1h { z27.h }, p0, [x28, x26, LSL #1]\n"
+ "st1h { z20.h }, p0, [x25]\n"
+ "st1h { z21.h }, p0, [x25, x9, LSL #1]\n"
+ "st1h { z22.h }, p0, [x25, x27, LSL #1]\n"
+ "st1h { z23.h }, p0, [x25, x26, LSL #1]\n"
+ "st1h { z28.h }, p0, [x24]\n"
+ "st1h { z29.h }, p0, [x24, x9, LSL #1]\n"
+ "st1h { z30.h }, p0, [x24, x27, LSL #1]\n"
+ "st1h { z31.h }, p0, [x24, x26, LSL #1]\n"
+ "st1h { z16.h }, p0, [x23]\n"
+ "st1h { z17.h }, p0, [x23, x9, LSL #1]\n"
+ "st1h { z18.h }, p0, [x23, x27, LSL #1]\n"
+ "st1h { z19.h }, p0, [x23, x26, LSL #1]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 2e6f1123a4..7430ff89ed 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -98,556 +98,556 @@ void sme2_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "mov x15, #0x0\n"
+ "add x17, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x16, #0x0\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldp x14, x13, [x16, #0x0]\n"
- "ldp x12, x11, [x16, #0x10]\n"
- "cnth x10\n"
+ "ldp x23, x22, [x17, #0x0]\n"
+ "ldp x21, x20, [x17, #0x10]\n"
+ "cnth x15\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1h { z14.h }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "cmp x10, %x[n_channels]\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- ".inst 0xa040a220 // ld1h { z0.h-z3.h }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "sub x28, XZR, x10\n"
- ".inst 0xa040a224 // ld1h { z4.h-z7.h }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "ld1rh { z13.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z8.h }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "ld1h { z9.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
+ "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1h { z13.h }, p3/Z, [x8]\n"
+ "addvl x8, x8, #1\n"
+ "cmp x15, %x[n_channels]\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ ".inst 0xa040a100 // ld1h { z0.h-z3.h }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
+ "sub x13, XZR, x15\n"
+ ".inst 0xa040a104 // ld1h { z4.h-z7.h }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1h { z8.h }, p3/Z, [x8]\n"
+ "addvl x8, x8, #1\n"
+ "ld1h { z9.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z21, z14\n fmla z21.h, p3/M, z4.h, z9.h\n"
- "movprfx z16, z14\n fmla z16.h, p3/M, z8.h, z9.h\n"
- "ldr x27, [x16, #0x20]\n"
- "inch x28\n"
- "movprfx z22, z14\n fmla z22.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z14\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "ldr x26, [x16, #0x30]\n"
+ "movprfx z25, z13\n fmla z25.h, p3/M, z4.h, z9.h\n"
+ "movprfx z16, z13\n fmla z16.h, p3/M, z8.h, z9.h\n"
+ "ldr x24, [x17, #0x20]\n"
+ "inch x13\n"
+ "movprfx z26, z13\n fmla z26.h, p3/M, z3.h, z9.h\n"
+ "movprfx z29, z13\n fmla z29.h, p3/M, z1.h, z9.h\n"
+ "ldr x20, [x17, #0x30]\n"
"mov p1.b, p2.b\n"
- "movprfx z26, z14\n fmla z26.h, p3/M, z0.h, z9.h\n"
- "ldr x25, [x16, #0x28]\n"
- "movprfx z17, z14\n fmla z17.h, p3/M, z7.h, z9.h\n"
- "whilelt p0.h, x10, %x[n_channels]\n"
- "movprfx z18, z14\n fmla z18.h, p3/M, z6.h, z9.h\n"
- "movprfx z20, z14\n fmla z20.h, p3/M, z5.h, z9.h\n"
- "ldr x24, [x16, #0x38]\n"
- "fmla z21.h, p3/M, z5.h, z12.h\n"
- "movprfx z24, z14\n fmla z24.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x14, [x16, #0x40]\n"
+ "movprfx z30, z13\n fmla z30.h, p3/M, z0.h, z9.h\n"
+ "ldr x21, [x17, #0x28]\n"
+ "movprfx z17, z13\n fmla z17.h, p3/M, z7.h, z9.h\n"
+ "whilelt p0.h, x15, %x[n_channels]\n"
+ "movprfx z18, z13\n fmla z18.h, p3/M, z6.h, z9.h\n"
+ "movprfx z24, z13\n fmla z24.h, p3/M, z5.h, z9.h\n"
+ "ldr x23, [x17, #0x38]\n"
+ "fmla z25.h, p3/M, z5.h, z12.h\n"
+ "movprfx z28, z13\n fmla z28.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x22, [x17, #0x40]\n"
"fmla z16.h, p3/M, z0.h, z10.h\n"
- "movprfx z19, z14\n fmla z19.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x27, x15, LSL #1]\n"
- "ldr x13, [x16, #0x48]\n"
- "fmla z22.h, p3/M, z4.h, z12.h\n"
- "fmla z25.h, p3/M, z2.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x12, [x16, #0x50]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
+ "movprfx z19, z13\n fmla z19.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "ldr x20, [x17, #0x48]\n"
+ "fmla z26.h, p3/M, z4.h, z12.h\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z22.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "ldr x27, [x17, #0x50]\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
"fmla z17.h, p3/M, z8.h, z12.h\n"
- "ldr x27, [x16, #0x60]\n"
+ "ldr x26, [x17, #0x60]\n"
"fmla z18.h, p3/M, z7.h, z12.h\n"
- "movprfx z28, z14\n fmla z28.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x25, [x16, #0x68]\n"
- "fmla z21.h, p3/M, z7.h, z9.h\n"
+ "movprfx z20, z13\n fmla z20.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x25, [x17, #0x68]\n"
+ "fmla z25.h, p3/M, z7.h, z9.h\n"
"fmla z19.h, p3/M, z6.h, z12.h\n"
- "ldr x11, [x16, #0x58]\n"
- "movprfx z23, z14\n fmla z23.h, p3/M, z3.h, z12.h\n"
- "movprfx z27, z14\n fmla z27.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
- "ldr x26, [x16, #0x70]\n"
- "movprfx z31, z14\n fmla z31.h, p3/M, z8.h, z11.h\n"
- "fmla z22.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ldr x24, [x16, #0x78]\n"
- "fmla z25.h, p3/M, z4.h, z9.h\n"
- "fmla z26.h, p3/M, z3.h, z9.h\n"
- "ldr x14, [x16, #0x80]\n"
- "movprfx z29, z14\n fmla z29.h, p3/M, z1.h, z9.h\n"
- "movprfx z30, z14\n fmla z30.h, p3/M, z0.h, z9.h\n"
- "ldr x13, [x16, #0x88]\n"
- "ld1h { z14.h }, p3/Z, [x17]\n"
- "fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z24.h, p3/M, z5.h, z9.h\n"
- "ldr x23, [x9, #0x0]\n"
- "addvl x17, x17, #1\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z16.h, p3/M, z1.h, z12.h\n"
- "ld1h { z9.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldr x12, [x16, #0x90]\n"
- "fmla z17.h, p3/M, z0.h, z12.h\n"
- "fmla z18.h, p3/M, z2.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "ldr x11, [x16, #0x98]\n"
- "fmla z21.h, p3/M, z8.h, z10.h\n"
- "fmla z19.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0xa0]\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z23.h, p3/M, z6.h, z10.h\n"
- "ldr x22, [x9, #0x8]\n"
- "fmla z25.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z4.h, z10.h\n"
- "ldr x21, [x9, #0x10]\n"
- "fmla z27.h, p3/M, z3.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z10.h\n"
- "ldr x20, [x9, #0x18]\n"
- "fmla z30.h, p3/M, z1.h, z10.h\n"
- "fmla z31.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x16, #0xa8]\n"
+ "ldr x21, [x17, #0x58]\n"
+ "movprfx z27, z13\n fmla z27.h, p3/M, z3.h, z12.h\n"
+ "movprfx z31, z13\n fmla z31.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "ldr x24, [x17, #0x70]\n"
+ "movprfx z23, z13\n fmla z23.h, p3/M, z8.h, z22.h\n"
+ "fmla z26.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x78]\n"
+ "fmla z29.h, p3/M, z4.h, z9.h\n"
+ "fmla z30.h, p3/M, z3.h, z9.h\n"
+ "ldr x22, [x17, #0x80]\n"
+ "movprfx z21, z13\n fmla z21.h, p3/M, z1.h, z9.h\n"
+ "movprfx z22, z13\n fmla z22.h, p3/M, z0.h, z9.h\n"
+ "ldr x20, [x17, #0x88]\n"
+ "ld1h { z13.h }, p3/Z, [x8]\n"
+ "fmla z24.h, p3/M, z8.h, z9.h\n"
+ "fmla z28.h, p3/M, z5.h, z9.h\n"
+ "ldr x12, [x14, #0x0]\n"
+ "addvl x8, x8, #1\n"
+ "fmla z20.h, p3/M, z2.h, z9.h\n"
+ "fmla z16.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z9.h }, p2/Z, [x27, x16, LSL #1]\n"
+ "ldr x27, [x17, #0x90]\n"
+ "fmla z17.h, p3/M, z0.h, z11.h\n"
+ "fmla z18.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x98]\n"
+ "fmla z25.h, p3/M, z8.h, z10.h\n"
+ "fmla z19.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "ldr x26, [x17, #0xa0]\n"
+ "fmla z26.h, p3/M, z7.h, z10.h\n"
+ "fmla z27.h, p3/M, z6.h, z10.h\n"
+ "ldr x11, [x14, #0x8]\n"
+ "fmla z29.h, p3/M, z5.h, z10.h\n"
+ "fmla z30.h, p3/M, z4.h, z10.h\n"
+ "ldr x10, [x14, #0x10]\n"
+ "fmla z31.h, p3/M, z3.h, z10.h\n"
+ "fmla z21.h, p3/M, z2.h, z10.h\n"
+ "ldr x9, [x14, #0x18]\n"
+ "fmla z22.h, p3/M, z1.h, z10.h\n"
+ "fmla z23.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xa8]\n"
"fmla z16.h, p3/M, z3.h, z9.h\n"
- "fmla z20.h, p3/M, z0.h, z9.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0xb0]\n"
+ "fmla z24.h, p3/M, z0.h, z9.h\n"
+ "fmla z28.h, p3/M, z6.h, z12.h\n"
+ "fmla z20.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xb0]\n"
"fmla z17.h, p3/M, z4.h, z10.h\n"
"fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z19.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z2.h, z12.h\n"
- "fmla z22.h, p3/M, z0.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
- "ldr x24, [x16, #0xb8]\n"
- "fmla z27.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ldr x14, [x16, #0xc0]\n"
+ "fmla z25.h, p3/M, z1.h, z10.h\n"
+ "fmla z19.h, p3/M, z5.h, z11.h\n"
+ "fmla z27.h, p3/M, z2.h, z11.h\n"
+ "fmla z26.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z11.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x17, #0xb8]\n"
+ "fmla z31.h, p3/M, z8.h, z12.h\n"
+ "fmla z23.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "ldr x22, [x17, #0xc0]\n"
"fmla z16.h, p3/M, z5.h, z10.h\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x13, [x16, #0xc8]\n"
- "fmla z17.h, p3/M, z5.h, z12.h\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z21.h, p3/M, z2.h, z12.h\n"
- "fmla z19.h, p3/M, z3.h, z12.h\n"
- "fmla z22.h, p3/M, z1.h, z12.h\n"
- "fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "ldr x11, [x16, #0xd8]\n"
- "fmla z28.h, p3/M, z7.h, z11.h\n"
- "fmla z29.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldr x12, [x16, #0xd0]\n"
- "fmla z16.h, p3/M, z7.h, z10.h\n"
- "fmla z17.h, p3/M, z6.h, z10.h\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z21.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z1.h, z10.h\n"
- "fmla z25.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0xe0]\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "ldr x25, [x16, #0xe8]\n"
- "fmla z19.h, p3/M, z7.h, z12.h\n"
- "fmla z22.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0xf0]\n"
+ "fmla z24.h, p3/M, z2.h, z10.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x20, [x17, #0xc8]\n"
+ "fmla z17.h, p3/M, z5.h, z11.h\n"
+ "fmla z18.h, p3/M, z4.h, z11.h\n"
+ "fmla z25.h, p3/M, z2.h, z11.h\n"
+ "fmla z19.h, p3/M, z3.h, z11.h\n"
+ "fmla z26.h, p3/M, z1.h, z11.h\n"
+ "fmla z27.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "ldr x28, [x17, #0xd8]\n"
+ "fmla z20.h, p3/M, z7.h, z9.h\n"
+ "fmla z21.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x27, x16, LSL #1]\n"
+ "ldr x21, [x17, #0xd0]\n"
+ "fmla z16.h, p3/M, z7.h, z12.h\n"
+ "fmla z17.h, p3/M, z6.h, z12.h\n"
+ "fmla z24.h, p3/M, z4.h, z12.h\n"
+ "fmla z25.h, p3/M, z3.h, z12.h\n"
+ "fmla z28.h, p3/M, z1.h, z12.h\n"
+ "fmla z29.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "ldr x27, [x17, #0xe0]\n"
+ "fmla z18.h, p3/M, z8.h, z11.h\n"
+ "fmla z22.h, p3/M, z8.h, z9.h\n"
+ "fmla z23.h, p3/M, z7.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z11.h\n"
+ "ldr x26, [x17, #0xe8]\n"
+ "fmla z19.h, p3/M, z7.h, z11.h\n"
+ "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "fmla z27.h, p3/M, z4.h, z11.h\n"
+ "fmla z30.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xf0]\n"
"fmla z16.h, p3/M, z2.h, z10.h\n"
"fmla z17.h, p3/M, z1.h, z10.h\n"
"fmla z18.h, p3/M, z0.h, z10.h\n"
- "fmla z20.h, p3/M, z7.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x15, LSL #1]\n"
- "ldr x24, [x16, #0xf8]\n"
- "fmla z21.h, p3/M, z6.h, z11.h\n"
- "fmla z24.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z17.h, p3/M, z2.h, z12.h\n"
- "ldr x14, [x16, #0x100]\n"
- "fmla z18.h, p3/M, z1.h, z12.h\n"
- "fmla z19.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x13, [x16, #0x108]\n"
- "fmla z16.h, p3/M, z6.h, z10.h\n"
- "fmla z20.h, p3/M, z3.h, z10.h\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z2.h, z11.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z22.h, p3/M, z8.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldr x12, [x16, #0x110]\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xf8]\n"
+ "fmla z25.h, p3/M, z6.h, z12.h\n"
+ "fmla z28.h, p3/M, z4.h, z12.h\n"
+ "fmla z29.h, p3/M, z3.h, z12.h\n"
+ "fmla z20.h, p3/M, z1.h, z12.h\n"
+ "fmla z21.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "ldr x22, [x17, #0x100]\n"
+ "fmla z18.h, p3/M, z1.h, z11.h\n"
+ "fmla z19.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x20, [x17, #0x108]\n"
+ "fmla z16.h, p3/M, z6.h, z9.h\n"
+ "fmla z24.h, p3/M, z3.h, z9.h\n"
+ "fmla z31.h, p3/M, z4.h, z10.h\n"
+ "fmla z22.h, p3/M, z2.h, z10.h\n"
+ "fmla z28.h, p3/M, z0.h, z9.h\n"
+ "fmla z26.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x110]\n"
+ "fmla z27.h, p3/M, z7.h, z10.h\n"
+ "fmla z30.h, p3/M, z5.h, z10.h\n"
+ "fmla z23.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x28, x16, LSL #1]\n"
"fmla z19.h, p3/M, z8.h, z12.h\n"
- "ldr x11, [x16, #0x118]\n"
- "fmla z27.h, p3/M, z2.h, z12.h\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z23.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x15, LSL #1]\n"
- "fmla z25.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmla z24.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z27.h, p3/M, z5.h, z12.h\n"
+ "ldr x21, [x17, #0x118]\n"
"fmla z31.h, p3/M, z2.h, z12.h\n"
+ "fmla z20.h, p3/M, z0.h, z11.h\n"
+ "fmla z24.h, p3/M, z6.h, z11.h\n"
+ "fmla z28.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "fmla z21.h, p3/M, z4.h, z10.h\n"
+ "fmla z22.h, p3/M, z3.h, z10.h\n"
+ "fmla z27.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x27, x16, LSL #1]\n"
"fmla z29.h, p3/M, z7.h, z10.h\n"
"fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z20.h, p3/M, z5.h, z10.h\n"
"fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z7.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z11.h\n"
- "fmla z29.h, p3/M, z5.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z11.h\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x15, LSL #1]\n"
- "fmla z16.h, p3/M, z4.h, z10.h\n"
- "ldp x14, x13, [x16, #0x0]\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z18.h, p3/M, z5.h, z11.h\n"
- "fmla z19.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z9.h\n"
+ "fmla z23.h, p3/M, z2.h, z9.h\n"
+ "fmla z21.h, p3/M, z7.h, z11.h\n"
+ "fmla z22.h, p3/M, z6.h, z11.h\n"
+ "fmla z20.h, p3/M, z8.h, z11.h\n"
"fmla z29.h, p3/M, z8.h, z12.h\n"
- "ld1h { z9.h }, p0/Z, [x14, x10, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x22, x16, LSL #1]\n"
"fmla z30.h, p3/M, z7.h, z12.h\n"
+ "fmla z27.h, p3/M, z8.h, z9.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x16, LSL #1]\n"
"fmla z31.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldp x12, x11, [x16, #0x10]\n"
- "fmla z22.h, p3/M, z2.h, z11.h\n"
- "fmla z23.h, p3/M, z1.h, z11.h\n"
+ "fmla z23.h, p3/M, z3.h, z12.h\n"
+ "fmla z21.h, p3/M, z5.h, z12.h\n"
+ "fmla z22.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldp x20, x22, [x17, #0x0]\n"
+ "fmla z16.h, p3/M, z4.h, z10.h\n"
+ "fmla z17.h, p3/M, z3.h, z10.h\n"
+ "fmla z24.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "fmla z18.h, p3/M, z5.h, z12.h\n"
+ "fmla z19.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z9.h }, p0/Z, [x20, x15, LSL #1]\n"
+ "fmla z21.h, p3/M, z8.h, z11.h\n"
+ "fmla z22.h, p3/M, z7.h, z11.h\n"
+ "fmla z23.h, p3/M, z6.h, z11.h\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z0.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "ldp x21, x20, [x17, #0x10]\n"
+ "fmla z27.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z8.h, z10.h\n"
+ "inch x16\n"
+ ".inst 0xc16fc9d0 // fclamp { z16.h-z19.h }, z14.h, z15.h\n"
+ "fmla z31.h, p3/M, z7.h, z10.h\n"
+ "whilelt p2.h, x16, %x[n_channels]\n"
+ "fmla z28.h, p3/M, z7.h, z0.h\n"
+ "fmla z29.h, p3/M, z6.h, z0.h\n"
+ "ld1h { z11.h }, p0/Z, [x21, x15, LSL #1]\n"
+ "fmla z20.h, p3/M, z4.h, z0.h\n"
+ "fmla z21.h, p3/M, z3.h, z0.h\n"
+ "ld1h { z12.h }, p0/Z, [x20, x15, LSL #1]\n"
+ ".inst 0xc16fc9d8 // fclamp { z24.h-z27.h }, z14.h, z15.h\n"
+ "fmla z22.h, p3/M, z5.h, z10.h\n"
+ ".inst 0xa040a100 // ld1h { z0.h-z3.h }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
+ "st1h { z16.h }, p1, [x12, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x20]\n"
+ "fmla z23.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z10.h }, p0/Z, [x22, x15, LSL #1]\n"
+ "st1h { z17.h }, p1, [x11, x13, LSL #1]\n"
+ "ldr x22, [x14, #0x28]\n"
+ ".inst 0xc16fc9dc // fclamp { z28.h-z31.h }, z14.h, z15.h\n"
"inch x15\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- "whilelt p2.h, x15, %x[n_channels]\n"
- ".inst 0xc16dc9f0 // fclamp { z16.h-z19.h }, z15.h, z13.h\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "ld1h { z11.h }, p0/Z, [x12, x10, LSL #1]\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- ".inst 0xc16dc9f4 // fclamp { z20.h-z23.h }, z15.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "ld1h { z12.h }, p0/Z, [x11, x10, LSL #1]\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z10.h }, p0/Z, [x13, x10, LSL #1]\n"
- "inch x10\n"
- "st1h { z16.h }, p1, [x23, x28, LSL #1]\n"
- "ldr x23, [x9, #0x20]\n"
- ".inst 0xa040a220 // ld1h { z0.h-z3.h }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "st1h { z17.h }, p1, [x22, x28, LSL #1]\n"
- "ldr x22, [x9, #0x28]\n"
- ".inst 0xc16dc9f8 // fclamp { z24.h-z27.h }, z15.h, z13.h\n"
- ".inst 0xa040a224 // ld1h { z4.h-z7.h }, pn8.b/Z, [x17]\n"
- "st1h { z18.h }, p1, [x21, x28, LSL #1]\n"
- "ldr x21, [x9, #0x30]\n"
- "addvl x17, x17, #4\n"
- "cmp x10, %x[n_channels]\n"
- "st1h { z19.h }, p1, [x20, x28, LSL #1]\n"
- "ldr x20, [x9, #0x38]\n"
- ".inst 0xc16dc9fc // fclamp { z28.h-z31.h }, z15.h, z13.h\n"
- "ld1h { z8.h }, p3/Z, [x17]\n"
- "st1h { z20.h }, p1, [x23, x28, LSL #1]\n"
- "ldr x23, [x9, #0x40]\n"
- "addvl x17, x17, #1\n"
- "st1h { z21.h }, p1, [x22, x28, LSL #1]\n"
- "ldr x22, [x9, #0x48]\n"
- "st1h { z22.h }, p1, [x21, x28, LSL #1]\n"
- "ldr x21, [x9, #0x50]\n"
- "st1h { z23.h }, p1, [x20, x28, LSL #1]\n"
- "ldr x20, [x9, #0x58]\n"
- "st1h { z24.h }, p1, [x23, x28, LSL #1]\n"
- "ldr x23, [x9, #0x60]\n"
- "st1h { z25.h }, p1, [x22, x28, LSL #1]\n"
- "ldr x22, [x9, #0x68]\n"
- "st1h { z26.h }, p1, [x21, x28, LSL #1]\n"
- "ldr x21, [x9, #0x70]\n"
- "st1h { z27.h }, p1, [x20, x28, LSL #1]\n"
- "ldr x20, [x9, #0x78]\n"
- "st1h { z28.h }, p1, [x23, x28, LSL #1]\n"
- "st1h { z29.h }, p1, [x22, x28, LSL #1]\n"
- "st1h { z30.h }, p1, [x21, x28, LSL #1]\n"
- "st1h { z31.h }, p1, [x20, x28, LSL #1]\n"
+ "st1h { z18.h }, p1, [x10, x13, LSL #1]\n"
+ "ldr x21, [x14, #0x30]\n"
+ ".inst 0xa040a104 // ld1h { z4.h-z7.h }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
+ "st1h { z19.h }, p1, [x9, x13, LSL #1]\n"
+ "ldr x20, [x14, #0x38]\n"
+ "cmp x15, %x[n_channels]\n"
+ "ld1h { z8.h }, p3/Z, [x8]\n"
+ "st1h { z24.h }, p1, [x23, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x40]\n"
+ ".inst 0xc16fc9d4 // fclamp { z20.h-z23.h }, z14.h, z15.h\n"
+ "addvl x8, x8, #1\n"
+ "st1h { z25.h }, p1, [x22, x13, LSL #1]\n"
+ "ldr x22, [x14, #0x48]\n"
+ "st1h { z26.h }, p1, [x21, x13, LSL #1]\n"
+ "ldr x21, [x14, #0x50]\n"
+ "st1h { z27.h }, p1, [x20, x13, LSL #1]\n"
+ "ldr x20, [x14, #0x58]\n"
+ "st1h { z28.h }, p1, [x23, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x60]\n"
+ "st1h { z29.h }, p1, [x22, x13, LSL #1]\n"
+ "ldr x22, [x14, #0x68]\n"
+ "st1h { z30.h }, p1, [x21, x13, LSL #1]\n"
+ "ldr x21, [x14, #0x70]\n"
+ "st1h { z31.h }, p1, [x20, x13, LSL #1]\n"
+ "ldr x20, [x14, #0x78]\n"
+ "st1h { z20.h }, p1, [x23, x13, LSL #1]\n"
+ "st1h { z21.h }, p1, [x22, x13, LSL #1]\n"
+ "st1h { z22.h }, p1, [x21, x13, LSL #1]\n"
+ "st1h { z23.h }, p1, [x20, x13, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z21, z14\n fmla z21.h, p3/M, z4.h, z9.h\n"
- "movprfx z16, z14\n fmla z16.h, p3/M, z8.h, z9.h\n"
- "ldr x27, [x16, #0x20]\n"
- "inch x28\n"
- "movprfx z22, z14\n fmla z22.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z14\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "ldr x26, [x16, #0x30]\n"
- "mov p1.b, p2.b\n"
- "movprfx z26, z14\n fmla z26.h, p3/M, z0.h, z9.h\n"
- "ldr x25, [x16, #0x28]\n"
- "movprfx z17, z14\n fmla z17.h, p3/M, z7.h, z9.h\n"
- "movprfx z18, z14\n fmla z18.h, p3/M, z6.h, z9.h\n"
- "movprfx z20, z14\n fmla z20.h, p3/M, z5.h, z9.h\n"
- "ldr x24, [x16, #0x38]\n"
+ "movprfx z21, z13\n fmla z21.h, p3/M, z4.h, z9.h\n"
+ "movprfx z24, z13\n fmla z24.h, p3/M, z8.h, z9.h\n"
+ "ldr x24, [x17, #0x20]\n"
+ "inch x13\n"
+ "movprfx z22, z13\n fmla z22.h, p3/M, z3.h, z9.h\n"
+ "movprfx z29, z13\n fmla z29.h, p3/M, z1.h, z9.h\n"
+ "ldr x20, [x17, #0x30]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z30, z13\n fmla z30.h, p3/M, z0.h, z9.h\n"
+ "ldr x23, [x17, #0x28]\n"
+ "movprfx z25, z13\n fmla z25.h, p3/M, z7.h, z9.h\n"
+ "movprfx z26, z13\n fmla z26.h, p3/M, z6.h, z9.h\n"
+ "movprfx z20, z13\n fmla z20.h, p3/M, z5.h, z9.h\n"
+ "ldr x22, [x17, #0x38]\n"
"fmla z21.h, p3/M, z5.h, z12.h\n"
- "movprfx z24, z14\n fmla z24.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x14, [x16, #0x40]\n"
- "fmla z16.h, p3/M, z0.h, z10.h\n"
- "movprfx z19, z14\n fmla z19.h, p3/M, z2.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x27, x15, LSL #1]\n"
- "ldr x13, [x16, #0x48]\n"
+ "movprfx z28, z13\n fmla z28.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x40]\n"
+ "fmla z24.h, p3/M, z0.h, z10.h\n"
+ "movprfx z27, z13\n fmla z27.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "ldr x20, [x17, #0x48]\n"
"fmla z22.h, p3/M, z4.h, z12.h\n"
- "fmla z25.h, p3/M, z2.h, z12.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x12, [x16, #0x50]\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "fmla z17.h, p3/M, z8.h, z12.h\n"
- "ldr x27, [x16, #0x60]\n"
- "fmla z18.h, p3/M, z7.h, z12.h\n"
- "movprfx z28, z14\n fmla z28.h, p3/M, z6.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x25, [x16, #0x68]\n"
+ "fmla z29.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z18.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "ldr x27, [x17, #0x50]\n"
+ "fmla z30.h, p3/M, z1.h, z12.h\n"
+ "fmla z25.h, p3/M, z8.h, z12.h\n"
+ "ldr x26, [x17, #0x60]\n"
+ "fmla z26.h, p3/M, z7.h, z12.h\n"
+ "movprfx z16, z13\n fmla z16.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x25, [x17, #0x68]\n"
"fmla z21.h, p3/M, z7.h, z9.h\n"
- "fmla z19.h, p3/M, z6.h, z12.h\n"
- "ldr x11, [x16, #0x58]\n"
- "movprfx z23, z14\n fmla z23.h, p3/M, z3.h, z12.h\n"
- "movprfx z27, z14\n fmla z27.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
- "ldr x26, [x16, #0x70]\n"
- "movprfx z31, z14\n fmla z31.h, p3/M, z8.h, z11.h\n"
+ "fmla z27.h, p3/M, z6.h, z12.h\n"
+ "ldr x20, [x17, #0x58]\n"
+ "movprfx z23, z13\n fmla z23.h, p3/M, z3.h, z12.h\n"
+ "movprfx z31, z13\n fmla z31.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "ldr x24, [x17, #0x70]\n"
+ "movprfx z19, z13\n fmla z19.h, p3/M, z8.h, z18.h\n"
"fmla z22.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ldr x24, [x16, #0x78]\n"
- "fmla z25.h, p3/M, z4.h, z9.h\n"
- "fmla z26.h, p3/M, z3.h, z9.h\n"
- "ldr x14, [x16, #0x80]\n"
- "movprfx z29, z14\n fmla z29.h, p3/M, z1.h, z9.h\n"
- "movprfx z30, z14\n fmla z30.h, p3/M, z0.h, z9.h\n"
- "ldr x13, [x16, #0x88]\n"
+ "ld1h { z12.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x78]\n"
+ "fmla z29.h, p3/M, z4.h, z9.h\n"
+ "fmla z30.h, p3/M, z3.h, z9.h\n"
+ "ldr x22, [x17, #0x80]\n"
+ "movprfx z17, z13\n fmla z17.h, p3/M, z1.h, z9.h\n"
+ "movprfx z18, z13\n fmla z18.h, p3/M, z0.h, z9.h\n"
+ "ldr x21, [x17, #0x88]\n"
"fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z24.h, p3/M, z5.h, z9.h\n"
- "ldr x23, [x9, #0x0]\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "fmla z16.h, p3/M, z1.h, z12.h\n"
- "ld1h { z9.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldr x12, [x16, #0x90]\n"
- "fmla z17.h, p3/M, z0.h, z12.h\n"
- "fmla z18.h, p3/M, z2.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "ldr x11, [x16, #0x98]\n"
- "fmla z21.h, p3/M, z8.h, z10.h\n"
- "fmla z19.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0xa0]\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z23.h, p3/M, z6.h, z10.h\n"
- "ldr x22, [x9, #0x8]\n"
+ "fmla z28.h, p3/M, z5.h, z9.h\n"
+ "ldr x12, [x14, #0x0]\n"
+ "fmla z16.h, p3/M, z2.h, z9.h\n"
+ "fmla z24.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z13.h }, p2/Z, [x27, x16, LSL #1]\n"
+ "ldr x27, [x17, #0x90]\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x20, [x17, #0x98]\n"
+ "fmla z21.h, p3/M, z8.h, z11.h\n"
+ "fmla z27.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "ldr x26, [x17, #0xa0]\n"
+ "fmla z22.h, p3/M, z7.h, z11.h\n"
+ "fmla z23.h, p3/M, z6.h, z11.h\n"
+ "ldr x11, [x14, #0x8]\n"
+ "fmla z29.h, p3/M, z5.h, z11.h\n"
+ "fmla z30.h, p3/M, z4.h, z11.h\n"
+ "ldr x10, [x14, #0x10]\n"
+ "fmla z31.h, p3/M, z3.h, z11.h\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "ldr x9, [x14, #0x18]\n"
+ "fmla z18.h, p3/M, z1.h, z11.h\n"
+ "fmla z19.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xa8]\n"
+ "fmla z24.h, p3/M, z3.h, z13.h\n"
+ "fmla z20.h, p3/M, z0.h, z13.h\n"
+ "fmla z28.h, p3/M, z6.h, z9.h\n"
+ "fmla z16.h, p3/M, z3.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xb0]\n"
+ "fmla z25.h, p3/M, z4.h, z11.h\n"
+ "fmla z26.h, p3/M, z3.h, z11.h\n"
+ "fmla z21.h, p3/M, z1.h, z11.h\n"
+ "fmla z27.h, p3/M, z5.h, z10.h\n"
+ "fmla z23.h, p3/M, z2.h, z10.h\n"
+ "fmla z22.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x17, #0xb8]\n"
+ "fmla z31.h, p3/M, z8.h, z9.h\n"
+ "fmla z19.h, p3/M, z5.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "ldr x22, [x17, #0xc0]\n"
+ "fmla z24.h, p3/M, z5.h, z11.h\n"
+ "fmla z20.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0xc8]\n"
"fmla z25.h, p3/M, z5.h, z10.h\n"
"fmla z26.h, p3/M, z4.h, z10.h\n"
- "ldr x21, [x9, #0x10]\n"
+ "fmla z21.h, p3/M, z2.h, z10.h\n"
"fmla z27.h, p3/M, z3.h, z10.h\n"
- "fmla z29.h, p3/M, z2.h, z10.h\n"
- "ldr x20, [x9, #0x18]\n"
- "fmla z30.h, p3/M, z1.h, z10.h\n"
- "fmla z31.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z16.h, p3/M, z3.h, z9.h\n"
- "fmla z20.h, p3/M, z0.h, z9.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
+ "fmla z22.h, p3/M, z1.h, z10.h\n"
+ "fmla z23.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x28, [x17, #0xd8]\n"
+ "fmla z16.h, p3/M, z7.h, z9.h\n"
+ "fmla z17.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z13.h }, p2/Z, [x27, x16, LSL #1]\n"
+ "ldr x20, [x17, #0xd0]\n"
+ "fmla z24.h, p3/M, z7.h, z11.h\n"
+ "fmla z25.h, p3/M, z6.h, z11.h\n"
+ "fmla z20.h, p3/M, z4.h, z11.h\n"
+ "fmla z21.h, p3/M, z3.h, z11.h\n"
+ "fmla z28.h, p3/M, z1.h, z11.h\n"
+ "fmla z29.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "ldr x27, [x17, #0xe0]\n"
+ "fmla z26.h, p3/M, z8.h, z12.h\n"
+ "fmla z18.h, p3/M, z8.h, z13.h\n"
+ "fmla z19.h, p3/M, z7.h, z13.h\n"
+ "ld1h { z10.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "ldr x26, [x17, #0xe8]\n"
+ "fmla z27.h, p3/M, z7.h, z12.h\n"
+ "fmla z22.h, p3/M, z5.h, z12.h\n"
+ "fmla z23.h, p3/M, z4.h, z12.h\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xf0]\n"
+ "fmla z24.h, p3/M, z2.h, z11.h\n"
+ "fmla z25.h, p3/M, z1.h, z11.h\n"
+ "fmla z26.h, p3/M, z0.h, z11.h\n"
+ "fmla z20.h, p3/M, z7.h, z10.h\n"
+ "ld1h { z13.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xf8]\n"
+ "fmla z21.h, p3/M, z6.h, z10.h\n"
+ "fmla z28.h, p3/M, z4.h, z10.h\n"
+ "fmla z29.h, p3/M, z3.h, z10.h\n"
+ "fmla z16.h, p3/M, z1.h, z10.h\n"
+ "fmla z17.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z9.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "fmla z25.h, p3/M, z2.h, z12.h\n"
+ "ldr x23, [x17, #0x100]\n"
+ "fmla z26.h, p3/M, z1.h, z12.h\n"
+ "fmla z27.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "ldr x22, [x17, #0x108]\n"
+ "fmla z24.h, p3/M, z6.h, z13.h\n"
+ "fmla z20.h, p3/M, z3.h, z13.h\n"
+ "fmla z31.h, p3/M, z4.h, z9.h\n"
+ "fmla z18.h, p3/M, z2.h, z9.h\n"
+ "fmla z28.h, p3/M, z0.h, z13.h\n"
+ "fmla z22.h, p3/M, z8.h, z9.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x110]\n"
+ "fmla z23.h, p3/M, z7.h, z9.h\n"
+ "fmla z30.h, p3/M, z5.h, z9.h\n"
+ "fmla z19.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z10.h }, p2/Z, [x28, x16, LSL #1]\n"
+ "fmla z27.h, p3/M, z8.h, z12.h\n"
+ "ldr x20, [x17, #0x118]\n"
+ "fmla z31.h, p3/M, z2.h, z12.h\n"
+ "fmla z16.h, p3/M, z0.h, z11.h\n"
+ "fmla z20.h, p3/M, z6.h, z11.h\n"
"fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0xb0]\n"
+ "ld1h { z9.h }, p2/Z, [x26, x16, LSL #1]\n"
"fmla z17.h, p3/M, z4.h, z10.h\n"
"fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z19.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z2.h, z12.h\n"
- "fmla z22.h, p3/M, z0.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
- "ldr x24, [x16, #0xb8]\n"
- "fmla z27.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ldr x14, [x16, #0xc0]\n"
- "fmla z16.h, p3/M, z5.h, z10.h\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x13, [x16, #0xc8]\n"
- "fmla z17.h, p3/M, z5.h, z12.h\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z21.h, p3/M, z2.h, z12.h\n"
- "fmla z19.h, p3/M, z3.h, z12.h\n"
- "fmla z22.h, p3/M, z1.h, z12.h\n"
- "fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "ldr x11, [x16, #0xd8]\n"
- "fmla z28.h, p3/M, z7.h, z11.h\n"
- "fmla z29.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldr x12, [x16, #0xd0]\n"
- "fmla z16.h, p3/M, z7.h, z10.h\n"
- "fmla z17.h, p3/M, z6.h, z10.h\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z21.h, p3/M, z3.h, z10.h\n"
- "fmla z24.h, p3/M, z1.h, z10.h\n"
- "fmla z25.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0xe0]\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "ldr x25, [x16, #0xe8]\n"
- "fmla z19.h, p3/M, z7.h, z12.h\n"
- "fmla z22.h, p3/M, z5.h, z12.h\n"
- "fmla z23.h, p3/M, z4.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0xf0]\n"
- "fmla z16.h, p3/M, z2.h, z10.h\n"
- "fmla z17.h, p3/M, z1.h, z10.h\n"
- "fmla z18.h, p3/M, z0.h, z10.h\n"
- "fmla z20.h, p3/M, z7.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x15, LSL #1]\n"
- "ldr x24, [x16, #0xf8]\n"
- "fmla z21.h, p3/M, z6.h, z11.h\n"
- "fmla z24.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z17.h, p3/M, z2.h, z12.h\n"
- "ldr x14, [x16, #0x100]\n"
- "fmla z18.h, p3/M, z1.h, z12.h\n"
- "fmla z19.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x15, LSL #1]\n"
- "ldr x13, [x16, #0x108]\n"
- "fmla z16.h, p3/M, z6.h, z10.h\n"
- "fmla z20.h, p3/M, z3.h, z10.h\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z2.h, z11.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z22.h, p3/M, z8.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ldr x12, [x16, #0x110]\n"
- "fmla z23.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z19.h, p3/M, z8.h, z12.h\n"
- "ldr x11, [x16, #0x118]\n"
- "fmla z27.h, p3/M, z2.h, z12.h\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
"fmla z23.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x15, LSL #1]\n"
- "fmla z25.h, p3/M, z7.h, z11.h\n"
- "fmla z26.h, p3/M, z6.h, z11.h\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmla z24.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z27.h, p3/M, z5.h, z12.h\n"
- "fmla z31.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z13.h }, p2/Z, [x27, x16, LSL #1]\n"
"fmla z29.h, p3/M, z7.h, z10.h\n"
"fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z16.h, p3/M, z5.h, z10.h\n"
"fmla z28.h, p3/M, z8.h, z10.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z7.h, z11.h\n"
- "fmla z27.h, p3/M, z6.h, z11.h\n"
- "fmla z29.h, p3/M, z5.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z11.h\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x15, LSL #1]\n"
- "fmla z16.h, p3/M, z4.h, z10.h\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z18.h, p3/M, z5.h, z11.h\n"
- "fmla z19.h, p3/M, z4.h, z11.h\n"
- "fmla z29.h, p3/M, z8.h, z12.h\n"
- "fmla z30.h, p3/M, z7.h, z12.h\n"
- "fmla z31.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x12, x15, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z13.h\n"
+ "fmla z19.h, p3/M, z2.h, z13.h\n"
+ "fmla z17.h, p3/M, z7.h, z9.h\n"
+ "fmla z18.h, p3/M, z6.h, z9.h\n"
+ "fmla z16.h, p3/M, z8.h, z9.h\n"
+ "fmla z29.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z12.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z10.h\n"
+ "fmla z23.h, p3/M, z8.h, z13.h\n"
+ "ld1h { z13.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "fmla z31.h, p3/M, z6.h, z10.h\n"
+ "fmla z19.h, p3/M, z3.h, z10.h\n"
+ "fmla z17.h, p3/M, z5.h, z10.h\n"
+ "fmla z18.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z11.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z12.h\n"
+ "fmla z25.h, p3/M, z3.h, z12.h\n"
+ "fmla z20.h, p3/M, z1.h, z12.h\n"
+ "fmla z21.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "fmla z27.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z8.h, z13.h\n"
+ "fmla z18.h, p3/M, z7.h, z13.h\n"
+ "fmla z19.h, p3/M, z6.h, z13.h\n"
"fmla z22.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z13.h }, p2/Z, [x21, x16, LSL #1]\n"
"fmla z23.h, p3/M, z1.h, z11.h\n"
- "fmla z26.h, p3/M, z8.h, z10.h\n"
- "fmla z27.h, p3/M, z7.h, z10.h\n"
- ".inst 0xc16dc9f0 // fclamp { z16.h-z19.h }, z15.h, z13.h\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- ".inst 0xc16dc9f4 // fclamp { z20.h-z23.h }, z15.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "st1h { z16.h }, p1, [x23, x28, LSL #1]\n"
- "ldr x23, [x9, #0x20]\n"
- "st1h { z17.h }, p1, [x22, x28, LSL #1]\n"
- "ldr x22, [x9, #0x28]\n"
- "st1h { z18.h }, p1, [x21, x28, LSL #1]\n"
- "ldr x21, [x9, #0x30]\n"
- ".inst 0xc16dc9f8 // fclamp { z24.h-z27.h }, z15.h, z13.h\n"
- "st1h { z19.h }, p1, [x20, x28, LSL #1]\n"
- "ldr x20, [x9, #0x38]\n"
- "st1h { z20.h }, p1, [x23, x28, LSL #1]\n"
- "ldr x23, [x9, #0x40]\n"
- ".inst 0xc16dc9fc // fclamp { z28.h-z31.h }, z15.h, z13.h\n"
- "st1h { z21.h }, p1, [x22, x28, LSL #1]\n"
- "ldr x22, [x9, #0x48]\n"
- "st1h { z22.h }, p1, [x21, x28, LSL #1]\n"
- "ldr x21, [x9, #0x50]\n"
- "st1h { z23.h }, p1, [x20, x28, LSL #1]\n"
- "ldr x20, [x9, #0x58]\n"
- "st1h { z24.h }, p1, [x23, x28, LSL #1]\n"
- "ldr x23, [x9, #0x60]\n"
- "st1h { z25.h }, p1, [x22, x28, LSL #1]\n"
- "ldr x22, [x9, #0x68]\n"
- "st1h { z26.h }, p1, [x21, x28, LSL #1]\n"
- "ldr x21, [x9, #0x70]\n"
- "st1h { z27.h }, p1, [x20, x28, LSL #1]\n"
- "ldr x20, [x9, #0x78]\n"
- "st1h { z28.h }, p1, [x23, x28, LSL #1]\n"
- "st1h { z29.h }, p1, [x22, x28, LSL #1]\n"
- "st1h { z30.h }, p1, [x21, x28, LSL #1]\n"
- "st1h { z31.h }, p1, [x20, x28, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z9.h\n"
+ ".inst 0xc16fc9d8 // fclamp { z24.h-z27.h }, z14.h, z15.h\n"
+ "fmla z31.h, p3/M, z7.h, z9.h\n"
+ "fmla z28.h, p3/M, z7.h, z13.h\n"
+ "fmla z29.h, p3/M, z6.h, z13.h\n"
+ "fmla z16.h, p3/M, z4.h, z13.h\n"
+ "fmla z17.h, p3/M, z3.h, z13.h\n"
+ ".inst 0xc16fc9d4 // fclamp { z20.h-z23.h }, z14.h, z15.h\n"
+ "fmla z18.h, p3/M, z5.h, z9.h\n"
+ "st1h { z24.h }, p0, [x12, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x20]\n"
+ "fmla z19.h, p3/M, z4.h, z9.h\n"
+ "st1h { z25.h }, p0, [x11, x13, LSL #1]\n"
+ "ldr x22, [x14, #0x28]\n"
+ ".inst 0xc16fc9dc // fclamp { z28.h-z31.h }, z14.h, z15.h\n"
+ "st1h { z26.h }, p0, [x10, x13, LSL #1]\n"
+ "ldr x21, [x14, #0x30]\n"
+ "st1h { z27.h }, p0, [x9, x13, LSL #1]\n"
+ "ldr x20, [x14, #0x38]\n"
+ "st1h { z20.h }, p0, [x23, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x40]\n"
+ ".inst 0xc16fc9d0 // fclamp { z16.h-z19.h }, z14.h, z15.h\n"
+ "st1h { z21.h }, p0, [x22, x13, LSL #1]\n"
+ "ldr x22, [x14, #0x48]\n"
+ "st1h { z22.h }, p0, [x21, x13, LSL #1]\n"
+ "ldr x21, [x14, #0x50]\n"
+ "st1h { z23.h }, p0, [x20, x13, LSL #1]\n"
+ "ldr x20, [x14, #0x58]\n"
+ "st1h { z28.h }, p0, [x23, x13, LSL #1]\n"
+ "ldr x23, [x14, #0x60]\n"
+ "st1h { z29.h }, p0, [x22, x13, LSL #1]\n"
+ "ldr x22, [x14, #0x68]\n"
+ "st1h { z30.h }, p0, [x21, x13, LSL #1]\n"
+ "ldr x21, [x14, #0x70]\n"
+ "st1h { z31.h }, p0, [x20, x13, LSL #1]\n"
+ "ldr x20, [x14, #0x78]\n"
+ "st1h { z16.h }, p0, [x23, x13, LSL #1]\n"
+ "st1h { z17.h }, p0, [x22, x13, LSL #1]\n"
+ "st1h { z18.h }, p0, [x21, x13, LSL #1]\n"
+ "st1h { z19.h }, p0, [x20, x13, LSL #1]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
index 27fcb2e6d2..eacad19f36 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,12 +22,14 @@
* SOFTWARE.
*/
-#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "utils.hpp"
#include <cstdint>
#pragma once
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
namespace arm_conv {
namespace depthwise {
@@ -65,3 +67,5 @@ class sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst : public DepthwiseDepthfirs
} // namespace depthwise
} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index 066ce06aa6..6015161a4b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -102,58 +102,58 @@ void sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
"mul x20, x2, x21\n" // offset = tile_i * ld_input_row
"ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
"madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
- "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
"add x7, x4, x4\n"
+ "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "add x8, x7, x4\n"
"add x5, x5, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x8, x5, x21, LSL #1\n"
- "add x17, x7, x4\n"
- "add x16, x8, x21, LSL #1\n"
- "add x15, x17, x4\n"
- "add x14, x16, x21, LSL #1\n"
+ "add x17, x8, x4\n"
+ "add x16, x5, x21, LSL #1\n"
+ "add x15, x16, x21, LSL #1\n"
+ "add x14, x15, x21, LSL #1\n"
"add x13, x14, x21, LSL #1\n"
"cbnz x3, 2f\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"lsl x12, %x[n_channels], #0x1\n"
"mov x28, #0x8\n"
"mul x28, x28, x4\n"
- "add x27, x16, x7, LSL #1\n"
+ "add x27, x15, x7, LSL #1\n"
"add x26, x5, x4, LSL #1\n"
- "add x25, x5, x17, LSL #1\n"
- "sub x20, x24, x3\n"
- "add x24, x5, x15, LSL #1\n"
+ "add x25, x5, x8, LSL #1\n"
+ "sub x20, x20, x3\n"
+ "add x24, x5, x17, LSL #1\n"
"sub x20, x20, #0x1\n"
- "add x23, x8, x4, LSL #1\n"
+ "add x23, x16, x4, LSL #1\n"
"and x20, x20, #0x3fffff\n"
"add x22, x5, x7, LSL #1\n"
"orr x12, x12, x20, LSL #22\n"
- "add x21, x8, x17, LSL #1\n"
+ "add x21, x16, x8, LSL #1\n"
"orr x12, x12, x28, LSL #38\n"
- "add x20, x8, x15, LSL #1\n"
- "add x11, x8, x7, LSL #1\n"
+ "add x20, x16, x17, LSL #1\n"
+ "add x11, x16, x7, LSL #1\n"
"add x10, x14, x4, LSL #1\n"
- "add x9, x16, x4, LSL #1\n"
- "add x28, x14, x17, LSL #1\n"
+ "add x9, x15, x4, LSL #1\n"
+ "add x28, x14, x8, LSL #1\n"
".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- "add x27, x16, x17, LSL #1\n"
+ "add x27, x15, x8, LSL #1\n"
".inst 0xf8ac48ba // rprfm pldonce, x12, [x5]\n"
".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- "add x26, x14, x15, LSL #1\n"
+ "add x26, x14, x17, LSL #1\n"
".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- "add x25, x16, x15, LSL #1\n"
+ "add x25, x15, x17, LSL #1\n"
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
"add x24, x13, x4, LSL #1\n"
- ".inst 0xf8ac491a // rprfm pldonce, x12, [x8]\n"
+ ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
"add x23, x14, x7, LSL #1\n"
".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- "add x22, x13, x17, LSL #1\n"
+ "add x22, x13, x8, LSL #1\n"
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
"add x21, x13, x7, LSL #1\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
- "add x20, x13, x15, LSL #1\n"
+ "add x20, x13, x17, LSL #1\n"
".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
- ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
+ ".inst 0xf8ac49fa // rprfm pldonce, x12, [x15]\n"
".inst 0xf8ac495a // rprfm pldonce, x12, [x10]\n"
".inst 0xf8ac493a // rprfm pldonce, x12, [x9]\n"
".inst 0xf8ac4b9a // rprfm pldonce, x12, [x28]\n"
@@ -167,199 +167,199 @@ void sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x26, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"mov x20, #0x2\n"
- "ld1h { z19.h }, p3/Z, [x6]\n"
+ "ld1h { z28.h }, p3/Z, [x6]\n"
"addvl x6, x6, #1\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cnth x24\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "cnth x25\n"
".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_outptr]]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
".inst 0xa040a0c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
- "mul x22, x2, x26\n" // offset = tile_i * ld_output_row
- "cmp x24, %x[n_channels]\n"
- "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "madd x22, x3, x25, x22\n" // offset += tile_j * ld_output_col
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "mul x22, x2, x23\n" // offset = tile_i * ld_output_row
+ "cmp x25, %x[n_channels]\n"
+ "ld1rh { z30.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "madd x22, x3, x26, x22\n" // offset += tile_j * ld_output_col
+ "ld1rh { z31.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"mov x21, #0x0\n"
"mul x22, x22, x20\n" // offset *= output_tile_size
- "sub x20, XZR, x24\n"
+ "sub x20, XZR, x25\n"
"ld1h { z8.h }, p3/Z, [x6]\n"
- "add x23, x23, x22, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1h { z9.h }, p2/Z, [x16, x7, LSL #1]\n"
+ "add x24, x24, x22, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "ld1h { z9.h }, p2/Z, [x15, x7, LSL #1]\n"
"addvl x6, x6, #1\n"
- "add x22, x23, x26, LSL #1\n"
+ "add x23, x24, x23, LSL #1\n"
"ld1h { z10.h }, p2/Z, [x5]\n"
"ld1h { z11.h }, p2/Z, [x5, x4, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x5, x17, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x5, x15, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x8]\n"
- "ld1h { z15.h }, p2/Z, [x8, x4, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x5, x8, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x5, x17, LSL #1]\n"
+ "ld1h { z14.h }, p2/Z, [x16]\n"
+ "ld1h { z15.h }, p2/Z, [x16, x4, LSL #1]\n"
"ld1h { z16.h }, p2/Z, [x5, x7, LSL #1]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
- "movprfx z28, z19\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "movprfx z29, z19\n fmla z29.h, p3/M, z6.h, z9.h\n"
- "whilelt p1.h, x24, %x[n_channels]\n"
+ "movprfx z24, z28\n fmla z24.h, p3/M, z8.h, z9.h\n"
+ "movprfx z25, z28\n fmla z25.h, p3/M, z6.h, z9.h\n"
+ "whilelt p1.h, x25, %x[n_channels]\n"
"inch x21\n"
- "movprfx z30, z19\n fmla z30.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z19\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z19.h }, p3/Z, [x6]\n"
+ "movprfx z26, z28\n fmla z26.h, p3/M, z2.h, z9.h\n"
+ "movprfx z27, z28\n fmla z27.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z28.h }, p3/Z, [x6]\n"
"addvl x6, x6, #1\n"
- "inch x24\n"
+ "inch x25\n"
"mov p0.b, p2.b\n"
"addvl x5, x5, #1\n"
"inch x20\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z0.h, z10.h\n"
+ "fmla z25.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z18.h }, p2/Z, [x16, x17, LSL #1]\n"
"ld1h { z10.h }, p1/Z, [x5]\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x8, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z2.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x8, x7, LSL #1]\n"
- "addvl x8, x8, #1\n"
- "fmla z28.h, p3/M, z3.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x14]\n"
- "fmla z29.h, p3/M, z0.h, z16.h\n"
- "fmla z28.h, p3/M, z4.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x16]\n"
- "fmla z30.h, p3/M, z3.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x4, LSL #1]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x16, x4, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z15.h\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x16, x17, LSL #1]\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x16, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z1.h, z11.h\n"
+ "fmla z25.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z9.h }, p2/Z, [x16, x8, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x16, x7, LSL #1]\n"
"addvl x16, x16, #1\n"
- "ld1h { z9.h }, p1/Z, [x16, x7, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "fmla z28.h, p3/M, z6.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x13]\n"
- "fmla z29.h, p3/M, z7.h, z12.h\n"
- "fmla z30.h, p3/M, z1.h, z16.h\n"
- "ld1h { z13.h }, p2/Z, [x13, x4, LSL #1]\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p1/Z, [x5, x17, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z16.h\n"
+ "fmla z24.h, p3/M, z3.h, z14.h\n"
+ "fmla z25.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x14]\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z15.h\n"
+ "fmla z25.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z19.h }, p2/Z, [x15]\n"
+ "ld1h { z17.h }, p2/Z, [x14, x4, LSL #1]\n"
+ "fmla z26.h, p3/M, z0.h, z19.h\n"
+ "fmla z24.h, p3/M, z2.h, z16.h\n"
+ "fmla z25.h, p3/M, z5.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, x8, LSL #1]\n"
+ "ld1h { z0.h }, p2/Z, [x15, x4, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, x17, LSL #1]\n"
+ "addvl x15, x15, #1\n"
+ "fmla z24.h, p3/M, z5.h, z22.h\n"
+ "fmla z25.h, p3/M, z3.h, z22.h\n"
+ "ld1h { z16.h }, p2/Z, [x14, x8, LSL #1]\n"
+ "ld1h { z9.h }, p1/Z, [x15, x7, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z16.h\n"
+ "fmla z26.h, p3/M, z1.h, z0.h\n"
+ "ld1h { z17.h }, p2/Z, [x13, x4, LSL #1]\n"
+ "fmla z24.h, p3/M, z6.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x13]\n"
+ "fmla z25.h, p3/M, z7.h, z18.h\n"
+ "fmla z27.h, p3/M, z1.h, z18.h\n"
+ "fmla z26.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z0.h\n"
"ld1h { z16.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "fmla z25.h, p3/M, z8.h, z20.h\n"
"addvl x14, x14, #1\n"
- "fmla z30.h, p3/M, z6.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x13, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x13, x17, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "ld1h { z13.h }, p1/Z, [x5, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x15, LSL #1]\n"
+ "ld1h { z12.h }, p1/Z, [x5, x8, LSL #1]\n"
+ "fmla z27.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z18.h }, p2/Z, [x13, x8, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z13.h }, p1/Z, [x5, x17, LSL #1]\n"
+ "fmla z27.h, p3/M, z2.h, z20.h\n"
+ "fmla z26.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x13, x17, LSL #1]\n"
"whilelt p2.h, x21, %x[n_channels]\n"
- "cmp x24, %x[n_channels]\n"
+ "cmp x25, %x[n_channels]\n"
"addvl x13, x13, #1\n"
- "fmla z30.h, p3/M, z5.h, z16.h\n"
- "fmla z31.h, p3/M, z3.h, z16.h\n"
+ "fmla z27.h, p3/M, z3.h, z16.h\n"
+ "fmla z26.h, p3/M, z8.h, z19.h\n"
".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
"ld1h { z16.h }, p1/Z, [x5, x7, LSL #1]\n"
- "fmla z31.h, p3/M, z7.h, z14.h\n"
- "ld1h { z14.h }, p1/Z, [x8]\n"
- "fmla z30.h, p3/M, z8.h, z15.h\n"
- "fmla z31.h, p3/M, z6.h, z15.h\n"
+ "fmla z27.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z14.h }, p1/Z, [x16]\n"
+ "fmla z27.h, p3/M, z6.h, z19.h\n"
".inst 0xa040a0c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
- "ld1h { z15.h }, p1/Z, [x8, x4, LSL #1]\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z15.h }, p1/Z, [x16, x4, LSL #1]\n"
+ "fmla z27.h, p3/M, z8.h, z17.h\n"
"ld1h { z11.h }, p1/Z, [x5, x4, LSL #1]\n"
"ld1h { z8.h }, p3/Z, [x6]\n"
"addvl x6, x6, #1\n"
- ".inst 0xc171ca5c // fclamp { z28.h-z31.h }, z18.h, z17.h\n"
- "st1h { z28.h }, p0, [x23]\n"
- "st1h { z29.h }, p0, [x23, x25, LSL #1]\n"
+ ".inst 0xc17fcbd8 // fclamp { z24.h-z27.h }, z30.h, z31.h\n"
+ "st1h { z24.h }, p0, [x24]\n"
+ "st1h { z25.h }, p0, [x24, x26, LSL #1]\n"
+ "addvl x24, x24, #1\n"
+ "st1h { z26.h }, p0, [x23]\n"
+ "st1h { z27.h }, p0, [x23, x26, LSL #1]\n"
"addvl x23, x23, #1\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x25, LSL #1]\n"
- "addvl x22, x22, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
- "movprfx z28, z19\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "movprfx z29, z19\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "movprfx z24, z28\n fmla z24.h, p3/M, z8.h, z9.h\n"
+ "movprfx z25, z28\n fmla z25.h, p3/M, z6.h, z9.h\n"
"ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"mov p0.b, p2.b\n"
- "movprfx z30, z19\n fmla z30.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z19\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "movprfx z26, z28\n fmla z26.h, p3/M, z2.h, z9.h\n"
+ "movprfx z27, z28\n fmla z27.h, p3/M, z0.h, z9.h\n"
"ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"add x3, x3, #0x1\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z0.h, z10.h\n"
+ "fmla z25.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z21.h }, p2/Z, [x16, x17, LSL #1]\n"
"add x20, x2, #0x1\n"
- "cmp x3, x24\n"
+ "cmp x3, x22\n"
"csel x2, x2, x20, LT\n"
"csel x3, x3, XZR, LT\n"
"cmp x2, x21\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x8, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z2.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x8, x7, LSL #1]\n"
- "fmla z28.h, p3/M, z3.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x14]\n"
- "fmla z29.h, p3/M, z0.h, z16.h\n"
- "fmla z28.h, p3/M, z4.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x16]\n"
- "fmla z30.h, p3/M, z3.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x4, LSL #1]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x16, x4, LSL #1]\n"
- "fmla z30.h, p3/M, z0.h, z15.h\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x16, x17, LSL #1]\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x16, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x13, x4, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x13]\n"
- "fmla z29.h, p3/M, z7.h, z12.h\n"
- "fmla z30.h, p3/M, z1.h, z16.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "fmla z28.h, p3/M, z7.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x14, x7, LSL #1]\n"
- "fmla z30.h, p3/M, z6.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x13, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x13, x17, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x13, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z5.h, z16.h\n"
- "fmla z31.h, p3/M, z3.h, z16.h\n"
- "fmla z30.h, p3/M, z8.h, z15.h\n"
- "fmla z31.h, p3/M, z7.h, z14.h\n"
- "fmla z31.h, p3/M, z6.h, z15.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- ".inst 0xc171ca5c // fclamp { z28.h-z31.h }, z18.h, z17.h\n"
- "st1h { z28.h }, p0, [x23]\n"
- "st1h { z29.h }, p0, [x23, x25, LSL #1]\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x25, LSL #1]\n"
+ "fmla z24.h, p3/M, z1.h, z11.h\n"
+ "fmla z25.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z18.h }, p2/Z, [x16, x8, LSL #1]\n"
+ "ld1h { z20.h }, p2/Z, [x16, x7, LSL #1]\n"
+ "fmla z24.h, p3/M, z3.h, z14.h\n"
+ "fmla z25.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x14]\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z23.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z15.h\n"
+ "fmla z25.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z22.h }, p2/Z, [x15]\n"
+ "ld1h { z19.h }, p2/Z, [x14, x4, LSL #1]\n"
+ "fmla z26.h, p3/M, z0.h, z22.h\n"
+ "fmla z24.h, p3/M, z2.h, z16.h\n"
+ "fmla z25.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, x8, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x15, x4, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z19.h\n"
+ "ld1h { z21.h }, p2/Z, [x15, x17, LSL #1]\n"
+ "fmla z24.h, p3/M, z5.h, z20.h\n"
+ "fmla z25.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x14, x8, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z16.h\n"
+ "fmla z26.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x13, x4, LSL #1]\n"
+ "fmla z24.h, p3/M, z6.h, z22.h\n"
+ "ld1h { z16.h }, p2/Z, [x13]\n"
+ "fmla z25.h, p3/M, z7.h, z18.h\n"
+ "fmla z27.h, p3/M, z1.h, z18.h\n"
+ "fmla z26.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "fmla z25.h, p3/M, z8.h, z21.h\n"
+ "fmla z27.h, p3/M, z5.h, z23.h\n"
+ "ld1h { z17.h }, p2/Z, [x13, x8, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z20.h\n"
+ "fmla z27.h, p3/M, z2.h, z21.h\n"
+ "fmla z26.h, p3/M, z5.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "fmla z27.h, p3/M, z3.h, z18.h\n"
+ "fmla z26.h, p3/M, z8.h, z19.h\n"
+ "fmla z27.h, p3/M, z7.h, z17.h\n"
+ "fmla z27.h, p3/M, z6.h, z19.h\n"
+ "fmla z27.h, p3/M, z8.h, z16.h\n"
+ ".inst 0xc17fcbd8 // fclamp { z24.h-z27.h }, z30.h, z31.h\n"
+ "st1h { z24.h }, p0, [x24]\n"
+ "st1h { z25.h }, p0, [x24, x26, LSL #1]\n"
+ "st1h { z26.h }, p0, [x23]\n"
+ "st1h { z27.h }, p0, [x23, x26, LSL #1]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
@@ -371,4 +371,4 @@ void sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index 1bf3a84959..ebbbd760fc 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -96,24 +96,24 @@ void sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
".inst 0x25207810 // ptrue pn8.b\n"
"cnth x13\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1rh { z19.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rh { z24.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"ldp x12, x11, [x20, #0x0]\n"
- "ldp x10, x9, [x20, #0x10]\n"
"cmp x13, %x[n_channels]\n"
- "ld1rh { z18.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x28, XZR, x13\n"
- "ld1h { z17.h }, p3/Z, [x14]\n"
+ "ld1rh { z27.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "sub x10, XZR, x13\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ld1h { z23.h }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
"ldp x27, x26, [x16, #0x0]\n"
- "ldp x25, x24, [x16, #0x10]\n"
".inst 0xa040a1c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "ldp x23, x22, [x16, #0x20]\n"
+ "ldp x25, x24, [x16, #0x10]\n"
".inst 0xa040a1c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "ldp x21, x20, [x16, #0x30]\n"
+ "ldp x23, x22, [x16, #0x20]\n"
"ld1h { z8.h }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
+ "ldp x21, x20, [x16, #0x30]\n"
"ld1h { z9.h }, p2/Z, [x27, x15, LSL #1]\n"
"ld1h { z10.h }, p2/Z, [x26, x15, LSL #1]\n"
"ld1h { z11.h }, p2/Z, [x25, x15, LSL #1]\n"
@@ -124,187 +124,187 @@ void sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"ld1h { z16.h }, p2/Z, [x20, x15, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z17\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "movprfx z29, z17\n fmla z29.h, p3/M, z6.h, z9.h\n"
- "ldr x27, [x16, #0x40]\n"
+ "movprfx z28, z23\n fmla z28.h, p3/M, z8.h, z9.h\n"
+ "movprfx z29, z23\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ldr x25, [x16, #0x40]\n"
"whilelt p1.h, x13, %x[n_channels]\n"
- "ldr x26, [x16, #0x48]\n"
- "movprfx z30, z17\n fmla z30.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z17\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "ld1h { z17.h }, p3/Z, [x14]\n"
- "ldr x25, [x16, #0x50]\n"
+ "ldr x22, [x16, #0x48]\n"
+ "movprfx z30, z23\n fmla z30.h, p3/M, z2.h, z9.h\n"
+ "movprfx z31, z23\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z23.h }, p3/Z, [x14]\n"
+ "ldr x21, [x16, #0x50]\n"
"addvl x14, x14, #1\n"
- "inch x28\n"
- "ldr x24, [x16, #0x58]\n"
+ "inch x10\n"
+ "ldr x20, [x16, #0x58]\n"
"mov p0.b, p2.b\n"
"fmla z28.h, p3/M, z0.h, z10.h\n"
"fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x20, [x16, #0x78]\n"
- "ldr x23, [x16, #0x60]\n"
- "ldr x22, [x16, #0x68]\n"
+ "ld1h { z20.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ldr x27, [x16, #0x78]\n"
+ "ldr x24, [x16, #0x60]\n"
+ "ldr x26, [x16, #0x68]\n"
+ "ldr x23, [x16, #0x88]\n"
"fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x15, LSL #1]\n"
"fmla z29.h, p3/M, z2.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x27, [x16, #0x80]\n"
- "ldr x26, [x16, #0x88]\n"
+ "ld1h { z25.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "ldr x22, [x16, #0x80]\n"
+ "ld1h { z19.h }, p2/Z, [x21, x15, LSL #1]\n"
"ldr x21, [x16, #0x70]\n"
+ "ldr x25, [x16, #0x90]\n"
"fmla z28.h, p3/M, z3.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x24, x15, LSL #1]\n"
"fmla z29.h, p3/M, z0.h, z16.h\n"
- "ldr x24, [x16, #0x98]\n"
- "ldr x25, [x16, #0x90]\n"
- "fmla z30.h, p3/M, z3.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x20, [x16, #0x98]\n"
+ "fmla z30.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z22.h }, p2/Z, [x23, x15, LSL #1]\n"
"fmla z28.h, p3/M, z4.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x23, x15, LSL #1]\n"
- "ldr x23, [x16, #0xa0]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x22, x15, LSL #1]\n"
- "ldr x22, [x16, #0xa8]\n"
+ "fmla z29.h, p3/M, z4.h, z25.h\n"
+ "ld1h { z18.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "ld1h { z17.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z30.h, p3/M, z0.h, z18.h\n"
"fmla z28.h, p3/M, z2.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z20.h\n"
+ "ld1h { z25.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ldr x22, [x16, #0xc0]\n"
+ "ld1h { z15.h }, p2/Z, [x21, x15, LSL #1]\n"
"ldr x21, [x16, #0xb0]\n"
- "fmla z30.h, p3/M, z0.h, z15.h\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0xc0]\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z19.h\n"
+ "fmla z29.h, p3/M, z3.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x15, LSL #1]\n"
"ldr x20, [x16, #0xb8]\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z7.h, z12.h\n"
- "fmla z30.h, p3/M, z1.h, z16.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "fmla z28.h, p3/M, z7.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z6.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x20, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x21, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z16.h\n"
+ "fmla z30.h, p3/M, z1.h, z15.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z6.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z25.h\n"
+ "fmla z31.h, p3/M, z1.h, z25.h\n"
+ "fmla z30.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z15.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z20.h\n"
+ "fmla z31.h, p3/M, z5.h, z22.h\n"
+ "ld1h { z18.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z17.h\n"
+ "fmla z31.h, p3/M, z2.h, z20.h\n"
+ "fmla z30.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, x15, LSL #1]\n"
"ldp x27, x26, [x16, #0x0]\n"
- "inch x15\n"
"ldp x25, x24, [x16, #0x10]\n"
- "whilelt p2.h, x15, %x[n_channels]\n"
+ "inch x15\n"
"ldp x23, x22, [x16, #0x20]\n"
- "fmla z30.h, p3/M, z5.h, z16.h\n"
+ "whilelt p2.h, x15, %x[n_channels]\n"
"ldp x21, x20, [x16, #0x30]\n"
"ld1h { z9.h }, p1/Z, [x27, x13, LSL #1]\n"
"fmla z31.h, p3/M, z3.h, z16.h\n"
+ "fmla z30.h, p3/M, z8.h, z19.h\n"
"ld1h { z10.h }, p1/Z, [x26, x13, LSL #1]\n"
"ld1h { z12.h }, p1/Z, [x24, x13, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z15.h\n"
"ld1h { z13.h }, p1/Z, [x23, x13, LSL #1]\n"
- "fmla z31.h, p3/M, z7.h, z14.h\n"
- "ld1h { z14.h }, p1/Z, [x22, x13, LSL #1]\n"
"ld1h { z16.h }, p1/Z, [x20, x13, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z14.h }, p1/Z, [x22, x13, LSL #1]\n"
".inst 0xa040a1c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "fmla z31.h, p3/M, z6.h, z15.h\n"
+ "fmla z31.h, p3/M, z6.h, z19.h\n"
"ld1h { z15.h }, p1/Z, [x21, x13, LSL #1]\n"
".inst 0xa040a1c4 // ld1h { z4.h-z7.h }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
+ "fmla z31.h, p3/M, z8.h, z17.h\n"
"ld1h { z11.h }, p1/Z, [x25, x13, LSL #1]\n"
"inch x13\n"
"cmp x13, %x[n_channels]\n"
"ld1h { z8.h }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
- ".inst 0xc172ca7c // fclamp { z28.h-z31.h }, z19.h, z18.h\n"
- "st1h { z28.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z29.h }, p0, [x11, x28, LSL #1]\n"
- "st1h { z30.h }, p0, [x10, x28, LSL #1]\n"
- "st1h { z31.h }, p0, [x9, x28, LSL #1]\n"
+ ".inst 0xc17bcb1c // fclamp { z28.h-z31.h }, z24.h, z27.h\n"
+ "st1h { z28.h }, p0, [x12, x10, LSL #1]\n"
+ "st1h { z29.h }, p0, [x11, x10, LSL #1]\n"
+ "st1h { z30.h }, p0, [x9, x10, LSL #1]\n"
+ "st1h { z31.h }, p0, [x28, x10, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z17\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "movprfx z29, z17\n fmla z29.h, p3/M, z6.h, z9.h\n"
- "ldr x27, [x16, #0x40]\n"
- "inch x28\n"
- "ldr x26, [x16, #0x48]\n"
- "movprfx z30, z17\n fmla z30.h, p3/M, z2.h, z9.h\n"
- "movprfx z31, z17\n fmla z31.h, p3/M, z0.h, z9.h\n"
+ "movprfx z28, z23\n fmla z28.h, p3/M, z8.h, z9.h\n"
+ "movprfx z29, z23\n fmla z29.h, p3/M, z6.h, z9.h\n"
+ "ldr x25, [x16, #0x40]\n"
+ "inch x10\n"
+ "ldr x22, [x16, #0x48]\n"
+ "movprfx z30, z23\n fmla z30.h, p3/M, z2.h, z9.h\n"
+ "movprfx z31, z23\n fmla z31.h, p3/M, z0.h, z9.h\n"
"mov p0.b, p2.b\n"
- "ldr x25, [x16, #0x50]\n"
- "ldr x24, [x16, #0x58]\n"
+ "ldr x21, [x16, #0x50]\n"
+ "ldr x20, [x16, #0x58]\n"
"fmla z28.h, p3/M, z0.h, z10.h\n"
"fmla z29.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x26, x15, LSL #1]\n"
- "ldr x20, [x16, #0x78]\n"
- "ldr x23, [x16, #0x60]\n"
- "ldr x22, [x16, #0x68]\n"
+ "ld1h { z21.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ldr x27, [x16, #0x78]\n"
+ "ldr x24, [x16, #0x60]\n"
+ "ldr x26, [x16, #0x68]\n"
+ "ldr x23, [x16, #0x88]\n"
"fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x15, LSL #1]\n"
"fmla z29.h, p3/M, z2.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x27, [x16, #0x80]\n"
- "ldr x26, [x16, #0x88]\n"
+ "ld1h { z18.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "ldr x22, [x16, #0x80]\n"
+ "ld1h { z20.h }, p2/Z, [x21, x15, LSL #1]\n"
"ldr x21, [x16, #0x70]\n"
+ "ldr x25, [x16, #0x90]\n"
"fmla z28.h, p3/M, z3.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x24, x15, LSL #1]\n"
"fmla z29.h, p3/M, z0.h, z16.h\n"
- "ldr x24, [x16, #0x98]\n"
- "ldr x25, [x16, #0x90]\n"
- "fmla z30.h, p3/M, z3.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x20, [x16, #0x98]\n"
+ "fmla z30.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z23.h }, p2/Z, [x23, x15, LSL #1]\n"
"fmla z28.h, p3/M, z4.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x23, x15, LSL #1]\n"
- "ldr x23, [x16, #0xa0]\n"
- "fmla z29.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x22, x15, LSL #1]\n"
- "ldr x22, [x16, #0xa8]\n"
+ "fmla z29.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z22.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "ld1h { z19.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z30.h, p3/M, z0.h, z22.h\n"
"fmla z28.h, p3/M, z2.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z18.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ldr x22, [x16, #0xc0]\n"
+ "ld1h { z17.h }, p2/Z, [x21, x15, LSL #1]\n"
"ldr x21, [x16, #0xb0]\n"
- "fmla z30.h, p3/M, z0.h, z15.h\n"
- "fmla z29.h, p3/M, z5.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0xc0]\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z19.h\n"
+ "ld1h { z21.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z20.h\n"
+ "fmla z29.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x15, LSL #1]\n"
"ldr x20, [x16, #0xb8]\n"
- "fmla z30.h, p3/M, z4.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z6.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z7.h, z12.h\n"
- "fmla z30.h, p3/M, z1.h, z16.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "fmla z28.h, p3/M, z7.h, z16.h\n"
+ "fmla z31.h, p3/M, z4.h, z16.h\n"
+ "fmla z30.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z6.h, z22.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z18.h\n"
+ "fmla z31.h, p3/M, z1.h, z18.h\n"
+ "fmla z30.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z21.h\n"
+ "fmla z31.h, p3/M, z5.h, z23.h\n"
+ "ld1h { z17.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z20.h\n"
+ "fmla z31.h, p3/M, z2.h, z21.h\n"
+ "fmla z30.h, p3/M, z5.h, z18.h\n"
"ld1h { z16.h }, p2/Z, [x22, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z6.h, z15.h\n"
- "ld1h { z15.h }, p2/Z, [x20, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z8.h, z11.h\n"
- "fmla z31.h, p3/M, z5.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x21, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z13.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z5.h, z16.h\n"
- "fmla z31.h, p3/M, z3.h, z16.h\n"
- "fmla z30.h, p3/M, z8.h, z15.h\n"
- "fmla z31.h, p3/M, z7.h, z14.h\n"
- "fmla z31.h, p3/M, z6.h, z15.h\n"
- "fmla z31.h, p3/M, z8.h, z11.h\n"
- ".inst 0xc172ca7c // fclamp { z28.h-z31.h }, z19.h, z18.h\n"
- "st1h { z28.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z29.h }, p0, [x11, x28, LSL #1]\n"
- "st1h { z30.h }, p0, [x10, x28, LSL #1]\n"
- "st1h { z31.h }, p0, [x9, x28, LSL #1]\n"
+ "fmla z31.h, p3/M, z3.h, z18.h\n"
+ "fmla z30.h, p3/M, z8.h, z19.h\n"
+ "fmla z31.h, p3/M, z7.h, z17.h\n"
+ "fmla z31.h, p3/M, z6.h, z19.h\n"
+ "fmla z31.h, p3/M, z8.h, z16.h\n"
+ ".inst 0xc17bcb1c // fclamp { z28.h-z31.h }, z24.h, z27.h\n"
+ "st1h { z28.h }, p0, [x12, x10, LSL #1]\n"
+ "st1h { z29.h }, p0, [x11, x10, LSL #1]\n"
+ "st1h { z30.h }, p0, [x9, x10, LSL #1]\n"
+ "st1h { z31.h }, p0, [x28, x10, LSL #1]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
@@ -315,4 +315,4 @@ void sme2_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
index 84263cb564..e6864ba2c3 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,12 +22,14 @@
* SOFTWARE.
*/
-#include "src/core/NEON/kernels/arm_gemm/utils.hpp"
+#include "utils.hpp"
#include <cstdint>
#pragma once
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+
namespace arm_conv {
namespace depthwise {
@@ -65,3 +67,5 @@ class sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst : public DepthwiseDepthfirs
} // namespace depthwise
} // namespace arm_conv
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 58b7824b98..96231dc1ab 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -102,81 +102,81 @@ void sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
"mul x20, x2, x21\n" // offset = tile_i * ld_input_row
"ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
"madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
- "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
"add x7, x4, x4\n"
+ "mul x20, x20, x22\n" // offset *= kernel_stride * output_size
+ "add x8, x7, x4\n"
"add x5, x5, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x8, x5, x21, LSL #1\n"
- "add x17, x7, x4\n"
- "add x16, x8, x21, LSL #1\n"
+ "add x17, x8, x4\n"
+ "add x16, x5, x21, LSL #1\n"
"add x15, x17, x4\n"
"add x14, x16, x21, LSL #1\n"
- "add x13, x15, x4\n"
- "add x12, x14, x21, LSL #1\n"
+ "add x13, x14, x21, LSL #1\n"
+ "add x12, x13, x21, LSL #1\n"
"add x11, x12, x21, LSL #1\n"
"cbnz x3, 2f\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"lsl x10, %x[n_channels], #0x1\n"
"mov x21, #0x4\n"
"mul x21, x21, x4\n"
"add x9, x5, x4, LSL #1\n"
- "add x28, x8, x4, LSL #1\n"
+ "add x28, x16, x4, LSL #1\n"
"add x27, x5, x7, LSL #1\n"
- "sub x20, x25, x3\n"
- "add x26, x8, x7, LSL #1\n"
+ "sub x20, x20, x3\n"
+ "add x26, x16, x7, LSL #1\n"
"sub x20, x20, #0x1\n"
- "add x25, x5, x17, LSL #1\n"
+ "add x25, x5, x8, LSL #1\n"
"and x20, x20, #0x3fffff\n"
- "add x24, x5, x15, LSL #1\n"
+ "add x24, x5, x17, LSL #1\n"
"orr x10, x10, x20, LSL #22\n"
- "add x23, x8, x13, LSL #1\n"
+ "add x23, x16, x15, LSL #1\n"
"orr x10, x10, x21, LSL #38\n"
- "add x22, x8, x17, LSL #1\n"
- "add x21, x8, x15, LSL #1\n"
- "add x20, x5, x13, LSL #1\n"
+ "add x22, x16, x8, LSL #1\n"
+ "add x21, x16, x17, LSL #1\n"
+ "add x20, x5, x15, LSL #1\n"
".inst 0xf8aa48ba // rprfm pldonce, x10, [x5]\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- "add x9, x16, x4, LSL #1\n"
- ".inst 0xf8aa491a // rprfm pldonce, x10, [x8]\n"
+ "add x9, x14, x4, LSL #1\n"
+ ".inst 0xf8aa4a1a // rprfm pldonce, x10, [x16]\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
- "add x28, x16, x7, LSL #1\n"
+ "add x28, x14, x7, LSL #1\n"
".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- "add x27, x16, x17, LSL #1\n"
+ "add x27, x14, x8, LSL #1\n"
".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- "add x26, x16, x15, LSL #1\n"
+ "add x26, x14, x17, LSL #1\n"
".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- "add x25, x16, x13, LSL #1\n"
+ "add x25, x14, x15, LSL #1\n"
".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
- "add x24, x14, x4, LSL #1\n"
+ "add x24, x13, x4, LSL #1\n"
".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
- "add x23, x14, x7, LSL #1\n"
- ".inst 0xf8aa4a1a // rprfm pldonce, x10, [x16]\n"
+ "add x23, x13, x7, LSL #1\n"
+ ".inst 0xf8aa49da // rprfm pldonce, x10, [x14]\n"
".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- "add x22, x14, x17, LSL #1\n"
+ "add x22, x13, x8, LSL #1\n"
".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- "add x21, x14, x15, LSL #1\n"
+ "add x21, x13, x17, LSL #1\n"
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
- "add x20, x14, x13, LSL #1\n"
+ "add x20, x13, x15, LSL #1\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
"add x9, x12, x4, LSL #1\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
"add x28, x12, x7, LSL #1\n"
".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- "add x27, x12, x17, LSL #1\n"
+ "add x27, x12, x8, LSL #1\n"
".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- "add x26, x12, x15, LSL #1\n"
+ "add x26, x12, x17, LSL #1\n"
".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- "add x25, x12, x13, LSL #1\n"
- ".inst 0xf8aa49da // rprfm pldonce, x10, [x14]\n"
+ "add x25, x12, x15, LSL #1\n"
+ ".inst 0xf8aa49ba // rprfm pldonce, x10, [x13]\n"
".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
"add x24, x11, x4, LSL #1\n"
".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
"add x23, x11, x7, LSL #1\n"
".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- "add x22, x11, x17, LSL #1\n"
+ "add x22, x11, x8, LSL #1\n"
".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- "add x21, x11, x15, LSL #1\n"
+ "add x21, x11, x17, LSL #1\n"
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
- "add x20, x11, x13, LSL #1\n"
+ "add x20, x11, x15, LSL #1\n"
".inst 0xf8aa499a // rprfm pldonce, x10, [x12]\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
@@ -191,387 +191,387 @@ void sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
"ldr x27, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mov x26, #0x2\n"
- "cnth x25\n"
- "ld1h { z18.h }, p3/Z, [x6]\n"
- "ldr x24, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "mov x23, #0x2\n"
+ "cnth x26\n"
+ "ld1h { z31.h }, p3/Z, [x6]\n"
+ "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"addvl x6, x6, #1\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1rh { z17.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_outptr]]\n"
".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
- "cmp x25, %x[n_channels]\n"
+ "cmp x26, %x[n_channels]\n"
"mul x22, x2, x27\n" // offset = tile_i * ld_output_row
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1rh { z27.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"mov x21, #0x0\n"
- "madd x22, x3, x24, x22\n" // offset += tile_j * ld_output_col
- "sub x20, XZR, x25\n"
+ "madd x22, x3, x25, x22\n" // offset += tile_j * ld_output_col
+ "sub x20, XZR, x26\n"
"ld1h { z4.h }, p3/Z, [x6]\n"
- "mul x22, x22, x26\n" // offset *= output_tile_size
+ "mul x22, x22, x23\n" // offset *= output_tile_size
"ld1h { z5.h }, p2/Z, [x5]\n"
"addvl x6, x6, #1\n"
- "add x23, x23, x22, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x24, x24, x22, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
"ld1h { z6.h }, p2/Z, [x5, x4, LSL #1]\n"
- "add x22, x23, x27, LSL #1\n"
- "ld1h { z7.h }, p2/Z, [x8]\n"
- "ld1h { z8.h }, p2/Z, [x8, x4, LSL #1]\n"
+ "add x23, x24, x27, LSL #1\n"
+ "ld1h { z7.h }, p2/Z, [x16]\n"
+ "ld1h { z8.h }, p2/Z, [x16, x4, LSL #1]\n"
"ld1h { z9.h }, p2/Z, [x5, x7, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x8, x7, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x5, x17, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x5, x15, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x8, x13, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x16]\n"
+ "ld1h { z13.h }, p2/Z, [x16, x7, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x5, x8, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x5, x17, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x16, x15, LSL #1]\n"
+ "ld1h { z14.h }, p2/Z, [x14]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
- "movprfx z28, z18\n fmla z28.h, p3/M, z0.h, z5.h\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z0.h, z6.h\n"
- "ld1h { z5.h }, p2/Z, [x8, x17, LSL #1]\n"
- "whilelt p1.h, x25, %x[n_channels]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z0.h, z7.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z8.h\n"
- "ld1h { z0.h }, p3/Z, [x6]\n"
+ "movprfx z20, z31\n fmla z20.h, p3/M, z0.h, z5.h\n"
+ "movprfx z21, z31\n fmla z21.h, p3/M, z0.h, z6.h\n"
+ "ld1h { z30.h }, p2/Z, [x16, x8, LSL #1]\n"
+ "whilelt p1.h, x26, %x[n_channels]\n"
+ "movprfx z22, z31\n fmla z22.h, p3/M, z0.h, z7.h\n"
+ "movprfx z23, z31\n fmla z23.h, p3/M, z0.h, z8.h\n"
+ "ld1h { z18.h }, p3/Z, [x6]\n"
"inch x21\n"
- "inch x25\n"
+ "inch x26\n"
"mov p0.b, p2.b\n"
"inch x20\n"
- "fmla z28.h, p3/M, z1.h, z6.h\n"
- "ld1h { z6.h }, p2/Z, [x8, x15, LSL #1]\n"
- "addvl x8, x8, #1\n"
- "fmla z29.h, p3/M, z1.h, z9.h\n"
- "fmla z30.h, p3/M, z1.h, z8.h\n"
- "fmla z31.h, p3/M, z1.h, z13.h\n"
- "ld1h { z1.h }, p3/Z, [x6, #1, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x5, x13, LSL #1]\n"
- "addvl x5, x5, #1\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "fmla z30.h, p3/M, z2.h, z13.h\n"
- "fmla z31.h, p3/M, z2.h, z5.h\n"
- "ld1h { z2.h }, p3/Z, [x6, #2, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x16, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z30.h, p3/M, z3.h, z5.h\n"
- "fmla z31.h, p3/M, z3.h, z6.h\n"
- "ld1h { z3.h }, p3/Z, [x6, #3, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x16, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x16, x17, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z6.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x6, #4, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z7.h\n"
- "ld1h { z7.h }, p1/Z, [x8]\n"
- "fmla z29.h, p3/M, z0.h, z8.h\n"
- "fmla z30.h, p3/M, z0.h, z14.h\n"
- "fmla z31.h, p3/M, z0.h, z11.h\n"
- "ld1h { z0.h }, p3/Z, [x6, #5, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z8.h\n"
- "ld1h { z8.h }, p2/Z, [x16, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z1.h, z13.h\n"
- "fmla z30.h, p3/M, z1.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ld1h { z1.h }, p3/Z, [x6, #6, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x16, x15, LSL #1]\n"
+ "fmla z20.h, p3/M, z1.h, z6.h\n"
+ "fmla z21.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z5.h }, p2/Z, [x16, x17, LSL #1]\n"
"addvl x16, x16, #1\n"
- "fmla z29.h, p3/M, z2.h, z5.h\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z2.h, z9.h\n"
- "ld1h { z2.h }, p3/Z, [x6, #7, MUL VL]\n"
- "addvl x6, x6, #16\n"
- "ld1h { z18.h }, p3/Z, [x6, #4, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z5.h\n"
- "ld1h { z5.h }, p2/Z, [x14]\n"
- "fmla z29.h, p3/M, z3.h, z6.h\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ld1h { z3.h }, p3/Z, [x6, #-8, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z6.h\n"
- "ld1h { z6.h }, p2/Z, [x14, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x7, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z4.h, z8.h\n"
- "ld1h { z4.h }, p3/Z, [x6, #-7, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x14, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "fmla z30.h, p3/M, z0.h, z5.h\n"
- "fmla z31.h, p3/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p3/Z, [x6, #-6, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "fmla z30.h, p3/M, z1.h, z6.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "ld1h { z1.h }, p3/Z, [x6, #-5, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x15, LSL #1]\n"
+ "fmla z22.h, p3/M, z1.h, z8.h\n"
+ "fmla z23.h, p3/M, z1.h, z13.h\n"
+ "ld1h { z24.h }, p3/Z, [x6, #1, MUL VL]\n"
+ "fmla z20.h, p3/M, z2.h, z9.h\n"
+ "fmla z21.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x5, x15, LSL #1]\n"
+ "addvl x5, x5, #1\n"
+ "fmla z22.h, p3/M, z2.h, z13.h\n"
+ "fmla z23.h, p3/M, z2.h, z30.h\n"
+ "ld1h { z16.h }, p3/Z, [x6, #2, MUL VL]\n"
+ "fmla z20.h, p3/M, z3.h, z11.h\n"
+ "fmla z21.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x14, x4, LSL #1]\n"
+ "fmla z22.h, p3/M, z3.h, z30.h\n"
+ "fmla z23.h, p3/M, z3.h, z5.h\n"
+ "ld1h { z29.h }, p3/Z, [x6, #3, MUL VL]\n"
+ "fmla z20.h, p3/M, z4.h, z12.h\n"
+ "fmla z21.h, p3/M, z4.h, z17.h\n"
+ "ld1h { z28.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "fmla z22.h, p3/M, z4.h, z5.h\n"
+ "fmla z23.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z25.h }, p2/Z, [x14, x8, LSL #1]\n"
+ "ld1h { z3.h }, p3/Z, [x6, #4, MUL VL]\n"
+ "fmla z20.h, p3/M, z18.h, z7.h\n"
+ "fmla z21.h, p3/M, z18.h, z8.h\n"
+ "ld1h { z7.h }, p1/Z, [x16]\n"
+ "fmla z22.h, p3/M, z18.h, z14.h\n"
+ "fmla z23.h, p3/M, z18.h, z11.h\n"
+ "ld1h { z19.h }, p3/Z, [x6, #5, MUL VL]\n"
+ "fmla z20.h, p3/M, z24.h, z8.h\n"
+ "fmla z21.h, p3/M, z24.h, z13.h\n"
+ "ld1h { z26.h }, p2/Z, [x14, x15, LSL #1]\n"
+ "fmla z22.h, p3/M, z24.h, z11.h\n"
+ "fmla z23.h, p3/M, z24.h, z28.h\n"
+ "ld1h { z18.h }, p3/Z, [x6, #6, MUL VL]\n"
+ "fmla z20.h, p3/M, z16.h, z13.h\n"
+ "fmla z21.h, p3/M, z16.h, z30.h\n"
+ "ld1h { z17.h }, p2/Z, [x14, x17, LSL #1]\n"
"addvl x14, x14, #1\n"
- "fmla z29.h, p3/M, z2.h, z9.h\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z2.h }, p3/Z, [x6, #-4, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x12]\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z12.h\n"
- "ld1h { z3.h }, p3/Z, [x6, #-3, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x12, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z8.h\n"
- "ld1h { z8.h }, p2/Z, [x12, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z14.h\n"
- "ld1h { z4.h }, p3/Z, [x6, #-2, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z5.h\n"
- "ld1h { z5.h }, p2/Z, [x12, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z0.h, z6.h\n"
- "fmla z30.h, p3/M, z0.h, z9.h\n"
- "fmla z31.h, p3/M, z0.h, z13.h\n"
- "ld1h { z0.h }, p3/Z, [x6, #-1, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z6.h\n"
- "ld1h { z6.h }, p2/Z, [x12, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z1.h, z10.h\n"
- "fmla z30.h, p3/M, z1.h, z13.h\n"
- "fmla z31.h, p3/M, z1.h, z5.h\n"
- "ld1h { z1.h }, p3/Z, [x6]\n"
- "fmla z28.h, p3/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x13, LSL #1]\n"
+ "fmla z22.h, p3/M, z16.h, z28.h\n"
+ "fmla z23.h, p3/M, z16.h, z25.h\n"
+ "ld1h { z9.h }, p3/Z, [x6, #7, MUL VL]\n"
+ "addvl x6, x6, #16\n"
+ "ld1h { z31.h }, p3/Z, [x6, #4, MUL VL]\n"
+ "fmla z20.h, p3/M, z29.h, z30.h\n"
+ "fmla z21.h, p3/M, z29.h, z5.h\n"
+ "ld1h { z12.h }, p2/Z, [x13]\n"
+ "fmla z22.h, p3/M, z29.h, z25.h\n"
+ "fmla z23.h, p3/M, z29.h, z17.h\n"
+ "ld1h { z4.h }, p3/Z, [x6, #-8, MUL VL]\n"
+ "fmla z20.h, p3/M, z3.h, z5.h\n"
+ "fmla z21.h, p3/M, z3.h, z10.h\n"
+ "ld1h { z2.h }, p2/Z, [x13, x4, LSL #1]\n"
+ "fmla z22.h, p3/M, z3.h, z17.h\n"
+ "fmla z23.h, p3/M, z3.h, z26.h\n"
+ "ld1h { z1.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "ld1h { z16.h }, p3/Z, [x6, #-7, MUL VL]\n"
+ "fmla z20.h, p3/M, z19.h, z14.h\n"
+ "fmla z21.h, p3/M, z19.h, z11.h\n"
+ "ld1h { z0.h }, p2/Z, [x13, x15, LSL #1]\n"
+ "fmla z22.h, p3/M, z19.h, z12.h\n"
+ "fmla z23.h, p3/M, z19.h, z2.h\n"
+ "ld1h { z30.h }, p3/Z, [x6, #-6, MUL VL]\n"
+ "fmla z20.h, p3/M, z18.h, z11.h\n"
+ "fmla z21.h, p3/M, z18.h, z28.h\n"
+ "ld1h { z29.h }, p2/Z, [x13, x8, LSL #1]\n"
+ "fmla z22.h, p3/M, z18.h, z2.h\n"
+ "fmla z23.h, p3/M, z18.h, z1.h\n"
+ "ld1h { z24.h }, p3/Z, [x6, #-5, MUL VL]\n"
+ "fmla z20.h, p3/M, z9.h, z28.h\n"
+ "fmla z21.h, p3/M, z9.h, z25.h\n"
+ "ld1h { z28.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "addvl x13, x13, #1\n"
+ "fmla z22.h, p3/M, z9.h, z1.h\n"
+ "fmla z23.h, p3/M, z9.h, z29.h\n"
+ "ld1h { z18.h }, p3/Z, [x6, #-4, MUL VL]\n"
+ "fmla z20.h, p3/M, z4.h, z25.h\n"
+ "fmla z21.h, p3/M, z4.h, z17.h\n"
+ "ld1h { z25.h }, p2/Z, [x12]\n"
+ "fmla z22.h, p3/M, z4.h, z29.h\n"
+ "fmla z23.h, p3/M, z4.h, z28.h\n"
+ "ld1h { z19.h }, p3/Z, [x6, #-3, MUL VL]\n"
+ "fmla z20.h, p3/M, z16.h, z17.h\n"
+ "fmla z21.h, p3/M, z16.h, z26.h\n"
+ "ld1h { z17.h }, p2/Z, [x12, x4, LSL #1]\n"
+ "fmla z22.h, p3/M, z16.h, z28.h\n"
+ "fmla z23.h, p3/M, z16.h, z0.h\n"
+ "ld1h { z26.h }, p2/Z, [x12, x17, LSL #1]\n"
+ "ld1h { z16.h }, p3/Z, [x6, #-2, MUL VL]\n"
+ "fmla z20.h, p3/M, z30.h, z12.h\n"
+ "fmla z21.h, p3/M, z30.h, z2.h\n"
+ "ld1h { z6.h }, p2/Z, [x12, x7, LSL #1]\n"
+ "fmla z22.h, p3/M, z30.h, z25.h\n"
+ "fmla z23.h, p3/M, z30.h, z17.h\n"
+ "ld1h { z11.h }, p3/Z, [x6, #-1, MUL VL]\n"
+ "fmla z20.h, p3/M, z24.h, z2.h\n"
+ "fmla z21.h, p3/M, z24.h, z1.h\n"
+ "ld1h { z8.h }, p2/Z, [x12, x8, LSL #1]\n"
+ "fmla z22.h, p3/M, z24.h, z17.h\n"
+ "fmla z23.h, p3/M, z24.h, z6.h\n"
+ "ld1h { z12.h }, p3/Z, [x6]\n"
+ "fmla z20.h, p3/M, z18.h, z1.h\n"
+ "fmla z21.h, p3/M, z18.h, z29.h\n"
+ "ld1h { z30.h }, p2/Z, [x12, x15, LSL #1]\n"
"addvl x12, x12, #1\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "fmla z30.h, p3/M, z2.h, z5.h\n"
- "fmla z31.h, p3/M, z2.h, z6.h\n"
- "ld1h { z2.h }, p3/Z, [x6, #1, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z30.h, p3/M, z3.h, z6.h\n"
- "fmla z31.h, p3/M, z3.h, z8.h\n"
+ "fmla z22.h, p3/M, z18.h, z6.h\n"
+ "fmla z23.h, p3/M, z18.h, z8.h\n"
+ "ld1h { z24.h }, p3/Z, [x6, #1, MUL VL]\n"
+ "fmla z20.h, p3/M, z19.h, z29.h\n"
+ "fmla z21.h, p3/M, z19.h, z28.h\n"
+ "ld1h { z18.h }, p2/Z, [x11]\n"
+ "fmla z22.h, p3/M, z19.h, z8.h\n"
+ "fmla z23.h, p3/M, z19.h, z26.h\n"
"ld1h { z3.h }, p3/Z, [x6, #2, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z14.h\n"
- "ld1h { z14.h }, p1/Z, [x16]\n"
- "fmla z30.h, p3/M, z4.h, z8.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x6, #3, MUL VL]\n"
+ "fmla z20.h, p3/M, z16.h, z28.h\n"
+ "fmla z21.h, p3/M, z16.h, z0.h\n"
+ "ld1h { z28.h }, p2/Z, [x11, x4, LSL #1]\n"
+ "fmla z22.h, p3/M, z16.h, z26.h\n"
+ "fmla z23.h, p3/M, z16.h, z30.h\n"
+ "ld1h { z19.h }, p3/Z, [x6, #3, MUL VL]\n"
"addvl x6, x6, #5\n"
- "fmla z28.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z0.h, z13.h\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x17, LSL #1]\n"
- "fmla z31.h, p3/M, z0.h, z12.h\n"
- "fmla z28.h, p3/M, z1.h, z13.h\n"
- "ld1h { z13.h }, p1/Z, [x8, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z1.h, z5.h\n"
- "fmla z30.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z1.h, z9.h\n"
- "fmla z28.h, p3/M, z2.h, z5.h\n"
+ "ld1h { z14.h }, p1/Z, [x14]\n"
+ "fmla z20.h, p3/M, z11.h, z25.h\n"
+ "fmla z21.h, p3/M, z11.h, z17.h\n"
+ "ld1h { z16.h }, p2/Z, [x11, x7, LSL #1]\n"
+ "fmla z22.h, p3/M, z11.h, z18.h\n"
+ "fmla z23.h, p3/M, z11.h, z28.h\n"
+ "ld1h { z18.h }, p2/Z, [x11, x8, LSL #1]\n"
+ "fmla z20.h, p3/M, z12.h, z17.h\n"
+ "fmla z21.h, p3/M, z12.h, z6.h\n"
+ "ld1h { z13.h }, p1/Z, [x16, x7, LSL #1]\n"
+ "fmla z22.h, p3/M, z12.h, z28.h\n"
+ "fmla z23.h, p3/M, z12.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x11, x17, LSL #1]\n"
+ "fmla z20.h, p3/M, z24.h, z6.h\n"
+ "fmla z21.h, p3/M, z24.h, z8.h\n"
"ld1h { z5.h }, p1/Z, [x5]\n"
- "fmla z29.h, p3/M, z2.h, z6.h\n"
- "fmla z30.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x13, LSL #1]\n"
+ "fmla z22.h, p3/M, z24.h, z16.h\n"
+ "fmla z23.h, p3/M, z24.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x11, x15, LSL #1]\n"
"whilelt p2.h, x21, %x[n_channels]\n"
- "cmp x25, %x[n_channels]\n"
+ "cmp x26, %x[n_channels]\n"
"addvl x11, x11, #1\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z6.h\n"
+ "fmla z20.h, p3/M, z3.h, z8.h\n"
+ "fmla z21.h, p3/M, z3.h, z26.h\n"
"ld1h { z6.h }, p1/Z, [x5, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z8.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p1/Z, [x5, x17, LSL #1]\n"
- "fmla z31.h, p3/M, z3.h, z12.h\n"
+ "fmla z22.h, p3/M, z3.h, z18.h\n"
+ "fmla z23.h, p3/M, z3.h, z17.h\n"
".inst 0xa040a0c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x6]\n"
"addvl x6, x6, #4\n"
- "fmla z28.h, p3/M, z4.h, z8.h\n"
- "ld1h { z8.h }, p1/Z, [x8, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
- "ld1h { z10.h }, p1/Z, [x8, x13, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p1/Z, [x5, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z9.h\n"
+ "ld1h { z11.h }, p1/Z, [x5, x8, LSL #1]\n"
+ "fmla z20.h, p3/M, z19.h, z26.h\n"
+ "fmla z21.h, p3/M, z19.h, z30.h\n"
+ "ld1h { z8.h }, p1/Z, [x16, x4, LSL #1]\n"
+ "fmla z22.h, p3/M, z19.h, z17.h\n"
+ "fmla z23.h, p3/M, z19.h, z16.h\n"
"ld1h { z9.h }, p1/Z, [x5, x7, LSL #1]\n"
+ "ld1h { z12.h }, p1/Z, [x5, x17, LSL #1]\n"
+ "ld1h { z10.h }, p1/Z, [x16, x15, LSL #1]\n"
"ld1h { z4.h }, p3/Z, [x6]\n"
"addvl x6, x6, #1\n"
- ".inst 0xc170ca3c // fclamp { z28.h-z31.h }, z17.h, z16.h\n"
- "st1h { z28.h }, p0, [x23]\n"
- "st1h { z29.h }, p0, [x23, x24, LSL #1]\n"
+ ".inst 0xc17bc9f4 // fclamp { z20.h-z23.h }, z15.h, z27.h\n"
+ "st1h { z20.h }, p0, [x24]\n"
+ "st1h { z21.h }, p0, [x24, x25, LSL #1]\n"
+ "addvl x24, x24, #1\n"
+ "st1h { z22.h }, p0, [x23]\n"
+ "st1h { z23.h }, p0, [x23, x25, LSL #1]\n"
"addvl x23, x23, #1\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x24, LSL #1]\n"
- "addvl x22, x22, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
- "movprfx z28, z18\n fmla z28.h, p3/M, z0.h, z5.h\n"
- "movprfx z29, z18\n fmla z29.h, p3/M, z0.h, z6.h\n"
- "ld1h { z5.h }, p2/Z, [x8, x17, LSL #1]\n"
+ "movprfx z28, z31\n fmla z28.h, p3/M, z0.h, z5.h\n"
+ "movprfx z29, z31\n fmla z29.h, p3/M, z0.h, z6.h\n"
+ "ld1h { z23.h }, p2/Z, [x16, x8, LSL #1]\n"
"ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "movprfx z30, z18\n fmla z30.h, p3/M, z0.h, z7.h\n"
- "movprfx z31, z18\n fmla z31.h, p3/M, z0.h, z8.h\n"
- "ld1h { z0.h }, p3/Z, [x6]\n"
+ "movprfx z30, z31\n fmla z30.h, p3/M, z0.h, z7.h\n"
+ "fmla z31.h, p3/M, z0.h, z8.h\n"
+ "ld1h { z19.h }, p3/Z, [x6]\n"
"ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"mov p0.b, p2.b\n"
"ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"add x3, x3, #0x1\n"
"fmla z28.h, p3/M, z1.h, z6.h\n"
- "ld1h { z6.h }, p2/Z, [x8, x15, LSL #1]\n"
"fmla z29.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z22.h }, p2/Z, [x16, x17, LSL #1]\n"
"add x20, x2, #0x1\n"
"fmla z30.h, p3/M, z1.h, z8.h\n"
"fmla z31.h, p3/M, z1.h, z13.h\n"
- "ld1h { z1.h }, p3/Z, [x6, #1, MUL VL]\n"
- "cmp x3, x25\n"
+ "ld1h { z21.h }, p3/Z, [x6, #1, MUL VL]\n"
+ "cmp x3, x22\n"
"csel x2, x2, x20, LT\n"
"csel x3, x3, XZR, LT\n"
"cmp x2, x21\n"
"fmla z28.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x5, x13, LSL #1]\n"
"fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z18.h }, p2/Z, [x5, x15, LSL #1]\n"
"fmla z30.h, p3/M, z2.h, z13.h\n"
- "fmla z31.h, p3/M, z2.h, z5.h\n"
- "ld1h { z2.h }, p3/Z, [x6, #2, MUL VL]\n"
+ "fmla z31.h, p3/M, z2.h, z23.h\n"
+ "ld1h { z16.h }, p3/Z, [x6, #2, MUL VL]\n"
"fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x16, x4, LSL #1]\n"
"fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z30.h, p3/M, z3.h, z5.h\n"
- "fmla z31.h, p3/M, z3.h, z6.h\n"
- "ld1h { z3.h }, p3/Z, [x6, #3, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x14, x4, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z23.h\n"
+ "fmla z31.h, p3/M, z3.h, z22.h\n"
+ "ld1h { z17.h }, p3/Z, [x6, #3, MUL VL]\n"
"fmla z28.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x16, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x16, x17, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z6.h\n"
+ "fmla z29.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z5.h }, p2/Z, [x14, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z22.h\n"
"fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x6, #4, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z7.h\n"
- "fmla z29.h, p3/M, z0.h, z8.h\n"
- "fmla z30.h, p3/M, z0.h, z14.h\n"
- "fmla z31.h, p3/M, z0.h, z11.h\n"
- "ld1h { z0.h }, p3/Z, [x6, #5, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z8.h\n"
- "ld1h { z8.h }, p2/Z, [x16, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z1.h, z13.h\n"
- "fmla z30.h, p3/M, z1.h, z11.h\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "ld1h { z1.h }, p3/Z, [x6, #6, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x16, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z2.h, z5.h\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
- "fmla z31.h, p3/M, z2.h, z9.h\n"
- "ld1h { z2.h }, p3/Z, [x6, #7, MUL VL]\n"
+ "ld1h { z3.h }, p2/Z, [x14, x8, LSL #1]\n"
+ "ld1h { z20.h }, p3/Z, [x6, #4, MUL VL]\n"
+ "fmla z28.h, p3/M, z19.h, z7.h\n"
+ "fmla z29.h, p3/M, z19.h, z8.h\n"
+ "fmla z30.h, p3/M, z19.h, z14.h\n"
+ "fmla z31.h, p3/M, z19.h, z0.h\n"
+ "ld1h { z19.h }, p3/Z, [x6, #5, MUL VL]\n"
+ "fmla z28.h, p3/M, z21.h, z8.h\n"
+ "fmla z29.h, p3/M, z21.h, z13.h\n"
+ "ld1h { z26.h }, p2/Z, [x14, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z21.h, z0.h\n"
+ "fmla z31.h, p3/M, z21.h, z5.h\n"
+ "ld1h { z18.h }, p3/Z, [x6, #6, MUL VL]\n"
+ "fmla z28.h, p3/M, z16.h, z13.h\n"
+ "fmla z29.h, p3/M, z16.h, z23.h\n"
+ "ld1h { z25.h }, p2/Z, [x14, x17, LSL #1]\n"
+ "fmla z30.h, p3/M, z16.h, z5.h\n"
+ "fmla z31.h, p3/M, z16.h, z3.h\n"
+ "ld1h { z16.h }, p3/Z, [x6, #7, MUL VL]\n"
"addvl x6, x6, #16\n"
- "fmla z28.h, p3/M, z3.h, z5.h\n"
- "ld1h { z5.h }, p2/Z, [x14]\n"
- "fmla z29.h, p3/M, z3.h, z6.h\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z31.h, p3/M, z3.h, z13.h\n"
- "ld1h { z3.h }, p3/Z, [x6, #-8, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z6.h\n"
- "ld1h { z6.h }, p2/Z, [x14, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x14, x7, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z13.h\n"
- "fmla z31.h, p3/M, z4.h, z8.h\n"
- "ld1h { z4.h }, p3/Z, [x6, #-7, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z14.h\n"
- "ld1h { z14.h }, p2/Z, [x14, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z0.h, z11.h\n"
- "fmla z30.h, p3/M, z0.h, z5.h\n"
- "fmla z31.h, p3/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p3/Z, [x6, #-6, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z1.h, z12.h\n"
- "fmla z30.h, p3/M, z1.h, z6.h\n"
- "fmla z31.h, p3/M, z1.h, z10.h\n"
- "ld1h { z1.h }, p3/Z, [x6, #-5, MUL VL]\n"
- "fmla z28.h, p3/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x14, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z2.h, z9.h\n"
- "fmla z30.h, p3/M, z2.h, z10.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z2.h }, p3/Z, [x6, #-4, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x12]\n"
- "fmla z29.h, p3/M, z3.h, z13.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z12.h\n"
- "ld1h { z3.h }, p3/Z, [x6, #-3, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z13.h\n"
- "ld1h { z13.h }, p2/Z, [x12, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z8.h\n"
- "ld1h { z8.h }, p2/Z, [x12, x15, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z14.h\n"
+ "fmla z28.h, p3/M, z17.h, z23.h\n"
+ "fmla z29.h, p3/M, z17.h, z22.h\n"
+ "ld1h { z24.h }, p2/Z, [x13]\n"
+ "fmla z30.h, p3/M, z17.h, z3.h\n"
+ "fmla z31.h, p3/M, z17.h, z25.h\n"
+ "ld1h { z17.h }, p3/Z, [x6, #-8, MUL VL]\n"
+ "fmla z28.h, p3/M, z20.h, z22.h\n"
+ "fmla z29.h, p3/M, z20.h, z10.h\n"
+ "ld1h { z23.h }, p2/Z, [x13, x4, LSL #1]\n"
+ "fmla z30.h, p3/M, z20.h, z25.h\n"
+ "fmla z31.h, p3/M, z20.h, z26.h\n"
+ "ld1h { z2.h }, p2/Z, [x13, x7, LSL #1]\n"
+ "ld1h { z22.h }, p3/Z, [x6, #-7, MUL VL]\n"
+ "fmla z28.h, p3/M, z19.h, z14.h\n"
+ "fmla z29.h, p3/M, z19.h, z0.h\n"
+ "ld1h { z1.h }, p2/Z, [x13, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z19.h, z24.h\n"
+ "fmla z31.h, p3/M, z19.h, z23.h\n"
+ "ld1h { z21.h }, p3/Z, [x6, #-6, MUL VL]\n"
+ "fmla z28.h, p3/M, z18.h, z0.h\n"
+ "fmla z29.h, p3/M, z18.h, z5.h\n"
+ "ld1h { z0.h }, p2/Z, [x13, x8, LSL #1]\n"
+ "fmla z30.h, p3/M, z18.h, z23.h\n"
+ "fmla z31.h, p3/M, z18.h, z2.h\n"
+ "ld1h { z20.h }, p3/Z, [x6, #-5, MUL VL]\n"
+ "fmla z28.h, p3/M, z16.h, z5.h\n"
+ "fmla z29.h, p3/M, z16.h, z3.h\n"
+ "ld1h { z19.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "fmla z30.h, p3/M, z16.h, z2.h\n"
+ "fmla z31.h, p3/M, z16.h, z0.h\n"
+ "ld1h { z18.h }, p3/Z, [x6, #-4, MUL VL]\n"
+ "fmla z28.h, p3/M, z17.h, z3.h\n"
+ "fmla z29.h, p3/M, z17.h, z25.h\n"
+ "ld1h { z16.h }, p2/Z, [x12]\n"
+ "fmla z30.h, p3/M, z17.h, z0.h\n"
+ "fmla z31.h, p3/M, z17.h, z19.h\n"
+ "ld1h { z17.h }, p3/Z, [x6, #-3, MUL VL]\n"
+ "fmla z28.h, p3/M, z22.h, z25.h\n"
+ "fmla z29.h, p3/M, z22.h, z26.h\n"
+ "ld1h { z7.h }, p2/Z, [x12, x4, LSL #1]\n"
+ "fmla z30.h, p3/M, z22.h, z19.h\n"
+ "fmla z31.h, p3/M, z22.h, z1.h\n"
+ "ld1h { z12.h }, p2/Z, [x12, x17, LSL #1]\n"
"ld1h { z4.h }, p3/Z, [x6, #-2, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z5.h\n"
- "ld1h { z5.h }, p2/Z, [x12, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z0.h, z6.h\n"
- "fmla z30.h, p3/M, z0.h, z9.h\n"
- "fmla z31.h, p3/M, z0.h, z13.h\n"
- "ld1h { z0.h }, p3/Z, [x6, #-1, MUL VL]\n"
- "fmla z28.h, p3/M, z1.h, z6.h\n"
- "ld1h { z6.h }, p2/Z, [x12, x17, LSL #1]\n"
- "fmla z29.h, p3/M, z1.h, z10.h\n"
- "fmla z30.h, p3/M, z1.h, z13.h\n"
- "fmla z31.h, p3/M, z1.h, z5.h\n"
- "ld1h { z1.h }, p3/Z, [x6]\n"
- "fmla z28.h, p3/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x13, LSL #1]\n"
- "fmla z29.h, p3/M, z2.h, z11.h\n"
- "fmla z30.h, p3/M, z2.h, z5.h\n"
- "fmla z31.h, p3/M, z2.h, z6.h\n"
- "ld1h { z2.h }, p3/Z, [x6, #1, MUL VL]\n"
- "fmla z28.h, p3/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11]\n"
- "fmla z29.h, p3/M, z3.h, z12.h\n"
- "fmla z30.h, p3/M, z3.h, z6.h\n"
- "fmla z31.h, p3/M, z3.h, z8.h\n"
- "ld1h { z3.h }, p3/Z, [x6, #2, MUL VL]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x4, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z14.h\n"
- "fmla z30.h, p3/M, z4.h, z8.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p3/Z, [x6, #3, MUL VL]\n"
- "fmla z28.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z0.h, z13.h\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x17, LSL #1]\n"
- "fmla z31.h, p3/M, z0.h, z12.h\n"
- "fmla z28.h, p3/M, z1.h, z13.h\n"
- "fmla z29.h, p3/M, z1.h, z5.h\n"
- "fmla z30.h, p3/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z31.h, p3/M, z1.h, z9.h\n"
- "fmla z28.h, p3/M, z2.h, z5.h\n"
- "fmla z29.h, p3/M, z2.h, z6.h\n"
- "fmla z30.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x13, LSL #1]\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "fmla z28.h, p3/M, z3.h, z6.h\n"
- "fmla z29.h, p3/M, z3.h, z8.h\n"
- "fmla z30.h, p3/M, z3.h, z11.h\n"
- "fmla z31.h, p3/M, z3.h, z12.h\n"
- "fmla z28.h, p3/M, z4.h, z8.h\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
+ "fmla z28.h, p3/M, z21.h, z24.h\n"
+ "fmla z29.h, p3/M, z21.h, z23.h\n"
+ "ld1h { z26.h }, p2/Z, [x12, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z21.h, z16.h\n"
+ "fmla z31.h, p3/M, z21.h, z7.h\n"
+ "ld1h { z25.h }, p3/Z, [x6, #-1, MUL VL]\n"
+ "fmla z28.h, p3/M, z20.h, z23.h\n"
+ "fmla z29.h, p3/M, z20.h, z2.h\n"
+ "ld1h { z24.h }, p2/Z, [x12, x8, LSL #1]\n"
+ "fmla z30.h, p3/M, z20.h, z7.h\n"
+ "fmla z31.h, p3/M, z20.h, z26.h\n"
+ "ld1h { z23.h }, p3/Z, [x6]\n"
+ "fmla z28.h, p3/M, z18.h, z2.h\n"
+ "fmla z29.h, p3/M, z18.h, z0.h\n"
+ "ld1h { z22.h }, p2/Z, [x12, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z18.h, z26.h\n"
+ "fmla z31.h, p3/M, z18.h, z24.h\n"
+ "ld1h { z21.h }, p3/Z, [x6, #1, MUL VL]\n"
+ "fmla z28.h, p3/M, z17.h, z0.h\n"
+ "fmla z29.h, p3/M, z17.h, z19.h\n"
+ "ld1h { z18.h }, p2/Z, [x11]\n"
+ "fmla z30.h, p3/M, z17.h, z24.h\n"
+ "fmla z31.h, p3/M, z17.h, z12.h\n"
+ "ld1h { z20.h }, p3/Z, [x6, #2, MUL VL]\n"
+ "fmla z28.h, p3/M, z4.h, z19.h\n"
+ "fmla z29.h, p3/M, z4.h, z1.h\n"
+ "ld1h { z17.h }, p2/Z, [x11, x4, LSL #1]\n"
"fmla z30.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z9.h\n"
- ".inst 0xc170ca3c // fclamp { z28.h-z31.h }, z17.h, z16.h\n"
- "st1h { z28.h }, p0, [x23]\n"
- "st1h { z29.h }, p0, [x23, x24, LSL #1]\n"
- "st1h { z30.h }, p0, [x22]\n"
- "st1h { z31.h }, p0, [x22, x24, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z22.h\n"
+ "ld1h { z19.h }, p3/Z, [x6, #3, MUL VL]\n"
+ "fmla z28.h, p3/M, z25.h, z16.h\n"
+ "fmla z29.h, p3/M, z25.h, z7.h\n"
+ "ld1h { z16.h }, p2/Z, [x11, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z25.h, z18.h\n"
+ "fmla z31.h, p3/M, z25.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x11, x8, LSL #1]\n"
+ "fmla z28.h, p3/M, z23.h, z7.h\n"
+ "fmla z29.h, p3/M, z23.h, z26.h\n"
+ "fmla z30.h, p3/M, z23.h, z17.h\n"
+ "fmla z31.h, p3/M, z23.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x11, x17, LSL #1]\n"
+ "fmla z28.h, p3/M, z21.h, z26.h\n"
+ "fmla z29.h, p3/M, z21.h, z24.h\n"
+ "fmla z30.h, p3/M, z21.h, z16.h\n"
+ "fmla z31.h, p3/M, z21.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x11, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z20.h, z24.h\n"
+ "fmla z29.h, p3/M, z20.h, z12.h\n"
+ "fmla z30.h, p3/M, z20.h, z18.h\n"
+ "fmla z31.h, p3/M, z20.h, z17.h\n"
+ "fmla z28.h, p3/M, z19.h, z12.h\n"
+ "fmla z29.h, p3/M, z19.h, z22.h\n"
+ "fmla z30.h, p3/M, z19.h, z17.h\n"
+ "fmla z31.h, p3/M, z19.h, z16.h\n"
+ ".inst 0xc17bc9fc // fclamp { z28.h-z31.h }, z15.h, z27.h\n"
+ "st1h { z28.h }, p0, [x24]\n"
+ "st1h { z29.h }, p0, [x24, x25, LSL #1]\n"
+ "st1h { z30.h }, p0, [x23]\n"
+ "st1h { z31.h }, p0, [x23, x25, LSL #1]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
@@ -583,4 +583,4 @@ void sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 313036876e..e76f92e3cf 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,7 +25,7 @@
#include <cstddef>
#include <cstdint>
-#if defined(ARM_COMPUTE_ENABLE_SME2)
+#if defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
namespace arm_conv {
namespace depthwise {
@@ -99,439 +99,439 @@ void sme2_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "add x17, %x[params_struct], %[offsetof_Args_inptrs]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "mov x15, #0x0\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mov x16, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0x25207810 // ptrue pn8.b\n"
"whilelt p3.h, XZR, %x[n_channels]\n"
"ptrue p2.b\n"
- "cnth x13\n"
- "ldp x12, x11, [x20, #0x0]\n"
+ "cnth x14\n"
+ "ldp x13, x12, [x20, #0x0]\n"
+ "cmp x14, %x[n_channels]\n"
+ "ld1rh { z15.h }, p2/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "sub x11, XZR, x14\n"
"ldp x10, x9, [x20, #0x10]\n"
- "cmp x13, %x[n_channels]\n"
- "ld1rh { z18.h }, p2/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "sub x28, XZR, x13\n"
- "ldp x27, x26, [x16, #0x0]\n"
- "ld1h { z17.h }, p2/Z, [x14]\n"
- "addvl x14, x14, #1\n"
- "ldp x25, x24, [x16, #0x10]\n"
- ".inst 0xa040a1c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x14]\n"
- "addvl x14, x14, #4\n"
- "ldp x23, x22, [x16, #0x20]\n"
- "ld1rh { z16.h }, p2/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z5.h }, p3/Z, [x27, x15, LSL #1]\n"
- "ldp x21, x20, [x16, #0x30]\n"
- "ld1h { z6.h }, p3/Z, [x26, x15, LSL #1]\n"
- "ldp x27, x26, [x16, #0x40]\n"
- "ld1h { z4.h }, p2/Z, [x14]\n"
- "addvl x14, x14, #1\n"
- "ld1h { z7.h }, p3/Z, [x25, x15, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x24, x15, LSL #1]\n"
- "ld1h { z9.h }, p3/Z, [x23, x15, LSL #1]\n"
- "ld1h { z13.h }, p3/Z, [x22, x15, LSL #1]\n"
- "ld1h { z11.h }, p3/Z, [x21, x15, LSL #1]\n"
- "ld1h { z12.h }, p3/Z, [x20, x15, LSL #1]\n"
- "ld1h { z10.h }, p3/Z, [x27, x15, LSL #1]\n"
- "ld1h { z14.h }, p3/Z, [x26, x15, LSL #1]\n"
+ "ld1h { z26.h }, p2/Z, [x15]\n"
+ "addvl x15, x15, #1\n"
+ "ldp x21, x20, [x17, #0x0]\n"
+ ".inst 0xa040a1e0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x15]\n"
+ "addvl x15, x15, #4\n"
+ "ldp x27, x26, [x17, #0x10]\n"
+ "ld1rh { z27.h }, p2/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ldp x25, x24, [x17, #0x20]\n"
+ "ld1h { z4.h }, p2/Z, [x15]\n"
+ "addvl x15, x15, #1\n"
+ "ld1h { z5.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldp x23, x22, [x17, #0x30]\n"
+ "ld1h { z6.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldp x21, x20, [x17, #0x40]\n"
+ "ld1h { z7.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ld1h { z8.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "ld1h { z9.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ld1h { z13.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ld1h { z11.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ld1h { z12.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ld1h { z10.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ld1h { z14.h }, p3/Z, [x20, x16, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z17\n fmla z28.h, p2/M, z0.h, z5.h\n"
- "movprfx z29, z17\n fmla z29.h, p2/M, z0.h, z6.h\n"
- "ldr x25, [x16, #0x50]\n"
- "whilelt p1.h, x13, %x[n_channels]\n"
- "movprfx z30, z17\n fmla z30.h, p2/M, z0.h, z7.h\n"
- "movprfx z31, z17\n fmla z31.h, p2/M, z0.h, z8.h\n"
- "ldr x24, [x16, #0x58]\n"
- "ld1h { z0.h }, p2/Z, [x14]\n"
- "ldr x23, [x16, #0x60]\n"
- "inch x28\n"
+ "movprfx z28, z26\n fmla z28.h, p2/M, z0.h, z5.h\n"
+ "movprfx z29, z26\n fmla z29.h, p2/M, z0.h, z6.h\n"
+ "ldr x21, [x17, #0x50]\n"
+ "whilelt p1.h, x14, %x[n_channels]\n"
+ "movprfx z30, z26\n fmla z30.h, p2/M, z0.h, z7.h\n"
+ "movprfx z31, z26\n fmla z31.h, p2/M, z0.h, z8.h\n"
+ "ldr x20, [x17, #0x58]\n"
+ "ld1h { z0.h }, p2/Z, [x15]\n"
+ "ldr x22, [x17, #0x60]\n"
+ "inch x11\n"
"mov p0.b, p3.b\n"
- "ld1h { z5.h }, p3/Z, [x25, x15, LSL #1]\n"
- "ldr x22, [x16, #0x68]\n"
+ "ld1h { z17.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x25, [x17, #0x68]\n"
"fmla z28.h, p2/M, z1.h, z6.h\n"
"fmla z29.h, p2/M, z1.h, z9.h\n"
- "ld1h { z6.h }, p3/Z, [x24, x15, LSL #1]\n"
- "ldr x21, [x16, #0x70]\n"
+ "ld1h { z24.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x24, [x17, #0x70]\n"
"fmla z30.h, p2/M, z1.h, z8.h\n"
"fmla z31.h, p2/M, z1.h, z13.h\n"
- "ld1h { z1.h }, p2/Z, [x14, #1, MUL VL]\n"
- "ldr x20, [x16, #0x78]\n"
- "ldr x27, [x16, #0x80]\n"
+ "ld1h { z26.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "ldr x21, [x17, #0x78]\n"
+ "ldr x27, [x17, #0x80]\n"
+ "ldr x20, [x17, #0x88]\n"
"fmla z28.h, p2/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x23, x15, LSL #1]\n"
"fmla z29.h, p2/M, z2.h, z11.h\n"
- "ldr x26, [x16, #0x88]\n"
+ "ld1h { z16.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x90]\n"
"fmla z30.h, p2/M, z2.h, z13.h\n"
- "fmla z31.h, p2/M, z2.h, z5.h\n"
- "ld1h { z2.h }, p2/Z, [x14, #2, MUL VL]\n"
- "ldr x25, [x16, #0x90]\n"
- "ldr x24, [x16, #0x98]\n"
+ "fmla z31.h, p2/M, z2.h, z17.h\n"
+ "ld1h { z23.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "ldr x26, [x17, #0x98]\n"
+ "ldr x22, [x17, #0xa0]\n"
"fmla z28.h, p2/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p3/Z, [x22, x15, LSL #1]\n"
"fmla z29.h, p2/M, z3.h, z12.h\n"
- "ldr x23, [x16, #0xa0]\n"
- "fmla z30.h, p2/M, z3.h, z5.h\n"
- "fmla z31.h, p2/M, z3.h, z6.h\n"
- "ld1h { z3.h }, p2/Z, [x14, #3, MUL VL]\n"
- "ldr x22, [x16, #0xa8]\n"
+ "ld1h { z22.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xa8]\n"
+ "fmla z30.h, p2/M, z3.h, z17.h\n"
+ "fmla z31.h, p2/M, z3.h, z24.h\n"
+ "ld1h { z5.h }, p2/Z, [x15, #3, MUL VL]\n"
"fmla z28.h, p2/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p3/Z, [x21, x15, LSL #1]\n"
- "ldr x21, [x16, #0xb0]\n"
- "fmla z29.h, p2/M, z4.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x20, x15, LSL #1]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z30.h, p2/M, z4.h, z6.h\n"
+ "fmla z29.h, p2/M, z4.h, z16.h\n"
+ "ld1h { z19.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xb0]\n"
+ "fmla z30.h, p2/M, z4.h, z24.h\n"
"fmla z31.h, p2/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p2/Z, [x14, #4, MUL VL]\n"
+ "ld1h { z18.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0xb8]\n"
+ "ld1h { z16.h }, p2/Z, [x15, #4, MUL VL]\n"
"fmla z28.h, p2/M, z0.h, z7.h\n"
"fmla z29.h, p2/M, z0.h, z8.h\n"
"fmla z30.h, p2/M, z0.h, z14.h\n"
- "fmla z31.h, p2/M, z0.h, z11.h\n"
- "ld1h { z0.h }, p2/Z, [x14, #5, MUL VL]\n"
- "fmla z28.h, p2/M, z1.h, z8.h\n"
- "ld1h { z8.h }, p3/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0xc8]\n"
+ "fmla z31.h, p2/M, z0.h, z22.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #5, MUL VL]\n"
+ "fmla z28.h, p2/M, z26.h, z8.h\n"
+ "fmla z29.h, p2/M, z26.h, z13.h\n"
+ "ld1h { z1.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x28, [x17, #0xc8]\n"
+ "fmla z30.h, p2/M, z26.h, z22.h\n"
+ "fmla z31.h, p2/M, z26.h, z19.h\n"
+ "ld1h { z9.h }, p2/Z, [x15, #6, MUL VL]\n"
+ "fmla z28.h, p2/M, z23.h, z13.h\n"
+ "fmla z29.h, p2/M, z23.h, z17.h\n"
+ "ld1h { z6.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ldr x20, [x17, #0xc0]\n"
+ "fmla z30.h, p2/M, z23.h, z19.h\n"
+ "fmla z31.h, p2/M, z23.h, z18.h\n"
+ "ld1h { z21.h }, p2/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "ld1h { z26.h }, p2/Z, [x15, #4, MUL VL]\n"
+ "fmla z28.h, p2/M, z5.h, z17.h\n"
+ "fmla z29.h, p2/M, z5.h, z24.h\n"
+ "ld1h { z4.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x17, #0xd0]\n"
+ "fmla z30.h, p2/M, z5.h, z18.h\n"
+ "fmla z31.h, p2/M, z5.h, z6.h\n"
+ "ld1h { z17.h }, p2/Z, [x15, #-8, MUL VL]\n"
+ "fmla z28.h, p2/M, z16.h, z24.h\n"
+ "fmla z29.h, p2/M, z16.h, z10.h\n"
+ "ld1h { z0.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "ldr x27, [x17, #0xd8]\n"
+ "fmla z30.h, p2/M, z16.h, z6.h\n"
+ "fmla z31.h, p2/M, z16.h, z1.h\n"
+ "ld1h { z25.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x22, [x17, #0xe0]\n"
+ "ld1h { z16.h }, p2/Z, [x15, #-7, MUL VL]\n"
+ "fmla z28.h, p2/M, z20.h, z14.h\n"
+ "fmla z29.h, p2/M, z20.h, z22.h\n"
+ "ld1h { z24.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x26, [x17, #0xf8]\n"
+ "fmla z30.h, p2/M, z20.h, z4.h\n"
+ "fmla z31.h, p2/M, z20.h, z0.h\n"
+ "ld1h { z23.h }, p2/Z, [x15, #-6, MUL VL]\n"
+ "fmla z28.h, p2/M, z9.h, z22.h\n"
+ "fmla z29.h, p2/M, z9.h, z19.h\n"
+ "ld1h { z3.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xe8]\n"
+ "fmla z30.h, p2/M, z9.h, z0.h\n"
+ "fmla z31.h, p2/M, z9.h, z25.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #-5, MUL VL]\n"
+ "fmla z28.h, p2/M, z21.h, z19.h\n"
+ "fmla z29.h, p2/M, z21.h, z18.h\n"
+ "ld1h { z22.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xf0]\n"
+ "fmla z30.h, p2/M, z21.h, z25.h\n"
+ "fmla z31.h, p2/M, z21.h, z3.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, #-4, MUL VL]\n"
+ "fmla z28.h, p2/M, z17.h, z18.h\n"
+ "fmla z29.h, p2/M, z17.h, z6.h\n"
+ "ld1h { z18.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x100]\n"
+ "fmla z30.h, p2/M, z17.h, z3.h\n"
+ "fmla z31.h, p2/M, z17.h, z22.h\n"
+ "ld1h { z17.h }, p2/Z, [x15, #-3, MUL VL]\n"
+ "fmla z28.h, p2/M, z16.h, z6.h\n"
+ "fmla z29.h, p2/M, z16.h, z1.h\n"
+ "ld1h { z5.h }, p3/Z, [x28, x16, LSL #1]\n"
+ "ldr x20, [x17, #0x108]\n"
+ "fmla z30.h, p2/M, z16.h, z22.h\n"
+ "fmla z31.h, p2/M, z16.h, z24.h\n"
+ "ld1h { z21.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x15, #-2, MUL VL]\n"
+ "fmla z28.h, p2/M, z23.h, z4.h\n"
+ "fmla z29.h, p2/M, z23.h, z0.h\n"
+ "ld1h { z13.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x110]\n"
+ "fmla z30.h, p2/M, z23.h, z18.h\n"
+ "fmla z31.h, p2/M, z23.h, z5.h\n"
+ "ld1h { z9.h }, p2/Z, [x15, #-1, MUL VL]\n"
+ "fmla z28.h, p2/M, z20.h, z0.h\n"
+ "fmla z29.h, p2/M, z20.h, z25.h\n"
+ "ld1h { z23.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ldr x22, [x17, #0x118]\n"
+ "fmla z30.h, p2/M, z20.h, z5.h\n"
+ "fmla z31.h, p2/M, z20.h, z13.h\n"
+ "ld1h { z1.h }, p2/Z, [x15]\n"
+ "fmla z28.h, p2/M, z19.h, z25.h\n"
+ "fmla z29.h, p2/M, z19.h, z3.h\n"
+ "ld1h { z14.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "fmla z30.h, p2/M, z19.h, z13.h\n"
+ "fmla z31.h, p2/M, z19.h, z23.h\n"
+ "ld1h { z7.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "fmla z28.h, p2/M, z17.h, z3.h\n"
+ "fmla z29.h, p2/M, z17.h, z22.h\n"
+ "ld1h { z0.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "fmla z30.h, p2/M, z17.h, z23.h\n"
+ "fmla z31.h, p2/M, z17.h, z21.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z28.h, p2/M, z16.h, z22.h\n"
+ "fmla z29.h, p2/M, z16.h, z24.h\n"
+ "ld1h { z17.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "fmla z30.h, p2/M, z16.h, z21.h\n"
+ "fmla z31.h, p2/M, z16.h, z14.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #5\n"
+ "fmla z28.h, p2/M, z9.h, z18.h\n"
+ "fmla z29.h, p2/M, z9.h, z5.h\n"
+ "ld1h { z16.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "fmla z30.h, p2/M, z9.h, z0.h\n"
+ "fmla z31.h, p2/M, z9.h, z17.h\n"
+ "ld1h { z18.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldp x21, x20, [x17, #0x0]\n"
+ "fmla z28.h, p2/M, z1.h, z5.h\n"
"fmla z29.h, p2/M, z1.h, z13.h\n"
- "fmla z30.h, p2/M, z1.h, z11.h\n"
- "fmla z31.h, p2/M, z1.h, z12.h\n"
- "ld1h { z1.h }, p2/Z, [x14, #6, MUL VL]\n"
- "fmla z28.h, p2/M, z2.h, z13.h\n"
- "ld1h { z13.h }, p3/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0xc0]\n"
- "fmla z29.h, p2/M, z2.h, z5.h\n"
- "fmla z30.h, p2/M, z2.h, z12.h\n"
- "fmla z31.h, p2/M, z2.h, z9.h\n"
- "ld1h { z2.h }, p2/Z, [x14, #7, MUL VL]\n"
- "addvl x14, x14, #16\n"
- "ld1h { z17.h }, p2/Z, [x14, #4, MUL VL]\n"
- "fmla z28.h, p2/M, z3.h, z5.h\n"
- "ld1h { z5.h }, p3/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x16, #0xd0]\n"
- "fmla z29.h, p2/M, z3.h, z6.h\n"
- "fmla z30.h, p2/M, z3.h, z9.h\n"
- "fmla z31.h, p2/M, z3.h, z13.h\n"
- "ld1h { z3.h }, p2/Z, [x14, #-8, MUL VL]\n"
- "fmla z28.h, p2/M, z4.h, z6.h\n"
- "ld1h { z6.h }, p3/Z, [x24, x15, LSL #1]\n"
- "ldr x24, [x16, #0xd8]\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
- "ld1h { z10.h }, p3/Z, [x23, x15, LSL #1]\n"
- "ldr x23, [x16, #0xe0]\n"
- "fmla z30.h, p2/M, z4.h, z13.h\n"
- "fmla z31.h, p2/M, z4.h, z8.h\n"
- "ld1h { z4.h }, p2/Z, [x14, #-7, MUL VL]\n"
- "fmla z28.h, p2/M, z0.h, z14.h\n"
- "ld1h { z14.h }, p3/Z, [x20, x15, LSL #1]\n"
- "ldr x20, [x16, #0xf8]\n"
- "fmla z29.h, p2/M, z0.h, z11.h\n"
- "fmla z30.h, p2/M, z0.h, z5.h\n"
- "fmla z31.h, p2/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p2/Z, [x14, #-6, MUL VL]\n"
- "fmla z28.h, p2/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p3/Z, [x22, x15, LSL #1]\n"
- "ldr x22, [x16, #0xe8]\n"
- "fmla z29.h, p2/M, z1.h, z12.h\n"
- "fmla z30.h, p2/M, z1.h, z6.h\n"
- "fmla z31.h, p2/M, z1.h, z10.h\n"
- "ld1h { z1.h }, p2/Z, [x14, #-5, MUL VL]\n"
- "fmla z28.h, p2/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p3/Z, [x21, x15, LSL #1]\n"
- "ldr x21, [x16, #0xf0]\n"
- "fmla z29.h, p2/M, z2.h, z9.h\n"
- "fmla z30.h, p2/M, z2.h, z10.h\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "ld1h { z2.h }, p2/Z, [x14, #-4, MUL VL]\n"
- "fmla z28.h, p2/M, z3.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0x100]\n"
- "fmla z29.h, p2/M, z3.h, z13.h\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "ld1h { z3.h }, p2/Z, [x14, #-3, MUL VL]\n"
- "fmla z28.h, p2/M, z4.h, z13.h\n"
- "ld1h { z13.h }, p3/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0x108]\n"
- "fmla z29.h, p2/M, z4.h, z8.h\n"
- "ld1h { z8.h }, p3/Z, [x23, x15, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z31.h, p2/M, z4.h, z14.h\n"
- "ld1h { z4.h }, p2/Z, [x14, #-2, MUL VL]\n"
- "fmla z28.h, p2/M, z0.h, z5.h\n"
- "ld1h { z5.h }, p3/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x16, #0x110]\n"
- "fmla z29.h, p2/M, z0.h, z6.h\n"
- "fmla z30.h, p2/M, z0.h, z9.h\n"
- "fmla z31.h, p2/M, z0.h, z13.h\n"
- "ld1h { z0.h }, p2/Z, [x14, #-1, MUL VL]\n"
- "fmla z28.h, p2/M, z1.h, z6.h\n"
- "ld1h { z6.h }, p3/Z, [x24, x15, LSL #1]\n"
- "ldr x24, [x16, #0x118]\n"
- "fmla z29.h, p2/M, z1.h, z10.h\n"
- "fmla z30.h, p2/M, z1.h, z13.h\n"
- "fmla z31.h, p2/M, z1.h, z5.h\n"
- "ld1h { z1.h }, p2/Z, [x14]\n"
- "fmla z28.h, p2/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p3/Z, [x22, x15, LSL #1]\n"
- "fmla z29.h, p2/M, z2.h, z11.h\n"
- "fmla z30.h, p2/M, z2.h, z5.h\n"
- "fmla z31.h, p2/M, z2.h, z6.h\n"
- "ld1h { z2.h }, p2/Z, [x14, #1, MUL VL]\n"
- "fmla z28.h, p2/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p3/Z, [x21, x15, LSL #1]\n"
- "fmla z29.h, p2/M, z3.h, z12.h\n"
- "fmla z30.h, p2/M, z3.h, z6.h\n"
- "fmla z31.h, p2/M, z3.h, z8.h\n"
- "ld1h { z3.h }, p2/Z, [x14, #2, MUL VL]\n"
- "fmla z28.h, p2/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p3/Z, [x20, x15, LSL #1]\n"
- "fmla z29.h, p2/M, z4.h, z14.h\n"
- "fmla z30.h, p2/M, z4.h, z8.h\n"
- "fmla z31.h, p2/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p2/Z, [x14, #3, MUL VL]\n"
- "addvl x14, x14, #5\n"
- "fmla z28.h, p2/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x27, x15, LSL #1]\n"
- "fmla z29.h, p2/M, z0.h, z13.h\n"
- "fmla z30.h, p2/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p3/Z, [x26, x15, LSL #1]\n"
- "ldp x27, x26, [x16, #0x0]\n"
- "fmla z31.h, p2/M, z0.h, z12.h\n"
- "fmla z28.h, p2/M, z1.h, z13.h\n"
- "fmla z29.h, p2/M, z1.h, z5.h\n"
- "fmla z30.h, p2/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p3/Z, [x25, x15, LSL #1]\n"
- "fmla z31.h, p2/M, z1.h, z9.h\n"
- "fmla z28.h, p2/M, z2.h, z5.h\n"
- "ld1h { z5.h }, p1/Z, [x27, x13, LSL #1]\n"
- "fmla z29.h, p2/M, z2.h, z6.h\n"
- "fmla z30.h, p2/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x24, x15, LSL #1]\n"
- "ldp x25, x24, [x16, #0x10]\n"
- "ldp x23, x22, [x16, #0x20]\n"
- "inch x15\n"
- "ldp x21, x20, [x16, #0x30]\n"
- "whilelt p3.h, x15, %x[n_channels]\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "fmla z28.h, p2/M, z3.h, z6.h\n"
- "ld1h { z6.h }, p1/Z, [x26, x13, LSL #1]\n"
- "ldp x27, x26, [x16, #0x40]\n"
- "fmla z29.h, p2/M, z3.h, z8.h\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "ld1h { z7.h }, p1/Z, [x25, x13, LSL #1]\n"
- "ld1h { z13.h }, p1/Z, [x22, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "fmla z28.h, p2/M, z4.h, z8.h\n"
- "ld1h { z8.h }, p1/Z, [x24, x13, LSL #1]\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "ld1h { z11.h }, p1/Z, [x21, x13, LSL #1]\n"
- "ld1h { z12.h }, p1/Z, [x20, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z4.h, z9.h\n"
- "ld1h { z9.h }, p1/Z, [x23, x13, LSL #1]\n"
- "ld1h { z10.h }, p1/Z, [x27, x13, LSL #1]\n"
- "ld1h { z14.h }, p1/Z, [x26, x13, LSL #1]\n"
- "inch x13\n"
- ".inst 0xa040a1c0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x14]\n"
- "addvl x14, x14, #4\n"
- "cmp x13, %x[n_channels]\n"
- ".inst 0xc170ca5c // fclamp { z28.h-z31.h }, z18.h, z16.h\n"
- "ld1h { z4.h }, p2/Z, [x14]\n"
- "addvl x14, x14, #1\n"
- "st1h { z28.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z29.h }, p0, [x11, x28, LSL #1]\n"
- "st1h { z30.h }, p0, [x10, x28, LSL #1]\n"
- "st1h { z31.h }, p0, [x9, x28, LSL #1]\n"
+ "fmla z30.h, p2/M, z1.h, z17.h\n"
+ "fmla z31.h, p2/M, z1.h, z16.h\n"
+ "ld1h { z17.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "fmla z28.h, p2/M, z7.h, z13.h\n"
+ "fmla z29.h, p2/M, z7.h, z23.h\n"
+ "ld1h { z5.h }, p1/Z, [x21, x14, LSL #1]\n"
+ "fmla z30.h, p2/M, z7.h, z16.h\n"
+ "fmla z31.h, p2/M, z7.h, z18.h\n"
+ "ld1h { z16.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldp x27, x26, [x17, #0x10]\n"
+ "ldp x25, x24, [x17, #0x20]\n"
+ "inch x16\n"
+ "ldp x23, x22, [x17, #0x30]\n"
+ "whilelt p3.h, x16, %x[n_channels]\n"
+ "fmla z28.h, p2/M, z20.h, z23.h\n"
+ "fmla z29.h, p2/M, z20.h, z21.h\n"
+ "ld1h { z6.h }, p1/Z, [x20, x14, LSL #1]\n"
+ "ldp x21, x20, [x17, #0x40]\n"
+ "fmla z30.h, p2/M, z20.h, z18.h\n"
+ "fmla z31.h, p2/M, z20.h, z17.h\n"
+ "ld1h { z7.h }, p1/Z, [x27, x14, LSL #1]\n"
+ "ld1h { z13.h }, p1/Z, [x24, x14, LSL #1]\n"
+ "ld1h { z11.h }, p1/Z, [x23, x14, LSL #1]\n"
+ "fmla z28.h, p2/M, z19.h, z21.h\n"
+ "fmla z29.h, p2/M, z19.h, z14.h\n"
+ "ld1h { z8.h }, p1/Z, [x26, x14, LSL #1]\n"
+ "fmla z30.h, p2/M, z19.h, z17.h\n"
+ "fmla z31.h, p2/M, z19.h, z16.h\n"
+ "ld1h { z9.h }, p1/Z, [x25, x14, LSL #1]\n"
+ "ld1h { z12.h }, p1/Z, [x22, x14, LSL #1]\n"
+ "ld1h { z10.h }, p1/Z, [x21, x14, LSL #1]\n"
+ "ld1h { z14.h }, p1/Z, [x20, x14, LSL #1]\n"
+ "inch x14\n"
+ ".inst 0xa040a1e0 // ld1h { z0.h-z3.h }, pn8.b/Z, [x15]\n"
+ "addvl x15, x15, #4\n"
+ "cmp x14, %x[n_channels]\n"
+ ".inst 0xc17bc9fc // fclamp { z28.h-z31.h }, z15.h, z27.h\n"
+ "ld1h { z4.h }, p2/Z, [x15]\n"
+ "addvl x15, x15, #1\n"
+ "st1h { z28.h }, p0, [x13, x11, LSL #1]\n"
+ "st1h { z29.h }, p0, [x12, x11, LSL #1]\n"
+ "st1h { z30.h }, p0, [x10, x11, LSL #1]\n"
+ "st1h { z31.h }, p0, [x9, x11, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z17\n fmla z28.h, p2/M, z0.h, z5.h\n"
- "movprfx z29, z17\n fmla z29.h, p2/M, z0.h, z6.h\n"
- "ldr x25, [x16, #0x50]\n"
- "inch x28\n"
- "movprfx z30, z17\n fmla z30.h, p2/M, z0.h, z7.h\n"
- "movprfx z31, z17\n fmla z31.h, p2/M, z0.h, z8.h\n"
- "ldr x24, [x16, #0x58]\n"
- "ld1h { z0.h }, p2/Z, [x14]\n"
- "ldr x23, [x16, #0x60]\n"
+ "movprfx z28, z26\n fmla z28.h, p2/M, z0.h, z5.h\n"
+ "movprfx z29, z26\n fmla z29.h, p2/M, z0.h, z6.h\n"
+ "ldr x22, [x17, #0x50]\n"
+ "inch x11\n"
+ "movprfx z30, z26\n fmla z30.h, p2/M, z0.h, z7.h\n"
+ "movprfx z31, z26\n fmla z31.h, p2/M, z0.h, z8.h\n"
+ "ldr x21, [x17, #0x58]\n"
+ "ld1h { z19.h }, p2/Z, [x15]\n"
+ "ldr x20, [x17, #0x60]\n"
"mov p0.b, p3.b\n"
- "ld1h { z5.h }, p3/Z, [x25, x15, LSL #1]\n"
- "ldr x22, [x16, #0x68]\n"
+ "ld1h { z23.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x25, [x17, #0x68]\n"
"fmla z28.h, p2/M, z1.h, z6.h\n"
"fmla z29.h, p2/M, z1.h, z9.h\n"
- "ld1h { z6.h }, p3/Z, [x24, x15, LSL #1]\n"
- "ldr x21, [x16, #0x70]\n"
+ "ld1h { z22.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x24, [x17, #0x70]\n"
"fmla z30.h, p2/M, z1.h, z8.h\n"
"fmla z31.h, p2/M, z1.h, z13.h\n"
- "ld1h { z1.h }, p2/Z, [x14, #1, MUL VL]\n"
- "ldr x20, [x16, #0x78]\n"
- "ldr x27, [x16, #0x80]\n"
+ "ld1h { z21.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "ldr x23, [x17, #0x78]\n"
+ "ldr x27, [x17, #0x80]\n"
+ "ldr x22, [x17, #0x88]\n"
"fmla z28.h, p2/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x23, x15, LSL #1]\n"
"fmla z29.h, p2/M, z2.h, z11.h\n"
- "ldr x26, [x16, #0x88]\n"
+ "ld1h { z18.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x90]\n"
"fmla z30.h, p2/M, z2.h, z13.h\n"
- "fmla z31.h, p2/M, z2.h, z5.h\n"
- "ld1h { z2.h }, p2/Z, [x14, #2, MUL VL]\n"
- "ldr x25, [x16, #0x90]\n"
- "ldr x24, [x16, #0x98]\n"
- "fmla z28.h, p2/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p3/Z, [x22, x15, LSL #1]\n"
- "fmla z29.h, p2/M, z3.h, z12.h\n"
- "ldr x23, [x16, #0xa0]\n"
- "fmla z30.h, p2/M, z3.h, z5.h\n"
- "fmla z31.h, p2/M, z3.h, z6.h\n"
- "ld1h { z3.h }, p2/Z, [x14, #3, MUL VL]\n"
- "ldr x22, [x16, #0xa8]\n"
- "fmla z28.h, p2/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p3/Z, [x21, x15, LSL #1]\n"
- "ldr x21, [x16, #0xb0]\n"
- "fmla z29.h, p2/M, z4.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x20, x15, LSL #1]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z30.h, p2/M, z4.h, z6.h\n"
- "fmla z31.h, p2/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p2/Z, [x14, #4, MUL VL]\n"
- "fmla z28.h, p2/M, z0.h, z7.h\n"
- "fmla z29.h, p2/M, z0.h, z8.h\n"
- "fmla z30.h, p2/M, z0.h, z14.h\n"
- "fmla z31.h, p2/M, z0.h, z11.h\n"
- "ld1h { z0.h }, p2/Z, [x14, #5, MUL VL]\n"
- "fmla z28.h, p2/M, z1.h, z8.h\n"
- "ld1h { z8.h }, p3/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0xc8]\n"
- "fmla z29.h, p2/M, z1.h, z13.h\n"
- "fmla z30.h, p2/M, z1.h, z11.h\n"
- "fmla z31.h, p2/M, z1.h, z12.h\n"
- "ld1h { z1.h }, p2/Z, [x14, #6, MUL VL]\n"
- "fmla z28.h, p2/M, z2.h, z13.h\n"
- "ld1h { z13.h }, p3/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0xc0]\n"
- "fmla z29.h, p2/M, z2.h, z5.h\n"
- "fmla z30.h, p2/M, z2.h, z12.h\n"
- "fmla z31.h, p2/M, z2.h, z9.h\n"
- "ld1h { z2.h }, p2/Z, [x14, #7, MUL VL]\n"
- "addvl x14, x14, #16\n"
- "fmla z28.h, p2/M, z3.h, z5.h\n"
- "ld1h { z5.h }, p3/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x16, #0xd0]\n"
- "fmla z29.h, p2/M, z3.h, z6.h\n"
- "fmla z30.h, p2/M, z3.h, z9.h\n"
- "fmla z31.h, p2/M, z3.h, z13.h\n"
- "ld1h { z3.h }, p2/Z, [x14, #-8, MUL VL]\n"
- "fmla z28.h, p2/M, z4.h, z6.h\n"
- "ld1h { z6.h }, p3/Z, [x24, x15, LSL #1]\n"
- "ldr x24, [x16, #0xd8]\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
- "ld1h { z10.h }, p3/Z, [x23, x15, LSL #1]\n"
- "ldr x23, [x16, #0xe0]\n"
- "fmla z30.h, p2/M, z4.h, z13.h\n"
- "fmla z31.h, p2/M, z4.h, z8.h\n"
- "ld1h { z4.h }, p2/Z, [x14, #-7, MUL VL]\n"
- "fmla z28.h, p2/M, z0.h, z14.h\n"
- "ld1h { z14.h }, p3/Z, [x20, x15, LSL #1]\n"
- "ldr x20, [x16, #0xf8]\n"
- "fmla z29.h, p2/M, z0.h, z11.h\n"
- "fmla z30.h, p2/M, z0.h, z5.h\n"
- "fmla z31.h, p2/M, z0.h, z6.h\n"
- "ld1h { z0.h }, p2/Z, [x14, #-6, MUL VL]\n"
- "fmla z28.h, p2/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p3/Z, [x22, x15, LSL #1]\n"
- "ldr x22, [x16, #0xe8]\n"
- "fmla z29.h, p2/M, z1.h, z12.h\n"
- "fmla z30.h, p2/M, z1.h, z6.h\n"
- "fmla z31.h, p2/M, z1.h, z10.h\n"
- "ld1h { z1.h }, p2/Z, [x14, #-5, MUL VL]\n"
- "fmla z28.h, p2/M, z2.h, z12.h\n"
- "ld1h { z12.h }, p3/Z, [x21, x15, LSL #1]\n"
- "ldr x21, [x16, #0xf0]\n"
- "fmla z29.h, p2/M, z2.h, z9.h\n"
- "fmla z30.h, p2/M, z2.h, z10.h\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "ld1h { z2.h }, p2/Z, [x14, #-4, MUL VL]\n"
- "fmla z28.h, p2/M, z3.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x27, x15, LSL #1]\n"
- "ldr x27, [x16, #0x100]\n"
- "fmla z29.h, p2/M, z3.h, z13.h\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "ld1h { z3.h }, p2/Z, [x14, #-3, MUL VL]\n"
- "fmla z28.h, p2/M, z4.h, z13.h\n"
- "ld1h { z13.h }, p3/Z, [x26, x15, LSL #1]\n"
- "ldr x26, [x16, #0x108]\n"
- "fmla z29.h, p2/M, z4.h, z8.h\n"
- "ld1h { z8.h }, p3/Z, [x23, x15, LSL #1]\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z31.h, p2/M, z4.h, z14.h\n"
- "ld1h { z4.h }, p2/Z, [x14, #-2, MUL VL]\n"
- "fmla z28.h, p2/M, z0.h, z5.h\n"
- "ld1h { z5.h }, p3/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x16, #0x110]\n"
- "fmla z29.h, p2/M, z0.h, z6.h\n"
- "fmla z30.h, p2/M, z0.h, z9.h\n"
- "fmla z31.h, p2/M, z0.h, z13.h\n"
- "ld1h { z0.h }, p2/Z, [x14, #-1, MUL VL]\n"
- "fmla z28.h, p2/M, z1.h, z6.h\n"
- "ld1h { z6.h }, p3/Z, [x24, x15, LSL #1]\n"
- "ldr x24, [x16, #0x118]\n"
- "fmla z29.h, p2/M, z1.h, z10.h\n"
- "fmla z30.h, p2/M, z1.h, z13.h\n"
- "fmla z31.h, p2/M, z1.h, z5.h\n"
- "ld1h { z1.h }, p2/Z, [x14]\n"
- "fmla z28.h, p2/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p3/Z, [x22, x15, LSL #1]\n"
- "fmla z29.h, p2/M, z2.h, z11.h\n"
- "fmla z30.h, p2/M, z2.h, z5.h\n"
- "fmla z31.h, p2/M, z2.h, z6.h\n"
- "ld1h { z2.h }, p2/Z, [x14, #1, MUL VL]\n"
+ "fmla z31.h, p2/M, z2.h, z23.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "ldr x26, [x17, #0x98]\n"
+ "ldr x20, [x17, #0xa0]\n"
"fmla z28.h, p2/M, z3.h, z11.h\n"
- "ld1h { z11.h }, p3/Z, [x21, x15, LSL #1]\n"
"fmla z29.h, p2/M, z3.h, z12.h\n"
- "fmla z30.h, p2/M, z3.h, z6.h\n"
- "fmla z31.h, p2/M, z3.h, z8.h\n"
- "ld1h { z3.h }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z0.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xa8]\n"
+ "fmla z30.h, p2/M, z3.h, z23.h\n"
+ "fmla z31.h, p2/M, z3.h, z22.h\n"
+ "ld1h { z17.h }, p2/Z, [x15, #3, MUL VL]\n"
"fmla z28.h, p2/M, z4.h, z12.h\n"
- "ld1h { z12.h }, p3/Z, [x20, x15, LSL #1]\n"
- "fmla z29.h, p2/M, z4.h, z14.h\n"
- "fmla z30.h, p2/M, z4.h, z8.h\n"
+ "fmla z29.h, p2/M, z4.h, z18.h\n"
+ "ld1h { z5.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xb0]\n"
+ "fmla z30.h, p2/M, z4.h, z22.h\n"
"fmla z31.h, p2/M, z4.h, z10.h\n"
- "ld1h { z4.h }, p2/Z, [x14, #3, MUL VL]\n"
- "fmla z28.h, p2/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x27, x15, LSL #1]\n"
- "fmla z29.h, p2/M, z0.h, z13.h\n"
- "fmla z30.h, p2/M, z0.h, z11.h\n"
- "ld1h { z11.h }, p3/Z, [x26, x15, LSL #1]\n"
- "fmla z31.h, p2/M, z0.h, z12.h\n"
- "fmla z28.h, p2/M, z1.h, z13.h\n"
- "fmla z29.h, p2/M, z1.h, z5.h\n"
- "fmla z30.h, p2/M, z1.h, z12.h\n"
- "ld1h { z12.h }, p3/Z, [x25, x15, LSL #1]\n"
- "fmla z31.h, p2/M, z1.h, z9.h\n"
- "fmla z28.h, p2/M, z2.h, z5.h\n"
- "fmla z29.h, p2/M, z2.h, z6.h\n"
- "fmla z30.h, p2/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p3/Z, [x24, x15, LSL #1]\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "fmla z28.h, p2/M, z3.h, z6.h\n"
- "fmla z29.h, p2/M, z3.h, z8.h\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "fmla z28.h, p2/M, z4.h, z8.h\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z31.h, p2/M, z4.h, z9.h\n"
- ".inst 0xc170ca5c // fclamp { z28.h-z31.h }, z18.h, z16.h\n"
- "st1h { z28.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z29.h }, p0, [x11, x28, LSL #1]\n"
- "st1h { z30.h }, p0, [x10, x28, LSL #1]\n"
- "st1h { z31.h }, p0, [x9, x28, LSL #1]\n"
+ "ld1h { z3.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x17, #0xb8]\n"
+ "ld1h { z20.h }, p2/Z, [x15, #4, MUL VL]\n"
+ "fmla z28.h, p2/M, z19.h, z7.h\n"
+ "fmla z29.h, p2/M, z19.h, z8.h\n"
+ "fmla z30.h, p2/M, z19.h, z14.h\n"
+ "fmla z31.h, p2/M, z19.h, z0.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, #5, MUL VL]\n"
+ "fmla z28.h, p2/M, z21.h, z8.h\n"
+ "fmla z29.h, p2/M, z21.h, z13.h\n"
+ "ld1h { z26.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x28, [x17, #0xc8]\n"
+ "fmla z30.h, p2/M, z21.h, z0.h\n"
+ "fmla z31.h, p2/M, z21.h, z5.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, #6, MUL VL]\n"
+ "fmla z28.h, p2/M, z16.h, z13.h\n"
+ "fmla z29.h, p2/M, z16.h, z23.h\n"
+ "ld1h { z25.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ldr x22, [x17, #0xc0]\n"
+ "fmla z30.h, p2/M, z16.h, z5.h\n"
+ "fmla z31.h, p2/M, z16.h, z3.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z28.h, p2/M, z17.h, z23.h\n"
+ "fmla z29.h, p2/M, z17.h, z22.h\n"
+ "ld1h { z24.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0xd0]\n"
+ "fmla z30.h, p2/M, z17.h, z3.h\n"
+ "fmla z31.h, p2/M, z17.h, z25.h\n"
+ "ld1h { z17.h }, p2/Z, [x15, #-8, MUL VL]\n"
+ "fmla z28.h, p2/M, z20.h, z22.h\n"
+ "fmla z29.h, p2/M, z20.h, z10.h\n"
+ "ld1h { z23.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "ldr x27, [x17, #0xd8]\n"
+ "fmla z30.h, p2/M, z20.h, z25.h\n"
+ "fmla z31.h, p2/M, z20.h, z26.h\n"
+ "ld1h { z2.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x20, [x17, #0xe0]\n"
+ "ld1h { z22.h }, p2/Z, [x15, #-7, MUL VL]\n"
+ "fmla z28.h, p2/M, z19.h, z14.h\n"
+ "fmla z29.h, p2/M, z19.h, z0.h\n"
+ "ld1h { z1.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ldr x26, [x17, #0xf8]\n"
+ "fmla z30.h, p2/M, z19.h, z24.h\n"
+ "fmla z31.h, p2/M, z19.h, z23.h\n"
+ "ld1h { z21.h }, p2/Z, [x15, #-6, MUL VL]\n"
+ "fmla z28.h, p2/M, z18.h, z0.h\n"
+ "fmla z29.h, p2/M, z18.h, z5.h\n"
+ "ld1h { z0.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xe8]\n"
+ "fmla z30.h, p2/M, z18.h, z23.h\n"
+ "fmla z31.h, p2/M, z18.h, z2.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #-5, MUL VL]\n"
+ "fmla z28.h, p2/M, z16.h, z5.h\n"
+ "fmla z29.h, p2/M, z16.h, z3.h\n"
+ "ld1h { z19.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xf0]\n"
+ "fmla z30.h, p2/M, z16.h, z2.h\n"
+ "fmla z31.h, p2/M, z16.h, z0.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, #-4, MUL VL]\n"
+ "fmla z28.h, p2/M, z17.h, z3.h\n"
+ "fmla z29.h, p2/M, z17.h, z25.h\n"
+ "ld1h { z16.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x100]\n"
+ "fmla z30.h, p2/M, z17.h, z0.h\n"
+ "fmla z31.h, p2/M, z17.h, z19.h\n"
+ "ld1h { z17.h }, p2/Z, [x15, #-3, MUL VL]\n"
+ "fmla z28.h, p2/M, z22.h, z25.h\n"
+ "fmla z29.h, p2/M, z22.h, z26.h\n"
+ "ld1h { z7.h }, p3/Z, [x28, x16, LSL #1]\n"
+ "ldr x22, [x17, #0x108]\n"
+ "fmla z30.h, p2/M, z22.h, z19.h\n"
+ "fmla z31.h, p2/M, z22.h, z1.h\n"
+ "ld1h { z9.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ld1h { z4.h }, p2/Z, [x15, #-2, MUL VL]\n"
+ "fmla z28.h, p2/M, z21.h, z24.h\n"
+ "fmla z29.h, p2/M, z21.h, z23.h\n"
+ "ld1h { z26.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x110]\n"
+ "fmla z30.h, p2/M, z21.h, z16.h\n"
+ "fmla z31.h, p2/M, z21.h, z7.h\n"
+ "ld1h { z25.h }, p2/Z, [x15, #-1, MUL VL]\n"
+ "fmla z28.h, p2/M, z20.h, z23.h\n"
+ "fmla z29.h, p2/M, z20.h, z2.h\n"
+ "ld1h { z24.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ldr x20, [x17, #0x118]\n"
+ "fmla z30.h, p2/M, z20.h, z7.h\n"
+ "fmla z31.h, p2/M, z20.h, z26.h\n"
+ "ld1h { z23.h }, p2/Z, [x15]\n"
+ "fmla z28.h, p2/M, z18.h, z2.h\n"
+ "fmla z29.h, p2/M, z18.h, z0.h\n"
+ "ld1h { z22.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "fmla z30.h, p2/M, z18.h, z26.h\n"
+ "fmla z31.h, p2/M, z18.h, z24.h\n"
+ "ld1h { z21.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "fmla z28.h, p2/M, z17.h, z0.h\n"
+ "fmla z29.h, p2/M, z17.h, z19.h\n"
+ "ld1h { z18.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "fmla z30.h, p2/M, z17.h, z24.h\n"
+ "fmla z31.h, p2/M, z17.h, z9.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z28.h, p2/M, z4.h, z19.h\n"
+ "fmla z29.h, p2/M, z4.h, z1.h\n"
+ "ld1h { z17.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "fmla z30.h, p2/M, z4.h, z9.h\n"
+ "fmla z31.h, p2/M, z4.h, z22.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z28.h, p2/M, z25.h, z16.h\n"
+ "fmla z29.h, p2/M, z25.h, z7.h\n"
+ "ld1h { z16.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "fmla z30.h, p2/M, z25.h, z18.h\n"
+ "fmla z31.h, p2/M, z25.h, z17.h\n"
+ "ld1h { z18.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "fmla z28.h, p2/M, z23.h, z7.h\n"
+ "fmla z29.h, p2/M, z23.h, z26.h\n"
+ "fmla z30.h, p2/M, z23.h, z17.h\n"
+ "fmla z31.h, p2/M, z23.h, z16.h\n"
+ "ld1h { z17.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "fmla z28.h, p2/M, z21.h, z26.h\n"
+ "fmla z29.h, p2/M, z21.h, z24.h\n"
+ "fmla z30.h, p2/M, z21.h, z16.h\n"
+ "fmla z31.h, p2/M, z21.h, z18.h\n"
+ "ld1h { z16.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "fmla z28.h, p2/M, z20.h, z24.h\n"
+ "fmla z29.h, p2/M, z20.h, z9.h\n"
+ "fmla z30.h, p2/M, z20.h, z18.h\n"
+ "fmla z31.h, p2/M, z20.h, z17.h\n"
+ "fmla z28.h, p2/M, z19.h, z9.h\n"
+ "fmla z29.h, p2/M, z19.h, z22.h\n"
+ "fmla z30.h, p2/M, z19.h, z17.h\n"
+ "fmla z31.h, p2/M, z19.h, z16.h\n"
+ ".inst 0xc17bc9fc // fclamp { z28.h-z31.h }, z15.h, z27.h\n"
+ "st1h { z28.h }, p0, [x13, x11, LSL #1]\n"
+ "st1h { z29.h }, p0, [x12, x11, LSL #1]\n"
+ "st1h { z30.h }, p0, [x10, x11, LSL #1]\n"
+ "st1h { z31.h }, p0, [x9, x11, LSL #1]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
} // namespace depthwise
} // namespace arm_conv
-#endif // defined(ARM_COMPUTE_ENABLE_SME2)
+#endif // defined(ARM_COMPUTE_ENABLE_SME2) && defined(__ARM_FP16_ARGS) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
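The hunk above regenerates the SME2 FP16 kernel: the instruction schedule now uses x17 as scratch (hence its addition to the clobber list) and the closing guard additionally requires the FP16 ABI and vector-arithmetic feature macros. The clobber change follows the standard GCC/Clang extended-asm contract; a minimal sketch of that rule, illustrative only and not the library's code:

static inline void scratch_x17_sketch()
{
    __asm__ __volatile__(
        "mov x17, #0\n" // once the generated body touches x17...
        :
        :
        : "x17"         // ...x17 must be declared clobbered, exactly as
    );                  //    the updated clobber list above now does.
}

Without the clobber entry the compiler may keep a live value in x17 across the asm block and silently miscompile the surrounding function.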
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 96cfd5e497..e0c7d71e61 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,240 +88,240 @@ void sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ptrue p3.b\n"
- ".inst 0x25207810 // ptrue pn8.b\n"
"mov x4, #0x0\n"
"mov x5, #0x0\n"
+ "ptrue p3.b\n"
+ ".inst 0x25207810 // ptrue pn8.b\n"
"1:" // Tile loop
"str x4, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x22, #0x2\n"
"str x5, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "mul x20, x4, x21\n" // offset = tile_i * ld_input_row
"ldr x6, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x20, x4, x21\n" // offset = tile_i * ld_input_row
+ "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
"madd x20, x5, x6, x20\n" // offset += tile_j * ld_input_col
+ "add x17, x6, x6\n"
"mul x20, x20, x22\n" // offset *= kernel_stride * output_size
- "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "add x16, x17, x6\n"
"add x7, x7, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x8, x7, x21, LSL #2\n"
- "add x17, x8, x21, LSL #2\n"
- "add x16, x6, x6\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x14, x17, x21, LSL #2\n"
- "add x13, x16, x6\n"
+ "add x15, x7, x21, LSL #2\n"
+ "add x14, x15, x21, LSL #2\n"
+ "add x13, x14, x21, LSL #2\n"
"cbnz x5, 2f\n"
"ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "sub x21, x20, x5\n"
- "sub x21, x21, #0x1\n"
"lsl x12, %x[n_channels], #0x2\n"
- "mov x20, #0x8\n"
- "and x21, x21, #0x3fffff\n"
- "mul x20, x20, x6\n"
- "orr x12, x12, x21, LSL #22\n"
- "orr x12, x12, x20, LSL #38\n"
- "add x11, x8, x6, LSL #2\n"
- "add x10, x7, x13, LSL #2\n"
- "add x9, x8, x16, LSL #2\n"
- "add x28, x17, x6, LSL #2\n"
- "add x27, x14, x13, LSL #2\n"
+ "mov x21, #0x8\n"
+ "mul x21, x21, x6\n"
+ "add x11, x15, x6, LSL #2\n"
+ "add x10, x7, x16, LSL #2\n"
+ "add x9, x15, x17, LSL #2\n"
+ "sub x20, x20, x5\n"
+ "add x28, x14, x6, LSL #2\n"
+ "sub x20, x20, #0x1\n"
+ "add x27, x13, x16, LSL #2\n"
+ "and x20, x20, #0x3fffff\n"
"add x26, x7, x6, LSL #2\n"
- "add x25, x7, x16, LSL #2\n"
- "add x24, x17, x16, LSL #2\n"
- "add x23, x8, x13, LSL #2\n"
- "add x22, x17, x13, LSL #2\n"
- "add x21, x14, x6, LSL #2\n"
- "add x20, x14, x16, LSL #2\n"
+ "orr x12, x12, x20, LSL #22\n"
+ "add x25, x7, x17, LSL #2\n"
+ "orr x12, x12, x21, LSL #38\n"
+ "add x24, x14, x17, LSL #2\n"
+ "add x23, x15, x16, LSL #2\n"
+ "add x22, x14, x16, LSL #2\n"
+ "add x21, x13, x6, LSL #2\n"
+ "add x20, x13, x17, LSL #2\n"
".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
".inst 0xf8ac48fa // rprfm pldonce, x12, [x7]\n"
".inst 0xf8ac495a // rprfm pldonce, x12, [x10]\n"
".inst 0xf8ac493a // rprfm pldonce, x12, [x9]\n"
".inst 0xf8ac4b9a // rprfm pldonce, x12, [x28]\n"
- ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
+ ".inst 0xf8ac49ba // rprfm pldonce, x12, [x13]\n"
".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- ".inst 0xf8ac491a // rprfm pldonce, x12, [x8]\n"
+ ".inst 0xf8ac49fa // rprfm pldonce, x12, [x15]\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
- ".inst 0xf8ac4a3a // rprfm pldonce, x12, [x17]\n"
+ ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x4, x22\n" // offset = tile_i * ld_output_row
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"mov x20, #0x2\n"
- "ld1w { z22.s }, p3/Z, [x15]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "madd x21, x5, x25, x21\n" // offset += tile_j * ld_output_col
- "addvl x15, x15, #1\n"
- ".inst 0xa040c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15]\n"
+ "ld1w { z22.s }, p3/Z, [x8]\n"
+ "addvl x8, x8, #1\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "cntw x25\n"
+ ".inst 0xa040c100 // ld1w { z0.s-z3.s }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
"ldr x24, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "mul x21, x21, x20\n" // offset *= output_tile_size
- "cntw x23\n"
- "ld1rw { z21.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "addvl x15, x15, #4\n"
- "add x24, x24, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "addvl x15, x15, #4\n"
- "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "cmp x23, %x[n_channels]\n"
- "add x22, x24, x22, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x15]\n"
+ ".inst 0xa040c104 // ld1w { z4.s-z7.s }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
+ "mul x22, x4, x23\n" // offset = tile_i * ld_output_row
+ "cmp x25, %x[n_channels]\n"
+ "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "madd x22, x5, x26, x22\n" // offset += tile_j * ld_output_col
+ "ld1rw { z21.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "ld1w { z9.s }, p2/Z, [x8, x6, LSL #2]\n"
+ "mul x22, x22, x20\n" // offset *= output_tile_size
+ "sub x20, XZR, x25\n"
+ "ld1w { z8.s }, p3/Z, [x8]\n"
+ "add x24, x24, x22, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "ld1w { z9.s }, p2/Z, [x15, x6, LSL #2]\n"
+ "addvl x8, x8, #1\n"
+ "add x23, x24, x23, LSL #2\n"
"ld1w { z10.s }, p2/Z, [x7]\n"
- "addvl x15, x15, #1\n"
- "ld1w { z11.s }, p2/Z, [x7, x13, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x8, x16, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x17, x6, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x7, x16, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x15, x17, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x14, x6, LSL #2]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
- "movprfx z28, z22\n fmla z28.s, p3/M, z4.s, z9.s\n"
- "movprfx z29, z22\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "whilelt p1.s, x23, %x[n_channels]\n"
- "incw x21\n"
- "movprfx z30, z22\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "movprfx z31, z22\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z18.s }, p2/Z, [x14]\n"
- "incw x23\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z17.s }, p2/Z, [x14, x13, LSL #2]\n"
- "mov p0.b, p2.b\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x17, x16, LSL #2]\n"
- "incw x20\n"
- "fmla z28.s, p3/M, z5.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x7, x6, LSL #2]\n"
- "fmla z30.s, p3/M, z6.s, z18.s\n"
- "fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z10.s }, p2/Z, [x7, x16, LSL #2]\n"
- "addvl x7, x7, #1\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z29.s, p3/M, z6.s, z13.s\n"
- "ld1w { z22.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
- "fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z17.s\n"
- "ld1w { z9.s }, p2/Z, [x8]\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z0.s, z11.s\n"
- "ld1w { z19.s }, p2/Z, [x8, x13, LSL #2]\n"
- "addvl x8, x8, #1\n"
- "fmla z30.s, p3/M, z5.s, z16.s\n"
- "fmla z31.s, p3/M, z4.s, z16.s\n"
- "fmla z28.s, p3/M, z2.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z10.s\n"
- "ld1w { z18.s }, p2/Z, [x17]\n"
- "fmla z30.s, p3/M, z0.s, z9.s\n"
- "fmla z31.s, p3/M, z2.s, z19.s\n"
- "fmla z28.s, p3/M, z8.s, z16.s\n"
- "fmla z29.s, p3/M, z7.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x17, x13, LSL #2]\n"
- "addvl x17, x17, #1\n"
- "fmla z30.s, p3/M, z3.s, z18.s\n"
- "fmla z31.s, p3/M, z5.s, z17.s\n"
- "ld1w { z13.s }, p1/Z, [x17, x6, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z9.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x6, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z19.s\n"
- "fmla z30.s, p3/M, z7.s, z16.s\n"
- "fmla z31.s, p3/M, z6.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x16, LSL #2]\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "fmla z29.s, p3/M, z8.s, z17.s\n"
- ".inst 0xa040c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- "fmla z30.s, p3/M, z8.s, z16.s\n"
- "fmla z31.s, p3/M, z7.s, z16.s\n"
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- "cmp x23, %x[n_channels]\n"
- ".inst 0xc1aecabc // fclamp { z28.s-z31.s }, z21.s, z14.s\n"
- "addvl x14, x14, #1\n"
- "ld1w { z9.s }, p1/Z, [x8, x6, LSL #2]\n"
- "ld1w { z10.s }, p1/Z, [x7]\n"
- "st1w { z28.s }, p0, [x24]\n"
- "ld1w { z11.s }, p1/Z, [x7, x13, LSL #2]\n"
- "st1w { z29.s }, p0, [x24, x25, LSL #2]\n"
- "addvl x24, x24, #1\n"
- "ld1w { z12.s }, p1/Z, [x8, x16, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x25, LSL #2]\n"
- "addvl x22, x22, #1\n"
- "ld1w { z8.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
- "blt 3b\n"
- "4:" // Tile loop: Channel tail
"movprfx z24, z22\n fmla z24.s, p3/M, z4.s, z9.s\n"
"movprfx z25, z22\n fmla z25.s, p3/M, z3.s, z9.s\n"
- "ldr x5, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "add x5, x5, #0x1\n"
+ "whilelt p1.s, x25, %x[n_channels]\n"
+ "incw x21\n"
"movprfx z26, z22\n fmla z26.s, p3/M, z1.s, z9.s\n"
"movprfx z27, z22\n fmla z27.s, p3/M, z0.s, z9.s\n"
- "ld1w { z17.s }, p2/Z, [x14]\n"
- "ldr x4, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ld1w { z17.s }, p2/Z, [x13]\n"
+ "incw x25\n"
+ "ld1w { z22.s }, p3/Z, [x8]\n"
+ "addvl x8, x8, #1\n"
+ "mov p0.b, p2.b\n"
+ "incw x20\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
"fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x13, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ld1w { z16.s }, p2/Z, [x13, x16, LSL #2]\n"
"fmla z26.s, p3/M, z2.s, z12.s\n"
"fmla z27.s, p3/M, z1.s, z12.s\n"
- "ld1w { z20.s }, p2/Z, [x17, x16, LSL #2]\n"
- "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "ld1w { z18.s }, p2/Z, [x14, x17, LSL #2]\n"
"fmla z24.s, p3/M, z5.s, z12.s\n"
"fmla z25.s, p3/M, z4.s, z12.s\n"
- "ld1w { z18.s }, p2/Z, [x7, x6, LSL #2]\n"
- "cmp x5, x20\n"
+ "ld1w { z28.s }, p2/Z, [x7, x6, LSL #2]\n"
"fmla z26.s, p3/M, z6.s, z17.s\n"
"fmla z27.s, p3/M, z3.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x7, x16, LSL #2]\n"
- "add x20, x4, #0x1\n"
+ "ld1w { z14.s }, p2/Z, [x7, x17, LSL #2]\n"
+ "addvl x7, x7, #1\n"
"fmla z24.s, p3/M, z7.s, z13.s\n"
"fmla z25.s, p3/M, z6.s, z13.s\n"
- "csel x4, x4, x20, LT\n"
- "mov p0.b, p2.b\n"
"fmla z26.s, p3/M, z4.s, z13.s\n"
"fmla z27.s, p3/M, z8.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x8]\n"
- "csel x5, x5, XZR, LT\n"
- "fmla z24.s, p3/M, z1.s, z18.s\n"
- "fmla z25.s, p3/M, z0.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x8, x13, LSL #2]\n"
- "cmp x4, x21\n"
- "fmla z26.s, p3/M, z5.s, z20.s\n"
- "fmla z27.s, p3/M, z4.s, z20.s\n"
- "fmla z24.s, p3/M, z2.s, z17.s\n"
- "fmla z25.s, p3/M, z1.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x17]\n"
- "fmla z26.s, p3/M, z0.s, z16.s\n"
- "fmla z27.s, p3/M, z2.s, z19.s\n"
- "fmla z24.s, p3/M, z8.s, z20.s\n"
- "fmla z25.s, p3/M, z7.s, z20.s\n"
- "ld1w { z17.s }, p2/Z, [x17, x13, LSL #2]\n"
- "fmla z26.s, p3/M, z3.s, z18.s\n"
- "fmla z27.s, p3/M, z5.s, z17.s\n"
- "fmla z24.s, p3/M, z3.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x6, LSL #2]\n"
- "fmla z25.s, p3/M, z5.s, z19.s\n"
- "fmla z26.s, p3/M, z7.s, z16.s\n"
- "fmla z27.s, p3/M, z6.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x16, LSL #2]\n"
- "fmla z24.s, p3/M, z6.s, z18.s\n"
- "fmla z25.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x15]\n"
+ "fmla z24.s, p3/M, z1.s, z28.s\n"
+ "fmla z25.s, p3/M, z0.s, z28.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, x16, LSL #2]\n"
+ "addvl x15, x15, #1\n"
+ "fmla z26.s, p3/M, z5.s, z18.s\n"
+ "fmla z27.s, p3/M, z4.s, z18.s\n"
+ "fmla z24.s, p3/M, z2.s, z14.s\n"
+ "fmla z25.s, p3/M, z1.s, z14.s\n"
+ "ld1w { z19.s }, p2/Z, [x14]\n"
+ "fmla z26.s, p3/M, z0.s, z17.s\n"
+ "fmla z27.s, p3/M, z2.s, z16.s\n"
+ "fmla z24.s, p3/M, z8.s, z18.s\n"
+ "fmla z25.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x14, x16, LSL #2]\n"
+ "addvl x14, x14, #1\n"
+ "fmla z26.s, p3/M, z3.s, z19.s\n"
+ "ld1w { z13.s }, p1/Z, [x14, x6, LSL #2]\n"
+ "fmla z27.s, p3/M, z5.s, z18.s\n"
+ "fmla z24.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x13, x6, LSL #2]\n"
+ "fmla z25.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x13, x17, LSL #2]\n"
+ "whilelt p2.s, x21, %x[n_channels]\n"
+ "cmp x25, %x[n_channels]\n"
+ ".inst 0xa040c100 // ld1w { z0.s-z3.s }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
+ "addvl x13, x13, #1\n"
+ "fmla z26.s, p3/M, z7.s, z17.s\n"
+ "fmla z27.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z11.s }, p1/Z, [x7, x16, LSL #2]\n"
+ "fmla z24.s, p3/M, z6.s, z19.s\n"
+ "fmla z25.s, p3/M, z8.s, z18.s\n"
+ "ld1w { z9.s }, p1/Z, [x15, x6, LSL #2]\n"
+ "ld1w { z10.s }, p1/Z, [x7]\n"
"fmla z26.s, p3/M, z8.s, z16.s\n"
"fmla z27.s, p3/M, z7.s, z16.s\n"
- ".inst 0xc1aecab8 // fclamp { z24.s-z27.s }, z21.s, z14.s\n"
+ ".inst 0xa040c104 // ld1w { z4.s-z7.s }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
+ "ld1w { z12.s }, p1/Z, [x15, x17, LSL #2]\n"
+ "ld1w { z8.s }, p3/Z, [x8]\n"
+ "addvl x8, x8, #1\n"
+ ".inst 0xc1b5c9f8 // fclamp { z24.s-z27.s }, z15.s, z21.s\n"
"st1w { z24.s }, p0, [x24]\n"
- "st1w { z25.s }, p0, [x24, x25, LSL #2]\n"
- "st1w { z26.s }, p0, [x22]\n"
- "st1w { z27.s }, p0, [x22, x25, LSL #2]\n"
+ "st1w { z25.s }, p0, [x24, x26, LSL #2]\n"
+ "addvl x24, x24, #1\n"
+ "st1w { z26.s }, p0, [x23]\n"
+ "st1w { z27.s }, p0, [x23, x26, LSL #2]\n"
+ "addvl x23, x23, #1\n"
+ "blt 3b\n"
+ "4:" // Tile loop: Channel tail
+ "movprfx z28, z22\n fmla z28.s, p3/M, z4.s, z9.s\n"
+ "movprfx z29, z22\n fmla z29.s, p3/M, z3.s, z9.s\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z30, z22\n fmla z30.s, p3/M, z1.s, z9.s\n"
+ "movprfx z31, z22\n fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z18.s }, p2/Z, [x13]\n"
+ "ldr x4, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x5, x5, #0x1\n"
+ "fmla z28.s, p3/M, z0.s, z10.s\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z17.s }, p2/Z, [x13, x16, LSL #2]\n"
+ "add x20, x4, #0x1\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z20.s }, p2/Z, [x14, x17, LSL #2]\n"
+ "cmp x5, x22\n"
+ "csel x4, x4, x20, LT\n"
+ "csel x5, x5, XZR, LT\n"
+ "cmp x4, x21\n"
+ "fmla z28.s, p3/M, z5.s, z12.s\n"
+ "fmla z29.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z16.s }, p2/Z, [x7, x6, LSL #2]\n"
+ "fmla z30.s, p3/M, z6.s, z18.s\n"
+ "fmla z31.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z18.s }, p2/Z, [x7, x17, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z13.s\n"
+ "fmla z29.s, p3/M, z6.s, z13.s\n"
+ "fmla z30.s, p3/M, z4.s, z13.s\n"
+ "fmla z31.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x15]\n"
+ "fmla z28.s, p3/M, z1.s, z16.s\n"
+ "fmla z29.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z20.s\n"
+ "fmla z31.s, p3/M, z4.s, z20.s\n"
+ "fmla z28.s, p3/M, z2.s, z18.s\n"
+ "fmla z29.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x14]\n"
+ "fmla z30.s, p3/M, z0.s, z17.s\n"
+ "fmla z31.s, p3/M, z2.s, z16.s\n"
+ "fmla z28.s, p3/M, z8.s, z20.s\n"
+ "fmla z29.s, p3/M, z7.s, z20.s\n"
+ "ld1w { z18.s }, p2/Z, [x14, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z19.s\n"
+ "fmla z31.s, p3/M, z5.s, z18.s\n"
+ "fmla z28.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x13, x6, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x13, x17, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z17.s\n"
+ "fmla z31.s, p3/M, z6.s, z17.s\n"
+ "fmla z28.s, p3/M, z6.s, z19.s\n"
+ "fmla z29.s, p3/M, z8.s, z18.s\n"
+ "fmla z30.s, p3/M, z8.s, z16.s\n"
+ "fmla z31.s, p3/M, z7.s, z16.s\n"
+ ".inst 0xc1b5c9fc // fclamp { z28.s-z31.s }, z15.s, z21.s\n"
+ "st1w { z28.s }, p0, [x24]\n"
+ "st1w { z29.s }, p0, [x24, x26, LSL #2]\n"
+ "st1w { z30.s }, p0, [x23]\n"
+ "st1w { z31.s }, p0, [x23, x26, LSL #2]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
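The direct-variant tile loop above derives each tile's input base pointer with the arithmetic spelled out in its asm comments (offset = tile_i * ld_input_row; offset += tile_j * ld_input_col; offset *= kernel_stride * output_size; inptr += offset * sizeof(float)). A hedged C++ sketch of that address computation, with an illustrative name and signature rather than the library's API:

static inline const float *tile_input_base(const float *inptr,
                                           long tile_i, long tile_j,
                                           long ld_input_row, long ld_input_col,
                                           long stride_factor) // kernel_stride * output_size; 0x2 in this kernel
{
    long offset = tile_i * ld_input_row; // "mul x20, x4, x21"
    offset += tile_j * ld_input_col;     // "madd x20, x5, x6, x20"
    offset *= stride_factor;             // "mul x20, x20, x22"
    return inptr + offset;               // "add x7, x7, x20, LSL #2"
}

The LSL #2 in the final add is the sizeof(float) scaling, so the pointer form needs no explicit multiply by four.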
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 39f1b3635f..7ad83b779b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -80,194 +80,194 @@ void sme2_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ptrue p3.b\n"
+ "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x15, #0x0\n"
"ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ld1w { z23.s }, p3/Z, [x14]\n"
+ "ldr x24, [x16, #0x20]\n"
+ "cntw x13\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "ld1rw { z22.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
+ "cmp x13, %x[n_channels]\n"
+ "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "sub x10, XZR, x13\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ld1w { z20.s }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
- "ldp x13, x12, [x20, #0x0]\n"
- "cntw x11\n"
+ "ldp x23, x22, [x16, #0x0]\n"
".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "ldp x10, x9, [x20, #0x10]\n"
- "mov x28, #0x0\n"
- "whilelt p2.s, XZR, %x[n_channels]\n"
+ "ldp x21, x20, [x16, #0x10]\n"
".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
- "ldp x24, x23, [x15, #0x0]\n"
"addvl x14, x14, #4\n"
- "cmp x11, %x[n_channels]\n"
- "ld1rw { z22.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ldp x22, x21, [x15, #0x10]\n"
- "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x27, XZR, x11\n"
- "ldr x20, [x15, #0x20]\n"
"ld1w { z8.s }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
- "ld1w { z9.s }, p2/Z, [x24, x28, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x23, x28, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x22, x28, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x24, x15, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z23\n fmla z28.s, p3/M, z4.s, z9.s\n"
- "movprfx z29, z23\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "ldr x20, [x15, #0x28]\n"
- "whilelt p1.s, x11, %x[n_channels]\n"
- "movprfx z30, z23\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "movprfx z31, z23\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z19.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x20, [x15, #0x30]\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ldr x21, [x15, #0x38]\n"
- "ld1w { z18.s }, p2/Z, [x20, x28, LSL #2]\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "ldr x20, [x15, #0x48]\n"
- "ld1w { z17.s }, p2/Z, [x20, x28, LSL #2]\n"
- "fmla z28.s, p3/M, z5.s, z12.s\n"
- "fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ldr x20, [x15, #0x40]\n"
- "fmla z30.s, p3/M, z6.s, z19.s\n"
- "fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z25.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x21, [x15, #0x50]\n"
- "fmla z28.s, p3/M, z7.s, z13.s\n"
- "fmla z29.s, p3/M, z6.s, z13.s\n"
- "ldr x20, [x15, #0x58]\n"
- "ld1w { z23.s }, p3/Z, [x14]\n"
- "fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z18.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ldr x21, [x15, #0x60]\n"
- "fmla z28.s, p3/M, z1.s, z16.s\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x20, [x15, #0x68]\n"
- "fmla z30.s, p3/M, z5.s, z17.s\n"
- "fmla z31.s, p3/M, z4.s, z17.s\n"
- "ldr x26, [x15, #0x70]\n"
+ "movprfx z24, z20\n fmla z24.s, p3/M, z4.s, z9.s\n"
+ "movprfx z25, z20\n fmla z25.s, p3/M, z3.s, z9.s\n"
+ "ldr x20, [x16, #0x28]\n"
+ "whilelt p1.s, x13, %x[n_channels]\n"
+ "movprfx z26, z20\n fmla z26.s, p3/M, z1.s, z9.s\n"
+ "movprfx z27, z20\n fmla z27.s, p3/M, z0.s, z9.s\n"
+ "ldr x21, [x16, #0x30]\n"
+ "ld1w { z20.s }, p3/Z, [x14]\n"
+ "ldr x24, [x16, #0x38]\n"
"addvl x14, x14, #1\n"
- "fmla z28.s, p3/M, z2.s, z25.s\n"
- "fmla z29.s, p3/M, z1.s, z25.s\n"
- "ld1w { z18.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ldr x25, [x15, #0x78]\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z31.s, p3/M, z2.s, z19.s\n"
- "ldp x24, x23, [x15, #0x0]\n"
- "incw x27\n"
- "fmla z28.s, p3/M, z8.s, z17.s\n"
- "fmla z29.s, p3/M, z7.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldp x22, x21, [x15, #0x10]\n"
- "fmla z30.s, p3/M, z3.s, z18.s\n"
- "fmla z31.s, p3/M, z5.s, z17.s\n"
- "ldr x20, [x15, #0x20]\n"
- "ld1w { z13.s }, p1/Z, [x20, x11, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "ld1w { z16.s }, p2/Z, [x26, x28, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z19.s\n"
+ "incw x10\n"
+ "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x16, #0x48]\n"
"mov p0.b, p2.b\n"
- "fmla z30.s, p3/M, z7.s, z16.s\n"
- "fmla z31.s, p3/M, z6.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x28, LSL #2]\n"
- "incw x28\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "fmla z29.s, p3/M, z8.s, z17.s\n"
- "ld1w { z9.s }, p1/Z, [x24, x11, LSL #2]\n"
- "whilelt p2.s, x28, %x[n_channels]\n"
- "fmla z30.s, p3/M, z8.s, z16.s\n"
- "fmla z31.s, p3/M, z7.s, z16.s\n"
- "ld1w { z10.s }, p1/Z, [x23, x11, LSL #2]\n"
- "ld1w { z11.s }, p1/Z, [x22, x11, LSL #2]\n"
- ".inst 0xc1afcadc // fclamp { z28.s-z31.s }, z22.s, z15.s\n"
- "st1w { z28.s }, p0, [x13, x27, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x21, x11, LSL #2]\n"
- "incw x11\n"
- "cmp x11, %x[n_channels]\n"
- "st1w { z29.s }, p0, [x12, x27, LSL #2]\n"
+ "fmla z24.s, p3/M, z0.s, z10.s\n"
+ "fmla z25.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x23, [x16, #0x40]\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
+ "fmla z27.s, p3/M, z1.s, z12.s\n"
+ "ldr x22, [x16, #0x50]\n"
+ "ld1w { z18.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x21, [x16, #0x58]\n"
+ "ldr x20, [x16, #0x60]\n"
+ "fmla z24.s, p3/M, z5.s, z12.s\n"
+ "fmla z25.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z28.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "ldr x27, [x16, #0x68]\n"
+ "fmla z26.s, p3/M, z6.s, z17.s\n"
+ "fmla z27.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z14.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "ldr x26, [x16, #0x70]\n"
+ "ldr x25, [x16, #0x78]\n"
+ "ldp x24, x23, [x16, #0x0]\n"
+ "fmla z24.s, p3/M, z7.s, z13.s\n"
+ "fmla z25.s, p3/M, z6.s, z13.s\n"
+ "fmla z26.s, p3/M, z4.s, z13.s\n"
+ "fmla z27.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z1.s, z28.s\n"
+ "fmla z25.s, p3/M, z0.s, z28.s\n"
+ "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldp x22, x21, [x16, #0x10]\n"
+ "fmla z26.s, p3/M, z5.s, z18.s\n"
+ "fmla z27.s, p3/M, z4.s, z18.s\n"
+ "fmla z24.s, p3/M, z2.s, z14.s\n"
+ "fmla z25.s, p3/M, z1.s, z14.s\n"
+ "ld1w { z19.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x16, #0x20]\n"
+ "fmla z26.s, p3/M, z0.s, z17.s\n"
+ "fmla z27.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z13.s }, p1/Z, [x20, x13, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z18.s\n"
+ "fmla z25.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z3.s, z19.s\n"
+ "fmla z27.s, p3/M, z5.s, z18.s\n"
+ "fmla z24.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "incw x15\n"
".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "st1w { z30.s }, p0, [x10, x27, LSL #2]\n"
+ "whilelt p2.s, x15, %x[n_channels]\n"
+ "fmla z26.s, p3/M, z7.s, z17.s\n"
+ "fmla z27.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z11.s }, p1/Z, [x22, x13, LSL #2]\n"
+ "fmla z24.s, p3/M, z6.s, z19.s\n"
+ "fmla z25.s, p3/M, z8.s, z18.s\n"
+ "ld1w { z9.s }, p1/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z10.s }, p1/Z, [x23, x13, LSL #2]\n"
+ "fmla z26.s, p3/M, z8.s, z16.s\n"
+ "fmla z27.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z12.s }, p1/Z, [x21, x13, LSL #2]\n"
+ "incw x13\n"
".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
"addvl x14, x14, #4\n"
- "st1w { z31.s }, p0, [x9, x27, LSL #2]\n"
+ "cmp x13, %x[n_channels]\n"
"ld1w { z8.s }, p3/Z, [x14]\n"
"addvl x14, x14, #1\n"
+ ".inst 0xc1afcad8 // fclamp { z24.s-z27.s }, z22.s, z15.s\n"
+ "st1w { z24.s }, p0, [x12, x10, LSL #2]\n"
+ "st1w { z25.s }, p0, [x11, x10, LSL #2]\n"
+ "st1w { z26.s }, p0, [x9, x10, LSL #2]\n"
+ "st1w { z27.s }, p0, [x28, x10, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z23\n fmla z28.s, p3/M, z4.s, z9.s\n"
- "movprfx z29, z23\n fmla z29.s, p3/M, z3.s, z9.s\n"
- "ldr x20, [x15, #0x28]\n"
- "incw x27\n"
- "movprfx z30, z23\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "movprfx z31, z23\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x20, [x15, #0x30]\n"
+ "movprfx z28, z20\n fmla z28.s, p3/M, z4.s, z9.s\n"
+ "movprfx z29, z20\n fmla z29.s, p3/M, z3.s, z9.s\n"
+ "ldr x21, [x16, #0x28]\n"
+ "incw x10\n"
+ "movprfx z30, z20\n fmla z30.s, p3/M, z1.s, z9.s\n"
+ "movprfx z31, z20\n fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ldr x20, [x16, #0x30]\n"
+ "mov p0.b, p2.b\n"
+ "ldr x22, [x16, #0x38]\n"
+ "ld1w { z18.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x21, [x16, #0x48]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z2.s, z11.s\n"
- "ldr x21, [x15, #0x38]\n"
- "ld1w { z16.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x16, #0x40]\n"
"fmla z30.s, p3/M, z2.s, z12.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ldr x20, [x15, #0x48]\n"
- "ld1w { z20.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ldr x25, [x16, #0x50]\n"
+ "ld1w { z20.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x24, [x16, #0x58]\n"
+ "ldr x23, [x16, #0x60]\n"
"fmla z28.s, p3/M, z5.s, z12.s\n"
"fmla z29.s, p3/M, z4.s, z12.s\n"
- "ld1w { z18.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ldr x20, [x15, #0x40]\n"
- "fmla z30.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ldr x22, [x16, #0x68]\n"
+ "fmla z30.s, p3/M, z6.s, z18.s\n"
"fmla z31.s, p3/M, z3.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x20, [x15, #0x50]\n"
+ "ld1w { z18.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x21, [x16, #0x70]\n"
+ "ldr x20, [x16, #0x78]\n"
"fmla z28.s, p3/M, z7.s, z13.s\n"
"fmla z29.s, p3/M, z6.s, z13.s\n"
- "ldr x21, [x15, #0x58]\n"
- "mov p0.b, p2.b\n"
"fmla z30.s, p3/M, z4.s, z13.s\n"
- "fmla z31.s, p3/M, z8.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x20, [x15, #0x60]\n"
- "fmla z28.s, p3/M, z1.s, z18.s\n"
- "fmla z29.s, p3/M, z0.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x21, x28, LSL #2]\n"
- "ldr x22, [x15, #0x68]\n"
+ "fmla z31.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "fmla z28.s, p3/M, z1.s, z16.s\n"
+ "fmla z29.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, x15, LSL #2]\n"
"fmla z30.s, p3/M, z5.s, z20.s\n"
"fmla z31.s, p3/M, z4.s, z20.s\n"
- "ldr x21, [x15, #0x70]\n"
- "fmla z28.s, p3/M, z2.s, z17.s\n"
- "fmla z29.s, p3/M, z1.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ldr x20, [x15, #0x78]\n"
- "fmla z30.s, p3/M, z0.s, z16.s\n"
- "fmla z31.s, p3/M, z2.s, z19.s\n"
+ "fmla z28.s, p3/M, z2.s, z18.s\n"
+ "fmla z29.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z0.s, z17.s\n"
+ "fmla z31.s, p3/M, z2.s, z16.s\n"
"fmla z28.s, p3/M, z8.s, z20.s\n"
"fmla z29.s, p3/M, z7.s, z20.s\n"
- "ld1w { z17.s }, p2/Z, [x22, x28, LSL #2]\n"
- "fmla z30.s, p3/M, z3.s, z18.s\n"
- "fmla z31.s, p3/M, z5.s, z17.s\n"
- "fmla z28.s, p3/M, z3.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x28, LSL #2]\n"
- "fmla z29.s, p3/M, z5.s, z19.s\n"
- "fmla z30.s, p3/M, z7.s, z16.s\n"
- "fmla z31.s, p3/M, z6.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x28, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "fmla z29.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z19.s\n"
+ "fmla z31.s, p3/M, z5.s, z18.s\n"
+ "fmla z28.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z17.s\n"
+ "fmla z31.s, p3/M, z6.s, z17.s\n"
+ "fmla z28.s, p3/M, z6.s, z19.s\n"
+ "fmla z29.s, p3/M, z8.s, z18.s\n"
"fmla z30.s, p3/M, z8.s, z16.s\n"
"fmla z31.s, p3/M, z7.s, z16.s\n"
".inst 0xc1afcadc // fclamp { z28.s-z31.s }, z22.s, z15.s\n"
- "st1w { z28.s }, p0, [x13, x27, LSL #2]\n"
- "st1w { z29.s }, p0, [x12, x27, LSL #2]\n"
- "st1w { z30.s }, p0, [x10, x27, LSL #2]\n"
- "st1w { z31.s }, p0, [x9, x27, LSL #2]\n"
+ "st1w { z28.s }, p0, [x12, x10, LSL #2]\n"
+ "st1w { z29.s }, p0, [x11, x10, LSL #2]\n"
+ "st1w { z30.s }, p0, [x9, x10, LSL #2]\n"
+ "st1w { z31.s }, p0, [x28, x10, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
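In the indirect variant that ends here, input rows are not reached by strided arithmetic at all: the kernel walks a pointer table at Args::inptrs (see the offsetof_Args_inptrs operand) and indexes each fetched row pointer with the running channel offset kept in x15. A hedged sketch of that two-level load, with illustrative names:

static inline float indirect_load(const float *const *inptrs, // Args::inptrs table
                                  int row, long channel)
{
    const float *row_ptr = inptrs[row]; // e.g. "ldr x20, [x16, #0x28]"
    return row_ptr[channel];            // e.g. "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]"
}

The vectorised kernel of course loads a whole predicate-governed vector per row rather than a scalar, but the addressing shape is the same.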
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index d15a3a8377..cbb8d893d5 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,72 +88,72 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ptrue p3.b\n"
- ".inst 0x25207810 // ptrue pn8.b\n"
"mov x2, #0x0\n"
"mov x3, #0x0\n"
+ "ptrue p3.b\n"
+ ".inst 0x25207810 // ptrue pn8.b\n"
"1:" // Tile loop
"str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x22, #0x3\n"
"str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
"ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
+ "ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
"madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
+ "add x7, x4, x4\n"
"mul x20, x20, x22\n" // offset *= kernel_stride * output_size
- "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "add x8, x7, x4\n"
"add x5, x5, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x6, x5, x21, LSL #2\n"
- "add x7, x6, x21, LSL #2\n"
- "add x8, x4, x4\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, x7, x21, LSL #2\n"
- "add x15, x8, x4\n"
- "add x14, x16, x21, LSL #2\n"
- "add x13, x15, x4\n"
+ "add x17, x8, x4\n"
+ "add x16, x5, x21, LSL #2\n"
+ "add x15, x16, x21, LSL #2\n"
+ "add x14, x15, x21, LSL #2\n"
+ "add x13, x14, x21, LSL #2\n"
"cbnz x3, 2f\n"
"ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "sub x21, x20, x3\n"
- "sub x21, x21, #0x1\n"
"lsl x12, %x[n_channels], #0x2\n"
- "mov x20, #0xc\n"
- "and x21, x21, #0x3fffff\n"
- "mul x20, x20, x4\n"
- "orr x12, x12, x21, LSL #22\n"
- "orr x12, x12, x20, LSL #38\n"
- "add x27, x7, x8, LSL #2\n"
- "add x26, x5, x13, LSL #2\n"
- "add x25, x6, x8, LSL #2\n"
- "add x24, x14, x13, LSL #2\n"
- "add x23, x7, x4, LSL #2\n"
+ "mov x28, #0xc\n"
+ "mul x28, x28, x4\n"
+ "add x27, x15, x7, LSL #2\n"
+ "add x26, x5, x17, LSL #2\n"
+ "add x25, x16, x7, LSL #2\n"
+ "sub x20, x20, x3\n"
+ "add x24, x13, x17, LSL #2\n"
+ "sub x20, x20, #0x1\n"
+ "add x23, x15, x4, LSL #2\n"
+ "and x20, x20, #0x3fffff\n"
"add x22, x5, x4, LSL #2\n"
- "add x21, x5, x15, LSL #2\n"
- "add x20, x7, x15, LSL #2\n"
- "add x11, x6, x13, LSL #2\n"
- "add x10, x16, x8, LSL #2\n"
- "add x9, x16, x13, LSL #2\n"
- "add x28, x14, x4, LSL #2\n"
+ "orr x12, x12, x20, LSL #22\n"
+ "add x21, x5, x8, LSL #2\n"
+ "orr x12, x12, x28, LSL #38\n"
+ "add x20, x15, x8, LSL #2\n"
+ "add x11, x16, x17, LSL #2\n"
+ "add x10, x14, x7, LSL #2\n"
+ "add x9, x14, x17, LSL #2\n"
+ "add x28, x13, x4, LSL #2\n"
".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- "add x27, x6, x4, LSL #2\n"
+ "add x27, x16, x4, LSL #2\n"
".inst 0xf8ac48ba // rprfm pldonce, x12, [x5]\n"
".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- "add x26, x6, x15, LSL #2\n"
- ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
+ "add x26, x16, x8, LSL #2\n"
+ ".inst 0xf8ac49ba // rprfm pldonce, x12, [x13]\n"
".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- "add x25, x14, x15, LSL #2\n"
+ "add x25, x13, x8, LSL #2\n"
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- "add x24, x16, x4, LSL #2\n"
+ "add x24, x14, x4, LSL #2\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
- "add x23, x5, x8, LSL #2\n"
+ "add x23, x5, x7, LSL #2\n"
".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- "add x22, x16, x15, LSL #2\n"
+ "add x22, x14, x8, LSL #2\n"
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
- "add x21, x7, x13, LSL #2\n"
+ "add x21, x15, x17, LSL #2\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
- "add x20, x14, x8, LSL #2\n"
- ".inst 0xf8ac48da // rprfm pldonce, x12, [x6]\n"
- ".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
+ "add x20, x13, x7, LSL #2\n"
".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
+ ".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
+ ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
".inst 0xf8ac495a // rprfm pldonce, x12, [x10]\n"
".inst 0xf8ac493a // rprfm pldonce, x12, [x9]\n"
".inst 0xf8ac4b9a // rprfm pldonce, x12, [x28]\n"
@@ -163,312 +163,312 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- ".inst 0xf8ac48fa // rprfm pldonce, x12, [x7]\n"
+ ".inst 0xf8ac49fa // rprfm pldonce, x12, [x15]\n"
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x2, x22\n" // offset = tile_i * ld_output_row
- "mov x20, #0x3\n"
- "ld1w { z24.s }, p3/Z, [x17]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mov x21, #0x3\n"
+ "ld1w { z25.s }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
"ldr x27, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "madd x21, x3, x27, x21\n" // offset += tile_j * ld_output_col
- "mul x21, x21, x20\n" // offset *= output_tile_size
- "ld1rw { z26.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "cntw x22\n"
+ ".inst 0xa040c0c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
"ldr x26, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "addvl x17, x17, #1\n"
- "add x26, x26, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "cntw x25\n"
- "addvl x17, x17, #4\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "add x24, x26, x22, LSL #2\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
+ ".inst 0xa040c0c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "mul x20, x2, x23\n" // offset = tile_i * ld_output_row
+ "cmp x22, %x[n_channels]\n"
+ "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "madd x20, x3, x27, x20\n" // offset += tile_j * ld_output_col
+ "add x25, x27, x27\n"
"ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "addvl x17, x17, #4\n"
- "cmp x25, %x[n_channels]\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "add x23, x24, x22, LSL #2\n"
- "add x22, x27, x27\n"
- "ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
+ "mul x20, x20, x21\n" // offset *= output_tile_size
"mov x21, #0x0\n"
- "sub x20, XZR, x25\n"
+ "ld1w { z8.s }, p3/Z, [x6]\n"
+ "add x26, x26, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "sub x20, XZR, x22\n"
+ "ld1w { z9.s }, p2/Z, [x15, x7, LSL #2]\n"
+ "add x24, x26, x23, LSL #2\n"
"ld1w { z10.s }, p2/Z, [x5]\n"
- "ld1w { z11.s }, p2/Z, [x5, x13, LSL #2]\n"
- "addvl x17, x17, #1\n"
- "ld1w { z12.s }, p2/Z, [x14]\n"
- "ld1w { z13.s }, p2/Z, [x6, x8, LSL #2]\n"
+ "addvl x6, x6, #1\n"
+ "add x23, x24, x23, LSL #2\n"
+ "ld1w { z11.s }, p2/Z, [x5, x17, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x13]\n"
+ "ld1w { z13.s }, p2/Z, [x16, x7, LSL #2]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
- "movprfx z28, z24\n fmla z28.s, p3/M, z7.s, z9.s\n"
- "movprfx z27, z24\n fmla z27.s, p3/M, z8.s, z9.s\n"
- "whilelt p1.s, x25, %x[n_channels]\n"
+ "movprfx z28, z25\n fmla z28.s, p3/M, z7.s, z9.s\n"
+ "movprfx z23, z25\n fmla z23.s, p3/M, z8.s, z9.s\n"
+ "whilelt p1.s, x22, %x[n_channels]\n"
"incw x21\n"
- "movprfx z29, z24\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "fmla z28.s, p3/M, z4.s, z13.s\n"
- "incw x25\n"
+ "movprfx z29, z25\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "movprfx z30, z25\n fmla z30.s, p3/M, z5.s, z9.s\n"
+ "incw x22\n"
"mov p0.b, p2.b\n"
- "movprfx z30, z24\n fmla z30.s, p3/M, z5.s, z9.s\n"
- "movprfx z31, z24\n fmla z31.s, p3/M, z4.s, z9.s\n"
+ "movprfx z31, z25\n fmla z31.s, p3/M, z4.s, z9.s\n"
+ "movprfx z16, z25\n fmla z16.s, p3/M, z3.s, z9.s\n"
"incw x20\n"
- "movprfx z20, z24\n fmla z20.s, p3/M, z3.s, z9.s\n"
- "fmla z27.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x7, x15, LSL #2]\n"
+ "movprfx z17, z25\n fmla z17.s, p3/M, z2.s, z9.s\n"
+ "movprfx z19, z25\n fmla z19.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z4.s, z13.s\n"
+ "fmla z23.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x15, x8, LSL #2]\n"
"fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z19.s }, p2/Z, [x7, x4, LSL #2]\n"
- "movprfx z21, z24\n fmla z21.s, p3/M, z2.s, z9.s\n"
- "fmla z28.s, p3/M, z6.s, z19.s\n"
- "movprfx z23, z24\n fmla z23.s, p3/M, z0.s, z9.s\n"
- "fmla z27.s, p3/M, z5.s, z13.s\n"
- "fmla z29.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, x4, LSL #2]\n"
"fmla z30.s, p3/M, z2.s, z13.s\n"
"fmla z31.s, p3/M, z1.s, z13.s\n"
- "fmla z20.s, p3/M, z0.s, z13.s\n"
- "ld1w { z18.s }, p2/Z, [x5, x4, LSL #2]\n"
- "fmla z21.s, p3/M, z6.s, z12.s\n"
- "ld1w { z15.s }, p2/Z, [x14, x13, LSL #2]\n"
- "movprfx z22, z24\n fmla z22.s, p3/M, z1.s, z9.s\n"
- "fmla z28.s, p3/M, z0.s, z18.s\n"
- "fmla z23.s, p3/M, z8.s, z15.s\n"
- "fmla z27.s, p3/M, z7.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x5, x15, LSL #2]\n"
- "fmla z22.s, p3/M, z0.s, z19.s\n"
- "fmla z30.s, p3/M, z4.s, z19.s\n"
- "ld1w { z24.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "fmla z31.s, p3/M, z3.s, z19.s\n"
- "fmla z21.s, p3/M, z1.s, z19.s\n"
- "ld1w { z17.s }, p2/Z, [x6]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z1.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x16]\n"
- "fmla z20.s, p3/M, z4.s, z10.s\n"
- "fmla z27.s, p3/M, z1.s, z18.s\n"
- "ld1w { z9.s }, p2/Z, [x6, x13, LSL #2]\n"
- "fmla z22.s, p3/M, z2.s, z10.s\n"
- "fmla z23.s, p3/M, z1.s, z10.s\n"
- "fmla z28.s, p3/M, z8.s, z10.s\n"
- "fmla z29.s, p3/M, z7.s, z10.s\n"
- "fmla z31.s, p3/M, z5.s, z10.s\n"
- "fmla z30.s, p3/M, z0.s, z17.s\n"
- "ld1w { z19.s }, p2/Z, [x16, x8, LSL #2]\n"
- "fmla z20.s, p3/M, z2.s, z9.s\n"
- "fmla z21.s, p3/M, z3.s, z16.s\n"
- "fmla z22.s, p3/M, z4.s, z19.s\n"
- "fmla z23.s, p3/M, z3.s, z19.s\n"
- "fmla z27.s, p3/M, z3.s, z17.s\n"
- "fmla z29.s, p3/M, z5.s, z9.s\n"
- "ld1w { z17.s }, p2/Z, [x16, x13, LSL #2]\n"
- "fmla z30.s, p3/M, z6.s, z16.s\n"
- "fmla z31.s, p3/M, z7.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x4, LSL #2]\n"
- "fmla z20.s, p3/M, z6.s, z19.s\n"
- "fmla z21.s, p3/M, z5.s, z19.s\n"
- "ld1w { z18.s }, p2/Z, [x6, x4, LSL #2]\n"
- "fmla z23.s, p3/M, z5.s, z17.s\n"
- "fmla z22.s, p3/M, z6.s, z16.s\n"
- "fmla z30.s, p3/M, z8.s, z19.s\n"
- "fmla z20.s, p3/M, z8.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x6, x15, LSL #2]\n"
+ "fmla z16.s, p3/M, z0.s, z13.s\n"
+ "fmla z17.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z21.s }, p2/Z, [x13, x17, LSL #2]\n"
+ "movprfx z18, z25\n fmla z18.s, p3/M, z1.s, z9.s\n"
+ "fmla z28.s, p3/M, z6.s, z20.s\n"
+ "fmla z23.s, p3/M, z5.s, z13.s\n"
+ "ld1w { z25.s }, p3/Z, [x6]\n"
"addvl x6, x6, #1\n"
- "fmla z21.s, p3/M, z7.s, z16.s\n"
- "fmla z28.s, p3/M, z3.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x15, LSL #2]\n"
- "fmla z31.s, p3/M, z0.s, z18.s\n"
- "fmla z27.s, p3/M, z4.s, z18.s\n"
- "fmla z22.s, p3/M, z8.s, z16.s\n"
- "fmla z23.s, p3/M, z7.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x16, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z18.s\n"
- "fmla z28.s, p3/M, z5.s, z17.s\n"
- "ld1w { z11.s }, p2/Z, [x16, x4, LSL #2]\n"
+ "fmla z29.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z27.s }, p2/Z, [x5, x4, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z20.s\n"
+ "fmla z19.s, p3/M, z8.s, z21.s\n"
+ "ld1w { z24.s }, p2/Z, [x5, x8, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z20.s\n"
+ "fmla z18.s, p3/M, z0.s, z20.s\n"
+ "fmla z17.s, p3/M, z1.s, z20.s\n"
+ "fmla z28.s, p3/M, z0.s, z27.s\n"
+ "fmla z23.s, p3/M, z7.s, z20.s\n"
+ "ld1w { z21.s }, p2/Z, [x16]\n"
+ "fmla z29.s, p3/M, z1.s, z24.s\n"
+ "fmla z16.s, p3/M, z4.s, z10.s\n"
+ "fmla z19.s, p3/M, z1.s, z10.s\n"
+ "fmla z31.s, p3/M, z5.s, z10.s\n"
+ "fmla z18.s, p3/M, z2.s, z10.s\n"
+ "fmla z30.s, p3/M, z0.s, z21.s\n"
+ "fmla z28.s, p3/M, z2.s, z24.s\n"
+ "fmla z23.s, p3/M, z1.s, z27.s\n"
+ "ld1w { z13.s }, p2/Z, [x16, x17, LSL #2]\n"
+ "ld1w { z20.s }, p2/Z, [x14]\n"
+ "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z16.s, p3/M, z2.s, z13.s\n"
+ "fmla z28.s, p3/M, z8.s, z10.s\n"
+ "fmla z17.s, p3/M, z3.s, z20.s\n"
+ "ld1w { z27.s }, p2/Z, [x14, x7, LSL #2]\n"
+ "fmla z23.s, p3/M, z3.s, z21.s\n"
+ "fmla z29.s, p3/M, z5.s, z13.s\n"
+ "ld1w { z22.s }, p2/Z, [x14, x17, LSL #2]\n"
+ "fmla z30.s, p3/M, z6.s, z20.s\n"
+ "ld1w { z20.s }, p2/Z, [x13, x4, LSL #2]\n"
+ "fmla z18.s, p3/M, z4.s, z27.s\n"
+ "fmla z19.s, p3/M, z3.s, z27.s\n"
+ "ld1w { z21.s }, p2/Z, [x16, x4, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z27.s\n"
+ "fmla z16.s, p3/M, z6.s, z27.s\n"
+ "fmla z17.s, p3/M, z5.s, z27.s\n"
+ "fmla z30.s, p3/M, z8.s, z27.s\n"
+ "fmla z28.s, p3/M, z3.s, z21.s\n"
+ "fmla z19.s, p3/M, z5.s, z22.s\n"
+ "fmla z18.s, p3/M, z6.s, z20.s\n"
+ "fmla z16.s, p3/M, z8.s, z22.s\n"
+ "fmla z31.s, p3/M, z0.s, z21.s\n"
+ "ld1w { z9.s }, p2/Z, [x16, x8, LSL #2]\n"
"addvl x16, x16, #1\n"
- "fmla z29.s, p3/M, z4.s, z17.s\n"
- "fmla z31.s, p3/M, z2.s, z17.s\n"
- "fmla z20.s, p3/M, z1.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x5, x8, LSL #2]\n"
- "fmla z21.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z7.s, z20.s\n"
+ "ld1w { z20.s }, p2/Z, [x13, x8, LSL #2]\n"
+ "fmla z23.s, p3/M, z4.s, z21.s\n"
+ "fmla z30.s, p3/M, z1.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x14, x4, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z9.s\n"
+ "fmla z29.s, p3/M, z4.s, z9.s\n"
+ "fmla z18.s, p3/M, z8.s, z20.s\n"
+ "fmla z19.s, p3/M, z7.s, z20.s\n"
+ "ld1w { z12.s }, p2/Z, [x14, x8, LSL #2]\n"
+ "addvl x14, x14, #1\n"
+ "fmla z31.s, p3/M, z2.s, z9.s\n"
+ "fmla z16.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z20.s }, p2/Z, [x5, x7, LSL #2]\n"
"addvl x5, x5, #1\n"
- "fmla z22.s, p3/M, z3.s, z11.s\n"
- "fmla z27.s, p3/M, z2.s, z16.s\n"
+ "fmla z17.s, p3/M, z4.s, z21.s\n"
+ "fmla z30.s, p3/M, z7.s, z21.s\n"
"ld1w { z10.s }, p1/Z, [x5]\n"
- "fmla z23.s, p3/M, z4.s, z19.s\n"
- "fmla z30.s, p3/M, z7.s, z11.s\n"
- "fmla z31.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z1.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x7]\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x7, x13, LSL #2]\n"
- "fmla z20.s, p3/M, z7.s, z19.s\n"
- "addvl x7, x7, #1\n"
- "fmla z22.s, p3/M, z5.s, z19.s\n"
- "fmla z27.s, p3/M, z6.s, z18.s\n"
- "ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
- "fmla z21.s, p3/M, z0.s, z18.s\n"
- "fmla z23.s, p3/M, z2.s, z17.s\n"
- "fmla z31.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x8, LSL #2]\n"
- "fmla z30.s, p3/M, z3.s, z18.s\n"
+ "fmla z18.s, p3/M, z3.s, z21.s\n"
+ "fmla z23.s, p3/M, z2.s, z20.s\n"
+ "fmla z19.s, p3/M, z4.s, z12.s\n"
+ "fmla z31.s, p3/M, z6.s, z21.s\n"
+ "ld1w { z11.s }, p2/Z, [x15]\n"
+ "fmla z28.s, p3/M, z1.s, z20.s\n"
+ "fmla z29.s, p3/M, z0.s, z20.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, x17, LSL #2]\n"
+ "addvl x15, x15, #1\n"
+ "fmla z16.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z9.s }, p1/Z, [x15, x7, LSL #2]\n"
+ "fmla z18.s, p3/M, z5.s, z12.s\n"
+ "fmla z23.s, p3/M, z6.s, z11.s\n"
+ "fmla z17.s, p3/M, z0.s, z11.s\n"
+ "fmla z19.s, p3/M, z2.s, z20.s\n"
+ "fmla z31.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z13.s }, p2/Z, [x13, x7, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z11.s\n"
"whilelt p2.s, x21, %x[n_channels]\n"
- "fmla z29.s, p3/M, z8.s, z17.s\n"
- "fmla z20.s, p3/M, z5.s, z17.s\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "fmla z21.s, p3/M, z8.s, z16.s\n"
- "fmla z22.s, p3/M, z7.s, z16.s\n"
- "addvl x14, x14, #1\n"
- "cmp x25, %x[n_channels]\n"
- "fmla z23.s, p3/M, z6.s, z16.s\n"
- "fmax z27.s, p3/M, z27.s, z26.s\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "fmin z27.s, p3/M, z27.s, z14.s\n"
- ".inst 0xc1aecb5c // fclamp { z28.s-z31.s }, z26.s, z14.s\n"
- "ld1w { z11.s }, p1/Z, [x5, x13, LSL #2]\n"
- ".inst 0xc1aecb54 // fclamp { z20.s-z23.s }, z26.s, z14.s\n"
- "ld1w { z12.s }, p1/Z, [x14]\n"
- "st1w { z27.s }, p0, [x26]\n"
- "ld1w { z13.s }, p1/Z, [x6, x8, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z20.s\n"
+ "fmla z16.s, p3/M, z5.s, z20.s\n"
+ ".inst 0xa040c0c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "fmax z23.s, p3/M, z23.s, z15.s\n"
+ "addvl x13, x13, #1\n"
+ "cmp x22, %x[n_channels]\n"
+ "ld1w { z11.s }, p1/Z, [x5, x17, LSL #2]\n"
+ "fmla z17.s, p3/M, z8.s, z13.s\n"
+ "fmla z18.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z12.s }, p1/Z, [x13]\n"
+ "fmla z19.s, p3/M, z6.s, z13.s\n"
+ ".inst 0xa040c0c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ ".inst 0xc1aec9fc // fclamp { z28.s-z31.s }, z15.s, z14.s\n"
+ "ld1w { z13.s }, p1/Z, [x16, x7, LSL #2]\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "ld1w { z8.s }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
+ ".inst 0xc1aec9f0 // fclamp { z16.s-z19.s }, z15.s, z14.s\n"
+ "st1w { z30.s }, p0, [x24]\n"
+ "st1w { z23.s }, p0, [x26]\n"
"st1w { z28.s }, p0, [x26, x27, LSL #2]\n"
- "st1w { z29.s }, p0, [x26, x22, LSL #2]\n"
+ "st1w { z29.s }, p0, [x26, x25, LSL #2]\n"
"addvl x26, x26, #1\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "st1w { z30.s }, p0, [x24]\n"
"st1w { z31.s }, p0, [x24, x27, LSL #2]\n"
- "st1w { z20.s }, p0, [x24, x22, LSL #2]\n"
+ "st1w { z16.s }, p0, [x24, x25, LSL #2]\n"
"addvl x24, x24, #1\n"
- "st1w { z21.s }, p0, [x23]\n"
- "st1w { z22.s }, p0, [x23, x27, LSL #2]\n"
- "st1w { z23.s }, p0, [x23, x22, LSL #2]\n"
+ "st1w { z17.s }, p0, [x23]\n"
+ "st1w { z18.s }, p0, [x23, x27, LSL #2]\n"
+ "st1w { z19.s }, p0, [x23, x25, LSL #2]\n"
"addvl x23, x23, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
- "movprfx z28, z24\n fmla z28.s, p3/M, z7.s, z9.s\n"
- "movprfx z25, z24\n fmla z25.s, p3/M, z8.s, z9.s\n"
+ "movprfx z20, z25\n fmla z20.s, p3/M, z7.s, z9.s\n"
+ "movprfx z24, z25\n fmla z24.s, p3/M, z8.s, z9.s\n"
"ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "add x3, x3, #0x1\n"
- "movprfx z29, z24\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "fmla z28.s, p3/M, z4.s, z13.s\n"
- "ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x21, x2, #0x1\n"
- "movprfx z30, z24\n fmla z30.s, p3/M, z5.s, z9.s\n"
- "movprfx z31, z24\n fmla z31.s, p3/M, z4.s, z9.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x3, x20\n"
- "movprfx z20, z24\n fmla z20.s, p3/M, z3.s, z9.s\n"
- "fmla z25.s, p3/M, z0.s, z10.s\n"
- "ld1w { z27.s }, p2/Z, [x7, x15, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z29.s, p3/M, z2.s, z11.s\n"
- "ld1w { z17.s }, p2/Z, [x7, x4, LSL #2]\n"
- "movprfx z21, z24\n fmla z21.s, p3/M, z2.s, z9.s\n"
- "csel x2, x2, x21, LT\n"
- "fmla z28.s, p3/M, z6.s, z17.s\n"
- "movprfx z23, z24\n fmla z23.s, p3/M, z0.s, z9.s\n"
"mov p0.b, p2.b\n"
+ "movprfx z21, z25\n fmla z21.s, p3/M, z6.s, z9.s\n"
+ "movprfx z22, z25\n fmla z22.s, p3/M, z5.s, z9.s\n"
+ "ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z23, z25\n fmla z23.s, p3/M, z4.s, z9.s\n"
+ "movprfx z28, z25\n fmla z28.s, p3/M, z3.s, z9.s\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "movprfx z29, z25\n fmla z29.s, p3/M, z2.s, z9.s\n"
+ "movprfx z31, z25\n fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x3, x3, #0x1\n"
+ "fmla z20.s, p3/M, z4.s, z13.s\n"
+ "fmla z24.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z19.s }, p2/Z, [x15, x8, LSL #2]\n"
+ "add x20, x2, #0x1\n"
+ "fmla z21.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, x4, LSL #2]\n"
+ "fmla z22.s, p3/M, z2.s, z13.s\n"
+ "cmp x3, x22\n"
+ "fmla z23.s, p3/M, z1.s, z13.s\n"
+ "fmla z28.s, p3/M, z0.s, z13.s\n"
+ "csel x2, x2, x20, LT\n"
"csel x3, x3, XZR, LT\n"
- "fmla z25.s, p3/M, z5.s, z13.s\n"
- "fmla z29.s, p3/M, z3.s, z13.s\n"
- "cmp x2, x20\n"
- "fmla z30.s, p3/M, z2.s, z13.s\n"
- "fmla z31.s, p3/M, z1.s, z13.s\n"
- "fmla z20.s, p3/M, z0.s, z13.s\n"
- "ld1w { z19.s }, p2/Z, [x5, x4, LSL #2]\n"
- "fmla z21.s, p3/M, z6.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x13, LSL #2]\n"
- "movprfx z22, z24\n fmla z22.s, p3/M, z1.s, z9.s\n"
- "fmla z28.s, p3/M, z0.s, z19.s\n"
- "fmla z23.s, p3/M, z8.s, z16.s\n"
- "fmla z25.s, p3/M, z7.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x5, x15, LSL #2]\n"
- "fmla z22.s, p3/M, z0.s, z17.s\n"
- "fmla z30.s, p3/M, z4.s, z17.s\n"
- "fmla z31.s, p3/M, z3.s, z17.s\n"
- "fmla z21.s, p3/M, z1.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x6]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z1.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x16]\n"
- "fmla z20.s, p3/M, z4.s, z27.s\n"
- "fmla z25.s, p3/M, z1.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x6, x13, LSL #2]\n"
- "fmla z22.s, p3/M, z2.s, z27.s\n"
- "fmla z23.s, p3/M, z1.s, z27.s\n"
- "fmla z28.s, p3/M, z8.s, z27.s\n"
- "fmla z29.s, p3/M, z7.s, z27.s\n"
- "fmla z31.s, p3/M, z5.s, z27.s\n"
+ "fmla z29.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z16.s }, p2/Z, [x13, x17, LSL #2]\n"
+ "movprfx z30, z25\n fmla z30.s, p3/M, z1.s, z9.s\n"
+ "cmp x2, x21\n"
+ "fmla z20.s, p3/M, z6.s, z18.s\n"
+ "fmla z24.s, p3/M, z5.s, z13.s\n"
+ "fmla z21.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z17.s }, p2/Z, [x5, x4, LSL #2]\n"
+ "fmla z22.s, p3/M, z4.s, z18.s\n"
+ "fmla z31.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x5, x8, LSL #2]\n"
+ "fmla z23.s, p3/M, z3.s, z18.s\n"
"fmla z30.s, p3/M, z0.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x16, x8, LSL #2]\n"
+ "fmla z29.s, p3/M, z1.s, z18.s\n"
+ "fmla z20.s, p3/M, z0.s, z17.s\n"
+ "fmla z24.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x16]\n"
+ "fmla z21.s, p3/M, z1.s, z16.s\n"
+ "fmla z28.s, p3/M, z4.s, z19.s\n"
+ "fmla z31.s, p3/M, z1.s, z19.s\n"
+ "fmla z23.s, p3/M, z5.s, z19.s\n"
+ "fmla z30.s, p3/M, z2.s, z19.s\n"
+ "fmla z22.s, p3/M, z0.s, z18.s\n"
"fmla z20.s, p3/M, z2.s, z16.s\n"
- "fmla z21.s, p3/M, z3.s, z17.s\n"
- "fmla z22.s, p3/M, z4.s, z19.s\n"
- "fmla z23.s, p3/M, z3.s, z19.s\n"
- "fmla z25.s, p3/M, z3.s, z18.s\n"
- "fmla z29.s, p3/M, z5.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x16, x13, LSL #2]\n"
- "fmla z30.s, p3/M, z6.s, z17.s\n"
- "fmla z31.s, p3/M, z7.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x4, LSL #2]\n"
- "fmla z20.s, p3/M, z6.s, z19.s\n"
- "fmla z21.s, p3/M, z5.s, z19.s\n"
- "ld1w { z17.s }, p2/Z, [x6, x4, LSL #2]\n"
- "fmla z23.s, p3/M, z5.s, z18.s\n"
+ "fmla z24.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x16, x17, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x14]\n"
+ "fmla z21.s, p3/M, z7.s, z19.s\n"
+ "fmla z28.s, p3/M, z2.s, z17.s\n"
+ "fmla z20.s, p3/M, z8.s, z19.s\n"
+ "fmla z29.s, p3/M, z3.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x14, x7, LSL #2]\n"
+ "fmla z24.s, p3/M, z3.s, z18.s\n"
+ "fmla z21.s, p3/M, z5.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x14, x17, LSL #2]\n"
"fmla z22.s, p3/M, z6.s, z16.s\n"
- "fmla z30.s, p3/M, z8.s, z19.s\n"
- "fmla z20.s, p3/M, z8.s, z18.s\n"
- "ld1w { z18.s }, p2/Z, [x6, x15, LSL #2]\n"
- "fmla z21.s, p3/M, z7.s, z16.s\n"
- "fmla z28.s, p3/M, z3.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x15, LSL #2]\n"
- "fmla z31.s, p3/M, z0.s, z17.s\n"
- "fmla z25.s, p3/M, z4.s, z17.s\n"
- "fmla z22.s, p3/M, z8.s, z16.s\n"
- "fmla z23.s, p3/M, z7.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x16, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z17.s\n"
- "fmla z28.s, p3/M, z5.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x13, x4, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z19.s\n"
+ "fmla z31.s, p3/M, z3.s, z19.s\n"
"ld1w { z17.s }, p2/Z, [x16, x4, LSL #2]\n"
- "fmla z29.s, p3/M, z4.s, z18.s\n"
- "fmla z31.s, p3/M, z2.s, z18.s\n"
- "fmla z20.s, p3/M, z1.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x5, x8, LSL #2]\n"
- "fmla z21.s, p3/M, z4.s, z17.s\n"
- "fmla z22.s, p3/M, z3.s, z17.s\n"
- "fmla z25.s, p3/M, z2.s, z16.s\n"
- "fmla z23.s, p3/M, z4.s, z19.s\n"
- "fmla z30.s, p3/M, z7.s, z17.s\n"
- "fmla z31.s, p3/M, z6.s, z17.s\n"
- "fmla z28.s, p3/M, z1.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x7]\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x7, x13, LSL #2]\n"
- "fmla z20.s, p3/M, z7.s, z19.s\n"
- "fmla z22.s, p3/M, z5.s, z19.s\n"
- "fmla z25.s, p3/M, z6.s, z18.s\n"
- "fmla z21.s, p3/M, z0.s, z18.s\n"
- "fmla z23.s, p3/M, z2.s, z17.s\n"
- "fmla z31.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x8, LSL #2]\n"
- "fmla z30.s, p3/M, z3.s, z18.s\n"
- "fmla z29.s, p3/M, z8.s, z17.s\n"
- "fmla z20.s, p3/M, z5.s, z17.s\n"
- "fmla z21.s, p3/M, z8.s, z16.s\n"
- "fmla z22.s, p3/M, z7.s, z16.s\n"
- "fmla z23.s, p3/M, z6.s, z16.s\n"
- "fmax z25.s, p3/M, z25.s, z26.s\n"
- "fmin z25.s, p3/M, z25.s, z14.s\n"
- ".inst 0xc1aecb5c // fclamp { z28.s-z31.s }, z26.s, z14.s\n"
- "st1w { z25.s }, p0, [x26]\n"
- ".inst 0xc1aecb54 // fclamp { z20.s-z23.s }, z26.s, z14.s\n"
- "st1w { z28.s }, p0, [x26, x27, LSL #2]\n"
- "st1w { z29.s }, p0, [x26, x22, LSL #2]\n"
- "st1w { z30.s }, p0, [x24]\n"
- "st1w { z31.s }, p0, [x24, x27, LSL #2]\n"
- "st1w { z20.s }, p0, [x24, x22, LSL #2]\n"
- "st1w { z21.s }, p0, [x23]\n"
- "st1w { z22.s }, p0, [x23, x27, LSL #2]\n"
- "st1w { z23.s }, p0, [x23, x22, LSL #2]\n"
+ "fmla z23.s, p3/M, z7.s, z19.s\n"
+ "fmla z28.s, p3/M, z6.s, z19.s\n"
+ "fmla z29.s, p3/M, z5.s, z19.s\n"
+ "fmla z22.s, p3/M, z8.s, z19.s\n"
+ "fmla z20.s, p3/M, z3.s, z17.s\n"
+ "fmla z31.s, p3/M, z5.s, z18.s\n"
+ "fmla z30.s, p3/M, z6.s, z16.s\n"
+ "fmla z28.s, p3/M, z8.s, z18.s\n"
+ "fmla z23.s, p3/M, z0.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x16, x8, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x13, x8, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z17.s\n"
+ "fmla z22.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x14, x4, LSL #2]\n"
+ "fmla z20.s, p3/M, z5.s, z18.s\n"
+ "fmla z21.s, p3/M, z4.s, z18.s\n"
+ "fmla z30.s, p3/M, z8.s, z16.s\n"
+ "fmla z31.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x14, x8, LSL #2]\n"
+ "fmla z23.s, p3/M, z2.s, z18.s\n"
+ "fmla z28.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x5, x7, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z17.s\n"
+ "fmla z22.s, p3/M, z7.s, z17.s\n"
+ "fmla z30.s, p3/M, z3.s, z17.s\n"
+ "fmla z24.s, p3/M, z2.s, z16.s\n"
+ "fmla z31.s, p3/M, z4.s, z19.s\n"
+ "fmla z23.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x15]\n"
+ "fmla z20.s, p3/M, z1.s, z16.s\n"
+ "fmla z21.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x15, x17, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z19.s\n"
+ "fmla z30.s, p3/M, z5.s, z19.s\n"
+ "fmla z24.s, p3/M, z6.s, z18.s\n"
+ "fmla z29.s, p3/M, z0.s, z18.s\n"
+ "fmla z31.s, p3/M, z2.s, z17.s\n"
+ "fmla z23.s, p3/M, z8.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x13, x7, LSL #2]\n"
+ "fmla z22.s, p3/M, z3.s, z18.s\n"
+ "fmla z21.s, p3/M, z8.s, z17.s\n"
+ "fmla z28.s, p3/M, z5.s, z17.s\n"
+ "fmax z24.s, p3/M, z24.s, z15.s\n"
+ "fmla z29.s, p3/M, z8.s, z16.s\n"
+ "fmla z30.s, p3/M, z7.s, z16.s\n"
+ "fmla z31.s, p3/M, z6.s, z16.s\n"
+ ".inst 0xc1aec9f4 // fclamp { z20.s-z23.s }, z15.s, z14.s\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
+ ".inst 0xc1aec9fc // fclamp { z28.s-z31.s }, z15.s, z14.s\n"
+ "st1w { z22.s }, p0, [x24]\n"
+ "st1w { z24.s }, p0, [x26]\n"
+ "st1w { z20.s }, p0, [x26, x27, LSL #2]\n"
+ "st1w { z21.s }, p0, [x26, x25, LSL #2]\n"
+ "st1w { z23.s }, p0, [x24, x27, LSL #2]\n"
+ "st1w { z28.s }, p0, [x24, x25, LSL #2]\n"
+ "st1w { z29.s }, p0, [x23]\n"
+ "st1w { z30.s }, p0, [x23, x27, LSL #2]\n"
+ "st1w { z31.s }, p0, [x23, x25, LSL #2]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
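The channel-tail epilogue above ends each tile by advancing the tile indices branch-free (the add/cmp/csel sequence on x2/x3) and clamping the accumulators to the activation range (the fclamp instructions against z15/z14, which were loaded from offsetof_args_min/offsetof_args_max). A minimal C++ sketch of that control flow follows; the names (tile_i, tile_j, n_tile_rows, n_tile_cols, act_min, act_max) are illustrative stand-ins for the params_struct fields, not the library's API.

// Sketch only: scalar equivalent of the branch-free tile advance and the
// fclamp activation seen in the SME2 epilogue above.
#include <algorithm>

struct TileState { long tile_i, tile_j; };

// csel-style advance: tile_j wraps to zero and tile_i increments once a
// row of tiles is done; the outer loop ("blt 1b") repeats while
// tile_i < n_tile_rows.
inline bool advance_tile(TileState &t, long n_tile_rows, long n_tile_cols) {
    t.tile_j += 1;                            // add x3, x3, #0x1
    long next_i = t.tile_i + 1;               // add x20, x2, #0x1
    bool row_done = (t.tile_j >= n_tile_cols);// cmp x3, x22
    t.tile_i = row_done ? next_i : t.tile_i;  // csel x2, x2, x20, LT
    t.tile_j = row_done ? 0 : t.tile_j;       // csel x3, x3, XZR, LT
    return t.tile_i < n_tile_rows;            // cmp x2, x21 / blt 1b
}

// fclamp { z.. }, zmin, zmax: per-lane clamp to the activation range.
inline float activate(float v, float act_min, float act_max) {
    return std::min(std::max(v, act_min), act_max);
}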
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 2c868b6cf3..ee896b6ba1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,30 +87,30 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
activation_min, activation_max);
__asm__ __volatile__(
- "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0xd503477f // SMSTART ZA\n"
- "add x17, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x15, #0x0\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ld1w { z20.s }, p3/Z, [x8]\n"
- "addvl x8, x8, #1\n"
- "ldp x24, x23, [x17, #0x0]\n"
- "ldp x22, x21, [x17, #0x10]\n"
- "cntw x16\n"
- ".inst 0xa040c100 // ld1w { z0.s-z3.s }, pn8.b/Z, [x8]\n"
- "addvl x8, x8, #4\n"
- "ldr x20, [x17, #0x20]\n"
- "mov x15, #0x0\n"
+ "ldp x24, x23, [x16, #0x0]\n"
+ "ldp x22, x21, [x16, #0x10]\n"
+ "cntw x14\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- ".inst 0xa040c104 // ld1w { z4.s-z7.s }, pn8.b/Z, [x8]\n"
- "addvl x8, x8, #4\n"
- "cmp x16, %x[n_channels]\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rw { z22.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1w { z30.s }, p3/Z, [x17]\n"
+ "addvl x17, x17, #1\n"
+ "ldr x20, [x16, #0x20]\n"
+ "cmp x14, %x[n_channels]\n"
+ ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+ "addvl x17, x17, #4\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "sub x12, XZR, x14\n"
+ ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
+ "addvl x17, x17, #4\n"
"ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x13, XZR, x16\n"
- "ld1w { z8.s }, p3/Z, [x8]\n"
- "addvl x8, x8, #1\n"
+ "ld1w { z8.s }, p3/Z, [x17]\n"
+ "addvl x17, x17, #1\n"
"ld1w { z9.s }, p2/Z, [x24, x15, LSL #2]\n"
"ld1w { z10.s }, p2/Z, [x23, x15, LSL #2]\n"
"ld1w { z11.s }, p2/Z, [x22, x15, LSL #2]\n"
@@ -118,323 +118,323 @@ void sme2_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ld1w { z13.s }, p2/Z, [x20, x15, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z21, z20\n fmla z21.s, p3/M, z8.s, z9.s\n"
- "movprfx z24, z20\n fmla z24.s, p3/M, z7.s, z9.s\n"
- "ldr x22, [x17, #0x30]\n"
- "incw x13\n"
- "movprfx z25, z20\n fmla z25.s, p3/M, z6.s, z9.s\n"
- "fmla z21.s, p3/M, z0.s, z10.s\n"
- "ldr x25, [x17, #0x38]\n"
+ "movprfx z31, z30\n fmla z31.s, p3/M, z8.s, z9.s\n"
+ "movprfx z24, z30\n fmla z24.s, p3/M, z7.s, z9.s\n"
+ "ldr x23, [x16, #0x30]\n"
+ "incw x12\n"
+ "movprfx z25, z30\n fmla z25.s, p3/M, z6.s, z9.s\n"
+ "movprfx z26, z30\n fmla z26.s, p3/M, z5.s, z9.s\n"
+ "ldr x27, [x16, #0x38]\n"
"mov p1.b, p2.b\n"
+ "movprfx z27, z30\n fmla z27.s, p3/M, z4.s, z9.s\n"
+ "movprfx z20, z30\n fmla z20.s, p3/M, z3.s, z9.s\n"
+ "ldr x22, [x16, #0x28]\n"
+ "whilelt p0.s, x14, %x[n_channels]\n"
+ "movprfx z21, z30\n fmla z21.s, p3/M, z2.s, z9.s\n"
+ "movprfx z23, z30\n fmla z23.s, p3/M, z0.s, z9.s\n"
+ "ldr x21, [x16, #0x48]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
"fmla z24.s, p3/M, z4.s, z13.s\n"
- "movprfx z26, z20\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "ldr x21, [x17, #0x28]\n"
- "whilelt p0.s, x16, %x[n_channels]\n"
- "movprfx z27, z20\n fmla z27.s, p3/M, z4.s, z9.s\n"
- "movprfx z28, z20\n fmla z28.s, p3/M, z3.s, z9.s\n"
- "ldr x20, [x17, #0x48]\n"
- "ld1w { z19.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x16, #0x40]\n"
"fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z23.s }, p2/Z, [x22, x15, LSL #2]\n"
- "movprfx z29, z20\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "ldr x20, [x17, #0x40]\n"
- "fmla z21.s, p3/M, z5.s, z13.s\n"
- "fmla z24.s, p3/M, z6.s, z23.s\n"
- "ldr x24, [x17, #0x50]\n"
- "movprfx z31, z20\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "fmla z25.s, p3/M, z3.s, z13.s\n"
- "ldr x23, [x17, #0x58]\n"
+ "ld1w { z17.s }, p2/Z, [x23, x15, LSL #2]\n"
"fmla z26.s, p3/M, z2.s, z13.s\n"
+ "ldr x26, [x16, #0x50]\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
- "ldr x22, [x17, #0x60]\n"
- "fmla z28.s, p3/M, z0.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z29.s, p3/M, z6.s, z12.s\n"
- "ldr x12, [x17, #0x70]\n"
- "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
- "movprfx z30, z20\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "fmla z21.s, p3/M, z7.s, z23.s\n"
- "ldr x21, [x17, #0x68]\n"
- "fmla z24.s, p3/M, z0.s, z17.s\n"
- "fmla z31.s, p3/M, z8.s, z16.s\n"
+ "fmla z20.s, p3/M, z0.s, z13.s\n"
+ "ld1w { z19.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x25, [x16, #0x58]\n"
+ "fmla z21.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "movprfx z22, z30\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "ldr x24, [x16, #0x60]\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "fmla z24.s, p3/M, z6.s, z17.s\n"
+ "ldr x23, [x16, #0x68]\n"
+ "ld1w { z30.s }, p3/Z, [x17]\n"
+ "fmla z25.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z18.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z17.s\n"
+ "ldr x22, [x16, #0x70]\n"
+ "fmla z23.s, p3/M, z8.s, z12.s\n"
"ld1w { z16.s }, p2/Z, [x20, x15, LSL #2]\n"
- "ldr x27, [x17, #0x78]\n"
- "fmla z26.s, p3/M, z4.s, z23.s\n"
- "fmla z27.s, p3/M, z3.s, z23.s\n"
- "ldr x20, [x17, #0x80]\n"
- "ld1w { z20.s }, p3/Z, [x8]\n"
- "fmla z30.s, p3/M, z0.s, z23.s\n"
- "fmla z28.s, p3/M, z4.s, z19.s\n"
- "ldr x11, [x17, #0x88]\n"
- "addvl x8, x8, #1\n"
- "fmla z29.s, p3/M, z1.s, z23.s\n"
+ "fmla z27.s, p3/M, z3.s, z17.s\n"
+ "ldr x21, [x16, #0x78]\n"
+ "fmla z22.s, p3/M, z0.s, z17.s\n"
+ "fmla z20.s, p3/M, z4.s, z19.s\n"
+ "ldr x20, [x16, #0x80]\n"
+ "addvl x17, x17, #1\n"
+ "fmla z31.s, p3/M, z7.s, z17.s\n"
+ "fmla z24.s, p3/M, z0.s, z18.s\n"
+ "ldr x11, [x16, #0x88]\n"
"fmla z21.s, p3/M, z1.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x24, x15, LSL #2]\n"
- "ldr x26, [x17, #0x90]\n"
- "fmla z24.s, p3/M, z2.s, z16.s\n"
"fmla z25.s, p3/M, z1.s, z16.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x15, LSL #2]\n"
- "ldr x25, [x17, #0x98]\n"
- "ld1w { z17.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "ldr x10, [x16, #0x90]\n"
"fmla z27.s, p3/M, z5.s, z19.s\n"
- "fmla z30.s, p3/M, z2.s, z19.s\n"
- "ldr x24, [x17, #0xa0]\n"
- "fmla z26.s, p3/M, z0.s, z18.s\n"
- "fmla z28.s, p3/M, z2.s, z11.s\n"
- "ldr x10, [x14, #0x0]\n"
- "fmla z24.s, p3/M, z8.s, z19.s\n"
+ "fmla z23.s, p3/M, z1.s, z19.s\n"
+ "ldr x9, [x13, #0x0]\n"
+ "fmla z22.s, p3/M, z2.s, z19.s\n"
+ "ldr x28, [x13, #0x8]\n"
+ "fmla z31.s, p3/M, z1.s, z18.s\n"
+ "fmla z24.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z9.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ldr x27, [x16, #0x98]\n"
+ "ld1w { z16.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z0.s, z17.s\n"
"fmla z25.s, p3/M, z7.s, z19.s\n"
- "ldr x9, [x14, #0x8]\n"
- "fmla z31.s, p3/M, z1.s, z19.s\n"
- "fmla z29.s, p3/M, z3.s, z17.s\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "ldr x26, [x13, #0x10]\n"
+ "fmla z20.s, p3/M, z2.s, z9.s\n"
+ "ldr x25, [x13, #0x18]\n"
+ "fmla z24.s, p3/M, z8.s, z19.s\n"
+ "fmla z21.s, p3/M, z3.s, z16.s\n"
+ "ld1w { z29.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z26.s, p3/M, z6.s, z16.s\n"
+ "fmla z31.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ldr x22, [x16, #0xb0]\n"
+ "fmla z25.s, p3/M, z5.s, z9.s\n"
"ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
- "ldr x23, [x17, #0xa8]\n"
- "fmla z26.s, p3/M, z6.s, z17.s\n"
- "fmla z27.s, p3/M, z7.s, z16.s\n"
- "ld1w { z23.s }, p2/Z, [x20, x15, LSL #2]\n"
- "ldr x22, [x17, #0xc0]\n"
- "fmla z28.s, p3/M, z6.s, z16.s\n"
- "fmla z30.s, p3/M, z4.s, z16.s\n"
- "ldr x28, [x14, #0x10]\n"
- "fmla z21.s, p3/M, z3.s, z18.s\n"
- "fmla z25.s, p3/M, z5.s, z11.s\n"
- "ld1w { z15.s }, p2/Z, [x12, x15, LSL #2]\n"
- "ldr x21, [x17, #0xb0]\n"
- "fmla z29.s, p3/M, z5.s, z16.s\n"
- "fmla z31.s, p3/M, z3.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x27, x15, LSL #2]\n"
- "ldr x20, [x17, #0xb8]\n"
- "fmla z26.s, p3/M, z8.s, z16.s\n"
- "fmla z28.s, p3/M, z8.s, z15.s\n"
- "ldr x27, [x14, #0x18]\n"
- "fmla z30.s, p3/M, z6.s, z19.s\n"
- "fmla z24.s, p3/M, z3.s, z23.s\n"
- "fmla z27.s, p3/M, z0.s, z23.s\n"
- "fmla z31.s, p3/M, z5.s, z15.s\n"
- "ld1w { z17.s }, p2/Z, [x11, x15, LSL #2]\n"
- "fmla z29.s, p3/M, z7.s, z19.s\n"
- "ld1w { z19.s }, p2/Z, [x26, x15, LSL #2]\n"
- "fmla z21.s, p3/M, z4.s, z23.s\n"
- "fmla z26.s, p3/M, z1.s, z23.s\n"
- "fmla z24.s, p3/M, z5.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z25.s, p3/M, z4.s, z17.s\n"
- "fmla z27.s, p3/M, z2.s, z17.s\n"
- "fmla z28.s, p3/M, z1.s, z17.s\n"
- "fmla z30.s, p3/M, z8.s, z19.s\n"
- "ld1w { z17.s }, p2/Z, [x24, x15, LSL #2]\n"
- "ldr x26, [x17, #0x20]\n"
- "fmla z21.s, p3/M, z2.s, z17.s\n"
- "fmla z26.s, p3/M, z7.s, z16.s\n"
- "fmla z27.s, p3/M, z6.s, z16.s\n"
- "fmla z29.s, p3/M, z4.s, z16.s\n"
- "fmla z30.s, p3/M, z3.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x21, x15, LSL #2]\n"
- "fmla z31.s, p3/M, z7.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z21.s, p3/M, z6.s, z18.s\n"
- "fmla z31.s, p3/M, z4.s, z16.s\n"
- "fmla z24.s, p3/M, z1.s, z17.s\n"
- "fmla z25.s, p3/M, z0.s, z17.s\n"
+ "ldr x21, [x16, #0xb8]\n"
+ "fmla z27.s, p3/M, z7.s, z29.s\n"
+ "fmla z20.s, p3/M, z6.s, z29.s\n"
"ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
- "fmax z21.s, p3/M, z21.s, z22.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "fmla z30.s, p3/M, z5.s, z16.s\n"
- "fmla z29.s, p3/M, z0.s, z18.s\n"
- "fmla z31.s, p3/M, z2.s, z17.s\n"
+ "ldr x20, [x16, #0xc0]\n"
+ "fmla z22.s, p3/M, z4.s, z29.s\n"
+ "fmla z21.s, p3/M, z5.s, z29.s\n"
+ "fmla z23.s, p3/M, z3.s, z29.s\n"
+ "fmla z26.s, p3/M, z8.s, z29.s\n"
+ "fmla z24.s, p3/M, z3.s, z17.s\n"
+ "fmla z31.s, p3/M, z4.s, z17.s\n"
+ "fmla z20.s, p3/M, z8.s, z18.s\n"
+ "fmla z27.s, p3/M, z0.s, z17.s\n"
+ "fmla z22.s, p3/M, z6.s, z16.s\n"
+ "fmla z21.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z13.s }, p2/Z, [x10, x15, LSL #2]\n"
+ "fmla z23.s, p3/M, z5.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x11, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z5.s, z16.s\n"
+ "fmla z25.s, p3/M, z4.s, z16.s\n"
+ "fmla z27.s, p3/M, z2.s, z16.s\n"
+ "fmla z20.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z28.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "ldr x24, [x16, #0x20]\n"
+ "fmla z22.s, p3/M, z8.s, z13.s\n"
+ "fmla z26.s, p3/M, z7.s, z17.s\n"
+ "fmla z21.s, p3/M, z4.s, z17.s\n"
+ "fmla z23.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z31.s, p3/M, z2.s, z28.s\n"
+ "fmla z24.s, p3/M, z1.s, z28.s\n"
+ "fmla z27.s, p3/M, z6.s, z17.s\n"
+ "fmla z25.s, p3/M, z0.s, z28.s\n"
+ "ld1w { z18.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z22.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z20.s, p3/M, z7.s, z16.s\n"
+ "fmla z23.s, p3/M, z4.s, z16.s\n"
+ "fmla z31.s, p3/M, z6.s, z17.s\n"
+ "fmla z21.s, p3/M, z0.s, z17.s\n"
+ "fmla z22.s, p3/M, z5.s, z16.s\n"
"fmla z27.s, p3/M, z8.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x15, LSL #2]\n"
- "ldp x22, x21, [x17, #0x0]\n"
- "fmla z26.s, p3/M, z3.s, z18.s\n"
- "fmla z25.s, p3/M, z8.s, z17.s\n"
- "ldp x25, x24, [x17, #0x10]\n"
+ "ld1w { z16.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldp x23, x22, [x16, #0x0]\n"
+ "fmla z23.s, p3/M, z2.s, z18.s\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
+ "ldp x21, x20, [x16, #0x10]\n"
"incw x15\n"
- "fmin z21.s, p3/M, z21.s, z14.s\n"
- "st1w { z21.s }, p1, [x10, x13, LSL #2]\n"
- "ldr x20, [x14, #0x20]\n"
- "fmla z28.s, p3/M, z5.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z16.s\n"
- "fmla z30.s, p3/M, z7.s, z16.s\n"
- "ld1w { z9.s }, p0/Z, [x22, x16, LSL #2]\n"
+ "fmla z25.s, p3/M, z8.s, z18.s\n"
+ "fmla z20.s, p3/M, z5.s, z18.s\n"
+ ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+ "addvl x17, x17, #4\n"
+ "fmax z31.s, p3/M, z31.s, z15.s\n"
+ "fmla z21.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z9.s }, p0/Z, [x23, x14, LSL #2]\n"
"whilelt p2.s, x15, %x[n_channels]\n"
- "fmla z31.s, p3/M, z6.s, z16.s\n"
- ".inst 0xc1aecad8 // fclamp { z24.s-z27.s }, z22.s, z14.s\n"
- "st1w { z24.s }, p1, [x9, x13, LSL #2]\n"
- "ldr x23, [x14, #0x28]\n"
- "st1w { z25.s }, p1, [x28, x13, LSL #2]\n"
- "ldr x22, [x14, #0x30]\n"
- "ld1w { z10.s }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0xc1aecadc // fclamp { z28.s-z31.s }, z22.s, z14.s\n"
- "st1w { z26.s }, p1, [x27, x13, LSL #2]\n"
- "ldr x21, [x14, #0x38]\n"
- "ld1w { z11.s }, p0/Z, [x25, x16, LSL #2]\n"
- "st1w { z27.s }, p1, [x20, x13, LSL #2]\n"
- "ldr x20, [x14, #0x40]\n"
- "ld1w { z12.s }, p0/Z, [x24, x16, LSL #2]\n"
- "ld1w { z13.s }, p0/Z, [x26, x16, LSL #2]\n"
- "incw x16\n"
- "cmp x16, %x[n_channels]\n"
- "st1w { z28.s }, p1, [x23, x13, LSL #2]\n"
- ".inst 0xa040c100 // ld1w { z0.s-z3.s }, pn8.b/Z, [x8]\n"
- "addvl x8, x8, #4\n"
- "st1w { z29.s }, p1, [x22, x13, LSL #2]\n"
- ".inst 0xa040c104 // ld1w { z4.s-z7.s }, pn8.b/Z, [x8]\n"
- "addvl x8, x8, #4\n"
- "st1w { z30.s }, p1, [x21, x13, LSL #2]\n"
- "st1w { z31.s }, p1, [x20, x13, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x8]\n"
- "addvl x8, x8, #1\n"
+ "fmla z22.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z10.s }, p0/Z, [x22, x14, LSL #2]\n"
+ "fmla z23.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z11.s }, p0/Z, [x21, x14, LSL #2]\n"
+ ".inst 0xc1aec9f8 // fclamp { z24.s-z27.s }, z15.s, z14.s\n"
+ "ld1w { z12.s }, p0/Z, [x20, x14, LSL #2]\n"
+ "fmin z31.s, p3/M, z31.s, z14.s\n"
+ "ld1w { z13.s }, p0/Z, [x24, x14, LSL #2]\n"
+ "incw x14\n"
+ ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
+ "addvl x17, x17, #4\n"
+ "cmp x14, %x[n_channels]\n"
+ ".inst 0xc1aec9f4 // fclamp { z20.s-z23.s }, z15.s, z14.s\n"
+ "ld1w { z8.s }, p3/Z, [x17]\n"
+ "addvl x17, x17, #1\n"
+ "st1w { z24.s }, p1, [x28, x12, LSL #2]\n"
+ "ldr x23, [x13, #0x28]\n"
+ "st1w { z31.s }, p1, [x9, x12, LSL #2]\n"
+ "ldr x20, [x13, #0x20]\n"
+ "st1w { z25.s }, p1, [x26, x12, LSL #2]\n"
+ "ldr x22, [x13, #0x30]\n"
+ "st1w { z26.s }, p1, [x25, x12, LSL #2]\n"
+ "ldr x21, [x13, #0x38]\n"
+ "st1w { z27.s }, p1, [x20, x12, LSL #2]\n"
+ "ldr x20, [x13, #0x40]\n"
+ "st1w { z20.s }, p1, [x23, x12, LSL #2]\n"
+ "st1w { z21.s }, p1, [x22, x12, LSL #2]\n"
+ "st1w { z22.s }, p1, [x21, x12, LSL #2]\n"
+ "st1w { z23.s }, p1, [x20, x12, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z21, z20\n fmla z21.s, p3/M, z8.s, z9.s\n"
- "movprfx z24, z20\n fmla z24.s, p3/M, z7.s, z9.s\n"
- "ldr x23, [x17, #0x30]\n"
- "incw x13\n"
- "movprfx z25, z20\n fmla z25.s, p3/M, z6.s, z9.s\n"
- "fmla z21.s, p3/M, z0.s, z10.s\n"
- "ldr x22, [x17, #0x38]\n"
+ "movprfx z20, z30\n fmla z20.s, p3/M, z8.s, z9.s\n"
+ "movprfx z24, z30\n fmla z24.s, p3/M, z7.s, z9.s\n"
+ "ldr x23, [x16, #0x30]\n"
+ "incw x12\n"
+ "movprfx z25, z30\n fmla z25.s, p3/M, z6.s, z9.s\n"
+ "movprfx z26, z30\n fmla z26.s, p3/M, z5.s, z9.s\n"
+ "ldr x27, [x16, #0x38]\n"
"mov p0.b, p2.b\n"
+ "movprfx z27, z30\n fmla z27.s, p3/M, z4.s, z9.s\n"
+ "movprfx z28, z30\n fmla z28.s, p3/M, z3.s, z9.s\n"
+ "ldr x22, [x16, #0x28]\n"
+ "movprfx z29, z30\n fmla z29.s, p3/M, z2.s, z9.s\n"
+ "movprfx z31, z30\n fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ldr x21, [x16, #0x48]\n"
+ "fmla z20.s, p3/M, z0.s, z10.s\n"
"fmla z24.s, p3/M, z4.s, z13.s\n"
- "movprfx z26, z20\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "ldr x21, [x17, #0x28]\n"
- "movprfx z27, z20\n fmla z27.s, p3/M, z4.s, z9.s\n"
- "movprfx z28, z20\n fmla z28.s, p3/M, z3.s, z9.s\n"
- "ldr x20, [x17, #0x48]\n"
- "ld1w { z19.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x16, #0x40]\n"
"fmla z25.s, p3/M, z2.s, z11.s\n"
- "ld1w { z18.s }, p2/Z, [x23, x15, LSL #2]\n"
- "movprfx z29, z20\n fmla z29.s, p3/M, z2.s, z9.s\n"
- "ldr x20, [x17, #0x40]\n"
- "fmla z21.s, p3/M, z5.s, z13.s\n"
- "fmla z24.s, p3/M, z6.s, z18.s\n"
- "ldr x25, [x17, #0x50]\n"
- "movprfx z31, z20\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "fmla z25.s, p3/M, z3.s, z13.s\n"
- "ldr x24, [x17, #0x58]\n"
+ "ld1w { z19.s }, p2/Z, [x23, x15, LSL #2]\n"
"fmla z26.s, p3/M, z2.s, z13.s\n"
+ "ldr x26, [x16, #0x50]\n"
"fmla z27.s, p3/M, z1.s, z13.s\n"
- "ldr x23, [x17, #0x60]\n"
"fmla z28.s, p3/M, z0.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x25, [x16, #0x58]\n"
"fmla z29.s, p3/M, z6.s, z12.s\n"
- "ldr x12, [x17, #0x70]\n"
- "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
- "movprfx z30, z20\n fmla z30.s, p3/M, z1.s, z9.s\n"
- "fmla z21.s, p3/M, z7.s, z18.s\n"
- "ldr x22, [x17, #0x68]\n"
- "fmla z24.s, p3/M, z0.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ldr x24, [x16, #0x60]\n"
+ "fmla z20.s, p3/M, z5.s, z13.s\n"
+ "fmla z24.s, p3/M, z6.s, z19.s\n"
+ "ldr x23, [x16, #0x68]\n"
+ "fmla z25.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z19.s\n"
+ "ldr x22, [x16, #0x70]\n"
"fmla z31.s, p3/M, z8.s, z16.s\n"
"ld1w { z16.s }, p2/Z, [x20, x15, LSL #2]\n"
- "ldr x21, [x17, #0x78]\n"
- "fmla z26.s, p3/M, z4.s, z18.s\n"
- "fmla z27.s, p3/M, z3.s, z18.s\n"
- "ldr x20, [x17, #0x80]\n"
- "fmla z30.s, p3/M, z0.s, z18.s\n"
- "fmla z28.s, p3/M, z4.s, z19.s\n"
- "ldr x11, [x17, #0x88]\n"
- "fmla z29.s, p3/M, z1.s, z18.s\n"
- "fmla z21.s, p3/M, z1.s, z17.s\n"
- "ld1w { z20.s }, p2/Z, [x25, x15, LSL #2]\n"
- "ldr x10, [x17, #0x90]\n"
- "fmla z24.s, p3/M, z2.s, z16.s\n"
+ "fmla z27.s, p3/M, z3.s, z19.s\n"
+ "ldr x21, [x16, #0x78]\n"
+ "fmla z30.s, p3/M, z0.s, z19.s\n"
+ "fmla z28.s, p3/M, z4.s, z18.s\n"
+ "ldr x20, [x16, #0x80]\n"
+ "fmla z20.s, p3/M, z7.s, z19.s\n"
+ "fmla z24.s, p3/M, z0.s, z17.s\n"
+ "ldr x11, [x16, #0x88]\n"
+ "fmla z29.s, p3/M, z1.s, z19.s\n"
"fmla z25.s, p3/M, z1.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x24, x15, LSL #2]\n"
- "ldr x9, [x17, #0x98]\n"
- "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z5.s, z19.s\n"
- "fmla z30.s, p3/M, z2.s, z19.s\n"
- "ldr x28, [x17, #0xa0]\n"
- "fmla z26.s, p3/M, z0.s, z20.s\n"
+ "ld1w { z19.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "ldr x10, [x16, #0x90]\n"
+ "fmla z27.s, p3/M, z5.s, z18.s\n"
+ "fmla z31.s, p3/M, z1.s, z18.s\n"
+ "ldr x9, [x13, #0x0]\n"
+ "fmla z30.s, p3/M, z2.s, z18.s\n"
+ "ldr x28, [x13, #0x8]\n"
+ "fmla z20.s, p3/M, z1.s, z17.s\n"
+ "fmla z24.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ldr x27, [x16, #0x98]\n"
+ "ld1w { z16.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z0.s, z19.s\n"
+ "fmla z25.s, p3/M, z7.s, z18.s\n"
+ "ldr x26, [x16, #0xa0]\n"
+ "ldr x25, [x13, #0x10]\n"
"fmla z28.s, p3/M, z2.s, z17.s\n"
- "ldr x27, [x14, #0x0]\n"
- "fmla z24.s, p3/M, z8.s, z19.s\n"
- "fmla z25.s, p3/M, z7.s, z19.s\n"
- "ldr x26, [x14, #0x8]\n"
- "fmla z31.s, p3/M, z1.s, z19.s\n"
+ "ldr x24, [x13, #0x18]\n"
+ "fmla z24.s, p3/M, z8.s, z18.s\n"
"fmla z29.s, p3/M, z3.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x22, x15, LSL #2]\n"
- "ldr x25, [x17, #0xa8]\n"
+ "ld1w { z18.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "ldr x23, [x16, #0xa8]\n"
"fmla z26.s, p3/M, z6.s, z16.s\n"
- "fmla z27.s, p3/M, z7.s, z19.s\n"
- "ld1w { z18.s }, p2/Z, [x20, x15, LSL #2]\n"
- "ldr x23, [x17, #0xc0]\n"
- "fmla z28.s, p3/M, z6.s, z19.s\n"
- "fmla z30.s, p3/M, z4.s, z19.s\n"
- "ldr x24, [x14, #0x10]\n"
- "fmla z21.s, p3/M, z3.s, z20.s\n"
+ "fmla z20.s, p3/M, z3.s, z19.s\n"
+ "ld1w { z19.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ldr x22, [x16, #0xb0]\n"
"fmla z25.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x12, x15, LSL #2]\n"
- "ldr x22, [x17, #0xb0]\n"
- "fmla z29.s, p3/M, z5.s, z19.s\n"
- "fmla z31.s, p3/M, z3.s, z19.s\n"
"ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
- "ldr x20, [x17, #0xb8]\n"
- "fmla z26.s, p3/M, z8.s, z19.s\n"
- "fmla z28.s, p3/M, z8.s, z17.s\n"
- "ldr x21, [x14, #0x18]\n"
+ "ldr x21, [x16, #0xb8]\n"
+ "fmla z27.s, p3/M, z7.s, z18.s\n"
+ "fmla z28.s, p3/M, z6.s, z18.s\n"
+ "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x16, #0xc0]\n"
+ "fmla z30.s, p3/M, z4.s, z18.s\n"
+ "fmla z29.s, p3/M, z5.s, z18.s\n"
+ "fmla z31.s, p3/M, z3.s, z18.s\n"
+ "fmla z26.s, p3/M, z8.s, z18.s\n"
+ "fmla z24.s, p3/M, z3.s, z17.s\n"
+ "fmla z20.s, p3/M, z4.s, z17.s\n"
+ "fmla z28.s, p3/M, z8.s, z19.s\n"
+ "fmla z27.s, p3/M, z0.s, z17.s\n"
"fmla z30.s, p3/M, z6.s, z16.s\n"
- "fmla z24.s, p3/M, z3.s, z18.s\n"
- "fmla z27.s, p3/M, z0.s, z18.s\n"
- "fmla z31.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x11, x15, LSL #2]\n"
"fmla z29.s, p3/M, z7.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x10, x15, LSL #2]\n"
- "fmla z21.s, p3/M, z4.s, z18.s\n"
- "fmla z26.s, p3/M, z1.s, z18.s\n"
- "fmla z24.s, p3/M, z5.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x9, x15, LSL #2]\n"
- "fmla z25.s, p3/M, z4.s, z17.s\n"
- "fmla z27.s, p3/M, z2.s, z17.s\n"
- "fmla z28.s, p3/M, z1.s, z17.s\n"
- "fmla z30.s, p3/M, z8.s, z19.s\n"
- "ld1w { z17.s }, p2/Z, [x28, x15, LSL #2]\n"
- "fmla z21.s, p3/M, z2.s, z17.s\n"
- "fmla z26.s, p3/M, z7.s, z16.s\n"
- "fmla z27.s, p3/M, z6.s, z16.s\n"
- "fmla z29.s, p3/M, z4.s, z16.s\n"
- "fmla z30.s, p3/M, z3.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x22, x15, LSL #2]\n"
- "fmla z31.s, p3/M, z7.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z21.s, p3/M, z6.s, z18.s\n"
- "fmla z31.s, p3/M, z4.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x10, x15, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x11, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z19.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z5.s, z16.s\n"
+ "fmla z25.s, p3/M, z4.s, z16.s\n"
+ "fmla z27.s, p3/M, z2.s, z16.s\n"
+ "fmla z28.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z18.s\n"
+ "fmla z26.s, p3/M, z7.s, z19.s\n"
+ "fmla z29.s, p3/M, z4.s, z19.s\n"
+ "fmla z31.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z20.s, p3/M, z2.s, z17.s\n"
"fmla z24.s, p3/M, z1.s, z17.s\n"
+ "fmla z27.s, p3/M, z6.s, z19.s\n"
"fmla z25.s, p3/M, z0.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
- "fmax z21.s, p3/M, z21.s, z22.s\n"
+ "ld1w { z18.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z19.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, x15, LSL #2]\n"
"fmla z28.s, p3/M, z7.s, z16.s\n"
+ "fmla z31.s, p3/M, z4.s, z16.s\n"
+ "fmla z20.s, p3/M, z6.s, z17.s\n"
+ "fmla z29.s, p3/M, z0.s, z17.s\n"
"fmla z30.s, p3/M, z5.s, z16.s\n"
- "fmla z29.s, p3/M, z0.s, z18.s\n"
- "fmla z31.s, p3/M, z2.s, z17.s\n"
"fmla z27.s, p3/M, z8.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z26.s, p3/M, z3.s, z18.s\n"
- "fmla z25.s, p3/M, z8.s, z17.s\n"
- "fmin z21.s, p3/M, z21.s, z14.s\n"
- "st1w { z21.s }, p0, [x27, x13, LSL #2]\n"
- "ldr x20, [x14, #0x20]\n"
- "fmla z28.s, p3/M, z5.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z31.s, p3/M, z2.s, z18.s\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
+ "fmla z25.s, p3/M, z8.s, z18.s\n"
+ "fmla z28.s, p3/M, z5.s, z18.s\n"
+ "fmax z20.s, p3/M, z20.s, z15.s\n"
"fmla z29.s, p3/M, z8.s, z16.s\n"
"fmla z30.s, p3/M, z7.s, z16.s\n"
"fmla z31.s, p3/M, z6.s, z16.s\n"
- ".inst 0xc1aecad8 // fclamp { z24.s-z27.s }, z22.s, z14.s\n"
- "st1w { z24.s }, p0, [x26, x13, LSL #2]\n"
- "ldr x23, [x14, #0x28]\n"
- "st1w { z25.s }, p0, [x24, x13, LSL #2]\n"
- "ldr x22, [x14, #0x30]\n"
- ".inst 0xc1aecadc // fclamp { z28.s-z31.s }, z22.s, z14.s\n"
- "st1w { z26.s }, p0, [x21, x13, LSL #2]\n"
- "ldr x21, [x14, #0x38]\n"
- "st1w { z27.s }, p0, [x20, x13, LSL #2]\n"
- "ldr x20, [x14, #0x40]\n"
- "st1w { z28.s }, p0, [x23, x13, LSL #2]\n"
- "st1w { z29.s }, p0, [x22, x13, LSL #2]\n"
- "st1w { z30.s }, p0, [x21, x13, LSL #2]\n"
- "st1w { z31.s }, p0, [x20, x13, LSL #2]\n"
+ ".inst 0xc1aec9f8 // fclamp { z24.s-z27.s }, z15.s, z14.s\n"
+ "fmin z20.s, p3/M, z20.s, z14.s\n"
+ ".inst 0xc1aec9fc // fclamp { z28.s-z31.s }, z15.s, z14.s\n"
+ "st1w { z24.s }, p0, [x28, x12, LSL #2]\n"
+ "ldr x23, [x13, #0x28]\n"
+ "st1w { z20.s }, p0, [x9, x12, LSL #2]\n"
+ "ldr x20, [x13, #0x20]\n"
+ "st1w { z25.s }, p0, [x25, x12, LSL #2]\n"
+ "ldr x22, [x13, #0x30]\n"
+ "st1w { z26.s }, p0, [x24, x12, LSL #2]\n"
+ "ldr x21, [x13, #0x38]\n"
+ "st1w { z27.s }, p0, [x20, x12, LSL #2]\n"
+ "ldr x20, [x13, #0x40]\n"
+ "st1w { z28.s }, p0, [x23, x12, LSL #2]\n"
+ "st1w { z29.s }, p0, [x22, x12, LSL #2]\n"
+ "st1w { z30.s }, p0, [x21, x12, LSL #2]\n"
+ "st1w { z31.s }, p0, [x20, x12, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
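The indirect variant above differs from the *_direct kernels in how it forms input addresses: rather than computing strided bases itself, it loads a table of per-tap input row pointers from the Args structure (the ldp/ldr reads at [x16, #0x..], i.e. offsetof_Args_inptrs) and indexes every pointer with the shared channel offset held in x15. A hedged C++ sketch of that addressing pattern, with illustrative names; it shows the gather shape only, not the actual kernel.

// Sketch only: the indirect-addressing pattern of the kernel above.
// 'inptrs' mirrors offsetof_Args_inptrs; names are illustrative.
void gather_inputs(const float *const *inptrs, // per-tap input row pointers
                   long channel_offset,        // x15: current channel index
                   float *regs, int n_taps) {
    for (int t = 0; t < n_taps; ++t) {
        // ld1w { z.. }, p2/Z, [xN, x15, LSL #2]: the same channel offset
        // is applied to a different base pointer for every input tap.
        regs[t] = inptrs[t][channel_offset];
    }
}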
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index efd37c38ec..cf4a0d5b9b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,98 +88,98 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ptrue p3.b\n"
- ".inst 0x25207810 // ptrue pn8.b\n"
"mov x2, #0x0\n"
"mov x3, #0x0\n"
+ "ptrue p3.b\n"
+ ".inst 0x25207810 // ptrue pn8.b\n"
"1:" // Tile loop
"str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x22, #0x4\n"
"str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
"ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
+ "ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
"madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
+ "add x7, x4, x4\n"
"mul x20, x20, x22\n" // offset *= kernel_stride * output_size
- "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "add x8, x7, x4\n"
"add x5, x5, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x6, x5, x21, LSL #2\n"
- "add x7, x6, x21, LSL #2\n"
- "add x8, x4, x4\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, x7, x21, LSL #2\n"
- "add x15, x8, x4\n"
+ "add x17, x8, x4\n"
+ "add x16, x5, x21, LSL #2\n"
+ "add x15, x17, x4\n"
"add x14, x16, x21, LSL #2\n"
- "add x13, x15, x4\n"
- "add x12, x14, x21, LSL #2\n"
- "add x11, x13, x4\n"
+ "add x13, x14, x21, LSL #2\n"
+ "add x12, x13, x21, LSL #2\n"
+ "add x11, x12, x21, LSL #2\n"
"cbnz x3, 2f\n"
"ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "sub x21, x20, x3\n"
- "sub x21, x21, #0x1\n"
"lsl x10, %x[n_channels], #0x2\n"
- "mov x20, #0x10\n"
- "and x21, x21, #0x3fffff\n"
- "mul x20, x20, x4\n"
- "orr x10, x10, x21, LSL #22\n"
- "orr x10, x10, x20, LSL #38\n"
- "add x9, x7, x8, LSL #2\n"
- "add x28, x5, x11, LSL #2\n"
- "add x27, x7, x15, LSL #2\n"
- "add x26, x12, x11, LSL #2\n"
- "add x25, x16, x8, LSL #2\n"
+ "mov x21, #0x10\n"
+ "mul x21, x21, x4\n"
+ "add x9, x14, x7, LSL #2\n"
+ "add x28, x5, x15, LSL #2\n"
+ "add x27, x14, x8, LSL #2\n"
+ "sub x20, x20, x3\n"
+ "add x26, x11, x15, LSL #2\n"
+ "sub x20, x20, #0x1\n"
+ "add x25, x13, x7, LSL #2\n"
+ "and x20, x20, #0x3fffff\n"
"add x24, x5, x4, LSL #2\n"
- "add x23, x5, x13, LSL #2\n"
- "add x22, x16, x15, LSL #2\n"
- "add x21, x6, x11, LSL #2\n"
- "add x20, x6, x8, LSL #2\n"
+ "orr x10, x10, x20, LSL #22\n"
+ "add x23, x5, x17, LSL #2\n"
+ "orr x10, x10, x21, LSL #38\n"
+ "add x22, x13, x8, LSL #2\n"
+ "add x21, x16, x15, LSL #2\n"
+ "add x20, x16, x7, LSL #2\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- "add x9, x14, x11, LSL #2\n"
+ "add x9, x12, x15, LSL #2\n"
".inst 0xf8aa48ba // rprfm pldonce, x10, [x5]\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
- "add x28, x6, x15, LSL #2\n"
+ "add x28, x16, x8, LSL #2\n"
".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- "add x27, x12, x4, LSL #2\n"
- ".inst 0xf8aa499a // rprfm pldonce, x10, [x12]\n"
+ "add x27, x11, x4, LSL #2\n"
+ ".inst 0xf8aa497a // rprfm pldonce, x10, [x11]\n"
".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- "add x26, x7, x4, LSL #2\n"
+ "add x26, x14, x4, LSL #2\n"
".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- "add x25, x12, x13, LSL #2\n"
+ "add x25, x11, x17, LSL #2\n"
".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
- "add x24, x7, x13, LSL #2\n"
+ "add x24, x14, x17, LSL #2\n"
".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
- "add x23, x5, x8, LSL #2\n"
+ "add x23, x5, x7, LSL #2\n"
".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- "add x22, x16, x4, LSL #2\n"
- ".inst 0xf8aa48da // rprfm pldonce, x10, [x6]\n"
+ "add x22, x13, x4, LSL #2\n"
+ ".inst 0xf8aa4a1a // rprfm pldonce, x10, [x16]\n"
".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- "add x21, x5, x15, LSL #2\n"
- ".inst 0xf8aa49da // rprfm pldonce, x10, [x14]\n"
+ "add x21, x5, x8, LSL #2\n"
+ ".inst 0xf8aa499a // rprfm pldonce, x10, [x12]\n"
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
- "add x20, x16, x13, LSL #2\n"
+ "add x20, x13, x17, LSL #2\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- "add x9, x7, x11, LSL #2\n"
+ "add x9, x14, x15, LSL #2\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
- "add x28, x14, x8, LSL #2\n"
+ "add x28, x12, x7, LSL #2\n"
".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
- "add x27, x16, x11, LSL #2\n"
+ "add x27, x13, x15, LSL #2\n"
".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
- "add x26, x12, x8, LSL #2\n"
+ "add x26, x11, x7, LSL #2\n"
".inst 0xf8aa4b3a // rprfm pldonce, x10, [x25]\n"
- "add x25, x14, x15, LSL #2\n"
+ "add x25, x12, x8, LSL #2\n"
".inst 0xf8aa4b1a // rprfm pldonce, x10, [x24]\n"
- "add x24, x12, x15, LSL #2\n"
+ "add x24, x11, x8, LSL #2\n"
".inst 0xf8aa4afa // rprfm pldonce, x10, [x23]\n"
- "add x23, x6, x4, LSL #2\n"
+ "add x23, x16, x4, LSL #2\n"
".inst 0xf8aa4ada // rprfm pldonce, x10, [x22]\n"
- "add x22, x6, x13, LSL #2\n"
+ "add x22, x16, x17, LSL #2\n"
".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
- "add x21, x14, x4, LSL #2\n"
- ".inst 0xf8aa48fa // rprfm pldonce, x10, [x7]\n"
+ "add x21, x12, x4, LSL #2\n"
+ ".inst 0xf8aa49da // rprfm pldonce, x10, [x14]\n"
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
- "add x20, x14, x13, LSL #2\n"
+ "add x20, x12, x17, LSL #2\n"
".inst 0xf8aa493a // rprfm pldonce, x10, [x9]\n"
- ".inst 0xf8aa4a1a // rprfm pldonce, x10, [x16]\n"
+ ".inst 0xf8aa49ba // rprfm pldonce, x10, [x13]\n"
".inst 0xf8aa4b9a // rprfm pldonce, x10, [x28]\n"
".inst 0xf8aa4b7a // rprfm pldonce, x10, [x27]\n"
".inst 0xf8aa4b5a // rprfm pldonce, x10, [x26]\n"
@@ -190,67 +190,67 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
".inst 0xf8aa4aba // rprfm pldonce, x10, [x21]\n"
".inst 0xf8aa4a9a // rprfm pldonce, x10, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x2, x22\n" // offset = tile_i * ld_output_row
- "mov x20, #0x4\n"
- "ld1w { z14.s }, p3/Z, [x17]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "mov x21, #0x4\n"
+ "ld1w { z14.s }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
"ldr x9, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "madd x21, x3, x9, x21\n" // offset += tile_j * ld_output_col
- "mul x21, x21, x20\n" // offset *= output_tile_size
- "ld1rw { z13.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "cntw x22\n"
+ ".inst 0xa040c0c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
"ldr x28, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x28, x28, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "addvl x17, x17, #1\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "add x27, x28, x22, LSL #2\n"
- "cntw x26\n"
- "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "addvl x17, x17, #4\n"
- "add x25, x27, x22, LSL #2\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "add x24, x9, x9\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
- "addvl x17, x17, #4\n"
- "cmp x26, %x[n_channels]\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "add x23, x25, x22, LSL #2\n"
- "add x22, x24, x9\n"
- "ld1w { z10.s }, p2/Z, [x5]\n"
+ ".inst 0xa040c0c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "mul x20, x2, x23\n" // offset = tile_i * ld_output_row
+ "cmp x22, %x[n_channels]\n"
+ "ld1rw { z13.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "madd x20, x3, x9, x20\n" // offset += tile_j * ld_output_col
+ "add x27, x9, x9\n"
+ "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "mul x20, x20, x21\n" // offset *= output_tile_size
+ "add x26, x27, x9\n"
+ "ld1w { z8.s }, p3/Z, [x6]\n"
+ "add x28, x28, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
"mov x21, #0x0\n"
- "sub x20, XZR, x26\n"
- "ld1w { z11.s }, p2/Z, [x5, x11, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x7, x15, LSL #2]\n"
- "addvl x17, x17, #1\n"
+ "ld1w { z9.s }, p2/Z, [x14, x7, LSL #2]\n"
+ "add x25, x28, x23, LSL #2\n"
+ "sub x20, XZR, x22\n"
+ "ld1w { z10.s }, p2/Z, [x5]\n"
+ "add x24, x25, x23, LSL #2\n"
+ "ld1w { z11.s }, p2/Z, [x5, x15, LSL #2]\n"
+ "addvl x6, x6, #1\n"
+ "add x23, x24, x23, LSL #2\n"
+ "ld1w { z12.s }, p2/Z, [x14, x8, LSL #2]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
"movprfx z25, z14\n fmla z25.s, p3/M, z4.s, z9.s\n"
"movprfx z28, z14\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "whilelt p1.s, x26, %x[n_channels]\n"
+ "whilelt p1.s, x22, %x[n_channels]\n"
"incw x21\n"
"movprfx z26, z14\n fmla z26.s, p3/M, z3.s, z9.s\n"
"movprfx z17, z14\n fmla z17.s, p3/M, z1.s, z9.s\n"
- "incw x26\n"
+ "incw x22\n"
"mov p0.b, p2.b\n"
"movprfx z18, z14\n fmla z18.s, p3/M, z0.s, z9.s\n"
- "fmla z25.s, p3/M, z5.s, z12.s\n"
- "incw x20\n"
"movprfx z29, z14\n fmla z29.s, p3/M, z7.s, z9.s\n"
+ "incw x20\n"
"movprfx z30, z14\n fmla z30.s, p3/M, z6.s, z9.s\n"
"movprfx z24, z14\n fmla z24.s, p3/M, z5.s, z9.s\n"
+ "fmla z25.s, p3/M, z5.s, z12.s\n"
"movprfx z16, z14\n fmla z16.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x16, x8, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x13, x7, LSL #2]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"movprfx z31, z14\n fmla z31.s, p3/M, z2.s, z11.s\n"
- "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z19.s }, p2/Z, [x11]\n"
"fmla z26.s, p3/M, z4.s, z12.s\n"
"fmla z17.s, p3/M, z2.s, z12.s\n"
- "ld1w { z22.s }, p2/Z, [x12, x11, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x11, x15, LSL #2]\n"
"fmla z18.s, p3/M, z1.s, z12.s\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
"movprfx z20, z14\n fmla z20.s, p3/M, z6.s, z19.s\n"
- "ld1w { z11.s }, p2/Z, [x16, x15, LSL #2]\n"
"fmla z25.s, p3/M, z7.s, z9.s\n"
- "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x13, x8, LSL #2]\n"
"fmla z30.s, p3/M, z7.s, z12.s\n"
"fmla z31.s, p3/M, z6.s, z12.s\n"
"movprfx z27, z14\n fmla z27.s, p3/M, z3.s, z12.s\n"
@@ -258,24 +258,24 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ld1w { z10.s }, p2/Z, [x5, x4, LSL #2]\n"
"movprfx z23, z14\n fmla z23.s, p3/M, z8.s, z22.s\n"
"fmla z26.s, p3/M, z6.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x5, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x5, x17, LSL #2]\n"
"fmla z17.s, p3/M, z4.s, z9.s\n"
"fmla z18.s, p3/M, z3.s, z9.s\n"
"movprfx z21, z14\n fmla z21.s, p3/M, z1.s, z9.s\n"
"movprfx z22, z14\n fmla z22.s, p3/M, z0.s, z9.s\n"
- "ld1w { z14.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
+ "ld1w { z14.s }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
"fmla z24.s, p3/M, z8.s, z9.s\n"
"fmla z16.s, p3/M, z5.s, z9.s\n"
"fmla z20.s, p3/M, z2.s, z9.s\n"
"fmla z25.s, p3/M, z8.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x6]\n"
+ "ld1w { z9.s }, p2/Z, [x16]\n"
"fmla z28.s, p3/M, z1.s, z10.s\n"
"fmla z29.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x6, x11, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
"fmla z30.s, p3/M, z2.s, z12.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x14]\n"
+ "ld1w { z12.s }, p2/Z, [x12]\n"
"fmla z26.s, p3/M, z7.s, z11.s\n"
"fmla z27.s, p3/M, z6.s, z11.s\n"
"fmla z17.s, p3/M, z5.s, z11.s\n"
@@ -284,52 +284,52 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z21.s, p3/M, z2.s, z11.s\n"
"fmla z22.s, p3/M, z1.s, z11.s\n"
"fmla z23.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x6, x8, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x16, x7, LSL #2]\n"
"fmla z24.s, p3/M, z0.s, z9.s\n"
"fmla z16.s, p3/M, z6.s, z12.s\n"
"fmla z20.s, p3/M, z3.s, z12.s\n"
- "fmla z25.s, p3/M, z1.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x11, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x12, x15, LSL #2]\n"
"fmla z28.s, p3/M, z3.s, z9.s\n"
+ "fmla z25.s, p3/M, z1.s, z11.s\n"
"fmla z31.s, p3/M, z5.s, z10.s\n"
"fmla z27.s, p3/M, z2.s, z10.s\n"
"fmla z29.s, p3/M, z4.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x6, x15, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x16, x8, LSL #2]\n"
"fmla z30.s, p3/M, z3.s, z11.s\n"
"fmla z26.s, p3/M, z0.s, z11.s\n"
"fmla z19.s, p3/M, z8.s, z12.s\n"
"fmla z23.s, p3/M, z5.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x4, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x4, LSL #2]\n"
"fmla z24.s, p3/M, z2.s, z11.s\n"
"fmla z25.s, p3/M, z2.s, z10.s\n"
"fmla z28.s, p3/M, z5.s, z11.s\n"
"fmla z29.s, p3/M, z5.s, z10.s\n"
- "ld1w { z9.s }, p2/Z, [x7, x4, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x14, x4, LSL #2]\n"
"fmla z30.s, p3/M, z4.s, z10.s\n"
"fmla z31.s, p3/M, z3.s, z10.s\n"
"fmla z26.s, p3/M, z1.s, z10.s\n"
"fmla z27.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x7, x13, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x14, x17, LSL #2]\n"
"fmla z20.s, p3/M, z7.s, z12.s\n"
"fmla z21.s, p3/M, z6.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x13, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x11, x17, LSL #2]\n"
"fmla z24.s, p3/M, z4.s, z9.s\n"
"fmla z25.s, p3/M, z3.s, z9.s\n"
"fmla z16.s, p3/M, z1.s, z9.s\n"
"fmla z17.s, p3/M, z0.s, z9.s\n"
"fmla z28.s, p3/M, z7.s, z9.s\n"
"fmla z29.s, p3/M, z6.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x5, x8, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x5, x7, LSL #2]\n"
"fmla z22.s, p3/M, z8.s, z11.s\n"
"fmla z23.s, p3/M, z7.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x16, x4, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x4, LSL #2]\n"
"fmla z30.s, p3/M, z8.s, z10.s\n"
"fmla z31.s, p3/M, z7.s, z10.s\n"
"fmla z26.s, p3/M, z5.s, z10.s\n"
"fmla z27.s, p3/M, z4.s, z10.s\n"
"fmla z18.s, p3/M, z2.s, z10.s\n"
"fmla z19.s, p3/M, z1.s, z10.s\n"
- "ld1w { z9.s }, p2/Z, [x5, x15, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x5, x8, LSL #2]\n"
"addvl x5, x5, #1\n"
"fmla z24.s, p3/M, z7.s, z11.s\n"
"fmla z25.s, p3/M, z6.s, z11.s\n"
@@ -337,153 +337,153 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z17.s, p3/M, z3.s, z11.s\n"
"fmla z20.s, p3/M, z1.s, z11.s\n"
"fmla z21.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x16, x13, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x17, LSL #2]\n"
"fmla z28.s, p3/M, z2.s, z12.s\n"
"fmla z29.s, p3/M, z1.s, z12.s\n"
"fmla z30.s, p3/M, z0.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x7]\n"
- "fmla z22.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z10.s }, p2/Z, [x14]\n"
"fmla z31.s, p3/M, z0.s, z9.s\n"
- "fmla z24.s, p3/M, z3.s, z10.s\n"
- "fmla z16.s, p3/M, z0.s, z10.s\n"
+ "fmla z22.s, p3/M, z2.s, z11.s\n"
"fmla z26.s, p3/M, z8.s, z11.s\n"
"fmla z27.s, p3/M, z7.s, z11.s\n"
"fmla z18.s, p3/M, z5.s, z11.s\n"
+ "fmla z24.s, p3/M, z3.s, z10.s\n"
+ "fmla z16.s, p3/M, z0.s, z10.s\n"
"fmla z19.s, p3/M, z4.s, z11.s\n"
"fmla z23.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x8, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x7, LSL #2]\n"
"fmla z29.s, p3/M, z2.s, z9.s\n"
"fmla z30.s, p3/M, z1.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x7, x11, LSL #2]\n"
- "addvl x7, x7, #1\n"
+ "ld1w { z12.s }, p2/Z, [x14, x15, LSL #2]\n"
+ "addvl x14, x14, #1\n"
"fmla z28.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x16]\n"
+ "ld1w { z10.s }, p2/Z, [x13]\n"
"fmla z21.s, p3/M, z4.s, z11.s\n"
"fmla z22.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z9.s }, p1/Z, [x14, x7, LSL #2]\n"
"fmla z31.s, p3/M, z8.s, z12.s\n"
- "ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
"fmla z27.s, p3/M, z5.s, z12.s\n"
"fmla z19.s, p3/M, z2.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x16, x11, LSL #2]\n"
- "addvl x16, x16, #1\n"
"fmla z24.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z12.s }, p2/Z, [x13, x15, LSL #2]\n"
+ "addvl x13, x13, #1\n"
"fmla z16.s, p3/M, z3.s, z10.s\n"
"fmla z20.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x8, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x11, x7, LSL #2]\n"
+ "fmla z17.s, p3/M, z7.s, z11.s\n"
+ "fmla z18.s, p3/M, z6.s, z11.s\n"
"fmla z23.s, p3/M, z2.s, z12.s\n"
+ "fmla z27.s, p3/M, z8.s, z12.s\n"
"fmla z21.s, p3/M, z7.s, z10.s\n"
"fmla z22.s, p3/M, z6.s, z10.s\n"
"fmla z16.s, p3/M, z8.s, z11.s\n"
- "fmla z17.s, p3/M, z7.s, z11.s\n"
- "fmla z18.s, p3/M, z6.s, z11.s\n"
"fmla z20.s, p3/M, z5.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x15, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x8, LSL #2]\n"
"fmla z19.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x11, x8, LSL #2]\n"
+ "addvl x11, x11, #1\n"
"fmla z21.s, p3/M, z5.s, z11.s\n"
"fmla z22.s, p3/M, z4.s, z11.s\n"
"fmla z23.s, p3/M, z3.s, z11.s\n"
- "fmla z27.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x15, LSL #2]\n"
"fmla z20.s, p3/M, z8.s, z10.s\n"
- "addvl x12, x12, #1\n"
- "ld1w { z10.s }, p2/Z, [x6, x4, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x16, x4, LSL #2]\n"
"fmla z17.s, p3/M, z8.s, z11.s\n"
"fmla z18.s, p3/M, z7.s, z11.s\n"
"fmla z19.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x16, x17, LSL #2]\n"
+ "addvl x16, x16, #1\n"
"fmla z21.s, p3/M, z8.s, z12.s\n"
- "ld1w { z11.s }, p2/Z, [x6, x13, LSL #2]\n"
- "addvl x6, x6, #1\n"
"fmla z22.s, p3/M, z7.s, z12.s\n"
"fmla z23.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x4, LSL #2]\n"
"fmla z28.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z12.s }, p2/Z, [x12, x4, LSL #2]\n"
"fmla z29.s, p3/M, z3.s, z10.s\n"
"fmla z24.s, p3/M, z1.s, z10.s\n"
"fmla z25.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x14, x13, LSL #2]\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
+ "ld1w { z10.s }, p2/Z, [x12, x17, LSL #2]\n"
"fmla z30.s, p3/M, z5.s, z11.s\n"
+ "whilelt p2.s, x21, %x[n_channels]\n"
"fmla z31.s, p3/M, z4.s, z11.s\n"
- "cmp x26, %x[n_channels]\n"
- "addvl x14, x14, #1\n"
"fmla z26.s, p3/M, z2.s, z11.s\n"
+ "cmp x22, %x[n_channels]\n"
+ "addvl x12, x12, #1\n"
"fmla z27.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p1/Z, [x5, x11, LSL #2]\n"
"fmla z16.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z11.s }, p1/Z, [x5, x15, LSL #2]\n"
"fmla z17.s, p3/M, z6.s, z12.s\n"
"fmla z20.s, p3/M, z4.s, z12.s\n"
"fmla z21.s, p3/M, z3.s, z12.s\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
"fmla z18.s, p3/M, z8.s, z10.s\n"
+ ".inst 0xa040c0c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
"fmla z19.s, p3/M, z7.s, z10.s\n"
- "ld1w { z12.s }, p1/Z, [x7, x15, LSL #2]\n"
"fmla z22.s, p3/M, z5.s, z10.s\n"
+ "ld1w { z12.s }, p1/Z, [x14, x8, LSL #2]\n"
"fmla z23.s, p3/M, z4.s, z10.s\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
+ ".inst 0xa040c0c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
".inst 0xc1afc9bc // fclamp { z28.s-z31.s }, z13.s, z15.s\n"
".inst 0xc1afc9b8 // fclamp { z24.s-z27.s }, z13.s, z15.s\n"
"ld1w { z10.s }, p1/Z, [x5]\n"
+ "ld1w { z8.s }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
".inst 0xc1afc9b0 // fclamp { z16.s-z19.s }, z13.s, z15.s\n"
".inst 0xc1afc9b4 // fclamp { z20.s-z23.s }, z13.s, z15.s\n"
"st1w { z28.s }, p0, [x28]\n"
"st1w { z29.s }, p0, [x28, x9, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "st1w { z30.s }, p0, [x28, x24, LSL #2]\n"
- "st1w { z31.s }, p0, [x28, x22, LSL #2]\n"
+ "st1w { z30.s }, p0, [x28, x27, LSL #2]\n"
+ "st1w { z31.s }, p0, [x28, x26, LSL #2]\n"
"addvl x28, x28, #1\n"
- "st1w { z24.s }, p0, [x27]\n"
- "st1w { z25.s }, p0, [x27, x9, LSL #2]\n"
- "st1w { z26.s }, p0, [x27, x24, LSL #2]\n"
- "st1w { z27.s }, p0, [x27, x22, LSL #2]\n"
- "addvl x27, x27, #1\n"
- "st1w { z16.s }, p0, [x25]\n"
- "st1w { z17.s }, p0, [x25, x9, LSL #2]\n"
- "st1w { z18.s }, p0, [x25, x24, LSL #2]\n"
- "st1w { z19.s }, p0, [x25, x22, LSL #2]\n"
+ "st1w { z24.s }, p0, [x25]\n"
+ "st1w { z25.s }, p0, [x25, x9, LSL #2]\n"
+ "st1w { z26.s }, p0, [x25, x27, LSL #2]\n"
+ "st1w { z27.s }, p0, [x25, x26, LSL #2]\n"
"addvl x25, x25, #1\n"
+ "st1w { z16.s }, p0, [x24]\n"
+ "st1w { z17.s }, p0, [x24, x9, LSL #2]\n"
+ "st1w { z18.s }, p0, [x24, x27, LSL #2]\n"
+ "st1w { z19.s }, p0, [x24, x26, LSL #2]\n"
+ "addvl x24, x24, #1\n"
"st1w { z20.s }, p0, [x23]\n"
"st1w { z21.s }, p0, [x23, x9, LSL #2]\n"
- "st1w { z22.s }, p0, [x23, x24, LSL #2]\n"
- "st1w { z23.s }, p0, [x23, x22, LSL #2]\n"
+ "st1w { z22.s }, p0, [x23, x27, LSL #2]\n"
+ "st1w { z23.s }, p0, [x23, x26, LSL #2]\n"
"addvl x23, x23, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
"movprfx z21, z14\n fmla z21.s, p3/M, z4.s, z9.s\n"
"movprfx z24, z14\n fmla z24.s, p3/M, z8.s, z9.s\n"
"ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "add x3, x3, #0x1\n"
+ "mov p0.b, p2.b\n"
"movprfx z22, z14\n fmla z22.s, p3/M, z3.s, z9.s\n"
"movprfx z29, z14\n fmla z29.s, p3/M, z1.s, z9.s\n"
"ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "add x21, x2, #0x1\n"
"movprfx z30, z14\n fmla z30.s, p3/M, z0.s, z9.s\n"
- "fmla z21.s, p3/M, z5.s, z12.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "cmp x3, x20\n"
"movprfx z25, z14\n fmla z25.s, p3/M, z7.s, z9.s\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"movprfx z26, z14\n fmla z26.s, p3/M, z6.s, z9.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x2, x2, x21, LT\n"
"movprfx z20, z14\n fmla z20.s, p3/M, z5.s, z9.s\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "add x3, x3, #0x1\n"
+ "fmla z21.s, p3/M, z5.s, z12.s\n"
"movprfx z28, z14\n fmla z28.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x16, x8, LSL #2]\n"
- "mov p0.b, p2.b\n"
+ "ld1w { z9.s }, p2/Z, [x13, x7, LSL #2]\n"
+ "add x20, x2, #0x1\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
"movprfx z27, z14\n fmla z27.s, p3/M, z2.s, z11.s\n"
- "ld1w { z17.s }, p2/Z, [x12]\n"
- "csel x3, x3, XZR, LT\n"
+ "ld1w { z17.s }, p2/Z, [x11]\n"
+ "cmp x3, x22\n"
"fmla z22.s, p3/M, z4.s, z12.s\n"
"fmla z29.s, p3/M, z2.s, z12.s\n"
- "ld1w { z18.s }, p2/Z, [x12, x11, LSL #2]\n"
- "cmp x2, x20\n"
+ "ld1w { z18.s }, p2/Z, [x11, x15, LSL #2]\n"
+ "csel x2, x2, x20, LT\n"
"fmla z30.s, p3/M, z1.s, z12.s\n"
+ "fmla z25.s, p3/M, z8.s, z12.s\n"
+ "csel x3, x3, XZR, LT\n"
+ "cmp x2, x21\n"
"movprfx z16, z14\n fmla z16.s, p3/M, z6.s, z17.s\n"
- "ld1w { z11.s }, p2/Z, [x16, x15, LSL #2]\n"
"fmla z21.s, p3/M, z7.s, z9.s\n"
- "fmla z25.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x13, x8, LSL #2]\n"
"fmla z26.s, p3/M, z7.s, z12.s\n"
"fmla z27.s, p3/M, z6.s, z12.s\n"
"movprfx z23, z14\n fmla z23.s, p3/M, z3.s, z12.s\n"
@@ -491,7 +491,7 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"ld1w { z10.s }, p2/Z, [x5, x4, LSL #2]\n"
"movprfx z19, z14\n fmla z19.s, p3/M, z8.s, z18.s\n"
"fmla z22.s, p3/M, z6.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x5, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x5, x17, LSL #2]\n"
"fmla z29.s, p3/M, z4.s, z9.s\n"
"fmla z30.s, p3/M, z3.s, z9.s\n"
"movprfx z17, z14\n fmla z17.s, p3/M, z1.s, z9.s\n"
@@ -500,13 +500,13 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z28.s, p3/M, z5.s, z9.s\n"
"fmla z16.s, p3/M, z2.s, z9.s\n"
"fmla z21.s, p3/M, z8.s, z11.s\n"
- "ld1w { z14.s }, p2/Z, [x6]\n"
+ "ld1w { z14.s }, p2/Z, [x16]\n"
"fmla z24.s, p3/M, z1.s, z10.s\n"
"fmla z25.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x6, x11, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x16, x15, LSL #2]\n"
"fmla z26.s, p3/M, z2.s, z12.s\n"
"fmla z27.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x14]\n"
+ "ld1w { z12.s }, p2/Z, [x12]\n"
"fmla z22.s, p3/M, z7.s, z11.s\n"
"fmla z23.s, p3/M, z6.s, z11.s\n"
"fmla z29.s, p3/M, z5.s, z11.s\n"
@@ -515,117 +515,117 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z17.s, p3/M, z2.s, z11.s\n"
"fmla z18.s, p3/M, z1.s, z11.s\n"
"fmla z19.s, p3/M, z0.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x6, x8, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x16, x7, LSL #2]\n"
"fmla z20.s, p3/M, z0.s, z14.s\n"
"fmla z28.s, p3/M, z6.s, z12.s\n"
"fmla z16.s, p3/M, z3.s, z12.s\n"
- "fmla z21.s, p3/M, z1.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x11, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x15, LSL #2]\n"
"fmla z24.s, p3/M, z3.s, z14.s\n"
+ "fmla z21.s, p3/M, z1.s, z9.s\n"
"fmla z27.s, p3/M, z5.s, z10.s\n"
"fmla z23.s, p3/M, z2.s, z10.s\n"
"fmla z25.s, p3/M, z4.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x6, x15, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x16, x8, LSL #2]\n"
"fmla z26.s, p3/M, z3.s, z9.s\n"
"fmla z22.s, p3/M, z0.s, z9.s\n"
"fmla z31.s, p3/M, z8.s, z11.s\n"
"fmla z19.s, p3/M, z5.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x4, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x11, x4, LSL #2]\n"
"fmla z20.s, p3/M, z2.s, z9.s\n"
"fmla z21.s, p3/M, z2.s, z12.s\n"
"fmla z24.s, p3/M, z5.s, z9.s\n"
"fmla z25.s, p3/M, z5.s, z12.s\n"
- "ld1w { z9.s }, p2/Z, [x7, x4, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x14, x4, LSL #2]\n"
"fmla z26.s, p3/M, z4.s, z12.s\n"
"fmla z27.s, p3/M, z3.s, z12.s\n"
"fmla z22.s, p3/M, z1.s, z12.s\n"
"fmla z23.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x7, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x14, x17, LSL #2]\n"
"fmla z16.s, p3/M, z7.s, z10.s\n"
"fmla z17.s, p3/M, z6.s, z10.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x13, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x11, x17, LSL #2]\n"
"fmla z20.s, p3/M, z4.s, z9.s\n"
"fmla z21.s, p3/M, z3.s, z9.s\n"
"fmla z28.s, p3/M, z1.s, z9.s\n"
"fmla z29.s, p3/M, z0.s, z9.s\n"
"fmla z24.s, p3/M, z7.s, z9.s\n"
"fmla z25.s, p3/M, z6.s, z9.s\n"
- "ld1w { z10.s }, p2/Z, [x5, x8, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x5, x7, LSL #2]\n"
"fmla z18.s, p3/M, z8.s, z11.s\n"
"fmla z19.s, p3/M, z7.s, z11.s\n"
- "ld1w { z14.s }, p2/Z, [x16, x4, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x13, x4, LSL #2]\n"
"fmla z26.s, p3/M, z8.s, z12.s\n"
"fmla z27.s, p3/M, z7.s, z12.s\n"
"fmla z22.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
"fmla z30.s, p3/M, z2.s, z12.s\n"
"fmla z31.s, p3/M, z1.s, z12.s\n"
- "ld1w { z9.s }, p2/Z, [x5, x15, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x5, x8, LSL #2]\n"
"fmla z20.s, p3/M, z7.s, z14.s\n"
"fmla z21.s, p3/M, z6.s, z14.s\n"
"fmla z28.s, p3/M, z4.s, z14.s\n"
"fmla z29.s, p3/M, z3.s, z14.s\n"
"fmla z16.s, p3/M, z1.s, z14.s\n"
"fmla z17.s, p3/M, z0.s, z14.s\n"
- "ld1w { z14.s }, p2/Z, [x16, x13, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x13, x17, LSL #2]\n"
"fmla z24.s, p3/M, z2.s, z10.s\n"
"fmla z25.s, p3/M, z1.s, z10.s\n"
"fmla z26.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x7]\n"
- "fmla z18.s, p3/M, z2.s, z14.s\n"
+ "ld1w { z10.s }, p2/Z, [x14]\n"
"fmla z27.s, p3/M, z0.s, z9.s\n"
- "fmla z20.s, p3/M, z3.s, z10.s\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
+ "fmla z18.s, p3/M, z2.s, z14.s\n"
"fmla z22.s, p3/M, z8.s, z14.s\n"
"fmla z23.s, p3/M, z7.s, z14.s\n"
"fmla z30.s, p3/M, z5.s, z14.s\n"
+ "fmla z20.s, p3/M, z3.s, z10.s\n"
+ "fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z31.s, p3/M, z4.s, z14.s\n"
"fmla z19.s, p3/M, z1.s, z14.s\n"
- "ld1w { z11.s }, p2/Z, [x14, x8, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x12, x7, LSL #2]\n"
"fmla z25.s, p3/M, z2.s, z9.s\n"
"fmla z26.s, p3/M, z1.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x7, x11, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x14, x15, LSL #2]\n"
"fmla z24.s, p3/M, z6.s, z10.s\n"
- "ld1w { z14.s }, p2/Z, [x16]\n"
+ "ld1w { z14.s }, p2/Z, [x13]\n"
"fmla z17.s, p3/M, z4.s, z11.s\n"
"fmla z18.s, p3/M, z3.s, z11.s\n"
"fmla z27.s, p3/M, z8.s, z12.s\n"
"fmla z23.s, p3/M, z5.s, z12.s\n"
"fmla z31.s, p3/M, z2.s, z12.s\n"
- "ld1w { z9.s }, p2/Z, [x16, x11, LSL #2]\n"
"fmla z20.s, p3/M, z6.s, z14.s\n"
+ "ld1w { z9.s }, p2/Z, [x13, x15, LSL #2]\n"
"fmla z28.s, p3/M, z3.s, z14.s\n"
"fmla z16.s, p3/M, z0.s, z14.s\n"
- "ld1w { z12.s }, p2/Z, [x12, x8, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x11, x7, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z11.s\n"
+ "fmla z30.s, p3/M, z6.s, z11.s\n"
"fmla z19.s, p3/M, z2.s, z9.s\n"
+ "fmla z23.s, p3/M, z8.s, z9.s\n"
"fmla z17.s, p3/M, z7.s, z12.s\n"
"fmla z18.s, p3/M, z6.s, z12.s\n"
"fmla z28.s, p3/M, z8.s, z11.s\n"
- "fmla z29.s, p3/M, z7.s, z11.s\n"
- "fmla z30.s, p3/M, z6.s, z11.s\n"
"fmla z16.s, p3/M, z5.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x14, x15, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x12, x8, LSL #2]\n"
"fmla z31.s, p3/M, z5.s, z9.s\n"
+ "ld1w { z14.s }, p2/Z, [x11, x8, LSL #2]\n"
"fmla z17.s, p3/M, z5.s, z10.s\n"
"fmla z18.s, p3/M, z4.s, z10.s\n"
"fmla z19.s, p3/M, z3.s, z10.s\n"
- "fmla z23.s, p3/M, z8.s, z9.s\n"
- "ld1w { z14.s }, p2/Z, [x12, x15, LSL #2]\n"
"fmla z16.s, p3/M, z8.s, z12.s\n"
- "ld1w { z9.s }, p2/Z, [x6, x4, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x16, x4, LSL #2]\n"
"fmla z29.s, p3/M, z8.s, z10.s\n"
"fmla z30.s, p3/M, z7.s, z10.s\n"
"fmla z31.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z11.s }, p2/Z, [x16, x17, LSL #2]\n"
"fmla z17.s, p3/M, z8.s, z14.s\n"
- "ld1w { z11.s }, p2/Z, [x6, x13, LSL #2]\n"
"fmla z18.s, p3/M, z7.s, z14.s\n"
"fmla z19.s, p3/M, z6.s, z14.s\n"
- "ld1w { z10.s }, p2/Z, [x14, x4, LSL #2]\n"
"fmla z24.s, p3/M, z4.s, z9.s\n"
+ "ld1w { z10.s }, p2/Z, [x12, x4, LSL #2]\n"
"fmla z25.s, p3/M, z3.s, z9.s\n"
"fmla z20.s, p3/M, z1.s, z9.s\n"
"fmla z21.s, p3/M, z0.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x14, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x12, x17, LSL #2]\n"
"fmla z26.s, p3/M, z5.s, z11.s\n"
"fmla z27.s, p3/M, z4.s, z11.s\n"
"fmla z22.s, p3/M, z2.s, z11.s\n"
@@ -640,24 +640,24 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
"fmla z19.s, p3/M, z4.s, z12.s\n"
".inst 0xc1afc9b8 // fclamp { z24.s-z27.s }, z13.s, z15.s\n"
".inst 0xc1afc9b4 // fclamp { z20.s-z23.s }, z13.s, z15.s\n"
- "st1w { z24.s }, p0, [x28]\n"
".inst 0xc1afc9bc // fclamp { z28.s-z31.s }, z13.s, z15.s\n"
".inst 0xc1afc9b0 // fclamp { z16.s-z19.s }, z13.s, z15.s\n"
+ "st1w { z24.s }, p0, [x28]\n"
"st1w { z25.s }, p0, [x28, x9, LSL #2]\n"
- "st1w { z26.s }, p0, [x28, x24, LSL #2]\n"
- "st1w { z27.s }, p0, [x28, x22, LSL #2]\n"
- "st1w { z20.s }, p0, [x27]\n"
- "st1w { z21.s }, p0, [x27, x9, LSL #2]\n"
- "st1w { z22.s }, p0, [x27, x24, LSL #2]\n"
- "st1w { z23.s }, p0, [x27, x22, LSL #2]\n"
- "st1w { z28.s }, p0, [x25]\n"
- "st1w { z29.s }, p0, [x25, x9, LSL #2]\n"
- "st1w { z30.s }, p0, [x25, x24, LSL #2]\n"
- "st1w { z31.s }, p0, [x25, x22, LSL #2]\n"
+ "st1w { z26.s }, p0, [x28, x27, LSL #2]\n"
+ "st1w { z27.s }, p0, [x28, x26, LSL #2]\n"
+ "st1w { z20.s }, p0, [x25]\n"
+ "st1w { z21.s }, p0, [x25, x9, LSL #2]\n"
+ "st1w { z22.s }, p0, [x25, x27, LSL #2]\n"
+ "st1w { z23.s }, p0, [x25, x26, LSL #2]\n"
+ "st1w { z28.s }, p0, [x24]\n"
+ "st1w { z29.s }, p0, [x24, x9, LSL #2]\n"
+ "st1w { z30.s }, p0, [x24, x27, LSL #2]\n"
+ "st1w { z31.s }, p0, [x24, x26, LSL #2]\n"
"st1w { z16.s }, p0, [x23]\n"
"st1w { z17.s }, p0, [x23, x9, LSL #2]\n"
- "st1w { z18.s }, p0, [x23, x24, LSL #2]\n"
- "st1w { z19.s }, p0, [x23, x22, LSL #2]\n"
+ "st1w { z18.s }, p0, [x23, x27, LSL #2]\n"
+ "st1w { z19.s }, p0, [x23, x26, LSL #2]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 2e2a45bab0..44bfbf4849 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -101,540 +101,540 @@ void sme2_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"add x17, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x16, #0x0\n"
"ptrue p3.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ld1w { z13.s }, p3/Z, [x8]\n"
- "addvl x8, x8, #1\n"
"ldp x23, x22, [x17, #0x0]\n"
"ldp x21, x20, [x17, #0x10]\n"
- "cntw x16\n"
+ "cntw x15\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1w { z13.s }, p3/Z, [x8]\n"
+ "addvl x8, x8, #1\n"
+ "cmp x15, %x[n_channels]\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_outptrs]]\n"
".inst 0xa040c100 // ld1w { z0.s-z3.s }, pn8.b/Z, [x8]\n"
"addvl x8, x8, #4\n"
- "mov x15, #0x0\n"
- "whilelt p2.s, XZR, %x[n_channels]\n"
+ "sub x13, XZR, x15\n"
".inst 0xa040c104 // ld1w { z4.s-z7.s }, pn8.b/Z, [x8]\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_outptrs]]\n"
"addvl x8, x8, #4\n"
- "cmp x16, %x[n_channels]\n"
- "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x13, XZR, x16\n"
"ld1w { z8.s }, p3/Z, [x8]\n"
"addvl x8, x8, #1\n"
- "ld1w { z9.s }, p2/Z, [x23, x15, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x22, x15, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x21, x15, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x23, x16, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z29, z13\n fmla z29.s, p3/M, z4.s, z9.s\n"
+ "movprfx z25, z13\n fmla z25.s, p3/M, z4.s, z9.s\n"
"movprfx z16, z13\n fmla z16.s, p3/M, z8.s, z9.s\n"
"ldr x24, [x17, #0x20]\n"
"incw x13\n"
- "movprfx z30, z13\n fmla z30.s, p3/M, z3.s, z9.s\n"
- "movprfx z25, z13\n fmla z25.s, p3/M, z1.s, z9.s\n"
+ "movprfx z26, z13\n fmla z26.s, p3/M, z3.s, z9.s\n"
+ "movprfx z29, z13\n fmla z29.s, p3/M, z1.s, z9.s\n"
"ldr x20, [x17, #0x30]\n"
"mov p1.b, p2.b\n"
- "movprfx z26, z13\n fmla z26.s, p3/M, z0.s, z9.s\n"
+ "movprfx z30, z13\n fmla z30.s, p3/M, z0.s, z9.s\n"
"ldr x21, [x17, #0x28]\n"
"movprfx z17, z13\n fmla z17.s, p3/M, z7.s, z9.s\n"
- "whilelt p0.s, x16, %x[n_channels]\n"
+ "whilelt p0.s, x15, %x[n_channels]\n"
"movprfx z18, z13\n fmla z18.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
+ "movprfx z24, z13\n fmla z24.s, p3/M, z5.s, z9.s\n"
"ldr x23, [x17, #0x38]\n"
- "movprfx z28, z13\n fmla z28.s, p3/M, z5.s, z9.s\n"
- "movprfx z24, z13\n fmla z24.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z5.s, z12.s\n"
+ "movprfx z28, z13\n fmla z28.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x22, [x17, #0x40]\n"
"fmla z16.s, p3/M, z0.s, z10.s\n"
"movprfx z19, z13\n fmla z19.s, p3/M, z2.s, z11.s\n"
- "ld1w { z22.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x24, x16, LSL #2]\n"
"ldr x20, [x17, #0x48]\n"
- "fmla z30.s, p3/M, z4.s, z12.s\n"
- "fmla z25.s, p3/M, z2.s, z12.s\n"
- "ld1w { z21.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z12.s\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z22.s }, p2/Z, [x21, x16, LSL #2]\n"
"ldr x27, [x17, #0x50]\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
"fmla z17.s, p3/M, z8.s, z12.s\n"
"ldr x26, [x17, #0x60]\n"
"fmla z18.s, p3/M, z7.s, z12.s\n"
- "movprfx z20, z13\n fmla z20.s, p3/M, z6.s, z22.s\n"
- "ld1w { z11.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "movprfx z20, z13\n fmla z20.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x25, [x17, #0x68]\n"
- "fmla z29.s, p3/M, z7.s, z9.s\n"
+ "fmla z25.s, p3/M, z7.s, z9.s\n"
"fmla z19.s, p3/M, z6.s, z12.s\n"
"ldr x21, [x17, #0x58]\n"
- "movprfx z31, z13\n fmla z31.s, p3/M, z3.s, z12.s\n"
- "movprfx z27, z13\n fmla z27.s, p3/M, z0.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "movprfx z27, z13\n fmla z27.s, p3/M, z3.s, z12.s\n"
+ "movprfx z31, z13\n fmla z31.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x23, x16, LSL #2]\n"
"ldr x24, [x17, #0x70]\n"
- "movprfx z23, z13\n fmla z23.s, p3/M, z8.s, z21.s\n"
- "fmla z30.s, p3/M, z6.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "movprfx z23, z13\n fmla z23.s, p3/M, z8.s, z22.s\n"
+ "fmla z26.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z12.s }, p2/Z, [x22, x16, LSL #2]\n"
"ldr x23, [x17, #0x78]\n"
- "fmla z25.s, p3/M, z4.s, z9.s\n"
- "fmla z26.s, p3/M, z3.s, z9.s\n"
+ "fmla z29.s, p3/M, z4.s, z9.s\n"
+ "fmla z30.s, p3/M, z3.s, z9.s\n"
"ldr x22, [x17, #0x80]\n"
"movprfx z21, z13\n fmla z21.s, p3/M, z1.s, z9.s\n"
"movprfx z22, z13\n fmla z22.s, p3/M, z0.s, z9.s\n"
"ldr x20, [x17, #0x88]\n"
"ld1w { z13.s }, p3/Z, [x8]\n"
- "fmla z28.s, p3/M, z8.s, z9.s\n"
- "fmla z24.s, p3/M, z5.s, z9.s\n"
+ "fmla z24.s, p3/M, z8.s, z9.s\n"
+ "fmla z28.s, p3/M, z5.s, z9.s\n"
"ldr x12, [x14, #0x0]\n"
"addvl x8, x8, #1\n"
"fmla z20.s, p3/M, z2.s, z9.s\n"
- "fmla z16.s, p3/M, z1.s, z10.s\n"
- "ld1w { z9.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z16.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z9.s }, p2/Z, [x27, x16, LSL #2]\n"
"ldr x27, [x17, #0x90]\n"
- "fmla z17.s, p3/M, z0.s, z10.s\n"
+ "fmla z17.s, p3/M, z0.s, z11.s\n"
"fmla z18.s, p3/M, z2.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x21, x16, LSL #2]\n"
"ldr x21, [x17, #0x98]\n"
- "fmla z29.s, p3/M, z8.s, z11.s\n"
+ "fmla z25.s, p3/M, z8.s, z10.s\n"
"fmla z19.s, p3/M, z1.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x26, x16, LSL #2]\n"
"ldr x26, [x17, #0xa0]\n"
- "fmla z30.s, p3/M, z7.s, z11.s\n"
- "fmla z31.s, p3/M, z6.s, z11.s\n"
+ "fmla z26.s, p3/M, z7.s, z10.s\n"
+ "fmla z27.s, p3/M, z6.s, z10.s\n"
"ldr x11, [x14, #0x8]\n"
- "fmla z25.s, p3/M, z5.s, z11.s\n"
- "fmla z26.s, p3/M, z4.s, z11.s\n"
+ "fmla z29.s, p3/M, z5.s, z10.s\n"
+ "fmla z30.s, p3/M, z4.s, z10.s\n"
"ldr x10, [x14, #0x10]\n"
- "fmla z27.s, p3/M, z3.s, z11.s\n"
- "fmla z21.s, p3/M, z2.s, z11.s\n"
+ "fmla z31.s, p3/M, z3.s, z10.s\n"
+ "fmla z21.s, p3/M, z2.s, z10.s\n"
"ldr x9, [x14, #0x18]\n"
- "fmla z22.s, p3/M, z1.s, z11.s\n"
- "fmla z23.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "fmla z22.s, p3/M, z1.s, z10.s\n"
+ "fmla z23.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x16, LSL #2]\n"
"ldr x25, [x17, #0xa8]\n"
"fmla z16.s, p3/M, z3.s, z9.s\n"
- "fmla z28.s, p3/M, z0.s, z9.s\n"
- "fmla z24.s, p3/M, z6.s, z12.s\n"
+ "fmla z24.s, p3/M, z0.s, z9.s\n"
+ "fmla z28.s, p3/M, z6.s, z12.s\n"
"fmla z20.s, p3/M, z3.s, z12.s\n"
- "ld1w { z9.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x24, x16, LSL #2]\n"
"ldr x24, [x17, #0xb0]\n"
- "fmla z17.s, p3/M, z4.s, z11.s\n"
- "fmla z18.s, p3/M, z3.s, z11.s\n"
- "fmla z29.s, p3/M, z1.s, z11.s\n"
- "fmla z19.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z2.s, z10.s\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z17.s, p3/M, z4.s, z10.s\n"
+ "fmla z18.s, p3/M, z3.s, z10.s\n"
+ "fmla z25.s, p3/M, z1.s, z10.s\n"
+ "fmla z19.s, p3/M, z5.s, z11.s\n"
+ "fmla z27.s, p3/M, z2.s, z11.s\n"
+ "fmla z26.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z11.s }, p2/Z, [x23, x16, LSL #2]\n"
"ldr x23, [x17, #0xb8]\n"
- "fmla z27.s, p3/M, z8.s, z9.s\n"
- "fmla z23.s, p3/M, z5.s, z9.s\n"
- "ld1w { z10.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z31.s, p3/M, z8.s, z12.s\n"
+ "fmla z23.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z9.s }, p2/Z, [x22, x16, LSL #2]\n"
"ldr x22, [x17, #0xc0]\n"
- "fmla z16.s, p3/M, z5.s, z11.s\n"
- "fmla z28.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z16.s, p3/M, z5.s, z10.s\n"
+ "fmla z24.s, p3/M, z2.s, z10.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x20, [x17, #0xc8]\n"
- "fmla z17.s, p3/M, z5.s, z12.s\n"
- "fmla z18.s, p3/M, z4.s, z12.s\n"
- "fmla z29.s, p3/M, z2.s, z12.s\n"
- "fmla z19.s, p3/M, z3.s, z12.s\n"
- "fmla z30.s, p3/M, z1.s, z12.s\n"
- "fmla z31.s, p3/M, z0.s, z12.s\n"
- "ld1w { z9.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z17.s, p3/M, z5.s, z11.s\n"
+ "fmla z18.s, p3/M, z4.s, z11.s\n"
+ "fmla z25.s, p3/M, z2.s, z11.s\n"
+ "fmla z19.s, p3/M, z3.s, z11.s\n"
+ "fmla z26.s, p3/M, z1.s, z11.s\n"
+ "fmla z27.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x16, LSL #2]\n"
"ldr x28, [x17, #0xd8]\n"
- "fmla z20.s, p3/M, z7.s, z10.s\n"
- "fmla z21.s, p3/M, z6.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z20.s, p3/M, z7.s, z9.s\n"
+ "fmla z21.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x27, x16, LSL #2]\n"
"ldr x21, [x17, #0xd0]\n"
- "fmla z16.s, p3/M, z7.s, z11.s\n"
- "fmla z17.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z4.s, z11.s\n"
- "fmla z29.s, p3/M, z3.s, z11.s\n"
- "fmla z24.s, p3/M, z1.s, z11.s\n"
- "fmla z25.s, p3/M, z0.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "fmla z16.s, p3/M, z7.s, z12.s\n"
+ "fmla z17.s, p3/M, z6.s, z12.s\n"
+ "fmla z24.s, p3/M, z4.s, z12.s\n"
+ "fmla z25.s, p3/M, z3.s, z12.s\n"
+ "fmla z28.s, p3/M, z1.s, z12.s\n"
+ "fmla z29.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x16, LSL #2]\n"
"ldr x27, [x17, #0xe0]\n"
- "fmla z18.s, p3/M, z8.s, z9.s\n"
- "fmla z22.s, p3/M, z8.s, z10.s\n"
- "fmla z23.s, p3/M, z7.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z1.s, z9.s\n"
+ "fmla z18.s, p3/M, z8.s, z11.s\n"
+ "fmla z22.s, p3/M, z8.s, z9.s\n"
+ "fmla z23.s, p3/M, z7.s, z9.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x16, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z11.s\n"
"ldr x26, [x17, #0xe8]\n"
- "fmla z19.s, p3/M, z7.s, z9.s\n"
- "fmla z30.s, p3/M, z5.s, z9.s\n"
- "fmla z31.s, p3/M, z4.s, z9.s\n"
- "fmla z26.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z19.s, p3/M, z7.s, z11.s\n"
+ "fmla z26.s, p3/M, z5.s, z11.s\n"
+ "fmla z27.s, p3/M, z4.s, z11.s\n"
+ "fmla z30.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x16, LSL #2]\n"
"ldr x25, [x17, #0xf0]\n"
- "fmla z16.s, p3/M, z2.s, z11.s\n"
- "fmla z17.s, p3/M, z1.s, z11.s\n"
- "fmla z18.s, p3/M, z0.s, z11.s\n"
- "fmla z28.s, p3/M, z7.s, z10.s\n"
- "ld1w { z11.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z16.s, p3/M, z2.s, z10.s\n"
+ "fmla z17.s, p3/M, z1.s, z10.s\n"
+ "fmla z18.s, p3/M, z0.s, z10.s\n"
+ "fmla z24.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z9.s }, p2/Z, [x23, x16, LSL #2]\n"
"ldr x24, [x17, #0xf8]\n"
- "fmla z29.s, p3/M, z6.s, z10.s\n"
- "fmla z24.s, p3/M, z4.s, z10.s\n"
- "fmla z25.s, p3/M, z3.s, z10.s\n"
- "fmla z20.s, p3/M, z1.s, z10.s\n"
- "fmla z21.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x22, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z4.s, z10.s\n"
- "ldr x23, [x17, #0x100]\n"
- "fmla z22.s, p3/M, z2.s, z10.s\n"
- "fmla z17.s, p3/M, z2.s, z9.s\n"
- "fmla z18.s, p3/M, z1.s, z9.s\n"
- "fmla z19.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z6.s, z12.s\n"
+ "fmla z28.s, p3/M, z4.s, z12.s\n"
+ "fmla z29.s, p3/M, z3.s, z12.s\n"
+ "fmla z20.s, p3/M, z1.s, z12.s\n"
+ "fmla z21.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "fmla z17.s, p3/M, z2.s, z11.s\n"
+ "ldr x22, [x17, #0x100]\n"
+ "fmla z18.s, p3/M, z1.s, z11.s\n"
+ "fmla z19.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x20, [x17, #0x108]\n"
- "fmla z16.s, p3/M, z6.s, z11.s\n"
- "fmla z28.s, p3/M, z3.s, z11.s\n"
- "fmla z24.s, p3/M, z0.s, z11.s\n"
- "fmla z30.s, p3/M, z8.s, z10.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x15, LSL #2]\n"
- "ldr x22, [x17, #0x110]\n"
- "fmla z31.s, p3/M, z7.s, z10.s\n"
- "fmla z26.s, p3/M, z5.s, z10.s\n"
+ "fmla z16.s, p3/M, z6.s, z9.s\n"
+ "fmla z24.s, p3/M, z3.s, z9.s\n"
+ "fmla z31.s, p3/M, z4.s, z10.s\n"
+ "fmla z22.s, p3/M, z2.s, z10.s\n"
+ "fmla z28.s, p3/M, z0.s, z9.s\n"
+ "fmla z26.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "ldr x23, [x17, #0x110]\n"
+ "fmla z27.s, p3/M, z7.s, z10.s\n"
+ "fmla z30.s, p3/M, z5.s, z10.s\n"
"fmla z23.s, p3/M, z1.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x28, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z10.s }, p2/Z, [x28, x16, LSL #2]\n"
+ "fmla z19.s, p3/M, z8.s, z12.s\n"
"ldr x21, [x17, #0x118]\n"
+ "fmla z31.s, p3/M, z2.s, z12.s\n"
"fmla z20.s, p3/M, z0.s, z11.s\n"
+ "fmla z24.s, p3/M, z6.s, z11.s\n"
+ "fmla z28.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x16, LSL #2]\n"
"fmla z21.s, p3/M, z4.s, z10.s\n"
"fmla z22.s, p3/M, z3.s, z10.s\n"
- "fmla z19.s, p3/M, z8.s, z9.s\n"
- "fmla z31.s, p3/M, z5.s, z9.s\n"
- "fmla z28.s, p3/M, z6.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x27, x15, LSL #2]\n"
- "fmla z24.s, p3/M, z3.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x15, LSL #2]\n"
- "fmla z25.s, p3/M, z7.s, z10.s\n"
- "fmla z26.s, p3/M, z6.s, z10.s\n"
+ "fmla z27.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z9.s }, p2/Z, [x27, x16, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z30.s, p3/M, z6.s, z10.s\n"
"fmla z20.s, p3/M, z5.s, z10.s\n"
- "fmla z27.s, p3/M, z5.s, z9.s\n"
+ "fmla z28.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x16, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z9.s\n"
"fmla z23.s, p3/M, z2.s, z9.s\n"
- "fmla z21.s, p3/M, z7.s, z12.s\n"
- "fmla z22.s, p3/M, z6.s, z12.s\n"
- "fmla z24.s, p3/M, z8.s, z10.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z20.s, p3/M, z8.s, z12.s\n"
- "fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z26.s, p3/M, z7.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z6.s, z11.s\n"
- "fmla z21.s, p3/M, z5.s, z11.s\n"
- "fmla z22.s, p3/M, z4.s, z11.s\n"
- "fmla z23.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x20, x15, LSL #2]\n"
- "ldp x20, x25, [x17, #0x0]\n"
- "fmla z31.s, p3/M, z8.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z21.s, p3/M, z7.s, z11.s\n"
+ "fmla z22.s, p3/M, z6.s, z11.s\n"
+ "fmla z20.s, p3/M, z8.s, z11.s\n"
+ "fmla z29.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z12.s\n"
+ "fmla z27.s, p3/M, z8.s, z9.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x16, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z12.s\n"
+ "fmla z23.s, p3/M, z3.s, z12.s\n"
+ "fmla z21.s, p3/M, z5.s, z12.s\n"
+ "fmla z22.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
+ "ldp x20, x22, [x17, #0x0]\n"
"fmla z16.s, p3/M, z4.s, z10.s\n"
"fmla z17.s, p3/M, z3.s, z10.s\n"
- "fmla z18.s, p3/M, z5.s, z11.s\n"
- "ld1w { z9.s }, p0/Z, [x20, x16, LSL #2]\n"
- "fmla z19.s, p3/M, z4.s, z11.s\n"
- "fmla z21.s, p3/M, z8.s, z12.s\n"
- "fmla z22.s, p3/M, z7.s, z12.s\n"
- "fmla z23.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x22, x15, LSL #2]\n"
- "fmla z28.s, p3/M, z1.s, z10.s\n"
- "fmla z29.s, p3/M, z0.s, z10.s\n"
- "ld1w { z0.s }, p2/Z, [x21, x15, LSL #2]\n"
- "ldp x20, x24, [x17, #0x10]\n"
- "fmla z30.s, p3/M, z2.s, z11.s\n"
- "fmla z31.s, p3/M, z1.s, z11.s\n"
- "incw x15\n"
- "ld1w { z11.s }, p0/Z, [x20, x16, LSL #2]\n"
+ "fmla z24.s, p3/M, z1.s, z10.s\n"
+ "fmla z25.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "fmla z18.s, p3/M, z5.s, z12.s\n"
+ "fmla z19.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z9.s }, p0/Z, [x20, x15, LSL #2]\n"
+ "fmla z21.s, p3/M, z8.s, z11.s\n"
+ "fmla z22.s, p3/M, z7.s, z11.s\n"
+ "fmla z23.s, p3/M, z6.s, z11.s\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z0.s }, p2/Z, [x23, x16, LSL #2]\n"
+ "ldp x21, x20, [x17, #0x10]\n"
+ "fmla z27.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z8.s, z10.s\n"
+ "incw x16\n"
".inst 0xc1afc9d0 // fclamp { z16.s-z19.s }, z14.s, z15.s\n"
+ "fmla z31.s, p3/M, z7.s, z10.s\n"
+ "whilelt p2.s, x16, %x[n_channels]\n"
+ "fmla z28.s, p3/M, z7.s, z0.s\n"
+ "fmla z29.s, p3/M, z6.s, z0.s\n"
+ "ld1w { z11.s }, p0/Z, [x21, x15, LSL #2]\n"
+ "fmla z20.s, p3/M, z4.s, z0.s\n"
+ "fmla z21.s, p3/M, z3.s, z0.s\n"
+ "ld1w { z12.s }, p0/Z, [x20, x15, LSL #2]\n"
+ ".inst 0xc1afc9d8 // fclamp { z24.s-z27.s }, z14.s, z15.s\n"
+ "fmla z22.s, p3/M, z5.s, z10.s\n"
+ ".inst 0xa040c100 // ld1w { z0.s-z3.s }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
"st1w { z16.s }, p1, [x12, x13, LSL #2]\n"
"ldr x23, [x14, #0x20]\n"
- "fmla z24.s, p3/M, z7.s, z12.s\n"
+ "fmla z23.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z10.s }, p0/Z, [x22, x15, LSL #2]\n"
"st1w { z17.s }, p1, [x11, x13, LSL #2]\n"
"ldr x22, [x14, #0x28]\n"
- "fmla z25.s, p3/M, z6.s, z12.s\n"
- "fmla z26.s, p3/M, z8.s, z0.s\n"
+ ".inst 0xc1afc9dc // fclamp { z28.s-z31.s }, z14.s, z15.s\n"
+ "incw x15\n"
"st1w { z18.s }, p1, [x10, x13, LSL #2]\n"
"ldr x21, [x14, #0x30]\n"
- "fmla z27.s, p3/M, z7.s, z0.s\n"
- ".inst 0xc1afc9dc // fclamp { z28.s-z31.s }, z14.s, z15.s\n"
+ ".inst 0xa040c104 // ld1w { z4.s-z7.s }, pn8.b/Z, [x8]\n"
+ "addvl x8, x8, #4\n"
"st1w { z19.s }, p1, [x9, x13, LSL #2]\n"
"ldr x20, [x14, #0x38]\n"
- "fmla z20.s, p3/M, z4.s, z12.s\n"
- "fmla z21.s, p3/M, z3.s, z12.s\n"
- "st1w { z28.s }, p1, [x23, x13, LSL #2]\n"
+ "cmp x15, %x[n_channels]\n"
+ "ld1w { z8.s }, p3/Z, [x8]\n"
+ "st1w { z24.s }, p1, [x23, x13, LSL #2]\n"
"ldr x23, [x14, #0x40]\n"
- "fmla z22.s, p3/M, z5.s, z0.s\n"
- "fmla z23.s, p3/M, z4.s, z0.s\n"
- "st1w { z29.s }, p1, [x22, x13, LSL #2]\n"
+ ".inst 0xc1afc9d4 // fclamp { z20.s-z23.s }, z14.s, z15.s\n"
+ "addvl x8, x8, #1\n"
+ "st1w { z25.s }, p1, [x22, x13, LSL #2]\n"
"ldr x22, [x14, #0x48]\n"
- ".inst 0xc1afc9d8 // fclamp { z24.s-z27.s }, z14.s, z15.s\n"
- "ld1w { z10.s }, p0/Z, [x25, x16, LSL #2]\n"
- "st1w { z30.s }, p1, [x21, x13, LSL #2]\n"
+ "st1w { z26.s }, p1, [x21, x13, LSL #2]\n"
"ldr x21, [x14, #0x50]\n"
- "ld1w { z12.s }, p0/Z, [x24, x16, LSL #2]\n"
- "incw x16\n"
- "st1w { z31.s }, p1, [x20, x13, LSL #2]\n"
+ "st1w { z27.s }, p1, [x20, x13, LSL #2]\n"
"ldr x20, [x14, #0x58]\n"
- ".inst 0xa040c100 // ld1w { z0.s-z3.s }, pn8.b/Z, [x8]\n"
- "addvl x8, x8, #4\n"
- "st1w { z24.s }, p1, [x23, x13, LSL #2]\n"
+ "st1w { z28.s }, p1, [x23, x13, LSL #2]\n"
"ldr x23, [x14, #0x60]\n"
- "whilelt p2.s, x15, %x[n_channels]\n"
- ".inst 0xa040c104 // ld1w { z4.s-z7.s }, pn8.b/Z, [x8]\n"
- "st1w { z25.s }, p1, [x22, x13, LSL #2]\n"
+ "st1w { z29.s }, p1, [x22, x13, LSL #2]\n"
"ldr x22, [x14, #0x68]\n"
- "addvl x8, x8, #4\n"
- "cmp x16, %x[n_channels]\n"
- "st1w { z26.s }, p1, [x21, x13, LSL #2]\n"
+ "st1w { z30.s }, p1, [x21, x13, LSL #2]\n"
"ldr x21, [x14, #0x70]\n"
- ".inst 0xc1afc9d4 // fclamp { z20.s-z23.s }, z14.s, z15.s\n"
- "ld1w { z8.s }, p3/Z, [x8]\n"
- "st1w { z27.s }, p1, [x20, x13, LSL #2]\n"
+ "st1w { z31.s }, p1, [x20, x13, LSL #2]\n"
"ldr x20, [x14, #0x78]\n"
- "addvl x8, x8, #1\n"
"st1w { z20.s }, p1, [x23, x13, LSL #2]\n"
"st1w { z21.s }, p1, [x22, x13, LSL #2]\n"
"st1w { z22.s }, p1, [x21, x13, LSL #2]\n"
"st1w { z23.s }, p1, [x20, x13, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z29, z13\n fmla z29.s, p3/M, z4.s, z9.s\n"
- "movprfx z20, z13\n fmla z20.s, p3/M, z8.s, z9.s\n"
+ "movprfx z21, z13\n fmla z21.s, p3/M, z4.s, z9.s\n"
+ "movprfx z24, z13\n fmla z24.s, p3/M, z8.s, z9.s\n"
"ldr x24, [x17, #0x20]\n"
"incw x13\n"
- "movprfx z30, z13\n fmla z30.s, p3/M, z3.s, z9.s\n"
- "movprfx z25, z13\n fmla z25.s, p3/M, z1.s, z9.s\n"
+ "movprfx z22, z13\n fmla z22.s, p3/M, z3.s, z9.s\n"
+ "movprfx z29, z13\n fmla z29.s, p3/M, z1.s, z9.s\n"
"ldr x20, [x17, #0x30]\n"
"mov p0.b, p2.b\n"
- "movprfx z26, z13\n fmla z26.s, p3/M, z0.s, z9.s\n"
+ "movprfx z30, z13\n fmla z30.s, p3/M, z0.s, z9.s\n"
"ldr x23, [x17, #0x28]\n"
- "movprfx z21, z13\n fmla z21.s, p3/M, z7.s, z9.s\n"
- "movprfx z22, z13\n fmla z22.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z5.s, z12.s\n"
+ "movprfx z25, z13\n fmla z25.s, p3/M, z7.s, z9.s\n"
+ "movprfx z26, z13\n fmla z26.s, p3/M, z6.s, z9.s\n"
+ "movprfx z20, z13\n fmla z20.s, p3/M, z5.s, z9.s\n"
"ldr x22, [x17, #0x38]\n"
- "movprfx z28, z13\n fmla z28.s, p3/M, z5.s, z9.s\n"
- "movprfx z24, z13\n fmla z24.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z21.s, p3/M, z5.s, z12.s\n"
+ "movprfx z28, z13\n fmla z28.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x21, [x17, #0x40]\n"
- "fmla z20.s, p3/M, z0.s, z10.s\n"
- "movprfx z23, z13\n fmla z23.s, p3/M, z2.s, z11.s\n"
- "ld1w { z19.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z0.s, z10.s\n"
+ "movprfx z27, z13\n fmla z27.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x16, LSL #2]\n"
"ldr x20, [x17, #0x48]\n"
- "fmla z30.s, p3/M, z4.s, z12.s\n"
- "fmla z25.s, p3/M, z2.s, z12.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z22.s, p3/M, z4.s, z12.s\n"
+ "fmla z29.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z18.s }, p2/Z, [x23, x16, LSL #2]\n"
"ldr x27, [x17, #0x50]\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "fmla z21.s, p3/M, z8.s, z12.s\n"
+ "fmla z30.s, p3/M, z1.s, z12.s\n"
+ "fmla z25.s, p3/M, z8.s, z12.s\n"
"ldr x26, [x17, #0x60]\n"
- "fmla z22.s, p3/M, z7.s, z12.s\n"
- "movprfx z16, z13\n fmla z16.s, p3/M, z6.s, z19.s\n"
- "ld1w { z11.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z12.s\n"
+ "movprfx z16, z13\n fmla z16.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x25, [x17, #0x68]\n"
- "fmla z29.s, p3/M, z7.s, z9.s\n"
- "fmla z23.s, p3/M, z6.s, z12.s\n"
+ "fmla z21.s, p3/M, z7.s, z9.s\n"
+ "fmla z27.s, p3/M, z6.s, z12.s\n"
"ldr x20, [x17, #0x58]\n"
- "movprfx z31, z13\n fmla z31.s, p3/M, z3.s, z12.s\n"
- "movprfx z27, z13\n fmla z27.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "movprfx z23, z13\n fmla z23.s, p3/M, z3.s, z12.s\n"
+ "movprfx z31, z13\n fmla z31.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x22, x16, LSL #2]\n"
"ldr x24, [x17, #0x70]\n"
- "movprfx z19, z13\n fmla z19.s, p3/M, z8.s, z17.s\n"
- "fmla z30.s, p3/M, z6.s, z9.s\n"
- "ld1w { z10.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "movprfx z19, z13\n fmla z19.s, p3/M, z8.s, z18.s\n"
+ "fmla z22.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z12.s }, p2/Z, [x21, x16, LSL #2]\n"
"ldr x23, [x17, #0x78]\n"
- "fmla z25.s, p3/M, z4.s, z9.s\n"
- "fmla z26.s, p3/M, z3.s, z9.s\n"
+ "fmla z29.s, p3/M, z4.s, z9.s\n"
+ "fmla z30.s, p3/M, z3.s, z9.s\n"
"ldr x22, [x17, #0x80]\n"
"movprfx z17, z13\n fmla z17.s, p3/M, z1.s, z9.s\n"
"movprfx z18, z13\n fmla z18.s, p3/M, z0.s, z9.s\n"
"ldr x21, [x17, #0x88]\n"
- "fmla z28.s, p3/M, z8.s, z9.s\n"
- "fmla z24.s, p3/M, z5.s, z9.s\n"
+ "fmla z20.s, p3/M, z8.s, z9.s\n"
+ "fmla z28.s, p3/M, z5.s, z9.s\n"
"ldr x12, [x14, #0x0]\n"
"fmla z16.s, p3/M, z2.s, z9.s\n"
- "fmla z20.s, p3/M, z1.s, z12.s\n"
- "ld1w { z9.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z1.s, z10.s\n"
+ "ld1w { z13.s }, p2/Z, [x27, x16, LSL #2]\n"
"ldr x27, [x17, #0x90]\n"
- "fmla z21.s, p3/M, z0.s, z12.s\n"
- "fmla z22.s, p3/M, z2.s, z10.s\n"
- "ld1w { z13.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z0.s, z10.s\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x20, [x17, #0x98]\n"
- "fmla z29.s, p3/M, z8.s, z11.s\n"
- "fmla z23.s, p3/M, z1.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "fmla z21.s, p3/M, z8.s, z11.s\n"
+ "fmla z27.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x16, LSL #2]\n"
"ldr x26, [x17, #0xa0]\n"
- "fmla z30.s, p3/M, z7.s, z11.s\n"
- "fmla z31.s, p3/M, z6.s, z11.s\n"
+ "fmla z22.s, p3/M, z7.s, z11.s\n"
+ "fmla z23.s, p3/M, z6.s, z11.s\n"
"ldr x11, [x14, #0x8]\n"
- "fmla z25.s, p3/M, z5.s, z11.s\n"
- "fmla z26.s, p3/M, z4.s, z11.s\n"
+ "fmla z29.s, p3/M, z5.s, z11.s\n"
+ "fmla z30.s, p3/M, z4.s, z11.s\n"
"ldr x10, [x14, #0x10]\n"
- "fmla z27.s, p3/M, z3.s, z11.s\n"
+ "fmla z31.s, p3/M, z3.s, z11.s\n"
"fmla z17.s, p3/M, z2.s, z11.s\n"
"ldr x9, [x14, #0x18]\n"
"fmla z18.s, p3/M, z1.s, z11.s\n"
"fmla z19.s, p3/M, z0.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x16, LSL #2]\n"
"ldr x25, [x17, #0xa8]\n"
- "fmla z20.s, p3/M, z3.s, z9.s\n"
- "fmla z28.s, p3/M, z0.s, z9.s\n"
- "fmla z24.s, p3/M, z6.s, z12.s\n"
- "fmla z16.s, p3/M, z3.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z3.s, z13.s\n"
+ "fmla z20.s, p3/M, z0.s, z13.s\n"
+ "fmla z28.s, p3/M, z6.s, z9.s\n"
+ "fmla z16.s, p3/M, z3.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x24, x16, LSL #2]\n"
"ldr x24, [x17, #0xb0]\n"
- "fmla z21.s, p3/M, z4.s, z10.s\n"
- "fmla z22.s, p3/M, z3.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z10.s\n"
- "fmla z23.s, p3/M, z5.s, z13.s\n"
- "fmla z31.s, p3/M, z2.s, z13.s\n"
- "fmla z30.s, p3/M, z0.s, z10.s\n"
- "ld1w { z13.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z4.s, z11.s\n"
+ "fmla z26.s, p3/M, z3.s, z11.s\n"
+ "fmla z21.s, p3/M, z1.s, z11.s\n"
+ "fmla z27.s, p3/M, z5.s, z10.s\n"
+ "fmla z23.s, p3/M, z2.s, z10.s\n"
+ "fmla z22.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z10.s }, p2/Z, [x23, x16, LSL #2]\n"
"ldr x23, [x17, #0xb8]\n"
- "fmla z27.s, p3/M, z8.s, z12.s\n"
- "fmla z19.s, p3/M, z5.s, z12.s\n"
- "ld1w { z9.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z31.s, p3/M, z8.s, z9.s\n"
+ "fmla z19.s, p3/M, z5.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x22, x16, LSL #2]\n"
"ldr x22, [x17, #0xc0]\n"
- "fmla z20.s, p3/M, z5.s, z10.s\n"
- "fmla z28.s, p3/M, z2.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z5.s, z11.s\n"
+ "fmla z20.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x16, LSL #2]\n"
"ldr x21, [x17, #0xc8]\n"
- "fmla z21.s, p3/M, z5.s, z13.s\n"
- "fmla z22.s, p3/M, z4.s, z13.s\n"
- "fmla z29.s, p3/M, z2.s, z13.s\n"
- "fmla z23.s, p3/M, z3.s, z13.s\n"
- "fmla z30.s, p3/M, z1.s, z13.s\n"
- "fmla z31.s, p3/M, z0.s, z13.s\n"
- "ld1w { z10.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z5.s, z10.s\n"
+ "fmla z26.s, p3/M, z4.s, z10.s\n"
+ "fmla z21.s, p3/M, z2.s, z10.s\n"
+ "fmla z27.s, p3/M, z3.s, z10.s\n"
+ "fmla z22.s, p3/M, z1.s, z10.s\n"
+ "fmla z23.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x28, [x17, #0xd8]\n"
"fmla z16.s, p3/M, z7.s, z9.s\n"
"fmla z17.s, p3/M, z6.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x27, x16, LSL #2]\n"
"ldr x20, [x17, #0xd0]\n"
- "fmla z20.s, p3/M, z7.s, z12.s\n"
- "fmla z21.s, p3/M, z6.s, z12.s\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "fmla z29.s, p3/M, z3.s, z12.s\n"
- "fmla z24.s, p3/M, z1.s, z12.s\n"
- "fmla z25.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z11.s\n"
+ "fmla z25.s, p3/M, z6.s, z11.s\n"
+ "fmla z20.s, p3/M, z4.s, z11.s\n"
+ "fmla z21.s, p3/M, z3.s, z11.s\n"
+ "fmla z28.s, p3/M, z1.s, z11.s\n"
+ "fmla z29.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x16, LSL #2]\n"
"ldr x27, [x17, #0xe0]\n"
- "fmla z22.s, p3/M, z8.s, z10.s\n"
- "fmla z18.s, p3/M, z8.s, z11.s\n"
- "fmla z19.s, p3/M, z7.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z1.s, z10.s\n"
+ "fmla z26.s, p3/M, z8.s, z12.s\n"
+ "fmla z18.s, p3/M, z8.s, z13.s\n"
+ "fmla z19.s, p3/M, z7.s, z13.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x16, LSL #2]\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
"ldr x26, [x17, #0xe8]\n"
- "fmla z23.s, p3/M, z7.s, z10.s\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "fmla z26.s, p3/M, z2.s, z10.s\n"
- "ld1w { z9.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z27.s, p3/M, z7.s, z12.s\n"
+ "fmla z22.s, p3/M, z5.s, z12.s\n"
+ "fmla z23.s, p3/M, z4.s, z12.s\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x16, LSL #2]\n"
"ldr x25, [x17, #0xf0]\n"
- "fmla z20.s, p3/M, z2.s, z12.s\n"
- "fmla z21.s, p3/M, z1.s, z12.s\n"
- "fmla z22.s, p3/M, z0.s, z12.s\n"
- "fmla z28.s, p3/M, z7.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z2.s, z11.s\n"
+ "fmla z25.s, p3/M, z1.s, z11.s\n"
+ "fmla z26.s, p3/M, z0.s, z11.s\n"
+ "fmla z20.s, p3/M, z7.s, z10.s\n"
+ "ld1w { z13.s }, p2/Z, [x23, x16, LSL #2]\n"
"ldr x24, [x17, #0xf8]\n"
- "fmla z29.s, p3/M, z6.s, z11.s\n"
- "fmla z24.s, p3/M, z4.s, z11.s\n"
- "fmla z25.s, p3/M, z3.s, z11.s\n"
- "fmla z16.s, p3/M, z1.s, z11.s\n"
- "fmla z17.s, p3/M, z0.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x22, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z4.s, z10.s\n"
+ "fmla z21.s, p3/M, z6.s, z10.s\n"
+ "fmla z28.s, p3/M, z4.s, z10.s\n"
+ "fmla z29.s, p3/M, z3.s, z10.s\n"
+ "fmla z16.s, p3/M, z1.s, z10.s\n"
+ "fmla z17.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z9.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "fmla z25.s, p3/M, z2.s, z12.s\n"
"ldr x23, [x17, #0x100]\n"
- "fmla z18.s, p3/M, z2.s, z10.s\n"
- "fmla z21.s, p3/M, z2.s, z9.s\n"
- "fmla z22.s, p3/M, z1.s, z9.s\n"
- "fmla z23.s, p3/M, z0.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z1.s, z12.s\n"
+ "fmla z27.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x21, x16, LSL #2]\n"
"ldr x22, [x17, #0x108]\n"
- "fmla z20.s, p3/M, z6.s, z12.s\n"
- "fmla z28.s, p3/M, z3.s, z12.s\n"
- "fmla z24.s, p3/M, z0.s, z12.s\n"
- "fmla z30.s, p3/M, z8.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z6.s, z13.s\n"
+ "fmla z20.s, p3/M, z3.s, z13.s\n"
+ "fmla z31.s, p3/M, z4.s, z9.s\n"
+ "fmla z18.s, p3/M, z2.s, z9.s\n"
+ "fmla z28.s, p3/M, z0.s, z13.s\n"
+ "fmla z22.s, p3/M, z8.s, z9.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x16, LSL #2]\n"
"ldr x21, [x17, #0x110]\n"
- "fmla z31.s, p3/M, z7.s, z10.s\n"
- "fmla z26.s, p3/M, z5.s, z10.s\n"
- "fmla z19.s, p3/M, z1.s, z10.s\n"
- "ld1w { z9.s }, p2/Z, [x28, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z2.s, z11.s\n"
+ "fmla z23.s, p3/M, z7.s, z9.s\n"
+ "fmla z30.s, p3/M, z5.s, z9.s\n"
+ "fmla z19.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z10.s }, p2/Z, [x28, x16, LSL #2]\n"
+ "fmla z27.s, p3/M, z8.s, z12.s\n"
"ldr x20, [x17, #0x118]\n"
- "fmla z16.s, p3/M, z0.s, z12.s\n"
- "fmla z17.s, p3/M, z4.s, z9.s\n"
- "fmla z18.s, p3/M, z3.s, z9.s\n"
- "fmla z23.s, p3/M, z8.s, z11.s\n"
- "fmla z31.s, p3/M, z5.s, z11.s\n"
- "fmla z28.s, p3/M, z6.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x27, x15, LSL #2]\n"
- "fmla z24.s, p3/M, z3.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x26, x15, LSL #2]\n"
- "fmla z25.s, p3/M, z7.s, z9.s\n"
- "fmla z26.s, p3/M, z6.s, z9.s\n"
- "fmla z16.s, p3/M, z5.s, z9.s\n"
- "fmla z27.s, p3/M, z5.s, z10.s\n"
- "fmla z19.s, p3/M, z2.s, z10.s\n"
- "fmla z17.s, p3/M, z7.s, z12.s\n"
- "fmla z18.s, p3/M, z6.s, z12.s\n"
- "fmla z24.s, p3/M, z8.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z16.s, p3/M, z8.s, z12.s\n"
- "fmla z25.s, p3/M, z8.s, z11.s\n"
- "fmla z26.s, p3/M, z7.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z6.s, z11.s\n"
- "fmla z17.s, p3/M, z5.s, z11.s\n"
- "fmla z18.s, p3/M, z4.s, z11.s\n"
- "fmla z19.s, p3/M, z3.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x22, x15, LSL #2]\n"
- "fmla z31.s, p3/M, z8.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x15, LSL #2]\n"
- "fmla z20.s, p3/M, z4.s, z9.s\n"
- "fmla z21.s, p3/M, z3.s, z9.s\n"
- "fmla z22.s, p3/M, z5.s, z11.s\n"
- "fmla z23.s, p3/M, z4.s, z11.s\n"
- "fmla z17.s, p3/M, z8.s, z12.s\n"
- "fmla z18.s, p3/M, z7.s, z12.s\n"
- "fmla z19.s, p3/M, z6.s, z12.s\n"
- "ld1w { z13.s }, p2/Z, [x21, x15, LSL #2]\n"
- "fmla z28.s, p3/M, z1.s, z9.s\n"
- "fmla z29.s, p3/M, z0.s, z9.s\n"
- "ld1w { z0.s }, p2/Z, [x20, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z2.s, z11.s\n"
- "fmla z31.s, p3/M, z1.s, z11.s\n"
+ "fmla z31.s, p3/M, z2.s, z12.s\n"
+ "fmla z16.s, p3/M, z0.s, z11.s\n"
+ "fmla z20.s, p3/M, z6.s, z11.s\n"
+ "fmla z28.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x16, LSL #2]\n"
+ "fmla z17.s, p3/M, z4.s, z10.s\n"
+ "fmla z18.s, p3/M, z3.s, z10.s\n"
+ "fmla z23.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z13.s }, p2/Z, [x27, x16, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z10.s\n"
+ "fmla z30.s, p3/M, z6.s, z10.s\n"
+ "fmla z16.s, p3/M, z5.s, z10.s\n"
+ "fmla z28.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x25, x16, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z13.s\n"
+ "fmla z19.s, p3/M, z2.s, z13.s\n"
+ "fmla z17.s, p3/M, z7.s, z9.s\n"
+ "fmla z18.s, p3/M, z6.s, z9.s\n"
+ "fmla z16.s, p3/M, z8.s, z9.s\n"
+ "fmla z29.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z12.s }, p2/Z, [x23, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z10.s\n"
+ "fmla z23.s, p3/M, z8.s, z13.s\n"
+ "ld1w { z13.s }, p2/Z, [x24, x16, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z10.s\n"
+ "fmla z19.s, p3/M, z3.s, z10.s\n"
+ "fmla z17.s, p3/M, z5.s, z10.s\n"
+ "fmla z18.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z11.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z12.s\n"
+ "fmla z25.s, p3/M, z3.s, z12.s\n"
+ "fmla z20.s, p3/M, z1.s, z12.s\n"
+ "fmla z21.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z9.s }, p2/Z, [x20, x16, LSL #2]\n"
+ "fmla z26.s, p3/M, z5.s, z11.s\n"
+ "fmla z27.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z8.s, z13.s\n"
+ "fmla z18.s, p3/M, z7.s, z13.s\n"
+ "fmla z19.s, p3/M, z6.s, z13.s\n"
+ "fmla z22.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z13.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "fmla z23.s, p3/M, z1.s, z11.s\n"
+ "fmla z30.s, p3/M, z8.s, z9.s\n"
+ ".inst 0xc1afc9d8 // fclamp { z24.s-z27.s }, z14.s, z15.s\n"
+ "fmla z31.s, p3/M, z7.s, z9.s\n"
+ "fmla z28.s, p3/M, z7.s, z13.s\n"
+ "fmla z29.s, p3/M, z6.s, z13.s\n"
+ "fmla z16.s, p3/M, z4.s, z13.s\n"
+ "fmla z17.s, p3/M, z3.s, z13.s\n"
".inst 0xc1afc9d4 // fclamp { z20.s-z23.s }, z14.s, z15.s\n"
- "st1w { z20.s }, p0, [x12, x13, LSL #2]\n"
+ "fmla z18.s, p3/M, z5.s, z9.s\n"
+ "st1w { z24.s }, p0, [x12, x13, LSL #2]\n"
"ldr x23, [x14, #0x20]\n"
- "fmla z24.s, p3/M, z7.s, z13.s\n"
- "st1w { z21.s }, p0, [x11, x13, LSL #2]\n"
+ "fmla z19.s, p3/M, z4.s, z9.s\n"
+ "st1w { z25.s }, p0, [x11, x13, LSL #2]\n"
"ldr x22, [x14, #0x28]\n"
- "fmla z25.s, p3/M, z6.s, z13.s\n"
- "fmla z26.s, p3/M, z8.s, z0.s\n"
- "st1w { z22.s }, p0, [x10, x13, LSL #2]\n"
- "ldr x21, [x14, #0x30]\n"
- "fmla z27.s, p3/M, z7.s, z0.s\n"
".inst 0xc1afc9dc // fclamp { z28.s-z31.s }, z14.s, z15.s\n"
- "st1w { z23.s }, p0, [x9, x13, LSL #2]\n"
+ "st1w { z26.s }, p0, [x10, x13, LSL #2]\n"
+ "ldr x21, [x14, #0x30]\n"
+ "st1w { z27.s }, p0, [x9, x13, LSL #2]\n"
"ldr x20, [x14, #0x38]\n"
- "fmla z16.s, p3/M, z4.s, z13.s\n"
- "fmla z17.s, p3/M, z3.s, z13.s\n"
- "st1w { z28.s }, p0, [x23, x13, LSL #2]\n"
+ "st1w { z20.s }, p0, [x23, x13, LSL #2]\n"
"ldr x23, [x14, #0x40]\n"
- "fmla z18.s, p3/M, z5.s, z0.s\n"
- "fmla z19.s, p3/M, z4.s, z0.s\n"
- "st1w { z29.s }, p0, [x22, x13, LSL #2]\n"
- "ldr x22, [x14, #0x48]\n"
- ".inst 0xc1afc9d8 // fclamp { z24.s-z27.s }, z14.s, z15.s\n"
".inst 0xc1afc9d0 // fclamp { z16.s-z19.s }, z14.s, z15.s\n"
- "st1w { z30.s }, p0, [x21, x13, LSL #2]\n"
+ "st1w { z21.s }, p0, [x22, x13, LSL #2]\n"
+ "ldr x22, [x14, #0x48]\n"
+ "st1w { z22.s }, p0, [x21, x13, LSL #2]\n"
"ldr x21, [x14, #0x50]\n"
- "st1w { z31.s }, p0, [x20, x13, LSL #2]\n"
+ "st1w { z23.s }, p0, [x20, x13, LSL #2]\n"
"ldr x20, [x14, #0x58]\n"
- "st1w { z24.s }, p0, [x23, x13, LSL #2]\n"
+ "st1w { z28.s }, p0, [x23, x13, LSL #2]\n"
"ldr x23, [x14, #0x60]\n"
- "st1w { z25.s }, p0, [x22, x13, LSL #2]\n"
+ "st1w { z29.s }, p0, [x22, x13, LSL #2]\n"
"ldr x22, [x14, #0x68]\n"
- "st1w { z26.s }, p0, [x21, x13, LSL #2]\n"
+ "st1w { z30.s }, p0, [x21, x13, LSL #2]\n"
"ldr x21, [x14, #0x70]\n"
- "st1w { z27.s }, p0, [x20, x13, LSL #2]\n"
+ "st1w { z31.s }, p0, [x20, x13, LSL #2]\n"
"ldr x20, [x14, #0x78]\n"
"st1w { z16.s }, p0, [x23, x13, LSL #2]\n"
"st1w { z17.s }, p0, [x22, x13, LSL #2]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index 066b935486..131a8eec01 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,78 +88,78 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ptrue p3.b\n"
- ".inst 0x25207810 // ptrue pn8.b\n"
"mov x2, #0x0\n"
"mov x3, #0x0\n"
+ "ptrue p3.b\n"
+ ".inst 0x25207810 // ptrue pn8.b\n"
"1:" // Tile loop
"str x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
"mov x22, #0x4\n"
"str x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"ldr x21, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
"ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x20, x2, x21\n" // offset = tile_i * ld_input_row
+ "ldr x6, [%x[params_struct], %[offsetof_args_params]]\n"
"madd x20, x3, x4, x20\n" // offset += tile_j * ld_input_col
+ "add x7, x4, x4\n"
"mul x20, x20, x22\n" // offset *= kernel_stride * output_size
- "ldr x5, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "add x8, x7, x4\n"
"add x5, x5, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x6, x5, x21, LSL #2\n"
- "add x7, x6, x21, LSL #2\n"
- "add x8, x4, x4\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x16, x7, x21, LSL #2\n"
- "add x15, x8, x4\n"
- "add x14, x16, x21, LSL #2\n"
- "add x13, x15, x4\n"
+ "add x17, x8, x4\n"
+ "add x16, x5, x21, LSL #2\n"
+ "add x15, x16, x21, LSL #2\n"
+ "add x14, x15, x21, LSL #2\n"
+ "add x13, x14, x21, LSL #2\n"
"cbnz x3, 2f\n"
"ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "sub x21, x20, x3\n"
- "sub x21, x21, #0x1\n"
"lsl x12, %x[n_channels], #0x2\n"
- "mov x20, #0x10\n"
- "and x21, x21, #0x3fffff\n"
- "mul x20, x20, x4\n"
- "orr x12, x12, x21, LSL #22\n"
- "orr x12, x12, x20, LSL #38\n"
- "add x27, x7, x8, LSL #2\n"
+ "mov x28, #0x10\n"
+ "mul x28, x28, x4\n"
+ "add x27, x15, x7, LSL #2\n"
"add x26, x5, x4, LSL #2\n"
- "add x25, x5, x15, LSL #2\n"
- "add x24, x5, x13, LSL #2\n"
- "add x23, x6, x4, LSL #2\n"
- "add x22, x5, x8, LSL #2\n"
- "add x21, x6, x15, LSL #2\n"
- "add x20, x6, x13, LSL #2\n"
- "add x11, x6, x8, LSL #2\n"
- "add x10, x16, x4, LSL #2\n"
- "add x9, x7, x4, LSL #2\n"
- "add x28, x16, x15, LSL #2\n"
+ "add x25, x5, x8, LSL #2\n"
+ "sub x20, x20, x3\n"
+ "add x24, x5, x17, LSL #2\n"
+ "sub x20, x20, #0x1\n"
+ "add x23, x16, x4, LSL #2\n"
+ "and x20, x20, #0x3fffff\n"
+ "add x22, x5, x7, LSL #2\n"
+ "orr x12, x12, x20, LSL #22\n"
+ "add x21, x16, x8, LSL #2\n"
+ "orr x12, x12, x28, LSL #38\n"
+ "add x20, x16, x17, LSL #2\n"
+ "add x11, x16, x7, LSL #2\n"
+ "add x10, x14, x4, LSL #2\n"
+ "add x9, x15, x4, LSL #2\n"
+ "add x28, x14, x8, LSL #2\n"
".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
- "add x27, x7, x15, LSL #2\n"
+ "add x27, x15, x8, LSL #2\n"
".inst 0xf8ac48ba // rprfm pldonce, x12, [x5]\n"
".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- "add x26, x16, x13, LSL #2\n"
+ "add x26, x14, x17, LSL #2\n"
".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
- "add x25, x7, x13, LSL #2\n"
+ "add x25, x15, x17, LSL #2\n"
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
- "add x24, x14, x4, LSL #2\n"
- ".inst 0xf8ac48da // rprfm pldonce, x12, [x6]\n"
+ "add x24, x13, x4, LSL #2\n"
+ ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
- "add x23, x16, x8, LSL #2\n"
+ "add x23, x14, x7, LSL #2\n"
".inst 0xf8ac4ada // rprfm pldonce, x12, [x22]\n"
- "add x22, x14, x15, LSL #2\n"
+ "add x22, x13, x8, LSL #2\n"
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
- "add x21, x14, x8, LSL #2\n"
+ "add x21, x13, x7, LSL #2\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
- "add x20, x14, x13, LSL #2\n"
+ "add x20, x13, x17, LSL #2\n"
".inst 0xf8ac497a // rprfm pldonce, x12, [x11]\n"
- ".inst 0xf8ac4a1a // rprfm pldonce, x12, [x16]\n"
- ".inst 0xf8ac48fa // rprfm pldonce, x12, [x7]\n"
+ ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
+ ".inst 0xf8ac49fa // rprfm pldonce, x12, [x15]\n"
".inst 0xf8ac495a // rprfm pldonce, x12, [x10]\n"
".inst 0xf8ac493a // rprfm pldonce, x12, [x9]\n"
".inst 0xf8ac4b9a // rprfm pldonce, x12, [x28]\n"
".inst 0xf8ac4b7a // rprfm pldonce, x12, [x27]\n"
".inst 0xf8ac4b5a // rprfm pldonce, x12, [x26]\n"
- ".inst 0xf8ac49da // rprfm pldonce, x12, [x14]\n"
+ ".inst 0xf8ac49ba // rprfm pldonce, x12, [x13]\n"
".inst 0xf8ac4b3a // rprfm pldonce, x12, [x25]\n"
".inst 0xf8ac4b1a // rprfm pldonce, x12, [x24]\n"
".inst 0xf8ac4afa // rprfm pldonce, x12, [x23]\n"
@@ -167,199 +167,199 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
".inst 0xf8ac4aba // rprfm pldonce, x12, [x21]\n"
".inst 0xf8ac4a9a // rprfm pldonce, x12, [x20]\n"
"2:" // Tile loop: Prefetch input rows: End
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x2, x22\n" // offset = tile_i * ld_output_row
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"mov x20, #0x2\n"
- "ld1w { z22.s }, p3/Z, [x17]\n"
- "ldr x25, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "madd x21, x3, x25, x21\n" // offset += tile_j * ld_output_col
- "addvl x17, x17, #1\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
+ "ld1w { z28.s }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
+ "ldr x26, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
+ "cntw x25\n"
+ ".inst 0xa040c0c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
"ldr x24, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "mul x21, x21, x20\n" // offset *= output_tile_size
- "cntw x23\n"
- "ld1rw { z26.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "addvl x17, x17, #4\n"
- "add x24, x24, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "addvl x17, x17, #4\n"
- "ld1rw { z24.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "cmp x23, %x[n_channels]\n"
- "add x22, x24, x22, LSL #2\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
+ ".inst 0xa040c0c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "mul x22, x2, x23\n" // offset = tile_i * ld_output_row
+ "cmp x25, %x[n_channels]\n"
+ "ld1rw { z30.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "madd x22, x3, x26, x22\n" // offset += tile_j * ld_output_col
+ "ld1rw { z31.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"mov x21, #0x0\n"
- "sub x20, XZR, x23\n"
- "ld1w { z9.s }, p2/Z, [x7, x8, LSL #2]\n"
+ "mul x22, x22, x20\n" // offset *= output_tile_size
+ "sub x20, XZR, x25\n"
+ "ld1w { z8.s }, p3/Z, [x6]\n"
+ "add x24, x24, x22, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "ld1w { z9.s }, p2/Z, [x15, x7, LSL #2]\n"
+ "addvl x6, x6, #1\n"
+ "add x23, x24, x23, LSL #2\n"
"ld1w { z10.s }, p2/Z, [x5]\n"
- "addvl x17, x17, #1\n"
"ld1w { z11.s }, p2/Z, [x5, x4, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x5, x15, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x5, x13, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x6]\n"
- "ld1w { z15.s }, p2/Z, [x6, x4, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x5, x8, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x5, x8, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x5, x17, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x16]\n"
+ "ld1w { z15.s }, p2/Z, [x16, x4, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x5, x7, LSL #2]\n"
"bge 4f\n"
"3:" // Tile loop: Channel loop
- "movprfx z28, z22\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "movprfx z29, z22\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "whilelt p1.s, x23, %x[n_channels]\n"
+ "movprfx z24, z28\n fmla z24.s, p3/M, z8.s, z9.s\n"
+ "movprfx z25, z28\n fmla z25.s, p3/M, z6.s, z9.s\n"
+ "whilelt p1.s, x25, %x[n_channels]\n"
"incw x21\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z12.s\n"
- "ld1w { z18.s }, p2/Z, [x6, x13, LSL #2]\n"
- "incw x23\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z27.s }, p2/Z, [x6, x15, LSL #2]\n"
+ "movprfx z26, z28\n fmla z26.s, p3/M, z2.s, z9.s\n"
+ "movprfx z27, z28\n fmla z27.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z28.s }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
+ "incw x25\n"
"mov p0.b, p2.b\n"
- "fmla z28.s, p3/M, z3.s, z14.s\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x6, x8, LSL #2]\n"
"addvl x5, x5, #1\n"
- "fmla z28.s, p3/M, z4.s, z15.s\n"
- "fmla z29.s, p3/M, z4.s, z27.s\n"
- "ld1w { z25.s }, p2/Z, [x16]\n"
- "addvl x6, x6, #1\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z5.s, z18.s\n"
- "ld1w { z12.s }, p2/Z, [x7]\n"
"incw x20\n"
- "movprfx z30, z22\n fmla z30.s, p3/M, z2.s, z9.s\n"
- "movprfx z31, z22\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z18.s }, p2/Z, [x7, x15, LSL #2]\n"
- "fmla z28.s, p3/M, z5.s, z17.s\n"
- "fmla z29.s, p3/M, z3.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x16, x15, LSL #2]\n"
- "fmla z30.s, p3/M, z3.s, z25.s\n"
- "fmla z31.s, p3/M, z4.s, z16.s\n"
- "ld1w { z10.s }, p2/Z, [x16, x4, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z12.s\n"
- "fmla z31.s, p3/M, z1.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x16, x13, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z10.s\n"
- "fmla z31.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x7, x4, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z12.s\n"
- "ld1w { z22.s }, p2/Z, [x7, x13, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z16.s\n"
- "addvl x7, x7, #1\n"
- "fmla z31.s, p3/M, z2.s, z22.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14]\n"
- "ld1w { z17.s }, p2/Z, [x16, x8, LSL #2]\n"
- "fmla z30.s, p3/M, z6.s, z16.s\n"
- "fmla z31.s, p3/M, z3.s, z17.s\n"
+ "fmla z24.s, p3/M, z0.s, z10.s\n"
+ "fmla z25.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z18.s }, p2/Z, [x16, x17, LSL #2]\n"
+ "ld1w { z10.s }, p1/Z, [x5]\n"
+ "fmla z24.s, p3/M, z1.s, z11.s\n"
+ "fmla z25.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z9.s }, p2/Z, [x16, x8, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x16, x7, LSL #2]\n"
"addvl x16, x16, #1\n"
- "ld1w { z16.s }, p2/Z, [x14, x4, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z16.s\n"
- "fmla z29.s, p3/M, z7.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x15, LSL #2]\n"
- "fmla z31.s, p3/M, z7.s, z16.s\n"
- "fmla z30.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x14, x8, LSL #2]\n"
- "fmla z31.s, p3/M, z6.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z22.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x13, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z17.s\n"
- "fmla z31.s, p3/M, z8.s, z16.s\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "ld1w { z22.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
- "cmp x23, %x[n_channels]\n"
- ".inst 0xc1b8cb5c // fclamp { z28.s-z31.s }, z26.s, z24.s\n"
- ".inst 0xa040c220 // ld1w { z0.s-z3.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
+ "fmla z24.s, p3/M, z3.s, z14.s\n"
+ "fmla z25.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x14]\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x14, x17, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z15.s\n"
+ "fmla z25.s, p3/M, z4.s, z9.s\n"
+ "ld1w { z19.s }, p2/Z, [x15]\n"
+ "ld1w { z17.s }, p2/Z, [x14, x4, LSL #2]\n"
+ "fmla z26.s, p3/M, z0.s, z19.s\n"
+ "fmla z24.s, p3/M, z2.s, z16.s\n"
+ "fmla z25.s, p3/M, z5.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, x8, LSL #2]\n"
+ "ld1w { z0.s }, p2/Z, [x15, x4, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z17.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, x17, LSL #2]\n"
+ "addvl x15, x15, #1\n"
+ "fmla z24.s, p3/M, z5.s, z22.s\n"
+ "fmla z25.s, p3/M, z3.s, z22.s\n"
+ "ld1w { z16.s }, p2/Z, [x14, x8, LSL #2]\n"
+ "ld1w { z9.s }, p1/Z, [x15, x7, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z16.s\n"
+ "fmla z26.s, p3/M, z1.s, z0.s\n"
+ "ld1w { z17.s }, p2/Z, [x13, x4, LSL #2]\n"
+ "fmla z24.s, p3/M, z6.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x13]\n"
+ "fmla z25.s, p3/M, z7.s, z18.s\n"
+ "fmla z27.s, p3/M, z1.s, z18.s\n"
+ "fmla z26.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x13, x7, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z0.s\n"
+ "ld1w { z16.s }, p2/Z, [x14, x7, LSL #2]\n"
+ "fmla z25.s, p3/M, z8.s, z20.s\n"
"addvl x14, x14, #1\n"
- "st1w { z28.s }, p0, [x24]\n"
- ".inst 0xa040c224 // ld1w { z4.s-z7.s }, pn8.b/Z, [x17]\n"
- "addvl x17, x17, #4\n"
- "st1w { z29.s }, p0, [x24, x25, LSL #2]\n"
- "addvl x24, x24, #1\n"
- "ld1w { z9.s }, p1/Z, [x7, x8, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "ld1w { z10.s }, p1/Z, [x5]\n"
- "st1w { z31.s }, p0, [x22, x25, LSL #2]\n"
- "addvl x22, x22, #1\n"
+ "ld1w { z12.s }, p1/Z, [x5, x8, LSL #2]\n"
+ "fmla z27.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z18.s }, p2/Z, [x13, x8, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z13.s }, p1/Z, [x5, x17, LSL #2]\n"
+ "fmla z27.s, p3/M, z2.s, z20.s\n"
+ "fmla z26.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x13, x17, LSL #2]\n"
+ "whilelt p2.s, x21, %x[n_channels]\n"
+ "cmp x25, %x[n_channels]\n"
+ "addvl x13, x13, #1\n"
+ "fmla z27.s, p3/M, z3.s, z16.s\n"
+ "fmla z26.s, p3/M, z8.s, z19.s\n"
+ ".inst 0xa040c0c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "ld1w { z16.s }, p1/Z, [x5, x7, LSL #2]\n"
+ "fmla z27.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z14.s }, p1/Z, [x16]\n"
+ "fmla z27.s, p3/M, z6.s, z19.s\n"
+ ".inst 0xa040c0c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x6]\n"
+ "addvl x6, x6, #4\n"
+ "ld1w { z15.s }, p1/Z, [x16, x4, LSL #2]\n"
+ "fmla z27.s, p3/M, z8.s, z17.s\n"
"ld1w { z11.s }, p1/Z, [x5, x4, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x5, x15, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x5, x13, LSL #2]\n"
- "ld1w { z14.s }, p1/Z, [x6]\n"
- "ld1w { z15.s }, p1/Z, [x6, x4, LSL #2]\n"
- "ld1w { z16.s }, p1/Z, [x5, x8, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x17]\n"
- "addvl x17, x17, #1\n"
+ "ld1w { z8.s }, p3/Z, [x6]\n"
+ "addvl x6, x6, #1\n"
+ ".inst 0xc1bfcbd8 // fclamp { z24.s-z27.s }, z30.s, z31.s\n"
+ "st1w { z24.s }, p0, [x24]\n"
+ "st1w { z25.s }, p0, [x24, x26, LSL #2]\n"
+ "addvl x24, x24, #1\n"
+ "st1w { z26.s }, p0, [x23]\n"
+ "st1w { z27.s }, p0, [x23, x26, LSL #2]\n"
+ "addvl x23, x23, #1\n"
"blt 3b\n"
"4:" // Tile loop: Channel tail
- "movprfx z28, z22\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "movprfx z29, z22\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "movprfx z24, z28\n fmla z24.s, p3/M, z8.s, z9.s\n"
+ "movprfx z25, z28\n fmla z25.s, p3/M, z6.s, z9.s\n"
"ldr x3, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "add x3, x3, #0x1\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z12.s\n"
- "ld1w { z18.s }, p2/Z, [x6, x13, LSL #2]\n"
+ "mov p0.b, p2.b\n"
+ "movprfx z26, z28\n fmla z26.s, p3/M, z2.s, z9.s\n"
+ "movprfx z27, z28\n fmla z27.s, p3/M, z0.s, z9.s\n"
"ldr x2, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "fmla z28.s, p3/M, z1.s, z11.s\n"
- "fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x6, x15, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "fmla z28.s, p3/M, z3.s, z14.s\n"
- "fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z20.s }, p2/Z, [x6, x8, LSL #2]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
"ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z28.s, p3/M, z4.s, z15.s\n"
- "fmla z29.s, p3/M, z4.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x16]\n"
- "cmp x3, x20\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z5.s, z18.s\n"
- "ld1w { z18.s }, p2/Z, [x7]\n"
+ "add x3, x3, #0x1\n"
+ "fmla z24.s, p3/M, z0.s, z10.s\n"
+ "fmla z25.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z21.s }, p2/Z, [x16, x17, LSL #2]\n"
"add x20, x2, #0x1\n"
- "movprfx z30, z22\n fmla z30.s, p3/M, z2.s, z9.s\n"
- "movprfx z31, z22\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ld1w { z19.s }, p2/Z, [x7, x15, LSL #2]\n"
+ "cmp x3, x22\n"
"csel x2, x2, x20, LT\n"
- "fmla z28.s, p3/M, z5.s, z20.s\n"
- "fmla z29.s, p3/M, z3.s, z20.s\n"
- "ld1w { z16.s }, p2/Z, [x16, x15, LSL #2]\n"
- "mov p0.b, p2.b\n"
- "fmla z30.s, p3/M, z3.s, z17.s\n"
- "fmla z31.s, p3/M, z4.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x16, x4, LSL #2]\n"
"csel x3, x3, XZR, LT\n"
- "fmla z30.s, p3/M, z0.s, z18.s\n"
- "fmla z31.s, p3/M, z1.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x16, x13, LSL #2]\n"
"cmp x2, x21\n"
- "fmla z30.s, p3/M, z4.s, z17.s\n"
- "fmla z31.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x7, x4, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "ld1w { z18.s }, p2/Z, [x7, x13, LSL #2]\n"
- "fmla z30.s, p3/M, z1.s, z16.s\n"
- "fmla z31.s, p3/M, z2.s, z18.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14]\n"
- "ld1w { z17.s }, p2/Z, [x16, x8, LSL #2]\n"
- "fmla z30.s, p3/M, z6.s, z16.s\n"
- "fmla z31.s, p3/M, z3.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x4, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z16.s\n"
- "fmla z29.s, p3/M, z7.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x15, LSL #2]\n"
- "fmla z31.s, p3/M, z7.s, z16.s\n"
- "fmla z30.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x14, x8, LSL #2]\n"
- "fmla z31.s, p3/M, z6.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x13, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z17.s\n"
- "fmla z31.s, p3/M, z8.s, z16.s\n"
- ".inst 0xc1b8cb5c // fclamp { z28.s-z31.s }, z26.s, z24.s\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x24, x25, LSL #2]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x22, x25, LSL #2]\n"
+ "fmla z24.s, p3/M, z1.s, z11.s\n"
+ "fmla z25.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z18.s }, p2/Z, [x16, x8, LSL #2]\n"
+ "ld1w { z20.s }, p2/Z, [x16, x7, LSL #2]\n"
+ "fmla z24.s, p3/M, z3.s, z14.s\n"
+ "fmla z25.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x14]\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z23.s }, p2/Z, [x14, x17, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z15.s\n"
+ "fmla z25.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z22.s }, p2/Z, [x15]\n"
+ "ld1w { z19.s }, p2/Z, [x14, x4, LSL #2]\n"
+ "fmla z26.s, p3/M, z0.s, z22.s\n"
+ "fmla z24.s, p3/M, z2.s, z16.s\n"
+ "fmla z25.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, x8, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x15, x4, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z19.s\n"
+ "ld1w { z21.s }, p2/Z, [x15, x17, LSL #2]\n"
+ "fmla z24.s, p3/M, z5.s, z20.s\n"
+ "fmla z25.s, p3/M, z3.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x14, x8, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z16.s\n"
+ "fmla z26.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z20.s }, p2/Z, [x13, x4, LSL #2]\n"
+ "fmla z24.s, p3/M, z6.s, z22.s\n"
+ "ld1w { z16.s }, p2/Z, [x13]\n"
+ "fmla z25.s, p3/M, z7.s, z18.s\n"
+ "fmla z27.s, p3/M, z1.s, z18.s\n"
+ "fmla z26.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x13, x7, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x14, x7, LSL #2]\n"
+ "fmla z25.s, p3/M, z8.s, z21.s\n"
+ "fmla z27.s, p3/M, z5.s, z23.s\n"
+ "ld1w { z17.s }, p2/Z, [x13, x8, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z20.s\n"
+ "fmla z27.s, p3/M, z2.s, z21.s\n"
+ "fmla z26.s, p3/M, z5.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x13, x17, LSL #2]\n"
+ "fmla z27.s, p3/M, z3.s, z18.s\n"
+ "fmla z26.s, p3/M, z8.s, z19.s\n"
+ "fmla z27.s, p3/M, z7.s, z17.s\n"
+ "fmla z27.s, p3/M, z6.s, z19.s\n"
+ "fmla z27.s, p3/M, z8.s, z16.s\n"
+ ".inst 0xc1bfcbd8 // fclamp { z24.s-z27.s }, z30.s, z31.s\n"
+ "st1w { z24.s }, p0, [x24]\n"
+ "st1w { z25.s }, p0, [x24, x26, LSL #2]\n"
+ "st1w { z26.s }, p0, [x23]\n"
+ "st1w { z27.s }, p0, [x23, x26, LSL #2]\n"
"blt 1b\n"
".inst 0xd503467f // SMSTOP\n"
:
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index dc7a40ff54..7ca4cafbe6 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -90,221 +90,221 @@ void sme2_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
"ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x15, #0x0\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
"ptrue p3.b\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ld1w { z26.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
- "ldp x14, x13, [x20, #0x0]\n"
- "cntw x12\n"
- ".inst 0xa040c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- "ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
+ "cntw x13\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
- "ldp x28, x26, [x16, #0x0]\n"
- "addvl x15, x15, #4\n"
- "cmp x12, %x[n_channels]\n"
- "ld1rw { z25.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rw { z24.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
+ "cmp x13, %x[n_channels]\n"
+ "ld1rw { z27.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "sub x10, XZR, x13\n"
+ "ldp x9, x28, [x20, #0x10]\n"
+ "ld1w { z23.s }, p3/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
+ "ldp x27, x26, [x16, #0x0]\n"
+ ".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
"ldp x25, x24, [x16, #0x10]\n"
- "ld1rw { z24.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "sub x27, XZR, x12\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
"ldp x23, x22, [x16, #0x20]\n"
- "ld1w { z8.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
+ "ld1w { z8.s }, p3/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"ldp x21, x20, [x16, #0x30]\n"
- "ld1w { z9.s }, p2/Z, [x28, x9, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ld1w { z15.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ld1w { z15.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x20, x15, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z28, z26\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "movprfx z29, z26\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "ldr x21, [x16, #0x40]\n"
- "whilelt p1.s, x12, %x[n_channels]\n"
+ "movprfx z28, z23\n fmla z28.s, p3/M, z8.s, z9.s\n"
+ "movprfx z29, z23\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ldr x25, [x16, #0x40]\n"
+ "whilelt p1.s, x13, %x[n_channels]\n"
+ "ldr x22, [x16, #0x48]\n"
+ "movprfx z30, z23\n fmla z30.s, p3/M, z2.s, z9.s\n"
+ "movprfx z31, z23\n fmla z31.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z23.s }, p3/Z, [x14]\n"
+ "ldr x21, [x16, #0x50]\n"
+ "addvl x14, x14, #1\n"
+ "incw x10\n"
+ "ldr x20, [x16, #0x58]\n"
+ "mov p0.b, p2.b\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z1.s, z12.s\n"
- "ldr x20, [x16, #0x48]\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z20.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ldr x27, [x16, #0x78]\n"
+ "ldr x24, [x16, #0x60]\n"
+ "ldr x26, [x16, #0x68]\n"
+ "ldr x23, [x16, #0x88]\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z22.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x20, [x16, #0x50]\n"
+ "ld1w { z25.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ldr x22, [x16, #0x80]\n"
+ "ld1w { z19.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x21, [x16, #0x70]\n"
+ "ldr x25, [x16, #0x90]\n"
"fmla z28.s, p3/M, z3.s, z14.s\n"
"fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x16, #0x58]\n"
+ "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x16, #0x98]\n"
+ "fmla z30.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z22.s }, p2/Z, [x23, x15, LSL #2]\n"
"fmla z28.s, p3/M, z4.s, z15.s\n"
- "fmla z29.s, p3/M, z4.s, z22.s\n"
- "ldr x21, [x16, #0x78]\n"
- "ld1w { z23.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z25.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "ld1w { z17.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z30.s, p3/M, z0.s, z18.s\n"
"fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z5.s, z18.s\n"
- "ldr x20, [x16, #0x60]\n"
- "ld1w { z13.s }, p2/Z, [x20, x9, LSL #2]\n"
- "movprfx z30, z26\n fmla z30.s, p3/M, z2.s, z9.s\n"
- "movprfx z31, z26\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ldr x20, [x16, #0x80]\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z5.s, z17.s\n"
- "fmla z29.s, p3/M, z3.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x21, [x16, #0x68]\n"
- "fmla z30.s, p3/M, z3.s, z23.s\n"
- "fmla z31.s, p3/M, z4.s, z16.s\n"
- "ldr x20, [x16, #0x88]\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z13.s\n"
- "fmla z31.s, p3/M, z1.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x16, #0x70]\n"
- "ldr x20, [x16, #0x98]\n"
+ "fmla z29.s, p3/M, z5.s, z20.s\n"
+ "ld1w { z25.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ldr x22, [x16, #0xc0]\n"
+ "ld1w { z15.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x21, [x16, #0xb0]\n"
"fmla z30.s, p3/M, z4.s, z17.s\n"
- "fmla z31.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z13.s\n"
- "ld1w { z4.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x16, #0x90]\n"
- "fmla z30.s, p3/M, z1.s, z16.s\n"
- "ldr x20, [x16, #0xa8]\n"
- "fmla z31.s, p3/M, z2.s, z4.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x16, #0xa0]\n"
- "fmla z30.s, p3/M, z6.s, z16.s\n"
- "fmla z31.s, p3/M, z3.s, z17.s\n"
- "ldr x20, [x16, #0xb0]\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z16.s\n"
- "fmla z29.s, p3/M, z7.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z20.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z19.s\n"
+ "fmla z29.s, p3/M, z3.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x15, LSL #2]\n"
"ldr x20, [x16, #0xb8]\n"
- "fmla z31.s, p3/M, z7.s, z16.s\n"
- "fmla z30.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x16, #0xc0]\n"
- "fmla z31.s, p3/M, z6.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z4.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldp x20, x26, [x16, #0x0]\n"
- "fmla z30.s, p3/M, z8.s, z17.s\n"
- "fmla z31.s, p3/M, z8.s, z16.s\n"
+ "fmla z31.s, p3/M, z4.s, z16.s\n"
+ "fmla z30.s, p3/M, z1.s, z15.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z28.s, p3/M, z6.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z25.s\n"
+ "fmla z31.s, p3/M, z1.s, z25.s\n"
+ "fmla z30.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z15.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z20.s\n"
+ "fmla z31.s, p3/M, z5.s, z22.s\n"
+ "ld1w { z18.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z17.s\n"
+ "fmla z31.s, p3/M, z2.s, z20.s\n"
+ "fmla z30.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ldp x27, x26, [x16, #0x0]\n"
"ldp x25, x24, [x16, #0x10]\n"
- "ld1w { z26.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
- "incw x9\n"
+ "incw x15\n"
"ldp x23, x22, [x16, #0x20]\n"
- "ld1w { z9.s }, p1/Z, [x20, x12, LSL #2]\n"
- "incw x27\n"
- "mov p0.b, p2.b\n"
+ "whilelt p2.s, x15, %x[n_channels]\n"
"ldp x21, x20, [x16, #0x30]\n"
- "ld1w { z10.s }, p1/Z, [x26, x12, LSL #2]\n"
- "whilelt p2.s, x9, %x[n_channels]\n"
- ".inst 0xc1b8cb3c // fclamp { z28.s-z31.s }, z25.s, z24.s\n"
- "ld1w { z11.s }, p1/Z, [x25, x12, LSL #2]\n"
- "st1w { z28.s }, p0, [x14, x27, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x24, x12, LSL #2]\n"
- "st1w { z29.s }, p0, [x13, x27, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x23, x12, LSL #2]\n"
- "st1w { z30.s }, p0, [x11, x27, LSL #2]\n"
- "ld1w { z14.s }, p1/Z, [x22, x12, LSL #2]\n"
- "st1w { z31.s }, p0, [x10, x27, LSL #2]\n"
- "ld1w { z15.s }, p1/Z, [x21, x12, LSL #2]\n"
- "ld1w { z16.s }, p1/Z, [x20, x12, LSL #2]\n"
- "incw x12\n"
- "cmp x12, %x[n_channels]\n"
- ".inst 0xa040c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
- "addvl x15, x15, #4\n"
- "ld1w { z8.s }, p3/Z, [x15]\n"
- "addvl x15, x15, #1\n"
+ "ld1w { z9.s }, p1/Z, [x27, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z16.s\n"
+ "fmla z30.s, p3/M, z8.s, z19.s\n"
+ "ld1w { z10.s }, p1/Z, [x26, x13, LSL #2]\n"
+ "ld1w { z12.s }, p1/Z, [x24, x13, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x23, x13, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x20, x13, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z14.s }, p1/Z, [x22, x13, LSL #2]\n"
+ ".inst 0xa040c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
+ "fmla z31.s, p3/M, z6.s, z19.s\n"
+ "ld1w { z15.s }, p1/Z, [x21, x13, LSL #2]\n"
+ ".inst 0xa040c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14]\n"
+ "addvl x14, x14, #4\n"
+ "fmla z31.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z11.s }, p1/Z, [x25, x13, LSL #2]\n"
+ "incw x13\n"
+ "cmp x13, %x[n_channels]\n"
+ "ld1w { z8.s }, p3/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
+ ".inst 0xc1bbcb1c // fclamp { z28.s-z31.s }, z24.s, z27.s\n"
+ "st1w { z28.s }, p0, [x12, x10, LSL #2]\n"
+ "st1w { z29.s }, p0, [x11, x10, LSL #2]\n"
+ "st1w { z30.s }, p0, [x9, x10, LSL #2]\n"
+ "st1w { z31.s }, p0, [x28, x10, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z28, z26\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "movprfx z29, z26\n fmla z29.s, p3/M, z6.s, z9.s\n"
- "ldr x21, [x16, #0x40]\n"
- "incw x27\n"
+ "movprfx z28, z23\n fmla z28.s, p3/M, z8.s, z9.s\n"
+ "movprfx z29, z23\n fmla z29.s, p3/M, z6.s, z9.s\n"
+ "ldr x25, [x16, #0x40]\n"
+ "incw x10\n"
+ "ldr x22, [x16, #0x48]\n"
+ "movprfx z30, z23\n fmla z30.s, p3/M, z2.s, z9.s\n"
+ "movprfx z31, z23\n fmla z31.s, p3/M, z0.s, z9.s\n"
+ "mov p0.b, p2.b\n"
+ "ldr x21, [x16, #0x50]\n"
+ "ldr x20, [x16, #0x58]\n"
"fmla z28.s, p3/M, z0.s, z10.s\n"
"fmla z29.s, p3/M, z1.s, z12.s\n"
- "ldr x20, [x16, #0x48]\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z21.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ldr x27, [x16, #0x78]\n"
+ "ldr x24, [x16, #0x60]\n"
+ "ldr x26, [x16, #0x68]\n"
+ "ldr x23, [x16, #0x88]\n"
"fmla z28.s, p3/M, z1.s, z11.s\n"
"fmla z29.s, p3/M, z2.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x20, [x16, #0x50]\n"
+ "ld1w { z18.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ldr x22, [x16, #0x80]\n"
+ "ld1w { z20.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x21, [x16, #0x70]\n"
+ "ldr x25, [x16, #0x90]\n"
"fmla z28.s, p3/M, z3.s, z14.s\n"
"fmla z29.s, p3/M, z0.s, z16.s\n"
- "ld1w { z20.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x16, #0x58]\n"
+ "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x16, #0x98]\n"
+ "fmla z30.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z23.s }, p2/Z, [x23, x15, LSL #2]\n"
"fmla z28.s, p3/M, z4.s, z15.s\n"
- "fmla z29.s, p3/M, z4.s, z17.s\n"
- "ldr x21, [x16, #0x78]\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "fmla z29.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z22.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "ld1w { z19.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z30.s, p3/M, z0.s, z22.s\n"
"fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z5.s, z18.s\n"
- "ldr x20, [x16, #0x60]\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "movprfx z30, z26\n fmla z30.s, p3/M, z2.s, z9.s\n"
- "movprfx z31, z26\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "ldr x20, [x16, #0x80]\n"
- "ld1w { z19.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ldr x22, [x16, #0xc0]\n"
+ "ld1w { z17.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x21, [x16, #0xb0]\n"
+ "fmla z30.s, p3/M, z4.s, z19.s\n"
+ "ld1w { z21.s }, p2/Z, [x20, x15, LSL #2]\n"
"fmla z28.s, p3/M, z5.s, z20.s\n"
"fmla z29.s, p3/M, z3.s, z20.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x21, [x16, #0x68]\n"
- "fmla z30.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "ldr x20, [x16, #0xb8]\n"
"fmla z31.s, p3/M, z4.s, z16.s\n"
- "ldr x20, [x16, #0x88]\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z0.s, z18.s\n"
- "fmla z31.s, p3/M, z1.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x16, #0x70]\n"
- "ldr x20, [x16, #0x98]\n"
- "fmla z30.s, p3/M, z4.s, z17.s\n"
- "fmla z31.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x16, #0x90]\n"
- "fmla z30.s, p3/M, z1.s, z16.s\n"
- "ldr x20, [x16, #0xa8]\n"
- "fmla z31.s, p3/M, z2.s, z18.s\n"
- "fmla z28.s, p3/M, z7.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x16, #0xa0]\n"
+ "fmla z30.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z20.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z28.s, p3/M, z6.s, z22.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z18.s\n"
+ "fmla z31.s, p3/M, z1.s, z18.s\n"
"fmla z30.s, p3/M, z6.s, z16.s\n"
- "fmla z31.s, p3/M, z3.s, z17.s\n"
- "ldr x20, [x16, #0xb0]\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z16.s\n"
- "fmla z29.s, p3/M, z7.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z31.s, p3/M, z7.s, z16.s\n"
- "fmla z30.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x16, #0xc0]\n"
- "fmla z31.s, p3/M, z6.s, z17.s\n"
- "fmla z29.s, p3/M, z8.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z19.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z21.s\n"
+ "fmla z31.s, p3/M, z5.s, z23.s\n"
+ "ld1w { z17.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z20.s\n"
+ "fmla z31.s, p3/M, z2.s, z21.s\n"
+ "fmla z30.s, p3/M, z5.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z31.s, p3/M, z3.s, z18.s\n"
+ "fmla z30.s, p3/M, z8.s, z19.s\n"
+ "fmla z31.s, p3/M, z7.s, z17.s\n"
+ "fmla z31.s, p3/M, z6.s, z19.s\n"
"fmla z31.s, p3/M, z8.s, z16.s\n"
- "mov p0.b, p2.b\n"
- ".inst 0xc1b8cb3c // fclamp { z28.s-z31.s }, z25.s, z24.s\n"
- "st1w { z28.s }, p0, [x14, x27, LSL #2]\n"
- "st1w { z29.s }, p0, [x13, x27, LSL #2]\n"
- "st1w { z30.s }, p0, [x11, x27, LSL #2]\n"
- "st1w { z31.s }, p0, [x10, x27, LSL #2]\n"
+ ".inst 0xc1bbcb1c // fclamp { z28.s-z31.s }, z24.s, z27.s\n"
+ "st1w { z28.s }, p0, [x12, x10, LSL #2]\n"
+ "st1w { z29.s }, p0, [x11, x10, LSL #2]\n"
+ "st1w { z30.s }, p0, [x9, x10, LSL #2]\n"
+ "st1w { z31.s }, p0, [x28, x10, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp
index a385893146..f3906d8798 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s1_4rows_mla_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,65 +72,65 @@ void sme2_fp32_planar_3x3_s1_4rows_mla_za_impl(
"ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"mov x20, #0x6\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x7\n"
"ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ld1rw { z2.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
"ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rw { z3.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+ "sub x20, x20, x7\n"
+ "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z9.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p1.s, XZR, x16\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z24.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p8.s, XZR, x17\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
"ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
- "fmov z20.s, #0x0\n"
+ "fmov z16.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z20.s }, p1/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x20, x15, LSL #2]\n"
"2:" // Load bias: Done
"ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x14, #0x1\n"
- "orr x24, x20, %x[ld_in_col], LSL #18\n"
- "mov z21.d, z20.d\n"
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xa0404ae6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x23]\n"
- "orr x24, x16, x24, LSL #20\n"
- "mov x22, #0x6\n"
+ "mov x23, #0x6\n"
+ "add x20, x17, x7\n"
+ "mov z17.d, z16.d\n"
+ "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ "mov z18.d, z16.d\n"
+ "mov z19.d, z16.d\n"
"ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
- "ld1w { z10.s }, p2/Z, [x23, #2, MUL VL]\n"
- "addvl x23, x23, #3\n"
- "add x21, x17, x7\n"
- ".inst 0xa1404ae0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x23]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "mov z22.d, z20.d\n"
- "mov z23.d, z20.d\n"
- "ld1w { z9.s }, p2/Z, [x23, #2, MUL VL]\n"
- "addvl x23, x23, #3\n"
"mov x8, #0x0\n"
+ "sub x23, x23, x20\n"
+ "sub x20, x14, #0x1\n"
"ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
- ".inst 0xa0404ae4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x23]\n"
- "lsl x24, x24, #0x2\n"
- "sub x22, x22, x21\n"
- "ld1w { z1.s }, p2/Z, [x23, #2, MUL VL]\n"
- "madd x20, x20, x17, x13\n"
+ ".inst 0xa0404ace // ld1w { z14.s-z15.s }, pn10.b/Z, [x22]\n"
+ "orr x20, x20, %x[ld_in_col], LSL #18\n"
+ "ld1w { z11.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "orr x20, x16, x20, LSL #20\n"
+ ".inst 0xa0404acc // ld1w { z12.s-z13.s }, pn10.b/Z, [x22]\n"
+ "lsl x20, x20, #0x2\n"
+ "madd x21, x21, x17, x13\n"
+ "ld1w { z0.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ ".inst 0xa0404ac4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x22]\n"
+ "ld1w { z7.s }, p2/Z, [x22, #2, MUL VL]\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b84a9c // rprfm pldstrm, x24, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x23, x23, #0x1\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
"ldr x22, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x13, x17, x20, x13\n"
- ".inst 0xc0040e80 // mova za.d[x8, #0], { z20.d-z23.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040e81 // mova za.d[x8, #1], { z20.d-z23.d }\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ ".inst 0xc0040e00 // mova za.d[x8, #0], { z16.d-z19.d }\n"
"mov x10, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x13, x17, x21, x13\n"
+ ".inst 0xc0040e01 // mova za.d[x8, #1], { z16.d-z19.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
"ldp x9, x28, [x22], #0x10\n"
- ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
"ldp x27, x26, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
"ldp x25, x24, [x22], #0x10\n"
"ldp x23, x22, [x20], #0x10\n"
"cbz x21, 5f\n"
@@ -139,18 +139,18 @@ void sme2_fp32_planar_3x3_s1_4rows_mla_za_impl(
"sub x21, x21, x20\n"
"sub x10, x10, x20\n"
"cbz x21, 5f\n"
- ".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
"sub x11, x11, x21\n"
- ".inst 0xc1b8c84c // fclamp { z12.s-z15.s }, z2.s, z24.s\n"
+ ".inst 0xc1a9c87c // fclamp { z28.s-z31.s }, z3.s, z9.s\n"
"4:" // Left padding
"subs x21, x21, #0x1\n"
- "st1w { z12.s }, p1, [x9]\n"
+ "st1w { z28.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z13.s }, p1, [x28]\n"
+ "st1w { z29.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z14.s }, p1, [x25]\n"
+ "st1w { z30.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- "st1w { z15.s }, p1, [x24]\n"
+ "st1w { z31.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
@@ -162,94 +162,94 @@ void sme2_fp32_planar_3x3_s1_4rows_mla_za_impl(
"beq 7f\n"
"6:" // Unpadded: 2 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x13]\n"
+ "ld1w { z22.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
+ "ld1w { z23.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13619c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z6.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13019e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z0.s\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- ".inst 0xc1341a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z4.s\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
+ ".inst 0xc13e1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z14.s\n"
+ ".inst 0xc13c1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z12.s\n"
+ ".inst 0xc1341b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z4.s\n"
"7:" // Unpadded: 1 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z13.s }, p1/Z, [x13]\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x20]\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13719a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z7.s\n"
- ".inst 0xc13619a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z6.s\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
+ "ld1w { z28.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13819c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z8.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- ".inst 0xc13019c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z0.s\n"
- ".inst 0xc13519e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z5.s\n"
- ".inst 0xc13419e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z4.s\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
+ ".inst 0xc13f1b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z15.s\n"
+ ".inst 0xc13e1b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z14.s\n"
+ ".inst 0xc13d1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z13.s\n"
+ ".inst 0xc13c1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z12.s\n"
+ ".inst 0xc1351b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z5.s\n"
+ ".inst 0xc1341b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z4.s\n"
"8:" // Unpadded: 0 priming loads
"cbz x14, 16f\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x13]\n"
+ "ld1w { z20.s }, p1/Z, [x13]\n"
"sub x14, x14, #0x1\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"sub x11, x11, #0x1\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"cmp x14, x11\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z23.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"csel x21, x14, x11, LT\n"
- "ld1w { z29.s }, p1/Z, [x20]\n"
+ "ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z30.s }, p1/Z, [x20]\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
"sub x11, x11, x21\n"
"cbz x21, 15f\n"
"9:" // Unpadded: Main loop
- ".inst 0xc13a1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z10.s\n"
+ ".inst 0xc13b1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z11.s\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
"subs x21, x21, #0x1\n"
- ".inst 0xc1391b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z9.s\n"
- ".inst 0xc1371b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z7.s\n"
- ".inst 0xc1361b22 // fmla za.s[x8, 2], { z25.s-z28.s }, z6.s\n"
- "ld1w { z25.s }, p1/Z, [x13]\n"
+ ".inst 0xc13f1a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z15.s\n"
+ ".inst 0xc13e1a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z14.s\n"
+ "ld1w { z20.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1311b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z1.s\n"
- ".inst 0xc1381b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z8.s\n"
- ".inst 0xc1301b42 // fmla za.s[x8, 2], { z26.s-z29.s }, z0.s\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
+ ".inst 0xc1301aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z0.s\n"
+ ".inst 0xc13d1aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z13.s\n"
+ ".inst 0xc13c1aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z12.s\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
- ".inst 0xc1b8c84c // fclamp { z12.s-z15.s }, z2.s, z24.s\n"
- "st1w { z12.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc1351b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z5.s\n"
- "st1w { z13.s }, p1, [x28]\n"
- "add x28, x28, x26, LSL #2\n"
- ".inst 0xc1341b62 // fmla za.s[x8, 2], { z27.s-z30.s }, z4.s\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
+ ".inst 0xc1371ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z7.s\n"
+ ".inst 0xc1351ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z5.s\n"
+ ".inst 0xc1341ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z4.s\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z23.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z14.s }, p1, [x25]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ ".inst 0xc1a9c87c // fclamp { z28.s-z31.s }, z3.s, z9.s\n"
+ "st1w { z28.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ "st1w { z29.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z30.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- "ld1w { z29.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z15.s }, p1, [x24]\n"
+ "st1w { z31.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
- ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
- "ld1w { z30.s }, p1/Z, [x20]\n"
"bgt 9b\n"
"b 15f\n"
"10:" // Padded
@@ -259,173 +259,173 @@ void sme2_fp32_planar_3x3_s1_4rows_mla_za_impl(
"beq 12f\n"
"11:" // Padded: 2 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z23.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1361960 // fmla za.s[x8, 0], { z11.s-z14.s }, z6.s\n"
+ "mov x12, #0x4\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1301980 // fmla za.s[x8, 0], { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc13e1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z14.s\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13419a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z4.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x20]\n"
+ ".inst 0xc13c1b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z12.s\n"
+ ".inst 0xc1341b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z4.s\n"
"12:" // Padded: 1 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z25.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1371960 // fmla za.s[x8, 0], { z11.s-z14.s }, z7.s\n"
+ "mov x12, #0x4\n"
+ "ld1w { z28.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1361961 // fmla za.s[x8, 1], { z11.s-z14.s }, z6.s\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13f1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z15.s\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1381980 // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1301981 // fmla za.s[x8, 1], { z12.s-z15.s }, z0.s\n"
- ".inst 0xc13519a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z5.s\n"
- ".inst 0xc13419a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z4.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13e1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z14.s\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
+ ".inst 0xc13d1b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z13.s\n"
+ ".inst 0xc13c1b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z12.s\n"
+ ".inst 0xc1351b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z5.s\n"
+ ".inst 0xc1341b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z4.s\n"
"13:" // Padded: 0 priming loads
"cbz x14, 16f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z25.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x14, x14, #0x1\n"
+ "sub x11, x11, #0x1\n"
+ "cmp x14, x11\n"
+ "ld1w { z20.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "csel x21, x14, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "sub x11, x11, x21\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
- "sub x14, x14, #0x1\n"
- "sub x11, x11, #0x1\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z23.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "cmp x14, x11\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
- "csel x21, x14, x11, LT\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- "sub x11, x11, x21\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
"cbz x21, 15f\n"
"14:" // Padded: Main loop
- ".inst 0xc13a1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z10.s\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1391b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z9.s\n"
+ ".inst 0xc13b1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z11.s\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xc13f1a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z15.s\n"
"subs x21, x21, #0x1\n"
- ".inst 0xc1371b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z7.s\n"
- ".inst 0xc1361b22 // fmla za.s[x8, 2], { z25.s-z28.s }, z6.s\n"
- "ld1w { z25.s }, p0/Z, [x13]\n"
+ ".inst 0xc13e1a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z14.s\n"
+ "ld1w { z20.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1301aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z0.s\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1311b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z1.s\n"
- ".inst 0xc1381b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z8.s\n"
- ".inst 0xc1301b42 // fmla za.s[x8, 2], { z26.s-z29.s }, z0.s\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13d1aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z13.s\n"
+ ".inst 0xc13c1aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z12.s\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1b8c850 // fclamp { z16.s-z19.s }, z2.s, z24.s\n"
- "st1w { z16.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc1351b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z5.s\n"
- "st1w { z17.s }, p1, [x28]\n"
- "add x28, x28, x26, LSL #2\n"
- ".inst 0xc1341b62 // fmla za.s[x8, 2], { z27.s-z30.s }, z4.s\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z7.s\n"
+ ".inst 0xc1351ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z5.s\n"
+ ".inst 0xc1341ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z4.s\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x4\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z18.s }, p1, [x25]\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
- "st1w { z19.s }, p1, [x24]\n"
+ "ld1w { z23.s }, p0/Z, [x20]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1w { z24.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1a9c87c // fclamp { z28.s-z31.s }, z3.s, z9.s\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "st1w { z28.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ "st1w { z29.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z30.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
+ "st1w { z31.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
"bgt 14b\n"
"15:" // Main loop tail
- ".inst 0xc13a1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z10.s\n"
- ".inst 0xc1391b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z9.s\n"
- ".inst 0xc1371b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z7.s\n"
- ".inst 0xc1361b22 // fmla za.s[x8, 2], { z25.s-z28.s }, z6.s\n"
- ".inst 0xc1311b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z1.s\n"
- ".inst 0xc1381b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z8.s\n"
- ".inst 0xc1301b42 // fmla za.s[x8, 2], { z26.s-z29.s }, z0.s\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1b8c850 // fclamp { z16.s-z19.s }, z2.s, z24.s\n"
- "st1w { z16.s }, p1, [x9]\n"
+ ".inst 0xc13b1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z11.s\n"
+ ".inst 0xc13f1a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z15.s\n"
+ ".inst 0xc13e1a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z14.s\n"
+ ".inst 0xc1301aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z0.s\n"
+ ".inst 0xc13d1aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z13.s\n"
+ ".inst 0xc13c1aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z12.s\n"
+ ".inst 0xc1371ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z7.s\n"
+ ".inst 0xc1351ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z5.s\n"
+ ".inst 0xc1341ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z4.s\n"
+ ".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc1a9c874 // fclamp { z20.s-z23.s }, z3.s, z9.s\n"
+ "st1w { z20.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc1351b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z5.s\n"
- "st1w { z17.s }, p1, [x28]\n"
+ "st1w { z21.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- ".inst 0xc1341b62 // fmla za.s[x8, 2], { z27.s-z30.s }, z4.s\n"
- "add x8, x8, #0x1\n"
- "st1w { z18.s }, p1, [x25]\n"
+ "st1w { z22.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- "st1w { z19.s }, p1, [x24]\n"
+ "st1w { z23.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
- ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
"16:" // Main loop skip tail
"cbz x11, 18f\n"
"17:" // Right padding loop
- ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"subs x11, x11, #0x1\n"
- ".inst 0xc1b8c848 // fclamp { z8.s-z11.s }, z2.s, z24.s\n"
- "st1w { z8.s }, p1, [x9]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc1a9c864 // fclamp { z4.s-z7.s }, z3.s, z9.s\n"
+ "st1w { z4.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
- "st1w { z9.s }, p1, [x28]\n"
+ "st1w { z5.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z10.s }, p1, [x25]\n"
+ "st1w { z6.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- "st1w { z11.s }, p1, [x24]\n"
+ "st1w { z7.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
"bgt 17b\n"
"18:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x15\n"
+ "whilelt p1.s, x15, x16\n"
"incb x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x15\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x15, x16\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21, LSL #2\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp
index 26315101b4..5ecfb08799 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_3x3_s2_4rows_mla_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,18 +72,18 @@ void sme2_fp32_planar_3x3_s2_4rows_mla_za_impl(
"ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"mov x20, #0x9\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x7\n"
"ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ld1rw { z7.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
"ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rw { z28.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+ "sub x20, x20, x7\n"
+ "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p1.s, XZR, x16\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z9.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p8.s, XZR, x17\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
"ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
"fmov z12.s, #0x0\n"
@@ -91,46 +91,46 @@ void sme2_fp32_planar_3x3_s2_4rows_mla_za_impl(
"ld1w { z12.s }, p1/Z, [x20, x15, LSL #2]\n"
"2:" // Load bias: Done
"ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x14, #0x1\n"
- "orr x24, x20, %x[ld_in_col], LSL #18\n"
+ "mov x23, #0x9\n"
+ "add x20, x17, x7\n"
"mov z13.d, z12.d\n"
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xa1404ae2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x23]\n"
- "orr x24, x16, x24, LSL #20\n"
- "mov x22, #0x9\n"
- "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
- "ld1w { z8.s }, p2/Z, [x23, #2, MUL VL]\n"
- "addvl x23, x23, #3\n"
- "add x21, x17, x7\n"
- ".inst 0xa0404ae0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x23]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
+ "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
"mov z14.d, z12.d\n"
"mov z15.d, z12.d\n"
- "ld1w { z5.s }, p2/Z, [x23, #2, MUL VL]\n"
- "addvl x23, x23, #3\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
"mov x8, #0x0\n"
+ "sub x23, x23, x20\n"
+ "sub x20, x14, #0x1\n"
"ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
- ".inst 0xa1404ae3 // ld1w { z3.s, z11.s }, pn10.b/Z, [x23]\n"
- "lsl x24, x24, #0x2\n"
- "sub x22, x22, x21\n"
- "ld1w { z6.s }, p2/Z, [x23, #2, MUL VL]\n"
- "madd x20, x20, x17, x13\n"
+ ".inst 0xa0404ac6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x22]\n"
+ "orr x20, x20, %x[ld_in_col], LSL #18\n"
+ "ld1w { z10.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "orr x20, x16, x20, LSL #20\n"
+ ".inst 0xa0404ac2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x22]\n"
+ "lsl x20, x20, #0x2\n"
+ "madd x21, x21, x17, x13\n"
+ "ld1w { z11.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ ".inst 0xa0404ac4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x22]\n"
+ "ld1w { z9.s }, p2/Z, [x22, #2, MUL VL]\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b84a9c // rprfm pldstrm, x24, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x23, x23, #0x1\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x13, x17, x20, x13\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
".inst 0xc0040d80 // mova za.d[x8, #0], { z12.d-z15.d }\n"
+ "mov x22, #0x2\n"
"ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x13, x17, x21, x13\n"
".inst 0xc0040d81 // mova za.d[x8, #1], { z12.d-z15.d }\n"
- "mov x22, #0x2\n"
- "ldp x10, x9, [x23], #0x10\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
+ "ldp x10, x9, [x23], #0x10\n"
"ldp x28, x27, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
"ldp x26, x25, [x23], #0x10\n"
"ldp x24, x23, [x20], #0x10\n"
"cbz x21, 5f\n"
@@ -142,9 +142,9 @@ void sme2_fp32_planar_3x3_s2_4rows_mla_za_impl(
".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
"and x22, x21, #0x1\n"
"add x21, x21, #0x1\n"
- ".inst 0xc1a9c8f4 // fclamp { z20.s-z23.s }, z7.s, z9.s\n"
"lsr x21, x21, #0x1\n"
"sub x11, x11, x21\n"
+ ".inst 0xc1a8cb94 // fclamp { z20.s-z23.s }, z28.s, z8.s\n"
"4:" // Left padding
"subs x21, x21, #0x1\n"
"st1w { z20.s }, p1, [x10]\n"
@@ -165,136 +165,136 @@ void sme2_fp32_planar_3x3_s2_4rows_mla_za_impl(
"beq 7f\n"
"6:" // Unpadded: 2 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x13]\n"
+ "ld1w { z21.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z30.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
+ "ld1w { z31.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z23.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
+ "ld1w { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1321a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z2.s\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
+ "ld1w { z1.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1301b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z0.s\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
- ".inst 0xc1331a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z3.s\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ ".inst 0xc1361aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z6.s\n"
+ ".inst 0xc1321bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z2.s\n"
+ ".inst 0xc1341ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z4.s\n"
"7:" // Unpadded: 1 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x13]\n"
+ "ld1w { z21.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ "ld1w { z30.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z23.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z31.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z29.s }, p1/Z, [x20]\n"
+ "ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z10.s\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1311a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z1.s\n"
- "ld1w { z30.s }, p1/Z, [x20]\n"
- ".inst 0xc13b1b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z11.s\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ ".inst 0xc1371aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z7.s\n"
+ ".inst 0xc1331ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z3.s\n"
+ ".inst 0xc1351ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z5.s\n"
"8:" // Unpadded: 0 priming loads
"cmp x14, #0x2\n"
"blt 16f\n"
"add x21, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x13]\n"
+ "ld1w { z29.s }, p1/Z, [x13]\n"
"sub x14, x14, #0x2\n"
- "ld1w { z19.s }, p1/Z, [x21]\n"
+ "ld1w { z22.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"sub x11, x11, #0x1\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "ld1w { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"lsr x20, x14, #0x1\n"
- "ld1w { z20.s }, p1/Z, [x21]\n"
+ "ld1w { z23.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"cmp x20, x11\n"
- "ld1w { z27.s }, p1/Z, [x21]\n"
+ "ld1w { z31.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"csel x22, x20, x11, LT\n"
- "ld1w { z21.s }, p1/Z, [x21]\n"
+ "ld1w { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z28.s }, p1/Z, [x21]\n"
+ "ld1w { z0.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"and x14, x14, #0x1\n"
- "ld1w { z22.s }, p1/Z, [x21]\n"
+ "ld1w { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"sub x11, x11, x22\n"
- "ld1w { z29.s }, p1/Z, [x21]\n"
+ "ld1w { z1.s }, p1/Z, [x21]\n"
"cbz x22, 15f\n"
"9:" // Unpadded: Main loop
- ".inst 0xc1381b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z8.s\n"
+ ".inst 0xc13a1ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z10.s\n"
"add x21, x13, %x[ld_in_row], LSL #2\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc1321b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z2.s\n"
- "ld1w { z25.s }, p1/Z, [x13]\n"
+ ".inst 0xc1361ba1 // fmla za.s[x8, 1], { z29.s-z0.s }, z6.s\n"
+ "ld1w { z29.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1351a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z5.s\n"
- ".inst 0xc1301a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z0.s\n"
- "ld1w { z18.s }, p1/Z, [x21]\n"
+ ".inst 0xc13b1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z11.s\n"
+ ".inst 0xc1321ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
+ "ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1361b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z6.s\n"
- ".inst 0xc1331b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z3.s\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ ".inst 0xc1391bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z9.s\n"
+ ".inst 0xc1341bc1 // fmla za.s[x8, 1], { z30.s-z1.s }, z4.s\n"
+ "ld1w { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x21]\n"
+ "ld1w { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
+ "ld1w { z31.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "ld1w { z27.s }, p1/Z, [x21]\n"
+ "ld1w { z0.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1a9c8f4 // fclamp { z20.s-z23.s }, z7.s, z9.s\n"
- "st1w { z20.s }, p1, [x10]\n"
- "ld1w { z20.s }, p1/Z, [x21]\n"
+ ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
+ "ld1w { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z1.s }, p1/Z, [x21]\n"
+ ".inst 0xc1371ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z7.s\n"
+ "ld1w { z29.s }, p1/Z, [x13]\n"
+ ".inst 0xc1a8cb98 // fclamp { z24.s-z27.s }, z28.s, z8.s\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc1331a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z3.s\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "st1w { z24.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- "st1w { z21.s }, p1, [x9]\n"
- "ld1w { z28.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z10.s\n"
+ "st1w { z25.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1311a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z1.s\n"
- "st1w { z22.s }, p1, [x26]\n"
- "ld1w { z29.s }, p1/Z, [x21]\n"
- ".inst 0xc13b1b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z11.s\n"
+ "st1w { z26.s }, p1, [x26]\n"
"add x26, x26, x24, LSL #2\n"
- "st1w { z23.s }, p1, [x25]\n"
- "ld1w { z25.s }, p1/Z, [x13]\n"
+ "st1w { z27.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
+ ".inst 0xc1351bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z5.s\n"
+ "ld1w { z30.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ "ld1w { z23.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
+ "ld1w { z31.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z29.s }, p1/Z, [x20]\n"
+ "ld1w { z1.s }, p1/Z, [x20]\n"
"bgt 9b\n"
"b 15f\n"
"10:" // Padded
@@ -304,323 +304,323 @@ void sme2_fp32_planar_3x3_s2_4rows_mla_za_impl(
"beq 12f\n"
"11:" // Padded: 2 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z27.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z23.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"ld1w { z24.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"ld1w { z25.s }, p0/Z, [x20]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- ".inst 0xc1321b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z2.s\n"
"ld1w { z26.s }, p0/Z, [x20]\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0xc1361ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z6.s\n"
+ "ld1w { z23.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1301ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z0.s\n"
- "ld1w { z31.s }, p0/Z, [x20]\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1331b80 // fmla za.s[x8, 0], { z28.s-z31.s }, z3.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
+ ".inst 0xc1321a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z2.s\n"
+ ".inst 0xc1341b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z4.s\n"
"12:" // Padded: 1 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z22.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z29.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- ".inst 0xc13a1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z10.s\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0xc1371ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z7.s\n"
+ "ld1w { z23.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1311b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z1.s\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13b1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z11.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z1.s }, p0/Z, [x20]\n"
+ ".inst 0xc1331a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z3.s\n"
+ ".inst 0xc1351bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z5.s\n"
"13:" // Padded: 0 priming loads
"cmp x14, #0x2\n"
"blt 16f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z25.s }, p0/Z, [x13]\n"
"add x21, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x14, x14, #0x2\n"
+ "sub x11, x11, #0x1\n"
+ "lsr x20, x14, #0x1\n"
+ "cmp x20, x11\n"
+ "and x14, x14, #0x1\n"
+ "ld1w { z29.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "csel x22, x20, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "sub x11, x11, x22\n"
+ "ld1w { z22.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z26.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z20.s }, p0/Z, [x21]\n"
- "mov x12, #0x4\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z23.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "sub x14, x14, #0x2\n"
- "ld1w { z21.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z28.s }, p0/Z, [x21]\n"
- "sub x11, x11, #0x1\n"
- "lsr x20, x14, #0x1\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x21]\n"
- "mov x12, #0x8\n"
- "cmp x20, x11\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ "ld1w { z25.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z29.s }, p0/Z, [x21]\n"
- "csel x22, x20, x11, LT\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- "and x14, x14, #0x1\n"
- "sub x11, x11, x22\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z1.s }, p0/Z, [x21]\n"
"cbz x22, 15f\n"
"14:" // Padded: Main loop
- ".inst 0xc1381b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z8.s\n"
"mov x12, #0x0\n"
+ ".inst 0xc13a1ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z10.s\n"
+ "add x21, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1321b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z2.s\n"
+ ".inst 0xc1361ba1 // fmla za.s[x8, 1], { z29.s-z0.s }, z6.s\n"
+ "subs x22, x22, #0x1\n"
"ld1w { z18.s }, p0/Z, [x13]\n"
- "add x21, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1351a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z5.s\n"
+ ".inst 0xc13b1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z11.s\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc1321ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1301a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z0.s\n"
- "ld1w { z25.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1361b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z6.s\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xc1331b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z3.s\n"
- "ld1w { z19.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1391bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z9.s\n"
+ ".inst 0xc1341bc1 // fmla za.s[x8, 1], { z30.s-z1.s }, z4.s\n"
+ "ld1w { z19.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x4\n"
- "ld1w { z26.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
+ "ld1w { z30.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1a9c8fc // fclamp { z28.s-z31.s }, z7.s, z9.s\n"
+ ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
+ "ld1w { z20.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- "st1w { z28.s }, p1, [x10]\n"
+ ".inst 0xc1a8cb98 // fclamp { z24.s-z27.s }, z28.s, z8.s\n"
+ "ld1w { z31.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z21.s }, p0/Z, [x21]\n"
- "add x8, x8, #0x1\n"
- "st1w { z29.s }, p1, [x9]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "st1w { z24.s }, p1, [x10]\n"
+ "add x10, x10, x28, LSL #2\n"
+ "st1w { z25.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z28.s }, p0/Z, [x21]\n"
- "st1w { z30.s }, p1, [x26]\n"
- "mov x12, #0x8\n"
- ".inst 0xc13a1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- "st1w { z31.s }, p1, [x25]\n"
+ "st1w { z26.s }, p1, [x26]\n"
+ "mov x12, #0x8\n"
+ "add x26, x26, x24, LSL #2\n"
+ "st1w { z27.s }, p1, [x25]\n"
+ "add x25, x25, x23, LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1311b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z1.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z7.s\n"
"mov x12, #0x0\n"
"ld1w { z22.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z25.s }, p0/Z, [x13]\n"
+ ".inst 0xc1331ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z3.s\n"
+ "ld1w { z29.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc13b1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z11.s\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc1351a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z5.s\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x4\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z23.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
- "add x10, x10, x28, LSL #2\n"
- "add x9, x9, x27, LSL #2\n"
- "add x26, x26, x24, LSL #2\n"
- "add x25, x25, x23, LSL #2\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z1.s }, p0/Z, [x20]\n"
"bgt 14b\n"
"15:" // Main loop tail
- ".inst 0xc1381b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z8.s\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1321b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z2.s\n"
- "ld1w { z18.s }, p0/Z, [x13]\n"
+ ".inst 0xc13a1ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z10.s\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xc1361ba1 // fmla za.s[x8, 1], { z29.s-z0.s }, z6.s\n"
+ "ld1w { z29.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1351a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z5.s\n"
+ ".inst 0xc13b1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z11.s\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1301a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z0.s\n"
+ ".inst 0xc1321ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z2.s\n"
"ld1w { z22.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1361b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z6.s\n"
- ".inst 0xc1331b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z3.s\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1391bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z9.s\n"
+ ".inst 0xc1341bc1 // fmla za.s[x8, 1], { z30.s-z1.s }, z4.s\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
"mov x12, #0x4\n"
+ ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
"ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1a9c8fc // fclamp { z28.s-z31.s }, z7.s, z9.s\n"
+ ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z28.s }, p1, [x10]\n"
+ ".inst 0xc1a8cb90 // fclamp { z16.s-z19.s }, z28.s, z8.s\n"
+ "ld1w { z24.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x8, x8, #0x1\n"
- "st1w { z29.s }, p1, [x9]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "st1w { z30.s }, p1, [x26]\n"
- "mov x12, #0x8\n"
- ".inst 0xc13a1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z31.s }, p1, [x25]\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1311ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z1.s\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
+ "st1w { z16.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
+ "st1w { z17.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "st1w { z18.s }, p1, [x26]\n"
+ "mov x12, #0x8\n"
"add x26, x26, x24, LSL #2\n"
- ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
+ "st1w { z19.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- ".inst 0xc13b1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z11.s\n"
+ ".inst 0xc1371ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z7.s\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z1.s }, p0/Z, [x20]\n"
+ ".inst 0xc1331ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z3.s\n"
+ ".inst 0xc1351bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z5.s\n"
"16:" // Main loop skip tail
"cbz x14, 17f\n" // Skip remainder inputs
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z21.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x11, x11, #0x1\n"
+ "ld1w { z23.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z24.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- ".inst 0xc1381aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z8.s\n"
- "ld1w { z31.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0xc13a1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z10.s\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1351b80 // fmla za.s[x8, 0], { z28.s-z31.s }, z5.s\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- ".inst 0xc1321aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z2.s\n"
- "sub x11, x11, #0x1\n"
- ".inst 0xc1361ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z6.s\n"
- ".inst 0xc1301b81 // fmla za.s[x8, 1], { z28.s-z31.s }, z0.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1361ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z6.s\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
+ ".inst 0xc13b1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc1321a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z2.s\n"
+ ".inst 0xc1391b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z9.s\n"
+ ".inst 0xc1341b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z4.s\n"
".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1a9c8f0 // fclamp { z16.s-z19.s }, z7.s, z9.s\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
+ ".inst 0xc1a8cb90 // fclamp { z16.s-z19.s }, z28.s, z8.s\n"
"st1w { z16.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- ".inst 0xc1331ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z3.s\n"
- "add x8, x8, #0x1\n"
"st1w { z17.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
"st1w { z18.s }, p1, [x26]\n"
"add x26, x26, x24, LSL #2\n"
- ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
"st1w { z19.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
"17:" // Tail input: End
"cbz x11, 19f\n"
"18:" // Right padding loop
- ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060c14 // mova { z20.d-z23.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"subs x11, x11, #0x1\n"
- ".inst 0xc1a9c8e0 // fclamp { z0.s-z3.s }, z7.s, z9.s\n"
- "st1w { z0.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
- "st1w { z1.s }, p1, [x9]\n"
+ ".inst 0xc1a8cb94 // fclamp { z20.s-z23.s }, z28.s, z8.s\n"
+ "st1w { z20.s }, p1, [x10]\n"
+ "add x10, x10, x28, LSL #2\n"
+ "st1w { z21.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z2.s }, p1, [x26]\n"
+ "st1w { z22.s }, p1, [x26]\n"
"add x26, x26, x24, LSL #2\n"
- "st1w { z3.s }, p1, [x25]\n"
+ "st1w { z23.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
"bgt 18b\n"
"19:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x15\n"
+ "whilelt p1.s, x15, x16\n"
"incb x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x15\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x15, x16\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21, LSL #2\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp
index 3741b973b4..d59a2e5c6a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s1_4rows_mla_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,67 +72,67 @@ void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
"ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"mov x20, #0x8\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x6\n"
"ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ld1rw { z16.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rw { z23.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+ "sub x20, x20, x6\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z29.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p1.s, XZR, x17\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z17.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p8.s, XZR, x7\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
"ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
- "fmov z28.s, #0x0\n"
+ "fmov z24.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x20, x16, LSL #2]\n"
"2:" // Load bias: Done
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
- "mov z29.d, z28.d\n"
- "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
- "orr x23, x17, x23, LSL #20\n"
"mov x22, #0x8\n"
+ "add x20, x7, x6\n"
+ "mov z25.d, z24.d\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ "mov z26.d, z24.d\n"
+ "mov z27.d, z24.d\n"
"ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "ld1w { z2.s }, p2/Z, [x14, #4, MUL VL]\n"
- "addvl x14, x14, #5\n"
- "mov z30.d, z28.d\n"
- "mov z31.d, z28.d\n"
- ".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
"mov x8, #0x0\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x15, #0x1\n"
"ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
+ "orr x20, x20, %x[ld_in_col], LSL #18\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "orr x20, x17, x20, LSL #20\n"
+ "madd x21, x21, x7, x13\n"
+ "ld1w { z7.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "addvl x14, x14, #5\n"
+ "lsl x20, x20, #0x2\n"
+ ".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x13\n"
- "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z6.s }, p2/Z, [x14, #4, MUL VL]\n"
"addvl x14, x14, #5\n"
"3:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
"ldr x22, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x13, x7, x20, x13\n"
- ".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ ".inst 0xc0040f00 // mova za.d[x8, #0], { z24.d-z27.d }\n"
"mov x10, #0x4\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x13, x7, x21, x13\n"
+ ".inst 0xc0040f01 // mova za.d[x8, #1], { z24.d-z27.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
"ldp x9, x28, [x22], #0x10\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
+ ".inst 0xc0040f03 // mova za.d[x8, #3], { z24.d-z27.d }\n"
"ldp x27, x26, [x20], #0x10\n"
- ".inst 0xc0040f83 // mova za.d[x8, #3], { z28.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
"ldp x25, x24, [x22], #0x10\n"
"ldp x23, x22, [x20], #0x10\n"
"cbz x21, 5f\n"
@@ -141,18 +141,18 @@ void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
"sub x21, x21, x20\n"
"sub x10, x10, x20\n"
"cbz x21, 5f\n"
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
"sub x11, x11, x21\n"
- ".inst 0xc1b1ca04 // fclamp { z4.s-z7.s }, z16.s, z17.s\n"
+ ".inst 0xc1bdcae0 // fclamp { z0.s-z3.s }, z23.s, z29.s\n"
"4:" // Left padding
"subs x21, x21, #0x1\n"
- "st1w { z4.s }, p1, [x9]\n"
+ "st1w { z0.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z5.s }, p1, [x28]\n"
+ "st1w { z1.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z6.s }, p1, [x25]\n"
+ "st1w { z2.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- "st1w { z7.s }, p1, [x24]\n"
+ "st1w { z3.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
@@ -168,275 +168,275 @@ void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
"beq 7f\n"
"6:" // Unpadded: 4 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x13]\n"
+ "ld1w { z31.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
+ "ld1w { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ "ld1w { z1.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z2.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z14.s\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z3.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z10.s\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z4.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1351be0 // fmla za.s[x8, 0], { z31.s-z2.s }, z5.s\n"
+ ".inst 0xa04049c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc1351a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z5.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z5.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z6.s }, p1/Z, [x20]\n"
+ ".inst 0xc13a1800 // fmla za.s[x8, 0], { z0.s-z3.s }, z10.s\n"
".inst 0xa04049cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc13c1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z12.s\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1381820 // fmla za.s[x8, 0], { z1.s-z4.s }, z8.s\n"
+ ".inst 0xa04049c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1311ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z1.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13c1840 // fmla za.s[x8, 0], { z2.s-z5.s }, z12.s\n"
+ ".inst 0xc1381860 // fmla za.s[x8, 0], { z3.s-z6.s }, z8.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
"7:" // Unpadded: 3 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x13]\n"
+ "ld1w { z15.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13f1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z15.s\n"
- ".inst 0xc13e1a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z14.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13b1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z11.s\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z10.s\n"
- ".inst 0xa04049c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14]\n"
- "addvl x14, x14, #5\n"
- ".inst 0xc1371ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z7.s\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
+ ".inst 0xc13d19e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z13.s\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1361ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z6.s\n"
+ ".inst 0xc13519e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z5.s\n"
".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc13d1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z13.s\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
- ".inst 0xc1351ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z5.s\n"
- ".inst 0xa04049c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14]\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
+ ".inst 0xc13b1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc13a1a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z10.s\n"
+ ".inst 0xa04049c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc13d1a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z13.s\n"
+ ".inst 0xc1351a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z5.s\n"
+ ".inst 0xa14049c7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1371b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z7.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc1361b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z6.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1331a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z3.s\n"
+ ".inst 0xa04149c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1321a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z2.s\n"
+ ".inst 0xc13f1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z15.s\n"
+ ".inst 0xc1371a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z7.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
"addvl x14, x14, #5\n"
"8:" // Unpadded: 2 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z1.s }, p1/Z, [x13]\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z2.s }, p1/Z, [x20]\n"
+ "ld1w { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z3.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z4.s }, p1/Z, [x20]\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13c1820 // fmla za.s[x8, 0], { z1.s-z4.s }, z12.s\n"
- ".inst 0xc13f1821 // fmla za.s[x8, 1], { z1.s-z4.s }, z15.s\n"
- "ld1w { z5.s }, p1/Z, [x20]\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1822 // fmla za.s[x8, 2], { z1.s-z4.s }, z14.s\n"
- "ld1w { z6.s }, p1/Z, [x20]\n"
+ "ld1w { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1381840 // fmla za.s[x8, 0], { z2.s-z5.s }, z8.s\n"
- ".inst 0xa04049cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14]\n"
- ".inst 0xc13b1841 // fmla za.s[x8, 1], { z2.s-z5.s }, z11.s\n"
- ".inst 0xa04149ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "addvl x14, x14, #5\n"
- ".inst 0xc13a1842 // fmla za.s[x8, 2], { z2.s-z5.s }, z10.s\n"
- "ld1w { z7.s }, p1/Z, [x20]\n"
+ ".inst 0xc13419c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z4.s\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13d19c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z13.s\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1860 // fmla za.s[x8, 0], { z3.s-z6.s }, z14.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
- ".inst 0xc13d1861 // fmla za.s[x8, 1], { z3.s-z6.s }, z13.s\n"
- ".inst 0xa14149c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13519c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z5.s\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc13c1862 // fmla za.s[x8, 2], { z3.s-z6.s }, z12.s\n"
- "ld1w { z8.s }, p1/Z, [x20]\n"
- ".inst 0xc1301880 // fmla za.s[x8, 0], { z4.s-z7.s }, z0.s\n"
- ".inst 0xa04049c0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x14]\n"
- ".inst 0xc13f1881 // fmla za.s[x8, 1], { z4.s-z7.s }, z15.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
+ ".inst 0xc13819e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z8.s\n"
+ ".inst 0xa14149c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13b19e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z11.s\n"
+ ".inst 0xc13a19e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z10.s\n"
+ ".inst 0xa14049c6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc1321a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z2.s\n"
+ ".inst 0xa04149ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1391a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z9.s\n"
+ ".inst 0xc1311a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z1.s\n"
+ ".inst 0xa14049c7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13e1882 // fmla za.s[x8, 2], { z4.s-z7.s }, z14.s\n"
- ".inst 0xc13c18a0 // fmla za.s[x8, 0], { z5.s-z8.s }, z12.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13118a1 // fmla za.s[x8, 1], { z5.s-z8.s }, z1.s\n"
- ".inst 0xc13018a2 // fmla za.s[x8, 2], { z5.s-z8.s }, z0.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1351a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z5.s\n"
+ ".inst 0xc13e1a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z14.s\n"
+ ".inst 0xc1361a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
+ ".inst 0xc13a1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z10.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13f1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z15.s\n"
+ ".inst 0xc1371a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z7.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
"addvl x14, x14, #5\n"
"9:" // Unpadded: 1 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x13]\n"
+ "ld1w { z15.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13d1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z13.s\n"
- ".inst 0xc13c1a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z12.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13f1a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z15.s\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1a83 // fmla za.s[x8, 3], { z20.s-z23.s }, z14.s\n"
- ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
- ".inst 0xc1391aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z9.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "addvl x14, x14, #5\n"
- ".inst 0xc1381aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z8.s\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
+ ".inst 0xc13c19e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z12.s\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13b1aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z11.s\n"
- ".inst 0xa14149c6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13a1aa3 // fmla za.s[x8, 3], { z21.s-z24.s }, z10.s\n"
- ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13419e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z4.s\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13d19e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z13.s\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
+ ".inst 0xc13519e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z5.s\n"
+ ".inst 0xa14049c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc1391a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z9.s\n"
+ ".inst 0xc1381a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z8.s\n"
+ ".inst 0xa14149c7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13b1a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc13a1a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z10.s\n"
+ ".inst 0xa04049c0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc13d1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z13.s\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
- ".inst 0xc13c1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z12.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc1351ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z5.s\n"
- ".inst 0xc1341ac3 // fmla za.s[x8, 3], { z22.s-z25.s }, z4.s\n"
- ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1331a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z3.s\n"
+ ".inst 0xc1321a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z2.s\n"
+ ".inst 0xa14149c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13c1a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z12.s\n"
+ ".inst 0xc1341a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z4.s\n"
+ ".inst 0xa14049c6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13e1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z14.s\n"
- "ld1w { z2.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1361ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z6.s\n"
- ".inst 0xc1391ae2 // fmla za.s[x8, 2], { z23.s-z26.s }, z9.s\n"
- ".inst 0xc1311ae3 // fmla za.s[x8, 3], { z23.s-z26.s }, z1.s\n"
- ".inst 0xc13d1b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z13.s\n"
- ".inst 0xc13c1b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z12.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc1351b02 // fmla za.s[x8, 2], { z24.s-z27.s }, z5.s\n"
- ".inst 0xc1341b03 // fmla za.s[x8, 3], { z24.s-z27.s }, z4.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13f1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z15.s\n"
+ ".inst 0xc1371a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z7.s\n"
+ ".inst 0xc1311a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z1.s\n"
+ "ld1w { z7.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc1301a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z0.s\n"
+ ".inst 0xc13d1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z13.s\n"
+ ".inst 0xc1351a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z5.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13e1a62 // fmla za.s[x8, 2], { z19.s-z22.s }, z14.s\n"
+ ".inst 0xc1361a63 // fmla za.s[x8, 3], { z19.s-z22.s }, z6.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z6.s }, p2/Z, [x14, #4, MUL VL]\n"
"addvl x14, x14, #5\n"
"10:" // Unpadded: 0 priming loads
"cbz x15, 20f\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x13]\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
"sub x15, x15, #0x1\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
+ "ld1w { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"sub x11, x11, #0x1\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"cmp x15, x11\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"csel x21, x15, x11, LT\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"sub x11, x11, x21\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"cbz x21, 19f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc1321a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z2.s\n"
- "ld1w { z6.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc13719c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z7.s\n"
+ "ld1w { z0.s }, p2/Z, [x14, #4, MUL VL]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
"subs x21, x21, #0x1\n"
- ".inst 0xc1331a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z3.s\n"
- ".inst 0xc13d1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z13.s\n"
- ".inst 0xc13c1a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z12.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13f1a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z15.s\n"
- ".inst 0xc13e1a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z14.s\n"
- ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13c19c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z12.s\n"
+ ".inst 0xc13419c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z4.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13d19c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z13.s\n"
+ ".inst 0xc13619e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z6.s\n"
+ ".inst 0xc13519c4 // fmla za.s[x8, 4], { z14.s-z17.s }, z5.s\n"
+ ".inst 0xa04049c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc1361a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z6.s\n"
+ ".inst 0xc13919e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z9.s\n"
"ld1w { z6.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1391a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z9.s\n"
- "ld1w { z18.s }, p1/Z, [x13]\n"
+ ".inst 0xc13819e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z8.s\n"
+ ".inst 0xa14149c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1301a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z0.s\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1381a62 // fmla za.s[x8, 2], { z19.s-z22.s }, z8.s\n"
- ".inst 0xa04149ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13b1a63 // fmla za.s[x8, 3], { z19.s-z22.s }, z11.s\n"
- ".inst 0xc13a1a64 // fmla za.s[x8, 4], { z19.s-z22.s }, z10.s\n"
- ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13b19e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z11.s\n"
+ ".inst 0xc13a19e4 // fmla za.s[x8, 4], { z15.s-z18.s }, z10.s\n"
+ ".inst 0xa04049c0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc1361aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z6.s\n"
- "ld1w { z2.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc13d1a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z13.s\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
+ ".inst 0xc13c1a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z12.s\n"
+ "ld1w { z8.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc1361a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z6.s\n"
+ "ld1w { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13c1a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z12.s\n"
- ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc1351a83 // fmla za.s[x8, 3], { z20.s-z23.s }, z5.s\n"
- ".inst 0xc1341a84 // fmla za.s[x8, 4], { z20.s-z23.s }, z4.s\n"
- ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1341a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z4.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1331a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z3.s\n"
+ ".inst 0xc1321a04 // fmla za.s[x8, 4], { z16.s-z19.s }, z2.s\n"
+ ".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1321ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z2.s\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ ".inst 0xc1381a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z8.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13f1aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z15.s\n"
- "ld1w { z2.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc13e1aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z14.s\n"
- ".inst 0xc1381aa3 // fmla za.s[x8, 3], { z21.s-z24.s }, z8.s\n"
- ".inst 0xc1301aa4 // fmla za.s[x8, 4], { z21.s-z24.s }, z0.s\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ ".inst 0xc13d1a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z13.s\n"
+ ".inst 0xc1351a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z5.s\n"
+ "ld1w { z7.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc1311a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z1.s\n"
+ ".inst 0xc1301a24 // fmla za.s[x8, 4], { z17.s-z20.s }, z0.s\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
- ".inst 0xc1b1ca0c // fclamp { z12.s-z15.s }, z16.s, z17.s\n"
- "st1w { z12.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc1371ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z7.s\n"
- "st1w { z13.s }, p1, [x28]\n"
- "add x28, x28, x26, LSL #2\n"
- ".inst 0xc1361ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z6.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "st1w { z14.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc1351ac3 // fmla za.s[x8, 3], { z22.s-z25.s }, z5.s\n"
- "st1w { z15.s }, p1, [x24]\n"
- "add x24, x24, x22, LSL #2\n"
- ".inst 0xc1341ac4 // fmla za.s[x8, 4], { z22.s-z25.s }, z4.s\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
+ ".inst 0xc13c1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z12.s\n"
+ ".inst 0xc1341a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z4.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13b1a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z11.s\n"
+ ".inst 0xc13a1a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z10.s\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"add x8, x8, #0x1\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1bdcae0 // fclamp { z0.s-z3.s }, z23.s, z29.s\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "st1w { z0.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ "ld1w { z6.s }, p2/Z, [x14, #4, MUL VL]\n"
"addvl x14, x14, #5\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
+ "st1w { z1.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z2.s }, p1, [x25]\n"
+ "add x25, x25, x23, LSL #2\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
+ "st1w { z3.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"bgt 11b\n"
"b 19f\n"
"12:" // Padded
@@ -450,395 +450,395 @@ void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
"beq 14f\n"
"13:" // Padded: 4 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z19.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z14.s\n"
- ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "mov x12, #0x4\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc13519c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z5.s\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1311aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z1.s\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "addvl x14, x14, #5\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
".inst 0xa04049c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1361ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z6.s\n"
+ ".inst 0xc13a19e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z10.s\n"
+ ".inst 0xa14049c7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
+ "ld1w { z20.s }, p0/Z, [x20]\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1361a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z6.s\n"
+ ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1301ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z0.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
+ ".inst 0xc1371a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z7.s\n"
+ ".inst 0xc1311a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z1.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
"14:" // Padded: 3 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z0.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z15.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z1.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z2.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z3.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13f1800 // fmla za.s[x8, 0], { z0.s-z3.s }, z15.s\n"
+ "mov x12, #0x4\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13e1801 // fmla za.s[x8, 1], { z0.s-z3.s }, z14.s\n"
- "ld1w { z4.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13d19e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z13.s\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc13b1820 // fmla za.s[x8, 0], { z1.s-z4.s }, z11.s\n"
- "ld1w { z5.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1821 // fmla za.s[x8, 1], { z1.s-z4.s }, z10.s\n"
- ".inst 0xa04049c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc13519e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z5.s\n"
+ ".inst 0xa14049c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc1391840 // fmla za.s[x8, 0], { z2.s-z5.s }, z9.s\n"
- "ld1w { z6.s }, p0/Z, [x20]\n"
+ "ld1w { z20.s }, p0/Z, [x20]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1381841 // fmla za.s[x8, 1], { z2.s-z5.s }, z8.s\n"
+ ".inst 0xc13b1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc13a1a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z10.s\n"
".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13f1860 // fmla za.s[x8, 0], { z3.s-z6.s }, z15.s\n"
- "ld1w { z7.s }, p0/Z, [x20]\n"
- ".inst 0xc13e1861 // fmla za.s[x8, 1], { z3.s-z6.s }, z14.s\n"
- ".inst 0xa14049c3 // ld1w { z3.s, z11.s }, pn10.b/Z, [x14]\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13c1a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z12.s\n"
+ ".inst 0xc1341a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z4.s\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
+ ".inst 0xa04049cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13b1880 // fmla za.s[x8, 0], { z4.s-z7.s }, z11.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc1331881 // fmla za.s[x8, 1], { z4.s-z7.s }, z3.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13f1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z15.s\n"
+ ".inst 0xc13e1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z14.s\n"
+ ".inst 0xa04149c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13d1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z13.s\n"
+ ".inst 0xc13c1a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z12.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
"addvl x14, x14, #5\n"
"15:" // Padded: 2 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z19.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z15.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13c1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z12.s\n"
- ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ "mov x12, #0x4\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13f1a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z15.s\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1a62 // fmla za.s[x8, 2], { z19.s-z22.s }, z14.s\n"
+ ".inst 0xc13419e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z4.s\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1381a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z8.s\n"
- ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc13d19e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z13.s\n"
+ ".inst 0xa04149c0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13519e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z5.s\n"
+ ".inst 0xa04049c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc13b1a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z11.s\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
+ "ld1w { z20.s }, p0/Z, [x20]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1381a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z8.s\n"
+ ".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13b1a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc13a1a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z10.s\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc13a1a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z10.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xa14049c2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x14]\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1361aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z6.s\n"
- ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc1381aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z8.s\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- ".inst 0xc1301aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z0.s\n"
- ".inst 0xa04049c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14]\n"
- ".inst 0xc1341ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z4.s\n"
- ".inst 0xa14149c3 // ld1w { z3.s, z11.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13a1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z10.s\n"
- ".inst 0xc1321ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z2.s\n"
- ".inst 0xc1331ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z3.s\n"
+ ".inst 0xc1301a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z0.s\n"
".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc1371ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z7.s\n"
- ".inst 0xc1361ae2 // fmla za.s[x8, 2], { z23.s-z26.s }, z6.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1371a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z7.s\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
+ ".inst 0xc1361a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
+ ".inst 0xa04049c0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x14]\n"
+ "ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xc1381a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z8.s\n"
+ ".inst 0xc13a1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z10.s\n"
+ ".inst 0xc1321a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z2.s\n"
+ ".inst 0xc13c1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z12.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1311a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z1.s\n"
+ ".inst 0xc1301a62 // fmla za.s[x8, 2], { z19.s-z22.s }, z0.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
"addvl x14, x14, #5\n"
"16:" // Padded: 1 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13d1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z13.s\n"
+ "mov x12, #0x4\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13c1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z12.s\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13f1a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z15.s\n"
+ ".inst 0xc13c19c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z12.s\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z14.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13419c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z4.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13d19c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z13.s\n"
+ ".inst 0xc13519c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z5.s\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1391a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z9.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "addvl x14, x14, #5\n"
- ".inst 0xc1381a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z8.s\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13919e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z9.s\n"
+ ".inst 0xa04049c0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc13819e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z8.s\n"
+ ".inst 0xa04149c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13b19e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z11.s\n"
+ "ld1w { z20.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc13b1a62 // fmla za.s[x8, 2], { z19.s-z22.s }, z11.s\n"
- ".inst 0xa14149c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13a1a63 // fmla za.s[x8, 3], { z19.s-z22.s }, z10.s\n"
- ".inst 0xa14049c1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x14]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13a19e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z10.s\n"
+ ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc13d1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z13.s\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- ".inst 0xc13c1a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z12.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13f1a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z15.s\n"
- ".inst 0xc13e1a83 // fmla za.s[x8, 3], { z20.s-z23.s }, z14.s\n"
- ".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13c1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z12.s\n"
+ ".inst 0xc1341a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z4.s\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
+ ".inst 0xc1311a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z1.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1301a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z0.s\n"
+ ".inst 0xa04049c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1381aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z8.s\n"
- "ld1w { z2.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1301aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z0.s\n"
- ".inst 0xc1391aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z9.s\n"
- ".inst 0xc1311aa3 // fmla za.s[x8, 3], { z21.s-z24.s }, z1.s\n"
- ".inst 0xc13d1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z13.s\n"
- ".inst 0xc13c1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z12.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13b1ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z11.s\n"
- ".inst 0xc13a1ac3 // fmla za.s[x8, 3], { z22.s-z25.s }, z10.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc1371a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z7.s\n"
+ ".inst 0xc1361a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z6.s\n"
+ ".inst 0xc13f1a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z15.s\n"
+ "ld1w { z7.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc13e1a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z14.s\n"
+ ".inst 0xc13c1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z12.s\n"
+ ".inst 0xc1341a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z4.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1331a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z3.s\n"
+ ".inst 0xc1321a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z2.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z6.s }, p2/Z, [x14, #4, MUL VL]\n"
"addvl x14, x14, #5\n"
"17:" // Padded: 0 priming loads
"cbz x15, 20f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x13]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x15, x15, #0x1\n"
+ "sub x11, x11, #0x1\n"
+ "cmp x15, x11\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "csel x21, x15, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "sub x11, x11, x21\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "sub x15, x15, #0x1\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "sub x11, x11, #0x1\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "cmp x15, x11\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z20.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "csel x21, x15, x11, LT\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- "sub x11, x11, x21\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
"cbz x21, 19f\n"
"18:" // Padded: Main loop
- ".inst 0xc1321a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z2.s\n"
- "ld1w { z0.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc13719c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z7.s\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1331a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z3.s\n"
+ "ld1w { z7.s }, p2/Z, [x14, #4, MUL VL]\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13c19c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z12.s\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"subs x21, x21, #0x1\n"
- ".inst 0xc13d1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z13.s\n"
- ".inst 0xc13c1a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z12.s\n"
- ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13f1a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z15.s\n"
- ".inst 0xc13e1a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z14.s\n"
- ".inst 0xa04049c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14]\n"
- "addvl x14, x14, #5\n"
- ".inst 0xc1301a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z0.s\n"
- "ld1w { z12.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1391a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z9.s\n"
- "ld1w { z18.s }, p0/Z, [x13]\n"
+ ".inst 0xc13419c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z4.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13d19c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z13.s\n"
+ ".inst 0xc13519c4 // fmla za.s[x8, 4], { z14.s-z17.s }, z5.s\n"
+ "ld1w { z14.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1381a62 // fmla za.s[x8, 2], { z19.s-z22.s }, z8.s\n"
- ".inst 0xa14149c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13b1a63 // fmla za.s[x8, 3], { z19.s-z22.s }, z11.s\n"
- ".inst 0xc13a1a64 // fmla za.s[x8, 4], { z19.s-z22.s }, z10.s\n"
- ".inst 0xa04049c6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13619e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z6.s\n"
+ ".inst 0xa04049c0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc13c1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1331a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z3.s\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13919e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc13819e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z8.s\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13b19e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z11.s\n"
+ ".inst 0xc13a19e4 // fmla za.s[x8, 4], { z15.s-z18.s }, z10.s\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1321a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z2.s\n"
- ".inst 0xa14149c3 // ld1w { z3.s, z11.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc1351a83 // fmla za.s[x8, 3], { z20.s-z23.s }, z5.s\n"
- ".inst 0xc1341a84 // fmla za.s[x8, 4], { z20.s-z23.s }, z4.s\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z7.s\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ ".inst 0xc13c1a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc1341a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z4.s\n"
+ ".inst 0xa04149ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1311a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1301a04 // fmla za.s[x8, 4], { z16.s-z19.s }, z0.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x4\n"
- ".inst 0xc13c1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z12.s\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1391a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z9.s\n"
+ "mov x12, #0x4\n"
+ ".inst 0xa14049c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14]\n"
"ldr x14, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1381aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z8.s\n"
- "ld1w { z2.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1301aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z0.s\n"
- ".inst 0xc1371aa3 // fmla za.s[x8, 3], { z21.s-z24.s }, z7.s\n"
- ".inst 0xc1361aa4 // fmla za.s[x8, 4], { z21.s-z24.s }, z6.s\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1331a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z3.s\n"
+ ".inst 0xc1321a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z2.s\n"
+ ".inst 0xc13d1a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z13.s\n"
+ ".inst 0xc1351a24 // fmla za.s[x8, 4], { z17.s-z20.s }, z5.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
- ".inst 0xc1b1ca04 // fclamp { z4.s-z7.s }, z16.s, z17.s\n"
- "st1w { z4.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc13b1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z11.s\n"
- "st1w { z5.s }, p1, [x28]\n"
- "add x28, x28, x26, LSL #2\n"
- ".inst 0xc1331ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z3.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "st1w { z6.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0xc13f1ac3 // fmla za.s[x8, 3], { z22.s-z25.s }, z15.s\n"
- "st1w { z7.s }, p1, [x24]\n"
- "add x24, x24, x22, LSL #2\n"
- ".inst 0xc13e1ac4 // fmla za.s[x8, 4], { z22.s-z25.s }, z14.s\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13c1a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z12.s\n"
+ "ld1w { z7.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc13b1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z11.s\n"
+ ".inst 0xc13a1a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z10.s\n"
+ ".inst 0xa14149c4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1381a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z8.s\n"
+ ".inst 0xc1301a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z0.s\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa04049ce // ld1w { z14.s-z15.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
+ ".inst 0xa14049c5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
"add x8, x8, #0x1\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z6.s }, p2/Z, [x14, #4, MUL VL]\n"
"addvl x14, x14, #5\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
+ ".inst 0xc1bdcae0 // fclamp { z0.s-z3.s }, z23.s, z29.s\n"
+ "ld1w { z20.s }, p0/Z, [x20]\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "st1w { z0.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ "ld1w { z21.s }, p0/Z, [x20]\n"
+ "st1w { z1.s }, p1, [x28]\n"
+ "add x28, x28, x26, LSL #2\n"
+ "st1w { z2.s }, p1, [x25]\n"
+ "add x25, x25, x23, LSL #2\n"
+ "st1w { z3.s }, p1, [x24]\n"
+ "add x24, x24, x22, LSL #2\n"
"bgt 18b\n"
"19:" // Main loop tail
- ".inst 0xc1321a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z2.s\n"
- "ld1w { z6.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1331a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z3.s\n"
- ".inst 0xc13d1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z13.s\n"
- ".inst 0xc13c1a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z12.s\n"
- ".inst 0xa04149c4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13f1a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z15.s\n"
- ".inst 0xc13e1a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z14.s\n"
- ".inst 0xa04049c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14]\n"
- "addvl x14, x14, #5\n"
- ".inst 0xc1361a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z6.s\n"
+ ".inst 0xc13719c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z7.s\n"
"ld1w { z7.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1391a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z9.s\n"
- ".inst 0xc1381a62 // fmla za.s[x8, 2], { z19.s-z22.s }, z8.s\n"
- ".inst 0xa14149c0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc13b1a63 // fmla za.s[x8, 3], { z19.s-z22.s }, z11.s\n"
- ".inst 0xc13a1a64 // fmla za.s[x8, 4], { z19.s-z22.s }, z10.s\n"
- ".inst 0xa04049ca // ld1w { z10.s-z11.s }, pn10.b/Z, [x14]\n"
+ ".inst 0xc13c19c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z12.s\n"
+ ".inst 0xc13419c2 // fmla za.s[x8, 2], { z14.s-z17.s }, z4.s\n"
+ ".inst 0xa04149c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13d19c3 // fmla za.s[x8, 3], { z14.s-z17.s }, z13.s\n"
+ ".inst 0xc13619e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z6.s\n"
+ ".inst 0xc13519c4 // fmla za.s[x8, 4], { z14.s-z17.s }, z5.s\n"
+ ".inst 0xa14049c6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x14]\n"
+ "addvl x14, x14, #5\n"
+ "ld1w { z0.s }, p2/Z, [x14, #4, MUL VL]\n"
+ ".inst 0xc13919e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z9.s\n"
+ ".inst 0xc13819e2 // fmla za.s[x8, 2], { z15.s-z18.s }, z8.s\n"
+ ".inst 0xa04149c8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc1371a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z7.s\n"
+ ".inst 0xc13b19e3 // fmla za.s[x8, 3], { z15.s-z18.s }, z11.s\n"
+ ".inst 0xc13a19e4 // fmla za.s[x8, 4], { z15.s-z18.s }, z10.s\n"
+ ".inst 0xa04049cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14]\n"
"addvl x14, x14, #5\n"
- ".inst 0xc1371aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z7.s\n"
"ld1w { z1.s }, p2/Z, [x14, #4, MUL VL]\n"
- ".inst 0xc1351a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z5.s\n"
- ".inst 0xc1341a82 // fmla za.s[x8, 2], { z20.s-z23.s }, z4.s\n"
- ".inst 0xa04149cc // ld1w { z12.s-z13.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
- ".inst 0xc1331a83 // fmla za.s[x8, 3], { z20.s-z23.s }, z3.s\n"
- ".inst 0xc1321a84 // fmla za.s[x8, 4], { z20.s-z23.s }, z2.s\n"
+ ".inst 0xc1331a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z3.s\n"
+ ".inst 0xc1301a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z0.s\n"
+ ".inst 0xc1321a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z2.s\n"
+ ".inst 0xa14149c7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x14, #0x2, MUL VL]\n"
+ ".inst 0xc13e1a03 // fmla za.s[x8, 3], { z16.s-z19.s }, z14.s\n"
+ ".inst 0xc1361a04 // fmla za.s[x8, 4], { z16.s-z19.s }, z6.s\n"
".inst 0xa04049c2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x14]\n"
- ".inst 0xc1311ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z1.s\n"
- ".inst 0xc1381aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z8.s\n"
- ".inst 0xc1301aa2 // fmla za.s[x8, 2], { z21.s-z24.s }, z0.s\n"
- ".inst 0xc13b1aa3 // fmla za.s[x8, 3], { z21.s-z24.s }, z11.s\n"
- ".inst 0xc13a1aa4 // fmla za.s[x8, 4], { z21.s-z24.s }, z10.s\n"
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
- ".inst 0xc1b1ca04 // fclamp { z4.s-z7.s }, z16.s, z17.s\n"
- "st1w { z4.s }, p1, [x9]\n"
+ ".inst 0xc1311a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z1.s\n"
+ ".inst 0xc1391a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z9.s\n"
+ ".inst 0xc1381a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z8.s\n"
+ ".inst 0xc13d1a23 // fmla za.s[x8, 3], { z17.s-z20.s }, z13.s\n"
+ ".inst 0xc13c1a24 // fmla za.s[x8, 4], { z17.s-z20.s }, z12.s\n"
+ ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
+ ".inst 0xc13f1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z15.s\n"
+ ".inst 0xc1371a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z7.s\n"
+ ".inst 0xc1331a43 // fmla za.s[x8, 3], { z18.s-z21.s }, z3.s\n"
+ ".inst 0xc1321a44 // fmla za.s[x8, 4], { z18.s-z21.s }, z2.s\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc1bdcae8 // fclamp { z8.s-z11.s }, z23.s, z29.s\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
+ "st1w { z8.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc13d1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z13.s\n"
- "st1w { z5.s }, p1, [x28]\n"
+ "st1w { z9.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- ".inst 0xc13c1ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z12.s\n"
- "st1w { z6.s }, p1, [x25]\n"
+ "st1w { z10.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- ".inst 0xc1331ac3 // fmla za.s[x8, 3], { z22.s-z25.s }, z3.s\n"
- "st1w { z7.s }, p1, [x24]\n"
+ "st1w { z11.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
- ".inst 0xc1321ac4 // fmla za.s[x8, 4], { z22.s-z25.s }, z2.s\n"
- "add x8, x8, #0x1\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"20:" // Main loop skip tail
"cbz x11, 22f\n"
"21:" // Right padding loop
".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"subs x11, x11, #0x1\n"
- ".inst 0xc1b1ca00 // fclamp { z0.s-z3.s }, z16.s, z17.s\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
+ ".inst 0xc1bdcae0 // fclamp { z0.s-z3.s }, z23.s, z29.s\n"
"st1w { z0.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"st1w { z1.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
"st1w { z2.s }, p1, [x25]\n"
@@ -848,12 +848,12 @@ void sme2_fp32_planar_5x5_s1_4rows_mla_za_impl(
"bgt 21b\n"
"22:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
"incb x20, ALL, MUL #16\n"
"incb x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x16\n"
- "whilelt p1.s, x16, x17\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21, LSL #2\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp
index 81ad8e5833..233b6bd61a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32_planar_5x5_s2_4rows_mla_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,67 +72,67 @@ void sme2_fp32_planar_5x5_s2_4rows_mla_za_impl(
"ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"mov x20, #0xb\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x5\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ld1rw { z2.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
"ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rw { z17.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+ "sub x20, x20, x5\n"
+ "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z16.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p1.s, XZR, x7\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z3.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p8.s, XZR, x6\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
"ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
- "fmov z28.s, #0x0\n"
+ "fmov z20.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x20, x17, LSL #2]\n"
"2:" // Load bias: Done
"ldr x16, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x16, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
- "mov z29.d, z28.d\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "orr x23, x7, x23, LSL #20\n"
"mov x22, #0xb\n"
+ "add x20, x6, x5\n"
+ "mov z21.d, z20.d\n"
+ "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ "mov z22.d, z20.d\n"
+ "mov z23.d, z20.d\n"
"ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "add x21, x6, x5\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "ld1w { z9.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "mov z30.d, z28.d\n"
- "mov z31.d, z28.d\n"
- ".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
"mov x8, #0x0\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x16, #0x1\n"
"ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
+ "orr x20, x20, %x[ld_in_col], LSL #18\n"
+ ".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa14149e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "orr x20, x7, x20, LSL #20\n"
+ "madd x21, x21, x6, x14\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "addvl x15, x15, #5\n"
+ "lsl x20, x20, #0x2\n"
+ ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x14\n"
- "ld1w { z6.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x15, #4, MUL VL]\n"
"addvl x15, x15, #5\n"
"3:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x14, x6, x20, x14\n"
- ".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ ".inst 0xc0040e80 // mova za.d[x8, #0], { z20.d-z23.d }\n"
"mov x22, #0x4\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x14, x6, x21, x14\n"
+ ".inst 0xc0040e81 // mova za.d[x8, #1], { z20.d-z23.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
"ldp x11, x10, [x23], #0x10\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
+ ".inst 0xc0040e83 // mova za.d[x8, #3], { z20.d-z23.d }\n"
"ldp x9, x28, [x20], #0x10\n"
- ".inst 0xc0040f83 // mova za.d[x8, #3], { z28.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
"ldp x27, x26, [x23], #0x10\n"
"ldp x25, x24, [x20], #0x10\n"
"cbz x21, 5f\n"
@@ -141,21 +141,21 @@ void sme2_fp32_planar_5x5_s2_4rows_mla_za_impl(
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 5f\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
"and x22, x21, #0x1\n"
"add x21, x21, #0x1\n"
- ".inst 0xc1a3c850 // fclamp { z16.s-z19.s }, z2.s, z3.s\n"
"lsr x21, x21, #0x1\n"
"sub x13, x13, x21\n"
+ ".inst 0xc1b0ca38 // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
"4:" // Left padding
"subs x21, x21, #0x1\n"
- "st1w { z16.s }, p1, [x11]\n"
+ "st1w { z24.s }, p1, [x11]\n"
"add x11, x11, x9, LSL #2\n"
- "st1w { z17.s }, p1, [x10]\n"
+ "st1w { z25.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- "st1w { z18.s }, p1, [x27]\n"
+ "st1w { z26.s }, p1, [x27]\n"
"add x27, x27, x25, LSL #2\n"
- "st1w { z19.s }, p1, [x26]\n"
+ "st1w { z27.s }, p1, [x26]\n"
"add x26, x26, x24, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
@@ -171,331 +171,331 @@ void sme2_fp32_planar_5x5_s2_4rows_mla_za_impl(
"beq 7f\n"
"6:" // Unpadded: 4 priming loads
"add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z9.s }, p1/Z, [x14]\n"
+ "ld1w { z24.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ "ld1w { z9.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z10.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z11.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1341920 // fmla za.s[x8, 0], { z9.s-z12.s }, z4.s\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1371a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z7.s\n"
- "ld1w { z13.s }, p1/Z, [x20]\n"
+ "ld1w { z28.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc1371b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z7.s\n"
+ ".inst 0xa04049e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc1341940 // fmla za.s[x8, 0], { z10.s-z13.s }, z4.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
+ ".inst 0xc1341920 // fmla za.s[x8, 0], { z9.s-z12.s }, z4.s\n"
+ ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc1301aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z0.s\n"
- "ld1w { z14.s }, p1/Z, [x20]\n"
+ ".inst 0xc1321b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z2.s\n"
".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
"ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1341960 // fmla za.s[x8, 0], { z11.s-z14.s }, z4.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc1311940 // fmla za.s[x8, 0], { z10.s-z13.s }, z1.s\n"
+ ".inst 0xc1341b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z4.s\n"
+ ".inst 0xa04049ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
"7:" // Unpadded: 3 priming loads
"add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x14]\n"
+ "ld1w { z28.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
"ld1w { z7.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z8.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z30.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
+ "ld1w { z31.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1351ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z5.s\n"
"ld1w { z10.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13f18e0 // fmla za.s[x8, 0], { z7.s-z10.s }, z15.s\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
+ "ld1w { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc13f1b80 // fmla za.s[x8, 0], { z28.s-z31.s }, z15.s\n"
+ ".inst 0xa04049e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc13f1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z15.s\n"
"ld1w { z11.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z1.s }, p1/Z, [x20]\n"
+ ".inst 0xc13c18e0 // fmla za.s[x8, 0], { z7.s-z10.s }, z12.s\n"
+ ".inst 0xa14049e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc1371900 // fmla za.s[x8, 0], { z8.s-z11.s }, z7.s\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
- ".inst 0xa04049ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc1331ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z3.s\n"
+ ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
"ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13b1b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z11.s\n"
- ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13e1900 // fmla za.s[x8, 0], { z8.s-z11.s }, z14.s\n"
+ ".inst 0xc1351bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z5.s\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"addvl x15, x15, #5\n"
"8:" // Unpadded: 2 priming loads
"add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x14]\n"
+ "ld1w { z25.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x20]\n"
+ "ld1w { z10.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
+ "ld1w { z11.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z28.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13a1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z10.s\n"
- ".inst 0xc1341a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z4.s\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
+ "ld1w { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13019c0 // fmla za.s[x8, 0], { z14.s-z17.s }, z0.s\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13719c1 // fmla za.s[x8, 1], { z14.s-z17.s }, z7.s\n"
- ".inst 0xa04049e8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1361b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z6.s\n"
+ ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1371b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z7.s\n"
+ ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc13a1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z10.s\n"
- ".inst 0xc1381a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z8.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
+ "ld1w { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc13619e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z6.s\n"
+ "ld1w { z30.s }, p1/Z, [x20]\n"
+ ".inst 0xc1301940 // fmla za.s[x8, 0], { z10.s-z13.s }, z0.s\n"
".inst 0xa04149e8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1341941 // fmla za.s[x8, 1], { z10.s-z13.s }, z4.s\n"
+ ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc13819e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z8.s\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- ".inst 0xa04049ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc13e1aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z14.s\n"
- ".inst 0xa14149e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1371aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z7.s\n"
+ ".inst 0xc1321b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z2.s\n"
+ ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1361b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z6.s\n"
".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xc1381960 // fmla za.s[x8, 0], { z11.s-z14.s }, z8.s\n"
+ ".inst 0xc1311961 // fmla za.s[x8, 1], { z11.s-z14.s }, z1.s\n"
+ ".inst 0xc1321b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z2.s\n"
+ ".inst 0xa14149e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1341b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z4.s\n"
+ ".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xa04049ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"addvl x15, x15, #5\n"
"9:" // Unpadded: 1 priming loads
"add x20, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z7.s }, p1/Z, [x14]\n"
+ "ld1w { z24.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
+ "ld1w { z3.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z8.s }, p1/Z, [x20]\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
+ "ld1w { z4.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z9.s }, p1/Z, [x20]\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ "ld1w { z5.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z10.s }, p1/Z, [x20]\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13b18e0 // fmla za.s[x8, 0], { z7.s-z10.s }, z11.s\n"
- ".inst 0xc13518e1 // fmla za.s[x8, 1], { z7.s-z10.s }, z5.s\n"
- "ld1w { z21.s }, p1/Z, [x20]\n"
+ "ld1w { z6.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1311a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z1.s\n"
- "ld1w { z11.s }, p1/Z, [x20]\n"
+ "ld1w { z28.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13f1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z15.s\n"
- ".inst 0xa04049e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ec // ld1w { z12.s-z13.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13e1b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z14.s\n"
+ ".inst 0xa04149e8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13f1b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z15.s\n"
+ ".inst 0xa14049e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc13d1900 // fmla za.s[x8, 0], { z8.s-z11.s }, z13.s\n"
- ".inst 0xc1311901 // fmla za.s[x8, 1], { z8.s-z11.s }, z1.s\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z7.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc13e1a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z14.s\n"
- ".inst 0xa14149e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc13e1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z14.s\n"
- "ld1w { z12.s }, p1/Z, [x20]\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
+ ".inst 0xc1311860 // fmla za.s[x8, 0], { z3.s-z6.s }, z1.s\n"
+ ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13c1861 // fmla za.s[x8, 1], { z3.s-z6.s }, z12.s\n"
".inst 0xa04049ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc13f1921 // fmla za.s[x8, 1], { z9.s-z12.s }, z15.s\n"
- ".inst 0xa04149ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13f1920 // fmla za.s[x8, 0], { z9.s-z12.s }, z15.s\n"
- ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x15, #4, MUL VL]\n"
"addvl x15, x15, #5\n"
+ ".inst 0xc1391b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z9.s\n"
+ ".inst 0xa04149e8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13a1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z10.s\n"
+ ".inst 0xa04049ec // ld1w { z12.s-z13.s }, pn10.b/Z, [x15]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xc1311880 // fmla za.s[x8, 0], { z4.s-z7.s }, z1.s\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc13f1881 // fmla za.s[x8, 1], { z4.s-z7.s }, z15.s\n"
+ ".inst 0xc1391b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z9.s\n"
+ ".inst 0xa14149e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13d1b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z13.s\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z6.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x15, #4, MUL VL]\n"
"addvl x15, x15, #5\n"
"10:" // Unpadded: 0 priming loads
"cmp x16, #0x2\n"
"blt 20f\n"
"add x21, x14, %x[ld_in_row], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x14]\n"
+ "ld1w { z9.s }, p1/Z, [x14]\n"
"sub x16, x16, #0x2\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"sub x13, x13, #0x1\n"
- "ld1w { z23.s }, p1/Z, [x21]\n"
+ "ld1w { z10.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"lsr x20, x16, #0x1\n"
- "ld1w { z17.s }, p1/Z, [x21]\n"
+ "ld1w { z27.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"cmp x20, x13\n"
- "ld1w { z24.s }, p1/Z, [x21]\n"
+ "ld1w { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"csel x23, x20, x13, LT\n"
- "ld1w { z18.s }, p1/Z, [x21]\n"
+ "ld1w { z28.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x21]\n"
+ "ld1w { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"and x16, x16, #0x1\n"
- "ld1w { z19.s }, p1/Z, [x21]\n"
+ "ld1w { z29.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"sub x13, x13, x23\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "ld1w { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- "ld1w { z20.s }, p1/Z, [x21]\n"
+ "ld1w { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x21]\n"
+ "ld1w { z14.s }, p1/Z, [x21]\n"
"cbz x23, 19f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc1391ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z9.s\n"
- "ld1w { z13.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1381920 // fmla za.s[x8, 0], { z9.s-z12.s }, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
"add x22, x14, %x[ld_in_row], LSL #2\n"
"subs x23, x23, #0x1\n"
- ".inst 0xc13a1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z10.s\n"
- ".inst 0xa14149e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc1341ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z4.s\n"
- ".inst 0xa04049e8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc1361a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z6.s\n"
- "ld1w { z11.s }, p2/Z, [x15, #4, MUL VL]\n"
- ".inst 0xc1301a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z0.s\n"
- ".inst 0xa04149ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc1371a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z7.s\n"
+ ".inst 0xc1361921 // fmla za.s[x8, 1], { z9.s-z12.s }, z6.s\n"
+ ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1371922 // fmla za.s[x8, 2], { z9.s-z12.s }, z7.s\n"
".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc13d1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z13.s\n"
- "ld1w { z4.s }, p2/Z, [x15, #4, MUL VL]\n"
- ".inst 0xc1311ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z1.s\n"
- ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc1381ae2 // fmla za.s[x8, 2], { z23.s-z26.s }, z8.s\n"
- ".inst 0xa04049ec // ld1w { z12.s-z13.s }, pn10.b/Z, [x15]\n"
- "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13b1a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z11.s\n"
- "ld1w { z15.s }, p1/Z, [x14]\n"
+ "ld1w { z31.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- "add x20, x14, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z14.s\n"
- "ld1w { z22.s }, p1/Z, [x22]\n"
+ ".inst 0xc1351b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z5.s\n"
+ "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "add x21, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1301b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z0.s\n"
+ ".inst 0xa14149e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1341b42 // fmla za.s[x8, 2], { z26.s-z29.s }, z4.s\n"
+ ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ "ld1w { z26.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1361a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
+ ".inst 0xc1381940 // fmla za.s[x8, 0], { z10.s-z13.s }, z8.s\n"
+ "ld1w { z5.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1321941 // fmla za.s[x8, 1], { z10.s-z13.s }, z2.s\n"
+ ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1361942 // fmla za.s[x8, 2], { z10.s-z13.s }, z6.s\n"
+ ".inst 0xa04049e8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x15]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z0.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x22]\n"
+ ".inst 0xc1311b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z1.s\n"
+ ".inst 0xc1371b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z7.s\n"
+ ".inst 0xc1341b62 // fmla za.s[x8, 2], { z27.s-z30.s }, z4.s\n"
+ "ld1w { z27.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1341b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z4.s\n"
- ".inst 0xc1301b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z0.s\n"
- ".inst 0xa0414aa6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc13c1b02 // fmla za.s[x8, 2], { z24.s-z27.s }, z12.s\n"
- "ld1w { z17.s }, p1/Z, [x22]\n"
+ ".inst 0xc1351960 // fmla za.s[x8, 0], { z11.s-z14.s }, z5.s\n"
+ ".inst 0xc1321961 // fmla za.s[x8, 1], { z11.s-z14.s }, z2.s\n"
+ ".inst 0xa0414a82 // ld1w { z2.s-z3.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1381962 // fmla za.s[x8, 2], { z11.s-z14.s }, z8.s\n"
+ "ld1w { z1.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x22]\n"
+ "ld1w { z28.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
+ ".inst 0xa1404a82 // ld1w { z2.s, z10.s }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- ".inst 0xa1404aa4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x21]\n"
- "addvl x21, x21, #5\n"
- ".inst 0xc1a3c848 // fclamp { z8.s-z11.s }, z2.s, z3.s\n"
- "st1w { z8.s }, p1, [x11]\n"
- "ld1w { z18.s }, p1/Z, [x22]\n"
+ "addvl x20, x20, #5\n"
+ "ld1w { z2.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xa0404a84 // ld1w { z4.s-z5.s }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0414a86 // ld1w { z6.s-z7.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "addvl x20, x20, #5\n"
+ ".inst 0xc1331be0 // fmla za.s[x8, 0], { z31.s-z2.s }, z3.s\n"
+ "ld1w { z29.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1b0ca2c // fclamp { z12.s-z15.s }, z17.s, z16.s\n"
+ ".inst 0xc13a1be1 // fmla za.s[x8, 1], { z31.s-z2.s }, z10.s\n"
+ "ld1w { z3.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa0404a8a // ld1w { z10.s-z11.s }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0414a88 // ld1w { z8.s-z9.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "addvl x20, x20, #5\n"
+ ".inst 0xc1371b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z7.s\n"
+ "ld1w { z30.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13719e0 // fmla za.s[x8, 0], { z15.s-z18.s }, z7.s\n"
+ "st1w { z12.s }, p1, [x11]\n"
+ ".inst 0xc1351b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z5.s\n"
+ ".inst 0xa1404a84 // ld1w { z4.s, z12.s }, pn10.b/Z, [x20]\n"
"add x11, x11, x9, LSL #2\n"
- ".inst 0xc13c19e1 // fmla za.s[x8, 1], { z15.s-z18.s }, z12.s\n"
- ".inst 0xa1404aa7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x21]\n"
- "st1w { z9.s }, p1, [x10]\n"
+ "st1w { z13.s }, p1, [x10]\n"
+ ".inst 0xa0414a84 // ld1w { z4.s-z5.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "addvl x20, x20, #5\n"
"add x10, x10, x28, LSL #2\n"
- ".inst 0xa1414aa6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- "addvl x21, x21, #5\n"
- "st1w { z10.s }, p1, [x27]\n"
+ "st1w { z14.s }, p1, [x27]\n"
+ "ld1w { z4.s }, p1/Z, [x22]\n"
"add x27, x27, x25, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z14.s\n"
- "st1w { z11.s }, p1, [x26]\n"
- ".inst 0xc13f1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z15.s\n"
- "ld1w { z19.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "st1w { z15.s }, p1, [x26]\n"
"add x26, x26, x24, LSL #2\n"
- ".inst 0xa0404aae // ld1w { z14.s-z15.s }, pn10.b/Z, [x21]\n"
- ".inst 0xc13f1a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z15.s\n"
- ".inst 0xa1414aa4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- "addvl x21, x21, #5\n"
- ".inst 0xc13c1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z12.s\n"
- "ld1w { z26.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- ".inst 0xa0404aac // ld1w { z12.s-z13.s }, pn10.b/Z, [x21]\n"
- ".inst 0xc13d1ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z13.s\n"
- ".inst 0xa1414aa4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- "addvl x21, x21, #5\n"
- ".inst 0xc13c1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z12.s\n"
- "ld1w { z20.s }, p1/Z, [x22]\n"
- ".inst 0xa1404aa7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x21]\n"
- ".inst 0xc13f1a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z15.s\n"
- ".inst 0xa0414aaa // ld1w { z10.s-z11.s }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc13b1a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z11.s\n"
+ ".inst 0xc1391800 // fmla za.s[x8, 0], { z0.s-z3.s }, z9.s\n"
+ ".inst 0xa0414a86 // ld1w { z6.s-z7.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc13b1801 // fmla za.s[x8, 1], { z0.s-z3.s }, z11.s\n"
+ ".inst 0xa1404a86 // ld1w { z6.s, z14.s }, pn10.b/Z, [x20]\n"
"ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- "ld1w { z22.s }, p1/Z, [x14]\n"
+ "ld1w { z9.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1351b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z5.s\n"
+ "ld1w { z10.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13c1b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z12.s\n"
+ "ld1w { z27.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1371820 // fmla za.s[x8, 0], { z1.s-z4.s }, z7.s\n"
+ ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13e1821 // fmla za.s[x8, 1], { z1.s-z4.s }, z14.s\n"
+ "ld1w { z11.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z29.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ "ld1w { z30.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z6.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x15, #4, MUL VL]\n"
"addvl x15, x15, #5\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
+ "ld1w { z14.s }, p1/Z, [x21]\n"
"bgt 11b\n"
"b 19f\n"
"12:" // Padded
@@ -509,625 +509,625 @@ void sme2_fp32_planar_5x5_s2_4rows_mla_za_impl(
"beq 14f\n"
"13:" // Padded: 4 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z9.s }, p0/Z, [x14]\n"
"add x20, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z29.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z8.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z10.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z9.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z11.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z10.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- ".inst 0xc1341920 // fmla za.s[x8, 0], { z9.s-z12.s }, z4.s\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0xc1371ba0 // fmla za.s[x8, 0], { z29.s-z0.s }, z7.s\n"
+ "ld1w { z11.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1371ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z7.s\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1361940 // fmla za.s[x8, 0], { z10.s-z13.s }, z6.s\n"
+ ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
+ "ld1w { z1.s }, p0/Z, [x20]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc1341900 // fmla za.s[x8, 0], { z8.s-z11.s }, z4.s\n"
+ ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc1361b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z6.s\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z12.s }, p0/Z, [x20]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1361bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z6.s\n"
+ ".inst 0xa04049ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15]\n"
"ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1361960 // fmla za.s[x8, 0], { z11.s-z14.s }, z6.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
+ "ld1w { z2.s }, p0/Z, [x20]\n"
+ ".inst 0xc1341920 // fmla za.s[x8, 0], { z9.s-z12.s }, z4.s\n"
+ ".inst 0xc13e1be0 // fmla za.s[x8, 0], { z31.s-z2.s }, z14.s\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
+ ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
"14:" // Padded: 3 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z22.s }, p0/Z, [x14]\n"
"add x20, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z30.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z9.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z10.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z11.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z1.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- ".inst 0xc1351ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z5.s\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0xc13f1bc0 // fmla za.s[x8, 0], { z30.s-z1.s }, z15.s\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13f1920 // fmla za.s[x8, 0], { z9.s-z12.s }, z15.s\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1381ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z8.s\n"
+ ".inst 0xa14049e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
+ "ld1w { z2.s }, p0/Z, [x20]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc13c1b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z12.s\n"
+ ".inst 0xa04049ec // ld1w { z12.s-z13.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc13f1940 // fmla za.s[x8, 0], { z10.s-z13.s }, z15.s\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xa04049ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13e1be0 // fmla za.s[x8, 0], { z31.s-z2.s }, z14.s\n"
+ ".inst 0xa04049ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15]\n"
"ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13f1b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z15.s\n"
- ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ "ld1w { z3.s }, p0/Z, [x20]\n"
+ ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13d1b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z13.s\n"
+ ".inst 0xc13b1800 // fmla za.s[x8, 0], { z0.s-z3.s }, z11.s\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"addvl x15, x15, #5\n"
"15:" // Padded: 2 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
"add x20, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z25.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z12.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- ".inst 0xc13a1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z10.s\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- ".inst 0xc1341a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z4.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0xc1361b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z6.s\n"
+ "ld1w { z14.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- ".inst 0xc1301ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z0.s\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1371ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z7.s\n"
- ".inst 0xa14149e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1371b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z7.s\n"
+ ".inst 0xa14049e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa14149e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"addvl x15, x15, #5\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1371a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z7.s\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1301a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z0.s\n"
- ".inst 0xa14049e5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc13a1b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z10.s\n"
- ".inst 0xc1351b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z5.s\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- ".inst 0xa14049e5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1351a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z5.s\n"
- ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1301a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z0.s\n"
+ ".inst 0xc1301960 // fmla za.s[x8, 0], { z11.s-z14.s }, z0.s\n"
+ ".inst 0xa04149e8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1341961 // fmla za.s[x8, 1], { z11.s-z14.s }, z4.s\n"
".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"addvl x15, x15, #5\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1311b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z1.s\n"
+ ".inst 0xa14149e3 // ld1w { z3.s, z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1321b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z2.s\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
+ ".inst 0xa04049e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xc1381980 // fmla za.s[x8, 0], { z12.s-z15.s }, z8.s\n"
+ ".inst 0xc1341981 // fmla za.s[x8, 1], { z12.s-z15.s }, z4.s\n"
+ ".inst 0xc1331b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z3.s\n"
+ ".inst 0xa14149e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1301b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z0.s\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"addvl x15, x15, #5\n"
"16:" // Padded: 1 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z19.s }, p0/Z, [x14]\n"
"add x20, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z25.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x14, x14, %x[ld_in_col], LSL #2\n"
"ld1w { z8.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z9.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z9.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z10.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z10.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z22.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
"mov x12, #0x8\n"
- ".inst 0xc13b1a60 // fmla za.s[x8, 0], { z19.s-z22.s }, z11.s\n"
+ ".inst 0xc13e1b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z14.s\n"
"ld1w { z11.s }, p0/Z, [x20]\n"
- ".inst 0xc1351a61 // fmla za.s[x8, 1], { z19.s-z22.s }, z5.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- ".inst 0xc1311900 // fmla za.s[x8, 0], { z8.s-z11.s }, z1.s\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13f1b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z15.s\n"
+ ".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "addvl x15, x15, #5\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc13f1901 // fmla za.s[x8, 1], { z8.s-z11.s }, z15.s\n"
- ".inst 0xa14149e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1311900 // fmla za.s[x8, 0], { z8.s-z11.s }, z1.s\n"
+ ".inst 0xa14149e5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13c1901 // fmla za.s[x8, 1], { z8.s-z11.s }, z12.s\n"
+ ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13e1a80 // fmla za.s[x8, 0], { z20.s-z23.s }, z14.s\n"
"ld1w { z12.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1351a81 // fmla za.s[x8, 1], { z20.s-z23.s }, z5.s\n"
- ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa14149e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc1381920 // fmla za.s[x8, 0], { z9.s-z12.s }, z8.s\n"
- ".inst 0xc1371921 // fmla za.s[x8, 1], { z9.s-z12.s }, z7.s\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- ".inst 0xa14049e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1381aa1 // fmla za.s[x8, 1], { z21.s-z24.s }, z8.s\n"
- ".inst 0xa04149ec // ld1w { z12.s-z13.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1331b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z3.s\n"
+ ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc13f1b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z15.s\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
+ ".inst 0xa04049e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15]\n"
"ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13d1aa0 // fmla za.s[x8, 0], { z21.s-z24.s }, z13.s\n"
- ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xc13d1920 // fmla za.s[x8, 0], { z9.s-z12.s }, z13.s\n"
+ ".inst 0xc1351921 // fmla za.s[x8, 1], { z9.s-z12.s }, z5.s\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1311b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z1.s\n"
+ ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1331b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z3.s\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z6.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x15, #4, MUL VL]\n"
"addvl x15, x15, #5\n"
"17:" // Padded: 0 priming loads
"cmp x16, #0x2\n"
"blt 20f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z22.s }, p0/Z, [x14]\n"
"add x21, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x16, x16, #0x2\n"
+ "sub x13, x13, #0x1\n"
+ "lsr x20, x16, #0x1\n"
+ "cmp x20, x13\n"
+ "and x16, x16, #0x1\n"
+ "ld1w { z9.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "csel x23, x20, x13, LT\n"
+ "add x14, x14, %x[ld_in_col], LSL #2\n"
+ "sub x13, x13, x23\n"
+ "ld1w { z26.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z23.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z10.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z17.s }, p0/Z, [x21]\n"
- "mov x12, #0x4\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z27.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z24.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z18.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z25.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z19.s }, p0/Z, [x21]\n"
- "sub x16, x16, #0x2\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ "ld1w { z29.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "sub x13, x13, #0x1\n"
- "ld1w { z26.s }, p0/Z, [x21]\n"
- "lsr x20, x16, #0x1\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "cmp x20, x13\n"
- "ld1w { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
- "csel x23, x20, x13, LT\n"
- "add x14, x14, %x[ld_in_col], LSL #2\n"
- "and x16, x16, #0x1\n"
- "sub x13, x13, x23\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x21]\n"
"cbz x23, 19f\n"
"18:" // Padded: Main loop
- ".inst 0xc1391ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z9.s\n"
- "ld1w { z15.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1381920 // fmla za.s[x8, 0], { z9.s-z12.s }, z8.s\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13a1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z10.s\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "ld1w { z15.s }, p2/Z, [x15, #4, MUL VL]\n"
"add x22, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1361921 // fmla za.s[x8, 1], { z9.s-z12.s }, z6.s\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"subs x23, x23, #0x1\n"
- ".inst 0xc1341ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z4.s\n"
- ".inst 0xa14049e5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc1361a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z6.s\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
- ".inst 0xc1301a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z0.s\n"
- ".inst 0xa04149ec // ld1w { z12.s-z13.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc1371a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z7.s\n"
- ".inst 0xa14049e6 // ld1w { z6.s, z14.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc1371922 // fmla za.s[x8, 2], { z9.s-z12.s }, z7.s\n"
+ ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc13f1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z15.s\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ ".inst 0xc1351b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z5.s\n"
+ "ld1w { z8.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13a1ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z10.s\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1301b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z0.s\n"
+ ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"add x21, x14, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1351ae2 // fmla za.s[x8, 2], { z23.s-z26.s }, z5.s\n"
- ".inst 0xa04049ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1311a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z1.s\n"
- "ld1w { z0.s }, p2/Z, [x15, #4, MUL VL]\n"
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13c1a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z12.s\n"
- "ld1w { z12.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1341b42 // fmla za.s[x8, 2], { z26.s-z29.s }, z4.s\n"
+ ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z31.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1361a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z6.s\n"
- "ld1w { z17.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13f1940 // fmla za.s[x8, 0], { z10.s-z13.s }, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "addvl x15, x15, #5\n"
+ ".inst 0xc1321941 // fmla za.s[x8, 1], { z10.s-z13.s }, z2.s\n"
+ ".inst 0xa04149e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1361942 // fmla za.s[x8, 2], { z10.s-z13.s }, z6.s\n"
+ "ld1w { z9.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x4\n"
- "ld1w { z13.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1301b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z0.s\n"
+ "mov x12, #0x4\n"
+ ".inst 0xa14049e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc13f1b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z15.s\n"
+ "ld1w { z5.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xc1301b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z0.s\n"
+ ".inst 0xc1311b62 // fmla za.s[x8, 2], { z27.s-z30.s }, z1.s\n"
+ "ld1w { z0.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13a1b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z10.s\n"
- ".inst 0xa1414a81 // ld1w { z1.s, z9.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc13e1b02 // fmla za.s[x8, 2], { z24.s-z27.s }, z14.s\n"
- "ld1w { z18.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1351960 // fmla za.s[x8, 0], { z11.s-z14.s }, z5.s\n"
+ ".inst 0xc1341961 // fmla za.s[x8, 1], { z11.s-z14.s }, z4.s\n"
+ ".inst 0xa0414a86 // ld1w { z6.s-z7.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1321962 // fmla za.s[x8, 2], { z11.s-z14.s }, z2.s\n"
+ "ld1w { z10.s }, p0/Z, [x22]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z14.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xa0404a82 // ld1w { z2.s-z3.s }, pn10.b/Z, [x20]\n"
+ "addvl x20, x20, #5\n"
".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- ".inst 0xa0404a80 // ld1w { z0.s-z1.s }, pn10.b/Z, [x20]\n"
"add x8, x8, #0x1\n"
- "addvl x20, x20, #5\n"
- ".inst 0xc1a3c858 // fclamp { z24.s-z27.s }, z2.s, z3.s\n"
- "ld1w { z19.s }, p0/Z, [x22]\n"
+ ".inst 0xa0404a8e // ld1w { z14.s-z15.s }, pn10.b/Z, [x20]\n"
+ "ld1w { z1.s }, p0/Z, [x22]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xa1414a86 // ld1w { z6.s, z14.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "addvl x20, x20, #5\n"
+ "ld1w { z11.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "st1w { z24.s }, p1, [x11]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1b0ca38 // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
"mov x12, #0x8\n"
- ".inst 0xc1391a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z9.s\n"
- ".inst 0xa0404a88 // ld1w { z8.s-z9.s }, pn10.b/Z, [x20]\n"
+ "ld1w { z2.s }, p0/Z, [x22]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371900 // fmla za.s[x8, 0], { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc1331901 // fmla za.s[x8, 1], { z8.s-z11.s }, z3.s\n"
+ ".inst 0xa0404a84 // ld1w { z4.s-z5.s }, pn10.b/Z, [x20]\n"
+ "st1w { z24.s }, p1, [x11]\n"
"add x11, x11, x9, LSL #2\n"
- ".inst 0xc1311a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z1.s\n"
- ".inst 0xa0414a80 // ld1w { z0.s-z1.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xa0414a8c // ld1w { z12.s-z13.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
"addvl x20, x20, #5\n"
"st1w { z25.s }, p1, [x10]\n"
- "ld1w { z15.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1311980 // fmla za.s[x8, 0], { z12.s-z15.s }, z1.s\n"
- ".inst 0xc1391981 // fmla za.s[x8, 1], { z12.s-z15.s }, z9.s\n"
- "ld1w { z20.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa0404a8a // ld1w { z10.s-z11.s }, pn10.b/Z, [x20]\n"
- ".inst 0xc13b1a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z11.s\n"
"add x10, x10, x28, LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x22]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
"st1w { z26.s }, p1, [x27]\n"
- ".inst 0xa1414a80 // ld1w { z0.s, z8.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "addvl x20, x20, #5\n"
- ".inst 0xc1381a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z8.s\n"
+ ".inst 0xc13e1be0 // fmla za.s[x8, 0], { z31.s-z2.s }, z14.s\n"
+ ".inst 0xa1414a86 // ld1w { z6.s, z14.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
"add x27, x27, x25, LSL #2\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"st1w { z27.s }, p1, [x26]\n"
- ".inst 0xa0404a88 // ld1w { z8.s-z9.s }, pn10.b/Z, [x20]\n"
- "mov x12, #0x0\n"
- ".inst 0xc13919a1 // fmla za.s[x8, 1], { z13.s-z16.s }, z9.s\n"
- "add x26, x26, x24, LSL #2\n"
- ".inst 0xa1414a81 // ld1w { z1.s, z9.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc13f1be1 // fmla za.s[x8, 1], { z31.s-z2.s }, z15.s\n"
+ ".inst 0xa1404a87 // ld1w { z7.s, z15.s }, pn10.b/Z, [x20]\n"
"addvl x20, x20, #5\n"
- ".inst 0xc13919a0 // fmla za.s[x8, 0], { z13.s-z16.s }, z9.s\n"
- "ld1w { z21.s }, p0/Z, [x22]\n"
+ "add x26, x26, x24, LSL #2\n"
+ "ld1w { z3.s }, p0/Z, [x22]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x0\n"
+ ".inst 0xc13d1920 // fmla za.s[x8, 0], { z9.s-z12.s }, z13.s\n"
+ ".inst 0xa0414a86 // ld1w { z6.s-z7.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1w { z13.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- "ld1w { z22.s }, p0/Z, [x14]\n"
+ ".inst 0xc1351921 // fmla za.s[x8, 1], { z9.s-z12.s }, z5.s\n"
+ ".inst 0xa0404a84 // ld1w { z4.s-z5.s }, pn10.b/Z, [x20]\n"
+ "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1w { z9.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc13e1800 // fmla za.s[x8, 0], { z0.s-z3.s }, z14.s\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xa0404a8e // ld1w { z14.s-z15.s }, pn10.b/Z, [x20]\n"
- ".inst 0xc13f1a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z15.s\n"
- ".inst 0xa0414a80 // ld1w { z0.s-z1.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1311a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z1.s\n"
- "ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13f1801 // fmla za.s[x8, 1], { z0.s-z3.s }, z15.s\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z26.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z23.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371940 // fmla za.s[x8, 0], { z10.s-z13.s }, z7.s\n"
+ ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1351941 // fmla za.s[x8, 1], { z10.s-z13.s }, z5.s\n"
+ ".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ "ld1w { z10.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x4\n"
- "ld1w { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z24.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z5.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "addvl x15, x15, #5\n"
+ "ld1w { z11.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z18.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z25.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z12.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z19.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ "ld1w { z29.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z26.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa04049e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z30.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z6.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z14.s }, p0/Z, [x21]\n"
"bgt 18b\n"
"19:" // Main loop tail
- ".inst 0xc1391ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z9.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1381920 // fmla za.s[x8, 0], { z9.s-z12.s }, z8.s\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13a1ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z10.s\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
"add x21, x14, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1341ac2 // fmla za.s[x8, 2], { z22.s-z25.s }, z4.s\n"
- ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc1361a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z6.s\n"
- "ld1w { z9.s }, p2/Z, [x15, #4, MUL VL]\n"
- ".inst 0xc1301a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z0.s\n"
- ".inst 0xa04149ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc1371a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z7.s\n"
- ".inst 0xa04049ec // ld1w { z12.s-z13.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc1361921 // fmla za.s[x8, 1], { z9.s-z12.s }, z6.s\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xa04149e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1371922 // fmla za.s[x8, 2], { z9.s-z12.s }, z7.s\n"
+ ".inst 0xa04049e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc1381ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z8.s\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
+ ".inst 0xc1351b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z5.s\n"
+ "ld1w { z9.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x14, x14, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13a1ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z10.s\n"
- ".inst 0xa14149e5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc1311ae2 // fmla za.s[x8, 2], { z23.s-z26.s }, z1.s\n"
+ ".inst 0xc1301b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z0.s\n"
+ ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1341b42 // fmla za.s[x8, 2], { z26.s-z29.s }, z4.s\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1391a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z9.s\n"
- "ld1w { z1.s }, p2/Z, [x15, #4, MUL VL]\n"
- "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc13e1a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z14.s\n"
- "ld1w { z22.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "ld1w { z31.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc13c1a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z12.s\n"
- "ld1w { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1381940 // fmla za.s[x8, 0], { z10.s-z13.s }, z8.s\n"
+ "ld1w { z15.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "addvl x15, x15, #5\n"
+ ".inst 0xc1321941 // fmla za.s[x8, 1], { z10.s-z13.s }, z2.s\n"
+ ".inst 0xa04149e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1361942 // fmla za.s[x8, 2], { z10.s-z13.s }, z6.s\n"
+ "ld1w { z10.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x4\n"
- "ld1w { z23.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1311b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z1.s\n"
+ "mov x12, #0x4\n"
+ ".inst 0xa04049e2 // ld1w { z2.s-z3.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc13f1b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z15.s\n"
+ "ld1w { z6.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ ".inst 0xc1301b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z0.s\n"
+ ".inst 0xc1371b62 // fmla za.s[x8, 2], { z27.s-z30.s }, z7.s\n"
+ "ld1w { z0.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1351b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z5.s\n"
- ".inst 0xa0414a8e // ld1w { z14.s-z15.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1371b02 // fmla za.s[x8, 2], { z24.s-z27.s }, z7.s\n"
- "ld1w { z18.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1361960 // fmla za.s[x8, 0], { z11.s-z14.s }, z6.s\n"
+ ".inst 0xc1341961 // fmla za.s[x8, 1], { z11.s-z14.s }, z4.s\n"
+ ".inst 0xa0414a84 // ld1w { z4.s-z5.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1321962 // fmla za.s[x8, 2], { z11.s-z14.s }, z2.s\n"
+ "ld1w { z11.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z24.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
- ".inst 0xa0404a84 // ld1w { z4.s-z5.s }, pn10.b/Z, [x20]\n"
- "add x8, x8, #0x1\n"
+ ".inst 0xa0404a86 // ld1w { z6.s-z7.s }, pn10.b/Z, [x20]\n"
"addvl x20, x20, #5\n"
- ".inst 0xc1a3c848 // fclamp { z8.s-z11.s }, z2.s, z3.s\n"
- "ld1w { z19.s }, p0/Z, [x21]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xa0404a82 // ld1w { z2.s-z3.s }, pn10.b/Z, [x20]\n"
+ "ld1w { z1.s }, p0/Z, [x21]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "st1w { z8.s }, p1, [x11]\n"
- "mov x12, #0x8\n"
- ".inst 0xc13f1a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z15.s\n"
- ".inst 0xa0404a80 // ld1w { z0.s-z1.s }, pn10.b/Z, [x20]\n"
- "add x11, x11, x9, LSL #2\n"
- ".inst 0xc1351a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z5.s\n"
- ".inst 0xa1414a80 // ld1w { z0.s, z8.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xa0414a8e // ld1w { z14.s-z15.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
"addvl x20, x20, #5\n"
- "st1w { z9.s }, p1, [x10]\n"
- "ld1w { z25.s }, p0/Z, [x21]\n"
+ "ld1w { z12.s }, p0/Z, [x21]\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1b0ca38 // fclamp { z24.s-z27.s }, z17.s, z16.s\n"
+ "mov x12, #0x8\n"
+ ".inst 0xc1351920 // fmla za.s[x8, 0], { z9.s-z12.s }, z5.s\n"
+ "ld1w { z2.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1381ac0 // fmla za.s[x8, 0], { z22.s-z25.s }, z8.s\n"
- ".inst 0xc1311ac1 // fmla za.s[x8, 1], { z22.s-z25.s }, z1.s\n"
- "ld1w { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371921 // fmla za.s[x8, 1], { z9.s-z12.s }, z7.s\n"
+ ".inst 0xa0404a88 // ld1w { z8.s-z9.s }, pn10.b/Z, [x20]\n"
+ "st1w { z24.s }, p1, [x11]\n"
+ "add x11, x11, x9, LSL #2\n"
+ ".inst 0xa1414a86 // ld1w { z6.s, z14.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "addvl x20, x20, #5\n"
+ "st1w { z25.s }, p1, [x10]\n"
+ "add x10, x10, x28, LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ ".inst 0xc13f1be0 // fmla za.s[x8, 0], { z31.s-z2.s }, z15.s\n"
+ ".inst 0xa0414a84 // ld1w { z4.s-z5.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add x27, x27, x25, LSL #2\n"
+ "st1w { z27.s }, p1, [x26]\n"
+ ".inst 0xc1331be1 // fmla za.s[x8, 1], { z31.s-z2.s }, z3.s\n"
".inst 0xa0404a86 // ld1w { z6.s-z7.s }, pn10.b/Z, [x20]\n"
- ".inst 0xc1371a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z7.s\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z10.s }, p1, [x27]\n"
- ".inst 0xa1414a81 // ld1w { z1.s, z9.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
"addvl x20, x20, #5\n"
- ".inst 0xc1391a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z9.s\n"
- "add x27, x27, x25, LSL #2\n"
- "ld1w { z26.s }, p0/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "st1w { z11.s }, p1, [x26]\n"
- ".inst 0xa1404a84 // ld1w { z4.s, z12.s }, pn10.b/Z, [x20]\n"
- ".inst 0xc13c1ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z12.s\n"
"add x26, x26, x24, LSL #2\n"
- ".inst 0xa1414a84 // ld1w { z4.s, z12.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "addvl x20, x20, #5\n"
- ".inst 0xc13c1ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z12.s\n"
- "ld1w { z21.s }, p0/Z, [x21]\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
- ".inst 0xa0404a80 // ld1w { z0.s-z1.s }, pn10.b/Z, [x20]\n"
- ".inst 0xc1311a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z1.s\n"
- ".inst 0xa0414a80 // ld1w { z0.s-z1.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1w { z3.s }, p0/Z, [x21]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13e1940 // fmla za.s[x8, 0], { z10.s-z13.s }, z14.s\n"
+ ".inst 0xa0414a8e // ld1w { z14.s-z15.s }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1391941 // fmla za.s[x8, 1], { z10.s-z13.s }, z9.s\n"
+ "ld1w { z14.s }, p0/Z, [x21]\n"
+ ".inst 0xa0404a88 // ld1w { z8.s-z9.s }, pn10.b/Z, [x20]\n"
"ldr x15, [%x[args], %[offsetof_Args_weights]]\n"
- ".inst 0xc1311a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z1.s\n"
- ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
- ".inst 0xa04149ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x15, #4, MUL VL]\n"
- "addvl x15, x15, #5\n"
+ ".inst 0xc1351800 // fmla za.s[x8, 0], { z0.s-z3.s }, z5.s\n"
+ ".inst 0xc1371801 // fmla za.s[x8, 1], { z0.s-z3.s }, z7.s\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc13f1960 // fmla za.s[x8, 0], { z11.s-z14.s }, z15.s\n"
+ ".inst 0xa04149e6 // ld1w { z6.s-z7.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1391961 // fmla za.s[x8, 1], { z11.s-z14.s }, z9.s\n"
".inst 0xa14049e7 // ld1w { z7.s, z15.s }, pn10.b/Z, [x15]\n"
+ "addvl x15, x15, #5\n"
+ ".inst 0xa14049e4 // ld1w { z4.s, z12.s }, pn10.b/Z, [x15]\n"
".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "ld1w { z6.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x15, #4, MUL VL]\n"
"addvl x15, x15, #5\n"
"20:" // Main loop skip tail
"cbz x16, 21f\n" // Skip remainder inputs
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x14]\n"
"add x20, x14, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x13, x13, #0x1\n"
+ "ld1w { z25.s }, p0/Z, [x14]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ "ld1w { z12.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z13.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- ".inst 0xc1391a00 // fmla za.s[x8, 0], { z16.s-z19.s }, z9.s\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0xc1381b20 // fmla za.s[x8, 0], { z25.s-z28.s }, z8.s\n"
+ "ld1w { z14.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1361ae0 // fmla za.s[x8, 0], { z23.s-z26.s }, z6.s\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- ".inst 0xc13a1a01 // fmla za.s[x8, 1], { z16.s-z19.s }, z10.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1361b21 // fmla za.s[x8, 1], { z25.s-z28.s }, z6.s\n"
+ ".inst 0xc1371b22 // fmla za.s[x8, 2], { z25.s-z28.s }, z7.s\n"
+ ".inst 0xa14049e1 // ld1w { z1.s, z9.s }, pn10.b/Z, [x15]\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1341a02 // fmla za.s[x8, 2], { z16.s-z19.s }, z4.s\n"
- ".inst 0xa04049ea // ld1w { z10.s-z11.s }, pn10.b/Z, [x15]\n"
- "sub x13, x13, #0x1\n"
- ".inst 0xa04149ee // ld1w { z14.s-z15.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- "addvl x15, x15, #5\n"
- ".inst 0xc1381a20 // fmla za.s[x8, 0], { z17.s-z20.s }, z8.s\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- ".inst 0xc1301ae1 // fmla za.s[x8, 1], { z23.s-z26.s }, z0.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1351960 // fmla za.s[x8, 0], { z11.s-z14.s }, z5.s\n"
+ ".inst 0xa14149e2 // ld1w { z2.s, z10.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ "addvl x15, x15, #5\n"
+ "ld1w { z10.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1301961 // fmla za.s[x8, 1], { z11.s-z14.s }, z0.s\n"
+ "ld1w { z15.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z9.s }, p2/Z, [x15, #4, MUL VL]\n"
- ".inst 0xc1371ae2 // fmla za.s[x8, 2], { z23.s-z26.s }, z7.s\n"
- ".inst 0xa04049e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1391b00 // fmla za.s[x8, 0], { z24.s-z27.s }, z9.s\n"
- ".inst 0xa14149e5 // ld1w { z5.s, z13.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1341962 // fmla za.s[x8, 2], { z11.s-z14.s }, z4.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa14049e3 // ld1w { z3.s, z11.s }, pn10.b/Z, [x15]\n"
+ ".inst 0xc1381b40 // fmla za.s[x8, 0], { z26.s-z29.s }, z8.s\n"
+ ".inst 0xa14149e0 // ld1w { z0.s, z8.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
"addvl x15, x15, #5\n"
- ".inst 0xc13e1a21 // fmla za.s[x8, 1], { z17.s-z20.s }, z14.s\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- ".inst 0xc13a1a22 // fmla za.s[x8, 2], { z17.s-z20.s }, z10.s\n"
- "ld1w { z8.s }, p2/Z, [x15, #4, MUL VL]\n"
- ".inst 0xc1381a40 // fmla za.s[x8, 0], { z18.s-z21.s }, z8.s\n"
- ".inst 0xc1351b01 // fmla za.s[x8, 1], { z24.s-z27.s }, z5.s\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
+ ".inst 0xc1321b41 // fmla za.s[x8, 1], { z26.s-z29.s }, z2.s\n"
+ "ld1w { z6.s }, p2/Z, [x15, #4, MUL VL]\n"
+ ".inst 0xc1311b42 // fmla za.s[x8, 2], { z26.s-z29.s }, z1.s\n"
".inst 0xa04049e8 // ld1w { z8.s-z9.s }, pn10.b/Z, [x15]\n"
- ".inst 0xc1301b02 // fmla za.s[x8, 2], { z24.s-z27.s }, z0.s\n"
- ".inst 0xa04149e0 // ld1w { z0.s-z1.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- ".inst 0xc1a3c858 // fclamp { z24.s-z27.s }, z2.s, z3.s\n"
- "st1w { z24.s }, p1, [x11]\n"
+ ".inst 0xc13a1980 // fmla za.s[x8, 0], { z12.s-z15.s }, z10.s\n"
+ ".inst 0xa04149e4 // ld1w { z4.s-z5.s }, pn10.b/Z, [x15, #0x2, MUL VL]\n"
+ ".inst 0xc1301981 // fmla za.s[x8, 1], { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc1331982 // fmla za.s[x8, 2], { z12.s-z15.s }, z3.s\n"
+ ".inst 0xc1361b60 // fmla za.s[x8, 0], { z27.s-z30.s }, z6.s\n"
+ ".inst 0xc1341b61 // fmla za.s[x8, 1], { z27.s-z30.s }, z4.s\n"
+ ".inst 0xc1381b62 // fmla za.s[x8, 2], { z27.s-z30.s }, z8.s\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1b0ca24 // fclamp { z4.s-z7.s }, z17.s, z16.s\n"
+ "st1w { z4.s }, p1, [x11]\n"
"add x11, x11, x9, LSL #2\n"
- ".inst 0xc1301a41 // fmla za.s[x8, 1], { z18.s-z21.s }, z0.s\n"
- "st1w { z25.s }, p1, [x10]\n"
+ "st1w { z5.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- ".inst 0xc1381a42 // fmla za.s[x8, 2], { z18.s-z21.s }, z8.s\n"
- "add x8, x8, #0x1\n"
- "st1w { z26.s }, p1, [x27]\n"
+ "st1w { z6.s }, p1, [x27]\n"
"add x27, x27, x25, LSL #2\n"
- "st1w { z27.s }, p1, [x26]\n"
+ "st1w { z7.s }, p1, [x26]\n"
"add x26, x26, x24, LSL #2\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"21:" // Tail input: End
"cbz x13, 23f\n"
"22:" // Right padding loop
".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc1a3c848 // fclamp { z8.s-z11.s }, z2.s, z3.s\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1b0ca28 // fclamp { z8.s-z11.s }, z17.s, z16.s\n"
"st1w { z8.s }, p1, [x11]\n"
"add x11, x11, x9, LSL #2\n"
- ".inst 0xc0040f84 // mova za.d[x8, #4], { z28.d-z31.d }\n"
"st1w { z9.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
"st1w { z10.s }, p1, [x27]\n"
@@ -1137,12 +1137,12 @@ void sme2_fp32_planar_5x5_s2_4rows_mla_za_impl(
"bgt 22b\n"
"23:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
"incb x20, ALL, MUL #16\n"
"incb x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x17\n"
- "whilelt p1.s, x17, x7\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21, LSL #2\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
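
// Editorial note between the two files being diffed: the generated assembly above
// implements sme2_fp32_planar_5x5_s2_4rows_mla_za_impl, i.e. a 5x5, stride-2, FP32
// depthwise pass producing four output rows per iteration. ZA accumulators are seeded
// from the bias (mova za.d), input-by-weight products are accumulated into them
// (fmla za.s with multi-vector operands), and results are clamped (fclamp) before the
// st1w stores. Below is a minimal scalar sketch of that arithmetic for the unpadded
// interior, using hypothetical names; the assembly additionally handles edge padding
// via the p0/p8 predicates. This is a reference model, not the Compute Library API.

#include <algorithm>
#include <cstddef>

// One channel plane; strides are in elements, weights are 25 taps row-major.
void depthwise_planar_5x5_s2_ref(const float *in, std::size_t in_stride,
                                 const float *weights, float bias,
                                 float clamp_min, float clamp_max,
                                 float *out, std::size_t out_stride,
                                 std::size_t out_rows, std::size_t out_cols)
{
    for (std::size_t r = 0; r < out_rows; ++r)       // the SME2 kernel does 4 rows per pass
    {
        for (std::size_t c = 0; c < out_cols; ++c)
        {
            float acc = bias;                        // models seeding ZA from the bias (mova)
            for (std::size_t kr = 0; kr < 5; ++kr)
            {
                for (std::size_t kc = 0; kc < 5; ++kc)
                {
                    // models the fmla za.s accumulation; stride 2 in both directions
                    acc += in[(2 * r + kr) * in_stride + (2 * c + kc)] * weights[kr * 5 + kc];
                }
            }
            // models fclamp followed by st1w
            out[r * out_stride + c] = std::min(std::max(acc, clamp_min), clamp_max);
        }
    }
}
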
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp
index be82e04613..412d786d8a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,99 +72,99 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
"ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"mov x20, #0x6\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x7\n"
"ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
- "ld1rw { z25.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
"ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rw { z23.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+ "sub x20, x20, x7\n"
+ "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z13.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p1.s, XZR, x16\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z13.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p8.s, XZR, x17\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
"ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
- "fmov z26.s, #0x0\n"
+ "fmov z4.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z26.s }, p1/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z4.s }, p1/Z, [x20, x15, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x21\n"
- "fmov z6.s, #0x0\n"
- "ld1w { z15.s }, p2/Z, [x20]\n"
+ "ldr x25, [%x[args], %[offsetof_Args_weights]]\n"
+ "fmov z1.s, #0x0\n"
+ "fmov z12.s, #0x0\n"
+ "mov x24, #0x6\n"
+ "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "fmov z15.s, #0x0\n"
+ "add x20, x17, x7\n"
+ "lsl x23, %x[ld_in_row], #0x2\n"
+ "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
+ "mov z5.d, z4.d\n"
+ "mov x8, #0x0\n"
+ "sub x24, x24, x20\n"
+ "mov x22, x25\n"
+ "incb x25\n"
+ "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ld1w { z31.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #3\n"
+ "sub x20, x14, #0x1\n"
+ "ld1w { z21.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #3\n"
+ "orr x21, x20, %x[ld_in_col], LSL #18\n"
+ "ld1w { z10.s }, p2/Z, [x22]\n"
+ "mov x20, x25\n"
+ "incb x25\n"
+ ".inst 0x648aabe1 // bfcvtnt z1.h, p2/M, z31.s\n"
+ ".inst 0x658aabe2 // bfcvt z2.h, p2/M, z31.s\n"
+ "ld1w { z8.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #3\n"
- "incb x21\n"
+ ".inst 0x658aaaa3 // bfcvt z3.h, p2/M, z21.s\n"
+ "orr x21, x16, x21, LSL #20\n"
+ "madd x23, x23, x17, x13\n"
+ ".inst 0x658aa94e // bfcvt z14.h, p2/M, z10.s\n"
+ "lsl x21, x21, #0x2\n"
+ ".inst 0x658aa907 // bfcvt z7.h, p2/M, z8.s\n"
+ ".inst 0x648aa90c // bfcvtnt z12.h, p2/M, z8.s\n"
+ ".inst 0x648aaaa2 // bfcvtnt z2.h, p2/M, z21.s\n"
"ld1w { z29.s }, p2/Z, [x20]\n"
- ".inst 0x648aa9e6 // bfcvtnt z6.h, p2/M, z15.s\n"
"incb x20, ALL, MUL #3\n"
- "ld1w { z30.s }, p2/Z, [x20]\n"
- "mov x20, x21\n"
- ".inst 0x658aa9e5 // bfcvt z5.h, p2/M, z15.s\n"
- "ld1w { z14.s }, p2/Z, [x20]\n"
- ".inst 0x658aaba8 // bfcvt z8.h, p2/M, z29.s\n"
- "fmov z11.s, #0x0\n"
+ ".inst 0x648aa943 // bfcvtnt z3.h, p2/M, z10.s\n"
+ "ld1w { z20.s }, p2/Z, [x20]\n"
+ "mov x20, x25\n"
+ "ld1w { z17.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #3\n"
- ".inst 0x658aa9ca // bfcvt z10.h, p2/M, z14.s\n"
- ".inst 0x648aaba5 // bfcvtnt z5.h, p2/M, z29.s\n"
- "incb x21\n"
- "ld1w { z24.s }, p2/Z, [x20]\n"
- "incb x20, ALL, MUL #3\n"
- ".inst 0x648aabc8 // bfcvtnt z8.h, p2/M, z30.s\n"
- ".inst 0x658aabcc // bfcvt z12.h, p2/M, z30.s\n"
- "ld1w { z28.s }, p2/Z, [x20]\n"
- "mov x21, x21\n"
- ".inst 0x648aa9cb // bfcvtnt z11.h, p2/M, z14.s\n"
- "ld1w { z20.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
- ".inst 0x648aab0a // bfcvtnt z10.h, p2/M, z24.s\n"
- ".inst 0x658aab09 // bfcvt z9.h, p2/M, z24.s\n"
- "ld1w { z15.s }, p2/Z, [x21]\n"
- "ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
- "incb x21, ALL, MUL #3\n"
- "fmov z14.s, #0x0\n"
- ".inst 0x658aaa81 // bfcvt z1.h, p2/M, z20.s\n"
- "ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
- ".inst 0x658aa9e7 // bfcvt z7.h, p2/M, z15.s\n"
- ".inst 0x648aab89 // bfcvtnt z9.h, p2/M, z28.s\n"
- "sub x20, x14, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
- ".inst 0x658aab84 // bfcvt z4.h, p2/M, z28.s\n"
- "ld1w { z29.s }, p2/Z, [x21]\n"
- "orr x23, x16, x23, LSL #20\n"
- "mov x22, #0x6\n"
- "add x21, x17, x7\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "mov z27.d, z26.d\n"
- ".inst 0x648aaa8e // bfcvtnt z14.h, p2/M, z20.s\n"
- ".inst 0x648aa9e1 // bfcvtnt z1.h, p2/M, z15.s\n"
".inst 0x648aaba7 // bfcvtnt z7.h, p2/M, z29.s\n"
- "mov x8, #0x0\n"
- "ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
- ".inst 0x658aaba2 // bfcvt z2.h, p2/M, z29.s\n"
- "lsl x23, x23, #0x2\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x17, x13\n"
+ ".inst 0x658aaba0 // bfcvt z0.h, p2/M, z29.s\n"
+ "ld1w { z25.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ ".inst 0x658aaa86 // bfcvt z6.h, p2/M, z20.s\n"
+ ".inst 0x658aaa29 // bfcvt z9.h, p2/M, z17.s\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ ".inst 0x658aab28 // bfcvt z8.h, p2/M, z25.s\n"
+ ".inst 0x648aaa80 // bfcvtnt z0.h, p2/M, z20.s\n"
+ "ld1w { z10.s }, p2/Z, [x20]\n"
+ ".inst 0x648aab29 // bfcvtnt z9.h, p2/M, z25.s\n"
+ ".inst 0x648aa948 // bfcvtnt z8.h, p2/M, z10.s\n"
+ ".inst 0x658aa94a // bfcvt z10.h, p2/M, z10.s\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x24, x24, #0x1\n"
+ ".inst 0xf8b54afc // rprfm pldstrm, x21, [x23]\n"
+ "add x23, x23, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
"ldr x22, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x13, x17, x20, x13\n"
- ".inst 0xc0040b40 // mova za.d[x8, #0], { z26.d-z27.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040b41 // mova za.d[x8, #1], { z26.d-z27.d }\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ ".inst 0xc0040880 // mova za.d[x8, #0], { z4.d-z5.d }\n"
"mov x10, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x13, x17, x21, x13\n"
+ ".inst 0xc0040881 // mova za.d[x8, #1], { z4.d-z5.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040882 // mova za.d[x8, #2], { z4.d-z5.d }\n"
"ldp x9, x28, [x22], #0x10\n"
- ".inst 0xc0040b42 // mova za.d[x8, #2], { z26.d-z27.d }\n"
+ ".inst 0xc0040883 // mova za.d[x8, #3], { z4.d-z5.d }\n"
"ldp x27, x26, [x20], #0x10\n"
- ".inst 0xc0040b43 // mova za.d[x8, #3], { z26.d-z27.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040b44 // mova za.d[x8, #4], { z26.d-z27.d }\n"
+ ".inst 0xc0040884 // mova za.d[x8, #4], { z4.d-z5.d }\n"
+ ".inst 0xc0040885 // mova za.d[x8, #5], { z4.d-z5.d }\n"
"ldp x25, x24, [x22], #0x10\n"
- ".inst 0xc0040b45 // mova za.d[x8, #5], { z26.d-z27.d }\n"
"ldp x23, x22, [x20], #0x10\n"
"cbz x21, 5f\n"
"cmp x21, x10\n"
@@ -172,19 +172,19 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
"sub x21, x21, x20\n"
"sub x10, x10, x20\n"
"cbz x21, 5f\n"
- ".inst 0xc0060814 // mova { z20.d-z21.d }, za.d[x8, #0]\n"
+ ".inst 0xc006081c // mova { z28.d-z29.d }, za.d[x8, #0]\n"
"sub x11, x11, x21\n"
- ".inst 0xc0060836 // mova { z22.d-z23.d }, za.d[x8, #1]\n"
- ".inst 0xc1adcb34 // fclamp { z20.s-z23.s }, z25.s, z13.s\n"
+ ".inst 0xc006083e // mova { z30.d-z31.d }, za.d[x8, #1]\n"
+ ".inst 0xc1adcafc // fclamp { z28.s-z31.s }, z23.s, z13.s\n"
"4:" // Left padding
"subs x21, x21, #0x1\n"
- "st1w { z20.s }, p1, [x9]\n"
+ "st1w { z28.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z22.s }, p1, [x28]\n"
+ "st1w { z30.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- "st1w { z21.s }, p1, [x25]\n"
+ "st1w { z29.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- "st1w { z23.s }, p1, [x24]\n"
+ "st1w { z31.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
@@ -196,124 +196,124 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
"beq 7f\n"
"6:" // Unpadded: 2 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x13]\n"
- ".inst 0x658aaa3e // bfcvt z30.h, p2/M, z17.s\n"
+ "ld1w { z21.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab9e // bfcvtnt z30.h, p2/M, z28.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa1f // bfcvt z31.h, p2/M, z16.s\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa9ff // bfcvtnt z31.h, p2/M, z15.s\n"
- ".inst 0xc12513d0 // bfdot za.s[x8, 0], { z30.h-z31.h }, z5.h\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ ".inst 0x658aaabb // bfcvt z27.h, p2/M, z21.s\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa00 // bfcvt z0.h, p2/M, z16.s\n"
- ".inst 0xc12613d1 // bfdot za.s[x8, 1], { z30.h-z31.h }, z6.h\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
- ".inst 0x648aa9e0 // bfcvtnt z0.h, p2/M, z15.s\n"
- ".inst 0xc12c13f0 // bfdot za.s[x8, 0], { z31.h-z0.h }, z12.h\n"
- ".inst 0xc12813f1 // bfdot za.s[x8, 1], { z31.h-z0.h }, z8.h\n"
+ ".inst 0x658aab1c // bfcvt z28.h, p2/M, z24.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
+ ".inst 0x648aaa7b // bfcvtnt z27.h, p2/M, z19.s\n"
+ ".inst 0x658aaadd // bfcvt z29.h, p2/M, z22.s\n"
+ ".inst 0x648aaa3c // bfcvtnt z28.h, p2/M, z17.s\n"
+ ".inst 0x648aaa1d // bfcvtnt z29.h, p2/M, z16.s\n"
+ ".inst 0xc1221370 // bfdot za.s[x8, 0], { z27.h-z28.h }, z2.h\n"
+ ".inst 0xc1211371 // bfdot za.s[x8, 1], { z27.h-z28.h }, z1.h\n"
+ ".inst 0xc12e1390 // bfdot za.s[x8, 0], { z28.h-z29.h }, z14.h\n"
+ ".inst 0xc1231391 // bfdot za.s[x8, 1], { z28.h-z29.h }, z3.h\n"
"7:" // Unpadded: 1 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z31.s }, p1/Z, [x13]\n"
- ".inst 0x658aabef // bfcvt z15.h, p2/M, z31.s\n"
+ "ld1w { z24.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z30.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa30 // bfcvtnt z16.h, p2/M, z17.s\n"
- ".inst 0xc12a11f0 // bfdot za.s[x8, 0], { z15.h-z16.h }, z10.h\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ ".inst 0x658aab13 // bfcvt z19.h, p2/M, z24.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaad1 // bfcvt z17.h, p2/M, z22.s\n"
- ".inst 0xc12b11f1 // bfdot za.s[x8, 1], { z15.h-z16.h }, z11.h\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
- ".inst 0x648aaa51 // bfcvtnt z17.h, p2/M, z18.s\n"
- ".inst 0xc12511f2 // bfdot za.s[x8, 2], { z15.h-z16.h }, z5.h\n"
- ".inst 0xc12611f3 // bfdot za.s[x8, 3], { z15.h-z16.h }, z6.h\n"
- ".inst 0xc1241210 // bfdot za.s[x8, 0], { z16.h-z17.h }, z4.h\n"
- ".inst 0xc1291211 // bfdot za.s[x8, 1], { z16.h-z17.h }, z9.h\n"
- ".inst 0xc12c1212 // bfdot za.s[x8, 2], { z16.h-z17.h }, z12.h\n"
- ".inst 0xc1281213 // bfdot za.s[x8, 3], { z16.h-z17.h }, z8.h\n"
+ ".inst 0x658aaa94 // bfcvt z20.h, p2/M, z20.s\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
+ ".inst 0x648aabd3 // bfcvtnt z19.h, p2/M, z30.s\n"
+ ".inst 0x658aaa15 // bfcvt z21.h, p2/M, z16.s\n"
+ ".inst 0x648aaa54 // bfcvtnt z20.h, p2/M, z18.s\n"
+ ".inst 0x648aaa35 // bfcvtnt z21.h, p2/M, z17.s\n"
+ ".inst 0xc1271270 // bfdot za.s[x8, 0], { z19.h-z20.h }, z7.h\n"
+ ".inst 0xc12c1271 // bfdot za.s[x8, 1], { z19.h-z20.h }, z12.h\n"
+ ".inst 0xc1221272 // bfdot za.s[x8, 2], { z19.h-z20.h }, z2.h\n"
+ ".inst 0xc1211273 // bfdot za.s[x8, 3], { z19.h-z20.h }, z1.h\n"
+ ".inst 0xc1261290 // bfdot za.s[x8, 0], { z20.h-z21.h }, z6.h\n"
+ ".inst 0xc1201291 // bfdot za.s[x8, 1], { z20.h-z21.h }, z0.h\n"
+ ".inst 0xc12e1292 // bfdot za.s[x8, 2], { z20.h-z21.h }, z14.h\n"
+ ".inst 0xc1231293 // bfdot za.s[x8, 3], { z20.h-z21.h }, z3.h\n"
"8:" // Unpadded: 0 priming loads
"cbz x14, 16f\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x13]\n"
- ".inst 0x658aaa16 // bfcvt z22.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p1/Z, [x13]\n"
"sub x14, x14, #0x1\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"sub x11, x11, #0x1\n"
- ".inst 0x648aaa16 // bfcvtnt z22.h, p2/M, z16.s\n"
- "ld1w { z0.s }, p1/Z, [x20]\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa817 // bfcvt z23.h, p2/M, z0.s\n"
"cmp x14, x11\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa3c // bfcvt z28.h, p2/M, z17.s\n"
"csel x21, x14, x11, LT\n"
- ".inst 0x648aab17 // bfcvtnt z23.h, p2/M, z24.s\n"
- "ld1w { z0.s }, p1/Z, [x20]\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa818 // bfcvt z24.h, p2/M, z0.s\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0x648aaa18 // bfcvtnt z24.h, p2/M, z16.s\n"
+ ".inst 0x658aaabd // bfcvt z29.h, p2/M, z21.s\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"sub x11, x11, x21\n"
+ ".inst 0x658aaa9e // bfcvt z30.h, p2/M, z20.s\n"
+ ".inst 0x648aaa1c // bfcvtnt z28.h, p2/M, z16.s\n"
+ ".inst 0x648aab7d // bfcvtnt z29.h, p2/M, z27.s\n"
+ ".inst 0x648aaa3e // bfcvtnt z30.h, p2/M, z17.s\n"
"cbz x21, 15f\n"
"9:" // Unpadded: Main loop
- ".inst 0xc12112d0 // bfdot za.s[x8, 0], { z22.h-z23.h }, z1.h\n"
+ ".inst 0xc1291390 // bfdot za.s[x8, 0], { z28.h-z29.h }, z9.h\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z0.s }, p1/Z, [x13]\n"
+ "ld1w { z19.s }, p1/Z, [x13]\n"
"subs x21, x21, #0x1\n"
- ".inst 0xc12e12d1 // bfdot za.s[x8, 1], { z22.h-z23.h }, z14.h\n"
- "ld1w { z20.s }, p1/Z, [x20]\n"
+ ".inst 0xc12f1391 // bfdot za.s[x8, 1], { z28.h-z29.h }, z15.h\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12212f0 // bfdot za.s[x8, 0], { z23.h-z24.h }, z2.h\n"
- ".inst 0xc12712f1 // bfdot za.s[x8, 1], { z23.h-z24.h }, z7.h\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
+ ".inst 0xc1271392 // bfdot za.s[x8, 2], { z28.h-z29.h }, z7.h\n"
+ ".inst 0xc12c1393 // bfdot za.s[x8, 3], { z28.h-z29.h }, z12.h\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12a12d2 // bfdot za.s[x8, 2], { z22.h-z23.h }, z10.h\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
+ ".inst 0xc12a13b0 // bfdot za.s[x8, 0], { z29.h-z30.h }, z10.h\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12b12d3 // bfdot za.s[x8, 3], { z22.h-z23.h }, z11.h\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
- ".inst 0xc12512d4 // bfdot za.s[x8, 4], { z22.h-z23.h }, z5.h\n"
- ".inst 0xc12612d5 // bfdot za.s[x8, 5], { z22.h-z23.h }, z6.h\n"
- ".inst 0x658aa816 // bfcvt z22.h, p2/M, z0.s\n"
- ".inst 0x648aaa96 // bfcvtnt z22.h, p2/M, z20.s\n"
- ".inst 0xc12412f2 // bfdot za.s[x8, 2], { z23.h-z24.h }, z4.h\n"
- ".inst 0xc12912f3 // bfdot za.s[x8, 3], { z23.h-z24.h }, z9.h\n"
- ".inst 0xc12c12f4 // bfdot za.s[x8, 4], { z23.h-z24.h }, z12.h\n"
- ".inst 0xc12812f5 // bfdot za.s[x8, 5], { z23.h-z24.h }, z8.h\n"
- ".inst 0x658aaa77 // bfcvt z23.h, p2/M, z19.s\n"
- ".inst 0x658aaa38 // bfcvt z24.h, p2/M, z17.s\n"
+ ".inst 0xc12813b1 // bfdot za.s[x8, 1], { z29.h-z30.h }, z8.h\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
+ ".inst 0xc1221394 // bfdot za.s[x8, 4], { z28.h-z29.h }, z2.h\n"
+ ".inst 0xc1211395 // bfdot za.s[x8, 5], { z28.h-z29.h }, z1.h\n"
+ ".inst 0x658aaa7c // bfcvt z28.h, p2/M, z19.s\n"
+ ".inst 0xc12613b2 // bfdot za.s[x8, 2], { z29.h-z30.h }, z6.h\n"
+ ".inst 0xc12013b3 // bfdot za.s[x8, 3], { z29.h-z30.h }, z0.h\n"
+ ".inst 0xc12e13b4 // bfdot za.s[x8, 4], { z29.h-z30.h }, z14.h\n"
+ ".inst 0xc12313b5 // bfdot za.s[x8, 5], { z29.h-z30.h }, z3.h\n"
+ ".inst 0x658aaa3d // bfcvt z29.h, p2/M, z17.s\n"
+ ".inst 0x658aaa1e // bfcvt z30.h, p2/M, z16.s\n"
".inst 0xc0060810 // mova { z16.d-z17.d }, za.d[x8, #0]\n"
- ".inst 0x648aaa57 // bfcvtnt z23.h, p2/M, z18.s\n"
- ".inst 0x648aab98 // bfcvtnt z24.h, p2/M, z28.s\n"
+ ".inst 0x648aaa5c // bfcvtnt z28.h, p2/M, z18.s\n"
".inst 0xc0060832 // mova { z18.d-z19.d }, za.d[x8, #1]\n"
"add x8, x8, #0x2\n"
- ".inst 0xc1adcb30 // fclamp { z16.s-z19.s }, z25.s, z13.s\n"
+ ".inst 0xc0040884 // mova za.d[x8, #4], { z4.d-z5.d }\n"
+ ".inst 0xc0040885 // mova za.d[x8, #5], { z4.d-z5.d }\n"
+ ".inst 0x648aaabd // bfcvtnt z29.h, p2/M, z21.s\n"
+ ".inst 0x648aaa9e // bfcvtnt z30.h, p2/M, z20.s\n"
+ ".inst 0xc1adcaf0 // fclamp { z16.s-z19.s }, z23.s, z13.s\n"
"st1w { z16.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
"st1w { z18.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- ".inst 0xc0040b44 // mova za.d[x8, #4], { z26.d-z27.d }\n"
"st1w { z17.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- ".inst 0xc0040b45 // mova za.d[x8, #5], { z26.d-z27.d }\n"
"st1w { z19.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
"bgt 9b\n"
@@ -325,186 +325,186 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
"beq 12f\n"
"11:" // Padded: 2 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x13]\n"
- ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z17.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa15 // bfcvt z21.h, p2/M, z16.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa33 // bfcvt z19.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1251290 // bfdot za.s[x8, 0], { z20.h-z21.h }, z5.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaaf6 // bfcvt z22.h, p2/M, z23.s\n"
+ ".inst 0x658aaa34 // bfcvt z20.h, p2/M, z17.s\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
+ ".inst 0x658aabf5 // bfcvt z21.h, p2/M, z31.s\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa16 // bfcvtnt z22.h, p2/M, z16.s\n"
- ".inst 0xc1261291 // bfdot za.s[x8, 1], { z20.h-z21.h }, z6.h\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc12c12b0 // bfdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
- ".inst 0xc12812b1 // bfdot za.s[x8, 1], { z21.h-z22.h }, z8.h\n"
+ ".inst 0xc1221270 // bfdot za.s[x8, 0], { z19.h-z20.h }, z2.h\n"
+ ".inst 0xc1211271 // bfdot za.s[x8, 1], { z19.h-z20.h }, z1.h\n"
+ ".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
+ ".inst 0xc12e1290 // bfdot za.s[x8, 0], { z20.h-z21.h }, z14.h\n"
+ ".inst 0xc1231291 // bfdot za.s[x8, 1], { z20.h-z21.h }, z3.h\n"
"12:" // Padded: 1 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x13]\n"
- ".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z17.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa34 // bfcvt z20.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12a1270 // bfdot za.s[x8, 0], { z19.h-z20.h }, z10.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa9f5 // bfcvt z21.h, p2/M, z15.s\n"
+ ".inst 0x658aaa35 // bfcvt z21.h, p2/M, z17.s\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
- ".inst 0xc12b1271 // bfdot za.s[x8, 1], { z19.h-z20.h }, z11.h\n"
- ".inst 0xc1251272 // bfdot za.s[x8, 2], { z19.h-z20.h }, z5.h\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1261273 // bfdot za.s[x8, 3], { z19.h-z20.h }, z6.h\n"
- ".inst 0xc1241290 // bfdot za.s[x8, 0], { z20.h-z21.h }, z4.h\n"
- ".inst 0xc1291291 // bfdot za.s[x8, 1], { z20.h-z21.h }, z9.h\n"
- ".inst 0xc12c1292 // bfdot za.s[x8, 2], { z20.h-z21.h }, z12.h\n"
- ".inst 0xc1281293 // bfdot za.s[x8, 3], { z20.h-z21.h }, z8.h\n"
+ ".inst 0x658aab36 // bfcvt z22.h, p2/M, z25.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0xc1271290 // bfdot za.s[x8, 0], { z20.h-z21.h }, z7.h\n"
+ ".inst 0xc12c1291 // bfdot za.s[x8, 1], { z20.h-z21.h }, z12.h\n"
+ ".inst 0x648aaa16 // bfcvtnt z22.h, p2/M, z16.s\n"
+ ".inst 0xc1221292 // bfdot za.s[x8, 2], { z20.h-z21.h }, z2.h\n"
+ ".inst 0xc1211293 // bfdot za.s[x8, 3], { z20.h-z21.h }, z1.h\n"
+ ".inst 0xc12612b0 // bfdot za.s[x8, 0], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xc12012b1 // bfdot za.s[x8, 1], { z21.h-z22.h }, z0.h\n"
+ ".inst 0xc12e12b2 // bfdot za.s[x8, 2], { z21.h-z22.h }, z14.h\n"
+ ".inst 0xc12312b3 // bfdot za.s[x8, 3], { z21.h-z22.h }, z3.h\n"
"13:" // Padded: 0 priming loads
"cbz x14, 16f\n"
"mov x12, #0x0\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x14, x14, #0x1\n"
+ "sub x11, x11, #0x1\n"
+ "cmp x14, x11\n"
"ld1w { z16.s }, p0/Z, [x13]\n"
- ".inst 0x658aaa16 // bfcvt z22.h, p2/M, z16.s\n"
- "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa16 // bfcvtnt z22.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "csel x21, x14, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "sub x11, x11, x21\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa17 // bfcvt z23.h, p2/M, z16.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0x658aaa1c // bfcvt z28.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa17 // bfcvtnt z23.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa3c // bfcvtnt z28.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa18 // bfcvt z24.h, p2/M, z16.s\n"
+ ".inst 0x658aaa1d // bfcvt z29.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa3d // bfcvtnt z29.h, p2/M, z17.s\n"
+ ".inst 0x658aaa1e // bfcvt z30.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- "sub x14, x14, #0x1\n"
- ".inst 0x648aaa18 // bfcvtnt z24.h, p2/M, z16.s\n"
- "sub x11, x11, #0x1\n"
- "cmp x14, x11\n"
- "csel x21, x14, x11, LT\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- "sub x11, x11, x21\n"
+ ".inst 0x648aaa1e // bfcvtnt z30.h, p2/M, z16.s\n"
"cbz x21, 15f\n"
"14:" // Padded: Main loop
"mov x12, #0x0\n"
- ".inst 0xc12112d0 // bfdot za.s[x8, 0], { z22.h-z23.h }, z1.h\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z20.s }, p0/Z, [x13]\n"
- ".inst 0xc12e12d1 // bfdot za.s[x8, 1], { z22.h-z23.h }, z14.h\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1291390 // bfdot za.s[x8, 0], { z28.h-z29.h }, z9.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xc12f1391 // bfdot za.s[x8, 1], { z28.h-z29.h }, z15.h\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xc1271392 // bfdot za.s[x8, 2], { z28.h-z29.h }, z7.h\n"
+ ".inst 0xc12c1393 // bfdot za.s[x8, 3], { z28.h-z29.h }, z12.h\n"
+ "ld1w { z17.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc12a13b0 // bfdot za.s[x8, 0], { z29.h-z30.h }, z10.h\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc12813b1 // bfdot za.s[x8, 1], { z29.h-z30.h }, z8.h\n"
+ ".inst 0xc1221394 // bfdot za.s[x8, 4], { z28.h-z29.h }, z2.h\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- ".inst 0xc12212f0 // bfdot za.s[x8, 0], { z23.h-z24.h }, z2.h\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1211395 // bfdot za.s[x8, 5], { z28.h-z29.h }, z1.h\n"
+ ".inst 0xc12613b2 // bfdot za.s[x8, 2], { z29.h-z30.h }, z6.h\n"
+ ".inst 0x658aaa3c // bfcvt z28.h, p2/M, z17.s\n"
+ ".inst 0xc12013b3 // bfdot za.s[x8, 3], { z29.h-z30.h }, z0.h\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc12712f1 // bfdot za.s[x8, 1], { z23.h-z24.h }, z7.h\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- "mov x12, #0x4\n"
- ".inst 0xc12a12d2 // bfdot za.s[x8, 2], { z22.h-z23.h }, z10.h\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12b12d3 // bfdot za.s[x8, 3], { z22.h-z23.h }, z11.h\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xc12e13b4 // bfdot za.s[x8, 4], { z29.h-z30.h }, z14.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc12313b5 // bfdot za.s[x8, 5], { z29.h-z30.h }, z3.h\n"
+ ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+ ".inst 0x648aaa1c // bfcvtnt z28.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12512d4 // bfdot za.s[x8, 4], { z22.h-z23.h }, z5.h\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "subs x21, x21, #0x1\n"
- ".inst 0xc12612d5 // bfdot za.s[x8, 5], { z22.h-z23.h }, z6.h\n"
- ".inst 0x658aaa96 // bfcvt z22.h, p2/M, z20.s\n"
- ".inst 0x648aaa76 // bfcvtnt z22.h, p2/M, z19.s\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc12412f2 // bfdot za.s[x8, 2], { z23.h-z24.h }, z4.h\n"
- ".inst 0xc12912f3 // bfdot za.s[x8, 3], { z23.h-z24.h }, z9.h\n"
- ".inst 0xc12c12f4 // bfdot za.s[x8, 4], { z23.h-z24.h }, z12.h\n"
- ".inst 0xc12812f5 // bfdot za.s[x8, 5], { z23.h-z24.h }, z8.h\n"
- ".inst 0x658aaa37 // bfcvt z23.h, p2/M, z17.s\n"
- ".inst 0x658aaa18 // bfcvt z24.h, p2/M, z16.s\n"
- ".inst 0xc0060810 // mova { z16.d-z17.d }, za.d[x8, #0]\n"
- ".inst 0x648aaa57 // bfcvtnt z23.h, p2/M, z18.s\n"
- ".inst 0x648aa9f8 // bfcvtnt z24.h, p2/M, z15.s\n"
- ".inst 0xc0060832 // mova { z18.d-z19.d }, za.d[x8, #1]\n"
+ ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
+ ".inst 0x658aaa3d // bfcvt z29.h, p2/M, z17.s\n"
"add x8, x8, #0x2\n"
- ".inst 0xc1adcb30 // fclamp { z16.s-z19.s }, z25.s, z13.s\n"
- "st1w { z16.s }, p1, [x9]\n"
+ ".inst 0xc0040884 // mova za.d[x8, #4], { z4.d-z5.d }\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0040885 // mova za.d[x8, #5], { z4.d-z5.d }\n"
+ ".inst 0xc1adcaf8 // fclamp { z24.s-z27.s }, z23.s, z13.s\n"
+ ".inst 0x648aaa1d // bfcvtnt z29.h, p2/M, z16.s\n"
+ ".inst 0x658aabfe // bfcvt z30.h, p2/M, z31.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
+ "st1w { z24.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z18.s }, p1, [x28]\n"
+ "st1w { z26.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- ".inst 0xc0040b44 // mova za.d[x8, #4], { z26.d-z27.d }\n"
- "st1w { z17.s }, p1, [x25]\n"
+ "st1w { z25.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- ".inst 0xc0040b45 // mova za.d[x8, #5], { z26.d-z27.d }\n"
- "st1w { z19.s }, p1, [x24]\n"
+ ".inst 0x648aaa1e // bfcvtnt z30.h, p2/M, z16.s\n"
+ "st1w { z27.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
"bgt 14b\n"
"15:" // Main loop tail
- ".inst 0xc12112d0 // bfdot za.s[x8, 0], { z22.h-z23.h }, z1.h\n"
- ".inst 0xc12e12d1 // bfdot za.s[x8, 1], { z22.h-z23.h }, z14.h\n"
- ".inst 0xc12212f0 // bfdot za.s[x8, 0], { z23.h-z24.h }, z2.h\n"
- ".inst 0xc12712f1 // bfdot za.s[x8, 1], { z23.h-z24.h }, z7.h\n"
- ".inst 0xc12a12d2 // bfdot za.s[x8, 2], { z22.h-z23.h }, z10.h\n"
- ".inst 0xc12b12d3 // bfdot za.s[x8, 3], { z22.h-z23.h }, z11.h\n"
- ".inst 0xc12512d4 // bfdot za.s[x8, 4], { z22.h-z23.h }, z5.h\n"
- ".inst 0xc12612d5 // bfdot za.s[x8, 5], { z22.h-z23.h }, z6.h\n"
- ".inst 0xc0060810 // mova { z16.d-z17.d }, za.d[x8, #0]\n"
- ".inst 0xc0060832 // mova { z18.d-z19.d }, za.d[x8, #1]\n"
- ".inst 0xc1adcb30 // fclamp { z16.s-z19.s }, z25.s, z13.s\n"
- "st1w { z16.s }, p1, [x9]\n"
+ ".inst 0xc1291390 // bfdot za.s[x8, 0], { z28.h-z29.h }, z9.h\n"
+ ".inst 0xc12f1391 // bfdot za.s[x8, 1], { z28.h-z29.h }, z15.h\n"
+ ".inst 0xc1271392 // bfdot za.s[x8, 2], { z28.h-z29.h }, z7.h\n"
+ ".inst 0xc12c1393 // bfdot za.s[x8, 3], { z28.h-z29.h }, z12.h\n"
+ ".inst 0xc12a13b0 // bfdot za.s[x8, 0], { z29.h-z30.h }, z10.h\n"
+ ".inst 0xc12813b1 // bfdot za.s[x8, 1], { z29.h-z30.h }, z8.h\n"
+ ".inst 0xc1221394 // bfdot za.s[x8, 4], { z28.h-z29.h }, z2.h\n"
+ ".inst 0xc1211395 // bfdot za.s[x8, 5], { z28.h-z29.h }, z1.h\n"
+ ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+ ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
+ ".inst 0xc12613b2 // bfdot za.s[x8, 2], { z29.h-z30.h }, z6.h\n"
+ ".inst 0xc12013b3 // bfdot za.s[x8, 3], { z29.h-z30.h }, z0.h\n"
+ ".inst 0xc12e13b4 // bfdot za.s[x8, 4], { z29.h-z30.h }, z14.h\n"
+ ".inst 0xc12313b5 // bfdot za.s[x8, 5], { z29.h-z30.h }, z3.h\n"
+ "add x8, x8, #0x2\n"
+ ".inst 0xc1adcaf8 // fclamp { z24.s-z27.s }, z23.s, z13.s\n"
+ ".inst 0xc0040884 // mova za.d[x8, #4], { z4.d-z5.d }\n"
+ ".inst 0xc0040885 // mova za.d[x8, #5], { z4.d-z5.d }\n"
+ "st1w { z24.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc12412f2 // bfdot za.s[x8, 2], { z23.h-z24.h }, z4.h\n"
- "st1w { z18.s }, p1, [x28]\n"
+ "st1w { z26.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- ".inst 0xc12912f3 // bfdot za.s[x8, 3], { z23.h-z24.h }, z9.h\n"
- "st1w { z17.s }, p1, [x25]\n"
+ "st1w { z25.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- ".inst 0xc12c12f4 // bfdot za.s[x8, 4], { z23.h-z24.h }, z12.h\n"
- "st1w { z19.s }, p1, [x24]\n"
+ "st1w { z27.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
- ".inst 0xc12812f5 // bfdot za.s[x8, 5], { z23.h-z24.h }, z8.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xc0040b44 // mova za.d[x8, #4], { z26.d-z27.d }\n"
- ".inst 0xc0040b45 // mova za.d[x8, #5], { z26.d-z27.d }\n"
"16:" // Main loop skip tail
"cbz x11, 18f\n"
"17:" // Right padding loop
@@ -512,25 +512,25 @@ void sme2_fp32bf16fp32_planar_3x3_s1_4rows_dot_za_impl(
"subs x11, x11, #0x1\n"
".inst 0xc006083e // mova { z30.d-z31.d }, za.d[x8, #1]\n"
"add x8, x8, #0x2\n"
- ".inst 0xc1adcb3c // fclamp { z28.s-z31.s }, z25.s, z13.s\n"
+ ".inst 0xc0040884 // mova za.d[x8, #4], { z4.d-z5.d }\n"
+ ".inst 0xc0040885 // mova za.d[x8, #5], { z4.d-z5.d }\n"
+ ".inst 0xc1adcafc // fclamp { z28.s-z31.s }, z23.s, z13.s\n"
"st1w { z28.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
"st1w { z30.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
- ".inst 0xc0040b44 // mova za.d[x8, #4], { z26.d-z27.d }\n"
"st1w { z29.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- ".inst 0xc0040b45 // mova za.d[x8, #5], { z26.d-z27.d }\n"
"st1w { z31.s }, p1, [x24]\n"
"add x24, x24, x22, LSL #2\n"
"bgt 17b\n"
"18:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x15\n"
+ "whilelt p1.s, x15, x16\n"
"incb x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x15\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x15, x16\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21, LSL #2\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp
index a3b9ca402a..7298a88814 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,85 +72,85 @@ void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
"ldr x7, [%x[args], %[offsetof_Args_pad_bottom]]\n"
"mov x20, #0x9\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x7\n"
"ldr x17, [%x[args], %[offsetof_Args_pad_top]]\n"
"ptrue p2.b\n"
- "ld1rw { z4.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
"ldr x16, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rw { z24.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+ "sub x20, x20, x7\n"
+ "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z13.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p1.s, XZR, x16\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z1.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
"whilelt p8.s, XZR, x17\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ldr x15, [%x[args], %[offsetof_Args_current_channel]]\n"
"1:" // Channel loop
"ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
- "fmov z24.s, #0x0\n"
+ "fmov z20.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z24.s }, p1/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x20, x15, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x21\n"
- "ld1w { z18.s }, p2/Z, [x20]\n"
- "incb x20, ALL, MUL #3\n"
- "incb x21\n"
- "ld1w { z23.s }, p2/Z, [x20]\n"
- "incb x20, ALL, MUL #3\n"
- ".inst 0x658aaa4e // bfcvt z14.h, p2/M, z18.s\n"
- "ld1w { z6.s }, p2/Z, [x20]\n"
- "mov x20, x21\n"
- ".inst 0x648aaaee // bfcvtnt z14.h, p2/M, z23.s\n"
- "incb x21\n"
- "ld1w { z28.s }, p2/Z, [x20]\n"
- "incb x20, ALL, MUL #3\n"
- ".inst 0x658aa8c3 // bfcvt z3.h, p2/M, z6.s\n"
- ".inst 0x658aab88 // bfcvt z8.h, p2/M, z28.s\n"
- "ld1w { z10.s }, p2/Z, [x20]\n"
- "incb x20, ALL, MUL #3\n"
+ "ldr x25, [%x[args], %[offsetof_Args_weights]]\n"
+ "mov x24, #0x9\n"
+ "add x20, x17, x7\n"
+ "mov z21.d, z20.d\n"
"ldr x14, [%x[args], %[offsetof_Args_input_cols]]\n"
- ".inst 0x648aa948 // bfcvtnt z8.h, p2/M, z10.s\n"
- "ld1w { z2.s }, p2/Z, [x20]\n"
- "mov x21, x21\n"
- ".inst 0x658aa847 // bfcvt z7.h, p2/M, z2.s\n"
+ "lsl x23, %x[ld_in_row], #0x2\n"
+ "mov z22.d, z20.d\n"
+ "mov z23.d, z20.d\n"
"ldr x13, [%x[args], %[offsetof_Args_inptr]]\n"
- "ld1w { z9.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
- ".inst 0x658aa920 // bfcvt z0.h, p2/M, z9.s\n"
- "sub x20, x14, #0x1\n"
- "ld1w { z6.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #3\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
- "mov z25.d, z24.d\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "orr x23, x16, x23, LSL #20\n"
- "mov x22, #0x9\n"
- "mov z26.d, z24.d\n"
- "add x21, x17, x7\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "mov z27.d, z24.d\n"
- ".inst 0x648aa8c0 // bfcvtnt z0.h, p2/M, z6.s\n"
- ".inst 0x658aaa26 // bfcvt z6.h, p2/M, z17.s\n"
"mov x8, #0x0\n"
+ "sub x24, x24, x20\n"
+ "mov x22, x25\n"
+ "incb x25\n"
"ldr x11, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x17, x13\n"
+ "ld1w { z5.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #3\n"
+ "sub x20, x14, #0x1\n"
+ "ld1w { z14.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #3\n"
+ "orr x21, x20, %x[ld_in_col], LSL #18\n"
+ "ld1w { z29.s }, p2/Z, [x22]\n"
+ "mov x20, x25\n"
+ "incb x25\n"
+ ".inst 0x658aa8ab // bfcvt z11.h, p2/M, z5.s\n"
+ "ld1w { z28.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ "orr x21, x16, x21, LSL #20\n"
+ "lsl x21, x21, #0x2\n"
+ "madd x23, x23, x17, x13\n"
+ ".inst 0x658aaba9 // bfcvt z9.h, p2/M, z29.s\n"
+ ".inst 0x658aab84 // bfcvt z4.h, p2/M, z28.s\n"
+ ".inst 0x648aa9cb // bfcvtnt z11.h, p2/M, z14.s\n"
+ "ld1w { z19.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ "ld1w { z12.s }, p2/Z, [x20]\n"
+ "mov x20, x25\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ ".inst 0x648aaa64 // bfcvtnt z4.h, p2/M, z19.s\n"
+ "ld1w { z3.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #3\n"
+ ".inst 0x658aa985 // bfcvt z5.h, p2/M, z12.s\n"
+ "ld1w { z15.s }, p2/Z, [x20]\n"
+ ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
+ ".inst 0x658aa9e6 // bfcvt z6.h, p2/M, z15.s\n"
+ ".inst 0x648aa86a // bfcvtnt z10.h, p2/M, z3.s\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x24, x24, #0x1\n"
+ ".inst 0xf8b54afc // rprfm pldstrm, x21, [x23]\n"
+ "add x23, x23, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x13, x17, x20, x13\n"
- ".inst 0xc0040f00 // mova za.d[x8, #0], { z24.d-z27.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040f01 // mova za.d[x8, #1], { z24.d-z27.d }\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ ".inst 0xc0040e80 // mova za.d[x8, #0], { z20.d-z23.d }\n"
"mov x22, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x13, x17, x21, x13\n"
+ ".inst 0xc0040e81 // mova za.d[x8, #1], { z20.d-z23.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
"ldp x10, x9, [x23], #0x10\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
"ldp x28, x27, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
"ldp x26, x25, [x23], #0x10\n"
"ldp x24, x23, [x20], #0x10\n"
"cbz x21, 5f\n"
@@ -162,9 +162,9 @@ void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
"and x22, x21, #0x1\n"
"add x21, x21, #0x1\n"
- ".inst 0xc1a1c890 // fclamp { z16.s-z19.s }, z4.s, z1.s\n"
"lsr x21, x21, #0x1\n"
"sub x11, x11, x21\n"
+ ".inst 0xc1adcb10 // fclamp { z16.s-z19.s }, z24.s, z13.s\n"
"4:" // Left padding
"subs x21, x21, #0x1\n"
"st1w { z16.s }, p1, [x10]\n"
@@ -185,176 +185,176 @@ void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
"beq 7f\n"
"6:" // Unpadded: 2 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x13]\n"
- ".inst 0x658aaa53 // bfcvt z19.h, p2/M, z18.s\n"
+ "ld1w { z25.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z12.s }, p1/Z, [x20]\n"
+ "ld1w { z8.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa993 // bfcvtnt z19.h, p2/M, z12.s\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaaf4 // bfcvt z20.h, p2/M, z23.s\n"
- "ld1w { z2.s }, p1/Z, [x20]\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa854 // bfcvtnt z20.h, p2/M, z2.s\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
+ ".inst 0x658aab3e // bfcvt z30.h, p2/M, z25.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa9f5 // bfcvt z21.h, p2/M, z15.s\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z3.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaad5 // bfcvtnt z21.h, p2/M, z22.s\n"
- "ld1w { z30.s }, p1/Z, [x20]\n"
+ ".inst 0x658aaa5f // bfcvt z31.h, p2/M, z18.s\n"
+ "ld1w { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aabd6 // bfcvt z22.h, p2/M, z30.s\n"
- "ld1w { z12.s }, p1/Z, [x20]\n"
+ ".inst 0x658aaa00 // bfcvt z0.h, p2/M, z16.s\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa996 // bfcvtnt z22.h, p2/M, z12.s\n"
- ".inst 0xc13e1270 // bfdot za.s[x8, 0], { z19.h-z22.h }, z14.h\n"
- "ld1w { z31.s }, p1/Z, [x20]\n"
- ".inst 0x658aabf7 // bfcvt z23.h, p2/M, z31.s\n"
- ".inst 0xc1331290 // bfdot za.s[x8, 0], { z20.h-z23.h }, z3.h\n"
+ ".inst 0x648aa91e // bfcvtnt z30.h, p2/M, z8.s\n"
+ "ld1w { z8.s }, p1/Z, [x20]\n"
+ ".inst 0x658aa9c1 // bfcvt z1.h, p2/M, z14.s\n"
+ ".inst 0x648aab5f // bfcvtnt z31.h, p2/M, z26.s\n"
+ ".inst 0x648aa860 // bfcvtnt z0.h, p2/M, z3.s\n"
+ ".inst 0x658aa902 // bfcvt z2.h, p2/M, z8.s\n"
+ ".inst 0x648aab61 // bfcvtnt z1.h, p2/M, z27.s\n"
+ ".inst 0xc13b13d0 // bfdot za.s[x8, 0], { z30.h-z1.h }, z11.h\n"
+ ".inst 0xc13913f0 // bfdot za.s[x8, 0], { z31.h-z2.h }, z9.h\n"
"7:" // Unpadded: 1 priming loads
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x13]\n"
- ".inst 0x658aaa30 // bfcvt z16.h, p2/M, z17.s\n"
+ "ld1w { z31.s }, p1/Z, [x13]\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "ld1w { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaad0 // bfcvtnt z16.h, p2/M, z22.s\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "ld1w { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab91 // bfcvt z17.h, p2/M, z28.s\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
+ "ld1w { z8.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa51 // bfcvtnt z17.h, p2/M, z18.s\n"
- "ld1w { z2.s }, p1/Z, [x20]\n"
+ ".inst 0x658aabf9 // bfcvt z25.h, p2/M, z31.s\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa852 // bfcvt z18.h, p2/M, z2.s\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
+ "ld1w { z3.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa72 // bfcvtnt z18.h, p2/M, z19.s\n"
- "ld1w { z2.s }, p1/Z, [x20]\n"
+ ".inst 0x658aaa7a // bfcvt z26.h, p2/M, z19.s\n"
+ "ld1w { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa853 // bfcvt z19.h, p2/M, z2.s\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ ".inst 0x658aabbb // bfcvt z27.h, p2/M, z29.s\n"
+ "ld1w { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaaf3 // bfcvtnt z19.h, p2/M, z23.s\n"
- ".inst 0xc1381210 // bfdot za.s[x8, 0], { z16.h-z19.h }, z8.h\n"
- "ld1w { z10.s }, p1/Z, [x20]\n"
- ".inst 0x658aa954 // bfcvt z20.h, p2/M, z10.s\n"
- ".inst 0xc1371230 // bfdot za.s[x8, 0], { z17.h-z20.h }, z7.h\n"
+ ".inst 0x648aa819 // bfcvtnt z25.h, p2/M, z0.s\n"
+ "ld1w { z7.s }, p1/Z, [x20]\n"
+ ".inst 0x658aa99c // bfcvt z28.h, p2/M, z12.s\n"
+ ".inst 0x648aa91a // bfcvtnt z26.h, p2/M, z8.s\n"
+ ".inst 0x648aa87b // bfcvtnt z27.h, p2/M, z3.s\n"
+ ".inst 0x658aa8fd // bfcvt z29.h, p2/M, z7.s\n"
+ ".inst 0x648aaa7c // bfcvtnt z28.h, p2/M, z19.s\n"
+ ".inst 0xc1341330 // bfdot za.s[x8, 0], { z25.h-z28.h }, z4.h\n"
+ ".inst 0xc1351350 // bfdot za.s[x8, 0], { z26.h-z29.h }, z5.h\n"
"8:" // Unpadded: 0 priming loads
"cmp x14, #0x2\n"
"blt 16f\n"
"add x21, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x13]\n"
- ".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p1/Z, [x13]\n"
"sub x14, x14, #0x2\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z18.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"sub x11, x11, #0x1\n"
- ".inst 0x648aaa09 // bfcvtnt z9.h, p2/M, z16.s\n"
"ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
"lsr x20, x14, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa2f // bfcvt z15.h, p2/M, z17.s\n"
"cmp x20, x11\n"
- ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
"csel x22, x20, x11, LT\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
"and x14, x14, #0x1\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ "ld1w { z29.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
+ ".inst 0x658aaa31 // bfcvt z17.h, p2/M, z17.s\n"
+ ".inst 0x648aaa4f // bfcvtnt z15.h, p2/M, z18.s\n"
+ "ld1w { z0.s }, p1/Z, [x21]\n"
"sub x11, x11, x22\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ ".inst 0x658aab52 // bfcvt z18.h, p2/M, z26.s\n"
+ ".inst 0x648aaa70 // bfcvtnt z16.h, p2/M, z19.s\n"
+ ".inst 0x648aabd1 // bfcvtnt z17.h, p2/M, z30.s\n"
+ ".inst 0x658aa813 // bfcvt z19.h, p2/M, z0.s\n"
+ ".inst 0x648aabb2 // bfcvtnt z18.h, p2/M, z29.s\n"
"cbz x22, 15f\n"
"9:" // Unpadded: Main loop
"add x21, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x13]\n"
- ".inst 0xc1301130 // bfdot za.s[x8, 0], { z9.h-z12.h }, z0.h\n"
+ "ld1w { z14.s }, p1/Z, [x13]\n"
+ ".inst 0xc13a11f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z10.h\n"
"add x13, x13, %x[ld_in_col], LSL #2\n"
- "ld1w { z15.s }, p1/Z, [x21]\n"
+ "ld1w { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13e1131 // bfdot za.s[x8, 1], { z9.h-z12.h }, z14.h\n"
- ".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "ld1w { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1361150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z6.h\n"
+ ".inst 0xc13b11f1 // bfdot za.s[x8, 1], { z15.h-z18.h }, z11.h\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x21]\n"
+ "ld1w { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1331151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z3.h\n"
- ".inst 0x658aaa4a // bfcvt z10.h, p2/M, z18.s\n"
- "ld1w { z30.s }, p1/Z, [x21]\n"
+ "subs x22, x22, #0x1\n"
+ "ld1w { z27.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aabcb // bfcvt z11.h, p2/M, z30.s\n"
- ".inst 0x648aa9e9 // bfcvtnt z9.h, p2/M, z15.s\n"
- "ld1w { z19.s }, p1/Z, [x21]\n"
+ ".inst 0x658aa9dc // bfcvt z28.h, p2/M, z14.s\n"
+ "ld1w { z2.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa2a // bfcvtnt z10.h, p2/M, z17.s\n"
- ".inst 0x648aaa6b // bfcvtnt z11.h, p2/M, z19.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
+ ".inst 0xc1361210 // bfdot za.s[x8, 0], { z16.h-z19.h }, z6.h\n"
+ "ld1w { z8.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "ld1w { z2.s }, p1/Z, [x21]\n"
+ ".inst 0xc1391211 // bfdot za.s[x8, 1], { z16.h-z19.h }, z9.h\n"
+ ".inst 0x658aab3d // bfcvt z29.h, p2/M, z25.s\n"
+ "ld1w { z7.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa84c // bfcvtnt z12.h, p2/M, z2.s\n"
+ ".inst 0x658aa85e // bfcvt z30.h, p2/M, z2.s\n"
+ "ld1w { z1.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aab5c // bfcvtnt z28.h, p2/M, z26.s\n"
+ "ld1w { z15.s }, p1/Z, [x13]\n"
+ ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- "ld1w { z29.s }, p1/Z, [x13]\n"
- ".inst 0xc1381130 // bfdot za.s[x8, 0], { z9.h-z12.h }, z8.h\n"
- ".inst 0x658aaba9 // bfcvt z9.h, p2/M, z29.s\n"
- "subs x22, x22, #0x1\n"
- "ld1w { z22.s }, p1/Z, [x20]\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0x658aa8ff // bfcvt z31.h, p2/M, z7.s\n"
+ ".inst 0x648aab7d // bfcvtnt z29.h, p2/M, z27.s\n"
+ "ld1w { z7.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x21]\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
+ ".inst 0x648aa91e // bfcvtnt z30.h, p2/M, z8.s\n"
+ "ld1w { z2.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1adcb10 // fclamp { z16.s-z19.s }, z24.s, z13.s\n"
+ ".inst 0x648aa83f // bfcvtnt z31.h, p2/M, z1.s\n"
+ "ld1w { z8.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aab20 // bfcvt z0.h, p2/M, z25.s\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z1.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1a1c890 // fclamp { z16.s-z19.s }, z4.s, z1.s\n"
"st1w { z16.s }, p1, [x10]\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- ".inst 0xc1371150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z7.h\n"
"add x10, x10, x28, LSL #2\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab8a // bfcvt z10.h, p2/M, z28.s\n"
+ ".inst 0xc1341390 // bfdot za.s[x8, 0], { z28.h-z31.h }, z4.h\n"
+ ".inst 0x658aa9ef // bfcvt z15.h, p2/M, z15.s\n"
+ "ld1w { z12.s }, p1/Z, [x20]\n"
"st1w { z17.s }, p1, [x9]\n"
- "ld1w { z31.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
"add x9, x9, x27, LSL #2\n"
"st1w { z18.s }, p1, [x26]\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
"add x26, x26, x24, LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
"st1w { z19.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaac9 // bfcvtnt z9.h, p2/M, z22.s\n"
- ".inst 0x648aabea // bfcvtnt z10.h, p2/M, z31.s\n"
- "ld1w { z31.s }, p1/Z, [x20]\n"
- ".inst 0x648aaa2b // bfcvtnt z11.h, p2/M, z17.s\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0x658aabed // bfcvt z13.h, p2/M, z31.s\n"
+ ".inst 0xc13513b0 // bfdot za.s[x8, 0], { z29.h-z0.h }, z5.h\n"
+ ".inst 0x658aa850 // bfcvt z16.h, p2/M, z2.s\n"
+ ".inst 0x658aa911 // bfcvt z17.h, p2/M, z8.s\n"
+ ".inst 0x658aa832 // bfcvt z18.h, p2/M, z1.s\n"
+ ".inst 0x648aa8ef // bfcvtnt z15.h, p2/M, z7.s\n"
+ ".inst 0x658aa993 // bfcvt z19.h, p2/M, z12.s\n"
+ ".inst 0x648aab50 // bfcvtnt z16.h, p2/M, z26.s\n"
+ ".inst 0x648aab71 // bfcvtnt z17.h, p2/M, z27.s\n"
+ ".inst 0x648aab32 // bfcvtnt z18.h, p2/M, z25.s\n"
"bgt 9b\n"
"b 15f\n"
"10:" // Padded
@@ -364,350 +364,350 @@ void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
"beq 12f\n"
"11:" // Padded: 2 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x13]\n"
- ".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1w { z17.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa49 // bfcvtnt z9.h, p2/M, z18.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- ".inst 0x658aa98a // bfcvt z10.h, p2/M, z12.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa3e // bfcvt z30.h, p2/M, z17.s\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z12.s }, p0/Z, [x20]\n"
- ".inst 0x648aa98a // bfcvtnt z10.h, p2/M, z12.s\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa1e // bfcvtnt z30.h, p2/M, z16.s\n"
+ "ld1w { z12.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa4b // bfcvt z11.h, p2/M, z18.s\n"
+ ".inst 0x658aabff // bfcvt z31.h, p2/M, z31.s\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ ".inst 0x648aa99f // bfcvtnt z31.h, p2/M, z12.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0x658aaba0 // bfcvt z0.h, p2/M, z29.s\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- "mov x12, #0x8\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa20 // bfcvtnt z0.h, p2/M, z17.s\n"
+ ".inst 0x658aaa01 // bfcvt z1.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- ".inst 0xc13e1130 // bfdot za.s[x8, 0], { z9.h-z12.h }, z14.h\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1331150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z3.h\n"
+ ".inst 0x648aaa21 // bfcvtnt z1.h, p2/M, z17.s\n"
+ ".inst 0x658aaa02 // bfcvt z2.h, p2/M, z16.s\n"
+ ".inst 0xc13b13d0 // bfdot za.s[x8, 0], { z30.h-z1.h }, z11.h\n"
+ ".inst 0xc13913f0 // bfdot za.s[x8, 0], { z31.h-z2.h }, z9.h\n"
"12:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1w { z16.s }, p0/Z, [x13]\n"
- ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa70 // bfcvtnt z16.h, p2/M, z19.s\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa9b1 // bfcvt z17.h, p2/M, z13.s\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
"ld1w { z12.s }, p0/Z, [x20]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa991 // bfcvtnt z17.h, p2/M, z12.s\n"
+ ".inst 0x648aaa30 // bfcvtnt z16.h, p2/M, z17.s\n"
+ "ld1w { z3.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z9.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa932 // bfcvt z18.h, p2/M, z9.s\n"
+ ".inst 0x658aa991 // bfcvt z17.h, p2/M, z12.s\n"
+ "ld1w { z30.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z11.s }, p0/Z, [x20]\n"
- "mov x12, #0x8\n"
- ".inst 0x648aa972 // bfcvtnt z18.h, p2/M, z11.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aa871 // bfcvtnt z17.h, p2/M, z3.s\n"
+ ".inst 0x658aabd2 // bfcvt z18.h, p2/M, z30.s\n"
+ "ld1w { z31.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- ".inst 0x658aaab3 // bfcvt z19.h, p2/M, z21.s\n"
- ".inst 0xc13811f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z8.h\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1371210 // bfdot za.s[x8, 0], { z16.h-z19.h }, z7.h\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z1.s }, p0/Z, [x20]\n"
+ ".inst 0x648aabf2 // bfcvtnt z18.h, p2/M, z31.s\n"
+ ".inst 0x658aa833 // bfcvt z19.h, p2/M, z1.s\n"
+ ".inst 0xc13411f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z4.h\n"
+ ".inst 0xc1351210 // bfdot za.s[x8, 0], { z16.h-z19.h }, z5.h\n"
"13:" // Padded: 0 priming loads
"cmp x14, #0x2\n"
"blt 16f\n"
"mov x12, #0x0\n"
+ "add x21, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x14, x14, #0x2\n"
+ "sub x11, x11, #0x1\n"
+ "lsr x20, x14, #0x1\n"
+ "cmp x20, x11\n"
+ "and x14, x14, #0x1\n"
"ld1w { z16.s }, p0/Z, [x13]\n"
- ".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa09 // bfcvtnt z9.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "csel x22, x20, x11, LT\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ "sub x11, x11, x22\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ "ld1w { z18.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa30 // bfcvtnt z16.h, p2/M, z17.s\n"
+ "ld1w { z19.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa51 // bfcvt z17.h, p2/M, z18.s\n"
+ "ld1w { z18.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
"mov x12, #0x8\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa71 // bfcvtnt z17.h, p2/M, z19.s\n"
+ "ld1w { z25.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
- "sub x14, x14, #0x2\n"
- "sub x11, x11, #0x1\n"
- "lsr x20, x14, #0x1\n"
- "cmp x20, x11\n"
- "csel x21, x20, x11, LT\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- "and x14, x14, #0x1\n"
- "sub x11, x11, x21\n"
- "cbz x21, 15f\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa52 // bfcvt z18.h, p2/M, z18.s\n"
+ "ld1w { z19.s }, p0/Z, [x21]\n"
+ ".inst 0x648aab32 // bfcvtnt z18.h, p2/M, z25.s\n"
+ ".inst 0x658aaa73 // bfcvt z19.h, p2/M, z19.s\n"
+ "cbz x22, 15f\n"
"14:" // Padded: Main loop
"mov x12, #0x0\n"
+ "add x21, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13a11f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z10.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z18.s }, p0/Z, [x13]\n"
- ".inst 0xc1301130 // bfdot za.s[x8, 0], { z9.h-z12.h }, z0.h\n"
- "add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13b11f1 // bfdot za.s[x8, 1], { z15.h-z18.h }, z11.h\n"
+ "subs x22, x22, #0x1\n"
+ "ld1w { z25.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- ".inst 0xc13e1131 // bfdot za.s[x8, 1], { z9.h-z12.h }, z14.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc1361210 // bfdot za.s[x8, 0], { z16.h-z19.h }, z6.h\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1391211 // bfdot za.s[x8, 1], { z16.h-z19.h }, z9.h\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0xc1361150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z6.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aab39 // bfcvt z25.h, p2/M, z25.s\n"
+ ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
+ "ld1w { z29.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- ".inst 0xc1331151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z3.h\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa49 // bfcvt z9.h, p2/M, z18.s\n"
- ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
+ ".inst 0x648aaa19 // bfcvtnt z25.h, p2/M, z16.s\n"
+ ".inst 0xc1adcb00 // fclamp { z0.s-z3.s }, z24.s, z13.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z2.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa84b // bfcvt z11.h, p2/M, z2.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aabba // bfcvt z26.h, p2/M, z29.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa29 // bfcvtnt z9.h, p2/M, z17.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "st1w { z0.s }, p1, [x10]\n"
+ ".inst 0x648aaa3a // bfcvtnt z26.h, p2/M, z17.s\n"
+ "add x10, x10, x28, LSL #2\n"
+ "st1w { z1.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ "st1w { z2.s }, p1, [x26]\n"
+ "add x26, x26, x24, LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab8c // bfcvt z12.h, p2/M, z28.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa1b // bfcvt z27.h, p2/M, z16.s\n"
+ "st1w { z3.s }, p1, [x25]\n"
+ "add x25, x25, x23, LSL #2\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
"mov x12, #0x8\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa6a // bfcvtnt z10.h, p2/M, z19.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa3b // bfcvtnt z27.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z13.s }, p0/Z, [x20]\n"
- ".inst 0x648aa9eb // bfcvtnt z11.h, p2/M, z15.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa1c // bfcvt z28.h, p2/M, z16.s\n"
"mov x12, #0x0\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x13]\n"
- "add x20, x13, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa9ad // bfcvt z13.h, p2/M, z13.s\n"
+ ".inst 0x648aaa3c // bfcvtnt z28.h, p2/M, z17.s\n"
+ "ld1w { z18.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z21.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1a1c89c // fclamp { z28.s-z31.s }, z4.s, z1.s\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0x658aaa1d // bfcvt z29.h, p2/M, z16.s\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc1341330 // bfdot za.s[x8, 0], { z25.h-z28.h }, z4.h\n"
"ld1w { z17.s }, p0/Z, [x20]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z28.s }, p1, [x10]\n"
+ ".inst 0x658aaa4f // bfcvt z15.h, p2/M, z18.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x4\n"
- "ld1w { z20.s }, p0/Z, [x20]\n"
- "st1w { z29.s }, p1, [x9]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1351350 // bfdot za.s[x8, 0], { z26.h-z29.h }, z5.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "st1w { z30.s }, p1, [x26]\n"
- "add x8, x8, #0x1\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1381130 // bfdot za.s[x8, 0], { z9.h-z12.h }, z8.h\n"
- ".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
"ld1w { z18.s }, p0/Z, [x20]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1371150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z7.h\n"
+ ".inst 0x648aaa30 // bfcvtnt z16.h, p2/M, z17.s\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa2a // bfcvt z10.h, p2/M, z17.s\n"
+ ".inst 0x658aaa51 // bfcvt z17.h, p2/M, z18.s\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z17.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa6b // bfcvt z11.h, p2/M, z19.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa71 // bfcvtnt z17.h, p2/M, z19.s\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "subs x21, x21, #0x1\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z31.s }, p1, [x25]\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
- "add x9, x9, x27, LSL #2\n"
- "add x26, x26, x24, LSL #2\n"
- ".inst 0x648aaaa9 // bfcvtnt z9.h, p2/M, z21.s\n"
- ".inst 0x648aaa8a // bfcvtnt z10.h, p2/M, z20.s\n"
- "add x25, x25, x23, LSL #2\n"
- ".inst 0x648aaa4b // bfcvtnt z11.h, p2/M, z18.s\n"
- ".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa52 // bfcvt z18.h, p2/M, z18.s\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
+ ".inst 0x648aab32 // bfcvtnt z18.h, p2/M, z25.s\n"
+ ".inst 0x658aaa73 // bfcvt z19.h, p2/M, z19.s\n"
"bgt 14b\n"
"15:" // Main loop tail
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z17.s }, p0/Z, [x13]\n"
- ".inst 0xc1301130 // bfdot za.s[x8, 0], { z9.h-z12.h }, z0.h\n"
"add x20, x13, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13a11f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z10.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xc13b11f1 // bfdot za.s[x8, 1], { z15.h-z18.h }, z11.h\n"
+ "ld1w { z25.s }, p0/Z, [x13]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z2.s }, p0/Z, [x20]\n"
- ".inst 0xc13e1131 // bfdot za.s[x8, 1], { z9.h-z12.h }, z14.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc1361210 // bfdot za.s[x8, 0], { z16.h-z19.h }, z6.h\n"
+ "add x13, x13, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc1391211 // bfdot za.s[x8, 1], { z16.h-z19.h }, z9.h\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0xc1361150 // bfdot za.s[x8, 0], { z10.h-z13.h }, z6.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aab2f // bfcvt z15.h, p2/M, z25.s\n"
+ ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
+ "ld1w { z28.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z23.s }, p0/Z, [x20]\n"
- ".inst 0xc1331151 // bfdot za.s[x8, 1], { z10.h-z13.h }, z3.h\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa32 // bfcvt z18.h, p2/M, z17.s\n"
- ".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
+ ".inst 0xc1adcb00 // fclamp { z0.s-z3.s }, z24.s, z13.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
+ ".inst 0x658aab90 // bfcvt z16.h, p2/M, z28.s\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa852 // bfcvtnt z18.h, p2/M, z2.s\n"
+ "st1w { z0.s }, p1, [x10]\n"
+ ".inst 0x648aaa30 // bfcvtnt z16.h, p2/M, z17.s\n"
+ "add x10, x10, x28, LSL #2\n"
+ "st1w { z1.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ "st1w { z2.s }, p1, [x26]\n"
+ "add x26, x26, x24, LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa15 // bfcvt z21.h, p2/M, z16.s\n"
+ ".inst 0x658aaa51 // bfcvt z17.h, p2/M, z18.s\n"
+ "st1w { z3.s }, p1, [x25]\n"
+ "add x25, x25, x23, LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "mov x12, #0x8\n"
- ".inst 0x648aaaf3 // bfcvtnt z19.h, p2/M, z23.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa71 // bfcvtnt z17.h, p2/M, z19.s\n"
+ ".inst 0x658aaa52 // bfcvt z18.h, p2/M, z18.s\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0x648aa9f4 // bfcvtnt z20.h, p2/M, z15.s\n"
- ".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- ".inst 0x658aaa16 // bfcvt z22.h, p2/M, z16.s\n"
- ".inst 0xc1381250 // bfdot za.s[x8, 0], { z18.h-z21.h }, z8.h\n"
- ".inst 0xc1a1c89c // fclamp { z28.s-z31.s }, z4.s, z1.s\n"
- "st1w { z28.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z29.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
- "add x13, x13, %x[ld_in_col], LSL #2\n"
- "st1w { z30.s }, p1, [x26]\n"
- "add x26, x26, x24, LSL #2\n"
- ".inst 0xc1371270 // bfdot za.s[x8, 0], { z19.h-z22.h }, z7.h\n"
- "st1w { z31.s }, p1, [x25]\n"
- "add x25, x25, x23, LSL #2\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z19.s }, p0/Z, [x20]\n"
+ ".inst 0x648aab32 // bfcvtnt z18.h, p2/M, z25.s\n"
+ ".inst 0x658aaa73 // bfcvt z19.h, p2/M, z19.s\n"
+ ".inst 0xc13411f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z4.h\n"
+ ".inst 0xc1351210 // bfdot za.s[x8, 0], { z16.h-z19.h }, z5.h\n"
"16:" // Main loop skip tail
"cbz x14, 17f\n" // Skip remainder inputs
"mov x12, #0x0\n"
+ "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x11, x11, #0x1\n"
"ld1w { z16.s }, p0/Z, [x13]\n"
- ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
- "add x20, x13, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa0f // bfcvtnt z15.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z2.s }, p0/Z, [x20]\n"
- ".inst 0x648aa850 // bfcvtnt z16.h, p2/M, z2.s\n"
- "mov x12, #0x4\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z10.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa951 // bfcvt z17.h, p2/M, z10.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ "ld1w { z12.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aabd1 // bfcvtnt z17.h, p2/M, z30.s\n"
+ ".inst 0x648aaa30 // bfcvtnt z16.h, p2/M, z17.s\n"
+ "ld1w { z8.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa72 // bfcvt z18.h, p2/M, z19.s\n"
+ ".inst 0x658aa991 // bfcvt z17.h, p2/M, z12.s\n"
+ "ld1w { z0.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- "mov x12, #0x8\n"
- ".inst 0x648aaa72 // bfcvtnt z18.h, p2/M, z19.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aa911 // bfcvtnt z17.h, p2/M, z8.s\n"
+ ".inst 0x658aa812 // bfcvt z18.h, p2/M, z0.s\n"
+ "ld1w { z7.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z19.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa73 // bfcvt z19.h, p2/M, z19.s\n"
- ".inst 0xc13011f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z0.h\n"
- "sub x11, x11, #0x1\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x20]\n"
+ ".inst 0x648aa8f2 // bfcvtnt z18.h, p2/M, z7.s\n"
+ ".inst 0x658aab93 // bfcvt z19.h, p2/M, z28.s\n"
+ ".inst 0xc13a11f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z10.h\n"
+ ".inst 0xc13b11f1 // bfdot za.s[x8, 1], { z15.h-z18.h }, z11.h\n"
".inst 0xc1361210 // bfdot za.s[x8, 0], { z16.h-z19.h }, z6.h\n"
- ".inst 0xc13e11f1 // bfdot za.s[x8, 1], { z15.h-z18.h }, z14.h\n"
+ ".inst 0xc1391211 // bfdot za.s[x8, 1], { z16.h-z19.h }, z9.h\n"
".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
- ".inst 0xc1a1c888 // fclamp { z8.s-z11.s }, z4.s, z1.s\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
+ ".inst 0xc1adcb08 // fclamp { z8.s-z11.s }, z24.s, z13.s\n"
"st1w { z8.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- ".inst 0xc1331211 // bfdot za.s[x8, 1], { z16.h-z19.h }, z3.h\n"
- "add x8, x8, #0x1\n"
"st1w { z9.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
"st1w { z10.s }, p1, [x26]\n"
"add x26, x26, x24, LSL #2\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
"st1w { z11.s }, p1, [x25]\n"
"add x25, x25, x23, LSL #2\n"
"17:" // Tail input: End
@@ -716,10 +716,10 @@ void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"subs x11, x11, #0x1\n"
- ".inst 0xc1a1c888 // fclamp { z8.s-z11.s }, z4.s, z1.s\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
+ ".inst 0xc1adcb08 // fclamp { z8.s-z11.s }, z24.s, z13.s\n"
"st1w { z8.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
"st1w { z9.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
"st1w { z10.s }, p1, [x26]\n"
@@ -729,11 +729,11 @@ void sme2_fp32bf16fp32_planar_3x3_s2_4rows_dot_za_impl(
"bgt 18b\n"
"19:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x15\n"
+ "whilelt p1.s, x15, x16\n"
"incb x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x15\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x15, x16\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21, LSL #2\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp
index b72042558d..0b6239a5a4 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,237 +69,242 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
Args args = { inptr, ld_in_vl, pad_top, 8u - std::min(8u, pad_top + valid_input_rows), pad_left, weights, bias, valid_input_cols, output_cols, outptrs, outlds, outvllds, start_channel, valid_channels, act_min, act_max };
__asm__ __volatile__(
- "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0x8\n"
+ "ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0x8\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x4\n"
- "ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x20, x22, #0x8\n"
"ptrue p2.b\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
- "ld1rw { z29.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
"ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x7\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z28.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
- "whilelt p8.s, XZR, x6\n"
- "addvl SP, SP, #-30\n"
+ "and x20, x20, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "ld1rw { z8.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+ "sub x21, x21, x6\n"
+ "mov SP, x20\n"
"ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z29.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-30\n"
+ "whilelt p1.s, XZR, x7\n"
+ "whilelt p9.s, XZR, x21\n"
+ "whilelt p8.s, XZR, x5\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"1:" // Channel loop
- "ldr x21, [%x[args], %[offsetof_Args_bias]]\n"
- "fmov z30.s, #0x0\n"
- "cbz x21, 2f\n"
- "ld1w { z30.s }, p1/Z, [x21, x17, LSL #2]\n"
+ "ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
+ "fmov z18.s, #0x0\n"
+ "cbz x20, 2f\n"
+ "ld1w { z18.s }, p1/Z, [x20, x17, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x21\n"
- "ld1w { z12.s }, p2/Z, [x20]\n"
+ "ldr x28, [%x[args], %[offsetof_Args_weights]]\n"
+ "fmov z27.s, #0x0\n"
+ "addvl x27, SP, #30\n"
+ "mov x26, #0x8\n"
+ "addvl x27, x27, #-6\n"
+ "ldr x25, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "add x24, x5, x6\n"
+ "mov z19.d, z18.d\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x23, %x[ld_in_row], #0x2\n"
+ "mov x11, #0x0\n"
+ "mov x22, x28\n"
+ "incb x28\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ld1w { z23.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #5\n"
+ "sub x20, x25, #0x1\n"
+ "ld1w { z30.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #5\n"
+ "orr x21, x20, %x[ld_in_col], LSL #18\n"
+ "ld1w { z25.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #5\n"
+ "orr x21, x7, x21, LSL #20\n"
+ "ld1w { z26.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #5\n"
+ ".inst 0x658aaaef // bfcvt z15.h, p2/M, z23.s\n"
+ ".inst 0x648aaafb // bfcvtnt z27.h, p2/M, z23.s\n"
+ "ld1w { z16.s }, p2/Z, [x22]\n"
+ ".inst 0x658aabc4 // bfcvt z4.h, p2/M, z30.s\n"
+ "mov x20, x28\n"
+ "incb x28\n"
+ ".inst 0x658aab21 // bfcvt z1.h, p2/M, z25.s\n"
+ "ld1w { z22.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "ld1w { z24.s }, p2/Z, [x20]\n"
+ "mov x8, #0x8\n"
+ ".inst 0x658aab4c // bfcvt z12.h, p2/M, z26.s\n"
+ "lsl x21, x21, #0x2\n"
+ "sub x26, x26, x24\n"
+ "st1h { z27.h }, p2, [x27]\n"
+ ".inst 0x648aabcf // bfcvtnt z15.h, p2/M, z30.s\n"
+ "ld1w { z7.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "fmov z11.s, #0x0\n"
- "incb x21\n"
- "ld1w { z3.s }, p2/Z, [x20]\n"
+ "fmov z17.s, #0x0\n"
+ ".inst 0x648aab24 // bfcvtnt z4.h, p2/M, z25.s\n"
+ "ld1w { z0.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ ".inst 0x648aab41 // bfcvtnt z1.h, p2/M, z26.s\n"
+ "ld1w { z21.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aa99a // bfcvt z26.h, p2/M, z12.s\n"
- ".inst 0x658aab10 // bfcvt z16.h, p2/M, z24.s\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
"ld1w { z20.s }, p2/Z, [x20]\n"
+ "mov x20, x28\n"
+ "incb x28\n"
+ "st1h { z15.h }, p2, [x27, #1, MUL VL]\n"
+ ".inst 0x658aaadf // bfcvt z31.h, p2/M, z22.s\n"
+ "madd x23, x23, x5, x16\n"
+ "st1h { z4.h }, p2, [x27, #2, MUL VL]\n"
+ ".inst 0x658aa8fc // bfcvt z28.h, p2/M, z7.s\n"
+ ".inst 0x648aaad1 // bfcvtnt z17.h, p2/M, z22.s\n"
+ "st1h { z1.h }, p2, [x27, #3, MUL VL]\n"
+ ".inst 0x658aa81b // bfcvt z27.h, p2/M, z0.s\n"
+ "ld1w { z9.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "addvl x24, SP, #30\n"
- ".inst 0x648aa98b // bfcvtnt z11.h, p2/M, z12.s\n"
- "ld1w { z25.s }, p2/Z, [x20]\n"
- "mov x20, x21\n"
- ".inst 0x658aa875 // bfcvt z21.h, p2/M, z3.s\n"
- "addvl x24, x24, #-6\n"
- "ld1w { z6.s }, p2/Z, [x20]\n"
- ".inst 0x658aaa9b // bfcvt z27.h, p2/M, z20.s\n"
+ "st1h { z12.h }, p2, [x27, #4, MUL VL]\n"
+ ".inst 0x658aaaaf // bfcvt z15.h, p2/M, z21.s\n"
+ "st1h { z14.h }, p2, [x27, #5, MUL VL]\n"
+ "addvl x27, x27, #-6\n"
+ ".inst 0x648aa8ff // bfcvtnt z31.h, p2/M, z7.s\n"
+ "ld1w { z26.s }, p2/Z, [x20]\n"
+ "st1h { z17.h }, p2, [x27]\n"
"incb x20, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x24]\n"
- ".inst 0x648aab1a // bfcvtnt z26.h, p2/M, z24.s\n"
- "ld1w { z14.s }, p2/Z, [x20]\n"
+ "fmov z25.s, #0x0\n"
+ ".inst 0x648aa81c // bfcvtnt z28.h, p2/M, z0.s\n"
+ "ld1w { z1.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "fmov z11.s, #0x0\n"
- "st1h { z26.h }, p2, [x24, #1, MUL VL]\n"
- ".inst 0x648aa870 // bfcvtnt z16.h, p2/M, z3.s\n"
- "ld1w { z19.s }, p2/Z, [x20]\n"
+ ".inst 0x648aaabb // bfcvtnt z27.h, p2/M, z21.s\n"
+ ".inst 0x658aaa83 // bfcvt z3.h, p2/M, z20.s\n"
+ "ld1w { z4.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aa8c9 // bfcvt z9.h, p2/M, z6.s\n"
- ".inst 0x648aaa95 // bfcvtnt z21.h, p2/M, z20.s\n"
- "incb x21\n"
- "ld1w { z12.s }, p2/Z, [x20]\n"
+ ".inst 0x648aaa8f // bfcvtnt z15.h, p2/M, z20.s\n"
+ "st1h { z31.h }, p2, [x27, #1, MUL VL]\n"
+ ".inst 0x658aa92c // bfcvt z12.h, p2/M, z9.s\n"
+ "ld1w { z0.s }, p2/Z, [x20]\n"
+ "mov x20, x28\n"
+ "st1h { z28.h }, p2, [x27, #2, MUL VL]\n"
+ ".inst 0x658aab57 // bfcvt z23.h, p2/M, z26.s\n"
+ ".inst 0x648aa939 // bfcvtnt z25.h, p2/M, z9.s\n"
+ "incb x28\n"
+ "st1h { z27.h }, p2, [x27, #3, MUL VL]\n"
+ ".inst 0x658aa83e // bfcvt z30.h, p2/M, z1.s\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "st1h { z16.h }, p2, [x24, #2, MUL VL]\n"
- ".inst 0x648aab3b // bfcvtnt z27.h, p2/M, z25.s\n"
- ".inst 0x658aab37 // bfcvt z23.h, p2/M, z25.s\n"
- "ld1w { z5.s }, p2/Z, [x20]\n"
- ".inst 0x658aa9c8 // bfcvt z8.h, p2/M, z14.s\n"
- "mov x23, x21\n"
- "st1h { z21.h }, p2, [x24, #3, MUL VL]\n"
- ".inst 0x648aa8cb // bfcvtnt z11.h, p2/M, z6.s\n"
- ".inst 0x658aaa79 // bfcvt z25.h, p2/M, z19.s\n"
- "ld1w { z4.s }, p2/Z, [x23]\n"
- "incb x23, ALL, MUL #5\n"
- "st1h { z27.h }, p2, [x24, #4, MUL VL]\n"
- ".inst 0x648aa9c9 // bfcvtnt z9.h, p2/M, z14.s\n"
- ".inst 0x658aa991 // bfcvt z17.h, p2/M, z12.s\n"
- "incb x21\n"
- "st1h { z23.h }, p2, [x24, #5, MUL VL]\n"
- "addvl x24, x24, #-6\n"
- "ld1w { z26.s }, p2/Z, [x23]\n"
- "incb x23, ALL, MUL #5\n"
- "st1h { z11.h }, p2, [x24]\n"
- "fmov z2.s, #0x0\n"
- ".inst 0x648aaa68 // bfcvtnt z8.h, p2/M, z19.s\n"
- "ldr x25, [%x[args], %[offsetof_Args_input_cols]]\n"
- "st1h { z9.h }, p2, [x24, #1, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [x23]\n"
- "incb x23, ALL, MUL #5\n"
- ".inst 0x658aa893 // bfcvt z19.h, p2/M, z4.s\n"
- "st1h { z8.h }, p2, [x24, #2, MUL VL]\n"
- ".inst 0x648aa999 // bfcvtnt z25.h, p2/M, z12.s\n"
- "ld1w { z7.s }, p2/Z, [x23]\n"
- "incb x23, ALL, MUL #5\n"
- ".inst 0x658aab4e // bfcvt z14.h, p2/M, z26.s\n"
- ".inst 0x648aa8b1 // bfcvtnt z17.h, p2/M, z5.s\n"
- "st1h { z25.h }, p2, [x24, #3, MUL VL]\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- ".inst 0x658aa8ab // bfcvt z11.h, p2/M, z5.s\n"
- "ld1w { z18.s }, p2/Z, [x23]\n"
- "mov x20, x21\n"
- ".inst 0x648aa882 // bfcvtnt z2.h, p2/M, z4.s\n"
- ".inst 0x658aab66 // bfcvt z6.h, p2/M, z27.s\n"
- "ld1w { z15.s }, p2/Z, [x20]\n"
+ "st1h { z15.h }, p2, [x27, #4, MUL VL]\n"
+ ".inst 0x658aa894 // bfcvt z20.h, p2/M, z4.s\n"
+ "st1h { z3.h }, p2, [x27, #5, MUL VL]\n"
+ "addvl x27, x27, #-6\n"
+ ".inst 0x648aab4c // bfcvtnt z12.h, p2/M, z26.s\n"
+ "ld1w { z7.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "st1h { z17.h }, p2, [x24, #4, MUL VL]\n"
- "st1h { z11.h }, p2, [x24, #5, MUL VL]\n"
- "addvl x24, x24, #-6\n"
- ".inst 0x648aab53 // bfcvtnt z19.h, p2/M, z26.s\n"
- ".inst 0x658aa8fa // bfcvt z26.h, p2/M, z7.s\n"
- "ld1w { z11.s }, p2/Z, [x20]\n"
+ "st1h { z25.h }, p2, [x27]\n"
+ ".inst 0x648aa837 // bfcvtnt z23.h, p2/M, z1.s\n"
+ "fmov z22.s, #0x0\n"
+ "ld1w { z25.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "st1h { z2.h }, p2, [x24]\n"
- ".inst 0x648aab6e // bfcvtnt z14.h, p2/M, z27.s\n"
- "ld1w { z4.s }, p2/Z, [x20]\n"
- "fmov z21.s, #0x0\n"
- "st1h { z19.h }, p2, [x24, #1, MUL VL]\n"
+ ".inst 0x648aa89e // bfcvtnt z30.h, p2/M, z4.s\n"
+ ".inst 0x658aa810 // bfcvt z16.h, p2/M, z0.s\n"
+ "ld1w { z3.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ ".inst 0x648aa814 // bfcvtnt z20.h, p2/M, z0.s\n"
+ "st1h { z12.h }, p2, [x27, #1, MUL VL]\n"
+ ".inst 0x658aa964 // bfcvt z4.h, p2/M, z11.s\n"
+ "ld1w { z24.s }, p2/Z, [x20]\n"
+ "mov x20, x28\n"
+ "st1h { z23.h }, p2, [x27, #2, MUL VL]\n"
+ ".inst 0x658aa8e9 // bfcvt z9.h, p2/M, z7.s\n"
+ ".inst 0x648aa976 // bfcvtnt z22.h, p2/M, z11.s\n"
+ "st1h { z30.h }, p2, [x27, #3, MUL VL]\n"
+ ".inst 0x658aab3c // bfcvt z28.h, p2/M, z25.s\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aa9ea // bfcvt z10.h, p2/M, z15.s\n"
- "st1h { z14.h }, p2, [x24, #2, MUL VL]\n"
- ".inst 0x648aa8e6 // bfcvtnt z6.h, p2/M, z7.s\n"
- "incb x21\n"
- "ld1w { z17.s }, p2/Z, [x20]\n"
+ "st1h { z20.h }, p2, [x27, #4, MUL VL]\n"
+ ".inst 0x658aa861 // bfcvt z1.h, p2/M, z3.s\n"
+ ".inst 0x648aa8e4 // bfcvtnt z4.h, p2/M, z7.s\n"
+ "ld1w { z2.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aa973 // bfcvt z19.h, p2/M, z11.s\n"
- "st1h { z6.h }, p2, [x24, #3, MUL VL]\n"
- ".inst 0x648aaa5a // bfcvtnt z26.h, p2/M, z18.s\n"
- ".inst 0x658aaa45 // bfcvt z5.h, p2/M, z18.s\n"
+ "st1h { z16.h }, p2, [x27, #5, MUL VL]\n"
+ ".inst 0x648aab29 // bfcvtnt z9.h, p2/M, z25.s\n"
"ld1w { z12.s }, p2/Z, [x20]\n"
- "mov x21, x21\n"
- ".inst 0x658aa897 // bfcvt z23.h, p2/M, z4.s\n"
- ".inst 0x648aa9f5 // bfcvtnt z21.h, p2/M, z15.s\n"
- "ld1w { z24.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- ".inst 0x648aa96a // bfcvtnt z10.h, p2/M, z11.s\n"
- "ld1w { z3.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "st1h { z26.h }, p2, [x24, #4, MUL VL]\n"
- ".inst 0x648aa893 // bfcvtnt z19.h, p2/M, z4.s\n"
- ".inst 0x658aaa30 // bfcvt z16.h, p2/M, z17.s\n"
- "ld1w { z2.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- ".inst 0x648aaa37 // bfcvtnt z23.h, p2/M, z17.s\n"
- "ld1w { z26.s }, p2/Z, [x21]\n"
- "st1h { z5.h }, p2, [x24, #5, MUL VL]\n"
- "addvl x24, x24, #-6\n"
- "st1h { z21.h }, p2, [x24]\n"
- ".inst 0x648aa990 // bfcvtnt z16.h, p2/M, z12.s\n"
- "incb x21, ALL, MUL #5\n"
- "fmov z8.s, #0x0\n"
- "st1h { z10.h }, p2, [x24, #1, MUL VL]\n"
- ".inst 0x658aab04 // bfcvt z4.h, p2/M, z24.s\n"
- ".inst 0x658aa985 // bfcvt z5.h, p2/M, z12.s\n"
- "sub x20, x25, #0x1\n"
- "st1h { z19.h }, p2, [x24, #2, MUL VL]\n"
- ".inst 0x658aa871 // bfcvt z17.h, p2/M, z3.s\n"
- "ld1w { z25.s }, p2/Z, [x21]\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
- "st1h { z23.h }, p2, [x24, #3, MUL VL]\n"
- ".inst 0x658aa857 // bfcvt z23.h, p2/M, z2.s\n"
- "orr x23, x7, x23, LSL #20\n"
- "mov x22, #0x8\n"
- "st1h { z16.h }, p2, [x24, #4, MUL VL]\n"
- ".inst 0x658aab4e // bfcvt z14.h, p2/M, z26.s\n"
- "add x21, x6, x4\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "st1h { z5.h }, p2, [x24, #5, MUL VL]\n"
- "addvl x24, x24, #-6\n"
- "mov z31.d, z30.d\n"
- ".inst 0x648aab08 // bfcvtnt z8.h, p2/M, z24.s\n"
- "st1h { z8.h }, p2, [x24]\n"
- ".inst 0x648aa864 // bfcvtnt z4.h, p2/M, z3.s\n"
- ".inst 0x648aa851 // bfcvtnt z17.h, p2/M, z2.s\n"
- "mov x11, #0x0\n"
- "st1h { z4.h }, p2, [x24, #1, MUL VL]\n"
- ".inst 0x648aab57 // bfcvtnt z23.h, p2/M, z26.s\n"
- ".inst 0x648aab2e // bfcvtnt z14.h, p2/M, z25.s\n"
- "mov x8, #0x8\n"
- "st1h { z17.h }, p2, [x24, #2, MUL VL]\n"
- ".inst 0x658aab26 // bfcvt z6.h, p2/M, z25.s\n"
- "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
- "st1h { z23.h }, p2, [x24, #3, MUL VL]\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x16\n"
- "st1h { z14.h }, p2, [x24, #4, MUL VL]\n"
- "st1h { z6.h }, p2, [x24, #5, MUL VL]\n"
+ "incb x20, ALL, MUL #5\n"
+ "addvl x27, x27, #-6\n"
+ ".inst 0x648aa87c // bfcvtnt z28.h, p2/M, z3.s\n"
+ "ld1w { z27.s }, p2/Z, [x20]\n"
+ "st1h { z22.h }, p2, [x27]\n"
+ "incb x20, ALL, MUL #5\n"
+ ".inst 0x648aab01 // bfcvtnt z1.h, p2/M, z24.s\n"
+ "fmov z17.s, #0x0\n"
+ "st1h { z4.h }, p2, [x27, #1, MUL VL]\n"
+ ".inst 0x658aa97e // bfcvt z30.h, p2/M, z11.s\n"
+ ".inst 0x658aab0a // bfcvt z10.h, p2/M, z24.s\n"
+ "st1h { z9.h }, p2, [x27, #2, MUL VL]\n"
+ ".inst 0x658aa84f // bfcvt z15.h, p2/M, z2.s\n"
+ "ld1w { z26.s }, p2/Z, [x20]\n"
+ "st1h { z28.h }, p2, [x27, #3, MUL VL]\n"
+ ".inst 0x658aa98e // bfcvt z14.h, p2/M, z12.s\n"
+ "st1h { z1.h }, p2, [x27, #4, MUL VL]\n"
+ ".inst 0x658aab75 // bfcvt z21.h, p2/M, z27.s\n"
+ ".inst 0x648aa971 // bfcvtnt z17.h, p2/M, z11.s\n"
+ "st1h { z10.h }, p2, [x27, #5, MUL VL]\n"
+ "addvl x27, x27, #-6\n"
+ ".inst 0x648aa85e // bfcvtnt z30.h, p2/M, z2.s\n"
+ ".inst 0x658aab4a // bfcvt z10.h, p2/M, z26.s\n"
+ ".inst 0x648aa98f // bfcvtnt z15.h, p2/M, z12.s\n"
+ ".inst 0x648aab6e // bfcvtnt z14.h, p2/M, z27.s\n"
+ "st1h { z17.h }, p2, [x27]\n"
+ ".inst 0x648aab55 // bfcvtnt z21.h, p2/M, z26.s\n"
+ "st1h { z30.h }, p2, [x27, #1, MUL VL]\n"
+ "st1h { z15.h }, p2, [x27, #2, MUL VL]\n"
+ "st1h { z14.h }, p2, [x27, #3, MUL VL]\n"
+ "st1h { z21.h }, p2, [x27, #4, MUL VL]\n"
+ "st1h { z10.h }, p2, [x27, #5, MUL VL]\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x26, x26, #0x1\n"
+ ".inst 0xf8b54afc // rprfm pldstrm, x21, [x23]\n"
+ "add x23, x23, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x16, x6, x20, x16\n"
- ".inst 0xc0046bc0 // mova za.d[x11, #0], { z30.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0046bc1 // mova za.d[x11, #1], { z30.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ ".inst 0xc0046a40 // mova za.d[x11, #0], { z18.d-z19.d }\n"
"mov x22, #0x4\n"
- "ldp x14, x13, [x23], #0x10\n"
- ".inst 0xc0046bc2 // mova za.d[x11, #2], { z30.d-z31.d }\n"
- "ldp x5, x10, [x20], #0x10\n"
- ".inst 0xc0046bc3 // mova za.d[x11, #3], { z30.d-z31.d }\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x16, x5, x21, x16\n"
+ ".inst 0xc0046a41 // mova za.d[x11, #1], { z18.d-z19.d }\n"
"ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0046bc4 // mova za.d[x11, #4], { z30.d-z31.d }\n"
+ ".inst 0xc0046a42 // mova za.d[x11, #2], { z18.d-z19.d }\n"
+ "ldp x14, x13, [x23], #0x10\n"
+ ".inst 0xc0046a43 // mova za.d[x11, #3], { z18.d-z19.d }\n"
+ "ldp x2, x10, [x20], #0x10\n"
+ ".inst 0xc0046a44 // mova za.d[x11, #4], { z18.d-z19.d }\n"
+ ".inst 0xc0046a45 // mova za.d[x11, #5], { z18.d-z19.d }\n"
"ldp x9, x28, [x23], #0x10\n"
- ".inst 0xc0046bc5 // mova za.d[x11, #5], { z30.d-z31.d }\n"
+ ".inst 0xc0046a46 // mova za.d[x11, #6], { z18.d-z19.d }\n"
"ldp x27, x26, [x20], #0x10\n"
- ".inst 0xc0046bc6 // mova za.d[x11, #6], { z30.d-z31.d }\n"
- ".inst 0xc0046bc7 // mova za.d[x11, #7], { z30.d-z31.d }\n"
- ".inst 0xc0040bc0 // mova za.d[x8, #0], { z30.d-z31.d }\n"
- ".inst 0xc0040bc1 // mova za.d[x8, #1], { z30.d-z31.d }\n"
+ ".inst 0xc0046a47 // mova za.d[x11, #7], { z18.d-z19.d }\n"
+ ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
+ ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
"cbz x21, 5f\n"
"cmp x21, x22\n"
"csel x20, x21, x22, LT\n"
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 5f\n"
- ".inst 0xc0066804 // mova { z4.d-z5.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066800 // mova { z0.d-z1.d }, za.d[x11, #0]\n"
"sub x15, x15, x21\n"
- ".inst 0xc0066826 // mova { z6.d-z7.d }, za.d[x11, #1]\n"
- ".inst 0xc1bccba4 // fclamp { z4.s-z7.s }, z29.s, z28.s\n"
+ ".inst 0xc0066822 // mova { z2.d-z3.d }, za.d[x11, #1]\n"
+ ".inst 0xc1bdc900 // fclamp { z0.s-z3.s }, z8.s, z29.s\n"
"4:" // Left padding
"subs x21, x21, #0x1\n"
- "st1w { z4.s }, p1, [x14]\n"
- "add x14, x14, x5, LSL #2\n"
- "st1w { z6.s }, p1, [x13]\n"
+ "st1w { z0.s }, p1, [x14]\n"
+ "add x14, x14, x2, LSL #2\n"
+ "st1w { z2.s }, p1, [x13]\n"
"add x13, x13, x10, LSL #2\n"
- "st1w { z5.s }, p1, [x9]\n"
+ "st1w { z1.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- "st1w { z7.s }, p1, [x28]\n"
+ "st1w { z3.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
- "adds XZR, x6, x4\n"
+ "adds XZR, x5, x6\n"
"bne 12f\n"
"cbz x22, 10f\n"
"cmp x22, #0x1\n"
@@ -312,327 +317,327 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"6:" // Unpadded: 4 priming loads
"add x21, x16, %x[ld_in_row], LSL #2\n"
"ld1w { z21.s }, p1/Z, [x16]\n"
- ".inst 0x658aaab2 // bfcvt z18.h, p2/M, z21.s\n"
"addvl x20, SP, #24\n"
- "ld1w { z11.s }, p1/Z, [x21]\n"
+ "ld1w { z0.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa972 // bfcvtnt z18.h, p2/M, z11.s\n"
"add x16, x16, %x[ld_in_col], LSL #2\n"
- "ld1w { z17.s }, p1/Z, [x21]\n"
+ "ld1w { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa33 // bfcvt z19.h, p2/M, z17.s\n"
- "ld1w { z12.s }, p1/Z, [x21]\n"
+ "ld1w { z28.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa993 // bfcvtnt z19.h, p2/M, z12.s\n"
- "ld1w { z7.s }, p1/Z, [x21]\n"
+ ".inst 0x658aaab7 // bfcvt z23.h, p2/M, z21.s\n"
+ "ld1w { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa8f4 // bfcvt z20.h, p2/M, z7.s\n"
- "ld1w { z12.s }, p1/Z, [x21]\n"
+ ".inst 0x658aa978 // bfcvt z24.h, p2/M, z11.s\n"
+ "ld1w { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa994 // bfcvtnt z20.h, p2/M, z12.s\n"
- ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12d7250 // bfdot za.s[x11, 0], { z18.h-z19.h }, z13.h\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0x648aa817 // bfcvtnt z23.h, p2/M, z0.s\n"
+ ".inst 0x658aa9b9 // bfcvt z25.h, p2/M, z13.s\n"
"ld1w { z6.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa8d5 // bfcvt z21.h, p2/M, z6.s\n"
- ".inst 0xc12c7251 // bfdot za.s[x11, 1], { z18.h-z19.h }, z12.h\n"
- ".inst 0xa0412a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc12b7270 // bfdot za.s[x11, 0], { z19.h-z20.h }, z11.h\n"
- "ld1w { z27.s }, p1/Z, [x21]\n"
- ".inst 0x648aab75 // bfcvtnt z21.h, p2/M, z27.s\n"
- ".inst 0xc12a7271 // bfdot za.s[x11, 1], { z19.h-z20.h }, z10.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc12b7290 // bfdot za.s[x11, 0], { z20.h-z21.h }, z11.h\n"
- ".inst 0xc12a7291 // bfdot za.s[x11, 1], { z20.h-z21.h }, z10.h\n"
+ ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0x648aab98 // bfcvtnt z24.h, p2/M, z28.s\n"
+ "ld1w { z30.s }, p1/Z, [x21]\n"
+ ".inst 0x658aa8da // bfcvt z26.h, p2/M, z6.s\n"
+ ".inst 0xa1422a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0x648aa9d9 // bfcvtnt z25.h, p2/M, z14.s\n"
+ ".inst 0xc12972f0 // bfdot za.s[x11, 0], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc12172f1 // bfdot za.s[x11, 1], { z23.h-z24.h }, z1.h\n"
+ ".inst 0x648aabda // bfcvtnt z26.h, p2/M, z30.s\n"
+ ".inst 0xc12d7310 // bfdot za.s[x11, 0], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc1257311 // bfdot za.s[x11, 1], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc12f7330 // bfdot za.s[x11, 0], { z25.h-z26.h }, z15.h\n"
+ ".inst 0xc1277331 // bfdot za.s[x11, 1], { z25.h-z26.h }, z7.h\n"
"7:" // Unpadded: 3 priming loads
"add x22, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z6.s }, p1/Z, [x16]\n"
- ".inst 0x658aa8d7 // bfcvt z23.h, p2/M, z6.s\n"
+ "ld1w { z31.s }, p1/Z, [x16]\n"
"addvl x21, SP, #18\n"
- "ld1w { z1.s }, p1/Z, [x22]\n"
+ "ld1w { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa837 // bfcvtnt z23.h, p2/M, z1.s\n"
"addvl x20, SP, #24\n"
- "ld1w { z15.s }, p1/Z, [x22]\n"
+ "ld1w { z21.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa9f8 // bfcvt z24.h, p2/M, z15.s\n"
"add x16, x16, %x[ld_in_col], LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
+ "ld1w { z23.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa18 // bfcvtnt z24.h, p2/M, z16.s\n"
- "ld1w { z1.s }, p1/Z, [x22]\n"
+ ".inst 0x658aabfe // bfcvt z30.h, p2/M, z31.s\n"
+ "ld1w { z20.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa839 // bfcvt z25.h, p2/M, z1.s\n"
- "ld1w { z9.s }, p1/Z, [x22]\n"
+ ".inst 0x658aaabf // bfcvt z31.h, p2/M, z21.s\n"
+ "ld1w { z10.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa939 // bfcvtnt z25.h, p2/M, z9.s\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12972f0 // bfdot za.s[x11, 0], { z23.h-z24.h }, z9.h\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ ".inst 0x648aa9fe // bfcvtnt z30.h, p2/M, z15.s\n"
+ ".inst 0x658aaa80 // bfcvt z0.h, p2/M, z20.s\n"
+ "ld1w { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa1a // bfcvt z26.h, p2/M, z16.s\n"
- ".inst 0xc12172f1 // bfdot za.s[x11, 1], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xa1412aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0x648aaaff // bfcvtnt z31.h, p2/M, z23.s\n"
+ "ld1w { z4.s }, p1/Z, [x22]\n"
+ ".inst 0x658aa9e1 // bfcvt z1.h, p2/M, z15.s\n"
+ ".inst 0xa1422aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0x648aa940 // bfcvtnt z0.h, p2/M, z10.s\n"
+ ".inst 0xc12e73d0 // bfdot za.s[x11, 0], { z30.h-z31.h }, z14.h\n"
+ ".inst 0xc12673d1 // bfdot za.s[x11, 1], { z30.h-z31.h }, z6.h\n"
".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12f72f2 // bfdot za.s[x11, 2], { z23.h-z24.h }, z15.h\n"
- ".inst 0xa1412aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12772f3 // bfdot za.s[x11, 3], { z23.h-z24.h }, z7.h\n"
- "ld1w { z16.s }, p1/Z, [x22]\n"
- ".inst 0x648aaa1a // bfcvtnt z26.h, p2/M, z16.s\n"
- ".inst 0xc1297310 // bfdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
- ".inst 0xc1217311 // bfdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
- ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xa1422aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12f7312 // bfdot za.s[x11, 2], { z24.h-z25.h }, z15.h\n"
- ".inst 0xc1277313 // bfdot za.s[x11, 3], { z24.h-z25.h }, z7.h\n"
- ".inst 0xc12b7330 // bfdot za.s[x11, 0], { z25.h-z26.h }, z11.h\n"
- ".inst 0xc1237331 // bfdot za.s[x11, 1], { z25.h-z26.h }, z3.h\n"
- ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1237332 // bfdot za.s[x11, 2], { z25.h-z26.h }, z3.h\n"
- ".inst 0xc1227333 // bfdot za.s[x11, 3], { z25.h-z26.h }, z2.h\n"
+ ".inst 0x648aa881 // bfcvtnt z1.h, p2/M, z4.s\n"
+ ".inst 0xc12f73d2 // bfdot za.s[x11, 2], { z30.h-z31.h }, z15.h\n"
+ ".inst 0xc12773d3 // bfdot za.s[x11, 3], { z30.h-z31.h }, z7.h\n"
+ ".inst 0xc12b73f0 // bfdot za.s[x11, 0], { z31.h-z0.h }, z11.h\n"
+ ".inst 0xc12373f1 // bfdot za.s[x11, 1], { z31.h-z0.h }, z3.h\n"
+ ".inst 0xa0412a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc12f73f2 // bfdot za.s[x11, 2], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12e73f3 // bfdot za.s[x11, 3], { z31.h-z0.h }, z14.h\n"
+ ".inst 0xc12d7010 // bfdot za.s[x11, 0], { z0.h-z1.h }, z13.h\n"
+ ".inst 0xc1257011 // bfdot za.s[x11, 1], { z0.h-z1.h }, z5.h\n"
+ ".inst 0xa1422a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc12a7012 // bfdot za.s[x11, 2], { z0.h-z1.h }, z10.h\n"
+ ".inst 0xc1227013 // bfdot za.s[x11, 3], { z0.h-z1.h }, z2.h\n"
"8:" // Unpadded: 2 priming loads
"add x23, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z24.s }, p1/Z, [x16]\n"
- ".inst 0x658aab02 // bfcvt z2.h, p2/M, z24.s\n"
+ "ld1w { z15.s }, p1/Z, [x16]\n"
"addvl x22, SP, #12\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
+ "ld1w { z21.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa02 // bfcvtnt z2.h, p2/M, z16.s\n"
"addvl x21, SP, #18\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
+ "ld1w { z31.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa03 // bfcvt z3.h, p2/M, z16.s\n"
"addvl x20, SP, #24\n"
- "ld1w { z16.s }, p1/Z, [x23]\n"
+ "ld1w { z22.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa03 // bfcvtnt z3.h, p2/M, z16.s\n"
+ ".inst 0x658aa9e2 // bfcvt z2.h, p2/M, z15.s\n"
"add x16, x16, %x[ld_in_col], LSL #2\n"
+ "ld1w { z10.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aabe3 // bfcvt z3.h, p2/M, z31.s\n"
"ld1w { z1.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa824 // bfcvt z4.h, p2/M, z1.s\n"
- "ld1w { z19.s }, p1/Z, [x23]\n"
+ ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
+ ".inst 0x648aaaa2 // bfcvtnt z2.h, p2/M, z21.s\n"
+ ".inst 0x658aa944 // bfcvt z4.h, p2/M, z10.s\n"
+ "ld1w { z13.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa64 // bfcvtnt z4.h, p2/M, z19.s\n"
- ".inst 0xa1402ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0x648aaac3 // bfcvtnt z3.h, p2/M, z22.s\n"
+ "ld1w { z12.s }, p1/Z, [x23]\n"
+ ".inst 0x658aa9a5 // bfcvt z5.h, p2/M, z13.s\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0x648aa824 // bfcvtnt z4.h, p2/M, z1.s\n"
".inst 0xc12f7050 // bfdot za.s[x11, 0], { z2.h-z3.h }, z15.h\n"
- "ld1w { z0.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa805 // bfcvt z5.h, p2/M, z0.s\n"
- ".inst 0xc1277051 // bfdot za.s[x11, 1], { z2.h-z3.h }, z7.h\n"
- ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12f7052 // bfdot za.s[x11, 2], { z2.h-z3.h }, z15.h\n"
- ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc1277053 // bfdot za.s[x11, 3], { z2.h-z3.h }, z7.h\n"
- "ld1w { z10.s }, p1/Z, [x23]\n"
- ".inst 0x648aa945 // bfcvtnt z5.h, p2/M, z10.s\n"
- ".inst 0xc12e7070 // bfdot za.s[x11, 0], { z3.h-z4.h }, z14.h\n"
- ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc12e7051 // bfdot za.s[x11, 1], { z2.h-z3.h }, z14.h\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0x648aa985 // bfcvtnt z5.h, p2/M, z12.s\n"
+ ".inst 0xc1297052 // bfdot za.s[x11, 2], { z2.h-z3.h }, z9.h\n"
+ ".inst 0xc1217053 // bfdot za.s[x11, 3], { z2.h-z3.h }, z1.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1277070 // bfdot za.s[x11, 0], { z3.h-z4.h }, z7.h\n"
".inst 0xc1267071 // bfdot za.s[x11, 1], { z3.h-z4.h }, z6.h\n"
- ".inst 0xa0412aac // ld1h { z12.h-z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12f7054 // bfdot za.s[x11, 4], { z2.h-z3.h }, z15.h\n"
- ".inst 0xa1422ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc1277055 // bfdot za.s[x11, 5], { z2.h-z3.h }, z7.h\n"
- ".inst 0xc12d7072 // bfdot za.s[x11, 2], { z3.h-z4.h }, z13.h\n"
- ".inst 0xc12c7073 // bfdot za.s[x11, 3], { z3.h-z4.h }, z12.h\n"
+ ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc12d7054 // bfdot za.s[x11, 4], { z2.h-z3.h }, z13.h\n"
+ ".inst 0xc12c7055 // bfdot za.s[x11, 5], { z2.h-z3.h }, z12.h\n"
+ ".inst 0xc12f7072 // bfdot za.s[x11, 2], { z3.h-z4.h }, z15.h\n"
+ ".inst 0xc12e7073 // bfdot za.s[x11, 3], { z3.h-z4.h }, z14.h\n"
".inst 0xa0412a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1287090 // bfdot za.s[x11, 0], { z4.h-z5.h }, z8.h\n"
- ".inst 0xc1207091 // bfdot za.s[x11, 1], { z4.h-z5.h }, z0.h\n"
- ".inst 0xa0422aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc12b7090 // bfdot za.s[x11, 0], { z4.h-z5.h }, z11.h\n"
+ ".inst 0xc12a7091 // bfdot za.s[x11, 1], { z4.h-z5.h }, z10.h\n"
+ ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
".inst 0xc12f7074 // bfdot za.s[x11, 4], { z3.h-z4.h }, z15.h\n"
".inst 0xc12e7075 // bfdot za.s[x11, 5], { z3.h-z4.h }, z14.h\n"
- ".inst 0xc1277092 // bfdot za.s[x11, 2], { z4.h-z5.h }, z7.h\n"
- ".inst 0xc1267093 // bfdot za.s[x11, 3], { z4.h-z5.h }, z6.h\n"
- ".inst 0xa1422a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1287094 // bfdot za.s[x11, 4], { z4.h-z5.h }, z8.h\n"
- ".inst 0xc1207095 // bfdot za.s[x11, 5], { z4.h-z5.h }, z0.h\n"
+ ".inst 0xc12b7092 // bfdot za.s[x11, 2], { z4.h-z5.h }, z11.h\n"
+ ".inst 0xc12a7093 // bfdot za.s[x11, 3], { z4.h-z5.h }, z10.h\n"
+ ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc12e7094 // bfdot za.s[x11, 4], { z4.h-z5.h }, z14.h\n"
+ ".inst 0xc1267095 // bfdot za.s[x11, 5], { z4.h-z5.h }, z6.h\n"
"9:" // Unpadded: 1 priming loads
"add x24, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z18.s }, p1/Z, [x16]\n"
- ".inst 0x658aaa4c // bfcvt z12.h, p2/M, z18.s\n"
+ "ld1w { z7.s }, p1/Z, [x16]\n"
"addvl x23, SP, #6\n"
- "ld1w { z7.s }, p1/Z, [x24]\n"
+ "ld1w { z27.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa8ec // bfcvtnt z12.h, p2/M, z7.s\n"
"addvl x22, SP, #12\n"
- "ld1w { z20.s }, p1/Z, [x24]\n"
+ "ld1w { z26.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa8d // bfcvt z13.h, p2/M, z20.s\n"
"addvl x21, SP, #18\n"
- "ld1w { z0.s }, p1/Z, [x24]\n"
+ "ld1w { z23.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa80d // bfcvtnt z13.h, p2/M, z0.s\n"
+ ".inst 0x658aa8ff // bfcvt z31.h, p2/M, z7.s\n"
"addvl x20, SP, #24\n"
- "ld1w { z10.s }, p1/Z, [x24]\n"
+ "ld1w { z7.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa94e // bfcvt z14.h, p2/M, z10.s\n"
"add x16, x16, %x[ld_in_col], LSL #2\n"
- "ld1w { z0.s }, p1/Z, [x24]\n"
+ ".inst 0x658aab40 // bfcvt z0.h, p2/M, z26.s\n"
+ "ld1w { z24.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa80e // bfcvtnt z14.h, p2/M, z0.s\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1217190 // bfdot za.s[x11, 0], { z12.h-z13.h }, z1.h\n"
- "ld1w { z17.s }, p1/Z, [x24]\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0x648aab7f // bfcvtnt z31.h, p2/M, z27.s\n"
+ ".inst 0x658aa8e1 // bfcvt z1.h, p2/M, z7.s\n"
+ "ld1w { z30.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa2f // bfcvt z15.h, p2/M, z17.s\n"
- ".inst 0xc1207191 // bfdot za.s[x11, 1], { z12.h-z13.h }, z0.h\n"
- ".inst 0xa0402aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc12b7192 // bfdot za.s[x11, 2], { z12.h-z13.h }, z11.h\n"
- ".inst 0xa0412ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc12a7193 // bfdot za.s[x11, 3], { z12.h-z13.h }, z10.h\n"
- "ld1w { z18.s }, p1/Z, [x24]\n"
- ".inst 0x648aaa4f // bfcvtnt z15.h, p2/M, z18.s\n"
- ".inst 0xc12171b0 // bfdot za.s[x11, 0], { z13.h-z14.h }, z1.h\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12071b1 // bfdot za.s[x11, 1], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc12a7194 // bfdot za.s[x11, 4], { z12.h-z13.h }, z10.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc1227195 // bfdot za.s[x11, 5], { z12.h-z13.h }, z2.h\n"
- ".inst 0xa0402a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12771b2 // bfdot za.s[x11, 2], { z13.h-z14.h }, z7.h\n"
- ".inst 0xc12671b3 // bfdot za.s[x11, 3], { z13.h-z14.h }, z6.h\n"
- ".inst 0xa0412aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12b71d0 // bfdot za.s[x11, 0], { z14.h-z15.h }, z11.h\n"
- ".inst 0xc12a71d1 // bfdot za.s[x11, 1], { z14.h-z15.h }, z10.h\n"
- ".inst 0xa1422ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc1297196 // bfdot za.s[x11, 6], { z12.h-z13.h }, z9.h\n"
- ".inst 0xc1287197 // bfdot za.s[x11, 7], { z12.h-z13.h }, z8.h\n"
- ".inst 0xc12171b4 // bfdot za.s[x11, 4], { z13.h-z14.h }, z1.h\n"
- ".inst 0xc12071b5 // bfdot za.s[x11, 5], { z13.h-z14.h }, z0.h\n"
- ".inst 0xa1412a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc12a71d2 // bfdot za.s[x11, 2], { z14.h-z15.h }, z10.h\n"
- ".inst 0xc12271d3 // bfdot za.s[x11, 3], { z14.h-z15.h }, z2.h\n"
- ".inst 0xa0422aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12b71b6 // bfdot za.s[x11, 6], { z13.h-z14.h }, z11.h\n"
- ".inst 0xc12371b7 // bfdot za.s[x11, 7], { z13.h-z14.h }, z3.h\n"
- ".inst 0xc12771d4 // bfdot za.s[x11, 4], { z14.h-z15.h }, z7.h\n"
- ".inst 0xc12671d5 // bfdot za.s[x11, 5], { z14.h-z15.h }, z6.h\n"
- ".inst 0xa0422a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc12771d6 // bfdot za.s[x11, 6], { z14.h-z15.h }, z7.h\n"
- ".inst 0xc12671d7 // bfdot za.s[x11, 7], { z14.h-z15.h }, z6.h\n"
+ ".inst 0xa0412aec // ld1h { z12.h-z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0x648aaae0 // bfcvtnt z0.h, p2/M, z23.s\n"
+ "ld1w { z9.s }, p1/Z, [x24]\n"
+ ".inst 0x658aabc2 // bfcvt z2.h, p2/M, z30.s\n"
+ ".inst 0xa0422ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x648aab01 // bfcvtnt z1.h, p2/M, z24.s\n"
+ ".inst 0xc12e73f0 // bfdot za.s[x11, 0], { z31.h-z0.h }, z14.h\n"
+ ".inst 0xc12673f1 // bfdot za.s[x11, 1], { z31.h-z0.h }, z6.h\n"
+ ".inst 0xa1402ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22]\n"
+ ".inst 0x648aa922 // bfcvtnt z2.h, p2/M, z9.s\n"
+ ".inst 0xc12f73f2 // bfdot za.s[x11, 2], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12773f3 // bfdot za.s[x11, 3], { z31.h-z0.h }, z7.h\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc12d7010 // bfdot za.s[x11, 0], { z0.h-z1.h }, z13.h\n"
+ ".inst 0xc12c7011 // bfdot za.s[x11, 1], { z0.h-z1.h }, z12.h\n"
+ ".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc12e73f4 // bfdot za.s[x11, 4], { z31.h-z0.h }, z14.h\n"
+ ".inst 0xc12673f5 // bfdot za.s[x11, 5], { z31.h-z0.h }, z6.h\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc12f7012 // bfdot za.s[x11, 2], { z0.h-z1.h }, z15.h\n"
+ ".inst 0xc1277013 // bfdot za.s[x11, 3], { z0.h-z1.h }, z7.h\n"
+ ".inst 0xa0412aac // ld1h { z12.h-z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc1257030 // bfdot za.s[x11, 0], { z1.h-z2.h }, z5.h\n"
+ ".inst 0xc1247031 // bfdot za.s[x11, 1], { z1.h-z2.h }, z4.h\n"
+ ".inst 0xa0422aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc12e73f6 // bfdot za.s[x11, 6], { z31.h-z0.h }, z14.h\n"
+ ".inst 0xc12673f7 // bfdot za.s[x11, 7], { z31.h-z0.h }, z6.h\n"
+ ".inst 0xc12d7014 // bfdot za.s[x11, 4], { z0.h-z1.h }, z13.h\n"
+ ".inst 0xc12c7015 // bfdot za.s[x11, 5], { z0.h-z1.h }, z12.h\n"
+ ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc12b7032 // bfdot za.s[x11, 2], { z1.h-z2.h }, z11.h\n"
+ ".inst 0xc12a7033 // bfdot za.s[x11, 3], { z1.h-z2.h }, z10.h\n"
+ ".inst 0xa0422aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc1277016 // bfdot za.s[x11, 6], { z0.h-z1.h }, z7.h\n"
+ ".inst 0xc1267017 // bfdot za.s[x11, 7], { z0.h-z1.h }, z6.h\n"
+ ".inst 0xc12f7034 // bfdot za.s[x11, 4], { z1.h-z2.h }, z15.h\n"
+ ".inst 0xc12e7035 // bfdot za.s[x11, 5], { z1.h-z2.h }, z14.h\n"
+ ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc12e7036 // bfdot za.s[x11, 6], { z1.h-z2.h }, z14.h\n"
+ ".inst 0xc1267037 // bfdot za.s[x11, 7], { z1.h-z2.h }, z6.h\n"
"10:" // Unpadded: 0 priming loads
- ".inst 0xa1402be6 // ld1h { z6.h, z14.h }, pn10.b/Z, [SP]\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- ".inst 0xa1422be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+ ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0xa1422be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
"cbz x25, 20f\n"
"add x20, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z1.s }, p1/Z, [x16]\n"
- ".inst 0x658aa834 // bfcvt z20.h, p2/M, z1.s\n"
+ "ld1w { z22.s }, p1/Z, [x16]\n"
"sub x25, x25, #0x1\n"
- "ld1w { z10.s }, p1/Z, [x20]\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
"sub x15, x15, #0x1\n"
- ".inst 0x648aa954 // bfcvtnt z20.h, p2/M, z10.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa15 // bfcvt z21.h, p2/M, z16.s\n"
"cmp x25, x15\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
+ "ld1w { z7.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaade // bfcvt z30.h, p2/M, z22.s\n"
"csel x25, x25, x15, LT\n"
- ".inst 0x648aaa75 // bfcvtnt z21.h, p2/M, z19.s\n"
- "ld1w { z23.s }, p1/Z, [x20]\n"
+ "ld1w { z28.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaaf6 // bfcvt z22.h, p2/M, z23.s\n"
"add x16, x16, %x[ld_in_col], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
+ "ld1w { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab76 // bfcvtnt z22.h, p2/M, z27.s\n"
+ ".inst 0x658aab3f // bfcvt z31.h, p2/M, z25.s\n"
"sub x15, x15, x25\n"
- "ld1w { z15.s }, p1/Z, [x20]\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa9f7 // bfcvt z23.h, p2/M, z15.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0x648aaa17 // bfcvtnt z23.h, p2/M, z16.s\n"
+ ".inst 0x658aab80 // bfcvt z0.h, p2/M, z28.s\n"
+ "ld1w { z12.s }, p1/Z, [x20]\n"
+ ".inst 0x648aaa1e // bfcvtnt z30.h, p2/M, z16.s\n"
+ ".inst 0x658aaa21 // bfcvt z1.h, p2/M, z17.s\n"
+ ".inst 0x648aa8ff // bfcvtnt z31.h, p2/M, z7.s\n"
+ ".inst 0x648aa920 // bfcvtnt z0.h, p2/M, z9.s\n"
+ ".inst 0x648aa981 // bfcvtnt z1.h, p2/M, z12.s\n"
"cbz x25, 19f\n"
"11:" // Unpadded: Main loop
"addvl x24, SP, #6\n"
- ".inst 0xc12e7290 // bfdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
+ ".inst 0xc12b73d0 // bfdot za.s[x11, 0], { z30.h-z31.h }, z11.h\n"
"addvl x23, SP, #12\n"
- "ld1w { z27.s }, p1/Z, [x16]\n"
- ".inst 0xc1267291 // bfdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
- ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
+ "ld1w { z25.s }, p1/Z, [x16]\n"
+ ".inst 0xc12373d1 // bfdot za.s[x11, 1], { z30.h-z31.h }, z3.h\n"
+ ".inst 0xa0402b0e // ld1h { z14.h-z15.h }, pn10.b/Z, [x24]\n"
"addvl x22, SP, #18\n"
"addvl x21, SP, #24\n"
- ".inst 0xc1297292 // bfdot za.s[x11, 2], { z20.h-z21.h }, z9.h\n"
"add x20, x16, %x[ld_in_row], LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1217293 // bfdot za.s[x11, 3], { z20.h-z21.h }, z1.h\n"
- ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
"subs x25, x25, #0x1\n"
+ "ld1w { z24.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
"add x16, x16, %x[ld_in_col], LSL #2\n"
- ".inst 0xc12d72b0 // bfdot za.s[x11, 0], { z21.h-z22.h }, z13.h\n"
- "ld1w { z25.s }, p1/Z, [x20]\n"
+ ".inst 0xc12f73d2 // bfdot za.s[x11, 2], { z30.h-z31.h }, z15.h\n"
+ "ld1w { z23.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc12e73d3 // bfdot za.s[x11, 3], { z30.h-z31.h }, z14.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc12573f0 // bfdot za.s[x11, 0], { z31.h-z0.h }, z5.h\n"
+ "ld1w { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12572b1 // bfdot za.s[x11, 1], { z21.h-z22.h }, z5.h\n"
+ ".inst 0xc12473f1 // bfdot za.s[x11, 1], { z31.h-z0.h }, z4.h\n"
".inst 0xa1412b07 // ld1h { z7.h, z15.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
- ".inst 0xc12e7294 // bfdot za.s[x11, 4], { z20.h-z21.h }, z14.h\n"
- "ld1w { z24.s }, p1/Z, [x20]\n"
+ ".inst 0xc12e73d4 // bfdot za.s[x11, 4], { z30.h-z31.h }, z14.h\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1267295 // bfdot za.s[x11, 5], { z20.h-z21.h }, z6.h\n"
+ ".inst 0xc12673d5 // bfdot za.s[x11, 5], { z30.h-z31.h }, z6.h\n"
".inst 0xa1402ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc12f72b2 // bfdot za.s[x11, 2], { z21.h-z22.h }, z15.h\n"
- "ld1w { z19.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12772b3 // bfdot za.s[x11, 3], { z21.h-z22.h }, z7.h\n"
- ".inst 0xa1412ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc12c72d0 // bfdot za.s[x11, 0], { z22.h-z23.h }, z12.h\n"
- "ld1w { z18.s }, p1/Z, [x20]\n"
+ ".inst 0xc12f73f2 // bfdot za.s[x11, 2], { z31.h-z0.h }, z15.h\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12472d1 // bfdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
- ".inst 0xa1422b07 // ld1h { z7.h, z15.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc12d7296 // bfdot za.s[x11, 6], { z20.h-z21.h }, z13.h\n"
+ ".inst 0xc12773f3 // bfdot za.s[x11, 3], { z31.h-z0.h }, z7.h\n"
+ ".inst 0xa1412ae7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc12a7010 // bfdot za.s[x11, 0], { z0.h-z1.h }, z10.h\n"
"ld1w { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1257297 // bfdot za.s[x11, 7], { z20.h-z21.h }, z5.h\n"
- ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12e72b4 // bfdot za.s[x11, 4], { z21.h-z22.h }, z14.h\n"
+ ".inst 0xc1227011 // bfdot za.s[x11, 1], { z0.h-z1.h }, z2.h\n"
+ ".inst 0xa1422b06 // ld1h { z6.h, z14.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc12d73d6 // bfdot za.s[x11, 6], { z30.h-z31.h }, z13.h\n"
"ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0xc12672b5 // bfdot za.s[x11, 5], { z21.h-z22.h }, z6.h\n"
- ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc12f72d2 // bfdot za.s[x11, 2], { z22.h-z23.h }, z15.h\n"
- ".inst 0xc12772d3 // bfdot za.s[x11, 3], { z22.h-z23.h }, z7.h\n"
- ".inst 0xa1422ae7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc12e72b6 // bfdot za.s[x11, 6], { z21.h-z22.h }, z14.h\n"
- ".inst 0xc12672b7 // bfdot za.s[x11, 7], { z21.h-z22.h }, z6.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12f72d4 // bfdot za.s[x11, 4], { z22.h-z23.h }, z15.h\n"
- ".inst 0xc12772d5 // bfdot za.s[x11, 5], { z22.h-z23.h }, z7.h\n"
- ".inst 0xa0422ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc12f72d6 // bfdot za.s[x11, 6], { z22.h-z23.h }, z15.h\n"
- ".inst 0xc12e72d7 // bfdot za.s[x11, 7], { z22.h-z23.h }, z14.h\n"
- ".inst 0xa1422aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12c1290 // bfdot za.s[x8, 0], { z20.h-z21.h }, z12.h\n"
- ".inst 0xc1241291 // bfdot za.s[x8, 1], { z20.h-z21.h }, z4.h\n"
- ".inst 0x658aab74 // bfcvt z20.h, p2/M, z27.s\n"
- ".inst 0xa1402be6 // ld1h { z6.h, z14.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc12d12b0 // bfdot za.s[x8, 0], { z21.h-z22.h }, z13.h\n"
- ".inst 0x648aab54 // bfcvtnt z20.h, p2/M, z26.s\n"
- ".inst 0xc12512b1 // bfdot za.s[x8, 1], { z21.h-z22.h }, z5.h\n"
- ".inst 0x658aab35 // bfcvt z21.h, p2/M, z25.s\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- ".inst 0xc12912d0 // bfdot za.s[x8, 0], { z22.h-z23.h }, z9.h\n"
- ".inst 0x648aab15 // bfcvtnt z21.h, p2/M, z24.s\n"
- ".inst 0xc12112d1 // bfdot za.s[x8, 1], { z22.h-z23.h }, z1.h\n"
- ".inst 0x658aaa76 // bfcvt z22.h, p2/M, z19.s\n"
- ".inst 0x658aaa37 // bfcvt z23.h, p2/M, z17.s\n"
+ ".inst 0xc12573d7 // bfdot za.s[x11, 7], { z30.h-z31.h }, z5.h\n"
+ ".inst 0xa1402aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc12f73f4 // bfdot za.s[x11, 4], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12773f5 // bfdot za.s[x11, 5], { z31.h-z0.h }, z7.h\n"
+ ".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc12e7012 // bfdot za.s[x11, 2], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc1267013 // bfdot za.s[x11, 3], { z0.h-z1.h }, z6.h\n"
+ ".inst 0xa1422ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc12f73f6 // bfdot za.s[x11, 6], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12773f7 // bfdot za.s[x11, 7], { z31.h-z0.h }, z7.h\n"
+ ".inst 0xa1412aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc12e7014 // bfdot za.s[x11, 4], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc1267015 // bfdot za.s[x11, 5], { z0.h-z1.h }, z6.h\n"
+ ".inst 0xa1422ac3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc12b7016 // bfdot za.s[x11, 6], { z0.h-z1.h }, z11.h\n"
+ ".inst 0xc1237017 // bfdot za.s[x11, 7], { z0.h-z1.h }, z3.h\n"
+ ".inst 0xa1422aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc12d13d0 // bfdot za.s[x8, 0], { z30.h-z31.h }, z13.h\n"
+ ".inst 0xc12513d1 // bfdot za.s[x8, 1], { z30.h-z31.h }, z5.h\n"
+ ".inst 0x658aab3e // bfcvt z30.h, p2/M, z25.s\n"
+ ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc12f13f0 // bfdot za.s[x8, 0], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12713f1 // bfdot za.s[x8, 1], { z31.h-z0.h }, z7.h\n"
+ ".inst 0x658aaaff // bfcvt z31.h, p2/M, z23.s\n"
+ ".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0x648aab1e // bfcvtnt z30.h, p2/M, z24.s\n"
+ ".inst 0xc12a1010 // bfdot za.s[x8, 0], { z0.h-z1.h }, z10.h\n"
+ ".inst 0xc1221011 // bfdot za.s[x8, 1], { z0.h-z1.h }, z2.h\n"
+ ".inst 0x658aaaa0 // bfcvt z0.h, p2/M, z21.s\n"
+ ".inst 0x658aaa21 // bfcvt z1.h, p2/M, z17.s\n"
"add x8, x8, #0x2\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
- ".inst 0xa1422be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- ".inst 0x648aaa56 // bfcvtnt z22.h, p2/M, z18.s\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1bccba8 // fclamp { z8.s-z11.s }, z29.s, z28.s\n"
- "st1w { z8.s }, p1, [x14]\n"
- "add x14, x14, x5, LSL #2\n"
- "st1w { z10.s }, p1, [x13]\n"
- "add x13, x13, x10, LSL #2\n"
+ ".inst 0xa1422be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+ ".inst 0x648aaadf // bfcvtnt z31.h, p2/M, z22.s\n"
+ ".inst 0xc006680c // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+ ".inst 0x648aaa80 // bfcvtnt z0.h, p2/M, z20.s\n"
+ ".inst 0x648aaa01 // bfcvtnt z1.h, p2/M, z16.s\n"
+ ".inst 0xc006682e // mova { z14.d-z15.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc0040bc0 // mova za.d[x8, #0], { z30.d-z31.d }\n"
- "st1w { z9.s }, p1, [x9]\n"
+ ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
+ ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
+ ".inst 0xc1bdc90c // fclamp { z12.s-z15.s }, z8.s, z29.s\n"
+ "st1w { z12.s }, p1, [x14]\n"
+ "add x14, x14, x2, LSL #2\n"
+ "st1w { z14.s }, p1, [x13]\n"
+ "add x13, x13, x10, LSL #2\n"
+ "st1w { z13.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040bc1 // mova za.d[x8, #1], { z30.d-z31.d }\n"
- ".inst 0x648aaa17 // bfcvtnt z23.h, p2/M, z16.s\n"
- "st1w { z11.s }, p1, [x28]\n"
+ "st1w { z15.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
"bgt 11b\n"
"b 19f\n"
@@ -647,450 +652,450 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"beq 14f\n"
"13:" // Padded: 4 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x16]\n"
- ".inst 0x658aaa06 // bfcvt z6.h, p2/M, z16.s\n"
"add x21, x16, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x16]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa1422a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
"ld1w { z16.s }, p0/Z, [x21]\n"
- ".inst 0x648aaa06 // bfcvtnt z6.h, p2/M, z16.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- ".inst 0x658aaa07 // bfcvt z7.h, p2/M, z16.s\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa29 // bfcvt z9.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- ".inst 0x648aaa07 // bfcvtnt z7.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa09 // bfcvtnt z9.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x21]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa08 // bfcvt z8.h, p2/M, z16.s\n"
+ ".inst 0x658aaa2a // bfcvt z10.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "addvl x20, SP, #24\n"
- "ld1w { z16.s }, p0/Z, [x21]\n"
- ".inst 0x648aaa08 // bfcvtnt z8.h, p2/M, z16.s\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12f70d0 // bfdot za.s[x11, 0], { z6.h-z7.h }, z15.h\n"
- "ld1w { z9.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa2b // bfcvt z11.h, p2/M, z17.s\n"
+ ".inst 0xc12d7130 // bfdot za.s[x11, 0], { z9.h-z10.h }, z13.h\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0x658aa929 // bfcvt z9.h, p2/M, z9.s\n"
- ".inst 0xc12e70d1 // bfdot za.s[x11, 1], { z6.h-z7.h }, z14.h\n"
- ".inst 0xa0412a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1257131 // bfdot za.s[x11, 1], { z9.h-z10.h }, z5.h\n"
+ ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ ".inst 0x658aaa2c // bfcvt z12.h, p2/M, z17.s\n"
"ld1w { z16.s }, p0/Z, [x21]\n"
- ".inst 0x648aaa09 // bfcvtnt z9.h, p2/M, z16.s\n"
- ".inst 0xc12f70f0 // bfdot za.s[x11, 0], { z7.h-z8.h }, z15.h\n"
- ".inst 0xc12e70f1 // bfdot za.s[x11, 1], { z7.h-z8.h }, z14.h\n"
- ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1237110 // bfdot za.s[x11, 0], { z8.h-z9.h }, z3.h\n"
- ".inst 0xc1227111 // bfdot za.s[x11, 1], { z8.h-z9.h }, z2.h\n"
+ ".inst 0xc12e7150 // bfdot za.s[x11, 0], { z10.h-z11.h }, z14.h\n"
+ ".inst 0xc1267151 // bfdot za.s[x11, 1], { z10.h-z11.h }, z6.h\n"
+ ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
+ ".inst 0xc12f7170 // bfdot za.s[x11, 0], { z11.h-z12.h }, z15.h\n"
+ ".inst 0xc1277171 // bfdot za.s[x11, 1], { z11.h-z12.h }, z7.h\n"
"14:" // Padded: 3 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x21, SP, #18\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1412aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
"ld1w { z16.s }, p0/Z, [x16]\n"
- ".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "add x22, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- ".inst 0x648aaa09 // bfcvtnt z9.h, p2/M, z16.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0x658aaa18 // bfcvt z24.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x22]\n"
- ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa38 // bfcvtnt z24.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "addvl x21, SP, #18\n"
+ ".inst 0x658aaa19 // bfcvt z25.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x22]\n"
- ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa39 // bfcvtnt z25.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12f7130 // bfdot za.s[x11, 0], { z9.h-z10.h }, z15.h\n"
- "ld1w { z16.s }, p0/Z, [x22]\n"
- "addvl x20, SP, #24\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ ".inst 0x658aaa1a // bfcvt z26.h, p2/M, z16.s\n"
+ ".inst 0xc12e7310 // bfdot za.s[x11, 0], { z24.h-z25.h }, z14.h\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1277131 // bfdot za.s[x11, 1], { z9.h-z10.h }, z7.h\n"
- ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
- ".inst 0xa1412aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12e7132 // bfdot za.s[x11, 2], { z9.h-z10.h }, z14.h\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1267311 // bfdot za.s[x11, 1], { z24.h-z25.h }, z6.h\n"
+ ".inst 0x648aaa3a // bfcvtnt z26.h, p2/M, z17.s\n"
+ ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
+ ".inst 0x658aaa1b // bfcvt z27.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x22]\n"
- ".inst 0xc1267133 // bfdot za.s[x11, 3], { z9.h-z10.h }, z6.h\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- ".inst 0xc12f7150 // bfdot za.s[x11, 0], { z10.h-z11.h }, z15.h\n"
- ".inst 0xa1422aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc1277151 // bfdot za.s[x11, 1], { z10.h-z11.h }, z7.h\n"
+ ".inst 0xc12c7312 // bfdot za.s[x11, 2], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc1247313 // bfdot za.s[x11, 3], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xc12f7330 // bfdot za.s[x11, 0], { z25.h-z26.h }, z15.h\n"
+ ".inst 0x648aaa1b // bfcvtnt z27.h, p2/M, z16.s\n"
+ ".inst 0xc1277331 // bfdot za.s[x11, 1], { z25.h-z26.h }, z7.h\n"
".inst 0xa0412a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc12f7152 // bfdot za.s[x11, 2], { z10.h-z11.h }, z15.h\n"
- ".inst 0xc12e7153 // bfdot za.s[x11, 3], { z10.h-z11.h }, z14.h\n"
- ".inst 0xc12d7170 // bfdot za.s[x11, 0], { z11.h-z12.h }, z13.h\n"
- ".inst 0xc1257171 // bfdot za.s[x11, 1], { z11.h-z12.h }, z5.h\n"
+ ".inst 0xc12f7332 // bfdot za.s[x11, 2], { z25.h-z26.h }, z15.h\n"
+ ".inst 0xc12e7333 // bfdot za.s[x11, 3], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc1217350 // bfdot za.s[x11, 0], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc1207351 // bfdot za.s[x11, 1], { z26.h-z27.h }, z0.h\n"
".inst 0xa0422a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc12f7172 // bfdot za.s[x11, 2], { z11.h-z12.h }, z15.h\n"
- ".inst 0xc12e7173 // bfdot za.s[x11, 3], { z11.h-z12.h }, z14.h\n"
+ ".inst 0xc12f7352 // bfdot za.s[x11, 2], { z26.h-z27.h }, z15.h\n"
+ ".inst 0xc12e7353 // bfdot za.s[x11, 3], { z26.h-z27.h }, z14.h\n"
"15:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x23, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x22, SP, #12\n"
+ ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
+ "addvl x21, SP, #18\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
"ld1w { z16.s }, p0/Z, [x16]\n"
- ".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
- "add x23, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x23]\n"
- ".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa1422ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x23]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x23]\n"
- ".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0x658aaa19 // bfcvt z25.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x23]\n"
- ".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa39 // bfcvtnt z25.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x23]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "addvl x22, SP, #12\n"
+ ".inst 0x658aaa1a // bfcvt z26.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x23]\n"
- ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa3a // bfcvtnt z26.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x23]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc1297250 // bfdot za.s[x11, 0], { z18.h-z19.h }, z9.h\n"
- "ld1w { z26.s }, p0/Z, [x23]\n"
- "addvl x21, SP, #18\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab55 // bfcvt z21.h, p2/M, z26.s\n"
+ ".inst 0x658aaa1b // bfcvt z27.h, p2/M, z16.s\n"
+ ".inst 0xc12e7330 // bfdot za.s[x11, 0], { z25.h-z26.h }, z14.h\n"
+ "ld1w { z16.s }, p0/Z, [x23]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1217251 // bfdot za.s[x11, 1], { z18.h-z19.h }, z1.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #24\n"
- ".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc12e7252 // bfdot za.s[x11, 2], { z18.h-z19.h }, z14.h\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "add x23, x23, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1267331 // bfdot za.s[x11, 1], { z25.h-z26.h }, z6.h\n"
+ ".inst 0x648aaa3b // bfcvtnt z27.h, p2/M, z17.s\n"
+ ".inst 0xa0402aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21]\n"
+ ".inst 0x658aaa1c // bfcvt z28.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x23]\n"
- ".inst 0xc1267253 // bfdot za.s[x11, 3], { z18.h-z19.h }, z6.h\n"
- ".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
+ ".inst 0xc12b7332 // bfdot za.s[x11, 2], { z25.h-z26.h }, z11.h\n"
+ ".inst 0xc12a7333 // bfdot za.s[x11, 3], { z25.h-z26.h }, z10.h\n"
".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12f7270 // bfdot za.s[x11, 0], { z19.h-z20.h }, z15.h\n"
- ".inst 0xc1277271 // bfdot za.s[x11, 1], { z19.h-z20.h }, z7.h\n"
- ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xa1422ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc12d7254 // bfdot za.s[x11, 4], { z18.h-z19.h }, z13.h\n"
- ".inst 0xc1257255 // bfdot za.s[x11, 5], { z18.h-z19.h }, z5.h\n"
- ".inst 0xc12e7272 // bfdot za.s[x11, 2], { z19.h-z20.h }, z14.h\n"
- ".inst 0xc1267273 // bfdot za.s[x11, 3], { z19.h-z20.h }, z6.h\n"
+ ".inst 0xc12f7350 // bfdot za.s[x11, 0], { z26.h-z27.h }, z15.h\n"
+ ".inst 0x648aaa1c // bfcvtnt z28.h, p2/M, z16.s\n"
+ ".inst 0xc1277351 // bfdot za.s[x11, 1], { z26.h-z27.h }, z7.h\n"
+ ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc12d7334 // bfdot za.s[x11, 4], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc1257335 // bfdot za.s[x11, 5], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xc12f7352 // bfdot za.s[x11, 2], { z26.h-z27.h }, z15.h\n"
+ ".inst 0xc12e7353 // bfdot za.s[x11, 3], { z26.h-z27.h }, z14.h\n"
".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc12f7290 // bfdot za.s[x11, 0], { z20.h-z21.h }, z15.h\n"
- ".inst 0xc1277291 // bfdot za.s[x11, 1], { z20.h-z21.h }, z7.h\n"
+ ".inst 0xc12c7370 // bfdot za.s[x11, 0], { z27.h-z28.h }, z12.h\n"
+ ".inst 0xc1247371 // bfdot za.s[x11, 1], { z27.h-z28.h }, z4.h\n"
".inst 0xa0422aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12d7274 // bfdot za.s[x11, 4], { z19.h-z20.h }, z13.h\n"
- ".inst 0xc1257275 // bfdot za.s[x11, 5], { z19.h-z20.h }, z5.h\n"
- ".inst 0xc12f7292 // bfdot za.s[x11, 2], { z20.h-z21.h }, z15.h\n"
- ".inst 0xc12e7293 // bfdot za.s[x11, 3], { z20.h-z21.h }, z14.h\n"
- ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1237294 // bfdot za.s[x11, 4], { z20.h-z21.h }, z3.h\n"
- ".inst 0xc1227295 // bfdot za.s[x11, 5], { z20.h-z21.h }, z2.h\n"
+ ".inst 0xc12d7354 // bfdot za.s[x11, 4], { z26.h-z27.h }, z13.h\n"
+ ".inst 0xc1257355 // bfdot za.s[x11, 5], { z26.h-z27.h }, z5.h\n"
+ ".inst 0xc12f7372 // bfdot za.s[x11, 2], { z27.h-z28.h }, z15.h\n"
+ ".inst 0xc12e7373 // bfdot za.s[x11, 3], { z27.h-z28.h }, z14.h\n"
+ ".inst 0xa0422a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc1257374 // bfdot za.s[x11, 4], { z27.h-z28.h }, z5.h\n"
+ ".inst 0xc1247375 // bfdot za.s[x11, 5], { z27.h-z28.h }, z4.h\n"
"16:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x24, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x23, SP, #6\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ "addvl x22, SP, #12\n"
+ "addvl x21, SP, #18\n"
+ ".inst 0xa1412ae7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ "addvl x20, SP, #24\n"
"ld1w { z16.s }, p0/Z, [x16]\n"
- ".inst 0x658aaa09 // bfcvt z9.h, p2/M, z16.s\n"
- "add x24, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x648aaa09 // bfcvtnt z9.h, p2/M, z16.s\n"
- "add x24, x24, %x[ld_in_row], LSL #2\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa0422ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x24]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0x658aaa19 // bfcvt z25.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x648aaa0a // bfcvtnt z10.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa39 // bfcvtnt z25.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x24]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "addvl x23, SP, #6\n"
+ ".inst 0x658aaa1a // bfcvt z26.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x648aaa0b // bfcvtnt z11.h, p2/M, z16.s\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa3a // bfcvtnt z26.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x24]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa1402ae7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc12f7130 // bfdot za.s[x11, 0], { z9.h-z10.h }, z15.h\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- "addvl x22, SP, #12\n"
"add x24, x24, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ ".inst 0x658aaa1b // bfcvt z27.h, p2/M, z16.s\n"
+ ".inst 0xc12e7330 // bfdot za.s[x11, 0], { z25.h-z26.h }, z14.h\n"
+ "ld1w { z16.s }, p0/Z, [x24]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1277131 // bfdot za.s[x11, 1], { z9.h-z10.h }, z7.h\n"
- ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
- ".inst 0xa1412ae7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc12e7132 // bfdot za.s[x11, 2], { z9.h-z10.h }, z14.h\n"
- "addvl x20, SP, #24\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1267331 // bfdot za.s[x11, 1], { z25.h-z26.h }, z6.h\n"
+ ".inst 0x648aaa3b // bfcvtnt z27.h, p2/M, z17.s\n"
+ ".inst 0xa1402ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22]\n"
+ ".inst 0x658aaa1c // bfcvt z28.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0xc1267133 // bfdot za.s[x11, 3], { z9.h-z10.h }, z6.h\n"
- ".inst 0x648aaa0c // bfcvtnt z12.h, p2/M, z16.s\n"
- ".inst 0xa1402aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12f7150 // bfdot za.s[x11, 0], { z10.h-z11.h }, z15.h\n"
- ".inst 0xc1277151 // bfdot za.s[x11, 1], { z10.h-z11.h }, z7.h\n"
+ ".inst 0xc12a7332 // bfdot za.s[x11, 2], { z25.h-z26.h }, z10.h\n"
+ ".inst 0xc1227333 // bfdot za.s[x11, 3], { z25.h-z26.h }, z2.h\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc12f7350 // bfdot za.s[x11, 0], { z26.h-z27.h }, z15.h\n"
+ ".inst 0x648aaa1c // bfcvtnt z28.h, p2/M, z16.s\n"
+ ".inst 0xc1277351 // bfdot za.s[x11, 1], { z26.h-z27.h }, z7.h\n"
".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xa1422ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc12d7134 // bfdot za.s[x11, 4], { z9.h-z10.h }, z13.h\n"
- ".inst 0xc1257135 // bfdot za.s[x11, 5], { z9.h-z10.h }, z5.h\n"
+ ".inst 0xc12e7334 // bfdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc1267335 // bfdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12f7152 // bfdot za.s[x11, 2], { z10.h-z11.h }, z15.h\n"
- ".inst 0xc1277153 // bfdot za.s[x11, 3], { z10.h-z11.h }, z7.h\n"
+ ".inst 0xc12f7352 // bfdot za.s[x11, 2], { z26.h-z27.h }, z15.h\n"
+ ".inst 0xc1277353 // bfdot za.s[x11, 3], { z26.h-z27.h }, z7.h\n"
".inst 0xa1412aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12e7170 // bfdot za.s[x11, 0], { z11.h-z12.h }, z14.h\n"
- ".inst 0xc1267171 // bfdot za.s[x11, 1], { z11.h-z12.h }, z6.h\n"
+ ".inst 0xc1217370 // bfdot za.s[x11, 0], { z27.h-z28.h }, z1.h\n"
+ ".inst 0xc1207371 // bfdot za.s[x11, 1], { z27.h-z28.h }, z0.h\n"
".inst 0xa1422ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc12d7136 // bfdot za.s[x11, 6], { z9.h-z10.h }, z13.h\n"
- ".inst 0xc1257137 // bfdot za.s[x11, 7], { z9.h-z10.h }, z5.h\n"
- ".inst 0xc12f7154 // bfdot za.s[x11, 4], { z10.h-z11.h }, z15.h\n"
- ".inst 0xc1277155 // bfdot za.s[x11, 5], { z10.h-z11.h }, z7.h\n"
+ ".inst 0xc12d7336 // bfdot za.s[x11, 6], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc1257337 // bfdot za.s[x11, 7], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xc12f7354 // bfdot za.s[x11, 4], { z26.h-z27.h }, z15.h\n"
+ ".inst 0xc1277355 // bfdot za.s[x11, 5], { z26.h-z27.h }, z7.h\n"
".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc12e7172 // bfdot za.s[x11, 2], { z11.h-z12.h }, z14.h\n"
- ".inst 0xc1267173 // bfdot za.s[x11, 3], { z11.h-z12.h }, z6.h\n"
+ ".inst 0xc12e7372 // bfdot za.s[x11, 2], { z27.h-z28.h }, z14.h\n"
+ ".inst 0xc1267373 // bfdot za.s[x11, 3], { z27.h-z28.h }, z6.h\n"
".inst 0xa1422aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12f7156 // bfdot za.s[x11, 6], { z10.h-z11.h }, z15.h\n"
- ".inst 0xc1277157 // bfdot za.s[x11, 7], { z10.h-z11.h }, z7.h\n"
- ".inst 0xc1297174 // bfdot za.s[x11, 4], { z11.h-z12.h }, z9.h\n"
- ".inst 0xc1217175 // bfdot za.s[x11, 5], { z11.h-z12.h }, z1.h\n"
+ ".inst 0xc12f7356 // bfdot za.s[x11, 6], { z26.h-z27.h }, z15.h\n"
+ ".inst 0xc1277357 // bfdot za.s[x11, 7], { z26.h-z27.h }, z7.h\n"
+ ".inst 0xc1297374 // bfdot za.s[x11, 4], { z27.h-z28.h }, z9.h\n"
+ ".inst 0xc1217375 // bfdot za.s[x11, 5], { z27.h-z28.h }, z1.h\n"
".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1217176 // bfdot za.s[x11, 6], { z11.h-z12.h }, z1.h\n"
- ".inst 0xc1207177 // bfdot za.s[x11, 7], { z11.h-z12.h }, z0.h\n"
+ ".inst 0xc1217376 // bfdot za.s[x11, 6], { z27.h-z28.h }, z1.h\n"
+ ".inst 0xc1207377 // bfdot za.s[x11, 7], { z27.h-z28.h }, z0.h\n"
"17:" // Padded: 0 priming loads
- ".inst 0xa1402be6 // ld1h { z6.h, z14.h }, pn10.b/Z, [SP]\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- ".inst 0xa1422be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+ ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0xa1422be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
"cbz x25, 20f\n"
"mov x12, #0x0\n"
+ "add x20, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x25, x25, #0x1\n"
+ "sub x15, x15, #0x1\n"
+ "cmp x25, x15\n"
"ld1w { z16.s }, p0/Z, [x16]\n"
- ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
- "add x20, x16, %x[ld_in_row], LSL #2\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "csel x25, x25, x15, LT\n"
+ "add x16, x16, %x[ld_in_col], LSL #2\n"
+ "sub x15, x15, x25\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x658aaa15 // bfcvt z21.h, p2/M, z16.s\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0x658aaa1e // bfcvt z30.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- ".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
- "mov x12, #0x4\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa3e // bfcvtnt z30.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa16 // bfcvt z22.h, p2/M, z16.s\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0x658aaa1f // bfcvt z31.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa16 // bfcvtnt z22.h, p2/M, z16.s\n"
+ ".inst 0x648aaa3f // bfcvtnt z31.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa17 // bfcvt z23.h, p2/M, z16.s\n"
+ ".inst 0x658aaa00 // bfcvt z0.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa20 // bfcvtnt z0.h, p2/M, z17.s\n"
+ ".inst 0x658aaa01 // bfcvt z1.h, p2/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x20]\n"
- "sub x25, x25, #0x1\n"
- ".inst 0x648aaa17 // bfcvtnt z23.h, p2/M, z16.s\n"
- "sub x15, x15, #0x1\n"
- "cmp x25, x15\n"
- "csel x25, x25, x15, LT\n"
- "add x16, x16, %x[ld_in_col], LSL #2\n"
- "sub x15, x15, x25\n"
+ ".inst 0x648aaa01 // bfcvtnt z1.h, p2/M, z16.s\n"
"cbz x25, 19f\n"
"18:" // Padded: Main loop
- "addvl x24, SP, #6\n"
- ".inst 0xc12e7290 // bfdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
- "addvl x23, SP, #12\n"
- ".inst 0xc1267291 // bfdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
- ".inst 0xa0402b02 // ld1h { z2.h-z3.h }, pn10.b/Z, [x24]\n"
"mov x12, #0x0\n"
+ "addvl x24, SP, #6\n"
+ ".inst 0xc12b73d0 // bfdot za.s[x11, 0], { z30.h-z31.h }, z11.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1237292 // bfdot za.s[x11, 2], { z20.h-z21.h }, z3.h\n"
- "ld1w { z16.s }, p0/Z, [x16]\n"
+ ".inst 0xc12373d1 // bfdot za.s[x11, 1], { z30.h-z31.h }, z3.h\n"
+ ".inst 0xa1402b03 // ld1h { z3.h, z11.h }, pn10.b/Z, [x24]\n"
+ "addvl x23, SP, #12\n"
"add x22, x16, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1227293 // bfdot za.s[x11, 3], { z20.h-z21.h }, z2.h\n"
- ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
"addvl x21, SP, #18\n"
"addvl x20, SP, #24\n"
- ".inst 0xc12d72b0 // bfdot za.s[x11, 0], { z21.h-z22.h }, z13.h\n"
- "ld1w { z19.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc12572b1 // bfdot za.s[x11, 1], { z21.h-z22.h }, z5.h\n"
- ".inst 0xa1412b07 // ld1h { z7.h, z15.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
"subs x25, x25, #0x1\n"
+ "ld1w { z22.s }, p0/Z, [x16]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc12b73d2 // bfdot za.s[x11, 2], { z30.h-z31.h }, z11.h\n"
"add x16, x16, %x[ld_in_col], LSL #2\n"
- ".inst 0xc12e7294 // bfdot za.s[x11, 4], { z20.h-z21.h }, z14.h\n"
- "ld1w { z17.s }, p0/Z, [x22]\n"
+ ".inst 0xc12373d3 // bfdot za.s[x11, 3], { z30.h-z31.h }, z3.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc12573f0 // bfdot za.s[x11, 0], { z31.h-z0.h }, z5.h\n"
+ ".inst 0xc12473f1 // bfdot za.s[x11, 1], { z31.h-z0.h }, z4.h\n"
+ ".inst 0xa1412b07 // ld1h { z7.h, z15.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ "ld1w { z9.s }, p0/Z, [x22]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc12e73d4 // bfdot za.s[x11, 4], { z30.h-z31.h }, z14.h\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc12673d5 // bfdot za.s[x11, 5], { z30.h-z31.h }, z6.h\n"
+ ".inst 0xa1402aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc12f73f2 // bfdot za.s[x11, 2], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12773f3 // bfdot za.s[x11, 3], { z31.h-z0.h }, z7.h\n"
+ ".inst 0xa1412ae7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc12a7010 // bfdot za.s[x11, 0], { z0.h-z1.h }, z10.h\n"
+ "ld1w { z25.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- ".inst 0xc1267295 // bfdot za.s[x11, 5], { z20.h-z21.h }, z6.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc12f72b2 // bfdot za.s[x11, 2], { z21.h-z22.h }, z15.h\n"
- "ld1w { z27.s }, p0/Z, [x22]\n"
+ ".inst 0xc1227011 // bfdot za.s[x11, 1], { z0.h-z1.h }, z2.h\n"
+ ".inst 0xa1422b06 // ld1h { z6.h, z14.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc12d73d6 // bfdot za.s[x11, 6], { z30.h-z31.h }, z13.h\n"
+ ".inst 0xc12573d7 // bfdot za.s[x11, 7], { z30.h-z31.h }, z5.h\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc12f73f4 // bfdot za.s[x11, 4], { z31.h-z0.h }, z15.h\n"
+ "ld1w { z24.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc12772b3 // bfdot za.s[x11, 3], { z21.h-z22.h }, z7.h\n"
- ".inst 0xa1412ae7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc12c72d0 // bfdot za.s[x11, 0], { z22.h-z23.h }, z12.h\n"
- "ld1w { z10.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc12773f5 // bfdot za.s[x11, 5], { z31.h-z0.h }, z7.h\n"
+ ".inst 0xa1412aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc12e7012 // bfdot za.s[x11, 2], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc1267013 // bfdot za.s[x11, 3], { z0.h-z1.h }, z6.h\n"
+ ".inst 0xa1422ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc12f73f6 // bfdot za.s[x11, 6], { z31.h-z0.h }, z15.h\n"
+ "ld1w { z23.s }, p0/Z, [x22]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc12472d1 // bfdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
- ".inst 0xa1422b04 // ld1h { z4.h, z12.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc12e7296 // bfdot za.s[x11, 6], { z20.h-z21.h }, z14.h\n"
- "ld1w { z8.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc12773f7 // bfdot za.s[x11, 7], { z31.h-z0.h }, z7.h\n"
+ ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc12e7014 // bfdot za.s[x11, 4], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc1267015 // bfdot za.s[x11, 5], { z0.h-z1.h }, z6.h\n"
+ ".inst 0xa0422aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "ld1w { z20.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1267297 // bfdot za.s[x11, 7], { z20.h-z21.h }, z6.h\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12f72b4 // bfdot za.s[x11, 4], { z21.h-z22.h }, z15.h\n"
- "ld1w { z11.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1237016 // bfdot za.s[x11, 6], { z0.h-z1.h }, z3.h\n"
+ ".inst 0xc1227017 // bfdot za.s[x11, 7], { z0.h-z1.h }, z2.h\n"
+ ".inst 0xa1422a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc12772b5 // bfdot za.s[x11, 5], { z21.h-z22.h }, z7.h\n"
- ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12c72d2 // bfdot za.s[x11, 2], { z22.h-z23.h }, z12.h\n"
- "ld1w { z18.s }, p0/Z, [x22]\n"
- ".inst 0xc12472d3 // bfdot za.s[x11, 3], { z22.h-z23.h }, z4.h\n"
- ".inst 0xa1422ae4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc12f72b6 // bfdot za.s[x11, 6], { z21.h-z22.h }, z15.h\n"
- ".inst 0xc12e72b7 // bfdot za.s[x11, 7], { z21.h-z22.h }, z14.h\n"
- ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc12c72d4 // bfdot za.s[x11, 4], { z22.h-z23.h }, z12.h\n"
- ".inst 0xc12472d5 // bfdot za.s[x11, 5], { z22.h-z23.h }, z4.h\n"
- ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12172d6 // bfdot za.s[x11, 6], { z22.h-z23.h }, z1.h\n"
- ".inst 0xc12072d7 // bfdot za.s[x11, 7], { z22.h-z23.h }, z0.h\n"
- ".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc12d1290 // bfdot za.s[x8, 0], { z20.h-z21.h }, z13.h\n"
- ".inst 0xc1251291 // bfdot za.s[x8, 1], { z20.h-z21.h }, z5.h\n"
- ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
- ".inst 0xa1402be6 // ld1h { z6.h, z14.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc12f12b0 // bfdot za.s[x8, 0], { z21.h-z22.h }, z15.h\n"
- ".inst 0x648aaa74 // bfcvtnt z20.h, p2/M, z19.s\n"
- ".inst 0xc12712b1 // bfdot za.s[x8, 1], { z21.h-z22.h }, z7.h\n"
- ".inst 0x658aaa35 // bfcvt z21.h, p2/M, z17.s\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- ".inst 0xc12112d0 // bfdot za.s[x8, 0], { z22.h-z23.h }, z1.h\n"
- ".inst 0x648aab75 // bfcvtnt z21.h, p2/M, z27.s\n"
- ".inst 0xc12012d1 // bfdot za.s[x8, 1], { z22.h-z23.h }, z0.h\n"
- ".inst 0x658aa956 // bfcvt z22.h, p2/M, z10.s\n"
- ".inst 0x658aa977 // bfcvt z23.h, p2/M, z11.s\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc12513d0 // bfdot za.s[x8, 0], { z30.h-z31.h }, z5.h\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
+ ".inst 0xc12413d1 // bfdot za.s[x8, 1], { z30.h-z31.h }, z4.h\n"
+ ".inst 0x658aaade // bfcvt z30.h, p2/M, z22.s\n"
+ ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc12f13f0 // bfdot za.s[x8, 0], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12713f1 // bfdot za.s[x8, 1], { z31.h-z0.h }, z7.h\n"
+ ".inst 0x658aab3f // bfcvt z31.h, p2/M, z25.s\n"
+ ".inst 0xa0412be4 // ld1h { z4.h-z5.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0x648aa93e // bfcvtnt z30.h, p2/M, z9.s\n"
+ ".inst 0xc12a1010 // bfdot za.s[x8, 0], { z0.h-z1.h }, z10.h\n"
+ ".inst 0xc1221011 // bfdot za.s[x8, 1], { z0.h-z1.h }, z2.h\n"
+ ".inst 0x658aaae0 // bfcvt z0.h, p2/M, z23.s\n"
+ ".inst 0x658aaa21 // bfcvt z1.h, p2/M, z17.s\n"
"add x8, x8, #0x2\n"
- ".inst 0xc0066800 // mova { z0.d-z1.d }, za.d[x11, #0]\n"
- ".inst 0xa1422be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- ".inst 0x648aa916 // bfcvtnt z22.h, p2/M, z8.s\n"
- ".inst 0xc0066822 // mova { z2.d-z3.d }, za.d[x11, #1]\n"
- ".inst 0xc1bccba0 // fclamp { z0.s-z3.s }, z29.s, z28.s\n"
- "st1w { z0.s }, p1, [x14]\n"
- "add x14, x14, x5, LSL #2\n"
- "st1w { z2.s }, p1, [x13]\n"
- "add x13, x13, x10, LSL #2\n"
+ ".inst 0xa1422be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
+ ".inst 0x648aab1f // bfcvtnt z31.h, p2/M, z24.s\n"
+ ".inst 0xc006680c // mova { z12.d-z13.d }, za.d[x11, #0]\n"
+ ".inst 0x648aaa80 // bfcvtnt z0.h, p2/M, z20.s\n"
+ ".inst 0x648aaa01 // bfcvtnt z1.h, p2/M, z16.s\n"
+ ".inst 0xc006682e // mova { z14.d-z15.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc0040bc0 // mova za.d[x8, #0], { z30.d-z31.d }\n"
- "st1w { z1.s }, p1, [x9]\n"
+ ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
+ ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
+ ".inst 0xc1bdc90c // fclamp { z12.s-z15.s }, z8.s, z29.s\n"
+ "st1w { z12.s }, p1, [x14]\n"
+ "add x14, x14, x2, LSL #2\n"
+ "st1w { z14.s }, p1, [x13]\n"
+ "add x13, x13, x10, LSL #2\n"
+ "st1w { z13.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040bc1 // mova za.d[x8, #1], { z30.d-z31.d }\n"
- ".inst 0x648aaa57 // bfcvtnt z23.h, p2/M, z18.s\n"
- "st1w { z3.s }, p1, [x28]\n"
+ "st1w { z15.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
"bgt 18b\n"
"19:" // Main loop tail
"addvl x23, SP, #6\n"
- ".inst 0xc12e7290 // bfdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
+ ".inst 0xc12b73d0 // bfdot za.s[x11, 0], { z30.h-z31.h }, z11.h\n"
"addvl x22, SP, #12\n"
- ".inst 0xc1267291 // bfdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc12373d1 // bfdot za.s[x11, 1], { z30.h-z31.h }, z3.h\n"
+ ".inst 0xa1402ae3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x23]\n"
"addvl x21, SP, #18\n"
"addvl x20, SP, #24\n"
- ".inst 0xc1217292 // bfdot za.s[x11, 2], { z20.h-z21.h }, z1.h\n"
- ".inst 0xc1207293 // bfdot za.s[x11, 3], { z20.h-z21.h }, z0.h\n"
- ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc12d72b0 // bfdot za.s[x11, 0], { z21.h-z22.h }, z13.h\n"
- ".inst 0xc12572b1 // bfdot za.s[x11, 1], { z21.h-z22.h }, z5.h\n"
- ".inst 0xa1412ae7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc12e7294 // bfdot za.s[x11, 4], { z20.h-z21.h }, z14.h\n"
- ".inst 0xc1267295 // bfdot za.s[x11, 5], { z20.h-z21.h }, z6.h\n"
+ ".inst 0xc12b73d2 // bfdot za.s[x11, 2], { z30.h-z31.h }, z11.h\n"
+ ".inst 0xc12373d3 // bfdot za.s[x11, 3], { z30.h-z31.h }, z3.h\n"
+ ".inst 0xa0402acc // ld1h { z12.h-z13.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc12573f0 // bfdot za.s[x11, 0], { z31.h-z0.h }, z5.h\n"
+ ".inst 0xc12473f1 // bfdot za.s[x11, 1], { z31.h-z0.h }, z4.h\n"
+ ".inst 0xa1412ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc12d73d4 // bfdot za.s[x11, 4], { z30.h-z31.h }, z13.h\n"
+ ".inst 0xc12c73d5 // bfdot za.s[x11, 5], { z30.h-z31.h }, z12.h\n"
".inst 0xa1402aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc12f72b2 // bfdot za.s[x11, 2], { z21.h-z22.h }, z15.h\n"
- ".inst 0xc12772b3 // bfdot za.s[x11, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc12e73f2 // bfdot za.s[x11, 2], { z31.h-z0.h }, z14.h\n"
+ ".inst 0xc12673f3 // bfdot za.s[x11, 3], { z31.h-z0.h }, z6.h\n"
".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc12c72d0 // bfdot za.s[x11, 0], { z22.h-z23.h }, z12.h\n"
- ".inst 0xc12472d1 // bfdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xc12a7010 // bfdot za.s[x11, 0], { z0.h-z1.h }, z10.h\n"
+ ".inst 0xc1227011 // bfdot za.s[x11, 1], { z0.h-z1.h }, z2.h\n"
".inst 0xa1422ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc12d7296 // bfdot za.s[x11, 6], { z20.h-z21.h }, z13.h\n"
- ".inst 0xc1257297 // bfdot za.s[x11, 7], { z20.h-z21.h }, z5.h\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc12f72b4 // bfdot za.s[x11, 4], { z21.h-z22.h }, z15.h\n"
- ".inst 0xc12772b5 // bfdot za.s[x11, 5], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc12d73d6 // bfdot za.s[x11, 6], { z30.h-z31.h }, z13.h\n"
+ ".inst 0xc12573d7 // bfdot za.s[x11, 7], { z30.h-z31.h }, z5.h\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc12f73f4 // bfdot za.s[x11, 4], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12773f5 // bfdot za.s[x11, 5], { z31.h-z0.h }, z7.h\n"
".inst 0xa1412aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc12e72d2 // bfdot za.s[x11, 2], { z22.h-z23.h }, z14.h\n"
- ".inst 0xc12672d3 // bfdot za.s[x11, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xc12e7012 // bfdot za.s[x11, 2], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc1267013 // bfdot za.s[x11, 3], { z0.h-z1.h }, z6.h\n"
".inst 0xa1422ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc12f72b6 // bfdot za.s[x11, 6], { z21.h-z22.h }, z15.h\n"
- ".inst 0xc12772b7 // bfdot za.s[x11, 7], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc12f73f6 // bfdot za.s[x11, 6], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12773f7 // bfdot za.s[x11, 7], { z31.h-z0.h }, z7.h\n"
".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc12e72d4 // bfdot za.s[x11, 4], { z22.h-z23.h }, z14.h\n"
- ".inst 0xc12672d5 // bfdot za.s[x11, 5], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xc12e7014 // bfdot za.s[x11, 4], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc1267015 // bfdot za.s[x11, 5], { z0.h-z1.h }, z6.h\n"
".inst 0xa1422aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc12b72d6 // bfdot za.s[x11, 6], { z22.h-z23.h }, z11.h\n"
- ".inst 0xc12372d7 // bfdot za.s[x11, 7], { z22.h-z23.h }, z3.h\n"
+ ".inst 0xc12b7016 // bfdot za.s[x11, 6], { z0.h-z1.h }, z11.h\n"
+ ".inst 0xc1237017 // bfdot za.s[x11, 7], { z0.h-z1.h }, z3.h\n"
".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc12d1290 // bfdot za.s[x8, 0], { z20.h-z21.h }, z13.h\n"
- ".inst 0xc1251291 // bfdot za.s[x8, 1], { z20.h-z21.h }, z5.h\n"
- ".inst 0xc12f12b0 // bfdot za.s[x8, 0], { z21.h-z22.h }, z15.h\n"
- ".inst 0xc12712b1 // bfdot za.s[x8, 1], { z21.h-z22.h }, z7.h\n"
- ".inst 0xc12312d0 // bfdot za.s[x8, 0], { z22.h-z23.h }, z3.h\n"
- ".inst 0xc12212d1 // bfdot za.s[x8, 1], { z22.h-z23.h }, z2.h\n"
+ ".inst 0xc12513d0 // bfdot za.s[x8, 0], { z30.h-z31.h }, z5.h\n"
+ ".inst 0xc12413d1 // bfdot za.s[x8, 1], { z30.h-z31.h }, z4.h\n"
+ ".inst 0xc12f13f0 // bfdot za.s[x8, 0], { z31.h-z0.h }, z15.h\n"
+ ".inst 0xc12713f1 // bfdot za.s[x8, 1], { z31.h-z0.h }, z7.h\n"
+ ".inst 0xc1231010 // bfdot za.s[x8, 0], { z0.h-z1.h }, z3.h\n"
+ ".inst 0xc1221011 // bfdot za.s[x8, 1], { z0.h-z1.h }, z2.h\n"
"add x8, x8, #0x2\n"
".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
- ".inst 0xc1bccbb4 // fclamp { z20.s-z23.s }, z29.s, z28.s\n"
+ "add x11, x11, #0x2\n"
+ ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
+ ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
+ ".inst 0xc1bdc914 // fclamp { z20.s-z23.s }, z8.s, z29.s\n"
"st1w { z20.s }, p1, [x14]\n"
- "add x14, x14, x5, LSL #2\n"
+ "add x14, x14, x2, LSL #2\n"
"st1w { z22.s }, p1, [x13]\n"
"add x13, x13, x10, LSL #2\n"
- "add x11, x11, #0x2\n"
- ".inst 0xc0040bc0 // mova za.d[x8, #0], { z30.d-z31.d }\n"
"st1w { z21.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040bc1 // mova za.d[x8, #1], { z30.d-z31.d }\n"
"st1w { z23.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
"20:" // Main loop skip tail
@@ -1100,27 +1105,27 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"add x8, x8, #0x2\n"
"subs x15, x15, #0x1\n"
".inst 0xc0066822 // mova { z2.d-z3.d }, za.d[x11, #1]\n"
- ".inst 0xc1bccba0 // fclamp { z0.s-z3.s }, z29.s, z28.s\n"
+ "add x11, x11, #0x2\n"
+ ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
+ ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
+ ".inst 0xc1bdc900 // fclamp { z0.s-z3.s }, z8.s, z29.s\n"
"st1w { z0.s }, p1, [x14]\n"
- "add x14, x14, x5, LSL #2\n"
+ "add x14, x14, x2, LSL #2\n"
"st1w { z2.s }, p1, [x13]\n"
"add x13, x13, x10, LSL #2\n"
- "add x11, x11, #0x2\n"
- ".inst 0xc0040bc0 // mova za.d[x8, #0], { z30.d-z31.d }\n"
"st1w { z1.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040bc1 // mova za.d[x8, #1], { z30.d-z31.d }\n"
"st1w { z3.s }, p1, [x28]\n"
"add x28, x28, x26, LSL #2\n"
"bgt 21b\n"
"22:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
"incb x20, ALL, MUL #16\n"
"incb x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x17\n"
- "whilelt p1.s, x17, x7\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21, LSL #2\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -1139,9 +1144,11 @@ void sme2_fp32bf16fp32_planar_5x5_s1_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #30\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x2", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp
index 3a56e69d26..936e963915 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,162 +70,167 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
__asm__ __volatile__(
"ldr x3, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "mov x20, #0xb\n"
+ "mov x22, SP\n"
+ "mov x21, #0xb\n"
".inst 0xd503477f // SMSTART ZA\n"
- "sub x20, x20, x3\n"
"ldr x4, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x20, x22, #0x8\n"
"ptrue p2.b\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
- "ld1rw { z13.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
"ldr x5, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "and x20, x20, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "ld1rw { z30.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_min]]\n"
+ "sub x21, x21, x3\n"
+ "mov SP, x20\n"
+ "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z31.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-15\n"
"whilelt p1.s, XZR, x5\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z12.s }, p2/Z, [%x[args], %[offsetof_Args_clamp_max]]\n"
+ "whilelt p9.s, XZR, x21\n"
"whilelt p8.s, XZR, x4\n"
- "addvl SP, SP, #-15\n"
- "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
"1:" // Channel loop
"ldr x20, [%x[args], %[offsetof_Args_bias]]\n"
- "fmov z16.s, #0x0\n"
+ "fmov z24.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z16.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x20, x6, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x21, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x21\n"
- "ld1w { z31.s }, p2/Z, [x20]\n"
+ "ldr x27, [%x[args], %[offsetof_Args_weights]]\n"
+ "addvl x26, SP, #15\n"
+ "mov x25, #0xb\n"
+ "mov z25.d, z24.d\n"
+ "addvl x26, x26, #-3\n"
+ "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
+ "add x24, x4, x3\n"
+ "mov z26.d, z24.d\n"
+ "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x23, %x[ld_in_row], #0x2\n"
+ "mov z27.d, z24.d\n"
+ "mov x8, #0x0\n"
+ "mov x22, x27\n"
+ "incb x27\n"
+ "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "ld1w { z15.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #5\n"
+ "sub x20, x7, #0x1\n"
+ "ld1w { z23.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #5\n"
+ "orr x21, x20, %x[ld_in_col], LSL #18\n"
+ "ld1w { z14.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #5\n"
+ "orr x21, x5, x21, LSL #20\n"
+ ".inst 0x658aa9fd // bfcvt z29.h, p2/M, z15.s\n"
+ "ld1w { z15.s }, p2/Z, [x22]\n"
+ "incb x22, ALL, MUL #5\n"
+ "lsl x21, x21, #0x2\n"
+ "ld1w { z0.s }, p2/Z, [x22]\n"
+ "mov x20, x27\n"
+ "incb x27\n"
+ ".inst 0x658aa9ce // bfcvt z14.h, p2/M, z14.s\n"
+ "ld1w { z22.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "ld1w { z8.s }, p2/Z, [x20]\n"
+ "sub x25, x25, x24\n"
+ "madd x23, x23, x4, x17\n"
+ ".inst 0x648aaafd // bfcvtnt z29.h, p2/M, z23.s\n"
+ "ld1w { z20.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aabef // bfcvt z15.h, p2/M, z31.s\n"
- "incb x21\n"
- "ld1w { z18.s }, p2/Z, [x20]\n"
- "incb x20, ALL, MUL #5\n"
- ".inst 0x658aaa4e // bfcvt z14.h, p2/M, z18.s\n"
- "addvl x24, SP, #15\n"
+ ".inst 0x658aa81c // bfcvt z28.h, p2/M, z0.s\n"
"ld1w { z17.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x648aa90f // bfcvtnt z15.h, p2/M, z8.s\n"
- "addvl x24, x24, #-3\n"
- "ld1w { z18.s }, p2/Z, [x20]\n"
- "mov x20, x21\n"
- "st1h { z15.h }, p2, [x24]\n"
- ".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
- "ld1w { z29.s }, p2/Z, [x20]\n"
+ ".inst 0x648aa9ee // bfcvtnt z14.h, p2/M, z15.s\n"
+ "ld1w { z2.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aabb5 // bfcvt z21.h, p2/M, z29.s\n"
- "incb x21\n"
- "ld1w { z17.s }, p2/Z, [x20]\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
+ "mov x20, x27\n"
+ "incb x27\n"
+ "st1h { z29.h }, p2, [x26]\n"
+ ".inst 0x658aaad3 // bfcvt z19.h, p2/M, z22.s\n"
+ "ld1w { z22.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "st1h { z14.h }, p2, [x24, #1, MUL VL]\n"
- ".inst 0x658aaa58 // bfcvt z24.h, p2/M, z18.s\n"
- "ld1w { z26.s }, p2/Z, [x20]\n"
+ "st1h { z14.h }, p2, [x26, #1, MUL VL]\n"
+ ".inst 0x658aaa21 // bfcvt z1.h, p2/M, z17.s\n"
+ "st1h { z28.h }, p2, [x26, #2, MUL VL]\n"
+ "addvl x26, x26, #-3\n"
+ ".inst 0x658aa965 // bfcvt z5.h, p2/M, z11.s\n"
+ ".inst 0x648aaa93 // bfcvtnt z19.h, p2/M, z20.s\n"
+ "ld1w { z13.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aab41 // bfcvt z1.h, p2/M, z26.s\n"
- ".inst 0x648aaa35 // bfcvtnt z21.h, p2/M, z17.s\n"
+ "ld1w { z0.s }, p2/Z, [x20]\n"
+ "incb x20, ALL, MUL #5\n"
+ ".inst 0x648aa841 // bfcvtnt z1.h, p2/M, z2.s\n"
"ld1w { z17.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "st1h { z24.h }, p2, [x24, #2, MUL VL]\n"
- "addvl x24, x24, #-3\n"
- "ld1w { z9.s }, p2/Z, [x20]\n"
- "mov x20, x21\n"
- "st1h { z21.h }, p2, [x24]\n"
- ".inst 0x648aaa21 // bfcvtnt z1.h, p2/M, z17.s\n"
- "ld1w { z3.s }, p2/Z, [x20]\n"
+ "ld1w { z29.s }, p2/Z, [x20]\n"
+ "mov x20, x27\n"
+ "incb x27\n"
+ "st1h { z19.h }, p2, [x26]\n"
+ ".inst 0x658aaad3 // bfcvt z19.h, p2/M, z22.s\n"
+ "ld1w { z18.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "incb x21\n"
- ".inst 0x658aa864 // bfcvt z4.h, p2/M, z3.s\n"
- "ld1w { z31.s }, p2/Z, [x20]\n"
+ "st1h { z1.h }, p2, [x26, #1, MUL VL]\n"
+ ".inst 0x658aa814 // bfcvt z20.h, p2/M, z0.s\n"
+ "st1h { z5.h }, p2, [x26, #2, MUL VL]\n"
+ "addvl x26, x26, #-3\n"
+ ".inst 0x658aabaf // bfcvt z15.h, p2/M, z29.s\n"
+ ".inst 0x648aa9b3 // bfcvtnt z19.h, p2/M, z13.s\n"
+ "ld1w { z7.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aa92b // bfcvt z11.h, p2/M, z9.s\n"
- "st1h { z1.h }, p2, [x24, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x20]\n"
+ "ld1w { z13.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aaa46 // bfcvt z6.h, p2/M, z18.s\n"
- "st1h { z11.h }, p2, [x24, #2, MUL VL]\n"
- "ld1w { z5.s }, p2/Z, [x20]\n"
+ ".inst 0x648aaa34 // bfcvtnt z20.h, p2/M, z17.s\n"
+ "ld1w { z29.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "addvl x24, x24, #-3\n"
- ".inst 0x648aabe4 // bfcvtnt z4.h, p2/M, z31.s\n"
- "ld1w { z27.s }, p2/Z, [x20]\n"
- "mov x20, x21\n"
- "st1h { z4.h }, p2, [x24]\n"
- ".inst 0x648aa8a6 // bfcvtnt z6.h, p2/M, z5.s\n"
- "ld1w { z9.s }, p2/Z, [x20]\n"
+ "ld1w { z11.s }, p2/Z, [x20]\n"
+ "mov x20, x27\n"
+ "st1h { z19.h }, p2, [x26]\n"
+ ".inst 0x658aaa5c // bfcvt z28.h, p2/M, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aa938 // bfcvt z24.h, p2/M, z9.s\n"
- "incb x21\n"
- "ld1w { z17.s }, p2/Z, [x20]\n"
+ "st1h { z20.h }, p2, [x26, #1, MUL VL]\n"
+ ".inst 0x658aa9b6 // bfcvt z22.h, p2/M, z13.s\n"
+ "st1h { z15.h }, p2, [x26, #2, MUL VL]\n"
+ "addvl x26, x26, #-3\n"
+ ".inst 0x658aa965 // bfcvt z5.h, p2/M, z11.s\n"
+ ".inst 0x648aa8fc // bfcvtnt z28.h, p2/M, z7.s\n"
+ "ld1w { z19.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x658aab75 // bfcvt z21.h, p2/M, z27.s\n"
- "st1h { z6.h }, p2, [x24, #1, MUL VL]\n"
- "ld1w { z31.s }, p2/Z, [x20]\n"
+ "ld1w { z13.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- ".inst 0x648aaa38 // bfcvtnt z24.h, p2/M, z17.s\n"
- ".inst 0x658aabf9 // bfcvt z25.h, p2/M, z31.s\n"
- "ld1w { z18.s }, p2/Z, [x20]\n"
+ ".inst 0x648aabb6 // bfcvtnt z22.h, p2/M, z29.s\n"
+ "ld1w { z2.s }, p2/Z, [x20]\n"
"incb x20, ALL, MUL #5\n"
- "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
- "st1h { z21.h }, p2, [x24, #2, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x20]\n"
- "mov x21, x21\n"
- "addvl x24, x24, #-3\n"
- "st1h { z24.h }, p2, [x24]\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- ".inst 0x648aaa59 // bfcvtnt z25.h, p2/M, z18.s\n"
- "st1h { z25.h }, p2, [x24, #1, MUL VL]\n"
- "ld1w { z8.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- ".inst 0x658aaa29 // bfcvt z9.h, p2/M, z17.s\n"
- ".inst 0x658aa976 // bfcvt z22.h, p2/M, z11.s\n"
- "ld1w { z28.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- ".inst 0x658aab85 // bfcvt z5.h, p2/M, z28.s\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "ld1w { z25.s }, p2/Z, [x21]\n"
- "incb x21, ALL, MUL #5\n"
- "sub x20, x7, #0x1\n"
- "st1h { z22.h }, p2, [x24, #2, MUL VL]\n"
- "ld1w { z11.s }, p2/Z, [x21]\n"
- "orr x23, x20, %x[ld_in_col], LSL #18\n"
- "addvl x24, x24, #-3\n"
- "mov z17.d, z16.d\n"
- "orr x23, x5, x23, LSL #20\n"
- "mov x22, #0xb\n"
- "mov z18.d, z16.d\n"
- "mov z19.d, z16.d\n"
- "add x21, x4, x3\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- ".inst 0x648aa909 // bfcvtnt z9.h, p2/M, z8.s\n"
- "st1h { z9.h }, p2, [x24]\n"
- ".inst 0x648aab25 // bfcvtnt z5.h, p2/M, z25.s\n"
- "st1h { z5.h }, p2, [x24, #1, MUL VL]\n"
- ".inst 0x658aa97b // bfcvt z27.h, p2/M, z11.s\n"
- "mov x8, #0x0\n"
- "st1h { z27.h }, p2, [x24, #2, MUL VL]\n"
- "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
- "lsl x23, x23, #0x2\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x4, x17\n"
+ "ld1w { z0.s }, p2/Z, [x20]\n"
+ "st1h { z28.h }, p2, [x26]\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ "st1h { z22.h }, p2, [x26, #1, MUL VL]\n"
+ ".inst 0x658aa9b2 // bfcvt z18.h, p2/M, z13.s\n"
+ "st1h { z5.h }, p2, [x26, #2, MUL VL]\n"
+ "addvl x26, x26, #-3\n"
+ ".inst 0x658aa81c // bfcvt z28.h, p2/M, z0.s\n"
+ ".inst 0x648aaa70 // bfcvtnt z16.h, p2/M, z19.s\n"
+ ".inst 0x648aa852 // bfcvtnt z18.h, p2/M, z2.s\n"
+ "st1h { z16.h }, p2, [x26]\n"
+ "st1h { z18.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z28.h }, p2, [x26, #2, MUL VL]\n"
"3:" // Issue prefetches
- "subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col], LSL #2\n"
+ "subs x25, x25, #0x1\n"
+ ".inst 0xf8b54afc // rprfm pldstrm, x21, [x23]\n"
+ "add x23, x23, %x[ld_in_col], LSL #2\n"
"bgt 3b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x2\n"
- "msub x17, x4, x20, x17\n"
- ".inst 0xc0040e00 // mova za.d[x8, #0], { z16.d-z19.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040e01 // mova za.d[x8, #1], { z16.d-z19.d }\n"
+ "lsl x21, %x[ld_in_row], #0x2\n"
+ ".inst 0xc0040f00 // mova za.d[x8, #0], { z24.d-z27.d }\n"
"mov x22, #0x4\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x17, x4, x21, x17\n"
+ ".inst 0xc0040f01 // mova za.d[x8, #1], { z24.d-z27.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040f02 // mova za.d[x8, #2], { z24.d-z27.d }\n"
"ldp x15, x14, [x23], #0x10\n"
- ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc0040f03 // mova za.d[x8, #3], { z24.d-z27.d }\n"
"ldp x13, x11, [x20], #0x10\n"
- ".inst 0xc0040e03 // mova za.d[x8, #3], { z16.d-z19.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040e04 // mova za.d[x8, #4], { z16.d-z19.d }\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
"ldp x10, x9, [x23], #0x10\n"
"ldp x28, x27, [x20], #0x10\n"
"cbz x21, 5f\n"
@@ -234,21 +239,21 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 5f\n"
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
"and x22, x21, #0x1\n"
"add x21, x21, #0x1\n"
- ".inst 0xc1acc9a4 // fclamp { z4.s-z7.s }, z13.s, z12.s\n"
"lsr x21, x21, #0x1\n"
"sub x16, x16, x21\n"
+ ".inst 0xc1bfcbc0 // fclamp { z0.s-z3.s }, z30.s, z31.s\n"
"4:" // Left padding
"subs x21, x21, #0x1\n"
- "st1w { z4.s }, p1, [x15]\n"
+ "st1w { z0.s }, p1, [x15]\n"
"add x15, x15, x13, LSL #2\n"
- "st1w { z5.s }, p1, [x14]\n"
+ "st1w { z1.s }, p1, [x14]\n"
"add x14, x14, x11, LSL #2\n"
- "st1w { z6.s }, p1, [x10]\n"
+ "st1w { z2.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- "st1w { z7.s }, p1, [x9]\n"
+ "st1w { z3.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
"bgt 4b\n"
"5:" // Left padding: End
@@ -264,331 +269,331 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
"beq 7f\n"
"6:" // Unpadded: 4 priming loads
"add x21, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z0.s }, p1/Z, [x17]\n"
- ".inst 0x658aa816 // bfcvt z22.h, p2/M, z0.s\n"
+ "ld1w { z2.s }, p1/Z, [x17]\n"
"addvl x20, SP, #12\n"
- "ld1w { z9.s }, p1/Z, [x21]\n"
+ "ld1w { z18.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa936 // bfcvtnt z22.h, p2/M, z9.s\n"
"add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z28.s }, p1/Z, [x21]\n"
+ "ld1w { z5.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab97 // bfcvt z23.h, p2/M, z28.s\n"
- "ld1w { z20.s }, p1/Z, [x21]\n"
+ "ld1w { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa97 // bfcvtnt z23.h, p2/M, z20.s\n"
- "ld1w { z20.s }, p1/Z, [x21]\n"
+ ".inst 0x658aa84d // bfcvt z13.h, p2/M, z2.s\n"
+ "ld1w { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa98 // bfcvt z24.h, p2/M, z20.s\n"
- "ld1w { z29.s }, p1/Z, [x21]\n"
+ "ld1w { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aabb8 // bfcvtnt z24.h, p2/M, z29.s\n"
- "ld1w { z30.s }, p1/Z, [x21]\n"
+ ".inst 0x658aa8ae // bfcvt z14.h, p2/M, z5.s\n"
+ "ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aabd9 // bfcvt z25.h, p2/M, z30.s\n"
- "ld1w { z9.s }, p1/Z, [x21]\n"
+ "ld1w { z1.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa939 // bfcvtnt z25.h, p2/M, z9.s\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ ".inst 0x658aaa6f // bfcvt z15.h, p2/M, z19.s\n"
+ ".inst 0x648aaa4d // bfcvtnt z13.h, p2/M, z18.s\n"
+ "ld1w { z20.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab5a // bfcvt z26.h, p2/M, z26.s\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc13312d0 // bfdot za.s[x8, 0], { z22.h-z25.h }, z3.h\n"
- "ld1w { z9.s }, p1/Z, [x21]\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ ".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ "ld1w { z3.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa93a // bfcvtnt z26.h, p2/M, z9.s\n"
- ".inst 0xc13b12f0 // bfdot za.s[x8, 0], { z23.h-z26.h }, z11.h\n"
- "ld1w { z9.s }, p1/Z, [x21]\n"
- ".inst 0x658aa93b // bfcvt z27.h, p2/M, z9.s\n"
- "ld1h { z9.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1391310 // bfdot za.s[x8, 0], { z24.h-z27.h }, z9.h\n"
+ ".inst 0x648aaaaf // bfcvtnt z15.h, p2/M, z21.s\n"
+ ".inst 0x658aaa91 // bfcvt z17.h, p2/M, z20.s\n"
+ "ld1w { z19.s }, p1/Z, [x21]\n"
+ "ld1h { z12.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0x648aa830 // bfcvtnt z16.h, p2/M, z1.s\n"
+ ".inst 0x658aaa72 // bfcvt z18.h, p2/M, z19.s\n"
+ ".inst 0x648aa871 // bfcvtnt z17.h, p2/M, z3.s\n"
+ ".inst 0xc13411b0 // bfdot za.s[x8, 0], { z13.h-z16.h }, z4.h\n"
+ ".inst 0xc13511d0 // bfdot za.s[x8, 0], { z14.h-z17.h }, z5.h\n"
+ ".inst 0xc13c11f0 // bfdot za.s[x8, 0], { z15.h-z18.h }, z12.h\n"
"7:" // Unpadded: 3 priming loads
"add x21, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x17]\n"
- ".inst 0x658aab7d // bfcvt z29.h, p2/M, z27.s\n"
+ "ld1w { z17.s }, p1/Z, [x17]\n"
"addvl x20, SP, #9\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "ld1w { z4.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab5d // bfcvtnt z29.h, p2/M, z26.s\n"
"add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z9.s }, p1/Z, [x21]\n"
+ "ld1w { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa93e // bfcvt z30.h, p2/M, z9.s\n"
- "ld1w { z20.s }, p1/Z, [x21]\n"
+ "ld1w { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa9e // bfcvtnt z30.h, p2/M, z20.s\n"
- "ld1w { z25.s }, p1/Z, [x21]\n"
+ ".inst 0x658aaa25 // bfcvt z5.h, p2/M, z17.s\n"
+ "ld1w { z18.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab3f // bfcvt z31.h, p2/M, z25.s\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "ld1w { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab5f // bfcvtnt z31.h, p2/M, z26.s\n"
- "ld1w { z27.s }, p1/Z, [x21]\n"
+ ".inst 0x658aa966 // bfcvt z6.h, p2/M, z11.s\n"
+ "ld1w { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab60 // bfcvt z0.h, p2/M, z27.s\n"
- "ld1w { z9.s }, p1/Z, [x21]\n"
+ "ld1w { z10.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa920 // bfcvtnt z0.h, p2/M, z9.s\n"
- "ld1w { z23.s }, p1/Z, [x21]\n"
+ ".inst 0x658aaa47 // bfcvt z7.h, p2/M, z18.s\n"
+ ".inst 0x648aa885 // bfcvtnt z5.h, p2/M, z4.s\n"
+ "ld1w { z0.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaae1 // bfcvt z1.h, p2/M, z23.s\n"
- ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc13413b0 // bfdot za.s[x8, 0], { z29.h-z0.h }, z4.h\n"
- "ld1w { z9.s }, p1/Z, [x21]\n"
+ ".inst 0x658aa9c8 // bfcvt z8.h, p2/M, z14.s\n"
+ ".inst 0x648aa9a6 // bfcvtnt z6.h, p2/M, z13.s\n"
+ ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
+ "ld1w { z4.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa921 // bfcvtnt z1.h, p2/M, z9.s\n"
- ".inst 0xc13513d0 // bfdot za.s[x8, 0], { z30.h-z1.h }, z5.h\n"
- "ld1w { z29.s }, p1/Z, [x21]\n"
- ".inst 0x658aaba2 // bfcvt z2.h, p2/M, z29.s\n"
- "ld1h { z9.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc13913f0 // bfdot za.s[x8, 0], { z31.h-z2.h }, z9.h\n"
+ ".inst 0x648aa987 // bfcvtnt z7.h, p2/M, z12.s\n"
+ ".inst 0x658aa809 // bfcvt z9.h, p2/M, z0.s\n"
+ "ld1w { z0.s }, p1/Z, [x21]\n"
+ "ld1h { z2.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0x648aa948 // bfcvtnt z8.h, p2/M, z10.s\n"
+ ".inst 0x658aa80a // bfcvt z10.h, p2/M, z0.s\n"
+ ".inst 0x648aa889 // bfcvtnt z9.h, p2/M, z4.s\n"
+ ".inst 0xc13310b0 // bfdot za.s[x8, 0], { z5.h-z8.h }, z3.h\n"
+ ".inst 0xc13b10d0 // bfdot za.s[x8, 0], { z6.h-z9.h }, z11.h\n"
+ ".inst 0xc13210f0 // bfdot za.s[x8, 0], { z7.h-z10.h }, z2.h\n"
"8:" // Unpadded: 2 priming loads
"add x22, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x17]\n"
- ".inst 0x658aab7a // bfcvt z26.h, p2/M, z27.s\n"
+ "ld1w { z11.s }, p1/Z, [x17]\n"
"addvl x21, SP, #6\n"
- "ld1w { z21.s }, p1/Z, [x22]\n"
+ "ld1w { z23.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaaba // bfcvtnt z26.h, p2/M, z21.s\n"
"addvl x20, SP, #12\n"
- "ld1w { z25.s }, p1/Z, [x22]\n"
+ "ld1w { z19.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab3b // bfcvt z27.h, p2/M, z25.s\n"
"add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z4.s }, p1/Z, [x22]\n"
+ "ld1w { z28.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa89b // bfcvtnt z27.h, p2/M, z4.s\n"
- "ld1w { z10.s }, p1/Z, [x22]\n"
+ ".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
+ "ld1w { z18.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa95c // bfcvt z28.h, p2/M, z10.s\n"
- "ld1w { z4.s }, p1/Z, [x22]\n"
+ "ld1w { z6.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa89c // bfcvtnt z28.h, p2/M, z4.s\n"
- "ld1w { z5.s }, p1/Z, [x22]\n"
+ ".inst 0x658aaa6c // bfcvt z12.h, p2/M, z19.s\n"
+ "ld1w { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa8bd // bfcvt z29.h, p2/M, z5.s\n"
- "ld1w { z5.s }, p1/Z, [x22]\n"
+ "ld1w { z29.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa8bd // bfcvtnt z29.h, p2/M, z5.s\n"
- "ld1w { z5.s }, p1/Z, [x22]\n"
+ ".inst 0x658aaa4d // bfcvt z13.h, p2/M, z18.s\n"
+ ".inst 0x648aaaeb // bfcvtnt z11.h, p2/M, z23.s\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa8be // bfcvt z30.h, p2/M, z5.s\n"
- ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc13e1350 // bfdot za.s[x8, 0], { z26.h-z29.h }, z14.h\n"
- "ld1w { z5.s }, p1/Z, [x22]\n"
- ".inst 0x648aa8be // bfcvtnt z30.h, p2/M, z5.s\n"
+ ".inst 0x658aaa2e // bfcvt z14.h, p2/M, z17.s\n"
+ ".inst 0x648aab8c // bfcvtnt z12.h, p2/M, z28.s\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "ld1w { z20.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13f1370 // bfdot za.s[x8, 0], { z27.h-z30.h }, z15.h\n"
- ".inst 0xa0402a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1381351 // bfdot za.s[x8, 1], { z26.h-z29.h }, z8.h\n"
- "ld1w { z23.s }, p1/Z, [x22]\n"
- ".inst 0x658aaaff // bfcvt z31.h, p2/M, z23.s\n"
- ".inst 0xc1391371 // bfdot za.s[x8, 1], { z27.h-z30.h }, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1301390 // bfdot za.s[x8, 0], { z28.h-z31.h }, z0.h\n"
+ ".inst 0x648aa8cd // bfcvtnt z13.h, p2/M, z6.s\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
+ "ld1h { z3.h }, p2/Z, [x21, #2, MUL VL]\n"
+ ".inst 0x648aabae // bfcvtnt z14.h, p2/M, z29.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ ".inst 0x648aaa8f // bfcvtnt z15.h, p2/M, z20.s\n"
+ ".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ ".inst 0xc13a1190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1301171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc13311b0 // bfdot za.s[x8, 0], { z13.h-z16.h }, z3.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1301391 // bfdot za.s[x8, 1], { z28.h-z31.h }, z0.h\n"
+ ".inst 0xc1311191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc13011b1 // bfdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"9:" // Unpadded: 1 priming loads
"add x22, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x17]\n"
- ".inst 0x658aab77 // bfcvt z23.h, p2/M, z27.s\n"
+ "ld1w { z16.s }, p1/Z, [x17]\n"
"addvl x21, SP, #3\n"
- "ld1w { z24.s }, p1/Z, [x22]\n"
+ "ld1w { z5.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab17 // bfcvtnt z23.h, p2/M, z24.s\n"
"addvl x20, SP, #9\n"
- "ld1w { z31.s }, p1/Z, [x22]\n"
+ "ld1w { z23.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aabf8 // bfcvt z24.h, p2/M, z31.s\n"
"add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z6.s }, p1/Z, [x22]\n"
+ "ld1w { z0.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa8d8 // bfcvtnt z24.h, p2/M, z6.s\n"
+ ".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
"ld1w { z28.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab99 // bfcvt z25.h, p2/M, z28.s\n"
- "ld1w { z26.s }, p1/Z, [x22]\n"
+ "ld1w { z22.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab59 // bfcvtnt z25.h, p2/M, z26.s\n"
- "ld1w { z28.s }, p1/Z, [x22]\n"
+ ".inst 0x658aaaf2 // bfcvt z18.h, p2/M, z23.s\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab9a // bfcvt z26.h, p2/M, z28.s\n"
- "ld1w { z4.s }, p1/Z, [x22]\n"
+ "ld1w { z8.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa89a // bfcvtnt z26.h, p2/M, z4.s\n"
- "ld1w { z20.s }, p1/Z, [x22]\n"
+ ".inst 0x658aab93 // bfcvt z19.h, p2/M, z28.s\n"
+ ".inst 0x648aa8b1 // bfcvtnt z17.h, p2/M, z5.s\n"
+ "ld1w { z21.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aaa9b // bfcvt z27.h, p2/M, z20.s\n"
- ".inst 0xa1402aa0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc13012f0 // bfdot za.s[x8, 0], { z23.h-z26.h }, z0.h\n"
- "ld1w { z20.s }, p1/Z, [x22]\n"
- ".inst 0x648aaa9b // bfcvtnt z27.h, p2/M, z20.s\n"
+ ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
+ ".inst 0x648aa812 // bfcvtnt z18.h, p2/M, z0.s\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "ld1w { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc1381310 // bfdot za.s[x8, 0], { z24.h-z27.h }, z8.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc13212f1 // bfdot za.s[x8, 1], { z23.h-z26.h }, z2.h\n"
- "ld1w { z11.s }, p1/Z, [x22]\n"
- ".inst 0x658aa97c // bfcvt z28.h, p2/M, z11.s\n"
- ".inst 0xc1331311 // bfdot za.s[x8, 1], { z24.h-z27.h }, z3.h\n"
- "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1341330 // bfdot za.s[x8, 0], { z25.h-z28.h }, z4.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1301331 // bfdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
+ ".inst 0x648aaad3 // bfcvtnt z19.h, p2/M, z22.s\n"
+ ".inst 0x658aaab5 // bfcvt z21.h, p2/M, z21.s\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
+ "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ ".inst 0x648aa914 // bfcvtnt z20.h, p2/M, z8.s\n"
+ ".inst 0x658aaa16 // bfcvt z22.h, p2/M, z16.s\n"
+ ".inst 0x648aa9b5 // bfcvtnt z21.h, p2/M, z13.s\n"
+ ".inst 0xc1321230 // bfdot za.s[x8, 0], { z17.h-z20.h }, z2.h\n"
+ ".inst 0xc13a1250 // bfdot za.s[x8, 0], { z18.h-z21.h }, z10.h\n"
+ ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1331231 // bfdot za.s[x8, 1], { z17.h-z20.h }, z3.h\n"
+ ".inst 0xc1301270 // bfdot za.s[x8, 0], { z19.h-z22.h }, z0.h\n"
+ "ld1h { z7.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc13b1251 // bfdot za.s[x8, 1], { z18.h-z21.h }, z11.h\n"
+ ".inst 0xc1371271 // bfdot za.s[x8, 1], { z19.h-z22.h }, z7.h\n"
"10:" // Unpadded: 0 priming loads
"cmp x7, #0x2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z7.h }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z9.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 20f\n"
"add x21, x17, %x[ld_in_row], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x17]\n"
- ".inst 0x658aab75 // bfcvt z21.h, p2/M, z27.s\n"
+ "ld1w { z10.s }, p1/Z, [x17]\n"
"sub x7, x7, #0x2\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "ld1w { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
"sub x16, x16, #0x1\n"
- ".inst 0x648aab55 // bfcvtnt z21.h, p2/M, z26.s\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "ld1w { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab56 // bfcvt z22.h, p2/M, z26.s\n"
"lsr x20, x7, #0x1\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "ld1w { z19.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aa941 // bfcvt z1.h, p2/M, z10.s\n"
"cmp x20, x16\n"
- ".inst 0x648aab56 // bfcvtnt z22.h, p2/M, z26.s\n"
- "ld1w { z8.s }, p1/Z, [x21]\n"
+ "ld1w { z28.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa917 // bfcvt z23.h, p2/M, z8.s\n"
"csel x26, x20, x16, LT\n"
- "ld1w { z2.s }, p1/Z, [x21]\n"
+ "ld1w { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa857 // bfcvtnt z23.h, p2/M, z2.s\n"
+ ".inst 0x658aaaa2 // bfcvt z2.h, p2/M, z21.s\n"
"add x17, x17, %x[ld_in_col], LSL #2\n"
"ld1w { z6.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa8d8 // bfcvt z24.h, p2/M, z6.s\n"
"and x7, x7, #0x1\n"
- "ld1w { z15.s }, p1/Z, [x21]\n"
+ "ld1w { z23.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa9f8 // bfcvtnt z24.h, p2/M, z15.s\n"
- "sub x16, x16, x26\n"
- "ld1w { z27.s }, p1/Z, [x21]\n"
+ ".inst 0x658aab83 // bfcvt z3.h, p2/M, z28.s\n"
+ ".inst 0x648aa9c1 // bfcvtnt z1.h, p2/M, z14.s\n"
+ "ld1w { z5.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab79 // bfcvt z25.h, p2/M, z27.s\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "sub x16, x16, x26\n"
+ "ld1w { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab59 // bfcvtnt z25.h, p2/M, z26.s\n"
- "ld1w { z27.s }, p1/Z, [x21]\n"
- ".inst 0x658aab7a // bfcvt z26.h, p2/M, z27.s\n"
+ ".inst 0x658aa8c4 // bfcvt z4.h, p2/M, z6.s\n"
+ ".inst 0x648aaa62 // bfcvtnt z2.h, p2/M, z19.s\n"
+ "ld1w { z29.s }, p1/Z, [x21]\n"
+ ".inst 0x658aa8a5 // bfcvt z5.h, p2/M, z5.s\n"
+ ".inst 0x648aaa03 // bfcvtnt z3.h, p2/M, z16.s\n"
+ ".inst 0x648aaae4 // bfcvtnt z4.h, p2/M, z23.s\n"
+ ".inst 0x658aaba6 // bfcvt z6.h, p2/M, z29.s\n"
+ ".inst 0x648aa985 // bfcvtnt z5.h, p2/M, z12.s\n"
"cbz x26, 19f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc13312b0 // bfdot za.s[x8, 0], { z21.h-z24.h }, z3.h\n"
+ ".inst 0xc1301030 // bfdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
"addvl x25, SP, #6\n"
"addvl x24, SP, #12\n"
- "ld1w { z14.s }, p1/Z, [x17]\n"
- ".inst 0xc13b12d0 // bfdot za.s[x8, 0], { z22.h-z25.h }, z11.h\n"
- ".inst 0xa1402b20 // ld1h { z0.h, z8.h }, pn10.b/Z, [x25]\n"
+ "ld1w { z10.s }, p1/Z, [x17]\n"
"add x23, x17, %x[ld_in_row], LSL #2\n"
"addvl x22, SP, #3\n"
- ".inst 0xc13012b1 // bfdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1w { z27.s }, p1/Z, [x23]\n"
+ "ld1w { z13.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
"add x17, x17, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13812d1 // bfdot za.s[x8, 1], { z22.h-z25.h }, z8.h\n"
- ".inst 0xa1402b00 // ld1h { z0.h, z8.h }, pn10.b/Z, [x24]\n"
+ "ld1w { z11.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row], LSL #2\n"
"addvl x21, SP, #9\n"
+ ".inst 0xc1381050 // bfdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
+ ".inst 0xa1402b27 // ld1h { z7.h, z15.h }, pn10.b/Z, [x25]\n"
"add x20, x17, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13012b2 // bfdot za.s[x8, 2], { z21.h-z24.h }, z0.h\n"
- "ld1w { z2.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa9d5 // bfcvt z21.h, p2/M, z14.s\n"
- ".inst 0xc13712f0 // bfdot za.s[x8, 0], { z23.h-z26.h }, z7.h\n"
- "ld1h { z11.h }, p2/Z, [x25, #2, MUL VL]\n"
- ".inst 0x648aab75 // bfcvtnt z21.h, p2/M, z27.s\n"
"subs x26, x26, #0x1\n"
- "ld1w { z14.s }, p1/Z, [x23]\n"
+ "ld1w { z22.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13812d2 // bfdot za.s[x8, 2], { z22.h-z25.h }, z8.h\n"
- ".inst 0x658aa856 // bfcvt z22.h, p2/M, z2.s\n"
- "ld1w { z7.s }, p1/Z, [x23]\n"
+ "ld1w { z21.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13b12f1 // bfdot za.s[x8, 1], { z23.h-z26.h }, z11.h\n"
- ".inst 0x648aa9d6 // bfcvtnt z22.h, p2/M, z14.s\n"
- "ld1w { z31.s }, p1/Z, [x23]\n"
+ ".inst 0xc1371031 // bfdot za.s[x8, 1], { z1.h-z4.h }, z7.h\n"
+ "ld1w { z0.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
- ".inst 0xc1acc9a8 // fclamp { z8.s-z11.s }, z13.s, z12.s\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
- ".inst 0xc13012f2 // bfdot za.s[x8, 2], { z23.h-z26.h }, z0.h\n"
- ".inst 0x658aa8f7 // bfcvt z23.h, p2/M, z7.s\n"
- "add x8, x8, #0x1\n"
- "ld1w { z26.s }, p1/Z, [x23]\n"
+ ".inst 0xc1391070 // bfdot za.s[x8, 0], { z3.h-z6.h }, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1w { z8.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab58 // bfcvt z24.h, p2/M, z26.s\n"
- ".inst 0x648aabf7 // bfcvtnt z23.h, p2/M, z31.s\n"
- "ld1w { z2.s }, p1/Z, [x23]\n"
+ "ld1w { z20.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa858 // bfcvtnt z24.h, p2/M, z2.s\n"
- "st1w { z8.s }, p1, [x15]\n"
- "ld1w { z0.s }, p1/Z, [x23]\n"
+ ".inst 0xc13f1051 // bfdot za.s[x8, 1], { z2.h-z5.h }, z15.h\n"
+ ".inst 0xa0402b0e // ld1h { z14.h-z15.h }, pn10.b/Z, [x24]\n"
+ "ld1w { z29.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa819 // bfcvt z25.h, p2/M, z0.s\n"
- "add x15, x15, x13, LSL #2\n"
- ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc13212b0 // bfdot za.s[x8, 0], { z21.h-z24.h }, z2.h\n"
- "st1w { z9.s }, p1, [x14]\n"
- "add x14, x14, x11, LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x23]\n"
- ".inst 0x648aab59 // bfcvtnt z25.h, p2/M, z26.s\n"
+ ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
+ "ld1w { z9.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13312d0 // bfdot za.s[x8, 0], { z22.h-z25.h }, z3.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc13112b1 // bfdot za.s[x8, 1], { z21.h-z24.h }, z1.h\n"
- "st1w { z10.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "ld1w { z26.s }, p1/Z, [x23]\n"
- ".inst 0x658aab5a // bfcvt z26.h, p2/M, z26.s\n"
- ".inst 0xc13912d1 // bfdot za.s[x8, 1], { z22.h-z25.h }, z9.h\n"
- "ld1w { z31.s }, p1/Z, [x17]\n"
- ".inst 0x658aabf5 // bfcvt z21.h, p2/M, z31.s\n"
- "st1w { z11.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- "ld1w { z30.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0xc0040e04 // mova za.d[x8, #4], { z16.d-z19.d }\n"
- ".inst 0x648aabd5 // bfcvtnt z21.h, p2/M, z30.s\n"
- "ld1w { z0.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa816 // bfcvt z22.h, p2/M, z0.s\n"
+ ".inst 0xc13e1032 // bfdot za.s[x8, 2], { z1.h-z4.h }, z14.h\n"
+ ".inst 0x658aa941 // bfcvt z1.h, p2/M, z10.s\n"
+ "ld1w { z23.s }, p1/Z, [x23]\n"
+ ".inst 0xc13c1071 // bfdot za.s[x8, 1], { z3.h-z6.h }, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z28.s }, p1/Z, [x17]\n"
+ ".inst 0xc1bfcbd0 // fclamp { z16.s-z19.s }, z30.s, z31.s\n"
"add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z1.s }, p1/Z, [x20]\n"
+ "ld1w { z10.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13f1052 // bfdot za.s[x8, 2], { z2.h-z5.h }, z15.h\n"
+ ".inst 0x658aa962 // bfcvt z2.h, p2/M, z11.s\n"
+ ".inst 0x648aa9a1 // bfcvtnt z1.h, p2/M, z13.s\n"
+ ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
+ "ld1w { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa836 // bfcvtnt z22.h, p2/M, z1.s\n"
+ "st1w { z16.s }, p1, [x15]\n"
+ "add x15, x15, x13, LSL #2\n"
"ld1w { z11.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc13212f0 // bfdot za.s[x8, 0], { z23.h-z26.h }, z2.h\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
+ "st1w { z17.s }, p1, [x14]\n"
+ "add x14, x14, x11, LSL #2\n"
+ ".inst 0xc13c1072 // bfdot za.s[x8, 2], { z3.h-z6.h }, z12.h\n"
+ ".inst 0x658aaaa3 // bfcvt z3.h, p2/M, z21.s\n"
+ ".inst 0x658aa904 // bfcvt z4.h, p2/M, z8.s\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0x648aaac2 // bfcvtnt z2.h, p2/M, z22.s\n"
+ ".inst 0x658aaba5 // bfcvt z5.h, p2/M, z29.s\n"
+ "ld1w { z12.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaae6 // bfcvt z6.h, p2/M, z23.s\n"
+ "ld1h { z8.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "st1w { z18.s }, p1, [x10]\n"
+ "add x10, x10, x28, LSL #2\n"
+ "ld1w { z7.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z14.s }, p1/Z, [x20]\n"
+ "st1w { z19.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ ".inst 0x648aa803 // bfcvtnt z3.h, p2/M, z0.s\n"
+ ".inst 0x648aaa84 // bfcvtnt z4.h, p2/M, z20.s\n"
+ "ld1w { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x20]\n"
+ ".inst 0x648aa925 // bfcvtnt z5.h, p2/M, z9.s\n"
+ "ld1w { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc13412f1 // bfdot za.s[x8, 1], { z23.h-z26.h }, z4.h\n"
- ".inst 0x658aa977 // bfcvt z23.h, p2/M, z11.s\n"
- "ld1w { z29.s }, p1/Z, [x20]\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
+ "ld1w { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa9d8 // bfcvt z24.h, p2/M, z14.s\n"
- ".inst 0x658aabb9 // bfcvt z25.h, p2/M, z29.s\n"
- "ld1w { z5.s }, p1/Z, [x20]\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab97 // bfcvtnt z23.h, p2/M, z28.s\n"
- ".inst 0x648aab78 // bfcvtnt z24.h, p2/M, z27.s\n"
- "ld1w { z11.s }, p1/Z, [x20]\n"
- ".inst 0x648aa8b9 // bfcvtnt z25.h, p2/M, z5.s\n"
- ".inst 0x658aa97a // bfcvt z26.h, p2/M, z11.s\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z7.h }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0xc13e1030 // bfdot za.s[x8, 0], { z1.h-z4.h }, z14.h\n"
+ "ld1w { z17.s }, p1/Z, [x20]\n"
+ ".inst 0xc13f1050 // bfdot za.s[x8, 0], { z2.h-z5.h }, z15.h\n"
+ ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc13e1031 // bfdot za.s[x8, 1], { z1.h-z4.h }, z14.h\n"
+ ".inst 0x658aab81 // bfcvt z1.h, p2/M, z28.s\n"
+ ".inst 0xc1381070 // bfdot za.s[x8, 0], { z3.h-z6.h }, z8.h\n"
+ "ld1h { z14.h }, p2/Z, [x21, #2, MUL VL]\n"
+ ".inst 0xc13f1051 // bfdot za.s[x8, 1], { z2.h-z5.h }, z15.h\n"
+ ".inst 0x658aa9a2 // bfcvt z2.h, p2/M, z13.s\n"
+ ".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+ ".inst 0x648aa941 // bfcvtnt z1.h, p2/M, z10.s\n"
+ ".inst 0xc13e1071 // bfdot za.s[x8, 1], { z3.h-z6.h }, z14.h\n"
+ ".inst 0x658aa983 // bfcvt z3.h, p2/M, z12.s\n"
+ ".inst 0x658aaaa4 // bfcvt z4.h, p2/M, z21.s\n"
+ ".inst 0x658aaa85 // bfcvt z5.h, p2/M, z20.s\n"
+ "ld1h { z9.h }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x648aa962 // bfcvtnt z2.h, p2/M, z11.s\n"
+ ".inst 0x658aaa26 // bfcvt z6.h, p2/M, z17.s\n"
+ ".inst 0x648aa8e3 // bfcvtnt z3.h, p2/M, z7.s\n"
+ ".inst 0x648aaa04 // bfcvtnt z4.h, p2/M, z16.s\n"
+ ".inst 0x648aaa45 // bfcvtnt z5.h, p2/M, z18.s\n"
"bgt 11b\n"
"b 19f\n"
"12:" // Padded
@@ -602,620 +607,620 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
"beq 14f\n"
"13:" // Padded: 4 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z1.s }, p0/Z, [x17]\n"
- ".inst 0x658aa837 // bfcvt z23.h, p2/M, z1.s\n"
"add x21, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #12\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x17]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z29.s }, p0/Z, [x21]\n"
- ".inst 0x648aabb7 // bfcvtnt z23.h, p2/M, z29.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z30.s }, p0/Z, [x21]\n"
- ".inst 0x658aabd8 // bfcvt z24.h, p2/M, z30.s\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z15.s }, p0/Z, [x21]\n"
- ".inst 0x648aa9f8 // bfcvtnt z24.h, p2/M, z15.s\n"
- "mov x12, #0x4\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa2b // bfcvtnt z11.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab79 // bfcvt z25.h, p2/M, z27.s\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa99 // bfcvtnt z25.h, p2/M, z20.s\n"
+ ".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z10.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa95a // bfcvt z26.h, p2/M, z10.s\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z8.s }, p0/Z, [x21]\n"
- ".inst 0x648aa91a // bfcvtnt z26.h, p2/M, z8.s\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa2d // bfcvtnt z13.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z28.s }, p0/Z, [x21]\n"
- ".inst 0x658aab9b // bfcvt z27.h, p2/M, z28.s\n"
- "addvl x20, SP, #12\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc13112f0 // bfdot za.s[x8, 0], { z23.h-z26.h }, z1.h\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z28.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab9b // bfcvtnt z27.h, p2/M, z28.s\n"
+ ".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z0.s }, p0/Z, [x21]\n"
- ".inst 0x658aa81c // bfcvt z28.h, p2/M, z0.s\n"
- ".inst 0xc1391310 // bfdot za.s[x8, 0], { z24.h-z27.h }, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- ".inst 0xc1301330 // bfdot za.s[x8, 0], { z25.h-z28.h }, z0.h\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ ".inst 0xc13a1190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc13011b0 // bfdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"14:" // Padded: 3 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z21.s }, p0/Z, [x17]\n"
- ".inst 0x658aaab4 // bfcvt z20.h, p2/M, z21.s\n"
"add x21, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #9\n"
+ ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x17]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
- ".inst 0x648aab74 // bfcvtnt z20.h, p2/M, z27.s\n"
- "add x21, x21, %x[ld_in_row], LSL #2\n"
+ "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
- ".inst 0x658aab75 // bfcvt z21.h, p2/M, z27.s\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
- ".inst 0x648aab75 // bfcvtnt z21.h, p2/M, z27.s\n"
- "mov x12, #0x4\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa2b // bfcvtnt z11.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z29.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aabb6 // bfcvt z22.h, p2/M, z29.s\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab76 // bfcvtnt z22.h, p2/M, z27.s\n"
+ ".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab77 // bfcvt z23.h, p2/M, z27.s\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z8.s }, p0/Z, [x21]\n"
- ".inst 0x648aa917 // bfcvtnt z23.h, p2/M, z8.s\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa2d // bfcvtnt z13.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z28.s }, p0/Z, [x21]\n"
- ".inst 0x658aab98 // bfcvt z24.h, p2/M, z28.s\n"
- "addvl x20, SP, #9\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1311290 // bfdot za.s[x8, 0], { z20.h-z23.h }, z1.h\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z0.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa818 // bfcvtnt z24.h, p2/M, z0.s\n"
+ ".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z1.s }, p0/Z, [x21]\n"
- ".inst 0x658aa839 // bfcvt z25.h, p2/M, z1.s\n"
- ".inst 0xc13912b0 // bfdot za.s[x8, 0], { z21.h-z24.h }, z9.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13012d0 // bfdot za.s[x8, 0], { z22.h-z25.h }, z0.h\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1321170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ ".inst 0xc13a1190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xc13011b0 // bfdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"15:" // Padded: 2 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z6.s }, p0/Z, [x17]\n"
- ".inst 0x658aa8da // bfcvt z26.h, p2/M, z6.s\n"
"add x22, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x21, SP, #6\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #12\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x17]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z29.s }, p0/Z, [x22]\n"
- ".inst 0x648aabba // bfcvtnt z26.h, p2/M, z29.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z28.s }, p0/Z, [x22]\n"
- ".inst 0x658aab9b // bfcvt z27.h, p2/M, z28.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z14.s }, p0/Z, [x22]\n"
- ".inst 0x648aa9db // bfcvtnt z27.h, p2/M, z14.s\n"
- "mov x12, #0x4\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa2b // bfcvtnt z11.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z24.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab1c // bfcvt z28.h, p2/M, z24.s\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z1.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa83c // bfcvtnt z28.h, p2/M, z1.s\n"
+ ".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z3.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa87d // bfcvt z29.h, p2/M, z3.s\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z0.s }, p0/Z, [x22]\n"
- ".inst 0x648aa81d // bfcvtnt z29.h, p2/M, z0.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa2d // bfcvtnt z13.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z24.s }, p0/Z, [x22]\n"
- ".inst 0x658aab1e // bfcvt z30.h, p2/M, z24.s\n"
- "addvl x21, SP, #6\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1311350 // bfdot za.s[x8, 0], { z26.h-z29.h }, z1.h\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z23.s }, p0/Z, [x22]\n"
- ".inst 0x648aaafe // bfcvtnt z30.h, p2/M, z23.s\n"
- "addvl x20, SP, #12\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1391370 // bfdot za.s[x8, 0], { z27.h-z30.h }, z9.h\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1381170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ ".inst 0xc1391190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- "ld1w { z31.s }, p0/Z, [x22]\n"
- ".inst 0xc1301351 // bfdot za.s[x8, 1], { z26.h-z29.h }, z0.h\n"
- ".inst 0x658aabff // bfcvt z31.h, p2/M, z31.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1311371 // bfdot za.s[x8, 1], { z27.h-z30.h }, z1.h\n"
- ".inst 0xc1301390 // bfdot za.s[x8, 0], { z28.h-z31.h }, z0.h\n"
+ ".inst 0xc1301171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc13211b0 // bfdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1301391 // bfdot za.s[x8, 1], { z28.h-z31.h }, z0.h\n"
+ ".inst 0xc1311191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc13011b1 // bfdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"16:" // Padded: 1 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z22.s }, p0/Z, [x17]\n"
- ".inst 0x658aaad5 // bfcvt z21.h, p2/M, z22.s\n"
"add x22, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x21, SP, #3\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #9\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x17]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z3.s }, p0/Z, [x22]\n"
- ".inst 0x648aa875 // bfcvtnt z21.h, p2/M, z3.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z20.s }, p0/Z, [x22]\n"
- ".inst 0x658aaa96 // bfcvt z22.h, p2/M, z20.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa0b // bfcvt z11.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z25.s }, p0/Z, [x22]\n"
- ".inst 0x648aab36 // bfcvtnt z22.h, p2/M, z25.s\n"
- "mov x12, #0x4\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa2b // bfcvtnt z11.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z24.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab17 // bfcvt z23.h, p2/M, z24.s\n"
+ ".inst 0x658aaa0c // bfcvt z12.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z0.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa817 // bfcvtnt z23.h, p2/M, z0.s\n"
+ ".inst 0x648aaa2c // bfcvtnt z12.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z7.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa8f8 // bfcvt z24.h, p2/M, z7.s\n"
+ ".inst 0x658aaa0d // bfcvt z13.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z28.s }, p0/Z, [x22]\n"
- ".inst 0x648aab98 // bfcvtnt z24.h, p2/M, z28.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa2d // bfcvtnt z13.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z6.s }, p0/Z, [x22]\n"
- ".inst 0x658aa8d9 // bfcvt z25.h, p2/M, z6.s\n"
- "addvl x21, SP, #3\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc13112b0 // bfdot za.s[x8, 0], { z21.h-z24.h }, z1.h\n"
+ ".inst 0x658aaa0e // bfcvt z14.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z6.s }, p0/Z, [x22]\n"
- ".inst 0x648aa8d9 // bfcvtnt z25.h, p2/M, z6.s\n"
- "addvl x20, SP, #9\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa2e // bfcvtnt z14.h, p2/M, z17.s\n"
+ ".inst 0x658aaa0f // bfcvt z15.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc13912d0 // bfdot za.s[x8, 0], { z22.h-z25.h }, z9.h\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1381170 // bfdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ ".inst 0xc1391190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- "ld1w { z3.s }, p0/Z, [x22]\n"
- ".inst 0xc13012b1 // bfdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- ".inst 0x658aa87a // bfcvt z26.h, p2/M, z3.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc13112d1 // bfdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- ".inst 0xc13012f0 // bfdot za.s[x8, 0], { z23.h-z26.h }, z0.h\n"
+ ".inst 0xc1301171 // bfdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc13211b0 // bfdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc13012f1 // bfdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
+ ".inst 0xc1311191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc13011b1 // bfdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"17:" // Padded: 0 priming loads
"cmp x7, #0x2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z7.h }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z9.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 20f\n"
"mov x12, #0x0\n"
+ "add x21, x17, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z25.s }, p0/Z, [x17]\n"
- ".inst 0x658aab35 // bfcvt z21.h, p2/M, z25.s\n"
- "add x20, x17, %x[ld_in_row], LSL #2\n"
+ "sub x7, x7, #0x2\n"
+ "sub x16, x16, #0x1\n"
+ "lsr x20, x7, #0x1\n"
+ "cmp x20, x16\n"
+ "and x7, x7, #0x1\n"
+ "ld1w { z16.s }, p0/Z, [x17]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- ".inst 0x648aab75 // bfcvtnt z21.h, p2/M, z27.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "csel x26, x20, x16, LT\n"
+ "add x17, x17, %x[ld_in_col], LSL #2\n"
+ "sub x16, x16, x26\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- ".inst 0x658aab76 // bfcvt z22.h, p2/M, z27.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa01 // bfcvt z1.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- ".inst 0x648aab76 // bfcvtnt z22.h, p2/M, z27.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa21 // bfcvtnt z1.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab77 // bfcvt z23.h, p2/M, z27.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa02 // bfcvt z2.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z25.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab37 // bfcvtnt z23.h, p2/M, z25.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa22 // bfcvtnt z2.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab58 // bfcvt z24.h, p2/M, z26.s\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa03 // bfcvt z3.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
"mov x12, #0x8\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- ".inst 0x648aab78 // bfcvtnt z24.h, p2/M, z27.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa23 // bfcvtnt z3.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- ".inst 0x658aab79 // bfcvt z25.h, p2/M, z27.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa04 // bfcvt z4.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z26.s }, p0/Z, [x20]\n"
- ".inst 0x648aab59 // bfcvtnt z25.h, p2/M, z26.s\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa24 // bfcvtnt z4.h, p2/M, z17.s\n"
+ "ld1w { z18.s }, p0/Z, [x21]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- ".inst 0x658aab7a // bfcvt z26.h, p2/M, z27.s\n"
- "sub x7, x7, #0x2\n"
- "sub x16, x16, #0x1\n"
- "lsr x20, x7, #0x1\n"
- "cmp x20, x16\n"
- "csel x24, x20, x16, LT\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "and x7, x7, #0x1\n"
- "sub x16, x16, x24\n"
- "cbz x24, 19f\n"
+ "add x21, x21, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa05 // bfcvt z5.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x21]\n"
+ ".inst 0x648aaa45 // bfcvtnt z5.h, p2/M, z18.s\n"
+ ".inst 0x658aaa26 // bfcvt z6.h, p2/M, z17.s\n"
+ "cbz x26, 19f\n"
"18:" // Padded: Main loop
- ".inst 0xc13312b0 // bfdot za.s[x8, 0], { z21.h-z24.h }, z3.h\n"
- "addvl x23, SP, #6\n"
- "addvl x21, SP, #12\n"
- ".inst 0xc13b12d0 // bfdot za.s[x8, 0], { z22.h-z25.h }, z11.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
"mov x12, #0x0\n"
+ "add x25, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1301030 // bfdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13012b1 // bfdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1w { z9.s }, p0/Z, [x17]\n"
- "add x20, x17, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc13112d1 // bfdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ "addvl x24, SP, #6\n"
+ "addvl x23, SP, #12\n"
"addvl x22, SP, #3\n"
+ "addvl x21, SP, #9\n"
+ "subs x26, x26, #0x1\n"
+ "ld1w { z18.s }, p0/Z, [x17]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1381050 // bfdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
"add x17, x17, %x[ld_in_col], LSL #2\n"
- ".inst 0xc13012b2 // bfdot za.s[x8, 2], { z21.h-z24.h }, z0.h\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xa0402b0a // ld1h { z10.h-z11.h }, pn10.b/Z, [x24]\n"
+ "add x20, x17, %x[ld_in_row], LSL #2\n"
+ "ld1w { z0.s }, p0/Z, [x25]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13a1031 // bfdot za.s[x8, 1], { z1.h-z4.h }, z10.h\n"
+ ".inst 0xc1391070 // bfdot za.s[x8, 0], { z3.h-z6.h }, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x25]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc13712f0 // bfdot za.s[x8, 0], { z23.h-z26.h }, z7.h\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
"mov x12, #0x4\n"
- "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
- ".inst 0xc13112d2 // bfdot za.s[x8, 2], { z22.h-z25.h }, z1.h\n"
- ".inst 0x658aa921 // bfcvt z1.h, p2/M, z9.s\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13b1051 // bfdot za.s[x8, 1], { z2.h-z5.h }, z11.h\n"
+ ".inst 0xa0402aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23]\n"
+ "ld1w { z17.s }, p0/Z, [x25]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13012f1 // bfdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
- "ld1w { z9.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
+ ".inst 0xc13a1032 // bfdot za.s[x8, 2], { z1.h-z4.h }, z10.h\n"
+ ".inst 0x658aaa52 // bfcvt z18.h, p2/M, z18.s\n"
+ "ld1w { z16.s }, p0/Z, [x25]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0x658aab62 // bfcvt z2.h, p2/M, z27.s\n"
- "ld1w { z27.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1391071 // bfdot za.s[x8, 1], { z3.h-z6.h }, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xc1bfcbcc // fclamp { z12.s-z15.s }, z30.s, z31.s\n"
+ "ld1w { z22.s }, p0/Z, [x25]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0x648aa9c1 // bfcvtnt z1.h, p2/M, z14.s\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc13012f2 // bfdot za.s[x8, 2], { z23.h-z26.h }, z0.h\n"
- ".inst 0x658aa923 // bfcvt z3.h, p2/M, z9.s\n"
- "addvl x21, SP, #9\n"
- "ld1w { z9.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13b1052 // bfdot za.s[x8, 2], { z2.h-z5.h }, z11.h\n"
+ ".inst 0x658aaa73 // bfcvt z19.h, p2/M, z19.s\n"
+ ".inst 0x648aa812 // bfcvtnt z18.h, p2/M, z0.s\n"
+ ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
+ "st1w { z12.s }, p1, [x15]\n"
+ "add x15, x15, x13, LSL #2\n"
+ "ld1w { z11.s }, p0/Z, [x25]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0x658aa924 // bfcvt z4.h, p2/M, z9.s\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
+ "st1w { z13.s }, p1, [x14]\n"
"mov x12, #0x8\n"
- "ld1w { z24.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa9e2 // bfcvtnt z2.h, p2/M, z15.s\n"
+ ".inst 0xc1391072 // bfdot za.s[x8, 2], { z3.h-z6.h }, z9.h\n"
+ ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0x648aaa33 // bfcvtnt z19.h, p2/M, z17.s\n"
+ "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "add x14, x14, x11, LSL #2\n"
+ "st1w { z14.s }, p1, [x10]\n"
+ "ld1w { z16.s }, p0/Z, [x25]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z9.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aab63 // bfcvtnt z3.h, p2/M, z27.s\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aa975 // bfcvt z21.h, p2/M, z11.s\n"
+ "add x10, x10, x28, LSL #2\n"
+ "st1w { z15.s }, p1, [x9]\n"
+ "add x9, x9, x27, LSL #2\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
+ ".inst 0x648aaad4 // bfcvtnt z20.h, p2/M, z22.s\n"
+ "ld1w { z17.s }, p0/Z, [x25]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0x648aab04 // bfcvtnt z4.h, p2/M, z24.s\n"
- ".inst 0x658aa925 // bfcvt z5.h, p2/M, z9.s\n"
- ".inst 0xa1402ac0 // ld1h { z0.h, z8.h }, pn10.b/Z, [x22]\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- ".inst 0x648aabc5 // bfcvtnt z5.h, p2/M, z30.s\n"
- ".inst 0xc1301030 // bfdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
+ "ld1w { z9.s }, p0/Z, [x25]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
+ "add x25, x25, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa36 // bfcvt z22.h, p2/M, z17.s\n"
"mov x12, #0x0\n"
+ ".inst 0xc1301250 // bfdot za.s[x8, 0], { z18.h-z21.h }, z0.h\n"
+ "ld1w { z16.s }, p0/Z, [x25]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1381050 // bfdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- "ld1w { z0.s }, p0/Z, [x17]\n"
- "add x20, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aa936 // bfcvtnt z22.h, p2/M, z9.s\n"
+ "ld1w { z9.s }, p0/Z, [x17]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc1361031 // bfdot za.s[x8, 1], { z1.h-z4.h }, z6.h\n"
- "ld1w { z10.s }, p0/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa17 // bfcvt z23.h, p2/M, z16.s\n"
+ "add x17, x17, %x[ld_in_col], LSL #2\n"
+ ".inst 0xc1381270 // bfdot za.s[x8, 0], { z19.h-z22.h }, z8.h\n"
+ ".inst 0xa0402aac // ld1h { z12.h-z13.h }, pn10.b/Z, [x21]\n"
+ "ld1w { z4.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0x658aaba6 // bfcvt z6.h, p2/M, z29.s\n"
- "ld1w { z9.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13c1251 // bfdot za.s[x8, 1], { z18.h-z21.h }, z12.h\n"
+ ".inst 0x658aa921 // bfcvt z1.h, p2/M, z9.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc13e1051 // bfdot za.s[x8, 1], { z2.h-z5.h }, z14.h\n"
- "mov x12, #0x4\n"
- "ld1w { z29.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa815 // bfcvt z21.h, p2/M, z0.s\n"
+ ".inst 0xc1321290 // bfdot za.s[x8, 0], { z20.h-z23.h }, z2.h\n"
+ "mov x12, #0x4\n"
+ "ld1h { z12.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z31.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa936 // bfcvt z22.h, p2/M, z9.s\n"
+ ".inst 0xc13d1271 // bfdot za.s[x8, 1], { z19.h-z22.h }, z13.h\n"
+ ".inst 0x658aaa02 // bfcvt z2.h, p2/M, z16.s\n"
+ ".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+ ".inst 0x648aa881 // bfcvtnt z1.h, p2/M, z4.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc1301070 // bfdot za.s[x8, 0], { z3.h-z6.h }, z0.h\n"
- "subs x24, x24, #0x1\n"
- "ld1w { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13c1291 // bfdot za.s[x8, 1], { z20.h-z23.h }, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0x648aaa22 // bfcvtnt z2.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1acc9b8 // fclamp { z24.s-z27.s }, z13.s, z12.s\n"
- "ld1w { z30.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa03 // bfcvt z3.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "st1w { z24.s }, p1, [x15]\n"
- "mov x12, #0x8\n"
- "ld1w { z14.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
- "st1w { z25.s }, p1, [x14]\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa23 // bfcvtnt z3.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1301071 // bfdot za.s[x8, 1], { z3.h-z6.h }, z0.h\n"
- ".inst 0x658aabf7 // bfcvt z23.h, p2/M, z31.s\n"
- "ld1w { z8.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa04 // bfcvt z4.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0x658aabd8 // bfcvt z24.h, p2/M, z30.s\n"
- "ld1w { z4.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa24 // bfcvtnt z4.h, p2/M, z17.s\n"
+ "ld1w { z18.s }, p0/Z, [x20]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0x658aa919 // bfcvt z25.h, p2/M, z8.s\n"
- "ld1w { z5.s }, p0/Z, [x20]\n"
- "add x15, x15, x13, LSL #2\n"
- "add x14, x14, x11, LSL #2\n"
- "st1w { z26.s }, p1, [x10]\n"
- "add x10, x10, x28, LSL #2\n"
- "st1w { z27.s }, p1, [x9]\n"
- "add x9, x9, x27, LSL #2\n"
- ".inst 0xc0040e04 // mova za.d[x8, #4], { z16.d-z19.d }\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- ".inst 0x648aa955 // bfcvtnt z21.h, p2/M, z10.s\n"
- ".inst 0x648aabb6 // bfcvtnt z22.h, p2/M, z29.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1h { z7.h }, p2/Z, [SP, #2, MUL VL]\n"
- ".inst 0x648aa9f7 // bfcvtnt z23.h, p2/M, z15.s\n"
- ".inst 0x648aa9d8 // bfcvtnt z24.h, p2/M, z14.s\n"
- ".inst 0x648aa899 // bfcvtnt z25.h, p2/M, z4.s\n"
- ".inst 0x658aa8ba // bfcvt z26.h, p2/M, z5.s\n"
+ "add x20, x20, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa05 // bfcvt z5.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
+ ".inst 0x648aaa45 // bfcvtnt z5.h, p2/M, z18.s\n"
+ ".inst 0x658aaa26 // bfcvt z6.h, p2/M, z17.s\n"
"bgt 18b\n"
"19:" // Main loop tail
- ".inst 0xc13312b0 // bfdot za.s[x8, 0], { z21.h-z24.h }, z3.h\n"
- "addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
- ".inst 0xc13b12d0 // bfdot za.s[x8, 0], { z22.h-z25.h }, z11.h\n"
- ".inst 0xa0402b00 // ld1h { z0.h-z1.h }, pn10.b/Z, [x24]\n"
"mov x12, #0x0\n"
+ ".inst 0xc1301030 // bfdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
+ "add x24, x17, %x[ld_in_row], LSL #2\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13012b1 // bfdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1w { z5.s }, p0/Z, [x17]\n"
- "add x22, x17, %x[ld_in_row], LSL #2\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xc13112d1 // bfdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
+ "addvl x23, SP, #6\n"
+ "addvl x22, SP, #12\n"
"addvl x21, SP, #3\n"
"addvl x20, SP, #9\n"
- ".inst 0xc13012b2 // bfdot za.s[x8, 2], { z21.h-z24.h }, z0.h\n"
- "ld1w { z29.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z18.s }, p0/Z, [x17]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1381050 // bfdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
+ "add x17, x17, %x[ld_in_col], LSL #2\n"
+ ".inst 0xa0402aec // ld1h { z12.h-z13.h }, pn10.b/Z, [x23]\n"
+ "ld1w { z14.s }, p0/Z, [x24]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z2.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13c1031 // bfdot za.s[x8, 1], { z1.h-z4.h }, z12.h\n"
+ ".inst 0xc1391070 // bfdot za.s[x8, 0], { z3.h-z6.h }, z9.h\n"
+ "ld1h { z7.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x24]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc13712f0 // bfdot za.s[x8, 0], { z23.h-z26.h }, z7.h\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
"mov x12, #0x4\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
- ".inst 0xc13112d2 // bfdot za.s[x8, 2], { z22.h-z25.h }, z1.h\n"
- ".inst 0x658aa8bb // bfcvt z27.h, p2/M, z5.s\n"
- "ld1w { z20.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13d1051 // bfdot za.s[x8, 1], { z2.h-z5.h }, z13.h\n"
+ ".inst 0xa0402acc // ld1h { z12.h-z13.h }, pn10.b/Z, [x22]\n"
+ "ld1w { z15.s }, p0/Z, [x24]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc13012f1 // bfdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
- "ld1w { z1.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
+ ".inst 0xc13c1032 // bfdot za.s[x8, 2], { z1.h-z4.h }, z12.h\n"
+ ".inst 0x658aaa4c // bfcvt z12.h, p2/M, z18.s\n"
+ "ld1w { z19.s }, p0/Z, [x24]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0x658aa85c // bfcvt z28.h, p2/M, z2.s\n"
- "ld1w { z14.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1371071 // bfdot za.s[x8, 1], { z3.h-z6.h }, z7.h\n"
+ "ld1h { z7.h }, p2/Z, [x22, #2, MUL VL]\n"
+ ".inst 0xc1bfcbc8 // fclamp { z8.s-z11.s }, z30.s, z31.s\n"
+ "ld1w { z18.s }, p0/Z, [x24]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0x648aabbb // bfcvtnt z27.h, p2/M, z29.s\n"
- "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
- ".inst 0xc13012f2 // bfdot za.s[x8, 2], { z23.h-z26.h }, z0.h\n"
- ".inst 0x658aa83d // bfcvt z29.h, p2/M, z1.s\n"
- "add x17, x17, %x[ld_in_col], LSL #2\n"
- "ld1w { z1.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc13d1052 // bfdot za.s[x8, 2], { z2.h-z5.h }, z13.h\n"
+ ".inst 0x658aaa2d // bfcvt z13.h, p2/M, z17.s\n"
+ ".inst 0x648aa9cc // bfcvtnt z12.h, p2/M, z14.s\n"
+ ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ "st1w { z8.s }, p1, [x15]\n"
+ "add x15, x15, x13, LSL #2\n"
+ "ld1w { z8.s }, p0/Z, [x24]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0x658aa83e // bfcvt z30.h, p2/M, z1.s\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ "st1w { z9.s }, p1, [x14]\n"
"mov x12, #0x8\n"
- "ld1w { z31.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aaa9c // bfcvtnt z28.h, p2/M, z20.s\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z26.s }, p0/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa9dd // bfcvtnt z29.h, p2/M, z14.s\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0x648aabfe // bfcvtnt z30.h, p2/M, z31.s\n"
- ".inst 0x658aab5f // bfcvt z31.h, p2/M, z26.s\n"
- ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
- "ld1w { z9.s }, p0/Z, [x22]\n"
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ ".inst 0xc1371072 // bfdot za.s[x8, 2], { z3.h-z6.h }, z7.h\n"
+ ".inst 0x658aaa6e // bfcvt z14.h, p2/M, z19.s\n"
"add x8, x8, #0x1\n"
- ".inst 0x648aa93f // bfcvtnt z31.h, p2/M, z9.s\n"
- ".inst 0xc1321370 // bfdot za.s[x8, 0], { z27.h-z30.h }, z2.h\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z26.s }, p0/Z, [x22]\n"
- ".inst 0xc13a1390 // bfdot za.s[x8, 0], { z28.h-z31.h }, z10.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- ".inst 0x658aab40 // bfcvt z0.h, p2/M, z26.s\n"
- ".inst 0xc1321371 // bfdot za.s[x8, 1], { z27.h-z30.h }, z2.h\n"
- "ld1h { z9.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1acc9a4 // fclamp { z4.s-z7.s }, z13.s, z12.s\n"
- ".inst 0xc13a1391 // bfdot za.s[x8, 1], { z28.h-z31.h }, z10.h\n"
- "st1w { z4.s }, p1, [x15]\n"
- "add x15, x15, x13, LSL #2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc13913b0 // bfdot za.s[x8, 0], { z29.h-z0.h }, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x20, #2, MUL VL]\n"
- "st1w { z5.s }, p1, [x14]\n"
+ ".inst 0x648aa9ed // bfcvtnt z13.h, p2/M, z15.s\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
"add x14, x14, x11, LSL #2\n"
- "st1w { z6.s }, p1, [x10]\n"
+ "st1w { z10.s }, p1, [x10]\n"
+ "ld1w { z17.s }, p0/Z, [x24]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aa90f // bfcvt z15.h, p2/M, z8.s\n"
"add x10, x10, x28, LSL #2\n"
- ".inst 0xc0040e04 // mova za.d[x8, #4], { z16.d-z19.d }\n"
- "st1w { z7.s }, p1, [x9]\n"
+ "st1w { z11.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
- ".inst 0xc13913b1 // bfdot za.s[x8, 1], { z29.h-z0.h }, z9.h\n"
- "ld1h { z7.h }, p2/Z, [SP, #2, MUL VL]\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
+ ".inst 0x648aaa4e // bfcvtnt z14.h, p2/M, z18.s\n"
+ "ld1w { z16.s }, p0/Z, [x24]\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0x648aaa2f // bfcvtnt z15.h, p2/M, z17.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p0/Z, [x24]\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "add x24, x24, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1301190 // bfdot za.s[x8, 0], { z12.h-z15.h }, z0.h\n"
+ "ld1w { z9.s }, p0/Z, [x24]\n"
+ ".inst 0x648aaa30 // bfcvtnt z16.h, p2/M, z17.s\n"
+ ".inst 0x658aa931 // bfcvt z17.h, p2/M, z9.s\n"
+ ".inst 0xc13111b0 // bfdot za.s[x8, 0], { z13.h-z16.h }, z1.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1301191 // bfdot za.s[x8, 1], { z12.h-z15.h }, z0.h\n"
+ ".inst 0xc13211d0 // bfdot za.s[x8, 0], { z14.h-z17.h }, z2.h\n"
+ "ld1h { z5.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc13111b1 // bfdot za.s[x8, 1], { z13.h-z16.h }, z1.h\n"
+ ".inst 0xa1402be0 // ld1h { z0.h, z8.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc13511d1 // bfdot za.s[x8, 1], { z14.h-z17.h }, z5.h\n"
+ "ld1h { z9.h }, p2/Z, [SP, #2, MUL VL]\n"
"20:" // Main loop skip tail
"cbz x7, 21f\n" // Skip remainder inputs
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z25.s }, p0/Z, [x17]\n"
- ".inst 0x658aab3d // bfcvt z29.h, p2/M, z25.s\n"
"add x22, x17, %x[ld_in_row], LSL #2\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x21, SP, #6\n"
+ "addvl x20, SP, #12\n"
+ "sub x16, x16, #0x1\n"
+ "ld1w { z16.s }, p0/Z, [x17]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z26.s }, p0/Z, [x22]\n"
- ".inst 0x648aab5d // bfcvtnt z29.h, p2/M, z26.s\n"
- "add x22, x22, %x[ld_in_row], LSL #2\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z25.s }, p0/Z, [x22]\n"
- ".inst 0x658aab3e // bfcvt z30.h, p2/M, z25.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1w { z24.s }, p0/Z, [x22]\n"
- ".inst 0x648aab1e // bfcvtnt z30.h, p2/M, z24.s\n"
- "mov x12, #0x4\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x4\n"
+ ".inst 0x648aaa32 // bfcvtnt z18.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z26.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aab5f // bfcvt z31.h, p2/M, z26.s\n"
+ ".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
+ "ld1w { z29.s }, p0/Z, [x22]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z9.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x648aa93f // bfcvtnt z31.h, p2/M, z9.s\n"
+ ".inst 0x648aaa33 // bfcvtnt z19.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1w { z9.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0x658aa920 // bfcvt z0.h, p2/M, z9.s\n"
+ ".inst 0x658aabb4 // bfcvt z20.h, p2/M, z29.s\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "mov x12, #0x8\n"
- "ld1w { z24.s }, p0/Z, [x22]\n"
- ".inst 0x648aab00 // bfcvtnt z0.h, p2/M, z24.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ "mov x12, #0x8\n"
+ ".inst 0x648aaa34 // bfcvtnt z20.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1w { z9.s }, p0/Z, [x22]\n"
- ".inst 0x658aa921 // bfcvt z1.h, p2/M, z9.s\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0x658aaa15 // bfcvt z21.h, p2/M, z16.s\n"
+ "ld1w { z13.s }, p0/Z, [x22]\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1w { z25.s }, p0/Z, [x22]\n"
- ".inst 0x648aab21 // bfcvtnt z1.h, p2/M, z25.s\n"
- ".inst 0xc13313b0 // bfdot za.s[x8, 0], { z29.h-z0.h }, z3.h\n"
- "addvl x21, SP, #6\n"
"add x22, x22, %x[ld_in_row], LSL #2\n"
- ".inst 0xc13b13d0 // bfdot za.s[x8, 0], { z30.h-z1.h }, z11.h\n"
- ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
+ ".inst 0x648aaa35 // bfcvtnt z21.h, p2/M, z17.s\n"
+ ".inst 0x658aa9b6 // bfcvt z22.h, p2/M, z13.s\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "addvl x20, SP, #12\n"
- ".inst 0xc13e13b1 // bfdot za.s[x8, 1], { z29.h-z0.h }, z14.h\n"
- "ld1w { z25.s }, p0/Z, [x22]\n"
- ".inst 0x658aab22 // bfcvt z2.h, p2/M, z25.s\n"
- "sub x16, x16, #0x1\n"
- ".inst 0xc13f13d1 // bfdot za.s[x8, 1], { z30.h-z1.h }, z15.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc13e13b2 // bfdot za.s[x8, 2], { z29.h-z0.h }, z14.h\n"
- ".inst 0xc13713f0 // bfdot za.s[x8, 0], { z31.h-z2.h }, z7.h\n"
- "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc13f13d2 // bfdot za.s[x8, 2], { z30.h-z1.h }, z15.h\n"
- ".inst 0xc13413f1 // bfdot za.s[x8, 1], { z31.h-z2.h }, z4.h\n"
- "ld1h { z9.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x22, x22, %x[ld_in_row], LSL #2\n"
+ ".inst 0xc1301250 // bfdot za.s[x8, 0], { z18.h-z21.h }, z0.h\n"
+ "ld1w { z14.s }, p0/Z, [x22]\n"
+ ".inst 0x648aaa36 // bfcvtnt z22.h, p2/M, z17.s\n"
+ ".inst 0x658aa9d7 // bfcvt z23.h, p2/M, z14.s\n"
+ ".inst 0xc1381270 // bfdot za.s[x8, 0], { z19.h-z22.h }, z8.h\n"
+ ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc1301251 // bfdot za.s[x8, 1], { z18.h-z21.h }, z0.h\n"
+ ".inst 0xc1391290 // bfdot za.s[x8, 0], { z20.h-z23.h }, z9.h\n"
+ "ld1h { z14.h }, p2/Z, [x21, #2, MUL VL]\n"
+ ".inst 0xc1311271 // bfdot za.s[x8, 1], { z19.h-z22.h }, z1.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
- ".inst 0xc1acc9a4 // fclamp { z4.s-z7.s }, z13.s, z12.s\n"
+ ".inst 0xc1301252 // bfdot za.s[x8, 2], { z18.h-z21.h }, z0.h\n"
+ ".inst 0xc13e1291 // bfdot za.s[x8, 1], { z20.h-z23.h }, z14.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1bfcbc4 // fclamp { z4.s-z7.s }, z30.s, z31.s\n"
+ ".inst 0xc1311272 // bfdot za.s[x8, 2], { z19.h-z22.h }, z1.h\n"
"st1w { z4.s }, p1, [x15]\n"
"add x15, x15, x13, LSL #2\n"
- ".inst 0xc13913f2 // bfdot za.s[x8, 2], { z31.h-z2.h }, z9.h\n"
- "add x8, x8, #0x1\n"
"st1w { z5.s }, p1, [x14]\n"
"add x14, x14, x11, LSL #2\n"
+ ".inst 0xc1301292 // bfdot za.s[x8, 2], { z20.h-z23.h }, z0.h\n"
+ "add x8, x8, #0x1\n"
"st1w { z6.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- ".inst 0xc0040e04 // mova za.d[x8, #4], { z16.d-z19.d }\n"
"st1w { z7.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
"21:" // Tail input: End
"cbz x16, 23f\n"
"22:" // Right padding loop
- ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
"subs x16, x16, #0x1\n"
- ".inst 0xc1acc9a4 // fclamp { z4.s-z7.s }, z13.s, z12.s\n"
- "st1w { z4.s }, p1, [x15]\n"
+ ".inst 0xc0040f04 // mova za.d[x8, #4], { z24.d-z27.d }\n"
+ ".inst 0xc1bfcbd0 // fclamp { z16.s-z19.s }, z30.s, z31.s\n"
+ "st1w { z16.s }, p1, [x15]\n"
"add x15, x15, x13, LSL #2\n"
- ".inst 0xc0040e04 // mova za.d[x8, #4], { z16.d-z19.d }\n"
- "st1w { z5.s }, p1, [x14]\n"
+ "st1w { z17.s }, p1, [x14]\n"
"add x14, x14, x11, LSL #2\n"
- "st1w { z6.s }, p1, [x10]\n"
+ "st1w { z18.s }, p1, [x10]\n"
"add x10, x10, x28, LSL #2\n"
- "st1w { z7.s }, p1, [x9]\n"
+ "st1w { z19.s }, p1, [x9]\n"
"add x9, x9, x27, LSL #2\n"
"bgt 22b\n"
"23:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x6\n"
+ "whilelt p1.s, x6, x5\n"
"incb x20, ALL, MUL #16\n"
"incb x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x6\n"
- "whilelt p1.s, x6, x5\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21, LSL #2\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -1234,6 +1239,8 @@ void sme2_fp32bf16fp32_planar_5x5_s2_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #15\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_bias] "I" (offsetof(Args, bias)), [offsetof_Args_clamp_max] "I" (offsetof(Args, clamp_max)), [offsetof_Args_clamp_min] "I" (offsetof(Args, clamp_min)), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp
index 845f376926..f5a4583d74 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,133 +70,138 @@ void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0x6\n"
"ptrue p2.b\n"
- "mov x20, #0x6\n"
"ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x22, #0x8\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z20.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x21, x6\n"
+ "mov SP, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-12\n"
+ "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z20.h, p2/M, z20.h\n"
+ "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x7\n"
- "addvl SP, SP, #-12\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z21.h, p2/M, z21.h\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z28.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z30.s, #0x0\n"
+ "mov z28.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z30.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x20, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z10.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z23.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z25.h, #0x0\n"
+ "addvl x22, SP, #12\n"
+ "addvl x22, x22, #-4\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z29.d, z28.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1sb { z0.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1rh { z31.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z7.h, #0x0\n"
- "sub z10.h, z10.h, z31.h\n"
- "incw x22\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z26.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z16.h, z16.h, z31.h\n"
- "trn1 z20.h, z7.h, z10.h\n"
- "ld1sb { z11.s }, p2/Z, [x20]\n"
- "sub z11.h, z11.h, z31.h\n"
- "mov x20, x22\n"
- "trn1 z19.h, z10.h, z16.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
+ "ld1sb { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z0.h, z0.h, z23.h\n"
+ "sub z26.h, z26.h, z23.h\n"
+ "sub z15.h, z15.h, z23.h\n"
+ "trn1 z14.h, z25.h, z0.h\n"
+ "trn1 z2.h, z0.h, z26.h\n"
+ "ld1sb { z21.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "trn1 z26.h, z16.h, z11.h\n"
- "trn1 z13.h, z11.h, z7.h\n"
- "ld1sb { z11.s }, p2/Z, [x20]\n"
+ "trn1 z16.h, z26.h, z15.h\n"
+ "ld1sb { z1.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z24.h, z24.h, z31.h\n"
- "sub z11.h, z11.h, z31.h\n"
- "ld1sb { z2.s }, p2/Z, [x20]\n"
- "sub z2.h, z2.h, z31.h\n"
- "addvl x21, SP, #12\n"
- "incw x22\n"
- "addvl x21, x21, #-4\n"
- "mov x20, x22\n"
- "st1h { z20.h }, p2, [x21]\n"
- "trn1 z22.h, z7.h, z24.h\n"
- "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z1.h, z24.h, z11.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "trn1 z15.h, z15.h, z25.h\n"
+ "ld1sb { z11.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "sub z21.h, z21.h, z23.h\n"
+ "st1h { z14.h }, p2, [x22]\n"
+ "sub z1.h, z1.h, z23.h\n"
+ "st1h { z2.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z11.h, z11.h, z23.h\n"
+ "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z15.h }, p2, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #-4\n"
+ "trn1 z3.h, z25.h, z21.h\n"
+ "trn1 z14.h, z21.h, z1.h\n"
+ "ld1sb { z15.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z3.h, z11.h, z2.h\n"
- "ld1sb { z0.s }, p2/Z, [x20]\n"
+ "trn1 z10.h, z1.h, z11.h\n"
+ "ld1sb { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z13.h }, p2, [x21, #3, MUL VL]\n"
- "trn1 z25.h, z2.h, z7.h\n"
- "ld1sb { z4.s }, p2/Z, [x20]\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "sub z16.h, z16.h, z31.h\n"
- "sub z0.h, z0.h, z31.h\n"
- "addvl x21, x21, #-4\n"
- "st1h { z22.h }, p2, [x21]\n"
- "sub z4.h, z4.h, z31.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
- "mov z31.d, z30.d\n"
- "st1h { z3.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z24.h, z7.h, z16.h\n"
- "trn1 z18.h, z16.h, z0.h\n"
- "st1h { z25.h }, p2, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #-4\n"
- "trn1 z0.h, z0.h, z4.h\n"
- "trn1 z1.h, z4.h, z7.h\n"
- "st1h { z24.h }, p2, [x21]\n"
- "st1h { z18.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z1.h }, p2, [x21, #3, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z14.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "trn1 z26.h, z11.h, z25.h\n"
+ "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "sub z15.h, z15.h, z23.h\n"
+ "st1h { z3.h }, p2, [x22]\n"
+ "sub z9.h, z9.h, z23.h\n"
+ "st1h { z14.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z16.h, z16.h, z23.h\n"
+ "st1h { z10.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #-4\n"
+ "trn1 z22.h, z25.h, z15.h\n"
+ "trn1 z6.h, z15.h, z9.h\n"
+ "trn1 z12.h, z9.h, z16.h\n"
+ "trn1 z11.h, z16.h, z25.h\n"
+ "st1h { z22.h }, p2, [x22]\n"
+ "st1h { z6.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z12.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z11.h }, p2, [x22, #3, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z5.s }, p1/Z, [x21, x16, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z12.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x20, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
"mov x22, #0x6\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x7, x6\n"
+ "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x15, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x7, x14\n"
+ "orr x20, x17, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040bc0 // mova za.d[x8, #0], { z30.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040bc1 // mova za.d[x8, #1], { z30.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
"mov x22, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x14, x7, x21, x14\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040b82 // mova za.d[x8, #2], { z28.d-z29.d }\n"
"ldp x11, x10, [x23], #0x10\n"
- ".inst 0xc0040bc2 // mova za.d[x8, #2], { z30.d-z31.d }\n"
+ ".inst 0xc0040b83 // mova za.d[x8, #3], { z28.d-z29.d }\n"
"ldp x9, x28, [x20], #0x10\n"
- ".inst 0xc0040bc3 // mova za.d[x8, #3], { z30.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
"ldp x27, x26, [x23], #0x10\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
"ldp x25, x24, [x20], #0x10\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
@@ -204,22 +209,22 @@ void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
"sub x13, x13, x21\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -231,148 +236,148 @@ void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1sb { z20.s }, p1/Z, [x14]\n"
+ "ld1sb { z27.s }, p1/Z, [x14]\n"
"addvl x20, SP, #8\n"
"ld1sb { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z20.h, z16.h\n"
- "add z4.h, z4.h, z21.h\n"
- "ld1sb { z23.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z22.s }, p1/Z, [x21]\n"
+ "ld1sb { z3.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z5.h, z23.h, z22.h\n"
- "add z5.h, z5.h, z21.h\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
+ "ld1sb { z1.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
- "trn1 z6.h, z17.h, z16.h\n"
- "add z6.h, z6.h, z21.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16b1488 // sdot za.s[x8, 0], { z4.h-z5.h }, z11.h\n"
- ".inst 0xc1631489 // sdot za.s[x8, 1], { z4.h-z5.h }, z3.h\n"
- ".inst 0xa1412a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16814a8 // sdot za.s[x8, 0], { z5.h-z6.h }, z8.h\n"
- ".inst 0xc16014a9 // sdot za.s[x8, 1], { z5.h-z6.h }, z0.h\n"
+ "ld1sb { z12.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z15.h, z27.h, z16.h\n"
+ "ld1sb { z18.s }, p1/Z, [x21]\n"
+ "trn1 z16.h, z3.h, z1.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z15.h, z15.h, z20.h\n"
+ "trn1 z17.h, z12.h, z18.h\n"
+ "add z16.h, z16.h, z20.h\n"
+ "add z17.h, z17.h, z20.h\n"
+ ".inst 0xc16b15e8 // sdot za.s[x8, 0], { z15.h-z16.h }, z11.h\n"
+ ".inst 0xc16a15e9 // sdot za.s[x8, 1], { z15.h-z16.h }, z10.h\n"
+ ".inst 0xc1631608 // sdot za.s[x8, 0], { z16.h-z17.h }, z3.h\n"
+ ".inst 0xc1621609 // sdot za.s[x8, 1], { z16.h-z17.h }, z2.h\n"
"9:" // Unpadded: 1 priming loads
"add x22, x14, %x[ld_in_row]\n"
- "ld1sb { z25.s }, p1/Z, [x14]\n"
+ "ld1sb { z22.s }, p1/Z, [x14]\n"
"addvl x21, SP, #4\n"
- "ld1sb { z6.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z3.h, z25.h, z6.h\n"
- "add z3.h, z3.h, z21.h\n"
- "ld1sb { z18.s }, p1/Z, [x22]\n"
+ "ld1sb { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #8\n"
- "ld1sb { z26.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z4.h, z18.h, z26.h\n"
- "add z4.h, z4.h, z21.h\n"
- "ld1sb { z2.s }, p1/Z, [x22]\n"
+ "ld1sb { z19.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z5.s }, p1/Z, [x22]\n"
- "trn1 z5.h, z2.h, z5.h\n"
- "add z5.h, z5.h, z21.h\n"
+ "ld1sb { z10.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z21.h, z22.h, z16.h\n"
+ "ld1sb { z7.s }, p1/Z, [x22]\n"
+ "trn1 z22.h, z19.h, z10.h\n"
".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1611468 // sdot za.s[x8, 0], { z3.h-z4.h }, z1.h\n"
- ".inst 0xc1601469 // sdot za.s[x8, 1], { z3.h-z4.h }, z0.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- ".inst 0xa0412aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a146a // sdot za.s[x8, 2], { z3.h-z4.h }, z10.h\n"
- ".inst 0xc162146b // sdot za.s[x8, 3], { z3.h-z4.h }, z2.h\n"
- ".inst 0xc1691488 // sdot za.s[x8, 0], { z4.h-z5.h }, z9.h\n"
- ".inst 0xc1681489 // sdot za.s[x8, 1], { z4.h-z5.h }, z8.h\n"
- ".inst 0xa1412a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a148a // sdot za.s[x8, 2], { z4.h-z5.h }, z10.h\n"
- ".inst 0xc162148b // sdot za.s[x8, 3], { z4.h-z5.h }, z2.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add z21.h, z21.h, z20.h\n"
+ "trn1 z23.h, z11.h, z7.h\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
+ ".inst 0xc16116a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z1.h\n"
+ ".inst 0xc16016a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z0.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16e16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16616c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16916ca // sdot za.s[x8, 2], { z22.h-z23.h }, z9.h\n"
+ ".inst 0xc16116cb // sdot za.s[x8, 3], { z22.h-z23.h }, z1.h\n"
"10:" // Unpadded: 0 priming loads
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
"add x20, x14, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x14]\n"
+ "ld1sb { z15.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x1\n"
- "ld1sb { z9.s }, p1/Z, [x20]\n"
+ "ld1sb { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z6.h, z17.h, z9.h\n"
"sub x13, x13, #0x1\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "ld1sb { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"cmp x15, x13\n"
- "add z6.h, z6.h, z21.h\n"
- "ld1sb { z7.s }, p1/Z, [x20]\n"
+ "ld1sb { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z7.h, z17.h, z7.h\n"
"csel x23, x15, x13, LT\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
+ "ld1sb { z2.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z7.h, z7.h, z21.h\n"
+ "trn1 z21.h, z15.h, z0.h\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z1.s }, p1/Z, [x20]\n"
- "trn1 z8.h, z17.h, z1.h\n"
- "add z8.h, z8.h, z21.h\n"
+ "ld1sb { z15.s }, p1/Z, [x20]\n"
"sub x13, x13, x23\n"
+ "trn1 z22.h, z24.h, z9.h\n"
+ "trn1 z23.h, z2.h, z15.h\n"
+ "add z21.h, z21.h, z20.h\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
"cbz x23, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
"addvl x22, SP, #4\n"
"addvl x21, SP, #8\n"
- "ld1sb { z2.s }, p1/Z, [x14]\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa1402ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22]\n"
+ "ld1sb { z26.s }, p1/Z, [x14]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
"add x20, x14, %x[ld_in_row]\n"
"subs x23, x23, #0x1\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- "ld1sb { z19.s }, p1/Z, [x20]\n"
+ "ld1sb { z4.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa1412ac3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
- "ld1sb { z23.s }, p1/Z, [x20]\n"
+ "ld1sb { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc16d14ca // sdot za.s[x8, 2], { z6.h-z7.h }, z13.h\n"
- "ld1sb { z18.s }, p1/Z, [x20]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ "ld1sb { z3.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16514cb // sdot za.s[x8, 3], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc16914cc // sdot za.s[x8, 4], { z6.h-z7.h }, z9.h\n"
- "ld1sb { z17.s }, p1/Z, [x20]\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc16e16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z14.h\n"
+ "ld1sb { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16114cd // sdot za.s[x8, 5], { z6.h-z7.h }, z1.h\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc16b14ea // sdot za.s[x8, 2], { z7.h-z8.h }, z11.h\n"
- "trn1 z6.h, z2.h, z19.h\n"
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16314eb // sdot za.s[x8, 3], { z7.h-z8.h }, z3.h\n"
+ ".inst 0xc16616ab // sdot za.s[x8, 3], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc0060810 // mova { z16.d-z17.d }, za.d[x8, #0]\n"
+ "ld1sb { z11.s }, p1/Z, [x20]\n"
+ ".inst 0xc0060832 // mova { z18.d-z19.d }, za.d[x8, #1]\n"
+ ".inst 0xc16916ac // sdot za.s[x8, 4], { z21.h-z22.h }, z9.h\n"
+ ".inst 0xc16116ad // sdot za.s[x8, 5], { z21.h-z22.h }, z1.h\n"
+ "trn1 z21.h, z26.h, z4.h\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16f16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z15.h\n"
+ ".inst 0xc1a5ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z5.s\n"
+ ".inst 0xc16716cb // sdot za.s[x8, 3], { z22.h-z23.h }, z7.h\n"
".inst 0xa1412aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- ".inst 0xc16914ec // sdot za.s[x8, 4], { z7.h-z8.h }, z9.h\n"
- "st1b { z24.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "add z6.h, z6.h, z21.h\n"
- ".inst 0xc16114ed // sdot za.s[x8, 5], { z7.h-z8.h }, z1.h\n"
- "trn1 z7.h, z23.h, z18.h\n"
- "trn1 z8.h, z17.h, z16.h\n"
+ "add z21.h, z21.h, z20.h\n"
+ ".inst 0xc1adaa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
+ ".inst 0xc16916cc // sdot za.s[x8, 4], { z22.h-z23.h }, z9.h\n"
+ ".inst 0xc16116cd // sdot za.s[x8, 5], { z22.h-z23.h }, z1.h\n"
+ "trn1 z22.h, z27.h, z3.h\n"
+ "trn1 z23.h, z25.h, z11.h\n"
"add x8, x8, #0x2\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z26.s }, p1, [x10]\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
+ ".inst 0xc1bfcfd0 // sclamp { z16.s-z19.s }, z30.s, z31.s\n"
+ "st1b { z16.s }, p1, [x11]\n"
+ "add x11, x11, x9\n"
+ "st1b { z18.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z17.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- "add z7.h, z7.h, z21.h\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z19.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "add z8.h, z8.h, z21.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
@@ -382,258 +387,258 @@ void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #8\n"
+ ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1sb { z17.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z7.h, z19.h, z18.h\n"
- "trn1 z8.h, z17.h, z16.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #8\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "trn1 z9.h, z17.h, z16.h\n"
- ".inst 0xc16a14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z10.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16214e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z2.h\n"
- ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16d1508 // sdot za.s[x8, 0], { z8.h-z9.h }, z13.h\n"
- ".inst 0xc1651509 // sdot za.s[x8, 1], { z8.h-z9.h }, z5.h\n"
+ ".inst 0xc16c16e8 // sdot za.s[x8, 0], { z23.h-z24.h }, z12.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16416e9 // sdot za.s[x8, 1], { z23.h-z24.h }, z4.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ ".inst 0xc16f1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z15.h\n"
+ ".inst 0xc1671709 // sdot za.s[x8, 1], { z24.h-z25.h }, z7.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x22, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x21, SP, #4\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #8\n"
+ ".inst 0xa1412aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "ld1sb { z17.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z16.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z19.h, z18.h\n"
- "trn1 z23.h, z17.h, z16.h\n"
+ "ld1sb { z10.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z10.h, p0/M, z10.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z17.h, z18.h, z10.h\n"
+ "add z14.h, p0/M, z14.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #4\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- "trn1 z24.h, z17.h, z16.h\n"
- ".inst 0xc16116c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z1.h\n"
- ".inst 0xc16016c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z0.h\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0412aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16d16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z13.h\n"
- ".inst 0xc16516cb // sdot za.s[x8, 3], { z22.h-z23.h }, z5.h\n"
- ".inst 0xc16116e8 // sdot za.s[x8, 0], { z23.h-z24.h }, z1.h\n"
- ".inst 0xc16016e9 // sdot za.s[x8, 1], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xc16f1608 // sdot za.s[x8, 0], { z16.h-z17.h }, z15.h\n"
+ "ld1sb { z10.s }, p0/Z, [x22]\n"
+ ".inst 0xc1671609 // sdot za.s[x8, 1], { z16.h-z17.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ "add z10.h, p0/M, z10.h, z20.h\n"
+ ".inst 0xc16f160a // sdot za.s[x8, 2], { z16.h-z17.h }, z15.h\n"
+ ".inst 0xc167160b // sdot za.s[x8, 3], { z16.h-z17.h }, z7.h\n"
+ "trn1 z18.h, z14.h, z10.h\n"
+ ".inst 0xc16c1628 // sdot za.s[x8, 0], { z17.h-z18.h }, z12.h\n"
+ ".inst 0xc1641629 // sdot za.s[x8, 1], { z17.h-z18.h }, z4.h\n"
".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16116ea // sdot za.s[x8, 2], { z23.h-z24.h }, z1.h\n"
- ".inst 0xc16016eb // sdot za.s[x8, 3], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xc161162a // sdot za.s[x8, 2], { z17.h-z18.h }, z1.h\n"
+ ".inst 0xc160162b // sdot za.s[x8, 3], { z17.h-z18.h }, z0.h\n"
"15:" // Padded: 0 priming loads
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
"add x20, x14, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x15, x15, #0x1\n"
+ "sub x13, x13, #0x1\n"
+ "cmp x15, x13\n"
+ "ld1sb { z17.s }, p0/Z, [x14]\n"
+ "csel x23, x15, x13, LT\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "sub x13, x13, x23\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z21.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z6.h, z19.h, z18.h\n"
- "trn1 z7.h, z17.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- "sub x15, x15, #0x1\n"
- "sub x13, x13, #0x1\n"
- "cmp x15, x13\n"
- "trn1 z8.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
- "add x14, x14, %x[ld_in_col]\n"
- "sub x13, x13, x23\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z23.h, z17.h, z16.h\n"
"cbz x23, 17f\n"
"16:" // Padded: Main loop
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z9.s }, p0/Z, [x14]\n"
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- "add z9.h, p0/M, z9.h, z21.h\n"
"add x22, x14, %x[ld_in_row]\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ "addvl x21, SP, #4\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #8\n"
+ "subs x23, x23, #0x1\n"
+ "ld1sb { z16.s }, p0/Z, [x14]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x22]\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
+ ".inst 0xc16f16ac // sdot za.s[x8, 4], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ad // sdot za.s[x8, 5], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16e16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z14.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc16616cb // sdot za.s[x8, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
"ld1sb { z18.s }, p0/Z, [x22]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16f16cc // sdot za.s[x8, 4], { z22.h-z23.h }, z15.h\n"
+ ".inst 0xc16716cd // sdot za.s[x8, 5], { z22.h-z23.h }, z7.h\n"
+ "add x8, x8, #0x2\n"
+ ".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "trn1 z21.h, z16.h, z17.h\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x22]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"mov x12, #0x4\n"
- "addvl x21, SP, #4\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16b14ca // sdot za.s[x8, 2], { z6.h-z7.h }, z11.h\n"
- "subs x23, x23, #0x1\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
"ld1sb { z17.s }, p0/Z, [x22]\n"
- ".inst 0xc16314cb // sdot za.s[x8, 3], { z6.h-z7.h }, z3.h\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa0412aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16d14cc // sdot za.s[x8, 4], { z6.h-z7.h }, z13.h\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- "ld1sb { z2.s }, p0/Z, [x22]\n"
- ".inst 0xc16514cd // sdot za.s[x8, 5], { z6.h-z7.h }, z5.h\n"
- "add z2.h, p0/M, z2.h, z21.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16b14ea // sdot za.s[x8, 2], { z7.h-z8.h }, z11.h\n"
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- ".inst 0xc16a14eb // sdot za.s[x8, 3], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa1412a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc16b14ec // sdot za.s[x8, 4], { z7.h-z8.h }, z11.h\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "trn1 z6.h, z9.h, z19.h\n"
- ".inst 0xc16314ed // sdot za.s[x8, 5], { z7.h-z8.h }, z3.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- "trn1 z7.h, z18.h, z16.h\n"
- "trn1 z8.h, z17.h, z2.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z23.h, z17.h, z16.h\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
"addvl x21, SP, #4\n"
"addvl x20, SP, #8\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc16114ca // sdot za.s[x8, 2], { z6.h-z7.h }, z1.h\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc16014cb // sdot za.s[x8, 3], { z6.h-z7.h }, z0.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc16914cc // sdot za.s[x8, 4], { z6.h-z7.h }, z9.h\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- "st1b { z24.s }, p1, [x11]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ ".inst 0xc16f16ac // sdot za.s[x8, 4], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ad // sdot za.s[x8, 5], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16e16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+ ".inst 0xc16616cb // sdot za.s[x8, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc16c16cc // sdot za.s[x8, 4], { z22.h-z23.h }, z12.h\n"
+ ".inst 0xc16416cd // sdot za.s[x8, 5], { z22.h-z23.h }, z4.h\n"
+ "add x8, x8, #0x2\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc16114cd // sdot za.s[x8, 5], { z6.h-z7.h }, z1.h\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc16314ea // sdot za.s[x8, 2], { z7.h-z8.h }, z3.h\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- ".inst 0xc16214eb // sdot za.s[x8, 3], { z7.h-z8.h }, z2.h\n"
- ".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc16114ec // sdot za.s[x8, 4], { z7.h-z8.h }, z1.h\n"
- ".inst 0xc16014ed // sdot za.s[x8, 5], { z7.h-z8.h }, z0.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
"18:" // Main loop skip tail
"cbz x13, 20f\n"
"19:" // Right padding loop
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
+ ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"add x8, x8, #0x2\n"
- ".inst 0xc1acaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc1afab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z15.s\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- ".inst 0xc1bccfa4 // sclamp { z4.s-z7.s }, z29.s, z28.s\n"
- "st1b { z4.s }, p1, [x11]\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a5ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc1adaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z13.s\n"
+ ".inst 0xc1a8ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ ".inst 0xc1bfcfd8 // sclamp { z24.s-z27.s }, z30.s, z31.s\n"
+ "st1b { z24.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z26.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
+ "st1b { z25.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z27.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 19b\n"
"20:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -652,6 +657,8 @@ void sme2_s8q_planar_3x3_s1_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #12\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp
index 1d0efc6bc1..a3cfa94b03 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,119 +70,124 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0x9\n"
"ptrue p2.b\n"
- "mov x20, #0x9\n"
"ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z11.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x22, #0x8\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z29.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x21, x6\n"
+ "mov SP, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-6\n"
+ "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z29.h, p2/M, z29.h\n"
+ "ld1rw { z0.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x7\n"
- "addvl SP, SP, #-6\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z11.h, p2/M, z11.h\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z28.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z28.s, #0x0\n"
+ "mov z16.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x20, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z26.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z22.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z27.h, #0x0\n"
+ "addvl x22, SP, #6\n"
+ "addvl x22, x22, #-2\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z17.d, z16.d\n"
+ "mov z18.d, z16.d\n"
+ "mov z19.d, z16.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1sb { z25.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1rh { z16.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "sub z26.h, z26.h, z16.h\n"
- "incw x22\n"
- "mov z24.h, #0x0\n"
- "ld1sb { z3.s }, p2/Z, [x20]\n"
+ "ld1sb { z15.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z3.h, z3.h, z16.h\n"
- "trn1 z31.h, z26.h, z3.h\n"
- "ld1sb { z21.s }, p2/Z, [x20]\n"
- "sub z21.h, z21.h, z16.h\n"
- "mov x20, x22\n"
- "trn1 z14.h, z21.h, z24.h\n"
- "ld1sb { z2.s }, p2/Z, [x20]\n"
+ "ld1sb { z9.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z25.h, z25.h, z22.h\n"
+ "sub z15.h, z15.h, z22.h\n"
+ "sub z9.h, z9.h, z22.h\n"
+ "trn1 z24.h, z25.h, z15.h\n"
+ "ld1sb { z12.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z2.h, z2.h, z16.h\n"
- "addvl x21, SP, #6\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
+ "ld1sb { z4.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z25.h, z25.h, z16.h\n"
- "incw x22\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "sub z27.h, z27.h, z16.h\n"
- "addvl x21, x21, #-2\n"
- "mov x20, x22\n"
- "st1h { z31.h }, p2, [x21]\n"
- "trn1 z4.h, z2.h, z25.h\n"
- "ld1sb { z26.s }, p2/Z, [x20]\n"
+ "trn1 z11.h, z9.h, z27.h\n"
+ "ld1sb { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "sub z12.h, z12.h, z22.h\n"
+ "sub z4.h, z4.h, z22.h\n"
+ "st1h { z24.h }, p2, [x22]\n"
+ "sub z15.h, z15.h, z22.h\n"
+ "st1h { z11.h }, p2, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #-2\n"
+ "trn1 z9.h, z12.h, z4.h\n"
+ "ld1sb { z14.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
+ "ld1sb { z10.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z14.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z12.h, z27.h, z24.h\n"
- "ld1sb { z20.s }, p2/Z, [x20]\n"
- "sub z26.h, z26.h, z16.h\n"
- "sub z23.h, z23.h, z16.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "sub z20.h, z20.h, z16.h\n"
- "addvl x21, x21, #-2\n"
- "st1h { z4.h }, p2, [x21]\n"
- "mov z29.d, z28.d\n"
- "st1h { z12.h }, p2, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #-2\n"
- "mov z30.d, z28.d\n"
- "mov z31.d, z28.d\n"
- "trn1 z25.h, z26.h, z23.h\n"
- "st1h { z25.h }, p2, [x21]\n"
- "trn1 z3.h, z20.h, z24.h\n"
- "st1h { z3.h }, p2, [x21, #1, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z6.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "trn1 z21.h, z15.h, z27.h\n"
+ "ld1sb { z30.s }, p2/Z, [x20]\n"
+ "sub z14.h, z14.h, z22.h\n"
+ "sub z10.h, z10.h, z22.h\n"
+ "st1h { z9.h }, p2, [x22]\n"
+ "sub z30.h, z30.h, z22.h\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #-2\n"
+ "trn1 z15.h, z14.h, z10.h\n"
+ "trn1 z25.h, z30.h, z27.h\n"
+ "st1h { z15.h }, p2, [x22]\n"
+ "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z1.s }, p1/Z, [x21, x16, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z9.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z0.s }, p1/Z, [x20, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
"mov x22, #0x9\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x7, x6\n"
+ "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x15, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x7, x14\n"
+ "orr x20, x17, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040e00 // mova za.d[x8, #0], { z16.d-z19.d }\n"
"mov x22, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x14, x7, x21, x14\n"
+ ".inst 0xc0040e01 // mova za.d[x8, #1], { z16.d-z19.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
"ldp x11, x10, [x23], #0x10\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
"ldp x9, x28, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
"ldp x27, x26, [x23], #0x10\n"
"ldp x25, x24, [x20], #0x10\n"
"cbz x21, 7f\n"
@@ -191,24 +196,24 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
+ ".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
"and x22, x21, #0x1\n"
- ".inst 0xc1a9aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z9.s\n"
"add x21, x21, #0x1\n"
"lsr x21, x21, #0x1\n"
- ".inst 0xc1adab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z13.s\n"
"sub x13, x13, x21\n"
- ".inst 0xc1a7cd58 // sclamp { z24.s-z27.s }, z10.s, z7.s\n"
+ ".inst 0xc1a1ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+ ".inst 0xc1a0aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc1a8ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
+ ".inst 0xc1bccfec // sclamp { z12.s-z15.s }, z31.s, z28.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "st1b { z12.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z25.s }, p1, [x10]\n"
+ "st1b { z13.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z26.s }, p1, [x27]\n"
+ "st1b { z14.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z15.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -220,194 +225,194 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1sb { z1.s }, p1/Z, [x14]\n"
+ "ld1sb { z23.s }, p1/Z, [x14]\n"
"addvl x20, SP, #4\n"
- "ld1sb { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z21.h\n"
- "add z1.h, z1.h, z11.h\n"
- "ld1sb { z2.s }, p1/Z, [x21]\n"
+ "ld1sb { z4.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z15.s }, p1/Z, [x21]\n"
+ "ld1sb { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z2.h, z2.h, z15.h\n"
- "add z2.h, z2.h, z11.h\n"
- "ld1sb { z3.s }, p1/Z, [x21]\n"
+ "ld1sb { z5.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z21.s }, p1/Z, [x21]\n"
+ "ld1sb { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z3.h, z3.h, z21.h\n"
- "add z3.h, z3.h, z11.h\n"
- "ld1sb { z4.s }, p1/Z, [x21]\n"
+ "trn1 z23.h, z23.h, z4.h\n"
+ "ld1sb { z6.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z19.s }, p1/Z, [x21]\n"
+ "ld1sb { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z4.h, z19.h\n"
- "add z4.h, z4.h, z11.h\n"
- "ld1sb { z8.s }, p1/Z, [x21]\n"
- "mov z5.d, z8.d\n"
- "add z5.h, z5.h, z11.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701428 // sdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
- ".inst 0xc1781448 // sdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
+ "trn1 z24.h, z24.h, z5.h\n"
+ "ld1sb { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z25.h, z25.h, z6.h\n"
+ "ld1sb { z10.s }, p1/Z, [x21]\n"
+ "add z23.h, z23.h, z29.h\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ "trn1 z26.h, z26.h, z20.h\n"
+ "add z24.h, z24.h, z29.h\n"
+ "mov z27.d, z10.d\n"
+ "add z25.h, z25.h, z29.h\n"
+ "add z26.h, z26.h, z29.h\n"
+ "add z27.h, z27.h, z29.h\n"
+ ".inst 0xc17616e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z6.h\n"
+ ".inst 0xc17e1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z14.h\n"
"9:" // Unpadded: 1 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1sb { z1.s }, p1/Z, [x14]\n"
+ "ld1sb { z20.s }, p1/Z, [x14]\n"
"addvl x20, SP, #2\n"
- "ld1sb { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z21.h\n"
- "add z1.h, z1.h, z11.h\n"
"ld1sb { z2.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z12.s }, p1/Z, [x21]\n"
+ "ld1sb { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z2.h, z2.h, z12.h\n"
- "add z2.h, z2.h, z11.h\n"
- "ld1sb { z3.s }, p1/Z, [x21]\n"
+ "ld1sb { z25.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z22.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z8.s }, p1/Z, [x21]\n"
+ "trn1 z20.h, z20.h, z2.h\n"
+ "ld1sb { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z3.h, z3.h, z8.h\n"
- "add z3.h, z3.h, z11.h\n"
- "ld1sb { z4.s }, p1/Z, [x21]\n"
+ "ld1sb { z23.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z5.s }, p1/Z, [x21]\n"
+ "trn1 z21.h, z21.h, z25.h\n"
+ "ld1sb { z9.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z4.h, z5.h\n"
- "add z4.h, z4.h, z11.h\n"
- "ld1sb { z5.s }, p1/Z, [x21]\n"
- "mov z5.d, z5.d\n"
- "add z5.h, z5.h, z11.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701428 // sdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
- ".inst 0xc1781448 // sdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
+ "trn1 z22.h, z22.h, z24.h\n"
+ "ld1sb { z3.s }, p1/Z, [x21]\n"
+ "add z20.h, z20.h, z29.h\n"
+ ".inst 0xa0402a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20]\n"
+ "trn1 z23.h, z23.h, z9.h\n"
+ "add z21.h, z21.h, z29.h\n"
+ "mov z24.d, z3.d\n"
+ "add z22.h, z22.h, z29.h\n"
+ "add z23.h, z23.h, z29.h\n"
+ "add z24.h, z24.h, z29.h\n"
+ ".inst 0xc1761688 // sdot za.s[x8, 0], { z20.h-z23.h }, z6.h\n"
+ ".inst 0xc17716a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z7.h\n"
"10:" // Unpadded: 0 priming loads
"cmp x15, #0x2\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
"add x21, x14, %x[ld_in_row]\n"
- "ld1sb { z21.s }, p1/Z, [x14]\n"
+ "ld1sb { z10.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x2\n"
- "ld1sb { z8.s }, p1/Z, [x21]\n"
+ "ld1sb { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z8.h\n"
"sub x13, x13, #0x1\n"
- "ld1sb { z22.s }, p1/Z, [x21]\n"
+ "ld1sb { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"lsr x20, x15, #0x1\n"
- "add z21.h, z21.h, z11.h\n"
- "ld1sb { z25.s }, p1/Z, [x21]\n"
+ "ld1sb { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z25.h\n"
"cmp x20, x13\n"
- "ld1sb { z23.s }, p1/Z, [x21]\n"
+ "ld1sb { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z25.h\n"
"csel x23, x20, x13, LT\n"
- "add z22.h, z22.h, z11.h\n"
- "ld1sb { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z18.h\n"
- "add z23.h, z23.h, z11.h\n"
- "ld1sb { z24.s }, p1/Z, [x21]\n"
+ "ld1sb { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1sb { z19.s }, p1/Z, [x21]\n"
+ "ld1sb { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z19.h\n"
- "add z24.h, z24.h, z11.h\n"
- "ld1sb { z8.s }, p1/Z, [x21]\n"
- "mov z25.d, z8.d\n"
- "add z25.h, z25.h, z11.h\n"
+ "trn1 z11.h, z11.h, z24.h\n"
"and x15, x15, #0x1\n"
+ "ld1sb { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"sub x13, x13, x23\n"
+ "ld1sb { z26.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z30.h\n"
+ "add z10.h, z10.h, z29.h\n"
+ "trn1 z13.h, z13.h, z20.h\n"
+ "add z11.h, z11.h, z29.h\n"
+ "mov z14.d, z26.d\n"
+ "add z12.h, z12.h, z29.h\n"
+ "add z13.h, z13.h, z29.h\n"
+ "add z14.h, z14.h, z29.h\n"
"cbz x23, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
"addvl x20, SP, #4\n"
"add x22, x14, %x[ld_in_row]\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
"addvl x21, SP, #2\n"
"subs x23, x23, #0x1\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1sb { z21.s }, p1/Z, [x14]\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1771549 // sdot za.s[x8, 1], { z10.h-z13.h }, z7.h\n"
+ "ld1sb { z3.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "ld1sb { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- "trn1 z21.h, z21.h, z18.h\n"
- "ld1sb { z22.s }, p1/Z, [x22]\n"
+ "ld1sb { z9.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z21.h, z21.h, z11.h\n"
- ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
- "ld1sb { z8.s }, p1/Z, [x22]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ "add x20, x14, %x[ld_in_row]\n"
+ ".inst 0xc17f1569 // sdot za.s[x8, 1], { z11.h-z14.h }, z15.h\n"
+ "ld1sb { z4.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z8.h\n"
- "add z22.h, z22.h, z11.h\n"
- "ld1sb { z23.s }, p1/Z, [x22]\n"
+ "trn1 z3.h, z3.h, z9.h\n"
+ "ld1sb { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "ld1sb { z27.s }, p1/Z, [x22]\n"
+ "ld1sb { z5.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z27.h\n"
- "add z23.h, z23.h, z11.h\n"
- "ld1sb { z24.s }, p1/Z, [x22]\n"
+ ".inst 0xc1a1ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z1.s\n"
+ "ld1sb { z10.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc1a6ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
- "ld1sb { z8.s }, p1/Z, [x22]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1sb { z6.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z8.h\n"
- "add z24.h, z24.h, z11.h\n"
- "ld1sb { z4.s }, p1/Z, [x22]\n"
- "mov z25.d, z4.d\n"
- "add z25.h, z25.h, z11.h\n"
- ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17416a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z4.h\n"
- ".inst 0xc1a9aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
- "ld1sb { z21.s }, p1/Z, [x14]\n"
- ".inst 0xc17c16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z12.h\n"
- ".inst 0xc1adab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
- "ld1sb { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z12.h\n"
- ".inst 0xc1a7cd40 // sclamp { z0.s-z3.s }, z10.s, z7.s\n"
+ "trn1 z4.h, z4.h, z15.h\n"
+ "add z3.h, z3.h, z29.h\n"
+ "ld1sb { z14.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z5.h, z5.h, z10.h\n"
+ "ld1sb { z21.s }, p1/Z, [x22]\n"
+ ".inst 0xc1a0aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z0.s\n"
+ ".inst 0xa0402aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21]\n"
+ "trn1 z6.h, z6.h, z14.h\n"
+ "add z4.h, z4.h, z29.h\n"
+ "mov z7.d, z21.d\n"
+ "add z5.h, z5.h, z29.h\n"
+ ".inst 0xc1a8ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ "add z6.h, z6.h, z29.h\n"
+ "add z7.h, z7.h, z29.h\n"
+ ".inst 0xc1bccff8 // sclamp { z24.s-z27.s }, z31.s, z28.s\n"
+ ".inst 0xc17a1468 // sdot za.s[x8, 0], { z3.h-z6.h }, z10.h\n"
+ "ld1sb { z10.s }, p1/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
"ld1sb { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "st1b { z0.s }, p1, [x11]\n"
+ "st1b { z24.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "ld1sb { z20.s }, p1/Z, [x20]\n"
+ ".inst 0xc17b1488 // sdot za.s[x8, 0], { z4.h-z7.h }, z11.h\n"
+ "ld1sb { z11.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z20.h\n"
- "st1b { z1.s }, p1, [x10]\n"
- "ld1sb { z23.s }, p1/Z, [x20]\n"
+ "trn1 z10.h, z10.h, z22.h\n"
+ "ld1sb { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "st1b { z25.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z2.s }, p1, [x27]\n"
- "ld1sb { z24.s }, p1/Z, [x20]\n"
+ "ld1sb { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z24.h\n"
+ "st1b { z26.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "ld1sb { z24.s }, p1/Z, [x20]\n"
+ "ld1sb { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "st1b { z3.s }, p1, [x26]\n"
+ "st1b { z27.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "ld1sb { z3.s }, p1/Z, [x20]\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z3.h\n"
- "add z21.h, z21.h, z11.h\n"
- "ld1sb { z3.s }, p1/Z, [x20]\n"
- "mov z25.d, z3.d\n"
- "add z22.h, z22.h, z11.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- "add z23.h, z23.h, z11.h\n"
- "add z24.h, z24.h, z11.h\n"
- "add z25.h, z25.h, z11.h\n"
+ "trn1 z11.h, z11.h, z14.h\n"
+ "add z10.h, z10.h, z29.h\n"
+ "ld1sb { z6.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
+ "trn1 z12.h, z12.h, z9.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
+ "trn1 z13.h, z13.h, z6.h\n"
+ "add z11.h, z11.h, z29.h\n"
+ "mov z14.d, z20.d\n"
+ "add z12.h, z12.h, z29.h\n"
+ "add z13.h, z13.h, z29.h\n"
+ "add z14.h, z14.h, z29.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
@@ -417,440 +422,440 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z22.s }, p0/Z, [x14]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #4\n"
+ ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z21.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z20.h, z20.h, z22.h\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z4.s }, p0/Z, [x20]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z17.h\n"
- "trn1 z23.h, z23.h, z4.h\n"
+ "ld1sb { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z23.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z24.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z25.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z11.h\n"
- "addvl x20, SP, #4\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "trn1 z25.h, z25.h, z17.h\n"
- ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
- "mov z26.d, z1.d\n"
- ".inst 0xc17416c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z4.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17c16e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z12.h\n"
+ "ld1sb { z24.s }, p0/Z, [x21]\n"
+ "trn1 z23.h, z23.h, z25.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
+ ".inst 0xc1731688 // sdot za.s[x8, 0], { z20.h-z23.h }, z3.h\n"
+ "mov z24.d, z24.d\n"
+ ".inst 0xc17b16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z11.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z22.s }, p0/Z, [x14]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #2\n"
+ ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
+ "ld1sb { z21.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z20.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z5.s }, p0/Z, [x20]\n"
- "add z5.h, p0/M, z5.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z17.h\n"
- "trn1 z23.h, z23.h, z5.h\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z20.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z23.h, z20.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z25.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z15.s }, p0/Z, [x20]\n"
- "add z15.h, p0/M, z15.h, z11.h\n"
- "addvl x20, SP, #2\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "trn1 z25.h, z25.h, z17.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- "mov z26.d, z15.d\n"
- ".inst 0xc17016c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z0.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17116e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z1.h\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "trn1 z24.h, z24.h, z25.h\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
+ ".inst 0xc17316a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z3.h\n"
+ "mov z25.d, z20.d\n"
+ ".inst 0xc17b16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z11.h\n"
"15:" // Padded: 0 priming loads
"cmp x15, #0x2\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
- "mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z18.h\n"
- "trn1 z22.h, z22.h, z3.h\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
- "add z19.h, p0/M, z19.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "mov x12, #0x8\n"
- "add z20.h, p0/M, z20.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
"sub x15, x15, #0x2\n"
"sub x13, x13, #0x1\n"
- "trn1 z23.h, z23.h, z19.h\n"
- "trn1 z24.h, z24.h, z20.h\n"
"lsr x20, x15, #0x1\n"
"cmp x20, x13\n"
- "mov z25.d, z3.d\n"
- "csel x22, x20, x13, LT\n"
- "add x14, x14, %x[ld_in_col]\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x22\n"
- "cbz x22, 17f\n"
- "16:" // Padded: Main loop
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "addvl x20, SP, #4\n"
- "mov x12, #0x0\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x21, x14, %x[ld_in_row]\n"
- ".inst 0xc17416a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z4.h\n"
- "ld1sb { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "ld1sb { z10.s }, p0/Z, [x14]\n"
+ "csel x23, x20, x13, LT\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "sub x13, x13, x23\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z14.s }, p0/Z, [x21]\n"
- "add z14.h, p0/M, z14.h, z11.h\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17c16c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z12.h\n"
- "ld1sb { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
+ "ld1sb { z11.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z20.h\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z15.s }, p0/Z, [x21]\n"
"mov x12, #0x4\n"
- "add z15.h, p0/M, z15.h, z11.h\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z23.s }, p0/Z, [x21]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
+ "ld1sb { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z20.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z24.s }, p0/Z, [x21]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
+ "ld1sb { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z4.s }, p0/Z, [x21]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
"mov x12, #0x8\n"
+ "ld1sb { z21.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z14.h\n"
- "trn1 z22.h, z22.h, z15.h\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "addvl x20, SP, #2\n"
- "ld1sb { z2.s }, p0/Z, [x21]\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z4.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z21.h\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
+ "mov z14.d, z20.d\n"
+ "cbz x23, 17f\n"
+ "16:" // Padded: Main loop
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "addvl x20, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "add z2.h, p0/M, z2.h, z11.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17016a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z0.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "add x22, x14, %x[ld_in_row]\n"
+ "addvl x21, SP, #2\n"
+ "subs x23, x23, #0x1\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1721549 // sdot za.s[x8, 1], { z10.h-z13.h }, z2.h\n"
+ "ld1sb { z10.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x20, x14, %x[ld_in_row]\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "mov z25.d, z2.d\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1731569 // sdot za.s[x8, 1], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1sb { z26.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z26.h, p0/M, z26.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17116c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z1.h\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ "ld1sb { z11.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z26.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z4.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
+ "ld1sb { z9.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z4.s }, p1, [x11]\n"
+ "add x11, x11, x9\n"
+ "st1b { z5.s }, p1, [x10]\n"
+ "add x10, x10, x28\n"
+ "ld1sb { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "st1b { z6.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
+ "trn1 z11.h, z11.h, z9.h\n"
+ "st1b { z7.s }, p1, [x26]\n"
+ "add x26, x26, x24\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z9.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z24.s }, p0/Z, [x20]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z9.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z12.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- "add x20, x20, %x[ld_in_row]\n"
- "add z12.h, p0/M, z12.h, z11.h\n"
+ "ld1sb { z20.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z11.h\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "trn1 z21.h, z21.h, z20.h\n"
- "st1b { z17.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "trn1 z22.h, z22.h, z4.h\n"
- "trn1 z23.h, z23.h, z27.h\n"
- "st1b { z18.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "trn1 z24.h, z24.h, z12.h\n"
- "mov z25.d, z8.d\n"
- "st1b { z19.s }, p1, [x26]\n"
- "add x26, x26, x24\n"
- "add x14, x14, %x[ld_in_col]\n"
- "bgt 16b\n"
- "17:" // Main loop tail
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "addvl x20, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1sb { z9.s }, p0/Z, [x22]\n"
+ "trn1 z13.h, z13.h, z20.h\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1sb { z0.s }, p0/Z, [x14]\n"
- "add z0.h, p0/M, z0.h, z11.h\n"
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "ld1sb { z10.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "mov z14.d, z9.d\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z14.s }, p0/Z, [x20]\n"
- "add z14.h, p0/M, z14.h, z11.h\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
+ "ld1sb { z25.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- "ld1sb { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z11.h\n"
+ "ld1sb { z11.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z25.h\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z12.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
- "add z12.h, p0/M, z12.h, z11.h\n"
+ "ld1sb { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z2.s }, p0/Z, [x20]\n"
- "add z2.h, p0/M, z2.h, z11.h\n"
+ "ld1sb { z12.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z15.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "ld1sb { z4.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z4.h, p0/M, z4.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z3.s }, p0/Z, [x20]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
+ "ld1sb { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z4.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z25.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
+ "ld1sb { z4.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
+ "add z4.h, p0/M, z4.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
+ "ld1sb { z26.s }, p0/Z, [x20]\n"
+ "trn1 z13.h, z13.h, z4.h\n"
+ "add z26.h, p0/M, z26.h, z29.h\n"
+ "mov z14.d, z26.d\n"
+ "bgt 16b\n"
+ "17:" // Main loop tail
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "addvl x22, SP, #4\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "add x21, x14, %x[ld_in_row]\n"
"addvl x20, SP, #2\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- "trn1 z0.h, z0.h, z14.h\n"
- "add x8, x8, #0x1\n"
- "add z27.h, p0/M, z27.h, z11.h\n"
- "trn1 z1.h, z1.h, z12.h\n"
- "trn1 z2.h, z2.h, z21.h\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1721549 // sdot za.s[x8, 1], { z10.h-z13.h }, z2.h\n"
+ "ld1sb { z9.s }, p0/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "trn1 z3.h, z3.h, z25.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- "mov z4.d, z27.d\n"
- ".inst 0xc17e1408 // sdot za.s[x8, 0], { z0.h-z3.h }, z14.h\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1731569 // sdot za.s[x8, 1], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1sb { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ "ld1sb { z10.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z15.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x4\n"
+ "ld1sb { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "st1b { z17.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc17f1428 // sdot za.s[x8, 0], { z1.h-z4.h }, z15.h\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- "st1b { z18.s }, p1, [x27]\n"
+ "ld1sb { z11.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z19.s }, p1, [x26]\n"
+ "trn1 z10.h, z10.h, z15.h\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "ld1sb { z5.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z5.h, p0/M, z5.h, z29.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "ld1sb { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z5.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x8\n"
+ "ld1sb { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1sb { z5.s }, p0/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z15.h\n"
+ "add z5.h, p0/M, z5.h, z29.h\n"
+ ".inst 0xc1721528 // sdot za.s[x8, 0], { z9.h-z12.h }, z2.h\n"
+ "mov z13.d, z5.d\n"
+ ".inst 0xc1731548 // sdot za.s[x8, 0], { z10.h-z13.h }, z3.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"18:" // Main loop skip tail
"cbz x15, 19f\n" // Skip remainder inputs
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #4\n"
+ "sub x13, x13, #0x1\n"
"ld1sb { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z15.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z0.s }, p0/Z, [x20]\n"
- "add z0.h, p0/M, z0.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z17.h\n"
- "trn1 z22.h, z22.h, z0.h\n"
+ "ld1sb { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z12.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z23.h, z20.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z5.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z5.h, p0/M, z5.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z30.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z30.h, p0/M, z30.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z4.s }, p0/Z, [x20]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z5.h\n"
- "mov z25.d, z4.d\n"
- "addvl x20, SP, #4\n"
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "sub x13, x13, #0x1\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
+ "ld1sb { z6.s }, p0/Z, [x21]\n"
+ "trn1 z24.h, z24.h, z30.h\n"
+ "add z6.h, p0/M, z6.h, z29.h\n"
+ ".inst 0xc17216a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z2.h\n"
+ "mov z25.d, z6.d\n"
+ ".inst 0xc17316c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z3.h\n"
+ ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17516a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z5.h\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ ".inst 0xc17d16c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z13.h\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "st1b { z17.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z18.s }, p1, [x27]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z19.s }, p1, [x26]\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"19:" // Tail input: End
"cbz x13, 21f\n"
"20:" // Right padding loop
- ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a9aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- ".inst 0xc1adab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
- ".inst 0xc1a7cd40 // sclamp { z0.s-z3.s }, z10.s, z7.s\n"
- "st1b { z0.s }, p1, [x11]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z1.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z2.s }, p1, [x27]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z3.s }, p1, [x26]\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 20b\n"
"21:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -869,6 +874,8 @@ void sme2_s8q_planar_3x3_s2_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #6\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp
index bb68733a45..857d20ab09 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,249 +70,254 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "ptrue p2.b\n"
+ "mov x22, SP\n"
"mov x20, #0x8\n"
+ "ptrue p2.b\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z17.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x5\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x22, #0x8\n"
"ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z15.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x21, x21, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x20, x5\n"
+ "mov SP, x21\n"
+ "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-30\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x7\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "neg z15.h, p2/M, z15.h\n"
+ "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x6\n"
- "addvl SP, SP, #-30\n"
- "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z17.h, p2/M, z17.h\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z18.s, #0x0\n"
+ "mov z28.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z18.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x20, x17, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x23\n"
- "ld1sb { z2.s }, p2/Z, [x20]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z0.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z13.h, #0x0\n"
+ "addvl x22, SP, #30\n"
+ "addvl x22, x22, #-6\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z29.d, z28.d\n"
+ "mov x23, x24\n"
+ "incw x24\n"
+ "ld1sb { z22.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1sb { z21.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1sb { z19.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1sb { z25.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "sub z22.h, z22.h, z0.h\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "ld1sb { z5.s }, p2/Z, [x23]\n"
+ "mov x20, x24\n"
+ "incw x24\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "sub z25.h, z25.h, z0.h\n"
+ "sub z5.h, z5.h, z0.h\n"
+ "trn1 z6.h, z13.h, z22.h\n"
+ "trn1 z23.h, z22.h, z21.h\n"
+ "ld1sb { z27.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "ld1rh { z3.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z15.h, #0x0\n"
- "sub z2.h, z2.h, z3.h\n"
- "incw x23\n"
- "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "trn1 z4.h, z21.h, z19.h\n"
+ "ld1sb { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z13.h, z13.h, z3.h\n"
- "trn1 z11.h, z15.h, z2.h\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
+ "trn1 z26.h, z19.h, z25.h\n"
+ "ld1sb { z18.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z27.h, z27.h, z3.h\n"
- "trn1 z0.h, z2.h, z13.h\n"
+ "trn1 z22.h, z25.h, z5.h\n"
+ "ld1sb { z7.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z25.h, z5.h, z13.h\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "sub z9.h, z9.h, z0.h\n"
+ "ld1sb { z1.s }, p2/Z, [x20]\n"
+ "mov x20, x24\n"
+ "sub z18.h, z18.h, z0.h\n"
+ "st1h { z6.h }, p2, [x22]\n"
+ "incw x24\n"
+ "sub z7.h, z7.h, z0.h\n"
+ "st1h { z23.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z1.h, z1.h, z0.h\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "trn1 z20.h, z13.h, z27.h\n"
+ "trn1 z12.h, z27.h, z9.h\n"
+ "ld1sb { z21.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z2.h, z9.h, z18.h\n"
"ld1sb { z19.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z19.h, z19.h, z3.h\n"
- "trn1 z26.h, z13.h, z27.h\n"
+ "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z9.h, z18.h, z7.h\n"
"ld1sb { z14.s }, p2/Z, [x20]\n"
- "sub z14.h, z14.h, z3.h\n"
- "mov x20, x23\n"
- "trn1 z10.h, z27.h, z19.h\n"
- "ld1sb { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z19.h, z19.h, z14.h\n"
- "trn1 z1.h, z14.h, z15.h\n"
+ "st1h { z22.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z18.h, z7.h, z1.h\n"
"ld1sb { z5.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z9.h, z9.h, z3.h\n"
- "sub z5.h, z5.h, z3.h\n"
- "ld1sb { z29.s }, p2/Z, [x20]\n"
+ "st1h { z25.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z25.h, z1.h, z13.h\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "addvl x22, x22, #-6\n"
+ "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "mov x20, x24\n"
+ "st1h { z20.h }, p2, [x22]\n"
+ "sub z5.h, z5.h, z0.h\n"
+ "st1h { z12.h }, p2, [x22, #1, MUL VL]\n"
+ "incw x24\n"
+ "st1h { z2.h }, p2, [x22, #2, MUL VL]\n"
+ "sub z16.h, z16.h, z0.h\n"
+ "trn1 z7.h, z13.h, z21.h\n"
+ "trn1 z20.h, z21.h, z19.h\n"
+ "ld1sb { z6.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z29.h, z29.h, z3.h\n"
- "addvl x22, SP, #30\n"
+ "trn1 z17.h, z19.h, z14.h\n"
+ "st1h { z9.h }, p2, [x22, #3, MUL VL]\n"
"ld1sb { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "incw x23\n"
- "sub z2.h, z2.h, z3.h\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
- "addvl x22, x22, #-6\n"
- "sub z23.h, z23.h, z3.h\n"
- "mov x20, x23\n"
- "st1h { z11.h }, p2, [x22]\n"
- "trn1 z20.h, z15.h, z9.h\n"
- "incw x23\n"
- "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "st1h { z0.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z22.h, z9.h, z5.h\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z9.h, z5.h, z29.h\n"
+ "trn1 z12.h, z14.h, z5.h\n"
+ "st1h { z18.h }, p2, [x22, #4, MUL VL]\n"
"ld1sb { z21.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z26.h, z29.h, z2.h\n"
- "ld1sb { z0.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z28.h, z2.h, z23.h\n"
- "ld1sb { z19.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z1.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z2.h, z23.h, z15.h\n"
- "sub z25.h, z25.h, z3.h\n"
+ "st1h { z25.h }, p2, [x22, #5, MUL VL]\n"
"addvl x22, x22, #-6\n"
- "sub z21.h, z21.h, z3.h\n"
- "ld1sb { z6.s }, p2/Z, [x20]\n"
- "sub z0.h, z0.h, z3.h\n"
- "mov x20, x23\n"
- "sub z19.h, z19.h, z3.h\n"
- "sub z6.h, z6.h, z3.h\n"
- "st1h { z20.h }, p2, [x22]\n"
- "incw x23\n"
- "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z11.h, z15.h, z25.h\n"
- "trn1 z10.h, z25.h, z21.h\n"
- "ld1sb { z5.s }, p2/Z, [x20]\n"
+ "trn1 z5.h, z5.h, z16.h\n"
+ "ld1sb { z25.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z9.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z14.h, z21.h, z0.h\n"
+ "trn1 z4.h, z16.h, z13.h\n"
+ "sub z6.h, z6.h, z0.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1sb { z19.s }, p2/Z, [x20]\n"
+ "mov x20, x24\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "st1h { z7.h }, p2, [x22]\n"
+ "sub z25.h, z25.h, z0.h\n"
+ "st1h { z20.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
+ "trn1 z1.h, z13.h, z6.h\n"
+ "trn1 z24.h, z6.h, z2.h\n"
"ld1sb { z23.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z21.h, z0.h, z19.h\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z28.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z19.h, z19.h, z6.h\n"
- "ld1sb { z29.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z2.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z13.h, z6.h, z15.h\n"
- "sub z5.h, z5.h, z3.h\n"
- "sub z23.h, z23.h, z3.h\n"
- "ld1sb { z1.s }, p2/Z, [x20]\n"
- "addvl x22, x22, #-6\n"
- "sub z27.h, z27.h, z3.h\n"
- "sub z29.h, z29.h, z3.h\n"
- "mov x20, x23\n"
- "st1h { z11.h }, p2, [x22]\n"
- "sub z1.h, z1.h, z3.h\n"
- "st1h { z10.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z30.h, z15.h, z5.h\n"
- "trn1 z26.h, z5.h, z23.h\n"
- "ld1sb { z11.s }, p2/Z, [x20]\n"
+ "trn1 z16.h, z2.h, z21.h\n"
+ "ld1sb { z6.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z14.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z22.h, z23.h, z27.h\n"
- "ld1sb { z5.s }, p2/Z, [x20]\n"
+ "st1h { z12.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z20.h, z21.h, z25.h\n"
+ "ld1sb { z14.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z21.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z28.h, z27.h, z29.h\n"
- "ld1sb { z8.s }, p2/Z, [x20]\n"
+ "st1h { z5.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z17.h, z25.h, z19.h\n"
+ "ld1sb { z22.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z27.h, z29.h, z1.h\n"
- "ld1sb { z9.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z13.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z2.h, z1.h, z15.h\n"
- "ld1sb { z14.s }, p2/Z, [x20]\n"
- "sub z11.h, z11.h, z3.h\n"
+ "st1h { z4.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z19.h, z19.h, z13.h\n"
+ "ld1sb { z27.s }, p2/Z, [x20]\n"
+ "sub z23.h, z23.h, z0.h\n"
"addvl x22, x22, #-6\n"
- "sub z5.h, z5.h, z3.h\n"
- "sub z8.h, z8.h, z3.h\n"
- "st1h { z30.h }, p2, [x22]\n"
- "sub z9.h, z9.h, z3.h\n"
- "sub z14.h, z14.h, z3.h\n"
- "st1h { z26.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
- "mov z19.d, z18.d\n"
- "trn1 z22.h, z15.h, z11.h\n"
- "st1h { z28.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z1.h, z11.h, z5.h\n"
- "trn1 z31.h, z5.h, z8.h\n"
- "st1h { z27.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z8.h, z8.h, z9.h\n"
- "trn1 z21.h, z9.h, z14.h\n"
- "st1h { z2.h }, p2, [x22, #5, MUL VL]\n"
+ "sub z6.h, z6.h, z0.h\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "st1h { z1.h }, p2, [x22]\n"
+ "sub z22.h, z22.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z16.h, z13.h, z23.h\n"
+ "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z7.h, z23.h, z6.h\n"
+ "trn1 z12.h, z6.h, z14.h\n"
+ "st1h { z19.h }, p2, [x22, #5, MUL VL]\n"
"addvl x22, x22, #-6\n"
- "trn1 z15.h, z14.h, z15.h\n"
- "st1h { z22.h }, p2, [x22]\n"
- "st1h { z1.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z31.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z8.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z15.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z5.h, z14.h, z22.h\n"
+ "trn1 z14.h, z22.h, z27.h\n"
+ "trn1 z20.h, z27.h, z13.h\n"
+ "st1h { z16.h }, p2, [x22]\n"
+ "st1h { z7.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z12.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z5.h }, p2, [x22, #3, MUL VL]\n"
+ "st1h { z14.h }, p2, [x22, #4, MUL VL]\n"
+ "st1h { z20.h }, p2, [x22, #5, MUL VL]\n"
"cbz x21, 3f\n"
- "ld1w { z7.s }, p1/Z, [x21, x17, LSL #2]\n"
+ "ld1w { z8.s }, p1/Z, [x21, x17, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z4.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "ld1w { z11.s }, p1/Z, [x20, x17, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x25, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x25, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x7, x23, LSL #22\n"
"mov x22, #0x8\n"
- "add x21, x6, x5\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x6, x5\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x11, #0x0\n"
"mov x8, #0x8\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x16\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x25, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x6, x16\n"
+ "orr x20, x7, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x16, x6, x20, x16\n"
- ".inst 0xc0046a40 // mova za.d[x11, #0], { z18.d-z19.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0046a41 // mova za.d[x11, #1], { z18.d-z19.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0046b80 // mova za.d[x11, #0], { z28.d-z29.d }\n"
"mov x22, #0x4\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x16, x6, x21, x16\n"
+ ".inst 0xc0046b81 // mova za.d[x11, #1], { z28.d-z29.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0046b82 // mova za.d[x11, #2], { z28.d-z29.d }\n"
"ldp x14, x13, [x23], #0x10\n"
- ".inst 0xc0046a42 // mova za.d[x11, #2], { z18.d-z19.d }\n"
+ ".inst 0xc0046b83 // mova za.d[x11, #3], { z28.d-z29.d }\n"
"ldp x4, x10, [x20], #0x10\n"
- ".inst 0xc0046a43 // mova za.d[x11, #3], { z18.d-z19.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0046a44 // mova za.d[x11, #4], { z18.d-z19.d }\n"
+ ".inst 0xc0046b84 // mova za.d[x11, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0046b85 // mova za.d[x11, #5], { z28.d-z29.d }\n"
"ldp x9, x28, [x23], #0x10\n"
- ".inst 0xc0046a45 // mova za.d[x11, #5], { z18.d-z19.d }\n"
+ ".inst 0xc0046b86 // mova za.d[x11, #6], { z28.d-z29.d }\n"
"ldp x27, x26, [x20], #0x10\n"
- ".inst 0xc0046a46 // mova za.d[x11, #6], { z18.d-z19.d }\n"
- ".inst 0xc0046a47 // mova za.d[x11, #7], { z18.d-z19.d }\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
+ ".inst 0xc0046b87 // mova za.d[x11, #7], { z28.d-z29.d }\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
"csel x20, x21, x22, LT\n"
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066810 // mova { z16.d-z17.d }, za.d[x11, #0]\n"
"sub x15, x15, x21\n"
- ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
- ".inst 0xc1a4aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z4.s\n"
- ".inst 0xc1acab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z12.s\n"
- ".inst 0xc1b0cf14 // sclamp { z20.s-z23.s }, z24.s, z16.s\n"
+ ".inst 0xc0066832 // mova { z18.d-z19.d }, za.d[x11, #1]\n"
+ ".inst 0xc1a8ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+ ".inst 0xc1abaa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
+ ".inst 0xc1becff0 // sclamp { z16.s-z19.s }, z31.s, z30.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z20.s }, p1, [x14]\n"
+ "st1b { z16.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z22.s }, p1, [x13]\n"
+ "st1b { z18.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z21.s }, p1, [x9]\n"
+ "st1b { z17.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z23.s }, p1, [x28]\n"
+ "st1b { z19.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -328,331 +333,331 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
"add x21, x16, %x[ld_in_row]\n"
- "ld1sb { z1.s }, p1/Z, [x16]\n"
+ "ld1sb { z4.s }, p1/Z, [x16]\n"
"addvl x20, SP, #24\n"
- "ld1sb { z28.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z1.h, z28.h\n"
- "add z27.h, z27.h, z17.h\n"
- "ld1sb { z1.s }, p1/Z, [x21]\n"
+ "ld1sb { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1sb { z2.s }, p1/Z, [x21]\n"
+ "ld1sb { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z28.h, z1.h, z2.h\n"
- "add z28.h, z28.h, z17.h\n"
- "ld1sb { z13.s }, p1/Z, [x21]\n"
+ "ld1sb { z19.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z6.s }, p1/Z, [x21]\n"
+ "trn1 z22.h, z4.h, z13.h\n"
+ "ld1sb { z27.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z13.h, z6.h\n"
- "add z29.h, z29.h, z17.h\n"
- "ld1sb { z30.s }, p1/Z, [x21]\n"
+ "trn1 z23.h, z25.h, z19.h\n"
+ "ld1sb { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16a7768 // sdot za.s[x11, 0], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ "add z22.h, z22.h, z15.h\n"
+ "trn1 z24.h, z14.h, z27.h\n"
"ld1sb { z20.s }, p1/Z, [x21]\n"
- "trn1 z30.h, z30.h, z20.h\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "add z30.h, z30.h, z17.h\n"
- ".inst 0xc1697788 // sdot za.s[x11, 0], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc1617789 // sdot za.s[x11, 1], { z28.h-z29.h }, z1.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z23.h, z23.h, z15.h\n"
+ ".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "trn1 z25.h, z21.h, z20.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16d76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z13.h\n"
+ ".inst 0xc16c76c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z12.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc16e76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z14.h\n"
+ ".inst 0xc16676e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xc1617708 // sdot za.s[x11, 0], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xc1607709 // sdot za.s[x11, 1], { z24.h-z25.h }, z0.h\n"
"9:" // Unpadded: 3 priming loads
"add x22, x16, %x[ld_in_row]\n"
- "ld1sb { z2.s }, p1/Z, [x16]\n"
+ "ld1sb { z21.s }, p1/Z, [x16]\n"
"addvl x21, SP, #18\n"
- "ld1sb { z28.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z20.h, z2.h, z28.h\n"
- "add z20.h, z20.h, z17.h\n"
- "ld1sb { z31.s }, p1/Z, [x22]\n"
+ "ld1sb { z18.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- "ld1sb { z11.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z21.h, z31.h, z11.h\n"
- "add z21.h, z21.h, z17.h\n"
- "ld1sb { z25.s }, p1/Z, [x22]\n"
+ "ld1sb { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1sb { z8.s }, p1/Z, [x22]\n"
+ "ld1sb { z3.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z22.h, z25.h, z8.h\n"
- "add z22.h, z22.h, z17.h\n"
- "ld1sb { z8.s }, p1/Z, [x22]\n"
+ "ld1sb { z27.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16e7688 // sdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
- "ld1sb { z3.s }, p1/Z, [x22]\n"
- "trn1 z23.h, z8.h, z3.h\n"
- ".inst 0xc1667689 // sdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc161768a // sdot za.s[x11, 2], { z20.h-z21.h }, z1.h\n"
- "add z23.h, z23.h, z17.h\n"
+ "trn1 z24.h, z21.h, z18.h\n"
+ "ld1sb { z7.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z25.h, z17.h, z3.h\n"
+ "ld1sb { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ "add z24.h, z24.h, z15.h\n"
+ "trn1 z26.h, z27.h, z7.h\n"
+ "ld1sb { z16.s }, p1/Z, [x22]\n"
".inst 0xa1412aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc160768b // sdot za.s[x11, 3], { z20.h-z21.h }, z0.h\n"
- ".inst 0xc16976a8 // sdot za.s[x11, 0], { z21.h-z22.h }, z9.h\n"
- ".inst 0xa0422aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16176a9 // sdot za.s[x11, 1], { z21.h-z22.h }, z1.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16976aa // sdot za.s[x11, 2], { z21.h-z22.h }, z9.h\n"
- ".inst 0xc16176ab // sdot za.s[x11, 3], { z21.h-z22.h }, z1.h\n"
- ".inst 0xc16f76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z15.h\n"
- ".inst 0xc16e76c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z14.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z11.h\n"
- ".inst 0xc16a76cb // sdot za.s[x11, 3], { z22.h-z23.h }, z10.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xa0422aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "trn1 z27.h, z17.h, z16.h\n"
+ "add z26.h, z26.h, z15.h\n"
+ ".inst 0xc1637708 // sdot za.s[x11, 0], { z24.h-z25.h }, z3.h\n"
+ ".inst 0xc1627709 // sdot za.s[x11, 1], { z24.h-z25.h }, z2.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ "add z27.h, z27.h, z15.h\n"
+ ".inst 0xc16d770a // sdot za.s[x11, 2], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc16c770b // sdot za.s[x11, 3], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc1697728 // sdot za.s[x11, 0], { z25.h-z26.h }, z9.h\n"
+ ".inst 0xc1617729 // sdot za.s[x11, 1], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
+ ".inst 0xc1677748 // sdot za.s[x11, 0], { z26.h-z27.h }, z7.h\n"
+ ".inst 0xc1667749 // sdot za.s[x11, 1], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa0422a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
+ ".inst 0xc16c774b // sdot za.s[x11, 3], { z26.h-z27.h }, z12.h\n"
"10:" // Unpadded: 2 priming loads
"add x23, x16, %x[ld_in_row]\n"
- "ld1sb { z2.s }, p1/Z, [x16]\n"
+ "ld1sb { z0.s }, p1/Z, [x16]\n"
"addvl x22, SP, #12\n"
- "ld1sb { z22.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z0.h, z2.h, z22.h\n"
- "add z0.h, z0.h, z17.h\n"
- "ld1sb { z14.s }, p1/Z, [x23]\n"
+ "ld1sb { z19.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
- "ld1sb { z6.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z1.h, z14.h, z6.h\n"
- "add z1.h, z1.h, z17.h\n"
- "ld1sb { z15.s }, p1/Z, [x23]\n"
+ "ld1sb { z4.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- "ld1sb { z6.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z2.h, z15.h, z6.h\n"
- "add z2.h, z2.h, z17.h\n"
- "ld1sb { z21.s }, p1/Z, [x23]\n"
+ "ld1sb { z3.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16f7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z15.h\n"
- "ld1sb { z30.s }, p1/Z, [x23]\n"
- "trn1 z3.h, z21.h, z30.h\n"
- ".inst 0xc16e7409 // sdot za.s[x11, 1], { z0.h-z1.h }, z14.h\n"
- ".inst 0xa1402aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16d740a // sdot za.s[x11, 2], { z0.h-z1.h }, z13.h\n"
- "add z3.h, z3.h, z17.h\n"
- ".inst 0xa0412ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc165740b // sdot za.s[x11, 3], { z0.h-z1.h }, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16f7428 // sdot za.s[x11, 0], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e7429 // sdot za.s[x11, 1], { z1.h-z2.h }, z14.h\n"
- ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16b740c // sdot za.s[x11, 4], { z0.h-z1.h }, z11.h\n"
- ".inst 0xc16a740d // sdot za.s[x11, 5], { z0.h-z1.h }, z10.h\n"
- ".inst 0xc16f742a // sdot za.s[x11, 2], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e742b // sdot za.s[x11, 3], { z1.h-z2.h }, z14.h\n"
- ".inst 0xa0412a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1697448 // sdot za.s[x11, 0], { z2.h-z3.h }, z9.h\n"
- ".inst 0xc1687449 // sdot za.s[x11, 1], { z2.h-z3.h }, z8.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f742c // sdot za.s[x11, 4], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e742d // sdot za.s[x11, 5], { z1.h-z2.h }, z14.h\n"
- ".inst 0xc16b744a // sdot za.s[x11, 2], { z2.h-z3.h }, z11.h\n"
- ".inst 0xc16a744b // sdot za.s[x11, 3], { z2.h-z3.h }, z10.h\n"
- ".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc161744c // sdot za.s[x11, 4], { z2.h-z3.h }, z1.h\n"
- ".inst 0xc160744d // sdot za.s[x11, 5], { z2.h-z3.h }, z0.h\n"
+ "ld1sb { z17.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z22.h, z0.h, z19.h\n"
+ "ld1sb { z25.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z23.h, z4.h, z3.h\n"
+ "ld1sb { z9.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+ "add z22.h, z22.h, z15.h\n"
+ "trn1 z24.h, z17.h, z25.h\n"
+ "ld1sb { z17.s }, p1/Z, [x23]\n"
+ ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ "add z23.h, z23.h, z15.h\n"
+ ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ "trn1 z25.h, z9.h, z17.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16576c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc16576ca // sdot za.s[x11, 2], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476cb // sdot za.s[x11, 3], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16776e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z7.h\n"
+ ".inst 0xc16676e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16576cc // sdot za.s[x11, 4], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476cd // sdot za.s[x11, 5], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xc16776ea // sdot za.s[x11, 2], { z23.h-z24.h }, z7.h\n"
+ ".inst 0xc16676eb // sdot za.s[x11, 3], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1617708 // sdot za.s[x11, 0], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xc1607709 // sdot za.s[x11, 1], { z24.h-z25.h }, z0.h\n"
+ ".inst 0xa0422aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16576ec // sdot za.s[x11, 4], { z23.h-z24.h }, z5.h\n"
+ ".inst 0xc16476ed // sdot za.s[x11, 5], { z23.h-z24.h }, z4.h\n"
+ ".inst 0xc167770a // sdot za.s[x11, 2], { z24.h-z25.h }, z7.h\n"
+ ".inst 0xc166770b // sdot za.s[x11, 3], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1422a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d770c // sdot za.s[x11, 4], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc165770d // sdot za.s[x11, 5], { z24.h-z25.h }, z5.h\n"
"11:" // Unpadded: 1 priming loads
"add x24, x16, %x[ld_in_row]\n"
- "ld1sb { z0.s }, p1/Z, [x16]\n"
+ "ld1sb { z16.s }, p1/Z, [x16]\n"
"addvl x23, SP, #6\n"
- "ld1sb { z3.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z28.h, z0.h, z3.h\n"
- "add z28.h, z28.h, z17.h\n"
- "ld1sb { z6.s }, p1/Z, [x24]\n"
+ "ld1sb { z22.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
"addvl x22, SP, #12\n"
- "ld1sb { z30.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z29.h, z6.h, z30.h\n"
- "add z29.h, z29.h, z17.h\n"
- "ld1sb { z1.s }, p1/Z, [x24]\n"
+ "ld1sb { z19.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
"ld1sb { z25.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
- "trn1 z30.h, z1.h, z25.h\n"
- "add z30.h, z30.h, z17.h\n"
- "ld1sb { z3.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1617788 // sdot za.s[x11, 0], { z28.h-z29.h }, z1.h\n"
+ "ld1sb { z6.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z18.h, z16.h, z22.h\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1sb { z5.s }, p1/Z, [x24]\n"
- "trn1 z31.h, z3.h, z5.h\n"
- ".inst 0xc1607789 // sdot za.s[x11, 1], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16e778a // sdot za.s[x11, 2], { z28.h-z29.h }, z14.h\n"
- "add z31.h, z31.h, z17.h\n"
- ".inst 0xa1412ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc166778b // sdot za.s[x11, 3], { z28.h-z29.h }, z6.h\n"
- ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16a77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z10.h\n"
- ".inst 0xc16277a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z2.h\n"
- ".inst 0xa0412ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xa1422ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16f778c // sdot za.s[x11, 4], { z28.h-z29.h }, z15.h\n"
- ".inst 0xc16e778d // sdot za.s[x11, 5], { z28.h-z29.h }, z14.h\n"
- ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16977aa // sdot za.s[x11, 2], { z29.h-z30.h }, z9.h\n"
- ".inst 0xc16877ab // sdot za.s[x11, 3], { z29.h-z30.h }, z8.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a77c8 // sdot za.s[x11, 0], { z30.h-z31.h }, z10.h\n"
- ".inst 0xc16277c9 // sdot za.s[x11, 1], { z30.h-z31.h }, z2.h\n"
- ".inst 0xa1422ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16e778e // sdot za.s[x11, 6], { z28.h-z29.h }, z14.h\n"
- ".inst 0xc166778f // sdot za.s[x11, 7], { z28.h-z29.h }, z6.h\n"
- ".inst 0xc16d77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z13.h\n"
- ".inst 0xc16577ad // sdot za.s[x11, 5], { z29.h-z30.h }, z5.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a77ca // sdot za.s[x11, 2], { z30.h-z31.h }, z10.h\n"
- ".inst 0xc16277cb // sdot za.s[x11, 3], { z30.h-z31.h }, z2.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z14.h\n"
- ".inst 0xc16677af // sdot za.s[x11, 7], { z29.h-z30.h }, z6.h\n"
- ".inst 0xc16977cc // sdot za.s[x11, 4], { z30.h-z31.h }, z9.h\n"
- ".inst 0xc16877cd // sdot za.s[x11, 5], { z30.h-z31.h }, z8.h\n"
- ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16e77ce // sdot za.s[x11, 6], { z30.h-z31.h }, z14.h\n"
- ".inst 0xc16677cf // sdot za.s[x11, 7], { z30.h-z31.h }, z6.h\n"
+ "ld1sb { z4.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z19.h, z19.h, z25.h\n"
+ "ld1sb { z27.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ ".inst 0xa1402ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
+ "add z18.h, z18.h, z15.h\n"
+ "trn1 z20.h, z6.h, z4.h\n"
+ "ld1sb { z22.s }, p1/Z, [x24]\n"
+ ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ "add z19.h, z19.h, z15.h\n"
+ ".inst 0xa1422ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "trn1 z21.h, z27.h, z22.h\n"
+ "add z20.h, z20.h, z15.h\n"
+ ".inst 0xc1697648 // sdot za.s[x11, 0], { z18.h-z19.h }, z9.h\n"
+ ".inst 0xc1617649 // sdot za.s[x11, 1], { z18.h-z19.h }, z1.h\n"
+ ".inst 0xa1402ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22]\n"
+ "add z21.h, z21.h, z15.h\n"
+ ".inst 0xc16c764a // sdot za.s[x11, 2], { z18.h-z19.h }, z12.h\n"
+ ".inst 0xc164764b // sdot za.s[x11, 3], { z18.h-z19.h }, z4.h\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16d7668 // sdot za.s[x11, 0], { z19.h-z20.h }, z13.h\n"
+ ".inst 0xc1657669 // sdot za.s[x11, 1], { z19.h-z20.h }, z5.h\n"
+ ".inst 0xa1412ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163764c // sdot za.s[x11, 4], { z18.h-z19.h }, z3.h\n"
+ ".inst 0xc162764d // sdot za.s[x11, 5], { z18.h-z19.h }, z2.h\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16c766a // sdot za.s[x11, 2], { z19.h-z20.h }, z12.h\n"
+ ".inst 0xc164766b // sdot za.s[x11, 3], { z19.h-z20.h }, z4.h\n"
+ ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16e7688 // sdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
+ ".inst 0xc1667689 // sdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
+ ".inst 0xa1422ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc169764e // sdot za.s[x11, 6], { z18.h-z19.h }, z9.h\n"
+ ".inst 0xc161764f // sdot za.s[x11, 7], { z18.h-z19.h }, z1.h\n"
+ ".inst 0xc163766c // sdot za.s[x11, 4], { z19.h-z20.h }, z3.h\n"
+ ".inst 0xc162766d // sdot za.s[x11, 5], { z19.h-z20.h }, z2.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16c768a // sdot za.s[x11, 2], { z20.h-z21.h }, z12.h\n"
+ ".inst 0xc164768b // sdot za.s[x11, 3], { z20.h-z21.h }, z4.h\n"
+ ".inst 0xa1422aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc169766e // sdot za.s[x11, 6], { z19.h-z20.h }, z9.h\n"
+ ".inst 0xc161766f // sdot za.s[x11, 7], { z19.h-z20.h }, z1.h\n"
+ ".inst 0xc16c768c // sdot za.s[x11, 4], { z20.h-z21.h }, z12.h\n"
+ ".inst 0xc164768d // sdot za.s[x11, 5], { z20.h-z21.h }, z4.h\n"
+ ".inst 0xa0422a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d768e // sdot za.s[x11, 6], { z20.h-z21.h }, z13.h\n"
+ ".inst 0xc16c768f // sdot za.s[x11, 7], { z20.h-z21.h }, z12.h\n"
"12:" // Unpadded: 0 priming loads
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
"cbz x25, 22f\n"
"add x20, x16, %x[ld_in_row]\n"
- "ld1sb { z26.s }, p1/Z, [x16]\n"
+ "ld1sb { z6.s }, p1/Z, [x16]\n"
"sub x25, x25, #0x1\n"
- "ld1sb { z28.s }, p1/Z, [x20]\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z26.h, z28.h\n"
"sub x15, x15, #0x1\n"
- "ld1sb { z31.s }, p1/Z, [x20]\n"
+ "ld1sb { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"cmp x25, x15\n"
- "add z25.h, z25.h, z17.h\n"
- "ld1sb { z15.s }, p1/Z, [x20]\n"
+ "ld1sb { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z31.h, z15.h\n"
"csel x25, x25, x15, LT\n"
- "ld1sb { z22.s }, p1/Z, [x20]\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z26.h, z26.h, z17.h\n"
+ "trn1 z24.h, z6.h, z13.h\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1sb { z8.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z27.h, z22.h, z8.h\n"
- "add z27.h, z27.h, z17.h\n"
- "ld1sb { z21.s }, p1/Z, [x20]\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"sub x15, x15, x25\n"
- "ld1sb { z20.s }, p1/Z, [x20]\n"
- "trn1 z28.h, z21.h, z20.h\n"
- "add z28.h, z28.h, z17.h\n"
+ "ld1sb { z22.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z25.h, z21.h, z19.h\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "trn1 z26.h, z20.h, z13.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ "trn1 z27.h, z22.h, z16.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ "add z26.h, z26.h, z15.h\n"
+ "add z27.h, z27.h, z15.h\n"
"cbz x25, 21f\n"
"13:" // Unpadded: Main loop
"addvl x24, SP, #6\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
"addvl x23, SP, #12\n"
- "ld1sb { z21.s }, p1/Z, [x16]\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402b0e // ld1h { z14.h-z15.h }, pn10.b/Z, [x24]\n"
+ "ld1sb { z23.s }, p1/Z, [x16]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
"addvl x22, SP, #18\n"
"addvl x21, SP, #24\n"
- ".inst 0xc16f772a // sdot za.s[x11, 2], { z25.h-z26.h }, z15.h\n"
"add x20, x16, %x[ld_in_row]\n"
- "ld1sb { z0.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16e772b // sdot za.s[x11, 3], { z25.h-z26.h }, z14.h\n"
- ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
"subs x25, x25, #0x1\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- "ld1sb { z20.s }, p1/Z, [x20]\n"
+ "ld1sb { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412b05 // ld1h { z5.h, z13.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
- ".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
- "ld1sb { z31.s }, p1/Z, [x20]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc169770a // sdot za.s[x11, 2], { z24.h-z25.h }, z9.h\n"
+ "ld1sb { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
- "ld1sb { z29.s }, p1/Z, [x20]\n"
+ ".inst 0xc161770b // sdot za.s[x11, 3], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ "ld1sb { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc165774b // sdot za.s[x11, 3], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412b04 // ld1h { z4.h, z12.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
"ld1sb { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc16f772e // sdot za.s[x11, 6], { z25.h-z26.h }, z15.h\n"
- "ld1sb { z30.s }, p1/Z, [x20]\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1402ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc16c772a // sdot za.s[x11, 2], { z25.h-z26.h }, z12.h\n"
+ "ld1sb { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc164772b // sdot za.s[x11, 3], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16e772f // sdot za.s[x11, 7], { z25.h-z26.h }, z14.h\n"
- ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16d774c // sdot za.s[x11, 4], { z26.h-z27.h }, z13.h\n"
- "ld1sb { z6.s }, p1/Z, [x20]\n"
- ".inst 0xc165774d // sdot za.s[x11, 5], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16a776a // sdot za.s[x11, 2], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776b // sdot za.s[x11, 3], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422b02 // ld1h { z2.h-z3.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc16d770e // sdot za.s[x11, 6], { z24.h-z25.h }, z13.h\n"
+ "ld1sb { z16.s }, p1/Z, [x20]\n"
+ ".inst 0xc165770f // sdot za.s[x11, 7], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a776c // sdot za.s[x11, 4], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776d // sdot za.s[x11, 5], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc169776e // sdot za.s[x11, 6], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776f // sdot za.s[x11, 7], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z15.h\n"
- ".inst 0xc16e1729 // sdot za.s[x8, 1], { z25.h-z26.h }, z14.h\n"
- "trn1 z25.h, z21.h, z0.h\n"
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16d1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z13.h\n"
- "add z25.h, z25.h, z17.h\n"
- ".inst 0xc1651749 // sdot za.s[x8, 1], { z26.h-z27.h }, z5.h\n"
- "trn1 z26.h, z20.h, z31.h\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- ".inst 0xc16b1768 // sdot za.s[x8, 0], { z27.h-z28.h }, z11.h\n"
- "add z26.h, z26.h, z17.h\n"
- ".inst 0xc16a1769 // sdot za.s[x8, 1], { z27.h-z28.h }, z10.h\n"
- "trn1 z27.h, z29.h, z22.h\n"
- "trn1 z28.h, z30.h, z6.h\n"
+ ".inst 0xc161774c // sdot za.s[x11, 4], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc160774d // sdot za.s[x11, 5], { z26.h-z27.h }, z0.h\n"
+ ".inst 0xa0422ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc167774e // sdot za.s[x11, 6], { z26.h-z27.h }, z7.h\n"
+ ".inst 0xc166774f // sdot za.s[x11, 7], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa1422aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16c1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc1641709 // sdot za.s[x8, 1], { z24.h-z25.h }, z4.h\n"
+ "trn1 z24.h, z23.h, z19.h\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16d1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc1651729 // sdot za.s[x8, 1], { z25.h-z26.h }, z5.h\n"
+ "trn1 z25.h, z21.h, z20.h\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16e1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z14.h\n"
+ ".inst 0xc1661749 // sdot za.s[x8, 1], { z26.h-z27.h }, z6.h\n"
+ "trn1 z26.h, z22.h, z18.h\n"
+ "trn1 z27.h, z17.h, z16.h\n"
"add x8, x8, #0x2\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "add z27.h, z27.h, z17.h\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ "add z26.h, z26.h, z15.h\n"
+ "add z27.h, z27.h, z15.h\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "add z28.h, z28.h, z17.h\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 13b\n"
"b 21f\n"
@@ -667,513 +672,513 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z9.s }, p0/Z, [x16]\n"
- "add z9.h, p0/M, z9.h, z17.h\n"
"add x21, x16, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0412a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1sb { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x21]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1sb { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
+ "ld1sb { z26.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z9.h, z22.h\n"
- "trn1 z0.h, z21.h, z20.h\n"
+ "add z26.h, p0/M, z26.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
+ "ld1sb { z18.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z26.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
+ ".inst 0xc16e76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z14.h\n"
+ "ld1sb { z25.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc16676c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z6.h\n"
+ "add z25.h, p0/M, z25.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x21]\n"
- "addvl x20, SP, #24\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1sb { z1.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z25.h\n"
+ "add z1.h, p0/M, z1.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "trn1 z1.h, z22.h, z20.h\n"
- "ld1sb { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16a77e8 // sdot za.s[x11, 0], { z31.h-z0.h }, z10.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc16277e9 // sdot za.s[x11, 1], { z31.h-z0.h }, z2.h\n"
- ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "trn1 z2.h, z21.h, z20.h\n"
- ".inst 0xc16d7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z13.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1657409 // sdot za.s[x11, 1], { z0.h-z1.h }, z5.h\n"
- ".inst 0xc1697428 // sdot za.s[x11, 0], { z1.h-z2.h }, z9.h\n"
- ".inst 0xc1687429 // sdot za.s[x11, 1], { z1.h-z2.h }, z8.h\n"
+ ".inst 0xc16d76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z13.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16c76e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ "trn1 z25.h, z1.h, z16.h\n"
+ ".inst 0xc1637708 // sdot za.s[x11, 0], { z24.h-z25.h }, z3.h\n"
+ ".inst 0xc1627709 // sdot za.s[x11, 1], { z24.h-z25.h }, z2.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z5.s }, p0/Z, [x16]\n"
- "add z5.h, p0/M, z5.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #18\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1412aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "ld1sb { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z0.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z28.h, z5.h, z22.h\n"
- "trn1 z29.h, z21.h, z20.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z1.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z14.h\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1667409 // sdot za.s[x11, 1], { z0.h-z1.h }, z6.h\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e740a // sdot za.s[x11, 2], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc166740b // sdot za.s[x11, 3], { z0.h-z1.h }, z6.h\n"
+ "ld1sb { z0.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z2.h, z18.h, z17.h\n"
+ "add z0.h, p0/M, z0.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "trn1 z30.h, z22.h, z20.h\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #24\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc1617788 // sdot za.s[x11, 0], { z28.h-z29.h }, z1.h\n"
- ".inst 0xc1607789 // sdot za.s[x11, 1], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- "trn1 z31.h, z21.h, z20.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc169778a // sdot za.s[x11, 2], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc161778b // sdot za.s[x11, 3], { z28.h-z29.h }, z1.h\n"
- ".inst 0xa1422aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z15.h\n"
- ".inst 0xc16e77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z14.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16977aa // sdot za.s[x11, 2], { z29.h-z30.h }, z9.h\n"
- ".inst 0xc16177ab // sdot za.s[x11, 3], { z29.h-z30.h }, z1.h\n"
- ".inst 0xc16b77c8 // sdot za.s[x11, 0], { z30.h-z31.h }, z11.h\n"
- ".inst 0xc16377c9 // sdot za.s[x11, 1], { z30.h-z31.h }, z3.h\n"
- ".inst 0xa0422a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f77ca // sdot za.s[x11, 2], { z30.h-z31.h }, z15.h\n"
- ".inst 0xc16e77cb // sdot za.s[x11, 3], { z30.h-z31.h }, z14.h\n"
+ ".inst 0xc16c7428 // sdot za.s[x11, 0], { z1.h-z2.h }, z12.h\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
+ ".inst 0xc1647429 // sdot za.s[x11, 1], { z1.h-z2.h }, z4.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
+ ".inst 0xc16e742a // sdot za.s[x11, 2], { z1.h-z2.h }, z14.h\n"
+ ".inst 0xc166742b // sdot za.s[x11, 3], { z1.h-z2.h }, z6.h\n"
+ "trn1 z3.h, z0.h, z17.h\n"
+ ".inst 0xc16d7448 // sdot za.s[x11, 0], { z2.h-z3.h }, z13.h\n"
+ ".inst 0xc1657449 // sdot za.s[x11, 1], { z2.h-z3.h }, z5.h\n"
+ ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16e744a // sdot za.s[x11, 2], { z2.h-z3.h }, z14.h\n"
+ ".inst 0xc166744b // sdot za.s[x11, 3], { z2.h-z3.h }, z6.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x23, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z29.s }, p0/Z, [x16]\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x22, SP, #12\n"
+ ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
+ "addvl x21, SP, #18\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1412ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ "ld1sb { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z8.h, z29.h, z22.h\n"
- "trn1 z9.h, z21.h, z20.h\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16376c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z3.h\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xc16276c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z2.h\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cb // sdot za.s[x11, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1sb { z17.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ ".inst 0xc16976cc // sdot za.s[x11, 4], { z22.h-z23.h }, z9.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- "trn1 z10.h, z22.h, z20.h\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16f7508 // sdot za.s[x11, 0], { z8.h-z9.h }, z15.h\n"
- ".inst 0xc16e7509 // sdot za.s[x11, 1], { z8.h-z9.h }, z14.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #24\n"
- "trn1 z11.h, z21.h, z20.h\n"
- ".inst 0xa1412ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16e750a // sdot za.s[x11, 2], { z8.h-z9.h }, z14.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc166750b // sdot za.s[x11, 3], { z8.h-z9.h }, z6.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16d7528 // sdot za.s[x11, 0], { z9.h-z10.h }, z13.h\n"
- ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc1657529 // sdot za.s[x11, 1], { z9.h-z10.h }, z5.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16f750c // sdot za.s[x11, 4], { z8.h-z9.h }, z15.h\n"
- ".inst 0xc16e750d // sdot za.s[x11, 5], { z8.h-z9.h }, z14.h\n"
- ".inst 0xc16d752a // sdot za.s[x11, 2], { z9.h-z10.h }, z13.h\n"
- ".inst 0xc165752b // sdot za.s[x11, 3], { z9.h-z10.h }, z5.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1617548 // sdot za.s[x11, 0], { z10.h-z11.h }, z1.h\n"
- ".inst 0xc1607549 // sdot za.s[x11, 1], { z10.h-z11.h }, z0.h\n"
- ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e752c // sdot za.s[x11, 4], { z9.h-z10.h }, z14.h\n"
- ".inst 0xc166752d // sdot za.s[x11, 5], { z9.h-z10.h }, z6.h\n"
- ".inst 0xc161754a // sdot za.s[x11, 2], { z10.h-z11.h }, z1.h\n"
- ".inst 0xc160754b // sdot za.s[x11, 3], { z10.h-z11.h }, z0.h\n"
- ".inst 0xa0422a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f754c // sdot za.s[x11, 4], { z10.h-z11.h }, z15.h\n"
- ".inst 0xc16e754d // sdot za.s[x11, 5], { z10.h-z11.h }, z14.h\n"
+ ".inst 0xc16176cd // sdot za.s[x11, 5], { z22.h-z23.h }, z1.h\n"
+ ".inst 0xc16c76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z12.h\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
+ ".inst 0xc16476e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z4.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0xc16e76ea // sdot za.s[x11, 2], { z23.h-z24.h }, z14.h\n"
+ ".inst 0xc16676eb // sdot za.s[x11, 3], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ ".inst 0xc16976ec // sdot za.s[x11, 4], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176ed // sdot za.s[x11, 5], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xc16d7708 // sdot za.s[x11, 0], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc1657709 // sdot za.s[x11, 1], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa0422aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc165770a // sdot za.s[x11, 2], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc164770b // sdot za.s[x11, 3], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x24, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z1.s }, p0/Z, [x16]\n"
- "add z1.h, p0/M, z1.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x23, SP, #6\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ "addvl x22, SP, #12\n"
+ "addvl x21, SP, #18\n"
+ ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ "addvl x20, SP, #24\n"
+ "ld1sb { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z21.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z1.h, z22.h\n"
- "trn1 z27.h, z21.h, z20.h\n"
+ "ld1sb { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z18.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z22.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e76a8 // sdot za.s[x11, 0], { z21.h-z22.h }, z14.h\n"
+ "ld1sb { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ ".inst 0xc16676a9 // sdot za.s[x11, 1], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
- "addvl x23, SP, #6\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16776aa // sdot za.s[x11, 2], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16676ab // sdot za.s[x11, 3], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21]\n"
+ "ld1sb { z17.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z23.h, z18.h, z16.h\n"
+ ".inst 0xc16776ac // sdot za.s[x11, 4], { z21.h-z22.h }, z7.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402aee // ld1h { z14.h-z15.h }, pn10.b/Z, [x23]\n"
- "trn1 z28.h, z22.h, z20.h\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16f7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z15.h\n"
- ".inst 0xc16e7749 // sdot za.s[x11, 1], { z26.h-z27.h }, z14.h\n"
- ".inst 0xa0402ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
- "trn1 z29.h, z21.h, z20.h\n"
- ".inst 0xa0412aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc161774a // sdot za.s[x11, 2], { z26.h-z27.h }, z1.h\n"
- "addvl x20, SP, #24\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc160774b // sdot za.s[x11, 3], { z26.h-z27.h }, z0.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16b7768 // sdot za.s[x11, 0], { z27.h-z28.h }, z11.h\n"
- ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16a7769 // sdot za.s[x11, 1], { z27.h-z28.h }, z10.h\n"
- ".inst 0xa0412aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16e774c // sdot za.s[x11, 4], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc166774d // sdot za.s[x11, 5], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16b776a // sdot za.s[x11, 2], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a776b // sdot za.s[x11, 3], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xc16676ad // sdot za.s[x11, 5], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16576c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z5.h\n"
+ "ld1sb { z16.s }, p0/Z, [x24]\n"
+ ".inst 0xc16476c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc16d76ae // sdot za.s[x11, 6], { z21.h-z22.h }, z13.h\n"
+ ".inst 0xc16c76af // sdot za.s[x11, 7], { z21.h-z22.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0xc16e76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cb // sdot za.s[x11, 3], { z22.h-z23.h }, z6.h\n"
".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc1697788 // sdot za.s[x11, 0], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc1687789 // sdot za.s[x11, 1], { z28.h-z29.h }, z8.h\n"
- ".inst 0xa1422ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
- ".inst 0xc16e776c // sdot za.s[x11, 4], { z27.h-z28.h }, z14.h\n"
- ".inst 0xc166776d // sdot za.s[x11, 5], { z27.h-z28.h }, z6.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a778a // sdot za.s[x11, 2], { z28.h-z29.h }, z10.h\n"
- ".inst 0xc162778b // sdot za.s[x11, 3], { z28.h-z29.h }, z2.h\n"
- ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e776e // sdot za.s[x11, 6], { z27.h-z28.h }, z14.h\n"
- ".inst 0xc166776f // sdot za.s[x11, 7], { z27.h-z28.h }, z6.h\n"
- ".inst 0xc161778c // sdot za.s[x11, 4], { z28.h-z29.h }, z1.h\n"
- ".inst 0xc160778d // sdot za.s[x11, 5], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1422a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16a778e // sdot za.s[x11, 6], { z28.h-z29.h }, z10.h\n"
- ".inst 0xc162778f // sdot za.s[x11, 7], { z28.h-z29.h }, z2.h\n"
+ "trn1 z24.h, z17.h, z16.h\n"
+ ".inst 0xc16e76cc // sdot za.s[x11, 4], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cd // sdot za.s[x11, 5], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16976e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc16776ce // sdot za.s[x11, 6], { z22.h-z23.h }, z7.h\n"
+ ".inst 0xc16676cf // sdot za.s[x11, 7], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xc16176ea // sdot za.s[x11, 2], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xc16076eb // sdot za.s[x11, 3], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xa1422aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16976ec // sdot za.s[x11, 4], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176ed // sdot za.s[x11, 5], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xa1422a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16c76ee // sdot za.s[x11, 6], { z23.h-z24.h }, z12.h\n"
+ ".inst 0xc16476ef // sdot za.s[x11, 7], { z23.h-z24.h }, z4.h\n"
"19:" // Padded: 0 priming loads
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
"cbz x25, 22f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z6.s }, p0/Z, [x16]\n"
- "add z6.h, p0/M, z6.h, z17.h\n"
"add x20, x16, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x25, x25, #0x1\n"
+ "sub x15, x15, #0x1\n"
+ "cmp x25, x15\n"
+ "ld1sb { z18.s }, p0/Z, [x16]\n"
+ "csel x25, x25, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "sub x15, x15, x25\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z30.s }, p0/Z, [x20]\n"
- "add z30.h, p0/M, z30.h, z17.h\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z17.h\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z17.h\n"
"mov x12, #0x4\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z6.h, z30.h\n"
- "trn1 z26.h, z27.h, z26.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z17.h\n"
+ "ld1sb { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z9.s }, p0/Z, [x20]\n"
- "add z9.h, p0/M, z9.h, z17.h\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z26.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- "sub x25, x25, #0x1\n"
- "sub x15, x15, #0x1\n"
- "cmp x25, x15\n"
- "trn1 z27.h, z8.h, z9.h\n"
- "trn1 z28.h, z21.h, z29.h\n"
- "csel x25, x25, x15, LT\n"
- "add x16, x16, %x[ld_in_col]\n"
- "sub x15, x15, x25\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ "trn1 z27.h, z17.h, z16.h\n"
"cbz x25, 21f\n"
"20:" // Padded: Main loop
"mov x12, #0x0\n"
+ "addvl x24, SP, #6\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z8.s }, p0/Z, [x16]\n"
- "add z8.h, p0/M, z8.h, z17.h\n"
- "add x24, x16, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z21.s }, p0/Z, [x24]\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
- "addvl x23, SP, #6\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
- "addvl x22, SP, #12\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
- ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402b05 // ld1h { z5.h, z13.h }, pn10.b/Z, [x24]\n"
+ "addvl x23, SP, #12\n"
+ "add x22, x16, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
"addvl x20, SP, #24\n"
- "ld1sb { z29.s }, p0/Z, [x24]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- "mov x12, #0x4\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "subs x25, x25, #0x1\n"
+ "ld1sb { z16.s }, p0/Z, [x16]\n"
+ ".inst 0xc16d770a // sdot za.s[x11, 2], { z24.h-z25.h }, z13.h\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc165770b // sdot za.s[x11, 3], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+ "ld1sb { z19.s }, p0/Z, [x22]\n"
+ ".inst 0xc169772a // sdot za.s[x11, 2], { z25.h-z26.h }, z9.h\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc161772b // sdot za.s[x11, 3], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xa1412ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ "add z19.h, p0/M, z19.h, z15.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422b02 // ld1h { z2.h-z3.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc165770e // sdot za.s[x11, 6], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc164770f // sdot za.s[x11, 7], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
- "ld1sb { z30.s }, p0/Z, [x24]\n"
- "add z30.h, p0/M, z30.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "ld1sb { z23.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "add z23.h, p0/M, z23.h, z15.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0xc16d772e // sdot za.s[x11, 6], { z25.h-z26.h }, z13.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc165772f // sdot za.s[x11, 7], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc167774c // sdot za.s[x11, 4], { z26.h-z27.h }, z7.h\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc166774d // sdot za.s[x11, 5], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa0422aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "subs x25, x25, #0x1\n"
- ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
- "ld1sb { z15.s }, p0/Z, [x24]\n"
- "add z15.h, p0/M, z15.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc165774b // sdot za.s[x11, 3], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa0412aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163774e // sdot za.s[x11, 6], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774f // sdot za.s[x11, 7], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "ld1sb { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16c1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z12.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
- "ld1sb { z20.s }, p0/Z, [x24]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc1641709 // sdot za.s[x8, 1], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ "trn1 z24.h, z16.h, z19.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16d1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc1651729 // sdot za.s[x8, 1], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "trn1 z25.h, z23.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
- "ld1sb { z31.s }, p0/Z, [x24]\n"
- "add z31.h, p0/M, z31.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc16b774c // sdot za.s[x11, 4], { z26.h-z27.h }, z11.h\n"
- "ld1sb { z22.s }, p0/Z, [x24]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- ".inst 0xc16a774d // sdot za.s[x11, 5], { z26.h-z27.h }, z10.h\n"
- ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc169776a // sdot za.s[x11, 2], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776b // sdot za.s[x11, 3], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16e774e // sdot za.s[x11, 6], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc166774f // sdot za.s[x11, 7], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc161776c // sdot za.s[x11, 4], { z27.h-z28.h }, z1.h\n"
- ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1422aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc169776e // sdot za.s[x11, 6], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776f // sdot za.s[x11, 7], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1631728 // sdot za.s[x8, 0], { z25.h-z26.h }, z3.h\n"
- ".inst 0xc1621729 // sdot za.s[x8, 1], { z25.h-z26.h }, z2.h\n"
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- "trn1 z25.h, z8.h, z21.h\n"
- ".inst 0xc16e1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc1661749 // sdot za.s[x8, 1], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "trn1 z26.h, z29.h, z30.h\n"
- ".inst 0xc16b1768 // sdot za.s[x8, 0], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a1769 // sdot za.s[x8, 1], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xc1631748 // sdot za.s[x8, 0], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc1621749 // sdot za.s[x8, 1], { z26.h-z27.h }, z2.h\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
"add x8, x8, #0x2\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "trn1 z27.h, z15.h, z20.h\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
- "trn1 z28.h, z31.h, z22.h\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ "trn1 z26.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ "ld1sb { z18.s }, p0/Z, [x22]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ "trn1 z27.h, z17.h, z18.h\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 20b\n"
"21:" // Main loop tail
"addvl x23, SP, #6\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
"addvl x22, SP, #12\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
"addvl x21, SP, #18\n"
"addvl x20, SP, #24\n"
- ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
- ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
+ ".inst 0xc16e770a // sdot za.s[x11, 2], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770b // sdot za.s[x11, 3], { z24.h-z25.h }, z6.h\n"
".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16d772a // sdot za.s[x11, 2], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc165772b // sdot za.s[x11, 3], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc16c770e // sdot za.s[x11, 6], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc164770f // sdot za.s[x11, 7], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc169774a // sdot za.s[x11, 2], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc161774b // sdot za.s[x11, 3], { z26.h-z27.h }, z1.h\n"
- ".inst 0xa1412ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa1422ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc169774c // sdot za.s[x11, 4], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc161774d // sdot za.s[x11, 5], { z26.h-z27.h }, z1.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16b776a // sdot za.s[x11, 2], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a776b // sdot za.s[x11, 3], { z27.h-z28.h }, z10.h\n"
- ".inst 0xa0422ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa0412a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc163776c // sdot za.s[x11, 4], { z27.h-z28.h }, z3.h\n"
- ".inst 0xc162776d // sdot za.s[x11, 5], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16a776e // sdot za.s[x11, 6], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776f // sdot za.s[x11, 7], { z27.h-z28.h }, z2.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16c774c // sdot za.s[x11, 4], { z26.h-z27.h }, z12.h\n"
+ ".inst 0xc164774d // sdot za.s[x11, 5], { z26.h-z27.h }, z4.h\n"
+ ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc161774e // sdot za.s[x11, 6], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc160774f // sdot za.s[x11, 7], { z26.h-z27.h }, z0.h\n"
".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z15.h\n"
- ".inst 0xc16e1729 // sdot za.s[x8, 1], { z25.h-z26.h }, z14.h\n"
- ".inst 0xc1691748 // sdot za.s[x8, 0], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc1681749 // sdot za.s[x8, 1], { z26.h-z27.h }, z8.h\n"
- ".inst 0xc1611768 // sdot za.s[x8, 0], { z27.h-z28.h }, z1.h\n"
- ".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xc16d1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc1651709 // sdot za.s[x8, 1], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc16e1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc1661729 // sdot za.s[x8, 1], { z25.h-z26.h }, z6.h\n"
+ ".inst 0xc1611748 // sdot za.s[x8, 0], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc1601749 // sdot za.s[x8, 1], { z26.h-z27.h }, z0.h\n"
"add x8, x8, #0x2\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"22:" // Main loop skip tail
"cbz x15, 24f\n"
"23:" // Right padding loop
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066818 // mova { z24.d-z25.d }, za.d[x11, #0]\n"
"add x8, x8, #0x2\n"
"subs x15, x15, #0x1\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc006683a // mova { z26.d-z27.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ ".inst 0xc1abaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z11.s\n"
+ ".inst 0xc1aaab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z10.s\n"
+ ".inst 0xc1becff8 // sclamp { z24.s-z27.s }, z31.s, z30.s\n"
+ "st1b { z24.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z26.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z25.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z27.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 23b\n"
"24:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
"incw x20, ALL, MUL #16\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x17\n"
- "whilelt p1.s, x17, x7\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -1192,6 +1197,8 @@ void sme2_s8q_planar_5x5_s1_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #30\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp
index 3da0d14d74..94aa79c747 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_s8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,194 +69,199 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x3, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0xb\n"
"ptrue p2.b\n"
- "mov x20, #0xb\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z7.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x3\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x20, x22, #0x8\n"
+ "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ldr x5, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x5\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x4\n"
+ "sub x21, x21, x4\n"
+ "mov SP, x20\n"
+ "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
"addvl SP, SP, #-15\n"
- "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z7.h, p2/M, z7.h\n"
+ "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "whilelt p1.s, XZR, x6\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z18.h, p2/M, z18.h\n"
+ "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "whilelt p8.s, XZR, x5\n"
+ "ld1rw { z19.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z21.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z12.s, #0x0\n"
+ "mov z20.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z12.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x20, x7, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z0.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z12.h, #0x0\n"
+ "addvl x22, SP, #15\n"
+ "addvl x22, x22, #-3\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z21.d, z20.d\n"
+ "mov z22.d, z20.d\n"
+ "mov z23.d, z20.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1sb { z24.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "ld1rh { z28.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "sub z13.h, z13.h, z28.h\n"
- "incw x22\n"
- "mov z26.h, #0x0\n"
- "ld1sb { z22.s }, p2/Z, [x20]\n"
+ "ld1sb { z30.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z22.h, z22.h, z28.h\n"
- "trn1 z17.h, z13.h, z22.h\n"
- "ld1sb { z20.s }, p2/Z, [x20]\n"
+ "ld1sb { z8.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z20.h, z20.h, z28.h\n"
- "addvl x21, SP, #15\n"
- "ld1sb { z1.s }, p2/Z, [x20]\n"
+ "ld1sb { z17.s }, p2/Z, [x20]\n"
+ "sub z24.h, z24.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "sub z1.h, z1.h, z28.h\n"
- "trn1 z29.h, z20.h, z1.h\n"
+ "sub z30.h, z30.h, z0.h\n"
+ "ld1sb { z26.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z8.h, z8.h, z0.h\n"
+ "sub z17.h, z17.h, z0.h\n"
+ "sub z26.h, z26.h, z0.h\n"
+ "trn1 z16.h, z24.h, z30.h\n"
"ld1sb { z27.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "sub z27.h, z27.h, z28.h\n"
- "incw x22\n"
- "ld1sb { z14.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "sub z14.h, z14.h, z28.h\n"
- "addvl x21, x21, #-3\n"
- "ld1sb { z18.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z18.h, z18.h, z28.h\n"
- "trn1 z22.h, z27.h, z26.h\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
+ "ld1sb { z11.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z23.h, z23.h, z28.h\n"
- "st1h { z17.h }, p2, [x21]\n"
- "ld1sb { z30.s }, p2/Z, [x20]\n"
+ "trn1 z15.h, z8.h, z17.h\n"
+ "ld1sb { z31.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z30.h, z30.h, z28.h\n"
- "trn1 z8.h, z14.h, z18.h\n"
- "ld1sb { z15.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "st1h { z29.h }, p2, [x21, #1, MUL VL]\n"
- "sub z15.h, z15.h, z28.h\n"
- "ld1sb { z20.s }, p2/Z, [x20]\n"
+ "ld1sb { z9.s }, p2/Z, [x20]\n"
+ "sub z27.h, z27.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z23.h, z23.h, z30.h\n"
- "sub z20.h, z20.h, z28.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "sub z24.h, z24.h, z28.h\n"
- "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+ "trn1 z24.h, z26.h, z12.h\n"
+ "sub z11.h, z11.h, z0.h\n"
+ "ld1sb { z10.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z16.h }, p2, [x22]\n"
+ "sub z31.h, z31.h, z0.h\n"
+ "incw x23\n"
+ "sub z9.h, z9.h, z0.h\n"
+ "st1h { z15.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z10.h, z10.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z11.h, z27.h, z11.h\n"
"ld1sb { z16.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z0.h, z15.h, z26.h\n"
- "incw x22\n"
- "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "ld1sb { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z16.h, z16.h, z28.h\n"
- "sub z13.h, z13.h, z28.h\n"
- "ld1sb { z11.s }, p2/Z, [x20]\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z8.h }, p2, [x21]\n"
- "trn1 z27.h, z20.h, z24.h\n"
- "ld1sb { z22.s }, p2/Z, [x20]\n"
+ "trn1 z13.h, z31.h, z9.h\n"
+ "ld1sb { z28.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z11.h, z11.h, z28.h\n"
- "ld1sb { z3.s }, p2/Z, [x20]\n"
+ "ld1sb { z26.s }, p2/Z, [x20]\n"
+ "sub z16.h, z16.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z23.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z20.h, z16.h, z13.h\n"
- "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "trn1 z8.h, z10.h, z12.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1sb { z14.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z11.h }, p2, [x22]\n"
+ "sub z28.h, z28.h, z0.h\n"
+ "incw x23\n"
+ "sub z26.h, z26.h, z0.h\n"
+ "st1h { z13.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "st1h { z8.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z13.h, z16.h, z2.h\n"
+ "ld1sb { z31.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z22.h, z22.h, z28.h\n"
- "sub z3.h, z3.h, z28.h\n"
- "ld1sb { z15.s }, p2/Z, [x20]\n"
+ "ld1sb { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z29.h, z11.h, z26.h\n"
+ "trn1 z30.h, z28.h, z26.h\n"
"ld1sb { z16.s }, p2/Z, [x20]\n"
- "incw x22\n"
- "sub z13.h, z13.h, z28.h\n"
- "sub z15.h, z15.h, z28.h\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z27.h }, p2, [x21]\n"
- "sub z16.h, z16.h, z28.h\n"
- "trn1 z19.h, z22.h, z3.h\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z20.h }, p2, [x21, #1, MUL VL]\n"
- "ld1sb { z0.s }, p2/Z, [x20]\n"
+ "ld1sb { z27.s }, p2/Z, [x20]\n"
+ "sub z31.h, z31.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z31.h, z13.h, z15.h\n"
- "st1h { z29.h }, p2, [x21, #2, MUL VL]\n"
- "ld1sb { z18.s }, p2/Z, [x20]\n"
+ "trn1 z17.h, z14.h, z12.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1sb { z4.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z13.h }, p2, [x22]\n"
+ "sub z16.h, z16.h, z0.h\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "st1h { z30.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z4.h, z4.h, z0.h\n"
+ "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z31.h, z31.h, z2.h\n"
+ "ld1sb { z29.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z16.h, z16.h, z26.h\n"
- "sub z17.h, z17.h, z28.h\n"
- "ld1sb { z22.s }, p2/Z, [x20]\n"
+ "ld1sb { z10.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z0.h, z0.h, z28.h\n"
- "sub z18.h, z18.h, z28.h\n"
- "ld1sb { z1.s }, p2/Z, [x20]\n"
- "sub z22.h, z22.h, z28.h\n"
- "sub z1.h, z1.h, z28.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "addvl x21, x21, #-3\n"
- "st1h { z19.h }, p2, [x21]\n"
- "mov z13.d, z12.d\n"
- "mov z14.d, z12.d\n"
- "st1h { z31.h }, p2, [x21, #1, MUL VL]\n"
- "mov z15.d, z12.d\n"
- "trn1 z8.h, z17.h, z0.h\n"
- "st1h { z16.h }, p2, [x21, #2, MUL VL]\n"
- "addvl x21, x21, #-3\n"
- "trn1 z31.h, z18.h, z22.h\n"
- "trn1 z29.h, z1.h, z26.h\n"
- "st1h { z8.h }, p2, [x21]\n"
- "st1h { z31.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z29.h }, p2, [x21, #2, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z6.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "trn1 z24.h, z16.h, z27.h\n"
+ "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "ld1sb { z8.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z4.h, z4.h, z12.h\n"
+ "sub z29.h, z29.h, z0.h\n"
+ "ld1sb { z11.s }, p2/Z, [x20]\n"
+ "sub z10.h, z10.h, z0.h\n"
+ "st1h { z31.h }, p2, [x22]\n"
+ "sub z13.h, z13.h, z0.h\n"
+ "sub z8.h, z8.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z11.h, z11.h, z0.h\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z14.h, z29.h, z10.h\n"
+ "trn1 z10.h, z13.h, z8.h\n"
+ "trn1 z4.h, z11.h, z12.h\n"
+ "st1h { z14.h }, p2, [x22]\n"
+ "st1h { z10.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x21, x7, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z4.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ld1w { z5.s }, p1/Z, [x20, x7, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x7, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x5, x23, LSL #22\n"
+ "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
"mov x22, #0xb\n"
- "add x21, x4, x3\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x5, x4\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x4, x17\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x17, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x5, x16\n"
+ "orr x20, x6, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x17, x4, x20, x17\n"
- ".inst 0xc0040d80 // mova za.d[x8, #0], { z12.d-z15.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040d81 // mova za.d[x8, #1], { z12.d-z15.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040e80 // mova za.d[x8, #0], { z20.d-z23.d }\n"
"mov x22, #0x4\n"
- "ldp x15, x14, [x23], #0x10\n"
- ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
- "ldp x13, x11, [x20], #0x10\n"
- ".inst 0xc0040d83 // mova za.d[x8, #3], { z12.d-z15.d }\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x16, x5, x21, x16\n"
+ ".inst 0xc0040e81 // mova za.d[x8, #1], { z20.d-z23.d }\n"
"ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "ldp x10, x9, [x23], #0x10\n"
- "ldp x28, x27, [x20], #0x10\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
+ "ldp x14, x13, [x23], #0x10\n"
+ ".inst 0xc0040e83 // mova za.d[x8, #3], { z20.d-z23.d }\n"
+ "ldp x11, x10, [x20], #0x10\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ "ldp x9, x28, [x23], #0x10\n"
+ "ldp x27, x26, [x20], #0x10\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
"csel x20, x21, x22, LT\n"
@@ -264,379 +269,379 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
"and x22, x21, #0x1\n"
- ".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
"add x21, x21, #0x1\n"
"lsr x21, x21, #0x1\n"
- ".inst 0xc1aaab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
- "sub x16, x16, x21\n"
- ".inst 0xc1b5ccbc // sclamp { z28.s-z31.s }, z5.s, z21.s\n"
+ "sub x15, x15, x21\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z28.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z29.s }, p1, [x14]\n"
+ "st1b { z28.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z30.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z31.s }, p1, [x9]\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x4, x3\n"
+ "adds XZR, x5, x4\n"
"bne 14f\n"
"cbz x22, 12f\n"
"cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "sub x17, x17, x22\n"
"beq 11f\n"
"cmp x22, #0x2\n"
"beq 10f\n"
"cmp x22, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1sb { z27.s }, p1/Z, [x17]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1sb { z8.s }, p1/Z, [x16]\n"
"addvl x20, SP, #12\n"
- "ld1sb { z0.s }, p1/Z, [x21]\n"
+ "ld1sb { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z0.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1sb { z28.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1sb { z9.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1sb { z11.s }, p1/Z, [x21]\n"
+ "ld1sb { z31.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z28.h, z28.h, z11.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1sb { z29.s }, p1/Z, [x21]\n"
+ "ld1sb { z10.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z8.s }, p1/Z, [x21]\n"
+ "trn1 z8.h, z8.h, z26.h\n"
+ "ld1sb { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z8.h\n"
- "add z29.h, z29.h, z7.h\n"
+ "ld1sb { z11.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z31.h\n"
"ld1sb { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
+ "ld1sb { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z17.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1sb { z31.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z26.s }, p1/Z, [x21]\n"
+ "trn1 z10.h, z10.h, z16.h\n"
+ "add z8.h, z8.h, z18.h\n"
+ "ld1sb { z28.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z26.h\n"
- "add z31.h, z31.h, z7.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
- "ld1sb { z20.s }, p1/Z, [x21]\n"
- "mov z0.d, z20.d\n"
- "add z0.h, z0.h, z7.h\n"
- ".inst 0xc1781788 // sdot za.s[x8, 0], { z28.h-z31.h }, z8.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17817a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z8.h\n"
+ "trn1 z11.h, z11.h, z30.h\n"
+ "add z9.h, z9.h, z18.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1sb { z2.s }, p1/Z, [x21]\n"
+ "add z10.h, z10.h, z18.h\n"
+ "trn1 z12.h, z12.h, z28.h\n"
+ "ld1h { z4.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "mov z13.d, z2.d\n"
+ "add z12.h, z12.h, z18.h\n"
+ ".inst 0xc1701508 // sdot za.s[x8, 0], { z8.h-z11.h }, z0.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xc1711528 // sdot za.s[x8, 0], { z9.h-z12.h }, z1.h\n"
+ ".inst 0xc1741548 // sdot za.s[x8, 0], { z10.h-z13.h }, z4.h\n"
"9:" // Unpadded: 3 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1sb { z29.s }, p1/Z, [x17]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1sb { z12.s }, p1/Z, [x16]\n"
"addvl x20, SP, #9\n"
- "ld1sb { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z17.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1sb { z30.s }, p1/Z, [x21]\n"
+ "ld1sb { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1sb { z0.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1sb { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z0.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1sb { z31.s }, p1/Z, [x21]\n"
+ "ld1sb { z2.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "ld1sb { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1sb { z0.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z11.h\n"
+ "ld1sb { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
+ "ld1sb { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z0.h, z0.h, z16.h\n"
- "add z0.h, z0.h, z7.h\n"
- "ld1sb { z1.s }, p1/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z2.h\n"
+ "ld1sb { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1sb { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z16.h\n"
- "add z1.h, z1.h, z7.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17217a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z2.h\n"
- "ld1sb { z16.s }, p1/Z, [x21]\n"
- "mov z2.d, z16.d\n"
- "add z2.h, z2.h, z7.h\n"
- ".inst 0xc17317c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z3.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17817e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z8.h\n"
+ "trn1 z14.h, z14.h, z24.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "ld1sb { z24.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ "ld1sb { z17.s }, p1/Z, [x21]\n"
+ "add z14.h, z14.h, z18.h\n"
+ "trn1 z16.h, z16.h, z24.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add z15.h, z15.h, z18.h\n"
+ "mov z17.d, z17.d\n"
+ "add z16.h, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "add z17.h, z17.h, z18.h\n"
+ ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17015c8 // sdot za.s[x8, 0], { z14.h-z17.h }, z0.h\n"
"10:" // Unpadded: 2 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1sb { z26.s }, p1/Z, [x17]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
"addvl x21, SP, #6\n"
"ld1sb { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z16.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1sb { z27.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #12\n"
- "ld1sb { z16.s }, p1/Z, [x22]\n"
+ "ld1sb { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1sb { z28.s }, p1/Z, [x22]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1sb { z26.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1sb { z29.s }, p1/Z, [x22]\n"
+ "ld1sb { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z28.h, z28.h, z29.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1sb { z29.s }, p1/Z, [x22]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "ld1sb { z24.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1sb { z19.s }, p1/Z, [x22]\n"
+ "ld1sb { z14.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z19.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1sb { z30.s }, p1/Z, [x22]\n"
+ "trn1 z12.h, z12.h, z26.h\n"
+ "ld1sb { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1sb { z23.s }, p1/Z, [x22]\n"
- "trn1 z30.h, z30.h, z23.h\n"
+ "ld1sb { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z30.h, z30.h, z7.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721748 // sdot za.s[x8, 0], { z26.h-z29.h }, z2.h\n"
- "ld1sb { z22.s }, p1/Z, [x22]\n"
- "mov z31.d, z22.d\n"
- ".inst 0xc1731768 // sdot za.s[x8, 0], { z27.h-z30.h }, z3.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1h { z3.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17b1769 // sdot za.s[x8, 1], { z27.h-z30.h }, z11.h\n"
- ".inst 0xc1731788 // sdot za.s[x8, 0], { z28.h-z31.h }, z3.h\n"
+ "trn1 z13.h, z13.h, z24.h\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1sb { z24.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "ld1sb { z16.s }, p1/Z, [x22]\n"
+ "add z13.h, z13.h, z18.h\n"
+ "trn1 z15.h, z15.h, z24.h\n"
+ "ld1h { z1.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z14.h, z14.h, z18.h\n"
+ "mov z16.d, z16.d\n"
+ "add z15.h, z15.h, z18.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17115a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z1.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701789 // sdot za.s[x8, 1], { z28.h-z31.h }, z0.h\n"
+ ".inst 0xc1781589 // sdot za.s[x8, 1], { z12.h-z15.h }, z8.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"11:" // Unpadded: 1 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1sb { z29.s }, p1/Z, [x17]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "ld1sb { z9.s }, p1/Z, [x16]\n"
"addvl x21, SP, #3\n"
- "ld1sb { z22.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z22.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1sb { z30.s }, p1/Z, [x22]\n"
+ "ld1sb { z4.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #9\n"
- "ld1sb { z25.s }, p1/Z, [x22]\n"
+ "ld1sb { z10.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z25.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1sb { z31.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1sb { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1sb { z0.s }, p1/Z, [x22]\n"
+ "ld1sb { z11.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z4.h\n"
+ "ld1sb { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "ld1sb { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z16.h\n"
"ld1sb { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z0.h, z0.h, z16.h\n"
- "add z0.h, z0.h, z7.h\n"
- "ld1sb { z1.s }, p1/Z, [x22]\n"
+ "ld1sb { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1sb { z2.s }, p1/Z, [x22]\n"
- "trn1 z1.h, z1.h, z2.h\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ "add z9.h, z9.h, z18.h\n"
+ "ld1sb { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z1.h, z1.h, z7.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17217a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z2.h\n"
- "ld1sb { z24.s }, p1/Z, [x22]\n"
- "mov z2.d, z24.d\n"
- ".inst 0xc17317c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z3.h\n"
- ".inst 0xa0402a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17817a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z8.h\n"
- "add z2.h, z2.h, z7.h\n"
- "ld1h { z3.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17917c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z9.h\n"
- ".inst 0xc17317e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z3.h\n"
- "ld1h { z3.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17317e9 // sdot za.s[x8, 1], { z31.h-z2.h }, z3.h\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z10.h, z10.h, z18.h\n"
+ ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ "ld1sb { z16.s }, p1/Z, [x22]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "trn1 z13.h, z13.h, z17.h\n"
+ "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z12.h, z12.h, z18.h\n"
+ "mov z14.d, z16.d\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xc1701528 // sdot za.s[x8, 0], { z9.h-z12.h }, z0.h\n"
+ "add z14.h, z14.h, z18.h\n"
+ ".inst 0xc1711548 // sdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1701529 // sdot za.s[x8, 1], { z9.h-z12.h }, z0.h\n"
+ ".inst 0xc1741568 // sdot za.s[x8, 0], { z11.h-z14.h }, z4.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1711549 // sdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
"12:" // Unpadded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
- "add x21, x17, %x[ld_in_row]\n"
- "ld1sb { z23.s }, p1/Z, [x17]\n"
- "sub x7, x7, #0x2\n"
- "ld1sb { z25.s }, p1/Z, [x21]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ "sub x17, x17, #0x2\n"
+ "ld1sb { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z25.h\n"
- "sub x16, x16, #0x1\n"
- "ld1sb { z24.s }, p1/Z, [x21]\n"
+ "sub x15, x15, #0x1\n"
+ "ld1sb { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x7, #0x1\n"
- "add z23.h, z23.h, z7.h\n"
- "ld1sb { z30.s }, p1/Z, [x21]\n"
+ "lsr x20, x17, #0x1\n"
+ "ld1sb { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z30.h\n"
- "cmp x20, x16\n"
- "ld1sb { z25.s }, p1/Z, [x21]\n"
+ "cmp x20, x15\n"
+ "ld1sb { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "csel x26, x20, x16, LT\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1sb { z22.s }, p1/Z, [x21]\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ "csel x25, x20, x15, LT\n"
+ "ld1sb { z4.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z22.h\n"
- "add z25.h, z25.h, z7.h\n"
- "ld1sb { z26.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1sb { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1sb { z22.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "and x17, x17, #0x1\n"
+ "ld1sb { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z22.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1sb { z27.s }, p1/Z, [x21]\n"
+ "sub x15, x15, x25\n"
+ "ld1sb { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "and x7, x7, #0x1\n"
- "ld1sb { z30.s }, p1/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z4.h\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1sb { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z30.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1sb { z28.s }, p1/Z, [x21]\n"
- "mov z28.d, z28.d\n"
- "add z28.h, z28.h, z7.h\n"
- "sub x16, x16, x26\n"
- "cbz x26, 21f\n"
+ "ld1sb { z30.s }, p1/Z, [x21]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ "mov z16.d, z30.d\n"
+ "add z14.h, z14.h, z18.h\n"
+ "add z15.h, z15.h, z18.h\n"
+ "add z16.h, z16.h, z18.h\n"
+ "cbz x25, 21f\n"
"13:" // Unpadded: Main loop
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
- "addvl x25, SP, #6\n"
- "addvl x24, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa0402b20 // ld1h { z0.h-z1.h }, pn10.b/Z, [x25]\n"
- "add x23, x17, %x[ld_in_row]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "addvl x24, SP, #6\n"
+ "addvl x20, SP, #12\n"
+ "add x23, x16, %x[ld_in_row]\n"
"addvl x22, SP, #3\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
"addvl x21, SP, #9\n"
- "subs x26, x26, #0x1\n"
- ".inst 0xc1711709 // sdot za.s[x8, 1], { z24.h-z27.h }, z1.h\n"
- ".inst 0xa0402b08 // ld1h { z8.h-z9.h }, pn10.b/Z, [x24]\n"
- ".inst 0xc17816ea // sdot za.s[x8, 2], { z23.h-z26.h }, z8.h\n"
- "ld1sb { z23.s }, p1/Z, [x17]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "add x20, x17, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x25, #2, MUL VL]\n"
- ".inst 0xc179170a // sdot za.s[x8, 2], { z24.h-z27.h }, z9.h\n"
- "ld1sb { z16.s }, p1/Z, [x23]\n"
+ "subs x25, x25, #0x1\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402b00 // ld1h { z0.h-z1.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ "ld1sb { z28.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z16.h\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "ld1h { z9.h }, p2/Z, [x24, #2, MUL VL]\n"
- "add z23.h, z23.h, z7.h\n"
- "ld1sb { z24.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a3ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z3.s\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ "ld1sb { z29.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc179172a // sdot za.s[x8, 2], { z25.h-z28.h }, z9.h\n"
- "ld1sb { z18.s }, p1/Z, [x23]\n"
+ "ld1sb { z9.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1sb { z25.s }, p1/Z, [x23]\n"
+ "trn1 z28.h, z28.h, z17.h\n"
+ ".inst 0xa0402aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1a5aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+ "ld1sb { z30.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "ld1sb { z8.s }, p1/Z, [x23]\n"
+ "trn1 z29.h, z29.h, z9.h\n"
+ "ld1sb { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z8.h\n"
- "add z25.h, z25.h, z7.h\n"
- "ld1sb { z26.s }, p1/Z, [x23]\n"
+ "add z28.h, z28.h, z18.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1sb { z31.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "ld1sb { z28.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a7ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
+ "ld1sb { z13.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z28.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1sb { z27.s }, p1/Z, [x23]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ "ld1sb { z0.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
- "ld1sb { z28.s }, p1/Z, [x23]\n"
- "trn1 z27.h, z27.h, z28.h\n"
+ "trn1 z30.h, z30.h, z17.h\n"
+ "add z29.h, z29.h, z18.h\n"
+ "ld1sb { z14.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "add z27.h, z27.h, z7.h\n"
- ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc17216e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z2.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- "ld1sb { z20.s }, p1/Z, [x23]\n"
- "mov z28.d, z20.d\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc1711709 // sdot za.s[x8, 1], { z24.h-z27.h }, z1.h\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- ".inst 0xc1701728 // sdot za.s[x8, 0], { z25.h-z28.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1sb { z23.s }, p1/Z, [x17]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "st1b { z17.s }, p1, [x14]\n"
+ "trn1 z31.h, z31.h, z13.h\n"
+ "ld1sb { z8.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a6ce78 // sclamp { z24.s-z27.s }, z19.s, z6.s\n"
+ "ld1h { z12.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "add z30.h, z30.h, z18.h\n"
+ "trn1 z0.h, z0.h, z14.h\n"
+ "mov z1.d, z8.d\n"
+ "add z31.h, z31.h, z18.h\n"
+ "st1b { z24.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "st1b { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "add z0.h, z0.h, z18.h\n"
+ "st1b { z26.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z1.h, z1.h, z18.h\n"
+ "st1b { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ ".inst 0xc17a1788 // sdot za.s[x8, 0], { z28.h-z31.h }, z10.h\n"
+ ".inst 0xc17b17a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z11.h\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc1781789 // sdot za.s[x8, 1], { z28.h-z31.h }, z8.h\n"
+ "ld1sb { z11.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17c17c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z12.h\n"
+ "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z16.h\n"
- "st1b { z18.s }, p1, [x10]\n"
- "ld1sb { z24.s }, p1/Z, [x20]\n"
+ ".inst 0xc17917a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z9.h\n"
+ "ld1sb { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add x10, x10, x28\n"
- "st1b { z19.s }, p1, [x9]\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "ld1sb { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z16.h\n"
- "add x9, x9, x27\n"
- "ld1sb { z25.s }, p1/Z, [x20]\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc17417c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z4.h\n"
+ "ld1sb { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "add z23.h, z23.h, z7.h\n"
+ "trn1 z12.h, z12.h, z9.h\n"
"ld1sb { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z16.h\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1sb { z26.s }, p1/Z, [x20]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1sb { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z25.h, z25.h, z7.h\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "ld1sb { z1.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z16.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1sb { z27.s }, p1/Z, [x20]\n"
+ "ld1sb { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "ld1sb { z16.s }, p1/Z, [x20]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "ld1sb { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "add z27.h, z27.h, z7.h\n"
"ld1sb { z16.s }, p1/Z, [x20]\n"
- "mov z28.d, z16.d\n"
- "add z28.h, z28.h, z7.h\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "trn1 z14.h, z14.h, z1.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ "mov z16.d, z16.d\n"
+ "add z14.h, z14.h, z18.h\n"
+ "add z15.h, z15.h, z18.h\n"
+ "add z16.h, z16.h, z18.h\n"
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
"cbz x22, 19f\n"
"cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "sub x17, x17, x22\n"
"beq 18f\n"
"cmp x22, #0x2\n"
"beq 17f\n"
@@ -644,686 +649,686 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z27.s }, p0/Z, [x17]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "addvl x20, SP, #12\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z28.s }, p0/Z, [x21]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1sb { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z17.h\n"
- "trn1 z28.h, z28.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z29.s }, p0/Z, [x21]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
+ "ld1sb { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x21]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z30.s }, p0/Z, [x21]\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
+ "ld1sb { z14.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z31.s }, p0/Z, [x21]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "ld1sb { z15.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x20, SP, #12\n"
+ ".inst 0xc1711568 // sdot za.s[x8, 0], { z11.h-z14.h }, z1.h\n"
+ "ld1sb { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z18.h\n"
- "trn1 z30.h, z30.h, z17.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
- "ld1sb { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
- "mov z0.d, z20.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1711788 // sdot za.s[x8, 0], { z28.h-z31.h }, z1.h\n"
- "ld1h { z1.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17117a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z1.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z24.s }, p0/Z, [x17]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "addvl x20, SP, #9\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z25.s }, p0/Z, [x21]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1sb { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z17.h\n"
- "trn1 z25.h, z25.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z26.s }, p0/Z, [x21]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
+ "ld1sb { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x21]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z27.s }, p0/Z, [x21]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
+ "ld1sb { z14.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z28.s }, p0/Z, [x21]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1sb { z15.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xc1711568 // sdot za.s[x8, 0], { z11.h-z14.h }, z1.h\n"
+ "ld1sb { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z18.h\n"
- "trn1 z27.h, z27.h, z17.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- "trn1 z28.h, z28.h, z16.h\n"
- ".inst 0xc1721708 // sdot za.s[x8, 0], { z24.h-z27.h }, z2.h\n"
- "ld1sb { z11.s }, p0/Z, [x21]\n"
- "add z11.h, p0/M, z11.h, z7.h\n"
- "mov z29.d, z11.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1731728 // sdot za.s[x8, 0], { z25.h-z28.h }, z3.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701748 // sdot za.s[x8, 0], { z26.h-z29.h }, z0.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z25.s }, p0/Z, [x17]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #6\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #12\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z17.h\n"
- "trn1 z26.h, z26.h, z16.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x21, SP, #6\n"
- "trn1 z27.h, z27.h, z18.h\n"
- "trn1 z28.h, z28.h, z17.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z29.h, z29.h, z16.h\n"
- ".inst 0xc1711728 // sdot za.s[x8, 0], { z25.h-z28.h }, z1.h\n"
- "ld1sb { z1.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #12\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- ".inst 0xc1791748 // sdot za.s[x8, 0], { z26.h-z29.h }, z9.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1721729 // sdot za.s[x8, 1], { z25.h-z28.h }, z2.h\n"
- "mov z30.d, z1.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z9.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- ".inst 0xc1791768 // sdot za.s[x8, 0], { z27.h-z30.h }, z9.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z25.s }, p0/Z, [x17]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #9\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z17.h\n"
- "trn1 z26.h, z26.h, z16.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x21, SP, #3\n"
- "trn1 z27.h, z27.h, z18.h\n"
- "trn1 z28.h, z28.h, z17.h\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z29.h, z29.h, z16.h\n"
- ".inst 0xc1731728 // sdot za.s[x8, 0], { z25.h-z28.h }, z3.h\n"
- "ld1sb { z0.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #9\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
- ".inst 0xc17b1748 // sdot za.s[x8, 0], { z26.h-z29.h }, z11.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1721729 // sdot za.s[x8, 1], { z25.h-z28.h }, z2.h\n"
- "mov z30.d, z0.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"19:" // Padded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "sub x17, x17, #0x2\n"
+ "sub x15, x15, #0x1\n"
+ "lsr x20, x17, #0x1\n"
+ "cmp x20, x15\n"
+ "and x17, x17, #0x1\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
+ "csel x25, x20, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "sub x15, x15, x25\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z16.h\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z19.s }, p0/Z, [x20]\n"
- "add z19.h, p0/M, z19.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "sub x7, x7, #0x2\n"
- "sub x16, x16, #0x1\n"
- "trn1 z25.h, z25.h, z19.h\n"
- "trn1 z26.h, z26.h, z18.h\n"
- "lsr x20, x7, #0x1\n"
- "cmp x20, x16\n"
- "trn1 z27.h, z27.h, z17.h\n"
- "mov z28.d, z16.d\n"
- "csel x25, x20, x16, LT\n"
- "add x17, x17, %x[ld_in_col]\n"
- "and x7, x7, #0x1\n"
- "sub x16, x16, x25\n"
+ "ld1sb { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ "mov z16.d, z16.d\n"
"cbz x25, 21f\n"
"20:" // Padded: Main loop
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa1402b00 // ld1h { z0.h, z8.h }, pn10.b/Z, [x24]\n"
+ "addvl x20, SP, #12\n"
"mov x12, #0x0\n"
+ "add x23, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
- "add x20, x17, %x[ld_in_row]\n"
"addvl x22, SP, #3\n"
- ".inst 0xc1781709 // sdot za.s[x8, 1], { z24.h-z27.h }, z8.h\n"
- ".inst 0xa1402ae3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x23]\n"
"addvl x21, SP, #9\n"
"subs x25, x25, #0x1\n"
- ".inst 0xc17316ea // sdot za.s[x8, 2], { z23.h-z26.h }, z3.h\n"
- "ld1sb { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402b00 // ld1h { z0.h-z1.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc178156a // sdot za.s[x8, 2], { z11.h-z14.h }, z8.h\n"
+ "ld1sb { z25.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z1.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ "add z25.h, p0/M, z25.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc179158a // sdot za.s[x8, 2], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa1402ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22]\n"
+ "ld1sb { z10.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc17115aa // sdot za.s[x8, 2], { z13.h-z16.h }, z1.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "add z10.h, p0/M, z10.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b170a // sdot za.s[x8, 2], { z24.h-z27.h }, z11.h\n"
- "ld1sb { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ "ld1sb { z26.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z25.h, z25.h, z10.h\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
+ "add z26.h, p0/M, z26.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1sb { z1.s }, p0/Z, [x20]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1h { z3.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "st1b { z28.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc173172a // sdot za.s[x8, 2], { z25.h-z28.h }, z3.h\n"
- "trn1 z23.h, z23.h, z16.h\n"
- "ld1sb { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ "ld1sb { z27.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z26.h, z26.h, z16.h\n"
+ "add z27.h, p0/M, z27.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z28.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z27.h, z27.h, z16.h\n"
+ "add z28.h, p0/M, z28.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z30.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z29.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z28.h, z28.h, z16.h\n"
+ "add z29.h, p0/M, z29.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "trn1 z24.h, z24.h, z1.h\n"
- "trn1 z25.h, z25.h, z3.h\n"
- "trn1 z26.h, z26.h, z30.h\n"
- ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "trn1 z27.h, z27.h, z29.h\n"
+ ".inst 0xc1741728 // sdot za.s[x8, 0], { z25.h-z28.h }, z4.h\n"
+ "ld1sb { z15.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17216e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z2.h\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
"mov x12, #0x0\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "ld1sb { z16.s }, p0/Z, [x23]\n"
+ "trn1 z29.h, z29.h, z15.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17216e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z2.h\n"
- "ld1sb { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ ".inst 0xc17c1748 // sdot za.s[x8, 0], { z26.h-z29.h }, z12.h\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ "mov z30.d, z16.d\n"
+ ".inst 0xc1711729 // sdot za.s[x8, 1], { z25.h-z28.h }, z1.h\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z7.h\n"
+ ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ ".inst 0xc1791749 // sdot za.s[x8, 1], { z26.h-z29.h }, z9.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1731709 // sdot za.s[x8, 1], { z24.h-z27.h }, z3.h\n"
- "ld1sb { z24.s }, p0/Z, [x20]\n"
- "mov z28.d, z20.d\n"
- "ld1h { z1.h }, p2/Z, [x22, #2, MUL VL]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
+ "ld1sb { z12.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z22.s }, p0/Z, [x20]\n"
- ".inst 0xc1711728 // sdot za.s[x8, 0], { z25.h-z28.h }, z1.h\n"
"mov x12, #0x4\n"
- "add z22.h, p0/M, z22.h, z7.h\n"
- "ld1h { z1.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1711729 // sdot za.s[x8, 1], { z25.h-z28.h }, z1.h\n"
- "ld1sb { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1sb { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
+ "ld1sb { z14.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z20.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
+ "ld1sb { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z31.s }, p0/Z, [x20]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
+ "ld1sb { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z17.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- "trn1 z23.h, z23.h, z8.h\n"
- "trn1 z24.h, z24.h, z22.h\n"
- "st1b { z18.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "trn1 z25.h, z25.h, z28.h\n"
- "trn1 z26.h, z26.h, z20.h\n"
- "st1b { z19.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "trn1 z27.h, z27.h, z31.h\n"
- "mov z28.d, z1.d\n"
+ "ld1sb { z16.s }, p0/Z, [x20]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ "mov z16.d, z16.d\n"
"bgt 20b\n"
"21:" // Main loop tail
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"addvl x24, SP, #6\n"
"addvl x23, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa0402b08 // ld1h { z8.h-z9.h }, pn10.b/Z, [x24]\n"
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17816e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z8.h\n"
- "add x22, x17, %x[ld_in_row]\n"
"addvl x21, SP, #3\n"
- ".inst 0xc1791709 // sdot za.s[x8, 1], { z24.h-z27.h }, z9.h\n"
- ".inst 0xa1402ae3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x23]\n"
"addvl x20, SP, #9\n"
- ".inst 0xc17316ea // sdot za.s[x8, 2], { z23.h-z26.h }, z3.h\n"
- "ld1sb { z29.s }, p0/Z, [x17]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1711569 // sdot za.s[x8, 1], { z11.h-z14.h }, z1.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1791589 // sdot za.s[x8, 1], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xc1a3ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z3.s\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z8.s }, p0/Z, [x22]\n"
- "add z8.h, p0/M, z8.h, z7.h\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "ld1sb { z10.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a5aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z10.h, p0/M, z10.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b170a // sdot za.s[x8, 2], { z24.h-z27.h }, z11.h\n"
- "ld1sb { z30.s }, p0/Z, [x22]\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
+ "ld1sb { z12.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "trn1 z11.h, z11.h, z10.h\n"
+ ".inst 0xc1a6ce78 // sclamp { z24.s-z27.s }, z19.s, z6.s\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1sb { z20.s }, p0/Z, [x22]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "st1b { z24.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z26.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc172172a // sdot za.s[x8, 2], { z25.h-z28.h }, z2.h\n"
- "trn1 z29.h, z29.h, z8.h\n"
- "ld1sb { z31.s }, p0/Z, [x22]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "st1b { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ "ld1sb { z13.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z25.s }, p0/Z, [x22]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z0.s }, p0/Z, [x22]\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
+ "ld1sb { z14.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z17.s }, p0/Z, [x22]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z1.s }, p0/Z, [x22]\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
+ "ld1sb { z15.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z28.s }, p0/Z, [x22]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "trn1 z30.h, z30.h, z20.h\n"
- "trn1 z31.h, z31.h, z25.h\n"
- "trn1 z0.h, z0.h, z17.h\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z1.h, z1.h, z28.h\n"
- ".inst 0xc17317a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z3.h\n"
- "ld1sb { z22.s }, p0/Z, [x22]\n"
- ".inst 0xc1a6ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
- "add z22.h, p0/M, z22.h, z7.h\n"
- ".inst 0xc17b17c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z11.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1a4aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z4.s\n"
- ".inst 0xc17317a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z3.h\n"
- "mov z2.d, z22.d\n"
- "ld1h { z9.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17b17c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z11.h\n"
- ".inst 0xc1aaab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z10.s\n"
- ".inst 0xc17917e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z9.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1b5ccb8 // sclamp { z24.s-z27.s }, z5.s, z21.s\n"
- "st1b { z24.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z25.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "st1b { z26.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- ".inst 0xc17817e9 // sdot za.s[x8, 1], { z31.h-z2.h }, z8.h\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z27.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"22:" // Main loop skip tail
- "cbz x7, 23f\n" // Skip remainder inputs
+ "cbz x17, 23f\n" // Skip remainder inputs
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z24.s }, p0/Z, [x17]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #6\n"
+ "addvl x20, SP, #12\n"
+ "sub x15, x15, #0x1\n"
+ "ld1sb { z11.s }, p0/Z, [x16]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z17.h\n"
- "trn1 z25.h, z25.h, z16.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1sb { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1sb { z16.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1sb { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1sb { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1sb { z31.s }, p0/Z, [x20]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1sb { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z26.h, z26.h, z17.h\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "ld1sb { z0.s }, p0/Z, [x20]\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
- "trn1 z28.h, z28.h, z31.h\n"
- "addvl x21, SP, #6\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- "mov z29.d, z0.d\n"
- "addvl x20, SP, #12\n"
- "sub x16, x16, #0x1\n"
- ".inst 0xc17b1728 // sdot za.s[x8, 0], { z25.h-z28.h }, z11.h\n"
- ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721748 // sdot za.s[x8, 0], { z26.h-z29.h }, z2.h\n"
+ "ld1sb { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
"ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1781709 // sdot za.s[x8, 1], { z24.h-z27.h }, z8.h\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- ".inst 0xc1791729 // sdot za.s[x8, 1], { z25.h-z28.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
- ".inst 0xc171170a // sdot za.s[x8, 2], { z24.h-z27.h }, z1.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- ".inst 0xc179172a // sdot za.s[x8, 2], { z25.h-z28.h }, z9.h\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- ".inst 0xc1721749 // sdot za.s[x8, 1], { z26.h-z29.h }, z2.h\n"
- "ld1h { z3.h }, p2/Z, [x20, #2, MUL VL]\n"
- "st1b { z17.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- ".inst 0xc173174a // sdot za.s[x8, 2], { z26.h-z29.h }, z3.h\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"add x8, x8, #0x1\n"
- "st1b { z18.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z19.s }, p1, [x9]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
+ "st1b { z28.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"23:" // Tail input: End
- "cbz x16, 25f\n"
+ "cbz x15, 25f\n"
"24:" // Right padding loop
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+ ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "subs x16, x16, #0x1\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- ".inst 0xc1aaab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
- ".inst 0xc1b5ccbc // sclamp { z28.s-z31.s }, z5.s, z21.s\n"
- "st1b { z28.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z29.s }, p1, [x14]\n"
+ "subs x15, x15, #0x1\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a3ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z3.s\n"
+ ".inst 0xc1a5aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z5.s\n"
+ ".inst 0xc1a7ab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc1a6ce68 // sclamp { z8.s-z11.s }, z19.s, z6.s\n"
+ "st1b { z8.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z30.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z31.s }, p1, [x9]\n"
+ "st1b { z9.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z10.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z11.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 24b\n"
"25:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x7\n"
+ "whilelt p1.s, x7, x6\n"
"incw x20, ALL, MUL #16\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x6\n"
- "whilelt p1.s, x6, x5\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -1342,9 +1347,11 @@ void sme2_s8q_planar_5x5_s2_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #15\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
index 60c3a1e632..40bfd5850a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,133 +70,138 @@ void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0x6\n"
"ptrue p2.b\n"
- "mov x20, #0x6\n"
"ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x22, #0x8\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z20.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x21, x6\n"
+ "mov SP, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-12\n"
+ "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z20.h, p2/M, z20.h\n"
+ "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x7\n"
- "addvl SP, SP, #-12\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z21.h, p2/M, z21.h\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z28.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z30.s, #0x0\n"
+ "mov z28.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z30.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x20, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1b { z10.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z23.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z25.h, #0x0\n"
+ "addvl x22, SP, #12\n"
+ "addvl x22, x22, #-4\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z29.d, z28.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1b { z0.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1rh { z31.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z7.h, #0x0\n"
- "sub z10.h, z10.h, z31.h\n"
- "incw x22\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
+ "ld1b { z26.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z16.h, z16.h, z31.h\n"
- "trn1 z20.h, z7.h, z10.h\n"
- "ld1b { z11.s }, p2/Z, [x20]\n"
- "sub z11.h, z11.h, z31.h\n"
- "mov x20, x22\n"
- "trn1 z19.h, z10.h, z16.h\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
+ "ld1b { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z0.h, z0.h, z23.h\n"
+ "sub z26.h, z26.h, z23.h\n"
+ "sub z15.h, z15.h, z23.h\n"
+ "trn1 z14.h, z25.h, z0.h\n"
+ "trn1 z2.h, z0.h, z26.h\n"
+ "ld1b { z21.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "trn1 z26.h, z16.h, z11.h\n"
- "trn1 z13.h, z11.h, z7.h\n"
- "ld1b { z11.s }, p2/Z, [x20]\n"
+ "trn1 z16.h, z26.h, z15.h\n"
+ "ld1b { z1.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z24.h, z24.h, z31.h\n"
- "sub z11.h, z11.h, z31.h\n"
- "ld1b { z2.s }, p2/Z, [x20]\n"
- "sub z2.h, z2.h, z31.h\n"
- "addvl x21, SP, #12\n"
- "incw x22\n"
- "addvl x21, x21, #-4\n"
- "mov x20, x22\n"
- "st1h { z20.h }, p2, [x21]\n"
- "trn1 z22.h, z7.h, z24.h\n"
- "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z1.h, z24.h, z11.h\n"
- "ld1b { z16.s }, p2/Z, [x20]\n"
+ "trn1 z15.h, z15.h, z25.h\n"
+ "ld1b { z11.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "sub z21.h, z21.h, z23.h\n"
+ "st1h { z14.h }, p2, [x22]\n"
+ "sub z1.h, z1.h, z23.h\n"
+ "st1h { z2.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z11.h, z11.h, z23.h\n"
+ "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z15.h }, p2, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #-4\n"
+ "trn1 z3.h, z25.h, z21.h\n"
+ "trn1 z14.h, z21.h, z1.h\n"
+ "ld1b { z15.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z3.h, z11.h, z2.h\n"
- "ld1b { z0.s }, p2/Z, [x20]\n"
+ "trn1 z10.h, z1.h, z11.h\n"
+ "ld1b { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z13.h }, p2, [x21, #3, MUL VL]\n"
- "trn1 z25.h, z2.h, z7.h\n"
- "ld1b { z4.s }, p2/Z, [x20]\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "sub z16.h, z16.h, z31.h\n"
- "sub z0.h, z0.h, z31.h\n"
- "addvl x21, x21, #-4\n"
- "st1h { z22.h }, p2, [x21]\n"
- "sub z4.h, z4.h, z31.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
- "mov z31.d, z30.d\n"
- "st1h { z3.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z24.h, z7.h, z16.h\n"
- "trn1 z18.h, z16.h, z0.h\n"
- "st1h { z25.h }, p2, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #-4\n"
- "trn1 z0.h, z0.h, z4.h\n"
- "trn1 z1.h, z4.h, z7.h\n"
- "st1h { z24.h }, p2, [x21]\n"
- "st1h { z18.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z1.h }, p2, [x21, #3, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z14.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "trn1 z26.h, z11.h, z25.h\n"
+ "ld1b { z16.s }, p2/Z, [x20]\n"
+ "sub z15.h, z15.h, z23.h\n"
+ "st1h { z3.h }, p2, [x22]\n"
+ "sub z9.h, z9.h, z23.h\n"
+ "st1h { z14.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z16.h, z16.h, z23.h\n"
+ "st1h { z10.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #-4\n"
+ "trn1 z22.h, z25.h, z15.h\n"
+ "trn1 z6.h, z15.h, z9.h\n"
+ "trn1 z12.h, z9.h, z16.h\n"
+ "trn1 z11.h, z16.h, z25.h\n"
+ "st1h { z22.h }, p2, [x22]\n"
+ "st1h { z6.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z12.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z11.h }, p2, [x22, #3, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z5.s }, p1/Z, [x21, x16, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z12.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x20, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
"mov x22, #0x6\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x7, x6\n"
+ "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x15, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x7, x14\n"
+ "orr x20, x17, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040bc0 // mova za.d[x8, #0], { z30.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040bc1 // mova za.d[x8, #1], { z30.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
"mov x22, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x14, x7, x21, x14\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040b82 // mova za.d[x8, #2], { z28.d-z29.d }\n"
"ldp x11, x10, [x23], #0x10\n"
- ".inst 0xc0040bc2 // mova za.d[x8, #2], { z30.d-z31.d }\n"
+ ".inst 0xc0040b83 // mova za.d[x8, #3], { z28.d-z29.d }\n"
"ldp x9, x28, [x20], #0x10\n"
- ".inst 0xc0040bc3 // mova za.d[x8, #3], { z30.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
"ldp x27, x26, [x23], #0x10\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
"ldp x25, x24, [x20], #0x10\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
@@ -204,22 +209,22 @@ void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
"sub x13, x13, x21\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -231,148 +236,148 @@ void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1b { z20.s }, p1/Z, [x14]\n"
+ "ld1b { z27.s }, p1/Z, [x14]\n"
"addvl x20, SP, #8\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z20.h, z16.h\n"
- "add z4.h, z4.h, z21.h\n"
- "ld1b { z23.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z22.s }, p1/Z, [x21]\n"
+ "ld1b { z3.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z5.h, z23.h, z22.h\n"
- "add z5.h, z5.h, z21.h\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
+ "ld1b { z1.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "trn1 z6.h, z17.h, z16.h\n"
- "add z6.h, z6.h, z21.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16b1488 // sdot za.s[x8, 0], { z4.h-z5.h }, z11.h\n"
- ".inst 0xc1631489 // sdot za.s[x8, 1], { z4.h-z5.h }, z3.h\n"
- ".inst 0xa1412a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16814a8 // sdot za.s[x8, 0], { z5.h-z6.h }, z8.h\n"
- ".inst 0xc16014a9 // sdot za.s[x8, 1], { z5.h-z6.h }, z0.h\n"
+ "ld1b { z12.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z15.h, z27.h, z16.h\n"
+ "ld1b { z18.s }, p1/Z, [x21]\n"
+ "trn1 z16.h, z3.h, z1.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z15.h, z15.h, z20.h\n"
+ "trn1 z17.h, z12.h, z18.h\n"
+ "add z16.h, z16.h, z20.h\n"
+ "add z17.h, z17.h, z20.h\n"
+ ".inst 0xc16b15e8 // sdot za.s[x8, 0], { z15.h-z16.h }, z11.h\n"
+ ".inst 0xc16a15e9 // sdot za.s[x8, 1], { z15.h-z16.h }, z10.h\n"
+ ".inst 0xc1631608 // sdot za.s[x8, 0], { z16.h-z17.h }, z3.h\n"
+ ".inst 0xc1621609 // sdot za.s[x8, 1], { z16.h-z17.h }, z2.h\n"
"9:" // Unpadded: 1 priming loads
"add x22, x14, %x[ld_in_row]\n"
- "ld1b { z25.s }, p1/Z, [x14]\n"
+ "ld1b { z22.s }, p1/Z, [x14]\n"
"addvl x21, SP, #4\n"
- "ld1b { z6.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z3.h, z25.h, z6.h\n"
- "add z3.h, z3.h, z21.h\n"
- "ld1b { z18.s }, p1/Z, [x22]\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #8\n"
- "ld1b { z26.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z4.h, z18.h, z26.h\n"
- "add z4.h, z4.h, z21.h\n"
- "ld1b { z2.s }, p1/Z, [x22]\n"
+ "ld1b { z19.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z5.s }, p1/Z, [x22]\n"
- "trn1 z5.h, z2.h, z5.h\n"
- "add z5.h, z5.h, z21.h\n"
+ "ld1b { z10.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z21.h, z22.h, z16.h\n"
+ "ld1b { z7.s }, p1/Z, [x22]\n"
+ "trn1 z22.h, z19.h, z10.h\n"
".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1611468 // sdot za.s[x8, 0], { z3.h-z4.h }, z1.h\n"
- ".inst 0xc1601469 // sdot za.s[x8, 1], { z3.h-z4.h }, z0.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- ".inst 0xa0412aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a146a // sdot za.s[x8, 2], { z3.h-z4.h }, z10.h\n"
- ".inst 0xc162146b // sdot za.s[x8, 3], { z3.h-z4.h }, z2.h\n"
- ".inst 0xc1691488 // sdot za.s[x8, 0], { z4.h-z5.h }, z9.h\n"
- ".inst 0xc1681489 // sdot za.s[x8, 1], { z4.h-z5.h }, z8.h\n"
- ".inst 0xa1412a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a148a // sdot za.s[x8, 2], { z4.h-z5.h }, z10.h\n"
- ".inst 0xc162148b // sdot za.s[x8, 3], { z4.h-z5.h }, z2.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add z21.h, z21.h, z20.h\n"
+ "trn1 z23.h, z11.h, z7.h\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
+ ".inst 0xc16116a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z1.h\n"
+ ".inst 0xc16016a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z0.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16e16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16616c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16916ca // sdot za.s[x8, 2], { z22.h-z23.h }, z9.h\n"
+ ".inst 0xc16116cb // sdot za.s[x8, 3], { z22.h-z23.h }, z1.h\n"
"10:" // Unpadded: 0 priming loads
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
"add x20, x14, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x14]\n"
+ "ld1b { z15.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x1\n"
- "ld1b { z9.s }, p1/Z, [x20]\n"
+ "ld1b { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z6.h, z17.h, z9.h\n"
"sub x13, x13, #0x1\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
+ "ld1b { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"cmp x15, x13\n"
- "add z6.h, z6.h, z21.h\n"
- "ld1b { z7.s }, p1/Z, [x20]\n"
+ "ld1b { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z7.h, z17.h, z7.h\n"
"csel x23, x15, x13, LT\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
+ "ld1b { z2.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z7.h, z7.h, z21.h\n"
+ "trn1 z21.h, z15.h, z0.h\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z1.s }, p1/Z, [x20]\n"
- "trn1 z8.h, z17.h, z1.h\n"
- "add z8.h, z8.h, z21.h\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
"sub x13, x13, x23\n"
+ "trn1 z22.h, z24.h, z9.h\n"
+ "trn1 z23.h, z2.h, z15.h\n"
+ "add z21.h, z21.h, z20.h\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
"cbz x23, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
"addvl x22, SP, #4\n"
"addvl x21, SP, #8\n"
- "ld1b { z2.s }, p1/Z, [x14]\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa1402ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22]\n"
+ "ld1b { z26.s }, p1/Z, [x14]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
"add x20, x14, %x[ld_in_row]\n"
"subs x23, x23, #0x1\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
+ "ld1b { z4.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa1412ac3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
- "ld1b { z23.s }, p1/Z, [x20]\n"
+ "ld1b { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc16d14ca // sdot za.s[x8, 2], { z6.h-z7.h }, z13.h\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ "ld1b { z3.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16514cb // sdot za.s[x8, 3], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc16914cc // sdot za.s[x8, 4], { z6.h-z7.h }, z9.h\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc16e16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z14.h\n"
+ "ld1b { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16114cd // sdot za.s[x8, 5], { z6.h-z7.h }, z1.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc16b14ea // sdot za.s[x8, 2], { z7.h-z8.h }, z11.h\n"
- "trn1 z6.h, z2.h, z19.h\n"
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16314eb // sdot za.s[x8, 3], { z7.h-z8.h }, z3.h\n"
+ ".inst 0xc16616ab // sdot za.s[x8, 3], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc0060810 // mova { z16.d-z17.d }, za.d[x8, #0]\n"
+ "ld1b { z11.s }, p1/Z, [x20]\n"
+ ".inst 0xc0060832 // mova { z18.d-z19.d }, za.d[x8, #1]\n"
+ ".inst 0xc16916ac // sdot za.s[x8, 4], { z21.h-z22.h }, z9.h\n"
+ ".inst 0xc16116ad // sdot za.s[x8, 5], { z21.h-z22.h }, z1.h\n"
+ "trn1 z21.h, z26.h, z4.h\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16f16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z15.h\n"
+ ".inst 0xc1a5ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z5.s\n"
+ ".inst 0xc16716cb // sdot za.s[x8, 3], { z22.h-z23.h }, z7.h\n"
".inst 0xa1412aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- ".inst 0xc16914ec // sdot za.s[x8, 4], { z7.h-z8.h }, z9.h\n"
- "st1b { z24.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "add z6.h, z6.h, z21.h\n"
- ".inst 0xc16114ed // sdot za.s[x8, 5], { z7.h-z8.h }, z1.h\n"
- "trn1 z7.h, z23.h, z18.h\n"
- "trn1 z8.h, z17.h, z16.h\n"
+ "add z21.h, z21.h, z20.h\n"
+ ".inst 0xc1adaa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
+ ".inst 0xc16916cc // sdot za.s[x8, 4], { z22.h-z23.h }, z9.h\n"
+ ".inst 0xc16116cd // sdot za.s[x8, 5], { z22.h-z23.h }, z1.h\n"
+ "trn1 z22.h, z27.h, z3.h\n"
+ "trn1 z23.h, z25.h, z11.h\n"
"add x8, x8, #0x2\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z26.s }, p1, [x10]\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
+ ".inst 0xc1bfcfd0 // sclamp { z16.s-z19.s }, z30.s, z31.s\n"
+ "st1b { z16.s }, p1, [x11]\n"
+ "add x11, x11, x9\n"
+ "st1b { z18.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z17.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- "add z7.h, z7.h, z21.h\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z19.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "add z8.h, z8.h, z21.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
@@ -382,258 +387,258 @@ void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #8\n"
+ ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1b { z17.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z7.h, z19.h, z18.h\n"
- "trn1 z8.h, z17.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #8\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "trn1 z9.h, z17.h, z16.h\n"
- ".inst 0xc16a14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z10.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16214e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z2.h\n"
- ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16d1508 // sdot za.s[x8, 0], { z8.h-z9.h }, z13.h\n"
- ".inst 0xc1651509 // sdot za.s[x8, 1], { z8.h-z9.h }, z5.h\n"
+ ".inst 0xc16c16e8 // sdot za.s[x8, 0], { z23.h-z24.h }, z12.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16416e9 // sdot za.s[x8, 1], { z23.h-z24.h }, z4.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ ".inst 0xc16f1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z15.h\n"
+ ".inst 0xc1671709 // sdot za.s[x8, 1], { z24.h-z25.h }, z7.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x22, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x21, SP, #4\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #8\n"
+ ".inst 0xa1412aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "ld1b { z17.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z16.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z19.h, z18.h\n"
- "trn1 z23.h, z17.h, z16.h\n"
+ "ld1b { z10.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z10.h, p0/M, z10.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z17.h, z18.h, z10.h\n"
+ "add z14.h, p0/M, z14.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #4\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- "trn1 z24.h, z17.h, z16.h\n"
- ".inst 0xc16116c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z1.h\n"
- ".inst 0xc16016c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z0.h\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0412aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16d16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z13.h\n"
- ".inst 0xc16516cb // sdot za.s[x8, 3], { z22.h-z23.h }, z5.h\n"
- ".inst 0xc16116e8 // sdot za.s[x8, 0], { z23.h-z24.h }, z1.h\n"
- ".inst 0xc16016e9 // sdot za.s[x8, 1], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xc16f1608 // sdot za.s[x8, 0], { z16.h-z17.h }, z15.h\n"
+ "ld1b { z10.s }, p0/Z, [x22]\n"
+ ".inst 0xc1671609 // sdot za.s[x8, 1], { z16.h-z17.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ "add z10.h, p0/M, z10.h, z20.h\n"
+ ".inst 0xc16f160a // sdot za.s[x8, 2], { z16.h-z17.h }, z15.h\n"
+ ".inst 0xc167160b // sdot za.s[x8, 3], { z16.h-z17.h }, z7.h\n"
+ "trn1 z18.h, z14.h, z10.h\n"
+ ".inst 0xc16c1628 // sdot za.s[x8, 0], { z17.h-z18.h }, z12.h\n"
+ ".inst 0xc1641629 // sdot za.s[x8, 1], { z17.h-z18.h }, z4.h\n"
".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16116ea // sdot za.s[x8, 2], { z23.h-z24.h }, z1.h\n"
- ".inst 0xc16016eb // sdot za.s[x8, 3], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xc161162a // sdot za.s[x8, 2], { z17.h-z18.h }, z1.h\n"
+ ".inst 0xc160162b // sdot za.s[x8, 3], { z17.h-z18.h }, z0.h\n"
"15:" // Padded: 0 priming loads
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
"add x20, x14, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x15, x15, #0x1\n"
+ "sub x13, x13, #0x1\n"
+ "cmp x15, x13\n"
+ "ld1b { z17.s }, p0/Z, [x14]\n"
+ "csel x23, x15, x13, LT\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "sub x13, x13, x23\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z21.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z6.h, z19.h, z18.h\n"
- "trn1 z7.h, z17.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- "sub x15, x15, #0x1\n"
- "sub x13, x13, #0x1\n"
- "cmp x15, x13\n"
- "trn1 z8.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
- "add x14, x14, %x[ld_in_col]\n"
- "sub x13, x13, x23\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z23.h, z17.h, z16.h\n"
"cbz x23, 17f\n"
"16:" // Padded: Main loop
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z9.s }, p0/Z, [x14]\n"
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- "add z9.h, p0/M, z9.h, z21.h\n"
"add x22, x14, %x[ld_in_row]\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ "addvl x21, SP, #4\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #8\n"
+ "subs x23, x23, #0x1\n"
+ "ld1b { z16.s }, p0/Z, [x14]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x22]\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
+ ".inst 0xc16f16ac // sdot za.s[x8, 4], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ad // sdot za.s[x8, 5], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16e16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z14.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc16616cb // sdot za.s[x8, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
"ld1b { z18.s }, p0/Z, [x22]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16f16cc // sdot za.s[x8, 4], { z22.h-z23.h }, z15.h\n"
+ ".inst 0xc16716cd // sdot za.s[x8, 5], { z22.h-z23.h }, z7.h\n"
+ "add x8, x8, #0x2\n"
+ ".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "trn1 z21.h, z16.h, z17.h\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x22]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"mov x12, #0x4\n"
- "addvl x21, SP, #4\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16b14ca // sdot za.s[x8, 2], { z6.h-z7.h }, z11.h\n"
- "subs x23, x23, #0x1\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
"ld1b { z17.s }, p0/Z, [x22]\n"
- ".inst 0xc16314cb // sdot za.s[x8, 3], { z6.h-z7.h }, z3.h\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa0412aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16d14cc // sdot za.s[x8, 4], { z6.h-z7.h }, z13.h\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- "ld1b { z2.s }, p0/Z, [x22]\n"
- ".inst 0xc16514cd // sdot za.s[x8, 5], { z6.h-z7.h }, z5.h\n"
- "add z2.h, p0/M, z2.h, z21.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16b14ea // sdot za.s[x8, 2], { z7.h-z8.h }, z11.h\n"
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- ".inst 0xc16a14eb // sdot za.s[x8, 3], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa1412a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc16b14ec // sdot za.s[x8, 4], { z7.h-z8.h }, z11.h\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "trn1 z6.h, z9.h, z19.h\n"
- ".inst 0xc16314ed // sdot za.s[x8, 5], { z7.h-z8.h }, z3.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- "trn1 z7.h, z18.h, z16.h\n"
- "trn1 z8.h, z17.h, z2.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z23.h, z17.h, z16.h\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
"addvl x21, SP, #4\n"
"addvl x20, SP, #8\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc16114ca // sdot za.s[x8, 2], { z6.h-z7.h }, z1.h\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc16014cb // sdot za.s[x8, 3], { z6.h-z7.h }, z0.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc16914cc // sdot za.s[x8, 4], { z6.h-z7.h }, z9.h\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- "st1b { z24.s }, p1, [x11]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ ".inst 0xc16f16ac // sdot za.s[x8, 4], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ad // sdot za.s[x8, 5], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16e16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+ ".inst 0xc16616cb // sdot za.s[x8, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc16c16cc // sdot za.s[x8, 4], { z22.h-z23.h }, z12.h\n"
+ ".inst 0xc16416cd // sdot za.s[x8, 5], { z22.h-z23.h }, z4.h\n"
+ "add x8, x8, #0x2\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc16114cd // sdot za.s[x8, 5], { z6.h-z7.h }, z1.h\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc16314ea // sdot za.s[x8, 2], { z7.h-z8.h }, z3.h\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- ".inst 0xc16214eb // sdot za.s[x8, 3], { z7.h-z8.h }, z2.h\n"
- ".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc16114ec // sdot za.s[x8, 4], { z7.h-z8.h }, z1.h\n"
- ".inst 0xc16014ed // sdot za.s[x8, 5], { z7.h-z8.h }, z0.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
"18:" // Main loop skip tail
"cbz x13, 20f\n"
"19:" // Right padding loop
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
+ ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"add x8, x8, #0x2\n"
- ".inst 0xc1acaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc1afab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z15.s\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- ".inst 0xc1bccfa4 // sclamp { z4.s-z7.s }, z29.s, z28.s\n"
- "st1b { z4.s }, p1, [x11]\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a5ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc1adaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z13.s\n"
+ ".inst 0xc1a8ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ ".inst 0xc1bfcfd8 // sclamp { z24.s-z27.s }, z30.s, z31.s\n"
+ "st1b { z24.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z26.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
+ "st1b { z25.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z27.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 19b\n"
"20:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -652,6 +657,8 @@ void sme2_u8q_planar_3x3_s1_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #12\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
index e4ce6c74fb..0b17ad3ae9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,119 +70,124 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0x9\n"
"ptrue p2.b\n"
- "mov x20, #0x9\n"
"ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z11.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x22, #0x8\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z29.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x21, x6\n"
+ "mov SP, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-6\n"
+ "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z29.h, p2/M, z29.h\n"
+ "ld1rw { z0.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x7\n"
- "addvl SP, SP, #-6\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z11.h, p2/M, z11.h\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z28.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z28.s, #0x0\n"
+ "mov z16.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x20, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1b { z26.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z22.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z27.h, #0x0\n"
+ "addvl x22, SP, #6\n"
+ "addvl x22, x22, #-2\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z17.d, z16.d\n"
+ "mov z18.d, z16.d\n"
+ "mov z19.d, z16.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1b { z25.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1rh { z16.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "sub z26.h, z26.h, z16.h\n"
- "incw x22\n"
- "mov z24.h, #0x0\n"
- "ld1b { z3.s }, p2/Z, [x20]\n"
+ "ld1b { z15.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z3.h, z3.h, z16.h\n"
- "trn1 z31.h, z26.h, z3.h\n"
- "ld1b { z21.s }, p2/Z, [x20]\n"
- "sub z21.h, z21.h, z16.h\n"
- "mov x20, x22\n"
- "trn1 z14.h, z21.h, z24.h\n"
- "ld1b { z2.s }, p2/Z, [x20]\n"
+ "ld1b { z9.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z25.h, z25.h, z22.h\n"
+ "sub z15.h, z15.h, z22.h\n"
+ "sub z9.h, z9.h, z22.h\n"
+ "trn1 z24.h, z25.h, z15.h\n"
+ "ld1b { z12.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z2.h, z2.h, z16.h\n"
- "addvl x21, SP, #6\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
+ "ld1b { z4.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z25.h, z25.h, z16.h\n"
- "incw x22\n"
- "ld1b { z27.s }, p2/Z, [x20]\n"
- "sub z27.h, z27.h, z16.h\n"
- "addvl x21, x21, #-2\n"
- "mov x20, x22\n"
- "st1h { z31.h }, p2, [x21]\n"
- "trn1 z4.h, z2.h, z25.h\n"
- "ld1b { z26.s }, p2/Z, [x20]\n"
+ "trn1 z11.h, z9.h, z27.h\n"
+ "ld1b { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "sub z12.h, z12.h, z22.h\n"
+ "sub z4.h, z4.h, z22.h\n"
+ "st1h { z24.h }, p2, [x22]\n"
+ "sub z15.h, z15.h, z22.h\n"
+ "st1h { z11.h }, p2, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #-2\n"
+ "trn1 z9.h, z12.h, z4.h\n"
+ "ld1b { z14.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1b { z23.s }, p2/Z, [x20]\n"
+ "ld1b { z10.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z14.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z12.h, z27.h, z24.h\n"
- "ld1b { z20.s }, p2/Z, [x20]\n"
- "sub z26.h, z26.h, z16.h\n"
- "sub z23.h, z23.h, z16.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "sub z20.h, z20.h, z16.h\n"
- "addvl x21, x21, #-2\n"
- "st1h { z4.h }, p2, [x21]\n"
- "mov z29.d, z28.d\n"
- "st1h { z12.h }, p2, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #-2\n"
- "mov z30.d, z28.d\n"
- "mov z31.d, z28.d\n"
- "trn1 z25.h, z26.h, z23.h\n"
- "st1h { z25.h }, p2, [x21]\n"
- "trn1 z3.h, z20.h, z24.h\n"
- "st1h { z3.h }, p2, [x21, #1, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z6.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "trn1 z21.h, z15.h, z27.h\n"
+ "ld1b { z30.s }, p2/Z, [x20]\n"
+ "sub z14.h, z14.h, z22.h\n"
+ "sub z10.h, z10.h, z22.h\n"
+ "st1h { z9.h }, p2, [x22]\n"
+ "sub z30.h, z30.h, z22.h\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #-2\n"
+ "trn1 z15.h, z14.h, z10.h\n"
+ "trn1 z25.h, z30.h, z27.h\n"
+ "st1h { z15.h }, p2, [x22]\n"
+ "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z1.s }, p1/Z, [x21, x16, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z9.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z0.s }, p1/Z, [x20, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
"mov x22, #0x9\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x7, x6\n"
+ "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x15, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x7, x14\n"
+ "orr x20, x17, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040e00 // mova za.d[x8, #0], { z16.d-z19.d }\n"
"mov x22, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x14, x7, x21, x14\n"
+ ".inst 0xc0040e01 // mova za.d[x8, #1], { z16.d-z19.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
"ldp x11, x10, [x23], #0x10\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
"ldp x9, x28, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
"ldp x27, x26, [x23], #0x10\n"
"ldp x25, x24, [x20], #0x10\n"
"cbz x21, 7f\n"
@@ -191,24 +196,24 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
+ ".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
"and x22, x21, #0x1\n"
- ".inst 0xc1a9aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z9.s\n"
"add x21, x21, #0x1\n"
"lsr x21, x21, #0x1\n"
- ".inst 0xc1adab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z13.s\n"
"sub x13, x13, x21\n"
- ".inst 0xc1a7cd58 // sclamp { z24.s-z27.s }, z10.s, z7.s\n"
+ ".inst 0xc1a1ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+ ".inst 0xc1a0aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc1a8ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
+ ".inst 0xc1bccfec // sclamp { z12.s-z15.s }, z31.s, z28.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "st1b { z12.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z25.s }, p1, [x10]\n"
+ "st1b { z13.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z26.s }, p1, [x27]\n"
+ "st1b { z14.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z15.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -220,194 +225,194 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1b { z1.s }, p1/Z, [x14]\n"
+ "ld1b { z23.s }, p1/Z, [x14]\n"
"addvl x20, SP, #4\n"
- "ld1b { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z21.h\n"
- "add z1.h, z1.h, z11.h\n"
- "ld1b { z2.s }, p1/Z, [x21]\n"
+ "ld1b { z4.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z2.h, z2.h, z15.h\n"
- "add z2.h, z2.h, z11.h\n"
- "ld1b { z3.s }, p1/Z, [x21]\n"
+ "ld1b { z5.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z21.s }, p1/Z, [x21]\n"
+ "ld1b { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z3.h, z3.h, z21.h\n"
- "add z3.h, z3.h, z11.h\n"
- "ld1b { z4.s }, p1/Z, [x21]\n"
+ "trn1 z23.h, z23.h, z4.h\n"
+ "ld1b { z6.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
+ "ld1b { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z4.h, z19.h\n"
- "add z4.h, z4.h, z11.h\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
- "mov z5.d, z8.d\n"
- "add z5.h, z5.h, z11.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701428 // sdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
- ".inst 0xc1781448 // sdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
+ "trn1 z24.h, z24.h, z5.h\n"
+ "ld1b { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z25.h, z25.h, z6.h\n"
+ "ld1b { z10.s }, p1/Z, [x21]\n"
+ "add z23.h, z23.h, z29.h\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ "trn1 z26.h, z26.h, z20.h\n"
+ "add z24.h, z24.h, z29.h\n"
+ "mov z27.d, z10.d\n"
+ "add z25.h, z25.h, z29.h\n"
+ "add z26.h, z26.h, z29.h\n"
+ "add z27.h, z27.h, z29.h\n"
+ ".inst 0xc17616e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z6.h\n"
+ ".inst 0xc17e1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z14.h\n"
"9:" // Unpadded: 1 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1b { z1.s }, p1/Z, [x14]\n"
+ "ld1b { z20.s }, p1/Z, [x14]\n"
"addvl x20, SP, #2\n"
- "ld1b { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z21.h\n"
- "add z1.h, z1.h, z11.h\n"
"ld1b { z2.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z12.s }, p1/Z, [x21]\n"
+ "ld1b { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z2.h, z2.h, z12.h\n"
- "add z2.h, z2.h, z11.h\n"
- "ld1b { z3.s }, p1/Z, [x21]\n"
+ "ld1b { z25.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
+ "trn1 z20.h, z20.h, z2.h\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z3.h, z3.h, z8.h\n"
- "add z3.h, z3.h, z11.h\n"
- "ld1b { z4.s }, p1/Z, [x21]\n"
+ "ld1b { z23.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z5.s }, p1/Z, [x21]\n"
+ "trn1 z21.h, z21.h, z25.h\n"
+ "ld1b { z9.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z4.h, z5.h\n"
- "add z4.h, z4.h, z11.h\n"
- "ld1b { z5.s }, p1/Z, [x21]\n"
- "mov z5.d, z5.d\n"
- "add z5.h, z5.h, z11.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701428 // sdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
- ".inst 0xc1781448 // sdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
+ "trn1 z22.h, z22.h, z24.h\n"
+ "ld1b { z3.s }, p1/Z, [x21]\n"
+ "add z20.h, z20.h, z29.h\n"
+ ".inst 0xa0402a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20]\n"
+ "trn1 z23.h, z23.h, z9.h\n"
+ "add z21.h, z21.h, z29.h\n"
+ "mov z24.d, z3.d\n"
+ "add z22.h, z22.h, z29.h\n"
+ "add z23.h, z23.h, z29.h\n"
+ "add z24.h, z24.h, z29.h\n"
+ ".inst 0xc1761688 // sdot za.s[x8, 0], { z20.h-z23.h }, z6.h\n"
+ ".inst 0xc17716a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z7.h\n"
"10:" // Unpadded: 0 priming loads
"cmp x15, #0x2\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
"add x21, x14, %x[ld_in_row]\n"
- "ld1b { z21.s }, p1/Z, [x14]\n"
+ "ld1b { z10.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x2\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
+ "ld1b { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z8.h\n"
"sub x13, x13, #0x1\n"
- "ld1b { z22.s }, p1/Z, [x21]\n"
+ "ld1b { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"lsr x20, x15, #0x1\n"
- "add z21.h, z21.h, z11.h\n"
- "ld1b { z25.s }, p1/Z, [x21]\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z25.h\n"
"cmp x20, x13\n"
- "ld1b { z23.s }, p1/Z, [x21]\n"
+ "ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z25.h\n"
"csel x23, x20, x13, LT\n"
- "add z22.h, z22.h, z11.h\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z18.h\n"
- "add z23.h, z23.h, z11.h\n"
- "ld1b { z24.s }, p1/Z, [x21]\n"
+ "ld1b { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z19.h\n"
- "add z24.h, z24.h, z11.h\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
- "mov z25.d, z8.d\n"
- "add z25.h, z25.h, z11.h\n"
+ "trn1 z11.h, z11.h, z24.h\n"
"and x15, x15, #0x1\n"
+ "ld1b { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"sub x13, x13, x23\n"
+ "ld1b { z26.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z30.h\n"
+ "add z10.h, z10.h, z29.h\n"
+ "trn1 z13.h, z13.h, z20.h\n"
+ "add z11.h, z11.h, z29.h\n"
+ "mov z14.d, z26.d\n"
+ "add z12.h, z12.h, z29.h\n"
+ "add z13.h, z13.h, z29.h\n"
+ "add z14.h, z14.h, z29.h\n"
"cbz x23, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
"addvl x20, SP, #4\n"
"add x22, x14, %x[ld_in_row]\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
"addvl x21, SP, #2\n"
"subs x23, x23, #0x1\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1b { z21.s }, p1/Z, [x14]\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1771549 // sdot za.s[x8, 1], { z10.h-z13.h }, z7.h\n"
+ "ld1b { z3.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "ld1b { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- "trn1 z21.h, z21.h, z18.h\n"
- "ld1b { z22.s }, p1/Z, [x22]\n"
+ "ld1b { z9.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z21.h, z21.h, z11.h\n"
- ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
- "ld1b { z8.s }, p1/Z, [x22]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ "add x20, x14, %x[ld_in_row]\n"
+ ".inst 0xc17f1569 // sdot za.s[x8, 1], { z11.h-z14.h }, z15.h\n"
+ "ld1b { z4.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z8.h\n"
- "add z22.h, z22.h, z11.h\n"
- "ld1b { z23.s }, p1/Z, [x22]\n"
+ "trn1 z3.h, z3.h, z9.h\n"
+ "ld1b { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "ld1b { z27.s }, p1/Z, [x22]\n"
+ "ld1b { z5.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z27.h\n"
- "add z23.h, z23.h, z11.h\n"
- "ld1b { z24.s }, p1/Z, [x22]\n"
+ ".inst 0xc1a1ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z1.s\n"
+ "ld1b { z10.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc1a6ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
- "ld1b { z8.s }, p1/Z, [x22]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1b { z6.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z8.h\n"
- "add z24.h, z24.h, z11.h\n"
- "ld1b { z4.s }, p1/Z, [x22]\n"
- "mov z25.d, z4.d\n"
- "add z25.h, z25.h, z11.h\n"
- ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17416a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z4.h\n"
- ".inst 0xc1a9aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
- "ld1b { z21.s }, p1/Z, [x14]\n"
- ".inst 0xc17c16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z12.h\n"
- ".inst 0xc1adab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
- "ld1b { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z12.h\n"
- ".inst 0xc1a7cd40 // sclamp { z0.s-z3.s }, z10.s, z7.s\n"
+ "trn1 z4.h, z4.h, z15.h\n"
+ "add z3.h, z3.h, z29.h\n"
+ "ld1b { z14.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z5.h, z5.h, z10.h\n"
+ "ld1b { z21.s }, p1/Z, [x22]\n"
+ ".inst 0xc1a0aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z0.s\n"
+ ".inst 0xa0402aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21]\n"
+ "trn1 z6.h, z6.h, z14.h\n"
+ "add z4.h, z4.h, z29.h\n"
+ "mov z7.d, z21.d\n"
+ "add z5.h, z5.h, z29.h\n"
+ ".inst 0xc1a8ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ "add z6.h, z6.h, z29.h\n"
+ "add z7.h, z7.h, z29.h\n"
+ ".inst 0xc1bccff8 // sclamp { z24.s-z27.s }, z31.s, z28.s\n"
+ ".inst 0xc17a1468 // sdot za.s[x8, 0], { z3.h-z6.h }, z10.h\n"
+ "ld1b { z10.s }, p1/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
"ld1b { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "st1b { z0.s }, p1, [x11]\n"
+ "st1b { z24.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
+ ".inst 0xc17b1488 // sdot za.s[x8, 0], { z4.h-z7.h }, z11.h\n"
+ "ld1b { z11.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z20.h\n"
- "st1b { z1.s }, p1, [x10]\n"
- "ld1b { z23.s }, p1/Z, [x20]\n"
+ "trn1 z10.h, z10.h, z22.h\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "st1b { z25.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z2.s }, p1, [x27]\n"
- "ld1b { z24.s }, p1/Z, [x20]\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z24.h\n"
+ "st1b { z26.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "ld1b { z24.s }, p1/Z, [x20]\n"
+ "ld1b { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "st1b { z3.s }, p1, [x26]\n"
+ "st1b { z27.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "ld1b { z3.s }, p1/Z, [x20]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z3.h\n"
- "add z21.h, z21.h, z11.h\n"
- "ld1b { z3.s }, p1/Z, [x20]\n"
- "mov z25.d, z3.d\n"
- "add z22.h, z22.h, z11.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- "add z23.h, z23.h, z11.h\n"
- "add z24.h, z24.h, z11.h\n"
- "add z25.h, z25.h, z11.h\n"
+ "trn1 z11.h, z11.h, z14.h\n"
+ "add z10.h, z10.h, z29.h\n"
+ "ld1b { z6.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "trn1 z12.h, z12.h, z9.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
+ "trn1 z13.h, z13.h, z6.h\n"
+ "add z11.h, z11.h, z29.h\n"
+ "mov z14.d, z20.d\n"
+ "add z12.h, z12.h, z29.h\n"
+ "add z13.h, z13.h, z29.h\n"
+ "add z14.h, z14.h, z29.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
@@ -417,440 +422,440 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x14]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #4\n"
+ ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z21.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z20.h, z20.h, z22.h\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z4.s }, p0/Z, [x20]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z17.h\n"
- "trn1 z23.h, z23.h, z4.h\n"
+ "ld1b { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z23.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z24.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z25.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z11.h\n"
- "addvl x20, SP, #4\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "trn1 z25.h, z25.h, z17.h\n"
- ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
- "mov z26.d, z1.d\n"
- ".inst 0xc17416c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z4.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17c16e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z12.h\n"
+ "ld1b { z24.s }, p0/Z, [x21]\n"
+ "trn1 z23.h, z23.h, z25.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
+ ".inst 0xc1731688 // sdot za.s[x8, 0], { z20.h-z23.h }, z3.h\n"
+ "mov z24.d, z24.d\n"
+ ".inst 0xc17b16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z11.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x14]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #2\n"
+ ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z20.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z5.s }, p0/Z, [x20]\n"
- "add z5.h, p0/M, z5.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z17.h\n"
- "trn1 z23.h, z23.h, z5.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z20.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z23.h, z20.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z25.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
- "add z15.h, p0/M, z15.h, z11.h\n"
- "addvl x20, SP, #2\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "trn1 z25.h, z25.h, z17.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- "mov z26.d, z15.d\n"
- ".inst 0xc17016c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z0.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17116e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z1.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "trn1 z24.h, z24.h, z25.h\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
+ ".inst 0xc17316a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z3.h\n"
+ "mov z25.d, z20.d\n"
+ ".inst 0xc17b16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z11.h\n"
"15:" // Padded: 0 priming loads
"cmp x15, #0x2\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
- "mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z18.h\n"
- "trn1 z22.h, z22.h, z3.h\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
- "add z19.h, p0/M, z19.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "mov x12, #0x8\n"
- "add z20.h, p0/M, z20.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
"sub x15, x15, #0x2\n"
"sub x13, x13, #0x1\n"
- "trn1 z23.h, z23.h, z19.h\n"
- "trn1 z24.h, z24.h, z20.h\n"
"lsr x20, x15, #0x1\n"
"cmp x20, x13\n"
- "mov z25.d, z3.d\n"
- "csel x22, x20, x13, LT\n"
- "add x14, x14, %x[ld_in_col]\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x22\n"
- "cbz x22, 17f\n"
- "16:" // Padded: Main loop
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "addvl x20, SP, #4\n"
- "mov x12, #0x0\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x21, x14, %x[ld_in_row]\n"
- ".inst 0xc17416a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z4.h\n"
- "ld1b { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "ld1b { z10.s }, p0/Z, [x14]\n"
+ "csel x23, x20, x13, LT\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "sub x13, x13, x23\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z14.s }, p0/Z, [x21]\n"
- "add z14.h, p0/M, z14.h, z11.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17c16c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z12.h\n"
- "ld1b { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
+ "ld1b { z11.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z20.h\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z15.s }, p0/Z, [x21]\n"
"mov x12, #0x4\n"
- "add z15.h, p0/M, z15.h, z11.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x21]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z20.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x21]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z4.s }, p0/Z, [x21]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
"mov x12, #0x8\n"
+ "ld1b { z21.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z14.h\n"
- "trn1 z22.h, z22.h, z15.h\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "addvl x20, SP, #2\n"
- "ld1b { z2.s }, p0/Z, [x21]\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z4.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z21.h\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
+ "mov z14.d, z20.d\n"
+ "cbz x23, 17f\n"
+ "16:" // Padded: Main loop
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "addvl x20, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "add z2.h, p0/M, z2.h, z11.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17016a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z0.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "add x22, x14, %x[ld_in_row]\n"
+ "addvl x21, SP, #2\n"
+ "subs x23, x23, #0x1\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1721549 // sdot za.s[x8, 1], { z10.h-z13.h }, z2.h\n"
+ "ld1b { z10.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x20, x14, %x[ld_in_row]\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "mov z25.d, z2.d\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1731569 // sdot za.s[x8, 1], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1b { z26.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z26.h, p0/M, z26.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17116c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z1.h\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ "ld1b { z11.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z26.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z4.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
+ "ld1b { z9.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z4.s }, p1, [x11]\n"
+ "add x11, x11, x9\n"
+ "st1b { z5.s }, p1, [x10]\n"
+ "add x10, x10, x28\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "st1b { z6.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
+ "trn1 z11.h, z11.h, z9.h\n"
+ "st1b { z7.s }, p1, [x26]\n"
+ "add x26, x26, x24\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z9.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z9.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- "add x20, x20, %x[ld_in_row]\n"
- "add z12.h, p0/M, z12.h, z11.h\n"
+ "ld1b { z20.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z11.h\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "trn1 z21.h, z21.h, z20.h\n"
- "st1b { z17.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "trn1 z22.h, z22.h, z4.h\n"
- "trn1 z23.h, z23.h, z27.h\n"
- "st1b { z18.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "trn1 z24.h, z24.h, z12.h\n"
- "mov z25.d, z8.d\n"
- "st1b { z19.s }, p1, [x26]\n"
- "add x26, x26, x24\n"
- "add x14, x14, %x[ld_in_col]\n"
- "bgt 16b\n"
- "17:" // Main loop tail
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "addvl x20, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z9.s }, p0/Z, [x22]\n"
+ "trn1 z13.h, z13.h, z20.h\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1b { z0.s }, p0/Z, [x14]\n"
- "add z0.h, p0/M, z0.h, z11.h\n"
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "ld1b { z10.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "mov z14.d, z9.d\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
- "add z14.h, p0/M, z14.h, z11.h\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
+ "ld1b { z25.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z11.h\n"
+ "ld1b { z11.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z25.h\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
- "add z12.h, p0/M, z12.h, z11.h\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z2.s }, p0/Z, [x20]\n"
- "add z2.h, p0/M, z2.h, z11.h\n"
+ "ld1b { z12.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z15.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "ld1b { z4.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z4.h, p0/M, z4.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z3.s }, p0/Z, [x20]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z4.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
+ "ld1b { z4.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
+ "add z4.h, p0/M, z4.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
+ "ld1b { z26.s }, p0/Z, [x20]\n"
+ "trn1 z13.h, z13.h, z4.h\n"
+ "add z26.h, p0/M, z26.h, z29.h\n"
+ "mov z14.d, z26.d\n"
+ "bgt 16b\n"
+ "17:" // Main loop tail
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "addvl x22, SP, #4\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "add x21, x14, %x[ld_in_row]\n"
"addvl x20, SP, #2\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- "trn1 z0.h, z0.h, z14.h\n"
- "add x8, x8, #0x1\n"
- "add z27.h, p0/M, z27.h, z11.h\n"
- "trn1 z1.h, z1.h, z12.h\n"
- "trn1 z2.h, z2.h, z21.h\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1721549 // sdot za.s[x8, 1], { z10.h-z13.h }, z2.h\n"
+ "ld1b { z9.s }, p0/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "trn1 z3.h, z3.h, z25.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- "mov z4.d, z27.d\n"
- ".inst 0xc17e1408 // sdot za.s[x8, 0], { z0.h-z3.h }, z14.h\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1731569 // sdot za.s[x8, 1], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ "ld1b { z10.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z15.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x4\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "st1b { z17.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc17f1428 // sdot za.s[x8, 0], { z1.h-z4.h }, z15.h\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- "st1b { z18.s }, p1, [x27]\n"
+ "ld1b { z11.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z19.s }, p1, [x26]\n"
+ "trn1 z10.h, z10.h, z15.h\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z5.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z5.h, p0/M, z5.h, z29.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z5.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x8\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1b { z5.s }, p0/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z15.h\n"
+ "add z5.h, p0/M, z5.h, z29.h\n"
+ ".inst 0xc1721528 // sdot za.s[x8, 0], { z9.h-z12.h }, z2.h\n"
+ "mov z13.d, z5.d\n"
+ ".inst 0xc1731548 // sdot za.s[x8, 0], { z10.h-z13.h }, z3.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"18:" // Main loop skip tail
"cbz x15, 19f\n" // Skip remainder inputs
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #4\n"
+ "sub x13, x13, #0x1\n"
"ld1b { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z15.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z0.s }, p0/Z, [x20]\n"
- "add z0.h, p0/M, z0.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z17.h\n"
- "trn1 z22.h, z22.h, z0.h\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z12.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z23.h, z20.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z5.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z5.h, p0/M, z5.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z30.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z30.h, p0/M, z30.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z4.s }, p0/Z, [x20]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z5.h\n"
- "mov z25.d, z4.d\n"
- "addvl x20, SP, #4\n"
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "sub x13, x13, #0x1\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
+ "ld1b { z6.s }, p0/Z, [x21]\n"
+ "trn1 z24.h, z24.h, z30.h\n"
+ "add z6.h, p0/M, z6.h, z29.h\n"
+ ".inst 0xc17216a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z2.h\n"
+ "mov z25.d, z6.d\n"
+ ".inst 0xc17316c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z3.h\n"
+ ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17516a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z5.h\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ ".inst 0xc17d16c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z13.h\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "st1b { z17.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z18.s }, p1, [x27]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z19.s }, p1, [x26]\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"19:" // Tail input: End
"cbz x13, 21f\n"
"20:" // Right padding loop
- ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a9aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- ".inst 0xc1adab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
- ".inst 0xc1a7cd40 // sclamp { z0.s-z3.s }, z10.s, z7.s\n"
- "st1b { z0.s }, p1, [x11]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z1.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z2.s }, p1, [x27]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z3.s }, p1, [x26]\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 20b\n"
"21:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -869,6 +874,8 @@ void sme2_u8q_planar_3x3_s2_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #6\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
index d33ef764ef..d4db24071c 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,249 +70,254 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "ptrue p2.b\n"
+ "mov x22, SP\n"
"mov x20, #0x8\n"
+ "ptrue p2.b\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z17.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x5\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x22, #0x8\n"
"ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z15.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x21, x21, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x20, x5\n"
+ "mov SP, x21\n"
+ "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-30\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x7\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "neg z15.h, p2/M, z15.h\n"
+ "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x6\n"
- "addvl SP, SP, #-30\n"
- "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z17.h, p2/M, z17.h\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z18.s, #0x0\n"
+ "mov z28.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z18.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x20, x17, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x23\n"
- "ld1b { z2.s }, p2/Z, [x20]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z0.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z13.h, #0x0\n"
+ "addvl x22, SP, #30\n"
+ "addvl x22, x22, #-6\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z29.d, z28.d\n"
+ "mov x23, x24\n"
+ "incw x24\n"
+ "ld1b { z22.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1b { z21.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1b { z19.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1b { z25.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "sub z22.h, z22.h, z0.h\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "ld1b { z5.s }, p2/Z, [x23]\n"
+ "mov x20, x24\n"
+ "incw x24\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "sub z25.h, z25.h, z0.h\n"
+ "sub z5.h, z5.h, z0.h\n"
+ "trn1 z6.h, z13.h, z22.h\n"
+ "trn1 z23.h, z22.h, z21.h\n"
+ "ld1b { z27.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "ld1rh { z3.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z15.h, #0x0\n"
- "sub z2.h, z2.h, z3.h\n"
- "incw x23\n"
- "ld1b { z13.s }, p2/Z, [x20]\n"
+ "trn1 z4.h, z21.h, z19.h\n"
+ "ld1b { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z13.h, z13.h, z3.h\n"
- "trn1 z11.h, z15.h, z2.h\n"
- "ld1b { z27.s }, p2/Z, [x20]\n"
+ "trn1 z26.h, z19.h, z25.h\n"
+ "ld1b { z18.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z27.h, z27.h, z3.h\n"
- "trn1 z0.h, z2.h, z13.h\n"
+ "trn1 z22.h, z25.h, z5.h\n"
+ "ld1b { z7.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z25.h, z5.h, z13.h\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "sub z9.h, z9.h, z0.h\n"
+ "ld1b { z1.s }, p2/Z, [x20]\n"
+ "mov x20, x24\n"
+ "sub z18.h, z18.h, z0.h\n"
+ "st1h { z6.h }, p2, [x22]\n"
+ "incw x24\n"
+ "sub z7.h, z7.h, z0.h\n"
+ "st1h { z23.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z1.h, z1.h, z0.h\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "trn1 z20.h, z13.h, z27.h\n"
+ "trn1 z12.h, z27.h, z9.h\n"
+ "ld1b { z21.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z2.h, z9.h, z18.h\n"
"ld1b { z19.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z19.h, z19.h, z3.h\n"
- "trn1 z26.h, z13.h, z27.h\n"
+ "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z9.h, z18.h, z7.h\n"
"ld1b { z14.s }, p2/Z, [x20]\n"
- "sub z14.h, z14.h, z3.h\n"
- "mov x20, x23\n"
- "trn1 z10.h, z27.h, z19.h\n"
- "ld1b { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z19.h, z19.h, z14.h\n"
- "trn1 z1.h, z14.h, z15.h\n"
+ "st1h { z22.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z18.h, z7.h, z1.h\n"
"ld1b { z5.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z9.h, z9.h, z3.h\n"
- "sub z5.h, z5.h, z3.h\n"
- "ld1b { z29.s }, p2/Z, [x20]\n"
+ "st1h { z25.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z25.h, z1.h, z13.h\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "addvl x22, x22, #-6\n"
+ "ld1b { z16.s }, p2/Z, [x20]\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "mov x20, x24\n"
+ "st1h { z20.h }, p2, [x22]\n"
+ "sub z5.h, z5.h, z0.h\n"
+ "st1h { z12.h }, p2, [x22, #1, MUL VL]\n"
+ "incw x24\n"
+ "st1h { z2.h }, p2, [x22, #2, MUL VL]\n"
+ "sub z16.h, z16.h, z0.h\n"
+ "trn1 z7.h, z13.h, z21.h\n"
+ "trn1 z20.h, z21.h, z19.h\n"
+ "ld1b { z6.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z29.h, z29.h, z3.h\n"
- "addvl x22, SP, #30\n"
+ "trn1 z17.h, z19.h, z14.h\n"
+ "st1h { z9.h }, p2, [x22, #3, MUL VL]\n"
"ld1b { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "incw x23\n"
- "sub z2.h, z2.h, z3.h\n"
- "ld1b { z23.s }, p2/Z, [x20]\n"
- "addvl x22, x22, #-6\n"
- "sub z23.h, z23.h, z3.h\n"
- "mov x20, x23\n"
- "st1h { z11.h }, p2, [x22]\n"
- "trn1 z20.h, z15.h, z9.h\n"
- "incw x23\n"
- "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "st1h { z0.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z22.h, z9.h, z5.h\n"
- "ld1b { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z9.h, z5.h, z29.h\n"
+ "trn1 z12.h, z14.h, z5.h\n"
+ "st1h { z18.h }, p2, [x22, #4, MUL VL]\n"
"ld1b { z21.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z26.h, z29.h, z2.h\n"
- "ld1b { z0.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z28.h, z2.h, z23.h\n"
- "ld1b { z19.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z1.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z2.h, z23.h, z15.h\n"
- "sub z25.h, z25.h, z3.h\n"
+ "st1h { z25.h }, p2, [x22, #5, MUL VL]\n"
"addvl x22, x22, #-6\n"
- "sub z21.h, z21.h, z3.h\n"
- "ld1b { z6.s }, p2/Z, [x20]\n"
- "sub z0.h, z0.h, z3.h\n"
- "mov x20, x23\n"
- "sub z19.h, z19.h, z3.h\n"
- "sub z6.h, z6.h, z3.h\n"
- "st1h { z20.h }, p2, [x22]\n"
- "incw x23\n"
- "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z11.h, z15.h, z25.h\n"
- "trn1 z10.h, z25.h, z21.h\n"
- "ld1b { z5.s }, p2/Z, [x20]\n"
+ "trn1 z5.h, z5.h, z16.h\n"
+ "ld1b { z25.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z9.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z14.h, z21.h, z0.h\n"
+ "trn1 z4.h, z16.h, z13.h\n"
+ "sub z6.h, z6.h, z0.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1b { z19.s }, p2/Z, [x20]\n"
+ "mov x20, x24\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "st1h { z7.h }, p2, [x22]\n"
+ "sub z25.h, z25.h, z0.h\n"
+ "st1h { z20.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
+ "trn1 z1.h, z13.h, z6.h\n"
+ "trn1 z24.h, z6.h, z2.h\n"
"ld1b { z23.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z21.h, z0.h, z19.h\n"
- "ld1b { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z28.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z19.h, z19.h, z6.h\n"
- "ld1b { z29.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z2.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z13.h, z6.h, z15.h\n"
- "sub z5.h, z5.h, z3.h\n"
- "sub z23.h, z23.h, z3.h\n"
- "ld1b { z1.s }, p2/Z, [x20]\n"
- "addvl x22, x22, #-6\n"
- "sub z27.h, z27.h, z3.h\n"
- "sub z29.h, z29.h, z3.h\n"
- "mov x20, x23\n"
- "st1h { z11.h }, p2, [x22]\n"
- "sub z1.h, z1.h, z3.h\n"
- "st1h { z10.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z30.h, z15.h, z5.h\n"
- "trn1 z26.h, z5.h, z23.h\n"
- "ld1b { z11.s }, p2/Z, [x20]\n"
+ "trn1 z16.h, z2.h, z21.h\n"
+ "ld1b { z6.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z14.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z22.h, z23.h, z27.h\n"
- "ld1b { z5.s }, p2/Z, [x20]\n"
+ "st1h { z12.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z20.h, z21.h, z25.h\n"
+ "ld1b { z14.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z21.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z28.h, z27.h, z29.h\n"
- "ld1b { z8.s }, p2/Z, [x20]\n"
+ "st1h { z5.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z17.h, z25.h, z19.h\n"
+ "ld1b { z22.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z27.h, z29.h, z1.h\n"
- "ld1b { z9.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z13.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z2.h, z1.h, z15.h\n"
- "ld1b { z14.s }, p2/Z, [x20]\n"
- "sub z11.h, z11.h, z3.h\n"
+ "st1h { z4.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z19.h, z19.h, z13.h\n"
+ "ld1b { z27.s }, p2/Z, [x20]\n"
+ "sub z23.h, z23.h, z0.h\n"
"addvl x22, x22, #-6\n"
- "sub z5.h, z5.h, z3.h\n"
- "sub z8.h, z8.h, z3.h\n"
- "st1h { z30.h }, p2, [x22]\n"
- "sub z9.h, z9.h, z3.h\n"
- "sub z14.h, z14.h, z3.h\n"
- "st1h { z26.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
- "mov z19.d, z18.d\n"
- "trn1 z22.h, z15.h, z11.h\n"
- "st1h { z28.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z1.h, z11.h, z5.h\n"
- "trn1 z31.h, z5.h, z8.h\n"
- "st1h { z27.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z8.h, z8.h, z9.h\n"
- "trn1 z21.h, z9.h, z14.h\n"
- "st1h { z2.h }, p2, [x22, #5, MUL VL]\n"
+ "sub z6.h, z6.h, z0.h\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "st1h { z1.h }, p2, [x22]\n"
+ "sub z22.h, z22.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z16.h, z13.h, z23.h\n"
+ "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z7.h, z23.h, z6.h\n"
+ "trn1 z12.h, z6.h, z14.h\n"
+ "st1h { z19.h }, p2, [x22, #5, MUL VL]\n"
"addvl x22, x22, #-6\n"
- "trn1 z15.h, z14.h, z15.h\n"
- "st1h { z22.h }, p2, [x22]\n"
- "st1h { z1.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z31.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z8.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z15.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z5.h, z14.h, z22.h\n"
+ "trn1 z14.h, z22.h, z27.h\n"
+ "trn1 z20.h, z27.h, z13.h\n"
+ "st1h { z16.h }, p2, [x22]\n"
+ "st1h { z7.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z12.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z5.h }, p2, [x22, #3, MUL VL]\n"
+ "st1h { z14.h }, p2, [x22, #4, MUL VL]\n"
+ "st1h { z20.h }, p2, [x22, #5, MUL VL]\n"
"cbz x21, 3f\n"
- "ld1w { z7.s }, p1/Z, [x21, x17, LSL #2]\n"
+ "ld1w { z8.s }, p1/Z, [x21, x17, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z4.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "ld1w { z11.s }, p1/Z, [x20, x17, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x25, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x25, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x7, x23, LSL #22\n"
"mov x22, #0x8\n"
- "add x21, x6, x5\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x6, x5\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x11, #0x0\n"
"mov x8, #0x8\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x16\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x25, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x6, x16\n"
+ "orr x20, x7, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x16, x6, x20, x16\n"
- ".inst 0xc0046a40 // mova za.d[x11, #0], { z18.d-z19.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0046a41 // mova za.d[x11, #1], { z18.d-z19.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0046b80 // mova za.d[x11, #0], { z28.d-z29.d }\n"
"mov x22, #0x4\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x16, x6, x21, x16\n"
+ ".inst 0xc0046b81 // mova za.d[x11, #1], { z28.d-z29.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0046b82 // mova za.d[x11, #2], { z28.d-z29.d }\n"
"ldp x14, x13, [x23], #0x10\n"
- ".inst 0xc0046a42 // mova za.d[x11, #2], { z18.d-z19.d }\n"
+ ".inst 0xc0046b83 // mova za.d[x11, #3], { z28.d-z29.d }\n"
"ldp x4, x10, [x20], #0x10\n"
- ".inst 0xc0046a43 // mova za.d[x11, #3], { z18.d-z19.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0046a44 // mova za.d[x11, #4], { z18.d-z19.d }\n"
+ ".inst 0xc0046b84 // mova za.d[x11, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0046b85 // mova za.d[x11, #5], { z28.d-z29.d }\n"
"ldp x9, x28, [x23], #0x10\n"
- ".inst 0xc0046a45 // mova za.d[x11, #5], { z18.d-z19.d }\n"
+ ".inst 0xc0046b86 // mova za.d[x11, #6], { z28.d-z29.d }\n"
"ldp x27, x26, [x20], #0x10\n"
- ".inst 0xc0046a46 // mova za.d[x11, #6], { z18.d-z19.d }\n"
- ".inst 0xc0046a47 // mova za.d[x11, #7], { z18.d-z19.d }\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
+ ".inst 0xc0046b87 // mova za.d[x11, #7], { z28.d-z29.d }\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
"csel x20, x21, x22, LT\n"
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066810 // mova { z16.d-z17.d }, za.d[x11, #0]\n"
"sub x15, x15, x21\n"
- ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
- ".inst 0xc1a4aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z4.s\n"
- ".inst 0xc1acab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z12.s\n"
- ".inst 0xc1b0cf14 // sclamp { z20.s-z23.s }, z24.s, z16.s\n"
+ ".inst 0xc0066832 // mova { z18.d-z19.d }, za.d[x11, #1]\n"
+ ".inst 0xc1a8ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+ ".inst 0xc1abaa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
+ ".inst 0xc1becff0 // sclamp { z16.s-z19.s }, z31.s, z30.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z20.s }, p1, [x14]\n"
+ "st1b { z16.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z22.s }, p1, [x13]\n"
+ "st1b { z18.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z21.s }, p1, [x9]\n"
+ "st1b { z17.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z23.s }, p1, [x28]\n"
+ "st1b { z19.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -328,331 +333,331 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
"add x21, x16, %x[ld_in_row]\n"
- "ld1b { z1.s }, p1/Z, [x16]\n"
+ "ld1b { z4.s }, p1/Z, [x16]\n"
"addvl x20, SP, #24\n"
- "ld1b { z28.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z1.h, z28.h\n"
- "add z27.h, z27.h, z17.h\n"
- "ld1b { z1.s }, p1/Z, [x21]\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1b { z2.s }, p1/Z, [x21]\n"
+ "ld1b { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z28.h, z1.h, z2.h\n"
- "add z28.h, z28.h, z17.h\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
+ "ld1b { z19.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z6.s }, p1/Z, [x21]\n"
+ "trn1 z22.h, z4.h, z13.h\n"
+ "ld1b { z27.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z13.h, z6.h\n"
- "add z29.h, z29.h, z17.h\n"
- "ld1b { z30.s }, p1/Z, [x21]\n"
+ "trn1 z23.h, z25.h, z19.h\n"
+ "ld1b { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16a7768 // sdot za.s[x11, 0], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ "add z22.h, z22.h, z15.h\n"
+ "trn1 z24.h, z14.h, z27.h\n"
"ld1b { z20.s }, p1/Z, [x21]\n"
- "trn1 z30.h, z30.h, z20.h\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "add z30.h, z30.h, z17.h\n"
- ".inst 0xc1697788 // sdot za.s[x11, 0], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc1617789 // sdot za.s[x11, 1], { z28.h-z29.h }, z1.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z23.h, z23.h, z15.h\n"
+ ".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "trn1 z25.h, z21.h, z20.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16d76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z13.h\n"
+ ".inst 0xc16c76c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z12.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc16e76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z14.h\n"
+ ".inst 0xc16676e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xc1617708 // sdot za.s[x11, 0], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xc1607709 // sdot za.s[x11, 1], { z24.h-z25.h }, z0.h\n"
"9:" // Unpadded: 3 priming loads
"add x22, x16, %x[ld_in_row]\n"
- "ld1b { z2.s }, p1/Z, [x16]\n"
+ "ld1b { z21.s }, p1/Z, [x16]\n"
"addvl x21, SP, #18\n"
- "ld1b { z28.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z20.h, z2.h, z28.h\n"
- "add z20.h, z20.h, z17.h\n"
- "ld1b { z31.s }, p1/Z, [x22]\n"
+ "ld1b { z18.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- "ld1b { z11.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z21.h, z31.h, z11.h\n"
- "add z21.h, z21.h, z17.h\n"
- "ld1b { z25.s }, p1/Z, [x22]\n"
+ "ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1b { z8.s }, p1/Z, [x22]\n"
+ "ld1b { z3.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z22.h, z25.h, z8.h\n"
- "add z22.h, z22.h, z17.h\n"
- "ld1b { z8.s }, p1/Z, [x22]\n"
+ "ld1b { z27.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16e7688 // sdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
- "ld1b { z3.s }, p1/Z, [x22]\n"
- "trn1 z23.h, z8.h, z3.h\n"
- ".inst 0xc1667689 // sdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc161768a // sdot za.s[x11, 2], { z20.h-z21.h }, z1.h\n"
- "add z23.h, z23.h, z17.h\n"
+ "trn1 z24.h, z21.h, z18.h\n"
+ "ld1b { z7.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z25.h, z17.h, z3.h\n"
+ "ld1b { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ "add z24.h, z24.h, z15.h\n"
+ "trn1 z26.h, z27.h, z7.h\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
".inst 0xa1412aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc160768b // sdot za.s[x11, 3], { z20.h-z21.h }, z0.h\n"
- ".inst 0xc16976a8 // sdot za.s[x11, 0], { z21.h-z22.h }, z9.h\n"
- ".inst 0xa0422aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16176a9 // sdot za.s[x11, 1], { z21.h-z22.h }, z1.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16976aa // sdot za.s[x11, 2], { z21.h-z22.h }, z9.h\n"
- ".inst 0xc16176ab // sdot za.s[x11, 3], { z21.h-z22.h }, z1.h\n"
- ".inst 0xc16f76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z15.h\n"
- ".inst 0xc16e76c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z14.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z11.h\n"
- ".inst 0xc16a76cb // sdot za.s[x11, 3], { z22.h-z23.h }, z10.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xa0422aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "trn1 z27.h, z17.h, z16.h\n"
+ "add z26.h, z26.h, z15.h\n"
+ ".inst 0xc1637708 // sdot za.s[x11, 0], { z24.h-z25.h }, z3.h\n"
+ ".inst 0xc1627709 // sdot za.s[x11, 1], { z24.h-z25.h }, z2.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ "add z27.h, z27.h, z15.h\n"
+ ".inst 0xc16d770a // sdot za.s[x11, 2], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc16c770b // sdot za.s[x11, 3], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc1697728 // sdot za.s[x11, 0], { z25.h-z26.h }, z9.h\n"
+ ".inst 0xc1617729 // sdot za.s[x11, 1], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
+ ".inst 0xc1677748 // sdot za.s[x11, 0], { z26.h-z27.h }, z7.h\n"
+ ".inst 0xc1667749 // sdot za.s[x11, 1], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa0422a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
+ ".inst 0xc16c774b // sdot za.s[x11, 3], { z26.h-z27.h }, z12.h\n"
"10:" // Unpadded: 2 priming loads
"add x23, x16, %x[ld_in_row]\n"
- "ld1b { z2.s }, p1/Z, [x16]\n"
+ "ld1b { z0.s }, p1/Z, [x16]\n"
"addvl x22, SP, #12\n"
- "ld1b { z22.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z0.h, z2.h, z22.h\n"
- "add z0.h, z0.h, z17.h\n"
- "ld1b { z14.s }, p1/Z, [x23]\n"
+ "ld1b { z19.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
- "ld1b { z6.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z1.h, z14.h, z6.h\n"
- "add z1.h, z1.h, z17.h\n"
- "ld1b { z15.s }, p1/Z, [x23]\n"
+ "ld1b { z4.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- "ld1b { z6.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z2.h, z15.h, z6.h\n"
- "add z2.h, z2.h, z17.h\n"
- "ld1b { z21.s }, p1/Z, [x23]\n"
+ "ld1b { z3.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16f7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z15.h\n"
- "ld1b { z30.s }, p1/Z, [x23]\n"
- "trn1 z3.h, z21.h, z30.h\n"
- ".inst 0xc16e7409 // sdot za.s[x11, 1], { z0.h-z1.h }, z14.h\n"
- ".inst 0xa1402aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16d740a // sdot za.s[x11, 2], { z0.h-z1.h }, z13.h\n"
- "add z3.h, z3.h, z17.h\n"
- ".inst 0xa0412ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc165740b // sdot za.s[x11, 3], { z0.h-z1.h }, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16f7428 // sdot za.s[x11, 0], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e7429 // sdot za.s[x11, 1], { z1.h-z2.h }, z14.h\n"
- ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16b740c // sdot za.s[x11, 4], { z0.h-z1.h }, z11.h\n"
- ".inst 0xc16a740d // sdot za.s[x11, 5], { z0.h-z1.h }, z10.h\n"
- ".inst 0xc16f742a // sdot za.s[x11, 2], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e742b // sdot za.s[x11, 3], { z1.h-z2.h }, z14.h\n"
- ".inst 0xa0412a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1697448 // sdot za.s[x11, 0], { z2.h-z3.h }, z9.h\n"
- ".inst 0xc1687449 // sdot za.s[x11, 1], { z2.h-z3.h }, z8.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f742c // sdot za.s[x11, 4], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e742d // sdot za.s[x11, 5], { z1.h-z2.h }, z14.h\n"
- ".inst 0xc16b744a // sdot za.s[x11, 2], { z2.h-z3.h }, z11.h\n"
- ".inst 0xc16a744b // sdot za.s[x11, 3], { z2.h-z3.h }, z10.h\n"
- ".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc161744c // sdot za.s[x11, 4], { z2.h-z3.h }, z1.h\n"
- ".inst 0xc160744d // sdot za.s[x11, 5], { z2.h-z3.h }, z0.h\n"
+ "ld1b { z17.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z22.h, z0.h, z19.h\n"
+ "ld1b { z25.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z23.h, z4.h, z3.h\n"
+ "ld1b { z9.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+ "add z22.h, z22.h, z15.h\n"
+ "trn1 z24.h, z17.h, z25.h\n"
+ "ld1b { z17.s }, p1/Z, [x23]\n"
+ ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ "add z23.h, z23.h, z15.h\n"
+ ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ "trn1 z25.h, z9.h, z17.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16576c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc16576ca // sdot za.s[x11, 2], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476cb // sdot za.s[x11, 3], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16776e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z7.h\n"
+ ".inst 0xc16676e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16576cc // sdot za.s[x11, 4], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476cd // sdot za.s[x11, 5], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xc16776ea // sdot za.s[x11, 2], { z23.h-z24.h }, z7.h\n"
+ ".inst 0xc16676eb // sdot za.s[x11, 3], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1617708 // sdot za.s[x11, 0], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xc1607709 // sdot za.s[x11, 1], { z24.h-z25.h }, z0.h\n"
+ ".inst 0xa0422aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16576ec // sdot za.s[x11, 4], { z23.h-z24.h }, z5.h\n"
+ ".inst 0xc16476ed // sdot za.s[x11, 5], { z23.h-z24.h }, z4.h\n"
+ ".inst 0xc167770a // sdot za.s[x11, 2], { z24.h-z25.h }, z7.h\n"
+ ".inst 0xc166770b // sdot za.s[x11, 3], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1422a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d770c // sdot za.s[x11, 4], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc165770d // sdot za.s[x11, 5], { z24.h-z25.h }, z5.h\n"
"11:" // Unpadded: 1 priming loads
"add x24, x16, %x[ld_in_row]\n"
- "ld1b { z0.s }, p1/Z, [x16]\n"
+ "ld1b { z16.s }, p1/Z, [x16]\n"
"addvl x23, SP, #6\n"
- "ld1b { z3.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z28.h, z0.h, z3.h\n"
- "add z28.h, z28.h, z17.h\n"
- "ld1b { z6.s }, p1/Z, [x24]\n"
+ "ld1b { z22.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
"addvl x22, SP, #12\n"
- "ld1b { z30.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z29.h, z6.h, z30.h\n"
- "add z29.h, z29.h, z17.h\n"
- "ld1b { z1.s }, p1/Z, [x24]\n"
+ "ld1b { z19.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
"ld1b { z25.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
- "trn1 z30.h, z1.h, z25.h\n"
- "add z30.h, z30.h, z17.h\n"
- "ld1b { z3.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1617788 // sdot za.s[x11, 0], { z28.h-z29.h }, z1.h\n"
+ "ld1b { z6.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z18.h, z16.h, z22.h\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1b { z5.s }, p1/Z, [x24]\n"
- "trn1 z31.h, z3.h, z5.h\n"
- ".inst 0xc1607789 // sdot za.s[x11, 1], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16e778a // sdot za.s[x11, 2], { z28.h-z29.h }, z14.h\n"
- "add z31.h, z31.h, z17.h\n"
- ".inst 0xa1412ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc166778b // sdot za.s[x11, 3], { z28.h-z29.h }, z6.h\n"
- ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16a77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z10.h\n"
- ".inst 0xc16277a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z2.h\n"
- ".inst 0xa0412ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xa1422ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16f778c // sdot za.s[x11, 4], { z28.h-z29.h }, z15.h\n"
- ".inst 0xc16e778d // sdot za.s[x11, 5], { z28.h-z29.h }, z14.h\n"
- ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16977aa // sdot za.s[x11, 2], { z29.h-z30.h }, z9.h\n"
- ".inst 0xc16877ab // sdot za.s[x11, 3], { z29.h-z30.h }, z8.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a77c8 // sdot za.s[x11, 0], { z30.h-z31.h }, z10.h\n"
- ".inst 0xc16277c9 // sdot za.s[x11, 1], { z30.h-z31.h }, z2.h\n"
- ".inst 0xa1422ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16e778e // sdot za.s[x11, 6], { z28.h-z29.h }, z14.h\n"
- ".inst 0xc166778f // sdot za.s[x11, 7], { z28.h-z29.h }, z6.h\n"
- ".inst 0xc16d77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z13.h\n"
- ".inst 0xc16577ad // sdot za.s[x11, 5], { z29.h-z30.h }, z5.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a77ca // sdot za.s[x11, 2], { z30.h-z31.h }, z10.h\n"
- ".inst 0xc16277cb // sdot za.s[x11, 3], { z30.h-z31.h }, z2.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z14.h\n"
- ".inst 0xc16677af // sdot za.s[x11, 7], { z29.h-z30.h }, z6.h\n"
- ".inst 0xc16977cc // sdot za.s[x11, 4], { z30.h-z31.h }, z9.h\n"
- ".inst 0xc16877cd // sdot za.s[x11, 5], { z30.h-z31.h }, z8.h\n"
- ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16e77ce // sdot za.s[x11, 6], { z30.h-z31.h }, z14.h\n"
- ".inst 0xc16677cf // sdot za.s[x11, 7], { z30.h-z31.h }, z6.h\n"
+ "ld1b { z4.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z19.h, z19.h, z25.h\n"
+ "ld1b { z27.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ ".inst 0xa1402ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
+ "add z18.h, z18.h, z15.h\n"
+ "trn1 z20.h, z6.h, z4.h\n"
+ "ld1b { z22.s }, p1/Z, [x24]\n"
+ ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ "add z19.h, z19.h, z15.h\n"
+ ".inst 0xa1422ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "trn1 z21.h, z27.h, z22.h\n"
+ "add z20.h, z20.h, z15.h\n"
+ ".inst 0xc1697648 // sdot za.s[x11, 0], { z18.h-z19.h }, z9.h\n"
+ ".inst 0xc1617649 // sdot za.s[x11, 1], { z18.h-z19.h }, z1.h\n"
+ ".inst 0xa1402ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22]\n"
+ "add z21.h, z21.h, z15.h\n"
+ ".inst 0xc16c764a // sdot za.s[x11, 2], { z18.h-z19.h }, z12.h\n"
+ ".inst 0xc164764b // sdot za.s[x11, 3], { z18.h-z19.h }, z4.h\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16d7668 // sdot za.s[x11, 0], { z19.h-z20.h }, z13.h\n"
+ ".inst 0xc1657669 // sdot za.s[x11, 1], { z19.h-z20.h }, z5.h\n"
+ ".inst 0xa1412ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163764c // sdot za.s[x11, 4], { z18.h-z19.h }, z3.h\n"
+ ".inst 0xc162764d // sdot za.s[x11, 5], { z18.h-z19.h }, z2.h\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16c766a // sdot za.s[x11, 2], { z19.h-z20.h }, z12.h\n"
+ ".inst 0xc164766b // sdot za.s[x11, 3], { z19.h-z20.h }, z4.h\n"
+ ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16e7688 // sdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
+ ".inst 0xc1667689 // sdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
+ ".inst 0xa1422ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc169764e // sdot za.s[x11, 6], { z18.h-z19.h }, z9.h\n"
+ ".inst 0xc161764f // sdot za.s[x11, 7], { z18.h-z19.h }, z1.h\n"
+ ".inst 0xc163766c // sdot za.s[x11, 4], { z19.h-z20.h }, z3.h\n"
+ ".inst 0xc162766d // sdot za.s[x11, 5], { z19.h-z20.h }, z2.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16c768a // sdot za.s[x11, 2], { z20.h-z21.h }, z12.h\n"
+ ".inst 0xc164768b // sdot za.s[x11, 3], { z20.h-z21.h }, z4.h\n"
+ ".inst 0xa1422aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc169766e // sdot za.s[x11, 6], { z19.h-z20.h }, z9.h\n"
+ ".inst 0xc161766f // sdot za.s[x11, 7], { z19.h-z20.h }, z1.h\n"
+ ".inst 0xc16c768c // sdot za.s[x11, 4], { z20.h-z21.h }, z12.h\n"
+ ".inst 0xc164768d // sdot za.s[x11, 5], { z20.h-z21.h }, z4.h\n"
+ ".inst 0xa0422a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d768e // sdot za.s[x11, 6], { z20.h-z21.h }, z13.h\n"
+ ".inst 0xc16c768f // sdot za.s[x11, 7], { z20.h-z21.h }, z12.h\n"
"12:" // Unpadded: 0 priming loads
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
"cbz x25, 22f\n"
"add x20, x16, %x[ld_in_row]\n"
- "ld1b { z26.s }, p1/Z, [x16]\n"
+ "ld1b { z6.s }, p1/Z, [x16]\n"
"sub x25, x25, #0x1\n"
- "ld1b { z28.s }, p1/Z, [x20]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z26.h, z28.h\n"
"sub x15, x15, #0x1\n"
- "ld1b { z31.s }, p1/Z, [x20]\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"cmp x25, x15\n"
- "add z25.h, z25.h, z17.h\n"
- "ld1b { z15.s }, p1/Z, [x20]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z31.h, z15.h\n"
"csel x25, x25, x15, LT\n"
- "ld1b { z22.s }, p1/Z, [x20]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z26.h, z26.h, z17.h\n"
+ "trn1 z24.h, z6.h, z13.h\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1b { z8.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z27.h, z22.h, z8.h\n"
- "add z27.h, z27.h, z17.h\n"
- "ld1b { z21.s }, p1/Z, [x20]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"sub x15, x15, x25\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "trn1 z28.h, z21.h, z20.h\n"
- "add z28.h, z28.h, z17.h\n"
+ "ld1b { z22.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z25.h, z21.h, z19.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "trn1 z26.h, z20.h, z13.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ "trn1 z27.h, z22.h, z16.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ "add z26.h, z26.h, z15.h\n"
+ "add z27.h, z27.h, z15.h\n"
"cbz x25, 21f\n"
"13:" // Unpadded: Main loop
"addvl x24, SP, #6\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
"addvl x23, SP, #12\n"
- "ld1b { z21.s }, p1/Z, [x16]\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402b0e // ld1h { z14.h-z15.h }, pn10.b/Z, [x24]\n"
+ "ld1b { z23.s }, p1/Z, [x16]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
"addvl x22, SP, #18\n"
"addvl x21, SP, #24\n"
- ".inst 0xc16f772a // sdot za.s[x11, 2], { z25.h-z26.h }, z15.h\n"
"add x20, x16, %x[ld_in_row]\n"
- "ld1b { z0.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16e772b // sdot za.s[x11, 3], { z25.h-z26.h }, z14.h\n"
- ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
"subs x25, x25, #0x1\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412b05 // ld1h { z5.h, z13.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
- ".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
- "ld1b { z31.s }, p1/Z, [x20]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc169770a // sdot za.s[x11, 2], { z24.h-z25.h }, z9.h\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
- "ld1b { z29.s }, p1/Z, [x20]\n"
+ ".inst 0xc161770b // sdot za.s[x11, 3], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc165774b // sdot za.s[x11, 3], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412b04 // ld1h { z4.h, z12.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
"ld1b { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc16f772e // sdot za.s[x11, 6], { z25.h-z26.h }, z15.h\n"
- "ld1b { z30.s }, p1/Z, [x20]\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1402ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc16c772a // sdot za.s[x11, 2], { z25.h-z26.h }, z12.h\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc164772b // sdot za.s[x11, 3], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16e772f // sdot za.s[x11, 7], { z25.h-z26.h }, z14.h\n"
- ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16d774c // sdot za.s[x11, 4], { z26.h-z27.h }, z13.h\n"
- "ld1b { z6.s }, p1/Z, [x20]\n"
- ".inst 0xc165774d // sdot za.s[x11, 5], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16a776a // sdot za.s[x11, 2], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776b // sdot za.s[x11, 3], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422b02 // ld1h { z2.h-z3.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc16d770e // sdot za.s[x11, 6], { z24.h-z25.h }, z13.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ ".inst 0xc165770f // sdot za.s[x11, 7], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a776c // sdot za.s[x11, 4], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776d // sdot za.s[x11, 5], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc169776e // sdot za.s[x11, 6], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776f // sdot za.s[x11, 7], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z15.h\n"
- ".inst 0xc16e1729 // sdot za.s[x8, 1], { z25.h-z26.h }, z14.h\n"
- "trn1 z25.h, z21.h, z0.h\n"
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16d1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z13.h\n"
- "add z25.h, z25.h, z17.h\n"
- ".inst 0xc1651749 // sdot za.s[x8, 1], { z26.h-z27.h }, z5.h\n"
- "trn1 z26.h, z20.h, z31.h\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- ".inst 0xc16b1768 // sdot za.s[x8, 0], { z27.h-z28.h }, z11.h\n"
- "add z26.h, z26.h, z17.h\n"
- ".inst 0xc16a1769 // sdot za.s[x8, 1], { z27.h-z28.h }, z10.h\n"
- "trn1 z27.h, z29.h, z22.h\n"
- "trn1 z28.h, z30.h, z6.h\n"
+ ".inst 0xc161774c // sdot za.s[x11, 4], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc160774d // sdot za.s[x11, 5], { z26.h-z27.h }, z0.h\n"
+ ".inst 0xa0422ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc167774e // sdot za.s[x11, 6], { z26.h-z27.h }, z7.h\n"
+ ".inst 0xc166774f // sdot za.s[x11, 7], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa1422aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16c1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc1641709 // sdot za.s[x8, 1], { z24.h-z25.h }, z4.h\n"
+ "trn1 z24.h, z23.h, z19.h\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16d1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc1651729 // sdot za.s[x8, 1], { z25.h-z26.h }, z5.h\n"
+ "trn1 z25.h, z21.h, z20.h\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16e1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z14.h\n"
+ ".inst 0xc1661749 // sdot za.s[x8, 1], { z26.h-z27.h }, z6.h\n"
+ "trn1 z26.h, z22.h, z18.h\n"
+ "trn1 z27.h, z17.h, z16.h\n"
"add x8, x8, #0x2\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "add z27.h, z27.h, z17.h\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ "add z26.h, z26.h, z15.h\n"
+ "add z27.h, z27.h, z15.h\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "add z28.h, z28.h, z17.h\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 13b\n"
"b 21f\n"
@@ -667,513 +672,513 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z9.s }, p0/Z, [x16]\n"
- "add z9.h, p0/M, z9.h, z17.h\n"
"add x21, x16, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0412a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x21]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
+ "ld1b { z26.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z9.h, z22.h\n"
- "trn1 z0.h, z21.h, z20.h\n"
+ "add z26.h, p0/M, z26.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
+ "ld1b { z18.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z26.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
+ ".inst 0xc16e76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z14.h\n"
+ "ld1b { z25.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc16676c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z6.h\n"
+ "add z25.h, p0/M, z25.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x21]\n"
- "addvl x20, SP, #24\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1b { z1.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z25.h\n"
+ "add z1.h, p0/M, z1.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "trn1 z1.h, z22.h, z20.h\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16a77e8 // sdot za.s[x11, 0], { z31.h-z0.h }, z10.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc16277e9 // sdot za.s[x11, 1], { z31.h-z0.h }, z2.h\n"
- ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "trn1 z2.h, z21.h, z20.h\n"
- ".inst 0xc16d7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z13.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1657409 // sdot za.s[x11, 1], { z0.h-z1.h }, z5.h\n"
- ".inst 0xc1697428 // sdot za.s[x11, 0], { z1.h-z2.h }, z9.h\n"
- ".inst 0xc1687429 // sdot za.s[x11, 1], { z1.h-z2.h }, z8.h\n"
+ ".inst 0xc16d76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z13.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16c76e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ "trn1 z25.h, z1.h, z16.h\n"
+ ".inst 0xc1637708 // sdot za.s[x11, 0], { z24.h-z25.h }, z3.h\n"
+ ".inst 0xc1627709 // sdot za.s[x11, 1], { z24.h-z25.h }, z2.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z5.s }, p0/Z, [x16]\n"
- "add z5.h, p0/M, z5.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #18\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1412aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z0.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z28.h, z5.h, z22.h\n"
- "trn1 z29.h, z21.h, z20.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z1.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z14.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1667409 // sdot za.s[x11, 1], { z0.h-z1.h }, z6.h\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e740a // sdot za.s[x11, 2], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc166740b // sdot za.s[x11, 3], { z0.h-z1.h }, z6.h\n"
+ "ld1b { z0.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z2.h, z18.h, z17.h\n"
+ "add z0.h, p0/M, z0.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "trn1 z30.h, z22.h, z20.h\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #24\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc1617788 // sdot za.s[x11, 0], { z28.h-z29.h }, z1.h\n"
- ".inst 0xc1607789 // sdot za.s[x11, 1], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- "trn1 z31.h, z21.h, z20.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc169778a // sdot za.s[x11, 2], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc161778b // sdot za.s[x11, 3], { z28.h-z29.h }, z1.h\n"
- ".inst 0xa1422aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z15.h\n"
- ".inst 0xc16e77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z14.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16977aa // sdot za.s[x11, 2], { z29.h-z30.h }, z9.h\n"
- ".inst 0xc16177ab // sdot za.s[x11, 3], { z29.h-z30.h }, z1.h\n"
- ".inst 0xc16b77c8 // sdot za.s[x11, 0], { z30.h-z31.h }, z11.h\n"
- ".inst 0xc16377c9 // sdot za.s[x11, 1], { z30.h-z31.h }, z3.h\n"
- ".inst 0xa0422a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f77ca // sdot za.s[x11, 2], { z30.h-z31.h }, z15.h\n"
- ".inst 0xc16e77cb // sdot za.s[x11, 3], { z30.h-z31.h }, z14.h\n"
+ ".inst 0xc16c7428 // sdot za.s[x11, 0], { z1.h-z2.h }, z12.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ ".inst 0xc1647429 // sdot za.s[x11, 1], { z1.h-z2.h }, z4.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
+ ".inst 0xc16e742a // sdot za.s[x11, 2], { z1.h-z2.h }, z14.h\n"
+ ".inst 0xc166742b // sdot za.s[x11, 3], { z1.h-z2.h }, z6.h\n"
+ "trn1 z3.h, z0.h, z17.h\n"
+ ".inst 0xc16d7448 // sdot za.s[x11, 0], { z2.h-z3.h }, z13.h\n"
+ ".inst 0xc1657449 // sdot za.s[x11, 1], { z2.h-z3.h }, z5.h\n"
+ ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16e744a // sdot za.s[x11, 2], { z2.h-z3.h }, z14.h\n"
+ ".inst 0xc166744b // sdot za.s[x11, 3], { z2.h-z3.h }, z6.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x23, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z29.s }, p0/Z, [x16]\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x22, SP, #12\n"
+ ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
+ "addvl x21, SP, #18\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1412ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z8.h, z29.h, z22.h\n"
- "trn1 z9.h, z21.h, z20.h\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16376c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z3.h\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xc16276c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z2.h\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cb // sdot za.s[x11, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ ".inst 0xc16976cc // sdot za.s[x11, 4], { z22.h-z23.h }, z9.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- "trn1 z10.h, z22.h, z20.h\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16f7508 // sdot za.s[x11, 0], { z8.h-z9.h }, z15.h\n"
- ".inst 0xc16e7509 // sdot za.s[x11, 1], { z8.h-z9.h }, z14.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #24\n"
- "trn1 z11.h, z21.h, z20.h\n"
- ".inst 0xa1412ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16e750a // sdot za.s[x11, 2], { z8.h-z9.h }, z14.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc166750b // sdot za.s[x11, 3], { z8.h-z9.h }, z6.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16d7528 // sdot za.s[x11, 0], { z9.h-z10.h }, z13.h\n"
- ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc1657529 // sdot za.s[x11, 1], { z9.h-z10.h }, z5.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16f750c // sdot za.s[x11, 4], { z8.h-z9.h }, z15.h\n"
- ".inst 0xc16e750d // sdot za.s[x11, 5], { z8.h-z9.h }, z14.h\n"
- ".inst 0xc16d752a // sdot za.s[x11, 2], { z9.h-z10.h }, z13.h\n"
- ".inst 0xc165752b // sdot za.s[x11, 3], { z9.h-z10.h }, z5.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1617548 // sdot za.s[x11, 0], { z10.h-z11.h }, z1.h\n"
- ".inst 0xc1607549 // sdot za.s[x11, 1], { z10.h-z11.h }, z0.h\n"
- ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e752c // sdot za.s[x11, 4], { z9.h-z10.h }, z14.h\n"
- ".inst 0xc166752d // sdot za.s[x11, 5], { z9.h-z10.h }, z6.h\n"
- ".inst 0xc161754a // sdot za.s[x11, 2], { z10.h-z11.h }, z1.h\n"
- ".inst 0xc160754b // sdot za.s[x11, 3], { z10.h-z11.h }, z0.h\n"
- ".inst 0xa0422a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f754c // sdot za.s[x11, 4], { z10.h-z11.h }, z15.h\n"
- ".inst 0xc16e754d // sdot za.s[x11, 5], { z10.h-z11.h }, z14.h\n"
+ ".inst 0xc16176cd // sdot za.s[x11, 5], { z22.h-z23.h }, z1.h\n"
+ ".inst 0xc16c76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z12.h\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ ".inst 0xc16476e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z4.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0xc16e76ea // sdot za.s[x11, 2], { z23.h-z24.h }, z14.h\n"
+ ".inst 0xc16676eb // sdot za.s[x11, 3], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ ".inst 0xc16976ec // sdot za.s[x11, 4], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176ed // sdot za.s[x11, 5], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xc16d7708 // sdot za.s[x11, 0], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc1657709 // sdot za.s[x11, 1], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa0422aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc165770a // sdot za.s[x11, 2], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc164770b // sdot za.s[x11, 3], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x24, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z1.s }, p0/Z, [x16]\n"
- "add z1.h, p0/M, z1.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x23, SP, #6\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ "addvl x22, SP, #12\n"
+ "addvl x21, SP, #18\n"
+ ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ "addvl x20, SP, #24\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z21.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z1.h, z22.h\n"
- "trn1 z27.h, z21.h, z20.h\n"
+ "ld1b { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z22.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e76a8 // sdot za.s[x11, 0], { z21.h-z22.h }, z14.h\n"
+ "ld1b { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ ".inst 0xc16676a9 // sdot za.s[x11, 1], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "addvl x23, SP, #6\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16776aa // sdot za.s[x11, 2], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16676ab // sdot za.s[x11, 3], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z23.h, z18.h, z16.h\n"
+ ".inst 0xc16776ac // sdot za.s[x11, 4], { z21.h-z22.h }, z7.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402aee // ld1h { z14.h-z15.h }, pn10.b/Z, [x23]\n"
- "trn1 z28.h, z22.h, z20.h\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16f7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z15.h\n"
- ".inst 0xc16e7749 // sdot za.s[x11, 1], { z26.h-z27.h }, z14.h\n"
- ".inst 0xa0402ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
- "trn1 z29.h, z21.h, z20.h\n"
- ".inst 0xa0412aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc161774a // sdot za.s[x11, 2], { z26.h-z27.h }, z1.h\n"
- "addvl x20, SP, #24\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc160774b // sdot za.s[x11, 3], { z26.h-z27.h }, z0.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16b7768 // sdot za.s[x11, 0], { z27.h-z28.h }, z11.h\n"
- ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16a7769 // sdot za.s[x11, 1], { z27.h-z28.h }, z10.h\n"
- ".inst 0xa0412aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16e774c // sdot za.s[x11, 4], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc166774d // sdot za.s[x11, 5], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16b776a // sdot za.s[x11, 2], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a776b // sdot za.s[x11, 3], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xc16676ad // sdot za.s[x11, 5], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16576c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z5.h\n"
+ "ld1b { z16.s }, p0/Z, [x24]\n"
+ ".inst 0xc16476c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc16d76ae // sdot za.s[x11, 6], { z21.h-z22.h }, z13.h\n"
+ ".inst 0xc16c76af // sdot za.s[x11, 7], { z21.h-z22.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0xc16e76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cb // sdot za.s[x11, 3], { z22.h-z23.h }, z6.h\n"
".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc1697788 // sdot za.s[x11, 0], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc1687789 // sdot za.s[x11, 1], { z28.h-z29.h }, z8.h\n"
- ".inst 0xa1422ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
- ".inst 0xc16e776c // sdot za.s[x11, 4], { z27.h-z28.h }, z14.h\n"
- ".inst 0xc166776d // sdot za.s[x11, 5], { z27.h-z28.h }, z6.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a778a // sdot za.s[x11, 2], { z28.h-z29.h }, z10.h\n"
- ".inst 0xc162778b // sdot za.s[x11, 3], { z28.h-z29.h }, z2.h\n"
- ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e776e // sdot za.s[x11, 6], { z27.h-z28.h }, z14.h\n"
- ".inst 0xc166776f // sdot za.s[x11, 7], { z27.h-z28.h }, z6.h\n"
- ".inst 0xc161778c // sdot za.s[x11, 4], { z28.h-z29.h }, z1.h\n"
- ".inst 0xc160778d // sdot za.s[x11, 5], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1422a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16a778e // sdot za.s[x11, 6], { z28.h-z29.h }, z10.h\n"
- ".inst 0xc162778f // sdot za.s[x11, 7], { z28.h-z29.h }, z2.h\n"
+ "trn1 z24.h, z17.h, z16.h\n"
+ ".inst 0xc16e76cc // sdot za.s[x11, 4], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cd // sdot za.s[x11, 5], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16976e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc16776ce // sdot za.s[x11, 6], { z22.h-z23.h }, z7.h\n"
+ ".inst 0xc16676cf // sdot za.s[x11, 7], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xc16176ea // sdot za.s[x11, 2], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xc16076eb // sdot za.s[x11, 3], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xa1422aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16976ec // sdot za.s[x11, 4], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176ed // sdot za.s[x11, 5], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xa1422a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16c76ee // sdot za.s[x11, 6], { z23.h-z24.h }, z12.h\n"
+ ".inst 0xc16476ef // sdot za.s[x11, 7], { z23.h-z24.h }, z4.h\n"
"19:" // Padded: 0 priming loads
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
"cbz x25, 22f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z6.s }, p0/Z, [x16]\n"
- "add z6.h, p0/M, z6.h, z17.h\n"
"add x20, x16, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x25, x25, #0x1\n"
+ "sub x15, x15, #0x1\n"
+ "cmp x25, x15\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "csel x25, x25, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "sub x15, x15, x25\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z30.s }, p0/Z, [x20]\n"
- "add z30.h, p0/M, z30.h, z17.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z17.h\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z17.h\n"
"mov x12, #0x4\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z6.h, z30.h\n"
- "trn1 z26.h, z27.h, z26.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z17.h\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z9.s }, p0/Z, [x20]\n"
- "add z9.h, p0/M, z9.h, z17.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z26.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- "sub x25, x25, #0x1\n"
- "sub x15, x15, #0x1\n"
- "cmp x25, x15\n"
- "trn1 z27.h, z8.h, z9.h\n"
- "trn1 z28.h, z21.h, z29.h\n"
- "csel x25, x25, x15, LT\n"
- "add x16, x16, %x[ld_in_col]\n"
- "sub x15, x15, x25\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ "trn1 z27.h, z17.h, z16.h\n"
"cbz x25, 21f\n"
"20:" // Padded: Main loop
"mov x12, #0x0\n"
+ "addvl x24, SP, #6\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z8.s }, p0/Z, [x16]\n"
- "add z8.h, p0/M, z8.h, z17.h\n"
- "add x24, x16, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x24]\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
- "addvl x23, SP, #6\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
- "addvl x22, SP, #12\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
- ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402b05 // ld1h { z5.h, z13.h }, pn10.b/Z, [x24]\n"
+ "addvl x23, SP, #12\n"
+ "add x22, x16, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
"addvl x20, SP, #24\n"
- "ld1b { z29.s }, p0/Z, [x24]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- "mov x12, #0x4\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "subs x25, x25, #0x1\n"
+ "ld1b { z16.s }, p0/Z, [x16]\n"
+ ".inst 0xc16d770a // sdot za.s[x11, 2], { z24.h-z25.h }, z13.h\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc165770b // sdot za.s[x11, 3], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z19.s }, p0/Z, [x22]\n"
+ ".inst 0xc169772a // sdot za.s[x11, 2], { z25.h-z26.h }, z9.h\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc161772b // sdot za.s[x11, 3], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xa1412ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ "add z19.h, p0/M, z19.h, z15.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422b02 // ld1h { z2.h-z3.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc165770e // sdot za.s[x11, 6], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc164770f // sdot za.s[x11, 7], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
- "ld1b { z30.s }, p0/Z, [x24]\n"
- "add z30.h, p0/M, z30.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "ld1b { z23.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "add z23.h, p0/M, z23.h, z15.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0xc16d772e // sdot za.s[x11, 6], { z25.h-z26.h }, z13.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc165772f // sdot za.s[x11, 7], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc167774c // sdot za.s[x11, 4], { z26.h-z27.h }, z7.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc166774d // sdot za.s[x11, 5], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa0422aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "subs x25, x25, #0x1\n"
- ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
- "ld1b { z15.s }, p0/Z, [x24]\n"
- "add z15.h, p0/M, z15.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc165774b // sdot za.s[x11, 3], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa0412aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163774e // sdot za.s[x11, 6], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774f // sdot za.s[x11, 7], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "ld1b { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16c1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z12.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
- "ld1b { z20.s }, p0/Z, [x24]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc1641709 // sdot za.s[x8, 1], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ "trn1 z24.h, z16.h, z19.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16d1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc1651729 // sdot za.s[x8, 1], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "trn1 z25.h, z23.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
- "ld1b { z31.s }, p0/Z, [x24]\n"
- "add z31.h, p0/M, z31.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc16b774c // sdot za.s[x11, 4], { z26.h-z27.h }, z11.h\n"
- "ld1b { z22.s }, p0/Z, [x24]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- ".inst 0xc16a774d // sdot za.s[x11, 5], { z26.h-z27.h }, z10.h\n"
- ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc169776a // sdot za.s[x11, 2], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776b // sdot za.s[x11, 3], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16e774e // sdot za.s[x11, 6], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc166774f // sdot za.s[x11, 7], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc161776c // sdot za.s[x11, 4], { z27.h-z28.h }, z1.h\n"
- ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1422aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc169776e // sdot za.s[x11, 6], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776f // sdot za.s[x11, 7], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1631728 // sdot za.s[x8, 0], { z25.h-z26.h }, z3.h\n"
- ".inst 0xc1621729 // sdot za.s[x8, 1], { z25.h-z26.h }, z2.h\n"
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- "trn1 z25.h, z8.h, z21.h\n"
- ".inst 0xc16e1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc1661749 // sdot za.s[x8, 1], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "trn1 z26.h, z29.h, z30.h\n"
- ".inst 0xc16b1768 // sdot za.s[x8, 0], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a1769 // sdot za.s[x8, 1], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xc1631748 // sdot za.s[x8, 0], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc1621749 // sdot za.s[x8, 1], { z26.h-z27.h }, z2.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
"add x8, x8, #0x2\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "trn1 z27.h, z15.h, z20.h\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
- "trn1 z28.h, z31.h, z22.h\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ "trn1 z26.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ "ld1b { z18.s }, p0/Z, [x22]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ "trn1 z27.h, z17.h, z18.h\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 20b\n"
"21:" // Main loop tail
"addvl x23, SP, #6\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
"addvl x22, SP, #12\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
"addvl x21, SP, #18\n"
"addvl x20, SP, #24\n"
- ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
- ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
+ ".inst 0xc16e770a // sdot za.s[x11, 2], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770b // sdot za.s[x11, 3], { z24.h-z25.h }, z6.h\n"
".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16d772a // sdot za.s[x11, 2], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc165772b // sdot za.s[x11, 3], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc16c770e // sdot za.s[x11, 6], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc164770f // sdot za.s[x11, 7], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc169774a // sdot za.s[x11, 2], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc161774b // sdot za.s[x11, 3], { z26.h-z27.h }, z1.h\n"
- ".inst 0xa1412ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa1422ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc169774c // sdot za.s[x11, 4], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc161774d // sdot za.s[x11, 5], { z26.h-z27.h }, z1.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16b776a // sdot za.s[x11, 2], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a776b // sdot za.s[x11, 3], { z27.h-z28.h }, z10.h\n"
- ".inst 0xa0422ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa0412a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc163776c // sdot za.s[x11, 4], { z27.h-z28.h }, z3.h\n"
- ".inst 0xc162776d // sdot za.s[x11, 5], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16a776e // sdot za.s[x11, 6], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776f // sdot za.s[x11, 7], { z27.h-z28.h }, z2.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16c774c // sdot za.s[x11, 4], { z26.h-z27.h }, z12.h\n"
+ ".inst 0xc164774d // sdot za.s[x11, 5], { z26.h-z27.h }, z4.h\n"
+ ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc161774e // sdot za.s[x11, 6], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc160774f // sdot za.s[x11, 7], { z26.h-z27.h }, z0.h\n"
".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z15.h\n"
- ".inst 0xc16e1729 // sdot za.s[x8, 1], { z25.h-z26.h }, z14.h\n"
- ".inst 0xc1691748 // sdot za.s[x8, 0], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc1681749 // sdot za.s[x8, 1], { z26.h-z27.h }, z8.h\n"
- ".inst 0xc1611768 // sdot za.s[x8, 0], { z27.h-z28.h }, z1.h\n"
- ".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xc16d1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc1651709 // sdot za.s[x8, 1], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc16e1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc1661729 // sdot za.s[x8, 1], { z25.h-z26.h }, z6.h\n"
+ ".inst 0xc1611748 // sdot za.s[x8, 0], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc1601749 // sdot za.s[x8, 1], { z26.h-z27.h }, z0.h\n"
"add x8, x8, #0x2\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"22:" // Main loop skip tail
"cbz x15, 24f\n"
"23:" // Right padding loop
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066818 // mova { z24.d-z25.d }, za.d[x11, #0]\n"
"add x8, x8, #0x2\n"
"subs x15, x15, #0x1\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc006683a // mova { z26.d-z27.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ ".inst 0xc1abaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z11.s\n"
+ ".inst 0xc1aaab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z10.s\n"
+ ".inst 0xc1becff8 // sclamp { z24.s-z27.s }, z31.s, z30.s\n"
+ "st1b { z24.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z26.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z25.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z27.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 23b\n"
"24:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
"incw x20, ALL, MUL #16\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x17\n"
- "whilelt p1.s, x17, x7\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -1192,6 +1197,8 @@ void sme2_u8q_planar_5x5_s1_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #30\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
index 6c144afa77..6dbdcc6a84 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,194 +69,199 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x3, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0xb\n"
"ptrue p2.b\n"
- "mov x20, #0xb\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z7.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x3\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x20, x22, #0x8\n"
+ "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ldr x5, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x5\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x4\n"
+ "sub x21, x21, x4\n"
+ "mov SP, x20\n"
+ "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
"addvl SP, SP, #-15\n"
- "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z7.h, p2/M, z7.h\n"
+ "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "whilelt p1.s, XZR, x6\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z18.h, p2/M, z18.h\n"
+ "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "whilelt p8.s, XZR, x5\n"
+ "ld1rw { z19.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z21.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z12.s, #0x0\n"
+ "mov z20.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z12.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x20, x7, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1b { z13.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z0.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z12.h, #0x0\n"
+ "addvl x22, SP, #15\n"
+ "addvl x22, x22, #-3\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z21.d, z20.d\n"
+ "mov z22.d, z20.d\n"
+ "mov z23.d, z20.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1b { z24.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "ld1rh { z28.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "sub z13.h, z13.h, z28.h\n"
- "incw x22\n"
- "mov z26.h, #0x0\n"
- "ld1b { z22.s }, p2/Z, [x20]\n"
+ "ld1b { z30.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z22.h, z22.h, z28.h\n"
- "trn1 z17.h, z13.h, z22.h\n"
- "ld1b { z20.s }, p2/Z, [x20]\n"
+ "ld1b { z8.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z20.h, z20.h, z28.h\n"
- "addvl x21, SP, #15\n"
- "ld1b { z1.s }, p2/Z, [x20]\n"
+ "ld1b { z17.s }, p2/Z, [x20]\n"
+ "sub z24.h, z24.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "sub z1.h, z1.h, z28.h\n"
- "trn1 z29.h, z20.h, z1.h\n"
+ "sub z30.h, z30.h, z0.h\n"
+ "ld1b { z26.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z8.h, z8.h, z0.h\n"
+ "sub z17.h, z17.h, z0.h\n"
+ "sub z26.h, z26.h, z0.h\n"
+ "trn1 z16.h, z24.h, z30.h\n"
"ld1b { z27.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "sub z27.h, z27.h, z28.h\n"
- "incw x22\n"
- "ld1b { z14.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "sub z14.h, z14.h, z28.h\n"
- "addvl x21, x21, #-3\n"
- "ld1b { z18.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z18.h, z18.h, z28.h\n"
- "trn1 z22.h, z27.h, z26.h\n"
- "ld1b { z23.s }, p2/Z, [x20]\n"
+ "ld1b { z11.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z23.h, z23.h, z28.h\n"
- "st1h { z17.h }, p2, [x21]\n"
- "ld1b { z30.s }, p2/Z, [x20]\n"
+ "trn1 z15.h, z8.h, z17.h\n"
+ "ld1b { z31.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z30.h, z30.h, z28.h\n"
- "trn1 z8.h, z14.h, z18.h\n"
- "ld1b { z15.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "st1h { z29.h }, p2, [x21, #1, MUL VL]\n"
- "sub z15.h, z15.h, z28.h\n"
- "ld1b { z20.s }, p2/Z, [x20]\n"
+ "ld1b { z9.s }, p2/Z, [x20]\n"
+ "sub z27.h, z27.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z23.h, z23.h, z30.h\n"
- "sub z20.h, z20.h, z28.h\n"
- "ld1b { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "sub z24.h, z24.h, z28.h\n"
- "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+ "trn1 z24.h, z26.h, z12.h\n"
+ "sub z11.h, z11.h, z0.h\n"
+ "ld1b { z10.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z16.h }, p2, [x22]\n"
+ "sub z31.h, z31.h, z0.h\n"
+ "incw x23\n"
+ "sub z9.h, z9.h, z0.h\n"
+ "st1h { z15.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z10.h, z10.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z11.h, z27.h, z11.h\n"
"ld1b { z16.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z0.h, z15.h, z26.h\n"
- "incw x22\n"
- "ld1b { z13.s }, p2/Z, [x20]\n"
+ "ld1b { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z16.h, z16.h, z28.h\n"
- "sub z13.h, z13.h, z28.h\n"
- "ld1b { z11.s }, p2/Z, [x20]\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z8.h }, p2, [x21]\n"
- "trn1 z27.h, z20.h, z24.h\n"
- "ld1b { z22.s }, p2/Z, [x20]\n"
+ "trn1 z13.h, z31.h, z9.h\n"
+ "ld1b { z28.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z11.h, z11.h, z28.h\n"
- "ld1b { z3.s }, p2/Z, [x20]\n"
+ "ld1b { z26.s }, p2/Z, [x20]\n"
+ "sub z16.h, z16.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z23.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z20.h, z16.h, z13.h\n"
- "ld1b { z13.s }, p2/Z, [x20]\n"
+ "trn1 z8.h, z10.h, z12.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1b { z14.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z11.h }, p2, [x22]\n"
+ "sub z28.h, z28.h, z0.h\n"
+ "incw x23\n"
+ "sub z26.h, z26.h, z0.h\n"
+ "st1h { z13.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "st1h { z8.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z13.h, z16.h, z2.h\n"
+ "ld1b { z31.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z22.h, z22.h, z28.h\n"
- "sub z3.h, z3.h, z28.h\n"
- "ld1b { z15.s }, p2/Z, [x20]\n"
+ "ld1b { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z29.h, z11.h, z26.h\n"
+ "trn1 z30.h, z28.h, z26.h\n"
"ld1b { z16.s }, p2/Z, [x20]\n"
- "incw x22\n"
- "sub z13.h, z13.h, z28.h\n"
- "sub z15.h, z15.h, z28.h\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z27.h }, p2, [x21]\n"
- "sub z16.h, z16.h, z28.h\n"
- "trn1 z19.h, z22.h, z3.h\n"
- "ld1b { z17.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z20.h }, p2, [x21, #1, MUL VL]\n"
- "ld1b { z0.s }, p2/Z, [x20]\n"
+ "ld1b { z27.s }, p2/Z, [x20]\n"
+ "sub z31.h, z31.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z31.h, z13.h, z15.h\n"
- "st1h { z29.h }, p2, [x21, #2, MUL VL]\n"
- "ld1b { z18.s }, p2/Z, [x20]\n"
+ "trn1 z17.h, z14.h, z12.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1b { z4.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z13.h }, p2, [x22]\n"
+ "sub z16.h, z16.h, z0.h\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "st1h { z30.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z4.h, z4.h, z0.h\n"
+ "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z31.h, z31.h, z2.h\n"
+ "ld1b { z29.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z16.h, z16.h, z26.h\n"
- "sub z17.h, z17.h, z28.h\n"
- "ld1b { z22.s }, p2/Z, [x20]\n"
+ "ld1b { z10.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z0.h, z0.h, z28.h\n"
- "sub z18.h, z18.h, z28.h\n"
- "ld1b { z1.s }, p2/Z, [x20]\n"
- "sub z22.h, z22.h, z28.h\n"
- "sub z1.h, z1.h, z28.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "addvl x21, x21, #-3\n"
- "st1h { z19.h }, p2, [x21]\n"
- "mov z13.d, z12.d\n"
- "mov z14.d, z12.d\n"
- "st1h { z31.h }, p2, [x21, #1, MUL VL]\n"
- "mov z15.d, z12.d\n"
- "trn1 z8.h, z17.h, z0.h\n"
- "st1h { z16.h }, p2, [x21, #2, MUL VL]\n"
- "addvl x21, x21, #-3\n"
- "trn1 z31.h, z18.h, z22.h\n"
- "trn1 z29.h, z1.h, z26.h\n"
- "st1h { z8.h }, p2, [x21]\n"
- "st1h { z31.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z29.h }, p2, [x21, #2, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z6.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "trn1 z24.h, z16.h, z27.h\n"
+ "ld1b { z13.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "ld1b { z8.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z4.h, z4.h, z12.h\n"
+ "sub z29.h, z29.h, z0.h\n"
+ "ld1b { z11.s }, p2/Z, [x20]\n"
+ "sub z10.h, z10.h, z0.h\n"
+ "st1h { z31.h }, p2, [x22]\n"
+ "sub z13.h, z13.h, z0.h\n"
+ "sub z8.h, z8.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z11.h, z11.h, z0.h\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z14.h, z29.h, z10.h\n"
+ "trn1 z10.h, z13.h, z8.h\n"
+ "trn1 z4.h, z11.h, z12.h\n"
+ "st1h { z14.h }, p2, [x22]\n"
+ "st1h { z10.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x21, x7, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z4.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ld1w { z5.s }, p1/Z, [x20, x7, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x7, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x5, x23, LSL #22\n"
+ "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
"mov x22, #0xb\n"
- "add x21, x4, x3\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x5, x4\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x4, x17\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x17, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x5, x16\n"
+ "orr x20, x6, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x17, x4, x20, x17\n"
- ".inst 0xc0040d80 // mova za.d[x8, #0], { z12.d-z15.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040d81 // mova za.d[x8, #1], { z12.d-z15.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040e80 // mova za.d[x8, #0], { z20.d-z23.d }\n"
"mov x22, #0x4\n"
- "ldp x15, x14, [x23], #0x10\n"
- ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
- "ldp x13, x11, [x20], #0x10\n"
- ".inst 0xc0040d83 // mova za.d[x8, #3], { z12.d-z15.d }\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x16, x5, x21, x16\n"
+ ".inst 0xc0040e81 // mova za.d[x8, #1], { z20.d-z23.d }\n"
"ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "ldp x10, x9, [x23], #0x10\n"
- "ldp x28, x27, [x20], #0x10\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
+ "ldp x14, x13, [x23], #0x10\n"
+ ".inst 0xc0040e83 // mova za.d[x8, #3], { z20.d-z23.d }\n"
+ "ldp x11, x10, [x20], #0x10\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ "ldp x9, x28, [x23], #0x10\n"
+ "ldp x27, x26, [x20], #0x10\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
"csel x20, x21, x22, LT\n"
@@ -264,379 +269,379 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
"and x22, x21, #0x1\n"
- ".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
"add x21, x21, #0x1\n"
"lsr x21, x21, #0x1\n"
- ".inst 0xc1aaab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
- "sub x16, x16, x21\n"
- ".inst 0xc1b5ccbc // sclamp { z28.s-z31.s }, z5.s, z21.s\n"
+ "sub x15, x15, x21\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z28.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z29.s }, p1, [x14]\n"
+ "st1b { z28.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z30.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z31.s }, p1, [x9]\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x4, x3\n"
+ "adds XZR, x5, x4\n"
"bne 14f\n"
"cbz x22, 12f\n"
"cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "sub x17, x17, x22\n"
"beq 11f\n"
"cmp x22, #0x2\n"
"beq 10f\n"
"cmp x22, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z27.s }, p1/Z, [x17]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z8.s }, p1/Z, [x16]\n"
"addvl x20, SP, #12\n"
- "ld1b { z0.s }, p1/Z, [x21]\n"
+ "ld1b { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z0.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1b { z28.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z9.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z11.s }, p1/Z, [x21]\n"
+ "ld1b { z31.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z28.h, z28.h, z11.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1b { z29.s }, p1/Z, [x21]\n"
+ "ld1b { z10.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
+ "trn1 z8.h, z8.h, z26.h\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z8.h\n"
- "add z29.h, z29.h, z7.h\n"
+ "ld1b { z11.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z31.h\n"
"ld1b { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
+ "ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z17.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1b { z31.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z26.s }, p1/Z, [x21]\n"
+ "trn1 z10.h, z10.h, z16.h\n"
+ "add z8.h, z8.h, z18.h\n"
+ "ld1b { z28.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z26.h\n"
- "add z31.h, z31.h, z7.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "mov z0.d, z20.d\n"
- "add z0.h, z0.h, z7.h\n"
- ".inst 0xc1781788 // sdot za.s[x8, 0], { z28.h-z31.h }, z8.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17817a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z8.h\n"
+ "trn1 z11.h, z11.h, z30.h\n"
+ "add z9.h, z9.h, z18.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z2.s }, p1/Z, [x21]\n"
+ "add z10.h, z10.h, z18.h\n"
+ "trn1 z12.h, z12.h, z28.h\n"
+ "ld1h { z4.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "mov z13.d, z2.d\n"
+ "add z12.h, z12.h, z18.h\n"
+ ".inst 0xc1701508 // sdot za.s[x8, 0], { z8.h-z11.h }, z0.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xc1711528 // sdot za.s[x8, 0], { z9.h-z12.h }, z1.h\n"
+ ".inst 0xc1741548 // sdot za.s[x8, 0], { z10.h-z13.h }, z4.h\n"
"9:" // Unpadded: 3 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z29.s }, p1/Z, [x17]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p1/Z, [x16]\n"
"addvl x20, SP, #9\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z17.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1b { z30.s }, p1/Z, [x21]\n"
+ "ld1b { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z0.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z0.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1b { z31.s }, p1/Z, [x21]\n"
+ "ld1b { z2.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1b { z0.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z11.h\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z0.h, z0.h, z16.h\n"
- "add z0.h, z0.h, z7.h\n"
- "ld1b { z1.s }, p1/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z2.h\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z16.h\n"
- "add z1.h, z1.h, z7.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17217a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z2.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "mov z2.d, z16.d\n"
- "add z2.h, z2.h, z7.h\n"
- ".inst 0xc17317c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z3.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17817e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z8.h\n"
+ "trn1 z14.h, z14.h, z24.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
+ "add z14.h, z14.h, z18.h\n"
+ "trn1 z16.h, z16.h, z24.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add z15.h, z15.h, z18.h\n"
+ "mov z17.d, z17.d\n"
+ "add z16.h, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "add z17.h, z17.h, z18.h\n"
+ ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17015c8 // sdot za.s[x8, 0], { z14.h-z17.h }, z0.h\n"
"10:" // Unpadded: 2 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1b { z26.s }, p1/Z, [x17]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
"addvl x21, SP, #6\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z16.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1b { z27.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #12\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
+ "ld1b { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1b { z28.s }, p1/Z, [x22]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z26.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z29.s }, p1/Z, [x22]\n"
+ "ld1b { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z28.h, z28.h, z29.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1b { z29.s }, p1/Z, [x22]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "ld1b { z24.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1b { z19.s }, p1/Z, [x22]\n"
+ "ld1b { z14.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z19.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1b { z30.s }, p1/Z, [x22]\n"
+ "trn1 z12.h, z12.h, z26.h\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1b { z23.s }, p1/Z, [x22]\n"
- "trn1 z30.h, z30.h, z23.h\n"
+ "ld1b { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z30.h, z30.h, z7.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721748 // sdot za.s[x8, 0], { z26.h-z29.h }, z2.h\n"
- "ld1b { z22.s }, p1/Z, [x22]\n"
- "mov z31.d, z22.d\n"
- ".inst 0xc1731768 // sdot za.s[x8, 0], { z27.h-z30.h }, z3.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1h { z3.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17b1769 // sdot za.s[x8, 1], { z27.h-z30.h }, z11.h\n"
- ".inst 0xc1731788 // sdot za.s[x8, 0], { z28.h-z31.h }, z3.h\n"
+ "trn1 z13.h, z13.h, z24.h\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1b { z24.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
+ "add z13.h, z13.h, z18.h\n"
+ "trn1 z15.h, z15.h, z24.h\n"
+ "ld1h { z1.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z14.h, z14.h, z18.h\n"
+ "mov z16.d, z16.d\n"
+ "add z15.h, z15.h, z18.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17115a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z1.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701789 // sdot za.s[x8, 1], { z28.h-z31.h }, z0.h\n"
+ ".inst 0xc1781589 // sdot za.s[x8, 1], { z12.h-z15.h }, z8.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"11:" // Unpadded: 1 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1b { z29.s }, p1/Z, [x17]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "ld1b { z9.s }, p1/Z, [x16]\n"
"addvl x21, SP, #3\n"
- "ld1b { z22.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z22.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1b { z30.s }, p1/Z, [x22]\n"
+ "ld1b { z4.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #9\n"
- "ld1b { z25.s }, p1/Z, [x22]\n"
+ "ld1b { z10.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z25.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1b { z31.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1b { z0.s }, p1/Z, [x22]\n"
+ "ld1b { z11.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z4.h\n"
+ "ld1b { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z16.h\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z0.h, z0.h, z16.h\n"
- "add z0.h, z0.h, z7.h\n"
- "ld1b { z1.s }, p1/Z, [x22]\n"
+ "ld1b { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1b { z2.s }, p1/Z, [x22]\n"
- "trn1 z1.h, z1.h, z2.h\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ "add z9.h, z9.h, z18.h\n"
+ "ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z1.h, z1.h, z7.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17217a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z2.h\n"
- "ld1b { z24.s }, p1/Z, [x22]\n"
- "mov z2.d, z24.d\n"
- ".inst 0xc17317c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z3.h\n"
- ".inst 0xa0402a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17817a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z8.h\n"
- "add z2.h, z2.h, z7.h\n"
- "ld1h { z3.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17917c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z9.h\n"
- ".inst 0xc17317e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z3.h\n"
- "ld1h { z3.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17317e9 // sdot za.s[x8, 1], { z31.h-z2.h }, z3.h\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z10.h, z10.h, z18.h\n"
+ ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "trn1 z13.h, z13.h, z17.h\n"
+ "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z12.h, z12.h, z18.h\n"
+ "mov z14.d, z16.d\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xc1701528 // sdot za.s[x8, 0], { z9.h-z12.h }, z0.h\n"
+ "add z14.h, z14.h, z18.h\n"
+ ".inst 0xc1711548 // sdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1701529 // sdot za.s[x8, 1], { z9.h-z12.h }, z0.h\n"
+ ".inst 0xc1741568 // sdot za.s[x8, 0], { z11.h-z14.h }, z4.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1711549 // sdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
"12:" // Unpadded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z23.s }, p1/Z, [x17]\n"
- "sub x7, x7, #0x2\n"
- "ld1b { z25.s }, p1/Z, [x21]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "sub x17, x17, #0x2\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z25.h\n"
- "sub x16, x16, #0x1\n"
- "ld1b { z24.s }, p1/Z, [x21]\n"
+ "sub x15, x15, #0x1\n"
+ "ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x7, #0x1\n"
- "add z23.h, z23.h, z7.h\n"
- "ld1b { z30.s }, p1/Z, [x21]\n"
+ "lsr x20, x17, #0x1\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z30.h\n"
- "cmp x20, x16\n"
- "ld1b { z25.s }, p1/Z, [x21]\n"
+ "cmp x20, x15\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "csel x26, x20, x16, LT\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1b { z22.s }, p1/Z, [x21]\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ "csel x25, x20, x15, LT\n"
+ "ld1b { z4.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z22.h\n"
- "add z25.h, z25.h, z7.h\n"
- "ld1b { z26.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z22.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "and x17, x17, #0x1\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z22.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1b { z27.s }, p1/Z, [x21]\n"
+ "sub x15, x15, x25\n"
+ "ld1b { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "and x7, x7, #0x1\n"
- "ld1b { z30.s }, p1/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z4.h\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z30.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1b { z28.s }, p1/Z, [x21]\n"
- "mov z28.d, z28.d\n"
- "add z28.h, z28.h, z7.h\n"
- "sub x16, x16, x26\n"
- "cbz x26, 21f\n"
+ "ld1b { z30.s }, p1/Z, [x21]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ "mov z16.d, z30.d\n"
+ "add z14.h, z14.h, z18.h\n"
+ "add z15.h, z15.h, z18.h\n"
+ "add z16.h, z16.h, z18.h\n"
+ "cbz x25, 21f\n"
"13:" // Unpadded: Main loop
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
- "addvl x25, SP, #6\n"
- "addvl x24, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa0402b20 // ld1h { z0.h-z1.h }, pn10.b/Z, [x25]\n"
- "add x23, x17, %x[ld_in_row]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "addvl x24, SP, #6\n"
+ "addvl x20, SP, #12\n"
+ "add x23, x16, %x[ld_in_row]\n"
"addvl x22, SP, #3\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
"addvl x21, SP, #9\n"
- "subs x26, x26, #0x1\n"
- ".inst 0xc1711709 // sdot za.s[x8, 1], { z24.h-z27.h }, z1.h\n"
- ".inst 0xa0402b08 // ld1h { z8.h-z9.h }, pn10.b/Z, [x24]\n"
- ".inst 0xc17816ea // sdot za.s[x8, 2], { z23.h-z26.h }, z8.h\n"
- "ld1b { z23.s }, p1/Z, [x17]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "add x20, x17, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x25, #2, MUL VL]\n"
- ".inst 0xc179170a // sdot za.s[x8, 2], { z24.h-z27.h }, z9.h\n"
- "ld1b { z16.s }, p1/Z, [x23]\n"
+ "subs x25, x25, #0x1\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402b00 // ld1h { z0.h-z1.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ "ld1b { z28.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z16.h\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "ld1h { z9.h }, p2/Z, [x24, #2, MUL VL]\n"
- "add z23.h, z23.h, z7.h\n"
- "ld1b { z24.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a3ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z3.s\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ "ld1b { z29.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc179172a // sdot za.s[x8, 2], { z25.h-z28.h }, z9.h\n"
- "ld1b { z18.s }, p1/Z, [x23]\n"
+ "ld1b { z9.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1b { z25.s }, p1/Z, [x23]\n"
+ "trn1 z28.h, z28.h, z17.h\n"
+ ".inst 0xa0402aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1a5aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+ "ld1b { z30.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "ld1b { z8.s }, p1/Z, [x23]\n"
+ "trn1 z29.h, z29.h, z9.h\n"
+ "ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z8.h\n"
- "add z25.h, z25.h, z7.h\n"
- "ld1b { z26.s }, p1/Z, [x23]\n"
+ "add z28.h, z28.h, z18.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1b { z31.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "ld1b { z28.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a7ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
+ "ld1b { z13.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z28.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1b { z27.s }, p1/Z, [x23]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ "ld1b { z0.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
- "ld1b { z28.s }, p1/Z, [x23]\n"
- "trn1 z27.h, z27.h, z28.h\n"
+ "trn1 z30.h, z30.h, z17.h\n"
+ "add z29.h, z29.h, z18.h\n"
+ "ld1b { z14.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "add z27.h, z27.h, z7.h\n"
- ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc17216e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z2.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- "ld1b { z20.s }, p1/Z, [x23]\n"
- "mov z28.d, z20.d\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc1711709 // sdot za.s[x8, 1], { z24.h-z27.h }, z1.h\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- ".inst 0xc1701728 // sdot za.s[x8, 0], { z25.h-z28.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1b { z23.s }, p1/Z, [x17]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "st1b { z17.s }, p1, [x14]\n"
+ "trn1 z31.h, z31.h, z13.h\n"
+ "ld1b { z8.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a6ce78 // sclamp { z24.s-z27.s }, z19.s, z6.s\n"
+ "ld1h { z12.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "add z30.h, z30.h, z18.h\n"
+ "trn1 z0.h, z0.h, z14.h\n"
+ "mov z1.d, z8.d\n"
+ "add z31.h, z31.h, z18.h\n"
+ "st1b { z24.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "st1b { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "add z0.h, z0.h, z18.h\n"
+ "st1b { z26.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z1.h, z1.h, z18.h\n"
+ "st1b { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ ".inst 0xc17a1788 // sdot za.s[x8, 0], { z28.h-z31.h }, z10.h\n"
+ ".inst 0xc17b17a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z11.h\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc1781789 // sdot za.s[x8, 1], { z28.h-z31.h }, z8.h\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17c17c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z12.h\n"
+ "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z16.h\n"
- "st1b { z18.s }, p1, [x10]\n"
- "ld1b { z24.s }, p1/Z, [x20]\n"
+ ".inst 0xc17917a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z9.h\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add x10, x10, x28\n"
- "st1b { z19.s }, p1, [x9]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z16.h\n"
- "add x9, x9, x27\n"
- "ld1b { z25.s }, p1/Z, [x20]\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc17417c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z4.h\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "add z23.h, z23.h, z7.h\n"
+ "trn1 z12.h, z12.h, z9.h\n"
"ld1b { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z16.h\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1b { z26.s }, p1/Z, [x20]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z25.h, z25.h, z7.h\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z1.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z16.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1b { z27.s }, p1/Z, [x20]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "add z27.h, z27.h, z7.h\n"
"ld1b { z16.s }, p1/Z, [x20]\n"
- "mov z28.d, z16.d\n"
- "add z28.h, z28.h, z7.h\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "trn1 z14.h, z14.h, z1.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ "mov z16.d, z16.d\n"
+ "add z14.h, z14.h, z18.h\n"
+ "add z15.h, z15.h, z18.h\n"
+ "add z16.h, z16.h, z18.h\n"
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
"cbz x22, 19f\n"
"cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "sub x17, x17, x22\n"
"beq 18f\n"
"cmp x22, #0x2\n"
"beq 17f\n"
@@ -644,686 +649,686 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x17]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "addvl x20, SP, #12\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z28.s }, p0/Z, [x21]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z17.h\n"
- "trn1 z28.h, z28.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z29.s }, p0/Z, [x21]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z30.s }, p0/Z, [x21]\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
+ "ld1b { z14.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z31.s }, p0/Z, [x21]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x20, SP, #12\n"
+ ".inst 0xc1711568 // sdot za.s[x8, 0], { z11.h-z14.h }, z1.h\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z18.h\n"
- "trn1 z30.h, z30.h, z17.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
- "mov z0.d, z20.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1711788 // sdot za.s[x8, 0], { z28.h-z31.h }, z1.h\n"
- "ld1h { z1.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17117a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z1.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z24.s }, p0/Z, [x17]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "addvl x20, SP, #9\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z25.s }, p0/Z, [x21]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z17.h\n"
- "trn1 z25.h, z25.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z26.s }, p0/Z, [x21]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z27.s }, p0/Z, [x21]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
+ "ld1b { z14.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z28.s }, p0/Z, [x21]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xc1711568 // sdot za.s[x8, 0], { z11.h-z14.h }, z1.h\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z18.h\n"
- "trn1 z27.h, z27.h, z17.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- "trn1 z28.h, z28.h, z16.h\n"
- ".inst 0xc1721708 // sdot za.s[x8, 0], { z24.h-z27.h }, z2.h\n"
- "ld1b { z11.s }, p0/Z, [x21]\n"
- "add z11.h, p0/M, z11.h, z7.h\n"
- "mov z29.d, z11.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1731728 // sdot za.s[x8, 0], { z25.h-z28.h }, z3.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701748 // sdot za.s[x8, 0], { z26.h-z29.h }, z0.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z25.s }, p0/Z, [x17]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #6\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #12\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z17.h\n"
- "trn1 z26.h, z26.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x21, SP, #6\n"
- "trn1 z27.h, z27.h, z18.h\n"
- "trn1 z28.h, z28.h, z17.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z29.h, z29.h, z16.h\n"
- ".inst 0xc1711728 // sdot za.s[x8, 0], { z25.h-z28.h }, z1.h\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #12\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- ".inst 0xc1791748 // sdot za.s[x8, 0], { z26.h-z29.h }, z9.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1721729 // sdot za.s[x8, 1], { z25.h-z28.h }, z2.h\n"
- "mov z30.d, z1.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z9.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- ".inst 0xc1791768 // sdot za.s[x8, 0], { z27.h-z30.h }, z9.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z25.s }, p0/Z, [x17]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #9\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z17.h\n"
- "trn1 z26.h, z26.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x21, SP, #3\n"
- "trn1 z27.h, z27.h, z18.h\n"
- "trn1 z28.h, z28.h, z17.h\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z29.h, z29.h, z16.h\n"
- ".inst 0xc1731728 // sdot za.s[x8, 0], { z25.h-z28.h }, z3.h\n"
- "ld1b { z0.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #9\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
- ".inst 0xc17b1748 // sdot za.s[x8, 0], { z26.h-z29.h }, z11.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1721729 // sdot za.s[x8, 1], { z25.h-z28.h }, z2.h\n"
- "mov z30.d, z0.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"19:" // Padded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "sub x17, x17, #0x2\n"
+ "sub x15, x15, #0x1\n"
+ "lsr x20, x17, #0x1\n"
+ "cmp x20, x15\n"
+ "and x17, x17, #0x1\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "csel x25, x20, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "sub x15, x15, x25\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
- "add z19.h, p0/M, z19.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "sub x7, x7, #0x2\n"
- "sub x16, x16, #0x1\n"
- "trn1 z25.h, z25.h, z19.h\n"
- "trn1 z26.h, z26.h, z18.h\n"
- "lsr x20, x7, #0x1\n"
- "cmp x20, x16\n"
- "trn1 z27.h, z27.h, z17.h\n"
- "mov z28.d, z16.d\n"
- "csel x25, x20, x16, LT\n"
- "add x17, x17, %x[ld_in_col]\n"
- "and x7, x7, #0x1\n"
- "sub x16, x16, x25\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ "mov z16.d, z16.d\n"
"cbz x25, 21f\n"
"20:" // Padded: Main loop
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa1402b00 // ld1h { z0.h, z8.h }, pn10.b/Z, [x24]\n"
+ "addvl x20, SP, #12\n"
"mov x12, #0x0\n"
+ "add x23, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
- "add x20, x17, %x[ld_in_row]\n"
"addvl x22, SP, #3\n"
- ".inst 0xc1781709 // sdot za.s[x8, 1], { z24.h-z27.h }, z8.h\n"
- ".inst 0xa1402ae3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x23]\n"
"addvl x21, SP, #9\n"
"subs x25, x25, #0x1\n"
- ".inst 0xc17316ea // sdot za.s[x8, 2], { z23.h-z26.h }, z3.h\n"
- "ld1b { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402b00 // ld1h { z0.h-z1.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc178156a // sdot za.s[x8, 2], { z11.h-z14.h }, z8.h\n"
+ "ld1b { z25.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z1.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ "add z25.h, p0/M, z25.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc179158a // sdot za.s[x8, 2], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa1402ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22]\n"
+ "ld1b { z10.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc17115aa // sdot za.s[x8, 2], { z13.h-z16.h }, z1.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "add z10.h, p0/M, z10.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b170a // sdot za.s[x8, 2], { z24.h-z27.h }, z11.h\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ "ld1b { z26.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z25.h, z25.h, z10.h\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
+ "add z26.h, p0/M, z26.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1h { z3.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "st1b { z28.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc173172a // sdot za.s[x8, 2], { z25.h-z28.h }, z3.h\n"
- "trn1 z23.h, z23.h, z16.h\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ "ld1b { z27.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z26.h, z26.h, z16.h\n"
+ "add z27.h, p0/M, z27.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z28.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z27.h, z27.h, z16.h\n"
+ "add z28.h, p0/M, z28.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z30.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z29.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z28.h, z28.h, z16.h\n"
+ "add z29.h, p0/M, z29.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "trn1 z24.h, z24.h, z1.h\n"
- "trn1 z25.h, z25.h, z3.h\n"
- "trn1 z26.h, z26.h, z30.h\n"
- ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "trn1 z27.h, z27.h, z29.h\n"
+ ".inst 0xc1741728 // sdot za.s[x8, 0], { z25.h-z28.h }, z4.h\n"
+ "ld1b { z15.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17216e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z2.h\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
"mov x12, #0x0\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "trn1 z29.h, z29.h, z15.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17216e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z2.h\n"
- "ld1b { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ ".inst 0xc17c1748 // sdot za.s[x8, 0], { z26.h-z29.h }, z12.h\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ "mov z30.d, z16.d\n"
+ ".inst 0xc1711729 // sdot za.s[x8, 1], { z25.h-z28.h }, z1.h\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z7.h\n"
+ ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ ".inst 0xc1791749 // sdot za.s[x8, 1], { z26.h-z29.h }, z9.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1731709 // sdot za.s[x8, 1], { z24.h-z27.h }, z3.h\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "mov z28.d, z20.d\n"
- "ld1h { z1.h }, p2/Z, [x22, #2, MUL VL]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
+ "ld1b { z12.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- ".inst 0xc1711728 // sdot za.s[x8, 0], { z25.h-z28.h }, z1.h\n"
"mov x12, #0x4\n"
- "add z22.h, p0/M, z22.h, z7.h\n"
- "ld1h { z1.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1711729 // sdot za.s[x8, 1], { z25.h-z28.h }, z1.h\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
+ "ld1b { z14.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z31.s }, p0/Z, [x20]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z17.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- "trn1 z23.h, z23.h, z8.h\n"
- "trn1 z24.h, z24.h, z22.h\n"
- "st1b { z18.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "trn1 z25.h, z25.h, z28.h\n"
- "trn1 z26.h, z26.h, z20.h\n"
- "st1b { z19.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "trn1 z27.h, z27.h, z31.h\n"
- "mov z28.d, z1.d\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ "mov z16.d, z16.d\n"
"bgt 20b\n"
"21:" // Main loop tail
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"addvl x24, SP, #6\n"
"addvl x23, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa0402b08 // ld1h { z8.h-z9.h }, pn10.b/Z, [x24]\n"
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17816e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z8.h\n"
- "add x22, x17, %x[ld_in_row]\n"
"addvl x21, SP, #3\n"
- ".inst 0xc1791709 // sdot za.s[x8, 1], { z24.h-z27.h }, z9.h\n"
- ".inst 0xa1402ae3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x23]\n"
"addvl x20, SP, #9\n"
- ".inst 0xc17316ea // sdot za.s[x8, 2], { z23.h-z26.h }, z3.h\n"
- "ld1b { z29.s }, p0/Z, [x17]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1711569 // sdot za.s[x8, 1], { z11.h-z14.h }, z1.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1791589 // sdot za.s[x8, 1], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xc1a3ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z3.s\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z8.s }, p0/Z, [x22]\n"
- "add z8.h, p0/M, z8.h, z7.h\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z10.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a5aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z10.h, p0/M, z10.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b170a // sdot za.s[x8, 2], { z24.h-z27.h }, z11.h\n"
- "ld1b { z30.s }, p0/Z, [x22]\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "trn1 z11.h, z11.h, z10.h\n"
+ ".inst 0xc1a6ce78 // sclamp { z24.s-z27.s }, z19.s, z6.s\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1b { z20.s }, p0/Z, [x22]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "st1b { z24.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z26.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc172172a // sdot za.s[x8, 2], { z25.h-z28.h }, z2.h\n"
- "trn1 z29.h, z29.h, z8.h\n"
- "ld1b { z31.s }, p0/Z, [x22]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "st1b { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z25.s }, p0/Z, [x22]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z0.s }, p0/Z, [x22]\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x22]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z1.s }, p0/Z, [x22]\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
+ "ld1b { z15.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z28.s }, p0/Z, [x22]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "trn1 z30.h, z30.h, z20.h\n"
- "trn1 z31.h, z31.h, z25.h\n"
- "trn1 z0.h, z0.h, z17.h\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z1.h, z1.h, z28.h\n"
- ".inst 0xc17317a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z3.h\n"
- "ld1b { z22.s }, p0/Z, [x22]\n"
- ".inst 0xc1a6ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
- "add z22.h, p0/M, z22.h, z7.h\n"
- ".inst 0xc17b17c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z11.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1a4aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z4.s\n"
- ".inst 0xc17317a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z3.h\n"
- "mov z2.d, z22.d\n"
- "ld1h { z9.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17b17c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z11.h\n"
- ".inst 0xc1aaab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z10.s\n"
- ".inst 0xc17917e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z9.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1b5ccb8 // sclamp { z24.s-z27.s }, z5.s, z21.s\n"
- "st1b { z24.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z25.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "st1b { z26.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- ".inst 0xc17817e9 // sdot za.s[x8, 1], { z31.h-z2.h }, z8.h\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z27.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"22:" // Main loop skip tail
- "cbz x7, 23f\n" // Skip remainder inputs
+ "cbz x17, 23f\n" // Skip remainder inputs
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z24.s }, p0/Z, [x17]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #6\n"
+ "addvl x20, SP, #12\n"
+ "sub x15, x15, #0x1\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z17.h\n"
- "trn1 z25.h, z25.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z31.s }, p0/Z, [x20]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z26.h, z26.h, z17.h\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "ld1b { z0.s }, p0/Z, [x20]\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
- "trn1 z28.h, z28.h, z31.h\n"
- "addvl x21, SP, #6\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- "mov z29.d, z0.d\n"
- "addvl x20, SP, #12\n"
- "sub x16, x16, #0x1\n"
- ".inst 0xc17b1728 // sdot za.s[x8, 0], { z25.h-z28.h }, z11.h\n"
- ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721748 // sdot za.s[x8, 0], { z26.h-z29.h }, z2.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
"ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1781709 // sdot za.s[x8, 1], { z24.h-z27.h }, z8.h\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- ".inst 0xc1791729 // sdot za.s[x8, 1], { z25.h-z28.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
- ".inst 0xc171170a // sdot za.s[x8, 2], { z24.h-z27.h }, z1.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- ".inst 0xc179172a // sdot za.s[x8, 2], { z25.h-z28.h }, z9.h\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- ".inst 0xc1721749 // sdot za.s[x8, 1], { z26.h-z29.h }, z2.h\n"
- "ld1h { z3.h }, p2/Z, [x20, #2, MUL VL]\n"
- "st1b { z17.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- ".inst 0xc173174a // sdot za.s[x8, 2], { z26.h-z29.h }, z3.h\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"add x8, x8, #0x1\n"
- "st1b { z18.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z19.s }, p1, [x9]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
+ "st1b { z28.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"23:" // Tail input: End
- "cbz x16, 25f\n"
+ "cbz x15, 25f\n"
"24:" // Right padding loop
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+ ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "subs x16, x16, #0x1\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- ".inst 0xc1aaab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
- ".inst 0xc1b5ccbc // sclamp { z28.s-z31.s }, z5.s, z21.s\n"
- "st1b { z28.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z29.s }, p1, [x14]\n"
+ "subs x15, x15, #0x1\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a3ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z3.s\n"
+ ".inst 0xc1a5aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z5.s\n"
+ ".inst 0xc1a7ab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc1a6ce68 // sclamp { z8.s-z11.s }, z19.s, z6.s\n"
+ "st1b { z8.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z30.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z31.s }, p1, [x9]\n"
+ "st1b { z9.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z10.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z11.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 24b\n"
"25:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x7\n"
+ "whilelt p1.s, x7, x6\n"
"incw x20, ALL, MUL #16\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x6\n"
- "whilelt p1.s, x6, x5\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -1342,9 +1347,11 @@ void sme2_u8q_planar_5x5_s2_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #15\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
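
For reference, the post-accumulation sequence this diff keeps re-registering — mova out of the ZA tile, sqdmulh by the per-layer multiplier, srshl by the per-layer right shift, add of the c_offset, sclamp to [minval, maxval], then st1b to the four output rows — is the Requantize32 pipeline whose fields (per_layer_mul, per_layer_right_shift, c_offset, minval, maxval) are loaded into z-registers in the kernel prologue. A minimal scalar sketch, assuming the usual AArch64 semantics for sqdmulh and srshl; the helper name and scalar form are illustrative only, not the library's API — the kernels apply this four vectors of lanes at a time:

#include <algorithm>
#include <cstdint>

// Scalar model of the per-lane requantize step:
//   sqdmulh -> saturating doubling multiply returning the high half
//   srshl   -> rounding shift (a negative operand is a right shift)
//   add     -> add the Requantize32 c_offset
//   sclamp  -> clamp to [minval, maxval]
inline int32_t requantize_scalar(int32_t acc, int32_t per_layer_mul,
                                 int32_t per_layer_right_shift,
                                 int32_t c_offset, int32_t minval,
                                 int32_t maxval)
{
    // sqdmulh: high 32 bits of the doubled 64-bit product, saturated
    // (only acc == mul == INT32_MIN can overflow).
    int64_t high = (static_cast<int64_t>(acc) * per_layer_mul) >> 31;
    high = std::min<int64_t>(high, INT32_MAX);

    // srshl: per_layer_right_shift is normally <= 0 here, giving a
    // rounding right shift by its magnitude.
    if (per_layer_right_shift < 0)
    {
        const int s = -per_layer_right_shift;
        high = (high + (int64_t{1} << (s - 1))) >> s;
    }

    // Add the output offset, then sclamp into the quantized range.
    return static_cast<int32_t>(
        std::clamp<int64_t>(high + c_offset, minval, maxval));
}

In the asm above this shows up as, e.g., `sqdmulh { z24.s-z27.s }, ..., z3.s`, `srshl ..., z5.s`, `add ..., z7.s`, `sclamp ..., z19.s, z6.s`; the diff only renames which z-registers hold each quantization parameter, not the sequence itself.
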
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
index 612beb342a..0ed98e15de 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,133 +70,138 @@ void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0x6\n"
"ptrue p2.b\n"
- "mov x20, #0x6\n"
"ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z21.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x22, #0x8\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z20.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x21, x6\n"
+ "mov SP, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-12\n"
+ "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z20.h, p2/M, z20.h\n"
+ "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x7\n"
- "addvl SP, SP, #-12\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z21.h, p2/M, z21.h\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z29.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z28.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z30.s, #0x0\n"
+ "mov z28.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z30.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x20, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z10.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z23.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z25.h, #0x0\n"
+ "addvl x22, SP, #12\n"
+ "addvl x22, x22, #-4\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z29.d, z28.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1sb { z0.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1rh { z31.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z7.h, #0x0\n"
- "sub z10.h, z10.h, z31.h\n"
- "incw x22\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "ld1sb { z26.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z16.h, z16.h, z31.h\n"
- "trn1 z20.h, z7.h, z10.h\n"
- "ld1sb { z11.s }, p2/Z, [x20]\n"
- "sub z11.h, z11.h, z31.h\n"
- "mov x20, x22\n"
- "trn1 z19.h, z10.h, z16.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
+ "ld1sb { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z0.h, z0.h, z23.h\n"
+ "sub z26.h, z26.h, z23.h\n"
+ "sub z15.h, z15.h, z23.h\n"
+ "trn1 z14.h, z25.h, z0.h\n"
+ "trn1 z2.h, z0.h, z26.h\n"
+ "ld1sb { z21.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "trn1 z26.h, z16.h, z11.h\n"
- "trn1 z13.h, z11.h, z7.h\n"
- "ld1sb { z11.s }, p2/Z, [x20]\n"
+ "trn1 z16.h, z26.h, z15.h\n"
+ "ld1sb { z1.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z24.h, z24.h, z31.h\n"
- "sub z11.h, z11.h, z31.h\n"
- "ld1sb { z2.s }, p2/Z, [x20]\n"
- "sub z2.h, z2.h, z31.h\n"
- "addvl x21, SP, #12\n"
- "incw x22\n"
- "addvl x21, x21, #-4\n"
- "mov x20, x22\n"
- "st1h { z20.h }, p2, [x21]\n"
- "trn1 z22.h, z7.h, z24.h\n"
- "st1h { z19.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z1.h, z24.h, z11.h\n"
- "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "trn1 z15.h, z15.h, z25.h\n"
+ "ld1sb { z11.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "sub z21.h, z21.h, z23.h\n"
+ "st1h { z14.h }, p2, [x22]\n"
+ "sub z1.h, z1.h, z23.h\n"
+ "st1h { z2.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z11.h, z11.h, z23.h\n"
+ "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z15.h }, p2, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #-4\n"
+ "trn1 z3.h, z25.h, z21.h\n"
+ "trn1 z14.h, z21.h, z1.h\n"
+ "ld1sb { z15.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z26.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z3.h, z11.h, z2.h\n"
- "ld1sb { z0.s }, p2/Z, [x20]\n"
+ "trn1 z10.h, z1.h, z11.h\n"
+ "ld1sb { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z13.h }, p2, [x21, #3, MUL VL]\n"
- "trn1 z25.h, z2.h, z7.h\n"
- "ld1sb { z4.s }, p2/Z, [x20]\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "sub z16.h, z16.h, z31.h\n"
- "sub z0.h, z0.h, z31.h\n"
- "addvl x21, x21, #-4\n"
- "st1h { z22.h }, p2, [x21]\n"
- "sub z4.h, z4.h, z31.h\n"
- "st1h { z1.h }, p2, [x21, #1, MUL VL]\n"
- "mov z31.d, z30.d\n"
- "st1h { z3.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z24.h, z7.h, z16.h\n"
- "trn1 z18.h, z16.h, z0.h\n"
- "st1h { z25.h }, p2, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #-4\n"
- "trn1 z0.h, z0.h, z4.h\n"
- "trn1 z1.h, z4.h, z7.h\n"
- "st1h { z24.h }, p2, [x21]\n"
- "st1h { z18.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z1.h }, p2, [x21, #3, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z14.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "trn1 z26.h, z11.h, z25.h\n"
+ "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "sub z15.h, z15.h, z23.h\n"
+ "st1h { z3.h }, p2, [x22]\n"
+ "sub z9.h, z9.h, z23.h\n"
+ "st1h { z14.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z16.h, z16.h, z23.h\n"
+ "st1h { z10.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
+ "addvl x22, x22, #-4\n"
+ "trn1 z22.h, z25.h, z15.h\n"
+ "trn1 z6.h, z15.h, z9.h\n"
+ "trn1 z12.h, z9.h, z16.h\n"
+ "trn1 z11.h, z16.h, z25.h\n"
+ "st1h { z22.h }, p2, [x22]\n"
+ "st1h { z6.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z12.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z11.h }, p2, [x22, #3, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z5.s }, p1/Z, [x21, x16, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z12.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x20, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
"mov x22, #0x6\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x7, x6\n"
+ "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x15, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x7, x14\n"
+ "orr x20, x17, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040bc0 // mova za.d[x8, #0], { z30.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040bc1 // mova za.d[x8, #1], { z30.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
"mov x22, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x14, x7, x21, x14\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040b82 // mova za.d[x8, #2], { z28.d-z29.d }\n"
"ldp x11, x10, [x23], #0x10\n"
- ".inst 0xc0040bc2 // mova za.d[x8, #2], { z30.d-z31.d }\n"
+ ".inst 0xc0040b83 // mova za.d[x8, #3], { z28.d-z29.d }\n"
"ldp x9, x28, [x20], #0x10\n"
- ".inst 0xc0040bc3 // mova za.d[x8, #3], { z30.d-z31.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
"ldp x27, x26, [x23], #0x10\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
"ldp x25, x24, [x20], #0x10\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
@@ -204,22 +209,22 @@ void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
"sub x13, x13, x21\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -231,148 +236,148 @@ void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1b { z20.s }, p1/Z, [x14]\n"
+ "ld1b { z27.s }, p1/Z, [x14]\n"
"addvl x20, SP, #8\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z20.h, z16.h\n"
- "add z4.h, z4.h, z21.h\n"
- "ld1b { z23.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z22.s }, p1/Z, [x21]\n"
+ "ld1b { z3.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z5.h, z23.h, z22.h\n"
- "add z5.h, z5.h, z21.h\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
+ "ld1b { z1.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "trn1 z6.h, z17.h, z16.h\n"
- "add z6.h, z6.h, z21.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16b1488 // sdot za.s[x8, 0], { z4.h-z5.h }, z11.h\n"
- ".inst 0xc1631489 // sdot za.s[x8, 1], { z4.h-z5.h }, z3.h\n"
- ".inst 0xa1412a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16814a8 // sdot za.s[x8, 0], { z5.h-z6.h }, z8.h\n"
- ".inst 0xc16014a9 // sdot za.s[x8, 1], { z5.h-z6.h }, z0.h\n"
+ "ld1b { z12.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z15.h, z27.h, z16.h\n"
+ "ld1b { z18.s }, p1/Z, [x21]\n"
+ "trn1 z16.h, z3.h, z1.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0412a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z15.h, z15.h, z20.h\n"
+ "trn1 z17.h, z12.h, z18.h\n"
+ "add z16.h, z16.h, z20.h\n"
+ "add z17.h, z17.h, z20.h\n"
+ ".inst 0xc16b15e8 // sdot za.s[x8, 0], { z15.h-z16.h }, z11.h\n"
+ ".inst 0xc16a15e9 // sdot za.s[x8, 1], { z15.h-z16.h }, z10.h\n"
+ ".inst 0xc1631608 // sdot za.s[x8, 0], { z16.h-z17.h }, z3.h\n"
+ ".inst 0xc1621609 // sdot za.s[x8, 1], { z16.h-z17.h }, z2.h\n"
"9:" // Unpadded: 1 priming loads
"add x22, x14, %x[ld_in_row]\n"
- "ld1b { z25.s }, p1/Z, [x14]\n"
+ "ld1b { z22.s }, p1/Z, [x14]\n"
"addvl x21, SP, #4\n"
- "ld1b { z6.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z3.h, z25.h, z6.h\n"
- "add z3.h, z3.h, z21.h\n"
- "ld1b { z18.s }, p1/Z, [x22]\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #8\n"
- "ld1b { z26.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z4.h, z18.h, z26.h\n"
- "add z4.h, z4.h, z21.h\n"
- "ld1b { z2.s }, p1/Z, [x22]\n"
+ "ld1b { z19.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z5.s }, p1/Z, [x22]\n"
- "trn1 z5.h, z2.h, z5.h\n"
- "add z5.h, z5.h, z21.h\n"
+ "ld1b { z10.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z21.h, z22.h, z16.h\n"
+ "ld1b { z7.s }, p1/Z, [x22]\n"
+ "trn1 z22.h, z19.h, z10.h\n"
".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1611468 // sdot za.s[x8, 0], { z3.h-z4.h }, z1.h\n"
- ".inst 0xc1601469 // sdot za.s[x8, 1], { z3.h-z4.h }, z0.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- ".inst 0xa0412aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a146a // sdot za.s[x8, 2], { z3.h-z4.h }, z10.h\n"
- ".inst 0xc162146b // sdot za.s[x8, 3], { z3.h-z4.h }, z2.h\n"
- ".inst 0xc1691488 // sdot za.s[x8, 0], { z4.h-z5.h }, z9.h\n"
- ".inst 0xc1681489 // sdot za.s[x8, 1], { z4.h-z5.h }, z8.h\n"
- ".inst 0xa1412a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a148a // sdot za.s[x8, 2], { z4.h-z5.h }, z10.h\n"
- ".inst 0xc162148b // sdot za.s[x8, 3], { z4.h-z5.h }, z2.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add z21.h, z21.h, z20.h\n"
+ "trn1 z23.h, z11.h, z7.h\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
+ ".inst 0xc16116a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z1.h\n"
+ ".inst 0xc16016a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z0.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16e16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16616c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16916ca // sdot za.s[x8, 2], { z22.h-z23.h }, z9.h\n"
+ ".inst 0xc16116cb // sdot za.s[x8, 3], { z22.h-z23.h }, z1.h\n"
"10:" // Unpadded: 0 priming loads
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
"add x20, x14, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x14]\n"
+ "ld1b { z15.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x1\n"
- "ld1b { z9.s }, p1/Z, [x20]\n"
+ "ld1b { z0.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z6.h, z17.h, z9.h\n"
"sub x13, x13, #0x1\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
+ "ld1b { z24.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"cmp x15, x13\n"
- "add z6.h, z6.h, z21.h\n"
- "ld1b { z7.s }, p1/Z, [x20]\n"
+ "ld1b { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z7.h, z17.h, z7.h\n"
"csel x23, x15, x13, LT\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
+ "ld1b { z2.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z7.h, z7.h, z21.h\n"
+ "trn1 z21.h, z15.h, z0.h\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z1.s }, p1/Z, [x20]\n"
- "trn1 z8.h, z17.h, z1.h\n"
- "add z8.h, z8.h, z21.h\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
"sub x13, x13, x23\n"
+ "trn1 z22.h, z24.h, z9.h\n"
+ "trn1 z23.h, z2.h, z15.h\n"
+ "add z21.h, z21.h, z20.h\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
"cbz x23, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
"addvl x22, SP, #4\n"
"addvl x21, SP, #8\n"
- "ld1b { z2.s }, p1/Z, [x14]\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa1402ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22]\n"
+ "ld1b { z26.s }, p1/Z, [x14]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
"add x20, x14, %x[ld_in_row]\n"
"subs x23, x23, #0x1\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- "ld1b { z19.s }, p1/Z, [x20]\n"
+ "ld1b { z4.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa1412ac3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
- "ld1b { z23.s }, p1/Z, [x20]\n"
+ "ld1b { z27.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc16d14ca // sdot za.s[x8, 2], { z6.h-z7.h }, z13.h\n"
- "ld1b { z18.s }, p1/Z, [x20]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ "ld1b { z3.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16514cb // sdot za.s[x8, 3], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc16914cc // sdot za.s[x8, 4], { z6.h-z7.h }, z9.h\n"
- "ld1b { z17.s }, p1/Z, [x20]\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412ac7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc16e16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z14.h\n"
+ "ld1b { z25.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16114cd // sdot za.s[x8, 5], { z6.h-z7.h }, z1.h\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc16b14ea // sdot za.s[x8, 2], { z7.h-z8.h }, z11.h\n"
- "trn1 z6.h, z2.h, z19.h\n"
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16314eb // sdot za.s[x8, 3], { z7.h-z8.h }, z3.h\n"
+ ".inst 0xc16616ab // sdot za.s[x8, 3], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc0060810 // mova { z16.d-z17.d }, za.d[x8, #0]\n"
+ "ld1b { z11.s }, p1/Z, [x20]\n"
+ ".inst 0xc0060832 // mova { z18.d-z19.d }, za.d[x8, #1]\n"
+ ".inst 0xc16916ac // sdot za.s[x8, 4], { z21.h-z22.h }, z9.h\n"
+ ".inst 0xc16116ad // sdot za.s[x8, 5], { z21.h-z22.h }, z1.h\n"
+ "trn1 z21.h, z26.h, z4.h\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16f16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z15.h\n"
+ ".inst 0xc1a5ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z5.s\n"
+ ".inst 0xc16716cb // sdot za.s[x8, 3], { z22.h-z23.h }, z7.h\n"
".inst 0xa1412aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- ".inst 0xc16914ec // sdot za.s[x8, 4], { z7.h-z8.h }, z9.h\n"
- "st1b { z24.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "add z6.h, z6.h, z21.h\n"
- ".inst 0xc16114ed // sdot za.s[x8, 5], { z7.h-z8.h }, z1.h\n"
- "trn1 z7.h, z23.h, z18.h\n"
- "trn1 z8.h, z17.h, z16.h\n"
+ "add z21.h, z21.h, z20.h\n"
+ ".inst 0xc1adaa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
+ ".inst 0xc16916cc // sdot za.s[x8, 4], { z22.h-z23.h }, z9.h\n"
+ ".inst 0xc16116cd // sdot za.s[x8, 5], { z22.h-z23.h }, z1.h\n"
+ "trn1 z22.h, z27.h, z3.h\n"
+ "trn1 z23.h, z25.h, z11.h\n"
"add x8, x8, #0x2\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z26.s }, p1, [x10]\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+ "add z22.h, z22.h, z20.h\n"
+ "add z23.h, z23.h, z20.h\n"
+ ".inst 0xc1bfcfd0 // sclamp { z16.s-z19.s }, z30.s, z31.s\n"
+ "st1b { z16.s }, p1, [x11]\n"
+ "add x11, x11, x9\n"
+ "st1b { z18.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z17.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- "add z7.h, z7.h, z21.h\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z19.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "add z8.h, z8.h, z21.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
@@ -382,258 +387,258 @@ void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #8\n"
+ ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1b { z17.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z7.h, z19.h, z18.h\n"
- "trn1 z8.h, z17.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #8\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "trn1 z9.h, z17.h, z16.h\n"
- ".inst 0xc16a14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z10.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16214e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z2.h\n"
- ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16d1508 // sdot za.s[x8, 0], { z8.h-z9.h }, z13.h\n"
- ".inst 0xc1651509 // sdot za.s[x8, 1], { z8.h-z9.h }, z5.h\n"
+ ".inst 0xc16c16e8 // sdot za.s[x8, 0], { z23.h-z24.h }, z12.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16416e9 // sdot za.s[x8, 1], { z23.h-z24.h }, z4.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ ".inst 0xc16f1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z15.h\n"
+ ".inst 0xc1671709 // sdot za.s[x8, 1], { z24.h-z25.h }, z7.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x22, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x21, SP, #4\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #8\n"
+ ".inst 0xa1412aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "ld1b { z17.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z16.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z19.h, z18.h\n"
- "trn1 z23.h, z17.h, z16.h\n"
+ "ld1b { z10.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z10.h, p0/M, z10.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z17.h, z18.h, z10.h\n"
+ "add z14.h, p0/M, z14.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #4\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- "trn1 z24.h, z17.h, z16.h\n"
- ".inst 0xc16116c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z1.h\n"
- ".inst 0xc16016c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z0.h\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0412aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16d16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z13.h\n"
- ".inst 0xc16516cb // sdot za.s[x8, 3], { z22.h-z23.h }, z5.h\n"
- ".inst 0xc16116e8 // sdot za.s[x8, 0], { z23.h-z24.h }, z1.h\n"
- ".inst 0xc16016e9 // sdot za.s[x8, 1], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xc16f1608 // sdot za.s[x8, 0], { z16.h-z17.h }, z15.h\n"
+ "ld1b { z10.s }, p0/Z, [x22]\n"
+ ".inst 0xc1671609 // sdot za.s[x8, 1], { z16.h-z17.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ "add z10.h, p0/M, z10.h, z20.h\n"
+ ".inst 0xc16f160a // sdot za.s[x8, 2], { z16.h-z17.h }, z15.h\n"
+ ".inst 0xc167160b // sdot za.s[x8, 3], { z16.h-z17.h }, z7.h\n"
+ "trn1 z18.h, z14.h, z10.h\n"
+ ".inst 0xc16c1628 // sdot za.s[x8, 0], { z17.h-z18.h }, z12.h\n"
+ ".inst 0xc1641629 // sdot za.s[x8, 1], { z17.h-z18.h }, z4.h\n"
".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16116ea // sdot za.s[x8, 2], { z23.h-z24.h }, z1.h\n"
- ".inst 0xc16016eb // sdot za.s[x8, 3], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xc161162a // sdot za.s[x8, 2], { z17.h-z18.h }, z1.h\n"
+ ".inst 0xc160162b // sdot za.s[x8, 3], { z17.h-z18.h }, z0.h\n"
"15:" // Padded: 0 priming loads
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
"cbz x15, 18f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z19.s }, p0/Z, [x14]\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
"add x20, x14, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x15, x15, #0x1\n"
+ "sub x13, x13, #0x1\n"
+ "cmp x15, x13\n"
+ "ld1b { z17.s }, p0/Z, [x14]\n"
+ "csel x23, x15, x13, LT\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "sub x13, x13, x23\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z21.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
"mov x12, #0x4\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z6.h, z19.h, z18.h\n"
- "trn1 z7.h, z17.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
"ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
"ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- "sub x15, x15, #0x1\n"
- "sub x13, x13, #0x1\n"
- "cmp x15, x13\n"
- "trn1 z8.h, z17.h, z16.h\n"
- "csel x23, x15, x13, LT\n"
- "add x14, x14, %x[ld_in_col]\n"
- "sub x13, x13, x23\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z23.h, z17.h, z16.h\n"
"cbz x23, 17f\n"
"16:" // Padded: Main loop
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z9.s }, p0/Z, [x14]\n"
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- "add z9.h, p0/M, z9.h, z21.h\n"
"add x22, x14, %x[ld_in_row]\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ "addvl x21, SP, #4\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #8\n"
+ "subs x23, x23, #0x1\n"
+ "ld1b { z16.s }, p0/Z, [x14]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x22]\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- "add z19.h, p0/M, z19.h, z21.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
+ ".inst 0xc16f16ac // sdot za.s[x8, 4], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ad // sdot za.s[x8, 5], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16e16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z14.h\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc16616cb // sdot za.s[x8, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
"ld1b { z18.s }, p0/Z, [x22]\n"
- "add z18.h, p0/M, z18.h, z21.h\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16f16cc // sdot za.s[x8, 4], { z22.h-z23.h }, z15.h\n"
+ ".inst 0xc16716cd // sdot za.s[x8, 5], { z22.h-z23.h }, z7.h\n"
+ "add x8, x8, #0x2\n"
+ ".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "trn1 z21.h, z16.h, z17.h\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ "add z18.h, p0/M, z18.h, z20.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x22]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"mov x12, #0x4\n"
- "addvl x21, SP, #4\n"
- "add z16.h, p0/M, z16.h, z21.h\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #8\n"
- ".inst 0xc16b14ca // sdot za.s[x8, 2], { z6.h-z7.h }, z11.h\n"
- "subs x23, x23, #0x1\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
"ld1b { z17.s }, p0/Z, [x22]\n"
- ".inst 0xc16314cb // sdot za.s[x8, 3], { z6.h-z7.h }, z3.h\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z21.h\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- ".inst 0xa0412aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16d14cc // sdot za.s[x8, 4], { z6.h-z7.h }, z13.h\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- "ld1b { z2.s }, p0/Z, [x22]\n"
- ".inst 0xc16514cd // sdot za.s[x8, 5], { z6.h-z7.h }, z5.h\n"
- "add z2.h, p0/M, z2.h, z21.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc16b14ea // sdot za.s[x8, 2], { z7.h-z8.h }, z11.h\n"
- ".inst 0xa1402be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- ".inst 0xc16a14eb // sdot za.s[x8, 3], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa1412a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc16b14ec // sdot za.s[x8, 4], { z7.h-z8.h }, z11.h\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "add z17.h, p0/M, z17.h, z20.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "trn1 z6.h, z9.h, z19.h\n"
- ".inst 0xc16314ed // sdot za.s[x8, 5], { z7.h-z8.h }, z3.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xa0412bea // ld1h { z10.h-z11.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- "trn1 z7.h, z18.h, z16.h\n"
- "trn1 z8.h, z17.h, z2.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add z16.h, p0/M, z16.h, z20.h\n"
+ "trn1 z23.h, z17.h, z16.h\n"
"bgt 16b\n"
"17:" // Main loop tail
- ".inst 0xc16d14c8 // sdot za.s[x8, 0], { z6.h-z7.h }, z13.h\n"
+ ".inst 0xc16c16a8 // sdot za.s[x8, 0], { z21.h-z22.h }, z12.h\n"
"addvl x21, SP, #4\n"
"addvl x20, SP, #8\n"
- ".inst 0xc16514c9 // sdot za.s[x8, 1], { z6.h-z7.h }, z5.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16b14e8 // sdot za.s[x8, 0], { z7.h-z8.h }, z11.h\n"
- ".inst 0xc16a14e9 // sdot za.s[x8, 1], { z7.h-z8.h }, z10.h\n"
- ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
- ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z14.s\n"
- ".inst 0xc16114ca // sdot za.s[x8, 2], { z6.h-z7.h }, z1.h\n"
- ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
- ".inst 0xc16014cb // sdot za.s[x8, 3], { z6.h-z7.h }, z0.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1afab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z15.s\n"
- ".inst 0xc16914cc // sdot za.s[x8, 4], { z6.h-z7.h }, z9.h\n"
- ".inst 0xc1bccfb8 // sclamp { z24.s-z27.s }, z29.s, z28.s\n"
- "st1b { z24.s }, p1, [x11]\n"
+ ".inst 0xc16416a9 // sdot za.s[x8, 1], { z21.h-z22.h }, z4.h\n"
+ ".inst 0xa1402aa7 // ld1h { z7.h, z15.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16b16c8 // sdot za.s[x8, 0], { z22.h-z23.h }, z11.h\n"
+ ".inst 0xc16a16c9 // sdot za.s[x8, 1], { z22.h-z23.h }, z10.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16f16aa // sdot za.s[x8, 2], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ab // sdot za.s[x8, 3], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060800 // mova { z0.d-z1.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060822 // mova { z2.d-z3.d }, za.d[x8, #1]\n"
+ ".inst 0xc16f16ac // sdot za.s[x8, 4], { z21.h-z22.h }, z15.h\n"
+ ".inst 0xc16716ad // sdot za.s[x8, 5], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16e16ca // sdot za.s[x8, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc1a5ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
+ ".inst 0xc16616cb // sdot za.s[x8, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1412a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1adaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc16c16cc // sdot za.s[x8, 4], { z22.h-z23.h }, z12.h\n"
+ ".inst 0xc16416cd // sdot za.s[x8, 5], { z22.h-z23.h }, z4.h\n"
+ "add x8, x8, #0x2\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
+ ".inst 0xc1bfcfc0 // sclamp { z0.s-z3.s }, z30.s, z31.s\n"
+ "st1b { z0.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc16114cd // sdot za.s[x8, 5], { z6.h-z7.h }, z1.h\n"
- "st1b { z26.s }, p1, [x10]\n"
+ "st1b { z2.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc16314ea // sdot za.s[x8, 2], { z7.h-z8.h }, z3.h\n"
- "st1b { z25.s }, p1, [x27]\n"
+ "st1b { z1.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- ".inst 0xc16214eb // sdot za.s[x8, 3], { z7.h-z8.h }, z2.h\n"
- ".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z3.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xc16114ec // sdot za.s[x8, 4], { z7.h-z8.h }, z1.h\n"
- ".inst 0xc16014ed // sdot za.s[x8, 5], { z7.h-z8.h }, z0.h\n"
- "add x8, x8, #0x2\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
"18:" // Main loop skip tail
"cbz x13, 20f\n"
"19:" // Right padding loop
- ".inst 0xc0060804 // mova { z4.d-z5.d }, za.d[x8, #0]\n"
+ ".inst 0xc0060818 // mova { z24.d-z25.d }, za.d[x8, #0]\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0060826 // mova { z6.d-z7.d }, za.d[x8, #1]\n"
- ".inst 0xc1aeac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
+ ".inst 0xc006083a // mova { z26.d-z27.d }, za.d[x8, #1]\n"
"add x8, x8, #0x2\n"
- ".inst 0xc1acaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z12.s\n"
- ".inst 0xc0040bc4 // mova za.d[x8, #4], { z30.d-z31.d }\n"
- ".inst 0xc1afab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z15.s\n"
- ".inst 0xc0040bc5 // mova za.d[x8, #5], { z30.d-z31.d }\n"
- ".inst 0xc1bccfa4 // sclamp { z4.s-z7.s }, z29.s, z28.s\n"
- "st1b { z4.s }, p1, [x11]\n"
+ ".inst 0xc0040b84 // mova za.d[x8, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0040b85 // mova za.d[x8, #5], { z28.d-z29.d }\n"
+ ".inst 0xc1a5ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc1adaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z13.s\n"
+ ".inst 0xc1a8ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ ".inst 0xc1bfcfd8 // sclamp { z24.s-z27.s }, z30.s, z31.s\n"
+ "st1b { z24.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z6.s }, p1, [x10]\n"
+ "st1b { z26.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z5.s }, p1, [x27]\n"
+ "st1b { z25.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z7.s }, p1, [x26]\n"
+ "st1b { z27.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 19b\n"
"20:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -652,6 +657,8 @@ void sme2_u8s8u8q_planar_3x3_s1_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #12\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
index 8ce04fb8c2..1de49f698b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,119 +70,124 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0x9\n"
"ptrue p2.b\n"
- "mov x20, #0x9\n"
"ldr x7, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z11.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x6\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x22, #0x8\n"
"ldr x17, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z29.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x21, x6\n"
+ "mov SP, x20\n"
+ "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-6\n"
+ "ld1rw { z1.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x17\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z29.h, p2/M, z29.h\n"
+ "ld1rw { z0.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x7\n"
- "addvl SP, SP, #-6\n"
- "ldr x16, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z11.h, p2/M, z11.h\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z28.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z28.s, #0x0\n"
+ "mov z16.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z28.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z16.s }, p1/Z, [x20, x16, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z26.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z22.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z27.h, #0x0\n"
+ "addvl x22, SP, #6\n"
+ "addvl x22, x22, #-2\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z17.d, z16.d\n"
+ "mov z18.d, z16.d\n"
+ "mov z19.d, z16.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1sb { z25.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1rh { z16.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "sub z26.h, z26.h, z16.h\n"
- "incw x22\n"
- "mov z24.h, #0x0\n"
- "ld1sb { z3.s }, p2/Z, [x20]\n"
+ "ld1sb { z15.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z3.h, z3.h, z16.h\n"
- "trn1 z31.h, z26.h, z3.h\n"
- "ld1sb { z21.s }, p2/Z, [x20]\n"
- "sub z21.h, z21.h, z16.h\n"
- "mov x20, x22\n"
- "trn1 z14.h, z21.h, z24.h\n"
- "ld1sb { z2.s }, p2/Z, [x20]\n"
+ "ld1sb { z9.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z25.h, z25.h, z22.h\n"
+ "sub z15.h, z15.h, z22.h\n"
+ "sub z9.h, z9.h, z22.h\n"
+ "trn1 z24.h, z25.h, z15.h\n"
+ "ld1sb { z12.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z2.h, z2.h, z16.h\n"
- "addvl x21, SP, #6\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
+ "ld1sb { z4.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "sub z25.h, z25.h, z16.h\n"
- "incw x22\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "sub z27.h, z27.h, z16.h\n"
- "addvl x21, x21, #-2\n"
- "mov x20, x22\n"
- "st1h { z31.h }, p2, [x21]\n"
- "trn1 z4.h, z2.h, z25.h\n"
- "ld1sb { z26.s }, p2/Z, [x20]\n"
+ "trn1 z11.h, z9.h, z27.h\n"
+ "ld1sb { z15.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "sub z12.h, z12.h, z22.h\n"
+ "sub z4.h, z4.h, z22.h\n"
+ "st1h { z24.h }, p2, [x22]\n"
+ "sub z15.h, z15.h, z22.h\n"
+ "st1h { z11.h }, p2, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #-2\n"
+ "trn1 z9.h, z12.h, z4.h\n"
+ "ld1sb { z14.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
+ "ld1sb { z10.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #3\n"
- "st1h { z14.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z12.h, z27.h, z24.h\n"
- "ld1sb { z20.s }, p2/Z, [x20]\n"
- "sub z26.h, z26.h, z16.h\n"
- "sub z23.h, z23.h, z16.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "sub z20.h, z20.h, z16.h\n"
- "addvl x21, x21, #-2\n"
- "st1h { z4.h }, p2, [x21]\n"
- "mov z29.d, z28.d\n"
- "st1h { z12.h }, p2, [x21, #1, MUL VL]\n"
- "addvl x21, x21, #-2\n"
- "mov z30.d, z28.d\n"
- "mov z31.d, z28.d\n"
- "trn1 z25.h, z26.h, z23.h\n"
- "st1h { z25.h }, p2, [x21]\n"
- "trn1 z3.h, z20.h, z24.h\n"
- "st1h { z3.h }, p2, [x21, #1, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z6.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "trn1 z21.h, z15.h, z27.h\n"
+ "ld1sb { z30.s }, p2/Z, [x20]\n"
+ "sub z14.h, z14.h, z22.h\n"
+ "sub z10.h, z10.h, z22.h\n"
+ "st1h { z9.h }, p2, [x22]\n"
+ "sub z30.h, z30.h, z22.h\n"
+ "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #-2\n"
+ "trn1 z15.h, z14.h, z10.h\n"
+ "trn1 z25.h, z30.h, z27.h\n"
+ "st1h { z15.h }, p2, [x22]\n"
+ "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z1.s }, p1/Z, [x21, x16, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z9.s }, p1/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z0.s }, p1/Z, [x20, x16, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x15, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x15, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x17, x23, LSL #22\n"
"mov x22, #0x9\n"
- "add x21, x7, x6\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x7, x6\n"
+ "ldr x14, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x7, x14\n"
+ "ldr x13, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x15, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x7, x14\n"
+ "orr x20, x17, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x14, x7, x20, x14\n"
- ".inst 0xc0040f80 // mova za.d[x8, #0], { z28.d-z31.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040f81 // mova za.d[x8, #1], { z28.d-z31.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040e00 // mova za.d[x8, #0], { z16.d-z19.d }\n"
"mov x22, #0x2\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x14, x7, x21, x14\n"
+ ".inst 0xc0040e01 // mova za.d[x8, #1], { z16.d-z19.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
"ldp x11, x10, [x23], #0x10\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
"ldp x9, x28, [x20], #0x10\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
"ldp x27, x26, [x23], #0x10\n"
"ldp x25, x24, [x20], #0x10\n"
"cbz x21, 7f\n"
@@ -191,24 +196,24 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
+ ".inst 0xc0060c0c // mova { z12.d-z15.d }, za.d[x8, #0]\n"
"and x22, x21, #0x1\n"
- ".inst 0xc1a9aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z9.s\n"
"add x21, x21, #0x1\n"
"lsr x21, x21, #0x1\n"
- ".inst 0xc1adab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z13.s\n"
"sub x13, x13, x21\n"
- ".inst 0xc1a7cd58 // sclamp { z24.s-z27.s }, z10.s, z7.s\n"
+ ".inst 0xc1a1ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+ ".inst 0xc1a0aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc1a8ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z8.s\n"
+ ".inst 0xc1bccfec // sclamp { z12.s-z15.s }, z31.s, z28.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z24.s }, p1, [x11]\n"
+ "st1b { z12.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z25.s }, p1, [x10]\n"
+ "st1b { z13.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z26.s }, p1, [x27]\n"
+ "st1b { z14.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z27.s }, p1, [x26]\n"
+ "st1b { z15.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -220,194 +225,194 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 2 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1b { z1.s }, p1/Z, [x14]\n"
+ "ld1b { z23.s }, p1/Z, [x14]\n"
"addvl x20, SP, #4\n"
- "ld1b { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z21.h\n"
- "add z1.h, z1.h, z11.h\n"
- "ld1b { z2.s }, p1/Z, [x21]\n"
+ "ld1b { z4.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z15.s }, p1/Z, [x21]\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z2.h, z2.h, z15.h\n"
- "add z2.h, z2.h, z11.h\n"
- "ld1b { z3.s }, p1/Z, [x21]\n"
+ "ld1b { z5.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z21.s }, p1/Z, [x21]\n"
+ "ld1b { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z3.h, z3.h, z21.h\n"
- "add z3.h, z3.h, z11.h\n"
- "ld1b { z4.s }, p1/Z, [x21]\n"
+ "trn1 z23.h, z23.h, z4.h\n"
+ "ld1b { z6.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
+ "ld1b { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z4.h, z19.h\n"
- "add z4.h, z4.h, z11.h\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
- "mov z5.d, z8.d\n"
- "add z5.h, z5.h, z11.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701428 // sdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
- ".inst 0xc1781448 // sdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
+ "trn1 z24.h, z24.h, z5.h\n"
+ "ld1b { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z25.h, z25.h, z6.h\n"
+ "ld1b { z10.s }, p1/Z, [x21]\n"
+ "add z23.h, z23.h, z29.h\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ "trn1 z26.h, z26.h, z20.h\n"
+ "add z24.h, z24.h, z29.h\n"
+ "mov z27.d, z10.d\n"
+ "add z25.h, z25.h, z29.h\n"
+ "add z26.h, z26.h, z29.h\n"
+ "add z27.h, z27.h, z29.h\n"
+ ".inst 0xc17616e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z6.h\n"
+ ".inst 0xc17e1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z14.h\n"
"9:" // Unpadded: 1 priming loads
"add x21, x14, %x[ld_in_row]\n"
- "ld1b { z1.s }, p1/Z, [x14]\n"
+ "ld1b { z20.s }, p1/Z, [x14]\n"
"addvl x20, SP, #2\n"
- "ld1b { z21.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z21.h\n"
- "add z1.h, z1.h, z11.h\n"
"ld1b { z2.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z12.s }, p1/Z, [x21]\n"
+ "ld1b { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z2.h, z2.h, z12.h\n"
- "add z2.h, z2.h, z11.h\n"
- "ld1b { z3.s }, p1/Z, [x21]\n"
+ "ld1b { z25.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
+ "trn1 z20.h, z20.h, z2.h\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z3.h, z3.h, z8.h\n"
- "add z3.h, z3.h, z11.h\n"
- "ld1b { z4.s }, p1/Z, [x21]\n"
+ "ld1b { z23.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z5.s }, p1/Z, [x21]\n"
+ "trn1 z21.h, z21.h, z25.h\n"
+ "ld1b { z9.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z4.h, z4.h, z5.h\n"
- "add z4.h, z4.h, z11.h\n"
- "ld1b { z5.s }, p1/Z, [x21]\n"
- "mov z5.d, z5.d\n"
- "add z5.h, z5.h, z11.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701428 // sdot za.s[x8, 0], { z1.h-z4.h }, z0.h\n"
- ".inst 0xc1781448 // sdot za.s[x8, 0], { z2.h-z5.h }, z8.h\n"
+ "trn1 z22.h, z22.h, z24.h\n"
+ "ld1b { z3.s }, p1/Z, [x21]\n"
+ "add z20.h, z20.h, z29.h\n"
+ ".inst 0xa0402a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20]\n"
+ "trn1 z23.h, z23.h, z9.h\n"
+ "add z21.h, z21.h, z29.h\n"
+ "mov z24.d, z3.d\n"
+ "add z22.h, z22.h, z29.h\n"
+ "add z23.h, z23.h, z29.h\n"
+ "add z24.h, z24.h, z29.h\n"
+ ".inst 0xc1761688 // sdot za.s[x8, 0], { z20.h-z23.h }, z6.h\n"
+ ".inst 0xc17716a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z7.h\n"
"10:" // Unpadded: 0 priming loads
"cmp x15, #0x2\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
"add x21, x14, %x[ld_in_row]\n"
- "ld1b { z21.s }, p1/Z, [x14]\n"
+ "ld1b { z10.s }, p1/Z, [x14]\n"
"sub x15, x15, #0x2\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
+ "ld1b { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z8.h\n"
"sub x13, x13, #0x1\n"
- "ld1b { z22.s }, p1/Z, [x21]\n"
+ "ld1b { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"lsr x20, x15, #0x1\n"
- "add z21.h, z21.h, z11.h\n"
- "ld1b { z25.s }, p1/Z, [x21]\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z25.h\n"
"cmp x20, x13\n"
- "ld1b { z23.s }, p1/Z, [x21]\n"
+ "ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z25.h\n"
"csel x23, x20, x13, LT\n"
- "add z22.h, z22.h, z11.h\n"
- "ld1b { z18.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z18.h\n"
- "add z23.h, z23.h, z11.h\n"
- "ld1b { z24.s }, p1/Z, [x21]\n"
+ "ld1b { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x14, x14, %x[ld_in_col]\n"
- "ld1b { z19.s }, p1/Z, [x21]\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z19.h\n"
- "add z24.h, z24.h, z11.h\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
- "mov z25.d, z8.d\n"
- "add z25.h, z25.h, z11.h\n"
+ "trn1 z11.h, z11.h, z24.h\n"
"and x15, x15, #0x1\n"
+ "ld1b { z20.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
"sub x13, x13, x23\n"
+ "ld1b { z26.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z30.h\n"
+ "add z10.h, z10.h, z29.h\n"
+ "trn1 z13.h, z13.h, z20.h\n"
+ "add z11.h, z11.h, z29.h\n"
+ "mov z14.d, z26.d\n"
+ "add z12.h, z12.h, z29.h\n"
+ "add z13.h, z13.h, z29.h\n"
+ "add z14.h, z14.h, z29.h\n"
"cbz x23, 17f\n"
"11:" // Unpadded: Main loop
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
"addvl x20, SP, #4\n"
"add x22, x14, %x[ld_in_row]\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
"addvl x21, SP, #2\n"
"subs x23, x23, #0x1\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1b { z21.s }, p1/Z, [x14]\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa1402a87 // ld1h { z7.h, z15.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1771549 // sdot za.s[x8, 1], { z10.h-z13.h }, z7.h\n"
+ "ld1b { z3.s }, p1/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "add x20, x14, %x[ld_in_row]\n"
- "ld1b { z18.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- "trn1 z21.h, z21.h, z18.h\n"
- "ld1b { z22.s }, p1/Z, [x22]\n"
+ "ld1b { z9.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z21.h, z21.h, z11.h\n"
- ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
- "ld1b { z8.s }, p1/Z, [x22]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ "add x20, x14, %x[ld_in_row]\n"
+ ".inst 0xc17f1569 // sdot za.s[x8, 1], { z11.h-z14.h }, z15.h\n"
+ "ld1b { z4.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z8.h\n"
- "add z22.h, z22.h, z11.h\n"
- "ld1b { z23.s }, p1/Z, [x22]\n"
+ "trn1 z3.h, z3.h, z9.h\n"
+ "ld1b { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "ld1b { z27.s }, p1/Z, [x22]\n"
+ "ld1b { z5.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z27.h\n"
- "add z23.h, z23.h, z11.h\n"
- "ld1b { z24.s }, p1/Z, [x22]\n"
+ ".inst 0xc1a1ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z1.s\n"
+ "ld1b { z10.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc1a6ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
- "ld1b { z8.s }, p1/Z, [x22]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1b { z6.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z8.h\n"
- "add z24.h, z24.h, z11.h\n"
- "ld1b { z4.s }, p1/Z, [x22]\n"
- "mov z25.d, z4.d\n"
- "add z25.h, z25.h, z11.h\n"
- ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17416a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z4.h\n"
- ".inst 0xc1a9aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
- "ld1b { z21.s }, p1/Z, [x14]\n"
- ".inst 0xc17c16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z12.h\n"
- ".inst 0xc1adab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
- "ld1b { z12.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z12.h\n"
- ".inst 0xc1a7cd40 // sclamp { z0.s-z3.s }, z10.s, z7.s\n"
+ "trn1 z4.h, z4.h, z15.h\n"
+ "add z3.h, z3.h, z29.h\n"
+ "ld1b { z14.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z5.h, z5.h, z10.h\n"
+ "ld1b { z21.s }, p1/Z, [x22]\n"
+ ".inst 0xc1a0aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z0.s\n"
+ ".inst 0xa0402aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21]\n"
+ "trn1 z6.h, z6.h, z14.h\n"
+ "add z4.h, z4.h, z29.h\n"
+ "mov z7.d, z21.d\n"
+ "add z5.h, z5.h, z29.h\n"
+ ".inst 0xc1a8ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ "add z6.h, z6.h, z29.h\n"
+ "add z7.h, z7.h, z29.h\n"
+ ".inst 0xc1bccff8 // sclamp { z24.s-z27.s }, z31.s, z28.s\n"
+ ".inst 0xc17a1468 // sdot za.s[x8, 0], { z3.h-z6.h }, z10.h\n"
+ "ld1b { z10.s }, p1/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
"ld1b { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "st1b { z0.s }, p1, [x11]\n"
+ "st1b { z24.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
+ ".inst 0xc17b1488 // sdot za.s[x8, 0], { z4.h-z7.h }, z11.h\n"
+ "ld1b { z11.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z20.h\n"
- "st1b { z1.s }, p1, [x10]\n"
- "ld1b { z23.s }, p1/Z, [x20]\n"
+ "trn1 z10.h, z10.h, z22.h\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "st1b { z25.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z2.s }, p1, [x27]\n"
- "ld1b { z24.s }, p1/Z, [x20]\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z24.h\n"
+ "st1b { z26.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "ld1b { z24.s }, p1/Z, [x20]\n"
+ "ld1b { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "st1b { z3.s }, p1, [x26]\n"
+ "st1b { z27.s }, p1, [x26]\n"
"add x26, x26, x24\n"
- "ld1b { z3.s }, p1/Z, [x20]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z3.h\n"
- "add z21.h, z21.h, z11.h\n"
- "ld1b { z3.s }, p1/Z, [x20]\n"
- "mov z25.d, z3.d\n"
- "add z22.h, z22.h, z11.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- "add z23.h, z23.h, z11.h\n"
- "add z24.h, z24.h, z11.h\n"
- "add z25.h, z25.h, z11.h\n"
+ "trn1 z11.h, z11.h, z14.h\n"
+ "add z10.h, z10.h, z29.h\n"
+ "ld1b { z6.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
+ "trn1 z12.h, z12.h, z9.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
+ "trn1 z13.h, z13.h, z6.h\n"
+ "add z11.h, z11.h, z29.h\n"
+ "mov z14.d, z20.d\n"
+ "add z12.h, z12.h, z29.h\n"
+ "add z13.h, z13.h, z29.h\n"
+ "add z14.h, z14.h, z29.h\n"
"bgt 11b\n"
"b 17f\n"
"12:" // Padded
@@ -417,440 +422,440 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
"beq 14f\n"
"13:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x14]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #4\n"
+ ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z21.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z20.h, z20.h, z22.h\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z4.s }, p0/Z, [x20]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z17.h\n"
- "trn1 z23.h, z23.h, z4.h\n"
+ "ld1b { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z23.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z24.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z25.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z11.h\n"
- "addvl x20, SP, #4\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "trn1 z25.h, z25.h, z17.h\n"
- ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
- "mov z26.d, z1.d\n"
- ".inst 0xc17416c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z4.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17c16e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z12.h\n"
+ "ld1b { z24.s }, p0/Z, [x21]\n"
+ "trn1 z23.h, z23.h, z25.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
+ ".inst 0xc1731688 // sdot za.s[x8, 0], { z20.h-z23.h }, z3.h\n"
+ "mov z24.d, z24.d\n"
+ ".inst 0xc17b16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z11.h\n"
"14:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x14]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "addvl x20, SP, #2\n"
+ ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z21.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z20.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z5.s }, p0/Z, [x20]\n"
- "add z5.h, p0/M, z5.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z22.h, z22.h, z17.h\n"
- "trn1 z23.h, z23.h, z5.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z20.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z23.h, z20.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z25.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z15.s }, p0/Z, [x20]\n"
- "add z15.h, p0/M, z15.h, z11.h\n"
- "addvl x20, SP, #2\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "trn1 z25.h, z25.h, z17.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- "mov z26.d, z15.d\n"
- ".inst 0xc17016c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z0.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17116e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z1.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "trn1 z24.h, z24.h, z25.h\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
+ ".inst 0xc17316a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z3.h\n"
+ "mov z25.d, z20.d\n"
+ ".inst 0xc17b16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z11.h\n"
"15:" // Padded: 0 priming loads
"cmp x15, #0x2\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"blt 18f\n"
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
- "mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z18.h\n"
- "trn1 z22.h, z22.h, z3.h\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
- "add z19.h, p0/M, z19.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "mov x12, #0x8\n"
- "add z20.h, p0/M, z20.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
"sub x15, x15, #0x2\n"
"sub x13, x13, #0x1\n"
- "trn1 z23.h, z23.h, z19.h\n"
- "trn1 z24.h, z24.h, z20.h\n"
"lsr x20, x15, #0x1\n"
"cmp x20, x13\n"
- "mov z25.d, z3.d\n"
- "csel x22, x20, x13, LT\n"
- "add x14, x14, %x[ld_in_col]\n"
"and x15, x15, #0x1\n"
- "sub x13, x13, x22\n"
- "cbz x22, 17f\n"
- "16:" // Padded: Main loop
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "addvl x20, SP, #4\n"
- "mov x12, #0x0\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x21, x14, %x[ld_in_row]\n"
- ".inst 0xc17416a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z4.h\n"
- "ld1b { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "ld1b { z10.s }, p0/Z, [x14]\n"
+ "csel x23, x20, x13, LT\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "sub x13, x13, x23\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z14.s }, p0/Z, [x21]\n"
- "add z14.h, p0/M, z14.h, z11.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17c16c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z12.h\n"
- "ld1b { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
+ "ld1b { z11.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z20.h\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z15.s }, p0/Z, [x21]\n"
"mov x12, #0x4\n"
- "add z15.h, p0/M, z15.h, z11.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x21]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z20.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x21]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z20.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z4.s }, p0/Z, [x21]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
"mov x12, #0x8\n"
+ "ld1b { z21.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z14.h\n"
- "trn1 z22.h, z22.h, z15.h\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "addvl x20, SP, #2\n"
- "ld1b { z2.s }, p0/Z, [x21]\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z4.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z21.h\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
+ "mov z14.d, z20.d\n"
+ "cbz x23, 17f\n"
+ "16:" // Padded: Main loop
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "addvl x20, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "add z2.h, p0/M, z2.h, z11.h\n"
- "add x14, x14, %x[ld_in_col]\n"
- ".inst 0xc17016a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z0.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "add x22, x14, %x[ld_in_row]\n"
+ "addvl x21, SP, #2\n"
+ "subs x23, x23, #0x1\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1721549 // sdot za.s[x8, 1], { z10.h-z13.h }, z2.h\n"
+ "ld1b { z10.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x20, x14, %x[ld_in_row]\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "mov z25.d, z2.d\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1731569 // sdot za.s[x8, 1], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1b { z26.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z26.h, p0/M, z26.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17116c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z1.h\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ "ld1b { z11.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z26.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z4.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
+ "ld1b { z9.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z4.s }, p1, [x11]\n"
+ "add x11, x11, x9\n"
+ "st1b { z5.s }, p1, [x10]\n"
+ "add x10, x10, x28\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "st1b { z6.s }, p1, [x27]\n"
+ "add x27, x27, x25\n"
+ "trn1 z11.h, z11.h, z9.h\n"
+ "st1b { z7.s }, p1, [x26]\n"
+ "add x26, x26, x24\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z9.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z9.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- "add x20, x20, %x[ld_in_row]\n"
- "add z12.h, p0/M, z12.h, z11.h\n"
+ "ld1b { z20.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z11.h\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
- "add x11, x11, x9\n"
- "trn1 z21.h, z21.h, z20.h\n"
- "st1b { z17.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "trn1 z22.h, z22.h, z4.h\n"
- "trn1 z23.h, z23.h, z27.h\n"
- "st1b { z18.s }, p1, [x27]\n"
- "add x27, x27, x25\n"
- "trn1 z24.h, z24.h, z12.h\n"
- "mov z25.d, z8.d\n"
- "st1b { z19.s }, p1, [x26]\n"
- "add x26, x26, x24\n"
- "add x14, x14, %x[ld_in_col]\n"
- "bgt 16b\n"
- "17:" // Main loop tail
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "addvl x20, SP, #4\n"
"mov x12, #0x0\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z9.s }, p0/Z, [x22]\n"
+ "trn1 z13.h, z13.h, z20.h\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "add x20, x14, %x[ld_in_row]\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- "ld1b { z0.s }, p0/Z, [x14]\n"
- "add z0.h, p0/M, z0.h, z11.h\n"
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "ld1b { z10.s }, p0/Z, [x14]\n"
+ "add x14, x14, %x[ld_in_col]\n"
+ "mov z14.d, z9.d\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z14.s }, p0/Z, [x20]\n"
- "add z14.h, p0/M, z14.h, z11.h\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
+ "ld1b { z25.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z25.h, p0/M, z25.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z11.h\n"
+ "ld1b { z11.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z25.h\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z12.s }, p0/Z, [x20]\n"
"mov x12, #0x4\n"
- "add z12.h, p0/M, z12.h, z11.h\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z2.s }, p0/Z, [x20]\n"
- "add z2.h, p0/M, z2.h, z11.h\n"
+ "ld1b { z12.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z15.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
+ "ld1b { z4.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z4.h, p0/M, z4.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z3.s }, p0/Z, [x20]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add z3.h, p0/M, z3.h, z11.h\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z4.h\n"
+ "add z13.h, p0/M, z13.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
+ "ld1b { z4.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z25.h, p0/M, z25.h, z11.h\n"
+ "add z4.h, p0/M, z4.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
+ "ld1b { z26.s }, p0/Z, [x20]\n"
+ "trn1 z13.h, z13.h, z4.h\n"
+ "add z26.h, p0/M, z26.h, z29.h\n"
+ "mov z14.d, z26.d\n"
+ "bgt 16b\n"
+ "17:" // Main loop tail
+ ".inst 0xc1721548 // sdot za.s[x8, 0], { z10.h-z13.h }, z2.h\n"
+ "addvl x22, SP, #4\n"
+ "mov x12, #0x0\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "add x21, x14, %x[ld_in_row]\n"
"addvl x20, SP, #2\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- "trn1 z0.h, z0.h, z14.h\n"
- "add x8, x8, #0x1\n"
- "add z27.h, p0/M, z27.h, z11.h\n"
- "trn1 z1.h, z1.h, z12.h\n"
- "trn1 z2.h, z2.h, z21.h\n"
+ ".inst 0xc1731568 // sdot za.s[x8, 0], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1721549 // sdot za.s[x8, 1], { z10.h-z13.h }, z2.h\n"
+ "ld1b { z9.s }, p0/Z, [x14]\n"
"add x14, x14, %x[ld_in_col]\n"
- "trn1 z3.h, z3.h, z25.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- "mov z4.d, z27.d\n"
- ".inst 0xc17e1408 // sdot za.s[x8, 0], { z0.h-z3.h }, z14.h\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ "add z9.h, p0/M, z9.h, z29.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1731569 // sdot za.s[x8, 1], { z11.h-z14.h }, z3.h\n"
+ ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
+ "add x8, x8, #0x1\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ "ld1b { z10.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z15.h\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ "add z10.h, p0/M, z10.h, z29.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x4\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "st1b { z17.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- ".inst 0xc17f1428 // sdot za.s[x8, 0], { z1.h-z4.h }, z15.h\n"
- ".inst 0xa0402bee // ld1h { z14.h-z15.h }, pn10.b/Z, [SP]\n"
- "st1b { z18.s }, p1, [x27]\n"
+ "ld1b { z11.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z19.s }, p1, [x26]\n"
+ "trn1 z10.h, z10.h, z15.h\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
+ "add z11.h, p0/M, z11.h, z29.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ "ld1b { z5.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z5.h, p0/M, z5.h, z29.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z5.h\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ "mov x12, #0x8\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "ld1b { z5.s }, p0/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z15.h\n"
+ "add z5.h, p0/M, z5.h, z29.h\n"
+ ".inst 0xc1721528 // sdot za.s[x8, 0], { z9.h-z12.h }, z2.h\n"
+ "mov z13.d, z5.d\n"
+ ".inst 0xc1731548 // sdot za.s[x8, 0], { z10.h-z13.h }, z3.h\n"
+ ".inst 0xa0402be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP]\n"
"18:" // Main loop skip tail
"cbz x15, 19f\n" // Skip remainder inputs
"mov x12, #0x0\n"
+ "add x21, x14, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #4\n"
+ "sub x13, x13, #0x1\n"
"ld1b { z21.s }, p0/Z, [x14]\n"
- "add z21.h, p0/M, z21.h, z11.h\n"
- "add x20, x14, %x[ld_in_row]\n"
+ "add z21.h, p0/M, z21.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z22.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z21.h, z21.h, z15.h\n"
+ "add z22.h, p0/M, z22.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z0.s }, p0/Z, [x20]\n"
- "add z0.h, p0/M, z0.h, z11.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z21.h, z21.h, z17.h\n"
- "trn1 z22.h, z22.h, z0.h\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z12.h, p0/M, z12.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x20]\n"
- "add z23.h, p0/M, z23.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z23.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z22.h, z12.h\n"
+ "add z23.h, p0/M, z23.h, z29.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z20.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z20.h, p0/M, z20.h, z29.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z24.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z23.h, z20.h\n"
+ "add z24.h, p0/M, z24.h, z29.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z5.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z5.h, p0/M, z5.h, z11.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z30.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z30.h, p0/M, z30.h, z29.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z4.s }, p0/Z, [x20]\n"
- "add z4.h, p0/M, z4.h, z11.h\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z5.h\n"
- "mov z25.d, z4.d\n"
- "addvl x20, SP, #4\n"
- ".inst 0xc17e16a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z14.h\n"
- "sub x13, x13, #0x1\n"
- ".inst 0xc17f16c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z15.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- ".inst 0xc1a9aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
- ".inst 0xc17016a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z0.h\n"
- ".inst 0xc1adab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z13.s\n"
- ".inst 0xc17116c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z1.h\n"
+ "ld1b { z6.s }, p0/Z, [x21]\n"
+ "trn1 z24.h, z24.h, z30.h\n"
+ "add z6.h, p0/M, z6.h, z29.h\n"
+ ".inst 0xc17216a8 // sdot za.s[x8, 0], { z21.h-z24.h }, z2.h\n"
+ "mov z25.d, z6.d\n"
+ ".inst 0xc17316c8 // sdot za.s[x8, 0], { z22.h-z25.h }, z3.h\n"
+ ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc17516a9 // sdot za.s[x8, 1], { z21.h-z24.h }, z5.h\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
+ ".inst 0xc17d16c9 // sdot za.s[x8, 1], { z22.h-z25.h }, z13.h\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a7cd50 // sclamp { z16.s-z19.s }, z10.s, z7.s\n"
- "st1b { z16.s }, p1, [x11]\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- "st1b { z17.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z18.s }, p1, [x27]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z19.s }, p1, [x26]\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"19:" // Tail input: End
"cbz x13, 21f\n"
"20:" // Right padding loop
- ".inst 0xc0060c00 // mova { z0.d-z3.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
+ ".inst 0xc0060c04 // mova { z4.d-z7.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a9aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
"subs x13, x13, #0x1\n"
- ".inst 0xc0040f82 // mova za.d[x8, #2], { z28.d-z31.d }\n"
- ".inst 0xc1adab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
- ".inst 0xc1a7cd40 // sclamp { z0.s-z3.s }, z10.s, z7.s\n"
- "st1b { z0.s }, p1, [x11]\n"
+ ".inst 0xc0040e02 // mova za.d[x8, #2], { z16.d-z19.d }\n"
+ ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a0aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ ".inst 0xc1a8ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
+ ".inst 0xc1bccfe4 // sclamp { z4.s-z7.s }, z31.s, z28.s\n"
+ "st1b { z4.s }, p1, [x11]\n"
"add x11, x11, x9\n"
- "st1b { z1.s }, p1, [x10]\n"
+ "st1b { z5.s }, p1, [x10]\n"
"add x10, x10, x28\n"
- "st1b { z2.s }, p1, [x27]\n"
+ "st1b { z6.s }, p1, [x27]\n"
"add x27, x27, x25\n"
- "st1b { z3.s }, p1, [x26]\n"
+ "st1b { z7.s }, p1, [x26]\n"
"add x26, x26, x24\n"
"bgt 20b\n"
"21:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x16\n"
+ "whilelt p1.s, x16, x17\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
- "incw x16\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "whilelt p1.s, x16, x17\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -869,6 +874,8 @@ void sme2_u8s8u8q_planar_3x3_s2_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #6\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
index 64023eeaff..baaf51c711 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,249 +70,254 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"ldr x5, [%x[args], %[offsetof_Args_pad_bottom]]\n"
- "ptrue p2.b\n"
+ "mov x22, SP\n"
"mov x20, #0x8\n"
+ "ptrue p2.b\n"
"ldr x6, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z17.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x5\n"
- ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x21, x22, #0x8\n"
"ldr x7, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z15.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x21, x21, #-0x400\n"
+ ".inst 0x25207812 // ptrue pn10.b\n"
+ "sub x20, x20, x5\n"
+ "mov SP, x21\n"
+ "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
+ "addvl SP, SP, #-30\n"
+ "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
"whilelt p1.s, XZR, x7\n"
"whilelt p9.s, XZR, x20\n"
- "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "neg z15.h, p2/M, z15.h\n"
+ "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"whilelt p8.s, XZR, x6\n"
- "addvl SP, SP, #-30\n"
- "ldr x17, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z17.h, p2/M, z17.h\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z24.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z18.s, #0x0\n"
+ "mov z28.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z18.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x20, x17, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x23\n"
- "ld1sb { z2.s }, p2/Z, [x20]\n"
+ "ldr x24, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z0.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z13.h, #0x0\n"
+ "addvl x22, SP, #30\n"
+ "addvl x22, x22, #-6\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z29.d, z28.d\n"
+ "mov x23, x24\n"
+ "incw x24\n"
+ "ld1sb { z22.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1sb { z21.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1sb { z19.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "ld1sb { z25.s }, p2/Z, [x23]\n"
+ "incw x23, ALL, MUL #5\n"
+ "sub z22.h, z22.h, z0.h\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "ld1sb { z5.s }, p2/Z, [x23]\n"
+ "mov x20, x24\n"
+ "incw x24\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "sub z25.h, z25.h, z0.h\n"
+ "sub z5.h, z5.h, z0.h\n"
+ "trn1 z6.h, z13.h, z22.h\n"
+ "trn1 z23.h, z22.h, z21.h\n"
+ "ld1sb { z27.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "ld1rh { z3.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z15.h, #0x0\n"
- "sub z2.h, z2.h, z3.h\n"
- "incw x23\n"
- "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "trn1 z4.h, z21.h, z19.h\n"
+ "ld1sb { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z13.h, z13.h, z3.h\n"
- "trn1 z11.h, z15.h, z2.h\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
+ "trn1 z26.h, z19.h, z25.h\n"
+ "ld1sb { z18.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z27.h, z27.h, z3.h\n"
- "trn1 z0.h, z2.h, z13.h\n"
+ "trn1 z22.h, z25.h, z5.h\n"
+ "ld1sb { z7.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z25.h, z5.h, z13.h\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "sub z9.h, z9.h, z0.h\n"
+ "ld1sb { z1.s }, p2/Z, [x20]\n"
+ "mov x20, x24\n"
+ "sub z18.h, z18.h, z0.h\n"
+ "st1h { z6.h }, p2, [x22]\n"
+ "incw x24\n"
+ "sub z7.h, z7.h, z0.h\n"
+ "st1h { z23.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z1.h, z1.h, z0.h\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "trn1 z20.h, z13.h, z27.h\n"
+ "trn1 z12.h, z27.h, z9.h\n"
+ "ld1sb { z21.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z2.h, z9.h, z18.h\n"
"ld1sb { z19.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z19.h, z19.h, z3.h\n"
- "trn1 z26.h, z13.h, z27.h\n"
+ "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z9.h, z18.h, z7.h\n"
"ld1sb { z14.s }, p2/Z, [x20]\n"
- "sub z14.h, z14.h, z3.h\n"
- "mov x20, x23\n"
- "trn1 z10.h, z27.h, z19.h\n"
- "ld1sb { z9.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z19.h, z19.h, z14.h\n"
- "trn1 z1.h, z14.h, z15.h\n"
+ "st1h { z22.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z18.h, z7.h, z1.h\n"
"ld1sb { z5.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z9.h, z9.h, z3.h\n"
- "sub z5.h, z5.h, z3.h\n"
- "ld1sb { z29.s }, p2/Z, [x20]\n"
+ "st1h { z25.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z25.h, z1.h, z13.h\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "addvl x22, x22, #-6\n"
+ "ld1sb { z16.s }, p2/Z, [x20]\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "mov x20, x24\n"
+ "st1h { z20.h }, p2, [x22]\n"
+ "sub z5.h, z5.h, z0.h\n"
+ "st1h { z12.h }, p2, [x22, #1, MUL VL]\n"
+ "incw x24\n"
+ "st1h { z2.h }, p2, [x22, #2, MUL VL]\n"
+ "sub z16.h, z16.h, z0.h\n"
+ "trn1 z7.h, z13.h, z21.h\n"
+ "trn1 z20.h, z21.h, z19.h\n"
+ "ld1sb { z6.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z29.h, z29.h, z3.h\n"
- "addvl x22, SP, #30\n"
+ "trn1 z17.h, z19.h, z14.h\n"
+ "st1h { z9.h }, p2, [x22, #3, MUL VL]\n"
"ld1sb { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "incw x23\n"
- "sub z2.h, z2.h, z3.h\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
- "addvl x22, x22, #-6\n"
- "sub z23.h, z23.h, z3.h\n"
- "mov x20, x23\n"
- "st1h { z11.h }, p2, [x22]\n"
- "trn1 z20.h, z15.h, z9.h\n"
- "incw x23\n"
- "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "st1h { z0.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z22.h, z9.h, z5.h\n"
- "ld1sb { z25.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z9.h, z5.h, z29.h\n"
+ "trn1 z12.h, z14.h, z5.h\n"
+ "st1h { z18.h }, p2, [x22, #4, MUL VL]\n"
"ld1sb { z21.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z10.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z26.h, z29.h, z2.h\n"
- "ld1sb { z0.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z28.h, z2.h, z23.h\n"
- "ld1sb { z19.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z1.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z2.h, z23.h, z15.h\n"
- "sub z25.h, z25.h, z3.h\n"
+ "st1h { z25.h }, p2, [x22, #5, MUL VL]\n"
"addvl x22, x22, #-6\n"
- "sub z21.h, z21.h, z3.h\n"
- "ld1sb { z6.s }, p2/Z, [x20]\n"
- "sub z0.h, z0.h, z3.h\n"
- "mov x20, x23\n"
- "sub z19.h, z19.h, z3.h\n"
- "sub z6.h, z6.h, z3.h\n"
- "st1h { z20.h }, p2, [x22]\n"
- "incw x23\n"
- "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z11.h, z15.h, z25.h\n"
- "trn1 z10.h, z25.h, z21.h\n"
- "ld1sb { z5.s }, p2/Z, [x20]\n"
+ "trn1 z5.h, z5.h, z16.h\n"
+ "ld1sb { z25.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z9.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z14.h, z21.h, z0.h\n"
+ "trn1 z4.h, z16.h, z13.h\n"
+ "sub z6.h, z6.h, z0.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1sb { z19.s }, p2/Z, [x20]\n"
+ "mov x20, x24\n"
+ "sub z21.h, z21.h, z0.h\n"
+ "st1h { z7.h }, p2, [x22]\n"
+ "sub z25.h, z25.h, z0.h\n"
+ "st1h { z20.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z19.h, z19.h, z0.h\n"
+ "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
+ "trn1 z1.h, z13.h, z6.h\n"
+ "trn1 z24.h, z6.h, z2.h\n"
"ld1sb { z23.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z26.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z21.h, z0.h, z19.h\n"
- "ld1sb { z27.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z28.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z19.h, z19.h, z6.h\n"
- "ld1sb { z29.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z2.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z13.h, z6.h, z15.h\n"
- "sub z5.h, z5.h, z3.h\n"
- "sub z23.h, z23.h, z3.h\n"
- "ld1sb { z1.s }, p2/Z, [x20]\n"
- "addvl x22, x22, #-6\n"
- "sub z27.h, z27.h, z3.h\n"
- "sub z29.h, z29.h, z3.h\n"
- "mov x20, x23\n"
- "st1h { z11.h }, p2, [x22]\n"
- "sub z1.h, z1.h, z3.h\n"
- "st1h { z10.h }, p2, [x22, #1, MUL VL]\n"
- "trn1 z30.h, z15.h, z5.h\n"
- "trn1 z26.h, z5.h, z23.h\n"
- "ld1sb { z11.s }, p2/Z, [x20]\n"
+ "trn1 z16.h, z2.h, z21.h\n"
+ "ld1sb { z6.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z14.h }, p2, [x22, #2, MUL VL]\n"
- "trn1 z22.h, z23.h, z27.h\n"
- "ld1sb { z5.s }, p2/Z, [x20]\n"
+ "st1h { z12.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z20.h, z21.h, z25.h\n"
+ "ld1sb { z14.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z21.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z28.h, z27.h, z29.h\n"
- "ld1sb { z8.s }, p2/Z, [x20]\n"
+ "st1h { z5.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z17.h, z25.h, z19.h\n"
+ "ld1sb { z22.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z27.h, z29.h, z1.h\n"
- "ld1sb { z9.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "st1h { z13.h }, p2, [x22, #5, MUL VL]\n"
- "trn1 z2.h, z1.h, z15.h\n"
- "ld1sb { z14.s }, p2/Z, [x20]\n"
- "sub z11.h, z11.h, z3.h\n"
+ "st1h { z4.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z19.h, z19.h, z13.h\n"
+ "ld1sb { z27.s }, p2/Z, [x20]\n"
+ "sub z23.h, z23.h, z0.h\n"
"addvl x22, x22, #-6\n"
- "sub z5.h, z5.h, z3.h\n"
- "sub z8.h, z8.h, z3.h\n"
- "st1h { z30.h }, p2, [x22]\n"
- "sub z9.h, z9.h, z3.h\n"
- "sub z14.h, z14.h, z3.h\n"
- "st1h { z26.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
- "mov z19.d, z18.d\n"
- "trn1 z22.h, z15.h, z11.h\n"
- "st1h { z28.h }, p2, [x22, #3, MUL VL]\n"
- "trn1 z1.h, z11.h, z5.h\n"
- "trn1 z31.h, z5.h, z8.h\n"
- "st1h { z27.h }, p2, [x22, #4, MUL VL]\n"
- "trn1 z8.h, z8.h, z9.h\n"
- "trn1 z21.h, z9.h, z14.h\n"
- "st1h { z2.h }, p2, [x22, #5, MUL VL]\n"
+ "sub z6.h, z6.h, z0.h\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "st1h { z1.h }, p2, [x22]\n"
+ "sub z22.h, z22.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
+ "trn1 z16.h, z13.h, z23.h\n"
+ "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
+ "trn1 z7.h, z23.h, z6.h\n"
+ "trn1 z12.h, z6.h, z14.h\n"
+ "st1h { z19.h }, p2, [x22, #5, MUL VL]\n"
"addvl x22, x22, #-6\n"
- "trn1 z15.h, z14.h, z15.h\n"
- "st1h { z22.h }, p2, [x22]\n"
- "st1h { z1.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z31.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z8.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z15.h }, p2, [x22, #5, MUL VL]\n"
+ "trn1 z5.h, z14.h, z22.h\n"
+ "trn1 z14.h, z22.h, z27.h\n"
+ "trn1 z20.h, z27.h, z13.h\n"
+ "st1h { z16.h }, p2, [x22]\n"
+ "st1h { z7.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z12.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z5.h }, p2, [x22, #3, MUL VL]\n"
+ "st1h { z14.h }, p2, [x22, #4, MUL VL]\n"
+ "st1h { z20.h }, p2, [x22, #5, MUL VL]\n"
"cbz x21, 3f\n"
- "ld1w { z7.s }, p1/Z, [x21, x17, LSL #2]\n"
+ "ld1w { z8.s }, p1/Z, [x21, x17, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z4.s }, p1/Z, [x20, x17, LSL #2]\n"
+ "ld1w { z11.s }, p1/Z, [x20, x17, LSL #2]\n"
"4:" // Load right_shift: End
"ldr x25, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x25, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x7, x23, LSL #22\n"
"mov x22, #0x8\n"
- "add x21, x6, x5\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x6, x5\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x11, #0x0\n"
"mov x8, #0x8\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x6, x16\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x25, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x6, x16\n"
+ "orr x20, x7, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x16, x6, x20, x16\n"
- ".inst 0xc0046a40 // mova za.d[x11, #0], { z18.d-z19.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0046a41 // mova za.d[x11, #1], { z18.d-z19.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0046b80 // mova za.d[x11, #0], { z28.d-z29.d }\n"
"mov x22, #0x4\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x16, x6, x21, x16\n"
+ ".inst 0xc0046b81 // mova za.d[x11, #1], { z28.d-z29.d }\n"
+ "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
+ ".inst 0xc0046b82 // mova za.d[x11, #2], { z28.d-z29.d }\n"
"ldp x14, x13, [x23], #0x10\n"
- ".inst 0xc0046a42 // mova za.d[x11, #2], { z18.d-z19.d }\n"
+ ".inst 0xc0046b83 // mova za.d[x11, #3], { z28.d-z29.d }\n"
"ldp x4, x10, [x20], #0x10\n"
- ".inst 0xc0046a43 // mova za.d[x11, #3], { z18.d-z19.d }\n"
- "ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0046a44 // mova za.d[x11, #4], { z18.d-z19.d }\n"
+ ".inst 0xc0046b84 // mova za.d[x11, #4], { z28.d-z29.d }\n"
+ ".inst 0xc0046b85 // mova za.d[x11, #5], { z28.d-z29.d }\n"
"ldp x9, x28, [x23], #0x10\n"
- ".inst 0xc0046a45 // mova za.d[x11, #5], { z18.d-z19.d }\n"
+ ".inst 0xc0046b86 // mova za.d[x11, #6], { z28.d-z29.d }\n"
"ldp x27, x26, [x20], #0x10\n"
- ".inst 0xc0046a46 // mova za.d[x11, #6], { z18.d-z19.d }\n"
- ".inst 0xc0046a47 // mova za.d[x11, #7], { z18.d-z19.d }\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
+ ".inst 0xc0046b87 // mova za.d[x11, #7], { z28.d-z29.d }\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
"csel x20, x21, x22, LT\n"
"sub x21, x21, x20\n"
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
- ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066810 // mova { z16.d-z17.d }, za.d[x11, #0]\n"
"sub x15, x15, x21\n"
- ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
- ".inst 0xc1a4aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z4.s\n"
- ".inst 0xc1acab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z12.s\n"
- ".inst 0xc1b0cf14 // sclamp { z20.s-z23.s }, z24.s, z16.s\n"
+ ".inst 0xc0066832 // mova { z18.d-z19.d }, za.d[x11, #1]\n"
+ ".inst 0xc1a8ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z8.s\n"
+ ".inst 0xc1abaa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
+ ".inst 0xc1becff0 // sclamp { z16.s-z19.s }, z31.s, z30.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z20.s }, p1, [x14]\n"
+ "st1b { z16.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z22.s }, p1, [x13]\n"
+ "st1b { z18.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z21.s }, p1, [x9]\n"
+ "st1b { z17.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z23.s }, p1, [x28]\n"
+ "st1b { z19.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
@@ -328,331 +333,331 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
"add x21, x16, %x[ld_in_row]\n"
- "ld1b { z1.s }, p1/Z, [x16]\n"
+ "ld1b { z4.s }, p1/Z, [x16]\n"
"addvl x20, SP, #24\n"
- "ld1b { z28.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z1.h, z28.h\n"
- "add z27.h, z27.h, z17.h\n"
- "ld1b { z1.s }, p1/Z, [x21]\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1b { z2.s }, p1/Z, [x21]\n"
+ "ld1b { z25.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z28.h, z1.h, z2.h\n"
- "add z28.h, z28.h, z17.h\n"
- "ld1b { z13.s }, p1/Z, [x21]\n"
+ "ld1b { z19.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z6.s }, p1/Z, [x21]\n"
+ "trn1 z22.h, z4.h, z13.h\n"
+ "ld1b { z27.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z13.h, z6.h\n"
- "add z29.h, z29.h, z17.h\n"
- "ld1b { z30.s }, p1/Z, [x21]\n"
+ "trn1 z23.h, z25.h, z19.h\n"
+ "ld1b { z21.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16a7768 // sdot za.s[x11, 0], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ "add z22.h, z22.h, z15.h\n"
+ "trn1 z24.h, z14.h, z27.h\n"
"ld1b { z20.s }, p1/Z, [x21]\n"
- "trn1 z30.h, z30.h, z20.h\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "add z30.h, z30.h, z17.h\n"
- ".inst 0xc1697788 // sdot za.s[x11, 0], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc1617789 // sdot za.s[x11, 1], { z28.h-z29.h }, z1.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z11.h\n"
- ".inst 0xc16a77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z10.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z23.h, z23.h, z15.h\n"
+ ".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "trn1 z25.h, z21.h, z20.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16d76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z13.h\n"
+ ".inst 0xc16c76c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z12.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc16e76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z14.h\n"
+ ".inst 0xc16676e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xc1617708 // sdot za.s[x11, 0], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xc1607709 // sdot za.s[x11, 1], { z24.h-z25.h }, z0.h\n"
"9:" // Unpadded: 3 priming loads
"add x22, x16, %x[ld_in_row]\n"
- "ld1b { z2.s }, p1/Z, [x16]\n"
+ "ld1b { z21.s }, p1/Z, [x16]\n"
"addvl x21, SP, #18\n"
- "ld1b { z28.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z20.h, z2.h, z28.h\n"
- "add z20.h, z20.h, z17.h\n"
- "ld1b { z31.s }, p1/Z, [x22]\n"
+ "ld1b { z18.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- "ld1b { z11.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z21.h, z31.h, z11.h\n"
- "add z21.h, z21.h, z17.h\n"
- "ld1b { z25.s }, p1/Z, [x22]\n"
+ "ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1b { z8.s }, p1/Z, [x22]\n"
+ "ld1b { z3.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z22.h, z25.h, z8.h\n"
- "add z22.h, z22.h, z17.h\n"
- "ld1b { z8.s }, p1/Z, [x22]\n"
+ "ld1b { z27.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16e7688 // sdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
- "ld1b { z3.s }, p1/Z, [x22]\n"
- "trn1 z23.h, z8.h, z3.h\n"
- ".inst 0xc1667689 // sdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc161768a // sdot za.s[x11, 2], { z20.h-z21.h }, z1.h\n"
- "add z23.h, z23.h, z17.h\n"
+ "trn1 z24.h, z21.h, z18.h\n"
+ "ld1b { z7.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z25.h, z17.h, z3.h\n"
+ "ld1b { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ "add z24.h, z24.h, z15.h\n"
+ "trn1 z26.h, z27.h, z7.h\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
".inst 0xa1412aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc160768b // sdot za.s[x11, 3], { z20.h-z21.h }, z0.h\n"
- ".inst 0xc16976a8 // sdot za.s[x11, 0], { z21.h-z22.h }, z9.h\n"
- ".inst 0xa0422aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16176a9 // sdot za.s[x11, 1], { z21.h-z22.h }, z1.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16976aa // sdot za.s[x11, 2], { z21.h-z22.h }, z9.h\n"
- ".inst 0xc16176ab // sdot za.s[x11, 3], { z21.h-z22.h }, z1.h\n"
- ".inst 0xc16f76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z15.h\n"
- ".inst 0xc16e76c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z14.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16b76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z11.h\n"
- ".inst 0xc16a76cb // sdot za.s[x11, 3], { z22.h-z23.h }, z10.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xa0422aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "trn1 z27.h, z17.h, z16.h\n"
+ "add z26.h, z26.h, z15.h\n"
+ ".inst 0xc1637708 // sdot za.s[x11, 0], { z24.h-z25.h }, z3.h\n"
+ ".inst 0xc1627709 // sdot za.s[x11, 1], { z24.h-z25.h }, z2.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ "add z27.h, z27.h, z15.h\n"
+ ".inst 0xc16d770a // sdot za.s[x11, 2], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc16c770b // sdot za.s[x11, 3], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc1697728 // sdot za.s[x11, 0], { z25.h-z26.h }, z9.h\n"
+ ".inst 0xc1617729 // sdot za.s[x11, 1], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xa0412a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
+ ".inst 0xc1677748 // sdot za.s[x11, 0], { z26.h-z27.h }, z7.h\n"
+ ".inst 0xc1667749 // sdot za.s[x11, 1], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa0422a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
+ ".inst 0xc16c774b // sdot za.s[x11, 3], { z26.h-z27.h }, z12.h\n"
"10:" // Unpadded: 2 priming loads
"add x23, x16, %x[ld_in_row]\n"
- "ld1b { z2.s }, p1/Z, [x16]\n"
+ "ld1b { z0.s }, p1/Z, [x16]\n"
"addvl x22, SP, #12\n"
- "ld1b { z22.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z0.h, z2.h, z22.h\n"
- "add z0.h, z0.h, z17.h\n"
- "ld1b { z14.s }, p1/Z, [x23]\n"
+ "ld1b { z19.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
- "ld1b { z6.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z1.h, z14.h, z6.h\n"
- "add z1.h, z1.h, z17.h\n"
- "ld1b { z15.s }, p1/Z, [x23]\n"
+ "ld1b { z4.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- "ld1b { z6.s }, p1/Z, [x23]\n"
- "add x23, x23, %x[ld_in_row]\n"
- "trn1 z2.h, z15.h, z6.h\n"
- "add z2.h, z2.h, z17.h\n"
- "ld1b { z21.s }, p1/Z, [x23]\n"
+ "ld1b { z3.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
"add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16f7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z15.h\n"
- "ld1b { z30.s }, p1/Z, [x23]\n"
- "trn1 z3.h, z21.h, z30.h\n"
- ".inst 0xc16e7409 // sdot za.s[x11, 1], { z0.h-z1.h }, z14.h\n"
- ".inst 0xa1402aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16d740a // sdot za.s[x11, 2], { z0.h-z1.h }, z13.h\n"
- "add z3.h, z3.h, z17.h\n"
- ".inst 0xa0412ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc165740b // sdot za.s[x11, 3], { z0.h-z1.h }, z5.h\n"
- ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16f7428 // sdot za.s[x11, 0], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e7429 // sdot za.s[x11, 1], { z1.h-z2.h }, z14.h\n"
- ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xa0422ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16b740c // sdot za.s[x11, 4], { z0.h-z1.h }, z11.h\n"
- ".inst 0xc16a740d // sdot za.s[x11, 5], { z0.h-z1.h }, z10.h\n"
- ".inst 0xc16f742a // sdot za.s[x11, 2], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e742b // sdot za.s[x11, 3], { z1.h-z2.h }, z14.h\n"
- ".inst 0xa0412a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1697448 // sdot za.s[x11, 0], { z2.h-z3.h }, z9.h\n"
- ".inst 0xc1687449 // sdot za.s[x11, 1], { z2.h-z3.h }, z8.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f742c // sdot za.s[x11, 4], { z1.h-z2.h }, z15.h\n"
- ".inst 0xc16e742d // sdot za.s[x11, 5], { z1.h-z2.h }, z14.h\n"
- ".inst 0xc16b744a // sdot za.s[x11, 2], { z2.h-z3.h }, z11.h\n"
- ".inst 0xc16a744b // sdot za.s[x11, 3], { z2.h-z3.h }, z10.h\n"
- ".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc161744c // sdot za.s[x11, 4], { z2.h-z3.h }, z1.h\n"
- ".inst 0xc160744d // sdot za.s[x11, 5], { z2.h-z3.h }, z0.h\n"
+ "ld1b { z17.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z22.h, z0.h, z19.h\n"
+ "ld1b { z25.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z23.h, z4.h, z3.h\n"
+ "ld1b { z9.s }, p1/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xa0402ac4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x22]\n"
+ "add z22.h, z22.h, z15.h\n"
+ "trn1 z24.h, z17.h, z25.h\n"
+ "ld1b { z17.s }, p1/Z, [x23]\n"
+ ".inst 0xa0412ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ "add z23.h, z23.h, z15.h\n"
+ ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ "trn1 z25.h, z9.h, z17.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16576c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc16576ca // sdot za.s[x11, 2], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476cb // sdot za.s[x11, 3], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa0402a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16776e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z7.h\n"
+ ".inst 0xc16676e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa0412aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16576cc // sdot za.s[x11, 4], { z22.h-z23.h }, z5.h\n"
+ ".inst 0xc16476cd // sdot za.s[x11, 5], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xc16776ea // sdot za.s[x11, 2], { z23.h-z24.h }, z7.h\n"
+ ".inst 0xc16676eb // sdot za.s[x11, 3], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa0412a84 // ld1h { z4.h-z5.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc1617708 // sdot za.s[x11, 0], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xc1607709 // sdot za.s[x11, 1], { z24.h-z25.h }, z0.h\n"
+ ".inst 0xa0422aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16576ec // sdot za.s[x11, 4], { z23.h-z24.h }, z5.h\n"
+ ".inst 0xc16476ed // sdot za.s[x11, 5], { z23.h-z24.h }, z4.h\n"
+ ".inst 0xc167770a // sdot za.s[x11, 2], { z24.h-z25.h }, z7.h\n"
+ ".inst 0xc166770b // sdot za.s[x11, 3], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1422a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d770c // sdot za.s[x11, 4], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc165770d // sdot za.s[x11, 5], { z24.h-z25.h }, z5.h\n"
"11:" // Unpadded: 1 priming loads
"add x24, x16, %x[ld_in_row]\n"
- "ld1b { z0.s }, p1/Z, [x16]\n"
+ "ld1b { z16.s }, p1/Z, [x16]\n"
"addvl x23, SP, #6\n"
- "ld1b { z3.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z28.h, z0.h, z3.h\n"
- "add z28.h, z28.h, z17.h\n"
- "ld1b { z6.s }, p1/Z, [x24]\n"
+ "ld1b { z22.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
"addvl x22, SP, #12\n"
- "ld1b { z30.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
- "trn1 z29.h, z6.h, z30.h\n"
- "add z29.h, z29.h, z17.h\n"
- "ld1b { z1.s }, p1/Z, [x24]\n"
+ "ld1b { z19.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
"ld1b { z25.s }, p1/Z, [x24]\n"
"add x24, x24, %x[ld_in_row]\n"
- "trn1 z30.h, z1.h, z25.h\n"
- "add z30.h, z30.h, z17.h\n"
- "ld1b { z3.s }, p1/Z, [x24]\n"
- "add x24, x24, %x[ld_in_row]\n"
"addvl x20, SP, #24\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
- ".inst 0xc1617788 // sdot za.s[x11, 0], { z28.h-z29.h }, z1.h\n"
+ "ld1b { z6.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z18.h, z16.h, z22.h\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1b { z5.s }, p1/Z, [x24]\n"
- "trn1 z31.h, z3.h, z5.h\n"
- ".inst 0xc1607789 // sdot za.s[x11, 1], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16e778a // sdot za.s[x11, 2], { z28.h-z29.h }, z14.h\n"
- "add z31.h, z31.h, z17.h\n"
- ".inst 0xa1412ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc166778b // sdot za.s[x11, 3], { z28.h-z29.h }, z6.h\n"
- ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16a77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z10.h\n"
- ".inst 0xc16277a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z2.h\n"
- ".inst 0xa0412ac8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xa1422ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16f778c // sdot za.s[x11, 4], { z28.h-z29.h }, z15.h\n"
- ".inst 0xc16e778d // sdot za.s[x11, 5], { z28.h-z29.h }, z14.h\n"
- ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16977aa // sdot za.s[x11, 2], { z29.h-z30.h }, z9.h\n"
- ".inst 0xc16877ab // sdot za.s[x11, 3], { z29.h-z30.h }, z8.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a77c8 // sdot za.s[x11, 0], { z30.h-z31.h }, z10.h\n"
- ".inst 0xc16277c9 // sdot za.s[x11, 1], { z30.h-z31.h }, z2.h\n"
- ".inst 0xa1422ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16e778e // sdot za.s[x11, 6], { z28.h-z29.h }, z14.h\n"
- ".inst 0xc166778f // sdot za.s[x11, 7], { z28.h-z29.h }, z6.h\n"
- ".inst 0xc16d77ac // sdot za.s[x11, 4], { z29.h-z30.h }, z13.h\n"
- ".inst 0xc16577ad // sdot za.s[x11, 5], { z29.h-z30.h }, z5.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a77ca // sdot za.s[x11, 2], { z30.h-z31.h }, z10.h\n"
- ".inst 0xc16277cb // sdot za.s[x11, 3], { z30.h-z31.h }, z2.h\n"
- ".inst 0xa0422aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e77ae // sdot za.s[x11, 6], { z29.h-z30.h }, z14.h\n"
- ".inst 0xc16677af // sdot za.s[x11, 7], { z29.h-z30.h }, z6.h\n"
- ".inst 0xc16977cc // sdot za.s[x11, 4], { z30.h-z31.h }, z9.h\n"
- ".inst 0xc16877cd // sdot za.s[x11, 5], { z30.h-z31.h }, z8.h\n"
- ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16e77ce // sdot za.s[x11, 6], { z30.h-z31.h }, z14.h\n"
- ".inst 0xc16677cf // sdot za.s[x11, 7], { z30.h-z31.h }, z6.h\n"
+ "ld1b { z4.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z19.h, z19.h, z25.h\n"
+ "ld1b { z27.s }, p1/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ ".inst 0xa1402ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23]\n"
+ "add z18.h, z18.h, z15.h\n"
+ "trn1 z20.h, z6.h, z4.h\n"
+ "ld1b { z22.s }, p1/Z, [x24]\n"
+ ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ "add z19.h, z19.h, z15.h\n"
+ ".inst 0xa1422ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "trn1 z21.h, z27.h, z22.h\n"
+ "add z20.h, z20.h, z15.h\n"
+ ".inst 0xc1697648 // sdot za.s[x11, 0], { z18.h-z19.h }, z9.h\n"
+ ".inst 0xc1617649 // sdot za.s[x11, 1], { z18.h-z19.h }, z1.h\n"
+ ".inst 0xa1402ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22]\n"
+ "add z21.h, z21.h, z15.h\n"
+ ".inst 0xc16c764a // sdot za.s[x11, 2], { z18.h-z19.h }, z12.h\n"
+ ".inst 0xc164764b // sdot za.s[x11, 3], { z18.h-z19.h }, z4.h\n"
+ ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16d7668 // sdot za.s[x11, 0], { z19.h-z20.h }, z13.h\n"
+ ".inst 0xc1657669 // sdot za.s[x11, 1], { z19.h-z20.h }, z5.h\n"
+ ".inst 0xa1412ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163764c // sdot za.s[x11, 4], { z18.h-z19.h }, z3.h\n"
+ ".inst 0xc162764d // sdot za.s[x11, 5], { z18.h-z19.h }, z2.h\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16c766a // sdot za.s[x11, 2], { z19.h-z20.h }, z12.h\n"
+ ".inst 0xc164766b // sdot za.s[x11, 3], { z19.h-z20.h }, z4.h\n"
+ ".inst 0xa0412aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc16e7688 // sdot za.s[x11, 0], { z20.h-z21.h }, z14.h\n"
+ ".inst 0xc1667689 // sdot za.s[x11, 1], { z20.h-z21.h }, z6.h\n"
+ ".inst 0xa1422ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc169764e // sdot za.s[x11, 6], { z18.h-z19.h }, z9.h\n"
+ ".inst 0xc161764f // sdot za.s[x11, 7], { z18.h-z19.h }, z1.h\n"
+ ".inst 0xc163766c // sdot za.s[x11, 4], { z19.h-z20.h }, z3.h\n"
+ ".inst 0xc162766d // sdot za.s[x11, 5], { z19.h-z20.h }, z2.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16c768a // sdot za.s[x11, 2], { z20.h-z21.h }, z12.h\n"
+ ".inst 0xc164768b // sdot za.s[x11, 3], { z20.h-z21.h }, z4.h\n"
+ ".inst 0xa1422aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc169766e // sdot za.s[x11, 6], { z19.h-z20.h }, z9.h\n"
+ ".inst 0xc161766f // sdot za.s[x11, 7], { z19.h-z20.h }, z1.h\n"
+ ".inst 0xc16c768c // sdot za.s[x11, 4], { z20.h-z21.h }, z12.h\n"
+ ".inst 0xc164768d // sdot za.s[x11, 5], { z20.h-z21.h }, z4.h\n"
+ ".inst 0xa0422a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16d768e // sdot za.s[x11, 6], { z20.h-z21.h }, z13.h\n"
+ ".inst 0xc16c768f // sdot za.s[x11, 7], { z20.h-z21.h }, z12.h\n"
"12:" // Unpadded: 0 priming loads
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
"cbz x25, 22f\n"
"add x20, x16, %x[ld_in_row]\n"
- "ld1b { z26.s }, p1/Z, [x16]\n"
+ "ld1b { z6.s }, p1/Z, [x16]\n"
"sub x25, x25, #0x1\n"
- "ld1b { z28.s }, p1/Z, [x20]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z26.h, z28.h\n"
"sub x15, x15, #0x1\n"
- "ld1b { z31.s }, p1/Z, [x20]\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"cmp x25, x15\n"
- "add z25.h, z25.h, z17.h\n"
- "ld1b { z15.s }, p1/Z, [x20]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z31.h, z15.h\n"
"csel x25, x25, x15, LT\n"
- "ld1b { z22.s }, p1/Z, [x20]\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z26.h, z26.h, z17.h\n"
+ "trn1 z24.h, z6.h, z13.h\n"
"add x16, x16, %x[ld_in_col]\n"
- "ld1b { z8.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z27.h, z22.h, z8.h\n"
- "add z27.h, z27.h, z17.h\n"
- "ld1b { z21.s }, p1/Z, [x20]\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
"sub x15, x15, x25\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
- "trn1 z28.h, z21.h, z20.h\n"
- "add z28.h, z28.h, z17.h\n"
+ "ld1b { z22.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ "trn1 z25.h, z21.h, z19.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ "trn1 z26.h, z20.h, z13.h\n"
+ "add z24.h, z24.h, z15.h\n"
+ "trn1 z27.h, z22.h, z16.h\n"
+ "add z25.h, z25.h, z15.h\n"
+ "add z26.h, z26.h, z15.h\n"
+ "add z27.h, z27.h, z15.h\n"
"cbz x25, 21f\n"
"13:" // Unpadded: Main loop
"addvl x24, SP, #6\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
"addvl x23, SP, #12\n"
- "ld1b { z21.s }, p1/Z, [x16]\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402b0e // ld1h { z14.h-z15.h }, pn10.b/Z, [x24]\n"
+ "ld1b { z23.s }, p1/Z, [x16]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
"addvl x22, SP, #18\n"
"addvl x21, SP, #24\n"
- ".inst 0xc16f772a // sdot za.s[x11, 2], { z25.h-z26.h }, z15.h\n"
"add x20, x16, %x[ld_in_row]\n"
- "ld1b { z0.s }, p1/Z, [x20]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16e772b // sdot za.s[x11, 3], { z25.h-z26.h }, z14.h\n"
- ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
"subs x25, x25, #0x1\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- "ld1b { z20.s }, p1/Z, [x20]\n"
+ "ld1b { z19.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412b05 // ld1h { z5.h, z13.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
- ".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
- "ld1b { z31.s }, p1/Z, [x20]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc169770a // sdot za.s[x11, 2], { z24.h-z25.h }, z9.h\n"
+ "ld1b { z21.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
- "ld1b { z29.s }, p1/Z, [x20]\n"
+ ".inst 0xc161770b // sdot za.s[x11, 3], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ "ld1b { z20.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc165774b // sdot za.s[x11, 3], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412b04 // ld1h { z4.h, z12.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
"ld1b { z22.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422b02 // ld1h { z2.h, z10.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc16f772e // sdot za.s[x11, 6], { z25.h-z26.h }, z15.h\n"
- "ld1b { z30.s }, p1/Z, [x20]\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1402ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc16c772a // sdot za.s[x11, 2], { z25.h-z26.h }, z12.h\n"
+ "ld1b { z18.s }, p1/Z, [x20]\n"
+ "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc164772b // sdot za.s[x11, 3], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc16e772f // sdot za.s[x11, 7], { z25.h-z26.h }, z14.h\n"
- ".inst 0xa0402aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16d774c // sdot za.s[x11, 4], { z26.h-z27.h }, z13.h\n"
- "ld1b { z6.s }, p1/Z, [x20]\n"
- ".inst 0xc165774d // sdot za.s[x11, 5], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16a776a // sdot za.s[x11, 2], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776b // sdot za.s[x11, 3], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ae2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422b02 // ld1h { z2.h-z3.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc16d770e // sdot za.s[x11, 6], { z24.h-z25.h }, z13.h\n"
+ "ld1b { z16.s }, p1/Z, [x20]\n"
+ ".inst 0xc165770f // sdot za.s[x11, 7], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16a776c // sdot za.s[x11, 4], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776d // sdot za.s[x11, 5], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc169776e // sdot za.s[x11, 6], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776f // sdot za.s[x11, 7], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422aaa // ld1h { z10.h-z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z15.h\n"
- ".inst 0xc16e1729 // sdot za.s[x8, 1], { z25.h-z26.h }, z14.h\n"
- "trn1 z25.h, z21.h, z0.h\n"
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xc16d1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z13.h\n"
- "add z25.h, z25.h, z17.h\n"
- ".inst 0xc1651749 // sdot za.s[x8, 1], { z26.h-z27.h }, z5.h\n"
- "trn1 z26.h, z20.h, z31.h\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- ".inst 0xc16b1768 // sdot za.s[x8, 0], { z27.h-z28.h }, z11.h\n"
- "add z26.h, z26.h, z17.h\n"
- ".inst 0xc16a1769 // sdot za.s[x8, 1], { z27.h-z28.h }, z10.h\n"
- "trn1 z27.h, z29.h, z22.h\n"
- "trn1 z28.h, z30.h, z6.h\n"
+ ".inst 0xc161774c // sdot za.s[x11, 4], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc160774d // sdot za.s[x11, 5], { z26.h-z27.h }, z0.h\n"
+ ".inst 0xa0422ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc167774e // sdot za.s[x11, 6], { z26.h-z27.h }, z7.h\n"
+ ".inst 0xc166774f // sdot za.s[x11, 7], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa1422aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16c1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc1641709 // sdot za.s[x8, 1], { z24.h-z25.h }, z4.h\n"
+ "trn1 z24.h, z23.h, z19.h\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc16d1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc1651729 // sdot za.s[x8, 1], { z25.h-z26.h }, z5.h\n"
+ "trn1 z25.h, z21.h, z20.h\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "add z24.h, z24.h, z15.h\n"
+ ".inst 0xc16e1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z14.h\n"
+ ".inst 0xc1661749 // sdot za.s[x8, 1], { z26.h-z27.h }, z6.h\n"
+ "trn1 z26.h, z22.h, z18.h\n"
+ "trn1 z27.h, z17.h, z16.h\n"
"add x8, x8, #0x2\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "add z27.h, z27.h, z17.h\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ "add z25.h, z25.h, z15.h\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ "add z26.h, z26.h, z15.h\n"
+ "add z27.h, z27.h, z15.h\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "add z28.h, z28.h, z17.h\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 13b\n"
"b 21f\n"
@@ -667,513 +672,513 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z9.s }, p0/Z, [x16]\n"
- "add z9.h, p0/M, z9.h, z17.h\n"
"add x21, x16, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xa0412a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x21]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
+ "ld1b { z26.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z9.h, z22.h\n"
- "trn1 z0.h, z21.h, z20.h\n"
+ "add z26.h, p0/M, z26.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x21]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
+ "ld1b { z18.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z26.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
+ ".inst 0xc16e76c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z14.h\n"
+ "ld1b { z25.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ ".inst 0xc16676c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z6.h\n"
+ "add z25.h, p0/M, z25.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x21]\n"
- "addvl x20, SP, #24\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1b { z1.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z25.h\n"
+ "add z1.h, p0/M, z1.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa1402a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20]\n"
- "trn1 z1.h, z22.h, z20.h\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16a77e8 // sdot za.s[x11, 0], { z31.h-z0.h }, z10.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc16277e9 // sdot za.s[x11, 1], { z31.h-z0.h }, z2.h\n"
- ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- "trn1 z2.h, z21.h, z20.h\n"
- ".inst 0xc16d7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z13.h\n"
- ".inst 0xa0422a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1657409 // sdot za.s[x11, 1], { z0.h-z1.h }, z5.h\n"
- ".inst 0xc1697428 // sdot za.s[x11, 0], { z1.h-z2.h }, z9.h\n"
- ".inst 0xc1687429 // sdot za.s[x11, 1], { z1.h-z2.h }, z8.h\n"
+ ".inst 0xc16d76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z13.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ ".inst 0xc16c76e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ "trn1 z25.h, z1.h, z16.h\n"
+ ".inst 0xc1637708 // sdot za.s[x11, 0], { z24.h-z25.h }, z3.h\n"
+ ".inst 0xc1627709 // sdot za.s[x11, 1], { z24.h-z25.h }, z2.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z5.s }, p0/Z, [x16]\n"
- "add z5.h, p0/M, z5.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x21, SP, #18\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1412aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z0.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z28.h, z5.h, z22.h\n"
- "trn1 z29.h, z21.h, z20.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z1.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e7408 // sdot za.s[x11, 0], { z0.h-z1.h }, z14.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1667409 // sdot za.s[x11, 1], { z0.h-z1.h }, z6.h\n"
+ ".inst 0xa1402a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e740a // sdot za.s[x11, 2], { z0.h-z1.h }, z14.h\n"
+ ".inst 0xc166740b // sdot za.s[x11, 3], { z0.h-z1.h }, z6.h\n"
+ "ld1b { z0.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z2.h, z18.h, z17.h\n"
+ "add z0.h, p0/M, z0.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- "trn1 z30.h, z22.h, z20.h\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #24\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc1617788 // sdot za.s[x11, 0], { z28.h-z29.h }, z1.h\n"
- ".inst 0xc1607789 // sdot za.s[x11, 1], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- "trn1 z31.h, z21.h, z20.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xa0412aae // ld1h { z14.h-z15.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc169778a // sdot za.s[x11, 2], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc161778b // sdot za.s[x11, 3], { z28.h-z29.h }, z1.h\n"
- ".inst 0xa1422aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16f77a8 // sdot za.s[x11, 0], { z29.h-z30.h }, z15.h\n"
- ".inst 0xc16e77a9 // sdot za.s[x11, 1], { z29.h-z30.h }, z14.h\n"
- ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16977aa // sdot za.s[x11, 2], { z29.h-z30.h }, z9.h\n"
- ".inst 0xc16177ab // sdot za.s[x11, 3], { z29.h-z30.h }, z1.h\n"
- ".inst 0xc16b77c8 // sdot za.s[x11, 0], { z30.h-z31.h }, z11.h\n"
- ".inst 0xc16377c9 // sdot za.s[x11, 1], { z30.h-z31.h }, z3.h\n"
- ".inst 0xa0422a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f77ca // sdot za.s[x11, 2], { z30.h-z31.h }, z15.h\n"
- ".inst 0xc16e77cb // sdot za.s[x11, 3], { z30.h-z31.h }, z14.h\n"
+ ".inst 0xc16c7428 // sdot za.s[x11, 0], { z1.h-z2.h }, z12.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ ".inst 0xc1647429 // sdot za.s[x11, 1], { z1.h-z2.h }, z4.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
+ ".inst 0xc16e742a // sdot za.s[x11, 2], { z1.h-z2.h }, z14.h\n"
+ ".inst 0xc166742b // sdot za.s[x11, 3], { z1.h-z2.h }, z6.h\n"
+ "trn1 z3.h, z0.h, z17.h\n"
+ ".inst 0xc16d7448 // sdot za.s[x11, 0], { z2.h-z3.h }, z13.h\n"
+ ".inst 0xc1657449 // sdot za.s[x11, 1], { z2.h-z3.h }, z5.h\n"
+ ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16e744a // sdot za.s[x11, 2], { z2.h-z3.h }, z14.h\n"
+ ".inst 0xc166744b // sdot za.s[x11, 3], { z2.h-z3.h }, z6.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x23, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z29.s }, p0/Z, [x16]\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x22, SP, #12\n"
+ ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
+ "addvl x21, SP, #18\n"
+ "addvl x20, SP, #24\n"
+ ".inst 0xa1412ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z22.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z8.h, z29.h, z22.h\n"
- "trn1 z9.h, z21.h, z20.h\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z23.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16376c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z3.h\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xc16276c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z2.h\n"
+ ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cb // sdot za.s[x11, 3], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z17.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ ".inst 0xc16976cc // sdot za.s[x11, 4], { z22.h-z23.h }, z9.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402ace // ld1h { z14.h-z15.h }, pn10.b/Z, [x22]\n"
- "trn1 z10.h, z22.h, z20.h\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "addvl x21, SP, #18\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16f7508 // sdot za.s[x11, 0], { z8.h-z9.h }, z15.h\n"
- ".inst 0xc16e7509 // sdot za.s[x11, 1], { z8.h-z9.h }, z14.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- "addvl x20, SP, #24\n"
- "trn1 z11.h, z21.h, z20.h\n"
- ".inst 0xa1412ac5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16e750a // sdot za.s[x11, 2], { z8.h-z9.h }, z14.h\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc166750b // sdot za.s[x11, 3], { z8.h-z9.h }, z6.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16d7528 // sdot za.s[x11, 0], { z9.h-z10.h }, z13.h\n"
- ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc1657529 // sdot za.s[x11, 1], { z9.h-z10.h }, z5.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16f750c // sdot za.s[x11, 4], { z8.h-z9.h }, z15.h\n"
- ".inst 0xc16e750d // sdot za.s[x11, 5], { z8.h-z9.h }, z14.h\n"
- ".inst 0xc16d752a // sdot za.s[x11, 2], { z9.h-z10.h }, z13.h\n"
- ".inst 0xc165752b // sdot za.s[x11, 3], { z9.h-z10.h }, z5.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc1617548 // sdot za.s[x11, 0], { z10.h-z11.h }, z1.h\n"
- ".inst 0xc1607549 // sdot za.s[x11, 1], { z10.h-z11.h }, z0.h\n"
- ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e752c // sdot za.s[x11, 4], { z9.h-z10.h }, z14.h\n"
- ".inst 0xc166752d // sdot za.s[x11, 5], { z9.h-z10.h }, z6.h\n"
- ".inst 0xc161754a // sdot za.s[x11, 2], { z10.h-z11.h }, z1.h\n"
- ".inst 0xc160754b // sdot za.s[x11, 3], { z10.h-z11.h }, z0.h\n"
- ".inst 0xa0422a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f754c // sdot za.s[x11, 4], { z10.h-z11.h }, z15.h\n"
- ".inst 0xc16e754d // sdot za.s[x11, 5], { z10.h-z11.h }, z14.h\n"
+ ".inst 0xc16176cd // sdot za.s[x11, 5], { z22.h-z23.h }, z1.h\n"
+ ".inst 0xc16c76e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z12.h\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ ".inst 0xc16476e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z4.h\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0xc16e76ea // sdot za.s[x11, 2], { z23.h-z24.h }, z14.h\n"
+ ".inst 0xc16676eb // sdot za.s[x11, 3], { z23.h-z24.h }, z6.h\n"
+ ".inst 0xa1412a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ ".inst 0xc16976ec // sdot za.s[x11, 4], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176ed // sdot za.s[x11, 5], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xc16d7708 // sdot za.s[x11, 0], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc1657709 // sdot za.s[x11, 1], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa0422aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc165770a // sdot za.s[x11, 2], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc164770b // sdot za.s[x11, 3], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1422a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x24, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z1.s }, p0/Z, [x16]\n"
- "add z1.h, p0/M, z1.h, z17.h\n"
- "add x20, x16, %x[ld_in_row]\n"
+ "addvl x23, SP, #6\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ "addvl x22, SP, #12\n"
+ "addvl x21, SP, #18\n"
+ ".inst 0xa0412ae4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ "addvl x20, SP, #24\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xa1422ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z21.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z1.h, z22.h\n"
- "trn1 z27.h, z21.h, z20.h\n"
+ "ld1b { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z18.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z22.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16e76a8 // sdot za.s[x11, 0], { z21.h-z22.h }, z14.h\n"
+ "ld1b { z16.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ ".inst 0xc16676a9 // sdot za.s[x11, 1], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402ac6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x22]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "addvl x23, SP, #6\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc16776aa // sdot za.s[x11, 2], { z21.h-z22.h }, z7.h\n"
+ ".inst 0xc16676ab // sdot za.s[x11, 3], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402aa6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z17.s }, p0/Z, [x24]\n"
+ "add x24, x24, %x[ld_in_row]\n"
+ "trn1 z23.h, z18.h, z16.h\n"
+ ".inst 0xc16776ac // sdot za.s[x11, 4], { z21.h-z22.h }, z7.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xa0402aee // ld1h { z14.h-z15.h }, pn10.b/Z, [x23]\n"
- "trn1 z28.h, z22.h, z20.h\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
- "addvl x22, SP, #12\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- ".inst 0xc16f7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z15.h\n"
- ".inst 0xc16e7749 // sdot za.s[x11, 1], { z26.h-z27.h }, z14.h\n"
- ".inst 0xa0402ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22]\n"
- "addvl x21, SP, #18\n"
- "trn1 z29.h, z21.h, z20.h\n"
- ".inst 0xa0412aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xc161774a // sdot za.s[x11, 2], { z26.h-z27.h }, z1.h\n"
- "addvl x20, SP, #24\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc160774b // sdot za.s[x11, 3], { z26.h-z27.h }, z0.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc16b7768 // sdot za.s[x11, 0], { z27.h-z28.h }, z11.h\n"
- ".inst 0xa0422ae8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xc16a7769 // sdot za.s[x11, 1], { z27.h-z28.h }, z10.h\n"
- ".inst 0xa0412aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc16e774c // sdot za.s[x11, 4], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc166774d // sdot za.s[x11, 5], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc16b776a // sdot za.s[x11, 2], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a776b // sdot za.s[x11, 3], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xc16676ad // sdot za.s[x11, 5], { z21.h-z22.h }, z6.h\n"
+ ".inst 0xa0402a8c // ld1h { z12.h-z13.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc16576c8 // sdot za.s[x11, 0], { z22.h-z23.h }, z5.h\n"
+ "ld1b { z16.s }, p0/Z, [x24]\n"
+ ".inst 0xc16476c9 // sdot za.s[x11, 1], { z22.h-z23.h }, z4.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc16d76ae // sdot za.s[x11, 6], { z21.h-z22.h }, z13.h\n"
+ ".inst 0xc16c76af // sdot za.s[x11, 7], { z21.h-z22.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0xc16e76ca // sdot za.s[x11, 2], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cb // sdot za.s[x11, 3], { z22.h-z23.h }, z6.h\n"
".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc1697788 // sdot za.s[x11, 0], { z28.h-z29.h }, z9.h\n"
- ".inst 0xc1687789 // sdot za.s[x11, 1], { z28.h-z29.h }, z8.h\n"
- ".inst 0xa1422ac2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
- ".inst 0xc16e776c // sdot za.s[x11, 4], { z27.h-z28.h }, z14.h\n"
- ".inst 0xc166776d // sdot za.s[x11, 5], { z27.h-z28.h }, z6.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc16a778a // sdot za.s[x11, 2], { z28.h-z29.h }, z10.h\n"
- ".inst 0xc162778b // sdot za.s[x11, 3], { z28.h-z29.h }, z2.h\n"
- ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16e776e // sdot za.s[x11, 6], { z27.h-z28.h }, z14.h\n"
- ".inst 0xc166776f // sdot za.s[x11, 7], { z27.h-z28.h }, z6.h\n"
- ".inst 0xc161778c // sdot za.s[x11, 4], { z28.h-z29.h }, z1.h\n"
- ".inst 0xc160778d // sdot za.s[x11, 5], { z28.h-z29.h }, z0.h\n"
- ".inst 0xa1422a82 // ld1h { z2.h, z10.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16a778e // sdot za.s[x11, 6], { z28.h-z29.h }, z10.h\n"
- ".inst 0xc162778f // sdot za.s[x11, 7], { z28.h-z29.h }, z2.h\n"
+ "trn1 z24.h, z17.h, z16.h\n"
+ ".inst 0xc16e76cc // sdot za.s[x11, 4], { z22.h-z23.h }, z14.h\n"
+ ".inst 0xc16676cd // sdot za.s[x11, 5], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xa0412a86 // ld1h { z6.h-z7.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16976e8 // sdot za.s[x11, 0], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176e9 // sdot za.s[x11, 1], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
+ ".inst 0xc16776ce // sdot za.s[x11, 6], { z22.h-z23.h }, z7.h\n"
+ ".inst 0xc16676cf // sdot za.s[x11, 7], { z22.h-z23.h }, z6.h\n"
+ ".inst 0xc16176ea // sdot za.s[x11, 2], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xc16076eb // sdot za.s[x11, 3], { z23.h-z24.h }, z0.h\n"
+ ".inst 0xa1422aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc16976ec // sdot za.s[x11, 4], { z23.h-z24.h }, z9.h\n"
+ ".inst 0xc16176ed // sdot za.s[x11, 5], { z23.h-z24.h }, z1.h\n"
+ ".inst 0xa1422a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ ".inst 0xc16c76ee // sdot za.s[x11, 6], { z23.h-z24.h }, z12.h\n"
+ ".inst 0xc16476ef // sdot za.s[x11, 7], { z23.h-z24.h }, z4.h\n"
"19:" // Padded: 0 priming loads
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
"cbz x25, 22f\n"
"mov x12, #0x0\n"
- ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z6.s }, p0/Z, [x16]\n"
- "add z6.h, p0/M, z6.h, z17.h\n"
"add x20, x16, %x[ld_in_row]\n"
+ ".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
+ "sub x25, x25, #0x1\n"
+ "sub x15, x15, #0x1\n"
+ "cmp x25, x15\n"
+ "ld1b { z18.s }, p0/Z, [x16]\n"
+ "csel x25, x25, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "sub x15, x15, x25\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z30.s }, p0/Z, [x20]\n"
- "add z30.h, p0/M, z30.h, z17.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z17.h\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z24.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z17.h\n"
"mov x12, #0x4\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z6.h, z30.h\n"
- "trn1 z26.h, z27.h, z26.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z17.h\n"
+ "ld1b { z18.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z25.h, z17.h, z16.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z9.s }, p0/Z, [x20]\n"
- "add z9.h, p0/M, z9.h, z17.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z21.s }, p0/Z, [x20]\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z26.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- "sub x25, x25, #0x1\n"
- "sub x15, x15, #0x1\n"
- "cmp x25, x15\n"
- "trn1 z27.h, z8.h, z9.h\n"
- "trn1 z28.h, z21.h, z29.h\n"
- "csel x25, x25, x15, LT\n"
- "add x16, x16, %x[ld_in_col]\n"
- "sub x15, x15, x25\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ "trn1 z27.h, z17.h, z16.h\n"
"cbz x25, 21f\n"
"20:" // Padded: Main loop
"mov x12, #0x0\n"
+ "addvl x24, SP, #6\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z8.s }, p0/Z, [x16]\n"
- "add z8.h, p0/M, z8.h, z17.h\n"
- "add x24, x16, %x[ld_in_row]\n"
- ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z21.s }, p0/Z, [x24]\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
- "addvl x23, SP, #6\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
- "addvl x22, SP, #12\n"
- "add z21.h, p0/M, z21.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
- ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402b05 // ld1h { z5.h, z13.h }, pn10.b/Z, [x24]\n"
+ "addvl x23, SP, #12\n"
+ "add x22, x16, %x[ld_in_row]\n"
"addvl x21, SP, #18\n"
"addvl x20, SP, #24\n"
- "ld1b { z29.s }, p0/Z, [x24]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- "add z29.h, p0/M, z29.h, z17.h\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
- "mov x12, #0x4\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "subs x25, x25, #0x1\n"
+ "ld1b { z16.s }, p0/Z, [x16]\n"
+ ".inst 0xc16d770a // sdot za.s[x11, 2], { z24.h-z25.h }, z13.h\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc165770b // sdot za.s[x11, 3], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
+ ".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa0402aa4 // ld1h { z4.h-z5.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z19.s }, p0/Z, [x22]\n"
+ ".inst 0xc169772a // sdot za.s[x11, 2], { z25.h-z26.h }, z9.h\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc161772b // sdot za.s[x11, 3], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xa1412ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ "add z19.h, p0/M, z19.h, z15.h\n"
+ ".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422b02 // ld1h { z2.h-z3.h }, pn10.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc165770e // sdot za.s[x11, 6], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc164770f // sdot za.s[x11, 7], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402a84 // ld1h { z4.h, z12.h }, pn10.b/Z, [x20]\n"
".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
- "ld1b { z30.s }, p0/Z, [x24]\n"
- "add z30.h, p0/M, z30.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
+ "ld1b { z23.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae6 // ld1h { z6.h-z7.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ "add z23.h, p0/M, z23.h, z15.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0xc16d772e // sdot za.s[x11, 6], { z25.h-z26.h }, z13.h\n"
+ "mov x12, #0x4\n"
+ ".inst 0xc165772f // sdot za.s[x11, 7], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc167774c // sdot za.s[x11, 4], { z26.h-z27.h }, z7.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc166774d // sdot za.s[x11, 5], { z26.h-z27.h }, z6.h\n"
+ ".inst 0xa0422aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "subs x25, x25, #0x1\n"
- ".inst 0xc16d774a // sdot za.s[x11, 2], { z26.h-z27.h }, z13.h\n"
- "ld1b { z15.s }, p0/Z, [x24]\n"
- "add z15.h, p0/M, z15.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc165774b // sdot za.s[x11, 3], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa0412aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc163774e // sdot za.s[x11, 6], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774f // sdot za.s[x11, 7], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
+ "ld1b { z18.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16c1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z12.h\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "add x16, x16, %x[ld_in_col]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
- "ld1b { z20.s }, p0/Z, [x24]\n"
- "add z20.h, p0/M, z20.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc1641709 // sdot za.s[x8, 1], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402be1 // ld1h { z1.h, z9.h }, pn10.b/Z, [SP]\n"
+ "trn1 z24.h, z16.h, z19.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc16d1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc1651729 // sdot za.s[x8, 1], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412be4 // ld1h { z4.h, z12.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
+ "trn1 z25.h, z23.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z15.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
- "ld1b { z31.s }, p0/Z, [x24]\n"
- "add z31.h, p0/M, z31.h, z17.h\n"
- "add x24, x24, %x[ld_in_row]\n"
- ".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- ".inst 0xc16b774c // sdot za.s[x11, 4], { z26.h-z27.h }, z11.h\n"
- "ld1b { z22.s }, p0/Z, [x24]\n"
- "add z22.h, p0/M, z22.h, z17.h\n"
- ".inst 0xc16a774d // sdot za.s[x11, 5], { z26.h-z27.h }, z10.h\n"
- ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc169776a // sdot za.s[x11, 2], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776b // sdot za.s[x11, 3], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422ac0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16e774e // sdot za.s[x11, 6], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc166774f // sdot za.s[x11, 7], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc161776c // sdot za.s[x11, 4], { z27.h-z28.h }, z1.h\n"
- ".inst 0xc160776d // sdot za.s[x11, 5], { z27.h-z28.h }, z0.h\n"
- ".inst 0xa1422aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc169776e // sdot za.s[x11, 6], { z27.h-z28.h }, z9.h\n"
- ".inst 0xc161776f // sdot za.s[x11, 7], { z27.h-z28.h }, z1.h\n"
- ".inst 0xa0422a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc1631728 // sdot za.s[x8, 0], { z25.h-z26.h }, z3.h\n"
- ".inst 0xc1621729 // sdot za.s[x8, 1], { z25.h-z26.h }, z2.h\n"
- ".inst 0xa0402be0 // ld1h { z0.h-z1.h }, pn10.b/Z, [SP]\n"
- "trn1 z25.h, z8.h, z21.h\n"
- ".inst 0xc16e1748 // sdot za.s[x8, 0], { z26.h-z27.h }, z14.h\n"
- ".inst 0xc1661749 // sdot za.s[x8, 1], { z26.h-z27.h }, z6.h\n"
- ".inst 0xa1412be5 // ld1h { z5.h, z13.h }, pn10.b/Z, [SP, #0x2, MUL VL]\n"
- "trn1 z26.h, z29.h, z30.h\n"
- ".inst 0xc16b1768 // sdot za.s[x8, 0], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a1769 // sdot za.s[x8, 1], { z27.h-z28.h }, z10.h\n"
+ ".inst 0xc1631748 // sdot za.s[x8, 0], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc1621749 // sdot za.s[x8, 1], { z26.h-z27.h }, z2.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
"add x8, x8, #0x2\n"
".inst 0xa0422be2 // ld1h { z2.h-z3.h }, pn10.b/Z, [SP, #0x4, MUL VL]\n"
- "trn1 z27.h, z15.h, z20.h\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
- "trn1 z28.h, z31.h, z22.h\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ "trn1 z26.h, z18.h, z16.h\n"
+ "add z17.h, p0/M, z17.h, z15.h\n"
+ ".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ "ld1b { z18.s }, p0/Z, [x22]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ "add z18.h, p0/M, z18.h, z15.h\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ "trn1 z27.h, z17.h, z18.h\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 20b\n"
"21:" // Main loop tail
"addvl x23, SP, #6\n"
- ".inst 0xc1617728 // sdot za.s[x11, 0], { z25.h-z26.h }, z1.h\n"
+ ".inst 0xc1697708 // sdot za.s[x11, 0], { z24.h-z25.h }, z9.h\n"
"addvl x22, SP, #12\n"
- ".inst 0xc1607729 // sdot za.s[x11, 1], { z25.h-z26.h }, z0.h\n"
- ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc1617709 // sdot za.s[x11, 1], { z24.h-z25.h }, z1.h\n"
+ ".inst 0xa1402ae6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x23]\n"
"addvl x21, SP, #18\n"
"addvl x20, SP, #24\n"
- ".inst 0xc161772a // sdot za.s[x11, 2], { z25.h-z26.h }, z1.h\n"
- ".inst 0xc160772b // sdot za.s[x11, 3], { z25.h-z26.h }, z0.h\n"
+ ".inst 0xc16e770a // sdot za.s[x11, 2], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770b // sdot za.s[x11, 3], { z24.h-z25.h }, z6.h\n"
".inst 0xa1402ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc16d7748 // sdot za.s[x11, 0], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc1657749 // sdot za.s[x11, 1], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa1412ae1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc16c7728 // sdot za.s[x11, 0], { z25.h-z26.h }, z12.h\n"
+ ".inst 0xc1647729 // sdot za.s[x11, 1], { z25.h-z26.h }, z4.h\n"
+ ".inst 0xa1412ae5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xc16e770c // sdot za.s[x11, 4], { z24.h-z25.h }, z14.h\n"
+ ".inst 0xc166770d // sdot za.s[x11, 5], { z24.h-z25.h }, z6.h\n"
+ ".inst 0xa1402aa4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc16d772a // sdot za.s[x11, 2], { z25.h-z26.h }, z13.h\n"
+ ".inst 0xc165772b // sdot za.s[x11, 3], { z25.h-z26.h }, z5.h\n"
+ ".inst 0xa1412ac6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
+ ".inst 0xc1637748 // sdot za.s[x11, 0], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc1627749 // sdot za.s[x11, 1], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa0422ae2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xc16c770e // sdot za.s[x11, 6], { z24.h-z25.h }, z12.h\n"
+ ".inst 0xc164770f // sdot za.s[x11, 7], { z24.h-z25.h }, z4.h\n"
+ ".inst 0xa1402a85 // ld1h { z5.h, z13.h }, pn10.b/Z, [x20]\n"
".inst 0xc16e772c // sdot za.s[x11, 4], { z25.h-z26.h }, z14.h\n"
".inst 0xc166772d // sdot za.s[x11, 5], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa1402aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc169774a // sdot za.s[x11, 2], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc161774b // sdot za.s[x11, 3], { z26.h-z27.h }, z1.h\n"
- ".inst 0xa1412ac1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x22, #0x2, MUL VL]\n"
- ".inst 0xc1637768 // sdot za.s[x11, 0], { z27.h-z28.h }, z3.h\n"
- ".inst 0xc1627769 // sdot za.s[x11, 1], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa0422aea // ld1h { z10.h-z11.h }, pn10.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1412aa6 // ld1h { z6.h, z14.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
+ ".inst 0xc163774a // sdot za.s[x11, 2], { z26.h-z27.h }, z3.h\n"
+ ".inst 0xc162774b // sdot za.s[x11, 3], { z26.h-z27.h }, z2.h\n"
+ ".inst 0xa1422ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
".inst 0xc16e772e // sdot za.s[x11, 6], { z25.h-z26.h }, z14.h\n"
".inst 0xc166772f // sdot za.s[x11, 7], { z25.h-z26.h }, z6.h\n"
- ".inst 0xa0402a8e // ld1h { z14.h-z15.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc169774c // sdot za.s[x11, 4], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc161774d // sdot za.s[x11, 5], { z26.h-z27.h }, z1.h\n"
- ".inst 0xa1412aa5 // ld1h { z5.h, z13.h }, pn10.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xc16b776a // sdot za.s[x11, 2], { z27.h-z28.h }, z11.h\n"
- ".inst 0xc16a776b // sdot za.s[x11, 3], { z27.h-z28.h }, z10.h\n"
- ".inst 0xa0422ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22, #0x4, MUL VL]\n"
- ".inst 0xc16d774e // sdot za.s[x11, 6], { z26.h-z27.h }, z13.h\n"
- ".inst 0xc165774f // sdot za.s[x11, 7], { z26.h-z27.h }, z5.h\n"
- ".inst 0xa0412a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
- ".inst 0xc163776c // sdot za.s[x11, 4], { z27.h-z28.h }, z3.h\n"
- ".inst 0xc162776d // sdot za.s[x11, 5], { z27.h-z28.h }, z2.h\n"
- ".inst 0xa1422aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xc16a776e // sdot za.s[x11, 6], { z27.h-z28.h }, z10.h\n"
- ".inst 0xc162776f // sdot za.s[x11, 7], { z27.h-z28.h }, z2.h\n"
+ ".inst 0xa1412a86 // ld1h { z6.h, z14.h }, pn10.b/Z, [x20, #0x2, MUL VL]\n"
+ ".inst 0xc16c774c // sdot za.s[x11, 4], { z26.h-z27.h }, z12.h\n"
+ ".inst 0xc164774d // sdot za.s[x11, 5], { z26.h-z27.h }, z4.h\n"
+ ".inst 0xa0422aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21, #0x4, MUL VL]\n"
+ ".inst 0xc161774e // sdot za.s[x11, 6], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc160774f // sdot za.s[x11, 7], { z26.h-z27.h }, z0.h\n"
".inst 0xa0422a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20, #0x4, MUL VL]\n"
- ".inst 0xc16f1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z15.h\n"
- ".inst 0xc16e1729 // sdot za.s[x8, 1], { z25.h-z26.h }, z14.h\n"
- ".inst 0xc1691748 // sdot za.s[x8, 0], { z26.h-z27.h }, z9.h\n"
- ".inst 0xc1681749 // sdot za.s[x8, 1], { z26.h-z27.h }, z8.h\n"
- ".inst 0xc1611768 // sdot za.s[x8, 0], { z27.h-z28.h }, z1.h\n"
- ".inst 0xc1601769 // sdot za.s[x8, 1], { z27.h-z28.h }, z0.h\n"
+ ".inst 0xc16d1708 // sdot za.s[x8, 0], { z24.h-z25.h }, z13.h\n"
+ ".inst 0xc1651709 // sdot za.s[x8, 1], { z24.h-z25.h }, z5.h\n"
+ ".inst 0xc16e1728 // sdot za.s[x8, 0], { z25.h-z26.h }, z14.h\n"
+ ".inst 0xc1661729 // sdot za.s[x8, 1], { z25.h-z26.h }, z6.h\n"
+ ".inst 0xc1611748 // sdot za.s[x8, 0], { z26.h-z27.h }, z1.h\n"
+ ".inst 0xc1601749 // sdot za.s[x8, 1], { z26.h-z27.h }, z0.h\n"
"add x8, x8, #0x2\n"
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc0066814 // mova { z20.d-z21.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066836 // mova { z22.d-z23.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z8.s\n"
+ ".inst 0xc1abaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
+ ".inst 0xc1aaab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1becff4 // sclamp { z20.s-z23.s }, z31.s, z30.s\n"
+ "st1b { z20.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z22.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z21.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z23.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"22:" // Main loop skip tail
"cbz x15, 24f\n"
"23:" // Right padding loop
- ".inst 0xc0066808 // mova { z8.d-z9.d }, za.d[x11, #0]\n"
+ ".inst 0xc0066818 // mova { z24.d-z25.d }, za.d[x11, #0]\n"
"add x8, x8, #0x2\n"
"subs x15, x15, #0x1\n"
- ".inst 0xc006682a // mova { z10.d-z11.d }, za.d[x11, #1]\n"
- ".inst 0xc1a7ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc006683a // mova { z26.d-z27.d }, za.d[x11, #1]\n"
"add x11, x11, #0x2\n"
- ".inst 0xc1a4aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z4.s\n"
- ".inst 0xc0040a40 // mova za.d[x8, #0], { z18.d-z19.d }\n"
- ".inst 0xc1acab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z12.s\n"
- ".inst 0xc0040a41 // mova za.d[x8, #1], { z18.d-z19.d }\n"
- ".inst 0xc1b0cf08 // sclamp { z8.s-z11.s }, z24.s, z16.s\n"
- "st1b { z8.s }, p1, [x14]\n"
+ ".inst 0xc0040b80 // mova za.d[x8, #0], { z28.d-z29.d }\n"
+ ".inst 0xc0040b81 // mova za.d[x8, #1], { z28.d-z29.d }\n"
+ ".inst 0xc1a8ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z8.s\n"
+ ".inst 0xc1abaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z11.s\n"
+ ".inst 0xc1aaab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z10.s\n"
+ ".inst 0xc1becff8 // sclamp { z24.s-z27.s }, z31.s, z30.s\n"
+ "st1b { z24.s }, p1, [x14]\n"
"add x14, x14, x4\n"
- "st1b { z10.s }, p1, [x13]\n"
+ "st1b { z26.s }, p1, [x13]\n"
"add x13, x13, x10\n"
- "st1b { z9.s }, p1, [x9]\n"
+ "st1b { z25.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- "st1b { z11.s }, p1, [x28]\n"
+ "st1b { z27.s }, p1, [x28]\n"
"add x28, x28, x26\n"
"bgt 23b\n"
"24:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x17\n"
+ "whilelt p1.s, x17, x7\n"
"incw x20, ALL, MUL #16\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x17\n"
- "whilelt p1.s, x17, x7\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -1192,6 +1197,8 @@ void sme2_u8s8u8q_planar_5x5_s1_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #30\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
index d8dc69127e..d4708f8916 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -69,194 +69,199 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "ldr x3, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "ldr x4, [%x[args], %[offsetof_Args_pad_bottom]]\n"
+ "mov x22, SP\n"
+ "mov x21, #0xb\n"
"ptrue p2.b\n"
- "mov x20, #0xb\n"
- "ldr x4, [%x[args], %[offsetof_Args_pad_top]]\n"
- "ld1rh { z7.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
- "sub x20, x20, x3\n"
+ "ldr x5, [%x[args], %[offsetof_Args_pad_top]]\n"
+ "sub x20, x22, #0x8\n"
+ "ldr x6, [%x[args], %[offsetof_Args_n_channels]]\n"
+ "ld1rh { z18.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_a_offset]]\n"
+ "and x20, x20, #-0x400\n"
".inst 0x25207812 // ptrue pn10.b\n"
- "ldr x5, [%x[args], %[offsetof_Args_n_channels]]\n"
- "whilelt p1.s, XZR, x5\n"
- "whilelt p9.s, XZR, x20\n"
- "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "whilelt p8.s, XZR, x4\n"
+ "sub x21, x21, x4\n"
+ "mov SP, x20\n"
+ "ldr x7, [%x[args], %[offsetof_Args_current_channel]]\n"
+ "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "str x22, [SP]\n"
"addvl SP, SP, #-15\n"
- "ldr x6, [%x[args], %[offsetof_Args_current_channel]]\n"
- "neg z7.h, p2/M, z7.h\n"
+ "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "whilelt p1.s, XZR, x6\n"
+ "whilelt p9.s, XZR, x21\n"
+ "neg z18.h, p2/M, z18.h\n"
+ "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "whilelt p8.s, XZR, x5\n"
+ "ld1rw { z19.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
"eor p8.b, p2/Z, p8.b, p9.b\n"
- "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z5.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z21.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
"1:" // Channel loop
"ldr x20, [%x[qp], %[offsetof_Requantize32_bias]]\n"
- "mov z12.s, #0x0\n"
+ "mov z20.s, #0x0\n"
"cbz x20, 2f\n"
- "ld1w { z12.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x20, x7, LSL #2]\n"
"2:" // Load bias: Done
- "ldr x22, [%x[args], %[offsetof_Args_weights]]\n"
- "mov x20, x22\n"
- "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "ldr x23, [%x[args], %[offsetof_Args_weights]]\n"
+ "ld1rh { z0.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "mov z12.h, #0x0\n"
+ "addvl x22, SP, #15\n"
+ "addvl x22, x22, #-3\n"
+ "ldr x21, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
+ "mov z21.d, z20.d\n"
+ "mov z22.d, z20.d\n"
+ "mov z23.d, z20.d\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "ld1sb { z24.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "ld1rh { z28.h }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "sub z13.h, z13.h, z28.h\n"
- "incw x22\n"
- "mov z26.h, #0x0\n"
- "ld1sb { z22.s }, p2/Z, [x20]\n"
+ "ld1sb { z30.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z22.h, z22.h, z28.h\n"
- "trn1 z17.h, z13.h, z22.h\n"
- "ld1sb { z20.s }, p2/Z, [x20]\n"
+ "ld1sb { z8.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z20.h, z20.h, z28.h\n"
- "addvl x21, SP, #15\n"
- "ld1sb { z1.s }, p2/Z, [x20]\n"
+ "ld1sb { z17.s }, p2/Z, [x20]\n"
+ "sub z24.h, z24.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "sub z1.h, z1.h, z28.h\n"
- "trn1 z29.h, z20.h, z1.h\n"
+ "sub z30.h, z30.h, z0.h\n"
+ "ld1sb { z26.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "incw x23\n"
+ "sub z8.h, z8.h, z0.h\n"
+ "sub z17.h, z17.h, z0.h\n"
+ "sub z26.h, z26.h, z0.h\n"
+ "trn1 z16.h, z24.h, z30.h\n"
"ld1sb { z27.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "sub z27.h, z27.h, z28.h\n"
- "incw x22\n"
- "ld1sb { z14.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "sub z14.h, z14.h, z28.h\n"
- "addvl x21, x21, #-3\n"
- "ld1sb { z18.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z18.h, z18.h, z28.h\n"
- "trn1 z22.h, z27.h, z26.h\n"
- "ld1sb { z23.s }, p2/Z, [x20]\n"
+ "ld1sb { z11.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z23.h, z23.h, z28.h\n"
- "st1h { z17.h }, p2, [x21]\n"
- "ld1sb { z30.s }, p2/Z, [x20]\n"
+ "trn1 z15.h, z8.h, z17.h\n"
+ "ld1sb { z31.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z30.h, z30.h, z28.h\n"
- "trn1 z8.h, z14.h, z18.h\n"
- "ld1sb { z15.s }, p2/Z, [x20]\n"
- "mov x20, x22\n"
- "st1h { z29.h }, p2, [x21, #1, MUL VL]\n"
- "sub z15.h, z15.h, z28.h\n"
- "ld1sb { z20.s }, p2/Z, [x20]\n"
+ "ld1sb { z9.s }, p2/Z, [x20]\n"
+ "sub z27.h, z27.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z23.h, z23.h, z30.h\n"
- "sub z20.h, z20.h, z28.h\n"
- "ld1sb { z24.s }, p2/Z, [x20]\n"
- "incw x20, ALL, MUL #5\n"
- "sub z24.h, z24.h, z28.h\n"
- "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+ "trn1 z24.h, z26.h, z12.h\n"
+ "sub z11.h, z11.h, z0.h\n"
+ "ld1sb { z10.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z16.h }, p2, [x22]\n"
+ "sub z31.h, z31.h, z0.h\n"
+ "incw x23\n"
+ "sub z9.h, z9.h, z0.h\n"
+ "st1h { z15.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z10.h, z10.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z11.h, z27.h, z11.h\n"
"ld1sb { z16.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z0.h, z15.h, z26.h\n"
- "incw x22\n"
- "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "ld1sb { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z16.h, z16.h, z28.h\n"
- "sub z13.h, z13.h, z28.h\n"
- "ld1sb { z11.s }, p2/Z, [x20]\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z8.h }, p2, [x21]\n"
- "trn1 z27.h, z20.h, z24.h\n"
- "ld1sb { z22.s }, p2/Z, [x20]\n"
+ "trn1 z13.h, z31.h, z9.h\n"
+ "ld1sb { z28.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z11.h, z11.h, z28.h\n"
- "ld1sb { z3.s }, p2/Z, [x20]\n"
+ "ld1sb { z26.s }, p2/Z, [x20]\n"
+ "sub z16.h, z16.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z23.h }, p2, [x21, #1, MUL VL]\n"
- "trn1 z20.h, z16.h, z13.h\n"
- "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "trn1 z8.h, z10.h, z12.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1sb { z14.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z11.h }, p2, [x22]\n"
+ "sub z28.h, z28.h, z0.h\n"
+ "incw x23\n"
+ "sub z26.h, z26.h, z0.h\n"
+ "st1h { z13.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z14.h, z14.h, z0.h\n"
+ "st1h { z8.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z13.h, z16.h, z2.h\n"
+ "ld1sb { z31.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z22.h, z22.h, z28.h\n"
- "sub z3.h, z3.h, z28.h\n"
- "ld1sb { z15.s }, p2/Z, [x20]\n"
+ "ld1sb { z2.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
- "trn1 z29.h, z11.h, z26.h\n"
+ "trn1 z30.h, z28.h, z26.h\n"
"ld1sb { z16.s }, p2/Z, [x20]\n"
- "incw x22\n"
- "sub z13.h, z13.h, z28.h\n"
- "sub z15.h, z15.h, z28.h\n"
- "addvl x21, x21, #-3\n"
- "mov x20, x22\n"
- "st1h { z27.h }, p2, [x21]\n"
- "sub z16.h, z16.h, z28.h\n"
- "trn1 z19.h, z22.h, z3.h\n"
- "ld1sb { z17.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "st1h { z20.h }, p2, [x21, #1, MUL VL]\n"
- "ld1sb { z0.s }, p2/Z, [x20]\n"
+ "ld1sb { z27.s }, p2/Z, [x20]\n"
+ "sub z31.h, z31.h, z0.h\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z31.h, z13.h, z15.h\n"
- "st1h { z29.h }, p2, [x21, #2, MUL VL]\n"
- "ld1sb { z18.s }, p2/Z, [x20]\n"
+ "trn1 z17.h, z14.h, z12.h\n"
+ "sub z2.h, z2.h, z0.h\n"
+ "ld1sb { z4.s }, p2/Z, [x20]\n"
+ "mov x20, x23\n"
+ "st1h { z13.h }, p2, [x22]\n"
+ "sub z16.h, z16.h, z0.h\n"
+ "sub z27.h, z27.h, z0.h\n"
+ "st1h { z30.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z4.h, z4.h, z0.h\n"
+ "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z31.h, z31.h, z2.h\n"
+ "ld1sb { z29.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "trn1 z16.h, z16.h, z26.h\n"
- "sub z17.h, z17.h, z28.h\n"
- "ld1sb { z22.s }, p2/Z, [x20]\n"
+ "ld1sb { z10.s }, p2/Z, [x20]\n"
"incw x20, ALL, MUL #5\n"
- "sub z0.h, z0.h, z28.h\n"
- "sub z18.h, z18.h, z28.h\n"
- "ld1sb { z1.s }, p2/Z, [x20]\n"
- "sub z22.h, z22.h, z28.h\n"
- "sub z1.h, z1.h, z28.h\n"
- "ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_muls]]\n"
- "addvl x21, x21, #-3\n"
- "st1h { z19.h }, p2, [x21]\n"
- "mov z13.d, z12.d\n"
- "mov z14.d, z12.d\n"
- "st1h { z31.h }, p2, [x21, #1, MUL VL]\n"
- "mov z15.d, z12.d\n"
- "trn1 z8.h, z17.h, z0.h\n"
- "st1h { z16.h }, p2, [x21, #2, MUL VL]\n"
- "addvl x21, x21, #-3\n"
- "trn1 z31.h, z18.h, z22.h\n"
- "trn1 z29.h, z1.h, z26.h\n"
- "st1h { z8.h }, p2, [x21]\n"
- "st1h { z31.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z29.h }, p2, [x21, #2, MUL VL]\n"
- "cbz x20, 3f\n"
- "ld1w { z6.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "trn1 z24.h, z16.h, z27.h\n"
+ "ld1sb { z13.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "ld1sb { z8.s }, p2/Z, [x20]\n"
+ "incw x20, ALL, MUL #5\n"
+ "trn1 z4.h, z4.h, z12.h\n"
+ "sub z29.h, z29.h, z0.h\n"
+ "ld1sb { z11.s }, p2/Z, [x20]\n"
+ "sub z10.h, z10.h, z0.h\n"
+ "st1h { z31.h }, p2, [x22]\n"
+ "sub z13.h, z13.h, z0.h\n"
+ "sub z8.h, z8.h, z0.h\n"
+ "st1h { z24.h }, p2, [x22, #1, MUL VL]\n"
+ "sub z11.h, z11.h, z0.h\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #-3\n"
+ "trn1 z14.h, z29.h, z10.h\n"
+ "trn1 z10.h, z13.h, z8.h\n"
+ "trn1 z4.h, z11.h, z12.h\n"
+ "st1h { z14.h }, p2, [x22]\n"
+ "st1h { z10.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z4.h }, p2, [x22, #2, MUL VL]\n"
+ "cbz x21, 3f\n"
+ "ld1w { z3.s }, p1/Z, [x21, x7, LSL #2]\n"
"3:" // Load mul: End
"ldr x20, [%x[qp], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
"cbz x20, 4f\n"
- "ld1w { z4.s }, p1/Z, [x20, x6, LSL #2]\n"
+ "ld1w { z5.s }, p1/Z, [x20, x7, LSL #2]\n"
"4:" // Load right_shift: End
- "ldr x7, [%x[args], %[offsetof_Args_input_cols]]\n"
- "sub x20, x7, #0x1\n"
- "orr x23, x20, %x[ld_in_col], LSL #16\n"
- "ldr x17, [%x[args], %[offsetof_Args_inptr]]\n"
- "orr x23, x5, x23, LSL #22\n"
+ "ldr x17, [%x[args], %[offsetof_Args_input_cols]]\n"
"mov x22, #0xb\n"
- "add x21, x4, x3\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "ldr x16, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "add x20, x5, x4\n"
+ "ldr x16, [%x[args], %[offsetof_Args_inptr]]\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
"mov x8, #0x0\n"
- "lsl x23, x23, #0x0\n"
- "sub x22, x22, x21\n"
- "madd x20, x20, x4, x17\n"
+ "ldr x15, [%x[args], %[offsetof_Args_output_cols]]\n"
+ "sub x22, x22, x20\n"
+ "sub x20, x17, #0x1\n"
+ "orr x20, x20, %x[ld_in_col], LSL #16\n"
+ "madd x21, x21, x5, x16\n"
+ "orr x20, x6, x20, LSL #22\n"
+ "lsl x20, x20, #0x0\n"
"5:" // Issue prefetches
"subs x22, x22, #0x1\n"
- ".inst 0xf8b74a9c // rprfm pldstrm, x23, [x20]\n"
- "add x20, x20, %x[ld_in_col]\n"
+ ".inst 0xf8b44abc // rprfm pldstrm, x20, [x21]\n"
+ "add x21, x21, %x[ld_in_col]\n"
"bgt 5b\n"
"ldr x23, [%x[args], %[offsetof_Args_outptrs]]\n"
- "lsl x20, %x[ld_in_row], #0x0\n"
- "msub x17, x4, x20, x17\n"
- ".inst 0xc0040d80 // mova za.d[x8, #0], { z12.d-z15.d }\n"
- "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
- ".inst 0xc0040d81 // mova za.d[x8, #1], { z12.d-z15.d }\n"
+ "lsl x21, %x[ld_in_row], #0x0\n"
+ ".inst 0xc0040e80 // mova za.d[x8, #0], { z20.d-z23.d }\n"
"mov x22, #0x4\n"
- "ldp x15, x14, [x23], #0x10\n"
- ".inst 0xc0040d82 // mova za.d[x8, #2], { z12.d-z15.d }\n"
- "ldp x13, x11, [x20], #0x10\n"
- ".inst 0xc0040d83 // mova za.d[x8, #3], { z12.d-z15.d }\n"
+ "ldr x20, [%x[args], %[offsetof_Args_ld_out_cols]]\n"
+ "msub x16, x5, x21, x16\n"
+ ".inst 0xc0040e81 // mova za.d[x8, #1], { z20.d-z23.d }\n"
"ldr x21, [%x[args], %[offsetof_Args_pad_left]]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "ldp x10, x9, [x23], #0x10\n"
- "ldp x28, x27, [x20], #0x10\n"
+ ".inst 0xc0040e82 // mova za.d[x8, #2], { z20.d-z23.d }\n"
+ "ldp x14, x13, [x23], #0x10\n"
+ ".inst 0xc0040e83 // mova za.d[x8, #3], { z20.d-z23.d }\n"
+ "ldp x11, x10, [x20], #0x10\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ "ldp x9, x28, [x23], #0x10\n"
+ "ldp x27, x26, [x20], #0x10\n"
"cbz x21, 7f\n"
"cmp x21, x22\n"
"csel x20, x21, x22, LT\n"
@@ -264,379 +269,379 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
"sub x22, x22, x20\n"
"cbz x21, 7f\n"
".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
"and x22, x21, #0x1\n"
- ".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
"add x21, x21, #0x1\n"
"lsr x21, x21, #0x1\n"
- ".inst 0xc1aaab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
- "sub x16, x16, x21\n"
- ".inst 0xc1b5ccbc // sclamp { z28.s-z31.s }, z5.s, z21.s\n"
+ "sub x15, x15, x21\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
"6:" // Left padding
"subs x21, x21, #0x1\n"
- "st1b { z28.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z29.s }, p1, [x14]\n"
+ "st1b { z28.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z30.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z31.s }, p1, [x9]\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 6b\n"
"7:" // Left padding: End
- "adds XZR, x4, x3\n"
+ "adds XZR, x5, x4\n"
"bne 14f\n"
"cbz x22, 12f\n"
"cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "sub x17, x17, x22\n"
"beq 11f\n"
"cmp x22, #0x2\n"
"beq 10f\n"
"cmp x22, #0x3\n"
"beq 9f\n"
"8:" // Unpadded: 4 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z27.s }, p1/Z, [x17]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z8.s }, p1/Z, [x16]\n"
"addvl x20, SP, #12\n"
- "ld1b { z0.s }, p1/Z, [x21]\n"
+ "ld1b { z26.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z0.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1b { z28.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z9.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z11.s }, p1/Z, [x21]\n"
+ "ld1b { z31.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z28.h, z28.h, z11.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1b { z29.s }, p1/Z, [x21]\n"
+ "ld1b { z10.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z8.s }, p1/Z, [x21]\n"
+ "trn1 z8.h, z8.h, z26.h\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z8.h\n"
- "add z29.h, z29.h, z7.h\n"
+ "ld1b { z11.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z31.h\n"
"ld1b { z30.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
+ "ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z17.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1b { z31.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "ld1b { z26.s }, p1/Z, [x21]\n"
+ "trn1 z10.h, z10.h, z16.h\n"
+ "add z8.h, z8.h, z18.h\n"
+ "ld1b { z28.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z26.h\n"
- "add z31.h, z31.h, z7.h\n"
- ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
- "ld1b { z20.s }, p1/Z, [x21]\n"
- "mov z0.d, z20.d\n"
- "add z0.h, z0.h, z7.h\n"
- ".inst 0xc1781788 // sdot za.s[x8, 0], { z28.h-z31.h }, z8.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17817a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z8.h\n"
+ "trn1 z11.h, z11.h, z30.h\n"
+ "add z9.h, z9.h, z18.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z2.s }, p1/Z, [x21]\n"
+ "add z10.h, z10.h, z18.h\n"
+ "trn1 z12.h, z12.h, z28.h\n"
+ "ld1h { z4.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "mov z13.d, z2.d\n"
+ "add z12.h, z12.h, z18.h\n"
+ ".inst 0xc1701508 // sdot za.s[x8, 0], { z8.h-z11.h }, z0.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xc1711528 // sdot za.s[x8, 0], { z9.h-z12.h }, z1.h\n"
+ ".inst 0xc1741548 // sdot za.s[x8, 0], { z10.h-z13.h }, z4.h\n"
"9:" // Unpadded: 3 priming loads
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z29.s }, p1/Z, [x17]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p1/Z, [x16]\n"
"addvl x20, SP, #9\n"
- "ld1b { z17.s }, p1/Z, [x21]\n"
- "add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z17.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1b { z30.s }, p1/Z, [x21]\n"
+ "ld1b { z11.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z0.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z0.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1b { z31.s }, p1/Z, [x21]\n"
+ "ld1b { z2.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1b { z0.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z11.h\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
+ "ld1b { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z0.h, z0.h, z16.h\n"
- "add z0.h, z0.h, z7.h\n"
- "ld1b { z1.s }, p1/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z2.h\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
"ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z1.h, z1.h, z16.h\n"
- "add z1.h, z1.h, z7.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17217a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z2.h\n"
- "ld1b { z16.s }, p1/Z, [x21]\n"
- "mov z2.d, z16.d\n"
- "add z2.h, z2.h, z7.h\n"
- ".inst 0xc17317c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z3.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17817e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z8.h\n"
+ "trn1 z14.h, z14.h, z24.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "ld1b { z24.s }, p1/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xa0402a8a // ld1h { z10.h-z11.h }, pn10.b/Z, [x20]\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
+ "add z14.h, z14.h, z18.h\n"
+ "trn1 z16.h, z16.h, z24.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add z15.h, z15.h, z18.h\n"
+ "mov z17.d, z17.d\n"
+ "add z16.h, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ "add z17.h, z17.h, z18.h\n"
+ ".inst 0xc17b15a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z11.h\n"
+ ".inst 0xc17015c8 // sdot za.s[x8, 0], { z14.h-z17.h }, z0.h\n"
"10:" // Unpadded: 2 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1b { z26.s }, p1/Z, [x17]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
"addvl x21, SP, #6\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z16.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1b { z27.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #12\n"
- "ld1b { z16.s }, p1/Z, [x22]\n"
+ "ld1b { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1b { z28.s }, p1/Z, [x22]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z26.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z29.s }, p1/Z, [x22]\n"
+ "ld1b { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z28.h, z28.h, z29.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1b { z29.s }, p1/Z, [x22]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "ld1b { z24.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1b { z19.s }, p1/Z, [x22]\n"
+ "ld1b { z14.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z19.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1b { z30.s }, p1/Z, [x22]\n"
+ "trn1 z12.h, z12.h, z26.h\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1b { z23.s }, p1/Z, [x22]\n"
- "trn1 z30.h, z30.h, z23.h\n"
+ "ld1b { z15.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z30.h, z30.h, z7.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721748 // sdot za.s[x8, 0], { z26.h-z29.h }, z2.h\n"
- "ld1b { z22.s }, p1/Z, [x22]\n"
- "mov z31.d, z22.d\n"
- ".inst 0xc1731768 // sdot za.s[x8, 0], { z27.h-z30.h }, z3.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1h { z3.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17b1769 // sdot za.s[x8, 1], { z27.h-z30.h }, z11.h\n"
- ".inst 0xc1731788 // sdot za.s[x8, 0], { z28.h-z31.h }, z3.h\n"
+ "trn1 z13.h, z13.h, z24.h\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1b { z24.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
+ "add z13.h, z13.h, z18.h\n"
+ "trn1 z15.h, z15.h, z24.h\n"
+ "ld1h { z1.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z14.h, z14.h, z18.h\n"
+ "mov z16.d, z16.d\n"
+ "add z15.h, z15.h, z18.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "add z16.h, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402a80 // ld1h { z0.h, z8.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17115a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z1.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701789 // sdot za.s[x8, 1], { z28.h-z31.h }, z0.h\n"
+ ".inst 0xc1781589 // sdot za.s[x8, 1], { z12.h-z15.h }, z8.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"11:" // Unpadded: 1 priming loads
- "add x22, x17, %x[ld_in_row]\n"
- "ld1b { z29.s }, p1/Z, [x17]\n"
+ "add x22, x16, %x[ld_in_row]\n"
+ "ld1b { z9.s }, p1/Z, [x16]\n"
"addvl x21, SP, #3\n"
- "ld1b { z22.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z22.h\n"
- "add z29.h, z29.h, z7.h\n"
- "ld1b { z30.s }, p1/Z, [x22]\n"
+ "ld1b { z4.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
"addvl x20, SP, #9\n"
- "ld1b { z25.s }, p1/Z, [x22]\n"
+ "ld1b { z10.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z30.h, z30.h, z25.h\n"
- "add z30.h, z30.h, z7.h\n"
- "ld1b { z31.s }, p1/Z, [x22]\n"
- "add x22, x22, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "add x16, x16, %x[ld_in_col]\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- "add z31.h, z31.h, z7.h\n"
- "ld1b { z0.s }, p1/Z, [x22]\n"
+ "ld1b { z11.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z9.h, z9.h, z4.h\n"
+ "ld1b { z17.s }, p1/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z10.h, z10.h, z16.h\n"
"ld1b { z16.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "trn1 z0.h, z0.h, z16.h\n"
- "add z0.h, z0.h, z7.h\n"
- "ld1b { z1.s }, p1/Z, [x22]\n"
+ "ld1b { z13.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1b { z2.s }, p1/Z, [x22]\n"
- "trn1 z1.h, z1.h, z2.h\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ "add z9.h, z9.h, z18.h\n"
+ "ld1b { z17.s }, p1/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "add z1.h, z1.h, z7.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17217a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z2.h\n"
- "ld1b { z24.s }, p1/Z, [x22]\n"
- "mov z2.d, z24.d\n"
- ".inst 0xc17317c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z3.h\n"
- ".inst 0xa0402a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc17817a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z8.h\n"
- "add z2.h, z2.h, z7.h\n"
- "ld1h { z3.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17917c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z9.h\n"
- ".inst 0xc17317e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z3.h\n"
- "ld1h { z3.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17317e9 // sdot za.s[x8, 1], { z31.h-z2.h }, z3.h\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z10.h, z10.h, z18.h\n"
+ ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z16.s }, p1/Z, [x22]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "trn1 z13.h, z13.h, z17.h\n"
+ "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z12.h, z12.h, z18.h\n"
+ "mov z14.d, z16.d\n"
+ "add z13.h, z13.h, z18.h\n"
+ ".inst 0xc1701528 // sdot za.s[x8, 0], { z9.h-z12.h }, z0.h\n"
+ "add z14.h, z14.h, z18.h\n"
+ ".inst 0xc1711548 // sdot za.s[x8, 0], { z10.h-z13.h }, z1.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc1701529 // sdot za.s[x8, 1], { z9.h-z12.h }, z0.h\n"
+ ".inst 0xc1741568 // sdot za.s[x8, 0], { z11.h-z14.h }, z4.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1711549 // sdot za.s[x8, 1], { z10.h-z13.h }, z1.h\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
"12:" // Unpadded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
- "add x21, x17, %x[ld_in_row]\n"
- "ld1b { z23.s }, p1/Z, [x17]\n"
- "sub x7, x7, #0x2\n"
- "ld1b { z25.s }, p1/Z, [x21]\n"
+ "add x21, x16, %x[ld_in_row]\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "sub x17, x17, #0x2\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z25.h\n"
- "sub x16, x16, #0x1\n"
- "ld1b { z24.s }, p1/Z, [x21]\n"
+ "sub x15, x15, #0x1\n"
+ "ld1b { z12.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "lsr x20, x7, #0x1\n"
- "add z23.h, z23.h, z7.h\n"
- "ld1b { z30.s }, p1/Z, [x21]\n"
+ "lsr x20, x17, #0x1\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z30.h\n"
- "cmp x20, x16\n"
- "ld1b { z25.s }, p1/Z, [x21]\n"
+ "cmp x20, x15\n"
+ "ld1b { z13.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "csel x26, x20, x16, LT\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1b { z22.s }, p1/Z, [x21]\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ "csel x25, x20, x15, LT\n"
+ "ld1b { z4.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z22.h\n"
- "add z25.h, z25.h, z7.h\n"
- "ld1b { z26.s }, p1/Z, [x21]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "ld1b { z14.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z22.s }, p1/Z, [x21]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "and x17, x17, #0x1\n"
+ "ld1b { z16.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z22.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1b { z27.s }, p1/Z, [x21]\n"
+ "sub x15, x15, x25\n"
+ "ld1b { z15.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "and x7, x7, #0x1\n"
- "ld1b { z30.s }, p1/Z, [x21]\n"
+ "trn1 z13.h, z13.h, z4.h\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1b { z17.s }, p1/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z30.h\n"
- "add z27.h, z27.h, z7.h\n"
- "ld1b { z28.s }, p1/Z, [x21]\n"
- "mov z28.d, z28.d\n"
- "add z28.h, z28.h, z7.h\n"
- "sub x16, x16, x26\n"
- "cbz x26, 21f\n"
+ "ld1b { z30.s }, p1/Z, [x21]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ "mov z16.d, z30.d\n"
+ "add z14.h, z14.h, z18.h\n"
+ "add z15.h, z15.h, z18.h\n"
+ "add z16.h, z16.h, z18.h\n"
+ "cbz x25, 21f\n"
"13:" // Unpadded: Main loop
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
- "addvl x25, SP, #6\n"
- "addvl x24, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa0402b20 // ld1h { z0.h-z1.h }, pn10.b/Z, [x25]\n"
- "add x23, x17, %x[ld_in_row]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "addvl x24, SP, #6\n"
+ "addvl x20, SP, #12\n"
+ "add x23, x16, %x[ld_in_row]\n"
"addvl x22, SP, #3\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
"addvl x21, SP, #9\n"
- "subs x26, x26, #0x1\n"
- ".inst 0xc1711709 // sdot za.s[x8, 1], { z24.h-z27.h }, z1.h\n"
- ".inst 0xa0402b08 // ld1h { z8.h-z9.h }, pn10.b/Z, [x24]\n"
- ".inst 0xc17816ea // sdot za.s[x8, 2], { z23.h-z26.h }, z8.h\n"
- "ld1b { z23.s }, p1/Z, [x17]\n"
- "add x17, x17, %x[ld_in_col]\n"
- "add x20, x17, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x25, #2, MUL VL]\n"
- ".inst 0xc179170a // sdot za.s[x8, 2], { z24.h-z27.h }, z9.h\n"
- "ld1b { z16.s }, p1/Z, [x23]\n"
+ "subs x25, x25, #0x1\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402b00 // ld1h { z0.h-z1.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ "ld1b { z28.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z16.h\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "ld1h { z9.h }, p2/Z, [x24, #2, MUL VL]\n"
- "add z23.h, z23.h, z7.h\n"
- "ld1b { z24.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a3ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z3.s\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ "ld1b { z29.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc179172a // sdot za.s[x8, 2], { z25.h-z28.h }, z9.h\n"
- "ld1b { z18.s }, p1/Z, [x23]\n"
+ "ld1b { z9.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z18.h\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1b { z25.s }, p1/Z, [x23]\n"
+ "trn1 z28.h, z28.h, z17.h\n"
+ ".inst 0xa0402aca // ld1h { z10.h-z11.h }, pn10.b/Z, [x22]\n"
+ ".inst 0xc1a5aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+ "ld1b { z30.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "ld1b { z8.s }, p1/Z, [x23]\n"
+ "trn1 z29.h, z29.h, z9.h\n"
+ "ld1b { z17.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z8.h\n"
- "add z25.h, z25.h, z7.h\n"
- "ld1b { z26.s }, p1/Z, [x23]\n"
+ "add z28.h, z28.h, z18.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1b { z31.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "ld1b { z28.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a7ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
+ "ld1b { z13.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z28.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1b { z27.s }, p1/Z, [x23]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ "ld1b { z0.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
- "ld1b { z28.s }, p1/Z, [x23]\n"
- "trn1 z27.h, z27.h, z28.h\n"
+ "trn1 z30.h, z30.h, z17.h\n"
+ "add z29.h, z29.h, z18.h\n"
+ "ld1b { z14.s }, p1/Z, [x23]\n"
"add x23, x23, %x[ld_in_row]\n"
- "add z27.h, z27.h, z7.h\n"
- ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
- ".inst 0xc17216e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z2.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- "ld1b { z20.s }, p1/Z, [x23]\n"
- "mov z28.d, z20.d\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- ".inst 0xa0402aa0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
- "add z28.h, z28.h, z7.h\n"
- "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
- ".inst 0xc1711709 // sdot za.s[x8, 1], { z24.h-z27.h }, z1.h\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- ".inst 0xc1701728 // sdot za.s[x8, 0], { z25.h-z28.h }, z0.h\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1b { z23.s }, p1/Z, [x17]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "st1b { z17.s }, p1, [x14]\n"
+ "trn1 z31.h, z31.h, z13.h\n"
+ "ld1b { z8.s }, p1/Z, [x23]\n"
+ ".inst 0xc1a6ce78 // sclamp { z24.s-z27.s }, z19.s, z6.s\n"
+ "ld1h { z12.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "add z30.h, z30.h, z18.h\n"
+ "trn1 z0.h, z0.h, z14.h\n"
+ "mov z1.d, z8.d\n"
+ "add z31.h, z31.h, z18.h\n"
+ "st1b { z24.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "st1b { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "add z0.h, z0.h, z18.h\n"
+ "st1b { z26.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z1.h, z1.h, z18.h\n"
+ "st1b { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ ".inst 0xc17a1788 // sdot za.s[x8, 0], { z28.h-z31.h }, z10.h\n"
+ ".inst 0xc17b17a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z11.h\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc1781789 // sdot za.s[x8, 1], { z28.h-z31.h }, z8.h\n"
+ "ld1b { z11.s }, p1/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17c17c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z12.h\n"
+ "ld1h { z4.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z16.h\n"
- "st1b { z18.s }, p1, [x10]\n"
- "ld1b { z24.s }, p1/Z, [x20]\n"
+ ".inst 0xc17917a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z9.h\n"
+ "ld1b { z12.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add x10, x10, x28\n"
- "st1b { z19.s }, p1, [x9]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z9.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z16.h\n"
- "add x9, x9, x27\n"
- "ld1b { z25.s }, p1/Z, [x20]\n"
+ "trn1 z11.h, z11.h, z17.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc17417c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z4.h\n"
+ "ld1b { z13.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "add z23.h, z23.h, z7.h\n"
+ "trn1 z12.h, z12.h, z9.h\n"
"ld1b { z16.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z16.h\n"
- "add z24.h, z24.h, z7.h\n"
- "ld1b { z26.s }, p1/Z, [x20]\n"
+ "add z11.h, z11.h, z18.h\n"
+ "ld1b { z14.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "add z25.h, z25.h, z7.h\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "ld1b { z1.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z16.h\n"
- "add z26.h, z26.h, z7.h\n"
- "ld1b { z27.s }, p1/Z, [x20]\n"
+ "ld1b { z15.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "ld1b { z16.s }, p1/Z, [x20]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z12.h, z12.h, z18.h\n"
+ "ld1b { z17.s }, p1/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "add z27.h, z27.h, z7.h\n"
"ld1b { z16.s }, p1/Z, [x20]\n"
- "mov z28.d, z16.d\n"
- "add z28.h, z28.h, z7.h\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "trn1 z14.h, z14.h, z1.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z13.h, z13.h, z18.h\n"
+ "mov z16.d, z16.d\n"
+ "add z14.h, z14.h, z18.h\n"
+ "add z15.h, z15.h, z18.h\n"
+ "add z16.h, z16.h, z18.h\n"
"bgt 13b\n"
"b 21f\n"
"14:" // Padded
"cbz x22, 19f\n"
"cmp x22, #0x1\n"
- "sub x7, x7, x22\n"
+ "sub x17, x17, x22\n"
"beq 18f\n"
"cmp x22, #0x2\n"
"beq 17f\n"
@@ -644,686 +649,686 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
"beq 16f\n"
"15:" // Padded: 4 priming loads
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x17]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "addvl x20, SP, #12\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z28.s }, p0/Z, [x21]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z27.h, z27.h, z17.h\n"
- "trn1 z28.h, z28.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z29.s }, p0/Z, [x21]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z30.s }, p0/Z, [x21]\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
+ "ld1b { z14.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z31.s }, p0/Z, [x21]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x20, SP, #12\n"
+ ".inst 0xc1711568 // sdot za.s[x8, 0], { z11.h-z14.h }, z1.h\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z29.h, z29.h, z18.h\n"
- "trn1 z30.h, z30.h, z17.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
- "trn1 z31.h, z31.h, z16.h\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
- "ld1b { z20.s }, p0/Z, [x21]\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
- "mov z0.d, z20.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1711788 // sdot za.s[x8, 0], { z28.h-z31.h }, z1.h\n"
- "ld1h { z1.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc17117a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z1.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"16:" // Padded: 3 priming loads
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z24.s }, p0/Z, [x17]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x21, x17, %x[ld_in_row]\n"
+ "addvl x20, SP, #9\n"
+ ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z25.s }, p0/Z, [x21]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z17.h\n"
- "trn1 z25.h, z25.h, z16.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z26.s }, p0/Z, [x21]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x21]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z27.s }, p0/Z, [x21]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
+ "ld1b { z14.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x21]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z28.s }, p0/Z, [x21]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x21]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x20, SP, #9\n"
+ ".inst 0xc1711568 // sdot za.s[x8, 0], { z11.h-z14.h }, z1.h\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
"add x21, x21, %x[ld_in_row]\n"
- "trn1 z26.h, z26.h, z18.h\n"
- "trn1 z27.h, z27.h, z17.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- "trn1 z28.h, z28.h, z16.h\n"
- ".inst 0xc1721708 // sdot za.s[x8, 0], { z24.h-z27.h }, z2.h\n"
- "ld1b { z11.s }, p0/Z, [x21]\n"
- "add z11.h, p0/M, z11.h, z7.h\n"
- "mov z29.d, z11.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1731728 // sdot za.s[x8, 0], { z25.h-z28.h }, z3.h\n"
- "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701748 // sdot za.s[x8, 0], { z26.h-z29.h }, z0.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc17015a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z0.h\n"
"17:" // Padded: 2 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z25.s }, p0/Z, [x17]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #6\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #12\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z17.h\n"
- "trn1 z26.h, z26.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x21, SP, #6\n"
- "trn1 z27.h, z27.h, z18.h\n"
- "trn1 z28.h, z28.h, z17.h\n"
- ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z29.h, z29.h, z16.h\n"
- ".inst 0xc1711728 // sdot za.s[x8, 0], { z25.h-z28.h }, z1.h\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #12\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- ".inst 0xc1791748 // sdot za.s[x8, 0], { z26.h-z29.h }, z9.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1721729 // sdot za.s[x8, 1], { z25.h-z28.h }, z2.h\n"
- "mov z30.d, z1.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z9.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- ".inst 0xc1791768 // sdot za.s[x8, 0], { z27.h-z30.h }, z9.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"18:" // Padded: 1 priming loads
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z25.s }, p0/Z, [x17]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #3\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "addvl x20, SP, #9\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z25.h, z25.h, z17.h\n"
- "trn1 z26.h, z26.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "addvl x21, SP, #3\n"
- "trn1 z27.h, z27.h, z18.h\n"
- "trn1 z28.h, z28.h, z17.h\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z29.h, z29.h, z16.h\n"
- ".inst 0xc1731728 // sdot za.s[x8, 0], { z25.h-z28.h }, z3.h\n"
- "ld1b { z0.s }, p0/Z, [x20]\n"
- "addvl x20, SP, #9\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
- ".inst 0xc17b1748 // sdot za.s[x8, 0], { z26.h-z29.h }, z11.h\n"
- ".inst 0xa0402a82 // ld1h { z2.h-z3.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1721729 // sdot za.s[x8, 1], { z25.h-z28.h }, z2.h\n"
- "mov z30.d, z0.d\n"
- "add x17, x17, %x[ld_in_col]\n"
- "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1731749 // sdot za.s[x8, 1], { z26.h-z29.h }, z3.h\n"
- ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
"ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
"19:" // Padded: 0 priming loads
- "cmp x7, #0x2\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "cmp x17, #0x2\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"blt 22f\n"
"mov x12, #0x0\n"
+ "add x21, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "sub x17, x17, #0x2\n"
+ "sub x15, x15, #0x1\n"
+ "lsr x20, x17, #0x1\n"
+ "cmp x20, x15\n"
+ "and x17, x17, #0x1\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "csel x25, x20, x15, LT\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "sub x15, x15, x25\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z23.h, z23.h, z17.h\n"
- "trn1 z24.h, z24.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z19.s }, p0/Z, [x20]\n"
- "add z19.h, p0/M, z19.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z18.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z18.h, p0/M, z18.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z17.s }, p0/Z, [x21]\n"
+ "add x21, x21, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "sub x7, x7, #0x2\n"
- "sub x16, x16, #0x1\n"
- "trn1 z25.h, z25.h, z19.h\n"
- "trn1 z26.h, z26.h, z18.h\n"
- "lsr x20, x7, #0x1\n"
- "cmp x20, x16\n"
- "trn1 z27.h, z27.h, z17.h\n"
- "mov z28.d, z16.d\n"
- "csel x25, x20, x16, LT\n"
- "add x17, x17, %x[ld_in_col]\n"
- "and x7, x7, #0x1\n"
- "sub x16, x16, x25\n"
+ "ld1b { z16.s }, p0/Z, [x21]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ "mov z16.d, z16.d\n"
"cbz x25, 21f\n"
"20:" // Padded: Main loop
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"addvl x24, SP, #6\n"
- "addvl x23, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa1402b00 // ld1h { z0.h, z8.h }, pn10.b/Z, [x24]\n"
+ "addvl x20, SP, #12\n"
"mov x12, #0x0\n"
+ "add x23, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17016e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z0.h\n"
- "add x20, x17, %x[ld_in_row]\n"
"addvl x22, SP, #3\n"
- ".inst 0xc1781709 // sdot za.s[x8, 1], { z24.h-z27.h }, z8.h\n"
- ".inst 0xa1402ae3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x23]\n"
"addvl x21, SP, #9\n"
"subs x25, x25, #0x1\n"
- ".inst 0xc17316ea // sdot za.s[x8, 2], { z23.h-z26.h }, z3.h\n"
- "ld1b { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402b00 // ld1h { z0.h-z1.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402a88 // ld1h { z8.h-z9.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc178156a // sdot za.s[x8, 2], { z11.h-z14.h }, z8.h\n"
+ "ld1b { z25.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z1.h }, p2/Z, [x20, #2, MUL VL]\n"
+ "add x20, x16, %x[ld_in_row]\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ "add z25.h, p0/M, z25.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc179158a // sdot za.s[x8, 2], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa1402ac4 // ld1h { z4.h, z12.h }, pn10.b/Z, [x22]\n"
+ "ld1b { z10.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc17115aa // sdot za.s[x8, 2], { z13.h-z16.h }, z1.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1h { z0.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "add z10.h, p0/M, z10.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b170a // sdot za.s[x8, 2], { z24.h-z27.h }, z11.h\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ "ld1b { z26.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z25.h, z25.h, z10.h\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
+ "add z26.h, p0/M, z26.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
- "ld1h { z3.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "st1b { z28.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc173172a // sdot za.s[x8, 2], { z25.h-z28.h }, z3.h\n"
- "trn1 z23.h, z23.h, z16.h\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ "ld1b { z27.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z26.h, z26.h, z16.h\n"
+ "add z27.h, p0/M, z27.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z3.s }, p0/Z, [x20]\n"
- "add z3.h, p0/M, z3.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z28.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z27.h, z27.h, z16.h\n"
+ "add z28.h, p0/M, z28.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z30.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z29.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "trn1 z28.h, z28.h, z16.h\n"
+ "add z29.h, p0/M, z29.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z29.s }, p0/Z, [x20]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
- "trn1 z24.h, z24.h, z1.h\n"
- "trn1 z25.h, z25.h, z3.h\n"
- "trn1 z26.h, z26.h, z30.h\n"
- ".inst 0xa0402ac2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x22]\n"
- "add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
- "trn1 z27.h, z27.h, z29.h\n"
+ ".inst 0xc1741728 // sdot za.s[x8, 0], { z25.h-z28.h }, z4.h\n"
+ "ld1b { z15.s }, p0/Z, [x23]\n"
+ "add x23, x23, %x[ld_in_row]\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17216e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z2.h\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
"mov x12, #0x0\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- ".inst 0xa0402aa2 // ld1h { z2.h-z3.h }, pn10.b/Z, [x21]\n"
- "add x17, x17, %x[ld_in_col]\n"
+ "ld1b { z16.s }, p0/Z, [x23]\n"
+ "trn1 z29.h, z29.h, z15.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17216e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z2.h\n"
- "ld1b { z23.s }, p0/Z, [x17]\n"
- "add z23.h, p0/M, z23.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ ".inst 0xc17c1748 // sdot za.s[x8, 0], { z26.h-z29.h }, z12.h\n"
+ ".inst 0xa1402aa1 // ld1h { z1.h, z9.h }, pn10.b/Z, [x21]\n"
+ "mov z30.d, z16.d\n"
+ ".inst 0xc1711729 // sdot za.s[x8, 1], { z25.h-z28.h }, z1.h\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z8.s }, p0/Z, [x20]\n"
- "add z8.h, p0/M, z8.h, z7.h\n"
+ ".inst 0xc1701768 // sdot za.s[x8, 0], { z27.h-z30.h }, z0.h\n"
+ "ld1h { z0.h }, p2/Z, [x21, #2, MUL VL]\n"
+ ".inst 0xc1791749 // sdot za.s[x8, 1], { z26.h-z29.h }, z9.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1701769 // sdot za.s[x8, 1], { z27.h-z30.h }, z0.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc1731709 // sdot za.s[x8, 1], { z24.h-z27.h }, z3.h\n"
- "ld1b { z24.s }, p0/Z, [x20]\n"
- "mov z28.d, z20.d\n"
- "ld1h { z1.h }, p2/Z, [x22, #2, MUL VL]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
+ "ld1b { z12.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z22.s }, p0/Z, [x20]\n"
- ".inst 0xc1711728 // sdot za.s[x8, 0], { z25.h-z28.h }, z1.h\n"
"mov x12, #0x4\n"
- "add z22.h, p0/M, z22.h, z7.h\n"
- "ld1h { z1.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc1711729 // sdot za.s[x8, 1], { z25.h-z28.h }, z1.h\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1b { z13.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
+ "ld1b { z14.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z20.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
+ "ld1b { z15.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z31.s }, p0/Z, [x20]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
+ "ld1b { z17.s }, p0/Z, [x20]\n"
"add x20, x20, %x[ld_in_row]\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z1.s }, p0/Z, [x20]\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z17.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- "trn1 z23.h, z23.h, z8.h\n"
- "trn1 z24.h, z24.h, z22.h\n"
- "st1b { z18.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "trn1 z25.h, z25.h, z28.h\n"
- "trn1 z26.h, z26.h, z20.h\n"
- "st1b { z19.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
- "trn1 z27.h, z27.h, z31.h\n"
- "mov z28.d, z1.d\n"
+ "ld1b { z16.s }, p0/Z, [x20]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ "mov z16.d, z16.d\n"
"bgt 20b\n"
"21:" // Main loop tail
- ".inst 0xc17316e8 // sdot za.s[x8, 0], { z23.h-z26.h }, z3.h\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
"addvl x24, SP, #6\n"
"addvl x23, SP, #12\n"
- ".inst 0xc17b1708 // sdot za.s[x8, 0], { z24.h-z27.h }, z11.h\n"
- ".inst 0xa0402b08 // ld1h { z8.h-z9.h }, pn10.b/Z, [x24]\n"
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc17816e9 // sdot za.s[x8, 1], { z23.h-z26.h }, z8.h\n"
- "add x22, x17, %x[ld_in_row]\n"
"addvl x21, SP, #3\n"
- ".inst 0xc1791709 // sdot za.s[x8, 1], { z24.h-z27.h }, z9.h\n"
- ".inst 0xa1402ae3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x23]\n"
"addvl x20, SP, #9\n"
- ".inst 0xc17316ea // sdot za.s[x8, 2], { z23.h-z26.h }, z3.h\n"
- "ld1b { z29.s }, p0/Z, [x17]\n"
- "add z29.h, p0/M, z29.h, z7.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402b01 // ld1h { z1.h, z9.h }, pn10.b/Z, [x24]\n"
+ ".inst 0xc1711569 // sdot za.s[x8, 1], { z11.h-z14.h }, z1.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
+ "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xc1791589 // sdot za.s[x8, 1], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402ae0 // ld1h { z0.h-z1.h }, pn10.b/Z, [x23]\n"
+ ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add x16, x16, %x[ld_in_col]\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xc1a3ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z3.s\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z8.s }, p0/Z, [x22]\n"
- "add z8.h, p0/M, z8.h, z7.h\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
+ "ld1b { z10.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ ".inst 0xc1a5aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
+ "add x8, x8, #0x1\n"
+ "ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
+ "add z10.h, p0/M, z10.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- ".inst 0xc17b170a // sdot za.s[x8, 2], { z24.h-z27.h }, z11.h\n"
- "ld1b { z30.s }, p0/Z, [x22]\n"
- "add z30.h, p0/M, z30.h, z7.h\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc1721728 // sdot za.s[x8, 0], { z25.h-z28.h }, z2.h\n"
- "ld1h { z0.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "trn1 z11.h, z11.h, z10.h\n"
+ ".inst 0xc1a6ce78 // sclamp { z24.s-z27.s }, z19.s, z6.s\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
"mov x12, #0x4\n"
- "ld1b { z20.s }, p0/Z, [x22]\n"
- ".inst 0xc1701729 // sdot za.s[x8, 1], { z25.h-z28.h }, z0.h\n"
- "add z20.h, p0/M, z20.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- "ld1h { z2.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "st1b { z24.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z25.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z26.s }, p1, [x9]\n"
+ "add x9, x9, x27\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- ".inst 0xc172172a // sdot za.s[x8, 2], { z25.h-z28.h }, z2.h\n"
- "trn1 z29.h, z29.h, z8.h\n"
- "ld1b { z31.s }, p0/Z, [x22]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
+ "st1b { z27.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z25.s }, p0/Z, [x22]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z0.s }, p0/Z, [x22]\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z17.s }, p0/Z, [x22]\n"
"mov x12, #0x8\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z1.s }, p0/Z, [x22]\n"
- "add z1.h, p0/M, z1.h, z7.h\n"
+ "ld1b { z15.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z28.s }, p0/Z, [x22]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "trn1 z30.h, z30.h, z20.h\n"
- "trn1 z31.h, z31.h, z25.h\n"
- "trn1 z0.h, z0.h, z17.h\n"
- ".inst 0xa1402aa3 // ld1h { z3.h, z11.h }, pn10.b/Z, [x21]\n"
+ ".inst 0xc1781568 // sdot za.s[x8, 0], { z11.h-z14.h }, z8.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
"add x22, x22, %x[ld_in_row]\n"
- ".inst 0xc0060c18 // mova { z24.d-z27.d }, za.d[x8, #0]\n"
- "add x8, x8, #0x1\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z1.h, z1.h, z28.h\n"
- ".inst 0xc17317a8 // sdot za.s[x8, 0], { z29.h-z0.h }, z3.h\n"
- "ld1b { z22.s }, p0/Z, [x22]\n"
- ".inst 0xc1a6ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
- "add z22.h, p0/M, z22.h, z7.h\n"
- ".inst 0xc17b17c8 // sdot za.s[x8, 0], { z30.h-z1.h }, z11.h\n"
- ".inst 0xa1402a83 // ld1h { z3.h, z11.h }, pn10.b/Z, [x20]\n"
- "add x17, x17, %x[ld_in_col]\n"
- ".inst 0xc1a4aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z4.s\n"
- ".inst 0xc17317a9 // sdot za.s[x8, 1], { z29.h-z0.h }, z3.h\n"
- "mov z2.d, z22.d\n"
- "ld1h { z9.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc17b17c9 // sdot za.s[x8, 1], { z30.h-z1.h }, z11.h\n"
- ".inst 0xc1aaab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z10.s\n"
- ".inst 0xc17917e8 // sdot za.s[x8, 0], { z31.h-z2.h }, z9.h\n"
- "ld1h { z8.h }, p2/Z, [x20, #2, MUL VL]\n"
- ".inst 0xc1b5ccb8 // sclamp { z24.s-z27.s }, z5.s, z21.s\n"
- "st1b { z24.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z25.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- ".inst 0xa1402be3 // ld1h { z3.h, z11.h }, pn10.b/Z, [SP]\n"
- "st1b { z26.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- ".inst 0xc17817e9 // sdot za.s[x8, 1], { z31.h-z2.h }, z8.h\n"
- "ld1h { z2.h }, p2/Z, [SP, #2, MUL VL]\n"
- "st1b { z27.s }, p1, [x9]\n"
- "add x9, x9, x27\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc1791588 // sdot za.s[x8, 0], { z12.h-z15.h }, z9.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1701569 // sdot za.s[x8, 1], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1711589 // sdot za.s[x8, 1], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xa1402be2 // ld1h { z2.h, z10.h }, pn10.b/Z, [SP]\n"
+ ".inst 0xc17015a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z0.h\n"
+ "ld1h { z8.h }, p2/Z, [SP, #2, MUL VL]\n"
"22:" // Main loop skip tail
- "cbz x7, 23f\n" // Skip remainder inputs
+ "cbz x17, 23f\n" // Skip remainder inputs
"mov x12, #0x0\n"
+ "add x22, x16, %x[ld_in_row]\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z24.s }, p0/Z, [x17]\n"
- "add z24.h, p0/M, z24.h, z7.h\n"
- "add x20, x17, %x[ld_in_row]\n"
+ "addvl x21, SP, #6\n"
+ "addvl x20, SP, #12\n"
+ "sub x15, x15, #0x1\n"
+ "ld1b { z11.s }, p0/Z, [x16]\n"
+ "add z11.h, p0/M, z11.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z25.s }, p0/Z, [x20]\n"
- "add z25.h, p0/M, z25.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z12.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z11.h, z11.h, z16.h\n"
+ "add z12.h, p0/M, z12.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
"mov x12, #0x4\n"
- "add x20, x20, %x[ld_in_row]\n"
- "trn1 z24.h, z24.h, z17.h\n"
- "trn1 z25.h, z25.h, z16.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z26.s }, p0/Z, [x20]\n"
- "add z26.h, p0/M, z26.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z13.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z12.h, z12.h, z16.h\n"
+ "add z13.h, p0/M, z13.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z17.s }, p0/Z, [x20]\n"
- "add z17.h, p0/M, z17.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "ld1b { z27.s }, p0/Z, [x20]\n"
- "add z27.h, p0/M, z27.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z14.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z13.h, z13.h, z16.h\n"
+ "add z14.h, p0/M, z14.h, z18.h\n"
".inst 0x25f04500 // psel p0.s, p1.s/Z, p8.s[w12, #3]\n"
- "ld1b { z16.s }, p0/Z, [x20]\n"
"mov x12, #0x8\n"
- "add z16.h, p0/M, z16.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
".inst 0x25304500 // psel p0.s, p1.s/Z, p8.s[w12]\n"
- "ld1b { z28.s }, p0/Z, [x20]\n"
- "add z28.h, p0/M, z28.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ "ld1b { z15.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "trn1 z14.h, z14.h, z16.h\n"
+ "add z15.h, p0/M, z15.h, z18.h\n"
".inst 0x25704500 // psel p0.s, p1.s/Z, p8.s[w12, #1]\n"
- "ld1b { z31.s }, p0/Z, [x20]\n"
- "add z31.h, p0/M, z31.h, z7.h\n"
- "add x20, x20, %x[ld_in_row]\n"
+ ".inst 0xc1721568 // sdot za.s[x8, 0], { z11.h-z14.h }, z2.h\n"
+ "ld1b { z17.s }, p0/Z, [x22]\n"
+ "add x22, x22, %x[ld_in_row]\n"
+ "add z17.h, p0/M, z17.h, z18.h\n"
".inst 0x25b04500 // psel p0.s, p1.s/Z, p8.s[w12, #2]\n"
- "trn1 z26.h, z26.h, z17.h\n"
- "trn1 z27.h, z27.h, z16.h\n"
- "ld1b { z0.s }, p0/Z, [x20]\n"
- "add z0.h, p0/M, z0.h, z7.h\n"
- "trn1 z28.h, z28.h, z31.h\n"
- "addvl x21, SP, #6\n"
- ".inst 0xc1731708 // sdot za.s[x8, 0], { z24.h-z27.h }, z3.h\n"
- "mov z29.d, z0.d\n"
- "addvl x20, SP, #12\n"
- "sub x16, x16, #0x1\n"
- ".inst 0xc17b1728 // sdot za.s[x8, 0], { z25.h-z28.h }, z11.h\n"
- ".inst 0xa0402aa8 // ld1h { z8.h-z9.h }, pn10.b/Z, [x21]\n"
- ".inst 0xc1721748 // sdot za.s[x8, 0], { z26.h-z29.h }, z2.h\n"
+ "ld1b { z16.s }, p0/Z, [x22]\n"
+ "trn1 z15.h, z15.h, z17.h\n"
+ "add z16.h, p0/M, z16.h, z18.h\n"
+ ".inst 0xc17a1588 // sdot za.s[x8, 0], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa1402aa2 // ld1h { z2.h, z10.h }, pn10.b/Z, [x21]\n"
+ "mov z16.d, z16.d\n"
+ ".inst 0xc1721569 // sdot za.s[x8, 1], { z11.h-z14.h }, z2.h\n"
+ ".inst 0xc17815a8 // sdot za.s[x8, 0], { z13.h-z16.h }, z8.h\n"
"ld1h { z2.h }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0xc1781709 // sdot za.s[x8, 1], { z24.h-z27.h }, z8.h\n"
- ".inst 0xc0060c10 // mova { z16.d-z19.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- ".inst 0xc1791729 // sdot za.s[x8, 1], { z25.h-z28.h }, z9.h\n"
- ".inst 0xa1402a81 // ld1h { z1.h, z9.h }, pn10.b/Z, [x20]\n"
- ".inst 0xc1a4aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z4.s\n"
- ".inst 0xc171170a // sdot za.s[x8, 2], { z24.h-z27.h }, z1.h\n"
- ".inst 0xc1aaab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z10.s\n"
- ".inst 0xc179172a // sdot za.s[x8, 2], { z25.h-z28.h }, z9.h\n"
- ".inst 0xc1b5ccb0 // sclamp { z16.s-z19.s }, z5.s, z21.s\n"
- "st1b { z16.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- ".inst 0xc1721749 // sdot za.s[x8, 1], { z26.h-z29.h }, z2.h\n"
- "ld1h { z3.h }, p2/Z, [x20, #2, MUL VL]\n"
- "st1b { z17.s }, p1, [x14]\n"
- "add x14, x14, x11\n"
- ".inst 0xc173174a // sdot za.s[x8, 2], { z26.h-z29.h }, z3.h\n"
+ ".inst 0xc17a1589 // sdot za.s[x8, 1], { z12.h-z15.h }, z10.h\n"
+ ".inst 0xa0402a80 // ld1h { z0.h-z1.h }, pn10.b/Z, [x20]\n"
+ ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
+ ".inst 0xc170156a // sdot za.s[x8, 2], { z11.h-z14.h }, z0.h\n"
+ ".inst 0xc17215a9 // sdot za.s[x8, 1], { z13.h-z16.h }, z2.h\n"
+ "ld1h { z0.h }, p2/Z, [x20, #2, MUL VL]\n"
+ ".inst 0xc1a3ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z3.s\n"
+ ".inst 0xc171158a // sdot za.s[x8, 2], { z12.h-z15.h }, z1.h\n"
+ ".inst 0xc1a5aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z5.s\n"
+ ".inst 0xc17015aa // sdot za.s[x8, 2], { z13.h-z16.h }, z0.h\n"
"add x8, x8, #0x1\n"
- "st1b { z18.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z19.s }, p1, [x9]\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a7ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z7.s\n"
+ ".inst 0xc1a6ce7c // sclamp { z28.s-z31.s }, z19.s, z6.s\n"
+ "st1b { z28.s }, p1, [x14]\n"
+ "add x14, x14, x11\n"
+ "st1b { z29.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z30.s }, p1, [x9]\n"
"add x9, x9, x27\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
+ "st1b { z31.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"23:" // Tail input: End
- "cbz x16, 25f\n"
+ "cbz x15, 25f\n"
"24:" // Right padding loop
- ".inst 0xc0060c1c // mova { z28.d-z31.d }, za.d[x8, #0]\n"
- ".inst 0xc1a6ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z6.s\n"
+ ".inst 0xc0060c08 // mova { z8.d-z11.d }, za.d[x8, #0]\n"
"add x8, x8, #0x1\n"
- ".inst 0xc1a4aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z4.s\n"
- "subs x16, x16, #0x1\n"
- ".inst 0xc0040d84 // mova za.d[x8, #4], { z12.d-z15.d }\n"
- ".inst 0xc1aaab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
- ".inst 0xc1b5ccbc // sclamp { z28.s-z31.s }, z5.s, z21.s\n"
- "st1b { z28.s }, p1, [x15]\n"
- "add x15, x15, x13\n"
- "st1b { z29.s }, p1, [x14]\n"
+ "subs x15, x15, #0x1\n"
+ ".inst 0xc0040e84 // mova za.d[x8, #4], { z20.d-z23.d }\n"
+ ".inst 0xc1a3ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z3.s\n"
+ ".inst 0xc1a5aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z5.s\n"
+ ".inst 0xc1a7ab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z7.s\n"
+ ".inst 0xc1a6ce68 // sclamp { z8.s-z11.s }, z19.s, z6.s\n"
+ "st1b { z8.s }, p1, [x14]\n"
"add x14, x14, x11\n"
- "st1b { z30.s }, p1, [x10]\n"
- "add x10, x10, x28\n"
- "st1b { z31.s }, p1, [x9]\n"
+ "st1b { z9.s }, p1, [x13]\n"
+ "add x13, x13, x10\n"
+ "st1b { z10.s }, p1, [x9]\n"
"add x9, x9, x27\n"
+ "st1b { z11.s }, p1, [x28]\n"
+ "add x28, x28, x26\n"
"bgt 24b\n"
"25:" // End
"ldr x20, [%x[args], %[offsetof_Args_weights]]\n"
+ "incw x7\n"
+ "whilelt p1.s, x7, x6\n"
"incw x20, ALL, MUL #16\n"
"incw x20, ALL, MUL #9\n"
"str x20, [%x[args], %[offsetof_Args_weights]]\n"
"ldr x21, [%x[args], %[offsetof_Args_ld_in_vl]]\n"
- "incw x6\n"
- "whilelt p1.s, x6, x5\n"
"ldr x20, [%x[args], %[offsetof_Args_inptr]]\n"
"add x20, x20, x21\n"
"str x20, [%x[args], %[offsetof_Args_inptr]]\n"
@@ -1342,9 +1347,11 @@ void sme2_u8s8u8q_planar_5x5_s2_4rows_dot_za_impl(
"b.any 1b\n"
"addvl SP, SP, #15\n"
".inst 0xd503467f // SMSTOP\n"
+ "ldr x20, [SP, #0x0]\n"
+ "mov SP, x20\n"
:
: [args] "r" (&args), [ld_in_col] "r" (ld_in_col), [ld_in_row] "r" (ld_in_row), [offsetof_Args_current_channel] "I" (offsetof(Args, current_channel)), [offsetof_Args_inptr] "I" (offsetof(Args, inptr)), [offsetof_Args_input_cols] "I" (offsetof(Args, input_cols)), [offsetof_Args_ld_in_vl] "I" (offsetof(Args, ld_in_vl)), [offsetof_Args_ld_out_cols] "I" (offsetof(Args, ld_out_cols)), [offsetof_Args_ld_out_vls] "I" (offsetof(Args, ld_out_vls)), [offsetof_Args_n_channels] "I" (offsetof(Args, n_channels)), [offsetof_Args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_Args_output_cols] "I" (offsetof(Args, output_cols)), [offsetof_Args_pad_bottom] "I" (offsetof(Args, pad_bottom)), [offsetof_Args_pad_left] "I" (offsetof(Args, pad_left)), [offsetof_Args_pad_top] "I" (offsetof(Args, pad_top)), [offsetof_Args_weights] "I" (offsetof(Args, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_bias] "I" (offsetof(arm_gemm::Requantize32, bias)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(arm_gemm::Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(arm_gemm::Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(arm_gemm::Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(arm_gemm::Requantize32, per_layer_right_shift)), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index d807856ccb..dec7a99425 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,84 +88,84 @@ void sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x10, #0x0\n"
- "mov x14, #0x0\n"
+ "mov x17, #0x0\n"
+ "mov x16, #0x0\n"
"1:" // Tile loop
- "str x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x2\n"
"mov x25, #0x2\n"
- "mov x24, #0x2\n"
- "str x14, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x10, x23\n" // offset = tile_i * ld_input_row
- "ldr x13, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x10, x22\n" // offset = tile_i * ld_output_row
- "cnth x11\n"
- "madd x21, x14, x13, x21\n" // offset += tile_j * ld_input_col
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "cnth x15\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "madd x20, x14, x12, x20\n" // offset += tile_j * ld_output_col
- "ldr x28, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "ld1h { z27.h }, p3/Z, [x10]\n"
- "add x27, x13, x13\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x9, x9, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "ld1h { z0.h }, p3/Z, [x10, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x10, #2, MUL VL]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1h { z2.h }, p3/Z, [x10, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x10, #4, MUL VL]\n"
- "add x26, x9, x23, LSL #1\n"
- "ld1h { z4.h }, p3/Z, [x10, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x10, #6, MUL VL]\n"
- "add x25, x26, x23, LSL #1\n"
- "add x24, x27, x13\n"
- "ld1h { z6.h }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "add x28, x28, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1rh { z26.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "cmp x11, %x[n_channels]\n"
- "add x23, x25, x23, LSL #1\n"
- "ld1rh { z25.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "add x22, x28, x22, LSL #1\n"
- "mov x21, #0x0\n"
- "ld1h { z8.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "ld1h { z9.h }, p2/Z, [x26, x13, LSL #1]\n"
- "sub x20, XZR, x11\n"
- "ld1h { z10.h }, p2/Z, [x9]\n"
- "ld1h { z11.h }, p2/Z, [x9, x24, LSL #1]\n"
- "addvl x10, x10, #-6\n"
- "ld1h { z12.h }, p2/Z, [x26, x27, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "mov x12, #0x0\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x22, x17, x24\n" // offset = tile_i * ld_input_row
+ "mul x21, x17, x23\n" // offset = tile_i * ld_output_row
+ "ldr x9, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "cmp x15, %x[n_channels]\n"
+ "ld1rh { z27.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x28, x14, x14\n"
+ "ld1rh { z26.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "sub x20, XZR, x15\n"
+ "madd x22, x16, x14, x22\n" // offset += tile_j * ld_input_col
+ "ld1h { z25.h }, p3/Z, [x11]\n"
+ "ld1h { z0.h }, p3/Z, [x11, #1, MUL VL]\n"
+ "add x27, x28, x14\n"
+ "madd x21, x16, x13, x21\n" // offset += tile_j * ld_output_col
+ "ld1h { z1.h }, p3/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x11, #3, MUL VL]\n"
+ "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
+ "ld1h { z3.h }, p3/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x11, #5, MUL VL]\n"
+ "mul x21, x21, x25\n" // offset *= output_tile_size
+ "ld1h { z5.h }, p3/Z, [x11, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
+ "add x10, x10, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x26, x10, x24, LSL #1\n"
+ "ld1h { z10.h }, p2/Z, [x10]\n"
+ "ld1h { z11.h }, p2/Z, [x10, x27, LSL #1]\n"
+ "add x25, x26, x24, LSL #1\n"
+ "add x9, x9, x21, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x24, x25, x24, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x26, x28, LSL #1]\n"
+ "add x23, x9, x23, LSL #1\n"
+ "ld1h { z7.h }, p3/Z, [x11, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x11, #-7, MUL VL]\n"
+ "addvl x11, x11, #-6\n"
+ "ld1h { z13.h }, p2/Z, [x25, x14, LSL #1]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z24, z27\n fmla z24.h, p3/M, z4.h, z9.h\n"
- "movprfx z23, z27\n fmla z23.h, p3/M, z3.h, z9.h\n"
- "whilelt p1.h, x11, %x[n_channels]\n"
- "inch x21\n"
- "movprfx z22, z27\n fmla z22.h, p3/M, z1.h, z9.h\n"
- "movprfx z21, z27\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "ld1h { z18.h }, p2/Z, [x23]\n"
- "inch x11\n"
+ "movprfx z24, z25\n fmla z24.h, p3/M, z4.h, z9.h\n"
+ "movprfx z23, z25\n fmla z23.h, p3/M, z3.h, z9.h\n"
+ "whilelt p1.h, x15, %x[n_channels]\n"
+ "inch x12\n"
+ "movprfx z22, z25\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "movprfx z21, z25\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z18.h }, p2/Z, [x24]\n"
+ "inch x15\n"
+ "mov p0.b, p2.b\n"
+ "ld1h { z25.h }, p3/Z, [x11]\n"
+ "inch x20\n"
"fmla z24.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z20.h }, p2/Z, [x25, x28, LSL #1]\n"
"fmla z23.h, p3/M, z2.h, z11.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x24, LSL #1]\n"
- "ld1h { z20.h }, p2/Z, [x25, x27, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x24, x27, LSL #1]\n"
"fmla z22.h, p3/M, z2.h, z12.h\n"
"fmla z21.h, p3/M, z1.h, z12.h\n"
- "mov p0.b, p2.b\n"
- "ld1h { z27.h }, p3/Z, [x10]\n"
"fmla z24.h, p3/M, z5.h, z12.h\n"
"fmla z23.h, p3/M, z4.h, z12.h\n"
- "ld1h { z16.h }, p2/Z, [x9, x13, LSL #1]\n"
- "inch x20\n"
+ "ld1h { z16.h }, p2/Z, [x10, x14, LSL #1]\n"
"fmla z22.h, p3/M, z6.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x10, x28, LSL #1]\n"
+ "addvl x10, x10, #1\n"
"fmla z21.h, p3/M, z3.h, z13.h\n"
- "ld1h { z18.h }, p2/Z, [x9, x27, LSL #1]\n"
- "addvl x9, x9, #1\n"
"fmla z24.h, p3/M, z7.h, z13.h\n"
"fmla z23.h, p3/M, z6.h, z13.h\n"
"fmla z22.h, p3/M, z4.h, z13.h\n"
@@ -173,102 +173,102 @@ void sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"ld1h { z17.h }, p2/Z, [x26]\n"
"fmla z24.h, p3/M, z1.h, z16.h\n"
"fmla z23.h, p3/M, z0.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x26, x24, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x26, x27, LSL #1]\n"
"addvl x26, x26, #1\n"
"fmla z22.h, p3/M, z5.h, z20.h\n"
"fmla z21.h, p3/M, z4.h, z20.h\n"
- "ld1h { z4.h }, p3/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x11, #5, MUL VL]\n"
"fmla z24.h, p3/M, z2.h, z18.h\n"
"fmla z23.h, p3/M, z1.h, z18.h\n"
"ld1h { z19.h }, p2/Z, [x25]\n"
- "ld1h { z1.h }, p3/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z1.h }, p3/Z, [x11, #2, MUL VL]\n"
"fmla z22.h, p3/M, z0.h, z17.h\n"
+ "ld1h { z0.h }, p3/Z, [x11, #1, MUL VL]\n"
"fmla z21.h, p3/M, z2.h, z16.h\n"
- "ld1h { z0.h }, p3/Z, [x10, #1, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x11, #3, MUL VL]\n"
"fmla z24.h, p3/M, z8.h, z20.h\n"
"fmla z23.h, p3/M, z7.h, z20.h\n"
- "ld1h { z18.h }, p2/Z, [x25, x24, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x25, x27, LSL #1]\n"
"addvl x25, x25, #1\n"
"fmla z22.h, p3/M, z3.h, z19.h\n"
"fmla z21.h, p3/M, z5.h, z18.h\n"
- "ld1h { z13.h }, p1/Z, [x25, x13, LSL #1]\n"
"fmla z24.h, p3/M, z3.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ld1h { z3.h }, p3/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z13.h }, p1/Z, [x25, x14, LSL #1]\n"
"fmla z23.h, p3/M, z5.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x24, x28, LSL #1]\n"
+ "whilelt p2.h, x12, %x[n_channels]\n"
+ "ld1h { z5.h }, p3/Z, [x11, #6, MUL VL]\n"
+ "cmp x15, %x[n_channels]\n"
+ "addvl x24, x24, #1\n"
"fmla z22.h, p3/M, z7.h, z17.h\n"
"fmla z21.h, p3/M, z6.h, z17.h\n"
- "ld1h { z3.h }, p3/Z, [x10, #4, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z11.h }, p1/Z, [x10, x27, LSL #1]\n"
"fmla z24.h, p3/M, z6.h, z19.h\n"
+ "ld1h { z6.h }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
+ "ld1h { z9.h }, p1/Z, [x26, x14, LSL #1]\n"
"fmla z23.h, p3/M, z8.h, z18.h\n"
- "fmax z24.h, p3/M, z24.h, z26.h\n"
- "fmax z23.h, p3/M, z23.h, z26.h\n"
+ "ld1h { z10.h }, p1/Z, [x10]\n"
"fmla z22.h, p3/M, z8.h, z16.h\n"
"fmla z21.h, p3/M, z7.h, z16.h\n"
- "fmax z22.h, p3/M, z22.h, z26.h\n"
- "fmax z21.h, p3/M, z21.h, z26.h\n"
- "ld1h { z6.h }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "ld1h { z9.h }, p1/Z, [x26, x13, LSL #1]\n"
- "cmp x11, %x[n_channels]\n"
- "fmin z24.h, p3/M, z24.h, z25.h\n"
- "ld1h { z10.h }, p1/Z, [x9]\n"
- "ld1h { z11.h }, p1/Z, [x9, x24, LSL #1]\n"
- "fmin z23.h, p3/M, z23.h, z25.h\n"
- "fmin z22.h, p3/M, z22.h, z25.h\n"
- "ld1h { z12.h }, p1/Z, [x26, x27, LSL #1]\n"
- "st1h { z24.h }, p0, [x28]\n"
- "fmin z21.h, p3/M, z21.h, z25.h\n"
+ "ld1h { z12.h }, p1/Z, [x26, x28, LSL #1]\n"
+ "fmax z24.h, p3/M, z24.h, z27.h\n"
+ "ld1h { z7.h }, p3/Z, [x11, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x11, #-7, MUL VL]\n"
+ "addvl x11, x11, #-6\n"
+ "fmax z23.h, p3/M, z23.h, z27.h\n"
+ "fmin z24.h, p3/M, z24.h, z26.h\n"
+ "fmax z22.h, p3/M, z22.h, z27.h\n"
+ "fmax z21.h, p3/M, z21.h, z27.h\n"
+ "fmin z23.h, p3/M, z23.h, z26.h\n"
+ "fmin z22.h, p3/M, z22.h, z26.h\n"
+ "st1h { z24.h }, p0, [x9]\n"
+ "fmin z21.h, p3/M, z21.h, z26.h\n"
+ "st1h { z23.h }, p0, [x9, x13, LSL #1]\n"
+ "addvl x9, x9, #1\n"
+ "st1h { z22.h }, p0, [x23]\n"
+ "st1h { z21.h }, p0, [x23, x13, LSL #1]\n"
"addvl x23, x23, #1\n"
- "st1h { z23.h }, p0, [x28, x12, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "st1h { z22.h }, p0, [x22]\n"
- "addvl x28, x28, #1\n"
- "ld1h { z8.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "addvl x10, x10, #-6\n"
- "st1h { z21.h }, p0, [x22, x12, LSL #1]\n"
- "addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z24, z27\n fmla z24.h, p3/M, z4.h, z9.h\n"
- "movprfx z23, z27\n fmla z23.h, p3/M, z3.h, z9.h\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z22, z27\n fmla z22.h, p3/M, z1.h, z9.h\n"
- "movprfx z21, z27\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "ld1h { z18.h }, p2/Z, [x23]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "movprfx z24, z25\n fmla z24.h, p3/M, z4.h, z9.h\n"
+ "movprfx z23, z25\n fmla z23.h, p3/M, z3.h, z9.h\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z22, z25\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "movprfx z21, z25\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z18.h }, p2/Z, [x24]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "mov p0.b, p2.b\n"
+ "add x16, x16, #0x1\n"
+ "add x20, x17, #0x1\n"
"fmla z24.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z20.h }, p2/Z, [x25, x28, LSL #1]\n"
"fmla z23.h, p3/M, z2.h, z11.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x24, LSL #1]\n"
- "ld1h { z20.h }, p2/Z, [x25, x27, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x24, x27, LSL #1]\n"
+ "cmp x16, x22\n"
"fmla z22.h, p3/M, z2.h, z12.h\n"
"fmla z21.h, p3/M, z1.h, z12.h\n"
- "add x14, x14, #0x1\n"
- "cmp x14, x20\n"
+ "csel x17, x17, x20, LT\n"
+ "csel x16, x16, XZR, LT\n"
"fmla z24.h, p3/M, z5.h, z12.h\n"
"fmla z23.h, p3/M, z4.h, z12.h\n"
- "ld1h { z16.h }, p2/Z, [x9, x13, LSL #1]\n"
- "add x21, x10, #0x1\n"
+ "ld1h { z16.h }, p2/Z, [x10, x14, LSL #1]\n"
"fmla z22.h, p3/M, z6.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x10, x28, LSL #1]\n"
"fmla z21.h, p3/M, z3.h, z13.h\n"
- "ld1h { z18.h }, p2/Z, [x9, x27, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "cmp x17, x21\n"
"fmla z24.h, p3/M, z7.h, z13.h\n"
"fmla z23.h, p3/M, z6.h, z13.h\n"
- "csel x10, x10, x21, LT\n"
- "mov p0.b, p2.b\n"
"fmla z22.h, p3/M, z4.h, z13.h\n"
"fmla z21.h, p3/M, z8.h, z17.h\n"
"ld1h { z17.h }, p2/Z, [x26]\n"
- "csel x14, x14, XZR, LT\n"
"fmla z24.h, p3/M, z1.h, z16.h\n"
"fmla z23.h, p3/M, z0.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x26, x24, LSL #1]\n"
- "cmp x10, x20\n"
+ "ld1h { z16.h }, p2/Z, [x26, x27, LSL #1]\n"
"fmla z22.h, p3/M, z5.h, z20.h\n"
"fmla z21.h, p3/M, z4.h, z20.h\n"
"fmla z24.h, p3/M, z2.h, z18.h\n"
@@ -278,35 +278,35 @@ void sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"fmla z21.h, p3/M, z2.h, z16.h\n"
"fmla z24.h, p3/M, z8.h, z20.h\n"
"fmla z23.h, p3/M, z7.h, z20.h\n"
- "ld1h { z18.h }, p2/Z, [x25, x24, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x25, x27, LSL #1]\n"
"fmla z22.h, p3/M, z3.h, z19.h\n"
"fmla z21.h, p3/M, z5.h, z18.h\n"
"fmla z24.h, p3/M, z3.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x24, x14, LSL #1]\n"
"fmla z23.h, p3/M, z5.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x24, x28, LSL #1]\n"
"fmla z22.h, p3/M, z7.h, z17.h\n"
"fmla z21.h, p3/M, z6.h, z17.h\n"
"fmla z24.h, p3/M, z6.h, z19.h\n"
"fmla z23.h, p3/M, z8.h, z18.h\n"
- "fmax z24.h, p3/M, z24.h, z26.h\n"
- "fmax z23.h, p3/M, z23.h, z26.h\n"
"fmla z22.h, p3/M, z8.h, z16.h\n"
"fmla z21.h, p3/M, z7.h, z16.h\n"
- "fmax z22.h, p3/M, z22.h, z26.h\n"
- "fmax z21.h, p3/M, z21.h, z26.h\n"
- "fmin z24.h, p3/M, z24.h, z25.h\n"
- "fmin z23.h, p3/M, z23.h, z25.h\n"
- "st1h { z24.h }, p0, [x28]\n"
- "fmin z22.h, p3/M, z22.h, z25.h\n"
- "fmin z21.h, p3/M, z21.h, z25.h\n"
- "st1h { z23.h }, p0, [x28, x12, LSL #1]\n"
- "st1h { z22.h }, p0, [x22]\n"
- "st1h { z21.h }, p0, [x22, x12, LSL #1]\n"
+ "fmax z24.h, p3/M, z24.h, z27.h\n"
+ "fmax z23.h, p3/M, z23.h, z27.h\n"
+ "fmin z24.h, p3/M, z24.h, z26.h\n"
+ "fmin z23.h, p3/M, z23.h, z26.h\n"
+ "fmax z22.h, p3/M, z22.h, z27.h\n"
+ "fmax z21.h, p3/M, z21.h, z27.h\n"
+ "st1h { z24.h }, p0, [x9]\n"
+ "st1h { z23.h }, p0, [x9, x13, LSL #1]\n"
+ "fmin z22.h, p3/M, z22.h, z26.h\n"
+ "fmin z21.h, p3/M, z21.h, z26.h\n"
+ "st1h { z22.h }, p0, [x23]\n"
+ "st1h { z21.h }, p0, [x23, x13, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
);
}
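
Every direct kernel touched by this diff has the same shape: a whilelt-governed channel loop (p1 for the next iteration, p2 for the tail), predicated fmla accumulation over the 3x3 taps, then an fmax/fmin pair clamping against the broadcast activation min/max before the st1h stores. Below is a minimal ACLE-intrinsics sketch of that pattern, illustrative only: the names (channel_loop_sketch, in, w) and the packed per-tap input layout are invented for the sketch, and the real kernel interleaves loads, reuses registers across output pixels, and software-pipelines the next iteration's loads.

    #include <arm_sve.h>

    // Sketch of the predicated channel loop + clamp used by these kernels.
    void channel_loop_sketch(const float16_t *in, const float16_t *w,
                             float16_t *out, unsigned long n_channels,
                             float16_t act_min, float16_t act_max)
    {
        const svfloat16_t vmin = svdup_f16(act_min);   // ld1rh .. offsetof_args_min
        const svfloat16_t vmax = svdup_f16(act_max);   // ld1rh .. offsetof_args_max
        for (unsigned long c = 0; c < n_channels; c += svcnth()) {
            svbool_t pg = svwhilelt_b16(c, n_channels);    // tail predicate, like p2.h
            svfloat16_t acc = svdup_f16((float16_t)0.0);
            for (int tap = 0; tap < 9; ++tap) {            // 3x3 taps, weights z0..z8
                svfloat16_t x = svld1_f16(pg, in + tap * n_channels + c);
                svfloat16_t k = svld1_f16(pg, w  + tap * n_channels + c);
                acc = svmla_f16_m(pg, acc, x, k);          // fmla zN.h, p3/M, ..
            }
            acc = svmax_f16_m(pg, acc, vmin);              // clamp low  (fmax .. min)
            acc = svmin_f16_m(pg, acc, vmax);              // clamp high (fmin .. max)
            svst1_f16(pg, out + c, acc);                   // st1h { zN.h }, p0, ..
        }
    }
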
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 90982b6990..ff85bc51c7 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -83,210 +83,210 @@ void sve_fp16_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
"add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
"cnth x14\n"
- "ldp x13, x12, [x20, #0x0]\n"
- "ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
+ "mov x13, #0x0\n"
+ "ldr x24, [x15, #0x20]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z20.h }, p3/Z, [x16]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
+ "ldp x10, x9, [x20, #0x10]\n"
+ "ld1h { z27.h }, p3/Z, [x16]\n"
"ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
- "cmp x14, %x[n_channels]\n"
"ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
"ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
+ "cmp x14, %x[n_channels]\n"
"sub x28, XZR, x14\n"
"ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
"ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
"ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
"ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
"addvl x16, x16, #16\n"
- "ldp x24, x23, [x15, #0x0]\n"
- "ldp x22, x21, [x15, #0x10]\n"
- "ldr x20, [x15, #0x20]\n"
+ "ldp x23, x22, [x15, #0x0]\n"
+ "ldp x21, x20, [x15, #0x10]\n"
"ld1rh { z26.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"ld1rh { z25.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
"ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
- "ld1h { z9.h }, p2/Z, [x24, x9, LSL #1]\n"
"addvl x16, x16, #-6\n"
- "ld1h { z10.h }, p2/Z, [x23, x9, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z9.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x24, x13, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z24, z20\n fmla z24.h, p3/M, z4.h, z9.h\n"
- "movprfx z23, z20\n fmla z23.h, p3/M, z3.h, z9.h\n"
+ "movprfx z24, z27\n fmla z24.h, p3/M, z4.h, z9.h\n"
+ "movprfx z23, z27\n fmla z23.h, p3/M, z3.h, z9.h\n"
"ldr x21, [x15, #0x28]\n"
- "ldr x20, [x15, #0x30]\n"
- "movprfx z22, z20\n fmla z22.h, p3/M, z1.h, z9.h\n"
- "movprfx z21, z20\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "ld1h { z18.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ldr x22, [x15, #0x38]\n"
+ "ldr x25, [x15, #0x30]\n"
+ "movprfx z22, z27\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "movprfx z21, z27\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ldr x24, [x15, #0x38]\n"
+ "ldr x20, [x15, #0x48]\n"
+ "ldr x23, [x15, #0x40]\n"
+ "ldr x22, [x15, #0x50]\n"
+ "whilelt p1.h, x14, %x[n_channels]\n"
+ "inch x28\n"
+ "ld1h { z18.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ldr x21, [x15, #0x58]\n"
+ "mov p0.b, p2.b\n"
"fmla z24.h, p3/M, z0.h, z10.h\n"
"fmla z23.h, p3/M, z2.h, z11.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0x48]\n"
+ "ld1h { z17.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "ld1h { z20.h }, p2/Z, [x20, x13, LSL #1]\n"
"fmla z22.h, p3/M, z2.h, z12.h\n"
"fmla z21.h, p3/M, z1.h, z12.h\n"
- "ldr x20, [x15, #0x40]\n"
- "ld1h { z20.h }, p2/Z, [x21, x9, LSL #1]\n"
+ "ldr x20, [x15, #0x60]\n"
+ "ldr x27, [x15, #0x68]\n"
+ "ldr x26, [x15, #0x70]\n"
+ "ld1h { z27.h }, p3/Z, [x16]\n"
"fmla z24.h, p3/M, z5.h, z12.h\n"
"fmla z23.h, p3/M, z4.h, z12.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x22, [x15, #0x50]\n"
+ "ld1h { z16.h }, p2/Z, [x24, x13, LSL #1]\n"
+ "ldr x25, [x15, #0x78]\n"
"fmla z22.h, p3/M, z6.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x23, x13, LSL #1]\n"
+ "ldp x24, x23, [x15, #0x0]\n"
"fmla z21.h, p3/M, z3.h, z13.h\n"
- "ld1h { z18.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0x58]\n"
"fmla z24.h, p3/M, z7.h, z13.h\n"
"fmla z23.h, p3/M, z6.h, z13.h\n"
- "ldr x20, [x15, #0x60]\n"
- "ldr x27, [x15, #0x68]\n"
"fmla z22.h, p3/M, z4.h, z13.h\n"
"fmla z21.h, p3/M, z8.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x26, [x15, #0x70]\n"
+ "ld1h { z17.h }, p2/Z, [x22, x13, LSL #1]\n"
"fmla z24.h, p3/M, z1.h, z16.h\n"
"fmla z23.h, p3/M, z0.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ldr x25, [x15, #0x78]\n"
+ "ld1h { z16.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ldp x22, x21, [x15, #0x10]\n"
"fmla z22.h, p3/M, z5.h, z20.h\n"
"fmla z21.h, p3/M, z4.h, z20.h\n"
- "whilelt p1.h, x14, %x[n_channels]\n"
- "ldp x24, x23, [x15, #0x0]\n"
+ "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
"fmla z24.h, p3/M, z2.h, z18.h\n"
"fmla z23.h, p3/M, z1.h, z18.h\n"
- "ld1h { z19.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldp x22, x21, [x15, #0x10]\n"
+ "ld1h { z19.h }, p2/Z, [x20, x13, LSL #1]\n"
+ "ldr x20, [x15, #0x20]\n"
+ "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
"fmla z22.h, p3/M, z0.h, z17.h\n"
+ "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
"fmla z21.h, p3/M, z2.h, z16.h\n"
- "ldr x20, [x15, #0x20]\n"
- "ld1h { z13.h }, p1/Z, [x20, x14, LSL #1]\n"
+ "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
"fmla z24.h, p3/M, z8.h, z20.h\n"
+ "ld1h { z13.h }, p1/Z, [x20, x14, LSL #1]\n"
"fmla z23.h, p3/M, z7.h, z20.h\n"
- "ld1h { z18.h }, p2/Z, [x27, x9, LSL #1]\n"
- "inch x28\n"
+ "ld1h { z18.h }, p2/Z, [x27, x13, LSL #1]\n"
"fmla z22.h, p3/M, z3.h, z19.h\n"
"fmla z21.h, p3/M, z5.h, z18.h\n"
- "mov p0.b, p2.b\n"
- "ld1h { z20.h }, p3/Z, [x16]\n"
"fmla z24.h, p3/M, z3.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x26, x9, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x26, x13, LSL #1]\n"
+ "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
"fmla z23.h, p3/M, z5.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x25, x9, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x25, x13, LSL #1]\n"
+ "inch x13\n"
+ "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
"fmla z22.h, p3/M, z7.h, z17.h\n"
"fmla z21.h, p3/M, z6.h, z17.h\n"
- "inch x9\n"
"ld1h { z11.h }, p1/Z, [x22, x14, LSL #1]\n"
"fmla z24.h, p3/M, z6.h, z19.h\n"
- "fmla z23.h, p3/M, z8.h, z18.h\n"
"ld1h { z9.h }, p1/Z, [x24, x14, LSL #1]\n"
+ "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
+ "addvl x16, x16, #16\n"
+ "fmla z23.h, p3/M, z8.h, z18.h\n"
"ld1h { z10.h }, p1/Z, [x23, x14, LSL #1]\n"
+ "whilelt p2.h, x13, %x[n_channels]\n"
"fmla z22.h, p3/M, z8.h, z16.h\n"
"fmla z21.h, p3/M, z7.h, z16.h\n"
"ld1h { z12.h }, p1/Z, [x21, x14, LSL #1]\n"
"inch x14\n"
"fmax z24.h, p3/M, z24.h, z26.h\n"
+ "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
+ "addvl x16, x16, #-6\n"
"fmax z23.h, p3/M, z23.h, z26.h\n"
- "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
"fmax z22.h, p3/M, z22.h, z26.h\n"
"fmax z21.h, p3/M, z21.h, z26.h\n"
- "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
- "whilelt p2.h, x9, %x[n_channels]\n"
"cmp x14, %x[n_channels]\n"
- "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
"fmin z24.h, p3/M, z24.h, z25.h\n"
- "st1h { z24.h }, p0, [x13, x28, LSL #1]\n"
"fmin z23.h, p3/M, z23.h, z25.h\n"
"fmin z22.h, p3/M, z22.h, z25.h\n"
- "st1h { z23.h }, p0, [x12, x28, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
"fmin z21.h, p3/M, z21.h, z25.h\n"
- "st1h { z22.h }, p0, [x11, x28, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
- "st1h { z21.h }, p0, [x10, x28, LSL #1]\n"
+ "st1h { z24.h }, p0, [x12, x28, LSL #1]\n"
+ "st1h { z23.h }, p0, [x11, x28, LSL #1]\n"
+ "st1h { z22.h }, p0, [x10, x28, LSL #1]\n"
+ "st1h { z21.h }, p0, [x9, x28, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z24, z20\n fmla z24.h, p3/M, z4.h, z9.h\n"
- "movprfx z23, z20\n fmla z23.h, p3/M, z3.h, z9.h\n"
- "ldr x21, [x15, #0x28]\n"
- "ldr x20, [x15, #0x30]\n"
- "movprfx z22, z20\n fmla z22.h, p3/M, z1.h, z9.h\n"
- "movprfx z21, z20\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "ld1h { z18.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ldr x22, [x15, #0x38]\n"
+ "movprfx z24, z27\n fmla z24.h, p3/M, z4.h, z9.h\n"
+ "movprfx z23, z27\n fmla z23.h, p3/M, z3.h, z9.h\n"
+ "ldr x22, [x15, #0x28]\n"
+ "ldr x21, [x15, #0x30]\n"
+ "movprfx z22, z27\n fmla z22.h, p3/M, z1.h, z9.h\n"
+ "movprfx z21, z27\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "ldr x27, [x15, #0x38]\n"
+ "ldr x20, [x15, #0x48]\n"
+ "ldr x26, [x15, #0x40]\n"
+ "ldr x25, [x15, #0x50]\n"
+ "inch x28\n"
+ "mov p0.b, p2.b\n"
+ "ld1h { z18.h }, p2/Z, [x22, x13, LSL #1]\n"
+ "ldr x24, [x15, #0x58]\n"
"fmla z24.h, p3/M, z0.h, z10.h\n"
"fmla z23.h, p3/M, z2.h, z11.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0x48]\n"
+ "ld1h { z17.h }, p2/Z, [x21, x13, LSL #1]\n"
+ "ld1h { z20.h }, p2/Z, [x20, x13, LSL #1]\n"
"fmla z22.h, p3/M, z2.h, z12.h\n"
"fmla z21.h, p3/M, z1.h, z12.h\n"
- "ldr x20, [x15, #0x40]\n"
- "ld1h { z20.h }, p2/Z, [x21, x9, LSL #1]\n"
+ "ldr x23, [x15, #0x60]\n"
+ "ldr x22, [x15, #0x68]\n"
+ "ldr x21, [x15, #0x70]\n"
"fmla z24.h, p3/M, z5.h, z12.h\n"
"fmla z23.h, p3/M, z4.h, z12.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x21, [x15, #0x50]\n"
+ "ld1h { z16.h }, p2/Z, [x27, x13, LSL #1]\n"
+ "ldr x20, [x15, #0x78]\n"
"fmla z22.h, p3/M, z6.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x26, x13, LSL #1]\n"
"fmla z21.h, p3/M, z3.h, z13.h\n"
- "ld1h { z18.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x20, [x15, #0x58]\n"
"fmla z24.h, p3/M, z7.h, z13.h\n"
"fmla z23.h, p3/M, z6.h, z13.h\n"
- "ldr x23, [x15, #0x60]\n"
- "ldr x22, [x15, #0x68]\n"
"fmla z22.h, p3/M, z4.h, z13.h\n"
"fmla z21.h, p3/M, z8.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ldr x21, [x15, #0x70]\n"
+ "ld1h { z17.h }, p2/Z, [x25, x13, LSL #1]\n"
"fmla z24.h, p3/M, z1.h, z16.h\n"
"fmla z23.h, p3/M, z0.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ld1h { z16.h }, p2/Z, [x24, x13, LSL #1]\n"
"fmla z22.h, p3/M, z5.h, z20.h\n"
"fmla z21.h, p3/M, z4.h, z20.h\n"
- "inch x28\n"
- "mov p0.b, p2.b\n"
"fmla z24.h, p3/M, z2.h, z18.h\n"
"fmla z23.h, p3/M, z1.h, z18.h\n"
- "ld1h { z19.h }, p2/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z19.h }, p2/Z, [x23, x13, LSL #1]\n"
"fmla z22.h, p3/M, z0.h, z17.h\n"
"fmla z21.h, p3/M, z2.h, z16.h\n"
"fmla z24.h, p3/M, z8.h, z20.h\n"
"fmla z23.h, p3/M, z7.h, z20.h\n"
- "ld1h { z18.h }, p2/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x22, x13, LSL #1]\n"
"fmla z22.h, p3/M, z3.h, z19.h\n"
"fmla z21.h, p3/M, z5.h, z18.h\n"
"fmla z24.h, p3/M, z3.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x21, x13, LSL #1]\n"
"fmla z23.h, p3/M, z5.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x20, x13, LSL #1]\n"
"fmla z22.h, p3/M, z7.h, z17.h\n"
"fmla z21.h, p3/M, z6.h, z17.h\n"
"fmla z24.h, p3/M, z6.h, z19.h\n"
"fmla z23.h, p3/M, z8.h, z18.h\n"
- "fmax z24.h, p3/M, z24.h, z26.h\n"
- "fmax z23.h, p3/M, z23.h, z26.h\n"
"fmla z22.h, p3/M, z8.h, z16.h\n"
"fmla z21.h, p3/M, z7.h, z16.h\n"
- "fmax z22.h, p3/M, z22.h, z26.h\n"
- "fmax z21.h, p3/M, z21.h, z26.h\n"
+ "fmax z24.h, p3/M, z24.h, z26.h\n"
+ "fmax z23.h, p3/M, z23.h, z26.h\n"
"fmin z24.h, p3/M, z24.h, z25.h\n"
"fmin z23.h, p3/M, z23.h, z25.h\n"
- "st1h { z24.h }, p0, [x13, x28, LSL #1]\n"
+ "fmax z22.h, p3/M, z22.h, z26.h\n"
+ "fmax z21.h, p3/M, z21.h, z26.h\n"
+ "st1h { z24.h }, p0, [x12, x28, LSL #1]\n"
+ "st1h { z23.h }, p0, [x11, x28, LSL #1]\n"
"fmin z22.h, p3/M, z22.h, z25.h\n"
"fmin z21.h, p3/M, z21.h, z25.h\n"
- "st1h { z23.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z22.h }, p0, [x11, x28, LSL #1]\n"
- "st1h { z21.h }, p0, [x10, x28, LSL #1]\n"
+ "st1h { z22.h }, p0, [x10, x28, LSL #1]\n"
+ "st1h { z21.h }, p0, [x9, x28, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
);
}
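
The _indirect_impl variant above differs from the direct one only in addressing: instead of computing row offsets from ld_input_row/ld_input_col, it reads each patch element through the Args::inptrs pointer table (the ldp/ldr loads from x15) and offsets every pointer by the current channel position, as in "ld1h { z18.h }, p2/Z, [x21, x13, LSL #1]". A hedged sketch of that scheme follows, reduced to one output pixel where the generated kernel keeps four accumulators (z21..z24) over a 16-entry table for its 4x4 input patch; indirect_sketch and taps are invented names.

    #include <arm_sve.h>

    // One output pixel of a 3x3 depthwise tap sum, inputs fetched indirectly.
    void indirect_sketch(const float16_t *const inptrs[16],
                         const float16_t *weights, float16_t *outptr,
                         unsigned long n_channels,
                         const int taps[9]) // which of the 16 patch rows feed this pixel
    {
        for (unsigned long c = 0; c < n_channels; c += svcnth()) {
            svbool_t pg = svwhilelt_b16(c, n_channels);
            svfloat16_t acc = svdup_f16((float16_t)0.0);
            for (int t = 0; t < 9; ++t) {
                // table pointer + channel offset, i.e. [xN, x13, LSL #1]
                svfloat16_t x = svld1_f16(pg, inptrs[taps[t]] + c);
                svfloat16_t k = svld1_f16(pg, weights + t * n_channels + c);
                acc = svmla_f16_m(pg, acc, x, k);
            }
            svst1_f16(pg, outptr + c, acc);
        }
    }
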
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index a22ab39d6f..0b903917bc 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,369 +88,369 @@ void sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x13, #0x0\n"
- "mov x8, #0x0\n"
+ "mov x5, #0x0\n"
+ "mov x6, #0x0\n"
"1:" // Tile loop
- "str x13, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x3\n"
"mov x25, #0x3\n"
- "mov x24, #0x3\n"
- "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x13, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cnth x15\n"
- "mul x20, x13, x21\n" // offset = tile_i * ld_output_row
- "ldr x14, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x12, x17, x17\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x14, x14, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x10, x14, x23, LSL #1\n"
- "madd x20, x8, x16, x20\n" // offset += tile_j * ld_output_col
- "add x9, x10, x23, LSL #1\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "cnth x8\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z14.h }, p3/Z, [x13]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1h { z0.h }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x13, #2, MUL VL]\n"
- "add x28, x9, x23, LSL #1\n"
- "ld1h { z2.h }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x13, #4, MUL VL]\n"
- "add x27, x12, x17\n"
- "add x11, x11, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1h { z4.h }, p3/Z, [x13, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x13, #6, MUL VL]\n"
- "add x26, x28, x23, LSL #1\n"
- "add x25, x27, x17\n"
- "ld1h { z6.h }, p3/Z, [x13, #7, MUL VL]\n"
- "addvl x13, x13, #16\n"
- "add x24, x11, x21, LSL #1\n"
- "ld1rh { z31.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "cmp x15, %x[n_channels]\n"
- "add x23, x24, x21, LSL #1\n"
- "ld1rh { z30.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x13, #-8, MUL VL]\n"
- "add x22, x16, x16\n"
- "mov x21, #0x0\n"
- "ld1h { z8.h }, p3/Z, [x13, #-7, MUL VL]\n"
- "ld1h { z9.h }, p2/Z, [x9, x12, LSL #1]\n"
- "sub x20, XZR, x15\n"
- "ld1h { z10.h }, p2/Z, [x14]\n"
- "ld1h { z11.h }, p2/Z, [x14, x25, LSL #1]\n"
- "addvl x13, x13, #-6\n"
- "ld1h { z12.h }, p2/Z, [x26]\n"
- "ld1h { z13.h }, p2/Z, [x10, x12, LSL #1]\n"
+ "mov x16, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x22, x5, x24\n" // offset = tile_i * ld_input_row
+ "ldr x13, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x12, x7, x7\n"
+ "cmp x8, %x[n_channels]\n"
+ "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mul x21, x5, x23\n" // offset = tile_i * ld_output_row
+ "add x11, x12, x7\n"
+ "add x10, x17, x17\n"
+ "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "madd x22, x6, x7, x22\n" // offset += tile_j * ld_input_col
+ "ld1h { z31.h }, p3/Z, [x14]\n"
+ "ld1h { z0.h }, p3/Z, [x14, #1, MUL VL]\n"
+ "add x9, x11, x7\n"
+ "ld1h { z1.h }, p3/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x14, #3, MUL VL]\n"
+ "sub x20, XZR, x8\n"
+ "madd x21, x6, x17, x21\n" // offset += tile_j * ld_output_col
+ "ld1h { z3.h }, p3/Z, [x14, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x14, #5, MUL VL]\n"
+ "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
+ "ld1h { z5.h }, p3/Z, [x14, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x14, #7, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "mul x21, x21, x25\n" // offset *= output_tile_size
+ "add x15, x15, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x28, x15, x24, LSL #1\n"
+ "add x27, x28, x24, LSL #1\n"
+ "ld1h { z10.h }, p2/Z, [x15]\n"
+ "ld1h { z11.h }, p2/Z, [x15, x9, LSL #1]\n"
+ "add x26, x27, x24, LSL #1\n"
+ "add x13, x13, x21, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x25, x26, x24, LSL #1\n"
+ "ld1h { z7.h }, p3/Z, [x14, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x14, #-7, MUL VL]\n"
+ "add x24, x13, x23, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x27, x12, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x25]\n"
+ "addvl x14, x14, #-6\n"
+ "add x23, x24, x23, LSL #1\n"
+ "ld1h { z13.h }, p2/Z, [x28, x12, LSL #1]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z29, z14\n fmla z29.h, p3/M, z7.h, z9.h\n"
- "movprfx z28, z14\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "whilelt p1.h, x15, %x[n_channels]\n"
- "inch x21\n"
- "movprfx z27, z14\n fmla z27.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z4.h, z13.h\n"
- "inch x15\n"
+ "movprfx z30, z31\n fmla z30.h, p3/M, z7.h, z9.h\n"
+ "movprfx z29, z31\n fmla z29.h, p3/M, z8.h, z9.h\n"
+ "whilelt p1.h, x8, %x[n_channels]\n"
+ "inch x16\n"
+ "movprfx z28, z31\n fmla z28.h, p3/M, z6.h, z9.h\n"
+ "movprfx z27, z31\n fmla z27.h, p3/M, z5.h, z9.h\n"
+ "inch x8\n"
"mov p0.b, p2.b\n"
- "movprfx z26, z14\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "movprfx z25, z14\n fmla z25.h, p3/M, z4.h, z9.h\n"
+ "movprfx z26, z31\n fmla z26.h, p3/M, z4.h, z9.h\n"
+ "movprfx z25, z31\n fmla z25.h, p3/M, z3.h, z9.h\n"
"inch x20\n"
- "movprfx z24, z14\n fmla z24.h, p3/M, z3.h, z9.h\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z23.h }, p2/Z, [x9, x27, LSL #1]\n"
- "fmla z27.h, p3/M, z2.h, z11.h\n"
- "ld1h { z18.h }, p2/Z, [x9, x17, LSL #1]\n"
- "movprfx z22, z14\n fmla z22.h, p3/M, z2.h, z9.h\n"
- "fmla z29.h, p3/M, z6.h, z18.h\n"
- "movprfx z21, z14\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z27.h, p3/M, z3.h, z13.h\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
- "fmla z25.h, p3/M, z1.h, z13.h\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "ld1h { z17.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z22.h, p3/M, z6.h, z12.h\n"
- "ld1h { z16.h }, p2/Z, [x26, x25, LSL #1]\n"
- "movprfx z20, z14\n fmla z20.h, p3/M, z1.h, z9.h\n"
- "fmla z29.h, p3/M, z0.h, z17.h\n"
- "ld1h { z14.h }, p3/Z, [x13]\n"
- "fmla z21.h, p3/M, z8.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x14, x27, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z18.h\n"
- "fmla z20.h, p3/M, z0.h, z18.h\n"
- "fmla z26.h, p3/M, z4.h, z18.h\n"
- "fmla z25.h, p3/M, z3.h, z18.h\n"
- "fmla z22.h, p3/M, z1.h, z18.h\n"
- "ld1h { z19.h }, p2/Z, [x10]\n"
- "fmla z29.h, p3/M, z2.h, z16.h\n"
- "fmla z27.h, p3/M, z1.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x28]\n"
- "fmla z24.h, p3/M, z4.h, z23.h\n"
- "fmla z28.h, p3/M, z1.h, z17.h\n"
- "ld1h { z16.h }, p2/Z, [x10, x25, LSL #1]\n"
- "fmla z20.h, p3/M, z2.h, z23.h\n"
- "fmla z21.h, p3/M, z1.h, z23.h\n"
- "fmla z29.h, p3/M, z8.h, z23.h\n"
- "fmla z27.h, p3/M, z7.h, z23.h\n"
- "fmla z25.h, p3/M, z5.h, z23.h\n"
- "fmla z26.h, p3/M, z0.h, z19.h\n"
- "ld1h { z17.h }, p2/Z, [x28, x12, LSL #1]\n"
- "fmla z22.h, p3/M, z3.h, z18.h\n"
- "fmla z24.h, p3/M, z2.h, z16.h\n"
- "fmla z20.h, p3/M, z4.h, z17.h\n"
- "fmla z21.h, p3/M, z3.h, z17.h\n"
- "fmla z28.h, p3/M, z3.h, z19.h\n"
- "fmla z27.h, p3/M, z5.h, z16.h\n"
- "ld1h { z19.h }, p2/Z, [x28, x25, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x26, x17, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z18.h\n"
- "fmla z25.h, p3/M, z7.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x10, x17, LSL #1]\n"
- "fmla z22.h, p3/M, z5.h, z17.h\n"
- "fmla z24.h, p3/M, z6.h, z17.h\n"
- "fmla z21.h, p3/M, z5.h, z19.h\n"
- "fmla z20.h, p3/M, z6.h, z16.h\n"
- "fmla z26.h, p3/M, z8.h, z17.h\n"
- "fmla z22.h, p3/M, z7.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x26, x27, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z18.h\n"
- "fmla z25.h, p3/M, z0.h, z18.h\n"
- "fmla z24.h, p3/M, z8.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x10, x27, LSL #1]\n"
- "fmla z20.h, p3/M, z8.h, z17.h\n"
- "addvl x10, x10, #1\n"
- "fmla z21.h, p3/M, z7.h, z17.h\n"
- "fmla z28.h, p3/M, z4.h, z18.h\n"
- "ld1h { z19.h }, p2/Z, [x28, x27, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z18.h\n"
- "fmla z29.h, p3/M, z5.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x28, x17, LSL #1]\n"
- "addvl x28, x28, #1\n"
- "fmla z27.h, p3/M, z4.h, z16.h\n"
+ "movprfx z24, z31\n fmla z24.h, p3/M, z2.h, z9.h\n"
+ "movprfx z23, z31\n fmla z23.h, p3/M, z0.h, z9.h\n"
+ "fmla z30.h, p3/M, z4.h, z13.h\n"
+ "fmla z29.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z22.h }, p2/Z, [x27, x11, LSL #1]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, x7, LSL #1]\n"
+ "fmla z27.h, p3/M, z2.h, z13.h\n"
+ "fmla z26.h, p3/M, z1.h, z13.h\n"
+ "fmla z25.h, p3/M, z0.h, z13.h\n"
+ "fmla z24.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x9, LSL #1]\n"
+ "movprfx z21, z31\n fmla z21.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z31.h }, p3/Z, [x14]\n"
+ "fmla z30.h, p3/M, z6.h, z17.h\n"
+ "fmla z29.h, p3/M, z5.h, z13.h\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, x7, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z17.h\n"
+ "fmla z23.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, x11, LSL #1]\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
+ "fmla z21.h, p3/M, z0.h, z17.h\n"
+ "fmla z24.h, p3/M, z1.h, z17.h\n"
+ "fmla z30.h, p3/M, z0.h, z18.h\n"
+ "fmla z29.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x28]\n"
+ "fmla z28.h, p3/M, z1.h, z16.h\n"
+ "fmla z25.h, p3/M, z4.h, z22.h\n"
+ "fmla z23.h, p3/M, z1.h, z22.h\n"
+ "fmla z26.h, p3/M, z5.h, z22.h\n"
+ "fmla z21.h, p3/M, z2.h, z22.h\n"
+ "fmla z27.h, p3/M, z0.h, z20.h\n"
+ "fmla z30.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x26]\n"
+ "fmla z29.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x9, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z22.h\n"
+ "fmla z24.h, p3/M, z3.h, z17.h\n"
"fmla z25.h, p3/M, z2.h, z16.h\n"
- "fmla z24.h, p3/M, z1.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x14, x12, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z17.h\n"
- "addvl x14, x14, #1\n"
- "fmla z20.h, p3/M, z3.h, z17.h\n"
- "fmla z21.h, p3/M, z4.h, z19.h\n"
- "ld1h { z4.h }, p3/Z, [x13, #5, MUL VL]\n"
- "ld1h { z10.h }, p1/Z, [x14]\n"
- "fmla z26.h, p3/M, z7.h, z17.h\n"
- "fmla z25.h, p3/M, z6.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x9]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "fmla z29.h, p3/M, z1.h, z16.h\n"
- "fmax z29.h, p3/M, z29.h, z31.h\n"
- "ld1h { z1.h }, p3/Z, [x13, #2, MUL VL]\n"
- "fmla z27.h, p3/M, z0.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x9, x25, LSL #1]\n"
- "fmla z24.h, p3/M, z7.h, z19.h\n"
- "addvl x9, x9, #1\n"
- "fmla z20.h, p3/M, z5.h, z19.h\n"
- "fmla z22.h, p3/M, z0.h, z18.h\n"
- "ld1h { z0.h }, p3/Z, [x13, #1, MUL VL]\n"
- "fmin z29.h, p3/M, z29.h, z30.h\n"
- "fmla z21.h, p3/M, z2.h, z17.h\n"
- "fmla z25.h, p3/M, z8.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x26, x12, LSL #1]\n"
- "fmax z25.h, p3/M, z25.h, z31.h\n"
- "fmla z28.h, p3/M, z6.h, z18.h\n"
- "fmla z26.h, p3/M, z3.h, z18.h\n"
- "fmax z28.h, p3/M, z28.h, z31.h\n"
- "fmax z26.h, p3/M, z26.h, z31.h\n"
- "fmla z27.h, p3/M, z8.h, z17.h\n"
- "fmla z24.h, p3/M, z5.h, z17.h\n"
- "fmax z27.h, p3/M, z27.h, z31.h\n"
- "fmax z24.h, p3/M, z24.h, z31.h\n"
- "fmla z22.h, p3/M, z8.h, z16.h\n"
- "fmla z20.h, p3/M, z7.h, z16.h\n"
- "fmax z22.h, p3/M, z22.h, z31.h\n"
- "fmax z20.h, p3/M, z20.h, z31.h\n"
+ "fmla z27.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x28, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z22.h\n"
+ "ld1h { z18.h }, p2/Z, [x26, x12, LSL #1]\n"
+ "fmla z29.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x9, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x7, LSL #1]\n"
+ "fmla z21.h, p3/M, z4.h, z18.h\n"
+ "fmla z23.h, p3/M, z3.h, z18.h\n"
+ "fmla z26.h, p3/M, z7.h, z18.h\n"
+ "fmla z24.h, p3/M, z5.h, z18.h\n"
+ "fmla z25.h, p3/M, z6.h, z18.h\n"
+ "fmla z27.h, p3/M, z8.h, z18.h\n"
+ "fmla z30.h, p3/M, z3.h, z19.h\n"
"fmla z21.h, p3/M, z6.h, z16.h\n"
- "fmax z21.h, p3/M, z21.h, z31.h\n"
+ "fmla z29.h, p3/M, z4.h, z19.h\n"
+ "fmla z23.h, p3/M, z5.h, z17.h\n"
+ "fmla z26.h, p3/M, z0.h, z19.h\n"
+ "fmla z24.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x25, x11, LSL #1]\n"
+ "fmla z25.h, p3/M, z8.h, z17.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x11, LSL #1]\n"
+ "fmla z27.h, p3/M, z1.h, z19.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x7, LSL #1]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z21.h, p3/M, z8.h, z18.h\n"
+ "fmla z23.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x26, x11, LSL #1]\n"
"addvl x26, x26, #1\n"
- "ld1h { z2.h }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x13, #4, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x13, #6, MUL VL]\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1h { z6.h }, p3/Z, [x13, #7, MUL VL]\n"
- "addvl x13, x13, #16\n"
- "fmin z28.h, p3/M, z28.h, z30.h\n"
- "ld1h { z9.h }, p1/Z, [x9, x12, LSL #1]\n"
- "fmin z27.h, p3/M, z27.h, z30.h\n"
- "fmin z26.h, p3/M, z26.h, z30.h\n"
- "ld1h { z11.h }, p1/Z, [x14, x25, LSL #1]\n"
- "ld1h { z12.h }, p1/Z, [x26]\n"
- "fmin z25.h, p3/M, z25.h, z30.h\n"
- "fmin z24.h, p3/M, z24.h, z30.h\n"
- "ld1h { z13.h }, p1/Z, [x10, x12, LSL #1]\n"
- "st1h { z28.h }, p0, [x11]\n"
- "fmin z22.h, p3/M, z22.h, z30.h\n"
- "fmin z20.h, p3/M, z20.h, z30.h\n"
- "st1h { z29.h }, p0, [x11, x16, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x13, #-8, MUL VL]\n"
- "fmin z21.h, p3/M, z21.h, z30.h\n"
- "st1h { z27.h }, p0, [x11, x22, LSL #1]\n"
- "addvl x11, x11, #1\n"
- "ld1h { z8.h }, p3/Z, [x13, #-7, MUL VL]\n"
- "st1h { z26.h }, p0, [x24]\n"
- "addvl x13, x13, #-6\n"
- "st1h { z25.h }, p0, [x24, x16, LSL #1]\n"
- "st1h { z24.h }, p0, [x24, x22, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z16.h\n"
+ "fmla z28.h, p3/M, z4.h, z16.h\n"
+ "fmla z26.h, p3/M, z2.h, z16.h\n"
+ "fmla z25.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, x12, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z17.h\n"
+ "addvl x15, x15, #1\n"
+ "fmla z21.h, p3/M, z3.h, z17.h\n"
+ "fmla z27.h, p3/M, z7.h, z17.h\n"
+ "fmla z23.h, p3/M, z4.h, z19.h\n"
+ "ld1h { z4.h }, p3/Z, [x14, #5, MUL VL]\n"
+ "fmla z26.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x27]\n"
+ "fmla z29.h, p3/M, z2.h, z16.h\n"
+ "fmla z30.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z1.h }, p3/Z, [x14, #2, MUL VL]\n"
+ "ld1h { z10.h }, p1/Z, [x15]\n"
+ "fmla z28.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, x9, LSL #1]\n"
+ "fmla z25.h, p3/M, z7.h, z19.h\n"
+ "addvl x27, x27, #1\n"
+ "fmla z21.h, p3/M, z5.h, z19.h\n"
+ "fmla z24.h, p3/M, z0.h, z18.h\n"
+ "ld1h { z0.h }, p3/Z, [x14, #1, MUL VL]\n"
+ "fmla z26.h, p3/M, z8.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x12, LSL #1]\n"
+ "fmla z27.h, p3/M, z3.h, z18.h\n"
+ "addvl x25, x25, #1\n"
+ "fmla z23.h, p3/M, z2.h, z17.h\n"
+ "fmla z29.h, p3/M, z6.h, z18.h\n"
+ "fmax z30.h, p3/M, z30.h, z15.h\n"
+ "ld1h { z2.h }, p3/Z, [x14, #3, MUL VL]\n"
+ "fmla z28.h, p3/M, z8.h, z17.h\n"
+ "fmla z25.h, p3/M, z5.h, z17.h\n"
+ "ld1h { z3.h }, p3/Z, [x14, #4, MUL VL]\n"
+ "ld1h { z5.h }, p3/Z, [x14, #6, MUL VL]\n"
+ "fmla z24.h, p3/M, z8.h, z16.h\n"
+ "fmla z21.h, p3/M, z7.h, z16.h\n"
+ "whilelt p2.h, x16, %x[n_channels]\n"
+ "cmp x8, %x[n_channels]\n"
+ "fmax z27.h, p3/M, z27.h, z15.h\n"
+ "fmax z26.h, p3/M, z26.h, z15.h\n"
+ "ld1h { z9.h }, p1/Z, [x27, x12, LSL #1]\n"
+ "ld1h { z11.h }, p1/Z, [x15, x9, LSL #1]\n"
+ "fmla z23.h, p3/M, z6.h, z16.h\n"
+ "fmax z29.h, p3/M, z29.h, z15.h\n"
+ "ld1h { z6.h }, p3/Z, [x14, #7, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "fmax z28.h, p3/M, z28.h, z15.h\n"
+ "fmax z25.h, p3/M, z25.h, z15.h\n"
+ "ld1h { z12.h }, p1/Z, [x25]\n"
+ "ld1h { z13.h }, p1/Z, [x28, x12, LSL #1]\n"
+ "fmax z24.h, p3/M, z24.h, z15.h\n"
+ "fmax z21.h, p3/M, z21.h, z15.h\n"
+ "fmin z29.h, p3/M, z29.h, z14.h\n"
+ "fmin z30.h, p3/M, z30.h, z14.h\n"
+ "ld1h { z7.h }, p3/Z, [x14, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x14, #-7, MUL VL]\n"
+ "fmax z23.h, p3/M, z23.h, z15.h\n"
+ "fmin z28.h, p3/M, z28.h, z14.h\n"
+ "fmin z27.h, p3/M, z27.h, z14.h\n"
+ "fmin z26.h, p3/M, z26.h, z14.h\n"
+ "fmin z25.h, p3/M, z25.h, z14.h\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
+ "st1h { z29.h }, p0, [x13]\n"
+ "fmin z21.h, p3/M, z21.h, z14.h\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
+ "st1h { z30.h }, p0, [x13, x17, LSL #1]\n"
+ "st1h { z28.h }, p0, [x13, x10, LSL #1]\n"
+ "addvl x13, x13, #1\n"
+ "addvl x14, x14, #-6\n"
+ "st1h { z27.h }, p0, [x24]\n"
+ "st1h { z26.h }, p0, [x24, x17, LSL #1]\n"
+ "st1h { z25.h }, p0, [x24, x10, LSL #1]\n"
"addvl x24, x24, #1\n"
- "st1h { z22.h }, p0, [x23]\n"
- "st1h { z20.h }, p0, [x23, x16, LSL #1]\n"
- "st1h { z21.h }, p0, [x23, x22, LSL #1]\n"
+ "st1h { z24.h }, p0, [x23]\n"
+ "st1h { z21.h }, p0, [x23, x17, LSL #1]\n"
+ "st1h { z23.h }, p0, [x23, x10, LSL #1]\n"
"addvl x23, x23, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z29, z14\n fmla z29.h, p3/M, z7.h, z9.h\n"
- "movprfx z28, z14\n fmla z28.h, p3/M, z8.h, z9.h\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z27, z14\n fmla z27.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z4.h, z13.h\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "add x8, x8, #0x1\n"
- "movprfx z26, z14\n fmla z26.h, p3/M, z5.h, z9.h\n"
- "movprfx z25, z14\n fmla z25.h, p3/M, z4.h, z9.h\n"
- "cmp x8, x20\n"
- "add x21, x13, #0x1\n"
- "movprfx z24, z14\n fmla z24.h, p3/M, z3.h, z9.h\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z23.h }, p2/Z, [x9, x27, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z27.h, p3/M, z2.h, z11.h\n"
- "ld1h { z18.h }, p2/Z, [x9, x17, LSL #1]\n"
- "movprfx z22, z14\n fmla z22.h, p3/M, z2.h, z9.h\n"
- "csel x13, x13, x21, LT\n"
- "fmla z29.h, p3/M, z6.h, z18.h\n"
- "movprfx z21, z14\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "movprfx z30, z31\n fmla z30.h, p3/M, z7.h, z9.h\n"
+ "movprfx z29, z31\n fmla z29.h, p3/M, z8.h, z9.h\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z28, z31\n fmla z28.h, p3/M, z6.h, z9.h\n"
+ "movprfx z27, z31\n fmla z27.h, p3/M, z5.h, z9.h\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z26, z31\n fmla z26.h, p3/M, z4.h, z9.h\n"
+ "movprfx z25, z31\n fmla z25.h, p3/M, z3.h, z9.h\n"
"mov p0.b, p2.b\n"
- "csel x8, x8, XZR, LT\n"
- "fmla z28.h, p3/M, z5.h, z13.h\n"
- "fmla z27.h, p3/M, z3.h, z13.h\n"
- "cmp x13, x20\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
- "fmla z25.h, p3/M, z1.h, z13.h\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "ld1h { z17.h }, p2/Z, [x14, x17, LSL #1]\n"
- "fmla z22.h, p3/M, z6.h, z12.h\n"
- "ld1h { z16.h }, p2/Z, [x26, x25, LSL #1]\n"
- "movprfx z20, z14\n fmla z20.h, p3/M, z1.h, z9.h\n"
- "fmla z29.h, p3/M, z0.h, z17.h\n"
- "fmla z21.h, p3/M, z8.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x14, x27, LSL #1]\n"
- "fmla z28.h, p3/M, z7.h, z18.h\n"
- "fmla z20.h, p3/M, z0.h, z18.h\n"
- "fmla z26.h, p3/M, z4.h, z18.h\n"
- "fmla z25.h, p3/M, z3.h, z18.h\n"
- "fmla z22.h, p3/M, z1.h, z18.h\n"
- "ld1h { z19.h }, p2/Z, [x10]\n"
- "fmla z29.h, p3/M, z2.h, z16.h\n"
- "fmla z27.h, p3/M, z1.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x28]\n"
- "fmla z24.h, p3/M, z4.h, z23.h\n"
- "fmla z28.h, p3/M, z1.h, z17.h\n"
- "ld1h { z16.h }, p2/Z, [x10, x25, LSL #1]\n"
- "fmla z20.h, p3/M, z2.h, z23.h\n"
- "fmla z21.h, p3/M, z1.h, z23.h\n"
- "fmla z29.h, p3/M, z8.h, z23.h\n"
- "fmla z27.h, p3/M, z7.h, z23.h\n"
- "fmla z25.h, p3/M, z5.h, z23.h\n"
+ "movprfx z24, z31\n fmla z24.h, p3/M, z2.h, z9.h\n"
+ "movprfx z23, z31\n fmla z23.h, p3/M, z0.h, z9.h\n"
+ "add x6, x6, #0x1\n"
+ "add x20, x5, #0x1\n"
+ "fmla z30.h, p3/M, z4.h, z13.h\n"
+ "fmla z29.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z22.h }, p2/Z, [x27, x11, LSL #1]\n"
+ "cmp x6, x22\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, x7, LSL #1]\n"
+ "fmla z27.h, p3/M, z2.h, z13.h\n"
+ "csel x5, x5, x20, LT\n"
+ "fmla z26.h, p3/M, z1.h, z13.h\n"
+ "fmla z25.h, p3/M, z0.h, z13.h\n"
+ "csel x6, x6, XZR, LT\n"
+ "fmla z24.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x9, LSL #1]\n"
+ "movprfx z21, z31\n fmla z21.h, p3/M, z1.h, z9.h\n"
+ "fmla z30.h, p3/M, z6.h, z17.h\n"
+ "fmla z29.h, p3/M, z5.h, z13.h\n"
+ "cmp x5, x21\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, x7, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z17.h\n"
+ "fmla z23.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, x11, LSL #1]\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
+ "fmla z21.h, p3/M, z0.h, z17.h\n"
+ "fmla z24.h, p3/M, z1.h, z17.h\n"
+ "fmla z30.h, p3/M, z0.h, z18.h\n"
+ "fmla z29.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x28]\n"
+ "fmla z28.h, p3/M, z1.h, z16.h\n"
+ "fmla z25.h, p3/M, z4.h, z22.h\n"
+ "fmla z23.h, p3/M, z1.h, z22.h\n"
+ "fmla z26.h, p3/M, z5.h, z22.h\n"
+ "fmla z21.h, p3/M, z2.h, z22.h\n"
+ "fmla z27.h, p3/M, z0.h, z20.h\n"
+ "fmla z30.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x26]\n"
+ "fmla z29.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x9, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z22.h\n"
+ "fmla z24.h, p3/M, z3.h, z17.h\n"
+ "fmla z25.h, p3/M, z2.h, z16.h\n"
+ "fmla z27.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x28, x7, LSL #1]\n"
+ "fmla z30.h, p3/M, z8.h, z22.h\n"
+ "ld1h { z18.h }, p2/Z, [x26, x12, LSL #1]\n"
+ "fmla z29.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x9, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x7, LSL #1]\n"
+ "fmla z21.h, p3/M, z4.h, z18.h\n"
+ "fmla z23.h, p3/M, z3.h, z18.h\n"
+ "fmla z26.h, p3/M, z7.h, z18.h\n"
+ "fmla z24.h, p3/M, z5.h, z18.h\n"
+ "fmla z25.h, p3/M, z6.h, z18.h\n"
+ "fmla z27.h, p3/M, z8.h, z18.h\n"
+ "fmla z30.h, p3/M, z3.h, z19.h\n"
+ "fmla z21.h, p3/M, z6.h, z16.h\n"
+ "fmla z29.h, p3/M, z4.h, z19.h\n"
+ "fmla z23.h, p3/M, z5.h, z17.h\n"
"fmla z26.h, p3/M, z0.h, z19.h\n"
- "ld1h { z17.h }, p2/Z, [x28, x12, LSL #1]\n"
- "fmla z22.h, p3/M, z3.h, z18.h\n"
- "fmla z24.h, p3/M, z2.h, z16.h\n"
- "fmla z20.h, p3/M, z4.h, z17.h\n"
+ "fmla z24.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x25, x11, LSL #1]\n"
+ "fmla z25.h, p3/M, z8.h, z17.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x11, LSL #1]\n"
+ "fmla z27.h, p3/M, z1.h, z19.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x7, LSL #1]\n"
+ "fmla z21.h, p3/M, z8.h, z18.h\n"
+ "fmla z23.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x26, x11, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z16.h\n"
+ "fmla z28.h, p3/M, z4.h, z16.h\n"
+ "fmla z26.h, p3/M, z2.h, z16.h\n"
+ "fmla z25.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, x12, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z17.h\n"
"fmla z21.h, p3/M, z3.h, z17.h\n"
- "fmla z28.h, p3/M, z3.h, z19.h\n"
- "fmla z27.h, p3/M, z5.h, z16.h\n"
- "ld1h { z19.h }, p2/Z, [x28, x25, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x26, x17, LSL #1]\n"
- "fmla z26.h, p3/M, z6.h, z18.h\n"
- "fmla z25.h, p3/M, z7.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x10, x17, LSL #1]\n"
- "fmla z22.h, p3/M, z5.h, z17.h\n"
- "fmla z24.h, p3/M, z6.h, z17.h\n"
+ "fmla z27.h, p3/M, z7.h, z17.h\n"
+ "fmla z23.h, p3/M, z4.h, z19.h\n"
+ "fmla z26.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x27]\n"
+ "fmla z29.h, p3/M, z2.h, z16.h\n"
+ "fmla z30.h, p3/M, z1.h, z16.h\n"
+ "fmla z28.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, x9, LSL #1]\n"
+ "fmla z25.h, p3/M, z7.h, z19.h\n"
"fmla z21.h, p3/M, z5.h, z19.h\n"
- "fmla z20.h, p3/M, z6.h, z16.h\n"
- "fmla z26.h, p3/M, z8.h, z17.h\n"
- "fmla z22.h, p3/M, z7.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x26, x27, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z18.h\n"
- "fmla z25.h, p3/M, z0.h, z18.h\n"
- "fmla z24.h, p3/M, z8.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x10, x27, LSL #1]\n"
- "fmla z20.h, p3/M, z8.h, z17.h\n"
- "fmla z21.h, p3/M, z7.h, z17.h\n"
- "fmla z28.h, p3/M, z4.h, z18.h\n"
- "ld1h { z19.h }, p2/Z, [x28, x27, LSL #1]\n"
- "fmla z26.h, p3/M, z1.h, z18.h\n"
- "fmla z29.h, p3/M, z5.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x28, x17, LSL #1]\n"
- "fmla z27.h, p3/M, z4.h, z16.h\n"
- "fmla z25.h, p3/M, z2.h, z16.h\n"
- "fmla z24.h, p3/M, z1.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x14, x12, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z17.h\n"
- "fmla z20.h, p3/M, z3.h, z17.h\n"
- "fmla z21.h, p3/M, z4.h, z19.h\n"
- "fmla z26.h, p3/M, z7.h, z17.h\n"
- "fmla z25.h, p3/M, z6.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x9]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "fmla z29.h, p3/M, z1.h, z16.h\n"
- "fmax z29.h, p3/M, z29.h, z31.h\n"
- "fmin z29.h, p3/M, z29.h, z30.h\n"
- "fmla z27.h, p3/M, z0.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x9, x25, LSL #1]\n"
- "fmla z24.h, p3/M, z7.h, z19.h\n"
- "fmla z20.h, p3/M, z5.h, z19.h\n"
- "fmla z22.h, p3/M, z0.h, z18.h\n"
- "fmla z21.h, p3/M, z2.h, z17.h\n"
- "fmla z25.h, p3/M, z8.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x26, x12, LSL #1]\n"
- "fmax z25.h, p3/M, z25.h, z31.h\n"
- "fmla z28.h, p3/M, z6.h, z18.h\n"
- "fmla z26.h, p3/M, z3.h, z18.h\n"
- "fmax z28.h, p3/M, z28.h, z31.h\n"
- "fmax z26.h, p3/M, z26.h, z31.h\n"
- "fmla z27.h, p3/M, z8.h, z17.h\n"
- "fmla z24.h, p3/M, z5.h, z17.h\n"
- "fmax z27.h, p3/M, z27.h, z31.h\n"
- "fmax z24.h, p3/M, z24.h, z31.h\n"
- "fmla z22.h, p3/M, z8.h, z16.h\n"
- "fmla z20.h, p3/M, z7.h, z16.h\n"
- "fmax z22.h, p3/M, z22.h, z31.h\n"
- "fmax z20.h, p3/M, z20.h, z31.h\n"
- "fmla z21.h, p3/M, z6.h, z16.h\n"
- "fmax z21.h, p3/M, z21.h, z31.h\n"
- "fmin z28.h, p3/M, z28.h, z30.h\n"
- "st1h { z28.h }, p0, [x11]\n"
- "fmin z27.h, p3/M, z27.h, z30.h\n"
- "fmin z26.h, p3/M, z26.h, z30.h\n"
- "st1h { z29.h }, p0, [x11, x16, LSL #1]\n"
- "fmin z25.h, p3/M, z25.h, z30.h\n"
- "fmin z24.h, p3/M, z24.h, z30.h\n"
- "st1h { z27.h }, p0, [x11, x22, LSL #1]\n"
- "fmin z22.h, p3/M, z22.h, z30.h\n"
- "fmin z20.h, p3/M, z20.h, z30.h\n"
- "st1h { z26.h }, p0, [x24]\n"
- "fmin z21.h, p3/M, z21.h, z30.h\n"
- "st1h { z25.h }, p0, [x24, x16, LSL #1]\n"
- "st1h { z24.h }, p0, [x24, x22, LSL #1]\n"
- "st1h { z22.h }, p0, [x23]\n"
- "st1h { z20.h }, p0, [x23, x16, LSL #1]\n"
- "st1h { z21.h }, p0, [x23, x22, LSL #1]\n"
+ "fmla z24.h, p3/M, z0.h, z18.h\n"
+ "fmla z26.h, p3/M, z8.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x12, LSL #1]\n"
+ "fmla z27.h, p3/M, z3.h, z18.h\n"
+ "fmla z23.h, p3/M, z2.h, z17.h\n"
+ "fmla z29.h, p3/M, z6.h, z18.h\n"
+ "fmax z30.h, p3/M, z30.h, z15.h\n"
+ "fmla z28.h, p3/M, z8.h, z17.h\n"
+ "fmla z25.h, p3/M, z5.h, z17.h\n"
+ "fmla z24.h, p3/M, z8.h, z16.h\n"
+ "fmla z21.h, p3/M, z7.h, z16.h\n"
+ "fmax z27.h, p3/M, z27.h, z15.h\n"
+ "fmax z26.h, p3/M, z26.h, z15.h\n"
+ "fmin z30.h, p3/M, z30.h, z14.h\n"
+ "fmla z23.h, p3/M, z6.h, z16.h\n"
+ "fmax z29.h, p3/M, z29.h, z15.h\n"
+ "fmax z28.h, p3/M, z28.h, z15.h\n"
+ "fmax z25.h, p3/M, z25.h, z15.h\n"
+ "fmin z27.h, p3/M, z27.h, z14.h\n"
+ "fmin z26.h, p3/M, z26.h, z14.h\n"
+ "fmax z24.h, p3/M, z24.h, z15.h\n"
+ "fmax z21.h, p3/M, z21.h, z15.h\n"
+ "fmax z23.h, p3/M, z23.h, z15.h\n"
+ "fmin z29.h, p3/M, z29.h, z14.h\n"
+ "fmin z28.h, p3/M, z28.h, z14.h\n"
+ "fmin z25.h, p3/M, z25.h, z14.h\n"
+ "st1h { z27.h }, p0, [x24]\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
+ "fmin z21.h, p3/M, z21.h, z14.h\n"
+ "st1h { z26.h }, p0, [x24, x17, LSL #1]\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
+ "st1h { z29.h }, p0, [x13]\n"
+ "st1h { z30.h }, p0, [x13, x17, LSL #1]\n"
+ "st1h { z28.h }, p0, [x13, x10, LSL #1]\n"
+ "st1h { z25.h }, p0, [x24, x10, LSL #1]\n"
+ "st1h { z24.h }, p0, [x23]\n"
+ "st1h { z21.h }, p0, [x23, x17, LSL #1]\n"
+ "st1h { z23.h }, p0, [x23, x10, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
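
The tile-loop epilogue in the kernel above advances tile_j across n_tile_cols and wraps to the next tile_i row branchlessly, with a cmp/csel pair rather than a conditional jump. A plain-C++ restatement of that bookkeeping (the function name advance_tile is invented; the register-to-field mapping follows this kernel's channel tail):

    // add x6, x6, #0x1 ; cmp x6, x22 ; csel x5/x6 ; cmp x5, x21 ; blt 1b
    static inline bool advance_tile(unsigned long *tile_i, unsigned long *tile_j,
                                    unsigned long n_tile_rows,
                                    unsigned long n_tile_cols)
    {
        unsigned long j      = *tile_j + 1;     // add x6, x6, #0x1
        unsigned long next_i = *tile_i + 1;     // add x20, x5, #0x1
        bool in_row = (j < n_tile_cols);        // cmp x6, x22
        *tile_i = in_row ? *tile_i : next_i;    // csel x5, x5, x20, LT
        *tile_j = in_row ? j : 0;               // csel x6, x6, XZR, LT
        return *tile_i < n_tile_rows;           // cmp x5, x21 ; blt 1b
    }
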
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 4f8368acd5..ecf912303d 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -90,384 +90,384 @@ void sve_fp16_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ptrue p3.b\n"
"ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
"add x17, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ld1h { z14.h }, p3/Z, [x8]\n"
- "cnth x16\n"
- "mov x15, #0x0\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "cnth x15\n"
+ "mov x14, #0x0\n"
+ "ldp x24, x23, [x17, #0x0]\n"
+ "ldp x22, x21, [x17, #0x10]\n"
+ "whilelt p2.h, XZR, %x[n_channels]\n"
+ "ldr x20, [x17, #0x20]\n"
+ "ld1h { z15.h }, p3/Z, [x8]\n"
"ld1h { z0.h }, p3/Z, [x8, #1, MUL VL]\n"
"ld1h { z1.h }, p3/Z, [x8, #2, MUL VL]\n"
- "whilelt p2.h, XZR, %x[n_channels]\n"
"ld1h { z2.h }, p3/Z, [x8, #3, MUL VL]\n"
"ld1h { z3.h }, p3/Z, [x8, #4, MUL VL]\n"
- "cmp x16, %x[n_channels]\n"
+ "cmp x15, %x[n_channels]\n"
+ "sub x13, XZR, x15\n"
"ld1h { z4.h }, p3/Z, [x8, #5, MUL VL]\n"
"ld1h { z5.h }, p3/Z, [x8, #6, MUL VL]\n"
- "sub x14, XZR, x16\n"
"ld1h { z6.h }, p3/Z, [x8, #7, MUL VL]\n"
"addvl x8, x8, #16\n"
- "ldp x24, x23, [x17, #0x0]\n"
- "ldp x22, x21, [x17, #0x10]\n"
- "ldr x20, [x17, #0x20]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rh { z31.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z30.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rh { z31.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1h { z9.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x23, x14, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x22, x14, LSL #1]\n"
"ld1h { z7.h }, p3/Z, [x8, #-8, MUL VL]\n"
"ld1h { z8.h }, p3/Z, [x8, #-7, MUL VL]\n"
- "ld1h { z9.h }, p2/Z, [x24, x15, LSL #1]\n"
"addvl x8, x8, #-6\n"
- "ld1h { z10.h }, p2/Z, [x23, x15, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x22, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x21, x15, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x20, x14, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z29, z14\n fmla z29.h, p3/M, z8.h, z9.h\n"
- "movprfx z28, z14\n fmla z28.h, p3/M, z7.h, z9.h\n"
- "ldr x23, [x17, #0x30]\n"
- "ldr x26, [x17, #0x38]\n"
- "movprfx z27, z14\n fmla z27.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z0.h, z10.h\n"
- "ldr x22, [x17, #0x28]\n"
+ "movprfx z30, z15\n fmla z30.h, p3/M, z8.h, z9.h\n"
+ "movprfx z29, z15\n fmla z29.h, p3/M, z7.h, z9.h\n"
+ "ldr x22, [x17, #0x30]\n"
+ "ldr x27, [x17, #0x38]\n"
+ "movprfx z28, z15\n fmla z28.h, p3/M, z6.h, z9.h\n"
+ "movprfx z27, z15\n fmla z27.h, p3/M, z5.h, z9.h\n"
+ "ldr x26, [x17, #0x28]\n"
"ldr x21, [x17, #0x48]\n"
- "fmla z28.h, p3/M, z4.h, z13.h\n"
- "movprfx z26, z14\n fmla z26.h, p3/M, z5.h, z9.h\n"
+ "movprfx z26, z15\n fmla z26.h, p3/M, z4.h, z9.h\n"
+ "movprfx z25, z15\n fmla z25.h, p3/M, z3.h, z9.h\n"
"ldr x20, [x17, #0x40]\n"
- "ld1h { z19.h }, p2/Z, [x21, x15, LSL #1]\n"
- "movprfx z25, z14\n fmla z25.h, p3/M, z4.h, z9.h\n"
- "movprfx z24, z14\n fmla z24.h, p3/M, z3.h, z9.h\n"
"ldr x25, [x17, #0x50]\n"
+ "movprfx z24, z15\n fmla z24.h, p3/M, z2.h, z9.h\n"
+ "movprfx z23, z15\n fmla z23.h, p3/M, z0.h, z9.h\n"
"ldr x24, [x17, #0x58]\n"
- "fmla z27.h, p3/M, z2.h, z11.h\n"
- "ld1h { z18.h }, p2/Z, [x23, x15, LSL #1]\n"
- "movprfx z23, z14\n fmla z23.h, p3/M, z2.h, z9.h\n"
"ldr x23, [x17, #0x60]\n"
- "fmla z29.h, p3/M, z5.h, z13.h\n"
- "fmla z28.h, p3/M, z6.h, z18.h\n"
- "ldr x12, [x17, #0x70]\n"
- "ldr x11, [x17, #0x88]\n"
- "movprfx z22, z14\n fmla z22.h, p3/M, z0.h, z9.h\n"
- "fmla z27.h, p3/M, z3.h, z13.h\n"
- "inch x14\n"
+ "fmla z30.h, p3/M, z0.h, z10.h\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z22.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ldr x12, [x17, #0x88]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z2.h, z13.h\n"
+ "ldr x22, [x17, #0x70]\n"
+ "fmla z26.h, p3/M, z1.h, z13.h\n"
+ "fmla z25.h, p3/M, z0.h, z13.h\n"
+ "inch x13\n"
"mov p1.b, p2.b\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
- "fmla z25.h, p3/M, z1.h, z13.h\n"
- "ldr x10, [x13, #0x0]\n"
- "whilelt p0.h, x16, %x[n_channels]\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "ld1h { z17.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x15, LSL #1]\n"
- "movprfx z21, z14\n fmla z21.h, p3/M, z1.h, z9.h\n"
- "fmla z29.h, p3/M, z7.h, z18.h\n"
- "ldr x22, [x17, #0x68]\n"
- "ldr x21, [x17, #0x78]\n"
- "fmla z28.h, p3/M, z0.h, z17.h\n"
- "fmla z22.h, p3/M, z8.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z16.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "movprfx z21, z15\n fmla z21.h, p3/M, z1.h, z9.h\n"
+ "ldr x21, [x17, #0x68]\n"
+ "fmla z30.h, p3/M, z5.h, z13.h\n"
+ "fmla z29.h, p3/M, z6.h, z17.h\n"
+ "ldr x11, [x16, #0x0]\n"
+ "whilelt p0.h, x15, %x[n_channels]\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z18.h }, p2/Z, [x27, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z17.h\n"
+ "ldr x10, [x17, #0x78]\n"
+ "fmla z23.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
"ldr x20, [x17, #0x80]\n"
- "fmla z26.h, p3/M, z4.h, z18.h\n"
- "fmla z25.h, p3/M, z3.h, z18.h\n"
- "ldr x9, [x13, #0x8]\n"
- "ldr x28, [x13, #0x10]\n"
- "fmla z21.h, p3/M, z0.h, z18.h\n"
- "fmla z24.h, p3/M, z4.h, z19.h\n"
- "ldr x27, [x13, #0x18]\n"
- "ld1h { z14.h }, p3/Z, [x8]\n"
- "fmla z23.h, p3/M, z1.h, z18.h\n"
- "fmla z29.h, p3/M, z1.h, z17.h\n"
- "ld1h { z20.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ld1h { z17.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "fmla z27.h, p3/M, z1.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z21.h, p3/M, z0.h, z17.h\n"
+ "fmla z25.h, p3/M, z4.h, z22.h\n"
+ "ldr x9, [x16, #0x8]\n"
+ "ldr x28, [x16, #0x10]\n"
+ "fmla z30.h, p3/M, z7.h, z17.h\n"
+ "fmla z29.h, p3/M, z0.h, z18.h\n"
+ "ldr x27, [x16, #0x18]\n"
+ "ld1h { z15.h }, p3/Z, [x8]\n"
+ "fmla z24.h, p3/M, z1.h, z17.h\n"
+ "fmla z28.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x25, x14, LSL #1]\n"
"ldr x26, [x17, #0x90]\n"
- "fmla z25.h, p3/M, z5.h, z19.h\n"
- "fmla z21.h, p3/M, z2.h, z19.h\n"
- "ldr x25, [x17, #0xa0]\n"
- "ldr x24, [x17, #0x98]\n"
- "fmla z26.h, p3/M, z0.h, z20.h\n"
- "fmla z24.h, p3/M, z2.h, z17.h\n"
- "fmla z28.h, p3/M, z8.h, z19.h\n"
+ "fmla z26.h, p3/M, z5.h, z22.h\n"
+ "fmla z23.h, p3/M, z1.h, z22.h\n"
+ "fmla z21.h, p3/M, z2.h, z22.h\n"
+ "fmla z30.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z20.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ldr x25, [x17, #0x98]\n"
+ "fmla z29.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z0.h, z17.h\n"
+ "ldr x24, [x17, #0xa0]\n"
+ "fmla z28.h, p3/M, z7.h, z22.h\n"
+ "fmla z25.h, p3/M, z2.h, z20.h\n"
+ "fmla z24.h, p3/M, z3.h, z16.h\n"
+ "fmla z30.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "ldr x23, [x17, #0xb0]\n"
+ "fmla z29.h, p3/M, z8.h, z22.h\n"
+ "ld1h { z17.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ldr x22, [x17, #0xa8]\n"
+ "fmla z27.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "ldr x21, [x17, #0xc0]\n"
+ "fmla z28.h, p3/M, z5.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x10, x14, LSL #1]\n"
+ "ldr x20, [x17, #0xb8]\n"
+ "fmla z26.h, p3/M, z7.h, z17.h\n"
+ "fmla z25.h, p3/M, z6.h, z17.h\n"
+ "fmla z21.h, p3/M, z4.h, z17.h\n"
+ "fmla z24.h, p3/M, z5.h, z17.h\n"
+ "fmla z23.h, p3/M, z3.h, z17.h\n"
+ "fmla z27.h, p3/M, z8.h, z17.h\n"
+ "fmla z29.h, p3/M, z3.h, z18.h\n"
+ "fmla z30.h, p3/M, z4.h, z18.h\n"
+ "fmla z25.h, p3/M, z8.h, z19.h\n"
+ "fmla z26.h, p3/M, z0.h, z18.h\n"
+ "fmla z21.h, p3/M, z6.h, z16.h\n"
+ "fmla z24.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "fmla z23.h, p3/M, z5.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x12, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x25, x14, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z16.h\n"
+ "fmla z28.h, p3/M, z4.h, z16.h\n"
+ "fmla z26.h, p3/M, z2.h, z16.h\n"
+ "fmla z25.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ldr x24, [x17, #0x20]\n"
+ "fmla z21.h, p3/M, z8.h, z17.h\n"
+ "fmla z24.h, p3/M, z4.h, z19.h\n"
+ "fmla z23.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x22, x14, LSL #1]\n"
"fmla z27.h, p3/M, z7.h, z19.h\n"
- "fmla z22.h, p3/M, z1.h, z19.h\n"
- "fmla z23.h, p3/M, z3.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x22, x15, LSL #1]\n"
- "ldr x23, [x17, #0xa8]\n"
- "fmla z26.h, p3/M, z6.h, z16.h\n"
+ "fmla z30.h, p3/M, z2.h, z16.h\n"
+ "fmla z29.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z26.h, p3/M, z6.h, z19.h\n"
+ "fmla z28.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "fmla z21.h, p3/M, z3.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x14, LSL #1]\n"
"fmla z25.h, p3/M, z7.h, z18.h\n"
- "ld1h { z19.h }, p2/Z, [x20, x15, LSL #1]\n"
- "ldr x22, [x17, #0xc0]\n"
- "fmla z24.h, p3/M, z6.h, z18.h\n"
- "fmla z21.h, p3/M, z4.h, z18.h\n"
- "fmla z29.h, p3/M, z3.h, z20.h\n"
- "fmla z27.h, p3/M, z5.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
- "fmla z23.h, p3/M, z5.h, z18.h\n"
- "fmla z22.h, p3/M, z3.h, z18.h\n"
- "ldr x21, [x17, #0xb0]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "fmla z23.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z4.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmax z29.h, p3/M, z29.h, z14.h\n"
+ "fmla z30.h, p3/M, z6.h, z16.h\n"
+ "fmla z24.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z0.h }, p3/Z, [x8, #1, MUL VL]\n"
"fmla z26.h, p3/M, z8.h, z18.h\n"
- "fmla z24.h, p3/M, z8.h, z17.h\n"
- "fmla z21.h, p3/M, z6.h, z16.h\n"
- "fmla z28.h, p3/M, z3.h, z19.h\n"
- "fmla z25.h, p3/M, z0.h, z19.h\n"
- "fmla z22.h, p3/M, z5.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z23.h, p3/M, z7.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z19.h\n"
- "fmla z26.h, p3/M, z1.h, z19.h\n"
- "fmla z28.h, p3/M, z5.h, z17.h\n"
- "ld1h { z16.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z27.h, p3/M, z4.h, z17.h\n"
- "fmla z25.h, p3/M, z2.h, z17.h\n"
- "fmla z24.h, p3/M, z1.h, z17.h\n"
- "fmla z21.h, p3/M, z8.h, z18.h\n"
- "ld1h { z17.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ldr x25, [x17, #0x20]\n"
- "fmla z22.h, p3/M, z7.h, z18.h\n"
- "ld1h { z18.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z2.h, z17.h\n"
- "fmla z26.h, p3/M, z7.h, z16.h\n"
- "fmla z25.h, p3/M, z6.h, z16.h\n"
- "fmla z23.h, p3/M, z4.h, z16.h\n"
- "fmla z21.h, p3/M, z3.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z18.h\n"
- "fmla z28.h, p3/M, z1.h, z17.h\n"
- "fmax z28.h, p3/M, z28.h, z31.h\n"
- "fmin z28.h, p3/M, z28.h, z30.h\n"
- "fmla z27.h, p3/M, z0.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z6.h, z16.h\n"
- "fmax z29.h, p3/M, z29.h, z31.h\n"
- "fmla z24.h, p3/M, z7.h, z18.h\n"
+ "fmla z27.h, p3/M, z3.h, z16.h\n"
+ "fmla z28.h, p3/M, z8.h, z17.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #4, MUL VL]\n"
"fmla z21.h, p3/M, z5.h, z18.h\n"
- "fmin z29.h, p3/M, z29.h, z30.h\n"
- "st1h { z29.h }, p1, [x10, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z0.h, z16.h\n"
- "fmla z22.h, p3/M, z2.h, z17.h\n"
- "ldr x24, [x13, #0x20]\n"
- "st1h { z28.h }, p1, [x9, x14, LSL #1]\n"
- "fmla z25.h, p3/M, z8.h, z18.h\n"
- "fmla z26.h, p3/M, z3.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "fmla z25.h, p3/M, z5.h, z17.h\n"
"ldp x23, x22, [x17, #0x0]\n"
- "fmla z27.h, p3/M, z8.h, z17.h\n"
- "fmla z24.h, p3/M, z5.h, z17.h\n"
+ "fmla z23.h, p3/M, z2.h, z17.h\n"
+ "fmax z30.h, p3/M, z30.h, z14.h\n"
"ldp x21, x20, [x17, #0x10]\n"
- "fmax z27.h, p3/M, z27.h, z31.h\n"
- "fmla z23.h, p3/M, z8.h, z16.h\n"
- "fmla z21.h, p3/M, z7.h, z16.h\n"
- "fmax z26.h, p3/M, z26.h, z31.h\n"
- "fmax z25.h, p3/M, z25.h, z31.h\n"
- "fmla z22.h, p3/M, z6.h, z16.h\n"
- "inch x15\n"
- "ld1h { z9.h }, p0/Z, [x23, x16, LSL #1]\n"
- "ld1h { z10.h }, p0/Z, [x22, x16, LSL #1]\n"
- "ld1h { z11.h }, p0/Z, [x21, x16, LSL #1]\n"
- "ld1h { z12.h }, p0/Z, [x20, x16, LSL #1]\n"
- "fmin z27.h, p3/M, z27.h, z30.h\n"
- "fmin z26.h, p3/M, z26.h, z30.h\n"
- "ld1h { z13.h }, p0/Z, [x25, x16, LSL #1]\n"
- "inch x16\n"
- "fmin z25.h, p3/M, z25.h, z30.h\n"
- "st1h { z27.h }, p1, [x28, x14, LSL #1]\n"
- "fmax z24.h, p3/M, z24.h, z31.h\n"
- "fmax z23.h, p3/M, z23.h, z31.h\n"
- "st1h { z26.h }, p1, [x27, x14, LSL #1]\n"
- "ldr x23, [x13, #0x28]\n"
- "fmax z21.h, p3/M, z21.h, z31.h\n"
- "fmax z22.h, p3/M, z22.h, z31.h\n"
- "st1h { z25.h }, p1, [x24, x14, LSL #1]\n"
- "ldr x22, [x13, #0x30]\n"
- "ldr x21, [x13, #0x38]\n"
- "ldr x20, [x13, #0x40]\n"
- "whilelt p2.h, x15, %x[n_channels]\n"
- "cmp x16, %x[n_channels]\n"
- "ld1h { z0.h }, p3/Z, [x8, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x8, #2, MUL VL]\n"
- "fmin z24.h, p3/M, z24.h, z30.h\n"
- "fmin z23.h, p3/M, z23.h, z30.h\n"
+ "inch x14\n"
+ "fmax z26.h, p3/M, z26.h, z14.h\n"
+ "fmin z29.h, p3/M, z29.h, z31.h\n"
"ld1h { z2.h }, p3/Z, [x8, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x8, #4, MUL VL]\n"
- "fmin z21.h, p3/M, z21.h, z30.h\n"
- "fmin z22.h, p3/M, z22.h, z30.h\n"
- "ld1h { z4.h }, p3/Z, [x8, #5, MUL VL]\n"
"ld1h { z5.h }, p3/Z, [x8, #6, MUL VL]\n"
- "st1h { z24.h }, p1, [x23, x14, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z16.h\n"
+ "fmax z28.h, p3/M, z28.h, z14.h\n"
+ "fmax z27.h, p3/M, z27.h, z14.h\n"
+ "ld1h { z9.h }, p0/Z, [x23, x15, LSL #1]\n"
+ "fmla z21.h, p3/M, z7.h, z16.h\n"
+ "fmin z30.h, p3/M, z30.h, z31.h\n"
+ "ld1h { z10.h }, p0/Z, [x22, x15, LSL #1]\n"
+ "ld1h { z11.h }, p0/Z, [x21, x15, LSL #1]\n"
+ "fmla z23.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z12.h }, p0/Z, [x20, x15, LSL #1]\n"
+ "ld1h { z13.h }, p0/Z, [x24, x15, LSL #1]\n"
+ "inch x15\n"
+ "fmin z28.h, p3/M, z28.h, z31.h\n"
+ "fmin z27.h, p3/M, z27.h, z31.h\n"
+ "st1h { z29.h }, p1, [x9, x13, LSL #1]\n"
+ "ldr x23, [x16, #0x28]\n"
+ "st1h { z30.h }, p1, [x11, x13, LSL #1]\n"
+ "ldr x20, [x16, #0x20]\n"
+ "fmin z26.h, p3/M, z26.h, z31.h\n"
+ "fmax z25.h, p3/M, z25.h, z14.h\n"
+ "fmax z24.h, p3/M, z24.h, z14.h\n"
+ "fmax z21.h, p3/M, z21.h, z14.h\n"
"ld1h { z6.h }, p3/Z, [x8, #7, MUL VL]\n"
+ "fmax z23.h, p3/M, z23.h, z14.h\n"
+ "st1h { z28.h }, p1, [x28, x13, LSL #1]\n"
+ "ldr x22, [x16, #0x30]\n"
"addvl x8, x8, #16\n"
- "st1h { z23.h }, p1, [x22, x14, LSL #1]\n"
+ "st1h { z27.h }, p1, [x27, x13, LSL #1]\n"
+ "ldr x21, [x16, #0x38]\n"
+ "whilelt p2.h, x14, %x[n_channels]\n"
+ "cmp x15, %x[n_channels]\n"
+ "st1h { z26.h }, p1, [x20, x13, LSL #1]\n"
+ "ldr x20, [x16, #0x40]\n"
+ "fmin z25.h, p3/M, z25.h, z31.h\n"
+ "fmin z24.h, p3/M, z24.h, z31.h\n"
+ "fmin z21.h, p3/M, z21.h, z31.h\n"
+ "fmin z23.h, p3/M, z23.h, z31.h\n"
"ld1h { z7.h }, p3/Z, [x8, #-8, MUL VL]\n"
- "st1h { z21.h }, p1, [x21, x14, LSL #1]\n"
"ld1h { z8.h }, p3/Z, [x8, #-7, MUL VL]\n"
"addvl x8, x8, #-6\n"
- "st1h { z22.h }, p1, [x20, x14, LSL #1]\n"
+ "st1h { z25.h }, p1, [x23, x13, LSL #1]\n"
+ "st1h { z24.h }, p1, [x22, x13, LSL #1]\n"
+ "st1h { z21.h }, p1, [x21, x13, LSL #1]\n"
+ "st1h { z23.h }, p1, [x20, x13, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z29, z14\n fmla z29.h, p3/M, z8.h, z9.h\n"
- "movprfx z28, z14\n fmla z28.h, p3/M, z7.h, z9.h\n"
- "ldr x23, [x17, #0x30]\n"
- "ldr x26, [x17, #0x38]\n"
- "movprfx z27, z14\n fmla z27.h, p3/M, z6.h, z9.h\n"
- "fmla z29.h, p3/M, z0.h, z10.h\n"
- "ldr x22, [x17, #0x28]\n"
+ "movprfx z30, z15\n fmla z30.h, p3/M, z8.h, z9.h\n"
+ "movprfx z29, z15\n fmla z29.h, p3/M, z7.h, z9.h\n"
+ "ldr x22, [x17, #0x30]\n"
+ "ldr x27, [x17, #0x38]\n"
+ "movprfx z28, z15\n fmla z28.h, p3/M, z6.h, z9.h\n"
+ "movprfx z27, z15\n fmla z27.h, p3/M, z5.h, z9.h\n"
+ "ldr x26, [x17, #0x28]\n"
"ldr x21, [x17, #0x48]\n"
- "fmla z28.h, p3/M, z4.h, z13.h\n"
- "movprfx z26, z14\n fmla z26.h, p3/M, z5.h, z9.h\n"
+ "movprfx z26, z15\n fmla z26.h, p3/M, z4.h, z9.h\n"
+ "movprfx z25, z15\n fmla z25.h, p3/M, z3.h, z9.h\n"
"ldr x20, [x17, #0x40]\n"
- "ld1h { z19.h }, p2/Z, [x21, x15, LSL #1]\n"
- "movprfx z25, z14\n fmla z25.h, p3/M, z4.h, z9.h\n"
- "movprfx z24, z14\n fmla z24.h, p3/M, z3.h, z9.h\n"
"ldr x25, [x17, #0x50]\n"
+ "movprfx z24, z15\n fmla z24.h, p3/M, z2.h, z9.h\n"
+ "movprfx z23, z15\n fmla z23.h, p3/M, z0.h, z9.h\n"
"ldr x24, [x17, #0x58]\n"
- "fmla z27.h, p3/M, z2.h, z11.h\n"
- "ld1h { z18.h }, p2/Z, [x23, x15, LSL #1]\n"
- "movprfx z23, z14\n fmla z23.h, p3/M, z2.h, z9.h\n"
"ldr x23, [x17, #0x60]\n"
- "fmla z29.h, p3/M, z5.h, z13.h\n"
- "fmla z28.h, p3/M, z6.h, z18.h\n"
- "ldr x12, [x17, #0x70]\n"
- "ldr x11, [x17, #0x88]\n"
- "movprfx z22, z14\n fmla z22.h, p3/M, z0.h, z9.h\n"
- "fmla z27.h, p3/M, z3.h, z13.h\n"
- "inch x14\n"
+ "fmla z30.h, p3/M, z0.h, z10.h\n"
+ "fmla z29.h, p3/M, z4.h, z13.h\n"
+ "ld1h { z22.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ldr x12, [x17, #0x88]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z2.h, z13.h\n"
+ "ldr x22, [x17, #0x70]\n"
+ "fmla z26.h, p3/M, z1.h, z13.h\n"
+ "fmla z25.h, p3/M, z0.h, z13.h\n"
+ "inch x13\n"
"mov p0.b, p2.b\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
- "fmla z25.h, p3/M, z1.h, z13.h\n"
- "ldr x10, [x13, #0x0]\n"
- "ldr x9, [x13, #0x8]\n"
- "fmla z24.h, p3/M, z0.h, z13.h\n"
- "ld1h { z17.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z23.h, p3/M, z6.h, z12.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x15, LSL #1]\n"
- "movprfx z21, z14\n fmla z21.h, p3/M, z1.h, z9.h\n"
- "fmla z29.h, p3/M, z7.h, z18.h\n"
- "ldr x22, [x17, #0x68]\n"
- "ldr x21, [x17, #0x78]\n"
- "fmla z28.h, p3/M, z0.h, z17.h\n"
- "fmla z22.h, p3/M, z8.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z16.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "movprfx z21, z15\n fmla z21.h, p3/M, z1.h, z9.h\n"
+ "ldr x21, [x17, #0x68]\n"
+ "fmla z30.h, p3/M, z5.h, z13.h\n"
+ "fmla z29.h, p3/M, z6.h, z17.h\n"
+ "ldr x11, [x16, #0x0]\n"
+ "ldr x10, [x16, #0x8]\n"
+ "fmla z28.h, p3/M, z3.h, z13.h\n"
+ "ld1h { z18.h }, p2/Z, [x27, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z17.h\n"
+ "ldr x9, [x17, #0x78]\n"
+ "fmla z23.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
"ldr x20, [x17, #0x80]\n"
- "fmla z26.h, p3/M, z4.h, z18.h\n"
- "fmla z25.h, p3/M, z3.h, z18.h\n"
- "ldr x28, [x13, #0x10]\n"
- "ldr x27, [x13, #0x18]\n"
- "fmla z21.h, p3/M, z0.h, z18.h\n"
- "fmla z24.h, p3/M, z4.h, z19.h\n"
- "fmla z23.h, p3/M, z1.h, z18.h\n"
- "fmla z29.h, p3/M, z1.h, z17.h\n"
- "ld1h { z20.h }, p2/Z, [x25, x15, LSL #1]\n"
- "ld1h { z17.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z28.h, p3/M, z2.h, z16.h\n"
- "fmla z27.h, p3/M, z1.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z21.h, p3/M, z0.h, z17.h\n"
+ "fmla z25.h, p3/M, z4.h, z22.h\n"
+ "ldr x28, [x16, #0x10]\n"
+ "ldr x27, [x16, #0x18]\n"
+ "fmla z30.h, p3/M, z7.h, z17.h\n"
+ "fmla z29.h, p3/M, z0.h, z18.h\n"
+ "fmla z24.h, p3/M, z1.h, z17.h\n"
+ "fmla z28.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x25, x14, LSL #1]\n"
"ldr x26, [x17, #0x90]\n"
- "fmla z25.h, p3/M, z5.h, z19.h\n"
- "fmla z21.h, p3/M, z2.h, z19.h\n"
- "ldr x25, [x17, #0xa0]\n"
- "ldr x24, [x17, #0x98]\n"
- "fmla z26.h, p3/M, z0.h, z20.h\n"
- "fmla z24.h, p3/M, z2.h, z17.h\n"
- "fmla z28.h, p3/M, z8.h, z19.h\n"
+ "fmla z26.h, p3/M, z5.h, z22.h\n"
+ "fmla z23.h, p3/M, z1.h, z22.h\n"
+ "fmla z21.h, p3/M, z2.h, z22.h\n"
+ "fmla z30.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z20.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ldr x25, [x17, #0x98]\n"
+ "fmla z29.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z0.h, z17.h\n"
+ "ldr x24, [x17, #0xa0]\n"
+ "fmla z28.h, p3/M, z7.h, z22.h\n"
+ "fmla z25.h, p3/M, z2.h, z20.h\n"
+ "fmla z24.h, p3/M, z3.h, z16.h\n"
+ "fmla z30.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z19.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "ldr x23, [x17, #0xb0]\n"
+ "fmla z29.h, p3/M, z8.h, z22.h\n"
+ "ld1h { z17.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ldr x22, [x17, #0xa8]\n"
+ "fmla z27.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "ldr x21, [x17, #0xc0]\n"
+ "fmla z28.h, p3/M, z5.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x9, x14, LSL #1]\n"
+ "ldr x20, [x17, #0xb8]\n"
+ "fmla z26.h, p3/M, z7.h, z17.h\n"
+ "fmla z25.h, p3/M, z6.h, z17.h\n"
+ "fmla z21.h, p3/M, z4.h, z17.h\n"
+ "fmla z24.h, p3/M, z5.h, z17.h\n"
+ "fmla z23.h, p3/M, z3.h, z17.h\n"
+ "fmla z27.h, p3/M, z8.h, z17.h\n"
+ "fmla z29.h, p3/M, z3.h, z18.h\n"
+ "fmla z30.h, p3/M, z4.h, z18.h\n"
+ "fmla z25.h, p3/M, z8.h, z19.h\n"
+ "fmla z26.h, p3/M, z0.h, z18.h\n"
+ "fmla z21.h, p3/M, z6.h, z16.h\n"
+ "fmla z24.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "fmla z23.h, p3/M, z5.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x12, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x25, x14, LSL #1]\n"
+ "fmla z29.h, p3/M, z5.h, z16.h\n"
+ "fmla z28.h, p3/M, z4.h, z16.h\n"
+ "fmla z26.h, p3/M, z2.h, z16.h\n"
+ "fmla z25.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "fmla z21.h, p3/M, z8.h, z17.h\n"
+ "fmla z24.h, p3/M, z4.h, z19.h\n"
+ "fmla z23.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x22, x14, LSL #1]\n"
"fmla z27.h, p3/M, z7.h, z19.h\n"
- "fmla z22.h, p3/M, z1.h, z19.h\n"
- "fmla z23.h, p3/M, z3.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x22, x15, LSL #1]\n"
- "ldr x23, [x17, #0xa8]\n"
- "fmla z26.h, p3/M, z6.h, z16.h\n"
+ "fmla z30.h, p3/M, z2.h, z16.h\n"
+ "fmla z29.h, p3/M, z1.h, z16.h\n"
+ "fmla z26.h, p3/M, z6.h, z19.h\n"
+ "fmla z28.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "fmla z21.h, p3/M, z3.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x14, LSL #1]\n"
"fmla z25.h, p3/M, z7.h, z18.h\n"
- "ld1h { z19.h }, p2/Z, [x20, x15, LSL #1]\n"
- "ldr x22, [x17, #0xc0]\n"
- "fmla z24.h, p3/M, z6.h, z18.h\n"
- "fmla z21.h, p3/M, z4.h, z18.h\n"
- "fmla z29.h, p3/M, z3.h, z20.h\n"
- "fmla z27.h, p3/M, z5.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
- "fmla z23.h, p3/M, z5.h, z18.h\n"
- "fmla z22.h, p3/M, z3.h, z18.h\n"
- "ldr x21, [x17, #0xb0]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "fmla z23.h, p3/M, z4.h, z18.h\n"
+ "fmax z29.h, p3/M, z29.h, z14.h\n"
+ "fmla z30.h, p3/M, z6.h, z16.h\n"
+ "fmla z24.h, p3/M, z0.h, z16.h\n"
"fmla z26.h, p3/M, z8.h, z18.h\n"
- "fmla z24.h, p3/M, z8.h, z17.h\n"
- "fmla z21.h, p3/M, z6.h, z16.h\n"
- "fmla z28.h, p3/M, z3.h, z19.h\n"
- "fmla z25.h, p3/M, z0.h, z19.h\n"
- "fmla z22.h, p3/M, z5.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x11, x15, LSL #1]\n"
- "fmla z23.h, p3/M, z7.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x26, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z4.h, z19.h\n"
- "fmla z26.h, p3/M, z1.h, z19.h\n"
- "fmla z28.h, p3/M, z5.h, z17.h\n"
- "ld1h { z16.h }, p2/Z, [x24, x15, LSL #1]\n"
- "fmla z27.h, p3/M, z4.h, z17.h\n"
- "fmla z25.h, p3/M, z2.h, z17.h\n"
- "fmla z24.h, p3/M, z1.h, z17.h\n"
- "fmla z21.h, p3/M, z8.h, z18.h\n"
- "ld1h { z17.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z22.h, p3/M, z7.h, z18.h\n"
- "ld1h { z18.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z2.h, z17.h\n"
- "fmla z26.h, p3/M, z7.h, z16.h\n"
- "fmla z25.h, p3/M, z6.h, z16.h\n"
- "fmla z23.h, p3/M, z4.h, z16.h\n"
- "fmla z21.h, p3/M, z3.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z18.h\n"
- "fmla z28.h, p3/M, z1.h, z17.h\n"
- "fmax z28.h, p3/M, z28.h, z31.h\n"
- "fmin z28.h, p3/M, z28.h, z30.h\n"
- "fmla z27.h, p3/M, z0.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x15, LSL #1]\n"
- "fmla z29.h, p3/M, z6.h, z16.h\n"
- "fmax z29.h, p3/M, z29.h, z31.h\n"
- "fmla z24.h, p3/M, z7.h, z18.h\n"
+ "fmla z27.h, p3/M, z3.h, z16.h\n"
+ "fmla z28.h, p3/M, z8.h, z17.h\n"
"fmla z21.h, p3/M, z5.h, z18.h\n"
- "fmin z29.h, p3/M, z29.h, z30.h\n"
- "st1h { z29.h }, p0, [x10, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z0.h, z16.h\n"
- "fmla z22.h, p3/M, z2.h, z17.h\n"
- "ldr x20, [x13, #0x20]\n"
- "st1h { z28.h }, p0, [x9, x14, LSL #1]\n"
- "fmla z25.h, p3/M, z8.h, z18.h\n"
- "fmla z26.h, p3/M, z3.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x15, LSL #1]\n"
- "fmax z26.h, p3/M, z26.h, z31.h\n"
- "fmla z27.h, p3/M, z8.h, z17.h\n"
- "fmla z24.h, p3/M, z5.h, z17.h\n"
- "fmax z27.h, p3/M, z27.h, z31.h\n"
- "fmax z25.h, p3/M, z25.h, z31.h\n"
- "fmla z23.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "fmla z25.h, p3/M, z5.h, z17.h\n"
+ "fmin z29.h, p3/M, z29.h, z31.h\n"
+ "fmla z23.h, p3/M, z2.h, z17.h\n"
+ "fmax z30.h, p3/M, z30.h, z14.h\n"
+ "fmax z26.h, p3/M, z26.h, z14.h\n"
+ "fmla z24.h, p3/M, z8.h, z16.h\n"
+ "fmax z28.h, p3/M, z28.h, z14.h\n"
+ "fmax z27.h, p3/M, z27.h, z14.h\n"
+ "st1h { z29.h }, p0, [x10, x13, LSL #1]\n"
+ "ldr x23, [x16, #0x28]\n"
"fmla z21.h, p3/M, z7.h, z16.h\n"
- "fmin z27.h, p3/M, z27.h, z30.h\n"
- "fmin z26.h, p3/M, z26.h, z30.h\n"
- "fmla z22.h, p3/M, z6.h, z16.h\n"
- "fmin z25.h, p3/M, z25.h, z30.h\n"
- "fmax z24.h, p3/M, z24.h, z31.h\n"
- "st1h { z27.h }, p0, [x28, x14, LSL #1]\n"
- "fmax z23.h, p3/M, z23.h, z31.h\n"
- "fmax z21.h, p3/M, z21.h, z31.h\n"
- "st1h { z26.h }, p0, [x27, x14, LSL #1]\n"
- "ldr x23, [x13, #0x28]\n"
- "fmax z22.h, p3/M, z22.h, z31.h\n"
- "st1h { z25.h }, p0, [x20, x14, LSL #1]\n"
- "ldr x22, [x13, #0x30]\n"
- "ldr x21, [x13, #0x38]\n"
- "ldr x20, [x13, #0x40]\n"
- "fmin z24.h, p3/M, z24.h, z30.h\n"
- "fmin z23.h, p3/M, z23.h, z30.h\n"
- "st1h { z24.h }, p0, [x23, x14, LSL #1]\n"
- "fmin z21.h, p3/M, z21.h, z30.h\n"
- "fmin z22.h, p3/M, z22.h, z30.h\n"
- "st1h { z23.h }, p0, [x22, x14, LSL #1]\n"
- "st1h { z21.h }, p0, [x21, x14, LSL #1]\n"
- "st1h { z22.h }, p0, [x20, x14, LSL #1]\n"
+ "fmin z30.h, p3/M, z30.h, z31.h\n"
+ "fmin z26.h, p3/M, z26.h, z31.h\n"
+ "fmax z25.h, p3/M, z25.h, z14.h\n"
+ "fmla z23.h, p3/M, z6.h, z16.h\n"
+ "fmin z28.h, p3/M, z28.h, z31.h\n"
+ "fmin z27.h, p3/M, z27.h, z31.h\n"
+ "fmax z24.h, p3/M, z24.h, z14.h\n"
+ "st1h { z30.h }, p0, [x11, x13, LSL #1]\n"
+ "ldr x20, [x16, #0x20]\n"
+ "fmax z21.h, p3/M, z21.h, z14.h\n"
+ "st1h { z28.h }, p0, [x28, x13, LSL #1]\n"
+ "ldr x22, [x16, #0x30]\n"
+ "fmin z25.h, p3/M, z25.h, z31.h\n"
+ "fmax z23.h, p3/M, z23.h, z14.h\n"
+ "st1h { z27.h }, p0, [x27, x13, LSL #1]\n"
+ "ldr x21, [x16, #0x38]\n"
+ "st1h { z26.h }, p0, [x20, x13, LSL #1]\n"
+ "ldr x20, [x16, #0x40]\n"
+ "fmin z24.h, p3/M, z24.h, z31.h\n"
+ "fmin z21.h, p3/M, z21.h, z31.h\n"
+ "st1h { z25.h }, p0, [x23, x13, LSL #1]\n"
+ "fmin z23.h, p3/M, z23.h, z31.h\n"
+ "st1h { z24.h }, p0, [x22, x13, LSL #1]\n"
+ "st1h { z21.h }, p0, [x21, x13, LSL #1]\n"
+ "st1h { z23.h }, p0, [x20, x13, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index 41eaa4f18c..d71286f6c5 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,565 +88,565 @@ void sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x16, #0x0\n"
- "mov x4, #0x0\n"
+ "mov x1, #0x0\n"
+ "mov x2, #0x0\n"
"1:" // Tile loop
- "str x16, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x1, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x20, #0x4\n"
"mov x25, #0x4\n"
- "mov x24, #0x4\n"
- "str x4, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x16, x23\n" // offset = tile_i * ld_input_row
- "ldr x5, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x6, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x16, x22\n" // offset = tile_i * ld_output_row
- "add x7, x5, x5\n"
- "madd x21, x4, x5, x21\n" // offset += tile_j * ld_input_col
- "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "cnth x16\n"
- "madd x20, x4, x6, x20\n" // offset += tile_j * ld_output_col
- "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x14, x7, x5\n"
+ "str x2, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "cnth x3\n"
+ "ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x8, x8, x21, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x13, x8, x23, LSL #1\n"
- "ld1h { z19.h }, p3/Z, [x17]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "add x12, x13, x23, LSL #1\n"
- "add x15, x15, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
- "add x11, x12, x23, LSL #1\n"
- "add x10, x14, x5\n"
- "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
- "add x9, x15, x22, LSL #1\n"
- "add x28, x11, x23, LSL #1\n"
- "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
- "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "add x27, x10, x5\n"
- "add x26, x9, x22, LSL #1\n"
- "add x25, x6, x6\n"
- "ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "cmp x16, %x[n_channels]\n"
- "add x24, x28, x23, LSL #1\n"
- "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
- "add x23, x26, x22, LSL #1\n"
- "add x22, x25, x6\n"
- "ld1h { z9.h }, p2/Z, [x12, x7, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x8]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x16\n"
- "ld1h { z11.h }, p2/Z, [x8, x27, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x12, x14, LSL #1]\n"
- "addvl x17, x17, #-6\n"
+ "mov x6, #0x0\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x22, x1, x24\n" // offset = tile_i * ld_input_row
+ "mul x21, x1, x23\n" // offset = tile_i * ld_output_row
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "cmp x3, %x[n_channels]\n"
+ "ld1rh { z27.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x16, x4, x4\n"
+ "add x15, x5, x5\n"
+ "ld1rh { z29.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "madd x22, x2, x4, x22\n" // offset += tile_j * ld_input_col
+ "add x14, x16, x4\n"
+ "ld1h { z13.h }, p3/Z, [x8]\n"
+ "ld1h { z0.h }, p3/Z, [x8, #1, MUL VL]\n"
+ "add x13, x15, x5\n"
+ "madd x21, x2, x5, x21\n" // offset += tile_j * ld_output_col
+ "ld1h { z1.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "add x12, x14, x4\n"
+ "mul x22, x22, x20\n" // offset *= kernel_stride * output_size
+ "ld1h { z3.h }, p3/Z, [x8, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "add x11, x12, x4\n"
+ "ld1h { z5.h }, p3/Z, [x8, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x8, #7, MUL VL]\n"
+ "addvl x8, x8, #16\n"
+ "sub x20, XZR, x3\n"
+ "mul x21, x21, x25\n" // offset *= output_tile_size
+ "add x7, x7, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x10, x7, x24, LSL #1\n"
+ "add x9, x10, x24, LSL #1\n"
+ "ld1h { z10.h }, p2/Z, [x7]\n"
+ "ld1h { z11.h }, p2/Z, [x7, x11, LSL #1]\n"
+ "add x28, x9, x24, LSL #1\n"
+ "add x27, x28, x24, LSL #1\n"
+ "ld1h { z7.h }, p3/Z, [x8, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x8, #-7, MUL VL]\n"
+ "addvl x8, x8, #-6\n"
+ "add x17, x17, x21, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x26, x27, x24, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x9, x16, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x9, x14, LSL #1]\n"
+ "add x25, x17, x23, LSL #1\n"
+ "add x24, x25, x23, LSL #1\n"
+ "add x23, x24, x23, LSL #1\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z14, z19\n fmla z14.h, p3/M, z4.h, z9.h\n"
- "movprfx z31, z19\n fmla z31.h, p3/M, z8.h, z9.h\n"
- "whilelt p1.h, x16, %x[n_channels]\n"
- "inch x21\n"
- "movprfx z21, z19\n fmla z21.h, p3/M, z3.h, z9.h\n"
- "movprfx z22, z19\n fmla z22.h, p3/M, z1.h, z9.h\n"
- "inch x16\n"
+ "movprfx z14, z13\n fmla z14.h, p3/M, z4.h, z9.h\n"
+ "movprfx z19, z13\n fmla z19.h, p3/M, z8.h, z9.h\n"
+ "whilelt p1.h, x3, %x[n_channels]\n"
+ "inch x6\n"
+ "movprfx z18, z13\n fmla z18.h, p3/M, z3.h, z9.h\n"
+ "movprfx z26, z13\n fmla z26.h, p3/M, z1.h, z9.h\n"
+ "inch x3\n"
"mov p0.b, p2.b\n"
- "movprfx z20, z19\n fmla z20.h, p3/M, z0.h, z9.h\n"
- "fmla z14.h, p3/M, z5.h, z12.h\n"
+ "movprfx z15, z13\n fmla z15.h, p3/M, z0.h, z9.h\n"
+ "movprfx z30, z13\n fmla z30.h, p3/M, z7.h, z9.h\n"
"inch x20\n"
- "movprfx z13, z19\n fmla z13.h, p3/M, z7.h, z9.h\n"
- "movprfx z17, z19\n fmla z17.h, p3/M, z6.h, z9.h\n"
- "movprfx z27, z19\n fmla z27.h, p3/M, z5.h, z9.h\n"
- "movprfx z18, z19\n fmla z18.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x7, LSL #1]\n"
- "fmla z31.h, p3/M, z0.h, z10.h\n"
- "movprfx z30, z19\n fmla z30.h, p3/M, z2.h, z11.h\n"
- "ld1h { z29.h }, p2/Z, [x24]\n"
- "ld1h { z11.h }, p2/Z, [x24, x27, LSL #1]\n"
- "fmla z21.h, p3/M, z4.h, z12.h\n"
- "fmla z22.h, p3/M, z2.h, z12.h\n"
- "fmla z20.h, p3/M, z1.h, z12.h\n"
- "movprfx z23, z19\n fmla z23.h, p3/M, z6.h, z29.h\n"
- "ld1h { z10.h }, p2/Z, [x11, x14, LSL #1]\n"
+ "movprfx z28, z13\n fmla z28.h, p3/M, z6.h, z9.h\n"
+ "movprfx z21, z13\n fmla z21.h, p3/M, z5.h, z9.h\n"
+ "fmla z14.h, p3/M, z5.h, z12.h\n"
+ "movprfx z24, z13\n fmla z24.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x28, x16, LSL #1]\n"
+ "fmla z19.h, p3/M, z0.h, z10.h\n"
+ "movprfx z22, z13\n fmla z22.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x26]\n"
+ "ld1h { z10.h }, p2/Z, [x26, x11, LSL #1]\n"
+ "fmla z18.h, p3/M, z4.h, z12.h\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
+ "fmla z15.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z8.h, z12.h\n"
+ "movprfx z25, z13\n fmla z25.h, p3/M, z6.h, z11.h\n"
"fmla z14.h, p3/M, z7.h, z9.h\n"
- "fmla z13.h, p3/M, z8.h, z12.h\n"
- "fmla z17.h, p3/M, z7.h, z12.h\n"
- "fmla z30.h, p3/M, z6.h, z12.h\n"
- "movprfx z26, z19\n fmla z26.h, p3/M, z3.h, z12.h\n"
- "movprfx z28, z19\n fmla z28.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x8, x5, LSL #1]\n"
- "movprfx z24, z19\n fmla z24.h, p3/M, z8.h, z11.h\n"
- "fmla z21.h, p3/M, z6.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x8, x10, LSL #1]\n"
- "fmla z22.h, p3/M, z4.h, z9.h\n"
- "fmla z20.h, p3/M, z3.h, z9.h\n"
- "movprfx z25, z19\n fmla z25.h, p3/M, z1.h, z9.h\n"
- "movprfx z29, z19\n fmla z29.h, p3/M, z0.h, z9.h\n"
- "ld1h { z19.h }, p3/Z, [x17]\n"
- "fmla z27.h, p3/M, z8.h, z9.h\n"
+ "ld1h { z11.h }, p2/Z, [x28, x14, LSL #1]\n"
+ "fmla z28.h, p3/M, z7.h, z12.h\n"
+ "fmla z22.h, p3/M, z6.h, z12.h\n"
+ "movprfx z31, z13\n fmla z31.h, p3/M, z3.h, z12.h\n"
+ "movprfx z17, z13\n fmla z17.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x7, x4, LSL #1]\n"
+ "movprfx z20, z13\n fmla z20.h, p3/M, z8.h, z10.h\n"
+ "fmla z18.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z10.h }, p2/Z, [x7, x12, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z9.h\n"
+ "fmla z15.h, p3/M, z3.h, z9.h\n"
+ "movprfx z16, z13\n fmla z16.h, p3/M, z1.h, z9.h\n"
+ "movprfx z23, z13\n fmla z23.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z13.h }, p3/Z, [x8]\n"
+ "fmla z21.h, p3/M, z8.h, z9.h\n"
+ "fmla z24.h, p3/M, z5.h, z9.h\n"
+ "fmla z25.h, p3/M, z2.h, z9.h\n"
+ "fmla z14.h, p3/M, z8.h, z11.h\n"
+ "ld1h { z9.h }, p2/Z, [x10]\n"
+ "fmla z19.h, p3/M, z1.h, z12.h\n"
+ "fmla z30.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x11, LSL #1]\n"
+ "fmla z28.h, p3/M, z2.h, z10.h\n"
+ "fmla z22.h, p3/M, z1.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x27]\n"
+ "fmla z18.h, p3/M, z7.h, z11.h\n"
+ "fmla z31.h, p3/M, z6.h, z11.h\n"
+ "fmla z26.h, p3/M, z5.h, z11.h\n"
+ "fmla z15.h, p3/M, z4.h, z11.h\n"
+ "fmla z17.h, p3/M, z3.h, z11.h\n"
+ "fmla z16.h, p3/M, z2.h, z11.h\n"
+ "fmla z23.h, p3/M, z1.h, z11.h\n"
+ "fmla z20.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x10, x16, LSL #1]\n"
+ "fmla z21.h, p3/M, z0.h, z9.h\n"
+ "fmla z24.h, p3/M, z6.h, z10.h\n"
+ "fmla z25.h, p3/M, z3.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x27, x11, LSL #1]\n"
+ "fmla z19.h, p3/M, z3.h, z9.h\n"
+ "fmla z14.h, p3/M, z1.h, z11.h\n"
+ "fmla z22.h, p3/M, z5.h, z12.h\n"
+ "fmla z31.h, p3/M, z2.h, z12.h\n"
+ "fmla z30.h, p3/M, z4.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x14, LSL #1]\n"
+ "fmla z28.h, p3/M, z3.h, z11.h\n"
+ "fmla z18.h, p3/M, z0.h, z11.h\n"
+ "fmla z17.h, p3/M, z8.h, z10.h\n"
+ "fmla z20.h, p3/M, z5.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x4, LSL #1]\n"
+ "fmla z21.h, p3/M, z2.h, z11.h\n"
+ "fmla z14.h, p3/M, z2.h, z12.h\n"
+ "fmla z19.h, p3/M, z5.h, z11.h\n"
+ "fmla z30.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x4, LSL #1]\n"
+ "fmla z28.h, p3/M, z4.h, z12.h\n"
+ "fmla z22.h, p3/M, z3.h, z12.h\n"
+ "fmla z18.h, p3/M, z1.h, z12.h\n"
+ "fmla z31.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x9, x12, LSL #1]\n"
+ "fmla z25.h, p3/M, z7.h, z10.h\n"
+ "fmla z16.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x12, LSL #1]\n"
+ "fmla z21.h, p3/M, z4.h, z11.h\n"
+ "fmla z14.h, p3/M, z3.h, z11.h\n"
+ "fmla z24.h, p3/M, z1.h, z11.h\n"
+ "fmla z26.h, p3/M, z0.h, z11.h\n"
+ "fmla z19.h, p3/M, z7.h, z11.h\n"
+ "fmla z30.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x7, x16, LSL #1]\n"
+ "fmla z23.h, p3/M, z8.h, z12.h\n"
+ "fmla z20.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x28, x4, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z9.h\n"
+ "fmla z22.h, p3/M, z7.h, z9.h\n"
"fmla z18.h, p3/M, z5.h, z9.h\n"
- "fmla z23.h, p3/M, z2.h, z9.h\n"
- "fmla z14.h, p3/M, z8.h, z10.h\n"
- "ld1h { z9.h }, p2/Z, [x13]\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "fmla z13.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x27, LSL #1]\n"
- "fmla z17.h, p3/M, z2.h, z11.h\n"
- "fmla z30.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28]\n"
+ "fmla z31.h, p3/M, z4.h, z9.h\n"
+ "fmla z15.h, p3/M, z2.h, z9.h\n"
+ "fmla z17.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x7, x14, LSL #1]\n"
+ "addvl x7, x7, #1\n"
"fmla z21.h, p3/M, z7.h, z10.h\n"
- "fmla z26.h, p3/M, z6.h, z10.h\n"
- "fmla z22.h, p3/M, z5.h, z10.h\n"
- "fmla z20.h, p3/M, z4.h, z10.h\n"
- "fmla z28.h, p3/M, z3.h, z10.h\n"
- "fmla z25.h, p3/M, z2.h, z10.h\n"
- "fmla z29.h, p3/M, z1.h, z10.h\n"
+ "fmla z14.h, p3/M, z6.h, z10.h\n"
+ "fmla z24.h, p3/M, z4.h, z10.h\n"
+ "fmla z26.h, p3/M, z3.h, z10.h\n"
+ "fmla z25.h, p3/M, z1.h, z10.h\n"
+ "fmla z16.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z12.h }, p2/Z, [x28, x12, LSL #1]\n"
+ "fmla z19.h, p3/M, z2.h, z11.h\n"
+ "fmla z30.h, p3/M, z1.h, z11.h\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z10.h }, p2/Z, [x9]\n"
+ "fmla z22.h, p3/M, z0.h, z9.h\n"
+ "fmla z23.h, p3/M, z2.h, z12.h\n"
+ "fmla z18.h, p3/M, z8.h, z12.h\n"
+ "fmla z31.h, p3/M, z7.h, z12.h\n"
+ "fmla z15.h, p3/M, z5.h, z12.h\n"
+ "fmla z21.h, p3/M, z3.h, z10.h\n"
"fmla z24.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x7, LSL #1]\n"
- "fmla z27.h, p3/M, z0.h, z9.h\n"
- "fmla z18.h, p3/M, z6.h, z11.h\n"
- "fmla z23.h, p3/M, z3.h, z11.h\n"
- "fmla z14.h, p3/M, z1.h, z10.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x27, LSL #1]\n"
- "fmla z31.h, p3/M, z3.h, z9.h\n"
- "fmla z30.h, p3/M, z5.h, z12.h\n"
- "fmla z26.h, p3/M, z2.h, z12.h\n"
- "fmla z13.h, p3/M, z4.h, z10.h\n"
- "ld1h { z9.h }, p2/Z, [x13, x14, LSL #1]\n"
- "fmla z17.h, p3/M, z3.h, z10.h\n"
- "fmla z21.h, p3/M, z0.h, z10.h\n"
- "fmla z28.h, p3/M, z8.h, z11.h\n"
- "fmla z24.h, p3/M, z5.h, z11.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x5, LSL #1]\n"
- "fmla z27.h, p3/M, z2.h, z10.h\n"
- "fmla z14.h, p3/M, z2.h, z9.h\n"
- "fmla z31.h, p3/M, z5.h, z10.h\n"
- "fmla z13.h, p3/M, z5.h, z9.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x5, LSL #1]\n"
- "fmla z17.h, p3/M, z4.h, z9.h\n"
- "fmla z30.h, p3/M, z3.h, z9.h\n"
- "fmla z21.h, p3/M, z1.h, z9.h\n"
- "fmla z26.h, p3/M, z0.h, z9.h\n"
- "ld1h { z10.h }, p2/Z, [x12, x10, LSL #1]\n"
- "fmla z23.h, p3/M, z7.h, z12.h\n"
- "fmla z25.h, p3/M, z6.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x10, LSL #1]\n"
- "fmla z27.h, p3/M, z4.h, z11.h\n"
- "fmla z14.h, p3/M, z3.h, z11.h\n"
- "fmla z18.h, p3/M, z1.h, z11.h\n"
- "fmla z22.h, p3/M, z0.h, z11.h\n"
- "fmla z31.h, p3/M, z7.h, z11.h\n"
- "fmla z13.h, p3/M, z6.h, z11.h\n"
- "ld1h { z9.h }, p2/Z, [x8, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z8.h, z12.h\n"
- "fmla z24.h, p3/M, z7.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x5, LSL #1]\n"
- "fmla z17.h, p3/M, z8.h, z10.h\n"
- "fmla z30.h, p3/M, z7.h, z10.h\n"
- "fmla z21.h, p3/M, z5.h, z10.h\n"
- "fmla z26.h, p3/M, z4.h, z10.h\n"
- "fmla z20.h, p3/M, z2.h, z10.h\n"
- "fmla z28.h, p3/M, z1.h, z10.h\n"
- "ld1h { z11.h }, p2/Z, [x8, x14, LSL #1]\n"
- "addvl x8, x8, #1\n"
- "fmla z27.h, p3/M, z7.h, z12.h\n"
- "fmla z14.h, p3/M, z6.h, z12.h\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmla z22.h, p3/M, z3.h, z12.h\n"
- "fmla z23.h, p3/M, z1.h, z12.h\n"
- "fmla z25.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x11, x10, LSL #1]\n"
- "fmla z31.h, p3/M, z2.h, z9.h\n"
- "fmla z13.h, p3/M, z1.h, z9.h\n"
- "fmla z17.h, p3/M, z0.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x12]\n"
- "fmla z29.h, p3/M, z2.h, z12.h\n"
- "fmla z30.h, p3/M, z0.h, z11.h\n"
- "fmla z27.h, p3/M, z3.h, z9.h\n"
- "fmla z18.h, p3/M, z0.h, z9.h\n"
- "fmla z21.h, p3/M, z8.h, z12.h\n"
+ "fmla z17.h, p3/M, z4.h, z12.h\n"
+ "fmla z20.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z9.h\n"
+ "fmla z28.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z11.h }, p2/Z, [x9, x11, LSL #1]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z19.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x28]\n"
+ "fmla z16.h, p3/M, z4.h, z12.h\n"
+ "fmla z23.h, p3/M, z3.h, z12.h\n"
"fmla z26.h, p3/M, z7.h, z12.h\n"
- "fmla z20.h, p3/M, z5.h, z12.h\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
- "fmla z24.h, p3/M, z1.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x28, x7, LSL #1]\n"
- "fmla z13.h, p3/M, z2.h, z11.h\n"
- "fmla z17.h, p3/M, z1.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x12, x27, LSL #1]\n"
- "addvl x12, x12, #1\n"
- "fmla z31.h, p3/M, z6.h, z9.h\n"
- "ld1h { z12.h }, p2/Z, [x11]\n"
- "fmla z25.h, p3/M, z4.h, z10.h\n"
- "ld1h { z9.h }, p1/Z, [x12, x7, LSL #1]\n"
- "fmla z29.h, p3/M, z3.h, z10.h\n"
- "fmla z30.h, p3/M, z8.h, z11.h\n"
- "fmla z26.h, p3/M, z5.h, z11.h\n"
- "fmla z28.h, p3/M, z2.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x11, x27, LSL #1]\n"
- "addvl x11, x11, #1\n"
- "fmla z27.h, p3/M, z6.h, z12.h\n"
- "fmla z18.h, p3/M, z3.h, z12.h\n"
- "fmla z23.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x7, LSL #1]\n"
- "fmla z24.h, p3/M, z2.h, z11.h\n"
- "fmla z25.h, p3/M, z7.h, z12.h\n"
- "fmla z29.h, p3/M, z6.h, z12.h\n"
- "fmla z18.h, p3/M, z8.h, z10.h\n"
- "fmla z22.h, p3/M, z7.h, z10.h\n"
- "fmla z20.h, p3/M, z6.h, z10.h\n"
- "fmla z23.h, p3/M, z5.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x28, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z5.h, z11.h\n"
- "fmla z25.h, p3/M, z5.h, z10.h\n"
- "fmla z29.h, p3/M, z4.h, z10.h\n"
- "fmla z24.h, p3/M, z3.h, z10.h\n"
- "fmla z26.h, p3/M, z8.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x24, x14, LSL #1]\n"
- "fmla z23.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x13, x5, LSL #1]\n"
- "fmla z22.h, p3/M, z8.h, z10.h\n"
- "fmla z20.h, p3/M, z7.h, z10.h\n"
- "addvl x24, x24, #1\n"
- "fmla z28.h, p3/M, z6.h, z10.h\n"
- "fmla z25.h, p3/M, z8.h, z11.h\n"
- "ld1h { z10.h }, p2/Z, [x13, x10, LSL #1]\n"
- "addvl x13, x13, #1\n"
- "fmla z29.h, p3/M, z7.h, z11.h\n"
- "fmla z24.h, p3/M, z6.h, z11.h\n"
- "ld1h { z11.h }, p2/Z, [x28, x5, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z12.h\n"
- "fmla z13.h, p3/M, z3.h, z12.h\n"
- "fmax z31.h, p3/M, z31.h, z15.h\n"
- "fmax z13.h, p3/M, z13.h, z15.h\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "fmla z14.h, p3/M, z0.h, z12.h\n"
- "ld1h { z0.h }, p2/Z, [x28, x10, LSL #1]\n"
- "fmax z27.h, p3/M, z27.h, z15.h\n"
- "fmla z17.h, p3/M, z5.h, z10.h\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "fmax z17.h, p3/M, z17.h, z15.h\n"
- "fmax z30.h, p3/M, z30.h, z15.h\n"
- "fmla z21.h, p3/M, z2.h, z10.h\n"
- "fmla z26.h, p3/M, z1.h, z10.h\n"
- "fmax z14.h, p3/M, z14.h, z15.h\n"
- "fmax z21.h, p3/M, z21.h, z15.h\n"
- "fmla z18.h, p3/M, z7.h, z11.h\n"
- "fmla z22.h, p3/M, z6.h, z11.h\n"
- "fmax z26.h, p3/M, z26.h, z15.h\n"
- "fmax z18.h, p3/M, z18.h, z15.h\n"
- "fmla z23.h, p3/M, z4.h, z11.h\n"
- "fmla z25.h, p3/M, z3.h, z11.h\n"
- "fmax z22.h, p3/M, z22.h, z15.h\n"
- "fmax z23.h, p3/M, z23.h, z15.h\n"
- "fmla z20.h, p3/M, z8.h, z0.h\n"
- "fmla z28.h, p3/M, z7.h, z0.h\n"
- "fmax z20.h, p3/M, z20.h, z15.h\n"
- "fmax z28.h, p3/M, z28.h, z15.h\n"
- "fmla z29.h, p3/M, z5.h, z0.h\n"
- "fmla z24.h, p3/M, z4.h, z0.h\n"
- "fmax z25.h, p3/M, z25.h, z15.h\n"
- "fmax z29.h, p3/M, z29.h, z15.h\n"
- "fmax z24.h, p3/M, z24.h, z15.h\n"
- "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
- "cmp x16, %x[n_channels]\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
- "fmin z13.h, p3/M, z13.h, z16.h\n"
- "fmin z17.h, p3/M, z17.h, z16.h\n"
- "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "ld1h { z10.h }, p1/Z, [x8]\n"
- "fmin z27.h, p3/M, z27.h, z16.h\n"
- "fmin z14.h, p3/M, z14.h, z16.h\n"
- "ld1h { z11.h }, p1/Z, [x8, x27, LSL #1]\n"
- "ld1h { z12.h }, p1/Z, [x12, x14, LSL #1]\n"
- "fmin z21.h, p3/M, z21.h, z16.h\n"
- "fmin z26.h, p3/M, z26.h, z16.h\n"
- "st1h { z31.h }, p0, [x15]\n"
- "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
- "fmin z18.h, p3/M, z18.h, z16.h\n"
- "fmin z22.h, p3/M, z22.h, z16.h\n"
- "st1h { z13.h }, p0, [x15, x6, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
- "fmin z20.h, p3/M, z20.h, z16.h\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "st1h { z17.h }, p0, [x15, x25, LSL #1]\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- "fmin z25.h, p3/M, z25.h, z16.h\n"
- "st1h { z30.h }, p0, [x15, x22, LSL #1]\n"
- "fmin z29.h, p3/M, z29.h, z16.h\n"
- "fmin z24.h, p3/M, z24.h, z16.h\n"
- "st1h { z27.h }, p0, [x9]\n"
+ "fmla z22.h, p3/M, z8.h, z11.h\n"
+ "fmla z31.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z9.h }, p1/Z, [x9, x16, LSL #1]\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "fmla z21.h, p3/M, z6.h, z10.h\n"
+ "ld1h { z11.h }, p2/Z, [x28, x11, LSL #1]\n"
"addvl x28, x28, #1\n"
- "st1h { z14.h }, p0, [x9, x6, LSL #1]\n"
- "addvl x15, x15, #1\n"
- "st1h { z21.h }, p0, [x9, x25, LSL #1]\n"
- "addvl x17, x17, #-6\n"
- "st1h { z26.h }, p0, [x9, x22, LSL #1]\n"
- "addvl x9, x9, #1\n"
- "st1h { z18.h }, p0, [x26]\n"
- "st1h { z22.h }, p0, [x26, x6, LSL #1]\n"
- "st1h { z20.h }, p0, [x26, x25, LSL #1]\n"
- "st1h { z28.h }, p0, [x26, x22, LSL #1]\n"
+ "fmla z24.h, p3/M, z3.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "fmla z15.h, p3/M, z6.h, z12.h\n"
+ "fmla z20.h, p3/M, z2.h, z11.h\n"
+ "fmla z31.h, p3/M, z8.h, z11.h\n"
+ "fmla z16.h, p3/M, z7.h, z10.h\n"
+ "fmla z23.h, p3/M, z6.h, z10.h\n"
+ "fmla z17.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x14, LSL #1]\n"
"addvl x26, x26, #1\n"
- "st1h { z23.h }, p0, [x23]\n"
- "st1h { z25.h }, p0, [x23, x6, LSL #1]\n"
- "st1h { z29.h }, p0, [x23, x25, LSL #1]\n"
- "st1h { z24.h }, p0, [x23, x22, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z12.h\n"
+ "fmla z25.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x14, LSL #1]\n"
+ "fmla z16.h, p3/M, z5.h, z12.h\n"
+ "fmla z23.h, p3/M, z4.h, z12.h\n"
+ "fmla z20.h, p3/M, z3.h, z12.h\n"
+ "fmla z26.h, p3/M, z8.h, z12.h\n"
+ "fmla z15.h, p3/M, z7.h, z12.h\n"
+ "fmla z17.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x10, x12, LSL #1]\n"
+ "fmla z25.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x10, x4, LSL #1]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.h, p3/M, z8.h, z11.h\n"
+ "fmla z23.h, p3/M, z7.h, z11.h\n"
+ "fmla z20.h, p3/M, z6.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x27, x4, LSL #1]\n"
+ "fmla z28.h, p3/M, z5.h, z12.h\n"
+ "fmla z22.h, p3/M, z4.h, z12.h\n"
+ "fmla z19.h, p3/M, z4.h, z10.h\n"
+ "fmla z30.h, p3/M, z3.h, z10.h\n"
+ "fmla z21.h, p3/M, z1.h, z10.h\n"
+ "fmla z14.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z10.h }, p2/Z, [x27, x12, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z18.h, p3/M, z2.h, z12.h\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z1.h }, p3/Z, [x8, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z24.h, p3/M, z7.h, z11.h\n"
+ "fmla z26.h, p3/M, z6.h, z11.h\n"
+ "fmax z28.h, p3/M, z28.h, z27.h\n"
+ "fmax z22.h, p3/M, z22.h, z27.h\n"
+ "fmla z25.h, p3/M, z4.h, z11.h\n"
+ "fmla z16.h, p3/M, z3.h, z11.h\n"
+ "fmax z19.h, p3/M, z19.h, z27.h\n"
+ "fmax z30.h, p3/M, z30.h, z27.h\n"
+ "fmla z15.h, p3/M, z8.h, z10.h\n"
+ "fmla z17.h, p3/M, z7.h, z10.h\n"
+ "fmax z21.h, p3/M, z21.h, z27.h\n"
+ "fmax z14.h, p3/M, z14.h, z27.h\n"
+ "fmla z23.h, p3/M, z5.h, z10.h\n"
+ "fmla z20.h, p3/M, z4.h, z10.h\n"
+ "fmax z18.h, p3/M, z18.h, z27.h\n"
+ "fmax z31.h, p3/M, z31.h, z27.h\n"
+ "fmax z24.h, p3/M, z24.h, z27.h\n"
+ "fmax z26.h, p3/M, z26.h, z27.h\n"
+ "ld1h { z3.h }, p3/Z, [x8, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmax z25.h, p3/M, z25.h, z27.h\n"
+ "fmax z16.h, p3/M, z16.h, z27.h\n"
+ "ld1h { z5.h }, p3/Z, [x8, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x8, #7, MUL VL]\n"
+ "fmax z15.h, p3/M, z15.h, z27.h\n"
+ "fmax z17.h, p3/M, z17.h, z27.h\n"
+ "ld1h { z10.h }, p1/Z, [x7]\n"
+ "ld1h { z11.h }, p1/Z, [x7, x11, LSL #1]\n"
+ "fmax z23.h, p3/M, z23.h, z27.h\n"
+ "fmax z20.h, p3/M, z20.h, z27.h\n"
+ "ld1h { z12.h }, p1/Z, [x9, x14, LSL #1]\n"
+ "addvl x8, x8, #16\n"
+ "whilelt p2.h, x6, %x[n_channels]\n"
+ "cmp x3, %x[n_channels]\n"
+ "fmin z19.h, p3/M, z19.h, z29.h\n"
+ "fmin z30.h, p3/M, z30.h, z29.h\n"
+ "fmin z28.h, p3/M, z28.h, z29.h\n"
+ "fmin z22.h, p3/M, z22.h, z29.h\n"
+ "fmin z21.h, p3/M, z21.h, z29.h\n"
+ "ld1h { z7.h }, p3/Z, [x8, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x8, #-7, MUL VL]\n"
+ "fmin z14.h, p3/M, z14.h, z29.h\n"
+ "fmin z18.h, p3/M, z18.h, z29.h\n"
+ "st1h { z19.h }, p0, [x17]\n"
+ "fmin z31.h, p3/M, z31.h, z29.h\n"
+ "fmin z24.h, p3/M, z24.h, z29.h\n"
+ "st1h { z30.h }, p0, [x17, x5, LSL #1]\n"
+ "fmin z26.h, p3/M, z26.h, z29.h\n"
+ "fmin z15.h, p3/M, z15.h, z29.h\n"
+ "st1h { z28.h }, p0, [x17, x15, LSL #1]\n"
+ "fmin z17.h, p3/M, z17.h, z29.h\n"
+ "fmin z25.h, p3/M, z25.h, z29.h\n"
+ "st1h { z22.h }, p0, [x17, x13, LSL #1]\n"
+ "fmin z16.h, p3/M, z16.h, z29.h\n"
+ "fmin z23.h, p3/M, z23.h, z29.h\n"
+ "st1h { z21.h }, p0, [x25]\n"
+ "fmin z20.h, p3/M, z20.h, z29.h\n"
+ "addvl x27, x27, #1\n"
+ "st1h { z14.h }, p0, [x25, x5, LSL #1]\n"
+ "st1h { z18.h }, p0, [x25, x15, LSL #1]\n"
+ "addvl x17, x17, #1\n"
+ "addvl x8, x8, #-6\n"
+ "st1h { z31.h }, p0, [x25, x13, LSL #1]\n"
+ "addvl x25, x25, #1\n"
+ "st1h { z24.h }, p0, [x24]\n"
+ "st1h { z26.h }, p0, [x24, x5, LSL #1]\n"
+ "st1h { z15.h }, p0, [x24, x15, LSL #1]\n"
+ "st1h { z17.h }, p0, [x24, x13, LSL #1]\n"
+ "addvl x24, x24, #1\n"
+ "st1h { z25.h }, p0, [x23]\n"
+ "st1h { z16.h }, p0, [x23, x5, LSL #1]\n"
+ "st1h { z23.h }, p0, [x23, x15, LSL #1]\n"
+ "st1h { z20.h }, p0, [x23, x13, LSL #1]\n"
"addvl x23, x23, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z14, z19\n fmla z14.h, p3/M, z4.h, z9.h\n"
- "movprfx z31, z19\n fmla z31.h, p3/M, z8.h, z9.h\n"
- "ldr x4, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z30, z19\n fmla z30.h, p3/M, z3.h, z9.h\n"
- "movprfx z13, z19\n fmla z13.h, p3/M, z1.h, z9.h\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "add x4, x4, #0x1\n"
- "movprfx z20, z19\n fmla z20.h, p3/M, z0.h, z9.h\n"
- "fmla z14.h, p3/M, z5.h, z12.h\n"
- "cmp x4, x20\n"
- "add x21, x16, #0x1\n"
- "movprfx z18, z19\n fmla z18.h, p3/M, z7.h, z9.h\n"
- "movprfx z28, z19\n fmla z28.h, p3/M, z6.h, z9.h\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x16, x16, x21, LT\n"
- "movprfx z17, z19\n fmla z17.h, p3/M, z5.h, z9.h\n"
- "movprfx z26, z19\n fmla z26.h, p3/M, z2.h, z9.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x7, LSL #1]\n"
+ "movprfx z14, z13\n fmla z14.h, p3/M, z4.h, z9.h\n"
+ "movprfx z18, z13\n fmla z18.h, p3/M, z8.h, z9.h\n"
+ "ldr x2, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x1, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z23, z13\n fmla z23.h, p3/M, z3.h, z9.h\n"
+ "movprfx z30, z13\n fmla z30.h, p3/M, z1.h, z9.h\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z20, z13\n fmla z20.h, p3/M, z0.h, z9.h\n"
+ "movprfx z25, z13\n fmla z25.h, p3/M, z7.h, z9.h\n"
"mov p0.b, p2.b\n"
- "fmla z31.h, p3/M, z0.h, z10.h\n"
- "movprfx z27, z19\n fmla z27.h, p3/M, z2.h, z11.h\n"
- "ld1h { z29.h }, p2/Z, [x24]\n"
- "ld1h { z21.h }, p2/Z, [x24, x27, LSL #1]\n"
- "fmla z30.h, p3/M, z4.h, z12.h\n"
- "fmla z13.h, p3/M, z2.h, z12.h\n"
- "csel x4, x4, XZR, LT\n"
- "cmp x16, x20\n"
+ "movprfx z19, z13\n fmla z19.h, p3/M, z6.h, z9.h\n"
+ "movprfx z26, z13\n fmla z26.h, p3/M, z5.h, z9.h\n"
+ "add x2, x2, #0x1\n"
+ "add x20, x1, #0x1\n"
+ "fmla z14.h, p3/M, z5.h, z12.h\n"
+ "movprfx z28, z13\n fmla z28.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z15.h }, p2/Z, [x28, x16, LSL #1]\n"
+ "cmp x2, x22\n"
+ "fmla z18.h, p3/M, z0.h, z10.h\n"
+ "movprfx z9, z13\n fmla z9.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x26]\n"
+ "ld1h { z24.h }, p2/Z, [x26, x11, LSL #1]\n"
+ "fmla z23.h, p3/M, z4.h, z12.h\n"
+ "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "csel x1, x1, x20, LT\n"
+ "csel x2, x2, XZR, LT\n"
"fmla z20.h, p3/M, z1.h, z12.h\n"
- "movprfx z10, z19\n fmla z10.h, p3/M, z6.h, z29.h\n"
- "ld1h { z29.h }, p2/Z, [x11, x14, LSL #1]\n"
- "fmla z14.h, p3/M, z7.h, z9.h\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmla z28.h, p3/M, z7.h, z12.h\n"
- "fmla z27.h, p3/M, z6.h, z12.h\n"
- "movprfx z11, z19\n fmla z11.h, p3/M, z3.h, z12.h\n"
- "movprfx z25, z19\n fmla z25.h, p3/M, z0.h, z12.h\n"
- "ld1h { z22.h }, p2/Z, [x8, x5, LSL #1]\n"
- "movprfx z24, z19\n fmla z24.h, p3/M, z8.h, z21.h\n"
- "fmla z30.h, p3/M, z6.h, z9.h\n"
- "ld1h { z21.h }, p2/Z, [x8, x10, LSL #1]\n"
- "fmla z13.h, p3/M, z4.h, z9.h\n"
- "fmla z20.h, p3/M, z3.h, z9.h\n"
- "movprfx z12, z19\n fmla z12.h, p3/M, z1.h, z9.h\n"
- "movprfx z23, z19\n fmla z23.h, p3/M, z0.h, z9.h\n"
- "fmla z17.h, p3/M, z8.h, z9.h\n"
- "fmla z26.h, p3/M, z5.h, z9.h\n"
- "fmla z10.h, p3/M, z2.h, z9.h\n"
- "fmla z14.h, p3/M, z8.h, z29.h\n"
- "ld1h { z9.h }, p2/Z, [x13]\n"
- "fmla z31.h, p3/M, z1.h, z22.h\n"
- "fmla z18.h, p3/M, z0.h, z22.h\n"
- "ld1h { z22.h }, p2/Z, [x13, x27, LSL #1]\n"
- "fmla z28.h, p3/M, z2.h, z21.h\n"
- "fmla z27.h, p3/M, z1.h, z21.h\n"
- "ld1h { z19.h }, p2/Z, [x28]\n"
- "fmla z30.h, p3/M, z7.h, z29.h\n"
- "fmla z11.h, p3/M, z6.h, z29.h\n"
- "fmla z13.h, p3/M, z5.h, z29.h\n"
- "fmla z20.h, p3/M, z4.h, z29.h\n"
- "fmla z25.h, p3/M, z3.h, z29.h\n"
- "fmla z12.h, p3/M, z2.h, z29.h\n"
- "fmla z23.h, p3/M, z1.h, z29.h\n"
- "fmla z24.h, p3/M, z0.h, z29.h\n"
- "ld1h { z21.h }, p2/Z, [x13, x7, LSL #1]\n"
- "fmla z17.h, p3/M, z0.h, z9.h\n"
- "fmla z26.h, p3/M, z6.h, z19.h\n"
- "fmla z10.h, p3/M, z3.h, z19.h\n"
- "fmla z14.h, p3/M, z1.h, z21.h\n"
- "ld1h { z19.h }, p2/Z, [x28, x27, LSL #1]\n"
- "fmla z31.h, p3/M, z3.h, z9.h\n"
- "fmla z27.h, p3/M, z5.h, z22.h\n"
- "fmla z11.h, p3/M, z2.h, z22.h\n"
- "fmla z18.h, p3/M, z4.h, z21.h\n"
- "ld1h { z29.h }, p2/Z, [x13, x14, LSL #1]\n"
- "fmla z28.h, p3/M, z3.h, z21.h\n"
- "fmla z30.h, p3/M, z0.h, z21.h\n"
- "fmla z25.h, p3/M, z8.h, z19.h\n"
- "fmla z24.h, p3/M, z5.h, z19.h\n"
- "ld1h { z19.h }, p2/Z, [x24, x5, LSL #1]\n"
- "fmla z17.h, p3/M, z2.h, z21.h\n"
- "fmla z14.h, p3/M, z2.h, z29.h\n"
- "fmla z31.h, p3/M, z5.h, z21.h\n"
- "fmla z18.h, p3/M, z5.h, z29.h\n"
- "ld1h { z22.h }, p2/Z, [x12, x5, LSL #1]\n"
- "fmla z28.h, p3/M, z4.h, z29.h\n"
- "fmla z27.h, p3/M, z3.h, z29.h\n"
- "fmla z30.h, p3/M, z1.h, z29.h\n"
- "fmla z11.h, p3/M, z0.h, z29.h\n"
- "ld1h { z21.h }, p2/Z, [x12, x10, LSL #1]\n"
- "fmla z10.h, p3/M, z7.h, z19.h\n"
- "fmla z12.h, p3/M, z6.h, z19.h\n"
- "ld1h { z19.h }, p2/Z, [x24, x10, LSL #1]\n"
- "fmla z17.h, p3/M, z4.h, z22.h\n"
- "fmla z14.h, p3/M, z3.h, z22.h\n"
- "fmla z26.h, p3/M, z1.h, z22.h\n"
- "fmla z13.h, p3/M, z0.h, z22.h\n"
- "fmla z31.h, p3/M, z7.h, z22.h\n"
- "fmla z18.h, p3/M, z6.h, z22.h\n"
- "ld1h { z29.h }, p2/Z, [x8, x7, LSL #1]\n"
- "fmla z23.h, p3/M, z8.h, z19.h\n"
- "fmla z24.h, p3/M, z7.h, z19.h\n"
- "ld1h { z19.h }, p2/Z, [x11, x5, LSL #1]\n"
- "fmla z28.h, p3/M, z8.h, z21.h\n"
- "fmla z27.h, p3/M, z7.h, z21.h\n"
- "fmla z30.h, p3/M, z5.h, z21.h\n"
- "fmla z11.h, p3/M, z4.h, z21.h\n"
+ "fmla z25.h, p3/M, z8.h, z12.h\n"
+ "movprfx z22, z13\n fmla z22.h, p3/M, z6.h, z17.h\n"
+ "fmla z14.h, p3/M, z7.h, z15.h\n"
+ "ld1h { z10.h }, p2/Z, [x28, x14, LSL #1]\n"
+ "fmla z19.h, p3/M, z7.h, z12.h\n"
+ "fmla z9.h, p3/M, z6.h, z12.h\n"
+ "cmp x1, x21\n"
+ "movprfx z31, z13\n fmla z31.h, p3/M, z3.h, z12.h\n"
+ "movprfx z11, z13\n fmla z11.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z16.h }, p2/Z, [x7, x4, LSL #1]\n"
+ "movprfx z12, z13\n fmla z12.h, p3/M, z8.h, z24.h\n"
+ "fmla z23.h, p3/M, z6.h, z15.h\n"
+ "ld1h { z17.h }, p2/Z, [x7, x12, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z15.h\n"
+ "fmla z20.h, p3/M, z3.h, z15.h\n"
+ "movprfx z24, z13\n fmla z24.h, p3/M, z1.h, z15.h\n"
+ "fmla z13.h, p3/M, z0.h, z15.h\n"
+ "fmla z26.h, p3/M, z8.h, z15.h\n"
+ "fmla z28.h, p3/M, z5.h, z15.h\n"
+ "fmla z22.h, p3/M, z2.h, z15.h\n"
+ "fmla z14.h, p3/M, z8.h, z10.h\n"
+ "ld1h { z15.h }, p2/Z, [x10]\n"
+ "fmla z18.h, p3/M, z1.h, z16.h\n"
+ "fmla z25.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z21.h }, p2/Z, [x10, x11, LSL #1]\n"
+ "fmla z19.h, p3/M, z2.h, z17.h\n"
+ "fmla z9.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z16.h }, p2/Z, [x27]\n"
+ "fmla z23.h, p3/M, z7.h, z10.h\n"
+ "fmla z31.h, p3/M, z6.h, z10.h\n"
+ "fmla z30.h, p3/M, z5.h, z10.h\n"
+ "fmla z20.h, p3/M, z4.h, z10.h\n"
+ "fmla z11.h, p3/M, z3.h, z10.h\n"
+ "fmla z24.h, p3/M, z2.h, z10.h\n"
+ "fmla z13.h, p3/M, z1.h, z10.h\n"
+ "fmla z12.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z17.h }, p2/Z, [x10, x16, LSL #1]\n"
+ "fmla z26.h, p3/M, z0.h, z15.h\n"
+ "fmla z28.h, p3/M, z6.h, z16.h\n"
+ "fmla z22.h, p3/M, z3.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x11, LSL #1]\n"
+ "fmla z18.h, p3/M, z3.h, z15.h\n"
+ "fmla z14.h, p3/M, z1.h, z17.h\n"
+ "fmla z9.h, p3/M, z5.h, z21.h\n"
+ "fmla z31.h, p3/M, z2.h, z21.h\n"
+ "fmla z25.h, p3/M, z4.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x10, x14, LSL #1]\n"
+ "fmla z19.h, p3/M, z3.h, z17.h\n"
+ "fmla z23.h, p3/M, z0.h, z17.h\n"
+ "fmla z11.h, p3/M, z8.h, z16.h\n"
+ "fmla z12.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x26, x4, LSL #1]\n"
+ "fmla z26.h, p3/M, z2.h, z17.h\n"
+ "fmla z14.h, p3/M, z2.h, z21.h\n"
+ "fmla z18.h, p3/M, z5.h, z17.h\n"
+ "fmla z25.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z17.h }, p2/Z, [x9, x4, LSL #1]\n"
+ "fmla z19.h, p3/M, z4.h, z21.h\n"
+ "fmla z9.h, p3/M, z3.h, z21.h\n"
+ "fmla z23.h, p3/M, z1.h, z21.h\n"
+ "fmla z31.h, p3/M, z0.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x9, x12, LSL #1]\n"
+ "fmla z22.h, p3/M, z7.h, z16.h\n"
+ "fmla z24.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x26, x12, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z17.h\n"
+ "fmla z14.h, p3/M, z3.h, z17.h\n"
+ "fmla z28.h, p3/M, z1.h, z17.h\n"
+ "fmla z30.h, p3/M, z0.h, z17.h\n"
+ "fmla z18.h, p3/M, z7.h, z17.h\n"
+ "fmla z25.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x7, x16, LSL #1]\n"
+ "fmla z13.h, p3/M, z8.h, z16.h\n"
+ "fmla z12.h, p3/M, z7.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x4, LSL #1]\n"
+ "fmla z19.h, p3/M, z8.h, z21.h\n"
+ "fmla z9.h, p3/M, z7.h, z21.h\n"
+ "fmla z23.h, p3/M, z5.h, z21.h\n"
+ "fmla z31.h, p3/M, z4.h, z21.h\n"
"fmla z20.h, p3/M, z2.h, z21.h\n"
- "fmla z25.h, p3/M, z1.h, z21.h\n"
- "ld1h { z22.h }, p2/Z, [x8, x14, LSL #1]\n"
- "fmla z17.h, p3/M, z7.h, z19.h\n"
- "fmla z14.h, p3/M, z6.h, z19.h\n"
- "fmla z26.h, p3/M, z4.h, z19.h\n"
- "fmla z13.h, p3/M, z3.h, z19.h\n"
- "fmla z10.h, p3/M, z1.h, z19.h\n"
- "fmla z12.h, p3/M, z0.h, z19.h\n"
- "ld1h { z21.h }, p2/Z, [x11, x10, LSL #1]\n"
- "fmla z31.h, p3/M, z2.h, z29.h\n"
- "fmla z18.h, p3/M, z1.h, z29.h\n"
- "fmla z28.h, p3/M, z0.h, z29.h\n"
- "ld1h { z29.h }, p2/Z, [x12]\n"
- "fmla z23.h, p3/M, z2.h, z21.h\n"
- "fmla z27.h, p3/M, z0.h, z22.h\n"
- "fmla z17.h, p3/M, z3.h, z29.h\n"
- "fmla z26.h, p3/M, z0.h, z29.h\n"
- "fmla z30.h, p3/M, z8.h, z21.h\n"
- "fmla z11.h, p3/M, z7.h, z21.h\n"
- "fmla z20.h, p3/M, z5.h, z21.h\n"
- "fmla z25.h, p3/M, z4.h, z21.h\n"
- "fmla z24.h, p3/M, z1.h, z21.h\n"
- "ld1h { z19.h }, p2/Z, [x28, x7, LSL #1]\n"
- "fmla z18.h, p3/M, z2.h, z22.h\n"
- "fmla z28.h, p3/M, z1.h, z22.h\n"
- "ld1h { z21.h }, p2/Z, [x12, x27, LSL #1]\n"
- "fmla z31.h, p3/M, z6.h, z29.h\n"
- "ld1h { z29.h }, p2/Z, [x11]\n"
- "fmla z12.h, p3/M, z4.h, z19.h\n"
- "fmla z23.h, p3/M, z3.h, z19.h\n"
- "fmla z27.h, p3/M, z8.h, z21.h\n"
- "fmla z11.h, p3/M, z5.h, z21.h\n"
+ "fmla z11.h, p3/M, z1.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x7, x14, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z16.h\n"
+ "fmla z14.h, p3/M, z6.h, z16.h\n"
+ "fmla z28.h, p3/M, z4.h, z16.h\n"
+ "fmla z30.h, p3/M, z3.h, z16.h\n"
+ "fmla z22.h, p3/M, z1.h, z16.h\n"
+ "fmla z24.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x12, LSL #1]\n"
+ "fmla z18.h, p3/M, z2.h, z17.h\n"
+ "fmla z25.h, p3/M, z1.h, z17.h\n"
+ "fmla z19.h, p3/M, z0.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x9]\n"
+ "fmla z9.h, p3/M, z0.h, z21.h\n"
+ "fmla z13.h, p3/M, z2.h, z16.h\n"
+ "fmla z23.h, p3/M, z8.h, z16.h\n"
+ "fmla z31.h, p3/M, z7.h, z16.h\n"
+ "fmla z20.h, p3/M, z5.h, z16.h\n"
+ "fmla z26.h, p3/M, z3.h, z17.h\n"
+ "fmla z28.h, p3/M, z0.h, z17.h\n"
+ "fmla z11.h, p3/M, z4.h, z16.h\n"
+ "fmla z12.h, p3/M, z1.h, z16.h\n"
+ "ld1h { z15.h }, p2/Z, [x27, x16, LSL #1]\n"
"fmla z25.h, p3/M, z2.h, z21.h\n"
- "ld1h { z9.h }, p2/Z, [x11, x27, LSL #1]\n"
- "fmla z17.h, p3/M, z6.h, z29.h\n"
- "fmla z26.h, p3/M, z3.h, z29.h\n"
- "fmla z10.h, p3/M, z0.h, z29.h\n"
- "ld1h { z22.h }, p2/Z, [x24, x7, LSL #1]\n"
- "fmla z24.h, p3/M, z2.h, z9.h\n"
- "fmla z12.h, p3/M, z7.h, z22.h\n"
- "fmla z23.h, p3/M, z6.h, z22.h\n"
- "fmla z26.h, p3/M, z8.h, z19.h\n"
- "fmla z13.h, p3/M, z7.h, z19.h\n"
- "fmla z20.h, p3/M, z6.h, z19.h\n"
- "fmla z10.h, p3/M, z5.h, z19.h\n"
- "ld1h { z21.h }, p2/Z, [x28, x14, LSL #1]\n"
- "fmla z25.h, p3/M, z5.h, z9.h\n"
- "fmla z12.h, p3/M, z5.h, z21.h\n"
- "fmla z23.h, p3/M, z4.h, z21.h\n"
- "fmla z24.h, p3/M, z3.h, z21.h\n"
- "fmla z11.h, p3/M, z8.h, z9.h\n"
- "ld1h { z19.h }, p2/Z, [x24, x14, LSL #1]\n"
- "fmla z10.h, p3/M, z8.h, z22.h\n"
- "ld1h { z22.h }, p2/Z, [x13, x5, LSL #1]\n"
- "fmla z13.h, p3/M, z8.h, z21.h\n"
- "fmla z20.h, p3/M, z7.h, z21.h\n"
- "fmla z25.h, p3/M, z6.h, z21.h\n"
- "fmla z12.h, p3/M, z8.h, z19.h\n"
- "ld1h { z29.h }, p2/Z, [x13, x10, LSL #1]\n"
- "fmla z23.h, p3/M, z7.h, z19.h\n"
- "fmla z24.h, p3/M, z6.h, z19.h\n"
- "ld1h { z21.h }, p2/Z, [x28, x5, LSL #1]\n"
- "fmla z31.h, p3/M, z4.h, z22.h\n"
- "fmla z18.h, p3/M, z3.h, z22.h\n"
- "fmax z31.h, p3/M, z31.h, z15.h\n"
- "fmax z18.h, p3/M, z18.h, z15.h\n"
- "fmla z17.h, p3/M, z1.h, z22.h\n"
- "fmla z14.h, p3/M, z0.h, z22.h\n"
- "ld1h { z9.h }, p2/Z, [x28, x10, LSL #1]\n"
- "fmax z17.h, p3/M, z17.h, z15.h\n"
- "fmla z28.h, p3/M, z5.h, z29.h\n"
- "fmla z27.h, p3/M, z4.h, z29.h\n"
- "fmax z28.h, p3/M, z28.h, z15.h\n"
- "fmax z27.h, p3/M, z27.h, z15.h\n"
- "fmla z30.h, p3/M, z2.h, z29.h\n"
- "fmla z11.h, p3/M, z1.h, z29.h\n"
- "fmax z14.h, p3/M, z14.h, z15.h\n"
- "fmax z30.h, p3/M, z30.h, z15.h\n"
- "fmla z26.h, p3/M, z7.h, z21.h\n"
+ "fmla z19.h, p3/M, z1.h, z21.h\n"
+ "ld1h { z16.h }, p2/Z, [x9, x11, LSL #1]\n"
+ "fmla z18.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x28]\n"
+ "fmla z24.h, p3/M, z4.h, z15.h\n"
+ "fmla z13.h, p3/M, z3.h, z15.h\n"
+ "fmla z30.h, p3/M, z7.h, z15.h\n"
+ "fmla z9.h, p3/M, z8.h, z16.h\n"
+ "fmla z31.h, p3/M, z5.h, z16.h\n"
+ "fmla z11.h, p3/M, z2.h, z16.h\n"
+ "fmla z26.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x11, LSL #1]\n"
+ "fmla z28.h, p3/M, z3.h, z17.h\n"
+ "fmla z22.h, p3/M, z0.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "fmla z20.h, p3/M, z6.h, z15.h\n"
+ "fmla z12.h, p3/M, z2.h, z16.h\n"
+ "fmla z31.h, p3/M, z8.h, z16.h\n"
+ "fmla z24.h, p3/M, z7.h, z21.h\n"
"fmla z13.h, p3/M, z6.h, z21.h\n"
- "fmax z11.h, p3/M, z11.h, z15.h\n"
- "fmax z26.h, p3/M, z26.h, z15.h\n"
- "fmla z10.h, p3/M, z4.h, z21.h\n"
- "fmla z12.h, p3/M, z3.h, z21.h\n"
- "fmax z13.h, p3/M, z13.h, z15.h\n"
- "fmax z10.h, p3/M, z10.h, z15.h\n"
- "fmla z20.h, p3/M, z8.h, z9.h\n"
- "fmla z25.h, p3/M, z7.h, z9.h\n"
- "fmax z20.h, p3/M, z20.h, z15.h\n"
- "fmax z25.h, p3/M, z25.h, z15.h\n"
- "fmla z23.h, p3/M, z5.h, z9.h\n"
- "fmla z24.h, p3/M, z4.h, z9.h\n"
- "fmax z12.h, p3/M, z12.h, z15.h\n"
- "fmax z23.h, p3/M, z23.h, z15.h\n"
- "fmax z24.h, p3/M, z24.h, z15.h\n"
- "fmin z31.h, p3/M, z31.h, z16.h\n"
- "st1h { z31.h }, p0, [x15]\n"
- "fmin z18.h, p3/M, z18.h, z16.h\n"
- "fmin z28.h, p3/M, z28.h, z16.h\n"
- "st1h { z18.h }, p0, [x15, x6, LSL #1]\n"
- "fmin z27.h, p3/M, z27.h, z16.h\n"
- "fmin z17.h, p3/M, z17.h, z16.h\n"
- "st1h { z28.h }, p0, [x15, x25, LSL #1]\n"
- "fmin z14.h, p3/M, z14.h, z16.h\n"
- "fmin z30.h, p3/M, z30.h, z16.h\n"
- "st1h { z27.h }, p0, [x15, x22, LSL #1]\n"
- "fmin z11.h, p3/M, z11.h, z16.h\n"
- "fmin z26.h, p3/M, z26.h, z16.h\n"
- "st1h { z17.h }, p0, [x9]\n"
- "fmin z13.h, p3/M, z13.h, z16.h\n"
- "fmin z20.h, p3/M, z20.h, z16.h\n"
- "st1h { z14.h }, p0, [x9, x6, LSL #1]\n"
- "fmin z25.h, p3/M, z25.h, z16.h\n"
- "fmin z10.h, p3/M, z10.h, z16.h\n"
- "st1h { z30.h }, p0, [x9, x25, LSL #1]\n"
- "fmin z12.h, p3/M, z12.h, z16.h\n"
- "fmin z23.h, p3/M, z23.h, z16.h\n"
- "st1h { z11.h }, p0, [x9, x22, LSL #1]\n"
- "fmin z24.h, p3/M, z24.h, z16.h\n"
- "st1h { z26.h }, p0, [x26]\n"
- "st1h { z13.h }, p0, [x26, x6, LSL #1]\n"
- "st1h { z20.h }, p0, [x26, x25, LSL #1]\n"
- "st1h { z25.h }, p0, [x26, x22, LSL #1]\n"
- "st1h { z10.h }, p0, [x23]\n"
- "st1h { z12.h }, p0, [x23, x6, LSL #1]\n"
- "st1h { z23.h }, p0, [x23, x25, LSL #1]\n"
- "st1h { z24.h }, p0, [x23, x22, LSL #1]\n"
+ "fmla z11.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "fmla z28.h, p3/M, z8.h, z15.h\n"
+ "fmla z22.h, p3/M, z5.h, z15.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x14, LSL #1]\n"
+ "fmla z24.h, p3/M, z5.h, z16.h\n"
+ "fmla z13.h, p3/M, z4.h, z16.h\n"
+ "fmla z12.h, p3/M, z3.h, z16.h\n"
+ "fmla z30.h, p3/M, z8.h, z16.h\n"
+ "fmla z20.h, p3/M, z7.h, z16.h\n"
+ "fmla z11.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z15.h }, p2/Z, [x10, x12, LSL #1]\n"
+ "fmla z22.h, p3/M, z8.h, z21.h\n"
+ "ld1h { z16.h }, p2/Z, [x10, x4, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z17.h\n"
+ "fmla z13.h, p3/M, z7.h, z17.h\n"
+ "fmla z12.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, x4, LSL #1]\n"
+ "fmla z19.h, p3/M, z5.h, z15.h\n"
+ "fmla z9.h, p3/M, z4.h, z15.h\n"
+ "fmla z18.h, p3/M, z4.h, z16.h\n"
+ "fmla z25.h, p3/M, z3.h, z16.h\n"
+ "fmla z26.h, p3/M, z1.h, z16.h\n"
+ "fmla z14.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x12, LSL #1]\n"
+ "fmla z23.h, p3/M, z2.h, z15.h\n"
+ "fmla z31.h, p3/M, z1.h, z15.h\n"
+ "fmla z28.h, p3/M, z7.h, z17.h\n"
+ "fmla z30.h, p3/M, z6.h, z17.h\n"
+ "fmax z19.h, p3/M, z19.h, z27.h\n"
+ "fmax z9.h, p3/M, z9.h, z27.h\n"
+ "fmla z22.h, p3/M, z4.h, z17.h\n"
+ "fmla z24.h, p3/M, z3.h, z17.h\n"
+ "fmax z18.h, p3/M, z18.h, z27.h\n"
+ "fmax z25.h, p3/M, z25.h, z27.h\n"
+ "fmla z20.h, p3/M, z8.h, z16.h\n"
+ "fmla z11.h, p3/M, z7.h, z16.h\n"
+ "fmax z26.h, p3/M, z26.h, z27.h\n"
+ "fmax z14.h, p3/M, z14.h, z27.h\n"
+ "fmla z13.h, p3/M, z5.h, z16.h\n"
+ "fmla z12.h, p3/M, z4.h, z16.h\n"
+ "fmax z23.h, p3/M, z23.h, z27.h\n"
+ "fmax z31.h, p3/M, z31.h, z27.h\n"
+ "fmax z28.h, p3/M, z28.h, z27.h\n"
+ "fmax z30.h, p3/M, z30.h, z27.h\n"
+ "fmax z22.h, p3/M, z22.h, z27.h\n"
+ "fmax z24.h, p3/M, z24.h, z27.h\n"
+ "fmax z20.h, p3/M, z20.h, z27.h\n"
+ "fmax z11.h, p3/M, z11.h, z27.h\n"
+ "fmax z13.h, p3/M, z13.h, z27.h\n"
+ "fmax z12.h, p3/M, z12.h, z27.h\n"
+ "fmin z18.h, p3/M, z18.h, z29.h\n"
+ "fmin z25.h, p3/M, z25.h, z29.h\n"
+ "fmin z19.h, p3/M, z19.h, z29.h\n"
+ "fmin z9.h, p3/M, z9.h, z29.h\n"
+ "fmin z26.h, p3/M, z26.h, z29.h\n"
+ "fmin z14.h, p3/M, z14.h, z29.h\n"
+ "fmin z23.h, p3/M, z23.h, z29.h\n"
+ "fmin z31.h, p3/M, z31.h, z29.h\n"
+ "st1h { z18.h }, p0, [x17]\n"
+ "fmin z28.h, p3/M, z28.h, z29.h\n"
+ "fmin z30.h, p3/M, z30.h, z29.h\n"
+ "st1h { z25.h }, p0, [x17, x5, LSL #1]\n"
+ "fmin z20.h, p3/M, z20.h, z29.h\n"
+ "fmin z11.h, p3/M, z11.h, z29.h\n"
+ "st1h { z19.h }, p0, [x17, x15, LSL #1]\n"
+ "fmin z22.h, p3/M, z22.h, z29.h\n"
+ "fmin z24.h, p3/M, z24.h, z29.h\n"
+ "st1h { z9.h }, p0, [x17, x13, LSL #1]\n"
+ "fmin z13.h, p3/M, z13.h, z29.h\n"
+ "fmin z12.h, p3/M, z12.h, z29.h\n"
+ "st1h { z26.h }, p0, [x25]\n"
+ "st1h { z14.h }, p0, [x25, x5, LSL #1]\n"
+ "st1h { z23.h }, p0, [x25, x15, LSL #1]\n"
+ "st1h { z31.h }, p0, [x25, x13, LSL #1]\n"
+ "st1h { z28.h }, p0, [x24]\n"
+ "st1h { z30.h }, p0, [x24, x5, LSL #1]\n"
+ "st1h { z20.h }, p0, [x24, x15, LSL #1]\n"
+ "st1h { z11.h }, p0, [x24, x13, LSL #1]\n"
+ "st1h { z22.h }, p0, [x23]\n"
+ "st1h { z24.h }, p0, [x23, x5, LSL #1]\n"
+ "st1h { z13.h }, p0, [x23, x15, LSL #1]\n"
+ "st1h { z12.h }, p0, [x23, x13, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
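For orientation, each tile kernel above reduces, per channel, to a 3x3 depthwise multiply-accumulate with a fused clamp: every movprfx/fmla chain accumulates nine weighted taps on top of what appears to be a per-channel bias vector (reloaded from the params pointer each iteration), and the trailing fmax/fmin pairs apply the activation bounds loaded from Args::min and Args::max. A minimal scalar sketch, assuming NHWC layout and hypothetical names — this is not the library's API, only a model of what the generated assembly computes:

#include <algorithm>
#include <cstddef>

using fp16 = __fp16; // assumes an AArch64 toolchain that provides __fp16

void depthwise3x3_s1_clamp_ref(const fp16 *in, const fp16 *bias, const fp16 *w,
                               fp16 *out, std::size_t out_rows, std::size_t out_cols,
                               std::size_t in_cols, std::size_t channels,
                               fp16 lo, fp16 hi)
{
    for (std::size_t oi = 0; oi < out_rows; ++oi)
        for (std::size_t oj = 0; oj < out_cols; ++oj)
            for (std::size_t c = 0; c < channels; ++c)
            {
                float acc = float(bias[c]); // movprfx seeds the accumulator
                for (std::size_t ki = 0; ki < 3; ++ki)
                    for (std::size_t kj = 0; kj < 3; ++kj)
                        acc += float(in[((oi + ki) * in_cols + (oj + kj)) * channels + c]) *
                               float(w[(ki * 3 + kj) * channels + c]); // one fmla per tap
                acc = std::min(std::max(acc, float(lo)), float(hi));   // fmax ...; fmin ...
                out[(oi * out_cols + oj) * channels + c] = fp16(acc);
            }
}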
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index c0be293cd7..d024ad0479 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -101,607 +101,607 @@ void sve_fp16_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ptrue p3.b\n"
"ldr x7, [%x[params_struct], %[offsetof_args_params]]\n"
"add x8, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ld1h { z17.h }, p3/Z, [x7]\n"
- "cnth x17\n"
- "mov x16, #0x0\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "cnth x16\n"
+ "mov x15, #0x0\n"
+ "ldp x23, x22, [x8, #0x0]\n"
+ "ldp x21, x20, [x8, #0x10]\n"
+ "whilelt p2.h, XZR, %x[n_channels]\n"
+ "ld1h { z22.h }, p3/Z, [x7]\n"
"ld1h { z0.h }, p3/Z, [x7, #1, MUL VL]\n"
"ld1h { z1.h }, p3/Z, [x7, #2, MUL VL]\n"
- "whilelt p2.h, XZR, %x[n_channels]\n"
"ld1h { z2.h }, p3/Z, [x7, #3, MUL VL]\n"
"ld1h { z3.h }, p3/Z, [x7, #4, MUL VL]\n"
- "cmp x17, %x[n_channels]\n"
"ld1h { z4.h }, p3/Z, [x7, #5, MUL VL]\n"
+ "cmp x16, %x[n_channels]\n"
+ "sub x14, XZR, x16\n"
"ld1h { z5.h }, p3/Z, [x7, #6, MUL VL]\n"
- "sub x15, XZR, x17\n"
"ld1h { z6.h }, p3/Z, [x7, #7, MUL VL]\n"
"addvl x7, x7, #16\n"
- "ldp x23, x22, [x8, #0x0]\n"
- "ldp x21, x20, [x8, #0x10]\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rh { z16.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z19.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1rh { z19.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rh { z14.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1h { z9.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x22, x15, LSL #1]\n"
"ld1h { z7.h }, p3/Z, [x7, #-8, MUL VL]\n"
"ld1h { z8.h }, p3/Z, [x7, #-7, MUL VL]\n"
"addvl x7, x7, #-6\n"
- "ld1h { z9.h }, p2/Z, [x23, x16, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x22, x16, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x21, x16, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x20, x15, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z20, z17\n fmla z20.h, p3/M, z4.h, z9.h\n"
- "movprfx z26, z17\n fmla z26.h, p3/M, z8.h, z9.h\n"
+ "movprfx z29, z22\n fmla z29.h, p3/M, z4.h, z9.h\n"
+ "movprfx z18, z22\n fmla z18.h, p3/M, z8.h, z9.h\n"
"ldr x27, [x8, #0x20]\n"
"ldr x24, [x8, #0x30]\n"
- "movprfx z24, z17\n fmla z24.h, p3/M, z3.h, z9.h\n"
- "movprfx z30, z17\n fmla z30.h, p3/M, z1.h, z9.h\n"
+ "movprfx z17, z22\n fmla z17.h, p3/M, z3.h, z9.h\n"
+ "movprfx z26, z22\n fmla z26.h, p3/M, z1.h, z9.h\n"
"ldr x23, [x8, #0x28]\n"
"ldr x22, [x8, #0x38]\n"
- "movprfx z31, z17\n fmla z31.h, p3/M, z0.h, z9.h\n"
- "movprfx z22, z17\n fmla z22.h, p3/M, z7.h, z9.h\n"
+ "movprfx z21, z22\n fmla z21.h, p3/M, z0.h, z9.h\n"
+ "movprfx z28, z22\n fmla z28.h, p3/M, z7.h, z9.h\n"
"ldr x26, [x8, #0x40]\n"
- "ldr x21, [x8, #0x48]\n"
- "movprfx z27, z17\n fmla z27.h, p3/M, z6.h, z9.h\n"
- "fmla z20.h, p3/M, z5.h, z12.h\n"
+ "ldr x20, [x8, #0x48]\n"
+ "movprfx z25, z22\n fmla z25.h, p3/M, z6.h, z9.h\n"
+ "movprfx z30, z22\n fmla z30.h, p3/M, z5.h, z9.h\n"
"ldr x25, [x8, #0x50]\n"
- "ldr x20, [x8, #0x58]\n"
- "movprfx z14, z17\n fmla z14.h, p3/M, z5.h, z9.h\n"
- "movprfx z23, z17\n fmla z23.h, p3/M, z2.h, z9.h\n"
- "ld1h { z25.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "ldr x21, [x8, #0x58]\n"
+ "fmla z29.h, p3/M, z5.h, z12.h\n"
+ "movprfx z23, z22\n fmla z23.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, x15, LSL #1]\n"
"ldr x13, [x8, #0x70]\n"
- "fmla z26.h, p3/M, z0.h, z10.h\n"
- "movprfx z9, z17\n fmla z9.h, p3/M, z2.h, z11.h\n"
- "ld1h { z28.h }, p2/Z, [x27, x16, LSL #1]\n"
- "ld1h { z21.h }, p2/Z, [x23, x16, LSL #1]\n"
- "fmla z24.h, p3/M, z4.h, z12.h\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "fmla z18.h, p3/M, z0.h, z10.h\n"
+ "movprfx z10, z22\n fmla z10.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z9.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z17.h, p3/M, z4.h, z12.h\n"
+ "fmla z26.h, p3/M, z2.h, z12.h\n"
"ldr x24, [x8, #0x60]\n"
"ldr x23, [x8, #0x68]\n"
- "fmla z31.h, p3/M, z1.h, z12.h\n"
- "fmla z22.h, p3/M, z8.h, z12.h\n"
- "inch x15\n"
+ "fmla z21.h, p3/M, z1.h, z12.h\n"
+ "fmla z28.h, p3/M, z8.h, z12.h\n"
+ "inch x14\n"
"mov p1.b, p2.b\n"
- "fmla z27.h, p3/M, z7.h, z12.h\n"
- "movprfx z15, z17\n fmla z15.h, p3/M, z6.h, z28.h\n"
- "ld1h { z10.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "fmla z25.h, p3/M, z7.h, z12.h\n"
+ "movprfx z31, z22\n fmla z31.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x20, x15, LSL #1]\n"
"ldr x28, [x8, #0x88]\n"
- "fmla z20.h, p3/M, z7.h, z25.h\n"
- "fmla z9.h, p3/M, z6.h, z12.h\n"
- "ldr x12, [x14, #0x0]\n"
- "ldr x11, [x14, #0x8]\n"
- "movprfx z11, z17\n fmla z11.h, p3/M, z3.h, z12.h\n"
- "movprfx z13, z17\n fmla z13.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z16.h\n"
+ "fmla z10.h, p3/M, z6.h, z12.h\n"
+ "ldr x12, [x17, #0x0]\n"
+ "ldr x11, [x17, #0x8]\n"
+ "movprfx z15, z22\n fmla z15.h, p3/M, z3.h, z12.h\n"
+ "movprfx z20, z22\n fmla z20.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x22, x15, LSL #1]\n"
"ldr x22, [x8, #0x78]\n"
- "movprfx z28, z17\n fmla z28.h, p3/M, z8.h, z21.h\n"
- "fmla z24.h, p3/M, z6.h, z25.h\n"
- "ld1h { z29.h }, p2/Z, [x26, x16, LSL #1]\n"
- "ldr x21, [x8, #0x80]\n"
- "fmla z30.h, p3/M, z4.h, z25.h\n"
- "fmla z31.h, p3/M, z3.h, z25.h\n"
- "ldr x10, [x14, #0x10]\n"
- "ldr x9, [x14, #0x18]\n"
- "movprfx z18, z17\n fmla z18.h, p3/M, z1.h, z25.h\n"
- "movprfx z21, z17\n fmla z21.h, p3/M, z0.h, z25.h\n"
- "whilelt p0.h, x17, %x[n_channels]\n"
- "ld1h { z17.h }, p3/Z, [x7]\n"
- "fmla z14.h, p3/M, z8.h, z25.h\n"
- "fmla z23.h, p3/M, z5.h, z25.h\n"
- "fmla z15.h, p3/M, z2.h, z25.h\n"
- "fmla z26.h, p3/M, z1.h, z12.h\n"
- "ld1h { z25.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "movprfx z24, z22\n fmla z24.h, p3/M, z8.h, z27.h\n"
+ "fmla z17.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z12.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "ldr x20, [x8, #0x80]\n"
+ "fmla z26.h, p3/M, z4.h, z16.h\n"
+ "fmla z21.h, p3/M, z3.h, z16.h\n"
+ "ldr x10, [x17, #0x10]\n"
+ "ldr x9, [x17, #0x18]\n"
+ "movprfx z13, z22\n fmla z13.h, p3/M, z1.h, z16.h\n"
+ "movprfx z27, z22\n fmla z27.h, p3/M, z0.h, z16.h\n"
+ "whilelt p0.h, x16, %x[n_channels]\n"
+ "ld1h { z22.h }, p3/Z, [x7]\n"
+ "fmla z30.h, p3/M, z8.h, z16.h\n"
+ "fmla z23.h, p3/M, z5.h, z16.h\n"
+ "fmla z31.h, p3/M, z2.h, z16.h\n"
+ "fmla z18.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x15, LSL #1]\n"
"ldr x27, [x8, #0x90]\n"
- "fmla z22.h, p3/M, z0.h, z12.h\n"
- "fmla z27.h, p3/M, z2.h, z29.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
- "ldr x20, [x8, #0x98]\n"
- "fmla z20.h, p3/M, z8.h, z10.h\n"
- "fmla z9.h, p3/M, z1.h, z29.h\n"
- "ld1h { z29.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "fmla z28.h, p3/M, z0.h, z11.h\n"
+ "fmla z25.h, p3/M, z2.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x21, [x8, #0x98]\n"
+ "fmla z29.h, p3/M, z8.h, z9.h\n"
+ "fmla z10.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
"ldr x26, [x8, #0xa0]\n"
- "fmla z24.h, p3/M, z7.h, z10.h\n"
- "fmla z11.h, p3/M, z6.h, z10.h\n"
- "fmla z30.h, p3/M, z5.h, z10.h\n"
- "fmla z31.h, p3/M, z4.h, z10.h\n"
- "fmla z13.h, p3/M, z3.h, z10.h\n"
- "fmla z18.h, p3/M, z2.h, z10.h\n"
- "fmla z21.h, p3/M, z1.h, z10.h\n"
- "fmla z28.h, p3/M, z0.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "fmla z17.h, p3/M, z7.h, z9.h\n"
+ "fmla z15.h, p3/M, z6.h, z9.h\n"
+ "fmla z26.h, p3/M, z5.h, z9.h\n"
+ "fmla z21.h, p3/M, z4.h, z9.h\n"
+ "fmla z20.h, p3/M, z3.h, z9.h\n"
+ "fmla z13.h, p3/M, z2.h, z9.h\n"
+ "fmla z27.h, p3/M, z1.h, z9.h\n"
+ "fmla z24.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z9.h }, p2/Z, [x23, x15, LSL #1]\n"
"ldr x25, [x8, #0xa8]\n"
- "fmla z26.h, p3/M, z3.h, z25.h\n"
- "fmla z14.h, p3/M, z0.h, z25.h\n"
- "fmla z23.h, p3/M, z6.h, z29.h\n"
- "fmla z15.h, p3/M, z3.h, z29.h\n"
- "ld1h { z25.h }, p2/Z, [x13, x16, LSL #1]\n"
+ "fmla z18.h, p3/M, z3.h, z16.h\n"
+ "fmla z30.h, p3/M, z0.h, z16.h\n"
+ "fmla z23.h, p3/M, z6.h, z12.h\n"
+ "fmla z31.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x13, x15, LSL #1]\n"
"ldr x24, [x8, #0xb0]\n"
- "fmla z22.h, p3/M, z4.h, z10.h\n"
- "fmla z27.h, p3/M, z3.h, z10.h\n"
- "fmla z20.h, p3/M, z1.h, z10.h\n"
- "fmla z9.h, p3/M, z5.h, z12.h\n"
- "fmla z11.h, p3/M, z2.h, z12.h\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "fmla z28.h, p3/M, z4.h, z9.h\n"
+ "fmla z25.h, p3/M, z3.h, z9.h\n"
+ "fmla z29.h, p3/M, z1.h, z9.h\n"
+ "fmla z10.h, p3/M, z5.h, z11.h\n"
+ "fmla z15.h, p3/M, z2.h, z11.h\n"
+ "fmla z17.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z11.h }, p2/Z, [x22, x15, LSL #1]\n"
"ldr x23, [x8, #0xb8]\n"
- "fmla z13.h, p3/M, z8.h, z25.h\n"
- "fmla z28.h, p3/M, z5.h, z25.h\n"
- "ld1h { z25.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "fmla z20.h, p3/M, z8.h, z12.h\n"
+ "fmla z24.h, p3/M, z5.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x20, x15, LSL #1]\n"
"ldr x22, [x8, #0xc0]\n"
- "fmla z26.h, p3/M, z5.h, z10.h\n"
- "fmla z14.h, p3/M, z2.h, z10.h\n"
- "ld1h { z29.h }, p2/Z, [x28, x16, LSL #1]\n"
- "ldr x21, [x8, #0xc8]\n"
- "fmla z22.h, p3/M, z5.h, z12.h\n"
- "fmla z27.h, p3/M, z4.h, z12.h\n"
- "fmla z20.h, p3/M, z2.h, z12.h\n"
- "fmla z9.h, p3/M, z3.h, z12.h\n"
- "fmla z24.h, p3/M, z1.h, z12.h\n"
- "fmla z11.h, p3/M, z0.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "fmla z18.h, p3/M, z5.h, z9.h\n"
+ "fmla z30.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x15, LSL #1]\n"
+ "ldr x20, [x8, #0xc8]\n"
+ "fmla z28.h, p3/M, z5.h, z11.h\n"
+ "fmla z25.h, p3/M, z4.h, z11.h\n"
+ "fmla z29.h, p3/M, z2.h, z11.h\n"
+ "fmla z10.h, p3/M, z3.h, z11.h\n"
+ "fmla z17.h, p3/M, z1.h, z11.h\n"
+ "fmla z15.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z9.h }, p2/Z, [x21, x15, LSL #1]\n"
"ldr x28, [x8, #0xd8]\n"
- "fmla z15.h, p3/M, z7.h, z25.h\n"
- "fmla z18.h, p3/M, z6.h, z25.h\n"
- "ld1h { z25.h }, p2/Z, [x27, x16, LSL #1]\n"
- "ldr x20, [x8, #0xd0]\n"
- "fmla z26.h, p3/M, z7.h, z29.h\n"
- "fmla z22.h, p3/M, z6.h, z29.h\n"
- "fmla z14.h, p3/M, z4.h, z29.h\n"
- "fmla z20.h, p3/M, z3.h, z29.h\n"
- "fmla z23.h, p3/M, z1.h, z29.h\n"
- "fmla z30.h, p3/M, z0.h, z29.h\n"
- "ld1h { z29.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "fmla z31.h, p3/M, z7.h, z12.h\n"
+ "fmla z13.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z11.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "ldr x21, [x8, #0xd0]\n"
+ "fmla z18.h, p3/M, z7.h, z16.h\n"
+ "fmla z28.h, p3/M, z6.h, z16.h\n"
+ "fmla z30.h, p3/M, z4.h, z16.h\n"
+ "fmla z29.h, p3/M, z3.h, z16.h\n"
+ "fmla z23.h, p3/M, z1.h, z16.h\n"
+ "fmla z26.h, p3/M, z0.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x26, x15, LSL #1]\n"
"ldr x27, [x8, #0xe0]\n"
- "fmla z27.h, p3/M, z8.h, z10.h\n"
- "fmla z21.h, p3/M, z8.h, z25.h\n"
- "fmla z28.h, p3/M, z7.h, z25.h\n"
- "ld1h { z25.h }, p2/Z, [x25, x16, LSL #1]\n"
- "fmla z13.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z8.h, z9.h\n"
+ "fmla z27.h, p3/M, z8.h, z11.h\n"
+ "fmla z24.h, p3/M, z7.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "fmla z20.h, p3/M, z1.h, z9.h\n"
"ldr x26, [x8, #0xe8]\n"
- "fmla z9.h, p3/M, z7.h, z10.h\n"
- "fmla z24.h, p3/M, z5.h, z10.h\n"
- "fmla z11.h, p3/M, z4.h, z10.h\n"
- "fmla z31.h, p3/M, z2.h, z10.h\n"
- "ld1h { z10.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "fmla z10.h, p3/M, z7.h, z9.h\n"
+ "fmla z17.h, p3/M, z5.h, z9.h\n"
+ "fmla z15.h, p3/M, z4.h, z9.h\n"
+ "fmla z21.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z11.h }, p2/Z, [x24, x15, LSL #1]\n"
"ldr x25, [x8, #0xf0]\n"
- "fmla z26.h, p3/M, z2.h, z29.h\n"
- "fmla z22.h, p3/M, z1.h, z29.h\n"
- "fmla z27.h, p3/M, z0.h, z29.h\n"
- "fmla z14.h, p3/M, z7.h, z25.h\n"
- "ld1h { z29.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "fmla z18.h, p3/M, z2.h, z16.h\n"
+ "fmla z28.h, p3/M, z1.h, z16.h\n"
+ "fmla z25.h, p3/M, z0.h, z16.h\n"
+ "fmla z30.h, p3/M, z7.h, z12.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x15, LSL #1]\n"
"ldr x24, [x8, #0xf8]\n"
- "fmla z20.h, p3/M, z6.h, z25.h\n"
- "fmla z23.h, p3/M, z4.h, z25.h\n"
- "fmla z30.h, p3/M, z3.h, z25.h\n"
- "fmla z15.h, p3/M, z1.h, z25.h\n"
- "fmla z18.h, p3/M, z0.h, z25.h\n"
- "ld1h { z25.h }, p2/Z, [x22, x16, LSL #1]\n"
- "fmla z13.h, p3/M, z4.h, z25.h\n"
+ "fmla z29.h, p3/M, z6.h, z12.h\n"
+ "fmla z23.h, p3/M, z4.h, z12.h\n"
+ "fmla z26.h, p3/M, z3.h, z12.h\n"
+ "fmla z31.h, p3/M, z1.h, z12.h\n"
+ "fmla z13.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmla z28.h, p3/M, z2.h, z11.h\n"
"ldr x23, [x8, #0x100]\n"
- "fmla z21.h, p3/M, z2.h, z25.h\n"
- "fmla z22.h, p3/M, z2.h, z10.h\n"
- "fmla z27.h, p3/M, z1.h, z10.h\n"
- "fmla z9.h, p3/M, z0.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x21, x16, LSL #1]\n"
- "ldr x22, [x8, #0x108]\n"
- "fmla z26.h, p3/M, z6.h, z29.h\n"
- "fmla z14.h, p3/M, z3.h, z29.h\n"
- "fmla z23.h, p3/M, z0.h, z29.h\n"
- "fmla z24.h, p3/M, z8.h, z25.h\n"
- "ld1h { z10.h }, p2/Z, [x20, x16, LSL #1]\n"
- "ldr x21, [x8, #0x110]\n"
- "fmla z11.h, p3/M, z7.h, z25.h\n"
- "fmla z31.h, p3/M, z5.h, z25.h\n"
- "fmla z28.h, p3/M, z1.h, z25.h\n"
- "ld1h { z25.h }, p2/Z, [x28, x16, LSL #1]\n"
- "fmla z13.h, p3/M, z2.h, z12.h\n"
- "ldr x20, [x8, #0x118]\n"
- "fmla z15.h, p3/M, z0.h, z10.h\n"
- "fmla z18.h, p3/M, z4.h, z25.h\n"
- "fmla z21.h, p3/M, z3.h, z25.h\n"
- "fmla z9.h, p3/M, z8.h, z12.h\n"
- "fmla z11.h, p3/M, z5.h, z12.h\n"
- "fmla z14.h, p3/M, z6.h, z10.h\n"
- "ld1h { z12.h }, p2/Z, [x27, x16, LSL #1]\n"
- "fmla z23.h, p3/M, z3.h, z10.h\n"
- "ld1h { z29.h }, p2/Z, [x26, x16, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z25.h\n"
- "fmla z31.h, p3/M, z6.h, z25.h\n"
- "fmla z15.h, p3/M, z5.h, z25.h\n"
- "fmla z13.h, p3/M, z5.h, z12.h\n"
- "fmla z28.h, p3/M, z2.h, z12.h\n"
- "fmla z18.h, p3/M, z7.h, z29.h\n"
- "fmla z21.h, p3/M, z6.h, z29.h\n"
- "fmla z23.h, p3/M, z8.h, z25.h\n"
- "ld1h { z25.h }, p2/Z, [x25, x16, LSL #1]\n"
- "fmla z15.h, p3/M, z8.h, z29.h\n"
- "ld1h { z29.h }, p2/Z, [x23, x16, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z25.h\n"
- "fmla z31.h, p3/M, z7.h, z25.h\n"
- "fmla z13.h, p3/M, z6.h, z25.h\n"
- "fmla z18.h, p3/M, z5.h, z25.h\n"
- "fmla z21.h, p3/M, z4.h, z25.h\n"
- "fmla z28.h, p3/M, z3.h, z25.h\n"
- "ld1h { z25.h }, p2/Z, [x22, x16, LSL #1]\n"
- "ldp x27, x26, [x8, #0x0]\n"
- "fmla z11.h, p3/M, z8.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x24, x16, LSL #1]\n"
- "fmla z26.h, p3/M, z4.h, z29.h\n"
- "fmax z26.h, p3/M, z26.h, z16.h\n"
- "fmla z22.h, p3/M, z3.h, z29.h\n"
- "fmla z27.h, p3/M, z5.h, z25.h\n"
- "fmax z22.h, p3/M, z22.h, z16.h\n"
- "fmax z27.h, p3/M, z27.h, z16.h\n"
- "fmla z9.h, p3/M, z4.h, z25.h\n"
- "fmla z18.h, p3/M, z8.h, z12.h\n"
- "fmax z9.h, p3/M, z9.h, z16.h\n"
- "fmin z26.h, p3/M, z26.h, z19.h\n"
- "fmla z21.h, p3/M, z7.h, z12.h\n"
- "fmla z28.h, p3/M, z6.h, z12.h\n"
- "ld1h { z10.h }, p2/Z, [x21, x16, LSL #1]\n"
- "fmin z22.h, p3/M, z22.h, z19.h\n"
- "fmla z14.h, p3/M, z1.h, z29.h\n"
- "fmla z20.h, p3/M, z0.h, z29.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
- "fmin z27.h, p3/M, z27.h, z19.h\n"
- "fmla z24.h, p3/M, z2.h, z25.h\n"
- "fmla z11.h, p3/M, z1.h, z25.h\n"
- "fmin z9.h, p3/M, z9.h, z19.h\n"
- "fmax z14.h, p3/M, z14.h, z16.h\n"
- "fmla z23.h, p3/M, z7.h, z10.h\n"
- "fmla z30.h, p3/M, z6.h, z10.h\n"
- "fmax z20.h, p3/M, z20.h, z16.h\n"
- "fmax z24.h, p3/M, z24.h, z16.h\n"
- "fmla z31.h, p3/M, z8.h, z12.h\n"
- "fmla z13.h, p3/M, z7.h, z12.h\n"
- "fmax z11.h, p3/M, z11.h, z16.h\n"
- "st1h { z26.h }, p1, [x12, x15, LSL #1]\n"
- "st1h { z22.h }, p1, [x11, x15, LSL #1]\n"
- "ldr x23, [x14, #0x20]\n"
- "ldr x22, [x14, #0x28]\n"
- "fmla z15.h, p3/M, z4.h, z10.h\n"
- "st1h { z27.h }, p1, [x10, x15, LSL #1]\n"
- "ldr x21, [x14, #0x30]\n"
- "fmla z18.h, p3/M, z3.h, z10.h\n"
- "fmla z21.h, p3/M, z5.h, z12.h\n"
- "st1h { z9.h }, p1, [x9, x15, LSL #1]\n"
- "ldr x20, [x14, #0x38]\n"
- "fmla z28.h, p3/M, z4.h, z12.h\n"
+ "fmla z25.h, p3/M, z1.h, z11.h\n"
+ "fmla z10.h, p3/M, z0.h, z11.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "ldr x20, [x8, #0x108]\n"
+ "fmla z18.h, p3/M, z6.h, z16.h\n"
+ "fmla z30.h, p3/M, z3.h, z16.h\n"
+ "fmla z20.h, p3/M, z4.h, z9.h\n"
+ "fmla z27.h, p3/M, z2.h, z9.h\n"
+ "fmla z23.h, p3/M, z0.h, z16.h\n"
+ "fmla z17.h, p3/M, z8.h, z9.h\n"
+ "ld1h { z12.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ldr x22, [x8, #0x110]\n"
+ "fmla z15.h, p3/M, z7.h, z9.h\n"
+ "fmla z21.h, p3/M, z5.h, z9.h\n"
+ "fmla z24.h, p3/M, z1.h, z9.h\n"
+ "ld1h { z16.h }, p2/Z, [x28, x15, LSL #1]\n"
+ "fmla z10.h, p3/M, z8.h, z11.h\n"
+ "ldr x21, [x8, #0x118]\n"
+ "fmla z20.h, p3/M, z2.h, z11.h\n"
+ "fmla z31.h, p3/M, z0.h, z12.h\n"
+ "fmla z30.h, p3/M, z6.h, z12.h\n"
+ "fmla z23.h, p3/M, z3.h, z12.h\n"
+ "ld1h { z9.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "fmla z13.h, p3/M, z4.h, z16.h\n"
+ "fmla z27.h, p3/M, z3.h, z16.h\n"
+ "fmla z15.h, p3/M, z5.h, z11.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z16.h\n"
+ "fmla z21.h, p3/M, z6.h, z16.h\n"
+ "fmla z31.h, p3/M, z5.h, z16.h\n"
+ "fmla z23.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "fmla z20.h, p3/M, z5.h, z12.h\n"
+ "fmla z24.h, p3/M, z2.h, z12.h\n"
+ "fmla z13.h, p3/M, z7.h, z9.h\n"
+ "fmla z27.h, p3/M, z6.h, z9.h\n"
+ "fmla z15.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z31.h, p3/M, z8.h, z9.h\n"
+ "fmla z26.h, p3/M, z8.h, z16.h\n"
+ "ld1h { z9.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z21.h, p3/M, z7.h, z16.h\n"
+ "fmla z20.h, p3/M, z6.h, z16.h\n"
+ "fmla z13.h, p3/M, z5.h, z16.h\n"
+ "fmla z24.h, p3/M, z3.h, z16.h\n"
+ "fmla z27.h, p3/M, z4.h, z16.h\n"
+ "ld1h { z11.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z18.h, p3/M, z4.h, z9.h\n"
+ "ldp x20, x26, [x8, #0x0]\n"
+ "fmla z28.h, p3/M, z3.h, z9.h\n"
+ "fmla z30.h, p3/M, z1.h, z9.h\n"
+ "fmla z29.h, p3/M, z0.h, z9.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x7, #1, MUL VL]\n"
+ "fmla z25.h, p3/M, z5.h, z11.h\n"
+ "fmla z10.h, p3/M, z4.h, z11.h\n"
+ "fmla z13.h, p3/M, z8.h, z12.h\n"
+ "ld1h { z9.h }, p0/Z, [x20, x16, LSL #1]\n"
+ "fmla z27.h, p3/M, z7.h, z12.h\n"
+ "fmla z24.h, p3/M, z6.h, z12.h\n"
+ "ld1h { z12.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmax z18.h, p3/M, z18.h, z19.h\n"
+ "fmla z17.h, p3/M, z2.h, z11.h\n"
+ "fmla z15.h, p3/M, z1.h, z11.h\n"
+ "fmax z28.h, p3/M, z28.h, z19.h\n"
+ "fmax z30.h, p3/M, z30.h, z19.h\n"
+ "fmax z25.h, p3/M, z25.h, z19.h\n"
+ "fmla z21.h, p3/M, z8.h, z16.h\n"
+ "fmla z20.h, p3/M, z7.h, z16.h\n"
+ "fmax z29.h, p3/M, z29.h, z19.h\n"
+ "fmax z10.h, p3/M, z10.h, z19.h\n"
+ "fmla z23.h, p3/M, z7.h, z12.h\n"
+ "fmla z26.h, p3/M, z6.h, z12.h\n"
+ "fmin z18.h, p3/M, z18.h, z14.h\n"
+ "fmin z28.h, p3/M, z28.h, z14.h\n"
+ "fmla z31.h, p3/M, z4.h, z12.h\n"
+ "fmla z13.h, p3/M, z3.h, z12.h\n"
"ldp x25, x24, [x8, #0x10]\n"
- "fmin z14.h, p3/M, z14.h, z19.h\n"
- "fmin z20.h, p3/M, z20.h, z19.h\n"
- "st1h { z14.h }, p1, [x23, x15, LSL #1]\n"
- "ldr x23, [x14, #0x40]\n"
- "fmin z24.h, p3/M, z24.h, z19.h\n"
- "fmin z11.h, p3/M, z11.h, z19.h\n"
- "st1h { z20.h }, p1, [x22, x15, LSL #1]\n"
- "ldr x22, [x14, #0x48]\n"
- "fmax z23.h, p3/M, z23.h, z16.h\n"
- "fmax z30.h, p3/M, z30.h, z16.h\n"
- "st1h { z24.h }, p1, [x21, x15, LSL #1]\n"
- "ldr x21, [x14, #0x50]\n"
- "fmax z31.h, p3/M, z31.h, z16.h\n"
- "fmax z13.h, p3/M, z13.h, z16.h\n"
- "st1h { z11.h }, p1, [x20, x15, LSL #1]\n"
- "ldr x20, [x14, #0x58]\n"
+ "fmin z25.h, p3/M, z25.h, z14.h\n"
+ "fmax z17.h, p3/M, z17.h, z19.h\n"
+ "fmla z27.h, p3/M, z5.h, z16.h\n"
+ "fmla z24.h, p3/M, z4.h, z16.h\n"
+ "fmin z10.h, p3/M, z10.h, z14.h\n"
+ "fmax z15.h, p3/M, z15.h, z19.h\n"
+ "st1h { z18.h }, p1, [x12, x14, LSL #1]\n"
+ "ldr x23, [x17, #0x20]\n"
+ "st1h { z28.h }, p1, [x11, x14, LSL #1]\n"
+ "ldr x22, [x17, #0x28]\n"
+ "fmin z30.h, p3/M, z30.h, z14.h\n"
+ "fmin z29.h, p3/M, z29.h, z14.h\n"
+ "st1h { z25.h }, p1, [x10, x14, LSL #1]\n"
+ "ldr x21, [x17, #0x30]\n"
+ "fmin z17.h, p3/M, z17.h, z14.h\n"
+ "fmax z23.h, p3/M, z23.h, z19.h\n"
+ "st1h { z10.h }, p1, [x9, x14, LSL #1]\n"
+ "ldr x20, [x17, #0x38]\n"
+ "fmin z15.h, p3/M, z15.h, z14.h\n"
+ "fmax z26.h, p3/M, z26.h, z19.h\n"
+ "fmax z21.h, p3/M, z21.h, z19.h\n"
+ "fmax z20.h, p3/M, z20.h, z19.h\n"
+ "st1h { z30.h }, p1, [x23, x14, LSL #1]\n"
+ "ldr x23, [x17, #0x40]\n"
+ "st1h { z29.h }, p1, [x22, x14, LSL #1]\n"
+ "ldr x22, [x17, #0x48]\n"
+ "inch x15\n"
+ "ld1h { z10.h }, p0/Z, [x26, x16, LSL #1]\n"
+ "st1h { z17.h }, p1, [x21, x14, LSL #1]\n"
+ "ldr x21, [x17, #0x50]\n"
+ "ld1h { z11.h }, p0/Z, [x25, x16, LSL #1]\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
+ "st1h { z15.h }, p1, [x20, x14, LSL #1]\n"
+ "ldr x20, [x17, #0x58]\n"
+ "ld1h { z12.h }, p0/Z, [x24, x16, LSL #1]\n"
"inch x16\n"
- "ld1h { z9.h }, p0/Z, [x27, x17, LSL #1]\n"
- "ld1h { z10.h }, p0/Z, [x26, x17, LSL #1]\n"
- "fmin z23.h, p3/M, z23.h, z19.h\n"
- "ld1h { z11.h }, p0/Z, [x25, x17, LSL #1]\n"
- "ld1h { z12.h }, p0/Z, [x24, x17, LSL #1]\n"
- "inch x17\n"
- "fmin z30.h, p3/M, z30.h, z19.h\n"
- "fmin z31.h, p3/M, z31.h, z19.h\n"
- "fmin z13.h, p3/M, z13.h, z19.h\n"
- "st1h { z23.h }, p1, [x23, x15, LSL #1]\n"
- "ldr x23, [x14, #0x60]\n"
- "fmax z15.h, p3/M, z15.h, z16.h\n"
- "fmax z18.h, p3/M, z18.h, z16.h\n"
- "st1h { z30.h }, p1, [x22, x15, LSL #1]\n"
- "ldr x22, [x14, #0x68]\n"
- "fmax z21.h, p3/M, z21.h, z16.h\n"
- "fmax z28.h, p3/M, z28.h, z16.h\n"
- "st1h { z31.h }, p1, [x21, x15, LSL #1]\n"
- "ldr x21, [x14, #0x70]\n"
- "st1h { z13.h }, p1, [x20, x15, LSL #1]\n"
- "ldr x20, [x14, #0x78]\n"
- "ld1h { z0.h }, p3/Z, [x7, #1, MUL VL]\n"
- "whilelt p2.h, x16, %x[n_channels]\n"
+ "fmin z26.h, p3/M, z26.h, z14.h\n"
+ "fmin z21.h, p3/M, z21.h, z14.h\n"
"ld1h { z1.h }, p3/Z, [x7, #2, MUL VL]\n"
"ld1h { z2.h }, p3/Z, [x7, #3, MUL VL]\n"
- "cmp x17, %x[n_channels]\n"
- "fmin z15.h, p3/M, z15.h, z19.h\n"
+ "fmin z20.h, p3/M, z20.h, z14.h\n"
+ "fmax z31.h, p3/M, z31.h, z19.h\n"
+ "st1h { z23.h }, p1, [x23, x14, LSL #1]\n"
+ "ldr x23, [x17, #0x60]\n"
+ "fmax z13.h, p3/M, z13.h, z19.h\n"
+ "fmax z27.h, p3/M, z27.h, z19.h\n"
"ld1h { z3.h }, p3/Z, [x7, #4, MUL VL]\n"
"ld1h { z4.h }, p3/Z, [x7, #5, MUL VL]\n"
- "fmin z18.h, p3/M, z18.h, z19.h\n"
- "fmin z21.h, p3/M, z21.h, z19.h\n"
+ "fmax z24.h, p3/M, z24.h, z19.h\n"
+ "st1h { z26.h }, p1, [x22, x14, LSL #1]\n"
+ "ldr x22, [x17, #0x68]\n"
"ld1h { z5.h }, p3/Z, [x7, #6, MUL VL]\n"
+ "st1h { z21.h }, p1, [x21, x14, LSL #1]\n"
+ "ldr x21, [x17, #0x70]\n"
"ld1h { z6.h }, p3/Z, [x7, #7, MUL VL]\n"
"addvl x7, x7, #16\n"
- "fmin z28.h, p3/M, z28.h, z19.h\n"
- "st1h { z15.h }, p1, [x23, x15, LSL #1]\n"
+ "st1h { z20.h }, p1, [x20, x14, LSL #1]\n"
+ "ldr x20, [x17, #0x78]\n"
+ "whilelt p2.h, x15, %x[n_channels]\n"
+ "cmp x16, %x[n_channels]\n"
+ "fmin z31.h, p3/M, z31.h, z14.h\n"
+ "fmin z13.h, p3/M, z13.h, z14.h\n"
+ "fmin z27.h, p3/M, z27.h, z14.h\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
"ld1h { z7.h }, p3/Z, [x7, #-8, MUL VL]\n"
"ld1h { z8.h }, p3/Z, [x7, #-7, MUL VL]\n"
"addvl x7, x7, #-6\n"
- "st1h { z18.h }, p1, [x22, x15, LSL #1]\n"
- "st1h { z21.h }, p1, [x21, x15, LSL #1]\n"
- "st1h { z28.h }, p1, [x20, x15, LSL #1]\n"
+ "st1h { z31.h }, p1, [x23, x14, LSL #1]\n"
+ "st1h { z13.h }, p1, [x22, x14, LSL #1]\n"
+ "st1h { z27.h }, p1, [x21, x14, LSL #1]\n"
+ "st1h { z24.h }, p1, [x20, x14, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z14, z17\n fmla z14.h, p3/M, z4.h, z9.h\n"
- "movprfx z18, z17\n fmla z18.h, p3/M, z8.h, z9.h\n"
+ "movprfx z16, z22\n fmla z16.h, p3/M, z4.h, z9.h\n"
+ "movprfx z30, z22\n fmla z30.h, p3/M, z8.h, z9.h\n"
"ldr x27, [x8, #0x20]\n"
"ldr x24, [x8, #0x30]\n"
- "movprfx z15, z17\n fmla z15.h, p3/M, z3.h, z9.h\n"
- "movprfx z30, z17\n fmla z30.h, p3/M, z1.h, z9.h\n"
+ "movprfx z13, z22\n fmla z13.h, p3/M, z3.h, z9.h\n"
+ "movprfx z15, z22\n fmla z15.h, p3/M, z1.h, z9.h\n"
"ldr x23, [x8, #0x28]\n"
"ldr x22, [x8, #0x38]\n"
- "movprfx z20, z17\n fmla z20.h, p3/M, z0.h, z9.h\n"
- "movprfx z13, z17\n fmla z13.h, p3/M, z7.h, z9.h\n"
+ "movprfx z20, z22\n fmla z20.h, p3/M, z0.h, z9.h\n"
+ "movprfx z18, z22\n fmla z18.h, p3/M, z7.h, z9.h\n"
"ldr x26, [x8, #0x40]\n"
"ldr x21, [x8, #0x48]\n"
- "movprfx z22, z17\n fmla z22.h, p3/M, z6.h, z9.h\n"
- "fmla z14.h, p3/M, z5.h, z12.h\n"
+ "movprfx z26, z22\n fmla z26.h, p3/M, z6.h, z9.h\n"
+ "movprfx z31, z22\n fmla z31.h, p3/M, z5.h, z9.h\n"
"ldr x25, [x8, #0x50]\n"
"ldr x20, [x8, #0x58]\n"
- "movprfx z27, z17\n fmla z27.h, p3/M, z5.h, z9.h\n"
- "movprfx z31, z17\n fmla z31.h, p3/M, z2.h, z9.h\n"
- "ld1h { z23.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "fmla z16.h, p3/M, z5.h, z12.h\n"
+ "movprfx z28, z22\n fmla z28.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z27.h }, p2/Z, [x24, x15, LSL #1]\n"
"ldr x13, [x8, #0x70]\n"
- "fmla z18.h, p3/M, z0.h, z10.h\n"
- "movprfx z9, z17\n fmla z9.h, p3/M, z2.h, z11.h\n"
- "ld1h { z21.h }, p2/Z, [x27, x16, LSL #1]\n"
- "ld1h { z25.h }, p2/Z, [x23, x16, LSL #1]\n"
- "fmla z15.h, p3/M, z4.h, z12.h\n"
- "fmla z30.h, p3/M, z2.h, z12.h\n"
+ "fmla z30.h, p3/M, z0.h, z10.h\n"
+ "movprfx z29, z22\n fmla z29.h, p3/M, z2.h, z11.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "ld1h { z24.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "fmla z13.h, p3/M, z4.h, z12.h\n"
+ "fmla z15.h, p3/M, z2.h, z12.h\n"
"ldr x24, [x8, #0x60]\n"
"ldr x23, [x8, #0x68]\n"
"fmla z20.h, p3/M, z1.h, z12.h\n"
- "fmla z13.h, p3/M, z8.h, z12.h\n"
- "inch x15\n"
+ "fmla z18.h, p3/M, z8.h, z12.h\n"
+ "inch x14\n"
"mov p0.b, p2.b\n"
- "fmla z22.h, p3/M, z7.h, z12.h\n"
- "movprfx z28, z17\n fmla z28.h, p3/M, z6.h, z21.h\n"
- "ld1h { z29.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z12.h\n"
+ "movprfx z9, z22\n fmla z9.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z10.h }, p2/Z, [x21, x15, LSL #1]\n"
"ldr x28, [x8, #0x88]\n"
- "fmla z14.h, p3/M, z7.h, z23.h\n"
- "fmla z9.h, p3/M, z6.h, z12.h\n"
- "ldr x12, [x14, #0x0]\n"
- "ldr x11, [x14, #0x8]\n"
- "movprfx z11, z17\n fmla z11.h, p3/M, z3.h, z12.h\n"
- "movprfx z10, z17\n fmla z10.h, p3/M, z0.h, z12.h\n"
- "ld1h { z12.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "fmla z16.h, p3/M, z7.h, z27.h\n"
+ "fmla z29.h, p3/M, z6.h, z12.h\n"
+ "ldr x12, [x17, #0x0]\n"
+ "ldr x11, [x17, #0x8]\n"
+ "movprfx z11, z22\n fmla z11.h, p3/M, z3.h, z12.h\n"
+ "movprfx z23, z22\n fmla z23.h, p3/M, z0.h, z12.h\n"
+ "ld1h { z21.h }, p2/Z, [x22, x15, LSL #1]\n"
"ldr x22, [x8, #0x78]\n"
- "movprfx z26, z17\n fmla z26.h, p3/M, z8.h, z25.h\n"
- "fmla z15.h, p3/M, z6.h, z23.h\n"
- "ld1h { z21.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "movprfx z25, z22\n fmla z25.h, p3/M, z8.h, z24.h\n"
+ "fmla z13.h, p3/M, z6.h, z27.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x15, LSL #1]\n"
"ldr x21, [x8, #0x80]\n"
- "fmla z30.h, p3/M, z4.h, z23.h\n"
- "fmla z20.h, p3/M, z3.h, z23.h\n"
- "ldr x10, [x14, #0x10]\n"
- "ldr x9, [x14, #0x18]\n"
- "movprfx z25, z17\n fmla z25.h, p3/M, z1.h, z23.h\n"
- "movprfx z24, z17\n fmla z24.h, p3/M, z0.h, z23.h\n"
- "fmla z27.h, p3/M, z8.h, z23.h\n"
- "fmla z31.h, p3/M, z5.h, z23.h\n"
- "fmla z28.h, p3/M, z2.h, z23.h\n"
- "fmla z18.h, p3/M, z1.h, z12.h\n"
- "ld1h { z23.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "fmla z15.h, p3/M, z4.h, z27.h\n"
+ "fmla z20.h, p3/M, z3.h, z27.h\n"
+ "ldr x10, [x17, #0x10]\n"
+ "ldr x9, [x17, #0x18]\n"
+ "movprfx z24, z22\n fmla z24.h, p3/M, z1.h, z27.h\n"
+ "movprfx z12, z22\n fmla z12.h, p3/M, z0.h, z27.h\n"
+ "fmla z31.h, p3/M, z8.h, z27.h\n"
+ "fmla z28.h, p3/M, z5.h, z27.h\n"
+ "fmla z9.h, p3/M, z2.h, z27.h\n"
+ "fmla z30.h, p3/M, z1.h, z21.h\n"
+ "ld1h { z27.h }, p2/Z, [x25, x15, LSL #1]\n"
"ldr x27, [x8, #0x90]\n"
- "fmla z13.h, p3/M, z0.h, z12.h\n"
- "fmla z22.h, p3/M, z2.h, z21.h\n"
- "ld1h { z12.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "fmla z18.h, p3/M, z0.h, z21.h\n"
+ "fmla z26.h, p3/M, z2.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x20, x15, LSL #1]\n"
"ldr x20, [x8, #0x98]\n"
- "fmla z14.h, p3/M, z8.h, z29.h\n"
- "fmla z9.h, p3/M, z1.h, z21.h\n"
- "ld1h { z21.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "fmla z16.h, p3/M, z8.h, z10.h\n"
+ "fmla z29.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x15, LSL #1]\n"
"ldr x26, [x8, #0xa0]\n"
- "fmla z15.h, p3/M, z7.h, z29.h\n"
- "fmla z11.h, p3/M, z6.h, z29.h\n"
- "fmla z30.h, p3/M, z5.h, z29.h\n"
- "fmla z20.h, p3/M, z4.h, z29.h\n"
- "fmla z10.h, p3/M, z3.h, z29.h\n"
- "fmla z25.h, p3/M, z2.h, z29.h\n"
- "fmla z24.h, p3/M, z1.h, z29.h\n"
- "fmla z26.h, p3/M, z0.h, z29.h\n"
- "ld1h { z29.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "fmla z13.h, p3/M, z7.h, z10.h\n"
+ "fmla z11.h, p3/M, z6.h, z10.h\n"
+ "fmla z15.h, p3/M, z5.h, z10.h\n"
+ "fmla z20.h, p3/M, z4.h, z10.h\n"
+ "fmla z23.h, p3/M, z3.h, z10.h\n"
+ "fmla z24.h, p3/M, z2.h, z10.h\n"
+ "fmla z12.h, p3/M, z1.h, z10.h\n"
+ "fmla z25.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z22.h }, p2/Z, [x23, x15, LSL #1]\n"
"ldr x25, [x8, #0xa8]\n"
- "fmla z18.h, p3/M, z3.h, z23.h\n"
- "fmla z27.h, p3/M, z0.h, z23.h\n"
- "fmla z31.h, p3/M, z6.h, z21.h\n"
- "fmla z28.h, p3/M, z3.h, z21.h\n"
- "ld1h { z21.h }, p2/Z, [x13, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z3.h, z27.h\n"
+ "fmla z31.h, p3/M, z0.h, z27.h\n"
+ "fmla z28.h, p3/M, z6.h, z17.h\n"
+ "fmla z9.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x13, x15, LSL #1]\n"
"ldr x24, [x8, #0xb0]\n"
- "fmla z13.h, p3/M, z4.h, z29.h\n"
- "fmla z22.h, p3/M, z3.h, z29.h\n"
- "fmla z14.h, p3/M, z1.h, z29.h\n"
- "fmla z9.h, p3/M, z5.h, z12.h\n"
- "fmla z11.h, p3/M, z2.h, z12.h\n"
- "fmla z15.h, p3/M, z0.h, z29.h\n"
- "ld1h { z17.h }, p2/Z, [x22, x16, LSL #1]\n"
+ "fmla z18.h, p3/M, z4.h, z22.h\n"
+ "fmla z26.h, p3/M, z3.h, z22.h\n"
+ "fmla z16.h, p3/M, z1.h, z22.h\n"
+ "fmla z29.h, p3/M, z5.h, z21.h\n"
+ "fmla z11.h, p3/M, z2.h, z21.h\n"
+ "fmla z13.h, p3/M, z0.h, z22.h\n"
+ "ld1h { z21.h }, p2/Z, [x22, x15, LSL #1]\n"
"ldr x23, [x8, #0xb8]\n"
- "fmla z10.h, p3/M, z8.h, z21.h\n"
- "fmla z26.h, p3/M, z5.h, z21.h\n"
- "ld1h { z23.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "fmla z23.h, p3/M, z8.h, z17.h\n"
+ "fmla z25.h, p3/M, z5.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x21, x15, LSL #1]\n"
"ldr x22, [x8, #0xc0]\n"
- "fmla z18.h, p3/M, z5.h, z29.h\n"
- "fmla z27.h, p3/M, z2.h, z29.h\n"
- "ld1h { z21.h }, p2/Z, [x28, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z5.h, z22.h\n"
+ "fmla z31.h, p3/M, z2.h, z22.h\n"
+ "ld1h { z22.h }, p2/Z, [x28, x15, LSL #1]\n"
"ldr x21, [x8, #0xc8]\n"
- "fmla z13.h, p3/M, z5.h, z17.h\n"
- "fmla z22.h, p3/M, z4.h, z17.h\n"
- "fmla z14.h, p3/M, z2.h, z17.h\n"
- "fmla z9.h, p3/M, z3.h, z17.h\n"
- "fmla z15.h, p3/M, z1.h, z17.h\n"
- "fmla z11.h, p3/M, z0.h, z17.h\n"
- "ld1h { z29.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "fmla z18.h, p3/M, z5.h, z21.h\n"
+ "fmla z26.h, p3/M, z4.h, z21.h\n"
+ "fmla z16.h, p3/M, z2.h, z21.h\n"
+ "fmla z29.h, p3/M, z3.h, z21.h\n"
+ "fmla z13.h, p3/M, z1.h, z21.h\n"
+ "fmla z11.h, p3/M, z0.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x20, x15, LSL #1]\n"
"ldr x28, [x8, #0xd8]\n"
- "fmla z28.h, p3/M, z7.h, z23.h\n"
- "fmla z25.h, p3/M, z6.h, z23.h\n"
- "ld1h { z23.h }, p2/Z, [x27, x16, LSL #1]\n"
+ "fmla z9.h, p3/M, z7.h, z17.h\n"
+ "fmla z24.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, x15, LSL #1]\n"
"ldr x20, [x8, #0xd0]\n"
- "fmla z18.h, p3/M, z7.h, z21.h\n"
- "fmla z13.h, p3/M, z6.h, z21.h\n"
- "fmla z27.h, p3/M, z4.h, z21.h\n"
- "fmla z14.h, p3/M, z3.h, z21.h\n"
- "fmla z31.h, p3/M, z1.h, z21.h\n"
- "fmla z30.h, p3/M, z0.h, z21.h\n"
- "ld1h { z21.h }, p2/Z, [x26, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z7.h, z22.h\n"
+ "fmla z18.h, p3/M, z6.h, z22.h\n"
+ "fmla z31.h, p3/M, z4.h, z22.h\n"
+ "fmla z16.h, p3/M, z3.h, z22.h\n"
+ "fmla z28.h, p3/M, z1.h, z22.h\n"
+ "fmla z15.h, p3/M, z0.h, z22.h\n"
+ "ld1h { z22.h }, p2/Z, [x26, x15, LSL #1]\n"
"ldr x27, [x8, #0xe0]\n"
- "fmla z22.h, p3/M, z8.h, z29.h\n"
- "fmla z24.h, p3/M, z8.h, z23.h\n"
- "fmla z26.h, p3/M, z7.h, z23.h\n"
- "ld1h { z23.h }, p2/Z, [x25, x16, LSL #1]\n"
- "fmla z10.h, p3/M, z1.h, z29.h\n"
+ "fmla z26.h, p3/M, z8.h, z21.h\n"
+ "fmla z12.h, p3/M, z8.h, z17.h\n"
+ "fmla z25.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "fmla z23.h, p3/M, z1.h, z21.h\n"
"ldr x26, [x8, #0xe8]\n"
- "fmla z9.h, p3/M, z7.h, z29.h\n"
- "fmla z15.h, p3/M, z5.h, z29.h\n"
- "fmla z11.h, p3/M, z4.h, z29.h\n"
- "fmla z20.h, p3/M, z2.h, z29.h\n"
- "ld1h { z29.h }, p2/Z, [x24, x16, LSL #1]\n"
+ "fmla z29.h, p3/M, z7.h, z21.h\n"
+ "fmla z13.h, p3/M, z5.h, z21.h\n"
+ "fmla z11.h, p3/M, z4.h, z21.h\n"
+ "fmla z20.h, p3/M, z2.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x24, x15, LSL #1]\n"
"ldr x25, [x8, #0xf0]\n"
- "fmla z18.h, p3/M, z2.h, z21.h\n"
- "fmla z13.h, p3/M, z1.h, z21.h\n"
- "fmla z22.h, p3/M, z0.h, z21.h\n"
- "fmla z27.h, p3/M, z7.h, z23.h\n"
- "ld1h { z21.h }, p2/Z, [x23, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z2.h, z22.h\n"
+ "fmla z18.h, p3/M, z1.h, z22.h\n"
+ "fmla z26.h, p3/M, z0.h, z22.h\n"
+ "fmla z31.h, p3/M, z7.h, z17.h\n"
+ "ld1h { z22.h }, p2/Z, [x23, x15, LSL #1]\n"
"ldr x24, [x8, #0xf8]\n"
- "fmla z14.h, p3/M, z6.h, z23.h\n"
- "fmla z31.h, p3/M, z4.h, z23.h\n"
- "fmla z30.h, p3/M, z3.h, z23.h\n"
- "fmla z28.h, p3/M, z1.h, z23.h\n"
- "fmla z25.h, p3/M, z0.h, z23.h\n"
- "ld1h { z17.h }, p2/Z, [x22, x16, LSL #1]\n"
- "fmla z10.h, p3/M, z4.h, z17.h\n"
+ "fmla z16.h, p3/M, z6.h, z17.h\n"
+ "fmla z28.h, p3/M, z4.h, z17.h\n"
+ "fmla z15.h, p3/M, z3.h, z17.h\n"
+ "fmla z9.h, p3/M, z1.h, z17.h\n"
+ "fmla z24.h, p3/M, z0.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmla z18.h, p3/M, z2.h, z21.h\n"
"ldr x23, [x8, #0x100]\n"
- "fmla z24.h, p3/M, z2.h, z17.h\n"
- "fmla z13.h, p3/M, z2.h, z29.h\n"
- "fmla z22.h, p3/M, z1.h, z29.h\n"
- "fmla z9.h, p3/M, z0.h, z29.h\n"
- "ld1h { z23.h }, p2/Z, [x21, x16, LSL #1]\n"
+ "fmla z26.h, p3/M, z1.h, z21.h\n"
+ "fmla z29.h, p3/M, z0.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x21, x15, LSL #1]\n"
"ldr x22, [x8, #0x108]\n"
- "fmla z18.h, p3/M, z6.h, z21.h\n"
- "fmla z27.h, p3/M, z3.h, z21.h\n"
- "fmla z31.h, p3/M, z0.h, z21.h\n"
- "fmla z15.h, p3/M, z8.h, z17.h\n"
- "ld1h { z29.h }, p2/Z, [x20, x16, LSL #1]\n"
+ "fmla z30.h, p3/M, z6.h, z22.h\n"
+ "fmla z31.h, p3/M, z3.h, z22.h\n"
+ "fmla z23.h, p3/M, z4.h, z17.h\n"
+ "fmla z12.h, p3/M, z2.h, z17.h\n"
+ "fmla z28.h, p3/M, z0.h, z22.h\n"
+ "fmla z13.h, p3/M, z8.h, z17.h\n"
+ "ld1h { z22.h }, p2/Z, [x20, x15, LSL #1]\n"
"ldr x21, [x8, #0x110]\n"
"fmla z11.h, p3/M, z7.h, z17.h\n"
"fmla z20.h, p3/M, z5.h, z17.h\n"
- "fmla z26.h, p3/M, z1.h, z17.h\n"
- "ld1h { z21.h }, p2/Z, [x28, x16, LSL #1]\n"
- "fmla z10.h, p3/M, z2.h, z23.h\n"
+ "fmla z25.h, p3/M, z1.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x28, x15, LSL #1]\n"
+ "fmla z29.h, p3/M, z8.h, z21.h\n"
"ldr x20, [x8, #0x118]\n"
- "fmla z28.h, p3/M, z0.h, z29.h\n"
- "fmla z25.h, p3/M, z4.h, z21.h\n"
- "fmla z24.h, p3/M, z3.h, z21.h\n"
- "fmla z9.h, p3/M, z8.h, z23.h\n"
- "fmla z11.h, p3/M, z5.h, z23.h\n"
- "fmla z27.h, p3/M, z6.h, z29.h\n"
- "ld1h { z23.h }, p2/Z, [x27, x16, LSL #1]\n"
- "fmla z31.h, p3/M, z3.h, z29.h\n"
- "ld1h { z17.h }, p2/Z, [x26, x16, LSL #1]\n"
- "fmla z30.h, p3/M, z7.h, z21.h\n"
- "fmla z20.h, p3/M, z6.h, z21.h\n"
- "fmla z28.h, p3/M, z5.h, z21.h\n"
- "fmla z10.h, p3/M, z5.h, z23.h\n"
- "fmla z26.h, p3/M, z2.h, z23.h\n"
- "fmla z25.h, p3/M, z7.h, z17.h\n"
- "fmla z24.h, p3/M, z6.h, z17.h\n"
- "fmla z31.h, p3/M, z8.h, z21.h\n"
- "ld1h { z21.h }, p2/Z, [x25, x16, LSL #1]\n"
+ "fmla z23.h, p3/M, z2.h, z21.h\n"
+ "fmla z9.h, p3/M, z0.h, z22.h\n"
+ "fmla z31.h, p3/M, z6.h, z22.h\n"
+ "fmla z28.h, p3/M, z3.h, z22.h\n"
+ "ld1h { z27.h }, p2/Z, [x26, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z17.h\n"
+ "fmla z12.h, p3/M, z3.h, z17.h\n"
+ "fmla z11.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z22.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "fmla z15.h, p3/M, z7.h, z17.h\n"
+ "fmla z20.h, p3/M, z6.h, z17.h\n"
+ "fmla z9.h, p3/M, z5.h, z17.h\n"
"fmla z28.h, p3/M, z8.h, z17.h\n"
- "ld1h { z12.h }, p2/Z, [x23, x16, LSL #1]\n"
- "fmla z30.h, p3/M, z8.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "fmla z23.h, p3/M, z5.h, z22.h\n"
+ "fmla z25.h, p3/M, z2.h, z22.h\n"
+ "fmla z24.h, p3/M, z7.h, z27.h\n"
+ "fmla z12.h, p3/M, z6.h, z27.h\n"
+ "fmla z11.h, p3/M, z8.h, z22.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x15, LSL #1]\n"
+ "fmla z9.h, p3/M, z8.h, z27.h\n"
+ "fmla z15.h, p3/M, z8.h, z21.h\n"
+ "ld1h { z27.h }, p2/Z, [x23, x15, LSL #1]\n"
"fmla z20.h, p3/M, z7.h, z21.h\n"
- "fmla z10.h, p3/M, z6.h, z21.h\n"
- "fmla z25.h, p3/M, z5.h, z21.h\n"
- "fmla z24.h, p3/M, z4.h, z21.h\n"
- "fmla z26.h, p3/M, z3.h, z21.h\n"
- "ld1h { z21.h }, p2/Z, [x22, x16, LSL #1]\n"
- "fmla z11.h, p3/M, z8.h, z23.h\n"
- "ld1h { z29.h }, p2/Z, [x24, x16, LSL #1]\n"
- "fmla z18.h, p3/M, z4.h, z12.h\n"
- "fmax z18.h, p3/M, z18.h, z16.h\n"
- "fmla z13.h, p3/M, z3.h, z12.h\n"
- "fmla z22.h, p3/M, z5.h, z21.h\n"
- "fmax z13.h, p3/M, z13.h, z16.h\n"
- "fmax z22.h, p3/M, z22.h, z16.h\n"
- "fmla z9.h, p3/M, z4.h, z21.h\n"
- "fmla z25.h, p3/M, z8.h, z29.h\n"
- "fmax z9.h, p3/M, z9.h, z16.h\n"
- "fmin z18.h, p3/M, z18.h, z19.h\n"
- "fmla z24.h, p3/M, z7.h, z29.h\n"
- "fmla z26.h, p3/M, z6.h, z29.h\n"
- "ld1h { z23.h }, p2/Z, [x21, x16, LSL #1]\n"
- "fmin z13.h, p3/M, z13.h, z19.h\n"
- "fmla z27.h, p3/M, z1.h, z12.h\n"
- "fmla z14.h, p3/M, z0.h, z12.h\n"
- "ld1h { z29.h }, p2/Z, [x20, x16, LSL #1]\n"
- "fmin z22.h, p3/M, z22.h, z19.h\n"
- "fmla z15.h, p3/M, z2.h, z21.h\n"
+ "fmla z23.h, p3/M, z6.h, z21.h\n"
+ "fmla z24.h, p3/M, z5.h, z21.h\n"
+ "fmla z25.h, p3/M, z3.h, z21.h\n"
+ "fmla z12.h, p3/M, z4.h, z21.h\n"
+ "ld1h { z21.h }, p2/Z, [x22, x15, LSL #1]\n"
+ "fmla z30.h, p3/M, z4.h, z27.h\n"
+ "fmla z18.h, p3/M, z3.h, z27.h\n"
+ "fmla z31.h, p3/M, z1.h, z27.h\n"
+ "fmla z16.h, p3/M, z0.h, z27.h\n"
+ "ld1h { z27.h }, p2/Z, [x20, x15, LSL #1]\n"
+ "fmla z26.h, p3/M, z5.h, z21.h\n"
+ "fmla z29.h, p3/M, z4.h, z21.h\n"
+ "fmla z24.h, p3/M, z8.h, z17.h\n"
+ "fmla z12.h, p3/M, z7.h, z17.h\n"
+ "fmla z25.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z10.h }, p2/Z, [x21, x15, LSL #1]\n"
+ "fmax z30.h, p3/M, z30.h, z19.h\n"
+ "fmla z13.h, p3/M, z2.h, z21.h\n"
"fmla z11.h, p3/M, z1.h, z21.h\n"
- "fmin z9.h, p3/M, z9.h, z19.h\n"
- "fmax z27.h, p3/M, z27.h, z16.h\n"
- "fmla z31.h, p3/M, z7.h, z23.h\n"
- "fmla z30.h, p3/M, z6.h, z23.h\n"
- "fmax z14.h, p3/M, z14.h, z16.h\n"
- "fmax z15.h, p3/M, z15.h, z16.h\n"
- "fmla z20.h, p3/M, z8.h, z29.h\n"
- "fmla z10.h, p3/M, z7.h, z29.h\n"
- "fmax z11.h, p3/M, z11.h, z16.h\n"
- "st1h { z18.h }, p0, [x12, x15, LSL #1]\n"
- "st1h { z13.h }, p0, [x11, x15, LSL #1]\n"
- "ldr x23, [x14, #0x20]\n"
- "ldr x22, [x14, #0x28]\n"
- "fmla z28.h, p3/M, z4.h, z23.h\n"
- "st1h { z22.h }, p0, [x10, x15, LSL #1]\n"
- "ldr x21, [x14, #0x30]\n"
- "fmla z25.h, p3/M, z3.h, z23.h\n"
- "fmla z24.h, p3/M, z5.h, z29.h\n"
- "st1h { z9.h }, p0, [x9, x15, LSL #1]\n"
- "ldr x20, [x14, #0x38]\n"
- "fmla z26.h, p3/M, z4.h, z29.h\n"
- "fmin z27.h, p3/M, z27.h, z19.h\n"
- "fmin z14.h, p3/M, z14.h, z19.h\n"
- "fmin z15.h, p3/M, z15.h, z19.h\n"
- "st1h { z27.h }, p0, [x23, x15, LSL #1]\n"
- "ldr x23, [x14, #0x40]\n"
- "fmin z11.h, p3/M, z11.h, z19.h\n"
- "fmax z31.h, p3/M, z31.h, z16.h\n"
- "st1h { z14.h }, p0, [x22, x15, LSL #1]\n"
- "ldr x22, [x14, #0x48]\n"
- "fmax z30.h, p3/M, z30.h, z16.h\n"
- "fmax z20.h, p3/M, z20.h, z16.h\n"
- "st1h { z15.h }, p0, [x21, x15, LSL #1]\n"
- "ldr x21, [x14, #0x50]\n"
- "fmax z10.h, p3/M, z10.h, z16.h\n"
- "st1h { z11.h }, p0, [x20, x15, LSL #1]\n"
- "ldr x20, [x14, #0x58]\n"
- "fmin z31.h, p3/M, z31.h, z19.h\n"
- "fmin z30.h, p3/M, z30.h, z19.h\n"
- "fmin z20.h, p3/M, z20.h, z19.h\n"
- "st1h { z31.h }, p0, [x23, x15, LSL #1]\n"
- "ldr x23, [x14, #0x60]\n"
- "fmin z10.h, p3/M, z10.h, z19.h\n"
- "fmax z28.h, p3/M, z28.h, z16.h\n"
- "st1h { z30.h }, p0, [x22, x15, LSL #1]\n"
- "ldr x22, [x14, #0x68]\n"
- "fmax z25.h, p3/M, z25.h, z16.h\n"
- "fmax z24.h, p3/M, z24.h, z16.h\n"
- "st1h { z20.h }, p0, [x21, x15, LSL #1]\n"
- "ldr x21, [x14, #0x70]\n"
- "fmax z26.h, p3/M, z26.h, z16.h\n"
- "st1h { z10.h }, p0, [x20, x15, LSL #1]\n"
- "ldr x20, [x14, #0x78]\n"
- "fmin z28.h, p3/M, z28.h, z19.h\n"
- "fmin z25.h, p3/M, z25.h, z19.h\n"
- "fmin z24.h, p3/M, z24.h, z19.h\n"
- "st1h { z28.h }, p0, [x23, x15, LSL #1]\n"
- "fmin z26.h, p3/M, z26.h, z19.h\n"
- "st1h { z25.h }, p0, [x22, x15, LSL #1]\n"
- "st1h { z24.h }, p0, [x21, x15, LSL #1]\n"
- "st1h { z26.h }, p0, [x20, x15, LSL #1]\n"
+ "fmax z18.h, p3/M, z18.h, z19.h\n"
+ "fmax z31.h, p3/M, z31.h, z19.h\n"
+ "fmax z26.h, p3/M, z26.h, z19.h\n"
+ "fmla z20.h, p3/M, z8.h, z27.h\n"
+ "fmla z23.h, p3/M, z7.h, z27.h\n"
+ "fmax z16.h, p3/M, z16.h, z19.h\n"
+ "fmax z29.h, p3/M, z29.h, z19.h\n"
+ "fmla z28.h, p3/M, z7.h, z10.h\n"
+ "fmla z15.h, p3/M, z6.h, z10.h\n"
+ "fmin z30.h, p3/M, z30.h, z14.h\n"
+ "fmin z18.h, p3/M, z18.h, z14.h\n"
+ "fmla z9.h, p3/M, z4.h, z10.h\n"
+ "fmla z24.h, p3/M, z3.h, z10.h\n"
+ "fmin z31.h, p3/M, z31.h, z14.h\n"
+ "fmin z26.h, p3/M, z26.h, z14.h\n"
+ "fmax z13.h, p3/M, z13.h, z19.h\n"
+ "fmla z12.h, p3/M, z5.h, z27.h\n"
+ "fmla z25.h, p3/M, z4.h, z27.h\n"
+ "fmin z29.h, p3/M, z29.h, z14.h\n"
+ "fmax z11.h, p3/M, z11.h, z19.h\n"
+ "st1h { z30.h }, p0, [x12, x14, LSL #1]\n"
+ "ldr x23, [x17, #0x20]\n"
+ "st1h { z18.h }, p0, [x11, x14, LSL #1]\n"
+ "ldr x22, [x17, #0x28]\n"
+ "fmin z16.h, p3/M, z16.h, z14.h\n"
+ "fmax z28.h, p3/M, z28.h, z19.h\n"
+ "st1h { z26.h }, p0, [x10, x14, LSL #1]\n"
+ "ldr x21, [x17, #0x30]\n"
+ "fmin z13.h, p3/M, z13.h, z14.h\n"
+ "fmax z15.h, p3/M, z15.h, z19.h\n"
+ "st1h { z29.h }, p0, [x9, x14, LSL #1]\n"
+ "ldr x20, [x17, #0x38]\n"
+ "fmin z11.h, p3/M, z11.h, z14.h\n"
+ "fmax z20.h, p3/M, z20.h, z19.h\n"
+ "fmax z23.h, p3/M, z23.h, z19.h\n"
+ "st1h { z31.h }, p0, [x23, x14, LSL #1]\n"
+ "ldr x23, [x17, #0x40]\n"
+ "fmin z28.h, p3/M, z28.h, z14.h\n"
+ "st1h { z16.h }, p0, [x22, x14, LSL #1]\n"
+ "ldr x22, [x17, #0x48]\n"
+ "fmin z15.h, p3/M, z15.h, z14.h\n"
+ "fmax z9.h, p3/M, z9.h, z19.h\n"
+ "st1h { z13.h }, p0, [x21, x14, LSL #1]\n"
+ "ldr x21, [x17, #0x50]\n"
+ "fmin z20.h, p3/M, z20.h, z14.h\n"
+ "fmax z24.h, p3/M, z24.h, z19.h\n"
+ "st1h { z11.h }, p0, [x20, x14, LSL #1]\n"
+ "ldr x20, [x17, #0x58]\n"
+ "fmin z23.h, p3/M, z23.h, z14.h\n"
+ "fmax z12.h, p3/M, z12.h, z19.h\n"
+ "fmax z25.h, p3/M, z25.h, z19.h\n"
+ "st1h { z28.h }, p0, [x23, x14, LSL #1]\n"
+ "ldr x23, [x17, #0x60]\n"
+ "fmin z9.h, p3/M, z9.h, z14.h\n"
+ "st1h { z15.h }, p0, [x22, x14, LSL #1]\n"
+ "ldr x22, [x17, #0x68]\n"
+ "fmin z24.h, p3/M, z24.h, z14.h\n"
+ "st1h { z20.h }, p0, [x21, x14, LSL #1]\n"
+ "ldr x21, [x17, #0x70]\n"
+ "fmin z12.h, p3/M, z12.h, z14.h\n"
+ "st1h { z23.h }, p0, [x20, x14, LSL #1]\n"
+ "ldr x20, [x17, #0x78]\n"
+ "fmin z25.h, p3/M, z25.h, z14.h\n"
+ "st1h { z9.h }, p0, [x23, x14, LSL #1]\n"
+ "st1h { z24.h }, p0, [x22, x14, LSL #1]\n"
+ "st1h { z12.h }, p0, [x21, x14, LSL #1]\n"
+ "st1h { z25.h }, p0, [x20, x14, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
: "cc", "memory", "p0", "p1", "p2", "p3", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index 58decdba1c..187c11aa3a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,246 +88,246 @@ void sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x11, #0x0\n"
- "mov x16, #0x0\n"
+ "mov x7, #0x0\n"
+ "mov x8, #0x0\n"
"1:" // Tile loop
- "str x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
- "mov x24, #0x2\n"
- "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x11, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x16, x15, x22\n" // offset += tile_j * ld_input_col
- "ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cnth x13\n"
- "mul x20, x11, x21\n" // offset = tile_i * ld_output_row
- "ldr x12, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x10, x15, x15\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x12, x12, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "ldr x9, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x28, x12, x23, LSL #1\n"
- "madd x20, x16, x14, x20\n" // offset += tile_j * ld_output_col
+ "str x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x4\n"
+ "mov x25, #0x2\n"
+ "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "cnth x16\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z30.h }, p3/Z, [x11]\n"
- "ld1h { z0.h }, p3/Z, [x11, #1, MUL VL]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1h { z1.h }, p3/Z, [x11, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x11, #3, MUL VL]\n"
- "add x27, x28, x23, LSL #1\n"
- "ld1h { z3.h }, p3/Z, [x11, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x11, #5, MUL VL]\n"
- "add x26, x10, x15\n"
- "add x25, x27, x23, LSL #1\n"
- "ld1h { z5.h }, p3/Z, [x11, #6, MUL VL]\n"
- "ld1h { z6.h }, p3/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #16\n"
- "add x24, x26, x15\n"
- "add x9, x9, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "cmp x13, %x[n_channels]\n"
- "ld1rh { z29.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z28.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "add x23, x25, x23, LSL #1\n"
- "add x22, x9, x21, LSL #1\n"
- "ld1h { z7.h }, p3/Z, [x11, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x11, #-7, MUL VL]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x13\n"
- "ld1h { z9.h }, p2/Z, [x27, x10, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x12]\n"
- "ld1h { z11.h }, p2/Z, [x12, x15, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x12, x26, LSL #1]\n"
- "addvl x11, x11, #-6\n"
- "ld1h { z13.h }, p2/Z, [x12, x24, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x28]\n"
- "ld1h { z15.h }, p2/Z, [x28, x15, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x12, x10, LSL #1]\n"
+ "mov x14, #0x0\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x12, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x22, x7, x24\n" // offset = tile_i * ld_input_row
+ "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x10, x17, x17\n"
+ "cmp x16, %x[n_channels]\n"
+ "ld1rh { z30.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mul x21, x7, x23\n" // offset = tile_i * ld_output_row
+ "add x9, x10, x17\n"
+ "ld1rh { z29.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "sub x20, XZR, x16\n"
+ "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
+ "ld1h { z28.h }, p3/Z, [x12]\n"
+ "ld1h { z0.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "add x28, x9, x17\n"
+ "ld1h { z1.h }, p3/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x12, #3, MUL VL]\n"
+ "madd x21, x8, x15, x21\n" // offset += tile_j * ld_output_col
+ "ld1h { z3.h }, p3/Z, [x12, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x12, #5, MUL VL]\n"
+ "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
+ "ld1h { z5.h }, p3/Z, [x12, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x12, #7, MUL VL]\n"
+ "addvl x12, x12, #16\n"
+ "mul x21, x21, x25\n" // offset *= output_tile_size
+ "add x13, x13, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x27, x13, x24, LSL #1\n"
+ "add x26, x27, x24, LSL #1\n"
+ "ld1h { z10.h }, p2/Z, [x13]\n"
+ "ld1h { z11.h }, p2/Z, [x13, x17, LSL #1]\n"
+ "add x25, x26, x24, LSL #1\n"
+ "add x11, x11, x21, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x24, x25, x24, LSL #1\n"
+ "ld1h { z7.h }, p3/Z, [x12, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x12, #-7, MUL VL]\n"
+ "add x23, x11, x23, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x26, x10, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x13, x9, LSL #1]\n"
+ "addvl x12, x12, #-6\n"
+ "ld1h { z13.h }, p2/Z, [x13, x28, LSL #1]\n"
+ "ld1h { z14.h }, p2/Z, [x27]\n"
+ "ld1h { z15.h }, p2/Z, [x27, x17, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x13, x10, LSL #1]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z27, z30\n fmla z27.h, p3/M, z8.h, z9.h\n"
- "movprfx z26, z30\n fmla z26.h, p3/M, z6.h, z9.h\n"
- "whilelt p1.h, x13, %x[n_channels]\n"
- "inch x21\n"
+ "movprfx z27, z28\n fmla z27.h, p3/M, z8.h, z9.h\n"
+ "movprfx z26, z28\n fmla z26.h, p3/M, z6.h, z9.h\n"
+ "whilelt p1.h, x16, %x[n_channels]\n"
+ "inch x14\n"
+ "movprfx z25, z28\n fmla z25.h, p3/M, z2.h, z9.h\n"
+ "movprfx z24, z28\n fmla z24.h, p3/M, z0.h, z9.h\n"
+ "inch x16\n"
+ "mov p0.b, p2.b\n"
+ "addvl x13, x13, #1\n"
+ "ld1h { z28.h }, p3/Z, [x12]\n"
+ "inch x20\n"
"fmla z27.h, p3/M, z0.h, z10.h\n"
"fmla z26.h, p3/M, z1.h, z12.h\n"
- "ld1h { z20.h }, p2/Z, [x28, x24, LSL #1]\n"
- "inch x13\n"
+ "ld1h { z21.h }, p2/Z, [x27, x28, LSL #1]\n"
+ "ld1h { z10.h }, p1/Z, [x13]\n"
"fmla z27.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z18.h }, p2/Z, [x27, x9, LSL #1]\n"
"fmla z26.h, p3/M, z2.h, z13.h\n"
- "ld1h { z17.h }, p2/Z, [x28, x26, LSL #1]\n"
- "ld1h { z19.h }, p2/Z, [x28, x10, LSL #1]\n"
+ "ld1h { z20.h }, p2/Z, [x27, x10, LSL #1]\n"
+ "addvl x27, x27, #1\n"
"fmla z27.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z17.h }, p2/Z, [x25]\n"
"fmla z26.h, p3/M, z0.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x25]\n"
- "mov p0.b, p2.b\n"
+ "fmla z25.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z23.h }, p2/Z, [x25, x28, LSL #1]\n"
"fmla z27.h, p3/M, z4.h, z15.h\n"
- "fmla z26.h, p3/M, z4.h, z17.h\n"
- "ld1h { z25.h }, p2/Z, [x27]\n"
- "ld1h { z17.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x26]\n"
+ "fmla z26.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z17.h }, p2/Z, [x25, x17, LSL #1]\n"
"fmla z27.h, p3/M, z2.h, z16.h\n"
- "fmla z26.h, p3/M, z5.h, z20.h\n"
- "ld1h { z24.h }, p2/Z, [x27, x26, LSL #1]\n"
- "ld1h { z23.h }, p2/Z, [x27, x15, LSL #1]\n"
- "movprfx z22, z30\n fmla z22.h, p3/M, z2.h, z9.h\n"
- "movprfx z21, z30\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "addvl x12, x12, #1\n"
- "addvl x28, x28, #1\n"
- "fmla z27.h, p3/M, z5.h, z19.h\n"
- "fmla z26.h, p3/M, z3.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x25, x26, LSL #1]\n"
- "ld1h { z30.h }, p3/Z, [x11]\n"
- "fmla z22.h, p3/M, z3.h, z18.h\n"
- "fmla z21.h, p3/M, z4.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x25, x24, LSL #1]\n"
- "ld1h { z20.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z22.h, p3/M, z0.h, z25.h\n"
- "fmla z21.h, p3/M, z1.h, z24.h\n"
- "ld1h { z0.h }, p3/Z, [x11, #1, MUL VL]\n"
- "inch x20\n"
- "fmla z22.h, p3/M, z4.h, z17.h\n"
- "fmla z21.h, p3/M, z5.h, z16.h\n"
- "ld1h { z19.h }, p2/Z, [x27, x24, LSL #1]\n"
- "ld1h { z18.h }, p2/Z, [x23, x26, LSL #1]\n"
- "fmla z27.h, p3/M, z6.h, z25.h\n"
- "fmla z22.h, p3/M, z1.h, z23.h\n"
- "ld1h { z17.h }, p2/Z, [x23]\n"
- "addvl x27, x27, #1\n"
- "fmla z21.h, p3/M, z2.h, z19.h\n"
- "fmla z27.h, p3/M, z7.h, z23.h\n"
+ "ld1h { z19.h }, p2/Z, [x26, x17, LSL #1]\n"
+ "fmla z25.h, p3/M, z0.h, z22.h\n"
+ "ld1h { z0.h }, p3/Z, [x12, #1, MUL VL]\n"
+ "fmla z26.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z18.h }, p2/Z, [x26, x9, LSL #1]\n"
+ "fmla z27.h, p3/M, z5.h, z20.h\n"
+ "fmla z26.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x9, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z16.h\n"
+ "ld1h { z21.h }, p2/Z, [x24, x17, LSL #1]\n"
+ "fmla z25.h, p3/M, z4.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x26, x28, LSL #1]\n"
+ "addvl x26, x26, #1\n"
+ "ld1h { z4.h }, p3/Z, [x12, #5, MUL VL]\n"
+ "fmla z27.h, p3/M, z6.h, z22.h\n"
+ "ld1h { z17.h }, p2/Z, [x24]\n"
+ "fmla z26.h, p3/M, z7.h, z18.h\n"
+ "fmla z24.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z12.h }, p1/Z, [x13, x9, LSL #1]\n"
+ "fmla z25.h, p3/M, z1.h, z19.h\n"
+ "ld1h { z1.h }, p3/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z9.h }, p1/Z, [x26, x10, LSL #1]\n"
+ "fmla z27.h, p3/M, z7.h, z19.h\n"
"ld1h { z16.h }, p2/Z, [x25, x10, LSL #1]\n"
- "fmax z27.h, p3/M, z27.h, z29.h\n"
- "fmla z22.h, p3/M, z6.h, z17.h\n"
- "fmla z21.h, p3/M, z3.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x10, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x11, #2, MUL VL]\n"
- "fmla z22.h, p3/M, z7.h, z20.h\n"
- "fmla z21.h, p3/M, z7.h, z18.h\n"
- "ld1h { z2.h }, p3/Z, [x11, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x11, #4, MUL VL]\n"
- "fmla z26.h, p3/M, z7.h, z24.h\n"
- "fmla z22.h, p3/M, z5.h, z16.h\n"
- "ld1h { z4.h }, p3/Z, [x11, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x11, #6, MUL VL]\n"
- "fmla z21.h, p3/M, z6.h, z17.h\n"
- "fmla z26.h, p3/M, z8.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x24, LSL #1]\n"
- "fmax z26.h, p3/M, z26.h, z29.h\n"
- "fmla z22.h, p3/M, z8.h, z17.h\n"
- "fmla z21.h, p3/M, z8.h, z16.h\n"
- "fmax z22.h, p3/M, z22.h, z29.h\n"
- "fmax z21.h, p3/M, z21.h, z29.h\n"
- "ld1h { z6.h }, p3/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #16\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "ld1h { z9.h }, p1/Z, [x27, x10, LSL #1]\n"
- "cmp x13, %x[n_channels]\n"
- "fmin z27.h, p3/M, z27.h, z28.h\n"
- "ld1h { z10.h }, p1/Z, [x12]\n"
- "ld1h { z11.h }, p1/Z, [x12, x15, LSL #1]\n"
- "fmin z26.h, p3/M, z26.h, z28.h\n"
- "fmin z22.h, p3/M, z22.h, z28.h\n"
- "ld1h { z12.h }, p1/Z, [x12, x26, LSL #1]\n"
- "ld1h { z13.h }, p1/Z, [x12, x24, LSL #1]\n"
- "fmin z21.h, p3/M, z21.h, z28.h\n"
"addvl x25, x25, #1\n"
- "ld1h { z14.h }, p1/Z, [x28]\n"
- "ld1h { z15.h }, p1/Z, [x28, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z5.h, z23.h\n"
+ "ld1h { z19.h }, p2/Z, [x24, x9, LSL #1]\n"
+ "fmla z26.h, p3/M, z8.h, z20.h\n"
+ "fmla z25.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x24, x10, LSL #1]\n"
+ "fmax z27.h, p3/M, z27.h, z30.h\n"
+ "fmla z24.h, p3/M, z2.h, z20.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x28, LSL #1]\n"
+ "ld1h { z2.h }, p3/Z, [x12, #3, MUL VL]\n"
+ "whilelt p2.h, x14, %x[n_channels]\n"
+ "cmp x16, %x[n_channels]\n"
+ "addvl x24, x24, #1\n"
+ "fmin z27.h, p3/M, z27.h, z29.h\n"
+ "fmla z25.h, p3/M, z7.h, z21.h\n"
+ "ld1h { z13.h }, p1/Z, [x13, x28, LSL #1]\n"
+ "fmax z26.h, p3/M, z26.h, z30.h\n"
+ "fmla z24.h, p3/M, z3.h, z16.h\n"
+ "ld1h { z3.h }, p3/Z, [x12, #4, MUL VL]\n"
+ "fmin z26.h, p3/M, z26.h, z29.h\n"
+ "st1h { z27.h }, p0, [x11]\n"
+ "fmla z25.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z5.h }, p3/Z, [x12, #6, MUL VL]\n"
+ "ld1h { z16.h }, p1/Z, [x13, x10, LSL #1]\n"
+ "st1h { z26.h }, p0, [x11, x15, LSL #1]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.h, p3/M, z7.h, z19.h\n"
+ "ld1h { z14.h }, p1/Z, [x27]\n"
+ "fmla z25.h, p3/M, z8.h, z18.h\n"
+ "fmla z24.h, p3/M, z6.h, z18.h\n"
+ "ld1h { z6.h }, p3/Z, [x12, #7, MUL VL]\n"
+ "addvl x12, x12, #16\n"
+ "ld1h { z15.h }, p1/Z, [x27, x17, LSL #1]\n"
+ "fmax z25.h, p3/M, z25.h, z30.h\n"
+ "ld1h { z7.h }, p3/Z, [x12, #-8, MUL VL]\n"
+ "fmla z24.h, p3/M, z8.h, z17.h\n"
+ "ld1h { z11.h }, p1/Z, [x13, x17, LSL #1]\n"
+ "ld1h { z8.h }, p3/Z, [x12, #-7, MUL VL]\n"
+ "addvl x12, x12, #-6\n"
+ "fmin z25.h, p3/M, z25.h, z29.h\n"
+ "fmax z24.h, p3/M, z24.h, z30.h\n"
+ "st1h { z25.h }, p0, [x23]\n"
+ "fmin z24.h, p3/M, z24.h, z29.h\n"
+ "st1h { z24.h }, p0, [x23, x15, LSL #1]\n"
"addvl x23, x23, #1\n"
- "ld1h { z16.h }, p1/Z, [x12, x10, LSL #1]\n"
- "st1h { z27.h }, p0, [x9]\n"
- "ld1h { z7.h }, p3/Z, [x11, #-8, MUL VL]\n"
- "st1h { z26.h }, p0, [x9, x14, LSL #1]\n"
- "addvl x9, x9, #1\n"
- "ld1h { z8.h }, p3/Z, [x11, #-7, MUL VL]\n"
- "addvl x11, x11, #-6\n"
- "st1h { z22.h }, p0, [x22]\n"
- "st1h { z21.h }, p0, [x22, x14, LSL #1]\n"
- "addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z27, z30\n fmla z27.h, p3/M, z8.h, z9.h\n"
- "movprfx z26, z30\n fmla z26.h, p3/M, z6.h, z9.h\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z27, z28\n fmla z27.h, p3/M, z8.h, z9.h\n"
+ "movprfx z26, z28\n fmla z26.h, p3/M, z6.h, z9.h\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z25, z28\n fmla z25.h, p3/M, z2.h, z9.h\n"
+ "movprfx z24, z28\n fmla z24.h, p3/M, z0.h, z9.h\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "mov p0.b, p2.b\n"
+ "add x8, x8, #0x1\n"
+ "add x20, x7, #0x1\n"
"fmla z27.h, p3/M, z0.h, z10.h\n"
"fmla z26.h, p3/M, z1.h, z12.h\n"
- "ld1h { z20.h }, p2/Z, [x28, x24, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ld1h { z21.h }, p2/Z, [x27, x28, LSL #1]\n"
+ "cmp x8, x22\n"
+ "csel x7, x7, x20, LT\n"
+ "csel x8, x8, XZR, LT\n"
"fmla z27.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z18.h }, p2/Z, [x27, x9, LSL #1]\n"
"fmla z26.h, p3/M, z2.h, z13.h\n"
- "ld1h { z17.h }, p2/Z, [x28, x26, LSL #1]\n"
- "ld1h { z19.h }, p2/Z, [x28, x10, LSL #1]\n"
+ "ld1h { z20.h }, p2/Z, [x27, x10, LSL #1]\n"
+ "cmp x7, x21\n"
"fmla z27.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z17.h }, p2/Z, [x25]\n"
"fmla z26.h, p3/M, z0.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x25]\n"
- "add x16, x16, #0x1\n"
+ "fmla z25.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z23.h }, p2/Z, [x25, x28, LSL #1]\n"
"fmla z27.h, p3/M, z4.h, z15.h\n"
- "fmla z26.h, p3/M, z4.h, z17.h\n"
- "ld1h { z25.h }, p2/Z, [x27]\n"
- "ld1h { z17.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x26]\n"
+ "fmla z26.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x25, x17, LSL #1]\n"
+ "fmla z25.h, p3/M, z0.h, z22.h\n"
"fmla z27.h, p3/M, z2.h, z16.h\n"
- "fmla z26.h, p3/M, z5.h, z20.h\n"
- "ld1h { z24.h }, p2/Z, [x27, x26, LSL #1]\n"
- "ld1h { z23.h }, p2/Z, [x27, x15, LSL #1]\n"
- "movprfx z22, z30\n fmla z22.h, p3/M, z2.h, z9.h\n"
- "movprfx z21, z30\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "cmp x16, x20\n"
- "add x21, x11, #0x1\n"
- "fmla z27.h, p3/M, z5.h, z19.h\n"
- "fmla z26.h, p3/M, z3.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x25, x26, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z22.h, p3/M, z3.h, z18.h\n"
- "fmla z21.h, p3/M, z4.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x25, x24, LSL #1]\n"
- "ld1h { z20.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z22.h, p3/M, z0.h, z25.h\n"
- "fmla z21.h, p3/M, z1.h, z24.h\n"
- "csel x11, x11, x21, LT\n"
- "mov p0.b, p2.b\n"
- "fmla z22.h, p3/M, z4.h, z17.h\n"
- "fmla z21.h, p3/M, z5.h, z16.h\n"
- "ld1h { z19.h }, p2/Z, [x27, x24, LSL #1]\n"
- "ld1h { z18.h }, p2/Z, [x23, x26, LSL #1]\n"
- "fmla z27.h, p3/M, z6.h, z25.h\n"
- "fmla z22.h, p3/M, z1.h, z23.h\n"
- "ld1h { z17.h }, p2/Z, [x23]\n"
- "csel x16, x16, XZR, LT\n"
- "fmla z21.h, p3/M, z2.h, z19.h\n"
- "fmla z27.h, p3/M, z7.h, z23.h\n"
- "ld1h { z16.h }, p2/Z, [x25, x10, LSL #1]\n"
- "fmax z27.h, p3/M, z27.h, z29.h\n"
- "fmla z22.h, p3/M, z6.h, z17.h\n"
- "fmla z21.h, p3/M, z3.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x10, LSL #1]\n"
- "cmp x11, x20\n"
- "fmla z22.h, p3/M, z7.h, z20.h\n"
- "fmla z21.h, p3/M, z7.h, z18.h\n"
- "fmin z27.h, p3/M, z27.h, z28.h\n"
- "st1h { z27.h }, p0, [x9]\n"
- "fmla z26.h, p3/M, z7.h, z24.h\n"
- "fmla z22.h, p3/M, z5.h, z16.h\n"
- "fmla z21.h, p3/M, z6.h, z17.h\n"
- "fmla z26.h, p3/M, z8.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x24, LSL #1]\n"
- "fmax z26.h, p3/M, z26.h, z29.h\n"
- "fmla z22.h, p3/M, z8.h, z17.h\n"
- "fmla z21.h, p3/M, z8.h, z16.h\n"
- "fmax z22.h, p3/M, z22.h, z29.h\n"
- "fmax z21.h, p3/M, z21.h, z29.h\n"
- "fmin z26.h, p3/M, z26.h, z28.h\n"
- "fmin z22.h, p3/M, z22.h, z28.h\n"
- "st1h { z26.h }, p0, [x9, x14, LSL #1]\n"
- "fmin z21.h, p3/M, z21.h, z28.h\n"
- "st1h { z22.h }, p0, [x22]\n"
- "st1h { z21.h }, p0, [x22, x14, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x26, x17, LSL #1]\n"
+ "fmla z26.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, x9, LSL #1]\n"
+ "fmla z27.h, p3/M, z5.h, z20.h\n"
+ "fmla z25.h, p3/M, z4.h, z19.h\n"
+ "ld1h { z21.h }, p2/Z, [x26, x28, LSL #1]\n"
+ "fmla z26.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x25, x9, LSL #1]\n"
+ "fmla z24.h, p3/M, z4.h, z16.h\n"
+ "ld1h { z20.h }, p2/Z, [x24, x17, LSL #1]\n"
+ "fmla z27.h, p3/M, z6.h, z22.h\n"
+ "ld1h { z16.h }, p2/Z, [x24]\n"
+ "fmla z25.h, p3/M, z1.h, z18.h\n"
+ "fmla z26.h, p3/M, z7.h, z17.h\n"
+ "fmla z24.h, p3/M, z1.h, z17.h\n"
+ "fmla z27.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x25, x10, LSL #1]\n"
+ "fmla z25.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x24, x10, LSL #1]\n"
+ "fmla z26.h, p3/M, z8.h, z21.h\n"
+ "fmla z24.h, p3/M, z5.h, z23.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x9, LSL #1]\n"
+ "fmax z27.h, p3/M, z27.h, z30.h\n"
+ "fmla z25.h, p3/M, z7.h, z20.h\n"
+ "fmax z26.h, p3/M, z26.h, z30.h\n"
+ "fmin z27.h, p3/M, z27.h, z29.h\n"
+ "fmla z24.h, p3/M, z2.h, z21.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, x28, LSL #1]\n"
+ "fmla z25.h, p3/M, z5.h, z19.h\n"
+ "fmin z26.h, p3/M, z26.h, z29.h\n"
+ "st1h { z27.h }, p0, [x11]\n"
+ "fmla z24.h, p3/M, z3.h, z19.h\n"
+ "st1h { z26.h }, p0, [x11, x15, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z17.h\n"
+ "fmla z25.h, p3/M, z8.h, z18.h\n"
+ "fmla z24.h, p3/M, z6.h, z18.h\n"
+ "fmax z25.h, p3/M, z25.h, z30.h\n"
+ "fmin z25.h, p3/M, z25.h, z29.h\n"
+ "st1h { z25.h }, p0, [x23]\n"
+ "fmla z24.h, p3/M, z8.h, z16.h\n"
+ "fmax z24.h, p3/M, z24.h, z30.h\n"
+ "fmin z24.h, p3/M, z24.h, z29.h\n"
+ "st1h { z24.h }, p0, [x23, x15, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
);
}
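
Note on the hunk above: it only renumbers the scratch registers of the hand-scheduled SVE code; the address arithmetic spelled out in the inline "//" comments (offset = tile_i * ld_input_row, offset += tile_j * ld_input_col, and so on) is unchanged. As a minimal C++ sketch of that tile-to-pointer computation, with a hypothetical helper whose parameter names follow the comment text and which is not part of the generated kernel:

    // Hypothetical helper mirroring the inline-asm comments; illustrative only.
    // The "LSL #1" in the assembly is the "* sizeof(__fp16)" scaling, expressed
    // here through element-typed pointer arithmetic instead.
    static inline __fp16 *tile_base(__fp16 *ptr,
                                    long tile_i, long tile_j,
                                    long ld_row, long ld_col,
                                    long scale)  // kernel_stride * output_size (2 here)
    {
        long offset = tile_i * ld_row;  // offset  = tile_i * ld_input_row
        offset += tile_j * ld_col;      // offset += tile_j * ld_input_col
        offset *= scale;                // offset *= kernel_stride * output_size
        return ptr + offset;            // inptr[0] += offset * sizeof(__fp16)
    }

The output pointer is derived the same way, per the comments reading "offset = tile_i * ld_output_row" with output_tile_size as the multiplier.
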
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index d5fbb6baee..a4ba50b9bb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -89,245 +89,245 @@ void sve_fp16_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
"ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "cnth x14\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "cnth x15\n"
+ "mov x14, #0x0\n"
+ "whilelt p2.h, XZR, %x[n_channels]\n"
"ldp x13, x12, [x20, #0x0]\n"
"ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
- "whilelt p2.h, XZR, %x[n_channels]\n"
- "ld1h { z20.h }, p3/Z, [x16]\n"
- "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
- "sub x28, XZR, x14\n"
- "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
- "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ld1rh { z26.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z25.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
- "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
- "ld1h { z9.h }, p2/Z, [x27, x9, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x26, x9, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x25, x9, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x24, x9, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x23, x9, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ld1h { z15.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z30.h }, p3/Z, [x17]\n"
+ "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
+ "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
+ "cmp x15, %x[n_channels]\n"
+ "sub x9, XZR, x15\n"
+ "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
+ "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
+ "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
+ "addvl x17, x17, #16\n"
+ "ldp x27, x26, [x16, #0x0]\n"
+ "ldp x25, x24, [x16, #0x10]\n"
+ "ldp x23, x22, [x16, #0x20]\n"
+ "ldp x21, x20, [x16, #0x30]\n"
+ "ld1rh { z29.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rh { z28.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
+ "addvl x17, x17, #-6\n"
+ "ld1h { z9.h }, p2/Z, [x27, x14, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x25, x14, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x23, x14, LSL #1]\n"
+ "ld1h { z14.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "ld1h { z15.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x20, x14, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z24, z20\n fmla z24.h, p3/M, z8.h, z9.h\n"
- "movprfx z23, z20\n fmla z23.h, p3/M, z6.h, z9.h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z23.h, p3/M, z1.h, z12.h\n"
- "ld1h { z18.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x20, [x15, #0x50]\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmla z23.h, p3/M, z2.h, z13.h\n"
- "ld1h { z17.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z19.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z24.h, p3/M, z3.h, z14.h\n"
- "fmla z23.h, p3/M, z0.h, z16.h\n"
- "ldr x20, [x15, #0x58]\n"
- "ldr x22, [x15, #0x78]\n"
- "fmla z24.h, p3/M, z4.h, z15.h\n"
- "fmla z23.h, p3/M, z4.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0x60]\n"
- "fmla z24.h, p3/M, z2.h, z16.h\n"
- "fmla z23.h, p3/M, z5.h, z18.h\n"
- "ldr x20, [x15, #0x80]\n"
- "ld1h { z18.h }, p2/Z, [x21, x9, LSL #1]\n"
- "movprfx z22, z20\n fmla z22.h, p3/M, z2.h, z9.h\n"
- "movprfx z21, z20\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "ld1h { z20.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0x68]\n"
- "fmla z24.h, p3/M, z5.h, z19.h\n"
- "fmla z23.h, p3/M, z3.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x20, [x15, #0x88]\n"
- "fmla z22.h, p3/M, z3.h, z17.h\n"
- "fmla z21.h, p3/M, z4.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z22.h, p3/M, z0.h, z18.h\n"
- "fmla z21.h, p3/M, z1.h, z20.h\n"
- "ldr x21, [x15, #0x70]\n"
- "ldr x20, [x15, #0x98]\n"
- "fmla z22.h, p3/M, z4.h, z17.h\n"
- "fmla z21.h, p3/M, z5.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z19.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z24.h, p3/M, z6.h, z18.h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "fmla z22.h, p3/M, z1.h, z16.h\n"
- "fmla z21.h, p3/M, z2.h, z19.h\n"
- "fmla z24.h, p3/M, z7.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z18.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0xa0]\n"
- "ldr x20, [x15, #0xb0]\n"
- "fmla z22.h, p3/M, z6.h, z16.h\n"
- "fmla z21.h, p3/M, z3.h, z18.h\n"
- "ld1h { z17.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z22.h, p3/M, z7.h, z17.h\n"
- "fmla z21.h, p3/M, z7.h, z16.h\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla z23.h, p3/M, z7.h, z20.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z22.h, p3/M, z5.h, z18.h\n"
- "ldr x20, [x15, #0xc0]\n"
- "fmla z21.h, p3/M, z6.h, z17.h\n"
- "fmla z23.h, p3/M, z8.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z22.h, p3/M, z8.h, z17.h\n"
- "fmla z21.h, p3/M, z8.h, z16.h\n"
- "whilelt p1.h, x14, %x[n_channels]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "movprfx z27, z30\n fmla z27.h, p3/M, z8.h, z9.h\n"
+ "movprfx z26, z30\n fmla z26.h, p3/M, z6.h, z9.h\n"
+ "ldr x28, [x16, #0x40]\n"
+ "ldr x21, [x16, #0x48]\n"
+ "ldr x25, [x16, #0x50]\n"
+ "ldr x20, [x16, #0x58]\n"
+ "movprfx z25, z30\n fmla z25.h, p3/M, z2.h, z9.h\n"
+ "movprfx z24, z30\n fmla z24.h, p3/M, z0.h, z9.h\n"
+ "ldr x27, [x16, #0x78]\n"
+ "ldr x24, [x16, #0x60]\n"
+ "whilelt p1.h, x15, %x[n_channels]\n"
"inch x9\n"
- "fmax z24.h, p3/M, z24.h, z26.h\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ld1h { z9.h }, p1/Z, [x27, x14, LSL #1]\n"
- "fmax z23.h, p3/M, z23.h, z26.h\n"
- "fmax z22.h, p3/M, z22.h, z26.h\n"
- "ld1h { z10.h }, p1/Z, [x26, x14, LSL #1]\n"
- "ld1h { z11.h }, p1/Z, [x25, x14, LSL #1]\n"
- "fmax z21.h, p3/M, z21.h, z26.h\n"
- "inch x28\n"
- "ld1h { z12.h }, p1/Z, [x24, x14, LSL #1]\n"
- "ld1h { z13.h }, p1/Z, [x23, x14, LSL #1]\n"
+ "ldr x23, [x16, #0x68]\n"
+ "ldr x26, [x16, #0x70]\n"
"mov p0.b, p2.b\n"
- "whilelt p2.h, x9, %x[n_channels]\n"
- "ld1h { z14.h }, p1/Z, [x22, x14, LSL #1]\n"
- "ld1h { z15.h }, p1/Z, [x21, x14, LSL #1]\n"
- "fmin z24.h, p3/M, z24.h, z25.h\n"
- "fmin z23.h, p3/M, z23.h, z25.h\n"
- "ld1h { z16.h }, p1/Z, [x20, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z0.h, z10.h\n"
+ "fmla z26.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z21.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ldr x22, [x16, #0x88]\n"
+ "ld1h { z30.h }, p3/Z, [x17]\n"
+ "fmla z27.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z18.h }, p2/Z, [x28, x14, LSL #1]\n"
+ "ldr x21, [x16, #0x80]\n"
+ "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z20.h }, p2/Z, [x25, x14, LSL #1]\n"
+ "ldr x25, [x16, #0x90]\n"
+ "fmla z27.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z17.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "ldr x20, [x16, #0x98]\n"
+ "fmla z26.h, p3/M, z0.h, z16.h\n"
+ "fmla z25.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z23.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z15.h\n"
+ "ld1h { z22.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "fmla z26.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z17.h }, p2/Z, [x23, x14, LSL #1]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z25.h, p3/M, z0.h, z22.h\n"
+ "ld1h { z0.h }, p3/Z, [x17, #1, MUL VL]\n"
+ "fmla z27.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z19.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "ldr x22, [x16, #0xb0]\n"
+ "fmla z26.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z18.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ldr x21, [x16, #0xc0]\n"
+ "fmla z25.h, p3/M, z4.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z5.h, z20.h\n"
+ "fmla z26.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x14, LSL #1]\n"
+ "ldr x20, [x16, #0xb8]\n"
+ "fmla z24.h, p3/M, z4.h, z16.h\n"
+ "ld1h { z20.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ld1h { z4.h }, p3/Z, [x17, #5, MUL VL]\n"
+ "fmla z27.h, p3/M, z6.h, z22.h\n"
+ "ld1h { z17.h }, p2/Z, [x25, x14, LSL #1]\n"
+ "fmla z25.h, p3/M, z1.h, z19.h\n"
+ "fmla z24.h, p3/M, z1.h, z18.h\n"
+ "ld1h { z1.h }, p3/Z, [x17, #2, MUL VL]\n"
+ "fmla z26.h, p3/M, z7.h, z18.h\n"
+ "fmla z27.h, p3/M, z7.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x23, x14, LSL #1]\n"
+ "fmla z24.h, p3/M, z5.h, z23.h\n"
+ "ld1h { z19.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "fmla z25.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z18.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "fmla z26.h, p3/M, z8.h, z21.h\n"
+ "fmax z27.h, p3/M, z27.h, z29.h\n"
+ "fmla z24.h, p3/M, z2.h, z21.h\n"
+ "ld1h { z17.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "ldp x27, x26, [x16, #0x0]\n"
+ "ldp x25, x24, [x16, #0x10]\n"
"inch x14\n"
- "ld1h { z20.h }, p3/Z, [x16]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1h { z0.h }, p3/Z, [x16, #1, MUL VL]\n"
- "ld1h { z1.h }, p3/Z, [x16, #2, MUL VL]\n"
- "fmin z22.h, p3/M, z22.h, z25.h\n"
- "fmin z21.h, p3/M, z21.h, z25.h\n"
- "ld1h { z2.h }, p3/Z, [x16, #3, MUL VL]\n"
- "ld1h { z3.h }, p3/Z, [x16, #4, MUL VL]\n"
- "st1h { z24.h }, p0, [x13, x28, LSL #1]\n"
- "ld1h { z4.h }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1h { z5.h }, p3/Z, [x16, #6, MUL VL]\n"
- "st1h { z23.h }, p0, [x12, x28, LSL #1]\n"
- "ld1h { z6.h }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "st1h { z22.h }, p0, [x11, x28, LSL #1]\n"
- "ld1h { z7.h }, p3/Z, [x16, #-8, MUL VL]\n"
- "st1h { z21.h }, p0, [x10, x28, LSL #1]\n"
- "ld1h { z8.h }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
+ "ldp x23, x22, [x16, #0x20]\n"
+ "ldp x21, x20, [x16, #0x30]\n"
+ "fmla z25.h, p3/M, z7.h, z20.h\n"
+ "ld1h { z2.h }, p3/Z, [x17, #3, MUL VL]\n"
+ "fmin z27.h, p3/M, z27.h, z28.h\n"
+ "fmla z24.h, p3/M, z3.h, z16.h\n"
+ "ld1h { z3.h }, p3/Z, [x17, #4, MUL VL]\n"
+ "ld1h { z9.h }, p1/Z, [x27, x15, LSL #1]\n"
+ "ld1h { z10.h }, p1/Z, [x26, x15, LSL #1]\n"
+ "fmax z26.h, p3/M, z26.h, z29.h\n"
+ "whilelt p2.h, x14, %x[n_channels]\n"
+ "ld1h { z12.h }, p1/Z, [x24, x15, LSL #1]\n"
+ "ld1h { z13.h }, p1/Z, [x23, x15, LSL #1]\n"
+ "fmla z25.h, p3/M, z5.h, z16.h\n"
+ "ld1h { z16.h }, p1/Z, [x20, x15, LSL #1]\n"
+ "ld1h { z5.h }, p3/Z, [x17, #6, MUL VL]\n"
+ "st1h { z27.h }, p0, [x13, x9, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z19.h\n"
+ "ld1h { z14.h }, p1/Z, [x22, x15, LSL #1]\n"
+ "fmin z26.h, p3/M, z26.h, z28.h\n"
+ "fmla z25.h, p3/M, z8.h, z18.h\n"
+ "fmla z24.h, p3/M, z6.h, z18.h\n"
+ "ld1h { z15.h }, p1/Z, [x21, x15, LSL #1]\n"
+ "ld1h { z6.h }, p3/Z, [x17, #7, MUL VL]\n"
+ "addvl x17, x17, #16\n"
+ "st1h { z26.h }, p0, [x12, x9, LSL #1]\n"
+ "fmla z24.h, p3/M, z8.h, z17.h\n"
+ "ld1h { z11.h }, p1/Z, [x25, x15, LSL #1]\n"
+ "inch x15\n"
+ "fmax z25.h, p3/M, z25.h, z29.h\n"
+ "ld1h { z7.h }, p3/Z, [x17, #-8, MUL VL]\n"
+ "ld1h { z8.h }, p3/Z, [x17, #-7, MUL VL]\n"
+ "addvl x17, x17, #-6\n"
+ "cmp x15, %x[n_channels]\n"
+ "fmin z25.h, p3/M, z25.h, z28.h\n"
+ "fmax z24.h, p3/M, z24.h, z29.h\n"
+ "fmin z24.h, p3/M, z24.h, z28.h\n"
+ "st1h { z25.h }, p0, [x11, x9, LSL #1]\n"
+ "st1h { z24.h }, p0, [x10, x9, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z24, z20\n fmla z24.h, p3/M, z8.h, z9.h\n"
- "movprfx z23, z20\n fmla z23.h, p3/M, z6.h, z9.h\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
- "fmla z24.h, p3/M, z0.h, z10.h\n"
- "fmla z23.h, p3/M, z1.h, z12.h\n"
- "ld1h { z18.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x20, [x15, #0x50]\n"
- "fmla z24.h, p3/M, z1.h, z11.h\n"
- "fmla z23.h, p3/M, z2.h, z13.h\n"
- "ld1h { z17.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z19.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z24.h, p3/M, z3.h, z14.h\n"
- "fmla z23.h, p3/M, z0.h, z16.h\n"
- "ldr x20, [x15, #0x58]\n"
- "ldr x22, [x15, #0x78]\n"
- "fmla z24.h, p3/M, z4.h, z15.h\n"
- "fmla z23.h, p3/M, z4.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0x60]\n"
- "fmla z24.h, p3/M, z2.h, z16.h\n"
- "fmla z23.h, p3/M, z5.h, z18.h\n"
- "ldr x20, [x15, #0x80]\n"
- "ld1h { z18.h }, p2/Z, [x21, x9, LSL #1]\n"
- "movprfx z22, z20\n fmla z22.h, p3/M, z2.h, z9.h\n"
- "movprfx z21, z20\n fmla z21.h, p3/M, z0.h, z9.h\n"
- "ld1h { z20.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0x68]\n"
- "fmla z24.h, p3/M, z5.h, z19.h\n"
- "fmla z23.h, p3/M, z3.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x22, x9, LSL #1]\n"
- "ldr x20, [x15, #0x88]\n"
- "fmla z22.h, p3/M, z3.h, z17.h\n"
- "fmla z21.h, p3/M, z4.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z22.h, p3/M, z0.h, z18.h\n"
- "fmla z21.h, p3/M, z1.h, z20.h\n"
- "ldr x21, [x15, #0x70]\n"
- "ldr x20, [x15, #0x98]\n"
- "fmla z22.h, p3/M, z4.h, z17.h\n"
- "fmla z21.h, p3/M, z5.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z19.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z24.h, p3/M, z6.h, z18.h\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "fmla z22.h, p3/M, z1.h, z16.h\n"
- "fmla z21.h, p3/M, z2.h, z19.h\n"
- "fmla z24.h, p3/M, z7.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z18.h }, p2/Z, [x20, x9, LSL #1]\n"
- "ldr x21, [x15, #0xa0]\n"
- "ldr x20, [x15, #0xb0]\n"
- "fmla z22.h, p3/M, z6.h, z16.h\n"
- "fmla z21.h, p3/M, z3.h, z18.h\n"
- "ld1h { z17.h }, p2/Z, [x21, x9, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z22.h, p3/M, z7.h, z17.h\n"
- "fmla z21.h, p3/M, z7.h, z16.h\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla z23.h, p3/M, z7.h, z20.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z22.h, p3/M, z5.h, z18.h\n"
- "ldr x20, [x15, #0xc0]\n"
- "fmla z21.h, p3/M, z6.h, z17.h\n"
- "fmla z23.h, p3/M, z8.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x20, x9, LSL #1]\n"
- "fmla z22.h, p3/M, z8.h, z17.h\n"
- "fmla z21.h, p3/M, z8.h, z16.h\n"
- "inch x28\n"
+ "movprfx z27, z30\n fmla z27.h, p3/M, z8.h, z9.h\n"
+ "movprfx z26, z30\n fmla z26.h, p3/M, z6.h, z9.h\n"
+ "ldr x28, [x16, #0x40]\n"
+ "ldr x20, [x16, #0x48]\n"
+ "ldr x26, [x16, #0x50]\n"
+ "ldr x25, [x16, #0x58]\n"
+ "movprfx z25, z30\n fmla z25.h, p3/M, z2.h, z9.h\n"
+ "movprfx z24, z30\n fmla z24.h, p3/M, z0.h, z9.h\n"
+ "ldr x27, [x16, #0x78]\n"
+ "ldr x24, [x16, #0x60]\n"
+ "inch x9\n"
"mov p0.b, p2.b\n"
- "fmax z24.h, p3/M, z24.h, z26.h\n"
- "fmax z23.h, p3/M, z23.h, z26.h\n"
- "fmax z22.h, p3/M, z22.h, z26.h\n"
- "fmax z21.h, p3/M, z21.h, z26.h\n"
- "fmin z24.h, p3/M, z24.h, z25.h\n"
- "fmin z23.h, p3/M, z23.h, z25.h\n"
- "st1h { z24.h }, p0, [x13, x28, LSL #1]\n"
- "fmin z22.h, p3/M, z22.h, z25.h\n"
- "fmin z21.h, p3/M, z21.h, z25.h\n"
- "st1h { z23.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z22.h }, p0, [x11, x28, LSL #1]\n"
- "st1h { z21.h }, p0, [x10, x28, LSL #1]\n"
+ "ldr x23, [x16, #0x68]\n"
+ "ldr x22, [x16, #0x70]\n"
+ "fmla z27.h, p3/M, z0.h, z10.h\n"
+ "fmla z26.h, p3/M, z1.h, z12.h\n"
+ "ld1h { z21.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "ldr x21, [x16, #0x88]\n"
+ "fmla z27.h, p3/M, z1.h, z11.h\n"
+ "ld1h { z18.h }, p2/Z, [x28, x14, LSL #1]\n"
+ "ldr x20, [x16, #0x80]\n"
+ "fmla z26.h, p3/M, z2.h, z13.h\n"
+ "ld1h { z20.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "ldr x26, [x16, #0x90]\n"
+ "fmla z27.h, p3/M, z3.h, z14.h\n"
+ "ld1h { z17.h }, p2/Z, [x25, x14, LSL #1]\n"
+ "ldr x25, [x16, #0x98]\n"
+ "fmla z26.h, p3/M, z0.h, z16.h\n"
+ "fmla z27.h, p3/M, z4.h, z15.h\n"
+ "ld1h { z23.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "fmla z25.h, p3/M, z3.h, z17.h\n"
+ "ld1h { z22.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x23, x14, LSL #1]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z27.h, p3/M, z2.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "ldr x22, [x16, #0xb0]\n"
+ "fmla z25.h, p3/M, z0.h, z23.h\n"
+ "fmla z26.h, p3/M, z5.h, z21.h\n"
+ "ld1h { z17.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "ldr x21, [x16, #0xc0]\n"
+ "fmla z27.h, p3/M, z5.h, z20.h\n"
+ "fmla z26.h, p3/M, z3.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x27, x14, LSL #1]\n"
+ "ldr x20, [x16, #0xb8]\n"
+ "fmla z24.h, p3/M, z4.h, z16.h\n"
+ "ld1h { z21.h }, p2/Z, [x24, x14, LSL #1]\n"
+ "fmla z25.h, p3/M, z4.h, z19.h\n"
+ "ld1h { z20.h }, p2/Z, [x25, x14, LSL #1]\n"
+ "fmla z27.h, p3/M, z6.h, z23.h\n"
+ "ld1h { z16.h }, p2/Z, [x26, x14, LSL #1]\n"
+ "fmla z26.h, p3/M, z7.h, z17.h\n"
+ "fmla z24.h, p3/M, z1.h, z17.h\n"
+ "fmla z25.h, p3/M, z1.h, z18.h\n"
+ "fmla z27.h, p3/M, z7.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x23, x14, LSL #1]\n"
+ "fmla z26.h, p3/M, z8.h, z20.h\n"
+ "fmla z24.h, p3/M, z5.h, z22.h\n"
+ "ld1h { z18.h }, p2/Z, [x22, x14, LSL #1]\n"
+ "fmla z25.h, p3/M, z6.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x20, x14, LSL #1]\n"
+ "fmax z27.h, p3/M, z27.h, z29.h\n"
+ "fmax z26.h, p3/M, z26.h, z29.h\n"
+ "fmla z24.h, p3/M, z2.h, z20.h\n"
+ "ld1h { z16.h }, p2/Z, [x21, x14, LSL #1]\n"
+ "fmla z25.h, p3/M, z7.h, z21.h\n"
+ "fmin z27.h, p3/M, z27.h, z28.h\n"
+ "fmin z26.h, p3/M, z26.h, z28.h\n"
+ "fmla z24.h, p3/M, z3.h, z19.h\n"
+ "st1h { z27.h }, p0, [x13, x9, LSL #1]\n"
+ "st1h { z26.h }, p0, [x12, x9, LSL #1]\n"
+ "fmla z24.h, p3/M, z7.h, z18.h\n"
+ "fmla z25.h, p3/M, z5.h, z19.h\n"
+ "fmla z24.h, p3/M, z6.h, z17.h\n"
+ "fmla z25.h, p3/M, z8.h, z17.h\n"
+ "fmla z24.h, p3/M, z8.h, z16.h\n"
+ "fmax z25.h, p3/M, z25.h, z29.h\n"
+ "fmin z25.h, p3/M, z25.h, z28.h\n"
+ "st1h { z25.h }, p0, [x11, x9, LSL #1]\n"
+ "fmax z24.h, p3/M, z24.h, z29.h\n"
+ "fmin z24.h, p3/M, z24.h, z28.h\n"
+ "st1h { z24.h }, p0, [x10, x9, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
);
}
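
The indirect variant above keeps the same loop shape after the register re-allocation: channels are walked in vector-length chunks under a whilelt predicate (cnth/inch advance the counters), and each accumulator is clamped to [args.min, args.max] before the predicated store. A rough sketch of that shape using the standard arm_sve.h ACLE intrinsics; the function and the single fused accumulator are hypothetical (the real kernel keeps four accumulators, each fed by nine fmla terms):

    #include <arm_sve.h>

    // Illustrative only: one accumulator stands in for the kernel's z24..z27.
    void channel_loop(const float16_t *in, float16_t *out,
                      uint64_t n_channels, float16_t act_min, float16_t act_max)
    {
        for (uint64_t c = 0; c < n_channels; c += svcnth())
        {
            svbool_t pg = svwhilelt_b16(c, n_channels);  // "whilelt p2.h, ..."
            svfloat16_t acc = svld1_f16(pg, in + c);     // stand-in for the fmla chain
            acc = svmax_x(pg, acc, svdup_f16(act_min));  // "fmax z.h, p3/M, ..."
            acc = svmin_x(pg, acc, svdup_f16(act_max));  // "fmin z.h, p3/M, ..."
            svst1_f16(pg, out + c, acc);                 // "st1h { z.h }, p0, ..."
        }
    }
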
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index fdbee67926..5489cbd990 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,432 +88,432 @@ void sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x12, #0x0\n"
- "mov x8, #0x0\n"
+ "mov x6, #0x0\n"
+ "mov x7, #0x0\n"
"1:" // Tile loop
- "str x12, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x2\n"
"mov x25, #0x2\n"
- "mov x24, #0x2\n"
- "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x12, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
+ "str x7, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "cnth x17\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "add x15, x17, x17\n"
- "mul x20, x12, x21\n" // offset = tile_i * ld_output_row
+ "whilelt p2.h, XZR, %x[n_channels]\n"
+ "mov x15, #0x0\n"
"ldr x14, [%x[params_struct], %[offsetof_args_inptr]]\n"
"ldr x13, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "cnth x12\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x14, x14, x22, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
- "add x11, x14, x23, LSL #1\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x8, x16, x20\n" // offset += tile_j * ld_output_col
- "add x9, x11, x23, LSL #1\n"
- "add x28, x15, x17\n"
+ "mul x20, x6, x24\n" // offset = tile_i * ld_input_row
+ "add x12, x8, x8\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x10, x12, x8\n"
+ "cmp x17, %x[n_channels]\n"
"ld1rh { z15.h }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "whilelt p2.h, XZR, %x[n_channels]\n"
- "add x27, x9, x23, LSL #1\n"
+ "mul x22, x6, x23\n" // offset = tile_i * ld_output_row
+ "add x9, x10, x8\n"
"ld1rh { z28.h }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "add x26, x28, x17\n"
- "add x25, x27, x23, LSL #1\n"
- "ld1h { z29.h }, p3/Z, [x10]\n"
- "ld1h { z0.h }, p3/Z, [x10, #1, MUL VL]\n"
- "add x24, x26, x17\n"
- "add x13, x13, x20, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
- "ld1h { z1.h }, p3/Z, [x10, #2, MUL VL]\n"
- "ld1h { z2.h }, p3/Z, [x10, #3, MUL VL]\n"
- "cmp x12, %x[n_channels]\n"
- "add x23, x25, x23, LSL #1\n"
- "ld1h { z3.h }, p3/Z, [x10, #4, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x10, #5, MUL VL]\n"
- "add x22, x13, x21, LSL #1\n"
- "mov x21, #0x0\n"
+ "sub x21, XZR, x17\n"
+ "madd x20, x7, x8, x20\n" // offset += tile_j * ld_input_col
+ "add x28, x9, x8\n"
+ "ld1h { z29.h }, p3/Z, [x11]\n"
+ "ld1h { z0.h }, p3/Z, [x11, #1, MUL VL]\n"
+ "ld1h { z1.h }, p3/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z2.h }, p3/Z, [x11, #3, MUL VL]\n"
+ "madd x22, x7, x16, x22\n" // offset += tile_j * ld_output_col
+ "ld1h { z3.h }, p3/Z, [x11, #4, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x11, #5, MUL VL]\n"
+ "addvl x11, x11, #6\n"
+ "mul x20, x20, x26\n" // offset *= kernel_stride * output_size
+ "mul x22, x22, x25\n" // offset *= output_tile_size
+ "add x14, x14, x20, LSL #1\n" // inptr[0] += offset * sizeof(__fp16)
+ "add x20, x14, x24, LSL #1\n"
+ "add x27, x20, x24, LSL #1\n"
"ld1h { z5.h }, p2/Z, [x14]\n"
- "ld1h { z6.h }, p2/Z, [x14, x17, LSL #1]\n"
- "sub x20, XZR, x12\n"
- "ld1h { z7.h }, p2/Z, [x11]\n"
- "ld1h { z8.h }, p2/Z, [x11, x17, LSL #1]\n"
- "addvl x10, x10, #6\n"
- "ld1h { z9.h }, p2/Z, [x14, x15, LSL #1]\n"
- "ld1h { z13.h }, p2/Z, [x11, x15, LSL #1]\n"
- "ld1h { z11.h }, p2/Z, [x14, x28, LSL #1]\n"
- "ld1h { z12.h }, p2/Z, [x14, x26, LSL #1]\n"
- "ld1h { z10.h }, p2/Z, [x11, x24, LSL #1]\n"
- "ld1h { z14.h }, p2/Z, [x9]\n"
+ "ld1h { z6.h }, p2/Z, [x14, x8, LSL #1]\n"
+ "add x26, x27, x24, LSL #1\n"
+ "add x25, x26, x24, LSL #1\n"
+ "ld1h { z7.h }, p2/Z, [x20]\n"
+ "ld1h { z8.h }, p2/Z, [x20, x8, LSL #1]\n"
+ "add x13, x13, x22, LSL #1\n" // outptrs[0] += offset * sizeof(__fp16)
+ "add x24, x25, x24, LSL #1\n"
+ "add x23, x13, x23, LSL #1\n"
+ "ld1h { z9.h }, p2/Z, [x14, x12, LSL #1]\n"
+ "ld1h { z13.h }, p2/Z, [x20, x12, LSL #1]\n"
+ "ld1h { z11.h }, p2/Z, [x14, x10, LSL #1]\n"
+ "ld1h { z12.h }, p2/Z, [x14, x9, LSL #1]\n"
+ "ld1h { z10.h }, p2/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z14.h }, p2/Z, [x27]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z27, z29\n fmla z27.h, p3/M, z0.h, z5.h\n"
+ "movprfx z30, z29\n fmla z30.h, p3/M, z0.h, z5.h\n"
"movprfx z31, z29\n fmla z31.h, p3/M, z0.h, z6.h\n"
- "ld1h { z24.h }, p2/Z, [x11, x28, LSL #1]\n"
- "whilelt p1.h, x12, %x[n_channels]\n"
- "movprfx z26, z29\n fmla z26.h, p3/M, z0.h, z7.h\n"
- "movprfx z30, z29\n fmla z30.h, p3/M, z0.h, z8.h\n"
- "ld1h { z18.h }, p3/Z, [x10]\n"
+ "ld1h { z25.h }, p2/Z, [x20, x10, LSL #1]\n"
+ "whilelt p1.h, x17, %x[n_channels]\n"
+ "movprfx z27, z29\n fmla z27.h, p3/M, z0.h, z7.h\n"
+ "movprfx z26, z29\n fmla z26.h, p3/M, z0.h, z8.h\n"
+ "ld1h { z23.h }, p3/Z, [x11]\n"
+ "inch x15\n"
+ "inch x17\n"
+ "mov p0.b, p2.b\n"
"inch x21\n"
- "fmla z27.h, p3/M, z1.h, z6.h\n"
+ "fmla z30.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z22.h }, p2/Z, [x20, x9, LSL #1]\n"
+ "addvl x20, x20, #1\n"
"fmla z31.h, p3/M, z1.h, z9.h\n"
- "ld1h { z23.h }, p2/Z, [x11, x26, LSL #1]\n"
- "inch x12\n"
- "fmla z26.h, p3/M, z1.h, z8.h\n"
- "fmla z30.h, p3/M, z1.h, z13.h\n"
- "ld1h { z22.h }, p3/Z, [x10, #1, MUL VL]\n"
- "mov p0.b, p2.b\n"
- "fmla z27.h, p3/M, z2.h, z9.h\n"
- "fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z16.h }, p2/Z, [x14, x24, LSL #1]\n"
+ "fmla z27.h, p3/M, z1.h, z8.h\n"
+ "fmla z26.h, p3/M, z1.h, z13.h\n"
+ "ld1h { z21.h }, p3/Z, [x11, #1, MUL VL]\n"
+ "fmla z30.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z18.h }, p2/Z, [x14, x28, LSL #1]\n"
"addvl x14, x14, #1\n"
- "fmla z26.h, p3/M, z2.h, z13.h\n"
- "fmla z30.h, p3/M, z2.h, z24.h\n"
- "ld1h { z20.h }, p3/Z, [x10, #2, MUL VL]\n"
- "addvl x11, x11, #1\n"
- "fmla z27.h, p3/M, z3.h, z11.h\n"
+ "fmla z31.h, p3/M, z2.h, z11.h\n"
+ "fmla z27.h, p3/M, z2.h, z13.h\n"
+ "fmla z26.h, p3/M, z2.h, z25.h\n"
+ "ld1h { z16.h }, p3/Z, [x11, #2, MUL VL]\n"
+ "fmla z30.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z20.h }, p2/Z, [x27, x8, LSL #1]\n"
"fmla z31.h, p3/M, z3.h, z12.h\n"
- "ld1h { z0.h }, p2/Z, [x9, x17, LSL #1]\n"
- "inch x20\n"
- "fmla z26.h, p3/M, z3.h, z24.h\n"
- "fmla z30.h, p3/M, z3.h, z23.h\n"
- "ld1h { z17.h }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z27.h, p3/M, z4.h, z12.h\n"
- "fmla z31.h, p3/M, z4.h, z16.h\n"
- "ld1h { z19.h }, p2/Z, [x9, x15, LSL #1]\n"
- "ld1h { z5.h }, p2/Z, [x9, x28, LSL #1]\n"
- "fmla z26.h, p3/M, z4.h, z23.h\n"
- "fmla z30.h, p3/M, z4.h, z10.h\n"
- "ld1h { z21.h }, p3/Z, [x10, #4, MUL VL]\n"
- "fmla z27.h, p3/M, z18.h, z7.h\n"
- "fmla z31.h, p3/M, z18.h, z8.h\n"
- "ld1h { z7.h }, p1/Z, [x11]\n"
- "fmla z26.h, p3/M, z18.h, z14.h\n"
- "fmla z30.h, p3/M, z18.h, z0.h\n"
- "ld1h { z18.h }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z27.h, p3/M, z22.h, z8.h\n"
- "fmla z31.h, p3/M, z22.h, z13.h\n"
- "ld1h { z3.h }, p2/Z, [x9, x24, LSL #1]\n"
- "fmla z26.h, p3/M, z22.h, z0.h\n"
- "fmla z30.h, p3/M, z22.h, z19.h\n"
- "ld1h { z8.h }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z27.h, p3/M, z20.h, z13.h\n"
- "fmla z31.h, p3/M, z20.h, z24.h\n"
- "ld1h { z2.h }, p2/Z, [x9, x26, LSL #1]\n"
- "addvl x9, x9, #1\n"
- "fmla z26.h, p3/M, z20.h, z19.h\n"
- "fmla z30.h, p3/M, z20.h, z5.h\n"
- "ld1h { z16.h }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "fmla z27.h, p3/M, z17.h, z24.h\n"
- "fmla z31.h, p3/M, z17.h, z23.h\n"
- "ld1h { z25.h }, p2/Z, [x27]\n"
- "ld1h { z29.h }, p3/Z, [x10, #4, MUL VL]\n"
- "fmla z26.h, p3/M, z17.h, z5.h\n"
- "fmla z30.h, p3/M, z17.h, z2.h\n"
- "ld1h { z17.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "fmla z27.h, p3/M, z21.h, z23.h\n"
- "fmla z31.h, p3/M, z21.h, z10.h\n"
- "ld1h { z24.h }, p2/Z, [x27, x17, LSL #1]\n"
- "ld1h { z22.h }, p2/Z, [x27, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z21.h, z2.h\n"
- "fmla z30.h, p3/M, z21.h, z3.h\n"
- "ld1h { z21.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "fmla z27.h, p3/M, z18.h, z14.h\n"
- "fmla z31.h, p3/M, z18.h, z0.h\n"
- "ld1h { z1.h }, p2/Z, [x27, x24, LSL #1]\n"
- "fmla z26.h, p3/M, z18.h, z25.h\n"
- "fmla z30.h, p3/M, z18.h, z24.h\n"
- "ld1h { z23.h }, p3/Z, [x10, #-6, MUL VL]\n"
- "fmla z27.h, p3/M, z8.h, z0.h\n"
- "fmla z31.h, p3/M, z8.h, z19.h\n"
- "ld1h { z0.h }, p2/Z, [x27, x28, LSL #1]\n"
- "fmla z26.h, p3/M, z8.h, z24.h\n"
- "fmla z30.h, p3/M, z8.h, z22.h\n"
- "ld1h { z20.h }, p3/Z, [x10, #-5, MUL VL]\n"
- "fmla z27.h, p3/M, z16.h, z19.h\n"
- "fmla z31.h, p3/M, z16.h, z5.h\n"
- "ld1h { z19.h }, p2/Z, [x27, x26, LSL #1]\n"
+ "fmla z27.h, p3/M, z3.h, z25.h\n"
+ "fmla z26.h, p3/M, z3.h, z22.h\n"
+ "ld1h { z17.h }, p3/Z, [x11, #3, MUL VL]\n"
+ "fmla z30.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z19.h }, p2/Z, [x27, x12, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z12.h }, p2/Z, [x27, x10, LSL #1]\n"
+ "fmla z27.h, p3/M, z4.h, z22.h\n"
+ "fmla z26.h, p3/M, z4.h, z10.h\n"
+ "ld1h { z0.h }, p3/Z, [x11, #4, MUL VL]\n"
+ "fmla z30.h, p3/M, z23.h, z7.h\n"
+ "ld1h { z7.h }, p1/Z, [x20]\n"
+ "fmla z31.h, p3/M, z23.h, z8.h\n"
+ "fmla z27.h, p3/M, z23.h, z14.h\n"
+ "fmla z26.h, p3/M, z23.h, z20.h\n"
+ "ld1h { z18.h }, p3/Z, [x11, #5, MUL VL]\n"
+ "fmla z30.h, p3/M, z21.h, z8.h\n"
+ "ld1h { z1.h }, p2/Z, [x27, x28, LSL #1]\n"
+ "fmla z31.h, p3/M, z21.h, z13.h\n"
+ "fmla z27.h, p3/M, z21.h, z20.h\n"
+ "fmla z26.h, p3/M, z21.h, z19.h\n"
+ "ld1h { z5.h }, p3/Z, [x11, #6, MUL VL]\n"
+ "fmla z30.h, p3/M, z16.h, z13.h\n"
+ "ld1h { z24.h }, p2/Z, [x27, x9, LSL #1]\n"
"addvl x27, x27, #1\n"
- "fmla z26.h, p3/M, z16.h, z22.h\n"
- "fmla z30.h, p3/M, z16.h, z0.h\n"
- "ld1h { z18.h }, p3/Z, [x10, #-4, MUL VL]\n"
- "fmla z27.h, p3/M, z17.h, z5.h\n"
- "fmla z31.h, p3/M, z17.h, z2.h\n"
+ "fmla z31.h, p3/M, z16.h, z25.h\n"
+ "fmla z27.h, p3/M, z16.h, z19.h\n"
+ "fmla z26.h, p3/M, z16.h, z12.h\n"
+ "ld1h { z16.h }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
+ "fmla z30.h, p3/M, z17.h, z25.h\n"
+ "ld1h { z25.h }, p2/Z, [x26]\n"
+ "fmla z31.h, p3/M, z17.h, z22.h\n"
+ "fmla z27.h, p3/M, z17.h, z12.h\n"
+ "ld1h { z29.h }, p3/Z, [x11, #4, MUL VL]\n"
+ "fmla z26.h, p3/M, z17.h, z24.h\n"
+ "ld1h { z17.h }, p3/Z, [x11, #-8, MUL VL]\n"
+ "fmla z30.h, p3/M, z0.h, z22.h\n"
+ "ld1h { z23.h }, p2/Z, [x26, x8, LSL #1]\n"
+ "fmla z31.h, p3/M, z0.h, z10.h\n"
+ "ld1h { z22.h }, p2/Z, [x26, x12, LSL #1]\n"
+ "fmla z27.h, p3/M, z0.h, z24.h\n"
+ "fmla z26.h, p3/M, z0.h, z1.h\n"
+ "ld1h { z21.h }, p3/Z, [x11, #-7, MUL VL]\n"
+ "fmla z30.h, p3/M, z18.h, z14.h\n"
+ "ld1h { z10.h }, p2/Z, [x26, x28, LSL #1]\n"
+ "fmla z31.h, p3/M, z18.h, z20.h\n"
+ "fmla z27.h, p3/M, z18.h, z25.h\n"
+ "fmla z26.h, p3/M, z18.h, z23.h\n"
+ "ld1h { z6.h }, p3/Z, [x11, #-6, MUL VL]\n"
+ "fmla z30.h, p3/M, z5.h, z20.h\n"
+ "ld1h { z0.h }, p2/Z, [x26, x10, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z19.h\n"
+ "fmla z27.h, p3/M, z5.h, z23.h\n"
+ "fmla z26.h, p3/M, z5.h, z22.h\n"
+ "ld1h { z20.h }, p3/Z, [x11, #-5, MUL VL]\n"
+ "fmla z30.h, p3/M, z16.h, z19.h\n"
+ "ld1h { z19.h }, p2/Z, [x26, x9, LSL #1]\n"
+ "addvl x26, x26, #1\n"
+ "fmla z31.h, p3/M, z16.h, z12.h\n"
+ "fmla z27.h, p3/M, z16.h, z22.h\n"
+ "fmla z26.h, p3/M, z16.h, z0.h\n"
+ "ld1h { z18.h }, p3/Z, [x11, #-4, MUL VL]\n"
+ "fmla z30.h, p3/M, z17.h, z12.h\n"
"ld1h { z16.h }, p2/Z, [x25]\n"
- "fmla z26.h, p3/M, z17.h, z0.h\n"
- "fmla z30.h, p3/M, z17.h, z19.h\n"
- "ld1h { z17.h }, p3/Z, [x10, #-3, MUL VL]\n"
- "fmla z27.h, p3/M, z21.h, z2.h\n"
- "fmla z31.h, p3/M, z21.h, z3.h\n"
- "ld1h { z4.h }, p2/Z, [x25, x17, LSL #1]\n"
- "ld1h { z8.h }, p2/Z, [x25, x26, LSL #1]\n"
- "fmla z26.h, p3/M, z21.h, z19.h\n"
- "fmla z30.h, p3/M, z21.h, z1.h\n"
- "ld1h { z13.h }, p3/Z, [x10, #-2, MUL VL]\n"
- "fmla z27.h, p3/M, z23.h, z25.h\n"
- "fmla z31.h, p3/M, z23.h, z24.h\n"
- "ld1h { z25.h }, p2/Z, [x25, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z23.h, z16.h\n"
- "fmla z30.h, p3/M, z23.h, z4.h\n"
- "ld1h { z5.h }, p3/Z, [x10, #-1, MUL VL]\n"
- "fmla z27.h, p3/M, z20.h, z24.h\n"
+ "fmla z31.h, p3/M, z17.h, z24.h\n"
+ "fmla z27.h, p3/M, z17.h, z0.h\n"
+ "fmla z26.h, p3/M, z17.h, z19.h\n"
+ "ld1h { z17.h }, p3/Z, [x11, #-3, MUL VL]\n"
+ "fmla z30.h, p3/M, z21.h, z24.h\n"
+ "ld1h { z9.h }, p2/Z, [x25, x8, LSL #1]\n"
+ "fmla z31.h, p3/M, z21.h, z1.h\n"
+ "ld1h { z8.h }, p2/Z, [x25, x9, LSL #1]\n"
+ "fmla z27.h, p3/M, z21.h, z19.h\n"
+ "fmla z26.h, p3/M, z21.h, z10.h\n"
+ "ld1h { z5.h }, p3/Z, [x11, #-2, MUL VL]\n"
+ "fmla z30.h, p3/M, z6.h, z25.h\n"
+ "ld1h { z25.h }, p2/Z, [x25, x12, LSL #1]\n"
+ "fmla z31.h, p3/M, z6.h, z23.h\n"
+ "fmla z27.h, p3/M, z6.h, z16.h\n"
+ "fmla z26.h, p3/M, z6.h, z9.h\n"
+ "ld1h { z4.h }, p3/Z, [x11, #-1, MUL VL]\n"
+ "fmla z30.h, p3/M, z20.h, z23.h\n"
+ "ld1h { z24.h }, p2/Z, [x25, x10, LSL #1]\n"
"fmla z31.h, p3/M, z20.h, z22.h\n"
- "ld1h { z24.h }, p2/Z, [x25, x28, LSL #1]\n"
- "fmla z26.h, p3/M, z20.h, z4.h\n"
- "fmla z30.h, p3/M, z20.h, z25.h\n"
- "ld1h { z23.h }, p3/Z, [x10]\n"
- "fmla z27.h, p3/M, z18.h, z22.h\n"
- "fmla z31.h, p3/M, z18.h, z0.h\n"
- "ld1h { z22.h }, p2/Z, [x25, x24, LSL #1]\n"
+ "fmla z27.h, p3/M, z20.h, z9.h\n"
+ "fmla z26.h, p3/M, z20.h, z25.h\n"
+ "ld1h { z23.h }, p3/Z, [x11]\n"
+ "fmla z30.h, p3/M, z18.h, z22.h\n"
+ "ld1h { z22.h }, p2/Z, [x25, x28, LSL #1]\n"
"addvl x25, x25, #1\n"
- "fmla z26.h, p3/M, z18.h, z25.h\n"
- "fmla z30.h, p3/M, z18.h, z24.h\n"
- "ld1h { z21.h }, p3/Z, [x10, #1, MUL VL]\n"
- "fmla z27.h, p3/M, z17.h, z0.h\n"
+ "fmla z31.h, p3/M, z18.h, z0.h\n"
+ "fmla z27.h, p3/M, z18.h, z25.h\n"
+ "fmla z26.h, p3/M, z18.h, z24.h\n"
+ "ld1h { z21.h }, p3/Z, [x11, #1, MUL VL]\n"
+ "fmla z30.h, p3/M, z17.h, z0.h\n"
+ "ld1h { z18.h }, p2/Z, [x24]\n"
"fmla z31.h, p3/M, z17.h, z19.h\n"
- "ld1h { z18.h }, p2/Z, [x23]\n"
- "fmla z26.h, p3/M, z17.h, z24.h\n"
- "fmla z30.h, p3/M, z17.h, z8.h\n"
- "ld1h { z20.h }, p3/Z, [x10, #2, MUL VL]\n"
- "fmla z27.h, p3/M, z13.h, z19.h\n"
- "fmla z31.h, p3/M, z13.h, z1.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x17, LSL #1]\n"
- "ld1h { z14.h }, p1/Z, [x9]\n"
- "fmla z26.h, p3/M, z13.h, z8.h\n"
- "fmla z30.h, p3/M, z13.h, z22.h\n"
- "ld1h { z19.h }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z27.h, p3/M, z5.h, z16.h\n"
- "fmla z31.h, p3/M, z5.h, z4.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z5.h, z18.h\n"
- "fmla z30.h, p3/M, z5.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x23, x28, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z27.h, p3/M, z23.h, z4.h\n"
+ "fmla z27.h, p3/M, z17.h, z24.h\n"
+ "fmla z26.h, p3/M, z17.h, z8.h\n"
+ "ld1h { z20.h }, p3/Z, [x11, #2, MUL VL]\n"
+ "fmla z30.h, p3/M, z5.h, z19.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x8, LSL #1]\n"
+ "fmla z31.h, p3/M, z5.h, z10.h\n"
+ "ld1h { z14.h }, p1/Z, [x27]\n"
+ "fmla z27.h, p3/M, z5.h, z8.h\n"
+ "fmla z26.h, p3/M, z5.h, z22.h\n"
+ "ld1h { z19.h }, p3/Z, [x11, #3, MUL VL]\n"
+ "fmla z30.h, p3/M, z4.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, x12, LSL #1]\n"
+ "fmla z31.h, p3/M, z4.h, z9.h\n"
+ "fmla z27.h, p3/M, z4.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x24, x10, LSL #1]\n"
+ "fmla z26.h, p3/M, z4.h, z17.h\n"
+ "ld1h { z0.h }, p3/Z, [x11, #5, MUL VL]\n"
+ "fmla z30.h, p3/M, z23.h, z9.h\n"
+ "ld1h { z13.h }, p1/Z, [x20, x12, LSL #1]\n"
"fmla z31.h, p3/M, z23.h, z25.h\n"
- "ld1h { z13.h }, p1/Z, [x11, x15, LSL #1]\n"
- "fmla z26.h, p3/M, z23.h, z17.h\n"
- "fmla z30.h, p3/M, z23.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x26, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z27.h, p3/M, z21.h, z25.h\n"
- "fmla z31.h, p3/M, z21.h, z24.h\n"
+ "fmla z27.h, p3/M, z23.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x9, LSL #1]\n"
+ "fmla z26.h, p3/M, z23.h, z16.h\n"
+ "ld1h { z1.h }, p3/Z, [x11, #6, MUL VL]\n"
+ "fmla z30.h, p3/M, z21.h, z25.h\n"
"ld1h { z5.h }, p1/Z, [x14]\n"
- "fmla z26.h, p3/M, z21.h, z16.h\n"
- "fmla z30.h, p3/M, z21.h, z18.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x24, LSL #1]\n"
- "ld1h { z2.h }, p3/Z, [x10, #7, MUL VL]\n"
- "fmla z27.h, p3/M, z20.h, z24.h\n"
+ "fmla z31.h, p3/M, z21.h, z24.h\n"
+ "fmla z27.h, p3/M, z21.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, x28, LSL #1]\n"
+ "whilelt p2.h, x15, %x[n_channels]\n"
+ "cmp x17, %x[n_channels]\n"
+ "addvl x24, x24, #1\n"
+ "fmla z26.h, p3/M, z21.h, z18.h\n"
+ "ld1h { z2.h }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
+ "fmla z30.h, p3/M, z20.h, z24.h\n"
+ "ld1h { z6.h }, p1/Z, [x14, x8, LSL #1]\n"
"fmla z31.h, p3/M, z20.h, z8.h\n"
- "addvl x10, x10, #16\n"
- "whilelt p2.h, x21, %x[n_channels]\n"
- "fmla z26.h, p3/M, z20.h, z18.h\n"
- "fmla z30.h, p3/M, z20.h, z17.h\n"
- "cmp x12, %x[n_channels]\n"
- "addvl x23, x23, #1\n"
- "fmla z27.h, p3/M, z19.h, z8.h\n"
+ "fmla z27.h, p3/M, z20.h, z18.h\n"
+ "ld1h { z11.h }, p1/Z, [x14, x10, LSL #1]\n"
+ "fmla z26.h, p3/M, z20.h, z17.h\n"
+ "ld1h { z3.h }, p3/Z, [x11, #-8, MUL VL]\n"
+ "fmla z30.h, p3/M, z19.h, z8.h\n"
+ "ld1h { z8.h }, p1/Z, [x20, x8, LSL #1]\n"
"fmla z31.h, p3/M, z19.h, z22.h\n"
- "fmax z27.h, p3/M, z27.h, z15.h\n"
+ "ld1h { z10.h }, p1/Z, [x20, x28, LSL #1]\n"
+ "fmla z27.h, p3/M, z19.h, z17.h\n"
+ "ld1h { z12.h }, p1/Z, [x14, x9, LSL #1]\n"
+ "fmla z26.h, p3/M, z19.h, z16.h\n"
+ "ld1h { z9.h }, p1/Z, [x14, x12, LSL #1]\n"
+ "ld1h { z4.h }, p3/Z, [x11, #-7, MUL VL]\n"
+ "addvl x11, x11, #-6\n"
+ "fmax z30.h, p3/M, z30.h, z15.h\n"
"fmax z31.h, p3/M, z31.h, z15.h\n"
- "fmla z26.h, p3/M, z19.h, z17.h\n"
- "fmla z30.h, p3/M, z19.h, z16.h\n"
+ "fmax z27.h, p3/M, z27.h, z15.h\n"
"fmax z26.h, p3/M, z26.h, z15.h\n"
- "fmax z30.h, p3/M, z30.h, z15.h\n"
- "fmin z27.h, p3/M, z27.h, z28.h\n"
+ "fmin z30.h, p3/M, z30.h, z28.h\n"
"fmin z31.h, p3/M, z31.h, z28.h\n"
- "ld1h { z6.h }, p1/Z, [x14, x17, LSL #1]\n"
- "ld1h { z8.h }, p1/Z, [x11, x17, LSL #1]\n"
+ "fmin z27.h, p3/M, z27.h, z28.h\n"
"fmin z26.h, p3/M, z26.h, z28.h\n"
- "fmin z30.h, p3/M, z30.h, z28.h\n"
- "ld1h { z9.h }, p1/Z, [x14, x15, LSL #1]\n"
- "ld1h { z11.h }, p1/Z, [x14, x28, LSL #1]\n"
- "ld1h { z12.h }, p1/Z, [x14, x26, LSL #1]\n"
- "ld1h { z10.h }, p1/Z, [x11, x24, LSL #1]\n"
- "st1h { z27.h }, p0, [x13]\n"
+ "st1h { z30.h }, p0, [x13]\n"
"st1h { z31.h }, p0, [x13, x16, LSL #1]\n"
"addvl x13, x13, #1\n"
- "ld1h { z3.h }, p3/Z, [x10, #-8, MUL VL]\n"
- "ld1h { z4.h }, p3/Z, [x10, #-7, MUL VL]\n"
- "st1h { z26.h }, p0, [x22]\n"
- "addvl x10, x10, #-6\n"
- "st1h { z30.h }, p0, [x22, x16, LSL #1]\n"
- "addvl x22, x22, #1\n"
+ "st1h { z27.h }, p0, [x23]\n"
+ "st1h { z26.h }, p0, [x23, x16, LSL #1]\n"
+ "addvl x23, x23, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
"movprfx z30, z29\n fmla z30.h, p3/M, z0.h, z5.h\n"
"movprfx z31, z29\n fmla z31.h, p3/M, z0.h, z6.h\n"
- "ld1h { z22.h }, p2/Z, [x11, x28, LSL #1]\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ld1h { z22.h }, p2/Z, [x20, x10, LSL #1]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"movprfx z5, z29\n fmla z5.h, p3/M, z0.h, z7.h\n"
"fmla z29.h, p3/M, z0.h, z8.h\n"
- "ld1h { z20.h }, p3/Z, [x10]\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ld1h { z20.h }, p3/Z, [x11]\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "mov p0.b, p2.b\n"
+ "add x7, x7, #0x1\n"
"fmla z30.h, p3/M, z1.h, z6.h\n"
+ "ld1h { z6.h }, p2/Z, [x20, x9, LSL #1]\n"
"fmla z31.h, p3/M, z1.h, z9.h\n"
- "ld1h { z6.h }, p2/Z, [x11, x26, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x20, x6, #0x1\n"
"fmla z5.h, p3/M, z1.h, z8.h\n"
"fmla z29.h, p3/M, z1.h, z13.h\n"
- "ld1h { z19.h }, p3/Z, [x10, #1, MUL VL]\n"
- "add x8, x8, #0x1\n"
+ "ld1h { z19.h }, p3/Z, [x11, #1, MUL VL]\n"
+ "cmp x7, x22\n"
+ "csel x6, x6, x20, LT\n"
+ "csel x7, x7, XZR, LT\n"
"fmla z30.h, p3/M, z2.h, z9.h\n"
+ "ld1h { z16.h }, p2/Z, [x14, x28, LSL #1]\n"
"fmla z31.h, p3/M, z2.h, z11.h\n"
- "ld1h { z16.h }, p2/Z, [x14, x24, LSL #1]\n"
- "cmp x8, x20\n"
"fmla z5.h, p3/M, z2.h, z13.h\n"
"fmla z29.h, p3/M, z2.h, z22.h\n"
- "ld1h { z18.h }, p3/Z, [x10, #2, MUL VL]\n"
- "add x21, x12, #0x1\n"
+ "ld1h { z18.h }, p3/Z, [x11, #2, MUL VL]\n"
+ "cmp x6, x21\n"
"fmla z30.h, p3/M, z3.h, z11.h\n"
+ "ld1h { z1.h }, p2/Z, [x27, x8, LSL #1]\n"
"fmla z31.h, p3/M, z3.h, z12.h\n"
- "ld1h { z1.h }, p2/Z, [x9, x17, LSL #1]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"fmla z5.h, p3/M, z3.h, z22.h\n"
"fmla z29.h, p3/M, z3.h, z6.h\n"
- "ld1h { z17.h }, p3/Z, [x10, #3, MUL VL]\n"
- "csel x12, x12, x21, LT\n"
+ "ld1h { z17.h }, p3/Z, [x11, #3, MUL VL]\n"
"fmla z30.h, p3/M, z4.h, z12.h\n"
+ "ld1h { z0.h }, p2/Z, [x27, x12, LSL #1]\n"
"fmla z31.h, p3/M, z4.h, z16.h\n"
- "ld1h { z0.h }, p2/Z, [x9, x15, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x9, x28, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x27, x10, LSL #1]\n"
"fmla z5.h, p3/M, z4.h, z6.h\n"
"fmla z29.h, p3/M, z4.h, z10.h\n"
- "ld1h { z16.h }, p3/Z, [x10, #4, MUL VL]\n"
- "mov p0.b, p2.b\n"
+ "ld1h { z16.h }, p3/Z, [x11, #4, MUL VL]\n"
"fmla z30.h, p3/M, z20.h, z7.h\n"
"fmla z31.h, p3/M, z20.h, z8.h\n"
- "csel x8, x8, XZR, LT\n"
- "cmp x12, x20\n"
"fmla z5.h, p3/M, z20.h, z14.h\n"
"fmla z29.h, p3/M, z20.h, z1.h\n"
- "ld1h { z21.h }, p3/Z, [x10, #5, MUL VL]\n"
+ "ld1h { z21.h }, p3/Z, [x11, #5, MUL VL]\n"
"fmla z30.h, p3/M, z19.h, z8.h\n"
+ "ld1h { z26.h }, p2/Z, [x27, x28, LSL #1]\n"
"fmla z31.h, p3/M, z19.h, z13.h\n"
- "ld1h { z26.h }, p2/Z, [x9, x24, LSL #1]\n"
"fmla z5.h, p3/M, z19.h, z1.h\n"
"fmla z29.h, p3/M, z19.h, z0.h\n"
- "ld1h { z25.h }, p3/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z25.h }, p3/Z, [x11, #6, MUL VL]\n"
"fmla z30.h, p3/M, z18.h, z13.h\n"
+ "ld1h { z24.h }, p2/Z, [x27, x9, LSL #1]\n"
"fmla z31.h, p3/M, z18.h, z22.h\n"
- "ld1h { z24.h }, p2/Z, [x9, x26, LSL #1]\n"
"fmla z5.h, p3/M, z18.h, z0.h\n"
"fmla z29.h, p3/M, z18.h, z27.h\n"
- "ld1h { z23.h }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1h { z23.h }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
"fmla z30.h, p3/M, z17.h, z22.h\n"
+ "ld1h { z22.h }, p2/Z, [x26]\n"
"fmla z31.h, p3/M, z17.h, z6.h\n"
- "ld1h { z22.h }, p2/Z, [x27]\n"
"fmla z5.h, p3/M, z17.h, z27.h\n"
"fmla z29.h, p3/M, z17.h, z24.h\n"
- "ld1h { z20.h }, p3/Z, [x10, #-8, MUL VL]\n"
+ "ld1h { z20.h }, p3/Z, [x11, #-8, MUL VL]\n"
"fmla z30.h, p3/M, z16.h, z6.h\n"
+ "ld1h { z18.h }, p2/Z, [x26, x8, LSL #1]\n"
"fmla z31.h, p3/M, z16.h, z10.h\n"
- "ld1h { z19.h }, p2/Z, [x27, x17, LSL #1]\n"
- "ld1h { z18.h }, p2/Z, [x27, x15, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x26, x12, LSL #1]\n"
"fmla z5.h, p3/M, z16.h, z24.h\n"
"fmla z29.h, p3/M, z16.h, z26.h\n"
- "ld1h { z16.h }, p3/Z, [x10, #-7, MUL VL]\n"
+ "ld1h { z16.h }, p3/Z, [x11, #-7, MUL VL]\n"
"fmla z30.h, p3/M, z21.h, z14.h\n"
+ "ld1h { z19.h }, p2/Z, [x26, x28, LSL #1]\n"
"fmla z31.h, p3/M, z21.h, z1.h\n"
- "ld1h { z17.h }, p2/Z, [x27, x24, LSL #1]\n"
"fmla z5.h, p3/M, z21.h, z22.h\n"
- "fmla z29.h, p3/M, z21.h, z19.h\n"
- "ld1h { z21.h }, p3/Z, [x10, #-6, MUL VL]\n"
+ "fmla z29.h, p3/M, z21.h, z18.h\n"
+ "ld1h { z21.h }, p3/Z, [x11, #-6, MUL VL]\n"
"fmla z30.h, p3/M, z25.h, z1.h\n"
+ "ld1h { z8.h }, p2/Z, [x26, x10, LSL #1]\n"
"fmla z31.h, p3/M, z25.h, z0.h\n"
- "ld1h { z7.h }, p2/Z, [x27, x28, LSL #1]\n"
- "fmla z5.h, p3/M, z25.h, z19.h\n"
- "fmla z29.h, p3/M, z25.h, z18.h\n"
- "ld1h { z10.h }, p3/Z, [x10, #-5, MUL VL]\n"
+ "fmla z5.h, p3/M, z25.h, z18.h\n"
+ "fmla z29.h, p3/M, z25.h, z17.h\n"
+ "ld1h { z9.h }, p3/Z, [x11, #-5, MUL VL]\n"
"fmla z30.h, p3/M, z23.h, z0.h\n"
+ "ld1h { z11.h }, p2/Z, [x26, x9, LSL #1]\n"
"fmla z31.h, p3/M, z23.h, z27.h\n"
- "ld1h { z11.h }, p2/Z, [x27, x26, LSL #1]\n"
- "fmla z5.h, p3/M, z23.h, z18.h\n"
- "fmla z29.h, p3/M, z23.h, z7.h\n"
- "ld1h { z6.h }, p3/Z, [x10, #-4, MUL VL]\n"
+ "fmla z5.h, p3/M, z23.h, z17.h\n"
+ "fmla z29.h, p3/M, z23.h, z8.h\n"
+ "ld1h { z6.h }, p3/Z, [x11, #-4, MUL VL]\n"
"fmla z30.h, p3/M, z20.h, z27.h\n"
- "fmla z31.h, p3/M, z20.h, z24.h\n"
"ld1h { z0.h }, p2/Z, [x25]\n"
- "fmla z5.h, p3/M, z20.h, z7.h\n"
+ "fmla z31.h, p3/M, z20.h, z24.h\n"
+ "fmla z5.h, p3/M, z20.h, z8.h\n"
"fmla z29.h, p3/M, z20.h, z11.h\n"
- "ld1h { z9.h }, p3/Z, [x10, #-3, MUL VL]\n"
+ "ld1h { z4.h }, p3/Z, [x11, #-3, MUL VL]\n"
"fmla z30.h, p3/M, z16.h, z24.h\n"
+ "ld1h { z2.h }, p2/Z, [x25, x8, LSL #1]\n"
"fmla z31.h, p3/M, z16.h, z26.h\n"
- "ld1h { z3.h }, p2/Z, [x25, x17, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x25, x26, LSL #1]\n"
+ "ld1h { z27.h }, p2/Z, [x25, x9, LSL #1]\n"
"fmla z5.h, p3/M, z16.h, z11.h\n"
- "fmla z29.h, p3/M, z16.h, z17.h\n"
- "ld1h { z16.h }, p3/Z, [x10, #-2, MUL VL]\n"
+ "fmla z29.h, p3/M, z16.h, z19.h\n"
+ "ld1h { z16.h }, p3/Z, [x11, #-2, MUL VL]\n"
"fmla z30.h, p3/M, z21.h, z22.h\n"
- "fmla z31.h, p3/M, z21.h, z19.h\n"
- "ld1h { z26.h }, p2/Z, [x25, x15, LSL #1]\n"
+ "ld1h { z26.h }, p2/Z, [x25, x12, LSL #1]\n"
+ "fmla z31.h, p3/M, z21.h, z18.h\n"
"fmla z5.h, p3/M, z21.h, z0.h\n"
- "fmla z29.h, p3/M, z21.h, z3.h\n"
- "ld1h { z25.h }, p3/Z, [x10, #-1, MUL VL]\n"
- "fmla z30.h, p3/M, z10.h, z19.h\n"
- "fmla z31.h, p3/M, z10.h, z18.h\n"
- "ld1h { z24.h }, p2/Z, [x25, x28, LSL #1]\n"
- "fmla z5.h, p3/M, z10.h, z3.h\n"
- "fmla z29.h, p3/M, z10.h, z26.h\n"
- "ld1h { z23.h }, p3/Z, [x10]\n"
- "fmla z30.h, p3/M, z6.h, z18.h\n"
- "fmla z31.h, p3/M, z6.h, z7.h\n"
- "ld1h { z22.h }, p2/Z, [x25, x24, LSL #1]\n"
+ "fmla z29.h, p3/M, z21.h, z2.h\n"
+ "ld1h { z25.h }, p3/Z, [x11, #-1, MUL VL]\n"
+ "fmla z30.h, p3/M, z9.h, z18.h\n"
+ "ld1h { z24.h }, p2/Z, [x25, x10, LSL #1]\n"
+ "fmla z31.h, p3/M, z9.h, z17.h\n"
+ "fmla z5.h, p3/M, z9.h, z2.h\n"
+ "fmla z29.h, p3/M, z9.h, z26.h\n"
+ "ld1h { z23.h }, p3/Z, [x11]\n"
+ "fmla z30.h, p3/M, z6.h, z17.h\n"
+ "ld1h { z22.h }, p2/Z, [x25, x28, LSL #1]\n"
+ "fmla z31.h, p3/M, z6.h, z8.h\n"
"fmla z5.h, p3/M, z6.h, z26.h\n"
"fmla z29.h, p3/M, z6.h, z24.h\n"
- "ld1h { z21.h }, p3/Z, [x10, #1, MUL VL]\n"
- "fmla z30.h, p3/M, z9.h, z7.h\n"
- "fmla z31.h, p3/M, z9.h, z11.h\n"
- "ld1h { z18.h }, p2/Z, [x23]\n"
- "fmla z5.h, p3/M, z9.h, z24.h\n"
- "fmla z29.h, p3/M, z9.h, z27.h\n"
- "ld1h { z20.h }, p3/Z, [x10, #2, MUL VL]\n"
+ "ld1h { z21.h }, p3/Z, [x11, #1, MUL VL]\n"
+ "fmla z30.h, p3/M, z4.h, z8.h\n"
+ "ld1h { z18.h }, p2/Z, [x24]\n"
+ "fmla z31.h, p3/M, z4.h, z11.h\n"
+ "fmla z5.h, p3/M, z4.h, z24.h\n"
+ "fmla z29.h, p3/M, z4.h, z27.h\n"
+ "ld1h { z20.h }, p3/Z, [x11, #2, MUL VL]\n"
"fmla z30.h, p3/M, z16.h, z11.h\n"
- "fmla z31.h, p3/M, z16.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x17, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x24, x8, LSL #1]\n"
+ "fmla z31.h, p3/M, z16.h, z19.h\n"
"fmla z5.h, p3/M, z16.h, z27.h\n"
"fmla z29.h, p3/M, z16.h, z22.h\n"
- "ld1h { z19.h }, p3/Z, [x10, #3, MUL VL]\n"
+ "ld1h { z19.h }, p3/Z, [x11, #3, MUL VL]\n"
"fmla z30.h, p3/M, z25.h, z0.h\n"
- "fmla z31.h, p3/M, z25.h, z3.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x15, LSL #1]\n"
+ "ld1h { z16.h }, p2/Z, [x24, x12, LSL #1]\n"
+ "fmla z31.h, p3/M, z25.h, z2.h\n"
"fmla z5.h, p3/M, z25.h, z18.h\n"
+ "ld1h { z18.h }, p2/Z, [x24, x10, LSL #1]\n"
"fmla z29.h, p3/M, z25.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x23, x28, LSL #1]\n"
- "fmla z30.h, p3/M, z23.h, z3.h\n"
+ "fmla z30.h, p3/M, z23.h, z2.h\n"
"fmla z31.h, p3/M, z23.h, z26.h\n"
"fmla z5.h, p3/M, z23.h, z17.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, x9, LSL #1]\n"
"fmla z29.h, p3/M, z23.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x23, x26, LSL #1]\n"
"fmla z30.h, p3/M, z21.h, z26.h\n"
"fmla z31.h, p3/M, z21.h, z24.h\n"
"fmla z5.h, p3/M, z21.h, z16.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, x28, LSL #1]\n"
"fmla z29.h, p3/M, z21.h, z18.h\n"
- "ld1h { z16.h }, p2/Z, [x23, x24, LSL #1]\n"
"fmla z30.h, p3/M, z20.h, z24.h\n"
"fmla z31.h, p3/M, z20.h, z27.h\n"
"fmla z5.h, p3/M, z20.h, z18.h\n"
"fmla z29.h, p3/M, z20.h, z17.h\n"
"fmla z30.h, p3/M, z19.h, z27.h\n"
"fmla z31.h, p3/M, z19.h, z22.h\n"
- "fmax z30.h, p3/M, z30.h, z15.h\n"
- "fmax z31.h, p3/M, z31.h, z15.h\n"
"fmla z5.h, p3/M, z19.h, z17.h\n"
"fmla z29.h, p3/M, z19.h, z16.h\n"
+ "fmax z30.h, p3/M, z30.h, z15.h\n"
+ "fmax z31.h, p3/M, z31.h, z15.h\n"
"fmax z5.h, p3/M, z5.h, z15.h\n"
- "fmax z29.h, p3/M, z29.h, z15.h\n"
"fmin z30.h, p3/M, z30.h, z28.h\n"
"fmin z31.h, p3/M, z31.h, z28.h\n"
- "st1h { z30.h }, p0, [x13]\n"
+ "fmax z29.h, p3/M, z29.h, z15.h\n"
"fmin z5.h, p3/M, z5.h, z28.h\n"
+ "st1h { z30.h }, p0, [x13]\n"
"fmin z29.h, p3/M, z29.h, z28.h\n"
"st1h { z31.h }, p0, [x13, x16, LSL #1]\n"
- "st1h { z5.h }, p0, [x22]\n"
- "st1h { z29.h }, p0, [x22, x16, LSL #1]\n"
+ "st1h { z5.h }, p0, [x23]\n"
+ "st1h { z29.h }, p0, [x23, x16, LSL #1]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 1ec0cb2cbf..0c084f5c83 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -99,449 +99,449 @@ void sve_fp16_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x15, x14, [x20, #0x0]\n"
- "mov x13, #0x0\n"
- "ldp x12, x11, [x20, #0x10]\n"
+ "add x17, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x16, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
"whilelt p3.h, XZR, %x[n_channels]\n"
- "ldp x21, x20, [x16, #0x0]\n"
- "cnth x10\n"
+ "cnth x14\n"
"ptrue p2.b\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_params]]\n"
- "ld1h { z5.h }, p3/Z, [x21, x13, LSL #1]\n"
- "cmp x10, %x[n_channels]\n"
- "ld1h { z6.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldp x27, x26, [x16, #0x10]\n"
- "sub x28, XZR, x10\n"
- "ldp x25, x24, [x16, #0x20]\n"
- "ldp x23, x22, [x16, #0x30]\n"
- "ldp x21, x20, [x16, #0x40]\n"
- "ld1rh { z15.h }, p2/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rh { z28.h }, p2/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1h { z29.h }, p2/Z, [x9]\n"
- "ld1h { z0.h }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1h { z1.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z2.h }, p2/Z, [x9, #3, MUL VL]\n"
- "ld1h { z3.h }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1h { z4.h }, p2/Z, [x9, #5, MUL VL]\n"
- "ld1h { z7.h }, p3/Z, [x27, x13, LSL #1]\n"
- "addvl x9, x9, #6\n"
- "ld1h { z8.h }, p3/Z, [x26, x13, LSL #1]\n"
- "ld1h { z9.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ld1h { z13.h }, p3/Z, [x24, x13, LSL #1]\n"
- "ld1h { z11.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ld1h { z12.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ld1h { z10.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ld1h { z14.h }, p3/Z, [x20, x13, LSL #1]\n"
+ "ldp x13, x12, [x20, #0x0]\n"
+ "ldp x11, x10, [x20, #0x10]\n"
+ "ldp x21, x20, [x17, #0x0]\n"
+ "ldp x27, x26, [x17, #0x10]\n"
+ "ldp x25, x24, [x17, #0x20]\n"
+ "ldp x23, x22, [x17, #0x30]\n"
+ "cmp x14, %x[n_channels]\n"
+ "sub x9, XZR, x14\n"
+ "ld1rh { z17.h }, p2/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rh { z30.h }, p2/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1h { z5.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ld1h { z6.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldp x21, x20, [x17, #0x40]\n"
+ "ld1h { z29.h }, p2/Z, [x15]\n"
+ "ld1h { z0.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "ld1h { z1.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x15, #3, MUL VL]\n"
+ "ld1h { z3.h }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1h { z4.h }, p2/Z, [x15, #5, MUL VL]\n"
+ "ld1h { z7.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "addvl x15, x15, #6\n"
+ "ld1h { z8.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "ld1h { z9.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ld1h { z13.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ld1h { z11.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ld1h { z12.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ld1h { z10.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ld1h { z14.h }, p3/Z, [x20, x16, LSL #1]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z30, z29\n fmla z30.h, p2/M, z0.h, z5.h\n"
- "movprfx z27, z29\n fmla z27.h, p2/M, z0.h, z6.h\n"
- "ldr x20, [x16, #0x50]\n"
- "ld1h { z5.h }, p3/Z, [x20, x13, LSL #1]\n"
- "movprfx z31, z29\n fmla z31.h, p2/M, z0.h, z7.h\n"
- "movprfx z26, z29\n fmla z26.h, p2/M, z0.h, z8.h\n"
- "ldr x20, [x16, #0x58]\n"
- "ldr x21, [x16, #0x60]\n"
- "fmla z30.h, p2/M, z1.h, z6.h\n"
- "fmla z27.h, p2/M, z1.h, z9.h\n"
- "ld1h { z22.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x20, [x16, #0x68]\n"
- "fmla z31.h, p2/M, z1.h, z8.h\n"
- "fmla z26.h, p2/M, z1.h, z13.h\n"
- "ld1h { z21.h }, p2/Z, [x9]\n"
- "ldr x23, [x16, #0x70]\n"
- "fmla z30.h, p2/M, z2.h, z9.h\n"
- "fmla z27.h, p2/M, z2.h, z11.h\n"
- "ld1h { z20.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ld1h { z18.h }, p2/Z, [x9, #1, MUL VL]\n"
- "fmla z31.h, p2/M, z2.h, z13.h\n"
- "fmla z26.h, p2/M, z2.h, z5.h\n"
- "ldr x22, [x16, #0x78]\n"
- "ld1h { z17.h }, p2/Z, [x9, #2, MUL VL]\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z27.h, p2/M, z3.h, z12.h\n"
- "ld1h { z11.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x21, [x16, #0x80]\n"
- "fmla z31.h, p2/M, z3.h, z5.h\n"
- "fmla z26.h, p2/M, z3.h, z22.h\n"
- "ld1h { z16.h }, p2/Z, [x9, #3, MUL VL]\n"
- "ldr x20, [x16, #0x88]\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z27.h, p2/M, z4.h, z20.h\n"
- "ld1h { z0.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ld1h { z29.h }, p3/Z, [x22, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z4.h, z22.h\n"
- "fmla z26.h, p2/M, z4.h, z10.h\n"
- "ld1h { z19.h }, p2/Z, [x9, #4, MUL VL]\n"
- "ldr x23, [x16, #0x90]\n"
- "fmla z30.h, p2/M, z21.h, z7.h\n"
- "fmla z27.h, p2/M, z21.h, z8.h\n"
- "ldr x26, [x16, #0x98]\n"
- "ldr x22, [x16, #0xa0]\n"
- "fmla z31.h, p2/M, z21.h, z14.h\n"
- "fmla z26.h, p2/M, z21.h, z11.h\n"
- "ld1h { z25.h }, p2/Z, [x9, #5, MUL VL]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z30.h, p2/M, z18.h, z8.h\n"
- "fmla z27.h, p2/M, z18.h, z13.h\n"
- "ld1h { z24.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x24, [x16, #0xb0]\n"
- "fmla z31.h, p2/M, z18.h, z11.h\n"
- "fmla z26.h, p2/M, z18.h, z0.h\n"
- "ld1h { z18.h }, p2/Z, [x9, #6, MUL VL]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z30.h, p2/M, z17.h, z13.h\n"
- "fmla z27.h, p2/M, z17.h, z5.h\n"
- "ld1h { z3.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ldr x21, [x16, #0xc0]\n"
- "fmla z31.h, p2/M, z17.h, z0.h\n"
- "fmla z26.h, p2/M, z17.h, z29.h\n"
- "ld1h { z17.h }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "fmla z30.h, p2/M, z16.h, z5.h\n"
- "fmla z27.h, p2/M, z16.h, z22.h\n"
- "ld1h { z6.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ldr x27, [x16, #0xc8]\n"
- "fmla z31.h, p2/M, z16.h, z29.h\n"
- "fmla z26.h, p2/M, z16.h, z3.h\n"
- "ld1h { z16.h }, p2/Z, [x9, #-8, MUL VL]\n"
- "ldr x23, [x16, #0xd0]\n"
- "fmla z30.h, p2/M, z19.h, z22.h\n"
- "fmla z27.h, p2/M, z19.h, z10.h\n"
- "ld1h { z23.h }, p3/Z, [x26, x13, LSL #1]\n"
- "ld1h { z22.h }, p3/Z, [x22, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z19.h, z3.h\n"
- "fmla z26.h, p2/M, z19.h, z24.h\n"
- "ld1h { z21.h }, p2/Z, [x9, #-7, MUL VL]\n"
- "ldr x22, [x16, #0xd8]\n"
- "fmla z30.h, p2/M, z25.h, z14.h\n"
- "fmla z27.h, p2/M, z25.h, z11.h\n"
- "ld1h { z1.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x20, [x16, #0xe0]\n"
- "fmla z31.h, p2/M, z25.h, z6.h\n"
- "fmla z26.h, p2/M, z25.h, z23.h\n"
- "ld1h { z20.h }, p2/Z, [x9, #-6, MUL VL]\n"
- "ldr x26, [x16, #0xf8]\n"
- "fmla z30.h, p2/M, z18.h, z11.h\n"
- "fmla z27.h, p2/M, z18.h, z0.h\n"
- "ld1h { z7.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ldr x25, [x16, #0xe8]\n"
- "fmla z31.h, p2/M, z18.h, z23.h\n"
- "fmla z26.h, p2/M, z18.h, z22.h\n"
- "ld1h { z18.h }, p2/Z, [x9, #-5, MUL VL]\n"
- "whilelt p1.h, x10, %x[n_channels]\n"
- "fmla z30.h, p2/M, z17.h, z0.h\n"
- "fmla z27.h, p2/M, z17.h, z29.h\n"
- "ld1h { z19.h }, p3/Z, [x24, x13, LSL #1]\n"
- "ldr x24, [x16, #0xf0]\n"
- "fmla z31.h, p2/M, z17.h, z22.h\n"
- "fmla z26.h, p2/M, z17.h, z7.h\n"
- "ld1h { z17.h }, p2/Z, [x9, #-4, MUL VL]\n"
- "inch x28\n"
- "fmla z30.h, p2/M, z16.h, z29.h\n"
- "fmla z27.h, p2/M, z16.h, z3.h\n"
- "ld1h { z0.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ldr x21, [x16, #0x100]\n"
- "fmla z31.h, p2/M, z16.h, z7.h\n"
- "fmla z26.h, p2/M, z16.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x9, #-3, MUL VL]\n"
+ "movprfx z15, z29\n fmla z15.h, p2/M, z0.h, z5.h\n"
+ "movprfx z28, z29\n fmla z28.h, p2/M, z0.h, z6.h\n"
+ "ldr x21, [x17, #0x50]\n"
+ "ldr x20, [x17, #0x58]\n"
+ "movprfx z27, z29\n fmla z27.h, p2/M, z0.h, z7.h\n"
+ "movprfx z31, z29\n fmla z31.h, p2/M, z0.h, z8.h\n"
+ "ldr x22, [x17, #0x60]\n"
+ "ldr x25, [x17, #0x68]\n"
+ "ld1h { z19.h }, p2/Z, [x15]\n"
+ "ldr x24, [x17, #0x70]\n"
+ "whilelt p1.h, x14, %x[n_channels]\n"
+ "inch x9\n"
+ "ld1h { z25.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x78]\n"
"mov p0.b, p3.b\n"
- "fmla z30.h, p2/M, z21.h, z3.h\n"
- "fmla z27.h, p2/M, z21.h, z24.h\n"
- "ld1h { z11.h }, p3/Z, [x27, x13, LSL #1]\n"
- "ld1h { z13.h }, p3/Z, [x20, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z21.h, z19.h\n"
- "fmla z26.h, p2/M, z21.h, z1.h\n"
- "ld1h { z10.h }, p2/Z, [x9, #-2, MUL VL]\n"
- "ldr x20, [x16, #0x108]\n"
- "fmla z30.h, p2/M, z20.h, z6.h\n"
- "fmla z27.h, p2/M, z20.h, z23.h\n"
- "ld1h { z25.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ldr x23, [x16, #0x110]\n"
- "fmla z31.h, p2/M, z20.h, z0.h\n"
- "fmla z26.h, p2/M, z20.h, z11.h\n"
- "ld1h { z8.h }, p2/Z, [x9, #-1, MUL VL]\n"
- "ld1h { z29.h }, p2/Z, [x9, #4, MUL VL]\n"
- "fmla z30.h, p2/M, z18.h, z23.h\n"
- "fmla z27.h, p2/M, z18.h, z22.h\n"
- "ld1h { z24.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ldr x22, [x16, #0x118]\n"
- "fmla z31.h, p2/M, z18.h, z11.h\n"
- "fmla z26.h, p2/M, z18.h, z25.h\n"
- "ld1h { z23.h }, p2/Z, [x9]\n"
- "fmla z30.h, p2/M, z17.h, z22.h\n"
- "fmla z27.h, p2/M, z17.h, z7.h\n"
- "ld1h { z22.h }, p3/Z, [x25, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z17.h, z25.h\n"
- "fmla z26.h, p2/M, z17.h, z24.h\n"
- "ld1h { z21.h }, p2/Z, [x9, #1, MUL VL]\n"
- "fmla z30.h, p2/M, z16.h, z7.h\n"
- "fmla z27.h, p2/M, z16.h, z19.h\n"
- "ld1h { z18.h }, p3/Z, [x24, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z16.h, z24.h\n"
- "fmla z26.h, p2/M, z16.h, z13.h\n"
- "ld1h { z20.h }, p2/Z, [x9, #2, MUL VL]\n"
- "fmla z30.h, p2/M, z10.h, z19.h\n"
- "fmla z27.h, p2/M, z10.h, z1.h\n"
- "ld1h { z17.h }, p3/Z, [x26, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z10.h, z13.h\n"
- "fmla z26.h, p2/M, z10.h, z22.h\n"
- "ld1h { z19.h }, p2/Z, [x9, #3, MUL VL]\n"
- "fmla z30.h, p2/M, z8.h, z0.h\n"
- "fmla z27.h, p2/M, z8.h, z11.h\n"
- "ld1h { z16.h }, p3/Z, [x21, x13, LSL #1]\n"
- "fmla z31.h, p2/M, z8.h, z18.h\n"
- "fmla z26.h, p2/M, z8.h, z17.h\n"
- "ld1h { z18.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldp x21, x20, [x16, #0x0]\n"
- "fmla z30.h, p2/M, z23.h, z11.h\n"
- "fmla z27.h, p2/M, z23.h, z25.h\n"
- "ld1h { z0.h }, p2/Z, [x9, #5, MUL VL]\n"
- "fmla z31.h, p2/M, z23.h, z17.h\n"
- "fmla z26.h, p2/M, z23.h, z16.h\n"
- "ld1h { z17.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ld1h { z1.h }, p2/Z, [x9, #6, MUL VL]\n"
- "fmla z30.h, p2/M, z21.h, z25.h\n"
- "fmla z27.h, p2/M, z21.h, z24.h\n"
- "ld1h { z5.h }, p1/Z, [x21, x10, LSL #1]\n"
- "fmla z31.h, p2/M, z21.h, z16.h\n"
- "fmla z26.h, p2/M, z21.h, z18.h\n"
- "ld1h { z16.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ldp x27, x26, [x16, #0x10]\n"
- "fmla z30.h, p2/M, z20.h, z24.h\n"
+ "fmla z15.h, p2/M, z1.h, z6.h\n"
+ "fmla z28.h, p2/M, z1.h, z9.h\n"
+ "ld1h { z23.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x27, [x17, #0x80]\n"
+ "fmla z27.h, p2/M, z1.h, z8.h\n"
+ "fmla z31.h, p2/M, z1.h, z13.h\n"
+ "ld1h { z22.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "ldr x20, [x17, #0x88]\n"
+ "ldr x23, [x17, #0x90]\n"
+ "ldr x26, [x17, #0x98]\n"
+ "fmla z15.h, p2/M, z2.h, z9.h\n"
+ "ld1h { z18.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x22, [x17, #0xa0]\n"
+ "fmla z28.h, p2/M, z2.h, z11.h\n"
+ "fmla z27.h, p2/M, z2.h, z13.h\n"
+ "fmla z31.h, p2/M, z2.h, z25.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z15.h, p2/M, z3.h, z11.h\n"
+ "ld1h { z2.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xa8]\n"
+ "fmla z28.h, p2/M, z3.h, z12.h\n"
+ "fmla z27.h, p2/M, z3.h, z25.h\n"
+ "fmla z31.h, p2/M, z3.h, z23.h\n"
+ "ld1h { z21.h }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z15.h, p2/M, z4.h, z12.h\n"
+ "ld1h { z1.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xb0]\n"
+ "fmla z28.h, p2/M, z4.h, z18.h\n"
+ "ld1h { z0.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0xb8]\n"
+ "fmla z27.h, p2/M, z4.h, z23.h\n"
+ "fmla z31.h, p2/M, z4.h, z10.h\n"
+ "ld1h { z3.h }, p2/Z, [x15, #4, MUL VL]\n"
+ "fmla z15.h, p2/M, z19.h, z7.h\n"
+ "fmla z28.h, p2/M, z19.h, z8.h\n"
+ "fmla z27.h, p2/M, z19.h, z14.h\n"
+ "fmla z31.h, p2/M, z19.h, z2.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #5, MUL VL]\n"
+ "fmla z15.h, p2/M, z22.h, z8.h\n"
+ "ld1h { z26.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x28, [x17, #0xc8]\n"
+ "fmla z28.h, p2/M, z22.h, z13.h\n"
+ "fmla z27.h, p2/M, z22.h, z2.h\n"
+ "fmla z31.h, p2/M, z22.h, z1.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, #6, MUL VL]\n"
+ "fmla z15.h, p2/M, z16.h, z13.h\n"
+ "ld1h { z9.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ldr x20, [x17, #0xc0]\n"
+ "fmla z28.h, p2/M, z16.h, z25.h\n"
+ "fmla z27.h, p2/M, z16.h, z1.h\n"
+ "fmla z31.h, p2/M, z16.h, z0.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z15.h, p2/M, z21.h, z25.h\n"
+ "ld1h { z25.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x17, #0xd0]\n"
+ "fmla z28.h, p2/M, z21.h, z23.h\n"
+ "ld1h { z29.h }, p2/Z, [x15, #4, MUL VL]\n"
+ "fmla z27.h, p2/M, z21.h, z0.h\n"
+ "fmla z31.h, p2/M, z21.h, z9.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, #-8, MUL VL]\n"
+ "fmla z15.h, p2/M, z3.h, z23.h\n"
+ "ld1h { z24.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "ldr x27, [x17, #0xd8]\n"
+ "fmla z28.h, p2/M, z3.h, z10.h\n"
+ "ld1h { z23.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x22, [x17, #0xe0]\n"
+ "fmla z27.h, p2/M, z3.h, z9.h\n"
+ "fmla z31.h, p2/M, z3.h, z26.h\n"
+ "ld1h { z22.h }, p2/Z, [x15, #-7, MUL VL]\n"
+ "fmla z15.h, p2/M, z20.h, z14.h\n"
+ "ld1h { z6.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x26, [x17, #0xf8]\n"
+ "fmla z28.h, p2/M, z20.h, z2.h\n"
+ "fmla z27.h, p2/M, z20.h, z25.h\n"
+ "fmla z31.h, p2/M, z20.h, z24.h\n"
+ "ld1h { z10.h }, p2/Z, [x15, #-6, MUL VL]\n"
+ "fmla z15.h, p2/M, z19.h, z2.h\n"
+ "ld1h { z21.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xe8]\n"
+ "fmla z28.h, p2/M, z19.h, z1.h\n"
+ "fmla z27.h, p2/M, z19.h, z24.h\n"
+ "fmla z31.h, p2/M, z19.h, z23.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #-5, MUL VL]\n"
+ "fmla z15.h, p2/M, z18.h, z1.h\n"
+ "ld1h { z19.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xf0]\n"
+ "fmla z28.h, p2/M, z18.h, z0.h\n"
+ "fmla z27.h, p2/M, z18.h, z23.h\n"
+ "fmla z31.h, p2/M, z18.h, z21.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, #-4, MUL VL]\n"
+ "fmla z15.h, p2/M, z16.h, z0.h\n"
+ "ld1h { z0.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x100]\n"
+ "fmla z28.h, p2/M, z16.h, z9.h\n"
+ "fmla z27.h, p2/M, z16.h, z21.h\n"
+ "fmla z31.h, p2/M, z16.h, z19.h\n"
+ "ld1h { z16.h }, p2/Z, [x15, #-3, MUL VL]\n"
+ "fmla z15.h, p2/M, z22.h, z9.h\n"
+ "ld1h { z12.h }, p3/Z, [x28, x16, LSL #1]\n"
+ "ldr x20, [x17, #0x108]\n"
+ "fmla z28.h, p2/M, z22.h, z26.h\n"
+ "ld1h { z4.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "fmla z27.h, p2/M, z22.h, z19.h\n"
+ "fmla z31.h, p2/M, z22.h, z6.h\n"
+ "ld1h { z14.h }, p2/Z, [x15, #-2, MUL VL]\n"
+ "fmla z15.h, p2/M, z10.h, z25.h\n"
+ "ld1h { z26.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x110]\n"
+ "fmla z28.h, p2/M, z10.h, z24.h\n"
+ "fmla z27.h, p2/M, z10.h, z0.h\n"
+ "fmla z31.h, p2/M, z10.h, z12.h\n"
+ "ld1h { z10.h }, p2/Z, [x15, #-1, MUL VL]\n"
+ "fmla z15.h, p2/M, z20.h, z24.h\n"
+ "ld1h { z25.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ldr x22, [x17, #0x118]\n"
+ "fmla z28.h, p2/M, z20.h, z23.h\n"
+ "fmla z27.h, p2/M, z20.h, z12.h\n"
+ "fmla z31.h, p2/M, z20.h, z26.h\n"
+ "ld1h { z24.h }, p2/Z, [x15]\n"
+ "fmla z15.h, p2/M, z18.h, z23.h\n"
+ "ld1h { z23.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "fmla z28.h, p2/M, z18.h, z21.h\n"
+ "fmla z27.h, p2/M, z18.h, z26.h\n"
+ "fmla z31.h, p2/M, z18.h, z25.h\n"
+ "ld1h { z22.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "fmla z15.h, p2/M, z16.h, z21.h\n"
+ "ld1h { z21.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "fmla z28.h, p2/M, z16.h, z19.h\n"
+ "fmla z27.h, p2/M, z16.h, z25.h\n"
+ "fmla z31.h, p2/M, z16.h, z4.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z15.h, p2/M, z14.h, z19.h\n"
+ "ld1h { z19.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "fmla z28.h, p2/M, z14.h, z6.h\n"
+ "fmla z27.h, p2/M, z14.h, z4.h\n"
+ "fmla z31.h, p2/M, z14.h, z23.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z15.h, p2/M, z10.h, z0.h\n"
+ "ld1h { z16.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "fmla z28.h, p2/M, z10.h, z12.h\n"
+ "fmla z27.h, p2/M, z10.h, z21.h\n"
+ "ld1h { z13.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldp x21, x20, [x17, #0x0]\n"
+ "fmla z31.h, p2/M, z10.h, z19.h\n"
+ "ld1h { z0.h }, p2/Z, [x15, #5, MUL VL]\n"
+ "fmla z15.h, p2/M, z24.h, z12.h\n"
+ "fmla z28.h, p2/M, z24.h, z26.h\n"
+ "fmla z27.h, p2/M, z24.h, z19.h\n"
+ "ld1h { z12.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "fmla z31.h, p2/M, z24.h, z16.h\n"
+ "ld1h { z1.h }, p2/Z, [x15, #6, MUL VL]\n"
+ "fmla z15.h, p2/M, z22.h, z26.h\n"
+ "ld1h { z5.h }, p1/Z, [x21, x14, LSL #1]\n"
+ "fmla z28.h, p2/M, z22.h, z25.h\n"
+ "fmla z27.h, p2/M, z22.h, z16.h\n"
+ "ld1h { z16.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldp x27, x26, [x17, #0x10]\n"
+ "ldp x25, x24, [x17, #0x20]\n"
+ "ldp x23, x22, [x17, #0x30]\n"
+ "inch x16\n"
+ "fmla z31.h, p2/M, z22.h, z13.h\n"
+ "ld1h { z2.h }, p2/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z15.h, p2/M, z20.h, z25.h\n"
+ "ld1h { z6.h }, p1/Z, [x20, x14, LSL #1]\n"
+ "ldp x21, x20, [x17, #0x40]\n"
+ "ld1h { z7.h }, p1/Z, [x27, x14, LSL #1]\n"
+ "fmla z28.h, p2/M, z20.h, z4.h\n"
"fmla z27.h, p2/M, z20.h, z13.h\n"
- "ld1h { z6.h }, p1/Z, [x20, x10, LSL #1]\n"
- "ldp x25, x24, [x16, #0x20]\n"
- "fmla z31.h, p2/M, z20.h, z18.h\n"
- "fmla z26.h, p2/M, z20.h, z17.h\n"
- "ldp x23, x22, [x16, #0x30]\n"
- "ldp x21, x20, [x16, #0x40]\n"
- "fmla z30.h, p2/M, z19.h, z13.h\n"
- "fmla z27.h, p2/M, z19.h, z22.h\n"
- "inch x13\n"
- "ld1h { z7.h }, p1/Z, [x27, x10, LSL #1]\n"
- "fmla z31.h, p2/M, z19.h, z17.h\n"
- "fmla z26.h, p2/M, z19.h, z16.h\n"
- "ld1h { z8.h }, p1/Z, [x26, x10, LSL #1]\n"
- "ld1h { z9.h }, p1/Z, [x25, x10, LSL #1]\n"
- "ld1h { z13.h }, p1/Z, [x24, x10, LSL #1]\n"
- "ld1h { z11.h }, p1/Z, [x23, x10, LSL #1]\n"
- "fmax z30.h, p2/M, z30.h, z15.h\n"
- "fmax z27.h, p2/M, z27.h, z15.h\n"
- "ld1h { z12.h }, p1/Z, [x22, x10, LSL #1]\n"
- "ld1h { z10.h }, p1/Z, [x21, x10, LSL #1]\n"
- "fmax z31.h, p2/M, z31.h, z15.h\n"
- "fmax z26.h, p2/M, z26.h, z15.h\n"
- "ld1h { z14.h }, p1/Z, [x20, x10, LSL #1]\n"
- "inch x10\n"
- "ld1h { z2.h }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "whilelt p3.h, x13, %x[n_channels]\n"
- "cmp x10, %x[n_channels]\n"
- "ld1h { z3.h }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1h { z4.h }, p2/Z, [x9, #-7, MUL VL]\n"
- "fmin z30.h, p2/M, z30.h, z28.h\n"
- "fmin z27.h, p2/M, z27.h, z28.h\n"
- "st1h { z30.h }, p0, [x15, x28, LSL #1]\n"
- "fmin z31.h, p2/M, z31.h, z28.h\n"
- "fmin z26.h, p2/M, z26.h, z28.h\n"
- "st1h { z27.h }, p0, [x14, x28, LSL #1]\n"
- "st1h { z31.h }, p0, [x12, x28, LSL #1]\n"
- "addvl x9, x9, #-6\n"
- "st1h { z26.h }, p0, [x11, x28, LSL #1]\n"
+ "ld1h { z13.h }, p1/Z, [x24, x14, LSL #1]\n"
+ "ld1h { z11.h }, p1/Z, [x23, x14, LSL #1]\n"
+ "whilelt p3.h, x16, %x[n_channels]\n"
+ "fmla z31.h, p2/M, z20.h, z12.h\n"
+ "ld1h { z3.h }, p2/Z, [x15, #-8, MUL VL]\n"
+ "fmla z15.h, p2/M, z18.h, z4.h\n"
+ "ld1h { z8.h }, p1/Z, [x26, x14, LSL #1]\n"
+ "ld1h { z14.h }, p1/Z, [x20, x14, LSL #1]\n"
+ "fmla z28.h, p2/M, z18.h, z23.h\n"
+ "ld1h { z10.h }, p1/Z, [x21, x14, LSL #1]\n"
+ "fmla z27.h, p2/M, z18.h, z12.h\n"
+ "ld1h { z12.h }, p1/Z, [x22, x14, LSL #1]\n"
+ "fmla z31.h, p2/M, z18.h, z16.h\n"
+ "ld1h { z9.h }, p1/Z, [x25, x14, LSL #1]\n"
+ "inch x14\n"
+ "ld1h { z4.h }, p2/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmax z15.h, p2/M, z15.h, z17.h\n"
+ "fmax z28.h, p2/M, z28.h, z17.h\n"
+ "fmax z27.h, p2/M, z27.h, z17.h\n"
+ "cmp x14, %x[n_channels]\n"
+ "fmax z31.h, p2/M, z31.h, z17.h\n"
+ "fmin z15.h, p2/M, z15.h, z30.h\n"
+ "fmin z28.h, p2/M, z28.h, z30.h\n"
+ "fmin z27.h, p2/M, z27.h, z30.h\n"
+ "fmin z31.h, p2/M, z31.h, z30.h\n"
+ "st1h { z15.h }, p0, [x13, x9, LSL #1]\n"
+ "st1h { z28.h }, p0, [x12, x9, LSL #1]\n"
+ "st1h { z27.h }, p0, [x11, x9, LSL #1]\n"
+ "st1h { z31.h }, p0, [x10, x9, LSL #1]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z30, z29\n fmla z30.h, p2/M, z0.h, z5.h\n"
- "movprfx z31, z29\n fmla z31.h, p2/M, z0.h, z6.h\n"
- "ldr x20, [x16, #0x50]\n"
- "ld1h { z22.h }, p3/Z, [x20, x13, LSL #1]\n"
- "movprfx z5, z29\n fmla z5.h, p2/M, z0.h, z7.h\n"
- "fmla z29.h, p2/M, z0.h, z8.h\n"
- "ldr x20, [x16, #0x58]\n"
- "ldr x21, [x16, #0x60]\n"
- "fmla z30.h, p2/M, z1.h, z6.h\n"
- "fmla z31.h, p2/M, z1.h, z9.h\n"
- "ld1h { z6.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x20, [x16, #0x68]\n"
- "fmla z5.h, p2/M, z1.h, z8.h\n"
- "fmla z29.h, p2/M, z1.h, z13.h\n"
- "ld1h { z20.h }, p2/Z, [x9]\n"
- "ldr x23, [x16, #0x70]\n"
- "fmla z30.h, p2/M, z2.h, z9.h\n"
- "fmla z31.h, p2/M, z2.h, z11.h\n"
- "ld1h { z16.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ld1h { z19.h }, p2/Z, [x9, #1, MUL VL]\n"
- "fmla z5.h, p2/M, z2.h, z13.h\n"
- "fmla z29.h, p2/M, z2.h, z22.h\n"
- "ldr x21, [x16, #0x78]\n"
- "ld1h { z18.h }, p2/Z, [x9, #2, MUL VL]\n"
- "fmla z30.h, p2/M, z3.h, z11.h\n"
- "fmla z31.h, p2/M, z3.h, z12.h\n"
- "ld1h { z1.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x22, [x16, #0x80]\n"
- "fmla z5.h, p2/M, z3.h, z22.h\n"
- "fmla z29.h, p2/M, z3.h, z6.h\n"
- "ld1h { z17.h }, p2/Z, [x9, #3, MUL VL]\n"
- "ldr x20, [x16, #0x88]\n"
- "fmla z30.h, p2/M, z4.h, z12.h\n"
- "fmla z31.h, p2/M, z4.h, z16.h\n"
- "ld1h { z0.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ld1h { z27.h }, p3/Z, [x21, x13, LSL #1]\n"
- "fmla z5.h, p2/M, z4.h, z6.h\n"
- "fmla z29.h, p2/M, z4.h, z10.h\n"
- "ld1h { z16.h }, p2/Z, [x9, #4, MUL VL]\n"
- "ldr x21, [x16, #0x90]\n"
- "fmla z30.h, p2/M, z20.h, z7.h\n"
- "fmla z31.h, p2/M, z20.h, z8.h\n"
- "ldr x27, [x16, #0x98]\n"
- "ldr x26, [x16, #0xa0]\n"
- "fmla z5.h, p2/M, z20.h, z14.h\n"
- "fmla z29.h, p2/M, z20.h, z1.h\n"
- "ld1h { z21.h }, p2/Z, [x9, #5, MUL VL]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z30.h, p2/M, z19.h, z8.h\n"
- "fmla z31.h, p2/M, z19.h, z13.h\n"
- "ld1h { z26.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x24, [x16, #0xb0]\n"
- "fmla z5.h, p2/M, z19.h, z1.h\n"
- "fmla z29.h, p2/M, z19.h, z0.h\n"
- "ld1h { z25.h }, p2/Z, [x9, #6, MUL VL]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z30.h, p2/M, z18.h, z13.h\n"
- "fmla z31.h, p2/M, z18.h, z22.h\n"
- "ld1h { z24.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ldr x23, [x16, #0xc0]\n"
- "fmla z5.h, p2/M, z18.h, z0.h\n"
- "fmla z29.h, p2/M, z18.h, z27.h\n"
- "ld1h { z23.h }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "fmla z30.h, p2/M, z17.h, z22.h\n"
- "fmla z31.h, p2/M, z17.h, z6.h\n"
- "ld1h { z22.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ldr x22, [x16, #0xc8]\n"
- "fmla z5.h, p2/M, z17.h, z27.h\n"
- "fmla z29.h, p2/M, z17.h, z24.h\n"
- "ld1h { z20.h }, p2/Z, [x9, #-8, MUL VL]\n"
- "ldr x21, [x16, #0xd0]\n"
- "fmla z30.h, p2/M, z16.h, z6.h\n"
- "fmla z31.h, p2/M, z16.h, z10.h\n"
- "ld1h { z19.h }, p3/Z, [x27, x13, LSL #1]\n"
- "ld1h { z18.h }, p3/Z, [x26, x13, LSL #1]\n"
- "fmla z5.h, p2/M, z16.h, z24.h\n"
- "fmla z29.h, p2/M, z16.h, z26.h\n"
- "ld1h { z16.h }, p2/Z, [x9, #-7, MUL VL]\n"
- "ldr x27, [x16, #0xd8]\n"
- "fmla z30.h, p2/M, z21.h, z14.h\n"
+ "movprfx z16, z29\n fmla z16.h, p2/M, z0.h, z5.h\n"
+ "movprfx z15, z29\n fmla z15.h, p2/M, z0.h, z6.h\n"
+ "ldr x22, [x17, #0x50]\n"
+ "ldr x21, [x17, #0x58]\n"
+ "movprfx z31, z29\n fmla z31.h, p2/M, z0.h, z7.h\n"
+ "movprfx z5, z29\n fmla z5.h, p2/M, z0.h, z8.h\n"
+ "ldr x20, [x17, #0x60]\n"
+ "ldr x25, [x17, #0x68]\n"
+ "ld1h { z25.h }, p2/Z, [x15]\n"
+ "ldr x24, [x17, #0x70]\n"
+ "inch x9\n"
+ "mov p0.b, p3.b\n"
+ "ld1h { z24.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x78]\n"
+ "fmla z16.h, p2/M, z1.h, z6.h\n"
+ "fmla z15.h, p2/M, z1.h, z9.h\n"
+ "ld1h { z23.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x27, [x17, #0x80]\n"
+ "fmla z31.h, p2/M, z1.h, z8.h\n"
+ "fmla z5.h, p2/M, z1.h, z13.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "ldr x22, [x17, #0x88]\n"
+ "ldr x21, [x17, #0x90]\n"
+ "ldr x26, [x17, #0x98]\n"
+ "fmla z16.h, p2/M, z2.h, z9.h\n"
+ "fmla z15.h, p2/M, z2.h, z11.h\n"
+ "ld1h { z18.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x20, [x17, #0xa0]\n"
+ "fmla z31.h, p2/M, z2.h, z13.h\n"
+ "fmla z5.h, p2/M, z2.h, z24.h\n"
+ "ld1h { z22.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z16.h, p2/M, z3.h, z11.h\n"
+ "ld1h { z1.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xa8]\n"
+ "fmla z15.h, p2/M, z3.h, z12.h\n"
+ "fmla z31.h, p2/M, z3.h, z24.h\n"
+ "fmla z5.h, p2/M, z3.h, z23.h\n"
+ "ld1h { z21.h }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z16.h, p2/M, z4.h, z12.h\n"
+ "ld1h { z0.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xb0]\n"
+ "fmla z15.h, p2/M, z4.h, z18.h\n"
+ "ld1h { z29.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ldr x23, [x17, #0xb8]\n"
+ "fmla z31.h, p2/M, z4.h, z23.h\n"
+ "fmla z5.h, p2/M, z4.h, z10.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, #4, MUL VL]\n"
+ "fmla z16.h, p2/M, z25.h, z7.h\n"
+ "fmla z15.h, p2/M, z25.h, z8.h\n"
+ "fmla z31.h, p2/M, z25.h, z14.h\n"
+ "fmla z5.h, p2/M, z25.h, z1.h\n"
+ "ld1h { z18.h }, p2/Z, [x15, #5, MUL VL]\n"
+ "fmla z16.h, p2/M, z20.h, z8.h\n"
+ "ld1h { z28.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x28, [x17, #0xc8]\n"
+ "fmla z15.h, p2/M, z20.h, z13.h\n"
+ "fmla z31.h, p2/M, z20.h, z1.h\n"
+ "fmla z5.h, p2/M, z20.h, z0.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #6, MUL VL]\n"
+ "fmla z16.h, p2/M, z22.h, z13.h\n"
+ "ld1h { z27.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ldr x22, [x17, #0xc0]\n"
+ "fmla z15.h, p2/M, z22.h, z24.h\n"
+ "fmla z31.h, p2/M, z22.h, z0.h\n"
+ "fmla z5.h, p2/M, z22.h, z29.h\n"
+ "ld1h { z26.h }, p2/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z16.h, p2/M, z21.h, z24.h\n"
+ "ld1h { z25.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0xd0]\n"
+ "fmla z15.h, p2/M, z21.h, z23.h\n"
+ "fmla z31.h, p2/M, z21.h, z29.h\n"
+ "fmla z5.h, p2/M, z21.h, z27.h\n"
+ "ld1h { z24.h }, p2/Z, [x15, #-8, MUL VL]\n"
+ "fmla z16.h, p2/M, z19.h, z23.h\n"
+ "ld1h { z23.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "ldr x27, [x17, #0xd8]\n"
+ "fmla z15.h, p2/M, z19.h, z10.h\n"
+ "ld1h { z22.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "ldr x20, [x17, #0xe0]\n"
+ "fmla z31.h, p2/M, z19.h, z27.h\n"
+ "fmla z5.h, p2/M, z19.h, z28.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, #-7, MUL VL]\n"
+ "fmla z16.h, p2/M, z18.h, z14.h\n"
+ "ld1h { z2.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "ldr x26, [x17, #0xf8]\n"
+ "fmla z15.h, p2/M, z18.h, z1.h\n"
+ "fmla z31.h, p2/M, z18.h, z25.h\n"
+ "fmla z5.h, p2/M, z18.h, z23.h\n"
+ "ld1h { z21.h }, p2/Z, [x15, #-6, MUL VL]\n"
+ "fmla z16.h, p2/M, z20.h, z1.h\n"
+ "ld1h { z18.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "ldr x25, [x17, #0xe8]\n"
+ "fmla z15.h, p2/M, z20.h, z0.h\n"
+ "fmla z31.h, p2/M, z20.h, z23.h\n"
+ "fmla z5.h, p2/M, z20.h, z22.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #-5, MUL VL]\n"
+ "fmla z16.h, p2/M, z26.h, z0.h\n"
+ "ld1h { z9.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "ldr x24, [x17, #0xf0]\n"
+ "fmla z15.h, p2/M, z26.h, z29.h\n"
+ "fmla z31.h, p2/M, z26.h, z22.h\n"
+ "fmla z5.h, p2/M, z26.h, z18.h\n"
+ "ld1h { z4.h }, p2/Z, [x15, #-4, MUL VL]\n"
+ "fmla z16.h, p2/M, z24.h, z29.h\n"
+ "ld1h { z1.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "ldr x23, [x17, #0x100]\n"
+ "fmla z15.h, p2/M, z24.h, z27.h\n"
+ "fmla z31.h, p2/M, z24.h, z18.h\n"
+ "fmla z5.h, p2/M, z24.h, z9.h\n"
+ "ld1h { z3.h }, p2/Z, [x15, #-3, MUL VL]\n"
+ "fmla z16.h, p2/M, z19.h, z27.h\n"
+ "ld1h { z0.h }, p3/Z, [x28, x16, LSL #1]\n"
+ "ldr x22, [x17, #0x108]\n"
+ "fmla z15.h, p2/M, z19.h, z28.h\n"
+ "ld1h { z29.h }, p3/Z, [x20, x16, LSL #1]\n"
+ "fmla z31.h, p2/M, z19.h, z9.h\n"
+ "fmla z5.h, p2/M, z19.h, z2.h\n"
+ "ld1h { z19.h }, p2/Z, [x15, #-2, MUL VL]\n"
+ "fmla z16.h, p2/M, z21.h, z25.h\n"
+ "ld1h { z28.h }, p3/Z, [x21, x16, LSL #1]\n"
+ "ldr x21, [x17, #0x110]\n"
+ "fmla z15.h, p2/M, z21.h, z23.h\n"
"fmla z31.h, p2/M, z21.h, z1.h\n"
- "ld1h { z17.h }, p3/Z, [x20, x13, LSL #1]\n"
- "ldr x20, [x16, #0xe0]\n"
- "fmla z5.h, p2/M, z21.h, z22.h\n"
- "fmla z29.h, p2/M, z21.h, z19.h\n"
- "ld1h { z21.h }, p2/Z, [x9, #-6, MUL VL]\n"
- "ldr x26, [x16, #0xf8]\n"
- "fmla z30.h, p2/M, z25.h, z1.h\n"
- "fmla z31.h, p2/M, z25.h, z0.h\n"
- "ld1h { z9.h }, p3/Z, [x25, x13, LSL #1]\n"
- "ldr x25, [x16, #0xe8]\n"
+ "fmla z5.h, p2/M, z21.h, z0.h\n"
+ "ld1h { z27.h }, p2/Z, [x15, #-1, MUL VL]\n"
+ "fmla z16.h, p2/M, z20.h, z23.h\n"
+ "ld1h { z26.h }, p3/Z, [x27, x16, LSL #1]\n"
+ "ldr x20, [x17, #0x118]\n"
+ "fmla z15.h, p2/M, z20.h, z22.h\n"
+ "fmla z31.h, p2/M, z20.h, z0.h\n"
+ "fmla z5.h, p2/M, z20.h, z28.h\n"
+ "ld1h { z25.h }, p2/Z, [x15]\n"
+ "fmla z16.h, p2/M, z4.h, z22.h\n"
+ "ld1h { z24.h }, p3/Z, [x25, x16, LSL #1]\n"
+ "fmla z15.h, p2/M, z4.h, z18.h\n"
+ "fmla z31.h, p2/M, z4.h, z28.h\n"
+ "fmla z5.h, p2/M, z4.h, z26.h\n"
+ "ld1h { z23.h }, p2/Z, [x15, #1, MUL VL]\n"
+ "fmla z16.h, p2/M, z3.h, z18.h\n"
+ "ld1h { z18.h }, p3/Z, [x24, x16, LSL #1]\n"
+ "fmla z15.h, p2/M, z3.h, z9.h\n"
+ "fmla z31.h, p2/M, z3.h, z26.h\n"
+ "fmla z5.h, p2/M, z3.h, z29.h\n"
+ "ld1h { z22.h }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z16.h, p2/M, z19.h, z9.h\n"
+ "ld1h { z21.h }, p3/Z, [x26, x16, LSL #1]\n"
+ "fmla z15.h, p2/M, z19.h, z2.h\n"
+ "fmla z31.h, p2/M, z19.h, z29.h\n"
+ "fmla z5.h, p2/M, z19.h, z24.h\n"
+ "ld1h { z20.h }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z16.h, p2/M, z27.h, z1.h\n"
+ "ld1h { z19.h }, p3/Z, [x23, x16, LSL #1]\n"
+ "fmla z15.h, p2/M, z27.h, z0.h\n"
+ "fmla z31.h, p2/M, z27.h, z18.h\n"
+ "ld1h { z18.h }, p3/Z, [x22, x16, LSL #1]\n"
+ "fmla z5.h, p2/M, z27.h, z21.h\n"
+ "fmla z16.h, p2/M, z25.h, z0.h\n"
+ "fmla z15.h, p2/M, z25.h, z28.h\n"
+ "fmla z31.h, p2/M, z25.h, z21.h\n"
+ "ld1h { z21.h }, p3/Z, [x21, x16, LSL #1]\n"
"fmla z5.h, p2/M, z25.h, z19.h\n"
- "fmla z29.h, p2/M, z25.h, z18.h\n"
- "ld1h { z4.h }, p2/Z, [x9, #-5, MUL VL]\n"
- "inch x28\n"
- "fmla z30.h, p2/M, z23.h, z0.h\n"
- "fmla z31.h, p2/M, z23.h, z27.h\n"
- "ld1h { z8.h }, p3/Z, [x24, x13, LSL #1]\n"
- "ldr x24, [x16, #0xf0]\n"
+ "fmla z16.h, p2/M, z23.h, z28.h\n"
+ "fmla z15.h, p2/M, z23.h, z26.h\n"
+ "fmla z31.h, p2/M, z23.h, z19.h\n"
+ "ld1h { z12.h }, p3/Z, [x20, x16, LSL #1]\n"
"fmla z5.h, p2/M, z23.h, z18.h\n"
- "fmla z29.h, p2/M, z23.h, z9.h\n"
- "ld1h { z6.h }, p2/Z, [x9, #-4, MUL VL]\n"
- "mov p0.b, p3.b\n"
- "fmla z30.h, p2/M, z20.h, z27.h\n"
- "fmla z31.h, p2/M, z20.h, z24.h\n"
- "ld1h { z10.h }, p3/Z, [x23, x13, LSL #1]\n"
- "ldr x23, [x16, #0x100]\n"
- "fmla z5.h, p2/M, z20.h, z9.h\n"
- "fmla z29.h, p2/M, z20.h, z8.h\n"
- "ld1h { z11.h }, p2/Z, [x9, #-3, MUL VL]\n"
- "fmla z30.h, p2/M, z16.h, z24.h\n"
- "fmla z31.h, p2/M, z16.h, z26.h\n"
- "ld1h { z0.h }, p3/Z, [x22, x13, LSL #1]\n"
- "ld1h { z27.h }, p3/Z, [x20, x13, LSL #1]\n"
- "fmla z5.h, p2/M, z16.h, z8.h\n"
- "fmla z29.h, p2/M, z16.h, z17.h\n"
- "ld1h { z16.h }, p2/Z, [x9, #-2, MUL VL]\n"
- "ldr x22, [x16, #0x108]\n"
- "fmla z30.h, p2/M, z21.h, z22.h\n"
- "fmla z31.h, p2/M, z21.h, z19.h\n"
- "ld1h { z26.h }, p3/Z, [x21, x13, LSL #1]\n"
- "ldr x21, [x16, #0x110]\n"
- "fmla z5.h, p2/M, z21.h, z10.h\n"
- "fmla z29.h, p2/M, z21.h, z0.h\n"
- "ld1h { z25.h }, p2/Z, [x9, #-1, MUL VL]\n"
- "fmla z30.h, p2/M, z4.h, z19.h\n"
- "fmla z31.h, p2/M, z4.h, z18.h\n"
- "ld1h { z24.h }, p3/Z, [x27, x13, LSL #1]\n"
- "ldr x20, [x16, #0x118]\n"
- "fmla z5.h, p2/M, z4.h, z0.h\n"
- "fmla z29.h, p2/M, z4.h, z26.h\n"
- "ld1h { z23.h }, p2/Z, [x9]\n"
- "fmla z30.h, p2/M, z6.h, z18.h\n"
- "fmla z31.h, p2/M, z6.h, z9.h\n"
- "ld1h { z22.h }, p3/Z, [x25, x13, LSL #1]\n"
- "fmla z5.h, p2/M, z6.h, z26.h\n"
- "fmla z29.h, p2/M, z6.h, z24.h\n"
- "ld1h { z21.h }, p2/Z, [x9, #1, MUL VL]\n"
- "fmla z30.h, p2/M, z11.h, z9.h\n"
- "fmla z31.h, p2/M, z11.h, z8.h\n"
- "ld1h { z18.h }, p3/Z, [x24, x13, LSL #1]\n"
- "fmla z5.h, p2/M, z11.h, z24.h\n"
- "fmla z29.h, p2/M, z11.h, z27.h\n"
- "ld1h { z20.h }, p2/Z, [x9, #2, MUL VL]\n"
- "fmla z30.h, p2/M, z16.h, z8.h\n"
- "fmla z31.h, p2/M, z16.h, z17.h\n"
- "ld1h { z17.h }, p3/Z, [x26, x13, LSL #1]\n"
- "fmla z5.h, p2/M, z16.h, z27.h\n"
- "fmla z29.h, p2/M, z16.h, z22.h\n"
- "ld1h { z19.h }, p2/Z, [x9, #3, MUL VL]\n"
- "fmla z30.h, p2/M, z25.h, z10.h\n"
- "fmla z31.h, p2/M, z25.h, z0.h\n"
- "ld1h { z16.h }, p3/Z, [x23, x13, LSL #1]\n"
- "fmla z5.h, p2/M, z25.h, z18.h\n"
- "fmla z29.h, p2/M, z25.h, z17.h\n"
- "ld1h { z18.h }, p3/Z, [x22, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z23.h, z0.h\n"
- "fmla z31.h, p2/M, z23.h, z26.h\n"
- "fmla z5.h, p2/M, z23.h, z17.h\n"
- "fmla z29.h, p2/M, z23.h, z16.h\n"
- "ld1h { z17.h }, p3/Z, [x21, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z21.h, z26.h\n"
- "fmla z31.h, p2/M, z21.h, z24.h\n"
- "fmla z5.h, p2/M, z21.h, z16.h\n"
- "fmla z29.h, p2/M, z21.h, z18.h\n"
- "ld1h { z16.h }, p3/Z, [x20, x13, LSL #1]\n"
- "fmla z30.h, p2/M, z20.h, z24.h\n"
- "fmla z31.h, p2/M, z20.h, z27.h\n"
- "fmla z5.h, p2/M, z20.h, z18.h\n"
- "fmla z29.h, p2/M, z20.h, z17.h\n"
- "fmla z30.h, p2/M, z19.h, z27.h\n"
- "fmla z31.h, p2/M, z19.h, z22.h\n"
- "fmax z30.h, p2/M, z30.h, z15.h\n"
- "fmax z31.h, p2/M, z31.h, z15.h\n"
- "fmla z5.h, p2/M, z19.h, z17.h\n"
- "fmla z29.h, p2/M, z19.h, z16.h\n"
- "fmax z5.h, p2/M, z5.h, z15.h\n"
- "fmax z29.h, p2/M, z29.h, z15.h\n"
- "fmin z30.h, p2/M, z30.h, z28.h\n"
- "fmin z31.h, p2/M, z31.h, z28.h\n"
- "st1h { z30.h }, p0, [x15, x28, LSL #1]\n"
- "fmin z5.h, p2/M, z5.h, z28.h\n"
- "fmin z29.h, p2/M, z29.h, z28.h\n"
- "st1h { z31.h }, p0, [x14, x28, LSL #1]\n"
- "st1h { z5.h }, p0, [x12, x28, LSL #1]\n"
- "st1h { z29.h }, p0, [x11, x28, LSL #1]\n"
+ "fmla z16.h, p2/M, z22.h, z26.h\n"
+ "fmla z15.h, p2/M, z22.h, z29.h\n"
+ "fmla z31.h, p2/M, z22.h, z18.h\n"
+ "fmla z5.h, p2/M, z22.h, z21.h\n"
+ "fmla z16.h, p2/M, z20.h, z29.h\n"
+ "fmla z15.h, p2/M, z20.h, z24.h\n"
+ "fmla z31.h, p2/M, z20.h, z21.h\n"
+ "fmla z5.h, p2/M, z20.h, z12.h\n"
+ "fmax z16.h, p2/M, z16.h, z17.h\n"
+ "fmax z15.h, p2/M, z15.h, z17.h\n"
+ "fmax z31.h, p2/M, z31.h, z17.h\n"
+ "fmin z16.h, p2/M, z16.h, z30.h\n"
+ "fmin z15.h, p2/M, z15.h, z30.h\n"
+ "fmax z5.h, p2/M, z5.h, z17.h\n"
+ "fmin z31.h, p2/M, z31.h, z30.h\n"
+ "st1h { z16.h }, p0, [x13, x9, LSL #1]\n"
+ "fmin z5.h, p2/M, z5.h, z30.h\n"
+ "st1h { z15.h }, p0, [x12, x9, LSL #1]\n"
+ "st1h { z31.h }, p0, [x11, x9, LSL #1]\n"
+ "st1h { z5.h }, p0, [x10, x9, LSL #1]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 1bdef85274..6044784ff9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,84 +88,84 @@ void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x10, #0x0\n"
- "mov x14, #0x0\n"
+ "mov x17, #0x0\n"
+ "mov x16, #0x0\n"
"1:" // Tile loop
- "str x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x2\n"
"mov x25, #0x2\n"
- "mov x24, #0x2\n"
- "str x14, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x10, x23\n" // offset = tile_i * ld_input_row
- "ldr x13, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x10, x22\n" // offset = tile_i * ld_output_row
- "cntw x11\n"
- "madd x21, x14, x13, x21\n" // offset += tile_j * ld_input_col
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "cntw x15\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "madd x20, x14, x12, x20\n" // offset += tile_j * ld_output_col
- "ldr x28, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "ld1w { z27.s }, p3/Z, [x10]\n"
- "add x27, x13, x13\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x9, x9, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "ld1w { z0.s }, p3/Z, [x10, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x10, #2, MUL VL]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1w { z2.s }, p3/Z, [x10, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x10, #4, MUL VL]\n"
- "add x26, x9, x23, LSL #2\n"
- "ld1w { z4.s }, p3/Z, [x10, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x10, #6, MUL VL]\n"
- "add x25, x26, x23, LSL #2\n"
- "add x24, x27, x13\n"
- "ld1w { z6.s }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "add x28, x28, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "ld1rw { z26.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "cmp x11, %x[n_channels]\n"
- "add x23, x25, x23, LSL #2\n"
- "ld1rw { z25.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "add x22, x28, x22, LSL #2\n"
- "mov x21, #0x0\n"
- "ld1w { z8.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x26, x13, LSL #2]\n"
- "sub x20, XZR, x11\n"
- "ld1w { z10.s }, p2/Z, [x9]\n"
- "ld1w { z11.s }, p2/Z, [x9, x24, LSL #2]\n"
- "addvl x10, x10, #-6\n"
- "ld1w { z12.s }, p2/Z, [x26, x27, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "mov x12, #0x0\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
+ "ldr x10, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "mul x22, x17, x24\n" // offset = tile_i * ld_input_row
+ "mul x21, x17, x23\n" // offset = tile_i * ld_output_row
+ "ldr x9, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "cmp x15, %x[n_channels]\n"
+ "ld1rw { z27.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x28, x14, x14\n"
+ "ld1rw { z26.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "sub x20, XZR, x15\n"
+ "madd x22, x16, x14, x22\n" // offset += tile_j * ld_input_col
+ "ld1w { z25.s }, p3/Z, [x11]\n"
+ "ld1w { z0.s }, p3/Z, [x11, #1, MUL VL]\n"
+ "add x27, x28, x14\n"
+ "madd x21, x16, x13, x21\n" // offset += tile_j * ld_output_col
+ "ld1w { z1.s }, p3/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x11, #3, MUL VL]\n"
+ "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
+ "ld1w { z3.s }, p3/Z, [x11, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x11, #5, MUL VL]\n"
+ "mul x21, x21, x25\n" // offset *= output_tile_size
+ "ld1w { z5.s }, p3/Z, [x11, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
+ "add x10, x10, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x26, x10, x24, LSL #2\n"
+ "ld1w { z10.s }, p2/Z, [x10]\n"
+ "ld1w { z11.s }, p2/Z, [x10, x27, LSL #2]\n"
+ "add x25, x26, x24, LSL #2\n"
+ "add x9, x9, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x24, x25, x24, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "add x23, x9, x23, LSL #2\n"
+ "ld1w { z7.s }, p3/Z, [x11, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x11, #-7, MUL VL]\n"
+ "addvl x11, x11, #-6\n"
+ "ld1w { z13.s }, p2/Z, [x25, x14, LSL #2]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z24, z27\n fmla z24.s, p3/M, z4.s, z9.s\n"
- "movprfx z23, z27\n fmla z23.s, p3/M, z3.s, z9.s\n"
- "whilelt p1.s, x11, %x[n_channels]\n"
- "incw x21\n"
- "movprfx z22, z27\n fmla z22.s, p3/M, z1.s, z9.s\n"
- "movprfx z21, z27\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "ld1w { z18.s }, p2/Z, [x23]\n"
- "incw x11\n"
+ "movprfx z24, z25\n fmla z24.s, p3/M, z4.s, z9.s\n"
+ "movprfx z23, z25\n fmla z23.s, p3/M, z3.s, z9.s\n"
+ "whilelt p1.s, x15, %x[n_channels]\n"
+ "incw x12\n"
+ "movprfx z22, z25\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "movprfx z21, z25\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z18.s }, p2/Z, [x24]\n"
+ "incw x15\n"
+ "mov p0.b, p2.b\n"
+ "ld1w { z25.s }, p3/Z, [x11]\n"
+ "incw x20\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z20.s }, p2/Z, [x25, x28, LSL #2]\n"
"fmla z23.s, p3/M, z2.s, z11.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x24, LSL #2]\n"
- "ld1w { z20.s }, p2/Z, [x25, x27, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x24, x27, LSL #2]\n"
"fmla z22.s, p3/M, z2.s, z12.s\n"
"fmla z21.s, p3/M, z1.s, z12.s\n"
- "mov p0.b, p2.b\n"
- "ld1w { z27.s }, p3/Z, [x10]\n"
"fmla z24.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x9, x13, LSL #2]\n"
- "incw x20\n"
+ "ld1w { z16.s }, p2/Z, [x10, x14, LSL #2]\n"
"fmla z22.s, p3/M, z6.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x10, x28, LSL #2]\n"
+ "addvl x10, x10, #1\n"
"fmla z21.s, p3/M, z3.s, z13.s\n"
- "ld1w { z18.s }, p2/Z, [x9, x27, LSL #2]\n"
- "addvl x9, x9, #1\n"
"fmla z24.s, p3/M, z7.s, z13.s\n"
"fmla z23.s, p3/M, z6.s, z13.s\n"
"fmla z22.s, p3/M, z4.s, z13.s\n"
@@ -173,102 +173,102 @@ void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"ld1w { z17.s }, p2/Z, [x26]\n"
"fmla z24.s, p3/M, z1.s, z16.s\n"
"fmla z23.s, p3/M, z0.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, x24, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x26, x27, LSL #2]\n"
"addvl x26, x26, #1\n"
"fmla z22.s, p3/M, z5.s, z20.s\n"
"fmla z21.s, p3/M, z4.s, z20.s\n"
- "ld1w { z4.s }, p3/Z, [x10, #5, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x11, #5, MUL VL]\n"
"fmla z24.s, p3/M, z2.s, z18.s\n"
"fmla z23.s, p3/M, z1.s, z18.s\n"
"ld1w { z19.s }, p2/Z, [x25]\n"
- "ld1w { z1.s }, p3/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z1.s }, p3/Z, [x11, #2, MUL VL]\n"
"fmla z22.s, p3/M, z0.s, z17.s\n"
+ "ld1w { z0.s }, p3/Z, [x11, #1, MUL VL]\n"
"fmla z21.s, p3/M, z2.s, z16.s\n"
- "ld1w { z0.s }, p3/Z, [x10, #1, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x11, #3, MUL VL]\n"
"fmla z24.s, p3/M, z8.s, z20.s\n"
"fmla z23.s, p3/M, z7.s, z20.s\n"
- "ld1w { z18.s }, p2/Z, [x25, x24, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x25, x27, LSL #2]\n"
"addvl x25, x25, #1\n"
"fmla z22.s, p3/M, z3.s, z19.s\n"
"fmla z21.s, p3/M, z5.s, z18.s\n"
- "ld1w { z13.s }, p1/Z, [x25, x13, LSL #2]\n"
"fmla z24.s, p3/M, z3.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ld1w { z3.s }, p3/Z, [x11, #4, MUL VL]\n"
+ "ld1w { z13.s }, p1/Z, [x25, x14, LSL #2]\n"
"fmla z23.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "whilelt p2.s, x12, %x[n_channels]\n"
+ "ld1w { z5.s }, p3/Z, [x11, #6, MUL VL]\n"
+ "cmp x15, %x[n_channels]\n"
+ "addvl x24, x24, #1\n"
"fmla z22.s, p3/M, z7.s, z17.s\n"
"fmla z21.s, p3/M, z6.s, z17.s\n"
- "ld1w { z3.s }, p3/Z, [x10, #4, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x10, #6, MUL VL]\n"
+ "ld1w { z11.s }, p1/Z, [x10, x27, LSL #2]\n"
"fmla z24.s, p3/M, z6.s, z19.s\n"
+ "ld1w { z6.s }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
+ "ld1w { z9.s }, p1/Z, [x26, x14, LSL #2]\n"
"fmla z23.s, p3/M, z8.s, z18.s\n"
- "fmax z24.s, p3/M, z24.s, z26.s\n"
- "fmax z23.s, p3/M, z23.s, z26.s\n"
+ "ld1w { z10.s }, p1/Z, [x10]\n"
"fmla z22.s, p3/M, z8.s, z16.s\n"
"fmla z21.s, p3/M, z7.s, z16.s\n"
- "fmax z22.s, p3/M, z22.s, z26.s\n"
- "fmax z21.s, p3/M, z21.s, z26.s\n"
- "ld1w { z6.s }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "ld1w { z9.s }, p1/Z, [x26, x13, LSL #2]\n"
- "cmp x11, %x[n_channels]\n"
- "fmin z24.s, p3/M, z24.s, z25.s\n"
- "ld1w { z10.s }, p1/Z, [x9]\n"
- "ld1w { z11.s }, p1/Z, [x9, x24, LSL #2]\n"
- "fmin z23.s, p3/M, z23.s, z25.s\n"
- "fmin z22.s, p3/M, z22.s, z25.s\n"
- "ld1w { z12.s }, p1/Z, [x26, x27, LSL #2]\n"
- "st1w { z24.s }, p0, [x28]\n"
- "fmin z21.s, p3/M, z21.s, z25.s\n"
+ "ld1w { z12.s }, p1/Z, [x26, x28, LSL #2]\n"
+ "fmax z24.s, p3/M, z24.s, z27.s\n"
+ "ld1w { z7.s }, p3/Z, [x11, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x11, #-7, MUL VL]\n"
+ "addvl x11, x11, #-6\n"
+ "fmax z23.s, p3/M, z23.s, z27.s\n"
+ "fmin z24.s, p3/M, z24.s, z26.s\n"
+ "fmax z22.s, p3/M, z22.s, z27.s\n"
+ "fmax z21.s, p3/M, z21.s, z27.s\n"
+ "fmin z23.s, p3/M, z23.s, z26.s\n"
+ "fmin z22.s, p3/M, z22.s, z26.s\n"
+ "st1w { z24.s }, p0, [x9]\n"
+ "fmin z21.s, p3/M, z21.s, z26.s\n"
+ "st1w { z23.s }, p0, [x9, x13, LSL #2]\n"
+ "addvl x9, x9, #1\n"
+ "st1w { z22.s }, p0, [x23]\n"
+ "st1w { z21.s }, p0, [x23, x13, LSL #2]\n"
"addvl x23, x23, #1\n"
- "st1w { z23.s }, p0, [x28, x12, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "st1w { z22.s }, p0, [x22]\n"
- "addvl x28, x28, #1\n"
- "ld1w { z8.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "addvl x10, x10, #-6\n"
- "st1w { z21.s }, p0, [x22, x12, LSL #2]\n"
- "addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z24, z27\n fmla z24.s, p3/M, z4.s, z9.s\n"
- "movprfx z23, z27\n fmla z23.s, p3/M, z3.s, z9.s\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z22, z27\n fmla z22.s, p3/M, z1.s, z9.s\n"
- "movprfx z21, z27\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "ld1w { z18.s }, p2/Z, [x23]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "movprfx z24, z25\n fmla z24.s, p3/M, z4.s, z9.s\n"
+ "movprfx z23, z25\n fmla z23.s, p3/M, z3.s, z9.s\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z22, z25\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "movprfx z21, z25\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z18.s }, p2/Z, [x24]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "mov p0.b, p2.b\n"
+ "add x16, x16, #0x1\n"
+ "add x20, x17, #0x1\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z20.s }, p2/Z, [x25, x28, LSL #2]\n"
"fmla z23.s, p3/M, z2.s, z11.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x24, LSL #2]\n"
- "ld1w { z20.s }, p2/Z, [x25, x27, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x24, x27, LSL #2]\n"
+ "cmp x16, x22\n"
"fmla z22.s, p3/M, z2.s, z12.s\n"
"fmla z21.s, p3/M, z1.s, z12.s\n"
- "add x14, x14, #0x1\n"
- "cmp x14, x20\n"
+ "csel x17, x17, x20, LT\n"
+ "csel x16, x16, XZR, LT\n"
"fmla z24.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x9, x13, LSL #2]\n"
- "add x21, x10, #0x1\n"
+ "ld1w { z16.s }, p2/Z, [x10, x14, LSL #2]\n"
"fmla z22.s, p3/M, z6.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x10, x28, LSL #2]\n"
"fmla z21.s, p3/M, z3.s, z13.s\n"
- "ld1w { z18.s }, p2/Z, [x9, x27, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "cmp x17, x21\n"
"fmla z24.s, p3/M, z7.s, z13.s\n"
"fmla z23.s, p3/M, z6.s, z13.s\n"
- "csel x10, x10, x21, LT\n"
- "mov p0.b, p2.b\n"
"fmla z22.s, p3/M, z4.s, z13.s\n"
"fmla z21.s, p3/M, z8.s, z17.s\n"
"ld1w { z17.s }, p2/Z, [x26]\n"
- "csel x14, x14, XZR, LT\n"
"fmla z24.s, p3/M, z1.s, z16.s\n"
"fmla z23.s, p3/M, z0.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, x24, LSL #2]\n"
- "cmp x10, x20\n"
+ "ld1w { z16.s }, p2/Z, [x26, x27, LSL #2]\n"
"fmla z22.s, p3/M, z5.s, z20.s\n"
"fmla z21.s, p3/M, z4.s, z20.s\n"
"fmla z24.s, p3/M, z2.s, z18.s\n"
@@ -278,35 +278,35 @@ void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_direct_impl(
"fmla z21.s, p3/M, z2.s, z16.s\n"
"fmla z24.s, p3/M, z8.s, z20.s\n"
"fmla z23.s, p3/M, z7.s, z20.s\n"
- "ld1w { z18.s }, p2/Z, [x25, x24, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x25, x27, LSL #2]\n"
"fmla z22.s, p3/M, z3.s, z19.s\n"
"fmla z21.s, p3/M, z5.s, z18.s\n"
"fmla z24.s, p3/M, z3.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x24, x14, LSL #2]\n"
"fmla z23.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x24, x28, LSL #2]\n"
"fmla z22.s, p3/M, z7.s, z17.s\n"
"fmla z21.s, p3/M, z6.s, z17.s\n"
"fmla z24.s, p3/M, z6.s, z19.s\n"
"fmla z23.s, p3/M, z8.s, z18.s\n"
- "fmax z24.s, p3/M, z24.s, z26.s\n"
- "fmax z23.s, p3/M, z23.s, z26.s\n"
"fmla z22.s, p3/M, z8.s, z16.s\n"
"fmla z21.s, p3/M, z7.s, z16.s\n"
- "fmax z22.s, p3/M, z22.s, z26.s\n"
- "fmax z21.s, p3/M, z21.s, z26.s\n"
- "fmin z24.s, p3/M, z24.s, z25.s\n"
- "fmin z23.s, p3/M, z23.s, z25.s\n"
- "st1w { z24.s }, p0, [x28]\n"
- "fmin z22.s, p3/M, z22.s, z25.s\n"
- "fmin z21.s, p3/M, z21.s, z25.s\n"
- "st1w { z23.s }, p0, [x28, x12, LSL #2]\n"
- "st1w { z22.s }, p0, [x22]\n"
- "st1w { z21.s }, p0, [x22, x12, LSL #2]\n"
+ "fmax z24.s, p3/M, z24.s, z27.s\n"
+ "fmax z23.s, p3/M, z23.s, z27.s\n"
+ "fmin z24.s, p3/M, z24.s, z26.s\n"
+ "fmin z23.s, p3/M, z23.s, z26.s\n"
+ "fmax z22.s, p3/M, z22.s, z27.s\n"
+ "fmax z21.s, p3/M, z21.s, z27.s\n"
+ "st1w { z24.s }, p0, [x9]\n"
+ "st1w { z23.s }, p0, [x9, x13, LSL #2]\n"
+ "fmin z22.s, p3/M, z22.s, z26.s\n"
+ "fmin z21.s, p3/M, z21.s, z26.s\n"
+ "st1w { z22.s }, p0, [x23]\n"
+ "st1w { z21.s }, p0, [x23, x13, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
);
}
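A minimal C++ sketch (not taken from the patch; names are illustrative) of the branchless tile-loop bookkeeping the channel tail above implements with ADD/CMP/CSEL: tile_j is incremented, compared against n_tile_cols, wraps to zero and carries into tile_i, which the final cmp/blt tests against n_tile_rows.

#include <cstddef>

struct TileCursor // illustrative mirror of the Args fields read via offsetof
{
    std::size_t tile_i, tile_j;
    std::size_t n_tile_rows, n_tile_cols;
};

// Mirrors: add tile_j, #1; cmp tile_j, n_tile_cols;
//          csel tile_i, tile_i, tile_i+1, LT; csel tile_j, tile_j, XZR, LT;
//          cmp tile_i, n_tile_rows; blt 1b
inline bool advance_tile(TileCursor &c)
{
    const std::size_t next_j = c.tile_j + 1;
    const bool in_row = next_j < c.n_tile_cols;  // the LT condition
    c.tile_i = in_row ? c.tile_i : c.tile_i + 1; // carry into the row index
    c.tile_j = in_row ? next_j : 0;              // wrap the column index
    return c.tile_i < c.n_tile_rows;             // loop back to "1:" while true
}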
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index 873b4736ff..4b100a9b21 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -83,210 +83,210 @@ void sve_fp32_nhwc_3x3_s1_output2x2_mla_depthfirst_indirect_impl(
"ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
"add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
"cntw x14\n"
- "ldp x13, x12, [x20, #0x0]\n"
- "ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
+ "mov x13, #0x0\n"
+ "ldr x24, [x15, #0x20]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z20.s }, p3/Z, [x16]\n"
+ "ldp x12, x11, [x20, #0x0]\n"
+ "ldp x10, x9, [x20, #0x10]\n"
+ "ld1w { z27.s }, p3/Z, [x16]\n"
"ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
- "cmp x14, %x[n_channels]\n"
"ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
"ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
+ "cmp x14, %x[n_channels]\n"
"sub x28, XZR, x14\n"
"ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
"ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
"ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
"ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
"addvl x16, x16, #16\n"
- "ldp x24, x23, [x15, #0x0]\n"
- "ldp x22, x21, [x15, #0x10]\n"
- "ldr x20, [x15, #0x20]\n"
+ "ldp x23, x22, [x15, #0x0]\n"
+ "ldp x21, x20, [x15, #0x10]\n"
"ld1rw { z26.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
"ld1rw { z25.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
"ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
"ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x24, x9, LSL #2]\n"
"addvl x16, x16, #-6\n"
- "ld1w { z10.s }, p2/Z, [x23, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z9.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x24, x13, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z24, z20\n fmla z24.s, p3/M, z4.s, z9.s\n"
- "movprfx z23, z20\n fmla z23.s, p3/M, z3.s, z9.s\n"
+ "movprfx z24, z27\n fmla z24.s, p3/M, z4.s, z9.s\n"
+ "movprfx z23, z27\n fmla z23.s, p3/M, z3.s, z9.s\n"
"ldr x21, [x15, #0x28]\n"
- "ldr x20, [x15, #0x30]\n"
- "movprfx z22, z20\n fmla z22.s, p3/M, z1.s, z9.s\n"
- "movprfx z21, z20\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "ld1w { z18.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x22, [x15, #0x38]\n"
+ "ldr x25, [x15, #0x30]\n"
+ "movprfx z22, z27\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "movprfx z21, z27\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "ldr x24, [x15, #0x38]\n"
+ "ldr x20, [x15, #0x48]\n"
+ "ldr x23, [x15, #0x40]\n"
+ "ldr x22, [x15, #0x50]\n"
+ "whilelt p1.s, x14, %x[n_channels]\n"
+ "incw x28\n"
+ "ld1w { z18.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ldr x21, [x15, #0x58]\n"
+ "mov p0.b, p2.b\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
"fmla z23.s, p3/M, z2.s, z11.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0x48]\n"
+ "ld1w { z17.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "ld1w { z20.s }, p2/Z, [x20, x13, LSL #2]\n"
"fmla z22.s, p3/M, z2.s, z12.s\n"
"fmla z21.s, p3/M, z1.s, z12.s\n"
- "ldr x20, [x15, #0x40]\n"
- "ld1w { z20.s }, p2/Z, [x21, x9, LSL #2]\n"
+ "ldr x20, [x15, #0x60]\n"
+ "ldr x27, [x15, #0x68]\n"
+ "ldr x26, [x15, #0x70]\n"
+ "ld1w { z27.s }, p3/Z, [x16]\n"
"fmla z24.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x22, [x15, #0x50]\n"
+ "ld1w { z16.s }, p2/Z, [x24, x13, LSL #2]\n"
+ "ldr x25, [x15, #0x78]\n"
"fmla z22.s, p3/M, z6.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x23, x13, LSL #2]\n"
+ "ldp x24, x23, [x15, #0x0]\n"
"fmla z21.s, p3/M, z3.s, z13.s\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0x58]\n"
"fmla z24.s, p3/M, z7.s, z13.s\n"
"fmla z23.s, p3/M, z6.s, z13.s\n"
- "ldr x20, [x15, #0x60]\n"
- "ldr x27, [x15, #0x68]\n"
"fmla z22.s, p3/M, z4.s, z13.s\n"
"fmla z21.s, p3/M, z8.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x26, [x15, #0x70]\n"
+ "ld1w { z17.s }, p2/Z, [x22, x13, LSL #2]\n"
"fmla z24.s, p3/M, z1.s, z16.s\n"
"fmla z23.s, p3/M, z0.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x25, [x15, #0x78]\n"
+ "ld1w { z16.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ldp x22, x21, [x15, #0x10]\n"
"fmla z22.s, p3/M, z5.s, z20.s\n"
"fmla z21.s, p3/M, z4.s, z20.s\n"
- "whilelt p1.s, x14, %x[n_channels]\n"
- "ldp x24, x23, [x15, #0x0]\n"
+ "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
"fmla z24.s, p3/M, z2.s, z18.s\n"
"fmla z23.s, p3/M, z1.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldp x22, x21, [x15, #0x10]\n"
+ "ld1w { z19.s }, p2/Z, [x20, x13, LSL #2]\n"
+ "ldr x20, [x15, #0x20]\n"
+ "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
"fmla z22.s, p3/M, z0.s, z17.s\n"
+ "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
"fmla z21.s, p3/M, z2.s, z16.s\n"
- "ldr x20, [x15, #0x20]\n"
- "ld1w { z13.s }, p1/Z, [x20, x14, LSL #2]\n"
+ "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
"fmla z24.s, p3/M, z8.s, z20.s\n"
+ "ld1w { z13.s }, p1/Z, [x20, x14, LSL #2]\n"
"fmla z23.s, p3/M, z7.s, z20.s\n"
- "ld1w { z18.s }, p2/Z, [x27, x9, LSL #2]\n"
- "incw x28\n"
+ "ld1w { z18.s }, p2/Z, [x27, x13, LSL #2]\n"
"fmla z22.s, p3/M, z3.s, z19.s\n"
"fmla z21.s, p3/M, z5.s, z18.s\n"
- "mov p0.b, p2.b\n"
- "ld1w { z20.s }, p3/Z, [x16]\n"
"fmla z24.s, p3/M, z3.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x26, x13, LSL #2]\n"
+ "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
"fmla z23.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x25, x13, LSL #2]\n"
+ "incw x13\n"
+ "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
"fmla z22.s, p3/M, z7.s, z17.s\n"
"fmla z21.s, p3/M, z6.s, z17.s\n"
- "incw x9\n"
"ld1w { z11.s }, p1/Z, [x22, x14, LSL #2]\n"
"fmla z24.s, p3/M, z6.s, z19.s\n"
- "fmla z23.s, p3/M, z8.s, z18.s\n"
"ld1w { z9.s }, p1/Z, [x24, x14, LSL #2]\n"
+ "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
+ "addvl x16, x16, #16\n"
+ "fmla z23.s, p3/M, z8.s, z18.s\n"
"ld1w { z10.s }, p1/Z, [x23, x14, LSL #2]\n"
+ "whilelt p2.s, x13, %x[n_channels]\n"
"fmla z22.s, p3/M, z8.s, z16.s\n"
"fmla z21.s, p3/M, z7.s, z16.s\n"
"ld1w { z12.s }, p1/Z, [x21, x14, LSL #2]\n"
"incw x14\n"
"fmax z24.s, p3/M, z24.s, z26.s\n"
+ "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
+ "addvl x16, x16, #-6\n"
"fmax z23.s, p3/M, z23.s, z26.s\n"
- "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
"fmax z22.s, p3/M, z22.s, z26.s\n"
"fmax z21.s, p3/M, z21.s, z26.s\n"
- "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
- "whilelt p2.s, x9, %x[n_channels]\n"
"cmp x14, %x[n_channels]\n"
- "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
"fmin z24.s, p3/M, z24.s, z25.s\n"
- "st1w { z24.s }, p0, [x13, x28, LSL #2]\n"
"fmin z23.s, p3/M, z23.s, z25.s\n"
"fmin z22.s, p3/M, z22.s, z25.s\n"
- "st1w { z23.s }, p0, [x12, x28, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
"fmin z21.s, p3/M, z21.s, z25.s\n"
- "st1w { z22.s }, p0, [x11, x28, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
- "st1w { z21.s }, p0, [x10, x28, LSL #2]\n"
+ "st1w { z24.s }, p0, [x12, x28, LSL #2]\n"
+ "st1w { z23.s }, p0, [x11, x28, LSL #2]\n"
+ "st1w { z22.s }, p0, [x10, x28, LSL #2]\n"
+ "st1w { z21.s }, p0, [x9, x28, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z24, z20\n fmla z24.s, p3/M, z4.s, z9.s\n"
- "movprfx z23, z20\n fmla z23.s, p3/M, z3.s, z9.s\n"
- "ldr x21, [x15, #0x28]\n"
- "ldr x20, [x15, #0x30]\n"
- "movprfx z22, z20\n fmla z22.s, p3/M, z1.s, z9.s\n"
- "movprfx z21, z20\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "ld1w { z18.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x22, [x15, #0x38]\n"
+ "movprfx z24, z27\n fmla z24.s, p3/M, z4.s, z9.s\n"
+ "movprfx z23, z27\n fmla z23.s, p3/M, z3.s, z9.s\n"
+ "ldr x22, [x15, #0x28]\n"
+ "ldr x21, [x15, #0x30]\n"
+ "movprfx z22, z27\n fmla z22.s, p3/M, z1.s, z9.s\n"
+ "movprfx z21, z27\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "ldr x27, [x15, #0x38]\n"
+ "ldr x20, [x15, #0x48]\n"
+ "ldr x26, [x15, #0x40]\n"
+ "ldr x25, [x15, #0x50]\n"
+ "incw x28\n"
+ "mov p0.b, p2.b\n"
+ "ld1w { z18.s }, p2/Z, [x22, x13, LSL #2]\n"
+ "ldr x24, [x15, #0x58]\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
"fmla z23.s, p3/M, z2.s, z11.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0x48]\n"
+ "ld1w { z17.s }, p2/Z, [x21, x13, LSL #2]\n"
+ "ld1w { z20.s }, p2/Z, [x20, x13, LSL #2]\n"
"fmla z22.s, p3/M, z2.s, z12.s\n"
"fmla z21.s, p3/M, z1.s, z12.s\n"
- "ldr x20, [x15, #0x40]\n"
- "ld1w { z20.s }, p2/Z, [x21, x9, LSL #2]\n"
+ "ldr x23, [x15, #0x60]\n"
+ "ldr x22, [x15, #0x68]\n"
+ "ldr x21, [x15, #0x70]\n"
"fmla z24.s, p3/M, z5.s, z12.s\n"
"fmla z23.s, p3/M, z4.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x21, [x15, #0x50]\n"
+ "ld1w { z16.s }, p2/Z, [x27, x13, LSL #2]\n"
+ "ldr x20, [x15, #0x78]\n"
"fmla z22.s, p3/M, z6.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x26, x13, LSL #2]\n"
"fmla z21.s, p3/M, z3.s, z13.s\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x15, #0x58]\n"
"fmla z24.s, p3/M, z7.s, z13.s\n"
"fmla z23.s, p3/M, z6.s, z13.s\n"
- "ldr x23, [x15, #0x60]\n"
- "ldr x22, [x15, #0x68]\n"
"fmla z22.s, p3/M, z4.s, z13.s\n"
"fmla z21.s, p3/M, z8.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ldr x21, [x15, #0x70]\n"
+ "ld1w { z17.s }, p2/Z, [x25, x13, LSL #2]\n"
"fmla z24.s, p3/M, z1.s, z16.s\n"
"fmla z23.s, p3/M, z0.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ld1w { z16.s }, p2/Z, [x24, x13, LSL #2]\n"
"fmla z22.s, p3/M, z5.s, z20.s\n"
"fmla z21.s, p3/M, z4.s, z20.s\n"
- "incw x28\n"
- "mov p0.b, p2.b\n"
"fmla z24.s, p3/M, z2.s, z18.s\n"
"fmla z23.s, p3/M, z1.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z19.s }, p2/Z, [x23, x13, LSL #2]\n"
"fmla z22.s, p3/M, z0.s, z17.s\n"
"fmla z21.s, p3/M, z2.s, z16.s\n"
"fmla z24.s, p3/M, z8.s, z20.s\n"
"fmla z23.s, p3/M, z7.s, z20.s\n"
- "ld1w { z18.s }, p2/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x22, x13, LSL #2]\n"
"fmla z22.s, p3/M, z3.s, z19.s\n"
"fmla z21.s, p3/M, z5.s, z18.s\n"
"fmla z24.s, p3/M, z3.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x21, x13, LSL #2]\n"
"fmla z23.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x20, x13, LSL #2]\n"
"fmla z22.s, p3/M, z7.s, z17.s\n"
"fmla z21.s, p3/M, z6.s, z17.s\n"
"fmla z24.s, p3/M, z6.s, z19.s\n"
"fmla z23.s, p3/M, z8.s, z18.s\n"
- "fmax z24.s, p3/M, z24.s, z26.s\n"
- "fmax z23.s, p3/M, z23.s, z26.s\n"
"fmla z22.s, p3/M, z8.s, z16.s\n"
"fmla z21.s, p3/M, z7.s, z16.s\n"
- "fmax z22.s, p3/M, z22.s, z26.s\n"
- "fmax z21.s, p3/M, z21.s, z26.s\n"
+ "fmax z24.s, p3/M, z24.s, z26.s\n"
+ "fmax z23.s, p3/M, z23.s, z26.s\n"
"fmin z24.s, p3/M, z24.s, z25.s\n"
"fmin z23.s, p3/M, z23.s, z25.s\n"
- "st1w { z24.s }, p0, [x13, x28, LSL #2]\n"
+ "fmax z22.s, p3/M, z22.s, z26.s\n"
+ "fmax z21.s, p3/M, z21.s, z26.s\n"
+ "st1w { z24.s }, p0, [x12, x28, LSL #2]\n"
+ "st1w { z23.s }, p0, [x11, x28, LSL #2]\n"
"fmin z22.s, p3/M, z22.s, z25.s\n"
"fmin z21.s, p3/M, z21.s, z25.s\n"
- "st1w { z23.s }, p0, [x12, x28, LSL #2]\n"
- "st1w { z22.s }, p0, [x11, x28, LSL #2]\n"
- "st1w { z21.s }, p0, [x10, x28, LSL #2]\n"
+ "st1w { z22.s }, p0, [x10, x28, LSL #2]\n"
+ "st1w { z21.s }, p0, [x9, x28, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
);
}
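A scalar C++ model (not taken from the patch; names are illustrative) of what one accumulator chain in the indirect kernel above computes per vector lane: start from the bias (the movprfx source register), then fmla over the nine 3x3 taps, reading each input point through the precomputed Args::inptrs pointer table rather than via row/column strides. The real kernel computes a 2x2 output tile vectorised across channels.

#include <cstddef>

inline float conv3x3_at(const float *const *inptrs, // per-point pointer table
                        const float *weights,       // z0..z8 in the assembly
                        float bias,                 // z27 in the assembly
                        std::size_t channel)        // the x13 index, LSL #2 scaled
{
    float acc = bias;
    for (std::size_t tap = 0; tap < 9; ++tap)
        acc += weights[tap] * inptrs[tap][channel]; // one ld1w + fmla per tap
    return acc;
}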
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
index 015d0e63c2..17a8933c3f 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,369 +88,369 @@ void sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x13, #0x0\n"
- "mov x8, #0x0\n"
+ "mov x5, #0x0\n"
+ "mov x6, #0x0\n"
"1:" // Tile loop
- "str x13, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x3\n"
"mov x25, #0x3\n"
- "mov x24, #0x3\n"
- "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x13, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
- "ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cntw x15\n"
- "mul x20, x13, x21\n" // offset = tile_i * ld_output_row
- "ldr x14, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x12, x17, x17\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x14, x14, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x10, x14, x23, LSL #2\n"
- "madd x20, x8, x16, x20\n" // offset += tile_j * ld_output_col
- "add x9, x10, x23, LSL #2\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "cntw x8\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z14.s }, p3/Z, [x13]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1w { z0.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x13, #2, MUL VL]\n"
- "add x28, x9, x23, LSL #2\n"
- "ld1w { z2.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x13, #4, MUL VL]\n"
- "add x27, x12, x17\n"
- "add x11, x11, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "ld1w { z4.s }, p3/Z, [x13, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x13, #6, MUL VL]\n"
- "add x26, x28, x23, LSL #2\n"
- "add x25, x27, x17\n"
- "ld1w { z6.s }, p3/Z, [x13, #7, MUL VL]\n"
- "addvl x13, x13, #16\n"
- "add x24, x11, x21, LSL #2\n"
- "ld1rw { z31.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "cmp x15, %x[n_channels]\n"
- "add x23, x24, x21, LSL #2\n"
- "ld1rw { z30.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x13, #-8, MUL VL]\n"
- "add x22, x16, x16\n"
- "mov x21, #0x0\n"
- "ld1w { z8.s }, p3/Z, [x13, #-7, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x9, x12, LSL #2]\n"
- "sub x20, XZR, x15\n"
- "ld1w { z10.s }, p2/Z, [x14]\n"
- "ld1w { z11.s }, p2/Z, [x14, x25, LSL #2]\n"
- "addvl x13, x13, #-6\n"
- "ld1w { z12.s }, p2/Z, [x26]\n"
- "ld1w { z13.s }, p2/Z, [x10, x12, LSL #2]\n"
+ "mov x16, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x14, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x22, x5, x24\n" // offset = tile_i * ld_input_row
+ "ldr x13, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x12, x7, x7\n"
+ "cmp x8, %x[n_channels]\n"
+ "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mul x21, x5, x23\n" // offset = tile_i * ld_output_row
+ "add x11, x12, x7\n"
+ "add x10, x17, x17\n"
+ "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "madd x22, x6, x7, x22\n" // offset += tile_j * ld_input_col
+ "ld1w { z31.s }, p3/Z, [x14]\n"
+ "ld1w { z0.s }, p3/Z, [x14, #1, MUL VL]\n"
+ "add x9, x11, x7\n"
+ "ld1w { z1.s }, p3/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x14, #3, MUL VL]\n"
+ "sub x20, XZR, x8\n"
+ "madd x21, x6, x17, x21\n" // offset += tile_j * ld_output_col
+ "ld1w { z3.s }, p3/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x14, #5, MUL VL]\n"
+ "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
+ "ld1w { z5.s }, p3/Z, [x14, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x14, #7, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "mul x21, x21, x25\n" // offset *= output_tile_size
+ "add x15, x15, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x28, x15, x24, LSL #2\n"
+ "add x27, x28, x24, LSL #2\n"
+ "ld1w { z10.s }, p2/Z, [x15]\n"
+ "ld1w { z11.s }, p2/Z, [x15, x9, LSL #2]\n"
+ "add x26, x27, x24, LSL #2\n"
+ "add x13, x13, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x25, x26, x24, LSL #2\n"
+ "ld1w { z7.s }, p3/Z, [x14, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x14, #-7, MUL VL]\n"
+ "add x24, x13, x23, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x27, x12, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x25]\n"
+ "addvl x14, x14, #-6\n"
+ "add x23, x24, x23, LSL #2\n"
+ "ld1w { z13.s }, p2/Z, [x28, x12, LSL #2]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z29, z14\n fmla z29.s, p3/M, z7.s, z9.s\n"
- "movprfx z28, z14\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "whilelt p1.s, x15, %x[n_channels]\n"
- "incw x21\n"
- "movprfx z27, z14\n fmla z27.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z4.s, z13.s\n"
- "incw x15\n"
+ "movprfx z30, z31\n fmla z30.s, p3/M, z7.s, z9.s\n"
+ "movprfx z29, z31\n fmla z29.s, p3/M, z8.s, z9.s\n"
+ "whilelt p1.s, x8, %x[n_channels]\n"
+ "incw x16\n"
+ "movprfx z28, z31\n fmla z28.s, p3/M, z6.s, z9.s\n"
+ "movprfx z27, z31\n fmla z27.s, p3/M, z5.s, z9.s\n"
+ "incw x8\n"
"mov p0.b, p2.b\n"
- "movprfx z26, z14\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "movprfx z25, z14\n fmla z25.s, p3/M, z4.s, z9.s\n"
+ "movprfx z26, z31\n fmla z26.s, p3/M, z4.s, z9.s\n"
+ "movprfx z25, z31\n fmla z25.s, p3/M, z3.s, z9.s\n"
"incw x20\n"
- "movprfx z24, z14\n fmla z24.s, p3/M, z3.s, z9.s\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "ld1w { z23.s }, p2/Z, [x9, x27, LSL #2]\n"
- "fmla z27.s, p3/M, z2.s, z11.s\n"
- "ld1w { z18.s }, p2/Z, [x9, x17, LSL #2]\n"
- "movprfx z22, z14\n fmla z22.s, p3/M, z2.s, z9.s\n"
- "fmla z29.s, p3/M, z6.s, z18.s\n"
- "movprfx z21, z14\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "fmla z28.s, p3/M, z5.s, z13.s\n"
- "fmla z27.s, p3/M, z3.s, z13.s\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
- "fmla z25.s, p3/M, z1.s, z13.s\n"
- "fmla z24.s, p3/M, z0.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x14, x17, LSL #2]\n"
- "fmla z22.s, p3/M, z6.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x26, x25, LSL #2]\n"
- "movprfx z20, z14\n fmla z20.s, p3/M, z1.s, z9.s\n"
- "fmla z29.s, p3/M, z0.s, z17.s\n"
- "ld1w { z14.s }, p3/Z, [x13]\n"
- "fmla z21.s, p3/M, z8.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x27, LSL #2]\n"
- "fmla z28.s, p3/M, z7.s, z18.s\n"
- "fmla z20.s, p3/M, z0.s, z18.s\n"
- "fmla z26.s, p3/M, z4.s, z18.s\n"
- "fmla z25.s, p3/M, z3.s, z18.s\n"
- "fmla z22.s, p3/M, z1.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x10]\n"
- "fmla z29.s, p3/M, z2.s, z16.s\n"
- "fmla z27.s, p3/M, z1.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x28]\n"
- "fmla z24.s, p3/M, z4.s, z23.s\n"
- "fmla z28.s, p3/M, z1.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x10, x25, LSL #2]\n"
- "fmla z20.s, p3/M, z2.s, z23.s\n"
- "fmla z21.s, p3/M, z1.s, z23.s\n"
- "fmla z29.s, p3/M, z8.s, z23.s\n"
- "fmla z27.s, p3/M, z7.s, z23.s\n"
- "fmla z25.s, p3/M, z5.s, z23.s\n"
- "fmla z26.s, p3/M, z0.s, z19.s\n"
- "ld1w { z17.s }, p2/Z, [x28, x12, LSL #2]\n"
- "fmla z22.s, p3/M, z3.s, z18.s\n"
- "fmla z24.s, p3/M, z2.s, z16.s\n"
- "fmla z20.s, p3/M, z4.s, z17.s\n"
- "fmla z21.s, p3/M, z3.s, z17.s\n"
- "fmla z28.s, p3/M, z3.s, z19.s\n"
- "fmla z27.s, p3/M, z5.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x28, x25, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x26, x17, LSL #2]\n"
- "fmla z26.s, p3/M, z6.s, z18.s\n"
- "fmla z25.s, p3/M, z7.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x10, x17, LSL #2]\n"
- "fmla z22.s, p3/M, z5.s, z17.s\n"
- "fmla z24.s, p3/M, z6.s, z17.s\n"
- "fmla z21.s, p3/M, z5.s, z19.s\n"
- "fmla z20.s, p3/M, z6.s, z16.s\n"
- "fmla z26.s, p3/M, z8.s, z17.s\n"
- "fmla z22.s, p3/M, z7.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x26, x27, LSL #2]\n"
- "fmla z29.s, p3/M, z3.s, z18.s\n"
- "fmla z25.s, p3/M, z0.s, z18.s\n"
- "fmla z24.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x10, x27, LSL #2]\n"
- "fmla z20.s, p3/M, z8.s, z17.s\n"
- "addvl x10, x10, #1\n"
- "fmla z21.s, p3/M, z7.s, z17.s\n"
- "fmla z28.s, p3/M, z4.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x28, x27, LSL #2]\n"
- "fmla z26.s, p3/M, z1.s, z18.s\n"
- "fmla z29.s, p3/M, z5.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x28, x17, LSL #2]\n"
- "addvl x28, x28, #1\n"
- "fmla z27.s, p3/M, z4.s, z16.s\n"
+ "movprfx z24, z31\n fmla z24.s, p3/M, z2.s, z9.s\n"
+ "movprfx z23, z31\n fmla z23.s, p3/M, z0.s, z9.s\n"
+ "fmla z30.s, p3/M, z4.s, z13.s\n"
+ "fmla z29.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z22.s }, p2/Z, [x27, x11, LSL #2]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x7, LSL #2]\n"
+ "fmla z27.s, p3/M, z2.s, z13.s\n"
+ "fmla z26.s, p3/M, z1.s, z13.s\n"
+ "fmla z25.s, p3/M, z0.s, z13.s\n"
+ "fmla z24.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "movprfx z21, z31\n fmla z21.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z31.s }, p3/Z, [x14]\n"
+ "fmla z30.s, p3/M, z6.s, z17.s\n"
+ "fmla z29.s, p3/M, z5.s, z13.s\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, x7, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z17.s\n"
+ "fmla z23.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, x11, LSL #2]\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
+ "fmla z21.s, p3/M, z0.s, z17.s\n"
+ "fmla z24.s, p3/M, z1.s, z17.s\n"
+ "fmla z30.s, p3/M, z0.s, z18.s\n"
+ "fmla z29.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z20.s }, p2/Z, [x28]\n"
+ "fmla z28.s, p3/M, z1.s, z16.s\n"
+ "fmla z25.s, p3/M, z4.s, z22.s\n"
+ "fmla z23.s, p3/M, z1.s, z22.s\n"
+ "fmla z26.s, p3/M, z5.s, z22.s\n"
+ "fmla z21.s, p3/M, z2.s, z22.s\n"
+ "fmla z27.s, p3/M, z0.s, z20.s\n"
+ "fmla z30.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x26]\n"
+ "fmla z29.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x9, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z22.s\n"
+ "fmla z24.s, p3/M, z3.s, z17.s\n"
"fmla z25.s, p3/M, z2.s, z16.s\n"
- "fmla z24.s, p3/M, z1.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x12, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z17.s\n"
- "addvl x14, x14, #1\n"
- "fmla z20.s, p3/M, z3.s, z17.s\n"
- "fmla z21.s, p3/M, z4.s, z19.s\n"
- "ld1w { z4.s }, p3/Z, [x13, #5, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x14]\n"
- "fmla z26.s, p3/M, z7.s, z17.s\n"
- "fmla z25.s, p3/M, z6.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x9]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z1.s, z16.s\n"
- "fmax z29.s, p3/M, z29.s, z31.s\n"
- "ld1w { z1.s }, p3/Z, [x13, #2, MUL VL]\n"
- "fmla z27.s, p3/M, z0.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x9, x25, LSL #2]\n"
- "fmla z24.s, p3/M, z7.s, z19.s\n"
- "addvl x9, x9, #1\n"
- "fmla z20.s, p3/M, z5.s, z19.s\n"
- "fmla z22.s, p3/M, z0.s, z18.s\n"
- "ld1w { z0.s }, p3/Z, [x13, #1, MUL VL]\n"
- "fmin z29.s, p3/M, z29.s, z30.s\n"
- "fmla z21.s, p3/M, z2.s, z17.s\n"
- "fmla z25.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x26, x12, LSL #2]\n"
- "fmax z25.s, p3/M, z25.s, z31.s\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "fmla z26.s, p3/M, z3.s, z18.s\n"
- "fmax z28.s, p3/M, z28.s, z31.s\n"
- "fmax z26.s, p3/M, z26.s, z31.s\n"
- "fmla z27.s, p3/M, z8.s, z17.s\n"
- "fmla z24.s, p3/M, z5.s, z17.s\n"
- "fmax z27.s, p3/M, z27.s, z31.s\n"
- "fmax z24.s, p3/M, z24.s, z31.s\n"
- "fmla z22.s, p3/M, z8.s, z16.s\n"
- "fmla z20.s, p3/M, z7.s, z16.s\n"
- "fmax z22.s, p3/M, z22.s, z31.s\n"
- "fmax z20.s, p3/M, z20.s, z31.s\n"
+ "fmla z27.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z19.s }, p2/Z, [x28, x7, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z22.s\n"
+ "ld1w { z18.s }, p2/Z, [x26, x12, LSL #2]\n"
+ "fmla z29.s, p3/M, z3.s, z20.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x7, LSL #2]\n"
+ "fmla z21.s, p3/M, z4.s, z18.s\n"
+ "fmla z23.s, p3/M, z3.s, z18.s\n"
+ "fmla z26.s, p3/M, z7.s, z18.s\n"
+ "fmla z24.s, p3/M, z5.s, z18.s\n"
+ "fmla z25.s, p3/M, z6.s, z18.s\n"
+ "fmla z27.s, p3/M, z8.s, z18.s\n"
+ "fmla z30.s, p3/M, z3.s, z19.s\n"
"fmla z21.s, p3/M, z6.s, z16.s\n"
- "fmax z21.s, p3/M, z21.s, z31.s\n"
+ "fmla z29.s, p3/M, z4.s, z19.s\n"
+ "fmla z23.s, p3/M, z5.s, z17.s\n"
+ "fmla z26.s, p3/M, z0.s, z19.s\n"
+ "fmla z24.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x25, x11, LSL #2]\n"
+ "fmla z25.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x11, LSL #2]\n"
+ "fmla z27.s, p3/M, z1.s, z19.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x7, LSL #2]\n"
+ "addvl x28, x28, #1\n"
+ "fmla z21.s, p3/M, z8.s, z18.s\n"
+ "fmla z23.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x26, x11, LSL #2]\n"
"addvl x26, x26, #1\n"
- "ld1w { z2.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x13, #4, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x13, #6, MUL VL]\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "cmp x15, %x[n_channels]\n"
- "ld1w { z6.s }, p3/Z, [x13, #7, MUL VL]\n"
- "addvl x13, x13, #16\n"
- "fmin z28.s, p3/M, z28.s, z30.s\n"
- "ld1w { z9.s }, p1/Z, [x9, x12, LSL #2]\n"
- "fmin z27.s, p3/M, z27.s, z30.s\n"
- "fmin z26.s, p3/M, z26.s, z30.s\n"
- "ld1w { z11.s }, p1/Z, [x14, x25, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x26]\n"
- "fmin z25.s, p3/M, z25.s, z30.s\n"
- "fmin z24.s, p3/M, z24.s, z30.s\n"
- "ld1w { z13.s }, p1/Z, [x10, x12, LSL #2]\n"
- "st1w { z28.s }, p0, [x11]\n"
- "fmin z22.s, p3/M, z22.s, z30.s\n"
- "fmin z20.s, p3/M, z20.s, z30.s\n"
- "st1w { z29.s }, p0, [x11, x16, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x13, #-8, MUL VL]\n"
- "fmin z21.s, p3/M, z21.s, z30.s\n"
- "st1w { z27.s }, p0, [x11, x22, LSL #2]\n"
- "addvl x11, x11, #1\n"
- "ld1w { z8.s }, p3/Z, [x13, #-7, MUL VL]\n"
- "st1w { z26.s }, p0, [x24]\n"
- "addvl x13, x13, #-6\n"
- "st1w { z25.s }, p0, [x24, x16, LSL #2]\n"
- "st1w { z24.s }, p0, [x24, x22, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z16.s\n"
+ "fmla z28.s, p3/M, z4.s, z16.s\n"
+ "fmla z26.s, p3/M, z2.s, z16.s\n"
+ "fmla z25.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, x12, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z17.s\n"
+ "addvl x15, x15, #1\n"
+ "fmla z21.s, p3/M, z3.s, z17.s\n"
+ "fmla z27.s, p3/M, z7.s, z17.s\n"
+ "fmla z23.s, p3/M, z4.s, z19.s\n"
+ "ld1w { z4.s }, p3/Z, [x14, #5, MUL VL]\n"
+ "fmla z26.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x27]\n"
+ "fmla z29.s, p3/M, z2.s, z16.s\n"
+ "fmla z30.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z1.s }, p3/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z10.s }, p1/Z, [x15]\n"
+ "fmla z28.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x9, LSL #2]\n"
+ "fmla z25.s, p3/M, z7.s, z19.s\n"
+ "addvl x27, x27, #1\n"
+ "fmla z21.s, p3/M, z5.s, z19.s\n"
+ "fmla z24.s, p3/M, z0.s, z18.s\n"
+ "ld1w { z0.s }, p3/Z, [x14, #1, MUL VL]\n"
+ "fmla z26.s, p3/M, z8.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x12, LSL #2]\n"
+ "fmla z27.s, p3/M, z3.s, z18.s\n"
+ "addvl x25, x25, #1\n"
+ "fmla z23.s, p3/M, z2.s, z17.s\n"
+ "fmla z29.s, p3/M, z6.s, z18.s\n"
+ "fmax z30.s, p3/M, z30.s, z15.s\n"
+ "ld1w { z2.s }, p3/Z, [x14, #3, MUL VL]\n"
+ "fmla z28.s, p3/M, z8.s, z17.s\n"
+ "fmla z25.s, p3/M, z5.s, z17.s\n"
+ "ld1w { z3.s }, p3/Z, [x14, #4, MUL VL]\n"
+ "ld1w { z5.s }, p3/Z, [x14, #6, MUL VL]\n"
+ "fmla z24.s, p3/M, z8.s, z16.s\n"
+ "fmla z21.s, p3/M, z7.s, z16.s\n"
+ "whilelt p2.s, x16, %x[n_channels]\n"
+ "cmp x8, %x[n_channels]\n"
+ "fmax z27.s, p3/M, z27.s, z15.s\n"
+ "fmax z26.s, p3/M, z26.s, z15.s\n"
+ "ld1w { z9.s }, p1/Z, [x27, x12, LSL #2]\n"
+ "ld1w { z11.s }, p1/Z, [x15, x9, LSL #2]\n"
+ "fmla z23.s, p3/M, z6.s, z16.s\n"
+ "fmax z29.s, p3/M, z29.s, z15.s\n"
+ "ld1w { z6.s }, p3/Z, [x14, #7, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "fmax z28.s, p3/M, z28.s, z15.s\n"
+ "fmax z25.s, p3/M, z25.s, z15.s\n"
+ "ld1w { z12.s }, p1/Z, [x25]\n"
+ "ld1w { z13.s }, p1/Z, [x28, x12, LSL #2]\n"
+ "fmax z24.s, p3/M, z24.s, z15.s\n"
+ "fmax z21.s, p3/M, z21.s, z15.s\n"
+ "fmin z29.s, p3/M, z29.s, z14.s\n"
+ "fmin z30.s, p3/M, z30.s, z14.s\n"
+ "ld1w { z7.s }, p3/Z, [x14, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x14, #-7, MUL VL]\n"
+ "fmax z23.s, p3/M, z23.s, z15.s\n"
+ "fmin z28.s, p3/M, z28.s, z14.s\n"
+ "fmin z27.s, p3/M, z27.s, z14.s\n"
+ "fmin z26.s, p3/M, z26.s, z14.s\n"
+ "fmin z25.s, p3/M, z25.s, z14.s\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
+ "st1w { z29.s }, p0, [x13]\n"
+ "fmin z21.s, p3/M, z21.s, z14.s\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "st1w { z30.s }, p0, [x13, x17, LSL #2]\n"
+ "st1w { z28.s }, p0, [x13, x10, LSL #2]\n"
+ "addvl x13, x13, #1\n"
+ "addvl x14, x14, #-6\n"
+ "st1w { z27.s }, p0, [x24]\n"
+ "st1w { z26.s }, p0, [x24, x17, LSL #2]\n"
+ "st1w { z25.s }, p0, [x24, x10, LSL #2]\n"
"addvl x24, x24, #1\n"
- "st1w { z22.s }, p0, [x23]\n"
- "st1w { z20.s }, p0, [x23, x16, LSL #2]\n"
- "st1w { z21.s }, p0, [x23, x22, LSL #2]\n"
+ "st1w { z24.s }, p0, [x23]\n"
+ "st1w { z21.s }, p0, [x23, x17, LSL #2]\n"
+ "st1w { z23.s }, p0, [x23, x10, LSL #2]\n"
"addvl x23, x23, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z29, z14\n fmla z29.s, p3/M, z7.s, z9.s\n"
- "movprfx z28, z14\n fmla z28.s, p3/M, z8.s, z9.s\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z27, z14\n fmla z27.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z4.s, z13.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "add x8, x8, #0x1\n"
- "movprfx z26, z14\n fmla z26.s, p3/M, z5.s, z9.s\n"
- "movprfx z25, z14\n fmla z25.s, p3/M, z4.s, z9.s\n"
- "cmp x8, x20\n"
- "add x21, x13, #0x1\n"
- "movprfx z24, z14\n fmla z24.s, p3/M, z3.s, z9.s\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "ld1w { z23.s }, p2/Z, [x9, x27, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z27.s, p3/M, z2.s, z11.s\n"
- "ld1w { z18.s }, p2/Z, [x9, x17, LSL #2]\n"
- "movprfx z22, z14\n fmla z22.s, p3/M, z2.s, z9.s\n"
- "csel x13, x13, x21, LT\n"
- "fmla z29.s, p3/M, z6.s, z18.s\n"
- "movprfx z21, z14\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "movprfx z30, z31\n fmla z30.s, p3/M, z7.s, z9.s\n"
+ "movprfx z29, z31\n fmla z29.s, p3/M, z8.s, z9.s\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z28, z31\n fmla z28.s, p3/M, z6.s, z9.s\n"
+ "movprfx z27, z31\n fmla z27.s, p3/M, z5.s, z9.s\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z26, z31\n fmla z26.s, p3/M, z4.s, z9.s\n"
+ "movprfx z25, z31\n fmla z25.s, p3/M, z3.s, z9.s\n"
"mov p0.b, p2.b\n"
- "csel x8, x8, XZR, LT\n"
- "fmla z28.s, p3/M, z5.s, z13.s\n"
- "fmla z27.s, p3/M, z3.s, z13.s\n"
- "cmp x13, x20\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
- "fmla z25.s, p3/M, z1.s, z13.s\n"
- "fmla z24.s, p3/M, z0.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x14, x17, LSL #2]\n"
- "fmla z22.s, p3/M, z6.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x26, x25, LSL #2]\n"
- "movprfx z20, z14\n fmla z20.s, p3/M, z1.s, z9.s\n"
- "fmla z29.s, p3/M, z0.s, z17.s\n"
- "fmla z21.s, p3/M, z8.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x27, LSL #2]\n"
- "fmla z28.s, p3/M, z7.s, z18.s\n"
- "fmla z20.s, p3/M, z0.s, z18.s\n"
- "fmla z26.s, p3/M, z4.s, z18.s\n"
- "fmla z25.s, p3/M, z3.s, z18.s\n"
- "fmla z22.s, p3/M, z1.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x10]\n"
- "fmla z29.s, p3/M, z2.s, z16.s\n"
- "fmla z27.s, p3/M, z1.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x28]\n"
- "fmla z24.s, p3/M, z4.s, z23.s\n"
- "fmla z28.s, p3/M, z1.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x10, x25, LSL #2]\n"
- "fmla z20.s, p3/M, z2.s, z23.s\n"
- "fmla z21.s, p3/M, z1.s, z23.s\n"
- "fmla z29.s, p3/M, z8.s, z23.s\n"
- "fmla z27.s, p3/M, z7.s, z23.s\n"
- "fmla z25.s, p3/M, z5.s, z23.s\n"
+ "movprfx z24, z31\n fmla z24.s, p3/M, z2.s, z9.s\n"
+ "movprfx z23, z31\n fmla z23.s, p3/M, z0.s, z9.s\n"
+ "add x6, x6, #0x1\n"
+ "add x20, x5, #0x1\n"
+ "fmla z30.s, p3/M, z4.s, z13.s\n"
+ "fmla z29.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z22.s }, p2/Z, [x27, x11, LSL #2]\n"
+ "cmp x6, x22\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x7, LSL #2]\n"
+ "fmla z27.s, p3/M, z2.s, z13.s\n"
+ "csel x5, x5, x20, LT\n"
+ "fmla z26.s, p3/M, z1.s, z13.s\n"
+ "fmla z25.s, p3/M, z0.s, z13.s\n"
+ "csel x6, x6, XZR, LT\n"
+ "fmla z24.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "movprfx z21, z31\n fmla z21.s, p3/M, z1.s, z9.s\n"
+ "fmla z30.s, p3/M, z6.s, z17.s\n"
+ "fmla z29.s, p3/M, z5.s, z13.s\n"
+ "cmp x5, x21\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, x7, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z17.s\n"
+ "fmla z23.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, x11, LSL #2]\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
+ "fmla z21.s, p3/M, z0.s, z17.s\n"
+ "fmla z24.s, p3/M, z1.s, z17.s\n"
+ "fmla z30.s, p3/M, z0.s, z18.s\n"
+ "fmla z29.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z20.s }, p2/Z, [x28]\n"
+ "fmla z28.s, p3/M, z1.s, z16.s\n"
+ "fmla z25.s, p3/M, z4.s, z22.s\n"
+ "fmla z23.s, p3/M, z1.s, z22.s\n"
+ "fmla z26.s, p3/M, z5.s, z22.s\n"
+ "fmla z21.s, p3/M, z2.s, z22.s\n"
+ "fmla z27.s, p3/M, z0.s, z20.s\n"
+ "fmla z30.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x26]\n"
+ "fmla z29.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x9, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z22.s\n"
+ "fmla z24.s, p3/M, z3.s, z17.s\n"
+ "fmla z25.s, p3/M, z2.s, z16.s\n"
+ "fmla z27.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z19.s }, p2/Z, [x28, x7, LSL #2]\n"
+ "fmla z30.s, p3/M, z8.s, z22.s\n"
+ "ld1w { z18.s }, p2/Z, [x26, x12, LSL #2]\n"
+ "fmla z29.s, p3/M, z3.s, z20.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x7, LSL #2]\n"
+ "fmla z21.s, p3/M, z4.s, z18.s\n"
+ "fmla z23.s, p3/M, z3.s, z18.s\n"
+ "fmla z26.s, p3/M, z7.s, z18.s\n"
+ "fmla z24.s, p3/M, z5.s, z18.s\n"
+ "fmla z25.s, p3/M, z6.s, z18.s\n"
+ "fmla z27.s, p3/M, z8.s, z18.s\n"
+ "fmla z30.s, p3/M, z3.s, z19.s\n"
+ "fmla z21.s, p3/M, z6.s, z16.s\n"
+ "fmla z29.s, p3/M, z4.s, z19.s\n"
+ "fmla z23.s, p3/M, z5.s, z17.s\n"
"fmla z26.s, p3/M, z0.s, z19.s\n"
- "ld1w { z17.s }, p2/Z, [x28, x12, LSL #2]\n"
- "fmla z22.s, p3/M, z3.s, z18.s\n"
- "fmla z24.s, p3/M, z2.s, z16.s\n"
- "fmla z20.s, p3/M, z4.s, z17.s\n"
+ "fmla z24.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x25, x11, LSL #2]\n"
+ "fmla z25.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x11, LSL #2]\n"
+ "fmla z27.s, p3/M, z1.s, z19.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x7, LSL #2]\n"
+ "fmla z21.s, p3/M, z8.s, z18.s\n"
+ "fmla z23.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x26, x11, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z16.s\n"
+ "fmla z28.s, p3/M, z4.s, z16.s\n"
+ "fmla z26.s, p3/M, z2.s, z16.s\n"
+ "fmla z25.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, x12, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z17.s\n"
"fmla z21.s, p3/M, z3.s, z17.s\n"
- "fmla z28.s, p3/M, z3.s, z19.s\n"
- "fmla z27.s, p3/M, z5.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x28, x25, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x26, x17, LSL #2]\n"
- "fmla z26.s, p3/M, z6.s, z18.s\n"
- "fmla z25.s, p3/M, z7.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x10, x17, LSL #2]\n"
- "fmla z22.s, p3/M, z5.s, z17.s\n"
- "fmla z24.s, p3/M, z6.s, z17.s\n"
+ "fmla z27.s, p3/M, z7.s, z17.s\n"
+ "fmla z23.s, p3/M, z4.s, z19.s\n"
+ "fmla z26.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x27]\n"
+ "fmla z29.s, p3/M, z2.s, z16.s\n"
+ "fmla z30.s, p3/M, z1.s, z16.s\n"
+ "fmla z28.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x9, LSL #2]\n"
+ "fmla z25.s, p3/M, z7.s, z19.s\n"
"fmla z21.s, p3/M, z5.s, z19.s\n"
- "fmla z20.s, p3/M, z6.s, z16.s\n"
- "fmla z26.s, p3/M, z8.s, z17.s\n"
- "fmla z22.s, p3/M, z7.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x26, x27, LSL #2]\n"
- "fmla z29.s, p3/M, z3.s, z18.s\n"
- "fmla z25.s, p3/M, z0.s, z18.s\n"
- "fmla z24.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x10, x27, LSL #2]\n"
- "fmla z20.s, p3/M, z8.s, z17.s\n"
- "fmla z21.s, p3/M, z7.s, z17.s\n"
- "fmla z28.s, p3/M, z4.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x28, x27, LSL #2]\n"
- "fmla z26.s, p3/M, z1.s, z18.s\n"
- "fmla z29.s, p3/M, z5.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x28, x17, LSL #2]\n"
- "fmla z27.s, p3/M, z4.s, z16.s\n"
- "fmla z25.s, p3/M, z2.s, z16.s\n"
- "fmla z24.s, p3/M, z1.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x12, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z17.s\n"
- "fmla z20.s, p3/M, z3.s, z17.s\n"
- "fmla z21.s, p3/M, z4.s, z19.s\n"
- "fmla z26.s, p3/M, z7.s, z17.s\n"
- "fmla z25.s, p3/M, z6.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x9]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z29.s, p3/M, z1.s, z16.s\n"
- "fmax z29.s, p3/M, z29.s, z31.s\n"
- "fmin z29.s, p3/M, z29.s, z30.s\n"
- "fmla z27.s, p3/M, z0.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x9, x25, LSL #2]\n"
- "fmla z24.s, p3/M, z7.s, z19.s\n"
- "fmla z20.s, p3/M, z5.s, z19.s\n"
- "fmla z22.s, p3/M, z0.s, z18.s\n"
- "fmla z21.s, p3/M, z2.s, z17.s\n"
- "fmla z25.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x26, x12, LSL #2]\n"
- "fmax z25.s, p3/M, z25.s, z31.s\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "fmla z26.s, p3/M, z3.s, z18.s\n"
- "fmax z28.s, p3/M, z28.s, z31.s\n"
- "fmax z26.s, p3/M, z26.s, z31.s\n"
- "fmla z27.s, p3/M, z8.s, z17.s\n"
- "fmla z24.s, p3/M, z5.s, z17.s\n"
- "fmax z27.s, p3/M, z27.s, z31.s\n"
- "fmax z24.s, p3/M, z24.s, z31.s\n"
- "fmla z22.s, p3/M, z8.s, z16.s\n"
- "fmla z20.s, p3/M, z7.s, z16.s\n"
- "fmax z22.s, p3/M, z22.s, z31.s\n"
- "fmax z20.s, p3/M, z20.s, z31.s\n"
- "fmla z21.s, p3/M, z6.s, z16.s\n"
- "fmax z21.s, p3/M, z21.s, z31.s\n"
- "fmin z28.s, p3/M, z28.s, z30.s\n"
- "st1w { z28.s }, p0, [x11]\n"
- "fmin z27.s, p3/M, z27.s, z30.s\n"
- "fmin z26.s, p3/M, z26.s, z30.s\n"
- "st1w { z29.s }, p0, [x11, x16, LSL #2]\n"
- "fmin z25.s, p3/M, z25.s, z30.s\n"
- "fmin z24.s, p3/M, z24.s, z30.s\n"
- "st1w { z27.s }, p0, [x11, x22, LSL #2]\n"
- "fmin z22.s, p3/M, z22.s, z30.s\n"
- "fmin z20.s, p3/M, z20.s, z30.s\n"
- "st1w { z26.s }, p0, [x24]\n"
- "fmin z21.s, p3/M, z21.s, z30.s\n"
- "st1w { z25.s }, p0, [x24, x16, LSL #2]\n"
- "st1w { z24.s }, p0, [x24, x22, LSL #2]\n"
- "st1w { z22.s }, p0, [x23]\n"
- "st1w { z20.s }, p0, [x23, x16, LSL #2]\n"
- "st1w { z21.s }, p0, [x23, x22, LSL #2]\n"
+ "fmla z24.s, p3/M, z0.s, z18.s\n"
+ "fmla z26.s, p3/M, z8.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x12, LSL #2]\n"
+ "fmla z27.s, p3/M, z3.s, z18.s\n"
+ "fmla z23.s, p3/M, z2.s, z17.s\n"
+ "fmla z29.s, p3/M, z6.s, z18.s\n"
+ "fmax z30.s, p3/M, z30.s, z15.s\n"
+ "fmla z28.s, p3/M, z8.s, z17.s\n"
+ "fmla z25.s, p3/M, z5.s, z17.s\n"
+ "fmla z24.s, p3/M, z8.s, z16.s\n"
+ "fmla z21.s, p3/M, z7.s, z16.s\n"
+ "fmax z27.s, p3/M, z27.s, z15.s\n"
+ "fmax z26.s, p3/M, z26.s, z15.s\n"
+ "fmin z30.s, p3/M, z30.s, z14.s\n"
+ "fmla z23.s, p3/M, z6.s, z16.s\n"
+ "fmax z29.s, p3/M, z29.s, z15.s\n"
+ "fmax z28.s, p3/M, z28.s, z15.s\n"
+ "fmax z25.s, p3/M, z25.s, z15.s\n"
+ "fmin z27.s, p3/M, z27.s, z14.s\n"
+ "fmin z26.s, p3/M, z26.s, z14.s\n"
+ "fmax z24.s, p3/M, z24.s, z15.s\n"
+ "fmax z21.s, p3/M, z21.s, z15.s\n"
+ "fmax z23.s, p3/M, z23.s, z15.s\n"
+ "fmin z29.s, p3/M, z29.s, z14.s\n"
+ "fmin z28.s, p3/M, z28.s, z14.s\n"
+ "fmin z25.s, p3/M, z25.s, z14.s\n"
+ "st1w { z27.s }, p0, [x24]\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
+ "fmin z21.s, p3/M, z21.s, z14.s\n"
+ "st1w { z26.s }, p0, [x24, x17, LSL #2]\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "st1w { z29.s }, p0, [x13]\n"
+ "st1w { z30.s }, p0, [x13, x17, LSL #2]\n"
+ "st1w { z28.s }, p0, [x13, x10, LSL #2]\n"
+ "st1w { z25.s }, p0, [x24, x10, LSL #2]\n"
+ "st1w { z24.s }, p0, [x23]\n"
+ "st1w { z21.s }, p0, [x23, x17, LSL #2]\n"
+ "st1w { z23.s }, p0, [x23, x10, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
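A minimal C++ sketch (not taken from the patch; TileArgs and tile_pointers are illustrative names) of the pointer setup spelled out by the scalar-arithmetic comments in the prologue above, with the constant 3 taken from the "mov x26, #0x3" / "mov x25, #0x3" pair for this 3x3-output variant.

#include <cstddef>

struct TileArgs // illustrative mirror of the Args fields read via offsetof
{
    const float *inptr;
    float       *outptr;
    std::size_t  tile_i, tile_j;
    std::size_t  ld_input_row, ld_input_col;   // strides in elements
    std::size_t  ld_output_row, ld_output_col;
};

inline void tile_pointers(const TileArgs &a, const float *&in, float *&out)
{
    std::size_t in_off = a.tile_i * a.ld_input_row;   // offset = tile_i * ld_input_row
    in_off += a.tile_j * a.ld_input_col;              // offset += tile_j * ld_input_col
    in_off *= 3;                                      // offset *= kernel_stride * output_size
    in = a.inptr + in_off;                            // inptr[0] += offset * sizeof(float)

    std::size_t out_off = a.tile_i * a.ld_output_row; // offset = tile_i * ld_output_row
    out_off += a.tile_j * a.ld_output_col;            // offset += tile_j * ld_output_col
    out_off *= 3;                                     // offset *= output_tile_size
    out = a.outptr + out_off;                         // outptrs[0] += offset * sizeof(float)
}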
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
index 4809b0c45c..27baf11d17 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -90,384 +90,384 @@ void sve_fp32_nhwc_3x3_s1_output3x3_mla_depthfirst_indirect_impl(
"ptrue p3.b\n"
"ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
"add x17, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ld1w { z14.s }, p3/Z, [x8]\n"
- "cntw x16\n"
- "mov x15, #0x0\n"
+ "ldr x16, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "cntw x15\n"
+ "mov x14, #0x0\n"
+ "ldp x24, x23, [x17, #0x0]\n"
+ "ldp x22, x21, [x17, #0x10]\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "ldr x20, [x17, #0x20]\n"
+ "ld1w { z15.s }, p3/Z, [x8]\n"
"ld1w { z0.s }, p3/Z, [x8, #1, MUL VL]\n"
"ld1w { z1.s }, p3/Z, [x8, #2, MUL VL]\n"
- "whilelt p2.s, XZR, %x[n_channels]\n"
"ld1w { z2.s }, p3/Z, [x8, #3, MUL VL]\n"
"ld1w { z3.s }, p3/Z, [x8, #4, MUL VL]\n"
- "cmp x16, %x[n_channels]\n"
+ "cmp x15, %x[n_channels]\n"
+ "sub x13, XZR, x15\n"
"ld1w { z4.s }, p3/Z, [x8, #5, MUL VL]\n"
"ld1w { z5.s }, p3/Z, [x8, #6, MUL VL]\n"
- "sub x14, XZR, x16\n"
"ld1w { z6.s }, p3/Z, [x8, #7, MUL VL]\n"
"addvl x8, x8, #16\n"
- "ldp x24, x23, [x17, #0x0]\n"
- "ldp x22, x21, [x17, #0x10]\n"
- "ldr x20, [x17, #0x20]\n"
- "ldr x13, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rw { z31.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z30.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rw { z31.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1w { z9.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x23, x14, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x22, x14, LSL #2]\n"
"ld1w { z7.s }, p3/Z, [x8, #-8, MUL VL]\n"
"ld1w { z8.s }, p3/Z, [x8, #-7, MUL VL]\n"
- "ld1w { z9.s }, p2/Z, [x24, x15, LSL #2]\n"
"addvl x8, x8, #-6\n"
- "ld1w { z10.s }, p2/Z, [x23, x15, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x22, x15, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x21, x15, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x20, x14, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z29, z14\n fmla z29.s, p3/M, z8.s, z9.s\n"
- "movprfx z28, z14\n fmla z28.s, p3/M, z7.s, z9.s\n"
- "ldr x23, [x17, #0x30]\n"
- "ldr x26, [x17, #0x38]\n"
- "movprfx z27, z14\n fmla z27.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z0.s, z10.s\n"
- "ldr x22, [x17, #0x28]\n"
+ "movprfx z30, z15\n fmla z30.s, p3/M, z8.s, z9.s\n"
+ "movprfx z29, z15\n fmla z29.s, p3/M, z7.s, z9.s\n"
+ "ldr x22, [x17, #0x30]\n"
+ "ldr x27, [x17, #0x38]\n"
+ "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z9.s\n"
+ "movprfx z27, z15\n fmla z27.s, p3/M, z5.s, z9.s\n"
+ "ldr x26, [x17, #0x28]\n"
"ldr x21, [x17, #0x48]\n"
- "fmla z28.s, p3/M, z4.s, z13.s\n"
- "movprfx z26, z14\n fmla z26.s, p3/M, z5.s, z9.s\n"
+ "movprfx z26, z15\n fmla z26.s, p3/M, z4.s, z9.s\n"
+ "movprfx z25, z15\n fmla z25.s, p3/M, z3.s, z9.s\n"
"ldr x20, [x17, #0x40]\n"
- "ld1w { z19.s }, p2/Z, [x21, x15, LSL #2]\n"
- "movprfx z25, z14\n fmla z25.s, p3/M, z4.s, z9.s\n"
- "movprfx z24, z14\n fmla z24.s, p3/M, z3.s, z9.s\n"
"ldr x25, [x17, #0x50]\n"
+ "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
+ "movprfx z23, z15\n fmla z23.s, p3/M, z0.s, z9.s\n"
"ldr x24, [x17, #0x58]\n"
- "fmla z27.s, p3/M, z2.s, z11.s\n"
- "ld1w { z18.s }, p2/Z, [x23, x15, LSL #2]\n"
- "movprfx z23, z14\n fmla z23.s, p3/M, z2.s, z9.s\n"
"ldr x23, [x17, #0x60]\n"
- "fmla z29.s, p3/M, z5.s, z13.s\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "ldr x12, [x17, #0x70]\n"
- "ldr x11, [x17, #0x88]\n"
- "movprfx z22, z14\n fmla z22.s, p3/M, z0.s, z9.s\n"
- "fmla z27.s, p3/M, z3.s, z13.s\n"
- "incw x14\n"
+ "fmla z30.s, p3/M, z0.s, z10.s\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z22.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ldr x12, [x17, #0x88]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z2.s, z13.s\n"
+ "ldr x22, [x17, #0x70]\n"
+ "fmla z26.s, p3/M, z1.s, z13.s\n"
+ "fmla z25.s, p3/M, z0.s, z13.s\n"
+ "incw x13\n"
"mov p1.b, p2.b\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
- "fmla z25.s, p3/M, z1.s, z13.s\n"
- "ldr x10, [x13, #0x0]\n"
- "whilelt p0.s, x16, %x[n_channels]\n"
- "fmla z24.s, p3/M, z0.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x26, x15, LSL #2]\n"
- "fmla z23.s, p3/M, z6.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x15, LSL #2]\n"
- "movprfx z21, z14\n fmla z21.s, p3/M, z1.s, z9.s\n"
- "fmla z29.s, p3/M, z7.s, z18.s\n"
- "ldr x22, [x17, #0x68]\n"
- "ldr x21, [x17, #0x78]\n"
- "fmla z28.s, p3/M, z0.s, z17.s\n"
- "fmla z22.s, p3/M, z8.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z16.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "movprfx z21, z15\n fmla z21.s, p3/M, z1.s, z9.s\n"
+ "ldr x21, [x17, #0x68]\n"
+ "fmla z30.s, p3/M, z5.s, z13.s\n"
+ "fmla z29.s, p3/M, z6.s, z17.s\n"
+ "ldr x11, [x16, #0x0]\n"
+ "whilelt p0.s, x15, %x[n_channels]\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z18.s }, p2/Z, [x27, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z17.s\n"
+ "ldr x10, [x17, #0x78]\n"
+ "fmla z23.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
"ldr x20, [x17, #0x80]\n"
- "fmla z26.s, p3/M, z4.s, z18.s\n"
- "fmla z25.s, p3/M, z3.s, z18.s\n"
- "ldr x9, [x13, #0x8]\n"
- "ldr x28, [x13, #0x10]\n"
- "fmla z21.s, p3/M, z0.s, z18.s\n"
- "fmla z24.s, p3/M, z4.s, z19.s\n"
- "ldr x27, [x13, #0x18]\n"
- "ld1w { z14.s }, p3/Z, [x8]\n"
- "fmla z23.s, p3/M, z1.s, z18.s\n"
- "fmla z29.s, p3/M, z1.s, z17.s\n"
- "ld1w { z20.s }, p2/Z, [x25, x15, LSL #2]\n"
- "ld1w { z17.s }, p2/Z, [x24, x15, LSL #2]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z27.s, p3/M, z1.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z21.s, p3/M, z0.s, z17.s\n"
+ "fmla z25.s, p3/M, z4.s, z22.s\n"
+ "ldr x9, [x16, #0x8]\n"
+ "ldr x28, [x16, #0x10]\n"
+ "fmla z30.s, p3/M, z7.s, z17.s\n"
+ "fmla z29.s, p3/M, z0.s, z18.s\n"
+ "ldr x27, [x16, #0x18]\n"
+ "ld1w { z15.s }, p3/Z, [x8]\n"
+ "fmla z24.s, p3/M, z1.s, z17.s\n"
+ "fmla z28.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, x14, LSL #2]\n"
"ldr x26, [x17, #0x90]\n"
- "fmla z25.s, p3/M, z5.s, z19.s\n"
- "fmla z21.s, p3/M, z2.s, z19.s\n"
- "ldr x25, [x17, #0xa0]\n"
- "ldr x24, [x17, #0x98]\n"
- "fmla z26.s, p3/M, z0.s, z20.s\n"
- "fmla z24.s, p3/M, z2.s, z17.s\n"
- "fmla z28.s, p3/M, z8.s, z19.s\n"
+ "fmla z26.s, p3/M, z5.s, z22.s\n"
+ "fmla z23.s, p3/M, z1.s, z22.s\n"
+ "fmla z21.s, p3/M, z2.s, z22.s\n"
+ "fmla z30.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z20.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ldr x25, [x17, #0x98]\n"
+ "fmla z29.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z0.s, z17.s\n"
+ "ldr x24, [x17, #0xa0]\n"
+ "fmla z28.s, p3/M, z7.s, z22.s\n"
+ "fmla z25.s, p3/M, z2.s, z20.s\n"
+ "fmla z24.s, p3/M, z3.s, z16.s\n"
+ "fmla z30.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z19.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "ldr x23, [x17, #0xb0]\n"
+ "fmla z29.s, p3/M, z8.s, z22.s\n"
+ "ld1w { z17.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ldr x22, [x17, #0xa8]\n"
+ "fmla z27.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "ldr x21, [x17, #0xc0]\n"
+ "fmla z28.s, p3/M, z5.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x10, x14, LSL #2]\n"
+ "ldr x20, [x17, #0xb8]\n"
+ "fmla z26.s, p3/M, z7.s, z17.s\n"
+ "fmla z25.s, p3/M, z6.s, z17.s\n"
+ "fmla z21.s, p3/M, z4.s, z17.s\n"
+ "fmla z24.s, p3/M, z5.s, z17.s\n"
+ "fmla z23.s, p3/M, z3.s, z17.s\n"
+ "fmla z27.s, p3/M, z8.s, z17.s\n"
+ "fmla z29.s, p3/M, z3.s, z18.s\n"
+ "fmla z30.s, p3/M, z4.s, z18.s\n"
+ "fmla z25.s, p3/M, z8.s, z19.s\n"
+ "fmla z26.s, p3/M, z0.s, z18.s\n"
+ "fmla z21.s, p3/M, z6.s, z16.s\n"
+ "fmla z24.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "fmla z23.s, p3/M, z5.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x12, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x25, x14, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z16.s\n"
+ "fmla z28.s, p3/M, z4.s, z16.s\n"
+ "fmla z26.s, p3/M, z2.s, z16.s\n"
+ "fmla z25.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ldr x24, [x17, #0x20]\n"
+ "fmla z21.s, p3/M, z8.s, z17.s\n"
+ "fmla z24.s, p3/M, z4.s, z19.s\n"
+ "fmla z23.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, x14, LSL #2]\n"
"fmla z27.s, p3/M, z7.s, z19.s\n"
- "fmla z22.s, p3/M, z1.s, z19.s\n"
- "fmla z23.s, p3/M, z3.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x22, x15, LSL #2]\n"
- "ldr x23, [x17, #0xa8]\n"
- "fmla z26.s, p3/M, z6.s, z16.s\n"
+ "fmla z30.s, p3/M, z2.s, z16.s\n"
+ "fmla z29.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "fmla z26.s, p3/M, z6.s, z19.s\n"
+ "fmla z28.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "fmla z21.s, p3/M, z3.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x14, LSL #2]\n"
"fmla z25.s, p3/M, z7.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x20, x15, LSL #2]\n"
- "ldr x22, [x17, #0xc0]\n"
- "fmla z24.s, p3/M, z6.s, z18.s\n"
- "fmla z21.s, p3/M, z4.s, z18.s\n"
- "fmla z29.s, p3/M, z3.s, z20.s\n"
- "fmla z27.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x12, x15, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
- "fmla z23.s, p3/M, z5.s, z18.s\n"
- "fmla z22.s, p3/M, z3.s, z18.s\n"
- "ldr x21, [x17, #0xb0]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "fmla z23.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z4.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmax z29.s, p3/M, z29.s, z14.s\n"
+ "fmla z30.s, p3/M, z6.s, z16.s\n"
+ "fmla z24.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z0.s }, p3/Z, [x8, #1, MUL VL]\n"
"fmla z26.s, p3/M, z8.s, z18.s\n"
- "fmla z24.s, p3/M, z8.s, z17.s\n"
- "fmla z21.s, p3/M, z6.s, z16.s\n"
- "fmla z28.s, p3/M, z3.s, z19.s\n"
- "fmla z25.s, p3/M, z0.s, z19.s\n"
- "fmla z22.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x11, x15, LSL #2]\n"
- "fmla z23.s, p3/M, z7.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x26, x15, LSL #2]\n"
- "fmla z29.s, p3/M, z4.s, z19.s\n"
- "fmla z26.s, p3/M, z1.s, z19.s\n"
- "fmla z28.s, p3/M, z5.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x24, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z4.s, z17.s\n"
- "fmla z25.s, p3/M, z2.s, z17.s\n"
- "fmla z24.s, p3/M, z1.s, z17.s\n"
- "fmla z21.s, p3/M, z8.s, z18.s\n"
- "ld1w { z17.s }, p2/Z, [x25, x15, LSL #2]\n"
- "ldr x25, [x17, #0x20]\n"
- "fmla z22.s, p3/M, z7.s, z18.s\n"
- "ld1w { z18.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z29.s, p3/M, z2.s, z17.s\n"
- "fmla z26.s, p3/M, z7.s, z16.s\n"
- "fmla z25.s, p3/M, z6.s, z16.s\n"
- "fmla z23.s, p3/M, z4.s, z16.s\n"
- "fmla z21.s, p3/M, z3.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z18.s\n"
- "fmla z28.s, p3/M, z1.s, z17.s\n"
- "fmax z28.s, p3/M, z28.s, z31.s\n"
- "fmin z28.s, p3/M, z28.s, z30.s\n"
- "fmla z27.s, p3/M, z0.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
- "fmla z29.s, p3/M, z6.s, z16.s\n"
- "fmax z29.s, p3/M, z29.s, z31.s\n"
- "fmla z24.s, p3/M, z7.s, z18.s\n"
+ "fmla z27.s, p3/M, z3.s, z16.s\n"
+ "fmla z28.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #4, MUL VL]\n"
"fmla z21.s, p3/M, z5.s, z18.s\n"
- "fmin z29.s, p3/M, z29.s, z30.s\n"
- "st1w { z29.s }, p1, [x10, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z0.s, z16.s\n"
- "fmla z22.s, p3/M, z2.s, z17.s\n"
- "ldr x24, [x13, #0x20]\n"
- "st1w { z28.s }, p1, [x9, x14, LSL #2]\n"
- "fmla z25.s, p3/M, z8.s, z18.s\n"
- "fmla z26.s, p3/M, z3.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "fmla z25.s, p3/M, z5.s, z17.s\n"
"ldp x23, x22, [x17, #0x0]\n"
- "fmla z27.s, p3/M, z8.s, z17.s\n"
- "fmla z24.s, p3/M, z5.s, z17.s\n"
+ "fmla z23.s, p3/M, z2.s, z17.s\n"
+ "fmax z30.s, p3/M, z30.s, z14.s\n"
"ldp x21, x20, [x17, #0x10]\n"
- "fmax z27.s, p3/M, z27.s, z31.s\n"
- "fmla z23.s, p3/M, z8.s, z16.s\n"
- "fmla z21.s, p3/M, z7.s, z16.s\n"
- "fmax z26.s, p3/M, z26.s, z31.s\n"
- "fmax z25.s, p3/M, z25.s, z31.s\n"
- "fmla z22.s, p3/M, z6.s, z16.s\n"
- "incw x15\n"
- "ld1w { z9.s }, p0/Z, [x23, x16, LSL #2]\n"
- "ld1w { z10.s }, p0/Z, [x22, x16, LSL #2]\n"
- "ld1w { z11.s }, p0/Z, [x21, x16, LSL #2]\n"
- "ld1w { z12.s }, p0/Z, [x20, x16, LSL #2]\n"
- "fmin z27.s, p3/M, z27.s, z30.s\n"
- "fmin z26.s, p3/M, z26.s, z30.s\n"
- "ld1w { z13.s }, p0/Z, [x25, x16, LSL #2]\n"
- "incw x16\n"
- "fmin z25.s, p3/M, z25.s, z30.s\n"
- "st1w { z27.s }, p1, [x28, x14, LSL #2]\n"
- "fmax z24.s, p3/M, z24.s, z31.s\n"
- "fmax z23.s, p3/M, z23.s, z31.s\n"
- "st1w { z26.s }, p1, [x27, x14, LSL #2]\n"
- "ldr x23, [x13, #0x28]\n"
- "fmax z21.s, p3/M, z21.s, z31.s\n"
- "fmax z22.s, p3/M, z22.s, z31.s\n"
- "st1w { z25.s }, p1, [x24, x14, LSL #2]\n"
- "ldr x22, [x13, #0x30]\n"
- "ldr x21, [x13, #0x38]\n"
- "ldr x20, [x13, #0x40]\n"
- "whilelt p2.s, x15, %x[n_channels]\n"
- "cmp x16, %x[n_channels]\n"
- "ld1w { z0.s }, p3/Z, [x8, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x8, #2, MUL VL]\n"
- "fmin z24.s, p3/M, z24.s, z30.s\n"
- "fmin z23.s, p3/M, z23.s, z30.s\n"
+ "incw x14\n"
+ "fmax z26.s, p3/M, z26.s, z14.s\n"
+ "fmin z29.s, p3/M, z29.s, z31.s\n"
"ld1w { z2.s }, p3/Z, [x8, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x8, #4, MUL VL]\n"
- "fmin z21.s, p3/M, z21.s, z30.s\n"
- "fmin z22.s, p3/M, z22.s, z30.s\n"
- "ld1w { z4.s }, p3/Z, [x8, #5, MUL VL]\n"
"ld1w { z5.s }, p3/Z, [x8, #6, MUL VL]\n"
- "st1w { z24.s }, p1, [x23, x14, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z16.s\n"
+ "fmax z28.s, p3/M, z28.s, z14.s\n"
+ "fmax z27.s, p3/M, z27.s, z14.s\n"
+ "ld1w { z9.s }, p0/Z, [x23, x15, LSL #2]\n"
+ "fmla z21.s, p3/M, z7.s, z16.s\n"
+ "fmin z30.s, p3/M, z30.s, z31.s\n"
+ "ld1w { z10.s }, p0/Z, [x22, x15, LSL #2]\n"
+ "ld1w { z11.s }, p0/Z, [x21, x15, LSL #2]\n"
+ "fmla z23.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z12.s }, p0/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z13.s }, p0/Z, [x24, x15, LSL #2]\n"
+ "incw x15\n"
+ "fmin z28.s, p3/M, z28.s, z31.s\n"
+ "fmin z27.s, p3/M, z27.s, z31.s\n"
+ "st1w { z29.s }, p1, [x9, x13, LSL #2]\n"
+ "ldr x23, [x16, #0x28]\n"
+ "st1w { z30.s }, p1, [x11, x13, LSL #2]\n"
+ "ldr x20, [x16, #0x20]\n"
+ "fmin z26.s, p3/M, z26.s, z31.s\n"
+ "fmax z25.s, p3/M, z25.s, z14.s\n"
+ "fmax z24.s, p3/M, z24.s, z14.s\n"
+ "fmax z21.s, p3/M, z21.s, z14.s\n"
"ld1w { z6.s }, p3/Z, [x8, #7, MUL VL]\n"
+ "fmax z23.s, p3/M, z23.s, z14.s\n"
+ "st1w { z28.s }, p1, [x28, x13, LSL #2]\n"
+ "ldr x22, [x16, #0x30]\n"
"addvl x8, x8, #16\n"
- "st1w { z23.s }, p1, [x22, x14, LSL #2]\n"
+ "st1w { z27.s }, p1, [x27, x13, LSL #2]\n"
+ "ldr x21, [x16, #0x38]\n"
+ "whilelt p2.s, x14, %x[n_channels]\n"
+ "cmp x15, %x[n_channels]\n"
+ "st1w { z26.s }, p1, [x20, x13, LSL #2]\n"
+ "ldr x20, [x16, #0x40]\n"
+ "fmin z25.s, p3/M, z25.s, z31.s\n"
+ "fmin z24.s, p3/M, z24.s, z31.s\n"
+ "fmin z21.s, p3/M, z21.s, z31.s\n"
+ "fmin z23.s, p3/M, z23.s, z31.s\n"
"ld1w { z7.s }, p3/Z, [x8, #-8, MUL VL]\n"
- "st1w { z21.s }, p1, [x21, x14, LSL #2]\n"
"ld1w { z8.s }, p3/Z, [x8, #-7, MUL VL]\n"
"addvl x8, x8, #-6\n"
- "st1w { z22.s }, p1, [x20, x14, LSL #2]\n"
+ "st1w { z25.s }, p1, [x23, x13, LSL #2]\n"
+ "st1w { z24.s }, p1, [x22, x13, LSL #2]\n"
+ "st1w { z21.s }, p1, [x21, x13, LSL #2]\n"
+ "st1w { z23.s }, p1, [x20, x13, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z29, z14\n fmla z29.s, p3/M, z8.s, z9.s\n"
- "movprfx z28, z14\n fmla z28.s, p3/M, z7.s, z9.s\n"
- "ldr x23, [x17, #0x30]\n"
- "ldr x26, [x17, #0x38]\n"
- "movprfx z27, z14\n fmla z27.s, p3/M, z6.s, z9.s\n"
- "fmla z29.s, p3/M, z0.s, z10.s\n"
- "ldr x22, [x17, #0x28]\n"
+ "movprfx z30, z15\n fmla z30.s, p3/M, z8.s, z9.s\n"
+ "movprfx z29, z15\n fmla z29.s, p3/M, z7.s, z9.s\n"
+ "ldr x22, [x17, #0x30]\n"
+ "ldr x27, [x17, #0x38]\n"
+ "movprfx z28, z15\n fmla z28.s, p3/M, z6.s, z9.s\n"
+ "movprfx z27, z15\n fmla z27.s, p3/M, z5.s, z9.s\n"
+ "ldr x26, [x17, #0x28]\n"
"ldr x21, [x17, #0x48]\n"
- "fmla z28.s, p3/M, z4.s, z13.s\n"
- "movprfx z26, z14\n fmla z26.s, p3/M, z5.s, z9.s\n"
+ "movprfx z26, z15\n fmla z26.s, p3/M, z4.s, z9.s\n"
+ "movprfx z25, z15\n fmla z25.s, p3/M, z3.s, z9.s\n"
"ldr x20, [x17, #0x40]\n"
- "ld1w { z19.s }, p2/Z, [x21, x15, LSL #2]\n"
- "movprfx z25, z14\n fmla z25.s, p3/M, z4.s, z9.s\n"
- "movprfx z24, z14\n fmla z24.s, p3/M, z3.s, z9.s\n"
"ldr x25, [x17, #0x50]\n"
+ "movprfx z24, z15\n fmla z24.s, p3/M, z2.s, z9.s\n"
+ "movprfx z23, z15\n fmla z23.s, p3/M, z0.s, z9.s\n"
"ldr x24, [x17, #0x58]\n"
- "fmla z27.s, p3/M, z2.s, z11.s\n"
- "ld1w { z18.s }, p2/Z, [x23, x15, LSL #2]\n"
- "movprfx z23, z14\n fmla z23.s, p3/M, z2.s, z9.s\n"
"ldr x23, [x17, #0x60]\n"
- "fmla z29.s, p3/M, z5.s, z13.s\n"
- "fmla z28.s, p3/M, z6.s, z18.s\n"
- "ldr x12, [x17, #0x70]\n"
- "ldr x11, [x17, #0x88]\n"
- "movprfx z22, z14\n fmla z22.s, p3/M, z0.s, z9.s\n"
- "fmla z27.s, p3/M, z3.s, z13.s\n"
- "incw x14\n"
+ "fmla z30.s, p3/M, z0.s, z10.s\n"
+ "fmla z29.s, p3/M, z4.s, z13.s\n"
+ "ld1w { z22.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ldr x12, [x17, #0x88]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z2.s, z13.s\n"
+ "ldr x22, [x17, #0x70]\n"
+ "fmla z26.s, p3/M, z1.s, z13.s\n"
+ "fmla z25.s, p3/M, z0.s, z13.s\n"
+ "incw x13\n"
"mov p0.b, p2.b\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
- "fmla z25.s, p3/M, z1.s, z13.s\n"
- "ldr x10, [x13, #0x0]\n"
- "ldr x9, [x13, #0x8]\n"
- "fmla z24.s, p3/M, z0.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x26, x15, LSL #2]\n"
- "fmla z23.s, p3/M, z6.s, z12.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x15, LSL #2]\n"
- "movprfx z21, z14\n fmla z21.s, p3/M, z1.s, z9.s\n"
- "fmla z29.s, p3/M, z7.s, z18.s\n"
- "ldr x22, [x17, #0x68]\n"
- "ldr x21, [x17, #0x78]\n"
- "fmla z28.s, p3/M, z0.s, z17.s\n"
- "fmla z22.s, p3/M, z8.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z16.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "movprfx z21, z15\n fmla z21.s, p3/M, z1.s, z9.s\n"
+ "ldr x21, [x17, #0x68]\n"
+ "fmla z30.s, p3/M, z5.s, z13.s\n"
+ "fmla z29.s, p3/M, z6.s, z17.s\n"
+ "ldr x11, [x16, #0x0]\n"
+ "ldr x10, [x16, #0x8]\n"
+ "fmla z28.s, p3/M, z3.s, z13.s\n"
+ "ld1w { z18.s }, p2/Z, [x27, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z17.s\n"
+ "ldr x9, [x17, #0x78]\n"
+ "fmla z23.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
"ldr x20, [x17, #0x80]\n"
- "fmla z26.s, p3/M, z4.s, z18.s\n"
- "fmla z25.s, p3/M, z3.s, z18.s\n"
- "ldr x28, [x13, #0x10]\n"
- "ldr x27, [x13, #0x18]\n"
- "fmla z21.s, p3/M, z0.s, z18.s\n"
- "fmla z24.s, p3/M, z4.s, z19.s\n"
- "fmla z23.s, p3/M, z1.s, z18.s\n"
- "fmla z29.s, p3/M, z1.s, z17.s\n"
- "ld1w { z20.s }, p2/Z, [x25, x15, LSL #2]\n"
- "ld1w { z17.s }, p2/Z, [x24, x15, LSL #2]\n"
- "fmla z28.s, p3/M, z2.s, z16.s\n"
- "fmla z27.s, p3/M, z1.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z21.s, p3/M, z0.s, z17.s\n"
+ "fmla z25.s, p3/M, z4.s, z22.s\n"
+ "ldr x28, [x16, #0x10]\n"
+ "ldr x27, [x16, #0x18]\n"
+ "fmla z30.s, p3/M, z7.s, z17.s\n"
+ "fmla z29.s, p3/M, z0.s, z18.s\n"
+ "fmla z24.s, p3/M, z1.s, z17.s\n"
+ "fmla z28.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, x14, LSL #2]\n"
"ldr x26, [x17, #0x90]\n"
- "fmla z25.s, p3/M, z5.s, z19.s\n"
- "fmla z21.s, p3/M, z2.s, z19.s\n"
- "ldr x25, [x17, #0xa0]\n"
- "ldr x24, [x17, #0x98]\n"
- "fmla z26.s, p3/M, z0.s, z20.s\n"
- "fmla z24.s, p3/M, z2.s, z17.s\n"
- "fmla z28.s, p3/M, z8.s, z19.s\n"
+ "fmla z26.s, p3/M, z5.s, z22.s\n"
+ "fmla z23.s, p3/M, z1.s, z22.s\n"
+ "fmla z21.s, p3/M, z2.s, z22.s\n"
+ "fmla z30.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z20.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ldr x25, [x17, #0x98]\n"
+ "fmla z29.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z0.s, z17.s\n"
+ "ldr x24, [x17, #0xa0]\n"
+ "fmla z28.s, p3/M, z7.s, z22.s\n"
+ "fmla z25.s, p3/M, z2.s, z20.s\n"
+ "fmla z24.s, p3/M, z3.s, z16.s\n"
+ "fmla z30.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z19.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "ldr x23, [x17, #0xb0]\n"
+ "fmla z29.s, p3/M, z8.s, z22.s\n"
+ "ld1w { z17.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ldr x22, [x17, #0xa8]\n"
+ "fmla z27.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "ldr x21, [x17, #0xc0]\n"
+ "fmla z28.s, p3/M, z5.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x9, x14, LSL #2]\n"
+ "ldr x20, [x17, #0xb8]\n"
+ "fmla z26.s, p3/M, z7.s, z17.s\n"
+ "fmla z25.s, p3/M, z6.s, z17.s\n"
+ "fmla z21.s, p3/M, z4.s, z17.s\n"
+ "fmla z24.s, p3/M, z5.s, z17.s\n"
+ "fmla z23.s, p3/M, z3.s, z17.s\n"
+ "fmla z27.s, p3/M, z8.s, z17.s\n"
+ "fmla z29.s, p3/M, z3.s, z18.s\n"
+ "fmla z30.s, p3/M, z4.s, z18.s\n"
+ "fmla z25.s, p3/M, z8.s, z19.s\n"
+ "fmla z26.s, p3/M, z0.s, z18.s\n"
+ "fmla z21.s, p3/M, z6.s, z16.s\n"
+ "fmla z24.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "fmla z23.s, p3/M, z5.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x12, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x25, x14, LSL #2]\n"
+ "fmla z29.s, p3/M, z5.s, z16.s\n"
+ "fmla z28.s, p3/M, z4.s, z16.s\n"
+ "fmla z26.s, p3/M, z2.s, z16.s\n"
+ "fmla z25.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "fmla z21.s, p3/M, z8.s, z17.s\n"
+ "fmla z24.s, p3/M, z4.s, z19.s\n"
+ "fmla z23.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, x14, LSL #2]\n"
"fmla z27.s, p3/M, z7.s, z19.s\n"
- "fmla z22.s, p3/M, z1.s, z19.s\n"
- "fmla z23.s, p3/M, z3.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x22, x15, LSL #2]\n"
- "ldr x23, [x17, #0xa8]\n"
- "fmla z26.s, p3/M, z6.s, z16.s\n"
+ "fmla z30.s, p3/M, z2.s, z16.s\n"
+ "fmla z29.s, p3/M, z1.s, z16.s\n"
+ "fmla z26.s, p3/M, z6.s, z19.s\n"
+ "fmla z28.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "fmla z21.s, p3/M, z3.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x14, LSL #2]\n"
"fmla z25.s, p3/M, z7.s, z18.s\n"
- "ld1w { z19.s }, p2/Z, [x20, x15, LSL #2]\n"
- "ldr x22, [x17, #0xc0]\n"
- "fmla z24.s, p3/M, z6.s, z18.s\n"
- "fmla z21.s, p3/M, z4.s, z18.s\n"
- "fmla z29.s, p3/M, z3.s, z20.s\n"
- "fmla z27.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x12, x15, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
- "fmla z23.s, p3/M, z5.s, z18.s\n"
- "fmla z22.s, p3/M, z3.s, z18.s\n"
- "ldr x21, [x17, #0xb0]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "fmla z23.s, p3/M, z4.s, z18.s\n"
+ "fmax z29.s, p3/M, z29.s, z14.s\n"
+ "fmla z30.s, p3/M, z6.s, z16.s\n"
+ "fmla z24.s, p3/M, z0.s, z16.s\n"
"fmla z26.s, p3/M, z8.s, z18.s\n"
- "fmla z24.s, p3/M, z8.s, z17.s\n"
- "fmla z21.s, p3/M, z6.s, z16.s\n"
- "fmla z28.s, p3/M, z3.s, z19.s\n"
- "fmla z25.s, p3/M, z0.s, z19.s\n"
- "fmla z22.s, p3/M, z5.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x11, x15, LSL #2]\n"
- "fmla z23.s, p3/M, z7.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x26, x15, LSL #2]\n"
- "fmla z29.s, p3/M, z4.s, z19.s\n"
- "fmla z26.s, p3/M, z1.s, z19.s\n"
- "fmla z28.s, p3/M, z5.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x24, x15, LSL #2]\n"
- "fmla z27.s, p3/M, z4.s, z17.s\n"
- "fmla z25.s, p3/M, z2.s, z17.s\n"
- "fmla z24.s, p3/M, z1.s, z17.s\n"
- "fmla z21.s, p3/M, z8.s, z18.s\n"
- "ld1w { z17.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z22.s, p3/M, z7.s, z18.s\n"
- "ld1w { z18.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z29.s, p3/M, z2.s, z17.s\n"
- "fmla z26.s, p3/M, z7.s, z16.s\n"
- "fmla z25.s, p3/M, z6.s, z16.s\n"
- "fmla z23.s, p3/M, z4.s, z16.s\n"
- "fmla z21.s, p3/M, z3.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z18.s\n"
- "fmla z28.s, p3/M, z1.s, z17.s\n"
- "fmax z28.s, p3/M, z28.s, z31.s\n"
- "fmin z28.s, p3/M, z28.s, z30.s\n"
- "fmla z27.s, p3/M, z0.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x15, LSL #2]\n"
- "fmla z29.s, p3/M, z6.s, z16.s\n"
- "fmax z29.s, p3/M, z29.s, z31.s\n"
- "fmla z24.s, p3/M, z7.s, z18.s\n"
+ "fmla z27.s, p3/M, z3.s, z16.s\n"
+ "fmla z28.s, p3/M, z8.s, z17.s\n"
"fmla z21.s, p3/M, z5.s, z18.s\n"
- "fmin z29.s, p3/M, z29.s, z30.s\n"
- "st1w { z29.s }, p0, [x10, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z0.s, z16.s\n"
- "fmla z22.s, p3/M, z2.s, z17.s\n"
- "ldr x20, [x13, #0x20]\n"
- "st1w { z28.s }, p0, [x9, x14, LSL #2]\n"
- "fmla z25.s, p3/M, z8.s, z18.s\n"
- "fmla z26.s, p3/M, z3.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x15, LSL #2]\n"
- "fmax z26.s, p3/M, z26.s, z31.s\n"
- "fmla z27.s, p3/M, z8.s, z17.s\n"
- "fmla z24.s, p3/M, z5.s, z17.s\n"
- "fmax z27.s, p3/M, z27.s, z31.s\n"
- "fmax z25.s, p3/M, z25.s, z31.s\n"
- "fmla z23.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "fmla z25.s, p3/M, z5.s, z17.s\n"
+ "fmin z29.s, p3/M, z29.s, z31.s\n"
+ "fmla z23.s, p3/M, z2.s, z17.s\n"
+ "fmax z30.s, p3/M, z30.s, z14.s\n"
+ "fmax z26.s, p3/M, z26.s, z14.s\n"
+ "fmla z24.s, p3/M, z8.s, z16.s\n"
+ "fmax z28.s, p3/M, z28.s, z14.s\n"
+ "fmax z27.s, p3/M, z27.s, z14.s\n"
+ "st1w { z29.s }, p0, [x10, x13, LSL #2]\n"
+ "ldr x23, [x16, #0x28]\n"
"fmla z21.s, p3/M, z7.s, z16.s\n"
- "fmin z27.s, p3/M, z27.s, z30.s\n"
- "fmin z26.s, p3/M, z26.s, z30.s\n"
- "fmla z22.s, p3/M, z6.s, z16.s\n"
- "fmin z25.s, p3/M, z25.s, z30.s\n"
- "fmax z24.s, p3/M, z24.s, z31.s\n"
- "st1w { z27.s }, p0, [x28, x14, LSL #2]\n"
- "fmax z23.s, p3/M, z23.s, z31.s\n"
- "fmax z21.s, p3/M, z21.s, z31.s\n"
- "st1w { z26.s }, p0, [x27, x14, LSL #2]\n"
- "ldr x23, [x13, #0x28]\n"
- "fmax z22.s, p3/M, z22.s, z31.s\n"
- "st1w { z25.s }, p0, [x20, x14, LSL #2]\n"
- "ldr x22, [x13, #0x30]\n"
- "ldr x21, [x13, #0x38]\n"
- "ldr x20, [x13, #0x40]\n"
- "fmin z24.s, p3/M, z24.s, z30.s\n"
- "fmin z23.s, p3/M, z23.s, z30.s\n"
- "st1w { z24.s }, p0, [x23, x14, LSL #2]\n"
- "fmin z21.s, p3/M, z21.s, z30.s\n"
- "fmin z22.s, p3/M, z22.s, z30.s\n"
- "st1w { z23.s }, p0, [x22, x14, LSL #2]\n"
- "st1w { z21.s }, p0, [x21, x14, LSL #2]\n"
- "st1w { z22.s }, p0, [x20, x14, LSL #2]\n"
+ "fmin z30.s, p3/M, z30.s, z31.s\n"
+ "fmin z26.s, p3/M, z26.s, z31.s\n"
+ "fmax z25.s, p3/M, z25.s, z14.s\n"
+ "fmla z23.s, p3/M, z6.s, z16.s\n"
+ "fmin z28.s, p3/M, z28.s, z31.s\n"
+ "fmin z27.s, p3/M, z27.s, z31.s\n"
+ "fmax z24.s, p3/M, z24.s, z14.s\n"
+ "st1w { z30.s }, p0, [x11, x13, LSL #2]\n"
+ "ldr x20, [x16, #0x20]\n"
+ "fmax z21.s, p3/M, z21.s, z14.s\n"
+ "st1w { z28.s }, p0, [x28, x13, LSL #2]\n"
+ "ldr x22, [x16, #0x30]\n"
+ "fmin z25.s, p3/M, z25.s, z31.s\n"
+ "fmax z23.s, p3/M, z23.s, z14.s\n"
+ "st1w { z27.s }, p0, [x27, x13, LSL #2]\n"
+ "ldr x21, [x16, #0x38]\n"
+ "st1w { z26.s }, p0, [x20, x13, LSL #2]\n"
+ "ldr x20, [x16, #0x40]\n"
+ "fmin z24.s, p3/M, z24.s, z31.s\n"
+ "fmin z21.s, p3/M, z21.s, z31.s\n"
+ "st1w { z25.s }, p0, [x23, x13, LSL #2]\n"
+ "fmin z23.s, p3/M, z23.s, z31.s\n"
+ "st1w { z24.s }, p0, [x22, x13, LSL #2]\n"
+ "st1w { z21.s }, p0, [x21, x13, LSL #2]\n"
+ "st1w { z23.s }, p0, [x20, x13, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
index 35445595f8..43d5b16dfb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,565 +88,565 @@ void sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x16, #0x0\n"
- "mov x4, #0x0\n"
+ "mov x1, #0x0\n"
+ "mov x2, #0x0\n"
"1:" // Tile loop
- "str x16, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x1, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x20, #0x4\n"
"mov x25, #0x4\n"
- "mov x24, #0x4\n"
- "str x4, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x22, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "mul x21, x16, x23\n" // offset = tile_i * ld_input_row
- "ldr x5, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "ldr x6, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "mul x20, x16, x22\n" // offset = tile_i * ld_output_row
- "add x7, x5, x5\n"
- "madd x21, x4, x5, x21\n" // offset += tile_j * ld_input_col
- "ldr x8, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
- "cntw x16\n"
- "madd x20, x4, x6, x20\n" // offset += tile_j * ld_output_col
- "ldr x15, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x14, x7, x5\n"
+ "str x2, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "cntw x3\n"
+ "ldr x4, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "ldr x5, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "mul x21, x21, x25\n" // offset *= kernel_stride * output_size
- "add x8, x8, x21, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x13, x8, x23, LSL #2\n"
- "ld1w { z19.s }, p3/Z, [x17]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "add x12, x13, x23, LSL #2\n"
- "add x15, x15, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
- "add x11, x12, x23, LSL #2\n"
- "add x10, x14, x5\n"
- "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
- "add x9, x15, x22, LSL #2\n"
- "add x28, x11, x23, LSL #2\n"
- "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
- "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "add x27, x10, x5\n"
- "add x26, x9, x22, LSL #2\n"
- "add x25, x6, x6\n"
- "ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "cmp x16, %x[n_channels]\n"
- "add x24, x28, x23, LSL #2\n"
- "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
- "add x23, x26, x22, LSL #2\n"
- "add x22, x25, x6\n"
- "ld1w { z9.s }, p2/Z, [x12, x7, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x8]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x16\n"
- "ld1w { z11.s }, p2/Z, [x8, x27, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x12, x14, LSL #2]\n"
- "addvl x17, x17, #-6\n"
+ "mov x6, #0x0\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x22, x1, x24\n" // offset = tile_i * ld_input_row
+ "mul x21, x1, x23\n" // offset = tile_i * ld_output_row
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "cmp x3, %x[n_channels]\n"
+ "ld1rw { z27.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "add x16, x4, x4\n"
+ "add x15, x5, x5\n"
+ "ld1rw { z29.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "madd x22, x2, x4, x22\n" // offset += tile_j * ld_input_col
+ "add x14, x16, x4\n"
+ "ld1w { z13.s }, p3/Z, [x8]\n"
+ "ld1w { z0.s }, p3/Z, [x8, #1, MUL VL]\n"
+ "add x13, x15, x5\n"
+ "madd x21, x2, x5, x21\n" // offset += tile_j * ld_output_col
+ "ld1w { z1.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "add x12, x14, x4\n"
+ "mul x22, x22, x20\n" // offset *= kernel_stride * output_size
+ "ld1w { z3.s }, p3/Z, [x8, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "add x11, x12, x4\n"
+ "ld1w { z5.s }, p3/Z, [x8, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x8, #7, MUL VL]\n"
+ "addvl x8, x8, #16\n"
+ "sub x20, XZR, x3\n"
+ "mul x21, x21, x25\n" // offset *= output_tile_size
+ "add x7, x7, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x10, x7, x24, LSL #2\n"
+ "add x9, x10, x24, LSL #2\n"
+ "ld1w { z10.s }, p2/Z, [x7]\n"
+ "ld1w { z11.s }, p2/Z, [x7, x11, LSL #2]\n"
+ "add x28, x9, x24, LSL #2\n"
+ "add x27, x28, x24, LSL #2\n"
+ "ld1w { z7.s }, p3/Z, [x8, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x8, #-7, MUL VL]\n"
+ "addvl x8, x8, #-6\n"
+ "add x17, x17, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x26, x27, x24, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x9, x16, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x9, x14, LSL #2]\n"
+ "add x25, x17, x23, LSL #2\n"
+ "add x24, x25, x23, LSL #2\n"
+ "add x23, x24, x23, LSL #2\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z14, z19\n fmla z14.s, p3/M, z4.s, z9.s\n"
- "movprfx z31, z19\n fmla z31.s, p3/M, z8.s, z9.s\n"
- "whilelt p1.s, x16, %x[n_channels]\n"
- "incw x21\n"
- "movprfx z21, z19\n fmla z21.s, p3/M, z3.s, z9.s\n"
- "movprfx z22, z19\n fmla z22.s, p3/M, z1.s, z9.s\n"
- "incw x16\n"
+ "movprfx z14, z13\n fmla z14.s, p3/M, z4.s, z9.s\n"
+ "movprfx z19, z13\n fmla z19.s, p3/M, z8.s, z9.s\n"
+ "whilelt p1.s, x3, %x[n_channels]\n"
+ "incw x6\n"
+ "movprfx z18, z13\n fmla z18.s, p3/M, z3.s, z9.s\n"
+ "movprfx z26, z13\n fmla z26.s, p3/M, z1.s, z9.s\n"
+ "incw x3\n"
"mov p0.b, p2.b\n"
- "movprfx z20, z19\n fmla z20.s, p3/M, z0.s, z9.s\n"
- "fmla z14.s, p3/M, z5.s, z12.s\n"
+ "movprfx z15, z13\n fmla z15.s, p3/M, z0.s, z9.s\n"
+ "movprfx z30, z13\n fmla z30.s, p3/M, z7.s, z9.s\n"
"incw x20\n"
- "movprfx z13, z19\n fmla z13.s, p3/M, z7.s, z9.s\n"
- "movprfx z17, z19\n fmla z17.s, p3/M, z6.s, z9.s\n"
- "movprfx z27, z19\n fmla z27.s, p3/M, z5.s, z9.s\n"
- "movprfx z18, z19\n fmla z18.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x11, x7, LSL #2]\n"
- "fmla z31.s, p3/M, z0.s, z10.s\n"
- "movprfx z30, z19\n fmla z30.s, p3/M, z2.s, z11.s\n"
- "ld1w { z29.s }, p2/Z, [x24]\n"
- "ld1w { z11.s }, p2/Z, [x24, x27, LSL #2]\n"
- "fmla z21.s, p3/M, z4.s, z12.s\n"
- "fmla z22.s, p3/M, z2.s, z12.s\n"
- "fmla z20.s, p3/M, z1.s, z12.s\n"
- "movprfx z23, z19\n fmla z23.s, p3/M, z6.s, z29.s\n"
- "ld1w { z10.s }, p2/Z, [x11, x14, LSL #2]\n"
+ "movprfx z28, z13\n fmla z28.s, p3/M, z6.s, z9.s\n"
+ "movprfx z21, z13\n fmla z21.s, p3/M, z5.s, z9.s\n"
+ "fmla z14.s, p3/M, z5.s, z12.s\n"
+ "movprfx z24, z13\n fmla z24.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x28, x16, LSL #2]\n"
+ "fmla z19.s, p3/M, z0.s, z10.s\n"
+ "movprfx z22, z13\n fmla z22.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x26]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x11, LSL #2]\n"
+ "fmla z18.s, p3/M, z4.s, z12.s\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
+ "fmla z15.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z8.s, z12.s\n"
+ "movprfx z25, z13\n fmla z25.s, p3/M, z6.s, z11.s\n"
"fmla z14.s, p3/M, z7.s, z9.s\n"
- "fmla z13.s, p3/M, z8.s, z12.s\n"
- "fmla z17.s, p3/M, z7.s, z12.s\n"
- "fmla z30.s, p3/M, z6.s, z12.s\n"
- "movprfx z26, z19\n fmla z26.s, p3/M, z3.s, z12.s\n"
- "movprfx z28, z19\n fmla z28.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x8, x5, LSL #2]\n"
- "movprfx z24, z19\n fmla z24.s, p3/M, z8.s, z11.s\n"
- "fmla z21.s, p3/M, z6.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x8, x10, LSL #2]\n"
- "fmla z22.s, p3/M, z4.s, z9.s\n"
- "fmla z20.s, p3/M, z3.s, z9.s\n"
- "movprfx z25, z19\n fmla z25.s, p3/M, z1.s, z9.s\n"
- "movprfx z29, z19\n fmla z29.s, p3/M, z0.s, z9.s\n"
- "ld1w { z19.s }, p3/Z, [x17]\n"
- "fmla z27.s, p3/M, z8.s, z9.s\n"
+ "ld1w { z11.s }, p2/Z, [x28, x14, LSL #2]\n"
+ "fmla z28.s, p3/M, z7.s, z12.s\n"
+ "fmla z22.s, p3/M, z6.s, z12.s\n"
+ "movprfx z31, z13\n fmla z31.s, p3/M, z3.s, z12.s\n"
+ "movprfx z17, z13\n fmla z17.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x7, x4, LSL #2]\n"
+ "movprfx z20, z13\n fmla z20.s, p3/M, z8.s, z10.s\n"
+ "fmla z18.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z10.s }, p2/Z, [x7, x12, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z9.s\n"
+ "fmla z15.s, p3/M, z3.s, z9.s\n"
+ "movprfx z16, z13\n fmla z16.s, p3/M, z1.s, z9.s\n"
+ "movprfx z23, z13\n fmla z23.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z13.s }, p3/Z, [x8]\n"
+ "fmla z21.s, p3/M, z8.s, z9.s\n"
+ "fmla z24.s, p3/M, z5.s, z9.s\n"
+ "fmla z25.s, p3/M, z2.s, z9.s\n"
+ "fmla z14.s, p3/M, z8.s, z11.s\n"
+ "ld1w { z9.s }, p2/Z, [x10]\n"
+ "fmla z19.s, p3/M, z1.s, z12.s\n"
+ "fmla z30.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x11, LSL #2]\n"
+ "fmla z28.s, p3/M, z2.s, z10.s\n"
+ "fmla z22.s, p3/M, z1.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x27]\n"
+ "fmla z18.s, p3/M, z7.s, z11.s\n"
+ "fmla z31.s, p3/M, z6.s, z11.s\n"
+ "fmla z26.s, p3/M, z5.s, z11.s\n"
+ "fmla z15.s, p3/M, z4.s, z11.s\n"
+ "fmla z17.s, p3/M, z3.s, z11.s\n"
+ "fmla z16.s, p3/M, z2.s, z11.s\n"
+ "fmla z23.s, p3/M, z1.s, z11.s\n"
+ "fmla z20.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x10, x16, LSL #2]\n"
+ "fmla z21.s, p3/M, z0.s, z9.s\n"
+ "fmla z24.s, p3/M, z6.s, z10.s\n"
+ "fmla z25.s, p3/M, z3.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x27, x11, LSL #2]\n"
+ "fmla z19.s, p3/M, z3.s, z9.s\n"
+ "fmla z14.s, p3/M, z1.s, z11.s\n"
+ "fmla z22.s, p3/M, z5.s, z12.s\n"
+ "fmla z31.s, p3/M, z2.s, z12.s\n"
+ "fmla z30.s, p3/M, z4.s, z11.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x14, LSL #2]\n"
+ "fmla z28.s, p3/M, z3.s, z11.s\n"
+ "fmla z18.s, p3/M, z0.s, z11.s\n"
+ "fmla z17.s, p3/M, z8.s, z10.s\n"
+ "fmla z20.s, p3/M, z5.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x4, LSL #2]\n"
+ "fmla z21.s, p3/M, z2.s, z11.s\n"
+ "fmla z14.s, p3/M, z2.s, z12.s\n"
+ "fmla z19.s, p3/M, z5.s, z11.s\n"
+ "fmla z30.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x4, LSL #2]\n"
+ "fmla z28.s, p3/M, z4.s, z12.s\n"
+ "fmla z22.s, p3/M, z3.s, z12.s\n"
+ "fmla z18.s, p3/M, z1.s, z12.s\n"
+ "fmla z31.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z9.s }, p2/Z, [x9, x12, LSL #2]\n"
+ "fmla z25.s, p3/M, z7.s, z10.s\n"
+ "fmla z16.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z12.s }, p2/Z, [x26, x12, LSL #2]\n"
+ "fmla z21.s, p3/M, z4.s, z11.s\n"
+ "fmla z14.s, p3/M, z3.s, z11.s\n"
+ "fmla z24.s, p3/M, z1.s, z11.s\n"
+ "fmla z26.s, p3/M, z0.s, z11.s\n"
+ "fmla z19.s, p3/M, z7.s, z11.s\n"
+ "fmla z30.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x7, x16, LSL #2]\n"
+ "fmla z23.s, p3/M, z8.s, z12.s\n"
+ "fmla z20.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x28, x4, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z9.s\n"
+ "fmla z22.s, p3/M, z7.s, z9.s\n"
"fmla z18.s, p3/M, z5.s, z9.s\n"
- "fmla z23.s, p3/M, z2.s, z9.s\n"
- "fmla z14.s, p3/M, z8.s, z10.s\n"
- "ld1w { z9.s }, p2/Z, [x13]\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "fmla z13.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x13, x27, LSL #2]\n"
- "fmla z17.s, p3/M, z2.s, z11.s\n"
- "fmla z30.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28]\n"
+ "fmla z31.s, p3/M, z4.s, z9.s\n"
+ "fmla z15.s, p3/M, z2.s, z9.s\n"
+ "fmla z17.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x7, x14, LSL #2]\n"
+ "addvl x7, x7, #1\n"
"fmla z21.s, p3/M, z7.s, z10.s\n"
- "fmla z26.s, p3/M, z6.s, z10.s\n"
- "fmla z22.s, p3/M, z5.s, z10.s\n"
- "fmla z20.s, p3/M, z4.s, z10.s\n"
- "fmla z28.s, p3/M, z3.s, z10.s\n"
- "fmla z25.s, p3/M, z2.s, z10.s\n"
- "fmla z29.s, p3/M, z1.s, z10.s\n"
+ "fmla z14.s, p3/M, z6.s, z10.s\n"
+ "fmla z24.s, p3/M, z4.s, z10.s\n"
+ "fmla z26.s, p3/M, z3.s, z10.s\n"
+ "fmla z25.s, p3/M, z1.s, z10.s\n"
+ "fmla z16.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z12.s }, p2/Z, [x28, x12, LSL #2]\n"
+ "fmla z19.s, p3/M, z2.s, z11.s\n"
+ "fmla z30.s, p3/M, z1.s, z11.s\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z10.s }, p2/Z, [x9]\n"
+ "fmla z22.s, p3/M, z0.s, z9.s\n"
+ "fmla z23.s, p3/M, z2.s, z12.s\n"
+ "fmla z18.s, p3/M, z8.s, z12.s\n"
+ "fmla z31.s, p3/M, z7.s, z12.s\n"
+ "fmla z15.s, p3/M, z5.s, z12.s\n"
+ "fmla z21.s, p3/M, z3.s, z10.s\n"
"fmla z24.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x13, x7, LSL #2]\n"
- "fmla z27.s, p3/M, z0.s, z9.s\n"
- "fmla z18.s, p3/M, z6.s, z11.s\n"
- "fmla z23.s, p3/M, z3.s, z11.s\n"
- "fmla z14.s, p3/M, z1.s, z10.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x27, LSL #2]\n"
- "fmla z31.s, p3/M, z3.s, z9.s\n"
- "fmla z30.s, p3/M, z5.s, z12.s\n"
- "fmla z26.s, p3/M, z2.s, z12.s\n"
- "fmla z13.s, p3/M, z4.s, z10.s\n"
- "ld1w { z9.s }, p2/Z, [x13, x14, LSL #2]\n"
- "fmla z17.s, p3/M, z3.s, z10.s\n"
- "fmla z21.s, p3/M, z0.s, z10.s\n"
- "fmla z28.s, p3/M, z8.s, z11.s\n"
- "fmla z24.s, p3/M, z5.s, z11.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x5, LSL #2]\n"
- "fmla z27.s, p3/M, z2.s, z10.s\n"
- "fmla z14.s, p3/M, z2.s, z9.s\n"
- "fmla z31.s, p3/M, z5.s, z10.s\n"
- "fmla z13.s, p3/M, z5.s, z9.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x5, LSL #2]\n"
- "fmla z17.s, p3/M, z4.s, z9.s\n"
- "fmla z30.s, p3/M, z3.s, z9.s\n"
- "fmla z21.s, p3/M, z1.s, z9.s\n"
- "fmla z26.s, p3/M, z0.s, z9.s\n"
- "ld1w { z10.s }, p2/Z, [x12, x10, LSL #2]\n"
- "fmla z23.s, p3/M, z7.s, z12.s\n"
- "fmla z25.s, p3/M, z6.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x10, LSL #2]\n"
- "fmla z27.s, p3/M, z4.s, z11.s\n"
- "fmla z14.s, p3/M, z3.s, z11.s\n"
- "fmla z18.s, p3/M, z1.s, z11.s\n"
- "fmla z22.s, p3/M, z0.s, z11.s\n"
- "fmla z31.s, p3/M, z7.s, z11.s\n"
- "fmla z13.s, p3/M, z6.s, z11.s\n"
- "ld1w { z9.s }, p2/Z, [x8, x7, LSL #2]\n"
- "fmla z29.s, p3/M, z8.s, z12.s\n"
- "fmla z24.s, p3/M, z7.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x11, x5, LSL #2]\n"
- "fmla z17.s, p3/M, z8.s, z10.s\n"
- "fmla z30.s, p3/M, z7.s, z10.s\n"
- "fmla z21.s, p3/M, z5.s, z10.s\n"
- "fmla z26.s, p3/M, z4.s, z10.s\n"
- "fmla z20.s, p3/M, z2.s, z10.s\n"
- "fmla z28.s, p3/M, z1.s, z10.s\n"
- "ld1w { z11.s }, p2/Z, [x8, x14, LSL #2]\n"
- "addvl x8, x8, #1\n"
- "fmla z27.s, p3/M, z7.s, z12.s\n"
- "fmla z14.s, p3/M, z6.s, z12.s\n"
- "fmla z18.s, p3/M, z4.s, z12.s\n"
- "fmla z22.s, p3/M, z3.s, z12.s\n"
- "fmla z23.s, p3/M, z1.s, z12.s\n"
- "fmla z25.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x11, x10, LSL #2]\n"
- "fmla z31.s, p3/M, z2.s, z9.s\n"
- "fmla z13.s, p3/M, z1.s, z9.s\n"
- "fmla z17.s, p3/M, z0.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x12]\n"
- "fmla z29.s, p3/M, z2.s, z12.s\n"
- "fmla z30.s, p3/M, z0.s, z11.s\n"
- "fmla z27.s, p3/M, z3.s, z9.s\n"
- "fmla z18.s, p3/M, z0.s, z9.s\n"
- "fmla z21.s, p3/M, z8.s, z12.s\n"
+ "fmla z17.s, p3/M, z4.s, z12.s\n"
+ "fmla z20.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z9.s\n"
+ "fmla z28.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z11.s }, p2/Z, [x9, x11, LSL #2]\n"
+ "addvl x9, x9, #1\n"
+ "fmla z19.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x28]\n"
+ "fmla z16.s, p3/M, z4.s, z12.s\n"
+ "fmla z23.s, p3/M, z3.s, z12.s\n"
"fmla z26.s, p3/M, z7.s, z12.s\n"
- "fmla z20.s, p3/M, z5.s, z12.s\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
- "fmla z24.s, p3/M, z1.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x28, x7, LSL #2]\n"
- "fmla z13.s, p3/M, z2.s, z11.s\n"
- "fmla z17.s, p3/M, z1.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x12, x27, LSL #2]\n"
- "addvl x12, x12, #1\n"
- "fmla z31.s, p3/M, z6.s, z9.s\n"
- "ld1w { z12.s }, p2/Z, [x11]\n"
- "fmla z25.s, p3/M, z4.s, z10.s\n"
- "ld1w { z9.s }, p1/Z, [x12, x7, LSL #2]\n"
- "fmla z29.s, p3/M, z3.s, z10.s\n"
- "fmla z30.s, p3/M, z8.s, z11.s\n"
- "fmla z26.s, p3/M, z5.s, z11.s\n"
- "fmla z28.s, p3/M, z2.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x11, x27, LSL #2]\n"
- "addvl x11, x11, #1\n"
- "fmla z27.s, p3/M, z6.s, z12.s\n"
- "fmla z18.s, p3/M, z3.s, z12.s\n"
- "fmla z23.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x7, LSL #2]\n"
- "fmla z24.s, p3/M, z2.s, z11.s\n"
- "fmla z25.s, p3/M, z7.s, z12.s\n"
- "fmla z29.s, p3/M, z6.s, z12.s\n"
- "fmla z18.s, p3/M, z8.s, z10.s\n"
- "fmla z22.s, p3/M, z7.s, z10.s\n"
- "fmla z20.s, p3/M, z6.s, z10.s\n"
- "fmla z23.s, p3/M, z5.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x28, x14, LSL #2]\n"
- "fmla z28.s, p3/M, z5.s, z11.s\n"
- "fmla z25.s, p3/M, z5.s, z10.s\n"
- "fmla z29.s, p3/M, z4.s, z10.s\n"
- "fmla z24.s, p3/M, z3.s, z10.s\n"
- "fmla z26.s, p3/M, z8.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x24, x14, LSL #2]\n"
- "fmla z23.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x13, x5, LSL #2]\n"
- "fmla z22.s, p3/M, z8.s, z10.s\n"
- "fmla z20.s, p3/M, z7.s, z10.s\n"
- "addvl x24, x24, #1\n"
- "fmla z28.s, p3/M, z6.s, z10.s\n"
- "fmla z25.s, p3/M, z8.s, z11.s\n"
- "ld1w { z10.s }, p2/Z, [x13, x10, LSL #2]\n"
- "addvl x13, x13, #1\n"
- "fmla z29.s, p3/M, z7.s, z11.s\n"
- "fmla z24.s, p3/M, z6.s, z11.s\n"
- "ld1w { z11.s }, p2/Z, [x28, x5, LSL #2]\n"
- "fmla z31.s, p3/M, z4.s, z12.s\n"
- "fmla z13.s, p3/M, z3.s, z12.s\n"
- "fmax z31.s, p3/M, z31.s, z15.s\n"
- "fmax z13.s, p3/M, z13.s, z15.s\n"
- "fmla z27.s, p3/M, z1.s, z12.s\n"
- "fmla z14.s, p3/M, z0.s, z12.s\n"
- "ld1w { z0.s }, p2/Z, [x28, x10, LSL #2]\n"
- "fmax z27.s, p3/M, z27.s, z15.s\n"
- "fmla z17.s, p3/M, z5.s, z10.s\n"
- "fmla z30.s, p3/M, z4.s, z10.s\n"
- "fmax z17.s, p3/M, z17.s, z15.s\n"
- "fmax z30.s, p3/M, z30.s, z15.s\n"
- "fmla z21.s, p3/M, z2.s, z10.s\n"
- "fmla z26.s, p3/M, z1.s, z10.s\n"
- "fmax z14.s, p3/M, z14.s, z15.s\n"
- "fmax z21.s, p3/M, z21.s, z15.s\n"
- "fmla z18.s, p3/M, z7.s, z11.s\n"
- "fmla z22.s, p3/M, z6.s, z11.s\n"
- "fmax z26.s, p3/M, z26.s, z15.s\n"
- "fmax z18.s, p3/M, z18.s, z15.s\n"
- "fmla z23.s, p3/M, z4.s, z11.s\n"
- "fmla z25.s, p3/M, z3.s, z11.s\n"
- "fmax z22.s, p3/M, z22.s, z15.s\n"
- "fmax z23.s, p3/M, z23.s, z15.s\n"
- "fmla z20.s, p3/M, z8.s, z0.s\n"
- "fmla z28.s, p3/M, z7.s, z0.s\n"
- "fmax z20.s, p3/M, z20.s, z15.s\n"
- "fmax z28.s, p3/M, z28.s, z15.s\n"
- "fmla z29.s, p3/M, z5.s, z0.s\n"
- "fmla z24.s, p3/M, z4.s, z0.s\n"
- "fmax z25.s, p3/M, z25.s, z15.s\n"
- "fmax z29.s, p3/M, z29.s, z15.s\n"
- "fmax z24.s, p3/M, z24.s, z15.s\n"
- "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
- "cmp x16, %x[n_channels]\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
- "fmin z13.s, p3/M, z13.s, z16.s\n"
- "fmin z17.s, p3/M, z17.s, z16.s\n"
- "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
- "addvl x17, x17, #16\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "ld1w { z10.s }, p1/Z, [x8]\n"
- "fmin z27.s, p3/M, z27.s, z16.s\n"
- "fmin z14.s, p3/M, z14.s, z16.s\n"
- "ld1w { z11.s }, p1/Z, [x8, x27, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x12, x14, LSL #2]\n"
- "fmin z21.s, p3/M, z21.s, z16.s\n"
- "fmin z26.s, p3/M, z26.s, z16.s\n"
- "st1w { z31.s }, p0, [x15]\n"
- "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
- "fmin z18.s, p3/M, z18.s, z16.s\n"
- "fmin z22.s, p3/M, z22.s, z16.s\n"
- "st1w { z13.s }, p0, [x15, x6, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
- "fmin z20.s, p3/M, z20.s, z16.s\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "st1w { z17.s }, p0, [x15, x25, LSL #2]\n"
- "fmin z23.s, p3/M, z23.s, z16.s\n"
- "fmin z25.s, p3/M, z25.s, z16.s\n"
- "st1w { z30.s }, p0, [x15, x22, LSL #2]\n"
- "fmin z29.s, p3/M, z29.s, z16.s\n"
- "fmin z24.s, p3/M, z24.s, z16.s\n"
- "st1w { z27.s }, p0, [x9]\n"
+ "fmla z22.s, p3/M, z8.s, z11.s\n"
+ "fmla z31.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z9.s }, p1/Z, [x9, x16, LSL #2]\n"
+ "fmla z17.s, p3/M, z2.s, z11.s\n"
+ "fmla z21.s, p3/M, z6.s, z10.s\n"
+ "ld1w { z11.s }, p2/Z, [x28, x11, LSL #2]\n"
"addvl x28, x28, #1\n"
- "st1w { z14.s }, p0, [x9, x6, LSL #2]\n"
- "addvl x15, x15, #1\n"
- "st1w { z21.s }, p0, [x9, x25, LSL #2]\n"
- "addvl x17, x17, #-6\n"
- "st1w { z26.s }, p0, [x9, x22, LSL #2]\n"
- "addvl x9, x9, #1\n"
- "st1w { z18.s }, p0, [x26]\n"
- "st1w { z22.s }, p0, [x26, x6, LSL #2]\n"
- "st1w { z20.s }, p0, [x26, x25, LSL #2]\n"
- "st1w { z28.s }, p0, [x26, x22, LSL #2]\n"
+ "fmla z24.s, p3/M, z3.s, z10.s\n"
+ "fmla z25.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x16, LSL #2]\n"
+ "fmla z15.s, p3/M, z6.s, z12.s\n"
+ "fmla z20.s, p3/M, z2.s, z11.s\n"
+ "fmla z31.s, p3/M, z8.s, z11.s\n"
+ "fmla z16.s, p3/M, z7.s, z10.s\n"
+ "fmla z23.s, p3/M, z6.s, z10.s\n"
+ "fmla z17.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x14, LSL #2]\n"
"addvl x26, x26, #1\n"
- "st1w { z23.s }, p0, [x23]\n"
- "st1w { z25.s }, p0, [x23, x6, LSL #2]\n"
- "st1w { z29.s }, p0, [x23, x25, LSL #2]\n"
- "st1w { z24.s }, p0, [x23, x22, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z12.s\n"
+ "fmla z25.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x14, LSL #2]\n"
+ "fmla z16.s, p3/M, z5.s, z12.s\n"
+ "fmla z23.s, p3/M, z4.s, z12.s\n"
+ "fmla z20.s, p3/M, z3.s, z12.s\n"
+ "fmla z26.s, p3/M, z8.s, z12.s\n"
+ "fmla z15.s, p3/M, z7.s, z12.s\n"
+ "fmla z17.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x10, x12, LSL #2]\n"
+ "fmla z25.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x10, x4, LSL #2]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z16.s, p3/M, z8.s, z11.s\n"
+ "fmla z23.s, p3/M, z7.s, z11.s\n"
+ "fmla z20.s, p3/M, z6.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x27, x4, LSL #2]\n"
+ "fmla z28.s, p3/M, z5.s, z12.s\n"
+ "fmla z22.s, p3/M, z4.s, z12.s\n"
+ "fmla z19.s, p3/M, z4.s, z10.s\n"
+ "fmla z30.s, p3/M, z3.s, z10.s\n"
+ "fmla z21.s, p3/M, z1.s, z10.s\n"
+ "fmla z14.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z10.s }, p2/Z, [x27, x12, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x8, #1, MUL VL]\n"
+ "fmla z18.s, p3/M, z2.s, z12.s\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z1.s }, p3/Z, [x8, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x8, #3, MUL VL]\n"
+ "fmla z24.s, p3/M, z7.s, z11.s\n"
+ "fmla z26.s, p3/M, z6.s, z11.s\n"
+ "fmax z28.s, p3/M, z28.s, z27.s\n"
+ "fmax z22.s, p3/M, z22.s, z27.s\n"
+ "fmla z25.s, p3/M, z4.s, z11.s\n"
+ "fmla z16.s, p3/M, z3.s, z11.s\n"
+ "fmax z19.s, p3/M, z19.s, z27.s\n"
+ "fmax z30.s, p3/M, z30.s, z27.s\n"
+ "fmla z15.s, p3/M, z8.s, z10.s\n"
+ "fmla z17.s, p3/M, z7.s, z10.s\n"
+ "fmax z21.s, p3/M, z21.s, z27.s\n"
+ "fmax z14.s, p3/M, z14.s, z27.s\n"
+ "fmla z23.s, p3/M, z5.s, z10.s\n"
+ "fmla z20.s, p3/M, z4.s, z10.s\n"
+ "fmax z18.s, p3/M, z18.s, z27.s\n"
+ "fmax z31.s, p3/M, z31.s, z27.s\n"
+ "fmax z24.s, p3/M, z24.s, z27.s\n"
+ "fmax z26.s, p3/M, z26.s, z27.s\n"
+ "ld1w { z3.s }, p3/Z, [x8, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x8, #5, MUL VL]\n"
+ "fmax z25.s, p3/M, z25.s, z27.s\n"
+ "fmax z16.s, p3/M, z16.s, z27.s\n"
+ "ld1w { z5.s }, p3/Z, [x8, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x8, #7, MUL VL]\n"
+ "fmax z15.s, p3/M, z15.s, z27.s\n"
+ "fmax z17.s, p3/M, z17.s, z27.s\n"
+ "ld1w { z10.s }, p1/Z, [x7]\n"
+ "ld1w { z11.s }, p1/Z, [x7, x11, LSL #2]\n"
+ "fmax z23.s, p3/M, z23.s, z27.s\n"
+ "fmax z20.s, p3/M, z20.s, z27.s\n"
+ "ld1w { z12.s }, p1/Z, [x9, x14, LSL #2]\n"
+ "addvl x8, x8, #16\n"
+ "whilelt p2.s, x6, %x[n_channels]\n"
+ "cmp x3, %x[n_channels]\n"
+ "fmin z19.s, p3/M, z19.s, z29.s\n"
+ "fmin z30.s, p3/M, z30.s, z29.s\n"
+ "fmin z28.s, p3/M, z28.s, z29.s\n"
+ "fmin z22.s, p3/M, z22.s, z29.s\n"
+ "fmin z21.s, p3/M, z21.s, z29.s\n"
+ "ld1w { z7.s }, p3/Z, [x8, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x8, #-7, MUL VL]\n"
+ "fmin z14.s, p3/M, z14.s, z29.s\n"
+ "fmin z18.s, p3/M, z18.s, z29.s\n"
+ "st1w { z19.s }, p0, [x17]\n"
+ "fmin z31.s, p3/M, z31.s, z29.s\n"
+ "fmin z24.s, p3/M, z24.s, z29.s\n"
+ "st1w { z30.s }, p0, [x17, x5, LSL #2]\n"
+ "fmin z26.s, p3/M, z26.s, z29.s\n"
+ "fmin z15.s, p3/M, z15.s, z29.s\n"
+ "st1w { z28.s }, p0, [x17, x15, LSL #2]\n"
+ "fmin z17.s, p3/M, z17.s, z29.s\n"
+ "fmin z25.s, p3/M, z25.s, z29.s\n"
+ "st1w { z22.s }, p0, [x17, x13, LSL #2]\n"
+ "fmin z16.s, p3/M, z16.s, z29.s\n"
+ "fmin z23.s, p3/M, z23.s, z29.s\n"
+ "st1w { z21.s }, p0, [x25]\n"
+ "fmin z20.s, p3/M, z20.s, z29.s\n"
+ "addvl x27, x27, #1\n"
+ "st1w { z14.s }, p0, [x25, x5, LSL #2]\n"
+ "st1w { z18.s }, p0, [x25, x15, LSL #2]\n"
+ "addvl x17, x17, #1\n"
+ "addvl x8, x8, #-6\n"
+ "st1w { z31.s }, p0, [x25, x13, LSL #2]\n"
+ "addvl x25, x25, #1\n"
+ "st1w { z24.s }, p0, [x24]\n"
+ "st1w { z26.s }, p0, [x24, x5, LSL #2]\n"
+ "st1w { z15.s }, p0, [x24, x15, LSL #2]\n"
+ "st1w { z17.s }, p0, [x24, x13, LSL #2]\n"
+ "addvl x24, x24, #1\n"
+ "st1w { z25.s }, p0, [x23]\n"
+ "st1w { z16.s }, p0, [x23, x5, LSL #2]\n"
+ "st1w { z23.s }, p0, [x23, x15, LSL #2]\n"
+ "st1w { z20.s }, p0, [x23, x13, LSL #2]\n"
"addvl x23, x23, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z14, z19\n fmla z14.s, p3/M, z4.s, z9.s\n"
- "movprfx z31, z19\n fmla z31.s, p3/M, z8.s, z9.s\n"
- "ldr x4, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "movprfx z30, z19\n fmla z30.s, p3/M, z3.s, z9.s\n"
- "movprfx z13, z19\n fmla z13.s, p3/M, z1.s, z9.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
- "add x4, x4, #0x1\n"
- "movprfx z20, z19\n fmla z20.s, p3/M, z0.s, z9.s\n"
- "fmla z14.s, p3/M, z5.s, z12.s\n"
- "cmp x4, x20\n"
- "add x21, x16, #0x1\n"
- "movprfx z18, z19\n fmla z18.s, p3/M, z7.s, z9.s\n"
- "movprfx z28, z19\n fmla z28.s, p3/M, z6.s, z9.s\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "csel x16, x16, x21, LT\n"
- "movprfx z17, z19\n fmla z17.s, p3/M, z5.s, z9.s\n"
- "movprfx z26, z19\n fmla z26.s, p3/M, z2.s, z9.s\n"
- "ld1w { z9.s }, p2/Z, [x11, x7, LSL #2]\n"
+ "movprfx z14, z13\n fmla z14.s, p3/M, z4.s, z9.s\n"
+ "movprfx z18, z13\n fmla z18.s, p3/M, z8.s, z9.s\n"
+ "ldr x2, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x1, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z23, z13\n fmla z23.s, p3/M, z3.s, z9.s\n"
+ "movprfx z30, z13\n fmla z30.s, p3/M, z1.s, z9.s\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "movprfx z20, z13\n fmla z20.s, p3/M, z0.s, z9.s\n"
+ "movprfx z25, z13\n fmla z25.s, p3/M, z7.s, z9.s\n"
"mov p0.b, p2.b\n"
- "fmla z31.s, p3/M, z0.s, z10.s\n"
- "movprfx z27, z19\n fmla z27.s, p3/M, z2.s, z11.s\n"
- "ld1w { z29.s }, p2/Z, [x24]\n"
- "ld1w { z21.s }, p2/Z, [x24, x27, LSL #2]\n"
- "fmla z30.s, p3/M, z4.s, z12.s\n"
- "fmla z13.s, p3/M, z2.s, z12.s\n"
- "csel x4, x4, XZR, LT\n"
- "cmp x16, x20\n"
+ "movprfx z19, z13\n fmla z19.s, p3/M, z6.s, z9.s\n"
+ "movprfx z26, z13\n fmla z26.s, p3/M, z5.s, z9.s\n"
+ "add x2, x2, #0x1\n"
+ "add x20, x1, #0x1\n"
+ "fmla z14.s, p3/M, z5.s, z12.s\n"
+ "movprfx z28, z13\n fmla z28.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z15.s }, p2/Z, [x28, x16, LSL #2]\n"
+ "cmp x2, x22\n"
+ "fmla z18.s, p3/M, z0.s, z10.s\n"
+ "movprfx z9, z13\n fmla z9.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z17.s }, p2/Z, [x26]\n"
+ "ld1w { z24.s }, p2/Z, [x26, x11, LSL #2]\n"
+ "fmla z23.s, p3/M, z4.s, z12.s\n"
+ "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "csel x1, x1, x20, LT\n"
+ "csel x2, x2, XZR, LT\n"
"fmla z20.s, p3/M, z1.s, z12.s\n"
- "movprfx z10, z19\n fmla z10.s, p3/M, z6.s, z29.s\n"
- "ld1w { z29.s }, p2/Z, [x11, x14, LSL #2]\n"
- "fmla z14.s, p3/M, z7.s, z9.s\n"
- "fmla z18.s, p3/M, z8.s, z12.s\n"
- "fmla z28.s, p3/M, z7.s, z12.s\n"
- "fmla z27.s, p3/M, z6.s, z12.s\n"
- "movprfx z11, z19\n fmla z11.s, p3/M, z3.s, z12.s\n"
- "movprfx z25, z19\n fmla z25.s, p3/M, z0.s, z12.s\n"
- "ld1w { z22.s }, p2/Z, [x8, x5, LSL #2]\n"
- "movprfx z24, z19\n fmla z24.s, p3/M, z8.s, z21.s\n"
- "fmla z30.s, p3/M, z6.s, z9.s\n"
- "ld1w { z21.s }, p2/Z, [x8, x10, LSL #2]\n"
- "fmla z13.s, p3/M, z4.s, z9.s\n"
- "fmla z20.s, p3/M, z3.s, z9.s\n"
- "movprfx z12, z19\n fmla z12.s, p3/M, z1.s, z9.s\n"
- "movprfx z23, z19\n fmla z23.s, p3/M, z0.s, z9.s\n"
- "fmla z17.s, p3/M, z8.s, z9.s\n"
- "fmla z26.s, p3/M, z5.s, z9.s\n"
- "fmla z10.s, p3/M, z2.s, z9.s\n"
- "fmla z14.s, p3/M, z8.s, z29.s\n"
- "ld1w { z9.s }, p2/Z, [x13]\n"
- "fmla z31.s, p3/M, z1.s, z22.s\n"
- "fmla z18.s, p3/M, z0.s, z22.s\n"
- "ld1w { z22.s }, p2/Z, [x13, x27, LSL #2]\n"
- "fmla z28.s, p3/M, z2.s, z21.s\n"
- "fmla z27.s, p3/M, z1.s, z21.s\n"
- "ld1w { z19.s }, p2/Z, [x28]\n"
- "fmla z30.s, p3/M, z7.s, z29.s\n"
- "fmla z11.s, p3/M, z6.s, z29.s\n"
- "fmla z13.s, p3/M, z5.s, z29.s\n"
- "fmla z20.s, p3/M, z4.s, z29.s\n"
- "fmla z25.s, p3/M, z3.s, z29.s\n"
- "fmla z12.s, p3/M, z2.s, z29.s\n"
- "fmla z23.s, p3/M, z1.s, z29.s\n"
- "fmla z24.s, p3/M, z0.s, z29.s\n"
- "ld1w { z21.s }, p2/Z, [x13, x7, LSL #2]\n"
- "fmla z17.s, p3/M, z0.s, z9.s\n"
- "fmla z26.s, p3/M, z6.s, z19.s\n"
- "fmla z10.s, p3/M, z3.s, z19.s\n"
- "fmla z14.s, p3/M, z1.s, z21.s\n"
- "ld1w { z19.s }, p2/Z, [x28, x27, LSL #2]\n"
- "fmla z31.s, p3/M, z3.s, z9.s\n"
- "fmla z27.s, p3/M, z5.s, z22.s\n"
- "fmla z11.s, p3/M, z2.s, z22.s\n"
- "fmla z18.s, p3/M, z4.s, z21.s\n"
- "ld1w { z29.s }, p2/Z, [x13, x14, LSL #2]\n"
- "fmla z28.s, p3/M, z3.s, z21.s\n"
- "fmla z30.s, p3/M, z0.s, z21.s\n"
- "fmla z25.s, p3/M, z8.s, z19.s\n"
- "fmla z24.s, p3/M, z5.s, z19.s\n"
- "ld1w { z19.s }, p2/Z, [x24, x5, LSL #2]\n"
- "fmla z17.s, p3/M, z2.s, z21.s\n"
- "fmla z14.s, p3/M, z2.s, z29.s\n"
- "fmla z31.s, p3/M, z5.s, z21.s\n"
- "fmla z18.s, p3/M, z5.s, z29.s\n"
- "ld1w { z22.s }, p2/Z, [x12, x5, LSL #2]\n"
- "fmla z28.s, p3/M, z4.s, z29.s\n"
- "fmla z27.s, p3/M, z3.s, z29.s\n"
- "fmla z30.s, p3/M, z1.s, z29.s\n"
- "fmla z11.s, p3/M, z0.s, z29.s\n"
- "ld1w { z21.s }, p2/Z, [x12, x10, LSL #2]\n"
- "fmla z10.s, p3/M, z7.s, z19.s\n"
- "fmla z12.s, p3/M, z6.s, z19.s\n"
- "ld1w { z19.s }, p2/Z, [x24, x10, LSL #2]\n"
- "fmla z17.s, p3/M, z4.s, z22.s\n"
- "fmla z14.s, p3/M, z3.s, z22.s\n"
- "fmla z26.s, p3/M, z1.s, z22.s\n"
- "fmla z13.s, p3/M, z0.s, z22.s\n"
- "fmla z31.s, p3/M, z7.s, z22.s\n"
- "fmla z18.s, p3/M, z6.s, z22.s\n"
- "ld1w { z29.s }, p2/Z, [x8, x7, LSL #2]\n"
- "fmla z23.s, p3/M, z8.s, z19.s\n"
- "fmla z24.s, p3/M, z7.s, z19.s\n"
- "ld1w { z19.s }, p2/Z, [x11, x5, LSL #2]\n"
- "fmla z28.s, p3/M, z8.s, z21.s\n"
- "fmla z27.s, p3/M, z7.s, z21.s\n"
- "fmla z30.s, p3/M, z5.s, z21.s\n"
- "fmla z11.s, p3/M, z4.s, z21.s\n"
+ "fmla z25.s, p3/M, z8.s, z12.s\n"
+ "movprfx z22, z13\n fmla z22.s, p3/M, z6.s, z17.s\n"
+ "fmla z14.s, p3/M, z7.s, z15.s\n"
+ "ld1w { z10.s }, p2/Z, [x28, x14, LSL #2]\n"
+ "fmla z19.s, p3/M, z7.s, z12.s\n"
+ "fmla z9.s, p3/M, z6.s, z12.s\n"
+ "cmp x1, x21\n"
+ "movprfx z31, z13\n fmla z31.s, p3/M, z3.s, z12.s\n"
+ "movprfx z11, z13\n fmla z11.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z16.s }, p2/Z, [x7, x4, LSL #2]\n"
+ "movprfx z12, z13\n fmla z12.s, p3/M, z8.s, z24.s\n"
+ "fmla z23.s, p3/M, z6.s, z15.s\n"
+ "ld1w { z17.s }, p2/Z, [x7, x12, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z15.s\n"
+ "fmla z20.s, p3/M, z3.s, z15.s\n"
+ "movprfx z24, z13\n fmla z24.s, p3/M, z1.s, z15.s\n"
+ "fmla z13.s, p3/M, z0.s, z15.s\n"
+ "fmla z26.s, p3/M, z8.s, z15.s\n"
+ "fmla z28.s, p3/M, z5.s, z15.s\n"
+ "fmla z22.s, p3/M, z2.s, z15.s\n"
+ "fmla z14.s, p3/M, z8.s, z10.s\n"
+ "ld1w { z15.s }, p2/Z, [x10]\n"
+ "fmla z18.s, p3/M, z1.s, z16.s\n"
+ "fmla z25.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z21.s }, p2/Z, [x10, x11, LSL #2]\n"
+ "fmla z19.s, p3/M, z2.s, z17.s\n"
+ "fmla z9.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x27]\n"
+ "fmla z23.s, p3/M, z7.s, z10.s\n"
+ "fmla z31.s, p3/M, z6.s, z10.s\n"
+ "fmla z30.s, p3/M, z5.s, z10.s\n"
+ "fmla z20.s, p3/M, z4.s, z10.s\n"
+ "fmla z11.s, p3/M, z3.s, z10.s\n"
+ "fmla z24.s, p3/M, z2.s, z10.s\n"
+ "fmla z13.s, p3/M, z1.s, z10.s\n"
+ "fmla z12.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z17.s }, p2/Z, [x10, x16, LSL #2]\n"
+ "fmla z26.s, p3/M, z0.s, z15.s\n"
+ "fmla z28.s, p3/M, z6.s, z16.s\n"
+ "fmla z22.s, p3/M, z3.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x11, LSL #2]\n"
+ "fmla z18.s, p3/M, z3.s, z15.s\n"
+ "fmla z14.s, p3/M, z1.s, z17.s\n"
+ "fmla z9.s, p3/M, z5.s, z21.s\n"
+ "fmla z31.s, p3/M, z2.s, z21.s\n"
+ "fmla z25.s, p3/M, z4.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x10, x14, LSL #2]\n"
+ "fmla z19.s, p3/M, z3.s, z17.s\n"
+ "fmla z23.s, p3/M, z0.s, z17.s\n"
+ "fmla z11.s, p3/M, z8.s, z16.s\n"
+ "fmla z12.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x26, x4, LSL #2]\n"
+ "fmla z26.s, p3/M, z2.s, z17.s\n"
+ "fmla z14.s, p3/M, z2.s, z21.s\n"
+ "fmla z18.s, p3/M, z5.s, z17.s\n"
+ "fmla z25.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z17.s }, p2/Z, [x9, x4, LSL #2]\n"
+ "fmla z19.s, p3/M, z4.s, z21.s\n"
+ "fmla z9.s, p3/M, z3.s, z21.s\n"
+ "fmla z23.s, p3/M, z1.s, z21.s\n"
+ "fmla z31.s, p3/M, z0.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x9, x12, LSL #2]\n"
+ "fmla z22.s, p3/M, z7.s, z16.s\n"
+ "fmla z24.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x26, x12, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z17.s\n"
+ "fmla z14.s, p3/M, z3.s, z17.s\n"
+ "fmla z28.s, p3/M, z1.s, z17.s\n"
+ "fmla z30.s, p3/M, z0.s, z17.s\n"
+ "fmla z18.s, p3/M, z7.s, z17.s\n"
+ "fmla z25.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x7, x16, LSL #2]\n"
+ "fmla z13.s, p3/M, z8.s, z16.s\n"
+ "fmla z12.s, p3/M, z7.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x4, LSL #2]\n"
+ "fmla z19.s, p3/M, z8.s, z21.s\n"
+ "fmla z9.s, p3/M, z7.s, z21.s\n"
+ "fmla z23.s, p3/M, z5.s, z21.s\n"
+ "fmla z31.s, p3/M, z4.s, z21.s\n"
"fmla z20.s, p3/M, z2.s, z21.s\n"
- "fmla z25.s, p3/M, z1.s, z21.s\n"
- "ld1w { z22.s }, p2/Z, [x8, x14, LSL #2]\n"
- "fmla z17.s, p3/M, z7.s, z19.s\n"
- "fmla z14.s, p3/M, z6.s, z19.s\n"
- "fmla z26.s, p3/M, z4.s, z19.s\n"
- "fmla z13.s, p3/M, z3.s, z19.s\n"
- "fmla z10.s, p3/M, z1.s, z19.s\n"
- "fmla z12.s, p3/M, z0.s, z19.s\n"
- "ld1w { z21.s }, p2/Z, [x11, x10, LSL #2]\n"
- "fmla z31.s, p3/M, z2.s, z29.s\n"
- "fmla z18.s, p3/M, z1.s, z29.s\n"
- "fmla z28.s, p3/M, z0.s, z29.s\n"
- "ld1w { z29.s }, p2/Z, [x12]\n"
- "fmla z23.s, p3/M, z2.s, z21.s\n"
- "fmla z27.s, p3/M, z0.s, z22.s\n"
- "fmla z17.s, p3/M, z3.s, z29.s\n"
- "fmla z26.s, p3/M, z0.s, z29.s\n"
- "fmla z30.s, p3/M, z8.s, z21.s\n"
- "fmla z11.s, p3/M, z7.s, z21.s\n"
- "fmla z20.s, p3/M, z5.s, z21.s\n"
- "fmla z25.s, p3/M, z4.s, z21.s\n"
- "fmla z24.s, p3/M, z1.s, z21.s\n"
- "ld1w { z19.s }, p2/Z, [x28, x7, LSL #2]\n"
- "fmla z18.s, p3/M, z2.s, z22.s\n"
- "fmla z28.s, p3/M, z1.s, z22.s\n"
- "ld1w { z21.s }, p2/Z, [x12, x27, LSL #2]\n"
- "fmla z31.s, p3/M, z6.s, z29.s\n"
- "ld1w { z29.s }, p2/Z, [x11]\n"
- "fmla z12.s, p3/M, z4.s, z19.s\n"
- "fmla z23.s, p3/M, z3.s, z19.s\n"
- "fmla z27.s, p3/M, z8.s, z21.s\n"
- "fmla z11.s, p3/M, z5.s, z21.s\n"
+ "fmla z11.s, p3/M, z1.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x7, x14, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z16.s\n"
+ "fmla z14.s, p3/M, z6.s, z16.s\n"
+ "fmla z28.s, p3/M, z4.s, z16.s\n"
+ "fmla z30.s, p3/M, z3.s, z16.s\n"
+ "fmla z22.s, p3/M, z1.s, z16.s\n"
+ "fmla z24.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x12, LSL #2]\n"
+ "fmla z18.s, p3/M, z2.s, z17.s\n"
+ "fmla z25.s, p3/M, z1.s, z17.s\n"
+ "fmla z19.s, p3/M, z0.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x9]\n"
+ "fmla z9.s, p3/M, z0.s, z21.s\n"
+ "fmla z13.s, p3/M, z2.s, z16.s\n"
+ "fmla z23.s, p3/M, z8.s, z16.s\n"
+ "fmla z31.s, p3/M, z7.s, z16.s\n"
+ "fmla z20.s, p3/M, z5.s, z16.s\n"
+ "fmla z26.s, p3/M, z3.s, z17.s\n"
+ "fmla z28.s, p3/M, z0.s, z17.s\n"
+ "fmla z11.s, p3/M, z4.s, z16.s\n"
+ "fmla z12.s, p3/M, z1.s, z16.s\n"
+ "ld1w { z15.s }, p2/Z, [x27, x16, LSL #2]\n"
"fmla z25.s, p3/M, z2.s, z21.s\n"
- "ld1w { z9.s }, p2/Z, [x11, x27, LSL #2]\n"
- "fmla z17.s, p3/M, z6.s, z29.s\n"
- "fmla z26.s, p3/M, z3.s, z29.s\n"
- "fmla z10.s, p3/M, z0.s, z29.s\n"
- "ld1w { z22.s }, p2/Z, [x24, x7, LSL #2]\n"
- "fmla z24.s, p3/M, z2.s, z9.s\n"
- "fmla z12.s, p3/M, z7.s, z22.s\n"
- "fmla z23.s, p3/M, z6.s, z22.s\n"
- "fmla z26.s, p3/M, z8.s, z19.s\n"
- "fmla z13.s, p3/M, z7.s, z19.s\n"
- "fmla z20.s, p3/M, z6.s, z19.s\n"
- "fmla z10.s, p3/M, z5.s, z19.s\n"
- "ld1w { z21.s }, p2/Z, [x28, x14, LSL #2]\n"
- "fmla z25.s, p3/M, z5.s, z9.s\n"
- "fmla z12.s, p3/M, z5.s, z21.s\n"
- "fmla z23.s, p3/M, z4.s, z21.s\n"
- "fmla z24.s, p3/M, z3.s, z21.s\n"
- "fmla z11.s, p3/M, z8.s, z9.s\n"
- "ld1w { z19.s }, p2/Z, [x24, x14, LSL #2]\n"
- "fmla z10.s, p3/M, z8.s, z22.s\n"
- "ld1w { z22.s }, p2/Z, [x13, x5, LSL #2]\n"
- "fmla z13.s, p3/M, z8.s, z21.s\n"
- "fmla z20.s, p3/M, z7.s, z21.s\n"
- "fmla z25.s, p3/M, z6.s, z21.s\n"
- "fmla z12.s, p3/M, z8.s, z19.s\n"
- "ld1w { z29.s }, p2/Z, [x13, x10, LSL #2]\n"
- "fmla z23.s, p3/M, z7.s, z19.s\n"
- "fmla z24.s, p3/M, z6.s, z19.s\n"
- "ld1w { z21.s }, p2/Z, [x28, x5, LSL #2]\n"
- "fmla z31.s, p3/M, z4.s, z22.s\n"
- "fmla z18.s, p3/M, z3.s, z22.s\n"
- "fmax z31.s, p3/M, z31.s, z15.s\n"
- "fmax z18.s, p3/M, z18.s, z15.s\n"
- "fmla z17.s, p3/M, z1.s, z22.s\n"
- "fmla z14.s, p3/M, z0.s, z22.s\n"
- "ld1w { z9.s }, p2/Z, [x28, x10, LSL #2]\n"
- "fmax z17.s, p3/M, z17.s, z15.s\n"
- "fmla z28.s, p3/M, z5.s, z29.s\n"
- "fmla z27.s, p3/M, z4.s, z29.s\n"
- "fmax z28.s, p3/M, z28.s, z15.s\n"
- "fmax z27.s, p3/M, z27.s, z15.s\n"
- "fmla z30.s, p3/M, z2.s, z29.s\n"
- "fmla z11.s, p3/M, z1.s, z29.s\n"
- "fmax z14.s, p3/M, z14.s, z15.s\n"
- "fmax z30.s, p3/M, z30.s, z15.s\n"
- "fmla z26.s, p3/M, z7.s, z21.s\n"
+ "fmla z19.s, p3/M, z1.s, z21.s\n"
+ "ld1w { z16.s }, p2/Z, [x9, x11, LSL #2]\n"
+ "fmla z18.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x28]\n"
+ "fmla z24.s, p3/M, z4.s, z15.s\n"
+ "fmla z13.s, p3/M, z3.s, z15.s\n"
+ "fmla z30.s, p3/M, z7.s, z15.s\n"
+ "fmla z9.s, p3/M, z8.s, z16.s\n"
+ "fmla z31.s, p3/M, z5.s, z16.s\n"
+ "fmla z11.s, p3/M, z2.s, z16.s\n"
+ "fmla z26.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x11, LSL #2]\n"
+ "fmla z28.s, p3/M, z3.s, z17.s\n"
+ "fmla z22.s, p3/M, z0.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x26, x16, LSL #2]\n"
+ "fmla z20.s, p3/M, z6.s, z15.s\n"
+ "fmla z12.s, p3/M, z2.s, z16.s\n"
+ "fmla z31.s, p3/M, z8.s, z16.s\n"
+ "fmla z24.s, p3/M, z7.s, z21.s\n"
"fmla z13.s, p3/M, z6.s, z21.s\n"
- "fmax z11.s, p3/M, z11.s, z15.s\n"
- "fmax z26.s, p3/M, z26.s, z15.s\n"
- "fmla z10.s, p3/M, z4.s, z21.s\n"
- "fmla z12.s, p3/M, z3.s, z21.s\n"
- "fmax z13.s, p3/M, z13.s, z15.s\n"
- "fmax z10.s, p3/M, z10.s, z15.s\n"
- "fmla z20.s, p3/M, z8.s, z9.s\n"
- "fmla z25.s, p3/M, z7.s, z9.s\n"
- "fmax z20.s, p3/M, z20.s, z15.s\n"
- "fmax z25.s, p3/M, z25.s, z15.s\n"
- "fmla z23.s, p3/M, z5.s, z9.s\n"
- "fmla z24.s, p3/M, z4.s, z9.s\n"
- "fmax z12.s, p3/M, z12.s, z15.s\n"
- "fmax z23.s, p3/M, z23.s, z15.s\n"
- "fmax z24.s, p3/M, z24.s, z15.s\n"
- "fmin z31.s, p3/M, z31.s, z16.s\n"
- "st1w { z31.s }, p0, [x15]\n"
- "fmin z18.s, p3/M, z18.s, z16.s\n"
- "fmin z28.s, p3/M, z28.s, z16.s\n"
- "st1w { z18.s }, p0, [x15, x6, LSL #2]\n"
- "fmin z27.s, p3/M, z27.s, z16.s\n"
- "fmin z17.s, p3/M, z17.s, z16.s\n"
- "st1w { z28.s }, p0, [x15, x25, LSL #2]\n"
- "fmin z14.s, p3/M, z14.s, z16.s\n"
- "fmin z30.s, p3/M, z30.s, z16.s\n"
- "st1w { z27.s }, p0, [x15, x22, LSL #2]\n"
- "fmin z11.s, p3/M, z11.s, z16.s\n"
- "fmin z26.s, p3/M, z26.s, z16.s\n"
- "st1w { z17.s }, p0, [x9]\n"
- "fmin z13.s, p3/M, z13.s, z16.s\n"
- "fmin z20.s, p3/M, z20.s, z16.s\n"
- "st1w { z14.s }, p0, [x9, x6, LSL #2]\n"
- "fmin z25.s, p3/M, z25.s, z16.s\n"
- "fmin z10.s, p3/M, z10.s, z16.s\n"
- "st1w { z30.s }, p0, [x9, x25, LSL #2]\n"
- "fmin z12.s, p3/M, z12.s, z16.s\n"
- "fmin z23.s, p3/M, z23.s, z16.s\n"
- "st1w { z11.s }, p0, [x9, x22, LSL #2]\n"
- "fmin z24.s, p3/M, z24.s, z16.s\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z13.s }, p0, [x26, x6, LSL #2]\n"
- "st1w { z20.s }, p0, [x26, x25, LSL #2]\n"
- "st1w { z25.s }, p0, [x26, x22, LSL #2]\n"
- "st1w { z10.s }, p0, [x23]\n"
- "st1w { z12.s }, p0, [x23, x6, LSL #2]\n"
- "st1w { z23.s }, p0, [x23, x25, LSL #2]\n"
- "st1w { z24.s }, p0, [x23, x22, LSL #2]\n"
+ "fmla z11.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "fmla z28.s, p3/M, z8.s, z15.s\n"
+ "fmla z22.s, p3/M, z5.s, z15.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x14, LSL #2]\n"
+ "fmla z24.s, p3/M, z5.s, z16.s\n"
+ "fmla z13.s, p3/M, z4.s, z16.s\n"
+ "fmla z12.s, p3/M, z3.s, z16.s\n"
+ "fmla z30.s, p3/M, z8.s, z16.s\n"
+ "fmla z20.s, p3/M, z7.s, z16.s\n"
+ "fmla z11.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z15.s }, p2/Z, [x10, x12, LSL #2]\n"
+ "fmla z22.s, p3/M, z8.s, z21.s\n"
+ "ld1w { z16.s }, p2/Z, [x10, x4, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z17.s\n"
+ "fmla z13.s, p3/M, z7.s, z17.s\n"
+ "fmla z12.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x4, LSL #2]\n"
+ "fmla z19.s, p3/M, z5.s, z15.s\n"
+ "fmla z9.s, p3/M, z4.s, z15.s\n"
+ "fmla z18.s, p3/M, z4.s, z16.s\n"
+ "fmla z25.s, p3/M, z3.s, z16.s\n"
+ "fmla z26.s, p3/M, z1.s, z16.s\n"
+ "fmla z14.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x12, LSL #2]\n"
+ "fmla z23.s, p3/M, z2.s, z15.s\n"
+ "fmla z31.s, p3/M, z1.s, z15.s\n"
+ "fmla z28.s, p3/M, z7.s, z17.s\n"
+ "fmla z30.s, p3/M, z6.s, z17.s\n"
+ "fmax z19.s, p3/M, z19.s, z27.s\n"
+ "fmax z9.s, p3/M, z9.s, z27.s\n"
+ "fmla z22.s, p3/M, z4.s, z17.s\n"
+ "fmla z24.s, p3/M, z3.s, z17.s\n"
+ "fmax z18.s, p3/M, z18.s, z27.s\n"
+ "fmax z25.s, p3/M, z25.s, z27.s\n"
+ "fmla z20.s, p3/M, z8.s, z16.s\n"
+ "fmla z11.s, p3/M, z7.s, z16.s\n"
+ "fmax z26.s, p3/M, z26.s, z27.s\n"
+ "fmax z14.s, p3/M, z14.s, z27.s\n"
+ "fmla z13.s, p3/M, z5.s, z16.s\n"
+ "fmla z12.s, p3/M, z4.s, z16.s\n"
+ "fmax z23.s, p3/M, z23.s, z27.s\n"
+ "fmax z31.s, p3/M, z31.s, z27.s\n"
+ "fmax z28.s, p3/M, z28.s, z27.s\n"
+ "fmax z30.s, p3/M, z30.s, z27.s\n"
+ "fmax z22.s, p3/M, z22.s, z27.s\n"
+ "fmax z24.s, p3/M, z24.s, z27.s\n"
+ "fmax z20.s, p3/M, z20.s, z27.s\n"
+ "fmax z11.s, p3/M, z11.s, z27.s\n"
+ "fmax z13.s, p3/M, z13.s, z27.s\n"
+ "fmax z12.s, p3/M, z12.s, z27.s\n"
+ "fmin z18.s, p3/M, z18.s, z29.s\n"
+ "fmin z25.s, p3/M, z25.s, z29.s\n"
+ "fmin z19.s, p3/M, z19.s, z29.s\n"
+ "fmin z9.s, p3/M, z9.s, z29.s\n"
+ "fmin z26.s, p3/M, z26.s, z29.s\n"
+ "fmin z14.s, p3/M, z14.s, z29.s\n"
+ "fmin z23.s, p3/M, z23.s, z29.s\n"
+ "fmin z31.s, p3/M, z31.s, z29.s\n"
+ "st1w { z18.s }, p0, [x17]\n"
+ "fmin z28.s, p3/M, z28.s, z29.s\n"
+ "fmin z30.s, p3/M, z30.s, z29.s\n"
+ "st1w { z25.s }, p0, [x17, x5, LSL #2]\n"
+ "fmin z20.s, p3/M, z20.s, z29.s\n"
+ "fmin z11.s, p3/M, z11.s, z29.s\n"
+ "st1w { z19.s }, p0, [x17, x15, LSL #2]\n"
+ "fmin z22.s, p3/M, z22.s, z29.s\n"
+ "fmin z24.s, p3/M, z24.s, z29.s\n"
+ "st1w { z9.s }, p0, [x17, x13, LSL #2]\n"
+ "fmin z13.s, p3/M, z13.s, z29.s\n"
+ "fmin z12.s, p3/M, z12.s, z29.s\n"
+ "st1w { z26.s }, p0, [x25]\n"
+ "st1w { z14.s }, p0, [x25, x5, LSL #2]\n"
+ "st1w { z23.s }, p0, [x25, x15, LSL #2]\n"
+ "st1w { z31.s }, p0, [x25, x13, LSL #2]\n"
+ "st1w { z28.s }, p0, [x24]\n"
+ "st1w { z30.s }, p0, [x24, x5, LSL #2]\n"
+ "st1w { z20.s }, p0, [x24, x15, LSL #2]\n"
+ "st1w { z11.s }, p0, [x24, x13, LSL #2]\n"
+ "st1w { z22.s }, p0, [x23]\n"
+ "st1w { z24.s }, p0, [x23, x5, LSL #2]\n"
+ "st1w { z13.s }, p0, [x23, x15, LSL #2]\n"
+ "st1w { z12.s }, p0, [x23, x13, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
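Before the next file, it may help to see what the scalar preamble of these tiled direct kernels is computing. The "// offset = ..." comments in the s2 direct kernel below spell it out: the tile indices are turned into an element offset, scaled, and added to the base pointers before the vector loop runs. A minimal C++ sketch of that arithmetic follows; the helper name is illustrative, and the scale factor is an assumption matching the immediates the assembly loads (#0x4 for kernel_stride, #0x2 for output_tile_size), not a function from the library itself.

    #include <cstddef>

    // Hedged sketch of the tile-addressing preamble, mirroring the
    // "// offset = ..." comments in the assembly. stride_scale stands in
    // for kernel_stride (inputs) or output_tile_size (outputs); both are
    // small immediates in the real code and are assumptions here.
    static inline float *tile_base(float *ptr,
                                   std::size_t tile_i, std::size_t tile_j,
                                   std::size_t ld_row, std::size_t ld_col,
                                   std::size_t stride_scale)
    {
        std::size_t offset = tile_i * ld_row;  // offset  = tile_i * ld_input_row
        offset += tile_j * ld_col;             // offset += tile_j * ld_input_col
        offset *= stride_scale;                // offset *= kernel_stride * output_size
        return ptr + offset;                   // ptr += offset * sizeof(float),
                                               // the LSL #2 in the asm
    }

Pointer arithmetic on float* already scales by sizeof(float), which is what the LSL #2 shifts express at the assembly level.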
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
index 3db248924f..587f18d90d 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -101,607 +101,607 @@ void sve_fp32_nhwc_3x3_s1_output4x4_mla_depthfirst_indirect_impl(
"ptrue p3.b\n"
"ldr x7, [%x[params_struct], %[offsetof_args_params]]\n"
"add x8, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ld1w { z17.s }, p3/Z, [x7]\n"
- "cntw x17\n"
- "mov x16, #0x0\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_outptrs]]\n"
+ "cntw x16\n"
+ "mov x15, #0x0\n"
+ "ldp x23, x22, [x8, #0x0]\n"
+ "ldp x21, x20, [x8, #0x10]\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "ld1w { z22.s }, p3/Z, [x7]\n"
"ld1w { z0.s }, p3/Z, [x7, #1, MUL VL]\n"
"ld1w { z1.s }, p3/Z, [x7, #2, MUL VL]\n"
- "whilelt p2.s, XZR, %x[n_channels]\n"
"ld1w { z2.s }, p3/Z, [x7, #3, MUL VL]\n"
"ld1w { z3.s }, p3/Z, [x7, #4, MUL VL]\n"
- "cmp x17, %x[n_channels]\n"
"ld1w { z4.s }, p3/Z, [x7, #5, MUL VL]\n"
+ "cmp x16, %x[n_channels]\n"
+ "sub x14, XZR, x16\n"
"ld1w { z5.s }, p3/Z, [x7, #6, MUL VL]\n"
- "sub x15, XZR, x17\n"
"ld1w { z6.s }, p3/Z, [x7, #7, MUL VL]\n"
"addvl x7, x7, #16\n"
- "ldp x23, x22, [x8, #0x0]\n"
- "ldp x21, x20, [x8, #0x10]\n"
- "ldr x14, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ld1rw { z16.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z19.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1rw { z19.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rw { z14.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1w { z9.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x22, x15, LSL #2]\n"
"ld1w { z7.s }, p3/Z, [x7, #-8, MUL VL]\n"
"ld1w { z8.s }, p3/Z, [x7, #-7, MUL VL]\n"
"addvl x7, x7, #-6\n"
- "ld1w { z9.s }, p2/Z, [x23, x16, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x22, x16, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x21, x16, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x20, x15, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z20, z17\n fmla z20.s, p3/M, z4.s, z9.s\n"
- "movprfx z26, z17\n fmla z26.s, p3/M, z8.s, z9.s\n"
+ "movprfx z29, z22\n fmla z29.s, p3/M, z4.s, z9.s\n"
+ "movprfx z18, z22\n fmla z18.s, p3/M, z8.s, z9.s\n"
"ldr x27, [x8, #0x20]\n"
"ldr x24, [x8, #0x30]\n"
- "movprfx z24, z17\n fmla z24.s, p3/M, z3.s, z9.s\n"
- "movprfx z30, z17\n fmla z30.s, p3/M, z1.s, z9.s\n"
+ "movprfx z17, z22\n fmla z17.s, p3/M, z3.s, z9.s\n"
+ "movprfx z26, z22\n fmla z26.s, p3/M, z1.s, z9.s\n"
"ldr x23, [x8, #0x28]\n"
"ldr x22, [x8, #0x38]\n"
- "movprfx z31, z17\n fmla z31.s, p3/M, z0.s, z9.s\n"
- "movprfx z22, z17\n fmla z22.s, p3/M, z7.s, z9.s\n"
+ "movprfx z21, z22\n fmla z21.s, p3/M, z0.s, z9.s\n"
+ "movprfx z28, z22\n fmla z28.s, p3/M, z7.s, z9.s\n"
"ldr x26, [x8, #0x40]\n"
- "ldr x21, [x8, #0x48]\n"
- "movprfx z27, z17\n fmla z27.s, p3/M, z6.s, z9.s\n"
- "fmla z20.s, p3/M, z5.s, z12.s\n"
+ "ldr x20, [x8, #0x48]\n"
+ "movprfx z25, z22\n fmla z25.s, p3/M, z6.s, z9.s\n"
+ "movprfx z30, z22\n fmla z30.s, p3/M, z5.s, z9.s\n"
"ldr x25, [x8, #0x50]\n"
- "ldr x20, [x8, #0x58]\n"
- "movprfx z14, z17\n fmla z14.s, p3/M, z5.s, z9.s\n"
- "movprfx z23, z17\n fmla z23.s, p3/M, z2.s, z9.s\n"
- "ld1w { z25.s }, p2/Z, [x24, x16, LSL #2]\n"
+ "ldr x21, [x8, #0x58]\n"
+ "fmla z29.s, p3/M, z5.s, z12.s\n"
+ "movprfx z23, z22\n fmla z23.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, x15, LSL #2]\n"
"ldr x13, [x8, #0x70]\n"
- "fmla z26.s, p3/M, z0.s, z10.s\n"
- "movprfx z9, z17\n fmla z9.s, p3/M, z2.s, z11.s\n"
- "ld1w { z28.s }, p2/Z, [x27, x16, LSL #2]\n"
- "ld1w { z21.s }, p2/Z, [x23, x16, LSL #2]\n"
- "fmla z24.s, p3/M, z4.s, z12.s\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "fmla z18.s, p3/M, z0.s, z10.s\n"
+ "movprfx z10, z22\n fmla z10.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z9.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z17.s, p3/M, z4.s, z12.s\n"
+ "fmla z26.s, p3/M, z2.s, z12.s\n"
"ldr x24, [x8, #0x60]\n"
"ldr x23, [x8, #0x68]\n"
- "fmla z31.s, p3/M, z1.s, z12.s\n"
- "fmla z22.s, p3/M, z8.s, z12.s\n"
- "incw x15\n"
+ "fmla z21.s, p3/M, z1.s, z12.s\n"
+ "fmla z28.s, p3/M, z8.s, z12.s\n"
+ "incw x14\n"
"mov p1.b, p2.b\n"
- "fmla z27.s, p3/M, z7.s, z12.s\n"
- "movprfx z15, z17\n fmla z15.s, p3/M, z6.s, z28.s\n"
- "ld1w { z10.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "fmla z25.s, p3/M, z7.s, z12.s\n"
+ "movprfx z31, z22\n fmla z31.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x20, x15, LSL #2]\n"
"ldr x28, [x8, #0x88]\n"
- "fmla z20.s, p3/M, z7.s, z25.s\n"
- "fmla z9.s, p3/M, z6.s, z12.s\n"
- "ldr x12, [x14, #0x0]\n"
- "ldr x11, [x14, #0x8]\n"
- "movprfx z11, z17\n fmla z11.s, p3/M, z3.s, z12.s\n"
- "movprfx z13, z17\n fmla z13.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z16.s\n"
+ "fmla z10.s, p3/M, z6.s, z12.s\n"
+ "ldr x12, [x17, #0x0]\n"
+ "ldr x11, [x17, #0x8]\n"
+ "movprfx z15, z22\n fmla z15.s, p3/M, z3.s, z12.s\n"
+ "movprfx z20, z22\n fmla z20.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x22, x15, LSL #2]\n"
"ldr x22, [x8, #0x78]\n"
- "movprfx z28, z17\n fmla z28.s, p3/M, z8.s, z21.s\n"
- "fmla z24.s, p3/M, z6.s, z25.s\n"
- "ld1w { z29.s }, p2/Z, [x26, x16, LSL #2]\n"
- "ldr x21, [x8, #0x80]\n"
- "fmla z30.s, p3/M, z4.s, z25.s\n"
- "fmla z31.s, p3/M, z3.s, z25.s\n"
- "ldr x10, [x14, #0x10]\n"
- "ldr x9, [x14, #0x18]\n"
- "movprfx z18, z17\n fmla z18.s, p3/M, z1.s, z25.s\n"
- "movprfx z21, z17\n fmla z21.s, p3/M, z0.s, z25.s\n"
- "whilelt p0.s, x17, %x[n_channels]\n"
- "ld1w { z17.s }, p3/Z, [x7]\n"
- "fmla z14.s, p3/M, z8.s, z25.s\n"
- "fmla z23.s, p3/M, z5.s, z25.s\n"
- "fmla z15.s, p3/M, z2.s, z25.s\n"
- "fmla z26.s, p3/M, z1.s, z12.s\n"
- "ld1w { z25.s }, p2/Z, [x25, x16, LSL #2]\n"
+ "movprfx z24, z22\n fmla z24.s, p3/M, z8.s, z27.s\n"
+ "fmla z17.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z12.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "ldr x20, [x8, #0x80]\n"
+ "fmla z26.s, p3/M, z4.s, z16.s\n"
+ "fmla z21.s, p3/M, z3.s, z16.s\n"
+ "ldr x10, [x17, #0x10]\n"
+ "ldr x9, [x17, #0x18]\n"
+ "movprfx z13, z22\n fmla z13.s, p3/M, z1.s, z16.s\n"
+ "movprfx z27, z22\n fmla z27.s, p3/M, z0.s, z16.s\n"
+ "whilelt p0.s, x16, %x[n_channels]\n"
+ "ld1w { z22.s }, p3/Z, [x7]\n"
+ "fmla z30.s, p3/M, z8.s, z16.s\n"
+ "fmla z23.s, p3/M, z5.s, z16.s\n"
+ "fmla z31.s, p3/M, z2.s, z16.s\n"
+ "fmla z18.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x15, LSL #2]\n"
"ldr x27, [x8, #0x90]\n"
- "fmla z22.s, p3/M, z0.s, z12.s\n"
- "fmla z27.s, p3/M, z2.s, z29.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
- "ldr x20, [x8, #0x98]\n"
- "fmla z20.s, p3/M, z8.s, z10.s\n"
- "fmla z9.s, p3/M, z1.s, z29.s\n"
- "ld1w { z29.s }, p2/Z, [x24, x16, LSL #2]\n"
+ "fmla z28.s, p3/M, z0.s, z11.s\n"
+ "fmla z25.s, p3/M, z2.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x21, [x8, #0x98]\n"
+ "fmla z29.s, p3/M, z8.s, z9.s\n"
+ "fmla z10.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x15, LSL #2]\n"
"ldr x26, [x8, #0xa0]\n"
- "fmla z24.s, p3/M, z7.s, z10.s\n"
- "fmla z11.s, p3/M, z6.s, z10.s\n"
- "fmla z30.s, p3/M, z5.s, z10.s\n"
- "fmla z31.s, p3/M, z4.s, z10.s\n"
- "fmla z13.s, p3/M, z3.s, z10.s\n"
- "fmla z18.s, p3/M, z2.s, z10.s\n"
- "fmla z21.s, p3/M, z1.s, z10.s\n"
- "fmla z28.s, p3/M, z0.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x23, x16, LSL #2]\n"
+ "fmla z17.s, p3/M, z7.s, z9.s\n"
+ "fmla z15.s, p3/M, z6.s, z9.s\n"
+ "fmla z26.s, p3/M, z5.s, z9.s\n"
+ "fmla z21.s, p3/M, z4.s, z9.s\n"
+ "fmla z20.s, p3/M, z3.s, z9.s\n"
+ "fmla z13.s, p3/M, z2.s, z9.s\n"
+ "fmla z27.s, p3/M, z1.s, z9.s\n"
+ "fmla z24.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z9.s }, p2/Z, [x23, x15, LSL #2]\n"
"ldr x25, [x8, #0xa8]\n"
- "fmla z26.s, p3/M, z3.s, z25.s\n"
- "fmla z14.s, p3/M, z0.s, z25.s\n"
- "fmla z23.s, p3/M, z6.s, z29.s\n"
- "fmla z15.s, p3/M, z3.s, z29.s\n"
- "ld1w { z25.s }, p2/Z, [x13, x16, LSL #2]\n"
+ "fmla z18.s, p3/M, z3.s, z16.s\n"
+ "fmla z30.s, p3/M, z0.s, z16.s\n"
+ "fmla z23.s, p3/M, z6.s, z12.s\n"
+ "fmla z31.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x13, x15, LSL #2]\n"
"ldr x24, [x8, #0xb0]\n"
- "fmla z22.s, p3/M, z4.s, z10.s\n"
- "fmla z27.s, p3/M, z3.s, z10.s\n"
- "fmla z20.s, p3/M, z1.s, z10.s\n"
- "fmla z9.s, p3/M, z5.s, z12.s\n"
- "fmla z11.s, p3/M, z2.s, z12.s\n"
- "fmla z24.s, p3/M, z0.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "fmla z28.s, p3/M, z4.s, z9.s\n"
+ "fmla z25.s, p3/M, z3.s, z9.s\n"
+ "fmla z29.s, p3/M, z1.s, z9.s\n"
+ "fmla z10.s, p3/M, z5.s, z11.s\n"
+ "fmla z15.s, p3/M, z2.s, z11.s\n"
+ "fmla z17.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z11.s }, p2/Z, [x22, x15, LSL #2]\n"
"ldr x23, [x8, #0xb8]\n"
- "fmla z13.s, p3/M, z8.s, z25.s\n"
- "fmla z28.s, p3/M, z5.s, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "fmla z20.s, p3/M, z8.s, z12.s\n"
+ "fmla z24.s, p3/M, z5.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x20, x15, LSL #2]\n"
"ldr x22, [x8, #0xc0]\n"
- "fmla z26.s, p3/M, z5.s, z10.s\n"
- "fmla z14.s, p3/M, z2.s, z10.s\n"
- "ld1w { z29.s }, p2/Z, [x28, x16, LSL #2]\n"
- "ldr x21, [x8, #0xc8]\n"
- "fmla z22.s, p3/M, z5.s, z12.s\n"
- "fmla z27.s, p3/M, z4.s, z12.s\n"
- "fmla z20.s, p3/M, z2.s, z12.s\n"
- "fmla z9.s, p3/M, z3.s, z12.s\n"
- "fmla z24.s, p3/M, z1.s, z12.s\n"
- "fmla z11.s, p3/M, z0.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x20, x16, LSL #2]\n"
+ "fmla z18.s, p3/M, z5.s, z9.s\n"
+ "fmla z30.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x15, LSL #2]\n"
+ "ldr x20, [x8, #0xc8]\n"
+ "fmla z28.s, p3/M, z5.s, z11.s\n"
+ "fmla z25.s, p3/M, z4.s, z11.s\n"
+ "fmla z29.s, p3/M, z2.s, z11.s\n"
+ "fmla z10.s, p3/M, z3.s, z11.s\n"
+ "fmla z17.s, p3/M, z1.s, z11.s\n"
+ "fmla z15.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z9.s }, p2/Z, [x21, x15, LSL #2]\n"
"ldr x28, [x8, #0xd8]\n"
- "fmla z15.s, p3/M, z7.s, z25.s\n"
- "fmla z18.s, p3/M, z6.s, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x27, x16, LSL #2]\n"
- "ldr x20, [x8, #0xd0]\n"
- "fmla z26.s, p3/M, z7.s, z29.s\n"
- "fmla z22.s, p3/M, z6.s, z29.s\n"
- "fmla z14.s, p3/M, z4.s, z29.s\n"
- "fmla z20.s, p3/M, z3.s, z29.s\n"
- "fmla z23.s, p3/M, z1.s, z29.s\n"
- "fmla z30.s, p3/M, z0.s, z29.s\n"
- "ld1w { z29.s }, p2/Z, [x26, x16, LSL #2]\n"
+ "fmla z31.s, p3/M, z7.s, z12.s\n"
+ "fmla z13.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z11.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "ldr x21, [x8, #0xd0]\n"
+ "fmla z18.s, p3/M, z7.s, z16.s\n"
+ "fmla z28.s, p3/M, z6.s, z16.s\n"
+ "fmla z30.s, p3/M, z4.s, z16.s\n"
+ "fmla z29.s, p3/M, z3.s, z16.s\n"
+ "fmla z23.s, p3/M, z1.s, z16.s\n"
+ "fmla z26.s, p3/M, z0.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x26, x15, LSL #2]\n"
"ldr x27, [x8, #0xe0]\n"
- "fmla z27.s, p3/M, z8.s, z10.s\n"
- "fmla z21.s, p3/M, z8.s, z25.s\n"
- "fmla z28.s, p3/M, z7.s, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x25, x16, LSL #2]\n"
- "fmla z13.s, p3/M, z1.s, z10.s\n"
+ "fmla z25.s, p3/M, z8.s, z9.s\n"
+ "fmla z27.s, p3/M, z8.s, z11.s\n"
+ "fmla z24.s, p3/M, z7.s, z11.s\n"
+ "ld1w { z12.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "fmla z20.s, p3/M, z1.s, z9.s\n"
"ldr x26, [x8, #0xe8]\n"
- "fmla z9.s, p3/M, z7.s, z10.s\n"
- "fmla z24.s, p3/M, z5.s, z10.s\n"
- "fmla z11.s, p3/M, z4.s, z10.s\n"
- "fmla z31.s, p3/M, z2.s, z10.s\n"
- "ld1w { z10.s }, p2/Z, [x24, x16, LSL #2]\n"
+ "fmla z10.s, p3/M, z7.s, z9.s\n"
+ "fmla z17.s, p3/M, z5.s, z9.s\n"
+ "fmla z15.s, p3/M, z4.s, z9.s\n"
+ "fmla z21.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z11.s }, p2/Z, [x24, x15, LSL #2]\n"
"ldr x25, [x8, #0xf0]\n"
- "fmla z26.s, p3/M, z2.s, z29.s\n"
- "fmla z22.s, p3/M, z1.s, z29.s\n"
- "fmla z27.s, p3/M, z0.s, z29.s\n"
- "fmla z14.s, p3/M, z7.s, z25.s\n"
- "ld1w { z29.s }, p2/Z, [x23, x16, LSL #2]\n"
+ "fmla z18.s, p3/M, z2.s, z16.s\n"
+ "fmla z28.s, p3/M, z1.s, z16.s\n"
+ "fmla z25.s, p3/M, z0.s, z16.s\n"
+ "fmla z30.s, p3/M, z7.s, z12.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
"ldr x24, [x8, #0xf8]\n"
- "fmla z20.s, p3/M, z6.s, z25.s\n"
- "fmla z23.s, p3/M, z4.s, z25.s\n"
- "fmla z30.s, p3/M, z3.s, z25.s\n"
- "fmla z15.s, p3/M, z1.s, z25.s\n"
- "fmla z18.s, p3/M, z0.s, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x22, x16, LSL #2]\n"
- "fmla z13.s, p3/M, z4.s, z25.s\n"
+ "fmla z29.s, p3/M, z6.s, z12.s\n"
+ "fmla z23.s, p3/M, z4.s, z12.s\n"
+ "fmla z26.s, p3/M, z3.s, z12.s\n"
+ "fmla z31.s, p3/M, z1.s, z12.s\n"
+ "fmla z13.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z9.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z28.s, p3/M, z2.s, z11.s\n"
"ldr x23, [x8, #0x100]\n"
- "fmla z21.s, p3/M, z2.s, z25.s\n"
- "fmla z22.s, p3/M, z2.s, z10.s\n"
- "fmla z27.s, p3/M, z1.s, z10.s\n"
- "fmla z9.s, p3/M, z0.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x21, x16, LSL #2]\n"
- "ldr x22, [x8, #0x108]\n"
- "fmla z26.s, p3/M, z6.s, z29.s\n"
- "fmla z14.s, p3/M, z3.s, z29.s\n"
- "fmla z23.s, p3/M, z0.s, z29.s\n"
- "fmla z24.s, p3/M, z8.s, z25.s\n"
- "ld1w { z10.s }, p2/Z, [x20, x16, LSL #2]\n"
- "ldr x21, [x8, #0x110]\n"
- "fmla z11.s, p3/M, z7.s, z25.s\n"
- "fmla z31.s, p3/M, z5.s, z25.s\n"
- "fmla z28.s, p3/M, z1.s, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x28, x16, LSL #2]\n"
- "fmla z13.s, p3/M, z2.s, z12.s\n"
- "ldr x20, [x8, #0x118]\n"
- "fmla z15.s, p3/M, z0.s, z10.s\n"
- "fmla z18.s, p3/M, z4.s, z25.s\n"
- "fmla z21.s, p3/M, z3.s, z25.s\n"
- "fmla z9.s, p3/M, z8.s, z12.s\n"
- "fmla z11.s, p3/M, z5.s, z12.s\n"
- "fmla z14.s, p3/M, z6.s, z10.s\n"
- "ld1w { z12.s }, p2/Z, [x27, x16, LSL #2]\n"
- "fmla z23.s, p3/M, z3.s, z10.s\n"
- "ld1w { z29.s }, p2/Z, [x26, x16, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z25.s\n"
- "fmla z31.s, p3/M, z6.s, z25.s\n"
- "fmla z15.s, p3/M, z5.s, z25.s\n"
- "fmla z13.s, p3/M, z5.s, z12.s\n"
- "fmla z28.s, p3/M, z2.s, z12.s\n"
- "fmla z18.s, p3/M, z7.s, z29.s\n"
- "fmla z21.s, p3/M, z6.s, z29.s\n"
- "fmla z23.s, p3/M, z8.s, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x25, x16, LSL #2]\n"
- "fmla z15.s, p3/M, z8.s, z29.s\n"
- "ld1w { z29.s }, p2/Z, [x23, x16, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z25.s\n"
- "fmla z31.s, p3/M, z7.s, z25.s\n"
- "fmla z13.s, p3/M, z6.s, z25.s\n"
- "fmla z18.s, p3/M, z5.s, z25.s\n"
- "fmla z21.s, p3/M, z4.s, z25.s\n"
- "fmla z28.s, p3/M, z3.s, z25.s\n"
- "ld1w { z25.s }, p2/Z, [x22, x16, LSL #2]\n"
- "ldp x27, x26, [x8, #0x0]\n"
- "fmla z11.s, p3/M, z8.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x24, x16, LSL #2]\n"
- "fmla z26.s, p3/M, z4.s, z29.s\n"
- "fmax z26.s, p3/M, z26.s, z16.s\n"
- "fmla z22.s, p3/M, z3.s, z29.s\n"
- "fmla z27.s, p3/M, z5.s, z25.s\n"
- "fmax z22.s, p3/M, z22.s, z16.s\n"
- "fmax z27.s, p3/M, z27.s, z16.s\n"
- "fmla z9.s, p3/M, z4.s, z25.s\n"
- "fmla z18.s, p3/M, z8.s, z12.s\n"
- "fmax z9.s, p3/M, z9.s, z16.s\n"
- "fmin z26.s, p3/M, z26.s, z19.s\n"
- "fmla z21.s, p3/M, z7.s, z12.s\n"
- "fmla z28.s, p3/M, z6.s, z12.s\n"
- "ld1w { z10.s }, p2/Z, [x21, x16, LSL #2]\n"
- "fmin z22.s, p3/M, z22.s, z19.s\n"
- "fmla z14.s, p3/M, z1.s, z29.s\n"
- "fmla z20.s, p3/M, z0.s, z29.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
- "fmin z27.s, p3/M, z27.s, z19.s\n"
- "fmla z24.s, p3/M, z2.s, z25.s\n"
- "fmla z11.s, p3/M, z1.s, z25.s\n"
- "fmin z9.s, p3/M, z9.s, z19.s\n"
- "fmax z14.s, p3/M, z14.s, z16.s\n"
- "fmla z23.s, p3/M, z7.s, z10.s\n"
- "fmla z30.s, p3/M, z6.s, z10.s\n"
- "fmax z20.s, p3/M, z20.s, z16.s\n"
- "fmax z24.s, p3/M, z24.s, z16.s\n"
- "fmla z31.s, p3/M, z8.s, z12.s\n"
- "fmla z13.s, p3/M, z7.s, z12.s\n"
- "fmax z11.s, p3/M, z11.s, z16.s\n"
- "st1w { z26.s }, p1, [x12, x15, LSL #2]\n"
- "st1w { z22.s }, p1, [x11, x15, LSL #2]\n"
- "ldr x23, [x14, #0x20]\n"
- "ldr x22, [x14, #0x28]\n"
- "fmla z15.s, p3/M, z4.s, z10.s\n"
- "st1w { z27.s }, p1, [x10, x15, LSL #2]\n"
- "ldr x21, [x14, #0x30]\n"
- "fmla z18.s, p3/M, z3.s, z10.s\n"
- "fmla z21.s, p3/M, z5.s, z12.s\n"
- "st1w { z9.s }, p1, [x9, x15, LSL #2]\n"
- "ldr x20, [x14, #0x38]\n"
- "fmla z28.s, p3/M, z4.s, z12.s\n"
+ "fmla z25.s, p3/M, z1.s, z11.s\n"
+ "fmla z10.s, p3/M, z0.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "ldr x20, [x8, #0x108]\n"
+ "fmla z18.s, p3/M, z6.s, z16.s\n"
+ "fmla z30.s, p3/M, z3.s, z16.s\n"
+ "fmla z20.s, p3/M, z4.s, z9.s\n"
+ "fmla z27.s, p3/M, z2.s, z9.s\n"
+ "fmla z23.s, p3/M, z0.s, z16.s\n"
+ "fmla z17.s, p3/M, z8.s, z9.s\n"
+ "ld1w { z12.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ldr x22, [x8, #0x110]\n"
+ "fmla z15.s, p3/M, z7.s, z9.s\n"
+ "fmla z21.s, p3/M, z5.s, z9.s\n"
+ "fmla z24.s, p3/M, z1.s, z9.s\n"
+ "ld1w { z16.s }, p2/Z, [x28, x15, LSL #2]\n"
+ "fmla z10.s, p3/M, z8.s, z11.s\n"
+ "ldr x21, [x8, #0x118]\n"
+ "fmla z20.s, p3/M, z2.s, z11.s\n"
+ "fmla z31.s, p3/M, z0.s, z12.s\n"
+ "fmla z30.s, p3/M, z6.s, z12.s\n"
+ "fmla z23.s, p3/M, z3.s, z12.s\n"
+ "ld1w { z9.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "fmla z13.s, p3/M, z4.s, z16.s\n"
+ "fmla z27.s, p3/M, z3.s, z16.s\n"
+ "fmla z15.s, p3/M, z5.s, z11.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z16.s\n"
+ "fmla z21.s, p3/M, z6.s, z16.s\n"
+ "fmla z31.s, p3/M, z5.s, z16.s\n"
+ "fmla z23.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "fmla z20.s, p3/M, z5.s, z12.s\n"
+ "fmla z24.s, p3/M, z2.s, z12.s\n"
+ "fmla z13.s, p3/M, z7.s, z9.s\n"
+ "fmla z27.s, p3/M, z6.s, z9.s\n"
+ "fmla z15.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z31.s, p3/M, z8.s, z9.s\n"
+ "fmla z26.s, p3/M, z8.s, z16.s\n"
+ "ld1w { z9.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z21.s, p3/M, z7.s, z16.s\n"
+ "fmla z20.s, p3/M, z6.s, z16.s\n"
+ "fmla z13.s, p3/M, z5.s, z16.s\n"
+ "fmla z24.s, p3/M, z3.s, z16.s\n"
+ "fmla z27.s, p3/M, z4.s, z16.s\n"
+ "ld1w { z11.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z18.s, p3/M, z4.s, z9.s\n"
+ "ldp x20, x26, [x8, #0x0]\n"
+ "fmla z28.s, p3/M, z3.s, z9.s\n"
+ "fmla z30.s, p3/M, z1.s, z9.s\n"
+ "fmla z29.s, p3/M, z0.s, z9.s\n"
+ "ld1w { z16.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x7, #1, MUL VL]\n"
+ "fmla z25.s, p3/M, z5.s, z11.s\n"
+ "fmla z10.s, p3/M, z4.s, z11.s\n"
+ "fmla z13.s, p3/M, z8.s, z12.s\n"
+ "ld1w { z9.s }, p0/Z, [x20, x16, LSL #2]\n"
+ "fmla z27.s, p3/M, z7.s, z12.s\n"
+ "fmla z24.s, p3/M, z6.s, z12.s\n"
+ "ld1w { z12.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmax z18.s, p3/M, z18.s, z19.s\n"
+ "fmla z17.s, p3/M, z2.s, z11.s\n"
+ "fmla z15.s, p3/M, z1.s, z11.s\n"
+ "fmax z28.s, p3/M, z28.s, z19.s\n"
+ "fmax z30.s, p3/M, z30.s, z19.s\n"
+ "fmax z25.s, p3/M, z25.s, z19.s\n"
+ "fmla z21.s, p3/M, z8.s, z16.s\n"
+ "fmla z20.s, p3/M, z7.s, z16.s\n"
+ "fmax z29.s, p3/M, z29.s, z19.s\n"
+ "fmax z10.s, p3/M, z10.s, z19.s\n"
+ "fmla z23.s, p3/M, z7.s, z12.s\n"
+ "fmla z26.s, p3/M, z6.s, z12.s\n"
+ "fmin z18.s, p3/M, z18.s, z14.s\n"
+ "fmin z28.s, p3/M, z28.s, z14.s\n"
+ "fmla z31.s, p3/M, z4.s, z12.s\n"
+ "fmla z13.s, p3/M, z3.s, z12.s\n"
"ldp x25, x24, [x8, #0x10]\n"
- "fmin z14.s, p3/M, z14.s, z19.s\n"
- "fmin z20.s, p3/M, z20.s, z19.s\n"
- "st1w { z14.s }, p1, [x23, x15, LSL #2]\n"
- "ldr x23, [x14, #0x40]\n"
- "fmin z24.s, p3/M, z24.s, z19.s\n"
- "fmin z11.s, p3/M, z11.s, z19.s\n"
- "st1w { z20.s }, p1, [x22, x15, LSL #2]\n"
- "ldr x22, [x14, #0x48]\n"
- "fmax z23.s, p3/M, z23.s, z16.s\n"
- "fmax z30.s, p3/M, z30.s, z16.s\n"
- "st1w { z24.s }, p1, [x21, x15, LSL #2]\n"
- "ldr x21, [x14, #0x50]\n"
- "fmax z31.s, p3/M, z31.s, z16.s\n"
- "fmax z13.s, p3/M, z13.s, z16.s\n"
- "st1w { z11.s }, p1, [x20, x15, LSL #2]\n"
- "ldr x20, [x14, #0x58]\n"
+ "fmin z25.s, p3/M, z25.s, z14.s\n"
+ "fmax z17.s, p3/M, z17.s, z19.s\n"
+ "fmla z27.s, p3/M, z5.s, z16.s\n"
+ "fmla z24.s, p3/M, z4.s, z16.s\n"
+ "fmin z10.s, p3/M, z10.s, z14.s\n"
+ "fmax z15.s, p3/M, z15.s, z19.s\n"
+ "st1w { z18.s }, p1, [x12, x14, LSL #2]\n"
+ "ldr x23, [x17, #0x20]\n"
+ "st1w { z28.s }, p1, [x11, x14, LSL #2]\n"
+ "ldr x22, [x17, #0x28]\n"
+ "fmin z30.s, p3/M, z30.s, z14.s\n"
+ "fmin z29.s, p3/M, z29.s, z14.s\n"
+ "st1w { z25.s }, p1, [x10, x14, LSL #2]\n"
+ "ldr x21, [x17, #0x30]\n"
+ "fmin z17.s, p3/M, z17.s, z14.s\n"
+ "fmax z23.s, p3/M, z23.s, z19.s\n"
+ "st1w { z10.s }, p1, [x9, x14, LSL #2]\n"
+ "ldr x20, [x17, #0x38]\n"
+ "fmin z15.s, p3/M, z15.s, z14.s\n"
+ "fmax z26.s, p3/M, z26.s, z19.s\n"
+ "fmax z21.s, p3/M, z21.s, z19.s\n"
+ "fmax z20.s, p3/M, z20.s, z19.s\n"
+ "st1w { z30.s }, p1, [x23, x14, LSL #2]\n"
+ "ldr x23, [x17, #0x40]\n"
+ "st1w { z29.s }, p1, [x22, x14, LSL #2]\n"
+ "ldr x22, [x17, #0x48]\n"
+ "incw x15\n"
+ "ld1w { z10.s }, p0/Z, [x26, x16, LSL #2]\n"
+ "st1w { z17.s }, p1, [x21, x14, LSL #2]\n"
+ "ldr x21, [x17, #0x50]\n"
+ "ld1w { z11.s }, p0/Z, [x25, x16, LSL #2]\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "st1w { z15.s }, p1, [x20, x14, LSL #2]\n"
+ "ldr x20, [x17, #0x58]\n"
+ "ld1w { z12.s }, p0/Z, [x24, x16, LSL #2]\n"
"incw x16\n"
- "ld1w { z9.s }, p0/Z, [x27, x17, LSL #2]\n"
- "ld1w { z10.s }, p0/Z, [x26, x17, LSL #2]\n"
- "fmin z23.s, p3/M, z23.s, z19.s\n"
- "ld1w { z11.s }, p0/Z, [x25, x17, LSL #2]\n"
- "ld1w { z12.s }, p0/Z, [x24, x17, LSL #2]\n"
- "incw x17\n"
- "fmin z30.s, p3/M, z30.s, z19.s\n"
- "fmin z31.s, p3/M, z31.s, z19.s\n"
- "fmin z13.s, p3/M, z13.s, z19.s\n"
- "st1w { z23.s }, p1, [x23, x15, LSL #2]\n"
- "ldr x23, [x14, #0x60]\n"
- "fmax z15.s, p3/M, z15.s, z16.s\n"
- "fmax z18.s, p3/M, z18.s, z16.s\n"
- "st1w { z30.s }, p1, [x22, x15, LSL #2]\n"
- "ldr x22, [x14, #0x68]\n"
- "fmax z21.s, p3/M, z21.s, z16.s\n"
- "fmax z28.s, p3/M, z28.s, z16.s\n"
- "st1w { z31.s }, p1, [x21, x15, LSL #2]\n"
- "ldr x21, [x14, #0x70]\n"
- "st1w { z13.s }, p1, [x20, x15, LSL #2]\n"
- "ldr x20, [x14, #0x78]\n"
- "ld1w { z0.s }, p3/Z, [x7, #1, MUL VL]\n"
- "whilelt p2.s, x16, %x[n_channels]\n"
+ "fmin z26.s, p3/M, z26.s, z14.s\n"
+ "fmin z21.s, p3/M, z21.s, z14.s\n"
"ld1w { z1.s }, p3/Z, [x7, #2, MUL VL]\n"
"ld1w { z2.s }, p3/Z, [x7, #3, MUL VL]\n"
- "cmp x17, %x[n_channels]\n"
- "fmin z15.s, p3/M, z15.s, z19.s\n"
+ "fmin z20.s, p3/M, z20.s, z14.s\n"
+ "fmax z31.s, p3/M, z31.s, z19.s\n"
+ "st1w { z23.s }, p1, [x23, x14, LSL #2]\n"
+ "ldr x23, [x17, #0x60]\n"
+ "fmax z13.s, p3/M, z13.s, z19.s\n"
+ "fmax z27.s, p3/M, z27.s, z19.s\n"
"ld1w { z3.s }, p3/Z, [x7, #4, MUL VL]\n"
"ld1w { z4.s }, p3/Z, [x7, #5, MUL VL]\n"
- "fmin z18.s, p3/M, z18.s, z19.s\n"
- "fmin z21.s, p3/M, z21.s, z19.s\n"
+ "fmax z24.s, p3/M, z24.s, z19.s\n"
+ "st1w { z26.s }, p1, [x22, x14, LSL #2]\n"
+ "ldr x22, [x17, #0x68]\n"
"ld1w { z5.s }, p3/Z, [x7, #6, MUL VL]\n"
+ "st1w { z21.s }, p1, [x21, x14, LSL #2]\n"
+ "ldr x21, [x17, #0x70]\n"
"ld1w { z6.s }, p3/Z, [x7, #7, MUL VL]\n"
"addvl x7, x7, #16\n"
- "fmin z28.s, p3/M, z28.s, z19.s\n"
- "st1w { z15.s }, p1, [x23, x15, LSL #2]\n"
+ "st1w { z20.s }, p1, [x20, x14, LSL #2]\n"
+ "ldr x20, [x17, #0x78]\n"
+ "whilelt p2.s, x15, %x[n_channels]\n"
+ "cmp x16, %x[n_channels]\n"
+ "fmin z31.s, p3/M, z31.s, z14.s\n"
+ "fmin z13.s, p3/M, z13.s, z14.s\n"
+ "fmin z27.s, p3/M, z27.s, z14.s\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
"ld1w { z7.s }, p3/Z, [x7, #-8, MUL VL]\n"
"ld1w { z8.s }, p3/Z, [x7, #-7, MUL VL]\n"
"addvl x7, x7, #-6\n"
- "st1w { z18.s }, p1, [x22, x15, LSL #2]\n"
- "st1w { z21.s }, p1, [x21, x15, LSL #2]\n"
- "st1w { z28.s }, p1, [x20, x15, LSL #2]\n"
+ "st1w { z31.s }, p1, [x23, x14, LSL #2]\n"
+ "st1w { z13.s }, p1, [x22, x14, LSL #2]\n"
+ "st1w { z27.s }, p1, [x21, x14, LSL #2]\n"
+ "st1w { z24.s }, p1, [x20, x14, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z14, z17\n fmla z14.s, p3/M, z4.s, z9.s\n"
- "movprfx z18, z17\n fmla z18.s, p3/M, z8.s, z9.s\n"
+ "movprfx z16, z22\n fmla z16.s, p3/M, z4.s, z9.s\n"
+ "movprfx z30, z22\n fmla z30.s, p3/M, z8.s, z9.s\n"
"ldr x27, [x8, #0x20]\n"
"ldr x24, [x8, #0x30]\n"
- "movprfx z15, z17\n fmla z15.s, p3/M, z3.s, z9.s\n"
- "movprfx z30, z17\n fmla z30.s, p3/M, z1.s, z9.s\n"
+ "movprfx z13, z22\n fmla z13.s, p3/M, z3.s, z9.s\n"
+ "movprfx z15, z22\n fmla z15.s, p3/M, z1.s, z9.s\n"
"ldr x23, [x8, #0x28]\n"
"ldr x22, [x8, #0x38]\n"
- "movprfx z20, z17\n fmla z20.s, p3/M, z0.s, z9.s\n"
- "movprfx z13, z17\n fmla z13.s, p3/M, z7.s, z9.s\n"
+ "movprfx z20, z22\n fmla z20.s, p3/M, z0.s, z9.s\n"
+ "movprfx z18, z22\n fmla z18.s, p3/M, z7.s, z9.s\n"
"ldr x26, [x8, #0x40]\n"
"ldr x21, [x8, #0x48]\n"
- "movprfx z22, z17\n fmla z22.s, p3/M, z6.s, z9.s\n"
- "fmla z14.s, p3/M, z5.s, z12.s\n"
+ "movprfx z26, z22\n fmla z26.s, p3/M, z6.s, z9.s\n"
+ "movprfx z31, z22\n fmla z31.s, p3/M, z5.s, z9.s\n"
"ldr x25, [x8, #0x50]\n"
"ldr x20, [x8, #0x58]\n"
- "movprfx z27, z17\n fmla z27.s, p3/M, z5.s, z9.s\n"
- "movprfx z31, z17\n fmla z31.s, p3/M, z2.s, z9.s\n"
- "ld1w { z23.s }, p2/Z, [x24, x16, LSL #2]\n"
+ "fmla z16.s, p3/M, z5.s, z12.s\n"
+ "movprfx z28, z22\n fmla z28.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z27.s }, p2/Z, [x24, x15, LSL #2]\n"
"ldr x13, [x8, #0x70]\n"
- "fmla z18.s, p3/M, z0.s, z10.s\n"
- "movprfx z9, z17\n fmla z9.s, p3/M, z2.s, z11.s\n"
- "ld1w { z21.s }, p2/Z, [x27, x16, LSL #2]\n"
- "ld1w { z25.s }, p2/Z, [x23, x16, LSL #2]\n"
- "fmla z15.s, p3/M, z4.s, z12.s\n"
- "fmla z30.s, p3/M, z2.s, z12.s\n"
+ "fmla z30.s, p3/M, z0.s, z10.s\n"
+ "movprfx z29, z22\n fmla z29.s, p3/M, z2.s, z11.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "ld1w { z24.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "fmla z13.s, p3/M, z4.s, z12.s\n"
+ "fmla z15.s, p3/M, z2.s, z12.s\n"
"ldr x24, [x8, #0x60]\n"
"ldr x23, [x8, #0x68]\n"
"fmla z20.s, p3/M, z1.s, z12.s\n"
- "fmla z13.s, p3/M, z8.s, z12.s\n"
- "incw x15\n"
+ "fmla z18.s, p3/M, z8.s, z12.s\n"
+ "incw x14\n"
"mov p0.b, p2.b\n"
- "fmla z22.s, p3/M, z7.s, z12.s\n"
- "movprfx z28, z17\n fmla z28.s, p3/M, z6.s, z21.s\n"
- "ld1w { z29.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z12.s\n"
+ "movprfx z9, z22\n fmla z9.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z10.s }, p2/Z, [x21, x15, LSL #2]\n"
"ldr x28, [x8, #0x88]\n"
- "fmla z14.s, p3/M, z7.s, z23.s\n"
- "fmla z9.s, p3/M, z6.s, z12.s\n"
- "ldr x12, [x14, #0x0]\n"
- "ldr x11, [x14, #0x8]\n"
- "movprfx z11, z17\n fmla z11.s, p3/M, z3.s, z12.s\n"
- "movprfx z10, z17\n fmla z10.s, p3/M, z0.s, z12.s\n"
- "ld1w { z12.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "fmla z16.s, p3/M, z7.s, z27.s\n"
+ "fmla z29.s, p3/M, z6.s, z12.s\n"
+ "ldr x12, [x17, #0x0]\n"
+ "ldr x11, [x17, #0x8]\n"
+ "movprfx z11, z22\n fmla z11.s, p3/M, z3.s, z12.s\n"
+ "movprfx z23, z22\n fmla z23.s, p3/M, z0.s, z12.s\n"
+ "ld1w { z21.s }, p2/Z, [x22, x15, LSL #2]\n"
"ldr x22, [x8, #0x78]\n"
- "movprfx z26, z17\n fmla z26.s, p3/M, z8.s, z25.s\n"
- "fmla z15.s, p3/M, z6.s, z23.s\n"
- "ld1w { z21.s }, p2/Z, [x26, x16, LSL #2]\n"
+ "movprfx z25, z22\n fmla z25.s, p3/M, z8.s, z24.s\n"
+ "fmla z13.s, p3/M, z6.s, z27.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x15, LSL #2]\n"
"ldr x21, [x8, #0x80]\n"
- "fmla z30.s, p3/M, z4.s, z23.s\n"
- "fmla z20.s, p3/M, z3.s, z23.s\n"
- "ldr x10, [x14, #0x10]\n"
- "ldr x9, [x14, #0x18]\n"
- "movprfx z25, z17\n fmla z25.s, p3/M, z1.s, z23.s\n"
- "movprfx z24, z17\n fmla z24.s, p3/M, z0.s, z23.s\n"
- "fmla z27.s, p3/M, z8.s, z23.s\n"
- "fmla z31.s, p3/M, z5.s, z23.s\n"
- "fmla z28.s, p3/M, z2.s, z23.s\n"
- "fmla z18.s, p3/M, z1.s, z12.s\n"
- "ld1w { z23.s }, p2/Z, [x25, x16, LSL #2]\n"
+ "fmla z15.s, p3/M, z4.s, z27.s\n"
+ "fmla z20.s, p3/M, z3.s, z27.s\n"
+ "ldr x10, [x17, #0x10]\n"
+ "ldr x9, [x17, #0x18]\n"
+ "movprfx z24, z22\n fmla z24.s, p3/M, z1.s, z27.s\n"
+ "movprfx z12, z22\n fmla z12.s, p3/M, z0.s, z27.s\n"
+ "fmla z31.s, p3/M, z8.s, z27.s\n"
+ "fmla z28.s, p3/M, z5.s, z27.s\n"
+ "fmla z9.s, p3/M, z2.s, z27.s\n"
+ "fmla z30.s, p3/M, z1.s, z21.s\n"
+ "ld1w { z27.s }, p2/Z, [x25, x15, LSL #2]\n"
"ldr x27, [x8, #0x90]\n"
- "fmla z13.s, p3/M, z0.s, z12.s\n"
- "fmla z22.s, p3/M, z2.s, z21.s\n"
- "ld1w { z12.s }, p2/Z, [x20, x16, LSL #2]\n"
+ "fmla z18.s, p3/M, z0.s, z21.s\n"
+ "fmla z26.s, p3/M, z2.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x20, x15, LSL #2]\n"
"ldr x20, [x8, #0x98]\n"
- "fmla z14.s, p3/M, z8.s, z29.s\n"
- "fmla z9.s, p3/M, z1.s, z21.s\n"
- "ld1w { z21.s }, p2/Z, [x24, x16, LSL #2]\n"
+ "fmla z16.s, p3/M, z8.s, z10.s\n"
+ "fmla z29.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x15, LSL #2]\n"
"ldr x26, [x8, #0xa0]\n"
- "fmla z15.s, p3/M, z7.s, z29.s\n"
- "fmla z11.s, p3/M, z6.s, z29.s\n"
- "fmla z30.s, p3/M, z5.s, z29.s\n"
- "fmla z20.s, p3/M, z4.s, z29.s\n"
- "fmla z10.s, p3/M, z3.s, z29.s\n"
- "fmla z25.s, p3/M, z2.s, z29.s\n"
- "fmla z24.s, p3/M, z1.s, z29.s\n"
- "fmla z26.s, p3/M, z0.s, z29.s\n"
- "ld1w { z29.s }, p2/Z, [x23, x16, LSL #2]\n"
+ "fmla z13.s, p3/M, z7.s, z10.s\n"
+ "fmla z11.s, p3/M, z6.s, z10.s\n"
+ "fmla z15.s, p3/M, z5.s, z10.s\n"
+ "fmla z20.s, p3/M, z4.s, z10.s\n"
+ "fmla z23.s, p3/M, z3.s, z10.s\n"
+ "fmla z24.s, p3/M, z2.s, z10.s\n"
+ "fmla z12.s, p3/M, z1.s, z10.s\n"
+ "fmla z25.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z22.s }, p2/Z, [x23, x15, LSL #2]\n"
"ldr x25, [x8, #0xa8]\n"
- "fmla z18.s, p3/M, z3.s, z23.s\n"
- "fmla z27.s, p3/M, z0.s, z23.s\n"
- "fmla z31.s, p3/M, z6.s, z21.s\n"
- "fmla z28.s, p3/M, z3.s, z21.s\n"
- "ld1w { z21.s }, p2/Z, [x13, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z3.s, z27.s\n"
+ "fmla z31.s, p3/M, z0.s, z27.s\n"
+ "fmla z28.s, p3/M, z6.s, z17.s\n"
+ "fmla z9.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x13, x15, LSL #2]\n"
"ldr x24, [x8, #0xb0]\n"
- "fmla z13.s, p3/M, z4.s, z29.s\n"
- "fmla z22.s, p3/M, z3.s, z29.s\n"
- "fmla z14.s, p3/M, z1.s, z29.s\n"
- "fmla z9.s, p3/M, z5.s, z12.s\n"
- "fmla z11.s, p3/M, z2.s, z12.s\n"
- "fmla z15.s, p3/M, z0.s, z29.s\n"
- "ld1w { z17.s }, p2/Z, [x22, x16, LSL #2]\n"
+ "fmla z18.s, p3/M, z4.s, z22.s\n"
+ "fmla z26.s, p3/M, z3.s, z22.s\n"
+ "fmla z16.s, p3/M, z1.s, z22.s\n"
+ "fmla z29.s, p3/M, z5.s, z21.s\n"
+ "fmla z11.s, p3/M, z2.s, z21.s\n"
+ "fmla z13.s, p3/M, z0.s, z22.s\n"
+ "ld1w { z21.s }, p2/Z, [x22, x15, LSL #2]\n"
"ldr x23, [x8, #0xb8]\n"
- "fmla z10.s, p3/M, z8.s, z21.s\n"
- "fmla z26.s, p3/M, z5.s, z21.s\n"
- "ld1w { z23.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "fmla z23.s, p3/M, z8.s, z17.s\n"
+ "fmla z25.s, p3/M, z5.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x21, x15, LSL #2]\n"
"ldr x22, [x8, #0xc0]\n"
- "fmla z18.s, p3/M, z5.s, z29.s\n"
- "fmla z27.s, p3/M, z2.s, z29.s\n"
- "ld1w { z21.s }, p2/Z, [x28, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z5.s, z22.s\n"
+ "fmla z31.s, p3/M, z2.s, z22.s\n"
+ "ld1w { z22.s }, p2/Z, [x28, x15, LSL #2]\n"
"ldr x21, [x8, #0xc8]\n"
- "fmla z13.s, p3/M, z5.s, z17.s\n"
- "fmla z22.s, p3/M, z4.s, z17.s\n"
- "fmla z14.s, p3/M, z2.s, z17.s\n"
- "fmla z9.s, p3/M, z3.s, z17.s\n"
- "fmla z15.s, p3/M, z1.s, z17.s\n"
- "fmla z11.s, p3/M, z0.s, z17.s\n"
- "ld1w { z29.s }, p2/Z, [x20, x16, LSL #2]\n"
+ "fmla z18.s, p3/M, z5.s, z21.s\n"
+ "fmla z26.s, p3/M, z4.s, z21.s\n"
+ "fmla z16.s, p3/M, z2.s, z21.s\n"
+ "fmla z29.s, p3/M, z3.s, z21.s\n"
+ "fmla z13.s, p3/M, z1.s, z21.s\n"
+ "fmla z11.s, p3/M, z0.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x20, x15, LSL #2]\n"
"ldr x28, [x8, #0xd8]\n"
- "fmla z28.s, p3/M, z7.s, z23.s\n"
- "fmla z25.s, p3/M, z6.s, z23.s\n"
- "ld1w { z23.s }, p2/Z, [x27, x16, LSL #2]\n"
+ "fmla z9.s, p3/M, z7.s, z17.s\n"
+ "fmla z24.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x27, x15, LSL #2]\n"
"ldr x20, [x8, #0xd0]\n"
- "fmla z18.s, p3/M, z7.s, z21.s\n"
- "fmla z13.s, p3/M, z6.s, z21.s\n"
- "fmla z27.s, p3/M, z4.s, z21.s\n"
- "fmla z14.s, p3/M, z3.s, z21.s\n"
- "fmla z31.s, p3/M, z1.s, z21.s\n"
- "fmla z30.s, p3/M, z0.s, z21.s\n"
- "ld1w { z21.s }, p2/Z, [x26, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z7.s, z22.s\n"
+ "fmla z18.s, p3/M, z6.s, z22.s\n"
+ "fmla z31.s, p3/M, z4.s, z22.s\n"
+ "fmla z16.s, p3/M, z3.s, z22.s\n"
+ "fmla z28.s, p3/M, z1.s, z22.s\n"
+ "fmla z15.s, p3/M, z0.s, z22.s\n"
+ "ld1w { z22.s }, p2/Z, [x26, x15, LSL #2]\n"
"ldr x27, [x8, #0xe0]\n"
- "fmla z22.s, p3/M, z8.s, z29.s\n"
- "fmla z24.s, p3/M, z8.s, z23.s\n"
- "fmla z26.s, p3/M, z7.s, z23.s\n"
- "ld1w { z23.s }, p2/Z, [x25, x16, LSL #2]\n"
- "fmla z10.s, p3/M, z1.s, z29.s\n"
+ "fmla z26.s, p3/M, z8.s, z21.s\n"
+ "fmla z12.s, p3/M, z8.s, z17.s\n"
+ "fmla z25.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "fmla z23.s, p3/M, z1.s, z21.s\n"
"ldr x26, [x8, #0xe8]\n"
- "fmla z9.s, p3/M, z7.s, z29.s\n"
- "fmla z15.s, p3/M, z5.s, z29.s\n"
- "fmla z11.s, p3/M, z4.s, z29.s\n"
- "fmla z20.s, p3/M, z2.s, z29.s\n"
- "ld1w { z29.s }, p2/Z, [x24, x16, LSL #2]\n"
+ "fmla z29.s, p3/M, z7.s, z21.s\n"
+ "fmla z13.s, p3/M, z5.s, z21.s\n"
+ "fmla z11.s, p3/M, z4.s, z21.s\n"
+ "fmla z20.s, p3/M, z2.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x24, x15, LSL #2]\n"
"ldr x25, [x8, #0xf0]\n"
- "fmla z18.s, p3/M, z2.s, z21.s\n"
- "fmla z13.s, p3/M, z1.s, z21.s\n"
- "fmla z22.s, p3/M, z0.s, z21.s\n"
- "fmla z27.s, p3/M, z7.s, z23.s\n"
- "ld1w { z21.s }, p2/Z, [x23, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z2.s, z22.s\n"
+ "fmla z18.s, p3/M, z1.s, z22.s\n"
+ "fmla z26.s, p3/M, z0.s, z22.s\n"
+ "fmla z31.s, p3/M, z7.s, z17.s\n"
+ "ld1w { z22.s }, p2/Z, [x23, x15, LSL #2]\n"
"ldr x24, [x8, #0xf8]\n"
- "fmla z14.s, p3/M, z6.s, z23.s\n"
- "fmla z31.s, p3/M, z4.s, z23.s\n"
- "fmla z30.s, p3/M, z3.s, z23.s\n"
- "fmla z28.s, p3/M, z1.s, z23.s\n"
- "fmla z25.s, p3/M, z0.s, z23.s\n"
- "ld1w { z17.s }, p2/Z, [x22, x16, LSL #2]\n"
- "fmla z10.s, p3/M, z4.s, z17.s\n"
+ "fmla z16.s, p3/M, z6.s, z17.s\n"
+ "fmla z28.s, p3/M, z4.s, z17.s\n"
+ "fmla z15.s, p3/M, z3.s, z17.s\n"
+ "fmla z9.s, p3/M, z1.s, z17.s\n"
+ "fmla z24.s, p3/M, z0.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z18.s, p3/M, z2.s, z21.s\n"
"ldr x23, [x8, #0x100]\n"
- "fmla z24.s, p3/M, z2.s, z17.s\n"
- "fmla z13.s, p3/M, z2.s, z29.s\n"
- "fmla z22.s, p3/M, z1.s, z29.s\n"
- "fmla z9.s, p3/M, z0.s, z29.s\n"
- "ld1w { z23.s }, p2/Z, [x21, x16, LSL #2]\n"
+ "fmla z26.s, p3/M, z1.s, z21.s\n"
+ "fmla z29.s, p3/M, z0.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x21, x15, LSL #2]\n"
"ldr x22, [x8, #0x108]\n"
- "fmla z18.s, p3/M, z6.s, z21.s\n"
- "fmla z27.s, p3/M, z3.s, z21.s\n"
- "fmla z31.s, p3/M, z0.s, z21.s\n"
- "fmla z15.s, p3/M, z8.s, z17.s\n"
- "ld1w { z29.s }, p2/Z, [x20, x16, LSL #2]\n"
+ "fmla z30.s, p3/M, z6.s, z22.s\n"
+ "fmla z31.s, p3/M, z3.s, z22.s\n"
+ "fmla z23.s, p3/M, z4.s, z17.s\n"
+ "fmla z12.s, p3/M, z2.s, z17.s\n"
+ "fmla z28.s, p3/M, z0.s, z22.s\n"
+ "fmla z13.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z22.s }, p2/Z, [x20, x15, LSL #2]\n"
"ldr x21, [x8, #0x110]\n"
"fmla z11.s, p3/M, z7.s, z17.s\n"
"fmla z20.s, p3/M, z5.s, z17.s\n"
- "fmla z26.s, p3/M, z1.s, z17.s\n"
- "ld1w { z21.s }, p2/Z, [x28, x16, LSL #2]\n"
- "fmla z10.s, p3/M, z2.s, z23.s\n"
+ "fmla z25.s, p3/M, z1.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x28, x15, LSL #2]\n"
+ "fmla z29.s, p3/M, z8.s, z21.s\n"
"ldr x20, [x8, #0x118]\n"
- "fmla z28.s, p3/M, z0.s, z29.s\n"
- "fmla z25.s, p3/M, z4.s, z21.s\n"
- "fmla z24.s, p3/M, z3.s, z21.s\n"
- "fmla z9.s, p3/M, z8.s, z23.s\n"
- "fmla z11.s, p3/M, z5.s, z23.s\n"
- "fmla z27.s, p3/M, z6.s, z29.s\n"
- "ld1w { z23.s }, p2/Z, [x27, x16, LSL #2]\n"
- "fmla z31.s, p3/M, z3.s, z29.s\n"
- "ld1w { z17.s }, p2/Z, [x26, x16, LSL #2]\n"
- "fmla z30.s, p3/M, z7.s, z21.s\n"
- "fmla z20.s, p3/M, z6.s, z21.s\n"
- "fmla z28.s, p3/M, z5.s, z21.s\n"
- "fmla z10.s, p3/M, z5.s, z23.s\n"
- "fmla z26.s, p3/M, z2.s, z23.s\n"
- "fmla z25.s, p3/M, z7.s, z17.s\n"
- "fmla z24.s, p3/M, z6.s, z17.s\n"
- "fmla z31.s, p3/M, z8.s, z21.s\n"
- "ld1w { z21.s }, p2/Z, [x25, x16, LSL #2]\n"
+ "fmla z23.s, p3/M, z2.s, z21.s\n"
+ "fmla z9.s, p3/M, z0.s, z22.s\n"
+ "fmla z31.s, p3/M, z6.s, z22.s\n"
+ "fmla z28.s, p3/M, z3.s, z22.s\n"
+ "ld1w { z27.s }, p2/Z, [x26, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z17.s\n"
+ "fmla z12.s, p3/M, z3.s, z17.s\n"
+ "fmla z11.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z22.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "fmla z15.s, p3/M, z7.s, z17.s\n"
+ "fmla z20.s, p3/M, z6.s, z17.s\n"
+ "fmla z9.s, p3/M, z5.s, z17.s\n"
"fmla z28.s, p3/M, z8.s, z17.s\n"
- "ld1w { z12.s }, p2/Z, [x23, x16, LSL #2]\n"
- "fmla z30.s, p3/M, z8.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "fmla z23.s, p3/M, z5.s, z22.s\n"
+ "fmla z25.s, p3/M, z2.s, z22.s\n"
+ "fmla z24.s, p3/M, z7.s, z27.s\n"
+ "fmla z12.s, p3/M, z6.s, z27.s\n"
+ "fmla z11.s, p3/M, z8.s, z22.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x15, LSL #2]\n"
+ "fmla z9.s, p3/M, z8.s, z27.s\n"
+ "fmla z15.s, p3/M, z8.s, z21.s\n"
+ "ld1w { z27.s }, p2/Z, [x23, x15, LSL #2]\n"
"fmla z20.s, p3/M, z7.s, z21.s\n"
- "fmla z10.s, p3/M, z6.s, z21.s\n"
- "fmla z25.s, p3/M, z5.s, z21.s\n"
- "fmla z24.s, p3/M, z4.s, z21.s\n"
- "fmla z26.s, p3/M, z3.s, z21.s\n"
- "ld1w { z21.s }, p2/Z, [x22, x16, LSL #2]\n"
- "fmla z11.s, p3/M, z8.s, z23.s\n"
- "ld1w { z29.s }, p2/Z, [x24, x16, LSL #2]\n"
- "fmla z18.s, p3/M, z4.s, z12.s\n"
- "fmax z18.s, p3/M, z18.s, z16.s\n"
- "fmla z13.s, p3/M, z3.s, z12.s\n"
- "fmla z22.s, p3/M, z5.s, z21.s\n"
- "fmax z13.s, p3/M, z13.s, z16.s\n"
- "fmax z22.s, p3/M, z22.s, z16.s\n"
- "fmla z9.s, p3/M, z4.s, z21.s\n"
- "fmla z25.s, p3/M, z8.s, z29.s\n"
- "fmax z9.s, p3/M, z9.s, z16.s\n"
- "fmin z18.s, p3/M, z18.s, z19.s\n"
- "fmla z24.s, p3/M, z7.s, z29.s\n"
- "fmla z26.s, p3/M, z6.s, z29.s\n"
- "ld1w { z23.s }, p2/Z, [x21, x16, LSL #2]\n"
- "fmin z13.s, p3/M, z13.s, z19.s\n"
- "fmla z27.s, p3/M, z1.s, z12.s\n"
- "fmla z14.s, p3/M, z0.s, z12.s\n"
- "ld1w { z29.s }, p2/Z, [x20, x16, LSL #2]\n"
- "fmin z22.s, p3/M, z22.s, z19.s\n"
- "fmla z15.s, p3/M, z2.s, z21.s\n"
+ "fmla z23.s, p3/M, z6.s, z21.s\n"
+ "fmla z24.s, p3/M, z5.s, z21.s\n"
+ "fmla z25.s, p3/M, z3.s, z21.s\n"
+ "fmla z12.s, p3/M, z4.s, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x22, x15, LSL #2]\n"
+ "fmla z30.s, p3/M, z4.s, z27.s\n"
+ "fmla z18.s, p3/M, z3.s, z27.s\n"
+ "fmla z31.s, p3/M, z1.s, z27.s\n"
+ "fmla z16.s, p3/M, z0.s, z27.s\n"
+ "ld1w { z27.s }, p2/Z, [x20, x15, LSL #2]\n"
+ "fmla z26.s, p3/M, z5.s, z21.s\n"
+ "fmla z29.s, p3/M, z4.s, z21.s\n"
+ "fmla z24.s, p3/M, z8.s, z17.s\n"
+ "fmla z12.s, p3/M, z7.s, z17.s\n"
+ "fmla z25.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z10.s }, p2/Z, [x21, x15, LSL #2]\n"
+ "fmax z30.s, p3/M, z30.s, z19.s\n"
+ "fmla z13.s, p3/M, z2.s, z21.s\n"
"fmla z11.s, p3/M, z1.s, z21.s\n"
- "fmin z9.s, p3/M, z9.s, z19.s\n"
- "fmax z27.s, p3/M, z27.s, z16.s\n"
- "fmla z31.s, p3/M, z7.s, z23.s\n"
- "fmla z30.s, p3/M, z6.s, z23.s\n"
- "fmax z14.s, p3/M, z14.s, z16.s\n"
- "fmax z15.s, p3/M, z15.s, z16.s\n"
- "fmla z20.s, p3/M, z8.s, z29.s\n"
- "fmla z10.s, p3/M, z7.s, z29.s\n"
- "fmax z11.s, p3/M, z11.s, z16.s\n"
- "st1w { z18.s }, p0, [x12, x15, LSL #2]\n"
- "st1w { z13.s }, p0, [x11, x15, LSL #2]\n"
- "ldr x23, [x14, #0x20]\n"
- "ldr x22, [x14, #0x28]\n"
- "fmla z28.s, p3/M, z4.s, z23.s\n"
- "st1w { z22.s }, p0, [x10, x15, LSL #2]\n"
- "ldr x21, [x14, #0x30]\n"
- "fmla z25.s, p3/M, z3.s, z23.s\n"
- "fmla z24.s, p3/M, z5.s, z29.s\n"
- "st1w { z9.s }, p0, [x9, x15, LSL #2]\n"
- "ldr x20, [x14, #0x38]\n"
- "fmla z26.s, p3/M, z4.s, z29.s\n"
- "fmin z27.s, p3/M, z27.s, z19.s\n"
- "fmin z14.s, p3/M, z14.s, z19.s\n"
- "fmin z15.s, p3/M, z15.s, z19.s\n"
- "st1w { z27.s }, p0, [x23, x15, LSL #2]\n"
- "ldr x23, [x14, #0x40]\n"
- "fmin z11.s, p3/M, z11.s, z19.s\n"
- "fmax z31.s, p3/M, z31.s, z16.s\n"
- "st1w { z14.s }, p0, [x22, x15, LSL #2]\n"
- "ldr x22, [x14, #0x48]\n"
- "fmax z30.s, p3/M, z30.s, z16.s\n"
- "fmax z20.s, p3/M, z20.s, z16.s\n"
- "st1w { z15.s }, p0, [x21, x15, LSL #2]\n"
- "ldr x21, [x14, #0x50]\n"
- "fmax z10.s, p3/M, z10.s, z16.s\n"
- "st1w { z11.s }, p0, [x20, x15, LSL #2]\n"
- "ldr x20, [x14, #0x58]\n"
- "fmin z31.s, p3/M, z31.s, z19.s\n"
- "fmin z30.s, p3/M, z30.s, z19.s\n"
- "fmin z20.s, p3/M, z20.s, z19.s\n"
- "st1w { z31.s }, p0, [x23, x15, LSL #2]\n"
- "ldr x23, [x14, #0x60]\n"
- "fmin z10.s, p3/M, z10.s, z19.s\n"
- "fmax z28.s, p3/M, z28.s, z16.s\n"
- "st1w { z30.s }, p0, [x22, x15, LSL #2]\n"
- "ldr x22, [x14, #0x68]\n"
- "fmax z25.s, p3/M, z25.s, z16.s\n"
- "fmax z24.s, p3/M, z24.s, z16.s\n"
- "st1w { z20.s }, p0, [x21, x15, LSL #2]\n"
- "ldr x21, [x14, #0x70]\n"
- "fmax z26.s, p3/M, z26.s, z16.s\n"
- "st1w { z10.s }, p0, [x20, x15, LSL #2]\n"
- "ldr x20, [x14, #0x78]\n"
- "fmin z28.s, p3/M, z28.s, z19.s\n"
- "fmin z25.s, p3/M, z25.s, z19.s\n"
- "fmin z24.s, p3/M, z24.s, z19.s\n"
- "st1w { z28.s }, p0, [x23, x15, LSL #2]\n"
- "fmin z26.s, p3/M, z26.s, z19.s\n"
- "st1w { z25.s }, p0, [x22, x15, LSL #2]\n"
- "st1w { z24.s }, p0, [x21, x15, LSL #2]\n"
- "st1w { z26.s }, p0, [x20, x15, LSL #2]\n"
+ "fmax z18.s, p3/M, z18.s, z19.s\n"
+ "fmax z31.s, p3/M, z31.s, z19.s\n"
+ "fmax z26.s, p3/M, z26.s, z19.s\n"
+ "fmla z20.s, p3/M, z8.s, z27.s\n"
+ "fmla z23.s, p3/M, z7.s, z27.s\n"
+ "fmax z16.s, p3/M, z16.s, z19.s\n"
+ "fmax z29.s, p3/M, z29.s, z19.s\n"
+ "fmla z28.s, p3/M, z7.s, z10.s\n"
+ "fmla z15.s, p3/M, z6.s, z10.s\n"
+ "fmin z30.s, p3/M, z30.s, z14.s\n"
+ "fmin z18.s, p3/M, z18.s, z14.s\n"
+ "fmla z9.s, p3/M, z4.s, z10.s\n"
+ "fmla z24.s, p3/M, z3.s, z10.s\n"
+ "fmin z31.s, p3/M, z31.s, z14.s\n"
+ "fmin z26.s, p3/M, z26.s, z14.s\n"
+ "fmax z13.s, p3/M, z13.s, z19.s\n"
+ "fmla z12.s, p3/M, z5.s, z27.s\n"
+ "fmla z25.s, p3/M, z4.s, z27.s\n"
+ "fmin z29.s, p3/M, z29.s, z14.s\n"
+ "fmax z11.s, p3/M, z11.s, z19.s\n"
+ "st1w { z30.s }, p0, [x12, x14, LSL #2]\n"
+ "ldr x23, [x17, #0x20]\n"
+ "st1w { z18.s }, p0, [x11, x14, LSL #2]\n"
+ "ldr x22, [x17, #0x28]\n"
+ "fmin z16.s, p3/M, z16.s, z14.s\n"
+ "fmax z28.s, p3/M, z28.s, z19.s\n"
+ "st1w { z26.s }, p0, [x10, x14, LSL #2]\n"
+ "ldr x21, [x17, #0x30]\n"
+ "fmin z13.s, p3/M, z13.s, z14.s\n"
+ "fmax z15.s, p3/M, z15.s, z19.s\n"
+ "st1w { z29.s }, p0, [x9, x14, LSL #2]\n"
+ "ldr x20, [x17, #0x38]\n"
+ "fmin z11.s, p3/M, z11.s, z14.s\n"
+ "fmax z20.s, p3/M, z20.s, z19.s\n"
+ "fmax z23.s, p3/M, z23.s, z19.s\n"
+ "st1w { z31.s }, p0, [x23, x14, LSL #2]\n"
+ "ldr x23, [x17, #0x40]\n"
+ "fmin z28.s, p3/M, z28.s, z14.s\n"
+ "st1w { z16.s }, p0, [x22, x14, LSL #2]\n"
+ "ldr x22, [x17, #0x48]\n"
+ "fmin z15.s, p3/M, z15.s, z14.s\n"
+ "fmax z9.s, p3/M, z9.s, z19.s\n"
+ "st1w { z13.s }, p0, [x21, x14, LSL #2]\n"
+ "ldr x21, [x17, #0x50]\n"
+ "fmin z20.s, p3/M, z20.s, z14.s\n"
+ "fmax z24.s, p3/M, z24.s, z19.s\n"
+ "st1w { z11.s }, p0, [x20, x14, LSL #2]\n"
+ "ldr x20, [x17, #0x58]\n"
+ "fmin z23.s, p3/M, z23.s, z14.s\n"
+ "fmax z12.s, p3/M, z12.s, z19.s\n"
+ "fmax z25.s, p3/M, z25.s, z19.s\n"
+ "st1w { z28.s }, p0, [x23, x14, LSL #2]\n"
+ "ldr x23, [x17, #0x60]\n"
+ "fmin z9.s, p3/M, z9.s, z14.s\n"
+ "st1w { z15.s }, p0, [x22, x14, LSL #2]\n"
+ "ldr x22, [x17, #0x68]\n"
+ "fmin z24.s, p3/M, z24.s, z14.s\n"
+ "st1w { z20.s }, p0, [x21, x14, LSL #2]\n"
+ "ldr x21, [x17, #0x70]\n"
+ "fmin z12.s, p3/M, z12.s, z14.s\n"
+ "st1w { z23.s }, p0, [x20, x14, LSL #2]\n"
+ "ldr x20, [x17, #0x78]\n"
+ "fmin z25.s, p3/M, z25.s, z14.s\n"
+ "st1w { z9.s }, p0, [x23, x14, LSL #2]\n"
+ "st1w { z24.s }, p0, [x22, x14, LSL #2]\n"
+ "st1w { z12.s }, p0, [x21, x14, LSL #2]\n"
+ "st1w { z25.s }, p0, [x20, x14, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
: "cc", "memory", "p0", "p1", "p2", "p3", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
index e6090fda94..d17c63f7ae 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,246 +88,246 @@ void sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x11, #0x0\n"
- "mov x16, #0x0\n"
+ "mov x7, #0x0\n"
+ "mov x8, #0x0\n"
"1:" // Tile loop
- "str x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
- "mov x25, #0x4\n"
- "mov x24, #0x2\n"
- "str x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x15, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x11, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x16, x15, x22\n" // offset += tile_j * ld_input_col
- "ldr x14, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "cntw x13\n"
- "mul x20, x11, x21\n" // offset = tile_i * ld_output_row
- "ldr x12, [%x[params_struct], %[offsetof_args_inptr]]\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x10, x15, x15\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x12, x12, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "ldr x9, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "add x28, x12, x23, LSL #2\n"
- "madd x20, x16, x14, x20\n" // offset += tile_j * ld_output_col
+ "str x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x4\n"
+ "mov x25, #0x2\n"
+ "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "cntw x16\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
"whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z30.s }, p3/Z, [x11]\n"
- "ld1w { z0.s }, p3/Z, [x11, #1, MUL VL]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "ld1w { z1.s }, p3/Z, [x11, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x11, #3, MUL VL]\n"
- "add x27, x28, x23, LSL #2\n"
- "ld1w { z3.s }, p3/Z, [x11, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x11, #5, MUL VL]\n"
- "add x26, x10, x15\n"
- "add x25, x27, x23, LSL #2\n"
- "ld1w { z5.s }, p3/Z, [x11, #6, MUL VL]\n"
- "ld1w { z6.s }, p3/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #16\n"
- "add x24, x26, x15\n"
- "add x9, x9, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "cmp x13, %x[n_channels]\n"
- "ld1rw { z29.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z28.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "add x23, x25, x23, LSL #2\n"
- "add x22, x9, x21, LSL #2\n"
- "ld1w { z7.s }, p3/Z, [x11, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x11, #-7, MUL VL]\n"
- "mov x21, #0x0\n"
- "sub x20, XZR, x13\n"
- "ld1w { z9.s }, p2/Z, [x27, x10, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x12]\n"
- "ld1w { z11.s }, p2/Z, [x12, x15, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x12, x26, LSL #2]\n"
- "addvl x11, x11, #-6\n"
- "ld1w { z13.s }, p2/Z, [x12, x24, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x28]\n"
- "ld1w { z15.s }, p2/Z, [x28, x15, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x12, x10, LSL #2]\n"
+ "mov x14, #0x0\n"
+ "ldr x13, [%x[params_struct], %[offsetof_args_inptr]]\n"
+ "ldr x12, [%x[params_struct], %[offsetof_args_params]]\n"
+ "mul x22, x7, x24\n" // offset = tile_i * ld_input_row
+ "ldr x11, [%x[params_struct], %[offsetof_args_outptr]]\n"
+ "add x10, x17, x17\n"
+ "cmp x16, %x[n_channels]\n"
+ "ld1rw { z30.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "mul x21, x7, x23\n" // offset = tile_i * ld_output_row
+ "add x9, x10, x17\n"
+ "ld1rw { z29.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "sub x20, XZR, x16\n"
+ "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
+ "ld1w { z28.s }, p3/Z, [x12]\n"
+ "ld1w { z0.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "add x28, x9, x17\n"
+ "ld1w { z1.s }, p3/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "madd x21, x8, x15, x21\n" // offset += tile_j * ld_output_col
+ "ld1w { z3.s }, p3/Z, [x12, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x12, #5, MUL VL]\n"
+ "mul x22, x22, x26\n" // offset *= kernel_stride * output_size
+ "ld1w { z5.s }, p3/Z, [x12, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x12, #7, MUL VL]\n"
+ "addvl x12, x12, #16\n"
+ "mul x21, x21, x25\n" // offset *= output_tile_size
+ "add x13, x13, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x27, x13, x24, LSL #2\n"
+ "add x26, x27, x24, LSL #2\n"
+ "ld1w { z10.s }, p2/Z, [x13]\n"
+ "ld1w { z11.s }, p2/Z, [x13, x17, LSL #2]\n"
+ "add x25, x26, x24, LSL #2\n"
+ "add x11, x11, x21, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x24, x25, x24, LSL #2\n"
+ "ld1w { z7.s }, p3/Z, [x12, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x12, #-7, MUL VL]\n"
+ "add x23, x11, x23, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x26, x10, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x13, x9, LSL #2]\n"
+ "addvl x12, x12, #-6\n"
+ "ld1w { z13.s }, p2/Z, [x13, x28, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x27]\n"
+ "ld1w { z15.s }, p2/Z, [x27, x17, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x13, x10, LSL #2]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z27, z30\n fmla z27.s, p3/M, z8.s, z9.s\n"
- "movprfx z26, z30\n fmla z26.s, p3/M, z6.s, z9.s\n"
- "whilelt p1.s, x13, %x[n_channels]\n"
- "incw x21\n"
+ "movprfx z27, z28\n fmla z27.s, p3/M, z8.s, z9.s\n"
+ "movprfx z26, z28\n fmla z26.s, p3/M, z6.s, z9.s\n"
+ "whilelt p1.s, x16, %x[n_channels]\n"
+ "incw x14\n"
+ "movprfx z25, z28\n fmla z25.s, p3/M, z2.s, z9.s\n"
+ "movprfx z24, z28\n fmla z24.s, p3/M, z0.s, z9.s\n"
+ "incw x16\n"
+ "mov p0.b, p2.b\n"
+ "addvl x13, x13, #1\n"
+ "ld1w { z28.s }, p3/Z, [x12]\n"
+ "incw x20\n"
"fmla z27.s, p3/M, z0.s, z10.s\n"
"fmla z26.s, p3/M, z1.s, z12.s\n"
- "ld1w { z20.s }, p2/Z, [x28, x24, LSL #2]\n"
- "incw x13\n"
+ "ld1w { z21.s }, p2/Z, [x27, x28, LSL #2]\n"
+ "ld1w { z10.s }, p1/Z, [x13]\n"
"fmla z27.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z18.s }, p2/Z, [x27, x9, LSL #2]\n"
"fmla z26.s, p3/M, z2.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x28, x26, LSL #2]\n"
- "ld1w { z19.s }, p2/Z, [x28, x10, LSL #2]\n"
+ "ld1w { z20.s }, p2/Z, [x27, x10, LSL #2]\n"
+ "addvl x27, x27, #1\n"
"fmla z27.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z17.s }, p2/Z, [x25]\n"
"fmla z26.s, p3/M, z0.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x25]\n"
- "mov p0.b, p2.b\n"
+ "fmla z25.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z23.s }, p2/Z, [x25, x28, LSL #2]\n"
"fmla z27.s, p3/M, z4.s, z15.s\n"
- "fmla z26.s, p3/M, z4.s, z17.s\n"
- "ld1w { z25.s }, p2/Z, [x27]\n"
- "ld1w { z17.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x26]\n"
+ "fmla z26.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, x17, LSL #2]\n"
"fmla z27.s, p3/M, z2.s, z16.s\n"
- "fmla z26.s, p3/M, z5.s, z20.s\n"
- "ld1w { z24.s }, p2/Z, [x27, x26, LSL #2]\n"
- "ld1w { z23.s }, p2/Z, [x27, x15, LSL #2]\n"
- "movprfx z22, z30\n fmla z22.s, p3/M, z2.s, z9.s\n"
- "movprfx z21, z30\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "addvl x12, x12, #1\n"
- "addvl x28, x28, #1\n"
- "fmla z27.s, p3/M, z5.s, z19.s\n"
- "fmla z26.s, p3/M, z3.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x26, LSL #2]\n"
- "ld1w { z30.s }, p3/Z, [x11]\n"
- "fmla z22.s, p3/M, z3.s, z18.s\n"
- "fmla z21.s, p3/M, z4.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x24, LSL #2]\n"
- "ld1w { z20.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z22.s, p3/M, z0.s, z25.s\n"
- "fmla z21.s, p3/M, z1.s, z24.s\n"
- "ld1w { z0.s }, p3/Z, [x11, #1, MUL VL]\n"
- "incw x20\n"
- "fmla z22.s, p3/M, z4.s, z17.s\n"
- "fmla z21.s, p3/M, z5.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x27, x24, LSL #2]\n"
- "ld1w { z18.s }, p2/Z, [x23, x26, LSL #2]\n"
- "fmla z27.s, p3/M, z6.s, z25.s\n"
- "fmla z22.s, p3/M, z1.s, z23.s\n"
- "ld1w { z17.s }, p2/Z, [x23]\n"
- "addvl x27, x27, #1\n"
- "fmla z21.s, p3/M, z2.s, z19.s\n"
- "fmla z27.s, p3/M, z7.s, z23.s\n"
+ "ld1w { z19.s }, p2/Z, [x26, x17, LSL #2]\n"
+ "fmla z25.s, p3/M, z0.s, z22.s\n"
+ "ld1w { z0.s }, p3/Z, [x12, #1, MUL VL]\n"
+ "fmla z26.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z18.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "fmla z27.s, p3/M, z5.s, z20.s\n"
+ "fmla z26.s, p3/M, z3.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z16.s\n"
+ "ld1w { z21.s }, p2/Z, [x24, x17, LSL #2]\n"
+ "fmla z25.s, p3/M, z4.s, z17.s\n"
+ "ld1w { z20.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "addvl x26, x26, #1\n"
+ "ld1w { z4.s }, p3/Z, [x12, #5, MUL VL]\n"
+ "fmla z27.s, p3/M, z6.s, z22.s\n"
+ "ld1w { z17.s }, p2/Z, [x24]\n"
+ "fmla z26.s, p3/M, z7.s, z18.s\n"
+ "fmla z24.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z12.s }, p1/Z, [x13, x9, LSL #2]\n"
+ "fmla z25.s, p3/M, z1.s, z19.s\n"
+ "ld1w { z1.s }, p3/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z9.s }, p1/Z, [x26, x10, LSL #2]\n"
+ "fmla z27.s, p3/M, z7.s, z19.s\n"
"ld1w { z16.s }, p2/Z, [x25, x10, LSL #2]\n"
- "fmax z27.s, p3/M, z27.s, z29.s\n"
- "fmla z22.s, p3/M, z6.s, z17.s\n"
- "fmla z21.s, p3/M, z3.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x10, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x11, #2, MUL VL]\n"
- "fmla z22.s, p3/M, z7.s, z20.s\n"
- "fmla z21.s, p3/M, z7.s, z18.s\n"
- "ld1w { z2.s }, p3/Z, [x11, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x11, #4, MUL VL]\n"
- "fmla z26.s, p3/M, z7.s, z24.s\n"
- "fmla z22.s, p3/M, z5.s, z16.s\n"
- "ld1w { z4.s }, p3/Z, [x11, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x11, #6, MUL VL]\n"
- "fmla z21.s, p3/M, z6.s, z17.s\n"
- "fmla z26.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x24, LSL #2]\n"
- "fmax z26.s, p3/M, z26.s, z29.s\n"
- "fmla z22.s, p3/M, z8.s, z17.s\n"
- "fmla z21.s, p3/M, z8.s, z16.s\n"
- "fmax z22.s, p3/M, z22.s, z29.s\n"
- "fmax z21.s, p3/M, z21.s, z29.s\n"
- "ld1w { z6.s }, p3/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #16\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "ld1w { z9.s }, p1/Z, [x27, x10, LSL #2]\n"
- "cmp x13, %x[n_channels]\n"
- "fmin z27.s, p3/M, z27.s, z28.s\n"
- "ld1w { z10.s }, p1/Z, [x12]\n"
- "ld1w { z11.s }, p1/Z, [x12, x15, LSL #2]\n"
- "fmin z26.s, p3/M, z26.s, z28.s\n"
- "fmin z22.s, p3/M, z22.s, z28.s\n"
- "ld1w { z12.s }, p1/Z, [x12, x26, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x12, x24, LSL #2]\n"
- "fmin z21.s, p3/M, z21.s, z28.s\n"
"addvl x25, x25, #1\n"
- "ld1w { z14.s }, p1/Z, [x28]\n"
- "ld1w { z15.s }, p1/Z, [x28, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z5.s, z23.s\n"
+ "ld1w { z19.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "fmla z26.s, p3/M, z8.s, z20.s\n"
+ "fmla z25.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, x10, LSL #2]\n"
+ "fmax z27.s, p3/M, z27.s, z30.s\n"
+ "fmla z24.s, p3/M, z2.s, z20.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "ld1w { z2.s }, p3/Z, [x12, #3, MUL VL]\n"
+ "whilelt p2.s, x14, %x[n_channels]\n"
+ "cmp x16, %x[n_channels]\n"
+ "addvl x24, x24, #1\n"
+ "fmin z27.s, p3/M, z27.s, z29.s\n"
+ "fmla z25.s, p3/M, z7.s, z21.s\n"
+ "ld1w { z13.s }, p1/Z, [x13, x28, LSL #2]\n"
+ "fmax z26.s, p3/M, z26.s, z30.s\n"
+ "fmla z24.s, p3/M, z3.s, z16.s\n"
+ "ld1w { z3.s }, p3/Z, [x12, #4, MUL VL]\n"
+ "fmin z26.s, p3/M, z26.s, z29.s\n"
+ "st1w { z27.s }, p0, [x11]\n"
+ "fmla z25.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z5.s }, p3/Z, [x12, #6, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x13, x10, LSL #2]\n"
+ "st1w { z26.s }, p0, [x11, x15, LSL #2]\n"
+ "addvl x11, x11, #1\n"
+ "fmla z24.s, p3/M, z7.s, z19.s\n"
+ "ld1w { z14.s }, p1/Z, [x27]\n"
+ "fmla z25.s, p3/M, z8.s, z18.s\n"
+ "fmla z24.s, p3/M, z6.s, z18.s\n"
+ "ld1w { z6.s }, p3/Z, [x12, #7, MUL VL]\n"
+ "addvl x12, x12, #16\n"
+ "ld1w { z15.s }, p1/Z, [x27, x17, LSL #2]\n"
+ "fmax z25.s, p3/M, z25.s, z30.s\n"
+ "ld1w { z7.s }, p3/Z, [x12, #-8, MUL VL]\n"
+ "fmla z24.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z11.s }, p1/Z, [x13, x17, LSL #2]\n"
+ "ld1w { z8.s }, p3/Z, [x12, #-7, MUL VL]\n"
+ "addvl x12, x12, #-6\n"
+ "fmin z25.s, p3/M, z25.s, z29.s\n"
+ "fmax z24.s, p3/M, z24.s, z30.s\n"
+ "st1w { z25.s }, p0, [x23]\n"
+ "fmin z24.s, p3/M, z24.s, z29.s\n"
+ "st1w { z24.s }, p0, [x23, x15, LSL #2]\n"
"addvl x23, x23, #1\n"
- "ld1w { z16.s }, p1/Z, [x12, x10, LSL #2]\n"
- "st1w { z27.s }, p0, [x9]\n"
- "ld1w { z7.s }, p3/Z, [x11, #-8, MUL VL]\n"
- "st1w { z26.s }, p0, [x9, x14, LSL #2]\n"
- "addvl x9, x9, #1\n"
- "ld1w { z8.s }, p3/Z, [x11, #-7, MUL VL]\n"
- "addvl x11, x11, #-6\n"
- "st1w { z22.s }, p0, [x22]\n"
- "st1w { z21.s }, p0, [x22, x14, LSL #2]\n"
- "addvl x22, x22, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
- "movprfx z27, z30\n fmla z27.s, p3/M, z8.s, z9.s\n"
- "movprfx z26, z30\n fmla z26.s, p3/M, z6.s, z9.s\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x11, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z27, z28\n fmla z27.s, p3/M, z8.s, z9.s\n"
+ "movprfx z26, z28\n fmla z26.s, p3/M, z6.s, z9.s\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "movprfx z25, z28\n fmla z25.s, p3/M, z2.s, z9.s\n"
+ "movprfx z24, z28\n fmla z24.s, p3/M, z0.s, z9.s\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "mov p0.b, p2.b\n"
+ "add x8, x8, #0x1\n"
+ "add x20, x7, #0x1\n"
"fmla z27.s, p3/M, z0.s, z10.s\n"
"fmla z26.s, p3/M, z1.s, z12.s\n"
- "ld1w { z20.s }, p2/Z, [x28, x24, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ld1w { z21.s }, p2/Z, [x27, x28, LSL #2]\n"
+ "cmp x8, x22\n"
+ "csel x7, x7, x20, LT\n"
+ "csel x8, x8, XZR, LT\n"
"fmla z27.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z18.s }, p2/Z, [x27, x9, LSL #2]\n"
"fmla z26.s, p3/M, z2.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x28, x26, LSL #2]\n"
- "ld1w { z19.s }, p2/Z, [x28, x10, LSL #2]\n"
+ "ld1w { z20.s }, p2/Z, [x27, x10, LSL #2]\n"
+ "cmp x7, x21\n"
"fmla z27.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z17.s }, p2/Z, [x25]\n"
"fmla z26.s, p3/M, z0.s, z16.s\n"
- "ld1w { z18.s }, p2/Z, [x25]\n"
- "add x16, x16, #0x1\n"
+ "fmla z25.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z23.s }, p2/Z, [x25, x28, LSL #2]\n"
"fmla z27.s, p3/M, z4.s, z15.s\n"
- "fmla z26.s, p3/M, z4.s, z17.s\n"
- "ld1w { z25.s }, p2/Z, [x27]\n"
- "ld1w { z17.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x26]\n"
+ "fmla z26.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x25, x17, LSL #2]\n"
+ "fmla z25.s, p3/M, z0.s, z22.s\n"
"fmla z27.s, p3/M, z2.s, z16.s\n"
- "fmla z26.s, p3/M, z5.s, z20.s\n"
- "ld1w { z24.s }, p2/Z, [x27, x26, LSL #2]\n"
- "ld1w { z23.s }, p2/Z, [x27, x15, LSL #2]\n"
- "movprfx z22, z30\n fmla z22.s, p3/M, z2.s, z9.s\n"
- "movprfx z21, z30\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "cmp x16, x20\n"
- "add x21, x11, #0x1\n"
- "fmla z27.s, p3/M, z5.s, z19.s\n"
- "fmla z26.s, p3/M, z3.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x26, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
- "fmla z22.s, p3/M, z3.s, z18.s\n"
- "fmla z21.s, p3/M, z4.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x24, LSL #2]\n"
- "ld1w { z20.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z22.s, p3/M, z0.s, z25.s\n"
- "fmla z21.s, p3/M, z1.s, z24.s\n"
- "csel x11, x11, x21, LT\n"
- "mov p0.b, p2.b\n"
- "fmla z22.s, p3/M, z4.s, z17.s\n"
- "fmla z21.s, p3/M, z5.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x27, x24, LSL #2]\n"
- "ld1w { z18.s }, p2/Z, [x23, x26, LSL #2]\n"
- "fmla z27.s, p3/M, z6.s, z25.s\n"
- "fmla z22.s, p3/M, z1.s, z23.s\n"
- "ld1w { z17.s }, p2/Z, [x23]\n"
- "csel x16, x16, XZR, LT\n"
- "fmla z21.s, p3/M, z2.s, z19.s\n"
- "fmla z27.s, p3/M, z7.s, z23.s\n"
- "ld1w { z16.s }, p2/Z, [x25, x10, LSL #2]\n"
- "fmax z27.s, p3/M, z27.s, z29.s\n"
- "fmla z22.s, p3/M, z6.s, z17.s\n"
- "fmla z21.s, p3/M, z3.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x10, LSL #2]\n"
- "cmp x11, x20\n"
- "fmla z22.s, p3/M, z7.s, z20.s\n"
- "fmla z21.s, p3/M, z7.s, z18.s\n"
- "fmin z27.s, p3/M, z27.s, z28.s\n"
- "st1w { z27.s }, p0, [x9]\n"
- "fmla z26.s, p3/M, z7.s, z24.s\n"
- "fmla z22.s, p3/M, z5.s, z16.s\n"
- "fmla z21.s, p3/M, z6.s, z17.s\n"
- "fmla z26.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x24, LSL #2]\n"
- "fmax z26.s, p3/M, z26.s, z29.s\n"
- "fmla z22.s, p3/M, z8.s, z17.s\n"
- "fmla z21.s, p3/M, z8.s, z16.s\n"
- "fmax z22.s, p3/M, z22.s, z29.s\n"
- "fmax z21.s, p3/M, z21.s, z29.s\n"
- "fmin z26.s, p3/M, z26.s, z28.s\n"
- "fmin z22.s, p3/M, z22.s, z28.s\n"
- "st1w { z26.s }, p0, [x9, x14, LSL #2]\n"
- "fmin z21.s, p3/M, z21.s, z28.s\n"
- "st1w { z22.s }, p0, [x22]\n"
- "st1w { z21.s }, p0, [x22, x14, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x26, x17, LSL #2]\n"
+ "fmla z26.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "fmla z27.s, p3/M, z5.s, z20.s\n"
+ "fmla z25.s, p3/M, z4.s, z19.s\n"
+ "ld1w { z21.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "fmla z26.s, p3/M, z3.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "fmla z24.s, p3/M, z4.s, z16.s\n"
+ "ld1w { z20.s }, p2/Z, [x24, x17, LSL #2]\n"
+ "fmla z27.s, p3/M, z6.s, z22.s\n"
+ "ld1w { z16.s }, p2/Z, [x24]\n"
+ "fmla z25.s, p3/M, z1.s, z18.s\n"
+ "fmla z26.s, p3/M, z7.s, z17.s\n"
+ "fmla z24.s, p3/M, z1.s, z17.s\n"
+ "fmla z27.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x25, x10, LSL #2]\n"
+ "fmla z25.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, x10, LSL #2]\n"
+ "fmla z26.s, p3/M, z8.s, z21.s\n"
+ "fmla z24.s, p3/M, z5.s, z23.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "fmax z27.s, p3/M, z27.s, z30.s\n"
+ "fmla z25.s, p3/M, z7.s, z20.s\n"
+ "fmax z26.s, p3/M, z26.s, z30.s\n"
+ "fmin z27.s, p3/M, z27.s, z29.s\n"
+ "fmla z24.s, p3/M, z2.s, z21.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "fmla z25.s, p3/M, z5.s, z19.s\n"
+ "fmin z26.s, p3/M, z26.s, z29.s\n"
+ "st1w { z27.s }, p0, [x11]\n"
+ "fmla z24.s, p3/M, z3.s, z19.s\n"
+ "st1w { z26.s }, p0, [x11, x15, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z17.s\n"
+ "fmla z25.s, p3/M, z8.s, z18.s\n"
+ "fmla z24.s, p3/M, z6.s, z18.s\n"
+ "fmax z25.s, p3/M, z25.s, z30.s\n"
+ "fmin z25.s, p3/M, z25.s, z29.s\n"
+ "st1w { z25.s }, p0, [x23]\n"
+ "fmla z24.s, p3/M, z8.s, z16.s\n"
+ "fmax z24.s, p3/M, z24.s, z30.s\n"
+ "fmin z24.s, p3/M, z24.s, z29.s\n"
+ "st1w { z24.s }, p0, [x23, x15, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
);
}
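
The hunks above only re-allocate registers in this generated SVE kernel (x9..x16 shuffled, the bias/clamp vectors moved from z30/z29/z28 to z28/z30/z29); the computation is unchanged. As a rough guide to what the assembly computes, here is a minimal ACLE-intrinsics sketch of the predicated channel loop the kernel unrolls: whilelt builds the tail predicate, the loop steps one vector of float lanes at a time, each accumulator is seeded from the bias (the movprfx lines), nine fmla taps accumulate the 3x3 window, and the fmax/fmin pair applies the activation clamp loaded from offsetof_args_min/max. The function and parameter names below are illustrative assumptions, not the library's API.

    // Sketch only; compile with e.g. -march=armv8-a+sve. Not library code.
    #include <arm_sve.h>

    void depthwise_3x3_point(const float *const inputs[9],   // 9 input pixels (NHWC)
                             const float *const weights[9],  // 9 per-channel taps
                             const float *bias, float *out,
                             unsigned long n_channels,
                             float act_min, float act_max)
    {
        for (unsigned long c = 0; c < n_channels; c += svcntw())
        {
            svbool_t pg = svwhilelt_b32(c, n_channels);      // tail predicate (cf. whilelt p2.s)
            svfloat32_t acc = svld1_f32(pg, bias + c);       // seed from bias (cf. movprfx)
            for (int k = 0; k < 9; k++)                      // 3x3 taps (cf. fmla chains)
                acc = svmla_f32_m(pg, acc,
                                  svld1_f32(pg, inputs[k] + c),
                                  svld1_f32(pg, weights[k] + c));
            acc = svmax_f32_m(pg, acc, svdup_n_f32(act_min)); // activation clamp
            acc = svmin_f32_m(pg, acc, svdup_n_f32(act_max));
            svst1_f32(pg, out + c, acc);                      // predicated store (cf. st1w)
        }
    }

The hand-written version differs from this sketch mainly in software pipelining: it preloads the next iteration's inputs and weights under a second predicate (p1) while the current iteration's arithmetic is still in flight, which is what most of the reordering in these hunks serves.
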
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
index 98427701fa..0a4929918b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -89,245 +89,245 @@ void sve_fp32_nhwc_3x3_s2_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
"ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "ldr x16, [%x[params_struct], %[offsetof_args_params]]\n"
- "add x15, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "cntw x14\n"
+ "ldr x17, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "cntw x15\n"
+ "mov x14, #0x0\n"
+ "whilelt p2.s, XZR, %x[n_channels]\n"
"ldp x13, x12, [x20, #0x0]\n"
"ldp x11, x10, [x20, #0x10]\n"
- "mov x9, #0x0\n"
- "whilelt p2.s, XZR, %x[n_channels]\n"
- "ld1w { z20.s }, p3/Z, [x16]\n"
- "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
- "sub x28, XZR, x14\n"
- "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
- "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ld1rw { z26.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z25.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
- "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
- "ld1w { z9.s }, p2/Z, [x27, x9, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x26, x9, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x25, x9, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x24, x9, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x23, x9, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ld1w { z15.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z30.s }, p3/Z, [x17]\n"
+ "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
+ "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
+ "cmp x15, %x[n_channels]\n"
+ "sub x9, XZR, x15\n"
+ "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
+ "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
+ "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
+ "addvl x17, x17, #16\n"
+ "ldp x27, x26, [x16, #0x0]\n"
+ "ldp x25, x24, [x16, #0x10]\n"
+ "ldp x23, x22, [x16, #0x20]\n"
+ "ldp x21, x20, [x16, #0x30]\n"
+ "ld1rw { z29.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rw { z28.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
+ "addvl x17, x17, #-6\n"
+ "ld1w { z9.s }, p2/Z, [x27, x14, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x25, x14, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x23, x14, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "ld1w { z15.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x20, x14, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z24, z20\n fmla z24.s, p3/M, z8.s, z9.s\n"
- "movprfx z23, z20\n fmla z23.s, p3/M, z6.s, z9.s\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
- "fmla z24.s, p3/M, z0.s, z10.s\n"
- "fmla z23.s, p3/M, z1.s, z12.s\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x15, #0x50]\n"
- "fmla z24.s, p3/M, z1.s, z11.s\n"
- "fmla z23.s, p3/M, z2.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z19.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z24.s, p3/M, z3.s, z14.s\n"
- "fmla z23.s, p3/M, z0.s, z16.s\n"
- "ldr x20, [x15, #0x58]\n"
- "ldr x22, [x15, #0x78]\n"
- "fmla z24.s, p3/M, z4.s, z15.s\n"
- "fmla z23.s, p3/M, z4.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0x60]\n"
- "fmla z24.s, p3/M, z2.s, z16.s\n"
- "fmla z23.s, p3/M, z5.s, z18.s\n"
- "ldr x20, [x15, #0x80]\n"
- "ld1w { z18.s }, p2/Z, [x21, x9, LSL #2]\n"
- "movprfx z22, z20\n fmla z22.s, p3/M, z2.s, z9.s\n"
- "movprfx z21, z20\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "ld1w { z20.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0x68]\n"
- "fmla z24.s, p3/M, z5.s, z19.s\n"
- "fmla z23.s, p3/M, z3.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x20, [x15, #0x88]\n"
- "fmla z22.s, p3/M, z3.s, z17.s\n"
- "fmla z21.s, p3/M, z4.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z22.s, p3/M, z0.s, z18.s\n"
- "fmla z21.s, p3/M, z1.s, z20.s\n"
- "ldr x21, [x15, #0x70]\n"
- "ldr x20, [x15, #0x98]\n"
- "fmla z22.s, p3/M, z4.s, z17.s\n"
- "fmla z21.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z19.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z24.s, p3/M, z6.s, z18.s\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "fmla z22.s, p3/M, z1.s, z16.s\n"
- "fmla z21.s, p3/M, z2.s, z19.s\n"
- "fmla z24.s, p3/M, z7.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0xa0]\n"
- "ldr x20, [x15, #0xb0]\n"
- "fmla z22.s, p3/M, z6.s, z16.s\n"
- "fmla z21.s, p3/M, z3.s, z18.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z22.s, p3/M, z7.s, z17.s\n"
- "fmla z21.s, p3/M, z7.s, z16.s\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla z23.s, p3/M, z7.s, z20.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z22.s, p3/M, z5.s, z18.s\n"
- "ldr x20, [x15, #0xc0]\n"
- "fmla z21.s, p3/M, z6.s, z17.s\n"
- "fmla z23.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z22.s, p3/M, z8.s, z17.s\n"
- "fmla z21.s, p3/M, z8.s, z16.s\n"
- "whilelt p1.s, x14, %x[n_channels]\n"
- "ldp x27, x26, [x15, #0x0]\n"
- "ldp x25, x24, [x15, #0x10]\n"
- "ldp x23, x22, [x15, #0x20]\n"
+ "movprfx z27, z30\n fmla z27.s, p3/M, z8.s, z9.s\n"
+ "movprfx z26, z30\n fmla z26.s, p3/M, z6.s, z9.s\n"
+ "ldr x28, [x16, #0x40]\n"
+ "ldr x21, [x16, #0x48]\n"
+ "ldr x25, [x16, #0x50]\n"
+ "ldr x20, [x16, #0x58]\n"
+ "movprfx z25, z30\n fmla z25.s, p3/M, z2.s, z9.s\n"
+ "movprfx z24, z30\n fmla z24.s, p3/M, z0.s, z9.s\n"
+ "ldr x27, [x16, #0x78]\n"
+ "ldr x24, [x16, #0x60]\n"
+ "whilelt p1.s, x15, %x[n_channels]\n"
"incw x9\n"
- "fmax z24.s, p3/M, z24.s, z26.s\n"
- "ldp x21, x20, [x15, #0x30]\n"
- "ld1w { z9.s }, p1/Z, [x27, x14, LSL #2]\n"
- "fmax z23.s, p3/M, z23.s, z26.s\n"
- "fmax z22.s, p3/M, z22.s, z26.s\n"
- "ld1w { z10.s }, p1/Z, [x26, x14, LSL #2]\n"
- "ld1w { z11.s }, p1/Z, [x25, x14, LSL #2]\n"
- "fmax z21.s, p3/M, z21.s, z26.s\n"
- "incw x28\n"
- "ld1w { z12.s }, p1/Z, [x24, x14, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x23, x14, LSL #2]\n"
+ "ldr x23, [x16, #0x68]\n"
+ "ldr x26, [x16, #0x70]\n"
"mov p0.b, p2.b\n"
- "whilelt p2.s, x9, %x[n_channels]\n"
- "ld1w { z14.s }, p1/Z, [x22, x14, LSL #2]\n"
- "ld1w { z15.s }, p1/Z, [x21, x14, LSL #2]\n"
- "fmin z24.s, p3/M, z24.s, z25.s\n"
- "fmin z23.s, p3/M, z23.s, z25.s\n"
- "ld1w { z16.s }, p1/Z, [x20, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z0.s, z10.s\n"
+ "fmla z26.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z21.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ldr x22, [x16, #0x88]\n"
+ "ld1w { z30.s }, p3/Z, [x17]\n"
+ "fmla z27.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z18.s }, p2/Z, [x28, x14, LSL #2]\n"
+ "ldr x21, [x16, #0x80]\n"
+ "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z20.s }, p2/Z, [x25, x14, LSL #2]\n"
+ "ldr x25, [x16, #0x90]\n"
+ "fmla z27.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z17.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "ldr x20, [x16, #0x98]\n"
+ "fmla z26.s, p3/M, z0.s, z16.s\n"
+ "fmla z25.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z23.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z15.s\n"
+ "ld1w { z22.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "fmla z26.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z17.s }, p2/Z, [x23, x14, LSL #2]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z25.s, p3/M, z0.s, z22.s\n"
+ "ld1w { z0.s }, p3/Z, [x17, #1, MUL VL]\n"
+ "fmla z27.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "ldr x22, [x16, #0xb0]\n"
+ "fmla z26.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z18.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ldr x21, [x16, #0xc0]\n"
+ "fmla z25.s, p3/M, z4.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z5.s, z20.s\n"
+ "fmla z26.s, p3/M, z3.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x14, LSL #2]\n"
+ "ldr x20, [x16, #0xb8]\n"
+ "fmla z24.s, p3/M, z4.s, z16.s\n"
+ "ld1w { z20.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ld1w { z4.s }, p3/Z, [x17, #5, MUL VL]\n"
+ "fmla z27.s, p3/M, z6.s, z22.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, x14, LSL #2]\n"
+ "fmla z25.s, p3/M, z1.s, z19.s\n"
+ "fmla z24.s, p3/M, z1.s, z18.s\n"
+ "ld1w { z1.s }, p3/Z, [x17, #2, MUL VL]\n"
+ "fmla z26.s, p3/M, z7.s, z18.s\n"
+ "fmla z27.s, p3/M, z7.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x23, x14, LSL #2]\n"
+ "fmla z24.s, p3/M, z5.s, z23.s\n"
+ "ld1w { z19.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "fmla z25.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "fmla z26.s, p3/M, z8.s, z21.s\n"
+ "fmax z27.s, p3/M, z27.s, z29.s\n"
+ "fmla z24.s, p3/M, z2.s, z21.s\n"
+ "ld1w { z17.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "ldp x27, x26, [x16, #0x0]\n"
+ "ldp x25, x24, [x16, #0x10]\n"
"incw x14\n"
- "ld1w { z20.s }, p3/Z, [x16]\n"
- "cmp x14, %x[n_channels]\n"
- "ld1w { z0.s }, p3/Z, [x16, #1, MUL VL]\n"
- "ld1w { z1.s }, p3/Z, [x16, #2, MUL VL]\n"
- "fmin z22.s, p3/M, z22.s, z25.s\n"
- "fmin z21.s, p3/M, z21.s, z25.s\n"
- "ld1w { z2.s }, p3/Z, [x16, #3, MUL VL]\n"
- "ld1w { z3.s }, p3/Z, [x16, #4, MUL VL]\n"
- "st1w { z24.s }, p0, [x13, x28, LSL #2]\n"
- "ld1w { z4.s }, p3/Z, [x16, #5, MUL VL]\n"
- "ld1w { z5.s }, p3/Z, [x16, #6, MUL VL]\n"
- "st1w { z23.s }, p0, [x12, x28, LSL #2]\n"
- "ld1w { z6.s }, p3/Z, [x16, #7, MUL VL]\n"
- "addvl x16, x16, #16\n"
- "st1w { z22.s }, p0, [x11, x28, LSL #2]\n"
- "ld1w { z7.s }, p3/Z, [x16, #-8, MUL VL]\n"
- "st1w { z21.s }, p0, [x10, x28, LSL #2]\n"
- "ld1w { z8.s }, p3/Z, [x16, #-7, MUL VL]\n"
- "addvl x16, x16, #-6\n"
+ "ldp x23, x22, [x16, #0x20]\n"
+ "ldp x21, x20, [x16, #0x30]\n"
+ "fmla z25.s, p3/M, z7.s, z20.s\n"
+ "ld1w { z2.s }, p3/Z, [x17, #3, MUL VL]\n"
+ "fmin z27.s, p3/M, z27.s, z28.s\n"
+ "fmla z24.s, p3/M, z3.s, z16.s\n"
+ "ld1w { z3.s }, p3/Z, [x17, #4, MUL VL]\n"
+ "ld1w { z9.s }, p1/Z, [x27, x15, LSL #2]\n"
+ "ld1w { z10.s }, p1/Z, [x26, x15, LSL #2]\n"
+ "fmax z26.s, p3/M, z26.s, z29.s\n"
+ "whilelt p2.s, x14, %x[n_channels]\n"
+ "ld1w { z12.s }, p1/Z, [x24, x15, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x23, x15, LSL #2]\n"
+ "fmla z25.s, p3/M, z5.s, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x20, x15, LSL #2]\n"
+ "ld1w { z5.s }, p3/Z, [x17, #6, MUL VL]\n"
+ "st1w { z27.s }, p0, [x13, x9, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z19.s\n"
+ "ld1w { z14.s }, p1/Z, [x22, x15, LSL #2]\n"
+ "fmin z26.s, p3/M, z26.s, z28.s\n"
+ "fmla z25.s, p3/M, z8.s, z18.s\n"
+ "fmla z24.s, p3/M, z6.s, z18.s\n"
+ "ld1w { z15.s }, p1/Z, [x21, x15, LSL #2]\n"
+ "ld1w { z6.s }, p3/Z, [x17, #7, MUL VL]\n"
+ "addvl x17, x17, #16\n"
+ "st1w { z26.s }, p0, [x12, x9, LSL #2]\n"
+ "fmla z24.s, p3/M, z8.s, z17.s\n"
+ "ld1w { z11.s }, p1/Z, [x25, x15, LSL #2]\n"
+ "incw x15\n"
+ "fmax z25.s, p3/M, z25.s, z29.s\n"
+ "ld1w { z7.s }, p3/Z, [x17, #-8, MUL VL]\n"
+ "ld1w { z8.s }, p3/Z, [x17, #-7, MUL VL]\n"
+ "addvl x17, x17, #-6\n"
+ "cmp x15, %x[n_channels]\n"
+ "fmin z25.s, p3/M, z25.s, z28.s\n"
+ "fmax z24.s, p3/M, z24.s, z29.s\n"
+ "fmin z24.s, p3/M, z24.s, z28.s\n"
+ "st1w { z25.s }, p0, [x11, x9, LSL #2]\n"
+ "st1w { z24.s }, p0, [x10, x9, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z24, z20\n fmla z24.s, p3/M, z8.s, z9.s\n"
- "movprfx z23, z20\n fmla z23.s, p3/M, z6.s, z9.s\n"
- "ldr x21, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
- "fmla z24.s, p3/M, z0.s, z10.s\n"
- "fmla z23.s, p3/M, z1.s, z12.s\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x20, [x15, #0x50]\n"
- "fmla z24.s, p3/M, z1.s, z11.s\n"
- "fmla z23.s, p3/M, z2.s, z13.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z19.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z24.s, p3/M, z3.s, z14.s\n"
- "fmla z23.s, p3/M, z0.s, z16.s\n"
- "ldr x20, [x15, #0x58]\n"
- "ldr x22, [x15, #0x78]\n"
- "fmla z24.s, p3/M, z4.s, z15.s\n"
- "fmla z23.s, p3/M, z4.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0x60]\n"
- "fmla z24.s, p3/M, z2.s, z16.s\n"
- "fmla z23.s, p3/M, z5.s, z18.s\n"
- "ldr x20, [x15, #0x80]\n"
- "ld1w { z18.s }, p2/Z, [x21, x9, LSL #2]\n"
- "movprfx z22, z20\n fmla z22.s, p3/M, z2.s, z9.s\n"
- "movprfx z21, z20\n fmla z21.s, p3/M, z0.s, z9.s\n"
- "ld1w { z20.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0x68]\n"
- "fmla z24.s, p3/M, z5.s, z19.s\n"
- "fmla z23.s, p3/M, z3.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x22, x9, LSL #2]\n"
- "ldr x20, [x15, #0x88]\n"
- "fmla z22.s, p3/M, z3.s, z17.s\n"
- "fmla z21.s, p3/M, z4.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z22.s, p3/M, z0.s, z18.s\n"
- "fmla z21.s, p3/M, z1.s, z20.s\n"
- "ldr x21, [x15, #0x70]\n"
- "ldr x20, [x15, #0x98]\n"
- "fmla z22.s, p3/M, z4.s, z17.s\n"
- "fmla z21.s, p3/M, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z19.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z24.s, p3/M, z6.s, z18.s\n"
- "ldr x21, [x15, #0x90]\n"
- "ldr x20, [x15, #0xa8]\n"
- "fmla z22.s, p3/M, z1.s, z16.s\n"
- "fmla z21.s, p3/M, z2.s, z19.s\n"
- "fmla z24.s, p3/M, z7.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z18.s }, p2/Z, [x20, x9, LSL #2]\n"
- "ldr x21, [x15, #0xa0]\n"
- "ldr x20, [x15, #0xb0]\n"
- "fmla z22.s, p3/M, z6.s, z16.s\n"
- "fmla z21.s, p3/M, z3.s, z18.s\n"
- "ld1w { z17.s }, p2/Z, [x21, x9, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z22.s, p3/M, z7.s, z17.s\n"
- "fmla z21.s, p3/M, z7.s, z16.s\n"
- "ldr x20, [x15, #0xb8]\n"
- "fmla z23.s, p3/M, z7.s, z20.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z22.s, p3/M, z5.s, z18.s\n"
- "ldr x20, [x15, #0xc0]\n"
- "fmla z21.s, p3/M, z6.s, z17.s\n"
- "fmla z23.s, p3/M, z8.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x9, LSL #2]\n"
- "fmla z22.s, p3/M, z8.s, z17.s\n"
- "fmla z21.s, p3/M, z8.s, z16.s\n"
- "incw x28\n"
+ "movprfx z27, z30\n fmla z27.s, p3/M, z8.s, z9.s\n"
+ "movprfx z26, z30\n fmla z26.s, p3/M, z6.s, z9.s\n"
+ "ldr x28, [x16, #0x40]\n"
+ "ldr x20, [x16, #0x48]\n"
+ "ldr x26, [x16, #0x50]\n"
+ "ldr x25, [x16, #0x58]\n"
+ "movprfx z25, z30\n fmla z25.s, p3/M, z2.s, z9.s\n"
+ "movprfx z24, z30\n fmla z24.s, p3/M, z0.s, z9.s\n"
+ "ldr x27, [x16, #0x78]\n"
+ "ldr x24, [x16, #0x60]\n"
+ "incw x9\n"
"mov p0.b, p2.b\n"
- "fmax z24.s, p3/M, z24.s, z26.s\n"
- "fmax z23.s, p3/M, z23.s, z26.s\n"
- "fmax z22.s, p3/M, z22.s, z26.s\n"
- "fmax z21.s, p3/M, z21.s, z26.s\n"
- "fmin z24.s, p3/M, z24.s, z25.s\n"
- "fmin z23.s, p3/M, z23.s, z25.s\n"
- "st1w { z24.s }, p0, [x13, x28, LSL #2]\n"
- "fmin z22.s, p3/M, z22.s, z25.s\n"
- "fmin z21.s, p3/M, z21.s, z25.s\n"
- "st1w { z23.s }, p0, [x12, x28, LSL #2]\n"
- "st1w { z22.s }, p0, [x11, x28, LSL #2]\n"
- "st1w { z21.s }, p0, [x10, x28, LSL #2]\n"
+ "ldr x23, [x16, #0x68]\n"
+ "ldr x22, [x16, #0x70]\n"
+ "fmla z27.s, p3/M, z0.s, z10.s\n"
+ "fmla z26.s, p3/M, z1.s, z12.s\n"
+ "ld1w { z21.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "ldr x21, [x16, #0x88]\n"
+ "fmla z27.s, p3/M, z1.s, z11.s\n"
+ "ld1w { z18.s }, p2/Z, [x28, x14, LSL #2]\n"
+ "ldr x20, [x16, #0x80]\n"
+ "fmla z26.s, p3/M, z2.s, z13.s\n"
+ "ld1w { z20.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "ldr x26, [x16, #0x90]\n"
+ "fmla z27.s, p3/M, z3.s, z14.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, x14, LSL #2]\n"
+ "ldr x25, [x16, #0x98]\n"
+ "fmla z26.s, p3/M, z0.s, z16.s\n"
+ "fmla z27.s, p3/M, z4.s, z15.s\n"
+ "ld1w { z23.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "ldr x24, [x16, #0xa0]\n"
+ "fmla z25.s, p3/M, z3.s, z17.s\n"
+ "ld1w { z22.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x23, x14, LSL #2]\n"
+ "ldr x23, [x16, #0xa8]\n"
+ "fmla z27.s, p3/M, z2.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "ldr x22, [x16, #0xb0]\n"
+ "fmla z25.s, p3/M, z0.s, z23.s\n"
+ "fmla z26.s, p3/M, z5.s, z21.s\n"
+ "ld1w { z17.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "ldr x21, [x16, #0xc0]\n"
+ "fmla z27.s, p3/M, z5.s, z20.s\n"
+ "fmla z26.s, p3/M, z3.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x27, x14, LSL #2]\n"
+ "ldr x20, [x16, #0xb8]\n"
+ "fmla z24.s, p3/M, z4.s, z16.s\n"
+ "ld1w { z21.s }, p2/Z, [x24, x14, LSL #2]\n"
+ "fmla z25.s, p3/M, z4.s, z19.s\n"
+ "ld1w { z20.s }, p2/Z, [x25, x14, LSL #2]\n"
+ "fmla z27.s, p3/M, z6.s, z23.s\n"
+ "ld1w { z16.s }, p2/Z, [x26, x14, LSL #2]\n"
+ "fmla z26.s, p3/M, z7.s, z17.s\n"
+ "fmla z24.s, p3/M, z1.s, z17.s\n"
+ "fmla z25.s, p3/M, z1.s, z18.s\n"
+ "fmla z27.s, p3/M, z7.s, z18.s\n"
+ "ld1w { z19.s }, p2/Z, [x23, x14, LSL #2]\n"
+ "fmla z26.s, p3/M, z8.s, z20.s\n"
+ "fmla z24.s, p3/M, z5.s, z22.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, x14, LSL #2]\n"
+ "fmla z25.s, p3/M, z6.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x20, x14, LSL #2]\n"
+ "fmax z27.s, p3/M, z27.s, z29.s\n"
+ "fmax z26.s, p3/M, z26.s, z29.s\n"
+ "fmla z24.s, p3/M, z2.s, z20.s\n"
+ "ld1w { z16.s }, p2/Z, [x21, x14, LSL #2]\n"
+ "fmla z25.s, p3/M, z7.s, z21.s\n"
+ "fmin z27.s, p3/M, z27.s, z28.s\n"
+ "fmin z26.s, p3/M, z26.s, z28.s\n"
+ "fmla z24.s, p3/M, z3.s, z19.s\n"
+ "st1w { z27.s }, p0, [x13, x9, LSL #2]\n"
+ "st1w { z26.s }, p0, [x12, x9, LSL #2]\n"
+ "fmla z24.s, p3/M, z7.s, z18.s\n"
+ "fmla z25.s, p3/M, z5.s, z19.s\n"
+ "fmla z24.s, p3/M, z6.s, z17.s\n"
+ "fmla z25.s, p3/M, z8.s, z17.s\n"
+ "fmla z24.s, p3/M, z8.s, z16.s\n"
+ "fmax z25.s, p3/M, z25.s, z29.s\n"
+ "fmin z25.s, p3/M, z25.s, z28.s\n"
+ "st1w { z25.s }, p0, [x11, x9, LSL #2]\n"
+ "fmax z24.s, p3/M, z24.s, z29.s\n"
+ "fmin z24.s, p3/M, z24.s, z28.s\n"
+ "st1w { z24.s }, p0, [x10, x9, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30"
);
}
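
Note that each of these diffs also widens the asm clobber list to match the re-allocated registers; the hunk above, for instance, adds x17 and z27-z30, which the new body scribbles on. In GCC/Clang extended asm, every register the template modifies must be named in the clobber list, or the compiler is free to keep a live value there across the statement. A minimal standalone illustration of the rule (the helper name is invented, not library code):

    #include <cstdint>

    // Divide a byte count by the SVE vector length in bytes.
    static inline std::uint64_t bytes_to_vectors(std::uint64_t n_bytes)
    {
        std::uint64_t out;
        __asm__ __volatile__(
            "cntb x20\n"                    // x20 = bytes per SVE vector (scratch)
            "udiv %x[out], %x[n], x20\n"
            : [out] "=r" (out)
            : [n] "r" (n_bytes)
            : "x20"                         // scratch register must be clobbered
        );
        return out;
    }

Dropping "x20" from that list would compile, but the caller's value in x20 could be silently corrupted; the clobber additions in these hunks follow the same reasoning for the newly used x and z registers.
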
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
index 075181a488..e1cc33db1b 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_direct.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -88,432 +88,432 @@ void sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_direct_impl(
__asm__ __volatile__(
"ptrue p3.b\n"
- "mov x12, #0x0\n"
- "mov x8, #0x0\n"
+ "mov x6, #0x0\n"
+ "mov x7, #0x0\n"
"1:" // Tile loop
- "str x12, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "str x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "mov x26, #0x2\n"
"mov x25, #0x2\n"
- "mov x24, #0x2\n"
- "str x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
- "ldr x23, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
- "ldr x17, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
- "mul x22, x12, x23\n" // offset = tile_i * ld_input_row
- "ldr x21, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
- "madd x22, x8, x17, x22\n" // offset += tile_j * ld_input_col
+ "str x7, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ldr x24, [%x[params_struct], %[offsetof_args_ld_input_row]]\n"
+ "ldr x8, [%x[params_struct], %[offsetof_args_ld_input_col]]\n"
+ "cntw x17\n"
+ "ldr x23, [%x[params_struct], %[offsetof_args_ld_output_row]]\n"
"ldr x16, [%x[params_struct], %[offsetof_args_ld_output_col]]\n"
- "add x15, x17, x17\n"
- "mul x20, x12, x21\n" // offset = tile_i * ld_output_row
+ "whilelt p2.s, XZR, %x[n_channels]\n"
+ "mov x15, #0x0\n"
"ldr x14, [%x[params_struct], %[offsetof_args_inptr]]\n"
"ldr x13, [%x[params_struct], %[offsetof_args_outptr]]\n"
- "cntw x12\n"
- "mul x22, x22, x25\n" // offset *= kernel_stride * output_size
- "add x14, x14, x22, LSL #2\n" // inptr[0] += offset * sizeof(float)
- "add x11, x14, x23, LSL #2\n"
- "ldr x10, [%x[params_struct], %[offsetof_args_params]]\n"
- "madd x20, x8, x16, x20\n" // offset += tile_j * ld_output_col
- "add x9, x11, x23, LSL #2\n"
- "add x28, x15, x17\n"
+ "mul x20, x6, x24\n" // offset = tile_i * ld_input_row
+ "add x12, x8, x8\n"
+ "ldr x11, [%x[params_struct], %[offsetof_args_params]]\n"
+ "add x10, x12, x8\n"
+ "cmp x17, %x[n_channels]\n"
"ld1rw { z15.s }, p3/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "mul x20, x20, x24\n" // offset *= output_tile_size
- "whilelt p2.s, XZR, %x[n_channels]\n"
- "add x27, x9, x23, LSL #2\n"
+ "mul x22, x6, x23\n" // offset = tile_i * ld_output_row
+ "add x9, x10, x8\n"
"ld1rw { z28.s }, p3/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "add x26, x28, x17\n"
- "add x25, x27, x23, LSL #2\n"
- "ld1w { z29.s }, p3/Z, [x10]\n"
- "ld1w { z0.s }, p3/Z, [x10, #1, MUL VL]\n"
- "add x24, x26, x17\n"
- "add x13, x13, x20, LSL #2\n" // outptrs[0] += offset * sizeof(float)
- "ld1w { z1.s }, p3/Z, [x10, #2, MUL VL]\n"
- "ld1w { z2.s }, p3/Z, [x10, #3, MUL VL]\n"
- "cmp x12, %x[n_channels]\n"
- "add x23, x25, x23, LSL #2\n"
- "ld1w { z3.s }, p3/Z, [x10, #4, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x10, #5, MUL VL]\n"
- "add x22, x13, x21, LSL #2\n"
- "mov x21, #0x0\n"
+ "sub x21, XZR, x17\n"
+ "madd x20, x7, x8, x20\n" // offset += tile_j * ld_input_col
+ "add x28, x9, x8\n"
+ "ld1w { z29.s }, p3/Z, [x11]\n"
+ "ld1w { z0.s }, p3/Z, [x11, #1, MUL VL]\n"
+ "ld1w { z1.s }, p3/Z, [x11, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x11, #3, MUL VL]\n"
+ "madd x22, x7, x16, x22\n" // offset += tile_j * ld_output_col
+ "ld1w { z3.s }, p3/Z, [x11, #4, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x11, #5, MUL VL]\n"
+ "addvl x11, x11, #6\n"
+ "mul x20, x20, x26\n" // offset *= kernel_stride * output_size
+ "mul x22, x22, x25\n" // offset *= output_tile_size
+ "add x14, x14, x20, LSL #2\n" // inptr[0] += offset * sizeof(float)
+ "add x20, x14, x24, LSL #2\n"
+ "add x27, x20, x24, LSL #2\n"
"ld1w { z5.s }, p2/Z, [x14]\n"
- "ld1w { z6.s }, p2/Z, [x14, x17, LSL #2]\n"
- "sub x20, XZR, x12\n"
- "ld1w { z7.s }, p2/Z, [x11]\n"
- "ld1w { z8.s }, p2/Z, [x11, x17, LSL #2]\n"
- "addvl x10, x10, #6\n"
- "ld1w { z9.s }, p2/Z, [x14, x15, LSL #2]\n"
- "ld1w { z13.s }, p2/Z, [x11, x15, LSL #2]\n"
- "ld1w { z11.s }, p2/Z, [x14, x28, LSL #2]\n"
- "ld1w { z12.s }, p2/Z, [x14, x26, LSL #2]\n"
- "ld1w { z10.s }, p2/Z, [x11, x24, LSL #2]\n"
- "ld1w { z14.s }, p2/Z, [x9]\n"
+ "ld1w { z6.s }, p2/Z, [x14, x8, LSL #2]\n"
+ "add x26, x27, x24, LSL #2\n"
+ "add x25, x26, x24, LSL #2\n"
+ "ld1w { z7.s }, p2/Z, [x20]\n"
+ "ld1w { z8.s }, p2/Z, [x20, x8, LSL #2]\n"
+ "add x13, x13, x22, LSL #2\n" // outptrs[0] += offset * sizeof(float)
+ "add x24, x25, x24, LSL #2\n"
+ "add x23, x13, x23, LSL #2\n"
+ "ld1w { z9.s }, p2/Z, [x14, x12, LSL #2]\n"
+ "ld1w { z13.s }, p2/Z, [x20, x12, LSL #2]\n"
+ "ld1w { z11.s }, p2/Z, [x14, x10, LSL #2]\n"
+ "ld1w { z12.s }, p2/Z, [x14, x9, LSL #2]\n"
+ "ld1w { z10.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z14.s }, p2/Z, [x27]\n"
"bge 3f\n"
"2:" // Tile loop: Channel loop
- "movprfx z27, z29\n fmla z27.s, p3/M, z0.s, z5.s\n"
+ "movprfx z30, z29\n fmla z30.s, p3/M, z0.s, z5.s\n"
"movprfx z31, z29\n fmla z31.s, p3/M, z0.s, z6.s\n"
- "ld1w { z24.s }, p2/Z, [x11, x28, LSL #2]\n"
- "whilelt p1.s, x12, %x[n_channels]\n"
- "movprfx z26, z29\n fmla z26.s, p3/M, z0.s, z7.s\n"
- "movprfx z30, z29\n fmla z30.s, p3/M, z0.s, z8.s\n"
- "ld1w { z18.s }, p3/Z, [x10]\n"
+ "ld1w { z25.s }, p2/Z, [x20, x10, LSL #2]\n"
+ "whilelt p1.s, x17, %x[n_channels]\n"
+ "movprfx z27, z29\n fmla z27.s, p3/M, z0.s, z7.s\n"
+ "movprfx z26, z29\n fmla z26.s, p3/M, z0.s, z8.s\n"
+ "ld1w { z23.s }, p3/Z, [x11]\n"
+ "incw x15\n"
+ "incw x17\n"
+ "mov p0.b, p2.b\n"
"incw x21\n"
- "fmla z27.s, p3/M, z1.s, z6.s\n"
+ "fmla z30.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z22.s }, p2/Z, [x20, x9, LSL #2]\n"
+ "addvl x20, x20, #1\n"
"fmla z31.s, p3/M, z1.s, z9.s\n"
- "ld1w { z23.s }, p2/Z, [x11, x26, LSL #2]\n"
- "incw x12\n"
- "fmla z26.s, p3/M, z1.s, z8.s\n"
- "fmla z30.s, p3/M, z1.s, z13.s\n"
- "ld1w { z22.s }, p3/Z, [x10, #1, MUL VL]\n"
- "mov p0.b, p2.b\n"
- "fmla z27.s, p3/M, z2.s, z9.s\n"
- "fmla z31.s, p3/M, z2.s, z11.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x24, LSL #2]\n"
+ "fmla z27.s, p3/M, z1.s, z8.s\n"
+ "fmla z26.s, p3/M, z1.s, z13.s\n"
+ "ld1w { z21.s }, p3/Z, [x11, #1, MUL VL]\n"
+ "fmla z30.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z18.s }, p2/Z, [x14, x28, LSL #2]\n"
"addvl x14, x14, #1\n"
- "fmla z26.s, p3/M, z2.s, z13.s\n"
- "fmla z30.s, p3/M, z2.s, z24.s\n"
- "ld1w { z20.s }, p3/Z, [x10, #2, MUL VL]\n"
- "addvl x11, x11, #1\n"
- "fmla z27.s, p3/M, z3.s, z11.s\n"
+ "fmla z31.s, p3/M, z2.s, z11.s\n"
+ "fmla z27.s, p3/M, z2.s, z13.s\n"
+ "fmla z26.s, p3/M, z2.s, z25.s\n"
+ "ld1w { z16.s }, p3/Z, [x11, #2, MUL VL]\n"
+ "fmla z30.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z20.s }, p2/Z, [x27, x8, LSL #2]\n"
"fmla z31.s, p3/M, z3.s, z12.s\n"
- "ld1w { z0.s }, p2/Z, [x9, x17, LSL #2]\n"
- "incw x20\n"
- "fmla z26.s, p3/M, z3.s, z24.s\n"
- "fmla z30.s, p3/M, z3.s, z23.s\n"
- "ld1w { z17.s }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z27.s, p3/M, z4.s, z12.s\n"
- "fmla z31.s, p3/M, z4.s, z16.s\n"
- "ld1w { z19.s }, p2/Z, [x9, x15, LSL #2]\n"
- "ld1w { z5.s }, p2/Z, [x9, x28, LSL #2]\n"
- "fmla z26.s, p3/M, z4.s, z23.s\n"
- "fmla z30.s, p3/M, z4.s, z10.s\n"
- "ld1w { z21.s }, p3/Z, [x10, #4, MUL VL]\n"
- "fmla z27.s, p3/M, z18.s, z7.s\n"
- "fmla z31.s, p3/M, z18.s, z8.s\n"
- "ld1w { z7.s }, p1/Z, [x11]\n"
- "fmla z26.s, p3/M, z18.s, z14.s\n"
- "fmla z30.s, p3/M, z18.s, z0.s\n"
- "ld1w { z18.s }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z27.s, p3/M, z22.s, z8.s\n"
- "fmla z31.s, p3/M, z22.s, z13.s\n"
- "ld1w { z3.s }, p2/Z, [x9, x24, LSL #2]\n"
- "fmla z26.s, p3/M, z22.s, z0.s\n"
- "fmla z30.s, p3/M, z22.s, z19.s\n"
- "ld1w { z8.s }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z27.s, p3/M, z20.s, z13.s\n"
- "fmla z31.s, p3/M, z20.s, z24.s\n"
- "ld1w { z2.s }, p2/Z, [x9, x26, LSL #2]\n"
- "addvl x9, x9, #1\n"
- "fmla z26.s, p3/M, z20.s, z19.s\n"
- "fmla z30.s, p3/M, z20.s, z5.s\n"
- "ld1w { z16.s }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
- "fmla z27.s, p3/M, z17.s, z24.s\n"
- "fmla z31.s, p3/M, z17.s, z23.s\n"
- "ld1w { z25.s }, p2/Z, [x27]\n"
- "ld1w { z29.s }, p3/Z, [x10, #4, MUL VL]\n"
- "fmla z26.s, p3/M, z17.s, z5.s\n"
- "fmla z30.s, p3/M, z17.s, z2.s\n"
- "ld1w { z17.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "fmla z27.s, p3/M, z21.s, z23.s\n"
- "fmla z31.s, p3/M, z21.s, z10.s\n"
- "ld1w { z24.s }, p2/Z, [x27, x17, LSL #2]\n"
- "ld1w { z22.s }, p2/Z, [x27, x15, LSL #2]\n"
- "fmla z26.s, p3/M, z21.s, z2.s\n"
- "fmla z30.s, p3/M, z21.s, z3.s\n"
- "ld1w { z21.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "fmla z27.s, p3/M, z18.s, z14.s\n"
- "fmla z31.s, p3/M, z18.s, z0.s\n"
- "ld1w { z1.s }, p2/Z, [x27, x24, LSL #2]\n"
- "fmla z26.s, p3/M, z18.s, z25.s\n"
- "fmla z30.s, p3/M, z18.s, z24.s\n"
- "ld1w { z23.s }, p3/Z, [x10, #-6, MUL VL]\n"
- "fmla z27.s, p3/M, z8.s, z0.s\n"
- "fmla z31.s, p3/M, z8.s, z19.s\n"
- "ld1w { z0.s }, p2/Z, [x27, x28, LSL #2]\n"
- "fmla z26.s, p3/M, z8.s, z24.s\n"
- "fmla z30.s, p3/M, z8.s, z22.s\n"
- "ld1w { z20.s }, p3/Z, [x10, #-5, MUL VL]\n"
- "fmla z27.s, p3/M, z16.s, z19.s\n"
- "fmla z31.s, p3/M, z16.s, z5.s\n"
- "ld1w { z19.s }, p2/Z, [x27, x26, LSL #2]\n"
+ "fmla z27.s, p3/M, z3.s, z25.s\n"
+ "fmla z26.s, p3/M, z3.s, z22.s\n"
+ "ld1w { z17.s }, p3/Z, [x11, #3, MUL VL]\n"
+ "fmla z30.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z19.s }, p2/Z, [x27, x12, LSL #2]\n"
+ "fmla z31.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z12.s }, p2/Z, [x27, x10, LSL #2]\n"
+ "fmla z27.s, p3/M, z4.s, z22.s\n"
+ "fmla z26.s, p3/M, z4.s, z10.s\n"
+ "ld1w { z0.s }, p3/Z, [x11, #4, MUL VL]\n"
+ "fmla z30.s, p3/M, z23.s, z7.s\n"
+ "ld1w { z7.s }, p1/Z, [x20]\n"
+ "fmla z31.s, p3/M, z23.s, z8.s\n"
+ "fmla z27.s, p3/M, z23.s, z14.s\n"
+ "fmla z26.s, p3/M, z23.s, z20.s\n"
+ "ld1w { z18.s }, p3/Z, [x11, #5, MUL VL]\n"
+ "fmla z30.s, p3/M, z21.s, z8.s\n"
+ "ld1w { z1.s }, p2/Z, [x27, x28, LSL #2]\n"
+ "fmla z31.s, p3/M, z21.s, z13.s\n"
+ "fmla z27.s, p3/M, z21.s, z20.s\n"
+ "fmla z26.s, p3/M, z21.s, z19.s\n"
+ "ld1w { z5.s }, p3/Z, [x11, #6, MUL VL]\n"
+ "fmla z30.s, p3/M, z16.s, z13.s\n"
+ "ld1w { z24.s }, p2/Z, [x27, x9, LSL #2]\n"
"addvl x27, x27, #1\n"
- "fmla z26.s, p3/M, z16.s, z22.s\n"
- "fmla z30.s, p3/M, z16.s, z0.s\n"
- "ld1w { z18.s }, p3/Z, [x10, #-4, MUL VL]\n"
- "fmla z27.s, p3/M, z17.s, z5.s\n"
- "fmla z31.s, p3/M, z17.s, z2.s\n"
+ "fmla z31.s, p3/M, z16.s, z25.s\n"
+ "fmla z27.s, p3/M, z16.s, z19.s\n"
+ "fmla z26.s, p3/M, z16.s, z12.s\n"
+ "ld1w { z16.s }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
+ "fmla z30.s, p3/M, z17.s, z25.s\n"
+ "ld1w { z25.s }, p2/Z, [x26]\n"
+ "fmla z31.s, p3/M, z17.s, z22.s\n"
+ "fmla z27.s, p3/M, z17.s, z12.s\n"
+ "ld1w { z29.s }, p3/Z, [x11, #4, MUL VL]\n"
+ "fmla z26.s, p3/M, z17.s, z24.s\n"
+ "ld1w { z17.s }, p3/Z, [x11, #-8, MUL VL]\n"
+ "fmla z30.s, p3/M, z0.s, z22.s\n"
+ "ld1w { z23.s }, p2/Z, [x26, x8, LSL #2]\n"
+ "fmla z31.s, p3/M, z0.s, z10.s\n"
+ "ld1w { z22.s }, p2/Z, [x26, x12, LSL #2]\n"
+ "fmla z27.s, p3/M, z0.s, z24.s\n"
+ "fmla z26.s, p3/M, z0.s, z1.s\n"
+ "ld1w { z21.s }, p3/Z, [x11, #-7, MUL VL]\n"
+ "fmla z30.s, p3/M, z18.s, z14.s\n"
+ "ld1w { z10.s }, p2/Z, [x26, x28, LSL #2]\n"
+ "fmla z31.s, p3/M, z18.s, z20.s\n"
+ "fmla z27.s, p3/M, z18.s, z25.s\n"
+ "fmla z26.s, p3/M, z18.s, z23.s\n"
+ "ld1w { z6.s }, p3/Z, [x11, #-6, MUL VL]\n"
+ "fmla z30.s, p3/M, z5.s, z20.s\n"
+ "ld1w { z0.s }, p2/Z, [x26, x10, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z19.s\n"
+ "fmla z27.s, p3/M, z5.s, z23.s\n"
+ "fmla z26.s, p3/M, z5.s, z22.s\n"
+ "ld1w { z20.s }, p3/Z, [x11, #-5, MUL VL]\n"
+ "fmla z30.s, p3/M, z16.s, z19.s\n"
+ "ld1w { z19.s }, p2/Z, [x26, x9, LSL #2]\n"
+ "addvl x26, x26, #1\n"
+ "fmla z31.s, p3/M, z16.s, z12.s\n"
+ "fmla z27.s, p3/M, z16.s, z22.s\n"
+ "fmla z26.s, p3/M, z16.s, z0.s\n"
+ "ld1w { z18.s }, p3/Z, [x11, #-4, MUL VL]\n"
+ "fmla z30.s, p3/M, z17.s, z12.s\n"
"ld1w { z16.s }, p2/Z, [x25]\n"
- "fmla z26.s, p3/M, z17.s, z0.s\n"
- "fmla z30.s, p3/M, z17.s, z19.s\n"
- "ld1w { z17.s }, p3/Z, [x10, #-3, MUL VL]\n"
- "fmla z27.s, p3/M, z21.s, z2.s\n"
- "fmla z31.s, p3/M, z21.s, z3.s\n"
- "ld1w { z4.s }, p2/Z, [x25, x17, LSL #2]\n"
- "ld1w { z8.s }, p2/Z, [x25, x26, LSL #2]\n"
- "fmla z26.s, p3/M, z21.s, z19.s\n"
- "fmla z30.s, p3/M, z21.s, z1.s\n"
- "ld1w { z13.s }, p3/Z, [x10, #-2, MUL VL]\n"
- "fmla z27.s, p3/M, z23.s, z25.s\n"
- "fmla z31.s, p3/M, z23.s, z24.s\n"
- "ld1w { z25.s }, p2/Z, [x25, x15, LSL #2]\n"
- "fmla z26.s, p3/M, z23.s, z16.s\n"
- "fmla z30.s, p3/M, z23.s, z4.s\n"
- "ld1w { z5.s }, p3/Z, [x10, #-1, MUL VL]\n"
- "fmla z27.s, p3/M, z20.s, z24.s\n"
+ "fmla z31.s, p3/M, z17.s, z24.s\n"
+ "fmla z27.s, p3/M, z17.s, z0.s\n"
+ "fmla z26.s, p3/M, z17.s, z19.s\n"
+ "ld1w { z17.s }, p3/Z, [x11, #-3, MUL VL]\n"
+ "fmla z30.s, p3/M, z21.s, z24.s\n"
+ "ld1w { z9.s }, p2/Z, [x25, x8, LSL #2]\n"
+ "fmla z31.s, p3/M, z21.s, z1.s\n"
+ "ld1w { z8.s }, p2/Z, [x25, x9, LSL #2]\n"
+ "fmla z27.s, p3/M, z21.s, z19.s\n"
+ "fmla z26.s, p3/M, z21.s, z10.s\n"
+ "ld1w { z5.s }, p3/Z, [x11, #-2, MUL VL]\n"
+ "fmla z30.s, p3/M, z6.s, z25.s\n"
+ "ld1w { z25.s }, p2/Z, [x25, x12, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z23.s\n"
+ "fmla z27.s, p3/M, z6.s, z16.s\n"
+ "fmla z26.s, p3/M, z6.s, z9.s\n"
+ "ld1w { z4.s }, p3/Z, [x11, #-1, MUL VL]\n"
+ "fmla z30.s, p3/M, z20.s, z23.s\n"
+ "ld1w { z24.s }, p2/Z, [x25, x10, LSL #2]\n"
"fmla z31.s, p3/M, z20.s, z22.s\n"
- "ld1w { z24.s }, p2/Z, [x25, x28, LSL #2]\n"
- "fmla z26.s, p3/M, z20.s, z4.s\n"
- "fmla z30.s, p3/M, z20.s, z25.s\n"
- "ld1w { z23.s }, p3/Z, [x10]\n"
- "fmla z27.s, p3/M, z18.s, z22.s\n"
- "fmla z31.s, p3/M, z18.s, z0.s\n"
- "ld1w { z22.s }, p2/Z, [x25, x24, LSL #2]\n"
+ "fmla z27.s, p3/M, z20.s, z9.s\n"
+ "fmla z26.s, p3/M, z20.s, z25.s\n"
+ "ld1w { z23.s }, p3/Z, [x11]\n"
+ "fmla z30.s, p3/M, z18.s, z22.s\n"
+ "ld1w { z22.s }, p2/Z, [x25, x28, LSL #2]\n"
"addvl x25, x25, #1\n"
- "fmla z26.s, p3/M, z18.s, z25.s\n"
- "fmla z30.s, p3/M, z18.s, z24.s\n"
- "ld1w { z21.s }, p3/Z, [x10, #1, MUL VL]\n"
- "fmla z27.s, p3/M, z17.s, z0.s\n"
+ "fmla z31.s, p3/M, z18.s, z0.s\n"
+ "fmla z27.s, p3/M, z18.s, z25.s\n"
+ "fmla z26.s, p3/M, z18.s, z24.s\n"
+ "ld1w { z21.s }, p3/Z, [x11, #1, MUL VL]\n"
+ "fmla z30.s, p3/M, z17.s, z0.s\n"
+ "ld1w { z18.s }, p2/Z, [x24]\n"
"fmla z31.s, p3/M, z17.s, z19.s\n"
- "ld1w { z18.s }, p2/Z, [x23]\n"
- "fmla z26.s, p3/M, z17.s, z24.s\n"
- "fmla z30.s, p3/M, z17.s, z8.s\n"
- "ld1w { z20.s }, p3/Z, [x10, #2, MUL VL]\n"
- "fmla z27.s, p3/M, z13.s, z19.s\n"
- "fmla z31.s, p3/M, z13.s, z1.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x17, LSL #2]\n"
- "ld1w { z14.s }, p1/Z, [x9]\n"
- "fmla z26.s, p3/M, z13.s, z8.s\n"
- "fmla z30.s, p3/M, z13.s, z22.s\n"
- "ld1w { z19.s }, p3/Z, [x10, #3, MUL VL]\n"
- "fmla z27.s, p3/M, z5.s, z16.s\n"
- "fmla z31.s, p3/M, z5.s, z4.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
- "fmla z26.s, p3/M, z5.s, z18.s\n"
- "fmla z30.s, p3/M, z5.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x23, x28, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x10, #5, MUL VL]\n"
- "fmla z27.s, p3/M, z23.s, z4.s\n"
+ "fmla z27.s, p3/M, z17.s, z24.s\n"
+ "fmla z26.s, p3/M, z17.s, z8.s\n"
+ "ld1w { z20.s }, p3/Z, [x11, #2, MUL VL]\n"
+ "fmla z30.s, p3/M, z5.s, z19.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x8, LSL #2]\n"
+ "fmla z31.s, p3/M, z5.s, z10.s\n"
+ "ld1w { z14.s }, p1/Z, [x27]\n"
+ "fmla z27.s, p3/M, z5.s, z8.s\n"
+ "fmla z26.s, p3/M, z5.s, z22.s\n"
+ "ld1w { z19.s }, p3/Z, [x11, #3, MUL VL]\n"
+ "fmla z30.s, p3/M, z4.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, x12, LSL #2]\n"
+ "fmla z31.s, p3/M, z4.s, z9.s\n"
+ "fmla z27.s, p3/M, z4.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, x10, LSL #2]\n"
+ "fmla z26.s, p3/M, z4.s, z17.s\n"
+ "ld1w { z0.s }, p3/Z, [x11, #5, MUL VL]\n"
+ "fmla z30.s, p3/M, z23.s, z9.s\n"
+ "ld1w { z13.s }, p1/Z, [x20, x12, LSL #2]\n"
"fmla z31.s, p3/M, z23.s, z25.s\n"
- "ld1w { z13.s }, p1/Z, [x11, x15, LSL #2]\n"
- "fmla z26.s, p3/M, z23.s, z17.s\n"
- "fmla z30.s, p3/M, z23.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x26, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x10, #6, MUL VL]\n"
- "fmla z27.s, p3/M, z21.s, z25.s\n"
- "fmla z31.s, p3/M, z21.s, z24.s\n"
+ "fmla z27.s, p3/M, z23.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x9, LSL #2]\n"
+ "fmla z26.s, p3/M, z23.s, z16.s\n"
+ "ld1w { z1.s }, p3/Z, [x11, #6, MUL VL]\n"
+ "fmla z30.s, p3/M, z21.s, z25.s\n"
"ld1w { z5.s }, p1/Z, [x14]\n"
- "fmla z26.s, p3/M, z21.s, z16.s\n"
- "fmla z30.s, p3/M, z21.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x24, LSL #2]\n"
- "ld1w { z2.s }, p3/Z, [x10, #7, MUL VL]\n"
- "fmla z27.s, p3/M, z20.s, z24.s\n"
+ "fmla z31.s, p3/M, z21.s, z24.s\n"
+ "fmla z27.s, p3/M, z21.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, x28, LSL #2]\n"
+ "whilelt p2.s, x15, %x[n_channels]\n"
+ "cmp x17, %x[n_channels]\n"
+ "addvl x24, x24, #1\n"
+ "fmla z26.s, p3/M, z21.s, z18.s\n"
+ "ld1w { z2.s }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
+ "fmla z30.s, p3/M, z20.s, z24.s\n"
+ "ld1w { z6.s }, p1/Z, [x14, x8, LSL #2]\n"
"fmla z31.s, p3/M, z20.s, z8.s\n"
- "addvl x10, x10, #16\n"
- "whilelt p2.s, x21, %x[n_channels]\n"
- "fmla z26.s, p3/M, z20.s, z18.s\n"
- "fmla z30.s, p3/M, z20.s, z17.s\n"
- "cmp x12, %x[n_channels]\n"
- "addvl x23, x23, #1\n"
- "fmla z27.s, p3/M, z19.s, z8.s\n"
+ "fmla z27.s, p3/M, z20.s, z18.s\n"
+ "ld1w { z11.s }, p1/Z, [x14, x10, LSL #2]\n"
+ "fmla z26.s, p3/M, z20.s, z17.s\n"
+ "ld1w { z3.s }, p3/Z, [x11, #-8, MUL VL]\n"
+ "fmla z30.s, p3/M, z19.s, z8.s\n"
+ "ld1w { z8.s }, p1/Z, [x20, x8, LSL #2]\n"
"fmla z31.s, p3/M, z19.s, z22.s\n"
- "fmax z27.s, p3/M, z27.s, z15.s\n"
+ "ld1w { z10.s }, p1/Z, [x20, x28, LSL #2]\n"
+ "fmla z27.s, p3/M, z19.s, z17.s\n"
+ "ld1w { z12.s }, p1/Z, [x14, x9, LSL #2]\n"
+ "fmla z26.s, p3/M, z19.s, z16.s\n"
+ "ld1w { z9.s }, p1/Z, [x14, x12, LSL #2]\n"
+ "ld1w { z4.s }, p3/Z, [x11, #-7, MUL VL]\n"
+ "addvl x11, x11, #-6\n"
+ "fmax z30.s, p3/M, z30.s, z15.s\n"
"fmax z31.s, p3/M, z31.s, z15.s\n"
- "fmla z26.s, p3/M, z19.s, z17.s\n"
- "fmla z30.s, p3/M, z19.s, z16.s\n"
+ "fmax z27.s, p3/M, z27.s, z15.s\n"
"fmax z26.s, p3/M, z26.s, z15.s\n"
- "fmax z30.s, p3/M, z30.s, z15.s\n"
- "fmin z27.s, p3/M, z27.s, z28.s\n"
+ "fmin z30.s, p3/M, z30.s, z28.s\n"
"fmin z31.s, p3/M, z31.s, z28.s\n"
- "ld1w { z6.s }, p1/Z, [x14, x17, LSL #2]\n"
- "ld1w { z8.s }, p1/Z, [x11, x17, LSL #2]\n"
+ "fmin z27.s, p3/M, z27.s, z28.s\n"
"fmin z26.s, p3/M, z26.s, z28.s\n"
- "fmin z30.s, p3/M, z30.s, z28.s\n"
- "ld1w { z9.s }, p1/Z, [x14, x15, LSL #2]\n"
- "ld1w { z11.s }, p1/Z, [x14, x28, LSL #2]\n"
- "ld1w { z12.s }, p1/Z, [x14, x26, LSL #2]\n"
- "ld1w { z10.s }, p1/Z, [x11, x24, LSL #2]\n"
- "st1w { z27.s }, p0, [x13]\n"
+ "st1w { z30.s }, p0, [x13]\n"
"st1w { z31.s }, p0, [x13, x16, LSL #2]\n"
"addvl x13, x13, #1\n"
- "ld1w { z3.s }, p3/Z, [x10, #-8, MUL VL]\n"
- "ld1w { z4.s }, p3/Z, [x10, #-7, MUL VL]\n"
- "st1w { z26.s }, p0, [x22]\n"
- "addvl x10, x10, #-6\n"
- "st1w { z30.s }, p0, [x22, x16, LSL #2]\n"
- "addvl x22, x22, #1\n"
+ "st1w { z27.s }, p0, [x23]\n"
+ "st1w { z26.s }, p0, [x23, x16, LSL #2]\n"
+ "addvl x23, x23, #1\n"
"blt 2b\n"
"3:" // Tile loop: Channel tail
"movprfx z30, z29\n fmla z30.s, p3/M, z0.s, z5.s\n"
"movprfx z31, z29\n fmla z31.s, p3/M, z0.s, z6.s\n"
- "ld1w { z22.s }, p2/Z, [x11, x28, LSL #2]\n"
- "ldr x8, [%x[params_struct], %[offsetof_args_tile_j]]\n"
+ "ld1w { z22.s }, p2/Z, [x20, x10, LSL #2]\n"
+ "ldr x7, [%x[params_struct], %[offsetof_args_tile_j]]\n"
"movprfx z5, z29\n fmla z5.s, p3/M, z0.s, z7.s\n"
"fmla z29.s, p3/M, z0.s, z8.s\n"
- "ld1w { z20.s }, p3/Z, [x10]\n"
- "ldr x12, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ld1w { z20.s }, p3/Z, [x11]\n"
+ "ldr x6, [%x[params_struct], %[offsetof_args_tile_i]]\n"
+ "ldr x22, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "ldr x21, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
+ "mov p0.b, p2.b\n"
+ "add x7, x7, #0x1\n"
"fmla z30.s, p3/M, z1.s, z6.s\n"
+ "ld1w { z6.s }, p2/Z, [x20, x9, LSL #2]\n"
"fmla z31.s, p3/M, z1.s, z9.s\n"
- "ld1w { z6.s }, p2/Z, [x11, x26, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_cols]]\n"
+ "add x20, x6, #0x1\n"
"fmla z5.s, p3/M, z1.s, z8.s\n"
"fmla z29.s, p3/M, z1.s, z13.s\n"
- "ld1w { z19.s }, p3/Z, [x10, #1, MUL VL]\n"
- "add x8, x8, #0x1\n"
+ "ld1w { z19.s }, p3/Z, [x11, #1, MUL VL]\n"
+ "cmp x7, x22\n"
+ "csel x6, x6, x20, LT\n"
+ "csel x7, x7, XZR, LT\n"
"fmla z30.s, p3/M, z2.s, z9.s\n"
+ "ld1w { z16.s }, p2/Z, [x14, x28, LSL #2]\n"
"fmla z31.s, p3/M, z2.s, z11.s\n"
- "ld1w { z16.s }, p2/Z, [x14, x24, LSL #2]\n"
- "cmp x8, x20\n"
"fmla z5.s, p3/M, z2.s, z13.s\n"
"fmla z29.s, p3/M, z2.s, z22.s\n"
- "ld1w { z18.s }, p3/Z, [x10, #2, MUL VL]\n"
- "add x21, x12, #0x1\n"
+ "ld1w { z18.s }, p3/Z, [x11, #2, MUL VL]\n"
+ "cmp x6, x21\n"
"fmla z30.s, p3/M, z3.s, z11.s\n"
+ "ld1w { z1.s }, p2/Z, [x27, x8, LSL #2]\n"
"fmla z31.s, p3/M, z3.s, z12.s\n"
- "ld1w { z1.s }, p2/Z, [x9, x17, LSL #2]\n"
- "ldr x20, [%x[params_struct], %[offsetof_args_n_tile_rows]]\n"
"fmla z5.s, p3/M, z3.s, z22.s\n"
"fmla z29.s, p3/M, z3.s, z6.s\n"
- "ld1w { z17.s }, p3/Z, [x10, #3, MUL VL]\n"
- "csel x12, x12, x21, LT\n"
+ "ld1w { z17.s }, p3/Z, [x11, #3, MUL VL]\n"
"fmla z30.s, p3/M, z4.s, z12.s\n"
+ "ld1w { z0.s }, p2/Z, [x27, x12, LSL #2]\n"
"fmla z31.s, p3/M, z4.s, z16.s\n"
- "ld1w { z0.s }, p2/Z, [x9, x15, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x9, x28, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x27, x10, LSL #2]\n"
"fmla z5.s, p3/M, z4.s, z6.s\n"
"fmla z29.s, p3/M, z4.s, z10.s\n"
- "ld1w { z16.s }, p3/Z, [x10, #4, MUL VL]\n"
- "mov p0.b, p2.b\n"
+ "ld1w { z16.s }, p3/Z, [x11, #4, MUL VL]\n"
"fmla z30.s, p3/M, z20.s, z7.s\n"
"fmla z31.s, p3/M, z20.s, z8.s\n"
- "csel x8, x8, XZR, LT\n"
- "cmp x12, x20\n"
"fmla z5.s, p3/M, z20.s, z14.s\n"
"fmla z29.s, p3/M, z20.s, z1.s\n"
- "ld1w { z21.s }, p3/Z, [x10, #5, MUL VL]\n"
+ "ld1w { z21.s }, p3/Z, [x11, #5, MUL VL]\n"
"fmla z30.s, p3/M, z19.s, z8.s\n"
+ "ld1w { z26.s }, p2/Z, [x27, x28, LSL #2]\n"
"fmla z31.s, p3/M, z19.s, z13.s\n"
- "ld1w { z26.s }, p2/Z, [x9, x24, LSL #2]\n"
"fmla z5.s, p3/M, z19.s, z1.s\n"
"fmla z29.s, p3/M, z19.s, z0.s\n"
- "ld1w { z25.s }, p3/Z, [x10, #6, MUL VL]\n"
+ "ld1w { z25.s }, p3/Z, [x11, #6, MUL VL]\n"
"fmla z30.s, p3/M, z18.s, z13.s\n"
+ "ld1w { z24.s }, p2/Z, [x27, x9, LSL #2]\n"
"fmla z31.s, p3/M, z18.s, z22.s\n"
- "ld1w { z24.s }, p2/Z, [x9, x26, LSL #2]\n"
"fmla z5.s, p3/M, z18.s, z0.s\n"
"fmla z29.s, p3/M, z18.s, z27.s\n"
- "ld1w { z23.s }, p3/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1w { z23.s }, p3/Z, [x11, #7, MUL VL]\n"
+ "addvl x11, x11, #16\n"
"fmla z30.s, p3/M, z17.s, z22.s\n"
+ "ld1w { z22.s }, p2/Z, [x26]\n"
"fmla z31.s, p3/M, z17.s, z6.s\n"
- "ld1w { z22.s }, p2/Z, [x27]\n"
"fmla z5.s, p3/M, z17.s, z27.s\n"
"fmla z29.s, p3/M, z17.s, z24.s\n"
- "ld1w { z20.s }, p3/Z, [x10, #-8, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x11, #-8, MUL VL]\n"
"fmla z30.s, p3/M, z16.s, z6.s\n"
+ "ld1w { z18.s }, p2/Z, [x26, x8, LSL #2]\n"
"fmla z31.s, p3/M, z16.s, z10.s\n"
- "ld1w { z19.s }, p2/Z, [x27, x17, LSL #2]\n"
- "ld1w { z18.s }, p2/Z, [x27, x15, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x26, x12, LSL #2]\n"
"fmla z5.s, p3/M, z16.s, z24.s\n"
"fmla z29.s, p3/M, z16.s, z26.s\n"
- "ld1w { z16.s }, p3/Z, [x10, #-7, MUL VL]\n"
+ "ld1w { z16.s }, p3/Z, [x11, #-7, MUL VL]\n"
"fmla z30.s, p3/M, z21.s, z14.s\n"
+ "ld1w { z19.s }, p2/Z, [x26, x28, LSL #2]\n"
"fmla z31.s, p3/M, z21.s, z1.s\n"
- "ld1w { z17.s }, p2/Z, [x27, x24, LSL #2]\n"
"fmla z5.s, p3/M, z21.s, z22.s\n"
- "fmla z29.s, p3/M, z21.s, z19.s\n"
- "ld1w { z21.s }, p3/Z, [x10, #-6, MUL VL]\n"
+ "fmla z29.s, p3/M, z21.s, z18.s\n"
+ "ld1w { z21.s }, p3/Z, [x11, #-6, MUL VL]\n"
"fmla z30.s, p3/M, z25.s, z1.s\n"
+ "ld1w { z8.s }, p2/Z, [x26, x10, LSL #2]\n"
"fmla z31.s, p3/M, z25.s, z0.s\n"
- "ld1w { z7.s }, p2/Z, [x27, x28, LSL #2]\n"
- "fmla z5.s, p3/M, z25.s, z19.s\n"
- "fmla z29.s, p3/M, z25.s, z18.s\n"
- "ld1w { z10.s }, p3/Z, [x10, #-5, MUL VL]\n"
+ "fmla z5.s, p3/M, z25.s, z18.s\n"
+ "fmla z29.s, p3/M, z25.s, z17.s\n"
+ "ld1w { z9.s }, p3/Z, [x11, #-5, MUL VL]\n"
"fmla z30.s, p3/M, z23.s, z0.s\n"
+ "ld1w { z11.s }, p2/Z, [x26, x9, LSL #2]\n"
"fmla z31.s, p3/M, z23.s, z27.s\n"
- "ld1w { z11.s }, p2/Z, [x27, x26, LSL #2]\n"
- "fmla z5.s, p3/M, z23.s, z18.s\n"
- "fmla z29.s, p3/M, z23.s, z7.s\n"
- "ld1w { z6.s }, p3/Z, [x10, #-4, MUL VL]\n"
+ "fmla z5.s, p3/M, z23.s, z17.s\n"
+ "fmla z29.s, p3/M, z23.s, z8.s\n"
+ "ld1w { z6.s }, p3/Z, [x11, #-4, MUL VL]\n"
"fmla z30.s, p3/M, z20.s, z27.s\n"
- "fmla z31.s, p3/M, z20.s, z24.s\n"
"ld1w { z0.s }, p2/Z, [x25]\n"
- "fmla z5.s, p3/M, z20.s, z7.s\n"
+ "fmla z31.s, p3/M, z20.s, z24.s\n"
+ "fmla z5.s, p3/M, z20.s, z8.s\n"
"fmla z29.s, p3/M, z20.s, z11.s\n"
- "ld1w { z9.s }, p3/Z, [x10, #-3, MUL VL]\n"
+ "ld1w { z4.s }, p3/Z, [x11, #-3, MUL VL]\n"
"fmla z30.s, p3/M, z16.s, z24.s\n"
+ "ld1w { z2.s }, p2/Z, [x25, x8, LSL #2]\n"
"fmla z31.s, p3/M, z16.s, z26.s\n"
- "ld1w { z3.s }, p2/Z, [x25, x17, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x25, x26, LSL #2]\n"
+ "ld1w { z27.s }, p2/Z, [x25, x9, LSL #2]\n"
"fmla z5.s, p3/M, z16.s, z11.s\n"
- "fmla z29.s, p3/M, z16.s, z17.s\n"
- "ld1w { z16.s }, p3/Z, [x10, #-2, MUL VL]\n"
+ "fmla z29.s, p3/M, z16.s, z19.s\n"
+ "ld1w { z16.s }, p3/Z, [x11, #-2, MUL VL]\n"
"fmla z30.s, p3/M, z21.s, z22.s\n"
- "fmla z31.s, p3/M, z21.s, z19.s\n"
- "ld1w { z26.s }, p2/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z26.s }, p2/Z, [x25, x12, LSL #2]\n"
+ "fmla z31.s, p3/M, z21.s, z18.s\n"
"fmla z5.s, p3/M, z21.s, z0.s\n"
- "fmla z29.s, p3/M, z21.s, z3.s\n"
- "ld1w { z25.s }, p3/Z, [x10, #-1, MUL VL]\n"
- "fmla z30.s, p3/M, z10.s, z19.s\n"
- "fmla z31.s, p3/M, z10.s, z18.s\n"
- "ld1w { z24.s }, p2/Z, [x25, x28, LSL #2]\n"
- "fmla z5.s, p3/M, z10.s, z3.s\n"
- "fmla z29.s, p3/M, z10.s, z26.s\n"
- "ld1w { z23.s }, p3/Z, [x10]\n"
- "fmla z30.s, p3/M, z6.s, z18.s\n"
- "fmla z31.s, p3/M, z6.s, z7.s\n"
- "ld1w { z22.s }, p2/Z, [x25, x24, LSL #2]\n"
+ "fmla z29.s, p3/M, z21.s, z2.s\n"
+ "ld1w { z25.s }, p3/Z, [x11, #-1, MUL VL]\n"
+ "fmla z30.s, p3/M, z9.s, z18.s\n"
+ "ld1w { z24.s }, p2/Z, [x25, x10, LSL #2]\n"
+ "fmla z31.s, p3/M, z9.s, z17.s\n"
+ "fmla z5.s, p3/M, z9.s, z2.s\n"
+ "fmla z29.s, p3/M, z9.s, z26.s\n"
+ "ld1w { z23.s }, p3/Z, [x11]\n"
+ "fmla z30.s, p3/M, z6.s, z17.s\n"
+ "ld1w { z22.s }, p2/Z, [x25, x28, LSL #2]\n"
+ "fmla z31.s, p3/M, z6.s, z8.s\n"
"fmla z5.s, p3/M, z6.s, z26.s\n"
"fmla z29.s, p3/M, z6.s, z24.s\n"
- "ld1w { z21.s }, p3/Z, [x10, #1, MUL VL]\n"
- "fmla z30.s, p3/M, z9.s, z7.s\n"
- "fmla z31.s, p3/M, z9.s, z11.s\n"
- "ld1w { z18.s }, p2/Z, [x23]\n"
- "fmla z5.s, p3/M, z9.s, z24.s\n"
- "fmla z29.s, p3/M, z9.s, z27.s\n"
- "ld1w { z20.s }, p3/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z21.s }, p3/Z, [x11, #1, MUL VL]\n"
+ "fmla z30.s, p3/M, z4.s, z8.s\n"
+ "ld1w { z18.s }, p2/Z, [x24]\n"
+ "fmla z31.s, p3/M, z4.s, z11.s\n"
+ "fmla z5.s, p3/M, z4.s, z24.s\n"
+ "fmla z29.s, p3/M, z4.s, z27.s\n"
+ "ld1w { z20.s }, p3/Z, [x11, #2, MUL VL]\n"
"fmla z30.s, p3/M, z16.s, z11.s\n"
- "fmla z31.s, p3/M, z16.s, z17.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x17, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x24, x8, LSL #2]\n"
+ "fmla z31.s, p3/M, z16.s, z19.s\n"
"fmla z5.s, p3/M, z16.s, z27.s\n"
"fmla z29.s, p3/M, z16.s, z22.s\n"
- "ld1w { z19.s }, p3/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z19.s }, p3/Z, [x11, #3, MUL VL]\n"
"fmla z30.s, p3/M, z25.s, z0.s\n"
- "fmla z31.s, p3/M, z25.s, z3.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x15, LSL #2]\n"
+ "ld1w { z16.s }, p2/Z, [x24, x12, LSL #2]\n"
+ "fmla z31.s, p3/M, z25.s, z2.s\n"
"fmla z5.s, p3/M, z25.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, x10, LSL #2]\n"
"fmla z29.s, p3/M, z25.s, z17.s\n"
- "ld1w { z18.s }, p2/Z, [x23, x28, LSL #2]\n"
- "fmla z30.s, p3/M, z23.s, z3.s\n"
+ "fmla z30.s, p3/M, z23.s, z2.s\n"
"fmla z31.s, p3/M, z23.s, z26.s\n"
"fmla z5.s, p3/M, z23.s, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, x9, LSL #2]\n"
"fmla z29.s, p3/M, z23.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x23, x26, LSL #2]\n"
"fmla z30.s, p3/M, z21.s, z26.s\n"
"fmla z31.s, p3/M, z21.s, z24.s\n"
"fmla z5.s, p3/M, z21.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, x28, LSL #2]\n"
"fmla z29.s, p3/M, z21.s, z18.s\n"
- "ld1w { z16.s }, p2/Z, [x23, x24, LSL #2]\n"
"fmla z30.s, p3/M, z20.s, z24.s\n"
"fmla z31.s, p3/M, z20.s, z27.s\n"
"fmla z5.s, p3/M, z20.s, z18.s\n"
"fmla z29.s, p3/M, z20.s, z17.s\n"
"fmla z30.s, p3/M, z19.s, z27.s\n"
"fmla z31.s, p3/M, z19.s, z22.s\n"
- "fmax z30.s, p3/M, z30.s, z15.s\n"
- "fmax z31.s, p3/M, z31.s, z15.s\n"
"fmla z5.s, p3/M, z19.s, z17.s\n"
"fmla z29.s, p3/M, z19.s, z16.s\n"
+ "fmax z30.s, p3/M, z30.s, z15.s\n"
+ "fmax z31.s, p3/M, z31.s, z15.s\n"
"fmax z5.s, p3/M, z5.s, z15.s\n"
- "fmax z29.s, p3/M, z29.s, z15.s\n"
"fmin z30.s, p3/M, z30.s, z28.s\n"
"fmin z31.s, p3/M, z31.s, z28.s\n"
- "st1w { z30.s }, p0, [x13]\n"
+ "fmax z29.s, p3/M, z29.s, z15.s\n"
"fmin z5.s, p3/M, z5.s, z28.s\n"
+ "st1w { z30.s }, p0, [x13]\n"
"fmin z29.s, p3/M, z29.s, z28.s\n"
"st1w { z31.s }, p0, [x13, x16, LSL #2]\n"
- "st1w { z5.s }, p0, [x22]\n"
- "st1w { z29.s }, p0, [x22, x16, LSL #2]\n"
+ "st1w { z5.s }, p0, [x23]\n"
+ "st1w { z29.s }, p0, [x23, x16, LSL #2]\n"
"blt 1b\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_args_inptr] "I" (offsetof(Args, inptr)), [offsetof_args_ld_input_col] "I" (offsetof(Args, ld_input_col)), [offsetof_args_ld_input_row] "I" (offsetof(Args, ld_input_row)), [offsetof_args_ld_output_col] "I" (offsetof(Args, ld_output_col)), [offsetof_args_ld_output_row] "I" (offsetof(Args, ld_output_row)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_n_tile_cols] "I" (offsetof(Args, n_tile_cols)), [offsetof_args_n_tile_rows] "I" (offsetof(Args, n_tile_rows)), [offsetof_args_outptr] "I" (offsetof(Args, outptr)), [offsetof_args_params] "I" (offsetof(Args, params)), [offsetof_args_tile_i] "I" (offsetof(Args, tile_i)), [offsetof_args_tile_j] "I" (offsetof(Args, tile_j)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
index bf65e04d32..517ebae6e1 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst/generic_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -99,449 +99,449 @@ void sve_fp32_nhwc_5x5_s1_output2x2_mla_depthfirst_indirect_impl(
__asm__ __volatile__(
"ldr x20, [%x[params_struct], %[offsetof_args_outptrs]]\n"
- "add x16, %x[params_struct], %[offsetof_Args_inptrs]\n"
- "ldp x15, x14, [x20, #0x0]\n"
- "mov x13, #0x0\n"
- "ldp x12, x11, [x20, #0x10]\n"
+ "add x17, %x[params_struct], %[offsetof_Args_inptrs]\n"
+ "mov x16, #0x0\n"
+ "ldr x15, [%x[params_struct], %[offsetof_args_params]]\n"
"whilelt p3.s, XZR, %x[n_channels]\n"
- "ldp x21, x20, [x16, #0x0]\n"
- "cntw x10\n"
+ "cntw x14\n"
"ptrue p2.b\n"
- "ldr x9, [%x[params_struct], %[offsetof_args_params]]\n"
- "ld1w { z5.s }, p3/Z, [x21, x13, LSL #2]\n"
- "cmp x10, %x[n_channels]\n"
- "ld1w { z6.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldp x27, x26, [x16, #0x10]\n"
- "sub x28, XZR, x10\n"
- "ldp x25, x24, [x16, #0x20]\n"
- "ldp x23, x22, [x16, #0x30]\n"
- "ldp x21, x20, [x16, #0x40]\n"
- "ld1rw { z15.s }, p2/Z, [%x[params_struct], %[offsetof_args_min]]\n"
- "ld1rw { z28.s }, p2/Z, [%x[params_struct], %[offsetof_args_max]]\n"
- "ld1w { z29.s }, p2/Z, [x9]\n"
- "ld1w { z0.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z1.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z2.s }, p2/Z, [x9, #3, MUL VL]\n"
- "ld1w { z3.s }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1w { z4.s }, p2/Z, [x9, #5, MUL VL]\n"
- "ld1w { z7.s }, p3/Z, [x27, x13, LSL #2]\n"
- "addvl x9, x9, #6\n"
- "ld1w { z8.s }, p3/Z, [x26, x13, LSL #2]\n"
- "ld1w { z9.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ld1w { z13.s }, p3/Z, [x24, x13, LSL #2]\n"
- "ld1w { z11.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ld1w { z12.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ld1w { z10.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ld1w { z14.s }, p3/Z, [x20, x13, LSL #2]\n"
+ "ldp x13, x12, [x20, #0x0]\n"
+ "ldp x11, x10, [x20, #0x10]\n"
+ "ldp x21, x20, [x17, #0x0]\n"
+ "ldp x27, x26, [x17, #0x10]\n"
+ "ldp x25, x24, [x17, #0x20]\n"
+ "ldp x23, x22, [x17, #0x30]\n"
+ "cmp x14, %x[n_channels]\n"
+ "sub x9, XZR, x14\n"
+ "ld1rw { z17.s }, p2/Z, [%x[params_struct], %[offsetof_args_min]]\n"
+ "ld1rw { z30.s }, p2/Z, [%x[params_struct], %[offsetof_args_max]]\n"
+ "ld1w { z5.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "ld1w { z6.s }, p3/Z, [x20, x16, LSL #2]\n"
+ "ldp x21, x20, [x17, #0x40]\n"
+ "ld1w { z29.s }, p2/Z, [x15]\n"
+ "ld1w { z0.s }, p2/Z, [x15, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x15, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x15, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "ld1w { z4.s }, p2/Z, [x15, #5, MUL VL]\n"
+ "ld1w { z7.s }, p3/Z, [x27, x16, LSL #2]\n"
+ "addvl x15, x15, #6\n"
+ "ld1w { z8.s }, p3/Z, [x26, x16, LSL #2]\n"
+ "ld1w { z9.s }, p3/Z, [x25, x16, LSL #2]\n"
+ "ld1w { z13.s }, p3/Z, [x24, x16, LSL #2]\n"
+ "ld1w { z11.s }, p3/Z, [x23, x16, LSL #2]\n"
+ "ld1w { z12.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "ld1w { z10.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "ld1w { z14.s }, p3/Z, [x20, x16, LSL #2]\n"
"bge 2f\n"
"1:" // Channel loop
- "movprfx z30, z29\n fmla z30.s, p2/M, z0.s, z5.s\n"
- "movprfx z27, z29\n fmla z27.s, p2/M, z0.s, z6.s\n"
- "ldr x20, [x16, #0x50]\n"
- "ld1w { z5.s }, p3/Z, [x20, x13, LSL #2]\n"
- "movprfx z31, z29\n fmla z31.s, p2/M, z0.s, z7.s\n"
- "movprfx z26, z29\n fmla z26.s, p2/M, z0.s, z8.s\n"
- "ldr x20, [x16, #0x58]\n"
- "ldr x21, [x16, #0x60]\n"
- "fmla z30.s, p2/M, z1.s, z6.s\n"
- "fmla z27.s, p2/M, z1.s, z9.s\n"
- "ld1w { z22.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x20, [x16, #0x68]\n"
- "fmla z31.s, p2/M, z1.s, z8.s\n"
- "fmla z26.s, p2/M, z1.s, z13.s\n"
- "ld1w { z21.s }, p2/Z, [x9]\n"
- "ldr x23, [x16, #0x70]\n"
- "fmla z30.s, p2/M, z2.s, z9.s\n"
- "fmla z27.s, p2/M, z2.s, z11.s\n"
- "ld1w { z20.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ld1w { z18.s }, p2/Z, [x9, #1, MUL VL]\n"
- "fmla z31.s, p2/M, z2.s, z13.s\n"
- "fmla z26.s, p2/M, z2.s, z5.s\n"
- "ldr x22, [x16, #0x78]\n"
- "ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
- "fmla z30.s, p2/M, z3.s, z11.s\n"
- "fmla z27.s, p2/M, z3.s, z12.s\n"
- "ld1w { z11.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x21, [x16, #0x80]\n"
- "fmla z31.s, p2/M, z3.s, z5.s\n"
- "fmla z26.s, p2/M, z3.s, z22.s\n"
- "ld1w { z16.s }, p2/Z, [x9, #3, MUL VL]\n"
- "ldr x20, [x16, #0x88]\n"
- "fmla z30.s, p2/M, z4.s, z12.s\n"
- "fmla z27.s, p2/M, z4.s, z20.s\n"
- "ld1w { z0.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ld1w { z29.s }, p3/Z, [x22, x13, LSL #2]\n"
- "fmla z31.s, p2/M, z4.s, z22.s\n"
- "fmla z26.s, p2/M, z4.s, z10.s\n"
- "ld1w { z19.s }, p2/Z, [x9, #4, MUL VL]\n"
- "ldr x23, [x16, #0x90]\n"
- "fmla z30.s, p2/M, z21.s, z7.s\n"
- "fmla z27.s, p2/M, z21.s, z8.s\n"
- "ldr x26, [x16, #0x98]\n"
- "ldr x22, [x16, #0xa0]\n"
- "fmla z31.s, p2/M, z21.s, z14.s\n"
- "fmla z26.s, p2/M, z21.s, z11.s\n"
- "ld1w { z25.s }, p2/Z, [x9, #5, MUL VL]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z30.s, p2/M, z18.s, z8.s\n"
- "fmla z27.s, p2/M, z18.s, z13.s\n"
- "ld1w { z24.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x24, [x16, #0xb0]\n"
- "fmla z31.s, p2/M, z18.s, z11.s\n"
- "fmla z26.s, p2/M, z18.s, z0.s\n"
- "ld1w { z18.s }, p2/Z, [x9, #6, MUL VL]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z30.s, p2/M, z17.s, z13.s\n"
- "fmla z27.s, p2/M, z17.s, z5.s\n"
- "ld1w { z3.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ldr x21, [x16, #0xc0]\n"
- "fmla z31.s, p2/M, z17.s, z0.s\n"
- "fmla z26.s, p2/M, z17.s, z29.s\n"
- "ld1w { z17.s }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "fmla z30.s, p2/M, z16.s, z5.s\n"
- "fmla z27.s, p2/M, z16.s, z22.s\n"
- "ld1w { z6.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ldr x27, [x16, #0xc8]\n"
- "fmla z31.s, p2/M, z16.s, z29.s\n"
- "fmla z26.s, p2/M, z16.s, z3.s\n"
- "ld1w { z16.s }, p2/Z, [x9, #-8, MUL VL]\n"
- "ldr x23, [x16, #0xd0]\n"
- "fmla z30.s, p2/M, z19.s, z22.s\n"
- "fmla z27.s, p2/M, z19.s, z10.s\n"
- "ld1w { z23.s }, p3/Z, [x26, x13, LSL #2]\n"
- "ld1w { z22.s }, p3/Z, [x22, x13, LSL #2]\n"
- "fmla z31.s, p2/M, z19.s, z3.s\n"
- "fmla z26.s, p2/M, z19.s, z24.s\n"
- "ld1w { z21.s }, p2/Z, [x9, #-7, MUL VL]\n"
- "ldr x22, [x16, #0xd8]\n"
- "fmla z30.s, p2/M, z25.s, z14.s\n"
- "fmla z27.s, p2/M, z25.s, z11.s\n"
- "ld1w { z1.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x20, [x16, #0xe0]\n"
- "fmla z31.s, p2/M, z25.s, z6.s\n"
- "fmla z26.s, p2/M, z25.s, z23.s\n"
- "ld1w { z20.s }, p2/Z, [x9, #-6, MUL VL]\n"
- "ldr x26, [x16, #0xf8]\n"
- "fmla z30.s, p2/M, z18.s, z11.s\n"
- "fmla z27.s, p2/M, z18.s, z0.s\n"
- "ld1w { z7.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ldr x25, [x16, #0xe8]\n"
- "fmla z31.s, p2/M, z18.s, z23.s\n"
- "fmla z26.s, p2/M, z18.s, z22.s\n"
- "ld1w { z18.s }, p2/Z, [x9, #-5, MUL VL]\n"
- "whilelt p1.s, x10, %x[n_channels]\n"
- "fmla z30.s, p2/M, z17.s, z0.s\n"
- "fmla z27.s, p2/M, z17.s, z29.s\n"
- "ld1w { z19.s }, p3/Z, [x24, x13, LSL #2]\n"
- "ldr x24, [x16, #0xf0]\n"
- "fmla z31.s, p2/M, z17.s, z22.s\n"
- "fmla z26.s, p2/M, z17.s, z7.s\n"
- "ld1w { z17.s }, p2/Z, [x9, #-4, MUL VL]\n"
- "incw x28\n"
- "fmla z30.s, p2/M, z16.s, z29.s\n"
- "fmla z27.s, p2/M, z16.s, z3.s\n"
- "ld1w { z0.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ldr x21, [x16, #0x100]\n"
- "fmla z31.s, p2/M, z16.s, z7.s\n"
- "fmla z26.s, p2/M, z16.s, z19.s\n"
- "ld1w { z16.s }, p2/Z, [x9, #-3, MUL VL]\n"
+ "movprfx z15, z29\n fmla z15.s, p2/M, z0.s, z5.s\n"
+ "movprfx z28, z29\n fmla z28.s, p2/M, z0.s, z6.s\n"
+ "ldr x21, [x17, #0x50]\n"
+ "ldr x20, [x17, #0x58]\n"
+ "movprfx z27, z29\n fmla z27.s, p2/M, z0.s, z7.s\n"
+ "movprfx z31, z29\n fmla z31.s, p2/M, z0.s, z8.s\n"
+ "ldr x22, [x17, #0x60]\n"
+ "ldr x25, [x17, #0x68]\n"
+ "ld1w { z19.s }, p2/Z, [x15]\n"
+ "ldr x24, [x17, #0x70]\n"
+ "whilelt p1.s, x14, %x[n_channels]\n"
+ "incw x9\n"
+ "ld1w { z25.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "ldr x21, [x17, #0x78]\n"
"mov p0.b, p3.b\n"
- "fmla z30.s, p2/M, z21.s, z3.s\n"
- "fmla z27.s, p2/M, z21.s, z24.s\n"
- "ld1w { z11.s }, p3/Z, [x27, x13, LSL #2]\n"
- "ld1w { z13.s }, p3/Z, [x20, x13, LSL #2]\n"
- "fmla z31.s, p2/M, z21.s, z19.s\n"
- "fmla z26.s, p2/M, z21.s, z1.s\n"
- "ld1w { z10.s }, p2/Z, [x9, #-2, MUL VL]\n"
- "ldr x20, [x16, #0x108]\n"
- "fmla z30.s, p2/M, z20.s, z6.s\n"
- "fmla z27.s, p2/M, z20.s, z23.s\n"
- "ld1w { z25.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ldr x23, [x16, #0x110]\n"
- "fmla z31.s, p2/M, z20.s, z0.s\n"
- "fmla z26.s, p2/M, z20.s, z11.s\n"
- "ld1w { z8.s }, p2/Z, [x9, #-1, MUL VL]\n"
- "ld1w { z29.s }, p2/Z, [x9, #4, MUL VL]\n"
- "fmla z30.s, p2/M, z18.s, z23.s\n"
- "fmla z27.s, p2/M, z18.s, z22.s\n"
- "ld1w { z24.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ldr x22, [x16, #0x118]\n"
- "fmla z31.s, p2/M, z18.s, z11.s\n"
- "fmla z26.s, p2/M, z18.s, z25.s\n"
- "ld1w { z23.s }, p2/Z, [x9]\n"
- "fmla z30.s, p2/M, z17.s, z22.s\n"
- "fmla z27.s, p2/M, z17.s, z7.s\n"
- "ld1w { z22.s }, p3/Z, [x25, x13, LSL #2]\n"
- "fmla z31.s, p2/M, z17.s, z25.s\n"
- "fmla z26.s, p2/M, z17.s, z24.s\n"
- "ld1w { z21.s }, p2/Z, [x9, #1, MUL VL]\n"
- "fmla z30.s, p2/M, z16.s, z7.s\n"
- "fmla z27.s, p2/M, z16.s, z19.s\n"
- "ld1w { z18.s }, p3/Z, [x24, x13, LSL #2]\n"
- "fmla z31.s, p2/M, z16.s, z24.s\n"
- "fmla z26.s, p2/M, z16.s, z13.s\n"
- "ld1w { z20.s }, p2/Z, [x9, #2, MUL VL]\n"
- "fmla z30.s, p2/M, z10.s, z19.s\n"
- "fmla z27.s, p2/M, z10.s, z1.s\n"
- "ld1w { z17.s }, p3/Z, [x26, x13, LSL #2]\n"
- "fmla z31.s, p2/M, z10.s, z13.s\n"
- "fmla z26.s, p2/M, z10.s, z22.s\n"
- "ld1w { z19.s }, p2/Z, [x9, #3, MUL VL]\n"
- "fmla z30.s, p2/M, z8.s, z0.s\n"
- "fmla z27.s, p2/M, z8.s, z11.s\n"
- "ld1w { z16.s }, p3/Z, [x21, x13, LSL #2]\n"
- "fmla z31.s, p2/M, z8.s, z18.s\n"
- "fmla z26.s, p2/M, z8.s, z17.s\n"
- "ld1w { z18.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldp x21, x20, [x16, #0x0]\n"
- "fmla z30.s, p2/M, z23.s, z11.s\n"
- "fmla z27.s, p2/M, z23.s, z25.s\n"
- "ld1w { z0.s }, p2/Z, [x9, #5, MUL VL]\n"
- "fmla z31.s, p2/M, z23.s, z17.s\n"
- "fmla z26.s, p2/M, z23.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ld1w { z1.s }, p2/Z, [x9, #6, MUL VL]\n"
- "fmla z30.s, p2/M, z21.s, z25.s\n"
- "fmla z27.s, p2/M, z21.s, z24.s\n"
- "ld1w { z5.s }, p1/Z, [x21, x10, LSL #2]\n"
- "fmla z31.s, p2/M, z21.s, z16.s\n"
- "fmla z26.s, p2/M, z21.s, z18.s\n"
- "ld1w { z16.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ldp x27, x26, [x16, #0x10]\n"
- "fmla z30.s, p2/M, z20.s, z24.s\n"
+ "fmla z15.s, p2/M, z1.s, z6.s\n"
+ "fmla z28.s, p2/M, z1.s, z9.s\n"
+ "ld1w { z23.s }, p3/Z, [x20, x16, LSL #2]\n"
+ "ldr x27, [x17, #0x80]\n"
+ "fmla z27.s, p2/M, z1.s, z8.s\n"
+ "fmla z31.s, p2/M, z1.s, z13.s\n"
+ "ld1w { z22.s }, p2/Z, [x15, #1, MUL VL]\n"
+ "ldr x20, [x17, #0x88]\n"
+ "ldr x23, [x17, #0x90]\n"
+ "ldr x26, [x17, #0x98]\n"
+ "fmla z15.s, p2/M, z2.s, z9.s\n"
+ "ld1w { z18.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "ldr x22, [x17, #0xa0]\n"
+ "fmla z28.s, p2/M, z2.s, z11.s\n"
+ "fmla z27.s, p2/M, z2.s, z13.s\n"
+ "fmla z31.s, p2/M, z2.s, z25.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z15.s, p2/M, z3.s, z11.s\n"
+ "ld1w { z2.s }, p3/Z, [x25, x16, LSL #2]\n"
+ "ldr x25, [x17, #0xa8]\n"
+ "fmla z28.s, p2/M, z3.s, z12.s\n"
+ "fmla z27.s, p2/M, z3.s, z25.s\n"
+ "fmla z31.s, p2/M, z3.s, z23.s\n"
+ "ld1w { z21.s }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z15.s, p2/M, z4.s, z12.s\n"
+ "ld1w { z1.s }, p3/Z, [x24, x16, LSL #2]\n"
+ "ldr x24, [x17, #0xb0]\n"
+ "fmla z28.s, p2/M, z4.s, z18.s\n"
+ "ld1w { z0.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "ldr x21, [x17, #0xb8]\n"
+ "fmla z27.s, p2/M, z4.s, z23.s\n"
+ "fmla z31.s, p2/M, z4.s, z10.s\n"
+ "ld1w { z3.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "fmla z15.s, p2/M, z19.s, z7.s\n"
+ "fmla z28.s, p2/M, z19.s, z8.s\n"
+ "fmla z27.s, p2/M, z19.s, z14.s\n"
+ "fmla z31.s, p2/M, z19.s, z2.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, #5, MUL VL]\n"
+ "fmla z15.s, p2/M, z22.s, z8.s\n"
+ "ld1w { z26.s }, p3/Z, [x20, x16, LSL #2]\n"
+ "ldr x28, [x17, #0xc8]\n"
+ "fmla z28.s, p2/M, z22.s, z13.s\n"
+ "fmla z27.s, p2/M, z22.s, z2.s\n"
+ "fmla z31.s, p2/M, z22.s, z1.s\n"
+ "ld1w { z19.s }, p2/Z, [x15, #6, MUL VL]\n"
+ "fmla z15.s, p2/M, z16.s, z13.s\n"
+ "ld1w { z9.s }, p3/Z, [x27, x16, LSL #2]\n"
+ "ldr x20, [x17, #0xc0]\n"
+ "fmla z28.s, p2/M, z16.s, z25.s\n"
+ "fmla z27.s, p2/M, z16.s, z1.s\n"
+ "fmla z31.s, p2/M, z16.s, z0.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z15.s, p2/M, z21.s, z25.s\n"
+ "ld1w { z25.s }, p3/Z, [x23, x16, LSL #2]\n"
+ "ldr x23, [x17, #0xd0]\n"
+ "fmla z28.s, p2/M, z21.s, z23.s\n"
+ "ld1w { z29.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "fmla z27.s, p2/M, z21.s, z0.s\n"
+ "fmla z31.s, p2/M, z21.s, z9.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, #-8, MUL VL]\n"
+ "fmla z15.s, p2/M, z3.s, z23.s\n"
+ "ld1w { z24.s }, p3/Z, [x26, x16, LSL #2]\n"
+ "ldr x27, [x17, #0xd8]\n"
+ "fmla z28.s, p2/M, z3.s, z10.s\n"
+ "ld1w { z23.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "ldr x22, [x17, #0xe0]\n"
+ "fmla z27.s, p2/M, z3.s, z9.s\n"
+ "fmla z31.s, p2/M, z3.s, z26.s\n"
+ "ld1w { z22.s }, p2/Z, [x15, #-7, MUL VL]\n"
+ "fmla z15.s, p2/M, z20.s, z14.s\n"
+ "ld1w { z6.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "ldr x26, [x17, #0xf8]\n"
+ "fmla z28.s, p2/M, z20.s, z2.s\n"
+ "fmla z27.s, p2/M, z20.s, z25.s\n"
+ "fmla z31.s, p2/M, z20.s, z24.s\n"
+ "ld1w { z10.s }, p2/Z, [x15, #-6, MUL VL]\n"
+ "fmla z15.s, p2/M, z19.s, z2.s\n"
+ "ld1w { z21.s }, p3/Z, [x25, x16, LSL #2]\n"
+ "ldr x25, [x17, #0xe8]\n"
+ "fmla z28.s, p2/M, z19.s, z1.s\n"
+ "fmla z27.s, p2/M, z19.s, z24.s\n"
+ "fmla z31.s, p2/M, z19.s, z23.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, #-5, MUL VL]\n"
+ "fmla z15.s, p2/M, z18.s, z1.s\n"
+ "ld1w { z19.s }, p3/Z, [x24, x16, LSL #2]\n"
+ "ldr x24, [x17, #0xf0]\n"
+ "fmla z28.s, p2/M, z18.s, z0.s\n"
+ "fmla z27.s, p2/M, z18.s, z23.s\n"
+ "fmla z31.s, p2/M, z18.s, z21.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, #-4, MUL VL]\n"
+ "fmla z15.s, p2/M, z16.s, z0.s\n"
+ "ld1w { z0.s }, p3/Z, [x20, x16, LSL #2]\n"
+ "ldr x21, [x17, #0x100]\n"
+ "fmla z28.s, p2/M, z16.s, z9.s\n"
+ "fmla z27.s, p2/M, z16.s, z21.s\n"
+ "fmla z31.s, p2/M, z16.s, z19.s\n"
+ "ld1w { z16.s }, p2/Z, [x15, #-3, MUL VL]\n"
+ "fmla z15.s, p2/M, z22.s, z9.s\n"
+ "ld1w { z12.s }, p3/Z, [x28, x16, LSL #2]\n"
+ "ldr x20, [x17, #0x108]\n"
+ "fmla z28.s, p2/M, z22.s, z26.s\n"
+ "ld1w { z4.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "fmla z27.s, p2/M, z22.s, z19.s\n"
+ "fmla z31.s, p2/M, z22.s, z6.s\n"
+ "ld1w { z14.s }, p2/Z, [x15, #-2, MUL VL]\n"
+ "fmla z15.s, p2/M, z10.s, z25.s\n"
+ "ld1w { z26.s }, p3/Z, [x23, x16, LSL #2]\n"
+ "ldr x23, [x17, #0x110]\n"
+ "fmla z28.s, p2/M, z10.s, z24.s\n"
+ "fmla z27.s, p2/M, z10.s, z0.s\n"
+ "fmla z31.s, p2/M, z10.s, z12.s\n"
+ "ld1w { z10.s }, p2/Z, [x15, #-1, MUL VL]\n"
+ "fmla z15.s, p2/M, z20.s, z24.s\n"
+ "ld1w { z25.s }, p3/Z, [x27, x16, LSL #2]\n"
+ "ldr x22, [x17, #0x118]\n"
+ "fmla z28.s, p2/M, z20.s, z23.s\n"
+ "fmla z27.s, p2/M, z20.s, z12.s\n"
+ "fmla z31.s, p2/M, z20.s, z26.s\n"
+ "ld1w { z24.s }, p2/Z, [x15]\n"
+ "fmla z15.s, p2/M, z18.s, z23.s\n"
+ "ld1w { z23.s }, p3/Z, [x25, x16, LSL #2]\n"
+ "fmla z28.s, p2/M, z18.s, z21.s\n"
+ "fmla z27.s, p2/M, z18.s, z26.s\n"
+ "fmla z31.s, p2/M, z18.s, z25.s\n"
+ "ld1w { z22.s }, p2/Z, [x15, #1, MUL VL]\n"
+ "fmla z15.s, p2/M, z16.s, z21.s\n"
+ "ld1w { z21.s }, p3/Z, [x24, x16, LSL #2]\n"
+ "fmla z28.s, p2/M, z16.s, z19.s\n"
+ "fmla z27.s, p2/M, z16.s, z25.s\n"
+ "fmla z31.s, p2/M, z16.s, z4.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z15.s, p2/M, z14.s, z19.s\n"
+ "ld1w { z19.s }, p3/Z, [x26, x16, LSL #2]\n"
+ "fmla z28.s, p2/M, z14.s, z6.s\n"
+ "fmla z27.s, p2/M, z14.s, z4.s\n"
+ "fmla z31.s, p2/M, z14.s, z23.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z15.s, p2/M, z10.s, z0.s\n"
+ "ld1w { z16.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "fmla z28.s, p2/M, z10.s, z12.s\n"
+ "fmla z27.s, p2/M, z10.s, z21.s\n"
+ "ld1w { z13.s }, p3/Z, [x20, x16, LSL #2]\n"
+ "ldp x21, x20, [x17, #0x0]\n"
+ "fmla z31.s, p2/M, z10.s, z19.s\n"
+ "ld1w { z0.s }, p2/Z, [x15, #5, MUL VL]\n"
+ "fmla z15.s, p2/M, z24.s, z12.s\n"
+ "fmla z28.s, p2/M, z24.s, z26.s\n"
+ "fmla z27.s, p2/M, z24.s, z19.s\n"
+ "ld1w { z12.s }, p3/Z, [x23, x16, LSL #2]\n"
+ "fmla z31.s, p2/M, z24.s, z16.s\n"
+ "ld1w { z1.s }, p2/Z, [x15, #6, MUL VL]\n"
+ "fmla z15.s, p2/M, z22.s, z26.s\n"
+ "ld1w { z5.s }, p1/Z, [x21, x14, LSL #2]\n"
+ "fmla z28.s, p2/M, z22.s, z25.s\n"
+ "fmla z27.s, p2/M, z22.s, z16.s\n"
+ "ld1w { z16.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "ldp x27, x26, [x17, #0x10]\n"
+ "ldp x25, x24, [x17, #0x20]\n"
+ "ldp x23, x22, [x17, #0x30]\n"
+ "incw x16\n"
+ "fmla z31.s, p2/M, z22.s, z13.s\n"
+ "ld1w { z2.s }, p2/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z15.s, p2/M, z20.s, z25.s\n"
+ "ld1w { z6.s }, p1/Z, [x20, x14, LSL #2]\n"
+ "ldp x21, x20, [x17, #0x40]\n"
+ "ld1w { z7.s }, p1/Z, [x27, x14, LSL #2]\n"
+ "fmla z28.s, p2/M, z20.s, z4.s\n"
"fmla z27.s, p2/M, z20.s, z13.s\n"
- "ld1w { z6.s }, p1/Z, [x20, x10, LSL #2]\n"
- "ldp x25, x24, [x16, #0x20]\n"
- "fmla z31.s, p2/M, z20.s, z18.s\n"
- "fmla z26.s, p2/M, z20.s, z17.s\n"
- "ldp x23, x22, [x16, #0x30]\n"
- "ldp x21, x20, [x16, #0x40]\n"
- "fmla z30.s, p2/M, z19.s, z13.s\n"
- "fmla z27.s, p2/M, z19.s, z22.s\n"
- "incw x13\n"
- "ld1w { z7.s }, p1/Z, [x27, x10, LSL #2]\n"
- "fmla z31.s, p2/M, z19.s, z17.s\n"
- "fmla z26.s, p2/M, z19.s, z16.s\n"
- "ld1w { z8.s }, p1/Z, [x26, x10, LSL #2]\n"
- "ld1w { z9.s }, p1/Z, [x25, x10, LSL #2]\n"
- "ld1w { z13.s }, p1/Z, [x24, x10, LSL #2]\n"
- "ld1w { z11.s }, p1/Z, [x23, x10, LSL #2]\n"
- "fmax z30.s, p2/M, z30.s, z15.s\n"
- "fmax z27.s, p2/M, z27.s, z15.s\n"
- "ld1w { z12.s }, p1/Z, [x22, x10, LSL #2]\n"
- "ld1w { z10.s }, p1/Z, [x21, x10, LSL #2]\n"
- "fmax z31.s, p2/M, z31.s, z15.s\n"
- "fmax z26.s, p2/M, z26.s, z15.s\n"
- "ld1w { z14.s }, p1/Z, [x20, x10, LSL #2]\n"
- "incw x10\n"
- "ld1w { z2.s }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "whilelt p3.s, x13, %x[n_channels]\n"
- "cmp x10, %x[n_channels]\n"
- "ld1w { z3.s }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1w { z4.s }, p2/Z, [x9, #-7, MUL VL]\n"
- "fmin z30.s, p2/M, z30.s, z28.s\n"
- "fmin z27.s, p2/M, z27.s, z28.s\n"
- "st1w { z30.s }, p0, [x15, x28, LSL #2]\n"
- "fmin z31.s, p2/M, z31.s, z28.s\n"
- "fmin z26.s, p2/M, z26.s, z28.s\n"
- "st1w { z27.s }, p0, [x14, x28, LSL #2]\n"
- "st1w { z31.s }, p0, [x12, x28, LSL #2]\n"
- "addvl x9, x9, #-6\n"
- "st1w { z26.s }, p0, [x11, x28, LSL #2]\n"
+ "ld1w { z13.s }, p1/Z, [x24, x14, LSL #2]\n"
+ "ld1w { z11.s }, p1/Z, [x23, x14, LSL #2]\n"
+ "whilelt p3.s, x16, %x[n_channels]\n"
+ "fmla z31.s, p2/M, z20.s, z12.s\n"
+ "ld1w { z3.s }, p2/Z, [x15, #-8, MUL VL]\n"
+ "fmla z15.s, p2/M, z18.s, z4.s\n"
+ "ld1w { z8.s }, p1/Z, [x26, x14, LSL #2]\n"
+ "ld1w { z14.s }, p1/Z, [x20, x14, LSL #2]\n"
+ "fmla z28.s, p2/M, z18.s, z23.s\n"
+ "ld1w { z10.s }, p1/Z, [x21, x14, LSL #2]\n"
+ "fmla z27.s, p2/M, z18.s, z12.s\n"
+ "ld1w { z12.s }, p1/Z, [x22, x14, LSL #2]\n"
+ "fmla z31.s, p2/M, z18.s, z16.s\n"
+ "ld1w { z9.s }, p1/Z, [x25, x14, LSL #2]\n"
+ "incw x14\n"
+ "ld1w { z4.s }, p2/Z, [x15, #-7, MUL VL]\n"
+ "addvl x15, x15, #-6\n"
+ "fmax z15.s, p2/M, z15.s, z17.s\n"
+ "fmax z28.s, p2/M, z28.s, z17.s\n"
+ "fmax z27.s, p2/M, z27.s, z17.s\n"
+ "cmp x14, %x[n_channels]\n"
+ "fmax z31.s, p2/M, z31.s, z17.s\n"
+ "fmin z15.s, p2/M, z15.s, z30.s\n"
+ "fmin z28.s, p2/M, z28.s, z30.s\n"
+ "fmin z27.s, p2/M, z27.s, z30.s\n"
+ "fmin z31.s, p2/M, z31.s, z30.s\n"
+ "st1w { z15.s }, p0, [x13, x9, LSL #2]\n"
+ "st1w { z28.s }, p0, [x12, x9, LSL #2]\n"
+ "st1w { z27.s }, p0, [x11, x9, LSL #2]\n"
+ "st1w { z31.s }, p0, [x10, x9, LSL #2]\n"
"blt 1b\n"
"2:" // Channel tail
- "movprfx z30, z29\n fmla z30.s, p2/M, z0.s, z5.s\n"
- "movprfx z31, z29\n fmla z31.s, p2/M, z0.s, z6.s\n"
- "ldr x20, [x16, #0x50]\n"
- "ld1w { z22.s }, p3/Z, [x20, x13, LSL #2]\n"
- "movprfx z5, z29\n fmla z5.s, p2/M, z0.s, z7.s\n"
- "fmla z29.s, p2/M, z0.s, z8.s\n"
- "ldr x20, [x16, #0x58]\n"
- "ldr x21, [x16, #0x60]\n"
- "fmla z30.s, p2/M, z1.s, z6.s\n"
- "fmla z31.s, p2/M, z1.s, z9.s\n"
- "ld1w { z6.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x20, [x16, #0x68]\n"
- "fmla z5.s, p2/M, z1.s, z8.s\n"
- "fmla z29.s, p2/M, z1.s, z13.s\n"
- "ld1w { z20.s }, p2/Z, [x9]\n"
- "ldr x23, [x16, #0x70]\n"
- "fmla z30.s, p2/M, z2.s, z9.s\n"
- "fmla z31.s, p2/M, z2.s, z11.s\n"
- "ld1w { z16.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ld1w { z19.s }, p2/Z, [x9, #1, MUL VL]\n"
- "fmla z5.s, p2/M, z2.s, z13.s\n"
- "fmla z29.s, p2/M, z2.s, z22.s\n"
- "ldr x21, [x16, #0x78]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "fmla z30.s, p2/M, z3.s, z11.s\n"
- "fmla z31.s, p2/M, z3.s, z12.s\n"
- "ld1w { z1.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x22, [x16, #0x80]\n"
- "fmla z5.s, p2/M, z3.s, z22.s\n"
- "fmla z29.s, p2/M, z3.s, z6.s\n"
- "ld1w { z17.s }, p2/Z, [x9, #3, MUL VL]\n"
- "ldr x20, [x16, #0x88]\n"
- "fmla z30.s, p2/M, z4.s, z12.s\n"
- "fmla z31.s, p2/M, z4.s, z16.s\n"
- "ld1w { z0.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ld1w { z27.s }, p3/Z, [x21, x13, LSL #2]\n"
- "fmla z5.s, p2/M, z4.s, z6.s\n"
- "fmla z29.s, p2/M, z4.s, z10.s\n"
- "ld1w { z16.s }, p2/Z, [x9, #4, MUL VL]\n"
- "ldr x21, [x16, #0x90]\n"
- "fmla z30.s, p2/M, z20.s, z7.s\n"
- "fmla z31.s, p2/M, z20.s, z8.s\n"
- "ldr x27, [x16, #0x98]\n"
- "ldr x26, [x16, #0xa0]\n"
- "fmla z5.s, p2/M, z20.s, z14.s\n"
- "fmla z29.s, p2/M, z20.s, z1.s\n"
- "ld1w { z21.s }, p2/Z, [x9, #5, MUL VL]\n"
- "ldr x25, [x16, #0xa8]\n"
- "fmla z30.s, p2/M, z19.s, z8.s\n"
- "fmla z31.s, p2/M, z19.s, z13.s\n"
- "ld1w { z26.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x24, [x16, #0xb0]\n"
- "fmla z5.s, p2/M, z19.s, z1.s\n"
- "fmla z29.s, p2/M, z19.s, z0.s\n"
- "ld1w { z25.s }, p2/Z, [x9, #6, MUL VL]\n"
- "ldr x20, [x16, #0xb8]\n"
- "fmla z30.s, p2/M, z18.s, z13.s\n"
- "fmla z31.s, p2/M, z18.s, z22.s\n"
- "ld1w { z24.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ldr x23, [x16, #0xc0]\n"
- "fmla z5.s, p2/M, z18.s, z0.s\n"
- "fmla z29.s, p2/M, z18.s, z27.s\n"
- "ld1w { z23.s }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "fmla z30.s, p2/M, z17.s, z22.s\n"
- "fmla z31.s, p2/M, z17.s, z6.s\n"
- "ld1w { z22.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ldr x22, [x16, #0xc8]\n"
- "fmla z5.s, p2/M, z17.s, z27.s\n"
- "fmla z29.s, p2/M, z17.s, z24.s\n"
- "ld1w { z20.s }, p2/Z, [x9, #-8, MUL VL]\n"
- "ldr x21, [x16, #0xd0]\n"
- "fmla z30.s, p2/M, z16.s, z6.s\n"
- "fmla z31.s, p2/M, z16.s, z10.s\n"
- "ld1w { z19.s }, p3/Z, [x27, x13, LSL #2]\n"
- "ld1w { z18.s }, p3/Z, [x26, x13, LSL #2]\n"
- "fmla z5.s, p2/M, z16.s, z24.s\n"
- "fmla z29.s, p2/M, z16.s, z26.s\n"
- "ld1w { z16.s }, p2/Z, [x9, #-7, MUL VL]\n"
- "ldr x27, [x16, #0xd8]\n"
- "fmla z30.s, p2/M, z21.s, z14.s\n"
+ "movprfx z16, z29\n fmla z16.s, p2/M, z0.s, z5.s\n"
+ "movprfx z15, z29\n fmla z15.s, p2/M, z0.s, z6.s\n"
+ "ldr x22, [x17, #0x50]\n"
+ "ldr x21, [x17, #0x58]\n"
+ "movprfx z31, z29\n fmla z31.s, p2/M, z0.s, z7.s\n"
+ "movprfx z5, z29\n fmla z5.s, p2/M, z0.s, z8.s\n"
+ "ldr x20, [x17, #0x60]\n"
+ "ldr x25, [x17, #0x68]\n"
+ "ld1w { z25.s }, p2/Z, [x15]\n"
+ "ldr x24, [x17, #0x70]\n"
+ "incw x9\n"
+ "mov p0.b, p3.b\n"
+ "ld1w { z24.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "ldr x23, [x17, #0x78]\n"
+ "fmla z16.s, p2/M, z1.s, z6.s\n"
+ "fmla z15.s, p2/M, z1.s, z9.s\n"
+ "ld1w { z23.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "ldr x27, [x17, #0x80]\n"
+ "fmla z31.s, p2/M, z1.s, z8.s\n"
+ "fmla z5.s, p2/M, z1.s, z13.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, #1, MUL VL]\n"
+ "ldr x22, [x17, #0x88]\n"
+ "ldr x21, [x17, #0x90]\n"
+ "ldr x26, [x17, #0x98]\n"
+ "fmla z16.s, p2/M, z2.s, z9.s\n"
+ "fmla z15.s, p2/M, z2.s, z11.s\n"
+ "ld1w { z18.s }, p3/Z, [x20, x16, LSL #2]\n"
+ "ldr x20, [x17, #0xa0]\n"
+ "fmla z31.s, p2/M, z2.s, z13.s\n"
+ "fmla z5.s, p2/M, z2.s, z24.s\n"
+ "ld1w { z22.s }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z16.s, p2/M, z3.s, z11.s\n"
+ "ld1w { z1.s }, p3/Z, [x25, x16, LSL #2]\n"
+ "ldr x25, [x17, #0xa8]\n"
+ "fmla z15.s, p2/M, z3.s, z12.s\n"
+ "fmla z31.s, p2/M, z3.s, z24.s\n"
+ "fmla z5.s, p2/M, z3.s, z23.s\n"
+ "ld1w { z21.s }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z16.s, p2/M, z4.s, z12.s\n"
+ "ld1w { z0.s }, p3/Z, [x24, x16, LSL #2]\n"
+ "ldr x24, [x17, #0xb0]\n"
+ "fmla z15.s, p2/M, z4.s, z18.s\n"
+ "ld1w { z29.s }, p3/Z, [x23, x16, LSL #2]\n"
+ "ldr x23, [x17, #0xb8]\n"
+ "fmla z31.s, p2/M, z4.s, z23.s\n"
+ "fmla z5.s, p2/M, z4.s, z10.s\n"
+ "ld1w { z19.s }, p2/Z, [x15, #4, MUL VL]\n"
+ "fmla z16.s, p2/M, z25.s, z7.s\n"
+ "fmla z15.s, p2/M, z25.s, z8.s\n"
+ "fmla z31.s, p2/M, z25.s, z14.s\n"
+ "fmla z5.s, p2/M, z25.s, z1.s\n"
+ "ld1w { z18.s }, p2/Z, [x15, #5, MUL VL]\n"
+ "fmla z16.s, p2/M, z20.s, z8.s\n"
+ "ld1w { z28.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "ldr x28, [x17, #0xc8]\n"
+ "fmla z15.s, p2/M, z20.s, z13.s\n"
+ "fmla z31.s, p2/M, z20.s, z1.s\n"
+ "fmla z5.s, p2/M, z20.s, z0.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, #6, MUL VL]\n"
+ "fmla z16.s, p2/M, z22.s, z13.s\n"
+ "ld1w { z27.s }, p3/Z, [x27, x16, LSL #2]\n"
+ "ldr x22, [x17, #0xc0]\n"
+ "fmla z15.s, p2/M, z22.s, z24.s\n"
+ "fmla z31.s, p2/M, z22.s, z0.s\n"
+ "fmla z5.s, p2/M, z22.s, z29.s\n"
+ "ld1w { z26.s }, p2/Z, [x15, #7, MUL VL]\n"
+ "addvl x15, x15, #16\n"
+ "fmla z16.s, p2/M, z21.s, z24.s\n"
+ "ld1w { z25.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "ldr x21, [x17, #0xd0]\n"
+ "fmla z15.s, p2/M, z21.s, z23.s\n"
+ "fmla z31.s, p2/M, z21.s, z29.s\n"
+ "fmla z5.s, p2/M, z21.s, z27.s\n"
+ "ld1w { z24.s }, p2/Z, [x15, #-8, MUL VL]\n"
+ "fmla z16.s, p2/M, z19.s, z23.s\n"
+ "ld1w { z23.s }, p3/Z, [x26, x16, LSL #2]\n"
+ "ldr x27, [x17, #0xd8]\n"
+ "fmla z15.s, p2/M, z19.s, z10.s\n"
+ "ld1w { z22.s }, p3/Z, [x20, x16, LSL #2]\n"
+ "ldr x20, [x17, #0xe0]\n"
+ "fmla z31.s, p2/M, z19.s, z27.s\n"
+ "fmla z5.s, p2/M, z19.s, z28.s\n"
+ "ld1w { z19.s }, p2/Z, [x15, #-7, MUL VL]\n"
+ "fmla z16.s, p2/M, z18.s, z14.s\n"
+ "ld1w { z2.s }, p3/Z, [x23, x16, LSL #2]\n"
+ "ldr x26, [x17, #0xf8]\n"
+ "fmla z15.s, p2/M, z18.s, z1.s\n"
+ "fmla z31.s, p2/M, z18.s, z25.s\n"
+ "fmla z5.s, p2/M, z18.s, z23.s\n"
+ "ld1w { z21.s }, p2/Z, [x15, #-6, MUL VL]\n"
+ "fmla z16.s, p2/M, z20.s, z1.s\n"
+ "ld1w { z18.s }, p3/Z, [x25, x16, LSL #2]\n"
+ "ldr x25, [x17, #0xe8]\n"
+ "fmla z15.s, p2/M, z20.s, z0.s\n"
+ "fmla z31.s, p2/M, z20.s, z23.s\n"
+ "fmla z5.s, p2/M, z20.s, z22.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, #-5, MUL VL]\n"
+ "fmla z16.s, p2/M, z26.s, z0.s\n"
+ "ld1w { z9.s }, p3/Z, [x24, x16, LSL #2]\n"
+ "ldr x24, [x17, #0xf0]\n"
+ "fmla z15.s, p2/M, z26.s, z29.s\n"
+ "fmla z31.s, p2/M, z26.s, z22.s\n"
+ "fmla z5.s, p2/M, z26.s, z18.s\n"
+ "ld1w { z4.s }, p2/Z, [x15, #-4, MUL VL]\n"
+ "fmla z16.s, p2/M, z24.s, z29.s\n"
+ "ld1w { z1.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "ldr x23, [x17, #0x100]\n"
+ "fmla z15.s, p2/M, z24.s, z27.s\n"
+ "fmla z31.s, p2/M, z24.s, z18.s\n"
+ "fmla z5.s, p2/M, z24.s, z9.s\n"
+ "ld1w { z3.s }, p2/Z, [x15, #-3, MUL VL]\n"
+ "fmla z16.s, p2/M, z19.s, z27.s\n"
+ "ld1w { z0.s }, p3/Z, [x28, x16, LSL #2]\n"
+ "ldr x22, [x17, #0x108]\n"
+ "fmla z15.s, p2/M, z19.s, z28.s\n"
+ "ld1w { z29.s }, p3/Z, [x20, x16, LSL #2]\n"
+ "fmla z31.s, p2/M, z19.s, z9.s\n"
+ "fmla z5.s, p2/M, z19.s, z2.s\n"
+ "ld1w { z19.s }, p2/Z, [x15, #-2, MUL VL]\n"
+ "fmla z16.s, p2/M, z21.s, z25.s\n"
+ "ld1w { z28.s }, p3/Z, [x21, x16, LSL #2]\n"
+ "ldr x21, [x17, #0x110]\n"
+ "fmla z15.s, p2/M, z21.s, z23.s\n"
"fmla z31.s, p2/M, z21.s, z1.s\n"
- "ld1w { z17.s }, p3/Z, [x20, x13, LSL #2]\n"
- "ldr x20, [x16, #0xe0]\n"
- "fmla z5.s, p2/M, z21.s, z22.s\n"
- "fmla z29.s, p2/M, z21.s, z19.s\n"
- "ld1w { z21.s }, p2/Z, [x9, #-6, MUL VL]\n"
- "ldr x26, [x16, #0xf8]\n"
- "fmla z30.s, p2/M, z25.s, z1.s\n"
- "fmla z31.s, p2/M, z25.s, z0.s\n"
- "ld1w { z9.s }, p3/Z, [x25, x13, LSL #2]\n"
- "ldr x25, [x16, #0xe8]\n"
+ "fmla z5.s, p2/M, z21.s, z0.s\n"
+ "ld1w { z27.s }, p2/Z, [x15, #-1, MUL VL]\n"
+ "fmla z16.s, p2/M, z20.s, z23.s\n"
+ "ld1w { z26.s }, p3/Z, [x27, x16, LSL #2]\n"
+ "ldr x20, [x17, #0x118]\n"
+ "fmla z15.s, p2/M, z20.s, z22.s\n"
+ "fmla z31.s, p2/M, z20.s, z0.s\n"
+ "fmla z5.s, p2/M, z20.s, z28.s\n"
+ "ld1w { z25.s }, p2/Z, [x15]\n"
+ "fmla z16.s, p2/M, z4.s, z22.s\n"
+ "ld1w { z24.s }, p3/Z, [x25, x16, LSL #2]\n"
+ "fmla z15.s, p2/M, z4.s, z18.s\n"
+ "fmla z31.s, p2/M, z4.s, z28.s\n"
+ "fmla z5.s, p2/M, z4.s, z26.s\n"
+ "ld1w { z23.s }, p2/Z, [x15, #1, MUL VL]\n"
+ "fmla z16.s, p2/M, z3.s, z18.s\n"
+ "ld1w { z18.s }, p3/Z, [x24, x16, LSL #2]\n"
+ "fmla z15.s, p2/M, z3.s, z9.s\n"
+ "fmla z31.s, p2/M, z3.s, z26.s\n"
+ "fmla z5.s, p2/M, z3.s, z29.s\n"
+ "ld1w { z22.s }, p2/Z, [x15, #2, MUL VL]\n"
+ "fmla z16.s, p2/M, z19.s, z9.s\n"
+ "ld1w { z21.s }, p3/Z, [x26, x16, LSL #2]\n"
+ "fmla z15.s, p2/M, z19.s, z2.s\n"
+ "fmla z31.s, p2/M, z19.s, z29.s\n"
+ "fmla z5.s, p2/M, z19.s, z24.s\n"
+ "ld1w { z20.s }, p2/Z, [x15, #3, MUL VL]\n"
+ "fmla z16.s, p2/M, z27.s, z1.s\n"
+ "ld1w { z19.s }, p3/Z, [x23, x16, LSL #2]\n"
+ "fmla z15.s, p2/M, z27.s, z0.s\n"
+ "fmla z31.s, p2/M, z27.s, z18.s\n"
+ "ld1w { z18.s }, p3/Z, [x22, x16, LSL #2]\n"
+ "fmla z5.s, p2/M, z27.s, z21.s\n"
+ "fmla z16.s, p2/M, z25.s, z0.s\n"
+ "fmla z15.s, p2/M, z25.s, z28.s\n"
+ "fmla z31.s, p2/M, z25.s, z21.s\n"
+ "ld1w { z21.s }, p3/Z, [x21, x16, LSL #2]\n"
"fmla z5.s, p2/M, z25.s, z19.s\n"
- "fmla z29.s, p2/M, z25.s, z18.s\n"
- "ld1w { z4.s }, p2/Z, [x9, #-5, MUL VL]\n"
- "incw x28\n"
- "fmla z30.s, p2/M, z23.s, z0.s\n"
- "fmla z31.s, p2/M, z23.s, z27.s\n"
- "ld1w { z8.s }, p3/Z, [x24, x13, LSL #2]\n"
- "ldr x24, [x16, #0xf0]\n"
+ "fmla z16.s, p2/M, z23.s, z28.s\n"
+ "fmla z15.s, p2/M, z23.s, z26.s\n"
+ "fmla z31.s, p2/M, z23.s, z19.s\n"
+ "ld1w { z12.s }, p3/Z, [x20, x16, LSL #2]\n"
"fmla z5.s, p2/M, z23.s, z18.s\n"
- "fmla z29.s, p2/M, z23.s, z9.s\n"
- "ld1w { z6.s }, p2/Z, [x9, #-4, MUL VL]\n"
- "mov p0.b, p3.b\n"
- "fmla z30.s, p2/M, z20.s, z27.s\n"
- "fmla z31.s, p2/M, z20.s, z24.s\n"
- "ld1w { z10.s }, p3/Z, [x23, x13, LSL #2]\n"
- "ldr x23, [x16, #0x100]\n"
- "fmla z5.s, p2/M, z20.s, z9.s\n"
- "fmla z29.s, p2/M, z20.s, z8.s\n"
- "ld1w { z11.s }, p2/Z, [x9, #-3, MUL VL]\n"
- "fmla z30.s, p2/M, z16.s, z24.s\n"
- "fmla z31.s, p2/M, z16.s, z26.s\n"
- "ld1w { z0.s }, p3/Z, [x22, x13, LSL #2]\n"
- "ld1w { z27.s }, p3/Z, [x20, x13, LSL #2]\n"
- "fmla z5.s, p2/M, z16.s, z8.s\n"
- "fmla z29.s, p2/M, z16.s, z17.s\n"
- "ld1w { z16.s }, p2/Z, [x9, #-2, MUL VL]\n"
- "ldr x22, [x16, #0x108]\n"
- "fmla z30.s, p2/M, z21.s, z22.s\n"
- "fmla z31.s, p2/M, z21.s, z19.s\n"
- "ld1w { z26.s }, p3/Z, [x21, x13, LSL #2]\n"
- "ldr x21, [x16, #0x110]\n"
- "fmla z5.s, p2/M, z21.s, z10.s\n"
- "fmla z29.s, p2/M, z21.s, z0.s\n"
- "ld1w { z25.s }, p2/Z, [x9, #-1, MUL VL]\n"
- "fmla z30.s, p2/M, z4.s, z19.s\n"
- "fmla z31.s, p2/M, z4.s, z18.s\n"
- "ld1w { z24.s }, p3/Z, [x27, x13, LSL #2]\n"
- "ldr x20, [x16, #0x118]\n"
- "fmla z5.s, p2/M, z4.s, z0.s\n"
- "fmla z29.s, p2/M, z4.s, z26.s\n"
- "ld1w { z23.s }, p2/Z, [x9]\n"
- "fmla z30.s, p2/M, z6.s, z18.s\n"
- "fmla z31.s, p2/M, z6.s, z9.s\n"
- "ld1w { z22.s }, p3/Z, [x25, x13, LSL #2]\n"
- "fmla z5.s, p2/M, z6.s, z26.s\n"
- "fmla z29.s, p2/M, z6.s, z24.s\n"
- "ld1w { z21.s }, p2/Z, [x9, #1, MUL VL]\n"
- "fmla z30.s, p2/M, z11.s, z9.s\n"
- "fmla z31.s, p2/M, z11.s, z8.s\n"
- "ld1w { z18.s }, p3/Z, [x24, x13, LSL #2]\n"
- "fmla z5.s, p2/M, z11.s, z24.s\n"
- "fmla z29.s, p2/M, z11.s, z27.s\n"
- "ld1w { z20.s }, p2/Z, [x9, #2, MUL VL]\n"
- "fmla z30.s, p2/M, z16.s, z8.s\n"
- "fmla z31.s, p2/M, z16.s, z17.s\n"
- "ld1w { z17.s }, p3/Z, [x26, x13, LSL #2]\n"
- "fmla z5.s, p2/M, z16.s, z27.s\n"
- "fmla z29.s, p2/M, z16.s, z22.s\n"
- "ld1w { z19.s }, p2/Z, [x9, #3, MUL VL]\n"
- "fmla z30.s, p2/M, z25.s, z10.s\n"
- "fmla z31.s, p2/M, z25.s, z0.s\n"
- "ld1w { z16.s }, p3/Z, [x23, x13, LSL #2]\n"
- "fmla z5.s, p2/M, z25.s, z18.s\n"
- "fmla z29.s, p2/M, z25.s, z17.s\n"
- "ld1w { z18.s }, p3/Z, [x22, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z23.s, z0.s\n"
- "fmla z31.s, p2/M, z23.s, z26.s\n"
- "fmla z5.s, p2/M, z23.s, z17.s\n"
- "fmla z29.s, p2/M, z23.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x21, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z21.s, z26.s\n"
- "fmla z31.s, p2/M, z21.s, z24.s\n"
- "fmla z5.s, p2/M, z21.s, z16.s\n"
- "fmla z29.s, p2/M, z21.s, z18.s\n"
- "ld1w { z16.s }, p3/Z, [x20, x13, LSL #2]\n"
- "fmla z30.s, p2/M, z20.s, z24.s\n"
- "fmla z31.s, p2/M, z20.s, z27.s\n"
- "fmla z5.s, p2/M, z20.s, z18.s\n"
- "fmla z29.s, p2/M, z20.s, z17.s\n"
- "fmla z30.s, p2/M, z19.s, z27.s\n"
- "fmla z31.s, p2/M, z19.s, z22.s\n"
- "fmax z30.s, p2/M, z30.s, z15.s\n"
- "fmax z31.s, p2/M, z31.s, z15.s\n"
- "fmla z5.s, p2/M, z19.s, z17.s\n"
- "fmla z29.s, p2/M, z19.s, z16.s\n"
- "fmax z5.s, p2/M, z5.s, z15.s\n"
- "fmax z29.s, p2/M, z29.s, z15.s\n"
- "fmin z30.s, p2/M, z30.s, z28.s\n"
- "fmin z31.s, p2/M, z31.s, z28.s\n"
- "st1w { z30.s }, p0, [x15, x28, LSL #2]\n"
- "fmin z5.s, p2/M, z5.s, z28.s\n"
- "fmin z29.s, p2/M, z29.s, z28.s\n"
- "st1w { z31.s }, p0, [x14, x28, LSL #2]\n"
- "st1w { z5.s }, p0, [x12, x28, LSL #2]\n"
- "st1w { z29.s }, p0, [x11, x28, LSL #2]\n"
+ "fmla z16.s, p2/M, z22.s, z26.s\n"
+ "fmla z15.s, p2/M, z22.s, z29.s\n"
+ "fmla z31.s, p2/M, z22.s, z18.s\n"
+ "fmla z5.s, p2/M, z22.s, z21.s\n"
+ "fmla z16.s, p2/M, z20.s, z29.s\n"
+ "fmla z15.s, p2/M, z20.s, z24.s\n"
+ "fmla z31.s, p2/M, z20.s, z21.s\n"
+ "fmla z5.s, p2/M, z20.s, z12.s\n"
+ "fmax z16.s, p2/M, z16.s, z17.s\n"
+ "fmax z15.s, p2/M, z15.s, z17.s\n"
+ "fmax z31.s, p2/M, z31.s, z17.s\n"
+ "fmin z16.s, p2/M, z16.s, z30.s\n"
+ "fmin z15.s, p2/M, z15.s, z30.s\n"
+ "fmax z5.s, p2/M, z5.s, z17.s\n"
+ "fmin z31.s, p2/M, z31.s, z30.s\n"
+ "st1w { z16.s }, p0, [x13, x9, LSL #2]\n"
+ "fmin z5.s, p2/M, z5.s, z30.s\n"
+ "st1w { z15.s }, p0, [x12, x9, LSL #2]\n"
+ "st1w { z31.s }, p0, [x11, x9, LSL #2]\n"
+ "st1w { z5.s }, p0, [x10, x9, LSL #2]\n"
:
: [n_channels] "r" ((unsigned long) n_channels), [offsetof_Args_inptrs] "I" (offsetof(Args, inptrs)), [offsetof_args_max] "I" (offsetof(Args, max)), [offsetof_args_min] "I" (offsetof(Args, min)), [offsetof_args_outptrs] "I" (offsetof(Args, outptrs)), [offsetof_args_params] "I" (offsetof(Args, params)), [params_struct] "r" (&params_struct)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
index d53daaa8a0..b5e2ef92f7 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_nhwc_generic_output9_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,118 +45,118 @@ void sve_fp32_nhwc_generic_output9_mla_depthfirst_impl(
__asm__ __volatile__(
"ptrue p1.b\n"
- "mov x11, #0x0\n"
+ "mov x9, #0x0\n"
"ld1rw { z2.s }, p1/Z, [%x[minmax_vals]]\n"
"ld1rw { z1.s }, p1/Z, [%x[minmax_vals], #4]\n"
- "whilelt p0.s, x11, %x[n_channels]\n"
+ "whilelt p0.s, x9, %x[n_channels]\n"
"1:" // Channel loop
"mov z23.b, #0x0\n"
"cbz %x[bias], 2f\n"
- "ld1w { z23.s }, p0/Z, [%x[bias], x11, LSL #2]\n"
+ "ld1w { z23.s }, p0/Z, [%x[bias], x9, LSL #2]\n"
"2:" // Channel loop: Load bias: Done
- "mov x10, %x[inptrs]\n"
- "ldp x28, x27, [x10], #0x10\n"
- "ldp x26, x25, [x10], #0x10\n"
- "subs x9, %x[n_points], #0x1\n"
- "ldp x24, x23, [x10], #0x10\n"
- "ldp x22, x21, [x10], #0x10\n"
+ "mov x25, %x[inptrs]\n"
+ "subs x24, %x[n_points], #0x1\n"
"mov z24.d, z23.d\n"
"mov z25.d, z23.d\n"
- "ldr x20, [x10], #0x8\n"
"mov z26.d, z23.d\n"
"mov z27.d, z23.d\n"
"ld1w { z0.s }, p1/Z, [%x[params]]\n"
+ "addvl %x[params], %x[params], #1\n"
"mov z28.d, z23.d\n"
"mov z29.d, z23.d\n"
- "ld1w { z14.s }, p0/Z, [x28, x11, LSL #2]\n"
- "ld1w { z15.s }, p0/Z, [x27, x11, LSL #2]\n"
+ "ldp x23, x20, [x25], #0x10\n"
"mov z30.d, z23.d\n"
"mov z31.d, z23.d\n"
- "ld1w { z16.s }, p0/Z, [x26, x11, LSL #2]\n"
- "ld1w { z17.s }, p0/Z, [x25, x11, LSL #2]\n"
- "ld1w { z18.s }, p0/Z, [x24, x11, LSL #2]\n"
- "ld1w { z19.s }, p0/Z, [x23, x11, LSL #2]\n"
- "addvl %x[params], %x[params], #1\n"
- "ld1w { z20.s }, p0/Z, [x22, x11, LSL #2]\n"
- "ld1w { z21.s }, p0/Z, [x21, x11, LSL #2]\n"
- "ld1w { z22.s }, p0/Z, [x20, x11, LSL #2]\n"
+ "ldp x22, x21, [x25], #0x10\n"
+ "ld1w { z14.s }, p0/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z15.s }, p0/Z, [x20, x9, LSL #2]\n"
+ "ldp x23, x20, [x25], #0x10\n"
+ "ld1w { z16.s }, p0/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z17.s }, p0/Z, [x21, x9, LSL #2]\n"
+ "ldp x22, x21, [x25], #0x10\n"
+ "ld1w { z18.s }, p0/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z19.s }, p0/Z, [x20, x9, LSL #2]\n"
+ "ldr x20, [x25], #0x8\n"
+ "ld1w { z20.s }, p0/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z21.s }, p0/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z22.s }, p0/Z, [x20, x9, LSL #2]\n"
"ble 4f\n"
"3:" // Channel loop: Planar loop
- "ldp x28, x27, [x10], #0x10\n"
- "ldp x26, x25, [x10], #0x10\n"
- "subs x9, x9, #0x1\n"
+ "ldp x23, x20, [x25], #0x10\n"
+ "subs x24, x24, #0x1\n"
"fmla z23.s, p1/M, z14.s, z0.s\n"
- "ldp x24, x23, [x10], #0x10\n"
- "ldp x22, x21, [x10], #0x10\n"
"fmla z24.s, p1/M, z15.s, z0.s\n"
"fmla z25.s, p1/M, z16.s, z0.s\n"
- "ldr x20, [x10], #0x8\n"
"fmla z26.s, p1/M, z17.s, z0.s\n"
"fmla z27.s, p1/M, z18.s, z0.s\n"
- "ld1w { z14.s }, p0/Z, [x28, x11, LSL #2]\n"
"fmla z28.s, p1/M, z19.s, z0.s\n"
+ "ldp x22, x21, [x25], #0x10\n"
"fmla z29.s, p1/M, z20.s, z0.s\n"
- "ld1w { z15.s }, p0/Z, [x27, x11, LSL #2]\n"
- "ld1w { z16.s }, p0/Z, [x26, x11, LSL #2]\n"
"fmla z30.s, p1/M, z21.s, z0.s\n"
+ "ld1w { z14.s }, p0/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z15.s }, p0/Z, [x20, x9, LSL #2]\n"
"fmla z31.s, p1/M, z22.s, z0.s\n"
"ld1w { z0.s }, p1/Z, [%x[params]]\n"
- "ld1w { z17.s }, p0/Z, [x25, x11, LSL #2]\n"
- "ld1w { z18.s }, p0/Z, [x24, x11, LSL #2]\n"
- "ld1w { z19.s }, p0/Z, [x23, x11, LSL #2]\n"
"addvl %x[params], %x[params], #1\n"
- "ld1w { z20.s }, p0/Z, [x22, x11, LSL #2]\n"
- "ld1w { z21.s }, p0/Z, [x21, x11, LSL #2]\n"
- "ld1w { z22.s }, p0/Z, [x20, x11, LSL #2]\n"
+ "ldp x23, x20, [x25], #0x10\n"
+ "ld1w { z16.s }, p0/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z17.s }, p0/Z, [x21, x9, LSL #2]\n"
+ "ldp x22, x21, [x25], #0x10\n"
+ "ld1w { z18.s }, p0/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z19.s }, p0/Z, [x20, x9, LSL #2]\n"
+ "ldr x20, [x25], #0x8\n"
+ "ld1w { z20.s }, p0/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z21.s }, p0/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z22.s }, p0/Z, [x20, x9, LSL #2]\n"
"bgt 3b\n"
"4:" // Channel loop: Planar tail
"fmla z23.s, p1/M, z14.s, z0.s\n"
"fmla z24.s, p1/M, z15.s, z0.s\n"
- "fmax z23.s, p1/M, z23.s, z2.s\n"
- "fmax z24.s, p1/M, z24.s, z2.s\n"
+ "ldp x28, x27, [%x[outptrs], #0x0]\n"
+ "ldp x26, x25, [%x[outptrs], #0x10]\n"
"fmla z25.s, p1/M, z16.s, z0.s\n"
"fmla z26.s, p1/M, z17.s, z0.s\n"
- "fmax z25.s, p1/M, z25.s, z2.s\n"
- "fmax z26.s, p1/M, z26.s, z2.s\n"
+ "ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ldp x22, x21, [%x[outptrs], #0x30]\n"
"fmla z27.s, p1/M, z18.s, z0.s\n"
"fmla z28.s, p1/M, z19.s, z0.s\n"
- "fmax z27.s, p1/M, z27.s, z2.s\n"
- "fmax z28.s, p1/M, z28.s, z2.s\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
"fmla z29.s, p1/M, z20.s, z0.s\n"
"fmla z30.s, p1/M, z21.s, z0.s\n"
+ "fmla z31.s, p1/M, z22.s, z0.s\n"
+ "fmax z23.s, p1/M, z23.s, z2.s\n"
+ "fmax z24.s, p1/M, z24.s, z2.s\n"
+ "fmax z25.s, p1/M, z25.s, z2.s\n"
+ "fmax z26.s, p1/M, z26.s, z2.s\n"
+ "fmax z27.s, p1/M, z27.s, z2.s\n"
+ "fmax z28.s, p1/M, z28.s, z2.s\n"
"fmax z29.s, p1/M, z29.s, z2.s\n"
"fmax z30.s, p1/M, z30.s, z2.s\n"
- "fmla z31.s, p1/M, z22.s, z0.s\n"
"fmax z31.s, p1/M, z31.s, z2.s\n"
- "ldp x28, x27, [%x[outptrs], #0x0]\n"
- "ldp x26, x25, [%x[outptrs], #0x10]\n"
- "ldp x24, x23, [%x[outptrs], #0x20]\n"
- "ldp x22, x21, [%x[outptrs], #0x30]\n"
"fmin z23.s, p1/M, z23.s, z1.s\n"
"fmin z24.s, p1/M, z24.s, z1.s\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
"fmin z25.s, p1/M, z25.s, z1.s\n"
"fmin z26.s, p1/M, z26.s, z1.s\n"
- "st1w { z23.s }, p0, [x28, x11, LSL #2]\n"
"fmin z27.s, p1/M, z27.s, z1.s\n"
"fmin z28.s, p1/M, z28.s, z1.s\n"
- "st1w { z24.s }, p0, [x27, x11, LSL #2]\n"
"fmin z29.s, p1/M, z29.s, z1.s\n"
+ "st1w { z23.s }, p0, [x28, x9, LSL #2]\n"
"fmin z30.s, p1/M, z30.s, z1.s\n"
- "st1w { z25.s }, p0, [x26, x11, LSL #2]\n"
"fmin z31.s, p1/M, z31.s, z1.s\n"
- "st1w { z26.s }, p0, [x25, x11, LSL #2]\n"
- "st1w { z27.s }, p0, [x24, x11, LSL #2]\n"
- "st1w { z28.s }, p0, [x23, x11, LSL #2]\n"
- "st1w { z29.s }, p0, [x22, x11, LSL #2]\n"
- "st1w { z30.s }, p0, [x21, x11, LSL #2]\n"
- "st1w { z31.s }, p0, [x20, x11, LSL #2]\n"
- "incw x11\n"
- "whilelt p0.s, x11, %x[n_channels]\n"
+ "st1w { z24.s }, p0, [x27, x9, LSL #2]\n"
+ "st1w { z25.s }, p0, [x26, x9, LSL #2]\n"
+ "st1w { z26.s }, p0, [x25, x9, LSL #2]\n"
+ "st1w { z27.s }, p0, [x24, x9, LSL #2]\n"
+ "st1w { z28.s }, p0, [x23, x9, LSL #2]\n"
+ "st1w { z29.s }, p0, [x22, x9, LSL #2]\n"
+ "st1w { z30.s }, p0, [x21, x9, LSL #2]\n"
+ "st1w { z31.s }, p0, [x20, x9, LSL #2]\n"
+ "incw x9\n"
+ "whilelt p0.s, x9, %x[n_channels]\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [minmax_vals] "r" (minmax_vals), [n_channels] "r" ((uint64_t) n_channels), [n_points] "r" ((uint64_t) n_points), [outptrs] "r" (outptrs)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
index 3a71baaf61..4676465037 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,49 +43,49 @@ void sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
__asm__ __volatile__(
"mov x17, #0x0\n"
- "whilelt p2.s, x17, %x[channel_multiplier]\n"
+ "ptrue p2.b\n"
"ldr x16, [%x[inptrs], #0x0]\n"
"ldr x15, [%x[inptrs], #0x8]\n"
- "ptrue p1.b\n"
"ldr x14, [%x[inptrs], #0x10]\n"
"ldr x13, [%x[inptrs], #0x18]\n"
"mov x12, #0x0\n"
"ldr x11, [%x[inptrs], #0x20]\n"
"ldr x10, [%x[inptrs], #0x28]\n"
+ "whilelt p1.s, x17, %x[channel_multiplier]\n"
"ldr x9, [%x[inptrs], #0x30]\n"
- "ld1w { z24.s }, p2/Z, [%x[params]]\n"
- "mov z21.d, z24.d\n"
- "mov z25.d, z24.d\n"
"ldp x28, x27, [%x[outptrs], #0x0]\n"
"ldp x26, x25, [%x[outptrs], #0x10]\n"
- "mov z27.d, z24.d\n"
- "mov z26.d, z24.d\n"
"ldp x24, x23, [%x[outptrs], #0x20]\n"
+ "ld1w { z24.s }, p1/Z, [%x[params]]\n"
"ldp x22, x21, [%x[outptrs], #0x30]\n"
+ "ldr x20, [%x[outptrs], #0x40]\n"
+ "ld1rqw { z2.s }, p2/Z, [x16]\n"
+ "ld1rqw { z3.s }, p2/Z, [x16, #16]\n"
+ "ld1rqw { z4.s }, p2/Z, [x15]\n"
+ "ld1rqw { z5.s }, p2/Z, [x15, #16]\n"
+ "ld1rqw { z6.s }, p2/Z, [x14]\n"
+ "mov z21.d, z24.d\n"
+ "mov z25.d, z24.d\n"
+ "ld1rqw { z7.s }, p2/Z, [x14, #16]\n"
+ "ld1rqw { z8.s }, p2/Z, [x13]\n"
+ "mov z27.d, z24.d\n"
+ "mov z26.d, z24.d\n"
+ "ld1rqw { z9.s }, p2/Z, [x13, #16]\n"
+ "ld1rqw { z10.s }, p2/Z, [x11]\n"
"mov z28.d, z24.d\n"
+ "mov z16.d, z24.d\n"
+ "ld1rqw { z11.s }, p2/Z, [x11, #16]\n"
+ "ld1rqw { z12.s }, p2/Z, [x10]\n"
+ "mov z22.d, z24.d\n"
"mov z20.d, z24.d\n"
- "ldr x20, [%x[outptrs], #0x40]\n"
- "ld1rqw { z2.s }, p1/Z, [x16]\n"
- "mov z23.d, z24.d\n"
- "mov z19.d, z24.d\n"
- "ld1rqw { z3.s }, p1/Z, [x16, #16]\n"
- "ld1rqw { z4.s }, p1/Z, [x15]\n"
- "ld1rqw { z5.s }, p1/Z, [x15, #16]\n"
- "ld1rqw { z6.s }, p1/Z, [x14]\n"
- "ld1rqw { z7.s }, p1/Z, [x14, #16]\n"
- "ld1rqw { z8.s }, p1/Z, [x13]\n"
- "ld1rqw { z9.s }, p1/Z, [x13, #16]\n"
- "ld1rqw { z10.s }, p1/Z, [x11]\n"
- "ld1rqw { z11.s }, p1/Z, [x11, #16]\n"
- "ld1rqw { z12.s }, p1/Z, [x10]\n"
- "ld1rqw { z13.s }, p1/Z, [x10, #16]\n"
- "ld1rqw { z14.s }, p1/Z, [x9]\n"
- "ld1rqw { z15.s }, p1/Z, [x9, #16]\n"
- "ld1rw { z22.s }, p1/Z, [%x[clamps]]\n"
- "ld1rw { z16.s }, p1/Z, [%x[clamps], #4]\n"
- "ld1w { z31.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1w { z29.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "ld1rqw { z13.s }, p2/Z, [x10, #16]\n"
+ "ld1rqw { z14.s }, p2/Z, [x9]\n"
+ "ld1rqw { z15.s }, p2/Z, [x9, #16]\n"
+ "ld1rw { z23.s }, p2/Z, [%x[clamps]]\n"
+ "ld1rw { z19.s }, p2/Z, [%x[clamps], #4]\n"
+ "ld1w { z31.s }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "ld1w { z29.s }, p1/Z, [%x[params], #3, MUL VL]\n"
"addvl %x[params], %x[params], #4\n"
"1:" // Output channel complete vector loop
"fmla z24.s, z31.s, z2.s[0]\n"
@@ -95,37 +95,37 @@ void sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"fmla z26.s, z31.s, z6.s[2]\n"
"fmla z28.s, z31.s, z7.s[0]\n"
"mov z0.d, z11.d\n"
- "mov p0.b, p2.b\n"
+ "mov p0.b, p1.b\n"
"fmla z21.s, z31.s, z2.s[2]\n"
"fmla z25.s, z31.s, z3.s[0]\n"
- "whilelt p2.s, x17, %x[channel_multiplier]\n"
- "fmla z20.s, z31.s, z1.s[0]\n"
- "fmla z23.s, z31.s, z1.s[2]\n"
- "fmla z19.s, z31.s, z0.s[0]\n"
+ "fmla z16.s, z31.s, z1.s[0]\n"
+ "fmla z22.s, z31.s, z1.s[2]\n"
+ "whilelt p1.s, x17, %x[channel_multiplier]\n"
+ "fmla z20.s, z31.s, z0.s[0]\n"
"fmla z24.s, z30.s, z2.s[1]\n"
- "ld1w { z18.s }, p1/Z, [%x[params]]\n"
+ "ld1w { z18.s }, p2/Z, [%x[params]]\n"
"fmla z27.s, z30.s, z6.s[1]\n"
"fmla z26.s, z30.s, z6.s[3]\n"
"fmla z28.s, z30.s, z7.s[1]\n"
"fmla z21.s, z30.s, z2.s[3]\n"
"fmla z25.s, z30.s, z3.s[1]\n"
- "fmla z20.s, z30.s, z1.s[1]\n"
- "fmla z23.s, z30.s, z1.s[3]\n"
- "fmla z19.s, z30.s, z0.s[1]\n"
- "ld1w { z17.s }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "fmla z16.s, z30.s, z1.s[1]\n"
+ "fmla z22.s, z30.s, z1.s[3]\n"
+ "fmla z20.s, z30.s, z0.s[1]\n"
+ "ld1w { z17.s }, p2/Z, [%x[params], #1, MUL VL]\n"
"fmla z24.s, z29.s, z2.s[2]\n"
"fmla z27.s, z29.s, z6.s[2]\n"
"fmla z26.s, z29.s, z7.s[0]\n"
"fmla z28.s, z29.s, z7.s[2]\n"
"fmla z21.s, z29.s, z3.s[0]\n"
"fmla z25.s, z29.s, z3.s[2]\n"
- "fmla z20.s, z29.s, z1.s[2]\n"
- "fmla z23.s, z29.s, z0.s[0]\n"
+ "fmla z16.s, z29.s, z1.s[2]\n"
+ "fmla z22.s, z29.s, z0.s[0]\n"
"mov z1.d, z8.d\n"
- "fmla z19.s, z29.s, z0.s[2]\n"
+ "fmla z20.s, z29.s, z0.s[2]\n"
"mov z0.d, z9.d\n"
"fmla z24.s, z18.s, z4.s[0]\n"
- "ld1w { z31.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"fmla z27.s, z18.s, z1.s[0]\n"
"fmla z26.s, z18.s, z1.s[2]\n"
"mov z1.d, z12.d\n"
@@ -133,40 +133,40 @@ void sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"mov z0.d, z13.d\n"
"fmla z21.s, z18.s, z4.s[2]\n"
"fmla z25.s, z18.s, z5.s[0]\n"
- "fmla z20.s, z18.s, z1.s[0]\n"
- "fmla z23.s, z18.s, z1.s[2]\n"
- "fmla z19.s, z18.s, z0.s[0]\n"
+ "fmla z16.s, z18.s, z1.s[0]\n"
+ "fmla z22.s, z18.s, z1.s[2]\n"
+ "fmla z20.s, z18.s, z0.s[0]\n"
"mov z1.d, z8.d\n"
- "ld1w { z18.s }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [%x[params], #3, MUL VL]\n"
"mov z0.d, z9.d\n"
"fmla z24.s, z17.s, z4.s[1]\n"
"fmla z27.s, z17.s, z1.s[1]\n"
"fmla z26.s, z17.s, z1.s[3]\n"
- "fmla z28.s, z17.s, z0.s[1]\n"
"mov z1.d, z12.d\n"
- "mov z0.d, z13.d\n"
"fmla z21.s, z17.s, z4.s[3]\n"
+ "fmla z28.s, z17.s, z0.s[1]\n"
+ "mov z0.d, z13.d\n"
"fmla z25.s, z17.s, z5.s[1]\n"
- "fmla z20.s, z17.s, z1.s[1]\n"
- "fmla z23.s, z17.s, z1.s[3]\n"
+ "fmla z16.s, z17.s, z1.s[1]\n"
+ "fmla z22.s, z17.s, z1.s[3]\n"
"mov z1.d, z8.d\n"
- "fmla z19.s, z17.s, z0.s[1]\n"
+ "fmla z20.s, z17.s, z0.s[1]\n"
"mov z0.d, z9.d\n"
"fmla z24.s, z31.s, z4.s[2]\n"
- "ld1w { z17.s }, p1/Z, [%x[params], #4, MUL VL]\n"
+ "ld1w { z17.s }, p2/Z, [%x[params], #4, MUL VL]\n"
"fmla z27.s, z31.s, z1.s[2]\n"
- "fmla z26.s, z31.s, z0.s[0]\n"
"mov z1.d, z12.d\n"
+ "fmla z21.s, z31.s, z5.s[0]\n"
+ "fmla z26.s, z31.s, z0.s[0]\n"
"fmla z28.s, z31.s, z0.s[2]\n"
"mov z0.d, z13.d\n"
- "fmla z21.s, z31.s, z5.s[0]\n"
"fmla z25.s, z31.s, z5.s[2]\n"
- "fmla z20.s, z31.s, z1.s[2]\n"
+ "fmla z16.s, z31.s, z1.s[2]\n"
"mov z1.d, z10.d\n"
- "fmla z23.s, z31.s, z0.s[0]\n"
- "fmla z19.s, z31.s, z0.s[2]\n"
+ "fmla z22.s, z31.s, z0.s[0]\n"
+ "fmla z20.s, z31.s, z0.s[2]\n"
"mov z0.d, z11.d\n"
- "ld1w { z29.s }, p1/Z, [%x[params], #5, MUL VL]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #5, MUL VL]\n"
"fmla z24.s, z18.s, z6.s[0]\n"
"fmla z27.s, z18.s, z1.s[0]\n"
"fmla z26.s, z18.s, z1.s[2]\n"
@@ -175,13 +175,13 @@ void sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"mov z0.d, z15.d\n"
"fmla z21.s, z18.s, z6.s[2]\n"
"fmla z25.s, z18.s, z7.s[0]\n"
- "fmla z20.s, z18.s, z1.s[0]\n"
- "fmla z23.s, z18.s, z1.s[2]\n"
+ "fmla z16.s, z18.s, z1.s[0]\n"
+ "fmla z22.s, z18.s, z1.s[2]\n"
"mov z1.d, z10.d\n"
- "fmla z19.s, z18.s, z0.s[0]\n"
+ "fmla z20.s, z18.s, z0.s[0]\n"
"mov z0.d, z11.d\n"
"fmla z24.s, z17.s, z6.s[1]\n"
- "ld1w { z31.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [%x[params], #7, MUL VL]\n"
"fmla z27.s, z17.s, z1.s[1]\n"
"fmla z26.s, z17.s, z1.s[3]\n"
"mov z1.d, z14.d\n"
@@ -189,63 +189,63 @@ void sve_fp32_packed_to_nhwc_3x3_s2_with_multiplier_output3x3_mla_depthfirst_imp
"mov z0.d, z15.d\n"
"fmla z21.s, z17.s, z6.s[3]\n"
"fmla z25.s, z17.s, z7.s[1]\n"
- "fmla z20.s, z17.s, z1.s[1]\n"
- "fmla z23.s, z17.s, z1.s[3]\n"
- "fmla z19.s, z17.s, z0.s[1]\n"
+ "fmla z16.s, z17.s, z1.s[1]\n"
+ "fmla z22.s, z17.s, z1.s[3]\n"
+ "fmla z20.s, z17.s, z0.s[1]\n"
"mov z1.d, z10.d\n"
"mov z0.d, z11.d\n"
"fmla z24.s, z29.s, z6.s[2]\n"
"fmla z27.s, z29.s, z1.s[2]\n"
- "fmin z24.s, p1/M, z24.s, z16.s\n"
+ "mov z1.d, z14.d\n"
+ "fmla z21.s, z29.s, z7.s[0]\n"
"fmla z26.s, z29.s, z0.s[0]\n"
"fmla z28.s, z29.s, z0.s[2]\n"
- "mov z1.d, z14.d\n"
- "fmax z24.s, p1/M, z24.s, z22.s\n"
"mov z0.d, z15.d\n"
- "fmla z21.s, z29.s, z7.s[0]\n"
"fmla z25.s, z29.s, z7.s[2]\n"
- "fmin z21.s, p1/M, z21.s, z16.s\n"
- "fmla z20.s, z29.s, z1.s[2]\n"
- "fmla z23.s, z29.s, z0.s[0]\n"
- "fmin z25.s, p1/M, z25.s, z16.s\n"
- "fmin z27.s, p1/M, z27.s, z16.s\n"
- "fmla z19.s, z29.s, z0.s[2]\n"
- "fmin z26.s, p1/M, z26.s, z16.s\n"
- "fmin z28.s, p1/M, z28.s, z16.s\n"
+ "fmla z16.s, z29.s, z1.s[2]\n"
+ "fmla z22.s, z29.s, z0.s[0]\n"
+ "fmla z20.s, z29.s, z0.s[2]\n"
+ "fmin z24.s, p2/M, z24.s, z19.s\n"
+ "fmin z27.s, p2/M, z27.s, z19.s\n"
+ "fmin z21.s, p2/M, z21.s, z19.s\n"
+ "fmin z26.s, p2/M, z26.s, z19.s\n"
+ "fmin z25.s, p2/M, z25.s, z19.s\n"
+ "fmin z28.s, p2/M, z28.s, z19.s\n"
+ "fmax z24.s, p2/M, z24.s, z23.s\n"
+ "fmin z16.s, p2/M, z16.s, z19.s\n"
+ "fmin z22.s, p2/M, z22.s, z19.s\n"
+ "fmin z20.s, p2/M, z20.s, z19.s\n"
+ "fmax z21.s, p2/M, z21.s, z23.s\n"
+ "fmax z25.s, p2/M, z25.s, z23.s\n"
"st1w { z24.s }, p0, [x28, x12, LSL #2]\n"
- "fmin z20.s, p1/M, z20.s, z16.s\n"
- "fmin z23.s, p1/M, z23.s, z16.s\n"
- "ld1w { z24.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "fmin z19.s, p1/M, z19.s, z16.s\n"
+ "ld1w { z24.s }, p1/Z, [%x[params], #6, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
- "ld1w { z29.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
- "fmax z21.s, p1/M, z21.s, z22.s\n"
- "fmax z25.s, p1/M, z25.s, z22.s\n"
+ "fmax z27.s, p2/M, z27.s, z23.s\n"
+ "fmax z26.s, p2/M, z26.s, z23.s\n"
+ "fmax z28.s, p2/M, z28.s, z23.s\n"
+ "fmax z16.s, p2/M, z16.s, z23.s\n"
+ "fmax z22.s, p2/M, z22.s, z23.s\n"
"st1w { z21.s }, p0, [x27, x12, LSL #2]\n"
"mov z21.d, z24.d\n"
- "fmax z27.s, p1/M, z27.s, z22.s\n"
- "fmax z26.s, p1/M, z26.s, z22.s\n"
+ "fmax z20.s, p2/M, z20.s, z23.s\n"
"st1w { z25.s }, p0, [x26, x12, LSL #2]\n"
"mov z25.d, z24.d\n"
- "fmax z28.s, p1/M, z28.s, z22.s\n"
- "fmax z20.s, p1/M, z20.s, z22.s\n"
+ "ld1w { z30.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
"st1w { z27.s }, p0, [x25, x12, LSL #2]\n"
"mov z27.d, z24.d\n"
- "fmax z23.s, p1/M, z23.s, z22.s\n"
- "fmax z19.s, p1/M, z19.s, z22.s\n"
+ "ld1w { z29.s }, p1/Z, [%x[params], #-7, MUL VL]\n"
+ "addvl %x[params], %x[params], #-6\n"
"st1w { z26.s }, p0, [x24, x12, LSL #2]\n"
"mov z26.d, z24.d\n"
"st1w { z28.s }, p0, [x23, x12, LSL #2]\n"
"mov z28.d, z24.d\n"
- "addvl %x[params], %x[params], #-6\n"
- "st1w { z20.s }, p0, [x22, x12, LSL #2]\n"
- "mov z20.d, z24.d\n"
- "st1w { z23.s }, p0, [x21, x12, LSL #2]\n"
- "mov z23.d, z24.d\n"
- "st1w { z19.s }, p0, [x20, x12, LSL #2]\n"
+ "st1w { z16.s }, p0, [x22, x12, LSL #2]\n"
+ "mov z16.d, z24.d\n"
+ "st1w { z22.s }, p0, [x21, x12, LSL #2]\n"
+ "mov z22.d, z24.d\n"
+ "st1w { z20.s }, p0, [x20, x12, LSL #2]\n"
"incw x12\n"
- "mov z19.d, z24.d\n"
+ "mov z20.d, z24.d\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [channel_multiplier] "r" (n_output_channels), [clamps] "r" (minmax_vals), [inptrs] "r" (inptrs), [outptrs] "r" (outptrs)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
index 84ab4b5035..292fd70fba 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,46 +43,46 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
__asm__ __volatile__(
"mov x15, #0x0\n"
- "whilelt p2.s, x15, %x[channel_multiplier]\n"
+ "ptrue p2.b\n"
"ldr x14, [%x[inptrs], #0x0]\n"
"ldr x13, [%x[inptrs], #0x8]\n"
- "ptrue p1.b\n"
"ldr x12, [%x[inptrs], #0x10]\n"
"ldr x11, [%x[inptrs], #0x18]\n"
"mov x10, #0x0\n"
"ldr x9, [%x[inptrs], #0x20]\n"
"ldr x28, [%x[inptrs], #0x28]\n"
- "ld1w { z16.s }, p2/Z, [%x[params]]\n"
+ "whilelt p1.s, x15, %x[channel_multiplier]\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
- "mov z25.d, z16.d\n"
- "mov z15.d, z16.d\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
+ "ldp x21, x20, [%x[outptrs], #0x30]\n"
+ "ld1w { z16.s }, p1/Z, [%x[params]]\n"
+ "ld1rqw { z2.s }, p2/Z, [x14]\n"
+ "ld1rqw { z3.s }, p2/Z, [x14, #16]\n"
+ "ld1rqw { z4.s }, p2/Z, [x13]\n"
+ "ld1rqw { z5.s }, p2/Z, [x13, #16]\n"
+ "ld1rqw { z6.s }, p2/Z, [x12]\n"
+ "ld1rqw { z7.s }, p2/Z, [x12, #16]\n"
+ "ld1rqw { z8.s }, p2/Z, [x11]\n"
+ "mov z25.d, z16.d\n"
+ "mov z15.d, z16.d\n"
+ "ld1rqw { z9.s }, p2/Z, [x11, #16]\n"
+ "ld1rqw { z10.s }, p2/Z, [x9]\n"
"mov z24.d, z16.d\n"
"mov z14.d, z16.d\n"
- "ldp x21, x20, [%x[outptrs], #0x30]\n"
- "ld1rqw { z2.s }, p1/Z, [x14]\n"
+ "ld1rqw { z11.s }, p2/Z, [x9, #16]\n"
+ "ld1rqw { z12.s }, p2/Z, [x28]\n"
"mov z26.d, z16.d\n"
"mov z17.d, z16.d\n"
- "ld1rqw { z3.s }, p1/Z, [x14, #16]\n"
- "ld1rqw { z4.s }, p1/Z, [x13]\n"
- "mov z23.d, z16.d\n"
- "ld1rqw { z5.s }, p1/Z, [x13, #16]\n"
- "ld1rqw { z6.s }, p1/Z, [x12]\n"
- "ld1rqw { z7.s }, p1/Z, [x12, #16]\n"
- "ld1rqw { z8.s }, p1/Z, [x11]\n"
- "ld1rqw { z9.s }, p1/Z, [x11, #16]\n"
- "ld1rqw { z10.s }, p1/Z, [x9]\n"
- "ld1rqw { z11.s }, p1/Z, [x9, #16]\n"
- "ld1rqw { z12.s }, p1/Z, [x28]\n"
- "ld1rqw { z13.s }, p1/Z, [x28, #16]\n"
- "ld1rw { z21.s }, p1/Z, [%x[clamps]]\n"
- "ld1rw { z22.s }, p1/Z, [%x[clamps], #4]\n"
- "ld1w { z31.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1w { z29.s }, p2/Z, [%x[params], #3, MUL VL]\n"
- "ld1w { z28.s }, p2/Z, [%x[params], #4, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "ld1rqw { z13.s }, p2/Z, [x28, #16]\n"
+ "ld1rw { z23.s }, p2/Z, [%x[clamps]]\n"
+ "mov z21.d, z16.d\n"
+ "ld1rw { z22.s }, p2/Z, [%x[clamps], #4]\n"
+ "ld1w { z31.s }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "ld1w { z29.s }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "ld1w { z28.s }, p1/Z, [%x[params], #4, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [%x[params], #5, MUL VL]\n"
"addvl %x[params], %x[params], #6\n"
"1:" // Output channel complete vector loop
"fmla z16.s, z31.s, z2.s[0]\n"
@@ -92,13 +92,13 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z15.s, z31.s, z2.s[2]\n"
"fmla z24.s, z31.s, z2.s[3]\n"
"mov z1.d, z9.d\n"
- "mov p0.b, p2.b\n"
+ "mov p0.b, p1.b\n"
"fmla z14.s, z31.s, z4.s[0]\n"
"fmla z26.s, z31.s, z4.s[1]\n"
- "whilelt p2.s, x15, %x[channel_multiplier]\n"
"fmla z17.s, z31.s, z4.s[2]\n"
- "fmla z23.s, z31.s, z4.s[3]\n"
- "ld1w { z20.s }, p1/Z, [%x[params]]\n"
+ "fmla z21.s, z31.s, z4.s[3]\n"
+ "ld1w { z20.s }, p2/Z, [%x[params]]\n"
+ "whilelt p1.s, x15, %x[channel_multiplier]\n"
"fmla z16.s, z30.s, z2.s[1]\n"
"fmla z25.s, z30.s, z2.s[2]\n"
"fmla z15.s, z30.s, z2.s[3]\n"
@@ -106,8 +106,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z30.s, z4.s[1]\n"
"fmla z26.s, z30.s, z4.s[2]\n"
"fmla z17.s, z30.s, z4.s[3]\n"
- "fmla z23.s, z30.s, z5.s[0]\n"
- "ld1w { z19.s }, p1/Z, [%x[params], #1, MUL VL]\n"
+ "fmla z21.s, z30.s, z5.s[0]\n"
+ "ld1w { z19.s }, p2/Z, [%x[params], #1, MUL VL]\n"
"fmla z16.s, z29.s, z2.s[2]\n"
"fmla z25.s, z29.s, z2.s[3]\n"
"fmla z15.s, z29.s, z3.s[0]\n"
@@ -115,8 +115,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z29.s, z4.s[2]\n"
"fmla z26.s, z29.s, z4.s[3]\n"
"fmla z17.s, z29.s, z5.s[0]\n"
- "fmla z23.s, z29.s, z5.s[1]\n"
- "ld1w { z18.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "fmla z21.s, z29.s, z5.s[1]\n"
+ "ld1w { z18.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"fmla z16.s, z28.s, z2.s[3]\n"
"fmla z25.s, z28.s, z3.s[0]\n"
"fmla z15.s, z28.s, z3.s[1]\n"
@@ -124,8 +124,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z28.s, z4.s[3]\n"
"fmla z26.s, z28.s, z5.s[0]\n"
"fmla z17.s, z28.s, z5.s[1]\n"
- "fmla z23.s, z28.s, z5.s[2]\n"
- "ld1w { z28.s }, p1/Z, [%x[params], #3, MUL VL]\n"
+ "fmla z21.s, z28.s, z5.s[2]\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #3, MUL VL]\n"
"fmla z16.s, z27.s, z3.s[0]\n"
"fmla z25.s, z27.s, z3.s[1]\n"
"fmla z15.s, z27.s, z3.s[2]\n"
@@ -133,8 +133,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z27.s, z5.s[0]\n"
"fmla z26.s, z27.s, z5.s[1]\n"
"fmla z17.s, z27.s, z5.s[2]\n"
- "fmla z23.s, z27.s, z5.s[3]\n"
- "ld1w { z27.s }, p1/Z, [%x[params], #4, MUL VL]\n"
+ "fmla z21.s, z27.s, z5.s[3]\n"
+ "ld1w { z27.s }, p2/Z, [%x[params], #4, MUL VL]\n"
"fmla z16.s, z20.s, z4.s[0]\n"
"fmla z25.s, z20.s, z4.s[1]\n"
"fmla z15.s, z20.s, z4.s[2]\n"
@@ -142,8 +142,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z20.s, z6.s[0]\n"
"fmla z26.s, z20.s, z6.s[1]\n"
"fmla z17.s, z20.s, z6.s[2]\n"
- "fmla z23.s, z20.s, z6.s[3]\n"
- "ld1w { z20.s }, p1/Z, [%x[params], #5, MUL VL]\n"
+ "fmla z21.s, z20.s, z6.s[3]\n"
+ "ld1w { z20.s }, p2/Z, [%x[params], #5, MUL VL]\n"
"fmla z16.s, z19.s, z4.s[1]\n"
"fmla z25.s, z19.s, z4.s[2]\n"
"fmla z15.s, z19.s, z4.s[3]\n"
@@ -151,8 +151,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z19.s, z6.s[1]\n"
"fmla z26.s, z19.s, z6.s[2]\n"
"fmla z17.s, z19.s, z6.s[3]\n"
- "fmla z23.s, z19.s, z7.s[0]\n"
- "ld1w { z19.s }, p1/Z, [%x[params], #6, MUL VL]\n"
+ "fmla z21.s, z19.s, z7.s[0]\n"
+ "ld1w { z19.s }, p2/Z, [%x[params], #6, MUL VL]\n"
"fmla z16.s, z18.s, z4.s[2]\n"
"fmla z25.s, z18.s, z4.s[3]\n"
"fmla z15.s, z18.s, z5.s[0]\n"
@@ -160,18 +160,18 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z18.s, z6.s[2]\n"
"fmla z26.s, z18.s, z6.s[3]\n"
"fmla z17.s, z18.s, z7.s[0]\n"
- "fmla z23.s, z18.s, z7.s[1]\n"
- "ld1w { z18.s }, p1/Z, [%x[params], #7, MUL VL]\n"
+ "fmla z21.s, z18.s, z7.s[1]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #7, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "fmla z16.s, z28.s, z4.s[3]\n"
- "fmla z25.s, z28.s, z5.s[0]\n"
- "fmla z15.s, z28.s, z5.s[1]\n"
- "fmla z24.s, z28.s, z5.s[2]\n"
- "fmla z14.s, z28.s, z6.s[3]\n"
- "fmla z26.s, z28.s, z7.s[0]\n"
- "fmla z17.s, z28.s, z7.s[1]\n"
- "fmla z23.s, z28.s, z7.s[2]\n"
- "ld1w { z30.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
+ "fmla z16.s, z30.s, z4.s[3]\n"
+ "fmla z25.s, z30.s, z5.s[0]\n"
+ "fmla z15.s, z30.s, z5.s[1]\n"
+ "fmla z24.s, z30.s, z5.s[2]\n"
+ "fmla z14.s, z30.s, z6.s[3]\n"
+ "fmla z26.s, z30.s, z7.s[0]\n"
+ "fmla z17.s, z30.s, z7.s[1]\n"
+ "fmla z21.s, z30.s, z7.s[2]\n"
+ "ld1w { z18.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
"fmla z16.s, z27.s, z5.s[0]\n"
"fmla z25.s, z27.s, z5.s[1]\n"
"fmla z15.s, z27.s, z5.s[2]\n"
@@ -179,8 +179,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z27.s, z7.s[0]\n"
"fmla z26.s, z27.s, z7.s[1]\n"
"fmla z17.s, z27.s, z7.s[2]\n"
- "fmla z23.s, z27.s, z7.s[3]\n"
- "ld1w { z27.s }, p1/Z, [%x[params], #-7, MUL VL]\n"
+ "fmla z21.s, z27.s, z7.s[3]\n"
+ "ld1w { z27.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
"fmla z16.s, z20.s, z6.s[0]\n"
"fmla z25.s, z20.s, z6.s[1]\n"
"fmla z15.s, z20.s, z6.s[2]\n"
@@ -188,8 +188,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z20.s, z0.s[0]\n"
"fmla z26.s, z20.s, z0.s[1]\n"
"fmla z17.s, z20.s, z0.s[2]\n"
- "fmla z23.s, z20.s, z0.s[3]\n"
- "ld1w { z20.s }, p1/Z, [%x[params], #-6, MUL VL]\n"
+ "fmla z21.s, z20.s, z0.s[3]\n"
+ "ld1w { z20.s }, p2/Z, [%x[params], #-6, MUL VL]\n"
"fmla z16.s, z19.s, z6.s[1]\n"
"fmla z25.s, z19.s, z6.s[2]\n"
"fmla z15.s, z19.s, z6.s[3]\n"
@@ -197,26 +197,26 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z19.s, z0.s[1]\n"
"fmla z26.s, z19.s, z0.s[2]\n"
"fmla z17.s, z19.s, z0.s[3]\n"
- "fmla z23.s, z19.s, z1.s[0]\n"
- "ld1w { z19.s }, p1/Z, [%x[params], #-5, MUL VL]\n"
- "fmla z16.s, z18.s, z6.s[2]\n"
- "fmla z25.s, z18.s, z6.s[3]\n"
- "fmla z15.s, z18.s, z7.s[0]\n"
- "fmla z24.s, z18.s, z7.s[1]\n"
- "fmla z14.s, z18.s, z0.s[2]\n"
- "fmla z26.s, z18.s, z0.s[3]\n"
- "fmla z17.s, z18.s, z1.s[0]\n"
- "fmla z23.s, z18.s, z1.s[1]\n"
- "ld1w { z18.s }, p1/Z, [%x[params], #-4, MUL VL]\n"
- "fmla z16.s, z30.s, z6.s[3]\n"
- "fmla z25.s, z30.s, z7.s[0]\n"
- "fmla z15.s, z30.s, z7.s[1]\n"
- "fmla z24.s, z30.s, z7.s[2]\n"
- "fmla z14.s, z30.s, z0.s[3]\n"
- "fmla z26.s, z30.s, z1.s[0]\n"
- "fmla z17.s, z30.s, z1.s[1]\n"
- "fmla z23.s, z30.s, z1.s[2]\n"
- "ld1w { z31.s }, p1/Z, [%x[params], #-3, MUL VL]\n"
+ "fmla z21.s, z19.s, z1.s[0]\n"
+ "ld1w { z19.s }, p2/Z, [%x[params], #-5, MUL VL]\n"
+ "fmla z16.s, z29.s, z6.s[2]\n"
+ "fmla z25.s, z29.s, z6.s[3]\n"
+ "fmla z15.s, z29.s, z7.s[0]\n"
+ "fmla z24.s, z29.s, z7.s[1]\n"
+ "fmla z14.s, z29.s, z0.s[2]\n"
+ "fmla z26.s, z29.s, z0.s[3]\n"
+ "fmla z17.s, z29.s, z1.s[0]\n"
+ "fmla z21.s, z29.s, z1.s[1]\n"
+ "ld1w { z30.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
+ "fmla z16.s, z18.s, z6.s[3]\n"
+ "fmla z25.s, z18.s, z7.s[0]\n"
+ "fmla z15.s, z18.s, z7.s[1]\n"
+ "fmla z24.s, z18.s, z7.s[2]\n"
+ "fmla z14.s, z18.s, z0.s[3]\n"
+ "fmla z26.s, z18.s, z1.s[0]\n"
+ "fmla z17.s, z18.s, z1.s[1]\n"
+ "fmla z21.s, z18.s, z1.s[2]\n"
+ "ld1w { z28.s }, p2/Z, [%x[params], #-3, MUL VL]\n"
"fmla z16.s, z27.s, z7.s[0]\n"
"fmla z25.s, z27.s, z7.s[1]\n"
"fmla z15.s, z27.s, z7.s[2]\n"
@@ -224,8 +224,8 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z27.s, z1.s[0]\n"
"fmla z26.s, z27.s, z1.s[1]\n"
"fmla z17.s, z27.s, z1.s[2]\n"
- "fmla z23.s, z27.s, z1.s[3]\n"
- "ld1w { z27.s }, p1/Z, [%x[params], #-2, MUL VL]\n"
+ "fmla z21.s, z27.s, z1.s[3]\n"
+ "ld1w { z27.s }, p2/Z, [%x[params], #-2, MUL VL]\n"
"fmla z16.s, z20.s, z0.s[0]\n"
"fmla z25.s, z20.s, z0.s[1]\n"
"fmla z15.s, z20.s, z0.s[2]\n"
@@ -234,47 +234,47 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z20.s, z0.s[0]\n"
"fmla z26.s, z20.s, z0.s[1]\n"
"fmla z17.s, z20.s, z0.s[2]\n"
- "fmla z23.s, z20.s, z0.s[3]\n"
+ "fmla z21.s, z20.s, z0.s[3]\n"
"mov z0.d, z8.d\n"
- "ld1w { z20.s }, p1/Z, [%x[params], #-1, MUL VL]\n"
+ "ld1w { z31.s }, p2/Z, [%x[params], #-1, MUL VL]\n"
"fmla z16.s, z19.s, z0.s[1]\n"
"fmla z25.s, z19.s, z0.s[2]\n"
"fmla z15.s, z19.s, z0.s[3]\n"
"fmla z24.s, z19.s, z1.s[0]\n"
- "mov z1.d, z10.d\n"
- "mov z0.d, z11.d\n"
- "fmla z14.s, z19.s, z1.s[1]\n"
- "fmla z26.s, z19.s, z1.s[2]\n"
- "fmla z17.s, z19.s, z1.s[3]\n"
- "fmla z23.s, z19.s, z0.s[0]\n"
- "mov z1.d, z8.d\n"
- "ld1w { z19.s }, p1/Z, [%x[params]]\n"
- "mov z0.d, z9.d\n"
- "fmla z16.s, z18.s, z1.s[2]\n"
- "fmla z25.s, z18.s, z1.s[3]\n"
- "fmla z15.s, z18.s, z0.s[0]\n"
- "fmla z24.s, z18.s, z0.s[1]\n"
- "mov z1.d, z10.d\n"
- "mov z0.d, z11.d\n"
- "fmla z14.s, z18.s, z1.s[2]\n"
- "fmla z26.s, z18.s, z1.s[3]\n"
- "fmla z17.s, z18.s, z0.s[0]\n"
- "fmla z23.s, z18.s, z0.s[1]\n"
- "mov z1.d, z8.d\n"
- "ld1w { z18.s }, p1/Z, [%x[params], #1, MUL VL]\n"
- "mov z0.d, z9.d\n"
- "fmla z16.s, z31.s, z1.s[3]\n"
- "fmla z25.s, z31.s, z0.s[0]\n"
- "fmla z15.s, z31.s, z0.s[1]\n"
- "fmla z24.s, z31.s, z0.s[2]\n"
"mov z0.d, z10.d\n"
"mov z1.d, z11.d\n"
- "fmla z14.s, z31.s, z0.s[3]\n"
- "fmla z26.s, z31.s, z1.s[0]\n"
- "fmla z17.s, z31.s, z1.s[1]\n"
- "fmla z23.s, z31.s, z1.s[2]\n"
+ "fmla z14.s, z19.s, z0.s[1]\n"
+ "fmla z26.s, z19.s, z0.s[2]\n"
+ "fmla z17.s, z19.s, z0.s[3]\n"
+ "mov z0.d, z8.d\n"
+ "fmla z21.s, z19.s, z1.s[0]\n"
+ "ld1w { z20.s }, p2/Z, [%x[params]]\n"
+ "mov z1.d, z9.d\n"
+ "fmla z16.s, z30.s, z0.s[2]\n"
+ "fmla z25.s, z30.s, z0.s[3]\n"
+ "mov z0.d, z10.d\n"
+ "fmla z15.s, z30.s, z1.s[0]\n"
+ "fmla z24.s, z30.s, z1.s[1]\n"
+ "mov z1.d, z11.d\n"
+ "fmla z14.s, z30.s, z0.s[2]\n"
+ "fmla z26.s, z30.s, z0.s[3]\n"
+ "mov z0.d, z8.d\n"
+ "fmla z17.s, z30.s, z1.s[0]\n"
+ "fmla z21.s, z30.s, z1.s[1]\n"
+ "ld1w { z19.s }, p2/Z, [%x[params], #1, MUL VL]\n"
"mov z1.d, z9.d\n"
- "ld1w { z28.s }, p1/Z, [%x[params], #2, MUL VL]\n"
+ "fmla z16.s, z28.s, z0.s[3]\n"
+ "mov z0.d, z10.d\n"
+ "fmla z25.s, z28.s, z1.s[0]\n"
+ "fmla z15.s, z28.s, z1.s[1]\n"
+ "fmla z24.s, z28.s, z1.s[2]\n"
+ "mov z1.d, z11.d\n"
+ "fmla z14.s, z28.s, z0.s[3]\n"
+ "fmla z26.s, z28.s, z1.s[0]\n"
+ "fmla z17.s, z28.s, z1.s[1]\n"
+ "fmla z21.s, z28.s, z1.s[2]\n"
+ "mov z1.d, z9.d\n"
+ "ld1w { z18.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"fmla z16.s, z27.s, z1.s[0]\n"
"fmla z25.s, z27.s, z1.s[1]\n"
"fmla z15.s, z27.s, z1.s[2]\n"
@@ -283,102 +283,102 @@ void sve_fp32_packed_to_nhwc_5x5_s1_with_multiplier_output2x4_mla_depthfirst_imp
"fmla z14.s, z27.s, z1.s[0]\n"
"fmla z26.s, z27.s, z1.s[1]\n"
"fmla z17.s, z27.s, z1.s[2]\n"
- "fmla z23.s, z27.s, z1.s[3]\n"
- "ld1w { z27.s }, p1/Z, [%x[params], #3, MUL VL]\n"
- "fmla z16.s, z20.s, z0.s[0]\n"
- "fmla z25.s, z20.s, z0.s[1]\n"
- "fmla z15.s, z20.s, z0.s[2]\n"
- "fmla z24.s, z20.s, z0.s[3]\n"
+ "fmla z21.s, z27.s, z1.s[3]\n"
+ "fmla z16.s, z31.s, z0.s[0]\n"
+ "ld1w { z28.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "fmla z25.s, z31.s, z0.s[1]\n"
+ "fmla z15.s, z31.s, z0.s[2]\n"
+ "fmla z24.s, z31.s, z0.s[3]\n"
"mov z0.d, z12.d\n"
- "fmla z14.s, z20.s, z0.s[0]\n"
- "fmla z26.s, z20.s, z0.s[1]\n"
- "fmla z17.s, z20.s, z0.s[2]\n"
- "fmla z23.s, z20.s, z0.s[3]\n"
+ "fmla z14.s, z31.s, z0.s[0]\n"
+ "fmla z26.s, z31.s, z0.s[1]\n"
+ "fmla z17.s, z31.s, z0.s[2]\n"
+ "fmla z21.s, z31.s, z0.s[3]\n"
"mov z0.d, z10.d\n"
- "ld1w { z31.s }, p2/Z, [%x[params], #5, MUL VL]\n"
- "fmla z16.s, z19.s, z0.s[1]\n"
- "fmla z25.s, z19.s, z0.s[2]\n"
- "fmla z15.s, z19.s, z0.s[3]\n"
- "fmla z24.s, z19.s, z1.s[0]\n"
- "mov z1.d, z12.d\n"
- "mov z0.d, z13.d\n"
- "fmla z14.s, z19.s, z1.s[1]\n"
- "fmla z26.s, z19.s, z1.s[2]\n"
- "fmla z17.s, z19.s, z1.s[3]\n"
- "fmla z23.s, z19.s, z0.s[0]\n"
- "mov z1.d, z10.d\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "mov z0.d, z11.d\n"
- "fmla z16.s, z18.s, z1.s[2]\n"
- "fmla z25.s, z18.s, z1.s[3]\n"
- "fmla z15.s, z18.s, z0.s[0]\n"
- "fmla z24.s, z18.s, z0.s[1]\n"
- "mov z1.d, z12.d\n"
- "mov z0.d, z13.d\n"
- "fmla z14.s, z18.s, z1.s[2]\n"
- "fmla z26.s, z18.s, z1.s[3]\n"
- "fmla z17.s, z18.s, z0.s[0]\n"
- "fmla z23.s, z18.s, z0.s[1]\n"
- "mov z1.d, z10.d\n"
- "ld1w { z29.s }, p2/Z, [%x[params], #7, MUL VL]\n"
- "mov z0.d, z11.d\n"
- "fmla z16.s, z28.s, z1.s[3]\n"
- "fmla z25.s, z28.s, z0.s[0]\n"
- "fmla z15.s, z28.s, z0.s[1]\n"
- "fmla z24.s, z28.s, z0.s[2]\n"
- "mov z0.d, z13.d\n"
- "mov z1.d, z12.d\n"
- "fmla z26.s, z28.s, z0.s[0]\n"
- "fmla z17.s, z28.s, z0.s[1]\n"
- "fmla z23.s, z28.s, z0.s[2]\n"
+ "ld1w { z31.s }, p1/Z, [%x[params], #5, MUL VL]\n"
+ "fmla z16.s, z20.s, z0.s[1]\n"
+ "fmla z25.s, z20.s, z0.s[2]\n"
+ "fmla z15.s, z20.s, z0.s[3]\n"
+ "mov z0.d, z12.d\n"
+ "fmla z24.s, z20.s, z1.s[0]\n"
+ "mov z1.d, z13.d\n"
+ "fmla z14.s, z20.s, z0.s[1]\n"
+ "fmla z26.s, z20.s, z0.s[2]\n"
+ "fmla z17.s, z20.s, z0.s[3]\n"
+ "mov z0.d, z10.d\n"
+ "fmla z21.s, z20.s, z1.s[0]\n"
+ "mov z1.d, z11.d\n"
+ "ld1w { z30.s }, p1/Z, [%x[params], #6, MUL VL]\n"
+ "fmla z16.s, z19.s, z0.s[2]\n"
+ "fmla z25.s, z19.s, z0.s[3]\n"
+ "mov z0.d, z12.d\n"
+ "fmla z15.s, z19.s, z1.s[0]\n"
+ "fmla z24.s, z19.s, z1.s[1]\n"
+ "mov z1.d, z13.d\n"
+ "fmla z14.s, z19.s, z0.s[2]\n"
+ "fmla z26.s, z19.s, z0.s[3]\n"
+ "mov z0.d, z10.d\n"
+ "fmla z17.s, z19.s, z1.s[0]\n"
+ "fmla z21.s, z19.s, z1.s[1]\n"
+ "mov z1.d, z11.d\n"
+ "ld1w { z29.s }, p1/Z, [%x[params], #7, MUL VL]\n"
+ "fmla z16.s, z18.s, z0.s[3]\n"
+ "mov z0.d, z12.d\n"
+ "fmla z25.s, z18.s, z1.s[0]\n"
+ "fmla z15.s, z18.s, z1.s[1]\n"
+ "fmla z24.s, z18.s, z1.s[2]\n"
+ "mov z1.d, z13.d\n"
+ "fmla z14.s, z18.s, z0.s[3]\n"
+ "fmla z26.s, z18.s, z1.s[0]\n"
+ "fmla z17.s, z18.s, z1.s[1]\n"
+ "fmla z21.s, z18.s, z1.s[2]\n"
"mov z0.d, z11.d\n"
- "fmla z14.s, z28.s, z1.s[3]\n"
- "fmla z16.s, z27.s, z0.s[0]\n"
- "fmla z25.s, z27.s, z0.s[1]\n"
- "fmin z16.s, p1/M, z16.s, z22.s\n"
- "fmax z16.s, p1/M, z16.s, z21.s\n"
- "fmla z15.s, z27.s, z0.s[2]\n"
- "fmla z24.s, z27.s, z0.s[3]\n"
+ "fmla z16.s, z28.s, z0.s[0]\n"
+ "fmla z25.s, z28.s, z0.s[1]\n"
+ "fmla z15.s, z28.s, z0.s[2]\n"
+ "fmla z24.s, z28.s, z0.s[3]\n"
"mov z0.d, z13.d\n"
- "fmin z25.s, p1/M, z25.s, z22.s\n"
- "fmla z14.s, z27.s, z0.s[0]\n"
- "fmla z26.s, z27.s, z0.s[1]\n"
- "fmin z15.s, p1/M, z15.s, z22.s\n"
- "fmin z24.s, p1/M, z24.s, z22.s\n"
- "fmla z17.s, z27.s, z0.s[2]\n"
- "fmla z23.s, z27.s, z0.s[3]\n"
- "fmin z14.s, p1/M, z14.s, z22.s\n"
- "fmin z26.s, p1/M, z26.s, z22.s\n"
- "fmin z17.s, p1/M, z17.s, z22.s\n"
- "fmin z23.s, p1/M, z23.s, z22.s\n"
+ "fmla z14.s, z28.s, z0.s[0]\n"
+ "fmla z26.s, z28.s, z0.s[1]\n"
+ "fmla z17.s, z28.s, z0.s[2]\n"
+ "fmla z21.s, z28.s, z0.s[3]\n"
+ "fmin z16.s, p2/M, z16.s, z22.s\n"
+ "fmin z25.s, p2/M, z25.s, z22.s\n"
+ "fmin z15.s, p2/M, z15.s, z22.s\n"
+ "fmin z24.s, p2/M, z24.s, z22.s\n"
+ "fmin z14.s, p2/M, z14.s, z22.s\n"
+ "fmax z16.s, p2/M, z16.s, z23.s\n"
+ "fmin z26.s, p2/M, z26.s, z22.s\n"
+ "fmin z17.s, p2/M, z17.s, z22.s\n"
+ "fmin z21.s, p2/M, z21.s, z22.s\n"
+ "fmax z25.s, p2/M, z25.s, z23.s\n"
"st1w { z16.s }, p0, [x27, x10, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [%x[params], #4, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [%x[params], #4, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "fmax z25.s, p1/M, z25.s, z21.s\n"
+ "fmax z15.s, p2/M, z15.s, z23.s\n"
+ "fmax z24.s, p2/M, z24.s, z23.s\n"
+ "fmax z14.s, p2/M, z14.s, z23.s\n"
+ "fmax z26.s, p2/M, z26.s, z23.s\n"
+ "fmax z17.s, p2/M, z17.s, z23.s\n"
"st1w { z25.s }, p0, [x26, x10, LSL #2]\n"
"mov z25.d, z16.d\n"
- "fmax z15.s, p1/M, z15.s, z21.s\n"
- "fmax z24.s, p1/M, z24.s, z21.s\n"
+ "fmax z21.s, p2/M, z21.s, z23.s\n"
"st1w { z15.s }, p0, [x25, x10, LSL #2]\n"
"mov z15.d, z16.d\n"
- "fmax z14.s, p1/M, z14.s, z21.s\n"
- "fmax z26.s, p1/M, z26.s, z21.s\n"
+ "ld1w { z28.s }, p1/Z, [%x[params], #-8, MUL VL]\n"
"st1w { z24.s }, p0, [x24, x10, LSL #2]\n"
"mov z24.d, z16.d\n"
- "fmax z17.s, p1/M, z17.s, z21.s\n"
- "fmax z23.s, p1/M, z23.s, z21.s\n"
+ "ld1w { z27.s }, p1/Z, [%x[params], #-7, MUL VL]\n"
+ "addvl %x[params], %x[params], #-6\n"
"st1w { z14.s }, p0, [x23, x10, LSL #2]\n"
"mov z14.d, z16.d\n"
"st1w { z26.s }, p0, [x22, x10, LSL #2]\n"
"mov z26.d, z16.d\n"
- "ld1w { z28.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
- "ld1w { z27.s }, p2/Z, [%x[params], #-7, MUL VL]\n"
"st1w { z17.s }, p0, [x21, x10, LSL #2]\n"
"mov z17.d, z16.d\n"
- "addvl %x[params], %x[params], #-6\n"
- "st1w { z23.s }, p0, [x20, x10, LSL #2]\n"
+ "st1w { z21.s }, p0, [x20, x10, LSL #2]\n"
"incw x10\n"
- "mov z23.d, z16.d\n"
+ "mov z21.d, z16.d\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [channel_multiplier] "r" (n_output_channels), [clamps] "r" (minmax_vals), [inptrs] "r" (inptrs), [outptrs] "r" (outptrs)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
index 1770ec182c..7681f346ea 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,8 +46,8 @@ void sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
__asm__ __volatile__(
"ptrue p1.b\n"
"mov x9, #0x0\n"
- "ld1rw { z15.s }, p1/Z, [%x[minmax_vals]]\n"
- "ld1rw { z14.s }, p1/Z, [%x[minmax_vals], #4]\n"
+ "ld1rw { z10.s }, p1/Z, [%x[minmax_vals]]\n"
+ "ld1rw { z9.s }, p1/Z, [%x[minmax_vals], #4]\n"
"whilelt p0.s, x9, %x[n_output_channels]\n"
"1:" // Output channel loop
"mov z31.b, #0x0\n"
@@ -55,178 +55,178 @@ void sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"ld1w { z31.s }, p0/Z, [%x[bias], x9, LSL #2]\n"
"2:" // Output channel loop: Load bias: Done
"mov x23, %x[inptrs]\n"
- "ldp x21, x20, [x23], #0x10\n"
"lsr x22, %x[kernel_points], #0x1\n"
"mov z16.d, z31.d\n"
"mov z17.d, z31.d\n"
"mov z18.d, z31.d\n"
- "ld1rqw { z6.s }, p1/Z, [x21]\n"
- "ld1rqw { z5.s }, p1/Z, [x21, #16]\n"
"mov z19.d, z31.d\n"
+ "ld1w { z13.s }, p1/Z, [%x[weights]]\n"
+ "addvl %x[weights], %x[weights], #1\n"
"mov z20.d, z31.d\n"
- "ld1rqw { z1.s }, p1/Z, [x20]\n"
- "ld1rqw { z2.s }, p1/Z, [x20, #16]\n"
"mov z21.d, z31.d\n"
+ "ldp x21, x20, [x23], #0x10\n"
"mov z22.d, z31.d\n"
- "ld1w { z8.s }, p1/Z, [%x[weights]]\n"
- "addvl %x[weights], %x[weights], #1\n"
"mov z23.d, z31.d\n"
"mov z24.d, z31.d\n"
"mov z25.d, z31.d\n"
"mov z26.d, z31.d\n"
"mov z27.d, z31.d\n"
+ "ld1rqw { z7.s }, p1/Z, [x21]\n"
+ "ld1rqw { z6.s }, p1/Z, [x21, #16]\n"
"mov z28.d, z31.d\n"
"mov z29.d, z31.d\n"
+ "ld1rqw { z1.s }, p1/Z, [x20]\n"
+ "ld1rqw { z2.s }, p1/Z, [x20, #16]\n"
"mov z30.d, z31.d\n"
"mov z31.d, z31.d\n"
"cbz x22, 6f\n"
"ldp x21, x20, [x23], #0x10\n"
"subs x22, x22, #0x1\n"
- "ld1rqw { z0.s }, p1/Z, [x21]\n"
- "ld1rqw { z4.s }, p1/Z, [x21, #16]\n"
- "ld1rqw { z7.s }, p1/Z, [x20]\n"
- "ld1rqw { z3.s }, p1/Z, [x20, #16]\n"
- "ld1w { z11.s }, p1/Z, [%x[weights]]\n"
+ "ld1w { z8.s }, p1/Z, [%x[weights]]\n"
"addvl %x[weights], %x[weights], #1\n"
+ "ld1rqw { z5.s }, p1/Z, [x21]\n"
+ "ld1rqw { z3.s }, p1/Z, [x21, #16]\n"
+ "ld1rqw { z0.s }, p1/Z, [x20]\n"
+ "ld1rqw { z4.s }, p1/Z, [x20, #16]\n"
"beq 4f\n"
"3:" // Output channel loop: Kernel loop
"ldp x21, x20, [x23], #0x10\n"
- "fmla z16.s, z8.s, z6.s[0]\n"
- "fmla z17.s, z8.s, z6.s[1]\n"
+ "fmla z16.s, z13.s, z7.s[0]\n"
+ "fmla z17.s, z13.s, z7.s[1]\n"
"subs x22, x22, #0x1\n"
- "fmla z18.s, z8.s, z6.s[2]\n"
- "fmla z19.s, z8.s, z6.s[3]\n"
- "ld1rqw { z6.s }, p1/Z, [x21]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "fmla z21.s, z8.s, z5.s[1]\n"
- "fmla z22.s, z8.s, z5.s[2]\n"
- "fmla z23.s, z8.s, z5.s[3]\n"
- "ld1rqw { z5.s }, p1/Z, [x21, #16]\n"
- "fmla z24.s, z8.s, z1.s[0]\n"
- "fmla z25.s, z8.s, z1.s[1]\n"
- "fmla z26.s, z8.s, z1.s[2]\n"
- "fmla z27.s, z8.s, z1.s[3]\n"
+ "fmla z18.s, z13.s, z7.s[2]\n"
+ "fmla z19.s, z13.s, z7.s[3]\n"
+ "fmla z20.s, z13.s, z6.s[0]\n"
+ "fmla z21.s, z13.s, z6.s[1]\n"
+ "fmla z22.s, z13.s, z6.s[2]\n"
+ "fmla z23.s, z13.s, z6.s[3]\n"
+ "ld1rqw { z7.s }, p1/Z, [x21]\n"
+ "ld1rqw { z6.s }, p1/Z, [x21, #16]\n"
+ "fmla z24.s, z13.s, z1.s[0]\n"
+ "fmla z25.s, z13.s, z1.s[1]\n"
+ "fmla z26.s, z13.s, z1.s[2]\n"
+ "fmla z27.s, z13.s, z1.s[3]\n"
"ld1rqw { z1.s }, p1/Z, [x20]\n"
- "fmla z28.s, z8.s, z2.s[0]\n"
- "fmla z29.s, z8.s, z2.s[1]\n"
- "fmla z30.s, z8.s, z2.s[2]\n"
- "fmla z31.s, z8.s, z2.s[3]\n"
+ "fmla z28.s, z13.s, z2.s[0]\n"
+ "fmla z29.s, z13.s, z2.s[1]\n"
+ "fmla z30.s, z13.s, z2.s[2]\n"
+ "fmla z31.s, z13.s, z2.s[3]\n"
"ld1rqw { z2.s }, p1/Z, [x20, #16]\n"
"ldp x21, x20, [x23], #0x10\n"
- "ld1w { z8.s }, p1/Z, [%x[weights]]\n"
- "fmla z16.s, z11.s, z0.s[0]\n"
- "fmla z17.s, z11.s, z0.s[1]\n"
- "fmla z18.s, z11.s, z0.s[2]\n"
- "fmla z19.s, z11.s, z0.s[3]\n"
- "ld1rqw { z0.s }, p1/Z, [x21]\n"
- "fmla z20.s, z11.s, z4.s[0]\n"
- "fmla z21.s, z11.s, z4.s[1]\n"
- "fmla z22.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z4.s[3]\n"
- "ld1rqw { z4.s }, p1/Z, [x21, #16]\n"
- "fmla z24.s, z11.s, z7.s[0]\n"
- "fmla z25.s, z11.s, z7.s[1]\n"
- "fmla z26.s, z11.s, z7.s[2]\n"
- "fmla z27.s, z11.s, z7.s[3]\n"
- "ld1rqw { z7.s }, p1/Z, [x20]\n"
- "fmla z28.s, z11.s, z3.s[0]\n"
- "fmla z29.s, z11.s, z3.s[1]\n"
- "fmla z30.s, z11.s, z3.s[2]\n"
- "fmla z31.s, z11.s, z3.s[3]\n"
- "ld1rqw { z3.s }, p1/Z, [x20, #16]\n"
- "ld1w { z11.s }, p1/Z, [%x[weights], #1, MUL VL]\n"
+ "ld1w { z13.s }, p1/Z, [%x[weights]]\n"
+ "fmla z16.s, z8.s, z5.s[0]\n"
+ "fmla z17.s, z8.s, z5.s[1]\n"
+ "fmla z18.s, z8.s, z5.s[2]\n"
+ "fmla z19.s, z8.s, z5.s[3]\n"
+ "fmla z20.s, z8.s, z3.s[0]\n"
+ "fmla z21.s, z8.s, z3.s[1]\n"
+ "ld1rqw { z5.s }, p1/Z, [x21]\n"
+ "fmla z22.s, z8.s, z3.s[2]\n"
+ "fmla z23.s, z8.s, z3.s[3]\n"
+ "ld1rqw { z3.s }, p1/Z, [x21, #16]\n"
+ "fmla z24.s, z8.s, z0.s[0]\n"
+ "fmla z25.s, z8.s, z0.s[1]\n"
+ "fmla z26.s, z8.s, z0.s[2]\n"
+ "fmla z27.s, z8.s, z0.s[3]\n"
+ "ld1rqw { z0.s }, p1/Z, [x20]\n"
+ "fmla z28.s, z8.s, z4.s[0]\n"
+ "fmla z29.s, z8.s, z4.s[1]\n"
+ "fmla z30.s, z8.s, z4.s[2]\n"
+ "fmla z31.s, z8.s, z4.s[3]\n"
+ "ld1rqw { z4.s }, p1/Z, [x20, #16]\n"
+ "ld1w { z8.s }, p1/Z, [%x[weights], #1, MUL VL]\n"
"addvl %x[weights], %x[weights], #2\n"
"bgt 3b\n"
"4:" // Output channel loop: Kernel loop tail
"tbnz %x[kernel_points], #0, 5f\n"
- "fmla z16.s, z8.s, z6.s[0]\n"
- "fmla z17.s, z8.s, z6.s[1]\n"
+ "fmla z16.s, z13.s, z7.s[0]\n"
+ "fmla z17.s, z13.s, z7.s[1]\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "fmla z18.s, z8.s, z6.s[2]\n"
- "fmla z19.s, z8.s, z6.s[3]\n"
+ "fmla z18.s, z13.s, z7.s[2]\n"
+ "fmla z19.s, z13.s, z7.s[3]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "fmla z21.s, z8.s, z5.s[1]\n"
+ "fmla z20.s, z13.s, z6.s[0]\n"
+ "fmla z21.s, z13.s, z6.s[1]\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "fmla z22.s, z8.s, z5.s[2]\n"
- "fmla z23.s, z8.s, z5.s[3]\n"
+ "fmla z22.s, z13.s, z6.s[2]\n"
+ "fmla z23.s, z13.s, z6.s[3]\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "fmla z24.s, z8.s, z1.s[0]\n"
- "fmla z25.s, z8.s, z1.s[1]\n"
- "fmla z26.s, z8.s, z1.s[2]\n"
- "fmla z27.s, z8.s, z1.s[3]\n"
- "fmla z28.s, z8.s, z2.s[0]\n"
- "fmla z29.s, z8.s, z2.s[1]\n"
- "fmla z30.s, z8.s, z2.s[2]\n"
- "fmla z31.s, z8.s, z2.s[3]\n"
- "fmla z16.s, z11.s, z0.s[0]\n"
- "fmla z17.s, z11.s, z0.s[1]\n"
- "fmin z16.s, p1/M, z16.s, z14.s\n"
- "fmin z17.s, p1/M, z17.s, z14.s\n"
- "fmla z18.s, z11.s, z0.s[2]\n"
- "fmla z19.s, z11.s, z0.s[3]\n"
- "fmin z18.s, p1/M, z18.s, z14.s\n"
- "fmin z19.s, p1/M, z19.s, z14.s\n"
- "fmla z20.s, z11.s, z4.s[0]\n"
- "fmla z21.s, z11.s, z4.s[1]\n"
- "fmin z20.s, p1/M, z20.s, z14.s\n"
- "fmin z21.s, p1/M, z21.s, z14.s\n"
- "fmla z22.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z4.s[3]\n"
- "fmin z22.s, p1/M, z22.s, z14.s\n"
- "fmin z23.s, p1/M, z23.s, z14.s\n"
- "fmla z24.s, z11.s, z7.s[0]\n"
- "fmla z25.s, z11.s, z7.s[1]\n"
- "fmax z16.s, p1/M, z16.s, z15.s\n"
- "fmax z17.s, p1/M, z17.s, z15.s\n"
- "fmla z26.s, z11.s, z7.s[2]\n"
- "fmla z27.s, z11.s, z7.s[3]\n"
- "fmax z18.s, p1/M, z18.s, z15.s\n"
- "fmax z19.s, p1/M, z19.s, z15.s\n"
- "fmla z28.s, z11.s, z3.s[0]\n"
- "fmla z29.s, z11.s, z3.s[1]\n"
- "fmax z20.s, p1/M, z20.s, z15.s\n"
- "fmax z21.s, p1/M, z21.s, z15.s\n"
- "fmla z30.s, z11.s, z3.s[2]\n"
- "fmla z31.s, z11.s, z3.s[3]\n"
- "fmax z22.s, p1/M, z22.s, z15.s\n"
- "fmax z23.s, p1/M, z23.s, z15.s\n"
- "fmin z24.s, p1/M, z24.s, z14.s\n"
- "fmin z25.s, p1/M, z25.s, z14.s\n"
+ "fmla z24.s, z13.s, z1.s[0]\n"
+ "fmla z25.s, z13.s, z1.s[1]\n"
+ "fmla z26.s, z13.s, z1.s[2]\n"
+ "fmla z27.s, z13.s, z1.s[3]\n"
+ "fmla z28.s, z13.s, z2.s[0]\n"
+ "fmla z29.s, z13.s, z2.s[1]\n"
+ "fmla z30.s, z13.s, z2.s[2]\n"
+ "fmla z31.s, z13.s, z2.s[3]\n"
+ "fmla z16.s, z8.s, z5.s[0]\n"
+ "fmla z17.s, z8.s, z5.s[1]\n"
+ "fmla z18.s, z8.s, z5.s[2]\n"
+ "fmla z19.s, z8.s, z5.s[3]\n"
+ "fmla z20.s, z8.s, z3.s[0]\n"
+ "fmla z21.s, z8.s, z3.s[1]\n"
+ "fmla z22.s, z8.s, z3.s[2]\n"
+ "fmla z23.s, z8.s, z3.s[3]\n"
+ "fmla z24.s, z8.s, z0.s[0]\n"
+ "fmla z25.s, z8.s, z0.s[1]\n"
+ "fmin z16.s, p1/M, z16.s, z9.s\n"
+ "fmin z17.s, p1/M, z17.s, z9.s\n"
+ "fmla z26.s, z8.s, z0.s[2]\n"
+ "fmla z27.s, z8.s, z0.s[3]\n"
+ "fmin z18.s, p1/M, z18.s, z9.s\n"
+ "fmin z19.s, p1/M, z19.s, z9.s\n"
+ "fmla z28.s, z8.s, z4.s[0]\n"
+ "fmla z29.s, z8.s, z4.s[1]\n"
+ "fmin z20.s, p1/M, z20.s, z9.s\n"
+ "fmin z21.s, p1/M, z21.s, z9.s\n"
+ "fmla z30.s, z8.s, z4.s[2]\n"
+ "fmla z31.s, z8.s, z4.s[3]\n"
+ "fmin z22.s, p1/M, z22.s, z9.s\n"
+ "fmin z23.s, p1/M, z23.s, z9.s\n"
+ "fmax z16.s, p1/M, z16.s, z10.s\n"
+ "fmax z17.s, p1/M, z17.s, z10.s\n"
+ "fmax z18.s, p1/M, z18.s, z10.s\n"
+ "fmax z19.s, p1/M, z19.s, z10.s\n"
+ "fmax z20.s, p1/M, z20.s, z10.s\n"
+ "fmax z21.s, p1/M, z21.s, z10.s\n"
+ "fmax z22.s, p1/M, z22.s, z10.s\n"
+ "fmax z23.s, p1/M, z23.s, z10.s\n"
"st1w { z16.s }, p0, [x27, x9, LSL #2]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "fmin z26.s, p1/M, z26.s, z14.s\n"
- "fmin z27.s, p1/M, z27.s, z14.s\n"
+ "fmin z24.s, p1/M, z24.s, z9.s\n"
+ "fmin z25.s, p1/M, z25.s, z9.s\n"
"st1w { z17.s }, p0, [x26, x9, LSL #2]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "fmin z28.s, p1/M, z28.s, z14.s\n"
- "fmin z29.s, p1/M, z29.s, z14.s\n"
+ "fmin z26.s, p1/M, z26.s, z9.s\n"
+ "fmin z27.s, p1/M, z27.s, z9.s\n"
"st1w { z18.s }, p0, [x25, x9, LSL #2]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "fmin z30.s, p1/M, z30.s, z14.s\n"
- "fmin z31.s, p1/M, z31.s, z14.s\n"
+ "fmin z28.s, p1/M, z28.s, z9.s\n"
+ "fmin z29.s, p1/M, z29.s, z9.s\n"
"st1w { z19.s }, p0, [x24, x9, LSL #2]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
+ "fmin z30.s, p1/M, z30.s, z9.s\n"
+ "fmin z31.s, p1/M, z31.s, z9.s\n"
"st1w { z20.s }, p0, [x23, x9, LSL #2]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "fmax z24.s, p1/M, z24.s, z15.s\n"
- "fmax z25.s, p1/M, z25.s, z15.s\n"
"st1w { z21.s }, p0, [x22, x9, LSL #2]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "fmax z26.s, p1/M, z26.s, z15.s\n"
- "fmax z27.s, p1/M, z27.s, z15.s\n"
+ "fmax z24.s, p1/M, z24.s, z10.s\n"
+ "fmax z25.s, p1/M, z25.s, z10.s\n"
"st1w { z22.s }, p0, [x21, x9, LSL #2]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "fmax z28.s, p1/M, z28.s, z15.s\n"
- "fmax z29.s, p1/M, z29.s, z15.s\n"
+ "fmax z26.s, p1/M, z26.s, z10.s\n"
+ "fmax z27.s, p1/M, z27.s, z10.s\n"
"st1w { z23.s }, p0, [x20, x9, LSL #2]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "fmax z30.s, p1/M, z30.s, z15.s\n"
- "fmax z31.s, p1/M, z31.s, z15.s\n"
+ "fmax z28.s, p1/M, z28.s, z10.s\n"
+ "fmax z29.s, p1/M, z29.s, z10.s\n"
+ "fmax z30.s, p1/M, z30.s, z10.s\n"
+ "fmax z31.s, p1/M, z31.s, z10.s\n"
"st1w { z24.s }, p0, [x27, x9, LSL #2]\n"
"st1w { z25.s }, p0, [x26, x9, LSL #2]\n"
"st1w { z26.s }, p0, [x25, x9, LSL #2]\n"
@@ -237,117 +237,117 @@ void sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"st1w { z31.s }, p0, [x20, x9, LSL #2]\n"
"b 7f\n"
"5:" // Output channel loop: Odd tail
- "fmla z16.s, z8.s, z6.s[0]\n"
- "fmla z17.s, z8.s, z6.s[1]\n"
+ "fmla z16.s, z13.s, z7.s[0]\n"
+ "fmla z17.s, z13.s, z7.s[1]\n"
"ldp x20, x28, [x23], #0x10\n"
"ldr x27, [%x[outptrs], #0x0]\n"
- "fmla z18.s, z8.s, z6.s[2]\n"
- "fmla z19.s, z8.s, z6.s[3]\n"
- "ld1rqw { z6.s }, p1/Z, [x20]\n"
+ "fmla z18.s, z13.s, z7.s[2]\n"
+ "fmla z19.s, z13.s, z7.s[3]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "fmla z21.s, z8.s, z5.s[1]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
+ "fmla z20.s, z13.s, z6.s[0]\n"
+ "fmla z21.s, z13.s, z6.s[1]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "fmla z22.s, z8.s, z5.s[2]\n"
- "fmla z23.s, z8.s, z5.s[3]\n"
- "ld1rqw { z5.s }, p1/Z, [x20, #16]\n"
"ldr x23, [%x[outptrs], #0x20]\n"
- "fmla z24.s, z8.s, z1.s[0]\n"
- "fmla z25.s, z8.s, z1.s[1]\n"
+ "fmla z22.s, z13.s, z6.s[2]\n"
+ "fmla z23.s, z13.s, z6.s[3]\n"
+ "ld1rqw { z6.s }, p1/Z, [x20]\n"
+ "ld1rqw { z7.s }, p1/Z, [x20, #16]\n"
+ "fmla z24.s, z13.s, z1.s[0]\n"
+ "fmla z25.s, z13.s, z1.s[1]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
"ldr x21, [%x[outptrs], #0x30]\n"
- "fmla z26.s, z8.s, z1.s[2]\n"
- "fmla z27.s, z8.s, z1.s[3]\n"
+ "fmla z26.s, z13.s, z1.s[2]\n"
+ "fmla z27.s, z13.s, z1.s[3]\n"
"ld1rqw { z1.s }, p1/Z, [x28]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "fmla z28.s, z8.s, z2.s[0]\n"
- "fmla z29.s, z8.s, z2.s[1]\n"
- "fmla z30.s, z8.s, z2.s[2]\n"
- "fmla z31.s, z8.s, z2.s[3]\n"
- "ld1w { z10.s }, p1/Z, [%x[weights]]\n"
+ "fmla z28.s, z13.s, z2.s[0]\n"
+ "fmla z29.s, z13.s, z2.s[1]\n"
+ "fmla z30.s, z13.s, z2.s[2]\n"
+ "fmla z31.s, z13.s, z2.s[3]\n"
+ "ld1w { z13.s }, p1/Z, [%x[weights]]\n"
"ld1rqw { z2.s }, p1/Z, [x28, #16]\n"
- "fmla z16.s, z11.s, z0.s[0]\n"
- "fmla z17.s, z11.s, z0.s[1]\n"
+ "fmla z16.s, z8.s, z5.s[0]\n"
+ "fmla z17.s, z8.s, z5.s[1]\n"
"addvl %x[weights], %x[weights], #1\n"
- "fmla z18.s, z11.s, z0.s[2]\n"
- "fmla z19.s, z11.s, z0.s[3]\n"
- "fmla z20.s, z11.s, z4.s[0]\n"
- "fmla z21.s, z11.s, z4.s[1]\n"
- "fmla z22.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z4.s[3]\n"
- "fmla z24.s, z11.s, z7.s[0]\n"
- "fmla z25.s, z11.s, z7.s[1]\n"
- "fmla z26.s, z11.s, z7.s[2]\n"
- "fmla z27.s, z11.s, z7.s[3]\n"
- "fmla z28.s, z11.s, z3.s[0]\n"
- "fmla z29.s, z11.s, z3.s[1]\n"
- "fmla z30.s, z11.s, z3.s[2]\n"
- "fmla z31.s, z11.s, z3.s[3]\n"
- "fmla z16.s, z10.s, z6.s[0]\n"
- "fmla z17.s, z10.s, z6.s[1]\n"
- "fmin z16.s, p1/M, z16.s, z14.s\n"
- "fmin z17.s, p1/M, z17.s, z14.s\n"
- "fmla z18.s, z10.s, z6.s[2]\n"
- "fmla z19.s, z10.s, z6.s[3]\n"
- "fmin z18.s, p1/M, z18.s, z14.s\n"
- "fmin z19.s, p1/M, z19.s, z14.s\n"
- "fmla z20.s, z10.s, z5.s[0]\n"
- "fmla z21.s, z10.s, z5.s[1]\n"
- "fmin z20.s, p1/M, z20.s, z14.s\n"
- "fmin z21.s, p1/M, z21.s, z14.s\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z23.s, z10.s, z5.s[3]\n"
- "fmin z22.s, p1/M, z22.s, z14.s\n"
- "fmin z23.s, p1/M, z23.s, z14.s\n"
- "fmla z24.s, z10.s, z1.s[0]\n"
- "fmla z25.s, z10.s, z1.s[1]\n"
- "fmax z16.s, p1/M, z16.s, z15.s\n"
- "fmax z17.s, p1/M, z17.s, z15.s\n"
- "fmla z26.s, z10.s, z1.s[2]\n"
- "fmla z27.s, z10.s, z1.s[3]\n"
- "fmax z18.s, p1/M, z18.s, z15.s\n"
- "fmax z19.s, p1/M, z19.s, z15.s\n"
- "fmla z28.s, z10.s, z2.s[0]\n"
- "fmla z29.s, z10.s, z2.s[1]\n"
- "fmax z20.s, p1/M, z20.s, z15.s\n"
- "fmax z21.s, p1/M, z21.s, z15.s\n"
- "fmla z30.s, z10.s, z2.s[2]\n"
- "fmla z31.s, z10.s, z2.s[3]\n"
- "fmax z22.s, p1/M, z22.s, z15.s\n"
- "fmax z23.s, p1/M, z23.s, z15.s\n"
- "fmin z24.s, p1/M, z24.s, z14.s\n"
- "fmin z25.s, p1/M, z25.s, z14.s\n"
+ "fmla z18.s, z8.s, z5.s[2]\n"
+ "fmla z19.s, z8.s, z5.s[3]\n"
+ "fmla z20.s, z8.s, z3.s[0]\n"
+ "fmla z21.s, z8.s, z3.s[1]\n"
+ "fmla z22.s, z8.s, z3.s[2]\n"
+ "fmla z23.s, z8.s, z3.s[3]\n"
+ "fmla z24.s, z8.s, z0.s[0]\n"
+ "fmla z25.s, z8.s, z0.s[1]\n"
+ "fmla z26.s, z8.s, z0.s[2]\n"
+ "fmla z27.s, z8.s, z0.s[3]\n"
+ "fmla z28.s, z8.s, z4.s[0]\n"
+ "fmla z29.s, z8.s, z4.s[1]\n"
+ "fmla z30.s, z8.s, z4.s[2]\n"
+ "fmla z31.s, z8.s, z4.s[3]\n"
+ "fmla z16.s, z13.s, z6.s[0]\n"
+ "fmla z17.s, z13.s, z6.s[1]\n"
+ "fmla z18.s, z13.s, z6.s[2]\n"
+ "fmla z19.s, z13.s, z6.s[3]\n"
+ "fmla z20.s, z13.s, z7.s[0]\n"
+ "fmla z21.s, z13.s, z7.s[1]\n"
+ "fmla z22.s, z13.s, z7.s[2]\n"
+ "fmla z23.s, z13.s, z7.s[3]\n"
+ "fmla z24.s, z13.s, z1.s[0]\n"
+ "fmla z25.s, z13.s, z1.s[1]\n"
+ "fmin z16.s, p1/M, z16.s, z9.s\n"
+ "fmin z17.s, p1/M, z17.s, z9.s\n"
+ "fmla z26.s, z13.s, z1.s[2]\n"
+ "fmla z27.s, z13.s, z1.s[3]\n"
+ "fmin z18.s, p1/M, z18.s, z9.s\n"
+ "fmin z19.s, p1/M, z19.s, z9.s\n"
+ "fmla z28.s, z13.s, z2.s[0]\n"
+ "fmla z29.s, z13.s, z2.s[1]\n"
+ "fmin z20.s, p1/M, z20.s, z9.s\n"
+ "fmin z21.s, p1/M, z21.s, z9.s\n"
+ "fmla z30.s, z13.s, z2.s[2]\n"
+ "fmla z31.s, z13.s, z2.s[3]\n"
+ "fmin z22.s, p1/M, z22.s, z9.s\n"
+ "fmin z23.s, p1/M, z23.s, z9.s\n"
+ "fmax z16.s, p1/M, z16.s, z10.s\n"
+ "fmax z17.s, p1/M, z17.s, z10.s\n"
+ "fmax z18.s, p1/M, z18.s, z10.s\n"
+ "fmax z19.s, p1/M, z19.s, z10.s\n"
+ "fmax z20.s, p1/M, z20.s, z10.s\n"
+ "fmax z21.s, p1/M, z21.s, z10.s\n"
+ "fmax z22.s, p1/M, z22.s, z10.s\n"
+ "fmax z23.s, p1/M, z23.s, z10.s\n"
"st1w { z16.s }, p0, [x27, x9, LSL #2]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "fmin z26.s, p1/M, z26.s, z14.s\n"
- "fmin z27.s, p1/M, z27.s, z14.s\n"
+ "fmin z24.s, p1/M, z24.s, z9.s\n"
+ "fmin z25.s, p1/M, z25.s, z9.s\n"
"st1w { z17.s }, p0, [x26, x9, LSL #2]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "fmin z28.s, p1/M, z28.s, z14.s\n"
- "fmin z29.s, p1/M, z29.s, z14.s\n"
+ "fmin z26.s, p1/M, z26.s, z9.s\n"
+ "fmin z27.s, p1/M, z27.s, z9.s\n"
"st1w { z18.s }, p0, [x25, x9, LSL #2]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "fmin z30.s, p1/M, z30.s, z14.s\n"
- "fmin z31.s, p1/M, z31.s, z14.s\n"
+ "fmin z28.s, p1/M, z28.s, z9.s\n"
+ "fmin z29.s, p1/M, z29.s, z9.s\n"
"st1w { z19.s }, p0, [x24, x9, LSL #2]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
+ "fmin z30.s, p1/M, z30.s, z9.s\n"
+ "fmin z31.s, p1/M, z31.s, z9.s\n"
"st1w { z20.s }, p0, [x23, x9, LSL #2]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "fmax z24.s, p1/M, z24.s, z15.s\n"
- "fmax z25.s, p1/M, z25.s, z15.s\n"
"st1w { z21.s }, p0, [x22, x9, LSL #2]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "fmax z26.s, p1/M, z26.s, z15.s\n"
- "fmax z27.s, p1/M, z27.s, z15.s\n"
+ "fmax z24.s, p1/M, z24.s, z10.s\n"
+ "fmax z25.s, p1/M, z25.s, z10.s\n"
"st1w { z22.s }, p0, [x21, x9, LSL #2]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "fmax z28.s, p1/M, z28.s, z15.s\n"
- "fmax z29.s, p1/M, z29.s, z15.s\n"
+ "fmax z26.s, p1/M, z26.s, z10.s\n"
+ "fmax z27.s, p1/M, z27.s, z10.s\n"
"st1w { z23.s }, p0, [x20, x9, LSL #2]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "fmax z30.s, p1/M, z30.s, z15.s\n"
- "fmax z31.s, p1/M, z31.s, z15.s\n"
+ "fmax z28.s, p1/M, z28.s, z10.s\n"
+ "fmax z29.s, p1/M, z29.s, z10.s\n"
+ "fmax z30.s, p1/M, z30.s, z10.s\n"
+ "fmax z31.s, p1/M, z31.s, z10.s\n"
"st1w { z24.s }, p0, [x27, x9, LSL #2]\n"
"st1w { z25.s }, p0, [x26, x9, LSL #2]\n"
"st1w { z26.s }, p0, [x25, x9, LSL #2]\n"
@@ -358,81 +358,81 @@ void sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"st1w { z31.s }, p0, [x20, x9, LSL #2]\n"
"b 7f\n"
"6:" // Output channel loop: Single kernel point
- "fmla z16.s, z8.s, z6.s[0]\n"
- "fmla z17.s, z8.s, z6.s[1]\n"
- "fmin z16.s, p1/M, z16.s, z14.s\n"
- "fmin z17.s, p1/M, z17.s, z14.s\n"
- "fmla z18.s, z8.s, z6.s[2]\n"
- "fmla z19.s, z8.s, z6.s[3]\n"
- "fmin z18.s, p1/M, z18.s, z14.s\n"
- "fmin z19.s, p1/M, z19.s, z14.s\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "fmla z21.s, z8.s, z5.s[1]\n"
- "fmin z20.s, p1/M, z20.s, z14.s\n"
- "fmin z21.s, p1/M, z21.s, z14.s\n"
- "fmla z22.s, z8.s, z5.s[2]\n"
- "fmla z23.s, z8.s, z5.s[3]\n"
- "fmin z22.s, p1/M, z22.s, z14.s\n"
- "fmin z23.s, p1/M, z23.s, z14.s\n"
- "fmla z24.s, z8.s, z1.s[0]\n"
- "fmla z25.s, z8.s, z1.s[1]\n"
+ "fmla z16.s, z13.s, z7.s[0]\n"
+ "fmla z17.s, z13.s, z7.s[1]\n"
"ldr x27, [%x[outptrs], #0x0]\n"
"ldr x26, [%x[outptrs], #0x8]\n"
- "fmla z26.s, z8.s, z1.s[2]\n"
- "fmla z27.s, z8.s, z1.s[3]\n"
+ "fmla z18.s, z13.s, z7.s[2]\n"
+ "fmla z19.s, z13.s, z7.s[3]\n"
"ldr x25, [%x[outptrs], #0x10]\n"
"ldr x24, [%x[outptrs], #0x18]\n"
- "fmla z28.s, z8.s, z2.s[0]\n"
- "fmla z29.s, z8.s, z2.s[1]\n"
+ "fmla z20.s, z13.s, z6.s[0]\n"
+ "fmla z21.s, z13.s, z6.s[1]\n"
"ldr x23, [%x[outptrs], #0x20]\n"
"ldr x22, [%x[outptrs], #0x28]\n"
- "fmla z30.s, z8.s, z2.s[2]\n"
- "fmla z31.s, z8.s, z2.s[3]\n"
+ "fmla z22.s, z13.s, z6.s[2]\n"
+ "fmla z23.s, z13.s, z6.s[3]\n"
"ldr x21, [%x[outptrs], #0x30]\n"
"ldr x20, [%x[outptrs], #0x38]\n"
- "fmax z16.s, p1/M, z16.s, z15.s\n"
- "fmax z17.s, p1/M, z17.s, z15.s\n"
+ "fmla z24.s, z13.s, z1.s[0]\n"
+ "fmla z25.s, z13.s, z1.s[1]\n"
+ "fmin z16.s, p1/M, z16.s, z9.s\n"
+ "fmin z17.s, p1/M, z17.s, z9.s\n"
+ "fmla z26.s, z13.s, z1.s[2]\n"
+ "fmla z27.s, z13.s, z1.s[3]\n"
+ "fmin z18.s, p1/M, z18.s, z9.s\n"
+ "fmin z19.s, p1/M, z19.s, z9.s\n"
+ "fmla z28.s, z13.s, z2.s[0]\n"
+ "fmla z29.s, z13.s, z2.s[1]\n"
+ "fmin z20.s, p1/M, z20.s, z9.s\n"
+ "fmin z21.s, p1/M, z21.s, z9.s\n"
+ "fmla z30.s, z13.s, z2.s[2]\n"
+ "fmla z31.s, z13.s, z2.s[3]\n"
+ "fmin z22.s, p1/M, z22.s, z9.s\n"
+ "fmin z23.s, p1/M, z23.s, z9.s\n"
+ "fmax z16.s, p1/M, z16.s, z10.s\n"
+ "fmax z17.s, p1/M, z17.s, z10.s\n"
+ "fmax z18.s, p1/M, z18.s, z10.s\n"
+ "fmax z19.s, p1/M, z19.s, z10.s\n"
+ "fmax z20.s, p1/M, z20.s, z10.s\n"
+ "fmax z21.s, p1/M, z21.s, z10.s\n"
+ "fmax z22.s, p1/M, z22.s, z10.s\n"
+ "fmax z23.s, p1/M, z23.s, z10.s\n"
"st1w { z16.s }, p0, [x27, x9, LSL #2]\n"
"ldr x27, [%x[outptrs], #0x40]\n"
- "fmax z18.s, p1/M, z18.s, z15.s\n"
- "fmax z19.s, p1/M, z19.s, z15.s\n"
+ "fmin z24.s, p1/M, z24.s, z9.s\n"
+ "fmin z25.s, p1/M, z25.s, z9.s\n"
"st1w { z17.s }, p0, [x26, x9, LSL #2]\n"
"ldr x26, [%x[outptrs], #0x48]\n"
- "fmax z20.s, p1/M, z20.s, z15.s\n"
- "fmax z21.s, p1/M, z21.s, z15.s\n"
+ "fmin z26.s, p1/M, z26.s, z9.s\n"
+ "fmin z27.s, p1/M, z27.s, z9.s\n"
"st1w { z18.s }, p0, [x25, x9, LSL #2]\n"
"ldr x25, [%x[outptrs], #0x50]\n"
- "fmax z22.s, p1/M, z22.s, z15.s\n"
- "fmax z23.s, p1/M, z23.s, z15.s\n"
+ "fmin z28.s, p1/M, z28.s, z9.s\n"
+ "fmin z29.s, p1/M, z29.s, z9.s\n"
"st1w { z19.s }, p0, [x24, x9, LSL #2]\n"
"ldr x24, [%x[outptrs], #0x58]\n"
- "fmin z24.s, p1/M, z24.s, z14.s\n"
- "fmin z25.s, p1/M, z25.s, z14.s\n"
+ "fmin z30.s, p1/M, z30.s, z9.s\n"
+ "fmin z31.s, p1/M, z31.s, z9.s\n"
"st1w { z20.s }, p0, [x23, x9, LSL #2]\n"
"ldr x23, [%x[outptrs], #0x60]\n"
- "fmin z26.s, p1/M, z26.s, z14.s\n"
- "fmin z27.s, p1/M, z27.s, z14.s\n"
"st1w { z21.s }, p0, [x22, x9, LSL #2]\n"
"ldr x22, [%x[outptrs], #0x68]\n"
- "fmin z28.s, p1/M, z28.s, z14.s\n"
- "fmin z29.s, p1/M, z29.s, z14.s\n"
+ "fmax z24.s, p1/M, z24.s, z10.s\n"
+ "fmax z25.s, p1/M, z25.s, z10.s\n"
"st1w { z22.s }, p0, [x21, x9, LSL #2]\n"
"ldr x21, [%x[outptrs], #0x70]\n"
- "fmin z30.s, p1/M, z30.s, z14.s\n"
- "fmin z31.s, p1/M, z31.s, z14.s\n"
+ "fmax z26.s, p1/M, z26.s, z10.s\n"
+ "fmax z27.s, p1/M, z27.s, z10.s\n"
"st1w { z23.s }, p0, [x20, x9, LSL #2]\n"
"ldr x20, [%x[outptrs], #0x78]\n"
- "fmax z24.s, p1/M, z24.s, z15.s\n"
- "fmax z25.s, p1/M, z25.s, z15.s\n"
+ "fmax z28.s, p1/M, z28.s, z10.s\n"
+ "fmax z29.s, p1/M, z29.s, z10.s\n"
+ "fmax z30.s, p1/M, z30.s, z10.s\n"
+ "fmax z31.s, p1/M, z31.s, z10.s\n"
"st1w { z24.s }, p0, [x27, x9, LSL #2]\n"
- "fmax z26.s, p1/M, z26.s, z15.s\n"
- "fmax z27.s, p1/M, z27.s, z15.s\n"
"st1w { z25.s }, p0, [x26, x9, LSL #2]\n"
- "fmax z28.s, p1/M, z28.s, z15.s\n"
- "fmax z29.s, p1/M, z29.s, z15.s\n"
"st1w { z26.s }, p0, [x25, x9, LSL #2]\n"
- "fmax z30.s, p1/M, z30.s, z15.s\n"
- "fmax z31.s, p1/M, z31.s, z15.s\n"
"st1w { z27.s }, p0, [x24, x9, LSL #2]\n"
"st1w { z28.s }, p0, [x23, x9, LSL #2]\n"
"st1w { z29.s }, p0, [x22, x9, LSL #2]\n"
@@ -444,7 +444,7 @@ void sve_fp32_packed_to_nhwc_generic_with_multiplier_output2x8_mla_depthfirst_im
"b.any 1b\n"
: [weights] "+&r" (weights)
: [bias] "r" (bias), [inptrs] "r" (inptrs), [kernel_points] "r" ((uint64_t) kernel_points), [minmax_vals] "r" (minmax_vals), [n_output_channels] "r" ((uint64_t) n_output_channels), [outptrs] "r" (outptrs)
- : "cc", "memory", "p0", "p1", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z10", "z11", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z13", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index 0cee302c56..4149e0c117 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,456 +34,456 @@ void sve_s8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const unsigned int n_chan
{
__asm__ __volatile__(
"mov x14, #0x0\n"
- "whilelt p0.b, x14, %x[n_channels]\n"
"ldp x27, x26, [%x[inptrs], #0x0]\n"
"ldp x25, x24, [%x[inptrs], #0x10]\n"
+ "mov x28, #0x1\n"
"ldp x23, x22, [%x[inptrs], #0x20]\n"
- "ldp x13, x21, [%x[inptrs], #0x30]\n"
- "mov x20, #0x1\n"
+ "ldp x21, x20, [%x[inptrs], #0x30]\n"
"ptrue p2.b\n"
+ "mov x13, #0x0\n"
"ldp x12, x11, [%x[outptrs], #0x0]\n"
"ldp x10, x9, [%x[outptrs], #0x10]\n"
- "orr x20, x20, #0x100\n"
- "orr x20, x20, #0x10000\n"
- "ld1b { z15.b }, p0/Z, [x27, x14]\n"
- "ld1b { z21.b }, p0/Z, [x26, x14]\n"
- "dup z25.s, w20\n"
- "mov x28, #0x0\n"
+ "whilelt p0.b, x14, %x[n_channels]\n"
+ "orr x28, x28, #0x100\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "ld1rw { z21.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1b { z12.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z24.b }, p0/Z, [x26, x14]\n"
+ "orr x28, x28, #0x10000\n"
"ldp x27, x26, [%x[inptrs], #0x40]\n"
- "ld1b { z31.b }, p0/Z, [x25, x14]\n"
- "zip2 z16.b, z15.b, z31.b\n"
- "zip1 z15.b, z15.b, z31.b\n"
- "ld1b { z29.b }, p0/Z, [x24, x14]\n"
+ "ld1b { z26.b }, p0/Z, [x25, x14]\n"
+ "ld1b { z14.b }, p0/Z, [x24, x14]\n"
"ldp x25, x24, [%x[inptrs], #0x50]\n"
- "zip1 z30.b, z21.b, z29.b\n"
- "zip2 z29.b, z21.b, z29.b\n"
- "ld1b { z9.b }, p0/Z, [x23, x14]\n"
- "ld1b { z20.b }, p0/Z, [x22, x14]\n"
- "zip2 z13.b, z15.b, z30.b\n"
- "zip1 z15.b, z15.b, z30.b\n"
+ "ld1b { z5.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z3.b }, p0/Z, [x22, x14]\n"
+ "dup z9.s, w28\n"
"ldp x23, x22, [%x[inptrs], #0x60]\n"
- "ld1b { z5.b }, p0/Z, [x13, x14]\n"
- "zip1 z14.b, z16.b, z29.b\n"
- "zip2 z29.b, z16.b, z29.b\n"
- "ld1b { z17.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z19.b }, p0/Z, [x21, x14]\n"
+ "zip2 z18.b, z12.b, z26.b\n"
+ "zip1 z12.b, z12.b, z26.b\n"
+ "ld1b { z30.b }, p0/Z, [x20, x14]\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "zip2 z31.b, z9.b, z5.b\n"
- "zip1 z9.b, z9.b, z5.b\n"
- "ld1b { z18.b }, p0/Z, [x27, x14]\n"
- "ld1b { z28.b }, p0/Z, [x26, x14]\n"
- "zip1 z21.b, z20.b, z17.b\n"
- "zip2 z17.b, z20.b, z17.b\n"
- "ld1b { z6.b }, p0/Z, [x25, x14]\n"
- "ld1b { z4.b }, p0/Z, [x24, x14]\n"
- "zip2 z23.b, z18.b, z6.b\n"
- "zip1 z18.b, z18.b, z6.b\n"
- "ld1b { z2.b }, p0/Z, [x23, x14]\n"
- "ld1b { z19.b }, p0/Z, [x22, x14]\n"
- "zip1 z24.b, z28.b, z4.b\n"
- "zip2 z4.b, z28.b, z4.b\n"
- "ld1b { z16.b }, p0/Z, [x21, x14]\n"
- "ld1b { z5.b }, p0/Z, [x20, x14]\n"
- "zip2 z22.b, z2.b, z16.b\n"
- "zip1 z2.b, z2.b, z16.b\n"
- "zip1 z0.b, z19.b, z5.b\n"
- "zip2 z5.b, z19.b, z5.b\n"
- "ld1w { z10.s }, p2/Z, [%x[params]]\n"
- "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "zip2 z19.b, z9.b, z21.b\n"
- "zip1 z9.b, z9.b, z21.b\n"
- "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "ldp x27, x26, [%x[inptrs], #0x0]\n"
- "zip1 z11.b, z31.b, z17.b\n"
- "zip2 z17.b, z31.b, z17.b\n"
- "ldp x25, x23, [%x[inptrs], #0x10]\n"
+ "zip1 z17.b, z24.b, z14.b\n"
+ "zip2 z14.b, z24.b, z14.b\n"
+ "ld1b { z29.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z25.b }, p0/Z, [x26, x14]\n"
+ "ld1b { z16.b }, p0/Z, [x25, x14]\n"
+ "ld1b { z7.b }, p0/Z, [x24, x14]\n"
+ "zip2 z22.b, z5.b, z19.b\n"
+ "zip1 z5.b, z5.b, z19.b\n"
+ "ld1b { z6.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z28.b }, p0/Z, [x22, x14]\n"
+ "zip2 z2.b, z12.b, z17.b\n"
+ "zip1 z12.b, z12.b, z17.b\n"
+ "ld1b { z23.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z4.b }, p0/Z, [x20, x14]\n"
+ "zip1 z8.b, z18.b, z14.b\n"
+ "zip2 z14.b, z18.b, z14.b\n"
+ "zip1 z26.b, z3.b, z30.b\n"
+ "zip2 z30.b, z3.b, z30.b\n"
+ "ld1w { z0.s }, p2/Z, [%x[params]]\n"
+ "ldp x28, x27, [%x[inptrs], #0x0]\n"
+ "zip2 z24.b, z29.b, z16.b\n"
+ "zip1 z29.b, z29.b, z16.b\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
"ldp x24, x22, [%x[inptrs], #0x20]\n"
- "zip2 z12.b, z18.b, z24.b\n"
- "zip1 z18.b, z18.b, z24.b\n"
+ "zip1 z16.b, z25.b, z7.b\n"
+ "zip2 z7.b, z25.b, z7.b\n"
"ldp x21, x20, [%x[inptrs], #0x30]\n"
- "zip1 z20.b, z23.b, z4.b\n"
- "zip2 z4.b, z23.b, z4.b\n"
- "ld1b { z26.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "zip2 z24.b, z2.b, z0.b\n"
- "zip1 z2.b, z2.b, z0.b\n"
- "ld1b { z3.b }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z1.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "zip1 z0.b, z22.b, z5.b\n"
- "zip2 z5.b, z22.b, z5.b\n"
+ "ld1b { z17.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "zip2 z25.b, z6.b, z23.b\n"
+ "zip1 z6.b, z6.b, z23.b\n"
+ "ld1b { z20.b }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "zip1 z19.b, z28.b, z4.b\n"
+ "zip2 z4.b, z28.b, z4.b\n"
"addvl %x[params], %x[params], #4\n"
- "mov z22.d, z10.d\n"
- "mov z31.d, z10.d\n"
- "mov z21.d, z10.d\n"
+ "zip2 z23.b, z5.b, z26.b\n"
+ "zip1 z5.b, z5.b, z26.b\n"
+ "zip1 z3.b, z22.b, z30.b\n"
+ "zip2 z30.b, z22.b, z30.b\n"
+ "zip2 z11.b, z29.b, z16.b\n"
+ "zip1 z29.b, z29.b, z16.b\n"
+ "zip1 z16.b, z24.b, z7.b\n"
+ "zip2 z7.b, z24.b, z7.b\n"
+ "zip2 z1.b, z6.b, z19.b\n"
+ "zip1 z6.b, z6.b, z19.b\n"
+ "zip1 z27.b, z25.b, z4.b\n"
+ "zip2 z4.b, z25.b, z4.b\n"
+ "mov z26.d, z0.d\n"
+ "mov z25.d, z0.d\n"
+ "mov z28.d, z0.d\n"
"1:" // Loop
- "mov z30.s, #0x0\n"
- "sdot z30.s, z25.b, z9.b\n"
- "sdot z10.s, z26.b, z15.b\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "sdot z30.s, z25.b, z18.b\n"
- "sdot z31.s, z26.b, z9.b\n"
- "mov z27.s, #0x0\n"
- "incw x14, ALL, MUL #4\n"
- "sdot z10.s, z3.b, z9.b\n"
- "ext z9.b, z9.b, z9.b, #0x1\n"
- "movprfx z28, z30\n sdot z28.s, z25.b, z2.b\n"
- "sdot z30.s, z25.b, z15.b\n"
- "ext z15.b, z15.b, z15.b, #0x1\n"
- "sdot z27.s, z25.b, z9.b\n"
- "sdot z31.s, z3.b, z18.b\n"
- "sdot z10.s, z1.b, z18.b\n"
- "ext z18.b, z18.b, z18.b, #0x1\n"
- "sdot z22.s, z26.b, z15.b\n"
- "sdot z21.s, z26.b, z9.b\n"
- "sdot z27.s, z25.b, z18.b\n"
- "sdot z31.s, z1.b, z2.b\n"
- "ext z2.b, z2.b, z2.b, #0x1\n"
- "sdot z22.s, z3.b, z9.b\n"
- "sdot z21.s, z3.b, z18.b\n"
- "ld1w { z3.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "mls z10.s, p2/M, z30.s, z8.s\n"
- "movprfx z26, z27\n sdot z26.s, z25.b, z2.b\n"
- "mov z9.s, #0x0\n"
- "sdot z27.s, z25.b, z15.b\n"
- "ld1w { z23.s }, p2/Z, [%x[params]]\n"
- "sdot z22.s, z1.b, z18.b\n"
- ".inst 0x04b7754a // sqrdmulh z10.s, z10.s, z23.s\n"
- "sdot z21.s, z1.b, z2.b\n"
- "mls z22.s, p2/M, z27.s, z8.s\n"
- "and z18.d, z10.d, z3.d\n"
- "mls z31.s, p2/M, z28.s, z8.s\n"
- "mls z21.s, p2/M, z26.s, z8.s\n"
- "asr z18.s, z18.s, #0x1f\n"
- ".inst 0x04b776d6 // sqrdmulh z22.s, z22.s, z23.s\n"
- ".inst 0x04b777ff // sqrdmulh z31.s, z31.s, z23.s\n"
- "sdot z9.s, z25.b, z19.b\n"
- ".inst 0x04b776b5 // sqrdmulh z21.s, z21.s, z23.s\n"
- "sqadd z10.s, z10.s, z18.s\n"
- ".inst 0x4482886a // srshl z10.s, p2/M, z10.s, z3.s\n"
- "sdot z9.s, z25.b, z12.b\n"
- "and z28.d, z22.d, z3.d\n"
- "and z23.d, z31.d, z3.d\n"
- "movprfx z27, z9\n sdot z27.s, z25.b, z24.b\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "and z18.d, z21.d, z3.d\n"
- "asr z28.s, z28.s, #0x1f\n"
- "sdot z9.s, z25.b, z13.b\n"
- "asr z23.s, z23.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
- "sqadd z22.s, z22.s, z28.s\n"
- "sqadd z31.s, z31.s, z23.s\n"
- ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
- ".inst 0x4482887f // srshl z31.s, p2/M, z31.s, z3.s\n"
- "sqadd z21.s, z21.s, z18.s\n"
- "add z10.s, z10.s, z16.s\n"
- ".inst 0x44828875 // srshl z21.s, p2/M, z21.s, z3.s\n"
- "smax z10.s, p2/M, z10.s, z7.s\n"
- "add z22.s, z22.s, z16.s\n"
- "add z31.s, z31.s, z16.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smax z22.s, p2/M, z22.s, z7.s\n"
- "add z21.s, z21.s, z16.s\n"
- "smax z31.s, p2/M, z31.s, z7.s\n"
- "smax z21.s, p2/M, z21.s, z7.s\n"
- "st1b { z10.s }, p0, [x12, x28]\n"
- "ld1w { z28.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z1.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "st1b { z22.s }, p0, [x11, x28]\n"
- "mov z26.d, z28.d\n"
- "ld1b { z15.b }, p2/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z31.s }, p0, [x10, x28]\n"
- "mov z31.d, z28.d\n"
- "sdot z31.s, z1.b, z19.b\n"
- "ld1b { z23.b }, p2/Z, [%x[params], #5, MUL VL]\n"
- "st1b { z21.s }, p0, [x9, x28]\n"
- "mov z22.d, z28.d\n"
- "sdot z28.s, z1.b, z13.b\n"
- "sdot z28.s, z15.b, z19.b\n"
- "ext z13.b, z13.b, z13.b, #0x1\n"
- "ext z19.b, z19.b, z19.b, #0x1\n"
- "sdot z26.s, z1.b, z13.b\n"
- "ld1w { z21.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "mov z24.s, #0x0\n"
+ "sdot z0.s, z17.b, z12.b\n"
+ "sdot z25.s, z17.b, z5.b\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
"mov z18.s, #0x0\n"
- "sdot z22.s, z1.b, z19.b\n"
- "sdot z18.s, z25.b, z19.b\n"
- "incw x28\n"
- "sdot z31.s, z15.b, z12.b\n"
- "sdot z28.s, z23.b, z12.b\n"
+ "incw x14, ALL, MUL #4\n"
+ "sdot z24.s, z9.b, z5.b\n"
+ "sdot z0.s, z20.b, z5.b\n"
+ "ext z5.b, z5.b, z5.b, #0x1\n"
+ "sdot z25.s, z20.b, z29.b\n"
+ "sdot z24.s, z9.b, z29.b\n"
+ "sdot z18.s, z9.b, z5.b\n"
+ "sdot z0.s, z10.b, z29.b\n"
+ "ext z29.b, z29.b, z29.b, #0x1\n"
+ "sdot z28.s, z17.b, z5.b\n"
+ "movprfx z19, z24\n sdot z19.s, z9.b, z6.b\n"
+ "sdot z24.s, z9.b, z12.b\n"
"ext z12.b, z12.b, z12.b, #0x1\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "sdot z26.s, z15.b, z19.b\n"
- "sdot z22.s, z15.b, z12.b\n"
+ "sdot z25.s, z10.b, z6.b\n"
+ "ext z6.b, z6.b, z6.b, #0x1\n"
+ "sdot z18.s, z9.b, z29.b\n"
+ "sdot z26.s, z17.b, z12.b\n"
+ "sdot z28.s, z20.b, z29.b\n"
+ "mls z0.s, p2/M, z24.s, z13.s\n"
+ "mov z22.s, #0x0\n"
+ "mls z25.s, p2/M, z19.s, z13.s\n"
+ "sdot z22.s, z9.b, z23.b\n"
+ "sdot z26.s, z20.b, z5.b\n"
+ "ld1w { z20.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "movprfx z5, z18\n sdot z5.s, z9.b, z6.b\n"
+ "sdot z18.s, z9.b, z12.b\n"
+ "ld1w { z19.s }, p2/Z, [%x[params]]\n"
+ "sdot z28.s, z10.b, z6.b\n"
+ "sdot z22.s, z9.b, z11.b\n"
+ "sdot z26.s, z10.b, z29.b\n"
+ ".inst 0x04b37400 // sqrdmulh z0.s, z0.s, z19.s\n"
+ ".inst 0x04b37739 // sqrdmulh z25.s, z25.s, z19.s\n"
+ "mls z28.s, p2/M, z5.s, z13.s\n"
+ "and z5.d, z0.d, z20.d\n"
+ "mls z26.s, p2/M, z18.s, z13.s\n"
+ "mov z18.s, #0x0\n"
+ "and z12.d, z25.d, z20.d\n"
+ "movprfx z10, z22\n sdot z10.s, z9.b, z1.b\n"
+ "sdot z22.s, z9.b, z2.b\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ ".inst 0x04b3779c // sqrdmulh z28.s, z28.s, z19.s\n"
+ ".inst 0x04b3775a // sqrdmulh z26.s, z26.s, z19.s\n"
+ "ld1w { z24.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "asr z12.s, z12.s, #0x1f\n"
+ "sqadd z0.s, z0.s, z5.s\n"
+ "and z19.d, z26.d, z20.d\n"
+ "and z6.d, z28.d, z20.d\n"
+ ".inst 0x44828a80 // srshl z0.s, p2/M, z0.s, z20.s\n"
+ "sqadd z25.s, z25.s, z12.s\n"
+ "ld1b { z5.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "add z0.s, z0.s, z21.s\n"
+ "sqadd z26.s, z26.s, z19.s\n"
+ "ld1b { z19.b }, p2/Z, [%x[params], #4, MUL VL]\n"
+ ".inst 0x44828a99 // srshl z25.s, p2/M, z25.s, z20.s\n"
+ "sqadd z28.s, z28.s, z6.s\n"
+ "ld1b { z17.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "smax z0.s, p2/M, z0.s, z31.s\n"
+ ".inst 0x44828a9a // srshl z26.s, p2/M, z26.s, z20.s\n"
+ ".inst 0x44828a9c // srshl z28.s, p2/M, z28.s, z20.s\n"
+ "ld1w { z12.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "add z25.s, z25.s, z21.s\n"
+ "add z26.s, z26.s, z21.s\n"
+ "smin z0.s, p2/M, z0.s, z15.s\n"
+ "add z28.s, z28.s, z21.s\n"
+ "smax z26.s, p2/M, z26.s, z31.s\n"
+ "smax z25.s, p2/M, z25.s, z31.s\n"
+ "smax z28.s, p2/M, z28.s, z31.s\n"
+ "st1b { z0.s }, p0, [x12, x13]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "sdot z18.s, z25.b, z12.b\n"
- "sdot z31.s, z23.b, z24.b\n"
- "ext z24.b, z24.b, z24.b, #0x1\n"
- "mls z28.s, p2/M, z9.s, z8.s\n"
- "sdot z26.s, z23.b, z12.b\n"
- ".inst 0x04be779c // sqrdmulh z28.s, z28.s, z30.s\n"
- "sdot z22.s, z23.b, z24.b\n"
- "movprfx z12, z18\n sdot z12.s, z25.b, z24.b\n"
- "and z2.d, z28.d, z21.d\n"
- "sdot z18.s, z25.b, z13.b\n"
- "mls z26.s, p2/M, z18.s, z8.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- "mls z31.s, p2/M, z27.s, z8.s\n"
- "mls z22.s, p2/M, z12.s, z8.s\n"
- ".inst 0x04be775a // sqrdmulh z26.s, z26.s, z30.s\n"
- ".inst 0x04be77ff // sqrdmulh z31.s, z31.s, z30.s\n"
- ".inst 0x04be76d6 // sqrdmulh z22.s, z22.s, z30.s\n"
- "ld1w { z1.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
- "sqadd z28.s, z28.s, z2.s\n"
- "and z24.d, z26.d, z21.d\n"
- ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
- "and z23.d, z31.d, z21.d\n"
- "and z18.d, z22.d, z21.d\n"
- "asr z24.s, z24.s, #0x1f\n"
- "asr z23.s, z23.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
- "sqadd z26.s, z26.s, z24.s\n"
- ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
- "ld1b { z30.b }, p2/Z, [%x[params], #-6, MUL VL]\n"
- "sqadd z31.s, z31.s, z23.s\n"
- "sqadd z22.s, z22.s, z18.s\n"
- ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
- ".inst 0x44828ab6 // srshl z22.s, p2/M, z22.s, z21.s\n"
- "add z28.s, z28.s, z16.s\n"
- "smax z28.s, p2/M, z28.s, z7.s\n"
- "add z26.s, z26.s, z16.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "add z31.s, z31.s, z16.s\n"
- "add z22.s, z22.s, z16.s\n"
- "smax z26.s, p2/M, z26.s, z7.s\n"
- "smax z31.s, p2/M, z31.s, z7.s\n"
- "mov z24.s, #0x0\n"
- "sdot z24.s, z25.b, z11.b\n"
- "smax z22.s, p2/M, z22.s, z7.s\n"
- "st1b { z28.s }, p0, [x12, x28]\n"
- "ld1w { z23.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
- "ld1b { z19.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "st1b { z26.s }, p0, [x11, x28]\n"
- "mov z28.d, z23.d\n"
- "sdot z24.s, z25.b, z20.b\n"
- "st1b { z31.s }, p0, [x10, x28]\n"
- "mov z27.d, z23.d\n"
- "sdot z27.s, z19.b, z11.b\n"
- "movprfx z13, z24\n sdot z13.s, z25.b, z0.b\n"
- "st1b { z22.s }, p0, [x9, x28]\n"
- "mov z26.d, z23.d\n"
- "sdot z23.s, z19.b, z14.b\n"
- "sdot z23.s, z30.b, z11.b\n"
- "sdot z24.s, z25.b, z14.b\n"
- "ext z14.b, z14.b, z14.b, #0x1\n"
- "ld1b { z21.b }, p2/Z, [%x[params], #-5, MUL VL]\n"
- "sdot z28.s, z19.b, z14.b\n"
+ "smin z26.s, p2/M, z26.s, z15.s\n"
+ "smin z25.s, p2/M, z25.s, z15.s\n"
+ "smin z28.s, p2/M, z28.s, z15.s\n"
+ "st1b { z26.s }, p0, [x11, x13]\n"
+ "mov z6.d, z29.d\n"
+ "st1b { z25.s }, p0, [x10, x13]\n"
+ "mov z25.d, z29.d\n"
+ "st1b { z28.s }, p0, [x9, x13]\n"
+ "mov z0.d, z29.d\n"
+ "sdot z29.s, z17.b, z2.b\n"
+ "incw x13\n"
+ "sdot z25.s, z17.b, z23.b\n"
+ "ext z2.b, z2.b, z2.b, #0x1\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
+ "sdot z29.s, z19.b, z23.b\n"
+ "ext z23.b, z23.b, z23.b, #0x1\n"
+ "sdot z6.s, z17.b, z2.b\n"
+ "sdot z0.s, z17.b, z23.b\n"
+ "sdot z18.s, z9.b, z23.b\n"
+ "sdot z25.s, z19.b, z11.b\n"
+ "sdot z29.s, z5.b, z11.b\n"
"ext z11.b, z11.b, z11.b, #0x1\n"
- "mov z12.s, #0x0\n"
- "sdot z26.s, z19.b, z11.b\n"
- "ld1w { z22.s }, p2/Z, [%x[params], #-3, MUL VL]\n"
- "sdot z12.s, z25.b, z11.b\n"
- "sdot z27.s, z30.b, z20.b\n"
- "incw x28\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "sdot z23.s, z21.b, z20.b\n"
- "ext z20.b, z20.b, z20.b, #0x1\n"
- "sdot z28.s, z30.b, z11.b\n"
- "sdot z26.s, z30.b, z20.b\n"
- "sdot z12.s, z25.b, z20.b\n"
- "sdot z27.s, z21.b, z0.b\n"
- "ext z0.b, z0.b, z0.b, #0x1\n"
- "mls z23.s, p2/M, z24.s, z8.s\n"
- "sdot z28.s, z21.b, z20.b\n"
- "sdot z26.s, z21.b, z0.b\n"
- ".inst 0x04a176f7 // sqrdmulh z23.s, z23.s, z1.s\n"
- "movprfx z19, z12\n sdot z19.s, z25.b, z0.b\n"
- "sdot z12.s, z25.b, z14.b\n"
- "and z18.d, z23.d, z22.d\n"
- "mls z28.s, p2/M, z12.s, z8.s\n"
- "mls z27.s, p2/M, z13.s, z8.s\n"
+ "sdot z6.s, z19.b, z23.b\n"
+ "sdot z0.s, z19.b, z11.b\n"
+ "sdot z18.s, z9.b, z11.b\n"
+ "sdot z25.s, z5.b, z1.b\n"
+ "ext z1.b, z1.b, z1.b, #0x1\n"
+ "mls z29.s, p2/M, z22.s, z13.s\n"
+ "mov z28.s, #0x0\n"
+ "sdot z6.s, z5.b, z11.b\n"
+ "sdot z0.s, z5.b, z1.b\n"
+ "movprfx z11, z18\n sdot z11.s, z9.b, z1.b\n"
+ "sdot z18.s, z9.b, z2.b\n"
+ "sdot z28.s, z9.b, z3.b\n"
+ ".inst 0x04b877bd // sqrdmulh z29.s, z29.s, z24.s\n"
+ "mls z25.s, p2/M, z10.s, z13.s\n"
+ "mls z6.s, p2/M, z18.s, z13.s\n"
+ "mov z1.s, #0x0\n"
+ "mls z0.s, p2/M, z11.s, z13.s\n"
+ "and z11.d, z29.d, z12.d\n"
+ ".inst 0x04b87739 // sqrdmulh z25.s, z25.s, z24.s\n"
+ "sdot z28.s, z9.b, z16.b\n"
+ "asr z11.s, z11.s, #0x1f\n"
+ ".inst 0x04b874c6 // sqrdmulh z6.s, z6.s, z24.s\n"
+ ".inst 0x04b87400 // sqrdmulh z0.s, z0.s, z24.s\n"
+ "ld1w { z5.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
+ "and z22.d, z25.d, z12.d\n"
+ "sqadd z29.s, z29.s, z11.s\n"
+ "and z18.d, z6.d, z12.d\n"
+ "movprfx z24, z28\n sdot z24.s, z9.b, z27.b\n"
+ "sdot z28.s, z9.b, z8.b\n"
+ "and z11.d, z0.d, z12.d\n"
+ "asr z22.s, z22.s, #0x1f\n"
"asr z18.s, z18.s, #0x1f\n"
- "mls z26.s, p2/M, z19.s, z8.s\n"
- ".inst 0x04a1779c // sqrdmulh z28.s, z28.s, z1.s\n"
- ".inst 0x04a1777b // sqrdmulh z27.s, z27.s, z1.s\n"
- ".inst 0x04a1775a // sqrdmulh z26.s, z26.s, z1.s\n"
- "ld1w { z2.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "sqadd z23.s, z23.s, z18.s\n"
- "and z20.d, z28.d, z22.d\n"
- ".inst 0x44828ad7 // srshl z23.s, p2/M, z23.s, z22.s\n"
- "and z19.d, z27.d, z22.d\n"
- "and z18.d, z26.d, z22.d\n"
- "asr z20.s, z20.s, #0x1f\n"
+ ".inst 0x4482899d // srshl z29.s, p2/M, z29.s, z12.s\n"
+ "asr z11.s, z11.s, #0x1f\n"
+ "sqadd z6.s, z6.s, z18.s\n"
+ "ld1b { z20.b }, p2/Z, [%x[params], #-6, MUL VL]\n"
+ "sqadd z25.s, z25.s, z22.s\n"
+ "ld1b { z19.b }, p2/Z, [%x[params], #-5, MUL VL]\n"
+ "add z29.s, z29.s, z21.s\n"
+ "sqadd z0.s, z0.s, z11.s\n"
+ "ld1b { z18.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ ".inst 0x44828986 // srshl z6.s, p2/M, z6.s, z12.s\n"
+ ".inst 0x44828999 // srshl z25.s, p2/M, z25.s, z12.s\n"
+ "smax z29.s, p2/M, z29.s, z31.s\n"
+ ".inst 0x44828980 // srshl z0.s, p2/M, z0.s, z12.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #-3, MUL VL]\n"
+ "add z6.s, z6.s, z21.s\n"
+ "add z25.s, z25.s, z21.s\n"
+ "add z0.s, z0.s, z21.s\n"
+ "smin z29.s, p2/M, z29.s, z15.s\n"
+ "smax z6.s, p2/M, z6.s, z31.s\n"
+ "smax z25.s, p2/M, z25.s, z31.s\n"
+ "smax z0.s, p2/M, z0.s, z31.s\n"
+ "st1b { z29.s }, p0, [x12, x13]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "smin z6.s, p2/M, z6.s, z15.s\n"
+ "smin z25.s, p2/M, z25.s, z15.s\n"
+ "smin z0.s, p2/M, z0.s, z15.s\n"
+ "st1b { z6.s }, p0, [x11, x13]\n"
+ "mov z11.d, z29.d\n"
+ "st1b { z25.s }, p0, [x10, x13]\n"
+ "mov z26.d, z29.d\n"
+ "st1b { z0.s }, p0, [x9, x13]\n"
+ "mov z25.d, z29.d\n"
+ "sdot z29.s, z18.b, z8.b\n"
+ "incw x13\n"
+ "sdot z26.s, z18.b, z3.b\n"
+ "ext z8.b, z8.b, z8.b, #0x1\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
+ "sdot z29.s, z20.b, z3.b\n"
+ "ext z3.b, z3.b, z3.b, #0x1\n"
+ "sdot z11.s, z18.b, z8.b\n"
+ "sdot z25.s, z18.b, z3.b\n"
+ "sdot z1.s, z9.b, z3.b\n"
+ "sdot z26.s, z20.b, z16.b\n"
+ "sdot z29.s, z19.b, z16.b\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
+ "sdot z11.s, z20.b, z3.b\n"
+ "sdot z25.s, z20.b, z16.b\n"
+ "sdot z1.s, z9.b, z16.b\n"
+ "sdot z26.s, z19.b, z27.b\n"
+ "ext z27.b, z27.b, z27.b, #0x1\n"
+ "mls z29.s, p2/M, z28.s, z13.s\n"
+ "mov z22.s, #0x0\n"
+ "sdot z11.s, z19.b, z16.b\n"
+ "sdot z25.s, z19.b, z27.b\n"
+ "movprfx z18, z1\n sdot z18.s, z9.b, z27.b\n"
+ "sdot z1.s, z9.b, z8.b\n"
+ "sdot z22.s, z9.b, z30.b\n"
+ ".inst 0x04a577bd // sqrdmulh z29.s, z29.s, z5.s\n"
+ "mls z26.s, p2/M, z24.s, z13.s\n"
+ "mls z11.s, p2/M, z1.s, z13.s\n"
+ "mov z10.s, #0x0\n"
+ "mls z25.s, p2/M, z18.s, z13.s\n"
+ "and z18.d, z29.d, z23.d\n"
+ ".inst 0x04a5775a // sqrdmulh z26.s, z26.s, z5.s\n"
+ "sdot z22.s, z9.b, z7.b\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04a5756b // sqrdmulh z11.s, z11.s, z5.s\n"
+ ".inst 0x04a57739 // sqrdmulh z25.s, z25.s, z5.s\n"
+ "ld1w { z8.s }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "and z19.d, z26.d, z23.d\n"
+ "sqadd z29.s, z29.s, z18.s\n"
+ "and z18.d, z11.d, z23.d\n"
+ "movprfx z6, z22\n sdot z6.s, z9.b, z4.b\n"
+ "sdot z22.s, z9.b, z14.b\n"
+ "and z20.d, z25.d, z23.d\n"
"asr z19.s, z19.s, #0x1f\n"
"asr z18.s, z18.s, #0x1f\n"
- "sqadd z28.s, z28.s, z20.s\n"
- ".inst 0x44828adc // srshl z28.s, p2/M, z28.s, z22.s\n"
- "ld1b { z13.b }, p2/Z, [%x[params]]\n"
- "sqadd z27.s, z27.s, z19.s\n"
- "sqadd z26.s, z26.s, z18.s\n"
- ".inst 0x44828adb // srshl z27.s, p2/M, z27.s, z22.s\n"
- ".inst 0x44828ada // srshl z26.s, p2/M, z26.s, z22.s\n"
- "add z23.s, z23.s, z16.s\n"
- "smax z23.s, p2/M, z23.s, z7.s\n"
- "add z28.s, z28.s, z16.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "add z27.s, z27.s, z16.s\n"
- "add z26.s, z26.s, z16.s\n"
- "smax z28.s, p2/M, z28.s, z7.s\n"
- "smax z27.s, p2/M, z27.s, z7.s\n"
- "mov z24.s, #0x0\n"
- "sdot z24.s, z25.b, z17.b\n"
- "smax z26.s, p2/M, z26.s, z7.s\n"
- "st1b { z23.s }, p0, [x12, x28]\n"
- "ld1w { z1.s }, p2/Z, [%x[params], #-2, MUL VL]\n"
- "ld1b { z21.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "st1b { z28.s }, p0, [x11, x28]\n"
- "mov z0.d, z1.d\n"
- "sdot z24.s, z25.b, z4.b\n"
- "st1b { z27.s }, p0, [x10, x28]\n"
- "mov z31.d, z1.d\n"
- "sdot z31.s, z21.b, z17.b\n"
- "movprfx z23, z24\n sdot z23.s, z25.b, z5.b\n"
- "st1b { z26.s }, p0, [x9, x28]\n"
- "mov z30.d, z1.d\n"
- "sdot z1.s, z21.b, z29.b\n"
- "sdot z1.s, z13.b, z17.b\n"
- "sdot z24.s, z25.b, z29.b\n"
- "ext z29.b, z29.b, z29.b, #0x1\n"
- "ld1b { z20.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "sdot z0.s, z21.b, z29.b\n"
- "ext z17.b, z17.b, z17.b, #0x1\n"
- "mov z19.s, #0x0\n"
- "sdot z30.s, z21.b, z17.b\n"
- "ld1w { z22.s }, p2/Z, [%x[params], #3, MUL VL]\n"
- "sdot z19.s, z25.b, z17.b\n"
- "sdot z31.s, z13.b, z4.b\n"
- "incw x28\n"
- "whilelt p1.s, x28, %x[n_channels]\n"
- "sdot z1.s, z20.b, z4.b\n"
- "ext z4.b, z4.b, z4.b, #0x1\n"
- "sdot z0.s, z13.b, z17.b\n"
+ ".inst 0x44828afd // srshl z29.s, p2/M, z29.s, z23.s\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "sqadd z11.s, z11.s, z18.s\n"
+ "ld1b { z24.b }, p2/Z, [%x[params]]\n"
+ "sqadd z26.s, z26.s, z19.s\n"
+ "ld1b { z19.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "add z29.s, z29.s, z21.s\n"
+ "sqadd z25.s, z25.s, z20.s\n"
+ "ld1b { z18.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
+ ".inst 0x44828aeb // srshl z11.s, p2/M, z11.s, z23.s\n"
+ ".inst 0x44828afa // srshl z26.s, p2/M, z26.s, z23.s\n"
+ "smax z29.s, p2/M, z29.s, z31.s\n"
+ ".inst 0x44828af9 // srshl z25.s, p2/M, z25.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "add z11.s, z11.s, z21.s\n"
+ "add z26.s, z26.s, z21.s\n"
+ "add z25.s, z25.s, z21.s\n"
+ "smin z29.s, p2/M, z29.s, z15.s\n"
+ "smax z11.s, p2/M, z11.s, z31.s\n"
+ "smax z26.s, p2/M, z26.s, z31.s\n"
+ "smax z25.s, p2/M, z25.s, z31.s\n"
+ "st1b { z29.s }, p0, [x12, x13]\n"
+ "ld1w { z2.s }, p2/Z, [%x[params], #-2, MUL VL]\n"
+ "smin z11.s, p2/M, z11.s, z15.s\n"
+ "smin z26.s, p2/M, z26.s, z15.s\n"
+ "smin z25.s, p2/M, z25.s, z15.s\n"
+ "st1b { z11.s }, p0, [x11, x13]\n"
+ "mov z28.d, z2.d\n"
+ "st1b { z26.s }, p0, [x10, x13]\n"
+ "mov z1.d, z2.d\n"
+ "st1b { z25.s }, p0, [x9, x13]\n"
+ "mov z3.d, z2.d\n"
+ "sdot z2.s, z18.b, z14.b\n"
+ "incw x13\n"
+ "sdot z1.s, z18.b, z30.b\n"
+ "ext z14.b, z14.b, z14.b, #0x1\n"
+ "whilelt p1.s, x13, %x[n_channels]\n"
"whilelt p0.b, x14, %x[n_channels]\n"
- "sdot z30.s, z13.b, z4.b\n"
- "sdot z19.s, z25.b, z4.b\n"
- "ld1b { z13.b }, p0/Z, [x26, x14]\n"
- "ld1b { z28.b }, p0/Z, [x25, x14]\n"
- "sdot z31.s, z20.b, z5.b\n"
- "ext z5.b, z5.b, z5.b, #0x1\n"
- "mls z1.s, p2/M, z24.s, z8.s\n"
- "ld1b { z27.b }, p0/Z, [x22, x14]\n"
- "sdot z0.s, z20.b, z4.b\n"
- "sdot z30.s, z20.b, z5.b\n"
- ".inst 0x04a27421 // sqrdmulh z1.s, z1.s, z2.s\n"
- "ld1b { z26.b }, p0/Z, [x21, x14]\n"
- "movprfx z18, z19\n sdot z18.s, z25.b, z5.b\n"
- "sdot z19.s, z25.b, z29.b\n"
- "and z11.d, z1.d, z22.d\n"
- "ld1b { z29.b }, p0/Z, [x23, x14]\n"
- "mls z0.s, p2/M, z19.s, z8.s\n"
- "mls z31.s, p2/M, z23.s, z8.s\n"
- "asr z11.s, z11.s, #0x1f\n"
- "ld1b { z17.b }, p0/Z, [x20, x14]\n"
- "mls z30.s, p2/M, z18.s, z8.s\n"
- ".inst 0x04a27400 // sqrdmulh z0.s, z0.s, z2.s\n"
- ".inst 0x04a277ff // sqrdmulh z31.s, z31.s, z2.s\n"
- ".inst 0x04a277de // sqrdmulh z30.s, z30.s, z2.s\n"
- "ld1b { z15.b }, p0/Z, [x27, x14]\n"
+ "sdot z2.s, z24.b, z30.b\n"
+ "ext z30.b, z30.b, z30.b, #0x1\n"
+ "sdot z28.s, z18.b, z14.b\n"
+ "ld1b { z0.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z27.b }, p0/Z, [x26, x14]\n"
+ "ld1b { z26.b }, p0/Z, [x22, x14]\n"
+ "ld1b { z25.b }, p0/Z, [x21, x14]\n"
+ "sdot z3.s, z18.b, z30.b\n"
+ "sdot z10.s, z9.b, z30.b\n"
+ "sdot z1.s, z24.b, z7.b\n"
+ "sdot z2.s, z19.b, z7.b\n"
+ "ext z7.b, z7.b, z7.b, #0x1\n"
+ "sdot z28.s, z24.b, z30.b\n"
+ "ld1b { z30.b }, p0/Z, [x20, x14]\n"
+ "sdot z3.s, z24.b, z7.b\n"
+ "sdot z10.s, z9.b, z7.b\n"
+ "sdot z1.s, z19.b, z4.b\n"
+ "ext z4.b, z4.b, z4.b, #0x1\n"
+ "mls z2.s, p2/M, z22.s, z13.s\n"
+ "sdot z28.s, z19.b, z7.b\n"
+ "sdot z3.s, z19.b, z4.b\n"
+ "movprfx z18, z10\n sdot z18.s, z9.b, z4.b\n"
+ "sdot z10.s, z9.b, z14.b\n"
+ "ld1b { z14.b }, p0/Z, [x25, x14]\n"
+ "mls z1.s, p2/M, z6.s, z13.s\n"
+ ".inst 0x04a87442 // sqrdmulh z2.s, z2.s, z8.s\n"
+ "mls z3.s, p2/M, z18.s, z13.s\n"
+ "and z18.d, z2.d, z23.d\n"
+ "mls z28.s, p2/M, z10.s, z13.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04a87421 // sqrdmulh z1.s, z1.s, z8.s\n"
+ ".inst 0x04a8779c // sqrdmulh z28.s, z28.s, z8.s\n"
+ ".inst 0x04a87463 // sqrdmulh z3.s, z3.s, z8.s\n"
+ "ld1b { z12.b }, p0/Z, [x28, x14]\n"
"ldp x23, x22, [%x[inptrs], #0x40]\n"
- "sqadd z1.s, z1.s, z11.s\n"
- "and z21.d, z0.d, z22.d\n"
- ".inst 0x44828ac1 // srshl z1.s, p2/M, z1.s, z22.s\n"
"ldp x21, x20, [%x[inptrs], #0x50]\n"
- "and z20.d, z31.d, z22.d\n"
- "and z19.d, z30.d, z22.d\n"
- "ld1b { z18.b }, p0/Z, [x23, x14]\n"
- "ld1b { z11.b }, p0/Z, [x22, x14]\n"
- "asr z21.s, z21.s, #0x1f\n"
- "asr z20.s, z20.s, #0x1f\n"
- "ld1b { z24.b }, p0/Z, [x21, x14]\n"
- "ld1b { z4.b }, p0/Z, [x20, x14]\n"
+ "sqadd z2.s, z2.s, z18.s\n"
+ "and z22.d, z1.d, z23.d\n"
+ "and z18.d, z28.d, z23.d\n"
+ "and z19.d, z3.d, z23.d\n"
+ "ld1b { z29.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z24.b }, p0/Z, [x22, x14]\n"
+ "asr z22.s, z22.s, #0x1f\n"
+ ".inst 0x44828ae2 // srshl z2.s, p2/M, z2.s, z23.s\n"
+ "ld1b { z11.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z7.b }, p0/Z, [x20, x14]\n"
+ "asr z18.s, z18.s, #0x1f\n"
"asr z19.s, z19.s, #0x1f\n"
- "sqadd z0.s, z0.s, z21.s\n"
- ".inst 0x44828ac0 // srshl z0.s, p2/M, z0.s, z22.s\n"
- "ld1b { z3.b }, p2/Z, [%x[params], #6, MUL VL]\n"
- "sqadd z31.s, z31.s, z20.s\n"
- "sqadd z30.s, z30.s, z19.s\n"
- ".inst 0x44828adf // srshl z31.s, p2/M, z31.s, z22.s\n"
- ".inst 0x44828ade // srshl z30.s, p2/M, z30.s, z22.s\n"
- "add z1.s, z1.s, z16.s\n"
- "smax z1.s, p2/M, z1.s, z7.s\n"
- "add z0.s, z0.s, z16.s\n"
- "ld1b { z9.b }, p0/Z, [x24, x14]\n"
- "add z31.s, z31.s, z16.s\n"
- "add z30.s, z30.s, z16.s\n"
+ "sqadd z1.s, z1.s, z22.s\n"
+ "ld1b { z10.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "add z2.s, z2.s, z21.s\n"
+ "sqadd z28.s, z28.s, z18.s\n"
+ "ld1b { z20.b }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "sqadd z3.s, z3.s, z19.s\n"
+ "ld1b { z17.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
+ "smax z2.s, p2/M, z2.s, z31.s\n"
+ ".inst 0x44828afc // srshl z28.s, p2/M, z28.s, z23.s\n"
+ ".inst 0x44828ae3 // srshl z3.s, p2/M, z3.s, z23.s\n"
+ "ld1b { z5.b }, p0/Z, [x24, x14]\n"
"ldp x23, x22, [%x[inptrs], #0x60]\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "smin z1.s, p2/M, z1.s, z6.s\n"
- "smax z0.s, p2/M, z0.s, z7.s\n"
- "st1b { z1.s }, p1, [x12, x28]\n"
- "ld1b { z2.b }, p0/Z, [x23, x14]\n"
- "smax z31.s, p2/M, z31.s, z7.s\n"
- "smax z30.s, p2/M, z30.s, z7.s\n"
+ "ldp x28, x27, [%x[inptrs], #0x0]\n"
+ "add z1.s, z1.s, z21.s\n"
+ "smin z2.s, p2/M, z2.s, z15.s\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
+ "add z28.s, z28.s, z21.s\n"
+ "add z3.s, z3.s, z21.s\n"
+ "ld1b { z6.b }, p0/Z, [x23, x14]\n"
"ld1b { z23.b }, p0/Z, [x22, x14]\n"
- "ld1b { z22.b }, p0/Z, [x21, x14]\n"
- "ld1b { z5.b }, p0/Z, [x20, x14]\n"
- "zip2 z20.b, z15.b, z28.b\n"
- "zip1 z15.b, z15.b, z28.b\n"
- "smin z0.s, p2/M, z0.s, z6.s\n"
- "zip1 z19.b, z13.b, z29.b\n"
- "zip2 z29.b, z13.b, z29.b\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "st1b { z0.s }, p1, [x11, x28]\n"
- "zip2 z13.b, z15.b, z19.b\n"
- "zip1 z15.b, z15.b, z19.b\n"
- "ldp x27, x26, [%x[inptrs], #0x0]\n"
- "st1b { z31.s }, p1, [x10, x28]\n"
- "zip1 z14.b, z20.b, z29.b\n"
- "zip2 z29.b, z20.b, z29.b\n"
- "ld1w { z10.s }, p2/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z30.s }, p1, [x9, x28]\n"
- "zip2 z21.b, z9.b, z26.b\n"
- "zip1 z9.b, z9.b, z26.b\n"
- "incw x28\n"
- "zip1 z20.b, z27.b, z17.b\n"
- "zip2 z17.b, z27.b, z17.b\n"
- "ldp x25, x23, [%x[inptrs], #0x10]\n"
"ldp x24, x22, [%x[inptrs], #0x20]\n"
- "zip2 z31.b, z18.b, z24.b\n"
- "zip1 z18.b, z18.b, z24.b\n"
+ "smax z1.s, p2/M, z1.s, z31.s\n"
+ "st1b { z2.s }, p1, [x12, x13]\n"
+ "ld1b { z22.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z4.b }, p0/Z, [x20, x14]\n"
+ "zip2 z19.b, z12.b, z27.b\n"
+ "zip1 z12.b, z12.b, z27.b\n"
+ "smax z28.s, p2/M, z28.s, z31.s\n"
+ "smax z3.s, p2/M, z3.s, z31.s\n"
+ "zip1 z18.b, z0.b, z14.b\n"
+ "zip2 z14.b, z0.b, z14.b\n"
+ "smin z1.s, p2/M, z1.s, z15.s\n"
"ldp x21, x20, [%x[inptrs], #0x30]\n"
- "ld1b { z26.b }, p2/Z, [%x[params], #5, MUL VL]\n"
- "zip1 z27.b, z11.b, z4.b\n"
- "zip2 z4.b, z11.b, z4.b\n"
- "ld1b { z1.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "smin z28.s, p2/M, z28.s, z15.s\n"
+ "smin z3.s, p2/M, z3.s, z15.s\n"
+ "zip2 z2.b, z12.b, z18.b\n"
+ "zip1 z12.b, z12.b, z18.b\n"
+ "zip1 z8.b, z19.b, z14.b\n"
+ "zip2 z14.b, z19.b, z14.b\n"
+ "ld1w { z0.s }, p2/Z, [%x[params], #4, MUL VL]\n"
"addvl %x[params], %x[params], #8\n"
- "zip2 z30.b, z2.b, z22.b\n"
- "zip1 z2.b, z2.b, z22.b\n"
- "zip1 z28.b, z23.b, z5.b\n"
- "zip2 z5.b, z23.b, z5.b\n"
- "zip2 z19.b, z9.b, z20.b\n"
- "zip1 z9.b, z9.b, z20.b\n"
- "zip1 z11.b, z21.b, z17.b\n"
- "zip2 z17.b, z21.b, z17.b\n"
- "zip2 z12.b, z18.b, z27.b\n"
- "zip1 z18.b, z18.b, z27.b\n"
- "zip1 z20.b, z31.b, z4.b\n"
- "zip2 z4.b, z31.b, z4.b\n"
- "zip2 z24.b, z2.b, z28.b\n"
- "zip1 z2.b, z2.b, z28.b\n"
- "zip1 z0.b, z30.b, z5.b\n"
- "zip2 z5.b, z30.b, z5.b\n"
- "mov z22.d, z10.d\n"
- "mov z31.d, z10.d\n"
- "mov z21.d, z10.d\n"
+ "st1b { z28.s }, p1, [x11, x13]\n"
+ "zip2 z27.b, z5.b, z25.b\n"
+ "zip1 z5.b, z5.b, z25.b\n"
+ "st1b { z1.s }, p1, [x10, x13]\n"
+ "zip1 z18.b, z26.b, z30.b\n"
+ "zip2 z30.b, z26.b, z30.b\n"
+ "st1b { z3.s }, p1, [x9, x13]\n"
+ "zip2 z19.b, z29.b, z11.b\n"
+ "zip1 z29.b, z29.b, z11.b\n"
+ "incw x13\n"
+ "zip1 z28.b, z24.b, z7.b\n"
+ "zip2 z7.b, z24.b, z7.b\n"
+ "zip2 z25.b, z6.b, z22.b\n"
+ "zip1 z6.b, z6.b, z22.b\n"
+ "zip1 z22.b, z23.b, z4.b\n"
+ "zip2 z4.b, z23.b, z4.b\n"
+ "zip2 z23.b, z5.b, z18.b\n"
+ "zip1 z5.b, z5.b, z18.b\n"
+ "zip1 z3.b, z27.b, z30.b\n"
+ "zip2 z30.b, z27.b, z30.b\n"
+ "zip2 z11.b, z29.b, z28.b\n"
+ "zip1 z29.b, z29.b, z28.b\n"
+ "zip1 z16.b, z19.b, z7.b\n"
+ "zip2 z7.b, z19.b, z7.b\n"
+ "zip2 z1.b, z6.b, z22.b\n"
+ "zip1 z6.b, z6.b, z22.b\n"
+ "zip1 z27.b, z25.b, z4.b\n"
+ "zip2 z4.b, z25.b, z4.b\n"
+ "mov z26.d, z0.d\n"
+ "mov z25.d, z0.d\n"
+ "mov z28.d, z0.d\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 8ac522dc9a..08ef1d3aeb 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
const int8_t *inptrs[16];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const int8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -91,316 +91,316 @@ void sve_s8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x16, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x17, #0x0\n"
+ "ldr x26, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x16\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x16, [%x[params], %[offsetof_Params_outptrs]]\n"
"ldr x15, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z12.b }, p4/Z, [x21]\n"
- "ld1rb { z30.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z24.h }, p4/Z, [x22]\n"
- "ld1rh { z11.h }, p4/Z, [x21]\n"
- "ld1rh { z26.h }, p4/Z, [x20]\n"
- "ldp x13, x12, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x16, x15\n"
- "ldp x11, x10, [x24, #0x10]\n"
- "whilelt p2.s, x16, x15\n"
- "whilelt p1.s, x23, x15\n"
- "ldr x9, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z14.h }, p4/Z, [x14]\n"
- "ld1sb { z21.h }, p4/Z, [x14, #1, MUL VL]\n"
- "add x28, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x27, #0x0\n"
- "ld1sb { z1.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "add x13, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x12, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x17\n"
+ "add x20, x26, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x26, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x26, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z10.b }, p4/Z, [x20]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x26, %[offsetof_Requantize32_minval]\n"
+ "add x20, x26, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z15.b }, p4/Z, [x23]\n"
+ "ld1rh { z26.h }, p4/Z, [x22]\n"
+ "ld1rh { z2.h }, p4/Z, [x21]\n"
+ "ld1rh { z14.h }, p4/Z, [x20]\n"
+ "incw x24\n"
+ "whilelt p3.h, x17, x15\n"
+ "ldp x9, x28, [x16, #0x0]\n"
+ "ldp x27, x26, [x16, #0x10]\n"
+ "whilelt p2.s, x17, x15\n"
+ "whilelt p1.s, x24, x15\n"
+ "ld1sb { z13.h }, p4/Z, [x14]\n"
+ "ld1sb { z11.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1sb { z18.h }, p4/Z, [x14, #2, MUL VL]\n"
"ld1sb { z6.h }, p4/Z, [x14, #3, MUL VL]\n"
- ".inst 0x455e11ce // ssublb z14.h, z14.b, z30.b\n"
- ".inst 0x455e12b5 // ssublb z21.h, z21.b, z30.b\n"
- "ld1sb { z2.h }, p4/Z, [x14, #4, MUL VL]\n"
- "ld1sb { z18.h }, p4/Z, [x14, #5, MUL VL]\n"
- ".inst 0x455e1021 // ssublb z1.h, z1.b, z30.b\n"
- ".inst 0x455e10c6 // ssublb z6.h, z6.b, z30.b\n"
- "ld1sb { z7.h }, p4/Z, [x14, #6, MUL VL]\n"
- "ld1sb { z10.h }, p4/Z, [x14, #7, MUL VL]\n"
+ "ld1sb { z20.h }, p4/Z, [x14, #4, MUL VL]\n"
+ "ld1sb { z30.h }, p4/Z, [x14, #5, MUL VL]\n"
+ "ld1sb { z28.h }, p4/Z, [x14, #6, MUL VL]\n"
+ "ld1sb { z17.h }, p4/Z, [x14, #7, MUL VL]\n"
"inch x14, ALL, MUL #8\n"
- ".inst 0x455e1042 // ssublb z2.h, z2.b, z30.b\n"
- "ld1w { z17.s }, p2/Z, [x9]\n"
- "ld1w { z16.s }, p1/Z, [x9, #1, MUL VL]\n"
- "uzp1 z5.s, z17.s, z16.s\n"
- "uzp2 z9.s, z17.s, z16.s\n"
- "ld1sb { z8.h }, p4/Z, [x14]\n"
- "ldp x24, x23, [x28, #0x0]\n"
- "addvl x9, x9, #2\n"
- "mov z17.d, z5.d\n"
- "ldp x22, x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x20]\n"
- "mov z25.d, z9.d\n"
- "mov z16.d, z5.d\n"
- "ld1sb { z0.h }, p3/Z, [x24, x16]\n"
- "ld1sb { z29.h }, p3/Z, [x23, x16]\n"
- "mov z23.d, z9.d\n"
- "mov z22.d, z5.d\n"
- "ld1sb { z4.h }, p3/Z, [x22, x16]\n"
- "ld1sb { z13.h }, p3/Z, [x21, x16]\n"
- "mov z27.d, z9.d\n"
- ".inst 0x455e1252 // ssublb z18.h, z18.b, z30.b\n"
- "ld1sb { z20.h }, p3/Z, [x20, x16]\n"
- "ldr x26, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x455e10e7 // ssublb z7.h, z7.b, z30.b\n"
- ".inst 0x455e114a // ssublb z10.h, z10.b, z30.b\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x9, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455e1108 // ssublb z8.h, z8.b, z30.b\n"
- ".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
- ".inst 0x454c13bd // ssublb z29.h, z29.b, z12.b\n"
- ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
- ".inst 0x454c11ad // ssublb z13.h, z13.b, z12.b\n"
- ".inst 0x454c1294 // ssublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454f11ad // ssublb z13.h, z13.b, z15.b\n"
+ "ld1w { z19.s }, p2/Z, [x25]\n"
+ "ld1w { z24.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ ".inst 0x454f116b // ssublb z11.h, z11.b, z15.b\n"
+ ".inst 0x454f1252 // ssublb z18.h, z18.b, z15.b\n"
+ ".inst 0x454f10c6 // ssublb z6.h, z6.b, z15.b\n"
+ "ld1sb { z5.h }, p4/Z, [x14]\n"
+ "ldp x24, x23, [x13, #0x0]\n"
+ ".inst 0x454f1294 // ssublb z20.h, z20.b, z15.b\n"
+ ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
+ "uzp1 z3.s, z19.s, z24.s\n"
+ "uzp2 z16.s, z19.s, z24.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldp x22, x21, [x13, #0x10]\n"
+ ".inst 0x454f139c // ssublb z28.h, z28.b, z15.b\n"
+ ".inst 0x454f1231 // ssublb z17.h, z17.b, z15.b\n"
+ ".inst 0x454f10a5 // ssublb z5.h, z5.b, z15.b\n"
+ "ldr x20, [x13, #0x20]\n"
+ "ld1sb { z7.h }, p3/Z, [x24, x17]\n"
+ "ld1sb { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1sb { z4.h }, p3/Z, [x22, x17]\n"
+ "mov z8.d, z3.d\n"
+ "mov z21.d, z16.d\n"
+ "ld1sb { z1.h }, p3/Z, [x21, x17]\n"
+ "mov z0.d, z3.d\n"
+ "mov z29.d, z16.d\n"
+ "ld1sb { z27.h }, p3/Z, [x20, x17]\n"
+ "mov z19.d, z3.d\n"
+ "mov z9.d, z16.d\n"
+ ".inst 0x454a10e7 // ssublb z7.h, z7.b, z10.b\n"
+ ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
+ ".inst 0x454a1084 // ssublb z4.h, z4.b, z10.b\n"
+ ".inst 0x454a1021 // ssublb z1.h, z1.b, z10.b\n"
+ ".inst 0x454a137b // ssublb z27.h, z27.b, z10.b\n"
"1:" // Loop
- ".inst 0x44824005 // smlalb z5.s, p4/M, z0.h, z2.h\n"
- ".inst 0x44824409 // smlalt z9.s, p4/M, z0.h, z2.h\n"
- "ldr x20, [x28, #0x28]\n"
- "ldr x21, [x28, #0x38]\n"
- ".inst 0x448e43a5 // smlalb z5.s, p4/M, z29.h, z14.h\n"
- ".inst 0x44864011 // smlalb z17.s, p4/M, z0.h, z6.h\n"
- "ld1sb { z3.h }, p3/Z, [x20, x16]\n"
- "ldr x20, [x28, #0x30]\n"
- ".inst 0x44954010 // smlalb z16.s, p4/M, z0.h, z21.h\n"
- ".inst 0x448e4016 // smlalb z22.s, p4/M, z0.h, z14.h\n"
- "ld1sb { z31.h }, p3/Z, [x21, x16]\n"
- ".inst 0x454c1063 // ssublb z3.h, z3.b, z12.b\n"
- ".inst 0x448e47a9 // smlalt z9.s, p4/M, z29.h, z14.h\n"
- ".inst 0x449241a5 // smlalb z5.s, p4/M, z13.h, z18.h\n"
- "ldr x21, [x28, #0x40]\n"
- "ld1sb { z15.h }, p3/Z, [x20, x16]\n"
- ".inst 0x44864419 // smlalt z25.s, p4/M, z0.h, z6.h\n"
- ".inst 0x44954417 // smlalt z23.s, p4/M, z0.h, z21.h\n"
- ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
- "ldr x20, [x28, #0x48]\n"
- ".inst 0x448e441b // smlalt z27.s, p4/M, z0.h, z14.h\n"
- ".inst 0x44814091 // smlalb z17.s, p4/M, z4.h, z1.h\n"
- "ld1sb { z19.h }, p3/Z, [x21, x16]\n"
- ".inst 0x454c11ef // ssublb z15.h, z15.b, z12.b\n"
- ".inst 0x448141b0 // smlalb z16.s, p4/M, z13.h, z1.h\n"
- ".inst 0x449541b6 // smlalb z22.s, p4/M, z13.h, z21.h\n"
- "ld1sb { z28.h }, p3/Z, [x20, x16]\n"
- ".inst 0x454c1273 // ssublb z19.h, z19.b, z12.b\n"
- ".inst 0x449245a9 // smlalt z9.s, p4/M, z13.h, z18.h\n"
- ".inst 0x448a4285 // smlalb z5.s, p4/M, z20.h, z10.h\n"
- "ldr x21, [x28, #0x50]\n"
- "ldr x20, [x28, #0x58]\n"
- ".inst 0x44814499 // smlalt z25.s, p4/M, z4.h, z1.h\n"
- ".inst 0x448145b7 // smlalt z23.s, p4/M, z13.h, z1.h\n"
- ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
- "ld1sb { z4.h }, p3/Z, [x21, x16]\n"
- ".inst 0x449545bb // smlalt z27.s, p4/M, z13.h, z21.h\n"
- ".inst 0x448241b1 // smlalb z17.s, p4/M, z13.h, z2.h\n"
- "ld1sb { z29.h }, p3/Z, [x20, x16]\n"
- "ldr x21, [x28, #0x60]\n"
- ".inst 0x44874070 // smlalb z16.s, p4/M, z3.h, z7.h\n"
- ".inst 0x44864296 // smlalb z22.s, p4/M, z20.h, z6.h\n"
- "ldr x20, [x28, #0x68]\n"
- ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
- ".inst 0x448a4689 // smlalt z9.s, p4/M, z20.h, z10.h\n"
- ".inst 0x449543e5 // smlalb z5.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454c13bd // ssublb z29.h, z29.b, z12.b\n"
- "ld1sb { z0.h }, p3/Z, [x21, x16]\n"
- ".inst 0x448245b9 // smlalt z25.s, p4/M, z13.h, z2.h\n"
- ".inst 0x44874477 // smlalt z23.s, p4/M, z3.h, z7.h\n"
- "ld1sb { z3.h }, p3/Z, [x20, x16]\n"
- "ldr x20, [x28, #0x70]\n"
- ".inst 0x4486469b // smlalt z27.s, p4/M, z20.h, z6.h\n"
- ".inst 0x44874291 // smlalb z17.s, p4/M, z20.h, z7.h\n"
- ".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
- "ld1sb { z13.h }, p3/Z, [x20, x16]\n"
- ".inst 0x44824290 // smlalb z16.s, p4/M, z20.h, z2.h\n"
- ".inst 0x448841f6 // smlalb z22.s, p4/M, z15.h, z8.h\n"
- ".inst 0x454c1063 // ssublb z3.h, z3.b, z12.b\n"
- "ldr x20, [x28, #0x78]\n"
- ".inst 0x449547e9 // smlalt z9.s, p4/M, z31.h, z21.h\n"
- ".inst 0x44814265 // smlalb z5.s, p4/M, z19.h, z1.h\n"
- ".inst 0x454c11ad // ssublb z13.h, z13.b, z12.b\n"
- "whilelt p0.h, x27, x15\n"
- ".inst 0x44874699 // smlalt z25.s, p4/M, z20.h, z7.h\n"
- ".inst 0x44824697 // smlalt z23.s, p4/M, z20.h, z2.h\n"
- "ld1w { z20.s }, p2/Z, [x26]\n"
+ ".inst 0x449440e3 // smlalb z3.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x449444f0 // smlalt z16.s, p4/M, z7.h, z20.h\n"
+ "ldr x25, [x13, #0x28]\n"
+ "ldr x24, [x13, #0x38]\n"
+ ".inst 0x448640e8 // smlalb z8.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x448b40e0 // smlalb z0.s, p4/M, z7.h, z11.h\n"
+ "ldr x23, [x13, #0x30]\n"
+ "ldr x22, [x13, #0x40]\n"
+ ".inst 0x448d40f3 // smlalb z19.s, p4/M, z7.h, z13.h\n"
+ ".inst 0x448644f5 // smlalt z21.s, p4/M, z7.h, z6.h\n"
+ "ldr x20, [x13, #0x48]\n"
+ "ldr x21, [x13, #0x50]\n"
+ "ld1sb { z22.h }, p3/Z, [x25, x17]\n"
+ ".inst 0x448b44fd // smlalt z29.s, p4/M, z7.h, z11.h\n"
+ ".inst 0x448d44e9 // smlalt z9.s, p4/M, z7.h, z13.h\n"
+ "ld1sb { z31.h }, p3/Z, [x24, x17]\n"
+ ".inst 0x448d4303 // smlalb z3.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x448d4710 // smlalt z16.s, p4/M, z24.h, z13.h\n"
+ "ld1sb { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1sb { z25.h }, p3/Z, [x22, x17]\n"
+ ".inst 0x44924088 // smlalb z8.s, p4/M, z4.h, z18.h\n"
+ ".inst 0x44924020 // smlalb z0.s, p4/M, z1.h, z18.h\n"
+ "ld1sb { z23.h }, p3/Z, [x20, x17]\n"
+ "ldr x20, [x13, #0x58]\n"
+ ".inst 0x448b4033 // smlalb z19.s, p4/M, z1.h, z11.h\n"
+ ".inst 0x454a12d6 // ssublb z22.h, z22.b, z10.b\n"
+ ".inst 0x44924495 // smlalt z21.s, p4/M, z4.h, z18.h\n"
+ "ld1sb { z12.h }, p3/Z, [x21, x17]\n"
+ ".inst 0x4492443d // smlalt z29.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448b4429 // smlalt z9.s, p4/M, z1.h, z11.h\n"
+ ".inst 0x454a13ff // ssublb z31.h, z31.b, z10.b\n"
+ "ldr x21, [x13, #0x60]\n"
+ ".inst 0x449e4023 // smlalb z3.s, p4/M, z1.h, z30.h\n"
+ ".inst 0x449e4430 // smlalt z16.s, p4/M, z1.h, z30.h\n"
+ ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
+ "ld1sb { z4.h }, p3/Z, [x20, x17]\n"
+ ".inst 0x44944028 // smlalb z8.s, p4/M, z1.h, z20.h\n"
+ ".inst 0x449c42c0 // smlalb z0.s, p4/M, z22.h, z28.h\n"
+ ".inst 0x454a1339 // ssublb z25.h, z25.b, z10.b\n"
+ "ldr x20, [x13, #0x68]\n"
+ ".inst 0x44864373 // smlalb z19.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x44944435 // smlalt z21.s, p4/M, z1.h, z20.h\n"
+ ".inst 0x454a12f7 // ssublb z23.h, z23.b, z10.b\n"
+ "ld1sb { z7.h }, p3/Z, [x21, x17]\n"
+ ".inst 0x449c46dd // smlalt z29.s, p4/M, z22.h, z28.h\n"
+ ".inst 0x44864769 // smlalt z9.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x454a118c // ssublb z12.h, z12.b, z10.b\n"
+ "ldr x21, [x13, #0x70]\n"
+ ".inst 0x44914363 // smlalb z3.s, p4/M, z27.h, z17.h\n"
+ ".inst 0x44914770 // smlalt z16.s, p4/M, z27.h, z17.h\n"
+ ".inst 0x454a1084 // ssublb z4.h, z4.b, z10.b\n"
+ "ld1sb { z22.h }, p3/Z, [x20, x17]\n"
+ ".inst 0x449c4368 // smlalb z8.s, p4/M, z27.h, z28.h\n"
+ ".inst 0x44944360 // smlalb z0.s, p4/M, z27.h, z20.h\n"
+ ".inst 0x454a10e7 // ssublb z7.h, z7.b, z10.b\n"
+ "ldr x20, [x13, #0x78]\n"
+ ".inst 0x44854313 // smlalb z19.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x449c4775 // smlalt z21.s, p4/M, z27.h, z28.h\n"
+ "ld1sb { z1.h }, p3/Z, [x21, x17]\n"
+ "whilelt p0.h, x12, x15\n"
+ ".inst 0x4494477d // smlalt z29.s, p4/M, z27.h, z20.h\n"
+ ".inst 0x44854709 // smlalt z9.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x454a12d6 // ssublb z22.h, z22.b, z10.b\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
+ ".inst 0x448b43e3 // smlalb z3.s, p4/M, z31.h, z11.h\n"
+ ".inst 0x448b47f0 // smlalt z16.s, p4/M, z31.h, z11.h\n"
+ "ld1w { z27.s }, p1/Z, [x11, #1, MUL VL]\n"
"inch x14\n"
- ".inst 0x448845fb // smlalt z27.s, p4/M, z15.h, z8.h\n"
- ".inst 0x448e43f1 // smlalb z17.s, p4/M, z31.h, z14.h\n"
- "ld1w { z15.s }, p1/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x448d43e8 // smlalb z8.s, p4/M, z31.h, z13.h\n"
+ ".inst 0x449e42e0 // smlalb z0.s, p4/M, z23.h, z30.h\n"
+ ".inst 0x454a1021 // ssublb z1.h, z1.b, z10.b\n"
"ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44924390 // smlalb z16.s, p4/M, z28.h, z18.h\n"
- ".inst 0x44824396 // smlalb z22.s, p4/M, z28.h, z2.h\n"
- "addvl x26, x26, #2\n"
- ".inst 0x44814669 // smlalt z9.s, p4/M, z19.h, z1.h\n"
- ".inst 0x44884385 // smlalb z5.s, p4/M, z28.h, z8.h\n"
- ".inst 0x448e47f9 // smlalt z25.s, p4/M, z31.h, z14.h\n"
- ".inst 0x44924797 // smlalt z23.s, p4/M, z28.h, z18.h\n"
- "ld1sb { z31.h }, p3/Z, [x20, x16]\n"
- ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
- ".inst 0x4482479b // smlalt z27.s, p4/M, z28.h, z2.h\n"
- ".inst 0x44954271 // smlalb z17.s, p4/M, z19.h, z21.h\n"
- "uzp1 z2.s, z20.s, z15.s\n"
- "inch x16\n"
- ".inst 0x448e4090 // smlalb z16.s, p4/M, z4.h, z14.h\n"
- ".inst 0x448143b6 // smlalb z22.s, p4/M, z29.h, z1.h\n"
- "uzp2 z15.s, z20.s, z15.s\n"
- "ld1w { z20.s }, p2/Z, [x25]\n"
- ".inst 0x44884789 // smlalt z9.s, p4/M, z28.h, z8.h\n"
- ".inst 0x44864085 // smlalb z5.s, p4/M, z4.h, z6.h\n"
- "mov x20, x16\n"
+ ".inst 0x449442f3 // smlalb z19.s, p4/M, z23.h, z20.h\n"
+ ".inst 0x448d47f5 // smlalt z21.s, p4/M, z31.h, z13.h\n"
+ "ld1sb { z31.h }, p3/Z, [x20, x17]\n"
+ "inch x17\n"
+ ".inst 0x449e46fd // smlalt z29.s, p4/M, z23.h, z30.h\n"
+ ".inst 0x449446e9 // smlalt z9.s, p4/M, z23.h, z20.h\n"
+ "uzp1 z20.s, z24.s, z27.s\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x44924323 // smlalb z3.s, p4/M, z25.h, z18.h\n"
+ ".inst 0x44924730 // smlalt z16.s, p4/M, z25.h, z18.h\n"
+ "uzp2 z24.s, z24.s, z27.s\n"
+ "ld1w { z27.s }, p2/Z, [x10]\n"
+ ".inst 0x448b4328 // smlalb z8.s, p4/M, z25.h, z11.h\n"
+ ".inst 0x448d4180 // smlalb z0.s, p4/M, z12.h, z13.h\n"
+ ".inst 0x454a13ff // ssublb z31.h, z31.b, z10.b\n"
+ "mov x20, x17\n"
+ ".inst 0x44924093 // smlalb z19.s, p4/M, z4.h, z18.h\n"
+ ".inst 0x448b4735 // smlalt z21.s, p4/M, z25.h, z11.h\n"
+ "ld1w { z25.s }, p1/Z, [x10, #1, MUL VL]\n"
+ "whilelt p2.s, x17, x15\n"
+ ".inst 0x448d459d // smlalt z29.s, p4/M, z12.h, z13.h\n"
+ ".inst 0x44924489 // smlalt z9.s, p4/M, z4.h, z18.h\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x448542e3 // smlalb z3.s, p4/M, z23.h, z5.h\n"
+ ".inst 0x448546f0 // smlalt z16.s, p4/M, z23.h, z5.h\n"
"incw x20\n"
- ".inst 0x44954679 // smlalt z25.s, p4/M, z19.h, z21.h\n"
- ".inst 0x448e4497 // smlalt z23.s, p4/M, z4.h, z14.h\n"
- "ld1w { z19.s }, p1/Z, [x25, #1, MUL VL]\n"
- "uzp1 z21.s, z20.s, z19.s\n"
- ".inst 0x448147bb // smlalt z27.s, p4/M, z29.h, z1.h\n"
- ".inst 0x448a4391 // smlalb z17.s, p4/M, z28.h, z10.h\n"
- "uzp2 z1.s, z20.s, z19.s\n"
- "whilelt p2.s, x16, x15\n"
- ".inst 0x44864010 // smlalb z16.s, p4/M, z0.h, z6.h\n"
- ".inst 0x44924076 // smlalb z22.s, p4/M, z3.h, z18.h\n"
+ ".inst 0x449142e8 // smlalb z8.s, p4/M, z23.h, z17.h\n"
+ ".inst 0x448640e0 // smlalb z0.s, p4/M, z7.h, z6.h\n"
+ "uzp1 z11.s, z27.s, z25.s\n"
+ ".inst 0x449e42d3 // smlalb z19.s, p4/M, z22.h, z30.h\n"
+ ".inst 0x449146f5 // smlalt z21.s, p4/M, z23.h, z17.h\n"
+ "uzp2 z27.s, z27.s, z25.s\n"
+ ".inst 0x448644fd // smlalt z29.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x449e46c9 // smlalt z9.s, p4/M, z22.h, z30.h\n"
"whilelt p1.s, x20, x15\n"
- "whilelt p3.h, x16, x15\n"
- ".inst 0x44864489 // smlalt z9.s, p4/M, z4.h, z6.h\n"
- ".inst 0x44874005 // smlalb z5.s, p4/M, z0.h, z7.h\n"
- ".inst 0x04a274a5 // sqrdmulh z5.s, z5.s, z2.s\n"
- "addvl x25, x25, #2\n"
- ".inst 0x448a4799 // smlalt z25.s, p4/M, z28.h, z10.h\n"
- ".inst 0x44864417 // smlalt z23.s, p4/M, z0.h, z6.h\n"
- "and z19.d, z5.d, z21.d\n"
- ".inst 0x4492447b // smlalt z27.s, p4/M, z3.h, z18.h\n"
- ".inst 0x449243b1 // smlalb z17.s, p4/M, z29.h, z18.h\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448a41b0 // smlalb z16.s, p4/M, z13.h, z10.h\n"
- ".inst 0x448741b6 // smlalb z22.s, p4/M, z13.h, z7.h\n"
- "sqadd z5.s, z5.s, z19.s\n"
- ".inst 0x448292a5 // srshl z5.s, p4/M, z5.s, z21.s\n"
- ".inst 0x44874409 // smlalt z9.s, p4/M, z0.h, z7.h\n"
- ".inst 0x449247b9 // smlalt z25.s, p4/M, z29.h, z18.h\n"
- ".inst 0x04af7529 // sqrdmulh z9.s, z9.s, z15.s\n"
- ".inst 0x448a45b7 // smlalt z23.s, p4/M, z13.h, z10.h\n"
- ".inst 0x448745bb // smlalt z27.s, p4/M, z13.h, z7.h\n"
- "and z29.d, z9.d, z1.d\n"
- ".inst 0x44884071 // smlalb z17.s, p4/M, z3.h, z8.h\n"
- ".inst 0x448843f0 // smlalb z16.s, p4/M, z31.h, z8.h\n"
- ".inst 0x04a27631 // sqrdmulh z17.s, z17.s, z2.s\n"
- ".inst 0x448a43f6 // smlalb z22.s, p4/M, z31.h, z10.h\n"
- ".inst 0x44884479 // smlalt z25.s, p4/M, z3.h, z8.h\n"
- ".inst 0x04a27610 // sqrdmulh z16.s, z16.s, z2.s\n"
- ".inst 0x448847f7 // smlalt z23.s, p4/M, z31.h, z8.h\n"
- ".inst 0x448a47fb // smlalt z27.s, p4/M, z31.h, z10.h\n"
- ".inst 0x04a276d6 // sqrdmulh z22.s, z22.s, z2.s\n"
- "asr z29.s, z29.s, #0x1f\n"
- "and z18.d, z17.d, z21.d\n"
- ".inst 0x04af7739 // sqrdmulh z25.s, z25.s, z15.s\n"
- "and z20.d, z16.d, z21.d\n"
- ".inst 0x04af76f7 // sqrdmulh z23.s, z23.s, z15.s\n"
- "and z19.d, z22.d, z21.d\n"
- ".inst 0x04af777b // sqrdmulh z27.s, z27.s, z15.s\n"
- "sqadd z9.s, z9.s, z29.s\n"
- ".inst 0x44829029 // srshl z9.s, p4/M, z9.s, z1.s\n"
- "asr z18.s, z18.s, #0x1f\n"
- "and z7.d, z25.d, z1.d\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z6.d, z23.d, z1.d\n"
- "asr z19.s, z19.s, #0x1f\n"
- "and z2.d, z27.d, z1.d\n"
- "sqadd z17.s, z17.s, z18.s\n"
- "asr z7.s, z7.s, #0x1f\n"
- ".inst 0x448292b1 // srshl z17.s, p4/M, z17.s, z21.s\n"
- "sqadd z16.s, z16.s, z20.s\n"
+ "whilelt p3.h, x17, x15\n"
+ ".inst 0x44864183 // smlalb z3.s, p4/M, z12.h, z6.h\n"
+ ".inst 0x44864590 // smlalt z16.s, p4/M, z12.h, z6.h\n"
+ ".inst 0x449e4088 // smlalb z8.s, p4/M, z4.h, z30.h\n"
+ ".inst 0x44914020 // smlalb z0.s, p4/M, z1.h, z17.h\n"
+ ".inst 0x449c4033 // smlalb z19.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449e4495 // smlalt z21.s, p4/M, z4.h, z30.h\n"
+ ".inst 0x4491443d // smlalt z29.s, p4/M, z1.h, z17.h\n"
+ ".inst 0x449c4429 // smlalt z9.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c40e3 // smlalb z3.s, p4/M, z7.h, z28.h\n"
+ ".inst 0x449c44f0 // smlalt z16.s, p4/M, z7.h, z28.h\n"
+ ".inst 0x448542c8 // smlalb z8.s, p4/M, z22.h, z5.h\n"
+ ".inst 0x448543e0 // smlalb z0.s, p4/M, z31.h, z5.h\n"
+ ".inst 0x449143f3 // smlalb z19.s, p4/M, z31.h, z17.h\n"
+ ".inst 0x448546d5 // smlalt z21.s, p4/M, z22.h, z5.h\n"
+ ".inst 0x448547fd // smlalt z29.s, p4/M, z31.h, z5.h\n"
+ ".inst 0x449147e9 // smlalt z9.s, p4/M, z31.h, z17.h\n"
+ ".inst 0x04b47463 // sqrdmulh z3.s, z3.s, z20.s\n"
+ ".inst 0x04b87610 // sqrdmulh z16.s, z16.s, z24.s\n"
+ ".inst 0x04b47508 // sqrdmulh z8.s, z8.s, z20.s\n"
+ ".inst 0x04b47400 // sqrdmulh z0.s, z0.s, z20.s\n"
+ "and z4.d, z3.d, z11.d\n"
+ ".inst 0x04b47673 // sqrdmulh z19.s, z19.s, z20.s\n"
+ ".inst 0x04b876b5 // sqrdmulh z21.s, z21.s, z24.s\n"
+ "and z13.d, z16.d, z27.d\n"
+ "and z6.d, z8.d, z11.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "and z7.d, z0.d, z11.d\n"
+ ".inst 0x04b877bd // sqrdmulh z29.s, z29.s, z24.s\n"
+ ".inst 0x04b87529 // sqrdmulh z9.s, z9.s, z24.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- ".inst 0x448292b0 // srshl z16.s, p4/M, z16.s, z21.s\n"
- "sqadd z22.s, z22.s, z19.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- ".inst 0x448292b6 // srshl z22.s, p4/M, z22.s, z21.s\n"
- "sqadd z25.s, z25.s, z7.s\n"
- "sqadd z23.s, z23.s, z6.s\n"
- ".inst 0x44829039 // srshl z25.s, p4/M, z25.s, z1.s\n"
- ".inst 0x44829037 // srshl z23.s, p4/M, z23.s, z1.s\n"
- "sqadd z27.s, z27.s, z2.s\n"
- ".inst 0x453040a5 // sqxtnb z5.h, z5.s\n"
- ".inst 0x4482903b // srshl z27.s, p4/M, z27.s, z1.s\n"
- ".inst 0x45304231 // sqxtnb z17.h, z17.s\n"
- ".inst 0x45304210 // sqxtnb z16.h, z16.s\n"
- ".inst 0x453042d6 // sqxtnb z22.h, z22.s\n"
- ".inst 0x45304525 // sqxtnt z5.h, z9.s\n"
- ".inst 0x45304731 // sqxtnt z17.h, z25.s\n"
- ".inst 0x453046f0 // sqxtnt z16.h, z23.s\n"
- ".inst 0x45304776 // sqxtnt z22.h, z27.s\n"
- "sqadd z5.h, z5.h, z24.h\n"
- "smax z5.h, p4/M, z5.h, z11.h\n"
- "smin z5.h, p4/M, z5.h, z26.h\n"
- "sqadd z17.h, z17.h, z24.h\n"
- "sqadd z16.h, z16.h, z24.h\n"
- "smax z17.h, p4/M, z17.h, z11.h\n"
- "smax z16.h, p4/M, z16.h, z11.h\n"
- "sqadd z22.h, z22.h, z24.h\n"
- "smax z22.h, p4/M, z22.h, z11.h\n"
- "smin z17.h, p4/M, z17.h, z26.h\n"
- "st1b { z5.h }, p0, [x13, x27]\n"
- "smin z16.h, p4/M, z16.h, z26.h\n"
- "smin z22.h, p4/M, z22.h, z26.h\n"
- "st1b { z17.h }, p0, [x12, x27]\n"
- "st1b { z16.h }, p0, [x11, x27]\n"
- "st1b { z22.h }, p0, [x10, x27]\n"
- "ld1sb { z14.h }, p4/Z, [x14]\n"
- "ld1sb { z21.h }, p4/Z, [x14, #1, MUL VL]\n"
- "inch x27\n"
- "ld1sb { z1.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "sqadd z3.s, z3.s, z4.s\n"
+ "and z20.d, z19.d, z11.d\n"
+ "and z18.d, z21.d, z27.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z13.s\n"
+ "and z13.d, z29.d, z27.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "and z23.d, z9.d, z27.d\n"
+ ".inst 0x44829163 // srshl z3.s, p4/M, z3.s, z11.s\n"
+ "sqadd z8.s, z8.s, z6.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z0.s, z0.s, z7.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
+ ".inst 0x44829370 // srshl z16.s, p4/M, z16.s, z27.s\n"
+ "sqadd z19.s, z19.s, z20.s\n"
+ "asr z23.s, z23.s, #0x1f\n"
+ ".inst 0x44829168 // srshl z8.s, p4/M, z8.s, z11.s\n"
+ "sqadd z21.s, z21.s, z18.s\n"
+ ".inst 0x45304063 // sqxtnb z3.h, z3.s\n"
+ ".inst 0x44829160 // srshl z0.s, p4/M, z0.s, z11.s\n"
+ "sqadd z29.s, z29.s, z13.s\n"
+ ".inst 0x44829173 // srshl z19.s, p4/M, z19.s, z11.s\n"
+ "sqadd z9.s, z9.s, z23.s\n"
+ ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
+ ".inst 0x44829375 // srshl z21.s, p4/M, z21.s, z27.s\n"
+ ".inst 0x45304000 // sqxtnb z0.h, z0.s\n"
+ ".inst 0x45304603 // sqxtnt z3.h, z16.s\n"
+ ".inst 0x4482937d // srshl z29.s, p4/M, z29.s, z27.s\n"
+ ".inst 0x44829369 // srshl z9.s, p4/M, z9.s, z27.s\n"
+ ".inst 0x45304273 // sqxtnb z19.h, z19.s\n"
+ ".inst 0x453046a8 // sqxtnt z8.h, z21.s\n"
+ ".inst 0x453047a0 // sqxtnt z0.h, z29.s\n"
+ ".inst 0x45304533 // sqxtnt z19.h, z9.s\n"
+ "sqadd z3.h, z3.h, z26.h\n"
+ "sqadd z8.h, z8.h, z26.h\n"
+ "sqadd z0.h, z0.h, z26.h\n"
+ "sqadd z19.h, z19.h, z26.h\n"
+ "smax z3.h, p4/M, z3.h, z2.h\n"
+ "smax z8.h, p4/M, z8.h, z2.h\n"
+ "smax z0.h, p4/M, z0.h, z2.h\n"
+ "smax z19.h, p4/M, z19.h, z2.h\n"
+ "smin z3.h, p4/M, z3.h, z14.h\n"
+ "smin z8.h, p4/M, z8.h, z14.h\n"
+ "smin z0.h, p4/M, z0.h, z14.h\n"
+ "smin z19.h, p4/M, z19.h, z14.h\n"
+ "st1b { z3.h }, p0, [x9, x12]\n"
+ "st1b { z8.h }, p0, [x28, x12]\n"
+ "st1b { z0.h }, p0, [x27, x12]\n"
+ "st1b { z19.h }, p0, [x26, x12]\n"
+ "inch x12\n"
+ "ld1sb { z13.h }, p4/Z, [x14]\n"
+ "ld1sb { z11.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1sb { z18.h }, p4/Z, [x14, #2, MUL VL]\n"
"ld1sb { z6.h }, p4/Z, [x14, #3, MUL VL]\n"
- ".inst 0x455e11ce // ssublb z14.h, z14.b, z30.b\n"
- ".inst 0x455e12b5 // ssublb z21.h, z21.b, z30.b\n"
- "ld1sb { z2.h }, p4/Z, [x14, #4, MUL VL]\n"
- "ld1sb { z18.h }, p4/Z, [x14, #5, MUL VL]\n"
- ".inst 0x455e1021 // ssublb z1.h, z1.b, z30.b\n"
- ".inst 0x455e10c6 // ssublb z6.h, z6.b, z30.b\n"
- "ld1sb { z7.h }, p4/Z, [x14, #6, MUL VL]\n"
- "ld1sb { z10.h }, p4/Z, [x14, #7, MUL VL]\n"
+ "ld1sb { z20.h }, p4/Z, [x14, #4, MUL VL]\n"
+ "ld1sb { z30.h }, p4/Z, [x14, #5, MUL VL]\n"
+ "ld1sb { z28.h }, p4/Z, [x14, #6, MUL VL]\n"
+ "ld1sb { z17.h }, p4/Z, [x14, #7, MUL VL]\n"
"inch x14, ALL, MUL #8\n"
- ".inst 0x455e1042 // ssublb z2.h, z2.b, z30.b\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "ld1w { z16.s }, p1/Z, [x21, #1, MUL VL]\n"
- "uzp1 z5.s, z17.s, z16.s\n"
- "uzp2 z9.s, z17.s, z16.s\n"
- "ld1sb { z8.h }, p4/Z, [x14]\n"
- "ldp x24, x23, [x28, #0x0]\n"
+ ".inst 0x454f11ad // ssublb z13.h, z13.b, z15.b\n"
+ "ld1w { z1.s }, p2/Z, [x21]\n"
+ "ld1w { z0.s }, p1/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
+ ".inst 0x454f116b // ssublb z11.h, z11.b, z15.b\n"
+ ".inst 0x454f1252 // ssublb z18.h, z18.b, z15.b\n"
+ ".inst 0x454f10c6 // ssublb z6.h, z6.b, z15.b\n"
+ "ld1sb { z5.h }, p4/Z, [x14]\n"
+ "ldp x24, x23, [x13, #0x0]\n"
+ ".inst 0x454f1294 // ssublb z20.h, z20.b, z15.b\n"
+ ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
+ "uzp1 z3.s, z1.s, z0.s\n"
+ "uzp2 z16.s, z1.s, z0.s\n"
"str x21, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x22, x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x20]\n"
- "mov z17.d, z5.d\n"
- "mov z25.d, z9.d\n"
- "ld1sb { z0.h }, p3/Z, [x24, x16]\n"
- "ld1sb { z29.h }, p3/Z, [x23, x16]\n"
- "mov z16.d, z5.d\n"
- "mov z23.d, z9.d\n"
- "ld1sb { z4.h }, p3/Z, [x22, x16]\n"
- "ld1sb { z13.h }, p3/Z, [x21, x16]\n"
- "mov z22.d, z5.d\n"
- "mov z27.d, z9.d\n"
- "ld1sb { z20.h }, p3/Z, [x20, x16]\n"
- ".inst 0x455e1252 // ssublb z18.h, z18.b, z30.b\n"
- ".inst 0x455e10e7 // ssublb z7.h, z7.b, z30.b\n"
- ".inst 0x455e114a // ssublb z10.h, z10.b, z30.b\n"
- ".inst 0x455e1108 // ssublb z8.h, z8.b, z30.b\n"
- ".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
- ".inst 0x454c13bd // ssublb z29.h, z29.b, z12.b\n"
- ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
- ".inst 0x454c11ad // ssublb z13.h, z13.b, z12.b\n"
- ".inst 0x454c1294 // ssublb z20.h, z20.b, z12.b\n"
+ "ldp x22, x21, [x13, #0x10]\n"
+ ".inst 0x454f139c // ssublb z28.h, z28.b, z15.b\n"
+ ".inst 0x454f1231 // ssublb z17.h, z17.b, z15.b\n"
+ ".inst 0x454f10a5 // ssublb z5.h, z5.b, z15.b\n"
+ "ldr x20, [x13, #0x20]\n"
+ "ld1sb { z7.h }, p3/Z, [x24, x17]\n"
+ "ld1sb { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1sb { z4.h }, p3/Z, [x22, x17]\n"
+ "mov z8.d, z3.d\n"
+ "mov z21.d, z16.d\n"
+ "ld1sb { z1.h }, p3/Z, [x21, x17]\n"
+ "mov z0.d, z3.d\n"
+ "mov z29.d, z16.d\n"
+ "ld1sb { z27.h }, p3/Z, [x20, x17]\n"
+ "mov z19.d, z3.d\n"
+ "mov z9.d, z16.d\n"
+ ".inst 0x454a10e7 // ssublb z7.h, z7.b, z10.b\n"
+ ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
+ ".inst 0x454a1084 // ssublb z4.h, z4.b, z10.b\n"
+ ".inst 0x454a1021 // ssublb z1.h, z1.b, z10.b\n"
+ ".inst 0x454a137b // ssublb z27.h, z27.b, z10.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
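For reference, the sqrdmulh / and / asr / sqadd / srshl sequences that dominate these generated kernels implement gemmlowp-style per-channel requantization: a saturating rounding doubling multiply-high by the quantized multiplier, a sign fixup so the subsequent rounding right shift rounds halfway cases away from zero, then the c_offset add and the min/max clamp (the sqxtnb/sqxtnt variants narrow to halfwords before the offset and clamp). A minimal scalar sketch follows, assuming a non-negative right shift encoded as a negative SRSHL operand as these kernels do; requantize_scalar and its parameter names are illustrative, not the library's API.

#include <algorithm>
#include <cstdint>

// Scalar model of SQRDMULH: saturating rounding doubling multiply, high half.
static int32_t sqrdmulh(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX; // only saturating case
    const int64_t p = static_cast<int64_t>(a) * b * 2 + (int64_t{1} << 31);
    return static_cast<int32_t>(p >> 32);
}

// Hypothetical scalar equivalent of the vector requantize tail:
// SQRDMULH -> AND / ASR #31 / SQADD fixup -> SRSHL -> add c_offset -> clamp.
static int8_t requantize_scalar(int32_t acc, int32_t multiplier, int right_shift,
                                int32_t c_offset, int32_t minval, int32_t maxval)
{
    int32_t v = sqrdmulh(acc, multiplier);
    if (right_shift > 0)
    {
        if (v < 0) --v; // fixup: makes the rounding shift round ties away from zero
        v = (v + (1 << (right_shift - 1))) >> right_shift; // rounding right shift
    }
    v += c_offset; // the z21/z16/z26 broadcast adds in the asm above
    return static_cast<int8_t>(std::clamp(v, minval, maxval)); // smax/smin pair
}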
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index fc9a48bb46..f00e1aecaf 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
const int8_t *inptrs[25];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const int8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -100,348 +100,348 @@ void sve_s8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x7, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x8, #0x0\n"
+ "ldr x27, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x7\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
- "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z26.b }, p4/Z, [x21]\n"
- "ld1rb { z13.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z19.h }, p4/Z, [x22]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
+ "ldr x26, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x16, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x15, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x14, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x8\n"
+ "add x20, x27, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x27, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x27, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z17.b }, p4/Z, [x20]\n"
+ "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x27, %[offsetof_Requantize32_minval]\n"
+ "add x20, x27, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z12.b }, p4/Z, [x23]\n"
+ "ld1rh { z25.h }, p4/Z, [x22]\n"
+ "ld1rh { z14.h }, p4/Z, [x21]\n"
"ld1rh { z9.h }, p4/Z, [x20]\n"
- "ldp x16, x15, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x7, x8\n"
- "ldp x14, x13, [x24, #0x10]\n"
- "whilelt p2.s, x7, x8\n"
- "whilelt p1.s, x23, x8\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z25.h }, p4/Z, [x17]\n"
- "ld1sb { z30.h }, p4/Z, [x17, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
- "ld1sb { z14.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1sb { z4.h }, p4/Z, [x17, #3, MUL VL]\n"
- ".inst 0x454d1339 // ssublb z25.h, z25.b, z13.b\n"
- ".inst 0x454d13de // ssublb z30.h, z30.b, z13.b\n"
- "ld1sb { z10.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x17, #5, MUL VL]\n"
- ".inst 0x454d11ce // ssublb z14.h, z14.b, z13.b\n"
- ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
- "ld1sb { z23.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
- ".inst 0x454d114a // ssublb z10.h, z10.b, z13.b\n"
- "ld1w { z17.s }, p2/Z, [x12]\n"
- "ld1w { z16.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z8.s, z17.s, z16.s\n"
- "uzp2 z24.s, z17.s, z16.s\n"
- "ld1sb { z2.h }, p4/Z, [x17]\n"
- "ldp x27, x26, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "mov z18.d, z8.d\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z0.d, z24.d\n"
- "mov z15.d, z8.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1sb { z21.h }, p3/Z, [x27, x7]\n"
- "mov z1.d, z24.d\n"
- "mov z5.d, z8.d\n"
- "ld1sb { z22.h }, p3/Z, [x26, x7]\n"
- "ld1sb { z11.h }, p3/Z, [x25, x7]\n"
- "mov z6.d, z24.d\n"
- ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
- "ld1sb { z20.h }, p3/Z, [x24, x7]\n"
- "ld1sb { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x454d12f7 // ssublb z23.h, z23.b, z13.b\n"
- ".inst 0x454d10e7 // ssublb z7.h, z7.b, z13.b\n"
- "ld1sb { z28.h }, p3/Z, [x22, x7]\n"
- "ld1sb { z16.h }, p3/Z, [x21, x7]\n"
- ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
- ".inst 0x455a12b5 // ssublb z21.h, z21.b, z26.b\n"
- "ld1sb { z31.h }, p3/Z, [x20, x7]\n"
- "ldr x9, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x455a12d6 // ssublb z22.h, z22.b, z26.b\n"
- ".inst 0x455a116b // ssublb z11.h, z11.b, z26.b\n"
- "ldr x28, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455a1294 // ssublb z20.h, z20.b, z26.b\n"
- ".inst 0x455a137b // ssublb z27.h, z27.b, z26.b\n"
- ".inst 0x455a139c // ssublb z28.h, z28.b, z26.b\n"
- ".inst 0x455a1210 // ssublb z16.h, z16.b, z26.b\n"
- ".inst 0x455a13ff // ssublb z31.h, z31.b, z26.b\n"
+ "incw x24\n"
+ "whilelt p3.h, x8, x17\n"
+ "ldp x11, x10, [x26, #0x0]\n"
+ "ldp x9, x28, [x26, #0x10]\n"
+ "whilelt p2.s, x8, x17\n"
+ "whilelt p1.s, x24, x17\n"
+ "ld1sb { z28.h }, p4/Z, [x16]\n"
+ "ld1sb { z20.h }, p4/Z, [x16, #1, MUL VL]\n"
+ "ld1sb { z13.h }, p4/Z, [x16, #2, MUL VL]\n"
+ "ld1sb { z18.h }, p4/Z, [x16, #3, MUL VL]\n"
+ "ld1sb { z6.h }, p4/Z, [x16, #4, MUL VL]\n"
+ "ld1sb { z2.h }, p4/Z, [x16, #5, MUL VL]\n"
+ "ld1sb { z26.h }, p4/Z, [x16, #6, MUL VL]\n"
+ "ld1sb { z21.h }, p4/Z, [x16, #7, MUL VL]\n"
+ "inch x16, ALL, MUL #8\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ "ld1w { z11.s }, p2/Z, [x25]\n"
+ "ld1w { z4.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ ".inst 0x454c1294 // ssublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454c11ad // ssublb z13.h, z13.b, z12.b\n"
+ ".inst 0x454c1252 // ssublb z18.h, z18.b, z12.b\n"
+ "ld1sb { z15.h }, p4/Z, [x16]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ ".inst 0x454c10c6 // ssublb z6.h, z6.b, z12.b\n"
+ ".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ "uzp1 z5.s, z11.s, z4.s\n"
+ "uzp2 z11.s, z11.s, z4.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ ".inst 0x454c135a // ssublb z26.h, z26.b, z12.b\n"
+ ".inst 0x454c12b5 // ssublb z21.h, z21.b, z12.b\n"
+ ".inst 0x454c11ef // ssublb z15.h, z15.b, z12.b\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "mov z30.d, z5.d\n"
+ "mov z16.d, z11.d\n"
+ "mov z4.d, z5.d\n"
+ "mov z8.d, z11.d\n"
+ "mov z31.d, z5.d\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "mov z10.d, z11.d\n"
+ "ld1sb { z3.h }, p3/Z, [x27, x8]\n"
+ "ld1sb { z29.h }, p3/Z, [x26, x8]\n"
+ "ld1sb { z23.h }, p3/Z, [x25, x8]\n"
+ "ld1sb { z0.h }, p3/Z, [x24, x8]\n"
+ "ld1sb { z24.h }, p3/Z, [x23, x8]\n"
+ "ld1sb { z22.h }, p3/Z, [x22, x8]\n"
+ "ld1sb { z27.h }, p3/Z, [x21, x8]\n"
+ "ld1sb { z19.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
+ ".inst 0x455113bd // ssublb z29.h, z29.b, z17.b\n"
+ ".inst 0x455112f7 // ssublb z23.h, z23.b, z17.b\n"
+ ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x455112d6 // ssublb z22.h, z22.b, z17.b\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ ".inst 0x45511273 // ssublb z19.h, z19.b, z17.b\n"
"1:" // Loop
- ".inst 0x448242a8 // smlalb z8.s, p4/M, z21.h, z2.h\n"
- "ldr x21, [x11, #0x58]\n"
- "ldr x20, [x11, #0x78]\n"
- ".inst 0x448246b8 // smlalt z24.s, p4/M, z21.h, z2.h\n"
- ".inst 0x449942c8 // smlalb z8.s, p4/M, z22.h, z25.h\n"
- "ld1sb { z17.h }, p3/Z, [x21, x7]\n"
- "ld1sb { z29.h }, p3/Z, [x20, x7]\n"
- ".inst 0x449742b2 // smlalb z18.s, p4/M, z21.h, z23.h\n"
- "ldr x21, [x11, #0x60]\n"
- "ldr x20, [x11, #0x80]\n"
- ".inst 0x448e42af // smlalb z15.s, p4/M, z21.h, z14.h\n"
- ".inst 0x449942a5 // smlalb z5.s, p4/M, z21.h, z25.h\n"
- ".inst 0x449946d8 // smlalt z24.s, p4/M, z22.h, z25.h\n"
- ".inst 0x455a1231 // ssublb z17.h, z17.b, z26.b\n"
- ".inst 0x449e4168 // smlalb z8.s, p4/M, z11.h, z30.h\n"
- "ld1sb { z22.h }, p3/Z, [x21, x7]\n"
- ".inst 0x455a13bd // ssublb z29.h, z29.b, z26.b\n"
- ".inst 0x449746a0 // smlalt z0.s, p4/M, z21.h, z23.h\n"
- ".inst 0x448e46a1 // smlalt z1.s, p4/M, z21.h, z14.h\n"
- "ldr x21, [x11, #0x68]\n"
- ".inst 0x449946a6 // smlalt z6.s, p4/M, z21.h, z25.h\n"
- "ld1sb { z21.h }, p3/Z, [x20, x7]\n"
- "ldr x20, [x11, #0x88]\n"
- ".inst 0x449e4292 // smlalb z18.s, p4/M, z20.h, z30.h\n"
- ".inst 0x4484422f // smlalb z15.s, p4/M, z17.h, z4.h\n"
- ".inst 0x448a43a5 // smlalb z5.s, p4/M, z29.h, z10.h\n"
- ".inst 0x455a12d6 // ssublb z22.h, z22.b, z26.b\n"
- "ldr x22, [x11, #0x40]\n"
- ".inst 0x449e4578 // smlalt z24.s, p4/M, z11.h, z30.h\n"
- ".inst 0x455a12b5 // ssublb z21.h, z21.b, z26.b\n"
- ".inst 0x44844388 // smlalb z8.s, p4/M, z28.h, z4.h\n"
- "ld1sb { z11.h }, p3/Z, [x21, x7]\n"
- ".inst 0x449e4680 // smlalt z0.s, p4/M, z20.h, z30.h\n"
- "ld1sb { z20.h }, p3/Z, [x20, x7]\n"
- ".inst 0x44844621 // smlalt z1.s, p4/M, z17.h, z4.h\n"
- "ldr x21, [x11, #0x70]\n"
- ".inst 0x448a47a6 // smlalt z6.s, p4/M, z29.h, z10.h\n"
- "ldr x20, [x11, #0x98]\n"
- ".inst 0x448e4372 // smlalb z18.s, p4/M, z27.h, z14.h\n"
- "ldr x23, [x11, #0x50]\n"
- ".inst 0x449942cf // smlalb z15.s, p4/M, z22.h, z25.h\n"
- ".inst 0x449e42a5 // smlalb z5.s, p4/M, z21.h, z30.h\n"
- ".inst 0x455a116b // ssublb z11.h, z11.b, z26.b\n"
- "ld1sb { z17.h }, p3/Z, [x22, x7]\n"
- ".inst 0x44844798 // smlalt z24.s, p4/M, z28.h, z4.h\n"
- ".inst 0x455a1294 // ssublb z20.h, z20.b, z26.b\n"
- ".inst 0x448a4208 // smlalb z8.s, p4/M, z16.h, z10.h\n"
- "ld1sb { z29.h }, p3/Z, [x21, x7]\n"
- "ld1sb { z28.h }, p3/Z, [x20, x7]\n"
- ".inst 0x448e4760 // smlalt z0.s, p4/M, z27.h, z14.h\n"
- "ldr x22, [x11, #0x48]\n"
- ".inst 0x449946c1 // smlalt z1.s, p4/M, z22.h, z25.h\n"
- ".inst 0x449e46a6 // smlalt z6.s, p4/M, z21.h, z30.h\n"
- "ldr x21, [x11, #0x90]\n"
- "ldr x20, [x11, #0xa8]\n"
- ".inst 0x449943f2 // smlalb z18.s, p4/M, z31.h, z25.h\n"
- "ld1sb { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x448a416f // smlalb z15.s, p4/M, z11.h, z10.h\n"
- ".inst 0x44834285 // smlalb z5.s, p4/M, z20.h, z3.h\n"
- ".inst 0x455a1231 // ssublb z17.h, z17.b, z26.b\n"
- ".inst 0x448a4618 // smlalt z24.s, p4/M, z16.h, z10.h\n"
- ".inst 0x455a13bd // ssublb z29.h, z29.b, z26.b\n"
- ".inst 0x448e43e8 // smlalb z8.s, p4/M, z31.h, z14.h\n"
- "ld1sb { z16.h }, p3/Z, [x22, x7]\n"
- ".inst 0x455a139c // ssublb z28.h, z28.b, z26.b\n"
- ".inst 0x449947e0 // smlalt z0.s, p4/M, z31.h, z25.h\n"
- "ld1sb { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x448a4561 // smlalt z1.s, p4/M, z11.h, z10.h\n"
- "ld1sb { z11.h }, p3/Z, [x20, x7]\n"
- ".inst 0x455a137b // ssublb z27.h, z27.b, z26.b\n"
- ".inst 0x44834686 // smlalt z6.s, p4/M, z20.h, z3.h\n"
- "ldr x21, [x11, #0xa0]\n"
- "ldr x20, [x11, #0xb0]\n"
- ".inst 0x448a4232 // smlalb z18.s, p4/M, z17.h, z10.h\n"
- ".inst 0x449e43af // smlalb z15.s, p4/M, z29.h, z30.h\n"
- ".inst 0x455a1210 // ssublb z16.h, z16.b, z26.b\n"
- ".inst 0x448e4385 // smlalb z5.s, p4/M, z28.h, z14.h\n"
- ".inst 0x448e47f8 // smlalt z24.s, p4/M, z31.h, z14.h\n"
- ".inst 0x455a1339 // ssublb z25.h, z25.b, z26.b\n"
- "ld1sb { z20.h }, p3/Z, [x21, x7]\n"
- ".inst 0x455a116b // ssublb z11.h, z11.b, z26.b\n"
- ".inst 0x44834368 // smlalb z8.s, p4/M, z27.h, z3.h\n"
- "ld1sb { z31.h }, p3/Z, [x20, x7]\n"
- ".inst 0x448a4620 // smlalt z0.s, p4/M, z17.h, z10.h\n"
- ".inst 0x449e47a1 // smlalt z1.s, p4/M, z29.h, z30.h\n"
- ".inst 0x448e4786 // smlalt z6.s, p4/M, z28.h, z14.h\n"
- "ldr x20, [x11, #0xb8]\n"
- ".inst 0x455a1294 // ssublb z20.h, z20.b, z26.b\n"
- ".inst 0x44834212 // smlalb z18.s, p4/M, z16.h, z3.h\n"
- ".inst 0x4497432f // smlalb z15.s, p4/M, z25.h, z23.h\n"
- ".inst 0x455a13ff // ssublb z31.h, z31.b, z26.b\n"
- "ld1sb { z30.h }, p3/Z, [x20, x7]\n"
- ".inst 0x44844165 // smlalb z5.s, p4/M, z11.h, z4.h\n"
- ".inst 0x44834778 // smlalt z24.s, p4/M, z27.h, z3.h\n"
- "ldr x20, [x11, #0xc0]\n"
- "ld1w { z17.s }, p2/Z, [x9]\n"
- ".inst 0x449742c8 // smlalb z8.s, p4/M, z22.h, z23.h\n"
- ".inst 0x44834600 // smlalt z0.s, p4/M, z16.h, z3.h\n"
- "ld1w { z14.s }, p1/Z, [x9, #1, MUL VL]\n"
- ".inst 0x455a13de // ssublb z30.h, z30.b, z26.b\n"
- ".inst 0x44974721 // smlalt z1.s, p4/M, z25.h, z23.h\n"
- ".inst 0x44844566 // smlalt z6.s, p4/M, z11.h, z4.h\n"
- "ld1sb { z25.h }, p3/Z, [x20, x7]\n"
- "uzp1 z10.s, z17.s, z14.s\n"
- ".inst 0x44844372 // smlalb z18.s, p4/M, z27.h, z4.h\n"
- ".inst 0x4487428f // smlalb z15.s, p4/M, z20.h, z7.h\n"
- "uzp2 z14.s, z17.s, z14.s\n"
- "ld1w { z17.s }, p2/Z, [x28]\n"
- ".inst 0x448743e5 // smlalb z5.s, p4/M, z31.h, z7.h\n"
- ".inst 0x449746d8 // smlalt z24.s, p4/M, z22.h, z23.h\n"
- "ld1w { z16.s }, p1/Z, [x28, #1, MUL VL]\n"
- ".inst 0x455a1339 // ssublb z25.h, z25.b, z26.b\n"
- ".inst 0x448743a8 // smlalb z8.s, p4/M, z29.h, z7.h\n"
- ".inst 0x44844760 // smlalt z0.s, p4/M, z27.h, z4.h\n"
- "uzp1 z4.s, z17.s, z16.s\n"
- "inch x7\n"
- ".inst 0x44874681 // smlalt z1.s, p4/M, z20.h, z7.h\n"
- ".inst 0x448747e6 // smlalt z6.s, p4/M, z31.h, z7.h\n"
- ".inst 0x04aa7508 // sqrdmulh z8.s, z8.s, z10.s\n"
- "whilelt p0.h, x10, x8\n"
- ".inst 0x448742b2 // smlalb z18.s, p4/M, z21.h, z7.h\n"
- ".inst 0x4483416f // smlalb z15.s, p4/M, z11.h, z3.h\n"
- "uzp2 z22.s, z17.s, z16.s\n"
- "mov x20, x7\n"
- ".inst 0x449743c5 // smlalb z5.s, p4/M, z30.h, z23.h\n"
- ".inst 0x448747b8 // smlalt z24.s, p4/M, z29.h, z7.h\n"
- "and z17.d, z8.d, z4.d\n"
- "inch x17\n"
- ".inst 0x448746a0 // smlalt z0.s, p4/M, z21.h, z7.h\n"
- ".inst 0x44834561 // smlalt z1.s, p4/M, z11.h, z3.h\n"
- ".inst 0x04ae7718 // sqrdmulh z24.s, z24.s, z14.s\n"
- "incw x20\n"
- ".inst 0x449747c6 // smlalt z6.s, p4/M, z30.h, z23.h\n"
- ".inst 0x44824392 // smlalb z18.s, p4/M, z28.h, z2.h\n"
- "asr z17.s, z17.s, #0x1f\n"
- "whilelt p2.s, x7, x8\n"
- ".inst 0x448243cf // smlalb z15.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44824325 // smlalb z5.s, p4/M, z25.h, z2.h\n"
- "and z16.d, z24.d, z22.d\n"
- "whilelt p1.s, x20, x8\n"
- ".inst 0x44824780 // smlalt z0.s, p4/M, z28.h, z2.h\n"
- ".inst 0x448247c1 // smlalt z1.s, p4/M, z30.h, z2.h\n"
- ".inst 0x04aa7652 // sqrdmulh z18.s, z18.s, z10.s\n"
+ ".inst 0x448f4065 // smlalb z5.s, p4/M, z3.h, z15.h\n"
+ "ldr x25, [x15, #0x58]\n"
+ "ldr x24, [x15, #0x78]\n"
+ ".inst 0x448f446b // smlalt z11.s, p4/M, z3.h, z15.h\n"
+ "ldr x23, [x15, #0x60]\n"
+ "ldr x22, [x15, #0x80]\n"
+ ".inst 0x449a407e // smlalb z30.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x448d4064 // smlalb z4.s, p4/M, z3.h, z13.h\n"
+ ".inst 0x449c407f // smlalb z31.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449a4470 // smlalt z16.s, p4/M, z3.h, z26.h\n"
+ "ldr x21, [x15, #0x68]\n"
+ "ldr x20, [x15, #0x88]\n"
+ "ld1sb { z1.h }, p3/Z, [x25, x8]\n"
+ "ld1sb { z7.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448d4468 // smlalt z8.s, p4/M, z3.h, z13.h\n"
+ ".inst 0x449c446a // smlalt z10.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c43a5 // smlalb z5.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x449c47ab // smlalt z11.s, p4/M, z29.h, z28.h\n"
+ "ld1sb { z29.h }, p3/Z, [x23, x8]\n"
+ "ld1sb { z3.h }, p3/Z, [x22, x8]\n"
+ ".inst 0x4494401e // smlalb z30.s, p4/M, z0.h, z20.h\n"
+ "ldr x25, [x15, #0x40]\n"
+ "ldr x24, [x15, #0x70]\n"
+ "whilelt p0.h, x14, x17\n"
+ ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
+ ".inst 0x455110e7 // ssublb z7.h, z7.b, z17.b\n"
+ ".inst 0x44944410 // smlalt z16.s, p4/M, z0.h, z20.h\n"
+ "ld1sb { z0.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x455113bd // ssublb z29.h, z29.b, z17.b\n"
+ ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
+ "ldr x23, [x15, #0x98]\n"
+ "ldr x22, [x15, #0x50]\n"
+ ".inst 0x449442e5 // smlalb z5.s, p4/M, z23.h, z20.h\n"
+ ".inst 0x449446eb // smlalt z11.s, p4/M, z23.h, z20.h\n"
+ "ld1sb { z23.h }, p3/Z, [x20, x8]\n"
+ "ldr x21, [x15, #0x48]\n"
+ ".inst 0x44924024 // smlalb z4.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448640ff // smlalb z31.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
+ "ldr x20, [x15, #0x90]\n"
+ ".inst 0x44924428 // smlalt z8.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448644ea // smlalt z10.s, p4/M, z7.h, z6.h\n"
+ "ld1sb { z1.h }, p3/Z, [x25, x8]\n"
+ "ld1sb { z7.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448d431e // smlalb z30.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x455112f7 // ssublb z23.h, z23.b, z17.b\n"
+ ".inst 0x448d4710 // smlalt z16.s, p4/M, z24.h, z13.h\n"
+ "ld1sb { z24.h }, p3/Z, [x23, x8]\n"
+ ".inst 0x449242c5 // smlalb z5.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x449246cb // smlalt z11.s, p4/M, z22.h, z18.h\n"
+ "ldr x24, [x15, #0xa8]\n"
+ "ld1sb { z22.h }, p3/Z, [x22, x8]\n"
+ ".inst 0x449c43a4 // smlalb z4.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x4494407f // smlalb z31.s, p4/M, z3.h, z20.h\n"
+ ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
+ "ldr x23, [x15, #0xa0]\n"
+ ".inst 0x449c47a8 // smlalt z8.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x4494446a // smlalt z10.s, p4/M, z3.h, z20.h\n"
+ ".inst 0x455110e7 // ssublb z7.h, z7.b, z17.b\n"
+ "ldr x22, [x15, #0xb0]\n"
+ ".inst 0x449c427e // smlalb z30.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x449c4670 // smlalt z16.s, p4/M, z19.h, z28.h\n"
+ "ld1sb { z28.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x44864365 // smlalb z5.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x4486476b // smlalt z11.s, p4/M, z27.h, z6.h\n"
+ "ld1sb { z27.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x455112d6 // ssublb z22.h, z22.b, z17.b\n"
+ ".inst 0x44864004 // smlalb z4.s, p4/M, z0.h, z6.h\n"
+ ".inst 0x448242ff // smlalb z31.s, p4/M, z23.h, z2.h\n"
+ "ldr x21, [x15, #0xb8]\n"
+ "ldr x20, [x15, #0xc0]\n"
+ ".inst 0x44864408 // smlalt z8.s, p4/M, z0.h, z6.h\n"
+ "ld1sb { z0.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448246ea // smlalt z10.s, p4/M, z23.h, z2.h\n"
+ ".inst 0x4551139c // ssublb z28.h, z28.b, z17.b\n"
+ ".inst 0x4486403e // smlalb z30.s, p4/M, z1.h, z6.h\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ "ld1sb { z23.h }, p3/Z, [x23, x8]\n"
+ ".inst 0x44864430 // smlalt z16.s, p4/M, z1.h, z6.h\n"
+ ".inst 0x448d4265 // smlalb z5.s, p4/M, z19.h, z13.h\n"
+ ".inst 0x448d466b // smlalt z11.s, p4/M, z19.h, z13.h\n"
+ "ld1sb { z6.h }, p3/Z, [x22, x8]\n"
+ "ld1sb { z1.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x449440e4 // smlalb z4.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x448d431f // smlalb z31.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
+ "ld1w { z19.s }, p2/Z, [x13]\n"
+ ".inst 0x449444e8 // smlalt z8.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x448d470a // smlalt z10.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x455112f7 // ssublb z23.h, z23.b, z17.b\n"
+ "ld1w { z20.s }, p1/Z, [x13, #1, MUL VL]\n"
+ ".inst 0x4482439e // smlalb z30.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x455110c6 // ssublb z6.h, z6.b, z17.b\n"
+ ".inst 0x44824790 // smlalt z16.s, p4/M, z28.h, z2.h\n"
+ "ld1sb { z13.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x448242c5 // smlalb z5.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x448246cb // smlalt z11.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x45511021 // ssublb z1.h, z1.b, z17.b\n"
+ "inch x8\n"
+ ".inst 0x449a4364 // smlalb z4.s, p4/M, z27.h, z26.h\n"
+ ".inst 0x4492401f // smlalb z31.s, p4/M, z0.h, z18.h\n"
+ "uzp1 z28.s, z19.s, z20.s\n"
+ "inch x16\n"
+ ".inst 0x449a4768 // smlalt z8.s, p4/M, z27.h, z26.h\n"
+ ".inst 0x4492440a // smlalt z10.s, p4/M, z0.h, z18.h\n"
+ "uzp2 z20.s, z19.s, z20.s\n"
+ "ld1w { z27.s }, p2/Z, [x12]\n"
+ ".inst 0x449242de // smlalb z30.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x449246d0 // smlalt z16.s, p4/M, z22.h, z18.h\n"
+ "ld1w { z19.s }, p1/Z, [x12, #1, MUL VL]\n"
+ ".inst 0x455111ad // ssublb z13.h, z13.b, z17.b\n"
+ ".inst 0x449a43a5 // smlalb z5.s, p4/M, z29.h, z26.h\n"
+ ".inst 0x449a47ab // smlalt z11.s, p4/M, z29.h, z26.h\n"
+ "mov x21, x8\n"
+ "whilelt p2.s, x8, x17\n"
+ ".inst 0x449542e4 // smlalb z4.s, p4/M, z23.h, z21.h\n"
+ ".inst 0x449540df // smlalb z31.s, p4/M, z6.h, z21.h\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44824726 // smlalt z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x04aa75ef // sqrdmulh z15.s, z15.s, z10.s\n"
- "whilelt p3.h, x7, x8\n"
- "addvl x9, x9, #2\n"
- ".inst 0x04aa74a5 // sqrdmulh z5.s, z5.s, z10.s\n"
- "sqadd z8.s, z8.s, z17.s\n"
- ".inst 0x44829088 // srshl z8.s, p4/M, z8.s, z4.s\n"
- "addvl x28, x28, #2\n"
- "asr z16.s, z16.s, #0x1f\n"
- "and z21.d, z18.d, z4.d\n"
- ".inst 0x04ae7400 // sqrdmulh z0.s, z0.s, z14.s\n"
- "and z20.d, z15.d, z4.d\n"
- ".inst 0x04ae7421 // sqrdmulh z1.s, z1.s, z14.s\n"
- "and z28.d, z5.d, z4.d\n"
- ".inst 0x04ae74c6 // sqrdmulh z6.s, z6.s, z14.s\n"
- "sqadd z24.s, z24.s, z16.s\n"
- ".inst 0x448292d8 // srshl z24.s, p4/M, z24.s, z22.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z25.d, z0.d, z22.d\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z17.d, z1.d, z22.d\n"
- "asr z28.s, z28.s, #0x1f\n"
- "and z16.d, z6.d, z22.d\n"
- "sqadd z18.s, z18.s, z21.s\n"
- "asr z25.s, z25.s, #0x1f\n"
- ".inst 0x44829092 // srshl z18.s, p4/M, z18.s, z4.s\n"
- "sqadd z15.s, z15.s, z20.s\n"
- "asr z17.s, z17.s, #0x1f\n"
- ".inst 0x4482908f // srshl z15.s, p4/M, z15.s, z4.s\n"
- "sqadd z5.s, z5.s, z28.s\n"
- "asr z16.s, z16.s, #0x1f\n"
- ".inst 0x44829085 // srshl z5.s, p4/M, z5.s, z4.s\n"
- "sqadd z0.s, z0.s, z25.s\n"
- "sqadd z1.s, z1.s, z17.s\n"
- ".inst 0x448292c0 // srshl z0.s, p4/M, z0.s, z22.s\n"
- ".inst 0x448292c1 // srshl z1.s, p4/M, z1.s, z22.s\n"
- "sqadd z6.s, z6.s, z16.s\n"
- ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
- ".inst 0x448292c6 // srshl z6.s, p4/M, z6.s, z22.s\n"
- ".inst 0x45304252 // sqxtnb z18.h, z18.s\n"
- ".inst 0x453041ef // sqxtnb z15.h, z15.s\n"
+ "addvl x13, x13, #2\n"
+ ".inst 0x449546e8 // smlalt z8.s, p4/M, z23.h, z21.h\n"
+ ".inst 0x449544ca // smlalt z10.s, p4/M, z6.h, z21.h\n"
+ "uzp1 z23.s, z27.s, z19.s\n"
+ "addvl x12, x12, #2\n"
+ ".inst 0x4495407e // smlalb z30.s, p4/M, z3.h, z21.h\n"
+ ".inst 0x44954470 // smlalt z16.s, p4/M, z3.h, z21.h\n"
+ "uzp2 z6.s, z27.s, z19.s\n"
+ "incw x21\n"
+ ".inst 0x449540e5 // smlalb z5.s, p4/M, z7.h, z21.h\n"
+ ".inst 0x449544eb // smlalt z11.s, p4/M, z7.h, z21.h\n"
+ ".inst 0x44824004 // smlalb z4.s, p4/M, z0.h, z2.h\n"
+ ".inst 0x449a403f // smlalb z31.s, p4/M, z1.h, z26.h\n"
+ ".inst 0x44824408 // smlalt z8.s, p4/M, z0.h, z2.h\n"
+ ".inst 0x449a442a // smlalt z10.s, p4/M, z1.h, z26.h\n"
+ "whilelt p1.s, x21, x17\n"
+ "whilelt p3.h, x8, x17\n"
+ ".inst 0x448f431e // smlalb z30.s, p4/M, z24.h, z15.h\n"
+ ".inst 0x448f4710 // smlalt z16.s, p4/M, z24.h, z15.h\n"
+ ".inst 0x04bc74a5 // sqrdmulh z5.s, z5.s, z28.s\n"
+ ".inst 0x04b4756b // sqrdmulh z11.s, z11.s, z20.s\n"
+ ".inst 0x448f4024 // smlalb z4.s, p4/M, z1.h, z15.h\n"
+ ".inst 0x448f41bf // smlalb z31.s, p4/M, z13.h, z15.h\n"
+ "and z24.d, z5.d, z23.d\n"
+ ".inst 0x448f4428 // smlalt z8.s, p4/M, z1.h, z15.h\n"
+ ".inst 0x448f45aa // smlalt z10.s, p4/M, z13.h, z15.h\n"
+ "and z19.d, z11.d, z6.d\n"
+ ".inst 0x04bc77de // sqrdmulh z30.s, z30.s, z28.s\n"
+ ".inst 0x04b47610 // sqrdmulh z16.s, z16.s, z20.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ ".inst 0x04bc7484 // sqrdmulh z4.s, z4.s, z28.s\n"
+ ".inst 0x04bc77ff // sqrdmulh z31.s, z31.s, z28.s\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "and z7.d, z30.d, z23.d\n"
+ "sqadd z5.s, z5.s, z24.s\n"
+ ".inst 0x04b47508 // sqrdmulh z8.s, z8.s, z20.s\n"
+ "and z15.d, z4.d, z23.d\n"
+ "and z24.d, z31.d, z23.d\n"
+ ".inst 0x04b4754a // sqrdmulh z10.s, z10.s, z20.s\n"
+ "sqadd z11.s, z11.s, z19.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "and z18.d, z16.d, z6.d\n"
+ ".inst 0x448292e5 // srshl z5.s, p4/M, z5.s, z23.s\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "and z13.d, z8.d, z6.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ "and z3.d, z10.d, z6.d\n"
+ ".inst 0x448290cb // srshl z11.s, p4/M, z11.s, z6.s\n"
+ "sqadd z30.s, z30.s, z7.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z4.s, z4.s, z15.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z24.s\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ ".inst 0x448292fe // srshl z30.s, p4/M, z30.s, z23.s\n"
+ "sqadd z16.s, z16.s, z18.s\n"
".inst 0x453040a5 // sqxtnb z5.h, z5.s\n"
- ".inst 0x45304708 // sqxtnt z8.h, z24.s\n"
- ".inst 0x45304412 // sqxtnt z18.h, z0.s\n"
- ".inst 0x4530442f // sqxtnt z15.h, z1.s\n"
- ".inst 0x453044c5 // sqxtnt z5.h, z6.s\n"
- "sqadd z8.h, z8.h, z19.h\n"
- "smax z8.h, p4/M, z8.h, z12.h\n"
- "smin z8.h, p4/M, z8.h, z9.h\n"
- "sqadd z18.h, z18.h, z19.h\n"
- "sqadd z15.h, z15.h, z19.h\n"
- "smax z18.h, p4/M, z18.h, z12.h\n"
- "smax z15.h, p4/M, z15.h, z12.h\n"
- "sqadd z5.h, z5.h, z19.h\n"
- "smax z5.h, p4/M, z5.h, z12.h\n"
- "smin z18.h, p4/M, z18.h, z9.h\n"
- "st1b { z8.h }, p0, [x16, x10]\n"
- "smin z15.h, p4/M, z15.h, z9.h\n"
+ ".inst 0x448292e4 // srshl z4.s, p4/M, z4.s, z23.s\n"
+ "sqadd z8.s, z8.s, z13.s\n"
+ ".inst 0x448292ff // srshl z31.s, p4/M, z31.s, z23.s\n"
+ "sqadd z10.s, z10.s, z3.s\n"
+ ".inst 0x453043de // sqxtnb z30.h, z30.s\n"
+ ".inst 0x448290d0 // srshl z16.s, p4/M, z16.s, z6.s\n"
+ ".inst 0x45304084 // sqxtnb z4.h, z4.s\n"
+ ".inst 0x45304565 // sqxtnt z5.h, z11.s\n"
+ ".inst 0x448290c8 // srshl z8.s, p4/M, z8.s, z6.s\n"
+ ".inst 0x448290ca // srshl z10.s, p4/M, z10.s, z6.s\n"
+ ".inst 0x453043ff // sqxtnb z31.h, z31.s\n"
+ ".inst 0x4530461e // sqxtnt z30.h, z16.s\n"
+ ".inst 0x45304504 // sqxtnt z4.h, z8.s\n"
+ ".inst 0x4530455f // sqxtnt z31.h, z10.s\n"
+ "sqadd z5.h, z5.h, z25.h\n"
+ "sqadd z30.h, z30.h, z25.h\n"
+ "sqadd z4.h, z4.h, z25.h\n"
+ "sqadd z31.h, z31.h, z25.h\n"
+ "smax z5.h, p4/M, z5.h, z14.h\n"
+ "smax z30.h, p4/M, z30.h, z14.h\n"
+ "smax z4.h, p4/M, z4.h, z14.h\n"
+ "smax z31.h, p4/M, z31.h, z14.h\n"
"smin z5.h, p4/M, z5.h, z9.h\n"
- "st1b { z18.h }, p0, [x15, x10]\n"
- "st1b { z15.h }, p0, [x14, x10]\n"
- "st1b { z5.h }, p0, [x13, x10]\n"
- "ld1sb { z25.h }, p4/Z, [x17]\n"
- "ld1sb { z30.h }, p4/Z, [x17, #1, MUL VL]\n"
- "inch x10\n"
- "ld1sb { z14.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1sb { z4.h }, p4/Z, [x17, #3, MUL VL]\n"
- ".inst 0x454d1339 // ssublb z25.h, z25.b, z13.b\n"
- ".inst 0x454d13de // ssublb z30.h, z30.b, z13.b\n"
- "ld1sb { z10.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x17, #5, MUL VL]\n"
- ".inst 0x454d11ce // ssublb z14.h, z14.b, z13.b\n"
- ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
- "ld1sb { z23.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
- ".inst 0x454d114a // ssublb z10.h, z10.b, z13.b\n"
- "ld1w { z17.s }, p2/Z, [x20]\n"
- "ld1w { z16.s }, p1/Z, [x20, #1, MUL VL]\n"
- "uzp1 z8.s, z17.s, z16.s\n"
- "uzp2 z24.s, z17.s, z16.s\n"
- "ld1sb { z2.h }, p4/Z, [x17]\n"
- "ldp x27, x26, [x11, #0x0]\n"
+ "smin z30.h, p4/M, z30.h, z9.h\n"
+ "smin z4.h, p4/M, z4.h, z9.h\n"
+ "smin z31.h, p4/M, z31.h, z9.h\n"
+ "st1b { z5.h }, p0, [x11, x14]\n"
+ "st1b { z30.h }, p0, [x10, x14]\n"
+ "st1b { z4.h }, p0, [x9, x14]\n"
+ "st1b { z31.h }, p0, [x28, x14]\n"
+ "inch x14\n"
+ "ld1sb { z28.h }, p4/Z, [x16]\n"
+ "ld1sb { z20.h }, p4/Z, [x16, #1, MUL VL]\n"
+ "ld1sb { z13.h }, p4/Z, [x16, #2, MUL VL]\n"
+ "ld1sb { z18.h }, p4/Z, [x16, #3, MUL VL]\n"
+ "ld1sb { z6.h }, p4/Z, [x16, #4, MUL VL]\n"
+ "ld1sb { z2.h }, p4/Z, [x16, #5, MUL VL]\n"
+ "ld1sb { z26.h }, p4/Z, [x16, #6, MUL VL]\n"
+ "ld1sb { z21.h }, p4/Z, [x16, #7, MUL VL]\n"
+ "inch x16, ALL, MUL #8\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ "ld1w { z10.s }, p2/Z, [x20]\n"
+ "ld1w { z1.s }, p1/Z, [x20, #1, MUL VL]\n"
"addvl x20, x20, #2\n"
+ ".inst 0x454c1294 // ssublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454c11ad // ssublb z13.h, z13.b, z12.b\n"
+ ".inst 0x454c1252 // ssublb z18.h, z18.b, z12.b\n"
+ "ld1sb { z15.h }, p4/Z, [x16]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ ".inst 0x454c10c6 // ssublb z6.h, z6.b, z12.b\n"
+ ".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ "uzp1 z5.s, z10.s, z1.s\n"
+ "uzp2 z11.s, z10.s, z1.s\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z18.d, z8.d\n"
- "mov z0.d, z24.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1sb { z21.h }, p3/Z, [x27, x7]\n"
- "mov z15.d, z8.d\n"
- "mov z1.d, z24.d\n"
- "ld1sb { z22.h }, p3/Z, [x26, x7]\n"
- "ld1sb { z11.h }, p3/Z, [x25, x7]\n"
- "mov z5.d, z8.d\n"
- "mov z6.d, z24.d\n"
- "ld1sb { z20.h }, p3/Z, [x24, x7]\n"
- "ld1sb { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
- ".inst 0x454d12f7 // ssublb z23.h, z23.b, z13.b\n"
- "ld1sb { z28.h }, p3/Z, [x22, x7]\n"
- "ld1sb { z16.h }, p3/Z, [x21, x7]\n"
- ".inst 0x454d10e7 // ssublb z7.h, z7.b, z13.b\n"
- ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
- "ld1sb { z31.h }, p3/Z, [x20, x7]\n"
- ".inst 0x455a12b5 // ssublb z21.h, z21.b, z26.b\n"
- ".inst 0x455a12d6 // ssublb z22.h, z22.b, z26.b\n"
- ".inst 0x455a116b // ssublb z11.h, z11.b, z26.b\n"
- ".inst 0x455a1294 // ssublb z20.h, z20.b, z26.b\n"
- ".inst 0x455a137b // ssublb z27.h, z27.b, z26.b\n"
- ".inst 0x455a139c // ssublb z28.h, z28.b, z26.b\n"
- ".inst 0x455a1210 // ssublb z16.h, z16.b, z26.b\n"
- ".inst 0x455a13ff // ssublb z31.h, z31.b, z26.b\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ ".inst 0x454c135a // ssublb z26.h, z26.b, z12.b\n"
+ ".inst 0x454c12b5 // ssublb z21.h, z21.b, z12.b\n"
+ ".inst 0x454c11ef // ssublb z15.h, z15.b, z12.b\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "mov z30.d, z5.d\n"
+ "mov z16.d, z11.d\n"
+ "mov z4.d, z5.d\n"
+ "mov z8.d, z11.d\n"
+ "mov z31.d, z5.d\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "mov z10.d, z11.d\n"
+ "ld1sb { z3.h }, p3/Z, [x27, x8]\n"
+ "ld1sb { z29.h }, p3/Z, [x26, x8]\n"
+ "ld1sb { z23.h }, p3/Z, [x25, x8]\n"
+ "ld1sb { z0.h }, p3/Z, [x24, x8]\n"
+ "ld1sb { z24.h }, p3/Z, [x23, x8]\n"
+ "ld1sb { z22.h }, p3/Z, [x22, x8]\n"
+ "ld1sb { z27.h }, p3/Z, [x21, x8]\n"
+ "ld1sb { z19.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x45511063 // ssublb z3.h, z3.b, z17.b\n"
+ ".inst 0x455113bd // ssublb z29.h, z29.b, z17.b\n"
+ ".inst 0x455112f7 // ssublb z23.h, z23.b, z17.b\n"
+ ".inst 0x45511000 // ssublb z0.h, z0.b, z17.b\n"
+ ".inst 0x45511318 // ssublb z24.h, z24.b, z17.b\n"
+ ".inst 0x455112d6 // ssublb z22.h, z22.b, z17.b\n"
+ ".inst 0x4551137b // ssublb z27.h, z27.b, z17.b\n"
+ ".inst 0x45511273 // ssublb z19.h, z19.b, z17.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
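Note on the hunk above: the register-allocation and scheduling churn leaves the algorithm itself unchanged. Each output channel is still accumulated into 32 bits with smlalb/smlalt and then requantized by the sqrdmulh / srshl / sqxtnb / sqxtnt / sqadd / smax / smin tail before the st1b store. For reference, a scalar C++ sketch of that requantize tail follows (a hypothetical helper, not code from this patch); it assumes per-channel multipliers and non-positive shifts as loaded from requant_muls/requant_shifts, and it elides the kernel's sign-dependent rounding fixup (the and/asr/sqadd trio before srshl) and the saturation at the 16-bit narrowing step.

    #include <algorithm>
    #include <cstdint>

    // Scalar sketch (illustrative only) of the requantize tail the SVE
    // kernels above implement with sqrdmulh / srshl / sqxtn / smax / smin.
    static inline int8_t requantize_scalar(int32_t acc,      // smlalb/smlalt accumulator
                                           int32_t mul,      // from requant_muls
                                           int32_t shift,    // from requant_shifts (<= 0)
                                           int32_t c_offset, // Requantize32::c_offset
                                           int32_t minval,   // Requantize32::minval
                                           int32_t maxval)   // Requantize32::maxval
    {
        // sqrdmulh: rounding doubling multiply, keeping the high half,
        // i.e. round-to-nearest of (acc * mul) >> 31.
        int32_t high = (int32_t)(((int64_t)acc * mul + (1LL << 30)) >> 31);
        // srshl with a negative shift amount: rounding arithmetic shift right.
        int32_t s = -shift;
        int32_t rounded = (s > 0)
            ? (int32_t)(((int64_t)high + (1LL << (s - 1))) >> s)
            : high;
        // sqxtn + sqadd + smax/smin: narrow, add the output offset, clamp,
        // and store a single byte per lane (st1b).
        int32_t out = rounded + c_offset;
        return (int8_t)std::min(std::max(out, minval), maxval);
    }

The uzp1/uzp2 pairs on the requant_muls/requant_shifts loads simply de-interleave the even and odd channels so that the "b" (bottom) and "t" (top) accumulator halves each get their own multiplier and shift vectors.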
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index 7ff724ddd8..726c127d87 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
const int8_t *inptrs[36];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const int8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
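The two Params hunks above replace long unsigned int with the fixed-width uint64_t in both the struct field and the constructor parameter. On the LP64 data model used by AArch64 Linux the two spellings name the same 64-bit type, so the Params layout read by the assembly below is unchanged there; the rename only makes the intended width explicit and portable to data models where long is 32 bits. A one-line check under that LP64 assumption (illustrative, not part of the patch):

    #include <cstdint>
    // Holds on LP64 targets, where the rename is layout-neutral.
    static_assert(sizeof(unsigned long) == sizeof(uint64_t),
                  "Params::n_channels keeps its 64-bit width");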
@@ -112,533 +112,533 @@ void sve_s8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
__asm__ __volatile__(
"mov x2, #0x0\n"
- "mov x24, x2\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "ldr x3, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x27, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "incw x24\n"
+ "ldr x3, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x26, [%x[params], %[offsetof_Params_outptrs]]\n"
"ldr x4, [%x[params], %[offsetof_Params_weights]]\n"
- "add x21, x23, %[offsetof_Requantize32_a_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1rb { z30.b }, p4/Z, [x21]\n"
- "ld1rb { z10.b }, p4/Z, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_minval]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x6, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x2\n"
+ "add x20, x27, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x27, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x27, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z14.b }, p4/Z, [x20]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x27, %[offsetof_Requantize32_minval]\n"
+ "add x20, x27, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z12.b }, p4/Z, [x23]\n"
+ "ld1rh { z10.h }, p4/Z, [x22]\n"
+ "incw x24\n"
"ld1rh { z15.h }, p4/Z, [x21]\n"
- "ld1rh { z12.h }, p4/Z, [x20]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
"ld1rh { z13.h }, p4/Z, [x20]\n"
- "ldp x5, x6, [x22, #0x0]\n"
"whilelt p3.h, x2, x3\n"
- "ldp x7, x8, [x22, #0x10]\n"
+ "ldp x17, x16, [x26, #0x0]\n"
+ "ldp x15, x14, [x26, #0x10]\n"
"whilelt p2.s, x2, x3\n"
"whilelt p1.s, x24, x3\n"
- "ldr x10, [%x[params], %[offsetof_Params_bias]]\n"
- "add x17, %x[params], %[offsetof_Params_inptrs]\n"
- "ld1w { z17.s }, p2/Z, [x10]\n"
- "ld1w { z16.s }, p1/Z, [x10, #1, MUL VL]\n"
- "uzp1 z14.s, z17.s, z16.s\n"
- "ld1sb { z26.h }, p4/Z, [x4]\n"
- "ld1sb { z8.h }, p4/Z, [x4, #1, MUL VL]\n"
- "uzp2 z23.s, z17.s, z16.s\n"
- "addvl x10, x10, #2\n"
- "ld1sb { z16.h }, p4/Z, [x4, #2, MUL VL]\n"
- "ld1sb { z21.h }, p4/Z, [x4, #3, MUL VL]\n"
- "mov x16, #0x0\n"
- "mov z6.d, z14.d\n"
- "ld1sb { z17.h }, p4/Z, [x4, #4, MUL VL]\n"
- "ldp x9, x28, [x17, #0x0]\n"
- "mov z18.d, z23.d\n"
- "mov z9.d, z14.d\n"
- "ldp x27, x26, [x17, #0x10]\n"
- "ldp x25, x24, [x17, #0x20]\n"
- "mov z20.d, z23.d\n"
- "mov z7.d, z14.d\n"
- "ldp x23, x22, [x17, #0x30]\n"
- "ldp x21, x20, [x17, #0x40]\n"
- "mov z1.d, z23.d\n"
- ".inst 0x454a135a // ssublb z26.h, z26.b, z10.b\n"
- "ld1sb { z22.h }, p3/Z, [x9, x2]\n"
- "ld1sb { z2.h }, p3/Z, [x28, x2]\n"
- ".inst 0x454a1108 // ssublb z8.h, z8.b, z10.b\n"
- ".inst 0x454a1210 // ssublb z16.h, z16.b, z10.b\n"
- "ld1sb { z11.h }, p3/Z, [x27, x2]\n"
- "ld1sb { z3.h }, p3/Z, [x26, x2]\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- ".inst 0x454a1231 // ssublb z17.h, z17.b, z10.b\n"
- "ld1sb { z29.h }, p3/Z, [x25, x2]\n"
- "ld1sb { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x455e12d6 // ssublb z22.h, z22.b, z30.b\n"
- ".inst 0x455e1042 // ssublb z2.h, z2.b, z30.b\n"
- "ld1sb { z31.h }, p3/Z, [x23, x2]\n"
- "ld1sb { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e116b // ssublb z11.h, z11.b, z30.b\n"
- ".inst 0x455e1063 // ssublb z3.h, z3.b, z30.b\n"
- "ld1sb { z19.h }, p3/Z, [x21, x2]\n"
- "ld1sb { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e13bd // ssublb z29.h, z29.b, z30.b\n"
- ".inst 0x455e1084 // ssublb z4.h, z4.b, z30.b\n"
- "ldr x15, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x14, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x10, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455e13ff // ssublb z31.h, z31.b, z30.b\n"
- ".inst 0x455e1000 // ssublb z0.h, z0.b, z30.b\n"
- ".inst 0x455e1273 // ssublb z19.h, z19.b, z30.b\n"
- ".inst 0x455e139c // ssublb z28.h, z28.b, z30.b\n"
+ "ld1w { z5.s }, p2/Z, [x25]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ "ld1sb { z25.h }, p4/Z, [x4]\n"
+ "ld1sb { z28.h }, p4/Z, [x4, #1, MUL VL]\n"
+ "ld1sb { z4.h }, p4/Z, [x4, #2, MUL VL]\n"
+ "ld1sb { z23.h }, p4/Z, [x4, #3, MUL VL]\n"
+ "ld1sb { z31.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldp x9, x28, [x5, #0x0]\n"
+ "uzp1 z6.s, z5.s, z16.s\n"
+ "uzp2 z30.s, z5.s, z16.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ ".inst 0x454c1339 // ssublb z25.h, z25.b, z12.b\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
+ ".inst 0x454c12f7 // ssublb z23.h, z23.b, z12.b\n"
+ "ldp x27, x26, [x5, #0x10]\n"
+ "mov z17.d, z6.d\n"
+ "mov z8.d, z30.d\n"
+ "mov z21.d, z6.d\n"
+ "mov z27.d, z30.d\n"
+ "ldp x25, x24, [x5, #0x20]\n"
+ "mov z7.d, z6.d\n"
+ "mov z9.d, z30.d\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "ldp x23, x22, [x5, #0x30]\n"
+ "ldp x21, x20, [x5, #0x40]\n"
+ "ld1sb { z26.h }, p3/Z, [x9, x2]\n"
+ "ld1sb { z16.h }, p3/Z, [x28, x2]\n"
+ "ld1sb { z24.h }, p3/Z, [x27, x2]\n"
+ "ld1sb { z5.h }, p3/Z, [x26, x2]\n"
+ "ld1sb { z18.h }, p3/Z, [x25, x2]\n"
+ "ld1sb { z3.h }, p3/Z, [x24, x2]\n"
+ "ld1sb { z19.h }, p3/Z, [x23, x2]\n"
+ "ld1sb { z11.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454e135a // ssublb z26.h, z26.b, z14.b\n"
+ ".inst 0x454e1210 // ssublb z16.h, z16.b, z14.b\n"
+ "ld1sb { z20.h }, p3/Z, [x21, x2]\n"
+ "ld1sb { z29.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454e1318 // ssublb z24.h, z24.b, z14.b\n"
+ ".inst 0x454e10a5 // ssublb z5.h, z5.b, z14.b\n"
+ ".inst 0x454e1252 // ssublb z18.h, z18.b, z14.b\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ ".inst 0x454e1273 // ssublb z19.h, z19.b, z14.b\n"
+ ".inst 0x454e116b // ssublb z11.h, z11.b, z14.b\n"
+ ".inst 0x454e1294 // ssublb z20.h, z20.b, z14.b\n"
+ ".inst 0x454e13bd // ssublb z29.h, z29.b, z14.b\n"
"1:" // Loop
- ".inst 0x449a42ce // smlalb z14.s, p4/M, z22.h, z26.h\n"
- ".inst 0x449a46d7 // smlalt z23.s, p4/M, z22.h, z26.h\n"
- "ldr x20, [x17, #0x50]\n"
- "ld1sb { z27.h }, p3/Z, [x20, x2]\n"
- ".inst 0x4488404e // smlalb z14.s, p4/M, z2.h, z8.h\n"
- ".inst 0x449a4046 // smlalb z6.s, p4/M, z2.h, z26.h\n"
- "ldr x20, [x17, #0x58]\n"
- ".inst 0x455e137b // ssublb z27.h, z27.b, z30.b\n"
- ".inst 0x449a4169 // smlalb z9.s, p4/M, z11.h, z26.h\n"
- ".inst 0x449a4067 // smlalb z7.s, p4/M, z3.h, z26.h\n"
- "ld1sb { z5.h }, p3/Z, [x20, x2]\n"
- "ldr x20, [x17, #0x60]\n"
- ".inst 0x44884457 // smlalt z23.s, p4/M, z2.h, z8.h\n"
- ".inst 0x449043ae // smlalb z14.s, p4/M, z29.h, z16.h\n"
- "ld1sb { z25.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x455e10a5 // ssublb z5.h, z5.b, z30.b\n"
- ".inst 0x449a4452 // smlalt z18.s, p4/M, z2.h, z26.h\n"
- ".inst 0x449a4574 // smlalt z20.s, p4/M, z11.h, z26.h\n"
- "ld1sb { z22.h }, p3/Z, [x20, x2]\n"
- ".inst 0x454a1339 // ssublb z25.h, z25.b, z10.b\n"
- ".inst 0x449a4461 // smlalt z1.s, p4/M, z3.h, z26.h\n"
- ".inst 0x448843a6 // smlalb z6.s, p4/M, z29.h, z8.h\n"
- "ldr x20, [x17, #0x68]\n"
+ ".inst 0x44994346 // smlalb z6.s, p4/M, z26.h, z25.h\n"
+ ".inst 0x4499475e // smlalt z30.s, p4/M, z26.h, z25.h\n"
+ "ldr x23, [x5, #0x50]\n"
+ "ldr x22, [x5, #0x58]\n"
+ ".inst 0x44994211 // smlalb z17.s, p4/M, z16.h, z25.h\n"
+ ".inst 0x44994315 // smlalb z21.s, p4/M, z24.h, z25.h\n"
+ "ldr x21, [x5, #0x60]\n"
+ "ld1sb { z0.h }, p4/Z, [x4, #5, MUL VL]\n"
+ ".inst 0x449940a7 // smlalb z7.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994608 // smlalt z8.s, p4/M, z16.h, z25.h\n"
+ "ldr x20, [x5, #0x68]\n"
+ "ld1sb { z26.h }, p4/Z, [x4, #6, MUL VL]\n"
+ "ld1sb { z2.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x4499471b // smlalt z27.s, p4/M, z24.h, z25.h\n"
+ ".inst 0x449944a9 // smlalt z9.s, p4/M, z5.h, z25.h\n"
+ "ld1sb { z22.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x449c4206 // smlalb z6.s, p4/M, z16.h, z28.h\n"
+ ".inst 0x449c461e // smlalt z30.s, p4/M, z16.h, z28.h\n"
+ "ld1sb { z1.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
+ ".inst 0x449c4251 // smlalb z17.s, p4/M, z18.h, z28.h\n"
+ ".inst 0x449c40b5 // smlalb z21.s, p4/M, z5.h, z28.h\n"
+ "ld1sb { z16.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454c135a // ssublb z26.h, z26.b, z12.b\n"
+ ".inst 0x449c4067 // smlalb z7.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x454e1042 // ssublb z2.h, z2.b, z14.b\n"
+ ".inst 0x449c4648 // smlalt z8.s, p4/M, z18.h, z28.h\n"
+ "ldr x20, [x5, #0x70]\n"
+ ".inst 0x449c44bb // smlalt z27.s, p4/M, z5.h, z28.h\n"
+ ".inst 0x449c4469 // smlalt z9.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x454e12d6 // ssublb z22.h, z22.b, z14.b\n"
+ "ld1sb { z28.h }, p4/Z, [x4, #7, MUL VL]\n"
+ ".inst 0x44844246 // smlalb z6.s, p4/M, z18.h, z4.h\n"
+ ".inst 0x4484465e // smlalt z30.s, p4/M, z18.h, z4.h\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ "inch x4, ALL, MUL #8\n"
+ ".inst 0x44844271 // smlalb z17.s, p4/M, z19.h, z4.h\n"
+ ".inst 0x44844075 // smlalb z21.s, p4/M, z3.h, z4.h\n"
+ ".inst 0x454e1210 // ssublb z16.h, z16.b, z14.b\n"
+ "ld1sb { z25.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x44844047 // smlalb z7.s, p4/M, z2.h, z4.h\n"
+ ".inst 0x44844668 // smlalt z8.s, p4/M, z19.h, z4.h\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ "ldr x20, [x5, #0x78]\n"
+ ".inst 0x4484447b // smlalt z27.s, p4/M, z3.h, z4.h\n"
+ ".inst 0x44844449 // smlalt z9.s, p4/M, z2.h, z4.h\n"
+ "ld1sb { z18.h }, p4/Z, [x4]\n"
+ "ldr x22, [x5, #0x80]\n"
+ ".inst 0x44974266 // smlalb z6.s, p4/M, z19.h, z23.h\n"
+ ".inst 0x4497467e // smlalt z30.s, p4/M, z19.h, z23.h\n"
+ ".inst 0x454e1339 // ssublb z25.h, z25.b, z14.b\n"
+ "ld1sb { z4.h }, p4/Z, [x4, #1, MUL VL]\n"
+ ".inst 0x44974171 // smlalb z17.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x44974055 // smlalb z21.s, p4/M, z2.h, z23.h\n"
+ "ld1sb { z19.h }, p3/Z, [x20, x2]\n"
+ "ldr x21, [x5, #0x88]\n"
+ ".inst 0x449742c7 // smlalb z7.s, p4/M, z22.h, z23.h\n"
+ ".inst 0x44974568 // smlalt z8.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x454c1252 // ssublb z18.h, z18.b, z12.b\n"
+ "ldr x20, [x5, #0x90]\n"
+ ".inst 0x4497445b // smlalt z27.s, p4/M, z2.h, z23.h\n"
+ ".inst 0x449746c9 // smlalt z9.s, p4/M, z22.h, z23.h\n"
+ "ld1sb { z23.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
+ ".inst 0x449f4166 // smlalb z6.s, p4/M, z11.h, z31.h\n"
+ ".inst 0x449f457e // smlalt z30.s, p4/M, z11.h, z31.h\n"
+ ".inst 0x454e1273 // ssublb z19.h, z19.b, z14.b\n"
+ "ld1sb { z11.h }, p4/Z, [x4, #2, MUL VL]\n"
+ ".inst 0x449f4031 // smlalb z17.s, p4/M, z1.h, z31.h\n"
+ ".inst 0x449f42d5 // smlalb z21.s, p4/M, z22.h, z31.h\n"
+ "ldr x23, [x5, #0x98]\n"
+ "ldr x22, [x5, #0xa0]\n"
+ ".inst 0x449f4287 // smlalb z7.s, p4/M, z20.h, z31.h\n"
+ ".inst 0x449f4428 // smlalt z8.s, p4/M, z1.h, z31.h\n"
+ ".inst 0x454e12f7 // ssublb z23.h, z23.b, z14.b\n"
+ "ld1sb { z1.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x449f46db // smlalt z27.s, p4/M, z22.h, z31.h\n"
+ ".inst 0x449f4689 // smlalt z9.s, p4/M, z20.h, z31.h\n"
+ ".inst 0x454c116b // ssublb z11.h, z11.b, z12.b\n"
+ "ld1sb { z31.h }, p4/Z, [x4, #3, MUL VL]\n"
+ ".inst 0x44804306 // smlalb z6.s, p4/M, z24.h, z0.h\n"
+ ".inst 0x4480471e // smlalt z30.s, p4/M, z24.h, z0.h\n"
+ "ld1sb { z24.h }, p3/Z, [x20, x2]\n"
+ "ldr x20, [x5, #0xa8]\n"
+ ".inst 0x448040b1 // smlalb z17.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x448043b5 // smlalb z21.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ "ldr x21, [x5, #0xb0]\n"
+ ".inst 0x44804207 // smlalb z7.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x448044a8 // smlalt z8.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "ldr x13, [x5, #0xb8]\n"
+ ".inst 0x448047bb // smlalt z27.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x44804609 // smlalt z9.s, p4/M, z16.h, z0.h\n"
+ "ld1sb { z0.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x454e1318 // ssublb z24.h, z24.b, z14.b\n"
+ ".inst 0x449a40a6 // smlalb z6.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a44be // smlalt z30.s, p4/M, z5.h, z26.h\n"
+ "ld1sb { z5.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldr x12, [x5, #0xc0]\n"
+ ".inst 0x449a4071 // smlalb z17.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a4215 // smlalb z21.s, p4/M, z16.h, z26.h\n"
+ "ldr x11, [x5, #0xc8]\n"
+ "ldr x10, [x5, #0xd0]\n"
+ ".inst 0x449a4327 // smlalb z7.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449a4468 // smlalt z8.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x454e1000 // ssublb z0.h, z0.b, z14.b\n"
+ "ldr x9, [x5, #0xd8]\n"
+ ".inst 0x449a461b // smlalt z27.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4729 // smlalt z9.s, p4/M, z25.h, z26.h\n"
+ "ld1sb { z26.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c10a5 // ssublb z5.h, z5.b, z12.b\n"
+ ".inst 0x449c4066 // smlalb z6.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c447e // smlalt z30.s, p4/M, z3.h, z28.h\n"
+ "ld1sb { z3.h }, p4/Z, [x4, #5, MUL VL]\n"
+ "ldr x28, [x5, #0xe0]\n"
+ ".inst 0x449c4051 // smlalb z17.s, p4/M, z2.h, z28.h\n"
+ ".inst 0x449c4335 // smlalb z21.s, p4/M, z25.h, z28.h\n"
+ "ldr x27, [x5, #0xe8]\n"
+ "ldr x26, [x5, #0xf0]\n"
+ ".inst 0x449c4267 // smlalb z7.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x449c4448 // smlalt z8.s, p4/M, z2.h, z28.h\n"
+ ".inst 0x454e135a // ssublb z26.h, z26.b, z14.b\n"
+ "ldr x25, [x5, #0xf8]\n"
+ ".inst 0x449c473b // smlalt z27.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4669 // smlalt z9.s, p4/M, z19.h, z28.h\n"
+ "ld1sb { z28.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454c1063 // ssublb z3.h, z3.b, z12.b\n"
+ ".inst 0x44924046 // smlalb z6.s, p4/M, z2.h, z18.h\n"
+ ".inst 0x4492445e // smlalt z30.s, p4/M, z2.h, z18.h\n"
"ld1sb { z2.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x44884069 // smlalb z9.s, p4/M, z3.h, z8.h\n"
- ".inst 0x44884087 // smlalb z7.s, p4/M, z4.h, z8.h\n"
- ".inst 0x455e12d6 // ssublb z22.h, z22.b, z30.b\n"
- "ld1sb { z26.h }, p3/Z, [x20, x2]\n"
- ".inst 0x449047b7 // smlalt z23.s, p4/M, z29.h, z16.h\n"
- ".inst 0x449543ee // smlalb z14.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454a1042 // ssublb z2.h, z2.b, z10.b\n"
- "ldr x20, [x17, #0x70]\n"
- ".inst 0x448847b2 // smlalt z18.s, p4/M, z29.h, z8.h\n"
- ".inst 0x44884474 // smlalt z20.s, p4/M, z3.h, z8.h\n"
- "ld1sb { z29.h }, p4/Z, [x4, #7, MUL VL]\n"
- ".inst 0x455e135a // ssublb z26.h, z26.b, z30.b\n"
- ".inst 0x44884481 // smlalt z1.s, p4/M, z4.h, z8.h\n"
- ".inst 0x449043e6 // smlalb z6.s, p4/M, z31.h, z16.h\n"
+ "ldr x24, [x5, #0x100]\n"
+ ".inst 0x449242d1 // smlalb z17.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x44924275 // smlalb z21.s, p4/M, z19.h, z18.h\n"
+ "ldr x23, [x5, #0x108]\n"
+ "ldr x22, [x5, #0x110]\n"
+ ".inst 0x449242e7 // smlalb z7.s, p4/M, z23.h, z18.h\n"
+ ".inst 0x449246c8 // smlalt z8.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x454e139c // ssublb z28.h, z28.b, z14.b\n"
+ "ldr x20, [x5, #0x118]\n"
+ ".inst 0x4492467b // smlalt z27.s, p4/M, z19.h, z18.h\n"
+ ".inst 0x449246e9 // smlalt z9.s, p4/M, z23.h, z18.h\n"
+ "ld1sb { z18.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ ".inst 0x448442c6 // smlalb z6.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x448446de // smlalt z30.s, p4/M, z22.h, z4.h\n"
+ "ld1sb { z22.h }, p4/Z, [x4, #7, MUL VL]\n"
"inch x4, ALL, MUL #8\n"
- "ld1sb { z8.h }, p3/Z, [x20, x2]\n"
- ".inst 0x44904089 // smlalb z9.s, p4/M, z4.h, z16.h\n"
- ".inst 0x44904367 // smlalb z7.s, p4/M, z27.h, z16.h\n"
- ".inst 0x454a13bd // ssublb z29.h, z29.b, z10.b\n"
- "ldr x20, [x17, #0x78]\n"
- ".inst 0x449547f7 // smlalt z23.s, p4/M, z31.h, z21.h\n"
- ".inst 0x4491400e // smlalb z14.s, p4/M, z0.h, z17.h\n"
- "ld1sb { z24.h }, p4/Z, [x4]\n"
- ".inst 0x455e1108 // ssublb z8.h, z8.b, z30.b\n"
- ".inst 0x449047f2 // smlalt z18.s, p4/M, z31.h, z16.h\n"
- ".inst 0x44904494 // smlalt z20.s, p4/M, z4.h, z16.h\n"
- "ld1sb { z31.h }, p3/Z, [x20, x2]\n"
- ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
- ".inst 0x44904761 // smlalt z1.s, p4/M, z27.h, z16.h\n"
- ".inst 0x44954006 // smlalb z6.s, p4/M, z0.h, z21.h\n"
- "ldr x22, [x17, #0x80]\n"
+ ".inst 0x44844291 // smlalb z17.s, p4/M, z20.h, z4.h\n"
+ ".inst 0x448442f5 // smlalb z21.s, p4/M, z23.h, z4.h\n"
+ "whilelt p0.h, x6, x3\n"
+ "ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
+ ".inst 0x44844027 // smlalb z7.s, p4/M, z1.h, z4.h\n"
+ ".inst 0x44844688 // smlalt z8.s, p4/M, z20.h, z4.h\n"
+ ".inst 0x454e1252 // ssublb z18.h, z18.b, z14.b\n"
+ "ld1sb { z20.h }, p3/Z, [x13, x2]\n"
+ ".inst 0x448446fb // smlalt z27.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x44844429 // smlalt z9.s, p4/M, z1.h, z4.h\n"
+ ".inst 0x454c12d6 // ssublb z22.h, z22.b, z12.b\n"
+ "ld1sb { z4.h }, p4/Z, [x4]\n"
+ ".inst 0x448b43a6 // smlalb z6.s, p4/M, z29.h, z11.h\n"
+ ".inst 0x448b47be // smlalt z30.s, p4/M, z29.h, z11.h\n"
+ "ld1sb { z29.h }, p3/Z, [x12, x2]\n"
+ ".inst 0x448b4211 // smlalb z17.s, p4/M, z16.h, z11.h\n"
+ ".inst 0x448b4315 // smlalb z21.s, p4/M, z24.h, z11.h\n"
+ ".inst 0x454e1294 // ssublb z20.h, z20.b, z14.b\n"
+ ".inst 0x448b4007 // smlalb z7.s, p4/M, z0.h, z11.h\n"
+ ".inst 0x448b4608 // smlalt z8.s, p4/M, z16.h, z11.h\n"
+ ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
+ ".inst 0x448b471b // smlalt z27.s, p4/M, z24.h, z11.h\n"
+ ".inst 0x448b4409 // smlalt z9.s, p4/M, z0.h, z11.h\n"
+ "ld1sb { z11.h }, p3/Z, [x11, x2]\n"
+ ".inst 0x454e13bd // ssublb z29.h, z29.b, z14.b\n"
+ ".inst 0x449f4206 // smlalb z6.s, p4/M, z16.h, z31.h\n"
+ ".inst 0x449f461e // smlalt z30.s, p4/M, z16.h, z31.h\n"
"ld1sb { z16.h }, p4/Z, [x4, #1, MUL VL]\n"
- ".inst 0x44954369 // smlalb z9.s, p4/M, z27.h, z21.h\n"
- ".inst 0x449540a7 // smlalb z7.s, p4/M, z5.h, z21.h\n"
- ".inst 0x455e13ff // ssublb z31.h, z31.b, z30.b\n"
- "ldr x21, [x17, #0x88]\n"
- ".inst 0x44914417 // smlalt z23.s, p4/M, z0.h, z17.h\n"
- ".inst 0x4499416e // smlalb z14.s, p4/M, z11.h, z25.h\n"
- ".inst 0x454a1210 // ssublb z16.h, z16.b, z10.b\n"
- "ldr x20, [x17, #0x90]\n"
- ".inst 0x44954412 // smlalt z18.s, p4/M, z0.h, z21.h\n"
- ".inst 0x44954774 // smlalt z20.s, p4/M, z27.h, z21.h\n"
- "ld1sb { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1000 // ssublb z0.h, z0.b, z30.b\n"
- ".inst 0x449544a1 // smlalt z1.s, p4/M, z5.h, z21.h\n"
- ".inst 0x449142c6 // smlalb z6.s, p4/M, z22.h, z17.h\n"
- "ld1sb { z21.h }, p4/Z, [x4, #2, MUL VL]\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- ".inst 0x449140a9 // smlalb z9.s, p4/M, z5.h, z17.h\n"
- ".inst 0x44914267 // smlalb z7.s, p4/M, z19.h, z17.h\n"
- "ldr x23, [x17, #0x98]\n"
- "ldr x22, [x17, #0xa0]\n"
- ".inst 0x44994577 // smlalt z23.s, p4/M, z11.h, z25.h\n"
- ".inst 0x4482406e // smlalb z14.s, p4/M, z3.h, z2.h\n"
- "ld1sb { z11.h }, p3/Z, [x21, x2]\n"
- ".inst 0x455e116b // ssublb z11.h, z11.b, z30.b\n"
- ".inst 0x449146d2 // smlalt z18.s, p4/M, z22.h, z17.h\n"
- ".inst 0x449144b4 // smlalt z20.s, p4/M, z5.h, z17.h\n"
- "ld1sb { z22.h }, p4/Z, [x4, #3, MUL VL]\n"
- ".inst 0x454a12d6 // ssublb z22.h, z22.b, z10.b\n"
- ".inst 0x44914661 // smlalt z1.s, p4/M, z19.h, z17.h\n"
- ".inst 0x44994066 // smlalb z6.s, p4/M, z3.h, z25.h\n"
- "ld1sb { z17.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1231 // ssublb z17.h, z17.b, z30.b\n"
- ".inst 0x44994389 // smlalb z9.s, p4/M, z28.h, z25.h\n"
- ".inst 0x44994347 // smlalb z7.s, p4/M, z26.h, z25.h\n"
- "ldr x20, [x17, #0xa8]\n"
- "ldr x21, [x17, #0xb0]\n"
- ".inst 0x44824477 // smlalt z23.s, p4/M, z3.h, z2.h\n"
- ".inst 0x449d408e // smlalb z14.s, p4/M, z4.h, z29.h\n"
- "ldr x13, [x17, #0xb8]\n"
- "ldr x12, [x17, #0xc0]\n"
- ".inst 0x44994472 // smlalt z18.s, p4/M, z3.h, z25.h\n"
- ".inst 0x44994794 // smlalt z20.s, p4/M, z28.h, z25.h\n"
- "ld1sb { z3.h }, p3/Z, [x23, x2]\n"
- ".inst 0x455e1063 // ssublb z3.h, z3.b, z30.b\n"
- ".inst 0x44994741 // smlalt z1.s, p4/M, z26.h, z25.h\n"
- ".inst 0x44824086 // smlalb z6.s, p4/M, z4.h, z2.h\n"
- "ld1sb { z25.h }, p4/Z, [x4, #4, MUL VL]\n"
- ".inst 0x454a1339 // ssublb z25.h, z25.b, z10.b\n"
- ".inst 0x44824349 // smlalb z9.s, p4/M, z26.h, z2.h\n"
- ".inst 0x44824107 // smlalb z7.s, p4/M, z8.h, z2.h\n"
- "ldr x11, [x17, #0xc8]\n"
- "ldr x10, [x17, #0xd0]\n"
- ".inst 0x449d4497 // smlalt z23.s, p4/M, z4.h, z29.h\n"
- ".inst 0x4498436e // smlalb z14.s, p4/M, z27.h, z24.h\n"
- "ldr x9, [x17, #0xd8]\n"
- "ldr x28, [x17, #0xe0]\n"
- ".inst 0x44824492 // smlalt z18.s, p4/M, z4.h, z2.h\n"
- ".inst 0x44824754 // smlalt z20.s, p4/M, z26.h, z2.h\n"
- "ld1sb { z4.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1084 // ssublb z4.h, z4.b, z30.b\n"
- ".inst 0x44824501 // smlalt z1.s, p4/M, z8.h, z2.h\n"
- ".inst 0x449d4366 // smlalb z6.s, p4/M, z27.h, z29.h\n"
+ ".inst 0x449f4331 // smlalb z17.s, p4/M, z25.h, z31.h\n"
+ ".inst 0x449f4015 // smlalb z21.s, p4/M, z0.h, z31.h\n"
+ ".inst 0x449f4347 // smlalb z7.s, p4/M, z26.h, z31.h\n"
+ ".inst 0x449f4728 // smlalt z8.s, p4/M, z25.h, z31.h\n"
+ ".inst 0x454e116b // ssublb z11.h, z11.b, z14.b\n"
+ ".inst 0x449f441b // smlalt z27.s, p4/M, z0.h, z31.h\n"
+ ".inst 0x449f4749 // smlalt z9.s, p4/M, z26.h, z31.h\n"
+ "ld1sb { z31.h }, p3/Z, [x10, x2]\n"
+ ".inst 0x454c1210 // ssublb z16.h, z16.b, z12.b\n"
+ ".inst 0x44854326 // smlalb z6.s, p4/M, z25.h, z5.h\n"
+ ".inst 0x4485473e // smlalt z30.s, p4/M, z25.h, z5.h\n"
+ "ld1sb { z25.h }, p4/Z, [x4, #2, MUL VL]\n"
+ ".inst 0x44854271 // smlalb z17.s, p4/M, z19.h, z5.h\n"
+ ".inst 0x44854355 // smlalb z21.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854387 // smlalb z7.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x44854668 // smlalt z8.s, p4/M, z19.h, z5.h\n"
+ ".inst 0x454e13ff // ssublb z31.h, z31.b, z14.b\n"
+ ".inst 0x4485475b // smlalt z27.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854789 // smlalt z9.s, p4/M, z28.h, z5.h\n"
+ "ld1sb { z5.h }, p3/Z, [x9, x2]\n"
+ ".inst 0x454c1339 // ssublb z25.h, z25.b, z12.b\n"
+ ".inst 0x44834266 // smlalb z6.s, p4/M, z19.h, z3.h\n"
+ ".inst 0x4483467e // smlalt z30.s, p4/M, z19.h, z3.h\n"
+ "ld1sb { z19.h }, p4/Z, [x4, #3, MUL VL]\n"
+ ".inst 0x448342f1 // smlalb z17.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x44834395 // smlalb z21.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834247 // smlalb z7.s, p4/M, z18.h, z3.h\n"
+ ".inst 0x448346e8 // smlalt z8.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x454e10a5 // ssublb z5.h, z5.b, z14.b\n"
+ ".inst 0x4483479b // smlalt z27.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834649 // smlalt z9.s, p4/M, z18.h, z3.h\n"
+ "ld1sb { z3.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x454c1273 // ssublb z19.h, z19.b, z12.b\n"
+ ".inst 0x448242e6 // smlalb z6.s, p4/M, z23.h, z2.h\n"
+ ".inst 0x448246fe // smlalt z30.s, p4/M, z23.h, z2.h\n"
+ "ld1sb { z23.h }, p4/Z, [x4, #4, MUL VL]\n"
+ ".inst 0x44824031 // smlalb z17.s, p4/M, z1.h, z2.h\n"
+ ".inst 0x44824255 // smlalb z21.s, p4/M, z18.h, z2.h\n"
+ ".inst 0x44824287 // smlalb z7.s, p4/M, z20.h, z2.h\n"
+ ".inst 0x44824428 // smlalt z8.s, p4/M, z1.h, z2.h\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ "ld1sb { z1.h }, p3/Z, [x27, x2]\n"
+ ".inst 0x4482465b // smlalt z27.s, p4/M, z18.h, z2.h\n"
+ ".inst 0x44824689 // smlalt z9.s, p4/M, z20.h, z2.h\n"
+ ".inst 0x454c12f7 // ssublb z23.h, z23.b, z12.b\n"
"ld1sb { z2.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x454a1042 // ssublb z2.h, z2.b, z10.b\n"
- ".inst 0x449d4109 // smlalb z9.s, p4/M, z8.h, z29.h\n"
- ".inst 0x449d43e7 // smlalb z7.s, p4/M, z31.h, z29.h\n"
- "ldr x27, [x17, #0xe8]\n"
- "ldr x26, [x17, #0xf0]\n"
- ".inst 0x44984777 // smlalt z23.s, p4/M, z27.h, z24.h\n"
- ".inst 0x449040ae // smlalb z14.s, p4/M, z5.h, z16.h\n"
- "ldr x25, [x17, #0xf8]\n"
- "ldr x24, [x17, #0x100]\n"
- ".inst 0x449d4772 // smlalt z18.s, p4/M, z27.h, z29.h\n"
- ".inst 0x449d4514 // smlalt z20.s, p4/M, z8.h, z29.h\n"
- "ld1sb { z27.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e137b // ssublb z27.h, z27.b, z30.b\n"
- ".inst 0x449d47e1 // smlalt z1.s, p4/M, z31.h, z29.h\n"
- ".inst 0x449840a6 // smlalb z6.s, p4/M, z5.h, z24.h\n"
- "ld1sb { z29.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x454a13bd // ssublb z29.h, z29.b, z10.b\n"
- ".inst 0x449843e9 // smlalb z9.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984007 // smlalb z7.s, p4/M, z0.h, z24.h\n"
- "ldr x23, [x17, #0x108]\n"
- "ldr x22, [x17, #0x110]\n"
- ".inst 0x449044b7 // smlalt z23.s, p4/M, z5.h, z16.h\n"
- ".inst 0x4495438e // smlalb z14.s, p4/M, z28.h, z21.h\n"
- "ldr x20, [x17, #0x118]\n"
- "whilelt p0.h, x16, x3\n"
- ".inst 0x449844b2 // smlalt z18.s, p4/M, z5.h, z24.h\n"
- ".inst 0x449847f4 // smlalt z20.s, p4/M, z31.h, z24.h\n"
- "ld1sb { z5.h }, p3/Z, [x21, x2]\n"
- ".inst 0x455e10a5 // ssublb z5.h, z5.b, z30.b\n"
- ".inst 0x44984401 // smlalt z1.s, p4/M, z0.h, z24.h\n"
- ".inst 0x44904266 // smlalb z6.s, p4/M, z19.h, z16.h\n"
- "ld1sb { z24.h }, p4/Z, [x4, #7, MUL VL]\n"
- "inch x4, ALL, MUL #8\n"
- ".inst 0x44904009 // smlalb z9.s, p4/M, z0.h, z16.h\n"
- ".inst 0x44904167 // smlalb z7.s, p4/M, z11.h, z16.h\n"
- ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
- "ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44954797 // smlalt z23.s, p4/M, z28.h, z21.h\n"
- ".inst 0x4496434e // smlalb z14.s, p4/M, z26.h, z22.h\n"
- "ld1sb { z28.h }, p3/Z, [x13, x2]\n"
- ".inst 0x455e139c // ssublb z28.h, z28.b, z30.b\n"
- ".inst 0x44904672 // smlalt z18.s, p4/M, z19.h, z16.h\n"
- ".inst 0x44904414 // smlalt z20.s, p4/M, z0.h, z16.h\n"
- "ld1sb { z19.h }, p4/Z, [x4]\n"
- ".inst 0x454a1273 // ssublb z19.h, z19.b, z10.b\n"
- ".inst 0x44904561 // smlalt z1.s, p4/M, z11.h, z16.h\n"
- ".inst 0x44954346 // smlalb z6.s, p4/M, z26.h, z21.h\n"
- "ld1sb { z16.h }, p3/Z, [x12, x2]\n"
- ".inst 0x455e1210 // ssublb z16.h, z16.b, z30.b\n"
- ".inst 0x44954229 // smlalb z9.s, p4/M, z17.h, z21.h\n"
- ".inst 0x44954067 // smlalb z7.s, p4/M, z3.h, z21.h\n"
- ".inst 0x44964757 // smlalt z23.s, p4/M, z26.h, z22.h\n"
- ".inst 0x4499410e // smlalb z14.s, p4/M, z8.h, z25.h\n"
- ".inst 0x44954752 // smlalt z18.s, p4/M, z26.h, z21.h\n"
- ".inst 0x44954634 // smlalt z20.s, p4/M, z17.h, z21.h\n"
- "ld1sb { z26.h }, p3/Z, [x11, x2]\n"
- ".inst 0x455e135a // ssublb z26.h, z26.b, z30.b\n"
- ".inst 0x44954461 // smlalt z1.s, p4/M, z3.h, z21.h\n"
- ".inst 0x44964106 // smlalb z6.s, p4/M, z8.h, z22.h\n"
- "ld1sb { z21.h }, p4/Z, [x4, #1, MUL VL]\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- ".inst 0x44964069 // smlalb z9.s, p4/M, z3.h, z22.h\n"
- ".inst 0x44964087 // smlalb z7.s, p4/M, z4.h, z22.h\n"
- ".inst 0x44994517 // smlalt z23.s, p4/M, z8.h, z25.h\n"
- ".inst 0x448243ee // smlalb z14.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44964512 // smlalt z18.s, p4/M, z8.h, z22.h\n"
- ".inst 0x44964474 // smlalt z20.s, p4/M, z3.h, z22.h\n"
- "ld1sb { z8.h }, p3/Z, [x10, x2]\n"
- ".inst 0x455e1108 // ssublb z8.h, z8.b, z30.b\n"
- ".inst 0x44964481 // smlalt z1.s, p4/M, z4.h, z22.h\n"
- ".inst 0x449943e6 // smlalb z6.s, p4/M, z31.h, z25.h\n"
- "ld1sb { z22.h }, p4/Z, [x4, #2, MUL VL]\n"
- ".inst 0x454a12d6 // ssublb z22.h, z22.b, z10.b\n"
- ".inst 0x44994089 // smlalb z9.s, p4/M, z4.h, z25.h\n"
- ".inst 0x44994367 // smlalb z7.s, p4/M, z27.h, z25.h\n"
- ".inst 0x448247f7 // smlalt z23.s, p4/M, z31.h, z2.h\n"
- ".inst 0x449d400e // smlalb z14.s, p4/M, z0.h, z29.h\n"
- ".inst 0x449947f2 // smlalt z18.s, p4/M, z31.h, z25.h\n"
- ".inst 0x44994494 // smlalt z20.s, p4/M, z4.h, z25.h\n"
- "ld1sb { z31.h }, p3/Z, [x9, x2]\n"
- ".inst 0x455e13ff // ssublb z31.h, z31.b, z30.b\n"
- ".inst 0x44994761 // smlalt z1.s, p4/M, z27.h, z25.h\n"
- ".inst 0x44824006 // smlalb z6.s, p4/M, z0.h, z2.h\n"
- "ld1sb { z25.h }, p4/Z, [x4, #3, MUL VL]\n"
- ".inst 0x454a1339 // ssublb z25.h, z25.b, z10.b\n"
- ".inst 0x44824369 // smlalb z9.s, p4/M, z27.h, z2.h\n"
- ".inst 0x448240a7 // smlalb z7.s, p4/M, z5.h, z2.h\n"
- ".inst 0x449d4417 // smlalt z23.s, p4/M, z0.h, z29.h\n"
- ".inst 0x4498422e // smlalb z14.s, p4/M, z17.h, z24.h\n"
- ".inst 0x44824412 // smlalt z18.s, p4/M, z0.h, z2.h\n"
- ".inst 0x44824774 // smlalt z20.s, p4/M, z27.h, z2.h\n"
- "ld1sb { z0.h }, p3/Z, [x28, x2]\n"
- ".inst 0x455e1000 // ssublb z0.h, z0.b, z30.b\n"
- ".inst 0x448244a1 // smlalt z1.s, p4/M, z5.h, z2.h\n"
- ".inst 0x449d4166 // smlalb z6.s, p4/M, z11.h, z29.h\n"
- "ld1sb { z2.h }, p4/Z, [x4, #4, MUL VL]\n"
- ".inst 0x454a1042 // ssublb z2.h, z2.b, z10.b\n"
- ".inst 0x449d40a9 // smlalb z9.s, p4/M, z5.h, z29.h\n"
- ".inst 0x449d4387 // smlalb z7.s, p4/M, z28.h, z29.h\n"
- ".inst 0x44984637 // smlalt z23.s, p4/M, z17.h, z24.h\n"
- ".inst 0x4493406e // smlalb z14.s, p4/M, z3.h, z19.h\n"
- "ld1sb { z17.h }, p3/Z, [x27, x2]\n"
- ".inst 0x455e1231 // ssublb z17.h, z17.b, z30.b\n"
- ".inst 0x449d4572 // smlalt z18.s, p4/M, z11.h, z29.h\n"
- ".inst 0x449d44b4 // smlalt z20.s, p4/M, z5.h, z29.h\n"
- "ld1sb { z11.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x454a116b // ssublb z11.h, z11.b, z10.b\n"
- ".inst 0x449d4781 // smlalt z1.s, p4/M, z28.h, z29.h\n"
- ".inst 0x44984066 // smlalb z6.s, p4/M, z3.h, z24.h\n"
- "ld1sb { z29.h }, p3/Z, [x26, x2]\n"
- ".inst 0x455e13bd // ssublb z29.h, z29.b, z30.b\n"
- ".inst 0x44984209 // smlalb z9.s, p4/M, z16.h, z24.h\n"
- ".inst 0x44984347 // smlalb z7.s, p4/M, z26.h, z24.h\n"
- ".inst 0x44934477 // smlalt z23.s, p4/M, z3.h, z19.h\n"
- ".inst 0x4495408e // smlalb z14.s, p4/M, z4.h, z21.h\n"
- ".inst 0x44984472 // smlalt z18.s, p4/M, z3.h, z24.h\n"
- ".inst 0x44984614 // smlalt z20.s, p4/M, z16.h, z24.h\n"
- "ld1sb { z3.h }, p3/Z, [x25, x2]\n"
- ".inst 0x455e1063 // ssublb z3.h, z3.b, z30.b\n"
- ".inst 0x44984741 // smlalt z1.s, p4/M, z26.h, z24.h\n"
- ".inst 0x44934086 // smlalb z6.s, p4/M, z4.h, z19.h\n"
- "ld1sb { z24.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
- ".inst 0x44934349 // smlalb z9.s, p4/M, z26.h, z19.h\n"
- ".inst 0x44934107 // smlalb z7.s, p4/M, z8.h, z19.h\n"
- ".inst 0x44954497 // smlalt z23.s, p4/M, z4.h, z21.h\n"
- ".inst 0x4496436e // smlalb z14.s, p4/M, z27.h, z22.h\n"
- ".inst 0x44934492 // smlalt z18.s, p4/M, z4.h, z19.h\n"
- ".inst 0x44934754 // smlalt z20.s, p4/M, z26.h, z19.h\n"
+ ".inst 0x44964306 // smlalb z6.s, p4/M, z24.h, z22.h\n"
+ ".inst 0x4496471e // smlalt z30.s, p4/M, z24.h, z22.h\n"
+ "ld1sb { z24.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x44964011 // smlalb z17.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x449643b5 // smlalb z21.s, p4/M, z29.h, z22.h\n"
+ ".inst 0x454e1021 // ssublb z1.h, z1.b, z14.b\n"
+ ".inst 0x44964167 // smlalb z7.s, p4/M, z11.h, z22.h\n"
+ ".inst 0x44964408 // smlalt z8.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ ".inst 0x449647bb // smlalt z27.s, p4/M, z29.h, z22.h\n"
+ ".inst 0x44964569 // smlalt z9.s, p4/M, z11.h, z22.h\n"
+ "ld1sb { z22.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x454e1318 // ssublb z24.h, z24.b, z14.b\n"
+ ".inst 0x44844006 // smlalb z6.s, p4/M, z0.h, z4.h\n"
+ ".inst 0x4484441e // smlalt z30.s, p4/M, z0.h, z4.h\n"
+ "ld1sb { z0.h }, p4/Z, [x4, #6, MUL VL]\n"
+ ".inst 0x44844351 // smlalb z17.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844175 // smlalb z21.s, p4/M, z11.h, z4.h\n"
+ ".inst 0x448443e7 // smlalb z7.s, p4/M, z31.h, z4.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x454e12d6 // ssublb z22.h, z22.b, z14.b\n"
+ ".inst 0x4484457b // smlalt z27.s, p4/M, z11.h, z4.h\n"
+ ".inst 0x448447e9 // smlalt z9.s, p4/M, z31.h, z4.h\n"
"ld1sb { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x455e1084 // ssublb z4.h, z4.b, z30.b\n"
- ".inst 0x44934501 // smlalt z1.s, p4/M, z8.h, z19.h\n"
- ".inst 0x44954366 // smlalb z6.s, p4/M, z27.h, z21.h\n"
- "ld1sb { z19.h }, p4/Z, [x4, #7, MUL VL]\n"
+ ".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
+ ".inst 0x44904346 // smlalb z6.s, p4/M, z26.h, z16.h\n"
+ ".inst 0x4490475e // smlalt z30.s, p4/M, z26.h, z16.h\n"
+ "ld1sb { z26.h }, p4/Z, [x4, #7, MUL VL]\n"
"inch x4, ALL, MUL #8\n"
- ".inst 0x44954109 // smlalb z9.s, p4/M, z8.h, z21.h\n"
- ".inst 0x449543e7 // smlalb z7.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454a1273 // ssublb z19.h, z19.b, z10.b\n"
- ".inst 0x44964777 // smlalt z23.s, p4/M, z27.h, z22.h\n"
- ".inst 0x449940ae // smlalb z14.s, p4/M, z5.h, z25.h\n"
- ".inst 0x44954772 // smlalt z18.s, p4/M, z27.h, z21.h\n"
- ".inst 0x44954514 // smlalt z20.s, p4/M, z8.h, z21.h\n"
- "ld1sb { z27.h }, p3/Z, [x23, x2]\n"
- ".inst 0x455e137b // ssublb z27.h, z27.b, z30.b\n"
- ".inst 0x449547e1 // smlalt z1.s, p4/M, z31.h, z21.h\n"
- ".inst 0x449640a6 // smlalb z6.s, p4/M, z5.h, z22.h\n"
- "ld1sb { z21.h }, p4/Z, [x4]\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- ".inst 0x449643e9 // smlalb z9.s, p4/M, z31.h, z22.h\n"
- ".inst 0x44964007 // smlalb z7.s, p4/M, z0.h, z22.h\n"
- "inch x4\n"
- ".inst 0x449944b7 // smlalt z23.s, p4/M, z5.h, z25.h\n"
- ".inst 0x4482420e // smlalb z14.s, p4/M, z16.h, z2.h\n"
- ".inst 0x449644b2 // smlalt z18.s, p4/M, z5.h, z22.h\n"
- ".inst 0x449647f4 // smlalt z20.s, p4/M, z31.h, z22.h\n"
- "ld1sb { z5.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e10a5 // ssublb z5.h, z5.b, z30.b\n"
- ".inst 0x44964401 // smlalt z1.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x44904391 // smlalb z17.s, p4/M, z28.h, z16.h\n"
+ ".inst 0x449043f5 // smlalb z21.s, p4/M, z31.h, z16.h\n"
+ ".inst 0x449040a7 // smlalb z7.s, p4/M, z5.h, z16.h\n"
+ ".inst 0x44904788 // smlalt z8.s, p4/M, z28.h, z16.h\n"
+ ".inst 0x454e1084 // ssublb z4.h, z4.b, z14.b\n"
+ ".inst 0x449047fb // smlalt z27.s, p4/M, z31.h, z16.h\n"
+ ".inst 0x449044a9 // smlalt z9.s, p4/M, z5.h, z16.h\n"
+ "ld1sb { z16.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x454c135a // ssublb z26.h, z26.b, z12.b\n"
".inst 0x44994386 // smlalb z6.s, p4/M, z28.h, z25.h\n"
- "ld1w { z22.s }, p2/Z, [x15]\n"
- ".inst 0x44994009 // smlalb z9.s, p4/M, z0.h, z25.h\n"
- ".inst 0x44994227 // smlalb z7.s, p4/M, z17.h, z25.h\n"
- ".inst 0x44824617 // smlalt z23.s, p4/M, z16.h, z2.h\n"
- ".inst 0x448b434e // smlalb z14.s, p4/M, z26.h, z11.h\n"
- "ld1w { z16.s }, p1/Z, [x15, #1, MUL VL]\n"
- "addvl x15, x15, #2\n"
- ".inst 0x44994792 // smlalt z18.s, p4/M, z28.h, z25.h\n"
- ".inst 0x44994414 // smlalt z20.s, p4/M, z0.h, z25.h\n"
- "ld1sb { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e139c // ssublb z28.h, z28.b, z30.b\n"
- ".inst 0x44994621 // smlalt z1.s, p4/M, z17.h, z25.h\n"
- ".inst 0x44824346 // smlalb z6.s, p4/M, z26.h, z2.h\n"
- "uzp1 z25.s, z22.s, z16.s\n"
+ ".inst 0x4499479e // smlalt z30.s, p4/M, z28.h, z25.h\n"
+ "ld1sb { z28.h }, p4/Z, [x4]\n"
+ "inch x4\n"
+ ".inst 0x44994251 // smlalb z17.s, p4/M, z18.h, z25.h\n"
+ ".inst 0x449940b5 // smlalb z21.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994067 // smlalb z7.s, p4/M, z3.h, z25.h\n"
+ ".inst 0x44994648 // smlalt z8.s, p4/M, z18.h, z25.h\n"
+ ".inst 0x454e1210 // ssublb z16.h, z16.b, z14.b\n"
+ ".inst 0x449944bb // smlalt z27.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994469 // smlalt z9.s, p4/M, z3.h, z25.h\n"
+ "ld1sb { z25.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x44934246 // smlalb z6.s, p4/M, z18.h, z19.h\n"
+ ".inst 0x4493465e // smlalt z30.s, p4/M, z18.h, z19.h\n"
+ "ld1w { z18.s }, p2/Z, [x7]\n"
+ ".inst 0x44934291 // smlalb z17.s, p4/M, z20.h, z19.h\n"
+ ".inst 0x44934075 // smlalb z21.s, p4/M, z3.h, z19.h\n"
+ ".inst 0x44934027 // smlalb z7.s, p4/M, z1.h, z19.h\n"
+ ".inst 0x44934688 // smlalt z8.s, p4/M, z20.h, z19.h\n"
+ "ld1w { z20.s }, p1/Z, [x7, #1, MUL VL]\n"
+ ".inst 0x454e1339 // ssublb z25.h, z25.b, z14.b\n"
+ ".inst 0x4493447b // smlalt z27.s, p4/M, z3.h, z19.h\n"
+ ".inst 0x44934429 // smlalt z9.s, p4/M, z1.h, z19.h\n"
+ "ld1sb { z19.h }, p3/Z, [x20, x2]\n"
"inch x2\n"
- ".inst 0x448243a9 // smlalb z9.s, p4/M, z29.h, z2.h\n"
- ".inst 0x44824067 // smlalb z7.s, p4/M, z3.h, z2.h\n"
- "uzp2 z16.s, z22.s, z16.s\n"
- "ld1w { z22.s }, p2/Z, [x14]\n"
- ".inst 0x448b4757 // smlalt z23.s, p4/M, z26.h, z11.h\n"
- ".inst 0x4498410e // smlalb z14.s, p4/M, z8.h, z24.h\n"
+ ".inst 0x449743a6 // smlalb z6.s, p4/M, z29.h, z23.h\n"
+ ".inst 0x449747be // smlalt z30.s, p4/M, z29.h, z23.h\n"
+ "addvl x7, x7, #2\n"
+ ".inst 0x44974171 // smlalb z17.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x44974315 // smlalb z21.s, p4/M, z24.h, z23.h\n"
+ "uzp1 z29.s, z18.s, z20.s\n"
+ ".inst 0x449742c7 // smlalb z7.s, p4/M, z22.h, z23.h\n"
+ ".inst 0x44974568 // smlalt z8.s, p4/M, z11.h, z23.h\n"
+ "uzp2 z18.s, z18.s, z20.s\n"
+ "ld1w { z20.s }, p2/Z, [x8]\n"
+ ".inst 0x4497471b // smlalt z27.s, p4/M, z24.h, z23.h\n"
+ ".inst 0x449746c9 // smlalt z9.s, p4/M, z22.h, z23.h\n"
+ "ld1w { z24.s }, p1/Z, [x8, #1, MUL VL]\n"
+ ".inst 0x454e1273 // ssublb z19.h, z19.b, z14.b\n"
+ ".inst 0x44824166 // smlalb z6.s, p4/M, z11.h, z2.h\n"
+ ".inst 0x4482457e // smlalt z30.s, p4/M, z11.h, z2.h\n"
"mov x20, x2\n"
- "incw x20\n"
- ".inst 0x44824752 // smlalt z18.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448247b4 // smlalt z20.s, p4/M, z29.h, z2.h\n"
- "ld1w { z26.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z29.s, z22.s, z26.s\n"
- ".inst 0x44824461 // smlalt z1.s, p4/M, z3.h, z2.h\n"
- ".inst 0x448b4106 // smlalb z6.s, p4/M, z8.h, z11.h\n"
- "uzp2 z22.s, z22.s, z26.s\n"
"whilelt p2.s, x2, x3\n"
- ".inst 0x448b4069 // smlalb z9.s, p4/M, z3.h, z11.h\n"
- ".inst 0x448b4087 // smlalb z7.s, p4/M, z4.h, z11.h\n"
+ ".inst 0x448243f1 // smlalb z17.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448242d5 // smlalb z21.s, p4/M, z22.h, z2.h\n"
+ "addvl x8, x8, #2\n"
+ ".inst 0x44824087 // smlalb z7.s, p4/M, z4.h, z2.h\n"
+ ".inst 0x448247e8 // smlalt z8.s, p4/M, z31.h, z2.h\n"
+ "uzp1 z23.s, z20.s, z24.s\n"
+ ".inst 0x448246db // smlalt z27.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x44824489 // smlalt z9.s, p4/M, z4.h, z2.h\n"
+ "uzp2 z22.s, z20.s, z24.s\n"
+ "incw x20\n"
+ ".inst 0x448043e6 // smlalb z6.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448047fe // smlalt z30.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448040b1 // smlalb z17.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x44804095 // smlalb z21.s, p4/M, z4.h, z0.h\n"
+ ".inst 0x44804207 // smlalb z7.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x448044a8 // smlalt z8.s, p4/M, z5.h, z0.h\n"
"whilelt p1.s, x20, x3\n"
"whilelt p3.h, x2, x3\n"
- ".inst 0x44984517 // smlalt z23.s, p4/M, z8.h, z24.h\n"
- ".inst 0x449343ee // smlalb z14.s, p4/M, z31.h, z19.h\n"
- "addvl x14, x14, #2\n"
- ".inst 0x448b4512 // smlalt z18.s, p4/M, z8.h, z11.h\n"
- ".inst 0x448b4474 // smlalt z20.s, p4/M, z3.h, z11.h\n"
- ".inst 0x448b4481 // smlalt z1.s, p4/M, z4.h, z11.h\n"
- ".inst 0x449843e6 // smlalb z6.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984089 // smlalb z9.s, p4/M, z4.h, z24.h\n"
- ".inst 0x44984367 // smlalb z7.s, p4/M, z27.h, z24.h\n"
- ".inst 0x449347f7 // smlalt z23.s, p4/M, z31.h, z19.h\n"
- ".inst 0x4495400e // smlalb z14.s, p4/M, z0.h, z21.h\n"
- ".inst 0x04b975ce // sqrdmulh z14.s, z14.s, z25.s\n"
- ".inst 0x449847f2 // smlalt z18.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984494 // smlalt z20.s, p4/M, z4.h, z24.h\n"
- "and z3.d, z14.d, z29.d\n"
- ".inst 0x44984761 // smlalt z1.s, p4/M, z27.h, z24.h\n"
- ".inst 0x44934006 // smlalb z6.s, p4/M, z0.h, z19.h\n"
- "asr z3.s, z3.s, #0x1f\n"
- ".inst 0x44934369 // smlalb z9.s, p4/M, z27.h, z19.h\n"
- ".inst 0x449340a7 // smlalb z7.s, p4/M, z5.h, z19.h\n"
- "sqadd z14.s, z14.s, z3.s\n"
- ".inst 0x448293ae // srshl z14.s, p4/M, z14.s, z29.s\n"
- ".inst 0x44954417 // smlalt z23.s, p4/M, z0.h, z21.h\n"
- ".inst 0x44934412 // smlalt z18.s, p4/M, z0.h, z19.h\n"
- ".inst 0x04b076f7 // sqrdmulh z23.s, z23.s, z16.s\n"
- ".inst 0x44934774 // smlalt z20.s, p4/M, z27.h, z19.h\n"
- ".inst 0x449344a1 // smlalt z1.s, p4/M, z5.h, z19.h\n"
- "and z31.d, z23.d, z22.d\n"
- ".inst 0x44954226 // smlalb z6.s, p4/M, z17.h, z21.h\n"
- ".inst 0x449540a9 // smlalb z9.s, p4/M, z5.h, z21.h\n"
- ".inst 0x04b974c6 // sqrdmulh z6.s, z6.s, z25.s\n"
- ".inst 0x44954387 // smlalb z7.s, p4/M, z28.h, z21.h\n"
- ".inst 0x44954632 // smlalt z18.s, p4/M, z17.h, z21.h\n"
- ".inst 0x04b97529 // sqrdmulh z9.s, z9.s, z25.s\n"
- ".inst 0x449544b4 // smlalt z20.s, p4/M, z5.h, z21.h\n"
- ".inst 0x44954781 // smlalt z1.s, p4/M, z28.h, z21.h\n"
- ".inst 0x04b974e7 // sqrdmulh z7.s, z7.s, z25.s\n"
- "asr z31.s, z31.s, #0x1f\n"
- "and z3.d, z6.d, z29.d\n"
- ".inst 0x04b07652 // sqrdmulh z18.s, z18.s, z16.s\n"
- "and z0.d, z9.d, z29.d\n"
- ".inst 0x04b07694 // sqrdmulh z20.s, z20.s, z16.s\n"
- "and z19.d, z7.d, z29.d\n"
- ".inst 0x04b07421 // sqrdmulh z1.s, z1.s, z16.s\n"
- "sqadd z23.s, z23.s, z31.s\n"
- ".inst 0x448292d7 // srshl z23.s, p4/M, z23.s, z22.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- "and z21.d, z18.d, z22.d\n"
- "asr z0.s, z0.s, #0x1f\n"
- "and z17.d, z20.d, z22.d\n"
+ ".inst 0x4480449b // smlalt z27.s, p4/M, z4.h, z0.h\n"
+ ".inst 0x44804609 // smlalt z9.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x449a40a6 // smlalb z6.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a44be // smlalt z30.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a4071 // smlalb z17.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a4215 // smlalb z21.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4327 // smlalb z7.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449a4468 // smlalt z8.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a461b // smlalt z27.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4729 // smlalt z9.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449c4066 // smlalb z6.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c447e // smlalt z30.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c4031 // smlalb z17.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c4335 // smlalb z21.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4267 // smlalb z7.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x449c4428 // smlalt z8.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c473b // smlalt z27.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4669 // smlalt z9.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x04bd74c6 // sqrdmulh z6.s, z6.s, z29.s\n"
+ ".inst 0x04b277de // sqrdmulh z30.s, z30.s, z18.s\n"
+ ".inst 0x04bd7631 // sqrdmulh z17.s, z17.s, z29.s\n"
+ ".inst 0x04bd76b5 // sqrdmulh z21.s, z21.s, z29.s\n"
+ "and z19.d, z6.d, z23.d\n"
+ ".inst 0x04bd74e7 // sqrdmulh z7.s, z7.s, z29.s\n"
+ ".inst 0x04b27508 // sqrdmulh z8.s, z8.s, z18.s\n"
+ "and z16.d, z30.d, z22.d\n"
+ "and z2.d, z17.d, z23.d\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "and z20.d, z21.d, z23.d\n"
+ ".inst 0x04b2777b // sqrdmulh z27.s, z27.s, z18.s\n"
+ ".inst 0x04b27529 // sqrdmulh z9.s, z9.s, z18.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "sqadd z6.s, z6.s, z19.s\n"
+ "and z19.d, z7.d, z23.d\n"
+ "and z0.d, z8.d, z22.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "sqadd z30.s, z30.s, z16.s\n"
+ "and z26.d, z27.d, z22.d\n"
"asr z19.s, z19.s, #0x1f\n"
- "and z16.d, z1.d, z22.d\n"
- "sqadd z6.s, z6.s, z3.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- ".inst 0x448293a6 // srshl z6.s, p4/M, z6.s, z29.s\n"
- "sqadd z9.s, z9.s, z0.s\n"
- "asr z17.s, z17.s, #0x1f\n"
- ".inst 0x448293a9 // srshl z9.s, p4/M, z9.s, z29.s\n"
+ "and z16.d, z9.d, z22.d\n"
+ ".inst 0x448292e6 // srshl z6.s, p4/M, z6.s, z23.s\n"
+ "sqadd z17.s, z17.s, z2.s\n"
+ "asr z0.s, z0.s, #0x1f\n"
+ "sqadd z21.s, z21.s, z20.s\n"
+ "asr z26.s, z26.s, #0x1f\n"
+ ".inst 0x448292de // srshl z30.s, p4/M, z30.s, z22.s\n"
"sqadd z7.s, z7.s, z19.s\n"
"asr z16.s, z16.s, #0x1f\n"
- ".inst 0x448293a7 // srshl z7.s, p4/M, z7.s, z29.s\n"
- "sqadd z18.s, z18.s, z21.s\n"
- "sqadd z20.s, z20.s, z17.s\n"
- ".inst 0x448292d2 // srshl z18.s, p4/M, z18.s, z22.s\n"
- ".inst 0x448292d4 // srshl z20.s, p4/M, z20.s, z22.s\n"
- "sqadd z1.s, z1.s, z16.s\n"
- ".inst 0x453041ce // sqxtnb z14.h, z14.s\n"
- ".inst 0x448292c1 // srshl z1.s, p4/M, z1.s, z22.s\n"
+ ".inst 0x448292f1 // srshl z17.s, p4/M, z17.s, z23.s\n"
+ "sqadd z8.s, z8.s, z0.s\n"
".inst 0x453040c6 // sqxtnb z6.h, z6.s\n"
- ".inst 0x45304129 // sqxtnb z9.h, z9.s\n"
+ ".inst 0x448292f5 // srshl z21.s, p4/M, z21.s, z23.s\n"
+ "sqadd z27.s, z27.s, z26.s\n"
+ ".inst 0x448292e7 // srshl z7.s, p4/M, z7.s, z23.s\n"
+ "sqadd z9.s, z9.s, z16.s\n"
+ ".inst 0x45304231 // sqxtnb z17.h, z17.s\n"
+ ".inst 0x448292c8 // srshl z8.s, p4/M, z8.s, z22.s\n"
+ ".inst 0x453042b5 // sqxtnb z21.h, z21.s\n"
+ ".inst 0x453047c6 // sqxtnt z6.h, z30.s\n"
+ ".inst 0x448292db // srshl z27.s, p4/M, z27.s, z22.s\n"
+ ".inst 0x448292c9 // srshl z9.s, p4/M, z9.s, z22.s\n"
".inst 0x453040e7 // sqxtnb z7.h, z7.s\n"
- ".inst 0x453046ee // sqxtnt z14.h, z23.s\n"
- ".inst 0x45304646 // sqxtnt z6.h, z18.s\n"
- ".inst 0x45304689 // sqxtnt z9.h, z20.s\n"
- ".inst 0x45304427 // sqxtnt z7.h, z1.s\n"
- "sqadd z14.h, z14.h, z15.h\n"
- "smax z14.h, p4/M, z14.h, z12.h\n"
- "smin z14.h, p4/M, z14.h, z13.h\n"
- "sqadd z6.h, z6.h, z15.h\n"
- "sqadd z9.h, z9.h, z15.h\n"
- "smax z6.h, p4/M, z6.h, z12.h\n"
- "smax z9.h, p4/M, z9.h, z12.h\n"
- "sqadd z7.h, z7.h, z15.h\n"
- "smax z7.h, p4/M, z7.h, z12.h\n"
+ ".inst 0x45304511 // sqxtnt z17.h, z8.s\n"
+ ".inst 0x45304775 // sqxtnt z21.h, z27.s\n"
+ ".inst 0x45304527 // sqxtnt z7.h, z9.s\n"
+ "sqadd z6.h, z6.h, z10.h\n"
+ "sqadd z17.h, z17.h, z10.h\n"
+ "sqadd z21.h, z21.h, z10.h\n"
+ "sqadd z7.h, z7.h, z10.h\n"
+ "smax z6.h, p4/M, z6.h, z15.h\n"
+ "smax z17.h, p4/M, z17.h, z15.h\n"
+ "smax z21.h, p4/M, z21.h, z15.h\n"
+ "smax z7.h, p4/M, z7.h, z15.h\n"
"smin z6.h, p4/M, z6.h, z13.h\n"
- "st1b { z14.h }, p0, [x5, x16]\n"
- "smin z9.h, p4/M, z9.h, z13.h\n"
+ "smin z17.h, p4/M, z17.h, z13.h\n"
+ "smin z21.h, p4/M, z21.h, z13.h\n"
"smin z7.h, p4/M, z7.h, z13.h\n"
- "st1b { z6.h }, p0, [x6, x16]\n"
- "st1b { z9.h }, p0, [x7, x16]\n"
- "st1b { z7.h }, p0, [x8, x16]\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
+ "st1b { z6.h }, p0, [x17, x6]\n"
+ "st1b { z17.h }, p0, [x16, x6]\n"
+ "st1b { z21.h }, p0, [x15, x6]\n"
+ "st1b { z7.h }, p0, [x14, x6]\n"
+ "inch x6\n"
+ "ld1w { z21.s }, p2/Z, [x21]\n"
"ld1w { z16.s }, p1/Z, [x21, #1, MUL VL]\n"
- "uzp1 z14.s, z17.s, z16.s\n"
- "ld1sb { z26.h }, p4/Z, [x4]\n"
- "ld1sb { z8.h }, p4/Z, [x4, #1, MUL VL]\n"
- "uzp2 z23.s, z17.s, z16.s\n"
"addvl x21, x21, #2\n"
- "ld1sb { z16.h }, p4/Z, [x4, #2, MUL VL]\n"
- "ld1sb { z21.h }, p4/Z, [x4, #3, MUL VL]\n"
- "inch x16\n"
+ "ld1sb { z25.h }, p4/Z, [x4]\n"
+ "ld1sb { z28.h }, p4/Z, [x4, #1, MUL VL]\n"
+ "ld1sb { z4.h }, p4/Z, [x4, #2, MUL VL]\n"
+ "ld1sb { z23.h }, p4/Z, [x4, #3, MUL VL]\n"
+ "ld1sb { z31.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldp x9, x28, [x5, #0x0]\n"
+ "uzp1 z6.s, z21.s, z16.s\n"
+ "uzp2 z30.s, z21.s, z16.s\n"
"str x21, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z17.h }, p4/Z, [x4, #4, MUL VL]\n"
- "ldp x9, x28, [x17, #0x0]\n"
- "mov z6.d, z14.d\n"
- "mov z18.d, z23.d\n"
- "ldp x27, x26, [x17, #0x10]\n"
- "ldp x25, x24, [x17, #0x20]\n"
- "mov z9.d, z14.d\n"
- "mov z20.d, z23.d\n"
- "ldp x23, x22, [x17, #0x30]\n"
- "ldp x21, x20, [x17, #0x40]\n"
- "mov z7.d, z14.d\n"
- "mov z1.d, z23.d\n"
- "ld1sb { z22.h }, p3/Z, [x9, x2]\n"
- "ld1sb { z2.h }, p3/Z, [x28, x2]\n"
- ".inst 0x454a135a // ssublb z26.h, z26.b, z10.b\n"
- ".inst 0x454a1108 // ssublb z8.h, z8.b, z10.b\n"
- "ld1sb { z11.h }, p3/Z, [x27, x2]\n"
- "ld1sb { z3.h }, p3/Z, [x26, x2]\n"
- ".inst 0x454a1210 // ssublb z16.h, z16.b, z10.b\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- "ld1sb { z29.h }, p3/Z, [x25, x2]\n"
- "ld1sb { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x454a1231 // ssublb z17.h, z17.b, z10.b\n"
- ".inst 0x455e12d6 // ssublb z22.h, z22.b, z30.b\n"
- "ld1sb { z31.h }, p3/Z, [x23, x2]\n"
- "ld1sb { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1042 // ssublb z2.h, z2.b, z30.b\n"
- ".inst 0x455e116b // ssublb z11.h, z11.b, z30.b\n"
- "ld1sb { z19.h }, p3/Z, [x21, x2]\n"
- "ld1sb { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1063 // ssublb z3.h, z3.b, z30.b\n"
- ".inst 0x455e13bd // ssublb z29.h, z29.b, z30.b\n"
- ".inst 0x455e1084 // ssublb z4.h, z4.b, z30.b\n"
- ".inst 0x455e13ff // ssublb z31.h, z31.b, z30.b\n"
- ".inst 0x455e1000 // ssublb z0.h, z0.b, z30.b\n"
- ".inst 0x455e1273 // ssublb z19.h, z19.b, z30.b\n"
- ".inst 0x455e139c // ssublb z28.h, z28.b, z30.b\n"
+ ".inst 0x454c1339 // ssublb z25.h, z25.b, z12.b\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
+ ".inst 0x454c12f7 // ssublb z23.h, z23.b, z12.b\n"
+ "ldp x27, x26, [x5, #0x10]\n"
+ "mov z17.d, z6.d\n"
+ "mov z8.d, z30.d\n"
+ "mov z21.d, z6.d\n"
+ "mov z27.d, z30.d\n"
+ "ldp x25, x24, [x5, #0x20]\n"
+ "mov z7.d, z6.d\n"
+ "mov z9.d, z30.d\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "ldp x23, x22, [x5, #0x30]\n"
+ "ldp x21, x20, [x5, #0x40]\n"
+ "ld1sb { z26.h }, p3/Z, [x9, x2]\n"
+ "ld1sb { z16.h }, p3/Z, [x28, x2]\n"
+ "ld1sb { z24.h }, p3/Z, [x27, x2]\n"
+ "ld1sb { z5.h }, p3/Z, [x26, x2]\n"
+ "ld1sb { z18.h }, p3/Z, [x25, x2]\n"
+ "ld1sb { z3.h }, p3/Z, [x24, x2]\n"
+ "ld1sb { z19.h }, p3/Z, [x23, x2]\n"
+ "ld1sb { z11.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454e135a // ssublb z26.h, z26.b, z14.b\n"
+ ".inst 0x454e1210 // ssublb z16.h, z16.b, z14.b\n"
+ "ld1sb { z20.h }, p3/Z, [x21, x2]\n"
+ "ld1sb { z29.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454e1318 // ssublb z24.h, z24.b, z14.b\n"
+ ".inst 0x454e10a5 // ssublb z5.h, z5.b, z14.b\n"
+ ".inst 0x454e1252 // ssublb z18.h, z18.b, z14.b\n"
+ ".inst 0x454e1063 // ssublb z3.h, z3.b, z14.b\n"
+ ".inst 0x454e1273 // ssublb z19.h, z19.b, z14.b\n"
+ ".inst 0x454e116b // ssublb z11.h, z11.b, z14.b\n"
+ ".inst 0x454e1294 // ssublb z20.h, z20.b, z14.b\n"
+ ".inst 0x454e13bd // ssublb z29.h, z29.b, z14.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
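The hunk above only renumbers SVE registers and reschedules instructions; the requantize arithmetic is unchanged. For reference, a minimal scalar sketch of the tail it implements — SQRDMULH, rounding right shift (SRSHL with a negative shift), c_offset add, min/max clamp — assuming gemmlowp-style fixed-point semantics; all names below are illustrative and not part of the library:

#include <algorithm>
#include <cstdint>

// SQRDMULH: saturating rounding doubling multiply, returning the high half.
static int32_t sqrdmulh_s32(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX; // the only saturating case
    const int64_t p = (int64_t)a * (int64_t)b;
    return (int32_t)((2 * p + (INT64_C(1) << 31)) >> 32);
}

// Rounding right shift: the effect of SRSHL with a negative shift amount.
// The and/asr/sqadd fixup visible in the assembly tweaks tie-breaking for
// negative inputs before the shift; it is omitted here for clarity.
static int32_t rounding_rshift(int32_t x, int shift)
{
    if (shift <= 0) return x;
    return (int32_t)(((int64_t)x + (INT64_C(1) << (shift - 1))) >> shift);
}

// One output value. The kernels narrow to 16 bits (sqxtnb/sqxtnt) before
// adding c_offset and clamping; for in-range data the result is the same.
static int8_t requantize(int32_t acc, int32_t mul, int shift,
                         int32_t c_offset, int32_t minval, int32_t maxval)
{
    int32_t v = rounding_rshift(sqrdmulh_s32(acc, mul), shift); // sqrdmulh + srshl
    v += c_offset;                                              // sqadd
    return (int8_t)std::min(maxval, std::max(minval, v));       // smin/smax, st1b
}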
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
index 274b29dcfc..5a9f8e69ad 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,288 +41,288 @@ void sve_s8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "mov x20, #0x9\n"
- "whilelt p0.b, XZR, x20\n"
- "ldr x23, [%x[inptrs], #0x8]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
+ "mov x25, #0x9\n"
+ "ldr x24, [%x[inptrs], #0x8]\n"
+ "ldr x23, [%x[inptrs], #0x10]\n"
+ "mov z22.b, #0x1\n"
"ldr x22, [%x[inptrs], #0x20]\n"
"ldr x21, [%x[inptrs], #0x0]\n"
- "mov z13.b, #0x1\n"
- "lsr z13.s, z13.s, #0x8\n"
- "ld1b { z1.b }, p0/Z, [x23]\n"
- "ld1b { z2.b }, p0/Z, [x20]\n"
- "mov z8.d, z1.d\n"
- "mov z27.d, z1.d\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
"ldr x20, [%x[inptrs], #0x18]\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
- "mov z31.d, z1.d\n"
- "mov z28.d, z2.d\n"
- "ld1b { z0.b }, p0/Z, [x21]\n"
- "mov z30.d, z2.d\n"
- "mov z26.d, z2.d\n"
- "ld1b { z3.b }, p0/Z, [x20]\n"
- "mov z22.d, z4.d\n"
- "mov z10.d, z4.d\n"
+ "lsr z22.s, z22.s, #0x8\n"
+ "mov z29.s, #0x0\n"
"ptrue p2.b\n"
- "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z18.d, z4.d\n"
- "ext z8.b, z8.b, z8.b, #0x2\n"
+ "whilelt p0.b, XZR, x25\n"
+ "mov z14.s, #0x0\n"
+ "mov z23.s, #0x0\n"
"lsl x10, %x[n_channels], #0x2\n"
- "neg z11.s, p2/M, z11.s\n"
- "ext z27.b, z27.b, z27.b, #0x4\n"
- "ext z31.b, z31.b, z31.b, #0x6\n"
+ "mov z11.s, #0x0\n"
+ "mov z15.s, #0x0\n"
"mov x9, #0x0\n"
- "whilelt p0.b, x9, x10\n"
- "ext z28.b, z28.b, z28.b, #0x2\n"
- "ext z30.b, z30.b, z30.b, #0x4\n"
- "ld1w { z14.s }, p0/Z, [%x[params]]\n"
"mov x28, #0x0\n"
- "ext z26.b, z26.b, z26.b, #0x6\n"
- "ext z22.b, z22.b, z22.b, #0x2\n"
+ "mov z31.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "ld1b { z1.b }, p0/Z, [x24]\n"
+ "ld1b { z2.b }, p0/Z, [x23]\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "ld1b { z4.b }, p0/Z, [x22]\n"
+ "ld1b { z0.b }, p0/Z, [x21]\n"
+ "mov z24.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "ld1b { z3.b }, p0/Z, [x20]\n"
+ "mov z27.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "neg z16.s, p2/M, z16.s\n"
+ "mov z5.d, z1.d\n"
+ "mov z7.d, z1.d\n"
+ "whilelt p0.b, x9, x10\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ext z10.b, z10.b, z10.b, #0x4\n"
- "ext z18.b, z18.b, z18.b, #0x6\n"
+ "mov z30.d, z1.d\n"
+ "mov z6.d, z2.d\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
"ldp x21, x20, [%x[outptrs], #0x30]\n"
- "mov z21.d, z0.d\n"
- "mov z20.d, z0.d\n"
- "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "mov z19.d, z0.d\n"
- "mov z24.d, z3.d\n"
- "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "mov z8.d, z2.d\n"
+ "mov z19.d, z2.d\n"
+ "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "mov z9.d, z4.d\n"
+ "mov z28.d, z4.d\n"
+ "ext z5.b, z5.b, z5.b, #0x2\n"
+ "ext z7.b, z7.b, z7.b, #0x4\n"
+ "ext z30.b, z30.b, z30.b, #0x6\n"
+ "ext z6.b, z6.b, z6.b, #0x2\n"
+ "ext z8.b, z8.b, z8.b, #0x4\n"
+ "ext z19.b, z19.b, z19.b, #0x6\n"
+ "ext z9.b, z9.b, z9.b, #0x2\n"
+ "ext z28.b, z28.b, z28.b, #0x4\n"
+ "zip1 z1.s, z1.s, z7.s\n"
+ "mov z7.d, z4.d\n"
+ "zip1 z5.s, z5.s, z30.s\n"
+ "mov z30.d, z0.d\n"
+ "ext z7.b, z7.b, z7.b, #0x6\n"
+ "zip1 z2.s, z2.s, z8.s\n"
+ "ld1w { z8.s }, p0/Z, [%x[params]]\n"
+ "ext z30.b, z30.b, z30.b, #0x2\n"
+ "zip1 z6.s, z6.s, z19.s\n"
+ "ld1rw { z19.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "zip1 z4.s, z4.s, z28.s\n"
+ "mov z28.d, z0.d\n"
+ "zip1 z9.s, z9.s, z7.s\n"
+ "mov z7.d, z0.d\n"
+ "ext z28.b, z28.b, z28.b, #0x4\n"
+ "zip1 z1.s, z1.s, z5.s\n"
"ld1b { z5.b }, p0/Z, [%x[params], #1, MUL VL]\n"
- "mov z17.d, z3.d\n"
- "mov z16.d, z3.d\n"
+ "ext z7.b, z7.b, z7.b, #0x6\n"
+ "zip1 z2.s, z2.s, z6.s\n"
"ld1b { z6.b }, p0/Z, [%x[params], #2, MUL VL]\n"
+ "zip1 z4.s, z4.s, z9.s\n"
+ "mov z9.d, z3.d\n"
+ "zip1 z0.s, z0.s, z28.s\n"
+ "mov z28.d, z3.d\n"
+ "ext z9.b, z9.b, z9.b, #0x2\n"
+ "zip1 z30.s, z30.s, z7.s\n"
"ld1b { z7.b }, p0/Z, [%x[params], #3, MUL VL]\n"
- "ext z21.b, z21.b, z21.b, #0x2\n"
- "ext z20.b, z20.b, z20.b, #0x4\n"
"addvl %x[params], %x[params], #4\n"
- "ext z19.b, z19.b, z19.b, #0x6\n"
- "zip1 z1.s, z1.s, z27.s\n"
- "zip1 z8.s, z8.s, z31.s\n"
- "zip1 z2.s, z2.s, z30.s\n"
- "zip1 z28.s, z28.s, z26.s\n"
- "ext z24.b, z24.b, z24.b, #0x2\n"
- "ext z17.b, z17.b, z17.b, #0x4\n"
- "ext z16.b, z16.b, z16.b, #0x6\n"
- "zip1 z4.s, z4.s, z10.s\n"
- "zip1 z22.s, z22.s, z18.s\n"
- "zip1 z0.s, z0.s, z20.s\n"
- "zip1 z21.s, z21.s, z19.s\n"
- "zip1 z1.s, z1.s, z8.s\n"
- "zip1 z2.s, z2.s, z28.s\n"
- "zip1 z3.s, z3.s, z17.s\n"
- "zip1 z24.s, z24.s, z16.s\n"
- "zip1 z4.s, z4.s, z22.s\n"
- "zip1 z0.s, z0.s, z21.s\n"
+ "ext z28.b, z28.b, z28.b, #0x4\n"
"mov z1.q, z1.q[0]\n"
"mov z2.q, z2.q[0]\n"
- "zip1 z3.s, z3.s, z24.s\n"
"mov z4.q, z4.q[0]\n"
- "mov z24.s, #0x0\n"
- "mov z25.s, #0x0\n"
- "sdot z24.s, z13.b, z1.b[0]\n"
- "mov z23.s, #0x0\n"
- "mov z22.s, #0x0\n"
- "sdot z25.s, z13.b, z1.b[1]\n"
- "mov z21.s, #0x0\n"
- "mov z19.s, #0x0\n"
- "sdot z23.s, z13.b, z1.b[2]\n"
- "mov z10.s, #0x0\n"
- "mov z8.s, #0x0\n"
- "sdot z22.s, z13.b, z1.b[3]\n"
- "mov z20.s, #0x0\n"
- "mov z18.s, #0x0\n"
- "sdot z21.s, z13.b, z2.b[0]\n"
- "mov z17.s, #0x0\n"
- "mov z16.s, #0x0\n"
- "sdot z19.s, z13.b, z2.b[1]\n"
- "sdot z10.s, z13.b, z2.b[2]\n"
- "sdot z8.s, z13.b, z2.b[3]\n"
+ "zip1 z0.s, z0.s, z30.s\n"
+ "mov z30.d, z3.d\n"
+ "sdot z25.s, z22.b, z1.b[0]\n"
+ "zip1 z3.s, z3.s, z28.s\n"
+ "sdot z26.s, z22.b, z1.b[1]\n"
+ "sdot z29.s, z22.b, z1.b[2]\n"
+ "ext z30.b, z30.b, z30.b, #0x6\n"
+ "sdot z14.s, z22.b, z1.b[3]\n"
+ "sdot z23.s, z22.b, z2.b[0]\n"
+ "sdot z11.s, z22.b, z2.b[1]\n"
+ "sdot z15.s, z22.b, z2.b[2]\n"
"mov z0.q, z0.q[0]\n"
- "sdot z20.s, z13.b, z4.b[0]\n"
- "sdot z18.s, z13.b, z4.b[1]\n"
- "mov z3.q, z3.q[0]\n"
- "sdot z17.s, z13.b, z4.b[2]\n"
- "sdot z16.s, z13.b, z4.b[3]\n"
- "mov z31.s, #0x0\n"
- "mov z30.s, #0x0\n"
- "mov z26.s, #0x0\n"
- "sdot z31.s, z13.b, z0.b[0]\n"
- "mov z27.s, #0x0\n"
+ "sdot z31.s, z22.b, z2.b[3]\n"
+ "sdot z17.s, z22.b, z4.b[0]\n"
"mov z28.s, #0x0\n"
- "sdot z30.s, z13.b, z0.b[1]\n"
- "mov z29.s, #0x0\n"
- "sdot z26.s, z13.b, z0.b[2]\n"
- "sdot z27.s, z13.b, z0.b[3]\n"
- "sdot z28.s, z13.b, z3.b[0]\n"
- "sdot z29.s, z13.b, z3.b[1]\n"
- "add z24.s, z24.s, z21.s\n"
- "add z25.s, z25.s, z19.s\n"
- "add z23.s, z23.s, z10.s\n"
- "add z22.s, z22.s, z8.s\n"
- "add z21.s, z20.s, z21.s\n"
+ "zip1 z9.s, z9.s, z30.s\n"
+ "sdot z20.s, z22.b, z4.b[1]\n"
+ "sdot z21.s, z22.b, z4.b[2]\n"
+ "sdot z24.s, z22.b, z4.b[3]\n"
+ "mov z30.s, #0x0\n"
+ "sdot z12.s, z22.b, z0.b[0]\n"
+ "sdot z27.s, z22.b, z0.b[1]\n"
+ "sdot z18.s, z22.b, z0.b[2]\n"
+ "add z25.s, z25.s, z23.s\n"
+ "zip1 z3.s, z3.s, z9.s\n"
+ "mov z9.s, #0x0\n"
+ "sdot z28.s, z22.b, z0.b[3]\n"
+ "add z26.s, z26.s, z11.s\n"
+ "add z29.s, z29.s, z15.s\n"
+ "add z14.s, z14.s, z31.s\n"
+ "add z23.s, z17.s, z23.s\n"
+ "mov z3.q, z3.q[0]\n"
+ "mov z17.s, #0x0\n"
+ "add z11.s, z20.s, z11.s\n"
"mov z20.s, #0x0\n"
- "sdot z20.s, z13.b, z3.b[2]\n"
- "add z19.s, z18.s, z19.s\n"
- "mov z18.s, #0x0\n"
- "sdot z18.s, z13.b, z3.b[3]\n"
- "add z17.s, z17.s, z10.s\n"
- "add z16.s, z16.s, z8.s\n"
- "add z24.s, z24.s, z31.s\n"
- "add z25.s, z25.s, z30.s\n"
- "mul z24.s, p2/M, z24.s, z11.s\n"
- "mul z25.s, p2/M, z25.s, z11.s\n"
- "add z26.s, z23.s, z26.s\n"
- "add z27.s, z22.s, z27.s\n"
- "mul z26.s, p2/M, z26.s, z11.s\n"
- "mul z27.s, p2/M, z27.s, z11.s\n"
- "add z28.s, z21.s, z28.s\n"
- "add z29.s, z19.s, z29.s\n"
- "mul z28.s, p2/M, z28.s, z11.s\n"
- "mul z29.s, p2/M, z29.s, z11.s\n"
- "add z30.s, z17.s, z20.s\n"
- "add z31.s, z16.s, z18.s\n"
- "mul z30.s, p2/M, z30.s, z11.s\n"
- "mul z31.s, p2/M, z31.s, z11.s\n"
- "zip1 z19.s, z24.s, z26.s\n"
- "zip1 z18.s, z25.s, z27.s\n"
+ "sdot z30.s, z22.b, z3.b[0]\n"
+ "sdot z9.s, z22.b, z3.b[1]\n"
+ "sdot z17.s, z22.b, z3.b[2]\n"
+ "add z15.s, z21.s, z15.s\n"
+ "sdot z20.s, z22.b, z3.b[3]\n"
+ "add z31.s, z24.s, z31.s\n"
+ "add z24.s, z25.s, z12.s\n"
+ "add z25.s, z26.s, z27.s\n"
+ "add z26.s, z29.s, z18.s\n"
+ "add z27.s, z14.s, z28.s\n"
+ "add z28.s, z23.s, z30.s\n"
+ "add z29.s, z11.s, z9.s\n"
+ "add z30.s, z15.s, z17.s\n"
+ "add z31.s, z31.s, z20.s\n"
+ "mul z24.s, p2/M, z24.s, z16.s\n"
+ "mul z25.s, p2/M, z25.s, z16.s\n"
+ "mul z26.s, p2/M, z26.s, z16.s\n"
+ "mul z27.s, p2/M, z27.s, z16.s\n"
+ "mul z28.s, p2/M, z28.s, z16.s\n"
+ "mul z29.s, p2/M, z29.s, z16.s\n"
+ "mul z30.s, p2/M, z30.s, z16.s\n"
+ "mul z31.s, p2/M, z31.s, z16.s\n"
+ "zip1 z21.s, z24.s, z26.s\n"
+ "add z24.s, z24.s, z8.s\n"
+ "zip1 z23.s, z25.s, z27.s\n"
+ "add z25.s, z25.s, z8.s\n"
+ "add z26.s, z26.s, z8.s\n"
+ "add z27.s, z27.s, z8.s\n"
"zip1 z17.s, z28.s, z30.s\n"
"zip1 z16.s, z29.s, z31.s\n"
- "zip1 z22.s, z19.s, z18.s\n"
+ "zip1 z22.s, z21.s, z23.s\n"
+ "add z28.s, z28.s, z8.s\n"
+ "add z29.s, z29.s, z8.s\n"
+ "add z30.s, z30.s, z8.s\n"
"zip1 z23.s, z17.s, z16.s\n"
- "add z24.s, z24.s, z14.s\n"
- "add z25.s, z25.s, z14.s\n"
- "add z26.s, z26.s, z14.s\n"
- "add z27.s, z27.s, z14.s\n"
- "add z28.s, z28.s, z14.s\n"
- "add z29.s, z29.s, z14.s\n"
- "add z30.s, z30.s, z14.s\n"
- "add z31.s, z31.s, z14.s\n"
+ "add z31.s, z31.s, z8.s\n"
"1:" // Loop
"sdot z24.s, z5.b, z0.b[0]\n"
"sdot z25.s, z5.b, z0.b[1]\n"
- "ld1w { z8.s }, p2/Z, [%x[params]]\n"
+ "ld1w { z15.s }, p2/Z, [%x[params]]\n"
"ld1w { z21.s }, p2/Z, [%x[params], #1, MUL VL]\n"
"sdot z26.s, z5.b, z0.b[2]\n"
"sdot z27.s, z5.b, z0.b[3]\n"
"incb x9\n"
"whilelt p1.s, x28, %x[n_channels]\n"
+ "sdot z28.s, z5.b, z2.b[0]\n"
+ "sdot z29.s, z5.b, z2.b[1]\n"
+ "sdot z30.s, z5.b, z2.b[2]\n"
+ "sdot z31.s, z5.b, z2.b[3]\n"
"sdot z24.s, z6.b, z1.b[0]\n"
"sdot z25.s, z6.b, z1.b[1]\n"
"whilelt p0.b, x9, x10\n"
- "ld1w { z20.s }, p0/Z, [%x[params], #2, MUL VL]\n"
"sdot z26.s, z6.b, z1.b[2]\n"
"sdot z27.s, z6.b, z1.b[3]\n"
- "sdot z28.s, z5.b, z2.b[0]\n"
- "sdot z29.s, z5.b, z2.b[1]\n"
- "sdot z30.s, z5.b, z2.b[2]\n"
- "sdot z31.s, z5.b, z2.b[3]\n"
- "ld1b { z5.b }, p0/Z, [%x[params], #3, MUL VL]\n"
- "sdot z24.s, z7.b, z2.b[0]\n"
- "sdot z25.s, z7.b, z2.b[1]\n"
- ".inst 0x04a87718 // sqrdmulh z24.s, z24.s, z8.s\n"
- "sdot z26.s, z7.b, z2.b[2]\n"
- "sdot z27.s, z7.b, z2.b[3]\n"
- ".inst 0x04a87739 // sqrdmulh z25.s, z25.s, z8.s\n"
"sdot z28.s, z6.b, z3.b[0]\n"
"sdot z29.s, z6.b, z3.b[1]\n"
- ".inst 0x04a8775a // sqrdmulh z26.s, z26.s, z8.s\n"
"sdot z30.s, z6.b, z3.b[2]\n"
"sdot z31.s, z6.b, z3.b[3]\n"
- ".inst 0x04a8777b // sqrdmulh z27.s, z27.s, z8.s\n"
+ "ld1w { z20.s }, p0/Z, [%x[params], #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [%x[params], #3, MUL VL]\n"
+ "sdot z24.s, z7.b, z2.b[0]\n"
+ "sdot z25.s, z7.b, z2.b[1]\n"
"ld1b { z6.b }, p0/Z, [%x[params], #4, MUL VL]\n"
+ "sdot z26.s, z7.b, z2.b[2]\n"
+ "sdot z27.s, z7.b, z2.b[3]\n"
"sdot z28.s, z7.b, z4.b[0]\n"
"sdot z29.s, z7.b, z4.b[1]\n"
- "and z19.d, z24.d, z21.d\n"
"sdot z30.s, z7.b, z4.b[2]\n"
"sdot z31.s, z7.b, z4.b[3]\n"
- "and z18.d, z25.d, z21.d\n"
"ld1b { z7.b }, p0/Z, [%x[params], #5, MUL VL]\n"
+ "addvl %x[params], %x[params], #6\n"
+ ".inst 0x04af7718 // sqrdmulh z24.s, z24.s, z15.s\n"
+ ".inst 0x04af7739 // sqrdmulh z25.s, z25.s, z15.s\n"
+ ".inst 0x04af775a // sqrdmulh z26.s, z26.s, z15.s\n"
+ ".inst 0x04af777b // sqrdmulh z27.s, z27.s, z15.s\n"
+ ".inst 0x04af779c // sqrdmulh z28.s, z28.s, z15.s\n"
+ ".inst 0x04af77bd // sqrdmulh z29.s, z29.s, z15.s\n"
+ "and z14.d, z24.d, z21.d\n"
+ "and z12.d, z25.d, z21.d\n"
"and z17.d, z26.d, z21.d\n"
"and z16.d, z27.d, z21.d\n"
- "addvl %x[params], %x[params], #6\n"
- "asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04af77de // sqrdmulh z30.s, z30.s, z15.s\n"
+ ".inst 0x04af77ff // sqrdmulh z31.s, z31.s, z15.s\n"
+ "asr z14.s, z14.s, #0x1f\n"
+ "asr z12.s, z12.s, #0x1f\n"
"asr z17.s, z17.s, #0x1f\n"
"asr z16.s, z16.s, #0x1f\n"
- ".inst 0x04a8779c // sqrdmulh z28.s, z28.s, z8.s\n"
- ".inst 0x04a877bd // sqrdmulh z29.s, z29.s, z8.s\n"
- ".inst 0x04a877de // sqrdmulh z30.s, z30.s, z8.s\n"
- ".inst 0x04a877ff // sqrdmulh z31.s, z31.s, z8.s\n"
- "sqadd z24.s, z24.s, z19.s\n"
- "sqadd z25.s, z25.s, z18.s\n"
- ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
- ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
+ "sqadd z24.s, z24.s, z14.s\n"
+ "and z14.d, z28.d, z21.d\n"
+ "sqadd z25.s, z25.s, z12.s\n"
+ "and z11.d, z29.d, z21.d\n"
"sqadd z26.s, z26.s, z17.s\n"
"sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
- ".inst 0x44828abb // srshl z27.s, p2/M, z27.s, z21.s\n"
- "and z19.d, z28.d, z21.d\n"
- "and z18.d, z29.d, z21.d\n"
"and z17.d, z30.d, z21.d\n"
"and z16.d, z31.d, z21.d\n"
- "asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
+ "asr z14.s, z14.s, #0x1f\n"
+ "asr z11.s, z11.s, #0x1f\n"
+ ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
+ ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
+ ".inst 0x44828abb // srshl z27.s, p2/M, z27.s, z21.s\n"
"asr z17.s, z17.s, #0x1f\n"
"asr z16.s, z16.s, #0x1f\n"
- "sqadd z28.s, z28.s, z19.s\n"
- "sqadd z29.s, z29.s, z18.s\n"
- ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
- ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
+ "add z24.s, z24.s, z10.s\n"
+ "sqadd z28.s, z28.s, z14.s\n"
+ "sqadd z29.s, z29.s, z11.s\n"
+ "add z25.s, z25.s, z10.s\n"
"sqadd z30.s, z30.s, z17.s\n"
"sqadd z31.s, z31.s, z16.s\n"
+ ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
+ "add z26.s, z26.s, z10.s\n"
+ "add z27.s, z27.s, z10.s\n"
+ "smin z24.s, p2/M, z24.s, z19.s\n"
+ ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
+ "smin z25.s, p2/M, z25.s, z19.s\n"
".inst 0x44828abe // srshl z30.s, p2/M, z30.s, z21.s\n"
".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
- "add z24.s, z24.s, z9.s\n"
- "add z25.s, z25.s, z9.s\n"
- "smin z24.s, p2/M, z24.s, z12.s\n"
- "smin z25.s, p2/M, z25.s, z12.s\n"
- "add z26.s, z26.s, z9.s\n"
- "add z27.s, z27.s, z9.s\n"
- "smin z26.s, p2/M, z26.s, z12.s\n"
- "smin z27.s, p2/M, z27.s, z12.s\n"
- "add z28.s, z28.s, z9.s\n"
- "add z29.s, z29.s, z9.s\n"
- "smin z28.s, p2/M, z28.s, z12.s\n"
- "smin z29.s, p2/M, z29.s, z12.s\n"
- "add z30.s, z30.s, z9.s\n"
- "add z31.s, z31.s, z9.s\n"
- "smin z30.s, p2/M, z30.s, z12.s\n"
- "smin z31.s, p2/M, z31.s, z12.s\n"
- "smax z24.s, p2/M, z24.s, z15.s\n"
- "smax z25.s, p2/M, z25.s, z15.s\n"
+ "add z28.s, z28.s, z10.s\n"
+ "add z29.s, z29.s, z10.s\n"
+ "smin z26.s, p2/M, z26.s, z19.s\n"
+ "smin z27.s, p2/M, z27.s, z19.s\n"
+ "smax z24.s, p2/M, z24.s, z13.s\n"
+ "add z30.s, z30.s, z10.s\n"
+ "smax z25.s, p2/M, z25.s, z13.s\n"
+ "add z31.s, z31.s, z10.s\n"
+ "smin z28.s, p2/M, z28.s, z19.s\n"
+ "smin z29.s, p2/M, z29.s, z19.s\n"
+ "smax z26.s, p2/M, z26.s, z13.s\n"
+ "smin z30.s, p2/M, z30.s, z19.s\n"
+ "smax z27.s, p2/M, z27.s, z13.s\n"
"st1b { z24.s }, p1, [x27, x28]\n"
"mov z24.s, z22.s[0]\n"
- "smax z26.s, p2/M, z26.s, z15.s\n"
- "smax z27.s, p2/M, z27.s, z15.s\n"
+ "smin z31.s, p2/M, z31.s, z19.s\n"
+ "smax z28.s, p2/M, z28.s, z13.s\n"
"st1b { z25.s }, p1, [x26, x28]\n"
"mov z25.s, z22.s[1]\n"
- "smax z28.s, p2/M, z28.s, z15.s\n"
- "smax z29.s, p2/M, z29.s, z15.s\n"
+ "smax z29.s, p2/M, z29.s, z13.s\n"
"st1b { z26.s }, p1, [x25, x28]\n"
"mov z26.s, z22.s[2]\n"
- "smax z30.s, p2/M, z30.s, z15.s\n"
- "smax z31.s, p2/M, z31.s, z15.s\n"
+ "add z24.s, z24.s, z20.s\n"
+ "smax z30.s, p2/M, z30.s, z13.s\n"
"st1b { z27.s }, p1, [x24, x28]\n"
"mov z27.s, z22.s[3]\n"
+ "add z25.s, z25.s, z20.s\n"
+ "smax z31.s, p2/M, z31.s, z13.s\n"
"st1b { z28.s }, p1, [x23, x28]\n"
"mov z28.s, z23.s[0]\n"
- "add z24.s, z24.s, z20.s\n"
+ "add z26.s, z26.s, z20.s\n"
"st1b { z29.s }, p1, [x22, x28]\n"
"mov z29.s, z23.s[1]\n"
- "add z25.s, z25.s, z20.s\n"
+ "add z27.s, z27.s, z20.s\n"
"st1b { z30.s }, p1, [x21, x28]\n"
"mov z30.s, z23.s[2]\n"
- "add z26.s, z26.s, z20.s\n"
+ "add z28.s, z28.s, z20.s\n"
"st1b { z31.s }, p1, [x20, x28]\n"
"mov z31.s, z23.s[3]\n"
"incw x28\n"
- "add z27.s, z27.s, z20.s\n"
- "add z28.s, z28.s, z20.s\n"
"add z29.s, z29.s, z20.s\n"
"add z30.s, z30.s, z20.s\n"
"add z31.s, z31.s, z20.s\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
index a3b2b429c0..7843bfe1be 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,353 +42,353 @@ void sve_s8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
{
__asm__ __volatile__(
"mov x20, #0x6\n"
- "whilelt p0.b, XZR, x20\n"
- "ldr x22, [%x[inptrs], #0x18]\n"
- "ldr x21, [%x[inptrs], #0x20]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ld1b { z3.b }, p0/Z, [x22]\n"
- "mov z23.d, z3.d\n"
- "ext z23.b, z23.b, z23.b, #0x1\n"
- "ld1b { z4.b }, p0/Z, [x21]\n"
+ "ldr x27, [%x[inptrs], #0x18]\n"
+ "ldr x26, [%x[inptrs], #0x20]\n"
+ "mov z30.b, #0x1\n"
+ "ldr x25, [%x[inptrs], #0x10]\n"
"ldr x24, [%x[inptrs], #0x8]\n"
- "mov z18.d, z4.d\n"
- "ext z18.b, z18.b, z18.b, #0x1\n"
- "ld1b { z2.b }, p0/Z, [x20]\n"
+ "mov z14.s, #0x0\n"
+ "mov z27.s, #0x0\n"
"ldr x23, [%x[inptrs], #0x28]\n"
- "mov z15.d, z2.d\n"
- "ext z15.b, z15.b, z15.b, #0x1\n"
"ldr x22, [%x[inptrs], #0x30]\n"
+ "mov z11.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ "whilelt p0.b, XZR, x20\n"
"ldr x21, [%x[inptrs], #0x38]\n"
- "zip1 z3.d, z3.d, z23.d\n"
- "zip1 z4.d, z4.d, z18.d\n"
"ldr x20, [%x[inptrs], #0x0]\n"
+ "mov z28.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "mov z21.s, #0x1\n"
+ "ptrue p2.b\n"
+ "lsl x10, %x[n_channels], #0x2\n"
+ "mov z24.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "mov x9, #0x0\n"
+ "mov x28, #0x0\n"
+ "ld1b { z3.b }, p0/Z, [x27]\n"
+ "ld1b { z4.b }, p0/Z, [x26]\n"
+ "mov z31.s, #0x0\n"
+ "mov z29.s, #0x0\n"
+ "ld1b { z2.b }, p0/Z, [x25]\n"
"ld1b { z1.b }, p0/Z, [x24]\n"
- "mov z19.d, z1.d\n"
- "ext z19.b, z19.b, z19.b, #0x1\n"
+ "mov z20.s, #0x0\n"
+ "mov z17.s, #0x0\n"
"ld1b { z5.b }, p0/Z, [x23]\n"
"ld1b { z6.b }, p0/Z, [x22]\n"
- "mov z18.d, z5.d\n"
- "mov z22.d, z6.d\n"
+ "mov z18.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z16.d, z3.d\n"
+ "mov z13.d, z4.d\n"
"ld1b { z7.b }, p0/Z, [x21]\n"
"ld1b { z0.b }, p0/Z, [x20]\n"
- "mov z8.d, z7.d\n"
- "zip1 z2.d, z2.d, z15.d\n"
- "mov z3.q, z3.q[0]\n"
- "mov z4.q, z4.q[0]\n"
- "ptrue p2.b\n"
- "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "ext z18.b, z18.b, z18.b, #0x1\n"
- "ext z22.b, z22.b, z22.b, #0x1\n"
- "lsl x10, %x[n_channels], #0x2\n"
- "neg z23.s, p2/M, z23.s\n"
- "ext z8.b, z8.b, z8.b, #0x1\n"
- "mov z28.b, #0x1\n"
- "mov x9, #0x0\n"
+ "mov z12.d, z2.d\n"
+ "mov z19.d, z1.d\n"
+ "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"whilelt p0.b, x9, x10\n"
- "mov z25.s, #0x0\n"
- "mov z24.s, #0x0\n"
- "sdot z25.s, z28.b, z3.b[0]\n"
- "ld1w { z12.s }, p0/Z, [%x[params]]\n"
- "mov z17.s, #0x0\n"
- "mov z16.s, #0x0\n"
- "sdot z24.s, z28.b, z3.b[2]\n"
- "mov x28, #0x0\n"
- "mov z27.d, z0.d\n"
- "sdot z17.s, z28.b, z4.b[0]\n"
- "sdot z16.s, z28.b, z4.b[2]\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
+ "ext z13.b, z13.b, z13.b, #0x1\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
- "ext z27.b, z27.b, z27.b, #0x1\n"
- "zip1 z1.d, z1.d, z19.d\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "ext z12.b, z12.b, z12.b, #0x1\n"
+ "mov z8.d, z5.d\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
- "mov z2.q, z2.q[0]\n"
- "zip1 z5.d, z5.d, z18.d\n"
"ldp x21, x20, [%x[outptrs], #0x30]\n"
- "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "zip1 z6.d, z6.d, z22.d\n"
- "zip1 z7.d, z7.d, z8.d\n"
- "ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "mov z30.s, #0x0\n"
- "mov z31.s, #0x0\n"
- "sdot z30.s, z28.b, z2.b[0]\n"
+ "mov z10.d, z6.d\n"
+ "mov z9.d, z7.d\n"
+ "neg z15.s, p2/M, z15.s\n"
+ "zip1 z3.d, z3.d, z16.d\n"
+ "zip1 z4.d, z4.d, z13.d\n"
+ "ld1w { z13.s }, p0/Z, [%x[params]]\n"
+ "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ext z19.b, z19.b, z19.b, #0x1\n"
+ "zip1 z2.d, z2.d, z12.d\n"
+ "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ext z8.b, z8.b, z8.b, #0x1\n"
+ "ext z10.b, z10.b, z10.b, #0x1\n"
+ "mov z3.q, z3.q[0]\n"
+ "mov z4.q, z4.q[0]\n"
+ "ext z9.b, z9.b, z9.b, #0x1\n"
+ "zip1 z1.d, z1.d, z19.d\n"
+ "ld1rw { z19.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "sdot z14.s, z30.b, z3.b[0]\n"
+ "sdot z27.s, z30.b, z3.b[2]\n"
+ "sdot z11.s, z30.b, z4.b[0]\n"
+ "mov z2.q, z2.q[0]\n"
+ "sdot z22.s, z30.b, z4.b[2]\n"
+ "zip1 z5.d, z5.d, z8.d\n"
"ld1b { z8.b }, p0/Z, [%x[params], #1, MUL VL]\n"
- "mov z29.s, #0x1\n"
- "sdot z31.s, z28.b, z2.b[2]\n"
- "sdot z25.s, z29.b, z3.b[1]\n"
+ "zip1 z6.d, z6.d, z10.d\n"
+ "mov z10.d, z0.d\n"
+ "sdot z28.s, z30.b, z2.b[0]\n"
+ "zip1 z7.d, z7.d, z9.d\n"
+ "sdot z25.s, z30.b, z2.b[2]\n"
+ "sdot z14.s, z21.b, z3.b[1]\n"
"ld1b { z9.b }, p0/Z, [%x[params], #2, MUL VL]\n"
- "zip1 z0.d, z0.d, z27.d\n"
+ "ext z10.b, z10.b, z10.b, #0x1\n"
"mov z1.q, z1.q[0]\n"
- "sdot z24.s, z29.b, z3.b[3]\n"
- "ld1b { z10.b }, p0/Z, [%x[params], #3, MUL VL]\n"
+ "sdot z27.s, z21.b, z3.b[3]\n"
"mov z5.q, z5.q[0]\n"
"mov z6.q, z6.q[0]\n"
- "sdot z17.s, z29.b, z4.b[1]\n"
- "ld1b { z11.b }, p0/Z, [%x[params], #4, MUL VL]\n"
+ "sdot z11.s, z21.b, z4.b[1]\n"
"mov z7.q, z7.q[0]\n"
- "mov z22.s, #0x0\n"
- "sdot z16.s, z29.b, z4.b[3]\n"
+ "sdot z22.s, z21.b, z4.b[3]\n"
+ "sdot z24.s, z30.b, z1.b[0]\n"
+ "zip1 z0.d, z0.d, z10.d\n"
+ "sdot z23.s, z30.b, z1.b[2]\n"
+ "sdot z31.s, z30.b, z5.b[0]\n"
+ "ld1b { z10.b }, p0/Z, [%x[params], #3, MUL VL]\n"
+ "sdot z29.s, z30.b, z5.b[2]\n"
+ "sdot z20.s, z30.b, z6.b[0]\n"
+ "sdot z17.s, z30.b, z6.b[2]\n"
+ "sdot z18.s, z30.b, z7.b[0]\n"
+ "add z14.s, z14.s, z11.s\n"
+ "ld1b { z11.b }, p0/Z, [%x[params], #4, MUL VL]\n"
+ "sdot z26.s, z30.b, z7.b[2]\n"
+ "mov z0.q, z0.q[0]\n"
+ "sdot z28.s, z21.b, z2.b[1]\n"
"addvl %x[params], %x[params], #5\n"
- "mov z21.s, #0x0\n"
- "mov z26.s, #0x0\n"
- "sdot z22.s, z28.b, z1.b[0]\n"
+ "sdot z25.s, z21.b, z2.b[3]\n"
+ "add z22.s, z27.s, z22.s\n"
+ "sdot z24.s, z21.b, z1.b[1]\n"
"mov z27.s, #0x0\n"
- "mov z20.s, #0x0\n"
- "sdot z21.s, z28.b, z1.b[2]\n"
- "mov z19.s, #0x0\n"
- "mov z18.s, #0x0\n"
- "sdot z26.s, z28.b, z5.b[0]\n"
- "sdot z27.s, z28.b, z5.b[2]\n"
- "sdot z20.s, z28.b, z6.b[0]\n"
- "mov z0.q, z0.q[0]\n"
- "sdot z19.s, z28.b, z6.b[2]\n"
- "sdot z18.s, z28.b, z7.b[0]\n"
- "add z17.s, z25.s, z17.s\n"
- "mov z25.s, #0x0\n"
- "sdot z25.s, z28.b, z7.b[2]\n"
- "sdot z30.s, z29.b, z2.b[1]\n"
- "sdot z31.s, z29.b, z2.b[3]\n"
- "add z16.s, z24.s, z16.s\n"
- "sdot z22.s, z29.b, z1.b[1]\n"
- "mov z24.s, #0x0\n"
- "sdot z24.s, z28.b, z0.b[0]\n"
- "sdot z21.s, z29.b, z1.b[3]\n"
- "sdot z26.s, z29.b, z5.b[1]\n"
- "sdot z27.s, z29.b, z5.b[3]\n"
- "add z30.s, z30.s, z17.s\n"
- "sdot z20.s, z29.b, z6.b[1]\n"
- "sdot z19.s, z29.b, z6.b[3]\n"
- "add z31.s, z31.s, z16.s\n"
- "sdot z18.s, z29.b, z7.b[1]\n"
- "sdot z25.s, z29.b, z7.b[3]\n"
- "add z22.s, z22.s, z30.s\n"
- "sdot z24.s, z29.b, z0.b[1]\n"
- "add z21.s, z21.s, z31.s\n"
- "add z20.s, z26.s, z20.s\n"
- "add z19.s, z27.s, z19.s\n"
- "add z18.s, z18.s, z17.s\n"
- "mov z17.s, #0x0\n"
- "sdot z17.s, z28.b, z0.b[2]\n"
- "sdot z17.s, z29.b, z0.b[3]\n"
- "add z16.s, z25.s, z16.s\n"
- "add z24.s, z22.s, z24.s\n"
- "add z25.s, z21.s, z17.s\n"
- "mul z24.s, p2/M, z24.s, z23.s\n"
- "mul z25.s, p2/M, z25.s, z23.s\n"
- "add z26.s, z26.s, z22.s\n"
- "add z27.s, z27.s, z21.s\n"
- "mul z26.s, p2/M, z26.s, z23.s\n"
- "mul z27.s, p2/M, z27.s, z23.s\n"
- "add z28.s, z20.s, z30.s\n"
- "add z29.s, z19.s, z31.s\n"
- "mul z28.s, p2/M, z28.s, z23.s\n"
- "mul z29.s, p2/M, z29.s, z23.s\n"
+ "sdot z23.s, z21.b, z1.b[3]\n"
+ "sdot z31.s, z21.b, z5.b[1]\n"
+ "sdot z29.s, z21.b, z5.b[3]\n"
+ "sdot z20.s, z21.b, z6.b[1]\n"
+ "sdot z27.s, z30.b, z0.b[0]\n"
+ "sdot z17.s, z21.b, z6.b[3]\n"
+ "add z28.s, z28.s, z14.s\n"
+ "sdot z18.s, z21.b, z7.b[1]\n"
+ "sdot z26.s, z21.b, z7.b[3]\n"
+ "add z25.s, z25.s, z22.s\n"
+ "add z24.s, z24.s, z28.s\n"
+ "add z20.s, z31.s, z20.s\n"
+ "sdot z27.s, z21.b, z0.b[1]\n"
+ "add z23.s, z23.s, z25.s\n"
+ "add z17.s, z29.s, z17.s\n"
+ "add z18.s, z18.s, z14.s\n"
+ "mov z14.s, #0x0\n"
+ "add z22.s, z26.s, z22.s\n"
+ "add z26.s, z31.s, z24.s\n"
+ "sdot z14.s, z30.b, z0.b[2]\n"
+ "add z24.s, z24.s, z27.s\n"
+ "add z27.s, z29.s, z23.s\n"
+ "add z28.s, z20.s, z28.s\n"
+ "add z29.s, z17.s, z25.s\n"
"add z30.s, z20.s, z18.s\n"
- "add z31.s, z19.s, z16.s\n"
- "mul z30.s, p2/M, z30.s, z23.s\n"
- "mul z31.s, p2/M, z31.s, z23.s\n"
- "zip1 z19.s, z24.s, z26.s\n"
- "zip1 z18.s, z25.s, z27.s\n"
- "zip1 z17.s, z28.s, z30.s\n"
- "zip1 z16.s, z29.s, z31.s\n"
- "zip1 z22.s, z19.s, z18.s\n"
- "zip1 z23.s, z17.s, z16.s\n"
- "add z24.s, z24.s, z12.s\n"
- "add z25.s, z25.s, z12.s\n"
- "add z26.s, z26.s, z12.s\n"
- "add z27.s, z27.s, z12.s\n"
- "add z28.s, z28.s, z12.s\n"
- "add z29.s, z29.s, z12.s\n"
- "add z30.s, z30.s, z12.s\n"
- "add z31.s, z31.s, z12.s\n"
+ "add z31.s, z17.s, z22.s\n"
+ "mul z26.s, p2/M, z26.s, z15.s\n"
+ "sdot z14.s, z21.b, z0.b[3]\n"
+ "mul z24.s, p2/M, z24.s, z15.s\n"
+ "mul z27.s, p2/M, z27.s, z15.s\n"
+ "mul z28.s, p2/M, z28.s, z15.s\n"
+ "mul z29.s, p2/M, z29.s, z15.s\n"
+ "mul z30.s, p2/M, z30.s, z15.s\n"
+ "mul z31.s, p2/M, z31.s, z15.s\n"
+ "add z25.s, z23.s, z14.s\n"
+ "zip1 z21.s, z24.s, z26.s\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z26.s, z26.s, z13.s\n"
+ "mul z25.s, p2/M, z25.s, z15.s\n"
+ "zip1 z22.s, z28.s, z30.s\n"
+ "add z28.s, z28.s, z13.s\n"
+ "zip1 z18.s, z29.s, z31.s\n"
+ "add z29.s, z29.s, z13.s\n"
+ "zip1 z14.s, z25.s, z27.s\n"
+ "add z25.s, z25.s, z13.s\n"
+ "add z27.s, z27.s, z13.s\n"
+ "add z30.s, z30.s, z13.s\n"
+ "zip1 z23.s, z22.s, z18.s\n"
+ "add z31.s, z31.s, z13.s\n"
+ "zip1 z22.s, z21.s, z14.s\n"
"1:" // Loop
"sdot z24.s, z8.b, z0.b[0]\n"
"sdot z25.s, z8.b, z0.b[2]\n"
- "ld1w { z12.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "ld1w { z21.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [%x[params], #7, MUL VL]\n"
"sdot z26.s, z8.b, z1.b[0]\n"
"sdot z27.s, z8.b, z1.b[2]\n"
"incb x9\n"
"whilelt p1.s, x28, %x[n_channels]\n"
+ "sdot z28.s, z8.b, z2.b[0]\n"
+ "sdot z29.s, z8.b, z2.b[2]\n"
+ "sdot z30.s, z8.b, z3.b[0]\n"
+ "sdot z31.s, z8.b, z3.b[2]\n"
+ "ld1b { z15.b }, p2/Z, [%x[params]]\n"
"sdot z24.s, z9.b, z0.b[1]\n"
"sdot z25.s, z9.b, z0.b[3]\n"
"whilelt p0.b, x9, x10\n"
"sdot z26.s, z9.b, z1.b[1]\n"
"sdot z27.s, z9.b, z1.b[3]\n"
- "sdot z28.s, z8.b, z2.b[0]\n"
- "sdot z29.s, z8.b, z2.b[2]\n"
- "sdot z30.s, z8.b, z3.b[0]\n"
- "sdot z31.s, z8.b, z3.b[2]\n"
- "ld1b { z17.b }, p2/Z, [%x[params]]\n"
- "sdot z24.s, z10.b, z1.b[0]\n"
- "sdot z25.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z27.s, z10.b, z2.b[2]\n"
"sdot z28.s, z9.b, z2.b[1]\n"
"sdot z29.s, z9.b, z2.b[3]\n"
"sdot z30.s, z9.b, z3.b[1]\n"
"sdot z31.s, z9.b, z3.b[3]\n"
- "ld1b { z16.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "sdot z24.s, z11.b, z1.b[1]\n"
- "sdot z25.s, z11.b, z1.b[3]\n"
- "sdot z26.s, z11.b, z2.b[1]\n"
- "sdot z27.s, z11.b, z2.b[3]\n"
+ "ld1b { z8.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "sdot z24.s, z10.b, z1.b[0]\n"
+ "sdot z25.s, z10.b, z1.b[2]\n"
+ "sdot z26.s, z10.b, z2.b[0]\n"
+ "sdot z27.s, z10.b, z2.b[2]\n"
"sdot z28.s, z10.b, z3.b[0]\n"
"sdot z29.s, z10.b, z3.b[2]\n"
"sdot z30.s, z10.b, z4.b[0]\n"
"sdot z31.s, z10.b, z4.b[2]\n"
- "ld1b { z19.b }, p2/Z, [%x[params], #2, MUL VL]\n"
- "sdot z24.s, z17.b, z2.b[0]\n"
- "sdot z25.s, z17.b, z2.b[2]\n"
- "sdot z26.s, z17.b, z3.b[0]\n"
- "sdot z27.s, z17.b, z3.b[2]\n"
+ "ld1b { z21.b }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "sdot z24.s, z11.b, z1.b[1]\n"
+ "sdot z25.s, z11.b, z1.b[3]\n"
+ "sdot z26.s, z11.b, z2.b[1]\n"
+ "sdot z27.s, z11.b, z2.b[3]\n"
"sdot z28.s, z11.b, z3.b[1]\n"
"sdot z29.s, z11.b, z3.b[3]\n"
"sdot z30.s, z11.b, z4.b[1]\n"
"sdot z31.s, z11.b, z4.b[3]\n"
"ld1b { z18.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "sdot z24.s, z16.b, z2.b[1]\n"
- "sdot z25.s, z16.b, z2.b[3]\n"
- "sdot z26.s, z16.b, z3.b[1]\n"
- "sdot z27.s, z16.b, z3.b[3]\n"
- "sdot z28.s, z17.b, z4.b[0]\n"
- "sdot z29.s, z17.b, z4.b[2]\n"
- "sdot z30.s, z17.b, z5.b[0]\n"
- "sdot z31.s, z17.b, z5.b[2]\n"
+ "sdot z24.s, z15.b, z2.b[0]\n"
+ "sdot z25.s, z15.b, z2.b[2]\n"
+ "sdot z26.s, z15.b, z3.b[0]\n"
+ "sdot z27.s, z15.b, z3.b[2]\n"
+ "sdot z28.s, z15.b, z4.b[0]\n"
+ "sdot z29.s, z15.b, z4.b[2]\n"
+ "sdot z30.s, z15.b, z5.b[0]\n"
+ "sdot z31.s, z15.b, z5.b[2]\n"
"ld1b { z17.b }, p2/Z, [%x[params], #4, MUL VL]\n"
- "sdot z24.s, z19.b, z3.b[0]\n"
- "sdot z25.s, z19.b, z3.b[2]\n"
- "sdot z26.s, z19.b, z4.b[0]\n"
- "sdot z27.s, z19.b, z4.b[2]\n"
- "sdot z28.s, z16.b, z4.b[1]\n"
- "sdot z29.s, z16.b, z4.b[3]\n"
- "sdot z30.s, z16.b, z5.b[1]\n"
- "sdot z31.s, z16.b, z5.b[3]\n"
- "ld1b { z16.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "sdot z24.s, z8.b, z2.b[1]\n"
+ "sdot z25.s, z8.b, z2.b[3]\n"
+ "sdot z26.s, z8.b, z3.b[1]\n"
+ "sdot z27.s, z8.b, z3.b[3]\n"
+ "sdot z28.s, z8.b, z4.b[1]\n"
+ "sdot z29.s, z8.b, z4.b[3]\n"
+ "sdot z30.s, z8.b, z5.b[1]\n"
+ "sdot z31.s, z8.b, z5.b[3]\n"
+ "ld1b { z9.b }, p2/Z, [%x[params], #5, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
+ "sdot z24.s, z21.b, z3.b[0]\n"
+ "sdot z25.s, z21.b, z3.b[2]\n"
+ "sdot z26.s, z21.b, z4.b[0]\n"
+ "sdot z27.s, z21.b, z4.b[2]\n"
+ "sdot z28.s, z21.b, z5.b[0]\n"
+ "sdot z29.s, z21.b, z5.b[2]\n"
+ "ld1w { z14.s }, p0/Z, [%x[params], #-8, MUL VL]\n"
+ "sdot z30.s, z21.b, z6.b[0]\n"
+ "sdot z31.s, z21.b, z6.b[2]\n"
+ "ld1b { z10.b }, p0/Z, [%x[params], #-5, MUL VL]\n"
"sdot z24.s, z18.b, z3.b[1]\n"
"sdot z25.s, z18.b, z3.b[3]\n"
- "ld1w { z20.s }, p0/Z, [%x[params], #-8, MUL VL]\n"
"sdot z26.s, z18.b, z4.b[1]\n"
"sdot z27.s, z18.b, z4.b[3]\n"
- "sdot z28.s, z19.b, z5.b[0]\n"
- "sdot z29.s, z19.b, z5.b[2]\n"
- "sdot z30.s, z19.b, z6.b[0]\n"
- "sdot z31.s, z19.b, z6.b[2]\n"
- "ld1b { z10.b }, p0/Z, [%x[params], #-5, MUL VL]\n"
- "sdot z24.s, z17.b, z4.b[0]\n"
- "sdot z25.s, z17.b, z4.b[2]\n"
- "sdot z26.s, z17.b, z5.b[0]\n"
- "sdot z27.s, z17.b, z5.b[2]\n"
"sdot z28.s, z18.b, z5.b[1]\n"
"sdot z29.s, z18.b, z5.b[3]\n"
"sdot z30.s, z18.b, z6.b[1]\n"
"sdot z31.s, z18.b, z6.b[3]\n"
"ld1b { z11.b }, p0/Z, [%x[params], #-4, MUL VL]\n"
- "sdot z24.s, z16.b, z4.b[1]\n"
- "sdot z25.s, z16.b, z4.b[3]\n"
- ".inst 0x04ac7718 // sqrdmulh z24.s, z24.s, z12.s\n"
- "sdot z26.s, z16.b, z5.b[1]\n"
- "sdot z27.s, z16.b, z5.b[3]\n"
- ".inst 0x04ac7739 // sqrdmulh z25.s, z25.s, z12.s\n"
+ "sdot z24.s, z17.b, z4.b[0]\n"
+ "sdot z25.s, z17.b, z4.b[2]\n"
+ "sdot z26.s, z17.b, z5.b[0]\n"
+ "sdot z27.s, z17.b, z5.b[2]\n"
"sdot z28.s, z17.b, z6.b[0]\n"
"sdot z29.s, z17.b, z6.b[2]\n"
- ".inst 0x04ac775a // sqrdmulh z26.s, z26.s, z12.s\n"
"sdot z30.s, z17.b, z7.b[0]\n"
"sdot z31.s, z17.b, z7.b[2]\n"
- ".inst 0x04ac777b // sqrdmulh z27.s, z27.s, z12.s\n"
"ld1b { z8.b }, p0/Z, [%x[params], #-7, MUL VL]\n"
- "sdot z28.s, z16.b, z6.b[1]\n"
- "sdot z29.s, z16.b, z6.b[3]\n"
- "and z19.d, z24.d, z21.d\n"
- "sdot z30.s, z16.b, z7.b[1]\n"
- "sdot z31.s, z16.b, z7.b[3]\n"
- "and z18.d, z25.d, z21.d\n"
+ "sdot z24.s, z9.b, z4.b[1]\n"
+ "sdot z25.s, z9.b, z4.b[3]\n"
+ "sdot z26.s, z9.b, z5.b[1]\n"
+ "sdot z27.s, z9.b, z5.b[3]\n"
+ "sdot z28.s, z9.b, z6.b[1]\n"
+ "sdot z29.s, z9.b, z6.b[3]\n"
+ "sdot z30.s, z9.b, z7.b[1]\n"
+ "sdot z31.s, z9.b, z7.b[3]\n"
"ld1b { z9.b }, p0/Z, [%x[params], #-6, MUL VL]\n"
- "and z17.d, z26.d, z21.d\n"
- "and z16.d, z27.d, z21.d\n"
"addvl %x[params], %x[params], #-3\n"
- "asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04b47718 // sqrdmulh z24.s, z24.s, z20.s\n"
+ ".inst 0x04b47739 // sqrdmulh z25.s, z25.s, z20.s\n"
+ ".inst 0x04b4775a // sqrdmulh z26.s, z26.s, z20.s\n"
+ ".inst 0x04b4777b // sqrdmulh z27.s, z27.s, z20.s\n"
+ ".inst 0x04b4779c // sqrdmulh z28.s, z28.s, z20.s\n"
+ ".inst 0x04b477bd // sqrdmulh z29.s, z29.s, z20.s\n"
+ "and z17.d, z24.d, z13.d\n"
+ "and z18.d, z25.d, z13.d\n"
+ "and z15.d, z26.d, z13.d\n"
+ "and z21.d, z27.d, z13.d\n"
+ ".inst 0x04b477de // sqrdmulh z30.s, z30.s, z20.s\n"
+ ".inst 0x04b477ff // sqrdmulh z31.s, z31.s, z20.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- ".inst 0x04ac779c // sqrdmulh z28.s, z28.s, z12.s\n"
- ".inst 0x04ac77bd // sqrdmulh z29.s, z29.s, z12.s\n"
- ".inst 0x04ac77de // sqrdmulh z30.s, z30.s, z12.s\n"
- ".inst 0x04ac77ff // sqrdmulh z31.s, z31.s, z12.s\n"
- "sqadd z24.s, z24.s, z19.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "asr z21.s, z21.s, #0x1f\n"
+ "sqadd z24.s, z24.s, z17.s\n"
+ "and z20.d, z28.d, z13.d\n"
"sqadd z25.s, z25.s, z18.s\n"
- ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
- ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
- "sqadd z26.s, z26.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
- ".inst 0x44828abb // srshl z27.s, p2/M, z27.s, z21.s\n"
- "and z19.d, z28.d, z21.d\n"
- "and z18.d, z29.d, z21.d\n"
- "and z17.d, z30.d, z21.d\n"
- "and z16.d, z31.d, z21.d\n"
- "asr z19.s, z19.s, #0x1f\n"
+ "and z18.d, z29.d, z13.d\n"
+ "sqadd z26.s, z26.s, z15.s\n"
+ "sqadd z27.s, z27.s, z21.s\n"
+ "and z17.d, z30.d, z13.d\n"
+ "and z15.d, z31.d, z13.d\n"
+ ".inst 0x448289b8 // srshl z24.s, p2/M, z24.s, z13.s\n"
+ "asr z20.s, z20.s, #0x1f\n"
"asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x448289b9 // srshl z25.s, p2/M, z25.s, z13.s\n"
+ ".inst 0x448289ba // srshl z26.s, p2/M, z26.s, z13.s\n"
+ ".inst 0x448289bb // srshl z27.s, p2/M, z27.s, z13.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- "sqadd z28.s, z28.s, z19.s\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "add z24.s, z24.s, z16.s\n"
+ "sqadd z28.s, z28.s, z20.s\n"
"sqadd z29.s, z29.s, z18.s\n"
- ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
- ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
+ "add z25.s, z25.s, z16.s\n"
"sqadd z30.s, z30.s, z17.s\n"
- "sqadd z31.s, z31.s, z16.s\n"
- ".inst 0x44828abe // srshl z30.s, p2/M, z30.s, z21.s\n"
- ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
- "add z24.s, z24.s, z13.s\n"
- "add z25.s, z25.s, z13.s\n"
- "smin z24.s, p2/M, z24.s, z15.s\n"
- "smin z25.s, p2/M, z25.s, z15.s\n"
- "add z26.s, z26.s, z13.s\n"
- "add z27.s, z27.s, z13.s\n"
- "smin z26.s, p2/M, z26.s, z15.s\n"
- "smin z27.s, p2/M, z27.s, z15.s\n"
- "add z28.s, z28.s, z13.s\n"
- "add z29.s, z29.s, z13.s\n"
- "smin z28.s, p2/M, z28.s, z15.s\n"
- "smin z29.s, p2/M, z29.s, z15.s\n"
- "add z30.s, z30.s, z13.s\n"
- "add z31.s, z31.s, z13.s\n"
- "smin z30.s, p2/M, z30.s, z15.s\n"
- "smin z31.s, p2/M, z31.s, z15.s\n"
- "smax z24.s, p2/M, z24.s, z14.s\n"
- "smax z25.s, p2/M, z25.s, z14.s\n"
+ "sqadd z31.s, z31.s, z15.s\n"
+ ".inst 0x448289bc // srshl z28.s, p2/M, z28.s, z13.s\n"
+ "add z26.s, z26.s, z16.s\n"
+ "add z27.s, z27.s, z16.s\n"
+ "smin z24.s, p2/M, z24.s, z19.s\n"
+ ".inst 0x448289bd // srshl z29.s, p2/M, z29.s, z13.s\n"
+ "smin z25.s, p2/M, z25.s, z19.s\n"
+ ".inst 0x448289be // srshl z30.s, p2/M, z30.s, z13.s\n"
+ ".inst 0x448289bf // srshl z31.s, p2/M, z31.s, z13.s\n"
+ "add z28.s, z28.s, z16.s\n"
+ "add z29.s, z29.s, z16.s\n"
+ "smin z26.s, p2/M, z26.s, z19.s\n"
+ "smin z27.s, p2/M, z27.s, z19.s\n"
+ "smax z24.s, p2/M, z24.s, z12.s\n"
+ "add z30.s, z30.s, z16.s\n"
+ "smax z25.s, p2/M, z25.s, z12.s\n"
+ "add z31.s, z31.s, z16.s\n"
+ "smin z28.s, p2/M, z28.s, z19.s\n"
+ "smin z29.s, p2/M, z29.s, z19.s\n"
+ "smax z26.s, p2/M, z26.s, z12.s\n"
+ "smin z30.s, p2/M, z30.s, z19.s\n"
+ "smax z27.s, p2/M, z27.s, z12.s\n"
"st1b { z24.s }, p1, [x27, x28]\n"
"mov z24.s, z22.s[0]\n"
- "smax z26.s, p2/M, z26.s, z14.s\n"
- "smax z27.s, p2/M, z27.s, z14.s\n"
+ "smin z31.s, p2/M, z31.s, z19.s\n"
+ "smax z28.s, p2/M, z28.s, z12.s\n"
"st1b { z25.s }, p1, [x26, x28]\n"
"mov z25.s, z22.s[1]\n"
- "smax z28.s, p2/M, z28.s, z14.s\n"
- "smax z29.s, p2/M, z29.s, z14.s\n"
+ "smax z29.s, p2/M, z29.s, z12.s\n"
"st1b { z26.s }, p1, [x25, x28]\n"
"mov z26.s, z22.s[2]\n"
- "smax z30.s, p2/M, z30.s, z14.s\n"
- "smax z31.s, p2/M, z31.s, z14.s\n"
+ "add z24.s, z24.s, z14.s\n"
+ "smax z30.s, p2/M, z30.s, z12.s\n"
"st1b { z27.s }, p1, [x24, x28]\n"
"mov z27.s, z22.s[3]\n"
+ "add z25.s, z25.s, z14.s\n"
+ "smax z31.s, p2/M, z31.s, z12.s\n"
"st1b { z28.s }, p1, [x23, x28]\n"
"mov z28.s, z23.s[0]\n"
- "add z24.s, z24.s, z20.s\n"
+ "add z26.s, z26.s, z14.s\n"
"st1b { z29.s }, p1, [x22, x28]\n"
"mov z29.s, z23.s[1]\n"
- "add z25.s, z25.s, z20.s\n"
+ "add z27.s, z27.s, z14.s\n"
"st1b { z30.s }, p1, [x21, x28]\n"
"mov z30.s, z23.s[2]\n"
- "add z26.s, z26.s, z20.s\n"
+ "add z28.s, z28.s, z14.s\n"
"st1b { z31.s }, p1, [x20, x28]\n"
"mov z31.s, z23.s[3]\n"
"incw x28\n"
- "add z27.s, z27.s, z20.s\n"
- "add z28.s, z28.s, z20.s\n"
- "add z29.s, z29.s, z20.s\n"
- "add z30.s, z30.s, z20.s\n"
- "add z31.s, z31.s, z20.s\n"
+ "add z29.s, z29.s, z14.s\n"
+ "add z30.s, z30.s, z14.s\n"
+ "add z31.s, z31.s, z14.s\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_output_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
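The hunk above only reshuffles instruction scheduling and register allocation; the requantization math at each store is unchanged. What every `st1b` is preceded by is the standard fixed-point requantize: a saturating doubling high multiply (`sqrdmulh`) by a per-channel multiplier, a rounding right shift (`srshl` by a negative per-channel amount, with the `and`/`asr #0x1f`/`sqadd` trio applying a sign-dependent nudge so the rounding matches a rounding divide by a power of two), re-centring on `c_offset`, and a clamp to `[minval, maxval]` before narrowing to bytes. A minimal scalar sketch of that sequence, assuming a positive right-shift amount and ignoring the one saturation corner case of `sqrdmulh` (this is an illustration, not the library's code):

#include <algorithm>
#include <cstdint>

// Scalar sketch of the per-lane requantize performed before each st1b above.
int8_t requantize(int32_t acc, int32_t multiplier, int shift,
                  int32_t c_offset, int32_t minval, int32_t maxval)
{
    // sqrdmulh: rounding doubling high multiply, i.e. (acc * mult + 2^30) >> 31
    // (saturation at acc == mult == INT32_MIN omitted for brevity).
    int64_t prod = static_cast<int64_t>(acc) * multiplier;
    int32_t high = static_cast<int32_t>((prod + (1LL << 30)) >> 31);

    // srshl by a negative amount: rounding arithmetic shift right. The
    // and/asr/sqadd sequence in the assembly is the sign-dependent fixup
    // folded into this rounding; shift == 0 passes the value through.
    int32_t rounded = (shift > 0) ? ((high + (1 << (shift - 1))) >> shift) : high;

    // add c_offset, then smax/smin against minval/maxval, then narrow (st1b).
    int32_t out = rounded + c_offset;
    return static_cast<int8_t>(std::max(minval, std::min(maxval, out)));
}

The multiplier and shift are per output channel (loaded from `params` alongside the weights), which is why the assembly reloads them inside the channel loop rather than broadcasting them once.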
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index d9c8644fc4..0d0f3d76f9 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,400 +33,400 @@ namespace depthwise {
void sve_s8qs_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const unsigned int n_channels, const int8_t *const *const inptrs, const int8_t *params, const int32_t *, const arm_gemm::Requantize32& qp, const int32_t *, const int32_t *, int8_t *const *const outptrs)
{
__asm__ __volatile__(
- "mov x13, #0x0\n"
- "whilelt p0.b, x13, %x[n_channels]\n"
+ "mov x14, #0x0\n"
"ldp x27, x26, [%x[inptrs], #0x0]\n"
"ldp x25, x24, [%x[inptrs], #0x10]\n"
+ "ptrue p2.b\n"
"ldp x23, x22, [%x[inptrs], #0x20]\n"
"ldp x21, x20, [%x[inptrs], #0x30]\n"
- "ptrue p2.b\n"
- "mov x12, #0x0\n"
- "ldp x11, x10, [%x[outptrs], #0x0]\n"
- "ldp x9, x28, [%x[outptrs], #0x10]\n"
- "ld1b { z15.b }, p0/Z, [x27, x13]\n"
- "ld1b { z18.b }, p0/Z, [x26, x13]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [%x[outptrs], #0x0]\n"
+ "ldp x10, x9, [%x[outptrs], #0x10]\n"
+ "whilelt p0.b, x14, %x[n_channels]\n"
+ "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z30.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1b { z3.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "ld1b { z12.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z17.b }, p0/Z, [x26, x14]\n"
"ldp x27, x26, [%x[inptrs], #0x40]\n"
- "ld1b { z16.b }, p0/Z, [x25, x13]\n"
- "zip2 z17.b, z15.b, z16.b\n"
- "zip1 z15.b, z15.b, z16.b\n"
- "ld1b { z14.b }, p0/Z, [x24, x13]\n"
+ "ld1b { z16.b }, p0/Z, [x25, x14]\n"
+ "ld1b { z15.b }, p0/Z, [x24, x14]\n"
"ldp x25, x24, [%x[inptrs], #0x50]\n"
- "zip1 z16.b, z18.b, z14.b\n"
- "zip2 z14.b, z18.b, z14.b\n"
- "ld1b { z13.b }, p0/Z, [x23, x13]\n"
- "ld1b { z18.b }, p0/Z, [x22, x13]\n"
- "zip2 z12.b, z15.b, z16.b\n"
- "zip1 z15.b, z15.b, z16.b\n"
+ "ld1b { z10.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z24.b }, p0/Z, [x22, x14]\n"
"ldp x23, x22, [%x[inptrs], #0x60]\n"
- "ld1b { z16.b }, p0/Z, [x21, x13]\n"
- "zip1 z11.b, z17.b, z14.b\n"
- "zip2 z14.b, z17.b, z14.b\n"
- "ld1b { z10.b }, p0/Z, [x20, x13]\n"
+ "ld1b { z19.b }, p0/Z, [x21, x14]\n"
+ "zip2 z18.b, z12.b, z16.b\n"
+ "zip1 z12.b, z12.b, z16.b\n"
+ "ld1b { z8.b }, p0/Z, [x20, x14]\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "zip2 z22.b, z13.b, z16.b\n"
- "zip1 z13.b, z13.b, z16.b\n"
- "ld1b { z9.b }, p0/Z, [x27, x13]\n"
- "ld1b { z17.b }, p0/Z, [x26, x13]\n"
- "zip1 z21.b, z18.b, z10.b\n"
- "zip2 z10.b, z18.b, z10.b\n"
- "ld1b { z16.b }, p0/Z, [x25, x13]\n"
- "ld1b { z8.b }, p0/Z, [x24, x13]\n"
- "zip2 z20.b, z9.b, z16.b\n"
- "zip1 z9.b, z9.b, z16.b\n"
- "ld1b { z7.b }, p0/Z, [x23, x13]\n"
- "ld1b { z19.b }, p0/Z, [x22, x13]\n"
- "zip1 z18.b, z17.b, z8.b\n"
- "zip2 z8.b, z17.b, z8.b\n"
- "ld1b { z16.b }, p0/Z, [x21, x13]\n"
- "ld1b { z6.b }, p0/Z, [x20, x13]\n"
- "zip2 z17.b, z7.b, z16.b\n"
- "zip1 z7.b, z7.b, z16.b\n"
- "zip1 z16.b, z19.b, z6.b\n"
- "zip2 z6.b, z19.b, z6.b\n"
- "ld1w { z5.s }, p2/Z, [%x[params]]\n"
- "ld1rw { z4.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z3.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "ld1rw { z2.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "zip2 z1.b, z13.b, z21.b\n"
- "zip1 z13.b, z13.b, z21.b\n"
- "ldp x27, x26, [%x[inptrs], #0x0]\n"
- "ldp x25, x23, [%x[inptrs], #0x10]\n"
- "zip1 z0.b, z22.b, z10.b\n"
- "zip2 z10.b, z22.b, z10.b\n"
+ "zip1 z16.b, z17.b, z15.b\n"
+ "zip2 z15.b, z17.b, z15.b\n"
+ "ld1b { z2.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z23.b }, p0/Z, [x26, x14]\n"
+ "ld1b { z17.b }, p0/Z, [x25, x14]\n"
+ "ld1b { z7.b }, p0/Z, [x24, x14]\n"
+ "zip2 z22.b, z10.b, z19.b\n"
+ "zip1 z10.b, z10.b, z19.b\n"
+ "ld1b { z4.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z21.b }, p0/Z, [x22, x14]\n"
+ "zip2 z5.b, z12.b, z16.b\n"
+ "zip1 z12.b, z12.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z6.b }, p0/Z, [x20, x14]\n"
+ "zip1 z9.b, z18.b, z15.b\n"
+ "zip2 z15.b, z18.b, z15.b\n"
+ "zip1 z20.b, z24.b, z8.b\n"
+ "zip2 z8.b, z24.b, z8.b\n"
+ "ld1w { z13.s }, p2/Z, [%x[params]]\n"
+ "ldp x28, x27, [%x[inptrs], #0x0]\n"
+ "zip2 z19.b, z2.b, z17.b\n"
+ "zip1 z2.b, z2.b, z17.b\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
"ldp x24, x22, [%x[inptrs], #0x20]\n"
+ "zip1 z18.b, z23.b, z7.b\n"
+ "zip2 z7.b, z23.b, z7.b\n"
"ldp x21, x20, [%x[inptrs], #0x30]\n"
- "zip2 z31.b, z9.b, z18.b\n"
- "zip1 z9.b, z9.b, z18.b\n"
- "zip1 z30.b, z20.b, z8.b\n"
- "zip2 z8.b, z20.b, z8.b\n"
- "ld1b { z29.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "ld1b { z28.b }, p2/Z, [%x[params], #2, MUL VL]\n"
- "zip2 z27.b, z7.b, z16.b\n"
- "zip1 z7.b, z7.b, z16.b\n"
- "ld1b { z26.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "zip2 z17.b, z4.b, z16.b\n"
+ "zip1 z4.b, z4.b, z16.b\n"
+ "ld1b { z1.b }, p2/Z, [%x[params], #3, MUL VL]\n"
"addvl %x[params], %x[params], #4\n"
- "zip1 z25.b, z17.b, z6.b\n"
+ "zip1 z16.b, z21.b, z6.b\n"
+ "zip2 z6.b, z21.b, z6.b\n"
+ "zip2 z31.b, z10.b, z20.b\n"
+ "zip1 z10.b, z10.b, z20.b\n"
+ "zip1 z26.b, z22.b, z8.b\n"
+ "zip2 z8.b, z22.b, z8.b\n"
+ "zip2 z25.b, z2.b, z18.b\n"
+ "zip1 z2.b, z2.b, z18.b\n"
+ "zip1 z28.b, z19.b, z7.b\n"
+ "zip2 z7.b, z19.b, z7.b\n"
+ "zip2 z27.b, z4.b, z16.b\n"
+ "zip1 z4.b, z4.b, z16.b\n"
+ "zip1 z29.b, z17.b, z6.b\n"
"zip2 z6.b, z17.b, z6.b\n"
- "mov z24.d, z5.d\n"
- "mov z22.d, z5.d\n"
- "mov z21.d, z5.d\n"
+ "mov z21.d, z13.d\n"
+ "mov z20.d, z13.d\n"
+ "mov z23.d, z13.d\n"
"1:" // Loop
- "sdot z5.s, z29.b, z15.b\n"
- "sdot z22.s, z29.b, z13.b\n"
- "ext z15.b, z15.b, z15.b, #0x1\n"
- "whilelt p0.s, x12, %x[n_channels]\n"
- "sdot z5.s, z28.b, z13.b\n"
- "ext z13.b, z13.b, z13.b, #0x1\n"
- "sdot z24.s, z29.b, z15.b\n"
+ "sdot z13.s, z3.b, z12.b\n"
+ "sdot z20.s, z3.b, z10.b\n"
+ "ext z12.b, z12.b, z12.b, #0x1\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
+ "incw x14, ALL, MUL #4\n"
+ "sdot z21.s, z3.b, z12.b\n"
"ld1w { z17.s }, p2/Z, [%x[params]]\n"
- "sdot z21.s, z29.b, z13.b\n"
- "sdot z22.s, z28.b, z9.b\n"
- "incw x13, ALL, MUL #4\n"
- "sdot z5.s, z26.b, z9.b\n"
- "ext z9.b, z9.b, z9.b, #0x1\n"
- "sdot z24.s, z28.b, z13.b\n"
- "ld1w { z20.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "sdot z21.s, z28.b, z9.b\n"
- "sdot z22.s, z26.b, z7.b\n"
- "ext z7.b, z7.b, z7.b, #0x1\n"
- ".inst 0x04b174a5 // sqrdmulh z5.s, z5.s, z17.s\n"
- "sdot z24.s, z26.b, z9.b\n"
- "sdot z21.s, z26.b, z7.b\n"
- "and z16.d, z5.d, z20.d\n"
+ "sdot z13.s, z0.b, z10.b\n"
+ "ext z10.b, z10.b, z10.b, #0x1\n"
+ "sdot z20.s, z0.b, z2.b\n"
+ "sdot z23.s, z3.b, z10.b\n"
+ "sdot z13.s, z1.b, z2.b\n"
+ "ext z2.b, z2.b, z2.b, #0x1\n"
+ "sdot z21.s, z0.b, z10.b\n"
+ "ld1w { z22.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "sdot z23.s, z0.b, z2.b\n"
+ "sdot z20.s, z1.b, z4.b\n"
+ "ext z4.b, z4.b, z4.b, #0x1\n"
+ ".inst 0x04b175ad // sqrdmulh z13.s, z13.s, z17.s\n"
+ "sdot z21.s, z1.b, z2.b\n"
+ "sdot z23.s, z1.b, z4.b\n"
+ "and z16.d, z13.d, z22.d\n"
+ ".inst 0x04b17694 // sqrdmulh z20.s, z20.s, z17.s\n"
"asr z16.s, z16.s, #0x1f\n"
- ".inst 0x04b17718 // sqrdmulh z24.s, z24.s, z17.s\n"
- ".inst 0x04b176d6 // sqrdmulh z22.s, z22.s, z17.s\n"
".inst 0x04b176b5 // sqrdmulh z21.s, z21.s, z17.s\n"
- "sqadd z5.s, z5.s, z16.s\n"
- ".inst 0x44828a85 // srshl z5.s, p2/M, z5.s, z20.s\n"
+ ".inst 0x04b176f7 // sqrdmulh z23.s, z23.s, z17.s\n"
"ld1w { z19.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "and z18.d, z24.d, z20.d\n"
- "and z17.d, z22.d, z20.d\n"
- "and z16.d, z21.d, z20.d\n"
+ "and z18.d, z20.d, z22.d\n"
+ "sqadd z13.s, z13.s, z16.s\n"
+ "and z17.d, z21.d, z22.d\n"
+ "and z16.d, z23.d, z22.d\n"
"asr z18.s, z18.s, #0x1f\n"
"asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x44828acd // srshl z13.s, p2/M, z13.s, z22.s\n"
"asr z16.s, z16.s, #0x1f\n"
- "sqadd z24.s, z24.s, z18.s\n"
- "sqadd z22.s, z22.s, z17.s\n"
- ".inst 0x44828a98 // srshl z24.s, p2/M, z24.s, z20.s\n"
- ".inst 0x44828a96 // srshl z22.s, p2/M, z22.s, z20.s\n"
- "sqadd z21.s, z21.s, z16.s\n"
- "add z5.s, z5.s, z2.s\n"
- ".inst 0x44828a95 // srshl z21.s, p2/M, z21.s, z20.s\n"
- "smax z5.s, p2/M, z5.s, z4.s\n"
- "add z24.s, z24.s, z2.s\n"
- "add z22.s, z22.s, z2.s\n"
- "smin z5.s, p2/M, z5.s, z3.s\n"
- "smax z24.s, p2/M, z24.s, z4.s\n"
- "add z21.s, z21.s, z2.s\n"
- "smax z22.s, p2/M, z22.s, z4.s\n"
- "smax z21.s, p2/M, z21.s, z4.s\n"
- "st1b { z5.s }, p0, [x11, x12]\n"
- "ld1w { z23.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z18.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "smin z24.s, p2/M, z24.s, z3.s\n"
- "smin z22.s, p2/M, z22.s, z3.s\n"
- "smin z21.s, p2/M, z21.s, z3.s\n"
- "st1b { z24.s }, p0, [x10, x12]\n"
- "mov z24.d, z23.d\n"
+ "sqadd z20.s, z20.s, z18.s\n"
+ "ld1b { z18.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "sqadd z21.s, z21.s, z17.s\n"
"ld1b { z17.b }, p2/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z22.s }, p0, [x9, x12]\n"
- "mov z22.d, z23.d\n"
- "sdot z22.s, z18.b, z1.b\n"
- "ld1b { z16.b }, p2/Z, [%x[params], #5, MUL VL]\n"
- "st1b { z21.s }, p0, [x28, x12]\n"
- "mov z21.d, z23.d\n"
- "sdot z23.s, z18.b, z12.b\n"
- "sdot z23.s, z17.b, z1.b\n"
- "ext z12.b, z12.b, z12.b, #0x1\n"
- "ext z1.b, z1.b, z1.b, #0x1\n"
- "sdot z24.s, z18.b, z12.b\n"
- "ld1w { z20.s }, p2/Z, [%x[params], #7, MUL VL]\n"
- "sdot z21.s, z18.b, z1.b\n"
- "sdot z22.s, z17.b, z31.b\n"
- "incw x12\n"
- "whilelt p0.s, x12, %x[n_channels]\n"
- "sdot z23.s, z16.b, z31.b\n"
- "ext z31.b, z31.b, z31.b, #0x1\n"
- "sdot z24.s, z17.b, z1.b\n"
+ "sqadd z23.s, z23.s, z16.s\n"
+ "ld1b { z16.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "add z13.s, z13.s, z30.s\n"
+ ".inst 0x44828ad5 // srshl z21.s, p2/M, z21.s, z22.s\n"
+ ".inst 0x44828ad4 // srshl z20.s, p2/M, z20.s, z22.s\n"
+ ".inst 0x44828ad7 // srshl z23.s, p2/M, z23.s, z22.s\n"
+ "ld1w { z22.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "smax z13.s, p2/M, z13.s, z11.s\n"
+ "add z21.s, z21.s, z30.s\n"
+ "add z20.s, z20.s, z30.s\n"
+ "add z23.s, z23.s, z30.s\n"
+ "smin z13.s, p2/M, z13.s, z14.s\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "smax z20.s, p2/M, z20.s, z11.s\n"
+ "smax z23.s, p2/M, z23.s, z11.s\n"
+ "st1b { z13.s }, p0, [x12, x13]\n"
+ "ld1w { z24.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "sdot z21.s, z17.b, z31.b\n"
- "sdot z22.s, z16.b, z27.b\n"
+ "smin z21.s, p2/M, z21.s, z14.s\n"
+ "smin z20.s, p2/M, z20.s, z14.s\n"
+ "smin z23.s, p2/M, z23.s, z14.s\n"
+ "st1b { z21.s }, p0, [x11, x13]\n"
+ "mov z13.d, z24.d\n"
+ "st1b { z20.s }, p0, [x10, x13]\n"
+ "mov z21.d, z24.d\n"
+ "st1b { z23.s }, p0, [x9, x13]\n"
+ "mov z20.d, z24.d\n"
+ "sdot z24.s, z16.b, z5.b\n"
+ "incw x13\n"
+ "sdot z21.s, z16.b, z31.b\n"
+ "ext z5.b, z5.b, z5.b, #0x1\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
+ "sdot z24.s, z17.b, z31.b\n"
+ "ext z31.b, z31.b, z31.b, #0x1\n"
+ "sdot z13.s, z16.b, z5.b\n"
+ "sdot z20.s, z16.b, z31.b\n"
+ "sdot z21.s, z17.b, z25.b\n"
+ "sdot z24.s, z18.b, z25.b\n"
+ "ext z25.b, z25.b, z25.b, #0x1\n"
+ "sdot z13.s, z17.b, z31.b\n"
+ "sdot z20.s, z17.b, z25.b\n"
+ "sdot z21.s, z18.b, z27.b\n"
"ext z27.b, z27.b, z27.b, #0x1\n"
- ".inst 0x04b376f7 // sqrdmulh z23.s, z23.s, z19.s\n"
- "sdot z24.s, z16.b, z31.b\n"
- "sdot z21.s, z16.b, z27.b\n"
- "and z16.d, z23.d, z20.d\n"
- "asr z16.s, z16.s, #0x1f\n"
".inst 0x04b37718 // sqrdmulh z24.s, z24.s, z19.s\n"
- ".inst 0x04b376d6 // sqrdmulh z22.s, z22.s, z19.s\n"
+ "sdot z13.s, z18.b, z25.b\n"
+ "sdot z20.s, z18.b, z27.b\n"
+ "and z16.d, z24.d, z22.d\n"
".inst 0x04b376b5 // sqrdmulh z21.s, z21.s, z19.s\n"
- "sqadd z23.s, z23.s, z16.s\n"
- ".inst 0x44828a97 // srshl z23.s, p2/M, z23.s, z20.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
+ ".inst 0x04b37694 // sqrdmulh z20.s, z20.s, z19.s\n"
"ld1w { z19.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
- "and z18.d, z24.d, z20.d\n"
- "and z17.d, z22.d, z20.d\n"
- "and z16.d, z21.d, z20.d\n"
+ "and z18.d, z21.d, z22.d\n"
+ "sqadd z24.s, z24.s, z16.s\n"
+ "and z17.d, z13.d, z22.d\n"
+ "and z16.d, z20.d, z22.d\n"
"asr z18.s, z18.s, #0x1f\n"
"asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x44828ad8 // srshl z24.s, p2/M, z24.s, z22.s\n"
"asr z16.s, z16.s, #0x1f\n"
- "sqadd z24.s, z24.s, z18.s\n"
- "sqadd z22.s, z22.s, z17.s\n"
- ".inst 0x44828a98 // srshl z24.s, p2/M, z24.s, z20.s\n"
- ".inst 0x44828a96 // srshl z22.s, p2/M, z22.s, z20.s\n"
- "sqadd z21.s, z21.s, z16.s\n"
- "add z23.s, z23.s, z2.s\n"
- ".inst 0x44828a95 // srshl z21.s, p2/M, z21.s, z20.s\n"
- "smax z23.s, p2/M, z23.s, z4.s\n"
- "add z24.s, z24.s, z2.s\n"
- "add z22.s, z22.s, z2.s\n"
- "smin z23.s, p2/M, z23.s, z3.s\n"
- "smax z24.s, p2/M, z24.s, z4.s\n"
- "add z21.s, z21.s, z2.s\n"
- "smax z22.s, p2/M, z22.s, z4.s\n"
- "smax z21.s, p2/M, z21.s, z4.s\n"
- "st1b { z23.s }, p0, [x11, x12]\n"
- "ld1w { z23.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
- "ld1b { z18.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
- "smin z24.s, p2/M, z24.s, z3.s\n"
- "smin z22.s, p2/M, z22.s, z3.s\n"
- "smin z21.s, p2/M, z21.s, z3.s\n"
- "st1b { z24.s }, p0, [x10, x12]\n"
- "mov z24.d, z23.d\n"
+ "sqadd z21.s, z21.s, z18.s\n"
+ "ld1b { z18.b }, p2/Z, [%x[params], #-5, MUL VL]\n"
+ "sqadd z13.s, z13.s, z17.s\n"
"ld1b { z17.b }, p2/Z, [%x[params], #-6, MUL VL]\n"
- "st1b { z22.s }, p0, [x9, x12]\n"
- "mov z22.d, z23.d\n"
- "sdot z22.s, z18.b, z0.b\n"
- "ld1b { z16.b }, p2/Z, [%x[params], #-5, MUL VL]\n"
- "st1b { z21.s }, p0, [x28, x12]\n"
- "mov z21.d, z23.d\n"
- "sdot z23.s, z18.b, z11.b\n"
- "sdot z23.s, z17.b, z0.b\n"
- "ext z11.b, z11.b, z11.b, #0x1\n"
- "ext z0.b, z0.b, z0.b, #0x1\n"
- "sdot z24.s, z18.b, z11.b\n"
- "ld1w { z20.s }, p2/Z, [%x[params], #-3, MUL VL]\n"
- "sdot z21.s, z18.b, z0.b\n"
- "sdot z22.s, z17.b, z30.b\n"
- "incw x12\n"
- "whilelt p0.s, x12, %x[n_channels]\n"
- "sdot z23.s, z16.b, z30.b\n"
- "ext z30.b, z30.b, z30.b, #0x1\n"
- "sdot z24.s, z17.b, z0.b\n"
- "sdot z21.s, z17.b, z30.b\n"
- "sdot z22.s, z16.b, z25.b\n"
- "ext z25.b, z25.b, z25.b, #0x1\n"
- ".inst 0x04b376f7 // sqrdmulh z23.s, z23.s, z19.s\n"
- "sdot z24.s, z16.b, z30.b\n"
- "sdot z21.s, z16.b, z25.b\n"
- "and z16.d, z23.d, z20.d\n"
- "asr z16.s, z16.s, #0x1f\n"
+ "sqadd z20.s, z20.s, z16.s\n"
+ "ld1b { z16.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ "add z24.s, z24.s, z30.s\n"
+ ".inst 0x44828acd // srshl z13.s, p2/M, z13.s, z22.s\n"
+ ".inst 0x44828ad5 // srshl z21.s, p2/M, z21.s, z22.s\n"
+ ".inst 0x44828ad4 // srshl z20.s, p2/M, z20.s, z22.s\n"
+ "ld1w { z22.s }, p2/Z, [%x[params], #-3, MUL VL]\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "add z13.s, z13.s, z30.s\n"
+ "add z21.s, z21.s, z30.s\n"
+ "add z20.s, z20.s, z30.s\n"
+ "smin z24.s, p2/M, z24.s, z14.s\n"
+ "smax z13.s, p2/M, z13.s, z11.s\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "smax z20.s, p2/M, z20.s, z11.s\n"
+ "st1b { z24.s }, p0, [x12, x13]\n"
+ "ld1w { z24.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "smin z13.s, p2/M, z13.s, z14.s\n"
+ "smin z21.s, p2/M, z21.s, z14.s\n"
+ "smin z20.s, p2/M, z20.s, z14.s\n"
+ "st1b { z13.s }, p0, [x11, x13]\n"
+ "mov z23.d, z24.d\n"
+ "st1b { z21.s }, p0, [x10, x13]\n"
+ "mov z21.d, z24.d\n"
+ "st1b { z20.s }, p0, [x9, x13]\n"
+ "mov z20.d, z24.d\n"
+ "sdot z24.s, z16.b, z9.b\n"
+ "incw x13\n"
+ "sdot z21.s, z16.b, z26.b\n"
+ "ext z9.b, z9.b, z9.b, #0x1\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
+ "sdot z24.s, z17.b, z26.b\n"
+ "ext z26.b, z26.b, z26.b, #0x1\n"
+ "sdot z23.s, z16.b, z9.b\n"
+ "sdot z20.s, z16.b, z26.b\n"
+ "sdot z21.s, z17.b, z28.b\n"
+ "sdot z24.s, z18.b, z28.b\n"
+ "ext z28.b, z28.b, z28.b, #0x1\n"
+ "sdot z23.s, z17.b, z26.b\n"
+ "sdot z20.s, z17.b, z28.b\n"
+ "sdot z21.s, z18.b, z29.b\n"
+ "ext z29.b, z29.b, z29.b, #0x1\n"
".inst 0x04b37718 // sqrdmulh z24.s, z24.s, z19.s\n"
- ".inst 0x04b376d6 // sqrdmulh z22.s, z22.s, z19.s\n"
+ "sdot z23.s, z18.b, z28.b\n"
+ "sdot z20.s, z18.b, z29.b\n"
+ "and z16.d, z24.d, z22.d\n"
".inst 0x04b376b5 // sqrdmulh z21.s, z21.s, z19.s\n"
- "sqadd z23.s, z23.s, z16.s\n"
- ".inst 0x44828a97 // srshl z23.s, p2/M, z23.s, z20.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ ".inst 0x04b376f7 // sqrdmulh z23.s, z23.s, z19.s\n"
+ ".inst 0x04b37694 // sqrdmulh z20.s, z20.s, z19.s\n"
"ld1w { z19.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "and z18.d, z24.d, z20.d\n"
- "and z17.d, z22.d, z20.d\n"
- "and z16.d, z21.d, z20.d\n"
+ "and z18.d, z21.d, z22.d\n"
+ "sqadd z24.s, z24.s, z16.s\n"
+ "and z17.d, z23.d, z22.d\n"
+ "and z16.d, z20.d, z22.d\n"
"asr z18.s, z18.s, #0x1f\n"
"asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x44828ad8 // srshl z24.s, p2/M, z24.s, z22.s\n"
"asr z16.s, z16.s, #0x1f\n"
- "sqadd z24.s, z24.s, z18.s\n"
- "sqadd z22.s, z22.s, z17.s\n"
- ".inst 0x44828a98 // srshl z24.s, p2/M, z24.s, z20.s\n"
- ".inst 0x44828a96 // srshl z22.s, p2/M, z22.s, z20.s\n"
- "sqadd z21.s, z21.s, z16.s\n"
- "add z23.s, z23.s, z2.s\n"
- ".inst 0x44828a95 // srshl z21.s, p2/M, z21.s, z20.s\n"
- "smax z23.s, p2/M, z23.s, z4.s\n"
- "add z24.s, z24.s, z2.s\n"
- "add z22.s, z22.s, z2.s\n"
- "smin z23.s, p2/M, z23.s, z3.s\n"
- "smax z24.s, p2/M, z24.s, z4.s\n"
- "add z21.s, z21.s, z2.s\n"
- "smax z22.s, p2/M, z22.s, z4.s\n"
- "smax z21.s, p2/M, z21.s, z4.s\n"
- "st1b { z23.s }, p0, [x11, x12]\n"
- "ld1w { z23.s }, p2/Z, [%x[params], #-2, MUL VL]\n"
- "ld1b { z18.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
- "smin z24.s, p2/M, z24.s, z3.s\n"
- "smin z22.s, p2/M, z22.s, z3.s\n"
- "smin z21.s, p2/M, z21.s, z3.s\n"
- "st1b { z24.s }, p0, [x10, x12]\n"
- "mov z29.d, z23.d\n"
+ "sqadd z21.s, z21.s, z18.s\n"
+ "ld1b { z18.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "sqadd z23.s, z23.s, z17.s\n"
"ld1b { z17.b }, p2/Z, [%x[params]]\n"
- "st1b { z22.s }, p0, [x9, x12]\n"
- "mov z28.d, z23.d\n"
- "sdot z28.s, z18.b, z10.b\n"
- "ld1b { z16.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "st1b { z21.s }, p0, [x28, x12]\n"
- "mov z27.d, z23.d\n"
- "sdot z23.s, z18.b, z14.b\n"
- "sdot z23.s, z17.b, z10.b\n"
- "ext z14.b, z14.b, z14.b, #0x1\n"
- "ext z10.b, z10.b, z10.b, #0x1\n"
- "sdot z29.s, z18.b, z14.b\n"
+ "sqadd z20.s, z20.s, z16.s\n"
+ "ld1b { z16.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
+ "add z24.s, z24.s, z30.s\n"
+ ".inst 0x44828ad7 // srshl z23.s, p2/M, z23.s, z22.s\n"
+ ".inst 0x44828ad5 // srshl z21.s, p2/M, z21.s, z22.s\n"
+ ".inst 0x44828ad4 // srshl z20.s, p2/M, z20.s, z22.s\n"
"ld1w { z22.s }, p2/Z, [%x[params], #3, MUL VL]\n"
- "sdot z27.s, z18.b, z10.b\n"
- "sdot z28.s, z17.b, z8.b\n"
- "incw x12\n"
- "whilelt p1.s, x12, %x[n_channels]\n"
- "sdot z23.s, z16.b, z8.b\n"
+ "smax z24.s, p2/M, z24.s, z11.s\n"
+ "add z23.s, z23.s, z30.s\n"
+ "add z21.s, z21.s, z30.s\n"
+ "add z20.s, z20.s, z30.s\n"
+ "smin z24.s, p2/M, z24.s, z14.s\n"
+ "smax z23.s, p2/M, z23.s, z11.s\n"
+ "smax z21.s, p2/M, z21.s, z11.s\n"
+ "smax z20.s, p2/M, z20.s, z11.s\n"
+ "st1b { z24.s }, p0, [x12, x13]\n"
+ "ld1w { z13.s }, p2/Z, [%x[params], #-2, MUL VL]\n"
+ "smin z23.s, p2/M, z23.s, z14.s\n"
+ "smin z21.s, p2/M, z21.s, z14.s\n"
+ "smin z20.s, p2/M, z20.s, z14.s\n"
+ "st1b { z23.s }, p0, [x11, x13]\n"
+ "mov z29.d, z13.d\n"
+ "st1b { z21.s }, p0, [x10, x13]\n"
+ "mov z28.d, z13.d\n"
+ "st1b { z20.s }, p0, [x9, x13]\n"
+ "mov z27.d, z13.d\n"
+ "sdot z13.s, z16.b, z15.b\n"
+ "incw x13\n"
+ "sdot z28.s, z16.b, z8.b\n"
+ "ext z15.b, z15.b, z15.b, #0x1\n"
+ "whilelt p1.s, x13, %x[n_channels]\n"
+ "whilelt p0.b, x14, %x[n_channels]\n"
+ "sdot z13.s, z17.b, z8.b\n"
"ext z8.b, z8.b, z8.b, #0x1\n"
- "sdot z29.s, z17.b, z10.b\n"
- "whilelt p0.b, x13, %x[n_channels]\n"
- "sdot z27.s, z17.b, z8.b\n"
- "sdot z28.s, z16.b, z6.b\n"
+ "sdot z29.s, z16.b, z15.b\n"
+ "ld1b { z26.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z21.b }, p0/Z, [x26, x14]\n"
+ "ld1b { z15.b }, p0/Z, [x25, x14]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x14]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x14]\n"
+ "sdot z27.s, z16.b, z8.b\n"
+ "sdot z28.s, z17.b, z7.b\n"
+ "sdot z13.s, z18.b, z7.b\n"
+ "ext z7.b, z7.b, z7.b, #0x1\n"
+ "sdot z29.s, z17.b, z8.b\n"
+ "ld1b { z8.b }, p0/Z, [x20, x14]\n"
+ "sdot z27.s, z17.b, z7.b\n"
+ "sdot z28.s, z18.b, z6.b\n"
"ext z6.b, z6.b, z6.b, #0x1\n"
- "ld1b { z26.b }, p0/Z, [x26, x13]\n"
- ".inst 0x04b376f7 // sqrdmulh z23.s, z23.s, z19.s\n"
- "sdot z29.s, z16.b, z8.b\n"
- "sdot z27.s, z16.b, z6.b\n"
- "ld1b { z21.b }, p0/Z, [x25, x13]\n"
- "and z16.d, z23.d, z22.d\n"
+ ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
+ "sdot z29.s, z18.b, z7.b\n"
+ "sdot z27.s, z18.b, z6.b\n"
+ "and z16.d, z13.d, z22.d\n"
+ ".inst 0x04b3779c // sqrdmulh z28.s, z28.s, z19.s\n"
"asr z16.s, z16.s, #0x1f\n"
- "ld1b { z14.b }, p0/Z, [x23, x13]\n"
- "ld1b { z25.b }, p0/Z, [x22, x13]\n"
".inst 0x04b377bd // sqrdmulh z29.s, z29.s, z19.s\n"
- ".inst 0x04b3779c // sqrdmulh z28.s, z28.s, z19.s\n"
- "ld1b { z20.b }, p0/Z, [x21, x13]\n"
- "ld1b { z10.b }, p0/Z, [x20, x13]\n"
".inst 0x04b3777b // sqrdmulh z27.s, z27.s, z19.s\n"
- "sqadd z23.s, z23.s, z16.s\n"
- ".inst 0x44828ad7 // srshl z23.s, p2/M, z23.s, z22.s\n"
- "ld1b { z15.b }, p0/Z, [x27, x13]\n"
- "and z19.d, z29.d, z22.d\n"
- "and z17.d, z28.d, z22.d\n"
+ "ld1b { z12.b }, p0/Z, [x28, x14]\n"
"ldp x23, x22, [%x[inptrs], #0x40]\n"
+ "and z19.d, z28.d, z22.d\n"
"ldp x21, x20, [%x[inptrs], #0x50]\n"
+ "sqadd z13.s, z13.s, z16.s\n"
+ "and z17.d, z29.d, z22.d\n"
"and z16.d, z27.d, z22.d\n"
"asr z19.s, z19.s, #0x1f\n"
- "ld1b { z9.b }, p0/Z, [x23, x13]\n"
- "ld1b { z24.b }, p0/Z, [x22, x13]\n"
+ "ld1b { z2.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z24.b }, p0/Z, [x22, x14]\n"
"asr z17.s, z17.s, #0x1f\n"
+ ".inst 0x44828acd // srshl z13.s, p2/M, z13.s, z22.s\n"
+ "ld1b { z18.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z7.b }, p0/Z, [x20, x14]\n"
"asr z16.s, z16.s, #0x1f\n"
- "ld1b { z18.b }, p0/Z, [x21, x13]\n"
- "ld1b { z8.b }, p0/Z, [x20, x13]\n"
- "sqadd z29.s, z29.s, z19.s\n"
- "sqadd z28.s, z28.s, z17.s\n"
- ".inst 0x44828add // srshl z29.s, p2/M, z29.s, z22.s\n"
- ".inst 0x44828adc // srshl z28.s, p2/M, z28.s, z22.s\n"
+ "sqadd z28.s, z28.s, z19.s\n"
+ "ld1b { z1.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "sqadd z29.s, z29.s, z17.s\n"
+ "ld1b { z0.b }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "add z13.s, z13.s, z30.s\n"
"sqadd z27.s, z27.s, z16.s\n"
- "add z23.s, z23.s, z2.s\n"
+ "ld1b { z3.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ ".inst 0x44828adc // srshl z28.s, p2/M, z28.s, z22.s\n"
+ ".inst 0x44828add // srshl z29.s, p2/M, z29.s, z22.s\n"
+ "smax z13.s, p2/M, z13.s, z11.s\n"
".inst 0x44828adb // srshl z27.s, p2/M, z27.s, z22.s\n"
- "smax z23.s, p2/M, z23.s, z4.s\n"
- "add z29.s, z29.s, z2.s\n"
- "add z28.s, z28.s, z2.s\n"
- "ld1b { z13.b }, p0/Z, [x24, x13]\n"
+ "ld1b { z10.b }, p0/Z, [x24, x14]\n"
"ldp x23, x22, [%x[inptrs], #0x60]\n"
- "add z27.s, z27.s, z2.s\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "smin z23.s, p2/M, z23.s, z3.s\n"
- "smax z29.s, p2/M, z29.s, z4.s\n"
- "smax z28.s, p2/M, z28.s, z4.s\n"
- "smax z27.s, p2/M, z27.s, z4.s\n"
- "st1b { z23.s }, p1, [x11, x12]\n"
- "ld1b { z7.b }, p0/Z, [x23, x13]\n"
- "ld1b { z23.b }, p0/Z, [x22, x13]\n"
- "ld1b { z22.b }, p0/Z, [x21, x13]\n"
- "zip2 z17.b, z15.b, z21.b\n"
- "zip1 z15.b, z15.b, z21.b\n"
- "ld1b { z6.b }, p0/Z, [x20, x13]\n"
- "zip1 z16.b, z26.b, z14.b\n"
- "zip2 z14.b, z26.b, z14.b\n"
- "smin z29.s, p2/M, z29.s, z3.s\n"
- "smin z28.s, p2/M, z28.s, z3.s\n"
- "smin z27.s, p2/M, z27.s, z3.s\n"
- "st1b { z29.s }, p1, [x10, x12]\n"
- "zip2 z12.b, z15.b, z16.b\n"
- "st1b { z28.s }, p1, [x9, x12]\n"
- "zip1 z15.b, z15.b, z16.b\n"
- "zip1 z11.b, z17.b, z14.b\n"
- "ldp x27, x26, [%x[inptrs], #0x0]\n"
- "st1b { z27.s }, p1, [x28, x12]\n"
- "zip2 z14.b, z17.b, z14.b\n"
- "zip2 z21.b, z13.b, z20.b\n"
- "ld1w { z5.s }, p2/Z, [%x[params], #4, MUL VL]\n"
- "zip1 z13.b, z13.b, z20.b\n"
- "zip1 z20.b, z25.b, z10.b\n"
- "incw x12\n"
- "ldp x25, x23, [%x[inptrs], #0x10]\n"
- "zip2 z10.b, z25.b, z10.b\n"
- "zip2 z19.b, z9.b, z18.b\n"
+ "ldp x28, x27, [%x[inptrs], #0x0]\n"
+ "add z29.s, z29.s, z30.s\n"
+ "add z28.s, z28.s, z30.s\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
+ "add z27.s, z27.s, z30.s\n"
+ "smin z13.s, p2/M, z13.s, z14.s\n"
+ "ld1b { z4.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z23.b }, p0/Z, [x22, x14]\n"
"ldp x24, x22, [%x[inptrs], #0x20]\n"
+ "smax z29.s, p2/M, z29.s, z11.s\n"
+ "smax z28.s, p2/M, z28.s, z11.s\n"
+ "ld1b { z22.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z6.b }, p0/Z, [x20, x14]\n"
+ "smax z27.s, p2/M, z27.s, z11.s\n"
+ "st1b { z13.s }, p1, [x12, x13]\n"
+ "zip2 z17.b, z12.b, z21.b\n"
+ "zip1 z12.b, z12.b, z21.b\n"
"ldp x21, x20, [%x[inptrs], #0x30]\n"
- "zip1 z9.b, z9.b, z18.b\n"
- "zip1 z18.b, z24.b, z8.b\n"
- "ld1b { z29.b }, p2/Z, [%x[params], #5, MUL VL]\n"
- "ld1b { z28.b }, p2/Z, [%x[params], #6, MUL VL]\n"
- "zip2 z8.b, z24.b, z8.b\n"
- "zip2 z17.b, z7.b, z22.b\n"
- "ld1b { z26.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "zip1 z16.b, z26.b, z15.b\n"
+ "zip2 z15.b, z26.b, z15.b\n"
+ "smin z29.s, p2/M, z29.s, z14.s\n"
+ "smin z28.s, p2/M, z28.s, z14.s\n"
+ "smin z27.s, p2/M, z27.s, z14.s\n"
+ "st1b { z29.s }, p1, [x11, x13]\n"
+ "zip2 z21.b, z10.b, z20.b\n"
+ "zip1 z10.b, z10.b, z20.b\n"
+ "zip1 z20.b, z25.b, z8.b\n"
+ "zip2 z8.b, z25.b, z8.b\n"
+ "st1b { z28.s }, p1, [x10, x13]\n"
+ "zip2 z5.b, z12.b, z16.b\n"
+ "zip1 z12.b, z12.b, z16.b\n"
+ "st1b { z27.s }, p1, [x9, x13]\n"
+ "incw x13\n"
+ "zip1 z9.b, z17.b, z15.b\n"
+ "zip2 z15.b, z17.b, z15.b\n"
+ "ld1w { z13.s }, p2/Z, [%x[params], #4, MUL VL]\n"
"addvl %x[params], %x[params], #8\n"
- "zip1 z7.b, z7.b, z22.b\n"
+ "zip2 z19.b, z2.b, z18.b\n"
+ "zip1 z2.b, z2.b, z18.b\n"
+ "zip1 z18.b, z24.b, z7.b\n"
+ "zip2 z7.b, z24.b, z7.b\n"
+ "zip2 z17.b, z4.b, z22.b\n"
+ "zip1 z4.b, z4.b, z22.b\n"
"zip1 z16.b, z23.b, z6.b\n"
"zip2 z6.b, z23.b, z6.b\n"
- "zip2 z1.b, z13.b, z20.b\n"
- "zip1 z13.b, z13.b, z20.b\n"
- "zip1 z0.b, z21.b, z10.b\n"
- "zip2 z10.b, z21.b, z10.b\n"
- "zip2 z31.b, z9.b, z18.b\n"
- "zip1 z9.b, z9.b, z18.b\n"
- "zip1 z30.b, z19.b, z8.b\n"
- "zip2 z8.b, z19.b, z8.b\n"
- "zip2 z27.b, z7.b, z16.b\n"
- "zip1 z7.b, z7.b, z16.b\n"
- "zip1 z25.b, z17.b, z6.b\n"
+ "zip2 z31.b, z10.b, z20.b\n"
+ "zip1 z10.b, z10.b, z20.b\n"
+ "zip1 z26.b, z21.b, z8.b\n"
+ "zip2 z8.b, z21.b, z8.b\n"
+ "zip2 z25.b, z2.b, z18.b\n"
+ "zip1 z2.b, z2.b, z18.b\n"
+ "zip1 z28.b, z19.b, z7.b\n"
+ "zip2 z7.b, z19.b, z7.b\n"
+ "zip2 z27.b, z4.b, z16.b\n"
+ "zip1 z4.b, z4.b, z16.b\n"
+ "zip1 z29.b, z17.b, z6.b\n"
"zip2 z6.b, z17.b, z6.b\n"
- "mov z24.d, z5.d\n"
- "mov z22.d, z5.d\n"
- "mov z21.d, z5.d\n"
+ "mov z21.d, z13.d\n"
+ "mov z20.d, z13.d\n"
+ "mov z23.d, z13.d\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
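The s8qs kernel above is symmetric (no `b_offset` in its operand list), so its accumulators go straight into requantization. The u8q variant that follows additionally has to undo the weight zero point: the `mov z.s, #0x0` / `udot ..., z9.b, ...` chains accumulate sums of the raw input bytes against a broadcast constant (built from `1 | 0x100 | 0x10000`, i.e. byte lanes {1,1,1,0}, which appears to match the three taps packed into each dot product), and `mls ..., z13.s` then subtracts `b_offset` times that sum from each accumulator. A minimal scalar sketch of that correction, with assumed names (not the library's API):

#include <cstdint>

// Scalar sketch of the zero-point correction the u8q kernel folds into its
// accumulators before requantization: acc -= b_offset * sum(inputs).
int32_t correct_zero_point(const uint8_t *inputs, int n,
                           int32_t acc, int32_t b_offset)
{
    int32_t input_sum = 0;
    for (int i = 0; i < n; ++i)
    {
        input_sum += inputs[i];   // udot against an all-ones byte vector
    }
    return acc - b_offset * input_sum;  // mls: integer multiply-subtract
}

Computing the sums with `udot` against a constant-ones vector keeps the correction in the same dot-product pipeline as the convolution itself, which is why the rewritten schedule below interleaves the `udot z24.s, z9.b, ...` sum accumulation with the main `udot` convolution work rather than hoisting it out.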
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
index f0860c98b9..649540ace6 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,456 +34,456 @@ void sve_u8q_nhwc_3x3_s1_output2x2_dot_depthfirst_impl(const unsigned int n_chan
{
__asm__ __volatile__(
"mov x14, #0x0\n"
- "whilelt p0.b, x14, %x[n_channels]\n"
"ldp x27, x26, [%x[inptrs], #0x0]\n"
"ldp x25, x24, [%x[inptrs], #0x10]\n"
+ "mov x28, #0x1\n"
"ldp x23, x22, [%x[inptrs], #0x20]\n"
- "ldp x13, x21, [%x[inptrs], #0x30]\n"
- "mov x20, #0x1\n"
+ "ldp x21, x20, [%x[inptrs], #0x30]\n"
"ptrue p2.b\n"
+ "mov x13, #0x0\n"
"ldp x12, x11, [%x[outptrs], #0x0]\n"
"ldp x10, x9, [%x[outptrs], #0x10]\n"
- "orr x20, x20, #0x100\n"
- "orr x20, x20, #0x10000\n"
- "ld1b { z15.b }, p0/Z, [x27, x14]\n"
- "ld1b { z21.b }, p0/Z, [x26, x14]\n"
- "dup z25.s, w20\n"
- "mov x28, #0x0\n"
+ "whilelt p0.b, x14, %x[n_channels]\n"
+ "orr x28, x28, #0x100\n"
+ "ld1rw { z31.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
+ "ld1rw { z21.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1b { z12.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z24.b }, p0/Z, [x26, x14]\n"
+ "orr x28, x28, #0x10000\n"
"ldp x27, x26, [%x[inptrs], #0x40]\n"
- "ld1b { z31.b }, p0/Z, [x25, x14]\n"
- "zip2 z16.b, z15.b, z31.b\n"
- "zip1 z15.b, z15.b, z31.b\n"
- "ld1b { z29.b }, p0/Z, [x24, x14]\n"
+ "ld1b { z26.b }, p0/Z, [x25, x14]\n"
+ "ld1b { z14.b }, p0/Z, [x24, x14]\n"
"ldp x25, x24, [%x[inptrs], #0x50]\n"
- "zip1 z30.b, z21.b, z29.b\n"
- "zip2 z29.b, z21.b, z29.b\n"
- "ld1b { z9.b }, p0/Z, [x23, x14]\n"
- "ld1b { z20.b }, p0/Z, [x22, x14]\n"
- "zip2 z13.b, z15.b, z30.b\n"
- "zip1 z15.b, z15.b, z30.b\n"
+ "ld1b { z5.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z3.b }, p0/Z, [x22, x14]\n"
+ "dup z9.s, w28\n"
"ldp x23, x22, [%x[inptrs], #0x60]\n"
- "ld1b { z5.b }, p0/Z, [x13, x14]\n"
- "zip1 z14.b, z16.b, z29.b\n"
- "zip2 z29.b, z16.b, z29.b\n"
- "ld1b { z17.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z19.b }, p0/Z, [x21, x14]\n"
+ "zip2 z18.b, z12.b, z26.b\n"
+ "zip1 z12.b, z12.b, z26.b\n"
+ "ld1b { z30.b }, p0/Z, [x20, x14]\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "zip2 z31.b, z9.b, z5.b\n"
- "zip1 z9.b, z9.b, z5.b\n"
- "ld1b { z18.b }, p0/Z, [x27, x14]\n"
- "ld1b { z28.b }, p0/Z, [x26, x14]\n"
- "zip1 z21.b, z20.b, z17.b\n"
- "zip2 z17.b, z20.b, z17.b\n"
- "ld1b { z6.b }, p0/Z, [x25, x14]\n"
- "ld1b { z4.b }, p0/Z, [x24, x14]\n"
- "zip2 z23.b, z18.b, z6.b\n"
- "zip1 z18.b, z18.b, z6.b\n"
- "ld1b { z2.b }, p0/Z, [x23, x14]\n"
- "ld1b { z19.b }, p0/Z, [x22, x14]\n"
- "zip1 z24.b, z28.b, z4.b\n"
- "zip2 z4.b, z28.b, z4.b\n"
- "ld1b { z16.b }, p0/Z, [x21, x14]\n"
- "ld1b { z5.b }, p0/Z, [x20, x14]\n"
- "zip2 z22.b, z2.b, z16.b\n"
- "zip1 z2.b, z2.b, z16.b\n"
- "zip1 z0.b, z19.b, z5.b\n"
- "zip2 z5.b, z19.b, z5.b\n"
- "ld1w { z10.s }, p2/Z, [%x[params]]\n"
- "ld1rw { z7.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z6.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "ld1rw { z8.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "zip2 z19.b, z9.b, z21.b\n"
- "zip1 z9.b, z9.b, z21.b\n"
- "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "ldp x27, x26, [%x[inptrs], #0x0]\n"
- "zip1 z11.b, z31.b, z17.b\n"
- "zip2 z17.b, z31.b, z17.b\n"
- "ldp x25, x23, [%x[inptrs], #0x10]\n"
+ "zip1 z17.b, z24.b, z14.b\n"
+ "zip2 z14.b, z24.b, z14.b\n"
+ "ld1b { z29.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z25.b }, p0/Z, [x26, x14]\n"
+ "ld1b { z16.b }, p0/Z, [x25, x14]\n"
+ "ld1b { z7.b }, p0/Z, [x24, x14]\n"
+ "zip2 z22.b, z5.b, z19.b\n"
+ "zip1 z5.b, z5.b, z19.b\n"
+ "ld1b { z6.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z28.b }, p0/Z, [x22, x14]\n"
+ "zip2 z2.b, z12.b, z17.b\n"
+ "zip1 z12.b, z12.b, z17.b\n"
+ "ld1b { z23.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z4.b }, p0/Z, [x20, x14]\n"
+ "zip1 z8.b, z18.b, z14.b\n"
+ "zip2 z14.b, z18.b, z14.b\n"
+ "zip1 z26.b, z3.b, z30.b\n"
+ "zip2 z30.b, z3.b, z30.b\n"
+ "ld1w { z0.s }, p2/Z, [%x[params]]\n"
+ "ldp x28, x27, [%x[inptrs], #0x0]\n"
+ "zip2 z24.b, z29.b, z16.b\n"
+ "zip1 z29.b, z29.b, z16.b\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
"ldp x24, x22, [%x[inptrs], #0x20]\n"
- "zip2 z12.b, z18.b, z24.b\n"
- "zip1 z18.b, z18.b, z24.b\n"
+ "zip1 z16.b, z25.b, z7.b\n"
+ "zip2 z7.b, z25.b, z7.b\n"
"ldp x21, x20, [%x[inptrs], #0x30]\n"
- "zip1 z20.b, z23.b, z4.b\n"
- "zip2 z4.b, z23.b, z4.b\n"
- "ld1b { z26.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "zip2 z24.b, z2.b, z0.b\n"
- "zip1 z2.b, z2.b, z0.b\n"
- "ld1b { z3.b }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z1.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "zip1 z0.b, z22.b, z5.b\n"
- "zip2 z5.b, z22.b, z5.b\n"
+ "ld1b { z17.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "zip2 z25.b, z6.b, z23.b\n"
+ "zip1 z6.b, z6.b, z23.b\n"
+ "ld1b { z20.b }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "zip1 z19.b, z28.b, z4.b\n"
+ "zip2 z4.b, z28.b, z4.b\n"
"addvl %x[params], %x[params], #4\n"
- "mov z22.d, z10.d\n"
- "mov z31.d, z10.d\n"
- "mov z21.d, z10.d\n"
+ "zip2 z23.b, z5.b, z26.b\n"
+ "zip1 z5.b, z5.b, z26.b\n"
+ "zip1 z3.b, z22.b, z30.b\n"
+ "zip2 z30.b, z22.b, z30.b\n"
+ "zip2 z11.b, z29.b, z16.b\n"
+ "zip1 z29.b, z29.b, z16.b\n"
+ "zip1 z16.b, z24.b, z7.b\n"
+ "zip2 z7.b, z24.b, z7.b\n"
+ "zip2 z1.b, z6.b, z19.b\n"
+ "zip1 z6.b, z6.b, z19.b\n"
+ "zip1 z27.b, z25.b, z4.b\n"
+ "zip2 z4.b, z25.b, z4.b\n"
+ "mov z26.d, z0.d\n"
+ "mov z25.d, z0.d\n"
+ "mov z28.d, z0.d\n"
"1:" // Loop
- "mov z30.s, #0x0\n"
- "udot z30.s, z25.b, z9.b\n"
- "udot z10.s, z26.b, z15.b\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "udot z30.s, z25.b, z18.b\n"
- "udot z31.s, z26.b, z9.b\n"
- "mov z27.s, #0x0\n"
- "incw x14, ALL, MUL #4\n"
- "udot z10.s, z3.b, z9.b\n"
- "ext z9.b, z9.b, z9.b, #0x1\n"
- "movprfx z28, z30\n udot z28.s, z25.b, z2.b\n"
- "udot z30.s, z25.b, z15.b\n"
- "ext z15.b, z15.b, z15.b, #0x1\n"
- "udot z27.s, z25.b, z9.b\n"
- "udot z31.s, z3.b, z18.b\n"
- "udot z10.s, z1.b, z18.b\n"
- "ext z18.b, z18.b, z18.b, #0x1\n"
- "udot z22.s, z26.b, z15.b\n"
- "udot z21.s, z26.b, z9.b\n"
- "udot z27.s, z25.b, z18.b\n"
- "udot z31.s, z1.b, z2.b\n"
- "ext z2.b, z2.b, z2.b, #0x1\n"
- "udot z22.s, z3.b, z9.b\n"
- "udot z21.s, z3.b, z18.b\n"
- "ld1w { z3.s }, p2/Z, [%x[params], #1, MUL VL]\n"
- "mls z10.s, p2/M, z30.s, z8.s\n"
- "movprfx z26, z27\n udot z26.s, z25.b, z2.b\n"
- "mov z9.s, #0x0\n"
- "udot z27.s, z25.b, z15.b\n"
- "ld1w { z23.s }, p2/Z, [%x[params]]\n"
- "udot z22.s, z1.b, z18.b\n"
- ".inst 0x04b7754a // sqrdmulh z10.s, z10.s, z23.s\n"
- "udot z21.s, z1.b, z2.b\n"
- "mls z22.s, p2/M, z27.s, z8.s\n"
- "and z18.d, z10.d, z3.d\n"
- "mls z31.s, p2/M, z28.s, z8.s\n"
- "mls z21.s, p2/M, z26.s, z8.s\n"
- "asr z18.s, z18.s, #0x1f\n"
- ".inst 0x04b776d6 // sqrdmulh z22.s, z22.s, z23.s\n"
- ".inst 0x04b777ff // sqrdmulh z31.s, z31.s, z23.s\n"
- "udot z9.s, z25.b, z19.b\n"
- ".inst 0x04b776b5 // sqrdmulh z21.s, z21.s, z23.s\n"
- "sqadd z10.s, z10.s, z18.s\n"
- ".inst 0x4482886a // srshl z10.s, p2/M, z10.s, z3.s\n"
- "udot z9.s, z25.b, z12.b\n"
- "and z28.d, z22.d, z3.d\n"
- "and z23.d, z31.d, z3.d\n"
- "movprfx z27, z9\n udot z27.s, z25.b, z24.b\n"
- "ld1w { z30.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "and z18.d, z21.d, z3.d\n"
- "asr z28.s, z28.s, #0x1f\n"
- "udot z9.s, z25.b, z13.b\n"
- "asr z23.s, z23.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
- "sqadd z22.s, z22.s, z28.s\n"
- "sqadd z31.s, z31.s, z23.s\n"
- ".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
- ".inst 0x4482887f // srshl z31.s, p2/M, z31.s, z3.s\n"
- "sqadd z21.s, z21.s, z18.s\n"
- "add z10.s, z10.s, z16.s\n"
- ".inst 0x44828875 // srshl z21.s, p2/M, z21.s, z3.s\n"
- "smax z10.s, p2/M, z10.s, z7.s\n"
- "add z22.s, z22.s, z16.s\n"
- "add z31.s, z31.s, z16.s\n"
- "smin z10.s, p2/M, z10.s, z6.s\n"
- "smax z22.s, p2/M, z22.s, z7.s\n"
- "add z21.s, z21.s, z16.s\n"
- "smax z31.s, p2/M, z31.s, z7.s\n"
- "smax z21.s, p2/M, z21.s, z7.s\n"
- "st1b { z10.s }, p0, [x12, x28]\n"
- "ld1w { z28.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "ld1b { z1.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
- "smin z21.s, p2/M, z21.s, z6.s\n"
- "st1b { z22.s }, p0, [x11, x28]\n"
- "mov z26.d, z28.d\n"
- "ld1b { z15.b }, p2/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z31.s }, p0, [x10, x28]\n"
- "mov z31.d, z28.d\n"
- "udot z31.s, z1.b, z19.b\n"
- "ld1b { z23.b }, p2/Z, [%x[params], #5, MUL VL]\n"
- "st1b { z21.s }, p0, [x9, x28]\n"
- "mov z22.d, z28.d\n"
- "udot z28.s, z1.b, z13.b\n"
- "udot z28.s, z15.b, z19.b\n"
- "ext z13.b, z13.b, z13.b, #0x1\n"
- "ext z19.b, z19.b, z19.b, #0x1\n"
- "udot z26.s, z1.b, z13.b\n"
- "ld1w { z21.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "mov z24.s, #0x0\n"
+ "udot z0.s, z17.b, z12.b\n"
+ "udot z25.s, z17.b, z5.b\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
"mov z18.s, #0x0\n"
- "udot z22.s, z1.b, z19.b\n"
- "udot z18.s, z25.b, z19.b\n"
- "incw x28\n"
- "udot z31.s, z15.b, z12.b\n"
- "udot z28.s, z23.b, z12.b\n"
+ "incw x14, ALL, MUL #4\n"
+ "udot z24.s, z9.b, z5.b\n"
+ "udot z0.s, z20.b, z5.b\n"
+ "ext z5.b, z5.b, z5.b, #0x1\n"
+ "udot z25.s, z20.b, z29.b\n"
+ "udot z24.s, z9.b, z29.b\n"
+ "udot z18.s, z9.b, z5.b\n"
+ "udot z0.s, z10.b, z29.b\n"
+ "ext z29.b, z29.b, z29.b, #0x1\n"
+ "udot z28.s, z17.b, z5.b\n"
+ "movprfx z19, z24\n udot z19.s, z9.b, z6.b\n"
+ "udot z24.s, z9.b, z12.b\n"
"ext z12.b, z12.b, z12.b, #0x1\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "udot z26.s, z15.b, z19.b\n"
- "udot z22.s, z15.b, z12.b\n"
+ "udot z25.s, z10.b, z6.b\n"
+ "ext z6.b, z6.b, z6.b, #0x1\n"
+ "udot z18.s, z9.b, z29.b\n"
+ "udot z26.s, z17.b, z12.b\n"
+ "udot z28.s, z20.b, z29.b\n"
+ "mls z0.s, p2/M, z24.s, z13.s\n"
+ "mov z22.s, #0x0\n"
+ "mls z25.s, p2/M, z19.s, z13.s\n"
+ "udot z22.s, z9.b, z23.b\n"
+ "udot z26.s, z20.b, z5.b\n"
+ "ld1w { z20.s }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "movprfx z5, z18\n udot z5.s, z9.b, z6.b\n"
+ "udot z18.s, z9.b, z12.b\n"
+ "ld1w { z19.s }, p2/Z, [%x[params]]\n"
+ "udot z28.s, z10.b, z6.b\n"
+ "udot z22.s, z9.b, z11.b\n"
+ "udot z26.s, z10.b, z29.b\n"
+ ".inst 0x04b37400 // sqrdmulh z0.s, z0.s, z19.s\n"
+ ".inst 0x04b37739 // sqrdmulh z25.s, z25.s, z19.s\n"
+ "mls z28.s, p2/M, z5.s, z13.s\n"
+ "and z5.d, z0.d, z20.d\n"
+ "mls z26.s, p2/M, z18.s, z13.s\n"
+ "mov z18.s, #0x0\n"
+ "and z12.d, z25.d, z20.d\n"
+ "movprfx z10, z22\n udot z10.s, z9.b, z1.b\n"
+ "udot z22.s, z9.b, z2.b\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ ".inst 0x04b3779c // sqrdmulh z28.s, z28.s, z19.s\n"
+ ".inst 0x04b3775a // sqrdmulh z26.s, z26.s, z19.s\n"
+ "ld1w { z24.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "asr z12.s, z12.s, #0x1f\n"
+ "sqadd z0.s, z0.s, z5.s\n"
+ "and z19.d, z26.d, z20.d\n"
+ "and z6.d, z28.d, z20.d\n"
+ ".inst 0x44828a80 // srshl z0.s, p2/M, z0.s, z20.s\n"
+ "sqadd z25.s, z25.s, z12.s\n"
+ "ld1b { z5.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "add z0.s, z0.s, z21.s\n"
+ "sqadd z26.s, z26.s, z19.s\n"
+ "ld1b { z19.b }, p2/Z, [%x[params], #4, MUL VL]\n"
+ ".inst 0x44828a99 // srshl z25.s, p2/M, z25.s, z20.s\n"
+ "sqadd z28.s, z28.s, z6.s\n"
+ "ld1b { z17.b }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "smax z0.s, p2/M, z0.s, z31.s\n"
+ ".inst 0x44828a9a // srshl z26.s, p2/M, z26.s, z20.s\n"
+ ".inst 0x44828a9c // srshl z28.s, p2/M, z28.s, z20.s\n"
+ "ld1w { z12.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "add z25.s, z25.s, z21.s\n"
+ "add z26.s, z26.s, z21.s\n"
+ "smin z0.s, p2/M, z0.s, z15.s\n"
+ "add z28.s, z28.s, z21.s\n"
+ "smax z26.s, p2/M, z26.s, z31.s\n"
+ "smax z25.s, p2/M, z25.s, z31.s\n"
+ "smax z28.s, p2/M, z28.s, z31.s\n"
+ "st1b { z0.s }, p0, [x12, x13]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #2, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
- "udot z18.s, z25.b, z12.b\n"
- "udot z31.s, z23.b, z24.b\n"
- "ext z24.b, z24.b, z24.b, #0x1\n"
- "mls z28.s, p2/M, z9.s, z8.s\n"
- "udot z26.s, z23.b, z12.b\n"
- ".inst 0x04be779c // sqrdmulh z28.s, z28.s, z30.s\n"
- "udot z22.s, z23.b, z24.b\n"
- "movprfx z12, z18\n udot z12.s, z25.b, z24.b\n"
- "and z2.d, z28.d, z21.d\n"
- "udot z18.s, z25.b, z13.b\n"
- "mls z26.s, p2/M, z18.s, z8.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- "mls z31.s, p2/M, z27.s, z8.s\n"
- "mls z22.s, p2/M, z12.s, z8.s\n"
- ".inst 0x04be775a // sqrdmulh z26.s, z26.s, z30.s\n"
- ".inst 0x04be77ff // sqrdmulh z31.s, z31.s, z30.s\n"
- ".inst 0x04be76d6 // sqrdmulh z22.s, z22.s, z30.s\n"
- "ld1w { z1.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
- "sqadd z28.s, z28.s, z2.s\n"
- "and z24.d, z26.d, z21.d\n"
- ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
- "and z23.d, z31.d, z21.d\n"
- "and z18.d, z22.d, z21.d\n"
- "asr z24.s, z24.s, #0x1f\n"
- "asr z23.s, z23.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
- "sqadd z26.s, z26.s, z24.s\n"
- ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
- "ld1b { z30.b }, p2/Z, [%x[params], #-6, MUL VL]\n"
- "sqadd z31.s, z31.s, z23.s\n"
- "sqadd z22.s, z22.s, z18.s\n"
- ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
- ".inst 0x44828ab6 // srshl z22.s, p2/M, z22.s, z21.s\n"
- "add z28.s, z28.s, z16.s\n"
- "smax z28.s, p2/M, z28.s, z7.s\n"
- "add z26.s, z26.s, z16.s\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "add z31.s, z31.s, z16.s\n"
- "add z22.s, z22.s, z16.s\n"
- "smax z26.s, p2/M, z26.s, z7.s\n"
- "smax z31.s, p2/M, z31.s, z7.s\n"
- "mov z24.s, #0x0\n"
- "udot z24.s, z25.b, z11.b\n"
- "smax z22.s, p2/M, z22.s, z7.s\n"
- "st1b { z28.s }, p0, [x12, x28]\n"
- "ld1w { z23.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
- "ld1b { z19.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
- "smin z22.s, p2/M, z22.s, z6.s\n"
- "st1b { z26.s }, p0, [x11, x28]\n"
- "mov z28.d, z23.d\n"
- "udot z24.s, z25.b, z20.b\n"
- "st1b { z31.s }, p0, [x10, x28]\n"
- "mov z27.d, z23.d\n"
- "udot z27.s, z19.b, z11.b\n"
- "movprfx z13, z24\n udot z13.s, z25.b, z0.b\n"
- "st1b { z22.s }, p0, [x9, x28]\n"
- "mov z26.d, z23.d\n"
- "udot z23.s, z19.b, z14.b\n"
- "udot z23.s, z30.b, z11.b\n"
- "udot z24.s, z25.b, z14.b\n"
- "ext z14.b, z14.b, z14.b, #0x1\n"
- "ld1b { z21.b }, p2/Z, [%x[params], #-5, MUL VL]\n"
- "udot z28.s, z19.b, z14.b\n"
+ "smin z26.s, p2/M, z26.s, z15.s\n"
+ "smin z25.s, p2/M, z25.s, z15.s\n"
+ "smin z28.s, p2/M, z28.s, z15.s\n"
+ "st1b { z26.s }, p0, [x11, x13]\n"
+ "mov z6.d, z29.d\n"
+ "st1b { z25.s }, p0, [x10, x13]\n"
+ "mov z25.d, z29.d\n"
+ "st1b { z28.s }, p0, [x9, x13]\n"
+ "mov z0.d, z29.d\n"
+ "udot z29.s, z17.b, z2.b\n"
+ "incw x13\n"
+ "udot z25.s, z17.b, z23.b\n"
+ "ext z2.b, z2.b, z2.b, #0x1\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
+ "udot z29.s, z19.b, z23.b\n"
+ "ext z23.b, z23.b, z23.b, #0x1\n"
+ "udot z6.s, z17.b, z2.b\n"
+ "udot z0.s, z17.b, z23.b\n"
+ "udot z18.s, z9.b, z23.b\n"
+ "udot z25.s, z19.b, z11.b\n"
+ "udot z29.s, z5.b, z11.b\n"
"ext z11.b, z11.b, z11.b, #0x1\n"
- "mov z12.s, #0x0\n"
- "udot z26.s, z19.b, z11.b\n"
- "ld1w { z22.s }, p2/Z, [%x[params], #-3, MUL VL]\n"
- "udot z12.s, z25.b, z11.b\n"
- "udot z27.s, z30.b, z20.b\n"
- "incw x28\n"
- "whilelt p0.s, x28, %x[n_channels]\n"
- "udot z23.s, z21.b, z20.b\n"
- "ext z20.b, z20.b, z20.b, #0x1\n"
- "udot z28.s, z30.b, z11.b\n"
- "udot z26.s, z30.b, z20.b\n"
- "udot z12.s, z25.b, z20.b\n"
- "udot z27.s, z21.b, z0.b\n"
- "ext z0.b, z0.b, z0.b, #0x1\n"
- "mls z23.s, p2/M, z24.s, z8.s\n"
- "udot z28.s, z21.b, z20.b\n"
- "udot z26.s, z21.b, z0.b\n"
- ".inst 0x04a176f7 // sqrdmulh z23.s, z23.s, z1.s\n"
- "movprfx z19, z12\n udot z19.s, z25.b, z0.b\n"
- "udot z12.s, z25.b, z14.b\n"
- "and z18.d, z23.d, z22.d\n"
- "mls z28.s, p2/M, z12.s, z8.s\n"
- "mls z27.s, p2/M, z13.s, z8.s\n"
+ "udot z6.s, z19.b, z23.b\n"
+ "udot z0.s, z19.b, z11.b\n"
+ "udot z18.s, z9.b, z11.b\n"
+ "udot z25.s, z5.b, z1.b\n"
+ "ext z1.b, z1.b, z1.b, #0x1\n"
+ "mls z29.s, p2/M, z22.s, z13.s\n"
+ "mov z28.s, #0x0\n"
+ "udot z6.s, z5.b, z11.b\n"
+ "udot z0.s, z5.b, z1.b\n"
+ "movprfx z11, z18\n udot z11.s, z9.b, z1.b\n"
+ "udot z18.s, z9.b, z2.b\n"
+ "udot z28.s, z9.b, z3.b\n"
+ ".inst 0x04b877bd // sqrdmulh z29.s, z29.s, z24.s\n"
+ "mls z25.s, p2/M, z10.s, z13.s\n"
+ "mls z6.s, p2/M, z18.s, z13.s\n"
+ "mov z1.s, #0x0\n"
+ "mls z0.s, p2/M, z11.s, z13.s\n"
+ "and z11.d, z29.d, z12.d\n"
+ ".inst 0x04b87739 // sqrdmulh z25.s, z25.s, z24.s\n"
+ "udot z28.s, z9.b, z16.b\n"
+ "asr z11.s, z11.s, #0x1f\n"
+ ".inst 0x04b874c6 // sqrdmulh z6.s, z6.s, z24.s\n"
+ ".inst 0x04b87400 // sqrdmulh z0.s, z0.s, z24.s\n"
+ "ld1w { z5.s }, p2/Z, [%x[params], #-4, MUL VL]\n"
+ "and z22.d, z25.d, z12.d\n"
+ "sqadd z29.s, z29.s, z11.s\n"
+ "and z18.d, z6.d, z12.d\n"
+ "movprfx z24, z28\n udot z24.s, z9.b, z27.b\n"
+ "udot z28.s, z9.b, z8.b\n"
+ "and z11.d, z0.d, z12.d\n"
+ "asr z22.s, z22.s, #0x1f\n"
"asr z18.s, z18.s, #0x1f\n"
- "mls z26.s, p2/M, z19.s, z8.s\n"
- ".inst 0x04a1779c // sqrdmulh z28.s, z28.s, z1.s\n"
- ".inst 0x04a1777b // sqrdmulh z27.s, z27.s, z1.s\n"
- ".inst 0x04a1775a // sqrdmulh z26.s, z26.s, z1.s\n"
- "ld1w { z2.s }, p2/Z, [%x[params], #2, MUL VL]\n"
- "sqadd z23.s, z23.s, z18.s\n"
- "and z20.d, z28.d, z22.d\n"
- ".inst 0x44828ad7 // srshl z23.s, p2/M, z23.s, z22.s\n"
- "and z19.d, z27.d, z22.d\n"
- "and z18.d, z26.d, z22.d\n"
- "asr z20.s, z20.s, #0x1f\n"
+ ".inst 0x4482899d // srshl z29.s, p2/M, z29.s, z12.s\n"
+ "asr z11.s, z11.s, #0x1f\n"
+ "sqadd z6.s, z6.s, z18.s\n"
+ "ld1b { z20.b }, p2/Z, [%x[params], #-6, MUL VL]\n"
+ "sqadd z25.s, z25.s, z22.s\n"
+ "ld1b { z19.b }, p2/Z, [%x[params], #-5, MUL VL]\n"
+ "add z29.s, z29.s, z21.s\n"
+ "sqadd z0.s, z0.s, z11.s\n"
+ "ld1b { z18.b }, p2/Z, [%x[params], #-7, MUL VL]\n"
+ ".inst 0x44828986 // srshl z6.s, p2/M, z6.s, z12.s\n"
+ ".inst 0x44828999 // srshl z25.s, p2/M, z25.s, z12.s\n"
+ "smax z29.s, p2/M, z29.s, z31.s\n"
+ ".inst 0x44828980 // srshl z0.s, p2/M, z0.s, z12.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #-3, MUL VL]\n"
+ "add z6.s, z6.s, z21.s\n"
+ "add z25.s, z25.s, z21.s\n"
+ "add z0.s, z0.s, z21.s\n"
+ "smin z29.s, p2/M, z29.s, z15.s\n"
+ "smax z6.s, p2/M, z6.s, z31.s\n"
+ "smax z25.s, p2/M, z25.s, z31.s\n"
+ "smax z0.s, p2/M, z0.s, z31.s\n"
+ "st1b { z29.s }, p0, [x12, x13]\n"
+ "ld1w { z29.s }, p2/Z, [%x[params], #-8, MUL VL]\n"
+ "smin z6.s, p2/M, z6.s, z15.s\n"
+ "smin z25.s, p2/M, z25.s, z15.s\n"
+ "smin z0.s, p2/M, z0.s, z15.s\n"
+ "st1b { z6.s }, p0, [x11, x13]\n"
+ "mov z11.d, z29.d\n"
+ "st1b { z25.s }, p0, [x10, x13]\n"
+ "mov z26.d, z29.d\n"
+ "st1b { z0.s }, p0, [x9, x13]\n"
+ "mov z25.d, z29.d\n"
+ "udot z29.s, z18.b, z8.b\n"
+ "incw x13\n"
+ "udot z26.s, z18.b, z3.b\n"
+ "ext z8.b, z8.b, z8.b, #0x1\n"
+ "whilelt p0.s, x13, %x[n_channels]\n"
+ "udot z29.s, z20.b, z3.b\n"
+ "ext z3.b, z3.b, z3.b, #0x1\n"
+ "udot z11.s, z18.b, z8.b\n"
+ "udot z25.s, z18.b, z3.b\n"
+ "udot z1.s, z9.b, z3.b\n"
+ "udot z26.s, z20.b, z16.b\n"
+ "udot z29.s, z19.b, z16.b\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
+ "udot z11.s, z20.b, z3.b\n"
+ "udot z25.s, z20.b, z16.b\n"
+ "udot z1.s, z9.b, z16.b\n"
+ "udot z26.s, z19.b, z27.b\n"
+ "ext z27.b, z27.b, z27.b, #0x1\n"
+ "mls z29.s, p2/M, z28.s, z13.s\n"
+ "mov z22.s, #0x0\n"
+ "udot z11.s, z19.b, z16.b\n"
+ "udot z25.s, z19.b, z27.b\n"
+ "movprfx z18, z1\n udot z18.s, z9.b, z27.b\n"
+ "udot z1.s, z9.b, z8.b\n"
+ "udot z22.s, z9.b, z30.b\n"
+ ".inst 0x04a577bd // sqrdmulh z29.s, z29.s, z5.s\n"
+ "mls z26.s, p2/M, z24.s, z13.s\n"
+ "mls z11.s, p2/M, z1.s, z13.s\n"
+ "mov z10.s, #0x0\n"
+ "mls z25.s, p2/M, z18.s, z13.s\n"
+ "and z18.d, z29.d, z23.d\n"
+ ".inst 0x04a5775a // sqrdmulh z26.s, z26.s, z5.s\n"
+ "udot z22.s, z9.b, z7.b\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04a5756b // sqrdmulh z11.s, z11.s, z5.s\n"
+ ".inst 0x04a57739 // sqrdmulh z25.s, z25.s, z5.s\n"
+ "ld1w { z8.s }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "and z19.d, z26.d, z23.d\n"
+ "sqadd z29.s, z29.s, z18.s\n"
+ "and z18.d, z11.d, z23.d\n"
+ "movprfx z6, z22\n udot z6.s, z9.b, z4.b\n"
+ "udot z22.s, z9.b, z14.b\n"
+ "and z20.d, z25.d, z23.d\n"
"asr z19.s, z19.s, #0x1f\n"
"asr z18.s, z18.s, #0x1f\n"
- "sqadd z28.s, z28.s, z20.s\n"
- ".inst 0x44828adc // srshl z28.s, p2/M, z28.s, z22.s\n"
- "ld1b { z13.b }, p2/Z, [%x[params]]\n"
- "sqadd z27.s, z27.s, z19.s\n"
- "sqadd z26.s, z26.s, z18.s\n"
- ".inst 0x44828adb // srshl z27.s, p2/M, z27.s, z22.s\n"
- ".inst 0x44828ada // srshl z26.s, p2/M, z26.s, z22.s\n"
- "add z23.s, z23.s, z16.s\n"
- "smax z23.s, p2/M, z23.s, z7.s\n"
- "add z28.s, z28.s, z16.s\n"
- "smin z23.s, p2/M, z23.s, z6.s\n"
- "add z27.s, z27.s, z16.s\n"
- "add z26.s, z26.s, z16.s\n"
- "smax z28.s, p2/M, z28.s, z7.s\n"
- "smax z27.s, p2/M, z27.s, z7.s\n"
- "mov z24.s, #0x0\n"
- "udot z24.s, z25.b, z17.b\n"
- "smax z26.s, p2/M, z26.s, z7.s\n"
- "st1b { z23.s }, p0, [x12, x28]\n"
- "ld1w { z1.s }, p2/Z, [%x[params], #-2, MUL VL]\n"
- "ld1b { z21.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
- "smin z28.s, p2/M, z28.s, z6.s\n"
- "smin z27.s, p2/M, z27.s, z6.s\n"
- "smin z26.s, p2/M, z26.s, z6.s\n"
- "st1b { z28.s }, p0, [x11, x28]\n"
- "mov z0.d, z1.d\n"
- "udot z24.s, z25.b, z4.b\n"
- "st1b { z27.s }, p0, [x10, x28]\n"
- "mov z31.d, z1.d\n"
- "udot z31.s, z21.b, z17.b\n"
- "movprfx z23, z24\n udot z23.s, z25.b, z5.b\n"
- "st1b { z26.s }, p0, [x9, x28]\n"
- "mov z30.d, z1.d\n"
- "udot z1.s, z21.b, z29.b\n"
- "udot z1.s, z13.b, z17.b\n"
- "udot z24.s, z25.b, z29.b\n"
- "ext z29.b, z29.b, z29.b, #0x1\n"
- "ld1b { z20.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "udot z0.s, z21.b, z29.b\n"
- "ext z17.b, z17.b, z17.b, #0x1\n"
- "mov z19.s, #0x0\n"
- "udot z30.s, z21.b, z17.b\n"
- "ld1w { z22.s }, p2/Z, [%x[params], #3, MUL VL]\n"
- "udot z19.s, z25.b, z17.b\n"
- "udot z31.s, z13.b, z4.b\n"
- "incw x28\n"
- "whilelt p1.s, x28, %x[n_channels]\n"
- "udot z1.s, z20.b, z4.b\n"
- "ext z4.b, z4.b, z4.b, #0x1\n"
- "udot z0.s, z13.b, z17.b\n"
+ ".inst 0x44828afd // srshl z29.s, p2/M, z29.s, z23.s\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "sqadd z11.s, z11.s, z18.s\n"
+ "ld1b { z24.b }, p2/Z, [%x[params]]\n"
+ "sqadd z26.s, z26.s, z19.s\n"
+ "ld1b { z19.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "add z29.s, z29.s, z21.s\n"
+ "sqadd z25.s, z25.s, z20.s\n"
+ "ld1b { z18.b }, p2/Z, [%x[params], #-1, MUL VL]\n"
+ ".inst 0x44828aeb // srshl z11.s, p2/M, z11.s, z23.s\n"
+ ".inst 0x44828afa // srshl z26.s, p2/M, z26.s, z23.s\n"
+ "smax z29.s, p2/M, z29.s, z31.s\n"
+ ".inst 0x44828af9 // srshl z25.s, p2/M, z25.s, z23.s\n"
+ "ld1w { z23.s }, p2/Z, [%x[params], #3, MUL VL]\n"
+ "add z11.s, z11.s, z21.s\n"
+ "add z26.s, z26.s, z21.s\n"
+ "add z25.s, z25.s, z21.s\n"
+ "smin z29.s, p2/M, z29.s, z15.s\n"
+ "smax z11.s, p2/M, z11.s, z31.s\n"
+ "smax z26.s, p2/M, z26.s, z31.s\n"
+ "smax z25.s, p2/M, z25.s, z31.s\n"
+ "st1b { z29.s }, p0, [x12, x13]\n"
+ "ld1w { z2.s }, p2/Z, [%x[params], #-2, MUL VL]\n"
+ "smin z11.s, p2/M, z11.s, z15.s\n"
+ "smin z26.s, p2/M, z26.s, z15.s\n"
+ "smin z25.s, p2/M, z25.s, z15.s\n"
+ "st1b { z11.s }, p0, [x11, x13]\n"
+ "mov z28.d, z2.d\n"
+ "st1b { z26.s }, p0, [x10, x13]\n"
+ "mov z1.d, z2.d\n"
+ "st1b { z25.s }, p0, [x9, x13]\n"
+ "mov z3.d, z2.d\n"
+ "udot z2.s, z18.b, z14.b\n"
+ "incw x13\n"
+ "udot z1.s, z18.b, z30.b\n"
+ "ext z14.b, z14.b, z14.b, #0x1\n"
+ "whilelt p1.s, x13, %x[n_channels]\n"
"whilelt p0.b, x14, %x[n_channels]\n"
- "udot z30.s, z13.b, z4.b\n"
- "udot z19.s, z25.b, z4.b\n"
- "ld1b { z13.b }, p0/Z, [x26, x14]\n"
- "ld1b { z28.b }, p0/Z, [x25, x14]\n"
- "udot z31.s, z20.b, z5.b\n"
- "ext z5.b, z5.b, z5.b, #0x1\n"
- "mls z1.s, p2/M, z24.s, z8.s\n"
- "ld1b { z27.b }, p0/Z, [x22, x14]\n"
- "udot z0.s, z20.b, z4.b\n"
- "udot z30.s, z20.b, z5.b\n"
- ".inst 0x04a27421 // sqrdmulh z1.s, z1.s, z2.s\n"
- "ld1b { z26.b }, p0/Z, [x21, x14]\n"
- "movprfx z18, z19\n udot z18.s, z25.b, z5.b\n"
- "udot z19.s, z25.b, z29.b\n"
- "and z11.d, z1.d, z22.d\n"
- "ld1b { z29.b }, p0/Z, [x23, x14]\n"
- "mls z0.s, p2/M, z19.s, z8.s\n"
- "mls z31.s, p2/M, z23.s, z8.s\n"
- "asr z11.s, z11.s, #0x1f\n"
- "ld1b { z17.b }, p0/Z, [x20, x14]\n"
- "mls z30.s, p2/M, z18.s, z8.s\n"
- ".inst 0x04a27400 // sqrdmulh z0.s, z0.s, z2.s\n"
- ".inst 0x04a277ff // sqrdmulh z31.s, z31.s, z2.s\n"
- ".inst 0x04a277de // sqrdmulh z30.s, z30.s, z2.s\n"
- "ld1b { z15.b }, p0/Z, [x27, x14]\n"
+ "udot z2.s, z24.b, z30.b\n"
+ "ext z30.b, z30.b, z30.b, #0x1\n"
+ "udot z28.s, z18.b, z14.b\n"
+ "ld1b { z0.b }, p0/Z, [x27, x14]\n"
+ "ld1b { z27.b }, p0/Z, [x26, x14]\n"
+ "ld1b { z26.b }, p0/Z, [x22, x14]\n"
+ "ld1b { z25.b }, p0/Z, [x21, x14]\n"
+ "udot z3.s, z18.b, z30.b\n"
+ "udot z10.s, z9.b, z30.b\n"
+ "udot z1.s, z24.b, z7.b\n"
+ "udot z2.s, z19.b, z7.b\n"
+ "ext z7.b, z7.b, z7.b, #0x1\n"
+ "udot z28.s, z24.b, z30.b\n"
+ "ld1b { z30.b }, p0/Z, [x20, x14]\n"
+ "udot z3.s, z24.b, z7.b\n"
+ "udot z10.s, z9.b, z7.b\n"
+ "udot z1.s, z19.b, z4.b\n"
+ "ext z4.b, z4.b, z4.b, #0x1\n"
+ "mls z2.s, p2/M, z22.s, z13.s\n"
+ "udot z28.s, z19.b, z7.b\n"
+ "udot z3.s, z19.b, z4.b\n"
+ "movprfx z18, z10\n udot z18.s, z9.b, z4.b\n"
+ "udot z10.s, z9.b, z14.b\n"
+ "ld1b { z14.b }, p0/Z, [x25, x14]\n"
+ "mls z1.s, p2/M, z6.s, z13.s\n"
+ ".inst 0x04a87442 // sqrdmulh z2.s, z2.s, z8.s\n"
+ "mls z3.s, p2/M, z18.s, z13.s\n"
+ "and z18.d, z2.d, z23.d\n"
+ "mls z28.s, p2/M, z10.s, z13.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04a87421 // sqrdmulh z1.s, z1.s, z8.s\n"
+ ".inst 0x04a8779c // sqrdmulh z28.s, z28.s, z8.s\n"
+ ".inst 0x04a87463 // sqrdmulh z3.s, z3.s, z8.s\n"
+ "ld1b { z12.b }, p0/Z, [x28, x14]\n"
"ldp x23, x22, [%x[inptrs], #0x40]\n"
- "sqadd z1.s, z1.s, z11.s\n"
- "and z21.d, z0.d, z22.d\n"
- ".inst 0x44828ac1 // srshl z1.s, p2/M, z1.s, z22.s\n"
"ldp x21, x20, [%x[inptrs], #0x50]\n"
- "and z20.d, z31.d, z22.d\n"
- "and z19.d, z30.d, z22.d\n"
- "ld1b { z18.b }, p0/Z, [x23, x14]\n"
- "ld1b { z11.b }, p0/Z, [x22, x14]\n"
- "asr z21.s, z21.s, #0x1f\n"
- "asr z20.s, z20.s, #0x1f\n"
- "ld1b { z24.b }, p0/Z, [x21, x14]\n"
- "ld1b { z4.b }, p0/Z, [x20, x14]\n"
+ "sqadd z2.s, z2.s, z18.s\n"
+ "and z22.d, z1.d, z23.d\n"
+ "and z18.d, z28.d, z23.d\n"
+ "and z19.d, z3.d, z23.d\n"
+ "ld1b { z29.b }, p0/Z, [x23, x14]\n"
+ "ld1b { z24.b }, p0/Z, [x22, x14]\n"
+ "asr z22.s, z22.s, #0x1f\n"
+ ".inst 0x44828ae2 // srshl z2.s, p2/M, z2.s, z23.s\n"
+ "ld1b { z11.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z7.b }, p0/Z, [x20, x14]\n"
+ "asr z18.s, z18.s, #0x1f\n"
"asr z19.s, z19.s, #0x1f\n"
- "sqadd z0.s, z0.s, z21.s\n"
- ".inst 0x44828ac0 // srshl z0.s, p2/M, z0.s, z22.s\n"
- "ld1b { z3.b }, p2/Z, [%x[params], #6, MUL VL]\n"
- "sqadd z31.s, z31.s, z20.s\n"
- "sqadd z30.s, z30.s, z19.s\n"
- ".inst 0x44828adf // srshl z31.s, p2/M, z31.s, z22.s\n"
- ".inst 0x44828ade // srshl z30.s, p2/M, z30.s, z22.s\n"
- "add z1.s, z1.s, z16.s\n"
- "smax z1.s, p2/M, z1.s, z7.s\n"
- "add z0.s, z0.s, z16.s\n"
- "ld1b { z9.b }, p0/Z, [x24, x14]\n"
- "add z31.s, z31.s, z16.s\n"
- "add z30.s, z30.s, z16.s\n"
+ "sqadd z1.s, z1.s, z22.s\n"
+ "ld1b { z10.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "add z2.s, z2.s, z21.s\n"
+ "sqadd z28.s, z28.s, z18.s\n"
+ "ld1b { z20.b }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "sqadd z3.s, z3.s, z19.s\n"
+ "ld1b { z17.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ ".inst 0x44828ae1 // srshl z1.s, p2/M, z1.s, z23.s\n"
+ "smax z2.s, p2/M, z2.s, z31.s\n"
+ ".inst 0x44828afc // srshl z28.s, p2/M, z28.s, z23.s\n"
+ ".inst 0x44828ae3 // srshl z3.s, p2/M, z3.s, z23.s\n"
+ "ld1b { z5.b }, p0/Z, [x24, x14]\n"
"ldp x23, x22, [%x[inptrs], #0x60]\n"
"ldp x21, x20, [%x[inptrs], #0x70]\n"
- "smin z1.s, p2/M, z1.s, z6.s\n"
- "smax z0.s, p2/M, z0.s, z7.s\n"
- "st1b { z1.s }, p1, [x12, x28]\n"
- "ld1b { z2.b }, p0/Z, [x23, x14]\n"
- "smax z31.s, p2/M, z31.s, z7.s\n"
- "smax z30.s, p2/M, z30.s, z7.s\n"
+ "ldp x28, x27, [%x[inptrs], #0x0]\n"
+ "add z1.s, z1.s, z21.s\n"
+ "smin z2.s, p2/M, z2.s, z15.s\n"
+ "ldp x26, x25, [%x[inptrs], #0x10]\n"
+ "add z28.s, z28.s, z21.s\n"
+ "add z3.s, z3.s, z21.s\n"
+ "ld1b { z6.b }, p0/Z, [x23, x14]\n"
"ld1b { z23.b }, p0/Z, [x22, x14]\n"
- "ld1b { z22.b }, p0/Z, [x21, x14]\n"
- "ld1b { z5.b }, p0/Z, [x20, x14]\n"
- "zip2 z20.b, z15.b, z28.b\n"
- "zip1 z15.b, z15.b, z28.b\n"
- "smin z0.s, p2/M, z0.s, z6.s\n"
- "zip1 z19.b, z13.b, z29.b\n"
- "zip2 z29.b, z13.b, z29.b\n"
- "smin z31.s, p2/M, z31.s, z6.s\n"
- "smin z30.s, p2/M, z30.s, z6.s\n"
- "st1b { z0.s }, p1, [x11, x28]\n"
- "zip2 z13.b, z15.b, z19.b\n"
- "zip1 z15.b, z15.b, z19.b\n"
- "ldp x27, x26, [%x[inptrs], #0x0]\n"
- "st1b { z31.s }, p1, [x10, x28]\n"
- "zip1 z14.b, z20.b, z29.b\n"
- "zip2 z29.b, z20.b, z29.b\n"
- "ld1w { z10.s }, p2/Z, [%x[params], #4, MUL VL]\n"
- "st1b { z30.s }, p1, [x9, x28]\n"
- "zip2 z21.b, z9.b, z26.b\n"
- "zip1 z9.b, z9.b, z26.b\n"
- "incw x28\n"
- "zip1 z20.b, z27.b, z17.b\n"
- "zip2 z17.b, z27.b, z17.b\n"
- "ldp x25, x23, [%x[inptrs], #0x10]\n"
"ldp x24, x22, [%x[inptrs], #0x20]\n"
- "zip2 z31.b, z18.b, z24.b\n"
- "zip1 z18.b, z18.b, z24.b\n"
+ "smax z1.s, p2/M, z1.s, z31.s\n"
+ "st1b { z2.s }, p1, [x12, x13]\n"
+ "ld1b { z22.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z4.b }, p0/Z, [x20, x14]\n"
+ "zip2 z19.b, z12.b, z27.b\n"
+ "zip1 z12.b, z12.b, z27.b\n"
+ "smax z28.s, p2/M, z28.s, z31.s\n"
+ "smax z3.s, p2/M, z3.s, z31.s\n"
+ "zip1 z18.b, z0.b, z14.b\n"
+ "zip2 z14.b, z0.b, z14.b\n"
+ "smin z1.s, p2/M, z1.s, z15.s\n"
"ldp x21, x20, [%x[inptrs], #0x30]\n"
- "ld1b { z26.b }, p2/Z, [%x[params], #5, MUL VL]\n"
- "zip1 z27.b, z11.b, z4.b\n"
- "zip2 z4.b, z11.b, z4.b\n"
- "ld1b { z1.b }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "smin z28.s, p2/M, z28.s, z15.s\n"
+ "smin z3.s, p2/M, z3.s, z15.s\n"
+ "zip2 z2.b, z12.b, z18.b\n"
+ "zip1 z12.b, z12.b, z18.b\n"
+ "zip1 z8.b, z19.b, z14.b\n"
+ "zip2 z14.b, z19.b, z14.b\n"
+ "ld1w { z0.s }, p2/Z, [%x[params], #4, MUL VL]\n"
"addvl %x[params], %x[params], #8\n"
- "zip2 z30.b, z2.b, z22.b\n"
- "zip1 z2.b, z2.b, z22.b\n"
- "zip1 z28.b, z23.b, z5.b\n"
- "zip2 z5.b, z23.b, z5.b\n"
- "zip2 z19.b, z9.b, z20.b\n"
- "zip1 z9.b, z9.b, z20.b\n"
- "zip1 z11.b, z21.b, z17.b\n"
- "zip2 z17.b, z21.b, z17.b\n"
- "zip2 z12.b, z18.b, z27.b\n"
- "zip1 z18.b, z18.b, z27.b\n"
- "zip1 z20.b, z31.b, z4.b\n"
- "zip2 z4.b, z31.b, z4.b\n"
- "zip2 z24.b, z2.b, z28.b\n"
- "zip1 z2.b, z2.b, z28.b\n"
- "zip1 z0.b, z30.b, z5.b\n"
- "zip2 z5.b, z30.b, z5.b\n"
- "mov z22.d, z10.d\n"
- "mov z31.d, z10.d\n"
- "mov z21.d, z10.d\n"
+ "st1b { z28.s }, p1, [x11, x13]\n"
+ "zip2 z27.b, z5.b, z25.b\n"
+ "zip1 z5.b, z5.b, z25.b\n"
+ "st1b { z1.s }, p1, [x10, x13]\n"
+ "zip1 z18.b, z26.b, z30.b\n"
+ "zip2 z30.b, z26.b, z30.b\n"
+ "st1b { z3.s }, p1, [x9, x13]\n"
+ "zip2 z19.b, z29.b, z11.b\n"
+ "zip1 z29.b, z29.b, z11.b\n"
+ "incw x13\n"
+ "zip1 z28.b, z24.b, z7.b\n"
+ "zip2 z7.b, z24.b, z7.b\n"
+ "zip2 z25.b, z6.b, z22.b\n"
+ "zip1 z6.b, z6.b, z22.b\n"
+ "zip1 z22.b, z23.b, z4.b\n"
+ "zip2 z4.b, z23.b, z4.b\n"
+ "zip2 z23.b, z5.b, z18.b\n"
+ "zip1 z5.b, z5.b, z18.b\n"
+ "zip1 z3.b, z27.b, z30.b\n"
+ "zip2 z30.b, z27.b, z30.b\n"
+ "zip2 z11.b, z29.b, z28.b\n"
+ "zip1 z29.b, z29.b, z28.b\n"
+ "zip1 z16.b, z19.b, z7.b\n"
+ "zip2 z7.b, z19.b, z7.b\n"
+ "zip2 z1.b, z6.b, z22.b\n"
+ "zip1 z6.b, z6.b, z22.b\n"
+ "zip1 z27.b, z25.b, z4.b\n"
+ "zip2 z4.b, z25.b, z4.b\n"
+ "mov z26.d, z0.d\n"
+ "mov z25.d, z0.d\n"
+ "mov z28.d, z0.d\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
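A note on the zero-point handling visible in the hunk above: the operand list references arm_gemm::Requantize32's b_offset and c_offset, and the udot accumulators are corrected with mls before requantization. The usual scheme in u8*u8 dot-product kernels is to accumulate raw uint8 products and subtract the b_offset * sum(activations) cross term, with the remaining a_offset terms pre-folded into the bias at weight-packing time. That reading is an inference from the generated code, not something the diff states; the scalar sketch below models the arithmetic under that assumption, with every name invented for illustration.

    #include <cstdint>
    #include <cstddef>

    // Hedged scalar model (not the generated kernel): one output of a
    // u8 x u8 quantized dot product with the weight zero point folded out.
    int32_t u8q_dot(const uint8_t *x, const uint8_t *w, size_t n,
                    int32_t b_offset, int32_t bias)
    {
        int32_t acc = 0, x_sum = 0;   // fine for small n; kernels use i32 lanes
        for (size_t i = 0; i < n; i++) {
            acc   += int32_t(x[i]) * int32_t(w[i]); // UDOT accumulation
            x_sum += x[i];                          // UDOT against 0x01 bytes
        }
        // MLS step: remove the b_offset * sum(x) cross term. The a_offset
        // terms (a_offset * sum(w) and n * a_offset * b_offset) are assumed
        // to be pre-folded into 'bias' when the weights are packed.
        return acc - b_offset * x_sum + bias;
    }

Folding the activation-offset terms into the packed bias would explain why only a single mls per accumulator appears in the kernel body: the inner loop stays free of per-element offset arithmetic.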
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 5c26010c0d..5e32044434 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[16];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -91,316 +91,316 @@ void sve_u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x16, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x17, #0x0\n"
+ "ldr x26, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x16\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x16, [%x[params], %[offsetof_Params_outptrs]]\n"
"ldr x15, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z12.b }, p4/Z, [x21]\n"
- "ld1rb { z30.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z24.h }, p4/Z, [x22]\n"
- "ld1rh { z11.h }, p4/Z, [x21]\n"
- "ld1rh { z26.h }, p4/Z, [x20]\n"
- "ldp x13, x12, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x16, x15\n"
- "ldp x11, x10, [x24, #0x10]\n"
- "whilelt p2.s, x16, x15\n"
- "whilelt p1.s, x23, x15\n"
- "ldr x9, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1b { z14.h }, p4/Z, [x14]\n"
- "ld1b { z21.h }, p4/Z, [x14, #1, MUL VL]\n"
- "add x28, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x27, #0x0\n"
- "ld1b { z1.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "add x13, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x12, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x17\n"
+ "add x20, x26, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x26, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x26, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z10.b }, p4/Z, [x20]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x26, %[offsetof_Requantize32_minval]\n"
+ "add x20, x26, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z15.b }, p4/Z, [x23]\n"
+ "ld1rh { z26.h }, p4/Z, [x22]\n"
+ "ld1rh { z2.h }, p4/Z, [x21]\n"
+ "ld1rh { z14.h }, p4/Z, [x20]\n"
+ "incw x24\n"
+ "whilelt p3.h, x17, x15\n"
+ "ldp x9, x28, [x16, #0x0]\n"
+ "ldp x27, x26, [x16, #0x10]\n"
+ "whilelt p2.s, x17, x15\n"
+ "whilelt p1.s, x24, x15\n"
+ "ld1b { z13.h }, p4/Z, [x14]\n"
+ "ld1b { z11.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1b { z18.h }, p4/Z, [x14, #2, MUL VL]\n"
"ld1b { z6.h }, p4/Z, [x14, #3, MUL VL]\n"
- ".inst 0x455e19ce // usublb z14.h, z14.b, z30.b\n"
- ".inst 0x455e1ab5 // usublb z21.h, z21.b, z30.b\n"
- "ld1b { z2.h }, p4/Z, [x14, #4, MUL VL]\n"
- "ld1b { z18.h }, p4/Z, [x14, #5, MUL VL]\n"
- ".inst 0x455e1821 // usublb z1.h, z1.b, z30.b\n"
- ".inst 0x455e18c6 // usublb z6.h, z6.b, z30.b\n"
- "ld1b { z7.h }, p4/Z, [x14, #6, MUL VL]\n"
- "ld1b { z10.h }, p4/Z, [x14, #7, MUL VL]\n"
+ "ld1b { z20.h }, p4/Z, [x14, #4, MUL VL]\n"
+ "ld1b { z30.h }, p4/Z, [x14, #5, MUL VL]\n"
+ "ld1b { z28.h }, p4/Z, [x14, #6, MUL VL]\n"
+ "ld1b { z17.h }, p4/Z, [x14, #7, MUL VL]\n"
"inch x14, ALL, MUL #8\n"
- ".inst 0x455e1842 // usublb z2.h, z2.b, z30.b\n"
- "ld1w { z17.s }, p2/Z, [x9]\n"
- "ld1w { z16.s }, p1/Z, [x9, #1, MUL VL]\n"
- "uzp1 z5.s, z17.s, z16.s\n"
- "uzp2 z9.s, z17.s, z16.s\n"
- "ld1b { z8.h }, p4/Z, [x14]\n"
- "ldp x24, x23, [x28, #0x0]\n"
- "addvl x9, x9, #2\n"
- "mov z17.d, z5.d\n"
- "ldp x22, x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x20]\n"
- "mov z25.d, z9.d\n"
- "mov z16.d, z5.d\n"
- "ld1b { z0.h }, p3/Z, [x24, x16]\n"
- "ld1b { z29.h }, p3/Z, [x23, x16]\n"
- "mov z23.d, z9.d\n"
- "mov z22.d, z5.d\n"
- "ld1b { z4.h }, p3/Z, [x22, x16]\n"
- "ld1b { z13.h }, p3/Z, [x21, x16]\n"
- "mov z27.d, z9.d\n"
- ".inst 0x455e1a52 // usublb z18.h, z18.b, z30.b\n"
- "ld1b { z20.h }, p3/Z, [x20, x16]\n"
- "ldr x26, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x455e18e7 // usublb z7.h, z7.b, z30.b\n"
- ".inst 0x455e194a // usublb z10.h, z10.b, z30.b\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x9, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455e1908 // usublb z8.h, z8.b, z30.b\n"
- ".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
- ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
- ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
- ".inst 0x454c19ad // usublb z13.h, z13.b, z12.b\n"
- ".inst 0x454c1a94 // usublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454f19ad // usublb z13.h, z13.b, z15.b\n"
+ "ld1w { z19.s }, p2/Z, [x25]\n"
+ "ld1w { z24.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ ".inst 0x454f196b // usublb z11.h, z11.b, z15.b\n"
+ ".inst 0x454f1a52 // usublb z18.h, z18.b, z15.b\n"
+ ".inst 0x454f18c6 // usublb z6.h, z6.b, z15.b\n"
+ "ld1b { z5.h }, p4/Z, [x14]\n"
+ "ldp x24, x23, [x13, #0x0]\n"
+ ".inst 0x454f1a94 // usublb z20.h, z20.b, z15.b\n"
+ ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
+ "uzp1 z3.s, z19.s, z24.s\n"
+ "uzp2 z16.s, z19.s, z24.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldp x22, x21, [x13, #0x10]\n"
+ ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
+ ".inst 0x454f1a31 // usublb z17.h, z17.b, z15.b\n"
+ ".inst 0x454f18a5 // usublb z5.h, z5.b, z15.b\n"
+ "ldr x20, [x13, #0x20]\n"
+ "ld1b { z7.h }, p3/Z, [x24, x17]\n"
+ "ld1b { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1b { z4.h }, p3/Z, [x22, x17]\n"
+ "mov z8.d, z3.d\n"
+ "mov z21.d, z16.d\n"
+ "ld1b { z1.h }, p3/Z, [x21, x17]\n"
+ "mov z0.d, z3.d\n"
+ "mov z29.d, z16.d\n"
+ "ld1b { z27.h }, p3/Z, [x20, x17]\n"
+ "mov z19.d, z3.d\n"
+ "mov z9.d, z16.d\n"
+ ".inst 0x454a18e7 // usublb z7.h, z7.b, z10.b\n"
+ ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
+ ".inst 0x454a1884 // usublb z4.h, z4.b, z10.b\n"
+ ".inst 0x454a1821 // usublb z1.h, z1.b, z10.b\n"
+ ".inst 0x454a1b7b // usublb z27.h, z27.b, z10.b\n"
"1:" // Loop
- ".inst 0x44824005 // smlalb z5.s, p4/M, z0.h, z2.h\n"
- ".inst 0x44824409 // smlalt z9.s, p4/M, z0.h, z2.h\n"
- "ldr x20, [x28, #0x28]\n"
- "ldr x21, [x28, #0x38]\n"
- ".inst 0x448e43a5 // smlalb z5.s, p4/M, z29.h, z14.h\n"
- ".inst 0x44864011 // smlalb z17.s, p4/M, z0.h, z6.h\n"
- "ld1b { z3.h }, p3/Z, [x20, x16]\n"
- "ldr x20, [x28, #0x30]\n"
- ".inst 0x44954010 // smlalb z16.s, p4/M, z0.h, z21.h\n"
- ".inst 0x448e4016 // smlalb z22.s, p4/M, z0.h, z14.h\n"
- "ld1b { z31.h }, p3/Z, [x21, x16]\n"
- ".inst 0x454c1863 // usublb z3.h, z3.b, z12.b\n"
- ".inst 0x448e47a9 // smlalt z9.s, p4/M, z29.h, z14.h\n"
- ".inst 0x449241a5 // smlalb z5.s, p4/M, z13.h, z18.h\n"
- "ldr x21, [x28, #0x40]\n"
- "ld1b { z15.h }, p3/Z, [x20, x16]\n"
- ".inst 0x44864419 // smlalt z25.s, p4/M, z0.h, z6.h\n"
- ".inst 0x44954417 // smlalt z23.s, p4/M, z0.h, z21.h\n"
- ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
- "ldr x20, [x28, #0x48]\n"
- ".inst 0x448e441b // smlalt z27.s, p4/M, z0.h, z14.h\n"
- ".inst 0x44814091 // smlalb z17.s, p4/M, z4.h, z1.h\n"
- "ld1b { z19.h }, p3/Z, [x21, x16]\n"
- ".inst 0x454c19ef // usublb z15.h, z15.b, z12.b\n"
- ".inst 0x448141b0 // smlalb z16.s, p4/M, z13.h, z1.h\n"
- ".inst 0x449541b6 // smlalb z22.s, p4/M, z13.h, z21.h\n"
- "ld1b { z28.h }, p3/Z, [x20, x16]\n"
- ".inst 0x454c1a73 // usublb z19.h, z19.b, z12.b\n"
- ".inst 0x449245a9 // smlalt z9.s, p4/M, z13.h, z18.h\n"
- ".inst 0x448a4285 // smlalb z5.s, p4/M, z20.h, z10.h\n"
- "ldr x21, [x28, #0x50]\n"
- "ldr x20, [x28, #0x58]\n"
- ".inst 0x44814499 // smlalt z25.s, p4/M, z4.h, z1.h\n"
- ".inst 0x448145b7 // smlalt z23.s, p4/M, z13.h, z1.h\n"
- ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
- "ld1b { z4.h }, p3/Z, [x21, x16]\n"
- ".inst 0x449545bb // smlalt z27.s, p4/M, z13.h, z21.h\n"
- ".inst 0x448241b1 // smlalb z17.s, p4/M, z13.h, z2.h\n"
- "ld1b { z29.h }, p3/Z, [x20, x16]\n"
- "ldr x21, [x28, #0x60]\n"
- ".inst 0x44874070 // smlalb z16.s, p4/M, z3.h, z7.h\n"
- ".inst 0x44864296 // smlalb z22.s, p4/M, z20.h, z6.h\n"
- "ldr x20, [x28, #0x68]\n"
- ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
- ".inst 0x448a4689 // smlalt z9.s, p4/M, z20.h, z10.h\n"
- ".inst 0x449543e5 // smlalb z5.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
- "ld1b { z0.h }, p3/Z, [x21, x16]\n"
- ".inst 0x448245b9 // smlalt z25.s, p4/M, z13.h, z2.h\n"
- ".inst 0x44874477 // smlalt z23.s, p4/M, z3.h, z7.h\n"
- "ld1b { z3.h }, p3/Z, [x20, x16]\n"
- "ldr x20, [x28, #0x70]\n"
- ".inst 0x4486469b // smlalt z27.s, p4/M, z20.h, z6.h\n"
- ".inst 0x44874291 // smlalb z17.s, p4/M, z20.h, z7.h\n"
- ".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
- "ld1b { z13.h }, p3/Z, [x20, x16]\n"
- ".inst 0x44824290 // smlalb z16.s, p4/M, z20.h, z2.h\n"
- ".inst 0x448841f6 // smlalb z22.s, p4/M, z15.h, z8.h\n"
- ".inst 0x454c1863 // usublb z3.h, z3.b, z12.b\n"
- "ldr x20, [x28, #0x78]\n"
- ".inst 0x449547e9 // smlalt z9.s, p4/M, z31.h, z21.h\n"
- ".inst 0x44814265 // smlalb z5.s, p4/M, z19.h, z1.h\n"
- ".inst 0x454c19ad // usublb z13.h, z13.b, z12.b\n"
- "whilelt p0.h, x27, x15\n"
- ".inst 0x44874699 // smlalt z25.s, p4/M, z20.h, z7.h\n"
- ".inst 0x44824697 // smlalt z23.s, p4/M, z20.h, z2.h\n"
- "ld1w { z20.s }, p2/Z, [x26]\n"
+ ".inst 0x449440e3 // smlalb z3.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x449444f0 // smlalt z16.s, p4/M, z7.h, z20.h\n"
+ "ldr x25, [x13, #0x28]\n"
+ "ldr x24, [x13, #0x38]\n"
+ ".inst 0x448640e8 // smlalb z8.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x448b40e0 // smlalb z0.s, p4/M, z7.h, z11.h\n"
+ "ldr x23, [x13, #0x30]\n"
+ "ldr x22, [x13, #0x40]\n"
+ ".inst 0x448d40f3 // smlalb z19.s, p4/M, z7.h, z13.h\n"
+ ".inst 0x448644f5 // smlalt z21.s, p4/M, z7.h, z6.h\n"
+ "ldr x20, [x13, #0x48]\n"
+ "ldr x21, [x13, #0x50]\n"
+ "ld1b { z22.h }, p3/Z, [x25, x17]\n"
+ ".inst 0x448b44fd // smlalt z29.s, p4/M, z7.h, z11.h\n"
+ ".inst 0x448d44e9 // smlalt z9.s, p4/M, z7.h, z13.h\n"
+ "ld1b { z31.h }, p3/Z, [x24, x17]\n"
+ ".inst 0x448d4303 // smlalb z3.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x448d4710 // smlalt z16.s, p4/M, z24.h, z13.h\n"
+ "ld1b { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1b { z25.h }, p3/Z, [x22, x17]\n"
+ ".inst 0x44924088 // smlalb z8.s, p4/M, z4.h, z18.h\n"
+ ".inst 0x44924020 // smlalb z0.s, p4/M, z1.h, z18.h\n"
+ "ld1b { z23.h }, p3/Z, [x20, x17]\n"
+ "ldr x20, [x13, #0x58]\n"
+ ".inst 0x448b4033 // smlalb z19.s, p4/M, z1.h, z11.h\n"
+ ".inst 0x454a1ad6 // usublb z22.h, z22.b, z10.b\n"
+ ".inst 0x44924495 // smlalt z21.s, p4/M, z4.h, z18.h\n"
+ "ld1b { z12.h }, p3/Z, [x21, x17]\n"
+ ".inst 0x4492443d // smlalt z29.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448b4429 // smlalt z9.s, p4/M, z1.h, z11.h\n"
+ ".inst 0x454a1bff // usublb z31.h, z31.b, z10.b\n"
+ "ldr x21, [x13, #0x60]\n"
+ ".inst 0x449e4023 // smlalb z3.s, p4/M, z1.h, z30.h\n"
+ ".inst 0x449e4430 // smlalt z16.s, p4/M, z1.h, z30.h\n"
+ ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
+ "ld1b { z4.h }, p3/Z, [x20, x17]\n"
+ ".inst 0x44944028 // smlalb z8.s, p4/M, z1.h, z20.h\n"
+ ".inst 0x449c42c0 // smlalb z0.s, p4/M, z22.h, z28.h\n"
+ ".inst 0x454a1b39 // usublb z25.h, z25.b, z10.b\n"
+ "ldr x20, [x13, #0x68]\n"
+ ".inst 0x44864373 // smlalb z19.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x44944435 // smlalt z21.s, p4/M, z1.h, z20.h\n"
+ ".inst 0x454a1af7 // usublb z23.h, z23.b, z10.b\n"
+ "ld1b { z7.h }, p3/Z, [x21, x17]\n"
+ ".inst 0x449c46dd // smlalt z29.s, p4/M, z22.h, z28.h\n"
+ ".inst 0x44864769 // smlalt z9.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x454a198c // usublb z12.h, z12.b, z10.b\n"
+ "ldr x21, [x13, #0x70]\n"
+ ".inst 0x44914363 // smlalb z3.s, p4/M, z27.h, z17.h\n"
+ ".inst 0x44914770 // smlalt z16.s, p4/M, z27.h, z17.h\n"
+ ".inst 0x454a1884 // usublb z4.h, z4.b, z10.b\n"
+ "ld1b { z22.h }, p3/Z, [x20, x17]\n"
+ ".inst 0x449c4368 // smlalb z8.s, p4/M, z27.h, z28.h\n"
+ ".inst 0x44944360 // smlalb z0.s, p4/M, z27.h, z20.h\n"
+ ".inst 0x454a18e7 // usublb z7.h, z7.b, z10.b\n"
+ "ldr x20, [x13, #0x78]\n"
+ ".inst 0x44854313 // smlalb z19.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x449c4775 // smlalt z21.s, p4/M, z27.h, z28.h\n"
+ "ld1b { z1.h }, p3/Z, [x21, x17]\n"
+ "whilelt p0.h, x12, x15\n"
+ ".inst 0x4494477d // smlalt z29.s, p4/M, z27.h, z20.h\n"
+ ".inst 0x44854709 // smlalt z9.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x454a1ad6 // usublb z22.h, z22.b, z10.b\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
+ ".inst 0x448b43e3 // smlalb z3.s, p4/M, z31.h, z11.h\n"
+ ".inst 0x448b47f0 // smlalt z16.s, p4/M, z31.h, z11.h\n"
+ "ld1w { z27.s }, p1/Z, [x11, #1, MUL VL]\n"
"inch x14\n"
- ".inst 0x448845fb // smlalt z27.s, p4/M, z15.h, z8.h\n"
- ".inst 0x448e43f1 // smlalb z17.s, p4/M, z31.h, z14.h\n"
- "ld1w { z15.s }, p1/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x448d43e8 // smlalb z8.s, p4/M, z31.h, z13.h\n"
+ ".inst 0x449e42e0 // smlalb z0.s, p4/M, z23.h, z30.h\n"
+ ".inst 0x454a1821 // usublb z1.h, z1.b, z10.b\n"
"ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44924390 // smlalb z16.s, p4/M, z28.h, z18.h\n"
- ".inst 0x44824396 // smlalb z22.s, p4/M, z28.h, z2.h\n"
- "addvl x26, x26, #2\n"
- ".inst 0x44814669 // smlalt z9.s, p4/M, z19.h, z1.h\n"
- ".inst 0x44884385 // smlalb z5.s, p4/M, z28.h, z8.h\n"
- ".inst 0x448e47f9 // smlalt z25.s, p4/M, z31.h, z14.h\n"
- ".inst 0x44924797 // smlalt z23.s, p4/M, z28.h, z18.h\n"
- "ld1b { z31.h }, p3/Z, [x20, x16]\n"
- ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
- ".inst 0x4482479b // smlalt z27.s, p4/M, z28.h, z2.h\n"
- ".inst 0x44954271 // smlalb z17.s, p4/M, z19.h, z21.h\n"
- "uzp1 z2.s, z20.s, z15.s\n"
- "inch x16\n"
- ".inst 0x448e4090 // smlalb z16.s, p4/M, z4.h, z14.h\n"
- ".inst 0x448143b6 // smlalb z22.s, p4/M, z29.h, z1.h\n"
- "uzp2 z15.s, z20.s, z15.s\n"
- "ld1w { z20.s }, p2/Z, [x25]\n"
- ".inst 0x44884789 // smlalt z9.s, p4/M, z28.h, z8.h\n"
- ".inst 0x44864085 // smlalb z5.s, p4/M, z4.h, z6.h\n"
- "mov x20, x16\n"
+ ".inst 0x449442f3 // smlalb z19.s, p4/M, z23.h, z20.h\n"
+ ".inst 0x448d47f5 // smlalt z21.s, p4/M, z31.h, z13.h\n"
+ "ld1b { z31.h }, p3/Z, [x20, x17]\n"
+ "inch x17\n"
+ ".inst 0x449e46fd // smlalt z29.s, p4/M, z23.h, z30.h\n"
+ ".inst 0x449446e9 // smlalt z9.s, p4/M, z23.h, z20.h\n"
+ "uzp1 z20.s, z24.s, z27.s\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x44924323 // smlalb z3.s, p4/M, z25.h, z18.h\n"
+ ".inst 0x44924730 // smlalt z16.s, p4/M, z25.h, z18.h\n"
+ "uzp2 z24.s, z24.s, z27.s\n"
+ "ld1w { z27.s }, p2/Z, [x10]\n"
+ ".inst 0x448b4328 // smlalb z8.s, p4/M, z25.h, z11.h\n"
+ ".inst 0x448d4180 // smlalb z0.s, p4/M, z12.h, z13.h\n"
+ ".inst 0x454a1bff // usublb z31.h, z31.b, z10.b\n"
+ "mov x20, x17\n"
+ ".inst 0x44924093 // smlalb z19.s, p4/M, z4.h, z18.h\n"
+ ".inst 0x448b4735 // smlalt z21.s, p4/M, z25.h, z11.h\n"
+ "ld1w { z25.s }, p1/Z, [x10, #1, MUL VL]\n"
+ "whilelt p2.s, x17, x15\n"
+ ".inst 0x448d459d // smlalt z29.s, p4/M, z12.h, z13.h\n"
+ ".inst 0x44924489 // smlalt z9.s, p4/M, z4.h, z18.h\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x448542e3 // smlalb z3.s, p4/M, z23.h, z5.h\n"
+ ".inst 0x448546f0 // smlalt z16.s, p4/M, z23.h, z5.h\n"
"incw x20\n"
- ".inst 0x44954679 // smlalt z25.s, p4/M, z19.h, z21.h\n"
- ".inst 0x448e4497 // smlalt z23.s, p4/M, z4.h, z14.h\n"
- "ld1w { z19.s }, p1/Z, [x25, #1, MUL VL]\n"
- "uzp1 z21.s, z20.s, z19.s\n"
- ".inst 0x448147bb // smlalt z27.s, p4/M, z29.h, z1.h\n"
- ".inst 0x448a4391 // smlalb z17.s, p4/M, z28.h, z10.h\n"
- "uzp2 z1.s, z20.s, z19.s\n"
- "whilelt p2.s, x16, x15\n"
- ".inst 0x44864010 // smlalb z16.s, p4/M, z0.h, z6.h\n"
- ".inst 0x44924076 // smlalb z22.s, p4/M, z3.h, z18.h\n"
+ ".inst 0x449142e8 // smlalb z8.s, p4/M, z23.h, z17.h\n"
+ ".inst 0x448640e0 // smlalb z0.s, p4/M, z7.h, z6.h\n"
+ "uzp1 z11.s, z27.s, z25.s\n"
+ ".inst 0x449e42d3 // smlalb z19.s, p4/M, z22.h, z30.h\n"
+ ".inst 0x449146f5 // smlalt z21.s, p4/M, z23.h, z17.h\n"
+ "uzp2 z27.s, z27.s, z25.s\n"
+ ".inst 0x448644fd // smlalt z29.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x449e46c9 // smlalt z9.s, p4/M, z22.h, z30.h\n"
"whilelt p1.s, x20, x15\n"
- "whilelt p3.h, x16, x15\n"
- ".inst 0x44864489 // smlalt z9.s, p4/M, z4.h, z6.h\n"
- ".inst 0x44874005 // smlalb z5.s, p4/M, z0.h, z7.h\n"
- ".inst 0x04a274a5 // sqrdmulh z5.s, z5.s, z2.s\n"
- "addvl x25, x25, #2\n"
- ".inst 0x448a4799 // smlalt z25.s, p4/M, z28.h, z10.h\n"
- ".inst 0x44864417 // smlalt z23.s, p4/M, z0.h, z6.h\n"
- "and z19.d, z5.d, z21.d\n"
- ".inst 0x4492447b // smlalt z27.s, p4/M, z3.h, z18.h\n"
- ".inst 0x449243b1 // smlalb z17.s, p4/M, z29.h, z18.h\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448a41b0 // smlalb z16.s, p4/M, z13.h, z10.h\n"
- ".inst 0x448741b6 // smlalb z22.s, p4/M, z13.h, z7.h\n"
- "sqadd z5.s, z5.s, z19.s\n"
- ".inst 0x448292a5 // srshl z5.s, p4/M, z5.s, z21.s\n"
- ".inst 0x44874409 // smlalt z9.s, p4/M, z0.h, z7.h\n"
- ".inst 0x449247b9 // smlalt z25.s, p4/M, z29.h, z18.h\n"
- ".inst 0x04af7529 // sqrdmulh z9.s, z9.s, z15.s\n"
- ".inst 0x448a45b7 // smlalt z23.s, p4/M, z13.h, z10.h\n"
- ".inst 0x448745bb // smlalt z27.s, p4/M, z13.h, z7.h\n"
- "and z29.d, z9.d, z1.d\n"
- ".inst 0x44884071 // smlalb z17.s, p4/M, z3.h, z8.h\n"
- ".inst 0x448843f0 // smlalb z16.s, p4/M, z31.h, z8.h\n"
- ".inst 0x04a27631 // sqrdmulh z17.s, z17.s, z2.s\n"
- ".inst 0x448a43f6 // smlalb z22.s, p4/M, z31.h, z10.h\n"
- ".inst 0x44884479 // smlalt z25.s, p4/M, z3.h, z8.h\n"
- ".inst 0x04a27610 // sqrdmulh z16.s, z16.s, z2.s\n"
- ".inst 0x448847f7 // smlalt z23.s, p4/M, z31.h, z8.h\n"
- ".inst 0x448a47fb // smlalt z27.s, p4/M, z31.h, z10.h\n"
- ".inst 0x04a276d6 // sqrdmulh z22.s, z22.s, z2.s\n"
- "asr z29.s, z29.s, #0x1f\n"
- "and z18.d, z17.d, z21.d\n"
- ".inst 0x04af7739 // sqrdmulh z25.s, z25.s, z15.s\n"
- "and z20.d, z16.d, z21.d\n"
- ".inst 0x04af76f7 // sqrdmulh z23.s, z23.s, z15.s\n"
- "and z19.d, z22.d, z21.d\n"
- ".inst 0x04af777b // sqrdmulh z27.s, z27.s, z15.s\n"
- "sqadd z9.s, z9.s, z29.s\n"
- ".inst 0x44829029 // srshl z9.s, p4/M, z9.s, z1.s\n"
- "asr z18.s, z18.s, #0x1f\n"
- "and z7.d, z25.d, z1.d\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z6.d, z23.d, z1.d\n"
- "asr z19.s, z19.s, #0x1f\n"
- "and z2.d, z27.d, z1.d\n"
- "sqadd z17.s, z17.s, z18.s\n"
- "asr z7.s, z7.s, #0x1f\n"
- ".inst 0x448292b1 // srshl z17.s, p4/M, z17.s, z21.s\n"
- "sqadd z16.s, z16.s, z20.s\n"
+ "whilelt p3.h, x17, x15\n"
+ ".inst 0x44864183 // smlalb z3.s, p4/M, z12.h, z6.h\n"
+ ".inst 0x44864590 // smlalt z16.s, p4/M, z12.h, z6.h\n"
+ ".inst 0x449e4088 // smlalb z8.s, p4/M, z4.h, z30.h\n"
+ ".inst 0x44914020 // smlalb z0.s, p4/M, z1.h, z17.h\n"
+ ".inst 0x449c4033 // smlalb z19.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449e4495 // smlalt z21.s, p4/M, z4.h, z30.h\n"
+ ".inst 0x4491443d // smlalt z29.s, p4/M, z1.h, z17.h\n"
+ ".inst 0x449c4429 // smlalt z9.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c40e3 // smlalb z3.s, p4/M, z7.h, z28.h\n"
+ ".inst 0x449c44f0 // smlalt z16.s, p4/M, z7.h, z28.h\n"
+ ".inst 0x448542c8 // smlalb z8.s, p4/M, z22.h, z5.h\n"
+ ".inst 0x448543e0 // smlalb z0.s, p4/M, z31.h, z5.h\n"
+ ".inst 0x449143f3 // smlalb z19.s, p4/M, z31.h, z17.h\n"
+ ".inst 0x448546d5 // smlalt z21.s, p4/M, z22.h, z5.h\n"
+ ".inst 0x448547fd // smlalt z29.s, p4/M, z31.h, z5.h\n"
+ ".inst 0x449147e9 // smlalt z9.s, p4/M, z31.h, z17.h\n"
+ ".inst 0x04b47463 // sqrdmulh z3.s, z3.s, z20.s\n"
+ ".inst 0x04b87610 // sqrdmulh z16.s, z16.s, z24.s\n"
+ ".inst 0x04b47508 // sqrdmulh z8.s, z8.s, z20.s\n"
+ ".inst 0x04b47400 // sqrdmulh z0.s, z0.s, z20.s\n"
+ "and z4.d, z3.d, z11.d\n"
+ ".inst 0x04b47673 // sqrdmulh z19.s, z19.s, z20.s\n"
+ ".inst 0x04b876b5 // sqrdmulh z21.s, z21.s, z24.s\n"
+ "and z13.d, z16.d, z27.d\n"
+ "and z6.d, z8.d, z11.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "and z7.d, z0.d, z11.d\n"
+ ".inst 0x04b877bd // sqrdmulh z29.s, z29.s, z24.s\n"
+ ".inst 0x04b87529 // sqrdmulh z9.s, z9.s, z24.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- ".inst 0x448292b0 // srshl z16.s, p4/M, z16.s, z21.s\n"
- "sqadd z22.s, z22.s, z19.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- ".inst 0x448292b6 // srshl z22.s, p4/M, z22.s, z21.s\n"
- "sqadd z25.s, z25.s, z7.s\n"
- "sqadd z23.s, z23.s, z6.s\n"
- ".inst 0x44829039 // srshl z25.s, p4/M, z25.s, z1.s\n"
- ".inst 0x44829037 // srshl z23.s, p4/M, z23.s, z1.s\n"
- "sqadd z27.s, z27.s, z2.s\n"
- ".inst 0x453040a5 // sqxtnb z5.h, z5.s\n"
- ".inst 0x4482903b // srshl z27.s, p4/M, z27.s, z1.s\n"
- ".inst 0x45304231 // sqxtnb z17.h, z17.s\n"
- ".inst 0x45304210 // sqxtnb z16.h, z16.s\n"
- ".inst 0x453042d6 // sqxtnb z22.h, z22.s\n"
- ".inst 0x45304525 // sqxtnt z5.h, z9.s\n"
- ".inst 0x45304731 // sqxtnt z17.h, z25.s\n"
- ".inst 0x453046f0 // sqxtnt z16.h, z23.s\n"
- ".inst 0x45304776 // sqxtnt z22.h, z27.s\n"
- "sqadd z5.h, z5.h, z24.h\n"
- "smax z5.h, p4/M, z5.h, z11.h\n"
- "smin z5.h, p4/M, z5.h, z26.h\n"
- "sqadd z17.h, z17.h, z24.h\n"
- "sqadd z16.h, z16.h, z24.h\n"
- "smax z17.h, p4/M, z17.h, z11.h\n"
- "smax z16.h, p4/M, z16.h, z11.h\n"
- "sqadd z22.h, z22.h, z24.h\n"
- "smax z22.h, p4/M, z22.h, z11.h\n"
- "smin z17.h, p4/M, z17.h, z26.h\n"
- "st1b { z5.h }, p0, [x13, x27]\n"
- "smin z16.h, p4/M, z16.h, z26.h\n"
- "smin z22.h, p4/M, z22.h, z26.h\n"
- "st1b { z17.h }, p0, [x12, x27]\n"
- "st1b { z16.h }, p0, [x11, x27]\n"
- "st1b { z22.h }, p0, [x10, x27]\n"
- "ld1b { z14.h }, p4/Z, [x14]\n"
- "ld1b { z21.h }, p4/Z, [x14, #1, MUL VL]\n"
- "inch x27\n"
- "ld1b { z1.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "sqadd z3.s, z3.s, z4.s\n"
+ "and z20.d, z19.d, z11.d\n"
+ "and z18.d, z21.d, z27.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z13.s\n"
+ "and z13.d, z29.d, z27.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "and z23.d, z9.d, z27.d\n"
+ ".inst 0x44829163 // srshl z3.s, p4/M, z3.s, z11.s\n"
+ "sqadd z8.s, z8.s, z6.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z0.s, z0.s, z7.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
+ ".inst 0x44829370 // srshl z16.s, p4/M, z16.s, z27.s\n"
+ "sqadd z19.s, z19.s, z20.s\n"
+ "asr z23.s, z23.s, #0x1f\n"
+ ".inst 0x44829168 // srshl z8.s, p4/M, z8.s, z11.s\n"
+ "sqadd z21.s, z21.s, z18.s\n"
+ ".inst 0x45304063 // sqxtnb z3.h, z3.s\n"
+ ".inst 0x44829160 // srshl z0.s, p4/M, z0.s, z11.s\n"
+ "sqadd z29.s, z29.s, z13.s\n"
+ ".inst 0x44829173 // srshl z19.s, p4/M, z19.s, z11.s\n"
+ "sqadd z9.s, z9.s, z23.s\n"
+ ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
+ ".inst 0x44829375 // srshl z21.s, p4/M, z21.s, z27.s\n"
+ ".inst 0x45304000 // sqxtnb z0.h, z0.s\n"
+ ".inst 0x45304603 // sqxtnt z3.h, z16.s\n"
+ ".inst 0x4482937d // srshl z29.s, p4/M, z29.s, z27.s\n"
+ ".inst 0x44829369 // srshl z9.s, p4/M, z9.s, z27.s\n"
+ ".inst 0x45304273 // sqxtnb z19.h, z19.s\n"
+ ".inst 0x453046a8 // sqxtnt z8.h, z21.s\n"
+ ".inst 0x453047a0 // sqxtnt z0.h, z29.s\n"
+ ".inst 0x45304533 // sqxtnt z19.h, z9.s\n"
+ "sqadd z3.h, z3.h, z26.h\n"
+ "sqadd z8.h, z8.h, z26.h\n"
+ "sqadd z0.h, z0.h, z26.h\n"
+ "sqadd z19.h, z19.h, z26.h\n"
+ "smax z3.h, p4/M, z3.h, z2.h\n"
+ "smax z8.h, p4/M, z8.h, z2.h\n"
+ "smax z0.h, p4/M, z0.h, z2.h\n"
+ "smax z19.h, p4/M, z19.h, z2.h\n"
+ "smin z3.h, p4/M, z3.h, z14.h\n"
+ "smin z8.h, p4/M, z8.h, z14.h\n"
+ "smin z0.h, p4/M, z0.h, z14.h\n"
+ "smin z19.h, p4/M, z19.h, z14.h\n"
+ "st1b { z3.h }, p0, [x9, x12]\n"
+ "st1b { z8.h }, p0, [x28, x12]\n"
+ "st1b { z0.h }, p0, [x27, x12]\n"
+ "st1b { z19.h }, p0, [x26, x12]\n"
+ "inch x12\n"
+ "ld1b { z13.h }, p4/Z, [x14]\n"
+ "ld1b { z11.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1b { z18.h }, p4/Z, [x14, #2, MUL VL]\n"
"ld1b { z6.h }, p4/Z, [x14, #3, MUL VL]\n"
- ".inst 0x455e19ce // usublb z14.h, z14.b, z30.b\n"
- ".inst 0x455e1ab5 // usublb z21.h, z21.b, z30.b\n"
- "ld1b { z2.h }, p4/Z, [x14, #4, MUL VL]\n"
- "ld1b { z18.h }, p4/Z, [x14, #5, MUL VL]\n"
- ".inst 0x455e1821 // usublb z1.h, z1.b, z30.b\n"
- ".inst 0x455e18c6 // usublb z6.h, z6.b, z30.b\n"
- "ld1b { z7.h }, p4/Z, [x14, #6, MUL VL]\n"
- "ld1b { z10.h }, p4/Z, [x14, #7, MUL VL]\n"
+ "ld1b { z20.h }, p4/Z, [x14, #4, MUL VL]\n"
+ "ld1b { z30.h }, p4/Z, [x14, #5, MUL VL]\n"
+ "ld1b { z28.h }, p4/Z, [x14, #6, MUL VL]\n"
+ "ld1b { z17.h }, p4/Z, [x14, #7, MUL VL]\n"
"inch x14, ALL, MUL #8\n"
- ".inst 0x455e1842 // usublb z2.h, z2.b, z30.b\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "ld1w { z16.s }, p1/Z, [x21, #1, MUL VL]\n"
- "uzp1 z5.s, z17.s, z16.s\n"
- "uzp2 z9.s, z17.s, z16.s\n"
- "ld1b { z8.h }, p4/Z, [x14]\n"
- "ldp x24, x23, [x28, #0x0]\n"
+ ".inst 0x454f19ad // usublb z13.h, z13.b, z15.b\n"
+ "ld1w { z1.s }, p2/Z, [x21]\n"
+ "ld1w { z0.s }, p1/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
+ ".inst 0x454f196b // usublb z11.h, z11.b, z15.b\n"
+ ".inst 0x454f1a52 // usublb z18.h, z18.b, z15.b\n"
+ ".inst 0x454f18c6 // usublb z6.h, z6.b, z15.b\n"
+ "ld1b { z5.h }, p4/Z, [x14]\n"
+ "ldp x24, x23, [x13, #0x0]\n"
+ ".inst 0x454f1a94 // usublb z20.h, z20.b, z15.b\n"
+ ".inst 0x454f1bde // usublb z30.h, z30.b, z15.b\n"
+ "uzp1 z3.s, z1.s, z0.s\n"
+ "uzp2 z16.s, z1.s, z0.s\n"
"str x21, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x22, x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x20]\n"
- "mov z17.d, z5.d\n"
- "mov z25.d, z9.d\n"
- "ld1b { z0.h }, p3/Z, [x24, x16]\n"
- "ld1b { z29.h }, p3/Z, [x23, x16]\n"
- "mov z16.d, z5.d\n"
- "mov z23.d, z9.d\n"
- "ld1b { z4.h }, p3/Z, [x22, x16]\n"
- "ld1b { z13.h }, p3/Z, [x21, x16]\n"
- "mov z22.d, z5.d\n"
- "mov z27.d, z9.d\n"
- "ld1b { z20.h }, p3/Z, [x20, x16]\n"
- ".inst 0x455e1a52 // usublb z18.h, z18.b, z30.b\n"
- ".inst 0x455e18e7 // usublb z7.h, z7.b, z30.b\n"
- ".inst 0x455e194a // usublb z10.h, z10.b, z30.b\n"
- ".inst 0x455e1908 // usublb z8.h, z8.b, z30.b\n"
- ".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
- ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
- ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
- ".inst 0x454c19ad // usublb z13.h, z13.b, z12.b\n"
- ".inst 0x454c1a94 // usublb z20.h, z20.b, z12.b\n"
+ "ldp x22, x21, [x13, #0x10]\n"
+ ".inst 0x454f1b9c // usublb z28.h, z28.b, z15.b\n"
+ ".inst 0x454f1a31 // usublb z17.h, z17.b, z15.b\n"
+ ".inst 0x454f18a5 // usublb z5.h, z5.b, z15.b\n"
+ "ldr x20, [x13, #0x20]\n"
+ "ld1b { z7.h }, p3/Z, [x24, x17]\n"
+ "ld1b { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1b { z4.h }, p3/Z, [x22, x17]\n"
+ "mov z8.d, z3.d\n"
+ "mov z21.d, z16.d\n"
+ "ld1b { z1.h }, p3/Z, [x21, x17]\n"
+ "mov z0.d, z3.d\n"
+ "mov z29.d, z16.d\n"
+ "ld1b { z27.h }, p3/Z, [x20, x17]\n"
+ "mov z19.d, z3.d\n"
+ "mov z9.d, z16.d\n"
+ ".inst 0x454a18e7 // usublb z7.h, z7.b, z10.b\n"
+ ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
+ ".inst 0x454a1884 // usublb z4.h, z4.b, z10.b\n"
+ ".inst 0x454a1821 // usublb z1.h, z1.b, z10.b\n"
+ ".inst 0x454a1b7b // usublb z27.h, z27.b, z10.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
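The epilogue pattern repeated throughout these kernels (sqrdmulh by a per-channel multiplier, an and/asr/sqadd fixup, srshl by a negative per-channel shift, sqxtnb/sqxtnt narrowing, sqadd of the c_offset, then smax/smin against minval/maxval) is standard fixed-point requantization. A minimal scalar model of one lane, assuming gemmlowp-style round-half-away-from-zero semantics; the helper names are mine, not the library's:

    #include <algorithm>
    #include <cstdint>

    // SQRDMULH: saturating rounding doubling multiply, high 32 bits.
    static int32_t sqrdmulh(int32_t a, int32_t b)
    {
        if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX; // saturates
        int64_t p = int64_t(a) * int64_t(b) * 2 + (int64_t(1) << 31);
        return int32_t(p >> 32);
    }

    // SRSHL by a negative amount is a rounding right shift; the AND/ASR/SQADD
    // trio seen above nudges negative inputs down by one first, so ties round
    // away from zero rather than toward +infinity.
    static int32_t rounding_rshift(int32_t x, int shift) // assumes shift > 0
    {
        int64_t nudged = int64_t(x) + (x < 0 ? -1 : 0)
                       + (int64_t(1) << (shift - 1));
        return int32_t(nudged >> shift);
    }

    // One output lane: multiply, shift, re-centre on c_offset, clamp.
    static uint8_t requantize(int32_t acc, int32_t mul, int shift,
                              int32_t c_offset, int32_t minval, int32_t maxval)
    {
        int32_t v = rounding_rshift(sqrdmulh(acc, mul), shift) + c_offset;
        return uint8_t(std::clamp(v, minval, maxval)); // the smax/smin clamp
    }

The fixup matters for bit-exactness: SRSHL alone rounds ties upward, so -2.5 would shift to -2; subtracting 1 from negative accumulators first makes it -3, matching round-half-away-from-zero. The vector code narrows to halfwords before adding c_offset, whereas this sketch stays in 32 bits; the clamp makes the two orderings agree.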
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 1ea2fcbfbd..d439d05a60 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[25];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -100,348 +100,348 @@ void sve_u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x7, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x8, #0x0\n"
+ "ldr x27, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x7\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
- "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z26.b }, p4/Z, [x21]\n"
- "ld1rb { z13.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z19.h }, p4/Z, [x22]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
+ "ldr x26, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x16, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x15, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x14, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x8\n"
+ "add x20, x27, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x27, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x27, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z17.b }, p4/Z, [x20]\n"
+ "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x27, %[offsetof_Requantize32_minval]\n"
+ "add x20, x27, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z12.b }, p4/Z, [x23]\n"
+ "ld1rh { z25.h }, p4/Z, [x22]\n"
+ "ld1rh { z14.h }, p4/Z, [x21]\n"
"ld1rh { z9.h }, p4/Z, [x20]\n"
- "ldp x16, x15, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x7, x8\n"
- "ldp x14, x13, [x24, #0x10]\n"
- "whilelt p2.s, x7, x8\n"
- "whilelt p1.s, x23, x8\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1b { z25.h }, p4/Z, [x17]\n"
- "ld1b { z30.h }, p4/Z, [x17, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
- "ld1b { z14.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1b { z4.h }, p4/Z, [x17, #3, MUL VL]\n"
- ".inst 0x454d1b39 // usublb z25.h, z25.b, z13.b\n"
- ".inst 0x454d1bde // usublb z30.h, z30.b, z13.b\n"
- "ld1b { z10.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1b { z3.h }, p4/Z, [x17, #5, MUL VL]\n"
- ".inst 0x454d19ce // usublb z14.h, z14.b, z13.b\n"
- ".inst 0x454d1884 // usublb z4.h, z4.b, z13.b\n"
- "ld1b { z23.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1b { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
- ".inst 0x454d194a // usublb z10.h, z10.b, z13.b\n"
- "ld1w { z17.s }, p2/Z, [x12]\n"
- "ld1w { z16.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z8.s, z17.s, z16.s\n"
- "uzp2 z24.s, z17.s, z16.s\n"
- "ld1b { z2.h }, p4/Z, [x17]\n"
- "ldp x27, x26, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "mov z18.d, z8.d\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z0.d, z24.d\n"
- "mov z15.d, z8.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1b { z21.h }, p3/Z, [x27, x7]\n"
- "mov z1.d, z24.d\n"
- "mov z5.d, z8.d\n"
- "ld1b { z22.h }, p3/Z, [x26, x7]\n"
- "ld1b { z11.h }, p3/Z, [x25, x7]\n"
- "mov z6.d, z24.d\n"
- ".inst 0x454d1863 // usublb z3.h, z3.b, z13.b\n"
- "ld1b { z20.h }, p3/Z, [x24, x7]\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x454d1af7 // usublb z23.h, z23.b, z13.b\n"
- ".inst 0x454d18e7 // usublb z7.h, z7.b, z13.b\n"
- "ld1b { z28.h }, p3/Z, [x22, x7]\n"
- "ld1b { z16.h }, p3/Z, [x21, x7]\n"
- ".inst 0x454d1842 // usublb z2.h, z2.b, z13.b\n"
- ".inst 0x455a1ab5 // usublb z21.h, z21.b, z26.b\n"
- "ld1b { z31.h }, p3/Z, [x20, x7]\n"
- "ldr x9, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x455a1ad6 // usublb z22.h, z22.b, z26.b\n"
- ".inst 0x455a196b // usublb z11.h, z11.b, z26.b\n"
- "ldr x28, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455a1a94 // usublb z20.h, z20.b, z26.b\n"
- ".inst 0x455a1b7b // usublb z27.h, z27.b, z26.b\n"
- ".inst 0x455a1b9c // usublb z28.h, z28.b, z26.b\n"
- ".inst 0x455a1a10 // usublb z16.h, z16.b, z26.b\n"
- ".inst 0x455a1bff // usublb z31.h, z31.b, z26.b\n"
+ "incw x24\n"
+ "whilelt p3.h, x8, x17\n"
+ "ldp x11, x10, [x26, #0x0]\n"
+ "ldp x9, x28, [x26, #0x10]\n"
+ "whilelt p2.s, x8, x17\n"
+ "whilelt p1.s, x24, x17\n"
+ "ld1b { z28.h }, p4/Z, [x16]\n"
+ "ld1b { z20.h }, p4/Z, [x16, #1, MUL VL]\n"
+ "ld1b { z13.h }, p4/Z, [x16, #2, MUL VL]\n"
+ "ld1b { z18.h }, p4/Z, [x16, #3, MUL VL]\n"
+ "ld1b { z6.h }, p4/Z, [x16, #4, MUL VL]\n"
+ "ld1b { z2.h }, p4/Z, [x16, #5, MUL VL]\n"
+ "ld1b { z26.h }, p4/Z, [x16, #6, MUL VL]\n"
+ "ld1b { z21.h }, p4/Z, [x16, #7, MUL VL]\n"
+ "inch x16, ALL, MUL #8\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ "ld1w { z11.s }, p2/Z, [x25]\n"
+ "ld1w { z4.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ ".inst 0x454c1a94 // usublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454c19ad // usublb z13.h, z13.b, z12.b\n"
+ ".inst 0x454c1a52 // usublb z18.h, z18.b, z12.b\n"
+ "ld1b { z15.h }, p4/Z, [x16]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ ".inst 0x454c18c6 // usublb z6.h, z6.b, z12.b\n"
+ ".inst 0x454c1842 // usublb z2.h, z2.b, z12.b\n"
+ "uzp1 z5.s, z11.s, z4.s\n"
+ "uzp2 z11.s, z11.s, z4.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ ".inst 0x454c1b5a // usublb z26.h, z26.b, z12.b\n"
+ ".inst 0x454c1ab5 // usublb z21.h, z21.b, z12.b\n"
+ ".inst 0x454c19ef // usublb z15.h, z15.b, z12.b\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "mov z30.d, z5.d\n"
+ "mov z16.d, z11.d\n"
+ "mov z4.d, z5.d\n"
+ "mov z8.d, z11.d\n"
+ "mov z31.d, z5.d\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "mov z10.d, z11.d\n"
+ "ld1b { z3.h }, p3/Z, [x27, x8]\n"
+ "ld1b { z29.h }, p3/Z, [x26, x8]\n"
+ "ld1b { z23.h }, p3/Z, [x25, x8]\n"
+ "ld1b { z0.h }, p3/Z, [x24, x8]\n"
+ "ld1b { z24.h }, p3/Z, [x23, x8]\n"
+ "ld1b { z22.h }, p3/Z, [x22, x8]\n"
+ "ld1b { z27.h }, p3/Z, [x21, x8]\n"
+ "ld1b { z19.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
+ ".inst 0x45511bbd // usublb z29.h, z29.b, z17.b\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ ".inst 0x45511a73 // usublb z19.h, z19.b, z17.b\n"
"1:" // Loop
- ".inst 0x448242a8 // smlalb z8.s, p4/M, z21.h, z2.h\n"
- "ldr x21, [x11, #0x58]\n"
- "ldr x20, [x11, #0x78]\n"
- ".inst 0x448246b8 // smlalt z24.s, p4/M, z21.h, z2.h\n"
- ".inst 0x449942c8 // smlalb z8.s, p4/M, z22.h, z25.h\n"
- "ld1b { z17.h }, p3/Z, [x21, x7]\n"
- "ld1b { z29.h }, p3/Z, [x20, x7]\n"
- ".inst 0x449742b2 // smlalb z18.s, p4/M, z21.h, z23.h\n"
- "ldr x21, [x11, #0x60]\n"
- "ldr x20, [x11, #0x80]\n"
- ".inst 0x448e42af // smlalb z15.s, p4/M, z21.h, z14.h\n"
- ".inst 0x449942a5 // smlalb z5.s, p4/M, z21.h, z25.h\n"
- ".inst 0x449946d8 // smlalt z24.s, p4/M, z22.h, z25.h\n"
- ".inst 0x455a1a31 // usublb z17.h, z17.b, z26.b\n"
- ".inst 0x449e4168 // smlalb z8.s, p4/M, z11.h, z30.h\n"
- "ld1b { z22.h }, p3/Z, [x21, x7]\n"
- ".inst 0x455a1bbd // usublb z29.h, z29.b, z26.b\n"
- ".inst 0x449746a0 // smlalt z0.s, p4/M, z21.h, z23.h\n"
- ".inst 0x448e46a1 // smlalt z1.s, p4/M, z21.h, z14.h\n"
- "ldr x21, [x11, #0x68]\n"
- ".inst 0x449946a6 // smlalt z6.s, p4/M, z21.h, z25.h\n"
- "ld1b { z21.h }, p3/Z, [x20, x7]\n"
- "ldr x20, [x11, #0x88]\n"
- ".inst 0x449e4292 // smlalb z18.s, p4/M, z20.h, z30.h\n"
- ".inst 0x4484422f // smlalb z15.s, p4/M, z17.h, z4.h\n"
- ".inst 0x448a43a5 // smlalb z5.s, p4/M, z29.h, z10.h\n"
- ".inst 0x455a1ad6 // usublb z22.h, z22.b, z26.b\n"
- "ldr x22, [x11, #0x40]\n"
- ".inst 0x449e4578 // smlalt z24.s, p4/M, z11.h, z30.h\n"
- ".inst 0x455a1ab5 // usublb z21.h, z21.b, z26.b\n"
- ".inst 0x44844388 // smlalb z8.s, p4/M, z28.h, z4.h\n"
- "ld1b { z11.h }, p3/Z, [x21, x7]\n"
- ".inst 0x449e4680 // smlalt z0.s, p4/M, z20.h, z30.h\n"
- "ld1b { z20.h }, p3/Z, [x20, x7]\n"
- ".inst 0x44844621 // smlalt z1.s, p4/M, z17.h, z4.h\n"
- "ldr x21, [x11, #0x70]\n"
- ".inst 0x448a47a6 // smlalt z6.s, p4/M, z29.h, z10.h\n"
- "ldr x20, [x11, #0x98]\n"
- ".inst 0x448e4372 // smlalb z18.s, p4/M, z27.h, z14.h\n"
- "ldr x23, [x11, #0x50]\n"
- ".inst 0x449942cf // smlalb z15.s, p4/M, z22.h, z25.h\n"
- ".inst 0x449e42a5 // smlalb z5.s, p4/M, z21.h, z30.h\n"
- ".inst 0x455a196b // usublb z11.h, z11.b, z26.b\n"
- "ld1b { z17.h }, p3/Z, [x22, x7]\n"
- ".inst 0x44844798 // smlalt z24.s, p4/M, z28.h, z4.h\n"
- ".inst 0x455a1a94 // usublb z20.h, z20.b, z26.b\n"
- ".inst 0x448a4208 // smlalb z8.s, p4/M, z16.h, z10.h\n"
- "ld1b { z29.h }, p3/Z, [x21, x7]\n"
- "ld1b { z28.h }, p3/Z, [x20, x7]\n"
- ".inst 0x448e4760 // smlalt z0.s, p4/M, z27.h, z14.h\n"
- "ldr x22, [x11, #0x48]\n"
- ".inst 0x449946c1 // smlalt z1.s, p4/M, z22.h, z25.h\n"
- ".inst 0x449e46a6 // smlalt z6.s, p4/M, z21.h, z30.h\n"
- "ldr x21, [x11, #0x90]\n"
- "ldr x20, [x11, #0xa8]\n"
- ".inst 0x449943f2 // smlalb z18.s, p4/M, z31.h, z25.h\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x448a416f // smlalb z15.s, p4/M, z11.h, z10.h\n"
- ".inst 0x44834285 // smlalb z5.s, p4/M, z20.h, z3.h\n"
- ".inst 0x455a1a31 // usublb z17.h, z17.b, z26.b\n"
- ".inst 0x448a4618 // smlalt z24.s, p4/M, z16.h, z10.h\n"
- ".inst 0x455a1bbd // usublb z29.h, z29.b, z26.b\n"
- ".inst 0x448e43e8 // smlalb z8.s, p4/M, z31.h, z14.h\n"
- "ld1b { z16.h }, p3/Z, [x22, x7]\n"
- ".inst 0x455a1b9c // usublb z28.h, z28.b, z26.b\n"
- ".inst 0x449947e0 // smlalt z0.s, p4/M, z31.h, z25.h\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x448a4561 // smlalt z1.s, p4/M, z11.h, z10.h\n"
- "ld1b { z11.h }, p3/Z, [x20, x7]\n"
- ".inst 0x455a1b7b // usublb z27.h, z27.b, z26.b\n"
- ".inst 0x44834686 // smlalt z6.s, p4/M, z20.h, z3.h\n"
- "ldr x21, [x11, #0xa0]\n"
- "ldr x20, [x11, #0xb0]\n"
- ".inst 0x448a4232 // smlalb z18.s, p4/M, z17.h, z10.h\n"
- ".inst 0x449e43af // smlalb z15.s, p4/M, z29.h, z30.h\n"
- ".inst 0x455a1a10 // usublb z16.h, z16.b, z26.b\n"
- ".inst 0x448e4385 // smlalb z5.s, p4/M, z28.h, z14.h\n"
- ".inst 0x448e47f8 // smlalt z24.s, p4/M, z31.h, z14.h\n"
- ".inst 0x455a1b39 // usublb z25.h, z25.b, z26.b\n"
- "ld1b { z20.h }, p3/Z, [x21, x7]\n"
- ".inst 0x455a196b // usublb z11.h, z11.b, z26.b\n"
- ".inst 0x44834368 // smlalb z8.s, p4/M, z27.h, z3.h\n"
- "ld1b { z31.h }, p3/Z, [x20, x7]\n"
- ".inst 0x448a4620 // smlalt z0.s, p4/M, z17.h, z10.h\n"
- ".inst 0x449e47a1 // smlalt z1.s, p4/M, z29.h, z30.h\n"
- ".inst 0x448e4786 // smlalt z6.s, p4/M, z28.h, z14.h\n"
- "ldr x20, [x11, #0xb8]\n"
- ".inst 0x455a1a94 // usublb z20.h, z20.b, z26.b\n"
- ".inst 0x44834212 // smlalb z18.s, p4/M, z16.h, z3.h\n"
- ".inst 0x4497432f // smlalb z15.s, p4/M, z25.h, z23.h\n"
- ".inst 0x455a1bff // usublb z31.h, z31.b, z26.b\n"
- "ld1b { z30.h }, p3/Z, [x20, x7]\n"
- ".inst 0x44844165 // smlalb z5.s, p4/M, z11.h, z4.h\n"
- ".inst 0x44834778 // smlalt z24.s, p4/M, z27.h, z3.h\n"
- "ldr x20, [x11, #0xc0]\n"
- "ld1w { z17.s }, p2/Z, [x9]\n"
- ".inst 0x449742c8 // smlalb z8.s, p4/M, z22.h, z23.h\n"
- ".inst 0x44834600 // smlalt z0.s, p4/M, z16.h, z3.h\n"
- "ld1w { z14.s }, p1/Z, [x9, #1, MUL VL]\n"
- ".inst 0x455a1bde // usublb z30.h, z30.b, z26.b\n"
- ".inst 0x44974721 // smlalt z1.s, p4/M, z25.h, z23.h\n"
- ".inst 0x44844566 // smlalt z6.s, p4/M, z11.h, z4.h\n"
- "ld1b { z25.h }, p3/Z, [x20, x7]\n"
- "uzp1 z10.s, z17.s, z14.s\n"
- ".inst 0x44844372 // smlalb z18.s, p4/M, z27.h, z4.h\n"
- ".inst 0x4487428f // smlalb z15.s, p4/M, z20.h, z7.h\n"
- "uzp2 z14.s, z17.s, z14.s\n"
- "ld1w { z17.s }, p2/Z, [x28]\n"
- ".inst 0x448743e5 // smlalb z5.s, p4/M, z31.h, z7.h\n"
- ".inst 0x449746d8 // smlalt z24.s, p4/M, z22.h, z23.h\n"
- "ld1w { z16.s }, p1/Z, [x28, #1, MUL VL]\n"
- ".inst 0x455a1b39 // usublb z25.h, z25.b, z26.b\n"
- ".inst 0x448743a8 // smlalb z8.s, p4/M, z29.h, z7.h\n"
- ".inst 0x44844760 // smlalt z0.s, p4/M, z27.h, z4.h\n"
- "uzp1 z4.s, z17.s, z16.s\n"
- "inch x7\n"
- ".inst 0x44874681 // smlalt z1.s, p4/M, z20.h, z7.h\n"
- ".inst 0x448747e6 // smlalt z6.s, p4/M, z31.h, z7.h\n"
- ".inst 0x04aa7508 // sqrdmulh z8.s, z8.s, z10.s\n"
- "whilelt p0.h, x10, x8\n"
- ".inst 0x448742b2 // smlalb z18.s, p4/M, z21.h, z7.h\n"
- ".inst 0x4483416f // smlalb z15.s, p4/M, z11.h, z3.h\n"
- "uzp2 z22.s, z17.s, z16.s\n"
- "mov x20, x7\n"
- ".inst 0x449743c5 // smlalb z5.s, p4/M, z30.h, z23.h\n"
- ".inst 0x448747b8 // smlalt z24.s, p4/M, z29.h, z7.h\n"
- "and z17.d, z8.d, z4.d\n"
- "inch x17\n"
- ".inst 0x448746a0 // smlalt z0.s, p4/M, z21.h, z7.h\n"
- ".inst 0x44834561 // smlalt z1.s, p4/M, z11.h, z3.h\n"
- ".inst 0x04ae7718 // sqrdmulh z24.s, z24.s, z14.s\n"
- "incw x20\n"
- ".inst 0x449747c6 // smlalt z6.s, p4/M, z30.h, z23.h\n"
- ".inst 0x44824392 // smlalb z18.s, p4/M, z28.h, z2.h\n"
- "asr z17.s, z17.s, #0x1f\n"
- "whilelt p2.s, x7, x8\n"
- ".inst 0x448243cf // smlalb z15.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44824325 // smlalb z5.s, p4/M, z25.h, z2.h\n"
- "and z16.d, z24.d, z22.d\n"
- "whilelt p1.s, x20, x8\n"
- ".inst 0x44824780 // smlalt z0.s, p4/M, z28.h, z2.h\n"
- ".inst 0x448247c1 // smlalt z1.s, p4/M, z30.h, z2.h\n"
- ".inst 0x04aa7652 // sqrdmulh z18.s, z18.s, z10.s\n"
+ ".inst 0x448f4065 // smlalb z5.s, p4/M, z3.h, z15.h\n"
+ "ldr x25, [x15, #0x58]\n"
+ "ldr x24, [x15, #0x78]\n"
+ ".inst 0x448f446b // smlalt z11.s, p4/M, z3.h, z15.h\n"
+ "ldr x23, [x15, #0x60]\n"
+ "ldr x22, [x15, #0x80]\n"
+ ".inst 0x449a407e // smlalb z30.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x448d4064 // smlalb z4.s, p4/M, z3.h, z13.h\n"
+ ".inst 0x449c407f // smlalb z31.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449a4470 // smlalt z16.s, p4/M, z3.h, z26.h\n"
+ "ldr x21, [x15, #0x68]\n"
+ "ldr x20, [x15, #0x88]\n"
+ "ld1b { z1.h }, p3/Z, [x25, x8]\n"
+ "ld1b { z7.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448d4468 // smlalt z8.s, p4/M, z3.h, z13.h\n"
+ ".inst 0x449c446a // smlalt z10.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c43a5 // smlalb z5.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x449c47ab // smlalt z11.s, p4/M, z29.h, z28.h\n"
+ "ld1b { z29.h }, p3/Z, [x23, x8]\n"
+ "ld1b { z3.h }, p3/Z, [x22, x8]\n"
+ ".inst 0x4494401e // smlalb z30.s, p4/M, z0.h, z20.h\n"
+ "ldr x25, [x15, #0x40]\n"
+ "ldr x24, [x15, #0x70]\n"
+ "whilelt p0.h, x14, x17\n"
+ ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
+ ".inst 0x455118e7 // usublb z7.h, z7.b, z17.b\n"
+ ".inst 0x44944410 // smlalt z16.s, p4/M, z0.h, z20.h\n"
+ "ld1b { z0.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x45511bbd // usublb z29.h, z29.b, z17.b\n"
+ ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
+ "ldr x23, [x15, #0x98]\n"
+ "ldr x22, [x15, #0x50]\n"
+ ".inst 0x449442e5 // smlalb z5.s, p4/M, z23.h, z20.h\n"
+ ".inst 0x449446eb // smlalt z11.s, p4/M, z23.h, z20.h\n"
+ "ld1b { z23.h }, p3/Z, [x20, x8]\n"
+ "ldr x21, [x15, #0x48]\n"
+ ".inst 0x44924024 // smlalb z4.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448640ff // smlalb z31.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
+ "ldr x20, [x15, #0x90]\n"
+ ".inst 0x44924428 // smlalt z8.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448644ea // smlalt z10.s, p4/M, z7.h, z6.h\n"
+ "ld1b { z1.h }, p3/Z, [x25, x8]\n"
+ "ld1b { z7.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448d431e // smlalb z30.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ ".inst 0x448d4710 // smlalt z16.s, p4/M, z24.h, z13.h\n"
+ "ld1b { z24.h }, p3/Z, [x23, x8]\n"
+ ".inst 0x449242c5 // smlalb z5.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x449246cb // smlalt z11.s, p4/M, z22.h, z18.h\n"
+ "ldr x24, [x15, #0xa8]\n"
+ "ld1b { z22.h }, p3/Z, [x22, x8]\n"
+ ".inst 0x449c43a4 // smlalb z4.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x4494407f // smlalb z31.s, p4/M, z3.h, z20.h\n"
+ ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
+ "ldr x23, [x15, #0xa0]\n"
+ ".inst 0x449c47a8 // smlalt z8.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x4494446a // smlalt z10.s, p4/M, z3.h, z20.h\n"
+ ".inst 0x455118e7 // usublb z7.h, z7.b, z17.b\n"
+ "ldr x22, [x15, #0xb0]\n"
+ ".inst 0x449c427e // smlalb z30.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x449c4670 // smlalt z16.s, p4/M, z19.h, z28.h\n"
+ "ld1b { z28.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x44864365 // smlalb z5.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x4486476b // smlalt z11.s, p4/M, z27.h, z6.h\n"
+ "ld1b { z27.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
+ ".inst 0x44864004 // smlalb z4.s, p4/M, z0.h, z6.h\n"
+ ".inst 0x448242ff // smlalb z31.s, p4/M, z23.h, z2.h\n"
+ "ldr x21, [x15, #0xb8]\n"
+ "ldr x20, [x15, #0xc0]\n"
+ ".inst 0x44864408 // smlalt z8.s, p4/M, z0.h, z6.h\n"
+ "ld1b { z0.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448246ea // smlalt z10.s, p4/M, z23.h, z2.h\n"
+ ".inst 0x45511b9c // usublb z28.h, z28.b, z17.b\n"
+ ".inst 0x4486403e // smlalb z30.s, p4/M, z1.h, z6.h\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ "ld1b { z23.h }, p3/Z, [x23, x8]\n"
+ ".inst 0x44864430 // smlalt z16.s, p4/M, z1.h, z6.h\n"
+ ".inst 0x448d4265 // smlalb z5.s, p4/M, z19.h, z13.h\n"
+ ".inst 0x448d466b // smlalt z11.s, p4/M, z19.h, z13.h\n"
+ "ld1b { z6.h }, p3/Z, [x22, x8]\n"
+ "ld1b { z1.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x449440e4 // smlalb z4.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x448d431f // smlalb z31.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
+ "ld1w { z19.s }, p2/Z, [x13]\n"
+ ".inst 0x449444e8 // smlalt z8.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x448d470a // smlalt z10.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ "ld1w { z20.s }, p1/Z, [x13, #1, MUL VL]\n"
+ ".inst 0x4482439e // smlalb z30.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x455118c6 // usublb z6.h, z6.b, z17.b\n"
+ ".inst 0x44824790 // smlalt z16.s, p4/M, z28.h, z2.h\n"
+ "ld1b { z13.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x448242c5 // smlalb z5.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x448246cb // smlalt z11.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
+ "inch x8\n"
+ ".inst 0x449a4364 // smlalb z4.s, p4/M, z27.h, z26.h\n"
+ ".inst 0x4492401f // smlalb z31.s, p4/M, z0.h, z18.h\n"
+ "uzp1 z28.s, z19.s, z20.s\n"
+ "inch x16\n"
+ ".inst 0x449a4768 // smlalt z8.s, p4/M, z27.h, z26.h\n"
+ ".inst 0x4492440a // smlalt z10.s, p4/M, z0.h, z18.h\n"
+ "uzp2 z20.s, z19.s, z20.s\n"
+ "ld1w { z27.s }, p2/Z, [x12]\n"
+ ".inst 0x449242de // smlalb z30.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x449246d0 // smlalt z16.s, p4/M, z22.h, z18.h\n"
+ "ld1w { z19.s }, p1/Z, [x12, #1, MUL VL]\n"
+ ".inst 0x455119ad // usublb z13.h, z13.b, z17.b\n"
+ ".inst 0x449a43a5 // smlalb z5.s, p4/M, z29.h, z26.h\n"
+ ".inst 0x449a47ab // smlalt z11.s, p4/M, z29.h, z26.h\n"
+ "mov x21, x8\n"
+ "whilelt p2.s, x8, x17\n"
+ ".inst 0x449542e4 // smlalb z4.s, p4/M, z23.h, z21.h\n"
+ ".inst 0x449540df // smlalb z31.s, p4/M, z6.h, z21.h\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44824726 // smlalt z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x04aa75ef // sqrdmulh z15.s, z15.s, z10.s\n"
- "whilelt p3.h, x7, x8\n"
- "addvl x9, x9, #2\n"
- ".inst 0x04aa74a5 // sqrdmulh z5.s, z5.s, z10.s\n"
- "sqadd z8.s, z8.s, z17.s\n"
- ".inst 0x44829088 // srshl z8.s, p4/M, z8.s, z4.s\n"
- "addvl x28, x28, #2\n"
- "asr z16.s, z16.s, #0x1f\n"
- "and z21.d, z18.d, z4.d\n"
- ".inst 0x04ae7400 // sqrdmulh z0.s, z0.s, z14.s\n"
- "and z20.d, z15.d, z4.d\n"
- ".inst 0x04ae7421 // sqrdmulh z1.s, z1.s, z14.s\n"
- "and z28.d, z5.d, z4.d\n"
- ".inst 0x04ae74c6 // sqrdmulh z6.s, z6.s, z14.s\n"
- "sqadd z24.s, z24.s, z16.s\n"
- ".inst 0x448292d8 // srshl z24.s, p4/M, z24.s, z22.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z25.d, z0.d, z22.d\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z17.d, z1.d, z22.d\n"
- "asr z28.s, z28.s, #0x1f\n"
- "and z16.d, z6.d, z22.d\n"
- "sqadd z18.s, z18.s, z21.s\n"
- "asr z25.s, z25.s, #0x1f\n"
- ".inst 0x44829092 // srshl z18.s, p4/M, z18.s, z4.s\n"
- "sqadd z15.s, z15.s, z20.s\n"
- "asr z17.s, z17.s, #0x1f\n"
- ".inst 0x4482908f // srshl z15.s, p4/M, z15.s, z4.s\n"
- "sqadd z5.s, z5.s, z28.s\n"
- "asr z16.s, z16.s, #0x1f\n"
- ".inst 0x44829085 // srshl z5.s, p4/M, z5.s, z4.s\n"
- "sqadd z0.s, z0.s, z25.s\n"
- "sqadd z1.s, z1.s, z17.s\n"
- ".inst 0x448292c0 // srshl z0.s, p4/M, z0.s, z22.s\n"
- ".inst 0x448292c1 // srshl z1.s, p4/M, z1.s, z22.s\n"
- "sqadd z6.s, z6.s, z16.s\n"
- ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
- ".inst 0x448292c6 // srshl z6.s, p4/M, z6.s, z22.s\n"
- ".inst 0x45304252 // sqxtnb z18.h, z18.s\n"
- ".inst 0x453041ef // sqxtnb z15.h, z15.s\n"
+ "addvl x13, x13, #2\n"
+ ".inst 0x449546e8 // smlalt z8.s, p4/M, z23.h, z21.h\n"
+ ".inst 0x449544ca // smlalt z10.s, p4/M, z6.h, z21.h\n"
+ "uzp1 z23.s, z27.s, z19.s\n"
+ "addvl x12, x12, #2\n"
+ ".inst 0x4495407e // smlalb z30.s, p4/M, z3.h, z21.h\n"
+ ".inst 0x44954470 // smlalt z16.s, p4/M, z3.h, z21.h\n"
+ "uzp2 z6.s, z27.s, z19.s\n"
+ "incw x21\n"
+ ".inst 0x449540e5 // smlalb z5.s, p4/M, z7.h, z21.h\n"
+ ".inst 0x449544eb // smlalt z11.s, p4/M, z7.h, z21.h\n"
+ ".inst 0x44824004 // smlalb z4.s, p4/M, z0.h, z2.h\n"
+ ".inst 0x449a403f // smlalb z31.s, p4/M, z1.h, z26.h\n"
+ ".inst 0x44824408 // smlalt z8.s, p4/M, z0.h, z2.h\n"
+ ".inst 0x449a442a // smlalt z10.s, p4/M, z1.h, z26.h\n"
+ "whilelt p1.s, x21, x17\n"
+ "whilelt p3.h, x8, x17\n"
+ ".inst 0x448f431e // smlalb z30.s, p4/M, z24.h, z15.h\n"
+ ".inst 0x448f4710 // smlalt z16.s, p4/M, z24.h, z15.h\n"
+ ".inst 0x04bc74a5 // sqrdmulh z5.s, z5.s, z28.s\n"
+ ".inst 0x04b4756b // sqrdmulh z11.s, z11.s, z20.s\n"
+ ".inst 0x448f4024 // smlalb z4.s, p4/M, z1.h, z15.h\n"
+ ".inst 0x448f41bf // smlalb z31.s, p4/M, z13.h, z15.h\n"
+ "and z24.d, z5.d, z23.d\n"
+ ".inst 0x448f4428 // smlalt z8.s, p4/M, z1.h, z15.h\n"
+ ".inst 0x448f45aa // smlalt z10.s, p4/M, z13.h, z15.h\n"
+ "and z19.d, z11.d, z6.d\n"
+ ".inst 0x04bc77de // sqrdmulh z30.s, z30.s, z28.s\n"
+ ".inst 0x04b47610 // sqrdmulh z16.s, z16.s, z20.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ ".inst 0x04bc7484 // sqrdmulh z4.s, z4.s, z28.s\n"
+ ".inst 0x04bc77ff // sqrdmulh z31.s, z31.s, z28.s\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "and z7.d, z30.d, z23.d\n"
+ "sqadd z5.s, z5.s, z24.s\n"
+ ".inst 0x04b47508 // sqrdmulh z8.s, z8.s, z20.s\n"
+ "and z15.d, z4.d, z23.d\n"
+ "and z24.d, z31.d, z23.d\n"
+ ".inst 0x04b4754a // sqrdmulh z10.s, z10.s, z20.s\n"
+ "sqadd z11.s, z11.s, z19.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "and z18.d, z16.d, z6.d\n"
+ ".inst 0x448292e5 // srshl z5.s, p4/M, z5.s, z23.s\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "and z13.d, z8.d, z6.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ "and z3.d, z10.d, z6.d\n"
+ ".inst 0x448290cb // srshl z11.s, p4/M, z11.s, z6.s\n"
+ "sqadd z30.s, z30.s, z7.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z4.s, z4.s, z15.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z24.s\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ ".inst 0x448292fe // srshl z30.s, p4/M, z30.s, z23.s\n"
+ "sqadd z16.s, z16.s, z18.s\n"
".inst 0x453040a5 // sqxtnb z5.h, z5.s\n"
- ".inst 0x45304708 // sqxtnt z8.h, z24.s\n"
- ".inst 0x45304412 // sqxtnt z18.h, z0.s\n"
- ".inst 0x4530442f // sqxtnt z15.h, z1.s\n"
- ".inst 0x453044c5 // sqxtnt z5.h, z6.s\n"
- "sqadd z8.h, z8.h, z19.h\n"
- "smax z8.h, p4/M, z8.h, z12.h\n"
- "smin z8.h, p4/M, z8.h, z9.h\n"
- "sqadd z18.h, z18.h, z19.h\n"
- "sqadd z15.h, z15.h, z19.h\n"
- "smax z18.h, p4/M, z18.h, z12.h\n"
- "smax z15.h, p4/M, z15.h, z12.h\n"
- "sqadd z5.h, z5.h, z19.h\n"
- "smax z5.h, p4/M, z5.h, z12.h\n"
- "smin z18.h, p4/M, z18.h, z9.h\n"
- "st1b { z8.h }, p0, [x16, x10]\n"
- "smin z15.h, p4/M, z15.h, z9.h\n"
+ ".inst 0x448292e4 // srshl z4.s, p4/M, z4.s, z23.s\n"
+ "sqadd z8.s, z8.s, z13.s\n"
+ ".inst 0x448292ff // srshl z31.s, p4/M, z31.s, z23.s\n"
+ "sqadd z10.s, z10.s, z3.s\n"
+ ".inst 0x453043de // sqxtnb z30.h, z30.s\n"
+ ".inst 0x448290d0 // srshl z16.s, p4/M, z16.s, z6.s\n"
+ ".inst 0x45304084 // sqxtnb z4.h, z4.s\n"
+ ".inst 0x45304565 // sqxtnt z5.h, z11.s\n"
+ ".inst 0x448290c8 // srshl z8.s, p4/M, z8.s, z6.s\n"
+ ".inst 0x448290ca // srshl z10.s, p4/M, z10.s, z6.s\n"
+ ".inst 0x453043ff // sqxtnb z31.h, z31.s\n"
+ ".inst 0x4530461e // sqxtnt z30.h, z16.s\n"
+ ".inst 0x45304504 // sqxtnt z4.h, z8.s\n"
+ ".inst 0x4530455f // sqxtnt z31.h, z10.s\n"
+ "sqadd z5.h, z5.h, z25.h\n"
+ "sqadd z30.h, z30.h, z25.h\n"
+ "sqadd z4.h, z4.h, z25.h\n"
+ "sqadd z31.h, z31.h, z25.h\n"
+ "smax z5.h, p4/M, z5.h, z14.h\n"
+ "smax z30.h, p4/M, z30.h, z14.h\n"
+ "smax z4.h, p4/M, z4.h, z14.h\n"
+ "smax z31.h, p4/M, z31.h, z14.h\n"
"smin z5.h, p4/M, z5.h, z9.h\n"
- "st1b { z18.h }, p0, [x15, x10]\n"
- "st1b { z15.h }, p0, [x14, x10]\n"
- "st1b { z5.h }, p0, [x13, x10]\n"
- "ld1b { z25.h }, p4/Z, [x17]\n"
- "ld1b { z30.h }, p4/Z, [x17, #1, MUL VL]\n"
- "inch x10\n"
- "ld1b { z14.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1b { z4.h }, p4/Z, [x17, #3, MUL VL]\n"
- ".inst 0x454d1b39 // usublb z25.h, z25.b, z13.b\n"
- ".inst 0x454d1bde // usublb z30.h, z30.b, z13.b\n"
- "ld1b { z10.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1b { z3.h }, p4/Z, [x17, #5, MUL VL]\n"
- ".inst 0x454d19ce // usublb z14.h, z14.b, z13.b\n"
- ".inst 0x454d1884 // usublb z4.h, z4.b, z13.b\n"
- "ld1b { z23.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1b { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
- ".inst 0x454d194a // usublb z10.h, z10.b, z13.b\n"
- "ld1w { z17.s }, p2/Z, [x20]\n"
- "ld1w { z16.s }, p1/Z, [x20, #1, MUL VL]\n"
- "uzp1 z8.s, z17.s, z16.s\n"
- "uzp2 z24.s, z17.s, z16.s\n"
- "ld1b { z2.h }, p4/Z, [x17]\n"
- "ldp x27, x26, [x11, #0x0]\n"
+ "smin z30.h, p4/M, z30.h, z9.h\n"
+ "smin z4.h, p4/M, z4.h, z9.h\n"
+ "smin z31.h, p4/M, z31.h, z9.h\n"
+ "st1b { z5.h }, p0, [x11, x14]\n"
+ "st1b { z30.h }, p0, [x10, x14]\n"
+ "st1b { z4.h }, p0, [x9, x14]\n"
+ "st1b { z31.h }, p0, [x28, x14]\n"
+ "inch x14\n"
+ "ld1b { z28.h }, p4/Z, [x16]\n"
+ "ld1b { z20.h }, p4/Z, [x16, #1, MUL VL]\n"
+ "ld1b { z13.h }, p4/Z, [x16, #2, MUL VL]\n"
+ "ld1b { z18.h }, p4/Z, [x16, #3, MUL VL]\n"
+ "ld1b { z6.h }, p4/Z, [x16, #4, MUL VL]\n"
+ "ld1b { z2.h }, p4/Z, [x16, #5, MUL VL]\n"
+ "ld1b { z26.h }, p4/Z, [x16, #6, MUL VL]\n"
+ "ld1b { z21.h }, p4/Z, [x16, #7, MUL VL]\n"
+ "inch x16, ALL, MUL #8\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ "ld1w { z10.s }, p2/Z, [x20]\n"
+ "ld1w { z1.s }, p1/Z, [x20, #1, MUL VL]\n"
"addvl x20, x20, #2\n"
+ ".inst 0x454c1a94 // usublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454c19ad // usublb z13.h, z13.b, z12.b\n"
+ ".inst 0x454c1a52 // usublb z18.h, z18.b, z12.b\n"
+ "ld1b { z15.h }, p4/Z, [x16]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ ".inst 0x454c18c6 // usublb z6.h, z6.b, z12.b\n"
+ ".inst 0x454c1842 // usublb z2.h, z2.b, z12.b\n"
+ "uzp1 z5.s, z10.s, z1.s\n"
+ "uzp2 z11.s, z10.s, z1.s\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z18.d, z8.d\n"
- "mov z0.d, z24.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1b { z21.h }, p3/Z, [x27, x7]\n"
- "mov z15.d, z8.d\n"
- "mov z1.d, z24.d\n"
- "ld1b { z22.h }, p3/Z, [x26, x7]\n"
- "ld1b { z11.h }, p3/Z, [x25, x7]\n"
- "mov z5.d, z8.d\n"
- "mov z6.d, z24.d\n"
- "ld1b { z20.h }, p3/Z, [x24, x7]\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x454d1863 // usublb z3.h, z3.b, z13.b\n"
- ".inst 0x454d1af7 // usublb z23.h, z23.b, z13.b\n"
- "ld1b { z28.h }, p3/Z, [x22, x7]\n"
- "ld1b { z16.h }, p3/Z, [x21, x7]\n"
- ".inst 0x454d18e7 // usublb z7.h, z7.b, z13.b\n"
- ".inst 0x454d1842 // usublb z2.h, z2.b, z13.b\n"
- "ld1b { z31.h }, p3/Z, [x20, x7]\n"
- ".inst 0x455a1ab5 // usublb z21.h, z21.b, z26.b\n"
- ".inst 0x455a1ad6 // usublb z22.h, z22.b, z26.b\n"
- ".inst 0x455a196b // usublb z11.h, z11.b, z26.b\n"
- ".inst 0x455a1a94 // usublb z20.h, z20.b, z26.b\n"
- ".inst 0x455a1b7b // usublb z27.h, z27.b, z26.b\n"
- ".inst 0x455a1b9c // usublb z28.h, z28.b, z26.b\n"
- ".inst 0x455a1a10 // usublb z16.h, z16.b, z26.b\n"
- ".inst 0x455a1bff // usublb z31.h, z31.b, z26.b\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ ".inst 0x454c1b5a // usublb z26.h, z26.b, z12.b\n"
+ ".inst 0x454c1ab5 // usublb z21.h, z21.b, z12.b\n"
+ ".inst 0x454c19ef // usublb z15.h, z15.b, z12.b\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "mov z30.d, z5.d\n"
+ "mov z16.d, z11.d\n"
+ "mov z4.d, z5.d\n"
+ "mov z8.d, z11.d\n"
+ "mov z31.d, z5.d\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "mov z10.d, z11.d\n"
+ "ld1b { z3.h }, p3/Z, [x27, x8]\n"
+ "ld1b { z29.h }, p3/Z, [x26, x8]\n"
+ "ld1b { z23.h }, p3/Z, [x25, x8]\n"
+ "ld1b { z0.h }, p3/Z, [x24, x8]\n"
+ "ld1b { z24.h }, p3/Z, [x23, x8]\n"
+ "ld1b { z22.h }, p3/Z, [x22, x8]\n"
+ "ld1b { z27.h }, p3/Z, [x21, x8]\n"
+ "ld1b { z19.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
+ ".inst 0x45511bbd // usublb z29.h, z29.b, z17.b\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ ".inst 0x45511a73 // usublb z19.h, z19.b, z17.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
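The requantization tail visible in the hunk above follows the usual fixed-point pattern: a saturating rounding doubling multiply-high (sqrdmulh) against the per-channel multiplier, a rounding right shift (srshl with a negative shift count), narrowing, addition of the c_offset, and a min/max clamp. The following is a minimal scalar sketch of that pattern, not the generated kernel; it assumes round-half-up shift semantics and omits the 16-bit saturating narrow (sqxtnb/sqxtnt) that the assembly performs before the clamp.

    #include <algorithm>
    #include <cstdint>

    // Scalar model of SQRDMULH.S: saturating rounding doubling multiply, high half.
    static int32_t sqrdmulh_s32(int32_t a, int32_t b)
    {
        if (a == INT32_MIN && b == INT32_MIN) return INT32_MAX;      // the only saturating case
        return (int32_t)(((int64_t)a * b * 2 + (1LL << 31)) >> 32);  // round to nearest
    }

    // Scalar model of SRSHL with a non-positive shift: rounding shift right by -shift.
    static int32_t srshl_s32(int32_t x, int32_t shift)
    {
        if (shift >= 0) return x << shift;
        const int32_t s = -shift;
        return (int32_t)(((int64_t)x + (1LL << (s - 1))) >> s);
    }

    // One output element, in the order the kernel retires accumulators:
    // multiply-high, rounding shift, add c_offset, clamp to [minval, maxval].
    static uint8_t requantize(int32_t acc, int32_t mul, int32_t shift,
                              int32_t c_offset, int32_t minval, int32_t maxval)
    {
        const int32_t v = srshl_s32(sqrdmulh_s32(acc, mul), shift) + c_offset;
        return (uint8_t)std::min(std::max(v, minval), maxval);
    }

The and/asr #31/sqadd triple interleaved through the assembly appears to be the matching rounding fixup: for lanes with a negative (right) shift it subtracts one from negative products before the shift, so exact halves round away from zero rather than toward positive infinity; the sketch above omits that correction.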
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index b8adbb8262..5604760aa3 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[36];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -112,533 +112,533 @@ void sve_u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
__asm__ __volatile__(
"mov x2, #0x0\n"
- "mov x24, x2\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "ldr x3, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x27, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "incw x24\n"
+ "ldr x3, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x26, [%x[params], %[offsetof_Params_outptrs]]\n"
"ldr x4, [%x[params], %[offsetof_Params_weights]]\n"
- "add x21, x23, %[offsetof_Requantize32_a_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1rb { z30.b }, p4/Z, [x21]\n"
- "ld1rb { z10.b }, p4/Z, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_minval]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x6, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x2\n"
+ "add x20, x27, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x27, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x27, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z14.b }, p4/Z, [x20]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x27, %[offsetof_Requantize32_minval]\n"
+ "add x20, x27, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z12.b }, p4/Z, [x23]\n"
+ "ld1rh { z10.h }, p4/Z, [x22]\n"
+ "incw x24\n"
"ld1rh { z15.h }, p4/Z, [x21]\n"
- "ld1rh { z12.h }, p4/Z, [x20]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
"ld1rh { z13.h }, p4/Z, [x20]\n"
- "ldp x5, x6, [x22, #0x0]\n"
"whilelt p3.h, x2, x3\n"
- "ldp x7, x8, [x22, #0x10]\n"
+ "ldp x17, x16, [x26, #0x0]\n"
+ "ldp x15, x14, [x26, #0x10]\n"
"whilelt p2.s, x2, x3\n"
"whilelt p1.s, x24, x3\n"
- "ldr x10, [%x[params], %[offsetof_Params_bias]]\n"
- "add x17, %x[params], %[offsetof_Params_inptrs]\n"
- "ld1w { z17.s }, p2/Z, [x10]\n"
- "ld1w { z16.s }, p1/Z, [x10, #1, MUL VL]\n"
- "uzp1 z14.s, z17.s, z16.s\n"
- "ld1b { z26.h }, p4/Z, [x4]\n"
- "ld1b { z8.h }, p4/Z, [x4, #1, MUL VL]\n"
- "uzp2 z23.s, z17.s, z16.s\n"
- "addvl x10, x10, #2\n"
- "ld1b { z16.h }, p4/Z, [x4, #2, MUL VL]\n"
- "ld1b { z21.h }, p4/Z, [x4, #3, MUL VL]\n"
- "mov x16, #0x0\n"
- "mov z6.d, z14.d\n"
- "ld1b { z17.h }, p4/Z, [x4, #4, MUL VL]\n"
- "ldp x9, x28, [x17, #0x0]\n"
- "mov z18.d, z23.d\n"
- "mov z9.d, z14.d\n"
- "ldp x27, x26, [x17, #0x10]\n"
- "ldp x25, x24, [x17, #0x20]\n"
- "mov z20.d, z23.d\n"
- "mov z7.d, z14.d\n"
- "ldp x23, x22, [x17, #0x30]\n"
- "ldp x21, x20, [x17, #0x40]\n"
- "mov z1.d, z23.d\n"
- ".inst 0x454a1b5a // usublb z26.h, z26.b, z10.b\n"
- "ld1b { z22.h }, p3/Z, [x9, x2]\n"
- "ld1b { z2.h }, p3/Z, [x28, x2]\n"
- ".inst 0x454a1908 // usublb z8.h, z8.b, z10.b\n"
- ".inst 0x454a1a10 // usublb z16.h, z16.b, z10.b\n"
- "ld1b { z11.h }, p3/Z, [x27, x2]\n"
- "ld1b { z3.h }, p3/Z, [x26, x2]\n"
- ".inst 0x454a1ab5 // usublb z21.h, z21.b, z10.b\n"
- ".inst 0x454a1a31 // usublb z17.h, z17.b, z10.b\n"
- "ld1b { z29.h }, p3/Z, [x25, x2]\n"
- "ld1b { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x455e1ad6 // usublb z22.h, z22.b, z30.b\n"
- ".inst 0x455e1842 // usublb z2.h, z2.b, z30.b\n"
- "ld1b { z31.h }, p3/Z, [x23, x2]\n"
- "ld1b { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e196b // usublb z11.h, z11.b, z30.b\n"
- ".inst 0x455e1863 // usublb z3.h, z3.b, z30.b\n"
- "ld1b { z19.h }, p3/Z, [x21, x2]\n"
- "ld1b { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1bbd // usublb z29.h, z29.b, z30.b\n"
- ".inst 0x455e1884 // usublb z4.h, z4.b, z30.b\n"
- "ldr x15, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x14, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x10, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455e1bff // usublb z31.h, z31.b, z30.b\n"
- ".inst 0x455e1800 // usublb z0.h, z0.b, z30.b\n"
- ".inst 0x455e1a73 // usublb z19.h, z19.b, z30.b\n"
- ".inst 0x455e1b9c // usublb z28.h, z28.b, z30.b\n"
+ "ld1w { z5.s }, p2/Z, [x25]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ "ld1b { z25.h }, p4/Z, [x4]\n"
+ "ld1b { z28.h }, p4/Z, [x4, #1, MUL VL]\n"
+ "ld1b { z4.h }, p4/Z, [x4, #2, MUL VL]\n"
+ "ld1b { z23.h }, p4/Z, [x4, #3, MUL VL]\n"
+ "ld1b { z31.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldp x9, x28, [x5, #0x0]\n"
+ "uzp1 z6.s, z5.s, z16.s\n"
+ "uzp2 z30.s, z5.s, z16.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ ".inst 0x454c1b39 // usublb z25.h, z25.b, z12.b\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
+ ".inst 0x454c1af7 // usublb z23.h, z23.b, z12.b\n"
+ "ldp x27, x26, [x5, #0x10]\n"
+ "mov z17.d, z6.d\n"
+ "mov z8.d, z30.d\n"
+ "mov z21.d, z6.d\n"
+ "mov z27.d, z30.d\n"
+ "ldp x25, x24, [x5, #0x20]\n"
+ "mov z7.d, z6.d\n"
+ "mov z9.d, z30.d\n"
+ ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
+ "ldp x23, x22, [x5, #0x30]\n"
+ "ldp x21, x20, [x5, #0x40]\n"
+ "ld1b { z26.h }, p3/Z, [x9, x2]\n"
+ "ld1b { z16.h }, p3/Z, [x28, x2]\n"
+ "ld1b { z24.h }, p3/Z, [x27, x2]\n"
+ "ld1b { z5.h }, p3/Z, [x26, x2]\n"
+ "ld1b { z18.h }, p3/Z, [x25, x2]\n"
+ "ld1b { z3.h }, p3/Z, [x24, x2]\n"
+ "ld1b { z19.h }, p3/Z, [x23, x2]\n"
+ "ld1b { z11.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454e1b5a // usublb z26.h, z26.b, z14.b\n"
+ ".inst 0x454e1a10 // usublb z16.h, z16.b, z14.b\n"
+ "ld1b { z20.h }, p3/Z, [x21, x2]\n"
+ "ld1b { z29.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454e1b18 // usublb z24.h, z24.b, z14.b\n"
+ ".inst 0x454e18a5 // usublb z5.h, z5.b, z14.b\n"
+ ".inst 0x454e1a52 // usublb z18.h, z18.b, z14.b\n"
+ ".inst 0x454e1863 // usublb z3.h, z3.b, z14.b\n"
+ ".inst 0x454e1a73 // usublb z19.h, z19.b, z14.b\n"
+ ".inst 0x454e196b // usublb z11.h, z11.b, z14.b\n"
+ ".inst 0x454e1a94 // usublb z20.h, z20.b, z14.b\n"
+ ".inst 0x454e1bbd // usublb z29.h, z29.b, z14.b\n"
"1:" // Loop
- ".inst 0x449a42ce // smlalb z14.s, p4/M, z22.h, z26.h\n"
- ".inst 0x449a46d7 // smlalt z23.s, p4/M, z22.h, z26.h\n"
- "ldr x20, [x17, #0x50]\n"
- "ld1b { z27.h }, p3/Z, [x20, x2]\n"
- ".inst 0x4488404e // smlalb z14.s, p4/M, z2.h, z8.h\n"
- ".inst 0x449a4046 // smlalb z6.s, p4/M, z2.h, z26.h\n"
- "ldr x20, [x17, #0x58]\n"
- ".inst 0x455e1b7b // usublb z27.h, z27.b, z30.b\n"
- ".inst 0x449a4169 // smlalb z9.s, p4/M, z11.h, z26.h\n"
- ".inst 0x449a4067 // smlalb z7.s, p4/M, z3.h, z26.h\n"
- "ld1b { z5.h }, p3/Z, [x20, x2]\n"
- "ldr x20, [x17, #0x60]\n"
- ".inst 0x44884457 // smlalt z23.s, p4/M, z2.h, z8.h\n"
- ".inst 0x449043ae // smlalb z14.s, p4/M, z29.h, z16.h\n"
- "ld1b { z25.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x455e18a5 // usublb z5.h, z5.b, z30.b\n"
- ".inst 0x449a4452 // smlalt z18.s, p4/M, z2.h, z26.h\n"
- ".inst 0x449a4574 // smlalt z20.s, p4/M, z11.h, z26.h\n"
- "ld1b { z22.h }, p3/Z, [x20, x2]\n"
- ".inst 0x454a1b39 // usublb z25.h, z25.b, z10.b\n"
- ".inst 0x449a4461 // smlalt z1.s, p4/M, z3.h, z26.h\n"
- ".inst 0x448843a6 // smlalb z6.s, p4/M, z29.h, z8.h\n"
- "ldr x20, [x17, #0x68]\n"
+ ".inst 0x44994346 // smlalb z6.s, p4/M, z26.h, z25.h\n"
+ ".inst 0x4499475e // smlalt z30.s, p4/M, z26.h, z25.h\n"
+ "ldr x23, [x5, #0x50]\n"
+ "ldr x22, [x5, #0x58]\n"
+ ".inst 0x44994211 // smlalb z17.s, p4/M, z16.h, z25.h\n"
+ ".inst 0x44994315 // smlalb z21.s, p4/M, z24.h, z25.h\n"
+ "ldr x21, [x5, #0x60]\n"
+ "ld1b { z0.h }, p4/Z, [x4, #5, MUL VL]\n"
+ ".inst 0x449940a7 // smlalb z7.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994608 // smlalt z8.s, p4/M, z16.h, z25.h\n"
+ "ldr x20, [x5, #0x68]\n"
+ "ld1b { z26.h }, p4/Z, [x4, #6, MUL VL]\n"
+ "ld1b { z2.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x4499471b // smlalt z27.s, p4/M, z24.h, z25.h\n"
+ ".inst 0x449944a9 // smlalt z9.s, p4/M, z5.h, z25.h\n"
+ "ld1b { z22.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x449c4206 // smlalb z6.s, p4/M, z16.h, z28.h\n"
+ ".inst 0x449c461e // smlalt z30.s, p4/M, z16.h, z28.h\n"
+ "ld1b { z1.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
+ ".inst 0x449c4251 // smlalb z17.s, p4/M, z18.h, z28.h\n"
+ ".inst 0x449c40b5 // smlalb z21.s, p4/M, z5.h, z28.h\n"
+ "ld1b { z16.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454c1b5a // usublb z26.h, z26.b, z12.b\n"
+ ".inst 0x449c4067 // smlalb z7.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x454e1842 // usublb z2.h, z2.b, z14.b\n"
+ ".inst 0x449c4648 // smlalt z8.s, p4/M, z18.h, z28.h\n"
+ "ldr x20, [x5, #0x70]\n"
+ ".inst 0x449c44bb // smlalt z27.s, p4/M, z5.h, z28.h\n"
+ ".inst 0x449c4469 // smlalt z9.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x454e1ad6 // usublb z22.h, z22.b, z14.b\n"
+ "ld1b { z28.h }, p4/Z, [x4, #7, MUL VL]\n"
+ ".inst 0x44844246 // smlalb z6.s, p4/M, z18.h, z4.h\n"
+ ".inst 0x4484465e // smlalt z30.s, p4/M, z18.h, z4.h\n"
+ ".inst 0x454e1821 // usublb z1.h, z1.b, z14.b\n"
+ "inch x4, ALL, MUL #8\n"
+ ".inst 0x44844271 // smlalb z17.s, p4/M, z19.h, z4.h\n"
+ ".inst 0x44844075 // smlalb z21.s, p4/M, z3.h, z4.h\n"
+ ".inst 0x454e1a10 // usublb z16.h, z16.b, z14.b\n"
+ "ld1b { z25.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x44844047 // smlalb z7.s, p4/M, z2.h, z4.h\n"
+ ".inst 0x44844668 // smlalt z8.s, p4/M, z19.h, z4.h\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ "ldr x20, [x5, #0x78]\n"
+ ".inst 0x4484447b // smlalt z27.s, p4/M, z3.h, z4.h\n"
+ ".inst 0x44844449 // smlalt z9.s, p4/M, z2.h, z4.h\n"
+ "ld1b { z18.h }, p4/Z, [x4]\n"
+ "ldr x22, [x5, #0x80]\n"
+ ".inst 0x44974266 // smlalb z6.s, p4/M, z19.h, z23.h\n"
+ ".inst 0x4497467e // smlalt z30.s, p4/M, z19.h, z23.h\n"
+ ".inst 0x454e1b39 // usublb z25.h, z25.b, z14.b\n"
+ "ld1b { z4.h }, p4/Z, [x4, #1, MUL VL]\n"
+ ".inst 0x44974171 // smlalb z17.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x44974055 // smlalb z21.s, p4/M, z2.h, z23.h\n"
+ "ld1b { z19.h }, p3/Z, [x20, x2]\n"
+ "ldr x21, [x5, #0x88]\n"
+ ".inst 0x449742c7 // smlalb z7.s, p4/M, z22.h, z23.h\n"
+ ".inst 0x44974568 // smlalt z8.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x454c1a52 // usublb z18.h, z18.b, z12.b\n"
+ "ldr x20, [x5, #0x90]\n"
+ ".inst 0x4497445b // smlalt z27.s, p4/M, z2.h, z23.h\n"
+ ".inst 0x449746c9 // smlalt z9.s, p4/M, z22.h, z23.h\n"
+ "ld1b { z23.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
+ ".inst 0x449f4166 // smlalb z6.s, p4/M, z11.h, z31.h\n"
+ ".inst 0x449f457e // smlalt z30.s, p4/M, z11.h, z31.h\n"
+ ".inst 0x454e1a73 // usublb z19.h, z19.b, z14.b\n"
+ "ld1b { z11.h }, p4/Z, [x4, #2, MUL VL]\n"
+ ".inst 0x449f4031 // smlalb z17.s, p4/M, z1.h, z31.h\n"
+ ".inst 0x449f42d5 // smlalb z21.s, p4/M, z22.h, z31.h\n"
+ "ldr x23, [x5, #0x98]\n"
+ "ldr x22, [x5, #0xa0]\n"
+ ".inst 0x449f4287 // smlalb z7.s, p4/M, z20.h, z31.h\n"
+ ".inst 0x449f4428 // smlalt z8.s, p4/M, z1.h, z31.h\n"
+ ".inst 0x454e1af7 // usublb z23.h, z23.b, z14.b\n"
+ "ld1b { z1.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x449f46db // smlalt z27.s, p4/M, z22.h, z31.h\n"
+ ".inst 0x449f4689 // smlalt z9.s, p4/M, z20.h, z31.h\n"
+ ".inst 0x454c196b // usublb z11.h, z11.b, z12.b\n"
+ "ld1b { z31.h }, p4/Z, [x4, #3, MUL VL]\n"
+ ".inst 0x44804306 // smlalb z6.s, p4/M, z24.h, z0.h\n"
+ ".inst 0x4480471e // smlalt z30.s, p4/M, z24.h, z0.h\n"
+ "ld1b { z24.h }, p3/Z, [x20, x2]\n"
+ "ldr x20, [x5, #0xa8]\n"
+ ".inst 0x448040b1 // smlalb z17.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x448043b5 // smlalb z21.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x454e1821 // usublb z1.h, z1.b, z14.b\n"
+ "ldr x21, [x5, #0xb0]\n"
+ ".inst 0x44804207 // smlalb z7.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x448044a8 // smlalt z8.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
+ "ldr x13, [x5, #0xb8]\n"
+ ".inst 0x448047bb // smlalt z27.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x44804609 // smlalt z9.s, p4/M, z16.h, z0.h\n"
+ "ld1b { z0.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x454e1b18 // usublb z24.h, z24.b, z14.b\n"
+ ".inst 0x449a40a6 // smlalb z6.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a44be // smlalt z30.s, p4/M, z5.h, z26.h\n"
+ "ld1b { z5.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldr x12, [x5, #0xc0]\n"
+ ".inst 0x449a4071 // smlalb z17.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a4215 // smlalb z21.s, p4/M, z16.h, z26.h\n"
+ "ldr x11, [x5, #0xc8]\n"
+ "ldr x10, [x5, #0xd0]\n"
+ ".inst 0x449a4327 // smlalb z7.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449a4468 // smlalt z8.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x454e1800 // usublb z0.h, z0.b, z14.b\n"
+ "ldr x9, [x5, #0xd8]\n"
+ ".inst 0x449a461b // smlalt z27.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4729 // smlalt z9.s, p4/M, z25.h, z26.h\n"
+ "ld1b { z26.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c18a5 // usublb z5.h, z5.b, z12.b\n"
+ ".inst 0x449c4066 // smlalb z6.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c447e // smlalt z30.s, p4/M, z3.h, z28.h\n"
+ "ld1b { z3.h }, p4/Z, [x4, #5, MUL VL]\n"
+ "ldr x28, [x5, #0xe0]\n"
+ ".inst 0x449c4051 // smlalb z17.s, p4/M, z2.h, z28.h\n"
+ ".inst 0x449c4335 // smlalb z21.s, p4/M, z25.h, z28.h\n"
+ "ldr x27, [x5, #0xe8]\n"
+ "ldr x26, [x5, #0xf0]\n"
+ ".inst 0x449c4267 // smlalb z7.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x449c4448 // smlalt z8.s, p4/M, z2.h, z28.h\n"
+ ".inst 0x454e1b5a // usublb z26.h, z26.b, z14.b\n"
+ "ldr x25, [x5, #0xf8]\n"
+ ".inst 0x449c473b // smlalt z27.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4669 // smlalt z9.s, p4/M, z19.h, z28.h\n"
+ "ld1b { z28.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454c1863 // usublb z3.h, z3.b, z12.b\n"
+ ".inst 0x44924046 // smlalb z6.s, p4/M, z2.h, z18.h\n"
+ ".inst 0x4492445e // smlalt z30.s, p4/M, z2.h, z18.h\n"
"ld1b { z2.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x44884069 // smlalb z9.s, p4/M, z3.h, z8.h\n"
- ".inst 0x44884087 // smlalb z7.s, p4/M, z4.h, z8.h\n"
- ".inst 0x455e1ad6 // usublb z22.h, z22.b, z30.b\n"
- "ld1b { z26.h }, p3/Z, [x20, x2]\n"
- ".inst 0x449047b7 // smlalt z23.s, p4/M, z29.h, z16.h\n"
- ".inst 0x449543ee // smlalb z14.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454a1842 // usublb z2.h, z2.b, z10.b\n"
- "ldr x20, [x17, #0x70]\n"
- ".inst 0x448847b2 // smlalt z18.s, p4/M, z29.h, z8.h\n"
- ".inst 0x44884474 // smlalt z20.s, p4/M, z3.h, z8.h\n"
- "ld1b { z29.h }, p4/Z, [x4, #7, MUL VL]\n"
- ".inst 0x455e1b5a // usublb z26.h, z26.b, z30.b\n"
- ".inst 0x44884481 // smlalt z1.s, p4/M, z4.h, z8.h\n"
- ".inst 0x449043e6 // smlalb z6.s, p4/M, z31.h, z16.h\n"
+ "ldr x24, [x5, #0x100]\n"
+ ".inst 0x449242d1 // smlalb z17.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x44924275 // smlalb z21.s, p4/M, z19.h, z18.h\n"
+ "ldr x23, [x5, #0x108]\n"
+ "ldr x22, [x5, #0x110]\n"
+ ".inst 0x449242e7 // smlalb z7.s, p4/M, z23.h, z18.h\n"
+ ".inst 0x449246c8 // smlalt z8.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x454e1b9c // usublb z28.h, z28.b, z14.b\n"
+ "ldr x20, [x5, #0x118]\n"
+ ".inst 0x4492467b // smlalt z27.s, p4/M, z19.h, z18.h\n"
+ ".inst 0x449246e9 // smlalt z9.s, p4/M, z23.h, z18.h\n"
+ "ld1b { z18.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x454c1842 // usublb z2.h, z2.b, z12.b\n"
+ ".inst 0x448442c6 // smlalb z6.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x448446de // smlalt z30.s, p4/M, z22.h, z4.h\n"
+ "ld1b { z22.h }, p4/Z, [x4, #7, MUL VL]\n"
"inch x4, ALL, MUL #8\n"
- "ld1b { z8.h }, p3/Z, [x20, x2]\n"
- ".inst 0x44904089 // smlalb z9.s, p4/M, z4.h, z16.h\n"
- ".inst 0x44904367 // smlalb z7.s, p4/M, z27.h, z16.h\n"
- ".inst 0x454a1bbd // usublb z29.h, z29.b, z10.b\n"
- "ldr x20, [x17, #0x78]\n"
- ".inst 0x449547f7 // smlalt z23.s, p4/M, z31.h, z21.h\n"
- ".inst 0x4491400e // smlalb z14.s, p4/M, z0.h, z17.h\n"
- "ld1b { z24.h }, p4/Z, [x4]\n"
- ".inst 0x455e1908 // usublb z8.h, z8.b, z30.b\n"
- ".inst 0x449047f2 // smlalt z18.s, p4/M, z31.h, z16.h\n"
- ".inst 0x44904494 // smlalt z20.s, p4/M, z4.h, z16.h\n"
- "ld1b { z31.h }, p3/Z, [x20, x2]\n"
- ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
- ".inst 0x44904761 // smlalt z1.s, p4/M, z27.h, z16.h\n"
- ".inst 0x44954006 // smlalb z6.s, p4/M, z0.h, z21.h\n"
- "ldr x22, [x17, #0x80]\n"
+ ".inst 0x44844291 // smlalb z17.s, p4/M, z20.h, z4.h\n"
+ ".inst 0x448442f5 // smlalb z21.s, p4/M, z23.h, z4.h\n"
+ "whilelt p0.h, x6, x3\n"
+ "ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
+ ".inst 0x44844027 // smlalb z7.s, p4/M, z1.h, z4.h\n"
+ ".inst 0x44844688 // smlalt z8.s, p4/M, z20.h, z4.h\n"
+ ".inst 0x454e1a52 // usublb z18.h, z18.b, z14.b\n"
+ "ld1b { z20.h }, p3/Z, [x13, x2]\n"
+ ".inst 0x448446fb // smlalt z27.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x44844429 // smlalt z9.s, p4/M, z1.h, z4.h\n"
+ ".inst 0x454c1ad6 // usublb z22.h, z22.b, z12.b\n"
+ "ld1b { z4.h }, p4/Z, [x4]\n"
+ ".inst 0x448b43a6 // smlalb z6.s, p4/M, z29.h, z11.h\n"
+ ".inst 0x448b47be // smlalt z30.s, p4/M, z29.h, z11.h\n"
+ "ld1b { z29.h }, p3/Z, [x12, x2]\n"
+ ".inst 0x448b4211 // smlalb z17.s, p4/M, z16.h, z11.h\n"
+ ".inst 0x448b4315 // smlalb z21.s, p4/M, z24.h, z11.h\n"
+ ".inst 0x454e1a94 // usublb z20.h, z20.b, z14.b\n"
+ ".inst 0x448b4007 // smlalb z7.s, p4/M, z0.h, z11.h\n"
+ ".inst 0x448b4608 // smlalt z8.s, p4/M, z16.h, z11.h\n"
+ ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
+ ".inst 0x448b471b // smlalt z27.s, p4/M, z24.h, z11.h\n"
+ ".inst 0x448b4409 // smlalt z9.s, p4/M, z0.h, z11.h\n"
+ "ld1b { z11.h }, p3/Z, [x11, x2]\n"
+ ".inst 0x454e1bbd // usublb z29.h, z29.b, z14.b\n"
+ ".inst 0x449f4206 // smlalb z6.s, p4/M, z16.h, z31.h\n"
+ ".inst 0x449f461e // smlalt z30.s, p4/M, z16.h, z31.h\n"
"ld1b { z16.h }, p4/Z, [x4, #1, MUL VL]\n"
- ".inst 0x44954369 // smlalb z9.s, p4/M, z27.h, z21.h\n"
- ".inst 0x449540a7 // smlalb z7.s, p4/M, z5.h, z21.h\n"
- ".inst 0x455e1bff // usublb z31.h, z31.b, z30.b\n"
- "ldr x21, [x17, #0x88]\n"
- ".inst 0x44914417 // smlalt z23.s, p4/M, z0.h, z17.h\n"
- ".inst 0x4499416e // smlalb z14.s, p4/M, z11.h, z25.h\n"
- ".inst 0x454a1a10 // usublb z16.h, z16.b, z10.b\n"
- "ldr x20, [x17, #0x90]\n"
- ".inst 0x44954412 // smlalt z18.s, p4/M, z0.h, z21.h\n"
- ".inst 0x44954774 // smlalt z20.s, p4/M, z27.h, z21.h\n"
- "ld1b { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1800 // usublb z0.h, z0.b, z30.b\n"
- ".inst 0x449544a1 // smlalt z1.s, p4/M, z5.h, z21.h\n"
- ".inst 0x449142c6 // smlalb z6.s, p4/M, z22.h, z17.h\n"
- "ld1b { z21.h }, p4/Z, [x4, #2, MUL VL]\n"
- ".inst 0x454a1ab5 // usublb z21.h, z21.b, z10.b\n"
- ".inst 0x449140a9 // smlalb z9.s, p4/M, z5.h, z17.h\n"
- ".inst 0x44914267 // smlalb z7.s, p4/M, z19.h, z17.h\n"
- "ldr x23, [x17, #0x98]\n"
- "ldr x22, [x17, #0xa0]\n"
- ".inst 0x44994577 // smlalt z23.s, p4/M, z11.h, z25.h\n"
- ".inst 0x4482406e // smlalb z14.s, p4/M, z3.h, z2.h\n"
- "ld1b { z11.h }, p3/Z, [x21, x2]\n"
- ".inst 0x455e196b // usublb z11.h, z11.b, z30.b\n"
- ".inst 0x449146d2 // smlalt z18.s, p4/M, z22.h, z17.h\n"
- ".inst 0x449144b4 // smlalt z20.s, p4/M, z5.h, z17.h\n"
- "ld1b { z22.h }, p4/Z, [x4, #3, MUL VL]\n"
- ".inst 0x454a1ad6 // usublb z22.h, z22.b, z10.b\n"
- ".inst 0x44914661 // smlalt z1.s, p4/M, z19.h, z17.h\n"
- ".inst 0x44994066 // smlalb z6.s, p4/M, z3.h, z25.h\n"
- "ld1b { z17.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1a31 // usublb z17.h, z17.b, z30.b\n"
- ".inst 0x44994389 // smlalb z9.s, p4/M, z28.h, z25.h\n"
- ".inst 0x44994347 // smlalb z7.s, p4/M, z26.h, z25.h\n"
- "ldr x20, [x17, #0xa8]\n"
- "ldr x21, [x17, #0xb0]\n"
- ".inst 0x44824477 // smlalt z23.s, p4/M, z3.h, z2.h\n"
- ".inst 0x449d408e // smlalb z14.s, p4/M, z4.h, z29.h\n"
- "ldr x13, [x17, #0xb8]\n"
- "ldr x12, [x17, #0xc0]\n"
- ".inst 0x44994472 // smlalt z18.s, p4/M, z3.h, z25.h\n"
- ".inst 0x44994794 // smlalt z20.s, p4/M, z28.h, z25.h\n"
- "ld1b { z3.h }, p3/Z, [x23, x2]\n"
- ".inst 0x455e1863 // usublb z3.h, z3.b, z30.b\n"
- ".inst 0x44994741 // smlalt z1.s, p4/M, z26.h, z25.h\n"
- ".inst 0x44824086 // smlalb z6.s, p4/M, z4.h, z2.h\n"
- "ld1b { z25.h }, p4/Z, [x4, #4, MUL VL]\n"
- ".inst 0x454a1b39 // usublb z25.h, z25.b, z10.b\n"
- ".inst 0x44824349 // smlalb z9.s, p4/M, z26.h, z2.h\n"
- ".inst 0x44824107 // smlalb z7.s, p4/M, z8.h, z2.h\n"
- "ldr x11, [x17, #0xc8]\n"
- "ldr x10, [x17, #0xd0]\n"
- ".inst 0x449d4497 // smlalt z23.s, p4/M, z4.h, z29.h\n"
- ".inst 0x4498436e // smlalb z14.s, p4/M, z27.h, z24.h\n"
- "ldr x9, [x17, #0xd8]\n"
- "ldr x28, [x17, #0xe0]\n"
- ".inst 0x44824492 // smlalt z18.s, p4/M, z4.h, z2.h\n"
- ".inst 0x44824754 // smlalt z20.s, p4/M, z26.h, z2.h\n"
- "ld1b { z4.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1884 // usublb z4.h, z4.b, z30.b\n"
- ".inst 0x44824501 // smlalt z1.s, p4/M, z8.h, z2.h\n"
- ".inst 0x449d4366 // smlalb z6.s, p4/M, z27.h, z29.h\n"
+ ".inst 0x449f4331 // smlalb z17.s, p4/M, z25.h, z31.h\n"
+ ".inst 0x449f4015 // smlalb z21.s, p4/M, z0.h, z31.h\n"
+ ".inst 0x449f4347 // smlalb z7.s, p4/M, z26.h, z31.h\n"
+ ".inst 0x449f4728 // smlalt z8.s, p4/M, z25.h, z31.h\n"
+ ".inst 0x454e196b // usublb z11.h, z11.b, z14.b\n"
+ ".inst 0x449f441b // smlalt z27.s, p4/M, z0.h, z31.h\n"
+ ".inst 0x449f4749 // smlalt z9.s, p4/M, z26.h, z31.h\n"
+ "ld1b { z31.h }, p3/Z, [x10, x2]\n"
+ ".inst 0x454c1a10 // usublb z16.h, z16.b, z12.b\n"
+ ".inst 0x44854326 // smlalb z6.s, p4/M, z25.h, z5.h\n"
+ ".inst 0x4485473e // smlalt z30.s, p4/M, z25.h, z5.h\n"
+ "ld1b { z25.h }, p4/Z, [x4, #2, MUL VL]\n"
+ ".inst 0x44854271 // smlalb z17.s, p4/M, z19.h, z5.h\n"
+ ".inst 0x44854355 // smlalb z21.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854387 // smlalb z7.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x44854668 // smlalt z8.s, p4/M, z19.h, z5.h\n"
+ ".inst 0x454e1bff // usublb z31.h, z31.b, z14.b\n"
+ ".inst 0x4485475b // smlalt z27.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854789 // smlalt z9.s, p4/M, z28.h, z5.h\n"
+ "ld1b { z5.h }, p3/Z, [x9, x2]\n"
+ ".inst 0x454c1b39 // usublb z25.h, z25.b, z12.b\n"
+ ".inst 0x44834266 // smlalb z6.s, p4/M, z19.h, z3.h\n"
+ ".inst 0x4483467e // smlalt z30.s, p4/M, z19.h, z3.h\n"
+ "ld1b { z19.h }, p4/Z, [x4, #3, MUL VL]\n"
+ ".inst 0x448342f1 // smlalb z17.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x44834395 // smlalb z21.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834247 // smlalb z7.s, p4/M, z18.h, z3.h\n"
+ ".inst 0x448346e8 // smlalt z8.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x454e18a5 // usublb z5.h, z5.b, z14.b\n"
+ ".inst 0x4483479b // smlalt z27.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834649 // smlalt z9.s, p4/M, z18.h, z3.h\n"
+ "ld1b { z3.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x454c1a73 // usublb z19.h, z19.b, z12.b\n"
+ ".inst 0x448242e6 // smlalb z6.s, p4/M, z23.h, z2.h\n"
+ ".inst 0x448246fe // smlalt z30.s, p4/M, z23.h, z2.h\n"
+ "ld1b { z23.h }, p4/Z, [x4, #4, MUL VL]\n"
+ ".inst 0x44824031 // smlalb z17.s, p4/M, z1.h, z2.h\n"
+ ".inst 0x44824255 // smlalb z21.s, p4/M, z18.h, z2.h\n"
+ ".inst 0x44824287 // smlalb z7.s, p4/M, z20.h, z2.h\n"
+ ".inst 0x44824428 // smlalt z8.s, p4/M, z1.h, z2.h\n"
+ ".inst 0x454e1863 // usublb z3.h, z3.b, z14.b\n"
+ "ld1b { z1.h }, p3/Z, [x27, x2]\n"
+ ".inst 0x4482465b // smlalt z27.s, p4/M, z18.h, z2.h\n"
+ ".inst 0x44824689 // smlalt z9.s, p4/M, z20.h, z2.h\n"
+ ".inst 0x454c1af7 // usublb z23.h, z23.b, z12.b\n"
"ld1b { z2.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x454a1842 // usublb z2.h, z2.b, z10.b\n"
- ".inst 0x449d4109 // smlalb z9.s, p4/M, z8.h, z29.h\n"
- ".inst 0x449d43e7 // smlalb z7.s, p4/M, z31.h, z29.h\n"
- "ldr x27, [x17, #0xe8]\n"
- "ldr x26, [x17, #0xf0]\n"
- ".inst 0x44984777 // smlalt z23.s, p4/M, z27.h, z24.h\n"
- ".inst 0x449040ae // smlalb z14.s, p4/M, z5.h, z16.h\n"
- "ldr x25, [x17, #0xf8]\n"
- "ldr x24, [x17, #0x100]\n"
- ".inst 0x449d4772 // smlalt z18.s, p4/M, z27.h, z29.h\n"
- ".inst 0x449d4514 // smlalt z20.s, p4/M, z8.h, z29.h\n"
- "ld1b { z27.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1b7b // usublb z27.h, z27.b, z30.b\n"
- ".inst 0x449d47e1 // smlalt z1.s, p4/M, z31.h, z29.h\n"
- ".inst 0x449840a6 // smlalb z6.s, p4/M, z5.h, z24.h\n"
- "ld1b { z29.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x454a1bbd // usublb z29.h, z29.b, z10.b\n"
- ".inst 0x449843e9 // smlalb z9.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984007 // smlalb z7.s, p4/M, z0.h, z24.h\n"
- "ldr x23, [x17, #0x108]\n"
- "ldr x22, [x17, #0x110]\n"
- ".inst 0x449044b7 // smlalt z23.s, p4/M, z5.h, z16.h\n"
- ".inst 0x4495438e // smlalb z14.s, p4/M, z28.h, z21.h\n"
- "ldr x20, [x17, #0x118]\n"
- "whilelt p0.h, x16, x3\n"
- ".inst 0x449844b2 // smlalt z18.s, p4/M, z5.h, z24.h\n"
- ".inst 0x449847f4 // smlalt z20.s, p4/M, z31.h, z24.h\n"
- "ld1b { z5.h }, p3/Z, [x21, x2]\n"
- ".inst 0x455e18a5 // usublb z5.h, z5.b, z30.b\n"
- ".inst 0x44984401 // smlalt z1.s, p4/M, z0.h, z24.h\n"
- ".inst 0x44904266 // smlalb z6.s, p4/M, z19.h, z16.h\n"
- "ld1b { z24.h }, p4/Z, [x4, #7, MUL VL]\n"
- "inch x4, ALL, MUL #8\n"
- ".inst 0x44904009 // smlalb z9.s, p4/M, z0.h, z16.h\n"
- ".inst 0x44904167 // smlalb z7.s, p4/M, z11.h, z16.h\n"
- ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
- "ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44954797 // smlalt z23.s, p4/M, z28.h, z21.h\n"
- ".inst 0x4496434e // smlalb z14.s, p4/M, z26.h, z22.h\n"
- "ld1b { z28.h }, p3/Z, [x13, x2]\n"
- ".inst 0x455e1b9c // usublb z28.h, z28.b, z30.b\n"
- ".inst 0x44904672 // smlalt z18.s, p4/M, z19.h, z16.h\n"
- ".inst 0x44904414 // smlalt z20.s, p4/M, z0.h, z16.h\n"
- "ld1b { z19.h }, p4/Z, [x4]\n"
- ".inst 0x454a1a73 // usublb z19.h, z19.b, z10.b\n"
- ".inst 0x44904561 // smlalt z1.s, p4/M, z11.h, z16.h\n"
- ".inst 0x44954346 // smlalb z6.s, p4/M, z26.h, z21.h\n"
- "ld1b { z16.h }, p3/Z, [x12, x2]\n"
- ".inst 0x455e1a10 // usublb z16.h, z16.b, z30.b\n"
- ".inst 0x44954229 // smlalb z9.s, p4/M, z17.h, z21.h\n"
- ".inst 0x44954067 // smlalb z7.s, p4/M, z3.h, z21.h\n"
- ".inst 0x44964757 // smlalt z23.s, p4/M, z26.h, z22.h\n"
- ".inst 0x4499410e // smlalb z14.s, p4/M, z8.h, z25.h\n"
- ".inst 0x44954752 // smlalt z18.s, p4/M, z26.h, z21.h\n"
- ".inst 0x44954634 // smlalt z20.s, p4/M, z17.h, z21.h\n"
- "ld1b { z26.h }, p3/Z, [x11, x2]\n"
- ".inst 0x455e1b5a // usublb z26.h, z26.b, z30.b\n"
- ".inst 0x44954461 // smlalt z1.s, p4/M, z3.h, z21.h\n"
- ".inst 0x44964106 // smlalb z6.s, p4/M, z8.h, z22.h\n"
- "ld1b { z21.h }, p4/Z, [x4, #1, MUL VL]\n"
- ".inst 0x454a1ab5 // usublb z21.h, z21.b, z10.b\n"
- ".inst 0x44964069 // smlalb z9.s, p4/M, z3.h, z22.h\n"
- ".inst 0x44964087 // smlalb z7.s, p4/M, z4.h, z22.h\n"
- ".inst 0x44994517 // smlalt z23.s, p4/M, z8.h, z25.h\n"
- ".inst 0x448243ee // smlalb z14.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44964512 // smlalt z18.s, p4/M, z8.h, z22.h\n"
- ".inst 0x44964474 // smlalt z20.s, p4/M, z3.h, z22.h\n"
- "ld1b { z8.h }, p3/Z, [x10, x2]\n"
- ".inst 0x455e1908 // usublb z8.h, z8.b, z30.b\n"
- ".inst 0x44964481 // smlalt z1.s, p4/M, z4.h, z22.h\n"
- ".inst 0x449943e6 // smlalb z6.s, p4/M, z31.h, z25.h\n"
- "ld1b { z22.h }, p4/Z, [x4, #2, MUL VL]\n"
- ".inst 0x454a1ad6 // usublb z22.h, z22.b, z10.b\n"
- ".inst 0x44994089 // smlalb z9.s, p4/M, z4.h, z25.h\n"
- ".inst 0x44994367 // smlalb z7.s, p4/M, z27.h, z25.h\n"
- ".inst 0x448247f7 // smlalt z23.s, p4/M, z31.h, z2.h\n"
- ".inst 0x449d400e // smlalb z14.s, p4/M, z0.h, z29.h\n"
- ".inst 0x449947f2 // smlalt z18.s, p4/M, z31.h, z25.h\n"
- ".inst 0x44994494 // smlalt z20.s, p4/M, z4.h, z25.h\n"
- "ld1b { z31.h }, p3/Z, [x9, x2]\n"
- ".inst 0x455e1bff // usublb z31.h, z31.b, z30.b\n"
- ".inst 0x44994761 // smlalt z1.s, p4/M, z27.h, z25.h\n"
- ".inst 0x44824006 // smlalb z6.s, p4/M, z0.h, z2.h\n"
- "ld1b { z25.h }, p4/Z, [x4, #3, MUL VL]\n"
- ".inst 0x454a1b39 // usublb z25.h, z25.b, z10.b\n"
- ".inst 0x44824369 // smlalb z9.s, p4/M, z27.h, z2.h\n"
- ".inst 0x448240a7 // smlalb z7.s, p4/M, z5.h, z2.h\n"
- ".inst 0x449d4417 // smlalt z23.s, p4/M, z0.h, z29.h\n"
- ".inst 0x4498422e // smlalb z14.s, p4/M, z17.h, z24.h\n"
- ".inst 0x44824412 // smlalt z18.s, p4/M, z0.h, z2.h\n"
- ".inst 0x44824774 // smlalt z20.s, p4/M, z27.h, z2.h\n"
- "ld1b { z0.h }, p3/Z, [x28, x2]\n"
- ".inst 0x455e1800 // usublb z0.h, z0.b, z30.b\n"
- ".inst 0x448244a1 // smlalt z1.s, p4/M, z5.h, z2.h\n"
- ".inst 0x449d4166 // smlalb z6.s, p4/M, z11.h, z29.h\n"
- "ld1b { z2.h }, p4/Z, [x4, #4, MUL VL]\n"
- ".inst 0x454a1842 // usublb z2.h, z2.b, z10.b\n"
- ".inst 0x449d40a9 // smlalb z9.s, p4/M, z5.h, z29.h\n"
- ".inst 0x449d4387 // smlalb z7.s, p4/M, z28.h, z29.h\n"
- ".inst 0x44984637 // smlalt z23.s, p4/M, z17.h, z24.h\n"
- ".inst 0x4493406e // smlalb z14.s, p4/M, z3.h, z19.h\n"
- "ld1b { z17.h }, p3/Z, [x27, x2]\n"
- ".inst 0x455e1a31 // usublb z17.h, z17.b, z30.b\n"
- ".inst 0x449d4572 // smlalt z18.s, p4/M, z11.h, z29.h\n"
- ".inst 0x449d44b4 // smlalt z20.s, p4/M, z5.h, z29.h\n"
- "ld1b { z11.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x454a196b // usublb z11.h, z11.b, z10.b\n"
- ".inst 0x449d4781 // smlalt z1.s, p4/M, z28.h, z29.h\n"
- ".inst 0x44984066 // smlalb z6.s, p4/M, z3.h, z24.h\n"
- "ld1b { z29.h }, p3/Z, [x26, x2]\n"
- ".inst 0x455e1bbd // usublb z29.h, z29.b, z30.b\n"
- ".inst 0x44984209 // smlalb z9.s, p4/M, z16.h, z24.h\n"
- ".inst 0x44984347 // smlalb z7.s, p4/M, z26.h, z24.h\n"
- ".inst 0x44934477 // smlalt z23.s, p4/M, z3.h, z19.h\n"
- ".inst 0x4495408e // smlalb z14.s, p4/M, z4.h, z21.h\n"
- ".inst 0x44984472 // smlalt z18.s, p4/M, z3.h, z24.h\n"
- ".inst 0x44984614 // smlalt z20.s, p4/M, z16.h, z24.h\n"
- "ld1b { z3.h }, p3/Z, [x25, x2]\n"
- ".inst 0x455e1863 // usublb z3.h, z3.b, z30.b\n"
- ".inst 0x44984741 // smlalt z1.s, p4/M, z26.h, z24.h\n"
- ".inst 0x44934086 // smlalb z6.s, p4/M, z4.h, z19.h\n"
- "ld1b { z24.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
- ".inst 0x44934349 // smlalb z9.s, p4/M, z26.h, z19.h\n"
- ".inst 0x44934107 // smlalb z7.s, p4/M, z8.h, z19.h\n"
- ".inst 0x44954497 // smlalt z23.s, p4/M, z4.h, z21.h\n"
- ".inst 0x4496436e // smlalb z14.s, p4/M, z27.h, z22.h\n"
- ".inst 0x44934492 // smlalt z18.s, p4/M, z4.h, z19.h\n"
- ".inst 0x44934754 // smlalt z20.s, p4/M, z26.h, z19.h\n"
+ ".inst 0x44964306 // smlalb z6.s, p4/M, z24.h, z22.h\n"
+ ".inst 0x4496471e // smlalt z30.s, p4/M, z24.h, z22.h\n"
+ "ld1b { z24.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x44964011 // smlalb z17.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x449643b5 // smlalb z21.s, p4/M, z29.h, z22.h\n"
+ ".inst 0x454e1821 // usublb z1.h, z1.b, z14.b\n"
+ ".inst 0x44964167 // smlalb z7.s, p4/M, z11.h, z22.h\n"
+ ".inst 0x44964408 // smlalt z8.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x454c1842 // usublb z2.h, z2.b, z12.b\n"
+ ".inst 0x449647bb // smlalt z27.s, p4/M, z29.h, z22.h\n"
+ ".inst 0x44964569 // smlalt z9.s, p4/M, z11.h, z22.h\n"
+ "ld1b { z22.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x454e1b18 // usublb z24.h, z24.b, z14.b\n"
+ ".inst 0x44844006 // smlalb z6.s, p4/M, z0.h, z4.h\n"
+ ".inst 0x4484441e // smlalt z30.s, p4/M, z0.h, z4.h\n"
+ "ld1b { z0.h }, p4/Z, [x4, #6, MUL VL]\n"
+ ".inst 0x44844351 // smlalb z17.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844175 // smlalb z21.s, p4/M, z11.h, z4.h\n"
+ ".inst 0x448443e7 // smlalb z7.s, p4/M, z31.h, z4.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x454e1ad6 // usublb z22.h, z22.b, z14.b\n"
+ ".inst 0x4484457b // smlalt z27.s, p4/M, z11.h, z4.h\n"
+ ".inst 0x448447e9 // smlalt z9.s, p4/M, z31.h, z4.h\n"
"ld1b { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x455e1884 // usublb z4.h, z4.b, z30.b\n"
- ".inst 0x44934501 // smlalt z1.s, p4/M, z8.h, z19.h\n"
- ".inst 0x44954366 // smlalb z6.s, p4/M, z27.h, z21.h\n"
- "ld1b { z19.h }, p4/Z, [x4, #7, MUL VL]\n"
+ ".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
+ ".inst 0x44904346 // smlalb z6.s, p4/M, z26.h, z16.h\n"
+ ".inst 0x4490475e // smlalt z30.s, p4/M, z26.h, z16.h\n"
+ "ld1b { z26.h }, p4/Z, [x4, #7, MUL VL]\n"
"inch x4, ALL, MUL #8\n"
- ".inst 0x44954109 // smlalb z9.s, p4/M, z8.h, z21.h\n"
- ".inst 0x449543e7 // smlalb z7.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454a1a73 // usublb z19.h, z19.b, z10.b\n"
- ".inst 0x44964777 // smlalt z23.s, p4/M, z27.h, z22.h\n"
- ".inst 0x449940ae // smlalb z14.s, p4/M, z5.h, z25.h\n"
- ".inst 0x44954772 // smlalt z18.s, p4/M, z27.h, z21.h\n"
- ".inst 0x44954514 // smlalt z20.s, p4/M, z8.h, z21.h\n"
- "ld1b { z27.h }, p3/Z, [x23, x2]\n"
- ".inst 0x455e1b7b // usublb z27.h, z27.b, z30.b\n"
- ".inst 0x449547e1 // smlalt z1.s, p4/M, z31.h, z21.h\n"
- ".inst 0x449640a6 // smlalb z6.s, p4/M, z5.h, z22.h\n"
- "ld1b { z21.h }, p4/Z, [x4]\n"
- ".inst 0x454a1ab5 // usublb z21.h, z21.b, z10.b\n"
- ".inst 0x449643e9 // smlalb z9.s, p4/M, z31.h, z22.h\n"
- ".inst 0x44964007 // smlalb z7.s, p4/M, z0.h, z22.h\n"
- "inch x4\n"
- ".inst 0x449944b7 // smlalt z23.s, p4/M, z5.h, z25.h\n"
- ".inst 0x4482420e // smlalb z14.s, p4/M, z16.h, z2.h\n"
- ".inst 0x449644b2 // smlalt z18.s, p4/M, z5.h, z22.h\n"
- ".inst 0x449647f4 // smlalt z20.s, p4/M, z31.h, z22.h\n"
- "ld1b { z5.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e18a5 // usublb z5.h, z5.b, z30.b\n"
- ".inst 0x44964401 // smlalt z1.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x44904391 // smlalb z17.s, p4/M, z28.h, z16.h\n"
+ ".inst 0x449043f5 // smlalb z21.s, p4/M, z31.h, z16.h\n"
+ ".inst 0x449040a7 // smlalb z7.s, p4/M, z5.h, z16.h\n"
+ ".inst 0x44904788 // smlalt z8.s, p4/M, z28.h, z16.h\n"
+ ".inst 0x454e1884 // usublb z4.h, z4.b, z14.b\n"
+ ".inst 0x449047fb // smlalt z27.s, p4/M, z31.h, z16.h\n"
+ ".inst 0x449044a9 // smlalt z9.s, p4/M, z5.h, z16.h\n"
+ "ld1b { z16.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x454c1b5a // usublb z26.h, z26.b, z12.b\n"
".inst 0x44994386 // smlalb z6.s, p4/M, z28.h, z25.h\n"
- "ld1w { z22.s }, p2/Z, [x15]\n"
- ".inst 0x44994009 // smlalb z9.s, p4/M, z0.h, z25.h\n"
- ".inst 0x44994227 // smlalb z7.s, p4/M, z17.h, z25.h\n"
- ".inst 0x44824617 // smlalt z23.s, p4/M, z16.h, z2.h\n"
- ".inst 0x448b434e // smlalb z14.s, p4/M, z26.h, z11.h\n"
- "ld1w { z16.s }, p1/Z, [x15, #1, MUL VL]\n"
- "addvl x15, x15, #2\n"
- ".inst 0x44994792 // smlalt z18.s, p4/M, z28.h, z25.h\n"
- ".inst 0x44994414 // smlalt z20.s, p4/M, z0.h, z25.h\n"
- "ld1b { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1b9c // usublb z28.h, z28.b, z30.b\n"
- ".inst 0x44994621 // smlalt z1.s, p4/M, z17.h, z25.h\n"
- ".inst 0x44824346 // smlalb z6.s, p4/M, z26.h, z2.h\n"
- "uzp1 z25.s, z22.s, z16.s\n"
+ ".inst 0x4499479e // smlalt z30.s, p4/M, z28.h, z25.h\n"
+ "ld1b { z28.h }, p4/Z, [x4]\n"
+ "inch x4\n"
+ ".inst 0x44994251 // smlalb z17.s, p4/M, z18.h, z25.h\n"
+ ".inst 0x449940b5 // smlalb z21.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994067 // smlalb z7.s, p4/M, z3.h, z25.h\n"
+ ".inst 0x44994648 // smlalt z8.s, p4/M, z18.h, z25.h\n"
+ ".inst 0x454e1a10 // usublb z16.h, z16.b, z14.b\n"
+ ".inst 0x449944bb // smlalt z27.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994469 // smlalt z9.s, p4/M, z3.h, z25.h\n"
+ "ld1b { z25.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ ".inst 0x44934246 // smlalb z6.s, p4/M, z18.h, z19.h\n"
+ ".inst 0x4493465e // smlalt z30.s, p4/M, z18.h, z19.h\n"
+ "ld1w { z18.s }, p2/Z, [x7]\n"
+ ".inst 0x44934291 // smlalb z17.s, p4/M, z20.h, z19.h\n"
+ ".inst 0x44934075 // smlalb z21.s, p4/M, z3.h, z19.h\n"
+ ".inst 0x44934027 // smlalb z7.s, p4/M, z1.h, z19.h\n"
+ ".inst 0x44934688 // smlalt z8.s, p4/M, z20.h, z19.h\n"
+ "ld1w { z20.s }, p1/Z, [x7, #1, MUL VL]\n"
+ ".inst 0x454e1b39 // usublb z25.h, z25.b, z14.b\n"
+ ".inst 0x4493447b // smlalt z27.s, p4/M, z3.h, z19.h\n"
+ ".inst 0x44934429 // smlalt z9.s, p4/M, z1.h, z19.h\n"
+ "ld1b { z19.h }, p3/Z, [x20, x2]\n"
"inch x2\n"
- ".inst 0x448243a9 // smlalb z9.s, p4/M, z29.h, z2.h\n"
- ".inst 0x44824067 // smlalb z7.s, p4/M, z3.h, z2.h\n"
- "uzp2 z16.s, z22.s, z16.s\n"
- "ld1w { z22.s }, p2/Z, [x14]\n"
- ".inst 0x448b4757 // smlalt z23.s, p4/M, z26.h, z11.h\n"
- ".inst 0x4498410e // smlalb z14.s, p4/M, z8.h, z24.h\n"
+ ".inst 0x449743a6 // smlalb z6.s, p4/M, z29.h, z23.h\n"
+ ".inst 0x449747be // smlalt z30.s, p4/M, z29.h, z23.h\n"
+ "addvl x7, x7, #2\n"
+ ".inst 0x44974171 // smlalb z17.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x44974315 // smlalb z21.s, p4/M, z24.h, z23.h\n"
+ "uzp1 z29.s, z18.s, z20.s\n"
+ ".inst 0x449742c7 // smlalb z7.s, p4/M, z22.h, z23.h\n"
+ ".inst 0x44974568 // smlalt z8.s, p4/M, z11.h, z23.h\n"
+ "uzp2 z18.s, z18.s, z20.s\n"
+ "ld1w { z20.s }, p2/Z, [x8]\n"
+ ".inst 0x4497471b // smlalt z27.s, p4/M, z24.h, z23.h\n"
+ ".inst 0x449746c9 // smlalt z9.s, p4/M, z22.h, z23.h\n"
+ "ld1w { z24.s }, p1/Z, [x8, #1, MUL VL]\n"
+ ".inst 0x454e1a73 // usublb z19.h, z19.b, z14.b\n"
+ ".inst 0x44824166 // smlalb z6.s, p4/M, z11.h, z2.h\n"
+ ".inst 0x4482457e // smlalt z30.s, p4/M, z11.h, z2.h\n"
"mov x20, x2\n"
- "incw x20\n"
- ".inst 0x44824752 // smlalt z18.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448247b4 // smlalt z20.s, p4/M, z29.h, z2.h\n"
- "ld1w { z26.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z29.s, z22.s, z26.s\n"
- ".inst 0x44824461 // smlalt z1.s, p4/M, z3.h, z2.h\n"
- ".inst 0x448b4106 // smlalb z6.s, p4/M, z8.h, z11.h\n"
- "uzp2 z22.s, z22.s, z26.s\n"
"whilelt p2.s, x2, x3\n"
- ".inst 0x448b4069 // smlalb z9.s, p4/M, z3.h, z11.h\n"
- ".inst 0x448b4087 // smlalb z7.s, p4/M, z4.h, z11.h\n"
+ ".inst 0x448243f1 // smlalb z17.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448242d5 // smlalb z21.s, p4/M, z22.h, z2.h\n"
+ "addvl x8, x8, #2\n"
+ ".inst 0x44824087 // smlalb z7.s, p4/M, z4.h, z2.h\n"
+ ".inst 0x448247e8 // smlalt z8.s, p4/M, z31.h, z2.h\n"
+ "uzp1 z23.s, z20.s, z24.s\n"
+ ".inst 0x448246db // smlalt z27.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x44824489 // smlalt z9.s, p4/M, z4.h, z2.h\n"
+ "uzp2 z22.s, z20.s, z24.s\n"
+ "incw x20\n"
+ ".inst 0x448043e6 // smlalb z6.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448047fe // smlalt z30.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448040b1 // smlalb z17.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x44804095 // smlalb z21.s, p4/M, z4.h, z0.h\n"
+ ".inst 0x44804207 // smlalb z7.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x448044a8 // smlalt z8.s, p4/M, z5.h, z0.h\n"
"whilelt p1.s, x20, x3\n"
"whilelt p3.h, x2, x3\n"
- ".inst 0x44984517 // smlalt z23.s, p4/M, z8.h, z24.h\n"
- ".inst 0x449343ee // smlalb z14.s, p4/M, z31.h, z19.h\n"
- "addvl x14, x14, #2\n"
- ".inst 0x448b4512 // smlalt z18.s, p4/M, z8.h, z11.h\n"
- ".inst 0x448b4474 // smlalt z20.s, p4/M, z3.h, z11.h\n"
- ".inst 0x448b4481 // smlalt z1.s, p4/M, z4.h, z11.h\n"
- ".inst 0x449843e6 // smlalb z6.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984089 // smlalb z9.s, p4/M, z4.h, z24.h\n"
- ".inst 0x44984367 // smlalb z7.s, p4/M, z27.h, z24.h\n"
- ".inst 0x449347f7 // smlalt z23.s, p4/M, z31.h, z19.h\n"
- ".inst 0x4495400e // smlalb z14.s, p4/M, z0.h, z21.h\n"
- ".inst 0x04b975ce // sqrdmulh z14.s, z14.s, z25.s\n"
- ".inst 0x449847f2 // smlalt z18.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984494 // smlalt z20.s, p4/M, z4.h, z24.h\n"
- "and z3.d, z14.d, z29.d\n"
- ".inst 0x44984761 // smlalt z1.s, p4/M, z27.h, z24.h\n"
- ".inst 0x44934006 // smlalb z6.s, p4/M, z0.h, z19.h\n"
- "asr z3.s, z3.s, #0x1f\n"
- ".inst 0x44934369 // smlalb z9.s, p4/M, z27.h, z19.h\n"
- ".inst 0x449340a7 // smlalb z7.s, p4/M, z5.h, z19.h\n"
- "sqadd z14.s, z14.s, z3.s\n"
- ".inst 0x448293ae // srshl z14.s, p4/M, z14.s, z29.s\n"
- ".inst 0x44954417 // smlalt z23.s, p4/M, z0.h, z21.h\n"
- ".inst 0x44934412 // smlalt z18.s, p4/M, z0.h, z19.h\n"
- ".inst 0x04b076f7 // sqrdmulh z23.s, z23.s, z16.s\n"
- ".inst 0x44934774 // smlalt z20.s, p4/M, z27.h, z19.h\n"
- ".inst 0x449344a1 // smlalt z1.s, p4/M, z5.h, z19.h\n"
- "and z31.d, z23.d, z22.d\n"
- ".inst 0x44954226 // smlalb z6.s, p4/M, z17.h, z21.h\n"
- ".inst 0x449540a9 // smlalb z9.s, p4/M, z5.h, z21.h\n"
- ".inst 0x04b974c6 // sqrdmulh z6.s, z6.s, z25.s\n"
- ".inst 0x44954387 // smlalb z7.s, p4/M, z28.h, z21.h\n"
- ".inst 0x44954632 // smlalt z18.s, p4/M, z17.h, z21.h\n"
- ".inst 0x04b97529 // sqrdmulh z9.s, z9.s, z25.s\n"
- ".inst 0x449544b4 // smlalt z20.s, p4/M, z5.h, z21.h\n"
- ".inst 0x44954781 // smlalt z1.s, p4/M, z28.h, z21.h\n"
- ".inst 0x04b974e7 // sqrdmulh z7.s, z7.s, z25.s\n"
- "asr z31.s, z31.s, #0x1f\n"
- "and z3.d, z6.d, z29.d\n"
- ".inst 0x04b07652 // sqrdmulh z18.s, z18.s, z16.s\n"
- "and z0.d, z9.d, z29.d\n"
- ".inst 0x04b07694 // sqrdmulh z20.s, z20.s, z16.s\n"
- "and z19.d, z7.d, z29.d\n"
- ".inst 0x04b07421 // sqrdmulh z1.s, z1.s, z16.s\n"
- "sqadd z23.s, z23.s, z31.s\n"
- ".inst 0x448292d7 // srshl z23.s, p4/M, z23.s, z22.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- "and z21.d, z18.d, z22.d\n"
- "asr z0.s, z0.s, #0x1f\n"
- "and z17.d, z20.d, z22.d\n"
+ ".inst 0x4480449b // smlalt z27.s, p4/M, z4.h, z0.h\n"
+ ".inst 0x44804609 // smlalt z9.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x449a40a6 // smlalb z6.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a44be // smlalt z30.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a4071 // smlalb z17.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a4215 // smlalb z21.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4327 // smlalb z7.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449a4468 // smlalt z8.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a461b // smlalt z27.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4729 // smlalt z9.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449c4066 // smlalb z6.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c447e // smlalt z30.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c4031 // smlalb z17.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c4335 // smlalb z21.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4267 // smlalb z7.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x449c4428 // smlalt z8.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c473b // smlalt z27.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4669 // smlalt z9.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x04bd74c6 // sqrdmulh z6.s, z6.s, z29.s\n"
+ ".inst 0x04b277de // sqrdmulh z30.s, z30.s, z18.s\n"
+ ".inst 0x04bd7631 // sqrdmulh z17.s, z17.s, z29.s\n"
+ ".inst 0x04bd76b5 // sqrdmulh z21.s, z21.s, z29.s\n"
+ "and z19.d, z6.d, z23.d\n"
+ ".inst 0x04bd74e7 // sqrdmulh z7.s, z7.s, z29.s\n"
+ ".inst 0x04b27508 // sqrdmulh z8.s, z8.s, z18.s\n"
+ "and z16.d, z30.d, z22.d\n"
+ "and z2.d, z17.d, z23.d\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "and z20.d, z21.d, z23.d\n"
+ ".inst 0x04b2777b // sqrdmulh z27.s, z27.s, z18.s\n"
+ ".inst 0x04b27529 // sqrdmulh z9.s, z9.s, z18.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "sqadd z6.s, z6.s, z19.s\n"
+ "and z19.d, z7.d, z23.d\n"
+ "and z0.d, z8.d, z22.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "sqadd z30.s, z30.s, z16.s\n"
+ "and z26.d, z27.d, z22.d\n"
"asr z19.s, z19.s, #0x1f\n"
- "and z16.d, z1.d, z22.d\n"
- "sqadd z6.s, z6.s, z3.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- ".inst 0x448293a6 // srshl z6.s, p4/M, z6.s, z29.s\n"
- "sqadd z9.s, z9.s, z0.s\n"
- "asr z17.s, z17.s, #0x1f\n"
- ".inst 0x448293a9 // srshl z9.s, p4/M, z9.s, z29.s\n"
+ "and z16.d, z9.d, z22.d\n"
+ ".inst 0x448292e6 // srshl z6.s, p4/M, z6.s, z23.s\n"
+ "sqadd z17.s, z17.s, z2.s\n"
+ "asr z0.s, z0.s, #0x1f\n"
+ "sqadd z21.s, z21.s, z20.s\n"
+ "asr z26.s, z26.s, #0x1f\n"
+ ".inst 0x448292de // srshl z30.s, p4/M, z30.s, z22.s\n"
"sqadd z7.s, z7.s, z19.s\n"
"asr z16.s, z16.s, #0x1f\n"
- ".inst 0x448293a7 // srshl z7.s, p4/M, z7.s, z29.s\n"
- "sqadd z18.s, z18.s, z21.s\n"
- "sqadd z20.s, z20.s, z17.s\n"
- ".inst 0x448292d2 // srshl z18.s, p4/M, z18.s, z22.s\n"
- ".inst 0x448292d4 // srshl z20.s, p4/M, z20.s, z22.s\n"
- "sqadd z1.s, z1.s, z16.s\n"
- ".inst 0x453041ce // sqxtnb z14.h, z14.s\n"
- ".inst 0x448292c1 // srshl z1.s, p4/M, z1.s, z22.s\n"
+ ".inst 0x448292f1 // srshl z17.s, p4/M, z17.s, z23.s\n"
+ "sqadd z8.s, z8.s, z0.s\n"
".inst 0x453040c6 // sqxtnb z6.h, z6.s\n"
- ".inst 0x45304129 // sqxtnb z9.h, z9.s\n"
+ ".inst 0x448292f5 // srshl z21.s, p4/M, z21.s, z23.s\n"
+ "sqadd z27.s, z27.s, z26.s\n"
+ ".inst 0x448292e7 // srshl z7.s, p4/M, z7.s, z23.s\n"
+ "sqadd z9.s, z9.s, z16.s\n"
+ ".inst 0x45304231 // sqxtnb z17.h, z17.s\n"
+ ".inst 0x448292c8 // srshl z8.s, p4/M, z8.s, z22.s\n"
+ ".inst 0x453042b5 // sqxtnb z21.h, z21.s\n"
+ ".inst 0x453047c6 // sqxtnt z6.h, z30.s\n"
+ ".inst 0x448292db // srshl z27.s, p4/M, z27.s, z22.s\n"
+ ".inst 0x448292c9 // srshl z9.s, p4/M, z9.s, z22.s\n"
".inst 0x453040e7 // sqxtnb z7.h, z7.s\n"
- ".inst 0x453046ee // sqxtnt z14.h, z23.s\n"
- ".inst 0x45304646 // sqxtnt z6.h, z18.s\n"
- ".inst 0x45304689 // sqxtnt z9.h, z20.s\n"
- ".inst 0x45304427 // sqxtnt z7.h, z1.s\n"
- "sqadd z14.h, z14.h, z15.h\n"
- "smax z14.h, p4/M, z14.h, z12.h\n"
- "smin z14.h, p4/M, z14.h, z13.h\n"
- "sqadd z6.h, z6.h, z15.h\n"
- "sqadd z9.h, z9.h, z15.h\n"
- "smax z6.h, p4/M, z6.h, z12.h\n"
- "smax z9.h, p4/M, z9.h, z12.h\n"
- "sqadd z7.h, z7.h, z15.h\n"
- "smax z7.h, p4/M, z7.h, z12.h\n"
+ ".inst 0x45304511 // sqxtnt z17.h, z8.s\n"
+ ".inst 0x45304775 // sqxtnt z21.h, z27.s\n"
+ ".inst 0x45304527 // sqxtnt z7.h, z9.s\n"
+ "sqadd z6.h, z6.h, z10.h\n"
+ "sqadd z17.h, z17.h, z10.h\n"
+ "sqadd z21.h, z21.h, z10.h\n"
+ "sqadd z7.h, z7.h, z10.h\n"
+ "smax z6.h, p4/M, z6.h, z15.h\n"
+ "smax z17.h, p4/M, z17.h, z15.h\n"
+ "smax z21.h, p4/M, z21.h, z15.h\n"
+ "smax z7.h, p4/M, z7.h, z15.h\n"
"smin z6.h, p4/M, z6.h, z13.h\n"
- "st1b { z14.h }, p0, [x5, x16]\n"
- "smin z9.h, p4/M, z9.h, z13.h\n"
+ "smin z17.h, p4/M, z17.h, z13.h\n"
+ "smin z21.h, p4/M, z21.h, z13.h\n"
"smin z7.h, p4/M, z7.h, z13.h\n"
- "st1b { z6.h }, p0, [x6, x16]\n"
- "st1b { z9.h }, p0, [x7, x16]\n"
- "st1b { z7.h }, p0, [x8, x16]\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
+ "st1b { z6.h }, p0, [x17, x6]\n"
+ "st1b { z17.h }, p0, [x16, x6]\n"
+ "st1b { z21.h }, p0, [x15, x6]\n"
+ "st1b { z7.h }, p0, [x14, x6]\n"
+ "inch x6\n"
+ "ld1w { z21.s }, p2/Z, [x21]\n"
"ld1w { z16.s }, p1/Z, [x21, #1, MUL VL]\n"
- "uzp1 z14.s, z17.s, z16.s\n"
- "ld1b { z26.h }, p4/Z, [x4]\n"
- "ld1b { z8.h }, p4/Z, [x4, #1, MUL VL]\n"
- "uzp2 z23.s, z17.s, z16.s\n"
"addvl x21, x21, #2\n"
- "ld1b { z16.h }, p4/Z, [x4, #2, MUL VL]\n"
- "ld1b { z21.h }, p4/Z, [x4, #3, MUL VL]\n"
- "inch x16\n"
+ "ld1b { z25.h }, p4/Z, [x4]\n"
+ "ld1b { z28.h }, p4/Z, [x4, #1, MUL VL]\n"
+ "ld1b { z4.h }, p4/Z, [x4, #2, MUL VL]\n"
+ "ld1b { z23.h }, p4/Z, [x4, #3, MUL VL]\n"
+ "ld1b { z31.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldp x9, x28, [x5, #0x0]\n"
+ "uzp1 z6.s, z21.s, z16.s\n"
+ "uzp2 z30.s, z21.s, z16.s\n"
"str x21, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1b { z17.h }, p4/Z, [x4, #4, MUL VL]\n"
- "ldp x9, x28, [x17, #0x0]\n"
- "mov z6.d, z14.d\n"
- "mov z18.d, z23.d\n"
- "ldp x27, x26, [x17, #0x10]\n"
- "ldp x25, x24, [x17, #0x20]\n"
- "mov z9.d, z14.d\n"
- "mov z20.d, z23.d\n"
- "ldp x23, x22, [x17, #0x30]\n"
- "ldp x21, x20, [x17, #0x40]\n"
- "mov z7.d, z14.d\n"
- "mov z1.d, z23.d\n"
- "ld1b { z22.h }, p3/Z, [x9, x2]\n"
- "ld1b { z2.h }, p3/Z, [x28, x2]\n"
- ".inst 0x454a1b5a // usublb z26.h, z26.b, z10.b\n"
- ".inst 0x454a1908 // usublb z8.h, z8.b, z10.b\n"
- "ld1b { z11.h }, p3/Z, [x27, x2]\n"
- "ld1b { z3.h }, p3/Z, [x26, x2]\n"
- ".inst 0x454a1a10 // usublb z16.h, z16.b, z10.b\n"
- ".inst 0x454a1ab5 // usublb z21.h, z21.b, z10.b\n"
- "ld1b { z29.h }, p3/Z, [x25, x2]\n"
- "ld1b { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x454a1a31 // usublb z17.h, z17.b, z10.b\n"
- ".inst 0x455e1ad6 // usublb z22.h, z22.b, z30.b\n"
- "ld1b { z31.h }, p3/Z, [x23, x2]\n"
- "ld1b { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1842 // usublb z2.h, z2.b, z30.b\n"
- ".inst 0x455e196b // usublb z11.h, z11.b, z30.b\n"
- "ld1b { z19.h }, p3/Z, [x21, x2]\n"
- "ld1b { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1863 // usublb z3.h, z3.b, z30.b\n"
- ".inst 0x455e1bbd // usublb z29.h, z29.b, z30.b\n"
- ".inst 0x455e1884 // usublb z4.h, z4.b, z30.b\n"
- ".inst 0x455e1bff // usublb z31.h, z31.b, z30.b\n"
- ".inst 0x455e1800 // usublb z0.h, z0.b, z30.b\n"
- ".inst 0x455e1a73 // usublb z19.h, z19.b, z30.b\n"
- ".inst 0x455e1b9c // usublb z28.h, z28.b, z30.b\n"
+ ".inst 0x454c1b39 // usublb z25.h, z25.b, z12.b\n"
+ ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
+ ".inst 0x454c1af7 // usublb z23.h, z23.b, z12.b\n"
+ "ldp x27, x26, [x5, #0x10]\n"
+ "mov z17.d, z6.d\n"
+ "mov z8.d, z30.d\n"
+ "mov z21.d, z6.d\n"
+ "mov z27.d, z30.d\n"
+ "ldp x25, x24, [x5, #0x20]\n"
+ "mov z7.d, z6.d\n"
+ "mov z9.d, z30.d\n"
+ ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
+ "ldp x23, x22, [x5, #0x30]\n"
+ "ldp x21, x20, [x5, #0x40]\n"
+ "ld1b { z26.h }, p3/Z, [x9, x2]\n"
+ "ld1b { z16.h }, p3/Z, [x28, x2]\n"
+ "ld1b { z24.h }, p3/Z, [x27, x2]\n"
+ "ld1b { z5.h }, p3/Z, [x26, x2]\n"
+ "ld1b { z18.h }, p3/Z, [x25, x2]\n"
+ "ld1b { z3.h }, p3/Z, [x24, x2]\n"
+ "ld1b { z19.h }, p3/Z, [x23, x2]\n"
+ "ld1b { z11.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454e1b5a // usublb z26.h, z26.b, z14.b\n"
+ ".inst 0x454e1a10 // usublb z16.h, z16.b, z14.b\n"
+ "ld1b { z20.h }, p3/Z, [x21, x2]\n"
+ "ld1b { z29.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454e1b18 // usublb z24.h, z24.b, z14.b\n"
+ ".inst 0x454e18a5 // usublb z5.h, z5.b, z14.b\n"
+ ".inst 0x454e1a52 // usublb z18.h, z18.b, z14.b\n"
+ ".inst 0x454e1863 // usublb z3.h, z3.b, z14.b\n"
+ ".inst 0x454e1a73 // usublb z19.h, z19.b, z14.b\n"
+ ".inst 0x454e196b // usublb z11.h, z11.b, z14.b\n"
+ ".inst 0x454e1a94 // usublb z20.h, z20.b, z14.b\n"
+ ".inst 0x454e1bbd // usublb z29.h, z29.b, z14.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
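
Every accumulator in the u8q kernels above leaves the loop through the same requantisation tail: SQRDMULH against the requant multiplier, a rounding right shift (the AND/ASR/SQADD fixup followed by SRSHL), an ADD of the output offset, and an SMIN/SMAX clamp before the byte store. A minimal scalar sketch of that tail, assuming gemmlowp-style rounding and using hypothetical helper names (this models what the instructions compute, it is not library API):

#include <cstdint>

// Saturating rounding doubling high multiply, as the SQRDMULH
// instruction computes it (illustrative scalar model).
static int32_t sqrdmulh32(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN)
        return INT32_MAX;                       // the only saturating case
    int64_t p = static_cast<int64_t>(a) * b;
    return static_cast<int32_t>((2 * p + (1LL << 31)) >> 32);
}

// Rounding divide by 2^shift (shift in [0, 31)); the AND/ASR/SQADD +
// SRSHL sequence in the assembly realises the same rounding behaviour.
static int32_t rounding_divide_by_pot(int32_t x, int shift)
{
    int32_t mask      = (1 << shift) - 1;
    int32_t remainder = x & mask;
    int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
    return (x >> shift) + (remainder > threshold ? 1 : 0);
}

static uint8_t requantize(int32_t acc, int32_t mul, int shift,
                          int32_t c_offset, int32_t minval, int32_t maxval)
{
    int32_t v = rounding_divide_by_pot(sqrdmulh32(acc, mul), shift);
    v += c_offset;                              // ADD of the output offset
    v = v > maxval ? maxval : v;                // SMIN against maxval
    v = v < minval ? minval : v;                // SMAX against minval
    return static_cast<uint8_t>(v);             // ST1B narrows to bytes
}

The SVE code clamps with SMIN against maxval before SMAX against minval; the order is immaterial once minval <= maxval. Much of the register renaming in these hunks reshuffles which z registers hold the multiplier, shift, offset and clamp constants around this fixed sequence.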
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
index a9cd8a7fa9..e782eb3197 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,288 +41,288 @@ void sve_u8q_packed_to_nhwc_3x3_s2_with_multiplier_output2x4_dot_depthfirst_impl
)
{
__asm__ __volatile__(
- "mov x20, #0x9\n"
- "whilelt p0.b, XZR, x20\n"
- "ldr x23, [%x[inptrs], #0x8]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
+ "mov x25, #0x9\n"
+ "ldr x24, [%x[inptrs], #0x8]\n"
+ "ldr x23, [%x[inptrs], #0x10]\n"
+ "mov z22.b, #0x1\n"
"ldr x22, [%x[inptrs], #0x20]\n"
"ldr x21, [%x[inptrs], #0x0]\n"
- "mov z13.b, #0x1\n"
- "lsr z13.s, z13.s, #0x8\n"
- "ld1b { z1.b }, p0/Z, [x23]\n"
- "ld1b { z2.b }, p0/Z, [x20]\n"
- "mov z8.d, z1.d\n"
- "mov z27.d, z1.d\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
"ldr x20, [%x[inptrs], #0x18]\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
- "mov z31.d, z1.d\n"
- "mov z28.d, z2.d\n"
- "ld1b { z0.b }, p0/Z, [x21]\n"
- "mov z30.d, z2.d\n"
- "mov z26.d, z2.d\n"
- "ld1b { z3.b }, p0/Z, [x20]\n"
- "mov z22.d, z4.d\n"
- "mov z10.d, z4.d\n"
+ "lsr z22.s, z22.s, #0x8\n"
+ "mov z29.s, #0x0\n"
"ptrue p2.b\n"
- "ld1rw { z11.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "mov z18.d, z4.d\n"
- "ext z8.b, z8.b, z8.b, #0x2\n"
+ "whilelt p0.b, XZR, x25\n"
+ "mov z14.s, #0x0\n"
+ "mov z23.s, #0x0\n"
"lsl x10, %x[n_channels], #0x2\n"
- "neg z11.s, p2/M, z11.s\n"
- "ext z27.b, z27.b, z27.b, #0x4\n"
- "ext z31.b, z31.b, z31.b, #0x6\n"
+ "mov z11.s, #0x0\n"
+ "mov z15.s, #0x0\n"
"mov x9, #0x0\n"
- "whilelt p0.b, x9, x10\n"
- "ext z28.b, z28.b, z28.b, #0x2\n"
- "ext z30.b, z30.b, z30.b, #0x4\n"
- "ld1w { z14.s }, p0/Z, [%x[params]]\n"
"mov x28, #0x0\n"
- "ext z26.b, z26.b, z26.b, #0x6\n"
- "ext z22.b, z22.b, z22.b, #0x2\n"
+ "mov z31.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
+ "ld1b { z1.b }, p0/Z, [x24]\n"
+ "ld1b { z2.b }, p0/Z, [x23]\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "ld1b { z4.b }, p0/Z, [x22]\n"
+ "ld1b { z0.b }, p0/Z, [x21]\n"
+ "mov z24.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "ld1b { z3.b }, p0/Z, [x20]\n"
+ "mov z27.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "neg z16.s, p2/M, z16.s\n"
+ "mov z5.d, z1.d\n"
+ "mov z7.d, z1.d\n"
+ "whilelt p0.b, x9, x10\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
- "ext z10.b, z10.b, z10.b, #0x4\n"
- "ext z18.b, z18.b, z18.b, #0x6\n"
+ "mov z30.d, z1.d\n"
+ "mov z6.d, z2.d\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
"ldp x21, x20, [%x[outptrs], #0x30]\n"
- "mov z21.d, z0.d\n"
- "mov z20.d, z0.d\n"
- "ld1rw { z9.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "mov z19.d, z0.d\n"
- "mov z24.d, z3.d\n"
- "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "mov z8.d, z2.d\n"
+ "mov z19.d, z2.d\n"
+ "ld1rw { z10.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "mov z9.d, z4.d\n"
+ "mov z28.d, z4.d\n"
+ "ext z5.b, z5.b, z5.b, #0x2\n"
+ "ext z7.b, z7.b, z7.b, #0x4\n"
+ "ext z30.b, z30.b, z30.b, #0x6\n"
+ "ext z6.b, z6.b, z6.b, #0x2\n"
+ "ext z8.b, z8.b, z8.b, #0x4\n"
+ "ext z19.b, z19.b, z19.b, #0x6\n"
+ "ext z9.b, z9.b, z9.b, #0x2\n"
+ "ext z28.b, z28.b, z28.b, #0x4\n"
+ "zip1 z1.s, z1.s, z7.s\n"
+ "mov z7.d, z4.d\n"
+ "zip1 z5.s, z5.s, z30.s\n"
+ "mov z30.d, z0.d\n"
+ "ext z7.b, z7.b, z7.b, #0x6\n"
+ "zip1 z2.s, z2.s, z8.s\n"
+ "ld1w { z8.s }, p0/Z, [%x[params]]\n"
+ "ext z30.b, z30.b, z30.b, #0x2\n"
+ "zip1 z6.s, z6.s, z19.s\n"
+ "ld1rw { z19.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "zip1 z4.s, z4.s, z28.s\n"
+ "mov z28.d, z0.d\n"
+ "zip1 z9.s, z9.s, z7.s\n"
+ "mov z7.d, z0.d\n"
+ "ext z28.b, z28.b, z28.b, #0x4\n"
+ "zip1 z1.s, z1.s, z5.s\n"
"ld1b { z5.b }, p0/Z, [%x[params], #1, MUL VL]\n"
- "mov z17.d, z3.d\n"
- "mov z16.d, z3.d\n"
+ "ext z7.b, z7.b, z7.b, #0x6\n"
+ "zip1 z2.s, z2.s, z6.s\n"
"ld1b { z6.b }, p0/Z, [%x[params], #2, MUL VL]\n"
+ "zip1 z4.s, z4.s, z9.s\n"
+ "mov z9.d, z3.d\n"
+ "zip1 z0.s, z0.s, z28.s\n"
+ "mov z28.d, z3.d\n"
+ "ext z9.b, z9.b, z9.b, #0x2\n"
+ "zip1 z30.s, z30.s, z7.s\n"
"ld1b { z7.b }, p0/Z, [%x[params], #3, MUL VL]\n"
- "ext z21.b, z21.b, z21.b, #0x2\n"
- "ext z20.b, z20.b, z20.b, #0x4\n"
"addvl %x[params], %x[params], #4\n"
- "ext z19.b, z19.b, z19.b, #0x6\n"
- "zip1 z1.s, z1.s, z27.s\n"
- "zip1 z8.s, z8.s, z31.s\n"
- "zip1 z2.s, z2.s, z30.s\n"
- "zip1 z28.s, z28.s, z26.s\n"
- "ext z24.b, z24.b, z24.b, #0x2\n"
- "ext z17.b, z17.b, z17.b, #0x4\n"
- "ext z16.b, z16.b, z16.b, #0x6\n"
- "zip1 z4.s, z4.s, z10.s\n"
- "zip1 z22.s, z22.s, z18.s\n"
- "zip1 z0.s, z0.s, z20.s\n"
- "zip1 z21.s, z21.s, z19.s\n"
- "zip1 z1.s, z1.s, z8.s\n"
- "zip1 z2.s, z2.s, z28.s\n"
- "zip1 z3.s, z3.s, z17.s\n"
- "zip1 z24.s, z24.s, z16.s\n"
- "zip1 z4.s, z4.s, z22.s\n"
- "zip1 z0.s, z0.s, z21.s\n"
+ "ext z28.b, z28.b, z28.b, #0x4\n"
"mov z1.q, z1.q[0]\n"
"mov z2.q, z2.q[0]\n"
- "zip1 z3.s, z3.s, z24.s\n"
"mov z4.q, z4.q[0]\n"
- "mov z24.s, #0x0\n"
- "mov z25.s, #0x0\n"
- "udot z24.s, z13.b, z1.b[0]\n"
- "mov z23.s, #0x0\n"
- "mov z22.s, #0x0\n"
- "udot z25.s, z13.b, z1.b[1]\n"
- "mov z21.s, #0x0\n"
- "mov z19.s, #0x0\n"
- "udot z23.s, z13.b, z1.b[2]\n"
- "mov z10.s, #0x0\n"
- "mov z8.s, #0x0\n"
- "udot z22.s, z13.b, z1.b[3]\n"
- "mov z20.s, #0x0\n"
- "mov z18.s, #0x0\n"
- "udot z21.s, z13.b, z2.b[0]\n"
- "mov z17.s, #0x0\n"
- "mov z16.s, #0x0\n"
- "udot z19.s, z13.b, z2.b[1]\n"
- "udot z10.s, z13.b, z2.b[2]\n"
- "udot z8.s, z13.b, z2.b[3]\n"
+ "zip1 z0.s, z0.s, z30.s\n"
+ "mov z30.d, z3.d\n"
+ "udot z25.s, z22.b, z1.b[0]\n"
+ "zip1 z3.s, z3.s, z28.s\n"
+ "udot z26.s, z22.b, z1.b[1]\n"
+ "udot z29.s, z22.b, z1.b[2]\n"
+ "ext z30.b, z30.b, z30.b, #0x6\n"
+ "udot z14.s, z22.b, z1.b[3]\n"
+ "udot z23.s, z22.b, z2.b[0]\n"
+ "udot z11.s, z22.b, z2.b[1]\n"
+ "udot z15.s, z22.b, z2.b[2]\n"
"mov z0.q, z0.q[0]\n"
- "udot z20.s, z13.b, z4.b[0]\n"
- "udot z18.s, z13.b, z4.b[1]\n"
- "mov z3.q, z3.q[0]\n"
- "udot z17.s, z13.b, z4.b[2]\n"
- "udot z16.s, z13.b, z4.b[3]\n"
- "mov z31.s, #0x0\n"
- "mov z30.s, #0x0\n"
- "mov z26.s, #0x0\n"
- "udot z31.s, z13.b, z0.b[0]\n"
- "mov z27.s, #0x0\n"
+ "udot z31.s, z22.b, z2.b[3]\n"
+ "udot z17.s, z22.b, z4.b[0]\n"
"mov z28.s, #0x0\n"
- "udot z30.s, z13.b, z0.b[1]\n"
- "mov z29.s, #0x0\n"
- "udot z26.s, z13.b, z0.b[2]\n"
- "udot z27.s, z13.b, z0.b[3]\n"
- "udot z28.s, z13.b, z3.b[0]\n"
- "udot z29.s, z13.b, z3.b[1]\n"
- "add z24.s, z24.s, z21.s\n"
- "add z25.s, z25.s, z19.s\n"
- "add z23.s, z23.s, z10.s\n"
- "add z22.s, z22.s, z8.s\n"
- "add z21.s, z20.s, z21.s\n"
+ "zip1 z9.s, z9.s, z30.s\n"
+ "udot z20.s, z22.b, z4.b[1]\n"
+ "udot z21.s, z22.b, z4.b[2]\n"
+ "udot z24.s, z22.b, z4.b[3]\n"
+ "mov z30.s, #0x0\n"
+ "udot z12.s, z22.b, z0.b[0]\n"
+ "udot z27.s, z22.b, z0.b[1]\n"
+ "udot z18.s, z22.b, z0.b[2]\n"
+ "add z25.s, z25.s, z23.s\n"
+ "zip1 z3.s, z3.s, z9.s\n"
+ "mov z9.s, #0x0\n"
+ "udot z28.s, z22.b, z0.b[3]\n"
+ "add z26.s, z26.s, z11.s\n"
+ "add z29.s, z29.s, z15.s\n"
+ "add z14.s, z14.s, z31.s\n"
+ "add z23.s, z17.s, z23.s\n"
+ "mov z3.q, z3.q[0]\n"
+ "mov z17.s, #0x0\n"
+ "add z11.s, z20.s, z11.s\n"
"mov z20.s, #0x0\n"
- "udot z20.s, z13.b, z3.b[2]\n"
- "add z19.s, z18.s, z19.s\n"
- "mov z18.s, #0x0\n"
- "udot z18.s, z13.b, z3.b[3]\n"
- "add z17.s, z17.s, z10.s\n"
- "add z16.s, z16.s, z8.s\n"
- "add z24.s, z24.s, z31.s\n"
- "add z25.s, z25.s, z30.s\n"
- "mul z24.s, p2/M, z24.s, z11.s\n"
- "mul z25.s, p2/M, z25.s, z11.s\n"
- "add z26.s, z23.s, z26.s\n"
- "add z27.s, z22.s, z27.s\n"
- "mul z26.s, p2/M, z26.s, z11.s\n"
- "mul z27.s, p2/M, z27.s, z11.s\n"
- "add z28.s, z21.s, z28.s\n"
- "add z29.s, z19.s, z29.s\n"
- "mul z28.s, p2/M, z28.s, z11.s\n"
- "mul z29.s, p2/M, z29.s, z11.s\n"
- "add z30.s, z17.s, z20.s\n"
- "add z31.s, z16.s, z18.s\n"
- "mul z30.s, p2/M, z30.s, z11.s\n"
- "mul z31.s, p2/M, z31.s, z11.s\n"
- "zip1 z19.s, z24.s, z26.s\n"
- "zip1 z18.s, z25.s, z27.s\n"
+ "udot z30.s, z22.b, z3.b[0]\n"
+ "udot z9.s, z22.b, z3.b[1]\n"
+ "udot z17.s, z22.b, z3.b[2]\n"
+ "add z15.s, z21.s, z15.s\n"
+ "udot z20.s, z22.b, z3.b[3]\n"
+ "add z31.s, z24.s, z31.s\n"
+ "add z24.s, z25.s, z12.s\n"
+ "add z25.s, z26.s, z27.s\n"
+ "add z26.s, z29.s, z18.s\n"
+ "add z27.s, z14.s, z28.s\n"
+ "add z28.s, z23.s, z30.s\n"
+ "add z29.s, z11.s, z9.s\n"
+ "add z30.s, z15.s, z17.s\n"
+ "add z31.s, z31.s, z20.s\n"
+ "mul z24.s, p2/M, z24.s, z16.s\n"
+ "mul z25.s, p2/M, z25.s, z16.s\n"
+ "mul z26.s, p2/M, z26.s, z16.s\n"
+ "mul z27.s, p2/M, z27.s, z16.s\n"
+ "mul z28.s, p2/M, z28.s, z16.s\n"
+ "mul z29.s, p2/M, z29.s, z16.s\n"
+ "mul z30.s, p2/M, z30.s, z16.s\n"
+ "mul z31.s, p2/M, z31.s, z16.s\n"
+ "zip1 z21.s, z24.s, z26.s\n"
+ "add z24.s, z24.s, z8.s\n"
+ "zip1 z23.s, z25.s, z27.s\n"
+ "add z25.s, z25.s, z8.s\n"
+ "add z26.s, z26.s, z8.s\n"
+ "add z27.s, z27.s, z8.s\n"
"zip1 z17.s, z28.s, z30.s\n"
"zip1 z16.s, z29.s, z31.s\n"
- "zip1 z22.s, z19.s, z18.s\n"
+ "zip1 z22.s, z21.s, z23.s\n"
+ "add z28.s, z28.s, z8.s\n"
+ "add z29.s, z29.s, z8.s\n"
+ "add z30.s, z30.s, z8.s\n"
"zip1 z23.s, z17.s, z16.s\n"
- "add z24.s, z24.s, z14.s\n"
- "add z25.s, z25.s, z14.s\n"
- "add z26.s, z26.s, z14.s\n"
- "add z27.s, z27.s, z14.s\n"
- "add z28.s, z28.s, z14.s\n"
- "add z29.s, z29.s, z14.s\n"
- "add z30.s, z30.s, z14.s\n"
- "add z31.s, z31.s, z14.s\n"
+ "add z31.s, z31.s, z8.s\n"
"1:" // Loop
"udot z24.s, z5.b, z0.b[0]\n"
"udot z25.s, z5.b, z0.b[1]\n"
- "ld1w { z8.s }, p2/Z, [%x[params]]\n"
+ "ld1w { z15.s }, p2/Z, [%x[params]]\n"
"ld1w { z21.s }, p2/Z, [%x[params], #1, MUL VL]\n"
"udot z26.s, z5.b, z0.b[2]\n"
"udot z27.s, z5.b, z0.b[3]\n"
"incb x9\n"
"whilelt p1.s, x28, %x[n_channels]\n"
+ "udot z28.s, z5.b, z2.b[0]\n"
+ "udot z29.s, z5.b, z2.b[1]\n"
+ "udot z30.s, z5.b, z2.b[2]\n"
+ "udot z31.s, z5.b, z2.b[3]\n"
"udot z24.s, z6.b, z1.b[0]\n"
"udot z25.s, z6.b, z1.b[1]\n"
"whilelt p0.b, x9, x10\n"
- "ld1w { z20.s }, p0/Z, [%x[params], #2, MUL VL]\n"
"udot z26.s, z6.b, z1.b[2]\n"
"udot z27.s, z6.b, z1.b[3]\n"
- "udot z28.s, z5.b, z2.b[0]\n"
- "udot z29.s, z5.b, z2.b[1]\n"
- "udot z30.s, z5.b, z2.b[2]\n"
- "udot z31.s, z5.b, z2.b[3]\n"
- "ld1b { z5.b }, p0/Z, [%x[params], #3, MUL VL]\n"
- "udot z24.s, z7.b, z2.b[0]\n"
- "udot z25.s, z7.b, z2.b[1]\n"
- ".inst 0x04a87718 // sqrdmulh z24.s, z24.s, z8.s\n"
- "udot z26.s, z7.b, z2.b[2]\n"
- "udot z27.s, z7.b, z2.b[3]\n"
- ".inst 0x04a87739 // sqrdmulh z25.s, z25.s, z8.s\n"
"udot z28.s, z6.b, z3.b[0]\n"
"udot z29.s, z6.b, z3.b[1]\n"
- ".inst 0x04a8775a // sqrdmulh z26.s, z26.s, z8.s\n"
"udot z30.s, z6.b, z3.b[2]\n"
"udot z31.s, z6.b, z3.b[3]\n"
- ".inst 0x04a8777b // sqrdmulh z27.s, z27.s, z8.s\n"
+ "ld1w { z20.s }, p0/Z, [%x[params], #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [%x[params], #3, MUL VL]\n"
+ "udot z24.s, z7.b, z2.b[0]\n"
+ "udot z25.s, z7.b, z2.b[1]\n"
"ld1b { z6.b }, p0/Z, [%x[params], #4, MUL VL]\n"
+ "udot z26.s, z7.b, z2.b[2]\n"
+ "udot z27.s, z7.b, z2.b[3]\n"
"udot z28.s, z7.b, z4.b[0]\n"
"udot z29.s, z7.b, z4.b[1]\n"
- "and z19.d, z24.d, z21.d\n"
"udot z30.s, z7.b, z4.b[2]\n"
"udot z31.s, z7.b, z4.b[3]\n"
- "and z18.d, z25.d, z21.d\n"
"ld1b { z7.b }, p0/Z, [%x[params], #5, MUL VL]\n"
+ "addvl %x[params], %x[params], #6\n"
+ ".inst 0x04af7718 // sqrdmulh z24.s, z24.s, z15.s\n"
+ ".inst 0x04af7739 // sqrdmulh z25.s, z25.s, z15.s\n"
+ ".inst 0x04af775a // sqrdmulh z26.s, z26.s, z15.s\n"
+ ".inst 0x04af777b // sqrdmulh z27.s, z27.s, z15.s\n"
+ ".inst 0x04af779c // sqrdmulh z28.s, z28.s, z15.s\n"
+ ".inst 0x04af77bd // sqrdmulh z29.s, z29.s, z15.s\n"
+ "and z14.d, z24.d, z21.d\n"
+ "and z12.d, z25.d, z21.d\n"
"and z17.d, z26.d, z21.d\n"
"and z16.d, z27.d, z21.d\n"
- "addvl %x[params], %x[params], #6\n"
- "asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04af77de // sqrdmulh z30.s, z30.s, z15.s\n"
+ ".inst 0x04af77ff // sqrdmulh z31.s, z31.s, z15.s\n"
+ "asr z14.s, z14.s, #0x1f\n"
+ "asr z12.s, z12.s, #0x1f\n"
"asr z17.s, z17.s, #0x1f\n"
"asr z16.s, z16.s, #0x1f\n"
- ".inst 0x04a8779c // sqrdmulh z28.s, z28.s, z8.s\n"
- ".inst 0x04a877bd // sqrdmulh z29.s, z29.s, z8.s\n"
- ".inst 0x04a877de // sqrdmulh z30.s, z30.s, z8.s\n"
- ".inst 0x04a877ff // sqrdmulh z31.s, z31.s, z8.s\n"
- "sqadd z24.s, z24.s, z19.s\n"
- "sqadd z25.s, z25.s, z18.s\n"
- ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
- ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
+ "sqadd z24.s, z24.s, z14.s\n"
+ "and z14.d, z28.d, z21.d\n"
+ "sqadd z25.s, z25.s, z12.s\n"
+ "and z11.d, z29.d, z21.d\n"
"sqadd z26.s, z26.s, z17.s\n"
"sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
- ".inst 0x44828abb // srshl z27.s, p2/M, z27.s, z21.s\n"
- "and z19.d, z28.d, z21.d\n"
- "and z18.d, z29.d, z21.d\n"
"and z17.d, z30.d, z21.d\n"
"and z16.d, z31.d, z21.d\n"
- "asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
+ "asr z14.s, z14.s, #0x1f\n"
+ "asr z11.s, z11.s, #0x1f\n"
+ ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
+ ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
+ ".inst 0x44828abb // srshl z27.s, p2/M, z27.s, z21.s\n"
"asr z17.s, z17.s, #0x1f\n"
"asr z16.s, z16.s, #0x1f\n"
- "sqadd z28.s, z28.s, z19.s\n"
- "sqadd z29.s, z29.s, z18.s\n"
- ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
- ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
+ "add z24.s, z24.s, z10.s\n"
+ "sqadd z28.s, z28.s, z14.s\n"
+ "sqadd z29.s, z29.s, z11.s\n"
+ "add z25.s, z25.s, z10.s\n"
"sqadd z30.s, z30.s, z17.s\n"
"sqadd z31.s, z31.s, z16.s\n"
+ ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
+ "add z26.s, z26.s, z10.s\n"
+ "add z27.s, z27.s, z10.s\n"
+ "smin z24.s, p2/M, z24.s, z19.s\n"
+ ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
+ "smin z25.s, p2/M, z25.s, z19.s\n"
".inst 0x44828abe // srshl z30.s, p2/M, z30.s, z21.s\n"
".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
- "add z24.s, z24.s, z9.s\n"
- "add z25.s, z25.s, z9.s\n"
- "smin z24.s, p2/M, z24.s, z12.s\n"
- "smin z25.s, p2/M, z25.s, z12.s\n"
- "add z26.s, z26.s, z9.s\n"
- "add z27.s, z27.s, z9.s\n"
- "smin z26.s, p2/M, z26.s, z12.s\n"
- "smin z27.s, p2/M, z27.s, z12.s\n"
- "add z28.s, z28.s, z9.s\n"
- "add z29.s, z29.s, z9.s\n"
- "smin z28.s, p2/M, z28.s, z12.s\n"
- "smin z29.s, p2/M, z29.s, z12.s\n"
- "add z30.s, z30.s, z9.s\n"
- "add z31.s, z31.s, z9.s\n"
- "smin z30.s, p2/M, z30.s, z12.s\n"
- "smin z31.s, p2/M, z31.s, z12.s\n"
- "smax z24.s, p2/M, z24.s, z15.s\n"
- "smax z25.s, p2/M, z25.s, z15.s\n"
+ "add z28.s, z28.s, z10.s\n"
+ "add z29.s, z29.s, z10.s\n"
+ "smin z26.s, p2/M, z26.s, z19.s\n"
+ "smin z27.s, p2/M, z27.s, z19.s\n"
+ "smax z24.s, p2/M, z24.s, z13.s\n"
+ "add z30.s, z30.s, z10.s\n"
+ "smax z25.s, p2/M, z25.s, z13.s\n"
+ "add z31.s, z31.s, z10.s\n"
+ "smin z28.s, p2/M, z28.s, z19.s\n"
+ "smin z29.s, p2/M, z29.s, z19.s\n"
+ "smax z26.s, p2/M, z26.s, z13.s\n"
+ "smin z30.s, p2/M, z30.s, z19.s\n"
+ "smax z27.s, p2/M, z27.s, z13.s\n"
"st1b { z24.s }, p1, [x27, x28]\n"
"mov z24.s, z22.s[0]\n"
- "smax z26.s, p2/M, z26.s, z15.s\n"
- "smax z27.s, p2/M, z27.s, z15.s\n"
+ "smin z31.s, p2/M, z31.s, z19.s\n"
+ "smax z28.s, p2/M, z28.s, z13.s\n"
"st1b { z25.s }, p1, [x26, x28]\n"
"mov z25.s, z22.s[1]\n"
- "smax z28.s, p2/M, z28.s, z15.s\n"
- "smax z29.s, p2/M, z29.s, z15.s\n"
+ "smax z29.s, p2/M, z29.s, z13.s\n"
"st1b { z26.s }, p1, [x25, x28]\n"
"mov z26.s, z22.s[2]\n"
- "smax z30.s, p2/M, z30.s, z15.s\n"
- "smax z31.s, p2/M, z31.s, z15.s\n"
+ "add z24.s, z24.s, z20.s\n"
+ "smax z30.s, p2/M, z30.s, z13.s\n"
"st1b { z27.s }, p1, [x24, x28]\n"
"mov z27.s, z22.s[3]\n"
+ "add z25.s, z25.s, z20.s\n"
+ "smax z31.s, p2/M, z31.s, z13.s\n"
"st1b { z28.s }, p1, [x23, x28]\n"
"mov z28.s, z23.s[0]\n"
- "add z24.s, z24.s, z20.s\n"
+ "add z26.s, z26.s, z20.s\n"
"st1b { z29.s }, p1, [x22, x28]\n"
"mov z29.s, z23.s[1]\n"
- "add z25.s, z25.s, z20.s\n"
+ "add z27.s, z27.s, z20.s\n"
"st1b { z30.s }, p1, [x21, x28]\n"
"mov z30.s, z23.s[2]\n"
- "add z26.s, z26.s, z20.s\n"
+ "add z28.s, z28.s, z20.s\n"
"st1b { z31.s }, p1, [x20, x28]\n"
"mov z31.s, z23.s[3]\n"
"incw x28\n"
- "add z27.s, z27.s, z20.s\n"
- "add z28.s, z28.s, z20.s\n"
"add z29.s, z29.s, z20.s\n"
"add z30.s, z30.s, z20.s\n"
"add z31.s, z31.s, z20.s\n"
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
index 4b65a67309..9149db7a0a 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,353 +42,353 @@ void sve_u8q_packed_to_nhwc_5x5_s1_with_multiplier_output4x2_dot_depthfirst_impl
{
__asm__ __volatile__(
"mov x20, #0x6\n"
- "whilelt p0.b, XZR, x20\n"
- "ldr x22, [%x[inptrs], #0x18]\n"
- "ldr x21, [%x[inptrs], #0x20]\n"
- "ldr x20, [%x[inptrs], #0x10]\n"
- "ld1b { z3.b }, p0/Z, [x22]\n"
- "mov z23.d, z3.d\n"
- "ext z23.b, z23.b, z23.b, #0x1\n"
- "ld1b { z4.b }, p0/Z, [x21]\n"
+ "ldr x27, [%x[inptrs], #0x18]\n"
+ "ldr x26, [%x[inptrs], #0x20]\n"
+ "mov z30.b, #0x1\n"
+ "ldr x25, [%x[inptrs], #0x10]\n"
"ldr x24, [%x[inptrs], #0x8]\n"
- "mov z18.d, z4.d\n"
- "ext z18.b, z18.b, z18.b, #0x1\n"
- "ld1b { z2.b }, p0/Z, [x20]\n"
+ "mov z14.s, #0x0\n"
+ "mov z27.s, #0x0\n"
"ldr x23, [%x[inptrs], #0x28]\n"
- "mov z15.d, z2.d\n"
- "ext z15.b, z15.b, z15.b, #0x1\n"
"ldr x22, [%x[inptrs], #0x30]\n"
+ "mov z11.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ "whilelt p0.b, XZR, x20\n"
"ldr x21, [%x[inptrs], #0x38]\n"
- "zip1 z3.d, z3.d, z23.d\n"
- "zip1 z4.d, z4.d, z18.d\n"
"ldr x20, [%x[inptrs], #0x0]\n"
+ "mov z28.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "mov z21.s, #0x1\n"
+ "ptrue p2.b\n"
+ "lsl x10, %x[n_channels], #0x2\n"
+ "mov z24.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "mov x9, #0x0\n"
+ "mov x28, #0x0\n"
+ "ld1b { z3.b }, p0/Z, [x27]\n"
+ "ld1b { z4.b }, p0/Z, [x26]\n"
+ "mov z31.s, #0x0\n"
+ "mov z29.s, #0x0\n"
+ "ld1b { z2.b }, p0/Z, [x25]\n"
"ld1b { z1.b }, p0/Z, [x24]\n"
- "mov z19.d, z1.d\n"
- "ext z19.b, z19.b, z19.b, #0x1\n"
+ "mov z20.s, #0x0\n"
+ "mov z17.s, #0x0\n"
"ld1b { z5.b }, p0/Z, [x23]\n"
"ld1b { z6.b }, p0/Z, [x22]\n"
- "mov z18.d, z5.d\n"
- "mov z22.d, z6.d\n"
+ "mov z18.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z16.d, z3.d\n"
+ "mov z13.d, z4.d\n"
"ld1b { z7.b }, p0/Z, [x21]\n"
"ld1b { z0.b }, p0/Z, [x20]\n"
- "mov z8.d, z7.d\n"
- "zip1 z2.d, z2.d, z15.d\n"
- "mov z3.q, z3.q[0]\n"
- "mov z4.q, z4.q[0]\n"
- "ptrue p2.b\n"
- "ld1rw { z23.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
- "ext z18.b, z18.b, z18.b, #0x1\n"
- "ext z22.b, z22.b, z22.b, #0x1\n"
- "lsl x10, %x[n_channels], #0x2\n"
- "neg z23.s, p2/M, z23.s\n"
- "ext z8.b, z8.b, z8.b, #0x1\n"
- "mov z28.b, #0x1\n"
- "mov x9, #0x0\n"
+ "mov z12.d, z2.d\n"
+ "mov z19.d, z1.d\n"
+ "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_b_offset]]\n"
"whilelt p0.b, x9, x10\n"
- "mov z25.s, #0x0\n"
- "mov z24.s, #0x0\n"
- "udot z25.s, z28.b, z3.b[0]\n"
- "ld1w { z12.s }, p0/Z, [%x[params]]\n"
- "mov z17.s, #0x0\n"
- "mov z16.s, #0x0\n"
- "udot z24.s, z28.b, z3.b[2]\n"
- "mov x28, #0x0\n"
- "mov z27.d, z0.d\n"
- "udot z17.s, z28.b, z4.b[0]\n"
- "udot z16.s, z28.b, z4.b[2]\n"
+ "ext z16.b, z16.b, z16.b, #0x1\n"
+ "ext z13.b, z13.b, z13.b, #0x1\n"
"ldp x27, x26, [%x[outptrs], #0x0]\n"
- "ext z27.b, z27.b, z27.b, #0x1\n"
- "zip1 z1.d, z1.d, z19.d\n"
"ldp x25, x24, [%x[outptrs], #0x10]\n"
+ "ext z12.b, z12.b, z12.b, #0x1\n"
+ "mov z8.d, z5.d\n"
"ldp x23, x22, [%x[outptrs], #0x20]\n"
- "mov z2.q, z2.q[0]\n"
- "zip1 z5.d, z5.d, z18.d\n"
"ldp x21, x20, [%x[outptrs], #0x30]\n"
- "ld1rw { z13.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
- "zip1 z6.d, z6.d, z22.d\n"
- "zip1 z7.d, z7.d, z8.d\n"
- "ld1rw { z14.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z15.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
- "mov z30.s, #0x0\n"
- "mov z31.s, #0x0\n"
- "udot z30.s, z28.b, z2.b[0]\n"
+ "mov z10.d, z6.d\n"
+ "mov z9.d, z7.d\n"
+ "neg z15.s, p2/M, z15.s\n"
+ "zip1 z3.d, z3.d, z16.d\n"
+ "zip1 z4.d, z4.d, z13.d\n"
+ "ld1w { z13.s }, p0/Z, [%x[params]]\n"
+ "ld1rw { z16.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_c_offset]]\n"
+ "ext z19.b, z19.b, z19.b, #0x1\n"
+ "zip1 z2.d, z2.d, z12.d\n"
+ "ld1rw { z12.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_minval]]\n"
+ "ext z8.b, z8.b, z8.b, #0x1\n"
+ "ext z10.b, z10.b, z10.b, #0x1\n"
+ "mov z3.q, z3.q[0]\n"
+ "mov z4.q, z4.q[0]\n"
+ "ext z9.b, z9.b, z9.b, #0x1\n"
+ "zip1 z1.d, z1.d, z19.d\n"
+ "ld1rw { z19.s }, p2/Z, [%x[qp], %[offsetof_Requantize32_maxval]]\n"
+ "udot z14.s, z30.b, z3.b[0]\n"
+ "udot z27.s, z30.b, z3.b[2]\n"
+ "udot z11.s, z30.b, z4.b[0]\n"
+ "mov z2.q, z2.q[0]\n"
+ "udot z22.s, z30.b, z4.b[2]\n"
+ "zip1 z5.d, z5.d, z8.d\n"
"ld1b { z8.b }, p0/Z, [%x[params], #1, MUL VL]\n"
- "mov z29.s, #0x1\n"
- "udot z31.s, z28.b, z2.b[2]\n"
- "udot z25.s, z29.b, z3.b[1]\n"
+ "zip1 z6.d, z6.d, z10.d\n"
+ "mov z10.d, z0.d\n"
+ "udot z28.s, z30.b, z2.b[0]\n"
+ "zip1 z7.d, z7.d, z9.d\n"
+ "udot z25.s, z30.b, z2.b[2]\n"
+ "udot z14.s, z21.b, z3.b[1]\n"
"ld1b { z9.b }, p0/Z, [%x[params], #2, MUL VL]\n"
- "zip1 z0.d, z0.d, z27.d\n"
+ "ext z10.b, z10.b, z10.b, #0x1\n"
"mov z1.q, z1.q[0]\n"
- "udot z24.s, z29.b, z3.b[3]\n"
- "ld1b { z10.b }, p0/Z, [%x[params], #3, MUL VL]\n"
+ "udot z27.s, z21.b, z3.b[3]\n"
"mov z5.q, z5.q[0]\n"
"mov z6.q, z6.q[0]\n"
- "udot z17.s, z29.b, z4.b[1]\n"
- "ld1b { z11.b }, p0/Z, [%x[params], #4, MUL VL]\n"
+ "udot z11.s, z21.b, z4.b[1]\n"
"mov z7.q, z7.q[0]\n"
- "mov z22.s, #0x0\n"
- "udot z16.s, z29.b, z4.b[3]\n"
+ "udot z22.s, z21.b, z4.b[3]\n"
+ "udot z24.s, z30.b, z1.b[0]\n"
+ "zip1 z0.d, z0.d, z10.d\n"
+ "udot z23.s, z30.b, z1.b[2]\n"
+ "udot z31.s, z30.b, z5.b[0]\n"
+ "ld1b { z10.b }, p0/Z, [%x[params], #3, MUL VL]\n"
+ "udot z29.s, z30.b, z5.b[2]\n"
+ "udot z20.s, z30.b, z6.b[0]\n"
+ "udot z17.s, z30.b, z6.b[2]\n"
+ "udot z18.s, z30.b, z7.b[0]\n"
+ "add z14.s, z14.s, z11.s\n"
+ "ld1b { z11.b }, p0/Z, [%x[params], #4, MUL VL]\n"
+ "udot z26.s, z30.b, z7.b[2]\n"
+ "mov z0.q, z0.q[0]\n"
+ "udot z28.s, z21.b, z2.b[1]\n"
"addvl %x[params], %x[params], #5\n"
- "mov z21.s, #0x0\n"
- "mov z26.s, #0x0\n"
- "udot z22.s, z28.b, z1.b[0]\n"
+ "udot z25.s, z21.b, z2.b[3]\n"
+ "add z22.s, z27.s, z22.s\n"
+ "udot z24.s, z21.b, z1.b[1]\n"
"mov z27.s, #0x0\n"
- "mov z20.s, #0x0\n"
- "udot z21.s, z28.b, z1.b[2]\n"
- "mov z19.s, #0x0\n"
- "mov z18.s, #0x0\n"
- "udot z26.s, z28.b, z5.b[0]\n"
- "udot z27.s, z28.b, z5.b[2]\n"
- "udot z20.s, z28.b, z6.b[0]\n"
- "mov z0.q, z0.q[0]\n"
- "udot z19.s, z28.b, z6.b[2]\n"
- "udot z18.s, z28.b, z7.b[0]\n"
- "add z17.s, z25.s, z17.s\n"
- "mov z25.s, #0x0\n"
- "udot z25.s, z28.b, z7.b[2]\n"
- "udot z30.s, z29.b, z2.b[1]\n"
- "udot z31.s, z29.b, z2.b[3]\n"
- "add z16.s, z24.s, z16.s\n"
- "udot z22.s, z29.b, z1.b[1]\n"
- "mov z24.s, #0x0\n"
- "udot z24.s, z28.b, z0.b[0]\n"
- "udot z21.s, z29.b, z1.b[3]\n"
- "udot z26.s, z29.b, z5.b[1]\n"
- "udot z27.s, z29.b, z5.b[3]\n"
- "add z30.s, z30.s, z17.s\n"
- "udot z20.s, z29.b, z6.b[1]\n"
- "udot z19.s, z29.b, z6.b[3]\n"
- "add z31.s, z31.s, z16.s\n"
- "udot z18.s, z29.b, z7.b[1]\n"
- "udot z25.s, z29.b, z7.b[3]\n"
- "add z22.s, z22.s, z30.s\n"
- "udot z24.s, z29.b, z0.b[1]\n"
- "add z21.s, z21.s, z31.s\n"
- "add z20.s, z26.s, z20.s\n"
- "add z19.s, z27.s, z19.s\n"
- "add z18.s, z18.s, z17.s\n"
- "mov z17.s, #0x0\n"
- "udot z17.s, z28.b, z0.b[2]\n"
- "udot z17.s, z29.b, z0.b[3]\n"
- "add z16.s, z25.s, z16.s\n"
- "add z24.s, z22.s, z24.s\n"
- "add z25.s, z21.s, z17.s\n"
- "mul z24.s, p2/M, z24.s, z23.s\n"
- "mul z25.s, p2/M, z25.s, z23.s\n"
- "add z26.s, z26.s, z22.s\n"
- "add z27.s, z27.s, z21.s\n"
- "mul z26.s, p2/M, z26.s, z23.s\n"
- "mul z27.s, p2/M, z27.s, z23.s\n"
- "add z28.s, z20.s, z30.s\n"
- "add z29.s, z19.s, z31.s\n"
- "mul z28.s, p2/M, z28.s, z23.s\n"
- "mul z29.s, p2/M, z29.s, z23.s\n"
+ "udot z23.s, z21.b, z1.b[3]\n"
+ "udot z31.s, z21.b, z5.b[1]\n"
+ "udot z29.s, z21.b, z5.b[3]\n"
+ "udot z20.s, z21.b, z6.b[1]\n"
+ "udot z27.s, z30.b, z0.b[0]\n"
+ "udot z17.s, z21.b, z6.b[3]\n"
+ "add z28.s, z28.s, z14.s\n"
+ "udot z18.s, z21.b, z7.b[1]\n"
+ "udot z26.s, z21.b, z7.b[3]\n"
+ "add z25.s, z25.s, z22.s\n"
+ "add z24.s, z24.s, z28.s\n"
+ "add z20.s, z31.s, z20.s\n"
+ "udot z27.s, z21.b, z0.b[1]\n"
+ "add z23.s, z23.s, z25.s\n"
+ "add z17.s, z29.s, z17.s\n"
+ "add z18.s, z18.s, z14.s\n"
+ "mov z14.s, #0x0\n"
+ "add z22.s, z26.s, z22.s\n"
+ "add z26.s, z31.s, z24.s\n"
+ "udot z14.s, z30.b, z0.b[2]\n"
+ "add z24.s, z24.s, z27.s\n"
+ "add z27.s, z29.s, z23.s\n"
+ "add z28.s, z20.s, z28.s\n"
+ "add z29.s, z17.s, z25.s\n"
"add z30.s, z20.s, z18.s\n"
- "add z31.s, z19.s, z16.s\n"
- "mul z30.s, p2/M, z30.s, z23.s\n"
- "mul z31.s, p2/M, z31.s, z23.s\n"
- "zip1 z19.s, z24.s, z26.s\n"
- "zip1 z18.s, z25.s, z27.s\n"
- "zip1 z17.s, z28.s, z30.s\n"
- "zip1 z16.s, z29.s, z31.s\n"
- "zip1 z22.s, z19.s, z18.s\n"
- "zip1 z23.s, z17.s, z16.s\n"
- "add z24.s, z24.s, z12.s\n"
- "add z25.s, z25.s, z12.s\n"
- "add z26.s, z26.s, z12.s\n"
- "add z27.s, z27.s, z12.s\n"
- "add z28.s, z28.s, z12.s\n"
- "add z29.s, z29.s, z12.s\n"
- "add z30.s, z30.s, z12.s\n"
- "add z31.s, z31.s, z12.s\n"
+ "add z31.s, z17.s, z22.s\n"
+ "mul z26.s, p2/M, z26.s, z15.s\n"
+ "udot z14.s, z21.b, z0.b[3]\n"
+ "mul z24.s, p2/M, z24.s, z15.s\n"
+ "mul z27.s, p2/M, z27.s, z15.s\n"
+ "mul z28.s, p2/M, z28.s, z15.s\n"
+ "mul z29.s, p2/M, z29.s, z15.s\n"
+ "mul z30.s, p2/M, z30.s, z15.s\n"
+ "mul z31.s, p2/M, z31.s, z15.s\n"
+ "add z25.s, z23.s, z14.s\n"
+ "zip1 z21.s, z24.s, z26.s\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z26.s, z26.s, z13.s\n"
+ "mul z25.s, p2/M, z25.s, z15.s\n"
+ "zip1 z22.s, z28.s, z30.s\n"
+ "add z28.s, z28.s, z13.s\n"
+ "zip1 z18.s, z29.s, z31.s\n"
+ "add z29.s, z29.s, z13.s\n"
+ "zip1 z14.s, z25.s, z27.s\n"
+ "add z25.s, z25.s, z13.s\n"
+ "add z27.s, z27.s, z13.s\n"
+ "add z30.s, z30.s, z13.s\n"
+ "zip1 z23.s, z22.s, z18.s\n"
+ "add z31.s, z31.s, z13.s\n"
+ "zip1 z22.s, z21.s, z14.s\n"
"1:" // Loop
"udot z24.s, z8.b, z0.b[0]\n"
"udot z25.s, z8.b, z0.b[2]\n"
- "ld1w { z12.s }, p2/Z, [%x[params], #6, MUL VL]\n"
- "ld1w { z21.s }, p2/Z, [%x[params], #7, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [%x[params], #6, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [%x[params], #7, MUL VL]\n"
"udot z26.s, z8.b, z1.b[0]\n"
"udot z27.s, z8.b, z1.b[2]\n"
"incb x9\n"
"whilelt p1.s, x28, %x[n_channels]\n"
+ "udot z28.s, z8.b, z2.b[0]\n"
+ "udot z29.s, z8.b, z2.b[2]\n"
+ "udot z30.s, z8.b, z3.b[0]\n"
+ "udot z31.s, z8.b, z3.b[2]\n"
+ "ld1b { z15.b }, p2/Z, [%x[params]]\n"
"udot z24.s, z9.b, z0.b[1]\n"
"udot z25.s, z9.b, z0.b[3]\n"
"whilelt p0.b, x9, x10\n"
"udot z26.s, z9.b, z1.b[1]\n"
"udot z27.s, z9.b, z1.b[3]\n"
- "udot z28.s, z8.b, z2.b[0]\n"
- "udot z29.s, z8.b, z2.b[2]\n"
- "udot z30.s, z8.b, z3.b[0]\n"
- "udot z31.s, z8.b, z3.b[2]\n"
- "ld1b { z17.b }, p2/Z, [%x[params]]\n"
- "udot z24.s, z10.b, z1.b[0]\n"
- "udot z25.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z27.s, z10.b, z2.b[2]\n"
"udot z28.s, z9.b, z2.b[1]\n"
"udot z29.s, z9.b, z2.b[3]\n"
"udot z30.s, z9.b, z3.b[1]\n"
"udot z31.s, z9.b, z3.b[3]\n"
- "ld1b { z16.b }, p2/Z, [%x[params], #1, MUL VL]\n"
- "udot z24.s, z11.b, z1.b[1]\n"
- "udot z25.s, z11.b, z1.b[3]\n"
- "udot z26.s, z11.b, z2.b[1]\n"
- "udot z27.s, z11.b, z2.b[3]\n"
+ "ld1b { z8.b }, p2/Z, [%x[params], #1, MUL VL]\n"
+ "udot z24.s, z10.b, z1.b[0]\n"
+ "udot z25.s, z10.b, z1.b[2]\n"
+ "udot z26.s, z10.b, z2.b[0]\n"
+ "udot z27.s, z10.b, z2.b[2]\n"
"udot z28.s, z10.b, z3.b[0]\n"
"udot z29.s, z10.b, z3.b[2]\n"
"udot z30.s, z10.b, z4.b[0]\n"
"udot z31.s, z10.b, z4.b[2]\n"
- "ld1b { z19.b }, p2/Z, [%x[params], #2, MUL VL]\n"
- "udot z24.s, z17.b, z2.b[0]\n"
- "udot z25.s, z17.b, z2.b[2]\n"
- "udot z26.s, z17.b, z3.b[0]\n"
- "udot z27.s, z17.b, z3.b[2]\n"
+ "ld1b { z21.b }, p2/Z, [%x[params], #2, MUL VL]\n"
+ "udot z24.s, z11.b, z1.b[1]\n"
+ "udot z25.s, z11.b, z1.b[3]\n"
+ "udot z26.s, z11.b, z2.b[1]\n"
+ "udot z27.s, z11.b, z2.b[3]\n"
"udot z28.s, z11.b, z3.b[1]\n"
"udot z29.s, z11.b, z3.b[3]\n"
"udot z30.s, z11.b, z4.b[1]\n"
"udot z31.s, z11.b, z4.b[3]\n"
"ld1b { z18.b }, p2/Z, [%x[params], #3, MUL VL]\n"
- "udot z24.s, z16.b, z2.b[1]\n"
- "udot z25.s, z16.b, z2.b[3]\n"
- "udot z26.s, z16.b, z3.b[1]\n"
- "udot z27.s, z16.b, z3.b[3]\n"
- "udot z28.s, z17.b, z4.b[0]\n"
- "udot z29.s, z17.b, z4.b[2]\n"
- "udot z30.s, z17.b, z5.b[0]\n"
- "udot z31.s, z17.b, z5.b[2]\n"
+ "udot z24.s, z15.b, z2.b[0]\n"
+ "udot z25.s, z15.b, z2.b[2]\n"
+ "udot z26.s, z15.b, z3.b[0]\n"
+ "udot z27.s, z15.b, z3.b[2]\n"
+ "udot z28.s, z15.b, z4.b[0]\n"
+ "udot z29.s, z15.b, z4.b[2]\n"
+ "udot z30.s, z15.b, z5.b[0]\n"
+ "udot z31.s, z15.b, z5.b[2]\n"
"ld1b { z17.b }, p2/Z, [%x[params], #4, MUL VL]\n"
- "udot z24.s, z19.b, z3.b[0]\n"
- "udot z25.s, z19.b, z3.b[2]\n"
- "udot z26.s, z19.b, z4.b[0]\n"
- "udot z27.s, z19.b, z4.b[2]\n"
- "udot z28.s, z16.b, z4.b[1]\n"
- "udot z29.s, z16.b, z4.b[3]\n"
- "udot z30.s, z16.b, z5.b[1]\n"
- "udot z31.s, z16.b, z5.b[3]\n"
- "ld1b { z16.b }, p2/Z, [%x[params], #5, MUL VL]\n"
+ "udot z24.s, z8.b, z2.b[1]\n"
+ "udot z25.s, z8.b, z2.b[3]\n"
+ "udot z26.s, z8.b, z3.b[1]\n"
+ "udot z27.s, z8.b, z3.b[3]\n"
+ "udot z28.s, z8.b, z4.b[1]\n"
+ "udot z29.s, z8.b, z4.b[3]\n"
+ "udot z30.s, z8.b, z5.b[1]\n"
+ "udot z31.s, z8.b, z5.b[3]\n"
+ "ld1b { z9.b }, p2/Z, [%x[params], #5, MUL VL]\n"
"addvl %x[params], %x[params], #16\n"
+ "udot z24.s, z21.b, z3.b[0]\n"
+ "udot z25.s, z21.b, z3.b[2]\n"
+ "udot z26.s, z21.b, z4.b[0]\n"
+ "udot z27.s, z21.b, z4.b[2]\n"
+ "udot z28.s, z21.b, z5.b[0]\n"
+ "udot z29.s, z21.b, z5.b[2]\n"
+ "ld1w { z14.s }, p0/Z, [%x[params], #-8, MUL VL]\n"
+ "udot z30.s, z21.b, z6.b[0]\n"
+ "udot z31.s, z21.b, z6.b[2]\n"
+ "ld1b { z10.b }, p0/Z, [%x[params], #-5, MUL VL]\n"
"udot z24.s, z18.b, z3.b[1]\n"
"udot z25.s, z18.b, z3.b[3]\n"
- "ld1w { z20.s }, p0/Z, [%x[params], #-8, MUL VL]\n"
"udot z26.s, z18.b, z4.b[1]\n"
"udot z27.s, z18.b, z4.b[3]\n"
- "udot z28.s, z19.b, z5.b[0]\n"
- "udot z29.s, z19.b, z5.b[2]\n"
- "udot z30.s, z19.b, z6.b[0]\n"
- "udot z31.s, z19.b, z6.b[2]\n"
- "ld1b { z10.b }, p0/Z, [%x[params], #-5, MUL VL]\n"
- "udot z24.s, z17.b, z4.b[0]\n"
- "udot z25.s, z17.b, z4.b[2]\n"
- "udot z26.s, z17.b, z5.b[0]\n"
- "udot z27.s, z17.b, z5.b[2]\n"
"udot z28.s, z18.b, z5.b[1]\n"
"udot z29.s, z18.b, z5.b[3]\n"
"udot z30.s, z18.b, z6.b[1]\n"
"udot z31.s, z18.b, z6.b[3]\n"
"ld1b { z11.b }, p0/Z, [%x[params], #-4, MUL VL]\n"
- "udot z24.s, z16.b, z4.b[1]\n"
- "udot z25.s, z16.b, z4.b[3]\n"
- ".inst 0x04ac7718 // sqrdmulh z24.s, z24.s, z12.s\n"
- "udot z26.s, z16.b, z5.b[1]\n"
- "udot z27.s, z16.b, z5.b[3]\n"
- ".inst 0x04ac7739 // sqrdmulh z25.s, z25.s, z12.s\n"
+ "udot z24.s, z17.b, z4.b[0]\n"
+ "udot z25.s, z17.b, z4.b[2]\n"
+ "udot z26.s, z17.b, z5.b[0]\n"
+ "udot z27.s, z17.b, z5.b[2]\n"
"udot z28.s, z17.b, z6.b[0]\n"
"udot z29.s, z17.b, z6.b[2]\n"
- ".inst 0x04ac775a // sqrdmulh z26.s, z26.s, z12.s\n"
"udot z30.s, z17.b, z7.b[0]\n"
"udot z31.s, z17.b, z7.b[2]\n"
- ".inst 0x04ac777b // sqrdmulh z27.s, z27.s, z12.s\n"
"ld1b { z8.b }, p0/Z, [%x[params], #-7, MUL VL]\n"
- "udot z28.s, z16.b, z6.b[1]\n"
- "udot z29.s, z16.b, z6.b[3]\n"
- "and z19.d, z24.d, z21.d\n"
- "udot z30.s, z16.b, z7.b[1]\n"
- "udot z31.s, z16.b, z7.b[3]\n"
- "and z18.d, z25.d, z21.d\n"
+ "udot z24.s, z9.b, z4.b[1]\n"
+ "udot z25.s, z9.b, z4.b[3]\n"
+ "udot z26.s, z9.b, z5.b[1]\n"
+ "udot z27.s, z9.b, z5.b[3]\n"
+ "udot z28.s, z9.b, z6.b[1]\n"
+ "udot z29.s, z9.b, z6.b[3]\n"
+ "udot z30.s, z9.b, z7.b[1]\n"
+ "udot z31.s, z9.b, z7.b[3]\n"
"ld1b { z9.b }, p0/Z, [%x[params], #-6, MUL VL]\n"
- "and z17.d, z26.d, z21.d\n"
- "and z16.d, z27.d, z21.d\n"
"addvl %x[params], %x[params], #-3\n"
- "asr z19.s, z19.s, #0x1f\n"
- "asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x04b47718 // sqrdmulh z24.s, z24.s, z20.s\n"
+ ".inst 0x04b47739 // sqrdmulh z25.s, z25.s, z20.s\n"
+ ".inst 0x04b4775a // sqrdmulh z26.s, z26.s, z20.s\n"
+ ".inst 0x04b4777b // sqrdmulh z27.s, z27.s, z20.s\n"
+ ".inst 0x04b4779c // sqrdmulh z28.s, z28.s, z20.s\n"
+ ".inst 0x04b477bd // sqrdmulh z29.s, z29.s, z20.s\n"
+ "and z17.d, z24.d, z13.d\n"
+ "and z18.d, z25.d, z13.d\n"
+ "and z15.d, z26.d, z13.d\n"
+ "and z21.d, z27.d, z13.d\n"
+ ".inst 0x04b477de // sqrdmulh z30.s, z30.s, z20.s\n"
+ ".inst 0x04b477ff // sqrdmulh z31.s, z31.s, z20.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- ".inst 0x04ac779c // sqrdmulh z28.s, z28.s, z12.s\n"
- ".inst 0x04ac77bd // sqrdmulh z29.s, z29.s, z12.s\n"
- ".inst 0x04ac77de // sqrdmulh z30.s, z30.s, z12.s\n"
- ".inst 0x04ac77ff // sqrdmulh z31.s, z31.s, z12.s\n"
- "sqadd z24.s, z24.s, z19.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "asr z21.s, z21.s, #0x1f\n"
+ "sqadd z24.s, z24.s, z17.s\n"
+ "and z20.d, z28.d, z13.d\n"
"sqadd z25.s, z25.s, z18.s\n"
- ".inst 0x44828ab8 // srshl z24.s, p2/M, z24.s, z21.s\n"
- ".inst 0x44828ab9 // srshl z25.s, p2/M, z25.s, z21.s\n"
- "sqadd z26.s, z26.s, z17.s\n"
- "sqadd z27.s, z27.s, z16.s\n"
- ".inst 0x44828aba // srshl z26.s, p2/M, z26.s, z21.s\n"
- ".inst 0x44828abb // srshl z27.s, p2/M, z27.s, z21.s\n"
- "and z19.d, z28.d, z21.d\n"
- "and z18.d, z29.d, z21.d\n"
- "and z17.d, z30.d, z21.d\n"
- "and z16.d, z31.d, z21.d\n"
- "asr z19.s, z19.s, #0x1f\n"
+ "and z18.d, z29.d, z13.d\n"
+ "sqadd z26.s, z26.s, z15.s\n"
+ "sqadd z27.s, z27.s, z21.s\n"
+ "and z17.d, z30.d, z13.d\n"
+ "and z15.d, z31.d, z13.d\n"
+ ".inst 0x448289b8 // srshl z24.s, p2/M, z24.s, z13.s\n"
+ "asr z20.s, z20.s, #0x1f\n"
"asr z18.s, z18.s, #0x1f\n"
+ ".inst 0x448289b9 // srshl z25.s, p2/M, z25.s, z13.s\n"
+ ".inst 0x448289ba // srshl z26.s, p2/M, z26.s, z13.s\n"
+ ".inst 0x448289bb // srshl z27.s, p2/M, z27.s, z13.s\n"
"asr z17.s, z17.s, #0x1f\n"
- "asr z16.s, z16.s, #0x1f\n"
- "sqadd z28.s, z28.s, z19.s\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "add z24.s, z24.s, z16.s\n"
+ "sqadd z28.s, z28.s, z20.s\n"
"sqadd z29.s, z29.s, z18.s\n"
- ".inst 0x44828abc // srshl z28.s, p2/M, z28.s, z21.s\n"
- ".inst 0x44828abd // srshl z29.s, p2/M, z29.s, z21.s\n"
+ "add z25.s, z25.s, z16.s\n"
"sqadd z30.s, z30.s, z17.s\n"
- "sqadd z31.s, z31.s, z16.s\n"
- ".inst 0x44828abe // srshl z30.s, p2/M, z30.s, z21.s\n"
- ".inst 0x44828abf // srshl z31.s, p2/M, z31.s, z21.s\n"
- "add z24.s, z24.s, z13.s\n"
- "add z25.s, z25.s, z13.s\n"
- "smin z24.s, p2/M, z24.s, z15.s\n"
- "smin z25.s, p2/M, z25.s, z15.s\n"
- "add z26.s, z26.s, z13.s\n"
- "add z27.s, z27.s, z13.s\n"
- "smin z26.s, p2/M, z26.s, z15.s\n"
- "smin z27.s, p2/M, z27.s, z15.s\n"
- "add z28.s, z28.s, z13.s\n"
- "add z29.s, z29.s, z13.s\n"
- "smin z28.s, p2/M, z28.s, z15.s\n"
- "smin z29.s, p2/M, z29.s, z15.s\n"
- "add z30.s, z30.s, z13.s\n"
- "add z31.s, z31.s, z13.s\n"
- "smin z30.s, p2/M, z30.s, z15.s\n"
- "smin z31.s, p2/M, z31.s, z15.s\n"
- "smax z24.s, p2/M, z24.s, z14.s\n"
- "smax z25.s, p2/M, z25.s, z14.s\n"
+ "sqadd z31.s, z31.s, z15.s\n"
+ ".inst 0x448289bc // srshl z28.s, p2/M, z28.s, z13.s\n"
+ "add z26.s, z26.s, z16.s\n"
+ "add z27.s, z27.s, z16.s\n"
+ "smin z24.s, p2/M, z24.s, z19.s\n"
+ ".inst 0x448289bd // srshl z29.s, p2/M, z29.s, z13.s\n"
+ "smin z25.s, p2/M, z25.s, z19.s\n"
+ ".inst 0x448289be // srshl z30.s, p2/M, z30.s, z13.s\n"
+ ".inst 0x448289bf // srshl z31.s, p2/M, z31.s, z13.s\n"
+ "add z28.s, z28.s, z16.s\n"
+ "add z29.s, z29.s, z16.s\n"
+ "smin z26.s, p2/M, z26.s, z19.s\n"
+ "smin z27.s, p2/M, z27.s, z19.s\n"
+ "smax z24.s, p2/M, z24.s, z12.s\n"
+ "add z30.s, z30.s, z16.s\n"
+ "smax z25.s, p2/M, z25.s, z12.s\n"
+ "add z31.s, z31.s, z16.s\n"
+ "smin z28.s, p2/M, z28.s, z19.s\n"
+ "smin z29.s, p2/M, z29.s, z19.s\n"
+ "smax z26.s, p2/M, z26.s, z12.s\n"
+ "smin z30.s, p2/M, z30.s, z19.s\n"
+ "smax z27.s, p2/M, z27.s, z12.s\n"
"st1b { z24.s }, p1, [x27, x28]\n"
"mov z24.s, z22.s[0]\n"
- "smax z26.s, p2/M, z26.s, z14.s\n"
- "smax z27.s, p2/M, z27.s, z14.s\n"
+ "smin z31.s, p2/M, z31.s, z19.s\n"
+ "smax z28.s, p2/M, z28.s, z12.s\n"
"st1b { z25.s }, p1, [x26, x28]\n"
"mov z25.s, z22.s[1]\n"
- "smax z28.s, p2/M, z28.s, z14.s\n"
- "smax z29.s, p2/M, z29.s, z14.s\n"
+ "smax z29.s, p2/M, z29.s, z12.s\n"
"st1b { z26.s }, p1, [x25, x28]\n"
"mov z26.s, z22.s[2]\n"
- "smax z30.s, p2/M, z30.s, z14.s\n"
- "smax z31.s, p2/M, z31.s, z14.s\n"
+ "add z24.s, z24.s, z14.s\n"
+ "smax z30.s, p2/M, z30.s, z12.s\n"
"st1b { z27.s }, p1, [x24, x28]\n"
"mov z27.s, z22.s[3]\n"
+ "add z25.s, z25.s, z14.s\n"
+ "smax z31.s, p2/M, z31.s, z12.s\n"
"st1b { z28.s }, p1, [x23, x28]\n"
"mov z28.s, z23.s[0]\n"
- "add z24.s, z24.s, z20.s\n"
+ "add z26.s, z26.s, z14.s\n"
"st1b { z29.s }, p1, [x22, x28]\n"
"mov z29.s, z23.s[1]\n"
- "add z25.s, z25.s, z20.s\n"
+ "add z27.s, z27.s, z14.s\n"
"st1b { z30.s }, p1, [x21, x28]\n"
"mov z30.s, z23.s[2]\n"
- "add z26.s, z26.s, z20.s\n"
+ "add z28.s, z28.s, z14.s\n"
"st1b { z31.s }, p1, [x20, x28]\n"
"mov z31.s, z23.s[3]\n"
"incw x28\n"
- "add z27.s, z27.s, z20.s\n"
- "add z28.s, z28.s, z20.s\n"
- "add z29.s, z29.s, z20.s\n"
- "add z30.s, z30.s, z20.s\n"
- "add z31.s, z31.s, z20.s\n"
+ "add z29.s, z29.s, z14.s\n"
+ "add z30.s, z30.s, z14.s\n"
+ "add z31.s, z31.s, z14.s\n"
"b.any 1b\n"
: [params] "+&r" (params)
: [inptrs] "r" (inptrs), [n_channels] "r" (n_output_channels), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [outptrs] "r" (outptrs), [qp] "r" (&qp)
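
Before entering their main loop, both dot-product kernels above pre-bias each accumulator with the run-time part of the quantised-convolution cross-term: a UDOT against a vector of ones (built with MOV z.b, #0x1, with an LSR per lane where fewer than four one-bytes are needed per tap group) sums the raw input bytes per output, and that sum is multiplied by the negated b_offset and added to the bias. Only b_offset, c_offset, minval and maxval appear in the operand lists, so the a_offset terms are presumably folded in when the weights are packed. A scalar sketch under that assumption, with hypothetical names:

#include <cstdint>

// The run-time piece of the quantised dot product expands as
//   sum(x * (w - b_off)) = sum(x * w) - b_off * sum(x),
// so each output can start from bias - b_off * sum(x), and the main loop
// then only needs plain UDOTs of raw input bytes against weight bytes.
int32_t initial_acc(const uint8_t *x, int n_taps,
                    int32_t bias, int32_t b_offset)
{
    int32_t sum_x = 0;
    for (int i = 0; i < n_taps; ++i)            // UDOT acc.s, ones.b, x.b
        sum_x += x[i];
    return bias + (-b_offset) * sum_x;          // NEG + MUL, then the bias ADD
}

In the assembly this corresponds to the NEG of the loaded b_offset, the block of UDOTs against the ones vector, the MULs by the negated offset, and the final ADDs of the bias vector before label 1.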
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
index 887eccf1e9..b4b2a3a673 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[16];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -91,316 +91,316 @@ void sve_u8s8u8q_nhwc_3x3_s1_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x16, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x17, #0x0\n"
+ "ldr x26, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x16\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
+ "ldr x16, [%x[params], %[offsetof_Params_outptrs]]\n"
"ldr x15, [%x[params], %[offsetof_Params_n_channels]]\n"
"ldr x14, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z12.b }, p4/Z, [x21]\n"
- "ld1rb { z30.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z24.h }, p4/Z, [x22]\n"
- "ld1rh { z11.h }, p4/Z, [x21]\n"
- "ld1rh { z26.h }, p4/Z, [x20]\n"
- "ldp x13, x12, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x16, x15\n"
- "ldp x11, x10, [x24, #0x10]\n"
- "whilelt p2.s, x16, x15\n"
- "whilelt p1.s, x23, x15\n"
- "ldr x9, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z14.h }, p4/Z, [x14]\n"
- "ld1sb { z21.h }, p4/Z, [x14, #1, MUL VL]\n"
- "add x28, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x27, #0x0\n"
- "ld1sb { z1.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "add x13, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x12, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x11, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x17\n"
+ "add x20, x26, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x26, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x26, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z10.b }, p4/Z, [x20]\n"
+ "ldr x10, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x26, %[offsetof_Requantize32_minval]\n"
+ "add x20, x26, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z15.b }, p4/Z, [x23]\n"
+ "ld1rh { z26.h }, p4/Z, [x22]\n"
+ "ld1rh { z2.h }, p4/Z, [x21]\n"
+ "ld1rh { z14.h }, p4/Z, [x20]\n"
+ "incw x24\n"
+ "whilelt p3.h, x17, x15\n"
+ "ldp x9, x28, [x16, #0x0]\n"
+ "ldp x27, x26, [x16, #0x10]\n"
+ "whilelt p2.s, x17, x15\n"
+ "whilelt p1.s, x24, x15\n"
+ "ld1sb { z13.h }, p4/Z, [x14]\n"
+ "ld1sb { z11.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1sb { z18.h }, p4/Z, [x14, #2, MUL VL]\n"
"ld1sb { z6.h }, p4/Z, [x14, #3, MUL VL]\n"
- ".inst 0x455e11ce // ssublb z14.h, z14.b, z30.b\n"
- ".inst 0x455e12b5 // ssublb z21.h, z21.b, z30.b\n"
- "ld1sb { z2.h }, p4/Z, [x14, #4, MUL VL]\n"
- "ld1sb { z18.h }, p4/Z, [x14, #5, MUL VL]\n"
- ".inst 0x455e1021 // ssublb z1.h, z1.b, z30.b\n"
- ".inst 0x455e10c6 // ssublb z6.h, z6.b, z30.b\n"
- "ld1sb { z7.h }, p4/Z, [x14, #6, MUL VL]\n"
- "ld1sb { z10.h }, p4/Z, [x14, #7, MUL VL]\n"
+ "ld1sb { z20.h }, p4/Z, [x14, #4, MUL VL]\n"
+ "ld1sb { z30.h }, p4/Z, [x14, #5, MUL VL]\n"
+ "ld1sb { z28.h }, p4/Z, [x14, #6, MUL VL]\n"
+ "ld1sb { z17.h }, p4/Z, [x14, #7, MUL VL]\n"
"inch x14, ALL, MUL #8\n"
- ".inst 0x455e1042 // ssublb z2.h, z2.b, z30.b\n"
- "ld1w { z17.s }, p2/Z, [x9]\n"
- "ld1w { z16.s }, p1/Z, [x9, #1, MUL VL]\n"
- "uzp1 z5.s, z17.s, z16.s\n"
- "uzp2 z9.s, z17.s, z16.s\n"
- "ld1sb { z8.h }, p4/Z, [x14]\n"
- "ldp x24, x23, [x28, #0x0]\n"
- "addvl x9, x9, #2\n"
- "mov z17.d, z5.d\n"
- "ldp x22, x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x20]\n"
- "mov z25.d, z9.d\n"
- "mov z16.d, z5.d\n"
- "ld1b { z0.h }, p3/Z, [x24, x16]\n"
- "ld1b { z29.h }, p3/Z, [x23, x16]\n"
- "mov z23.d, z9.d\n"
- "mov z22.d, z5.d\n"
- "ld1b { z4.h }, p3/Z, [x22, x16]\n"
- "ld1b { z13.h }, p3/Z, [x21, x16]\n"
- "mov z27.d, z9.d\n"
- ".inst 0x455e1252 // ssublb z18.h, z18.b, z30.b\n"
- "ld1b { z20.h }, p3/Z, [x20, x16]\n"
- "ldr x26, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x455e10e7 // ssublb z7.h, z7.b, z30.b\n"
- ".inst 0x455e114a // ssublb z10.h, z10.b, z30.b\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x9, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455e1108 // ssublb z8.h, z8.b, z30.b\n"
- ".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
- ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
- ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
- ".inst 0x454c19ad // usublb z13.h, z13.b, z12.b\n"
- ".inst 0x454c1a94 // usublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454f11ad // ssublb z13.h, z13.b, z15.b\n"
+ "ld1w { z19.s }, p2/Z, [x25]\n"
+ "ld1w { z24.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ ".inst 0x454f116b // ssublb z11.h, z11.b, z15.b\n"
+ ".inst 0x454f1252 // ssublb z18.h, z18.b, z15.b\n"
+ ".inst 0x454f10c6 // ssublb z6.h, z6.b, z15.b\n"
+ "ld1sb { z5.h }, p4/Z, [x14]\n"
+ "ldp x24, x23, [x13, #0x0]\n"
+ ".inst 0x454f1294 // ssublb z20.h, z20.b, z15.b\n"
+ ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
+ "uzp1 z3.s, z19.s, z24.s\n"
+ "uzp2 z16.s, z19.s, z24.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldp x22, x21, [x13, #0x10]\n"
+ ".inst 0x454f139c // ssublb z28.h, z28.b, z15.b\n"
+ ".inst 0x454f1231 // ssublb z17.h, z17.b, z15.b\n"
+ ".inst 0x454f10a5 // ssublb z5.h, z5.b, z15.b\n"
+ "ldr x20, [x13, #0x20]\n"
+ "ld1b { z7.h }, p3/Z, [x24, x17]\n"
+ "ld1b { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1b { z4.h }, p3/Z, [x22, x17]\n"
+ "mov z8.d, z3.d\n"
+ "mov z21.d, z16.d\n"
+ "ld1b { z1.h }, p3/Z, [x21, x17]\n"
+ "mov z0.d, z3.d\n"
+ "mov z29.d, z16.d\n"
+ "ld1b { z27.h }, p3/Z, [x20, x17]\n"
+ "mov z19.d, z3.d\n"
+ "mov z9.d, z16.d\n"
+ ".inst 0x454a18e7 // usublb z7.h, z7.b, z10.b\n"
+ ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
+ ".inst 0x454a1884 // usublb z4.h, z4.b, z10.b\n"
+ ".inst 0x454a1821 // usublb z1.h, z1.b, z10.b\n"
+ ".inst 0x454a1b7b // usublb z27.h, z27.b, z10.b\n"
"1:" // Loop
- ".inst 0x44824005 // smlalb z5.s, p4/M, z0.h, z2.h\n"
- ".inst 0x44824409 // smlalt z9.s, p4/M, z0.h, z2.h\n"
- "ldr x20, [x28, #0x28]\n"
- "ldr x21, [x28, #0x38]\n"
- ".inst 0x448e43a5 // smlalb z5.s, p4/M, z29.h, z14.h\n"
- ".inst 0x44864011 // smlalb z17.s, p4/M, z0.h, z6.h\n"
- "ld1b { z3.h }, p3/Z, [x20, x16]\n"
- "ldr x20, [x28, #0x30]\n"
- ".inst 0x44954010 // smlalb z16.s, p4/M, z0.h, z21.h\n"
- ".inst 0x448e4016 // smlalb z22.s, p4/M, z0.h, z14.h\n"
- "ld1b { z31.h }, p3/Z, [x21, x16]\n"
- ".inst 0x454c1863 // usublb z3.h, z3.b, z12.b\n"
- ".inst 0x448e47a9 // smlalt z9.s, p4/M, z29.h, z14.h\n"
- ".inst 0x449241a5 // smlalb z5.s, p4/M, z13.h, z18.h\n"
- "ldr x21, [x28, #0x40]\n"
- "ld1b { z15.h }, p3/Z, [x20, x16]\n"
- ".inst 0x44864419 // smlalt z25.s, p4/M, z0.h, z6.h\n"
- ".inst 0x44954417 // smlalt z23.s, p4/M, z0.h, z21.h\n"
- ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
- "ldr x20, [x28, #0x48]\n"
- ".inst 0x448e441b // smlalt z27.s, p4/M, z0.h, z14.h\n"
- ".inst 0x44814091 // smlalb z17.s, p4/M, z4.h, z1.h\n"
- "ld1b { z19.h }, p3/Z, [x21, x16]\n"
- ".inst 0x454c19ef // usublb z15.h, z15.b, z12.b\n"
- ".inst 0x448141b0 // smlalb z16.s, p4/M, z13.h, z1.h\n"
- ".inst 0x449541b6 // smlalb z22.s, p4/M, z13.h, z21.h\n"
- "ld1b { z28.h }, p3/Z, [x20, x16]\n"
- ".inst 0x454c1a73 // usublb z19.h, z19.b, z12.b\n"
- ".inst 0x449245a9 // smlalt z9.s, p4/M, z13.h, z18.h\n"
- ".inst 0x448a4285 // smlalb z5.s, p4/M, z20.h, z10.h\n"
- "ldr x21, [x28, #0x50]\n"
- "ldr x20, [x28, #0x58]\n"
- ".inst 0x44814499 // smlalt z25.s, p4/M, z4.h, z1.h\n"
- ".inst 0x448145b7 // smlalt z23.s, p4/M, z13.h, z1.h\n"
- ".inst 0x454c1b9c // usublb z28.h, z28.b, z12.b\n"
- "ld1b { z4.h }, p3/Z, [x21, x16]\n"
- ".inst 0x449545bb // smlalt z27.s, p4/M, z13.h, z21.h\n"
- ".inst 0x448241b1 // smlalb z17.s, p4/M, z13.h, z2.h\n"
- "ld1b { z29.h }, p3/Z, [x20, x16]\n"
- "ldr x21, [x28, #0x60]\n"
- ".inst 0x44874070 // smlalb z16.s, p4/M, z3.h, z7.h\n"
- ".inst 0x44864296 // smlalb z22.s, p4/M, z20.h, z6.h\n"
- "ldr x20, [x28, #0x68]\n"
- ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
- ".inst 0x448a4689 // smlalt z9.s, p4/M, z20.h, z10.h\n"
- ".inst 0x449543e5 // smlalb z5.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
- "ld1b { z0.h }, p3/Z, [x21, x16]\n"
- ".inst 0x448245b9 // smlalt z25.s, p4/M, z13.h, z2.h\n"
- ".inst 0x44874477 // smlalt z23.s, p4/M, z3.h, z7.h\n"
- "ld1b { z3.h }, p3/Z, [x20, x16]\n"
- "ldr x20, [x28, #0x70]\n"
- ".inst 0x4486469b // smlalt z27.s, p4/M, z20.h, z6.h\n"
- ".inst 0x44874291 // smlalb z17.s, p4/M, z20.h, z7.h\n"
- ".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
- "ld1b { z13.h }, p3/Z, [x20, x16]\n"
- ".inst 0x44824290 // smlalb z16.s, p4/M, z20.h, z2.h\n"
- ".inst 0x448841f6 // smlalb z22.s, p4/M, z15.h, z8.h\n"
- ".inst 0x454c1863 // usublb z3.h, z3.b, z12.b\n"
- "ldr x20, [x28, #0x78]\n"
- ".inst 0x449547e9 // smlalt z9.s, p4/M, z31.h, z21.h\n"
- ".inst 0x44814265 // smlalb z5.s, p4/M, z19.h, z1.h\n"
- ".inst 0x454c19ad // usublb z13.h, z13.b, z12.b\n"
- "whilelt p0.h, x27, x15\n"
- ".inst 0x44874699 // smlalt z25.s, p4/M, z20.h, z7.h\n"
- ".inst 0x44824697 // smlalt z23.s, p4/M, z20.h, z2.h\n"
- "ld1w { z20.s }, p2/Z, [x26]\n"
+ ".inst 0x449440e3 // smlalb z3.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x449444f0 // smlalt z16.s, p4/M, z7.h, z20.h\n"
+ "ldr x25, [x13, #0x28]\n"
+ "ldr x24, [x13, #0x38]\n"
+ ".inst 0x448640e8 // smlalb z8.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x448b40e0 // smlalb z0.s, p4/M, z7.h, z11.h\n"
+ "ldr x23, [x13, #0x30]\n"
+ "ldr x22, [x13, #0x40]\n"
+ ".inst 0x448d40f3 // smlalb z19.s, p4/M, z7.h, z13.h\n"
+ ".inst 0x448644f5 // smlalt z21.s, p4/M, z7.h, z6.h\n"
+ "ldr x20, [x13, #0x48]\n"
+ "ldr x21, [x13, #0x50]\n"
+ "ld1b { z22.h }, p3/Z, [x25, x17]\n"
+ ".inst 0x448b44fd // smlalt z29.s, p4/M, z7.h, z11.h\n"
+ ".inst 0x448d44e9 // smlalt z9.s, p4/M, z7.h, z13.h\n"
+ "ld1b { z31.h }, p3/Z, [x24, x17]\n"
+ ".inst 0x448d4303 // smlalb z3.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x448d4710 // smlalt z16.s, p4/M, z24.h, z13.h\n"
+ "ld1b { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1b { z25.h }, p3/Z, [x22, x17]\n"
+ ".inst 0x44924088 // smlalb z8.s, p4/M, z4.h, z18.h\n"
+ ".inst 0x44924020 // smlalb z0.s, p4/M, z1.h, z18.h\n"
+ "ld1b { z23.h }, p3/Z, [x20, x17]\n"
+ "ldr x20, [x13, #0x58]\n"
+ ".inst 0x448b4033 // smlalb z19.s, p4/M, z1.h, z11.h\n"
+ ".inst 0x454a1ad6 // usublb z22.h, z22.b, z10.b\n"
+ ".inst 0x44924495 // smlalt z21.s, p4/M, z4.h, z18.h\n"
+ "ld1b { z12.h }, p3/Z, [x21, x17]\n"
+ ".inst 0x4492443d // smlalt z29.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448b4429 // smlalt z9.s, p4/M, z1.h, z11.h\n"
+ ".inst 0x454a1bff // usublb z31.h, z31.b, z10.b\n"
+ "ldr x21, [x13, #0x60]\n"
+ ".inst 0x449e4023 // smlalb z3.s, p4/M, z1.h, z30.h\n"
+ ".inst 0x449e4430 // smlalt z16.s, p4/M, z1.h, z30.h\n"
+ ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
+ "ld1b { z4.h }, p3/Z, [x20, x17]\n"
+ ".inst 0x44944028 // smlalb z8.s, p4/M, z1.h, z20.h\n"
+ ".inst 0x449c42c0 // smlalb z0.s, p4/M, z22.h, z28.h\n"
+ ".inst 0x454a1b39 // usublb z25.h, z25.b, z10.b\n"
+ "ldr x20, [x13, #0x68]\n"
+ ".inst 0x44864373 // smlalb z19.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x44944435 // smlalt z21.s, p4/M, z1.h, z20.h\n"
+ ".inst 0x454a1af7 // usublb z23.h, z23.b, z10.b\n"
+ "ld1b { z7.h }, p3/Z, [x21, x17]\n"
+ ".inst 0x449c46dd // smlalt z29.s, p4/M, z22.h, z28.h\n"
+ ".inst 0x44864769 // smlalt z9.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x454a198c // usublb z12.h, z12.b, z10.b\n"
+ "ldr x21, [x13, #0x70]\n"
+ ".inst 0x44914363 // smlalb z3.s, p4/M, z27.h, z17.h\n"
+ ".inst 0x44914770 // smlalt z16.s, p4/M, z27.h, z17.h\n"
+ ".inst 0x454a1884 // usublb z4.h, z4.b, z10.b\n"
+ "ld1b { z22.h }, p3/Z, [x20, x17]\n"
+ ".inst 0x449c4368 // smlalb z8.s, p4/M, z27.h, z28.h\n"
+ ".inst 0x44944360 // smlalb z0.s, p4/M, z27.h, z20.h\n"
+ ".inst 0x454a18e7 // usublb z7.h, z7.b, z10.b\n"
+ "ldr x20, [x13, #0x78]\n"
+ ".inst 0x44854313 // smlalb z19.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x449c4775 // smlalt z21.s, p4/M, z27.h, z28.h\n"
+ "ld1b { z1.h }, p3/Z, [x21, x17]\n"
+ "whilelt p0.h, x12, x15\n"
+ ".inst 0x4494477d // smlalt z29.s, p4/M, z27.h, z20.h\n"
+ ".inst 0x44854709 // smlalt z9.s, p4/M, z24.h, z5.h\n"
+ ".inst 0x454a1ad6 // usublb z22.h, z22.b, z10.b\n"
+ "ld1w { z24.s }, p2/Z, [x11]\n"
+ ".inst 0x448b43e3 // smlalb z3.s, p4/M, z31.h, z11.h\n"
+ ".inst 0x448b47f0 // smlalt z16.s, p4/M, z31.h, z11.h\n"
+ "ld1w { z27.s }, p1/Z, [x11, #1, MUL VL]\n"
"inch x14\n"
- ".inst 0x448845fb // smlalt z27.s, p4/M, z15.h, z8.h\n"
- ".inst 0x448e43f1 // smlalb z17.s, p4/M, z31.h, z14.h\n"
- "ld1w { z15.s }, p1/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x448d43e8 // smlalb z8.s, p4/M, z31.h, z13.h\n"
+ ".inst 0x449e42e0 // smlalb z0.s, p4/M, z23.h, z30.h\n"
+ ".inst 0x454a1821 // usublb z1.h, z1.b, z10.b\n"
"ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44924390 // smlalb z16.s, p4/M, z28.h, z18.h\n"
- ".inst 0x44824396 // smlalb z22.s, p4/M, z28.h, z2.h\n"
- "addvl x26, x26, #2\n"
- ".inst 0x44814669 // smlalt z9.s, p4/M, z19.h, z1.h\n"
- ".inst 0x44884385 // smlalb z5.s, p4/M, z28.h, z8.h\n"
- ".inst 0x448e47f9 // smlalt z25.s, p4/M, z31.h, z14.h\n"
- ".inst 0x44924797 // smlalt z23.s, p4/M, z28.h, z18.h\n"
- "ld1b { z31.h }, p3/Z, [x20, x16]\n"
- ".inst 0x454c1bff // usublb z31.h, z31.b, z12.b\n"
- ".inst 0x4482479b // smlalt z27.s, p4/M, z28.h, z2.h\n"
- ".inst 0x44954271 // smlalb z17.s, p4/M, z19.h, z21.h\n"
- "uzp1 z2.s, z20.s, z15.s\n"
- "inch x16\n"
- ".inst 0x448e4090 // smlalb z16.s, p4/M, z4.h, z14.h\n"
- ".inst 0x448143b6 // smlalb z22.s, p4/M, z29.h, z1.h\n"
- "uzp2 z15.s, z20.s, z15.s\n"
- "ld1w { z20.s }, p2/Z, [x25]\n"
- ".inst 0x44884789 // smlalt z9.s, p4/M, z28.h, z8.h\n"
- ".inst 0x44864085 // smlalb z5.s, p4/M, z4.h, z6.h\n"
- "mov x20, x16\n"
+ ".inst 0x449442f3 // smlalb z19.s, p4/M, z23.h, z20.h\n"
+ ".inst 0x448d47f5 // smlalt z21.s, p4/M, z31.h, z13.h\n"
+ "ld1b { z31.h }, p3/Z, [x20, x17]\n"
+ "inch x17\n"
+ ".inst 0x449e46fd // smlalt z29.s, p4/M, z23.h, z30.h\n"
+ ".inst 0x449446e9 // smlalt z9.s, p4/M, z23.h, z20.h\n"
+ "uzp1 z20.s, z24.s, z27.s\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x44924323 // smlalb z3.s, p4/M, z25.h, z18.h\n"
+ ".inst 0x44924730 // smlalt z16.s, p4/M, z25.h, z18.h\n"
+ "uzp2 z24.s, z24.s, z27.s\n"
+ "ld1w { z27.s }, p2/Z, [x10]\n"
+ ".inst 0x448b4328 // smlalb z8.s, p4/M, z25.h, z11.h\n"
+ ".inst 0x448d4180 // smlalb z0.s, p4/M, z12.h, z13.h\n"
+ ".inst 0x454a1bff // usublb z31.h, z31.b, z10.b\n"
+ "mov x20, x17\n"
+ ".inst 0x44924093 // smlalb z19.s, p4/M, z4.h, z18.h\n"
+ ".inst 0x448b4735 // smlalt z21.s, p4/M, z25.h, z11.h\n"
+ "ld1w { z25.s }, p1/Z, [x10, #1, MUL VL]\n"
+ "whilelt p2.s, x17, x15\n"
+ ".inst 0x448d459d // smlalt z29.s, p4/M, z12.h, z13.h\n"
+ ".inst 0x44924489 // smlalt z9.s, p4/M, z4.h, z18.h\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x448542e3 // smlalb z3.s, p4/M, z23.h, z5.h\n"
+ ".inst 0x448546f0 // smlalt z16.s, p4/M, z23.h, z5.h\n"
"incw x20\n"
- ".inst 0x44954679 // smlalt z25.s, p4/M, z19.h, z21.h\n"
- ".inst 0x448e4497 // smlalt z23.s, p4/M, z4.h, z14.h\n"
- "ld1w { z19.s }, p1/Z, [x25, #1, MUL VL]\n"
- "uzp1 z21.s, z20.s, z19.s\n"
- ".inst 0x448147bb // smlalt z27.s, p4/M, z29.h, z1.h\n"
- ".inst 0x448a4391 // smlalb z17.s, p4/M, z28.h, z10.h\n"
- "uzp2 z1.s, z20.s, z19.s\n"
- "whilelt p2.s, x16, x15\n"
- ".inst 0x44864010 // smlalb z16.s, p4/M, z0.h, z6.h\n"
- ".inst 0x44924076 // smlalb z22.s, p4/M, z3.h, z18.h\n"
+ ".inst 0x449142e8 // smlalb z8.s, p4/M, z23.h, z17.h\n"
+ ".inst 0x448640e0 // smlalb z0.s, p4/M, z7.h, z6.h\n"
+ "uzp1 z11.s, z27.s, z25.s\n"
+ ".inst 0x449e42d3 // smlalb z19.s, p4/M, z22.h, z30.h\n"
+ ".inst 0x449146f5 // smlalt z21.s, p4/M, z23.h, z17.h\n"
+ "uzp2 z27.s, z27.s, z25.s\n"
+ ".inst 0x448644fd // smlalt z29.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x449e46c9 // smlalt z9.s, p4/M, z22.h, z30.h\n"
"whilelt p1.s, x20, x15\n"
- "whilelt p3.h, x16, x15\n"
- ".inst 0x44864489 // smlalt z9.s, p4/M, z4.h, z6.h\n"
- ".inst 0x44874005 // smlalb z5.s, p4/M, z0.h, z7.h\n"
- ".inst 0x04a274a5 // sqrdmulh z5.s, z5.s, z2.s\n"
- "addvl x25, x25, #2\n"
- ".inst 0x448a4799 // smlalt z25.s, p4/M, z28.h, z10.h\n"
- ".inst 0x44864417 // smlalt z23.s, p4/M, z0.h, z6.h\n"
- "and z19.d, z5.d, z21.d\n"
- ".inst 0x4492447b // smlalt z27.s, p4/M, z3.h, z18.h\n"
- ".inst 0x449243b1 // smlalb z17.s, p4/M, z29.h, z18.h\n"
- "asr z19.s, z19.s, #0x1f\n"
- ".inst 0x448a41b0 // smlalb z16.s, p4/M, z13.h, z10.h\n"
- ".inst 0x448741b6 // smlalb z22.s, p4/M, z13.h, z7.h\n"
- "sqadd z5.s, z5.s, z19.s\n"
- ".inst 0x448292a5 // srshl z5.s, p4/M, z5.s, z21.s\n"
- ".inst 0x44874409 // smlalt z9.s, p4/M, z0.h, z7.h\n"
- ".inst 0x449247b9 // smlalt z25.s, p4/M, z29.h, z18.h\n"
- ".inst 0x04af7529 // sqrdmulh z9.s, z9.s, z15.s\n"
- ".inst 0x448a45b7 // smlalt z23.s, p4/M, z13.h, z10.h\n"
- ".inst 0x448745bb // smlalt z27.s, p4/M, z13.h, z7.h\n"
- "and z29.d, z9.d, z1.d\n"
- ".inst 0x44884071 // smlalb z17.s, p4/M, z3.h, z8.h\n"
- ".inst 0x448843f0 // smlalb z16.s, p4/M, z31.h, z8.h\n"
- ".inst 0x04a27631 // sqrdmulh z17.s, z17.s, z2.s\n"
- ".inst 0x448a43f6 // smlalb z22.s, p4/M, z31.h, z10.h\n"
- ".inst 0x44884479 // smlalt z25.s, p4/M, z3.h, z8.h\n"
- ".inst 0x04a27610 // sqrdmulh z16.s, z16.s, z2.s\n"
- ".inst 0x448847f7 // smlalt z23.s, p4/M, z31.h, z8.h\n"
- ".inst 0x448a47fb // smlalt z27.s, p4/M, z31.h, z10.h\n"
- ".inst 0x04a276d6 // sqrdmulh z22.s, z22.s, z2.s\n"
- "asr z29.s, z29.s, #0x1f\n"
- "and z18.d, z17.d, z21.d\n"
- ".inst 0x04af7739 // sqrdmulh z25.s, z25.s, z15.s\n"
- "and z20.d, z16.d, z21.d\n"
- ".inst 0x04af76f7 // sqrdmulh z23.s, z23.s, z15.s\n"
- "and z19.d, z22.d, z21.d\n"
- ".inst 0x04af777b // sqrdmulh z27.s, z27.s, z15.s\n"
- "sqadd z9.s, z9.s, z29.s\n"
- ".inst 0x44829029 // srshl z9.s, p4/M, z9.s, z1.s\n"
- "asr z18.s, z18.s, #0x1f\n"
- "and z7.d, z25.d, z1.d\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z6.d, z23.d, z1.d\n"
- "asr z19.s, z19.s, #0x1f\n"
- "and z2.d, z27.d, z1.d\n"
- "sqadd z17.s, z17.s, z18.s\n"
- "asr z7.s, z7.s, #0x1f\n"
- ".inst 0x448292b1 // srshl z17.s, p4/M, z17.s, z21.s\n"
- "sqadd z16.s, z16.s, z20.s\n"
+ "whilelt p3.h, x17, x15\n"
+ ".inst 0x44864183 // smlalb z3.s, p4/M, z12.h, z6.h\n"
+ ".inst 0x44864590 // smlalt z16.s, p4/M, z12.h, z6.h\n"
+ ".inst 0x449e4088 // smlalb z8.s, p4/M, z4.h, z30.h\n"
+ ".inst 0x44914020 // smlalb z0.s, p4/M, z1.h, z17.h\n"
+ ".inst 0x449c4033 // smlalb z19.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449e4495 // smlalt z21.s, p4/M, z4.h, z30.h\n"
+ ".inst 0x4491443d // smlalt z29.s, p4/M, z1.h, z17.h\n"
+ ".inst 0x449c4429 // smlalt z9.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c40e3 // smlalb z3.s, p4/M, z7.h, z28.h\n"
+ ".inst 0x449c44f0 // smlalt z16.s, p4/M, z7.h, z28.h\n"
+ ".inst 0x448542c8 // smlalb z8.s, p4/M, z22.h, z5.h\n"
+ ".inst 0x448543e0 // smlalb z0.s, p4/M, z31.h, z5.h\n"
+ ".inst 0x449143f3 // smlalb z19.s, p4/M, z31.h, z17.h\n"
+ ".inst 0x448546d5 // smlalt z21.s, p4/M, z22.h, z5.h\n"
+ ".inst 0x448547fd // smlalt z29.s, p4/M, z31.h, z5.h\n"
+ ".inst 0x449147e9 // smlalt z9.s, p4/M, z31.h, z17.h\n"
+ ".inst 0x04b47463 // sqrdmulh z3.s, z3.s, z20.s\n"
+ ".inst 0x04b87610 // sqrdmulh z16.s, z16.s, z24.s\n"
+ ".inst 0x04b47508 // sqrdmulh z8.s, z8.s, z20.s\n"
+ ".inst 0x04b47400 // sqrdmulh z0.s, z0.s, z20.s\n"
+ "and z4.d, z3.d, z11.d\n"
+ ".inst 0x04b47673 // sqrdmulh z19.s, z19.s, z20.s\n"
+ ".inst 0x04b876b5 // sqrdmulh z21.s, z21.s, z24.s\n"
+ "and z13.d, z16.d, z27.d\n"
+ "and z6.d, z8.d, z11.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "and z7.d, z0.d, z11.d\n"
+ ".inst 0x04b877bd // sqrdmulh z29.s, z29.s, z24.s\n"
+ ".inst 0x04b87529 // sqrdmulh z9.s, z9.s, z24.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
- ".inst 0x448292b0 // srshl z16.s, p4/M, z16.s, z21.s\n"
- "sqadd z22.s, z22.s, z19.s\n"
- "asr z2.s, z2.s, #0x1f\n"
- ".inst 0x448292b6 // srshl z22.s, p4/M, z22.s, z21.s\n"
- "sqadd z25.s, z25.s, z7.s\n"
- "sqadd z23.s, z23.s, z6.s\n"
- ".inst 0x44829039 // srshl z25.s, p4/M, z25.s, z1.s\n"
- ".inst 0x44829037 // srshl z23.s, p4/M, z23.s, z1.s\n"
- "sqadd z27.s, z27.s, z2.s\n"
- ".inst 0x453040a5 // sqxtnb z5.h, z5.s\n"
- ".inst 0x4482903b // srshl z27.s, p4/M, z27.s, z1.s\n"
- ".inst 0x45304231 // sqxtnb z17.h, z17.s\n"
- ".inst 0x45304210 // sqxtnb z16.h, z16.s\n"
- ".inst 0x453042d6 // sqxtnb z22.h, z22.s\n"
- ".inst 0x45304525 // sqxtnt z5.h, z9.s\n"
- ".inst 0x45304731 // sqxtnt z17.h, z25.s\n"
- ".inst 0x453046f0 // sqxtnt z16.h, z23.s\n"
- ".inst 0x45304776 // sqxtnt z22.h, z27.s\n"
- "sqadd z5.h, z5.h, z24.h\n"
- "smax z5.h, p4/M, z5.h, z11.h\n"
- "smin z5.h, p4/M, z5.h, z26.h\n"
- "sqadd z17.h, z17.h, z24.h\n"
- "sqadd z16.h, z16.h, z24.h\n"
- "smax z17.h, p4/M, z17.h, z11.h\n"
- "smax z16.h, p4/M, z16.h, z11.h\n"
- "sqadd z22.h, z22.h, z24.h\n"
- "smax z22.h, p4/M, z22.h, z11.h\n"
- "smin z17.h, p4/M, z17.h, z26.h\n"
- "st1b { z5.h }, p0, [x13, x27]\n"
- "smin z16.h, p4/M, z16.h, z26.h\n"
- "smin z22.h, p4/M, z22.h, z26.h\n"
- "st1b { z17.h }, p0, [x12, x27]\n"
- "st1b { z16.h }, p0, [x11, x27]\n"
- "st1b { z22.h }, p0, [x10, x27]\n"
- "ld1sb { z14.h }, p4/Z, [x14]\n"
- "ld1sb { z21.h }, p4/Z, [x14, #1, MUL VL]\n"
- "inch x27\n"
- "ld1sb { z1.h }, p4/Z, [x14, #2, MUL VL]\n"
+ "sqadd z3.s, z3.s, z4.s\n"
+ "and z20.d, z19.d, z11.d\n"
+ "and z18.d, z21.d, z27.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z13.s\n"
+ "and z13.d, z29.d, z27.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "and z23.d, z9.d, z27.d\n"
+ ".inst 0x44829163 // srshl z3.s, p4/M, z3.s, z11.s\n"
+ "sqadd z8.s, z8.s, z6.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z0.s, z0.s, z7.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
+ ".inst 0x44829370 // srshl z16.s, p4/M, z16.s, z27.s\n"
+ "sqadd z19.s, z19.s, z20.s\n"
+ "asr z23.s, z23.s, #0x1f\n"
+ ".inst 0x44829168 // srshl z8.s, p4/M, z8.s, z11.s\n"
+ "sqadd z21.s, z21.s, z18.s\n"
+ ".inst 0x45304063 // sqxtnb z3.h, z3.s\n"
+ ".inst 0x44829160 // srshl z0.s, p4/M, z0.s, z11.s\n"
+ "sqadd z29.s, z29.s, z13.s\n"
+ ".inst 0x44829173 // srshl z19.s, p4/M, z19.s, z11.s\n"
+ "sqadd z9.s, z9.s, z23.s\n"
+ ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
+ ".inst 0x44829375 // srshl z21.s, p4/M, z21.s, z27.s\n"
+ ".inst 0x45304000 // sqxtnb z0.h, z0.s\n"
+ ".inst 0x45304603 // sqxtnt z3.h, z16.s\n"
+ ".inst 0x4482937d // srshl z29.s, p4/M, z29.s, z27.s\n"
+ ".inst 0x44829369 // srshl z9.s, p4/M, z9.s, z27.s\n"
+ ".inst 0x45304273 // sqxtnb z19.h, z19.s\n"
+ ".inst 0x453046a8 // sqxtnt z8.h, z21.s\n"
+ ".inst 0x453047a0 // sqxtnt z0.h, z29.s\n"
+ ".inst 0x45304533 // sqxtnt z19.h, z9.s\n"
+ "sqadd z3.h, z3.h, z26.h\n"
+ "sqadd z8.h, z8.h, z26.h\n"
+ "sqadd z0.h, z0.h, z26.h\n"
+ "sqadd z19.h, z19.h, z26.h\n"
+ "smax z3.h, p4/M, z3.h, z2.h\n"
+ "smax z8.h, p4/M, z8.h, z2.h\n"
+ "smax z0.h, p4/M, z0.h, z2.h\n"
+ "smax z19.h, p4/M, z19.h, z2.h\n"
+ "smin z3.h, p4/M, z3.h, z14.h\n"
+ "smin z8.h, p4/M, z8.h, z14.h\n"
+ "smin z0.h, p4/M, z0.h, z14.h\n"
+ "smin z19.h, p4/M, z19.h, z14.h\n"
+ "st1b { z3.h }, p0, [x9, x12]\n"
+ "st1b { z8.h }, p0, [x28, x12]\n"
+ "st1b { z0.h }, p0, [x27, x12]\n"
+ "st1b { z19.h }, p0, [x26, x12]\n"
+ "inch x12\n"
+ "ld1sb { z13.h }, p4/Z, [x14]\n"
+ "ld1sb { z11.h }, p4/Z, [x14, #1, MUL VL]\n"
+ "ld1sb { z18.h }, p4/Z, [x14, #2, MUL VL]\n"
"ld1sb { z6.h }, p4/Z, [x14, #3, MUL VL]\n"
- ".inst 0x455e11ce // ssublb z14.h, z14.b, z30.b\n"
- ".inst 0x455e12b5 // ssublb z21.h, z21.b, z30.b\n"
- "ld1sb { z2.h }, p4/Z, [x14, #4, MUL VL]\n"
- "ld1sb { z18.h }, p4/Z, [x14, #5, MUL VL]\n"
- ".inst 0x455e1021 // ssublb z1.h, z1.b, z30.b\n"
- ".inst 0x455e10c6 // ssublb z6.h, z6.b, z30.b\n"
- "ld1sb { z7.h }, p4/Z, [x14, #6, MUL VL]\n"
- "ld1sb { z10.h }, p4/Z, [x14, #7, MUL VL]\n"
+ "ld1sb { z20.h }, p4/Z, [x14, #4, MUL VL]\n"
+ "ld1sb { z30.h }, p4/Z, [x14, #5, MUL VL]\n"
+ "ld1sb { z28.h }, p4/Z, [x14, #6, MUL VL]\n"
+ "ld1sb { z17.h }, p4/Z, [x14, #7, MUL VL]\n"
"inch x14, ALL, MUL #8\n"
- ".inst 0x455e1042 // ssublb z2.h, z2.b, z30.b\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
- "ld1w { z16.s }, p1/Z, [x21, #1, MUL VL]\n"
- "uzp1 z5.s, z17.s, z16.s\n"
- "uzp2 z9.s, z17.s, z16.s\n"
- "ld1sb { z8.h }, p4/Z, [x14]\n"
- "ldp x24, x23, [x28, #0x0]\n"
+ ".inst 0x454f11ad // ssublb z13.h, z13.b, z15.b\n"
+ "ld1w { z1.s }, p2/Z, [x21]\n"
+ "ld1w { z0.s }, p1/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
+ ".inst 0x454f116b // ssublb z11.h, z11.b, z15.b\n"
+ ".inst 0x454f1252 // ssublb z18.h, z18.b, z15.b\n"
+ ".inst 0x454f10c6 // ssublb z6.h, z6.b, z15.b\n"
+ "ld1sb { z5.h }, p4/Z, [x14]\n"
+ "ldp x24, x23, [x13, #0x0]\n"
+ ".inst 0x454f1294 // ssublb z20.h, z20.b, z15.b\n"
+ ".inst 0x454f13de // ssublb z30.h, z30.b, z15.b\n"
+ "uzp1 z3.s, z1.s, z0.s\n"
+ "uzp2 z16.s, z1.s, z0.s\n"
"str x21, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x22, x21, [x28, #0x10]\n"
- "ldr x20, [x28, #0x20]\n"
- "mov z17.d, z5.d\n"
- "mov z25.d, z9.d\n"
- "ld1b { z0.h }, p3/Z, [x24, x16]\n"
- "ld1b { z29.h }, p3/Z, [x23, x16]\n"
- "mov z16.d, z5.d\n"
- "mov z23.d, z9.d\n"
- "ld1b { z4.h }, p3/Z, [x22, x16]\n"
- "ld1b { z13.h }, p3/Z, [x21, x16]\n"
- "mov z22.d, z5.d\n"
- "mov z27.d, z9.d\n"
- "ld1b { z20.h }, p3/Z, [x20, x16]\n"
- ".inst 0x455e1252 // ssublb z18.h, z18.b, z30.b\n"
- ".inst 0x455e10e7 // ssublb z7.h, z7.b, z30.b\n"
- ".inst 0x455e114a // ssublb z10.h, z10.b, z30.b\n"
- ".inst 0x455e1108 // ssublb z8.h, z8.b, z30.b\n"
- ".inst 0x454c1800 // usublb z0.h, z0.b, z12.b\n"
- ".inst 0x454c1bbd // usublb z29.h, z29.b, z12.b\n"
- ".inst 0x454c1884 // usublb z4.h, z4.b, z12.b\n"
- ".inst 0x454c19ad // usublb z13.h, z13.b, z12.b\n"
- ".inst 0x454c1a94 // usublb z20.h, z20.b, z12.b\n"
+ "ldp x22, x21, [x13, #0x10]\n"
+ ".inst 0x454f139c // ssublb z28.h, z28.b, z15.b\n"
+ ".inst 0x454f1231 // ssublb z17.h, z17.b, z15.b\n"
+ ".inst 0x454f10a5 // ssublb z5.h, z5.b, z15.b\n"
+ "ldr x20, [x13, #0x20]\n"
+ "ld1b { z7.h }, p3/Z, [x24, x17]\n"
+ "ld1b { z24.h }, p3/Z, [x23, x17]\n"
+ "ld1b { z4.h }, p3/Z, [x22, x17]\n"
+ "mov z8.d, z3.d\n"
+ "mov z21.d, z16.d\n"
+ "ld1b { z1.h }, p3/Z, [x21, x17]\n"
+ "mov z0.d, z3.d\n"
+ "mov z29.d, z16.d\n"
+ "ld1b { z27.h }, p3/Z, [x20, x17]\n"
+ "mov z19.d, z3.d\n"
+ "mov z9.d, z16.d\n"
+ ".inst 0x454a18e7 // usublb z7.h, z7.b, z10.b\n"
+ ".inst 0x454a1b18 // usublb z24.h, z24.b, z10.b\n"
+ ".inst 0x454a1884 // usublb z4.h, z4.b, z10.b\n"
+ ".inst 0x454a1821 // usublb z1.h, z1.b, z10.b\n"
+ ".inst 0x454a1b7b // usublb z27.h, z27.b, z10.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
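Note on the hunk above: the regenerated assembly renames the loop counters (the channel offset moves from x16 to x17 here), and the clobber list at the end of the asm statement is extended to name x17 accordingly. A minimal sketch of why that bookkeeping matters, using a hypothetical add_offset helper that is not part of this patch: any register the block scribbles on must appear in the clobbers, or the compiler may keep a live value there across the asm.

#include <cstdint>

uint64_t add_offset(uint64_t base)
{
    uint64_t out;
    __asm__ __volatile__(
        "mov x17, #0x10\n"            // x17 is used as scratch inside the block...
        "add %[out], %[base], x17\n"
        : [out] "=r" (out)
        : [base] "r" (base)
        : "x17"                       // ...so it must be declared clobbered
    );
    return out;
}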
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
index 754d06d443..7d2106ad08 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[25];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -100,348 +100,348 @@ void sve_u8s8u8q_nhwc_3x3_s2_output2x2_mla_depthfirst_impl(
requant_muls, requant_shifts, outptrs);
__asm__ __volatile__(
- "mov x7, #0x0\n"
- "ldr x25, [%x[params], %[offsetof_Params_requant]]\n"
+ "mov x8, #0x0\n"
+ "ldr x27, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x24, [%x[params], %[offsetof_Params_outptrs]]\n"
- "mov x23, x7\n"
- "add x21, x25, %[offsetof_Requantize32_a_offset]\n"
- "ldr x8, [%x[params], %[offsetof_Params_n_channels]]\n"
- "ldr x17, [%x[params], %[offsetof_Params_weights]]\n"
- "add x20, x25, %[offsetof_Requantize32_b_offset]\n"
- "add x22, x25, %[offsetof_Requantize32_c_offset]\n"
- "ld1rb { z26.b }, p4/Z, [x21]\n"
- "ld1rb { z13.b }, p4/Z, [x20]\n"
- "add x21, x25, %[offsetof_Requantize32_minval]\n"
- "add x20, x25, %[offsetof_Requantize32_maxval]\n"
- "ld1rh { z19.h }, p4/Z, [x22]\n"
- "ld1rh { z12.h }, p4/Z, [x21]\n"
+ "ldr x26, [%x[params], %[offsetof_Params_outptrs]]\n"
+ "ldr x17, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x16, [%x[params], %[offsetof_Params_weights]]\n"
+ "add x15, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x14, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x13, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x8\n"
+ "add x20, x27, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x27, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x27, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z17.b }, p4/Z, [x20]\n"
+ "ldr x12, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x27, %[offsetof_Requantize32_minval]\n"
+ "add x20, x27, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z12.b }, p4/Z, [x23]\n"
+ "ld1rh { z25.h }, p4/Z, [x22]\n"
+ "ld1rh { z14.h }, p4/Z, [x21]\n"
"ld1rh { z9.h }, p4/Z, [x20]\n"
- "ldp x16, x15, [x24, #0x0]\n"
- "incw x23\n"
- "whilelt p3.h, x7, x8\n"
- "ldp x14, x13, [x24, #0x10]\n"
- "whilelt p2.s, x7, x8\n"
- "whilelt p1.s, x23, x8\n"
- "ldr x12, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z25.h }, p4/Z, [x17]\n"
- "ld1sb { z30.h }, p4/Z, [x17, #1, MUL VL]\n"
- "add x11, %x[params], %[offsetof_Params_inptrs]\n"
- "mov x10, #0x0\n"
- "ld1sb { z14.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1sb { z4.h }, p4/Z, [x17, #3, MUL VL]\n"
- ".inst 0x454d1339 // ssublb z25.h, z25.b, z13.b\n"
- ".inst 0x454d13de // ssublb z30.h, z30.b, z13.b\n"
- "ld1sb { z10.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x17, #5, MUL VL]\n"
- ".inst 0x454d11ce // ssublb z14.h, z14.b, z13.b\n"
- ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
- "ld1sb { z23.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
- ".inst 0x454d114a // ssublb z10.h, z10.b, z13.b\n"
- "ld1w { z17.s }, p2/Z, [x12]\n"
- "ld1w { z16.s }, p1/Z, [x12, #1, MUL VL]\n"
- "uzp1 z8.s, z17.s, z16.s\n"
- "uzp2 z24.s, z17.s, z16.s\n"
- "ld1sb { z2.h }, p4/Z, [x17]\n"
- "ldp x27, x26, [x11, #0x0]\n"
- "addvl x12, x12, #2\n"
- "mov z18.d, z8.d\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z0.d, z24.d\n"
- "mov z15.d, z8.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1b { z21.h }, p3/Z, [x27, x7]\n"
- "mov z1.d, z24.d\n"
- "mov z5.d, z8.d\n"
- "ld1b { z22.h }, p3/Z, [x26, x7]\n"
- "ld1b { z11.h }, p3/Z, [x25, x7]\n"
- "mov z6.d, z24.d\n"
- ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
- "ld1b { z20.h }, p3/Z, [x24, x7]\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x454d12f7 // ssublb z23.h, z23.b, z13.b\n"
- ".inst 0x454d10e7 // ssublb z7.h, z7.b, z13.b\n"
- "ld1b { z28.h }, p3/Z, [x22, x7]\n"
- "ld1b { z16.h }, p3/Z, [x21, x7]\n"
- ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
- ".inst 0x455a1ab5 // usublb z21.h, z21.b, z26.b\n"
- "ld1b { z31.h }, p3/Z, [x20, x7]\n"
- "ldr x9, [%x[params], %[offsetof_Params_requant_muls]]\n"
- ".inst 0x455a1ad6 // usublb z22.h, z22.b, z26.b\n"
- ".inst 0x455a196b // usublb z11.h, z11.b, z26.b\n"
- "ldr x28, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x12, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455a1a94 // usublb z20.h, z20.b, z26.b\n"
- ".inst 0x455a1b7b // usublb z27.h, z27.b, z26.b\n"
- ".inst 0x455a1b9c // usublb z28.h, z28.b, z26.b\n"
- ".inst 0x455a1a10 // usublb z16.h, z16.b, z26.b\n"
- ".inst 0x455a1bff // usublb z31.h, z31.b, z26.b\n"
+ "incw x24\n"
+ "whilelt p3.h, x8, x17\n"
+ "ldp x11, x10, [x26, #0x0]\n"
+ "ldp x9, x28, [x26, #0x10]\n"
+ "whilelt p2.s, x8, x17\n"
+ "whilelt p1.s, x24, x17\n"
+ "ld1sb { z28.h }, p4/Z, [x16]\n"
+ "ld1sb { z20.h }, p4/Z, [x16, #1, MUL VL]\n"
+ "ld1sb { z13.h }, p4/Z, [x16, #2, MUL VL]\n"
+ "ld1sb { z18.h }, p4/Z, [x16, #3, MUL VL]\n"
+ "ld1sb { z6.h }, p4/Z, [x16, #4, MUL VL]\n"
+ "ld1sb { z2.h }, p4/Z, [x16, #5, MUL VL]\n"
+ "ld1sb { z26.h }, p4/Z, [x16, #6, MUL VL]\n"
+ "ld1sb { z21.h }, p4/Z, [x16, #7, MUL VL]\n"
+ "inch x16, ALL, MUL #8\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ "ld1w { z11.s }, p2/Z, [x25]\n"
+ "ld1w { z4.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ ".inst 0x454c1294 // ssublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454c11ad // ssublb z13.h, z13.b, z12.b\n"
+ ".inst 0x454c1252 // ssublb z18.h, z18.b, z12.b\n"
+ "ld1sb { z15.h }, p4/Z, [x16]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ ".inst 0x454c10c6 // ssublb z6.h, z6.b, z12.b\n"
+ ".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ "uzp1 z5.s, z11.s, z4.s\n"
+ "uzp2 z11.s, z11.s, z4.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ ".inst 0x454c135a // ssublb z26.h, z26.b, z12.b\n"
+ ".inst 0x454c12b5 // ssublb z21.h, z21.b, z12.b\n"
+ ".inst 0x454c11ef // ssublb z15.h, z15.b, z12.b\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "mov z30.d, z5.d\n"
+ "mov z16.d, z11.d\n"
+ "mov z4.d, z5.d\n"
+ "mov z8.d, z11.d\n"
+ "mov z31.d, z5.d\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "mov z10.d, z11.d\n"
+ "ld1b { z3.h }, p3/Z, [x27, x8]\n"
+ "ld1b { z29.h }, p3/Z, [x26, x8]\n"
+ "ld1b { z23.h }, p3/Z, [x25, x8]\n"
+ "ld1b { z0.h }, p3/Z, [x24, x8]\n"
+ "ld1b { z24.h }, p3/Z, [x23, x8]\n"
+ "ld1b { z22.h }, p3/Z, [x22, x8]\n"
+ "ld1b { z27.h }, p3/Z, [x21, x8]\n"
+ "ld1b { z19.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
+ ".inst 0x45511bbd // usublb z29.h, z29.b, z17.b\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ ".inst 0x45511a73 // usublb z19.h, z19.b, z17.b\n"
"1:" // Loop
- ".inst 0x448242a8 // smlalb z8.s, p4/M, z21.h, z2.h\n"
- "ldr x21, [x11, #0x58]\n"
- "ldr x20, [x11, #0x78]\n"
- ".inst 0x448246b8 // smlalt z24.s, p4/M, z21.h, z2.h\n"
- ".inst 0x449942c8 // smlalb z8.s, p4/M, z22.h, z25.h\n"
- "ld1b { z17.h }, p3/Z, [x21, x7]\n"
- "ld1b { z29.h }, p3/Z, [x20, x7]\n"
- ".inst 0x449742b2 // smlalb z18.s, p4/M, z21.h, z23.h\n"
- "ldr x21, [x11, #0x60]\n"
- "ldr x20, [x11, #0x80]\n"
- ".inst 0x448e42af // smlalb z15.s, p4/M, z21.h, z14.h\n"
- ".inst 0x449942a5 // smlalb z5.s, p4/M, z21.h, z25.h\n"
- ".inst 0x449946d8 // smlalt z24.s, p4/M, z22.h, z25.h\n"
- ".inst 0x455a1a31 // usublb z17.h, z17.b, z26.b\n"
- ".inst 0x449e4168 // smlalb z8.s, p4/M, z11.h, z30.h\n"
- "ld1b { z22.h }, p3/Z, [x21, x7]\n"
- ".inst 0x455a1bbd // usublb z29.h, z29.b, z26.b\n"
- ".inst 0x449746a0 // smlalt z0.s, p4/M, z21.h, z23.h\n"
- ".inst 0x448e46a1 // smlalt z1.s, p4/M, z21.h, z14.h\n"
- "ldr x21, [x11, #0x68]\n"
- ".inst 0x449946a6 // smlalt z6.s, p4/M, z21.h, z25.h\n"
- "ld1b { z21.h }, p3/Z, [x20, x7]\n"
- "ldr x20, [x11, #0x88]\n"
- ".inst 0x449e4292 // smlalb z18.s, p4/M, z20.h, z30.h\n"
- ".inst 0x4484422f // smlalb z15.s, p4/M, z17.h, z4.h\n"
- ".inst 0x448a43a5 // smlalb z5.s, p4/M, z29.h, z10.h\n"
- ".inst 0x455a1ad6 // usublb z22.h, z22.b, z26.b\n"
- "ldr x22, [x11, #0x40]\n"
- ".inst 0x449e4578 // smlalt z24.s, p4/M, z11.h, z30.h\n"
- ".inst 0x455a1ab5 // usublb z21.h, z21.b, z26.b\n"
- ".inst 0x44844388 // smlalb z8.s, p4/M, z28.h, z4.h\n"
- "ld1b { z11.h }, p3/Z, [x21, x7]\n"
- ".inst 0x449e4680 // smlalt z0.s, p4/M, z20.h, z30.h\n"
- "ld1b { z20.h }, p3/Z, [x20, x7]\n"
- ".inst 0x44844621 // smlalt z1.s, p4/M, z17.h, z4.h\n"
- "ldr x21, [x11, #0x70]\n"
- ".inst 0x448a47a6 // smlalt z6.s, p4/M, z29.h, z10.h\n"
- "ldr x20, [x11, #0x98]\n"
- ".inst 0x448e4372 // smlalb z18.s, p4/M, z27.h, z14.h\n"
- "ldr x23, [x11, #0x50]\n"
- ".inst 0x449942cf // smlalb z15.s, p4/M, z22.h, z25.h\n"
- ".inst 0x449e42a5 // smlalb z5.s, p4/M, z21.h, z30.h\n"
- ".inst 0x455a196b // usublb z11.h, z11.b, z26.b\n"
- "ld1b { z17.h }, p3/Z, [x22, x7]\n"
- ".inst 0x44844798 // smlalt z24.s, p4/M, z28.h, z4.h\n"
- ".inst 0x455a1a94 // usublb z20.h, z20.b, z26.b\n"
- ".inst 0x448a4208 // smlalb z8.s, p4/M, z16.h, z10.h\n"
- "ld1b { z29.h }, p3/Z, [x21, x7]\n"
- "ld1b { z28.h }, p3/Z, [x20, x7]\n"
- ".inst 0x448e4760 // smlalt z0.s, p4/M, z27.h, z14.h\n"
- "ldr x22, [x11, #0x48]\n"
- ".inst 0x449946c1 // smlalt z1.s, p4/M, z22.h, z25.h\n"
- ".inst 0x449e46a6 // smlalt z6.s, p4/M, z21.h, z30.h\n"
- "ldr x21, [x11, #0x90]\n"
- "ldr x20, [x11, #0xa8]\n"
- ".inst 0x449943f2 // smlalb z18.s, p4/M, z31.h, z25.h\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x448a416f // smlalb z15.s, p4/M, z11.h, z10.h\n"
- ".inst 0x44834285 // smlalb z5.s, p4/M, z20.h, z3.h\n"
- ".inst 0x455a1a31 // usublb z17.h, z17.b, z26.b\n"
- ".inst 0x448a4618 // smlalt z24.s, p4/M, z16.h, z10.h\n"
- ".inst 0x455a1bbd // usublb z29.h, z29.b, z26.b\n"
- ".inst 0x448e43e8 // smlalb z8.s, p4/M, z31.h, z14.h\n"
- "ld1b { z16.h }, p3/Z, [x22, x7]\n"
- ".inst 0x455a1b9c // usublb z28.h, z28.b, z26.b\n"
- ".inst 0x449947e0 // smlalt z0.s, p4/M, z31.h, z25.h\n"
- "ld1b { z25.h }, p3/Z, [x21, x7]\n"
- ".inst 0x448a4561 // smlalt z1.s, p4/M, z11.h, z10.h\n"
- "ld1b { z11.h }, p3/Z, [x20, x7]\n"
- ".inst 0x455a1b7b // usublb z27.h, z27.b, z26.b\n"
- ".inst 0x44834686 // smlalt z6.s, p4/M, z20.h, z3.h\n"
- "ldr x21, [x11, #0xa0]\n"
- "ldr x20, [x11, #0xb0]\n"
- ".inst 0x448a4232 // smlalb z18.s, p4/M, z17.h, z10.h\n"
- ".inst 0x449e43af // smlalb z15.s, p4/M, z29.h, z30.h\n"
- ".inst 0x455a1a10 // usublb z16.h, z16.b, z26.b\n"
- ".inst 0x448e4385 // smlalb z5.s, p4/M, z28.h, z14.h\n"
- ".inst 0x448e47f8 // smlalt z24.s, p4/M, z31.h, z14.h\n"
- ".inst 0x455a1b39 // usublb z25.h, z25.b, z26.b\n"
- "ld1b { z20.h }, p3/Z, [x21, x7]\n"
- ".inst 0x455a196b // usublb z11.h, z11.b, z26.b\n"
- ".inst 0x44834368 // smlalb z8.s, p4/M, z27.h, z3.h\n"
- "ld1b { z31.h }, p3/Z, [x20, x7]\n"
- ".inst 0x448a4620 // smlalt z0.s, p4/M, z17.h, z10.h\n"
- ".inst 0x449e47a1 // smlalt z1.s, p4/M, z29.h, z30.h\n"
- ".inst 0x448e4786 // smlalt z6.s, p4/M, z28.h, z14.h\n"
- "ldr x20, [x11, #0xb8]\n"
- ".inst 0x455a1a94 // usublb z20.h, z20.b, z26.b\n"
- ".inst 0x44834212 // smlalb z18.s, p4/M, z16.h, z3.h\n"
- ".inst 0x4497432f // smlalb z15.s, p4/M, z25.h, z23.h\n"
- ".inst 0x455a1bff // usublb z31.h, z31.b, z26.b\n"
- "ld1b { z30.h }, p3/Z, [x20, x7]\n"
- ".inst 0x44844165 // smlalb z5.s, p4/M, z11.h, z4.h\n"
- ".inst 0x44834778 // smlalt z24.s, p4/M, z27.h, z3.h\n"
- "ldr x20, [x11, #0xc0]\n"
- "ld1w { z17.s }, p2/Z, [x9]\n"
- ".inst 0x449742c8 // smlalb z8.s, p4/M, z22.h, z23.h\n"
- ".inst 0x44834600 // smlalt z0.s, p4/M, z16.h, z3.h\n"
- "ld1w { z14.s }, p1/Z, [x9, #1, MUL VL]\n"
- ".inst 0x455a1bde // usublb z30.h, z30.b, z26.b\n"
- ".inst 0x44974721 // smlalt z1.s, p4/M, z25.h, z23.h\n"
- ".inst 0x44844566 // smlalt z6.s, p4/M, z11.h, z4.h\n"
- "ld1b { z25.h }, p3/Z, [x20, x7]\n"
- "uzp1 z10.s, z17.s, z14.s\n"
- ".inst 0x44844372 // smlalb z18.s, p4/M, z27.h, z4.h\n"
- ".inst 0x4487428f // smlalb z15.s, p4/M, z20.h, z7.h\n"
- "uzp2 z14.s, z17.s, z14.s\n"
- "ld1w { z17.s }, p2/Z, [x28]\n"
- ".inst 0x448743e5 // smlalb z5.s, p4/M, z31.h, z7.h\n"
- ".inst 0x449746d8 // smlalt z24.s, p4/M, z22.h, z23.h\n"
- "ld1w { z16.s }, p1/Z, [x28, #1, MUL VL]\n"
- ".inst 0x455a1b39 // usublb z25.h, z25.b, z26.b\n"
- ".inst 0x448743a8 // smlalb z8.s, p4/M, z29.h, z7.h\n"
- ".inst 0x44844760 // smlalt z0.s, p4/M, z27.h, z4.h\n"
- "uzp1 z4.s, z17.s, z16.s\n"
- "inch x7\n"
- ".inst 0x44874681 // smlalt z1.s, p4/M, z20.h, z7.h\n"
- ".inst 0x448747e6 // smlalt z6.s, p4/M, z31.h, z7.h\n"
- ".inst 0x04aa7508 // sqrdmulh z8.s, z8.s, z10.s\n"
- "whilelt p0.h, x10, x8\n"
- ".inst 0x448742b2 // smlalb z18.s, p4/M, z21.h, z7.h\n"
- ".inst 0x4483416f // smlalb z15.s, p4/M, z11.h, z3.h\n"
- "uzp2 z22.s, z17.s, z16.s\n"
- "mov x20, x7\n"
- ".inst 0x449743c5 // smlalb z5.s, p4/M, z30.h, z23.h\n"
- ".inst 0x448747b8 // smlalt z24.s, p4/M, z29.h, z7.h\n"
- "and z17.d, z8.d, z4.d\n"
- "inch x17\n"
- ".inst 0x448746a0 // smlalt z0.s, p4/M, z21.h, z7.h\n"
- ".inst 0x44834561 // smlalt z1.s, p4/M, z11.h, z3.h\n"
- ".inst 0x04ae7718 // sqrdmulh z24.s, z24.s, z14.s\n"
- "incw x20\n"
- ".inst 0x449747c6 // smlalt z6.s, p4/M, z30.h, z23.h\n"
- ".inst 0x44824392 // smlalb z18.s, p4/M, z28.h, z2.h\n"
- "asr z17.s, z17.s, #0x1f\n"
- "whilelt p2.s, x7, x8\n"
- ".inst 0x448243cf // smlalb z15.s, p4/M, z30.h, z2.h\n"
- ".inst 0x44824325 // smlalb z5.s, p4/M, z25.h, z2.h\n"
- "and z16.d, z24.d, z22.d\n"
- "whilelt p1.s, x20, x8\n"
- ".inst 0x44824780 // smlalt z0.s, p4/M, z28.h, z2.h\n"
- ".inst 0x448247c1 // smlalt z1.s, p4/M, z30.h, z2.h\n"
- ".inst 0x04aa7652 // sqrdmulh z18.s, z18.s, z10.s\n"
+ ".inst 0x448f4065 // smlalb z5.s, p4/M, z3.h, z15.h\n"
+ "ldr x25, [x15, #0x58]\n"
+ "ldr x24, [x15, #0x78]\n"
+ ".inst 0x448f446b // smlalt z11.s, p4/M, z3.h, z15.h\n"
+ "ldr x23, [x15, #0x60]\n"
+ "ldr x22, [x15, #0x80]\n"
+ ".inst 0x449a407e // smlalb z30.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x448d4064 // smlalb z4.s, p4/M, z3.h, z13.h\n"
+ ".inst 0x449c407f // smlalb z31.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449a4470 // smlalt z16.s, p4/M, z3.h, z26.h\n"
+ "ldr x21, [x15, #0x68]\n"
+ "ldr x20, [x15, #0x88]\n"
+ "ld1b { z1.h }, p3/Z, [x25, x8]\n"
+ "ld1b { z7.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448d4468 // smlalt z8.s, p4/M, z3.h, z13.h\n"
+ ".inst 0x449c446a // smlalt z10.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c43a5 // smlalb z5.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x449c47ab // smlalt z11.s, p4/M, z29.h, z28.h\n"
+ "ld1b { z29.h }, p3/Z, [x23, x8]\n"
+ "ld1b { z3.h }, p3/Z, [x22, x8]\n"
+ ".inst 0x4494401e // smlalb z30.s, p4/M, z0.h, z20.h\n"
+ "ldr x25, [x15, #0x40]\n"
+ "ldr x24, [x15, #0x70]\n"
+ "whilelt p0.h, x14, x17\n"
+ ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
+ ".inst 0x455118e7 // usublb z7.h, z7.b, z17.b\n"
+ ".inst 0x44944410 // smlalt z16.s, p4/M, z0.h, z20.h\n"
+ "ld1b { z0.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x45511bbd // usublb z29.h, z29.b, z17.b\n"
+ ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
+ "ldr x23, [x15, #0x98]\n"
+ "ldr x22, [x15, #0x50]\n"
+ ".inst 0x449442e5 // smlalb z5.s, p4/M, z23.h, z20.h\n"
+ ".inst 0x449446eb // smlalt z11.s, p4/M, z23.h, z20.h\n"
+ "ld1b { z23.h }, p3/Z, [x20, x8]\n"
+ "ldr x21, [x15, #0x48]\n"
+ ".inst 0x44924024 // smlalb z4.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448640ff // smlalb z31.s, p4/M, z7.h, z6.h\n"
+ ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
+ "ldr x20, [x15, #0x90]\n"
+ ".inst 0x44924428 // smlalt z8.s, p4/M, z1.h, z18.h\n"
+ ".inst 0x448644ea // smlalt z10.s, p4/M, z7.h, z6.h\n"
+ "ld1b { z1.h }, p3/Z, [x25, x8]\n"
+ "ld1b { z7.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448d431e // smlalb z30.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ ".inst 0x448d4710 // smlalt z16.s, p4/M, z24.h, z13.h\n"
+ "ld1b { z24.h }, p3/Z, [x23, x8]\n"
+ ".inst 0x449242c5 // smlalb z5.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x449246cb // smlalt z11.s, p4/M, z22.h, z18.h\n"
+ "ldr x24, [x15, #0xa8]\n"
+ "ld1b { z22.h }, p3/Z, [x22, x8]\n"
+ ".inst 0x449c43a4 // smlalb z4.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x4494407f // smlalb z31.s, p4/M, z3.h, z20.h\n"
+ ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
+ "ldr x23, [x15, #0xa0]\n"
+ ".inst 0x449c47a8 // smlalt z8.s, p4/M, z29.h, z28.h\n"
+ ".inst 0x4494446a // smlalt z10.s, p4/M, z3.h, z20.h\n"
+ ".inst 0x455118e7 // usublb z7.h, z7.b, z17.b\n"
+ "ldr x22, [x15, #0xb0]\n"
+ ".inst 0x449c427e // smlalb z30.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x449c4670 // smlalt z16.s, p4/M, z19.h, z28.h\n"
+ "ld1b { z28.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x44864365 // smlalb z5.s, p4/M, z27.h, z6.h\n"
+ ".inst 0x4486476b // smlalt z11.s, p4/M, z27.h, z6.h\n"
+ "ld1b { z27.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
+ ".inst 0x44864004 // smlalb z4.s, p4/M, z0.h, z6.h\n"
+ ".inst 0x448242ff // smlalb z31.s, p4/M, z23.h, z2.h\n"
+ "ldr x21, [x15, #0xb8]\n"
+ "ldr x20, [x15, #0xc0]\n"
+ ".inst 0x44864408 // smlalt z8.s, p4/M, z0.h, z6.h\n"
+ "ld1b { z0.h }, p3/Z, [x24, x8]\n"
+ ".inst 0x448246ea // smlalt z10.s, p4/M, z23.h, z2.h\n"
+ ".inst 0x45511b9c // usublb z28.h, z28.b, z17.b\n"
+ ".inst 0x4486403e // smlalb z30.s, p4/M, z1.h, z6.h\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ "ld1b { z23.h }, p3/Z, [x23, x8]\n"
+ ".inst 0x44864430 // smlalt z16.s, p4/M, z1.h, z6.h\n"
+ ".inst 0x448d4265 // smlalb z5.s, p4/M, z19.h, z13.h\n"
+ ".inst 0x448d466b // smlalt z11.s, p4/M, z19.h, z13.h\n"
+ "ld1b { z6.h }, p3/Z, [x22, x8]\n"
+ "ld1b { z1.h }, p3/Z, [x21, x8]\n"
+ ".inst 0x449440e4 // smlalb z4.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x448d431f // smlalb z31.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
+ "ld1w { z19.s }, p2/Z, [x13]\n"
+ ".inst 0x449444e8 // smlalt z8.s, p4/M, z7.h, z20.h\n"
+ ".inst 0x448d470a // smlalt z10.s, p4/M, z24.h, z13.h\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ "ld1w { z20.s }, p1/Z, [x13, #1, MUL VL]\n"
+ ".inst 0x4482439e // smlalb z30.s, p4/M, z28.h, z2.h\n"
+ ".inst 0x455118c6 // usublb z6.h, z6.b, z17.b\n"
+ ".inst 0x44824790 // smlalt z16.s, p4/M, z28.h, z2.h\n"
+ "ld1b { z13.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x448242c5 // smlalb z5.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x448246cb // smlalt z11.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x45511821 // usublb z1.h, z1.b, z17.b\n"
+ "inch x8\n"
+ ".inst 0x449a4364 // smlalb z4.s, p4/M, z27.h, z26.h\n"
+ ".inst 0x4492401f // smlalb z31.s, p4/M, z0.h, z18.h\n"
+ "uzp1 z28.s, z19.s, z20.s\n"
+ "inch x16\n"
+ ".inst 0x449a4768 // smlalt z8.s, p4/M, z27.h, z26.h\n"
+ ".inst 0x4492440a // smlalt z10.s, p4/M, z0.h, z18.h\n"
+ "uzp2 z20.s, z19.s, z20.s\n"
+ "ld1w { z27.s }, p2/Z, [x12]\n"
+ ".inst 0x449242de // smlalb z30.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x449246d0 // smlalt z16.s, p4/M, z22.h, z18.h\n"
+ "ld1w { z19.s }, p1/Z, [x12, #1, MUL VL]\n"
+ ".inst 0x455119ad // usublb z13.h, z13.b, z17.b\n"
+ ".inst 0x449a43a5 // smlalb z5.s, p4/M, z29.h, z26.h\n"
+ ".inst 0x449a47ab // smlalt z11.s, p4/M, z29.h, z26.h\n"
+ "mov x21, x8\n"
+ "whilelt p2.s, x8, x17\n"
+ ".inst 0x449542e4 // smlalb z4.s, p4/M, z23.h, z21.h\n"
+ ".inst 0x449540df // smlalb z31.s, p4/M, z6.h, z21.h\n"
"ldr x20, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44824726 // smlalt z6.s, p4/M, z25.h, z2.h\n"
- ".inst 0x04aa75ef // sqrdmulh z15.s, z15.s, z10.s\n"
- "whilelt p3.h, x7, x8\n"
- "addvl x9, x9, #2\n"
- ".inst 0x04aa74a5 // sqrdmulh z5.s, z5.s, z10.s\n"
- "sqadd z8.s, z8.s, z17.s\n"
- ".inst 0x44829088 // srshl z8.s, p4/M, z8.s, z4.s\n"
- "addvl x28, x28, #2\n"
- "asr z16.s, z16.s, #0x1f\n"
- "and z21.d, z18.d, z4.d\n"
- ".inst 0x04ae7400 // sqrdmulh z0.s, z0.s, z14.s\n"
- "and z20.d, z15.d, z4.d\n"
- ".inst 0x04ae7421 // sqrdmulh z1.s, z1.s, z14.s\n"
- "and z28.d, z5.d, z4.d\n"
- ".inst 0x04ae74c6 // sqrdmulh z6.s, z6.s, z14.s\n"
- "sqadd z24.s, z24.s, z16.s\n"
- ".inst 0x448292d8 // srshl z24.s, p4/M, z24.s, z22.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- "and z25.d, z0.d, z22.d\n"
- "asr z20.s, z20.s, #0x1f\n"
- "and z17.d, z1.d, z22.d\n"
- "asr z28.s, z28.s, #0x1f\n"
- "and z16.d, z6.d, z22.d\n"
- "sqadd z18.s, z18.s, z21.s\n"
- "asr z25.s, z25.s, #0x1f\n"
- ".inst 0x44829092 // srshl z18.s, p4/M, z18.s, z4.s\n"
- "sqadd z15.s, z15.s, z20.s\n"
- "asr z17.s, z17.s, #0x1f\n"
- ".inst 0x4482908f // srshl z15.s, p4/M, z15.s, z4.s\n"
- "sqadd z5.s, z5.s, z28.s\n"
- "asr z16.s, z16.s, #0x1f\n"
- ".inst 0x44829085 // srshl z5.s, p4/M, z5.s, z4.s\n"
- "sqadd z0.s, z0.s, z25.s\n"
- "sqadd z1.s, z1.s, z17.s\n"
- ".inst 0x448292c0 // srshl z0.s, p4/M, z0.s, z22.s\n"
- ".inst 0x448292c1 // srshl z1.s, p4/M, z1.s, z22.s\n"
- "sqadd z6.s, z6.s, z16.s\n"
- ".inst 0x45304108 // sqxtnb z8.h, z8.s\n"
- ".inst 0x448292c6 // srshl z6.s, p4/M, z6.s, z22.s\n"
- ".inst 0x45304252 // sqxtnb z18.h, z18.s\n"
- ".inst 0x453041ef // sqxtnb z15.h, z15.s\n"
+ "addvl x13, x13, #2\n"
+ ".inst 0x449546e8 // smlalt z8.s, p4/M, z23.h, z21.h\n"
+ ".inst 0x449544ca // smlalt z10.s, p4/M, z6.h, z21.h\n"
+ "uzp1 z23.s, z27.s, z19.s\n"
+ "addvl x12, x12, #2\n"
+ ".inst 0x4495407e // smlalb z30.s, p4/M, z3.h, z21.h\n"
+ ".inst 0x44954470 // smlalt z16.s, p4/M, z3.h, z21.h\n"
+ "uzp2 z6.s, z27.s, z19.s\n"
+ "incw x21\n"
+ ".inst 0x449540e5 // smlalb z5.s, p4/M, z7.h, z21.h\n"
+ ".inst 0x449544eb // smlalt z11.s, p4/M, z7.h, z21.h\n"
+ ".inst 0x44824004 // smlalb z4.s, p4/M, z0.h, z2.h\n"
+ ".inst 0x449a403f // smlalb z31.s, p4/M, z1.h, z26.h\n"
+ ".inst 0x44824408 // smlalt z8.s, p4/M, z0.h, z2.h\n"
+ ".inst 0x449a442a // smlalt z10.s, p4/M, z1.h, z26.h\n"
+ "whilelt p1.s, x21, x17\n"
+ "whilelt p3.h, x8, x17\n"
+ ".inst 0x448f431e // smlalb z30.s, p4/M, z24.h, z15.h\n"
+ ".inst 0x448f4710 // smlalt z16.s, p4/M, z24.h, z15.h\n"
+ ".inst 0x04bc74a5 // sqrdmulh z5.s, z5.s, z28.s\n"
+ ".inst 0x04b4756b // sqrdmulh z11.s, z11.s, z20.s\n"
+ ".inst 0x448f4024 // smlalb z4.s, p4/M, z1.h, z15.h\n"
+ ".inst 0x448f41bf // smlalb z31.s, p4/M, z13.h, z15.h\n"
+ "and z24.d, z5.d, z23.d\n"
+ ".inst 0x448f4428 // smlalt z8.s, p4/M, z1.h, z15.h\n"
+ ".inst 0x448f45aa // smlalt z10.s, p4/M, z13.h, z15.h\n"
+ "and z19.d, z11.d, z6.d\n"
+ ".inst 0x04bc77de // sqrdmulh z30.s, z30.s, z28.s\n"
+ ".inst 0x04b47610 // sqrdmulh z16.s, z16.s, z20.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ ".inst 0x04bc7484 // sqrdmulh z4.s, z4.s, z28.s\n"
+ ".inst 0x04bc77ff // sqrdmulh z31.s, z31.s, z28.s\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "and z7.d, z30.d, z23.d\n"
+ "sqadd z5.s, z5.s, z24.s\n"
+ ".inst 0x04b47508 // sqrdmulh z8.s, z8.s, z20.s\n"
+ "and z15.d, z4.d, z23.d\n"
+ "and z24.d, z31.d, z23.d\n"
+ ".inst 0x04b4754a // sqrdmulh z10.s, z10.s, z20.s\n"
+ "sqadd z11.s, z11.s, z19.s\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "and z18.d, z16.d, z6.d\n"
+ ".inst 0x448292e5 // srshl z5.s, p4/M, z5.s, z23.s\n"
+ "asr z15.s, z15.s, #0x1f\n"
+ "and z13.d, z8.d, z6.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ "and z3.d, z10.d, z6.d\n"
+ ".inst 0x448290cb // srshl z11.s, p4/M, z11.s, z6.s\n"
+ "sqadd z30.s, z30.s, z7.s\n"
+ "asr z18.s, z18.s, #0x1f\n"
+ "sqadd z4.s, z4.s, z15.s\n"
+ "asr z13.s, z13.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z24.s\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ ".inst 0x448292fe // srshl z30.s, p4/M, z30.s, z23.s\n"
+ "sqadd z16.s, z16.s, z18.s\n"
".inst 0x453040a5 // sqxtnb z5.h, z5.s\n"
- ".inst 0x45304708 // sqxtnt z8.h, z24.s\n"
- ".inst 0x45304412 // sqxtnt z18.h, z0.s\n"
- ".inst 0x4530442f // sqxtnt z15.h, z1.s\n"
- ".inst 0x453044c5 // sqxtnt z5.h, z6.s\n"
- "sqadd z8.h, z8.h, z19.h\n"
- "smax z8.h, p4/M, z8.h, z12.h\n"
- "smin z8.h, p4/M, z8.h, z9.h\n"
- "sqadd z18.h, z18.h, z19.h\n"
- "sqadd z15.h, z15.h, z19.h\n"
- "smax z18.h, p4/M, z18.h, z12.h\n"
- "smax z15.h, p4/M, z15.h, z12.h\n"
- "sqadd z5.h, z5.h, z19.h\n"
- "smax z5.h, p4/M, z5.h, z12.h\n"
- "smin z18.h, p4/M, z18.h, z9.h\n"
- "st1b { z8.h }, p0, [x16, x10]\n"
- "smin z15.h, p4/M, z15.h, z9.h\n"
+ ".inst 0x448292e4 // srshl z4.s, p4/M, z4.s, z23.s\n"
+ "sqadd z8.s, z8.s, z13.s\n"
+ ".inst 0x448292ff // srshl z31.s, p4/M, z31.s, z23.s\n"
+ "sqadd z10.s, z10.s, z3.s\n"
+ ".inst 0x453043de // sqxtnb z30.h, z30.s\n"
+ ".inst 0x448290d0 // srshl z16.s, p4/M, z16.s, z6.s\n"
+ ".inst 0x45304084 // sqxtnb z4.h, z4.s\n"
+ ".inst 0x45304565 // sqxtnt z5.h, z11.s\n"
+ ".inst 0x448290c8 // srshl z8.s, p4/M, z8.s, z6.s\n"
+ ".inst 0x448290ca // srshl z10.s, p4/M, z10.s, z6.s\n"
+ ".inst 0x453043ff // sqxtnb z31.h, z31.s\n"
+ ".inst 0x4530461e // sqxtnt z30.h, z16.s\n"
+ ".inst 0x45304504 // sqxtnt z4.h, z8.s\n"
+ ".inst 0x4530455f // sqxtnt z31.h, z10.s\n"
+ "sqadd z5.h, z5.h, z25.h\n"
+ "sqadd z30.h, z30.h, z25.h\n"
+ "sqadd z4.h, z4.h, z25.h\n"
+ "sqadd z31.h, z31.h, z25.h\n"
+ "smax z5.h, p4/M, z5.h, z14.h\n"
+ "smax z30.h, p4/M, z30.h, z14.h\n"
+ "smax z4.h, p4/M, z4.h, z14.h\n"
+ "smax z31.h, p4/M, z31.h, z14.h\n"
"smin z5.h, p4/M, z5.h, z9.h\n"
- "st1b { z18.h }, p0, [x15, x10]\n"
- "st1b { z15.h }, p0, [x14, x10]\n"
- "st1b { z5.h }, p0, [x13, x10]\n"
- "ld1sb { z25.h }, p4/Z, [x17]\n"
- "ld1sb { z30.h }, p4/Z, [x17, #1, MUL VL]\n"
- "inch x10\n"
- "ld1sb { z14.h }, p4/Z, [x17, #2, MUL VL]\n"
- "ld1sb { z4.h }, p4/Z, [x17, #3, MUL VL]\n"
- ".inst 0x454d1339 // ssublb z25.h, z25.b, z13.b\n"
- ".inst 0x454d13de // ssublb z30.h, z30.b, z13.b\n"
- "ld1sb { z10.h }, p4/Z, [x17, #4, MUL VL]\n"
- "ld1sb { z3.h }, p4/Z, [x17, #5, MUL VL]\n"
- ".inst 0x454d11ce // ssublb z14.h, z14.b, z13.b\n"
- ".inst 0x454d1084 // ssublb z4.h, z4.b, z13.b\n"
- "ld1sb { z23.h }, p4/Z, [x17, #6, MUL VL]\n"
- "ld1sb { z7.h }, p4/Z, [x17, #7, MUL VL]\n"
- "inch x17, ALL, MUL #8\n"
- ".inst 0x454d114a // ssublb z10.h, z10.b, z13.b\n"
- "ld1w { z17.s }, p2/Z, [x20]\n"
- "ld1w { z16.s }, p1/Z, [x20, #1, MUL VL]\n"
- "uzp1 z8.s, z17.s, z16.s\n"
- "uzp2 z24.s, z17.s, z16.s\n"
- "ld1sb { z2.h }, p4/Z, [x17]\n"
- "ldp x27, x26, [x11, #0x0]\n"
+ "smin z30.h, p4/M, z30.h, z9.h\n"
+ "smin z4.h, p4/M, z4.h, z9.h\n"
+ "smin z31.h, p4/M, z31.h, z9.h\n"
+ "st1b { z5.h }, p0, [x11, x14]\n"
+ "st1b { z30.h }, p0, [x10, x14]\n"
+ "st1b { z4.h }, p0, [x9, x14]\n"
+ "st1b { z31.h }, p0, [x28, x14]\n"
+ "inch x14\n"
+ "ld1sb { z28.h }, p4/Z, [x16]\n"
+ "ld1sb { z20.h }, p4/Z, [x16, #1, MUL VL]\n"
+ "ld1sb { z13.h }, p4/Z, [x16, #2, MUL VL]\n"
+ "ld1sb { z18.h }, p4/Z, [x16, #3, MUL VL]\n"
+ "ld1sb { z6.h }, p4/Z, [x16, #4, MUL VL]\n"
+ "ld1sb { z2.h }, p4/Z, [x16, #5, MUL VL]\n"
+ "ld1sb { z26.h }, p4/Z, [x16, #6, MUL VL]\n"
+ "ld1sb { z21.h }, p4/Z, [x16, #7, MUL VL]\n"
+ "inch x16, ALL, MUL #8\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ "ld1w { z10.s }, p2/Z, [x20]\n"
+ "ld1w { z1.s }, p1/Z, [x20, #1, MUL VL]\n"
"addvl x20, x20, #2\n"
+ ".inst 0x454c1294 // ssublb z20.h, z20.b, z12.b\n"
+ ".inst 0x454c11ad // ssublb z13.h, z13.b, z12.b\n"
+ ".inst 0x454c1252 // ssublb z18.h, z18.b, z12.b\n"
+ "ld1sb { z15.h }, p4/Z, [x16]\n"
+ "ldp x27, x26, [x15, #0x0]\n"
+ ".inst 0x454c10c6 // ssublb z6.h, z6.b, z12.b\n"
+ ".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ "uzp1 z5.s, z10.s, z1.s\n"
+ "uzp2 z11.s, z10.s, z1.s\n"
"str x20, [%x[params], %[offsetof_Params_bias]]\n"
- "ldp x25, x24, [x11, #0x10]\n"
- "ldp x23, x22, [x11, #0x20]\n"
- "mov z18.d, z8.d\n"
- "mov z0.d, z24.d\n"
- "ldp x21, x20, [x11, #0x30]\n"
- "ld1b { z21.h }, p3/Z, [x27, x7]\n"
- "mov z15.d, z8.d\n"
- "mov z1.d, z24.d\n"
- "ld1b { z22.h }, p3/Z, [x26, x7]\n"
- "ld1b { z11.h }, p3/Z, [x25, x7]\n"
- "mov z5.d, z8.d\n"
- "mov z6.d, z24.d\n"
- "ld1b { z20.h }, p3/Z, [x24, x7]\n"
- "ld1b { z27.h }, p3/Z, [x23, x7]\n"
- ".inst 0x454d1063 // ssublb z3.h, z3.b, z13.b\n"
- ".inst 0x454d12f7 // ssublb z23.h, z23.b, z13.b\n"
- "ld1b { z28.h }, p3/Z, [x22, x7]\n"
- "ld1b { z16.h }, p3/Z, [x21, x7]\n"
- ".inst 0x454d10e7 // ssublb z7.h, z7.b, z13.b\n"
- ".inst 0x454d1042 // ssublb z2.h, z2.b, z13.b\n"
- "ld1b { z31.h }, p3/Z, [x20, x7]\n"
- ".inst 0x455a1ab5 // usublb z21.h, z21.b, z26.b\n"
- ".inst 0x455a1ad6 // usublb z22.h, z22.b, z26.b\n"
- ".inst 0x455a196b // usublb z11.h, z11.b, z26.b\n"
- ".inst 0x455a1a94 // usublb z20.h, z20.b, z26.b\n"
- ".inst 0x455a1b7b // usublb z27.h, z27.b, z26.b\n"
- ".inst 0x455a1b9c // usublb z28.h, z28.b, z26.b\n"
- ".inst 0x455a1a10 // usublb z16.h, z16.b, z26.b\n"
- ".inst 0x455a1bff // usublb z31.h, z31.b, z26.b\n"
+ "ldp x25, x24, [x15, #0x10]\n"
+ ".inst 0x454c135a // ssublb z26.h, z26.b, z12.b\n"
+ ".inst 0x454c12b5 // ssublb z21.h, z21.b, z12.b\n"
+ ".inst 0x454c11ef // ssublb z15.h, z15.b, z12.b\n"
+ "ldp x23, x22, [x15, #0x20]\n"
+ "mov z30.d, z5.d\n"
+ "mov z16.d, z11.d\n"
+ "mov z4.d, z5.d\n"
+ "mov z8.d, z11.d\n"
+ "mov z31.d, z5.d\n"
+ "ldp x21, x20, [x15, #0x30]\n"
+ "mov z10.d, z11.d\n"
+ "ld1b { z3.h }, p3/Z, [x27, x8]\n"
+ "ld1b { z29.h }, p3/Z, [x26, x8]\n"
+ "ld1b { z23.h }, p3/Z, [x25, x8]\n"
+ "ld1b { z0.h }, p3/Z, [x24, x8]\n"
+ "ld1b { z24.h }, p3/Z, [x23, x8]\n"
+ "ld1b { z22.h }, p3/Z, [x22, x8]\n"
+ "ld1b { z27.h }, p3/Z, [x21, x8]\n"
+ "ld1b { z19.h }, p3/Z, [x20, x8]\n"
+ ".inst 0x45511863 // usublb z3.h, z3.b, z17.b\n"
+ ".inst 0x45511bbd // usublb z29.h, z29.b, z17.b\n"
+ ".inst 0x45511af7 // usublb z23.h, z23.b, z17.b\n"
+ ".inst 0x45511800 // usublb z0.h, z0.b, z17.b\n"
+ ".inst 0x45511b18 // usublb z24.h, z24.b, z17.b\n"
+ ".inst 0x45511ad6 // usublb z22.h, z22.b, z17.b\n"
+ ".inst 0x45511b7b // usublb z27.h, z27.b, z17.b\n"
+ ".inst 0x45511a73 // usublb z19.h, z19.b, z17.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
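Alongside the register renaming, this kernel picks up the same Params change as the others in the patch: the channel count moves from long unsigned int to the fixed-width uint64_t from <cstdint>, presumably so the field is 64 bits on every target (unsigned long is only 32 bits on LLP64 platforms). A minimal sketch of the resulting layout; members after bias are elided here and the full definition is in the kernel source:

#include <cstdint>

struct Params
{
    uint64_t n_channels;      // was: long unsigned int n_channels;
    const void *weights;
    const int32_t *bias;
    // ... remaining members as in the kernel source
};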
diff --git a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
index f24a258484..c7c4c86b20 100644
--- a/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/depthwise/kernels/sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,7 +45,7 @@ void sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
{
struct Params
{
- long unsigned int n_channels;
+ uint64_t n_channels;
const void *weights;
const int32_t *bias;
const arm_gemm::Requantize32 *requant;
@@ -55,7 +55,7 @@ void sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
const uint8_t *inptrs[36];
Params(
- long unsigned int n_channels,
+ uint64_t n_channels,
const uint8_t *const *inptrs_raw,
const void *const weights,
const int32_t *const bias,
@@ -112,533 +112,533 @@ void sve_u8s8u8q_nhwc_5x5_s1_output2x2_mla_depthfirst_impl(
__asm__ __volatile__(
"mov x2, #0x0\n"
- "mov x24, x2\n"
- "ldr x23, [%x[params], %[offsetof_Params_requant]]\n"
- "ldr x3, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x27, [%x[params], %[offsetof_Params_requant]]\n"
"ptrue p4.b\n"
- "ldr x22, [%x[params], %[offsetof_Params_outptrs]]\n"
- "incw x24\n"
+ "ldr x3, [%x[params], %[offsetof_Params_n_channels]]\n"
+ "ldr x26, [%x[params], %[offsetof_Params_outptrs]]\n"
"ldr x4, [%x[params], %[offsetof_Params_weights]]\n"
- "add x21, x23, %[offsetof_Requantize32_a_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_b_offset]\n"
- "ld1rb { z30.b }, p4/Z, [x21]\n"
- "ld1rb { z10.b }, p4/Z, [x20]\n"
- "add x21, x23, %[offsetof_Requantize32_c_offset]\n"
- "add x20, x23, %[offsetof_Requantize32_minval]\n"
+ "add x5, %x[params], %[offsetof_Params_inptrs]\n"
+ "mov x6, #0x0\n"
+ "ldr x25, [%x[params], %[offsetof_Params_bias]]\n"
+ "ldr x7, [%x[params], %[offsetof_Params_requant_muls]]\n"
+ "mov x24, x2\n"
+ "add x20, x27, %[offsetof_Requantize32_a_offset]\n"
+ "add x23, x27, %[offsetof_Requantize32_b_offset]\n"
+ "add x22, x27, %[offsetof_Requantize32_c_offset]\n"
+ "ld1rb { z14.b }, p4/Z, [x20]\n"
+ "ldr x8, [%x[params], %[offsetof_Params_requant_shifts]]\n"
+ "add x21, x27, %[offsetof_Requantize32_minval]\n"
+ "add x20, x27, %[offsetof_Requantize32_maxval]\n"
+ "ld1rb { z12.b }, p4/Z, [x23]\n"
+ "ld1rh { z10.h }, p4/Z, [x22]\n"
+ "incw x24\n"
"ld1rh { z15.h }, p4/Z, [x21]\n"
- "ld1rh { z12.h }, p4/Z, [x20]\n"
- "add x20, x23, %[offsetof_Requantize32_maxval]\n"
"ld1rh { z13.h }, p4/Z, [x20]\n"
- "ldp x5, x6, [x22, #0x0]\n"
"whilelt p3.h, x2, x3\n"
- "ldp x7, x8, [x22, #0x10]\n"
+ "ldp x17, x16, [x26, #0x0]\n"
+ "ldp x15, x14, [x26, #0x10]\n"
"whilelt p2.s, x2, x3\n"
"whilelt p1.s, x24, x3\n"
- "ldr x10, [%x[params], %[offsetof_Params_bias]]\n"
- "add x17, %x[params], %[offsetof_Params_inptrs]\n"
- "ld1w { z17.s }, p2/Z, [x10]\n"
- "ld1w { z16.s }, p1/Z, [x10, #1, MUL VL]\n"
- "uzp1 z14.s, z17.s, z16.s\n"
- "ld1sb { z26.h }, p4/Z, [x4]\n"
- "ld1sb { z8.h }, p4/Z, [x4, #1, MUL VL]\n"
- "uzp2 z23.s, z17.s, z16.s\n"
- "addvl x10, x10, #2\n"
- "ld1sb { z16.h }, p4/Z, [x4, #2, MUL VL]\n"
- "ld1sb { z21.h }, p4/Z, [x4, #3, MUL VL]\n"
- "mov x16, #0x0\n"
- "mov z6.d, z14.d\n"
- "ld1sb { z17.h }, p4/Z, [x4, #4, MUL VL]\n"
- "ldp x9, x28, [x17, #0x0]\n"
- "mov z18.d, z23.d\n"
- "mov z9.d, z14.d\n"
- "ldp x27, x26, [x17, #0x10]\n"
- "ldp x25, x24, [x17, #0x20]\n"
- "mov z20.d, z23.d\n"
- "mov z7.d, z14.d\n"
- "ldp x23, x22, [x17, #0x30]\n"
- "ldp x21, x20, [x17, #0x40]\n"
- "mov z1.d, z23.d\n"
- ".inst 0x454a135a // ssublb z26.h, z26.b, z10.b\n"
- "ld1b { z22.h }, p3/Z, [x9, x2]\n"
- "ld1b { z2.h }, p3/Z, [x28, x2]\n"
- ".inst 0x454a1108 // ssublb z8.h, z8.b, z10.b\n"
- ".inst 0x454a1210 // ssublb z16.h, z16.b, z10.b\n"
- "ld1b { z11.h }, p3/Z, [x27, x2]\n"
- "ld1b { z3.h }, p3/Z, [x26, x2]\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- ".inst 0x454a1231 // ssublb z17.h, z17.b, z10.b\n"
- "ld1b { z29.h }, p3/Z, [x25, x2]\n"
- "ld1b { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x455e1ad6 // usublb z22.h, z22.b, z30.b\n"
- ".inst 0x455e1842 // usublb z2.h, z2.b, z30.b\n"
- "ld1b { z31.h }, p3/Z, [x23, x2]\n"
- "ld1b { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e196b // usublb z11.h, z11.b, z30.b\n"
- ".inst 0x455e1863 // usublb z3.h, z3.b, z30.b\n"
- "ld1b { z19.h }, p3/Z, [x21, x2]\n"
- "ld1b { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1bbd // usublb z29.h, z29.b, z30.b\n"
- ".inst 0x455e1884 // usublb z4.h, z4.b, z30.b\n"
- "ldr x15, [%x[params], %[offsetof_Params_requant_muls]]\n"
- "ldr x14, [%x[params], %[offsetof_Params_requant_shifts]]\n"
- "str x10, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x455e1bff // usublb z31.h, z31.b, z30.b\n"
- ".inst 0x455e1800 // usublb z0.h, z0.b, z30.b\n"
- ".inst 0x455e1a73 // usublb z19.h, z19.b, z30.b\n"
- ".inst 0x455e1b9c // usublb z28.h, z28.b, z30.b\n"
+ "ld1w { z5.s }, p2/Z, [x25]\n"
+ "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ "ld1sb { z25.h }, p4/Z, [x4]\n"
+ "ld1sb { z28.h }, p4/Z, [x4, #1, MUL VL]\n"
+ "ld1sb { z4.h }, p4/Z, [x4, #2, MUL VL]\n"
+ "ld1sb { z23.h }, p4/Z, [x4, #3, MUL VL]\n"
+ "ld1sb { z31.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldp x9, x28, [x5, #0x0]\n"
+ "uzp1 z6.s, z5.s, z16.s\n"
+ "uzp2 z30.s, z5.s, z16.s\n"
+ "str x25, [%x[params], %[offsetof_Params_bias]]\n"
+ ".inst 0x454c1339 // ssublb z25.h, z25.b, z12.b\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
+ ".inst 0x454c12f7 // ssublb z23.h, z23.b, z12.b\n"
+ "ldp x27, x26, [x5, #0x10]\n"
+ "mov z17.d, z6.d\n"
+ "mov z8.d, z30.d\n"
+ "mov z21.d, z6.d\n"
+ "mov z27.d, z30.d\n"
+ "ldp x25, x24, [x5, #0x20]\n"
+ "mov z7.d, z6.d\n"
+ "mov z9.d, z30.d\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "ldp x23, x22, [x5, #0x30]\n"
+ "ldp x21, x20, [x5, #0x40]\n"
+ "ld1b { z26.h }, p3/Z, [x9, x2]\n"
+ "ld1b { z16.h }, p3/Z, [x28, x2]\n"
+ "ld1b { z24.h }, p3/Z, [x27, x2]\n"
+ "ld1b { z5.h }, p3/Z, [x26, x2]\n"
+ "ld1b { z18.h }, p3/Z, [x25, x2]\n"
+ "ld1b { z3.h }, p3/Z, [x24, x2]\n"
+ "ld1b { z19.h }, p3/Z, [x23, x2]\n"
+ "ld1b { z11.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454e1b5a // usublb z26.h, z26.b, z14.b\n"
+ ".inst 0x454e1a10 // usublb z16.h, z16.b, z14.b\n"
+ "ld1b { z20.h }, p3/Z, [x21, x2]\n"
+ "ld1b { z29.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454e1b18 // usublb z24.h, z24.b, z14.b\n"
+ ".inst 0x454e18a5 // usublb z5.h, z5.b, z14.b\n"
+ ".inst 0x454e1a52 // usublb z18.h, z18.b, z14.b\n"
+ ".inst 0x454e1863 // usublb z3.h, z3.b, z14.b\n"
+ ".inst 0x454e1a73 // usublb z19.h, z19.b, z14.b\n"
+ ".inst 0x454e196b // usublb z11.h, z11.b, z14.b\n"
+ ".inst 0x454e1a94 // usublb z20.h, z20.b, z14.b\n"
+ ".inst 0x454e1bbd // usublb z29.h, z29.b, z14.b\n"
"1:" // Loop
- ".inst 0x449a42ce // smlalb z14.s, p4/M, z22.h, z26.h\n"
- ".inst 0x449a46d7 // smlalt z23.s, p4/M, z22.h, z26.h\n"
- "ldr x20, [x17, #0x50]\n"
- "ld1b { z27.h }, p3/Z, [x20, x2]\n"
- ".inst 0x4488404e // smlalb z14.s, p4/M, z2.h, z8.h\n"
- ".inst 0x449a4046 // smlalb z6.s, p4/M, z2.h, z26.h\n"
- "ldr x20, [x17, #0x58]\n"
- ".inst 0x455e1b7b // usublb z27.h, z27.b, z30.b\n"
- ".inst 0x449a4169 // smlalb z9.s, p4/M, z11.h, z26.h\n"
- ".inst 0x449a4067 // smlalb z7.s, p4/M, z3.h, z26.h\n"
- "ld1b { z5.h }, p3/Z, [x20, x2]\n"
- "ldr x20, [x17, #0x60]\n"
- ".inst 0x44884457 // smlalt z23.s, p4/M, z2.h, z8.h\n"
- ".inst 0x449043ae // smlalb z14.s, p4/M, z29.h, z16.h\n"
- "ld1sb { z25.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x455e18a5 // usublb z5.h, z5.b, z30.b\n"
- ".inst 0x449a4452 // smlalt z18.s, p4/M, z2.h, z26.h\n"
- ".inst 0x449a4574 // smlalt z20.s, p4/M, z11.h, z26.h\n"
- "ld1b { z22.h }, p3/Z, [x20, x2]\n"
- ".inst 0x454a1339 // ssublb z25.h, z25.b, z10.b\n"
- ".inst 0x449a4461 // smlalt z1.s, p4/M, z3.h, z26.h\n"
- ".inst 0x448843a6 // smlalb z6.s, p4/M, z29.h, z8.h\n"
- "ldr x20, [x17, #0x68]\n"
+ ".inst 0x44994346 // smlalb z6.s, p4/M, z26.h, z25.h\n"
+ ".inst 0x4499475e // smlalt z30.s, p4/M, z26.h, z25.h\n"
+ "ldr x23, [x5, #0x50]\n"
+ "ldr x22, [x5, #0x58]\n"
+ ".inst 0x44994211 // smlalb z17.s, p4/M, z16.h, z25.h\n"
+ ".inst 0x44994315 // smlalb z21.s, p4/M, z24.h, z25.h\n"
+ "ldr x21, [x5, #0x60]\n"
+ "ld1sb { z0.h }, p4/Z, [x4, #5, MUL VL]\n"
+ ".inst 0x449940a7 // smlalb z7.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994608 // smlalt z8.s, p4/M, z16.h, z25.h\n"
+ "ldr x20, [x5, #0x68]\n"
+ "ld1sb { z26.h }, p4/Z, [x4, #6, MUL VL]\n"
+ "ld1b { z2.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x4499471b // smlalt z27.s, p4/M, z24.h, z25.h\n"
+ ".inst 0x449944a9 // smlalt z9.s, p4/M, z5.h, z25.h\n"
+ "ld1b { z22.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x449c4206 // smlalb z6.s, p4/M, z16.h, z28.h\n"
+ ".inst 0x449c461e // smlalt z30.s, p4/M, z16.h, z28.h\n"
+ "ld1b { z1.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
+ ".inst 0x449c4251 // smlalb z17.s, p4/M, z18.h, z28.h\n"
+ ".inst 0x449c40b5 // smlalb z21.s, p4/M, z5.h, z28.h\n"
+ "ld1b { z16.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454c135a // ssublb z26.h, z26.b, z12.b\n"
+ ".inst 0x449c4067 // smlalb z7.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x454e1842 // usublb z2.h, z2.b, z14.b\n"
+ ".inst 0x449c4648 // smlalt z8.s, p4/M, z18.h, z28.h\n"
+ "ldr x20, [x5, #0x70]\n"
+ ".inst 0x449c44bb // smlalt z27.s, p4/M, z5.h, z28.h\n"
+ ".inst 0x449c4469 // smlalt z9.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x454e1ad6 // usublb z22.h, z22.b, z14.b\n"
+ "ld1sb { z28.h }, p4/Z, [x4, #7, MUL VL]\n"
+ ".inst 0x44844246 // smlalb z6.s, p4/M, z18.h, z4.h\n"
+ ".inst 0x4484465e // smlalt z30.s, p4/M, z18.h, z4.h\n"
+ ".inst 0x454e1821 // usublb z1.h, z1.b, z14.b\n"
+ "inch x4, ALL, MUL #8\n"
+ ".inst 0x44844271 // smlalb z17.s, p4/M, z19.h, z4.h\n"
+ ".inst 0x44844075 // smlalb z21.s, p4/M, z3.h, z4.h\n"
+ ".inst 0x454e1a10 // usublb z16.h, z16.b, z14.b\n"
+ "ld1b { z25.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x44844047 // smlalb z7.s, p4/M, z2.h, z4.h\n"
+ ".inst 0x44844668 // smlalt z8.s, p4/M, z19.h, z4.h\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ "ldr x20, [x5, #0x78]\n"
+ ".inst 0x4484447b // smlalt z27.s, p4/M, z3.h, z4.h\n"
+ ".inst 0x44844449 // smlalt z9.s, p4/M, z2.h, z4.h\n"
+ "ld1sb { z18.h }, p4/Z, [x4]\n"
+ "ldr x22, [x5, #0x80]\n"
+ ".inst 0x44974266 // smlalb z6.s, p4/M, z19.h, z23.h\n"
+ ".inst 0x4497467e // smlalt z30.s, p4/M, z19.h, z23.h\n"
+ ".inst 0x454e1b39 // usublb z25.h, z25.b, z14.b\n"
+ "ld1sb { z4.h }, p4/Z, [x4, #1, MUL VL]\n"
+ ".inst 0x44974171 // smlalb z17.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x44974055 // smlalb z21.s, p4/M, z2.h, z23.h\n"
+ "ld1b { z19.h }, p3/Z, [x20, x2]\n"
+ "ldr x21, [x5, #0x88]\n"
+ ".inst 0x449742c7 // smlalb z7.s, p4/M, z22.h, z23.h\n"
+ ".inst 0x44974568 // smlalt z8.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x454c1252 // ssublb z18.h, z18.b, z12.b\n"
+ "ldr x20, [x5, #0x90]\n"
+ ".inst 0x4497445b // smlalt z27.s, p4/M, z2.h, z23.h\n"
+ ".inst 0x449746c9 // smlalt z9.s, p4/M, z22.h, z23.h\n"
+ "ld1b { z23.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
+ ".inst 0x449f4166 // smlalb z6.s, p4/M, z11.h, z31.h\n"
+ ".inst 0x449f457e // smlalt z30.s, p4/M, z11.h, z31.h\n"
+ ".inst 0x454e1a73 // usublb z19.h, z19.b, z14.b\n"
+ "ld1sb { z11.h }, p4/Z, [x4, #2, MUL VL]\n"
+ ".inst 0x449f4031 // smlalb z17.s, p4/M, z1.h, z31.h\n"
+ ".inst 0x449f42d5 // smlalb z21.s, p4/M, z22.h, z31.h\n"
+ "ldr x23, [x5, #0x98]\n"
+ "ldr x22, [x5, #0xa0]\n"
+ ".inst 0x449f4287 // smlalb z7.s, p4/M, z20.h, z31.h\n"
+ ".inst 0x449f4428 // smlalt z8.s, p4/M, z1.h, z31.h\n"
+ ".inst 0x454e1af7 // usublb z23.h, z23.b, z14.b\n"
+ "ld1b { z1.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x449f46db // smlalt z27.s, p4/M, z22.h, z31.h\n"
+ ".inst 0x449f4689 // smlalt z9.s, p4/M, z20.h, z31.h\n"
+ ".inst 0x454c116b // ssublb z11.h, z11.b, z12.b\n"
+ "ld1sb { z31.h }, p4/Z, [x4, #3, MUL VL]\n"
+ ".inst 0x44804306 // smlalb z6.s, p4/M, z24.h, z0.h\n"
+ ".inst 0x4480471e // smlalt z30.s, p4/M, z24.h, z0.h\n"
+ "ld1b { z24.h }, p3/Z, [x20, x2]\n"
+ "ldr x20, [x5, #0xa8]\n"
+ ".inst 0x448040b1 // smlalb z17.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x448043b5 // smlalb z21.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x454e1821 // usublb z1.h, z1.b, z14.b\n"
+ "ldr x21, [x5, #0xb0]\n"
+ ".inst 0x44804207 // smlalb z7.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x448044a8 // smlalt z8.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "ldr x13, [x5, #0xb8]\n"
+ ".inst 0x448047bb // smlalt z27.s, p4/M, z29.h, z0.h\n"
+ ".inst 0x44804609 // smlalt z9.s, p4/M, z16.h, z0.h\n"
+ "ld1b { z0.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x454e1b18 // usublb z24.h, z24.b, z14.b\n"
+ ".inst 0x449a40a6 // smlalb z6.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a44be // smlalt z30.s, p4/M, z5.h, z26.h\n"
+ "ld1sb { z5.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldr x12, [x5, #0xc0]\n"
+ ".inst 0x449a4071 // smlalb z17.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a4215 // smlalb z21.s, p4/M, z16.h, z26.h\n"
+ "ldr x11, [x5, #0xc8]\n"
+ "ldr x10, [x5, #0xd0]\n"
+ ".inst 0x449a4327 // smlalb z7.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449a4468 // smlalt z8.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x454e1800 // usublb z0.h, z0.b, z14.b\n"
+ "ldr x9, [x5, #0xd8]\n"
+ ".inst 0x449a461b // smlalt z27.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4729 // smlalt z9.s, p4/M, z25.h, z26.h\n"
+ "ld1b { z26.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c10a5 // ssublb z5.h, z5.b, z12.b\n"
+ ".inst 0x449c4066 // smlalb z6.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c447e // smlalt z30.s, p4/M, z3.h, z28.h\n"
+ "ld1sb { z3.h }, p4/Z, [x4, #5, MUL VL]\n"
+ "ldr x28, [x5, #0xe0]\n"
+ ".inst 0x449c4051 // smlalb z17.s, p4/M, z2.h, z28.h\n"
+ ".inst 0x449c4335 // smlalb z21.s, p4/M, z25.h, z28.h\n"
+ "ldr x27, [x5, #0xe8]\n"
+ "ldr x26, [x5, #0xf0]\n"
+ ".inst 0x449c4267 // smlalb z7.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x449c4448 // smlalt z8.s, p4/M, z2.h, z28.h\n"
+ ".inst 0x454e1b5a // usublb z26.h, z26.b, z14.b\n"
+ "ldr x25, [x5, #0xf8]\n"
+ ".inst 0x449c473b // smlalt z27.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4669 // smlalt z9.s, p4/M, z19.h, z28.h\n"
+ "ld1b { z28.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454c1063 // ssublb z3.h, z3.b, z12.b\n"
+ ".inst 0x44924046 // smlalb z6.s, p4/M, z2.h, z18.h\n"
+ ".inst 0x4492445e // smlalt z30.s, p4/M, z2.h, z18.h\n"
"ld1sb { z2.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x44884069 // smlalb z9.s, p4/M, z3.h, z8.h\n"
- ".inst 0x44884087 // smlalb z7.s, p4/M, z4.h, z8.h\n"
- ".inst 0x455e1ad6 // usublb z22.h, z22.b, z30.b\n"
- "ld1b { z26.h }, p3/Z, [x20, x2]\n"
- ".inst 0x449047b7 // smlalt z23.s, p4/M, z29.h, z16.h\n"
- ".inst 0x449543ee // smlalb z14.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454a1042 // ssublb z2.h, z2.b, z10.b\n"
- "ldr x20, [x17, #0x70]\n"
- ".inst 0x448847b2 // smlalt z18.s, p4/M, z29.h, z8.h\n"
- ".inst 0x44884474 // smlalt z20.s, p4/M, z3.h, z8.h\n"
- "ld1sb { z29.h }, p4/Z, [x4, #7, MUL VL]\n"
- ".inst 0x455e1b5a // usublb z26.h, z26.b, z30.b\n"
- ".inst 0x44884481 // smlalt z1.s, p4/M, z4.h, z8.h\n"
- ".inst 0x449043e6 // smlalb z6.s, p4/M, z31.h, z16.h\n"
+ "ldr x24, [x5, #0x100]\n"
+ ".inst 0x449242d1 // smlalb z17.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x44924275 // smlalb z21.s, p4/M, z19.h, z18.h\n"
+ "ldr x23, [x5, #0x108]\n"
+ "ldr x22, [x5, #0x110]\n"
+ ".inst 0x449242e7 // smlalb z7.s, p4/M, z23.h, z18.h\n"
+ ".inst 0x449246c8 // smlalt z8.s, p4/M, z22.h, z18.h\n"
+ ".inst 0x454e1b9c // usublb z28.h, z28.b, z14.b\n"
+ "ldr x20, [x5, #0x118]\n"
+ ".inst 0x4492467b // smlalt z27.s, p4/M, z19.h, z18.h\n"
+ ".inst 0x449246e9 // smlalt z9.s, p4/M, z23.h, z18.h\n"
+ "ld1b { z18.h }, p3/Z, [x21, x2]\n"
+ ".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ ".inst 0x448442c6 // smlalb z6.s, p4/M, z22.h, z4.h\n"
+ ".inst 0x448446de // smlalt z30.s, p4/M, z22.h, z4.h\n"
+ "ld1sb { z22.h }, p4/Z, [x4, #7, MUL VL]\n"
"inch x4, ALL, MUL #8\n"
- "ld1b { z8.h }, p3/Z, [x20, x2]\n"
- ".inst 0x44904089 // smlalb z9.s, p4/M, z4.h, z16.h\n"
- ".inst 0x44904367 // smlalb z7.s, p4/M, z27.h, z16.h\n"
- ".inst 0x454a13bd // ssublb z29.h, z29.b, z10.b\n"
- "ldr x20, [x17, #0x78]\n"
- ".inst 0x449547f7 // smlalt z23.s, p4/M, z31.h, z21.h\n"
- ".inst 0x4491400e // smlalb z14.s, p4/M, z0.h, z17.h\n"
- "ld1sb { z24.h }, p4/Z, [x4]\n"
- ".inst 0x455e1908 // usublb z8.h, z8.b, z30.b\n"
- ".inst 0x449047f2 // smlalt z18.s, p4/M, z31.h, z16.h\n"
- ".inst 0x44904494 // smlalt z20.s, p4/M, z4.h, z16.h\n"
- "ld1b { z31.h }, p3/Z, [x20, x2]\n"
- ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
- ".inst 0x44904761 // smlalt z1.s, p4/M, z27.h, z16.h\n"
- ".inst 0x44954006 // smlalb z6.s, p4/M, z0.h, z21.h\n"
- "ldr x22, [x17, #0x80]\n"
+ ".inst 0x44844291 // smlalb z17.s, p4/M, z20.h, z4.h\n"
+ ".inst 0x448442f5 // smlalb z21.s, p4/M, z23.h, z4.h\n"
+ "whilelt p0.h, x6, x3\n"
+ "ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
+ ".inst 0x44844027 // smlalb z7.s, p4/M, z1.h, z4.h\n"
+ ".inst 0x44844688 // smlalt z8.s, p4/M, z20.h, z4.h\n"
+ ".inst 0x454e1a52 // usublb z18.h, z18.b, z14.b\n"
+ "ld1b { z20.h }, p3/Z, [x13, x2]\n"
+ ".inst 0x448446fb // smlalt z27.s, p4/M, z23.h, z4.h\n"
+ ".inst 0x44844429 // smlalt z9.s, p4/M, z1.h, z4.h\n"
+ ".inst 0x454c12d6 // ssublb z22.h, z22.b, z12.b\n"
+ "ld1sb { z4.h }, p4/Z, [x4]\n"
+ ".inst 0x448b43a6 // smlalb z6.s, p4/M, z29.h, z11.h\n"
+ ".inst 0x448b47be // smlalt z30.s, p4/M, z29.h, z11.h\n"
+ "ld1b { z29.h }, p3/Z, [x12, x2]\n"
+ ".inst 0x448b4211 // smlalb z17.s, p4/M, z16.h, z11.h\n"
+ ".inst 0x448b4315 // smlalb z21.s, p4/M, z24.h, z11.h\n"
+ ".inst 0x454e1a94 // usublb z20.h, z20.b, z14.b\n"
+ ".inst 0x448b4007 // smlalb z7.s, p4/M, z0.h, z11.h\n"
+ ".inst 0x448b4608 // smlalt z8.s, p4/M, z16.h, z11.h\n"
+ ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
+ ".inst 0x448b471b // smlalt z27.s, p4/M, z24.h, z11.h\n"
+ ".inst 0x448b4409 // smlalt z9.s, p4/M, z0.h, z11.h\n"
+ "ld1b { z11.h }, p3/Z, [x11, x2]\n"
+ ".inst 0x454e1bbd // usublb z29.h, z29.b, z14.b\n"
+ ".inst 0x449f4206 // smlalb z6.s, p4/M, z16.h, z31.h\n"
+ ".inst 0x449f461e // smlalt z30.s, p4/M, z16.h, z31.h\n"
"ld1sb { z16.h }, p4/Z, [x4, #1, MUL VL]\n"
- ".inst 0x44954369 // smlalb z9.s, p4/M, z27.h, z21.h\n"
- ".inst 0x449540a7 // smlalb z7.s, p4/M, z5.h, z21.h\n"
- ".inst 0x455e1bff // usublb z31.h, z31.b, z30.b\n"
- "ldr x21, [x17, #0x88]\n"
- ".inst 0x44914417 // smlalt z23.s, p4/M, z0.h, z17.h\n"
- ".inst 0x4499416e // smlalb z14.s, p4/M, z11.h, z25.h\n"
- ".inst 0x454a1210 // ssublb z16.h, z16.b, z10.b\n"
- "ldr x20, [x17, #0x90]\n"
- ".inst 0x44954412 // smlalt z18.s, p4/M, z0.h, z21.h\n"
- ".inst 0x44954774 // smlalt z20.s, p4/M, z27.h, z21.h\n"
- "ld1b { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1800 // usublb z0.h, z0.b, z30.b\n"
- ".inst 0x449544a1 // smlalt z1.s, p4/M, z5.h, z21.h\n"
- ".inst 0x449142c6 // smlalb z6.s, p4/M, z22.h, z17.h\n"
- "ld1sb { z21.h }, p4/Z, [x4, #2, MUL VL]\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- ".inst 0x449140a9 // smlalb z9.s, p4/M, z5.h, z17.h\n"
- ".inst 0x44914267 // smlalb z7.s, p4/M, z19.h, z17.h\n"
- "ldr x23, [x17, #0x98]\n"
- "ldr x22, [x17, #0xa0]\n"
- ".inst 0x44994577 // smlalt z23.s, p4/M, z11.h, z25.h\n"
- ".inst 0x4482406e // smlalb z14.s, p4/M, z3.h, z2.h\n"
- "ld1b { z11.h }, p3/Z, [x21, x2]\n"
- ".inst 0x455e196b // usublb z11.h, z11.b, z30.b\n"
- ".inst 0x449146d2 // smlalt z18.s, p4/M, z22.h, z17.h\n"
- ".inst 0x449144b4 // smlalt z20.s, p4/M, z5.h, z17.h\n"
- "ld1sb { z22.h }, p4/Z, [x4, #3, MUL VL]\n"
- ".inst 0x454a12d6 // ssublb z22.h, z22.b, z10.b\n"
- ".inst 0x44914661 // smlalt z1.s, p4/M, z19.h, z17.h\n"
- ".inst 0x44994066 // smlalb z6.s, p4/M, z3.h, z25.h\n"
- "ld1b { z17.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1a31 // usublb z17.h, z17.b, z30.b\n"
- ".inst 0x44994389 // smlalb z9.s, p4/M, z28.h, z25.h\n"
- ".inst 0x44994347 // smlalb z7.s, p4/M, z26.h, z25.h\n"
- "ldr x20, [x17, #0xa8]\n"
- "ldr x21, [x17, #0xb0]\n"
- ".inst 0x44824477 // smlalt z23.s, p4/M, z3.h, z2.h\n"
- ".inst 0x449d408e // smlalb z14.s, p4/M, z4.h, z29.h\n"
- "ldr x13, [x17, #0xb8]\n"
- "ldr x12, [x17, #0xc0]\n"
- ".inst 0x44994472 // smlalt z18.s, p4/M, z3.h, z25.h\n"
- ".inst 0x44994794 // smlalt z20.s, p4/M, z28.h, z25.h\n"
- "ld1b { z3.h }, p3/Z, [x23, x2]\n"
- ".inst 0x455e1863 // usublb z3.h, z3.b, z30.b\n"
- ".inst 0x44994741 // smlalt z1.s, p4/M, z26.h, z25.h\n"
- ".inst 0x44824086 // smlalb z6.s, p4/M, z4.h, z2.h\n"
- "ld1sb { z25.h }, p4/Z, [x4, #4, MUL VL]\n"
- ".inst 0x454a1339 // ssublb z25.h, z25.b, z10.b\n"
- ".inst 0x44824349 // smlalb z9.s, p4/M, z26.h, z2.h\n"
- ".inst 0x44824107 // smlalb z7.s, p4/M, z8.h, z2.h\n"
- "ldr x11, [x17, #0xc8]\n"
- "ldr x10, [x17, #0xd0]\n"
- ".inst 0x449d4497 // smlalt z23.s, p4/M, z4.h, z29.h\n"
- ".inst 0x4498436e // smlalb z14.s, p4/M, z27.h, z24.h\n"
- "ldr x9, [x17, #0xd8]\n"
- "ldr x28, [x17, #0xe0]\n"
- ".inst 0x44824492 // smlalt z18.s, p4/M, z4.h, z2.h\n"
- ".inst 0x44824754 // smlalt z20.s, p4/M, z26.h, z2.h\n"
- "ld1b { z4.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1884 // usublb z4.h, z4.b, z30.b\n"
- ".inst 0x44824501 // smlalt z1.s, p4/M, z8.h, z2.h\n"
- ".inst 0x449d4366 // smlalb z6.s, p4/M, z27.h, z29.h\n"
+ ".inst 0x449f4331 // smlalb z17.s, p4/M, z25.h, z31.h\n"
+ ".inst 0x449f4015 // smlalb z21.s, p4/M, z0.h, z31.h\n"
+ ".inst 0x449f4347 // smlalb z7.s, p4/M, z26.h, z31.h\n"
+ ".inst 0x449f4728 // smlalt z8.s, p4/M, z25.h, z31.h\n"
+ ".inst 0x454e196b // usublb z11.h, z11.b, z14.b\n"
+ ".inst 0x449f441b // smlalt z27.s, p4/M, z0.h, z31.h\n"
+ ".inst 0x449f4749 // smlalt z9.s, p4/M, z26.h, z31.h\n"
+ "ld1b { z31.h }, p3/Z, [x10, x2]\n"
+ ".inst 0x454c1210 // ssublb z16.h, z16.b, z12.b\n"
+ ".inst 0x44854326 // smlalb z6.s, p4/M, z25.h, z5.h\n"
+ ".inst 0x4485473e // smlalt z30.s, p4/M, z25.h, z5.h\n"
+ "ld1sb { z25.h }, p4/Z, [x4, #2, MUL VL]\n"
+ ".inst 0x44854271 // smlalb z17.s, p4/M, z19.h, z5.h\n"
+ ".inst 0x44854355 // smlalb z21.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854387 // smlalb z7.s, p4/M, z28.h, z5.h\n"
+ ".inst 0x44854668 // smlalt z8.s, p4/M, z19.h, z5.h\n"
+ ".inst 0x454e1bff // usublb z31.h, z31.b, z14.b\n"
+ ".inst 0x4485475b // smlalt z27.s, p4/M, z26.h, z5.h\n"
+ ".inst 0x44854789 // smlalt z9.s, p4/M, z28.h, z5.h\n"
+ "ld1b { z5.h }, p3/Z, [x9, x2]\n"
+ ".inst 0x454c1339 // ssublb z25.h, z25.b, z12.b\n"
+ ".inst 0x44834266 // smlalb z6.s, p4/M, z19.h, z3.h\n"
+ ".inst 0x4483467e // smlalt z30.s, p4/M, z19.h, z3.h\n"
+ "ld1sb { z19.h }, p4/Z, [x4, #3, MUL VL]\n"
+ ".inst 0x448342f1 // smlalb z17.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x44834395 // smlalb z21.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834247 // smlalb z7.s, p4/M, z18.h, z3.h\n"
+ ".inst 0x448346e8 // smlalt z8.s, p4/M, z23.h, z3.h\n"
+ ".inst 0x454e18a5 // usublb z5.h, z5.b, z14.b\n"
+ ".inst 0x4483479b // smlalt z27.s, p4/M, z28.h, z3.h\n"
+ ".inst 0x44834649 // smlalt z9.s, p4/M, z18.h, z3.h\n"
+ "ld1b { z3.h }, p3/Z, [x28, x2]\n"
+ ".inst 0x454c1273 // ssublb z19.h, z19.b, z12.b\n"
+ ".inst 0x448242e6 // smlalb z6.s, p4/M, z23.h, z2.h\n"
+ ".inst 0x448246fe // smlalt z30.s, p4/M, z23.h, z2.h\n"
+ "ld1sb { z23.h }, p4/Z, [x4, #4, MUL VL]\n"
+ ".inst 0x44824031 // smlalb z17.s, p4/M, z1.h, z2.h\n"
+ ".inst 0x44824255 // smlalb z21.s, p4/M, z18.h, z2.h\n"
+ ".inst 0x44824287 // smlalb z7.s, p4/M, z20.h, z2.h\n"
+ ".inst 0x44824428 // smlalt z8.s, p4/M, z1.h, z2.h\n"
+ ".inst 0x454e1863 // usublb z3.h, z3.b, z14.b\n"
+ "ld1b { z1.h }, p3/Z, [x27, x2]\n"
+ ".inst 0x4482465b // smlalt z27.s, p4/M, z18.h, z2.h\n"
+ ".inst 0x44824689 // smlalt z9.s, p4/M, z20.h, z2.h\n"
+ ".inst 0x454c12f7 // ssublb z23.h, z23.b, z12.b\n"
"ld1sb { z2.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x454a1042 // ssublb z2.h, z2.b, z10.b\n"
- ".inst 0x449d4109 // smlalb z9.s, p4/M, z8.h, z29.h\n"
- ".inst 0x449d43e7 // smlalb z7.s, p4/M, z31.h, z29.h\n"
- "ldr x27, [x17, #0xe8]\n"
- "ldr x26, [x17, #0xf0]\n"
- ".inst 0x44984777 // smlalt z23.s, p4/M, z27.h, z24.h\n"
- ".inst 0x449040ae // smlalb z14.s, p4/M, z5.h, z16.h\n"
- "ldr x25, [x17, #0xf8]\n"
- "ldr x24, [x17, #0x100]\n"
- ".inst 0x449d4772 // smlalt z18.s, p4/M, z27.h, z29.h\n"
- ".inst 0x449d4514 // smlalt z20.s, p4/M, z8.h, z29.h\n"
- "ld1b { z27.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1b7b // usublb z27.h, z27.b, z30.b\n"
- ".inst 0x449d47e1 // smlalt z1.s, p4/M, z31.h, z29.h\n"
- ".inst 0x449840a6 // smlalb z6.s, p4/M, z5.h, z24.h\n"
- "ld1sb { z29.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x454a13bd // ssublb z29.h, z29.b, z10.b\n"
- ".inst 0x449843e9 // smlalb z9.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984007 // smlalb z7.s, p4/M, z0.h, z24.h\n"
- "ldr x23, [x17, #0x108]\n"
- "ldr x22, [x17, #0x110]\n"
- ".inst 0x449044b7 // smlalt z23.s, p4/M, z5.h, z16.h\n"
- ".inst 0x4495438e // smlalb z14.s, p4/M, z28.h, z21.h\n"
- "ldr x20, [x17, #0x118]\n"
- "whilelt p0.h, x16, x3\n"
- ".inst 0x449844b2 // smlalt z18.s, p4/M, z5.h, z24.h\n"
- ".inst 0x449847f4 // smlalt z20.s, p4/M, z31.h, z24.h\n"
- "ld1b { z5.h }, p3/Z, [x21, x2]\n"
- ".inst 0x455e18a5 // usublb z5.h, z5.b, z30.b\n"
- ".inst 0x44984401 // smlalt z1.s, p4/M, z0.h, z24.h\n"
- ".inst 0x44904266 // smlalb z6.s, p4/M, z19.h, z16.h\n"
- "ld1sb { z24.h }, p4/Z, [x4, #7, MUL VL]\n"
- "inch x4, ALL, MUL #8\n"
- ".inst 0x44904009 // smlalb z9.s, p4/M, z0.h, z16.h\n"
- ".inst 0x44904167 // smlalb z7.s, p4/M, z11.h, z16.h\n"
- ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
- "ldr x21, [%x[params], %[offsetof_Params_bias]]\n"
- ".inst 0x44954797 // smlalt z23.s, p4/M, z28.h, z21.h\n"
- ".inst 0x4496434e // smlalb z14.s, p4/M, z26.h, z22.h\n"
- "ld1b { z28.h }, p3/Z, [x13, x2]\n"
- ".inst 0x455e1b9c // usublb z28.h, z28.b, z30.b\n"
- ".inst 0x44904672 // smlalt z18.s, p4/M, z19.h, z16.h\n"
- ".inst 0x44904414 // smlalt z20.s, p4/M, z0.h, z16.h\n"
- "ld1sb { z19.h }, p4/Z, [x4]\n"
- ".inst 0x454a1273 // ssublb z19.h, z19.b, z10.b\n"
- ".inst 0x44904561 // smlalt z1.s, p4/M, z11.h, z16.h\n"
- ".inst 0x44954346 // smlalb z6.s, p4/M, z26.h, z21.h\n"
- "ld1b { z16.h }, p3/Z, [x12, x2]\n"
- ".inst 0x455e1a10 // usublb z16.h, z16.b, z30.b\n"
- ".inst 0x44954229 // smlalb z9.s, p4/M, z17.h, z21.h\n"
- ".inst 0x44954067 // smlalb z7.s, p4/M, z3.h, z21.h\n"
- ".inst 0x44964757 // smlalt z23.s, p4/M, z26.h, z22.h\n"
- ".inst 0x4499410e // smlalb z14.s, p4/M, z8.h, z25.h\n"
- ".inst 0x44954752 // smlalt z18.s, p4/M, z26.h, z21.h\n"
- ".inst 0x44954634 // smlalt z20.s, p4/M, z17.h, z21.h\n"
- "ld1b { z26.h }, p3/Z, [x11, x2]\n"
- ".inst 0x455e1b5a // usublb z26.h, z26.b, z30.b\n"
- ".inst 0x44954461 // smlalt z1.s, p4/M, z3.h, z21.h\n"
- ".inst 0x44964106 // smlalb z6.s, p4/M, z8.h, z22.h\n"
- "ld1sb { z21.h }, p4/Z, [x4, #1, MUL VL]\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- ".inst 0x44964069 // smlalb z9.s, p4/M, z3.h, z22.h\n"
- ".inst 0x44964087 // smlalb z7.s, p4/M, z4.h, z22.h\n"
- ".inst 0x44994517 // smlalt z23.s, p4/M, z8.h, z25.h\n"
- ".inst 0x448243ee // smlalb z14.s, p4/M, z31.h, z2.h\n"
- ".inst 0x44964512 // smlalt z18.s, p4/M, z8.h, z22.h\n"
- ".inst 0x44964474 // smlalt z20.s, p4/M, z3.h, z22.h\n"
- "ld1b { z8.h }, p3/Z, [x10, x2]\n"
- ".inst 0x455e1908 // usublb z8.h, z8.b, z30.b\n"
- ".inst 0x44964481 // smlalt z1.s, p4/M, z4.h, z22.h\n"
- ".inst 0x449943e6 // smlalb z6.s, p4/M, z31.h, z25.h\n"
- "ld1sb { z22.h }, p4/Z, [x4, #2, MUL VL]\n"
- ".inst 0x454a12d6 // ssublb z22.h, z22.b, z10.b\n"
- ".inst 0x44994089 // smlalb z9.s, p4/M, z4.h, z25.h\n"
- ".inst 0x44994367 // smlalb z7.s, p4/M, z27.h, z25.h\n"
- ".inst 0x448247f7 // smlalt z23.s, p4/M, z31.h, z2.h\n"
- ".inst 0x449d400e // smlalb z14.s, p4/M, z0.h, z29.h\n"
- ".inst 0x449947f2 // smlalt z18.s, p4/M, z31.h, z25.h\n"
- ".inst 0x44994494 // smlalt z20.s, p4/M, z4.h, z25.h\n"
- "ld1b { z31.h }, p3/Z, [x9, x2]\n"
- ".inst 0x455e1bff // usublb z31.h, z31.b, z30.b\n"
- ".inst 0x44994761 // smlalt z1.s, p4/M, z27.h, z25.h\n"
- ".inst 0x44824006 // smlalb z6.s, p4/M, z0.h, z2.h\n"
- "ld1sb { z25.h }, p4/Z, [x4, #3, MUL VL]\n"
- ".inst 0x454a1339 // ssublb z25.h, z25.b, z10.b\n"
- ".inst 0x44824369 // smlalb z9.s, p4/M, z27.h, z2.h\n"
- ".inst 0x448240a7 // smlalb z7.s, p4/M, z5.h, z2.h\n"
- ".inst 0x449d4417 // smlalt z23.s, p4/M, z0.h, z29.h\n"
- ".inst 0x4498422e // smlalb z14.s, p4/M, z17.h, z24.h\n"
- ".inst 0x44824412 // smlalt z18.s, p4/M, z0.h, z2.h\n"
- ".inst 0x44824774 // smlalt z20.s, p4/M, z27.h, z2.h\n"
- "ld1b { z0.h }, p3/Z, [x28, x2]\n"
- ".inst 0x455e1800 // usublb z0.h, z0.b, z30.b\n"
- ".inst 0x448244a1 // smlalt z1.s, p4/M, z5.h, z2.h\n"
- ".inst 0x449d4166 // smlalb z6.s, p4/M, z11.h, z29.h\n"
- "ld1sb { z2.h }, p4/Z, [x4, #4, MUL VL]\n"
- ".inst 0x454a1042 // ssublb z2.h, z2.b, z10.b\n"
- ".inst 0x449d40a9 // smlalb z9.s, p4/M, z5.h, z29.h\n"
- ".inst 0x449d4387 // smlalb z7.s, p4/M, z28.h, z29.h\n"
- ".inst 0x44984637 // smlalt z23.s, p4/M, z17.h, z24.h\n"
- ".inst 0x4493406e // smlalb z14.s, p4/M, z3.h, z19.h\n"
- "ld1b { z17.h }, p3/Z, [x27, x2]\n"
- ".inst 0x455e1a31 // usublb z17.h, z17.b, z30.b\n"
- ".inst 0x449d4572 // smlalt z18.s, p4/M, z11.h, z29.h\n"
- ".inst 0x449d44b4 // smlalt z20.s, p4/M, z5.h, z29.h\n"
- "ld1sb { z11.h }, p4/Z, [x4, #5, MUL VL]\n"
- ".inst 0x454a116b // ssublb z11.h, z11.b, z10.b\n"
- ".inst 0x449d4781 // smlalt z1.s, p4/M, z28.h, z29.h\n"
- ".inst 0x44984066 // smlalb z6.s, p4/M, z3.h, z24.h\n"
- "ld1b { z29.h }, p3/Z, [x26, x2]\n"
- ".inst 0x455e1bbd // usublb z29.h, z29.b, z30.b\n"
- ".inst 0x44984209 // smlalb z9.s, p4/M, z16.h, z24.h\n"
- ".inst 0x44984347 // smlalb z7.s, p4/M, z26.h, z24.h\n"
- ".inst 0x44934477 // smlalt z23.s, p4/M, z3.h, z19.h\n"
- ".inst 0x4495408e // smlalb z14.s, p4/M, z4.h, z21.h\n"
- ".inst 0x44984472 // smlalt z18.s, p4/M, z3.h, z24.h\n"
- ".inst 0x44984614 // smlalt z20.s, p4/M, z16.h, z24.h\n"
- "ld1b { z3.h }, p3/Z, [x25, x2]\n"
- ".inst 0x455e1863 // usublb z3.h, z3.b, z30.b\n"
- ".inst 0x44984741 // smlalt z1.s, p4/M, z26.h, z24.h\n"
- ".inst 0x44934086 // smlalb z6.s, p4/M, z4.h, z19.h\n"
- "ld1sb { z24.h }, p4/Z, [x4, #6, MUL VL]\n"
- ".inst 0x454a1318 // ssublb z24.h, z24.b, z10.b\n"
- ".inst 0x44934349 // smlalb z9.s, p4/M, z26.h, z19.h\n"
- ".inst 0x44934107 // smlalb z7.s, p4/M, z8.h, z19.h\n"
- ".inst 0x44954497 // smlalt z23.s, p4/M, z4.h, z21.h\n"
- ".inst 0x4496436e // smlalb z14.s, p4/M, z27.h, z22.h\n"
- ".inst 0x44934492 // smlalt z18.s, p4/M, z4.h, z19.h\n"
- ".inst 0x44934754 // smlalt z20.s, p4/M, z26.h, z19.h\n"
+ ".inst 0x44964306 // smlalb z6.s, p4/M, z24.h, z22.h\n"
+ ".inst 0x4496471e // smlalt z30.s, p4/M, z24.h, z22.h\n"
+ "ld1b { z24.h }, p3/Z, [x26, x2]\n"
+ ".inst 0x44964011 // smlalb z17.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x449643b5 // smlalb z21.s, p4/M, z29.h, z22.h\n"
+ ".inst 0x454e1821 // usublb z1.h, z1.b, z14.b\n"
+ ".inst 0x44964167 // smlalb z7.s, p4/M, z11.h, z22.h\n"
+ ".inst 0x44964408 // smlalt z8.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x454c1042 // ssublb z2.h, z2.b, z12.b\n"
+ ".inst 0x449647bb // smlalt z27.s, p4/M, z29.h, z22.h\n"
+ ".inst 0x44964569 // smlalt z9.s, p4/M, z11.h, z22.h\n"
+ "ld1b { z22.h }, p3/Z, [x25, x2]\n"
+ ".inst 0x454e1b18 // usublb z24.h, z24.b, z14.b\n"
+ ".inst 0x44844006 // smlalb z6.s, p4/M, z0.h, z4.h\n"
+ ".inst 0x4484441e // smlalt z30.s, p4/M, z0.h, z4.h\n"
+ "ld1sb { z0.h }, p4/Z, [x4, #6, MUL VL]\n"
+ ".inst 0x44844351 // smlalb z17.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x44844175 // smlalb z21.s, p4/M, z11.h, z4.h\n"
+ ".inst 0x448443e7 // smlalb z7.s, p4/M, z31.h, z4.h\n"
+ ".inst 0x44844748 // smlalt z8.s, p4/M, z26.h, z4.h\n"
+ ".inst 0x454e1ad6 // usublb z22.h, z22.b, z14.b\n"
+ ".inst 0x4484457b // smlalt z27.s, p4/M, z11.h, z4.h\n"
+ ".inst 0x448447e9 // smlalt z9.s, p4/M, z31.h, z4.h\n"
"ld1b { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x455e1884 // usublb z4.h, z4.b, z30.b\n"
- ".inst 0x44934501 // smlalt z1.s, p4/M, z8.h, z19.h\n"
- ".inst 0x44954366 // smlalb z6.s, p4/M, z27.h, z21.h\n"
- "ld1sb { z19.h }, p4/Z, [x4, #7, MUL VL]\n"
+ ".inst 0x454c1000 // ssublb z0.h, z0.b, z12.b\n"
+ ".inst 0x44904346 // smlalb z6.s, p4/M, z26.h, z16.h\n"
+ ".inst 0x4490475e // smlalt z30.s, p4/M, z26.h, z16.h\n"
+ "ld1sb { z26.h }, p4/Z, [x4, #7, MUL VL]\n"
"inch x4, ALL, MUL #8\n"
- ".inst 0x44954109 // smlalb z9.s, p4/M, z8.h, z21.h\n"
- ".inst 0x449543e7 // smlalb z7.s, p4/M, z31.h, z21.h\n"
- ".inst 0x454a1273 // ssublb z19.h, z19.b, z10.b\n"
- ".inst 0x44964777 // smlalt z23.s, p4/M, z27.h, z22.h\n"
- ".inst 0x449940ae // smlalb z14.s, p4/M, z5.h, z25.h\n"
- ".inst 0x44954772 // smlalt z18.s, p4/M, z27.h, z21.h\n"
- ".inst 0x44954514 // smlalt z20.s, p4/M, z8.h, z21.h\n"
- "ld1b { z27.h }, p3/Z, [x23, x2]\n"
- ".inst 0x455e1b7b // usublb z27.h, z27.b, z30.b\n"
- ".inst 0x449547e1 // smlalt z1.s, p4/M, z31.h, z21.h\n"
- ".inst 0x449640a6 // smlalb z6.s, p4/M, z5.h, z22.h\n"
- "ld1sb { z21.h }, p4/Z, [x4]\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- ".inst 0x449643e9 // smlalb z9.s, p4/M, z31.h, z22.h\n"
- ".inst 0x44964007 // smlalb z7.s, p4/M, z0.h, z22.h\n"
- "inch x4\n"
- ".inst 0x449944b7 // smlalt z23.s, p4/M, z5.h, z25.h\n"
- ".inst 0x4482420e // smlalb z14.s, p4/M, z16.h, z2.h\n"
- ".inst 0x449644b2 // smlalt z18.s, p4/M, z5.h, z22.h\n"
- ".inst 0x449647f4 // smlalt z20.s, p4/M, z31.h, z22.h\n"
- "ld1b { z5.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e18a5 // usublb z5.h, z5.b, z30.b\n"
- ".inst 0x44964401 // smlalt z1.s, p4/M, z0.h, z22.h\n"
+ ".inst 0x44904391 // smlalb z17.s, p4/M, z28.h, z16.h\n"
+ ".inst 0x449043f5 // smlalb z21.s, p4/M, z31.h, z16.h\n"
+ ".inst 0x449040a7 // smlalb z7.s, p4/M, z5.h, z16.h\n"
+ ".inst 0x44904788 // smlalt z8.s, p4/M, z28.h, z16.h\n"
+ ".inst 0x454e1884 // usublb z4.h, z4.b, z14.b\n"
+ ".inst 0x449047fb // smlalt z27.s, p4/M, z31.h, z16.h\n"
+ ".inst 0x449044a9 // smlalt z9.s, p4/M, z5.h, z16.h\n"
+ "ld1b { z16.h }, p3/Z, [x23, x2]\n"
+ ".inst 0x454c135a // ssublb z26.h, z26.b, z12.b\n"
".inst 0x44994386 // smlalb z6.s, p4/M, z28.h, z25.h\n"
- "ld1w { z22.s }, p2/Z, [x15]\n"
- ".inst 0x44994009 // smlalb z9.s, p4/M, z0.h, z25.h\n"
- ".inst 0x44994227 // smlalb z7.s, p4/M, z17.h, z25.h\n"
- ".inst 0x44824617 // smlalt z23.s, p4/M, z16.h, z2.h\n"
- ".inst 0x448b434e // smlalb z14.s, p4/M, z26.h, z11.h\n"
- "ld1w { z16.s }, p1/Z, [x15, #1, MUL VL]\n"
- "addvl x15, x15, #2\n"
- ".inst 0x44994792 // smlalt z18.s, p4/M, z28.h, z25.h\n"
- ".inst 0x44994414 // smlalt z20.s, p4/M, z0.h, z25.h\n"
- "ld1b { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1b9c // usublb z28.h, z28.b, z30.b\n"
- ".inst 0x44994621 // smlalt z1.s, p4/M, z17.h, z25.h\n"
- ".inst 0x44824346 // smlalb z6.s, p4/M, z26.h, z2.h\n"
- "uzp1 z25.s, z22.s, z16.s\n"
+ ".inst 0x4499479e // smlalt z30.s, p4/M, z28.h, z25.h\n"
+ "ld1sb { z28.h }, p4/Z, [x4]\n"
+ "inch x4\n"
+ ".inst 0x44994251 // smlalb z17.s, p4/M, z18.h, z25.h\n"
+ ".inst 0x449940b5 // smlalb z21.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994067 // smlalb z7.s, p4/M, z3.h, z25.h\n"
+ ".inst 0x44994648 // smlalt z8.s, p4/M, z18.h, z25.h\n"
+ ".inst 0x454e1a10 // usublb z16.h, z16.b, z14.b\n"
+ ".inst 0x449944bb // smlalt z27.s, p4/M, z5.h, z25.h\n"
+ ".inst 0x44994469 // smlalt z9.s, p4/M, z3.h, z25.h\n"
+ "ld1b { z25.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x44934246 // smlalb z6.s, p4/M, z18.h, z19.h\n"
+ ".inst 0x4493465e // smlalt z30.s, p4/M, z18.h, z19.h\n"
+ "ld1w { z18.s }, p2/Z, [x7]\n"
+ ".inst 0x44934291 // smlalb z17.s, p4/M, z20.h, z19.h\n"
+ ".inst 0x44934075 // smlalb z21.s, p4/M, z3.h, z19.h\n"
+ ".inst 0x44934027 // smlalb z7.s, p4/M, z1.h, z19.h\n"
+ ".inst 0x44934688 // smlalt z8.s, p4/M, z20.h, z19.h\n"
+ "ld1w { z20.s }, p1/Z, [x7, #1, MUL VL]\n"
+ ".inst 0x454e1b39 // usublb z25.h, z25.b, z14.b\n"
+ ".inst 0x4493447b // smlalt z27.s, p4/M, z3.h, z19.h\n"
+ ".inst 0x44934429 // smlalt z9.s, p4/M, z1.h, z19.h\n"
+ "ld1b { z19.h }, p3/Z, [x20, x2]\n"
"inch x2\n"
- ".inst 0x448243a9 // smlalb z9.s, p4/M, z29.h, z2.h\n"
- ".inst 0x44824067 // smlalb z7.s, p4/M, z3.h, z2.h\n"
- "uzp2 z16.s, z22.s, z16.s\n"
- "ld1w { z22.s }, p2/Z, [x14]\n"
- ".inst 0x448b4757 // smlalt z23.s, p4/M, z26.h, z11.h\n"
- ".inst 0x4498410e // smlalb z14.s, p4/M, z8.h, z24.h\n"
+ ".inst 0x449743a6 // smlalb z6.s, p4/M, z29.h, z23.h\n"
+ ".inst 0x449747be // smlalt z30.s, p4/M, z29.h, z23.h\n"
+ "addvl x7, x7, #2\n"
+ ".inst 0x44974171 // smlalb z17.s, p4/M, z11.h, z23.h\n"
+ ".inst 0x44974315 // smlalb z21.s, p4/M, z24.h, z23.h\n"
+ "uzp1 z29.s, z18.s, z20.s\n"
+ ".inst 0x449742c7 // smlalb z7.s, p4/M, z22.h, z23.h\n"
+ ".inst 0x44974568 // smlalt z8.s, p4/M, z11.h, z23.h\n"
+ "uzp2 z18.s, z18.s, z20.s\n"
+ "ld1w { z20.s }, p2/Z, [x8]\n"
+ ".inst 0x4497471b // smlalt z27.s, p4/M, z24.h, z23.h\n"
+ ".inst 0x449746c9 // smlalt z9.s, p4/M, z22.h, z23.h\n"
+ "ld1w { z24.s }, p1/Z, [x8, #1, MUL VL]\n"
+ ".inst 0x454e1a73 // usublb z19.h, z19.b, z14.b\n"
+ ".inst 0x44824166 // smlalb z6.s, p4/M, z11.h, z2.h\n"
+ ".inst 0x4482457e // smlalt z30.s, p4/M, z11.h, z2.h\n"
"mov x20, x2\n"
- "incw x20\n"
- ".inst 0x44824752 // smlalt z18.s, p4/M, z26.h, z2.h\n"
- ".inst 0x448247b4 // smlalt z20.s, p4/M, z29.h, z2.h\n"
- "ld1w { z26.s }, p1/Z, [x14, #1, MUL VL]\n"
- "uzp1 z29.s, z22.s, z26.s\n"
- ".inst 0x44824461 // smlalt z1.s, p4/M, z3.h, z2.h\n"
- ".inst 0x448b4106 // smlalb z6.s, p4/M, z8.h, z11.h\n"
- "uzp2 z22.s, z22.s, z26.s\n"
"whilelt p2.s, x2, x3\n"
- ".inst 0x448b4069 // smlalb z9.s, p4/M, z3.h, z11.h\n"
- ".inst 0x448b4087 // smlalb z7.s, p4/M, z4.h, z11.h\n"
+ ".inst 0x448243f1 // smlalb z17.s, p4/M, z31.h, z2.h\n"
+ ".inst 0x448242d5 // smlalb z21.s, p4/M, z22.h, z2.h\n"
+ "addvl x8, x8, #2\n"
+ ".inst 0x44824087 // smlalb z7.s, p4/M, z4.h, z2.h\n"
+ ".inst 0x448247e8 // smlalt z8.s, p4/M, z31.h, z2.h\n"
+ "uzp1 z23.s, z20.s, z24.s\n"
+ ".inst 0x448246db // smlalt z27.s, p4/M, z22.h, z2.h\n"
+ ".inst 0x44824489 // smlalt z9.s, p4/M, z4.h, z2.h\n"
+ "uzp2 z22.s, z20.s, z24.s\n"
+ "incw x20\n"
+ ".inst 0x448043e6 // smlalb z6.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448047fe // smlalt z30.s, p4/M, z31.h, z0.h\n"
+ ".inst 0x448040b1 // smlalb z17.s, p4/M, z5.h, z0.h\n"
+ ".inst 0x44804095 // smlalb z21.s, p4/M, z4.h, z0.h\n"
+ ".inst 0x44804207 // smlalb z7.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x448044a8 // smlalt z8.s, p4/M, z5.h, z0.h\n"
"whilelt p1.s, x20, x3\n"
"whilelt p3.h, x2, x3\n"
- ".inst 0x44984517 // smlalt z23.s, p4/M, z8.h, z24.h\n"
- ".inst 0x449343ee // smlalb z14.s, p4/M, z31.h, z19.h\n"
- "addvl x14, x14, #2\n"
- ".inst 0x448b4512 // smlalt z18.s, p4/M, z8.h, z11.h\n"
- ".inst 0x448b4474 // smlalt z20.s, p4/M, z3.h, z11.h\n"
- ".inst 0x448b4481 // smlalt z1.s, p4/M, z4.h, z11.h\n"
- ".inst 0x449843e6 // smlalb z6.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984089 // smlalb z9.s, p4/M, z4.h, z24.h\n"
- ".inst 0x44984367 // smlalb z7.s, p4/M, z27.h, z24.h\n"
- ".inst 0x449347f7 // smlalt z23.s, p4/M, z31.h, z19.h\n"
- ".inst 0x4495400e // smlalb z14.s, p4/M, z0.h, z21.h\n"
- ".inst 0x04b975ce // sqrdmulh z14.s, z14.s, z25.s\n"
- ".inst 0x449847f2 // smlalt z18.s, p4/M, z31.h, z24.h\n"
- ".inst 0x44984494 // smlalt z20.s, p4/M, z4.h, z24.h\n"
- "and z3.d, z14.d, z29.d\n"
- ".inst 0x44984761 // smlalt z1.s, p4/M, z27.h, z24.h\n"
- ".inst 0x44934006 // smlalb z6.s, p4/M, z0.h, z19.h\n"
- "asr z3.s, z3.s, #0x1f\n"
- ".inst 0x44934369 // smlalb z9.s, p4/M, z27.h, z19.h\n"
- ".inst 0x449340a7 // smlalb z7.s, p4/M, z5.h, z19.h\n"
- "sqadd z14.s, z14.s, z3.s\n"
- ".inst 0x448293ae // srshl z14.s, p4/M, z14.s, z29.s\n"
- ".inst 0x44954417 // smlalt z23.s, p4/M, z0.h, z21.h\n"
- ".inst 0x44934412 // smlalt z18.s, p4/M, z0.h, z19.h\n"
- ".inst 0x04b076f7 // sqrdmulh z23.s, z23.s, z16.s\n"
- ".inst 0x44934774 // smlalt z20.s, p4/M, z27.h, z19.h\n"
- ".inst 0x449344a1 // smlalt z1.s, p4/M, z5.h, z19.h\n"
- "and z31.d, z23.d, z22.d\n"
- ".inst 0x44954226 // smlalb z6.s, p4/M, z17.h, z21.h\n"
- ".inst 0x449540a9 // smlalb z9.s, p4/M, z5.h, z21.h\n"
- ".inst 0x04b974c6 // sqrdmulh z6.s, z6.s, z25.s\n"
- ".inst 0x44954387 // smlalb z7.s, p4/M, z28.h, z21.h\n"
- ".inst 0x44954632 // smlalt z18.s, p4/M, z17.h, z21.h\n"
- ".inst 0x04b97529 // sqrdmulh z9.s, z9.s, z25.s\n"
- ".inst 0x449544b4 // smlalt z20.s, p4/M, z5.h, z21.h\n"
- ".inst 0x44954781 // smlalt z1.s, p4/M, z28.h, z21.h\n"
- ".inst 0x04b974e7 // sqrdmulh z7.s, z7.s, z25.s\n"
- "asr z31.s, z31.s, #0x1f\n"
- "and z3.d, z6.d, z29.d\n"
- ".inst 0x04b07652 // sqrdmulh z18.s, z18.s, z16.s\n"
- "and z0.d, z9.d, z29.d\n"
- ".inst 0x04b07694 // sqrdmulh z20.s, z20.s, z16.s\n"
- "and z19.d, z7.d, z29.d\n"
- ".inst 0x04b07421 // sqrdmulh z1.s, z1.s, z16.s\n"
- "sqadd z23.s, z23.s, z31.s\n"
- ".inst 0x448292d7 // srshl z23.s, p4/M, z23.s, z22.s\n"
- "asr z3.s, z3.s, #0x1f\n"
- "and z21.d, z18.d, z22.d\n"
- "asr z0.s, z0.s, #0x1f\n"
- "and z17.d, z20.d, z22.d\n"
+ ".inst 0x4480449b // smlalt z27.s, p4/M, z4.h, z0.h\n"
+ ".inst 0x44804609 // smlalt z9.s, p4/M, z16.h, z0.h\n"
+ ".inst 0x449a40a6 // smlalb z6.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a44be // smlalt z30.s, p4/M, z5.h, z26.h\n"
+ ".inst 0x449a4071 // smlalb z17.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a4215 // smlalb z21.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4327 // smlalb z7.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449a4468 // smlalt z8.s, p4/M, z3.h, z26.h\n"
+ ".inst 0x449a461b // smlalt z27.s, p4/M, z16.h, z26.h\n"
+ ".inst 0x449a4729 // smlalt z9.s, p4/M, z25.h, z26.h\n"
+ ".inst 0x449c4066 // smlalb z6.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c447e // smlalt z30.s, p4/M, z3.h, z28.h\n"
+ ".inst 0x449c4031 // smlalb z17.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c4335 // smlalb z21.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4267 // smlalb z7.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x449c4428 // smlalt z8.s, p4/M, z1.h, z28.h\n"
+ ".inst 0x449c473b // smlalt z27.s, p4/M, z25.h, z28.h\n"
+ ".inst 0x449c4669 // smlalt z9.s, p4/M, z19.h, z28.h\n"
+ ".inst 0x04bd74c6 // sqrdmulh z6.s, z6.s, z29.s\n"
+ ".inst 0x04b277de // sqrdmulh z30.s, z30.s, z18.s\n"
+ ".inst 0x04bd7631 // sqrdmulh z17.s, z17.s, z29.s\n"
+ ".inst 0x04bd76b5 // sqrdmulh z21.s, z21.s, z29.s\n"
+ "and z19.d, z6.d, z23.d\n"
+ ".inst 0x04bd74e7 // sqrdmulh z7.s, z7.s, z29.s\n"
+ ".inst 0x04b27508 // sqrdmulh z8.s, z8.s, z18.s\n"
+ "and z16.d, z30.d, z22.d\n"
+ "and z2.d, z17.d, z23.d\n"
+ "asr z19.s, z19.s, #0x1f\n"
+ "and z20.d, z21.d, z23.d\n"
+ ".inst 0x04b2777b // sqrdmulh z27.s, z27.s, z18.s\n"
+ ".inst 0x04b27529 // sqrdmulh z9.s, z9.s, z18.s\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "sqadd z6.s, z6.s, z19.s\n"
+ "and z19.d, z7.d, z23.d\n"
+ "and z0.d, z8.d, z22.d\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "sqadd z30.s, z30.s, z16.s\n"
+ "and z26.d, z27.d, z22.d\n"
"asr z19.s, z19.s, #0x1f\n"
- "and z16.d, z1.d, z22.d\n"
- "sqadd z6.s, z6.s, z3.s\n"
- "asr z21.s, z21.s, #0x1f\n"
- ".inst 0x448293a6 // srshl z6.s, p4/M, z6.s, z29.s\n"
- "sqadd z9.s, z9.s, z0.s\n"
- "asr z17.s, z17.s, #0x1f\n"
- ".inst 0x448293a9 // srshl z9.s, p4/M, z9.s, z29.s\n"
+ "and z16.d, z9.d, z22.d\n"
+ ".inst 0x448292e6 // srshl z6.s, p4/M, z6.s, z23.s\n"
+ "sqadd z17.s, z17.s, z2.s\n"
+ "asr z0.s, z0.s, #0x1f\n"
+ "sqadd z21.s, z21.s, z20.s\n"
+ "asr z26.s, z26.s, #0x1f\n"
+ ".inst 0x448292de // srshl z30.s, p4/M, z30.s, z22.s\n"
"sqadd z7.s, z7.s, z19.s\n"
"asr z16.s, z16.s, #0x1f\n"
- ".inst 0x448293a7 // srshl z7.s, p4/M, z7.s, z29.s\n"
- "sqadd z18.s, z18.s, z21.s\n"
- "sqadd z20.s, z20.s, z17.s\n"
- ".inst 0x448292d2 // srshl z18.s, p4/M, z18.s, z22.s\n"
- ".inst 0x448292d4 // srshl z20.s, p4/M, z20.s, z22.s\n"
- "sqadd z1.s, z1.s, z16.s\n"
- ".inst 0x453041ce // sqxtnb z14.h, z14.s\n"
- ".inst 0x448292c1 // srshl z1.s, p4/M, z1.s, z22.s\n"
+ ".inst 0x448292f1 // srshl z17.s, p4/M, z17.s, z23.s\n"
+ "sqadd z8.s, z8.s, z0.s\n"
".inst 0x453040c6 // sqxtnb z6.h, z6.s\n"
- ".inst 0x45304129 // sqxtnb z9.h, z9.s\n"
+ ".inst 0x448292f5 // srshl z21.s, p4/M, z21.s, z23.s\n"
+ "sqadd z27.s, z27.s, z26.s\n"
+ ".inst 0x448292e7 // srshl z7.s, p4/M, z7.s, z23.s\n"
+ "sqadd z9.s, z9.s, z16.s\n"
+ ".inst 0x45304231 // sqxtnb z17.h, z17.s\n"
+ ".inst 0x448292c8 // srshl z8.s, p4/M, z8.s, z22.s\n"
+ ".inst 0x453042b5 // sqxtnb z21.h, z21.s\n"
+ ".inst 0x453047c6 // sqxtnt z6.h, z30.s\n"
+ ".inst 0x448292db // srshl z27.s, p4/M, z27.s, z22.s\n"
+ ".inst 0x448292c9 // srshl z9.s, p4/M, z9.s, z22.s\n"
".inst 0x453040e7 // sqxtnb z7.h, z7.s\n"
- ".inst 0x453046ee // sqxtnt z14.h, z23.s\n"
- ".inst 0x45304646 // sqxtnt z6.h, z18.s\n"
- ".inst 0x45304689 // sqxtnt z9.h, z20.s\n"
- ".inst 0x45304427 // sqxtnt z7.h, z1.s\n"
- "sqadd z14.h, z14.h, z15.h\n"
- "smax z14.h, p4/M, z14.h, z12.h\n"
- "smin z14.h, p4/M, z14.h, z13.h\n"
- "sqadd z6.h, z6.h, z15.h\n"
- "sqadd z9.h, z9.h, z15.h\n"
- "smax z6.h, p4/M, z6.h, z12.h\n"
- "smax z9.h, p4/M, z9.h, z12.h\n"
- "sqadd z7.h, z7.h, z15.h\n"
- "smax z7.h, p4/M, z7.h, z12.h\n"
+ ".inst 0x45304511 // sqxtnt z17.h, z8.s\n"
+ ".inst 0x45304775 // sqxtnt z21.h, z27.s\n"
+ ".inst 0x45304527 // sqxtnt z7.h, z9.s\n"
+ "sqadd z6.h, z6.h, z10.h\n"
+ "sqadd z17.h, z17.h, z10.h\n"
+ "sqadd z21.h, z21.h, z10.h\n"
+ "sqadd z7.h, z7.h, z10.h\n"
+ "smax z6.h, p4/M, z6.h, z15.h\n"
+ "smax z17.h, p4/M, z17.h, z15.h\n"
+ "smax z21.h, p4/M, z21.h, z15.h\n"
+ "smax z7.h, p4/M, z7.h, z15.h\n"
"smin z6.h, p4/M, z6.h, z13.h\n"
- "st1b { z14.h }, p0, [x5, x16]\n"
- "smin z9.h, p4/M, z9.h, z13.h\n"
+ "smin z17.h, p4/M, z17.h, z13.h\n"
+ "smin z21.h, p4/M, z21.h, z13.h\n"
"smin z7.h, p4/M, z7.h, z13.h\n"
- "st1b { z6.h }, p0, [x6, x16]\n"
- "st1b { z9.h }, p0, [x7, x16]\n"
- "st1b { z7.h }, p0, [x8, x16]\n"
- "ld1w { z17.s }, p2/Z, [x21]\n"
+ "st1b { z6.h }, p0, [x17, x6]\n"
+ "st1b { z17.h }, p0, [x16, x6]\n"
+ "st1b { z21.h }, p0, [x15, x6]\n"
+ "st1b { z7.h }, p0, [x14, x6]\n"
+ "inch x6\n"
+ "ld1w { z21.s }, p2/Z, [x21]\n"
"ld1w { z16.s }, p1/Z, [x21, #1, MUL VL]\n"
- "uzp1 z14.s, z17.s, z16.s\n"
- "ld1sb { z26.h }, p4/Z, [x4]\n"
- "ld1sb { z8.h }, p4/Z, [x4, #1, MUL VL]\n"
- "uzp2 z23.s, z17.s, z16.s\n"
"addvl x21, x21, #2\n"
- "ld1sb { z16.h }, p4/Z, [x4, #2, MUL VL]\n"
- "ld1sb { z21.h }, p4/Z, [x4, #3, MUL VL]\n"
- "inch x16\n"
+ "ld1sb { z25.h }, p4/Z, [x4]\n"
+ "ld1sb { z28.h }, p4/Z, [x4, #1, MUL VL]\n"
+ "ld1sb { z4.h }, p4/Z, [x4, #2, MUL VL]\n"
+ "ld1sb { z23.h }, p4/Z, [x4, #3, MUL VL]\n"
+ "ld1sb { z31.h }, p4/Z, [x4, #4, MUL VL]\n"
+ "ldp x9, x28, [x5, #0x0]\n"
+ "uzp1 z6.s, z21.s, z16.s\n"
+ "uzp2 z30.s, z21.s, z16.s\n"
"str x21, [%x[params], %[offsetof_Params_bias]]\n"
- "ld1sb { z17.h }, p4/Z, [x4, #4, MUL VL]\n"
- "ldp x9, x28, [x17, #0x0]\n"
- "mov z6.d, z14.d\n"
- "mov z18.d, z23.d\n"
- "ldp x27, x26, [x17, #0x10]\n"
- "ldp x25, x24, [x17, #0x20]\n"
- "mov z9.d, z14.d\n"
- "mov z20.d, z23.d\n"
- "ldp x23, x22, [x17, #0x30]\n"
- "ldp x21, x20, [x17, #0x40]\n"
- "mov z7.d, z14.d\n"
- "mov z1.d, z23.d\n"
- "ld1b { z22.h }, p3/Z, [x9, x2]\n"
- "ld1b { z2.h }, p3/Z, [x28, x2]\n"
- ".inst 0x454a135a // ssublb z26.h, z26.b, z10.b\n"
- ".inst 0x454a1108 // ssublb z8.h, z8.b, z10.b\n"
- "ld1b { z11.h }, p3/Z, [x27, x2]\n"
- "ld1b { z3.h }, p3/Z, [x26, x2]\n"
- ".inst 0x454a1210 // ssublb z16.h, z16.b, z10.b\n"
- ".inst 0x454a12b5 // ssublb z21.h, z21.b, z10.b\n"
- "ld1b { z29.h }, p3/Z, [x25, x2]\n"
- "ld1b { z4.h }, p3/Z, [x24, x2]\n"
- ".inst 0x454a1231 // ssublb z17.h, z17.b, z10.b\n"
- ".inst 0x455e1ad6 // usublb z22.h, z22.b, z30.b\n"
- "ld1b { z31.h }, p3/Z, [x23, x2]\n"
- "ld1b { z0.h }, p3/Z, [x22, x2]\n"
- ".inst 0x455e1842 // usublb z2.h, z2.b, z30.b\n"
- ".inst 0x455e196b // usublb z11.h, z11.b, z30.b\n"
- "ld1b { z19.h }, p3/Z, [x21, x2]\n"
- "ld1b { z28.h }, p3/Z, [x20, x2]\n"
- ".inst 0x455e1863 // usublb z3.h, z3.b, z30.b\n"
- ".inst 0x455e1bbd // usublb z29.h, z29.b, z30.b\n"
- ".inst 0x455e1884 // usublb z4.h, z4.b, z30.b\n"
- ".inst 0x455e1bff // usublb z31.h, z31.b, z30.b\n"
- ".inst 0x455e1800 // usublb z0.h, z0.b, z30.b\n"
- ".inst 0x455e1a73 // usublb z19.h, z19.b, z30.b\n"
- ".inst 0x455e1b9c // usublb z28.h, z28.b, z30.b\n"
+ ".inst 0x454c1339 // ssublb z25.h, z25.b, z12.b\n"
+ ".inst 0x454c139c // ssublb z28.h, z28.b, z12.b\n"
+ ".inst 0x454c1084 // ssublb z4.h, z4.b, z12.b\n"
+ ".inst 0x454c12f7 // ssublb z23.h, z23.b, z12.b\n"
+ "ldp x27, x26, [x5, #0x10]\n"
+ "mov z17.d, z6.d\n"
+ "mov z8.d, z30.d\n"
+ "mov z21.d, z6.d\n"
+ "mov z27.d, z30.d\n"
+ "ldp x25, x24, [x5, #0x20]\n"
+ "mov z7.d, z6.d\n"
+ "mov z9.d, z30.d\n"
+ ".inst 0x454c13ff // ssublb z31.h, z31.b, z12.b\n"
+ "ldp x23, x22, [x5, #0x30]\n"
+ "ldp x21, x20, [x5, #0x40]\n"
+ "ld1b { z26.h }, p3/Z, [x9, x2]\n"
+ "ld1b { z16.h }, p3/Z, [x28, x2]\n"
+ "ld1b { z24.h }, p3/Z, [x27, x2]\n"
+ "ld1b { z5.h }, p3/Z, [x26, x2]\n"
+ "ld1b { z18.h }, p3/Z, [x25, x2]\n"
+ "ld1b { z3.h }, p3/Z, [x24, x2]\n"
+ "ld1b { z19.h }, p3/Z, [x23, x2]\n"
+ "ld1b { z11.h }, p3/Z, [x22, x2]\n"
+ ".inst 0x454e1b5a // usublb z26.h, z26.b, z14.b\n"
+ ".inst 0x454e1a10 // usublb z16.h, z16.b, z14.b\n"
+ "ld1b { z20.h }, p3/Z, [x21, x2]\n"
+ "ld1b { z29.h }, p3/Z, [x20, x2]\n"
+ ".inst 0x454e1b18 // usublb z24.h, z24.b, z14.b\n"
+ ".inst 0x454e18a5 // usublb z5.h, z5.b, z14.b\n"
+ ".inst 0x454e1a52 // usublb z18.h, z18.b, z14.b\n"
+ ".inst 0x454e1863 // usublb z3.h, z3.b, z14.b\n"
+ ".inst 0x454e1a73 // usublb z19.h, z19.b, z14.b\n"
+ ".inst 0x454e196b // usublb z11.h, z11.b, z14.b\n"
+ ".inst 0x454e1a94 // usublb z20.h, z20.b, z14.b\n"
+ ".inst 0x454e1bbd // usublb z29.h, z29.b, z14.b\n"
"b.any 1b\n"
:
: [offsetof_Params_bias] "I" (offsetof(Params, bias)), [offsetof_Params_inptrs] "I" (offsetof(Params, inptrs)), [offsetof_Params_n_channels] "I" (offsetof(Params, n_channels)), [offsetof_Params_outptrs] "I" (offsetof(Params, outptrs)), [offsetof_Params_requant] "I" (offsetof(Params, requant)), [offsetof_Params_requant_muls] "I" (offsetof(Params, requant_muls)), [offsetof_Params_requant_shifts] "I" (offsetof(Params, requant_shifts)), [offsetof_Params_weights] "I" (offsetof(Params, weights)), [offsetof_Requantize32_a_offset] "I" (offsetof(arm_gemm::Requantize32, a_offset)), [offsetof_Requantize32_b_offset] "I" (offsetof(arm_gemm::Requantize32, b_offset)), [offsetof_Requantize32_c_offset] "I" (offsetof(arm_gemm::Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(arm_gemm::Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(arm_gemm::Requantize32, minval)), [params] "r" (&params)
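
Note: the loop tail above requantises the widened int32 accumulators back to 8-bit: sqrdmulh scales by the per-channel multiplier, the and/asr/sqadd triple applies a rounding fixup, srshl performs the rounding right-shift, sqxtnb/sqxtnt narrow to 16-bit, and the final sqadd/smax/smin apply the c_offset and the minval/maxval clamp before st1b stores the bytes. A minimal scalar sketch of that per-lane arithmetic follows; the function and parameter names are illustrative (only c_offset/minval/maxval correspond to arm_gemm::Requantize32 fields named in the operand list), and the saturation corner cases of the real instructions are omitted.

    #include <algorithm>
    #include <cstdint>

    // Scalar sketch of the per-lane requantisation in the loop tail above.
    // "mul" and "shift" stand for the per-channel requant_muls/requant_shifts
    // values loaded into the uzp1/uzp2 pairs; names are illustrative.
    inline uint8_t requantize_lane(int32_t acc, int32_t mul, int32_t shift,
                                   int32_t c_offset, int32_t minval, int32_t maxval)
    {
        // sqrdmulh: rounding doubling multiply, keeping the high half
        // (saturation of the acc == mul == INT32_MIN corner is omitted here).
        int64_t prod = static_cast<int64_t>(acc) * static_cast<int64_t>(mul);
        int32_t hi   = static_cast<int32_t>((prod + (INT64_C(1) << 30)) >> 31);
        // srshl by a negative shift is a rounding arithmetic shift right; the
        // and/asr/sqadd triple in the assembly adjusts the rounding of
        // negative inputs before that shift.
        if (shift < 0)
        {
            const int32_t s = -shift;
            hi = (hi + (1 << (s - 1))) >> s;
        }
        hi += c_offset;                   // sqadd of the output offset
        hi = std::max(hi, minval);        // smax against minval
        hi = std::min(hi, maxval);        // smin against maxval
        return static_cast<uint8_t>(hi);  // st1b stores the low byte per lane
    }
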
diff --git a/src/core/NEON/kernels/arm_conv/pooling/depthfirst_driver.hpp b/src/core/NEON/kernels/arm_conv/pooling/depthfirst_driver.hpp
index d0e8639229..a553f1be9e 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/depthfirst_driver.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/depthfirst_driver.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,10 +64,10 @@ class DepthfirstDriver : public PoolingCommon<TInput, TOutput>
std::unique_ptr<const IDepthfirstStrategy> m_strat;
/* Compute the amount of working space required for a single thread. */
- virtual size_t get_working_size_per_thread() const = 0;
+ virtual size_t get_working_size_per_thread(unsigned int) const = 0;
/* Initialise the working space for a thread. */
- virtual void initialise_working_space(void *) const = 0;
+ virtual void initialise_working_space(void *, unsigned int) const = 0;
/* Compute a portion of the output tensor with padding. */
virtual void compute_tile_padded(
@@ -148,8 +148,8 @@ class DepthfirstDriver : public PoolingCommon<TInput, TOutput>
{
// Get and initialise the working space for this thread.
void *thread_working_space =
- static_cast<uint8_t *>(working_space) + thread_id * this->get_working_size_per_thread();
- this->initialise_working_space(thread_working_space);
+ static_cast<uint8_t *>(working_space) + thread_id * this->get_working_size_per_thread(n_channels);
+ this->initialise_working_space(thread_working_space, n_channels);
// Construct convenient representations of the input/output tensors.
TensorSpec<const TInput *> input_tensor(reinterpret_cast<const TInput *>(input), ld_input_row, ld_input_col);
@@ -289,9 +289,14 @@ class DepthfirstDriver : public PoolingCommon<TInput, TOutput>
{
}
- size_t get_working_size(unsigned int n_threads) const override final
+ size_t get_working_size(unsigned int n_threads) const override
{
- return n_threads * this->get_working_size_per_thread();
+ return this->get_working_size(n_threads, this->m_args.n_channels);
+ }
+
+ size_t get_working_size(unsigned int n_threads, unsigned int n_channels) const override final
+ {
+ return n_threads * this->get_working_size_per_thread(n_channels);
}
};
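
The hunk above threads the channel count through the pooling driver's working-space API: get_working_size_per_thread and initialise_working_space now take n_channels, the old single-argument get_working_size forwards to a new two-argument overload, and execute() offsets each thread into the shared buffer by a channel-dependent slice. A hedged sketch of that pattern, with an illustrative Driver type and per-thread size standing in for DepthfirstDriver:

    #include <cstdint>

    // Illustrative stand-in for DepthfirstDriver; the per-thread size
    // formula here is a placeholder, not the library's real computation.
    struct Driver
    {
        size_t get_working_size_per_thread(unsigned int n_channels) const
        {
            return sizeof(float) * n_channels;  // assumed per-thread need
        }
        size_t get_working_size(unsigned int n_threads, unsigned int n_channels) const
        {
            // Mirrors the new two-argument overload in the hunk above.
            return n_threads * get_working_size_per_thread(n_channels);
        }
    };

    void execute_thread(const Driver &d, void *working_space,
                        unsigned int thread_id, unsigned int n_channels)
    {
        // Each thread offsets into the shared buffer by its own slice,
        // as the patched execute() body does.
        void *ws = static_cast<uint8_t *>(working_space) +
                   thread_id * d.get_working_size_per_thread(n_channels);
        (void)ws;  // initialise_working_space(ws, n_channels) in the real driver
    }
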
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 5df848d1dd..45315d5a5d 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,13 +82,13 @@ void a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr d7, [%x[args], %[offsetof_rescale]]\n"
+ "ldr d8, [%x[args], %[offsetof_rescale]]\n"
"ldr x3, [%x[args], %[offsetof_n_channels]]\n"
- "cmp x3, #0x8\n"
"mov x4, #0x0\n"
+ "mov x5, #0x0\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
"ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "mov x5, #0x0\n"
+ "cmp x3, #0x8\n"
"ldp x6, x7, [x21, #0x0]\n"
"ldp x8, x17, [x21, #0x10]\n"
"ldp x16, x15, [x20, #0x0]\n"
@@ -100,142 +100,142 @@ void a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"ldp x24, x23, [x20, #0x60]\n"
"ldp x22, x21, [x20, #0x70]\n"
"blt 3f\n"
- "ldr q6, [x11, x4]\n"
- "ldr q5, [x10, x4]\n"
+ "ldr q7, [x11, x4]\n"
+ "ldr q6, [x10, x4]\n"
"lsr x20, x3, #0x3\n"
+ "ldr q5, [x27, x4]\n"
+ "ldr q4, [x26, x4]\n"
+ "ldr q3, [x15, x4]\n"
+ "ldr q2, [x14, x4]\n"
+ "ldr q1, [x12, x4]\n"
+ "ldr q0, [x28, x4]\n"
"sub x3, x3, x20, LSL #3\n"
- "ldr q4, [x27, x4]\n"
- "ldr q3, [x26, x4]\n"
"subs x20, x20, #0x1\n"
- "ldr q2, [x15, x4]\n"
- "ldr q1, [x14, x4]\n"
- "ldr q0, [x12, x4]\n"
- "ldr q31, [x28, x4]\n"
- "ldr q30, [x9, x4]\n"
- "ldr q29, [x25, x4]\n"
- "ldr q28, [x23, x4]\n"
- "ldr q27, [x22, x4]\n"
- "ldr q26, [x16, x4]\n"
- "ldr q25, [x13, x4]\n"
- "ldr q24, [x24, x4]\n"
- "ldr q23, [x21, x4]\n"
+ "ldr q31, [x9, x4]\n"
+ "ldr q30, [x25, x4]\n"
+ "ldr q29, [x23, x4]\n"
+ "ldr q28, [x22, x4]\n"
+ "ldr q27, [x16, x4]\n"
+ "ldr q26, [x13, x4]\n"
+ "ldr q25, [x24, x4]\n"
+ "ldr q24, [x21, x4]\n"
"add x4, x4, #0x10\n"
"beq 2f\n"
"1:" // Vector: Loop
- "fadd v17.8h, v6.8h, v5.8h\n"
- "ldr q6, [x11, x4]\n"
- "ldr q5, [x10, x4]\n"
- "fadd v16.8h, v4.8h, v3.8h\n"
- "ldr q4, [x27, x4]\n"
- "ldr q3, [x26, x4]\n"
- "fadd v19.8h, v17.8h, v16.8h\n"
- "fadd v18.8h, v2.8h, v1.8h\n"
- "ldr q2, [x15, x4]\n"
- "ldr q1, [x14, x4]\n"
- "fadd v17.8h, v0.8h, v31.8h\n"
- "fadd v22.8h, v30.8h, v29.8h\n"
- "ldr q0, [x12, x4]\n"
- "ldr q31, [x28, x4]\n"
- "fadd v16.8h, v28.8h, v27.8h\n"
- "fadd v21.8h, v18.8h, v19.8h\n"
- "ldr q30, [x9, x4]\n"
- "ldr q29, [x25, x4]\n"
- "fadd v20.8h, v16.8h, v19.8h\n"
- "fadd v19.8h, v26.8h, v17.8h\n"
- "ldr q28, [x23, x4]\n"
- "ldr q27, [x22, x4]\n"
- "fadd v18.8h, v25.8h, v22.8h\n"
- "fadd v17.8h, v24.8h, v17.8h\n"
- "ldr q26, [x16, x4]\n"
- "ldr q25, [x13, x4]\n"
- "fadd v16.8h, v23.8h, v22.8h\n"
- "fadd v19.8h, v21.8h, v19.8h\n"
- "ldr q24, [x24, x4]\n"
- "ldr q23, [x21, x4]\n"
- "fadd v18.8h, v21.8h, v18.8h\n"
- "fadd v17.8h, v17.8h, v20.8h\n"
- "fadd v16.8h, v16.8h, v20.8h\n"
+ "fadd v19.8h, v7.8h, v6.8h\n"
+ "ldr q7, [x11, x4]\n"
+ "ldr q6, [x10, x4]\n"
+ "fadd v16.8h, v5.8h, v4.8h\n"
+ "ldr q5, [x27, x4]\n"
+ "ldr q4, [x26, x4]\n"
+ "fadd v23.8h, v3.8h, v2.8h\n"
+ "fadd v18.8h, v1.8h, v0.8h\n"
+ "ldr q3, [x15, x4]\n"
+ "ldr q2, [x14, x4]\n"
+ "fadd v17.8h, v31.8h, v30.8h\n"
+ "fadd v22.8h, v29.8h, v28.8h\n"
+ "ldr q1, [x12, x4]\n"
+ "ldr q0, [x28, x4]\n"
+ "fadd v16.8h, v19.8h, v16.8h\n"
"subs x20, x20, #0x1\n"
- "fmul v19.8h, v19.8h, v7.h[0]\n"
+ "ldr q31, [x9, x4]\n"
+ "ldr q30, [x25, x4]\n"
+ "fadd v19.8h, v27.8h, v18.8h\n"
+ "fadd v21.8h, v25.8h, v18.8h\n"
+ "ldr q29, [x23, x4]\n"
+ "ldr q28, [x22, x4]\n"
+ "fadd v18.8h, v26.8h, v17.8h\n"
+ "fadd v20.8h, v24.8h, v17.8h\n"
+ "ldr q27, [x16, x4]\n"
+ "ldr q26, [x13, x4]\n"
+ "fadd v17.8h, v23.8h, v16.8h\n"
+ "fadd v16.8h, v22.8h, v16.8h\n"
+ "ldr q25, [x24, x4]\n"
+ "ldr q24, [x21, x4]\n"
"add x4, x4, #0x10\n"
- "fmul v18.8h, v18.8h, v7.h[1]\n"
- "fmul v17.8h, v17.8h, v7.h[2]\n"
+ "fadd v19.8h, v17.8h, v19.8h\n"
+ "fadd v18.8h, v17.8h, v18.8h\n"
+ "fadd v17.8h, v21.8h, v16.8h\n"
+ "fadd v16.8h, v20.8h, v16.8h\n"
+ "fmul v19.8h, v19.8h, v8.h[0]\n"
+ "fmul v18.8h, v18.8h, v8.h[1]\n"
+ "fmul v17.8h, v17.8h, v8.h[2]\n"
+ "fmul v16.8h, v16.8h, v8.h[3]\n"
"str q19, [x6, x5]\n"
- "fmul v16.8h, v16.8h, v7.h[3]\n"
"str q18, [x7, x5]\n"
"str q17, [x8, x5]\n"
"str q16, [x17, x5]\n"
"add x5, x5, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
- "fadd v17.8h, v6.8h, v5.8h\n"
- "fadd v16.8h, v4.8h, v3.8h\n"
- "fadd v19.8h, v17.8h, v16.8h\n"
- "fadd v18.8h, v2.8h, v1.8h\n"
- "fadd v17.8h, v0.8h, v31.8h\n"
- "fadd v22.8h, v30.8h, v29.8h\n"
- "fadd v16.8h, v28.8h, v27.8h\n"
- "fadd v21.8h, v18.8h, v19.8h\n"
- "fadd v20.8h, v16.8h, v19.8h\n"
- "fadd v19.8h, v26.8h, v17.8h\n"
- "fadd v18.8h, v25.8h, v22.8h\n"
- "fadd v17.8h, v24.8h, v17.8h\n"
- "fadd v16.8h, v23.8h, v22.8h\n"
- "fadd v19.8h, v21.8h, v19.8h\n"
- "fadd v18.8h, v21.8h, v18.8h\n"
- "fadd v17.8h, v17.8h, v20.8h\n"
- "fadd v16.8h, v16.8h, v20.8h\n"
- "fmul v19.8h, v19.8h, v7.h[0]\n"
+ "fadd v19.8h, v7.8h, v6.8h\n"
+ "fadd v16.8h, v5.8h, v4.8h\n"
+ "fadd v23.8h, v3.8h, v2.8h\n"
+ "fadd v18.8h, v1.8h, v0.8h\n"
+ "fadd v17.8h, v31.8h, v30.8h\n"
+ "fadd v22.8h, v29.8h, v28.8h\n"
+ "fadd v16.8h, v19.8h, v16.8h\n"
+ "fadd v19.8h, v27.8h, v18.8h\n"
+ "fadd v21.8h, v25.8h, v18.8h\n"
+ "fadd v18.8h, v26.8h, v17.8h\n"
+ "fadd v20.8h, v24.8h, v17.8h\n"
+ "fadd v17.8h, v23.8h, v16.8h\n"
+ "fadd v16.8h, v22.8h, v16.8h\n"
+ "fadd v19.8h, v17.8h, v19.8h\n"
+ "fadd v18.8h, v17.8h, v18.8h\n"
+ "fadd v17.8h, v21.8h, v16.8h\n"
+ "fadd v16.8h, v20.8h, v16.8h\n"
+ "fmul v19.8h, v19.8h, v8.h[0]\n"
+ "fmul v18.8h, v18.8h, v8.h[1]\n"
+ "fmul v17.8h, v17.8h, v8.h[2]\n"
+ "fmul v16.8h, v16.8h, v8.h[3]\n"
"str q19, [x6, x5]\n"
- "fmul v18.8h, v18.8h, v7.h[1]\n"
- "fmul v17.8h, v17.8h, v7.h[2]\n"
"str q18, [x7, x5]\n"
- "fmul v16.8h, v16.8h, v7.h[3]\n"
"str q17, [x8, x5]\n"
"str q16, [x17, x5]\n"
"add x5, x5, #0x10\n"
"cbz x3, 4f\n"
"3:" // Oddments
- "ldr h17, [x11, x4]\n"
- "ldr h16, [x10, x4]\n"
- "fadd v18.8h, v17.8h, v16.8h\n"
+ "ldr h22, [x11, x4]\n"
+ "ldr h21, [x10, x4]\n"
"subs x3, x3, #0x1\n"
- "ldr h17, [x27, x4]\n"
+ "ldr h20, [x27, x4]\n"
"ldr h16, [x26, x4]\n"
- "fadd v16.8h, v17.8h, v16.8h\n"
- "fadd v18.8h, v18.8h, v16.8h\n"
- "ldr h17, [x15, x4]\n"
- "ldr h16, [x14, x4]\n"
- "fadd v16.8h, v17.8h, v16.8h\n"
- "fadd v23.8h, v16.8h, v18.8h\n"
- "ldr h17, [x12, x4]\n"
- "ldr h16, [x28, x4]\n"
- "fadd v22.8h, v17.8h, v16.8h\n"
- "ldr h17, [x9, x4]\n"
- "ldr h16, [x25, x4]\n"
- "fadd v21.8h, v17.8h, v16.8h\n"
- "ldr h17, [x23, x4]\n"
+ "ldr h19, [x15, x4]\n"
+ "ldr h18, [x14, x4]\n"
+ "ldr h23, [x12, x4]\n"
+ "ldr h17, [x28, x4]\n"
+ "fadd v22.8h, v22.8h, v21.8h\n"
+ "ldr h27, [x9, x4]\n"
+ "ldr h26, [x25, x4]\n"
+ "fadd v20.8h, v20.8h, v16.8h\n"
+ "ldr h25, [x23, x4]\n"
"ldr h16, [x22, x4]\n"
- "fadd v16.8h, v17.8h, v16.8h\n"
- "fadd v20.8h, v16.8h, v18.8h\n"
- "ldr h17, [x16, x4]\n"
- "ldr h16, [x13, x4]\n"
- "fadd v19.8h, v17.8h, v22.8h\n"
- "fadd v18.8h, v16.8h, v21.8h\n"
+ "fadd v21.8h, v19.8h, v18.8h\n"
+ "ldr h19, [x16, x4]\n"
+ "ldr h18, [x13, x4]\n"
+ "fadd v24.8h, v23.8h, v17.8h\n"
"ldr h17, [x24, x4]\n"
- "ldr h16, [x21, x4]\n"
- "fadd v17.8h, v17.8h, v22.8h\n"
- "fadd v16.8h, v16.8h, v21.8h\n"
- "fadd v19.8h, v23.8h, v19.8h\n"
- "fadd v18.8h, v23.8h, v18.8h\n"
+ "ldr h23, [x21, x4]\n"
+ "fadd v22.8h, v22.8h, v20.8h\n"
+ "fadd v20.8h, v27.8h, v26.8h\n"
+ "fadd v16.8h, v25.8h, v16.8h\n"
"add x4, x4, #0x2\n"
- "fadd v17.8h, v17.8h, v20.8h\n"
- "fadd v16.8h, v16.8h, v20.8h\n"
- "fmul v19.8h, v19.8h, v7.h[0]\n"
- "fmul v18.8h, v18.8h, v7.h[1]\n"
+ "fadd v19.8h, v19.8h, v24.8h\n"
+ "fadd v21.8h, v21.8h, v22.8h\n"
+ "fadd v18.8h, v18.8h, v20.8h\n"
+ "fadd v17.8h, v17.8h, v24.8h\n"
+ "fadd v20.8h, v23.8h, v20.8h\n"
+ "fadd v16.8h, v16.8h, v22.8h\n"
+ "fadd v19.8h, v21.8h, v19.8h\n"
+ "fadd v18.8h, v21.8h, v18.8h\n"
+ "fadd v17.8h, v17.8h, v16.8h\n"
+ "fadd v16.8h, v20.8h, v16.8h\n"
+ "fmul v19.8h, v19.8h, v8.h[0]\n"
+ "fmul v18.8h, v18.8h, v8.h[1]\n"
+ "fmul v17.8h, v17.8h, v8.h[2]\n"
+ "fmul v16.8h, v16.8h, v8.h[3]\n"
"str h19, [x6, x5]\n"
- "fmul v17.8h, v17.8h, v7.h[2]\n"
- "fmul v16.8h, v16.8h, v7.h[3]\n"
"str h18, [x7, x5]\n"
"str h17, [x8, x5]\n"
"str h16, [x17, x5]\n"
@@ -244,7 +244,7 @@ void a64_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"4:" // End
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
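
For reference, the fadd tree in the kernel above computes four overlapping 3x3 window sums for a 2x2 output tile (stride 1 over a 4x4 input patch), and fmul then scales each sum by its own lane of the rescale vector, which folds in the 1/window_cells divisor. A scalar sketch of the same arithmetic for one channel, using float in place of __fp16 and illustrative array shapes:

    // Four overlapping 3x3 averages over a 4x4 patch, one 2x2 output tile;
    // rescale[i] plays the role of the per-output lanes of the rescale
    // vector loaded from KernelArgs::rescale_vals.
    void avg_3x3_s1_out2x2(const float in[4][4], const float rescale[4],
                           float out[2][2])
    {
        for (int oy = 0; oy < 2; oy++)
        {
            for (int ox = 0; ox < 2; ox++)
            {
                float sum = 0.0f;
                for (int ky = 0; ky < 3; ky++)
                {
                    for (int kx = 0; kx < 3; kx++)
                    {
                        sum += in[oy + ky][ox + kx];  // 3x3 window sum
                    }
                }
                out[oy][ox] = sum * rescale[oy * 2 + ox];  // fold in 1/cells
            }
        }
    }

The vector code reaches the same result with fewer additions by sharing the pairwise row sums between the four windows, which is why the intermediate fadd results feed more than one output.
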
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
index f7be92e53f..15696d3e76 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,127 +42,127 @@ void a64_fp16_nhwc_avg_generic_depthfirst_impl(
const auto rescale_value = static_cast<__fp16>(1.0f / static_cast<float>(window_cells));
__asm__ __volatile__(
- "ld1r { v9.8h }, [%x[rescale_ptr]]\n"
+ "ld1r { v10.8h }, [%x[rescale_ptr]]\n"
"cmp %x[n_channels], #0x20\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
+ "mov x9, #0x0\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "mov x27, #0x20\n" // cntb _, ALL, #2
+ "mov x26, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "movi v9.16b, #0x0\n"
"movi v8.16b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"movi v7.16b, #0x0\n"
- "mov x22, %x[inptrs]\n"
"movi v6.16b, #0x0\n"
- "movi v5.16b, #0x0\n"
"cbz x25, 4f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldr q2, [x21, x26]\n"
- "ldr q1, [x20, x26]\n"
- "ldr q0, [x21, x24]\n"
- "ldr q31, [x20, x24]\n"
- "ldr q30, [x21, x23]\n"
- "ldr q29, [x20, x23]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
+ "ldr q27, [x20, x27]\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fadd v23.8h, v4.8h, v3.8h\n"
- "fadd v19.8h, v28.8h, v22.8h\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "fadd v22.8h, v2.8h, v1.8h\n"
- "ldr q2, [x21, x26]\n"
- "fadd v18.8h, v27.8h, v21.8h\n"
- "ldr q1, [x20, x26]\n"
- "fadd v21.8h, v0.8h, v31.8h\n"
- "ldr q0, [x21, x24]\n"
- "fadd v17.8h, v26.8h, v20.8h\n"
- "ldr q31, [x20, x24]\n"
- "fadd v20.8h, v30.8h, v29.8h\n"
- "ldr q30, [x21, x23]\n"
+ "fadd v23.8h, v5.8h, v4.8h\n"
+ "fadd v19.8h, v3.8h, v2.8h\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
+ "fadd v22.8h, v1.8h, v0.8h\n"
+ "fadd v18.8h, v31.8h, v30.8h\n"
+ "subs x25, x25, #0x1\n"
+ "add x24, x24, #0x20\n"
+ "fadd v21.8h, v29.8h, v21.8h\n"
+ "fadd v17.8h, v28.8h, v27.8h\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "fadd v20.8h, v26.8h, v20.8h\n"
"fadd v16.8h, v25.8h, v24.8h\n"
- "ldr q29, [x20, x23]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
"fadd v19.8h, v23.8h, v19.8h\n"
"fadd v18.8h, v22.8h, v18.8h\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
"fadd v17.8h, v21.8h, v17.8h\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"fadd v16.8h, v20.8h, v16.8h\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "subs x25, x25, #0x1\n"
- "fadd v8.8h, v8.8h, v19.8h\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "fadd v7.8h, v7.8h, v18.8h\n"
- "fadd v6.8h, v6.8h, v17.8h\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
- "fadd v5.8h, v5.8h, v16.8h\n"
- "add x22, x22, #0x20\n"
+ "ldr q28, [x21, x27]\n"
+ "ldr q27, [x20, x27]\n"
+ "fadd v9.8h, v9.8h, v19.8h\n"
+ "fadd v8.8h, v8.8h, v18.8h\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "fadd v7.8h, v7.8h, v17.8h\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
+ "fadd v6.8h, v6.8h, v16.8h\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fadd v23.8h, v4.8h, v3.8h\n"
- "fadd v19.8h, v28.8h, v22.8h\n"
- "fadd v22.8h, v2.8h, v1.8h\n"
- "fadd v18.8h, v27.8h, v21.8h\n"
- "fadd v21.8h, v0.8h, v31.8h\n"
- "fadd v17.8h, v26.8h, v20.8h\n"
- "fadd v20.8h, v30.8h, v29.8h\n"
+ "fadd v23.8h, v5.8h, v4.8h\n"
+ "fadd v19.8h, v3.8h, v2.8h\n"
+ "fadd v22.8h, v1.8h, v0.8h\n"
+ "fadd v18.8h, v31.8h, v30.8h\n"
+ "fadd v21.8h, v29.8h, v21.8h\n"
+ "fadd v17.8h, v28.8h, v27.8h\n"
+ "fadd v20.8h, v26.8h, v20.8h\n"
"fadd v16.8h, v25.8h, v24.8h\n"
"fadd v19.8h, v23.8h, v19.8h\n"
"fadd v18.8h, v22.8h, v18.8h\n"
"fadd v17.8h, v21.8h, v17.8h\n"
"fadd v16.8h, v20.8h, v16.8h\n"
- "fadd v8.8h, v8.8h, v19.8h\n"
- "fadd v7.8h, v7.8h, v18.8h\n"
- "fadd v6.8h, v6.8h, v17.8h\n"
- "fadd v5.8h, v5.8h, v16.8h\n"
+ "fadd v9.8h, v9.8h, v19.8h\n"
+ "fadd v8.8h, v8.8h, v18.8h\n"
+ "fadd v7.8h, v7.8h, v17.8h\n"
+ "fadd v6.8h, v6.8h, v16.8h\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "fadd v8.8h, v8.8h, v16.8h\n"
- "ldr q17, [x20, x26]\n"
- "ldr q16, [x20, x24]\n"
+ "ldr q19, [x20, x9]\n"
+ "ldr q18, [x20, x28]\n"
+ "ldr q17, [x20, x27]\n"
+ "ldr q16, [x20, x26]\n"
+ "fadd v9.8h, v9.8h, v19.8h\n"
+ "fadd v8.8h, v8.8h, v18.8h\n"
"fadd v7.8h, v7.8h, v17.8h\n"
"fadd v6.8h, v6.8h, v16.8h\n"
- "ldr q16, [x20, x23]\n"
- "fadd v5.8h, v5.8h, v16.8h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x20\n"
+ "fmul v9.8h, v9.8h, v10.8h\n"
+ "fmul v8.8h, v8.8h, v10.8h\n"
"cmp %x[n_channels], #0x20\n"
- "fmul v8.8h, v8.8h, v9.8h\n"
- "fmul v7.8h, v7.8h, v9.8h\n"
- "fmul v6.8h, v6.8h, v9.8h\n"
- "fmul v5.8h, v5.8h, v9.8h\n"
- "str q8, [%x[outptr], x27]\n"
+ "fmul v7.8h, v7.8h, v10.8h\n"
+ "fmul v6.8h, v6.8h, v10.8h\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x40\n"
+ "str q8, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
+ "str q7, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q7, [%x[outptr], x26]\n"
+ "str q6, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
- "str q6, [%x[outptr], x24]\n"
- "add x24, x24, #0x40\n"
- "str q5, [%x[outptr], x23]\n"
- "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 31f\n"
"7:" // Single vector of channels
@@ -170,178 +170,178 @@ void a64_fp16_nhwc_avg_generic_depthfirst_impl(
"blt 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "mov x22, %x[inptrs]\n"
+ "movi v9.16b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fadd v17.8h, v4.8h, v3.8h\n"
- "fadd v16.8h, v28.8h, v22.8h\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "fadd v16.8h, v17.8h, v16.8h\n"
- "ldp x21, x20, [x22, #0x10]\n"
+ "fadd v17.8h, v5.8h, v4.8h\n"
+ "fadd v16.8h, v3.8h, v2.8h\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "fadd v8.8h, v8.8h, v16.8h\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "fadd v16.8h, v17.8h, v16.8h\n"
+ "fadd v9.8h, v9.8h, v16.8h\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fadd v17.8h, v4.8h, v3.8h\n"
- "fadd v16.8h, v28.8h, v22.8h\n"
+ "fadd v17.8h, v5.8h, v4.8h\n"
+ "fadd v16.8h, v3.8h, v2.8h\n"
"fadd v16.8h, v17.8h, v16.8h\n"
- "fadd v8.8h, v8.8h, v16.8h\n"
+ "fadd v9.8h, v9.8h, v16.8h\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "fadd v8.8h, v8.8h, v16.8h\n"
+ "ldr q16, [x20, x9]\n"
+ "fadd v9.8h, v9.8h, v16.8h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x8\n"
+ "fmul v9.8h, v9.8h, v10.8h\n"
"cmp %x[n_channels], #0x8\n"
- "fmul v8.8h, v8.8h, v9.8h\n"
- "str q8, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 31f\n"
"14:" // Oddments
"lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x27\n"
- "movi v8.16b, #0x0\n"
+ "add %x[outptr], %x[outptr], x9\n"
+ "movi v9.16b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 20f\n"
"15:" // Oddments: 4 inputs loop
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
+ "movi v5.16b, #0x0\n"
"movi v4.16b, #0x0\n"
"movi v3.16b, #0x0\n"
- "add x20, x20, x27\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "movi v2.16b, #0x0\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"tbz %x[n_channels], #2, 17f\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "ldr d22, [x20], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d2, [x20], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
- "ld1 { v3.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v22.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
+ "ld1 { v3.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
- "ld1 { v3.h }[6], [x22], #0x2\n"
- "ld1 { v28.h }[6], [x21], #0x2\n"
- "ld1 { v22.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
+ "ld1 { v4.h }[6], [x22], #0x2\n"
+ "ld1 { v3.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x20], #0x2\n"
"b 19f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
- "ld1 { v3.h }[4], [x22], #0x2\n"
- "ld1 { v28.h }[4], [x21], #0x2\n"
- "ld1 { v22.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
+ "ld1 { v4.h }[4], [x22], #0x2\n"
+ "ld1 { v3.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x20], #0x2\n"
"b 19f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "ldr s22, [x20], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s2, [x20], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"b 19f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "ldr h28, [x21], #0x2\n"
- "ldr h22, [x20], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "ldr h2, [x20], #0x2\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 2: End
- "fadd v17.8h, v4.8h, v3.8h\n"
- "fadd v16.8h, v28.8h, v22.8h\n"
+ "fadd v17.8h, v5.8h, v4.8h\n"
+ "fadd v16.8h, v3.8h, v2.8h\n"
"subs x25, x25, #0x1\n"
"fadd v16.8h, v17.8h, v16.8h\n"
- "fadd v8.8h, v8.8h, v16.8h\n"
+ "fadd v9.8h, v9.8h, v16.8h\n"
"bgt 15b\n"
"20:" // Oddments: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 26f\n"
"21:" // Oddments: Single input loop
"ldr x23, [x24], #0x8\n"
- "add x23, x23, x27\n"
- "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "add x23, x23, x9\n"
"tbz %x[n_channels], #2, 23f\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
"b 25f\n"
"22:" // Oddments: Single input loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
"b 25f\n"
"23:" // Oddments: Single input loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 24f\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
"b 25f\n"
"24:" // Oddments: Single input loop: Load: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 25f\n"
- "ldr h4, [x23], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
"25:" // Oddments: Single input loop: Load: Bit 2: End
"subs x21, x21, #0x1\n"
- "fadd v8.8h, v8.8h, v4.8h\n"
+ "fadd v9.8h, v9.8h, v5.8h\n"
"bgt 21b\n"
"26:" // Oddments: Single input loop: End
- "fmul v8.8h, v8.8h, v9.8h\n"
+ "fmul v9.8h, v9.8h, v10.8h\n"
"tbz %x[n_channels], #2, 28f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v9.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #1, 27f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[2], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[6], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[6], [%x[outptr]], #0x2\n"
"b 30f\n"
"27:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[4], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[4], [%x[outptr]], #0x2\n"
"b 30f\n"
"28:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 29f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[0], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[2], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[2], [%x[outptr]], #0x2\n"
"b 30f\n"
"29:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[0], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[0], [%x[outptr]], #0x2\n"
"30:" // Oddments: Store: Bit 2: End
"31:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
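
(Editor's note: the hunk above appears to be regenerated kernel output rather than a hand edit — the accumulators move from v5–v8 to v6–v9, the rescale broadcast from v9 to v10, the channel offsets from x23/x24/x26/x27 to x26/x27/x28/x9, and loads are re-scheduled, while the arithmetic is unchanged; the clobber list is updated to match. For orientation, a minimal scalar sketch of what this generic average-pooling kernel computes per channel, shown in float for clarity even though the kernel operates on fp16 lanes; the function name and signature are illustrative, not the library's API, and the vectorisation and "oddments" tail handling are omitted:)

#include <cstddef>

void avg_pool_generic_ref(std::size_t n_channels,
                          std::size_t n_valid_cells,
                          float rescale_value,          // 1.0f / window_cells
                          const float *const *inptrs,   // n_valid_cells input pointers
                          float *outptr)
{
  for (std::size_t c = 0; c < n_channels; c++)
  {
    float acc = 0.0f;                      // matches the "movi v9.16b, #0x0" init
    for (std::size_t i = 0; i < n_valid_cells; i++)
      acc += inptrs[i][c];                 // the fadd accumulation chains
    outptr[c] = acc * rescale_value;       // the trailing fmul by the rescale vector
  }
}
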
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 4b073b9076..83293fb4f5 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -65,11 +65,11 @@ void a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
__asm__ __volatile__(
"ldr x16, [%x[args], %[offsetof_n_channels]]\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "cmp x16, #0x8\n"
"mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "mov x12, #0x0\n"
+ "cmp x16, #0x8\n"
+ "ldp x13, x12, [x21, #0x0]\n"
"ldp x11, x10, [x21, #0x10]\n"
"ldp x9, x28, [x20, #0x0]\n"
"ldp x27, x26, [x20, #0x10]\n"
@@ -80,14 +80,14 @@ void a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr q30, [x28, x15]\n"
"ldr q29, [x25, x15]\n"
"lsr x20, x16, #0x3\n"
- "sub x16, x16, x20, LSL #3\n"
"ldr q28, [x22, x15]\n"
"ldr q27, [x26, x15]\n"
- "subs x20, x20, #0x1\n"
"ldr q26, [x9, x15]\n"
"ldr q25, [x27, x15]\n"
"ldr q24, [x24, x15]\n"
"ldr q23, [x23, x15]\n"
+ "sub x16, x16, x20, LSL #3\n"
+ "subs x20, x20, #0x1\n"
"ldr q22, [x21, x15]\n"
"add x15, x15, #0x10\n"
"beq 2f\n"
@@ -107,62 +107,62 @@ void a64_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr q24, [x24, x15]\n"
"ldr q23, [x23, x15]\n"
"subs x20, x20, #0x1\n"
- "fmax v19.8h, v21.8h, v19.8h\n"
"ldr q22, [x21, x15]\n"
+ "fmax v19.8h, v21.8h, v19.8h\n"
"fmax v18.8h, v18.8h, v21.8h\n"
- "fmax v17.8h, v17.8h, v20.8h\n"
"add x15, x15, #0x10\n"
+ "fmax v17.8h, v17.8h, v20.8h\n"
"fmax v16.8h, v20.8h, v16.8h\n"
- "str q19, [x14, x12]\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "str q19, [x13, x14]\n"
+ "str q18, [x12, x14]\n"
+ "str q17, [x11, x14]\n"
+ "str q16, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"fmax v21.8h, v30.8h, v29.8h\n"
"fmax v20.8h, v29.8h, v28.8h\n"
- "fmax v16.8h, v27.8h, v26.8h\n"
+ "fmax v19.8h, v27.8h, v26.8h\n"
"fmax v18.8h, v25.8h, v24.8h\n"
"fmax v17.8h, v27.8h, v23.8h\n"
- "fmax v19.8h, v24.8h, v22.8h\n"
- "fmax v16.8h, v21.8h, v16.8h\n"
+ "fmax v16.8h, v24.8h, v22.8h\n"
+ "fmax v19.8h, v21.8h, v19.8h\n"
"fmax v18.8h, v18.8h, v21.8h\n"
- "str q16, [x14, x12]\n"
"fmax v17.8h, v17.8h, v20.8h\n"
- "fmax v16.8h, v20.8h, v19.8h\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "fmax v16.8h, v20.8h, v16.8h\n"
+ "str q19, [x13, x14]\n"
+ "str q18, [x12, x14]\n"
+ "str q17, [x11, x14]\n"
+ "str q16, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
"cbz x16, 4f\n"
"3:" // Oddments
"ldr h16, [x28, x15]\n"
- "ldr h17, [x25, x15]\n"
- "fmax v23.8h, v16.8h, v17.8h\n"
+ "ldr h24, [x25, x15]\n"
"subs x16, x16, #0x1\n"
- "ldr h16, [x22, x15]\n"
- "ldr h22, [x26, x15]\n"
- "fmax v21.8h, v17.8h, v16.8h\n"
- "ldr h16, [x9, x15]\n"
- "ldr h17, [x27, x15]\n"
- "fmax v16.8h, v22.8h, v16.8h\n"
- "fmax v20.8h, v23.8h, v16.8h\n"
- "ldr h19, [x24, x15]\n"
- "ldr h16, [x23, x15]\n"
- "fmax v18.8h, v17.8h, v19.8h\n"
- "fmax v17.8h, v22.8h, v16.8h\n"
+ "ldr h20, [x22, x15]\n"
+ "ldr h23, [x26, x15]\n"
+ "ldr h19, [x9, x15]\n"
+ "ldr h18, [x27, x15]\n"
+ "ldr h22, [x24, x15]\n"
+ "ldr h17, [x23, x15]\n"
+ "fmax v21.8h, v16.8h, v24.8h\n"
"ldr h16, [x21, x15]\n"
- "fmax v16.8h, v19.8h, v16.8h\n"
+ "fmax v20.8h, v24.8h, v20.8h\n"
"add x15, x15, #0x2\n"
- "fmax v18.8h, v18.8h, v23.8h\n"
- "fmax v17.8h, v17.8h, v21.8h\n"
- "fmax v16.8h, v21.8h, v16.8h\n"
- "str h20, [x14, x12]\n"
- "str h18, [x13, x12]\n"
- "str h17, [x11, x12]\n"
- "str h16, [x10, x12]\n"
- "add x12, x12, #0x2\n"
+ "fmax v19.8h, v23.8h, v19.8h\n"
+ "fmax v18.8h, v18.8h, v22.8h\n"
+ "fmax v17.8h, v23.8h, v17.8h\n"
+ "fmax v16.8h, v22.8h, v16.8h\n"
+ "fmax v19.8h, v21.8h, v19.8h\n"
+ "fmax v18.8h, v18.8h, v21.8h\n"
+ "fmax v17.8h, v17.8h, v20.8h\n"
+ "fmax v16.8h, v20.8h, v16.8h\n"
+ "str h19, [x13, x14]\n"
+ "str h18, [x12, x14]\n"
+ "str h17, [x11, x14]\n"
+ "str h16, [x10, x14]\n"
+ "add x14, x14, #0x2\n"
"bgt 3b\n"
"4:" // End
:
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
index c92e2cdebd..d7bf97db02 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,122 +41,122 @@ void a64_fp16_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x20\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
+ "mov x9, #0x0\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "mov x27, #0x20\n" // cntb _, ALL, #2
+ "mov x26, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"mov w20, #0xfc00\n"
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "mov x24, %x[inptrs]\n"
+ "dup v9.8h, w20\n"
"dup v8.8h, w20\n"
"dup v7.8h, w20\n"
"dup v6.8h, w20\n"
- "dup v5.8h, w20\n"
- "mov x22, %x[inptrs]\n"
"cbz x25, 4f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldr q2, [x21, x26]\n"
- "ldr q1, [x20, x26]\n"
- "ldr q0, [x21, x24]\n"
- "ldr q31, [x20, x24]\n"
- "ldr q30, [x21, x23]\n"
- "ldr q29, [x20, x23]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
+ "ldr q27, [x20, x27]\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fmax v23.8h, v4.8h, v3.8h\n"
- "fmax v19.8h, v28.8h, v22.8h\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "fmax v22.8h, v2.8h, v1.8h\n"
- "ldr q2, [x21, x26]\n"
- "fmax v18.8h, v27.8h, v21.8h\n"
- "ldr q1, [x20, x26]\n"
- "fmax v21.8h, v0.8h, v31.8h\n"
- "ldr q0, [x21, x24]\n"
- "fmax v17.8h, v26.8h, v20.8h\n"
- "ldr q31, [x20, x24]\n"
- "fmax v20.8h, v30.8h, v29.8h\n"
- "ldr q30, [x21, x23]\n"
+ "fmax v23.8h, v5.8h, v4.8h\n"
+ "fmax v19.8h, v3.8h, v2.8h\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
+ "fmax v22.8h, v1.8h, v0.8h\n"
+ "fmax v18.8h, v31.8h, v30.8h\n"
+ "subs x25, x25, #0x1\n"
+ "add x24, x24, #0x20\n"
+ "fmax v21.8h, v29.8h, v21.8h\n"
+ "fmax v17.8h, v28.8h, v27.8h\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "fmax v20.8h, v26.8h, v20.8h\n"
"fmax v16.8h, v25.8h, v24.8h\n"
- "ldr q29, [x20, x23]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
"fmax v19.8h, v23.8h, v19.8h\n"
"fmax v18.8h, v22.8h, v18.8h\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
"fmax v17.8h, v21.8h, v17.8h\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"fmax v16.8h, v20.8h, v16.8h\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "subs x25, x25, #0x1\n"
- "fmax v8.8h, v8.8h, v19.8h\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "fmax v7.8h, v7.8h, v18.8h\n"
- "fmax v6.8h, v6.8h, v17.8h\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
- "fmax v5.8h, v5.8h, v16.8h\n"
- "add x22, x22, #0x20\n"
+ "ldr q28, [x21, x27]\n"
+ "ldr q27, [x20, x27]\n"
+ "fmax v9.8h, v9.8h, v19.8h\n"
+ "fmax v8.8h, v8.8h, v18.8h\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "fmax v7.8h, v7.8h, v17.8h\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
+ "fmax v6.8h, v6.8h, v16.8h\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fmax v23.8h, v4.8h, v3.8h\n"
- "fmax v19.8h, v28.8h, v22.8h\n"
- "fmax v22.8h, v2.8h, v1.8h\n"
- "fmax v18.8h, v27.8h, v21.8h\n"
- "fmax v21.8h, v0.8h, v31.8h\n"
- "fmax v17.8h, v26.8h, v20.8h\n"
- "fmax v20.8h, v30.8h, v29.8h\n"
+ "fmax v23.8h, v5.8h, v4.8h\n"
+ "fmax v19.8h, v3.8h, v2.8h\n"
+ "fmax v22.8h, v1.8h, v0.8h\n"
+ "fmax v18.8h, v31.8h, v30.8h\n"
+ "fmax v21.8h, v29.8h, v21.8h\n"
+ "fmax v17.8h, v28.8h, v27.8h\n"
+ "fmax v20.8h, v26.8h, v20.8h\n"
"fmax v16.8h, v25.8h, v24.8h\n"
"fmax v19.8h, v23.8h, v19.8h\n"
"fmax v18.8h, v22.8h, v18.8h\n"
"fmax v17.8h, v21.8h, v17.8h\n"
"fmax v16.8h, v20.8h, v16.8h\n"
- "fmax v8.8h, v8.8h, v19.8h\n"
- "fmax v7.8h, v7.8h, v18.8h\n"
- "fmax v6.8h, v6.8h, v17.8h\n"
- "fmax v5.8h, v5.8h, v16.8h\n"
+ "fmax v9.8h, v9.8h, v19.8h\n"
+ "fmax v8.8h, v8.8h, v18.8h\n"
+ "fmax v7.8h, v7.8h, v17.8h\n"
+ "fmax v6.8h, v6.8h, v16.8h\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "fmax v8.8h, v8.8h, v16.8h\n"
- "ldr q17, [x20, x26]\n"
- "ldr q16, [x20, x24]\n"
+ "ldr q19, [x20, x9]\n"
+ "ldr q18, [x20, x28]\n"
+ "ldr q17, [x20, x27]\n"
+ "ldr q16, [x20, x26]\n"
+ "fmax v9.8h, v9.8h, v19.8h\n"
+ "fmax v8.8h, v8.8h, v18.8h\n"
"fmax v7.8h, v7.8h, v17.8h\n"
"fmax v6.8h, v6.8h, v16.8h\n"
- "ldr q16, [x20, x23]\n"
- "fmax v5.8h, v5.8h, v16.8h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x20\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x40\n"
"cmp %x[n_channels], #0x20\n"
- "str q8, [%x[outptr], x27]\n"
- "str q7, [%x[outptr], x26]\n"
+ "str q8, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
+ "str q7, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
+ "str q6, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
- "str q6, [%x[outptr], x24]\n"
- "add x24, x24, #0x40\n"
- "str q5, [%x[outptr], x23]\n"
- "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 31f\n"
"7:" // Single vector of channels
@@ -165,177 +165,177 @@ void a64_fp16_nhwc_max_generic_depthfirst_impl(
"8:" // Single vector of channels: Loop
"mov w20, #0xfc00\n"
"lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.8h, w20\n"
- "mov x22, %x[inptrs]\n"
+ "mov x24, %x[inptrs]\n"
+ "dup v9.8h, w20\n"
"cbz x25, 11f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fmax v17.8h, v4.8h, v3.8h\n"
- "fmax v16.8h, v28.8h, v22.8h\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "fmax v16.8h, v17.8h, v16.8h\n"
- "ldp x21, x20, [x22, #0x10]\n"
+ "fmax v17.8h, v5.8h, v4.8h\n"
+ "fmax v16.8h, v3.8h, v2.8h\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "fmax v8.8h, v8.8h, v16.8h\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "fmax v16.8h, v17.8h, v16.8h\n"
+ "fmax v9.8h, v9.8h, v16.8h\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fmax v17.8h, v4.8h, v3.8h\n"
- "fmax v16.8h, v28.8h, v22.8h\n"
+ "fmax v17.8h, v5.8h, v4.8h\n"
+ "fmax v16.8h, v3.8h, v2.8h\n"
"fmax v16.8h, v17.8h, v16.8h\n"
- "fmax v8.8h, v8.8h, v16.8h\n"
+ "fmax v9.8h, v9.8h, v16.8h\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "fmax v8.8h, v8.8h, v16.8h\n"
+ "ldr q16, [x20, x9]\n"
+ "fmax v9.8h, v9.8h, v16.8h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x8\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x10\n"
"cmp %x[n_channels], #0x8\n"
- "str q8, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 31f\n"
"14:" // Oddments
"mov w20, #0xfc00\n"
"lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.8h, w20\n"
- "add %x[outptr], %x[outptr], x27\n"
+ "add %x[outptr], %x[outptr], x9\n"
"mov x24, %x[inptrs]\n"
+ "dup v9.8h, w20\n"
"cbz x25, 20f\n"
"15:" // Oddments: 4 inputs loop
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
+ "movi v5.16b, #0x0\n"
"movi v4.16b, #0x0\n"
"movi v3.16b, #0x0\n"
- "add x20, x20, x27\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "movi v2.16b, #0x0\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"tbz %x[n_channels], #2, 17f\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "ldr d22, [x20], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d2, [x20], #0x8\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
- "ld1 { v3.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v22.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
+ "ld1 { v3.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
- "ld1 { v3.h }[6], [x22], #0x2\n"
- "ld1 { v28.h }[6], [x21], #0x2\n"
- "ld1 { v22.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
+ "ld1 { v4.h }[6], [x22], #0x2\n"
+ "ld1 { v3.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x20], #0x2\n"
"b 19f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
- "ld1 { v3.h }[4], [x22], #0x2\n"
- "ld1 { v28.h }[4], [x21], #0x2\n"
- "ld1 { v22.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
+ "ld1 { v4.h }[4], [x22], #0x2\n"
+ "ld1 { v3.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x20], #0x2\n"
"b 19f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "ldr s22, [x20], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s2, [x20], #0x4\n"
"tbz %x[n_channels], #0, 19f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"b 19f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 19f\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "ldr h28, [x21], #0x2\n"
- "ldr h22, [x20], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "ldr h2, [x20], #0x2\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 2: End
- "fmax v17.8h, v4.8h, v3.8h\n"
- "fmax v16.8h, v28.8h, v22.8h\n"
+ "fmax v17.8h, v5.8h, v4.8h\n"
+ "fmax v16.8h, v3.8h, v2.8h\n"
"subs x25, x25, #0x1\n"
"fmax v16.8h, v17.8h, v16.8h\n"
- "fmax v8.8h, v8.8h, v16.8h\n"
+ "fmax v9.8h, v9.8h, v16.8h\n"
"bgt 15b\n"
"20:" // Oddments: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 26f\n"
"21:" // Oddments: Single input loop
"ldr x23, [x24], #0x8\n"
- "add x23, x23, x27\n"
- "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "add x23, x23, x9\n"
"tbz %x[n_channels], #2, 23f\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
"tbz %x[n_channels], #1, 22f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
"b 25f\n"
"22:" // Oddments: Single input loop: Load: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
"b 25f\n"
"23:" // Oddments: Single input loop: Load: Bit 2: Unset
"tbz %x[n_channels], #1, 24f\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
"tbz %x[n_channels], #0, 25f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
"b 25f\n"
"24:" // Oddments: Single input loop: Load: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 25f\n"
- "ldr h4, [x23], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
"25:" // Oddments: Single input loop: Load: Bit 2: End
"subs x21, x21, #0x1\n"
- "fmax v8.8h, v8.8h, v4.8h\n"
+ "fmax v9.8h, v9.8h, v5.8h\n"
"bgt 21b\n"
"26:" // Oddments: Single input loop: End
"tbz %x[n_channels], #2, 28f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v9.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #1, 27f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[2], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[6], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[6], [%x[outptr]], #0x2\n"
"b 30f\n"
"27:" // Oddments: Store: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[4], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[4], [%x[outptr]], #0x2\n"
"b 30f\n"
"28:" // Oddments: Store: Bit 2: Unset
"tbz %x[n_channels], #1, 29f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[0], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[2], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[2], [%x[outptr]], #0x2\n"
"b 30f\n"
"29:" // Oddments: Store: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 30f\n"
- "st1 { v8.h }[0], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[0], [%x[outptr]], #0x2\n"
"30:" // Oddments: Store: Bit 2: End
"31:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
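
(Editor's note: the max-pooling variant above follows the same renumbering. Its accumulator identity is worth noting: "mov w20, #0xfc00" / "dup v9.8h, w20" broadcasts the fp16 bit pattern for negative infinity, so unwritten lanes never win an fmax; the fp32 twin below uses 0xff800000 for the same purpose. A scalar sketch under the same caveats as before — hypothetical names, float shown in place of fp16:)

#include <algorithm>
#include <cstddef>
#include <limits>

void max_pool_generic_ref(std::size_t n_channels,
                          std::size_t n_valid_cells,
                          const float *const *inptrs,
                          float *outptr)
{
  for (std::size_t c = 0; c < n_channels; c++)
  {
    // -infinity is the identity element for max, so padded lanes never win.
    float acc = -std::numeric_limits<float>::infinity();
    for (std::size_t i = 0; i < n_valid_cells; i++)
      acc = std::max(acc, inptrs[i][c]);   // the fmax reduction tree
    outptr[c] = acc;
  }
}
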
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index cf0047638e..86095a6f2c 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,13 +82,13 @@ void a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
pad_left, pad_top, pad_right, pad_bottom);
__asm__ __volatile__(
- "ldr q7, [%x[args], %[offsetof_rescale]]\n"
+ "ldr q8, [%x[args], %[offsetof_rescale]]\n"
"ldr x3, [%x[args], %[offsetof_n_channels]]\n"
- "cmp x3, #0x4\n"
"mov x4, #0x0\n"
+ "mov x5, #0x0\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
"ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "mov x5, #0x0\n"
+ "cmp x3, #0x4\n"
"ldp x6, x7, [x21, #0x0]\n"
"ldp x8, x17, [x21, #0x10]\n"
"ldp x16, x15, [x20, #0x0]\n"
@@ -100,142 +100,142 @@ void a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"ldp x24, x23, [x20, #0x60]\n"
"ldp x22, x21, [x20, #0x70]\n"
"blt 3f\n"
- "ldr q6, [x11, x4]\n"
- "ldr q5, [x10, x4]\n"
+ "ldr q7, [x11, x4]\n"
+ "ldr q6, [x10, x4]\n"
"lsr x20, x3, #0x2\n"
+ "ldr q5, [x27, x4]\n"
+ "ldr q4, [x26, x4]\n"
+ "ldr q3, [x15, x4]\n"
+ "ldr q2, [x14, x4]\n"
+ "ldr q1, [x12, x4]\n"
+ "ldr q0, [x28, x4]\n"
"sub x3, x3, x20, LSL #2\n"
- "ldr q4, [x27, x4]\n"
- "ldr q3, [x26, x4]\n"
"subs x20, x20, #0x1\n"
- "ldr q2, [x15, x4]\n"
- "ldr q1, [x14, x4]\n"
- "ldr q0, [x12, x4]\n"
- "ldr q31, [x28, x4]\n"
- "ldr q30, [x9, x4]\n"
- "ldr q29, [x25, x4]\n"
- "ldr q28, [x23, x4]\n"
- "ldr q27, [x22, x4]\n"
- "ldr q26, [x16, x4]\n"
- "ldr q25, [x13, x4]\n"
- "ldr q24, [x24, x4]\n"
- "ldr q23, [x21, x4]\n"
+ "ldr q31, [x9, x4]\n"
+ "ldr q30, [x25, x4]\n"
+ "ldr q29, [x23, x4]\n"
+ "ldr q28, [x22, x4]\n"
+ "ldr q27, [x16, x4]\n"
+ "ldr q26, [x13, x4]\n"
+ "ldr q25, [x24, x4]\n"
+ "ldr q24, [x21, x4]\n"
"add x4, x4, #0x10\n"
"beq 2f\n"
"1:" // Vector: Loop
- "fadd v17.4s, v6.4s, v5.4s\n"
- "ldr q6, [x11, x4]\n"
- "ldr q5, [x10, x4]\n"
- "fadd v16.4s, v4.4s, v3.4s\n"
- "ldr q4, [x27, x4]\n"
- "ldr q3, [x26, x4]\n"
- "fadd v19.4s, v17.4s, v16.4s\n"
- "fadd v18.4s, v2.4s, v1.4s\n"
- "ldr q2, [x15, x4]\n"
- "ldr q1, [x14, x4]\n"
- "fadd v17.4s, v0.4s, v31.4s\n"
- "fadd v22.4s, v30.4s, v29.4s\n"
- "ldr q0, [x12, x4]\n"
- "ldr q31, [x28, x4]\n"
- "fadd v16.4s, v28.4s, v27.4s\n"
- "fadd v21.4s, v18.4s, v19.4s\n"
- "ldr q30, [x9, x4]\n"
- "ldr q29, [x25, x4]\n"
- "fadd v20.4s, v16.4s, v19.4s\n"
- "fadd v19.4s, v26.4s, v17.4s\n"
- "ldr q28, [x23, x4]\n"
- "ldr q27, [x22, x4]\n"
- "fadd v18.4s, v25.4s, v22.4s\n"
- "fadd v17.4s, v24.4s, v17.4s\n"
- "ldr q26, [x16, x4]\n"
- "ldr q25, [x13, x4]\n"
- "fadd v16.4s, v23.4s, v22.4s\n"
- "fadd v19.4s, v21.4s, v19.4s\n"
- "ldr q24, [x24, x4]\n"
- "ldr q23, [x21, x4]\n"
- "fadd v18.4s, v21.4s, v18.4s\n"
- "fadd v17.4s, v17.4s, v20.4s\n"
- "fadd v16.4s, v16.4s, v20.4s\n"
+ "fadd v19.4s, v7.4s, v6.4s\n"
+ "ldr q7, [x11, x4]\n"
+ "ldr q6, [x10, x4]\n"
+ "fadd v16.4s, v5.4s, v4.4s\n"
+ "ldr q5, [x27, x4]\n"
+ "ldr q4, [x26, x4]\n"
+ "fadd v23.4s, v3.4s, v2.4s\n"
+ "fadd v18.4s, v1.4s, v0.4s\n"
+ "ldr q3, [x15, x4]\n"
+ "ldr q2, [x14, x4]\n"
+ "fadd v17.4s, v31.4s, v30.4s\n"
+ "fadd v22.4s, v29.4s, v28.4s\n"
+ "ldr q1, [x12, x4]\n"
+ "ldr q0, [x28, x4]\n"
+ "fadd v16.4s, v19.4s, v16.4s\n"
"subs x20, x20, #0x1\n"
- "fmul v19.4s, v19.4s, v7.s[0]\n"
+ "ldr q31, [x9, x4]\n"
+ "ldr q30, [x25, x4]\n"
+ "fadd v19.4s, v27.4s, v18.4s\n"
+ "fadd v21.4s, v25.4s, v18.4s\n"
+ "ldr q29, [x23, x4]\n"
+ "ldr q28, [x22, x4]\n"
+ "fadd v18.4s, v26.4s, v17.4s\n"
+ "fadd v20.4s, v24.4s, v17.4s\n"
+ "ldr q27, [x16, x4]\n"
+ "ldr q26, [x13, x4]\n"
+ "fadd v17.4s, v23.4s, v16.4s\n"
+ "fadd v16.4s, v22.4s, v16.4s\n"
+ "ldr q25, [x24, x4]\n"
+ "ldr q24, [x21, x4]\n"
"add x4, x4, #0x10\n"
- "fmul v18.4s, v18.4s, v7.s[1]\n"
- "fmul v17.4s, v17.4s, v7.s[2]\n"
+ "fadd v19.4s, v17.4s, v19.4s\n"
+ "fadd v18.4s, v17.4s, v18.4s\n"
+ "fadd v17.4s, v21.4s, v16.4s\n"
+ "fadd v16.4s, v20.4s, v16.4s\n"
+ "fmul v19.4s, v19.4s, v8.s[0]\n"
+ "fmul v18.4s, v18.4s, v8.s[1]\n"
+ "fmul v17.4s, v17.4s, v8.s[2]\n"
+ "fmul v16.4s, v16.4s, v8.s[3]\n"
"str q19, [x6, x5]\n"
- "fmul v16.4s, v16.4s, v7.s[3]\n"
"str q18, [x7, x5]\n"
"str q17, [x8, x5]\n"
"str q16, [x17, x5]\n"
"add x5, x5, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
- "fadd v17.4s, v6.4s, v5.4s\n"
- "fadd v16.4s, v4.4s, v3.4s\n"
- "fadd v19.4s, v17.4s, v16.4s\n"
- "fadd v18.4s, v2.4s, v1.4s\n"
- "fadd v17.4s, v0.4s, v31.4s\n"
- "fadd v22.4s, v30.4s, v29.4s\n"
- "fadd v16.4s, v28.4s, v27.4s\n"
- "fadd v21.4s, v18.4s, v19.4s\n"
- "fadd v20.4s, v16.4s, v19.4s\n"
- "fadd v19.4s, v26.4s, v17.4s\n"
- "fadd v18.4s, v25.4s, v22.4s\n"
- "fadd v17.4s, v24.4s, v17.4s\n"
- "fadd v16.4s, v23.4s, v22.4s\n"
- "fadd v19.4s, v21.4s, v19.4s\n"
- "fadd v18.4s, v21.4s, v18.4s\n"
- "fadd v17.4s, v17.4s, v20.4s\n"
- "fadd v16.4s, v16.4s, v20.4s\n"
- "fmul v19.4s, v19.4s, v7.s[0]\n"
+ "fadd v19.4s, v7.4s, v6.4s\n"
+ "fadd v16.4s, v5.4s, v4.4s\n"
+ "fadd v23.4s, v3.4s, v2.4s\n"
+ "fadd v18.4s, v1.4s, v0.4s\n"
+ "fadd v17.4s, v31.4s, v30.4s\n"
+ "fadd v22.4s, v29.4s, v28.4s\n"
+ "fadd v16.4s, v19.4s, v16.4s\n"
+ "fadd v19.4s, v27.4s, v18.4s\n"
+ "fadd v21.4s, v25.4s, v18.4s\n"
+ "fadd v18.4s, v26.4s, v17.4s\n"
+ "fadd v20.4s, v24.4s, v17.4s\n"
+ "fadd v17.4s, v23.4s, v16.4s\n"
+ "fadd v16.4s, v22.4s, v16.4s\n"
+ "fadd v19.4s, v17.4s, v19.4s\n"
+ "fadd v18.4s, v17.4s, v18.4s\n"
+ "fadd v17.4s, v21.4s, v16.4s\n"
+ "fadd v16.4s, v20.4s, v16.4s\n"
+ "fmul v19.4s, v19.4s, v8.s[0]\n"
+ "fmul v18.4s, v18.4s, v8.s[1]\n"
+ "fmul v17.4s, v17.4s, v8.s[2]\n"
+ "fmul v16.4s, v16.4s, v8.s[3]\n"
"str q19, [x6, x5]\n"
- "fmul v18.4s, v18.4s, v7.s[1]\n"
- "fmul v17.4s, v17.4s, v7.s[2]\n"
"str q18, [x7, x5]\n"
- "fmul v16.4s, v16.4s, v7.s[3]\n"
"str q17, [x8, x5]\n"
"str q16, [x17, x5]\n"
"add x5, x5, #0x10\n"
"cbz x3, 4f\n"
"3:" // Oddments
- "ldr s17, [x11, x4]\n"
- "ldr s16, [x10, x4]\n"
- "fadd v18.4s, v17.4s, v16.4s\n"
+ "ldr s22, [x11, x4]\n"
+ "ldr s21, [x10, x4]\n"
"subs x3, x3, #0x1\n"
- "ldr s17, [x27, x4]\n"
+ "ldr s20, [x27, x4]\n"
"ldr s16, [x26, x4]\n"
- "fadd v16.4s, v17.4s, v16.4s\n"
- "fadd v18.4s, v18.4s, v16.4s\n"
- "ldr s17, [x15, x4]\n"
- "ldr s16, [x14, x4]\n"
- "fadd v16.4s, v17.4s, v16.4s\n"
- "fadd v23.4s, v16.4s, v18.4s\n"
- "ldr s17, [x12, x4]\n"
- "ldr s16, [x28, x4]\n"
- "fadd v22.4s, v17.4s, v16.4s\n"
- "ldr s17, [x9, x4]\n"
- "ldr s16, [x25, x4]\n"
- "fadd v21.4s, v17.4s, v16.4s\n"
- "ldr s17, [x23, x4]\n"
+ "ldr s19, [x15, x4]\n"
+ "ldr s18, [x14, x4]\n"
+ "ldr s23, [x12, x4]\n"
+ "ldr s17, [x28, x4]\n"
+ "fadd v22.4s, v22.4s, v21.4s\n"
+ "ldr s27, [x9, x4]\n"
+ "ldr s26, [x25, x4]\n"
+ "fadd v20.4s, v20.4s, v16.4s\n"
+ "ldr s25, [x23, x4]\n"
"ldr s16, [x22, x4]\n"
- "fadd v16.4s, v17.4s, v16.4s\n"
- "fadd v20.4s, v16.4s, v18.4s\n"
- "ldr s17, [x16, x4]\n"
- "ldr s16, [x13, x4]\n"
- "fadd v19.4s, v17.4s, v22.4s\n"
- "fadd v18.4s, v16.4s, v21.4s\n"
+ "fadd v21.4s, v19.4s, v18.4s\n"
+ "ldr s19, [x16, x4]\n"
+ "ldr s18, [x13, x4]\n"
+ "fadd v24.4s, v23.4s, v17.4s\n"
"ldr s17, [x24, x4]\n"
- "ldr s16, [x21, x4]\n"
- "fadd v17.4s, v17.4s, v22.4s\n"
- "fadd v16.4s, v16.4s, v21.4s\n"
- "fadd v19.4s, v23.4s, v19.4s\n"
- "fadd v18.4s, v23.4s, v18.4s\n"
+ "ldr s23, [x21, x4]\n"
+ "fadd v22.4s, v22.4s, v20.4s\n"
+ "fadd v20.4s, v27.4s, v26.4s\n"
+ "fadd v16.4s, v25.4s, v16.4s\n"
"add x4, x4, #0x4\n"
- "fadd v17.4s, v17.4s, v20.4s\n"
- "fadd v16.4s, v16.4s, v20.4s\n"
- "fmul v19.4s, v19.4s, v7.s[0]\n"
- "fmul v18.4s, v18.4s, v7.s[1]\n"
+ "fadd v19.4s, v19.4s, v24.4s\n"
+ "fadd v21.4s, v21.4s, v22.4s\n"
+ "fadd v18.4s, v18.4s, v20.4s\n"
+ "fadd v17.4s, v17.4s, v24.4s\n"
+ "fadd v20.4s, v23.4s, v20.4s\n"
+ "fadd v16.4s, v16.4s, v22.4s\n"
+ "fadd v19.4s, v21.4s, v19.4s\n"
+ "fadd v18.4s, v21.4s, v18.4s\n"
+ "fadd v17.4s, v17.4s, v16.4s\n"
+ "fadd v16.4s, v20.4s, v16.4s\n"
+ "fmul v19.4s, v19.4s, v8.s[0]\n"
+ "fmul v18.4s, v18.4s, v8.s[1]\n"
+ "fmul v17.4s, v17.4s, v8.s[2]\n"
+ "fmul v16.4s, v16.4s, v8.s[3]\n"
"str s19, [x6, x5]\n"
- "fmul v17.4s, v17.4s, v7.s[2]\n"
- "fmul v16.4s, v16.4s, v7.s[3]\n"
"str s18, [x7, x5]\n"
"str s17, [x8, x5]\n"
"str s16, [x17, x5]\n"
@@ -244,7 +244,7 @@ void a64_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"4:" // End
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
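
(Editor's note: unlike the generic kernels, this 3x3/stride-1 kernel produces a fixed 2x2 output tile per call and loads four per-output rescale factors into q8 ("ldr q8, [%x[args], %[offsetof_rescale]]"), multiplying each output by a different lane v8.s[0..3] — presumably so that windows clipped by padding can use a smaller divisor. An illustrative model, with hypothetical names; the real kernel reads sixteen input pointers from KernelArgs and shares partial row sums between the four overlapping windows rather than recomputing them:)

void avg3x3_s1_2x2_ref(const float in[4][4],        // 4x4 input patch, one channel
                       float out[2][2],
                       const float rescale_vals[4]) // one factor per output point
{
  for (int oy = 0; oy < 2; oy++)
  {
    for (int ox = 0; ox < 2; ox++)
    {
      float acc = 0.0f;
      for (int ky = 0; ky < 3; ky++)
        for (int kx = 0; kx < 3; kx++)
          acc += in[oy + ky][ox + kx];
      // One reciprocal per output (lanes v8.s[0..3] in the assembly above).
      out[oy][ox] = acc * rescale_vals[oy * 2 + ox];
    }
  }
}
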
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
index d236f07b1c..71450f56e2 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,127 +42,127 @@ void a64_fp32_nhwc_avg_generic_depthfirst_impl(
const auto rescale_value = static_cast<float>(1.0f / static_cast<float>(window_cells));
__asm__ __volatile__(
- "ld1r { v9.4s }, [%x[rescale_ptr]]\n"
+ "ld1r { v10.4s }, [%x[rescale_ptr]]\n"
"cmp %x[n_channels], #0x10\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
+ "mov x9, #0x0\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "mov x27, #0x20\n" // cntb _, ALL, #2
+ "mov x26, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "movi v9.16b, #0x0\n"
"movi v8.16b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"movi v7.16b, #0x0\n"
- "mov x22, %x[inptrs]\n"
"movi v6.16b, #0x0\n"
- "movi v5.16b, #0x0\n"
"cbz x25, 4f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldr q2, [x21, x26]\n"
- "ldr q1, [x20, x26]\n"
- "ldr q0, [x21, x24]\n"
- "ldr q31, [x20, x24]\n"
- "ldr q30, [x21, x23]\n"
- "ldr q29, [x20, x23]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
+ "ldr q27, [x20, x27]\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fadd v23.4s, v4.4s, v3.4s\n"
- "fadd v19.4s, v28.4s, v22.4s\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "fadd v22.4s, v2.4s, v1.4s\n"
- "ldr q2, [x21, x26]\n"
- "fadd v18.4s, v27.4s, v21.4s\n"
- "ldr q1, [x20, x26]\n"
- "fadd v21.4s, v0.4s, v31.4s\n"
- "ldr q0, [x21, x24]\n"
- "fadd v17.4s, v26.4s, v20.4s\n"
- "ldr q31, [x20, x24]\n"
- "fadd v20.4s, v30.4s, v29.4s\n"
- "ldr q30, [x21, x23]\n"
+ "fadd v23.4s, v5.4s, v4.4s\n"
+ "fadd v19.4s, v3.4s, v2.4s\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
+ "fadd v22.4s, v1.4s, v0.4s\n"
+ "fadd v18.4s, v31.4s, v30.4s\n"
+ "subs x25, x25, #0x1\n"
+ "add x24, x24, #0x20\n"
+ "fadd v21.4s, v29.4s, v21.4s\n"
+ "fadd v17.4s, v28.4s, v27.4s\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "fadd v20.4s, v26.4s, v20.4s\n"
"fadd v16.4s, v25.4s, v24.4s\n"
- "ldr q29, [x20, x23]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
"fadd v19.4s, v23.4s, v19.4s\n"
"fadd v18.4s, v22.4s, v18.4s\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
"fadd v17.4s, v21.4s, v17.4s\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"fadd v16.4s, v20.4s, v16.4s\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "subs x25, x25, #0x1\n"
- "fadd v8.4s, v8.4s, v19.4s\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "fadd v7.4s, v7.4s, v18.4s\n"
- "fadd v6.4s, v6.4s, v17.4s\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
- "fadd v5.4s, v5.4s, v16.4s\n"
- "add x22, x22, #0x20\n"
+ "ldr q28, [x21, x27]\n"
+ "ldr q27, [x20, x27]\n"
+ "fadd v9.4s, v9.4s, v19.4s\n"
+ "fadd v8.4s, v8.4s, v18.4s\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "fadd v7.4s, v7.4s, v17.4s\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
+ "fadd v6.4s, v6.4s, v16.4s\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fadd v23.4s, v4.4s, v3.4s\n"
- "fadd v19.4s, v28.4s, v22.4s\n"
- "fadd v22.4s, v2.4s, v1.4s\n"
- "fadd v18.4s, v27.4s, v21.4s\n"
- "fadd v21.4s, v0.4s, v31.4s\n"
- "fadd v17.4s, v26.4s, v20.4s\n"
- "fadd v20.4s, v30.4s, v29.4s\n"
+ "fadd v23.4s, v5.4s, v4.4s\n"
+ "fadd v19.4s, v3.4s, v2.4s\n"
+ "fadd v22.4s, v1.4s, v0.4s\n"
+ "fadd v18.4s, v31.4s, v30.4s\n"
+ "fadd v21.4s, v29.4s, v21.4s\n"
+ "fadd v17.4s, v28.4s, v27.4s\n"
+ "fadd v20.4s, v26.4s, v20.4s\n"
"fadd v16.4s, v25.4s, v24.4s\n"
"fadd v19.4s, v23.4s, v19.4s\n"
"fadd v18.4s, v22.4s, v18.4s\n"
"fadd v17.4s, v21.4s, v17.4s\n"
"fadd v16.4s, v20.4s, v16.4s\n"
- "fadd v8.4s, v8.4s, v19.4s\n"
- "fadd v7.4s, v7.4s, v18.4s\n"
- "fadd v6.4s, v6.4s, v17.4s\n"
- "fadd v5.4s, v5.4s, v16.4s\n"
+ "fadd v9.4s, v9.4s, v19.4s\n"
+ "fadd v8.4s, v8.4s, v18.4s\n"
+ "fadd v7.4s, v7.4s, v17.4s\n"
+ "fadd v6.4s, v6.4s, v16.4s\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "fadd v8.4s, v8.4s, v16.4s\n"
- "ldr q17, [x20, x26]\n"
- "ldr q16, [x20, x24]\n"
+ "ldr q19, [x20, x9]\n"
+ "ldr q18, [x20, x28]\n"
+ "ldr q17, [x20, x27]\n"
+ "ldr q16, [x20, x26]\n"
+ "fadd v9.4s, v9.4s, v19.4s\n"
+ "fadd v8.4s, v8.4s, v18.4s\n"
"fadd v7.4s, v7.4s, v17.4s\n"
"fadd v6.4s, v6.4s, v16.4s\n"
- "ldr q16, [x20, x23]\n"
- "fadd v5.4s, v5.4s, v16.4s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x10\n"
+ "fmul v9.4s, v9.4s, v10.4s\n"
+ "fmul v8.4s, v8.4s, v10.4s\n"
"cmp %x[n_channels], #0x10\n"
- "fmul v8.4s, v8.4s, v9.4s\n"
- "fmul v7.4s, v7.4s, v9.4s\n"
- "fmul v6.4s, v6.4s, v9.4s\n"
- "fmul v5.4s, v5.4s, v9.4s\n"
- "str q8, [%x[outptr], x27]\n"
+ "fmul v7.4s, v7.4s, v10.4s\n"
+ "fmul v6.4s, v6.4s, v10.4s\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x40\n"
+ "str q8, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
+ "str q7, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "str q7, [%x[outptr], x26]\n"
+ "str q6, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
- "str q6, [%x[outptr], x24]\n"
- "add x24, x24, #0x40\n"
- "str q5, [%x[outptr], x23]\n"
- "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 25f\n"
"7:" // Single vector of channels
@@ -170,130 +170,130 @@ void a64_fp32_nhwc_avg_generic_depthfirst_impl(
"blt 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "mov x22, %x[inptrs]\n"
+ "movi v9.16b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fadd v17.4s, v4.4s, v3.4s\n"
- "fadd v16.4s, v28.4s, v22.4s\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "fadd v16.4s, v17.4s, v16.4s\n"
- "ldp x21, x20, [x22, #0x10]\n"
+ "fadd v17.4s, v5.4s, v4.4s\n"
+ "fadd v16.4s, v3.4s, v2.4s\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "fadd v8.4s, v8.4s, v16.4s\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "fadd v16.4s, v17.4s, v16.4s\n"
+ "fadd v9.4s, v9.4s, v16.4s\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fadd v17.4s, v4.4s, v3.4s\n"
- "fadd v16.4s, v28.4s, v22.4s\n"
+ "fadd v17.4s, v5.4s, v4.4s\n"
+ "fadd v16.4s, v3.4s, v2.4s\n"
"fadd v16.4s, v17.4s, v16.4s\n"
- "fadd v8.4s, v8.4s, v16.4s\n"
+ "fadd v9.4s, v9.4s, v16.4s\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "fadd v8.4s, v8.4s, v16.4s\n"
+ "ldr q16, [x20, x9]\n"
+ "fadd v9.4s, v9.4s, v16.4s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x4\n"
+ "fmul v9.4s, v9.4s, v10.4s\n"
"cmp %x[n_channels], #0x4\n"
- "fmul v8.4s, v8.4s, v9.4s\n"
- "str q8, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 25f\n"
"14:" // Oddments
"lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x27\n"
- "movi v8.16b, #0x0\n"
+ "add %x[outptr], %x[outptr], x9\n"
+ "movi v9.16b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 18f\n"
"15:" // Oddments: 4 inputs loop
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
+ "movi v5.16b, #0x0\n"
"movi v4.16b, #0x0\n"
"movi v3.16b, #0x0\n"
- "add x20, x20, x27\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "movi v2.16b, #0x0\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"tbz %x[n_channels], #1, 16f\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "ldr d22, [x20], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d2, [x20], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
- "ld1 { v3.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v22.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
+ "ld1 { v3.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x20], #0x4\n"
"b 17f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 1: Unset
"tbz %x[n_channels], #0, 17f\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "ldr s22, [x20], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s2, [x20], #0x4\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 1: End
- "fadd v17.4s, v4.4s, v3.4s\n"
- "fadd v16.4s, v28.4s, v22.4s\n"
+ "fadd v17.4s, v5.4s, v4.4s\n"
+ "fadd v16.4s, v3.4s, v2.4s\n"
"subs x25, x25, #0x1\n"
"fadd v16.4s, v17.4s, v16.4s\n"
- "fadd v8.4s, v8.4s, v16.4s\n"
+ "fadd v9.4s, v9.4s, v16.4s\n"
"bgt 15b\n"
"18:" // Oddments: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 22f\n"
"19:" // Oddments: Single input loop
"ldr x23, [x24], #0x8\n"
- "add x23, x23, x27\n"
- "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "add x23, x23, x9\n"
"tbz %x[n_channels], #1, 20f\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
"b 21f\n"
"20:" // Oddments: Single input loop: Load: Bit 1: Unset
"tbz %x[n_channels], #0, 21f\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
"21:" // Oddments: Single input loop: Load: Bit 1: End
"subs x21, x21, #0x1\n"
- "fadd v8.4s, v8.4s, v4.4s\n"
+ "fadd v9.4s, v9.4s, v5.4s\n"
"bgt 19b\n"
"22:" // Oddments: Single input loop: End
- "fmul v8.4s, v8.4s, v9.4s\n"
+ "fmul v9.4s, v9.4s, v10.4s\n"
"tbz %x[n_channels], #1, 23f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v9.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[2], [%x[outptr]], #0x4\n"
"b 24f\n"
"23:" // Oddments: Store: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[0], [%x[outptr]], #0x4\n"
"24:" // Oddments: Store: Bit 1: End
"25:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
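
(Editor's note: this fp32 generic average pool is structurally identical to the fp16 one earlier in the diff; only the lane count changes, which is why its blocking constants are halved — 0x10 channels per four-vector pass instead of 0x20, and a #0x4 single-vector step instead of #0x8. A sketch of that relationship; kBytesPerVec and the derived names are illustrative:)

constexpr unsigned kBytesPerVec = 16;                  // one NEON q-register
constexpr unsigned kFp16PerPass = 4 * (kBytesPerVec / 2);  // 32 == 0x20 fp16 channels
constexpr unsigned kFp32PerPass = 4 * (kBytesPerVec / 4);  // 16 == 0x10 fp32 channels
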
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index f4202de1ed..9fa8e7c609 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -65,11 +65,11 @@ void a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
__asm__ __volatile__(
"ldr x16, [%x[args], %[offsetof_n_channels]]\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "cmp x16, #0x4\n"
"mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "mov x12, #0x0\n"
+ "cmp x16, #0x4\n"
+ "ldp x13, x12, [x21, #0x0]\n"
"ldp x11, x10, [x21, #0x10]\n"
"ldp x9, x28, [x20, #0x0]\n"
"ldp x27, x26, [x20, #0x10]\n"
@@ -80,14 +80,14 @@ void a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr q30, [x28, x15]\n"
"ldr q29, [x25, x15]\n"
"lsr x20, x16, #0x2\n"
- "sub x16, x16, x20, LSL #2\n"
"ldr q28, [x22, x15]\n"
"ldr q27, [x26, x15]\n"
- "subs x20, x20, #0x1\n"
"ldr q26, [x9, x15]\n"
"ldr q25, [x27, x15]\n"
"ldr q24, [x24, x15]\n"
"ldr q23, [x23, x15]\n"
+ "sub x16, x16, x20, LSL #2\n"
+ "subs x20, x20, #0x1\n"
"ldr q22, [x21, x15]\n"
"add x15, x15, #0x10\n"
"beq 2f\n"
@@ -107,62 +107,62 @@ void a64_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr q24, [x24, x15]\n"
"ldr q23, [x23, x15]\n"
"subs x20, x20, #0x1\n"
- "fmax v19.4s, v21.4s, v19.4s\n"
"ldr q22, [x21, x15]\n"
+ "fmax v19.4s, v21.4s, v19.4s\n"
"fmax v18.4s, v18.4s, v21.4s\n"
- "fmax v17.4s, v17.4s, v20.4s\n"
"add x15, x15, #0x10\n"
+ "fmax v17.4s, v17.4s, v20.4s\n"
"fmax v16.4s, v20.4s, v16.4s\n"
- "str q19, [x14, x12]\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "str q19, [x13, x14]\n"
+ "str q18, [x12, x14]\n"
+ "str q17, [x11, x14]\n"
+ "str q16, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"fmax v21.4s, v30.4s, v29.4s\n"
"fmax v20.4s, v29.4s, v28.4s\n"
- "fmax v16.4s, v27.4s, v26.4s\n"
+ "fmax v19.4s, v27.4s, v26.4s\n"
"fmax v18.4s, v25.4s, v24.4s\n"
"fmax v17.4s, v27.4s, v23.4s\n"
- "fmax v19.4s, v24.4s, v22.4s\n"
- "fmax v16.4s, v21.4s, v16.4s\n"
+ "fmax v16.4s, v24.4s, v22.4s\n"
+ "fmax v19.4s, v21.4s, v19.4s\n"
"fmax v18.4s, v18.4s, v21.4s\n"
- "str q16, [x14, x12]\n"
"fmax v17.4s, v17.4s, v20.4s\n"
- "fmax v16.4s, v20.4s, v19.4s\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "fmax v16.4s, v20.4s, v16.4s\n"
+ "str q19, [x13, x14]\n"
+ "str q18, [x12, x14]\n"
+ "str q17, [x11, x14]\n"
+ "str q16, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
"cbz x16, 4f\n"
"3:" // Oddments
"ldr s16, [x28, x15]\n"
- "ldr s17, [x25, x15]\n"
- "fmax v23.4s, v16.4s, v17.4s\n"
+ "ldr s24, [x25, x15]\n"
"subs x16, x16, #0x1\n"
- "ldr s16, [x22, x15]\n"
- "ldr s22, [x26, x15]\n"
- "fmax v21.4s, v17.4s, v16.4s\n"
- "ldr s16, [x9, x15]\n"
- "ldr s17, [x27, x15]\n"
- "fmax v16.4s, v22.4s, v16.4s\n"
- "fmax v20.4s, v23.4s, v16.4s\n"
- "ldr s19, [x24, x15]\n"
- "ldr s16, [x23, x15]\n"
- "fmax v18.4s, v17.4s, v19.4s\n"
- "fmax v17.4s, v22.4s, v16.4s\n"
+ "ldr s20, [x22, x15]\n"
+ "ldr s23, [x26, x15]\n"
+ "ldr s19, [x9, x15]\n"
+ "ldr s18, [x27, x15]\n"
+ "ldr s22, [x24, x15]\n"
+ "ldr s17, [x23, x15]\n"
+ "fmax v21.4s, v16.4s, v24.4s\n"
"ldr s16, [x21, x15]\n"
- "fmax v16.4s, v19.4s, v16.4s\n"
+ "fmax v20.4s, v24.4s, v20.4s\n"
"add x15, x15, #0x4\n"
- "fmax v18.4s, v18.4s, v23.4s\n"
- "fmax v17.4s, v17.4s, v21.4s\n"
- "fmax v16.4s, v21.4s, v16.4s\n"
- "str s20, [x14, x12]\n"
- "str s18, [x13, x12]\n"
- "str s17, [x11, x12]\n"
- "str s16, [x10, x12]\n"
- "add x12, x12, #0x4\n"
+ "fmax v19.4s, v23.4s, v19.4s\n"
+ "fmax v18.4s, v18.4s, v22.4s\n"
+ "fmax v17.4s, v23.4s, v17.4s\n"
+ "fmax v16.4s, v22.4s, v16.4s\n"
+ "fmax v19.4s, v21.4s, v19.4s\n"
+ "fmax v18.4s, v18.4s, v21.4s\n"
+ "fmax v17.4s, v17.4s, v20.4s\n"
+ "fmax v16.4s, v20.4s, v16.4s\n"
+ "str s19, [x13, x14]\n"
+ "str s18, [x12, x14]\n"
+ "str s17, [x11, x14]\n"
+ "str s16, [x10, x14]\n"
+ "add x14, x14, #0x4\n"
"bgt 3b\n"
"4:" // End
:
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
index f4706635dc..317966d53a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,122 +41,122 @@ void a64_fp32_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x10\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
+ "mov x9, #0x0\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "mov x27, #0x20\n" // cntb _, ALL, #2
+ "mov x26, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"mov w20, #0xff800000\n"
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "mov x24, %x[inptrs]\n"
+ "dup v9.4s, w20\n"
"dup v8.4s, w20\n"
"dup v7.4s, w20\n"
"dup v6.4s, w20\n"
- "dup v5.4s, w20\n"
- "mov x22, %x[inptrs]\n"
"cbz x25, 4f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldr q2, [x21, x26]\n"
- "ldr q1, [x20, x26]\n"
- "ldr q0, [x21, x24]\n"
- "ldr q31, [x20, x24]\n"
- "ldr q30, [x21, x23]\n"
- "ldr q29, [x20, x23]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
+ "ldr q27, [x20, x27]\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fmax v23.4s, v4.4s, v3.4s\n"
- "fmax v19.4s, v28.4s, v22.4s\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "fmax v22.4s, v2.4s, v1.4s\n"
- "ldr q2, [x21, x26]\n"
- "fmax v18.4s, v27.4s, v21.4s\n"
- "ldr q1, [x20, x26]\n"
- "fmax v21.4s, v0.4s, v31.4s\n"
- "ldr q0, [x21, x24]\n"
- "fmax v17.4s, v26.4s, v20.4s\n"
- "ldr q31, [x20, x24]\n"
- "fmax v20.4s, v30.4s, v29.4s\n"
- "ldr q30, [x21, x23]\n"
+ "fmax v23.4s, v5.4s, v4.4s\n"
+ "fmax v19.4s, v3.4s, v2.4s\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
+ "fmax v22.4s, v1.4s, v0.4s\n"
+ "fmax v18.4s, v31.4s, v30.4s\n"
+ "subs x25, x25, #0x1\n"
+ "add x24, x24, #0x20\n"
+ "fmax v21.4s, v29.4s, v21.4s\n"
+ "fmax v17.4s, v28.4s, v27.4s\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "fmax v20.4s, v26.4s, v20.4s\n"
"fmax v16.4s, v25.4s, v24.4s\n"
- "ldr q29, [x20, x23]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
"fmax v19.4s, v23.4s, v19.4s\n"
"fmax v18.4s, v22.4s, v18.4s\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
"fmax v17.4s, v21.4s, v17.4s\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"fmax v16.4s, v20.4s, v16.4s\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "subs x25, x25, #0x1\n"
- "fmax v8.4s, v8.4s, v19.4s\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "fmax v7.4s, v7.4s, v18.4s\n"
- "fmax v6.4s, v6.4s, v17.4s\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
- "fmax v5.4s, v5.4s, v16.4s\n"
- "add x22, x22, #0x20\n"
+ "ldr q28, [x21, x27]\n"
+ "ldr q27, [x20, x27]\n"
+ "fmax v9.4s, v9.4s, v19.4s\n"
+ "fmax v8.4s, v8.4s, v18.4s\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "fmax v7.4s, v7.4s, v17.4s\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
+ "fmax v6.4s, v6.4s, v16.4s\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fmax v23.4s, v4.4s, v3.4s\n"
- "fmax v19.4s, v28.4s, v22.4s\n"
- "fmax v22.4s, v2.4s, v1.4s\n"
- "fmax v18.4s, v27.4s, v21.4s\n"
- "fmax v21.4s, v0.4s, v31.4s\n"
- "fmax v17.4s, v26.4s, v20.4s\n"
- "fmax v20.4s, v30.4s, v29.4s\n"
+ "fmax v23.4s, v5.4s, v4.4s\n"
+ "fmax v19.4s, v3.4s, v2.4s\n"
+ "fmax v22.4s, v1.4s, v0.4s\n"
+ "fmax v18.4s, v31.4s, v30.4s\n"
+ "fmax v21.4s, v29.4s, v21.4s\n"
+ "fmax v17.4s, v28.4s, v27.4s\n"
+ "fmax v20.4s, v26.4s, v20.4s\n"
"fmax v16.4s, v25.4s, v24.4s\n"
"fmax v19.4s, v23.4s, v19.4s\n"
"fmax v18.4s, v22.4s, v18.4s\n"
"fmax v17.4s, v21.4s, v17.4s\n"
"fmax v16.4s, v20.4s, v16.4s\n"
- "fmax v8.4s, v8.4s, v19.4s\n"
- "fmax v7.4s, v7.4s, v18.4s\n"
- "fmax v6.4s, v6.4s, v17.4s\n"
- "fmax v5.4s, v5.4s, v16.4s\n"
+ "fmax v9.4s, v9.4s, v19.4s\n"
+ "fmax v8.4s, v8.4s, v18.4s\n"
+ "fmax v7.4s, v7.4s, v17.4s\n"
+ "fmax v6.4s, v6.4s, v16.4s\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "fmax v8.4s, v8.4s, v16.4s\n"
- "ldr q17, [x20, x26]\n"
- "ldr q16, [x20, x24]\n"
+ "ldr q19, [x20, x9]\n"
+ "ldr q18, [x20, x28]\n"
+ "ldr q17, [x20, x27]\n"
+ "ldr q16, [x20, x26]\n"
+ "fmax v9.4s, v9.4s, v19.4s\n"
+ "fmax v8.4s, v8.4s, v18.4s\n"
"fmax v7.4s, v7.4s, v17.4s\n"
"fmax v6.4s, v6.4s, v16.4s\n"
- "ldr q16, [x20, x23]\n"
- "fmax v5.4s, v5.4s, v16.4s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x10\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x40\n"
"cmp %x[n_channels], #0x10\n"
- "str q8, [%x[outptr], x27]\n"
- "str q7, [%x[outptr], x26]\n"
+ "str q8, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
+ "str q7, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
+ "str q6, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
- "str q6, [%x[outptr], x24]\n"
- "add x24, x24, #0x40\n"
- "str q5, [%x[outptr], x23]\n"
- "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 25f\n"
"7:" // Single vector of channels
@@ -165,129 +165,129 @@ void a64_fp32_nhwc_max_generic_depthfirst_impl(
"8:" // Single vector of channels: Loop
"mov w20, #0xff800000\n"
"lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.4s, w20\n"
- "mov x22, %x[inptrs]\n"
+ "mov x24, %x[inptrs]\n"
+ "dup v9.4s, w20\n"
"cbz x25, 11f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fmax v17.4s, v4.4s, v3.4s\n"
- "fmax v16.4s, v28.4s, v22.4s\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "fmax v16.4s, v17.4s, v16.4s\n"
- "ldp x21, x20, [x22, #0x10]\n"
+ "fmax v17.4s, v5.4s, v4.4s\n"
+ "fmax v16.4s, v3.4s, v2.4s\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "fmax v8.4s, v8.4s, v16.4s\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "fmax v16.4s, v17.4s, v16.4s\n"
+ "fmax v9.4s, v9.4s, v16.4s\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fmax v17.4s, v4.4s, v3.4s\n"
- "fmax v16.4s, v28.4s, v22.4s\n"
+ "fmax v17.4s, v5.4s, v4.4s\n"
+ "fmax v16.4s, v3.4s, v2.4s\n"
"fmax v16.4s, v17.4s, v16.4s\n"
- "fmax v8.4s, v8.4s, v16.4s\n"
+ "fmax v9.4s, v9.4s, v16.4s\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "fmax v8.4s, v8.4s, v16.4s\n"
+ "ldr q16, [x20, x9]\n"
+ "fmax v9.4s, v9.4s, v16.4s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x4\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x10\n"
"cmp %x[n_channels], #0x4\n"
- "str q8, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 25f\n"
"14:" // Oddments
"mov w20, #0xff800000\n"
"lsr x25, %x[n_valid_cells], #0x2\n"
- "dup v8.4s, w20\n"
- "add %x[outptr], %x[outptr], x27\n"
+ "add %x[outptr], %x[outptr], x9\n"
"mov x24, %x[inptrs]\n"
+ "dup v9.4s, w20\n"
"cbz x25, 18f\n"
"15:" // Oddments: 4 inputs loop
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
+ "movi v5.16b, #0x0\n"
"movi v4.16b, #0x0\n"
"movi v3.16b, #0x0\n"
- "add x20, x20, x27\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "movi v2.16b, #0x0\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"tbz %x[n_channels], #1, 16f\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "ldr d22, [x20], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d2, [x20], #0x8\n"
"tbz %x[n_channels], #0, 17f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
- "ld1 { v3.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v22.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
+ "ld1 { v3.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x20], #0x4\n"
"b 17f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 1: Unset
"tbz %x[n_channels], #0, 17f\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "ldr s22, [x20], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s2, [x20], #0x4\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 1: End
- "fmax v17.4s, v4.4s, v3.4s\n"
- "fmax v16.4s, v28.4s, v22.4s\n"
+ "fmax v17.4s, v5.4s, v4.4s\n"
+ "fmax v16.4s, v3.4s, v2.4s\n"
"subs x25, x25, #0x1\n"
"fmax v16.4s, v17.4s, v16.4s\n"
- "fmax v8.4s, v8.4s, v16.4s\n"
+ "fmax v9.4s, v9.4s, v16.4s\n"
"bgt 15b\n"
"18:" // Oddments: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 22f\n"
"19:" // Oddments: Single input loop
"ldr x23, [x24], #0x8\n"
- "add x23, x23, x27\n"
- "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "add x23, x23, x9\n"
"tbz %x[n_channels], #1, 20f\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
"tbz %x[n_channels], #0, 21f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
"b 21f\n"
"20:" // Oddments: Single input loop: Load: Bit 1: Unset
"tbz %x[n_channels], #0, 21f\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
"21:" // Oddments: Single input loop: Load: Bit 1: End
"subs x21, x21, #0x1\n"
- "fmax v8.4s, v8.4s, v4.4s\n"
+ "fmax v9.4s, v9.4s, v5.4s\n"
"bgt 19b\n"
"22:" // Oddments: Single input loop: End
"tbz %x[n_channels], #1, 23f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v9.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[2], [%x[outptr]], #0x4\n"
"b 24f\n"
"23:" // Oddments: Store: Bit 1: Unset
"tbz %x[n_channels], #0, 24f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[0], [%x[outptr]], #0x4\n"
"24:" // Oddments: Store: Bit 1: End
"25:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
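
The hunk above is a pure reschedule: the per-vector offsets x27/x26/x24/x23 become x9/x28/x27/x26, the accumulators v8-v5 become v9-v6, and loads are hoisted past the pointer arithmetic, but the reduction is unchanged. Four float32 accumulators start at -infinity (the 0xff800000 pattern dup'd into each lane) and take an element-wise fmax over every valid input cell. A minimal C++ sketch of that semantics, ignoring the kernel's 4-cell and 4-vector unrolling; the function name and signature are illustrative, not Compute Library API:

#include <arm_neon.h>
#include <algorithm>
#include <cmath>
#include <cstddef>

// Semantics of the fp32 NHWC generic max pool, without the unrolling.
void max_pool_fp32_sketch(const float *const *inptrs, size_t n_valid_cells,
                          size_t n_channels, float *outptr) {
  size_t c = 0;
  for (; c + 4 <= n_channels; c += 4) {
    float32x4_t acc = vdupq_n_f32(-INFINITY); // asm: dup v.4s from 0xff800000
    for (size_t i = 0; i < n_valid_cells; ++i)
      acc = vmaxq_f32(acc, vld1q_f32(inptrs[i] + c)); // asm: fmax v.4s
    vst1q_f32(outptr + c, acc);
  }
  for (; c < n_channels; ++c) { // the asm's "Oddments" tail
    float m = -INFINITY;
    for (size_t i = 0; i < n_valid_cells; ++i)
      m = std::max(m, inptrs[i][c]);
    outptr[c] = m;
  }
}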
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
index 5d082102b3..63796ab4a4 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -122,9 +122,9 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"movi v0.4s, #0x0\n"
"cbz x23, 4f\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
+ "ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
"ldr q29, [x21, x26]\n"
"ldr q28, [x20, x26]\n"
@@ -137,26 +137,26 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"saddl v23.8h, v31.8b, v30.8b\n"
"saddl2 v22.8h, v31.16b, v30.16b\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
- "ldr q30, [x20, x27]\n"
+ "subs x23, x23, #0x1\n"
"saddl v21.8h, v29.8b, v28.8b\n"
"saddl2 v20.8h, v29.16b, v28.16b\n"
- "ldr q29, [x21, x26]\n"
- "ldr q28, [x20, x26]\n"
+ "add x22, x22, #0x10\n"
"saddl v19.8h, v27.8b, v26.8b\n"
"saddl2 v18.8h, v27.16b, v26.16b\n"
- "ldr q27, [x21, x25]\n"
- "ldr q26, [x20, x25]\n"
+ "ldr q31, [x21, x27]\n"
+ "ldr q30, [x20, x27]\n"
+ "ldr q29, [x21, x26]\n"
+ "ldr q28, [x20, x26]\n"
"saddl v17.8h, v25.8b, v24.8b\n"
"saddl2 v16.8h, v25.16b, v24.16b\n"
- "ldr q25, [x21, x24]\n"
- "ldr q24, [x20, x24]\n"
- "subs x23, x23, #0x1\n"
+ "ldr q27, [x21, x25]\n"
+ "ldr q26, [x20, x25]\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q25, [x21, x24]\n"
+ "ldr q24, [x20, x24]\n"
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
- "add x22, x22, #0x10\n"
"saddw v11.4s, v11.4s, v21.4h\n"
"saddw2 v10.4s, v10.4s, v21.8h\n"
"saddw v9.4s, v9.4s, v20.4h\n"
@@ -200,17 +200,17 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
- "sxtl v23.8h, v16.8b\n"
- "sxtl2 v22.8h, v16.16b\n"
- "ldr q16, [x20, x26]\n"
+ "subs x23, x23, #0x1\n"
+ "ldr q19, [x20, x27]\n"
+ "ldr q18, [x20, x26]\n"
"ldr q17, [x20, x25]\n"
- "sxtl v21.8h, v16.8b\n"
- "sxtl2 v20.8h, v16.16b\n"
"ldr q16, [x20, x24]\n"
+ "sxtl v23.8h, v19.8b\n"
+ "sxtl2 v22.8h, v19.16b\n"
+ "sxtl v21.8h, v18.8b\n"
+ "sxtl2 v20.8h, v18.16b\n"
"sxtl v19.8h, v17.8b\n"
"sxtl2 v18.8h, v17.16b\n"
- "subs x23, x23, #0x1\n"
"sxtl v17.8h, v16.8b\n"
"sxtl2 v16.8h, v16.16b\n"
"saddw v15.4s, v15.4s, v23.4h\n"
@@ -231,44 +231,44 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"saddw2 v0.4s, v0.4s, v16.8h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
- "ld1r { v16.4s }, [%x[shift_ptr]]\n"
- "sqdmulh v15.4s, v15.4s, v17.4s\n"
- "sqdmulh v14.4s, v14.4s, v17.4s\n"
- "sqdmulh v13.4s, v13.4s, v17.4s\n"
- "sqdmulh v12.4s, v12.4s, v17.4s\n"
+ "ld1r { v19.4s }, [%x[rescale_ptr]]\n"
+ "ld1r { v18.4s }, [%x[shift_ptr]]\n"
+ "movi v17.4s, #0x7f\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
"cmp %x[n_channels], #0x40\n"
- "sqdmulh v11.4s, v11.4s, v17.4s\n"
- "sqdmulh v10.4s, v10.4s, v17.4s\n"
- "sqdmulh v9.4s, v9.4s, v17.4s\n"
- "sqdmulh v8.4s, v8.4s, v17.4s\n"
- "sqdmulh v7.4s, v7.4s, v17.4s\n"
- "sqdmulh v6.4s, v6.4s, v17.4s\n"
- "sqdmulh v5.4s, v5.4s, v17.4s\n"
- "sqdmulh v4.4s, v4.4s, v17.4s\n"
- "sqdmulh v3.4s, v3.4s, v17.4s\n"
- "sqdmulh v2.4s, v2.4s, v17.4s\n"
- "sqdmulh v1.4s, v1.4s, v17.4s\n"
- "sqdmulh v0.4s, v0.4s, v17.4s\n"
- "movi v17.4s, #0x7f\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
- "srshl v11.4s, v11.4s, v16.4s\n"
- "srshl v10.4s, v10.4s, v16.4s\n"
- "srshl v9.4s, v9.4s, v16.4s\n"
- "srshl v8.4s, v8.4s, v16.4s\n"
- "srshl v7.4s, v7.4s, v16.4s\n"
- "srshl v6.4s, v6.4s, v16.4s\n"
- "srshl v5.4s, v5.4s, v16.4s\n"
- "srshl v4.4s, v4.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v16.4s\n"
- "srshl v2.4s, v2.4s, v16.4s\n"
- "srshl v1.4s, v1.4s, v16.4s\n"
- "srshl v0.4s, v0.4s, v16.4s\n"
"not v16.16b, v17.16b\n"
+ "sqdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqdmulh v12.4s, v12.4s, v19.4s\n"
+ "sqdmulh v11.4s, v11.4s, v19.4s\n"
+ "sqdmulh v10.4s, v10.4s, v19.4s\n"
+ "sqdmulh v9.4s, v9.4s, v19.4s\n"
+ "sqdmulh v8.4s, v8.4s, v19.4s\n"
+ "sqdmulh v7.4s, v7.4s, v19.4s\n"
+ "sqdmulh v6.4s, v6.4s, v19.4s\n"
+ "sqdmulh v5.4s, v5.4s, v19.4s\n"
+ "sqdmulh v4.4s, v4.4s, v19.4s\n"
+ "sqdmulh v3.4s, v3.4s, v19.4s\n"
+ "sqdmulh v2.4s, v2.4s, v19.4s\n"
+ "sqdmulh v1.4s, v1.4s, v19.4s\n"
+ "sqdmulh v0.4s, v0.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
+ "srshl v11.4s, v11.4s, v18.4s\n"
+ "srshl v10.4s, v10.4s, v18.4s\n"
+ "srshl v9.4s, v9.4s, v18.4s\n"
+ "srshl v8.4s, v8.4s, v18.4s\n"
+ "srshl v7.4s, v7.4s, v18.4s\n"
+ "srshl v6.4s, v6.4s, v18.4s\n"
+ "srshl v5.4s, v5.4s, v18.4s\n"
+ "srshl v4.4s, v4.4s, v18.4s\n"
+ "srshl v3.4s, v3.4s, v18.4s\n"
+ "srshl v2.4s, v2.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v18.4s\n"
+ "srshl v0.4s, v0.4s, v18.4s\n"
"smax v15.4s, v15.4s, v16.4s\n"
"smax v14.4s, v14.4s, v16.4s\n"
"smax v13.4s, v13.4s, v16.4s\n"
@@ -302,19 +302,19 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"smin v1.4s, v1.4s, v17.4s\n"
"smin v0.4s, v0.4s, v17.4s\n"
"uzp1 v23.16b, v15.16b, v14.16b\n"
- "uzp1 v16.16b, v13.16b, v12.16b\n"
+ "uzp1 v19.16b, v13.16b, v12.16b\n"
"uzp1 v22.16b, v11.16b, v10.16b\n"
"uzp1 v18.16b, v9.16b, v8.16b\n"
"uzp1 v21.16b, v7.16b, v6.16b\n"
"uzp1 v17.16b, v5.16b, v4.16b\n"
"uzp1 v20.16b, v3.16b, v2.16b\n"
- "uzp1 v19.16b, v1.16b, v0.16b\n"
- "uzp1 v16.16b, v23.16b, v16.16b\n"
+ "uzp1 v16.16b, v1.16b, v0.16b\n"
+ "uzp1 v19.16b, v23.16b, v19.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
- "uzp1 v16.16b, v20.16b, v19.16b\n"
+ "uzp1 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [%x[outptr], x27]\n"
+ "add x27, x27, #0x40\n"
"str q18, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
"str q17, [%x[outptr], x25]\n"
@@ -335,23 +335,23 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"movi v12.4s, #0x0\n"
"cbz x23, 11f\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
+ "ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
"saddl v17.8h, v31.8b, v30.8b\n"
"saddl2 v16.8h, v31.16b, v30.16b\n"
"ldp x21, x20, [x22, #0x0]\n"
+ "subs x23, x23, #0x1\n"
+ "add x22, x22, #0x10\n"
"ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
- "subs x23, x23, #0x1\n"
"saddw v15.4s, v15.4s, v17.4h\n"
"saddw2 v14.4s, v14.4s, v17.8h\n"
"saddw v13.4s, v13.4s, v16.4h\n"
"saddw2 v12.4s, v12.4s, v16.8h\n"
- "add x22, x22, #0x10\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
"saddl v17.8h, v31.8b, v30.8b\n"
@@ -365,30 +365,30 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
+ "subs x23, x23, #0x1\n"
"ldr q16, [x20, x27]\n"
"sxtl v17.8h, v16.8b\n"
"sxtl2 v16.8h, v16.16b\n"
- "subs x23, x23, #0x1\n"
"saddw v15.4s, v15.4s, v17.4h\n"
"saddw2 v14.4s, v14.4s, v17.8h\n"
"saddw v13.4s, v13.4s, v16.4h\n"
"saddw2 v12.4s, v12.4s, v16.8h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
- "ld1r { v16.4s }, [%x[shift_ptr]]\n"
- "sqdmulh v15.4s, v15.4s, v17.4s\n"
- "sqdmulh v14.4s, v14.4s, v17.4s\n"
- "sqdmulh v13.4s, v13.4s, v17.4s\n"
- "sqdmulh v12.4s, v12.4s, v17.4s\n"
+ "ld1r { v19.4s }, [%x[rescale_ptr]]\n"
+ "ld1r { v18.4s }, [%x[shift_ptr]]\n"
+ "movi v17.4s, #0x7f\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
"cmp %x[n_channels], #0x10\n"
- "movi v17.4s, #0x7f\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
"not v16.16b, v17.16b\n"
+ "sqdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqdmulh v12.4s, v12.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
"smax v15.4s, v15.4s, v16.4s\n"
"smax v14.4s, v14.4s, v16.4s\n"
"smax v13.4s, v13.4s, v16.4s\n"
@@ -416,10 +416,10 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"15:" // Oddments: 2 inputs loop
"ldp x21, x20, [x22, #0x0]\n"
"add x22, x22, #0x10\n"
- "add x21, x21, x27\n"
"movi v31.16b, #0x0\n"
- "add x20, x20, x27\n"
"movi v30.16b, #0x0\n"
+ "add x21, x21, x27\n"
+ "add x20, x20, x27\n"
"tbz %x[n_channels], #3, 19f\n"
"ldr d31, [x21], #0x8\n"
"ldr d30, [x20], #0x8\n"
@@ -493,8 +493,8 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"beq 34f\n"
"25:" // Oddments: Single input loop
"ldr x21, [x22], #0x8\n"
- "add x21, x21, x27\n"
"movi v31.16b, #0x0\n"
+ "add x21, x21, x27\n"
"tbz %x[n_channels], #3, 29f\n"
"ldr d31, [x21], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
@@ -549,18 +549,18 @@ void a64_s8_nhwc_avg_generic_depthfirst_impl(
"saddw2 v12.4s, v12.4s, v16.8h\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
- "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
- "ld1r { v16.4s }, [%x[shift_ptr]]\n"
- "sqdmulh v15.4s, v15.4s, v17.4s\n"
- "sqdmulh v14.4s, v14.4s, v17.4s\n"
- "sqdmulh v13.4s, v13.4s, v17.4s\n"
- "sqdmulh v12.4s, v12.4s, v17.4s\n"
+ "ld1r { v19.4s }, [%x[rescale_ptr]]\n"
+ "ld1r { v18.4s }, [%x[shift_ptr]]\n"
"movi v17.4s, #0x7f\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
"not v16.16b, v17.16b\n"
+ "sqdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqdmulh v12.4s, v12.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
"smax v15.4s, v15.4s, v16.4s\n"
"smax v14.4s, v14.4s, v16.4s\n"
"smax v13.4s, v13.4s, v16.4s\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 7e62ac1afc..eef399efbc 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -65,11 +65,11 @@ void a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
__asm__ __volatile__(
"ldr x16, [%x[args], %[offsetof_n_channels]]\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "cmp x16, #0x10\n"
"mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "mov x12, #0x0\n"
+ "cmp x16, #0x10\n"
+ "ldp x13, x12, [x21, #0x0]\n"
"ldp x11, x10, [x21, #0x10]\n"
"ldp x9, x28, [x20, #0x0]\n"
"ldp x27, x26, [x20, #0x10]\n"
@@ -80,14 +80,14 @@ void a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr q30, [x28, x15]\n"
"ldr q29, [x25, x15]\n"
"lsr x20, x16, #0x4\n"
- "sub x16, x16, x20, LSL #4\n"
"ldr q28, [x22, x15]\n"
"ldr q27, [x26, x15]\n"
- "subs x20, x20, #0x1\n"
"ldr q26, [x9, x15]\n"
"ldr q25, [x27, x15]\n"
"ldr q24, [x24, x15]\n"
"ldr q23, [x23, x15]\n"
+ "sub x16, x16, x20, LSL #4\n"
+ "subs x20, x20, #0x1\n"
"ldr q22, [x21, x15]\n"
"add x15, x15, #0x10\n"
"beq 2f\n"
@@ -107,62 +107,62 @@ void a64_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr q24, [x24, x15]\n"
"ldr q23, [x23, x15]\n"
"subs x20, x20, #0x1\n"
- "smax v19.16b, v21.16b, v19.16b\n"
"ldr q22, [x21, x15]\n"
+ "smax v19.16b, v21.16b, v19.16b\n"
"smax v18.16b, v18.16b, v21.16b\n"
- "smax v17.16b, v17.16b, v20.16b\n"
"add x15, x15, #0x10\n"
+ "smax v17.16b, v17.16b, v20.16b\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "str q19, [x14, x12]\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "str q19, [x13, x14]\n"
+ "str q18, [x12, x14]\n"
+ "str q17, [x11, x14]\n"
+ "str q16, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"smax v21.16b, v30.16b, v29.16b\n"
"smax v20.16b, v29.16b, v28.16b\n"
- "smax v16.16b, v27.16b, v26.16b\n"
+ "smax v19.16b, v27.16b, v26.16b\n"
"smax v18.16b, v25.16b, v24.16b\n"
"smax v17.16b, v27.16b, v23.16b\n"
- "smax v19.16b, v24.16b, v22.16b\n"
- "smax v16.16b, v21.16b, v16.16b\n"
+ "smax v16.16b, v24.16b, v22.16b\n"
+ "smax v19.16b, v21.16b, v19.16b\n"
"smax v18.16b, v18.16b, v21.16b\n"
- "str q16, [x14, x12]\n"
"smax v17.16b, v17.16b, v20.16b\n"
- "smax v16.16b, v20.16b, v19.16b\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "smax v16.16b, v20.16b, v16.16b\n"
+ "str q19, [x13, x14]\n"
+ "str q18, [x12, x14]\n"
+ "str q17, [x11, x14]\n"
+ "str q16, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
"cbz x16, 4f\n"
"3:" // Oddments
"ldr b16, [x28, x15]\n"
- "ldr b17, [x25, x15]\n"
- "smax v23.16b, v16.16b, v17.16b\n"
+ "ldr b24, [x25, x15]\n"
"subs x16, x16, #0x1\n"
- "ldr b16, [x22, x15]\n"
- "ldr b22, [x26, x15]\n"
- "smax v21.16b, v17.16b, v16.16b\n"
- "ldr b16, [x9, x15]\n"
- "ldr b17, [x27, x15]\n"
- "smax v16.16b, v22.16b, v16.16b\n"
- "smax v20.16b, v23.16b, v16.16b\n"
- "ldr b19, [x24, x15]\n"
- "ldr b16, [x23, x15]\n"
- "smax v18.16b, v17.16b, v19.16b\n"
- "smax v17.16b, v22.16b, v16.16b\n"
+ "ldr b20, [x22, x15]\n"
+ "ldr b23, [x26, x15]\n"
+ "ldr b19, [x9, x15]\n"
+ "ldr b18, [x27, x15]\n"
+ "ldr b22, [x24, x15]\n"
+ "ldr b17, [x23, x15]\n"
+ "smax v21.16b, v16.16b, v24.16b\n"
"ldr b16, [x21, x15]\n"
- "smax v16.16b, v19.16b, v16.16b\n"
+ "smax v20.16b, v24.16b, v20.16b\n"
"add x15, x15, #0x1\n"
- "smax v18.16b, v18.16b, v23.16b\n"
- "smax v17.16b, v17.16b, v21.16b\n"
- "smax v16.16b, v21.16b, v16.16b\n"
- "str b20, [x14, x12]\n"
- "str b18, [x13, x12]\n"
- "str b17, [x11, x12]\n"
- "str b16, [x10, x12]\n"
- "add x12, x12, #0x1\n"
+ "smax v19.16b, v23.16b, v19.16b\n"
+ "smax v18.16b, v18.16b, v22.16b\n"
+ "smax v17.16b, v23.16b, v17.16b\n"
+ "smax v16.16b, v22.16b, v16.16b\n"
+ "smax v19.16b, v21.16b, v19.16b\n"
+ "smax v18.16b, v18.16b, v21.16b\n"
+ "smax v17.16b, v17.16b, v20.16b\n"
+ "smax v16.16b, v20.16b, v16.16b\n"
+ "str b19, [x13, x14]\n"
+ "str b18, [x12, x14]\n"
+ "str b17, [x11, x14]\n"
+ "str b16, [x10, x14]\n"
+ "add x14, x14, #0x1\n"
"bgt 3b\n"
"4:" // End
:
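
In the 2x2 stride-1 s8 max kernel the change again only renames the offset registers and regroups the smax chain; the window-overlap trick is preserved: one 3x3 patch of inputs yields a 2x2 block of outputs, and the maxima of the shared middle inputs (v21 and v20 above) are computed once and reused by two windows each. A sketch of that sharing with NEON intrinsics; the patch layout is inferred from the register usage, not taken from the library:

#include <arm_neon.h>

// in[r][c] is a 3x3 patch of input vectors, out[i][j] the 2x2 output block;
// out[i][j] = max over in[i..i+1][j..j+1]. Middle-column partials are shared.
void max_2x2_s1_sketch(const int8x16_t in[3][3], int8x16_t out[2][2]) {
  int8x16_t top_mid = vmaxq_s8(in[0][1], in[1][1]); // reused by out[0][0..1]
  int8x16_t bot_mid = vmaxq_s8(in[1][1], in[2][1]); // reused by out[1][0..1]
  out[0][0] = vmaxq_s8(vmaxq_s8(in[0][0], in[1][0]), top_mid);
  out[0][1] = vmaxq_s8(vmaxq_s8(in[0][2], in[1][2]), top_mid);
  out[1][0] = vmaxq_s8(vmaxq_s8(in[1][0], in[2][0]), bot_mid);
  out[1][1] = vmaxq_s8(vmaxq_s8(in[1][2], in[2][2]), bot_mid);
}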
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
index 411fd11460..334d85bfb5 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,121 +41,121 @@ void a64_s8_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
+ "mov x9, #0x0\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "mov x27, #0x20\n" // cntb _, ALL, #2
+ "mov x26, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "movi v9.16b, #0x80\n"
"movi v8.16b, #0x80\n"
+ "mov x24, %x[inptrs]\n"
"movi v7.16b, #0x80\n"
- "mov x22, %x[inptrs]\n"
"movi v6.16b, #0x80\n"
- "movi v5.16b, #0x80\n"
"cbz x25, 4f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldr q2, [x21, x26]\n"
- "ldr q1, [x20, x26]\n"
- "ldr q0, [x21, x24]\n"
- "ldr q31, [x20, x24]\n"
- "ldr q30, [x21, x23]\n"
- "ldr q29, [x20, x23]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
+ "ldr q27, [x20, x27]\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "smax v22.16b, v2.16b, v1.16b\n"
- "ldr q2, [x21, x26]\n"
- "smax v18.16b, v27.16b, v21.16b\n"
- "ldr q1, [x20, x26]\n"
- "smax v21.16b, v0.16b, v31.16b\n"
- "ldr q0, [x21, x24]\n"
- "smax v17.16b, v26.16b, v20.16b\n"
- "ldr q31, [x20, x24]\n"
- "smax v20.16b, v30.16b, v29.16b\n"
- "ldr q30, [x21, x23]\n"
+ "smax v23.16b, v5.16b, v4.16b\n"
+ "smax v19.16b, v3.16b, v2.16b\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
+ "smax v22.16b, v1.16b, v0.16b\n"
+ "smax v18.16b, v31.16b, v30.16b\n"
+ "subs x25, x25, #0x1\n"
+ "add x24, x24, #0x20\n"
+ "smax v21.16b, v29.16b, v21.16b\n"
+ "smax v17.16b, v28.16b, v27.16b\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "smax v20.16b, v26.16b, v20.16b\n"
"smax v16.16b, v25.16b, v24.16b\n"
- "ldr q29, [x20, x23]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
"smax v19.16b, v23.16b, v19.16b\n"
"smax v18.16b, v22.16b, v18.16b\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
"smax v17.16b, v21.16b, v17.16b\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "subs x25, x25, #0x1\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "smax v7.16b, v7.16b, v18.16b\n"
- "smax v6.16b, v6.16b, v17.16b\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
- "smax v5.16b, v5.16b, v16.16b\n"
- "add x22, x22, #0x20\n"
+ "ldr q28, [x21, x27]\n"
+ "ldr q27, [x20, x27]\n"
+ "smax v9.16b, v9.16b, v19.16b\n"
+ "smax v8.16b, v8.16b, v18.16b\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "smax v7.16b, v7.16b, v17.16b\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
+ "smax v6.16b, v6.16b, v16.16b\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "smax v22.16b, v2.16b, v1.16b\n"
- "smax v18.16b, v27.16b, v21.16b\n"
- "smax v21.16b, v0.16b, v31.16b\n"
- "smax v17.16b, v26.16b, v20.16b\n"
- "smax v20.16b, v30.16b, v29.16b\n"
+ "smax v23.16b, v5.16b, v4.16b\n"
+ "smax v19.16b, v3.16b, v2.16b\n"
+ "smax v22.16b, v1.16b, v0.16b\n"
+ "smax v18.16b, v31.16b, v30.16b\n"
+ "smax v21.16b, v29.16b, v21.16b\n"
+ "smax v17.16b, v28.16b, v27.16b\n"
+ "smax v20.16b, v26.16b, v20.16b\n"
"smax v16.16b, v25.16b, v24.16b\n"
"smax v19.16b, v23.16b, v19.16b\n"
"smax v18.16b, v22.16b, v18.16b\n"
"smax v17.16b, v21.16b, v17.16b\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "smax v7.16b, v7.16b, v18.16b\n"
- "smax v6.16b, v6.16b, v17.16b\n"
- "smax v5.16b, v5.16b, v16.16b\n"
+ "smax v9.16b, v9.16b, v19.16b\n"
+ "smax v8.16b, v8.16b, v18.16b\n"
+ "smax v7.16b, v7.16b, v17.16b\n"
+ "smax v6.16b, v6.16b, v16.16b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v16.16b\n"
- "ldr q17, [x20, x26]\n"
- "ldr q16, [x20, x24]\n"
+ "ldr q19, [x20, x9]\n"
+ "ldr q18, [x20, x28]\n"
+ "ldr q17, [x20, x27]\n"
+ "ldr q16, [x20, x26]\n"
+ "smax v9.16b, v9.16b, v19.16b\n"
+ "smax v8.16b, v8.16b, v18.16b\n"
"smax v7.16b, v7.16b, v17.16b\n"
"smax v6.16b, v6.16b, v16.16b\n"
- "ldr q16, [x20, x23]\n"
- "smax v5.16b, v5.16b, v16.16b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x40\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x40\n"
"cmp %x[n_channels], #0x40\n"
- "str q8, [%x[outptr], x27]\n"
- "str q7, [%x[outptr], x26]\n"
+ "str q8, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
+ "str q7, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
+ "str q6, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
- "str q6, [%x[outptr], x24]\n"
- "add x24, x24, #0x40\n"
- "str q5, [%x[outptr], x23]\n"
- "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
@@ -163,272 +163,272 @@ void a64_s8_nhwc_max_generic_depthfirst_impl(
"blt 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x80\n"
- "mov x22, %x[inptrs]\n"
+ "movi v9.16b, #0x80\n"
+ "mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "smax v17.16b, v4.16b, v3.16b\n"
- "smax v16.16b, v28.16b, v22.16b\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "smax v16.16b, v17.16b, v16.16b\n"
- "ldp x21, x20, [x22, #0x10]\n"
+ "smax v17.16b, v5.16b, v4.16b\n"
+ "smax v16.16b, v3.16b, v2.16b\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "smax v8.16b, v8.16b, v16.16b\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "smax v16.16b, v17.16b, v16.16b\n"
+ "smax v9.16b, v9.16b, v16.16b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "smax v17.16b, v4.16b, v3.16b\n"
- "smax v16.16b, v28.16b, v22.16b\n"
+ "smax v17.16b, v5.16b, v4.16b\n"
+ "smax v16.16b, v3.16b, v2.16b\n"
"smax v16.16b, v17.16b, v16.16b\n"
- "smax v8.16b, v8.16b, v16.16b\n"
+ "smax v9.16b, v9.16b, v16.16b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v16.16b\n"
+ "ldr q16, [x20, x9]\n"
+ "smax v9.16b, v9.16b, v16.16b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x10\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x10\n"
"cmp %x[n_channels], #0x10\n"
- "str q8, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
"lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x27\n"
- "movi v8.16b, #0x80\n"
+ "add %x[outptr], %x[outptr], x9\n"
+ "movi v9.16b, #0x80\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 24f\n"
"15:" // Oddments: 4 inputs loop
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
+ "movi v5.16b, #0x0\n"
"movi v4.16b, #0x0\n"
"movi v3.16b, #0x0\n"
- "add x20, x20, x27\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "movi v2.16b, #0x0\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "ldr d22, [x20], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d2, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
- "ld1 { v3.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v22.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
+ "ld1 { v3.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
- "ld1 { v3.h }[6], [x22], #0x2\n"
- "ld1 { v28.h }[6], [x21], #0x2\n"
- "ld1 { v22.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
+ "ld1 { v4.h }[6], [x22], #0x2\n"
+ "ld1 { v3.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[14], [x23], #0x1\n"
- "ld1 { v3.b }[14], [x22], #0x1\n"
- "ld1 { v28.b }[14], [x21], #0x1\n"
- "ld1 { v22.b }[14], [x20], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
+ "ld1 { v4.b }[14], [x22], #0x1\n"
+ "ld1 { v3.b }[14], [x21], #0x1\n"
+ "ld1 { v2.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[12], [x23], #0x1\n"
- "ld1 { v3.b }[12], [x22], #0x1\n"
- "ld1 { v28.b }[12], [x21], #0x1\n"
- "ld1 { v22.b }[12], [x20], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
+ "ld1 { v4.b }[12], [x22], #0x1\n"
+ "ld1 { v3.b }[12], [x21], #0x1\n"
+ "ld1 { v2.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
- "ld1 { v3.h }[4], [x22], #0x2\n"
- "ld1 { v28.h }[4], [x21], #0x2\n"
- "ld1 { v22.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
+ "ld1 { v4.h }[4], [x22], #0x2\n"
+ "ld1 { v3.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[10], [x23], #0x1\n"
- "ld1 { v3.b }[10], [x22], #0x1\n"
- "ld1 { v28.b }[10], [x21], #0x1\n"
- "ld1 { v22.b }[10], [x20], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
+ "ld1 { v4.b }[10], [x22], #0x1\n"
+ "ld1 { v3.b }[10], [x21], #0x1\n"
+ "ld1 { v2.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[8], [x23], #0x1\n"
- "ld1 { v3.b }[8], [x22], #0x1\n"
- "ld1 { v28.b }[8], [x21], #0x1\n"
- "ld1 { v22.b }[8], [x20], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
+ "ld1 { v4.b }[8], [x22], #0x1\n"
+ "ld1 { v3.b }[8], [x21], #0x1\n"
+ "ld1 { v2.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "ldr s22, [x20], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s2, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[6], [x23], #0x1\n"
- "ld1 { v3.b }[6], [x22], #0x1\n"
- "ld1 { v28.b }[6], [x21], #0x1\n"
- "ld1 { v22.b }[6], [x20], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
+ "ld1 { v4.b }[6], [x22], #0x1\n"
+ "ld1 { v3.b }[6], [x21], #0x1\n"
+ "ld1 { v2.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[4], [x23], #0x1\n"
- "ld1 { v3.b }[4], [x22], #0x1\n"
- "ld1 { v28.b }[4], [x21], #0x1\n"
- "ld1 { v22.b }[4], [x20], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
+ "ld1 { v4.b }[4], [x22], #0x1\n"
+ "ld1 { v3.b }[4], [x21], #0x1\n"
+ "ld1 { v2.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "ldr h28, [x21], #0x2\n"
- "ldr h22, [x20], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "ldr h2, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[2], [x23], #0x1\n"
- "ld1 { v3.b }[2], [x22], #0x1\n"
- "ld1 { v28.b }[2], [x21], #0x1\n"
- "ld1 { v22.b }[2], [x20], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
+ "ld1 { v4.b }[2], [x22], #0x1\n"
+ "ld1 { v3.b }[2], [x21], #0x1\n"
+ "ld1 { v2.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b4, [x23], #0x1\n"
- "ldr b3, [x22], #0x1\n"
- "ldr b28, [x21], #0x1\n"
- "ldr b22, [x20], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
+ "ldr b4, [x22], #0x1\n"
+ "ldr b3, [x21], #0x1\n"
+ "ldr b2, [x20], #0x1\n"
"23:" // Oddments: 4 inputs loop: Load: Bit 3: End
- "smax v17.16b, v4.16b, v3.16b\n"
- "smax v16.16b, v28.16b, v22.16b\n"
+ "smax v17.16b, v5.16b, v4.16b\n"
+ "smax v16.16b, v3.16b, v2.16b\n"
"subs x25, x25, #0x1\n"
"smax v16.16b, v17.16b, v16.16b\n"
- "smax v8.16b, v8.16b, v16.16b\n"
+ "smax v9.16b, v9.16b, v16.16b\n"
"bgt 15b\n"
"24:" // Oddments: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
"ldr x23, [x24], #0x8\n"
- "add x23, x23, x27\n"
- "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "add x23, x23, x9\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[14], [x23], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[12], [x23], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[10], [x23], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[8], [x23], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[6], [x23], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[4], [x23], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h4, [x23], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[2], [x23], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b4, [x23], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
"subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v4.16b\n"
+ "smax v9.16b, v9.16b, v5.16b\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
"tbz %x[n_channels], #3, 38f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v9.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #2, 36f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[2], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #1, 35f\n"
- "st1 { v8.h }[6], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[6], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[14], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[14], [%x[outptr]], #0x1\n"
"b 42f\n"
"35:" // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[12], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[12], [%x[outptr]], #0x1\n"
"b 42f\n"
"36:" // Oddments: Store: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 37f\n"
- "st1 { v8.h }[4], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[4], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[10], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[10], [%x[outptr]], #0x1\n"
"b 42f\n"
"37:" // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[8], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[8], [%x[outptr]], #0x1\n"
"b 42f\n"
"38:" // Oddments: Store: Bit 3: Unset
"tbz %x[n_channels], #2, 40f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[0], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #1, 39f\n"
- "st1 { v8.h }[2], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[2], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[6], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[6], [%x[outptr]], #0x1\n"
"b 42f\n"
"39:" // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[4], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[4], [%x[outptr]], #0x1\n"
"b 42f\n"
"40:" // Oddments: Store: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 41f\n"
- "st1 { v8.h }[0], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[0], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[2], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[2], [%x[outptr]], #0x1\n"
"b 42f\n"
"41:" // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[0], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[0], [%x[outptr]], #0x1\n"
"42:" // Oddments: Store: Bit 3: End
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
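
The "Oddments" paths rewritten above handle a sub-16-byte channel tail by branching on individual bits of n_channels (tbz) and loading 8-, 4-, 2- and 1-byte pieces into a zeroed vector, so no byte past the tail is ever touched. A C++ model of that load, presented as an equivalent rather than the generated code:

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Assemble a 1..15-byte tail exactly as the tbz ladder does: test each bit
// of n_channels and copy the corresponding power-of-two chunk.
int8x16_t load_tail_s8_sketch(const int8_t *src, size_t n_channels) {
  int8_t buf[16] = {0};                                             // movi v.16b, #0x0
  size_t n = 0;
  if (n_channels & 8) { std::memcpy(buf + n, src + n, 8); n += 8; } // tbz ..., #3
  if (n_channels & 4) { std::memcpy(buf + n, src + n, 4); n += 4; } // tbz ..., #2
  if (n_channels & 2) { std::memcpy(buf + n, src + n, 2); n += 2; } // tbz ..., #1
  if (n_channels & 1) { std::memcpy(buf + n, src + n, 1); }         // tbz ..., #0
  return vld1q_s8(buf);
}

The mirrored tbz ladder of st1 lane stores at the end of the kernel writes the result back under the same no-overrun guarantee.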
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
index 019f402911..60135a42d5 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -141,9 +141,9 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"movi v0.4s, #0x0\n"
"cbz x23, 4f\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
+ "ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
"ldr q29, [x21, x26]\n"
"ldr q28, [x20, x26]\n"
@@ -156,26 +156,26 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"saddl v23.8h, v31.8b, v30.8b\n"
"saddl2 v22.8h, v31.16b, v30.16b\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
- "ldr q30, [x20, x27]\n"
+ "subs x23, x23, #0x1\n"
"saddl v21.8h, v29.8b, v28.8b\n"
"saddl2 v20.8h, v29.16b, v28.16b\n"
- "ldr q29, [x21, x26]\n"
- "ldr q28, [x20, x26]\n"
+ "add x22, x22, #0x10\n"
"saddl v19.8h, v27.8b, v26.8b\n"
"saddl2 v18.8h, v27.16b, v26.16b\n"
- "ldr q27, [x21, x25]\n"
- "ldr q26, [x20, x25]\n"
+ "ldr q31, [x21, x27]\n"
+ "ldr q30, [x20, x27]\n"
+ "ldr q29, [x21, x26]\n"
+ "ldr q28, [x20, x26]\n"
"saddl v17.8h, v25.8b, v24.8b\n"
"saddl2 v16.8h, v25.16b, v24.16b\n"
- "ldr q25, [x21, x24]\n"
- "ldr q24, [x20, x24]\n"
- "subs x23, x23, #0x1\n"
+ "ldr q27, [x21, x25]\n"
+ "ldr q26, [x20, x25]\n"
"saddw v15.4s, v15.4s, v23.4h\n"
"saddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q25, [x21, x24]\n"
+ "ldr q24, [x20, x24]\n"
"saddw v13.4s, v13.4s, v22.4h\n"
"saddw2 v12.4s, v12.4s, v22.8h\n"
- "add x22, x22, #0x10\n"
"saddw v11.4s, v11.4s, v21.4h\n"
"saddw2 v10.4s, v10.4s, v21.8h\n"
"saddw v9.4s, v9.4s, v20.4h\n"
@@ -219,17 +219,17 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
- "sxtl v23.8h, v16.8b\n"
- "sxtl2 v22.8h, v16.16b\n"
- "ldr q16, [x20, x26]\n"
+ "subs x23, x23, #0x1\n"
+ "ldr q19, [x20, x27]\n"
+ "ldr q18, [x20, x26]\n"
"ldr q17, [x20, x25]\n"
- "sxtl v21.8h, v16.8b\n"
- "sxtl2 v20.8h, v16.16b\n"
"ldr q16, [x20, x24]\n"
+ "sxtl v23.8h, v19.8b\n"
+ "sxtl2 v22.8h, v19.16b\n"
+ "sxtl v21.8h, v18.8b\n"
+ "sxtl2 v20.8h, v18.16b\n"
"sxtl v19.8h, v17.8b\n"
"sxtl2 v18.8h, v17.16b\n"
- "subs x23, x23, #0x1\n"
"sxtl v17.8h, v16.8b\n"
"sxtl2 v16.8h, v16.16b\n"
"saddw v15.4s, v15.4s, v23.4h\n"
@@ -250,61 +250,61 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"saddw2 v0.4s, v0.4s, v16.8h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1r { v18.4s }, [%x[left_shift]]\n"
- "ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v14.4s, v14.4s, v18.4s\n"
- "ld1r { v16.4s }, [%x[right_shift]]\n"
- "srshl v13.4s, v13.4s, v18.4s\n"
- "srshl v12.4s, v12.4s, v18.4s\n"
+ "ld1r { v20.4s }, [%x[left_shift]]\n"
+ "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
+ "movi v18.4s, #0x7f\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
- "srshl v11.4s, v11.4s, v18.4s\n"
- "srshl v10.4s, v10.4s, v18.4s\n"
+ "ld1r { v17.4s }, [%x[right_shift]]\n"
"cmp %x[n_channels], #0x40\n"
- "srshl v9.4s, v9.4s, v18.4s\n"
- "srshl v8.4s, v8.4s, v18.4s\n"
- "srshl v7.4s, v7.4s, v18.4s\n"
- "srshl v6.4s, v6.4s, v18.4s\n"
- "srshl v5.4s, v5.4s, v18.4s\n"
- "srshl v4.4s, v4.4s, v18.4s\n"
- "srshl v3.4s, v3.4s, v18.4s\n"
- "srshl v2.4s, v2.4s, v18.4s\n"
- "srshl v1.4s, v1.4s, v18.4s\n"
- "srshl v0.4s, v0.4s, v18.4s\n"
- "sqrdmulh v15.4s, v15.4s, v17.4s\n"
- "sqrdmulh v14.4s, v14.4s, v17.4s\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "sqrdmulh v12.4s, v12.4s, v17.4s\n"
- "sqrdmulh v11.4s, v11.4s, v17.4s\n"
- "sqrdmulh v10.4s, v10.4s, v17.4s\n"
- "sqrdmulh v9.4s, v9.4s, v17.4s\n"
- "sqrdmulh v8.4s, v8.4s, v17.4s\n"
- "sqrdmulh v7.4s, v7.4s, v17.4s\n"
- "sqrdmulh v6.4s, v6.4s, v17.4s\n"
- "sqrdmulh v5.4s, v5.4s, v17.4s\n"
- "sqrdmulh v4.4s, v4.4s, v17.4s\n"
- "sqrdmulh v3.4s, v3.4s, v17.4s\n"
- "sqrdmulh v2.4s, v2.4s, v17.4s\n"
- "sqrdmulh v1.4s, v1.4s, v17.4s\n"
- "sqrdmulh v0.4s, v0.4s, v17.4s\n"
- "movi v17.4s, #0x7f\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
- "srshl v11.4s, v11.4s, v16.4s\n"
- "srshl v10.4s, v10.4s, v16.4s\n"
- "srshl v9.4s, v9.4s, v16.4s\n"
- "srshl v8.4s, v8.4s, v16.4s\n"
- "srshl v7.4s, v7.4s, v16.4s\n"
- "srshl v6.4s, v6.4s, v16.4s\n"
- "srshl v5.4s, v5.4s, v16.4s\n"
- "srshl v4.4s, v4.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v16.4s\n"
- "srshl v2.4s, v2.4s, v16.4s\n"
- "srshl v1.4s, v1.4s, v16.4s\n"
- "srshl v0.4s, v0.4s, v16.4s\n"
- "not v16.16b, v17.16b\n"
+ "not v16.16b, v18.16b\n"
+ "srshl v15.4s, v15.4s, v20.4s\n"
+ "srshl v14.4s, v14.4s, v20.4s\n"
+ "srshl v13.4s, v13.4s, v20.4s\n"
+ "srshl v12.4s, v12.4s, v20.4s\n"
+ "srshl v11.4s, v11.4s, v20.4s\n"
+ "srshl v10.4s, v10.4s, v20.4s\n"
+ "srshl v9.4s, v9.4s, v20.4s\n"
+ "srshl v8.4s, v8.4s, v20.4s\n"
+ "srshl v7.4s, v7.4s, v20.4s\n"
+ "srshl v6.4s, v6.4s, v20.4s\n"
+ "srshl v5.4s, v5.4s, v20.4s\n"
+ "srshl v4.4s, v4.4s, v20.4s\n"
+ "srshl v3.4s, v3.4s, v20.4s\n"
+ "srshl v2.4s, v2.4s, v20.4s\n"
+ "srshl v1.4s, v1.4s, v20.4s\n"
+ "srshl v0.4s, v0.4s, v20.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqrdmulh v12.4s, v12.4s, v19.4s\n"
+ "sqrdmulh v11.4s, v11.4s, v19.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v19.4s\n"
+ "sqrdmulh v9.4s, v9.4s, v19.4s\n"
+ "sqrdmulh v8.4s, v8.4s, v19.4s\n"
+ "sqrdmulh v7.4s, v7.4s, v19.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v19.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v19.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v19.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v19.4s\n"
+ "sqrdmulh v2.4s, v2.4s, v19.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v19.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v17.4s\n"
+ "srshl v14.4s, v14.4s, v17.4s\n"
+ "srshl v13.4s, v13.4s, v17.4s\n"
+ "srshl v12.4s, v12.4s, v17.4s\n"
+ "srshl v11.4s, v11.4s, v17.4s\n"
+ "srshl v10.4s, v10.4s, v17.4s\n"
+ "srshl v9.4s, v9.4s, v17.4s\n"
+ "srshl v8.4s, v8.4s, v17.4s\n"
+ "srshl v7.4s, v7.4s, v17.4s\n"
+ "srshl v6.4s, v6.4s, v17.4s\n"
+ "srshl v5.4s, v5.4s, v17.4s\n"
+ "srshl v4.4s, v4.4s, v17.4s\n"
+ "srshl v3.4s, v3.4s, v17.4s\n"
+ "srshl v2.4s, v2.4s, v17.4s\n"
+ "srshl v1.4s, v1.4s, v17.4s\n"
+ "srshl v0.4s, v0.4s, v17.4s\n"
"smax v15.4s, v15.4s, v16.4s\n"
"smax v14.4s, v14.4s, v16.4s\n"
"smax v13.4s, v13.4s, v16.4s\n"
@@ -321,36 +321,36 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"smax v2.4s, v2.4s, v16.4s\n"
"smax v1.4s, v1.4s, v16.4s\n"
"smax v0.4s, v0.4s, v16.4s\n"
- "smin v15.4s, v15.4s, v17.4s\n"
- "smin v14.4s, v14.4s, v17.4s\n"
- "smin v13.4s, v13.4s, v17.4s\n"
- "smin v12.4s, v12.4s, v17.4s\n"
- "smin v11.4s, v11.4s, v17.4s\n"
- "smin v10.4s, v10.4s, v17.4s\n"
- "smin v9.4s, v9.4s, v17.4s\n"
- "smin v8.4s, v8.4s, v17.4s\n"
- "smin v7.4s, v7.4s, v17.4s\n"
- "smin v6.4s, v6.4s, v17.4s\n"
- "smin v5.4s, v5.4s, v17.4s\n"
- "smin v4.4s, v4.4s, v17.4s\n"
- "smin v3.4s, v3.4s, v17.4s\n"
- "smin v2.4s, v2.4s, v17.4s\n"
- "smin v1.4s, v1.4s, v17.4s\n"
- "smin v0.4s, v0.4s, v17.4s\n"
+ "smin v15.4s, v15.4s, v18.4s\n"
+ "smin v14.4s, v14.4s, v18.4s\n"
+ "smin v13.4s, v13.4s, v18.4s\n"
+ "smin v12.4s, v12.4s, v18.4s\n"
+ "smin v11.4s, v11.4s, v18.4s\n"
+ "smin v10.4s, v10.4s, v18.4s\n"
+ "smin v9.4s, v9.4s, v18.4s\n"
+ "smin v8.4s, v8.4s, v18.4s\n"
+ "smin v7.4s, v7.4s, v18.4s\n"
+ "smin v6.4s, v6.4s, v18.4s\n"
+ "smin v5.4s, v5.4s, v18.4s\n"
+ "smin v4.4s, v4.4s, v18.4s\n"
+ "smin v3.4s, v3.4s, v18.4s\n"
+ "smin v2.4s, v2.4s, v18.4s\n"
+ "smin v1.4s, v1.4s, v18.4s\n"
+ "smin v0.4s, v0.4s, v18.4s\n"
"uzp1 v23.16b, v15.16b, v14.16b\n"
- "uzp1 v16.16b, v13.16b, v12.16b\n"
+ "uzp1 v19.16b, v13.16b, v12.16b\n"
"uzp1 v22.16b, v11.16b, v10.16b\n"
"uzp1 v18.16b, v9.16b, v8.16b\n"
"uzp1 v21.16b, v7.16b, v6.16b\n"
"uzp1 v17.16b, v5.16b, v4.16b\n"
"uzp1 v20.16b, v3.16b, v2.16b\n"
- "uzp1 v19.16b, v1.16b, v0.16b\n"
- "uzp1 v16.16b, v23.16b, v16.16b\n"
+ "uzp1 v16.16b, v1.16b, v0.16b\n"
+ "uzp1 v19.16b, v23.16b, v19.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
- "uzp1 v16.16b, v20.16b, v19.16b\n"
+ "uzp1 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [%x[outptr], x27]\n"
+ "add x27, x27, #0x40\n"
"str q18, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
"str q17, [%x[outptr], x25]\n"
@@ -371,23 +371,23 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"movi v12.4s, #0x0\n"
"cbz x23, 11f\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
+ "ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
"saddl v17.8h, v31.8b, v30.8b\n"
"saddl2 v16.8h, v31.16b, v30.16b\n"
"ldp x21, x20, [x22, #0x0]\n"
+ "subs x23, x23, #0x1\n"
+ "add x22, x22, #0x10\n"
"ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
- "subs x23, x23, #0x1\n"
"saddw v15.4s, v15.4s, v17.4h\n"
"saddw2 v14.4s, v14.4s, v17.8h\n"
"saddw v13.4s, v13.4s, v16.4h\n"
"saddw2 v12.4s, v12.4s, v16.8h\n"
- "add x22, x22, #0x10\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
"saddl v17.8h, v31.8b, v30.8b\n"
@@ -401,43 +401,43 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
+ "subs x23, x23, #0x1\n"
"ldr q16, [x20, x27]\n"
"sxtl v17.8h, v16.8b\n"
"sxtl2 v16.8h, v16.16b\n"
- "subs x23, x23, #0x1\n"
"saddw v15.4s, v15.4s, v17.4h\n"
"saddw2 v14.4s, v14.4s, v17.8h\n"
"saddw v13.4s, v13.4s, v16.4h\n"
"saddw2 v12.4s, v12.4s, v16.8h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1r { v18.4s }, [%x[left_shift]]\n"
- "ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v14.4s, v14.4s, v18.4s\n"
- "ld1r { v16.4s }, [%x[right_shift]]\n"
- "srshl v13.4s, v13.4s, v18.4s\n"
- "srshl v12.4s, v12.4s, v18.4s\n"
+ "ld1r { v20.4s }, [%x[left_shift]]\n"
+ "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
+ "movi v18.4s, #0x7f\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
- "sqrdmulh v15.4s, v15.4s, v17.4s\n"
- "sqrdmulh v14.4s, v14.4s, v17.4s\n"
+ "ld1r { v17.4s }, [%x[right_shift]]\n"
"cmp %x[n_channels], #0x10\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "sqrdmulh v12.4s, v12.4s, v17.4s\n"
- "movi v17.4s, #0x7f\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
- "not v16.16b, v17.16b\n"
+ "not v16.16b, v18.16b\n"
+ "srshl v15.4s, v15.4s, v20.4s\n"
+ "srshl v14.4s, v14.4s, v20.4s\n"
+ "srshl v13.4s, v13.4s, v20.4s\n"
+ "srshl v12.4s, v12.4s, v20.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqrdmulh v12.4s, v12.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v17.4s\n"
+ "srshl v14.4s, v14.4s, v17.4s\n"
+ "srshl v13.4s, v13.4s, v17.4s\n"
+ "srshl v12.4s, v12.4s, v17.4s\n"
"smax v15.4s, v15.4s, v16.4s\n"
"smax v14.4s, v14.4s, v16.4s\n"
"smax v13.4s, v13.4s, v16.4s\n"
"smax v12.4s, v12.4s, v16.4s\n"
- "smin v15.4s, v15.4s, v17.4s\n"
- "smin v14.4s, v14.4s, v17.4s\n"
- "smin v13.4s, v13.4s, v17.4s\n"
- "smin v12.4s, v12.4s, v17.4s\n"
+ "smin v15.4s, v15.4s, v18.4s\n"
+ "smin v14.4s, v14.4s, v18.4s\n"
+ "smin v13.4s, v13.4s, v18.4s\n"
+ "smin v12.4s, v12.4s, v18.4s\n"
"uzp1 v17.16b, v15.16b, v14.16b\n"
"uzp1 v16.16b, v13.16b, v12.16b\n"
"uzp1 v16.16b, v17.16b, v16.16b\n"
@@ -457,10 +457,10 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"15:" // Oddments: 2 inputs loop
"ldp x21, x20, [x22, #0x0]\n"
"add x22, x22, #0x10\n"
- "add x21, x21, x27\n"
"movi v31.16b, #0x0\n"
- "add x20, x20, x27\n"
"movi v30.16b, #0x0\n"
+ "add x21, x21, x27\n"
+ "add x20, x20, x27\n"
"tbz %x[n_channels], #3, 19f\n"
"ldr d31, [x21], #0x8\n"
"ldr d30, [x20], #0x8\n"
@@ -534,8 +534,8 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"beq 34f\n"
"25:" // Oddments: Single input loop
"ldr x21, [x22], #0x8\n"
- "add x21, x21, x27\n"
"movi v31.16b, #0x0\n"
+ "add x21, x21, x27\n"
"tbz %x[n_channels], #3, 29f\n"
"ldr d31, [x21], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
@@ -590,31 +590,31 @@ void a64_s8q_nhwc_avg_generic_depthfirst_impl(
"saddw2 v12.4s, v12.4s, v16.8h\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
- "ld1r { v18.4s }, [%x[left_shift]]\n"
- "ld1r { v17.4s }, [%x[combined_rescale_value]]\n"
- "srshl v15.4s, v15.4s, v18.4s\n"
- "srshl v14.4s, v14.4s, v18.4s\n"
- "ld1r { v16.4s }, [%x[right_shift]]\n"
- "srshl v13.4s, v13.4s, v18.4s\n"
- "srshl v12.4s, v12.4s, v18.4s\n"
- "sqrdmulh v15.4s, v15.4s, v17.4s\n"
- "sqrdmulh v14.4s, v14.4s, v17.4s\n"
- "sqrdmulh v13.4s, v13.4s, v17.4s\n"
- "sqrdmulh v12.4s, v12.4s, v17.4s\n"
- "movi v17.4s, #0x7f\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
- "not v16.16b, v17.16b\n"
+ "ld1r { v20.4s }, [%x[left_shift]]\n"
+ "ld1r { v19.4s }, [%x[combined_rescale_value]]\n"
+ "movi v18.4s, #0x7f\n"
+ "ld1r { v17.4s }, [%x[right_shift]]\n"
+ "not v16.16b, v18.16b\n"
+ "srshl v15.4s, v15.4s, v20.4s\n"
+ "srshl v14.4s, v14.4s, v20.4s\n"
+ "srshl v13.4s, v13.4s, v20.4s\n"
+ "srshl v12.4s, v12.4s, v20.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqrdmulh v12.4s, v12.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v17.4s\n"
+ "srshl v14.4s, v14.4s, v17.4s\n"
+ "srshl v13.4s, v13.4s, v17.4s\n"
+ "srshl v12.4s, v12.4s, v17.4s\n"
"smax v15.4s, v15.4s, v16.4s\n"
"smax v14.4s, v14.4s, v16.4s\n"
"smax v13.4s, v13.4s, v16.4s\n"
"smax v12.4s, v12.4s, v16.4s\n"
- "smin v15.4s, v15.4s, v17.4s\n"
- "smin v14.4s, v14.4s, v17.4s\n"
- "smin v13.4s, v13.4s, v17.4s\n"
- "smin v12.4s, v12.4s, v17.4s\n"
+ "smin v15.4s, v15.4s, v18.4s\n"
+ "smin v14.4s, v14.4s, v18.4s\n"
+ "smin v13.4s, v13.4s, v18.4s\n"
+ "smin v12.4s, v12.4s, v18.4s\n"
"uzp1 v17.16b, v15.16b, v14.16b\n"
"uzp1 v16.16b, v13.16b, v12.16b\n"
"uzp1 v16.16b, v17.16b, v16.16b\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
index f7b8dc761c..797a8f9235 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,245 +43,245 @@ void a64_s8q_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
+ "mov x9, #0x0\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "mov x27, #0x20\n" // cntb _, ALL, #2
+ "mov x26, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "movi v9.16b, #0x80\n"
"movi v8.16b, #0x80\n"
+ "mov x24, %x[inptrs]\n"
"movi v7.16b, #0x80\n"
- "mov x22, %x[inptrs]\n"
"movi v6.16b, #0x80\n"
- "movi v5.16b, #0x80\n"
"cbz x25, 4f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldr q2, [x21, x26]\n"
- "ldr q1, [x20, x26]\n"
- "ldr q0, [x21, x24]\n"
- "ldr q31, [x20, x24]\n"
- "ldr q30, [x21, x23]\n"
- "ldr q29, [x20, x23]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
+ "ldr q27, [x20, x27]\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "smax v22.16b, v2.16b, v1.16b\n"
- "ldr q2, [x21, x26]\n"
- "smax v18.16b, v27.16b, v21.16b\n"
- "ldr q1, [x20, x26]\n"
- "smax v21.16b, v0.16b, v31.16b\n"
- "ldr q0, [x21, x24]\n"
- "smax v17.16b, v26.16b, v20.16b\n"
- "ldr q31, [x20, x24]\n"
- "smax v20.16b, v30.16b, v29.16b\n"
- "ldr q30, [x21, x23]\n"
+ "smax v23.16b, v5.16b, v4.16b\n"
+ "smax v19.16b, v3.16b, v2.16b\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
+ "smax v22.16b, v1.16b, v0.16b\n"
+ "smax v18.16b, v31.16b, v30.16b\n"
+ "subs x25, x25, #0x1\n"
+ "add x24, x24, #0x20\n"
+ "smax v21.16b, v29.16b, v21.16b\n"
+ "smax v17.16b, v28.16b, v27.16b\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "smax v20.16b, v26.16b, v20.16b\n"
"smax v16.16b, v25.16b, v24.16b\n"
- "ldr q29, [x20, x23]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
"smax v19.16b, v23.16b, v19.16b\n"
"smax v18.16b, v22.16b, v18.16b\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
"smax v17.16b, v21.16b, v17.16b\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "subs x25, x25, #0x1\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "smax v7.16b, v7.16b, v18.16b\n"
- "smax v6.16b, v6.16b, v17.16b\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
- "smax v5.16b, v5.16b, v16.16b\n"
- "add x22, x22, #0x20\n"
+ "ldr q28, [x21, x27]\n"
+ "ldr q27, [x20, x27]\n"
+ "smax v9.16b, v9.16b, v19.16b\n"
+ "smax v8.16b, v8.16b, v18.16b\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "smax v7.16b, v7.16b, v17.16b\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
+ "smax v6.16b, v6.16b, v16.16b\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "smax v23.16b, v4.16b, v3.16b\n"
- "smax v19.16b, v28.16b, v22.16b\n"
- "smax v22.16b, v2.16b, v1.16b\n"
- "smax v18.16b, v27.16b, v21.16b\n"
- "smax v21.16b, v0.16b, v31.16b\n"
- "smax v17.16b, v26.16b, v20.16b\n"
- "smax v20.16b, v30.16b, v29.16b\n"
+ "smax v23.16b, v5.16b, v4.16b\n"
+ "smax v19.16b, v3.16b, v2.16b\n"
+ "smax v22.16b, v1.16b, v0.16b\n"
+ "smax v18.16b, v31.16b, v30.16b\n"
+ "smax v21.16b, v29.16b, v21.16b\n"
+ "smax v17.16b, v28.16b, v27.16b\n"
+ "smax v20.16b, v26.16b, v20.16b\n"
"smax v16.16b, v25.16b, v24.16b\n"
"smax v19.16b, v23.16b, v19.16b\n"
"smax v18.16b, v22.16b, v18.16b\n"
"smax v17.16b, v21.16b, v17.16b\n"
"smax v16.16b, v20.16b, v16.16b\n"
- "smax v8.16b, v8.16b, v19.16b\n"
- "smax v7.16b, v7.16b, v18.16b\n"
- "smax v6.16b, v6.16b, v17.16b\n"
- "smax v5.16b, v5.16b, v16.16b\n"
+ "smax v9.16b, v9.16b, v19.16b\n"
+ "smax v8.16b, v8.16b, v18.16b\n"
+ "smax v7.16b, v7.16b, v17.16b\n"
+ "smax v6.16b, v6.16b, v16.16b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v16.16b\n"
- "ldr q17, [x20, x26]\n"
- "ldr q16, [x20, x24]\n"
+ "ldr q19, [x20, x9]\n"
+ "ldr q18, [x20, x28]\n"
+ "ldr q17, [x20, x27]\n"
+ "ldr q16, [x20, x26]\n"
+ "smax v9.16b, v9.16b, v19.16b\n"
+ "smax v8.16b, v8.16b, v18.16b\n"
"smax v7.16b, v7.16b, v17.16b\n"
"smax v6.16b, v6.16b, v16.16b\n"
- "ldr q16, [x20, x23]\n"
- "smax v5.16b, v5.16b, v16.16b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "sxtl v23.8h, v8.8b\n"
- "sxtl2 v22.8h, v8.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v4.4s }, [x20]\n"
- "sxtl v21.8h, v7.8b\n"
- "sxtl2 v18.8h, v7.16b\n"
+ "sxtl v23.8h, v9.8b\n"
+ "sxtl2 v19.8h, v9.16b\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1r { v4.4s }, [x21]\n"
"ld1r { v3.4s }, [x20]\n"
- "sxtl v20.8h, v6.8b\n"
- "sxtl2 v19.8h, v6.16b\n"
+ "sxtl v22.8h, v8.8b\n"
+ "sxtl2 v18.8h, v8.16b\n"
+ "sxtl v21.8h, v7.8b\n"
+ "sxtl2 v20.8h, v7.16b\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v2.4s }, [x20]\n"
- "sxtl v17.8h, v5.8b\n"
- "sxtl2 v16.8h, v5.16b\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
+ "ld1r { v2.4s }, [x20]\n"
+ "sxtl v17.8h, v6.8b\n"
+ "sxtl2 v16.8h, v6.16b\n"
"cmp %x[n_channels], #0x40\n"
"sxtl v1.4s, v23.4h\n"
"sxtl2 v23.4s, v23.8h\n"
- "sxtl v0.4s, v22.4h\n"
- "sxtl2 v31.4s, v22.8h\n"
- "sxtl v30.4s, v21.4h\n"
- "sxtl2 v22.4s, v21.8h\n"
- "sxtl v29.4s, v18.4h\n"
+ "sxtl v0.4s, v19.4h\n"
+ "sxtl2 v19.4s, v19.8h\n"
+ "sxtl v31.4s, v22.4h\n"
+ "sxtl2 v22.4s, v22.8h\n"
+ "sxtl v30.4s, v18.4h\n"
"sxtl2 v18.4s, v18.8h\n"
+ "sxtl v29.4s, v21.4h\n"
+ "sxtl2 v21.4s, v21.8h\n"
"sxtl v28.4s, v20.4h\n"
- "sxtl2 v21.4s, v20.8h\n"
- "sxtl v27.4s, v19.4h\n"
- "sxtl2 v26.4s, v19.8h\n"
- "sxtl v25.4s, v17.4h\n"
+ "sxtl2 v27.4s, v20.8h\n"
+ "sxtl v26.4s, v17.4h\n"
"sxtl2 v20.4s, v17.8h\n"
- "sxtl v24.4s, v16.4h\n"
- "sxtl2 v19.4s, v16.8h\n"
+ "sxtl v25.4s, v16.4h\n"
+ "sxtl2 v24.4s, v16.8h\n"
"srshl v1.4s, v1.4s, v4.4s\n"
"srshl v23.4s, v23.4s, v4.4s\n"
"srshl v0.4s, v0.4s, v4.4s\n"
+ "srshl v19.4s, v19.4s, v4.4s\n"
"srshl v31.4s, v31.4s, v4.4s\n"
- "srshl v30.4s, v30.4s, v4.4s\n"
"srshl v22.4s, v22.4s, v4.4s\n"
- "srshl v29.4s, v29.4s, v4.4s\n"
+ "srshl v30.4s, v30.4s, v4.4s\n"
"srshl v18.4s, v18.4s, v4.4s\n"
- "srshl v28.4s, v28.4s, v4.4s\n"
+ "srshl v29.4s, v29.4s, v4.4s\n"
"srshl v21.4s, v21.4s, v4.4s\n"
+ "srshl v28.4s, v28.4s, v4.4s\n"
"srshl v27.4s, v27.4s, v4.4s\n"
"srshl v26.4s, v26.4s, v4.4s\n"
- "srshl v25.4s, v25.4s, v4.4s\n"
"srshl v20.4s, v20.4s, v4.4s\n"
+ "srshl v25.4s, v25.4s, v4.4s\n"
"srshl v24.4s, v24.4s, v4.4s\n"
- "srshl v19.4s, v19.4s, v4.4s\n"
"sqrdmulh v1.4s, v1.4s, v3.4s\n"
"sqrdmulh v23.4s, v23.4s, v3.4s\n"
"sqrdmulh v0.4s, v0.4s, v3.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v3.4s\n"
"sqrdmulh v31.4s, v31.4s, v3.4s\n"
- "sqrdmulh v30.4s, v30.4s, v3.4s\n"
"sqrdmulh v22.4s, v22.4s, v3.4s\n"
- "sqrdmulh v29.4s, v29.4s, v3.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v3.4s\n"
"sqrdmulh v18.4s, v18.4s, v3.4s\n"
- "sqrdmulh v28.4s, v28.4s, v3.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v3.4s\n"
"sqrdmulh v21.4s, v21.4s, v3.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v3.4s\n"
"sqrdmulh v27.4s, v27.4s, v3.4s\n"
"sqrdmulh v26.4s, v26.4s, v3.4s\n"
- "sqrdmulh v25.4s, v25.4s, v3.4s\n"
"sqrdmulh v20.4s, v20.4s, v3.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v3.4s\n"
"sqrdmulh v24.4s, v24.4s, v3.4s\n"
- "sqrdmulh v19.4s, v19.4s, v3.4s\n"
"movi v17.4s, #0x7f\n"
"srshl v1.4s, v1.4s, v2.4s\n"
"srshl v23.4s, v23.4s, v2.4s\n"
"srshl v0.4s, v0.4s, v2.4s\n"
+ "srshl v19.4s, v19.4s, v2.4s\n"
"srshl v31.4s, v31.4s, v2.4s\n"
- "srshl v30.4s, v30.4s, v2.4s\n"
"srshl v22.4s, v22.4s, v2.4s\n"
- "srshl v29.4s, v29.4s, v2.4s\n"
+ "srshl v30.4s, v30.4s, v2.4s\n"
"srshl v18.4s, v18.4s, v2.4s\n"
- "srshl v28.4s, v28.4s, v2.4s\n"
+ "srshl v29.4s, v29.4s, v2.4s\n"
"srshl v21.4s, v21.4s, v2.4s\n"
+ "srshl v28.4s, v28.4s, v2.4s\n"
"srshl v27.4s, v27.4s, v2.4s\n"
"srshl v26.4s, v26.4s, v2.4s\n"
- "srshl v25.4s, v25.4s, v2.4s\n"
"srshl v20.4s, v20.4s, v2.4s\n"
+ "srshl v25.4s, v25.4s, v2.4s\n"
"srshl v24.4s, v24.4s, v2.4s\n"
- "srshl v19.4s, v19.4s, v2.4s\n"
"not v16.16b, v17.16b\n"
"smax v1.4s, v1.4s, v16.4s\n"
"smax v23.4s, v23.4s, v16.4s\n"
"smax v0.4s, v0.4s, v16.4s\n"
+ "smax v19.4s, v19.4s, v16.4s\n"
"smax v31.4s, v31.4s, v16.4s\n"
- "smax v30.4s, v30.4s, v16.4s\n"
"smax v22.4s, v22.4s, v16.4s\n"
- "smax v29.4s, v29.4s, v16.4s\n"
+ "smax v30.4s, v30.4s, v16.4s\n"
"smax v18.4s, v18.4s, v16.4s\n"
- "smax v28.4s, v28.4s, v16.4s\n"
+ "smax v29.4s, v29.4s, v16.4s\n"
"smax v21.4s, v21.4s, v16.4s\n"
+ "smax v28.4s, v28.4s, v16.4s\n"
"smax v27.4s, v27.4s, v16.4s\n"
"smax v26.4s, v26.4s, v16.4s\n"
- "smax v25.4s, v25.4s, v16.4s\n"
"smax v20.4s, v20.4s, v16.4s\n"
+ "smax v25.4s, v25.4s, v16.4s\n"
"smax v24.4s, v24.4s, v16.4s\n"
- "smax v19.4s, v19.4s, v16.4s\n"
"smin v1.4s, v1.4s, v17.4s\n"
"smin v23.4s, v23.4s, v17.4s\n"
"smin v0.4s, v0.4s, v17.4s\n"
+ "smin v19.4s, v19.4s, v17.4s\n"
"smin v31.4s, v31.4s, v17.4s\n"
- "smin v30.4s, v30.4s, v17.4s\n"
"smin v22.4s, v22.4s, v17.4s\n"
- "smin v29.4s, v29.4s, v17.4s\n"
+ "smin v30.4s, v30.4s, v17.4s\n"
"smin v18.4s, v18.4s, v17.4s\n"
- "smin v28.4s, v28.4s, v17.4s\n"
+ "smin v29.4s, v29.4s, v17.4s\n"
"smin v21.4s, v21.4s, v17.4s\n"
+ "smin v28.4s, v28.4s, v17.4s\n"
"smin v27.4s, v27.4s, v17.4s\n"
"smin v26.4s, v26.4s, v17.4s\n"
- "smin v25.4s, v25.4s, v17.4s\n"
"smin v20.4s, v20.4s, v17.4s\n"
+ "smin v25.4s, v25.4s, v17.4s\n"
"smin v24.4s, v24.4s, v17.4s\n"
- "smin v19.4s, v19.4s, v17.4s\n"
"uzp1 v23.16b, v1.16b, v23.16b\n"
- "uzp1 v16.16b, v0.16b, v31.16b\n"
- "uzp1 v22.16b, v30.16b, v22.16b\n"
- "uzp1 v18.16b, v29.16b, v18.16b\n"
- "uzp1 v21.16b, v28.16b, v21.16b\n"
- "uzp1 v17.16b, v27.16b, v26.16b\n"
- "uzp1 v20.16b, v25.16b, v20.16b\n"
- "uzp1 v19.16b, v24.16b, v19.16b\n"
- "uzp1 v16.16b, v23.16b, v16.16b\n"
+ "uzp1 v19.16b, v0.16b, v19.16b\n"
+ "uzp1 v22.16b, v31.16b, v22.16b\n"
+ "uzp1 v18.16b, v30.16b, v18.16b\n"
+ "uzp1 v21.16b, v29.16b, v21.16b\n"
+ "uzp1 v17.16b, v28.16b, v27.16b\n"
+ "uzp1 v20.16b, v26.16b, v20.16b\n"
+ "uzp1 v16.16b, v25.16b, v24.16b\n"
+ "uzp1 v19.16b, v23.16b, v19.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
- "uzp1 v16.16b, v20.16b, v19.16b\n"
- "str q18, [%x[outptr], x26]\n"
+ "uzp1 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [%x[outptr], x9]\n"
+ "add x9, x9, #0x40\n"
+ "str q18, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
+ "str q17, [%x[outptr], x27]\n"
+ "add x27, x27, #0x40\n"
+ "str q16, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
- "str q17, [%x[outptr], x24]\n"
- "add x24, x24, #0x40\n"
- "str q16, [%x[outptr], x23]\n"
- "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
@@ -289,295 +289,295 @@ void a64_s8q_nhwc_max_generic_depthfirst_impl(
"blt 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x80\n"
- "mov x22, %x[inptrs]\n"
+ "movi v9.16b, #0x80\n"
+ "mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "smax v17.16b, v4.16b, v3.16b\n"
- "smax v16.16b, v28.16b, v22.16b\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "smax v16.16b, v17.16b, v16.16b\n"
- "ldp x21, x20, [x22, #0x10]\n"
+ "smax v17.16b, v5.16b, v4.16b\n"
+ "smax v16.16b, v3.16b, v2.16b\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "smax v8.16b, v8.16b, v16.16b\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "smax v16.16b, v17.16b, v16.16b\n"
+ "smax v9.16b, v9.16b, v16.16b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "smax v17.16b, v4.16b, v3.16b\n"
- "smax v16.16b, v28.16b, v22.16b\n"
+ "smax v17.16b, v5.16b, v4.16b\n"
+ "smax v16.16b, v3.16b, v2.16b\n"
"smax v16.16b, v17.16b, v16.16b\n"
- "smax v8.16b, v8.16b, v16.16b\n"
+ "smax v9.16b, v9.16b, v16.16b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v16.16b\n"
+ "ldr q16, [x20, x9]\n"
+ "smax v9.16b, v9.16b, v16.16b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "sxtl v17.8h, v8.8b\n"
- "sxtl2 v16.8h, v8.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v22.4s }, [x20]\n"
- "sxtl v21.4s, v17.4h\n"
- "sxtl2 v20.4s, v17.8h\n"
+ "sxtl v17.8h, v9.8b\n"
+ "sxtl2 v16.8h, v9.16b\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v17.4s }, [x20]\n"
- "sxtl v19.4s, v16.4h\n"
- "sxtl2 v18.4s, v16.8h\n"
+ "ld1r { v24.4s }, [x21]\n"
+ "ld1r { v23.4s }, [x20]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v16.4s }, [x20]\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
- "srshl v20.4s, v20.4s, v22.4s\n"
+ "movi v22.4s, #0x7f\n"
+ "ld1r { v21.4s }, [x20]\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
+ "sxtl v20.4s, v17.4h\n"
+ "sxtl2 v17.4s, v17.8h\n"
+ "sxtl v19.4s, v16.4h\n"
+ "sxtl2 v18.4s, v16.8h\n"
"cmp %x[n_channels], #0x10\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "srshl v18.4s, v18.4s, v22.4s\n"
- "sqrdmulh v21.4s, v21.4s, v17.4s\n"
- "sqrdmulh v20.4s, v20.4s, v17.4s\n"
- "sqrdmulh v19.4s, v19.4s, v17.4s\n"
- "sqrdmulh v18.4s, v18.4s, v17.4s\n"
- "movi v17.4s, #0x7f\n"
- "srshl v21.4s, v21.4s, v16.4s\n"
- "srshl v20.4s, v20.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v16.4s\n"
- "srshl v18.4s, v18.4s, v16.4s\n"
- "not v16.16b, v17.16b\n"
- "smax v21.4s, v21.4s, v16.4s\n"
+ "not v16.16b, v22.16b\n"
+ "srshl v20.4s, v20.4s, v24.4s\n"
+ "srshl v17.4s, v17.4s, v24.4s\n"
+ "srshl v19.4s, v19.4s, v24.4s\n"
+ "srshl v18.4s, v18.4s, v24.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v23.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v23.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "srshl v17.4s, v17.4s, v21.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "srshl v18.4s, v18.4s, v21.4s\n"
"smax v20.4s, v20.4s, v16.4s\n"
+ "smax v17.4s, v17.4s, v16.4s\n"
"smax v19.4s, v19.4s, v16.4s\n"
"smax v18.4s, v18.4s, v16.4s\n"
- "smin v21.4s, v21.4s, v17.4s\n"
- "smin v20.4s, v20.4s, v17.4s\n"
- "smin v19.4s, v19.4s, v17.4s\n"
- "smin v18.4s, v18.4s, v17.4s\n"
- "uzp1 v17.16b, v21.16b, v20.16b\n"
+ "smin v20.4s, v20.4s, v22.4s\n"
+ "smin v17.4s, v17.4s, v22.4s\n"
+ "smin v19.4s, v19.4s, v22.4s\n"
+ "smin v18.4s, v18.4s, v22.4s\n"
+ "uzp1 v17.16b, v20.16b, v17.16b\n"
"uzp1 v16.16b, v19.16b, v18.16b\n"
"uzp1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
+ "str q16, [%x[outptr], x9]\n"
+ "add x9, x9, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
"lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x27\n"
- "movi v8.16b, #0x80\n"
+ "add %x[outptr], %x[outptr], x9\n"
+ "movi v9.16b, #0x80\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 24f\n"
"15:" // Oddments: 4 inputs loop
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
+ "movi v5.16b, #0x0\n"
"movi v4.16b, #0x0\n"
"movi v3.16b, #0x0\n"
- "add x20, x20, x27\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "movi v2.16b, #0x0\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "ldr d22, [x20], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d2, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
- "ld1 { v3.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v22.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
+ "ld1 { v3.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
- "ld1 { v3.h }[6], [x22], #0x2\n"
- "ld1 { v28.h }[6], [x21], #0x2\n"
- "ld1 { v22.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
+ "ld1 { v4.h }[6], [x22], #0x2\n"
+ "ld1 { v3.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[14], [x23], #0x1\n"
- "ld1 { v3.b }[14], [x22], #0x1\n"
- "ld1 { v28.b }[14], [x21], #0x1\n"
- "ld1 { v22.b }[14], [x20], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
+ "ld1 { v4.b }[14], [x22], #0x1\n"
+ "ld1 { v3.b }[14], [x21], #0x1\n"
+ "ld1 { v2.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[12], [x23], #0x1\n"
- "ld1 { v3.b }[12], [x22], #0x1\n"
- "ld1 { v28.b }[12], [x21], #0x1\n"
- "ld1 { v22.b }[12], [x20], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
+ "ld1 { v4.b }[12], [x22], #0x1\n"
+ "ld1 { v3.b }[12], [x21], #0x1\n"
+ "ld1 { v2.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
- "ld1 { v3.h }[4], [x22], #0x2\n"
- "ld1 { v28.h }[4], [x21], #0x2\n"
- "ld1 { v22.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
+ "ld1 { v4.h }[4], [x22], #0x2\n"
+ "ld1 { v3.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[10], [x23], #0x1\n"
- "ld1 { v3.b }[10], [x22], #0x1\n"
- "ld1 { v28.b }[10], [x21], #0x1\n"
- "ld1 { v22.b }[10], [x20], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
+ "ld1 { v4.b }[10], [x22], #0x1\n"
+ "ld1 { v3.b }[10], [x21], #0x1\n"
+ "ld1 { v2.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[8], [x23], #0x1\n"
- "ld1 { v3.b }[8], [x22], #0x1\n"
- "ld1 { v28.b }[8], [x21], #0x1\n"
- "ld1 { v22.b }[8], [x20], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
+ "ld1 { v4.b }[8], [x22], #0x1\n"
+ "ld1 { v3.b }[8], [x21], #0x1\n"
+ "ld1 { v2.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "ldr s22, [x20], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s2, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[6], [x23], #0x1\n"
- "ld1 { v3.b }[6], [x22], #0x1\n"
- "ld1 { v28.b }[6], [x21], #0x1\n"
- "ld1 { v22.b }[6], [x20], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
+ "ld1 { v4.b }[6], [x22], #0x1\n"
+ "ld1 { v3.b }[6], [x21], #0x1\n"
+ "ld1 { v2.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[4], [x23], #0x1\n"
- "ld1 { v3.b }[4], [x22], #0x1\n"
- "ld1 { v28.b }[4], [x21], #0x1\n"
- "ld1 { v22.b }[4], [x20], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
+ "ld1 { v4.b }[4], [x22], #0x1\n"
+ "ld1 { v3.b }[4], [x21], #0x1\n"
+ "ld1 { v2.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "ldr h28, [x21], #0x2\n"
- "ldr h22, [x20], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "ldr h2, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[2], [x23], #0x1\n"
- "ld1 { v3.b }[2], [x22], #0x1\n"
- "ld1 { v28.b }[2], [x21], #0x1\n"
- "ld1 { v22.b }[2], [x20], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
+ "ld1 { v4.b }[2], [x22], #0x1\n"
+ "ld1 { v3.b }[2], [x21], #0x1\n"
+ "ld1 { v2.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b4, [x23], #0x1\n"
- "ldr b3, [x22], #0x1\n"
- "ldr b28, [x21], #0x1\n"
- "ldr b22, [x20], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
+ "ldr b4, [x22], #0x1\n"
+ "ldr b3, [x21], #0x1\n"
+ "ldr b2, [x20], #0x1\n"
"23:" // Oddments: 4 inputs loop: Load: Bit 3: End
- "smax v17.16b, v4.16b, v3.16b\n"
- "smax v16.16b, v28.16b, v22.16b\n"
+ "smax v17.16b, v5.16b, v4.16b\n"
+ "smax v16.16b, v3.16b, v2.16b\n"
"subs x25, x25, #0x1\n"
"smax v16.16b, v17.16b, v16.16b\n"
- "smax v8.16b, v8.16b, v16.16b\n"
+ "smax v9.16b, v9.16b, v16.16b\n"
"bgt 15b\n"
"24:" // Oddments: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
"ldr x23, [x24], #0x8\n"
- "add x23, x23, x27\n"
- "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "add x23, x23, x9\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[14], [x23], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[12], [x23], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[10], [x23], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[8], [x23], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[6], [x23], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[4], [x23], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h4, [x23], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[2], [x23], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b4, [x23], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
"subs x21, x21, #0x1\n"
- "smax v8.16b, v8.16b, v4.16b\n"
+ "smax v9.16b, v9.16b, v5.16b\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
- "sxtl v17.8h, v8.8b\n"
- "sxtl2 v16.8h, v8.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v22.4s }, [x20]\n"
- "sxtl v21.4s, v17.4h\n"
- "sxtl2 v20.4s, v17.8h\n"
+ "sxtl v17.8h, v9.8b\n"
+ "sxtl2 v16.8h, v9.16b\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "ld1r { v24.4s }, [x21]\n"
+ "ld1r { v23.4s }, [x20]\n"
+ "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "movi v22.4s, #0x7f\n"
+ "ld1r { v21.4s }, [x20]\n"
+ "sxtl v20.4s, v17.4h\n"
+ "sxtl2 v17.4s, v17.8h\n"
"sxtl v19.4s, v16.4h\n"
"sxtl2 v18.4s, v16.8h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v16.4s }, [x20]\n"
- "srshl v21.4s, v21.4s, v22.4s\n"
- "srshl v20.4s, v20.4s, v22.4s\n"
- "srshl v19.4s, v19.4s, v22.4s\n"
- "srshl v18.4s, v18.4s, v22.4s\n"
- "sqrdmulh v21.4s, v21.4s, v17.4s\n"
- "sqrdmulh v20.4s, v20.4s, v17.4s\n"
- "sqrdmulh v19.4s, v19.4s, v17.4s\n"
- "sqrdmulh v18.4s, v18.4s, v17.4s\n"
- "movi v17.4s, #0x7f\n"
- "srshl v21.4s, v21.4s, v16.4s\n"
- "srshl v20.4s, v20.4s, v16.4s\n"
- "srshl v19.4s, v19.4s, v16.4s\n"
- "srshl v18.4s, v18.4s, v16.4s\n"
- "not v16.16b, v17.16b\n"
- "smax v21.4s, v21.4s, v16.4s\n"
+ "not v16.16b, v22.16b\n"
+ "srshl v20.4s, v20.4s, v24.4s\n"
+ "srshl v17.4s, v17.4s, v24.4s\n"
+ "srshl v19.4s, v19.4s, v24.4s\n"
+ "srshl v18.4s, v18.4s, v24.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v23.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v23.4s\n"
+ "srshl v20.4s, v20.4s, v21.4s\n"
+ "srshl v17.4s, v17.4s, v21.4s\n"
+ "srshl v19.4s, v19.4s, v21.4s\n"
+ "srshl v18.4s, v18.4s, v21.4s\n"
"smax v20.4s, v20.4s, v16.4s\n"
+ "smax v17.4s, v17.4s, v16.4s\n"
"smax v19.4s, v19.4s, v16.4s\n"
"smax v18.4s, v18.4s, v16.4s\n"
- "smin v21.4s, v21.4s, v17.4s\n"
- "smin v20.4s, v20.4s, v17.4s\n"
- "smin v19.4s, v19.4s, v17.4s\n"
- "smin v18.4s, v18.4s, v17.4s\n"
- "uzp1 v17.16b, v21.16b, v20.16b\n"
+ "smin v20.4s, v20.4s, v22.4s\n"
+ "smin v17.4s, v17.4s, v22.4s\n"
+ "smin v19.4s, v19.4s, v22.4s\n"
+ "smin v18.4s, v18.4s, v22.4s\n"
+ "uzp1 v17.16b, v20.16b, v17.16b\n"
"uzp1 v16.16b, v19.16b, v18.16b\n"
"uzp1 v16.16b, v17.16b, v16.16b\n"
"tbz %x[n_channels], #3, 38f\n"
@@ -628,7 +628,7 @@ void a64_s8q_nhwc_max_generic_depthfirst_impl(
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [quant_params] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
index f8984c451c..dbbf4ae2b3 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -122,9 +122,9 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"movi v0.4s, #0x0\n"
"cbz x23, 4f\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
+ "ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
"ldr q29, [x21, x26]\n"
"ldr q28, [x20, x26]\n"
@@ -137,26 +137,26 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"uaddl v23.8h, v31.8b, v30.8b\n"
"uaddl2 v22.8h, v31.16b, v30.16b\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
- "ldr q30, [x20, x27]\n"
+ "subs x23, x23, #0x1\n"
"uaddl v21.8h, v29.8b, v28.8b\n"
"uaddl2 v20.8h, v29.16b, v28.16b\n"
- "ldr q29, [x21, x26]\n"
- "ldr q28, [x20, x26]\n"
+ "add x22, x22, #0x10\n"
"uaddl v19.8h, v27.8b, v26.8b\n"
"uaddl2 v18.8h, v27.16b, v26.16b\n"
- "ldr q27, [x21, x25]\n"
- "ldr q26, [x20, x25]\n"
+ "ldr q31, [x21, x27]\n"
+ "ldr q30, [x20, x27]\n"
+ "ldr q29, [x21, x26]\n"
+ "ldr q28, [x20, x26]\n"
"uaddl v17.8h, v25.8b, v24.8b\n"
"uaddl2 v16.8h, v25.16b, v24.16b\n"
- "ldr q25, [x21, x24]\n"
- "ldr q24, [x20, x24]\n"
- "subs x23, x23, #0x1\n"
+ "ldr q27, [x21, x25]\n"
+ "ldr q26, [x20, x25]\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q25, [x21, x24]\n"
+ "ldr q24, [x20, x24]\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
- "add x22, x22, #0x10\n"
"uaddw v11.4s, v11.4s, v21.4h\n"
"uaddw2 v10.4s, v10.4s, v21.8h\n"
"uaddw v9.4s, v9.4s, v20.4h\n"
@@ -200,17 +200,17 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
- "uxtl v23.8h, v16.8b\n"
- "uxtl2 v22.8h, v16.16b\n"
- "ldr q16, [x20, x26]\n"
+ "subs x23, x23, #0x1\n"
+ "ldr q19, [x20, x27]\n"
+ "ldr q18, [x20, x26]\n"
"ldr q17, [x20, x25]\n"
- "uxtl v21.8h, v16.8b\n"
- "uxtl2 v20.8h, v16.16b\n"
"ldr q16, [x20, x24]\n"
+ "uxtl v23.8h, v19.8b\n"
+ "uxtl2 v22.8h, v19.16b\n"
+ "uxtl v21.8h, v18.8b\n"
+ "uxtl2 v20.8h, v18.16b\n"
"uxtl v19.8h, v17.8b\n"
"uxtl2 v18.8h, v17.16b\n"
- "subs x23, x23, #0x1\n"
"uxtl v17.8h, v16.8b\n"
"uxtl2 v16.8h, v16.16b\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
@@ -231,60 +231,60 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"uaddw2 v0.4s, v0.4s, v16.8h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
- "ld1r { v16.4s }, [%x[shift_ptr]]\n"
- "sqdmulh v15.4s, v15.4s, v17.4s\n"
- "sqdmulh v14.4s, v14.4s, v17.4s\n"
- "sqdmulh v13.4s, v13.4s, v17.4s\n"
- "sqdmulh v12.4s, v12.4s, v17.4s\n"
+ "ld1r { v19.4s }, [%x[rescale_ptr]]\n"
+ "ld1r { v18.4s }, [%x[shift_ptr]]\n"
+ "movi v17.4s, #0x0\n"
+ "movi v16.4s, #0xff\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
"cmp %x[n_channels], #0x40\n"
- "sqdmulh v11.4s, v11.4s, v17.4s\n"
- "sqdmulh v10.4s, v10.4s, v17.4s\n"
- "sqdmulh v9.4s, v9.4s, v17.4s\n"
- "sqdmulh v8.4s, v8.4s, v17.4s\n"
- "sqdmulh v7.4s, v7.4s, v17.4s\n"
- "sqdmulh v6.4s, v6.4s, v17.4s\n"
- "sqdmulh v5.4s, v5.4s, v17.4s\n"
- "sqdmulh v4.4s, v4.4s, v17.4s\n"
- "sqdmulh v3.4s, v3.4s, v17.4s\n"
- "sqdmulh v2.4s, v2.4s, v17.4s\n"
- "sqdmulh v1.4s, v1.4s, v17.4s\n"
- "sqdmulh v0.4s, v0.4s, v17.4s\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
- "srshl v11.4s, v11.4s, v16.4s\n"
- "srshl v10.4s, v10.4s, v16.4s\n"
- "srshl v9.4s, v9.4s, v16.4s\n"
- "srshl v8.4s, v8.4s, v16.4s\n"
- "srshl v7.4s, v7.4s, v16.4s\n"
- "srshl v6.4s, v6.4s, v16.4s\n"
- "srshl v5.4s, v5.4s, v16.4s\n"
- "srshl v4.4s, v4.4s, v16.4s\n"
- "srshl v3.4s, v3.4s, v16.4s\n"
- "srshl v2.4s, v2.4s, v16.4s\n"
- "srshl v1.4s, v1.4s, v16.4s\n"
- "srshl v0.4s, v0.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v15.4s, v15.4s, v16.4s\n"
- "smax v14.4s, v14.4s, v16.4s\n"
- "smax v13.4s, v13.4s, v16.4s\n"
- "smax v12.4s, v12.4s, v16.4s\n"
- "smax v11.4s, v11.4s, v16.4s\n"
- "smax v10.4s, v10.4s, v16.4s\n"
- "smax v9.4s, v9.4s, v16.4s\n"
- "smax v8.4s, v8.4s, v16.4s\n"
- "smax v7.4s, v7.4s, v16.4s\n"
- "smax v6.4s, v6.4s, v16.4s\n"
- "smax v5.4s, v5.4s, v16.4s\n"
- "smax v4.4s, v4.4s, v16.4s\n"
- "smax v3.4s, v3.4s, v16.4s\n"
- "smax v2.4s, v2.4s, v16.4s\n"
- "smax v1.4s, v1.4s, v16.4s\n"
- "smax v0.4s, v0.4s, v16.4s\n"
- "movi v16.4s, #0xff\n"
+ "sqdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqdmulh v12.4s, v12.4s, v19.4s\n"
+ "sqdmulh v11.4s, v11.4s, v19.4s\n"
+ "sqdmulh v10.4s, v10.4s, v19.4s\n"
+ "sqdmulh v9.4s, v9.4s, v19.4s\n"
+ "sqdmulh v8.4s, v8.4s, v19.4s\n"
+ "sqdmulh v7.4s, v7.4s, v19.4s\n"
+ "sqdmulh v6.4s, v6.4s, v19.4s\n"
+ "sqdmulh v5.4s, v5.4s, v19.4s\n"
+ "sqdmulh v4.4s, v4.4s, v19.4s\n"
+ "sqdmulh v3.4s, v3.4s, v19.4s\n"
+ "sqdmulh v2.4s, v2.4s, v19.4s\n"
+ "sqdmulh v1.4s, v1.4s, v19.4s\n"
+ "sqdmulh v0.4s, v0.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
+ "srshl v11.4s, v11.4s, v18.4s\n"
+ "srshl v10.4s, v10.4s, v18.4s\n"
+ "srshl v9.4s, v9.4s, v18.4s\n"
+ "srshl v8.4s, v8.4s, v18.4s\n"
+ "srshl v7.4s, v7.4s, v18.4s\n"
+ "srshl v6.4s, v6.4s, v18.4s\n"
+ "srshl v5.4s, v5.4s, v18.4s\n"
+ "srshl v4.4s, v4.4s, v18.4s\n"
+ "srshl v3.4s, v3.4s, v18.4s\n"
+ "srshl v2.4s, v2.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v18.4s\n"
+ "srshl v0.4s, v0.4s, v18.4s\n"
+ "smax v15.4s, v15.4s, v17.4s\n"
+ "smax v14.4s, v14.4s, v17.4s\n"
+ "smax v13.4s, v13.4s, v17.4s\n"
+ "smax v12.4s, v12.4s, v17.4s\n"
+ "smax v11.4s, v11.4s, v17.4s\n"
+ "smax v10.4s, v10.4s, v17.4s\n"
+ "smax v9.4s, v9.4s, v17.4s\n"
+ "smax v8.4s, v8.4s, v17.4s\n"
+ "smax v7.4s, v7.4s, v17.4s\n"
+ "smax v6.4s, v6.4s, v17.4s\n"
+ "smax v5.4s, v5.4s, v17.4s\n"
+ "smax v4.4s, v4.4s, v17.4s\n"
+ "smax v3.4s, v3.4s, v17.4s\n"
+ "smax v2.4s, v2.4s, v17.4s\n"
+ "smax v1.4s, v1.4s, v17.4s\n"
+ "smax v0.4s, v0.4s, v17.4s\n"
"smin v15.4s, v15.4s, v16.4s\n"
"smin v14.4s, v14.4s, v16.4s\n"
"smin v13.4s, v13.4s, v16.4s\n"
@@ -302,19 +302,19 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"smin v1.4s, v1.4s, v16.4s\n"
"smin v0.4s, v0.4s, v16.4s\n"
"uzp1 v23.16b, v15.16b, v14.16b\n"
- "uzp1 v16.16b, v13.16b, v12.16b\n"
+ "uzp1 v19.16b, v13.16b, v12.16b\n"
"uzp1 v22.16b, v11.16b, v10.16b\n"
"uzp1 v18.16b, v9.16b, v8.16b\n"
"uzp1 v21.16b, v7.16b, v6.16b\n"
"uzp1 v17.16b, v5.16b, v4.16b\n"
"uzp1 v20.16b, v3.16b, v2.16b\n"
- "uzp1 v19.16b, v1.16b, v0.16b\n"
- "uzp1 v16.16b, v23.16b, v16.16b\n"
+ "uzp1 v16.16b, v1.16b, v0.16b\n"
+ "uzp1 v19.16b, v23.16b, v19.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
- "uzp1 v16.16b, v20.16b, v19.16b\n"
+ "uzp1 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [%x[outptr], x27]\n"
+ "add x27, x27, #0x40\n"
"str q18, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
"str q17, [%x[outptr], x25]\n"
@@ -335,23 +335,23 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"movi v12.4s, #0x0\n"
"cbz x23, 11f\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
+ "ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
"uaddl v17.8h, v31.8b, v30.8b\n"
"uaddl2 v16.8h, v31.16b, v30.16b\n"
"ldp x21, x20, [x22, #0x0]\n"
+ "subs x23, x23, #0x1\n"
+ "add x22, x22, #0x10\n"
"ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
- "subs x23, x23, #0x1\n"
"uaddw v15.4s, v15.4s, v17.4h\n"
"uaddw2 v14.4s, v14.4s, v17.8h\n"
"uaddw v13.4s, v13.4s, v16.4h\n"
"uaddw2 v12.4s, v12.4s, v16.8h\n"
- "add x22, x22, #0x10\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
"uaddl v17.8h, v31.8b, v30.8b\n"
@@ -365,34 +365,34 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
+ "subs x23, x23, #0x1\n"
"ldr q16, [x20, x27]\n"
"uxtl v17.8h, v16.8b\n"
"uxtl2 v16.8h, v16.16b\n"
- "subs x23, x23, #0x1\n"
"uaddw v15.4s, v15.4s, v17.4h\n"
"uaddw2 v14.4s, v14.4s, v17.8h\n"
"uaddw v13.4s, v13.4s, v16.4h\n"
"uaddw2 v12.4s, v12.4s, v16.8h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
- "ld1r { v16.4s }, [%x[shift_ptr]]\n"
- "sqdmulh v15.4s, v15.4s, v17.4s\n"
- "sqdmulh v14.4s, v14.4s, v17.4s\n"
- "sqdmulh v13.4s, v13.4s, v17.4s\n"
- "sqdmulh v12.4s, v12.4s, v17.4s\n"
+ "ld1r { v19.4s }, [%x[rescale_ptr]]\n"
+ "ld1r { v18.4s }, [%x[shift_ptr]]\n"
+ "movi v17.4s, #0x0\n"
+ "movi v16.4s, #0xff\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
"cmp %x[n_channels], #0x10\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v15.4s, v15.4s, v16.4s\n"
- "smax v14.4s, v14.4s, v16.4s\n"
- "smax v13.4s, v13.4s, v16.4s\n"
- "smax v12.4s, v12.4s, v16.4s\n"
- "movi v16.4s, #0xff\n"
+ "sqdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqdmulh v12.4s, v12.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
+ "smax v15.4s, v15.4s, v17.4s\n"
+ "smax v14.4s, v14.4s, v17.4s\n"
+ "smax v13.4s, v13.4s, v17.4s\n"
+ "smax v12.4s, v12.4s, v17.4s\n"
"smin v15.4s, v15.4s, v16.4s\n"
"smin v14.4s, v14.4s, v16.4s\n"
"smin v13.4s, v13.4s, v16.4s\n"
@@ -416,10 +416,10 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"15:" // Oddments: 2 inputs loop
"ldp x21, x20, [x22, #0x0]\n"
"add x22, x22, #0x10\n"
- "add x21, x21, x27\n"
"movi v31.16b, #0x0\n"
- "add x20, x20, x27\n"
"movi v30.16b, #0x0\n"
+ "add x21, x21, x27\n"
+ "add x20, x20, x27\n"
"tbz %x[n_channels], #3, 19f\n"
"ldr d31, [x21], #0x8\n"
"ldr d30, [x20], #0x8\n"
@@ -493,8 +493,8 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"beq 34f\n"
"25:" // Oddments: Single input loop
"ldr x21, [x22], #0x8\n"
- "add x21, x21, x27\n"
"movi v31.16b, #0x0\n"
+ "add x21, x21, x27\n"
"tbz %x[n_channels], #3, 29f\n"
"ldr d31, [x21], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
@@ -549,22 +549,22 @@ void a64_u8_nhwc_avg_generic_depthfirst_impl(
"uaddw2 v12.4s, v12.4s, v16.8h\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
- "ld1r { v17.4s }, [%x[rescale_ptr]]\n"
- "ld1r { v16.4s }, [%x[shift_ptr]]\n"
- "sqdmulh v15.4s, v15.4s, v17.4s\n"
- "sqdmulh v14.4s, v14.4s, v17.4s\n"
- "sqdmulh v13.4s, v13.4s, v17.4s\n"
- "sqdmulh v12.4s, v12.4s, v17.4s\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v15.4s, v15.4s, v16.4s\n"
- "smax v14.4s, v14.4s, v16.4s\n"
- "smax v13.4s, v13.4s, v16.4s\n"
- "smax v12.4s, v12.4s, v16.4s\n"
+ "ld1r { v19.4s }, [%x[rescale_ptr]]\n"
+ "ld1r { v18.4s }, [%x[shift_ptr]]\n"
+ "movi v17.4s, #0x0\n"
"movi v16.4s, #0xff\n"
+ "sqdmulh v15.4s, v15.4s, v19.4s\n"
+ "sqdmulh v14.4s, v14.4s, v19.4s\n"
+ "sqdmulh v13.4s, v13.4s, v19.4s\n"
+ "sqdmulh v12.4s, v12.4s, v19.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
+ "smax v15.4s, v15.4s, v17.4s\n"
+ "smax v14.4s, v14.4s, v17.4s\n"
+ "smax v13.4s, v13.4s, v17.4s\n"
+ "smax v12.4s, v12.4s, v17.4s\n"
"smin v15.4s, v15.4s, v16.4s\n"
"smin v14.4s, v14.4s, v16.4s\n"
"smin v13.4s, v13.4s, v16.4s\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 66cdb7f849..d12733c7de 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -65,11 +65,11 @@ void a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
__asm__ __volatile__(
"ldr x16, [%x[args], %[offsetof_n_channels]]\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
- "cmp x16, #0x10\n"
"mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x14, x13, [x21, #0x0]\n"
- "mov x12, #0x0\n"
+ "cmp x16, #0x10\n"
+ "ldp x13, x12, [x21, #0x0]\n"
"ldp x11, x10, [x21, #0x10]\n"
"ldp x9, x28, [x20, #0x0]\n"
"ldp x27, x26, [x20, #0x10]\n"
@@ -80,14 +80,14 @@ void a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr q30, [x28, x15]\n"
"ldr q29, [x25, x15]\n"
"lsr x20, x16, #0x4\n"
- "sub x16, x16, x20, LSL #4\n"
"ldr q28, [x22, x15]\n"
"ldr q27, [x26, x15]\n"
- "subs x20, x20, #0x1\n"
"ldr q26, [x9, x15]\n"
"ldr q25, [x27, x15]\n"
"ldr q24, [x24, x15]\n"
"ldr q23, [x23, x15]\n"
+ "sub x16, x16, x20, LSL #4\n"
+ "subs x20, x20, #0x1\n"
"ldr q22, [x21, x15]\n"
"add x15, x15, #0x10\n"
"beq 2f\n"
@@ -107,62 +107,62 @@ void a64_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr q24, [x24, x15]\n"
"ldr q23, [x23, x15]\n"
"subs x20, x20, #0x1\n"
- "umax v19.16b, v21.16b, v19.16b\n"
"ldr q22, [x21, x15]\n"
+ "umax v19.16b, v21.16b, v19.16b\n"
"umax v18.16b, v18.16b, v21.16b\n"
- "umax v17.16b, v17.16b, v20.16b\n"
"add x15, x15, #0x10\n"
+ "umax v17.16b, v17.16b, v20.16b\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "str q19, [x14, x12]\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "str q19, [x13, x14]\n"
+ "str q18, [x12, x14]\n"
+ "str q17, [x11, x14]\n"
+ "str q16, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
"bgt 1b\n"
"2:" // Vector: Tail
"umax v21.16b, v30.16b, v29.16b\n"
"umax v20.16b, v29.16b, v28.16b\n"
- "umax v16.16b, v27.16b, v26.16b\n"
+ "umax v19.16b, v27.16b, v26.16b\n"
"umax v18.16b, v25.16b, v24.16b\n"
"umax v17.16b, v27.16b, v23.16b\n"
- "umax v19.16b, v24.16b, v22.16b\n"
- "umax v16.16b, v21.16b, v16.16b\n"
+ "umax v16.16b, v24.16b, v22.16b\n"
+ "umax v19.16b, v21.16b, v19.16b\n"
"umax v18.16b, v18.16b, v21.16b\n"
- "str q16, [x14, x12]\n"
"umax v17.16b, v17.16b, v20.16b\n"
- "umax v16.16b, v20.16b, v19.16b\n"
- "str q18, [x13, x12]\n"
- "str q17, [x11, x12]\n"
- "str q16, [x10, x12]\n"
- "add x12, x12, #0x10\n"
+ "umax v16.16b, v20.16b, v16.16b\n"
+ "str q19, [x13, x14]\n"
+ "str q18, [x12, x14]\n"
+ "str q17, [x11, x14]\n"
+ "str q16, [x10, x14]\n"
+ "add x14, x14, #0x10\n"
"cbz x16, 4f\n"
"3:" // Oddments
"ldr b16, [x28, x15]\n"
- "ldr b17, [x25, x15]\n"
- "umax v23.16b, v16.16b, v17.16b\n"
+ "ldr b24, [x25, x15]\n"
"subs x16, x16, #0x1\n"
- "ldr b16, [x22, x15]\n"
- "ldr b22, [x26, x15]\n"
- "umax v21.16b, v17.16b, v16.16b\n"
- "ldr b16, [x9, x15]\n"
- "ldr b17, [x27, x15]\n"
- "umax v16.16b, v22.16b, v16.16b\n"
- "umax v20.16b, v23.16b, v16.16b\n"
- "ldr b19, [x24, x15]\n"
- "ldr b16, [x23, x15]\n"
- "umax v18.16b, v17.16b, v19.16b\n"
- "umax v17.16b, v22.16b, v16.16b\n"
+ "ldr b20, [x22, x15]\n"
+ "ldr b23, [x26, x15]\n"
+ "ldr b19, [x9, x15]\n"
+ "ldr b18, [x27, x15]\n"
+ "ldr b22, [x24, x15]\n"
+ "ldr b17, [x23, x15]\n"
+ "umax v21.16b, v16.16b, v24.16b\n"
"ldr b16, [x21, x15]\n"
- "umax v16.16b, v19.16b, v16.16b\n"
+ "umax v20.16b, v24.16b, v20.16b\n"
"add x15, x15, #0x1\n"
- "umax v18.16b, v18.16b, v23.16b\n"
- "umax v17.16b, v17.16b, v21.16b\n"
- "umax v16.16b, v21.16b, v16.16b\n"
- "str b20, [x14, x12]\n"
- "str b18, [x13, x12]\n"
- "str b17, [x11, x12]\n"
- "str b16, [x10, x12]\n"
- "add x12, x12, #0x1\n"
+ "umax v19.16b, v23.16b, v19.16b\n"
+ "umax v18.16b, v18.16b, v22.16b\n"
+ "umax v17.16b, v23.16b, v17.16b\n"
+ "umax v16.16b, v22.16b, v16.16b\n"
+ "umax v19.16b, v21.16b, v19.16b\n"
+ "umax v18.16b, v18.16b, v21.16b\n"
+ "umax v17.16b, v17.16b, v20.16b\n"
+ "umax v16.16b, v20.16b, v16.16b\n"
+ "str b19, [x13, x14]\n"
+ "str b18, [x12, x14]\n"
+ "str b17, [x11, x14]\n"
+ "str b16, [x10, x14]\n"
+ "add x14, x14, #0x1\n"
"bgt 3b\n"
"4:" // End
:
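(Aside, not part of the patch: the 2x2 stride-1 kernel above reads nine input pointers per 2x2 output tile — a 3x3 patch — and its umax tree shares pairwise maxima between adjacent outputs (e.g. v21 = umax(v30, v29) feeds two of the four results). A per-channel scalar sketch, assuming a row-major 3x3 patch; this shows only the arithmetic, not the NHWC vector layout:

#include <algorithm>
#include <cstdint>

// One channel of a 2x2, stride-1 max pool over a 3x3 patch, producing a
// 2x2 output tile. The shared row maxima mirror the umax reuse above.
static inline void max_2x2_s1(const uint8_t in[3][3], uint8_t out[2][2])
{
    // Horizontal pairwise maxima; the middle row is used by both outputs.
    uint8_t row_max[3][2];
    for (int r = 0; r < 3; ++r)
        for (int c = 0; c < 2; ++c)
            row_max[r][c] = std::max(in[r][c], in[r][c + 1]);
    for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
            out[i][j] = std::max(row_max[i][j], row_max[i + 1][j]);
}
)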
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
index 2ceef125ca..bf6335b71a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,121 +41,121 @@ void a64_u8_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
+ "mov x9, #0x0\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "mov x27, #0x20\n" // cntb _, ALL, #2
+ "mov x26, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "movi v9.16b, #0x0\n"
"movi v8.16b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"movi v7.16b, #0x0\n"
- "mov x22, %x[inptrs]\n"
"movi v6.16b, #0x0\n"
- "movi v5.16b, #0x0\n"
"cbz x25, 4f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldr q2, [x21, x26]\n"
- "ldr q1, [x20, x26]\n"
- "ldr q0, [x21, x24]\n"
- "ldr q31, [x20, x24]\n"
- "ldr q30, [x21, x23]\n"
- "ldr q29, [x20, x23]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
+ "ldr q27, [x20, x27]\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "umax v22.16b, v2.16b, v1.16b\n"
- "ldr q2, [x21, x26]\n"
- "umax v18.16b, v27.16b, v21.16b\n"
- "ldr q1, [x20, x26]\n"
- "umax v21.16b, v0.16b, v31.16b\n"
- "ldr q0, [x21, x24]\n"
- "umax v17.16b, v26.16b, v20.16b\n"
- "ldr q31, [x20, x24]\n"
- "umax v20.16b, v30.16b, v29.16b\n"
- "ldr q30, [x21, x23]\n"
+ "umax v23.16b, v5.16b, v4.16b\n"
+ "umax v19.16b, v3.16b, v2.16b\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
+ "umax v22.16b, v1.16b, v0.16b\n"
+ "umax v18.16b, v31.16b, v30.16b\n"
+ "subs x25, x25, #0x1\n"
+ "add x24, x24, #0x20\n"
+ "umax v21.16b, v29.16b, v21.16b\n"
+ "umax v17.16b, v28.16b, v27.16b\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "umax v20.16b, v26.16b, v20.16b\n"
"umax v16.16b, v25.16b, v24.16b\n"
- "ldr q29, [x20, x23]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
"umax v19.16b, v23.16b, v19.16b\n"
"umax v18.16b, v22.16b, v18.16b\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
"umax v17.16b, v21.16b, v17.16b\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "subs x25, x25, #0x1\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "umax v7.16b, v7.16b, v18.16b\n"
- "umax v6.16b, v6.16b, v17.16b\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
- "umax v5.16b, v5.16b, v16.16b\n"
- "add x22, x22, #0x20\n"
+ "ldr q28, [x21, x27]\n"
+ "ldr q27, [x20, x27]\n"
+ "umax v9.16b, v9.16b, v19.16b\n"
+ "umax v8.16b, v8.16b, v18.16b\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "umax v7.16b, v7.16b, v17.16b\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
+ "umax v6.16b, v6.16b, v16.16b\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "umax v22.16b, v2.16b, v1.16b\n"
- "umax v18.16b, v27.16b, v21.16b\n"
- "umax v21.16b, v0.16b, v31.16b\n"
- "umax v17.16b, v26.16b, v20.16b\n"
- "umax v20.16b, v30.16b, v29.16b\n"
+ "umax v23.16b, v5.16b, v4.16b\n"
+ "umax v19.16b, v3.16b, v2.16b\n"
+ "umax v22.16b, v1.16b, v0.16b\n"
+ "umax v18.16b, v31.16b, v30.16b\n"
+ "umax v21.16b, v29.16b, v21.16b\n"
+ "umax v17.16b, v28.16b, v27.16b\n"
+ "umax v20.16b, v26.16b, v20.16b\n"
"umax v16.16b, v25.16b, v24.16b\n"
"umax v19.16b, v23.16b, v19.16b\n"
"umax v18.16b, v22.16b, v18.16b\n"
"umax v17.16b, v21.16b, v17.16b\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "umax v7.16b, v7.16b, v18.16b\n"
- "umax v6.16b, v6.16b, v17.16b\n"
- "umax v5.16b, v5.16b, v16.16b\n"
+ "umax v9.16b, v9.16b, v19.16b\n"
+ "umax v8.16b, v8.16b, v18.16b\n"
+ "umax v7.16b, v7.16b, v17.16b\n"
+ "umax v6.16b, v6.16b, v16.16b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v16.16b\n"
- "ldr q17, [x20, x26]\n"
- "ldr q16, [x20, x24]\n"
+ "ldr q19, [x20, x9]\n"
+ "ldr q18, [x20, x28]\n"
+ "ldr q17, [x20, x27]\n"
+ "ldr q16, [x20, x26]\n"
+ "umax v9.16b, v9.16b, v19.16b\n"
+ "umax v8.16b, v8.16b, v18.16b\n"
"umax v7.16b, v7.16b, v17.16b\n"
"umax v6.16b, v6.16b, v16.16b\n"
- "ldr q16, [x20, x23]\n"
- "umax v5.16b, v5.16b, v16.16b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x40\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x40\n"
"cmp %x[n_channels], #0x40\n"
- "str q8, [%x[outptr], x27]\n"
- "str q7, [%x[outptr], x26]\n"
+ "str q8, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
+ "str q7, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
+ "str q6, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
- "str q6, [%x[outptr], x24]\n"
- "add x24, x24, #0x40\n"
- "str q5, [%x[outptr], x23]\n"
- "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
@@ -163,272 +163,272 @@ void a64_u8_nhwc_max_generic_depthfirst_impl(
"blt 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "mov x22, %x[inptrs]\n"
+ "movi v9.16b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "umax v17.16b, v4.16b, v3.16b\n"
- "umax v16.16b, v28.16b, v22.16b\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "umax v16.16b, v17.16b, v16.16b\n"
- "ldp x21, x20, [x22, #0x10]\n"
+ "umax v17.16b, v5.16b, v4.16b\n"
+ "umax v16.16b, v3.16b, v2.16b\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "umax v8.16b, v8.16b, v16.16b\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "umax v16.16b, v17.16b, v16.16b\n"
+ "umax v9.16b, v9.16b, v16.16b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "umax v17.16b, v4.16b, v3.16b\n"
- "umax v16.16b, v28.16b, v22.16b\n"
+ "umax v17.16b, v5.16b, v4.16b\n"
+ "umax v16.16b, v3.16b, v2.16b\n"
"umax v16.16b, v17.16b, v16.16b\n"
- "umax v8.16b, v8.16b, v16.16b\n"
+ "umax v9.16b, v9.16b, v16.16b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v16.16b\n"
+ "ldr q16, [x20, x9]\n"
+ "umax v9.16b, v9.16b, v16.16b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"sub %x[n_channels], %x[n_channels], #0x10\n"
+ "str q9, [%x[outptr], x9]\n"
+ "add x9, x9, #0x10\n"
"cmp %x[n_channels], #0x10\n"
- "str q8, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
"lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x27\n"
- "movi v8.16b, #0x0\n"
+ "add %x[outptr], %x[outptr], x9\n"
+ "movi v9.16b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 24f\n"
"15:" // Oddments: 4 inputs loop
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
+ "movi v5.16b, #0x0\n"
"movi v4.16b, #0x0\n"
"movi v3.16b, #0x0\n"
- "add x20, x20, x27\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "movi v2.16b, #0x0\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "ldr d22, [x20], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d2, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
- "ld1 { v3.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v22.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
+ "ld1 { v3.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
- "ld1 { v3.h }[6], [x22], #0x2\n"
- "ld1 { v28.h }[6], [x21], #0x2\n"
- "ld1 { v22.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
+ "ld1 { v4.h }[6], [x22], #0x2\n"
+ "ld1 { v3.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[14], [x23], #0x1\n"
- "ld1 { v3.b }[14], [x22], #0x1\n"
- "ld1 { v28.b }[14], [x21], #0x1\n"
- "ld1 { v22.b }[14], [x20], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
+ "ld1 { v4.b }[14], [x22], #0x1\n"
+ "ld1 { v3.b }[14], [x21], #0x1\n"
+ "ld1 { v2.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[12], [x23], #0x1\n"
- "ld1 { v3.b }[12], [x22], #0x1\n"
- "ld1 { v28.b }[12], [x21], #0x1\n"
- "ld1 { v22.b }[12], [x20], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
+ "ld1 { v4.b }[12], [x22], #0x1\n"
+ "ld1 { v3.b }[12], [x21], #0x1\n"
+ "ld1 { v2.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
- "ld1 { v3.h }[4], [x22], #0x2\n"
- "ld1 { v28.h }[4], [x21], #0x2\n"
- "ld1 { v22.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
+ "ld1 { v4.h }[4], [x22], #0x2\n"
+ "ld1 { v3.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[10], [x23], #0x1\n"
- "ld1 { v3.b }[10], [x22], #0x1\n"
- "ld1 { v28.b }[10], [x21], #0x1\n"
- "ld1 { v22.b }[10], [x20], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
+ "ld1 { v4.b }[10], [x22], #0x1\n"
+ "ld1 { v3.b }[10], [x21], #0x1\n"
+ "ld1 { v2.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[8], [x23], #0x1\n"
- "ld1 { v3.b }[8], [x22], #0x1\n"
- "ld1 { v28.b }[8], [x21], #0x1\n"
- "ld1 { v22.b }[8], [x20], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
+ "ld1 { v4.b }[8], [x22], #0x1\n"
+ "ld1 { v3.b }[8], [x21], #0x1\n"
+ "ld1 { v2.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "ldr s22, [x20], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s2, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[6], [x23], #0x1\n"
- "ld1 { v3.b }[6], [x22], #0x1\n"
- "ld1 { v28.b }[6], [x21], #0x1\n"
- "ld1 { v22.b }[6], [x20], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
+ "ld1 { v4.b }[6], [x22], #0x1\n"
+ "ld1 { v3.b }[6], [x21], #0x1\n"
+ "ld1 { v2.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[4], [x23], #0x1\n"
- "ld1 { v3.b }[4], [x22], #0x1\n"
- "ld1 { v28.b }[4], [x21], #0x1\n"
- "ld1 { v22.b }[4], [x20], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
+ "ld1 { v4.b }[4], [x22], #0x1\n"
+ "ld1 { v3.b }[4], [x21], #0x1\n"
+ "ld1 { v2.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "ldr h28, [x21], #0x2\n"
- "ldr h22, [x20], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "ldr h2, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[2], [x23], #0x1\n"
- "ld1 { v3.b }[2], [x22], #0x1\n"
- "ld1 { v28.b }[2], [x21], #0x1\n"
- "ld1 { v22.b }[2], [x20], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
+ "ld1 { v4.b }[2], [x22], #0x1\n"
+ "ld1 { v3.b }[2], [x21], #0x1\n"
+ "ld1 { v2.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b4, [x23], #0x1\n"
- "ldr b3, [x22], #0x1\n"
- "ldr b28, [x21], #0x1\n"
- "ldr b22, [x20], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
+ "ldr b4, [x22], #0x1\n"
+ "ldr b3, [x21], #0x1\n"
+ "ldr b2, [x20], #0x1\n"
"23:" // Oddments: 4 inputs loop: Load: Bit 3: End
- "umax v17.16b, v4.16b, v3.16b\n"
- "umax v16.16b, v28.16b, v22.16b\n"
+ "umax v17.16b, v5.16b, v4.16b\n"
+ "umax v16.16b, v3.16b, v2.16b\n"
"subs x25, x25, #0x1\n"
"umax v16.16b, v17.16b, v16.16b\n"
- "umax v8.16b, v8.16b, v16.16b\n"
+ "umax v9.16b, v9.16b, v16.16b\n"
"bgt 15b\n"
"24:" // Oddments: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
"ldr x23, [x24], #0x8\n"
- "add x23, x23, x27\n"
- "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "add x23, x23, x9\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[14], [x23], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[12], [x23], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[10], [x23], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[8], [x23], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[6], [x23], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[4], [x23], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h4, [x23], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[2], [x23], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b4, [x23], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
"subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v4.16b\n"
+ "umax v9.16b, v9.16b, v5.16b\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
"tbz %x[n_channels], #3, 38f\n"
- "st1 { v8.d }[0], [%x[outptr]], #0x8\n"
+ "st1 { v9.d }[0], [%x[outptr]], #0x8\n"
"tbz %x[n_channels], #2, 36f\n"
- "st1 { v8.s }[2], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[2], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #1, 35f\n"
- "st1 { v8.h }[6], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[6], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[14], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[14], [%x[outptr]], #0x1\n"
"b 42f\n"
"35:" // Oddments: Store: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[12], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[12], [%x[outptr]], #0x1\n"
"b 42f\n"
"36:" // Oddments: Store: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 37f\n"
- "st1 { v8.h }[4], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[4], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[10], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[10], [%x[outptr]], #0x1\n"
"b 42f\n"
"37:" // Oddments: Store: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[8], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[8], [%x[outptr]], #0x1\n"
"b 42f\n"
"38:" // Oddments: Store: Bit 3: Unset
"tbz %x[n_channels], #2, 40f\n"
- "st1 { v8.s }[0], [%x[outptr]], #0x4\n"
+ "st1 { v9.s }[0], [%x[outptr]], #0x4\n"
"tbz %x[n_channels], #1, 39f\n"
- "st1 { v8.h }[2], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[2], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[6], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[6], [%x[outptr]], #0x1\n"
"b 42f\n"
"39:" // Oddments: Store: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[4], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[4], [%x[outptr]], #0x1\n"
"b 42f\n"
"40:" // Oddments: Store: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 41f\n"
- "st1 { v8.h }[0], [%x[outptr]], #0x2\n"
+ "st1 { v9.h }[0], [%x[outptr]], #0x2\n"
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[2], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[2], [%x[outptr]], #0x1\n"
"b 42f\n"
"41:" // Oddments: Store: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 42f\n"
- "st1 { v8.b }[0], [%x[outptr]], #0x1\n"
+ "st1 { v9.b }[0], [%x[outptr]], #0x1\n"
"42:" // Oddments: Store: Bit 3: End
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
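
The hunks above only renumber registers and reschedule loads in a64_u8_nhwc_max_generic_depthfirst_impl — the accumulators v8/v7/v6/v5 become v9/v8/v7/v6, the per-vector channel offsets move from x27/x26/x24/x23 to x9/x28/x27/x26, and the clobber list grows to match — while the reduction itself is unchanged. As a minimal scalar sketch of what that reduction computes (illustrative only; the function name and signature below are assumptions, not the library's API):

#include <cstdint>
#include <algorithm>

// Reference for u8 NHWC max pooling: a running elementwise maximum
// over n_valid_cells input rows, one byte per channel. The kernel
// above does the same 64 channels at a time, with accumulators
// initialised by "movi vN.16b, #0x0" and reduced with "umax".
void u8_nhwc_max_reference(uint64_t n_channels,
                           const uint8_t *const *inptrs, // n_valid_cells pointers
                           uint64_t n_valid_cells,
                           uint8_t *outptr)
{
    for (uint64_t c = 0; c < n_channels; c++)
    {
        uint8_t acc = 0; // matches the movi #0x0 initialisation
        for (uint64_t i = 0; i < n_valid_cells; i++)
        {
            acc = std::max(acc, inptrs[i][c]);
        }
        outptr[c] = acc;
    }
}

The "Oddments" labels in the assembly handle the sub-16-byte channel tail of this loop with bit-tested partial loads and stores, which is why the renaming touches every tbz branch as well.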
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index 31a3489e5c..0734e9b128 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -128,11 +128,11 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"1:" // 4-vectors of channels
"ld1r { v15.4s }, [%x[accumulator_init]]\n"
"lsr x23, %x[n_valid_cells], #0x1\n"
+ "mov x22, %x[inptrs]\n"
"mov v14.16b, v15.16b\n"
"mov v13.16b, v15.16b\n"
"mov v12.16b, v15.16b\n"
"mov v11.16b, v15.16b\n"
- "mov x22, %x[inptrs]\n"
"mov v10.16b, v15.16b\n"
"mov v9.16b, v15.16b\n"
"mov v8.16b, v15.16b\n"
@@ -146,9 +146,9 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"mov v0.16b, v15.16b\n"
"cbz x23, 4f\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
+ "ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
"ldr q29, [x21, x26]\n"
"ldr q28, [x20, x26]\n"
@@ -161,26 +161,26 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"uaddl v23.8h, v31.8b, v30.8b\n"
"uaddl2 v22.8h, v31.16b, v30.16b\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
- "ldr q30, [x20, x27]\n"
+ "subs x23, x23, #0x1\n"
"uaddl v21.8h, v29.8b, v28.8b\n"
"uaddl2 v20.8h, v29.16b, v28.16b\n"
- "ldr q29, [x21, x26]\n"
- "ldr q28, [x20, x26]\n"
+ "add x22, x22, #0x10\n"
"uaddl v19.8h, v27.8b, v26.8b\n"
"uaddl2 v18.8h, v27.16b, v26.16b\n"
- "ldr q27, [x21, x25]\n"
- "ldr q26, [x20, x25]\n"
+ "ldr q31, [x21, x27]\n"
+ "ldr q30, [x20, x27]\n"
+ "ldr q29, [x21, x26]\n"
+ "ldr q28, [x20, x26]\n"
"uaddl v17.8h, v25.8b, v24.8b\n"
"uaddl2 v16.8h, v25.16b, v24.16b\n"
- "ldr q25, [x21, x24]\n"
- "ldr q24, [x20, x24]\n"
- "subs x23, x23, #0x1\n"
+ "ldr q27, [x21, x25]\n"
+ "ldr q26, [x20, x25]\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
"uaddw2 v14.4s, v14.4s, v23.8h\n"
+ "ldr q25, [x21, x24]\n"
+ "ldr q24, [x20, x24]\n"
"uaddw v13.4s, v13.4s, v22.4h\n"
"uaddw2 v12.4s, v12.4s, v22.8h\n"
- "add x22, x22, #0x10\n"
"uaddw v11.4s, v11.4s, v21.4h\n"
"uaddw2 v10.4s, v10.4s, v21.8h\n"
"uaddw v9.4s, v9.4s, v20.4h\n"
@@ -224,17 +224,17 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
- "uxtl v23.8h, v16.8b\n"
- "uxtl2 v22.8h, v16.16b\n"
- "ldr q16, [x20, x26]\n"
+ "subs x23, x23, #0x1\n"
+ "ldr q19, [x20, x27]\n"
+ "ldr q18, [x20, x26]\n"
"ldr q17, [x20, x25]\n"
- "uxtl v21.8h, v16.8b\n"
- "uxtl2 v20.8h, v16.16b\n"
"ldr q16, [x20, x24]\n"
+ "uxtl v23.8h, v19.8b\n"
+ "uxtl2 v22.8h, v19.16b\n"
+ "uxtl v21.8h, v18.8b\n"
+ "uxtl2 v20.8h, v18.16b\n"
"uxtl v19.8h, v17.8b\n"
"uxtl2 v18.8h, v17.16b\n"
- "subs x23, x23, #0x1\n"
"uxtl v17.8h, v16.8b\n"
"uxtl2 v16.8h, v16.16b\n"
"uaddw v15.4s, v15.4s, v23.4h\n"
@@ -255,95 +255,95 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"uaddw2 v0.4s, v0.4s, v16.8h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1r { v19.4s }, [%x[left_shift]]\n"
- "ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
- "srshl v15.4s, v15.4s, v19.4s\n"
- "srshl v14.4s, v14.4s, v19.4s\n"
- "ld1r { v17.4s }, [%x[right_shift]]\n"
- "srshl v13.4s, v13.4s, v19.4s\n"
- "srshl v12.4s, v12.4s, v19.4s\n"
+ "ld1r { v21.4s }, [%x[left_shift]]\n"
+ "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
- "srshl v11.4s, v11.4s, v19.4s\n"
- "srshl v10.4s, v10.4s, v19.4s\n"
+ "movi v19.4s, #0x0\n"
+ "ld1r { v18.4s }, [%x[right_shift]]\n"
+ "ld1r { v17.4s }, [x20]\n"
+ "movi v16.4s, #0xff\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
- "srshl v9.4s, v9.4s, v19.4s\n"
- "srshl v8.4s, v8.4s, v19.4s\n"
"cmp %x[n_channels], #0x40\n"
- "srshl v7.4s, v7.4s, v19.4s\n"
- "srshl v6.4s, v6.4s, v19.4s\n"
- "srshl v5.4s, v5.4s, v19.4s\n"
- "srshl v4.4s, v4.4s, v19.4s\n"
- "srshl v3.4s, v3.4s, v19.4s\n"
- "srshl v2.4s, v2.4s, v19.4s\n"
- "srshl v1.4s, v1.4s, v19.4s\n"
- "srshl v0.4s, v0.4s, v19.4s\n"
- "sqrdmulh v15.4s, v15.4s, v18.4s\n"
- "sqrdmulh v14.4s, v14.4s, v18.4s\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "sqrdmulh v12.4s, v12.4s, v18.4s\n"
- "sqrdmulh v11.4s, v11.4s, v18.4s\n"
- "sqrdmulh v10.4s, v10.4s, v18.4s\n"
- "sqrdmulh v9.4s, v9.4s, v18.4s\n"
- "sqrdmulh v8.4s, v8.4s, v18.4s\n"
- "sqrdmulh v7.4s, v7.4s, v18.4s\n"
- "sqrdmulh v6.4s, v6.4s, v18.4s\n"
- "sqrdmulh v5.4s, v5.4s, v18.4s\n"
- "sqrdmulh v4.4s, v4.4s, v18.4s\n"
- "sqrdmulh v3.4s, v3.4s, v18.4s\n"
- "sqrdmulh v2.4s, v2.4s, v18.4s\n"
- "sqrdmulh v1.4s, v1.4s, v18.4s\n"
- "sqrdmulh v0.4s, v0.4s, v18.4s\n"
- "srshl v15.4s, v15.4s, v17.4s\n"
- "srshl v14.4s, v14.4s, v17.4s\n"
- "srshl v13.4s, v13.4s, v17.4s\n"
- "srshl v12.4s, v12.4s, v17.4s\n"
- "srshl v11.4s, v11.4s, v17.4s\n"
- "srshl v10.4s, v10.4s, v17.4s\n"
- "srshl v9.4s, v9.4s, v17.4s\n"
- "srshl v8.4s, v8.4s, v17.4s\n"
- "srshl v7.4s, v7.4s, v17.4s\n"
- "srshl v6.4s, v6.4s, v17.4s\n"
- "srshl v5.4s, v5.4s, v17.4s\n"
- "srshl v4.4s, v4.4s, v17.4s\n"
- "srshl v3.4s, v3.4s, v17.4s\n"
- "srshl v2.4s, v2.4s, v17.4s\n"
- "srshl v1.4s, v1.4s, v17.4s\n"
- "srshl v0.4s, v0.4s, v17.4s\n"
- "add v15.4s, v15.4s, v16.4s\n"
- "add v14.4s, v14.4s, v16.4s\n"
- "add v13.4s, v13.4s, v16.4s\n"
- "add v12.4s, v12.4s, v16.4s\n"
- "add v11.4s, v11.4s, v16.4s\n"
- "add v10.4s, v10.4s, v16.4s\n"
- "add v9.4s, v9.4s, v16.4s\n"
- "add v8.4s, v8.4s, v16.4s\n"
- "add v7.4s, v7.4s, v16.4s\n"
- "add v6.4s, v6.4s, v16.4s\n"
- "add v5.4s, v5.4s, v16.4s\n"
- "add v4.4s, v4.4s, v16.4s\n"
- "add v3.4s, v3.4s, v16.4s\n"
- "add v2.4s, v2.4s, v16.4s\n"
- "add v1.4s, v1.4s, v16.4s\n"
- "add v0.4s, v0.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v15.4s, v15.4s, v16.4s\n"
- "smax v14.4s, v14.4s, v16.4s\n"
- "smax v13.4s, v13.4s, v16.4s\n"
- "smax v12.4s, v12.4s, v16.4s\n"
- "smax v11.4s, v11.4s, v16.4s\n"
- "smax v10.4s, v10.4s, v16.4s\n"
- "smax v9.4s, v9.4s, v16.4s\n"
- "smax v8.4s, v8.4s, v16.4s\n"
- "smax v7.4s, v7.4s, v16.4s\n"
- "smax v6.4s, v6.4s, v16.4s\n"
- "smax v5.4s, v5.4s, v16.4s\n"
- "smax v4.4s, v4.4s, v16.4s\n"
- "smax v3.4s, v3.4s, v16.4s\n"
- "smax v2.4s, v2.4s, v16.4s\n"
- "smax v1.4s, v1.4s, v16.4s\n"
- "smax v0.4s, v0.4s, v16.4s\n"
- "movi v16.4s, #0xff\n"
+ "srshl v15.4s, v15.4s, v21.4s\n"
+ "srshl v14.4s, v14.4s, v21.4s\n"
+ "srshl v13.4s, v13.4s, v21.4s\n"
+ "srshl v12.4s, v12.4s, v21.4s\n"
+ "srshl v11.4s, v11.4s, v21.4s\n"
+ "srshl v10.4s, v10.4s, v21.4s\n"
+ "srshl v9.4s, v9.4s, v21.4s\n"
+ "srshl v8.4s, v8.4s, v21.4s\n"
+ "srshl v7.4s, v7.4s, v21.4s\n"
+ "srshl v6.4s, v6.4s, v21.4s\n"
+ "srshl v5.4s, v5.4s, v21.4s\n"
+ "srshl v4.4s, v4.4s, v21.4s\n"
+ "srshl v3.4s, v3.4s, v21.4s\n"
+ "srshl v2.4s, v2.4s, v21.4s\n"
+ "srshl v1.4s, v1.4s, v21.4s\n"
+ "srshl v0.4s, v0.4s, v21.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v20.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v20.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v20.4s\n"
+ "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+ "sqrdmulh v11.4s, v11.4s, v20.4s\n"
+ "sqrdmulh v10.4s, v10.4s, v20.4s\n"
+ "sqrdmulh v9.4s, v9.4s, v20.4s\n"
+ "sqrdmulh v8.4s, v8.4s, v20.4s\n"
+ "sqrdmulh v7.4s, v7.4s, v20.4s\n"
+ "sqrdmulh v6.4s, v6.4s, v20.4s\n"
+ "sqrdmulh v5.4s, v5.4s, v20.4s\n"
+ "sqrdmulh v4.4s, v4.4s, v20.4s\n"
+ "sqrdmulh v3.4s, v3.4s, v20.4s\n"
+ "sqrdmulh v2.4s, v2.4s, v20.4s\n"
+ "sqrdmulh v1.4s, v1.4s, v20.4s\n"
+ "sqrdmulh v0.4s, v0.4s, v20.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
+ "srshl v11.4s, v11.4s, v18.4s\n"
+ "srshl v10.4s, v10.4s, v18.4s\n"
+ "srshl v9.4s, v9.4s, v18.4s\n"
+ "srshl v8.4s, v8.4s, v18.4s\n"
+ "srshl v7.4s, v7.4s, v18.4s\n"
+ "srshl v6.4s, v6.4s, v18.4s\n"
+ "srshl v5.4s, v5.4s, v18.4s\n"
+ "srshl v4.4s, v4.4s, v18.4s\n"
+ "srshl v3.4s, v3.4s, v18.4s\n"
+ "srshl v2.4s, v2.4s, v18.4s\n"
+ "srshl v1.4s, v1.4s, v18.4s\n"
+ "srshl v0.4s, v0.4s, v18.4s\n"
+ "add v15.4s, v15.4s, v17.4s\n"
+ "add v14.4s, v14.4s, v17.4s\n"
+ "add v13.4s, v13.4s, v17.4s\n"
+ "add v12.4s, v12.4s, v17.4s\n"
+ "add v11.4s, v11.4s, v17.4s\n"
+ "add v10.4s, v10.4s, v17.4s\n"
+ "add v9.4s, v9.4s, v17.4s\n"
+ "add v8.4s, v8.4s, v17.4s\n"
+ "add v7.4s, v7.4s, v17.4s\n"
+ "add v6.4s, v6.4s, v17.4s\n"
+ "add v5.4s, v5.4s, v17.4s\n"
+ "add v4.4s, v4.4s, v17.4s\n"
+ "add v3.4s, v3.4s, v17.4s\n"
+ "add v2.4s, v2.4s, v17.4s\n"
+ "add v1.4s, v1.4s, v17.4s\n"
+ "add v0.4s, v0.4s, v17.4s\n"
+ "smax v15.4s, v15.4s, v19.4s\n"
+ "smax v14.4s, v14.4s, v19.4s\n"
+ "smax v13.4s, v13.4s, v19.4s\n"
+ "smax v12.4s, v12.4s, v19.4s\n"
+ "smax v11.4s, v11.4s, v19.4s\n"
+ "smax v10.4s, v10.4s, v19.4s\n"
+ "smax v9.4s, v9.4s, v19.4s\n"
+ "smax v8.4s, v8.4s, v19.4s\n"
+ "smax v7.4s, v7.4s, v19.4s\n"
+ "smax v6.4s, v6.4s, v19.4s\n"
+ "smax v5.4s, v5.4s, v19.4s\n"
+ "smax v4.4s, v4.4s, v19.4s\n"
+ "smax v3.4s, v3.4s, v19.4s\n"
+ "smax v2.4s, v2.4s, v19.4s\n"
+ "smax v1.4s, v1.4s, v19.4s\n"
+ "smax v0.4s, v0.4s, v19.4s\n"
"smin v15.4s, v15.4s, v16.4s\n"
"smin v14.4s, v14.4s, v16.4s\n"
"smin v13.4s, v13.4s, v16.4s\n"
@@ -361,19 +361,19 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"smin v1.4s, v1.4s, v16.4s\n"
"smin v0.4s, v0.4s, v16.4s\n"
"uzp1 v23.16b, v15.16b, v14.16b\n"
- "uzp1 v16.16b, v13.16b, v12.16b\n"
+ "uzp1 v19.16b, v13.16b, v12.16b\n"
"uzp1 v22.16b, v11.16b, v10.16b\n"
"uzp1 v18.16b, v9.16b, v8.16b\n"
"uzp1 v21.16b, v7.16b, v6.16b\n"
"uzp1 v17.16b, v5.16b, v4.16b\n"
"uzp1 v20.16b, v3.16b, v2.16b\n"
- "uzp1 v19.16b, v1.16b, v0.16b\n"
- "uzp1 v16.16b, v23.16b, v16.16b\n"
+ "uzp1 v16.16b, v1.16b, v0.16b\n"
+ "uzp1 v19.16b, v23.16b, v19.16b\n"
"uzp1 v18.16b, v22.16b, v18.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x40\n"
"uzp1 v17.16b, v21.16b, v17.16b\n"
- "uzp1 v16.16b, v20.16b, v19.16b\n"
+ "uzp1 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [%x[outptr], x27]\n"
+ "add x27, x27, #0x40\n"
"str q18, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
"str q17, [%x[outptr], x25]\n"
@@ -388,29 +388,29 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"8:" // Single vector of channels: Loop
"ld1r { v15.4s }, [%x[accumulator_init]]\n"
"lsr x23, %x[n_valid_cells], #0x1\n"
+ "mov x22, %x[inptrs]\n"
"mov v14.16b, v15.16b\n"
"mov v13.16b, v15.16b\n"
"mov v12.16b, v15.16b\n"
- "mov x22, %x[inptrs]\n"
"cbz x23, 11f\n"
"ldp x21, x20, [x22, #0x0]\n"
- "ldr q31, [x21, x27]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
+ "ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
"uaddl v17.8h, v31.8b, v30.8b\n"
"uaddl2 v16.8h, v31.16b, v30.16b\n"
"ldp x21, x20, [x22, #0x0]\n"
+ "subs x23, x23, #0x1\n"
+ "add x22, x22, #0x10\n"
"ldr q31, [x21, x27]\n"
"ldr q30, [x20, x27]\n"
- "subs x23, x23, #0x1\n"
"uaddw v15.4s, v15.4s, v17.4h\n"
"uaddw2 v14.4s, v14.4s, v17.8h\n"
"uaddw v13.4s, v13.4s, v16.4h\n"
"uaddw2 v12.4s, v12.4s, v16.8h\n"
- "add x22, x22, #0x10\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
"uaddl v17.8h, v31.8b, v30.8b\n"
@@ -424,45 +424,45 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
+ "subs x23, x23, #0x1\n"
"ldr q16, [x20, x27]\n"
"uxtl v17.8h, v16.8b\n"
"uxtl2 v16.8h, v16.16b\n"
- "subs x23, x23, #0x1\n"
"uaddw v15.4s, v15.4s, v17.4h\n"
"uaddw2 v14.4s, v14.4s, v17.8h\n"
"uaddw v13.4s, v13.4s, v16.4h\n"
"uaddw2 v12.4s, v12.4s, v16.8h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1r { v16.4s }, [%x[left_shift]]\n"
- "ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "ld1r { v17.4s }, [%x[right_shift]]\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
+ "ld1r { v21.4s }, [%x[left_shift]]\n"
+ "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
- "sqrdmulh v15.4s, v15.4s, v18.4s\n"
- "sqrdmulh v14.4s, v14.4s, v18.4s\n"
+ "movi v19.4s, #0x0\n"
+ "ld1r { v18.4s }, [%x[right_shift]]\n"
+ "ld1r { v17.4s }, [x20]\n"
+ "movi v16.4s, #0xff\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "sqrdmulh v12.4s, v12.4s, v18.4s\n"
"cmp %x[n_channels], #0x10\n"
- "srshl v15.4s, v15.4s, v17.4s\n"
- "srshl v14.4s, v14.4s, v17.4s\n"
- "srshl v13.4s, v13.4s, v17.4s\n"
- "srshl v12.4s, v12.4s, v17.4s\n"
- "add v15.4s, v15.4s, v16.4s\n"
- "add v14.4s, v14.4s, v16.4s\n"
- "add v13.4s, v13.4s, v16.4s\n"
- "add v12.4s, v12.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v15.4s, v15.4s, v16.4s\n"
- "smax v14.4s, v14.4s, v16.4s\n"
- "smax v13.4s, v13.4s, v16.4s\n"
- "smax v12.4s, v12.4s, v16.4s\n"
- "movi v16.4s, #0xff\n"
+ "srshl v15.4s, v15.4s, v21.4s\n"
+ "srshl v14.4s, v14.4s, v21.4s\n"
+ "srshl v13.4s, v13.4s, v21.4s\n"
+ "srshl v12.4s, v12.4s, v21.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v20.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v20.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v20.4s\n"
+ "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
+ "add v15.4s, v15.4s, v17.4s\n"
+ "add v14.4s, v14.4s, v17.4s\n"
+ "add v13.4s, v13.4s, v17.4s\n"
+ "add v12.4s, v12.4s, v17.4s\n"
+ "smax v15.4s, v15.4s, v19.4s\n"
+ "smax v14.4s, v14.4s, v19.4s\n"
+ "smax v13.4s, v13.4s, v19.4s\n"
+ "smax v12.4s, v12.4s, v19.4s\n"
"smin v15.4s, v15.4s, v16.4s\n"
"smin v14.4s, v14.4s, v16.4s\n"
"smin v13.4s, v13.4s, v16.4s\n"
@@ -478,18 +478,18 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"ld1r { v15.4s }, [%x[accumulator_init]]\n"
"lsr x23, %x[n_valid_cells], #0x1\n"
"add %x[outptr], %x[outptr], x27\n"
+ "mov x22, %x[inptrs]\n"
"mov v14.16b, v15.16b\n"
"mov v13.16b, v15.16b\n"
"mov v12.16b, v15.16b\n"
- "mov x22, %x[inptrs]\n"
"cbz x23, 24f\n"
"15:" // Oddments: 2 inputs loop
"ldp x21, x20, [x22, #0x0]\n"
"add x22, x22, #0x10\n"
- "add x21, x21, x27\n"
"movi v31.16b, #0x0\n"
- "add x20, x20, x27\n"
"movi v30.16b, #0x0\n"
+ "add x21, x21, x27\n"
+ "add x20, x20, x27\n"
"tbz %x[n_channels], #3, 19f\n"
"ldr d31, [x21], #0x8\n"
"ldr d30, [x20], #0x8\n"
@@ -563,8 +563,8 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"beq 34f\n"
"25:" // Oddments: Single input loop
"ldr x21, [x22], #0x8\n"
- "add x21, x21, x27\n"
"movi v31.16b, #0x0\n"
+ "add x21, x21, x27\n"
"tbz %x[n_channels], #3, 29f\n"
"ldr d31, [x21], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
@@ -619,33 +619,33 @@ void a64_u8q_nhwc_avg_generic_depthfirst_impl(
"uaddw2 v12.4s, v12.4s, v16.8h\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
- "ld1r { v16.4s }, [%x[left_shift]]\n"
- "ld1r { v18.4s }, [%x[combined_rescale_value]]\n"
- "srshl v15.4s, v15.4s, v16.4s\n"
- "srshl v14.4s, v14.4s, v16.4s\n"
- "ld1r { v17.4s }, [%x[right_shift]]\n"
- "srshl v13.4s, v13.4s, v16.4s\n"
- "srshl v12.4s, v12.4s, v16.4s\n"
+ "ld1r { v21.4s }, [%x[left_shift]]\n"
+ "ld1r { v20.4s }, [%x[combined_rescale_value]]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
- "sqrdmulh v15.4s, v15.4s, v18.4s\n"
- "sqrdmulh v14.4s, v14.4s, v18.4s\n"
- "sqrdmulh v13.4s, v13.4s, v18.4s\n"
- "sqrdmulh v12.4s, v12.4s, v18.4s\n"
- "srshl v15.4s, v15.4s, v17.4s\n"
- "srshl v14.4s, v14.4s, v17.4s\n"
- "srshl v13.4s, v13.4s, v17.4s\n"
- "srshl v12.4s, v12.4s, v17.4s\n"
- "add v15.4s, v15.4s, v16.4s\n"
- "add v14.4s, v14.4s, v16.4s\n"
- "add v13.4s, v13.4s, v16.4s\n"
- "add v12.4s, v12.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v15.4s, v15.4s, v16.4s\n"
- "smax v14.4s, v14.4s, v16.4s\n"
- "smax v13.4s, v13.4s, v16.4s\n"
- "smax v12.4s, v12.4s, v16.4s\n"
+ "movi v19.4s, #0x0\n"
+ "ld1r { v18.4s }, [%x[right_shift]]\n"
+ "ld1r { v17.4s }, [x20]\n"
"movi v16.4s, #0xff\n"
+ "srshl v15.4s, v15.4s, v21.4s\n"
+ "srshl v14.4s, v14.4s, v21.4s\n"
+ "srshl v13.4s, v13.4s, v21.4s\n"
+ "srshl v12.4s, v12.4s, v21.4s\n"
+ "sqrdmulh v15.4s, v15.4s, v20.4s\n"
+ "sqrdmulh v14.4s, v14.4s, v20.4s\n"
+ "sqrdmulh v13.4s, v13.4s, v20.4s\n"
+ "sqrdmulh v12.4s, v12.4s, v20.4s\n"
+ "srshl v15.4s, v15.4s, v18.4s\n"
+ "srshl v14.4s, v14.4s, v18.4s\n"
+ "srshl v13.4s, v13.4s, v18.4s\n"
+ "srshl v12.4s, v12.4s, v18.4s\n"
+ "add v15.4s, v15.4s, v17.4s\n"
+ "add v14.4s, v14.4s, v17.4s\n"
+ "add v13.4s, v13.4s, v17.4s\n"
+ "add v12.4s, v12.4s, v17.4s\n"
+ "smax v15.4s, v15.4s, v19.4s\n"
+ "smax v14.4s, v14.4s, v19.4s\n"
+ "smax v13.4s, v13.4s, v19.4s\n"
+ "smax v12.4s, v12.4s, v19.4s\n"
"smin v15.4s, v15.4s, v16.4s\n"
"smin v14.4s, v14.4s, v16.4s\n"
"smin v13.4s, v13.4s, v16.4s\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
index f4927c5536..11a8ad88ec 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/a64_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,266 +43,266 @@ void a64_u8q_nhwc_max_generic_depthfirst_impl(
{
__asm__ __volatile__(
"cmp %x[n_channels], #0x40\n"
- "mov x27, #0x0\n"
- "mov x26, #0x10\n" // cntb _, ALL, #1
- "mov x24, #0x20\n" // cntb _, ALL, #2
- "mov x23, #0x30\n" // cntb _, ALL, #3
+ "mov x9, #0x0\n"
+ "mov x28, #0x10\n" // cntb _, ALL, #1
+ "mov x27, #0x20\n" // cntb _, ALL, #2
+ "mov x26, #0x30\n" // cntb _, ALL, #3
"blt 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "movi v6.16b, #0x0\n"
+ "movi v9.16b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"movi v8.16b, #0x0\n"
"movi v7.16b, #0x0\n"
- "mov x22, %x[inptrs]\n"
- "movi v6.16b, #0x0\n"
- "movi v5.16b, #0x0\n"
"cbz x25, 4f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldr q2, [x21, x26]\n"
- "ldr q1, [x20, x26]\n"
- "ldr q0, [x21, x24]\n"
- "ldr q31, [x20, x24]\n"
- "ldr q30, [x21, x23]\n"
- "ldr q29, [x20, x23]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
+ "ldr q27, [x20, x27]\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "umax v22.16b, v2.16b, v1.16b\n"
- "ldr q2, [x21, x26]\n"
- "umax v18.16b, v27.16b, v21.16b\n"
- "ldr q1, [x20, x26]\n"
- "umax v21.16b, v0.16b, v31.16b\n"
- "ldr q0, [x21, x24]\n"
- "umax v17.16b, v26.16b, v20.16b\n"
- "ldr q31, [x20, x24]\n"
- "umax v20.16b, v30.16b, v29.16b\n"
- "ldr q30, [x21, x23]\n"
+ "umax v23.16b, v5.16b, v4.16b\n"
+ "umax v19.16b, v3.16b, v2.16b\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
+ "umax v22.16b, v1.16b, v0.16b\n"
+ "umax v18.16b, v31.16b, v30.16b\n"
+ "subs x25, x25, #0x1\n"
+ "add x24, x24, #0x20\n"
+ "umax v21.16b, v29.16b, v21.16b\n"
+ "umax v17.16b, v28.16b, v27.16b\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "umax v20.16b, v26.16b, v20.16b\n"
"umax v16.16b, v25.16b, v24.16b\n"
- "ldr q29, [x20, x23]\n"
+ "ldr q1, [x23, x28]\n"
+ "ldr q0, [x22, x28]\n"
"umax v19.16b, v23.16b, v19.16b\n"
"umax v18.16b, v22.16b, v18.16b\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "ldr q31, [x21, x28]\n"
+ "ldr q30, [x20, x28]\n"
"umax v17.16b, v21.16b, v17.16b\n"
+ "ldr q29, [x23, x27]\n"
+ "ldr q21, [x22, x27]\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "ldr q27, [x21, x26]\n"
- "ldr q21, [x20, x26]\n"
- "subs x25, x25, #0x1\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "ldr q26, [x21, x24]\n"
- "ldr q20, [x20, x24]\n"
- "umax v7.16b, v7.16b, v18.16b\n"
- "umax v6.16b, v6.16b, v17.16b\n"
- "ldr q25, [x21, x23]\n"
- "ldr q24, [x20, x23]\n"
- "umax v5.16b, v5.16b, v16.16b\n"
- "add x22, x22, #0x20\n"
+ "ldr q28, [x21, x27]\n"
+ "ldr q27, [x20, x27]\n"
+ "umax v6.16b, v6.16b, v19.16b\n"
+ "umax v9.16b, v9.16b, v18.16b\n"
+ "ldr q26, [x23, x26]\n"
+ "ldr q20, [x22, x26]\n"
+ "umax v8.16b, v8.16b, v17.16b\n"
+ "ldr q25, [x21, x26]\n"
+ "ldr q24, [x20, x26]\n"
+ "umax v7.16b, v7.16b, v16.16b\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "umax v23.16b, v4.16b, v3.16b\n"
- "umax v19.16b, v28.16b, v22.16b\n"
- "umax v22.16b, v2.16b, v1.16b\n"
- "umax v18.16b, v27.16b, v21.16b\n"
- "umax v21.16b, v0.16b, v31.16b\n"
- "umax v17.16b, v26.16b, v20.16b\n"
- "umax v20.16b, v30.16b, v29.16b\n"
+ "umax v23.16b, v5.16b, v4.16b\n"
+ "umax v19.16b, v3.16b, v2.16b\n"
+ "umax v22.16b, v1.16b, v0.16b\n"
+ "umax v18.16b, v31.16b, v30.16b\n"
+ "umax v21.16b, v29.16b, v21.16b\n"
+ "umax v17.16b, v28.16b, v27.16b\n"
+ "umax v20.16b, v26.16b, v20.16b\n"
"umax v16.16b, v25.16b, v24.16b\n"
"umax v19.16b, v23.16b, v19.16b\n"
"umax v18.16b, v22.16b, v18.16b\n"
"umax v17.16b, v21.16b, v17.16b\n"
"umax v16.16b, v20.16b, v16.16b\n"
- "umax v8.16b, v8.16b, v19.16b\n"
- "umax v7.16b, v7.16b, v18.16b\n"
- "umax v6.16b, v6.16b, v17.16b\n"
- "umax v5.16b, v5.16b, v16.16b\n"
+ "umax v6.16b, v6.16b, v19.16b\n"
+ "umax v9.16b, v9.16b, v18.16b\n"
+ "umax v8.16b, v8.16b, v17.16b\n"
+ "umax v7.16b, v7.16b, v16.16b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v16.16b\n"
- "ldr q17, [x20, x26]\n"
- "ldr q16, [x20, x24]\n"
- "umax v7.16b, v7.16b, v17.16b\n"
- "umax v6.16b, v6.16b, v16.16b\n"
- "ldr q16, [x20, x23]\n"
- "umax v5.16b, v5.16b, v16.16b\n"
+ "ldr q19, [x20, x9]\n"
+ "ldr q18, [x20, x28]\n"
+ "ldr q17, [x20, x27]\n"
+ "ldr q16, [x20, x26]\n"
+ "umax v6.16b, v6.16b, v19.16b\n"
+ "umax v9.16b, v9.16b, v18.16b\n"
+ "umax v8.16b, v8.16b, v17.16b\n"
+ "umax v7.16b, v7.16b, v16.16b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1r { v4.4s }, [x20]\n"
- "uxtl v23.8h, v8.8b\n"
- "uxtl2 v24.8h, v8.16b\n"
- "uxtl v22.8h, v7.8b\n"
- "uxtl2 v21.8h, v7.16b\n"
+ "add x21, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "uxtl v23.8h, v6.8b\n"
+ "uxtl2 v19.8h, v6.16b\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v3.4s }, [x20]\n"
- "uxtl v20.8h, v6.8b\n"
- "uxtl2 v17.8h, v6.16b\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v2.4s }, [x20]\n"
- "uxtl v19.8h, v5.8b\n"
- "uxtl2 v18.8h, v5.16b\n"
+ "ld1r { v6.4s }, [x21]\n"
+ "ld1r { v5.4s }, [x20]\n"
+ "uxtl v22.8h, v9.8b\n"
+ "uxtl2 v18.8h, v9.16b\n"
+ "uxtl v21.8h, v8.8b\n"
+ "uxtl2 v17.8h, v8.16b\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v1.4s }, [x20]\n"
- "neg v4.4s, v4.4s\n"
- "saddw v0.4s, v4.4s, v23.4h\n"
+ "ld1r { v4.4s }, [x21]\n"
+ "ld1r { v3.4s }, [x20]\n"
+ "uxtl v20.8h, v7.8b\n"
+ "uxtl2 v16.8h, v7.16b\n"
+ "neg v6.4s, v6.4s\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
- "saddw2 v23.4s, v4.4s, v23.8h\n"
- "saddw v31.4s, v4.4s, v24.4h\n"
+ "movi v2.4s, #0x0\n"
"sub %x[n_channels], %x[n_channels], #0x40\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "movi v0.4s, #0xff\n"
"cmp %x[n_channels], #0x40\n"
- "saddw2 v30.4s, v4.4s, v24.8h\n"
- "saddw v29.4s, v4.4s, v22.4h\n"
- "saddw2 v22.4s, v4.4s, v22.8h\n"
- "saddw v28.4s, v4.4s, v21.4h\n"
- "saddw2 v21.4s, v4.4s, v21.8h\n"
- "saddw v27.4s, v4.4s, v20.4h\n"
- "saddw2 v20.4s, v4.4s, v20.8h\n"
- "saddw v26.4s, v4.4s, v17.4h\n"
- "saddw2 v17.4s, v4.4s, v17.8h\n"
- "saddw v25.4s, v4.4s, v19.4h\n"
- "saddw2 v19.4s, v4.4s, v19.8h\n"
- "saddw v24.4s, v4.4s, v18.4h\n"
- "saddw2 v18.4s, v4.4s, v18.8h\n"
- "srshl v0.4s, v0.4s, v3.4s\n"
- "srshl v23.4s, v23.4s, v3.4s\n"
+ "saddw v31.4s, v6.4s, v23.4h\n"
+ "saddw2 v23.4s, v6.4s, v23.8h\n"
+ "saddw v30.4s, v6.4s, v19.4h\n"
+ "saddw2 v19.4s, v6.4s, v19.8h\n"
+ "saddw v29.4s, v6.4s, v22.4h\n"
+ "saddw2 v22.4s, v6.4s, v22.8h\n"
+ "saddw v28.4s, v6.4s, v18.4h\n"
+ "saddw2 v18.4s, v6.4s, v18.8h\n"
+ "saddw v27.4s, v6.4s, v21.4h\n"
+ "saddw2 v21.4s, v6.4s, v21.8h\n"
+ "saddw v26.4s, v6.4s, v17.4h\n"
+ "saddw2 v17.4s, v6.4s, v17.8h\n"
+ "saddw v25.4s, v6.4s, v20.4h\n"
+ "saddw2 v20.4s, v6.4s, v20.8h\n"
+ "saddw v24.4s, v6.4s, v16.4h\n"
+ "saddw2 v16.4s, v6.4s, v16.8h\n"
+ "srshl v31.4s, v31.4s, v5.4s\n"
+ "srshl v23.4s, v23.4s, v5.4s\n"
+ "srshl v30.4s, v30.4s, v5.4s\n"
+ "srshl v19.4s, v19.4s, v5.4s\n"
+ "srshl v29.4s, v29.4s, v5.4s\n"
+ "srshl v22.4s, v22.4s, v5.4s\n"
+ "srshl v28.4s, v28.4s, v5.4s\n"
+ "srshl v18.4s, v18.4s, v5.4s\n"
+ "srshl v27.4s, v27.4s, v5.4s\n"
+ "srshl v21.4s, v21.4s, v5.4s\n"
+ "srshl v26.4s, v26.4s, v5.4s\n"
+ "srshl v17.4s, v17.4s, v5.4s\n"
+ "srshl v25.4s, v25.4s, v5.4s\n"
+ "srshl v20.4s, v20.4s, v5.4s\n"
+ "srshl v24.4s, v24.4s, v5.4s\n"
+ "srshl v16.4s, v16.4s, v5.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v4.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v4.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v4.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v4.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v4.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v4.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v4.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v4.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v4.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v4.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v4.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v4.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v4.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v4.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v4.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v4.4s\n"
"srshl v31.4s, v31.4s, v3.4s\n"
+ "srshl v23.4s, v23.4s, v3.4s\n"
"srshl v30.4s, v30.4s, v3.4s\n"
+ "srshl v19.4s, v19.4s, v3.4s\n"
"srshl v29.4s, v29.4s, v3.4s\n"
"srshl v22.4s, v22.4s, v3.4s\n"
"srshl v28.4s, v28.4s, v3.4s\n"
- "srshl v21.4s, v21.4s, v3.4s\n"
+ "srshl v18.4s, v18.4s, v3.4s\n"
"srshl v27.4s, v27.4s, v3.4s\n"
- "srshl v20.4s, v20.4s, v3.4s\n"
+ "srshl v21.4s, v21.4s, v3.4s\n"
"srshl v26.4s, v26.4s, v3.4s\n"
"srshl v17.4s, v17.4s, v3.4s\n"
"srshl v25.4s, v25.4s, v3.4s\n"
- "srshl v19.4s, v19.4s, v3.4s\n"
+ "srshl v20.4s, v20.4s, v3.4s\n"
"srshl v24.4s, v24.4s, v3.4s\n"
- "srshl v18.4s, v18.4s, v3.4s\n"
- "sqrdmulh v0.4s, v0.4s, v2.4s\n"
- "sqrdmulh v23.4s, v23.4s, v2.4s\n"
- "sqrdmulh v31.4s, v31.4s, v2.4s\n"
- "sqrdmulh v30.4s, v30.4s, v2.4s\n"
- "sqrdmulh v29.4s, v29.4s, v2.4s\n"
- "sqrdmulh v22.4s, v22.4s, v2.4s\n"
- "sqrdmulh v28.4s, v28.4s, v2.4s\n"
- "sqrdmulh v21.4s, v21.4s, v2.4s\n"
- "sqrdmulh v27.4s, v27.4s, v2.4s\n"
- "sqrdmulh v20.4s, v20.4s, v2.4s\n"
- "sqrdmulh v26.4s, v26.4s, v2.4s\n"
- "sqrdmulh v17.4s, v17.4s, v2.4s\n"
- "sqrdmulh v25.4s, v25.4s, v2.4s\n"
- "sqrdmulh v19.4s, v19.4s, v2.4s\n"
- "sqrdmulh v24.4s, v24.4s, v2.4s\n"
- "sqrdmulh v18.4s, v18.4s, v2.4s\n"
- "srshl v0.4s, v0.4s, v1.4s\n"
- "srshl v23.4s, v23.4s, v1.4s\n"
- "srshl v31.4s, v31.4s, v1.4s\n"
- "srshl v30.4s, v30.4s, v1.4s\n"
- "srshl v29.4s, v29.4s, v1.4s\n"
- "srshl v22.4s, v22.4s, v1.4s\n"
- "srshl v28.4s, v28.4s, v1.4s\n"
- "srshl v21.4s, v21.4s, v1.4s\n"
- "srshl v27.4s, v27.4s, v1.4s\n"
- "srshl v20.4s, v20.4s, v1.4s\n"
- "srshl v26.4s, v26.4s, v1.4s\n"
- "srshl v17.4s, v17.4s, v1.4s\n"
- "srshl v25.4s, v25.4s, v1.4s\n"
- "srshl v19.4s, v19.4s, v1.4s\n"
- "srshl v24.4s, v24.4s, v1.4s\n"
- "srshl v18.4s, v18.4s, v1.4s\n"
- "add v0.4s, v0.4s, v16.4s\n"
- "add v23.4s, v23.4s, v16.4s\n"
- "add v31.4s, v31.4s, v16.4s\n"
- "add v30.4s, v30.4s, v16.4s\n"
- "add v29.4s, v29.4s, v16.4s\n"
- "add v22.4s, v22.4s, v16.4s\n"
- "add v28.4s, v28.4s, v16.4s\n"
- "add v21.4s, v21.4s, v16.4s\n"
- "add v27.4s, v27.4s, v16.4s\n"
- "add v20.4s, v20.4s, v16.4s\n"
- "add v26.4s, v26.4s, v16.4s\n"
- "add v17.4s, v17.4s, v16.4s\n"
- "add v25.4s, v25.4s, v16.4s\n"
- "add v19.4s, v19.4s, v16.4s\n"
- "add v24.4s, v24.4s, v16.4s\n"
- "add v18.4s, v18.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v0.4s, v0.4s, v16.4s\n"
- "smax v23.4s, v23.4s, v16.4s\n"
- "smax v31.4s, v31.4s, v16.4s\n"
- "smax v30.4s, v30.4s, v16.4s\n"
- "smax v29.4s, v29.4s, v16.4s\n"
- "smax v22.4s, v22.4s, v16.4s\n"
- "smax v28.4s, v28.4s, v16.4s\n"
- "smax v21.4s, v21.4s, v16.4s\n"
- "smax v27.4s, v27.4s, v16.4s\n"
- "smax v20.4s, v20.4s, v16.4s\n"
- "smax v26.4s, v26.4s, v16.4s\n"
- "smax v17.4s, v17.4s, v16.4s\n"
- "smax v25.4s, v25.4s, v16.4s\n"
- "smax v19.4s, v19.4s, v16.4s\n"
- "smax v24.4s, v24.4s, v16.4s\n"
- "smax v18.4s, v18.4s, v16.4s\n"
- "movi v16.4s, #0xff\n"
- "smin v0.4s, v0.4s, v16.4s\n"
- "smin v23.4s, v23.4s, v16.4s\n"
- "smin v31.4s, v31.4s, v16.4s\n"
- "smin v30.4s, v30.4s, v16.4s\n"
- "smin v29.4s, v29.4s, v16.4s\n"
- "smin v22.4s, v22.4s, v16.4s\n"
- "smin v28.4s, v28.4s, v16.4s\n"
- "smin v21.4s, v21.4s, v16.4s\n"
- "smin v27.4s, v27.4s, v16.4s\n"
- "smin v20.4s, v20.4s, v16.4s\n"
- "smin v26.4s, v26.4s, v16.4s\n"
- "smin v17.4s, v17.4s, v16.4s\n"
- "smin v25.4s, v25.4s, v16.4s\n"
- "smin v19.4s, v19.4s, v16.4s\n"
- "smin v24.4s, v24.4s, v16.4s\n"
- "smin v18.4s, v18.4s, v16.4s\n"
- "uzp1 v23.16b, v0.16b, v23.16b\n"
- "uzp1 v16.16b, v31.16b, v30.16b\n"
+ "srshl v16.4s, v16.4s, v3.4s\n"
+ "add v31.4s, v31.4s, v1.4s\n"
+ "add v23.4s, v23.4s, v1.4s\n"
+ "add v30.4s, v30.4s, v1.4s\n"
+ "add v19.4s, v19.4s, v1.4s\n"
+ "add v29.4s, v29.4s, v1.4s\n"
+ "add v22.4s, v22.4s, v1.4s\n"
+ "add v28.4s, v28.4s, v1.4s\n"
+ "add v18.4s, v18.4s, v1.4s\n"
+ "add v27.4s, v27.4s, v1.4s\n"
+ "add v21.4s, v21.4s, v1.4s\n"
+ "add v26.4s, v26.4s, v1.4s\n"
+ "add v17.4s, v17.4s, v1.4s\n"
+ "add v25.4s, v25.4s, v1.4s\n"
+ "add v20.4s, v20.4s, v1.4s\n"
+ "add v24.4s, v24.4s, v1.4s\n"
+ "add v16.4s, v16.4s, v1.4s\n"
+ "smax v31.4s, v31.4s, v2.4s\n"
+ "smax v23.4s, v23.4s, v2.4s\n"
+ "smax v30.4s, v30.4s, v2.4s\n"
+ "smax v19.4s, v19.4s, v2.4s\n"
+ "smax v29.4s, v29.4s, v2.4s\n"
+ "smax v22.4s, v22.4s, v2.4s\n"
+ "smax v28.4s, v28.4s, v2.4s\n"
+ "smax v18.4s, v18.4s, v2.4s\n"
+ "smax v27.4s, v27.4s, v2.4s\n"
+ "smax v21.4s, v21.4s, v2.4s\n"
+ "smax v26.4s, v26.4s, v2.4s\n"
+ "smax v17.4s, v17.4s, v2.4s\n"
+ "smax v25.4s, v25.4s, v2.4s\n"
+ "smax v20.4s, v20.4s, v2.4s\n"
+ "smax v24.4s, v24.4s, v2.4s\n"
+ "smax v16.4s, v16.4s, v2.4s\n"
+ "smin v31.4s, v31.4s, v0.4s\n"
+ "smin v23.4s, v23.4s, v0.4s\n"
+ "smin v30.4s, v30.4s, v0.4s\n"
+ "smin v19.4s, v19.4s, v0.4s\n"
+ "smin v29.4s, v29.4s, v0.4s\n"
+ "smin v22.4s, v22.4s, v0.4s\n"
+ "smin v28.4s, v28.4s, v0.4s\n"
+ "smin v18.4s, v18.4s, v0.4s\n"
+ "smin v27.4s, v27.4s, v0.4s\n"
+ "smin v21.4s, v21.4s, v0.4s\n"
+ "smin v26.4s, v26.4s, v0.4s\n"
+ "smin v17.4s, v17.4s, v0.4s\n"
+ "smin v25.4s, v25.4s, v0.4s\n"
+ "smin v20.4s, v20.4s, v0.4s\n"
+ "smin v24.4s, v24.4s, v0.4s\n"
+ "smin v16.4s, v16.4s, v0.4s\n"
+ "uzp1 v23.16b, v31.16b, v23.16b\n"
+ "uzp1 v19.16b, v30.16b, v19.16b\n"
"uzp1 v22.16b, v29.16b, v22.16b\n"
- "uzp1 v21.16b, v28.16b, v21.16b\n"
- "uzp1 v20.16b, v27.16b, v20.16b\n"
+ "uzp1 v18.16b, v28.16b, v18.16b\n"
+ "uzp1 v21.16b, v27.16b, v21.16b\n"
"uzp1 v17.16b, v26.16b, v17.16b\n"
- "uzp1 v19.16b, v25.16b, v19.16b\n"
- "uzp1 v18.16b, v24.16b, v18.16b\n"
- "uzp1 v16.16b, v23.16b, v16.16b\n"
- "str q16, [%x[outptr], x27]\n"
+ "uzp1 v20.16b, v25.16b, v20.16b\n"
+ "uzp1 v16.16b, v24.16b, v16.16b\n"
+ "uzp1 v19.16b, v23.16b, v19.16b\n"
+ "uzp1 v18.16b, v22.16b, v18.16b\n"
+ "uzp1 v17.16b, v21.16b, v17.16b\n"
+ "uzp1 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [%x[outptr], x9]\n"
+ "add x9, x9, #0x40\n"
+ "str q18, [%x[outptr], x28]\n"
+ "add x28, x28, #0x40\n"
+ "str q17, [%x[outptr], x27]\n"
"add x27, x27, #0x40\n"
- "uzp1 v16.16b, v22.16b, v21.16b\n"
- "uzp1 v17.16b, v20.16b, v17.16b\n"
"str q16, [%x[outptr], x26]\n"
"add x26, x26, #0x40\n"
- "uzp1 v16.16b, v19.16b, v18.16b\n"
- "str q17, [%x[outptr], x24]\n"
- "add x24, x24, #0x40\n"
- "str q16, [%x[outptr], x23]\n"
- "add x23, x23, #0x40\n"
"bge 1b\n"
"cbz %x[n_channels], 43f\n"
"7:" // Single vector of channels
@@ -310,314 +310,314 @@ void a64_u8q_nhwc_max_generic_depthfirst_impl(
"blt 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "movi v8.16b, #0x0\n"
- "mov x22, %x[inptrs]\n"
+ "movi v6.16b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q3, [x20, x27]\n"
- "ldp x21, x20, [x22, #0x10]\n"
- "add x22, x22, #0x20\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "umax v17.16b, v4.16b, v3.16b\n"
- "umax v16.16b, v28.16b, v22.16b\n"
- "ldp x21, x20, [x22, #0x0]\n"
- "ldr q4, [x21, x27]\n"
- "ldr q3, [x20, x27]\n"
- "umax v16.16b, v17.16b, v16.16b\n"
- "ldp x21, x20, [x22, #0x10]\n"
+ "umax v17.16b, v5.16b, v4.16b\n"
+ "umax v16.16b, v3.16b, v2.16b\n"
+ "ldp x23, x22, [x24, #0x0]\n"
+ "ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
- "ldr q28, [x21, x27]\n"
- "ldr q22, [x20, x27]\n"
- "umax v8.16b, v8.16b, v16.16b\n"
- "add x22, x22, #0x20\n"
+ "add x24, x24, #0x20\n"
+ "ldr q5, [x23, x9]\n"
+ "ldr q4, [x22, x9]\n"
+ "ldr q3, [x21, x9]\n"
+ "ldr q2, [x20, x9]\n"
+ "umax v16.16b, v17.16b, v16.16b\n"
+ "umax v6.16b, v6.16b, v16.16b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "umax v17.16b, v4.16b, v3.16b\n"
- "umax v16.16b, v28.16b, v22.16b\n"
+ "umax v17.16b, v5.16b, v4.16b\n"
+ "umax v16.16b, v3.16b, v2.16b\n"
"umax v16.16b, v17.16b, v16.16b\n"
- "umax v8.16b, v8.16b, v16.16b\n"
+ "umax v6.16b, v6.16b, v16.16b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
- "ldr x20, [x22], #0x8\n"
- "ldr q16, [x20, x27]\n"
+ "ldr x20, [x24], #0x8\n"
"subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v16.16b\n"
+ "ldr q16, [x20, x9]\n"
+ "umax v6.16b, v6.16b, v16.16b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1r { v18.4s }, [x20]\n"
- "uxtl v17.8h, v8.8b\n"
- "uxtl2 v16.8h, v8.16b\n"
- "neg v18.4s, v18.4s\n"
+ "add x21, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "uxtl v17.8h, v6.8b\n"
+ "uxtl2 v26.8h, v6.16b\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v23.4s }, [x20]\n"
- "saddw v22.4s, v18.4s, v17.4h\n"
- "saddw2 v21.4s, v18.4s, v17.8h\n"
- "saddw v20.4s, v18.4s, v16.4h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v19.4s }, [x20]\n"
- "saddw2 v18.4s, v18.4s, v16.8h\n"
- "srshl v22.4s, v22.4s, v23.4s\n"
+ "ld1r { v16.4s }, [x21]\n"
+ "ld1r { v25.4s }, [x20]\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v17.4s }, [x20]\n"
- "srshl v21.4s, v21.4s, v23.4s\n"
- "srshl v20.4s, v20.4s, v23.4s\n"
+ "ld1r { v24.4s }, [x21]\n"
+ "ld1r { v23.4s }, [x20]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
- "srshl v18.4s, v18.4s, v23.4s\n"
- "sqrdmulh v22.4s, v22.4s, v19.4s\n"
+ "movi v22.4s, #0x0\n"
+ "ld1r { v21.4s }, [x20]\n"
+ "movi v20.4s, #0xff\n"
"sub %x[n_channels], %x[n_channels], #0x10\n"
+ "neg v16.4s, v16.4s\n"
"cmp %x[n_channels], #0x10\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqrdmulh v20.4s, v20.4s, v19.4s\n"
- "sqrdmulh v18.4s, v18.4s, v19.4s\n"
- "srshl v22.4s, v22.4s, v17.4s\n"
- "srshl v21.4s, v21.4s, v17.4s\n"
- "srshl v20.4s, v20.4s, v17.4s\n"
- "srshl v18.4s, v18.4s, v17.4s\n"
- "add v22.4s, v22.4s, v16.4s\n"
- "add v21.4s, v21.4s, v16.4s\n"
- "add v20.4s, v20.4s, v16.4s\n"
- "add v18.4s, v18.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v22.4s, v22.4s, v16.4s\n"
- "smax v21.4s, v21.4s, v16.4s\n"
- "smax v20.4s, v20.4s, v16.4s\n"
- "smax v18.4s, v18.4s, v16.4s\n"
- "movi v16.4s, #0xff\n"
- "smin v22.4s, v22.4s, v16.4s\n"
- "smin v21.4s, v21.4s, v16.4s\n"
- "smin v20.4s, v20.4s, v16.4s\n"
- "smin v18.4s, v18.4s, v16.4s\n"
- "uzp1 v17.16b, v22.16b, v21.16b\n"
- "uzp1 v16.16b, v20.16b, v18.16b\n"
+ "saddw v19.4s, v16.4s, v17.4h\n"
+ "saddw2 v17.4s, v16.4s, v17.8h\n"
+ "saddw v18.4s, v16.4s, v26.4h\n"
+ "saddw2 v16.4s, v16.4s, v26.8h\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "srshl v17.4s, v17.4s, v25.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v25.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v24.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v24.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v24.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v24.4s\n"
+ "srshl v19.4s, v19.4s, v23.4s\n"
+ "srshl v17.4s, v17.4s, v23.4s\n"
+ "srshl v18.4s, v18.4s, v23.4s\n"
+ "srshl v16.4s, v16.4s, v23.4s\n"
+ "add v19.4s, v19.4s, v21.4s\n"
+ "add v17.4s, v17.4s, v21.4s\n"
+ "add v18.4s, v18.4s, v21.4s\n"
+ "add v16.4s, v16.4s, v21.4s\n"
+ "smax v19.4s, v19.4s, v22.4s\n"
+ "smax v17.4s, v17.4s, v22.4s\n"
+ "smax v18.4s, v18.4s, v22.4s\n"
+ "smax v16.4s, v16.4s, v22.4s\n"
+ "smin v19.4s, v19.4s, v20.4s\n"
+ "smin v17.4s, v17.4s, v20.4s\n"
+ "smin v18.4s, v18.4s, v20.4s\n"
+ "smin v16.4s, v16.4s, v20.4s\n"
+ "uzp1 v17.16b, v19.16b, v17.16b\n"
+ "uzp1 v16.16b, v18.16b, v16.16b\n"
"uzp1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [%x[outptr], x27]\n"
- "add x27, x27, #0x10\n"
+ "str q16, [%x[outptr], x9]\n"
+ "add x9, x9, #0x10\n"
"bge 8b\n"
"cbz %x[n_channels], 43f\n"
"14:" // Oddments
"lsr x25, %x[n_valid_cells], #0x2\n"
- "add %x[outptr], %x[outptr], x27\n"
- "movi v8.16b, #0x0\n"
+ "add %x[outptr], %x[outptr], x9\n"
+ "movi v6.16b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 24f\n"
"15:" // Oddments: 4 inputs loop
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "add x23, x23, x27\n"
- "add x22, x22, x27\n"
- "add x21, x21, x27\n"
+ "movi v5.16b, #0x0\n"
"movi v4.16b, #0x0\n"
"movi v3.16b, #0x0\n"
- "add x20, x20, x27\n"
- "movi v28.16b, #0x0\n"
- "movi v22.16b, #0x0\n"
+ "movi v2.16b, #0x0\n"
+ "add x23, x23, x9\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x9\n"
+ "add x20, x20, x9\n"
"tbz %x[n_channels], #3, 19f\n"
- "ldr d4, [x23], #0x8\n"
- "ldr d3, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
- "ldr d22, [x20], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
+ "ldr d4, [x22], #0x8\n"
+ "ldr d3, [x21], #0x8\n"
+ "ldr d2, [x20], #0x8\n"
"tbz %x[n_channels], #2, 17f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
- "ld1 { v3.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
- "ld1 { v22.s }[2], [x20], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
+ "ld1 { v4.s }[2], [x22], #0x4\n"
+ "ld1 { v3.s }[2], [x21], #0x4\n"
+ "ld1 { v2.s }[2], [x20], #0x4\n"
"tbz %x[n_channels], #1, 16f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
- "ld1 { v3.h }[6], [x22], #0x2\n"
- "ld1 { v28.h }[6], [x21], #0x2\n"
- "ld1 { v22.h }[6], [x20], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
+ "ld1 { v4.h }[6], [x22], #0x2\n"
+ "ld1 { v3.h }[6], [x21], #0x2\n"
+ "ld1 { v2.h }[6], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[14], [x23], #0x1\n"
- "ld1 { v3.b }[14], [x22], #0x1\n"
- "ld1 { v28.b }[14], [x21], #0x1\n"
- "ld1 { v22.b }[14], [x20], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
+ "ld1 { v4.b }[14], [x22], #0x1\n"
+ "ld1 { v3.b }[14], [x21], #0x1\n"
+ "ld1 { v2.b }[14], [x20], #0x1\n"
"b 23f\n"
"16:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[12], [x23], #0x1\n"
- "ld1 { v3.b }[12], [x22], #0x1\n"
- "ld1 { v28.b }[12], [x21], #0x1\n"
- "ld1 { v22.b }[12], [x20], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
+ "ld1 { v4.b }[12], [x22], #0x1\n"
+ "ld1 { v3.b }[12], [x21], #0x1\n"
+ "ld1 { v2.b }[12], [x20], #0x1\n"
"b 23f\n"
"17:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 18f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
- "ld1 { v3.h }[4], [x22], #0x2\n"
- "ld1 { v28.h }[4], [x21], #0x2\n"
- "ld1 { v22.h }[4], [x20], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
+ "ld1 { v4.h }[4], [x22], #0x2\n"
+ "ld1 { v3.h }[4], [x21], #0x2\n"
+ "ld1 { v2.h }[4], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[10], [x23], #0x1\n"
- "ld1 { v3.b }[10], [x22], #0x1\n"
- "ld1 { v28.b }[10], [x21], #0x1\n"
- "ld1 { v22.b }[10], [x20], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
+ "ld1 { v4.b }[10], [x22], #0x1\n"
+ "ld1 { v3.b }[10], [x21], #0x1\n"
+ "ld1 { v2.b }[10], [x20], #0x1\n"
"b 23f\n"
"18:" // Oddments: 4 inputs loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[8], [x23], #0x1\n"
- "ld1 { v3.b }[8], [x22], #0x1\n"
- "ld1 { v28.b }[8], [x21], #0x1\n"
- "ld1 { v22.b }[8], [x20], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
+ "ld1 { v4.b }[8], [x22], #0x1\n"
+ "ld1 { v3.b }[8], [x21], #0x1\n"
+ "ld1 { v2.b }[8], [x20], #0x1\n"
"b 23f\n"
"19:" // Oddments: 4 inputs loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 21f\n"
- "ldr s4, [x23], #0x4\n"
- "ldr s3, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
- "ldr s22, [x20], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
+ "ldr s4, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "ldr s2, [x20], #0x4\n"
"tbz %x[n_channels], #1, 20f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
- "ld1 { v3.h }[2], [x22], #0x2\n"
- "ld1 { v28.h }[2], [x21], #0x2\n"
- "ld1 { v22.h }[2], [x20], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
+ "ld1 { v4.h }[2], [x22], #0x2\n"
+ "ld1 { v3.h }[2], [x21], #0x2\n"
+ "ld1 { v2.h }[2], [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[6], [x23], #0x1\n"
- "ld1 { v3.b }[6], [x22], #0x1\n"
- "ld1 { v28.b }[6], [x21], #0x1\n"
- "ld1 { v22.b }[6], [x20], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
+ "ld1 { v4.b }[6], [x22], #0x1\n"
+ "ld1 { v3.b }[6], [x21], #0x1\n"
+ "ld1 { v2.b }[6], [x20], #0x1\n"
"b 23f\n"
"20:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[4], [x23], #0x1\n"
- "ld1 { v3.b }[4], [x22], #0x1\n"
- "ld1 { v28.b }[4], [x21], #0x1\n"
- "ld1 { v22.b }[4], [x20], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
+ "ld1 { v4.b }[4], [x22], #0x1\n"
+ "ld1 { v3.b }[4], [x21], #0x1\n"
+ "ld1 { v2.b }[4], [x20], #0x1\n"
"b 23f\n"
"21:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 22f\n"
- "ldr h4, [x23], #0x2\n"
- "ldr h3, [x22], #0x2\n"
- "ldr h28, [x21], #0x2\n"
- "ldr h22, [x20], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "ldr h2, [x20], #0x2\n"
"tbz %x[n_channels], #0, 23f\n"
- "ld1 { v4.b }[2], [x23], #0x1\n"
- "ld1 { v3.b }[2], [x22], #0x1\n"
- "ld1 { v28.b }[2], [x21], #0x1\n"
- "ld1 { v22.b }[2], [x20], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
+ "ld1 { v4.b }[2], [x22], #0x1\n"
+ "ld1 { v3.b }[2], [x21], #0x1\n"
+ "ld1 { v2.b }[2], [x20], #0x1\n"
"b 23f\n"
"22:" // Oddments: 4 inputs loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 23f\n"
- "ldr b4, [x23], #0x1\n"
- "ldr b3, [x22], #0x1\n"
- "ldr b28, [x21], #0x1\n"
- "ldr b22, [x20], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
+ "ldr b4, [x22], #0x1\n"
+ "ldr b3, [x21], #0x1\n"
+ "ldr b2, [x20], #0x1\n"
"23:" // Oddments: 4 inputs loop: Load: Bit 3: End
- "umax v17.16b, v4.16b, v3.16b\n"
- "umax v16.16b, v28.16b, v22.16b\n"
+ "umax v17.16b, v5.16b, v4.16b\n"
+ "umax v16.16b, v3.16b, v2.16b\n"
"subs x25, x25, #0x1\n"
"umax v16.16b, v17.16b, v16.16b\n"
- "umax v8.16b, v8.16b, v16.16b\n"
+ "umax v6.16b, v6.16b, v16.16b\n"
"bgt 15b\n"
"24:" // Oddments: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 34f\n"
"25:" // Oddments: Single input loop
"ldr x23, [x24], #0x8\n"
- "add x23, x23, x27\n"
- "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "add x23, x23, x9\n"
"tbz %x[n_channels], #3, 29f\n"
- "ldr d4, [x23], #0x8\n"
+ "ldr d5, [x23], #0x8\n"
"tbz %x[n_channels], #2, 27f\n"
- "ld1 { v4.s }[2], [x23], #0x4\n"
+ "ld1 { v5.s }[2], [x23], #0x4\n"
"tbz %x[n_channels], #1, 26f\n"
- "ld1 { v4.h }[6], [x23], #0x2\n"
+ "ld1 { v5.h }[6], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[14], [x23], #0x1\n"
+ "ld1 { v5.b }[14], [x23], #0x1\n"
"b 33f\n"
"26:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[12], [x23], #0x1\n"
+ "ld1 { v5.b }[12], [x23], #0x1\n"
"b 33f\n"
"27:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset
"tbz %x[n_channels], #1, 28f\n"
- "ld1 { v4.h }[4], [x23], #0x2\n"
+ "ld1 { v5.h }[4], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[10], [x23], #0x1\n"
+ "ld1 { v5.b }[10], [x23], #0x1\n"
"b 33f\n"
"28:" // Oddments: Single input loop: Load: Bit 3: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[8], [x23], #0x1\n"
+ "ld1 { v5.b }[8], [x23], #0x1\n"
"b 33f\n"
"29:" // Oddments: Single input loop: Load: Bit 3: Unset
"tbz %x[n_channels], #2, 31f\n"
- "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x23], #0x4\n"
"tbz %x[n_channels], #1, 30f\n"
- "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v5.h }[2], [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[6], [x23], #0x1\n"
+ "ld1 { v5.b }[6], [x23], #0x1\n"
"b 33f\n"
"30:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[4], [x23], #0x1\n"
+ "ld1 { v5.b }[4], [x23], #0x1\n"
"b 33f\n"
"31:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset
"tbz %x[n_channels], #1, 32f\n"
- "ldr h4, [x23], #0x2\n"
+ "ldr h5, [x23], #0x2\n"
"tbz %x[n_channels], #0, 33f\n"
- "ld1 { v4.b }[2], [x23], #0x1\n"
+ "ld1 { v5.b }[2], [x23], #0x1\n"
"b 33f\n"
"32:" // Oddments: Single input loop: Load: Bit 3: Unset: Bit 2: Unset: Bit 1: Unset
"tbz %x[n_channels], #0, 33f\n"
- "ldr b4, [x23], #0x1\n"
+ "ldr b5, [x23], #0x1\n"
"33:" // Oddments: Single input loop: Load: Bit 3: End
"subs x21, x21, #0x1\n"
- "umax v8.16b, v8.16b, v4.16b\n"
+ "umax v6.16b, v6.16b, v5.16b\n"
"bgt 25b\n"
"34:" // Oddments: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1r { v18.4s }, [x20]\n"
- "uxtl v17.8h, v8.8b\n"
- "uxtl2 v16.8h, v8.16b\n"
- "neg v18.4s, v18.4s\n"
+ "add x21, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ "uxtl v17.8h, v6.8b\n"
+ "uxtl2 v26.8h, v6.16b\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1r { v23.4s }, [x20]\n"
- "saddw v22.4s, v18.4s, v17.4h\n"
- "saddw2 v21.4s, v18.4s, v17.8h\n"
- "saddw v20.4s, v18.4s, v16.4h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1r { v19.4s }, [x20]\n"
- "saddw2 v18.4s, v18.4s, v16.8h\n"
- "srshl v22.4s, v22.4s, v23.4s\n"
+ "ld1r { v16.4s }, [x21]\n"
+ "ld1r { v25.4s }, [x20]\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1r { v17.4s }, [x20]\n"
- "srshl v21.4s, v21.4s, v23.4s\n"
- "srshl v20.4s, v20.4s, v23.4s\n"
+ "ld1r { v24.4s }, [x21]\n"
+ "ld1r { v23.4s }, [x20]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
+ "movi v22.4s, #0x0\n"
+ "ld1r { v21.4s }, [x20]\n"
+ "movi v20.4s, #0xff\n"
+ "neg v16.4s, v16.4s\n"
+ "saddw v19.4s, v16.4s, v17.4h\n"
+ "saddw2 v17.4s, v16.4s, v17.8h\n"
+ "saddw v18.4s, v16.4s, v26.4h\n"
+ "saddw2 v16.4s, v16.4s, v26.8h\n"
+ "srshl v19.4s, v19.4s, v25.4s\n"
+ "srshl v17.4s, v17.4s, v25.4s\n"
+ "srshl v18.4s, v18.4s, v25.4s\n"
+ "srshl v16.4s, v16.4s, v25.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v24.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v24.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v24.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v24.4s\n"
+ "srshl v19.4s, v19.4s, v23.4s\n"
+ "srshl v17.4s, v17.4s, v23.4s\n"
"srshl v18.4s, v18.4s, v23.4s\n"
- "sqrdmulh v22.4s, v22.4s, v19.4s\n"
- "sqrdmulh v21.4s, v21.4s, v19.4s\n"
- "sqrdmulh v20.4s, v20.4s, v19.4s\n"
- "sqrdmulh v18.4s, v18.4s, v19.4s\n"
- "srshl v22.4s, v22.4s, v17.4s\n"
- "srshl v21.4s, v21.4s, v17.4s\n"
- "srshl v20.4s, v20.4s, v17.4s\n"
- "srshl v18.4s, v18.4s, v17.4s\n"
- "add v22.4s, v22.4s, v16.4s\n"
- "add v21.4s, v21.4s, v16.4s\n"
- "add v20.4s, v20.4s, v16.4s\n"
- "add v18.4s, v18.4s, v16.4s\n"
- "movi v16.4s, #0x0\n"
- "smax v22.4s, v22.4s, v16.4s\n"
- "smax v21.4s, v21.4s, v16.4s\n"
- "smax v20.4s, v20.4s, v16.4s\n"
- "smax v18.4s, v18.4s, v16.4s\n"
- "movi v16.4s, #0xff\n"
- "smin v22.4s, v22.4s, v16.4s\n"
- "smin v21.4s, v21.4s, v16.4s\n"
- "smin v20.4s, v20.4s, v16.4s\n"
- "smin v18.4s, v18.4s, v16.4s\n"
- "uzp1 v17.16b, v22.16b, v21.16b\n"
- "uzp1 v16.16b, v20.16b, v18.16b\n"
+ "srshl v16.4s, v16.4s, v23.4s\n"
+ "add v19.4s, v19.4s, v21.4s\n"
+ "add v17.4s, v17.4s, v21.4s\n"
+ "add v18.4s, v18.4s, v21.4s\n"
+ "add v16.4s, v16.4s, v21.4s\n"
+ "smax v19.4s, v19.4s, v22.4s\n"
+ "smax v17.4s, v17.4s, v22.4s\n"
+ "smax v18.4s, v18.4s, v22.4s\n"
+ "smax v16.4s, v16.4s, v22.4s\n"
+ "smin v19.4s, v19.4s, v20.4s\n"
+ "smin v17.4s, v17.4s, v20.4s\n"
+ "smin v18.4s, v18.4s, v20.4s\n"
+ "smin v16.4s, v16.4s, v20.4s\n"
+ "uzp1 v17.16b, v19.16b, v17.16b\n"
+ "uzp1 v16.16b, v18.16b, v16.16b\n"
"uzp1 v16.16b, v17.16b, v16.16b\n"
"tbz %x[n_channels], #3, 38f\n"
"st1 { v16.d }[0], [%x[outptr]], #0x8\n"
@@ -667,7 +667,7 @@ void a64_u8q_nhwc_max_generic_depthfirst_impl(
"43:" // End
: [n_channels] "+&r" (n_channels), [outptr] "+&r" (outptr)
: [inptrs] "r" (inptrs), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [quant_params] "r" (&qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
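
For reference, the requantisation tail of the u8q max-pooling kernel above (neg/saddw, srshl, sqrdmulh, srshl, add, smax/smin, uzp1) follows the usual per-layer fixed-point scheme: subtract the input offset, apply a rounding left shift, a saturating rounding doubling multiply-high, a rounding right shift, then add the output offset and clamp to [0, 255]. A minimal scalar sketch, assuming gemmlowp-style SRSHL/SQRDMULH semantics; the names below are illustrative, not the library's API:

#include <algorithm>
#include <cstdint>

// SRSHL: rounding arithmetic shift; positive shifts left, negative shifts
// right with round-to-nearest. Left shift of negatives assumes
// two's-complement (C++20) semantics, as the kernel does.
static int32_t rounding_shift(int32_t v, int32_t shift)
{
    if (shift >= 0)
        return v << shift;
    const int32_t round = int32_t(1) << (-shift - 1);
    return (v + round) >> -shift;
}

// SQRDMULH: saturating rounding doubling multiply returning the high half.
static int32_t sqrdmulh(int32_t a, int32_t b)
{
    if (a == INT32_MIN && b == INT32_MIN)
        return INT32_MAX;                  // the one saturating case
    const int64_t prod = int64_t(a) * int64_t(b);
    return int32_t((2 * prod + (int64_t(1) << 30)) >> 31);
}

// One lane of the tail: pooled uint8 max -> requantised uint8 output.
static uint8_t requantize_lane(uint8_t max_val, int32_t input_offset,
                               int32_t left_shift, int32_t mul,
                               int32_t right_shift, int32_t output_offset)
{
    int32_t v = int32_t(max_val) - input_offset;  // neg + saddw widen
    v = rounding_shift(v, left_shift);            // srshl by per_layer_left_shift
    v = sqrdmulh(v, mul);                         // sqrdmulh by per_layer_mul
    v = rounding_shift(v, right_shift);           // srshl by per_layer_right_shift
    v += output_offset;                           // add output offset
    return uint8_t(std::clamp(v, 0, 255));        // smax 0 / smin 0xff, then uzp1
}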
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 67b07205cd..672a9aefe0 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -89,111 +89,111 @@ void sme_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"ldr x4, [%x[args], %[offsetof_inptrs]]\n"
"whilelt p0.h, XZR, x20\n"
"add x20, %x[args], %[offsetof_rescale]\n"
- "ld1rqh { z4.h }, p0/Z, [x20]\n"
"ldr x5, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p0.h, x3, x5\n"
"mov x6, #0x0\n"
+ "ld1rqh { z5.h }, p0/Z, [x20]\n"
"ldp x7, x8, [x21, #0x0]\n"
"ldp x17, x16, [x21, #0x10]\n"
"ldp x15, x14, [x4, #0x0]\n"
- "ld1h { z3.h }, p0/Z, [x14, x3, LSL #1]\n"
+ "whilelt p0.h, x3, x5\n"
"ldp x13, x12, [x4, #0x10]\n"
- "ld1h { z2.h }, p0/Z, [x13, x3, LSL #1]\n"
"ldp x11, x10, [x4, #0x20]\n"
- "ld1h { z1.h }, p0/Z, [x10, x3, LSL #1]\n"
"ldp x9, x28, [x4, #0x30]\n"
- "ld1h { z0.h }, p0/Z, [x9, x3, LSL #1]\n"
+ "ld1h { z4.h }, p0/Z, [x14, x3, LSL #1]\n"
"ldp x27, x26, [x4, #0x40]\n"
- "ld1h { z31.h }, p0/Z, [x26, x3, LSL #1]\n"
+ "ld1h { z3.h }, p0/Z, [x13, x3, LSL #1]\n"
"ldp x25, x24, [x4, #0x50]\n"
- "ld1h { z30.h }, p0/Z, [x25, x3, LSL #1]\n"
+ "ld1h { z2.h }, p0/Z, [x10, x3, LSL #1]\n"
"ldp x23, x22, [x4, #0x60]\n"
- "ld1h { z29.h }, p0/Z, [x11, x3, LSL #1]\n"
+ "ld1h { z1.h }, p0/Z, [x9, x3, LSL #1]\n"
"ldp x21, x20, [x4, #0x70]\n"
- "ld1h { z28.h }, p0/Z, [x27, x3, LSL #1]\n"
- "ld1h { z27.h }, p0/Z, [x28, x3, LSL #1]\n"
- "ld1h { z22.h }, p0/Z, [x24, x3, LSL #1]\n"
- "ld1h { z21.h }, p0/Z, [x22, x3, LSL #1]\n"
- "ld1h { z20.h }, p0/Z, [x21, x3, LSL #1]\n"
- "ld1h { z26.h }, p0/Z, [x15, x3, LSL #1]\n"
- "ld1h { z25.h }, p0/Z, [x12, x3, LSL #1]\n"
- "ld1h { z24.h }, p0/Z, [x23, x3, LSL #1]\n"
- "ld1h { z23.h }, p0/Z, [x20, x3, LSL #1]\n"
+ "ld1h { z0.h }, p0/Z, [x26, x3, LSL #1]\n"
+ "ld1h { z31.h }, p0/Z, [x25, x3, LSL #1]\n"
+ "ld1h { z30.h }, p0/Z, [x11, x3, LSL #1]\n"
+ "ld1h { z29.h }, p0/Z, [x27, x3, LSL #1]\n"
+ "ld1h { z28.h }, p0/Z, [x28, x3, LSL #1]\n"
+ "ld1h { z27.h }, p0/Z, [x24, x3, LSL #1]\n"
+ "ld1h { z26.h }, p0/Z, [x22, x3, LSL #1]\n"
+ "ld1h { z22.h }, p0/Z, [x21, x3, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x15, x3, LSL #1]\n"
+ "ld1h { z24.h }, p0/Z, [x12, x3, LSL #1]\n"
+ "ld1h { z21.h }, p0/Z, [x23, x3, LSL #1]\n"
+ "ld1h { z20.h }, p0/Z, [x20, x3, LSL #1]\n"
"incw x3\n"
"whilelt p1.h, x3, x5\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "fadd z17.h, z1.h, z0.h\n"
- "fadd z16.h, z31.h, z30.h\n"
- "ld1h { z1.h }, p1/Z, [x10, x3, LSL #1]\n"
+ "fadd z19.h, z2.h, z1.h\n"
+ "fadd z16.h, z0.h, z31.h\n"
+ "ld1h { z2.h }, p1/Z, [x10, x3, LSL #1]\n"
"whilelt p0.h, x6, x5\n"
- "fadd z19.h, z17.h, z16.h\n"
- "fadd z18.h, z3.h, z2.h\n"
- "ld1h { z0.h }, p1/Z, [x9, x3, LSL #1]\n"
- "fadd z17.h, z29.h, z28.h\n"
- "fadd z22.h, z27.h, z22.h\n"
- "ld1h { z31.h }, p1/Z, [x26, x3, LSL #1]\n"
- "fadd z16.h, z21.h, z20.h\n"
- "fadd z21.h, z18.h, z19.h\n"
- "ld1h { z30.h }, p1/Z, [x25, x3, LSL #1]\n"
- "fadd z20.h, z16.h, z19.h\n"
- "fadd z19.h, z26.h, z17.h\n"
- "ld1h { z3.h }, p1/Z, [x14, x3, LSL #1]\n"
- "fadd z18.h, z25.h, z22.h\n"
- "fadd z17.h, z24.h, z17.h\n"
- "ld1h { z2.h }, p1/Z, [x13, x3, LSL #1]\n"
- "fadd z16.h, z23.h, z22.h\n"
- "fadd z19.h, z21.h, z19.h\n"
- "ld1h { z29.h }, p1/Z, [x11, x3, LSL #1]\n"
- "fadd z18.h, z21.h, z18.h\n"
- "fadd z17.h, z17.h, z20.h\n"
- "ld1h { z28.h }, p1/Z, [x27, x3, LSL #1]\n"
- "fadd z16.h, z16.h, z20.h\n"
- "ld1h { z27.h }, p1/Z, [x28, x3, LSL #1]\n"
- "fmul z19.h, z19.h, z4.h[0]\n"
- "ld1h { z22.h }, p1/Z, [x24, x3, LSL #1]\n"
- "fmul z18.h, z18.h, z4.h[1]\n"
- "fmul z17.h, z17.h, z4.h[2]\n"
- "ld1h { z21.h }, p1/Z, [x22, x3, LSL #1]\n"
- "fmul z16.h, z16.h, z4.h[3]\n"
+ "fadd z23.h, z4.h, z3.h\n"
+ "fadd z18.h, z30.h, z29.h\n"
+ "ld1h { z1.h }, p1/Z, [x9, x3, LSL #1]\n"
+ "fadd z17.h, z28.h, z27.h\n"
+ "fadd z22.h, z26.h, z22.h\n"
+ "ld1h { z0.h }, p1/Z, [x26, x3, LSL #1]\n"
+ "ld1h { z31.h }, p1/Z, [x25, x3, LSL #1]\n"
+ "fadd z16.h, z19.h, z16.h\n"
+ "ld1h { z4.h }, p1/Z, [x14, x3, LSL #1]\n"
+ "fadd z19.h, z25.h, z18.h\n"
+ "fadd z21.h, z21.h, z18.h\n"
+ "ld1h { z3.h }, p1/Z, [x13, x3, LSL #1]\n"
+ "fadd z18.h, z24.h, z17.h\n"
+ "fadd z20.h, z20.h, z17.h\n"
+ "ld1h { z30.h }, p1/Z, [x11, x3, LSL #1]\n"
+ "ld1h { z29.h }, p1/Z, [x27, x3, LSL #1]\n"
+ "fadd z17.h, z23.h, z16.h\n"
+ "fadd z16.h, z22.h, z16.h\n"
+ "ld1h { z28.h }, p1/Z, [x28, x3, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x24, x3, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x22, x3, LSL #1]\n"
+ "ld1h { z22.h }, p1/Z, [x21, x3, LSL #1]\n"
+ "fadd z19.h, z17.h, z19.h\n"
+ "fadd z18.h, z17.h, z18.h\n"
+ "ld1h { z25.h }, p1/Z, [x15, x3, LSL #1]\n"
+ "fadd z17.h, z21.h, z16.h\n"
+ "fadd z16.h, z20.h, z16.h\n"
+ "ld1h { z24.h }, p1/Z, [x12, x3, LSL #1]\n"
+ "ld1h { z21.h }, p1/Z, [x23, x3, LSL #1]\n"
+ "ld1h { z20.h }, p1/Z, [x20, x3, LSL #1]\n"
+ "incw x3\n"
+ "whilelt p1.h, x3, x5\n"
+ "fmul z19.h, z19.h, z5.h[0]\n"
+ "fmul z18.h, z18.h, z5.h[1]\n"
+ "fmul z17.h, z17.h, z5.h[2]\n"
+ "fmul z16.h, z16.h, z5.h[3]\n"
"st1h { z19.h }, p0, [x7, x6, LSL #1]\n"
- "ld1h { z20.h }, p1/Z, [x21, x3, LSL #1]\n"
"st1h { z18.h }, p0, [x8, x6, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x15, x3, LSL #1]\n"
"st1h { z17.h }, p0, [x17, x6, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x12, x3, LSL #1]\n"
"st1h { z16.h }, p0, [x16, x6, LSL #1]\n"
"incw x6\n"
- "ld1h { z24.h }, p1/Z, [x23, x3, LSL #1]\n"
- "ld1h { z23.h }, p1/Z, [x20, x3, LSL #1]\n"
- "incw x3\n"
- "whilelt p1.h, x3, x5\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "fadd z17.h, z1.h, z0.h\n"
- "fadd z16.h, z31.h, z30.h\n"
+ "fadd z19.h, z2.h, z1.h\n"
+ "fadd z16.h, z0.h, z31.h\n"
"whilelt p0.h, x6, x5\n"
- "fadd z19.h, z17.h, z16.h\n"
- "fadd z18.h, z3.h, z2.h\n"
- "fadd z17.h, z29.h, z28.h\n"
- "fadd z22.h, z27.h, z22.h\n"
- "fadd z16.h, z21.h, z20.h\n"
- "fadd z21.h, z18.h, z19.h\n"
- "fadd z20.h, z16.h, z19.h\n"
- "fadd z19.h, z26.h, z17.h\n"
- "fadd z18.h, z25.h, z22.h\n"
- "fadd z17.h, z24.h, z17.h\n"
- "fadd z16.h, z23.h, z22.h\n"
- "fadd z19.h, z21.h, z19.h\n"
- "fadd z18.h, z21.h, z18.h\n"
- "fadd z17.h, z17.h, z20.h\n"
- "fadd z16.h, z16.h, z20.h\n"
- "fmul z19.h, z19.h, z4.h[0]\n"
+ "fadd z23.h, z4.h, z3.h\n"
+ "fadd z18.h, z30.h, z29.h\n"
+ "fadd z17.h, z28.h, z27.h\n"
+ "fadd z22.h, z26.h, z22.h\n"
+ "fadd z16.h, z19.h, z16.h\n"
+ "fadd z19.h, z25.h, z18.h\n"
+ "fadd z21.h, z21.h, z18.h\n"
+ "fadd z18.h, z24.h, z17.h\n"
+ "fadd z20.h, z20.h, z17.h\n"
+ "fadd z17.h, z23.h, z16.h\n"
+ "fadd z16.h, z22.h, z16.h\n"
+ "fadd z19.h, z17.h, z19.h\n"
+ "fadd z18.h, z17.h, z18.h\n"
+ "fadd z17.h, z21.h, z16.h\n"
+ "fadd z16.h, z20.h, z16.h\n"
+ "fmul z19.h, z19.h, z5.h[0]\n"
+ "fmul z18.h, z18.h, z5.h[1]\n"
+ "fmul z17.h, z17.h, z5.h[2]\n"
+ "fmul z16.h, z16.h, z5.h[3]\n"
"st1h { z19.h }, p0, [x7, x6, LSL #1]\n"
- "fmul z18.h, z18.h, z4.h[1]\n"
- "fmul z17.h, z17.h, z4.h[2]\n"
"st1h { z18.h }, p0, [x8, x6, LSL #1]\n"
- "fmul z16.h, z16.h, z4.h[3]\n"
"st1h { z17.h }, p0, [x17, x6, LSL #1]\n"
"st1h { z16.h }, p0, [x16, x6, LSL #1]\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp
index 60f17b7bc2..dee5b4a230 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,25 +49,25 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
"cnth x26, ALL, MUL #3\n"
"ptrue p0.b\n"
"whilelt p3.h, x9, %x[n_channels]\n"
- "ld1rh { z6.h }, p0/Z, [%x[rescale_ptr]]\n"
"whilelt p2.h, x28, %x[n_channels]\n"
+ "ld1rh { z5.h }, p0/Z, [%x[rescale_ptr]]\n"
"whilelt p1.h, x27, %x[n_channels]\n"
"whilelt p0.h, x26, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z5.b, #0x0\n"
"mov z4.b, #0x0\n"
- "mov x24, %x[inptrs]\n"
"mov z3.b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"mov z2.b, #0x0\n"
+ "mov z1.b, #0x0\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1h { z1.h }, p3/Z, [x23, x9, LSL #1]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1h { z0.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z23.h }, p3/Z, [x22, x9, LSL #1]\n"
"ld1h { z31.h }, p3/Z, [x21, x9, LSL #1]\n"
"ld1h { z30.h }, p3/Z, [x20, x9, LSL #1]\n"
"ld1h { z29.h }, p2/Z, [x23, x28, LSL #1]\n"
@@ -84,7 +84,7 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
"ld1h { z16.h }, p0/Z, [x20, x26, LSL #1]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fadd z23.h, z1.h, z0.h\n"
+ "fadd z23.h, z0.h, z23.h\n"
"fadd z19.h, z31.h, z30.h\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
@@ -94,24 +94,24 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
"add x24, x24, #0x20\n"
"fadd z21.h, z27.h, z21.h\n"
"fadd z17.h, z26.h, z17.h\n"
- "ld1h { z1.h }, p3/Z, [x23, x9, LSL #1]\n"
"fadd z20.h, z25.h, z20.h\n"
"fadd z16.h, z24.h, z16.h\n"
- "ld1h { z0.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x23, x9, LSL #1]\n"
"fadd z19.h, z23.h, z19.h\n"
+ "ld1h { z23.h }, p3/Z, [x22, x9, LSL #1]\n"
"fadd z18.h, z22.h, z18.h\n"
"ld1h { z31.h }, p3/Z, [x21, x9, LSL #1]\n"
"fadd z17.h, z21.h, z17.h\n"
- "fadd z16.h, z20.h, z16.h\n"
"ld1h { z30.h }, p3/Z, [x20, x9, LSL #1]\n"
- "fadd z5.h, z5.h, z19.h\n"
- "fadd z4.h, z4.h, z18.h\n"
+ "fadd z16.h, z20.h, z16.h\n"
"ld1h { z29.h }, p2/Z, [x23, x28, LSL #1]\n"
- "fadd z3.h, z3.h, z17.h\n"
- "fadd z2.h, z2.h, z16.h\n"
+ "fadd z4.h, z4.h, z19.h\n"
"ld1h { z22.h }, p2/Z, [x22, x28, LSL #1]\n"
+ "fadd z3.h, z3.h, z18.h\n"
"ld1h { z28.h }, p2/Z, [x21, x28, LSL #1]\n"
+ "fadd z2.h, z2.h, z17.h\n"
"ld1h { z18.h }, p2/Z, [x20, x28, LSL #1]\n"
+ "fadd z1.h, z1.h, z16.h\n"
"ld1h { z27.h }, p1/Z, [x23, x27, LSL #1]\n"
"ld1h { z21.h }, p1/Z, [x22, x27, LSL #1]\n"
"ld1h { z26.h }, p1/Z, [x21, x27, LSL #1]\n"
@@ -122,7 +122,7 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
"ld1h { z16.h }, p0/Z, [x20, x26, LSL #1]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fadd z23.h, z1.h, z0.h\n"
+ "fadd z23.h, z0.h, z23.h\n"
"fadd z19.h, z31.h, z30.h\n"
"fadd z22.h, z29.h, z22.h\n"
"fadd z18.h, z28.h, z18.h\n"
@@ -134,37 +134,37 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
"fadd z18.h, z22.h, z18.h\n"
"fadd z17.h, z21.h, z17.h\n"
"fadd z16.h, z20.h, z16.h\n"
- "fadd z5.h, z5.h, z19.h\n"
- "fadd z4.h, z4.h, z18.h\n"
- "fadd z3.h, z3.h, z17.h\n"
- "fadd z2.h, z2.h, z16.h\n"
+ "fadd z4.h, z4.h, z19.h\n"
+ "fadd z3.h, z3.h, z18.h\n"
+ "fadd z2.h, z2.h, z17.h\n"
+ "fadd z1.h, z1.h, z16.h\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1h { z16.h }, p3/Z, [x20, x9, LSL #1]\n"
"subs x21, x21, #0x1\n"
- "fadd z5.h, z5.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x20, x28, LSL #1]\n"
- "fadd z4.h, z4.h, z16.h\n"
- "ld1h { z16.h }, p1/Z, [x20, x27, LSL #1]\n"
- "fadd z3.h, z3.h, z16.h\n"
+ "ld1h { z19.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z17.h }, p1/Z, [x20, x27, LSL #1]\n"
"ld1h { z16.h }, p0/Z, [x20, x26, LSL #1]\n"
- "fadd z2.h, z2.h, z16.h\n"
+ "fadd z4.h, z4.h, z19.h\n"
+ "fadd z3.h, z3.h, z18.h\n"
+ "fadd z2.h, z2.h, z17.h\n"
+ "fadd z1.h, z1.h, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "fmul z5.h, z5.h, z6.h\n"
- "fmul z4.h, z4.h, z6.h\n"
- "st1h { z5.h }, p3, [%x[outptr], x9, LSL #1]\n"
+ "fmul z4.h, z4.h, z5.h\n"
+ "fmul z3.h, z3.h, z5.h\n"
+ "fmul z2.h, z2.h, z5.h\n"
+ "fmul z1.h, z1.h, z5.h\n"
+ "st1h { z4.h }, p3, [%x[outptr], x9, LSL #1]\n"
"inch x9, ALL, MUL #4\n"
- "fmul z3.h, z3.h, z6.h\n"
- "fmul z2.h, z2.h, z6.h\n"
- "st1h { z4.h }, p2, [%x[outptr], x28, LSL #1]\n"
+ "st1h { z3.h }, p2, [%x[outptr], x28, LSL #1]\n"
"inch x28, ALL, MUL #4\n"
- "st1h { z3.h }, p1, [%x[outptr], x27, LSL #1]\n"
+ "st1h { z2.h }, p1, [%x[outptr], x27, LSL #1]\n"
"inch x27, ALL, MUL #4\n"
- "st1h { z2.h }, p0, [%x[outptr], x26, LSL #1]\n"
+ "st1h { z1.h }, p0, [%x[outptr], x26, LSL #1]\n"
"inch x26, ALL, MUL #4\n"
"whilelt p0.h, x26, %x[n_channels]\n"
"b.any 1b\n"
@@ -173,49 +173,49 @@ void sme_fp16_nhwc_avg_generic_depthfirst_impl(
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z5.b, #0x0\n"
+ "mov z4.b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x20, x22, [x24, #0x0]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1h { z1.h }, p3/Z, [x20, x9, LSL #1]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1h { z0.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z23.h }, p3/Z, [x22, x9, LSL #1]\n"
"ld1h { z31.h }, p3/Z, [x21, x9, LSL #1]\n"
"ld1h { z30.h }, p3/Z, [x20, x9, LSL #1]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fadd z17.h, z1.h, z0.h\n"
+ "fadd z17.h, z0.h, z23.h\n"
"fadd z16.h, z31.h, z30.h\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "fadd z16.h, z17.h, z16.h\n"
"ldp x21, x20, [x24, #0x10]\n"
- "fadd z5.h, z5.h, z16.h\n"
"add x24, x24, #0x20\n"
- "ld1h { z1.h }, p3/Z, [x23, x9, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "fadd z16.h, z17.h, z16.h\n"
+ "ld1h { z23.h }, p3/Z, [x22, x9, LSL #1]\n"
"ld1h { z31.h }, p3/Z, [x21, x9, LSL #1]\n"
"ld1h { z30.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "fadd z4.h, z4.h, z16.h\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fadd z17.h, z1.h, z0.h\n"
+ "fadd z17.h, z0.h, z23.h\n"
"fadd z16.h, z31.h, z30.h\n"
"fadd z16.h, z17.h, z16.h\n"
- "fadd z5.h, z5.h, z16.h\n"
+ "fadd z4.h, z4.h, z16.h\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1h { z16.h }, p3/Z, [x20, x9, LSL #1]\n"
"subs x21, x21, #0x1\n"
- "fadd z5.h, z5.h, z16.h\n"
+ "ld1h { z16.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "fadd z4.h, z4.h, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "fmul z5.h, z5.h, z6.h\n"
- "st1h { z5.h }, p3, [%x[outptr], x9, LSL #1]\n"
+ "fmul z4.h, z4.h, z5.h\n"
+ "st1h { z4.h }, p3, [%x[outptr], x9, LSL #1]\n"
"inch x9\n"
"whilelt p3.h, x9, %x[n_channels]\n"
"b.any 8b\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 7fc776ed4e..7c2ca4c452 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,26 +66,26 @@ void sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"mov x15, #0x0\n"
- "ptrue p2.b\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
"mov x14, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "ptrue p2.b\n"
"ldr x13, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p0.h, x15, x13\n"
"ldp x12, x11, [x21, #0x0]\n"
"ldp x10, x9, [x21, #0x10]\n"
"ldp x28, x27, [x20, #0x0]\n"
- "ld1h { z30.h }, p0/Z, [x27, x15, LSL #1]\n"
+ "whilelt p0.h, x15, x13\n"
"ldp x26, x25, [x20, #0x10]\n"
- "ld1h { z29.h }, p0/Z, [x25, x15, LSL #1]\n"
"ldp x24, x23, [x20, #0x20]\n"
- "ld1h { z28.h }, p0/Z, [x24, x15, LSL #1]\n"
"ldp x22, x21, [x20, #0x30]\n"
- "ld1h { z27.h }, p0/Z, [x21, x15, LSL #1]\n"
+ "ld1h { z30.h }, p0/Z, [x27, x15, LSL #1]\n"
"ldr x20, [x20, #0x40]\n"
+ "ld1h { z29.h }, p0/Z, [x25, x15, LSL #1]\n"
+ "ld1h { z28.h }, p0/Z, [x24, x15, LSL #1]\n"
+ "ld1h { z27.h }, p0/Z, [x21, x15, LSL #1]\n"
"ld1h { z26.h }, p0/Z, [x28, x15, LSL #1]\n"
"ld1h { z25.h }, p0/Z, [x26, x15, LSL #1]\n"
"ld1h { z24.h }, p0/Z, [x23, x15, LSL #1]\n"
- "ld1h { z19.h }, p0/Z, [x22, x15, LSL #1]\n"
+ "ld1h { z20.h }, p0/Z, [x22, x15, LSL #1]\n"
"ld1h { z23.h }, p0/Z, [x20, x15, LSL #1]\n"
"incw x15\n"
"whilelt p1.h, x15, x13\n"
@@ -98,24 +98,24 @@ void sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"movprfx z18, z29\n fmax z18.h, p2/M, z18.h, z26.h\n"
"movprfx z17, z25\n fmax z17.h, p2/M, z17.h, z24.h\n"
"ld1h { z28.h }, p1/Z, [x24, x15, LSL #1]\n"
- "movprfx z16, z29\n fmax z16.h, p2/M, z16.h, z19.h\n"
+ "movprfx z16, z29\n fmax z16.h, p2/M, z16.h, z20.h\n"
"movprfx z20, z24\n fmax z20.h, p2/M, z20.h, z23.h\n"
"ld1h { z27.h }, p1/Z, [x21, x15, LSL #1]\n"
"ld1h { z29.h }, p1/Z, [x25, x15, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x28, x15, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x26, x15, LSL #1]\n"
"movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z18.h\n"
"movprfx z18, z17\n fmax z18.h, p2/M, z18.h, z22.h\n"
- "ld1h { z26.h }, p1/Z, [x28, x15, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x23, x15, LSL #1]\n"
"movprfx z17, z16\n fmax z17.h, p2/M, z17.h, z21.h\n"
"movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z20.h\n"
- "ld1h { z25.h }, p1/Z, [x26, x15, LSL #1]\n"
- "st1h { z19.h }, p0, [x12, x14, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x23, x15, LSL #1]\n"
- "st1h { z18.h }, p0, [x11, x14, LSL #1]\n"
- "ld1h { z19.h }, p1/Z, [x22, x15, LSL #1]\n"
- "st1h { z17.h }, p0, [x10, x14, LSL #1]\n"
+ "ld1h { z20.h }, p1/Z, [x22, x15, LSL #1]\n"
"ld1h { z23.h }, p1/Z, [x20, x15, LSL #1]\n"
"incw x15\n"
"whilelt p1.h, x15, x13\n"
+ "st1h { z19.h }, p0, [x12, x14, LSL #1]\n"
+ "st1h { z18.h }, p0, [x11, x14, LSL #1]\n"
+ "st1h { z17.h }, p0, [x10, x14, LSL #1]\n"
"st1h { z16.h }, p0, [x9, x14, LSL #1]\n"
"incw x14\n"
"b.any 1b\n"
@@ -123,15 +123,15 @@ void sme_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"movprfx z22, z30\n fmax z22.h, p2/M, z22.h, z28.h\n"
"movprfx z21, z28\n fmax z21.h, p2/M, z21.h, z27.h\n"
"whilelt p0.h, x14, x13\n"
- "movprfx z20, z29\n fmax z20.h, p2/M, z20.h, z26.h\n"
- "movprfx z18, z25\n fmax z18.h, p2/M, z18.h, z24.h\n"
- "movprfx z17, z29\n fmax z17.h, p2/M, z17.h, z19.h\n"
- "movprfx z19, z24\n fmax z19.h, p2/M, z19.h, z23.h\n"
- "movprfx z16, z22\n fmax z16.h, p2/M, z16.h, z20.h\n"
- "fmax z18.h, p2/M, z18.h, z22.h\n"
- "st1h { z16.h }, p0, [x12, x14, LSL #1]\n"
- "fmax z17.h, p2/M, z17.h, z21.h\n"
- "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z19.h\n"
+ "movprfx z18, z29\n fmax z18.h, p2/M, z18.h, z26.h\n"
+ "movprfx z17, z25\n fmax z17.h, p2/M, z17.h, z24.h\n"
+ "movprfx z16, z29\n fmax z16.h, p2/M, z16.h, z20.h\n"
+ "movprfx z20, z24\n fmax z20.h, p2/M, z20.h, z23.h\n"
+ "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z18.h\n"
+ "movprfx z18, z17\n fmax z18.h, p2/M, z18.h, z22.h\n"
+ "movprfx z17, z16\n fmax z17.h, p2/M, z17.h, z21.h\n"
+ "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z20.h\n"
+ "st1h { z19.h }, p0, [x12, x14, LSL #1]\n"
"st1h { z18.h }, p0, [x11, x14, LSL #1]\n"
"st1h { z17.h }, p0, [x10, x14, LSL #1]\n"
"st1h { z16.h }, p0, [x9, x14, LSL #1]\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp
index afa2ccbd71..bfdf1b8b5a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,21 +53,21 @@ void sme_fp16_nhwc_max_generic_depthfirst_impl(
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "mov z5.h, #0xfc00\n"
"mov z4.h, #0xfc00\n"
- "mov z3.h, #0xfc00\n"
"mov x24, %x[inptrs]\n"
+ "mov z3.h, #0xfc00\n"
"mov z2.h, #0xfc00\n"
- "mov z1.h, #0xfc00\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1h { z0.h }, p4/Z, [x23, x9, LSL #1]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1h { z31.h }, p4/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z1.h }, p4/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z0.h }, p4/Z, [x22, x9, LSL #1]\n"
"ld1h { z23.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z30.h }, p4/Z, [x20, x9, LSL #1]\n"
- "ld1h { z18.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z31.h }, p4/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z30.h }, p3/Z, [x23, x28, LSL #1]\n"
"ld1h { z29.h }, p3/Z, [x22, x28, LSL #1]\n"
"ld1h { z22.h }, p3/Z, [x21, x28, LSL #1]\n"
"ld1h { z28.h }, p3/Z, [x20, x28, LSL #1]\n"
@@ -81,34 +81,34 @@ void sme_fp16_nhwc_max_generic_depthfirst_impl(
"ld1h { z24.h }, p1/Z, [x20, x26, LSL #1]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
- "fmax z23.h, p0/M, z23.h, z30.h\n"
+ "movprfx z19, z1\n fmax z19.h, p0/M, z19.h, z0.h\n"
+ "fmax z23.h, p0/M, z23.h, z31.h\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "fmax z18.h, p0/M, z18.h, z29.h\n"
+ "movprfx z18, z30\n fmax z18.h, p0/M, z18.h, z29.h\n"
"fmax z22.h, p0/M, z22.h, z28.h\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
"fmax z17.h, p0/M, z17.h, z27.h\n"
"fmax z21.h, p0/M, z21.h, z26.h\n"
- "ld1h { z0.h }, p4/Z, [x23, x9, LSL #1]\n"
"fmax z16.h, p0/M, z16.h, z25.h\n"
"fmax z20.h, p0/M, z20.h, z24.h\n"
- "ld1h { z31.h }, p4/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z1.h }, p4/Z, [x23, x9, LSL #1]\n"
"fmax z19.h, p0/M, z19.h, z23.h\n"
+ "ld1h { z0.h }, p4/Z, [x22, x9, LSL #1]\n"
"fmax z18.h, p0/M, z18.h, z22.h\n"
"ld1h { z23.h }, p4/Z, [x21, x9, LSL #1]\n"
"fmax z17.h, p0/M, z17.h, z21.h\n"
+ "ld1h { z31.h }, p4/Z, [x20, x9, LSL #1]\n"
"fmax z16.h, p0/M, z16.h, z20.h\n"
- "ld1h { z30.h }, p4/Z, [x20, x9, LSL #1]\n"
- "fmax z4.h, p0/M, z4.h, z19.h\n"
- "fmax z3.h, p0/M, z3.h, z18.h\n"
- "ld1h { z18.h }, p3/Z, [x23, x28, LSL #1]\n"
- "fmax z2.h, p0/M, z2.h, z17.h\n"
- "fmax z1.h, p0/M, z1.h, z16.h\n"
+ "ld1h { z30.h }, p3/Z, [x23, x28, LSL #1]\n"
+ "fmax z5.h, p0/M, z5.h, z19.h\n"
"ld1h { z29.h }, p3/Z, [x22, x28, LSL #1]\n"
+ "fmax z4.h, p0/M, z4.h, z18.h\n"
"ld1h { z22.h }, p3/Z, [x21, x28, LSL #1]\n"
+ "fmax z3.h, p0/M, z3.h, z17.h\n"
"ld1h { z28.h }, p3/Z, [x20, x28, LSL #1]\n"
+ "fmax z2.h, p0/M, z2.h, z16.h\n"
"ld1h { z17.h }, p2/Z, [x23, x27, LSL #1]\n"
"ld1h { z27.h }, p2/Z, [x22, x27, LSL #1]\n"
"ld1h { z21.h }, p2/Z, [x21, x27, LSL #1]\n"
@@ -119,9 +119,9 @@ void sme_fp16_nhwc_max_generic_depthfirst_impl(
"ld1h { z24.h }, p1/Z, [x20, x26, LSL #1]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z0\n fmax z19.h, p0/M, z19.h, z31.h\n"
- "fmax z23.h, p0/M, z23.h, z30.h\n"
- "fmax z18.h, p0/M, z18.h, z29.h\n"
+ "movprfx z19, z1\n fmax z19.h, p0/M, z19.h, z0.h\n"
+ "fmax z23.h, p0/M, z23.h, z31.h\n"
+ "movprfx z18, z30\n fmax z18.h, p0/M, z18.h, z29.h\n"
"fmax z22.h, p0/M, z22.h, z28.h\n"
"fmax z17.h, p0/M, z17.h, z27.h\n"
"fmax z21.h, p0/M, z21.h, z26.h\n"
@@ -131,33 +131,33 @@ void sme_fp16_nhwc_max_generic_depthfirst_impl(
"fmax z18.h, p0/M, z18.h, z22.h\n"
"fmax z17.h, p0/M, z17.h, z21.h\n"
"fmax z16.h, p0/M, z16.h, z20.h\n"
- "fmax z4.h, p0/M, z4.h, z19.h\n"
- "fmax z3.h, p0/M, z3.h, z18.h\n"
- "fmax z2.h, p0/M, z2.h, z17.h\n"
- "fmax z1.h, p0/M, z1.h, z16.h\n"
+ "fmax z5.h, p0/M, z5.h, z19.h\n"
+ "fmax z4.h, p0/M, z4.h, z18.h\n"
+ "fmax z3.h, p0/M, z3.h, z17.h\n"
+ "fmax z2.h, p0/M, z2.h, z16.h\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1h { z16.h }, p4/Z, [x20, x9, LSL #1]\n"
"subs x21, x21, #0x1\n"
- "fmax z4.h, p0/M, z4.h, z16.h\n"
- "ld1h { z16.h }, p3/Z, [x20, x28, LSL #1]\n"
- "fmax z3.h, p0/M, z3.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x20, x27, LSL #1]\n"
- "fmax z2.h, p0/M, z2.h, z16.h\n"
+ "ld1h { z19.h }, p4/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z18.h }, p3/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z17.h }, p2/Z, [x20, x27, LSL #1]\n"
"ld1h { z16.h }, p1/Z, [x20, x26, LSL #1]\n"
- "fmax z1.h, p0/M, z1.h, z16.h\n"
+ "fmax z5.h, p0/M, z5.h, z19.h\n"
+ "fmax z4.h, p0/M, z4.h, z18.h\n"
+ "fmax z3.h, p0/M, z3.h, z17.h\n"
+ "fmax z2.h, p0/M, z2.h, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1h { z4.h }, p4, [%x[outptr], x9, LSL #1]\n"
+ "st1h { z5.h }, p4, [%x[outptr], x9, LSL #1]\n"
"inch x9, ALL, MUL #4\n"
- "st1h { z3.h }, p3, [%x[outptr], x28, LSL #1]\n"
+ "st1h { z4.h }, p3, [%x[outptr], x28, LSL #1]\n"
"inch x28, ALL, MUL #4\n"
- "st1h { z2.h }, p2, [%x[outptr], x27, LSL #1]\n"
+ "st1h { z3.h }, p2, [%x[outptr], x27, LSL #1]\n"
"inch x27, ALL, MUL #4\n"
- "st1h { z1.h }, p1, [%x[outptr], x26, LSL #1]\n"
+ "st1h { z2.h }, p1, [%x[outptr], x26, LSL #1]\n"
"inch x26, ALL, MUL #4\n"
"whilelt p1.h, x26, %x[n_channels]\n"
"b.any 1b\n"
@@ -166,48 +166,48 @@ void sme_fp16_nhwc_max_generic_depthfirst_impl(
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z4.h, #0xfc00\n"
+ "mov z5.h, #0xfc00\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x20, x22, [x24, #0x0]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1h { z0.h }, p4/Z, [x20, x9, LSL #1]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1h { z31.h }, p4/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z1.h }, p4/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z0.h }, p4/Z, [x22, x9, LSL #1]\n"
"ld1h { z23.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z30.h }, p4/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z31.h }, p4/Z, [x20, x9, LSL #1]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z0\n fmax z16.h, p0/M, z16.h, z31.h\n"
- "movprfx z17, z23\n fmax z17.h, p0/M, z17.h, z30.h\n"
+ "movprfx z16, z1\n fmax z16.h, p0/M, z16.h, z0.h\n"
+ "movprfx z17, z23\n fmax z17.h, p0/M, z17.h, z31.h\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "fmax z16.h, p0/M, z16.h, z17.h\n"
"ldp x21, x20, [x24, #0x10]\n"
- "fmax z4.h, p0/M, z4.h, z16.h\n"
"add x24, x24, #0x20\n"
- "ld1h { z0.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z31.h }, p4/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z1.h }, p4/Z, [x23, x9, LSL #1]\n"
+ "fmax z16.h, p0/M, z16.h, z17.h\n"
+ "ld1h { z0.h }, p4/Z, [x22, x9, LSL #1]\n"
"ld1h { z23.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z30.h }, p4/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z31.h }, p4/Z, [x20, x9, LSL #1]\n"
+ "fmax z5.h, p0/M, z5.h, z16.h\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z0\n fmax z16.h, p0/M, z16.h, z31.h\n"
- "movprfx z17, z23\n fmax z17.h, p0/M, z17.h, z30.h\n"
+ "movprfx z16, z1\n fmax z16.h, p0/M, z16.h, z0.h\n"
+ "movprfx z17, z23\n fmax z17.h, p0/M, z17.h, z31.h\n"
"fmax z16.h, p0/M, z16.h, z17.h\n"
- "fmax z4.h, p0/M, z4.h, z16.h\n"
+ "fmax z5.h, p0/M, z5.h, z16.h\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1h { z16.h }, p4/Z, [x20, x9, LSL #1]\n"
"subs x21, x21, #0x1\n"
- "fmax z4.h, p0/M, z4.h, z16.h\n"
+ "ld1h { z16.h }, p4/Z, [x20, x9, LSL #1]\n"
+ "fmax z5.h, p0/M, z5.h, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1h { z4.h }, p4, [%x[outptr], x9, LSL #1]\n"
+ "st1h { z5.h }, p4, [%x[outptr], x9, LSL #1]\n"
"inch x9\n"
"whilelt p4.h, x9, %x[n_channels]\n"
"b.any 8b\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 8c8532827a..51096c8f29 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -89,111 +89,111 @@ void sme_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"ldr x4, [%x[args], %[offsetof_inptrs]]\n"
"whilelt p0.s, XZR, x20\n"
"add x20, %x[args], %[offsetof_rescale]\n"
- "ld1rqw { z4.s }, p0/Z, [x20]\n"
"ldr x5, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p0.s, x3, x5\n"
"mov x6, #0x0\n"
+ "ld1rqw { z5.s }, p0/Z, [x20]\n"
"ldp x7, x8, [x21, #0x0]\n"
"ldp x17, x16, [x21, #0x10]\n"
"ldp x15, x14, [x4, #0x0]\n"
- "ld1w { z3.s }, p0/Z, [x14, x3, LSL #2]\n"
+ "whilelt p0.s, x3, x5\n"
"ldp x13, x12, [x4, #0x10]\n"
- "ld1w { z2.s }, p0/Z, [x13, x3, LSL #2]\n"
"ldp x11, x10, [x4, #0x20]\n"
- "ld1w { z1.s }, p0/Z, [x10, x3, LSL #2]\n"
"ldp x9, x28, [x4, #0x30]\n"
- "ld1w { z0.s }, p0/Z, [x9, x3, LSL #2]\n"
+ "ld1w { z4.s }, p0/Z, [x14, x3, LSL #2]\n"
"ldp x27, x26, [x4, #0x40]\n"
- "ld1w { z31.s }, p0/Z, [x26, x3, LSL #2]\n"
+ "ld1w { z3.s }, p0/Z, [x13, x3, LSL #2]\n"
"ldp x25, x24, [x4, #0x50]\n"
- "ld1w { z30.s }, p0/Z, [x25, x3, LSL #2]\n"
+ "ld1w { z2.s }, p0/Z, [x10, x3, LSL #2]\n"
"ldp x23, x22, [x4, #0x60]\n"
- "ld1w { z29.s }, p0/Z, [x11, x3, LSL #2]\n"
+ "ld1w { z1.s }, p0/Z, [x9, x3, LSL #2]\n"
"ldp x21, x20, [x4, #0x70]\n"
- "ld1w { z28.s }, p0/Z, [x27, x3, LSL #2]\n"
- "ld1w { z27.s }, p0/Z, [x28, x3, LSL #2]\n"
- "ld1w { z22.s }, p0/Z, [x24, x3, LSL #2]\n"
- "ld1w { z21.s }, p0/Z, [x22, x3, LSL #2]\n"
- "ld1w { z20.s }, p0/Z, [x21, x3, LSL #2]\n"
- "ld1w { z26.s }, p0/Z, [x15, x3, LSL #2]\n"
- "ld1w { z25.s }, p0/Z, [x12, x3, LSL #2]\n"
- "ld1w { z24.s }, p0/Z, [x23, x3, LSL #2]\n"
- "ld1w { z23.s }, p0/Z, [x20, x3, LSL #2]\n"
+ "ld1w { z0.s }, p0/Z, [x26, x3, LSL #2]\n"
+ "ld1w { z31.s }, p0/Z, [x25, x3, LSL #2]\n"
+ "ld1w { z30.s }, p0/Z, [x11, x3, LSL #2]\n"
+ "ld1w { z29.s }, p0/Z, [x27, x3, LSL #2]\n"
+ "ld1w { z28.s }, p0/Z, [x28, x3, LSL #2]\n"
+ "ld1w { z27.s }, p0/Z, [x24, x3, LSL #2]\n"
+ "ld1w { z26.s }, p0/Z, [x22, x3, LSL #2]\n"
+ "ld1w { z22.s }, p0/Z, [x21, x3, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x15, x3, LSL #2]\n"
+ "ld1w { z24.s }, p0/Z, [x12, x3, LSL #2]\n"
+ "ld1w { z21.s }, p0/Z, [x23, x3, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x20, x3, LSL #2]\n"
"incw x3\n"
"whilelt p1.s, x3, x5\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "fadd z17.s, z1.s, z0.s\n"
- "fadd z16.s, z31.s, z30.s\n"
- "ld1w { z1.s }, p1/Z, [x10, x3, LSL #2]\n"
+ "fadd z19.s, z2.s, z1.s\n"
+ "fadd z16.s, z0.s, z31.s\n"
+ "ld1w { z2.s }, p1/Z, [x10, x3, LSL #2]\n"
"whilelt p0.s, x6, x5\n"
- "fadd z19.s, z17.s, z16.s\n"
- "fadd z18.s, z3.s, z2.s\n"
- "ld1w { z0.s }, p1/Z, [x9, x3, LSL #2]\n"
- "fadd z17.s, z29.s, z28.s\n"
- "fadd z22.s, z27.s, z22.s\n"
- "ld1w { z31.s }, p1/Z, [x26, x3, LSL #2]\n"
- "fadd z16.s, z21.s, z20.s\n"
- "fadd z21.s, z18.s, z19.s\n"
- "ld1w { z30.s }, p1/Z, [x25, x3, LSL #2]\n"
- "fadd z20.s, z16.s, z19.s\n"
- "fadd z19.s, z26.s, z17.s\n"
- "ld1w { z3.s }, p1/Z, [x14, x3, LSL #2]\n"
- "fadd z18.s, z25.s, z22.s\n"
- "fadd z17.s, z24.s, z17.s\n"
- "ld1w { z2.s }, p1/Z, [x13, x3, LSL #2]\n"
- "fadd z16.s, z23.s, z22.s\n"
- "fadd z19.s, z21.s, z19.s\n"
- "ld1w { z29.s }, p1/Z, [x11, x3, LSL #2]\n"
- "fadd z18.s, z21.s, z18.s\n"
- "fadd z17.s, z17.s, z20.s\n"
- "ld1w { z28.s }, p1/Z, [x27, x3, LSL #2]\n"
- "fadd z16.s, z16.s, z20.s\n"
- "ld1w { z27.s }, p1/Z, [x28, x3, LSL #2]\n"
- "fmul z19.s, z19.s, z4.s[0]\n"
- "ld1w { z22.s }, p1/Z, [x24, x3, LSL #2]\n"
- "fmul z18.s, z18.s, z4.s[1]\n"
- "fmul z17.s, z17.s, z4.s[2]\n"
- "ld1w { z21.s }, p1/Z, [x22, x3, LSL #2]\n"
- "fmul z16.s, z16.s, z4.s[3]\n"
+ "fadd z23.s, z4.s, z3.s\n"
+ "fadd z18.s, z30.s, z29.s\n"
+ "ld1w { z1.s }, p1/Z, [x9, x3, LSL #2]\n"
+ "fadd z17.s, z28.s, z27.s\n"
+ "fadd z22.s, z26.s, z22.s\n"
+ "ld1w { z0.s }, p1/Z, [x26, x3, LSL #2]\n"
+ "ld1w { z31.s }, p1/Z, [x25, x3, LSL #2]\n"
+ "fadd z16.s, z19.s, z16.s\n"
+ "ld1w { z4.s }, p1/Z, [x14, x3, LSL #2]\n"
+ "fadd z19.s, z25.s, z18.s\n"
+ "fadd z21.s, z21.s, z18.s\n"
+ "ld1w { z3.s }, p1/Z, [x13, x3, LSL #2]\n"
+ "fadd z18.s, z24.s, z17.s\n"
+ "fadd z20.s, z20.s, z17.s\n"
+ "ld1w { z30.s }, p1/Z, [x11, x3, LSL #2]\n"
+ "ld1w { z29.s }, p1/Z, [x27, x3, LSL #2]\n"
+ "fadd z17.s, z23.s, z16.s\n"
+ "fadd z16.s, z22.s, z16.s\n"
+ "ld1w { z28.s }, p1/Z, [x28, x3, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x24, x3, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x22, x3, LSL #2]\n"
+ "ld1w { z22.s }, p1/Z, [x21, x3, LSL #2]\n"
+ "fadd z19.s, z17.s, z19.s\n"
+ "fadd z18.s, z17.s, z18.s\n"
+ "ld1w { z25.s }, p1/Z, [x15, x3, LSL #2]\n"
+ "fadd z17.s, z21.s, z16.s\n"
+ "fadd z16.s, z20.s, z16.s\n"
+ "ld1w { z24.s }, p1/Z, [x12, x3, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x23, x3, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x20, x3, LSL #2]\n"
+ "incw x3\n"
+ "whilelt p1.s, x3, x5\n"
+ "fmul z19.s, z19.s, z5.s[0]\n"
+ "fmul z18.s, z18.s, z5.s[1]\n"
+ "fmul z17.s, z17.s, z5.s[2]\n"
+ "fmul z16.s, z16.s, z5.s[3]\n"
"st1w { z19.s }, p0, [x7, x6, LSL #2]\n"
- "ld1w { z20.s }, p1/Z, [x21, x3, LSL #2]\n"
"st1w { z18.s }, p0, [x8, x6, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x15, x3, LSL #2]\n"
"st1w { z17.s }, p0, [x17, x6, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x12, x3, LSL #2]\n"
"st1w { z16.s }, p0, [x16, x6, LSL #2]\n"
"incw x6\n"
- "ld1w { z24.s }, p1/Z, [x23, x3, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [x20, x3, LSL #2]\n"
- "incw x3\n"
- "whilelt p1.s, x3, x5\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "fadd z17.s, z1.s, z0.s\n"
- "fadd z16.s, z31.s, z30.s\n"
+ "fadd z19.s, z2.s, z1.s\n"
+ "fadd z16.s, z0.s, z31.s\n"
"whilelt p0.s, x6, x5\n"
- "fadd z19.s, z17.s, z16.s\n"
- "fadd z18.s, z3.s, z2.s\n"
- "fadd z17.s, z29.s, z28.s\n"
- "fadd z22.s, z27.s, z22.s\n"
- "fadd z16.s, z21.s, z20.s\n"
- "fadd z21.s, z18.s, z19.s\n"
- "fadd z20.s, z16.s, z19.s\n"
- "fadd z19.s, z26.s, z17.s\n"
- "fadd z18.s, z25.s, z22.s\n"
- "fadd z17.s, z24.s, z17.s\n"
- "fadd z16.s, z23.s, z22.s\n"
- "fadd z19.s, z21.s, z19.s\n"
- "fadd z18.s, z21.s, z18.s\n"
- "fadd z17.s, z17.s, z20.s\n"
- "fadd z16.s, z16.s, z20.s\n"
- "fmul z19.s, z19.s, z4.s[0]\n"
+ "fadd z23.s, z4.s, z3.s\n"
+ "fadd z18.s, z30.s, z29.s\n"
+ "fadd z17.s, z28.s, z27.s\n"
+ "fadd z22.s, z26.s, z22.s\n"
+ "fadd z16.s, z19.s, z16.s\n"
+ "fadd z19.s, z25.s, z18.s\n"
+ "fadd z21.s, z21.s, z18.s\n"
+ "fadd z18.s, z24.s, z17.s\n"
+ "fadd z20.s, z20.s, z17.s\n"
+ "fadd z17.s, z23.s, z16.s\n"
+ "fadd z16.s, z22.s, z16.s\n"
+ "fadd z19.s, z17.s, z19.s\n"
+ "fadd z18.s, z17.s, z18.s\n"
+ "fadd z17.s, z21.s, z16.s\n"
+ "fadd z16.s, z20.s, z16.s\n"
+ "fmul z19.s, z19.s, z5.s[0]\n"
+ "fmul z18.s, z18.s, z5.s[1]\n"
+ "fmul z17.s, z17.s, z5.s[2]\n"
+ "fmul z16.s, z16.s, z5.s[3]\n"
"st1w { z19.s }, p0, [x7, x6, LSL #2]\n"
- "fmul z18.s, z18.s, z4.s[1]\n"
- "fmul z17.s, z17.s, z4.s[2]\n"
"st1w { z18.s }, p0, [x8, x6, LSL #2]\n"
- "fmul z16.s, z16.s, z4.s[3]\n"
"st1w { z17.s }, p0, [x17, x6, LSL #2]\n"
"st1w { z16.s }, p0, [x16, x6, LSL #2]\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp
index 86e7f84542..908c66b4d5 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,25 +49,25 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
"cntw x26, ALL, MUL #3\n"
"ptrue p0.b\n"
"whilelt p3.s, x9, %x[n_channels]\n"
- "ld1rw { z6.s }, p0/Z, [%x[rescale_ptr]]\n"
"whilelt p2.s, x28, %x[n_channels]\n"
+ "ld1rw { z5.s }, p0/Z, [%x[rescale_ptr]]\n"
"whilelt p1.s, x27, %x[n_channels]\n"
"whilelt p0.s, x26, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z5.b, #0x0\n"
"mov z4.b, #0x0\n"
- "mov x24, %x[inptrs]\n"
"mov z3.b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
"mov z2.b, #0x0\n"
+ "mov z1.b, #0x0\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1w { z1.s }, p3/Z, [x23, x9, LSL #2]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1w { z0.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z23.s }, p3/Z, [x22, x9, LSL #2]\n"
"ld1w { z31.s }, p3/Z, [x21, x9, LSL #2]\n"
"ld1w { z30.s }, p3/Z, [x20, x9, LSL #2]\n"
"ld1w { z29.s }, p2/Z, [x23, x28, LSL #2]\n"
@@ -84,7 +84,7 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
"ld1w { z16.s }, p0/Z, [x20, x26, LSL #2]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "fadd z23.s, z1.s, z0.s\n"
+ "fadd z23.s, z0.s, z23.s\n"
"fadd z19.s, z31.s, z30.s\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
@@ -94,24 +94,24 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
"add x24, x24, #0x20\n"
"fadd z21.s, z27.s, z21.s\n"
"fadd z17.s, z26.s, z17.s\n"
- "ld1w { z1.s }, p3/Z, [x23, x9, LSL #2]\n"
"fadd z20.s, z25.s, z20.s\n"
"fadd z16.s, z24.s, z16.s\n"
- "ld1w { z0.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x23, x9, LSL #2]\n"
"fadd z19.s, z23.s, z19.s\n"
+ "ld1w { z23.s }, p3/Z, [x22, x9, LSL #2]\n"
"fadd z18.s, z22.s, z18.s\n"
"ld1w { z31.s }, p3/Z, [x21, x9, LSL #2]\n"
"fadd z17.s, z21.s, z17.s\n"
- "fadd z16.s, z20.s, z16.s\n"
"ld1w { z30.s }, p3/Z, [x20, x9, LSL #2]\n"
- "fadd z5.s, z5.s, z19.s\n"
- "fadd z4.s, z4.s, z18.s\n"
+ "fadd z16.s, z20.s, z16.s\n"
"ld1w { z29.s }, p2/Z, [x23, x28, LSL #2]\n"
- "fadd z3.s, z3.s, z17.s\n"
- "fadd z2.s, z2.s, z16.s\n"
+ "fadd z4.s, z4.s, z19.s\n"
"ld1w { z22.s }, p2/Z, [x22, x28, LSL #2]\n"
+ "fadd z3.s, z3.s, z18.s\n"
"ld1w { z28.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "fadd z2.s, z2.s, z17.s\n"
"ld1w { z18.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "fadd z1.s, z1.s, z16.s\n"
"ld1w { z27.s }, p1/Z, [x23, x27, LSL #2]\n"
"ld1w { z21.s }, p1/Z, [x22, x27, LSL #2]\n"
"ld1w { z26.s }, p1/Z, [x21, x27, LSL #2]\n"
@@ -122,7 +122,7 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
"ld1w { z16.s }, p0/Z, [x20, x26, LSL #2]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "fadd z23.s, z1.s, z0.s\n"
+ "fadd z23.s, z0.s, z23.s\n"
"fadd z19.s, z31.s, z30.s\n"
"fadd z22.s, z29.s, z22.s\n"
"fadd z18.s, z28.s, z18.s\n"
@@ -134,37 +134,37 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
"fadd z18.s, z22.s, z18.s\n"
"fadd z17.s, z21.s, z17.s\n"
"fadd z16.s, z20.s, z16.s\n"
- "fadd z5.s, z5.s, z19.s\n"
- "fadd z4.s, z4.s, z18.s\n"
- "fadd z3.s, z3.s, z17.s\n"
- "fadd z2.s, z2.s, z16.s\n"
+ "fadd z4.s, z4.s, z19.s\n"
+ "fadd z3.s, z3.s, z18.s\n"
+ "fadd z2.s, z2.s, z17.s\n"
+ "fadd z1.s, z1.s, z16.s\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1w { z16.s }, p3/Z, [x20, x9, LSL #2]\n"
"subs x21, x21, #0x1\n"
- "fadd z5.s, z5.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x28, LSL #2]\n"
- "fadd z4.s, z4.s, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20, x27, LSL #2]\n"
- "fadd z3.s, z3.s, z16.s\n"
+ "ld1w { z19.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z17.s }, p1/Z, [x20, x27, LSL #2]\n"
"ld1w { z16.s }, p0/Z, [x20, x26, LSL #2]\n"
- "fadd z2.s, z2.s, z16.s\n"
+ "fadd z4.s, z4.s, z19.s\n"
+ "fadd z3.s, z3.s, z18.s\n"
+ "fadd z2.s, z2.s, z17.s\n"
+ "fadd z1.s, z1.s, z16.s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "fmul z5.s, z5.s, z6.s\n"
- "fmul z4.s, z4.s, z6.s\n"
- "st1w { z5.s }, p3, [%x[outptr], x9, LSL #2]\n"
+ "fmul z4.s, z4.s, z5.s\n"
+ "fmul z3.s, z3.s, z5.s\n"
+ "fmul z2.s, z2.s, z5.s\n"
+ "fmul z1.s, z1.s, z5.s\n"
+ "st1w { z4.s }, p3, [%x[outptr], x9, LSL #2]\n"
"incw x9, ALL, MUL #4\n"
- "fmul z3.s, z3.s, z6.s\n"
- "fmul z2.s, z2.s, z6.s\n"
- "st1w { z4.s }, p2, [%x[outptr], x28, LSL #2]\n"
+ "st1w { z3.s }, p2, [%x[outptr], x28, LSL #2]\n"
"incw x28, ALL, MUL #4\n"
- "st1w { z3.s }, p1, [%x[outptr], x27, LSL #2]\n"
+ "st1w { z2.s }, p1, [%x[outptr], x27, LSL #2]\n"
"incw x27, ALL, MUL #4\n"
- "st1w { z2.s }, p0, [%x[outptr], x26, LSL #2]\n"
+ "st1w { z1.s }, p0, [%x[outptr], x26, LSL #2]\n"
"incw x26, ALL, MUL #4\n"
"whilelt p0.s, x26, %x[n_channels]\n"
"b.any 1b\n"
@@ -173,49 +173,49 @@ void sme_fp32_nhwc_avg_generic_depthfirst_impl(
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z5.b, #0x0\n"
+ "mov z4.b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x20, x22, [x24, #0x0]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1w { z1.s }, p3/Z, [x20, x9, LSL #2]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1w { z0.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z23.s }, p3/Z, [x22, x9, LSL #2]\n"
"ld1w { z31.s }, p3/Z, [x21, x9, LSL #2]\n"
"ld1w { z30.s }, p3/Z, [x20, x9, LSL #2]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "fadd z17.s, z1.s, z0.s\n"
+ "fadd z17.s, z0.s, z23.s\n"
"fadd z16.s, z31.s, z30.s\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "fadd z16.s, z17.s, z16.s\n"
"ldp x21, x20, [x24, #0x10]\n"
- "fadd z5.s, z5.s, z16.s\n"
"add x24, x24, #0x20\n"
- "ld1w { z1.s }, p3/Z, [x23, x9, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "fadd z16.s, z17.s, z16.s\n"
+ "ld1w { z23.s }, p3/Z, [x22, x9, LSL #2]\n"
"ld1w { z31.s }, p3/Z, [x21, x9, LSL #2]\n"
"ld1w { z30.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "fadd z4.s, z4.s, z16.s\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "fadd z17.s, z1.s, z0.s\n"
+ "fadd z17.s, z0.s, z23.s\n"
"fadd z16.s, z31.s, z30.s\n"
"fadd z16.s, z17.s, z16.s\n"
- "fadd z5.s, z5.s, z16.s\n"
+ "fadd z4.s, z4.s, z16.s\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1w { z16.s }, p3/Z, [x20, x9, LSL #2]\n"
"subs x21, x21, #0x1\n"
- "fadd z5.s, z5.s, z16.s\n"
+ "ld1w { z16.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "fadd z4.s, z4.s, z16.s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "fmul z5.s, z5.s, z6.s\n"
- "st1w { z5.s }, p3, [%x[outptr], x9, LSL #2]\n"
+ "fmul z4.s, z4.s, z5.s\n"
+ "st1w { z4.s }, p3, [%x[outptr], x9, LSL #2]\n"
"incw x9\n"
"whilelt p3.s, x9, %x[n_channels]\n"
"b.any 8b\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 3c7213a498..e460009bdf 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,26 +66,26 @@ void sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"mov x15, #0x0\n"
- "ptrue p2.b\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
"mov x14, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "ptrue p2.b\n"
"ldr x13, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p0.s, x15, x13\n"
"ldp x12, x11, [x21, #0x0]\n"
"ldp x10, x9, [x21, #0x10]\n"
"ldp x28, x27, [x20, #0x0]\n"
- "ld1w { z30.s }, p0/Z, [x27, x15, LSL #2]\n"
+ "whilelt p0.s, x15, x13\n"
"ldp x26, x25, [x20, #0x10]\n"
- "ld1w { z29.s }, p0/Z, [x25, x15, LSL #2]\n"
"ldp x24, x23, [x20, #0x20]\n"
- "ld1w { z28.s }, p0/Z, [x24, x15, LSL #2]\n"
"ldp x22, x21, [x20, #0x30]\n"
- "ld1w { z27.s }, p0/Z, [x21, x15, LSL #2]\n"
+ "ld1w { z30.s }, p0/Z, [x27, x15, LSL #2]\n"
"ldr x20, [x20, #0x40]\n"
+ "ld1w { z29.s }, p0/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z28.s }, p0/Z, [x24, x15, LSL #2]\n"
+ "ld1w { z27.s }, p0/Z, [x21, x15, LSL #2]\n"
"ld1w { z26.s }, p0/Z, [x28, x15, LSL #2]\n"
"ld1w { z25.s }, p0/Z, [x26, x15, LSL #2]\n"
"ld1w { z24.s }, p0/Z, [x23, x15, LSL #2]\n"
- "ld1w { z19.s }, p0/Z, [x22, x15, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x22, x15, LSL #2]\n"
"ld1w { z23.s }, p0/Z, [x20, x15, LSL #2]\n"
"incw x15\n"
"whilelt p1.s, x15, x13\n"
@@ -98,24 +98,24 @@ void sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"movprfx z18, z29\n fmax z18.s, p2/M, z18.s, z26.s\n"
"movprfx z17, z25\n fmax z17.s, p2/M, z17.s, z24.s\n"
"ld1w { z28.s }, p1/Z, [x24, x15, LSL #2]\n"
- "movprfx z16, z29\n fmax z16.s, p2/M, z16.s, z19.s\n"
+ "movprfx z16, z29\n fmax z16.s, p2/M, z16.s, z20.s\n"
"movprfx z20, z24\n fmax z20.s, p2/M, z20.s, z23.s\n"
"ld1w { z27.s }, p1/Z, [x21, x15, LSL #2]\n"
"ld1w { z29.s }, p1/Z, [x25, x15, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x28, x15, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x26, x15, LSL #2]\n"
"movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z18.s\n"
"movprfx z18, z17\n fmax z18.s, p2/M, z18.s, z22.s\n"
- "ld1w { z26.s }, p1/Z, [x28, x15, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x23, x15, LSL #2]\n"
"movprfx z17, z16\n fmax z17.s, p2/M, z17.s, z21.s\n"
"movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z20.s\n"
- "ld1w { z25.s }, p1/Z, [x26, x15, LSL #2]\n"
- "st1w { z19.s }, p0, [x12, x14, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x23, x15, LSL #2]\n"
- "st1w { z18.s }, p0, [x11, x14, LSL #2]\n"
- "ld1w { z19.s }, p1/Z, [x22, x15, LSL #2]\n"
- "st1w { z17.s }, p0, [x10, x14, LSL #2]\n"
+ "ld1w { z20.s }, p1/Z, [x22, x15, LSL #2]\n"
"ld1w { z23.s }, p1/Z, [x20, x15, LSL #2]\n"
"incw x15\n"
"whilelt p1.s, x15, x13\n"
+ "st1w { z19.s }, p0, [x12, x14, LSL #2]\n"
+ "st1w { z18.s }, p0, [x11, x14, LSL #2]\n"
+ "st1w { z17.s }, p0, [x10, x14, LSL #2]\n"
"st1w { z16.s }, p0, [x9, x14, LSL #2]\n"
"incw x14\n"
"b.any 1b\n"
@@ -123,15 +123,15 @@ void sme_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"movprfx z22, z30\n fmax z22.s, p2/M, z22.s, z28.s\n"
"movprfx z21, z28\n fmax z21.s, p2/M, z21.s, z27.s\n"
"whilelt p0.s, x14, x13\n"
- "movprfx z20, z29\n fmax z20.s, p2/M, z20.s, z26.s\n"
- "movprfx z18, z25\n fmax z18.s, p2/M, z18.s, z24.s\n"
- "movprfx z17, z29\n fmax z17.s, p2/M, z17.s, z19.s\n"
- "movprfx z19, z24\n fmax z19.s, p2/M, z19.s, z23.s\n"
- "movprfx z16, z22\n fmax z16.s, p2/M, z16.s, z20.s\n"
- "fmax z18.s, p2/M, z18.s, z22.s\n"
- "st1w { z16.s }, p0, [x12, x14, LSL #2]\n"
- "fmax z17.s, p2/M, z17.s, z21.s\n"
- "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z19.s\n"
+ "movprfx z18, z29\n fmax z18.s, p2/M, z18.s, z26.s\n"
+ "movprfx z17, z25\n fmax z17.s, p2/M, z17.s, z24.s\n"
+ "movprfx z16, z29\n fmax z16.s, p2/M, z16.s, z20.s\n"
+ "movprfx z20, z24\n fmax z20.s, p2/M, z20.s, z23.s\n"
+ "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z18.s\n"
+ "movprfx z18, z17\n fmax z18.s, p2/M, z18.s, z22.s\n"
+ "movprfx z17, z16\n fmax z17.s, p2/M, z17.s, z21.s\n"
+ "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z20.s\n"
+ "st1w { z19.s }, p0, [x12, x14, LSL #2]\n"
"st1w { z18.s }, p0, [x11, x14, LSL #2]\n"
"st1w { z17.s }, p0, [x10, x14, LSL #2]\n"
"st1w { z16.s }, p0, [x9, x14, LSL #2]\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp
index 0dabc2f292..6d2641b035 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,21 +53,21 @@ void sme_fp32_nhwc_max_generic_depthfirst_impl(
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "mov z5.s, #0xff800000\n"
"mov z4.s, #0xff800000\n"
- "mov z3.s, #0xff800000\n"
"mov x24, %x[inptrs]\n"
+ "mov z3.s, #0xff800000\n"
"mov z2.s, #0xff800000\n"
- "mov z1.s, #0xff800000\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1w { z0.s }, p4/Z, [x23, x9, LSL #2]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1w { z31.s }, p4/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z1.s }, p4/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z0.s }, p4/Z, [x22, x9, LSL #2]\n"
"ld1w { z23.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z30.s }, p4/Z, [x20, x9, LSL #2]\n"
- "ld1w { z18.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z31.s }, p4/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z30.s }, p3/Z, [x23, x28, LSL #2]\n"
"ld1w { z29.s }, p3/Z, [x22, x28, LSL #2]\n"
"ld1w { z22.s }, p3/Z, [x21, x28, LSL #2]\n"
"ld1w { z28.s }, p3/Z, [x20, x28, LSL #2]\n"
@@ -81,34 +81,34 @@ void sme_fp32_nhwc_max_generic_depthfirst_impl(
"ld1w { z24.s }, p1/Z, [x20, x26, LSL #2]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
- "fmax z23.s, p0/M, z23.s, z30.s\n"
+ "movprfx z19, z1\n fmax z19.s, p0/M, z19.s, z0.s\n"
+ "fmax z23.s, p0/M, z23.s, z31.s\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "fmax z18.s, p0/M, z18.s, z29.s\n"
+ "movprfx z18, z30\n fmax z18.s, p0/M, z18.s, z29.s\n"
"fmax z22.s, p0/M, z22.s, z28.s\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
"fmax z17.s, p0/M, z17.s, z27.s\n"
"fmax z21.s, p0/M, z21.s, z26.s\n"
- "ld1w { z0.s }, p4/Z, [x23, x9, LSL #2]\n"
"fmax z16.s, p0/M, z16.s, z25.s\n"
"fmax z20.s, p0/M, z20.s, z24.s\n"
- "ld1w { z31.s }, p4/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z1.s }, p4/Z, [x23, x9, LSL #2]\n"
"fmax z19.s, p0/M, z19.s, z23.s\n"
+ "ld1w { z0.s }, p4/Z, [x22, x9, LSL #2]\n"
"fmax z18.s, p0/M, z18.s, z22.s\n"
"ld1w { z23.s }, p4/Z, [x21, x9, LSL #2]\n"
"fmax z17.s, p0/M, z17.s, z21.s\n"
+ "ld1w { z31.s }, p4/Z, [x20, x9, LSL #2]\n"
"fmax z16.s, p0/M, z16.s, z20.s\n"
- "ld1w { z30.s }, p4/Z, [x20, x9, LSL #2]\n"
- "fmax z4.s, p0/M, z4.s, z19.s\n"
- "fmax z3.s, p0/M, z3.s, z18.s\n"
- "ld1w { z18.s }, p3/Z, [x23, x28, LSL #2]\n"
- "fmax z2.s, p0/M, z2.s, z17.s\n"
- "fmax z1.s, p0/M, z1.s, z16.s\n"
+ "ld1w { z30.s }, p3/Z, [x23, x28, LSL #2]\n"
+ "fmax z5.s, p0/M, z5.s, z19.s\n"
"ld1w { z29.s }, p3/Z, [x22, x28, LSL #2]\n"
+ "fmax z4.s, p0/M, z4.s, z18.s\n"
"ld1w { z22.s }, p3/Z, [x21, x28, LSL #2]\n"
+ "fmax z3.s, p0/M, z3.s, z17.s\n"
"ld1w { z28.s }, p3/Z, [x20, x28, LSL #2]\n"
+ "fmax z2.s, p0/M, z2.s, z16.s\n"
"ld1w { z17.s }, p2/Z, [x23, x27, LSL #2]\n"
"ld1w { z27.s }, p2/Z, [x22, x27, LSL #2]\n"
"ld1w { z21.s }, p2/Z, [x21, x27, LSL #2]\n"
@@ -119,9 +119,9 @@ void sme_fp32_nhwc_max_generic_depthfirst_impl(
"ld1w { z24.s }, p1/Z, [x20, x26, LSL #2]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z0\n fmax z19.s, p0/M, z19.s, z31.s\n"
- "fmax z23.s, p0/M, z23.s, z30.s\n"
- "fmax z18.s, p0/M, z18.s, z29.s\n"
+ "movprfx z19, z1\n fmax z19.s, p0/M, z19.s, z0.s\n"
+ "fmax z23.s, p0/M, z23.s, z31.s\n"
+ "movprfx z18, z30\n fmax z18.s, p0/M, z18.s, z29.s\n"
"fmax z22.s, p0/M, z22.s, z28.s\n"
"fmax z17.s, p0/M, z17.s, z27.s\n"
"fmax z21.s, p0/M, z21.s, z26.s\n"
@@ -131,33 +131,33 @@ void sme_fp32_nhwc_max_generic_depthfirst_impl(
"fmax z18.s, p0/M, z18.s, z22.s\n"
"fmax z17.s, p0/M, z17.s, z21.s\n"
"fmax z16.s, p0/M, z16.s, z20.s\n"
- "fmax z4.s, p0/M, z4.s, z19.s\n"
- "fmax z3.s, p0/M, z3.s, z18.s\n"
- "fmax z2.s, p0/M, z2.s, z17.s\n"
- "fmax z1.s, p0/M, z1.s, z16.s\n"
+ "fmax z5.s, p0/M, z5.s, z19.s\n"
+ "fmax z4.s, p0/M, z4.s, z18.s\n"
+ "fmax z3.s, p0/M, z3.s, z17.s\n"
+ "fmax z2.s, p0/M, z2.s, z16.s\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1w { z16.s }, p4/Z, [x20, x9, LSL #2]\n"
"subs x21, x21, #0x1\n"
- "fmax z4.s, p0/M, z4.s, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x20, x28, LSL #2]\n"
- "fmax z3.s, p0/M, z3.s, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x20, x27, LSL #2]\n"
- "fmax z2.s, p0/M, z2.s, z16.s\n"
+ "ld1w { z19.s }, p4/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z18.s }, p3/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z17.s }, p2/Z, [x20, x27, LSL #2]\n"
"ld1w { z16.s }, p1/Z, [x20, x26, LSL #2]\n"
- "fmax z1.s, p0/M, z1.s, z16.s\n"
+ "fmax z5.s, p0/M, z5.s, z19.s\n"
+ "fmax z4.s, p0/M, z4.s, z18.s\n"
+ "fmax z3.s, p0/M, z3.s, z17.s\n"
+ "fmax z2.s, p0/M, z2.s, z16.s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1w { z4.s }, p4, [%x[outptr], x9, LSL #2]\n"
+ "st1w { z5.s }, p4, [%x[outptr], x9, LSL #2]\n"
"incw x9, ALL, MUL #4\n"
- "st1w { z3.s }, p3, [%x[outptr], x28, LSL #2]\n"
+ "st1w { z4.s }, p3, [%x[outptr], x28, LSL #2]\n"
"incw x28, ALL, MUL #4\n"
- "st1w { z2.s }, p2, [%x[outptr], x27, LSL #2]\n"
+ "st1w { z3.s }, p2, [%x[outptr], x27, LSL #2]\n"
"incw x27, ALL, MUL #4\n"
- "st1w { z1.s }, p1, [%x[outptr], x26, LSL #2]\n"
+ "st1w { z2.s }, p1, [%x[outptr], x26, LSL #2]\n"
"incw x26, ALL, MUL #4\n"
"whilelt p1.s, x26, %x[n_channels]\n"
"b.any 1b\n"
@@ -166,48 +166,48 @@ void sme_fp32_nhwc_max_generic_depthfirst_impl(
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z4.s, #0xff800000\n"
+ "mov z5.s, #0xff800000\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x20, x22, [x24, #0x0]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1w { z0.s }, p4/Z, [x20, x9, LSL #2]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1w { z31.s }, p4/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z1.s }, p4/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z0.s }, p4/Z, [x22, x9, LSL #2]\n"
"ld1w { z23.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z30.s }, p4/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z31.s }, p4/Z, [x20, x9, LSL #2]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z0\n fmax z16.s, p0/M, z16.s, z31.s\n"
- "movprfx z17, z23\n fmax z17.s, p0/M, z17.s, z30.s\n"
+ "movprfx z16, z1\n fmax z16.s, p0/M, z16.s, z0.s\n"
+ "movprfx z17, z23\n fmax z17.s, p0/M, z17.s, z31.s\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "fmax z16.s, p0/M, z16.s, z17.s\n"
"ldp x21, x20, [x24, #0x10]\n"
- "fmax z4.s, p0/M, z4.s, z16.s\n"
"add x24, x24, #0x20\n"
- "ld1w { z0.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z31.s }, p4/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z1.s }, p4/Z, [x23, x9, LSL #2]\n"
+ "fmax z16.s, p0/M, z16.s, z17.s\n"
+ "ld1w { z0.s }, p4/Z, [x22, x9, LSL #2]\n"
"ld1w { z23.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z30.s }, p4/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z31.s }, p4/Z, [x20, x9, LSL #2]\n"
+ "fmax z5.s, p0/M, z5.s, z16.s\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z0\n fmax z16.s, p0/M, z16.s, z31.s\n"
- "movprfx z17, z23\n fmax z17.s, p0/M, z17.s, z30.s\n"
+ "movprfx z16, z1\n fmax z16.s, p0/M, z16.s, z0.s\n"
+ "movprfx z17, z23\n fmax z17.s, p0/M, z17.s, z31.s\n"
"fmax z16.s, p0/M, z16.s, z17.s\n"
- "fmax z4.s, p0/M, z4.s, z16.s\n"
+ "fmax z5.s, p0/M, z5.s, z16.s\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1w { z16.s }, p4/Z, [x20, x9, LSL #2]\n"
"subs x21, x21, #0x1\n"
- "fmax z4.s, p0/M, z4.s, z16.s\n"
+ "ld1w { z16.s }, p4/Z, [x20, x9, LSL #2]\n"
+ "fmax z5.s, p0/M, z5.s, z16.s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1w { z4.s }, p4, [%x[outptr], x9, LSL #2]\n"
+ "st1w { z5.s }, p4, [%x[outptr], x9, LSL #2]\n"
"incw x9\n"
"whilelt p4.s, x9, %x[n_channels]\n"
"b.any 8b\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp
index c24e977dc6..b931767710 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -146,32 +146,32 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x455c03b5 // saddlb z21.h, z29.b, z28.b\n"
".inst 0x455c07b4 // saddlt z20.h, z29.b, z28.b\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x455a0373 // saddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0772 // saddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x45580331 // saddlb z17.h, z25.b, z24.b\n"
".inst 0x45580730 // saddlt z16.h, z25.b, z24.b\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
+ "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x26]\n"
".inst 0x4595416b // saddwb z11.s, z11.s, z21.h\n"
".inst 0x4595454a // saddwt z10.s, z10.s, z21.h\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x26]\n"
".inst 0x45944129 // saddwb z9.s, z9.s, z20.h\n"
".inst 0x45944508 // saddwt z8.s, z8.s, z20.h\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x25]\n"
".inst 0x459340e7 // saddwb z7.s, z7.s, z19.h\n"
".inst 0x459344c6 // saddwt z6.s, z6.s, z19.h\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x25]\n"
".inst 0x459240a5 // saddwb z5.s, z5.s, z18.h\n"
".inst 0x45924484 // saddwt z4.s, z4.s, z18.h\n"
+ "ld1b { z25.b }, p1/Z, [x21, x24]\n"
".inst 0x45914063 // saddwb z3.s, z3.s, z17.h\n"
".inst 0x45914442 // saddwt z2.s, z2.s, z17.h\n"
+ "ld1b { z24.b }, p1/Z, [x20, x24]\n"
".inst 0x45904021 // saddwb z1.s, z1.s, z16.h\n"
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"bgt 2b\n"
@@ -205,17 +205,17 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
- ".inst 0x4508a217 // sshllb z23.h, z16.b, #0x0\n"
- ".inst 0x4508a616 // sshllt z22.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p3/Z, [x20, x26]\n"
- ".inst 0x4508a215 // sshllb z21.h, z16.b, #0x0\n"
- ".inst 0x4508a614 // sshllt z20.h, z16.b, #0x0\n"
"subs x21, x21, #0x1\n"
- "ld1b { z16.b }, p2/Z, [x20, x25]\n"
- ".inst 0x4508a213 // sshllb z19.h, z16.b, #0x0\n"
- ".inst 0x4508a612 // sshllt z18.h, z16.b, #0x0\n"
+ "ld1b { z19.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z18.b }, p3/Z, [x20, x26]\n"
+ "ld1b { z17.b }, p2/Z, [x20, x25]\n"
"ld1b { z16.b }, p1/Z, [x20, x24]\n"
+ ".inst 0x4508a277 // sshllb z23.h, z19.b, #0x0\n"
+ ".inst 0x4508a676 // sshllt z22.h, z19.b, #0x0\n"
+ ".inst 0x4508a255 // sshllb z21.h, z18.b, #0x0\n"
+ ".inst 0x4508a654 // sshllt z20.h, z18.b, #0x0\n"
+ ".inst 0x4508a233 // sshllb z19.h, z17.b, #0x0\n"
+ ".inst 0x4508a632 // sshllt z18.h, z17.b, #0x0\n"
".inst 0x4508a211 // sshllb z17.h, z16.b, #0x0\n"
".inst 0x4508a610 // sshllt z16.h, z16.b, #0x0\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
@@ -236,25 +236,25 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
- ".inst 0x04b175ef // sqdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x04b175ce // sqdmulh z14.s, z14.s, z17.s\n"
- ".inst 0x04b175ad // sqdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x04b1758c // sqdmulh z12.s, z12.s, z17.s\n"
+ "ld1rw { z18.s }, p0/Z, [%x[rescale_ptr]]\n"
+ "mov z17.s, #0x7f\n"
"ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
- ".inst 0x04b1756b // sqdmulh z11.s, z11.s, z17.s\n"
- ".inst 0x04b1754a // sqdmulh z10.s, z10.s, z17.s\n"
- ".inst 0x04b17529 // sqdmulh z9.s, z9.s, z17.s\n"
- ".inst 0x04b17508 // sqdmulh z8.s, z8.s, z17.s\n"
- ".inst 0x04b174e7 // sqdmulh z7.s, z7.s, z17.s\n"
- ".inst 0x04b174c6 // sqdmulh z6.s, z6.s, z17.s\n"
- ".inst 0x04b174a5 // sqdmulh z5.s, z5.s, z17.s\n"
- ".inst 0x04b17484 // sqdmulh z4.s, z4.s, z17.s\n"
- ".inst 0x04b17463 // sqdmulh z3.s, z3.s, z17.s\n"
- ".inst 0x04b17442 // sqdmulh z2.s, z2.s, z17.s\n"
- ".inst 0x04b17421 // sqdmulh z1.s, z1.s, z17.s\n"
- ".inst 0x04b17400 // sqdmulh z0.s, z0.s, z17.s\n"
- "mov z19.s, #0x7f\n"
+ ".inst 0x04b275ef // sqdmulh z15.s, z15.s, z18.s\n"
+ ".inst 0x04b275ce // sqdmulh z14.s, z14.s, z18.s\n"
+ ".inst 0x04b275ad // sqdmulh z13.s, z13.s, z18.s\n"
+ ".inst 0x04b2758c // sqdmulh z12.s, z12.s, z18.s\n"
+ ".inst 0x04b2756b // sqdmulh z11.s, z11.s, z18.s\n"
+ ".inst 0x04b2754a // sqdmulh z10.s, z10.s, z18.s\n"
+ ".inst 0x04b27529 // sqdmulh z9.s, z9.s, z18.s\n"
+ ".inst 0x04b27508 // sqdmulh z8.s, z8.s, z18.s\n"
+ ".inst 0x04b274e7 // sqdmulh z7.s, z7.s, z18.s\n"
+ ".inst 0x04b274c6 // sqdmulh z6.s, z6.s, z18.s\n"
+ ".inst 0x04b274a5 // sqdmulh z5.s, z5.s, z18.s\n"
+ ".inst 0x04b27484 // sqdmulh z4.s, z4.s, z18.s\n"
+ ".inst 0x04b27463 // sqdmulh z3.s, z3.s, z18.s\n"
+ ".inst 0x04b27442 // sqdmulh z2.s, z2.s, z18.s\n"
+ ".inst 0x04b27421 // sqdmulh z1.s, z1.s, z18.s\n"
+ ".inst 0x04b27400 // sqdmulh z0.s, z0.s, z18.s\n"
".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
@@ -271,7 +271,7 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x44828202 // srshl z2.s, p0/M, z2.s, z16.s\n"
".inst 0x44828201 // srshl z1.s, p0/M, z1.s, z16.s\n"
".inst 0x44828200 // srshl z0.s, p0/M, z0.s, z16.s\n"
- "not z16.s, p0/M, z19.s\n"
+ "not z16.s, p0/M, z17.s\n"
"smax z15.s, p0/M, z15.s, z16.s\n"
"smax z14.s, p0/M, z14.s, z16.s\n"
"smax z13.s, p0/M, z13.s, z16.s\n"
@@ -288,36 +288,36 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
"smax z2.s, p0/M, z2.s, z16.s\n"
"smax z1.s, p0/M, z1.s, z16.s\n"
"smax z0.s, p0/M, z0.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z19.s\n"
- "smin z14.s, p0/M, z14.s, z19.s\n"
- "trn1 z23.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z19.s\n"
- "smin z12.s, p0/M, z12.s, z19.s\n"
+ "smin z15.s, p0/M, z15.s, z17.s\n"
+ "smin z14.s, p0/M, z14.s, z17.s\n"
+ "smin z13.s, p0/M, z13.s, z17.s\n"
+ "smin z12.s, p0/M, z12.s, z17.s\n"
+ "smin z11.s, p0/M, z11.s, z17.s\n"
+ "smin z10.s, p0/M, z10.s, z17.s\n"
+ "smin z9.s, p0/M, z9.s, z17.s\n"
+ "smin z8.s, p0/M, z8.s, z17.s\n"
+ "smin z7.s, p0/M, z7.s, z17.s\n"
+ "smin z6.s, p0/M, z6.s, z17.s\n"
+ "trn1 z19.h, z15.h, z14.h\n"
+ "smin z5.s, p0/M, z5.s, z17.s\n"
+ "smin z4.s, p0/M, z4.s, z17.s\n"
"trn1 z16.h, z13.h, z12.h\n"
- "smin z11.s, p0/M, z11.s, z19.s\n"
- "smin z10.s, p0/M, z10.s, z19.s\n"
+ "smin z3.s, p0/M, z3.s, z17.s\n"
+ "smin z2.s, p0/M, z2.s, z17.s\n"
"trn1 z22.h, z11.h, z10.h\n"
- "smin z9.s, p0/M, z9.s, z19.s\n"
- "smin z8.s, p0/M, z8.s, z19.s\n"
+ "smin z1.s, p0/M, z1.s, z17.s\n"
+ "smin z0.s, p0/M, z0.s, z17.s\n"
"trn1 z18.h, z9.h, z8.h\n"
- "smin z7.s, p0/M, z7.s, z19.s\n"
- "smin z6.s, p0/M, z6.s, z19.s\n"
"trn1 z21.h, z7.h, z6.h\n"
- "smin z5.s, p0/M, z5.s, z19.s\n"
- "smin z4.s, p0/M, z4.s, z19.s\n"
"trn1 z17.h, z5.h, z4.h\n"
- "smin z3.s, p0/M, z3.s, z19.s\n"
- "smin z2.s, p0/M, z2.s, z19.s\n"
- "trn1 z20.h, z3.h, z2.h\n"
- "smin z1.s, p0/M, z1.s, z19.s\n"
- "smin z0.s, p0/M, z0.s, z19.s\n"
- "trn1 z19.h, z1.h, z0.h\n"
- "trn1 z16.b, z23.b, z16.b\n"
+ "trn1 z20.b, z19.b, z16.b\n"
+ "trn1 z19.h, z3.h, z2.h\n"
+ "trn1 z16.h, z1.h, z0.h\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
- "trn1 z16.b, z20.b, z19.b\n"
+ "st1b { z20.b }, p4, [%x[outptr], x27]\n"
+ "incb x27, ALL, MUL #4\n"
+ "trn1 z16.b, z19.b, z16.b\n"
"st1b { z18.b }, p3, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
"st1b { z17.b }, p2, [%x[outptr], x25]\n"
@@ -348,13 +348,13 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x455e07f0 // saddlt z16.h, z31.b, z30.b\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
- ".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
- ".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
"add x22, x22, #0x10\n"
"ld1b { z31.b }, p4/Z, [x21, x27]\n"
+ ".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
+ ".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
+ "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x459041ad // saddwb z13.s, z13.s, z16.h\n"
".inst 0x4590458c // saddwt z12.s, z12.s, z16.h\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e03f1 // saddlb z17.h, z31.b, z30.b\n"
@@ -368,10 +368,10 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
+ "subs x21, x21, #0x1\n"
"ld1b { z16.b }, p4/Z, [x20, x27]\n"
".inst 0x4508a211 // sshllb z17.h, z16.b, #0x0\n"
".inst 0x4508a610 // sshllt z16.h, z16.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
".inst 0x459041ad // saddwb z13.s, z13.s, z16.h\n"
@@ -379,26 +379,26 @@ void sme_s8_nhwc_avg_generic_depthfirst_impl(
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"ld1rw { z16.s }, p0/Z, [%x[rescale_ptr]]\n"
+ "mov z18.s, #0x7f\n"
+ "ld1rw { z17.s }, p0/Z, [%x[shift_ptr]]\n"
".inst 0x04b075ef // sqdmulh z15.s, z15.s, z16.s\n"
".inst 0x04b075ce // sqdmulh z14.s, z14.s, z16.s\n"
".inst 0x04b075ad // sqdmulh z13.s, z13.s, z16.s\n"
".inst 0x04b0758c // sqdmulh z12.s, z12.s, z16.s\n"
- "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
- "mov z18.s, #0x7f\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
"not z16.s, p0/M, z18.s\n"
+ ".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
+ ".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
+ ".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
+ ".inst 0x4482822c // srshl z12.s, p0/M, z12.s, z17.s\n"
"smax z15.s, p0/M, z15.s, z16.s\n"
"smax z14.s, p0/M, z14.s, z16.s\n"
"smax z13.s, p0/M, z13.s, z16.s\n"
"smax z12.s, p0/M, z12.s, z16.s\n"
"smin z15.s, p0/M, z15.s, z18.s\n"
"smin z14.s, p0/M, z14.s, z18.s\n"
- "trn1 z17.h, z15.h, z14.h\n"
"smin z13.s, p0/M, z13.s, z18.s\n"
"smin z12.s, p0/M, z12.s, z18.s\n"
+ "trn1 z17.h, z15.h, z14.h\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
"st1b { z16.b }, p4, [%x[outptr], x27]\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 96617566a8..f139b834c6 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,26 +66,26 @@ void sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"mov x15, #0x0\n"
- "ptrue p2.b\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
"mov x14, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "ptrue p2.b\n"
"ldr x13, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p0.b, x15, x13\n"
"ldp x12, x11, [x21, #0x0]\n"
"ldp x10, x9, [x21, #0x10]\n"
"ldp x28, x27, [x20, #0x0]\n"
- "ld1b { z30.b }, p0/Z, [x27, x15]\n"
+ "whilelt p0.b, x15, x13\n"
"ldp x26, x25, [x20, #0x10]\n"
- "ld1b { z29.b }, p0/Z, [x25, x15]\n"
"ldp x24, x23, [x20, #0x20]\n"
- "ld1b { z28.b }, p0/Z, [x24, x15]\n"
"ldp x22, x21, [x20, #0x30]\n"
- "ld1b { z27.b }, p0/Z, [x21, x15]\n"
+ "ld1b { z30.b }, p0/Z, [x27, x15]\n"
"ldr x20, [x20, #0x40]\n"
+ "ld1b { z29.b }, p0/Z, [x25, x15]\n"
+ "ld1b { z28.b }, p0/Z, [x24, x15]\n"
+ "ld1b { z27.b }, p0/Z, [x21, x15]\n"
"ld1b { z26.b }, p0/Z, [x28, x15]\n"
"ld1b { z25.b }, p0/Z, [x26, x15]\n"
"ld1b { z24.b }, p0/Z, [x23, x15]\n"
- "ld1b { z19.b }, p0/Z, [x22, x15]\n"
+ "ld1b { z20.b }, p0/Z, [x22, x15]\n"
"ld1b { z23.b }, p0/Z, [x20, x15]\n"
"incw x15\n"
"whilelt p1.b, x15, x13\n"
@@ -98,24 +98,24 @@ void sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"movprfx z18, z29\n smax z18.b, p2/M, z18.b, z26.b\n"
"movprfx z17, z25\n smax z17.b, p2/M, z17.b, z24.b\n"
"ld1b { z28.b }, p1/Z, [x24, x15]\n"
- "movprfx z16, z29\n smax z16.b, p2/M, z16.b, z19.b\n"
+ "movprfx z16, z29\n smax z16.b, p2/M, z16.b, z20.b\n"
"movprfx z20, z24\n smax z20.b, p2/M, z20.b, z23.b\n"
"ld1b { z27.b }, p1/Z, [x21, x15]\n"
"ld1b { z29.b }, p1/Z, [x25, x15]\n"
+ "ld1b { z26.b }, p1/Z, [x28, x15]\n"
+ "ld1b { z25.b }, p1/Z, [x26, x15]\n"
"movprfx z19, z22\n smax z19.b, p2/M, z19.b, z18.b\n"
"movprfx z18, z17\n smax z18.b, p2/M, z18.b, z22.b\n"
- "ld1b { z26.b }, p1/Z, [x28, x15]\n"
+ "ld1b { z24.b }, p1/Z, [x23, x15]\n"
"movprfx z17, z16\n smax z17.b, p2/M, z17.b, z21.b\n"
"movprfx z16, z21\n smax z16.b, p2/M, z16.b, z20.b\n"
- "ld1b { z25.b }, p1/Z, [x26, x15]\n"
- "st1b { z19.b }, p0, [x12, x14]\n"
- "ld1b { z24.b }, p1/Z, [x23, x15]\n"
- "st1b { z18.b }, p0, [x11, x14]\n"
- "ld1b { z19.b }, p1/Z, [x22, x15]\n"
- "st1b { z17.b }, p0, [x10, x14]\n"
+ "ld1b { z20.b }, p1/Z, [x22, x15]\n"
"ld1b { z23.b }, p1/Z, [x20, x15]\n"
"incw x15\n"
"whilelt p1.b, x15, x13\n"
+ "st1b { z19.b }, p0, [x12, x14]\n"
+ "st1b { z18.b }, p0, [x11, x14]\n"
+ "st1b { z17.b }, p0, [x10, x14]\n"
"st1b { z16.b }, p0, [x9, x14]\n"
"incw x14\n"
"b.any 1b\n"
@@ -123,15 +123,15 @@ void sme_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"movprfx z22, z30\n smax z22.b, p2/M, z22.b, z28.b\n"
"movprfx z21, z28\n smax z21.b, p2/M, z21.b, z27.b\n"
"whilelt p0.b, x14, x13\n"
- "movprfx z20, z29\n smax z20.b, p2/M, z20.b, z26.b\n"
- "movprfx z18, z25\n smax z18.b, p2/M, z18.b, z24.b\n"
- "movprfx z17, z29\n smax z17.b, p2/M, z17.b, z19.b\n"
- "movprfx z19, z24\n smax z19.b, p2/M, z19.b, z23.b\n"
- "movprfx z16, z22\n smax z16.b, p2/M, z16.b, z20.b\n"
- "smax z18.b, p2/M, z18.b, z22.b\n"
- "st1b { z16.b }, p0, [x12, x14]\n"
- "smax z17.b, p2/M, z17.b, z21.b\n"
- "movprfx z16, z21\n smax z16.b, p2/M, z16.b, z19.b\n"
+ "movprfx z18, z29\n smax z18.b, p2/M, z18.b, z26.b\n"
+ "movprfx z17, z25\n smax z17.b, p2/M, z17.b, z24.b\n"
+ "movprfx z16, z29\n smax z16.b, p2/M, z16.b, z20.b\n"
+ "movprfx z20, z24\n smax z20.b, p2/M, z20.b, z23.b\n"
+ "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z18.b\n"
+ "movprfx z18, z17\n smax z18.b, p2/M, z18.b, z22.b\n"
+ "movprfx z17, z16\n smax z17.b, p2/M, z17.b, z21.b\n"
+ "movprfx z16, z21\n smax z16.b, p2/M, z16.b, z20.b\n"
+ "st1b { z19.b }, p0, [x12, x14]\n"
"st1b { z18.b }, p0, [x11, x14]\n"
"st1b { z17.b }, p0, [x10, x14]\n"
"st1b { z16.b }, p0, [x9, x14]\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp
index d2b45cd353..5cf60e9315 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,21 +53,21 @@ void sme_s8_nhwc_max_generic_depthfirst_impl(
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "mov z5.b, #0x80\n"
"mov z4.b, #0x80\n"
- "mov z3.b, #0x80\n"
"mov x24, %x[inptrs]\n"
+ "mov z3.b, #0x80\n"
"mov z2.b, #0x80\n"
- "mov z1.b, #0x80\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
- "ld1b { z18.b }, p3/Z, [x23, x28]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z30.b }, p3/Z, [x23, x28]\n"
"ld1b { z29.b }, p3/Z, [x22, x28]\n"
"ld1b { z22.b }, p3/Z, [x21, x28]\n"
"ld1b { z28.b }, p3/Z, [x20, x28]\n"
@@ -81,34 +81,34 @@ void sme_s8_nhwc_max_generic_depthfirst_impl(
"ld1b { z24.b }, p1/Z, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
- "smax z23.b, p0/M, z23.b, z30.b\n"
+ "movprfx z19, z1\n smax z19.b, p0/M, z19.b, z0.b\n"
+ "smax z23.b, p0/M, z23.b, z31.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "smax z18.b, p0/M, z18.b, z29.b\n"
+ "movprfx z18, z30\n smax z18.b, p0/M, z18.b, z29.b\n"
"smax z22.b, p0/M, z22.b, z28.b\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
"smax z17.b, p0/M, z17.b, z27.b\n"
"smax z21.b, p0/M, z21.b, z26.b\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
"smax z16.b, p0/M, z16.b, z25.b\n"
"smax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"smax z18.b, p0/M, z18.b, z22.b\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
"smax z17.b, p0/M, z17.b, z21.b\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
"smax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
- "smax z4.b, p0/M, z4.b, z19.b\n"
- "smax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z18.b }, p3/Z, [x23, x28]\n"
- "smax z2.b, p0/M, z2.b, z17.b\n"
- "smax z1.b, p0/M, z1.b, z16.b\n"
+ "ld1b { z30.b }, p3/Z, [x23, x28]\n"
+ "smax z5.b, p0/M, z5.b, z19.b\n"
"ld1b { z29.b }, p3/Z, [x22, x28]\n"
+ "smax z4.b, p0/M, z4.b, z18.b\n"
"ld1b { z22.b }, p3/Z, [x21, x28]\n"
+ "smax z3.b, p0/M, z3.b, z17.b\n"
"ld1b { z28.b }, p3/Z, [x20, x28]\n"
+ "smax z2.b, p0/M, z2.b, z16.b\n"
"ld1b { z17.b }, p2/Z, [x23, x27]\n"
"ld1b { z27.b }, p2/Z, [x22, x27]\n"
"ld1b { z21.b }, p2/Z, [x21, x27]\n"
@@ -119,9 +119,9 @@ void sme_s8_nhwc_max_generic_depthfirst_impl(
"ld1b { z24.b }, p1/Z, [x20, x26]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
- "smax z23.b, p0/M, z23.b, z30.b\n"
- "smax z18.b, p0/M, z18.b, z29.b\n"
+ "movprfx z19, z1\n smax z19.b, p0/M, z19.b, z0.b\n"
+ "smax z23.b, p0/M, z23.b, z31.b\n"
+ "movprfx z18, z30\n smax z18.b, p0/M, z18.b, z29.b\n"
"smax z22.b, p0/M, z22.b, z28.b\n"
"smax z17.b, p0/M, z17.b, z27.b\n"
"smax z21.b, p0/M, z21.b, z26.b\n"
@@ -131,33 +131,33 @@ void sme_s8_nhwc_max_generic_depthfirst_impl(
"smax z18.b, p0/M, z18.b, z22.b\n"
"smax z17.b, p0/M, z17.b, z21.b\n"
"smax z16.b, p0/M, z16.b, z20.b\n"
- "smax z4.b, p0/M, z4.b, z19.b\n"
- "smax z3.b, p0/M, z3.b, z18.b\n"
- "smax z2.b, p0/M, z2.b, z17.b\n"
- "smax z1.b, p0/M, z1.b, z16.b\n"
+ "smax z5.b, p0/M, z5.b, z19.b\n"
+ "smax z4.b, p0/M, z4.b, z18.b\n"
+ "smax z3.b, p0/M, z3.b, z17.b\n"
+ "smax z2.b, p0/M, z2.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "smax z4.b, p0/M, z4.b, z16.b\n"
- "ld1b { z16.b }, p3/Z, [x20, x28]\n"
- "smax z3.b, p0/M, z3.b, z16.b\n"
- "ld1b { z16.b }, p2/Z, [x20, x27]\n"
- "smax z2.b, p0/M, z2.b, z16.b\n"
+ "ld1b { z19.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z18.b }, p3/Z, [x20, x28]\n"
+ "ld1b { z17.b }, p2/Z, [x20, x27]\n"
"ld1b { z16.b }, p1/Z, [x20, x26]\n"
- "smax z1.b, p0/M, z1.b, z16.b\n"
+ "smax z5.b, p0/M, z5.b, z19.b\n"
+ "smax z4.b, p0/M, z4.b, z18.b\n"
+ "smax z3.b, p0/M, z3.b, z17.b\n"
+ "smax z2.b, p0/M, z2.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1b { z4.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z5.b }, p4, [%x[outptr], x9]\n"
"incb x9, ALL, MUL #4\n"
- "st1b { z3.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z4.b }, p3, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z2.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z3.b }, p2, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z1.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z2.b }, p1, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
"whilelt p1.b, x26, %x[n_channels]\n"
"b.any 1b\n"
@@ -166,48 +166,48 @@ void sme_s8_nhwc_max_generic_depthfirst_impl(
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z4.b, #0x80\n"
+ "mov z5.b, #0x80\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x20, x22, [x24, #0x0]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x20, x9]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z0\n smax z16.b, p0/M, z16.b, z31.b\n"
- "movprfx z17, z23\n smax z17.b, p0/M, z17.b, z30.b\n"
+ "movprfx z16, z1\n smax z16.b, p0/M, z16.b, z0.b\n"
+ "movprfx z17, z23\n smax z17.b, p0/M, z17.b, z31.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "smax z16.b, p0/M, z16.b, z17.b\n"
"ldp x21, x20, [x24, #0x10]\n"
- "smax z4.b, p0/M, z4.b, z16.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "smax z16.b, p0/M, z16.b, z17.b\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
+ "smax z5.b, p0/M, z5.b, z16.b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z0\n smax z16.b, p0/M, z16.b, z31.b\n"
- "movprfx z17, z23\n smax z17.b, p0/M, z17.b, z30.b\n"
+ "movprfx z16, z1\n smax z16.b, p0/M, z16.b, z0.b\n"
+ "movprfx z17, z23\n smax z17.b, p0/M, z17.b, z31.b\n"
"smax z16.b, p0/M, z16.b, z17.b\n"
- "smax z4.b, p0/M, z4.b, z16.b\n"
+ "smax z5.b, p0/M, z5.b, z16.b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "smax z4.b, p0/M, z4.b, z16.b\n"
+ "ld1b { z16.b }, p4/Z, [x20, x9]\n"
+ "smax z5.b, p0/M, z5.b, z16.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1b { z4.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z5.b }, p4, [%x[outptr], x9]\n"
"incb x9\n"
"whilelt p4.b, x9, %x[n_channels]\n"
"b.any 8b\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp
index 91f2f7ab31..c4a6290dac 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -165,32 +165,32 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x455c03b5 // saddlb z21.h, z29.b, z28.b\n"
".inst 0x455c07b4 // saddlt z20.h, z29.b, z28.b\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x455a0373 // saddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0772 // saddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x45580331 // saddlb z17.h, z25.b, z24.b\n"
".inst 0x45580730 // saddlt z16.h, z25.b, z24.b\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
+ "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x26]\n"
".inst 0x4595416b // saddwb z11.s, z11.s, z21.h\n"
".inst 0x4595454a // saddwt z10.s, z10.s, z21.h\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x26]\n"
".inst 0x45944129 // saddwb z9.s, z9.s, z20.h\n"
".inst 0x45944508 // saddwt z8.s, z8.s, z20.h\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x25]\n"
".inst 0x459340e7 // saddwb z7.s, z7.s, z19.h\n"
".inst 0x459344c6 // saddwt z6.s, z6.s, z19.h\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x25]\n"
".inst 0x459240a5 // saddwb z5.s, z5.s, z18.h\n"
".inst 0x45924484 // saddwt z4.s, z4.s, z18.h\n"
+ "ld1b { z25.b }, p1/Z, [x21, x24]\n"
".inst 0x45914063 // saddwb z3.s, z3.s, z17.h\n"
".inst 0x45914442 // saddwt z2.s, z2.s, z17.h\n"
+ "ld1b { z24.b }, p1/Z, [x20, x24]\n"
".inst 0x45904021 // saddwb z1.s, z1.s, z16.h\n"
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"bgt 2b\n"
@@ -224,17 +224,17 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
- ".inst 0x4508a217 // sshllb z23.h, z16.b, #0x0\n"
- ".inst 0x4508a616 // sshllt z22.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p3/Z, [x20, x26]\n"
- ".inst 0x4508a215 // sshllb z21.h, z16.b, #0x0\n"
- ".inst 0x4508a614 // sshllt z20.h, z16.b, #0x0\n"
"subs x21, x21, #0x1\n"
- "ld1b { z16.b }, p2/Z, [x20, x25]\n"
- ".inst 0x4508a213 // sshllb z19.h, z16.b, #0x0\n"
- ".inst 0x4508a612 // sshllt z18.h, z16.b, #0x0\n"
+ "ld1b { z19.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z18.b }, p3/Z, [x20, x26]\n"
+ "ld1b { z17.b }, p2/Z, [x20, x25]\n"
"ld1b { z16.b }, p1/Z, [x20, x24]\n"
+ ".inst 0x4508a277 // sshllb z23.h, z19.b, #0x0\n"
+ ".inst 0x4508a676 // sshllt z22.h, z19.b, #0x0\n"
+ ".inst 0x4508a255 // sshllb z21.h, z18.b, #0x0\n"
+ ".inst 0x4508a654 // sshllt z20.h, z18.b, #0x0\n"
+ ".inst 0x4508a233 // sshllb z19.h, z17.b, #0x0\n"
+ ".inst 0x4508a632 // sshllt z18.h, z17.b, #0x0\n"
".inst 0x4508a211 // sshllb z17.h, z16.b, #0x0\n"
".inst 0x4508a610 // sshllt z16.h, z16.b, #0x0\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
@@ -255,25 +255,26 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
- ".inst 0x4482824f // srshl z15.s, p0/M, z15.s, z18.s\n"
- ".inst 0x4482824e // srshl z14.s, p0/M, z14.s, z18.s\n"
- ".inst 0x4482824d // srshl z13.s, p0/M, z13.s, z18.s\n"
- ".inst 0x4482824c // srshl z12.s, p0/M, z12.s, z18.s\n"
+ "ld1rw { z19.s }, p0/Z, [%x[left_shift]]\n"
+ "mov z18.s, #0x7f\n"
"ld1rw { z17.s }, p0/Z, [%x[combined_rescale_value]]\n"
- ".inst 0x4482824b // srshl z11.s, p0/M, z11.s, z18.s\n"
- ".inst 0x4482824a // srshl z10.s, p0/M, z10.s, z18.s\n"
"ld1rw { z16.s }, p0/Z, [%x[right_shift]]\n"
- ".inst 0x44828249 // srshl z9.s, p0/M, z9.s, z18.s\n"
- ".inst 0x44828248 // srshl z8.s, p0/M, z8.s, z18.s\n"
- ".inst 0x44828247 // srshl z7.s, p0/M, z7.s, z18.s\n"
- ".inst 0x44828246 // srshl z6.s, p0/M, z6.s, z18.s\n"
- ".inst 0x44828245 // srshl z5.s, p0/M, z5.s, z18.s\n"
- ".inst 0x44828244 // srshl z4.s, p0/M, z4.s, z18.s\n"
- ".inst 0x44828243 // srshl z3.s, p0/M, z3.s, z18.s\n"
- ".inst 0x44828242 // srshl z2.s, p0/M, z2.s, z18.s\n"
- ".inst 0x44828241 // srshl z1.s, p0/M, z1.s, z18.s\n"
- ".inst 0x44828240 // srshl z0.s, p0/M, z0.s, z18.s\n"
+ ".inst 0x4482826f // srshl z15.s, p0/M, z15.s, z19.s\n"
+ ".inst 0x4482826e // srshl z14.s, p0/M, z14.s, z19.s\n"
+ ".inst 0x4482826d // srshl z13.s, p0/M, z13.s, z19.s\n"
+ ".inst 0x4482826c // srshl z12.s, p0/M, z12.s, z19.s\n"
+ ".inst 0x4482826b // srshl z11.s, p0/M, z11.s, z19.s\n"
+ ".inst 0x4482826a // srshl z10.s, p0/M, z10.s, z19.s\n"
+ ".inst 0x44828269 // srshl z9.s, p0/M, z9.s, z19.s\n"
+ ".inst 0x44828268 // srshl z8.s, p0/M, z8.s, z19.s\n"
+ ".inst 0x44828267 // srshl z7.s, p0/M, z7.s, z19.s\n"
+ ".inst 0x44828266 // srshl z6.s, p0/M, z6.s, z19.s\n"
+ ".inst 0x44828265 // srshl z5.s, p0/M, z5.s, z19.s\n"
+ ".inst 0x44828264 // srshl z4.s, p0/M, z4.s, z19.s\n"
+ ".inst 0x44828263 // srshl z3.s, p0/M, z3.s, z19.s\n"
+ ".inst 0x44828262 // srshl z2.s, p0/M, z2.s, z19.s\n"
+ ".inst 0x44828261 // srshl z1.s, p0/M, z1.s, z19.s\n"
+ ".inst 0x44828260 // srshl z0.s, p0/M, z0.s, z19.s\n"
".inst 0x04b175ef // sqrdmulh z15.s, z15.s, z17.s\n"
".inst 0x04b175ce // sqrdmulh z14.s, z14.s, z17.s\n"
".inst 0x04b175ad // sqrdmulh z13.s, z13.s, z17.s\n"
@@ -290,7 +291,6 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x04b17442 // sqrdmulh z2.s, z2.s, z17.s\n"
".inst 0x04b17421 // sqrdmulh z1.s, z1.s, z17.s\n"
".inst 0x04b17400 // sqrdmulh z0.s, z0.s, z17.s\n"
- "mov z19.s, #0x7f\n"
".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
@@ -307,7 +307,7 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x44828202 // srshl z2.s, p0/M, z2.s, z16.s\n"
".inst 0x44828201 // srshl z1.s, p0/M, z1.s, z16.s\n"
".inst 0x44828200 // srshl z0.s, p0/M, z0.s, z16.s\n"
- "not z16.s, p0/M, z19.s\n"
+ "not z16.s, p0/M, z18.s\n"
"smax z15.s, p0/M, z15.s, z16.s\n"
"smax z14.s, p0/M, z14.s, z16.s\n"
"smax z13.s, p0/M, z13.s, z16.s\n"
@@ -324,36 +324,36 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
"smax z2.s, p0/M, z2.s, z16.s\n"
"smax z1.s, p0/M, z1.s, z16.s\n"
"smax z0.s, p0/M, z0.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z19.s\n"
- "smin z14.s, p0/M, z14.s, z19.s\n"
- "trn1 z23.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z19.s\n"
- "smin z12.s, p0/M, z12.s, z19.s\n"
+ "smin z15.s, p0/M, z15.s, z18.s\n"
+ "smin z14.s, p0/M, z14.s, z18.s\n"
+ "smin z13.s, p0/M, z13.s, z18.s\n"
+ "smin z12.s, p0/M, z12.s, z18.s\n"
+ "smin z11.s, p0/M, z11.s, z18.s\n"
+ "smin z10.s, p0/M, z10.s, z18.s\n"
+ "smin z9.s, p0/M, z9.s, z18.s\n"
+ "smin z8.s, p0/M, z8.s, z18.s\n"
+ "smin z7.s, p0/M, z7.s, z18.s\n"
+ "smin z6.s, p0/M, z6.s, z18.s\n"
+ "trn1 z19.h, z15.h, z14.h\n"
+ "smin z5.s, p0/M, z5.s, z18.s\n"
+ "smin z4.s, p0/M, z4.s, z18.s\n"
"trn1 z16.h, z13.h, z12.h\n"
- "smin z11.s, p0/M, z11.s, z19.s\n"
- "smin z10.s, p0/M, z10.s, z19.s\n"
+ "smin z3.s, p0/M, z3.s, z18.s\n"
+ "smin z2.s, p0/M, z2.s, z18.s\n"
"trn1 z22.h, z11.h, z10.h\n"
- "smin z9.s, p0/M, z9.s, z19.s\n"
- "smin z8.s, p0/M, z8.s, z19.s\n"
+ "smin z1.s, p0/M, z1.s, z18.s\n"
+ "smin z0.s, p0/M, z0.s, z18.s\n"
"trn1 z18.h, z9.h, z8.h\n"
- "smin z7.s, p0/M, z7.s, z19.s\n"
- "smin z6.s, p0/M, z6.s, z19.s\n"
"trn1 z21.h, z7.h, z6.h\n"
- "smin z5.s, p0/M, z5.s, z19.s\n"
- "smin z4.s, p0/M, z4.s, z19.s\n"
"trn1 z17.h, z5.h, z4.h\n"
- "smin z3.s, p0/M, z3.s, z19.s\n"
- "smin z2.s, p0/M, z2.s, z19.s\n"
- "trn1 z20.h, z3.h, z2.h\n"
- "smin z1.s, p0/M, z1.s, z19.s\n"
- "smin z0.s, p0/M, z0.s, z19.s\n"
- "trn1 z19.h, z1.h, z0.h\n"
- "trn1 z16.b, z23.b, z16.b\n"
+ "trn1 z20.b, z19.b, z16.b\n"
+ "trn1 z19.h, z3.h, z2.h\n"
+ "trn1 z16.h, z1.h, z0.h\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
- "trn1 z16.b, z20.b, z19.b\n"
+ "st1b { z20.b }, p4, [%x[outptr], x27]\n"
+ "incb x27, ALL, MUL #4\n"
+ "trn1 z16.b, z19.b, z16.b\n"
"st1b { z18.b }, p3, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
"st1b { z17.b }, p2, [%x[outptr], x25]\n"
@@ -384,13 +384,13 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x455e07f0 // saddlt z16.h, z31.b, z30.b\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
- ".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
- ".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
"add x22, x22, #0x10\n"
"ld1b { z31.b }, p4/Z, [x21, x27]\n"
+ ".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
+ ".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
+ "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x459041ad // saddwb z13.s, z13.s, z16.h\n"
".inst 0x4590458c // saddwt z12.s, z12.s, z16.h\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e03f1 // saddlb z17.h, z31.b, z30.b\n"
@@ -404,10 +404,10 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
+ "subs x21, x21, #0x1\n"
"ld1b { z16.b }, p4/Z, [x20, x27]\n"
".inst 0x4508a211 // sshllb z17.h, z16.b, #0x0\n"
".inst 0x4508a610 // sshllt z16.h, z16.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
".inst 0x459041ad // saddwb z13.s, z13.s, z16.h\n"
@@ -415,31 +415,31 @@ void sme_s8q_nhwc_avg_generic_depthfirst_impl(
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
"ld1rw { z16.s }, p0/Z, [%x[left_shift]]\n"
+ "mov z19.s, #0x7f\n"
+ "ld1rw { z18.s }, p0/Z, [%x[combined_rescale_value]]\n"
+ "ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- "ld1rw { z17.s }, p0/Z, [%x[combined_rescale_value]]\n"
- ".inst 0x04b175ef // sqrdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x04b175ce // sqrdmulh z14.s, z14.s, z17.s\n"
- "ld1rw { z16.s }, p0/Z, [%x[right_shift]]\n"
- ".inst 0x04b175ad // sqrdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x04b1758c // sqrdmulh z12.s, z12.s, z17.s\n"
- "mov z18.s, #0x7f\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- "not z16.s, p0/M, z18.s\n"
+ "not z16.s, p0/M, z19.s\n"
+ ".inst 0x04b275ef // sqrdmulh z15.s, z15.s, z18.s\n"
+ ".inst 0x04b275ce // sqrdmulh z14.s, z14.s, z18.s\n"
+ ".inst 0x04b275ad // sqrdmulh z13.s, z13.s, z18.s\n"
+ ".inst 0x04b2758c // sqrdmulh z12.s, z12.s, z18.s\n"
+ ".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
+ ".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
+ ".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
+ ".inst 0x4482822c // srshl z12.s, p0/M, z12.s, z17.s\n"
"smax z15.s, p0/M, z15.s, z16.s\n"
"smax z14.s, p0/M, z14.s, z16.s\n"
"smax z13.s, p0/M, z13.s, z16.s\n"
"smax z12.s, p0/M, z12.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z18.s\n"
- "smin z14.s, p0/M, z14.s, z18.s\n"
+ "smin z15.s, p0/M, z15.s, z19.s\n"
+ "smin z14.s, p0/M, z14.s, z19.s\n"
+ "smin z13.s, p0/M, z13.s, z19.s\n"
+ "smin z12.s, p0/M, z12.s, z19.s\n"
"trn1 z17.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z18.s\n"
- "smin z12.s, p0/M, z12.s, z18.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
"st1b { z16.b }, p4, [%x[outptr], x27]\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp
index e9b586f4ce..6895fd2011 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,21 +55,21 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z4.b, #0x80\n"
+ "mov z5.b, #0x80\n"
"mov z3.b, #0x80\n"
"mov x24, %x[inptrs]\n"
"mov z2.b, #0x80\n"
- "mov z1.b, #0x80\n"
+ "mov z4.b, #0x80\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
- "ld1b { z18.b }, p3/Z, [x23, x28]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z30.b }, p3/Z, [x23, x28]\n"
"ld1b { z29.b }, p3/Z, [x22, x28]\n"
"ld1b { z22.b }, p3/Z, [x21, x28]\n"
"ld1b { z28.b }, p3/Z, [x20, x28]\n"
@@ -83,34 +83,34 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"ld1b { z24.b }, p1/Z, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
- "smax z23.b, p0/M, z23.b, z30.b\n"
+ "movprfx z19, z1\n smax z19.b, p0/M, z19.b, z0.b\n"
+ "smax z23.b, p0/M, z23.b, z31.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "smax z18.b, p0/M, z18.b, z29.b\n"
+ "movprfx z18, z30\n smax z18.b, p0/M, z18.b, z29.b\n"
"smax z22.b, p0/M, z22.b, z28.b\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
"smax z17.b, p0/M, z17.b, z27.b\n"
"smax z21.b, p0/M, z21.b, z26.b\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
"smax z16.b, p0/M, z16.b, z25.b\n"
"smax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
"smax z19.b, p0/M, z19.b, z23.b\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"smax z18.b, p0/M, z18.b, z22.b\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
"smax z17.b, p0/M, z17.b, z21.b\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
"smax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
- "smax z4.b, p0/M, z4.b, z19.b\n"
- "smax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z18.b }, p3/Z, [x23, x28]\n"
- "smax z2.b, p0/M, z2.b, z17.b\n"
- "smax z1.b, p0/M, z1.b, z16.b\n"
+ "ld1b { z30.b }, p3/Z, [x23, x28]\n"
+ "smax z5.b, p0/M, z5.b, z19.b\n"
"ld1b { z29.b }, p3/Z, [x22, x28]\n"
+ "smax z3.b, p0/M, z3.b, z18.b\n"
"ld1b { z22.b }, p3/Z, [x21, x28]\n"
+ "smax z2.b, p0/M, z2.b, z17.b\n"
"ld1b { z28.b }, p3/Z, [x20, x28]\n"
+ "smax z4.b, p0/M, z4.b, z16.b\n"
"ld1b { z17.b }, p2/Z, [x23, x27]\n"
"ld1b { z27.b }, p2/Z, [x22, x27]\n"
"ld1b { z21.b }, p2/Z, [x21, x27]\n"
@@ -121,9 +121,9 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"ld1b { z24.b }, p1/Z, [x20, x26]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z0\n smax z19.b, p0/M, z19.b, z31.b\n"
- "smax z23.b, p0/M, z23.b, z30.b\n"
- "smax z18.b, p0/M, z18.b, z29.b\n"
+ "movprfx z19, z1\n smax z19.b, p0/M, z19.b, z0.b\n"
+ "smax z23.b, p0/M, z23.b, z31.b\n"
+ "movprfx z18, z30\n smax z18.b, p0/M, z18.b, z29.b\n"
"smax z22.b, p0/M, z22.b, z28.b\n"
"smax z17.b, p0/M, z17.b, z27.b\n"
"smax z21.b, p0/M, z21.b, z26.b\n"
@@ -133,108 +133,108 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"smax z18.b, p0/M, z18.b, z22.b\n"
"smax z17.b, p0/M, z17.b, z21.b\n"
"smax z16.b, p0/M, z16.b, z20.b\n"
- "smax z4.b, p0/M, z4.b, z19.b\n"
+ "smax z5.b, p0/M, z5.b, z19.b\n"
"smax z3.b, p0/M, z3.b, z18.b\n"
"smax z2.b, p0/M, z2.b, z17.b\n"
- "smax z1.b, p0/M, z1.b, z16.b\n"
+ "smax z4.b, p0/M, z4.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "smax z4.b, p0/M, z4.b, z16.b\n"
- "ld1b { z16.b }, p3/Z, [x20, x28]\n"
- "smax z3.b, p0/M, z3.b, z16.b\n"
- "ld1b { z16.b }, p2/Z, [x20, x27]\n"
- "smax z2.b, p0/M, z2.b, z16.b\n"
+ "ld1b { z19.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z18.b }, p3/Z, [x20, x28]\n"
+ "ld1b { z17.b }, p2/Z, [x20, x27]\n"
"ld1b { z16.b }, p1/Z, [x20, x26]\n"
- "smax z1.b, p0/M, z1.b, z16.b\n"
+ "smax z5.b, p0/M, z5.b, z19.b\n"
+ "smax z3.b, p0/M, z3.b, z18.b\n"
+ "smax z2.b, p0/M, z2.b, z17.b\n"
+ "smax z4.b, p0/M, z4.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- ".inst 0x4508a097 // sshllb z23.h, z4.b, #0x0\n"
- ".inst 0x4508a496 // sshllt z22.h, z4.b, #0x0\n"
+ ".inst 0x4508a0b3 // sshllb z19.h, z5.b, #0x0\n"
+ ".inst 0x4508a4b8 // sshllt z24.h, z5.b, #0x0\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
- ".inst 0x4508a075 // sshllb z21.h, z3.b, #0x0\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ ".inst 0x4508a076 // sshllb z22.h, z3.b, #0x0\n"
".inst 0x4508a472 // sshllt z18.h, z3.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
"ld1rw { z3.s }, p0/Z, [x20]\n"
- ".inst 0x4508a054 // sshllb z20.h, z2.b, #0x0\n"
- ".inst 0x4508a451 // sshllt z17.h, z2.b, #0x0\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
- ".inst 0x4508a033 // sshllb z19.h, z1.b, #0x0\n"
- ".inst 0x4508a430 // sshllt z16.h, z1.b, #0x0\n"
- ".inst 0x4510a2e1 // sshllb z1.s, z23.h, #0x0\n"
- ".inst 0x4510a6f7 // sshllt z23.s, z23.h, #0x0\n"
- ".inst 0x4510a2c0 // sshllb z0.s, z22.h, #0x0\n"
- ".inst 0x4510a6df // sshllt z31.s, z22.h, #0x0\n"
- ".inst 0x4510a2be // sshllb z30.s, z21.h, #0x0\n"
- ".inst 0x4510a6b6 // sshllt z22.s, z21.h, #0x0\n"
+ ".inst 0x4508a055 // sshllb z21.h, z2.b, #0x0\n"
+ ".inst 0x4508a454 // sshllt z20.h, z2.b, #0x0\n"
+ "ld1rw { z2.s }, p0/Z, [x21]\n"
+ ".inst 0x4508a097 // sshllb z23.h, z4.b, #0x0\n"
+ ".inst 0x4508a491 // sshllt z17.h, z4.b, #0x0\n"
+ "ld1rw { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x4510a261 // sshllb z1.s, z19.h, #0x0\n"
+ ".inst 0x4510a673 // sshllt z19.s, z19.h, #0x0\n"
+ ".inst 0x4510a300 // sshllb z0.s, z24.h, #0x0\n"
+ ".inst 0x4510a71f // sshllt z31.s, z24.h, #0x0\n"
+ ".inst 0x4510a2de // sshllb z30.s, z22.h, #0x0\n"
+ ".inst 0x4510a6d6 // sshllt z22.s, z22.h, #0x0\n"
".inst 0x4510a25d // sshllb z29.s, z18.h, #0x0\n"
".inst 0x4510a652 // sshllt z18.s, z18.h, #0x0\n"
- ".inst 0x4510a29c // sshllb z28.s, z20.h, #0x0\n"
- ".inst 0x4510a695 // sshllt z21.s, z20.h, #0x0\n"
- ".inst 0x4510a23b // sshllb z27.s, z17.h, #0x0\n"
- ".inst 0x4510a631 // sshllt z17.s, z17.h, #0x0\n"
- ".inst 0x4510a27a // sshllb z26.s, z19.h, #0x0\n"
- ".inst 0x4510a674 // sshllt z20.s, z19.h, #0x0\n"
- ".inst 0x4510a219 // sshllb z25.s, z16.h, #0x0\n"
- ".inst 0x4510a618 // sshllt z24.s, z16.h, #0x0\n"
- ".inst 0x44828081 // srshl z1.s, p0/M, z1.s, z4.s\n"
- ".inst 0x44828097 // srshl z23.s, p0/M, z23.s, z4.s\n"
- ".inst 0x44828080 // srshl z0.s, p0/M, z0.s, z4.s\n"
- ".inst 0x4482809f // srshl z31.s, p0/M, z31.s, z4.s\n"
- ".inst 0x4482809e // srshl z30.s, p0/M, z30.s, z4.s\n"
- ".inst 0x44828096 // srshl z22.s, p0/M, z22.s, z4.s\n"
- ".inst 0x4482809d // srshl z29.s, p0/M, z29.s, z4.s\n"
- ".inst 0x44828092 // srshl z18.s, p0/M, z18.s, z4.s\n"
- ".inst 0x4482809c // srshl z28.s, p0/M, z28.s, z4.s\n"
- ".inst 0x44828095 // srshl z21.s, p0/M, z21.s, z4.s\n"
- ".inst 0x4482809b // srshl z27.s, p0/M, z27.s, z4.s\n"
- ".inst 0x44828091 // srshl z17.s, p0/M, z17.s, z4.s\n"
- ".inst 0x4482809a // srshl z26.s, p0/M, z26.s, z4.s\n"
- ".inst 0x44828094 // srshl z20.s, p0/M, z20.s, z4.s\n"
- ".inst 0x44828099 // srshl z25.s, p0/M, z25.s, z4.s\n"
- ".inst 0x44828098 // srshl z24.s, p0/M, z24.s, z4.s\n"
- ".inst 0x04a37421 // sqrdmulh z1.s, z1.s, z3.s\n"
- ".inst 0x04a376f7 // sqrdmulh z23.s, z23.s, z3.s\n"
- ".inst 0x04a37400 // sqrdmulh z0.s, z0.s, z3.s\n"
- ".inst 0x04a377ff // sqrdmulh z31.s, z31.s, z3.s\n"
- ".inst 0x04a377de // sqrdmulh z30.s, z30.s, z3.s\n"
- ".inst 0x04a376d6 // sqrdmulh z22.s, z22.s, z3.s\n"
- ".inst 0x04a377bd // sqrdmulh z29.s, z29.s, z3.s\n"
- ".inst 0x04a37652 // sqrdmulh z18.s, z18.s, z3.s\n"
- ".inst 0x04a3779c // sqrdmulh z28.s, z28.s, z3.s\n"
- ".inst 0x04a376b5 // sqrdmulh z21.s, z21.s, z3.s\n"
- ".inst 0x04a3777b // sqrdmulh z27.s, z27.s, z3.s\n"
- ".inst 0x04a37631 // sqrdmulh z17.s, z17.s, z3.s\n"
- ".inst 0x04a3775a // sqrdmulh z26.s, z26.s, z3.s\n"
- ".inst 0x04a37694 // sqrdmulh z20.s, z20.s, z3.s\n"
- ".inst 0x04a37739 // sqrdmulh z25.s, z25.s, z3.s\n"
- ".inst 0x04a37718 // sqrdmulh z24.s, z24.s, z3.s\n"
- "mov z19.s, #0x7f\n"
- ".inst 0x44828041 // srshl z1.s, p0/M, z1.s, z2.s\n"
- ".inst 0x44828057 // srshl z23.s, p0/M, z23.s, z2.s\n"
- ".inst 0x44828040 // srshl z0.s, p0/M, z0.s, z2.s\n"
- ".inst 0x4482805f // srshl z31.s, p0/M, z31.s, z2.s\n"
- ".inst 0x4482805e // srshl z30.s, p0/M, z30.s, z2.s\n"
- ".inst 0x44828056 // srshl z22.s, p0/M, z22.s, z2.s\n"
- ".inst 0x4482805d // srshl z29.s, p0/M, z29.s, z2.s\n"
- ".inst 0x44828052 // srshl z18.s, p0/M, z18.s, z2.s\n"
- ".inst 0x4482805c // srshl z28.s, p0/M, z28.s, z2.s\n"
- ".inst 0x44828055 // srshl z21.s, p0/M, z21.s, z2.s\n"
- ".inst 0x4482805b // srshl z27.s, p0/M, z27.s, z2.s\n"
- ".inst 0x44828051 // srshl z17.s, p0/M, z17.s, z2.s\n"
- ".inst 0x4482805a // srshl z26.s, p0/M, z26.s, z2.s\n"
- ".inst 0x44828054 // srshl z20.s, p0/M, z20.s, z2.s\n"
- ".inst 0x44828059 // srshl z25.s, p0/M, z25.s, z2.s\n"
- ".inst 0x44828058 // srshl z24.s, p0/M, z24.s, z2.s\n"
- "not z16.s, p0/M, z19.s\n"
+ ".inst 0x4510a2bc // sshllb z28.s, z21.h, #0x0\n"
+ ".inst 0x4510a6b5 // sshllt z21.s, z21.h, #0x0\n"
+ ".inst 0x4510a29b // sshllb z27.s, z20.h, #0x0\n"
+ ".inst 0x4510a694 // sshllt z20.s, z20.h, #0x0\n"
+ ".inst 0x4510a2fa // sshllb z26.s, z23.h, #0x0\n"
+ ".inst 0x4510a6f9 // sshllt z25.s, z23.h, #0x0\n"
+ ".inst 0x4510a238 // sshllb z24.s, z17.h, #0x0\n"
+ ".inst 0x4510a637 // sshllt z23.s, z17.h, #0x0\n"
+ ".inst 0x44828061 // srshl z1.s, p0/M, z1.s, z3.s\n"
+ ".inst 0x44828073 // srshl z19.s, p0/M, z19.s, z3.s\n"
+ ".inst 0x44828060 // srshl z0.s, p0/M, z0.s, z3.s\n"
+ ".inst 0x4482807f // srshl z31.s, p0/M, z31.s, z3.s\n"
+ ".inst 0x4482807e // srshl z30.s, p0/M, z30.s, z3.s\n"
+ ".inst 0x44828076 // srshl z22.s, p0/M, z22.s, z3.s\n"
+ ".inst 0x4482807d // srshl z29.s, p0/M, z29.s, z3.s\n"
+ ".inst 0x44828072 // srshl z18.s, p0/M, z18.s, z3.s\n"
+ ".inst 0x4482807c // srshl z28.s, p0/M, z28.s, z3.s\n"
+ ".inst 0x44828075 // srshl z21.s, p0/M, z21.s, z3.s\n"
+ ".inst 0x4482807b // srshl z27.s, p0/M, z27.s, z3.s\n"
+ ".inst 0x44828074 // srshl z20.s, p0/M, z20.s, z3.s\n"
+ ".inst 0x4482807a // srshl z26.s, p0/M, z26.s, z3.s\n"
+ ".inst 0x44828079 // srshl z25.s, p0/M, z25.s, z3.s\n"
+ ".inst 0x44828078 // srshl z24.s, p0/M, z24.s, z3.s\n"
+ ".inst 0x44828077 // srshl z23.s, p0/M, z23.s, z3.s\n"
+ ".inst 0x04a27421 // sqrdmulh z1.s, z1.s, z2.s\n"
+ ".inst 0x04a27673 // sqrdmulh z19.s, z19.s, z2.s\n"
+ ".inst 0x04a27400 // sqrdmulh z0.s, z0.s, z2.s\n"
+ ".inst 0x04a277ff // sqrdmulh z31.s, z31.s, z2.s\n"
+ ".inst 0x04a277de // sqrdmulh z30.s, z30.s, z2.s\n"
+ ".inst 0x04a276d6 // sqrdmulh z22.s, z22.s, z2.s\n"
+ ".inst 0x04a277bd // sqrdmulh z29.s, z29.s, z2.s\n"
+ ".inst 0x04a27652 // sqrdmulh z18.s, z18.s, z2.s\n"
+ ".inst 0x04a2779c // sqrdmulh z28.s, z28.s, z2.s\n"
+ ".inst 0x04a276b5 // sqrdmulh z21.s, z21.s, z2.s\n"
+ ".inst 0x04a2777b // sqrdmulh z27.s, z27.s, z2.s\n"
+ ".inst 0x04a27694 // sqrdmulh z20.s, z20.s, z2.s\n"
+ ".inst 0x04a2775a // sqrdmulh z26.s, z26.s, z2.s\n"
+ ".inst 0x04a27739 // sqrdmulh z25.s, z25.s, z2.s\n"
+ ".inst 0x04a27718 // sqrdmulh z24.s, z24.s, z2.s\n"
+ ".inst 0x04a276f7 // sqrdmulh z23.s, z23.s, z2.s\n"
+ "mov z17.s, #0x7f\n"
+ ".inst 0x44828201 // srshl z1.s, p0/M, z1.s, z16.s\n"
+ ".inst 0x44828213 // srshl z19.s, p0/M, z19.s, z16.s\n"
+ ".inst 0x44828200 // srshl z0.s, p0/M, z0.s, z16.s\n"
+ ".inst 0x4482821f // srshl z31.s, p0/M, z31.s, z16.s\n"
+ ".inst 0x4482821e // srshl z30.s, p0/M, z30.s, z16.s\n"
+ ".inst 0x44828216 // srshl z22.s, p0/M, z22.s, z16.s\n"
+ ".inst 0x4482821d // srshl z29.s, p0/M, z29.s, z16.s\n"
+ ".inst 0x44828212 // srshl z18.s, p0/M, z18.s, z16.s\n"
+ ".inst 0x4482821c // srshl z28.s, p0/M, z28.s, z16.s\n"
+ ".inst 0x44828215 // srshl z21.s, p0/M, z21.s, z16.s\n"
+ ".inst 0x4482821b // srshl z27.s, p0/M, z27.s, z16.s\n"
+ ".inst 0x44828214 // srshl z20.s, p0/M, z20.s, z16.s\n"
+ ".inst 0x4482821a // srshl z26.s, p0/M, z26.s, z16.s\n"
+ ".inst 0x44828219 // srshl z25.s, p0/M, z25.s, z16.s\n"
+ ".inst 0x44828218 // srshl z24.s, p0/M, z24.s, z16.s\n"
+ ".inst 0x44828217 // srshl z23.s, p0/M, z23.s, z16.s\n"
+ "not z16.s, p0/M, z17.s\n"
"smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z23.s, p0/M, z23.s, z16.s\n"
+ "smax z19.s, p0/M, z19.s, z16.s\n"
"smax z0.s, p0/M, z0.s, z16.s\n"
"smax z31.s, p0/M, z31.s, z16.s\n"
"smax z30.s, p0/M, z30.s, z16.s\n"
@@ -244,41 +244,41 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"smax z28.s, p0/M, z28.s, z16.s\n"
"smax z21.s, p0/M, z21.s, z16.s\n"
"smax z27.s, p0/M, z27.s, z16.s\n"
- "smax z17.s, p0/M, z17.s, z16.s\n"
- "smax z26.s, p0/M, z26.s, z16.s\n"
"smax z20.s, p0/M, z20.s, z16.s\n"
+ "smax z26.s, p0/M, z26.s, z16.s\n"
"smax z25.s, p0/M, z25.s, z16.s\n"
"smax z24.s, p0/M, z24.s, z16.s\n"
- "smin z1.s, p0/M, z1.s, z19.s\n"
- "smin z23.s, p0/M, z23.s, z19.s\n"
- "trn1 z23.h, z1.h, z23.h\n"
- "smin z0.s, p0/M, z0.s, z19.s\n"
- "smin z31.s, p0/M, z31.s, z19.s\n"
+ "smax z23.s, p0/M, z23.s, z16.s\n"
+ "smin z1.s, p0/M, z1.s, z17.s\n"
+ "smin z19.s, p0/M, z19.s, z17.s\n"
+ "smin z0.s, p0/M, z0.s, z17.s\n"
+ "smin z31.s, p0/M, z31.s, z17.s\n"
+ "smin z30.s, p0/M, z30.s, z17.s\n"
+ "smin z22.s, p0/M, z22.s, z17.s\n"
+ "smin z29.s, p0/M, z29.s, z17.s\n"
+ "smin z18.s, p0/M, z18.s, z17.s\n"
+ "smin z28.s, p0/M, z28.s, z17.s\n"
+ "smin z21.s, p0/M, z21.s, z17.s\n"
+ "trn1 z19.h, z1.h, z19.h\n"
+ "smin z27.s, p0/M, z27.s, z17.s\n"
+ "smin z20.s, p0/M, z20.s, z17.s\n"
"trn1 z16.h, z0.h, z31.h\n"
- "smin z30.s, p0/M, z30.s, z19.s\n"
- "smin z22.s, p0/M, z22.s, z19.s\n"
+ "smin z26.s, p0/M, z26.s, z17.s\n"
+ "smin z25.s, p0/M, z25.s, z17.s\n"
"trn1 z22.h, z30.h, z22.h\n"
- "smin z29.s, p0/M, z29.s, z19.s\n"
- "smin z18.s, p0/M, z18.s, z19.s\n"
+ "smin z24.s, p0/M, z24.s, z17.s\n"
+ "smin z23.s, p0/M, z23.s, z17.s\n"
"trn1 z18.h, z29.h, z18.h\n"
- "smin z28.s, p0/M, z28.s, z19.s\n"
- "smin z21.s, p0/M, z21.s, z19.s\n"
"trn1 z21.h, z28.h, z21.h\n"
- "smin z27.s, p0/M, z27.s, z19.s\n"
- "smin z17.s, p0/M, z17.s, z19.s\n"
- "trn1 z17.h, z27.h, z17.h\n"
- "smin z26.s, p0/M, z26.s, z19.s\n"
- "smin z20.s, p0/M, z20.s, z19.s\n"
- "trn1 z20.h, z26.h, z20.h\n"
- "smin z25.s, p0/M, z25.s, z19.s\n"
- "smin z24.s, p0/M, z24.s, z19.s\n"
- "trn1 z19.h, z25.h, z24.h\n"
- "trn1 z16.b, z23.b, z16.b\n"
+ "trn1 z17.h, z27.h, z20.h\n"
+ "trn1 z20.b, z19.b, z16.b\n"
+ "trn1 z19.h, z26.h, z25.h\n"
+ "trn1 z16.h, z24.h, z23.h\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "incb x9, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
- "trn1 z16.b, z20.b, z19.b\n"
+ "st1b { z20.b }, p4, [%x[outptr], x9]\n"
+ "incb x9, ALL, MUL #4\n"
+ "trn1 z16.b, z19.b, z16.b\n"
"st1b { z18.b }, p3, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
"st1b { z17.b }, p2, [%x[outptr], x27]\n"
@@ -292,83 +292,83 @@ void sme_s8q_nhwc_max_generic_depthfirst_impl(
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z4.b, #0x80\n"
+ "mov z5.b, #0x80\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x20, x22, [x24, #0x0]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x20, x9]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z0\n smax z16.b, p0/M, z16.b, z31.b\n"
- "movprfx z17, z23\n smax z17.b, p0/M, z17.b, z30.b\n"
+ "movprfx z16, z1\n smax z16.b, p0/M, z16.b, z0.b\n"
+ "movprfx z17, z23\n smax z17.b, p0/M, z17.b, z31.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "smax z16.b, p0/M, z16.b, z17.b\n"
"ldp x21, x20, [x24, #0x10]\n"
- "smax z4.b, p0/M, z4.b, z16.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "smax z16.b, p0/M, z16.b, z17.b\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
+ "smax z5.b, p0/M, z5.b, z16.b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z0\n smax z16.b, p0/M, z16.b, z31.b\n"
- "movprfx z17, z23\n smax z17.b, p0/M, z17.b, z30.b\n"
+ "movprfx z16, z1\n smax z16.b, p0/M, z16.b, z0.b\n"
+ "movprfx z17, z23\n smax z17.b, p0/M, z17.b, z31.b\n"
"smax z16.b, p0/M, z16.b, z17.b\n"
- "smax z4.b, p0/M, z4.b, z16.b\n"
+ "smax z5.b, p0/M, z5.b, z16.b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "smax z4.b, p0/M, z4.b, z16.b\n"
+ "ld1b { z16.b }, p4/Z, [x20, x9]\n"
+ "smax z5.b, p0/M, z5.b, z16.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- ".inst 0x4508a091 // sshllb z17.h, z4.b, #0x0\n"
- ".inst 0x4508a490 // sshllt z16.h, z4.b, #0x0\n"
+ ".inst 0x4508a0b1 // sshllb z17.h, z5.b, #0x0\n"
+ ".inst 0x4508a4b0 // sshllt z16.h, z5.b, #0x0\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z18.s }, p0/Z, [x20]\n"
- ".inst 0x4510a236 // sshllb z22.s, z17.h, #0x0\n"
- ".inst 0x4510a635 // sshllt z21.s, z17.h, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z17.s }, p0/Z, [x20]\n"
- ".inst 0x4510a214 // sshllb z20.s, z16.h, #0x0\n"
- ".inst 0x4510a613 // sshllt z19.s, z16.h, #0x0\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "ld1rw { z24.s }, p0/Z, [x20]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x44828256 // srshl z22.s, p0/M, z22.s, z18.s\n"
- ".inst 0x44828255 // srshl z21.s, p0/M, z21.s, z18.s\n"
- ".inst 0x44828254 // srshl z20.s, p0/M, z20.s, z18.s\n"
- ".inst 0x44828253 // srshl z19.s, p0/M, z19.s, z18.s\n"
- ".inst 0x04b176d6 // sqrdmulh z22.s, z22.s, z17.s\n"
- ".inst 0x04b176b5 // sqrdmulh z21.s, z21.s, z17.s\n"
- ".inst 0x04b17694 // sqrdmulh z20.s, z20.s, z17.s\n"
- ".inst 0x04b17673 // sqrdmulh z19.s, z19.s, z17.s\n"
- "mov z18.s, #0x7f\n"
- ".inst 0x44828216 // srshl z22.s, p0/M, z22.s, z16.s\n"
- ".inst 0x44828215 // srshl z21.s, p0/M, z21.s, z16.s\n"
- ".inst 0x44828214 // srshl z20.s, p0/M, z20.s, z16.s\n"
- ".inst 0x44828213 // srshl z19.s, p0/M, z19.s, z16.s\n"
- "not z16.s, p0/M, z18.s\n"
- "smax z22.s, p0/M, z22.s, z16.s\n"
- "smax z21.s, p0/M, z21.s, z16.s\n"
+ "mov z23.s, #0x7f\n"
+ "ld1rw { z22.s }, p0/Z, [x21]\n"
+ "ld1rw { z21.s }, p0/Z, [x20]\n"
+ ".inst 0x4510a234 // sshllb z20.s, z17.h, #0x0\n"
+ ".inst 0x4510a631 // sshllt z17.s, z17.h, #0x0\n"
+ ".inst 0x4510a213 // sshllb z19.s, z16.h, #0x0\n"
+ ".inst 0x4510a612 // sshllt z18.s, z16.h, #0x0\n"
+ "not z16.s, p0/M, z23.s\n"
+ ".inst 0x44828314 // srshl z20.s, p0/M, z20.s, z24.s\n"
+ ".inst 0x44828311 // srshl z17.s, p0/M, z17.s, z24.s\n"
+ ".inst 0x44828313 // srshl z19.s, p0/M, z19.s, z24.s\n"
+ ".inst 0x44828312 // srshl z18.s, p0/M, z18.s, z24.s\n"
+ ".inst 0x04b67694 // sqrdmulh z20.s, z20.s, z22.s\n"
+ ".inst 0x04b67631 // sqrdmulh z17.s, z17.s, z22.s\n"
+ ".inst 0x04b67673 // sqrdmulh z19.s, z19.s, z22.s\n"
+ ".inst 0x04b67652 // sqrdmulh z18.s, z18.s, z22.s\n"
+ ".inst 0x448282b4 // srshl z20.s, p0/M, z20.s, z21.s\n"
+ ".inst 0x448282b1 // srshl z17.s, p0/M, z17.s, z21.s\n"
+ ".inst 0x448282b3 // srshl z19.s, p0/M, z19.s, z21.s\n"
+ ".inst 0x448282b2 // srshl z18.s, p0/M, z18.s, z21.s\n"
"smax z20.s, p0/M, z20.s, z16.s\n"
+ "smax z17.s, p0/M, z17.s, z16.s\n"
"smax z19.s, p0/M, z19.s, z16.s\n"
- "smin z22.s, p0/M, z22.s, z18.s\n"
- "smin z21.s, p0/M, z21.s, z18.s\n"
- "trn1 z17.h, z22.h, z21.h\n"
- "smin z20.s, p0/M, z20.s, z18.s\n"
- "smin z19.s, p0/M, z19.s, z18.s\n"
- "trn1 z16.h, z20.h, z19.h\n"
+ "smax z18.s, p0/M, z18.s, z16.s\n"
+ "smin z20.s, p0/M, z20.s, z23.s\n"
+ "smin z17.s, p0/M, z17.s, z23.s\n"
+ "smin z19.s, p0/M, z19.s, z23.s\n"
+ "smin z18.s, p0/M, z18.s, z23.s\n"
+ "trn1 z17.h, z20.h, z17.h\n"
+ "trn1 z16.h, z19.h, z18.h\n"
"trn1 z16.b, z17.b, z16.b\n"
"st1b { z16.b }, p4, [%x[outptr], x9]\n"
"incb x9\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp
index f0e7bbf5cc..0aa6fc8881 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -146,32 +146,32 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x455c0bb5 // uaddlb z21.h, z29.b, z28.b\n"
".inst 0x455c0fb4 // uaddlt z20.h, z29.b, z28.b\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x455a0b73 // uaddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0f72 // uaddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x45580b31 // uaddlb z17.h, z25.b, z24.b\n"
".inst 0x45580f30 // uaddlt z16.h, z25.b, z24.b\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
+ "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x26]\n"
".inst 0x4595496b // uaddwb z11.s, z11.s, z21.h\n"
".inst 0x45954d4a // uaddwt z10.s, z10.s, z21.h\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x26]\n"
".inst 0x45944929 // uaddwb z9.s, z9.s, z20.h\n"
".inst 0x45944d08 // uaddwt z8.s, z8.s, z20.h\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x25]\n"
".inst 0x459348e7 // uaddwb z7.s, z7.s, z19.h\n"
".inst 0x45934cc6 // uaddwt z6.s, z6.s, z19.h\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x25]\n"
".inst 0x459248a5 // uaddwb z5.s, z5.s, z18.h\n"
".inst 0x45924c84 // uaddwt z4.s, z4.s, z18.h\n"
+ "ld1b { z25.b }, p1/Z, [x21, x24]\n"
".inst 0x45914863 // uaddwb z3.s, z3.s, z17.h\n"
".inst 0x45914c42 // uaddwt z2.s, z2.s, z17.h\n"
+ "ld1b { z24.b }, p1/Z, [x20, x24]\n"
".inst 0x45904821 // uaddwb z1.s, z1.s, z16.h\n"
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"bgt 2b\n"
@@ -205,17 +205,17 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
- ".inst 0x4508aa17 // ushllb z23.h, z16.b, #0x0\n"
- ".inst 0x4508ae16 // ushllt z22.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p3/Z, [x20, x26]\n"
- ".inst 0x4508aa15 // ushllb z21.h, z16.b, #0x0\n"
- ".inst 0x4508ae14 // ushllt z20.h, z16.b, #0x0\n"
"subs x21, x21, #0x1\n"
- "ld1b { z16.b }, p2/Z, [x20, x25]\n"
- ".inst 0x4508aa13 // ushllb z19.h, z16.b, #0x0\n"
- ".inst 0x4508ae12 // ushllt z18.h, z16.b, #0x0\n"
+ "ld1b { z19.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z18.b }, p3/Z, [x20, x26]\n"
+ "ld1b { z17.b }, p2/Z, [x20, x25]\n"
"ld1b { z16.b }, p1/Z, [x20, x24]\n"
+ ".inst 0x4508aa77 // ushllb z23.h, z19.b, #0x0\n"
+ ".inst 0x4508ae76 // ushllt z22.h, z19.b, #0x0\n"
+ ".inst 0x4508aa55 // ushllb z21.h, z18.b, #0x0\n"
+ ".inst 0x4508ae54 // ushllt z20.h, z18.b, #0x0\n"
+ ".inst 0x4508aa33 // ushllb z19.h, z17.b, #0x0\n"
+ ".inst 0x4508ae32 // ushllt z18.h, z17.b, #0x0\n"
".inst 0x4508aa11 // ushllb z17.h, z16.b, #0x0\n"
".inst 0x4508ae10 // ushllt z16.h, z16.b, #0x0\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
@@ -236,24 +236,26 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
- ".inst 0x04b175ef // sqdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x04b175ce // sqdmulh z14.s, z14.s, z17.s\n"
- ".inst 0x04b175ad // sqdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x04b1758c // sqdmulh z12.s, z12.s, z17.s\n"
+ "ld1rw { z19.s }, p0/Z, [%x[rescale_ptr]]\n"
+ "mov z18.s, #0x0\n"
+ "mov z17.s, #0xff\n"
"ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
- ".inst 0x04b1756b // sqdmulh z11.s, z11.s, z17.s\n"
- ".inst 0x04b1754a // sqdmulh z10.s, z10.s, z17.s\n"
- ".inst 0x04b17529 // sqdmulh z9.s, z9.s, z17.s\n"
- ".inst 0x04b17508 // sqdmulh z8.s, z8.s, z17.s\n"
- ".inst 0x04b174e7 // sqdmulh z7.s, z7.s, z17.s\n"
- ".inst 0x04b174c6 // sqdmulh z6.s, z6.s, z17.s\n"
- ".inst 0x04b174a5 // sqdmulh z5.s, z5.s, z17.s\n"
- ".inst 0x04b17484 // sqdmulh z4.s, z4.s, z17.s\n"
- ".inst 0x04b17463 // sqdmulh z3.s, z3.s, z17.s\n"
- ".inst 0x04b17442 // sqdmulh z2.s, z2.s, z17.s\n"
- ".inst 0x04b17421 // sqdmulh z1.s, z1.s, z17.s\n"
- ".inst 0x04b17400 // sqdmulh z0.s, z0.s, z17.s\n"
+ ".inst 0x04b375ef // sqdmulh z15.s, z15.s, z19.s\n"
+ ".inst 0x04b375ce // sqdmulh z14.s, z14.s, z19.s\n"
+ ".inst 0x04b375ad // sqdmulh z13.s, z13.s, z19.s\n"
+ ".inst 0x04b3758c // sqdmulh z12.s, z12.s, z19.s\n"
+ ".inst 0x04b3756b // sqdmulh z11.s, z11.s, z19.s\n"
+ ".inst 0x04b3754a // sqdmulh z10.s, z10.s, z19.s\n"
+ ".inst 0x04b37529 // sqdmulh z9.s, z9.s, z19.s\n"
+ ".inst 0x04b37508 // sqdmulh z8.s, z8.s, z19.s\n"
+ ".inst 0x04b374e7 // sqdmulh z7.s, z7.s, z19.s\n"
+ ".inst 0x04b374c6 // sqdmulh z6.s, z6.s, z19.s\n"
+ ".inst 0x04b374a5 // sqdmulh z5.s, z5.s, z19.s\n"
+ ".inst 0x04b37484 // sqdmulh z4.s, z4.s, z19.s\n"
+ ".inst 0x04b37463 // sqdmulh z3.s, z3.s, z19.s\n"
+ ".inst 0x04b37442 // sqdmulh z2.s, z2.s, z19.s\n"
+ ".inst 0x04b37421 // sqdmulh z1.s, z1.s, z19.s\n"
+ ".inst 0x04b37400 // sqdmulh z0.s, z0.s, z19.s\n"
".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
@@ -270,54 +272,52 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x44828202 // srshl z2.s, p0/M, z2.s, z16.s\n"
".inst 0x44828201 // srshl z1.s, p0/M, z1.s, z16.s\n"
".inst 0x44828200 // srshl z0.s, p0/M, z0.s, z16.s\n"
- "mov z16.s, #0x0\n"
- "mov z19.s, #0xff\n"
- "smax z15.s, p0/M, z15.s, z16.s\n"
- "smax z14.s, p0/M, z14.s, z16.s\n"
- "smax z13.s, p0/M, z13.s, z16.s\n"
- "smax z12.s, p0/M, z12.s, z16.s\n"
- "smax z11.s, p0/M, z11.s, z16.s\n"
- "smax z10.s, p0/M, z10.s, z16.s\n"
- "smax z9.s, p0/M, z9.s, z16.s\n"
- "smax z8.s, p0/M, z8.s, z16.s\n"
- "smax z7.s, p0/M, z7.s, z16.s\n"
- "smax z6.s, p0/M, z6.s, z16.s\n"
- "smax z5.s, p0/M, z5.s, z16.s\n"
- "smax z4.s, p0/M, z4.s, z16.s\n"
- "smax z3.s, p0/M, z3.s, z16.s\n"
- "smax z2.s, p0/M, z2.s, z16.s\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z19.s\n"
- "smin z14.s, p0/M, z14.s, z19.s\n"
- "trn1 z23.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z19.s\n"
- "smin z12.s, p0/M, z12.s, z19.s\n"
+ "smax z15.s, p0/M, z15.s, z18.s\n"
+ "smax z14.s, p0/M, z14.s, z18.s\n"
+ "smax z13.s, p0/M, z13.s, z18.s\n"
+ "smax z12.s, p0/M, z12.s, z18.s\n"
+ "smax z11.s, p0/M, z11.s, z18.s\n"
+ "smax z10.s, p0/M, z10.s, z18.s\n"
+ "smax z9.s, p0/M, z9.s, z18.s\n"
+ "smax z8.s, p0/M, z8.s, z18.s\n"
+ "smax z7.s, p0/M, z7.s, z18.s\n"
+ "smax z6.s, p0/M, z6.s, z18.s\n"
+ "smax z5.s, p0/M, z5.s, z18.s\n"
+ "smax z4.s, p0/M, z4.s, z18.s\n"
+ "smax z3.s, p0/M, z3.s, z18.s\n"
+ "smax z2.s, p0/M, z2.s, z18.s\n"
+ "smax z1.s, p0/M, z1.s, z18.s\n"
+ "smax z0.s, p0/M, z0.s, z18.s\n"
+ "smin z15.s, p0/M, z15.s, z17.s\n"
+ "smin z14.s, p0/M, z14.s, z17.s\n"
+ "smin z13.s, p0/M, z13.s, z17.s\n"
+ "smin z12.s, p0/M, z12.s, z17.s\n"
+ "smin z11.s, p0/M, z11.s, z17.s\n"
+ "smin z10.s, p0/M, z10.s, z17.s\n"
+ "smin z9.s, p0/M, z9.s, z17.s\n"
+ "smin z8.s, p0/M, z8.s, z17.s\n"
+ "smin z7.s, p0/M, z7.s, z17.s\n"
+ "smin z6.s, p0/M, z6.s, z17.s\n"
+ "trn1 z19.h, z15.h, z14.h\n"
+ "smin z5.s, p0/M, z5.s, z17.s\n"
+ "smin z4.s, p0/M, z4.s, z17.s\n"
"trn1 z16.h, z13.h, z12.h\n"
- "smin z11.s, p0/M, z11.s, z19.s\n"
- "smin z10.s, p0/M, z10.s, z19.s\n"
+ "smin z3.s, p0/M, z3.s, z17.s\n"
+ "smin z2.s, p0/M, z2.s, z17.s\n"
"trn1 z22.h, z11.h, z10.h\n"
- "smin z9.s, p0/M, z9.s, z19.s\n"
- "smin z8.s, p0/M, z8.s, z19.s\n"
+ "smin z1.s, p0/M, z1.s, z17.s\n"
+ "smin z0.s, p0/M, z0.s, z17.s\n"
"trn1 z18.h, z9.h, z8.h\n"
- "smin z7.s, p0/M, z7.s, z19.s\n"
- "smin z6.s, p0/M, z6.s, z19.s\n"
"trn1 z21.h, z7.h, z6.h\n"
- "smin z5.s, p0/M, z5.s, z19.s\n"
- "smin z4.s, p0/M, z4.s, z19.s\n"
"trn1 z17.h, z5.h, z4.h\n"
- "smin z3.s, p0/M, z3.s, z19.s\n"
- "smin z2.s, p0/M, z2.s, z19.s\n"
- "trn1 z20.h, z3.h, z2.h\n"
- "smin z1.s, p0/M, z1.s, z19.s\n"
- "smin z0.s, p0/M, z0.s, z19.s\n"
- "trn1 z19.h, z1.h, z0.h\n"
- "trn1 z16.b, z23.b, z16.b\n"
+ "trn1 z20.b, z19.b, z16.b\n"
+ "trn1 z19.h, z3.h, z2.h\n"
+ "trn1 z16.h, z1.h, z0.h\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
- "trn1 z16.b, z20.b, z19.b\n"
+ "st1b { z20.b }, p4, [%x[outptr], x27]\n"
+ "incb x27, ALL, MUL #4\n"
+ "trn1 z16.b, z19.b, z16.b\n"
"st1b { z18.b }, p3, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
"st1b { z17.b }, p2, [%x[outptr], x25]\n"
@@ -348,13 +348,13 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x455e0ff0 // uaddlt z16.h, z31.b, z30.b\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
- ".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
- ".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
"add x22, x22, #0x10\n"
"ld1b { z31.b }, p4/Z, [x21, x27]\n"
+ ".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
+ ".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
+ "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x459049ad // uaddwb z13.s, z13.s, z16.h\n"
".inst 0x45904d8c // uaddwt z12.s, z12.s, z16.h\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e0bf1 // uaddlb z17.h, z31.b, z30.b\n"
@@ -368,37 +368,37 @@ void sme_u8_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
+ "subs x21, x21, #0x1\n"
"ld1b { z16.b }, p4/Z, [x20, x27]\n"
".inst 0x4508aa11 // ushllb z17.h, z16.b, #0x0\n"
".inst 0x4508ae10 // ushllt z16.h, z16.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
".inst 0x459049ad // uaddwb z13.s, z13.s, z16.h\n"
".inst 0x45904d8c // uaddwt z12.s, z12.s, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1rw { z16.s }, p0/Z, [%x[rescale_ptr]]\n"
- ".inst 0x04b075ef // sqdmulh z15.s, z15.s, z16.s\n"
- ".inst 0x04b075ce // sqdmulh z14.s, z14.s, z16.s\n"
- ".inst 0x04b075ad // sqdmulh z13.s, z13.s, z16.s\n"
- ".inst 0x04b0758c // sqdmulh z12.s, z12.s, z16.s\n"
+ "ld1rw { z19.s }, p0/Z, [%x[rescale_ptr]]\n"
+ "mov z18.s, #0x0\n"
+ "mov z17.s, #0xff\n"
"ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
+ ".inst 0x04b375ef // sqdmulh z15.s, z15.s, z19.s\n"
+ ".inst 0x04b375ce // sqdmulh z14.s, z14.s, z19.s\n"
+ ".inst 0x04b375ad // sqdmulh z13.s, z13.s, z19.s\n"
+ ".inst 0x04b3758c // sqdmulh z12.s, z12.s, z19.s\n"
".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- "mov z17.s, #0x0\n"
- "mov z16.s, #0xff\n"
- "smax z15.s, p0/M, z15.s, z17.s\n"
- "smax z14.s, p0/M, z14.s, z17.s\n"
- "smax z13.s, p0/M, z13.s, z17.s\n"
- "smax z12.s, p0/M, z12.s, z17.s\n"
- "smin z15.s, p0/M, z15.s, z16.s\n"
- "smin z14.s, p0/M, z14.s, z16.s\n"
+ "smax z15.s, p0/M, z15.s, z18.s\n"
+ "smax z14.s, p0/M, z14.s, z18.s\n"
+ "smax z13.s, p0/M, z13.s, z18.s\n"
+ "smax z12.s, p0/M, z12.s, z18.s\n"
+ "smin z15.s, p0/M, z15.s, z17.s\n"
+ "smin z14.s, p0/M, z14.s, z17.s\n"
+ "smin z13.s, p0/M, z13.s, z17.s\n"
+ "smin z12.s, p0/M, z12.s, z17.s\n"
"trn1 z17.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z16.s\n"
- "smin z12.s, p0/M, z12.s, z16.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
"st1b { z16.b }, p4, [%x[outptr], x27]\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 9088cbde89..393047c8bc 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,26 +66,26 @@ void sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"mov x15, #0x0\n"
- "ptrue p2.b\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
"mov x14, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "ptrue p2.b\n"
"ldr x13, [%x[args], %[offsetof_n_channels]]\n"
- "whilelt p0.b, x15, x13\n"
"ldp x12, x11, [x21, #0x0]\n"
"ldp x10, x9, [x21, #0x10]\n"
"ldp x28, x27, [x20, #0x0]\n"
- "ld1b { z30.b }, p0/Z, [x27, x15]\n"
+ "whilelt p0.b, x15, x13\n"
"ldp x26, x25, [x20, #0x10]\n"
- "ld1b { z29.b }, p0/Z, [x25, x15]\n"
"ldp x24, x23, [x20, #0x20]\n"
- "ld1b { z28.b }, p0/Z, [x24, x15]\n"
"ldp x22, x21, [x20, #0x30]\n"
- "ld1b { z27.b }, p0/Z, [x21, x15]\n"
+ "ld1b { z30.b }, p0/Z, [x27, x15]\n"
"ldr x20, [x20, #0x40]\n"
+ "ld1b { z29.b }, p0/Z, [x25, x15]\n"
+ "ld1b { z28.b }, p0/Z, [x24, x15]\n"
+ "ld1b { z27.b }, p0/Z, [x21, x15]\n"
"ld1b { z26.b }, p0/Z, [x28, x15]\n"
"ld1b { z25.b }, p0/Z, [x26, x15]\n"
"ld1b { z24.b }, p0/Z, [x23, x15]\n"
- "ld1b { z19.b }, p0/Z, [x22, x15]\n"
+ "ld1b { z20.b }, p0/Z, [x22, x15]\n"
"ld1b { z23.b }, p0/Z, [x20, x15]\n"
"incw x15\n"
"whilelt p1.b, x15, x13\n"
@@ -98,24 +98,24 @@ void sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"movprfx z18, z29\n umax z18.b, p2/M, z18.b, z26.b\n"
"movprfx z17, z25\n umax z17.b, p2/M, z17.b, z24.b\n"
"ld1b { z28.b }, p1/Z, [x24, x15]\n"
- "movprfx z16, z29\n umax z16.b, p2/M, z16.b, z19.b\n"
+ "movprfx z16, z29\n umax z16.b, p2/M, z16.b, z20.b\n"
"movprfx z20, z24\n umax z20.b, p2/M, z20.b, z23.b\n"
"ld1b { z27.b }, p1/Z, [x21, x15]\n"
"ld1b { z29.b }, p1/Z, [x25, x15]\n"
+ "ld1b { z26.b }, p1/Z, [x28, x15]\n"
+ "ld1b { z25.b }, p1/Z, [x26, x15]\n"
"movprfx z19, z22\n umax z19.b, p2/M, z19.b, z18.b\n"
"movprfx z18, z17\n umax z18.b, p2/M, z18.b, z22.b\n"
- "ld1b { z26.b }, p1/Z, [x28, x15]\n"
+ "ld1b { z24.b }, p1/Z, [x23, x15]\n"
"movprfx z17, z16\n umax z17.b, p2/M, z17.b, z21.b\n"
"movprfx z16, z21\n umax z16.b, p2/M, z16.b, z20.b\n"
- "ld1b { z25.b }, p1/Z, [x26, x15]\n"
- "st1b { z19.b }, p0, [x12, x14]\n"
- "ld1b { z24.b }, p1/Z, [x23, x15]\n"
- "st1b { z18.b }, p0, [x11, x14]\n"
- "ld1b { z19.b }, p1/Z, [x22, x15]\n"
- "st1b { z17.b }, p0, [x10, x14]\n"
+ "ld1b { z20.b }, p1/Z, [x22, x15]\n"
"ld1b { z23.b }, p1/Z, [x20, x15]\n"
"incw x15\n"
"whilelt p1.b, x15, x13\n"
+ "st1b { z19.b }, p0, [x12, x14]\n"
+ "st1b { z18.b }, p0, [x11, x14]\n"
+ "st1b { z17.b }, p0, [x10, x14]\n"
"st1b { z16.b }, p0, [x9, x14]\n"
"incw x14\n"
"b.any 1b\n"
@@ -123,15 +123,15 @@ void sme_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"movprfx z22, z30\n umax z22.b, p2/M, z22.b, z28.b\n"
"movprfx z21, z28\n umax z21.b, p2/M, z21.b, z27.b\n"
"whilelt p0.b, x14, x13\n"
- "movprfx z20, z29\n umax z20.b, p2/M, z20.b, z26.b\n"
- "movprfx z18, z25\n umax z18.b, p2/M, z18.b, z24.b\n"
- "movprfx z17, z29\n umax z17.b, p2/M, z17.b, z19.b\n"
- "movprfx z19, z24\n umax z19.b, p2/M, z19.b, z23.b\n"
- "movprfx z16, z22\n umax z16.b, p2/M, z16.b, z20.b\n"
- "umax z18.b, p2/M, z18.b, z22.b\n"
- "st1b { z16.b }, p0, [x12, x14]\n"
- "umax z17.b, p2/M, z17.b, z21.b\n"
- "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z19.b\n"
+ "movprfx z18, z29\n umax z18.b, p2/M, z18.b, z26.b\n"
+ "movprfx z17, z25\n umax z17.b, p2/M, z17.b, z24.b\n"
+ "movprfx z16, z29\n umax z16.b, p2/M, z16.b, z20.b\n"
+ "movprfx z20, z24\n umax z20.b, p2/M, z20.b, z23.b\n"
+ "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z18.b\n"
+ "movprfx z18, z17\n umax z18.b, p2/M, z18.b, z22.b\n"
+ "movprfx z17, z16\n umax z17.b, p2/M, z17.b, z21.b\n"
+ "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z20.b\n"
+ "st1b { z19.b }, p0, [x12, x14]\n"
"st1b { z18.b }, p0, [x11, x14]\n"
"st1b { z17.b }, p0, [x10, x14]\n"
"st1b { z16.b }, p0, [x9, x14]\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp
index 06f13e8111..8755113b9a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,21 +53,21 @@ void sme_u8_nhwc_max_generic_depthfirst_impl(
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
+ "mov z5.b, #0x0\n"
"mov z4.b, #0x0\n"
- "mov z3.b, #0x0\n"
"mov x24, %x[inptrs]\n"
+ "mov z3.b, #0x0\n"
"mov z2.b, #0x0\n"
- "mov z1.b, #0x0\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
- "ld1b { z18.b }, p3/Z, [x23, x28]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z30.b }, p3/Z, [x23, x28]\n"
"ld1b { z29.b }, p3/Z, [x22, x28]\n"
"ld1b { z22.b }, p3/Z, [x21, x28]\n"
"ld1b { z28.b }, p3/Z, [x20, x28]\n"
@@ -81,34 +81,34 @@ void sme_u8_nhwc_max_generic_depthfirst_impl(
"ld1b { z24.b }, p1/Z, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
- "umax z23.b, p0/M, z23.b, z30.b\n"
+ "movprfx z19, z1\n umax z19.b, p0/M, z19.b, z0.b\n"
+ "umax z23.b, p0/M, z23.b, z31.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "umax z18.b, p0/M, z18.b, z29.b\n"
+ "movprfx z18, z30\n umax z18.b, p0/M, z18.b, z29.b\n"
"umax z22.b, p0/M, z22.b, z28.b\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
"umax z17.b, p0/M, z17.b, z27.b\n"
"umax z21.b, p0/M, z21.b, z26.b\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
"umax z16.b, p0/M, z16.b, z25.b\n"
"umax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"umax z18.b, p0/M, z18.b, z22.b\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
"umax z17.b, p0/M, z17.b, z21.b\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
"umax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
- "umax z4.b, p0/M, z4.b, z19.b\n"
- "umax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z18.b }, p3/Z, [x23, x28]\n"
- "umax z2.b, p0/M, z2.b, z17.b\n"
- "umax z1.b, p0/M, z1.b, z16.b\n"
+ "ld1b { z30.b }, p3/Z, [x23, x28]\n"
+ "umax z5.b, p0/M, z5.b, z19.b\n"
"ld1b { z29.b }, p3/Z, [x22, x28]\n"
+ "umax z4.b, p0/M, z4.b, z18.b\n"
"ld1b { z22.b }, p3/Z, [x21, x28]\n"
+ "umax z3.b, p0/M, z3.b, z17.b\n"
"ld1b { z28.b }, p3/Z, [x20, x28]\n"
+ "umax z2.b, p0/M, z2.b, z16.b\n"
"ld1b { z17.b }, p2/Z, [x23, x27]\n"
"ld1b { z27.b }, p2/Z, [x22, x27]\n"
"ld1b { z21.b }, p2/Z, [x21, x27]\n"
@@ -119,9 +119,9 @@ void sme_u8_nhwc_max_generic_depthfirst_impl(
"ld1b { z24.b }, p1/Z, [x20, x26]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
- "umax z23.b, p0/M, z23.b, z30.b\n"
- "umax z18.b, p0/M, z18.b, z29.b\n"
+ "movprfx z19, z1\n umax z19.b, p0/M, z19.b, z0.b\n"
+ "umax z23.b, p0/M, z23.b, z31.b\n"
+ "movprfx z18, z30\n umax z18.b, p0/M, z18.b, z29.b\n"
"umax z22.b, p0/M, z22.b, z28.b\n"
"umax z17.b, p0/M, z17.b, z27.b\n"
"umax z21.b, p0/M, z21.b, z26.b\n"
@@ -131,33 +131,33 @@ void sme_u8_nhwc_max_generic_depthfirst_impl(
"umax z18.b, p0/M, z18.b, z22.b\n"
"umax z17.b, p0/M, z17.b, z21.b\n"
"umax z16.b, p0/M, z16.b, z20.b\n"
- "umax z4.b, p0/M, z4.b, z19.b\n"
- "umax z3.b, p0/M, z3.b, z18.b\n"
- "umax z2.b, p0/M, z2.b, z17.b\n"
- "umax z1.b, p0/M, z1.b, z16.b\n"
+ "umax z5.b, p0/M, z5.b, z19.b\n"
+ "umax z4.b, p0/M, z4.b, z18.b\n"
+ "umax z3.b, p0/M, z3.b, z17.b\n"
+ "umax z2.b, p0/M, z2.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "umax z4.b, p0/M, z4.b, z16.b\n"
- "ld1b { z16.b }, p3/Z, [x20, x28]\n"
- "umax z3.b, p0/M, z3.b, z16.b\n"
- "ld1b { z16.b }, p2/Z, [x20, x27]\n"
- "umax z2.b, p0/M, z2.b, z16.b\n"
+ "ld1b { z19.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z18.b }, p3/Z, [x20, x28]\n"
+ "ld1b { z17.b }, p2/Z, [x20, x27]\n"
"ld1b { z16.b }, p1/Z, [x20, x26]\n"
- "umax z1.b, p0/M, z1.b, z16.b\n"
+ "umax z5.b, p0/M, z5.b, z19.b\n"
+ "umax z4.b, p0/M, z4.b, z18.b\n"
+ "umax z3.b, p0/M, z3.b, z17.b\n"
+ "umax z2.b, p0/M, z2.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1b { z4.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z5.b }, p4, [%x[outptr], x9]\n"
"incb x9, ALL, MUL #4\n"
- "st1b { z3.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z4.b }, p3, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z2.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z3.b }, p2, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z1.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z2.b }, p1, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
"whilelt p1.b, x26, %x[n_channels]\n"
"b.any 1b\n"
@@ -166,48 +166,48 @@ void sme_u8_nhwc_max_generic_depthfirst_impl(
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z4.b, #0x0\n"
+ "mov z5.b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x20, x22, [x24, #0x0]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x20, x9]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z0\n umax z16.b, p0/M, z16.b, z31.b\n"
- "movprfx z17, z23\n umax z17.b, p0/M, z17.b, z30.b\n"
+ "movprfx z16, z1\n umax z16.b, p0/M, z16.b, z0.b\n"
+ "movprfx z17, z23\n umax z17.b, p0/M, z17.b, z31.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "umax z16.b, p0/M, z16.b, z17.b\n"
"ldp x21, x20, [x24, #0x10]\n"
- "umax z4.b, p0/M, z4.b, z16.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "umax z16.b, p0/M, z16.b, z17.b\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
+ "umax z5.b, p0/M, z5.b, z16.b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z0\n umax z16.b, p0/M, z16.b, z31.b\n"
- "movprfx z17, z23\n umax z17.b, p0/M, z17.b, z30.b\n"
+ "movprfx z16, z1\n umax z16.b, p0/M, z16.b, z0.b\n"
+ "movprfx z17, z23\n umax z17.b, p0/M, z17.b, z31.b\n"
"umax z16.b, p0/M, z16.b, z17.b\n"
- "umax z4.b, p0/M, z4.b, z16.b\n"
+ "umax z5.b, p0/M, z5.b, z16.b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "umax z4.b, p0/M, z4.b, z16.b\n"
+ "ld1b { z16.b }, p4/Z, [x20, x9]\n"
+ "umax z5.b, p0/M, z5.b, z16.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1b { z4.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z5.b }, p4, [%x[outptr], x9]\n"
"incb x9\n"
"whilelt p4.b, x9, %x[n_channels]\n"
"b.any 8b\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index 52c52ccdb9..d08863105b 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -133,11 +133,11 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"1:" // 4-vectors of channels
"ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
"lsr x23, %x[n_valid_cells], #0x1\n"
+ "mov x22, %x[inptrs]\n"
"mov z14.d, z15.d\n"
"mov z13.d, z15.d\n"
"mov z12.d, z15.d\n"
"mov z11.d, z15.d\n"
- "mov x22, %x[inptrs]\n"
"mov z10.d, z15.d\n"
"mov z9.d, z15.d\n"
"mov z8.d, z15.d\n"
@@ -170,32 +170,32 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x455c0bb5 // uaddlb z21.h, z29.b, z28.b\n"
".inst 0x455c0fb4 // uaddlt z20.h, z29.b, z28.b\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x455a0b73 // uaddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0f72 // uaddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x45580b31 // uaddlb z17.h, z25.b, z24.b\n"
".inst 0x45580f30 // uaddlt z16.h, z25.b, z24.b\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
+ "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
+ "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z29.b }, p3/Z, [x21, x26]\n"
".inst 0x4595496b // uaddwb z11.s, z11.s, z21.h\n"
".inst 0x45954d4a // uaddwt z10.s, z10.s, z21.h\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
+ "ld1b { z28.b }, p3/Z, [x20, x26]\n"
".inst 0x45944929 // uaddwb z9.s, z9.s, z20.h\n"
".inst 0x45944d08 // uaddwt z8.s, z8.s, z20.h\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z27.b }, p2/Z, [x21, x25]\n"
".inst 0x459348e7 // uaddwb z7.s, z7.s, z19.h\n"
".inst 0x45934cc6 // uaddwt z6.s, z6.s, z19.h\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z26.b }, p2/Z, [x20, x25]\n"
".inst 0x459248a5 // uaddwb z5.s, z5.s, z18.h\n"
".inst 0x45924c84 // uaddwt z4.s, z4.s, z18.h\n"
+ "ld1b { z25.b }, p1/Z, [x21, x24]\n"
".inst 0x45914863 // uaddwb z3.s, z3.s, z17.h\n"
".inst 0x45914c42 // uaddwt z2.s, z2.s, z17.h\n"
+ "ld1b { z24.b }, p1/Z, [x20, x24]\n"
".inst 0x45904821 // uaddwb z1.s, z1.s, z16.h\n"
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"bgt 2b\n"
@@ -229,17 +229,17 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
- ".inst 0x4508aa17 // ushllb z23.h, z16.b, #0x0\n"
- ".inst 0x4508ae16 // ushllt z22.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p3/Z, [x20, x26]\n"
- ".inst 0x4508aa15 // ushllb z21.h, z16.b, #0x0\n"
- ".inst 0x4508ae14 // ushllt z20.h, z16.b, #0x0\n"
"subs x21, x21, #0x1\n"
- "ld1b { z16.b }, p2/Z, [x20, x25]\n"
- ".inst 0x4508aa13 // ushllb z19.h, z16.b, #0x0\n"
- ".inst 0x4508ae12 // ushllt z18.h, z16.b, #0x0\n"
+ "ld1b { z19.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z18.b }, p3/Z, [x20, x26]\n"
+ "ld1b { z17.b }, p2/Z, [x20, x25]\n"
"ld1b { z16.b }, p1/Z, [x20, x24]\n"
+ ".inst 0x4508aa77 // ushllb z23.h, z19.b, #0x0\n"
+ ".inst 0x4508ae76 // ushllt z22.h, z19.b, #0x0\n"
+ ".inst 0x4508aa55 // ushllb z21.h, z18.b, #0x0\n"
+ ".inst 0x4508ae54 // ushllt z20.h, z18.b, #0x0\n"
+ ".inst 0x4508aa33 // ushllb z19.h, z17.b, #0x0\n"
+ ".inst 0x4508ae32 // ushllt z18.h, z17.b, #0x0\n"
".inst 0x4508aa11 // ushllb z17.h, z16.b, #0x0\n"
".inst 0x4508ae10 // ushllt z16.h, z16.b, #0x0\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
@@ -260,27 +260,29 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1rw { z19.s }, p0/Z, [%x[left_shift]]\n"
- ".inst 0x4482826f // srshl z15.s, p0/M, z15.s, z19.s\n"
- ".inst 0x4482826e // srshl z14.s, p0/M, z14.s, z19.s\n"
+ "ld1rw { z21.s }, p0/Z, [%x[left_shift]]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- ".inst 0x4482826d // srshl z13.s, p0/M, z13.s, z19.s\n"
- ".inst 0x4482826c // srshl z12.s, p0/M, z12.s, z19.s\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0xff\n"
"ld1rw { z18.s }, p0/Z, [%x[combined_rescale_value]]\n"
- ".inst 0x4482826b // srshl z11.s, p0/M, z11.s, z19.s\n"
- ".inst 0x4482826a // srshl z10.s, p0/M, z10.s, z19.s\n"
"ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
- ".inst 0x44828269 // srshl z9.s, p0/M, z9.s, z19.s\n"
- ".inst 0x44828268 // srshl z8.s, p0/M, z8.s, z19.s\n"
+ ".inst 0x448282af // srshl z15.s, p0/M, z15.s, z21.s\n"
+ ".inst 0x448282ae // srshl z14.s, p0/M, z14.s, z21.s\n"
"ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x44828267 // srshl z7.s, p0/M, z7.s, z19.s\n"
- ".inst 0x44828266 // srshl z6.s, p0/M, z6.s, z19.s\n"
- ".inst 0x44828265 // srshl z5.s, p0/M, z5.s, z19.s\n"
- ".inst 0x44828264 // srshl z4.s, p0/M, z4.s, z19.s\n"
- ".inst 0x44828263 // srshl z3.s, p0/M, z3.s, z19.s\n"
- ".inst 0x44828262 // srshl z2.s, p0/M, z2.s, z19.s\n"
- ".inst 0x44828261 // srshl z1.s, p0/M, z1.s, z19.s\n"
- ".inst 0x44828260 // srshl z0.s, p0/M, z0.s, z19.s\n"
+ ".inst 0x448282ad // srshl z13.s, p0/M, z13.s, z21.s\n"
+ ".inst 0x448282ac // srshl z12.s, p0/M, z12.s, z21.s\n"
+ ".inst 0x448282ab // srshl z11.s, p0/M, z11.s, z21.s\n"
+ ".inst 0x448282aa // srshl z10.s, p0/M, z10.s, z21.s\n"
+ ".inst 0x448282a9 // srshl z9.s, p0/M, z9.s, z21.s\n"
+ ".inst 0x448282a8 // srshl z8.s, p0/M, z8.s, z21.s\n"
+ ".inst 0x448282a7 // srshl z7.s, p0/M, z7.s, z21.s\n"
+ ".inst 0x448282a6 // srshl z6.s, p0/M, z6.s, z21.s\n"
+ ".inst 0x448282a5 // srshl z5.s, p0/M, z5.s, z21.s\n"
+ ".inst 0x448282a4 // srshl z4.s, p0/M, z4.s, z21.s\n"
+ ".inst 0x448282a3 // srshl z3.s, p0/M, z3.s, z21.s\n"
+ ".inst 0x448282a2 // srshl z2.s, p0/M, z2.s, z21.s\n"
+ ".inst 0x448282a1 // srshl z1.s, p0/M, z1.s, z21.s\n"
+ ".inst 0x448282a0 // srshl z0.s, p0/M, z0.s, z21.s\n"
".inst 0x04b275ef // sqrdmulh z15.s, z15.s, z18.s\n"
".inst 0x04b275ce // sqrdmulh z14.s, z14.s, z18.s\n"
".inst 0x04b275ad // sqrdmulh z13.s, z13.s, z18.s\n"
@@ -329,54 +331,52 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"add z2.s, z2.s, z16.s\n"
"add z1.s, z1.s, z16.s\n"
"add z0.s, z0.s, z16.s\n"
- "mov z16.s, #0x0\n"
- "mov z19.s, #0xff\n"
- "smax z15.s, p0/M, z15.s, z16.s\n"
- "smax z14.s, p0/M, z14.s, z16.s\n"
- "smax z13.s, p0/M, z13.s, z16.s\n"
- "smax z12.s, p0/M, z12.s, z16.s\n"
- "smax z11.s, p0/M, z11.s, z16.s\n"
- "smax z10.s, p0/M, z10.s, z16.s\n"
- "smax z9.s, p0/M, z9.s, z16.s\n"
- "smax z8.s, p0/M, z8.s, z16.s\n"
- "smax z7.s, p0/M, z7.s, z16.s\n"
- "smax z6.s, p0/M, z6.s, z16.s\n"
- "smax z5.s, p0/M, z5.s, z16.s\n"
- "smax z4.s, p0/M, z4.s, z16.s\n"
- "smax z3.s, p0/M, z3.s, z16.s\n"
- "smax z2.s, p0/M, z2.s, z16.s\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z19.s\n"
- "smin z14.s, p0/M, z14.s, z19.s\n"
- "trn1 z23.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z19.s\n"
- "smin z12.s, p0/M, z12.s, z19.s\n"
+ "smax z15.s, p0/M, z15.s, z19.s\n"
+ "smax z14.s, p0/M, z14.s, z19.s\n"
+ "smax z13.s, p0/M, z13.s, z19.s\n"
+ "smax z12.s, p0/M, z12.s, z19.s\n"
+ "smax z11.s, p0/M, z11.s, z19.s\n"
+ "smax z10.s, p0/M, z10.s, z19.s\n"
+ "smax z9.s, p0/M, z9.s, z19.s\n"
+ "smax z8.s, p0/M, z8.s, z19.s\n"
+ "smax z7.s, p0/M, z7.s, z19.s\n"
+ "smax z6.s, p0/M, z6.s, z19.s\n"
+ "smax z5.s, p0/M, z5.s, z19.s\n"
+ "smax z4.s, p0/M, z4.s, z19.s\n"
+ "smax z3.s, p0/M, z3.s, z19.s\n"
+ "smax z2.s, p0/M, z2.s, z19.s\n"
+ "smax z1.s, p0/M, z1.s, z19.s\n"
+ "smax z0.s, p0/M, z0.s, z19.s\n"
+ "smin z15.s, p0/M, z15.s, z20.s\n"
+ "smin z14.s, p0/M, z14.s, z20.s\n"
+ "smin z13.s, p0/M, z13.s, z20.s\n"
+ "smin z12.s, p0/M, z12.s, z20.s\n"
+ "smin z11.s, p0/M, z11.s, z20.s\n"
+ "smin z10.s, p0/M, z10.s, z20.s\n"
+ "smin z9.s, p0/M, z9.s, z20.s\n"
+ "smin z8.s, p0/M, z8.s, z20.s\n"
+ "smin z7.s, p0/M, z7.s, z20.s\n"
+ "smin z6.s, p0/M, z6.s, z20.s\n"
+ "trn1 z19.h, z15.h, z14.h\n"
+ "smin z5.s, p0/M, z5.s, z20.s\n"
+ "smin z4.s, p0/M, z4.s, z20.s\n"
"trn1 z16.h, z13.h, z12.h\n"
- "smin z11.s, p0/M, z11.s, z19.s\n"
- "smin z10.s, p0/M, z10.s, z19.s\n"
+ "smin z3.s, p0/M, z3.s, z20.s\n"
+ "smin z2.s, p0/M, z2.s, z20.s\n"
"trn1 z22.h, z11.h, z10.h\n"
- "smin z9.s, p0/M, z9.s, z19.s\n"
- "smin z8.s, p0/M, z8.s, z19.s\n"
+ "smin z1.s, p0/M, z1.s, z20.s\n"
+ "smin z0.s, p0/M, z0.s, z20.s\n"
"trn1 z18.h, z9.h, z8.h\n"
- "smin z7.s, p0/M, z7.s, z19.s\n"
- "smin z6.s, p0/M, z6.s, z19.s\n"
"trn1 z21.h, z7.h, z6.h\n"
- "smin z5.s, p0/M, z5.s, z19.s\n"
- "smin z4.s, p0/M, z4.s, z19.s\n"
"trn1 z17.h, z5.h, z4.h\n"
- "smin z3.s, p0/M, z3.s, z19.s\n"
- "smin z2.s, p0/M, z2.s, z19.s\n"
- "trn1 z20.h, z3.h, z2.h\n"
- "smin z1.s, p0/M, z1.s, z19.s\n"
- "smin z0.s, p0/M, z0.s, z19.s\n"
- "trn1 z19.h, z1.h, z0.h\n"
- "trn1 z16.b, z23.b, z16.b\n"
+ "trn1 z20.b, z19.b, z16.b\n"
+ "trn1 z19.h, z3.h, z2.h\n"
+ "trn1 z16.h, z1.h, z0.h\n"
"trn1 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "incb x27, ALL, MUL #4\n"
"trn1 z17.b, z21.b, z17.b\n"
- "trn1 z16.b, z20.b, z19.b\n"
+ "st1b { z20.b }, p4, [%x[outptr], x27]\n"
+ "incb x27, ALL, MUL #4\n"
+ "trn1 z16.b, z19.b, z16.b\n"
"st1b { z18.b }, p3, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
"st1b { z17.b }, p2, [%x[outptr], x25]\n"
@@ -391,10 +391,10 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"8:" // Single vector of channels: Loop
"ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
"lsr x23, %x[n_valid_cells], #0x1\n"
+ "mov x22, %x[inptrs]\n"
"mov z14.d, z15.d\n"
"mov z13.d, z15.d\n"
"mov z12.d, z15.d\n"
- "mov x22, %x[inptrs]\n"
"cbz x23, 11f\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
@@ -407,13 +407,13 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x455e0ff0 // uaddlt z16.h, z31.b, z30.b\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
- ".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
- ".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
"add x22, x22, #0x10\n"
"ld1b { z31.b }, p4/Z, [x21, x27]\n"
+ ".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
+ ".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
+ "ld1b { z30.b }, p4/Z, [x20, x27]\n"
".inst 0x459049ad // uaddwb z13.s, z13.s, z16.h\n"
".inst 0x45904d8c // uaddwt z12.s, z12.s, z16.h\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e0bf1 // uaddlb z17.h, z31.b, z30.b\n"
@@ -427,29 +427,31 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
+ "subs x21, x21, #0x1\n"
"ld1b { z16.b }, p4/Z, [x20, x27]\n"
".inst 0x4508aa11 // ushllb z17.h, z16.b, #0x0\n"
".inst 0x4508ae10 // ushllt z16.h, z16.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
".inst 0x459049ad // uaddwb z13.s, z13.s, z16.h\n"
".inst 0x45904d8c // uaddwt z12.s, z12.s, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1rw { z16.s }, p0/Z, [%x[left_shift]]\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
+ "ld1rw { z21.s }, p0/Z, [%x[left_shift]]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- "ld1rw { z16.s }, p0/Z, [%x[combined_rescale_value]]\n"
- ".inst 0x04b075ef // sqrdmulh z15.s, z15.s, z16.s\n"
- ".inst 0x04b075ce // sqrdmulh z14.s, z14.s, z16.s\n"
+ "mov z20.s, #0x0\n"
+ "mov z19.s, #0xff\n"
+ "ld1rw { z18.s }, p0/Z, [%x[combined_rescale_value]]\n"
"ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
- ".inst 0x04b075ad // sqrdmulh z13.s, z13.s, z16.s\n"
- ".inst 0x04b0758c // sqrdmulh z12.s, z12.s, z16.s\n"
+ ".inst 0x448282af // srshl z15.s, p0/M, z15.s, z21.s\n"
+ ".inst 0x448282ae // srshl z14.s, p0/M, z14.s, z21.s\n"
"ld1rw { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x448282ad // srshl z13.s, p0/M, z13.s, z21.s\n"
+ ".inst 0x448282ac // srshl z12.s, p0/M, z12.s, z21.s\n"
+ ".inst 0x04b275ef // sqrdmulh z15.s, z15.s, z18.s\n"
+ ".inst 0x04b275ce // sqrdmulh z14.s, z14.s, z18.s\n"
+ ".inst 0x04b275ad // sqrdmulh z13.s, z13.s, z18.s\n"
+ ".inst 0x04b2758c // sqrdmulh z12.s, z12.s, z18.s\n"
".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
@@ -458,17 +460,15 @@ void sme_u8q_nhwc_avg_generic_depthfirst_impl(
"add z14.s, z14.s, z16.s\n"
"add z13.s, z13.s, z16.s\n"
"add z12.s, z12.s, z16.s\n"
- "mov z17.s, #0x0\n"
- "mov z16.s, #0xff\n"
- "smax z15.s, p0/M, z15.s, z17.s\n"
- "smax z14.s, p0/M, z14.s, z17.s\n"
- "smax z13.s, p0/M, z13.s, z17.s\n"
- "smax z12.s, p0/M, z12.s, z17.s\n"
- "smin z15.s, p0/M, z15.s, z16.s\n"
- "smin z14.s, p0/M, z14.s, z16.s\n"
+ "smax z15.s, p0/M, z15.s, z20.s\n"
+ "smax z14.s, p0/M, z14.s, z20.s\n"
+ "smax z13.s, p0/M, z13.s, z20.s\n"
+ "smax z12.s, p0/M, z12.s, z20.s\n"
+ "smin z15.s, p0/M, z15.s, z19.s\n"
+ "smin z14.s, p0/M, z14.s, z19.s\n"
+ "smin z13.s, p0/M, z13.s, z19.s\n"
+ "smin z12.s, p0/M, z12.s, z19.s\n"
"trn1 z17.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z16.s\n"
- "smin z12.s, p0/M, z12.s, z16.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
"st1b { z16.b }, p4, [%x[outptr], x27]\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp
index c8e8e7d399..5632c96834 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sme_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,20 +56,20 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
"mov z5.b, #0x0\n"
- "mov z3.b, #0x0\n"
+ "mov z4.b, #0x0\n"
"mov x24, %x[inptrs]\n"
+ "mov z3.b, #0x0\n"
"mov z2.b, #0x0\n"
- "mov z1.b, #0x0\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
- "ld1b { z18.b }, p3/Z, [x23, x28]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z30.b }, p3/Z, [x23, x28]\n"
"ld1b { z29.b }, p3/Z, [x22, x28]\n"
"ld1b { z22.b }, p3/Z, [x21, x28]\n"
"ld1b { z28.b }, p3/Z, [x20, x28]\n"
@@ -83,34 +83,34 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"ld1b { z24.b }, p1/Z, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
- "umax z23.b, p0/M, z23.b, z30.b\n"
+ "movprfx z19, z1\n umax z19.b, p0/M, z19.b, z0.b\n"
+ "umax z23.b, p0/M, z23.b, z31.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "umax z18.b, p0/M, z18.b, z29.b\n"
+ "movprfx z18, z30\n umax z18.b, p0/M, z18.b, z29.b\n"
"umax z22.b, p0/M, z22.b, z28.b\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
"umax z17.b, p0/M, z17.b, z27.b\n"
"umax z21.b, p0/M, z21.b, z26.b\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
"umax z16.b, p0/M, z16.b, z25.b\n"
"umax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
"umax z19.b, p0/M, z19.b, z23.b\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"umax z18.b, p0/M, z18.b, z22.b\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
"umax z17.b, p0/M, z17.b, z21.b\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
"umax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z30.b }, p3/Z, [x23, x28]\n"
"umax z5.b, p0/M, z5.b, z19.b\n"
- "umax z3.b, p0/M, z3.b, z18.b\n"
- "ld1b { z18.b }, p3/Z, [x23, x28]\n"
- "umax z2.b, p0/M, z2.b, z17.b\n"
- "umax z1.b, p0/M, z1.b, z16.b\n"
"ld1b { z29.b }, p3/Z, [x22, x28]\n"
+ "umax z4.b, p0/M, z4.b, z18.b\n"
"ld1b { z22.b }, p3/Z, [x21, x28]\n"
+ "umax z3.b, p0/M, z3.b, z17.b\n"
"ld1b { z28.b }, p3/Z, [x20, x28]\n"
+ "umax z2.b, p0/M, z2.b, z16.b\n"
"ld1b { z17.b }, p2/Z, [x23, x27]\n"
"ld1b { z27.b }, p2/Z, [x22, x27]\n"
"ld1b { z21.b }, p2/Z, [x21, x27]\n"
@@ -121,9 +121,9 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"ld1b { z24.b }, p1/Z, [x20, x26]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z0\n umax z19.b, p0/M, z19.b, z31.b\n"
- "umax z23.b, p0/M, z23.b, z30.b\n"
- "umax z18.b, p0/M, z18.b, z29.b\n"
+ "movprfx z19, z1\n umax z19.b, p0/M, z19.b, z0.b\n"
+ "umax z23.b, p0/M, z23.b, z31.b\n"
+ "movprfx z18, z30\n umax z18.b, p0/M, z18.b, z29.b\n"
"umax z22.b, p0/M, z22.b, z28.b\n"
"umax z17.b, p0/M, z17.b, z27.b\n"
"umax z21.b, p0/M, z21.b, z26.b\n"
@@ -134,172 +134,172 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"umax z17.b, p0/M, z17.b, z21.b\n"
"umax z16.b, p0/M, z16.b, z20.b\n"
"umax z5.b, p0/M, z5.b, z19.b\n"
- "umax z3.b, p0/M, z3.b, z18.b\n"
- "umax z2.b, p0/M, z2.b, z17.b\n"
- "umax z1.b, p0/M, z1.b, z16.b\n"
+ "umax z4.b, p0/M, z4.b, z18.b\n"
+ "umax z3.b, p0/M, z3.b, z17.b\n"
+ "umax z2.b, p0/M, z2.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "umax z5.b, p0/M, z5.b, z16.b\n"
- "ld1b { z16.b }, p3/Z, [x20, x28]\n"
- "umax z3.b, p0/M, z3.b, z16.b\n"
- "ld1b { z16.b }, p2/Z, [x20, x27]\n"
- "umax z2.b, p0/M, z2.b, z16.b\n"
+ "ld1b { z19.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z18.b }, p3/Z, [x20, x28]\n"
+ "ld1b { z17.b }, p2/Z, [x20, x27]\n"
"ld1b { z16.b }, p1/Z, [x20, x26]\n"
- "umax z1.b, p0/M, z1.b, z16.b\n"
+ "umax z5.b, p0/M, z5.b, z19.b\n"
+ "umax z4.b, p0/M, z4.b, z18.b\n"
+ "umax z3.b, p0/M, z3.b, z17.b\n"
+ "umax z2.b, p0/M, z2.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
- ".inst 0x4508a8b7 // ushllb z23.h, z5.b, #0x0\n"
- ".inst 0x4508acb9 // ushllt z25.h, z5.b, #0x0\n"
- ".inst 0x4508a876 // ushllb z22.h, z3.b, #0x0\n"
- ".inst 0x4508ac72 // ushllt z18.h, z3.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
- ".inst 0x4508a855 // ushllb z21.h, z2.b, #0x0\n"
- ".inst 0x4508ac51 // ushllt z17.h, z2.b, #0x0\n"
+ ".inst 0x4508a8b3 // ushllb z19.h, z5.b, #0x0\n"
+ ".inst 0x4508acb0 // ushllt z16.h, z5.b, #0x0\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
+ "ld1rw { z6.s }, p0/Z, [x20]\n"
+ ".inst 0x4508a895 // ushllb z21.h, z4.b, #0x0\n"
+ ".inst 0x4508ac92 // ushllt z18.h, z4.b, #0x0\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
- ".inst 0x4508a834 // ushllb z20.h, z1.b, #0x0\n"
- ".inst 0x4508ac38 // ushllt z24.h, z1.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z19.s }, p0/Z, [x20]\n"
- "neg z4.s, p0/M, z4.s\n"
- ".inst 0x45974081 // saddwb z1.s, z4.s, z23.h\n"
+ ".inst 0x4508a874 // ushllb z20.h, z3.b, #0x0\n"
+ ".inst 0x4508ac71 // ushllt z17.h, z3.b, #0x0\n"
+ "ld1rw { z5.s }, p0/Z, [x21]\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ ".inst 0x4508a858 // ushllb z24.h, z2.b, #0x0\n"
+ ".inst 0x4508ac57 // ushllt z23.h, z2.b, #0x0\n"
+ "ld1rw { z4.s }, p0/Z, [x20]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x45974497 // saddwt z23.s, z4.s, z23.h\n"
- ".inst 0x45994080 // saddwb z0.s, z4.s, z25.h\n"
- ".inst 0x4599449f // saddwt z31.s, z4.s, z25.h\n"
- ".inst 0x4596409e // saddwb z30.s, z4.s, z22.h\n"
- ".inst 0x45964496 // saddwt z22.s, z4.s, z22.h\n"
- ".inst 0x4592409d // saddwb z29.s, z4.s, z18.h\n"
- ".inst 0x45924492 // saddwt z18.s, z4.s, z18.h\n"
- ".inst 0x4595409c // saddwb z28.s, z4.s, z21.h\n"
- ".inst 0x45954495 // saddwt z21.s, z4.s, z21.h\n"
- ".inst 0x4591409b // saddwb z27.s, z4.s, z17.h\n"
- ".inst 0x45914491 // saddwt z17.s, z4.s, z17.h\n"
- ".inst 0x4594409a // saddwb z26.s, z4.s, z20.h\n"
- ".inst 0x45944494 // saddwt z20.s, z4.s, z20.h\n"
- ".inst 0x45984099 // saddwb z25.s, z4.s, z24.h\n"
- ".inst 0x45984498 // saddwt z24.s, z4.s, z24.h\n"
- ".inst 0x44828061 // srshl z1.s, p0/M, z1.s, z3.s\n"
- ".inst 0x44828077 // srshl z23.s, p0/M, z23.s, z3.s\n"
- ".inst 0x44828060 // srshl z0.s, p0/M, z0.s, z3.s\n"
+ "neg z6.s, p0/M, z6.s\n"
+ "ld1rw { z3.s }, p0/Z, [x21]\n"
+ "mov z2.s, #0x0\n"
+ "ld1rw { z1.s }, p0/Z, [x20]\n"
+ "mov z0.s, #0xff\n"
+ ".inst 0x459340df // saddwb z31.s, z6.s, z19.h\n"
+ ".inst 0x459344d3 // saddwt z19.s, z6.s, z19.h\n"
+ ".inst 0x459040de // saddwb z30.s, z6.s, z16.h\n"
+ ".inst 0x459044d0 // saddwt z16.s, z6.s, z16.h\n"
+ ".inst 0x459540dd // saddwb z29.s, z6.s, z21.h\n"
+ ".inst 0x459544d6 // saddwt z22.s, z6.s, z21.h\n"
+ ".inst 0x459240dc // saddwb z28.s, z6.s, z18.h\n"
+ ".inst 0x459244d2 // saddwt z18.s, z6.s, z18.h\n"
+ ".inst 0x459440db // saddwb z27.s, z6.s, z20.h\n"
+ ".inst 0x459444d5 // saddwt z21.s, z6.s, z20.h\n"
+ ".inst 0x459140d4 // saddwb z20.s, z6.s, z17.h\n"
+ ".inst 0x459144d1 // saddwt z17.s, z6.s, z17.h\n"
+ ".inst 0x459840da // saddwb z26.s, z6.s, z24.h\n"
+ ".inst 0x459844d9 // saddwt z25.s, z6.s, z24.h\n"
+ ".inst 0x459740d8 // saddwb z24.s, z6.s, z23.h\n"
+ ".inst 0x459744d7 // saddwt z23.s, z6.s, z23.h\n"
+ ".inst 0x448280bf // srshl z31.s, p0/M, z31.s, z5.s\n"
+ ".inst 0x448280b3 // srshl z19.s, p0/M, z19.s, z5.s\n"
+ ".inst 0x448280be // srshl z30.s, p0/M, z30.s, z5.s\n"
+ ".inst 0x448280b0 // srshl z16.s, p0/M, z16.s, z5.s\n"
+ ".inst 0x448280bd // srshl z29.s, p0/M, z29.s, z5.s\n"
+ ".inst 0x448280b6 // srshl z22.s, p0/M, z22.s, z5.s\n"
+ ".inst 0x448280bc // srshl z28.s, p0/M, z28.s, z5.s\n"
+ ".inst 0x448280b2 // srshl z18.s, p0/M, z18.s, z5.s\n"
+ ".inst 0x448280bb // srshl z27.s, p0/M, z27.s, z5.s\n"
+ ".inst 0x448280b5 // srshl z21.s, p0/M, z21.s, z5.s\n"
+ ".inst 0x448280b4 // srshl z20.s, p0/M, z20.s, z5.s\n"
+ ".inst 0x448280b1 // srshl z17.s, p0/M, z17.s, z5.s\n"
+ ".inst 0x448280ba // srshl z26.s, p0/M, z26.s, z5.s\n"
+ ".inst 0x448280b9 // srshl z25.s, p0/M, z25.s, z5.s\n"
+ ".inst 0x448280b8 // srshl z24.s, p0/M, z24.s, z5.s\n"
+ ".inst 0x448280b7 // srshl z23.s, p0/M, z23.s, z5.s\n"
+ ".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
+ ".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
+ ".inst 0x04a477de // sqrdmulh z30.s, z30.s, z4.s\n"
+ ".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
+ ".inst 0x04a477bd // sqrdmulh z29.s, z29.s, z4.s\n"
+ ".inst 0x04a476d6 // sqrdmulh z22.s, z22.s, z4.s\n"
+ ".inst 0x04a4779c // sqrdmulh z28.s, z28.s, z4.s\n"
+ ".inst 0x04a47652 // sqrdmulh z18.s, z18.s, z4.s\n"
+ ".inst 0x04a4777b // sqrdmulh z27.s, z27.s, z4.s\n"
+ ".inst 0x04a476b5 // sqrdmulh z21.s, z21.s, z4.s\n"
+ ".inst 0x04a47694 // sqrdmulh z20.s, z20.s, z4.s\n"
+ ".inst 0x04a47631 // sqrdmulh z17.s, z17.s, z4.s\n"
+ ".inst 0x04a4775a // sqrdmulh z26.s, z26.s, z4.s\n"
+ ".inst 0x04a47739 // sqrdmulh z25.s, z25.s, z4.s\n"
+ ".inst 0x04a47718 // sqrdmulh z24.s, z24.s, z4.s\n"
+ ".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
".inst 0x4482807f // srshl z31.s, p0/M, z31.s, z3.s\n"
+ ".inst 0x44828073 // srshl z19.s, p0/M, z19.s, z3.s\n"
".inst 0x4482807e // srshl z30.s, p0/M, z30.s, z3.s\n"
- ".inst 0x44828076 // srshl z22.s, p0/M, z22.s, z3.s\n"
+ ".inst 0x44828070 // srshl z16.s, p0/M, z16.s, z3.s\n"
".inst 0x4482807d // srshl z29.s, p0/M, z29.s, z3.s\n"
- ".inst 0x44828072 // srshl z18.s, p0/M, z18.s, z3.s\n"
+ ".inst 0x44828076 // srshl z22.s, p0/M, z22.s, z3.s\n"
".inst 0x4482807c // srshl z28.s, p0/M, z28.s, z3.s\n"
- ".inst 0x44828075 // srshl z21.s, p0/M, z21.s, z3.s\n"
+ ".inst 0x44828072 // srshl z18.s, p0/M, z18.s, z3.s\n"
".inst 0x4482807b // srshl z27.s, p0/M, z27.s, z3.s\n"
+ ".inst 0x44828075 // srshl z21.s, p0/M, z21.s, z3.s\n"
+ ".inst 0x44828074 // srshl z20.s, p0/M, z20.s, z3.s\n"
".inst 0x44828071 // srshl z17.s, p0/M, z17.s, z3.s\n"
".inst 0x4482807a // srshl z26.s, p0/M, z26.s, z3.s\n"
- ".inst 0x44828074 // srshl z20.s, p0/M, z20.s, z3.s\n"
".inst 0x44828079 // srshl z25.s, p0/M, z25.s, z3.s\n"
".inst 0x44828078 // srshl z24.s, p0/M, z24.s, z3.s\n"
- ".inst 0x04a27421 // sqrdmulh z1.s, z1.s, z2.s\n"
- ".inst 0x04a276f7 // sqrdmulh z23.s, z23.s, z2.s\n"
- ".inst 0x04a27400 // sqrdmulh z0.s, z0.s, z2.s\n"
- ".inst 0x04a277ff // sqrdmulh z31.s, z31.s, z2.s\n"
- ".inst 0x04a277de // sqrdmulh z30.s, z30.s, z2.s\n"
- ".inst 0x04a276d6 // sqrdmulh z22.s, z22.s, z2.s\n"
- ".inst 0x04a277bd // sqrdmulh z29.s, z29.s, z2.s\n"
- ".inst 0x04a27652 // sqrdmulh z18.s, z18.s, z2.s\n"
- ".inst 0x04a2779c // sqrdmulh z28.s, z28.s, z2.s\n"
- ".inst 0x04a276b5 // sqrdmulh z21.s, z21.s, z2.s\n"
- ".inst 0x04a2777b // sqrdmulh z27.s, z27.s, z2.s\n"
- ".inst 0x04a27631 // sqrdmulh z17.s, z17.s, z2.s\n"
- ".inst 0x04a2775a // sqrdmulh z26.s, z26.s, z2.s\n"
- ".inst 0x04a27694 // sqrdmulh z20.s, z20.s, z2.s\n"
- ".inst 0x04a27739 // sqrdmulh z25.s, z25.s, z2.s\n"
- ".inst 0x04a27718 // sqrdmulh z24.s, z24.s, z2.s\n"
- ".inst 0x44828261 // srshl z1.s, p0/M, z1.s, z19.s\n"
- ".inst 0x44828277 // srshl z23.s, p0/M, z23.s, z19.s\n"
- ".inst 0x44828260 // srshl z0.s, p0/M, z0.s, z19.s\n"
- ".inst 0x4482827f // srshl z31.s, p0/M, z31.s, z19.s\n"
- ".inst 0x4482827e // srshl z30.s, p0/M, z30.s, z19.s\n"
- ".inst 0x44828276 // srshl z22.s, p0/M, z22.s, z19.s\n"
- ".inst 0x4482827d // srshl z29.s, p0/M, z29.s, z19.s\n"
- ".inst 0x44828272 // srshl z18.s, p0/M, z18.s, z19.s\n"
- ".inst 0x4482827c // srshl z28.s, p0/M, z28.s, z19.s\n"
- ".inst 0x44828275 // srshl z21.s, p0/M, z21.s, z19.s\n"
- ".inst 0x4482827b // srshl z27.s, p0/M, z27.s, z19.s\n"
- ".inst 0x44828271 // srshl z17.s, p0/M, z17.s, z19.s\n"
- ".inst 0x4482827a // srshl z26.s, p0/M, z26.s, z19.s\n"
- ".inst 0x44828274 // srshl z20.s, p0/M, z20.s, z19.s\n"
- ".inst 0x44828279 // srshl z25.s, p0/M, z25.s, z19.s\n"
- ".inst 0x44828278 // srshl z24.s, p0/M, z24.s, z19.s\n"
- "add z1.s, z1.s, z16.s\n"
- "add z23.s, z23.s, z16.s\n"
- "add z0.s, z0.s, z16.s\n"
- "add z31.s, z31.s, z16.s\n"
- "add z30.s, z30.s, z16.s\n"
- "add z22.s, z22.s, z16.s\n"
- "add z29.s, z29.s, z16.s\n"
- "add z18.s, z18.s, z16.s\n"
- "add z28.s, z28.s, z16.s\n"
- "add z21.s, z21.s, z16.s\n"
- "add z27.s, z27.s, z16.s\n"
- "add z17.s, z17.s, z16.s\n"
- "add z26.s, z26.s, z16.s\n"
- "add z20.s, z20.s, z16.s\n"
- "add z25.s, z25.s, z16.s\n"
- "add z24.s, z24.s, z16.s\n"
- "mov z16.s, #0x0\n"
- "mov z19.s, #0xff\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z23.s, p0/M, z23.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smax z31.s, p0/M, z31.s, z16.s\n"
- "smax z30.s, p0/M, z30.s, z16.s\n"
- "smax z22.s, p0/M, z22.s, z16.s\n"
- "smax z29.s, p0/M, z29.s, z16.s\n"
- "smax z18.s, p0/M, z18.s, z16.s\n"
- "smax z28.s, p0/M, z28.s, z16.s\n"
- "smax z21.s, p0/M, z21.s, z16.s\n"
- "smax z27.s, p0/M, z27.s, z16.s\n"
- "smax z17.s, p0/M, z17.s, z16.s\n"
- "smax z26.s, p0/M, z26.s, z16.s\n"
- "smax z20.s, p0/M, z20.s, z16.s\n"
- "smax z25.s, p0/M, z25.s, z16.s\n"
- "smax z24.s, p0/M, z24.s, z16.s\n"
- "smin z1.s, p0/M, z1.s, z19.s\n"
- "smin z23.s, p0/M, z23.s, z19.s\n"
- "smin z0.s, p0/M, z0.s, z19.s\n"
- "trn1 z23.h, z1.h, z23.h\n"
- "smin z31.s, p0/M, z31.s, z19.s\n"
- "smin z30.s, p0/M, z30.s, z19.s\n"
- "trn1 z16.h, z0.h, z31.h\n"
- "smin z22.s, p0/M, z22.s, z19.s\n"
- "smin z29.s, p0/M, z29.s, z19.s\n"
- "trn1 z22.h, z30.h, z22.h\n"
- "smin z18.s, p0/M, z18.s, z19.s\n"
- "smin z28.s, p0/M, z28.s, z19.s\n"
- "trn1 z18.h, z29.h, z18.h\n"
- "smin z21.s, p0/M, z21.s, z19.s\n"
- "smin z27.s, p0/M, z27.s, z19.s\n"
- "trn1 z21.h, z28.h, z21.h\n"
- "smin z17.s, p0/M, z17.s, z19.s\n"
- "smin z26.s, p0/M, z26.s, z19.s\n"
- "trn1 z17.h, z27.h, z17.h\n"
- "smin z20.s, p0/M, z20.s, z19.s\n"
- "smin z25.s, p0/M, z25.s, z19.s\n"
- "trn1 z20.h, z26.h, z20.h\n"
- "smin z24.s, p0/M, z24.s, z19.s\n"
- "trn1 z19.h, z25.h, z24.h\n"
- "trn1 z16.b, z23.b, z16.b\n"
+ ".inst 0x44828077 // srshl z23.s, p0/M, z23.s, z3.s\n"
+ "add z31.s, z31.s, z1.s\n"
+ "add z19.s, z19.s, z1.s\n"
+ "add z30.s, z30.s, z1.s\n"
+ "add z16.s, z16.s, z1.s\n"
+ "add z29.s, z29.s, z1.s\n"
+ "add z22.s, z22.s, z1.s\n"
+ "add z28.s, z28.s, z1.s\n"
+ "add z18.s, z18.s, z1.s\n"
+ "add z27.s, z27.s, z1.s\n"
+ "add z21.s, z21.s, z1.s\n"
+ "add z20.s, z20.s, z1.s\n"
+ "add z17.s, z17.s, z1.s\n"
+ "add z26.s, z26.s, z1.s\n"
+ "add z25.s, z25.s, z1.s\n"
+ "add z24.s, z24.s, z1.s\n"
+ "add z23.s, z23.s, z1.s\n"
+ "smax z31.s, p0/M, z31.s, z2.s\n"
+ "smax z19.s, p0/M, z19.s, z2.s\n"
+ "smax z30.s, p0/M, z30.s, z2.s\n"
+ "smax z16.s, p0/M, z16.s, z2.s\n"
+ "smax z29.s, p0/M, z29.s, z2.s\n"
+ "smax z22.s, p0/M, z22.s, z2.s\n"
+ "smax z28.s, p0/M, z28.s, z2.s\n"
+ "smax z18.s, p0/M, z18.s, z2.s\n"
+ "smax z27.s, p0/M, z27.s, z2.s\n"
+ "smax z21.s, p0/M, z21.s, z2.s\n"
+ "smax z20.s, p0/M, z20.s, z2.s\n"
+ "smax z17.s, p0/M, z17.s, z2.s\n"
+ "smax z26.s, p0/M, z26.s, z2.s\n"
+ "smax z25.s, p0/M, z25.s, z2.s\n"
+ "smax z24.s, p0/M, z24.s, z2.s\n"
+ "smax z23.s, p0/M, z23.s, z2.s\n"
+ "smin z31.s, p0/M, z31.s, z0.s\n"
+ "smin z19.s, p0/M, z19.s, z0.s\n"
+ "smin z30.s, p0/M, z30.s, z0.s\n"
+ "smin z16.s, p0/M, z16.s, z0.s\n"
+ "smin z29.s, p0/M, z29.s, z0.s\n"
+ "smin z22.s, p0/M, z22.s, z0.s\n"
+ "smin z28.s, p0/M, z28.s, z0.s\n"
+ "smin z18.s, p0/M, z18.s, z0.s\n"
+ "smin z27.s, p0/M, z27.s, z0.s\n"
+ "smin z21.s, p0/M, z21.s, z0.s\n"
+ "trn1 z19.h, z31.h, z19.h\n"
+ "smin z20.s, p0/M, z20.s, z0.s\n"
+ "smin z17.s, p0/M, z17.s, z0.s\n"
+ "trn1 z16.h, z30.h, z16.h\n"
+ "smin z26.s, p0/M, z26.s, z0.s\n"
+ "smin z25.s, p0/M, z25.s, z0.s\n"
+ "trn1 z22.h, z29.h, z22.h\n"
+ "smin z24.s, p0/M, z24.s, z0.s\n"
+ "smin z23.s, p0/M, z23.s, z0.s\n"
+ "trn1 z18.h, z28.h, z18.h\n"
+ "trn1 z21.h, z27.h, z21.h\n"
+ "trn1 z17.h, z20.h, z17.h\n"
+ "trn1 z20.b, z19.b, z16.b\n"
+ "trn1 z19.h, z26.h, z25.h\n"
+ "trn1 z16.h, z24.h, z23.h\n"
"trn1 z18.b, z22.b, z18.b\n"
"trn1 z17.b, z21.b, z17.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z20.b }, p4, [%x[outptr], x9]\n"
"incb x9, ALL, MUL #4\n"
- "trn1 z16.b, z20.b, z19.b\n"
+ "trn1 z16.b, z19.b, z16.b\n"
"st1b { z18.b }, p3, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
"st1b { z17.b }, p2, [%x[outptr], x27]\n"
@@ -316,32 +316,32 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"mov z5.b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
- "ldp x20, x22, [x24, #0x0]\n"
+ "ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "ld1b { z0.b }, p4/Z, [x20, x9]\n"
"ldp x21, x20, [x24, #0x10]\n"
"add x24, x24, #0x20\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z0\n umax z16.b, p0/M, z16.b, z31.b\n"
- "movprfx z17, z23\n umax z17.b, p0/M, z17.b, z30.b\n"
+ "movprfx z16, z1\n umax z16.b, p0/M, z16.b, z0.b\n"
+ "movprfx z17, z23\n umax z17.b, p0/M, z17.b, z31.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"subs x25, x25, #0x1\n"
- "umax z16.b, p0/M, z16.b, z17.b\n"
"ldp x21, x20, [x24, #0x10]\n"
- "umax z5.b, p0/M, z5.b, z16.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z0.b }, p4/Z, [x23, x9]\n"
- "ld1b { z31.b }, p4/Z, [x22, x9]\n"
+ "ld1b { z1.b }, p4/Z, [x23, x9]\n"
+ "umax z16.b, p0/M, z16.b, z17.b\n"
+ "ld1b { z0.b }, p4/Z, [x22, x9]\n"
"ld1b { z23.b }, p4/Z, [x21, x9]\n"
- "ld1b { z30.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p4/Z, [x20, x9]\n"
+ "umax z5.b, p0/M, z5.b, z16.b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z0\n umax z16.b, p0/M, z16.b, z31.b\n"
- "movprfx z17, z23\n umax z17.b, p0/M, z17.b, z30.b\n"
+ "movprfx z16, z1\n umax z16.b, p0/M, z16.b, z0.b\n"
+ "movprfx z17, z23\n umax z17.b, p0/M, z17.b, z31.b\n"
"umax z16.b, p0/M, z16.b, z17.b\n"
"umax z5.b, p0/M, z5.b, z16.b\n"
"11:" // Single vector of channels: Loop: After loop
@@ -349,56 +349,56 @@ void sme_u8q_nhwc_max_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
+ "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"umax z5.b, p0/M, z5.b, z16.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1rw { z18.s }, p0/Z, [x20]\n"
+ "add x21, %x[quant_params], %[offsetof_qp_input_offset]\n"
".inst 0x4508a8b1 // ushllb z17.h, z5.b, #0x0\n"
- ".inst 0x4508acb0 // ushllt z16.h, z5.b, #0x0\n"
- "neg z18.s, p0/M, z18.s\n"
- ".inst 0x45914257 // saddwb z23.s, z18.s, z17.h\n"
+ ".inst 0x4508acba // ushllt z26.h, z5.b, #0x0\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z22.s }, p0/Z, [x20]\n"
- ".inst 0x45914655 // saddwt z21.s, z18.s, z17.h\n"
- ".inst 0x45904254 // saddwb z20.s, z18.s, z16.h\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z19.s }, p0/Z, [x20]\n"
- ".inst 0x45904652 // saddwt z18.s, z18.s, z16.h\n"
- ".inst 0x448282d7 // srshl z23.s, p0/M, z23.s, z22.s\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z17.s }, p0/Z, [x20]\n"
- ".inst 0x448282d5 // srshl z21.s, p0/M, z21.s, z22.s\n"
- ".inst 0x448282d4 // srshl z20.s, p0/M, z20.s, z22.s\n"
+ "ld1rw { z16.s }, p0/Z, [x21]\n"
+ "add x22, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
+ "mov z25.s, #0x0\n"
+ "ld1rw { z24.s }, p0/Z, [x20]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x448282d2 // srshl z18.s, p0/M, z18.s, z22.s\n"
- ".inst 0x04b376f7 // sqrdmulh z23.s, z23.s, z19.s\n"
- ".inst 0x04b376b5 // sqrdmulh z21.s, z21.s, z19.s\n"
- ".inst 0x04b37694 // sqrdmulh z20.s, z20.s, z19.s\n"
- ".inst 0x04b37652 // sqrdmulh z18.s, z18.s, z19.s\n"
- ".inst 0x44828237 // srshl z23.s, p0/M, z23.s, z17.s\n"
- ".inst 0x44828235 // srshl z21.s, p0/M, z21.s, z17.s\n"
- ".inst 0x44828234 // srshl z20.s, p0/M, z20.s, z17.s\n"
- ".inst 0x44828232 // srshl z18.s, p0/M, z18.s, z17.s\n"
- "add z23.s, z23.s, z16.s\n"
- "add z21.s, z21.s, z16.s\n"
- "add z20.s, z20.s, z16.s\n"
- "add z18.s, z18.s, z16.s\n"
- "mov z17.s, #0x0\n"
- "mov z16.s, #0xff\n"
- "smax z23.s, p0/M, z23.s, z17.s\n"
- "smax z21.s, p0/M, z21.s, z17.s\n"
- "smax z20.s, p0/M, z20.s, z17.s\n"
- "smax z18.s, p0/M, z18.s, z17.s\n"
- "smin z23.s, p0/M, z23.s, z16.s\n"
- "smin z21.s, p0/M, z21.s, z16.s\n"
- "smin z20.s, p0/M, z20.s, z16.s\n"
- "trn1 z17.h, z23.h, z21.h\n"
- "smin z18.s, p0/M, z18.s, z16.s\n"
- "trn1 z16.h, z20.h, z18.h\n"
+ "mov z23.s, #0xff\n"
+ "ld1rw { z22.s }, p0/Z, [x22]\n"
+ "neg z16.s, p0/M, z16.s\n"
+ "ld1rw { z21.s }, p0/Z, [x21]\n"
+ "ld1rw { z20.s }, p0/Z, [x20]\n"
+ ".inst 0x45914213 // saddwb z19.s, z16.s, z17.h\n"
+ ".inst 0x45914611 // saddwt z17.s, z16.s, z17.h\n"
+ ".inst 0x459a4212 // saddwb z18.s, z16.s, z26.h\n"
+ ".inst 0x459a4610 // saddwt z16.s, z16.s, z26.h\n"
+ ".inst 0x44828313 // srshl z19.s, p0/M, z19.s, z24.s\n"
+ ".inst 0x44828311 // srshl z17.s, p0/M, z17.s, z24.s\n"
+ ".inst 0x44828312 // srshl z18.s, p0/M, z18.s, z24.s\n"
+ ".inst 0x44828310 // srshl z16.s, p0/M, z16.s, z24.s\n"
+ ".inst 0x04b67673 // sqrdmulh z19.s, z19.s, z22.s\n"
+ ".inst 0x04b67631 // sqrdmulh z17.s, z17.s, z22.s\n"
+ ".inst 0x04b67652 // sqrdmulh z18.s, z18.s, z22.s\n"
+ ".inst 0x04b67610 // sqrdmulh z16.s, z16.s, z22.s\n"
+ ".inst 0x448282b3 // srshl z19.s, p0/M, z19.s, z21.s\n"
+ ".inst 0x448282b1 // srshl z17.s, p0/M, z17.s, z21.s\n"
+ ".inst 0x448282b2 // srshl z18.s, p0/M, z18.s, z21.s\n"
+ ".inst 0x448282b0 // srshl z16.s, p0/M, z16.s, z21.s\n"
+ "add z19.s, z19.s, z20.s\n"
+ "add z17.s, z17.s, z20.s\n"
+ "add z18.s, z18.s, z20.s\n"
+ "add z16.s, z16.s, z20.s\n"
+ "smax z19.s, p0/M, z19.s, z25.s\n"
+ "smax z17.s, p0/M, z17.s, z25.s\n"
+ "smax z18.s, p0/M, z18.s, z25.s\n"
+ "smax z16.s, p0/M, z16.s, z25.s\n"
+ "smin z19.s, p0/M, z19.s, z23.s\n"
+ "smin z17.s, p0/M, z17.s, z23.s\n"
+ "smin z18.s, p0/M, z18.s, z23.s\n"
+ "smin z16.s, p0/M, z16.s, z23.s\n"
+ "trn1 z17.h, z19.h, z17.h\n"
+ "trn1 z16.h, z18.h, z16.h\n"
"trn1 z16.b, z17.b, z16.b\n"
"st1b { z16.b }, p4, [%x[outptr], x9]\n"
"incb x9\n"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index 1ba78f3fba..02b165da73 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,13 +87,13 @@ void sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"mov x3, #0x0\n"
"mov x20, #0x4\n"
"ldr x4, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x5, x6, [x21, #0x0]\n"
- "whilelt p2.h, XZR, x20\n"
+ "add x5, %x[args], %[offsetof_rescale]\n"
+ "mov x6, #0x0\n"
+ "ldp x7, x8, [x21, #0x0]\n"
+ "ldp x17, x16, [x21, #0x10]\n"
+ "whilelt p1.h, XZR, x20\n"
"whilelt p0.h, x3, x2\n"
- "ldp x7, x8, [x21, #0x10]\n"
- "ldp x17, x16, [x4, #0x0]\n"
- "add x15, %x[args], %[offsetof_rescale]\n"
- "mov x14, #0x0\n"
+ "ldp x15, x14, [x4, #0x0]\n"
"ldp x13, x12, [x4, #0x10]\n"
"ldp x11, x10, [x4, #0x20]\n"
"ldp x9, x28, [x4, #0x30]\n"
@@ -101,103 +101,103 @@ void sve_fp16_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"ldp x25, x24, [x4, #0x50]\n"
"ldp x23, x22, [x4, #0x60]\n"
"ldp x21, x20, [x4, #0x70]\n"
- "ld1h { z7.h }, p0/Z, [x10, x3, LSL #1]\n"
- "ld1h { z6.h }, p0/Z, [x9, x3, LSL #1]\n"
- "ld1h { z5.h }, p0/Z, [x26, x3, LSL #1]\n"
- "ld1h { z4.h }, p0/Z, [x25, x3, LSL #1]\n"
- "ld1h { z3.h }, p0/Z, [x16, x3, LSL #1]\n"
- "ld1h { z2.h }, p0/Z, [x13, x3, LSL #1]\n"
- "ld1h { z1.h }, p0/Z, [x11, x3, LSL #1]\n"
- "ld1h { z31.h }, p0/Z, [x27, x3, LSL #1]\n"
- "ld1h { z30.h }, p0/Z, [x28, x3, LSL #1]\n"
- "ld1h { z29.h }, p0/Z, [x24, x3, LSL #1]\n"
- "ld1h { z28.h }, p0/Z, [x22, x3, LSL #1]\n"
- "ld1h { z27.h }, p0/Z, [x21, x3, LSL #1]\n"
- "ld1h { z26.h }, p0/Z, [x17, x3, LSL #1]\n"
- "ld1h { z25.h }, p0/Z, [x12, x3, LSL #1]\n"
- "ld1h { z24.h }, p0/Z, [x23, x3, LSL #1]\n"
- "ld1h { z23.h }, p0/Z, [x20, x3, LSL #1]\n"
+ "ld1h { z8.h }, p0/Z, [x10, x3, LSL #1]\n"
+ "ld1h { z7.h }, p0/Z, [x9, x3, LSL #1]\n"
+ "ld1h { z6.h }, p0/Z, [x26, x3, LSL #1]\n"
+ "ld1h { z5.h }, p0/Z, [x25, x3, LSL #1]\n"
+ "ld1h { z4.h }, p0/Z, [x14, x3, LSL #1]\n"
+ "ld1h { z3.h }, p0/Z, [x13, x3, LSL #1]\n"
+ "ld1h { z2.h }, p0/Z, [x11, x3, LSL #1]\n"
+ "ld1h { z1.h }, p0/Z, [x27, x3, LSL #1]\n"
+ "ld1h { z31.h }, p0/Z, [x28, x3, LSL #1]\n"
+ "ld1h { z30.h }, p0/Z, [x24, x3, LSL #1]\n"
+ "ld1h { z29.h }, p0/Z, [x22, x3, LSL #1]\n"
+ "ld1h { z28.h }, p0/Z, [x21, x3, LSL #1]\n"
+ "ld1h { z27.h }, p0/Z, [x15, x3, LSL #1]\n"
+ "ld1h { z26.h }, p0/Z, [x12, x3, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x23, x3, LSL #1]\n"
+ "ld1h { z24.h }, p0/Z, [x20, x3, LSL #1]\n"
"incw x3\n"
+ "ld1rqh { z0.h }, p1/Z, [x5]\n"
"whilelt p1.h, x3, x2\n"
- "ld1rqh { z0.h }, p2/Z, [x15]\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "fadd z17.h, z7.h, z6.h\n"
- "fadd z16.h, z5.h, z4.h\n"
- "ld1h { z7.h }, p1/Z, [x10, x3, LSL #1]\n"
- "ld1h { z6.h }, p1/Z, [x9, x3, LSL #1]\n"
- "fadd z19.h, z17.h, z16.h\n"
- "fadd z18.h, z3.h, z2.h\n"
- "ld1h { z5.h }, p1/Z, [x26, x3, LSL #1]\n"
- "ld1h { z4.h }, p1/Z, [x25, x3, LSL #1]\n"
- "fadd z17.h, z1.h, z31.h\n"
- "fadd z22.h, z30.h, z29.h\n"
- "ld1h { z3.h }, p1/Z, [x16, x3, LSL #1]\n"
- "ld1h { z2.h }, p1/Z, [x13, x3, LSL #1]\n"
- "fadd z16.h, z28.h, z27.h\n"
- "fadd z21.h, z18.h, z19.h\n"
- "ld1h { z1.h }, p1/Z, [x11, x3, LSL #1]\n"
- "ld1h { z31.h }, p1/Z, [x27, x3, LSL #1]\n"
- "fadd z20.h, z16.h, z19.h\n"
- "fadd z19.h, z26.h, z17.h\n"
- "ld1h { z30.h }, p1/Z, [x28, x3, LSL #1]\n"
- "ld1h { z29.h }, p1/Z, [x24, x3, LSL #1]\n"
- "fadd z18.h, z25.h, z22.h\n"
- "fadd z17.h, z24.h, z17.h\n"
- "ld1h { z28.h }, p1/Z, [x22, x3, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x21, x3, LSL #1]\n"
- "fadd z16.h, z23.h, z22.h\n"
- "ld1h { z26.h }, p1/Z, [x17, x3, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x12, x3, LSL #1]\n"
- "fadd z19.h, z21.h, z19.h\n"
- "ld1h { z24.h }, p1/Z, [x23, x3, LSL #1]\n"
- "ld1h { z23.h }, p1/Z, [x20, x3, LSL #1]\n"
+ "fadd z19.h, z8.h, z7.h\n"
+ "fadd z16.h, z6.h, z5.h\n"
+ "ld1h { z8.h }, p1/Z, [x10, x3, LSL #1]\n"
+ "ld1h { z7.h }, p1/Z, [x9, x3, LSL #1]\n"
+ "fadd z23.h, z4.h, z3.h\n"
+ "fadd z18.h, z2.h, z1.h\n"
+ "ld1h { z6.h }, p1/Z, [x26, x3, LSL #1]\n"
+ "ld1h { z5.h }, p1/Z, [x25, x3, LSL #1]\n"
+ "fadd z17.h, z31.h, z30.h\n"
+ "fadd z22.h, z29.h, z28.h\n"
+ "ld1h { z4.h }, p1/Z, [x14, x3, LSL #1]\n"
+ "ld1h { z3.h }, p1/Z, [x13, x3, LSL #1]\n"
+ "fadd z16.h, z19.h, z16.h\n"
+ "ld1h { z2.h }, p1/Z, [x11, x3, LSL #1]\n"
+ "ld1h { z1.h }, p1/Z, [x27, x3, LSL #1]\n"
+ "whilelt p0.h, x6, x2\n"
+ "fadd z19.h, z27.h, z18.h\n"
+ "fadd z21.h, z25.h, z18.h\n"
+ "ld1h { z31.h }, p1/Z, [x28, x3, LSL #1]\n"
+ "ld1h { z30.h }, p1/Z, [x24, x3, LSL #1]\n"
+ "fadd z18.h, z26.h, z17.h\n"
+ "fadd z20.h, z24.h, z17.h\n"
+ "ld1h { z29.h }, p1/Z, [x22, x3, LSL #1]\n"
+ "ld1h { z28.h }, p1/Z, [x21, x3, LSL #1]\n"
+ "fadd z17.h, z23.h, z16.h\n"
+ "fadd z16.h, z22.h, z16.h\n"
+ "ld1h { z27.h }, p1/Z, [x15, x3, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x12, x3, LSL #1]\n"
+ "ld1h { z25.h }, p1/Z, [x23, x3, LSL #1]\n"
+ "ld1h { z24.h }, p1/Z, [x20, x3, LSL #1]\n"
"incw x3\n"
- "fadd z18.h, z21.h, z18.h\n"
- "fadd z17.h, z17.h, z20.h\n"
- "fadd z16.h, z16.h, z20.h\n"
- "whilelt p0.h, x14, x2\n"
+ "fadd z19.h, z17.h, z19.h\n"
+ "fadd z18.h, z17.h, z18.h\n"
+ "fadd z17.h, z21.h, z16.h\n"
+ "fadd z16.h, z20.h, z16.h\n"
"whilelt p1.h, x3, x2\n"
"fmul z19.h, z19.h, z0.h[0]\n"
"fmul z18.h, z18.h, z0.h[1]\n"
- "st1h { z19.h }, p0, [x5, x14, LSL #1]\n"
"fmul z17.h, z17.h, z0.h[2]\n"
"fmul z16.h, z16.h, z0.h[3]\n"
- "st1h { z18.h }, p0, [x6, x14, LSL #1]\n"
- "st1h { z17.h }, p0, [x7, x14, LSL #1]\n"
- "st1h { z16.h }, p0, [x8, x14, LSL #1]\n"
- "incw x14\n"
+ "st1h { z19.h }, p0, [x7, x6, LSL #1]\n"
+ "st1h { z18.h }, p0, [x8, x6, LSL #1]\n"
+ "st1h { z17.h }, p0, [x17, x6, LSL #1]\n"
+ "st1h { z16.h }, p0, [x16, x6, LSL #1]\n"
+ "incw x6\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "fadd z17.h, z7.h, z6.h\n"
- "fadd z16.h, z5.h, z4.h\n"
- "whilelt p0.h, x14, x2\n"
- "fadd z20.h, z17.h, z16.h\n"
- "fadd z18.h, z3.h, z2.h\n"
- "fadd z17.h, z1.h, z31.h\n"
- "fadd z19.h, z30.h, z29.h\n"
- "fadd z16.h, z28.h, z27.h\n"
- "fadd z21.h, z18.h, z20.h\n"
- "fadd z20.h, z16.h, z20.h\n"
- "fadd z16.h, z26.h, z17.h\n"
- "fadd z18.h, z25.h, z19.h\n"
- "fadd z17.h, z24.h, z17.h\n"
- "fadd z19.h, z23.h, z19.h\n"
- "fadd z16.h, z21.h, z16.h\n"
- "fmul z16.h, z16.h, z0.h[0]\n"
- "st1h { z16.h }, p0, [x5, x14, LSL #1]\n"
- "fadd z18.h, z21.h, z18.h\n"
- "fadd z17.h, z17.h, z20.h\n"
+ "fadd z19.h, z8.h, z7.h\n"
+ "fadd z16.h, z6.h, z5.h\n"
+ "whilelt p0.h, x6, x2\n"
+ "fadd z23.h, z4.h, z3.h\n"
+ "fadd z18.h, z2.h, z1.h\n"
+ "fadd z17.h, z31.h, z30.h\n"
+ "fadd z22.h, z29.h, z28.h\n"
+ "fadd z16.h, z19.h, z16.h\n"
+ "fadd z19.h, z27.h, z18.h\n"
+ "fadd z21.h, z25.h, z18.h\n"
+ "fadd z18.h, z26.h, z17.h\n"
+ "fadd z20.h, z24.h, z17.h\n"
+ "fadd z17.h, z23.h, z16.h\n"
+ "fadd z16.h, z22.h, z16.h\n"
+ "fadd z19.h, z17.h, z19.h\n"
+ "fadd z18.h, z17.h, z18.h\n"
+ "fadd z17.h, z21.h, z16.h\n"
+ "fadd z16.h, z20.h, z16.h\n"
+ "fmul z19.h, z19.h, z0.h[0]\n"
"fmul z18.h, z18.h, z0.h[1]\n"
"fmul z17.h, z17.h, z0.h[2]\n"
- "fadd z16.h, z19.h, z20.h\n"
"fmul z16.h, z16.h, z0.h[3]\n"
- "st1h { z18.h }, p0, [x6, x14, LSL #1]\n"
- "st1h { z17.h }, p0, [x7, x14, LSL #1]\n"
- "st1h { z16.h }, p0, [x8, x14, LSL #1]\n"
+ "st1h { z19.h }, p0, [x7, x6, LSL #1]\n"
+ "st1h { z18.h }, p0, [x8, x6, LSL #1]\n"
+ "st1h { z17.h }, p0, [x17, x6, LSL #1]\n"
+ "st1h { z16.h }, p0, [x16, x6, LSL #1]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "p0", "p1", "p2", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
index 2bef44ea5c..942240d816 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,12 +46,12 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
"cnth x28\n"
"cnth x27, ALL, MUL #2\n"
"cnth x26, ALL, MUL #3\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
"whilelt p3.h, x9, %x[n_channels]\n"
- "ld1rh { z7.h }, p0/Z, [%x[rescale_ptr]]\n"
"whilelt p2.h, x28, %x[n_channels]\n"
"whilelt p1.h, x27, %x[n_channels]\n"
"whilelt p0.h, x26, %x[n_channels]\n"
+ "ld1rh { z7.h }, p4/Z, [%x[rescale_ptr]]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
@@ -93,17 +93,17 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
"add x24, x24, #0x20\n"
"fadd z21.h, z27.h, z21.h\n"
"fadd z17.h, z26.h, z17.h\n"
- "ld1h { z2.h }, p3/Z, [x23, x9, LSL #1]\n"
- "ld1h { z1.h }, p3/Z, [x22, x9, LSL #1]\n"
"fadd z20.h, z25.h, z20.h\n"
"fadd z16.h, z24.h, z16.h\n"
- "ld1h { z0.h }, p3/Z, [x21, x9, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z2.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x9, LSL #1]\n"
"fadd z19.h, z23.h, z19.h\n"
"fadd z18.h, z22.h, z18.h\n"
+ "ld1h { z0.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z31.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "fadd z17.h, z21.h, z17.h\n"
"ld1h { z30.h }, p2/Z, [x23, x28, LSL #1]\n"
"ld1h { z22.h }, p2/Z, [x22, x28, LSL #1]\n"
- "fadd z17.h, z21.h, z17.h\n"
"fadd z16.h, z20.h, z16.h\n"
"ld1h { z29.h }, p2/Z, [x21, x28, LSL #1]\n"
"ld1h { z28.h }, p2/Z, [x20, x28, LSL #1]\n"
@@ -142,30 +142,30 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1h { z16.h }, p3/Z, [x20, x9, LSL #1]\n"
"subs x21, x21, #0x1\n"
- "fadd z6.h, z6.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x20, x28, LSL #1]\n"
- "ld1h { z16.h }, p1/Z, [x20, x27, LSL #1]\n"
- "fadd z5.h, z5.h, z17.h\n"
- "fadd z4.h, z4.h, z16.h\n"
+ "ld1h { z19.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z17.h }, p1/Z, [x20, x27, LSL #1]\n"
"ld1h { z16.h }, p0/Z, [x20, x26, LSL #1]\n"
+ "fadd z6.h, z6.h, z19.h\n"
+ "fadd z5.h, z5.h, z18.h\n"
+ "fadd z4.h, z4.h, z17.h\n"
"fadd z3.h, z3.h, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"fmul z6.h, z6.h, z7.h\n"
"fmul z5.h, z5.h, z7.h\n"
- "st1h { z6.h }, p3, [%x[outptr], x9, LSL #1]\n"
"fmul z4.h, z4.h, z7.h\n"
"fmul z3.h, z3.h, z7.h\n"
- "st1h { z5.h }, p2, [%x[outptr], x28, LSL #1]\n"
- "st1h { z4.h }, p1, [%x[outptr], x27, LSL #1]\n"
+ "st1h { z6.h }, p3, [%x[outptr], x9, LSL #1]\n"
"inch x9, ALL, MUL #4\n"
+ "st1h { z5.h }, p2, [%x[outptr], x28, LSL #1]\n"
"inch x28, ALL, MUL #4\n"
+ "st1h { z4.h }, p1, [%x[outptr], x27, LSL #1]\n"
+ "inch x27, ALL, MUL #4\n"
"st1h { z3.h }, p0, [%x[outptr], x26, LSL #1]\n"
"inch x26, ALL, MUL #4\n"
"whilelt p0.h, x26, %x[n_channels]\n"
- "inch x27, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
"whilelt p3.h, x9, %x[n_channels]\n"
@@ -189,14 +189,14 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
"fadd z16.h, z0.h, z31.h\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "fadd z16.h, z17.h, z16.h\n"
"subs x25, x25, #0x1\n"
- "fadd z6.h, z6.h, z16.h\n"
"add x24, x24, #0x20\n"
+ "fadd z16.h, z17.h, z16.h\n"
"ld1h { z2.h }, p3/Z, [x23, x9, LSL #1]\n"
"ld1h { z1.h }, p3/Z, [x22, x9, LSL #1]\n"
"ld1h { z0.h }, p3/Z, [x21, x9, LSL #1]\n"
"ld1h { z31.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "fadd z6.h, z6.h, z16.h\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"fadd z17.h, z2.h, z1.h\n"
@@ -208,8 +208,8 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1h { z16.h }, p3/Z, [x20, x9, LSL #1]\n"
"subs x21, x21, #0x1\n"
+ "ld1h { z16.h }, p3/Z, [x20, x9, LSL #1]\n"
"fadd z6.h, z6.h, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
@@ -221,7 +221,7 @@ void sve_fp16_nhwc_avg_generic_depthfirst_impl(
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 31bbfd085e..eef19e9993 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,22 +66,22 @@ void sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr x15, [%x[args], %[offsetof_n_channels]]\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
"mov x14, #0x0\n"
- "whilelt p0.h, x14, x15\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x13, x12, [x21, #0x0]\n"
"ptrue p2.b\n"
- "mov x11, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [x21, #0x0]\n"
"ldp x10, x9, [x21, #0x10]\n"
+ "whilelt p0.h, x14, x15\n"
"ldp x28, x27, [x20, #0x0]\n"
"ldp x26, x25, [x20, #0x10]\n"
"ldp x24, x23, [x20, #0x20]\n"
"ldp x22, x21, [x20, #0x30]\n"
"ldr x20, [x20, #0x40]\n"
"ld1h { z31.h }, p0/Z, [x27, x14, LSL #1]\n"
- "ld1h { z30.h }, p0/Z, [x24, x14, LSL #1]\n"
- "ld1h { z29.h }, p0/Z, [x21, x14, LSL #1]\n"
+ "ld1h { z30.h }, p0/Z, [x28, x14, LSL #1]\n"
+ "ld1h { z29.h }, p0/Z, [x24, x14, LSL #1]\n"
"ld1h { z28.h }, p0/Z, [x25, x14, LSL #1]\n"
- "ld1h { z27.h }, p0/Z, [x28, x14, LSL #1]\n"
+ "ld1h { z27.h }, p0/Z, [x21, x14, LSL #1]\n"
"ld1h { z26.h }, p0/Z, [x26, x14, LSL #1]\n"
"ld1h { z25.h }, p0/Z, [x23, x14, LSL #1]\n"
"ld1h { z24.h }, p0/Z, [x22, x14, LSL #1]\n"
@@ -90,50 +90,50 @@ void sve_fp16_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"whilelt p1.h, x14, x15\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "movprfx z22, z31\n fmax z22.h, p2/M, z22.h, z30.h\n"
- "movprfx z21, z30\n fmax z21.h, p2/M, z21.h, z29.h\n"
+ "movprfx z22, z31\n fmax z22.h, p2/M, z22.h, z29.h\n"
+ "movprfx z21, z29\n fmax z21.h, p2/M, z21.h, z27.h\n"
"ld1h { z31.h }, p1/Z, [x27, x14, LSL #1]\n"
- "ld1h { z30.h }, p1/Z, [x24, x14, LSL #1]\n"
- "movprfx z20, z28\n fmax z20.h, p2/M, z20.h, z27.h\n"
- "movprfx z19, z26\n fmax z19.h, p2/M, z19.h, z25.h\n"
- "ld1h { z29.h }, p1/Z, [x21, x14, LSL #1]\n"
- "ld1h { z27.h }, p1/Z, [x28, x14, LSL #1]\n"
- "movprfx z17, z28\n fmax z17.h, p2/M, z17.h, z24.h\n"
- "movprfx z18, z25\n fmax z18.h, p2/M, z18.h, z23.h\n"
+ "ld1h { z29.h }, p1/Z, [x24, x14, LSL #1]\n"
+ "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z30.h\n"
+ "movprfx z17, z26\n fmax z17.h, p2/M, z17.h, z25.h\n"
+ "ld1h { z27.h }, p1/Z, [x21, x14, LSL #1]\n"
+ "ld1h { z30.h }, p1/Z, [x28, x14, LSL #1]\n"
+ "movprfx z16, z28\n fmax z16.h, p2/M, z16.h, z24.h\n"
+ "movprfx z20, z25\n fmax z20.h, p2/M, z20.h, z23.h\n"
"ld1h { z28.h }, p1/Z, [x25, x14, LSL #1]\n"
"ld1h { z26.h }, p1/Z, [x26, x14, LSL #1]\n"
"ld1h { z25.h }, p1/Z, [x23, x14, LSL #1]\n"
"ld1h { z24.h }, p1/Z, [x22, x14, LSL #1]\n"
- "whilelt p0.h, x11, x15\n"
- "movprfx z16, z22\n fmax z16.h, p2/M, z16.h, z20.h\n"
+ "whilelt p0.h, x13, x15\n"
"ld1h { z23.h }, p1/Z, [x20, x14, LSL #1]\n"
"incw x14\n"
+ "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z18.h\n"
+ "movprfx z18, z17\n fmax z18.h, p2/M, z18.h, z22.h\n"
+ "movprfx z17, z16\n fmax z17.h, p2/M, z17.h, z21.h\n"
+ "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z20.h\n"
"whilelt p1.h, x14, x15\n"
- "st1h { z16.h }, p0, [x13, x11, LSL #1]\n"
- "movprfx z16, z19\n fmax z16.h, p2/M, z16.h, z22.h\n"
- "fmax z17.h, p2/M, z17.h, z21.h\n"
- "st1h { z16.h }, p0, [x12, x11, LSL #1]\n"
- "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z18.h\n"
- "st1h { z17.h }, p0, [x10, x11, LSL #1]\n"
- "st1h { z16.h }, p0, [x9, x11, LSL #1]\n"
- "incw x11\n"
+ "st1h { z19.h }, p0, [x12, x13, LSL #1]\n"
+ "st1h { z18.h }, p0, [x11, x13, LSL #1]\n"
+ "st1h { z17.h }, p0, [x10, x13, LSL #1]\n"
+ "st1h { z16.h }, p0, [x9, x13, LSL #1]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "movprfx z22, z31\n fmax z22.h, p2/M, z22.h, z30.h\n"
- "movprfx z21, z30\n fmax z21.h, p2/M, z21.h, z29.h\n"
- "movprfx z20, z28\n fmax z20.h, p2/M, z20.h, z27.h\n"
- "movprfx z19, z26\n fmax z19.h, p2/M, z19.h, z25.h\n"
- "movprfx z17, z28\n fmax z17.h, p2/M, z17.h, z24.h\n"
- "movprfx z18, z25\n fmax z18.h, p2/M, z18.h, z23.h\n"
- "whilelt p0.h, x11, x15\n"
- "movprfx z16, z22\n fmax z16.h, p2/M, z16.h, z20.h\n"
- "st1h { z16.h }, p0, [x13, x11, LSL #1]\n"
- "movprfx z16, z19\n fmax z16.h, p2/M, z16.h, z22.h\n"
- "fmax z17.h, p2/M, z17.h, z21.h\n"
- "st1h { z16.h }, p0, [x12, x11, LSL #1]\n"
- "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z18.h\n"
- "st1h { z17.h }, p0, [x10, x11, LSL #1]\n"
- "st1h { z16.h }, p0, [x9, x11, LSL #1]\n"
+ "movprfx z22, z31\n fmax z22.h, p2/M, z22.h, z29.h\n"
+ "movprfx z21, z29\n fmax z21.h, p2/M, z21.h, z27.h\n"
+ "movprfx z18, z28\n fmax z18.h, p2/M, z18.h, z30.h\n"
+ "movprfx z17, z26\n fmax z17.h, p2/M, z17.h, z25.h\n"
+ "movprfx z16, z28\n fmax z16.h, p2/M, z16.h, z24.h\n"
+ "movprfx z20, z25\n fmax z20.h, p2/M, z20.h, z23.h\n"
+ "whilelt p0.h, x13, x15\n"
+ "movprfx z19, z22\n fmax z19.h, p2/M, z19.h, z18.h\n"
+ "movprfx z18, z17\n fmax z18.h, p2/M, z18.h, z22.h\n"
+ "movprfx z17, z16\n fmax z17.h, p2/M, z17.h, z21.h\n"
+ "movprfx z16, z21\n fmax z16.h, p2/M, z16.h, z20.h\n"
+ "st1h { z19.h }, p0, [x12, x13, LSL #1]\n"
+ "st1h { z18.h }, p0, [x11, x13, LSL #1]\n"
+ "st1h { z17.h }, p0, [x10, x13, LSL #1]\n"
+ "st1h { z16.h }, p0, [x9, x13, LSL #1]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
index 1a01412836..31c4f48b96 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp16_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,176 +44,176 @@ void sve_fp16_nhwc_max_generic_depthfirst_impl(
"cnth x28\n"
"cnth x27, ALL, MUL #2\n"
"cnth x26, ALL, MUL #3\n"
- "whilelt p4.h, x9, %x[n_channels]\n"
- "whilelt p3.h, x28, %x[n_channels]\n"
- "whilelt p2.h, x27, %x[n_channels]\n"
- "whilelt p1.h, x26, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.h, x9, %x[n_channels]\n"
+ "whilelt p2.h, x28, %x[n_channels]\n"
+ "whilelt p1.h, x27, %x[n_channels]\n"
+ "whilelt p0.h, x26, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.h, #0xfc00\n"
- "mov z7.h, #0xfc00\n"
- "mov x24, %x[inptrs]\n"
"mov z6.h, #0xfc00\n"
"mov z5.h, #0xfc00\n"
+ "mov x24, %x[inptrs]\n"
+ "mov z4.h, #0xfc00\n"
+ "mov z3.h, #0xfc00\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1h { z4.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z3.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z2.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z1.h }, p4/Z, [x20, x9, LSL #1]\n"
- "ld1h { z0.h }, p3/Z, [x23, x28, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x22, x28, LSL #1]\n"
- "ld1h { z22.h }, p3/Z, [x21, x28, LSL #1]\n"
- "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
- "ld1h { z29.h }, p2/Z, [x23, x27, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x22, x27, LSL #1]\n"
- "ld1h { z21.h }, p2/Z, [x21, x27, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x20, x27, LSL #1]\n"
- "ld1h { z26.h }, p1/Z, [x23, x26, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x22, x26, LSL #1]\n"
- "ld1h { z20.h }, p1/Z, [x21, x26, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z2.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z23.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z31.h }, p2/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z30.h }, p2/Z, [x22, x28, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z28.h }, p1/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x22, x27, LSL #1]\n"
+ "ld1h { z21.h }, p1/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x20, x27, LSL #1]\n"
+ "ld1h { z16.h }, p0/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x22, x26, LSL #1]\n"
+ "ld1h { z20.h }, p0/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z24.h }, p0/Z, [x20, x26, LSL #1]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
- "movprfx z23, z2\n fmax z23.h, p0/M, z23.h, z1.h\n"
+ "movprfx z19, z2\n fmax z19.h, p4/M, z19.h, z1.h\n"
+ "fmax z23.h, p4/M, z23.h, z0.h\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "movprfx z18, z0\n fmax z18.h, p0/M, z18.h, z31.h\n"
- "fmax z22.h, p0/M, z22.h, z30.h\n"
- "ld1h { z4.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z3.h }, p4/Z, [x22, x9, LSL #1]\n"
- "movprfx z17, z29\n fmax z17.h, p0/M, z17.h, z28.h\n"
- "fmax z21.h, p0/M, z21.h, z27.h\n"
- "ld1h { z2.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z1.h }, p4/Z, [x20, x9, LSL #1]\n"
- "movprfx z16, z26\n fmax z16.h, p0/M, z16.h, z25.h\n"
- "fmax z20.h, p0/M, z20.h, z24.h\n"
- "ld1h { z0.h }, p3/Z, [x23, x28, LSL #1]\n"
- "ld1h { z31.h }, p3/Z, [x22, x28, LSL #1]\n"
- "fmax z19.h, p0/M, z19.h, z23.h\n"
- "fmax z18.h, p0/M, z18.h, z22.h\n"
- "ld1h { z22.h }, p3/Z, [x21, x28, LSL #1]\n"
- "ld1h { z30.h }, p3/Z, [x20, x28, LSL #1]\n"
- "fmax z17.h, p0/M, z17.h, z21.h\n"
- "fmax z16.h, p0/M, z16.h, z20.h\n"
- "ld1h { z29.h }, p2/Z, [x23, x27, LSL #1]\n"
- "ld1h { z28.h }, p2/Z, [x22, x27, LSL #1]\n"
+ "movprfx z18, z31\n fmax z18.h, p4/M, z18.h, z30.h\n"
+ "fmax z22.h, p4/M, z22.h, z29.h\n"
+ "movprfx z17, z28\n fmax z17.h, p4/M, z17.h, z27.h\n"
+ "fmax z21.h, p4/M, z21.h, z26.h\n"
+ "fmax z16.h, p4/M, z16.h, z25.h\n"
+ "fmax z20.h, p4/M, z20.h, z24.h\n"
+ "ld1h { z2.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "fmax z19.h, p4/M, z19.h, z23.h\n"
+ "fmax z18.h, p4/M, z18.h, z22.h\n"
+ "ld1h { z23.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "fmax z17.h, p4/M, z17.h, z21.h\n"
"subs x25, x25, #0x1\n"
- "fmax z8.h, p0/M, z8.h, z19.h\n"
- "ld1h { z21.h }, p2/Z, [x21, x27, LSL #1]\n"
- "ld1h { z27.h }, p2/Z, [x20, x27, LSL #1]\n"
- "fmax z7.h, p0/M, z7.h, z18.h\n"
- "fmax z6.h, p0/M, z6.h, z17.h\n"
- "ld1h { z26.h }, p1/Z, [x23, x26, LSL #1]\n"
- "ld1h { z25.h }, p1/Z, [x22, x26, LSL #1]\n"
- "fmax z5.h, p0/M, z5.h, z16.h\n"
+ "ld1h { z31.h }, p2/Z, [x23, x28, LSL #1]\n"
+ "ld1h { z30.h }, p2/Z, [x22, x28, LSL #1]\n"
+ "fmax z16.h, p4/M, z16.h, z20.h\n"
"add x24, x24, #0x20\n"
- "ld1h { z20.h }, p1/Z, [x21, x26, LSL #1]\n"
- "ld1h { z24.h }, p1/Z, [x20, x26, LSL #1]\n"
+ "ld1h { z22.h }, p2/Z, [x21, x28, LSL #1]\n"
+ "ld1h { z29.h }, p2/Z, [x20, x28, LSL #1]\n"
+ "fmax z6.h, p4/M, z6.h, z19.h\n"
+ "fmax z5.h, p4/M, z5.h, z18.h\n"
+ "ld1h { z28.h }, p1/Z, [x23, x27, LSL #1]\n"
+ "ld1h { z27.h }, p1/Z, [x22, x27, LSL #1]\n"
+ "fmax z4.h, p4/M, z4.h, z17.h\n"
+ "ld1h { z21.h }, p1/Z, [x21, x27, LSL #1]\n"
+ "ld1h { z26.h }, p1/Z, [x20, x27, LSL #1]\n"
+ "fmax z3.h, p4/M, z3.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x23, x26, LSL #1]\n"
+ "ld1h { z25.h }, p0/Z, [x22, x26, LSL #1]\n"
+ "ld1h { z20.h }, p0/Z, [x21, x26, LSL #1]\n"
+ "ld1h { z24.h }, p0/Z, [x20, x26, LSL #1]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z4\n fmax z19.h, p0/M, z19.h, z3.h\n"
- "movprfx z23, z2\n fmax z23.h, p0/M, z23.h, z1.h\n"
- "movprfx z18, z0\n fmax z18.h, p0/M, z18.h, z31.h\n"
- "fmax z22.h, p0/M, z22.h, z30.h\n"
- "movprfx z17, z29\n fmax z17.h, p0/M, z17.h, z28.h\n"
- "fmax z21.h, p0/M, z21.h, z27.h\n"
- "movprfx z16, z26\n fmax z16.h, p0/M, z16.h, z25.h\n"
- "fmax z20.h, p0/M, z20.h, z24.h\n"
- "fmax z19.h, p0/M, z19.h, z23.h\n"
- "fmax z18.h, p0/M, z18.h, z22.h\n"
- "fmax z17.h, p0/M, z17.h, z21.h\n"
- "fmax z16.h, p0/M, z16.h, z20.h\n"
- "fmax z8.h, p0/M, z8.h, z19.h\n"
- "fmax z7.h, p0/M, z7.h, z18.h\n"
- "fmax z6.h, p0/M, z6.h, z17.h\n"
- "fmax z5.h, p0/M, z5.h, z16.h\n"
+ "movprfx z19, z2\n fmax z19.h, p4/M, z19.h, z1.h\n"
+ "fmax z23.h, p4/M, z23.h, z0.h\n"
+ "movprfx z18, z31\n fmax z18.h, p4/M, z18.h, z30.h\n"
+ "fmax z22.h, p4/M, z22.h, z29.h\n"
+ "movprfx z17, z28\n fmax z17.h, p4/M, z17.h, z27.h\n"
+ "fmax z21.h, p4/M, z21.h, z26.h\n"
+ "fmax z16.h, p4/M, z16.h, z25.h\n"
+ "fmax z20.h, p4/M, z20.h, z24.h\n"
+ "fmax z19.h, p4/M, z19.h, z23.h\n"
+ "fmax z18.h, p4/M, z18.h, z22.h\n"
+ "fmax z17.h, p4/M, z17.h, z21.h\n"
+ "fmax z16.h, p4/M, z16.h, z20.h\n"
+ "fmax z6.h, p4/M, z6.h, z19.h\n"
+ "fmax z5.h, p4/M, z5.h, z18.h\n"
+ "fmax z4.h, p4/M, z4.h, z17.h\n"
+ "fmax z3.h, p4/M, z3.h, z16.h\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1h { z16.h }, p4/Z, [x20, x9, LSL #1]\n"
"subs x21, x21, #0x1\n"
- "fmax z8.h, p0/M, z8.h, z16.h\n"
- "ld1h { z17.h }, p3/Z, [x20, x28, LSL #1]\n"
- "ld1h { z16.h }, p2/Z, [x20, x27, LSL #1]\n"
- "fmax z7.h, p0/M, z7.h, z17.h\n"
- "fmax z6.h, p0/M, z6.h, z16.h\n"
- "ld1h { z16.h }, p1/Z, [x20, x26, LSL #1]\n"
- "fmax z5.h, p0/M, z5.h, z16.h\n"
+ "ld1h { z19.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z18.h }, p2/Z, [x20, x28, LSL #1]\n"
+ "ld1h { z17.h }, p1/Z, [x20, x27, LSL #1]\n"
+ "ld1h { z16.h }, p0/Z, [x20, x26, LSL #1]\n"
+ "fmax z6.h, p4/M, z6.h, z19.h\n"
+ "fmax z5.h, p4/M, z5.h, z18.h\n"
+ "fmax z4.h, p4/M, z4.h, z17.h\n"
+ "fmax z3.h, p4/M, z3.h, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1h { z8.h }, p4, [%x[outptr], x9, LSL #1]\n"
+ "st1h { z6.h }, p3, [%x[outptr], x9, LSL #1]\n"
"inch x9, ALL, MUL #4\n"
- "st1h { z7.h }, p3, [%x[outptr], x28, LSL #1]\n"
+ "st1h { z5.h }, p2, [%x[outptr], x28, LSL #1]\n"
"inch x28, ALL, MUL #4\n"
- "st1h { z6.h }, p2, [%x[outptr], x27, LSL #1]\n"
+ "st1h { z4.h }, p1, [%x[outptr], x27, LSL #1]\n"
"inch x27, ALL, MUL #4\n"
- "st1h { z5.h }, p1, [%x[outptr], x26, LSL #1]\n"
+ "st1h { z3.h }, p0, [%x[outptr], x26, LSL #1]\n"
"inch x26, ALL, MUL #4\n"
- "whilelt p1.h, x26, %x[n_channels]\n"
+ "whilelt p0.h, x26, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.h, x9, %x[n_channels]\n"
+ "whilelt p3.h, x9, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.h, #0xfc00\n"
+ "mov z6.h, #0xfc00\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1h { z4.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z3.h }, p4/Z, [x22, x9, LSL #1]\n"
- "ld1h { z2.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z1.h }, p4/Z, [x20, x9, LSL #1]\n"
+ "ld1h { z2.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z23.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x20, x9, LSL #1]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z4\n fmax z16.h, p0/M, z16.h, z3.h\n"
- "movprfx z17, z2\n fmax z17.h, p0/M, z17.h, z1.h\n"
+ "movprfx z16, z2\n fmax z16.h, p4/M, z16.h, z1.h\n"
+ "movprfx z17, z23\n fmax z17.h, p4/M, z17.h, z0.h\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "fmax z16.h, p0/M, z16.h, z17.h\n"
"subs x25, x25, #0x1\n"
- "ld1h { z4.h }, p4/Z, [x23, x9, LSL #1]\n"
- "ld1h { z3.h }, p4/Z, [x22, x9, LSL #1]\n"
- "fmax z8.h, p0/M, z8.h, z16.h\n"
"add x24, x24, #0x20\n"
- "ld1h { z2.h }, p4/Z, [x21, x9, LSL #1]\n"
- "ld1h { z1.h }, p4/Z, [x20, x9, LSL #1]\n"
+ "fmax z16.h, p4/M, z16.h, z17.h\n"
+ "ld1h { z2.h }, p3/Z, [x23, x9, LSL #1]\n"
+ "ld1h { z1.h }, p3/Z, [x22, x9, LSL #1]\n"
+ "ld1h { z23.h }, p3/Z, [x21, x9, LSL #1]\n"
+ "ld1h { z0.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "fmax z6.h, p4/M, z6.h, z16.h\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z4\n fmax z16.h, p0/M, z16.h, z3.h\n"
- "movprfx z17, z2\n fmax z17.h, p0/M, z17.h, z1.h\n"
- "fmax z16.h, p0/M, z16.h, z17.h\n"
- "fmax z8.h, p0/M, z8.h, z16.h\n"
+ "movprfx z16, z2\n fmax z16.h, p4/M, z16.h, z1.h\n"
+ "movprfx z17, z23\n fmax z17.h, p4/M, z17.h, z0.h\n"
+ "fmax z16.h, p4/M, z16.h, z17.h\n"
+ "fmax z6.h, p4/M, z6.h, z16.h\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1h { z16.h }, p4/Z, [x20, x9, LSL #1]\n"
"subs x21, x21, #0x1\n"
- "fmax z8.h, p0/M, z8.h, z16.h\n"
+ "ld1h { z16.h }, p3/Z, [x20, x9, LSL #1]\n"
+ "fmax z6.h, p4/M, z6.h, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1h { z8.h }, p4, [%x[outptr], x9, LSL #1]\n"
+ "st1h { z6.h }, p3, [%x[outptr], x9, LSL #1]\n"
"inch x9\n"
- "whilelt p4.h, x9, %x[n_channels]\n"
+ "whilelt p3.h, x9, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
index c5ea5adea0..059c0468df 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -87,13 +87,13 @@ void sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"mov x3, #0x0\n"
"mov x20, #0x4\n"
"ldr x4, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x5, x6, [x21, #0x0]\n"
- "whilelt p2.s, XZR, x20\n"
+ "add x5, %x[args], %[offsetof_rescale]\n"
+ "mov x6, #0x0\n"
+ "ldp x7, x8, [x21, #0x0]\n"
+ "ldp x17, x16, [x21, #0x10]\n"
+ "whilelt p1.s, XZR, x20\n"
"whilelt p0.s, x3, x2\n"
- "ldp x7, x8, [x21, #0x10]\n"
- "ldp x17, x16, [x4, #0x0]\n"
- "add x15, %x[args], %[offsetof_rescale]\n"
- "mov x14, #0x0\n"
+ "ldp x15, x14, [x4, #0x0]\n"
"ldp x13, x12, [x4, #0x10]\n"
"ldp x11, x10, [x4, #0x20]\n"
"ldp x9, x28, [x4, #0x30]\n"
@@ -101,103 +101,103 @@ void sve_fp32_nhwc_avg_3x3_s1_output2x2_depthfirst_impl(
"ldp x25, x24, [x4, #0x50]\n"
"ldp x23, x22, [x4, #0x60]\n"
"ldp x21, x20, [x4, #0x70]\n"
- "ld1w { z7.s }, p0/Z, [x10, x3, LSL #2]\n"
- "ld1w { z6.s }, p0/Z, [x9, x3, LSL #2]\n"
- "ld1w { z5.s }, p0/Z, [x26, x3, LSL #2]\n"
- "ld1w { z4.s }, p0/Z, [x25, x3, LSL #2]\n"
- "ld1w { z3.s }, p0/Z, [x16, x3, LSL #2]\n"
- "ld1w { z2.s }, p0/Z, [x13, x3, LSL #2]\n"
- "ld1w { z1.s }, p0/Z, [x11, x3, LSL #2]\n"
- "ld1w { z31.s }, p0/Z, [x27, x3, LSL #2]\n"
- "ld1w { z30.s }, p0/Z, [x28, x3, LSL #2]\n"
- "ld1w { z29.s }, p0/Z, [x24, x3, LSL #2]\n"
- "ld1w { z28.s }, p0/Z, [x22, x3, LSL #2]\n"
- "ld1w { z27.s }, p0/Z, [x21, x3, LSL #2]\n"
- "ld1w { z26.s }, p0/Z, [x17, x3, LSL #2]\n"
- "ld1w { z25.s }, p0/Z, [x12, x3, LSL #2]\n"
- "ld1w { z24.s }, p0/Z, [x23, x3, LSL #2]\n"
- "ld1w { z23.s }, p0/Z, [x20, x3, LSL #2]\n"
+ "ld1w { z8.s }, p0/Z, [x10, x3, LSL #2]\n"
+ "ld1w { z7.s }, p0/Z, [x9, x3, LSL #2]\n"
+ "ld1w { z6.s }, p0/Z, [x26, x3, LSL #2]\n"
+ "ld1w { z5.s }, p0/Z, [x25, x3, LSL #2]\n"
+ "ld1w { z4.s }, p0/Z, [x14, x3, LSL #2]\n"
+ "ld1w { z3.s }, p0/Z, [x13, x3, LSL #2]\n"
+ "ld1w { z2.s }, p0/Z, [x11, x3, LSL #2]\n"
+ "ld1w { z1.s }, p0/Z, [x27, x3, LSL #2]\n"
+ "ld1w { z31.s }, p0/Z, [x28, x3, LSL #2]\n"
+ "ld1w { z30.s }, p0/Z, [x24, x3, LSL #2]\n"
+ "ld1w { z29.s }, p0/Z, [x22, x3, LSL #2]\n"
+ "ld1w { z28.s }, p0/Z, [x21, x3, LSL #2]\n"
+ "ld1w { z27.s }, p0/Z, [x15, x3, LSL #2]\n"
+ "ld1w { z26.s }, p0/Z, [x12, x3, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x23, x3, LSL #2]\n"
+ "ld1w { z24.s }, p0/Z, [x20, x3, LSL #2]\n"
"incw x3\n"
+ "ld1rqw { z0.s }, p1/Z, [x5]\n"
"whilelt p1.s, x3, x2\n"
- "ld1rqw { z0.s }, p2/Z, [x15]\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "fadd z17.s, z7.s, z6.s\n"
- "fadd z16.s, z5.s, z4.s\n"
- "ld1w { z7.s }, p1/Z, [x10, x3, LSL #2]\n"
- "ld1w { z6.s }, p1/Z, [x9, x3, LSL #2]\n"
- "fadd z19.s, z17.s, z16.s\n"
- "fadd z18.s, z3.s, z2.s\n"
- "ld1w { z5.s }, p1/Z, [x26, x3, LSL #2]\n"
- "ld1w { z4.s }, p1/Z, [x25, x3, LSL #2]\n"
- "fadd z17.s, z1.s, z31.s\n"
- "fadd z22.s, z30.s, z29.s\n"
- "ld1w { z3.s }, p1/Z, [x16, x3, LSL #2]\n"
- "ld1w { z2.s }, p1/Z, [x13, x3, LSL #2]\n"
- "fadd z16.s, z28.s, z27.s\n"
- "fadd z21.s, z18.s, z19.s\n"
- "ld1w { z1.s }, p1/Z, [x11, x3, LSL #2]\n"
- "ld1w { z31.s }, p1/Z, [x27, x3, LSL #2]\n"
- "fadd z20.s, z16.s, z19.s\n"
- "fadd z19.s, z26.s, z17.s\n"
- "ld1w { z30.s }, p1/Z, [x28, x3, LSL #2]\n"
- "ld1w { z29.s }, p1/Z, [x24, x3, LSL #2]\n"
- "fadd z18.s, z25.s, z22.s\n"
- "fadd z17.s, z24.s, z17.s\n"
- "ld1w { z28.s }, p1/Z, [x22, x3, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x21, x3, LSL #2]\n"
- "fadd z16.s, z23.s, z22.s\n"
- "ld1w { z26.s }, p1/Z, [x17, x3, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x12, x3, LSL #2]\n"
- "fadd z19.s, z21.s, z19.s\n"
- "ld1w { z24.s }, p1/Z, [x23, x3, LSL #2]\n"
- "ld1w { z23.s }, p1/Z, [x20, x3, LSL #2]\n"
+ "fadd z19.s, z8.s, z7.s\n"
+ "fadd z16.s, z6.s, z5.s\n"
+ "ld1w { z8.s }, p1/Z, [x10, x3, LSL #2]\n"
+ "ld1w { z7.s }, p1/Z, [x9, x3, LSL #2]\n"
+ "fadd z23.s, z4.s, z3.s\n"
+ "fadd z18.s, z2.s, z1.s\n"
+ "ld1w { z6.s }, p1/Z, [x26, x3, LSL #2]\n"
+ "ld1w { z5.s }, p1/Z, [x25, x3, LSL #2]\n"
+ "fadd z17.s, z31.s, z30.s\n"
+ "fadd z22.s, z29.s, z28.s\n"
+ "ld1w { z4.s }, p1/Z, [x14, x3, LSL #2]\n"
+ "ld1w { z3.s }, p1/Z, [x13, x3, LSL #2]\n"
+ "fadd z16.s, z19.s, z16.s\n"
+ "ld1w { z2.s }, p1/Z, [x11, x3, LSL #2]\n"
+ "ld1w { z1.s }, p1/Z, [x27, x3, LSL #2]\n"
+ "whilelt p0.s, x6, x2\n"
+ "fadd z19.s, z27.s, z18.s\n"
+ "fadd z21.s, z25.s, z18.s\n"
+ "ld1w { z31.s }, p1/Z, [x28, x3, LSL #2]\n"
+ "ld1w { z30.s }, p1/Z, [x24, x3, LSL #2]\n"
+ "fadd z18.s, z26.s, z17.s\n"
+ "fadd z20.s, z24.s, z17.s\n"
+ "ld1w { z29.s }, p1/Z, [x22, x3, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x21, x3, LSL #2]\n"
+ "fadd z17.s, z23.s, z16.s\n"
+ "fadd z16.s, z22.s, z16.s\n"
+ "ld1w { z27.s }, p1/Z, [x15, x3, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x12, x3, LSL #2]\n"
+ "ld1w { z25.s }, p1/Z, [x23, x3, LSL #2]\n"
+ "ld1w { z24.s }, p1/Z, [x20, x3, LSL #2]\n"
"incw x3\n"
- "fadd z18.s, z21.s, z18.s\n"
- "fadd z17.s, z17.s, z20.s\n"
- "fadd z16.s, z16.s, z20.s\n"
- "whilelt p0.s, x14, x2\n"
+ "fadd z19.s, z17.s, z19.s\n"
+ "fadd z18.s, z17.s, z18.s\n"
+ "fadd z17.s, z21.s, z16.s\n"
+ "fadd z16.s, z20.s, z16.s\n"
"whilelt p1.s, x3, x2\n"
"fmul z19.s, z19.s, z0.s[0]\n"
"fmul z18.s, z18.s, z0.s[1]\n"
- "st1w { z19.s }, p0, [x5, x14, LSL #2]\n"
"fmul z17.s, z17.s, z0.s[2]\n"
"fmul z16.s, z16.s, z0.s[3]\n"
- "st1w { z18.s }, p0, [x6, x14, LSL #2]\n"
- "st1w { z17.s }, p0, [x7, x14, LSL #2]\n"
- "st1w { z16.s }, p0, [x8, x14, LSL #2]\n"
- "incw x14\n"
+ "st1w { z19.s }, p0, [x7, x6, LSL #2]\n"
+ "st1w { z18.s }, p0, [x8, x6, LSL #2]\n"
+ "st1w { z17.s }, p0, [x17, x6, LSL #2]\n"
+ "st1w { z16.s }, p0, [x16, x6, LSL #2]\n"
+ "incw x6\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "fadd z17.s, z7.s, z6.s\n"
- "fadd z16.s, z5.s, z4.s\n"
- "whilelt p0.s, x14, x2\n"
- "fadd z20.s, z17.s, z16.s\n"
- "fadd z18.s, z3.s, z2.s\n"
- "fadd z17.s, z1.s, z31.s\n"
- "fadd z19.s, z30.s, z29.s\n"
- "fadd z16.s, z28.s, z27.s\n"
- "fadd z21.s, z18.s, z20.s\n"
- "fadd z20.s, z16.s, z20.s\n"
- "fadd z16.s, z26.s, z17.s\n"
- "fadd z18.s, z25.s, z19.s\n"
- "fadd z17.s, z24.s, z17.s\n"
- "fadd z19.s, z23.s, z19.s\n"
- "fadd z16.s, z21.s, z16.s\n"
- "fmul z16.s, z16.s, z0.s[0]\n"
- "st1w { z16.s }, p0, [x5, x14, LSL #2]\n"
- "fadd z18.s, z21.s, z18.s\n"
- "fadd z17.s, z17.s, z20.s\n"
+ "fadd z19.s, z8.s, z7.s\n"
+ "fadd z16.s, z6.s, z5.s\n"
+ "whilelt p0.s, x6, x2\n"
+ "fadd z23.s, z4.s, z3.s\n"
+ "fadd z18.s, z2.s, z1.s\n"
+ "fadd z17.s, z31.s, z30.s\n"
+ "fadd z22.s, z29.s, z28.s\n"
+ "fadd z16.s, z19.s, z16.s\n"
+ "fadd z19.s, z27.s, z18.s\n"
+ "fadd z21.s, z25.s, z18.s\n"
+ "fadd z18.s, z26.s, z17.s\n"
+ "fadd z20.s, z24.s, z17.s\n"
+ "fadd z17.s, z23.s, z16.s\n"
+ "fadd z16.s, z22.s, z16.s\n"
+ "fadd z19.s, z17.s, z19.s\n"
+ "fadd z18.s, z17.s, z18.s\n"
+ "fadd z17.s, z21.s, z16.s\n"
+ "fadd z16.s, z20.s, z16.s\n"
+ "fmul z19.s, z19.s, z0.s[0]\n"
"fmul z18.s, z18.s, z0.s[1]\n"
"fmul z17.s, z17.s, z0.s[2]\n"
- "fadd z16.s, z19.s, z20.s\n"
"fmul z16.s, z16.s, z0.s[3]\n"
- "st1w { z18.s }, p0, [x6, x14, LSL #2]\n"
- "st1w { z17.s }, p0, [x7, x14, LSL #2]\n"
- "st1w { z16.s }, p0, [x8, x14, LSL #2]\n"
+ "st1w { z19.s }, p0, [x7, x6, LSL #2]\n"
+ "st1w { z18.s }, p0, [x8, x6, LSL #2]\n"
+ "st1w { z17.s }, p0, [x17, x6, LSL #2]\n"
+ "st1w { z16.s }, p0, [x16, x6, LSL #2]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs)), [offsetof_rescale] "I" (offsetof(KernelArgs, rescale_vals))
- : "cc", "memory", "p0", "p1", "p2", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
index 7c94894892..4fd624ca9d 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,12 +46,12 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
"cntw x28\n"
"cntw x27, ALL, MUL #2\n"
"cntw x26, ALL, MUL #3\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
"whilelt p3.s, x9, %x[n_channels]\n"
- "ld1rw { z7.s }, p0/Z, [%x[rescale_ptr]]\n"
"whilelt p2.s, x28, %x[n_channels]\n"
"whilelt p1.s, x27, %x[n_channels]\n"
"whilelt p0.s, x26, %x[n_channels]\n"
+ "ld1rw { z7.s }, p4/Z, [%x[rescale_ptr]]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
@@ -93,17 +93,17 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
"add x24, x24, #0x20\n"
"fadd z21.s, z27.s, z21.s\n"
"fadd z17.s, z26.s, z17.s\n"
- "ld1w { z2.s }, p3/Z, [x23, x9, LSL #2]\n"
- "ld1w { z1.s }, p3/Z, [x22, x9, LSL #2]\n"
"fadd z20.s, z25.s, z20.s\n"
"fadd z16.s, z24.s, z16.s\n"
- "ld1w { z0.s }, p3/Z, [x21, x9, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z2.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x9, LSL #2]\n"
"fadd z19.s, z23.s, z19.s\n"
"fadd z18.s, z22.s, z18.s\n"
+ "ld1w { z0.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z31.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "fadd z17.s, z21.s, z17.s\n"
"ld1w { z30.s }, p2/Z, [x23, x28, LSL #2]\n"
"ld1w { z22.s }, p2/Z, [x22, x28, LSL #2]\n"
- "fadd z17.s, z21.s, z17.s\n"
"fadd z16.s, z20.s, z16.s\n"
"ld1w { z29.s }, p2/Z, [x21, x28, LSL #2]\n"
"ld1w { z28.s }, p2/Z, [x20, x28, LSL #2]\n"
@@ -142,30 +142,30 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1w { z16.s }, p3/Z, [x20, x9, LSL #2]\n"
"subs x21, x21, #0x1\n"
- "fadd z6.s, z6.s, z16.s\n"
- "ld1w { z17.s }, p2/Z, [x20, x28, LSL #2]\n"
- "ld1w { z16.s }, p1/Z, [x20, x27, LSL #2]\n"
- "fadd z5.s, z5.s, z17.s\n"
- "fadd z4.s, z4.s, z16.s\n"
+ "ld1w { z19.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z17.s }, p1/Z, [x20, x27, LSL #2]\n"
"ld1w { z16.s }, p0/Z, [x20, x26, LSL #2]\n"
+ "fadd z6.s, z6.s, z19.s\n"
+ "fadd z5.s, z5.s, z18.s\n"
+ "fadd z4.s, z4.s, z17.s\n"
"fadd z3.s, z3.s, z16.s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
"fmul z6.s, z6.s, z7.s\n"
"fmul z5.s, z5.s, z7.s\n"
- "st1w { z6.s }, p3, [%x[outptr], x9, LSL #2]\n"
"fmul z4.s, z4.s, z7.s\n"
"fmul z3.s, z3.s, z7.s\n"
- "st1w { z5.s }, p2, [%x[outptr], x28, LSL #2]\n"
- "st1w { z4.s }, p1, [%x[outptr], x27, LSL #2]\n"
+ "st1w { z6.s }, p3, [%x[outptr], x9, LSL #2]\n"
"incw x9, ALL, MUL #4\n"
+ "st1w { z5.s }, p2, [%x[outptr], x28, LSL #2]\n"
"incw x28, ALL, MUL #4\n"
+ "st1w { z4.s }, p1, [%x[outptr], x27, LSL #2]\n"
+ "incw x27, ALL, MUL #4\n"
"st1w { z3.s }, p0, [%x[outptr], x26, LSL #2]\n"
"incw x26, ALL, MUL #4\n"
"whilelt p0.s, x26, %x[n_channels]\n"
- "incw x27, ALL, MUL #4\n"
"b.any 1b\n"
"7:" // Single vector of channels
"whilelt p3.s, x9, %x[n_channels]\n"
@@ -189,14 +189,14 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
"fadd z16.s, z0.s, z31.s\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "fadd z16.s, z17.s, z16.s\n"
"subs x25, x25, #0x1\n"
- "fadd z6.s, z6.s, z16.s\n"
"add x24, x24, #0x20\n"
+ "fadd z16.s, z17.s, z16.s\n"
"ld1w { z2.s }, p3/Z, [x23, x9, LSL #2]\n"
"ld1w { z1.s }, p3/Z, [x22, x9, LSL #2]\n"
"ld1w { z0.s }, p3/Z, [x21, x9, LSL #2]\n"
"ld1w { z31.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "fadd z6.s, z6.s, z16.s\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
"fadd z17.s, z2.s, z1.s\n"
@@ -208,8 +208,8 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1w { z16.s }, p3/Z, [x20, x9, LSL #2]\n"
"subs x21, x21, #0x1\n"
+ "ld1w { z16.s }, p3/Z, [x20, x9, LSL #2]\n"
"fadd z6.s, z6.s, z16.s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
@@ -221,7 +221,7 @@ void sve_fp32_nhwc_avg_generic_depthfirst_impl(
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr), [rescale_ptr] "r" (&rescale_value)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index d9cebd1363..dcd182fa97 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,22 +66,22 @@ void sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr x15, [%x[args], %[offsetof_n_channels]]\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
"mov x14, #0x0\n"
- "whilelt p0.s, x14, x15\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x13, x12, [x21, #0x0]\n"
"ptrue p2.b\n"
- "mov x11, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [x21, #0x0]\n"
"ldp x10, x9, [x21, #0x10]\n"
+ "whilelt p0.s, x14, x15\n"
"ldp x28, x27, [x20, #0x0]\n"
"ldp x26, x25, [x20, #0x10]\n"
"ldp x24, x23, [x20, #0x20]\n"
"ldp x22, x21, [x20, #0x30]\n"
"ldr x20, [x20, #0x40]\n"
"ld1w { z31.s }, p0/Z, [x27, x14, LSL #2]\n"
- "ld1w { z30.s }, p0/Z, [x24, x14, LSL #2]\n"
- "ld1w { z29.s }, p0/Z, [x21, x14, LSL #2]\n"
+ "ld1w { z30.s }, p0/Z, [x28, x14, LSL #2]\n"
+ "ld1w { z29.s }, p0/Z, [x24, x14, LSL #2]\n"
"ld1w { z28.s }, p0/Z, [x25, x14, LSL #2]\n"
- "ld1w { z27.s }, p0/Z, [x28, x14, LSL #2]\n"
+ "ld1w { z27.s }, p0/Z, [x21, x14, LSL #2]\n"
"ld1w { z26.s }, p0/Z, [x26, x14, LSL #2]\n"
"ld1w { z25.s }, p0/Z, [x23, x14, LSL #2]\n"
"ld1w { z24.s }, p0/Z, [x22, x14, LSL #2]\n"
@@ -90,50 +90,50 @@ void sve_fp32_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"whilelt p1.s, x14, x15\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "movprfx z22, z31\n fmax z22.s, p2/M, z22.s, z30.s\n"
- "movprfx z21, z30\n fmax z21.s, p2/M, z21.s, z29.s\n"
+ "movprfx z22, z31\n fmax z22.s, p2/M, z22.s, z29.s\n"
+ "movprfx z21, z29\n fmax z21.s, p2/M, z21.s, z27.s\n"
"ld1w { z31.s }, p1/Z, [x27, x14, LSL #2]\n"
- "ld1w { z30.s }, p1/Z, [x24, x14, LSL #2]\n"
- "movprfx z20, z28\n fmax z20.s, p2/M, z20.s, z27.s\n"
- "movprfx z19, z26\n fmax z19.s, p2/M, z19.s, z25.s\n"
- "ld1w { z29.s }, p1/Z, [x21, x14, LSL #2]\n"
- "ld1w { z27.s }, p1/Z, [x28, x14, LSL #2]\n"
- "movprfx z17, z28\n fmax z17.s, p2/M, z17.s, z24.s\n"
- "movprfx z18, z25\n fmax z18.s, p2/M, z18.s, z23.s\n"
+ "ld1w { z29.s }, p1/Z, [x24, x14, LSL #2]\n"
+ "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z30.s\n"
+ "movprfx z17, z26\n fmax z17.s, p2/M, z17.s, z25.s\n"
+ "ld1w { z27.s }, p1/Z, [x21, x14, LSL #2]\n"
+ "ld1w { z30.s }, p1/Z, [x28, x14, LSL #2]\n"
+ "movprfx z16, z28\n fmax z16.s, p2/M, z16.s, z24.s\n"
+ "movprfx z20, z25\n fmax z20.s, p2/M, z20.s, z23.s\n"
"ld1w { z28.s }, p1/Z, [x25, x14, LSL #2]\n"
"ld1w { z26.s }, p1/Z, [x26, x14, LSL #2]\n"
"ld1w { z25.s }, p1/Z, [x23, x14, LSL #2]\n"
"ld1w { z24.s }, p1/Z, [x22, x14, LSL #2]\n"
- "whilelt p0.s, x11, x15\n"
- "movprfx z16, z22\n fmax z16.s, p2/M, z16.s, z20.s\n"
+ "whilelt p0.s, x13, x15\n"
"ld1w { z23.s }, p1/Z, [x20, x14, LSL #2]\n"
"incw x14\n"
+ "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z18.s\n"
+ "movprfx z18, z17\n fmax z18.s, p2/M, z18.s, z22.s\n"
+ "movprfx z17, z16\n fmax z17.s, p2/M, z17.s, z21.s\n"
+ "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z20.s\n"
"whilelt p1.s, x14, x15\n"
- "st1w { z16.s }, p0, [x13, x11, LSL #2]\n"
- "movprfx z16, z19\n fmax z16.s, p2/M, z16.s, z22.s\n"
- "fmax z17.s, p2/M, z17.s, z21.s\n"
- "st1w { z16.s }, p0, [x12, x11, LSL #2]\n"
- "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z18.s\n"
- "st1w { z17.s }, p0, [x10, x11, LSL #2]\n"
- "st1w { z16.s }, p0, [x9, x11, LSL #2]\n"
- "incw x11\n"
+ "st1w { z19.s }, p0, [x12, x13, LSL #2]\n"
+ "st1w { z18.s }, p0, [x11, x13, LSL #2]\n"
+ "st1w { z17.s }, p0, [x10, x13, LSL #2]\n"
+ "st1w { z16.s }, p0, [x9, x13, LSL #2]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "movprfx z22, z31\n fmax z22.s, p2/M, z22.s, z30.s\n"
- "movprfx z21, z30\n fmax z21.s, p2/M, z21.s, z29.s\n"
- "movprfx z20, z28\n fmax z20.s, p2/M, z20.s, z27.s\n"
- "movprfx z19, z26\n fmax z19.s, p2/M, z19.s, z25.s\n"
- "movprfx z17, z28\n fmax z17.s, p2/M, z17.s, z24.s\n"
- "movprfx z18, z25\n fmax z18.s, p2/M, z18.s, z23.s\n"
- "whilelt p0.s, x11, x15\n"
- "movprfx z16, z22\n fmax z16.s, p2/M, z16.s, z20.s\n"
- "st1w { z16.s }, p0, [x13, x11, LSL #2]\n"
- "movprfx z16, z19\n fmax z16.s, p2/M, z16.s, z22.s\n"
- "fmax z17.s, p2/M, z17.s, z21.s\n"
- "st1w { z16.s }, p0, [x12, x11, LSL #2]\n"
- "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z18.s\n"
- "st1w { z17.s }, p0, [x10, x11, LSL #2]\n"
- "st1w { z16.s }, p0, [x9, x11, LSL #2]\n"
+ "movprfx z22, z31\n fmax z22.s, p2/M, z22.s, z29.s\n"
+ "movprfx z21, z29\n fmax z21.s, p2/M, z21.s, z27.s\n"
+ "movprfx z18, z28\n fmax z18.s, p2/M, z18.s, z30.s\n"
+ "movprfx z17, z26\n fmax z17.s, p2/M, z17.s, z25.s\n"
+ "movprfx z16, z28\n fmax z16.s, p2/M, z16.s, z24.s\n"
+ "movprfx z20, z25\n fmax z20.s, p2/M, z20.s, z23.s\n"
+ "whilelt p0.s, x13, x15\n"
+ "movprfx z19, z22\n fmax z19.s, p2/M, z19.s, z18.s\n"
+ "movprfx z18, z17\n fmax z18.s, p2/M, z18.s, z22.s\n"
+ "movprfx z17, z16\n fmax z17.s, p2/M, z17.s, z21.s\n"
+ "movprfx z16, z21\n fmax z16.s, p2/M, z16.s, z20.s\n"
+ "st1w { z19.s }, p0, [x12, x13, LSL #2]\n"
+ "st1w { z18.s }, p0, [x11, x13, LSL #2]\n"
+ "st1w { z17.s }, p0, [x10, x13, LSL #2]\n"
+ "st1w { z16.s }, p0, [x9, x13, LSL #2]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
index 87fc75adda..132c8bd8db 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_fp32_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,176 +44,176 @@ void sve_fp32_nhwc_max_generic_depthfirst_impl(
"cntw x28\n"
"cntw x27, ALL, MUL #2\n"
"cntw x26, ALL, MUL #3\n"
- "whilelt p4.s, x9, %x[n_channels]\n"
- "whilelt p3.s, x28, %x[n_channels]\n"
- "whilelt p2.s, x27, %x[n_channels]\n"
- "whilelt p1.s, x26, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.s, x9, %x[n_channels]\n"
+ "whilelt p2.s, x28, %x[n_channels]\n"
+ "whilelt p1.s, x27, %x[n_channels]\n"
+ "whilelt p0.s, x26, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.s, #0xff800000\n"
- "mov z7.s, #0xff800000\n"
- "mov x24, %x[inptrs]\n"
"mov z6.s, #0xff800000\n"
"mov z5.s, #0xff800000\n"
+ "mov x24, %x[inptrs]\n"
+ "mov z4.s, #0xff800000\n"
+ "mov z3.s, #0xff800000\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1w { z4.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z3.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z2.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z1.s }, p4/Z, [x20, x9, LSL #2]\n"
- "ld1w { z0.s }, p3/Z, [x23, x28, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x22, x28, LSL #2]\n"
- "ld1w { z22.s }, p3/Z, [x21, x28, LSL #2]\n"
- "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
- "ld1w { z29.s }, p2/Z, [x23, x27, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x22, x27, LSL #2]\n"
- "ld1w { z21.s }, p2/Z, [x21, x27, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x20, x27, LSL #2]\n"
- "ld1w { z26.s }, p1/Z, [x23, x26, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x22, x26, LSL #2]\n"
- "ld1w { z20.s }, p1/Z, [x21, x26, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z2.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z23.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z31.s }, p2/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z30.s }, p2/Z, [x22, x28, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z28.s }, p1/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x22, x27, LSL #2]\n"
+ "ld1w { z21.s }, p1/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x20, x27, LSL #2]\n"
+ "ld1w { z16.s }, p0/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x22, x26, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z24.s }, p0/Z, [x20, x26, LSL #2]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
- "movprfx z23, z2\n fmax z23.s, p0/M, z23.s, z1.s\n"
+ "movprfx z19, z2\n fmax z19.s, p4/M, z19.s, z1.s\n"
+ "fmax z23.s, p4/M, z23.s, z0.s\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "movprfx z18, z0\n fmax z18.s, p0/M, z18.s, z31.s\n"
- "fmax z22.s, p0/M, z22.s, z30.s\n"
- "ld1w { z4.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z3.s }, p4/Z, [x22, x9, LSL #2]\n"
- "movprfx z17, z29\n fmax z17.s, p0/M, z17.s, z28.s\n"
- "fmax z21.s, p0/M, z21.s, z27.s\n"
- "ld1w { z2.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z1.s }, p4/Z, [x20, x9, LSL #2]\n"
- "movprfx z16, z26\n fmax z16.s, p0/M, z16.s, z25.s\n"
- "fmax z20.s, p0/M, z20.s, z24.s\n"
- "ld1w { z0.s }, p3/Z, [x23, x28, LSL #2]\n"
- "ld1w { z31.s }, p3/Z, [x22, x28, LSL #2]\n"
- "fmax z19.s, p0/M, z19.s, z23.s\n"
- "fmax z18.s, p0/M, z18.s, z22.s\n"
- "ld1w { z22.s }, p3/Z, [x21, x28, LSL #2]\n"
- "ld1w { z30.s }, p3/Z, [x20, x28, LSL #2]\n"
- "fmax z17.s, p0/M, z17.s, z21.s\n"
- "fmax z16.s, p0/M, z16.s, z20.s\n"
- "ld1w { z29.s }, p2/Z, [x23, x27, LSL #2]\n"
- "ld1w { z28.s }, p2/Z, [x22, x27, LSL #2]\n"
+ "movprfx z18, z31\n fmax z18.s, p4/M, z18.s, z30.s\n"
+ "fmax z22.s, p4/M, z22.s, z29.s\n"
+ "movprfx z17, z28\n fmax z17.s, p4/M, z17.s, z27.s\n"
+ "fmax z21.s, p4/M, z21.s, z26.s\n"
+ "fmax z16.s, p4/M, z16.s, z25.s\n"
+ "fmax z20.s, p4/M, z20.s, z24.s\n"
+ "ld1w { z2.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "fmax z19.s, p4/M, z19.s, z23.s\n"
+ "fmax z18.s, p4/M, z18.s, z22.s\n"
+ "ld1w { z23.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "fmax z17.s, p4/M, z17.s, z21.s\n"
"subs x25, x25, #0x1\n"
- "fmax z8.s, p0/M, z8.s, z19.s\n"
- "ld1w { z21.s }, p2/Z, [x21, x27, LSL #2]\n"
- "ld1w { z27.s }, p2/Z, [x20, x27, LSL #2]\n"
- "fmax z7.s, p0/M, z7.s, z18.s\n"
- "fmax z6.s, p0/M, z6.s, z17.s\n"
- "ld1w { z26.s }, p1/Z, [x23, x26, LSL #2]\n"
- "ld1w { z25.s }, p1/Z, [x22, x26, LSL #2]\n"
- "fmax z5.s, p0/M, z5.s, z16.s\n"
+ "ld1w { z31.s }, p2/Z, [x23, x28, LSL #2]\n"
+ "ld1w { z30.s }, p2/Z, [x22, x28, LSL #2]\n"
+ "fmax z16.s, p4/M, z16.s, z20.s\n"
"add x24, x24, #0x20\n"
- "ld1w { z20.s }, p1/Z, [x21, x26, LSL #2]\n"
- "ld1w { z24.s }, p1/Z, [x20, x26, LSL #2]\n"
+ "ld1w { z22.s }, p2/Z, [x21, x28, LSL #2]\n"
+ "ld1w { z29.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "fmax z6.s, p4/M, z6.s, z19.s\n"
+ "fmax z5.s, p4/M, z5.s, z18.s\n"
+ "ld1w { z28.s }, p1/Z, [x23, x27, LSL #2]\n"
+ "ld1w { z27.s }, p1/Z, [x22, x27, LSL #2]\n"
+ "fmax z4.s, p4/M, z4.s, z17.s\n"
+ "ld1w { z21.s }, p1/Z, [x21, x27, LSL #2]\n"
+ "ld1w { z26.s }, p1/Z, [x20, x27, LSL #2]\n"
+ "fmax z3.s, p4/M, z3.s, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x23, x26, LSL #2]\n"
+ "ld1w { z25.s }, p0/Z, [x22, x26, LSL #2]\n"
+ "ld1w { z20.s }, p0/Z, [x21, x26, LSL #2]\n"
+ "ld1w { z24.s }, p0/Z, [x20, x26, LSL #2]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z4\n fmax z19.s, p0/M, z19.s, z3.s\n"
- "movprfx z23, z2\n fmax z23.s, p0/M, z23.s, z1.s\n"
- "movprfx z18, z0\n fmax z18.s, p0/M, z18.s, z31.s\n"
- "fmax z22.s, p0/M, z22.s, z30.s\n"
- "movprfx z17, z29\n fmax z17.s, p0/M, z17.s, z28.s\n"
- "fmax z21.s, p0/M, z21.s, z27.s\n"
- "movprfx z16, z26\n fmax z16.s, p0/M, z16.s, z25.s\n"
- "fmax z20.s, p0/M, z20.s, z24.s\n"
- "fmax z19.s, p0/M, z19.s, z23.s\n"
- "fmax z18.s, p0/M, z18.s, z22.s\n"
- "fmax z17.s, p0/M, z17.s, z21.s\n"
- "fmax z16.s, p0/M, z16.s, z20.s\n"
- "fmax z8.s, p0/M, z8.s, z19.s\n"
- "fmax z7.s, p0/M, z7.s, z18.s\n"
- "fmax z6.s, p0/M, z6.s, z17.s\n"
- "fmax z5.s, p0/M, z5.s, z16.s\n"
+ "movprfx z19, z2\n fmax z19.s, p4/M, z19.s, z1.s\n"
+ "fmax z23.s, p4/M, z23.s, z0.s\n"
+ "movprfx z18, z31\n fmax z18.s, p4/M, z18.s, z30.s\n"
+ "fmax z22.s, p4/M, z22.s, z29.s\n"
+ "movprfx z17, z28\n fmax z17.s, p4/M, z17.s, z27.s\n"
+ "fmax z21.s, p4/M, z21.s, z26.s\n"
+ "fmax z16.s, p4/M, z16.s, z25.s\n"
+ "fmax z20.s, p4/M, z20.s, z24.s\n"
+ "fmax z19.s, p4/M, z19.s, z23.s\n"
+ "fmax z18.s, p4/M, z18.s, z22.s\n"
+ "fmax z17.s, p4/M, z17.s, z21.s\n"
+ "fmax z16.s, p4/M, z16.s, z20.s\n"
+ "fmax z6.s, p4/M, z6.s, z19.s\n"
+ "fmax z5.s, p4/M, z5.s, z18.s\n"
+ "fmax z4.s, p4/M, z4.s, z17.s\n"
+ "fmax z3.s, p4/M, z3.s, z16.s\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1w { z16.s }, p4/Z, [x20, x9, LSL #2]\n"
"subs x21, x21, #0x1\n"
- "fmax z8.s, p0/M, z8.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x20, x28, LSL #2]\n"
- "ld1w { z16.s }, p2/Z, [x20, x27, LSL #2]\n"
- "fmax z7.s, p0/M, z7.s, z17.s\n"
- "fmax z6.s, p0/M, z6.s, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20, x26, LSL #2]\n"
- "fmax z5.s, p0/M, z5.s, z16.s\n"
+ "ld1w { z19.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z18.s }, p2/Z, [x20, x28, LSL #2]\n"
+ "ld1w { z17.s }, p1/Z, [x20, x27, LSL #2]\n"
+ "ld1w { z16.s }, p0/Z, [x20, x26, LSL #2]\n"
+ "fmax z6.s, p4/M, z6.s, z19.s\n"
+ "fmax z5.s, p4/M, z5.s, z18.s\n"
+ "fmax z4.s, p4/M, z4.s, z17.s\n"
+ "fmax z3.s, p4/M, z3.s, z16.s\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1w { z8.s }, p4, [%x[outptr], x9, LSL #2]\n"
+ "st1w { z6.s }, p3, [%x[outptr], x9, LSL #2]\n"
"incw x9, ALL, MUL #4\n"
- "st1w { z7.s }, p3, [%x[outptr], x28, LSL #2]\n"
+ "st1w { z5.s }, p2, [%x[outptr], x28, LSL #2]\n"
"incw x28, ALL, MUL #4\n"
- "st1w { z6.s }, p2, [%x[outptr], x27, LSL #2]\n"
+ "st1w { z4.s }, p1, [%x[outptr], x27, LSL #2]\n"
"incw x27, ALL, MUL #4\n"
- "st1w { z5.s }, p1, [%x[outptr], x26, LSL #2]\n"
+ "st1w { z3.s }, p0, [%x[outptr], x26, LSL #2]\n"
"incw x26, ALL, MUL #4\n"
- "whilelt p1.s, x26, %x[n_channels]\n"
+ "whilelt p0.s, x26, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.s, x9, %x[n_channels]\n"
+ "whilelt p3.s, x9, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.s, #0xff800000\n"
+ "mov z6.s, #0xff800000\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1w { z4.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z3.s }, p4/Z, [x22, x9, LSL #2]\n"
- "ld1w { z2.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z1.s }, p4/Z, [x20, x9, LSL #2]\n"
+ "ld1w { z2.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z23.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x20, x9, LSL #2]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z4\n fmax z16.s, p0/M, z16.s, z3.s\n"
- "movprfx z17, z2\n fmax z17.s, p0/M, z17.s, z1.s\n"
+ "movprfx z16, z2\n fmax z16.s, p4/M, z16.s, z1.s\n"
+ "movprfx z17, z23\n fmax z17.s, p4/M, z17.s, z0.s\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "fmax z16.s, p0/M, z16.s, z17.s\n"
"subs x25, x25, #0x1\n"
- "ld1w { z4.s }, p4/Z, [x23, x9, LSL #2]\n"
- "ld1w { z3.s }, p4/Z, [x22, x9, LSL #2]\n"
- "fmax z8.s, p0/M, z8.s, z16.s\n"
"add x24, x24, #0x20\n"
- "ld1w { z2.s }, p4/Z, [x21, x9, LSL #2]\n"
- "ld1w { z1.s }, p4/Z, [x20, x9, LSL #2]\n"
+ "fmax z16.s, p4/M, z16.s, z17.s\n"
+ "ld1w { z2.s }, p3/Z, [x23, x9, LSL #2]\n"
+ "ld1w { z1.s }, p3/Z, [x22, x9, LSL #2]\n"
+ "ld1w { z23.s }, p3/Z, [x21, x9, LSL #2]\n"
+ "ld1w { z0.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "fmax z6.s, p4/M, z6.s, z16.s\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z4\n fmax z16.s, p0/M, z16.s, z3.s\n"
- "movprfx z17, z2\n fmax z17.s, p0/M, z17.s, z1.s\n"
- "fmax z16.s, p0/M, z16.s, z17.s\n"
- "fmax z8.s, p0/M, z8.s, z16.s\n"
+ "movprfx z16, z2\n fmax z16.s, p4/M, z16.s, z1.s\n"
+ "movprfx z17, z23\n fmax z17.s, p4/M, z17.s, z0.s\n"
+ "fmax z16.s, p4/M, z16.s, z17.s\n"
+ "fmax z6.s, p4/M, z6.s, z16.s\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1w { z16.s }, p4/Z, [x20, x9, LSL #2]\n"
"subs x21, x21, #0x1\n"
- "fmax z8.s, p0/M, z8.s, z16.s\n"
+ "ld1w { z16.s }, p3/Z, [x20, x9, LSL #2]\n"
+ "fmax z6.s, p4/M, z6.s, z16.s\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1w { z8.s }, p4, [%x[outptr], x9, LSL #2]\n"
+ "st1w { z6.s }, p3, [%x[outptr], x9, LSL #2]\n"
"incw x9\n"
- "whilelt p4.s, x9, %x[n_channels]\n"
+ "whilelt p3.s, x9, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
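Reviewer note: the generic fp32 max kernel gets the same predicate renumbering as the average kernels. The all-true predicate moves from p0 to p4, the four per-vector whilelt predicates shift down to p3..p0, and the running maxima move from z8..z5 to z6..z3, which is why z7 and z8 drop out of the clobber list; accumulators are still seeded with 0xff800000 (-inf). A hedged ACLE-intrinsics sketch of the predicated running-max pattern, assuming an SVE-enabled toolchain with <arm_sve.h>; the function name and parameters are illustrative:

#include <arm_sve.h>
#include <cstdint>
#include <limits>

static void max_pool_channels_f32(const float *const *inptrs,
                                  uint64_t n_valid_cells,
                                  uint64_t n_channels, float *outptr)
{
    for (uint64_t x = 0; x < n_channels; x += svcntw()) // one vector of channels per pass
    {
        const svbool_t pg = svwhilelt_b32_u64(x, n_channels); // "whilelt p3.s, x9, %x[n_channels]"
        svfloat32_t acc = svdup_n_f32(-std::numeric_limits<float>::infinity()); // 0xff800000 seed
        for (uint64_t i = 0; i < n_valid_cells; i++)
            acc = svmax_f32_m(pg, acc, svld1_f32(pg, inptrs[i] + x)); // "fmax z6.s, p4/M, ..."
        svst1_f32(pg, outptr + x, acc);
    }
}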
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
index 7925905e64..d59765af0a 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -99,11 +99,11 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"cntb x26\n"
"cntb x25, ALL, MUL #2\n"
"cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
+ "whilelt p0.b, x24, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x23, %x[n_valid_cells], #0x1\n"
@@ -128,14 +128,14 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z27.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x25]\n"
+ "ld1b { z25.b }, p0/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x24]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
@@ -145,24 +145,24 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x455c03b5 // saddlb z21.h, z29.b, z28.b\n"
".inst 0x455c07b4 // saddlt z20.h, z29.b, z28.b\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x455a0373 // saddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0772 // saddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
".inst 0x45580331 // saddlb z17.h, z25.b, z24.b\n"
".inst 0x45580730 // saddlt z16.h, z25.b, z24.b\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z29.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x20, x26]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z27.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x25]\n"
".inst 0x4595416b // saddwb z11.s, z11.s, z21.h\n"
".inst 0x4595454a // saddwt z10.s, z10.s, z21.h\n"
+ "ld1b { z25.b }, p0/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x24]\n"
".inst 0x45944129 // saddwb z9.s, z9.s, z20.h\n"
".inst 0x45944508 // saddwt z8.s, z8.s, z20.h\n"
".inst 0x459340e7 // saddwb z7.s, z7.s, z19.h\n"
@@ -204,17 +204,17 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
- ".inst 0x4508a217 // sshllb z23.h, z16.b, #0x0\n"
- ".inst 0x4508a616 // sshllt z22.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p3/Z, [x20, x26]\n"
- "ld1b { z17.b }, p2/Z, [x20, x25]\n"
- ".inst 0x4508a215 // sshllb z21.h, z16.b, #0x0\n"
- ".inst 0x4508a614 // sshllt z20.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p1/Z, [x20, x24]\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z19.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z18.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z17.b }, p1/Z, [x20, x25]\n"
+ "ld1b { z16.b }, p0/Z, [x20, x24]\n"
+ ".inst 0x4508a277 // sshllb z23.h, z19.b, #0x0\n"
+ ".inst 0x4508a676 // sshllt z22.h, z19.b, #0x0\n"
+ ".inst 0x4508a255 // sshllb z21.h, z18.b, #0x0\n"
+ ".inst 0x4508a654 // sshllt z20.h, z18.b, #0x0\n"
".inst 0x4508a233 // sshllb z19.h, z17.b, #0x0\n"
".inst 0x4508a632 // sshllt z18.h, z17.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x4508a211 // sshllb z17.h, z16.b, #0x0\n"
".inst 0x4508a610 // sshllt z16.h, z16.b, #0x0\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
@@ -235,98 +235,98 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
- "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
- ".inst 0x04b175ef // sqdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x04b175ce // sqdmulh z14.s, z14.s, z17.s\n"
- ".inst 0x04b175ad // sqdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x04b1758c // sqdmulh z12.s, z12.s, z17.s\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- ".inst 0x04b1756b // sqdmulh z11.s, z11.s, z17.s\n"
- ".inst 0x04b1754a // sqdmulh z10.s, z10.s, z17.s\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- ".inst 0x04b17529 // sqdmulh z9.s, z9.s, z17.s\n"
- ".inst 0x04b17508 // sqdmulh z8.s, z8.s, z17.s\n"
- ".inst 0x4482820b // srshl z11.s, p0/M, z11.s, z16.s\n"
- ".inst 0x4482820a // srshl z10.s, p0/M, z10.s, z16.s\n"
- ".inst 0x04b174e7 // sqdmulh z7.s, z7.s, z17.s\n"
- ".inst 0x04b174c6 // sqdmulh z6.s, z6.s, z17.s\n"
- ".inst 0x44828209 // srshl z9.s, p0/M, z9.s, z16.s\n"
- ".inst 0x44828208 // srshl z8.s, p0/M, z8.s, z16.s\n"
- ".inst 0x04b174a5 // sqdmulh z5.s, z5.s, z17.s\n"
- ".inst 0x04b17484 // sqdmulh z4.s, z4.s, z17.s\n"
- ".inst 0x44828207 // srshl z7.s, p0/M, z7.s, z16.s\n"
- ".inst 0x44828206 // srshl z6.s, p0/M, z6.s, z16.s\n"
- ".inst 0x04b17463 // sqdmulh z3.s, z3.s, z17.s\n"
- ".inst 0x04b17442 // sqdmulh z2.s, z2.s, z17.s\n"
- ".inst 0x44828205 // srshl z5.s, p0/M, z5.s, z16.s\n"
- ".inst 0x44828204 // srshl z4.s, p0/M, z4.s, z16.s\n"
- ".inst 0x04b17421 // sqdmulh z1.s, z1.s, z17.s\n"
- ".inst 0x04b17400 // sqdmulh z0.s, z0.s, z17.s\n"
- ".inst 0x44828203 // srshl z3.s, p0/M, z3.s, z16.s\n"
- ".inst 0x44828202 // srshl z2.s, p0/M, z2.s, z16.s\n"
- "mov z18.s, #0x7f\n"
- ".inst 0x44828201 // srshl z1.s, p0/M, z1.s, z16.s\n"
- ".inst 0x44828200 // srshl z0.s, p0/M, z0.s, z16.s\n"
- "not z16.s, p0/M, z18.s\n"
- "smax z15.s, p0/M, z15.s, z16.s\n"
- "smax z14.s, p0/M, z14.s, z16.s\n"
- "smax z13.s, p0/M, z13.s, z16.s\n"
- "smax z12.s, p0/M, z12.s, z16.s\n"
- "smax z11.s, p0/M, z11.s, z16.s\n"
- "smax z10.s, p0/M, z10.s, z16.s\n"
- "smax z9.s, p0/M, z9.s, z16.s\n"
- "smax z8.s, p0/M, z8.s, z16.s\n"
- "smax z7.s, p0/M, z7.s, z16.s\n"
- "smax z6.s, p0/M, z6.s, z16.s\n"
- "smax z5.s, p0/M, z5.s, z16.s\n"
- "smax z4.s, p0/M, z4.s, z16.s\n"
- "smax z3.s, p0/M, z3.s, z16.s\n"
- "smax z2.s, p0/M, z2.s, z16.s\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z18.s\n"
- "smin z14.s, p0/M, z14.s, z18.s\n"
- "smin z13.s, p0/M, z13.s, z18.s\n"
- "trn1 z17.h, z15.h, z14.h\n"
- "smin z12.s, p0/M, z12.s, z18.s\n"
- "smin z11.s, p0/M, z11.s, z18.s\n"
- "trn1 z16.h, z13.h, z12.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z10.s, p0/M, z10.s, z18.s\n"
- "smin z9.s, p0/M, z9.s, z18.s\n"
- "trn1 z17.h, z11.h, z10.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "smin z8.s, p0/M, z8.s, z18.s\n"
- "smin z7.s, p0/M, z7.s, z18.s\n"
+ "ld1rw { z18.s }, p4/Z, [%x[rescale_ptr]]\n"
+ "ld1rw { z17.s }, p4/Z, [%x[shift_ptr]]\n"
+ "mov z20.s, #0x7f\n"
+ "not z16.s, p4/M, z20.s\n"
+ ".inst 0x04b275ef // sqdmulh z15.s, z15.s, z18.s\n"
+ ".inst 0x04b275ce // sqdmulh z14.s, z14.s, z18.s\n"
+ ".inst 0x04b275ad // sqdmulh z13.s, z13.s, z18.s\n"
+ ".inst 0x04b2758c // sqdmulh z12.s, z12.s, z18.s\n"
+ ".inst 0x04b2756b // sqdmulh z11.s, z11.s, z18.s\n"
+ ".inst 0x04b2754a // sqdmulh z10.s, z10.s, z18.s\n"
+ ".inst 0x04b27529 // sqdmulh z9.s, z9.s, z18.s\n"
+ ".inst 0x04b27508 // sqdmulh z8.s, z8.s, z18.s\n"
+ ".inst 0x4482922f // srshl z15.s, p4/M, z15.s, z17.s\n"
+ ".inst 0x4482922e // srshl z14.s, p4/M, z14.s, z17.s\n"
+ ".inst 0x04b274e7 // sqdmulh z7.s, z7.s, z18.s\n"
+ ".inst 0x04b274c6 // sqdmulh z6.s, z6.s, z18.s\n"
+ ".inst 0x4482922d // srshl z13.s, p4/M, z13.s, z17.s\n"
+ ".inst 0x4482922c // srshl z12.s, p4/M, z12.s, z17.s\n"
+ ".inst 0x04b274a5 // sqdmulh z5.s, z5.s, z18.s\n"
+ ".inst 0x04b27484 // sqdmulh z4.s, z4.s, z18.s\n"
+ ".inst 0x4482922b // srshl z11.s, p4/M, z11.s, z17.s\n"
+ ".inst 0x4482922a // srshl z10.s, p4/M, z10.s, z17.s\n"
+ ".inst 0x04b27463 // sqdmulh z3.s, z3.s, z18.s\n"
+ ".inst 0x04b27442 // sqdmulh z2.s, z2.s, z18.s\n"
+ ".inst 0x44829229 // srshl z9.s, p4/M, z9.s, z17.s\n"
+ ".inst 0x44829228 // srshl z8.s, p4/M, z8.s, z17.s\n"
+ ".inst 0x04b27421 // sqdmulh z1.s, z1.s, z18.s\n"
+ ".inst 0x04b27400 // sqdmulh z0.s, z0.s, z18.s\n"
+ ".inst 0x44829227 // srshl z7.s, p4/M, z7.s, z17.s\n"
+ ".inst 0x44829226 // srshl z6.s, p4/M, z6.s, z17.s\n"
+ ".inst 0x44829225 // srshl z5.s, p4/M, z5.s, z17.s\n"
+ ".inst 0x44829224 // srshl z4.s, p4/M, z4.s, z17.s\n"
+ ".inst 0x44829223 // srshl z3.s, p4/M, z3.s, z17.s\n"
+ ".inst 0x44829222 // srshl z2.s, p4/M, z2.s, z17.s\n"
+ ".inst 0x44829221 // srshl z1.s, p4/M, z1.s, z17.s\n"
+ ".inst 0x44829220 // srshl z0.s, p4/M, z0.s, z17.s\n"
+ "smax z15.s, p4/M, z15.s, z16.s\n"
+ "smax z14.s, p4/M, z14.s, z16.s\n"
+ "smax z13.s, p4/M, z13.s, z16.s\n"
+ "smax z12.s, p4/M, z12.s, z16.s\n"
+ "smax z11.s, p4/M, z11.s, z16.s\n"
+ "smax z10.s, p4/M, z10.s, z16.s\n"
+ "smax z9.s, p4/M, z9.s, z16.s\n"
+ "smax z8.s, p4/M, z8.s, z16.s\n"
+ "smax z7.s, p4/M, z7.s, z16.s\n"
+ "smax z6.s, p4/M, z6.s, z16.s\n"
+ "smax z5.s, p4/M, z5.s, z16.s\n"
+ "smax z4.s, p4/M, z4.s, z16.s\n"
+ "smax z3.s, p4/M, z3.s, z16.s\n"
+ "smax z2.s, p4/M, z2.s, z16.s\n"
+ "smax z1.s, p4/M, z1.s, z16.s\n"
+ "smax z0.s, p4/M, z0.s, z16.s\n"
+ "smin z15.s, p4/M, z15.s, z20.s\n"
+ "smin z14.s, p4/M, z14.s, z20.s\n"
+ "smin z13.s, p4/M, z13.s, z20.s\n"
+ "smin z12.s, p4/M, z12.s, z20.s\n"
+ "smin z11.s, p4/M, z11.s, z20.s\n"
+ "smin z10.s, p4/M, z10.s, z20.s\n"
+ "smin z9.s, p4/M, z9.s, z20.s\n"
+ "smin z8.s, p4/M, z8.s, z20.s\n"
+ "trn1 z19.h, z15.h, z14.h\n"
+ "smin z7.s, p4/M, z7.s, z20.s\n"
+ "smin z6.s, p4/M, z6.s, z20.s\n"
+ "trn1 z17.h, z13.h, z12.h\n"
+ "smin z5.s, p4/M, z5.s, z20.s\n"
+ "smin z4.s, p4/M, z4.s, z20.s\n"
+ "trn1 z18.h, z11.h, z10.h\n"
+ "smin z3.s, p4/M, z3.s, z20.s\n"
+ "smin z2.s, p4/M, z2.s, z20.s\n"
"trn1 z16.h, z9.h, z8.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z6.s, p0/M, z6.s, z18.s\n"
- "smin z5.s, p0/M, z5.s, z18.s\n"
- "trn1 z17.h, z7.h, z6.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x26]\n"
- "smin z4.s, p0/M, z4.s, z18.s\n"
- "smin z3.s, p0/M, z3.s, z18.s\n"
- "trn1 z16.h, z5.h, z4.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z2.s, p0/M, z2.s, z18.s\n"
- "smin z1.s, p0/M, z1.s, z18.s\n"
- "trn1 z17.h, z3.h, z2.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x25]\n"
- "smin z0.s, p0/M, z0.s, z18.s\n"
+ "smin z1.s, p4/M, z1.s, z20.s\n"
+ "smin z0.s, p4/M, z0.s, z20.s\n"
+ "trn1 z21.h, z7.h, z6.h\n"
+ "trn1 z20.b, z19.b, z17.b\n"
+ "trn1 z17.h, z5.h, z4.h\n"
+ "trn1 z19.h, z3.h, z2.h\n"
+ "trn1 z18.b, z18.b, z16.b\n"
"trn1 z16.h, z1.h, z0.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
- "incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "st1b { z20.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
+ "trn1 z17.b, z21.b, z17.b\n"
+ "trn1 z16.b, z19.b, z16.b\n"
+ "st1b { z18.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
+ "st1b { z17.b }, p1, [%x[outptr], x25]\n"
"incb x25, ALL, MUL #4\n"
+ "st1b { z16.b }, p0, [%x[outptr], x24]\n"
+ "incb x24, ALL, MUL #4\n"
+ "whilelt p0.b, x24, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x23, %x[n_valid_cells], #0x1\n"
@@ -339,21 +339,21 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e03f1 // saddlb z17.h, z31.b, z30.b\n"
".inst 0x455e07f0 // saddlt z16.h, z31.b, z30.b\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
+ "add x22, x22, #0x10\n"
".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
- "add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x459041ad // saddwb z13.s, z13.s, z16.h\n"
".inst 0x4590458c // saddwt z12.s, z12.s, z16.h\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e03f1 // saddlb z17.h, z31.b, z30.b\n"
@@ -367,42 +367,42 @@ void sve_s8_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z16.b }, p3/Z, [x20, x27]\n"
".inst 0x4508a211 // sshllb z17.h, z16.b, #0x0\n"
".inst 0x4508a610 // sshllt z16.h, z16.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
".inst 0x459041ad // saddwb z13.s, z13.s, z16.h\n"
".inst 0x4590458c // saddwt z12.s, z12.s, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
- "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
- ".inst 0x04b175ef // sqdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x04b175ce // sqdmulh z14.s, z14.s, z17.s\n"
- ".inst 0x04b175ad // sqdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x04b1758c // sqdmulh z12.s, z12.s, z17.s\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- "mov z18.s, #0x7f\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- "not z16.s, p0/M, z18.s\n"
- "smax z15.s, p0/M, z15.s, z16.s\n"
- "smax z14.s, p0/M, z14.s, z16.s\n"
- "smax z13.s, p0/M, z13.s, z16.s\n"
- "smax z12.s, p0/M, z12.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z18.s\n"
- "smin z14.s, p0/M, z14.s, z18.s\n"
- "smin z13.s, p0/M, z13.s, z18.s\n"
+ "ld1rw { z19.s }, p4/Z, [%x[rescale_ptr]]\n"
+ "ld1rw { z18.s }, p4/Z, [%x[shift_ptr]]\n"
+ "mov z17.s, #0x7f\n"
+ "not z16.s, p4/M, z17.s\n"
+ ".inst 0x04b375ef // sqdmulh z15.s, z15.s, z19.s\n"
+ ".inst 0x04b375ce // sqdmulh z14.s, z14.s, z19.s\n"
+ ".inst 0x04b375ad // sqdmulh z13.s, z13.s, z19.s\n"
+ ".inst 0x04b3758c // sqdmulh z12.s, z12.s, z19.s\n"
+ ".inst 0x4482924f // srshl z15.s, p4/M, z15.s, z18.s\n"
+ ".inst 0x4482924e // srshl z14.s, p4/M, z14.s, z18.s\n"
+ ".inst 0x4482924d // srshl z13.s, p4/M, z13.s, z18.s\n"
+ ".inst 0x4482924c // srshl z12.s, p4/M, z12.s, z18.s\n"
+ "smax z15.s, p4/M, z15.s, z16.s\n"
+ "smax z14.s, p4/M, z14.s, z16.s\n"
+ "smax z13.s, p4/M, z13.s, z16.s\n"
+ "smax z12.s, p4/M, z12.s, z16.s\n"
+ "smin z15.s, p4/M, z15.s, z17.s\n"
+ "smin z14.s, p4/M, z14.s, z17.s\n"
+ "smin z13.s, p4/M, z13.s, z17.s\n"
+ "smin z12.s, p4/M, z12.s, z17.s\n"
"trn1 z17.h, z15.h, z14.h\n"
- "smin z12.s, p0/M, z12.s, z18.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x27]\n"
"incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
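Reviewer note: the s8 average kernel keeps its widening-accumulate structure (saddlb/saddlt to 16 bits, saddwb/saddwt into 32-bit accumulators) and only reorders it; the epilogue is regrouped so the sqdmulh rescales, srshl rounding shifts, smax/smin clamps and trn1 narrowing steps each run as straight-line batches. A scalar model of that requantization step, with illustrative names; the sqdmulh saturation corner (both operands INT32_MIN) is omitted for brevity:

#include <algorithm>
#include <cstdint>

static int8_t requantize_s8(int32_t acc, int32_t multiplier, int32_t right_shift)
{
    // sqdmulh: doubling high-half multiply, (2*acc*mult) >> 32 == (acc*mult) >> 31.
    const int32_t high = (int32_t)(((int64_t)acc * multiplier) >> 31);
    // srshl with a negative shift operand is a rounding right shift;
    // right_shift here is that shift's magnitude.
    const int32_t rounded = right_shift > 0
        ? (high + (1 << (right_shift - 1))) >> right_shift
        : high;
    // "smin ..., #0x7f" and "smax ..., not(#0x7f)" clamp to [-128, 127].
    return (int8_t)std::min(127, std::max(-128, rounded));
}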
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 5681cc1f3d..6e9422025c 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,22 +66,22 @@ void sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr x15, [%x[args], %[offsetof_n_channels]]\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
"mov x14, #0x0\n"
- "whilelt p0.b, x14, x15\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x13, x12, [x21, #0x0]\n"
"ptrue p2.b\n"
- "mov x11, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [x21, #0x0]\n"
"ldp x10, x9, [x21, #0x10]\n"
+ "whilelt p0.b, x14, x15\n"
"ldp x28, x27, [x20, #0x0]\n"
"ldp x26, x25, [x20, #0x10]\n"
"ldp x24, x23, [x20, #0x20]\n"
"ldp x22, x21, [x20, #0x30]\n"
"ldr x20, [x20, #0x40]\n"
"ld1b { z31.b }, p0/Z, [x27, x14]\n"
- "ld1b { z30.b }, p0/Z, [x24, x14]\n"
- "ld1b { z29.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z30.b }, p0/Z, [x28, x14]\n"
+ "ld1b { z29.b }, p0/Z, [x24, x14]\n"
"ld1b { z28.b }, p0/Z, [x25, x14]\n"
- "ld1b { z27.b }, p0/Z, [x28, x14]\n"
+ "ld1b { z27.b }, p0/Z, [x21, x14]\n"
"ld1b { z26.b }, p0/Z, [x26, x14]\n"
"ld1b { z25.b }, p0/Z, [x23, x14]\n"
"ld1b { z24.b }, p0/Z, [x22, x14]\n"
@@ -90,50 +90,50 @@ void sve_s8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"whilelt p1.b, x14, x15\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "movprfx z22, z31\n smax z22.b, p2/M, z22.b, z30.b\n"
- "movprfx z21, z30\n smax z21.b, p2/M, z21.b, z29.b\n"
+ "movprfx z22, z31\n smax z22.b, p2/M, z22.b, z29.b\n"
+ "movprfx z21, z29\n smax z21.b, p2/M, z21.b, z27.b\n"
"ld1b { z31.b }, p1/Z, [x27, x14]\n"
- "ld1b { z30.b }, p1/Z, [x24, x14]\n"
- "movprfx z20, z28\n smax z20.b, p2/M, z20.b, z27.b\n"
- "movprfx z19, z26\n smax z19.b, p2/M, z19.b, z25.b\n"
- "ld1b { z29.b }, p1/Z, [x21, x14]\n"
- "ld1b { z27.b }, p1/Z, [x28, x14]\n"
- "movprfx z17, z28\n smax z17.b, p2/M, z17.b, z24.b\n"
- "movprfx z18, z25\n smax z18.b, p2/M, z18.b, z23.b\n"
+ "ld1b { z29.b }, p1/Z, [x24, x14]\n"
+ "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z30.b\n"
+ "movprfx z17, z26\n smax z17.b, p2/M, z17.b, z25.b\n"
+ "ld1b { z27.b }, p1/Z, [x21, x14]\n"
+ "ld1b { z30.b }, p1/Z, [x28, x14]\n"
+ "movprfx z16, z28\n smax z16.b, p2/M, z16.b, z24.b\n"
+ "movprfx z20, z25\n smax z20.b, p2/M, z20.b, z23.b\n"
"ld1b { z28.b }, p1/Z, [x25, x14]\n"
"ld1b { z26.b }, p1/Z, [x26, x14]\n"
"ld1b { z25.b }, p1/Z, [x23, x14]\n"
"ld1b { z24.b }, p1/Z, [x22, x14]\n"
- "whilelt p0.b, x11, x15\n"
- "movprfx z16, z22\n smax z16.b, p2/M, z16.b, z20.b\n"
+ "whilelt p0.b, x13, x15\n"
"ld1b { z23.b }, p1/Z, [x20, x14]\n"
"incw x14\n"
+ "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z18.b\n"
+ "movprfx z18, z17\n smax z18.b, p2/M, z18.b, z22.b\n"
+ "movprfx z17, z16\n smax z17.b, p2/M, z17.b, z21.b\n"
+ "movprfx z16, z21\n smax z16.b, p2/M, z16.b, z20.b\n"
"whilelt p1.b, x14, x15\n"
- "st1b { z16.b }, p0, [x13, x11]\n"
- "movprfx z16, z19\n smax z16.b, p2/M, z16.b, z22.b\n"
- "smax z17.b, p2/M, z17.b, z21.b\n"
- "st1b { z16.b }, p0, [x12, x11]\n"
- "movprfx z16, z21\n smax z16.b, p2/M, z16.b, z18.b\n"
- "st1b { z17.b }, p0, [x10, x11]\n"
- "st1b { z16.b }, p0, [x9, x11]\n"
- "incw x11\n"
+ "st1b { z19.b }, p0, [x12, x13]\n"
+ "st1b { z18.b }, p0, [x11, x13]\n"
+ "st1b { z17.b }, p0, [x10, x13]\n"
+ "st1b { z16.b }, p0, [x9, x13]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "movprfx z22, z31\n smax z22.b, p2/M, z22.b, z30.b\n"
- "movprfx z21, z30\n smax z21.b, p2/M, z21.b, z29.b\n"
- "movprfx z20, z28\n smax z20.b, p2/M, z20.b, z27.b\n"
- "movprfx z19, z26\n smax z19.b, p2/M, z19.b, z25.b\n"
- "movprfx z17, z28\n smax z17.b, p2/M, z17.b, z24.b\n"
- "movprfx z18, z25\n smax z18.b, p2/M, z18.b, z23.b\n"
- "whilelt p0.b, x11, x15\n"
- "movprfx z16, z22\n smax z16.b, p2/M, z16.b, z20.b\n"
- "st1b { z16.b }, p0, [x13, x11]\n"
- "movprfx z16, z19\n smax z16.b, p2/M, z16.b, z22.b\n"
- "smax z17.b, p2/M, z17.b, z21.b\n"
- "st1b { z16.b }, p0, [x12, x11]\n"
- "movprfx z16, z21\n smax z16.b, p2/M, z16.b, z18.b\n"
- "st1b { z17.b }, p0, [x10, x11]\n"
- "st1b { z16.b }, p0, [x9, x11]\n"
+ "movprfx z22, z31\n smax z22.b, p2/M, z22.b, z29.b\n"
+ "movprfx z21, z29\n smax z21.b, p2/M, z21.b, z27.b\n"
+ "movprfx z18, z28\n smax z18.b, p2/M, z18.b, z30.b\n"
+ "movprfx z17, z26\n smax z17.b, p2/M, z17.b, z25.b\n"
+ "movprfx z16, z28\n smax z16.b, p2/M, z16.b, z24.b\n"
+ "movprfx z20, z25\n smax z20.b, p2/M, z20.b, z23.b\n"
+ "whilelt p0.b, x13, x15\n"
+ "movprfx z19, z22\n smax z19.b, p2/M, z19.b, z18.b\n"
+ "movprfx z18, z17\n smax z18.b, p2/M, z18.b, z22.b\n"
+ "movprfx z17, z16\n smax z17.b, p2/M, z17.b, z21.b\n"
+ "movprfx z16, z21\n smax z16.b, p2/M, z16.b, z20.b\n"
+ "st1b { z19.b }, p0, [x12, x13]\n"
+ "st1b { z18.b }, p0, [x11, x13]\n"
+ "st1b { z17.b }, p0, [x10, x13]\n"
+ "st1b { z16.b }, p0, [x9, x13]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
index da9e1408f9..0d9f607066 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,176 +44,176 @@ void sve_s8_nhwc_max_generic_depthfirst_impl(
"cntb x28\n"
"cntb x27, ALL, MUL #2\n"
"cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
+ "whilelt p2.b, x28, %x[n_channels]\n"
+ "whilelt p1.b, x27, %x[n_channels]\n"
+ "whilelt p0.b, x26, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.b, #0x80\n"
- "mov z7.b, #0x80\n"
- "mov x24, %x[inptrs]\n"
"mov z6.b, #0x80\n"
"mov z5.b, #0x80\n"
+ "mov x24, %x[inptrs]\n"
+ "mov z4.b, #0x80\n"
+ "mov z3.b, #0x80\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
- "ld1b { z0.b }, p3/Z, [x23, x28]\n"
- "ld1b { z31.b }, p3/Z, [x22, x28]\n"
- "ld1b { z22.b }, p3/Z, [x21, x28]\n"
- "ld1b { z30.b }, p3/Z, [x20, x28]\n"
- "ld1b { z29.b }, p2/Z, [x23, x27]\n"
- "ld1b { z28.b }, p2/Z, [x22, x27]\n"
- "ld1b { z21.b }, p2/Z, [x21, x27]\n"
- "ld1b { z27.b }, p2/Z, [x20, x27]\n"
- "ld1b { z26.b }, p1/Z, [x23, x26]\n"
- "ld1b { z25.b }, p1/Z, [x22, x26]\n"
- "ld1b { z20.b }, p1/Z, [x21, x26]\n"
- "ld1b { z24.b }, p1/Z, [x20, x26]\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p2/Z, [x23, x28]\n"
+ "ld1b { z30.b }, p2/Z, [x22, x28]\n"
+ "ld1b { z22.b }, p2/Z, [x21, x28]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x28]\n"
+ "ld1b { z28.b }, p1/Z, [x23, x27]\n"
+ "ld1b { z27.b }, p1/Z, [x22, x27]\n"
+ "ld1b { z21.b }, p1/Z, [x21, x27]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x27]\n"
+ "ld1b { z16.b }, p0/Z, [x23, x26]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x26]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x26]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
- "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
+ "movprfx z19, z2\n smax z19.b, p4/M, z19.b, z1.b\n"
+ "smax z23.b, p4/M, z23.b, z0.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
- "smax z22.b, p0/M, z22.b, z30.b\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
- "smax z21.b, p0/M, z21.b, z27.b\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
- "movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
- "smax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z0.b }, p3/Z, [x23, x28]\n"
- "ld1b { z31.b }, p3/Z, [x22, x28]\n"
- "smax z19.b, p0/M, z19.b, z23.b\n"
- "smax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z22.b }, p3/Z, [x21, x28]\n"
- "ld1b { z30.b }, p3/Z, [x20, x28]\n"
- "smax z17.b, p0/M, z17.b, z21.b\n"
- "smax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z29.b }, p2/Z, [x23, x27]\n"
- "ld1b { z28.b }, p2/Z, [x22, x27]\n"
+ "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
+ "smax z22.b, p4/M, z22.b, z29.b\n"
+ "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
+ "smax z21.b, p4/M, z21.b, z26.b\n"
+ "smax z16.b, p4/M, z16.b, z25.b\n"
+ "smax z20.b, p4/M, z20.b, z24.b\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "smax z19.b, p4/M, z19.b, z23.b\n"
+ "smax z18.b, p4/M, z18.b, z22.b\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "smax z17.b, p4/M, z17.b, z21.b\n"
"subs x25, x25, #0x1\n"
- "smax z8.b, p0/M, z8.b, z19.b\n"
- "ld1b { z21.b }, p2/Z, [x21, x27]\n"
- "ld1b { z27.b }, p2/Z, [x20, x27]\n"
- "smax z7.b, p0/M, z7.b, z18.b\n"
- "smax z6.b, p0/M, z6.b, z17.b\n"
- "ld1b { z26.b }, p1/Z, [x23, x26]\n"
- "ld1b { z25.b }, p1/Z, [x22, x26]\n"
- "smax z5.b, p0/M, z5.b, z16.b\n"
+ "ld1b { z31.b }, p2/Z, [x23, x28]\n"
+ "ld1b { z30.b }, p2/Z, [x22, x28]\n"
+ "smax z16.b, p4/M, z16.b, z20.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z20.b }, p1/Z, [x21, x26]\n"
- "ld1b { z24.b }, p1/Z, [x20, x26]\n"
+ "ld1b { z22.b }, p2/Z, [x21, x28]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x28]\n"
+ "smax z6.b, p4/M, z6.b, z19.b\n"
+ "smax z5.b, p4/M, z5.b, z18.b\n"
+ "ld1b { z28.b }, p1/Z, [x23, x27]\n"
+ "ld1b { z27.b }, p1/Z, [x22, x27]\n"
+ "smax z4.b, p4/M, z4.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x21, x27]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x27]\n"
+ "smax z3.b, p4/M, z3.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x23, x26]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x26]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x26]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x26]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
- "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
- "movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
- "smax z22.b, p0/M, z22.b, z30.b\n"
- "movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
- "smax z21.b, p0/M, z21.b, z27.b\n"
- "movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
- "smax z20.b, p0/M, z20.b, z24.b\n"
- "smax z19.b, p0/M, z19.b, z23.b\n"
- "smax z18.b, p0/M, z18.b, z22.b\n"
- "smax z17.b, p0/M, z17.b, z21.b\n"
- "smax z16.b, p0/M, z16.b, z20.b\n"
- "smax z8.b, p0/M, z8.b, z19.b\n"
- "smax z7.b, p0/M, z7.b, z18.b\n"
- "smax z6.b, p0/M, z6.b, z17.b\n"
- "smax z5.b, p0/M, z5.b, z16.b\n"
+ "movprfx z19, z2\n smax z19.b, p4/M, z19.b, z1.b\n"
+ "smax z23.b, p4/M, z23.b, z0.b\n"
+ "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
+ "smax z22.b, p4/M, z22.b, z29.b\n"
+ "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
+ "smax z21.b, p4/M, z21.b, z26.b\n"
+ "smax z16.b, p4/M, z16.b, z25.b\n"
+ "smax z20.b, p4/M, z20.b, z24.b\n"
+ "smax z19.b, p4/M, z19.b, z23.b\n"
+ "smax z18.b, p4/M, z18.b, z22.b\n"
+ "smax z17.b, p4/M, z17.b, z21.b\n"
+ "smax z16.b, p4/M, z16.b, z20.b\n"
+ "smax z6.b, p4/M, z6.b, z19.b\n"
+ "smax z5.b, p4/M, z5.b, z18.b\n"
+ "smax z4.b, p4/M, z4.b, z17.b\n"
+ "smax z3.b, p4/M, z3.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "smax z8.b, p0/M, z8.b, z16.b\n"
- "ld1b { z17.b }, p3/Z, [x20, x28]\n"
- "ld1b { z16.b }, p2/Z, [x20, x27]\n"
- "smax z7.b, p0/M, z7.b, z17.b\n"
- "smax z6.b, p0/M, z6.b, z16.b\n"
- "ld1b { z16.b }, p1/Z, [x20, x26]\n"
- "smax z5.b, p0/M, z5.b, z16.b\n"
+ "ld1b { z19.b }, p3/Z, [x20, x9]\n"
+ "ld1b { z18.b }, p2/Z, [x20, x28]\n"
+ "ld1b { z17.b }, p1/Z, [x20, x27]\n"
+ "ld1b { z16.b }, p0/Z, [x20, x26]\n"
+ "smax z6.b, p4/M, z6.b, z19.b\n"
+ "smax z5.b, p4/M, z5.b, z18.b\n"
+ "smax z4.b, p4/M, z4.b, z17.b\n"
+ "smax z3.b, p4/M, z3.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1b { z8.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z6.b }, p3, [%x[outptr], x9]\n"
"incb x9, ALL, MUL #4\n"
- "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z5.b }, p2, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z6.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z4.b }, p1, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z5.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z3.b }, p0, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "whilelt p0.b, x26, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.b, #0x80\n"
+ "mov z6.b, #0x80\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z4\n smax z16.b, p0/M, z16.b, z3.b\n"
- "movprfx z17, z2\n smax z17.b, p0/M, z17.b, z1.b\n"
+ "movprfx z16, z2\n smax z16.b, p4/M, z16.b, z1.b\n"
+ "movprfx z17, z23\n smax z17.b, p4/M, z17.b, z0.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "smax z16.b, p0/M, z16.b, z17.b\n"
"subs x25, x25, #0x1\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "smax z8.b, p0/M, z8.b, z16.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
+ "smax z16.b, p4/M, z16.b, z17.b\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "smax z6.b, p4/M, z6.b, z16.b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z4\n smax z16.b, p0/M, z16.b, z3.b\n"
- "movprfx z17, z2\n smax z17.b, p0/M, z17.b, z1.b\n"
- "smax z16.b, p0/M, z16.b, z17.b\n"
- "smax z8.b, p0/M, z8.b, z16.b\n"
+ "movprfx z16, z2\n smax z16.b, p4/M, z16.b, z1.b\n"
+ "movprfx z17, z23\n smax z17.b, p4/M, z17.b, z0.b\n"
+ "smax z16.b, p4/M, z16.b, z17.b\n"
+ "smax z6.b, p4/M, z6.b, z16.b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "smax z8.b, p0/M, z8.b, z16.b\n"
+ "ld1b { z16.b }, p3/Z, [x20, x9]\n"
+ "smax z6.b, p4/M, z6.b, z16.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1b { z8.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z6.b }, p3, [%x[outptr], x9]\n"
"incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
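Reviewer note: the s8 generic max kernel mirrors the fp32 restructuring one-for-one at byte granularity: ptrue moves to p4, the whilelt predicates to p3..p0, the accumulators z8..z5 become z6..z3 (so z7/z8 leave the clobber list), and the seed is #0x80, i.e. INT8_MIN. The byte-element counterpart of the earlier intrinsics sketch, again with illustrative names:

#include <arm_sve.h>
#include <cstdint>

static void max_pool_channels_s8(const int8_t *const *inptrs,
                                 uint64_t n_valid_cells,
                                 uint64_t n_channels, int8_t *outptr)
{
    for (uint64_t x = 0; x < n_channels; x += svcntb()) // byte elements: cntb, not cntw
    {
        const svbool_t pg = svwhilelt_b8_u64(x, n_channels);
        svint8_t acc = svdup_n_s8(INT8_MIN); // the "mov z6.b, #0x80" seed
        for (uint64_t i = 0; i < n_valid_cells; i++)
            acc = svmax_s8_m(pg, acc, svld1_s8(pg, inptrs[i] + x)); // "smax z6.b, p4/M, ..."
        svst1_s8(pg, outptr + x, acc);
    }
}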
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
index 19a3b112ad..f09cbc9666 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -118,11 +118,11 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"cntb x26\n"
"cntb x25, ALL, MUL #2\n"
"cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
+ "whilelt p0.b, x24, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x23, %x[n_valid_cells], #0x1\n"
@@ -147,14 +147,14 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z27.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x25]\n"
+ "ld1b { z25.b }, p0/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x24]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e03f7 // saddlb z23.h, z31.b, z30.b\n"
@@ -164,24 +164,24 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x455c03b5 // saddlb z21.h, z29.b, z28.b\n"
".inst 0x455c07b4 // saddlt z20.h, z29.b, z28.b\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x455a0373 // saddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0772 // saddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
".inst 0x45580331 // saddlb z17.h, z25.b, z24.b\n"
".inst 0x45580730 // saddlt z16.h, z25.b, z24.b\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
".inst 0x459745ce // saddwt z14.s, z14.s, z23.h\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z29.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x20, x26]\n"
".inst 0x459641ad // saddwb z13.s, z13.s, z22.h\n"
".inst 0x4596458c // saddwt z12.s, z12.s, z22.h\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z27.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x25]\n"
".inst 0x4595416b // saddwb z11.s, z11.s, z21.h\n"
".inst 0x4595454a // saddwt z10.s, z10.s, z21.h\n"
+ "ld1b { z25.b }, p0/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x24]\n"
".inst 0x45944129 // saddwb z9.s, z9.s, z20.h\n"
".inst 0x45944508 // saddwt z8.s, z8.s, z20.h\n"
".inst 0x459340e7 // saddwb z7.s, z7.s, z19.h\n"
@@ -223,17 +223,17 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
- ".inst 0x4508a217 // sshllb z23.h, z16.b, #0x0\n"
- ".inst 0x4508a616 // sshllt z22.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p3/Z, [x20, x26]\n"
- "ld1b { z17.b }, p2/Z, [x20, x25]\n"
- ".inst 0x4508a215 // sshllb z21.h, z16.b, #0x0\n"
- ".inst 0x4508a614 // sshllt z20.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p1/Z, [x20, x24]\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z19.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z18.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z17.b }, p1/Z, [x20, x25]\n"
+ "ld1b { z16.b }, p0/Z, [x20, x24]\n"
+ ".inst 0x4508a277 // sshllb z23.h, z19.b, #0x0\n"
+ ".inst 0x4508a676 // sshllt z22.h, z19.b, #0x0\n"
+ ".inst 0x4508a255 // sshllb z21.h, z18.b, #0x0\n"
+ ".inst 0x4508a654 // sshllt z20.h, z18.b, #0x0\n"
".inst 0x4508a233 // sshllb z19.h, z17.b, #0x0\n"
".inst 0x4508a632 // sshllt z18.h, z17.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x4508a211 // sshllb z17.h, z16.b, #0x0\n"
".inst 0x4508a610 // sshllt z16.h, z16.b, #0x0\n"
".inst 0x459741ef // saddwb z15.s, z15.s, z23.h\n"
@@ -254,115 +254,115 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904400 // saddwt z0.s, z0.s, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
- "ld1rw { z17.s }, p0/Z, [%x[combined_rescale_value]]\n"
- ".inst 0x4482824f // srshl z15.s, p0/M, z15.s, z18.s\n"
- ".inst 0x4482824e // srshl z14.s, p0/M, z14.s, z18.s\n"
- ".inst 0x4482824d // srshl z13.s, p0/M, z13.s, z18.s\n"
- ".inst 0x4482824c // srshl z12.s, p0/M, z12.s, z18.s\n"
- "ld1rw { z16.s }, p0/Z, [%x[right_shift]]\n"
- ".inst 0x04b175ef // sqrdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x4482824b // srshl z11.s, p0/M, z11.s, z18.s\n"
- ".inst 0x4482824a // srshl z10.s, p0/M, z10.s, z18.s\n"
- ".inst 0x04b175ce // sqrdmulh z14.s, z14.s, z17.s\n"
- ".inst 0x04b175ad // sqrdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x44828249 // srshl z9.s, p0/M, z9.s, z18.s\n"
- ".inst 0x44828248 // srshl z8.s, p0/M, z8.s, z18.s\n"
- ".inst 0x04b1758c // sqrdmulh z12.s, z12.s, z17.s\n"
- ".inst 0x04b1756b // sqrdmulh z11.s, z11.s, z17.s\n"
- ".inst 0x44828247 // srshl z7.s, p0/M, z7.s, z18.s\n"
- ".inst 0x44828246 // srshl z6.s, p0/M, z6.s, z18.s\n"
- ".inst 0x04b1754a // sqrdmulh z10.s, z10.s, z17.s\n"
- ".inst 0x04b17529 // sqrdmulh z9.s, z9.s, z17.s\n"
- ".inst 0x44828245 // srshl z5.s, p0/M, z5.s, z18.s\n"
- ".inst 0x44828244 // srshl z4.s, p0/M, z4.s, z18.s\n"
- ".inst 0x04b17508 // sqrdmulh z8.s, z8.s, z17.s\n"
- ".inst 0x04b174e7 // sqrdmulh z7.s, z7.s, z17.s\n"
- ".inst 0x44828243 // srshl z3.s, p0/M, z3.s, z18.s\n"
- ".inst 0x44828242 // srshl z2.s, p0/M, z2.s, z18.s\n"
- ".inst 0x04b174c6 // sqrdmulh z6.s, z6.s, z17.s\n"
- ".inst 0x04b174a5 // sqrdmulh z5.s, z5.s, z17.s\n"
- ".inst 0x44828241 // srshl z1.s, p0/M, z1.s, z18.s\n"
- ".inst 0x44828240 // srshl z0.s, p0/M, z0.s, z18.s\n"
- ".inst 0x04b17484 // sqrdmulh z4.s, z4.s, z17.s\n"
- ".inst 0x04b17463 // sqrdmulh z3.s, z3.s, z17.s\n"
- ".inst 0x04b17442 // sqrdmulh z2.s, z2.s, z17.s\n"
- ".inst 0x04b17421 // sqrdmulh z1.s, z1.s, z17.s\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- ".inst 0x04b17400 // sqrdmulh z0.s, z0.s, z17.s\n"
- "mov z18.s, #0x7f\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- ".inst 0x4482820b // srshl z11.s, p0/M, z11.s, z16.s\n"
- ".inst 0x4482820a // srshl z10.s, p0/M, z10.s, z16.s\n"
- ".inst 0x44828209 // srshl z9.s, p0/M, z9.s, z16.s\n"
- ".inst 0x44828208 // srshl z8.s, p0/M, z8.s, z16.s\n"
- ".inst 0x44828207 // srshl z7.s, p0/M, z7.s, z16.s\n"
- ".inst 0x44828206 // srshl z6.s, p0/M, z6.s, z16.s\n"
- ".inst 0x44828205 // srshl z5.s, p0/M, z5.s, z16.s\n"
- ".inst 0x44828204 // srshl z4.s, p0/M, z4.s, z16.s\n"
- ".inst 0x44828203 // srshl z3.s, p0/M, z3.s, z16.s\n"
- ".inst 0x44828202 // srshl z2.s, p0/M, z2.s, z16.s\n"
- ".inst 0x44828201 // srshl z1.s, p0/M, z1.s, z16.s\n"
- ".inst 0x44828200 // srshl z0.s, p0/M, z0.s, z16.s\n"
- "not z16.s, p0/M, z18.s\n"
- "smax z15.s, p0/M, z15.s, z16.s\n"
- "smax z14.s, p0/M, z14.s, z16.s\n"
- "smax z13.s, p0/M, z13.s, z16.s\n"
- "smax z12.s, p0/M, z12.s, z16.s\n"
- "smax z11.s, p0/M, z11.s, z16.s\n"
- "smax z10.s, p0/M, z10.s, z16.s\n"
- "smax z9.s, p0/M, z9.s, z16.s\n"
- "smax z8.s, p0/M, z8.s, z16.s\n"
- "smax z7.s, p0/M, z7.s, z16.s\n"
- "smax z6.s, p0/M, z6.s, z16.s\n"
- "smax z5.s, p0/M, z5.s, z16.s\n"
- "smax z4.s, p0/M, z4.s, z16.s\n"
- "smax z3.s, p0/M, z3.s, z16.s\n"
- "smax z2.s, p0/M, z2.s, z16.s\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z18.s\n"
- "smin z14.s, p0/M, z14.s, z18.s\n"
- "smin z13.s, p0/M, z13.s, z18.s\n"
- "trn1 z17.h, z15.h, z14.h\n"
- "smin z12.s, p0/M, z12.s, z18.s\n"
- "smin z11.s, p0/M, z11.s, z18.s\n"
- "trn1 z16.h, z13.h, z12.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z10.s, p0/M, z10.s, z18.s\n"
- "smin z9.s, p0/M, z9.s, z18.s\n"
- "trn1 z17.h, z11.h, z10.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "smin z8.s, p0/M, z8.s, z18.s\n"
- "smin z7.s, p0/M, z7.s, z18.s\n"
+ "ld1rw { z19.s }, p4/Z, [%x[left_shift]]\n"
+ "ld1rw { z18.s }, p4/Z, [%x[combined_rescale_value]]\n"
+ "mov z20.s, #0x7f\n"
+ "ld1rw { z17.s }, p4/Z, [%x[right_shift]]\n"
+ "not z16.s, p4/M, z20.s\n"
+ ".inst 0x4482926f // srshl z15.s, p4/M, z15.s, z19.s\n"
+ ".inst 0x4482926e // srshl z14.s, p4/M, z14.s, z19.s\n"
+ ".inst 0x4482926d // srshl z13.s, p4/M, z13.s, z19.s\n"
+ ".inst 0x4482926c // srshl z12.s, p4/M, z12.s, z19.s\n"
+ ".inst 0x4482926b // srshl z11.s, p4/M, z11.s, z19.s\n"
+ ".inst 0x4482926a // srshl z10.s, p4/M, z10.s, z19.s\n"
+ ".inst 0x44829269 // srshl z9.s, p4/M, z9.s, z19.s\n"
+ ".inst 0x04b275ef // sqrdmulh z15.s, z15.s, z18.s\n"
+ ".inst 0x44829268 // srshl z8.s, p4/M, z8.s, z19.s\n"
+ ".inst 0x44829267 // srshl z7.s, p4/M, z7.s, z19.s\n"
+ ".inst 0x04b275ce // sqrdmulh z14.s, z14.s, z18.s\n"
+ ".inst 0x04b275ad // sqrdmulh z13.s, z13.s, z18.s\n"
+ ".inst 0x44829266 // srshl z6.s, p4/M, z6.s, z19.s\n"
+ ".inst 0x44829265 // srshl z5.s, p4/M, z5.s, z19.s\n"
+ ".inst 0x04b2758c // sqrdmulh z12.s, z12.s, z18.s\n"
+ ".inst 0x04b2756b // sqrdmulh z11.s, z11.s, z18.s\n"
+ ".inst 0x44829264 // srshl z4.s, p4/M, z4.s, z19.s\n"
+ ".inst 0x44829263 // srshl z3.s, p4/M, z3.s, z19.s\n"
+ ".inst 0x04b2754a // sqrdmulh z10.s, z10.s, z18.s\n"
+ ".inst 0x04b27529 // sqrdmulh z9.s, z9.s, z18.s\n"
+ ".inst 0x44829262 // srshl z2.s, p4/M, z2.s, z19.s\n"
+ ".inst 0x44829261 // srshl z1.s, p4/M, z1.s, z19.s\n"
+ ".inst 0x04b27508 // sqrdmulh z8.s, z8.s, z18.s\n"
+ ".inst 0x04b274e7 // sqrdmulh z7.s, z7.s, z18.s\n"
+ ".inst 0x44829260 // srshl z0.s, p4/M, z0.s, z19.s\n"
+ ".inst 0x04b274c6 // sqrdmulh z6.s, z6.s, z18.s\n"
+ ".inst 0x04b274a5 // sqrdmulh z5.s, z5.s, z18.s\n"
+ ".inst 0x4482922f // srshl z15.s, p4/M, z15.s, z17.s\n"
+ ".inst 0x04b27484 // sqrdmulh z4.s, z4.s, z18.s\n"
+ ".inst 0x04b27463 // sqrdmulh z3.s, z3.s, z18.s\n"
+ ".inst 0x4482922e // srshl z14.s, p4/M, z14.s, z17.s\n"
+ ".inst 0x4482922d // srshl z13.s, p4/M, z13.s, z17.s\n"
+ ".inst 0x04b27442 // sqrdmulh z2.s, z2.s, z18.s\n"
+ ".inst 0x04b27421 // sqrdmulh z1.s, z1.s, z18.s\n"
+ ".inst 0x4482922c // srshl z12.s, p4/M, z12.s, z17.s\n"
+ ".inst 0x4482922b // srshl z11.s, p4/M, z11.s, z17.s\n"
+ ".inst 0x04b27400 // sqrdmulh z0.s, z0.s, z18.s\n"
+ ".inst 0x4482922a // srshl z10.s, p4/M, z10.s, z17.s\n"
+ ".inst 0x44829229 // srshl z9.s, p4/M, z9.s, z17.s\n"
+ ".inst 0x44829228 // srshl z8.s, p4/M, z8.s, z17.s\n"
+ ".inst 0x44829227 // srshl z7.s, p4/M, z7.s, z17.s\n"
+ ".inst 0x44829226 // srshl z6.s, p4/M, z6.s, z17.s\n"
+ ".inst 0x44829225 // srshl z5.s, p4/M, z5.s, z17.s\n"
+ ".inst 0x44829224 // srshl z4.s, p4/M, z4.s, z17.s\n"
+ ".inst 0x44829223 // srshl z3.s, p4/M, z3.s, z17.s\n"
+ ".inst 0x44829222 // srshl z2.s, p4/M, z2.s, z17.s\n"
+ ".inst 0x44829221 // srshl z1.s, p4/M, z1.s, z17.s\n"
+ ".inst 0x44829220 // srshl z0.s, p4/M, z0.s, z17.s\n"
+ "smax z15.s, p4/M, z15.s, z16.s\n"
+ "smax z14.s, p4/M, z14.s, z16.s\n"
+ "smax z13.s, p4/M, z13.s, z16.s\n"
+ "smax z12.s, p4/M, z12.s, z16.s\n"
+ "smax z11.s, p4/M, z11.s, z16.s\n"
+ "smax z10.s, p4/M, z10.s, z16.s\n"
+ "smax z9.s, p4/M, z9.s, z16.s\n"
+ "smax z8.s, p4/M, z8.s, z16.s\n"
+ "smax z7.s, p4/M, z7.s, z16.s\n"
+ "smax z6.s, p4/M, z6.s, z16.s\n"
+ "smax z5.s, p4/M, z5.s, z16.s\n"
+ "smax z4.s, p4/M, z4.s, z16.s\n"
+ "smax z3.s, p4/M, z3.s, z16.s\n"
+ "smax z2.s, p4/M, z2.s, z16.s\n"
+ "smax z1.s, p4/M, z1.s, z16.s\n"
+ "smax z0.s, p4/M, z0.s, z16.s\n"
+ "smin z15.s, p4/M, z15.s, z20.s\n"
+ "smin z14.s, p4/M, z14.s, z20.s\n"
+ "smin z13.s, p4/M, z13.s, z20.s\n"
+ "smin z12.s, p4/M, z12.s, z20.s\n"
+ "smin z11.s, p4/M, z11.s, z20.s\n"
+ "smin z10.s, p4/M, z10.s, z20.s\n"
+ "smin z9.s, p4/M, z9.s, z20.s\n"
+ "smin z8.s, p4/M, z8.s, z20.s\n"
+ "smin z7.s, p4/M, z7.s, z20.s\n"
+ "trn1 z19.h, z15.h, z14.h\n"
+ "smin z6.s, p4/M, z6.s, z20.s\n"
+ "smin z5.s, p4/M, z5.s, z20.s\n"
+ "trn1 z17.h, z13.h, z12.h\n"
+ "smin z4.s, p4/M, z4.s, z20.s\n"
+ "smin z3.s, p4/M, z3.s, z20.s\n"
+ "trn1 z18.h, z11.h, z10.h\n"
+ "smin z2.s, p4/M, z2.s, z20.s\n"
+ "smin z1.s, p4/M, z1.s, z20.s\n"
"trn1 z16.h, z9.h, z8.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z6.s, p0/M, z6.s, z18.s\n"
- "smin z5.s, p0/M, z5.s, z18.s\n"
- "trn1 z17.h, z7.h, z6.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x26]\n"
- "smin z4.s, p0/M, z4.s, z18.s\n"
- "smin z3.s, p0/M, z3.s, z18.s\n"
- "trn1 z16.h, z5.h, z4.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z2.s, p0/M, z2.s, z18.s\n"
- "smin z1.s, p0/M, z1.s, z18.s\n"
- "trn1 z17.h, z3.h, z2.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x25]\n"
- "smin z0.s, p0/M, z0.s, z18.s\n"
+ "smin z0.s, p4/M, z0.s, z20.s\n"
+ "trn1 z21.h, z7.h, z6.h\n"
+ "trn1 z20.b, z19.b, z17.b\n"
+ "trn1 z17.h, z5.h, z4.h\n"
+ "trn1 z19.h, z3.h, z2.h\n"
+ "trn1 z18.b, z18.b, z16.b\n"
"trn1 z16.h, z1.h, z0.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
- "incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "st1b { z20.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
+ "trn1 z17.b, z21.b, z17.b\n"
+ "trn1 z16.b, z19.b, z16.b\n"
+ "st1b { z18.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
+ "st1b { z17.b }, p1, [%x[outptr], x25]\n"
"incb x25, ALL, MUL #4\n"
+ "st1b { z16.b }, p0, [%x[outptr], x24]\n"
+ "incb x24, ALL, MUL #4\n"
+ "whilelt p0.b, x24, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x23, %x[n_valid_cells], #0x1\n"
@@ -375,21 +375,21 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e03f1 // saddlb z17.h, z31.b, z30.b\n"
".inst 0x455e07f0 // saddlt z16.h, z31.b, z30.b\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
+ "add x22, x22, #0x10\n"
".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
- "add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x459041ad // saddwb z13.s, z13.s, z16.h\n"
".inst 0x4590458c // saddwt z12.s, z12.s, z16.h\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e03f1 // saddlb z17.h, z31.b, z30.b\n"
@@ -403,47 +403,47 @@ void sve_s8q_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z16.b }, p3/Z, [x20, x27]\n"
".inst 0x4508a211 // sshllb z17.h, z16.b, #0x0\n"
".inst 0x4508a610 // sshllt z16.h, z16.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x459141ef // saddwb z15.s, z15.s, z17.h\n"
".inst 0x459145ce // saddwt z14.s, z14.s, z17.h\n"
".inst 0x459041ad // saddwb z13.s, z13.s, z16.h\n"
".inst 0x4590458c // saddwt z12.s, z12.s, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1rw { z16.s }, p0/Z, [%x[left_shift]]\n"
- "ld1rw { z17.s }, p0/Z, [%x[combined_rescale_value]]\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- "ld1rw { z16.s }, p0/Z, [%x[right_shift]]\n"
- ".inst 0x04b175ef // sqrdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x04b175ce // sqrdmulh z14.s, z14.s, z17.s\n"
- ".inst 0x04b175ad // sqrdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- ".inst 0x04b1758c // sqrdmulh z12.s, z12.s, z17.s\n"
+ "ld1rw { z20.s }, p4/Z, [%x[left_shift]]\n"
+ "ld1rw { z19.s }, p4/Z, [%x[combined_rescale_value]]\n"
"mov z18.s, #0x7f\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- "not z16.s, p0/M, z18.s\n"
- "smax z15.s, p0/M, z15.s, z16.s\n"
- "smax z14.s, p0/M, z14.s, z16.s\n"
- "smax z13.s, p0/M, z13.s, z16.s\n"
- "smax z12.s, p0/M, z12.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z18.s\n"
- "smin z14.s, p0/M, z14.s, z18.s\n"
- "smin z13.s, p0/M, z13.s, z18.s\n"
+ "ld1rw { z17.s }, p4/Z, [%x[right_shift]]\n"
+ "not z16.s, p4/M, z18.s\n"
+ ".inst 0x4482928f // srshl z15.s, p4/M, z15.s, z20.s\n"
+ ".inst 0x4482928e // srshl z14.s, p4/M, z14.s, z20.s\n"
+ ".inst 0x4482928d // srshl z13.s, p4/M, z13.s, z20.s\n"
+ ".inst 0x4482928c // srshl z12.s, p4/M, z12.s, z20.s\n"
+ ".inst 0x04b375ef // sqrdmulh z15.s, z15.s, z19.s\n"
+ ".inst 0x04b375ce // sqrdmulh z14.s, z14.s, z19.s\n"
+ ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
+ ".inst 0x04b3758c // sqrdmulh z12.s, z12.s, z19.s\n"
+ ".inst 0x4482922f // srshl z15.s, p4/M, z15.s, z17.s\n"
+ ".inst 0x4482922e // srshl z14.s, p4/M, z14.s, z17.s\n"
+ ".inst 0x4482922d // srshl z13.s, p4/M, z13.s, z17.s\n"
+ ".inst 0x4482922c // srshl z12.s, p4/M, z12.s, z17.s\n"
+ "smax z15.s, p4/M, z15.s, z16.s\n"
+ "smax z14.s, p4/M, z14.s, z16.s\n"
+ "smax z13.s, p4/M, z13.s, z16.s\n"
+ "smax z12.s, p4/M, z12.s, z16.s\n"
+ "smin z15.s, p4/M, z15.s, z18.s\n"
+ "smin z14.s, p4/M, z14.s, z18.s\n"
+ "smin z13.s, p4/M, z13.s, z18.s\n"
+ "smin z12.s, p4/M, z12.s, z18.s\n"
"trn1 z17.h, z15.h, z14.h\n"
- "smin z12.s, p0/M, z12.s, z18.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x27]\n"
"incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
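
For orientation, the s8q average-pool epilogue rewritten above keeps the same arithmetic under the new predicate and register allocation: a rounding left shift (srshl by [left_shift]), a fixed-point multiply (sqrdmulh by [combined_rescale_value]), a rounding right shift (srshl by the negative [right_shift]), then a clamp between not(0x7f) and 0x7f. A minimal scalar sketch of that chain follows, assuming the shift operands are non-negative magnitudes and ignoring the int32 saturation sqrdmulh applies; the helper name is illustrative, not part of the library.

// Hedged scalar model of the srshl / sqrdmulh / srshl / smax / smin chain above.
// Saturation of the intermediate multiply is omitted for brevity.
#include <algorithm>
#include <cstdint>

static int8_t requantize_s8q_avg(int32_t acc, int32_t left_shift,
                                 int32_t combined_rescale, int32_t right_shift)
{
    int64_t v = static_cast<int64_t>(acc) << left_shift;   // srshl by [left_shift]
    v = (v * combined_rescale + (1LL << 30)) >> 31;        // sqrdmulh: rounding doubling high half
    if (right_shift > 0)                                   // srshl by a negative amount rounds right
        v = (v + (1LL << (right_shift - 1))) >> right_shift;
    return static_cast<int8_t>(std::clamp<int64_t>(v, -128, 127)); // not(0x7f)..0x7f
}
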
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
index 4fc1532d5a..5033aa9d73 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_s8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,337 +46,337 @@ void sve_s8q_nhwc_max_generic_depthfirst_impl(
"cntb x28\n"
"cntb x27, ALL, MUL #2\n"
"cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
+ "whilelt p2.b, x28, %x[n_channels]\n"
+ "whilelt p1.b, x27, %x[n_channels]\n"
+ "whilelt p0.b, x26, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.b, #0x80\n"
- "mov z7.b, #0x80\n"
- "mov x24, %x[inptrs]\n"
"mov z6.b, #0x80\n"
+ "mov z3.b, #0x80\n"
+ "mov x24, %x[inptrs]\n"
"mov z5.b, #0x80\n"
+ "mov z4.b, #0x80\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
- "ld1b { z0.b }, p3/Z, [x23, x28]\n"
- "ld1b { z31.b }, p3/Z, [x22, x28]\n"
- "ld1b { z22.b }, p3/Z, [x21, x28]\n"
- "ld1b { z30.b }, p3/Z, [x20, x28]\n"
- "ld1b { z29.b }, p2/Z, [x23, x27]\n"
- "ld1b { z28.b }, p2/Z, [x22, x27]\n"
- "ld1b { z21.b }, p2/Z, [x21, x27]\n"
- "ld1b { z27.b }, p2/Z, [x20, x27]\n"
- "ld1b { z26.b }, p1/Z, [x23, x26]\n"
- "ld1b { z25.b }, p1/Z, [x22, x26]\n"
- "ld1b { z20.b }, p1/Z, [x21, x26]\n"
- "ld1b { z24.b }, p1/Z, [x20, x26]\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p2/Z, [x23, x28]\n"
+ "ld1b { z30.b }, p2/Z, [x22, x28]\n"
+ "ld1b { z22.b }, p2/Z, [x21, x28]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x28]\n"
+ "ld1b { z28.b }, p1/Z, [x23, x27]\n"
+ "ld1b { z27.b }, p1/Z, [x22, x27]\n"
+ "ld1b { z21.b }, p1/Z, [x21, x27]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x27]\n"
+ "ld1b { z16.b }, p0/Z, [x23, x26]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x26]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x26]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
- "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
+ "movprfx z19, z2\n smax z19.b, p4/M, z19.b, z1.b\n"
+ "smax z23.b, p4/M, z23.b, z0.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
- "smax z22.b, p0/M, z22.b, z30.b\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
- "smax z21.b, p0/M, z21.b, z27.b\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
- "movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
- "smax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z0.b }, p3/Z, [x23, x28]\n"
- "ld1b { z31.b }, p3/Z, [x22, x28]\n"
- "smax z19.b, p0/M, z19.b, z23.b\n"
- "smax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z22.b }, p3/Z, [x21, x28]\n"
- "ld1b { z30.b }, p3/Z, [x20, x28]\n"
- "smax z17.b, p0/M, z17.b, z21.b\n"
- "smax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z29.b }, p2/Z, [x23, x27]\n"
- "ld1b { z28.b }, p2/Z, [x22, x27]\n"
+ "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
+ "smax z22.b, p4/M, z22.b, z29.b\n"
+ "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
+ "smax z21.b, p4/M, z21.b, z26.b\n"
+ "smax z16.b, p4/M, z16.b, z25.b\n"
+ "smax z20.b, p4/M, z20.b, z24.b\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "smax z19.b, p4/M, z19.b, z23.b\n"
+ "smax z18.b, p4/M, z18.b, z22.b\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "smax z17.b, p4/M, z17.b, z21.b\n"
"subs x25, x25, #0x1\n"
- "smax z8.b, p0/M, z8.b, z19.b\n"
- "ld1b { z21.b }, p2/Z, [x21, x27]\n"
- "ld1b { z27.b }, p2/Z, [x20, x27]\n"
- "smax z7.b, p0/M, z7.b, z18.b\n"
- "smax z6.b, p0/M, z6.b, z17.b\n"
- "ld1b { z26.b }, p1/Z, [x23, x26]\n"
- "ld1b { z25.b }, p1/Z, [x22, x26]\n"
- "smax z5.b, p0/M, z5.b, z16.b\n"
+ "ld1b { z31.b }, p2/Z, [x23, x28]\n"
+ "ld1b { z30.b }, p2/Z, [x22, x28]\n"
+ "smax z16.b, p4/M, z16.b, z20.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z20.b }, p1/Z, [x21, x26]\n"
- "ld1b { z24.b }, p1/Z, [x20, x26]\n"
+ "ld1b { z22.b }, p2/Z, [x21, x28]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x28]\n"
+ "smax z6.b, p4/M, z6.b, z19.b\n"
+ "smax z3.b, p4/M, z3.b, z18.b\n"
+ "ld1b { z28.b }, p1/Z, [x23, x27]\n"
+ "ld1b { z27.b }, p1/Z, [x22, x27]\n"
+ "smax z5.b, p4/M, z5.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x21, x27]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x27]\n"
+ "smax z4.b, p4/M, z4.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x23, x26]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x26]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x26]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x26]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z4\n smax z19.b, p0/M, z19.b, z3.b\n"
- "movprfx z23, z2\n smax z23.b, p0/M, z23.b, z1.b\n"
- "movprfx z18, z0\n smax z18.b, p0/M, z18.b, z31.b\n"
- "smax z22.b, p0/M, z22.b, z30.b\n"
- "movprfx z17, z29\n smax z17.b, p0/M, z17.b, z28.b\n"
- "smax z21.b, p0/M, z21.b, z27.b\n"
- "movprfx z16, z26\n smax z16.b, p0/M, z16.b, z25.b\n"
- "smax z20.b, p0/M, z20.b, z24.b\n"
- "smax z19.b, p0/M, z19.b, z23.b\n"
- "smax z18.b, p0/M, z18.b, z22.b\n"
- "smax z17.b, p0/M, z17.b, z21.b\n"
- "smax z16.b, p0/M, z16.b, z20.b\n"
- "smax z8.b, p0/M, z8.b, z19.b\n"
- "smax z7.b, p0/M, z7.b, z18.b\n"
- "smax z6.b, p0/M, z6.b, z17.b\n"
- "smax z5.b, p0/M, z5.b, z16.b\n"
+ "movprfx z19, z2\n smax z19.b, p4/M, z19.b, z1.b\n"
+ "smax z23.b, p4/M, z23.b, z0.b\n"
+ "movprfx z18, z31\n smax z18.b, p4/M, z18.b, z30.b\n"
+ "smax z22.b, p4/M, z22.b, z29.b\n"
+ "movprfx z17, z28\n smax z17.b, p4/M, z17.b, z27.b\n"
+ "smax z21.b, p4/M, z21.b, z26.b\n"
+ "smax z16.b, p4/M, z16.b, z25.b\n"
+ "smax z20.b, p4/M, z20.b, z24.b\n"
+ "smax z19.b, p4/M, z19.b, z23.b\n"
+ "smax z18.b, p4/M, z18.b, z22.b\n"
+ "smax z17.b, p4/M, z17.b, z21.b\n"
+ "smax z16.b, p4/M, z16.b, z20.b\n"
+ "smax z6.b, p4/M, z6.b, z19.b\n"
+ "smax z3.b, p4/M, z3.b, z18.b\n"
+ "smax z5.b, p4/M, z5.b, z17.b\n"
+ "smax z4.b, p4/M, z4.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "smax z8.b, p0/M, z8.b, z16.b\n"
- "ld1b { z17.b }, p3/Z, [x20, x28]\n"
- "ld1b { z16.b }, p2/Z, [x20, x27]\n"
- "smax z7.b, p0/M, z7.b, z17.b\n"
- "smax z6.b, p0/M, z6.b, z16.b\n"
- "ld1b { z16.b }, p1/Z, [x20, x26]\n"
- "smax z5.b, p0/M, z5.b, z16.b\n"
+ "ld1b { z19.b }, p3/Z, [x20, x9]\n"
+ "ld1b { z18.b }, p2/Z, [x20, x28]\n"
+ "ld1b { z17.b }, p1/Z, [x20, x27]\n"
+ "ld1b { z16.b }, p0/Z, [x20, x26]\n"
+ "smax z6.b, p4/M, z6.b, z19.b\n"
+ "smax z3.b, p4/M, z3.b, z18.b\n"
+ "smax z5.b, p4/M, z5.b, z17.b\n"
+ "smax z4.b, p4/M, z4.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- ".inst 0x4508a111 // sshllb z17.h, z8.b, #0x0\n"
- ".inst 0x4508a517 // sshllt z23.h, z8.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z4.s }, p0/Z, [x20]\n"
- ".inst 0x4508a0f6 // sshllb z22.h, z7.b, #0x0\n"
- ".inst 0x4508a4f5 // sshllt z21.h, z7.b, #0x0\n"
+ ".inst 0x4508a0d3 // sshllb z19.h, z6.b, #0x0\n"
+ ".inst 0x4508a4d1 // sshllt z17.h, z6.b, #0x0\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
- ".inst 0x4508a0d4 // sshllb z20.h, z6.b, #0x0\n"
- ".inst 0x4508a4d3 // sshllt z19.h, z6.b, #0x0\n"
+ ".inst 0x4508a072 // sshllb z18.h, z3.b, #0x0\n"
+ ".inst 0x4508a478 // sshllt z24.h, z3.b, #0x0\n"
+ "ld1rw { z3.s }, p4/Z, [x21]\n"
+ "ld1rw { z2.s }, p4/Z, [x20]\n"
+ ".inst 0x4508a0b5 // sshllb z21.h, z5.b, #0x0\n"
+ ".inst 0x4508a4b7 // sshllt z23.h, z5.b, #0x0\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
- ".inst 0x4508a0b2 // sshllb z18.h, z5.b, #0x0\n"
- ".inst 0x4508a4b0 // sshllt z16.h, z5.b, #0x0\n"
- ".inst 0x4510a221 // sshllb z1.s, z17.h, #0x0\n"
+ ".inst 0x4508a096 // sshllb z22.h, z4.b, #0x0\n"
+ ".inst 0x4508a494 // sshllt z20.h, z4.b, #0x0\n"
+ "ld1rw { z16.s }, p4/Z, [x20]\n"
+ ".inst 0x4510a261 // sshllb z1.s, z19.h, #0x0\n"
+ ".inst 0x4510a673 // sshllt z19.s, z19.h, #0x0\n"
+ ".inst 0x4510a220 // sshllb z0.s, z17.h, #0x0\n"
".inst 0x4510a631 // sshllt z17.s, z17.h, #0x0\n"
- ".inst 0x44828081 // srshl z1.s, p0/M, z1.s, z4.s\n"
- ".inst 0x44828091 // srshl z17.s, p0/M, z17.s, z4.s\n"
- ".inst 0x4510a2e0 // sshllb z0.s, z23.h, #0x0\n"
- ".inst 0x4510a6ff // sshllt z31.s, z23.h, #0x0\n"
- ".inst 0x44828080 // srshl z0.s, p0/M, z0.s, z4.s\n"
- ".inst 0x4482809f // srshl z31.s, p0/M, z31.s, z4.s\n"
- ".inst 0x4510a2de // sshllb z30.s, z22.h, #0x0\n"
- ".inst 0x4510a6dd // sshllt z29.s, z22.h, #0x0\n"
- ".inst 0x4482809e // srshl z30.s, p0/M, z30.s, z4.s\n"
- ".inst 0x4482809d // srshl z29.s, p0/M, z29.s, z4.s\n"
+ ".inst 0x4510a25f // sshllb z31.s, z18.h, #0x0\n"
+ ".inst 0x4510a652 // sshllt z18.s, z18.h, #0x0\n"
+ ".inst 0x4510a31e // sshllb z30.s, z24.h, #0x0\n"
+ ".inst 0x4510a71d // sshllt z29.s, z24.h, #0x0\n"
+ ".inst 0x44829061 // srshl z1.s, p4/M, z1.s, z3.s\n"
+ ".inst 0x44829073 // srshl z19.s, p4/M, z19.s, z3.s\n"
".inst 0x4510a2bc // sshllb z28.s, z21.h, #0x0\n"
- ".inst 0x4510a6bb // sshllt z27.s, z21.h, #0x0\n"
- ".inst 0x4482809c // srshl z28.s, p0/M, z28.s, z4.s\n"
- ".inst 0x4482809b // srshl z27.s, p0/M, z27.s, z4.s\n"
- ".inst 0x4510a29a // sshllb z26.s, z20.h, #0x0\n"
- ".inst 0x4510a699 // sshllt z25.s, z20.h, #0x0\n"
- ".inst 0x4482809a // srshl z26.s, p0/M, z26.s, z4.s\n"
- ".inst 0x44828099 // srshl z25.s, p0/M, z25.s, z4.s\n"
- ".inst 0x4510a278 // sshllb z24.s, z19.h, #0x0\n"
- ".inst 0x4510a677 // sshllt z23.s, z19.h, #0x0\n"
- ".inst 0x44828098 // srshl z24.s, p0/M, z24.s, z4.s\n"
- ".inst 0x44828097 // srshl z23.s, p0/M, z23.s, z4.s\n"
- ".inst 0x4510a256 // sshllb z22.s, z18.h, #0x0\n"
- ".inst 0x4510a655 // sshllt z21.s, z18.h, #0x0\n"
- ".inst 0x44828096 // srshl z22.s, p0/M, z22.s, z4.s\n"
- ".inst 0x44828095 // srshl z21.s, p0/M, z21.s, z4.s\n"
- ".inst 0x4510a214 // sshllb z20.s, z16.h, #0x0\n"
- ".inst 0x4510a613 // sshllt z19.s, z16.h, #0x0\n"
- ".inst 0x44828094 // srshl z20.s, p0/M, z20.s, z4.s\n"
- ".inst 0x44828093 // srshl z19.s, p0/M, z19.s, z4.s\n"
- ".inst 0x04a37421 // sqrdmulh z1.s, z1.s, z3.s\n"
- ".inst 0x04a37631 // sqrdmulh z17.s, z17.s, z3.s\n"
- ".inst 0x44828041 // srshl z1.s, p0/M, z1.s, z2.s\n"
- ".inst 0x44828051 // srshl z17.s, p0/M, z17.s, z2.s\n"
- ".inst 0x04a37400 // sqrdmulh z0.s, z0.s, z3.s\n"
- ".inst 0x04a377ff // sqrdmulh z31.s, z31.s, z3.s\n"
- ".inst 0x44828040 // srshl z0.s, p0/M, z0.s, z2.s\n"
- ".inst 0x4482805f // srshl z31.s, p0/M, z31.s, z2.s\n"
- ".inst 0x04a377de // sqrdmulh z30.s, z30.s, z3.s\n"
- ".inst 0x04a377bd // sqrdmulh z29.s, z29.s, z3.s\n"
- ".inst 0x4482805e // srshl z30.s, p0/M, z30.s, z2.s\n"
- ".inst 0x4482805d // srshl z29.s, p0/M, z29.s, z2.s\n"
- ".inst 0x04a3779c // sqrdmulh z28.s, z28.s, z3.s\n"
- ".inst 0x04a3777b // sqrdmulh z27.s, z27.s, z3.s\n"
- ".inst 0x4482805c // srshl z28.s, p0/M, z28.s, z2.s\n"
- ".inst 0x4482805b // srshl z27.s, p0/M, z27.s, z2.s\n"
- ".inst 0x04a3775a // sqrdmulh z26.s, z26.s, z3.s\n"
- ".inst 0x04a37739 // sqrdmulh z25.s, z25.s, z3.s\n"
- ".inst 0x4482805a // srshl z26.s, p0/M, z26.s, z2.s\n"
- ".inst 0x44828059 // srshl z25.s, p0/M, z25.s, z2.s\n"
- ".inst 0x04a37718 // sqrdmulh z24.s, z24.s, z3.s\n"
- ".inst 0x04a376f7 // sqrdmulh z23.s, z23.s, z3.s\n"
- ".inst 0x44828058 // srshl z24.s, p0/M, z24.s, z2.s\n"
- ".inst 0x44828057 // srshl z23.s, p0/M, z23.s, z2.s\n"
- ".inst 0x04a376d6 // sqrdmulh z22.s, z22.s, z3.s\n"
- ".inst 0x04a376b5 // sqrdmulh z21.s, z21.s, z3.s\n"
- ".inst 0x44828056 // srshl z22.s, p0/M, z22.s, z2.s\n"
- ".inst 0x44828055 // srshl z21.s, p0/M, z21.s, z2.s\n"
- ".inst 0x04a37694 // sqrdmulh z20.s, z20.s, z3.s\n"
- ".inst 0x04a37673 // sqrdmulh z19.s, z19.s, z3.s\n"
- ".inst 0x44828054 // srshl z20.s, p0/M, z20.s, z2.s\n"
- ".inst 0x44828053 // srshl z19.s, p0/M, z19.s, z2.s\n"
- "mov z18.s, #0x7f\n"
- "not z16.s, p0/M, z18.s\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z17.s, p0/M, z17.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smax z31.s, p0/M, z31.s, z16.s\n"
- "smax z30.s, p0/M, z30.s, z16.s\n"
- "smax z29.s, p0/M, z29.s, z16.s\n"
- "smax z28.s, p0/M, z28.s, z16.s\n"
- "smax z27.s, p0/M, z27.s, z16.s\n"
- "smax z26.s, p0/M, z26.s, z16.s\n"
- "smax z25.s, p0/M, z25.s, z16.s\n"
- "smax z24.s, p0/M, z24.s, z16.s\n"
- "smax z23.s, p0/M, z23.s, z16.s\n"
- "smax z22.s, p0/M, z22.s, z16.s\n"
- "smax z21.s, p0/M, z21.s, z16.s\n"
- "smax z20.s, p0/M, z20.s, z16.s\n"
- "smax z19.s, p0/M, z19.s, z16.s\n"
- "smin z1.s, p0/M, z1.s, z18.s\n"
- "smin z17.s, p0/M, z17.s, z18.s\n"
- "smin z0.s, p0/M, z0.s, z18.s\n"
- "trn1 z17.h, z1.h, z17.h\n"
- "smin z31.s, p0/M, z31.s, z18.s\n"
- "smin z30.s, p0/M, z30.s, z18.s\n"
- "trn1 z16.h, z0.h, z31.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z29.s, p0/M, z29.s, z18.s\n"
- "smin z28.s, p0/M, z28.s, z18.s\n"
- "trn1 z17.h, z30.h, z29.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "smin z27.s, p0/M, z27.s, z18.s\n"
- "smin z26.s, p0/M, z26.s, z18.s\n"
- "trn1 z16.h, z28.h, z27.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z25.s, p0/M, z25.s, z18.s\n"
- "smin z24.s, p0/M, z24.s, z18.s\n"
- "trn1 z17.h, z26.h, z25.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x28]\n"
- "smin z23.s, p0/M, z23.s, z18.s\n"
- "smin z22.s, p0/M, z22.s, z18.s\n"
- "trn1 z16.h, z24.h, z23.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z21.s, p0/M, z21.s, z18.s\n"
- "smin z20.s, p0/M, z20.s, z18.s\n"
- "trn1 z17.h, z22.h, z21.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x27]\n"
- "smin z19.s, p0/M, z19.s, z18.s\n"
- "trn1 z16.h, z20.h, z19.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x26]\n"
- "incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ ".inst 0x4510a6b5 // sshllt z21.s, z21.h, #0x0\n"
+ ".inst 0x44829060 // srshl z0.s, p4/M, z0.s, z3.s\n"
+ ".inst 0x44829071 // srshl z17.s, p4/M, z17.s, z3.s\n"
+ ".inst 0x4510a2fb // sshllb z27.s, z23.h, #0x0\n"
+ ".inst 0x4510a6fa // sshllt z26.s, z23.h, #0x0\n"
+ ".inst 0x4482907f // srshl z31.s, p4/M, z31.s, z3.s\n"
+ ".inst 0x44829072 // srshl z18.s, p4/M, z18.s, z3.s\n"
+ ".inst 0x4510a2d9 // sshllb z25.s, z22.h, #0x0\n"
+ ".inst 0x4510a6d8 // sshllt z24.s, z22.h, #0x0\n"
+ ".inst 0x4482907e // srshl z30.s, p4/M, z30.s, z3.s\n"
+ ".inst 0x4482907d // srshl z29.s, p4/M, z29.s, z3.s\n"
+ ".inst 0x4510a297 // sshllb z23.s, z20.h, #0x0\n"
+ ".inst 0x4510a696 // sshllt z22.s, z20.h, #0x0\n"
+ ".inst 0x4482907c // srshl z28.s, p4/M, z28.s, z3.s\n"
+ ".inst 0x44829075 // srshl z21.s, p4/M, z21.s, z3.s\n"
+ ".inst 0x4482907b // srshl z27.s, p4/M, z27.s, z3.s\n"
+ ".inst 0x4482907a // srshl z26.s, p4/M, z26.s, z3.s\n"
+ ".inst 0x04a27421 // sqrdmulh z1.s, z1.s, z2.s\n"
+ ".inst 0x04a27673 // sqrdmulh z19.s, z19.s, z2.s\n"
+ ".inst 0x44829079 // srshl z25.s, p4/M, z25.s, z3.s\n"
+ ".inst 0x44829078 // srshl z24.s, p4/M, z24.s, z3.s\n"
+ ".inst 0x04a27400 // sqrdmulh z0.s, z0.s, z2.s\n"
+ ".inst 0x04a27631 // sqrdmulh z17.s, z17.s, z2.s\n"
+ ".inst 0x44829077 // srshl z23.s, p4/M, z23.s, z3.s\n"
+ ".inst 0x44829076 // srshl z22.s, p4/M, z22.s, z3.s\n"
+ ".inst 0x04a277ff // sqrdmulh z31.s, z31.s, z2.s\n"
+ ".inst 0x04a27652 // sqrdmulh z18.s, z18.s, z2.s\n"
+ ".inst 0x04a277de // sqrdmulh z30.s, z30.s, z2.s\n"
+ ".inst 0x04a277bd // sqrdmulh z29.s, z29.s, z2.s\n"
+ ".inst 0x44829201 // srshl z1.s, p4/M, z1.s, z16.s\n"
+ ".inst 0x44829213 // srshl z19.s, p4/M, z19.s, z16.s\n"
+ ".inst 0x04a2779c // sqrdmulh z28.s, z28.s, z2.s\n"
+ ".inst 0x04a276b5 // sqrdmulh z21.s, z21.s, z2.s\n"
+ ".inst 0x44829200 // srshl z0.s, p4/M, z0.s, z16.s\n"
+ ".inst 0x44829211 // srshl z17.s, p4/M, z17.s, z16.s\n"
+ ".inst 0x04a2777b // sqrdmulh z27.s, z27.s, z2.s\n"
+ ".inst 0x04a2775a // sqrdmulh z26.s, z26.s, z2.s\n"
+ ".inst 0x4482921f // srshl z31.s, p4/M, z31.s, z16.s\n"
+ ".inst 0x44829212 // srshl z18.s, p4/M, z18.s, z16.s\n"
+ ".inst 0x04a27739 // sqrdmulh z25.s, z25.s, z2.s\n"
+ ".inst 0x04a27718 // sqrdmulh z24.s, z24.s, z2.s\n"
+ ".inst 0x4482921e // srshl z30.s, p4/M, z30.s, z16.s\n"
+ ".inst 0x4482921d // srshl z29.s, p4/M, z29.s, z16.s\n"
+ ".inst 0x04a276f7 // sqrdmulh z23.s, z23.s, z2.s\n"
+ ".inst 0x04a276d6 // sqrdmulh z22.s, z22.s, z2.s\n"
+ ".inst 0x4482921c // srshl z28.s, p4/M, z28.s, z16.s\n"
+ ".inst 0x44829215 // srshl z21.s, p4/M, z21.s, z16.s\n"
+ "mov z20.s, #0x7f\n"
+ ".inst 0x4482921b // srshl z27.s, p4/M, z27.s, z16.s\n"
+ ".inst 0x4482921a // srshl z26.s, p4/M, z26.s, z16.s\n"
+ ".inst 0x44829219 // srshl z25.s, p4/M, z25.s, z16.s\n"
+ ".inst 0x44829218 // srshl z24.s, p4/M, z24.s, z16.s\n"
+ ".inst 0x44829217 // srshl z23.s, p4/M, z23.s, z16.s\n"
+ ".inst 0x44829216 // srshl z22.s, p4/M, z22.s, z16.s\n"
+ "not z16.s, p4/M, z20.s\n"
+ "smax z1.s, p4/M, z1.s, z16.s\n"
+ "smax z19.s, p4/M, z19.s, z16.s\n"
+ "smax z0.s, p4/M, z0.s, z16.s\n"
+ "smax z17.s, p4/M, z17.s, z16.s\n"
+ "smax z31.s, p4/M, z31.s, z16.s\n"
+ "smax z18.s, p4/M, z18.s, z16.s\n"
+ "smax z30.s, p4/M, z30.s, z16.s\n"
+ "smax z29.s, p4/M, z29.s, z16.s\n"
+ "smax z28.s, p4/M, z28.s, z16.s\n"
+ "smax z21.s, p4/M, z21.s, z16.s\n"
+ "smax z27.s, p4/M, z27.s, z16.s\n"
+ "smax z26.s, p4/M, z26.s, z16.s\n"
+ "smax z25.s, p4/M, z25.s, z16.s\n"
+ "smax z24.s, p4/M, z24.s, z16.s\n"
+ "smax z23.s, p4/M, z23.s, z16.s\n"
+ "smax z22.s, p4/M, z22.s, z16.s\n"
+ "smin z1.s, p4/M, z1.s, z20.s\n"
+ "smin z19.s, p4/M, z19.s, z20.s\n"
+ "smin z0.s, p4/M, z0.s, z20.s\n"
+ "smin z17.s, p4/M, z17.s, z20.s\n"
+ "smin z31.s, p4/M, z31.s, z20.s\n"
+ "smin z18.s, p4/M, z18.s, z20.s\n"
+ "smin z30.s, p4/M, z30.s, z20.s\n"
+ "smin z29.s, p4/M, z29.s, z20.s\n"
+ "smin z28.s, p4/M, z28.s, z20.s\n"
+ "trn1 z19.h, z1.h, z19.h\n"
+ "smin z21.s, p4/M, z21.s, z20.s\n"
+ "smin z27.s, p4/M, z27.s, z20.s\n"
+ "trn1 z17.h, z0.h, z17.h\n"
+ "smin z26.s, p4/M, z26.s, z20.s\n"
+ "smin z25.s, p4/M, z25.s, z20.s\n"
+ "trn1 z18.h, z31.h, z18.h\n"
+ "smin z24.s, p4/M, z24.s, z20.s\n"
+ "smin z23.s, p4/M, z23.s, z20.s\n"
+ "trn1 z16.h, z30.h, z29.h\n"
+ "smin z22.s, p4/M, z22.s, z20.s\n"
+ "trn1 z21.h, z28.h, z21.h\n"
+ "trn1 z20.b, z19.b, z17.b\n"
+ "trn1 z17.h, z27.h, z26.h\n"
+ "trn1 z19.h, z25.h, z24.h\n"
+ "trn1 z18.b, z18.b, z16.b\n"
+ "trn1 z16.h, z23.h, z22.h\n"
+ "st1b { z20.b }, p3, [%x[outptr], x9]\n"
"incb x9, ALL, MUL #4\n"
+ "trn1 z17.b, z21.b, z17.b\n"
+ "trn1 z16.b, z19.b, z16.b\n"
+ "st1b { z18.b }, p2, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
+ "st1b { z17.b }, p1, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p0, [%x[outptr], x26]\n"
+ "incb x26, ALL, MUL #4\n"
+ "whilelt p0.b, x26, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.b, #0x80\n"
+ "mov z6.b, #0x80\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z4\n smax z16.b, p0/M, z16.b, z3.b\n"
- "movprfx z17, z2\n smax z17.b, p0/M, z17.b, z1.b\n"
+ "movprfx z16, z2\n smax z16.b, p4/M, z16.b, z1.b\n"
+ "movprfx z17, z23\n smax z17.b, p4/M, z17.b, z0.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "smax z16.b, p0/M, z16.b, z17.b\n"
"subs x25, x25, #0x1\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "smax z8.b, p0/M, z8.b, z16.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
+ "smax z16.b, p4/M, z16.b, z17.b\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "smax z6.b, p4/M, z6.b, z16.b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z4\n smax z16.b, p0/M, z16.b, z3.b\n"
- "movprfx z17, z2\n smax z17.b, p0/M, z17.b, z1.b\n"
- "smax z16.b, p0/M, z16.b, z17.b\n"
- "smax z8.b, p0/M, z8.b, z16.b\n"
+ "movprfx z16, z2\n smax z16.b, p4/M, z16.b, z1.b\n"
+ "movprfx z17, z23\n smax z17.b, p4/M, z17.b, z0.b\n"
+ "smax z16.b, p4/M, z16.b, z17.b\n"
+ "smax z6.b, p4/M, z6.b, z16.b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "smax z8.b, p0/M, z8.b, z16.b\n"
+ "ld1b { z16.b }, p3/Z, [x20, x9]\n"
+ "smax z6.b, p4/M, z6.b, z16.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- ".inst 0x4508a111 // sshllb z17.h, z8.b, #0x0\n"
- ".inst 0x4508a512 // sshllt z18.h, z8.b, #0x0\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x4510a236 // sshllb z22.s, z17.h, #0x0\n"
- ".inst 0x4510a635 // sshllt z21.s, z17.h, #0x0\n"
+ ".inst 0x4508a0d1 // sshllb z17.h, z6.b, #0x0\n"
+ ".inst 0x4508a4d0 // sshllt z16.h, z6.b, #0x0\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z17.s }, p0/Z, [x20]\n"
- ".inst 0x4510a254 // sshllb z20.s, z18.h, #0x0\n"
- ".inst 0x4510a653 // sshllt z19.s, z18.h, #0x0\n"
- ".inst 0x44828216 // srshl z22.s, p0/M, z22.s, z16.s\n"
- ".inst 0x44828215 // srshl z21.s, p0/M, z21.s, z16.s\n"
- ".inst 0x44828214 // srshl z20.s, p0/M, z20.s, z16.s\n"
- ".inst 0x44828213 // srshl z19.s, p0/M, z19.s, z16.s\n"
- ".inst 0x04b176d6 // sqrdmulh z22.s, z22.s, z17.s\n"
- ".inst 0x04b176b5 // sqrdmulh z21.s, z21.s, z17.s\n"
+ "ld1rw { z24.s }, p4/Z, [x21]\n"
+ "ld1rw { z23.s }, p4/Z, [x20]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x04b17694 // sqrdmulh z20.s, z20.s, z17.s\n"
- ".inst 0x04b17673 // sqrdmulh z19.s, z19.s, z17.s\n"
- "mov z18.s, #0x7f\n"
- ".inst 0x44828216 // srshl z22.s, p0/M, z22.s, z16.s\n"
- ".inst 0x44828215 // srshl z21.s, p0/M, z21.s, z16.s\n"
- ".inst 0x44828214 // srshl z20.s, p0/M, z20.s, z16.s\n"
- ".inst 0x44828213 // srshl z19.s, p0/M, z19.s, z16.s\n"
- "not z16.s, p0/M, z18.s\n"
- "smax z22.s, p0/M, z22.s, z16.s\n"
- "smax z21.s, p0/M, z21.s, z16.s\n"
- "smax z20.s, p0/M, z20.s, z16.s\n"
- "smax z19.s, p0/M, z19.s, z16.s\n"
- "smin z22.s, p0/M, z22.s, z18.s\n"
- "smin z21.s, p0/M, z21.s, z18.s\n"
- "smin z20.s, p0/M, z20.s, z18.s\n"
- "trn1 z17.h, z22.h, z21.h\n"
- "smin z19.s, p0/M, z19.s, z18.s\n"
- "trn1 z16.h, z20.h, z19.h\n"
+ "mov z22.s, #0x7f\n"
+ "ld1rw { z21.s }, p4/Z, [x20]\n"
+ ".inst 0x4510a234 // sshllb z20.s, z17.h, #0x0\n"
+ ".inst 0x4510a631 // sshllt z17.s, z17.h, #0x0\n"
+ ".inst 0x4510a213 // sshllb z19.s, z16.h, #0x0\n"
+ ".inst 0x4510a612 // sshllt z18.s, z16.h, #0x0\n"
+ "not z16.s, p4/M, z22.s\n"
+ ".inst 0x44829314 // srshl z20.s, p4/M, z20.s, z24.s\n"
+ ".inst 0x44829311 // srshl z17.s, p4/M, z17.s, z24.s\n"
+ ".inst 0x44829313 // srshl z19.s, p4/M, z19.s, z24.s\n"
+ ".inst 0x44829312 // srshl z18.s, p4/M, z18.s, z24.s\n"
+ ".inst 0x04b77694 // sqrdmulh z20.s, z20.s, z23.s\n"
+ ".inst 0x04b77631 // sqrdmulh z17.s, z17.s, z23.s\n"
+ ".inst 0x04b77673 // sqrdmulh z19.s, z19.s, z23.s\n"
+ ".inst 0x04b77652 // sqrdmulh z18.s, z18.s, z23.s\n"
+ ".inst 0x448292b4 // srshl z20.s, p4/M, z20.s, z21.s\n"
+ ".inst 0x448292b1 // srshl z17.s, p4/M, z17.s, z21.s\n"
+ ".inst 0x448292b3 // srshl z19.s, p4/M, z19.s, z21.s\n"
+ ".inst 0x448292b2 // srshl z18.s, p4/M, z18.s, z21.s\n"
+ "smax z20.s, p4/M, z20.s, z16.s\n"
+ "smax z17.s, p4/M, z17.s, z16.s\n"
+ "smax z19.s, p4/M, z19.s, z16.s\n"
+ "smax z18.s, p4/M, z18.s, z16.s\n"
+ "smin z20.s, p4/M, z20.s, z22.s\n"
+ "smin z17.s, p4/M, z17.s, z22.s\n"
+ "smin z19.s, p4/M, z19.s, z22.s\n"
+ "smin z18.s, p4/M, z18.s, z22.s\n"
+ "trn1 z17.h, z20.h, z17.h\n"
+ "trn1 z16.h, z19.h, z18.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x9]\n"
"incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
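
One detail worth spelling out in the rewritten epilogues above: sshllb/sshllt split a byte vector into even and odd lanes before the 32-bit requantization, and the trn1 .h / trn1 .b pairs at the end restore the original byte order before st1b. A hedged index-level sketch of that round trip, with the helper and parameter names purely illustrative:

// Hypothetical index model: sshllb/sshllt widening sends byte 4j+0 to the
// "even/even" vector, 4j+1 to "odd/even", 4j+2 to "even/odd", 4j+3 to
// "odd/odd"; the trn1 .h then trn1 .b sequence reassembles the original order.
#include <cstdint>
#include <vector>

static std::vector<int8_t> pack_back(const std::vector<int32_t>& even_even, // bytes 4j+0
                                     const std::vector<int32_t>& odd_even,  // bytes 4j+1
                                     const std::vector<int32_t>& even_odd,  // bytes 4j+2
                                     const std::vector<int32_t>& odd_odd)   // bytes 4j+3
{
    std::vector<int8_t> out(4 * even_even.size());
    for (size_t j = 0; j < even_even.size(); ++j)
    {
        out[4 * j + 0] = static_cast<int8_t>(even_even[j]); // trn1 .h keeps even halfwords,
        out[4 * j + 1] = static_cast<int8_t>(odd_even[j]);  // trn1 .b interleaves even bytes,
        out[4 * j + 2] = static_cast<int8_t>(even_odd[j]);  // together undoing the earlier
        out[4 * j + 3] = static_cast<int8_t>(odd_odd[j]);   // sshllb/sshllt widening
    }
    return out;
}
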
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
index f3f4950a1f..f07acd8734 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -99,11 +99,11 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"cntb x26\n"
"cntb x25, ALL, MUL #2\n"
"cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
+ "whilelt p0.b, x24, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x23, %x[n_valid_cells], #0x1\n"
@@ -128,14 +128,14 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z27.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x25]\n"
+ "ld1b { z25.b }, p0/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x24]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
@@ -145,24 +145,24 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x455c0bb5 // uaddlb z21.h, z29.b, z28.b\n"
".inst 0x455c0fb4 // uaddlt z20.h, z29.b, z28.b\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x455a0b73 // uaddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0f72 // uaddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
".inst 0x45580b31 // uaddlb z17.h, z25.b, z24.b\n"
".inst 0x45580f30 // uaddlt z16.h, z25.b, z24.b\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z29.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x20, x26]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z27.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x25]\n"
".inst 0x4595496b // uaddwb z11.s, z11.s, z21.h\n"
".inst 0x45954d4a // uaddwt z10.s, z10.s, z21.h\n"
+ "ld1b { z25.b }, p0/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x24]\n"
".inst 0x45944929 // uaddwb z9.s, z9.s, z20.h\n"
".inst 0x45944d08 // uaddwt z8.s, z8.s, z20.h\n"
".inst 0x459348e7 // uaddwb z7.s, z7.s, z19.h\n"
@@ -204,17 +204,17 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
- ".inst 0x4508aa17 // ushllb z23.h, z16.b, #0x0\n"
- ".inst 0x4508ae16 // ushllt z22.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p3/Z, [x20, x26]\n"
- "ld1b { z17.b }, p2/Z, [x20, x25]\n"
- ".inst 0x4508aa15 // ushllb z21.h, z16.b, #0x0\n"
- ".inst 0x4508ae14 // ushllt z20.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p1/Z, [x20, x24]\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z19.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z18.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z17.b }, p1/Z, [x20, x25]\n"
+ "ld1b { z16.b }, p0/Z, [x20, x24]\n"
+ ".inst 0x4508aa77 // ushllb z23.h, z19.b, #0x0\n"
+ ".inst 0x4508ae76 // ushllt z22.h, z19.b, #0x0\n"
+ ".inst 0x4508aa55 // ushllb z21.h, z18.b, #0x0\n"
+ ".inst 0x4508ae54 // ushllt z20.h, z18.b, #0x0\n"
".inst 0x4508aa33 // ushllb z19.h, z17.b, #0x0\n"
".inst 0x4508ae32 // ushllt z18.h, z17.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x4508aa11 // ushllb z17.h, z16.b, #0x0\n"
".inst 0x4508ae10 // ushllt z16.h, z16.b, #0x0\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
@@ -235,98 +235,98 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
- "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
- ".inst 0x04b175ef // sqdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x04b175ce // sqdmulh z14.s, z14.s, z17.s\n"
- ".inst 0x04b175ad // sqdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x04b1758c // sqdmulh z12.s, z12.s, z17.s\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- ".inst 0x04b1756b // sqdmulh z11.s, z11.s, z17.s\n"
- ".inst 0x04b1754a // sqdmulh z10.s, z10.s, z17.s\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
- ".inst 0x04b17529 // sqdmulh z9.s, z9.s, z17.s\n"
- ".inst 0x04b17508 // sqdmulh z8.s, z8.s, z17.s\n"
- ".inst 0x4482820b // srshl z11.s, p0/M, z11.s, z16.s\n"
- ".inst 0x4482820a // srshl z10.s, p0/M, z10.s, z16.s\n"
- ".inst 0x04b174e7 // sqdmulh z7.s, z7.s, z17.s\n"
- ".inst 0x04b174c6 // sqdmulh z6.s, z6.s, z17.s\n"
- ".inst 0x44828209 // srshl z9.s, p0/M, z9.s, z16.s\n"
- ".inst 0x44828208 // srshl z8.s, p0/M, z8.s, z16.s\n"
- ".inst 0x04b174a5 // sqdmulh z5.s, z5.s, z17.s\n"
- ".inst 0x04b17484 // sqdmulh z4.s, z4.s, z17.s\n"
- ".inst 0x44828207 // srshl z7.s, p0/M, z7.s, z16.s\n"
- ".inst 0x44828206 // srshl z6.s, p0/M, z6.s, z16.s\n"
- ".inst 0x04b17463 // sqdmulh z3.s, z3.s, z17.s\n"
- ".inst 0x04b17442 // sqdmulh z2.s, z2.s, z17.s\n"
- ".inst 0x44828205 // srshl z5.s, p0/M, z5.s, z16.s\n"
- ".inst 0x44828204 // srshl z4.s, p0/M, z4.s, z16.s\n"
- ".inst 0x04b17421 // sqdmulh z1.s, z1.s, z17.s\n"
- ".inst 0x04b17400 // sqdmulh z0.s, z0.s, z17.s\n"
- ".inst 0x44828203 // srshl z3.s, p0/M, z3.s, z16.s\n"
- ".inst 0x44828202 // srshl z2.s, p0/M, z2.s, z16.s\n"
- ".inst 0x44828201 // srshl z1.s, p0/M, z1.s, z16.s\n"
- ".inst 0x44828200 // srshl z0.s, p0/M, z0.s, z16.s\n"
+ "ld1rw { z18.s }, p4/Z, [%x[rescale_ptr]]\n"
+ "ld1rw { z17.s }, p4/Z, [%x[shift_ptr]]\n"
"mov z16.s, #0x0\n"
- "mov z18.s, #0xff\n"
- "smax z15.s, p0/M, z15.s, z16.s\n"
- "smax z14.s, p0/M, z14.s, z16.s\n"
- "smax z13.s, p0/M, z13.s, z16.s\n"
- "smax z12.s, p0/M, z12.s, z16.s\n"
- "smax z11.s, p0/M, z11.s, z16.s\n"
- "smax z10.s, p0/M, z10.s, z16.s\n"
- "smax z9.s, p0/M, z9.s, z16.s\n"
- "smax z8.s, p0/M, z8.s, z16.s\n"
- "smax z7.s, p0/M, z7.s, z16.s\n"
- "smax z6.s, p0/M, z6.s, z16.s\n"
- "smax z5.s, p0/M, z5.s, z16.s\n"
- "smax z4.s, p0/M, z4.s, z16.s\n"
- "smax z3.s, p0/M, z3.s, z16.s\n"
- "smax z2.s, p0/M, z2.s, z16.s\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z18.s\n"
- "smin z14.s, p0/M, z14.s, z18.s\n"
- "trn1 z17.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z18.s\n"
- "smin z12.s, p0/M, z12.s, z18.s\n"
- "trn1 z16.h, z13.h, z12.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z11.s, p0/M, z11.s, z18.s\n"
- "smin z10.s, p0/M, z10.s, z18.s\n"
- "trn1 z17.h, z11.h, z10.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "smin z9.s, p0/M, z9.s, z18.s\n"
- "smin z8.s, p0/M, z8.s, z18.s\n"
+ "mov z20.s, #0xff\n"
+ ".inst 0x04b275ef // sqdmulh z15.s, z15.s, z18.s\n"
+ ".inst 0x04b275ce // sqdmulh z14.s, z14.s, z18.s\n"
+ ".inst 0x04b275ad // sqdmulh z13.s, z13.s, z18.s\n"
+ ".inst 0x04b2758c // sqdmulh z12.s, z12.s, z18.s\n"
+ ".inst 0x04b2756b // sqdmulh z11.s, z11.s, z18.s\n"
+ ".inst 0x04b2754a // sqdmulh z10.s, z10.s, z18.s\n"
+ ".inst 0x04b27529 // sqdmulh z9.s, z9.s, z18.s\n"
+ ".inst 0x04b27508 // sqdmulh z8.s, z8.s, z18.s\n"
+ ".inst 0x4482922f // srshl z15.s, p4/M, z15.s, z17.s\n"
+ ".inst 0x4482922e // srshl z14.s, p4/M, z14.s, z17.s\n"
+ ".inst 0x04b274e7 // sqdmulh z7.s, z7.s, z18.s\n"
+ ".inst 0x04b274c6 // sqdmulh z6.s, z6.s, z18.s\n"
+ ".inst 0x4482922d // srshl z13.s, p4/M, z13.s, z17.s\n"
+ ".inst 0x4482922c // srshl z12.s, p4/M, z12.s, z17.s\n"
+ ".inst 0x04b274a5 // sqdmulh z5.s, z5.s, z18.s\n"
+ ".inst 0x04b27484 // sqdmulh z4.s, z4.s, z18.s\n"
+ ".inst 0x4482922b // srshl z11.s, p4/M, z11.s, z17.s\n"
+ ".inst 0x4482922a // srshl z10.s, p4/M, z10.s, z17.s\n"
+ ".inst 0x04b27463 // sqdmulh z3.s, z3.s, z18.s\n"
+ ".inst 0x04b27442 // sqdmulh z2.s, z2.s, z18.s\n"
+ ".inst 0x44829229 // srshl z9.s, p4/M, z9.s, z17.s\n"
+ ".inst 0x44829228 // srshl z8.s, p4/M, z8.s, z17.s\n"
+ ".inst 0x04b27421 // sqdmulh z1.s, z1.s, z18.s\n"
+ ".inst 0x04b27400 // sqdmulh z0.s, z0.s, z18.s\n"
+ ".inst 0x44829227 // srshl z7.s, p4/M, z7.s, z17.s\n"
+ ".inst 0x44829226 // srshl z6.s, p4/M, z6.s, z17.s\n"
+ ".inst 0x44829225 // srshl z5.s, p4/M, z5.s, z17.s\n"
+ ".inst 0x44829224 // srshl z4.s, p4/M, z4.s, z17.s\n"
+ ".inst 0x44829223 // srshl z3.s, p4/M, z3.s, z17.s\n"
+ ".inst 0x44829222 // srshl z2.s, p4/M, z2.s, z17.s\n"
+ ".inst 0x44829221 // srshl z1.s, p4/M, z1.s, z17.s\n"
+ ".inst 0x44829220 // srshl z0.s, p4/M, z0.s, z17.s\n"
+ "smax z15.s, p4/M, z15.s, z16.s\n"
+ "smax z14.s, p4/M, z14.s, z16.s\n"
+ "smax z13.s, p4/M, z13.s, z16.s\n"
+ "smax z12.s, p4/M, z12.s, z16.s\n"
+ "smax z11.s, p4/M, z11.s, z16.s\n"
+ "smax z10.s, p4/M, z10.s, z16.s\n"
+ "smax z9.s, p4/M, z9.s, z16.s\n"
+ "smax z8.s, p4/M, z8.s, z16.s\n"
+ "smax z7.s, p4/M, z7.s, z16.s\n"
+ "smax z6.s, p4/M, z6.s, z16.s\n"
+ "smax z5.s, p4/M, z5.s, z16.s\n"
+ "smax z4.s, p4/M, z4.s, z16.s\n"
+ "smax z3.s, p4/M, z3.s, z16.s\n"
+ "smax z2.s, p4/M, z2.s, z16.s\n"
+ "smax z1.s, p4/M, z1.s, z16.s\n"
+ "smax z0.s, p4/M, z0.s, z16.s\n"
+ "smin z15.s, p4/M, z15.s, z20.s\n"
+ "smin z14.s, p4/M, z14.s, z20.s\n"
+ "smin z13.s, p4/M, z13.s, z20.s\n"
+ "smin z12.s, p4/M, z12.s, z20.s\n"
+ "smin z11.s, p4/M, z11.s, z20.s\n"
+ "smin z10.s, p4/M, z10.s, z20.s\n"
+ "smin z9.s, p4/M, z9.s, z20.s\n"
+ "smin z8.s, p4/M, z8.s, z20.s\n"
+ "trn1 z19.h, z15.h, z14.h\n"
+ "smin z7.s, p4/M, z7.s, z20.s\n"
+ "smin z6.s, p4/M, z6.s, z20.s\n"
+ "trn1 z17.h, z13.h, z12.h\n"
+ "smin z5.s, p4/M, z5.s, z20.s\n"
+ "smin z4.s, p4/M, z4.s, z20.s\n"
+ "trn1 z18.h, z11.h, z10.h\n"
+ "smin z3.s, p4/M, z3.s, z20.s\n"
+ "smin z2.s, p4/M, z2.s, z20.s\n"
"trn1 z16.h, z9.h, z8.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z7.s, p0/M, z7.s, z18.s\n"
- "smin z6.s, p0/M, z6.s, z18.s\n"
- "trn1 z17.h, z7.h, z6.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x26]\n"
- "smin z5.s, p0/M, z5.s, z18.s\n"
- "smin z4.s, p0/M, z4.s, z18.s\n"
- "trn1 z16.h, z5.h, z4.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z3.s, p0/M, z3.s, z18.s\n"
- "smin z2.s, p0/M, z2.s, z18.s\n"
- "trn1 z17.h, z3.h, z2.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x25]\n"
- "smin z1.s, p0/M, z1.s, z18.s\n"
- "smin z0.s, p0/M, z0.s, z18.s\n"
+ "smin z1.s, p4/M, z1.s, z20.s\n"
+ "smin z0.s, p4/M, z0.s, z20.s\n"
+ "trn1 z21.h, z7.h, z6.h\n"
+ "trn1 z20.b, z19.b, z17.b\n"
+ "trn1 z17.h, z5.h, z4.h\n"
+ "trn1 z19.h, z3.h, z2.h\n"
+ "trn1 z18.b, z18.b, z16.b\n"
"trn1 z16.h, z1.h, z0.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
- "incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "st1b { z20.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
+ "trn1 z17.b, z21.b, z17.b\n"
+ "trn1 z16.b, z19.b, z16.b\n"
+ "st1b { z18.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
+ "st1b { z17.b }, p1, [%x[outptr], x25]\n"
"incb x25, ALL, MUL #4\n"
+ "st1b { z16.b }, p0, [%x[outptr], x24]\n"
+ "incb x24, ALL, MUL #4\n"
+ "whilelt p0.b, x24, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x23, %x[n_valid_cells], #0x1\n"
@@ -339,21 +339,21 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e0bf1 // uaddlb z17.h, z31.b, z30.b\n"
".inst 0x455e0ff0 // uaddlt z16.h, z31.b, z30.b\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
+ "add x22, x22, #0x10\n"
".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
- "add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x459049ad // uaddwb z13.s, z13.s, z16.h\n"
".inst 0x45904d8c // uaddwt z12.s, z12.s, z16.h\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e0bf1 // uaddlb z17.h, z31.b, z30.b\n"
@@ -367,42 +367,42 @@ void sve_u8_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z16.b }, p3/Z, [x20, x27]\n"
".inst 0x4508aa11 // ushllb z17.h, z16.b, #0x0\n"
".inst 0x4508ae10 // ushllt z16.h, z16.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
".inst 0x459049ad // uaddwb z13.s, z13.s, z16.h\n"
".inst 0x45904d8c // uaddwt z12.s, z12.s, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1rw { z17.s }, p0/Z, [%x[rescale_ptr]]\n"
- "ld1rw { z16.s }, p0/Z, [%x[shift_ptr]]\n"
- ".inst 0x04b175ef // sqdmulh z15.s, z15.s, z17.s\n"
- ".inst 0x04b175ce // sqdmulh z14.s, z14.s, z17.s\n"
- ".inst 0x04b175ad // sqdmulh z13.s, z13.s, z17.s\n"
- ".inst 0x04b1758c // sqdmulh z12.s, z12.s, z17.s\n"
- ".inst 0x4482820f // srshl z15.s, p0/M, z15.s, z16.s\n"
- ".inst 0x4482820e // srshl z14.s, p0/M, z14.s, z16.s\n"
- ".inst 0x4482820d // srshl z13.s, p0/M, z13.s, z16.s\n"
- ".inst 0x4482820c // srshl z12.s, p0/M, z12.s, z16.s\n"
+ "ld1rw { z19.s }, p4/Z, [%x[rescale_ptr]]\n"
+ "ld1rw { z18.s }, p4/Z, [%x[shift_ptr]]\n"
"mov z17.s, #0x0\n"
"mov z16.s, #0xff\n"
- "smax z15.s, p0/M, z15.s, z17.s\n"
- "smax z14.s, p0/M, z14.s, z17.s\n"
- "smax z13.s, p0/M, z13.s, z17.s\n"
- "smax z12.s, p0/M, z12.s, z17.s\n"
- "smin z15.s, p0/M, z15.s, z16.s\n"
- "smin z14.s, p0/M, z14.s, z16.s\n"
+ ".inst 0x04b375ef // sqdmulh z15.s, z15.s, z19.s\n"
+ ".inst 0x04b375ce // sqdmulh z14.s, z14.s, z19.s\n"
+ ".inst 0x04b375ad // sqdmulh z13.s, z13.s, z19.s\n"
+ ".inst 0x04b3758c // sqdmulh z12.s, z12.s, z19.s\n"
+ ".inst 0x4482924f // srshl z15.s, p4/M, z15.s, z18.s\n"
+ ".inst 0x4482924e // srshl z14.s, p4/M, z14.s, z18.s\n"
+ ".inst 0x4482924d // srshl z13.s, p4/M, z13.s, z18.s\n"
+ ".inst 0x4482924c // srshl z12.s, p4/M, z12.s, z18.s\n"
+ "smax z15.s, p4/M, z15.s, z17.s\n"
+ "smax z14.s, p4/M, z14.s, z17.s\n"
+ "smax z13.s, p4/M, z13.s, z17.s\n"
+ "smax z12.s, p4/M, z12.s, z17.s\n"
+ "smin z15.s, p4/M, z15.s, z16.s\n"
+ "smin z14.s, p4/M, z14.s, z16.s\n"
+ "smin z13.s, p4/M, z13.s, z16.s\n"
+ "smin z12.s, p4/M, z12.s, z16.s\n"
"trn1 z17.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z16.s\n"
- "smin z12.s, p0/M, z12.s, z16.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x27]\n"
"incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
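
The unsigned average-pool variant touched above follows the same scheme with a shorter chain: sqdmulh by the value at [rescale_ptr] (no rounding, unlike sqrdmulh), srshl by the value at [shift_ptr], then a clamp to 0..0xff. A minimal scalar sketch under the same caveats (illustrative helper, intermediate saturation omitted):

// Hedged scalar model of the sqdmulh / srshl / smax / smin chain above.
#include <algorithm>
#include <cstdint>

static uint8_t requantize_u8_avg(int32_t acc, int32_t rescale, int32_t shift)
{
    int64_t v = (static_cast<int64_t>(acc) * rescale) >> 31; // sqdmulh: doubling high half
    if (shift < 0)                                           // srshl: negative shift rounds right
        v = (v + (1LL << (-shift - 1))) >> -shift;
    else
        v <<= shift;
    return static_cast<uint8_t>(std::clamp<int64_t>(v, 0, 255)); // smax with 0, smin with 0xff
}
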
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
index 8612555bfb..74dfac4133 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -66,22 +66,22 @@ void sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"ldr x15, [%x[args], %[offsetof_n_channels]]\n"
"ldr x21, [%x[args], %[offsetof_outptrs]]\n"
"mov x14, #0x0\n"
- "whilelt p0.b, x14, x15\n"
- "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
- "ldp x13, x12, [x21, #0x0]\n"
"ptrue p2.b\n"
- "mov x11, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_inptrs]]\n"
+ "mov x13, #0x0\n"
+ "ldp x12, x11, [x21, #0x0]\n"
"ldp x10, x9, [x21, #0x10]\n"
+ "whilelt p0.b, x14, x15\n"
"ldp x28, x27, [x20, #0x0]\n"
"ldp x26, x25, [x20, #0x10]\n"
"ldp x24, x23, [x20, #0x20]\n"
"ldp x22, x21, [x20, #0x30]\n"
"ldr x20, [x20, #0x40]\n"
"ld1b { z31.b }, p0/Z, [x27, x14]\n"
- "ld1b { z30.b }, p0/Z, [x24, x14]\n"
- "ld1b { z29.b }, p0/Z, [x21, x14]\n"
+ "ld1b { z30.b }, p0/Z, [x28, x14]\n"
+ "ld1b { z29.b }, p0/Z, [x24, x14]\n"
"ld1b { z28.b }, p0/Z, [x25, x14]\n"
- "ld1b { z27.b }, p0/Z, [x28, x14]\n"
+ "ld1b { z27.b }, p0/Z, [x21, x14]\n"
"ld1b { z26.b }, p0/Z, [x26, x14]\n"
"ld1b { z25.b }, p0/Z, [x23, x14]\n"
"ld1b { z24.b }, p0/Z, [x22, x14]\n"
@@ -90,50 +90,50 @@ void sve_u8_nhwc_max_2x2_s1_output2x2_depthfirst_impl(
"whilelt p1.b, x14, x15\n"
"b.none 2f\n"
"1:" // Vector: Loop
- "movprfx z22, z31\n umax z22.b, p2/M, z22.b, z30.b\n"
- "movprfx z21, z30\n umax z21.b, p2/M, z21.b, z29.b\n"
+ "movprfx z22, z31\n umax z22.b, p2/M, z22.b, z29.b\n"
+ "movprfx z21, z29\n umax z21.b, p2/M, z21.b, z27.b\n"
"ld1b { z31.b }, p1/Z, [x27, x14]\n"
- "ld1b { z30.b }, p1/Z, [x24, x14]\n"
- "movprfx z20, z28\n umax z20.b, p2/M, z20.b, z27.b\n"
- "movprfx z19, z26\n umax z19.b, p2/M, z19.b, z25.b\n"
- "ld1b { z29.b }, p1/Z, [x21, x14]\n"
- "ld1b { z27.b }, p1/Z, [x28, x14]\n"
- "movprfx z17, z28\n umax z17.b, p2/M, z17.b, z24.b\n"
- "movprfx z18, z25\n umax z18.b, p2/M, z18.b, z23.b\n"
+ "ld1b { z29.b }, p1/Z, [x24, x14]\n"
+ "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z30.b\n"
+ "movprfx z17, z26\n umax z17.b, p2/M, z17.b, z25.b\n"
+ "ld1b { z27.b }, p1/Z, [x21, x14]\n"
+ "ld1b { z30.b }, p1/Z, [x28, x14]\n"
+ "movprfx z16, z28\n umax z16.b, p2/M, z16.b, z24.b\n"
+ "movprfx z20, z25\n umax z20.b, p2/M, z20.b, z23.b\n"
"ld1b { z28.b }, p1/Z, [x25, x14]\n"
"ld1b { z26.b }, p1/Z, [x26, x14]\n"
"ld1b { z25.b }, p1/Z, [x23, x14]\n"
"ld1b { z24.b }, p1/Z, [x22, x14]\n"
- "whilelt p0.b, x11, x15\n"
- "movprfx z16, z22\n umax z16.b, p2/M, z16.b, z20.b\n"
+ "whilelt p0.b, x13, x15\n"
"ld1b { z23.b }, p1/Z, [x20, x14]\n"
"incw x14\n"
+ "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z18.b\n"
+ "movprfx z18, z17\n umax z18.b, p2/M, z18.b, z22.b\n"
+ "movprfx z17, z16\n umax z17.b, p2/M, z17.b, z21.b\n"
+ "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z20.b\n"
"whilelt p1.b, x14, x15\n"
- "st1b { z16.b }, p0, [x13, x11]\n"
- "movprfx z16, z19\n umax z16.b, p2/M, z16.b, z22.b\n"
- "umax z17.b, p2/M, z17.b, z21.b\n"
- "st1b { z16.b }, p0, [x12, x11]\n"
- "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z18.b\n"
- "st1b { z17.b }, p0, [x10, x11]\n"
- "st1b { z16.b }, p0, [x9, x11]\n"
- "incw x11\n"
+ "st1b { z19.b }, p0, [x12, x13]\n"
+ "st1b { z18.b }, p0, [x11, x13]\n"
+ "st1b { z17.b }, p0, [x10, x13]\n"
+ "st1b { z16.b }, p0, [x9, x13]\n"
+ "incw x13\n"
"b.any 1b\n"
"2:" // Vector: Tail
- "movprfx z22, z31\n umax z22.b, p2/M, z22.b, z30.b\n"
- "movprfx z21, z30\n umax z21.b, p2/M, z21.b, z29.b\n"
- "movprfx z20, z28\n umax z20.b, p2/M, z20.b, z27.b\n"
- "movprfx z19, z26\n umax z19.b, p2/M, z19.b, z25.b\n"
- "movprfx z17, z28\n umax z17.b, p2/M, z17.b, z24.b\n"
- "movprfx z18, z25\n umax z18.b, p2/M, z18.b, z23.b\n"
- "whilelt p0.b, x11, x15\n"
- "movprfx z16, z22\n umax z16.b, p2/M, z16.b, z20.b\n"
- "st1b { z16.b }, p0, [x13, x11]\n"
- "movprfx z16, z19\n umax z16.b, p2/M, z16.b, z22.b\n"
- "umax z17.b, p2/M, z17.b, z21.b\n"
- "st1b { z16.b }, p0, [x12, x11]\n"
- "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z18.b\n"
- "st1b { z17.b }, p0, [x10, x11]\n"
- "st1b { z16.b }, p0, [x9, x11]\n"
+ "movprfx z22, z31\n umax z22.b, p2/M, z22.b, z29.b\n"
+ "movprfx z21, z29\n umax z21.b, p2/M, z21.b, z27.b\n"
+ "movprfx z18, z28\n umax z18.b, p2/M, z18.b, z30.b\n"
+ "movprfx z17, z26\n umax z17.b, p2/M, z17.b, z25.b\n"
+ "movprfx z16, z28\n umax z16.b, p2/M, z16.b, z24.b\n"
+ "movprfx z20, z25\n umax z20.b, p2/M, z20.b, z23.b\n"
+ "whilelt p0.b, x13, x15\n"
+ "movprfx z19, z22\n umax z19.b, p2/M, z19.b, z18.b\n"
+ "movprfx z18, z17\n umax z18.b, p2/M, z18.b, z22.b\n"
+ "movprfx z17, z16\n umax z17.b, p2/M, z17.b, z21.b\n"
+ "movprfx z16, z21\n umax z16.b, p2/M, z16.b, z20.b\n"
+ "st1b { z19.b }, p0, [x12, x13]\n"
+ "st1b { z18.b }, p0, [x11, x13]\n"
+ "st1b { z17.b }, p0, [x10, x13]\n"
+ "st1b { z16.b }, p0, [x9, x13]\n"
:
: [args] "r" (&args), [offsetof_inptrs] "I" (offsetof(KernelArgs, inptrs)), [offsetof_n_channels] "I" (offsetof(KernelArgs, n_channels)), [offsetof_outptrs] "I" (offsetof(KernelArgs, outptrs))
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
index be0eb398ae..340a35a5f8 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,176 +44,176 @@ void sve_u8_nhwc_max_generic_depthfirst_impl(
"cntb x28\n"
"cntb x27, ALL, MUL #2\n"
"cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
+ "whilelt p2.b, x28, %x[n_channels]\n"
+ "whilelt p1.b, x27, %x[n_channels]\n"
+ "whilelt p0.b, x26, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.b, #0x0\n"
- "mov z7.b, #0x0\n"
- "mov x24, %x[inptrs]\n"
"mov z6.b, #0x0\n"
"mov z5.b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
+ "mov z4.b, #0x0\n"
+ "mov z3.b, #0x0\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
- "ld1b { z0.b }, p3/Z, [x23, x28]\n"
- "ld1b { z31.b }, p3/Z, [x22, x28]\n"
- "ld1b { z22.b }, p3/Z, [x21, x28]\n"
- "ld1b { z30.b }, p3/Z, [x20, x28]\n"
- "ld1b { z29.b }, p2/Z, [x23, x27]\n"
- "ld1b { z28.b }, p2/Z, [x22, x27]\n"
- "ld1b { z21.b }, p2/Z, [x21, x27]\n"
- "ld1b { z27.b }, p2/Z, [x20, x27]\n"
- "ld1b { z26.b }, p1/Z, [x23, x26]\n"
- "ld1b { z25.b }, p1/Z, [x22, x26]\n"
- "ld1b { z20.b }, p1/Z, [x21, x26]\n"
- "ld1b { z24.b }, p1/Z, [x20, x26]\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p2/Z, [x23, x28]\n"
+ "ld1b { z30.b }, p2/Z, [x22, x28]\n"
+ "ld1b { z22.b }, p2/Z, [x21, x28]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x28]\n"
+ "ld1b { z28.b }, p1/Z, [x23, x27]\n"
+ "ld1b { z27.b }, p1/Z, [x22, x27]\n"
+ "ld1b { z21.b }, p1/Z, [x21, x27]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x27]\n"
+ "ld1b { z16.b }, p0/Z, [x23, x26]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x26]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x26]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
- "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
+ "movprfx z19, z2\n umax z19.b, p4/M, z19.b, z1.b\n"
+ "umax z23.b, p4/M, z23.b, z0.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
- "umax z22.b, p0/M, z22.b, z30.b\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
- "umax z21.b, p0/M, z21.b, z27.b\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
- "movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
- "umax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z0.b }, p3/Z, [x23, x28]\n"
- "ld1b { z31.b }, p3/Z, [x22, x28]\n"
- "umax z19.b, p0/M, z19.b, z23.b\n"
- "umax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z22.b }, p3/Z, [x21, x28]\n"
- "ld1b { z30.b }, p3/Z, [x20, x28]\n"
- "umax z17.b, p0/M, z17.b, z21.b\n"
- "umax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z29.b }, p2/Z, [x23, x27]\n"
- "ld1b { z28.b }, p2/Z, [x22, x27]\n"
+ "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
+ "umax z22.b, p4/M, z22.b, z29.b\n"
+ "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
+ "umax z21.b, p4/M, z21.b, z26.b\n"
+ "umax z16.b, p4/M, z16.b, z25.b\n"
+ "umax z20.b, p4/M, z20.b, z24.b\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "umax z19.b, p4/M, z19.b, z23.b\n"
+ "umax z18.b, p4/M, z18.b, z22.b\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "umax z17.b, p4/M, z17.b, z21.b\n"
"subs x25, x25, #0x1\n"
- "umax z8.b, p0/M, z8.b, z19.b\n"
- "ld1b { z21.b }, p2/Z, [x21, x27]\n"
- "ld1b { z27.b }, p2/Z, [x20, x27]\n"
- "umax z7.b, p0/M, z7.b, z18.b\n"
- "umax z6.b, p0/M, z6.b, z17.b\n"
- "ld1b { z26.b }, p1/Z, [x23, x26]\n"
- "ld1b { z25.b }, p1/Z, [x22, x26]\n"
- "umax z5.b, p0/M, z5.b, z16.b\n"
+ "ld1b { z31.b }, p2/Z, [x23, x28]\n"
+ "ld1b { z30.b }, p2/Z, [x22, x28]\n"
+ "umax z16.b, p4/M, z16.b, z20.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z20.b }, p1/Z, [x21, x26]\n"
- "ld1b { z24.b }, p1/Z, [x20, x26]\n"
+ "ld1b { z22.b }, p2/Z, [x21, x28]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x28]\n"
+ "umax z6.b, p4/M, z6.b, z19.b\n"
+ "umax z5.b, p4/M, z5.b, z18.b\n"
+ "ld1b { z28.b }, p1/Z, [x23, x27]\n"
+ "ld1b { z27.b }, p1/Z, [x22, x27]\n"
+ "umax z4.b, p4/M, z4.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x21, x27]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x27]\n"
+ "umax z3.b, p4/M, z3.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x23, x26]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x26]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x26]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x26]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
- "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
- "movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
- "umax z22.b, p0/M, z22.b, z30.b\n"
- "movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
- "umax z21.b, p0/M, z21.b, z27.b\n"
- "movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
- "umax z20.b, p0/M, z20.b, z24.b\n"
- "umax z19.b, p0/M, z19.b, z23.b\n"
- "umax z18.b, p0/M, z18.b, z22.b\n"
- "umax z17.b, p0/M, z17.b, z21.b\n"
- "umax z16.b, p0/M, z16.b, z20.b\n"
- "umax z8.b, p0/M, z8.b, z19.b\n"
- "umax z7.b, p0/M, z7.b, z18.b\n"
- "umax z6.b, p0/M, z6.b, z17.b\n"
- "umax z5.b, p0/M, z5.b, z16.b\n"
+ "movprfx z19, z2\n umax z19.b, p4/M, z19.b, z1.b\n"
+ "umax z23.b, p4/M, z23.b, z0.b\n"
+ "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
+ "umax z22.b, p4/M, z22.b, z29.b\n"
+ "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
+ "umax z21.b, p4/M, z21.b, z26.b\n"
+ "umax z16.b, p4/M, z16.b, z25.b\n"
+ "umax z20.b, p4/M, z20.b, z24.b\n"
+ "umax z19.b, p4/M, z19.b, z23.b\n"
+ "umax z18.b, p4/M, z18.b, z22.b\n"
+ "umax z17.b, p4/M, z17.b, z21.b\n"
+ "umax z16.b, p4/M, z16.b, z20.b\n"
+ "umax z6.b, p4/M, z6.b, z19.b\n"
+ "umax z5.b, p4/M, z5.b, z18.b\n"
+ "umax z4.b, p4/M, z4.b, z17.b\n"
+ "umax z3.b, p4/M, z3.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "umax z8.b, p0/M, z8.b, z16.b\n"
- "ld1b { z17.b }, p3/Z, [x20, x28]\n"
- "ld1b { z16.b }, p2/Z, [x20, x27]\n"
- "umax z7.b, p0/M, z7.b, z17.b\n"
- "umax z6.b, p0/M, z6.b, z16.b\n"
- "ld1b { z16.b }, p1/Z, [x20, x26]\n"
- "umax z5.b, p0/M, z5.b, z16.b\n"
+ "ld1b { z19.b }, p3/Z, [x20, x9]\n"
+ "ld1b { z18.b }, p2/Z, [x20, x28]\n"
+ "ld1b { z17.b }, p1/Z, [x20, x27]\n"
+ "ld1b { z16.b }, p0/Z, [x20, x26]\n"
+ "umax z6.b, p4/M, z6.b, z19.b\n"
+ "umax z5.b, p4/M, z5.b, z18.b\n"
+ "umax z4.b, p4/M, z4.b, z17.b\n"
+ "umax z3.b, p4/M, z3.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "st1b { z8.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z6.b }, p3, [%x[outptr], x9]\n"
"incb x9, ALL, MUL #4\n"
- "st1b { z7.b }, p3, [%x[outptr], x28]\n"
+ "st1b { z5.b }, p2, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
- "st1b { z6.b }, p2, [%x[outptr], x27]\n"
+ "st1b { z4.b }, p1, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
- "st1b { z5.b }, p1, [%x[outptr], x26]\n"
+ "st1b { z3.b }, p0, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "whilelt p0.b, x26, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.b, #0x0\n"
+ "mov z6.b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z4\n umax z16.b, p0/M, z16.b, z3.b\n"
- "movprfx z17, z2\n umax z17.b, p0/M, z17.b, z1.b\n"
+ "movprfx z16, z2\n umax z16.b, p4/M, z16.b, z1.b\n"
+ "movprfx z17, z23\n umax z17.b, p4/M, z17.b, z0.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "umax z16.b, p0/M, z16.b, z17.b\n"
"subs x25, x25, #0x1\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "umax z8.b, p0/M, z8.b, z16.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
+ "umax z16.b, p4/M, z16.b, z17.b\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "umax z6.b, p4/M, z6.b, z16.b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z4\n umax z16.b, p0/M, z16.b, z3.b\n"
- "movprfx z17, z2\n umax z17.b, p0/M, z17.b, z1.b\n"
- "umax z16.b, p0/M, z16.b, z17.b\n"
- "umax z8.b, p0/M, z8.b, z16.b\n"
+ "movprfx z16, z2\n umax z16.b, p4/M, z16.b, z1.b\n"
+ "movprfx z17, z23\n umax z17.b, p4/M, z17.b, z0.b\n"
+ "umax z16.b, p4/M, z16.b, z17.b\n"
+ "umax z6.b, p4/M, z6.b, z16.b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "umax z8.b, p0/M, z8.b, z16.b\n"
+ "ld1b { z16.b }, p3/Z, [x20, x9]\n"
+ "umax z6.b, p4/M, z6.b, z16.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "st1b { z8.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z6.b }, p3, [%x[outptr], x9]\n"
"incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [outptr] "r" (outptr)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
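[Note] The generic max kernel above keeps a running vector maximum per channel block (initialised to zero, valid for unsigned data), consuming four input pointers per loop iteration across up to four vector-widths of channels; the predicate and register reshuffle does not change that reduction. A minimal scalar sketch of the same computation, assuming inptrs holds n_valid_cells row pointers of n_channels bytes each (hypothetical standalone names):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Generic NHWC max pooling: reduce n_valid_cells input rows per channel.
static void max_generic(const uint8_t *const *inptrs, size_t n_valid_cells,
                        size_t n_channels, uint8_t *outptr)
{
    for (size_t c = 0; c < n_channels; ++c)
    {
        uint8_t acc = 0; // matches the "mov z6.b, #0x0" initial accumulator
        for (size_t i = 0; i < n_valid_cells; ++i)
            acc = std::max(acc, inptrs[i][c]);
        outptr[c] = acc;
    }
}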
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
index e8339a2cd9..db90c8a3a2 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_avg_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -123,20 +123,20 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"cntb x26\n"
"cntb x25, ALL, MUL #2\n"
"cntb x24, ALL, MUL #3\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
- "whilelt p3.b, x26, %x[n_channels]\n"
- "whilelt p2.b, x25, %x[n_channels]\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
+ "whilelt p2.b, x26, %x[n_channels]\n"
+ "whilelt p1.b, x25, %x[n_channels]\n"
+ "whilelt p0.b, x24, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
- "ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
+ "ld1rw { z15.s }, p4/Z, [%x[accumulator_init]]\n"
"lsr x23, %x[n_valid_cells], #0x1\n"
+ "mov x22, %x[inptrs]\n"
"mov z14.d, z15.d\n"
"mov z13.d, z15.d\n"
"mov z12.d, z15.d\n"
"mov z11.d, z15.d\n"
- "mov x22, %x[inptrs]\n"
"mov z10.d, z15.d\n"
"mov z9.d, z15.d\n"
"mov z8.d, z15.d\n"
@@ -152,14 +152,14 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z29.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z27.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x25]\n"
+ "ld1b { z25.b }, p0/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x24]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 2 inputs loop
".inst 0x455e0bf7 // uaddlb z23.h, z31.b, z30.b\n"
@@ -169,24 +169,24 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x455c0bb5 // uaddlb z21.h, z29.b, z28.b\n"
".inst 0x455c0fb4 // uaddlt z20.h, z29.b, z28.b\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x455a0b73 // uaddlb z19.h, z27.b, z26.b\n"
".inst 0x455a0f72 // uaddlt z18.h, z27.b, z26.b\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
- "ld1b { z29.b }, p3/Z, [x21, x26]\n"
".inst 0x45580b31 // uaddlb z17.h, z25.b, z24.b\n"
".inst 0x45580f30 // uaddlt z16.h, z25.b, z24.b\n"
- "ld1b { z28.b }, p3/Z, [x20, x26]\n"
- "ld1b { z27.b }, p2/Z, [x21, x25]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
".inst 0x45974dce // uaddwt z14.s, z14.s, z23.h\n"
- "ld1b { z26.b }, p2/Z, [x20, x25]\n"
- "ld1b { z25.b }, p1/Z, [x21, x24]\n"
+ "ld1b { z29.b }, p2/Z, [x21, x26]\n"
+ "ld1b { z28.b }, p2/Z, [x20, x26]\n"
".inst 0x459649ad // uaddwb z13.s, z13.s, z22.h\n"
".inst 0x45964d8c // uaddwt z12.s, z12.s, z22.h\n"
- "ld1b { z24.b }, p1/Z, [x20, x24]\n"
+ "ld1b { z27.b }, p1/Z, [x21, x25]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x25]\n"
".inst 0x4595496b // uaddwb z11.s, z11.s, z21.h\n"
".inst 0x45954d4a // uaddwt z10.s, z10.s, z21.h\n"
+ "ld1b { z25.b }, p0/Z, [x21, x24]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x24]\n"
".inst 0x45944929 // uaddwb z9.s, z9.s, z20.h\n"
".inst 0x45944d08 // uaddwt z8.s, z8.s, z20.h\n"
".inst 0x459348e7 // uaddwb z7.s, z7.s, z19.h\n"
@@ -228,17 +228,17 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
- ".inst 0x4508aa17 // ushllb z23.h, z16.b, #0x0\n"
- ".inst 0x4508ae16 // ushllt z22.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p3/Z, [x20, x26]\n"
- "ld1b { z17.b }, p2/Z, [x20, x25]\n"
- ".inst 0x4508aa15 // ushllb z21.h, z16.b, #0x0\n"
- ".inst 0x4508ae14 // ushllt z20.h, z16.b, #0x0\n"
- "ld1b { z16.b }, p1/Z, [x20, x24]\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z19.b }, p3/Z, [x20, x27]\n"
+ "ld1b { z18.b }, p2/Z, [x20, x26]\n"
+ "ld1b { z17.b }, p1/Z, [x20, x25]\n"
+ "ld1b { z16.b }, p0/Z, [x20, x24]\n"
+ ".inst 0x4508aa77 // ushllb z23.h, z19.b, #0x0\n"
+ ".inst 0x4508ae76 // ushllt z22.h, z19.b, #0x0\n"
+ ".inst 0x4508aa55 // ushllb z21.h, z18.b, #0x0\n"
+ ".inst 0x4508ae54 // ushllt z20.h, z18.b, #0x0\n"
".inst 0x4508aa33 // ushllb z19.h, z17.b, #0x0\n"
".inst 0x4508ae32 // ushllt z18.h, z17.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x4508aa11 // ushllb z17.h, z16.b, #0x0\n"
".inst 0x4508ae10 // ushllt z16.h, z16.b, #0x0\n"
".inst 0x459749ef // uaddwb z15.s, z15.s, z23.h\n"
@@ -259,160 +259,160 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
".inst 0x45904c00 // uaddwt z0.s, z0.s, z16.h\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "ld1rw { z18.s }, p0/Z, [%x[left_shift]]\n"
- "ld1rw { z16.s }, p0/Z, [%x[combined_rescale_value]]\n"
- ".inst 0x4482824f // srshl z15.s, p0/M, z15.s, z18.s\n"
- ".inst 0x4482824e // srshl z14.s, p0/M, z14.s, z18.s\n"
- ".inst 0x4482824d // srshl z13.s, p0/M, z13.s, z18.s\n"
- ".inst 0x4482824c // srshl z12.s, p0/M, z12.s, z18.s\n"
- "ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
- ".inst 0x04b075ef // sqrdmulh z15.s, z15.s, z16.s\n"
- ".inst 0x4482824b // srshl z11.s, p0/M, z11.s, z18.s\n"
- ".inst 0x4482824a // srshl z10.s, p0/M, z10.s, z18.s\n"
- ".inst 0x04b075ce // sqrdmulh z14.s, z14.s, z16.s\n"
- ".inst 0x04b075ad // sqrdmulh z13.s, z13.s, z16.s\n"
- ".inst 0x44828249 // srshl z9.s, p0/M, z9.s, z18.s\n"
- ".inst 0x44828248 // srshl z8.s, p0/M, z8.s, z18.s\n"
- ".inst 0x04b0758c // sqrdmulh z12.s, z12.s, z16.s\n"
- ".inst 0x04b0756b // sqrdmulh z11.s, z11.s, z16.s\n"
- ".inst 0x44828247 // srshl z7.s, p0/M, z7.s, z18.s\n"
- ".inst 0x44828246 // srshl z6.s, p0/M, z6.s, z18.s\n"
- ".inst 0x04b0754a // sqrdmulh z10.s, z10.s, z16.s\n"
- ".inst 0x04b07529 // sqrdmulh z9.s, z9.s, z16.s\n"
- ".inst 0x44828245 // srshl z5.s, p0/M, z5.s, z18.s\n"
- ".inst 0x44828244 // srshl z4.s, p0/M, z4.s, z18.s\n"
- ".inst 0x04b07508 // sqrdmulh z8.s, z8.s, z16.s\n"
- ".inst 0x04b074e7 // sqrdmulh z7.s, z7.s, z16.s\n"
- ".inst 0x44828243 // srshl z3.s, p0/M, z3.s, z18.s\n"
- ".inst 0x44828242 // srshl z2.s, p0/M, z2.s, z18.s\n"
- ".inst 0x04b074c6 // sqrdmulh z6.s, z6.s, z16.s\n"
- ".inst 0x04b074a5 // sqrdmulh z5.s, z5.s, z16.s\n"
- ".inst 0x44828241 // srshl z1.s, p0/M, z1.s, z18.s\n"
- ".inst 0x44828240 // srshl z0.s, p0/M, z0.s, z18.s\n"
- ".inst 0x04b07484 // sqrdmulh z4.s, z4.s, z16.s\n"
- ".inst 0x04b07463 // sqrdmulh z3.s, z3.s, z16.s\n"
- ".inst 0x04b07442 // sqrdmulh z2.s, z2.s, z16.s\n"
- ".inst 0x04b07421 // sqrdmulh z1.s, z1.s, z16.s\n"
+ "ld1rw { z21.s }, p4/Z, [%x[left_shift]]\n"
+ "ld1rw { z19.s }, p4/Z, [%x[combined_rescale_value]]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- ".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
- ".inst 0x04b07400 // sqrdmulh z0.s, z0.s, z16.s\n"
- ".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
- ".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x4482822c // srshl z12.s, p0/M, z12.s, z17.s\n"
- ".inst 0x4482822b // srshl z11.s, p0/M, z11.s, z17.s\n"
+ "mov z18.s, #0x0\n"
+ "ld1rw { z17.s }, p4/Z, [%x[right_shift]]\n"
+ "ld1rw { z16.s }, p4/Z, [x20]\n"
+ "mov z20.s, #0xff\n"
+ ".inst 0x448292af // srshl z15.s, p4/M, z15.s, z21.s\n"
+ ".inst 0x448292ae // srshl z14.s, p4/M, z14.s, z21.s\n"
+ ".inst 0x448292ad // srshl z13.s, p4/M, z13.s, z21.s\n"
+ ".inst 0x448292ac // srshl z12.s, p4/M, z12.s, z21.s\n"
+ ".inst 0x448292ab // srshl z11.s, p4/M, z11.s, z21.s\n"
+ ".inst 0x448292aa // srshl z10.s, p4/M, z10.s, z21.s\n"
+ ".inst 0x448292a9 // srshl z9.s, p4/M, z9.s, z21.s\n"
+ ".inst 0x04b375ef // sqrdmulh z15.s, z15.s, z19.s\n"
+ ".inst 0x448292a8 // srshl z8.s, p4/M, z8.s, z21.s\n"
+ ".inst 0x448292a7 // srshl z7.s, p4/M, z7.s, z21.s\n"
+ ".inst 0x04b375ce // sqrdmulh z14.s, z14.s, z19.s\n"
+ ".inst 0x04b375ad // sqrdmulh z13.s, z13.s, z19.s\n"
+ ".inst 0x448292a6 // srshl z6.s, p4/M, z6.s, z21.s\n"
+ ".inst 0x448292a5 // srshl z5.s, p4/M, z5.s, z21.s\n"
+ ".inst 0x04b3758c // sqrdmulh z12.s, z12.s, z19.s\n"
+ ".inst 0x04b3756b // sqrdmulh z11.s, z11.s, z19.s\n"
+ ".inst 0x448292a4 // srshl z4.s, p4/M, z4.s, z21.s\n"
+ ".inst 0x448292a3 // srshl z3.s, p4/M, z3.s, z21.s\n"
+ ".inst 0x04b3754a // sqrdmulh z10.s, z10.s, z19.s\n"
+ ".inst 0x04b37529 // sqrdmulh z9.s, z9.s, z19.s\n"
+ ".inst 0x448292a2 // srshl z2.s, p4/M, z2.s, z21.s\n"
+ ".inst 0x448292a1 // srshl z1.s, p4/M, z1.s, z21.s\n"
+ ".inst 0x04b37508 // sqrdmulh z8.s, z8.s, z19.s\n"
+ ".inst 0x04b374e7 // sqrdmulh z7.s, z7.s, z19.s\n"
+ ".inst 0x448292a0 // srshl z0.s, p4/M, z0.s, z21.s\n"
+ ".inst 0x04b374c6 // sqrdmulh z6.s, z6.s, z19.s\n"
+ ".inst 0x04b374a5 // sqrdmulh z5.s, z5.s, z19.s\n"
+ ".inst 0x4482922f // srshl z15.s, p4/M, z15.s, z17.s\n"
+ ".inst 0x04b37484 // sqrdmulh z4.s, z4.s, z19.s\n"
+ ".inst 0x04b37463 // sqrdmulh z3.s, z3.s, z19.s\n"
+ ".inst 0x4482922e // srshl z14.s, p4/M, z14.s, z17.s\n"
+ ".inst 0x4482922d // srshl z13.s, p4/M, z13.s, z17.s\n"
+ ".inst 0x04b37442 // sqrdmulh z2.s, z2.s, z19.s\n"
+ ".inst 0x04b37421 // sqrdmulh z1.s, z1.s, z19.s\n"
+ ".inst 0x4482922c // srshl z12.s, p4/M, z12.s, z17.s\n"
+ ".inst 0x4482922b // srshl z11.s, p4/M, z11.s, z17.s\n"
+ ".inst 0x04b37400 // sqrdmulh z0.s, z0.s, z19.s\n"
+ ".inst 0x4482922a // srshl z10.s, p4/M, z10.s, z17.s\n"
+ ".inst 0x44829229 // srshl z9.s, p4/M, z9.s, z17.s\n"
"add z15.s, z15.s, z16.s\n"
+ ".inst 0x44829228 // srshl z8.s, p4/M, z8.s, z17.s\n"
+ ".inst 0x44829227 // srshl z7.s, p4/M, z7.s, z17.s\n"
"add z14.s, z14.s, z16.s\n"
- ".inst 0x4482822a // srshl z10.s, p0/M, z10.s, z17.s\n"
- ".inst 0x44828229 // srshl z9.s, p0/M, z9.s, z17.s\n"
"add z13.s, z13.s, z16.s\n"
+ ".inst 0x44829226 // srshl z6.s, p4/M, z6.s, z17.s\n"
+ ".inst 0x44829225 // srshl z5.s, p4/M, z5.s, z17.s\n"
"add z12.s, z12.s, z16.s\n"
- ".inst 0x44828228 // srshl z8.s, p0/M, z8.s, z17.s\n"
- ".inst 0x44828227 // srshl z7.s, p0/M, z7.s, z17.s\n"
"add z11.s, z11.s, z16.s\n"
+ ".inst 0x44829224 // srshl z4.s, p4/M, z4.s, z17.s\n"
+ ".inst 0x44829223 // srshl z3.s, p4/M, z3.s, z17.s\n"
"add z10.s, z10.s, z16.s\n"
- ".inst 0x44828226 // srshl z6.s, p0/M, z6.s, z17.s\n"
- ".inst 0x44828225 // srshl z5.s, p0/M, z5.s, z17.s\n"
"add z9.s, z9.s, z16.s\n"
+ ".inst 0x44829222 // srshl z2.s, p4/M, z2.s, z17.s\n"
+ ".inst 0x44829221 // srshl z1.s, p4/M, z1.s, z17.s\n"
"add z8.s, z8.s, z16.s\n"
- ".inst 0x44828224 // srshl z4.s, p0/M, z4.s, z17.s\n"
- ".inst 0x44828223 // srshl z3.s, p0/M, z3.s, z17.s\n"
"add z7.s, z7.s, z16.s\n"
+ ".inst 0x44829220 // srshl z0.s, p4/M, z0.s, z17.s\n"
"add z6.s, z6.s, z16.s\n"
- ".inst 0x44828222 // srshl z2.s, p0/M, z2.s, z17.s\n"
- ".inst 0x44828221 // srshl z1.s, p0/M, z1.s, z17.s\n"
"add z5.s, z5.s, z16.s\n"
+ "smax z15.s, p4/M, z15.s, z18.s\n"
"add z4.s, z4.s, z16.s\n"
- ".inst 0x44828220 // srshl z0.s, p0/M, z0.s, z17.s\n"
"add z3.s, z3.s, z16.s\n"
+ "smax z14.s, p4/M, z14.s, z18.s\n"
+ "smax z13.s, p4/M, z13.s, z18.s\n"
"add z2.s, z2.s, z16.s\n"
"add z1.s, z1.s, z16.s\n"
+ "smax z12.s, p4/M, z12.s, z18.s\n"
+ "smax z11.s, p4/M, z11.s, z18.s\n"
"add z0.s, z0.s, z16.s\n"
- "mov z16.s, #0x0\n"
- "smax z15.s, p0/M, z15.s, z16.s\n"
- "smax z14.s, p0/M, z14.s, z16.s\n"
- "mov z18.s, #0xff\n"
- "smax z13.s, p0/M, z13.s, z16.s\n"
- "smax z12.s, p0/M, z12.s, z16.s\n"
- "smax z11.s, p0/M, z11.s, z16.s\n"
- "smax z10.s, p0/M, z10.s, z16.s\n"
- "smax z9.s, p0/M, z9.s, z16.s\n"
- "smax z8.s, p0/M, z8.s, z16.s\n"
- "smax z7.s, p0/M, z7.s, z16.s\n"
- "smax z6.s, p0/M, z6.s, z16.s\n"
- "smax z5.s, p0/M, z5.s, z16.s\n"
- "smax z4.s, p0/M, z4.s, z16.s\n"
- "smax z3.s, p0/M, z3.s, z16.s\n"
- "smax z2.s, p0/M, z2.s, z16.s\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smin z15.s, p0/M, z15.s, z18.s\n"
- "smin z14.s, p0/M, z14.s, z18.s\n"
- "trn1 z17.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z18.s\n"
- "smin z12.s, p0/M, z12.s, z18.s\n"
- "trn1 z16.h, z13.h, z12.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z11.s, p0/M, z11.s, z18.s\n"
- "smin z10.s, p0/M, z10.s, z18.s\n"
- "trn1 z17.h, z11.h, z10.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
- "smin z9.s, p0/M, z9.s, z18.s\n"
- "smin z8.s, p0/M, z8.s, z18.s\n"
+ "smax z10.s, p4/M, z10.s, z18.s\n"
+ "smax z9.s, p4/M, z9.s, z18.s\n"
+ "smax z8.s, p4/M, z8.s, z18.s\n"
+ "smax z7.s, p4/M, z7.s, z18.s\n"
+ "smax z6.s, p4/M, z6.s, z18.s\n"
+ "smax z5.s, p4/M, z5.s, z18.s\n"
+ "smax z4.s, p4/M, z4.s, z18.s\n"
+ "smax z3.s, p4/M, z3.s, z18.s\n"
+ "smax z2.s, p4/M, z2.s, z18.s\n"
+ "smax z1.s, p4/M, z1.s, z18.s\n"
+ "smax z0.s, p4/M, z0.s, z18.s\n"
+ "smin z15.s, p4/M, z15.s, z20.s\n"
+ "smin z14.s, p4/M, z14.s, z20.s\n"
+ "smin z13.s, p4/M, z13.s, z20.s\n"
+ "smin z12.s, p4/M, z12.s, z20.s\n"
+ "smin z11.s, p4/M, z11.s, z20.s\n"
+ "smin z10.s, p4/M, z10.s, z20.s\n"
+ "smin z9.s, p4/M, z9.s, z20.s\n"
+ "smin z8.s, p4/M, z8.s, z20.s\n"
+ "smin z7.s, p4/M, z7.s, z20.s\n"
+ "trn1 z19.h, z15.h, z14.h\n"
+ "smin z6.s, p4/M, z6.s, z20.s\n"
+ "smin z5.s, p4/M, z5.s, z20.s\n"
+ "trn1 z17.h, z13.h, z12.h\n"
+ "smin z4.s, p4/M, z4.s, z20.s\n"
+ "smin z3.s, p4/M, z3.s, z20.s\n"
+ "trn1 z18.h, z11.h, z10.h\n"
+ "smin z2.s, p4/M, z2.s, z20.s\n"
+ "smin z1.s, p4/M, z1.s, z20.s\n"
"trn1 z16.h, z9.h, z8.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z7.s, p0/M, z7.s, z18.s\n"
- "smin z6.s, p0/M, z6.s, z18.s\n"
- "trn1 z17.h, z7.h, z6.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x26]\n"
- "smin z5.s, p0/M, z5.s, z18.s\n"
- "smin z4.s, p0/M, z4.s, z18.s\n"
- "trn1 z16.h, z5.h, z4.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z3.s, p0/M, z3.s, z18.s\n"
- "smin z2.s, p0/M, z2.s, z18.s\n"
- "trn1 z17.h, z3.h, z2.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x25]\n"
- "smin z1.s, p0/M, z1.s, z18.s\n"
- "smin z0.s, p0/M, z0.s, z18.s\n"
+ "smin z0.s, p4/M, z0.s, z20.s\n"
+ "trn1 z21.h, z7.h, z6.h\n"
+ "trn1 z20.b, z19.b, z17.b\n"
+ "trn1 z17.h, z5.h, z4.h\n"
+ "trn1 z19.h, z3.h, z2.h\n"
+ "trn1 z18.b, z18.b, z16.b\n"
"trn1 z16.h, z1.h, z0.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x24]\n"
- "incb x24, ALL, MUL #4\n"
- "whilelt p1.b, x24, %x[n_channels]\n"
+ "st1b { z20.b }, p3, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
+ "trn1 z17.b, z21.b, z17.b\n"
+ "trn1 z16.b, z19.b, z16.b\n"
+ "st1b { z18.b }, p2, [%x[outptr], x26]\n"
"incb x26, ALL, MUL #4\n"
+ "st1b { z17.b }, p1, [%x[outptr], x25]\n"
"incb x25, ALL, MUL #4\n"
+ "st1b { z16.b }, p0, [%x[outptr], x24]\n"
+ "incb x24, ALL, MUL #4\n"
+ "whilelt p0.b, x24, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
- "ld1rw { z15.s }, p0/Z, [%x[accumulator_init]]\n"
+ "ld1rw { z15.s }, p4/Z, [%x[accumulator_init]]\n"
"lsr x23, %x[n_valid_cells], #0x1\n"
+ "mov x22, %x[inptrs]\n"
"mov z14.d, z15.d\n"
"mov z13.d, z15.d\n"
"mov z12.d, z15.d\n"
- "mov x22, %x[inptrs]\n"
"cbz x23, 11f\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
"add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 2 inputs loop
".inst 0x455e0bf1 // uaddlb z17.h, z31.b, z30.b\n"
".inst 0x455e0ff0 // uaddlt z16.h, z31.b, z30.b\n"
"ldp x21, x20, [x22, #0x0]\n"
"subs x23, x23, #0x1\n"
+ "add x22, x22, #0x10\n"
".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
- "add x22, x22, #0x10\n"
- "ld1b { z31.b }, p4/Z, [x21, x27]\n"
".inst 0x459049ad // uaddwb z13.s, z13.s, z16.h\n"
".inst 0x45904d8c // uaddwt z12.s, z12.s, z16.h\n"
- "ld1b { z30.b }, p4/Z, [x20, x27]\n"
+ "ld1b { z31.b }, p3/Z, [x21, x27]\n"
+ "ld1b { z30.b }, p3/Z, [x20, x27]\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 2 inputs tail
".inst 0x455e0bf1 // uaddlb z17.h, z31.b, z30.b\n"
@@ -426,53 +426,53 @@ void sve_u8q_nhwc_avg_generic_depthfirst_impl(
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x22], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x27]\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z16.b }, p3/Z, [x20, x27]\n"
".inst 0x4508aa11 // ushllb z17.h, z16.b, #0x0\n"
".inst 0x4508ae10 // ushllt z16.h, z16.b, #0x0\n"
- "subs x21, x21, #0x1\n"
".inst 0x459149ef // uaddwb z15.s, z15.s, z17.h\n"
".inst 0x45914dce // uaddwt z14.s, z14.s, z17.h\n"
".inst 0x459049ad // uaddwb z13.s, z13.s, z16.h\n"
".inst 0x45904d8c // uaddwt z12.s, z12.s, z16.h\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "ld1rw { z17.s }, p0/Z, [%x[left_shift]]\n"
- "ld1rw { z16.s }, p0/Z, [%x[combined_rescale_value]]\n"
- ".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
- ".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
- ".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
- ".inst 0x4482822c // srshl z12.s, p0/M, z12.s, z17.s\n"
- "ld1rw { z17.s }, p0/Z, [%x[right_shift]]\n"
- ".inst 0x04b075ef // sqrdmulh z15.s, z15.s, z16.s\n"
- ".inst 0x04b075ce // sqrdmulh z14.s, z14.s, z16.s\n"
- ".inst 0x04b075ad // sqrdmulh z13.s, z13.s, z16.s\n"
+ "ld1rw { z21.s }, p4/Z, [%x[left_shift]]\n"
+ "ld1rw { z20.s }, p4/Z, [%x[combined_rescale_value]]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- ".inst 0x4482822f // srshl z15.s, p0/M, z15.s, z17.s\n"
- ".inst 0x04b0758c // sqrdmulh z12.s, z12.s, z16.s\n"
- ".inst 0x4482822e // srshl z14.s, p0/M, z14.s, z17.s\n"
- ".inst 0x4482822d // srshl z13.s, p0/M, z13.s, z17.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x4482822c // srshl z12.s, p0/M, z12.s, z17.s\n"
- "add z15.s, z15.s, z16.s\n"
- "add z14.s, z14.s, z16.s\n"
- "add z13.s, z13.s, z16.s\n"
- "add z12.s, z12.s, z16.s\n"
- "mov z17.s, #0x0\n"
- "smax z15.s, p0/M, z15.s, z17.s\n"
- "smax z14.s, p0/M, z14.s, z17.s\n"
+ "mov z19.s, #0x0\n"
+ "ld1rw { z18.s }, p4/Z, [%x[right_shift]]\n"
+ "ld1rw { z17.s }, p4/Z, [x20]\n"
"mov z16.s, #0xff\n"
- "smax z13.s, p0/M, z13.s, z17.s\n"
- "smax z12.s, p0/M, z12.s, z17.s\n"
- "smin z15.s, p0/M, z15.s, z16.s\n"
- "smin z14.s, p0/M, z14.s, z16.s\n"
+ ".inst 0x448292af // srshl z15.s, p4/M, z15.s, z21.s\n"
+ ".inst 0x448292ae // srshl z14.s, p4/M, z14.s, z21.s\n"
+ ".inst 0x448292ad // srshl z13.s, p4/M, z13.s, z21.s\n"
+ ".inst 0x448292ac // srshl z12.s, p4/M, z12.s, z21.s\n"
+ ".inst 0x04b475ef // sqrdmulh z15.s, z15.s, z20.s\n"
+ ".inst 0x04b475ce // sqrdmulh z14.s, z14.s, z20.s\n"
+ ".inst 0x04b475ad // sqrdmulh z13.s, z13.s, z20.s\n"
+ ".inst 0x04b4758c // sqrdmulh z12.s, z12.s, z20.s\n"
+ ".inst 0x4482924f // srshl z15.s, p4/M, z15.s, z18.s\n"
+ ".inst 0x4482924e // srshl z14.s, p4/M, z14.s, z18.s\n"
+ ".inst 0x4482924d // srshl z13.s, p4/M, z13.s, z18.s\n"
+ ".inst 0x4482924c // srshl z12.s, p4/M, z12.s, z18.s\n"
+ "add z15.s, z15.s, z17.s\n"
+ "add z14.s, z14.s, z17.s\n"
+ "add z13.s, z13.s, z17.s\n"
+ "add z12.s, z12.s, z17.s\n"
+ "smax z15.s, p4/M, z15.s, z19.s\n"
+ "smax z14.s, p4/M, z14.s, z19.s\n"
+ "smax z13.s, p4/M, z13.s, z19.s\n"
+ "smax z12.s, p4/M, z12.s, z19.s\n"
+ "smin z15.s, p4/M, z15.s, z16.s\n"
+ "smin z14.s, p4/M, z14.s, z16.s\n"
+ "smin z13.s, p4/M, z13.s, z16.s\n"
+ "smin z12.s, p4/M, z12.s, z16.s\n"
"trn1 z17.h, z15.h, z14.h\n"
- "smin z13.s, p0/M, z13.s, z16.s\n"
- "smin z12.s, p0/M, z12.s, z16.s\n"
"trn1 z16.h, z13.h, z12.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x27]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x27]\n"
"incb x27\n"
- "whilelt p4.b, x27, %x[n_channels]\n"
+ "whilelt p3.b, x27, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
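[Note] The u8q average-pooling tail above performs, per 32-bit lane: a shift left by left_shift (srshl), a saturating rounding doubling high-half multiply (sqrdmulh) by combined_rescale_value, a rounding shift by right_shift (srshl), addition of the quantisation output offset, and a clamp to [0, 255] before the trn1 pack. A minimal scalar sketch under those assumptions (helper name illustrative):

#include <algorithm>
#include <cstdint>

// Per-lane sketch of the u8q tail: srshl(left) -> sqrdmulh(rescale) ->
// srshl(right, rounding) -> + output_offset -> clamp to [0, 255].
static inline uint8_t u8q_requantize_lane(int32_t acc, int32_t left_shift,
                                          int32_t rescale, int32_t right_shift,
                                          int32_t output_offset)
{
    acc <<= left_shift;                        // non-negative left shift

    int64_t prod = (int64_t)acc * (int64_t)rescale;
    int64_t high = (prod + (1LL << 30)) >> 31; // sqrdmulh: rounding high half
    if (high > INT32_MAX) high = INT32_MAX;    // sqrdmulh saturation case

    if (right_shift < 0)                       // srshl: negative amount rounds right
    {
        int32_t s = -right_shift;
        high = (high + (1LL << (s - 1))) >> s;
    }
    int32_t v = (int32_t)high + output_offset;
    return (uint8_t)std::clamp(v, 0, 255);     // smax 0 / smin 0xff
}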
diff --git a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
index 94522cdaaa..8308a115a4 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/kernels/sve_u8q_nhwc_max_generic_depthfirst/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,367 +46,367 @@ void sve_u8q_nhwc_max_generic_depthfirst_impl(
"cntb x28\n"
"cntb x27, ALL, MUL #2\n"
"cntb x26, ALL, MUL #3\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
- "whilelt p3.b, x28, %x[n_channels]\n"
- "whilelt p2.b, x27, %x[n_channels]\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
- "ptrue p0.b\n"
+ "ptrue p4.b\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
+ "whilelt p2.b, x28, %x[n_channels]\n"
+ "whilelt p1.b, x27, %x[n_channels]\n"
+ "whilelt p0.b, x26, %x[n_channels]\n"
"b.none 7f\n"
"1:" // 4-vectors of channels
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.b, #0x0\n"
- "mov z7.b, #0x0\n"
- "mov x24, %x[inptrs]\n"
"mov z6.b, #0x0\n"
"mov z5.b, #0x0\n"
+ "mov x24, %x[inptrs]\n"
+ "mov z4.b, #0x0\n"
+ "mov z3.b, #0x0\n"
"cbz x25, 4f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
- "ld1b { z0.b }, p3/Z, [x23, x28]\n"
- "ld1b { z31.b }, p3/Z, [x22, x28]\n"
- "ld1b { z22.b }, p3/Z, [x21, x28]\n"
- "ld1b { z30.b }, p3/Z, [x20, x28]\n"
- "ld1b { z29.b }, p2/Z, [x23, x27]\n"
- "ld1b { z28.b }, p2/Z, [x22, x27]\n"
- "ld1b { z21.b }, p2/Z, [x21, x27]\n"
- "ld1b { z27.b }, p2/Z, [x20, x27]\n"
- "ld1b { z26.b }, p1/Z, [x23, x26]\n"
- "ld1b { z25.b }, p1/Z, [x22, x26]\n"
- "ld1b { z20.b }, p1/Z, [x21, x26]\n"
- "ld1b { z24.b }, p1/Z, [x20, x26]\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "ld1b { z31.b }, p2/Z, [x23, x28]\n"
+ "ld1b { z30.b }, p2/Z, [x22, x28]\n"
+ "ld1b { z22.b }, p2/Z, [x21, x28]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x28]\n"
+ "ld1b { z28.b }, p1/Z, [x23, x27]\n"
+ "ld1b { z27.b }, p1/Z, [x22, x27]\n"
+ "ld1b { z21.b }, p1/Z, [x21, x27]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x27]\n"
+ "ld1b { z16.b }, p0/Z, [x23, x26]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x26]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x26]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x26]\n"
"beq 3f\n"
"2:" // 4-vectors of channels: 4 inputs loop
- "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
- "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
+ "movprfx z19, z2\n umax z19.b, p4/M, z19.b, z1.b\n"
+ "umax z23.b, p4/M, z23.b, z0.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
- "umax z22.b, p0/M, z22.b, z30.b\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
- "umax z21.b, p0/M, z21.b, z27.b\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
- "movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
- "umax z20.b, p0/M, z20.b, z24.b\n"
- "ld1b { z0.b }, p3/Z, [x23, x28]\n"
- "ld1b { z31.b }, p3/Z, [x22, x28]\n"
- "umax z19.b, p0/M, z19.b, z23.b\n"
- "umax z18.b, p0/M, z18.b, z22.b\n"
- "ld1b { z22.b }, p3/Z, [x21, x28]\n"
- "ld1b { z30.b }, p3/Z, [x20, x28]\n"
- "umax z17.b, p0/M, z17.b, z21.b\n"
- "umax z16.b, p0/M, z16.b, z20.b\n"
- "ld1b { z29.b }, p2/Z, [x23, x27]\n"
- "ld1b { z28.b }, p2/Z, [x22, x27]\n"
+ "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
+ "umax z22.b, p4/M, z22.b, z29.b\n"
+ "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
+ "umax z21.b, p4/M, z21.b, z26.b\n"
+ "umax z16.b, p4/M, z16.b, z25.b\n"
+ "umax z20.b, p4/M, z20.b, z24.b\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "umax z19.b, p4/M, z19.b, z23.b\n"
+ "umax z18.b, p4/M, z18.b, z22.b\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "umax z17.b, p4/M, z17.b, z21.b\n"
"subs x25, x25, #0x1\n"
- "umax z8.b, p0/M, z8.b, z19.b\n"
- "ld1b { z21.b }, p2/Z, [x21, x27]\n"
- "ld1b { z27.b }, p2/Z, [x20, x27]\n"
- "umax z7.b, p0/M, z7.b, z18.b\n"
- "umax z6.b, p0/M, z6.b, z17.b\n"
- "ld1b { z26.b }, p1/Z, [x23, x26]\n"
- "ld1b { z25.b }, p1/Z, [x22, x26]\n"
- "umax z5.b, p0/M, z5.b, z16.b\n"
+ "ld1b { z31.b }, p2/Z, [x23, x28]\n"
+ "ld1b { z30.b }, p2/Z, [x22, x28]\n"
+ "umax z16.b, p4/M, z16.b, z20.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z20.b }, p1/Z, [x21, x26]\n"
- "ld1b { z24.b }, p1/Z, [x20, x26]\n"
+ "ld1b { z22.b }, p2/Z, [x21, x28]\n"
+ "ld1b { z29.b }, p2/Z, [x20, x28]\n"
+ "umax z6.b, p4/M, z6.b, z19.b\n"
+ "umax z5.b, p4/M, z5.b, z18.b\n"
+ "ld1b { z28.b }, p1/Z, [x23, x27]\n"
+ "ld1b { z27.b }, p1/Z, [x22, x27]\n"
+ "umax z4.b, p4/M, z4.b, z17.b\n"
+ "ld1b { z21.b }, p1/Z, [x21, x27]\n"
+ "ld1b { z26.b }, p1/Z, [x20, x27]\n"
+ "umax z3.b, p4/M, z3.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x23, x26]\n"
+ "ld1b { z25.b }, p0/Z, [x22, x26]\n"
+ "ld1b { z20.b }, p0/Z, [x21, x26]\n"
+ "ld1b { z24.b }, p0/Z, [x20, x26]\n"
"bgt 2b\n"
"3:" // 4-vectors of channels: 4 inputs tail
- "movprfx z19, z4\n umax z19.b, p0/M, z19.b, z3.b\n"
- "movprfx z23, z2\n umax z23.b, p0/M, z23.b, z1.b\n"
- "movprfx z18, z0\n umax z18.b, p0/M, z18.b, z31.b\n"
- "umax z22.b, p0/M, z22.b, z30.b\n"
- "movprfx z17, z29\n umax z17.b, p0/M, z17.b, z28.b\n"
- "umax z21.b, p0/M, z21.b, z27.b\n"
- "movprfx z16, z26\n umax z16.b, p0/M, z16.b, z25.b\n"
- "umax z20.b, p0/M, z20.b, z24.b\n"
- "umax z19.b, p0/M, z19.b, z23.b\n"
- "umax z18.b, p0/M, z18.b, z22.b\n"
- "umax z17.b, p0/M, z17.b, z21.b\n"
- "umax z16.b, p0/M, z16.b, z20.b\n"
- "umax z8.b, p0/M, z8.b, z19.b\n"
- "umax z7.b, p0/M, z7.b, z18.b\n"
- "umax z6.b, p0/M, z6.b, z17.b\n"
- "umax z5.b, p0/M, z5.b, z16.b\n"
+ "movprfx z19, z2\n umax z19.b, p4/M, z19.b, z1.b\n"
+ "umax z23.b, p4/M, z23.b, z0.b\n"
+ "movprfx z18, z31\n umax z18.b, p4/M, z18.b, z30.b\n"
+ "umax z22.b, p4/M, z22.b, z29.b\n"
+ "movprfx z17, z28\n umax z17.b, p4/M, z17.b, z27.b\n"
+ "umax z21.b, p4/M, z21.b, z26.b\n"
+ "umax z16.b, p4/M, z16.b, z25.b\n"
+ "umax z20.b, p4/M, z20.b, z24.b\n"
+ "umax z19.b, p4/M, z19.b, z23.b\n"
+ "umax z18.b, p4/M, z18.b, z22.b\n"
+ "umax z17.b, p4/M, z17.b, z21.b\n"
+ "umax z16.b, p4/M, z16.b, z20.b\n"
+ "umax z6.b, p4/M, z6.b, z19.b\n"
+ "umax z5.b, p4/M, z5.b, z18.b\n"
+ "umax z4.b, p4/M, z4.b, z17.b\n"
+ "umax z3.b, p4/M, z3.b, z16.b\n"
"4:" // 4-vectors of channels: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 6f\n"
"5:" // 4-vectors of channels: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "umax z8.b, p0/M, z8.b, z16.b\n"
- "ld1b { z17.b }, p3/Z, [x20, x28]\n"
- "ld1b { z16.b }, p2/Z, [x20, x27]\n"
- "umax z7.b, p0/M, z7.b, z17.b\n"
- "umax z6.b, p0/M, z6.b, z16.b\n"
- "ld1b { z16.b }, p1/Z, [x20, x26]\n"
- "umax z5.b, p0/M, z5.b, z16.b\n"
+ "ld1b { z19.b }, p3/Z, [x20, x9]\n"
+ "ld1b { z18.b }, p2/Z, [x20, x28]\n"
+ "ld1b { z17.b }, p1/Z, [x20, x27]\n"
+ "ld1b { z16.b }, p0/Z, [x20, x26]\n"
+ "umax z6.b, p4/M, z6.b, z19.b\n"
+ "umax z5.b, p4/M, z5.b, z18.b\n"
+ "umax z4.b, p4/M, z4.b, z17.b\n"
+ "umax z3.b, p4/M, z3.b, z16.b\n"
"bgt 5b\n"
"6:" // 4-vectors of channels: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1rw { z3.s }, p0/Z, [x20]\n"
- ".inst 0x4508a911 // ushllb z17.h, z8.b, #0x0\n"
- ".inst 0x4508ad18 // ushllt z24.h, z8.b, #0x0\n"
- ".inst 0x4508a8f7 // ushllb z23.h, z7.b, #0x0\n"
- ".inst 0x4508acf6 // ushllt z22.h, z7.b, #0x0\n"
- "neg z3.s, p0/M, z3.s\n"
+ "add x21, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ ".inst 0x4508a8d3 // ushllb z19.h, z6.b, #0x0\n"
+ ".inst 0x4508acd1 // ushllt z17.h, z6.b, #0x0\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- ".inst 0x4508a8d5 // ushllb z21.h, z6.b, #0x0\n"
- ".inst 0x4508acd4 // ushllt z20.h, z6.b, #0x0\n"
- "ld1rw { z2.s }, p0/Z, [x20]\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- ".inst 0x4508a8b3 // ushllb z19.h, z5.b, #0x0\n"
+ "ld1rw { z6.s }, p4/Z, [x21]\n"
+ ".inst 0x4508a8b2 // ushllb z18.h, z5.b, #0x0\n"
".inst 0x4508acb0 // ushllt z16.h, z5.b, #0x0\n"
- "ld1rw { z18.s }, p0/Z, [x20]\n"
+ "ld1rw { z5.s }, p4/Z, [x20]\n"
+ ".inst 0x4508a894 // ushllb z20.h, z4.b, #0x0\n"
+ ".inst 0x4508ac98 // ushllt z24.h, z4.b, #0x0\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- ".inst 0x45914061 // saddwb z1.s, z3.s, z17.h\n"
- ".inst 0x45914471 // saddwt z17.s, z3.s, z17.h\n"
- ".inst 0x44828041 // srshl z1.s, p0/M, z1.s, z2.s\n"
- ".inst 0x44828051 // srshl z17.s, p0/M, z17.s, z2.s\n"
- ".inst 0x45984060 // saddwb z0.s, z3.s, z24.h\n"
- ".inst 0x4598447f // saddwt z31.s, z3.s, z24.h\n"
- ".inst 0x44828040 // srshl z0.s, p0/M, z0.s, z2.s\n"
- ".inst 0x4482805f // srshl z31.s, p0/M, z31.s, z2.s\n"
- ".inst 0x4597407e // saddwb z30.s, z3.s, z23.h\n"
- ".inst 0x4597447d // saddwt z29.s, z3.s, z23.h\n"
- ".inst 0x4482805e // srshl z30.s, p0/M, z30.s, z2.s\n"
- ".inst 0x4482805d // srshl z29.s, p0/M, z29.s, z2.s\n"
- ".inst 0x4596407c // saddwb z28.s, z3.s, z22.h\n"
- ".inst 0x4596447b // saddwt z27.s, z3.s, z22.h\n"
- ".inst 0x4482805c // srshl z28.s, p0/M, z28.s, z2.s\n"
- ".inst 0x4482805b // srshl z27.s, p0/M, z27.s, z2.s\n"
- ".inst 0x4595407a // saddwb z26.s, z3.s, z21.h\n"
- ".inst 0x45954479 // saddwt z25.s, z3.s, z21.h\n"
- ".inst 0x4482805a // srshl z26.s, p0/M, z26.s, z2.s\n"
- ".inst 0x44828059 // srshl z25.s, p0/M, z25.s, z2.s\n"
- ".inst 0x45944078 // saddwb z24.s, z3.s, z20.h\n"
- ".inst 0x45944477 // saddwt z23.s, z3.s, z20.h\n"
- ".inst 0x44828058 // srshl z24.s, p0/M, z24.s, z2.s\n"
- ".inst 0x44828057 // srshl z23.s, p0/M, z23.s, z2.s\n"
- ".inst 0x45934076 // saddwb z22.s, z3.s, z19.h\n"
- ".inst 0x45934475 // saddwt z21.s, z3.s, z19.h\n"
- ".inst 0x44828056 // srshl z22.s, p0/M, z22.s, z2.s\n"
- ".inst 0x44828055 // srshl z21.s, p0/M, z21.s, z2.s\n"
- ".inst 0x45904074 // saddwb z20.s, z3.s, z16.h\n"
- ".inst 0x45904473 // saddwt z19.s, z3.s, z16.h\n"
- ".inst 0x44828054 // srshl z20.s, p0/M, z20.s, z2.s\n"
- ".inst 0x44828053 // srshl z19.s, p0/M, z19.s, z2.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x04b27421 // sqrdmulh z1.s, z1.s, z18.s\n"
- ".inst 0x04b27631 // sqrdmulh z17.s, z17.s, z18.s\n"
+ ".inst 0x4508a877 // ushllb z23.h, z3.b, #0x0\n"
+ ".inst 0x4508ac76 // ushllt z22.h, z3.b, #0x0\n"
+ "ld1rw { z4.s }, p4/Z, [x21]\n"
+ "ld1rw { z3.s }, p4/Z, [x20]\n"
+ "neg z6.s, p4/M, z6.s\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- ".inst 0x04b27400 // sqrdmulh z0.s, z0.s, z18.s\n"
- ".inst 0x04b277ff // sqrdmulh z31.s, z31.s, z18.s\n"
- ".inst 0x44828201 // srshl z1.s, p0/M, z1.s, z16.s\n"
- ".inst 0x44828211 // srshl z17.s, p0/M, z17.s, z16.s\n"
- ".inst 0x04b277de // sqrdmulh z30.s, z30.s, z18.s\n"
- ".inst 0x04b277bd // sqrdmulh z29.s, z29.s, z18.s\n"
- ".inst 0x44828200 // srshl z0.s, p0/M, z0.s, z16.s\n"
- ".inst 0x4482821f // srshl z31.s, p0/M, z31.s, z16.s\n"
- ".inst 0x04b2779c // sqrdmulh z28.s, z28.s, z18.s\n"
- ".inst 0x04b2777b // sqrdmulh z27.s, z27.s, z18.s\n"
- ".inst 0x4482821e // srshl z30.s, p0/M, z30.s, z16.s\n"
- ".inst 0x4482821d // srshl z29.s, p0/M, z29.s, z16.s\n"
- ".inst 0x04b2775a // sqrdmulh z26.s, z26.s, z18.s\n"
- ".inst 0x04b27739 // sqrdmulh z25.s, z25.s, z18.s\n"
- ".inst 0x4482821c // srshl z28.s, p0/M, z28.s, z16.s\n"
- ".inst 0x4482821b // srshl z27.s, p0/M, z27.s, z16.s\n"
- ".inst 0x04b27718 // sqrdmulh z24.s, z24.s, z18.s\n"
- ".inst 0x04b276f7 // sqrdmulh z23.s, z23.s, z18.s\n"
- ".inst 0x4482821a // srshl z26.s, p0/M, z26.s, z16.s\n"
- ".inst 0x44828219 // srshl z25.s, p0/M, z25.s, z16.s\n"
- ".inst 0x04b276d6 // sqrdmulh z22.s, z22.s, z18.s\n"
- ".inst 0x04b276b5 // sqrdmulh z21.s, z21.s, z18.s\n"
- ".inst 0x44828218 // srshl z24.s, p0/M, z24.s, z16.s\n"
- ".inst 0x44828217 // srshl z23.s, p0/M, z23.s, z16.s\n"
- ".inst 0x04b27694 // sqrdmulh z20.s, z20.s, z18.s\n"
- ".inst 0x04b27673 // sqrdmulh z19.s, z19.s, z18.s\n"
- ".inst 0x44828216 // srshl z22.s, p0/M, z22.s, z16.s\n"
- ".inst 0x44828215 // srshl z21.s, p0/M, z21.s, z16.s\n"
- ".inst 0x44828214 // srshl z20.s, p0/M, z20.s, z16.s\n"
- ".inst 0x44828213 // srshl z19.s, p0/M, z19.s, z16.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- "add z1.s, z1.s, z16.s\n"
- "add z17.s, z17.s, z16.s\n"
- "add z0.s, z0.s, z16.s\n"
- "add z31.s, z31.s, z16.s\n"
- "add z30.s, z30.s, z16.s\n"
- "add z29.s, z29.s, z16.s\n"
- "add z28.s, z28.s, z16.s\n"
- "add z27.s, z27.s, z16.s\n"
- "add z26.s, z26.s, z16.s\n"
- "add z25.s, z25.s, z16.s\n"
- "add z24.s, z24.s, z16.s\n"
- "add z23.s, z23.s, z16.s\n"
- "add z22.s, z22.s, z16.s\n"
- "add z21.s, z21.s, z16.s\n"
- "add z20.s, z20.s, z16.s\n"
- "add z19.s, z19.s, z16.s\n"
- "mov z16.s, #0x0\n"
- "smax z1.s, p0/M, z1.s, z16.s\n"
- "smax z17.s, p0/M, z17.s, z16.s\n"
- "smax z0.s, p0/M, z0.s, z16.s\n"
- "smax z31.s, p0/M, z31.s, z16.s\n"
- "mov z18.s, #0xff\n"
- "smax z30.s, p0/M, z30.s, z16.s\n"
- "smax z29.s, p0/M, z29.s, z16.s\n"
- "smax z28.s, p0/M, z28.s, z16.s\n"
- "smax z27.s, p0/M, z27.s, z16.s\n"
- "smax z26.s, p0/M, z26.s, z16.s\n"
- "smax z25.s, p0/M, z25.s, z16.s\n"
- "smax z24.s, p0/M, z24.s, z16.s\n"
- "smax z23.s, p0/M, z23.s, z16.s\n"
- "smax z22.s, p0/M, z22.s, z16.s\n"
- "smax z21.s, p0/M, z21.s, z16.s\n"
- "smax z20.s, p0/M, z20.s, z16.s\n"
- "smax z19.s, p0/M, z19.s, z16.s\n"
- "smin z1.s, p0/M, z1.s, z18.s\n"
- "smin z17.s, p0/M, z17.s, z18.s\n"
- "trn1 z17.h, z1.h, z17.h\n"
- "smin z0.s, p0/M, z0.s, z18.s\n"
- "smin z31.s, p0/M, z31.s, z18.s\n"
- "trn1 z16.h, z0.h, z31.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z30.s, p0/M, z30.s, z18.s\n"
- "smin z29.s, p0/M, z29.s, z18.s\n"
- "trn1 z17.h, z30.h, z29.h\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
- "smin z28.s, p0/M, z28.s, z18.s\n"
- "smin z27.s, p0/M, z27.s, z18.s\n"
- "trn1 z16.h, z28.h, z27.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z26.s, p0/M, z26.s, z18.s\n"
- "smin z25.s, p0/M, z25.s, z18.s\n"
- "trn1 z17.h, z26.h, z25.h\n"
- "st1b { z16.b }, p3, [%x[outptr], x28]\n"
- "smin z24.s, p0/M, z24.s, z18.s\n"
- "smin z23.s, p0/M, z23.s, z18.s\n"
- "trn1 z16.h, z24.h, z23.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "smin z22.s, p0/M, z22.s, z18.s\n"
- "smin z21.s, p0/M, z21.s, z18.s\n"
- "trn1 z17.h, z22.h, z21.h\n"
- "st1b { z16.b }, p2, [%x[outptr], x27]\n"
- "smin z20.s, p0/M, z20.s, z18.s\n"
- "smin z19.s, p0/M, z19.s, z18.s\n"
- "trn1 z16.h, z20.h, z19.h\n"
- "trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [%x[outptr], x26]\n"
- "incb x26, ALL, MUL #4\n"
- "whilelt p1.b, x26, %x[n_channels]\n"
+ "mov z2.s, #0x0\n"
+ "mov z1.s, #0xff\n"
+ "ld1rw { z0.s }, p4/Z, [x20]\n"
+ ".inst 0x459340df // saddwb z31.s, z6.s, z19.h\n"
+ ".inst 0x459344d3 // saddwt z19.s, z6.s, z19.h\n"
+ ".inst 0x459140de // saddwb z30.s, z6.s, z17.h\n"
+ ".inst 0x459144d1 // saddwt z17.s, z6.s, z17.h\n"
+ ".inst 0x459240dd // saddwb z29.s, z6.s, z18.h\n"
+ ".inst 0x459244d2 // saddwt z18.s, z6.s, z18.h\n"
+ ".inst 0x459040dc // saddwb z28.s, z6.s, z16.h\n"
+ ".inst 0x459044d0 // saddwt z16.s, z6.s, z16.h\n"
+ ".inst 0x448290bf // srshl z31.s, p4/M, z31.s, z5.s\n"
+ ".inst 0x448290b3 // srshl z19.s, p4/M, z19.s, z5.s\n"
+ ".inst 0x459440d5 // saddwb z21.s, z6.s, z20.h\n"
+ ".inst 0x459444d4 // saddwt z20.s, z6.s, z20.h\n"
+ ".inst 0x448290be // srshl z30.s, p4/M, z30.s, z5.s\n"
+ ".inst 0x448290b1 // srshl z17.s, p4/M, z17.s, z5.s\n"
+ ".inst 0x459840db // saddwb z27.s, z6.s, z24.h\n"
+ ".inst 0x459844da // saddwt z26.s, z6.s, z24.h\n"
+ ".inst 0x448290bd // srshl z29.s, p4/M, z29.s, z5.s\n"
+ ".inst 0x448290b2 // srshl z18.s, p4/M, z18.s, z5.s\n"
+ ".inst 0x459740d9 // saddwb z25.s, z6.s, z23.h\n"
+ ".inst 0x459744d8 // saddwt z24.s, z6.s, z23.h\n"
+ ".inst 0x448290bc // srshl z28.s, p4/M, z28.s, z5.s\n"
+ ".inst 0x448290b0 // srshl z16.s, p4/M, z16.s, z5.s\n"
+ ".inst 0x459640d7 // saddwb z23.s, z6.s, z22.h\n"
+ ".inst 0x459644d6 // saddwt z22.s, z6.s, z22.h\n"
+ ".inst 0x448290b5 // srshl z21.s, p4/M, z21.s, z5.s\n"
+ ".inst 0x448290b4 // srshl z20.s, p4/M, z20.s, z5.s\n"
+ ".inst 0x448290bb // srshl z27.s, p4/M, z27.s, z5.s\n"
+ ".inst 0x448290ba // srshl z26.s, p4/M, z26.s, z5.s\n"
+ ".inst 0x04a477ff // sqrdmulh z31.s, z31.s, z4.s\n"
+ ".inst 0x04a47673 // sqrdmulh z19.s, z19.s, z4.s\n"
+ ".inst 0x448290b9 // srshl z25.s, p4/M, z25.s, z5.s\n"
+ ".inst 0x448290b8 // srshl z24.s, p4/M, z24.s, z5.s\n"
+ ".inst 0x04a477de // sqrdmulh z30.s, z30.s, z4.s\n"
+ ".inst 0x04a47631 // sqrdmulh z17.s, z17.s, z4.s\n"
+ ".inst 0x448290b7 // srshl z23.s, p4/M, z23.s, z5.s\n"
+ ".inst 0x448290b6 // srshl z22.s, p4/M, z22.s, z5.s\n"
+ ".inst 0x04a477bd // sqrdmulh z29.s, z29.s, z4.s\n"
+ ".inst 0x04a47652 // sqrdmulh z18.s, z18.s, z4.s\n"
+ ".inst 0x04a4779c // sqrdmulh z28.s, z28.s, z4.s\n"
+ ".inst 0x04a47610 // sqrdmulh z16.s, z16.s, z4.s\n"
+ ".inst 0x4482907f // srshl z31.s, p4/M, z31.s, z3.s\n"
+ ".inst 0x44829073 // srshl z19.s, p4/M, z19.s, z3.s\n"
+ ".inst 0x04a476b5 // sqrdmulh z21.s, z21.s, z4.s\n"
+ ".inst 0x04a47694 // sqrdmulh z20.s, z20.s, z4.s\n"
+ ".inst 0x4482907e // srshl z30.s, p4/M, z30.s, z3.s\n"
+ ".inst 0x44829071 // srshl z17.s, p4/M, z17.s, z3.s\n"
+ ".inst 0x04a4777b // sqrdmulh z27.s, z27.s, z4.s\n"
+ ".inst 0x04a4775a // sqrdmulh z26.s, z26.s, z4.s\n"
+ ".inst 0x4482907d // srshl z29.s, p4/M, z29.s, z3.s\n"
+ ".inst 0x44829072 // srshl z18.s, p4/M, z18.s, z3.s\n"
+ ".inst 0x04a47739 // sqrdmulh z25.s, z25.s, z4.s\n"
+ ".inst 0x04a47718 // sqrdmulh z24.s, z24.s, z4.s\n"
+ ".inst 0x4482907c // srshl z28.s, p4/M, z28.s, z3.s\n"
+ ".inst 0x44829070 // srshl z16.s, p4/M, z16.s, z3.s\n"
+ ".inst 0x04a476f7 // sqrdmulh z23.s, z23.s, z4.s\n"
+ ".inst 0x04a476d6 // sqrdmulh z22.s, z22.s, z4.s\n"
+ ".inst 0x44829075 // srshl z21.s, p4/M, z21.s, z3.s\n"
+ ".inst 0x44829074 // srshl z20.s, p4/M, z20.s, z3.s\n"
+ ".inst 0x4482907b // srshl z27.s, p4/M, z27.s, z3.s\n"
+ ".inst 0x4482907a // srshl z26.s, p4/M, z26.s, z3.s\n"
+ "add z31.s, z31.s, z0.s\n"
+ "add z19.s, z19.s, z0.s\n"
+ ".inst 0x44829079 // srshl z25.s, p4/M, z25.s, z3.s\n"
+ ".inst 0x44829078 // srshl z24.s, p4/M, z24.s, z3.s\n"
+ "add z30.s, z30.s, z0.s\n"
+ "add z17.s, z17.s, z0.s\n"
+ ".inst 0x44829077 // srshl z23.s, p4/M, z23.s, z3.s\n"
+ ".inst 0x44829076 // srshl z22.s, p4/M, z22.s, z3.s\n"
+ "add z29.s, z29.s, z0.s\n"
+ "add z18.s, z18.s, z0.s\n"
+ "add z28.s, z28.s, z0.s\n"
+ "add z16.s, z16.s, z0.s\n"
+ "smax z31.s, p4/M, z31.s, z2.s\n"
+ "smax z19.s, p4/M, z19.s, z2.s\n"
+ "add z21.s, z21.s, z0.s\n"
+ "add z20.s, z20.s, z0.s\n"
+ "smax z30.s, p4/M, z30.s, z2.s\n"
+ "smax z17.s, p4/M, z17.s, z2.s\n"
+ "add z27.s, z27.s, z0.s\n"
+ "add z26.s, z26.s, z0.s\n"
+ "smax z29.s, p4/M, z29.s, z2.s\n"
+ "smax z18.s, p4/M, z18.s, z2.s\n"
+ "add z25.s, z25.s, z0.s\n"
+ "add z24.s, z24.s, z0.s\n"
+ "smax z28.s, p4/M, z28.s, z2.s\n"
+ "smax z16.s, p4/M, z16.s, z2.s\n"
+ "add z23.s, z23.s, z0.s\n"
+ "add z22.s, z22.s, z0.s\n"
+ "smax z21.s, p4/M, z21.s, z2.s\n"
+ "smax z20.s, p4/M, z20.s, z2.s\n"
+ "smax z27.s, p4/M, z27.s, z2.s\n"
+ "smax z26.s, p4/M, z26.s, z2.s\n"
+ "smax z25.s, p4/M, z25.s, z2.s\n"
+ "smax z24.s, p4/M, z24.s, z2.s\n"
+ "smax z23.s, p4/M, z23.s, z2.s\n"
+ "smax z22.s, p4/M, z22.s, z2.s\n"
+ "smin z31.s, p4/M, z31.s, z1.s\n"
+ "smin z19.s, p4/M, z19.s, z1.s\n"
+ "smin z30.s, p4/M, z30.s, z1.s\n"
+ "smin z17.s, p4/M, z17.s, z1.s\n"
+ "smin z29.s, p4/M, z29.s, z1.s\n"
+ "smin z18.s, p4/M, z18.s, z1.s\n"
+ "smin z28.s, p4/M, z28.s, z1.s\n"
+ "smin z16.s, p4/M, z16.s, z1.s\n"
+ "trn1 z19.h, z31.h, z19.h\n"
+ "smin z21.s, p4/M, z21.s, z1.s\n"
+ "smin z20.s, p4/M, z20.s, z1.s\n"
+ "trn1 z17.h, z30.h, z17.h\n"
+ "smin z27.s, p4/M, z27.s, z1.s\n"
+ "smin z26.s, p4/M, z26.s, z1.s\n"
+ "trn1 z18.h, z29.h, z18.h\n"
+ "smin z25.s, p4/M, z25.s, z1.s\n"
+ "smin z24.s, p4/M, z24.s, z1.s\n"
+ "trn1 z16.h, z28.h, z16.h\n"
+ "smin z23.s, p4/M, z23.s, z1.s\n"
+ "smin z22.s, p4/M, z22.s, z1.s\n"
+ "trn1 z21.h, z21.h, z20.h\n"
+ "trn1 z20.b, z19.b, z17.b\n"
+ "trn1 z17.h, z27.h, z26.h\n"
+ "trn1 z19.h, z25.h, z24.h\n"
+ "trn1 z18.b, z18.b, z16.b\n"
+ "trn1 z16.h, z23.h, z22.h\n"
+ "st1b { z20.b }, p3, [%x[outptr], x9]\n"
"incb x9, ALL, MUL #4\n"
+ "trn1 z17.b, z21.b, z17.b\n"
+ "trn1 z16.b, z19.b, z16.b\n"
+ "st1b { z18.b }, p2, [%x[outptr], x28]\n"
"incb x28, ALL, MUL #4\n"
+ "st1b { z17.b }, p1, [%x[outptr], x27]\n"
"incb x27, ALL, MUL #4\n"
+ "st1b { z16.b }, p0, [%x[outptr], x26]\n"
+ "incb x26, ALL, MUL #4\n"
+ "whilelt p0.b, x26, %x[n_channels]\n"
"b.any 1b\n"
"7:" // Single vector of channels
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
"b.none 14f\n"
"8:" // Single vector of channels: Loop
"lsr x25, %x[n_valid_cells], #0x2\n"
- "mov z8.b, #0x0\n"
+ "mov z6.b, #0x0\n"
"mov x24, %x[inptrs]\n"
"cbz x25, 11f\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
"subs x25, x25, #0x1\n"
"add x24, x24, #0x20\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
"beq 10f\n"
"9:" // Single vector of channels: Loop: 4 inputs loop
- "movprfx z16, z4\n umax z16.b, p0/M, z16.b, z3.b\n"
- "movprfx z17, z2\n umax z17.b, p0/M, z17.b, z1.b\n"
+ "movprfx z16, z2\n umax z16.b, p4/M, z16.b, z1.b\n"
+ "movprfx z17, z23\n umax z17.b, p4/M, z17.b, z0.b\n"
"ldp x23, x22, [x24, #0x0]\n"
"ldp x21, x20, [x24, #0x10]\n"
- "umax z16.b, p0/M, z16.b, z17.b\n"
"subs x25, x25, #0x1\n"
- "ld1b { z4.b }, p4/Z, [x23, x9]\n"
- "ld1b { z3.b }, p4/Z, [x22, x9]\n"
- "umax z8.b, p0/M, z8.b, z16.b\n"
"add x24, x24, #0x20\n"
- "ld1b { z2.b }, p4/Z, [x21, x9]\n"
- "ld1b { z1.b }, p4/Z, [x20, x9]\n"
+ "umax z16.b, p4/M, z16.b, z17.b\n"
+ "ld1b { z2.b }, p3/Z, [x23, x9]\n"
+ "ld1b { z1.b }, p3/Z, [x22, x9]\n"
+ "ld1b { z23.b }, p3/Z, [x21, x9]\n"
+ "ld1b { z0.b }, p3/Z, [x20, x9]\n"
+ "umax z6.b, p4/M, z6.b, z16.b\n"
"bgt 9b\n"
"10:" // Single vector of channels: Loop: 4 inputs tail
- "movprfx z16, z4\n umax z16.b, p0/M, z16.b, z3.b\n"
- "movprfx z17, z2\n umax z17.b, p0/M, z17.b, z1.b\n"
- "umax z16.b, p0/M, z16.b, z17.b\n"
- "umax z8.b, p0/M, z8.b, z16.b\n"
+ "movprfx z16, z2\n umax z16.b, p4/M, z16.b, z1.b\n"
+ "movprfx z17, z23\n umax z17.b, p4/M, z17.b, z0.b\n"
+ "umax z16.b, p4/M, z16.b, z17.b\n"
+ "umax z6.b, p4/M, z6.b, z16.b\n"
"11:" // Single vector of channels: Loop: After loop
"ands x21, %x[n_valid_cells], #0x3\n"
"beq 13f\n"
"12:" // Single vector of channels: Loop: Single input loop
"ldr x20, [x24], #0x8\n"
- "ld1b { z16.b }, p4/Z, [x20, x9]\n"
"subs x21, x21, #0x1\n"
- "umax z8.b, p0/M, z8.b, z16.b\n"
+ "ld1b { z16.b }, p3/Z, [x20, x9]\n"
+ "umax z6.b, p4/M, z6.b, z16.b\n"
"bgt 12b\n"
"13:" // Single vector of channels: Loop: Single input loop: End
- "add x20, %x[quant_params], %[offsetof_qp_input_offset]\n"
- "ld1rw { z18.s }, p0/Z, [x20]\n"
- ".inst 0x4508a911 // ushllb z17.h, z8.b, #0x0\n"
- ".inst 0x4508ad10 // ushllt z16.h, z8.b, #0x0\n"
- "neg z18.s, p0/M, z18.s\n"
+ "add x21, %x[quant_params], %[offsetof_qp_input_offset]\n"
+ ".inst 0x4508a8d1 // ushllb z17.h, z6.b, #0x0\n"
+ ".inst 0x4508acda // ushllt z26.h, z6.b, #0x0\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_left_shift]\n"
- ".inst 0x45914255 // saddwb z21.s, z18.s, z17.h\n"
- ".inst 0x45914654 // saddwt z20.s, z18.s, z17.h\n"
- ".inst 0x45904253 // saddwb z19.s, z18.s, z16.h\n"
- ".inst 0x45904652 // saddwt z18.s, z18.s, z16.h\n"
- "ld1rw { z17.s }, p0/Z, [x20]\n"
- "add x20, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- ".inst 0x44828235 // srshl z21.s, p0/M, z21.s, z17.s\n"
- ".inst 0x44828234 // srshl z20.s, p0/M, z20.s, z17.s\n"
- ".inst 0x04b076b5 // sqrdmulh z21.s, z21.s, z16.s\n"
- ".inst 0x44828233 // srshl z19.s, p0/M, z19.s, z17.s\n"
- ".inst 0x44828232 // srshl z18.s, p0/M, z18.s, z17.s\n"
- ".inst 0x04b07694 // sqrdmulh z20.s, z20.s, z16.s\n"
- ".inst 0x04b07673 // sqrdmulh z19.s, z19.s, z16.s\n"
+ "ld1rw { z16.s }, p4/Z, [x21]\n"
+ "ld1rw { z25.s }, p4/Z, [x20]\n"
+ "add x21, %x[quant_params], %[offsetof_qp_per_layer_mul]\n"
"add x20, %x[quant_params], %[offsetof_qp_per_layer_right_shift]\n"
- "ld1rw { z17.s }, p0/Z, [x20]\n"
- ".inst 0x04b07652 // sqrdmulh z18.s, z18.s, z16.s\n"
+ "ld1rw { z24.s }, p4/Z, [x21]\n"
+ "ld1rw { z23.s }, p4/Z, [x20]\n"
"add x20, %x[quant_params], %[offsetof_qp_output_offset]\n"
- ".inst 0x44828235 // srshl z21.s, p0/M, z21.s, z17.s\n"
- ".inst 0x44828234 // srshl z20.s, p0/M, z20.s, z17.s\n"
- "ld1rw { z16.s }, p0/Z, [x20]\n"
- "add z21.s, z21.s, z16.s\n"
- ".inst 0x44828233 // srshl z19.s, p0/M, z19.s, z17.s\n"
- ".inst 0x44828232 // srshl z18.s, p0/M, z18.s, z17.s\n"
- "add z20.s, z20.s, z16.s\n"
- "add z19.s, z19.s, z16.s\n"
- "add z18.s, z18.s, z16.s\n"
- "mov z16.s, #0x0\n"
- "smax z21.s, p0/M, z21.s, z16.s\n"
- "smax z20.s, p0/M, z20.s, z16.s\n"
- "smax z19.s, p0/M, z19.s, z16.s\n"
- "smax z18.s, p0/M, z18.s, z16.s\n"
- "mov z16.s, #0xff\n"
- "smin z21.s, p0/M, z21.s, z16.s\n"
- "smin z20.s, p0/M, z20.s, z16.s\n"
- "trn1 z17.h, z21.h, z20.h\n"
- "smin z19.s, p0/M, z19.s, z16.s\n"
- "smin z18.s, p0/M, z18.s, z16.s\n"
- "trn1 z16.h, z19.h, z18.h\n"
+ "mov z22.s, #0x0\n"
+ "ld1rw { z21.s }, p4/Z, [x20]\n"
+ "mov z20.s, #0xff\n"
+ "neg z16.s, p4/M, z16.s\n"
+ ".inst 0x45914213 // saddwb z19.s, z16.s, z17.h\n"
+ ".inst 0x45914611 // saddwt z17.s, z16.s, z17.h\n"
+ ".inst 0x459a4212 // saddwb z18.s, z16.s, z26.h\n"
+ ".inst 0x459a4610 // saddwt z16.s, z16.s, z26.h\n"
+ ".inst 0x44829333 // srshl z19.s, p4/M, z19.s, z25.s\n"
+ ".inst 0x44829331 // srshl z17.s, p4/M, z17.s, z25.s\n"
+ ".inst 0x44829332 // srshl z18.s, p4/M, z18.s, z25.s\n"
+ ".inst 0x44829330 // srshl z16.s, p4/M, z16.s, z25.s\n"
+ ".inst 0x04b87673 // sqrdmulh z19.s, z19.s, z24.s\n"
+ ".inst 0x04b87631 // sqrdmulh z17.s, z17.s, z24.s\n"
+ ".inst 0x04b87652 // sqrdmulh z18.s, z18.s, z24.s\n"
+ ".inst 0x04b87610 // sqrdmulh z16.s, z16.s, z24.s\n"
+ ".inst 0x448292f3 // srshl z19.s, p4/M, z19.s, z23.s\n"
+ ".inst 0x448292f1 // srshl z17.s, p4/M, z17.s, z23.s\n"
+ ".inst 0x448292f2 // srshl z18.s, p4/M, z18.s, z23.s\n"
+ ".inst 0x448292f0 // srshl z16.s, p4/M, z16.s, z23.s\n"
+ "add z19.s, z19.s, z21.s\n"
+ "add z17.s, z17.s, z21.s\n"
+ "add z18.s, z18.s, z21.s\n"
+ "add z16.s, z16.s, z21.s\n"
+ "smax z19.s, p4/M, z19.s, z22.s\n"
+ "smax z17.s, p4/M, z17.s, z22.s\n"
+ "smax z18.s, p4/M, z18.s, z22.s\n"
+ "smax z16.s, p4/M, z16.s, z22.s\n"
+ "smin z19.s, p4/M, z19.s, z20.s\n"
+ "smin z17.s, p4/M, z17.s, z20.s\n"
+ "smin z18.s, p4/M, z18.s, z20.s\n"
+ "smin z16.s, p4/M, z16.s, z20.s\n"
+ "trn1 z17.h, z19.h, z17.h\n"
+ "trn1 z16.h, z18.h, z16.h\n"
"trn1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p4, [%x[outptr], x9]\n"
+ "st1b { z16.b }, p3, [%x[outptr], x9]\n"
"incb x9\n"
- "whilelt p4.b, x9, %x[n_channels]\n"
+ "whilelt p3.b, x9, %x[n_channels]\n"
"b.any 8b\n"
"14:" // End
:
: [inptrs] "r" (inptrs), [n_channels] "r" (n_channels), [n_valid_cells] "r" (n_valid_cells), [offsetof_qp_input_offset] "I" (offsetof(Requantize32, input_offset)), [offsetof_qp_output_offset] "I" (offsetof(Requantize32, output_offset)), [offsetof_qp_per_layer_left_shift] "I" (offsetof(Requantize32, per_layer_left_shift)), [offsetof_qp_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_qp_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [outptr] "r" (outptr), [quant_params] "r" (&qp)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
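The requantize tail of the SVE pooling kernel above follows the usual per-layer scheme: widen the pooled u8 maxima, subtract the input offset (saddw against the negated offset), rounding-shift left, fixed-point multiply (sqrdmulh), rounding-shift right, add the output offset, then clamp to [0, 255] and pack (trn1). A scalar C++ model of one lane, with field names taken from the Requantize32 offsets in the operand list above (this is an illustrative sketch, not the library's code; the sqrdmulh saturation corner case is omitted):

#include <algorithm>
#include <cstdint>

struct RequantSketch {  // fields mirror the Requantize32 members referenced above
    int32_t input_offset, per_layer_left_shift, per_layer_mul,
            per_layer_right_shift, output_offset;
};

// Signed rounding shift left: positive amounts shift left, negative amounts
// perform a rounding shift right (what SVE SRSHL does per lane).
static inline int32_t srshl(int32_t v, int32_t shift)
{
    if (shift >= 0) return v << shift;
    return (v + (1 << (-shift - 1))) >> (-shift);
}

static inline uint8_t requantize_lane(uint8_t pooled_max, const RequantSketch &qp)
{
    int32_t v = int32_t(pooled_max) - qp.input_offset;                  // saddw w/ negated offset
    v = srshl(v, qp.per_layer_left_shift);                              // first srshl
    v = int32_t((int64_t(v) * qp.per_layer_mul + (1ll << 30)) >> 31);   // sqrdmulh (sans saturation)
    v = srshl(v, qp.per_layer_right_shift);                             // negative => rounding >>
    v += qp.output_offset;
    return uint8_t(std::min(255, std::max(0, v)));                      // smax 0 / smin 0xff
}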
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp
index 1ca478513c..dbd1f9516d 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -91,17 +91,17 @@ class PoolingDepthfirst : public DepthfirstDriver<TInput, TOutput>
protected:
/* Compute the amount of working space required for a single thread. */
- size_t get_working_size_per_thread() const override
+ size_t get_working_size_per_thread(unsigned int n_channels) const override
{
- return sizeof(WorkingSpace) + this->m_args.n_channels * (sizeof(TInput) + sizeof(TOutput));
+ return sizeof(WorkingSpace) + n_channels * (sizeof(TInput) + sizeof(TOutput));
}
/* Initialise the working space for a thread. */
- void initialise_working_space(void *raw_ws) const override
+ void initialise_working_space(void *raw_ws, unsigned int n_channels) const override
{
auto ws = reinterpret_cast<WorkingSpace *>(raw_ws);
ws->input_buffer = ws + 1;
- ws->output_buffer = reinterpret_cast<char *>(ws + 1) + sizeof(TInput) * this->m_args.n_channels;
+ ws->output_buffer = reinterpret_cast<char *>(ws + 1) + sizeof(TInput) * n_channels;
// Fill the input buffer with an appropriate value
TInput fill_val = 0;
@@ -119,7 +119,6 @@ class PoolingDepthfirst : public DepthfirstDriver<TInput, TOutput>
}
auto ptr = reinterpret_cast<TInput *>(ws->input_buffer);
- auto n_channels = this->m_args.n_channels;
for (; n_channels; n_channels--)
{
*(ptr++) = fill_val;
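The signature change lets the driver size and initialise a thread's workspace for the channel subrange it is actually given, instead of always using m_args.n_channels. A minimal standalone model of the layout (WorkingSpaceModel is a hypothetical stand-in for the real WorkingSpace):

#include <cstddef>

struct WorkingSpaceModel { void *input_buffer; void *output_buffer; };

template <typename TInput, typename TOutput>
size_t working_size_per_thread(unsigned int n_channels)
{
    // Header, then one input buffer and one output buffer, each n_channels wide.
    return sizeof(WorkingSpaceModel) + n_channels * (sizeof(TInput) + sizeof(TOutput));
}

template <typename TInput, typename TOutput>
void initialise_working_space(void *raw, unsigned int n_channels, TInput fill_val)
{
    auto *ws = static_cast<WorkingSpaceModel *>(raw);
    ws->input_buffer  = ws + 1;  // input buffer starts right after the header
    ws->output_buffer = reinterpret_cast<char *>(ws + 1) + sizeof(TInput) * n_channels;
    auto *ptr = static_cast<TInput *>(ws->input_buffer);
    for (unsigned int c = 0; c < n_channels; c++) ptr[c] = fill_val;  // pad value
}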
diff --git a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp
index ded2c75127..cb241cf76f 100644
--- a/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp
+++ b/src/core/NEON/kernels/arm_conv/pooling/pooling_depthfirst_generic.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -136,8 +136,8 @@ class PoolingDepthfirstGeneric : public DepthfirstDriver<TInput, TOutput>
const OutputStage m_os;
protected:
- size_t get_working_size_per_thread() const override { return 0; }
- void initialise_working_space(void *) const override { /* Nothing */ }
+ size_t get_working_size_per_thread(unsigned int) const override { return 0; }
+ void initialise_working_space(void *, unsigned int) const override { /* Nothing */ }
/* Compute a portion of the output tensor with padding. */
void compute_tile_padded(
diff --git a/src/core/NEON/kernels/arm_gemm/barrier.hpp b/src/core/NEON/kernels/arm_gemm/barrier.hpp
index 8fbcddfef8..3b34e8089f 100644
--- a/src/core/NEON/kernels/arm_gemm/barrier.hpp
+++ b/src/core/NEON/kernels/arm_gemm/barrier.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,6 +39,14 @@ private:
public:
barrier(unsigned int threads) : m_threads(threads), m_waiters(0), m_leavers(0) { }
+ // Add a move constructor because these objects might be moved around at setup time.
+ // Moving while the barrier is active won't work.
+ barrier(barrier &&other) : m_threads(other.m_threads), m_waiters(0), m_leavers(0) {
+ // This doesn't make it safe, but will have a chance of firing if something odd is occurring.
+ assert(other.m_waiters==0);
+ assert(other.m_leavers==0);
+ }
+
/* This isn't safe if any thread is waiting... */
void set_nthreads(unsigned int nthreads) {
m_threads = nthreads;
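Reduced to a standalone sketch, the pattern is: transfer only the configured thread count, reset the counters, and assert that the source barrier is idle, since moving a barrier with live waiters cannot be made safe:

#include <cassert>

class BarrierSketch {
    unsigned int m_threads;
    unsigned int m_waiters = 0;
    unsigned int m_leavers = 0;
public:
    explicit BarrierSketch(unsigned int threads) : m_threads(threads) {}

    // Valid only while no thread is waiting; the asserts are a tripwire,
    // not a safety guarantee.
    BarrierSketch(BarrierSketch &&other) : m_threads(other.m_threads) {
        assert(other.m_waiters == 0);
        assert(other.m_leavers == 0);
    }
    BarrierSketch &operator=(BarrierSketch &&) = delete;  // not needed at setup time
};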
diff --git a/src/core/NEON/kernels/arm_gemm/convolver.hpp b/src/core/NEON/kernels/arm_gemm/convolver.hpp
index b15f669132..a9f3dd0ad8 100644
--- a/src/core/NEON/kernels/arm_gemm/convolver.hpp
+++ b/src/core/NEON/kernels/arm_gemm/convolver.hpp
@@ -231,8 +231,8 @@ public:
for (unsigned int ky=0; ky<params.kernel_height; ky++) {
for (unsigned int kx=0; kx<params.kernel_width; kx++) {
unsigned int n = (ky * params.kernel_width) + kx;
- m_kernel_y[n] = ky - params.padding_top;
- m_kernel_x[n] = kx - params.padding_left;
+ m_kernel_y[n] = (ky * params.dilation_h) - params.padding_top;
+ m_kernel_x[n] = (kx * params.dilation_w) - params.padding_left;
}
}
}
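With the fix, tap n at kernel position (ky, kx) maps to input coordinates (ky*dilation_h - padding_top, kx*dilation_w - padding_left); previously dilation was ignored. A small worked example (3x3 kernel, dilation 2, padding 2) shows the tap rows becoming {-2, 0, 2} rather than the old {-2, -1, 0}:

#include <cstdio>
#include <vector>

int main()
{
    const unsigned kernel_h = 3, kernel_w = 3;
    const unsigned dilation_h = 2, dilation_w = 2;   // example values
    const int pad_top = 2, pad_left = 2;

    std::vector<int> ky_off(kernel_h * kernel_w), kx_off(kernel_h * kernel_w);
    for (unsigned ky = 0; ky < kernel_h; ky++) {
        for (unsigned kx = 0; kx < kernel_w; kx++) {
            unsigned n = ky * kernel_w + kx;
            ky_off[n] = static_cast<int>(ky * dilation_h) - pad_top;   // was: ky - pad_top
            kx_off[n] = static_cast<int>(kx * dilation_w) - pad_left;  // was: kx - pad_left
        }
    }
    for (unsigned n = 0; n < ky_off.size(); n++)
        std::printf("tap %u -> (%d, %d)\n", n, ky_off[n], kx_off[n]);
    return 0;
}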
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
index 5c08e6137d..7d746789e4 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -46,6 +46,7 @@
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp"
+#include "kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL.hpp"
#include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp"
#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
@@ -67,10 +68,10 @@
namespace arm_gemm {
-static const GemmImplementation<bfloat16, float> gemm_bf16_methods[] =
+static const GemmImplementation<bfloat16, bfloat16, float> gemm_bf16_methods[] =
{
-#ifdef __aarch64__
#ifdef ARM_COMPUTE_ENABLE_BF16
+#ifdef __aarch64__
#ifdef ARM_COMPUTE_ENABLE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
// SME kernels
@@ -86,7 +87,7 @@ static const GemmImplementation<bfloat16, float> gemm_bf16_methods[] =
"sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL",
[](const GemmArgs &args) { return args._ci->has_sme2(); },
[](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
- return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL, bfloat16, float>(args); }
},
{
@@ -106,36 +107,36 @@ static const GemmImplementation<bfloat16, float> gemm_bf16_methods[] =
},
#endif // ARM_COMPUTE_ENABLE_SME2
// gemm_bf16_interleaved
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_bf16fp32_mmla_8x3VL",
[](const GemmArgs &args) { return args._ci->has_svebf16() && (args._Ksize>4); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, bfloat16, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_bf16fp32_mmla_6x4VL",
[](const GemmArgs &args) { return args._ci->has_svebf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_bf16fp32_mmla_6x4VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_bf16fp32_mmla_6x4VL, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_bf16fp32_mmla_6x4VL, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_bf16fp32_mmla_6x4VL, bfloat16, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_bf16fp32_dot_6x4VL",
[](const GemmArgs &args) { return args._ci->has_svebf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_bf16fp32_dot_6x4VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_bf16fp32_dot_6x4VL, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_bf16fp32_dot_6x4VL, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_bf16fp32_dot_6x4VL, bfloat16, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_bf16fp32_dot_8x3VL",
[](const GemmArgs &args) { return args._ci->has_svebf16() && (args._Ksize>2); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_dot_8x3VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_dot_8x3VL, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_dot_8x3VL, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_dot_8x3VL, bfloat16, bfloat16, float>(args); }
),
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_ffinterleaved_bf16fp32_mmla_8x3VL",
KernelWeightFormat::VL2VL_BL64,
@@ -143,7 +144,15 @@ GemmImplementation<bfloat16, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_ffinterleaved_bf16fp32_dot_8x3VL",
+ KernelWeightFormat::VL1VL_BL32,
+ [](const GemmArgs &args) { return args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_dot_8x3VL, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_dot_8x3VL, bfloat16, float>(args); }
+),
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_ffhybrid_bf16fp32_mmla_6x4VL",
KernelWeightFormat::VL2VL_BL64,
@@ -153,36 +162,36 @@ GemmImplementation<bfloat16, float>::with_estimate(
),
#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#endif // ARM_COMPUTE_ENABLE_SVE
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_bf16fp32_mmla_6x16",
[](const GemmArgs &args) { return args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_bf16fp32_mmla_6x16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_bf16fp32_mmla_6x16, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_bf16fp32_mmla_6x16, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_bf16fp32_mmla_6x16, bfloat16, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_bf16fp32_mmla_8x12",
[](const GemmArgs &args) { return args._ci->has_bf16() && (args._Ksize>4); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, bfloat16, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_bf16fp32_dot_6x16",
[](const GemmArgs &args) { return args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_bf16fp32_dot_6x16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_bf16fp32_dot_6x16, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_bf16fp32_dot_6x16, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_bf16fp32_dot_6x16, bfloat16, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_bf16fp32_dot_8x12",
[](const GemmArgs &args) { return args._ci->has_bf16() && (args._Ksize>2); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_dot_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_dot_8x12, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_dot_8x12, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_dot_8x12, bfloat16, bfloat16, float>(args); }
),
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffinterleaved_bf16fp32_mmla_8x12",
KernelWeightFormat::VL256_BL64,
@@ -190,7 +199,7 @@ GemmImplementation<bfloat16, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffhybrid_bf16fp32_mmla_6x16",
KernelWeightFormat::VL256_BL64,
@@ -198,7 +207,7 @@ GemmImplementation<bfloat16, float>::with_estimate(
[](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_bf16fp32_mmla_6x16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
[](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_bf16fp32_mmla_6x16, bfloat16, float>(args); }
),
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffinterleaved_bf16fp32_dot_8x12",
KernelWeightFormat::VL128_BL32,
@@ -207,15 +216,25 @@ GemmImplementation<bfloat16, float>::with_estimate(
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_dot_8x12, bfloat16, float>(args); }
),
#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
-GemmImplementation<bfloat16, float>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_sgemm_8x12",
nullptr,
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, bfloat16, float>::estimate_cycles<bfloat16>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, bfloat16, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, bfloat16, bfloat16, float>::estimate_cycles<bfloat16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, bfloat16, bfloat16, float>(args); }
),
+#elif defined(__arm__)
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "sgemm_8x6",
+ nullptr,
+ nullptr,
+ [](const GemmArgs &args) { return new GemmInterleaved<sgemm_8x6, bfloat16, bfloat16, float>(args); }
+},
+#else
+# error "Unknown Architecture"
+#endif
#endif // ARM_COMPUTE_ENABLE_BF16
-#endif // __aarch64__
{
GemmMethod::DEFAULT,
"",
@@ -226,14 +245,14 @@ GemmImplementation<bfloat16, float>::with_estimate(
};
template<>
-const GemmImplementation<bfloat16, float> *gemm_implementation_list<bfloat16, float>() {
+const GemmImplementation<bfloat16, bfloat16, float> *gemm_implementation_list<bfloat16, bfloat16, float>() {
return gemm_bf16_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<bfloat16, float> gemm<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<bfloat16, float, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<bfloat16, bfloat16, float> gemm<bfloat16, bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<bfloat16, bfloat16, float, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<bfloat16, bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<bfloat16, bfloat16, float, Nothing>(const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
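The mechanical change running through this file and the ones below is that GemmImplementation and the strategy wrappers now carry separate left-hand and right-hand operand types, <Tlhs, Trhs, Tret>, instead of a single shared input type; for bf16 GEMM both operands are bfloat16, so every <bfloat16, float> becomes <bfloat16, bfloat16, float>. A trimmed schematic of the widened entry type (hypothetical names, limited to the fields visible in this diff):

struct GemmArgsSketch { unsigned M, N, K; };  // stand-in for arm_gemm::GemmArgs

// Schematic only: the real GemmImplementation carries more fields and logic.
template <typename Tlhs, typename Trhs, typename Tret>
struct GemmImplementationSketch {
    const char *name;
    bool  (*is_supported)(const GemmArgsSketch &);  // nullptr means "always"
    void *(*instantiate)(const GemmArgsSketch &);   // builds the kernel object
};

// bf16 x bf16 -> fp32 entries, previously GemmImplementation<bfloat16, float>:
using Bf16Entry = GemmImplementationSketch<unsigned short /*bfloat16*/,
                                           unsigned short /*bfloat16*/, float>;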
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp
index aa761b46e4..1e4de4a39e 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_bf16bf16.cpp
@@ -32,12 +32,12 @@
namespace arm_gemm {
-static const GemmImplementation<bfloat16, bfloat16> gemm_bf16bf16_methods[] =
+static const GemmImplementation<bfloat16, bfloat16, bfloat16> gemm_bf16bf16_methods[] =
{
#ifdef __aarch64__
#ifdef ARM_COMPUTE_ENABLE_BF16
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
-GemmImplementation<bfloat16, bfloat16>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, bfloat16>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffinterleaved_bf16fp32_mmla_8x12",
KernelWeightFormat::VL256_BL64,
@@ -45,7 +45,7 @@ GemmImplementation<bfloat16, bfloat16>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, bfloat16, bfloat16>::estimate_cycles<bfloat16>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, bfloat16, bfloat16>(args); }
),
-GemmImplementation<bfloat16, bfloat16>::with_estimate(
+GemmImplementation<bfloat16, bfloat16, bfloat16>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_ffinterleaved_bf16fp32_mmla_8x3VL",
KernelWeightFormat::VL2VL_BL64,
@@ -66,14 +66,14 @@ GemmImplementation<bfloat16, bfloat16>::with_estimate(
};
template<>
-const GemmImplementation<bfloat16, bfloat16> *gemm_implementation_list<bfloat16, bfloat16>() {
+const GemmImplementation<bfloat16, bfloat16, bfloat16> *gemm_implementation_list<bfloat16, bfloat16, bfloat16>() {
return gemm_bf16bf16_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<bfloat16, bfloat16> gemm<bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<bfloat16, bfloat16, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<bfloat16, bfloat16, bfloat16> gemm<bfloat16, bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<bfloat16, bfloat16, bfloat16, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<bfloat16, bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<bfloat16, bfloat16, bfloat16, Nothing>(const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
index 3b444ae333..0d9f53b84d 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,7 +23,7 @@
*/
// This can only be built if the target/compiler supports FP16 arguments.
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+#if defined(__aarch64__) && (defined(ENABLE_FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
#include "arm_gemm.hpp"
@@ -42,12 +42,10 @@
#include "kernels/a64_hgemm_8x24.hpp"
#include "kernels/a64_hybrid_fp16_mla_6x32.hpp"
#include "kernels/a64_sgemm_8x12.hpp"
-#ifdef ARM_COMPUTE_ENABLE_SME2
#include "kernels/sme2_gemv_fp16fp32fp16_dot_16VL.hpp"
#include "kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp"
#include "kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp"
#include "kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp"
-#endif // ARM_COMPUTE_ENABLE_SME2
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#include "kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp"
#include "kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp"
@@ -57,7 +55,7 @@
namespace arm_gemm {
-static const GemmImplementation<__fp16, __fp16> gemm_fp16_methods[] = {
+static const GemmImplementation<__fp16, __fp16, __fp16> gemm_fp16_methods[] = {
#ifdef ARM_COMPUTE_ENABLE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
{
@@ -69,44 +67,44 @@ static const GemmImplementation<__fp16, __fp16> gemm_fp16_methods[] = {
},
{
GemmMethod::GEMM_INTERLEAVED,
- "sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL",
+ "sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL",
[](const GemmArgs &args) { return args._ci->has_sme2(); },
[](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
- return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL, __fp16, __fp16, Nothing, false, false, false, true>(args); }
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL, __fp16, __fp16, __fp16, Nothing, false, false, false, true>(args); }
},
{
GemmMethod::GEMM_INTERLEAVED,
- "sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL",
+ "sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL",
[](const GemmArgs &args) { return args._ci->has_sme2(); },
[](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
- return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL, __fp16, __fp16, Nothing, false, false, false, true>(args); }
+ return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL, __fp16, __fp16, __fp16, Nothing, false, false, false, true>(args); }
},
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL",
[](const GemmArgs &args) { return args._ci->has_sme2(); },
nullptr,
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL, __fp16, __fp16, Nothing, false, false, false, true>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL, __fp16, __fp16, __fp16, Nothing, false, false, false, true>(args); }
},
#endif // ARM_COMPUTE_ENABLE_SME2
-GemmImplementation<__fp16, __fp16>::with_estimate(
+GemmImplementation<__fp16, __fp16, __fp16>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp16_mla_6x4VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp16_mla_6x4VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp16_mla_6x4VL, __fp16, __fp16>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp16_mla_6x4VL, __fp16, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp16_mla_6x4VL, __fp16, __fp16, __fp16>(args); }
),
-GemmImplementation<__fp16, __fp16>::with_estimate(
+GemmImplementation<__fp16, __fp16, __fp16>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_fp16_mla_8x3VL",
[](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize > 4); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp16_mla_8x3VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp16_mla_8x3VL, __fp16, __fp16>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp16_mla_8x3VL, __fp16, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp16_mla_8x3VL, __fp16, __fp16, __fp16>(args); }
),
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
-GemmImplementation<__fp16, __fp16>::with_estimate(
+GemmImplementation<__fp16, __fp16, __fp16>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_ffinterleaved_fp16_mla_8x3VL",
KernelWeightFormat::VL1VL_BL16,
@@ -114,7 +112,7 @@ GemmImplementation<__fp16, __fp16>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp16_mla_8x3VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp16_mla_8x3VL, __fp16, __fp16>(args); }
),
-GemmImplementation<__fp16, __fp16>::with_estimate(
+GemmImplementation<__fp16, __fp16, __fp16>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_ffhybrid_fp16_mla_6x4VL",
KernelWeightFormat::VL1VL_BL16,
@@ -125,22 +123,22 @@ GemmImplementation<__fp16, __fp16>::with_estimate(
#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#endif // ARM_COMPUTE_ENABLE_SVE
#if defined(__aarch64__)
-GemmImplementation<__fp16, __fp16>::with_estimate(
+GemmImplementation<__fp16, __fp16, __fp16>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp16_mla_6x32",
[](const GemmArgs &args) { return args._ci->has_fp16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp16_mla_6x32, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp16_mla_6x32, __fp16, __fp16>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp16_mla_6x32, __fp16, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp16_mla_6x32, __fp16, __fp16, __fp16>(args); }
),
-GemmImplementation<__fp16, __fp16>::with_estimate(
+GemmImplementation<__fp16, __fp16, __fp16>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_hgemm_8x24",
[](const GemmArgs &args) { return args._ci->has_fp16(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16, __fp16>(args); }
),
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
-GemmImplementation<__fp16, __fp16>::with_estimate(
+GemmImplementation<__fp16, __fp16, __fp16>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffinterleaved_fp16_mla_8x24",
KernelWeightFormat::VL128_BL16,
@@ -148,7 +146,7 @@ GemmImplementation<__fp16, __fp16>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp16_mla_8x24, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp16_mla_8x24, __fp16, __fp16>(args); }
),
-GemmImplementation<__fp16, __fp16>::with_estimate(
+GemmImplementation<__fp16, __fp16, __fp16>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_ffhybrid_fp16_mla_6x32",
KernelWeightFormat::VL128_BL16,
@@ -162,7 +160,7 @@ GemmImplementation<__fp16, __fp16>::with_estimate(
"a64_sgemm_8x12",
nullptr,
[](const GemmArgs &args) { return !args._ci->has_fp16(); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, __fp16, __fp16>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, __fp16, __fp16, __fp16>(args); }
},
#elif defined(__arm__)
{
@@ -170,7 +168,7 @@ GemmImplementation<__fp16, __fp16>::with_estimate(
"sgemm_8x6",
nullptr,
nullptr,
- [](const GemmArgs &args) { return new GemmInterleaved<sgemm_8x6, __fp16, __fp16>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<sgemm_8x6, __fp16, __fp16, __fp16>(args); }
},
#else // not AArch64 or AArch32
# error Unknown Architecture
@@ -185,16 +183,16 @@ GemmImplementation<__fp16, __fp16>::with_estimate(
};
template<>
-const GemmImplementation<__fp16, __fp16> *gemm_implementation_list<__fp16, __fp16>() {
+const GemmImplementation<__fp16, __fp16, __fp16> *gemm_implementation_list<__fp16, __fp16, __fp16>() {
return gemm_fp16_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<__fp16, __fp16> gemm<__fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<__fp16, __fp16, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<__fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<__fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<__fp16, __fp16, __fp16> gemm<__fp16, __fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<__fp16, __fp16, __fp16, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<__fp16, __fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<__fp16, __fp16, __fp16, Nothing>(const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
-#endif // defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+#endif // defined(__aarch64__) && (defined(ENABLE_FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
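Besides the FP16_KERNELS to ENABLE_FP16_KERNELS rename, the SME2 entries above swap positions and revise the 1VLx4VL suitability rule: it now also fires when N spans at least eight vector lengths. The predicate, pulled out as a standalone function (VL in 32-bit elements):

// Suitability rule for the 1VLx4VL SME2 tile shape, as revised above.
static bool prefer_1VLx4VL(unsigned M, unsigned N, unsigned VL)
{
    return N >= 8 * VL                   // new: wide problems favour the tall-thin tile
        || M <= VL                       // M fits within a single vector length
        || (2 * VL < M && M <= 3 * VL);  // or lands in the (2VL, 3VL] band
}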
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index 290fe87230..5da7161671 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -82,7 +82,7 @@
namespace arm_gemm {
-static const GemmImplementation<float, float> gemm_fp32_methods[] =
+static const GemmImplementation<float, float, float> gemm_fp32_methods[] =
{
// GEMV cases - starting with 'gemv_batched' wrapper to turn batched GEMV into GEMM.
{
@@ -95,27 +95,27 @@ static const GemmImplementation<float, float> gemm_fp32_methods[] =
#ifdef __aarch64__
#ifdef ARM_COMPUTE_ENABLE_BF16
// "fast mode" (BF16) kernels
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_bf16fp32_mmla_8x12",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32bf16fp32_mmla_6x16",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32bf16fp32_mmla_4x24",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float, float>(args); }
),
#endif // ARM_COMPUTE_ENABLE_BF16
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -124,14 +124,14 @@ GemmImplementation<float, float>::with_estimate(
{
GemmMethod::GEMM_HYBRID,
"sme2_gemv_fp32bf16fp32_dot_16VL",
- [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input && !args._accumulate; },
nullptr,
[](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_fp32bf16fp32_dot_16VL, float, float>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"sme2_gemv_fp32_mla_16VL",
- [](const GemmArgs &args) { return args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
+ [](const GemmArgs &args) { return args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input && !args._accumulate; },
nullptr,
[](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_fp32_mla_16VL, float, float>(args); }
},
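A recurring edit in gemm_fp32.cpp: the SME2 GEMV and "nomerge" MOPA entries now also require !args._accumulate, since these kernels overwrite the output and cannot add into existing C. The revised guard, flattened into an illustrative predicate (GemmArgs trimmed to booleans for the sketch):

struct ArgsSketch { bool fast_mode, has_sme2, indirect_input, accumulate;
                    unsigned Msize, nbatches; };

static bool sme2_gemv_ok(const ArgsSketch &a)
{
    return a.fast_mode && a.has_sme2
        && a.Msize == 1 && a.nbatches == 1
        && !a.indirect_input
        && !a.accumulate;   // new: SME2 kernels cannot accumulate into C
}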
@@ -139,25 +139,25 @@ GemmImplementation<float, float>::with_estimate(
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL",
- [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
[](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
- return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL, float, float>(args); }
},
#endif // ARM_COMPUTE_ENABLE_BF16
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_fp32_mopa_1VLx4VL",
- [](const GemmArgs &args) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
[](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
- return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_1VLx4VL, float, float>(args); }
},
#ifdef ARM_COMPUTE_ENABLE_BF16
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL",
- [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
[](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL, float, float>(args); }
@@ -166,7 +166,7 @@ GemmImplementation<float, float>::with_estimate(
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_fp32_mopa_4VLx1VL",
- [](const GemmArgs &args) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
[](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_4VLx1VL, float, float>(args); }
@@ -175,7 +175,7 @@ GemmImplementation<float, float>::with_estimate(
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL",
- [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2(); },
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
nullptr,
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL, float, float>(args); }
},
@@ -183,32 +183,32 @@ GemmImplementation<float, float>::with_estimate(
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_fp32_mopa_2VLx2VL",
- [](const GemmArgs &args) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
nullptr,
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_2VLx2VL, float, float>(args); }
},
#endif // ARM_COMPUTE_ENABLE_SME2
#ifdef ARM_COMPUTE_ENABLE_BF16
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_bf16fp32_mmla_8x3VL",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp32bf16fp32_mmla_6x4VL",
- [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float>(args); }
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp32bf16fp32_mmla_4x6VL",
- [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float>(args); }
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float, float>(args); }
),
#endif // ARM_COMPUTE_ENABLE_BF16
#ifdef ARM_COMPUTE_ENABLE_SVEF32MM
@@ -218,8 +218,8 @@ GemmImplementation<float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_fp32_mmla_8x3VL",
[](const GemmArgs &args) { return args._ci->has_svef32mm() && (args._Ksize>4); },
- [](const GemmArgs &args) { return !(args._fast_mode && args._ci->has_bf16()); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mmla_8x3VL, float, float>(args); }
+ [](const GemmArgs &args) { return !(args._fast_mode && args._ci->has_svebf16()); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mmla_8x3VL, float, float, float>(args); }
},
#endif // ARM_COMPUTE_ENABLE_SVEF32MM
// SVE kernels
@@ -228,25 +228,25 @@ GemmImplementation<float, float>::with_estimate(
"sve_hybrid_fp32_mla_8x1VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
[](const GemmArgs &args) { return (args._Nsize < 12); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_8x1VL, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_8x1VL, float, float, float>(args); }
},
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp32_mla_6x4VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_fp32_mla_8x3VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float, float>(args); }
),
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#ifdef ARM_COMPUTE_ENABLE_BF16
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_ffinterleaved_bf16fp32_mmla_8x3VL",
KernelWeightFormat::VL2VL_BL64_BF16,
@@ -254,7 +254,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_ffhybrid_fp32bf16fp32_mmla_4x6VL",
KernelWeightFormat::VL2VL_BL64_BF16,
@@ -263,7 +263,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32bf16fp32_mmla_4x6VL, float, float>(args); }
),
#endif
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_ffinterleaved_fp32_mla_8x3VL",
KernelWeightFormat::VL1VL_BL32,
@@ -271,7 +271,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp32_mla_8x3VL, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp32_mla_8x3VL, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_ffhybrid_fp32_mla_6x4VL",
KernelWeightFormat::VL1VL_BL32,
@@ -287,7 +287,7 @@ GemmImplementation<float, float>::with_estimate(
"a64_sgemm_8x6",
nullptr,
[](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A35; },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x6, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x6, float, float, float>(args); }
},
// Arm® Neon™ hybrid methods
{
@@ -309,33 +309,33 @@ GemmImplementation<float, float>::with_estimate(
"a64_hybrid_fp32_mla_8x4",
nullptr,
[](const GemmArgs &args) { return (args._Nsize < 12); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_8x4, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_8x4, float, float, float>(args); }
},
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32_mla_4x24",
nullptr,
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32_mla_6x16",
nullptr,
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_sgemm_8x12",
nullptr,
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, float, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, float, float, float>(args); }
),
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#ifdef ARM_COMPUTE_ENABLE_BF16
// "fast mode" (BF16) kernels
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffinterleaved_bf16fp32_mmla_8x12",
KernelWeightFormat::VL256_BL64_BF16,
@@ -343,7 +343,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_ffhybrid_fp32bf16fp32_mmla_4x24",
KernelWeightFormat::VL256_BL64_BF16,
@@ -351,7 +351,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_ffhybrid_fp32bf16fp32_mmla_6x16",
KernelWeightFormat::VL256_BL64_BF16,
@@ -359,8 +359,9 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_6x16, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_6x16, float, float>(args); }
),
+
#endif // BF16
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffinterleaved_fp32_mla_8x12",
KernelWeightFormat::VL128_BL32,
@@ -368,7 +369,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp32_mla_8x12, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp32_mla_8x12, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_ffhybrid_fp32_mla_6x16",
KernelWeightFormat::VL128_BL32,
@@ -385,7 +386,7 @@ GemmImplementation<float, float>::with_estimate(
"sgemm_8x6",
nullptr,
nullptr,
- [](const GemmArgs &args) { return new GemmInterleaved<sgemm_8x6, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<sgemm_8x6, float, float, float>(args); }
},
#endif // __arm__
{
@@ -399,14 +400,14 @@ GemmImplementation<float, float>::with_estimate(
/* Templated function to return this list. */
template<>
-const GemmImplementation<float, float> *gemm_implementation_list<float, float>() {
+const GemmImplementation<float, float, float> *gemm_implementation_list<float, float, float>() {
return gemm_fp32_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<float, float> gemm<float, float, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<float, float, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<float, float, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<float, float, Nothing> (const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<float, float, float> gemm<float, float, float, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<float, float, float, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<float, float, float, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<float, float, float, Nothing> (const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
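The fp32 list above shows the shape of the wider change: every GemmImplementation, gemm_implementation_list and entry-point instantiation gains a third type parameter, splitting the single operand type into separate LHS and RHS (weight) types. A hedged caller-side sketch of what that means in practice; make_fp32_gemm is a hypothetical wrapper, everything else is the API instantiated above:

// Hypothetical wrapper illustrating the template-arity change only.
arm_gemm::UniqueGemmCommon<float, float, float>
make_fp32_gemm(const arm_gemm::GemmArgs &args) {
    // Previously: gemm<float, float, Nothing>(args, ...);
    // Now the operand type is split into LHS and RHS (weight) types:
    return arm_gemm::gemm<float, float, float, arm_gemm::Nothing>(args, arm_gemm::Nothing());
}

For fp32 all three types coincide, so the change is mechanical here; the split only pays off where activations and weights differ.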
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
index a6c9677305..834751f1fe 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
@@ -41,7 +41,7 @@ namespace arm_gemm {
// Implementation of the GemmCommon abstract class.
template<typename strategy, typename To, typename Tr>
-class GemmHybrid : public GemmCommon<To, Tr> {
+class GemmHybrid : public GemmCommon<To, To, Tr> {
typedef typename strategy::operand_type Toi;
typedef typename strategy::result_type Tri;
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp
index 0cc4d4f3d9..8bbb877c1b 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp
@@ -260,8 +260,8 @@ struct kernel_weight_format<strategy, false> {
} // anonymous namespace
// Implementation of the GemmCommon abstract class.
-template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool SeparateQuantize=false, bool FixedFormat=false>
-class GemmHybridIndirect : public GemmCommon<To, Tr> {
+template<typename strategy, typename To, typename Tw, typename Tr, typename OutputStage=Nothing, bool SeparateQuantize=false, bool FixedFormat=false>
+class GemmHybridIndirect : public GemmCommon<To, Tw, Tr> {
typedef typename strategy::lhs_operand_type Tloi;
typedef typename strategy::rhs_operand_type Troi;
typedef typename strategy::result_type Tri;
@@ -618,7 +618,7 @@ public:
return _args._nmulti * iceildiv(_args._Nsize, strategy::out_width());
}
- void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
+ void requantize_bias(void *in_buffer, const Tw *B, const int ldb, const int B_multi_stride) override {
if (std::is_same<OutputStage, Requantize32>::value) {
_col_bias = reinterpret_cast<int32_t *>(in_buffer);
@@ -636,11 +636,11 @@ public:
return strat.transforms.PrepareB_supports_transpose();
}
- void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride, bool transposed) override {
+ void pretranspose_B_array(void *in_buffer, const Tw *B, const int ldb, const int B_multi_stride, bool transposed) override {
pretranspose_B_array_part(in_buffer, B, ldb, B_multi_stride, transposed, 0, get_B_pretranspose_window_size());
}
- void pretranspose_B_array_part(void *in_buffer, const To *B, const int ldb, const int B_multi_stride, bool transposed, size_t start, size_t end) override {
+ void pretranspose_B_array_part(void *in_buffer, const Tw *B, const int ldb, const int B_multi_stride, bool transposed, size_t start, size_t end) override {
if (end >= get_B_pretranspose_window_size()) {
requantize_bias(in_buffer, B, ldb, B_multi_stride);
}
@@ -835,7 +835,7 @@ public:
};
template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
-using GemmHybridIndirectFixedFormat = GemmHybridIndirect<strategy, To, Tr, OutputStage, false, true>;
+using GemmHybridIndirectFixedFormat = GemmHybridIndirect<strategy, To, To, Tr, OutputStage, false, true>;
} // namespace arm_gemm
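GemmHybridIndirect is the first class in this patch to make real use of the split: the B-side entry points (requantize_bias, pretranspose_B_array, pretranspose_B_array_part) now take const Tw * rather than const To *. A sketch of what the extra parameter allows, with purely illustrative types (no such instantiation is added by this patch; MixedStrategy is a hypothetical kernel strategy with matching lhs/rhs operand types):

// Activations and weights no longer have to share a type: e.g. fp32 LHS
// against int8 packed weights, producing fp32 output.
template<typename MixedStrategy>
using HybridMixedSketch =
    arm_gemm::GemmHybridIndirect<MixedStrategy,
                                 float,    // To: LHS / activation type
                                 int8_t,   // Tw: weight (B matrix) type
                                 float>;   // Tr: result type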
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
index f12efe4282..62184bcbd1 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
@@ -42,7 +42,7 @@ namespace arm_gemm {
// Implementation of the GemmCommon abstract class.
template<typename strategy, typename To, typename Tr>
-class GemmHybridQuantized : public GemmCommon<To, Tr> {
+class GemmHybridQuantized : public GemmCommon<To, To, Tr> {
typedef typename strategy::operand_type Toi;
typedef typename strategy::result_type Tri;
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp b/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp
index 5e77df7d4a..19d5e3e23d 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2018-2020, 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,17 +35,17 @@ namespace arm_gemm {
* of types, a static list of these structures is built up to describe the
* implementations available.
*/
-template<typename Top, typename Tret, class OutputStage = Nothing>
+template<typename Tlop, typename Trop, typename Tret, class OutputStage = Nothing>
struct GemmImplementation {
const GemmMethod method;
const char * name;
const KernelWeightFormat kernel_weight_format = KernelWeightFormat::NON_FIXED;
std::function<bool(const GemmArgs &, const OutputStage &)> is_supported = {};
std::function<uint64_t(const GemmArgs &, const OutputStage &)> cycle_estimate = {};
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &, const OutputStage &)> instantiate = {};
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &, const OutputStage &)> instantiate = {};
bool do_is_supported(const GemmArgs &args, const OutputStage &os) const {
- // Check supplied is_supported() function first.
+ // Check supplied is_supported() function first.
if (is_supported != nullptr && !is_supported(args, os)) {
return false;
}
@@ -68,7 +68,7 @@ struct GemmImplementation {
// If we get here it means there is a config and it specifies a format. Check it matches this kernel.
// NOTE: this will execute SVE instructions if it's an SVE kernel, so it's important that is_supported()
// was called above first.
- return (args._cfg->weight_format == get_weight_format(kernel_weight_format, sizeof(Top)));
+ return (args._cfg->weight_format == get_weight_format(kernel_weight_format, sizeof(Tlop)));
}
}
@@ -80,13 +80,13 @@ struct GemmImplementation {
}
}
- GemmCommon<Top, Tret> *do_instantiate(const GemmArgs &args, const OutputStage &os) const {
+ GemmCommon<Tlop, Trop, Tret> *do_instantiate(const GemmArgs &args, const OutputStage &os) const {
return instantiate(args, os);
}
static GemmImplementation with_estimate(GemmMethod m, const char *n,
std::function<bool(const GemmArgs &, const OutputStage &)> is_supported, std::function<uint64_t(const GemmArgs &, const OutputStage &)> cycle_estimate,
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &, const OutputStage &)> instantiate) {
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &, const OutputStage &)> instantiate) {
GemmImplementation impl(m,n);
impl.is_supported=is_supported;
@@ -103,14 +103,14 @@ struct GemmImplementation {
GemmImplementation(GemmMethod m, const char *n,
std::function<bool(const GemmArgs &, const OutputStage &)> is_supported, std::function<bool(const GemmArgs &, const OutputStage &)> is_recommended,
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &, const OutputStage &)> instantiate) :
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &, const OutputStage &)> instantiate) :
method(m), name(n), is_supported(is_supported),
cycle_estimate( [is_recommended](const GemmArgs &args, const OutputStage &os) { return (is_recommended == nullptr) ? 0 : (is_recommended(args, os) ? 0 : UINT64_MAX); } ),
instantiate(instantiate) { }
GemmImplementation(GemmMethod m, const char *n, KernelWeightFormat kwf,
std::function<bool(const GemmArgs &, const OutputStage &)> is_supported, std::function<bool(const GemmArgs &, const OutputStage &)> is_recommended,
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &, const OutputStage &)> instantiate) :
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &, const OutputStage &)> instantiate) :
method(m), name(n), kernel_weight_format(kwf), is_supported(is_supported),
cycle_estimate( [is_recommended](const GemmArgs &args, const OutputStage &os) { return (is_recommended == nullptr) ? 0 : (is_recommended(args, os) ? 0 : UINT64_MAX); } ),
instantiate(instantiate) { }
@@ -119,17 +119,17 @@ struct GemmImplementation {
/* Slightly different version of above for straightforward GEMMs with no
* output stage, so the std::functions there don't have to deal with the
* unnecessary second argument. */
-template<typename Top, typename Tret>
-struct GemmImplementation<Top, Tret, Nothing> {
+template<typename Tlop, typename Trop, typename Tret>
+struct GemmImplementation<Tlop, Trop, Tret, Nothing> {
const GemmMethod method;
const char * name;
const KernelWeightFormat kernel_weight_format = KernelWeightFormat::NON_FIXED;
std::function<bool(const GemmArgs &)> is_supported = {};
std::function<uint64_t(const GemmArgs &)> cycle_estimate = {};
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &)> instantiate = {};
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &)> instantiate = {};
bool do_is_supported(const GemmArgs &args, const Nothing &) const {
- // Check supplied is_supported() function first.
+ // Check supplied is_supported() function first.
if (is_supported != nullptr && !is_supported(args)) {
return false;
}
@@ -152,7 +152,7 @@ struct GemmImplementation<Top, Tret, Nothing> {
// If we get here it means there is a config and it specifies a format. Check it matches this kernel.
// NOTE: this will execute SVE instructions if it's an SVE kernel, so it's important that is_supported()
// was called above first.
- return (args._cfg->weight_format == get_weight_format(kernel_weight_format, sizeof(Top)));
+ return (args._cfg->weight_format == get_weight_format(kernel_weight_format, sizeof(Tlop)));
}
}
@@ -164,13 +164,13 @@ struct GemmImplementation<Top, Tret, Nothing> {
}
}
- GemmCommon<Top, Tret> *do_instantiate(const GemmArgs &args, const Nothing &) const {
+ GemmCommon<Tlop, Trop, Tret> *do_instantiate(const GemmArgs &args, const Nothing &) const {
return instantiate(args);
}
static GemmImplementation with_estimate(GemmMethod m, const char *n,
std::function<bool(const GemmArgs &)> is_supported, std::function<uint64_t(const GemmArgs &)> cycle_estimate,
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &)> instantiate) {
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &)> instantiate) {
GemmImplementation impl(m,n);
impl.is_supported=is_supported;
@@ -182,7 +182,7 @@ struct GemmImplementation<Top, Tret, Nothing> {
static GemmImplementation with_estimate(GemmMethod m, const char *n, KernelWeightFormat f,
std::function<bool(const GemmArgs &)> is_supported, std::function<uint64_t(const GemmArgs &)> cycle_estimate,
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &)> instantiate) {
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &)> instantiate) {
GemmImplementation impl(m,n,f);
impl.is_supported=is_supported;
@@ -199,14 +199,14 @@ struct GemmImplementation<Top, Tret, Nothing> {
GemmImplementation(GemmMethod m, const char *n,
std::function<bool(const GemmArgs &)> is_supported, std::function<bool(const GemmArgs &)> is_recommended,
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &)> instantiate) :
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &)> instantiate) :
method(m), name(n), is_supported(is_supported),
cycle_estimate( [is_recommended](const GemmArgs &args) -> uint64_t { return (is_recommended == nullptr) ? 0 : (is_recommended(args) ? 0 : UINT64_MAX); } ),
instantiate(instantiate) { }
GemmImplementation(GemmMethod m, const char *n, KernelWeightFormat kwf,
std::function<bool(const GemmArgs &)> is_supported, std::function<bool(const GemmArgs &)> is_recommended,
- std::function<GemmCommon<Top, Tret> *(const GemmArgs &)> instantiate) :
+ std::function<GemmCommon<Tlop, Trop, Tret> *(const GemmArgs &)> instantiate) :
method(m), name(n), kernel_weight_format(kwf), is_supported(is_supported),
cycle_estimate( [is_recommended](const GemmArgs &args) -> uint64_t { return (is_recommended == nullptr) ? 0 : (is_recommended(args) ? 0 : UINT64_MAX); } ),
instantiate(instantiate) { }
@@ -218,8 +218,8 @@ struct GemmImplementation<Top, Tret, Nothing> {
* A specialised version is provided for each supported combination of types.
* The end of the list is indicated by a sentinel descriptor with
* method==GemmMethod::DEFAULT. */
-template<typename Top, typename Tret, class OutputStage = Nothing>
-const GemmImplementation<Top, Tret, OutputStage> *gemm_implementation_list();
+template<typename Tlop, typename Trop, typename Tret, class OutputStage = Nothing>
+const GemmImplementation<Tlop, Trop, Tret, OutputStage> *gemm_implementation_list();
/*
* Select a GEMM implementation for the given arguments.
@@ -234,15 +234,15 @@ const GemmImplementation<Top, Tret, OutputStage> *gemm_implementation_list();
* this function returns false and doesn't touch the provided pointer
* reference.
*/
-template<typename Top, typename Tret, class OutputStage>
-bool find_implementation(const GemmArgs &args, const OutputStage &os, const GemmImplementation<Top, Tret, OutputStage> * &impl) {
- auto gemms = gemm_implementation_list<Top, Tret, OutputStage>();
+template<typename Tlop, typename Trop, typename Tret, class OutputStage>
+bool find_implementation(const GemmArgs &args, const OutputStage &os, const GemmImplementation<Tlop, Trop, Tret, OutputStage> * &impl) {
+ auto gemms = gemm_implementation_list<Tlop, Trop, Tret, OutputStage>();
const GemmConfig *cfg = args._cfg;
- const GemmImplementation<Top, Tret, OutputStage> *saved_impl = nullptr;
+ const GemmImplementation<Tlop, Trop, Tret, OutputStage> *saved_impl = nullptr;
uint64_t best_estimate = 0;
- for (const GemmImplementation<Top, Tret, OutputStage> *i = gemms; i->method != GemmMethod::DEFAULT; i++) {
+ for (const GemmImplementation<Tlop, Trop, Tret, OutputStage> *i = gemms; i->method != GemmMethod::DEFAULT; i++) {
/* Skip if this implementation doesn't support these args. */
if (!i->do_is_supported(args, os)) {
continue;
@@ -284,17 +284,17 @@ bool find_implementation(const GemmArgs &args, const OutputStage &os, const Gemm
return false;
}
-template<typename Top, typename Tret, class OutputStage>
+template<typename Tlop, typename Trop, typename Tret, class OutputStage>
std::vector<KernelDescription> get_compatible_kernels(const GemmArgs &args, const OutputStage &os) {
std::vector<KernelDescription> res;
/* Find out what the default implementation is so we can set the flag accordingly later. */
- const GemmImplementation<Top, Tret, OutputStage> *default_impl;
+ const GemmImplementation<Tlop, Trop, Tret, OutputStage> *default_impl;
find_implementation(args, os, default_impl);
- auto gemms = gemm_implementation_list<Top, Tret, OutputStage>();
+ auto gemms = gemm_implementation_list<Tlop, Trop, Tret, OutputStage>();
- for (const GemmImplementation<Top, Tret, OutputStage> *i = gemms; i->method != GemmMethod::DEFAULT; i++) {
+ for (const GemmImplementation<Tlop, Trop, Tret, OutputStage> *i = gemms; i->method != GemmMethod::DEFAULT; i++) {
/* Check that this implementation supports the presented problem. */
if (!i->do_is_supported(args, os)) {
@@ -307,31 +307,31 @@ std::vector<KernelDescription> get_compatible_kernels(const GemmArgs &args, cons
return res;
}
-template<typename Top, typename Tret, class OutputStage>
+template<typename Tlop, typename Trop, typename Tret, class OutputStage>
bool has_opt_gemm(WeightFormat &wf, const GemmArgs &args, const OutputStage &os) {
- const GemmImplementation<Top, Tret, OutputStage> *impl;
- const bool success = find_implementation<Top, Tret, OutputStage>(args, os, impl);
+ const GemmImplementation<Tlop, Trop, Tret, OutputStage> *impl;
+ const bool success = find_implementation<Tlop, Trop, Tret, OutputStage>(args, os, impl);
if (success)
- wf = UniqueGemmCommon<Top, Tret>(impl->do_instantiate(args, os))->get_config().weight_format;
+ wf = UniqueGemmCommon<Tlop, Trop, Tret>(impl->do_instantiate(args, os))->get_config().weight_format;
return success;
}
-template<typename Top, typename Tret, class OutputStage>
-UniqueGemmCommon<Top, Tret> gemm(const GemmArgs &args, const OutputStage &os) {
- const GemmImplementation<Top, Tret, OutputStage> *impl;
+template<typename Tlop, typename Trop, typename Tret, class OutputStage>
+UniqueGemmCommon<Tlop, Trop, Tret> gemm(const GemmArgs &args, const OutputStage &os) {
+ const GemmImplementation<Tlop, Trop, Tret, OutputStage> *impl;
- if (find_implementation<Top, Tret, OutputStage>(args, os, impl)) {
- return UniqueGemmCommon<Top, Tret>(impl->do_instantiate(args, os));
+ if (find_implementation<Tlop, Trop, Tret, OutputStage>(args, os, impl)) {
+ return UniqueGemmCommon<Tlop, Trop, Tret>(impl->do_instantiate(args, os));
}
- return UniqueGemmCommon<Top, Tret>(nullptr);
+ return UniqueGemmCommon<Tlop, Trop, Tret>(nullptr);
}
-template<typename Top, typename Tret, class OutputStage>
+template<typename Tlop, typename Trop, typename Tret, class OutputStage>
KernelDescription get_gemm_method(const GemmArgs &args, const OutputStage &os) {
- const GemmImplementation<Top, Tret, OutputStage> *impl;
+ const GemmImplementation<Tlop, Trop, Tret, OutputStage> *impl;
- if (find_implementation<Top, Tret>(args, os, impl)) {
+ if (find_implementation<Tlop, Trop, Tret>(args, os, impl)) {
return KernelDescription(impl->method, impl->name);
}
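The selection machinery itself is unchanged apart from the arity: find_implementation still walks the sentinel-terminated list, skips unsupported entries and keeps the lowest cycle estimate. Note that the is_recommended-based constructors above map "recommended" to 0 and "not recommended" to UINT64_MAX, so recommended kernels always win a minimum comparison. A simplified sketch of that loop, written as if inside this header and using only members visible above (the real function also honours the GemmConfig name filter):

template<typename Tlop, typename Trop, typename Tret, class OutputStage>
const GemmImplementation<Tlop, Trop, Tret, OutputStage> *
pick_lowest_estimate(const GemmArgs &args, const OutputStage &os) {
    const GemmImplementation<Tlop, Trop, Tret, OutputStage> *best = nullptr;
    uint64_t best_estimate = UINT64_MAX;

    for (const auto *i = gemm_implementation_list<Tlop, Trop, Tret, OutputStage>();
         i->method != GemmMethod::DEFAULT; i++) {
        if (!i->do_is_supported(args, os)) {
            continue;  // kernel cannot handle these arguments
        }
        // An unset cycle_estimate is treated as "no opinion" (preferred).
        const uint64_t estimate = i->cycle_estimate ? i->cycle_estimate(args, os) : 0;
        if (best == nullptr || estimate < best_estimate) {
            best = i;
            best_estimate = estimate;
        }
    }
    return best;  // nullptr if no implementation supports the problem
}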
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
index aa6ecc2919..c44e7be4a3 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,13 +32,13 @@
namespace arm_gemm {
-static const GemmImplementation<int16_t, int32_t> gemm_s16_methods[] = {
+static const GemmImplementation<int16_t, int16_t, int32_t> gemm_s16_methods[] = {
{
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_s16_8x12",
nullptr,
nullptr,
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s16_8x12, int16_t, int32_t>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s16_8x12, int16_t, int16_t, int32_t>(args); }
},
{
GemmMethod::DEFAULT,
@@ -50,15 +50,15 @@ static const GemmImplementation<int16_t, int32_t> gemm_s16_methods[] = {
};
template<>
-const GemmImplementation<int16_t, int32_t> *gemm_implementation_list<int16_t, int32_t>() {
+const GemmImplementation<int16_t, int16_t, int32_t> *gemm_implementation_list<int16_t, int16_t, int32_t>() {
return gemm_s16_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<int16_t, int32_t> gemm<int16_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<int16_t, int32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<int16_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<int16_t, int32_t, Nothing> (const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<int16_t, int16_t, int32_t> gemm<int16_t, int16_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<int16_t, int16_t, int32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<int16_t, int16_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<int16_t, int16_t, int32_t, Nothing> (const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
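As with fp32, the s16 explicit instantiations just grow the extra weight-type argument. For example, enumerating the kernels compatible with a given problem now reads as below (a sketch; assumes KernelDescription exposes the name it is constructed with, and the usual <vector>/<iostream> includes):

std::vector<arm_gemm::KernelDescription> kernels =
    arm_gemm::get_compatible_kernels<int16_t, int16_t, int32_t, arm_gemm::Nothing>(
        args, arm_gemm::Nothing());
for (const auto &k : kernels) {
    std::cout << k.name << "\n";  // e.g. "a64_gemm_s16_8x12"
}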
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index 0dc0d55b27..464e2c6059 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -54,7 +54,7 @@
namespace arm_gemm {
-static const GemmImplementation<int8_t, int32_t> gemm_s8_methods[] = {
+static const GemmImplementation<int8_t, int8_t, int32_t> gemm_s8_methods[] = {
#ifdef ARM_COMPUTE_ENABLE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
// SME kernels
@@ -63,7 +63,7 @@ static const GemmImplementation<int8_t, int32_t> gemm_s8_methods[] = {
"sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL",
[](const GemmArgs &args) { return args._ci->has_sme2(); },
[](const GemmArgs &args) { const auto VL = sme::get_vector_length<int32_t>();
- return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL, int8_t, int32_t>(args); }
},
{
@@ -82,48 +82,48 @@ static const GemmImplementation<int8_t, int32_t> gemm_s8_methods[] = {
[](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL, int8_t, int32_t>(args); }
},
#endif // ARM_COMPUTE_ENABLE_SME2
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8s32_mmla_6x4VL",
[](const GemmArgs &args) { return args._ci->has_svei8mm(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, int32_t>(args); }
),
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_s8s32_mmla_8x3VL",
[](const GemmArgs &args) { return args._ci->has_svei8mm() && (args._Ksize>8); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t, int32_t>(args); }
),
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8s32_dot_6x4VL",
[](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize>=16; },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int8_t, int32_t>(args); }
),
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_s8s32_dot_8x3VL",
[](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>4); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t, int32_t>(args); }
),
#endif // ARM_COMPUTE_ENABLE_SVE
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_s8s32_mmla_8x12",
[](const GemmArgs &args) { return args._ci->has_i8mm() && (args._Ksize>8); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t, int32_t>(args); }
),
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_s8s32_mmla_6x16",
[](const GemmArgs &args) { return args._ci->has_i8mm(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int8_t, int32_t>(args); }
),
{
GemmMethod::GEMM_HYBRID,
@@ -144,29 +144,29 @@ GemmImplementation<int8_t, int32_t>::with_estimate(
"a64_gemm_s16_8x12",
nullptr,
[](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53 && ((args._Msize > 28) || ((args._Msize % 8) > 4)); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s16_8x12, int8_t, int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s16_8x12, int8_t, int8_t, int32_t>(args); },
},
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_s8s32_dot_6x16",
[](const GemmArgs &args) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, int32_t>(args); }
),
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_s8_8x12",
[](const GemmArgs &args) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_s8_8x12, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s8_8x12, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_s8_8x12, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s8_8x12, int8_t, int8_t, int32_t>(args); }
),
-GemmImplementation<int8_t, int32_t>::with_estimate(
+GemmImplementation<int8_t, int8_t, int32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_s8_4x4",
nullptr,
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_s8_4x4, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s8_4x4, int8_t, int32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_s8_4x4, int8_t, int8_t, int32_t>::estimate_cycles<int32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_s8_4x4, int8_t, int8_t, int32_t>(args); }
),
{
@@ -179,15 +179,15 @@ GemmImplementation<int8_t, int32_t>::with_estimate(
};
template<>
-const GemmImplementation<int8_t, int32_t> *gemm_implementation_list<int8_t, int32_t>() {
+const GemmImplementation<int8_t, int8_t, int32_t> *gemm_implementation_list<int8_t, int8_t, int32_t>() {
return gemm_s8_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<int8_t, int32_t> gemm<int8_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<int8_t, int32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<int8_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<int8_t, int32_t, Nothing> (const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<int8_t, int8_t, int32_t> gemm<int8_t, int8_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<int8_t, int8_t, int32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<int8_t, int8_t, int32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<int8_t, int8_t, int32_t, Nothing> (const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
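Beyond the template rework, the s8 list also widens the recommendation rule for the SME2 1VLx4VL kernel: it is now preferred for wide problems (N >= 8*VL) as well as for the short-M shapes it already covered. A worked form of the rule, assuming a 512-bit SME implementation where sme::get_vector_length<int32_t>() returns VL = 16:

bool recommend_1VLx4VL(unsigned M, unsigned N, unsigned VL = 16) {
    return N >= 8 * VL                   // new: wide problems (N >= 128) qualify
        || M <= VL                       // short problems (M <= 16)
        || (2 * VL < M && M <= 3 * VL);  // or 32 < M <= 48
}
// recommend_1VLx4VL(200, 64)  == false (tall-and-narrow: other kernels win)
// recommend_1VLx4VL(200, 256) == true  (wide: the 1VLx4VL tiling fits well)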
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
index ae344f09b5..5214a71cce 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
@@ -62,12 +62,12 @@ namespace {
template<bool MergeStep, bool FixedFormat, typename OutputStage>
class kernel_and_merge {
public:
- template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+ template<typename strategy, typename Tlo, typename Tro, typename Tr, typename Tri, typename Tab>
static void run (
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,
+ strategy &strat, const Tlo *a_ptr, const Tro *b_panel, size_t b_stride, Tri *c_panel,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
const Activation &act, bool accumulate, const OutputStage &os, const int32_t *col_bias,
@@ -76,12 +76,12 @@ public:
// Run a kernel and call the separate merge step
template<>
-template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+template<typename strategy, typename Tlo, typename Tro, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<true, false, Nothing>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,
+ strategy &strat, const Tlo *a_ptr, const Tro *b_panel, size_t, Tri *c_panel,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
const Activation &act, bool accumulate, const Nothing &, const int32_t *, Tab *)
@@ -106,12 +106,12 @@ void kernel_and_merge<true, false, Nothing>::run(
// Run a fixed-format kernel and call the separate merge step
template<>
-template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+template<typename strategy, typename Tlo, typename Tro, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<true, true, Nothing>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,
+ strategy &strat, const Tlo *a_ptr, const Tro *b_panel, size_t b_stride, Tri *c_panel,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
const Activation &act, bool accumulate, const Nothing &, const int32_t *, Tab *)
@@ -136,12 +136,12 @@ void kernel_and_merge<true, true, Nothing>::run(
// Run a kernel with integrated merge
template<>
-template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+template<typename strategy, typename Tlo, typename Tro, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<false, false, Nothing>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *,
+ strategy &strat, const Tlo *a_ptr, const Tro *b_panel, size_t, Tri *,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0, unsigned int m_max,
unsigned int n_0, unsigned int n_max, const Tr *biasptr,
const Activation &act, bool accumulate, const Nothing &, const int32_t *,
@@ -175,12 +175,12 @@ void kernel_and_merge<false, false, Nothing>::run(
// Run a kernel with integrated merge, quantizing
template<>
-template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+template<typename strategy, typename Tlo, typename Tro, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<false, false, Requantize32>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *,
+ strategy &strat, const Tlo *a_ptr, const Tro *b_panel, size_t, Tri *,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0, unsigned int m_max,
unsigned int n_0, unsigned int n_max, const Tr *,
const Activation &, bool accumulate, const Requantize32 &qp, const int32_t *col_bias,
@@ -190,10 +190,19 @@ void kernel_and_merge<false, false, Requantize32>::run(
auto p=prof.ScopedProfiler(PROFILE_KERNEL, (m_max - m_0) * (n_max - n_0) * kern_k);
#endif
+ // Offset C pointer in a similar way to the non-quantized case above.
+ Tri *offset_c_ptr;
+
+ if (c_ptr == nullptr) {
+ offset_c_ptr = nullptr;
+ } else {
+ offset_c_ptr = c_ptr + m_0 * ldc + n_0;
+ }
+
strat.kernel(// A and B pointers are just the packed panels.
a_ptr, b_panel,
// Provide relevant part of output array and row stride.
- c_ptr + m_0 * ldc + n_0, ldc,
+ offset_c_ptr, ldc,
// M, N, K sizes
m_max-m_0, n_max - n_0, kern_k,
// Bias, activation, accumulation. Need to offset the bias as needed.
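The new offset_c_ptr indirection guards the case where the caller passes no C buffer, presumably legitimate on this no-merge quantized path: pointer arithmetic on a null pointer is undefined behaviour in C++, so the offset must only be formed when a real buffer exists. The same guard written as a single conditional expression (equivalent sketch):

Tri *offset_c_ptr = (c_ptr == nullptr) ? nullptr : c_ptr + m_0 * ldc + n_0;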
@@ -202,12 +211,12 @@ void kernel_and_merge<false, false, Requantize32>::run(
// Run a kernel and call the separate quantize step
template<>
-template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+template<typename strategy, typename Tlo, typename Tro, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<true, false, Requantize32>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,
+ strategy &strat, const Tlo *a_ptr, const Tro *b_panel, size_t, Tri *c_panel,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *,
const Activation &, bool, const Requantize32 &qp, const int32_t *col_bias,
@@ -248,12 +257,12 @@ void kernel_and_merge<true, false, Requantize32>::run(
// Run a kernel with integrated merge, dequantizing to FP32
template<>
-template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+template<typename strategy, typename Tlo, typename Tro, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<false, false, DequantizeFloat>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *,
+ strategy &strat, const Tlo *a_ptr, const Tro *b_panel, size_t, Tri *,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0, unsigned int m_max,
unsigned int n_0, unsigned int n_max, const Tr *bias,
const Activation &act, bool accumulate, const DequantizeFloat &dq, const int32_t *col_bias,
@@ -285,12 +294,12 @@ void kernel_and_merge<false, false, DequantizeFloat>::run(
}
template<>
-template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+template<typename strategy, typename Tlo, typename Tro, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<true, false, DequantizeFloat>::run(
#ifdef CYCLE_PROFILING
profiler &prof,
#endif
- strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,
+ strategy &strat, const Tlo *a_ptr, const Tro *b_panel, size_t, Tri *c_panel,
Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *bias,
const Activation &act, bool accumulate, const DequantizeFloat &qp, const int32_t *,
@@ -385,21 +394,21 @@ struct get_stripe_width<strategy, true> {
};
// KernelWeightFormat is a similar story.
-template<typename strategy, bool FixedFormat, typename To>
+template<typename strategy, bool FixedFormat, typename Tro>
struct get_kernel_weight_format {
static KernelWeightFormat get() {
return KernelWeightFormat::NON_FIXED;
}
};
-template<typename strategy, typename To>
-struct get_kernel_weight_format<strategy, true, To> {
+template<typename strategy, typename Tro>
+struct get_kernel_weight_format<strategy, true, Tro> {
static KernelWeightFormat get() {
KernelWeightFormat kwf = strategy::kernel_weight_format();
// If we are using a BF16 kernel to do an FP32 problem (fast mode) then we need to set the BF16 flag on the
// weight format.
- if (std::is_same<To, float>::value && std::is_same<typename strategy::operand_type, bfloat16>::value) {
+ if (std::is_same<Tro, float>::value && std::is_same<typename strategy::rhs_operand_type, bfloat16>::value) {
uint32_t kwf_i = static_cast<uint32_t>(kwf);
kwf_i |= 0x10;
kwf = static_cast<KernelWeightFormat>(kwf_i);
@@ -411,9 +420,10 @@ struct get_kernel_weight_format<strategy, true, To> {
} // anonymous namespace
-template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool MergeStep=true, bool FixedFormat=false, bool ForceThreadColumns=false, bool ForceFloatAccumulate=false>
-class GemmInterleaved : public GemmCommon<To, Tr> {
- typedef typename strategy::operand_type Toi;
+template<typename strategy, typename Tlo, typename Tro, typename Tr, typename OutputStage=Nothing, bool MergeStep=true, bool FixedFormat=false, bool ForceThreadColumns=false, bool ForceFloatAccumulate=false>
+class GemmInterleaved : public GemmCommon<Tlo, Tro, Tr> {
+ typedef typename strategy::lhs_operand_type Tloi;
+ typedef typename strategy::rhs_operand_type Troi;
typedef typename strategy::result_type Tri;
typedef typename accumulate_buffer_type<strategy, OutputStage, ForceFloatAccumulate>::type Tab;
@@ -444,7 +454,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
unsigned int _Mround=0;
/* Working space, pretransposed buffer, buffer manager */
- const Toi *_B_transposed=nullptr;
+ const Troi *_B_transposed=nullptr;
void *_working_space=nullptr;
Tab *_accumulation_buffer=nullptr;
@@ -456,10 +466,10 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
int32_t *col_bias = nullptr;
/* Indirect parameters. _indirect_buf doubles as a flag to indicate that "indirect" transform should be used. */
- const To * const * const * _indirect_buf = nullptr;
+ const Tlo * const * const * _indirect_buf = nullptr;
/* Convolver - only set up for convolution problems, so also doubles as a flag. */
- std::unique_ptr<convolver<To>> _convolver = nullptr;
+ std::unique_ptr<convolver<Tlo>> _convolver = nullptr;
unsigned int get_col_sum_size() const {
if (std::is_same<OutputStage, Requantize32>::value) {
@@ -474,7 +484,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
class blockwalker {
private:
/* Size loops, etc. based on our parent's configuration */
- const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &_parent;
+ const GemmInterleaved<strategy, Tlo, Tro, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &_parent;
/* K, X and multi parameters for current iteration. */
unsigned int _k0=0, _x0=0, _multi=0;
@@ -489,9 +499,9 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
bool _newmulti=true;
public:
- blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &parent) : _parent(parent) { }
+ blockwalker(const GemmInterleaved<strategy, Tlo, Tro, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &parent) : _parent(parent) { }
- blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &parent,
+ blockwalker(const GemmInterleaved<strategy, Tlo, Tro, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns, ForceFloatAccumulate> &parent,
unsigned int x_start, unsigned int x_end) : _parent(parent), _x0 (_x_start), _x_start(x_start), _x_end(x_end) { }
unsigned int xmax() {
@@ -545,7 +555,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
unsigned int k_depth = _k_block;
if (std::is_same<OutputStage, Requantize32>::value) {
- k_depth += sizeof(int32_t) / sizeof(Toi);
+ k_depth += sizeof(int32_t) / sizeof(Tloi);
}
return k_depth;
@@ -555,10 +565,10 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
size_t get_a_working_size() const {
if (_thread_columns) {
// For 2D threading: allocate a buffer of one block of rows per thread
- return ROUND_UP(sizeof(Toi) * get_total_k_depth() * strategy::out_height() * _maxthreads);
+ return ROUND_UP(sizeof(Tloi) * get_total_k_depth() * strategy::out_height() * _maxthreads);
} else {
// For 1D threaded: one of these needed, regardless of thread count. Divided according to window.
- return ROUND_UP(sizeof(Toi) * get_total_k_depth() * _Mround * _nbatches);
+ return ROUND_UP(sizeof(Tloi) * get_total_k_depth() * _Mround * _nbatches);
}
}
@@ -663,15 +673,27 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
return roundup(args._cfg->inner_block_size, strategy::k_unroll());
}
- // K blocking not supported if we are requantizing.
- if (std::is_same<OutputStage, Requantize32>::value) {
+ // K blocking not supported if we are requantizing with the merging
+ // kernels.
+ if (std::is_same<OutputStage, Requantize32>::value && MergeStep) {
return get_ktotal(args);
}
+ const unsigned int L1_size = args._ci->get_L1_cache_size();
+
// Special blocking for SME
if (is_sme<strategy>::value) {
- // Don't bother to block below this size threshold, experimentally determined to be 320 for FP32
- unsigned int scaling_threshold = 1280 / sizeof(Toi);
+ // Target 512 bytes for 64kB L1, or 1024 bytes for 128kB L1.
+ unsigned int target_bytes_per_block = L1_size / 128;
+
+ // The default cache size in gemm-linux is 32kB though - so make
+ // sure the minimum is 512.
+ if (target_bytes_per_block < 512) {
+ target_bytes_per_block = 512;
+ }
+
+ // Don't bother to block below this size threshold (1.25X target size)
+ unsigned int scaling_threshold = ((target_bytes_per_block * 5) / 4) / sizeof(Tloi);
if (get_ktotal(args) <= scaling_threshold) {
return get_ktotal(args);
@@ -679,7 +701,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
// Once we are blocking, this (lower) threshold determines when we should use more blocks
// NOTE: Could be that some factor-based solution would work better here.
- unsigned int max_block_size = 1024 / sizeof(Toi);
+ unsigned int max_block_size = target_bytes_per_block / sizeof(Tloi);
unsigned int num_k_blocks = iceildiv(get_ktotal(args), max_block_size);
@@ -688,12 +710,11 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
return k_block;
}
- const unsigned int L1_size = args._ci->get_L1_cache_size();
unsigned int k_block;
// k_block: Find out how much of the larger array can be loaded into half the cache.
// This should account for associative caches.
- k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
+ k_block = (L1_size / 2) / (sizeof(Tloi) * (std::max(strategy::out_width(), strategy::out_height())));
// Needs to be (at least a single) multiple of the K unroll level.
k_block /= strategy::k_unroll();
@@ -723,6 +744,17 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
return roundup(args._cfg->outer_block_size, strategy::out_width());
}
+ // Special blocking for SME
+ if (is_sme<strategy>::value) {
+ // If total width is less than 4x kernel width, return the entire width.
+ if (args._Nsize < strategy::out_width()*4) {
+ return roundup(args._Nsize, strategy::out_width());
+ }
+
+ // Otherwise block to single kernel width.
+ return strategy::out_width();
+ }
+
unsigned int x_block;
const unsigned int L2_size = args._ci->get_L2_cache_size();
const unsigned int k_block = get_k_block_size(args);
@@ -730,14 +762,14 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
// x_block: Work out how many rows (of length k_block) will fit in the L2
// Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
const unsigned int scaled_l2_size = (L2_size * 9) / 10;
- const unsigned int k_block_area = k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height());
+ const unsigned int k_block_area = k_block * sizeof(Tloi) * (strategy::out_width() + strategy::out_height());
// .. if the L1 contents is bigger than the L2, just return a minimal size block.
if (k_block_area > scaled_l2_size) {
return strategy::out_width();
}
- x_block = (scaled_l2_size - k_block_area) / (sizeof(Toi) * k_block);
+ x_block = (scaled_l2_size - k_block_area) / (sizeof(Tloi) * k_block);
// Needs to be (at least a single) multiple of the kernel output width.
x_block /= strategy::out_width();
@@ -835,8 +867,8 @@ public:
const auto end_x = std::min(work_range.get_position_end(1) * strategy::out_width(), _Nsize);
Tri * const c_panel = reinterpret_cast<Tri *>(working_space_bytes + (threadid * get_c_working_size()));
- Toi * const a_panel = reinterpret_cast<Toi *>(working_space_bytes + (_maxthreads * get_c_working_size()) +
- (threadid * sizeof(Toi) * get_total_k_depth() * strategy::out_height()));
+ Tloi * const a_panel = reinterpret_cast<Tloi *>(working_space_bytes + (_maxthreads * get_c_working_size()) +
+ (threadid * sizeof(Tloi) * get_total_k_depth() * strategy::out_height()));
for (unsigned int multi=0; multi<_nmulti; multi++) {
for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
@@ -853,8 +885,8 @@ public:
// Figure out how many "K" the kernel will actually process.
unsigned int kern_k = roundup(kmax - k0, strategy::k_unroll());
- const Toi *b_ptr = FixedFormat ?
- reinterpret_cast<const Toi *>(this->_Bptr) + (multi * this->_B_multi_stride) +
+ const Troi *b_ptr = FixedFormat ?
+ reinterpret_cast<const Troi *>(this->_Bptr) + (multi * this->_B_multi_stride) +
((start_x / get_stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
(k0 * get_stripe_width<strategy, FixedFormat>::get()) :
_B_transposed + (rounded_width * _Ktotal * multi) + (k0 * rounded_width) + (start_x * kern_k);
@@ -868,7 +900,7 @@ public:
// Set up transposed 'A' block
{
#ifdef CYCLE_PROFILING
- auto p=prof.ScopedProfiler(PROFILE_PREPA, strategy::out_height() * (kmax-k0) * sizeof(Toi));
+ auto p=prof.ScopedProfiler(PROFILE_PREPA, strategy::out_height() * (kmax-k0) * sizeof(Tloi));
#endif
// See comment above on transform_type<> class: this extracts either 'transforms' or
// 'transforms_quantized' as appropriate.
@@ -936,10 +968,10 @@ public:
// (one per thread) first, followed by the (window-divided) A
// buffer.
// Set a_panel to the base of the A buffers - compute offsets into it based on M/batches later.
- Toi * const a_panel = reinterpret_cast<Toi *>(working_space_bytes + (_maxthreads * get_c_working_size()));
+ Tloi * const a_panel = reinterpret_cast<Tloi *>(working_space_bytes + (_maxthreads * get_c_working_size()));
Tri * const c_panel = reinterpret_cast<Tri *>(working_space_bytes + (threadid * get_c_working_size()));
- const Toi *b_panel;
+ const Troi *b_panel;
b_panel = _B_transposed;
// newkblock() is always true on the first iteration, so these will be set properly on the first loop.
@@ -958,7 +990,7 @@ public:
for (;!current.done();current.advance()) {
if (current.newkblock()) {
#ifdef CYCLE_PROFILING
- auto p=prof.ScopedProfiler(PROFILE_PREPA, (end - start) * strategy::out_height() * (current.kmax()-current.k0()) * sizeof(Toi));
+ auto p=prof.ScopedProfiler(PROFILE_PREPA, (end - start) * strategy::out_height() * (current.kmax()-current.k0()) * sizeof(Tloi));
#endif
// See comment above on transform_type<> class: this extracts either 'transforms' or
// 'transforms_quantized' as appropriate.
@@ -994,7 +1026,7 @@ public:
// larger than the (rounded) K value.
if(std::is_same<OutputStage, Requantize32>::value) {
- a_panel_stride = kern_k + (sizeof(int32_t) / sizeof(Toi));
+ a_panel_stride = kern_k + (sizeof(int32_t) / sizeof(Tloi));
} else {
a_panel_stride = kern_k;
}
@@ -1002,7 +1034,7 @@ public:
// For FixedFormat cases, figure out the B pointer. The loop below moves through batches and vertically through the output so this will be the same throughout.
if (FixedFormat) {
- b_panel = reinterpret_cast<const Toi *>(this->_Bptr) + (current.multi() * this->_B_multi_stride) +
+ b_panel = reinterpret_cast<const Troi *>(this->_Bptr) + (current.multi() * this->_B_multi_stride) +
((current.x0() / get_stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
(current.k0() * get_stripe_width<strategy, FixedFormat>::get());
}
@@ -1012,7 +1044,7 @@ public:
unsigned int first_m = (batch == batch_0) ? m_0 : 0;
unsigned int last_m = (batch == batch_end) ? m_max : _Msize;
- const Toi *a_ptr = a_panel + (batch * _Mround + first_m) * get_total_k_depth();
+ const Tloi *a_ptr = a_panel + (batch * _Mround + first_m) * get_total_k_depth();
if (first_m >= last_m)
continue;
@@ -1134,7 +1166,7 @@ public:
unsigned int x_size = roundup(_Nsize, strategy::out_width());
- return (x_size * _Ktotal * _nmulti * sizeof(Toi)) + get_col_sum_size();
+ return (x_size * _Ktotal * _nmulti * sizeof(Troi)) + get_col_sum_size();
}
size_t get_B_pretranspose_window_size() const override {
@@ -1144,7 +1176,7 @@ public:
return n_blocks * k_blocks * _nmulti;
}
- void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
+ void requantize_bias(void *in_buffer, const Tro *B, const int ldb, const int B_multi_stride) override {
if (std::is_same<OutputStage, Requantize32>::value) {
col_bias = reinterpret_cast<int32_t *>(in_buffer);
@@ -1164,11 +1196,11 @@ public:
return transforms.PrepareB_supports_transpose();
}
- void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride, const bool transposed) override {
+ void pretranspose_B_array(void *in_buffer, const Tro *B, const int ldb, const int B_multi_stride, const bool transposed) override {
pretranspose_B_array_part(in_buffer, B, ldb, B_multi_stride, transposed, 0, get_B_pretranspose_window_size());
}
- void pretranspose_B_array_part(void *in_buffer, const To *B, const int ldb, const int B_multi_stride, const bool transposed, size_t start, size_t end) override {
+ void pretranspose_B_array_part(void *in_buffer, const Tro *B, const int ldb, const int B_multi_stride, const bool transposed, size_t start, size_t end) override {
// Perform column sums etc as part of the last block.
if (end >= get_B_pretranspose_window_size()) {
requantize_bias(in_buffer, B, ldb, B_multi_stride);
@@ -1176,7 +1208,7 @@ public:
// Put the transposed data after the column sums - in non-quantized cases get_col_sum_size() == 0
uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
- Toi *buffer = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
+ Troi *buffer = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
_B_transposed = buffer;
blockwalker current(*this);
@@ -1261,7 +1293,7 @@ public:
void set_pretransposed_B_data(void *in_buffer) override {
// Put the transposed data after the column sums - in non-quantized cases get_col_sum_size() == 0
uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
- _B_transposed = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
+ _B_transposed = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
col_bias = reinterpret_cast<int32_t *>(in_buffer);
}
@@ -1281,14 +1313,14 @@ public:
}
}
- void set_indirect_parameters(size_t string_len, const To * const * const *ptr) override {
+ void set_indirect_parameters(size_t string_len, const Tlo * const * const *ptr) override {
assert(string_len == _Ksize);
_indirect_buf = ptr;
}
void set_convolution_parameters(ConvolutionParameters parms) override {
assert(parms.input_channels == _Ksize);
- _convolver = std::unique_ptr<convolver<To>>(new convolver<To>(parms));
+ _convolver = std::unique_ptr<convolver<Tlo>>(new convolver<Tlo>(parms));
}
// Estimate cycles for given problem given provided parameters
@@ -1299,7 +1331,7 @@ public:
const PerformanceParameters &params = strategy::template get_performance_parameters<perf_type>(args._ci);
uint64_t total_macs = static_cast<uint64_t>(args._nbatches) * args._nmulti * roundup(args._Msize, strategy::out_height()) * roundup(args._Nsize, strategy::out_width()) * get_ktotal(args);
- uint64_t prepare_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * roundup(args._Msize, strategy::out_height()) * get_ktotal(args) * sizeof(Toi);
+ uint64_t prepare_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * roundup(args._Msize, strategy::out_height()) * get_ktotal(args) * sizeof(Tloi);
uint64_t merge_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * k_blocks * args._Msize * roundup(args._Nsize, strategy::out_width()) * sizeof(Tr);
float mac_cycles = static_cast<float>(total_macs) / params.kernel_macs_cycle;
@@ -1326,7 +1358,7 @@ public:
c.inner_block_size = _k_block;
c.outer_block_size = _x_block;
c.filter = get_type_name<strategy>();
- c.weight_format = get_weight_format(get_kernel_weight_format<strategy, FixedFormat, To>::get(), sizeof(To));
+ c.weight_format = get_weight_format(get_kernel_weight_format<strategy, FixedFormat, Tro>::get(), sizeof(Tro));
return c;
}
@@ -1334,21 +1366,21 @@ public:
// Aliases for the variations
template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
-using GemmInterleavedNoMerge = GemmInterleaved<strategy, To, Tr, OutputStage, false>;
+using GemmInterleavedNoMerge = GemmInterleaved<strategy, To, To, Tr, OutputStage, false>;
template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
-using GemmInterleavedFixedFormat = GemmInterleaved<strategy, To, Tr, OutputStage, true, true>;
+using GemmInterleavedFixedFormat = GemmInterleaved<strategy, To, To, Tr, OutputStage, true, true>;
template<typename strategy, typename To, typename Tr>
-using GemmInterleavedPretransposedNoMergeQuantizedInline = GemmInterleaved<strategy, To, Tr, Requantize32, false>;
+using GemmInterleavedPretransposedNoMergeQuantizedInline = GemmInterleaved<strategy, To, To, Tr, Requantize32, false>;
-template<typename strategy, typename To, typename Tr>
-using GemmInterleavedQuantized = GemmInterleaved<strategy, To, Tr, Requantize32>;
+template<typename strategy, typename Tlo, typename Tro, typename Tr>
+using GemmInterleavedQuantized = GemmInterleaved<strategy, Tlo, Tro, Tr, Requantize32>;
template<typename strategy, typename To, typename Tr>
-using GemmInterleavedNoMergeDequantized = GemmInterleaved<strategy, To, Tr, DequantizeFloat, false>;
+using GemmInterleavedNoMergeDequantized = GemmInterleaved<strategy, To, To, Tr, DequantizeFloat, false>;
-template<typename strategy, typename To, typename Tr>
-using GemmInterleavedDequantized = GemmInterleaved<strategy, To, Tr, DequantizeFloat>;
+template<typename strategy, typename Tlo, typename Tro, typename Tr>
+using GemmInterleavedDequantized = GemmInterleaved<strategy, Tlo, Tro, Tr, DequantizeFloat>;
} // namespace arm_gemm
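
The hunks above complete the split of GemmInterleaved's single operand type To into an LHS type Tlo and an RHS type Tro (with Tloi/Troi as their packed internal forms), so one template instance can describe a mixed-sign problem such as unsigned activations against signed weights; the unchanged two-type aliases simply forward To for both sides. A reduced, hypothetical illustration of the idea (TinyGemm and these names are not part of the library, and the real kernels accumulate in s32 vector lanes rather than a scalar loop):

#include <cstdint>
#include <cstdio>

// Separate LHS/RHS operand types, as in the Tlo/Tro split above.
template<typename Tlo, typename Tro, typename Tr>
struct TinyGemm {
    static Tr dot(const Tlo *a, const Tro *b, int k) {
        int32_t acc = 0;  // widen before multiplying, as the s32 kernels do
        for (int i = 0; i < k; i++) {
            acc += static_cast<int32_t>(a[i]) * static_cast<int32_t>(b[i]);
        }
        return static_cast<Tr>(acc);
    }
};

int main() {
    uint8_t lhs[4] = {1, 2, 3, 4};    // unsigned activations
    int8_t  rhs[4] = {-1, 2, -3, 4};  // signed weights
    // Mixed-sign GEMM element: Tlo=uint8_t, Tro=int8_t, Tr=int32_t.
    printf("%d\n", TinyGemm<uint8_t, int8_t, int32_t>::dot(lhs, rhs, 4));  // prints 10
    return 0;
}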
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_q8_mixed.cpp b/src/core/NEON/kernels/arm_gemm/gemm_q8_mixed.cpp
new file mode 100644
index 0000000000..a48244cb3c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/gemm_q8_mixed.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+
+#include "kernels/a64_hybrid_u8s8qa_dot_4x16.hpp"
+#include "kernels/a64_hybrid_u8s8qa_mmla_4x16.hpp"
+#include "kernels/a64_hybrid_u8s8s32_dot_6x16.hpp"
+#include "kernels/a64_hybrid_u8s8s32_mmla_6x16.hpp"
+#include "kernels/a64_interleaved_u8s8s32_mmla_8x12.hpp"
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#include "kernels/sve_hybrid_u8s8qa_dot_4x4VL.hpp"
+#include "kernels/sve_interleaved_u8s8s32_mmla_8x3VL.hpp"
+#include "kernels/sve_hybrid_u8s8s32_mmla_6x4VL.hpp"
+#include "kernels/sve_hybrid_u8s8qa_mmla_4x4VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
+
+#include "gemm_hybrid_indirect.hpp"
+#include "gemm_hybrid_quantized.hpp"
+#include "gemm_interleaved.hpp"
+#include "gemv_pretransposed.hpp"
+#include "quantize_wrapper.hpp"
+#include "utils.hpp"
+
+namespace arm_gemm {
+
+static const GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32> gemm_q8_mixed_methods[] =
+{
+#ifdef ARM_COMPUTE_ENABLE_SVE
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_hybrid_u8s8qa_mmla_4x4VL",
+ [](const GemmArgs &args, const Requantize32 &qp) { return quant_hybrid_asymmetric(qp) && args._ci->has_sve2() && args._ci->has_svei8mm(); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8s8qa_mmla_4x4VL, uint8_t, int8_t, uint8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8s8qa_mmla_4x4VL, uint8_t, int8_t, uint8_t, Requantize32>(args, qp); }
+),
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_interleaved_u8s8s32_mmla_8x3VL",
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm() && (args._Ksize>8); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_u8s8s32_mmla_8x3VL, uint8_t, int8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_u8s8s32_mmla_8x3VL, uint8_t, int8_t, uint8_t>(args, qp); }
+),
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_hybrid_u8s8s32_mmla_6x4VL",
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm(); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8s8s32_mmla_6x4VL, uint8_t, int8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8s8s32_mmla_6x4VL, uint8_t, int8_t, uint8_t, Requantize32, true>(args, qp); }
+),
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_hybrid_u8s8qa_dot_4x4VL",
+ [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_asymmetric(qp); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8s8qa_dot_4x4VL, uint8_t, int8_t, uint8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8s8qa_dot_4x4VL, uint8_t, int8_t, uint8_t, Requantize32>(args, qp); }
+),
+#endif // ARM_COMPUTE_ENABLE_SVE
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_hybrid_u8s8qa_mmla_4x16",
+ [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_i8mm() && quant_hybrid_asymmetric(qp); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8s8qa_mmla_4x16, uint8_t, int8_t, uint8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8s8qa_mmla_4x16, uint8_t, int8_t, uint8_t, Requantize32>(args, qp); }
+),
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_interleaved_u8s8s32_mmla_8x12",
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm() && (args._Ksize>8); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_interleaved_u8s8s32_mmla_8x12, uint8_t, int8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_interleaved_u8s8s32_mmla_8x12, uint8_t, int8_t, uint8_t>(args, qp); }
+),
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_hybrid_u8s8s32_mmla_6x16",
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm(); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8s8s32_mmla_6x16, uint8_t, int8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8s8s32_mmla_6x16, uint8_t, int8_t, uint8_t, Requantize32, true>(args, qp); }
+),
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_hybrid_u8s8qa_dot_4x16",
+  [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_i8mm() && quant_hybrid_asymmetric(qp); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8s8qa_dot_4x16, uint8_t, int8_t, uint8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8s8qa_dot_4x16, uint8_t, int8_t, uint8_t, Requantize32>(args, qp); }
+),
+GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_hybrid_u8s8s32_dot_6x16",
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && args._ci->has_i8mm(); },
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8s8s32_dot_6x16, uint8_t, int8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8s8s32_dot_6x16, uint8_t, int8_t, uint8_t, Requantize32, true>(args, qp); }
+),
+{
+ GemmMethod::DEFAULT,
+ "",
+ nullptr,
+ nullptr,
+ nullptr
+}
+};
+
+template<>
+const GemmImplementation<uint8_t, int8_t, uint8_t, Requantize32> *gemm_implementation_list<uint8_t, int8_t, uint8_t, Requantize32>() {
+ return gemm_q8_mixed_methods;
+}
+
+template UniqueGemmCommon<uint8_t, int8_t, uint8_t> gemm<uint8_t, int8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+template bool has_opt_gemm<uint8_t, int8_t, uint8_t, Requantize32>(WeightFormat &weight_format, const GemmArgs &args, const Requantize32 &);
+template KernelDescription get_gemm_method<uint8_t, int8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+template std::vector<KernelDescription> get_compatible_kernels<uint8_t, int8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
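
The new file above follows the standard arm_gemm pattern: each GemmImplementation entry couples a method tag and kernel name with a support predicate, an optional cycle estimator, and a factory lambda, and the empty DEFAULT entry terminates the table. Below is a boiled-down sketch of the selection walk, with hypothetical names and a reduced Args struct (the real selector also weighs the cycle estimates against each other rather than always taking the first supported entry):

struct Args { bool has_i8mm; bool has_dotprod; unsigned Ksize; };

struct Entry {
    const char *name;  // nullptr name marks the end of the table
    bool (*is_supported)(const Args &);
};

static const Entry methods[] = {
    { "interleaved_mmla", [](const Args &a) { return a.has_i8mm && a.Ksize > 8; } },
    { "hybrid_dot",       [](const Args &a) { return a.has_dotprod; } },
    { nullptr,            nullptr },  // DEFAULT-style terminator
};

const char *pick(const Args &a) {
    for (const Entry *e = methods; e->name != nullptr; e++) {
        // A null predicate means "always supported".
        if (e->is_supported == nullptr || e->is_supported(a)) {
            return e->name;
        }
    }
    return "none";
}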
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
index d1c4e49edb..18008e713e 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
@@ -58,7 +58,6 @@
#include "gemm_hybrid_indirect.hpp"
#include "gemm_hybrid_quantized.hpp"
-#include "gemm_hybrid_quantized_inline.hpp"
#include "gemm_interleaved.hpp"
#include "gemv_pretransposed.hpp"
#include "quantize_wrapper.hpp"
@@ -66,7 +65,7 @@
namespace arm_gemm {
-static const GemmImplementation<int8_t, int8_t, Requantize32> gemm_qint8_methods[] =
+static const GemmImplementation<int8_t, int8_t, int8_t, Requantize32> gemm_qint8_methods[] =
{
#ifdef ARM_COMPUTE_ENABLE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -82,7 +81,7 @@ static const GemmImplementation<int8_t, int8_t, Requantize32> gemm_qint8_methods
"sme2_interleaved_nomerge_s8q_mopa_1VLx4VL",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && ((qp.per_channel_requant && (qp.per_channel_left_shifts == nullptr)) || (!qp.per_channel_requant && (qp.per_layer_left_shift == 0)));},
[](const GemmArgs &args, const Requantize32 &) { const auto VL = sme::get_vector_length<int32_t>();
- return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_s8q_mopa_1VLx4VL, int8_t, int8_t>(args, qp); }
},
{
@@ -101,90 +100,90 @@ static const GemmImplementation<int8_t, int8_t, Requantize32> gemm_qint8_methods
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_s8q_mopa_2VLx2VL, int8_t, int8_t>(args, qp); }
},
#endif // ARM_COMPUTE_ENABLE_SME2
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8qa_mmla_4x4VL",
[](const GemmArgs &args, const Requantize32 &qp) { return quant_hybrid_asymmetric(qp) && args._ci->has_sve2() && args._ci->has_svei8mm(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qa_mmla_4x4VL, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qa_mmla_4x4VL, int8_t, int8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qa_mmla_4x4VL, int8_t, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qa_mmla_4x4VL, int8_t, int8_t, int8_t, Requantize32>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8qs_mmla_6x4VL",
[](const GemmArgs &args, const Requantize32 &qp) { return quant_hybrid_symmetric(qp) && args._ci->has_sve2() && args._ci->has_svei8mm(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qs_mmla_6x4VL, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qs_mmla_6x4VL, int8_t, int8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qs_mmla_6x4VL, int8_t, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qs_mmla_6x4VL, int8_t, int8_t, int8_t, Requantize32>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_s8s32_mmla_8x3VL",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm() && (args._Ksize>8); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t, int8_t>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_hybrid_s8s32_mmla_6x4VL",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, Requantize32, true>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, int8_t, Requantize32, true>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8qs_dot_6x4VL",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_symmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qs_dot_6x4VL, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qs_dot_6x4VL, int8_t, int8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qs_dot_6x4VL, int8_t, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qs_dot_6x4VL, int8_t, int8_t, int8_t, Requantize32>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8qa_dot_4x4VL",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_asymmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qa_dot_4x4VL, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qa_dot_4x4VL, int8_t, int8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qa_dot_4x4VL, int8_t, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qa_dot_4x4VL, int8_t, int8_t, int8_t, Requantize32>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_s8s32_dot_6x4VL",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int8_t, Requantize32, true>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int8_t, int8_t, Requantize32, true>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_s8s32_dot_8x3VL",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>4); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t, int8_t>(args, qp); }
),
#endif // ARM_COMPUTE_ENABLE_SVE
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_s8qa_mmla_4x16",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_i8mm() && quant_hybrid_asymmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qa_mmla_4x16, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qa_mmla_4x16, int8_t, int8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qa_mmla_4x16, int8_t, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qa_mmla_4x16, int8_t, int8_t, int8_t, Requantize32>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_s8qs_mmla_6x16",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_i8mm() && quant_hybrid_symmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qs_mmla_6x16, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qs_mmla_6x16, int8_t, int8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qs_mmla_6x16, int8_t, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qs_mmla_6x16, int8_t, int8_t, int8_t, Requantize32>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_s8s32_mmla_8x12",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm() && (args._Ksize>8); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t, int8_t>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_hybrid_s8s32_mmla_6x16",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int8_t, Requantize32, true>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int8_t, int8_t, Requantize32, true>(args, qp); }
),
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
@@ -205,42 +204,42 @@ GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
"a64_gemm_s16_8x12",
nullptr,
[](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() == CPUModel::A53 && ((args._Msize > 28) || ((args._Msize % 8) > 4)); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_s16_8x12, int8_t, int8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_s16_8x12, int8_t, int8_t, int8_t>(args, qp); }
},
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_s8qs_dot_6x16",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_symmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qs_dot_6x16, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qs_dot_6x16, int8_t, int8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qs_dot_6x16, int8_t, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qs_dot_6x16, int8_t, int8_t, int8_t, Requantize32>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_s8qa_dot_4x16",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_asymmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qa_dot_4x16, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qa_dot_4x16, int8_t, int8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qa_dot_4x16, int8_t, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qa_dot_4x16, int8_t, int8_t, int8_t, Requantize32>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_s8s32_dot_6x16",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, Requantize32, true>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, int8_t, Requantize32, true>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_s8_8x12",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_s8_8x12, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_s8_8x12, int8_t, int8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_s8_8x12, int8_t, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_s8_8x12, int8_t, int8_t, int8_t>(args, qp); }
),
-GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+GemmImplementation<int8_t, int8_t, int8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_s8_4x4",
nullptr,
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_s8_4x4, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_s8_4x4, int8_t, int8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_s8_4x4, int8_t, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_s8_4x4, int8_t, int8_t, int8_t>(args, qp); }
),
{
GemmMethod::QUANTIZE_WRAPPER,
@@ -259,14 +258,14 @@ GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
};
template<>
-const GemmImplementation<int8_t, int8_t, Requantize32> *gemm_implementation_list<int8_t, int8_t, Requantize32>() {
+const GemmImplementation<int8_t, int8_t, int8_t, Requantize32> *gemm_implementation_list<int8_t, int8_t, int8_t, Requantize32>() {
return gemm_qint8_methods;
}
-template UniqueGemmCommon<int8_t, int8_t> gemm<int8_t, int8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
-template bool has_opt_gemm<int8_t, int8_t, Requantize32>(WeightFormat &weight_format, const GemmArgs &args, const Requantize32 &os);
-template KernelDescription get_gemm_method<int8_t, int8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
-template std::vector<KernelDescription> get_compatible_kernels<int8_t, int8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+template UniqueGemmCommon<int8_t, int8_t, int8_t> gemm<int8_t, int8_t, int8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+template bool has_opt_gemm<int8_t, int8_t, int8_t, Requantize32>(WeightFormat &weight_format, const GemmArgs &args, const Requantize32 &os);
+template KernelDescription get_gemm_method<int8_t, int8_t, int8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+template std::vector<KernelDescription> get_compatible_kernels<int8_t, int8_t, int8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
} // namespace arm_gemm
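
Besides the extra operand-type template parameter, the one behavioural change in this file is the widened tile heuristic for sme2_interleaved_nomerge_s8q_mopa_1VLx4VL: the 1VLx4VL shape is now also preferred whenever the output is wide enough (N >= 8*VL), not only inside the previous M-based bands. A worked example under an assumed vector length (VL = 16 for 32-bit elements on a 512-bit SME implementation):

static bool prefers_1VLx4VL(unsigned M, unsigned N, unsigned VL) {
    // Mirrors the updated predicate in the hunk above.
    return N >= 8 * VL || M <= VL || (2 * VL < M && M <= 3 * VL);
}

// With VL = 16: M = 64, N = 256 now qualifies via N >= 128 (previously rejected,
// since 64 > 16 and 64 falls outside (32, 48]); M = 64, N = 64 is still rejected.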
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
index b85b1c4fcf..7c182b6777 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
@@ -54,14 +54,13 @@
#include "gemm_hybrid_indirect.hpp"
#include "gemm_hybrid_quantized.hpp"
-#include "gemm_hybrid_quantized_inline.hpp"
#include "gemm_interleaved.hpp"
#include "gemv_pretransposed.hpp"
#include "quantize_wrapper.hpp"
namespace arm_gemm {
-static const GemmImplementation<uint8_t, uint8_t, Requantize32> gemm_quint8_methods[] =
+static const GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32> gemm_quint8_methods[] =
{
#ifdef ARM_COMPUTE_ENABLE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
@@ -78,7 +77,7 @@ static const GemmImplementation<uint8_t, uint8_t, Requantize32> gemm_quint8_meth
"sme2_interleaved_nomerge_u8q_mopa_1VLx4VL",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sme2() && ((qp.per_channel_requant && (qp.per_channel_left_shifts == nullptr)) || (!qp.per_channel_requant && (qp.per_layer_left_shift == 0)));},
[](const GemmArgs &args, const Requantize32 &) { const auto VL = sme::get_vector_length<uint32_t>();
- return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_u8q_mopa_1VLx4VL, uint8_t, uint8_t>(args, qp); }
},
{
@@ -97,69 +96,69 @@ static const GemmImplementation<uint8_t, uint8_t, Requantize32> gemm_quint8_meth
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedPretransposedNoMergeQuantizedInline<cls_sme2_interleaved_nomerge_u8q_mopa_2VLx2VL, uint8_t, uint8_t>(args, qp); }
},
#endif // ARM_COMPUTE_ENABLE_SME2
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_u8qa_mmla_4x4VL",
[](const GemmArgs &args, const Requantize32 &qp) { return quant_hybrid_asymmetric(qp) && args._ci->has_sve2() && args._ci->has_svei8mm(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8qa_mmla_4x4VL, uint8_t, uint8_t, Requantize32>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8qa_mmla_4x4VL, uint8_t, uint8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8qa_mmla_4x4VL, uint8_t, uint8_t, uint8_t, Requantize32>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8qa_mmla_4x4VL, uint8_t, uint8_t, uint8_t, Requantize32>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_u8u32_mmla_8x3VL",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm() && (args._Ksize>8); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint8_t, uint8_t>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_hybrid_u8u32_mmla_6x4VL",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint8_t, Requantize32, true>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint8_t, uint8_t, Requantize32, true>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_u8qa_dot_4x4VL",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_asymmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8qa_dot_4x4VL, uint8_t, uint8_t, Requantize32>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8qa_dot_4x4VL, uint8_t, uint8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8qa_dot_4x4VL, uint8_t, uint8_t, uint8_t, Requantize32>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8qa_dot_4x4VL, uint8_t, uint8_t, uint8_t, Requantize32>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_u8u32_dot_6x4VL",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_dot_6x4VL, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_dot_6x4VL, uint8_t, uint8_t, Requantize32, true>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_dot_6x4VL, uint8_t, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_dot_6x4VL, uint8_t, uint8_t, uint8_t, Requantize32, true>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_u8u32_dot_8x3VL",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>4); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_u8u32_dot_8x3VL, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_u8u32_dot_8x3VL, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_u8u32_dot_8x3VL, uint8_t, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_u8u32_dot_8x3VL, uint8_t, uint8_t, uint8_t>(args, qp); }
),
#endif // ARM_COMPUTE_ENABLE_SVE
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_u8qa_mmla_4x16",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_i8mm() && quant_hybrid_asymmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8qa_mmla_4x16, uint8_t, uint8_t, Requantize32>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8qa_mmla_4x16, uint8_t, uint8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8qa_mmla_4x16, uint8_t, uint8_t, uint8_t, Requantize32>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8qa_mmla_4x16, uint8_t, uint8_t, uint8_t, Requantize32>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_u8u32_mmla_8x12",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm() && (args._Ksize>8); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_interleaved_u8u32_mmla_8x12, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_interleaved_u8u32_mmla_8x12, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_interleaved_u8u32_mmla_8x12, uint8_t, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_interleaved_u8u32_mmla_8x12, uint8_t, uint8_t, uint8_t>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_hybrid_u8u32_mmla_6x16",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8u32_mmla_6x16, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8u32_mmla_6x16, uint8_t, uint8_t, Requantize32, true>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8u32_mmla_6x16, uint8_t, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8u32_mmla_6x16, uint8_t, uint8_t, uint8_t, Requantize32, true>(args, qp); }
),
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
@@ -180,35 +179,35 @@ GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
"a64_gemm_u16_8x12",
nullptr,
[](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() == CPUModel::A53 && args._Msize > 4; },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_u16_8x12, uint8_t, uint8_t>(args, qp); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_u16_8x12, uint8_t, uint8_t, uint8_t>(args, qp); },
},
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_u8qa_dot_4x16",
[](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_asymmetric(qp); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8qa_dot_4x16, uint8_t, uint8_t, Requantize32>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8qa_dot_4x16, uint8_t, uint8_t, Requantize32>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8qa_dot_4x16, uint8_t, uint8_t, uint8_t, Requantize32>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8qa_dot_4x16, uint8_t, uint8_t, uint8_t, Requantize32>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_u8u32_dot_6x16",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8u32_dot_6x16, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8u32_dot_6x16, uint8_t, uint8_t, Requantize32, true>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_u8u32_dot_6x16, uint8_t, uint8_t, uint8_t, Requantize32, true>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_u8u32_dot_6x16, uint8_t, uint8_t, uint8_t, Requantize32, true>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_u8_8x12",
[](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_u8_8x12, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_u8_8x12, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_u8_8x12, uint8_t, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_u8_8x12, uint8_t, uint8_t, uint8_t>(args, qp); }
),
-GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_u8_4x4",
nullptr,
- [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_u8_4x4, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
- [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_u8_4x4, uint8_t, uint8_t>(args, qp); }
+ [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_u8_4x4, uint8_t, uint8_t, uint8_t>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_u8_4x4, uint8_t, uint8_t, uint8_t>(args, qp); }
),
{
GemmMethod::QUANTIZE_WRAPPER,
@@ -227,14 +226,14 @@ GemmImplementation<uint8_t, uint8_t, Requantize32>::with_estimate(
};
template<>
-const GemmImplementation<uint8_t, uint8_t, Requantize32> *gemm_implementation_list<uint8_t, uint8_t, Requantize32>() {
+const GemmImplementation<uint8_t, uint8_t, uint8_t, Requantize32> *gemm_implementation_list<uint8_t, uint8_t, uint8_t, Requantize32>() {
return gemm_quint8_methods;
}
-template UniqueGemmCommon<uint8_t, uint8_t> gemm<uint8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
-template bool has_opt_gemm<uint8_t, uint8_t, Requantize32>(WeightFormat &weight_format, const GemmArgs &args, const Requantize32 &os);
-template KernelDescription get_gemm_method<uint8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
-template std::vector<KernelDescription> get_compatible_kernels<uint8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+template UniqueGemmCommon<uint8_t, uint8_t, uint8_t> gemm<uint8_t, uint8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+template bool has_opt_gemm<uint8_t, uint8_t, uint8_t, Requantize32>(WeightFormat &weight_format, const GemmArgs &args, const Requantize32 &os);
+template KernelDescription get_gemm_method<uint8_t, uint8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
+template std::vector<KernelDescription> get_compatible_kernels<uint8_t, uint8_t, uint8_t, Requantize32>(const GemmArgs &args, const Requantize32 &os);
} // namespace arm_gemm
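
As in gemm_qint8.cpp, every table entry, the gemm_implementation_list specialization, and the explicit instantiations in this file gain the extra operand-type parameter, so the public entry points now spell out LHS, RHS and result types separately. A simplified sketch of the instantiation pattern (all names and signatures here are placeholders, not the library's real declarations):

#include <cstdint>

namespace sketch {
template<typename Tlo, typename Tro, typename Tr> struct GemmCommon {};
struct GemmArgs {};
struct Requantize32 {};

// Before this change: gemm<uint8_t, uint8_t, Requantize32>(...)
// After:              gemm<uint8_t, uint8_t, uint8_t, Requantize32>(...)
template<typename Tlo, typename Tro, typename Tr, typename OS>
GemmCommon<Tlo, Tro, Tr> *gemm(const GemmArgs &, const OS &) { return nullptr; }

// Explicit instantiation keeps the symbol in this translation unit, so callers
// can link against it without seeing the template body.
template GemmCommon<uint8_t, uint8_t, uint8_t> *
gemm<uint8_t, uint8_t, uint8_t, Requantize32>(const GemmArgs &, const Requantize32 &);
}  // namespace sketch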
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp
index 782399df8c..1d995a87b5 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp
@@ -48,14 +48,14 @@
#include <vector>
namespace arm_gemm {
-static const GemmImplementation<int8_t, float, DequantizeFloat> gemm_s8fp32_methods[] =
+static const GemmImplementation<int8_t, int8_t, float, DequantizeFloat> gemm_s8fp32_methods[] =
{
#ifdef ARM_COMPUTE_ENABLE_SVE
#ifdef ARM_COMPUTE_ENABLE_SME2
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp",
- [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2() && !args._accumulate; },
[](const GemmArgs &args, const DequantizeFloat &) { const auto VL = sme::get_vector_length<float>();
return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
[](const GemmArgs &args, const DequantizeFloat &dq) { return new GemmInterleavedNoMergeDequantized<cls_sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL, int8_t, float>(args, dq); }
@@ -63,7 +63,7 @@ static const GemmImplementation<int8_t, float, DequantizeFloat> gemm_s8fp32_meth
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_s8qfp32_mopa_4Vx1VL.hpp",
- [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2() && !args._accumulate; },
[](const GemmArgs &args, const DequantizeFloat &) { const auto VL = sme::get_vector_length<float>();
return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
[](const GemmArgs &args, const DequantizeFloat &dq) { return new GemmInterleavedNoMergeDequantized<cls_sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL, int8_t, float>(args, dq); }
@@ -71,53 +71,53 @@ static const GemmImplementation<int8_t, float, DequantizeFloat> gemm_s8fp32_meth
{
GemmMethod::GEMM_INTERLEAVED,
"sme2_interleaved_nomerge_s8qfp32_mopa_2Vx2VL.hpp",
- [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2() && !args._accumulate; },
nullptr,
[](const GemmArgs &args, const DequantizeFloat &dq) { return new GemmInterleavedNoMergeDequantized<cls_sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL, int8_t, float>(args, dq); }
},
#endif // ARM_COMPUTE_ENABLE_SME2
-GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+GemmImplementation<int8_t, int8_t, float, DequantizeFloat>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_s8s32_mmla_8x3VL",
[](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_svei8mm(); },
- [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, float>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, float>(args, qp); }
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t, float>(args, qp); }
),
-GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+GemmImplementation<int8_t, int8_t, float, DequantizeFloat>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_s8s32_dot_8x3VL",
[](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sve(); },
- [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, float>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, float>(args, qp); }
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t, float>(args, qp); }
),
#endif // ARM_COMPUTE_ENABLE_SVE
-GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+GemmImplementation<int8_t, int8_t, float, DequantizeFloat>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_s8s32_mmla_8x12",
[](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_i8mm(); },
- [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, float>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, float>(args, qp); }
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t, float>(args, qp); }
),
{
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_s16_8x12",
nullptr,
[](const GemmArgs &args, const DequantizeFloat &) { return args._ci->get_cpu_model() == CPUModel::A53 && ((args._Msize > 28) || ((args._Msize % 8) > 4)); },
- [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s16_8x12, int8_t, float>(args, qp); }
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s16_8x12, int8_t, int8_t, float>(args, qp); }
},
-GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+GemmImplementation<int8_t, int8_t, float, DequantizeFloat>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_s8_8x12",
[](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_gemm_s8_8x12, int8_t, float>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s8_8x12, int8_t, float>(args, qp); }
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_gemm_s8_8x12, int8_t, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s8_8x12, int8_t, int8_t, float>(args, qp); }
),
-GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+GemmImplementation<int8_t, int8_t, float, DequantizeFloat>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_s8_4x4",
nullptr,
- [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_gemm_s8_4x4, int8_t, float>::estimate_cycles<int8_t>(args); },
- [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s8_4x4, int8_t, float>(args, qp); }
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_gemm_s8_4x4, int8_t, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s8_4x4, int8_t, int8_t, float>(args, qp); }
),
{
GemmMethod::DEFAULT,
@@ -129,13 +129,13 @@ GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
};
template<>
-const GemmImplementation<int8_t, float, DequantizeFloat> *gemm_implementation_list<int8_t, float, DequantizeFloat>() {
+const GemmImplementation<int8_t, int8_t, float, DequantizeFloat> *gemm_implementation_list<int8_t, int8_t, float, DequantizeFloat>() {
return gemm_s8fp32_methods;
}
-template UniqueGemmCommon<int8_t, float> gemm<int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
-template KernelDescription get_gemm_method<int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
-template std::vector<KernelDescription> get_compatible_kernels<int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+template UniqueGemmCommon<int8_t, int8_t, float> gemm<int8_t, int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+template KernelDescription get_gemm_method<int8_t, int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+template std::vector<KernelDescription> get_compatible_kernels<int8_t, int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
} // namespace arm_gemm
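
Note on the method tables above: each entry pairs a kernel with a support predicate (a CPU feature gate such as has_i8mm(), or a model-specific heuristic like the A53 check), an optional cycle estimate, and a factory lambda; with_estimate entries compete on the predicted cycle count. A minimal self-contained C++ sketch of that selection pattern follows. The names (Args, Impl, pick_impl) are hypothetical stand-ins for illustration, not the library's real types, and the tie-breaking here is a simplification.

    #include <cstdint>
    #include <functional>
    #include <limits>
    #include <vector>

    // Hypothetical stand-ins for GemmArgs / GemmImplementation.
    struct Args { bool has_i8mm; uint64_t M, N, K; };

    struct Impl {
        const char *name;
        std::function<bool(const Args &)>     supported; // empty: always supported
        std::function<uint64_t(const Args &)> cycles;    // empty: un-costed fallback
    };

    // Walk the list in priority order; among supported costed entries keep the
    // cheapest, and treat the first supported un-costed entry as the fallback.
    const Impl *pick_impl(const std::vector<Impl> &impls, const Args &args) {
        const Impl *best = nullptr;
        uint64_t best_cycles = std::numeric_limits<uint64_t>::max();
        for (const Impl &i : impls) {
            if (i.supported && !i.supported(args)) continue;
            if (!i.cycles) return best ? best : &i;
            uint64_t c = i.cycles(args);
            if (c < best_cycles) { best = &i; best_cycles = c; }
        }
        return best;
    }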
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_u8s8fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_u8s8fp32.cpp
new file mode 100644
index 0000000000..606b422b0b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/gemm_u8s8fp32.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+
+#include "kernels/a64_hybrid_u8s8qa_dot_4x16.hpp"
+#include "kernels/a64_hybrid_u8s8qa_mmla_4x16.hpp"
+#include "kernels/a64_hybrid_u8s8s32_dot_6x16.hpp"
+#include "kernels/a64_hybrid_u8s8s32_mmla_6x16.hpp"
+#include "kernels/a64_interleaved_u8s8s32_mmla_8x12.hpp"
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#include "kernels/sve_hybrid_u8s8qa_dot_4x4VL.hpp"
+#include "kernels/sve_interleaved_u8s8s32_mmla_8x3VL.hpp"
+#include "kernels/sve_hybrid_u8s8s32_mmla_6x4VL.hpp"
+#include "kernels/sve_hybrid_u8s8qa_mmla_4x4VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
+
+#include "gemm_hybrid_indirect.hpp"
+#include "gemm_hybrid_quantized.hpp"
+#include "gemm_interleaved.hpp"
+#include "gemv_pretransposed.hpp"
+#include "quantize_wrapper.hpp"
+#include "utils.hpp"
+
+namespace arm_gemm {
+
+static const GemmImplementation<uint8_t, int8_t, float, DequantizeFloat> gemm_u8s8fp32_methods[] =
+{
+#ifdef ARM_COMPUTE_ENABLE_SVE
+GemmImplementation<uint8_t, int8_t, float, DequantizeFloat>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_interleaved_u8s8s32_mmla_8x3VL",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_svei8mm(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_sve_interleaved_u8s8s32_mmla_8x3VL, uint8_t, int8_t, float>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_sve_interleaved_u8s8s32_mmla_8x3VL, uint8_t, int8_t, float>(args, qp); }
+),
+#endif // ARM_COMPUTE_ENABLE_SVE
+GemmImplementation<uint8_t, int8_t, float, DequantizeFloat>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_interleaved_u8s8s32_mmla_8x12",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_i8mm(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_interleaved_u8s8s32_mmla_8x12, uint8_t, int8_t, float>::estimate_cycles<uint8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_interleaved_u8s8s32_mmla_8x12, uint8_t, int8_t, float>(args, qp); }
+),
+{
+ GemmMethod::DEFAULT,
+ "",
+ nullptr,
+ nullptr,
+ nullptr
+}
+};
+
+template<>
+const GemmImplementation<uint8_t, int8_t, float, DequantizeFloat> *gemm_implementation_list<uint8_t, int8_t, float, DequantizeFloat>() {
+ return gemm_u8s8fp32_methods;
+}
+
+template UniqueGemmCommon<uint8_t, int8_t, float> gemm<uint8_t, int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+template bool has_opt_gemm<uint8_t, int8_t, float, DequantizeFloat>(WeightFormat &weight_format, const GemmArgs &args, const DequantizeFloat &os);
+template KernelDescription get_gemm_method<uint8_t, int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+template std::vector<KernelDescription> get_compatible_kernels<uint8_t, int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
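
The new gemm_u8s8fp32.cpp registers mixed-signedness kernels: uint8 LHS against int8 RHS, accumulating in int32 (the *_u8s8s32_* names) and dequantizing to float at the end. Assuming the DequantizeFloat output stage reduces to a single per-tensor scale (an assumption for illustration; the real stage may carry more state), a scalar reference of the whole path is:

    #include <cstddef>
    #include <cstdint>

    // Scalar reference: C[m][n] = scale * sum_k (uint8)A[m][k] * (int8)B[k][n].
    // 'scale' models a per-tensor DequantizeFloat stage (assumed, not confirmed
    // by this patch); accumulation is widened to int32 as in the kernels above.
    void gemm_u8s8_dequant_ref(const uint8_t *A, const int8_t *B, float *C,
                               size_t M, size_t N, size_t K, float scale) {
        for (size_t m = 0; m < M; ++m) {
            for (size_t n = 0; n < N; ++n) {
                int32_t acc = 0;
                for (size_t k = 0; k < K; ++k) {
                    acc += static_cast<int32_t>(A[m * K + k]) *
                           static_cast<int32_t>(B[k * N + n]);
                }
                C[m * N + n] = scale * static_cast<float>(acc);
            }
        }
    }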
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint16.cpp
index 25b6cf0cf2..308452e304 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020, 2022-2023 Arm Limited.
+ * Copyright (c) 2017-2020, 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,13 +32,13 @@
namespace arm_gemm {
-static const GemmImplementation<uint16_t, uint32_t> gemm_u16_methods[] = {
+static const GemmImplementation<uint16_t, uint16_t, uint32_t> gemm_u16_methods[] = {
{
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_u16_8x12",
nullptr,
nullptr,
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_u16_8x12, uint16_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_u16_8x12, uint16_t, uint16_t, uint32_t>(args); }
},
{
GemmMethod::DEFAULT,
@@ -50,15 +50,15 @@ static const GemmImplementation<uint16_t, uint32_t> gemm_u16_methods[] = {
};
template<>
-const GemmImplementation<uint16_t, uint32_t> *gemm_implementation_list<uint16_t, uint32_t>() {
+const GemmImplementation<uint16_t, uint16_t, uint32_t> *gemm_implementation_list<uint16_t, uint16_t, uint32_t>() {
return gemm_u16_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<uint16_t, uint32_t> gemm<uint16_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<uint16_t, uint32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<uint16_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<uint16_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<uint16_t, uint16_t, uint32_t> gemm<uint16_t, uint16_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<uint16_t, uint16_t, uint32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<uint16_t, uint16_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<uint16_t, uint16_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index dfacb687a8..fb442419b7 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -46,50 +46,50 @@
namespace arm_gemm {
-static const GemmImplementation<uint8_t, uint32_t> gemm_u8_methods[] = {
+static const GemmImplementation<uint8_t, uint8_t, uint32_t> gemm_u8_methods[] = {
#ifdef ARM_COMPUTE_ENABLE_SVE
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_u8u32_mmla_6x4VL",
[](const GemmArgs &args) { return args._ci->has_svei8mm(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_mmla_6x4VL, uint8_t, uint8_t, uint32_t>(args); }
),
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_u8u32_mmla_8x3VL",
[](const GemmArgs &args) { return args._ci->has_svei8mm() && (args._Ksize>8); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_u8u32_mmla_8x3VL, uint8_t, uint8_t, uint32_t>(args); }
),
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_u8u32_dot_6x4VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_dot_6x4VL, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_dot_6x4VL, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_u8u32_dot_6x4VL, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_u8u32_dot_6x4VL, uint8_t, uint8_t, uint32_t>(args); }
),
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_u8u32_dot_8x3VL",
[](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>4); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_u8u32_dot_8x3VL, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_u8u32_dot_8x3VL, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_u8u32_dot_8x3VL, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_u8u32_dot_8x3VL, uint8_t, uint8_t, uint32_t>(args); }
),
#endif // ARM_COMPUTE_ENABLE_SVE
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_u8u32_mmla_8x12",
[](const GemmArgs &args) { return args._ci->has_i8mm() && (args._Ksize>8); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_u8u32_mmla_8x12, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_u8u32_mmla_8x12, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_u8u32_mmla_8x12, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_u8u32_mmla_8x12, uint8_t, uint8_t, uint32_t>(args); }
),
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_u8u32_mmla_6x16",
[](const GemmArgs &args) { return args._ci->has_i8mm(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_u8u32_mmla_6x16, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_u8u32_mmla_6x16, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_u8u32_mmla_6x16, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_u8u32_mmla_6x16, uint8_t, uint8_t, uint32_t>(args); }
),
{
GemmMethod::GEMM_HYBRID,
@@ -110,28 +110,28 @@ GemmImplementation<uint8_t, uint32_t>::with_estimate(
"a64_gemm_u16_8x12",
nullptr,
[](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A53 && args._Msize > 4; },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_u16_8x12, uint8_t, uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_u16_8x12, uint8_t, uint8_t, uint32_t>(args); },
},
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_u8u32_dot_6x16",
[](const GemmArgs &args) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_u8u32_dot_6x16, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_u8u32_dot_6x16, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_u8u32_dot_6x16, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_u8u32_dot_6x16, uint8_t, uint8_t, uint32_t>(args); }
),
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_u8_8x12",
[](const GemmArgs &args) { return args._ci->has_dotprod(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_u8_8x12, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_u8_8x12, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_u8_8x12, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_u8_8x12, uint8_t, uint8_t, uint32_t>(args); }
),
-GemmImplementation<uint8_t, uint32_t>::with_estimate(
+GemmImplementation<uint8_t, uint8_t, uint32_t>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_gemm_u8_4x4",
nullptr,
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_u8_4x4, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_u8_4x4, uint8_t, uint32_t>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_gemm_u8_4x4, uint8_t, uint8_t, uint32_t>::estimate_cycles<uint32_t>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_gemm_u8_4x4, uint8_t, uint8_t, uint32_t>(args); }
),
{
GemmMethod::DEFAULT,
@@ -143,15 +143,15 @@ GemmImplementation<uint8_t, uint32_t>::with_estimate(
};
template<>
-const GemmImplementation<uint8_t, uint32_t> *gemm_implementation_list<uint8_t, uint32_t>() {
+const GemmImplementation<uint8_t, uint8_t, uint32_t> *gemm_implementation_list<uint8_t, uint8_t, uint32_t>() {
return gemm_u8_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<uint8_t, uint32_t> gemm<uint8_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<uint8_t, uint32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<uint8_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<uint8_t, uint32_t, Nothing> (const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<uint8_t, uint8_t, uint32_t> gemm<uint8_t, uint8_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<uint8_t, uint8_t, uint32_t, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<uint8_t, uint8_t, uint32_t, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<uint8_t, uint8_t, uint32_t, Nothing> (const GemmArgs &args, const Nothing &);
} // namespace arm_gemm
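
The pattern across these files is uniform: every template that used to take one operand type and one result type now takes separate LHS and RHS operand types, so the homogeneous lists simply spell the operand type twice, while the mixed-signedness u8s8 list earlier in the patch can make them differ. A minimal sketch of that widening (GemmImplList is a hypothetical name, not the library's real declaration):

    #include <cstdint>

    // Hypothetical illustration of the template widening in this patch: the
    // single operand type To splits into Tlhs/Trhs, so existing homogeneous
    // instantiations repeat the type and mixed ones become expressible.
    template <typename Tlhs, typename Trhs, typename Tret>
    struct GemmImplList {};

    using U8GemmList   = GemmImplList<uint8_t, uint8_t, uint32_t>; // was <uint8_t, uint32_t>
    using U8S8GemmList = GemmImplList<uint8_t, int8_t,  float>;    // new: LHS and RHS differ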
diff --git a/src/core/NEON/kernels/arm_gemm/gemv_batched.hpp b/src/core/NEON/kernels/arm_gemm/gemv_batched.hpp
index ad504f2664..aa03fb6aa1 100644
--- a/src/core/NEON/kernels/arm_gemm/gemv_batched.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemv_batched.hpp
@@ -31,9 +31,9 @@ namespace arm_gemm {
* efficiently as a GEMM (with M'=nbatches and nbatches'=1). This wrapper
* implements this. */
template<typename To, typename Tr>
-class GemvBatched : public GemmCommon<To, Tr> {
+class GemvBatched : public GemmCommon<To, To, Tr> {
private:
- UniqueGemmCommon<To, Tr> _subgemm = nullptr;
+ UniqueGemmCommon<To, To, Tr> _subgemm = nullptr;
public:
GemvBatched(const GemmArgs &args) {
@@ -42,7 +42,7 @@ public:
newargs._Msize = args._nbatches;
newargs._nbatches = 1;
newargs._cfg = nullptr;
- _subgemm = gemm<To,Tr>(newargs);
+ _subgemm = gemm<To,To,Tr>(newargs);
}
void set_arrays(const To *A, const int, const int A_batch_stride, const int A_multi_stride,
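
The wrapper's trick is the pair of assignments above: newargs._Msize = args._nbatches; newargs._nbatches = 1. A batch of nbatches independent GEMVs (each with M=1) against a shared RHS is exactly one GEMM whose rows are the batched LHS vectors. A small self-contained check of that identity in plain C++ (not the library API; both loops use the same accumulation order, so the results match exactly):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
        const size_t nb = 3, K = 4, N = 5; // nb GEMVs: y_b = x_b * B, B shared
        std::vector<float> x(nb * K), B(K * N), y_gemv(nb * N, 0.f), y_gemm(nb * N, 0.f);
        for (size_t i = 0; i < x.size(); ++i) x[i] = 0.5f * i;
        for (size_t i = 0; i < B.size(); ++i) B[i] = 0.25f * i - 1.f;

        // One GEMV per batch.
        for (size_t b = 0; b < nb; ++b)
            for (size_t n = 0; n < N; ++n)
                for (size_t k = 0; k < K; ++k)
                    y_gemv[b * N + n] += x[b * K + k] * B[k * N + n];

        // Same data as a single GEMM with M' = nbatches, nbatches' = 1.
        for (size_t m = 0; m < nb; ++m)
            for (size_t n = 0; n < N; ++n)
                for (size_t k = 0; k < K; ++k)
                    y_gemm[m * N + n] += x[m * K + k] * B[k * N + n];

        for (size_t i = 0; i < y_gemv.size(); ++i) assert(y_gemv[i] == y_gemm[i]);
        return 0;
    }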
diff --git a/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp b/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
index dbada36052..1a7c51c7a4 100644
--- a/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
@@ -85,7 +85,7 @@ void run_gemv_kernel<Requantize32>::run(
//
// batches are not supported as a batched GEMV makes no sense (can be converted to a GEMM).
template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
-class GemvPretransposed : public GemmCommon<To, Tr> {
+class GemvPretransposed : public GemmCommon<To, To, Tr> {
typedef typename strategy::operand_type Toi;
typedef typename strategy::result_type Tri;
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a32_interleave6_block1_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a32_interleave6_block1_fp32_fp32.hpp
index 807511f0d2..3ad32d8a50 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a32_interleave6_block1_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a32_interleave6_block1_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -75,61 +75,61 @@ void interleave_block<6, 1, VLType::None, false>(
for (;width>7;width-=8) {
__asm __volatile (
// Load up 8 elements (2 vectors) from each of 8 sources.
- "VLD1.32 {d0-d3}, [%[inptr0]]!\n" // q0=A0A1A2A3
- "VLD1.32 {d4-d7}, [%[inptr1]]!\n" // q2=B0B1B2B3
- "VLD1.32 {d8-d11}, [%[inptr2]]!\n" // q4=C0C1C2C3
- "VZIP.32 q0, q4\n" // q0=A0C0A1C1, q4 = A2C2A3C3
- "VLD1.32 {d12-d15}, [%[inptr3]]!\n" // q6=D0D1D2D3
- "VZIP.32 q2, q6\n" // q2=B0D0B1D1, q6 = B2D2B3D3
- "VLD1.32 {d16-d19}, [%[inptr4]]!\n"
- "VLD1.32 {d20-d23}, [%[inptr5]]!\n"
- "VZIP.32 q8, q10\n" // q8=E0F0E1F1, q10 = E2F2E3F3
+ "VLD1.32 {d0-d3}, [%[inptr0]]!\n" // q0=A0A1A2A3
+ "VLD1.32 {d4-d7}, [%[inptr1]]!\n" // q2=B0B1B2B3
+ "VLD1.32 {d8-d11}, [%[inptr2]]!\n" // q4=C0C1C2C3
+ "VZIP.32 q0, q4\n" // q0=A0C0A1C1, q4 = A2C2A3C3
+ "VLD1.32 {d12-d15}, [%[inptr3]]!\n" // q6=D0D1D2D3
+ "VZIP.32 q2, q6\n" // q2=B0D0B1D1, q6 = B2D2B3D3
+ "VLD1.32 {d16-d19}, [%[inptr4]]!\n"
+ "VLD1.32 {d20-d23}, [%[inptr5]]!\n"
+ "VZIP.32 q8, q10\n" // q8=E0F0E1F1, q10 = E2F2E3F3
ASM_PREFETCH("[%[inptr0], #128]")
- "VZIP.32 q0, q2\n" // q0 = A0B0C0D0, q2 = A1B1C1D1
+ "VZIP.32 q0, q2\n" // q0 = A0B0C0D0, q2 = A1B1C1D1
// Store first elements
- "VST1.32 {d0-d1}, [%[outptr]]!\n"
- "VST1.32 {d16}, [%[outptr]]!\n"
+ "VST1.32 {d0-d1}, [%[outptr]]!\n"
+ "VST1.32 {d16}, [%[outptr]]!\n"
- "VZIP.32 q4, q6\n" // q4 = A2B2C2D2, q6 = A3B3C3D3
+ "VZIP.32 q4, q6\n" // q4 = A2B2C2D2, q6 = A3B3C3D3
// Store second elements
- "VST1.32 {d4-d5}, [%[outptr]]!\n"
- "VZIP.32 q1, q5\n"
+ "VST1.32 {d4-d5}, [%[outptr]]!\n"
+ "VZIP.32 q1, q5\n"
ASM_PREFETCH("[%[inptr1], #128]")
- "VST1.32 {d17}, [%[outptr]]!\n"
- "VZIP.32 q3, q7\n"
+ "VST1.32 {d17}, [%[outptr]]!\n"
+ "VZIP.32 q3, q7\n"
// Store third elements
- "VZIP.32 q9, q11\n"
- "VST1.32 {d8-d9}, [%[outptr]]!\n"
- "VZIP.32 q1, q3\n"
+ "VZIP.32 q9, q11\n"
+ "VST1.32 {d8-d9}, [%[outptr]]!\n"
+ "VZIP.32 q1, q3\n"
ASM_PREFETCH("[%[inptr2], #128]")
- "VST1.32 {d20}, [%[outptr]]!\n"
+ "VST1.32 {d20}, [%[outptr]]!\n"
// Store fourth elements
- "VZIP.32 q5, q7\n"
- "VST1.32 {d12-d13}, [%[outptr]]!\n"
+ "VZIP.32 q5, q7\n"
+ "VST1.32 {d12-d13}, [%[outptr]]!\n"
ASM_PREFETCH("[%[inptr3], #128]")
- "VST1.32 {d21}, [%[outptr]]!\n"
+ "VST1.32 {d21}, [%[outptr]]!\n"
// Fifth
- "VST1.32 {d2-d3}, [%[outptr]]!\n"
+ "VST1.32 {d2-d3}, [%[outptr]]!\n"
ASM_PREFETCH("[%[inptr4], #128]")
- "VST1.32 {d18}, [%[outptr]]!\n"
+ "VST1.32 {d18}, [%[outptr]]!\n"
// Sixth
- "VST1.32 {d6-d7}, [%[outptr]]!\n"
+ "VST1.32 {d6-d7}, [%[outptr]]!\n"
ASM_PREFETCH("[%[inptr5], #128]")
- "VST1.32 {d19}, [%[outptr]]!\n"
+ "VST1.32 {d19}, [%[outptr]]!\n"
// Seventh
- "VST1.32 {d10-d11}, [%[outptr]]!\n"
- "VST1.32 {d22}, [%[outptr]]!\n"
+ "VST1.32 {d10-d11}, [%[outptr]]!\n"
+ "VST1.32 {d22}, [%[outptr]]!\n"
// Eighth
- "VST1.32 {d14-d15}, [%[outptr]]!\n"
- "VST1.32 {d23}, [%[outptr]]!\n"
+ "VST1.32 {d14-d15}, [%[outptr]]!\n"
+ "VST1.32 {d23}, [%[outptr]]!\n"
: [inptr0] "+r" (inptr0), [inptr1] "+r" (inptr1), [inptr2] "+r" (inptr2), [inptr3] "+r" (inptr3),
[inptr4] "+r" (inptr4), [inptr5] "+r" (inptr5), [outptr] "+r" (outptr)
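
Reading the VZIP comments and the store order above (q0 = A0B0C0D0, d16 = E0F0, then q2 = A1B1C1D1, d17 = E1F1, and so on), the zip network realizes a 6-row column-major interleave: each group of six consecutive output words is one column across rows A..F. A scalar reference for what the main (width >= 8) path produces:

    #include <cstddef>

    // Scalar model of a 6-row, block-1 interleave: walk the columns and emit
    // one element from each source row in row order, so the output is
    // A0 B0 C0 D0 E0 F0 A1 B1 C1 D1 E1 F1 ...
    void interleave6_ref(const float *const in[6], float *out, size_t width) {
        for (size_t j = 0; j < width; ++j)
            for (size_t r = 0; r < 6; ++r)
                *out++ = in[r][j];
    }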
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp
index e4bfc0f6e4..4d065300ae 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave4_block16_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,9 +34,9 @@ void interleave_block<4, 16, VLType::None, false>(
"ldr x23, [%x[in], #0x0]\n"
"ldr x22, [%x[in], #0x8]\n"
"cmp %x[height], #0x4\n"
- "add x23, x23, %x[row_offset]\n"
"ldr x21, [%x[in], #0x10]\n"
"ldr x20, [%x[in], #0x18]\n"
+ "add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
"add x20, x20, %x[row_offset]\n"
@@ -60,12 +60,12 @@ void interleave_block<4, 16, VLType::None, false>(
"ldr q19, [x23], #0x10\n"
"ldr q18, [x22], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
- "cmp %x[width], #0x10\n"
"ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q19, [%x[out_ptr], #0x0]\n"
+ "cmp %x[width], #0x10\n"
"prfm pldl1keep, [x23, #0x70]\n"
"prfm pldl1keep, [x22, #0x70]\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
"str q18, [%x[out_ptr], #0x10]\n"
"prfm pldl1keep, [x21, #0x70]\n"
"prfm pldl1keep, [x20, #0x70]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp
index e54b3b9f41..1cd6523c76 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 1, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset], LSL #1\n"
- "add x26, x26, %x[row_offset], LSL #1\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset], LSL #1\n"
- "add x24, x24, %x[row_offset], LSL #1\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset], LSL #1\n"
+ "add x27, x27, %x[row_offset], LSL #1\n"
+ "add x26, x26, %x[row_offset], LSL #1\n"
+ "add x25, x25, %x[row_offset], LSL #1\n"
+ "add x24, x24, %x[row_offset], LSL #1\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,52 +79,52 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q25, [x28], #0x10\n"
- "ldr q27, [x27], #0x10\n"
+ "ldr q23, [x28], #0x10\n"
+ "ldr q25, [x27], #0x10\n"
"subs %x[width], %x[width], #0x8\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
"cmp %x[width], #0x8\n"
- "ldr q26, [x26], #0x10\n"
- "ldr q24, [x25], #0x10\n"
- "ldr q21, [x24], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v22.8h, v27.8h, v20.8h\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x23], #0x10\n"
"ldr q17, [x22], #0x10\n"
"ldr q16, [x21], #0x10\n"
- "zip1 v19.8h, v26.8h, v17.8h\n"
- "zip1 v18.8h, v24.8h, v16.8h\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v26.8h, v17.8h\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v20.8h, v27.8h, v20.8h\n"
- "zip2 v16.8h, v24.8h, v16.8h\n"
+ "zip1 v20.8h, v23.8h, v19.8h\n"
+ "zip1 v24.8h, v25.8h, v18.8h\n"
+ "zip2 v23.8h, v23.8h, v19.8h\n"
+ "zip2 v25.8h, v25.8h, v18.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v24.8h, v23.8h, v19.8h\n"
- "zip1 v17.8h, v22.8h, v18.8h\n"
+ "zip1 v19.8h, v22.8h, v17.8h\n"
+ "zip1 v18.8h, v21.8h, v16.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v23.8h, v23.8h, v19.8h\n"
- "zip2 v19.8h, v22.8h, v18.8h\n"
+ "zip2 v22.8h, v22.8h, v17.8h\n"
+ "zip2 v17.8h, v21.8h, v16.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip1 v16.8h, v24.8h, v18.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip2 v19.8h, v24.8h, v18.8h\n"
+ "zip1 v24.8h, v23.8h, v22.8h\n"
+ "zip1 v18.8h, v25.8h, v17.8h\n"
+ "zip2 v23.8h, v23.8h, v22.8h\n"
+ "zip2 v22.8h, v25.8h, v17.8h\n"
+ "zip1 v17.8h, v21.8h, v16.8h\n"
+ "zip2 v16.8h, v21.8h, v16.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip1 v19.8h, v24.8h, v18.8h\n"
+ "zip2 v18.8h, v24.8h, v18.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
+ "zip1 v17.8h, v23.8h, v22.8h\n"
+ "zip2 v16.8h, v23.8h, v22.8h\n"
+ "str q21, [%x[out_ptr], #0x20]\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
"str q19, [%x[out_ptr], #0x40]\n"
"str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp
index 3a5dcf4a6b..81c758f498 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp16_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 1, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset], LSL #1\n"
- "add x26, x26, %x[row_offset], LSL #1\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset], LSL #1\n"
- "add x24, x24, %x[row_offset], LSL #1\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset], LSL #1\n"
+ "add x27, x27, %x[row_offset], LSL #1\n"
+ "add x26, x26, %x[row_offset], LSL #1\n"
+ "add x25, x25, %x[row_offset], LSL #1\n"
+ "add x24, x24, %x[row_offset], LSL #1\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,53 +79,53 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr d27, [x28], #0x8\n"
- "ldr d26, [x27], #0x8\n"
- "fcvtl v27.4s, v27.4h\n"
- "fcvtl v26.4s, v26.4h\n"
- "ldr d22, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "fcvtl v22.4s, v22.4h\n"
- "fcvtl v21.4s, v21.4h\n"
- "ldr d20, [x24], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "fcvtl v20.4s, v20.4h\n"
- "fcvtl v25.4s, v25.4h\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d16, [x21], #0x8\n"
- "fcvtl v19.4s, v19.4h\n"
- "fcvtl v16.4s, v16.4h\n"
- "zip1 v24.4s, v27.4s, v22.4s\n"
- "zip1 v23.4s, v26.4s, v21.4s\n"
+ "ldr d25, [x28], #0x8\n"
+ "ldr d24, [x27], #0x8\n"
"subs %x[width], %x[width], #0x4\n"
+ "ldr d18, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"cmp %x[width], #0x4\n"
- "zip1 v18.4s, v20.4s, v19.4s\n"
- "zip1 v17.4s, v25.4s, v16.4s\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d16, [x21], #0x8\n"
+ "fcvtl v25.4s, v25.4h\n"
+ "fcvtl v24.4s, v24.4h\n"
+ "fcvtl v18.4s, v18.4h\n"
+ "fcvtl v17.4s, v17.4h\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v22.4s, v27.4s, v22.4s\n"
- "zip2 v21.4s, v26.4s, v21.4s\n"
+ "fcvtl v23.4s, v23.4h\n"
+ "fcvtl v22.4s, v22.4h\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v20.4s, v20.4s, v19.4s\n"
- "zip2 v19.4s, v25.4s, v16.4s\n"
+ "fcvtl v21.4s, v21.4h\n"
+ "fcvtl v16.4s, v16.4h\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
+ "zip1 v20.4s, v25.4s, v18.4s\n"
+ "zip1 v19.4s, v24.4s, v17.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip1 v16.4s, v24.4s, v23.4s\n"
+ "zip2 v26.4s, v25.4s, v18.4s\n"
+ "zip2 v25.4s, v24.4s, v17.4s\n"
+ "zip1 v18.4s, v23.4s, v21.4s\n"
+ "zip1 v17.4s, v22.4s, v16.4s\n"
+ "zip2 v24.4s, v23.4s, v21.4s\n"
+ "zip2 v23.4s, v22.4s, v16.4s\n"
+ "zip1 v16.4s, v20.4s, v19.4s\n"
+ "zip2 v22.4s, v20.4s, v19.4s\n"
+ "zip1 v21.4s, v18.4s, v17.4s\n"
+ "zip2 v20.4s, v18.4s, v17.4s\n"
+ "zip1 v19.4s, v26.4s, v25.4s\n"
+ "zip1 v18.4s, v24.4s, v23.4s\n"
"str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v17.4s, v26.4s, v25.4s\n"
"zip2 v16.4s, v24.4s, v23.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v17.4s, v18.4s, v17.4s\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q17, [%x[out_ptr], #0x30]\n"
- "zip1 v18.4s, v20.4s, v19.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x40]\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "str q19, [%x[out_ptr], #0x40]\n"
"str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
@@ -134,76 +134,76 @@ void interleave_block<8, 1, VLType::None, false>(
"3:" // Main loop skip
"cbz %x[width], 6f\n"
"tbz %x[width], #1, 4f\n"
- "ldr s28, [x28], #0x4\n"
- "ldr s27, [x27], #0x4\n"
+ "ldr s29, [x28], #0x4\n"
+ "ldr s28, [x27], #0x4\n"
"mov x20, #0x2\n"
- "ldr s26, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s24, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s22, [x22], #0x4\n"
- "ldr s21, [x21], #0x4\n"
+ "ldr s27, [x26], #0x4\n"
+ "ldr s26, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s22, [x21], #0x4\n"
"tbz %x[width], #0, 5f\n"
- "ld1 { v28.h }[2], [x28]\n"
- "ld1 { v27.h }[2], [x27]\n"
+ "ld1 { v29.h }[2], [x28]\n"
+ "ld1 { v28.h }[2], [x27]\n"
"mov x20, #0x3\n"
- "ld1 { v26.h }[2], [x26]\n"
- "ld1 { v25.h }[2], [x25]\n"
- "ld1 { v24.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v22.h }[2], [x22]\n"
- "ld1 { v21.h }[2], [x21]\n"
+ "ld1 { v27.h }[2], [x26]\n"
+ "ld1 { v26.h }[2], [x25]\n"
+ "ld1 { v25.h }[2], [x24]\n"
+ "ld1 { v24.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v22.h }[2], [x21]\n"
"b 5f\n"
"4:" // odd_loads_1_0
- "ldr h28, [x28, #0x0]\n"
- "ldr h27, [x27, #0x0]\n"
+ "ldr h29, [x28, #0x0]\n"
+ "ldr h28, [x27, #0x0]\n"
"mov x20, #0x1\n"
- "ldr h26, [x26, #0x0]\n"
- "ldr h25, [x25, #0x0]\n"
- "ldr h24, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h22, [x22, #0x0]\n"
- "ldr h21, [x21, #0x0]\n"
+ "ldr h27, [x26, #0x0]\n"
+ "ldr h26, [x25, #0x0]\n"
+ "ldr h25, [x24, #0x0]\n"
+ "ldr h24, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h22, [x21, #0x0]\n"
"5:" // Odd load end
+ "fcvtl v29.4s, v29.4h\n"
"fcvtl v28.4s, v28.4h\n"
- "fcvtl v27.4s, v27.4h\n"
"subs x20, x20, #0x1\n"
+ "fcvtl v27.4s, v27.4h\n"
"fcvtl v26.4s, v26.4h\n"
"fcvtl v25.4s, v25.4h\n"
"fcvtl v24.4s, v24.4h\n"
"fcvtl v23.4s, v23.4h\n"
"fcvtl v22.4s, v22.4h\n"
- "fcvtl v21.4s, v21.4h\n"
+ "zip1 v21.4s, v29.4s, v27.4s\n"
"zip1 v20.4s, v28.4s, v26.4s\n"
- "zip1 v19.4s, v27.4s, v25.4s\n"
+ "zip1 v19.4s, v25.4s, v23.4s\n"
"zip1 v18.4s, v24.4s, v22.4s\n"
- "zip1 v17.4s, v23.4s, v21.4s\n"
- "zip1 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v17.4s, v21.4s, v20.4s\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
"subs x20, x20, #0x1\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ "zip2 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
- "zip2 v19.4s, v28.4s, v26.4s\n"
- "zip2 v16.4s, v27.4s, v25.4s\n"
- "zip2 v18.4s, v24.4s, v22.4s\n"
- "zip2 v17.4s, v23.4s, v21.4s\n"
- "zip1 v16.4s, v19.4s, v16.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v29.4s, v27.4s\n"
+ "zip2 v17.4s, v28.4s, v26.4s\n"
+ "zip2 v18.4s, v25.4s, v23.4s\n"
+ "zip2 v16.4s, v24.4s, v22.4s\n"
+ "zip1 v17.4s, v19.4s, v17.4s\n"
+ "zip1 v16.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"6:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
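
This variant fuses the fp16-to-fp32 widening (the fcvtl instructions) with the 8-row interleave, so the packed buffer already holds fp32. A scalar sketch of the same transform; __fp16 is an Arm C language extension, assumed available here since this path is only compiled under __aarch64__:

    #include <cstddef>

    // Widen each fp16 element to fp32 while interleaving 8 rows column by
    // column, mirroring the fcvtl + zip sequence in the kernel above.
    void interleave8_fp16_to_fp32_ref(const __fp16 *const in[8], float *out, size_t width) {
        for (size_t j = 0; j < width; ++j)
            for (size_t r = 0; r < 8; ++r)
                *out++ = static_cast<float>(in[r][j]);
    }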
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp
index 80c387db47..521b3bf8f5 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 1, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #2\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset], LSL #2\n"
- "add x26, x26, %x[row_offset], LSL #2\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset], LSL #2\n"
- "add x24, x24, %x[row_offset], LSL #2\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset], LSL #2\n"
+ "add x27, x27, %x[row_offset], LSL #2\n"
+ "add x26, x26, %x[row_offset], LSL #2\n"
+ "add x25, x25, %x[row_offset], LSL #2\n"
+ "add x24, x24, %x[row_offset], LSL #2\n"
"add x23, x23, %x[row_offset], LSL #2\n"
"add x22, x22, %x[row_offset], LSL #2\n"
"add x21, x21, %x[row_offset], LSL #2\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,46 +79,46 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q20, [x28], #0x10\n"
- "ldr q18, [x27], #0x10\n"
+ "ldr q25, [x28], #0x10\n"
+ "ldr q24, [x27], #0x10\n"
"subs %x[width], %x[width], #0x4\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
"cmp %x[width], #0x4\n"
- "ldr q17, [x26], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "zip1 v25.4s, v20.4s, v17.4s\n"
- "zip1 v24.4s, v18.4s, v16.4s\n"
- "ldr q19, [x24], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip2 v22.4s, v20.4s, v17.4s\n"
- "zip2 v21.4s, v18.4s, v16.4s\n"
- "ldr q18, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x23], #0x10\n"
+ "ldr q21, [x22], #0x10\n"
"ldr q16, [x21], #0x10\n"
- "zip1 v20.4s, v19.4s, v18.4s\n"
- "zip1 v17.4s, v23.4s, v16.4s\n"
- "zip2 v19.4s, v19.4s, v18.4s\n"
- "zip2 v18.4s, v23.4s, v16.4s\n"
+ "zip1 v20.4s, v25.4s, v18.4s\n"
+ "zip1 v19.4s, v24.4s, v17.4s\n"
+ "zip2 v26.4s, v25.4s, v18.4s\n"
+ "zip2 v25.4s, v24.4s, v17.4s\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v16.4s, v25.4s, v24.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v23.4s, v21.4s\n"
+ "zip1 v17.4s, v22.4s, v16.4s\n"
+ "zip2 v24.4s, v23.4s, v21.4s\n"
+ "zip2 v23.4s, v22.4s, v16.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v16.4s, v25.4s, v24.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x40]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v16.4s, v19.4s, v18.4s\n"
+ "zip1 v16.4s, v20.4s, v19.4s\n"
+ "zip2 v22.4s, v20.4s, v19.4s\n"
+ "zip1 v21.4s, v18.4s, v17.4s\n"
+ "zip2 v20.4s, v18.4s, v17.4s\n"
+ "zip1 v19.4s, v26.4s, v25.4s\n"
+ "zip1 v18.4s, v24.4s, v23.4s\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "zip2 v17.4s, v26.4s, v25.4s\n"
+ "zip2 v16.4s, v24.4s, v23.4s\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "str q19, [%x[out_ptr], #0x40]\n"
+ "str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
@@ -126,68 +126,68 @@ void interleave_block<8, 1, VLType::None, false>(
"3:" // Main loop skip
"cbz %x[width], 6f\n"
"tbz %x[width], #1, 4f\n"
- "ldr d28, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
+ "ldr d29, [x28], #0x8\n"
+ "ldr d28, [x27], #0x8\n"
"mov x20, #0x2\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d21, [x21], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
"tbz %x[width], #0, 5f\n"
- "ld1 { v28.s }[2], [x28]\n"
- "ld1 { v27.s }[2], [x27]\n"
+ "ld1 { v29.s }[2], [x28]\n"
+ "ld1 { v28.s }[2], [x27]\n"
"mov x20, #0x3\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v25.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v21.s }[2], [x21]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v26.s }[2], [x25]\n"
+ "ld1 { v25.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v22.s }[2], [x21]\n"
"b 5f\n"
"4:" // odd_loads_1_0
- "ldr s28, [x28, #0x0]\n"
- "ldr s27, [x27, #0x0]\n"
+ "ldr s29, [x28, #0x0]\n"
+ "ldr s28, [x27, #0x0]\n"
"mov x20, #0x1\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s25, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s21, [x21, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s26, [x25, #0x0]\n"
+ "ldr s25, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s22, [x21, #0x0]\n"
"5:" // Odd load end
+ "zip1 v21.4s, v29.4s, v27.4s\n"
"zip1 v20.4s, v28.4s, v26.4s\n"
- "zip1 v19.4s, v27.4s, v25.4s\n"
"subs x20, x20, #0x1\n"
+ "zip1 v19.4s, v25.4s, v23.4s\n"
"zip1 v18.4s, v24.4s, v22.4s\n"
- "zip1 v17.4s, v23.4s, v21.4s\n"
- "zip1 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v17.4s, v21.4s, v20.4s\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
"subs x20, x20, #0x1\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ "zip2 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 6f\n"
- "zip2 v19.4s, v28.4s, v26.4s\n"
- "zip2 v16.4s, v27.4s, v25.4s\n"
- "zip2 v18.4s, v24.4s, v22.4s\n"
- "zip2 v17.4s, v23.4s, v21.4s\n"
- "zip1 v16.4s, v19.4s, v16.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v19.4s, v29.4s, v27.4s\n"
+ "zip2 v17.4s, v28.4s, v26.4s\n"
+ "zip2 v18.4s, v25.4s, v23.4s\n"
+ "zip2 v16.4s, v24.4s, v22.4s\n"
+ "zip1 v17.4s, v19.4s, v17.4s\n"
+ "zip1 v16.4s, v18.4s, v16.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"6:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp
index 8e06b7ecab..8f67a21d05 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s16_s16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 1, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset], LSL #1\n"
- "add x26, x26, %x[row_offset], LSL #1\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset], LSL #1\n"
- "add x24, x24, %x[row_offset], LSL #1\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset], LSL #1\n"
+ "add x27, x27, %x[row_offset], LSL #1\n"
+ "add x26, x26, %x[row_offset], LSL #1\n"
+ "add x25, x25, %x[row_offset], LSL #1\n"
+ "add x24, x24, %x[row_offset], LSL #1\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,52 +79,52 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q25, [x28], #0x10\n"
- "ldr q27, [x27], #0x10\n"
+ "ldr q23, [x28], #0x10\n"
+ "ldr q25, [x27], #0x10\n"
"subs %x[width], %x[width], #0x8\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
"cmp %x[width], #0x8\n"
- "ldr q26, [x26], #0x10\n"
- "ldr q24, [x25], #0x10\n"
- "ldr q21, [x24], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v22.8h, v27.8h, v20.8h\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q18, [x23], #0x10\n"
"ldr q17, [x22], #0x10\n"
"ldr q16, [x21], #0x10\n"
- "zip1 v19.8h, v26.8h, v17.8h\n"
- "zip1 v18.8h, v24.8h, v16.8h\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v26.8h, v17.8h\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v20.8h, v27.8h, v20.8h\n"
- "zip2 v16.8h, v24.8h, v16.8h\n"
+ "zip1 v20.8h, v23.8h, v19.8h\n"
+ "zip1 v24.8h, v25.8h, v18.8h\n"
+ "zip2 v23.8h, v23.8h, v19.8h\n"
+ "zip2 v25.8h, v25.8h, v18.8h\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v24.8h, v23.8h, v19.8h\n"
- "zip1 v17.8h, v22.8h, v18.8h\n"
+ "zip1 v19.8h, v22.8h, v17.8h\n"
+ "zip1 v18.8h, v21.8h, v16.8h\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip2 v23.8h, v23.8h, v19.8h\n"
- "zip2 v19.8h, v22.8h, v18.8h\n"
+ "zip2 v22.8h, v22.8h, v17.8h\n"
+ "zip2 v17.8h, v21.8h, v16.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip1 v16.8h, v24.8h, v18.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip2 v19.8h, v24.8h, v18.8h\n"
+ "zip1 v24.8h, v23.8h, v22.8h\n"
+ "zip1 v18.8h, v25.8h, v17.8h\n"
+ "zip2 v23.8h, v23.8h, v22.8h\n"
+ "zip2 v22.8h, v25.8h, v17.8h\n"
+ "zip1 v17.8h, v21.8h, v16.8h\n"
+ "zip2 v16.8h, v21.8h, v16.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip1 v19.8h, v24.8h, v18.8h\n"
+ "zip2 v18.8h, v24.8h, v18.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
+ "zip1 v17.8h, v23.8h, v22.8h\n"
+ "zip2 v16.8h, v23.8h, v22.8h\n"
+ "str q21, [%x[out_ptr], #0x20]\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
"str q19, [%x[out_ptr], #0x40]\n"
"str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp
index c41120c698..0a76fa812e 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_s8_s16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 1, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset]\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset]\n"
- "add x26, x26, %x[row_offset]\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset]\n"
- "add x24, x24, %x[row_offset]\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset]\n"
+ "add x27, x27, %x[row_offset]\n"
+ "add x26, x26, %x[row_offset]\n"
+ "add x25, x25, %x[row_offset]\n"
+ "add x24, x24, %x[row_offset]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,60 +79,60 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr d25, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "sshll v25.8h, v25.8b, #0x0\n"
- "sshll v27.8h, v27.8b, #0x0\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d24, [x25], #0x8\n"
- "sshll v26.8h, v26.8b, #0x0\n"
- "sshll v24.8h, v24.8b, #0x0\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "sshll v21.8h, v21.8b, #0x0\n"
- "sshll v20.8h, v20.8b, #0x0\n"
- "ldr d17, [x22], #0x8\n"
- "ldr d16, [x21], #0x8\n"
- "sshll v17.8h, v17.8b, #0x0\n"
- "sshll v16.8h, v16.8b, #0x0\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v22.8h, v26.8h, v17.8h\n"
+ "ldr d23, [x28], #0x8\n"
+ "ldr d25, [x27], #0x8\n"
"subs %x[width], %x[width], #0x8\n"
+ "ldr d22, [x26], #0x8\n"
+ "ldr d21, [x25], #0x8\n"
"cmp %x[width], #0x8\n"
- "zip1 v19.8h, v27.8h, v20.8h\n"
- "zip1 v18.8h, v24.8h, v16.8h\n"
+ "ldr d19, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d16, [x21], #0x8\n"
+ "sshll v23.8h, v23.8b, #0x0\n"
+ "sshll v25.8h, v25.8b, #0x0\n"
+ "sshll v22.8h, v22.8b, #0x0\n"
+ "sshll v21.8h, v21.8b, #0x0\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v26.8h, v17.8h\n"
+ "sshll v19.8h, v19.8b, #0x0\n"
+ "sshll v18.8h, v18.8b, #0x0\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v20.8h, v27.8h, v20.8h\n"
- "zip2 v16.8h, v24.8h, v16.8h\n"
+ "sshll v17.8h, v17.8b, #0x0\n"
+ "sshll v16.8h, v16.8b, #0x0\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v24.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v19.8h, v18.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "zip1 v20.8h, v23.8h, v19.8h\n"
+ "zip1 v24.8h, v25.8h, v18.8h\n"
+ "zip2 v23.8h, v23.8h, v19.8h\n"
+ "zip2 v25.8h, v25.8h, v18.8h\n"
+ "zip1 v19.8h, v22.8h, v17.8h\n"
+ "zip1 v18.8h, v21.8h, v16.8h\n"
+ "zip2 v22.8h, v22.8h, v17.8h\n"
+ "zip2 v17.8h, v21.8h, v16.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip1 v16.8h, v24.8h, v18.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip2 v19.8h, v24.8h, v18.8h\n"
+ "zip1 v24.8h, v23.8h, v22.8h\n"
+ "zip1 v18.8h, v25.8h, v17.8h\n"
"zip2 v23.8h, v23.8h, v22.8h\n"
- "zip2 v19.8h, v19.8h, v18.8h\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v22.8h, v25.8h, v17.8h\n"
+ "zip1 v17.8h, v21.8h, v16.8h\n"
+ "zip2 v16.8h, v21.8h, v16.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip1 v19.8h, v24.8h, v18.8h\n"
+ "zip2 v18.8h, v24.8h, v18.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
+ "zip1 v17.8h, v23.8h, v22.8h\n"
+ "zip2 v16.8h, v23.8h, v22.8h\n"
+ "str q21, [%x[out_ptr], #0x20]\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
"str q19, [%x[out_ptr], #0x40]\n"
"str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
@@ -227,11 +227,11 @@ void interleave_block<8, 1, VLType::None, false>(
"sshll v24.8h, v24.8b, #0x0\n"
"sshll v23.8h, v23.8b, #0x0\n"
"zip1 v22.8h, v30.8h, v26.8h\n"
- "zip1 v21.8h, v28.8h, v24.8h\n"
- "zip1 v20.8h, v29.8h, v25.8h\n"
+ "zip1 v21.8h, v29.8h, v25.8h\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
"zip1 v19.8h, v27.8h, v23.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
+ "zip1 v18.8h, v22.8h, v20.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
@@ -241,8 +241,8 @@ void interleave_block<8, 1, VLType::None, false>(
"str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
+ "zip2 v18.8h, v22.8h, v20.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
"subs x20, x20, #0x1\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp
index d29a995b46..be6e8980f6 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block1_u8_u16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 1, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset]\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset]\n"
- "add x26, x26, %x[row_offset]\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset]\n"
- "add x24, x24, %x[row_offset]\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset]\n"
+ "add x27, x27, %x[row_offset]\n"
+ "add x26, x26, %x[row_offset]\n"
+ "add x25, x25, %x[row_offset]\n"
+ "add x24, x24, %x[row_offset]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,60 +79,60 @@ void interleave_block<8, 1, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr d25, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "ushll v25.8h, v25.8b, #0x0\n"
- "ushll v27.8h, v27.8b, #0x0\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d24, [x25], #0x8\n"
- "ushll v26.8h, v26.8b, #0x0\n"
- "ushll v24.8h, v24.8b, #0x0\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ushll v21.8h, v21.8b, #0x0\n"
- "ushll v20.8h, v20.8b, #0x0\n"
- "ldr d17, [x22], #0x8\n"
- "ldr d16, [x21], #0x8\n"
- "ushll v17.8h, v17.8b, #0x0\n"
- "ushll v16.8h, v16.8b, #0x0\n"
- "zip1 v23.8h, v25.8h, v21.8h\n"
- "zip1 v22.8h, v26.8h, v17.8h\n"
+ "ldr d23, [x28], #0x8\n"
+ "ldr d25, [x27], #0x8\n"
"subs %x[width], %x[width], #0x8\n"
+ "ldr d22, [x26], #0x8\n"
+ "ldr d21, [x25], #0x8\n"
"cmp %x[width], #0x8\n"
- "zip1 v19.8h, v27.8h, v20.8h\n"
- "zip1 v18.8h, v24.8h, v16.8h\n"
+ "ldr d19, [x24], #0x8\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d16, [x21], #0x8\n"
+ "ushll v23.8h, v23.8b, #0x0\n"
+ "ushll v25.8h, v25.8b, #0x0\n"
+ "ushll v22.8h, v22.8b, #0x0\n"
+ "ushll v21.8h, v21.8b, #0x0\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "zip2 v25.8h, v25.8h, v21.8h\n"
- "zip2 v21.8h, v26.8h, v17.8h\n"
+ "ushll v19.8h, v19.8b, #0x0\n"
+ "ushll v18.8h, v18.8b, #0x0\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip2 v20.8h, v27.8h, v20.8h\n"
- "zip2 v16.8h, v24.8h, v16.8h\n"
+ "ushll v17.8h, v17.8b, #0x0\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v24.8h, v23.8h, v22.8h\n"
- "zip1 v17.8h, v19.8h, v18.8h\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "zip1 v20.8h, v23.8h, v19.8h\n"
+ "zip1 v24.8h, v25.8h, v18.8h\n"
+ "zip2 v23.8h, v23.8h, v19.8h\n"
+ "zip2 v25.8h, v25.8h, v18.8h\n"
+ "zip1 v19.8h, v22.8h, v17.8h\n"
+ "zip1 v18.8h, v21.8h, v16.8h\n"
+ "zip2 v22.8h, v22.8h, v17.8h\n"
+ "zip2 v17.8h, v21.8h, v16.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip1 v16.8h, v24.8h, v18.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip2 v19.8h, v24.8h, v18.8h\n"
+ "zip1 v24.8h, v23.8h, v22.8h\n"
+ "zip1 v18.8h, v25.8h, v17.8h\n"
"zip2 v23.8h, v23.8h, v22.8h\n"
- "zip2 v19.8h, v19.8h, v18.8h\n"
- "zip1 v22.8h, v25.8h, v21.8h\n"
- "zip1 v18.8h, v20.8h, v16.8h\n"
- "zip2 v21.8h, v25.8h, v21.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "zip1 v16.8h, v24.8h, v17.8h\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.8h, v24.8h, v17.8h\n"
+ "zip2 v22.8h, v25.8h, v17.8h\n"
+ "zip1 v17.8h, v21.8h, v16.8h\n"
+ "zip2 v16.8h, v21.8h, v16.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip1 v19.8h, v24.8h, v18.8h\n"
+ "zip2 v18.8h, v24.8h, v18.8h\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.8h, v23.8h, v19.8h\n"
- "zip2 v16.8h, v23.8h, v19.8h\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
+ "zip1 v17.8h, v23.8h, v22.8h\n"
+ "zip2 v16.8h, v23.8h, v22.8h\n"
+ "str q21, [%x[out_ptr], #0x20]\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
"str q19, [%x[out_ptr], #0x40]\n"
"str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
@@ -227,11 +227,11 @@ void interleave_block<8, 1, VLType::None, false>(
"ushll v24.8h, v24.8b, #0x0\n"
"ushll v23.8h, v23.8b, #0x0\n"
"zip1 v22.8h, v30.8h, v26.8h\n"
- "zip1 v21.8h, v28.8h, v24.8h\n"
- "zip1 v20.8h, v29.8h, v25.8h\n"
+ "zip1 v21.8h, v29.8h, v25.8h\n"
+ "zip1 v20.8h, v28.8h, v24.8h\n"
"zip1 v19.8h, v27.8h, v23.8h\n"
- "zip1 v18.8h, v22.8h, v21.8h\n"
- "zip1 v17.8h, v20.8h, v19.8h\n"
+ "zip1 v18.8h, v22.8h, v20.8h\n"
+ "zip1 v17.8h, v21.8h, v19.8h\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
@@ -241,8 +241,8 @@ void interleave_block<8, 1, VLType::None, false>(
"str q16, [%x[out_ptr], #0x0]\n"
"add %x[out_ptr], %x[out_ptr], #0x10\n"
"beq 8f\n"
- "zip2 v18.8h, v22.8h, v21.8h\n"
- "zip2 v17.8h, v20.8h, v19.8h\n"
+ "zip2 v18.8h, v22.8h, v20.8h\n"
+ "zip2 v17.8h, v21.8h, v19.8h\n"
"subs x20, x20, #0x1\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
"str q16, [%x[out_ptr], #0x0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp
index 43d9d20c10..f034b2b45c 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 2, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset], LSL #1\n"
- "add x26, x26, %x[row_offset], LSL #1\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset], LSL #1\n"
- "add x24, x24, %x[row_offset], LSL #1\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset], LSL #1\n"
+ "add x27, x27, %x[row_offset], LSL #1\n"
+ "add x26, x26, %x[row_offset], LSL #1\n"
+ "add x25, x25, %x[row_offset], LSL #1\n"
+ "add x24, x24, %x[row_offset], LSL #1\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,46 +79,46 @@ void interleave_block<8, 2, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q20, [x28], #0x10\n"
- "ldr q18, [x27], #0x10\n"
+ "ldr q25, [x28], #0x10\n"
+ "ldr q24, [x27], #0x10\n"
"subs %x[width], %x[width], #0x8\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
"cmp %x[width], #0x8\n"
- "ldr q17, [x26], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "zip1 v25.4s, v20.4s, v17.4s\n"
- "zip1 v24.4s, v18.4s, v16.4s\n"
- "ldr q19, [x24], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip2 v22.4s, v20.4s, v17.4s\n"
- "zip2 v21.4s, v18.4s, v16.4s\n"
- "ldr q18, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x23], #0x10\n"
+ "ldr q21, [x22], #0x10\n"
"ldr q16, [x21], #0x10\n"
- "zip1 v20.4s, v19.4s, v18.4s\n"
- "zip1 v17.4s, v23.4s, v16.4s\n"
- "zip2 v19.4s, v19.4s, v18.4s\n"
- "zip2 v18.4s, v23.4s, v16.4s\n"
+ "zip1 v20.4s, v25.4s, v18.4s\n"
+ "zip1 v19.4s, v24.4s, v17.4s\n"
+ "zip2 v26.4s, v25.4s, v18.4s\n"
+ "zip2 v25.4s, v24.4s, v17.4s\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v16.4s, v25.4s, v24.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v23.4s, v21.4s\n"
+ "zip1 v17.4s, v22.4s, v16.4s\n"
+ "zip2 v24.4s, v23.4s, v21.4s\n"
+ "zip2 v23.4s, v22.4s, v16.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v16.4s, v25.4s, v24.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x40]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v16.4s, v19.4s, v18.4s\n"
+ "zip1 v16.4s, v20.4s, v19.4s\n"
+ "zip2 v22.4s, v20.4s, v19.4s\n"
+ "zip1 v21.4s, v18.4s, v17.4s\n"
+ "zip2 v20.4s, v18.4s, v17.4s\n"
+ "zip1 v19.4s, v26.4s, v25.4s\n"
+ "zip1 v18.4s, v24.4s, v23.4s\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "zip2 v17.4s, v26.4s, v25.4s\n"
+ "zip2 v16.4s, v24.4s, v23.4s\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "str q19, [%x[out_ptr], #0x40]\n"
+ "str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
@@ -126,119 +126,119 @@ void interleave_block<8, 2, VLType::None, false>(
"3:" // Main loop skip
"cbz %x[width], 8f\n"
"tbz %x[width], #2, 5f\n"
- "ldr d28, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d21, [x21], #0x8\n"
+ "ldr d29, [x28], #0x8\n"
+ "ldr d28, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v28.s }[2], [x28], #0x4\n"
- "ld1 { v27.s }[2], [x27], #0x4\n"
+ "ld1 { v29.s }[2], [x28], #0x4\n"
+ "ld1 { v28.s }[2], [x27], #0x4\n"
"mov x20, #0x3\n"
- "ld1 { v26.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "ld1 { v24.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v22.s }[2], [x22], #0x4\n"
- "ld1 { v21.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x26], #0x4\n"
+ "ld1 { v26.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "ld1 { v22.s }[2], [x21], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v28.h }[6], [x28]\n"
- "ld1 { v27.h }[6], [x27]\n"
+ "ld1 { v29.h }[6], [x28]\n"
+ "ld1 { v28.h }[6], [x27]\n"
"mov x20, #0x4\n"
- "ld1 { v26.h }[6], [x26]\n"
- "ld1 { v25.h }[6], [x25]\n"
- "ld1 { v24.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v22.h }[6], [x22]\n"
- "ld1 { v21.h }[6], [x21]\n"
+ "ld1 { v27.h }[6], [x26]\n"
+ "ld1 { v26.h }[6], [x25]\n"
+ "ld1 { v25.h }[6], [x24]\n"
+ "ld1 { v24.h }[6], [x23]\n"
+ "ld1 { v23.h }[6], [x22]\n"
+ "ld1 { v22.h }[6], [x21]\n"
"b 7f\n"
"4:" // odd_loads_1_4
"mov x20, #0x2\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v28.h }[4], [x28]\n"
- "ld1 { v27.h }[4], [x27]\n"
+ "ld1 { v29.h }[4], [x28]\n"
+ "ld1 { v28.h }[4], [x27]\n"
"mov x20, #0x3\n"
- "ld1 { v26.h }[4], [x26]\n"
- "ld1 { v25.h }[4], [x25]\n"
- "ld1 { v24.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v22.h }[4], [x22]\n"
- "ld1 { v21.h }[4], [x21]\n"
+ "ld1 { v27.h }[4], [x26]\n"
+ "ld1 { v26.h }[4], [x25]\n"
+ "ld1 { v25.h }[4], [x24]\n"
+ "ld1 { v24.h }[4], [x23]\n"
+ "ld1 { v23.h }[4], [x22]\n"
+ "ld1 { v22.h }[4], [x21]\n"
"b 7f\n"
"5:" // odd_loads_2_0
"tbz %x[width], #1, 6f\n"
- "ldr s28, [x28], #0x4\n"
- "ldr s27, [x27], #0x4\n"
+ "ldr s29, [x28], #0x4\n"
+ "ldr s28, [x27], #0x4\n"
"mov x20, #0x1\n"
- "ldr s26, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s24, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s22, [x22], #0x4\n"
- "ldr s21, [x21], #0x4\n"
+ "ldr s27, [x26], #0x4\n"
+ "ldr s26, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s22, [x21], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v28.h }[2], [x28]\n"
- "ld1 { v27.h }[2], [x27]\n"
+ "ld1 { v29.h }[2], [x28]\n"
+ "ld1 { v28.h }[2], [x27]\n"
"mov x20, #0x2\n"
- "ld1 { v26.h }[2], [x26]\n"
- "ld1 { v25.h }[2], [x25]\n"
- "ld1 { v24.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v22.h }[2], [x22]\n"
- "ld1 { v21.h }[2], [x21]\n"
+ "ld1 { v27.h }[2], [x26]\n"
+ "ld1 { v26.h }[2], [x25]\n"
+ "ld1 { v25.h }[2], [x24]\n"
+ "ld1 { v24.h }[2], [x23]\n"
+ "ld1 { v23.h }[2], [x22]\n"
+ "ld1 { v22.h }[2], [x21]\n"
"b 7f\n"
"6:" // odd_loads_1_0
- "ldr h28, [x28, #0x0]\n"
- "ldr h27, [x27, #0x0]\n"
+ "ldr h29, [x28, #0x0]\n"
+ "ldr h28, [x27, #0x0]\n"
"mov x20, #0x1\n"
- "ldr h26, [x26, #0x0]\n"
- "ldr h25, [x25, #0x0]\n"
- "ldr h24, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h22, [x22, #0x0]\n"
- "ldr h21, [x21, #0x0]\n"
+ "ldr h27, [x26, #0x0]\n"
+ "ldr h26, [x25, #0x0]\n"
+ "ldr h25, [x24, #0x0]\n"
+ "ldr h24, [x23, #0x0]\n"
+ "ldr h23, [x22, #0x0]\n"
+ "ldr h22, [x21, #0x0]\n"
"7:" // Odd load end
+ "zip1 v21.4s, v29.4s, v27.4s\n"
"zip1 v20.4s, v28.4s, v26.4s\n"
- "zip1 v19.4s, v27.4s, v25.4s\n"
"subs x20, x20, #0x1\n"
+ "zip1 v19.4s, v25.4s, v23.4s\n"
"zip1 v18.4s, v24.4s, v22.4s\n"
- "zip1 v17.4s, v23.4s, v21.4s\n"
- "zip1 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v17.4s, v21.4s, v20.4s\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 8f\n"
"subs x20, x20, #0x1\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ "zip2 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 8f\n"
+ "zip2 v21.4s, v29.4s, v27.4s\n"
"zip2 v20.4s, v28.4s, v26.4s\n"
- "zip2 v19.4s, v27.4s, v25.4s\n"
"subs x20, x20, #0x1\n"
+ "zip2 v19.4s, v25.4s, v23.4s\n"
"zip2 v18.4s, v24.4s, v22.4s\n"
- "zip2 v17.4s, v23.4s, v21.4s\n"
- "zip1 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v17.4s, v21.4s, v20.4s\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 8f\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ "zip2 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"8:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp
index 3ec03370a0..a57810ce20 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block2_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 2, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #2\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset], LSL #2\n"
- "add x26, x26, %x[row_offset], LSL #2\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset], LSL #2\n"
- "add x24, x24, %x[row_offset], LSL #2\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset], LSL #2\n"
+ "add x27, x27, %x[row_offset], LSL #2\n"
+ "add x26, x26, %x[row_offset], LSL #2\n"
+ "add x25, x25, %x[row_offset], LSL #2\n"
+ "add x24, x24, %x[row_offset], LSL #2\n"
"add x23, x23, %x[row_offset], LSL #2\n"
"add x22, x22, %x[row_offset], LSL #2\n"
"add x21, x21, %x[row_offset], LSL #2\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,100 +79,100 @@ void interleave_block<8, 2, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q20, [x28], #0x10\n"
- "ldr q19, [x27], #0x10\n"
+ "ldr q21, [x28], #0x10\n"
+ "ldr q16, [x27], #0x10\n"
"subs %x[width], %x[width], #0x4\n"
+ "ldr q20, [x26], #0x10\n"
+ "ldr q19, [x25], #0x10\n"
"cmp %x[width], #0x4\n"
- "ldr q25, [x26], #0x10\n"
- "ldr q24, [x25], #0x10\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "ldr q23, [x24], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip2 v21.2d, v20.2d, v19.2d\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q19, [x21], #0x10\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
+ "ldr q24, [x24], #0x10\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip1 v17.2d, v21.2d, v16.2d\n"
+ "zip2 v21.2d, v21.2d, v16.2d\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip1 v16.2d, v20.2d, v19.2d\n"
+ "zip2 v20.2d, v20.2d, v19.2d\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v18.2d\n"
+ "zip2 v18.2d, v24.2d, v18.2d\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip1 v17.2d, v23.2d, v22.2d\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v16.2d, v23.2d, v22.2d\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
"str q21, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "str q20, [%x[out_ptr], #0x50]\n"
+ "str q18, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 6f\n"
"tbz %x[width], #1, 4f\n"
- "ldr d25, [x28], #0x8\n"
- "ldr d24, [x27], #0x8\n"
+ "ldr d27, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
"mov x20, #0x1\n"
- "ldr d23, [x26], #0x8\n"
- "ldr d22, [x25], #0x8\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d18, [x21], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
"tbz %x[width], #0, 5f\n"
- "ld1 { v25.s }[2], [x28]\n"
- "ld1 { v24.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
"mov x20, #0x2\n"
- "ld1 { v23.s }[2], [x26]\n"
- "ld1 { v22.s }[2], [x25]\n"
- "ld1 { v21.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v19.s }[2], [x22]\n"
- "ld1 { v18.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v20.s }[2], [x21]\n"
"b 5f\n"
"4:" // odd_loads_1_0
- "ldr s25, [x28, #0x0]\n"
- "ldr s24, [x27, #0x0]\n"
+ "ldr s27, [x28, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
"mov x20, #0x1\n"
- "ldr s23, [x26, #0x0]\n"
- "ldr s22, [x25, #0x0]\n"
- "ldr s21, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s19, [x22, #0x0]\n"
- "ldr s18, [x21, #0x0]\n"
+ "ldr s25, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s20, [x21, #0x0]\n"
"5:" // Odd load end
"subs x20, x20, #0x1\n"
- "zip1 v16.2d, v25.2d, v24.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v23.2d, v22.2d\n"
- "str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.2d, v21.2d, v20.2d\n"
- "zip1 v16.2d, v19.2d, v18.2d\n"
+ "zip1 v19.2d, v27.2d, v26.2d\n"
+ "zip1 v18.2d, v25.2d, v24.2d\n"
+ "zip1 v17.2d, v23.2d, v22.2d\n"
+ "zip1 v16.2d, v21.2d, v20.2d\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"str q17, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"beq 6f\n"
- "zip2 v16.2d, v25.2d, v24.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.2d, v23.2d, v22.2d\n"
- "str q16, [%x[out_ptr], #0x10]\n"
- "zip2 v17.2d, v21.2d, v20.2d\n"
- "zip2 v16.2d, v19.2d, v18.2d\n"
+ "zip2 v19.2d, v27.2d, v26.2d\n"
+ "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip2 v16.2d, v21.2d, v20.2d\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"str q17, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"6:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp
index e9799f87a9..edc1375b02 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 4, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #1\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset], LSL #1\n"
- "add x26, x26, %x[row_offset], LSL #1\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset], LSL #1\n"
- "add x24, x24, %x[row_offset], LSL #1\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset], LSL #1\n"
+ "add x27, x27, %x[row_offset], LSL #1\n"
+ "add x26, x26, %x[row_offset], LSL #1\n"
+ "add x25, x25, %x[row_offset], LSL #1\n"
+ "add x24, x24, %x[row_offset], LSL #1\n"
"add x23, x23, %x[row_offset], LSL #1\n"
"add x22, x22, %x[row_offset], LSL #1\n"
"add x21, x21, %x[row_offset], LSL #1\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,142 +79,142 @@ void interleave_block<8, 4, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q20, [x28], #0x10\n"
- "ldr q19, [x27], #0x10\n"
+ "ldr q21, [x28], #0x10\n"
+ "ldr q16, [x27], #0x10\n"
"subs %x[width], %x[width], #0x8\n"
+ "ldr q20, [x26], #0x10\n"
+ "ldr q19, [x25], #0x10\n"
"cmp %x[width], #0x8\n"
- "ldr q25, [x26], #0x10\n"
- "ldr q24, [x25], #0x10\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "ldr q23, [x24], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip2 v21.2d, v20.2d, v19.2d\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q19, [x21], #0x10\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
+ "ldr q24, [x24], #0x10\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip1 v17.2d, v21.2d, v16.2d\n"
+ "zip2 v21.2d, v21.2d, v16.2d\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip1 v16.2d, v20.2d, v19.2d\n"
+ "zip2 v20.2d, v20.2d, v19.2d\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v18.2d\n"
+ "zip2 v18.2d, v24.2d, v18.2d\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip1 v17.2d, v23.2d, v22.2d\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v16.2d, v23.2d, v22.2d\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
"str q21, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "str q20, [%x[out_ptr], #0x50]\n"
+ "str q18, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 8f\n"
"tbz %x[width], #2, 5f\n"
- "ldr d25, [x28], #0x8\n"
- "ldr d24, [x27], #0x8\n"
- "ldr d23, [x26], #0x8\n"
- "ldr d22, [x25], #0x8\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d18, [x21], #0x8\n"
+ "ldr d27, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v25.s }[2], [x28], #0x4\n"
- "ld1 { v24.s }[2], [x27], #0x4\n"
+ "ld1 { v27.s }[2], [x28], #0x4\n"
+ "ld1 { v26.s }[2], [x27], #0x4\n"
"mov x20, #0x2\n"
- "ld1 { v23.s }[2], [x26], #0x4\n"
- "ld1 { v22.s }[2], [x25], #0x4\n"
- "ld1 { v21.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v19.s }[2], [x22], #0x4\n"
- "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v25.s }[2], [x26], #0x4\n"
+ "ld1 { v24.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v20.s }[2], [x21], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[6], [x28]\n"
- "ld1 { v24.h }[6], [x27]\n"
- "ld1 { v23.h }[6], [x26]\n"
- "ld1 { v22.h }[6], [x25]\n"
- "ld1 { v21.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v19.h }[6], [x22]\n"
- "ld1 { v18.h }[6], [x21]\n"
+ "ld1 { v27.h }[6], [x28]\n"
+ "ld1 { v26.h }[6], [x27]\n"
+ "ld1 { v25.h }[6], [x26]\n"
+ "ld1 { v24.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
+ "ld1 { v22.h }[6], [x23]\n"
+ "ld1 { v21.h }[6], [x22]\n"
+ "ld1 { v20.h }[6], [x21]\n"
"b 7f\n"
"4:" // odd_loads_1_4
"mov x20, #0x1\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[4], [x28]\n"
- "ld1 { v24.h }[4], [x27]\n"
+ "ld1 { v27.h }[4], [x28]\n"
+ "ld1 { v26.h }[4], [x27]\n"
"mov x20, #0x2\n"
- "ld1 { v23.h }[4], [x26]\n"
- "ld1 { v22.h }[4], [x25]\n"
- "ld1 { v21.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v19.h }[4], [x22]\n"
- "ld1 { v18.h }[4], [x21]\n"
+ "ld1 { v25.h }[4], [x26]\n"
+ "ld1 { v24.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
+ "ld1 { v22.h }[4], [x23]\n"
+ "ld1 { v21.h }[4], [x22]\n"
+ "ld1 { v20.h }[4], [x21]\n"
"b 7f\n"
"5:" // odd_loads_2_0
"tbz %x[width], #1, 6f\n"
- "ldr s25, [x28], #0x4\n"
- "ldr s24, [x27], #0x4\n"
+ "ldr s27, [x28], #0x4\n"
+ "ldr s26, [x27], #0x4\n"
"mov x20, #0x1\n"
- "ldr s23, [x26], #0x4\n"
- "ldr s22, [x25], #0x4\n"
- "ldr s21, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s19, [x22], #0x4\n"
- "ldr s18, [x21], #0x4\n"
+ "ldr s25, [x26], #0x4\n"
+ "ldr s24, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s20, [x21], #0x4\n"
"tbz %x[width], #0, 7f\n"
- "ld1 { v25.h }[2], [x28]\n"
- "ld1 { v24.h }[2], [x27]\n"
- "ld1 { v23.h }[2], [x26]\n"
- "ld1 { v22.h }[2], [x25]\n"
- "ld1 { v21.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v19.h }[2], [x22]\n"
- "ld1 { v18.h }[2], [x21]\n"
+ "ld1 { v27.h }[2], [x28]\n"
+ "ld1 { v26.h }[2], [x27]\n"
+ "ld1 { v25.h }[2], [x26]\n"
+ "ld1 { v24.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
+ "ld1 { v22.h }[2], [x23]\n"
+ "ld1 { v21.h }[2], [x22]\n"
+ "ld1 { v20.h }[2], [x21]\n"
"b 7f\n"
"6:" // odd_loads_1_0
- "ldr h25, [x28, #0x0]\n"
- "ldr h24, [x27, #0x0]\n"
+ "ldr h27, [x28, #0x0]\n"
+ "ldr h26, [x27, #0x0]\n"
"mov x20, #0x1\n"
- "ldr h23, [x26, #0x0]\n"
- "ldr h22, [x25, #0x0]\n"
- "ldr h21, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h19, [x22, #0x0]\n"
- "ldr h18, [x21, #0x0]\n"
+ "ldr h25, [x26, #0x0]\n"
+ "ldr h24, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
+ "ldr h22, [x23, #0x0]\n"
+ "ldr h21, [x22, #0x0]\n"
+ "ldr h20, [x21, #0x0]\n"
"7:" // Odd load end
"subs x20, x20, #0x1\n"
- "zip1 v16.2d, v25.2d, v24.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v23.2d, v22.2d\n"
- "str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.2d, v21.2d, v20.2d\n"
- "zip1 v16.2d, v19.2d, v18.2d\n"
+ "zip1 v19.2d, v27.2d, v26.2d\n"
+ "zip1 v18.2d, v25.2d, v24.2d\n"
+ "zip1 v17.2d, v23.2d, v22.2d\n"
+ "zip1 v16.2d, v21.2d, v20.2d\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"str q17, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"beq 8f\n"
- "zip2 v16.2d, v25.2d, v24.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.2d, v23.2d, v22.2d\n"
- "str q16, [%x[out_ptr], #0x10]\n"
- "zip2 v17.2d, v21.2d, v20.2d\n"
- "zip2 v16.2d, v19.2d, v18.2d\n"
+ "zip2 v19.2d, v27.2d, v26.2d\n"
+ "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip2 v16.2d, v21.2d, v20.2d\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"str q17, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"8:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp
index 730bfd6342..ef1493b605 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_fp32_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 4, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset], LSL #2\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset], LSL #2\n"
- "add x26, x26, %x[row_offset], LSL #2\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset], LSL #2\n"
- "add x24, x24, %x[row_offset], LSL #2\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset], LSL #2\n"
+ "add x27, x27, %x[row_offset], LSL #2\n"
+ "add x26, x26, %x[row_offset], LSL #2\n"
+ "add x25, x25, %x[row_offset], LSL #2\n"
+ "add x24, x24, %x[row_offset], LSL #2\n"
"add x23, x23, %x[row_offset], LSL #2\n"
"add x22, x22, %x[row_offset], LSL #2\n"
"add x21, x21, %x[row_offset], LSL #2\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,36 +79,36 @@ void interleave_block<8, 4, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q17, [x28], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- ".inst 0x0ea16a37 // bfcvtn v23.4h, v17.4s\n"
- ".inst 0x0ea16a16 // bfcvtn v22.4h, v16.4s\n"
+ "ldr q19, [x28], #0x10\n"
+ "ldr q18, [x26], #0x10\n"
+ "subs %x[width], %x[width], #0x4\n"
"ldr q17, [x24], #0x10\n"
"ldr q16, [x22], #0x10\n"
- ".inst 0x0ea16a35 // bfcvtn v21.4h, v17.4s\n"
- ".inst 0x0ea16a14 // bfcvtn v20.4h, v16.4s\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x25], #0x10\n"
- "subs %x[width], %x[width], #0x4\n"
"cmp %x[width], #0x4\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x21], #0x10\n"
- ".inst 0x4ea16a77 // bfcvtn2 v23.8h, v19.4s\n"
- ".inst 0x4ea16a56 // bfcvtn2 v22.8h, v18.4s\n"
+ "ldr q23, [x27], #0x10\n"
+ "ldr q22, [x25], #0x10\n"
+ "ldr q21, [x23], #0x10\n"
+ "ldr q20, [x21], #0x10\n"
+ ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
+ ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
+ ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- ".inst 0x4ea16a35 // bfcvtn2 v21.8h, v17.4s\n"
- ".inst 0x4ea16a14 // bfcvtn2 v20.8h, v16.4s\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q23, [%x[out_ptr], #0x0]\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q22, [%x[out_ptr], #0x10]\n"
+ ".inst 0x4ea16af3 // bfcvtn2 v19.8h, v23.4s\n"
+ ".inst 0x4ea16ad2 // bfcvtn2 v18.8h, v22.4s\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "str q21, [%x[out_ptr], #0x20]\n"
- "str q20, [%x[out_ptr], #0x30]\n"
+ ".inst 0x4ea16ab1 // bfcvtn2 v17.8h, v21.4s\n"
+ ".inst 0x4ea16a90 // bfcvtn2 v16.8h, v20.4s\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
+ "str q17, [%x[out_ptr], #0x20]\n"
+ "str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"bge 2b\n"
"3:" // Main loop skip
@@ -150,9 +150,9 @@ void interleave_block<8, 4, VLType::None, false>(
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
".inst 0x4ea16af3 // bfcvtn2 v19.8h, v23.4s\n"
".inst 0x4ea16ad2 // bfcvtn2 v18.8h, v22.4s\n"
- "str q19, [%x[out_ptr], #0x0]\n"
".inst 0x4ea16ab1 // bfcvtn2 v17.8h, v21.4s\n"
".inst 0x4ea16a90 // bfcvtn2 v16.8h, v20.4s\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
"str q18, [%x[out_ptr], #0x10]\n"
"str q17, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp
index 15d8ddbe53..ad213db3e5 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block4_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 4, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset]\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset]\n"
- "add x26, x26, %x[row_offset]\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset]\n"
- "add x24, x24, %x[row_offset]\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset]\n"
+ "add x27, x27, %x[row_offset]\n"
+ "add x26, x26, %x[row_offset]\n"
+ "add x25, x25, %x[row_offset]\n"
+ "add x24, x24, %x[row_offset]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,46 +79,46 @@ void interleave_block<8, 4, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q20, [x28], #0x10\n"
- "ldr q18, [x27], #0x10\n"
+ "ldr q25, [x28], #0x10\n"
+ "ldr q24, [x27], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q17, [x26], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "zip1 v25.4s, v20.4s, v17.4s\n"
- "zip1 v24.4s, v18.4s, v16.4s\n"
- "ldr q19, [x24], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip2 v22.4s, v20.4s, v17.4s\n"
- "zip2 v21.4s, v18.4s, v16.4s\n"
- "ldr q18, [x22], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q22, [x23], #0x10\n"
+ "ldr q21, [x22], #0x10\n"
"ldr q16, [x21], #0x10\n"
- "zip1 v20.4s, v19.4s, v18.4s\n"
- "zip1 v17.4s, v23.4s, v16.4s\n"
- "zip2 v19.4s, v19.4s, v18.4s\n"
- "zip2 v18.4s, v23.4s, v16.4s\n"
+ "zip1 v20.4s, v25.4s, v18.4s\n"
+ "zip1 v19.4s, v24.4s, v17.4s\n"
+ "zip2 v26.4s, v25.4s, v18.4s\n"
+ "zip2 v25.4s, v24.4s, v17.4s\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "zip1 v16.4s, v25.4s, v24.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
+ "zip1 v18.4s, v23.4s, v21.4s\n"
+ "zip1 v17.4s, v22.4s, v16.4s\n"
+ "zip2 v24.4s, v23.4s, v21.4s\n"
+ "zip2 v23.4s, v22.4s, v16.4s\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "zip1 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x10]\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
- "zip2 v16.4s, v25.4s, v24.4s\n"
- "str q16, [%x[out_ptr], #0x20]\n"
- "zip2 v16.4s, v20.4s, v17.4s\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip1 v16.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x40]\n"
- "zip1 v16.4s, v19.4s, v18.4s\n"
- "zip2 v17.4s, v22.4s, v21.4s\n"
- "str q16, [%x[out_ptr], #0x50]\n"
- "zip2 v16.4s, v19.4s, v18.4s\n"
+ "zip1 v16.4s, v20.4s, v19.4s\n"
+ "zip2 v22.4s, v20.4s, v19.4s\n"
+ "zip1 v21.4s, v18.4s, v17.4s\n"
+ "zip2 v20.4s, v18.4s, v17.4s\n"
+ "zip1 v19.4s, v26.4s, v25.4s\n"
+ "zip1 v18.4s, v24.4s, v23.4s\n"
+ "str q16, [%x[out_ptr], #0x0]\n"
+ "zip2 v17.4s, v26.4s, v25.4s\n"
+ "zip2 v16.4s, v24.4s, v23.4s\n"
+ "str q21, [%x[out_ptr], #0x10]\n"
+ "str q22, [%x[out_ptr], #0x20]\n"
+ "str q20, [%x[out_ptr], #0x30]\n"
+ "str q19, [%x[out_ptr], #0x40]\n"
+ "str q18, [%x[out_ptr], #0x50]\n"
"str q17, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
@@ -126,203 +126,203 @@ void interleave_block<8, 4, VLType::None, false>(
"3:" // Main loop skip
"cbz %x[width], 12f\n"
"tbz %x[width], #3, 7f\n"
- "ldr d28, [x28], #0x8\n"
- "ldr d27, [x27], #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d21, [x21], #0x8\n"
+ "ldr d29, [x28], #0x8\n"
+ "ldr d28, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d25, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d22, [x21], #0x8\n"
"tbz %x[width], #2, 5f\n"
- "ld1 { v28.s }[2], [x28], #0x4\n"
- "ld1 { v27.s }[2], [x27], #0x4\n"
- "ld1 { v26.s }[2], [x26], #0x4\n"
- "ld1 { v25.s }[2], [x25], #0x4\n"
- "ld1 { v24.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v22.s }[2], [x22], #0x4\n"
- "ld1 { v21.s }[2], [x21], #0x4\n"
+ "ld1 { v29.s }[2], [x28], #0x4\n"
+ "ld1 { v28.s }[2], [x27], #0x4\n"
+ "ld1 { v27.s }[2], [x26], #0x4\n"
+ "ld1 { v26.s }[2], [x25], #0x4\n"
+ "ld1 { v25.s }[2], [x24], #0x4\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
+ "ld1 { v23.s }[2], [x22], #0x4\n"
+ "ld1 { v22.s }[2], [x21], #0x4\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v28.h }[6], [x28], #0x2\n"
- "ld1 { v27.h }[6], [x27], #0x2\n"
+ "ld1 { v29.h }[6], [x28], #0x2\n"
+ "ld1 { v28.h }[6], [x27], #0x2\n"
"mov x20, #0x4\n"
- "ld1 { v26.h }[6], [x26], #0x2\n"
- "ld1 { v25.h }[6], [x25], #0x2\n"
- "ld1 { v24.h }[6], [x24], #0x2\n"
- "ld1 { v23.h }[6], [x23], #0x2\n"
- "ld1 { v22.h }[6], [x22], #0x2\n"
- "ld1 { v21.h }[6], [x21], #0x2\n"
+ "ld1 { v27.h }[6], [x26], #0x2\n"
+ "ld1 { v26.h }[6], [x25], #0x2\n"
+ "ld1 { v25.h }[6], [x24], #0x2\n"
+ "ld1 { v24.h }[6], [x23], #0x2\n"
+ "ld1 { v23.h }[6], [x22], #0x2\n"
+ "ld1 { v22.h }[6], [x21], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[14], [x28]\n"
- "ld1 { v27.b }[14], [x27]\n"
- "ld1 { v26.b }[14], [x26]\n"
- "ld1 { v25.b }[14], [x25]\n"
- "ld1 { v24.b }[14], [x24]\n"
- "ld1 { v23.b }[14], [x23]\n"
- "ld1 { v22.b }[14], [x22]\n"
- "ld1 { v21.b }[14], [x21]\n"
+ "ld1 { v29.b }[14], [x28]\n"
+ "ld1 { v28.b }[14], [x27]\n"
+ "ld1 { v27.b }[14], [x26]\n"
+ "ld1 { v26.b }[14], [x25]\n"
+ "ld1 { v25.b }[14], [x24]\n"
+ "ld1 { v24.b }[14], [x23]\n"
+ "ld1 { v23.b }[14], [x22]\n"
+ "ld1 { v22.b }[14], [x21]\n"
"b 11f\n"
"4:" // odd_loads_1_12
"mov x20, #0x3\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[12], [x28]\n"
- "ld1 { v27.b }[12], [x27]\n"
+ "ld1 { v29.b }[12], [x28]\n"
+ "ld1 { v28.b }[12], [x27]\n"
"mov x20, #0x4\n"
- "ld1 { v26.b }[12], [x26]\n"
- "ld1 { v25.b }[12], [x25]\n"
- "ld1 { v24.b }[12], [x24]\n"
- "ld1 { v23.b }[12], [x23]\n"
- "ld1 { v22.b }[12], [x22]\n"
- "ld1 { v21.b }[12], [x21]\n"
+ "ld1 { v27.b }[12], [x26]\n"
+ "ld1 { v26.b }[12], [x25]\n"
+ "ld1 { v25.b }[12], [x24]\n"
+ "ld1 { v24.b }[12], [x23]\n"
+ "ld1 { v23.b }[12], [x22]\n"
+ "ld1 { v22.b }[12], [x21]\n"
"b 11f\n"
"5:" // odd_loads_2_8
"tbz %x[width], #1, 6f\n"
- "ld1 { v28.h }[4], [x28], #0x2\n"
- "ld1 { v27.h }[4], [x27], #0x2\n"
+ "ld1 { v29.h }[4], [x28], #0x2\n"
+ "ld1 { v28.h }[4], [x27], #0x2\n"
"mov x20, #0x3\n"
- "ld1 { v26.h }[4], [x26], #0x2\n"
- "ld1 { v25.h }[4], [x25], #0x2\n"
- "ld1 { v24.h }[4], [x24], #0x2\n"
- "ld1 { v23.h }[4], [x23], #0x2\n"
- "ld1 { v22.h }[4], [x22], #0x2\n"
- "ld1 { v21.h }[4], [x21], #0x2\n"
+ "ld1 { v27.h }[4], [x26], #0x2\n"
+ "ld1 { v26.h }[4], [x25], #0x2\n"
+ "ld1 { v25.h }[4], [x24], #0x2\n"
+ "ld1 { v24.h }[4], [x23], #0x2\n"
+ "ld1 { v23.h }[4], [x22], #0x2\n"
+ "ld1 { v22.h }[4], [x21], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[10], [x28]\n"
- "ld1 { v27.b }[10], [x27]\n"
- "ld1 { v26.b }[10], [x26]\n"
- "ld1 { v25.b }[10], [x25]\n"
- "ld1 { v24.b }[10], [x24]\n"
- "ld1 { v23.b }[10], [x23]\n"
- "ld1 { v22.b }[10], [x22]\n"
- "ld1 { v21.b }[10], [x21]\n"
+ "ld1 { v29.b }[10], [x28]\n"
+ "ld1 { v28.b }[10], [x27]\n"
+ "ld1 { v27.b }[10], [x26]\n"
+ "ld1 { v26.b }[10], [x25]\n"
+ "ld1 { v25.b }[10], [x24]\n"
+ "ld1 { v24.b }[10], [x23]\n"
+ "ld1 { v23.b }[10], [x22]\n"
+ "ld1 { v22.b }[10], [x21]\n"
"b 11f\n"
"6:" // odd_loads_1_8
"mov x20, #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[8], [x28]\n"
- "ld1 { v27.b }[8], [x27]\n"
+ "ld1 { v29.b }[8], [x28]\n"
+ "ld1 { v28.b }[8], [x27]\n"
"mov x20, #0x3\n"
- "ld1 { v26.b }[8], [x26]\n"
- "ld1 { v25.b }[8], [x25]\n"
- "ld1 { v24.b }[8], [x24]\n"
- "ld1 { v23.b }[8], [x23]\n"
- "ld1 { v22.b }[8], [x22]\n"
- "ld1 { v21.b }[8], [x21]\n"
+ "ld1 { v27.b }[8], [x26]\n"
+ "ld1 { v26.b }[8], [x25]\n"
+ "ld1 { v25.b }[8], [x24]\n"
+ "ld1 { v24.b }[8], [x23]\n"
+ "ld1 { v23.b }[8], [x22]\n"
+ "ld1 { v22.b }[8], [x21]\n"
"b 11f\n"
"7:" // odd_loads_4_0
"tbz %x[width], #2, 9f\n"
- "ldr s28, [x28], #0x4\n"
- "ldr s27, [x27], #0x4\n"
- "ldr s26, [x26], #0x4\n"
- "ldr s25, [x25], #0x4\n"
- "ldr s24, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s22, [x22], #0x4\n"
- "ldr s21, [x21], #0x4\n"
+ "ldr s29, [x28], #0x4\n"
+ "ldr s28, [x27], #0x4\n"
+ "ldr s27, [x26], #0x4\n"
+ "ldr s26, [x25], #0x4\n"
+ "ldr s25, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "ldr s22, [x21], #0x4\n"
"tbz %x[width], #1, 8f\n"
- "ld1 { v28.h }[2], [x28], #0x2\n"
- "ld1 { v27.h }[2], [x27], #0x2\n"
+ "ld1 { v29.h }[2], [x28], #0x2\n"
+ "ld1 { v28.h }[2], [x27], #0x2\n"
"mov x20, #0x2\n"
- "ld1 { v26.h }[2], [x26], #0x2\n"
- "ld1 { v25.h }[2], [x25], #0x2\n"
- "ld1 { v24.h }[2], [x24], #0x2\n"
- "ld1 { v23.h }[2], [x23], #0x2\n"
- "ld1 { v22.h }[2], [x22], #0x2\n"
- "ld1 { v21.h }[2], [x21], #0x2\n"
+ "ld1 { v27.h }[2], [x26], #0x2\n"
+ "ld1 { v26.h }[2], [x25], #0x2\n"
+ "ld1 { v25.h }[2], [x24], #0x2\n"
+ "ld1 { v24.h }[2], [x23], #0x2\n"
+ "ld1 { v23.h }[2], [x22], #0x2\n"
+ "ld1 { v22.h }[2], [x21], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[6], [x28]\n"
- "ld1 { v27.b }[6], [x27]\n"
- "ld1 { v26.b }[6], [x26]\n"
- "ld1 { v25.b }[6], [x25]\n"
- "ld1 { v24.b }[6], [x24]\n"
- "ld1 { v23.b }[6], [x23]\n"
- "ld1 { v22.b }[6], [x22]\n"
- "ld1 { v21.b }[6], [x21]\n"
+ "ld1 { v29.b }[6], [x28]\n"
+ "ld1 { v28.b }[6], [x27]\n"
+ "ld1 { v27.b }[6], [x26]\n"
+ "ld1 { v26.b }[6], [x25]\n"
+ "ld1 { v25.b }[6], [x24]\n"
+ "ld1 { v24.b }[6], [x23]\n"
+ "ld1 { v23.b }[6], [x22]\n"
+ "ld1 { v22.b }[6], [x21]\n"
"b 11f\n"
"8:" // odd_loads_1_4
"mov x20, #0x1\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[4], [x28]\n"
- "ld1 { v27.b }[4], [x27]\n"
+ "ld1 { v29.b }[4], [x28]\n"
+ "ld1 { v28.b }[4], [x27]\n"
"mov x20, #0x2\n"
- "ld1 { v26.b }[4], [x26]\n"
- "ld1 { v25.b }[4], [x25]\n"
- "ld1 { v24.b }[4], [x24]\n"
- "ld1 { v23.b }[4], [x23]\n"
- "ld1 { v22.b }[4], [x22]\n"
- "ld1 { v21.b }[4], [x21]\n"
+ "ld1 { v27.b }[4], [x26]\n"
+ "ld1 { v26.b }[4], [x25]\n"
+ "ld1 { v25.b }[4], [x24]\n"
+ "ld1 { v24.b }[4], [x23]\n"
+ "ld1 { v23.b }[4], [x22]\n"
+ "ld1 { v22.b }[4], [x21]\n"
"b 11f\n"
"9:" // odd_loads_2_0
"tbz %x[width], #1, 10f\n"
- "ldr h28, [x28], #0x2\n"
- "ldr h27, [x27], #0x2\n"
+ "ldr h29, [x28], #0x2\n"
+ "ldr h28, [x27], #0x2\n"
"mov x20, #0x1\n"
- "ldr h26, [x26], #0x2\n"
- "ldr h25, [x25], #0x2\n"
- "ldr h24, [x24], #0x2\n"
- "ldr h23, [x23], #0x2\n"
- "ldr h22, [x22], #0x2\n"
- "ldr h21, [x21], #0x2\n"
+ "ldr h27, [x26], #0x2\n"
+ "ldr h26, [x25], #0x2\n"
+ "ldr h25, [x24], #0x2\n"
+ "ldr h24, [x23], #0x2\n"
+ "ldr h23, [x22], #0x2\n"
+ "ldr h22, [x21], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v28.b }[2], [x28]\n"
- "ld1 { v27.b }[2], [x27]\n"
- "ld1 { v26.b }[2], [x26]\n"
- "ld1 { v25.b }[2], [x25]\n"
- "ld1 { v24.b }[2], [x24]\n"
- "ld1 { v23.b }[2], [x23]\n"
- "ld1 { v22.b }[2], [x22]\n"
- "ld1 { v21.b }[2], [x21]\n"
+ "ld1 { v29.b }[2], [x28]\n"
+ "ld1 { v28.b }[2], [x27]\n"
+ "ld1 { v27.b }[2], [x26]\n"
+ "ld1 { v26.b }[2], [x25]\n"
+ "ld1 { v25.b }[2], [x24]\n"
+ "ld1 { v24.b }[2], [x23]\n"
+ "ld1 { v23.b }[2], [x22]\n"
+ "ld1 { v22.b }[2], [x21]\n"
"b 11f\n"
"10:" // odd_loads_1_0
- "ldr b28, [x28, #0x0]\n"
- "ldr b27, [x27, #0x0]\n"
+ "ldr b29, [x28, #0x0]\n"
+ "ldr b28, [x27, #0x0]\n"
"mov x20, #0x1\n"
- "ldr b26, [x26, #0x0]\n"
- "ldr b25, [x25, #0x0]\n"
- "ldr b24, [x24, #0x0]\n"
- "ldr b23, [x23, #0x0]\n"
- "ldr b22, [x22, #0x0]\n"
- "ldr b21, [x21, #0x0]\n"
+ "ldr b27, [x26, #0x0]\n"
+ "ldr b26, [x25, #0x0]\n"
+ "ldr b25, [x24, #0x0]\n"
+ "ldr b24, [x23, #0x0]\n"
+ "ldr b23, [x22, #0x0]\n"
+ "ldr b22, [x21, #0x0]\n"
"11:" // Odd load end
+ "zip1 v21.4s, v29.4s, v27.4s\n"
"zip1 v20.4s, v28.4s, v26.4s\n"
- "zip1 v19.4s, v27.4s, v25.4s\n"
"subs x20, x20, #0x1\n"
+ "zip1 v19.4s, v25.4s, v23.4s\n"
"zip1 v18.4s, v24.4s, v22.4s\n"
- "zip1 v17.4s, v23.4s, v21.4s\n"
- "zip1 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v17.4s, v21.4s, v20.4s\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 12f\n"
"subs x20, x20, #0x1\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ "zip2 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 12f\n"
+ "zip2 v21.4s, v29.4s, v27.4s\n"
"zip2 v20.4s, v28.4s, v26.4s\n"
- "zip2 v19.4s, v27.4s, v25.4s\n"
"subs x20, x20, #0x1\n"
+ "zip2 v19.4s, v25.4s, v23.4s\n"
"zip2 v18.4s, v24.4s, v22.4s\n"
- "zip2 v17.4s, v23.4s, v21.4s\n"
- "zip1 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.4s, v18.4s, v17.4s\n"
+ "zip1 v17.4s, v21.4s, v20.4s\n"
+ "zip1 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"beq 12f\n"
- "zip2 v16.4s, v20.4s, v19.4s\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.4s, v18.4s, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ "zip2 v16.4s, v19.4s, v18.4s\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
"str q16, [%x[out_ptr], #0x10]\n"
"add %x[out_ptr], %x[out_ptr], #0x20\n"
"12:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp
index 7b445ef3d4..28d2196ade 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/a64_interleave8_block8_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,29 +34,29 @@ void interleave_block<8, 8, VLType::None, false>(
"ldr x28, [%x[in], #0x0]\n"
"ldr x27, [%x[in], #0x8]\n"
"cmp %x[height], #0x8\n"
- "add x28, x28, %x[row_offset]\n"
"ldr x26, [%x[in], #0x10]\n"
"ldr x25, [%x[in], #0x18]\n"
- "add x27, x27, %x[row_offset]\n"
- "add x26, x26, %x[row_offset]\n"
"ldr x24, [%x[in], #0x20]\n"
"ldr x23, [%x[in], #0x28]\n"
- "add x25, x25, %x[row_offset]\n"
- "add x24, x24, %x[row_offset]\n"
"ldr x22, [%x[in], #0x30]\n"
"ldr x21, [%x[in], #0x38]\n"
+ "add x28, x28, %x[row_offset]\n"
+ "add x27, x27, %x[row_offset]\n"
+ "add x26, x26, %x[row_offset]\n"
+ "add x25, x25, %x[row_offset]\n"
+ "add x24, x24, %x[row_offset]\n"
"add x23, x23, %x[row_offset]\n"
"add x22, x22, %x[row_offset]\n"
"add x21, x21, %x[row_offset]\n"
"beq 1f\n"
"cmp %x[height], #0x2\n"
+ "mov x21, x28\n"
"csel x27, x27, x28, GE\n"
"csel x26, x26, x28, GT\n"
"cmp %x[height], #0x4\n"
"csel x25, x25, x28, GE\n"
"csel x24, x24, x28, GT\n"
"cmp %x[height], #0x6\n"
- "mov x21, x28\n"
"csel x23, x23, x28, GE\n"
"csel x22, x22, x28, GT\n"
"1:" // no_pointer_adj
@@ -79,226 +79,226 @@ void interleave_block<8, 8, VLType::None, false>(
"prfm pldl1keep, [x21, #0x40]\n"
"blt 3f\n"
"2:" // Main loop head
- "ldr q20, [x28], #0x10\n"
- "ldr q19, [x27], #0x10\n"
+ "ldr q21, [x28], #0x10\n"
+ "ldr q16, [x27], #0x10\n"
"subs %x[width], %x[width], #0x10\n"
+ "ldr q20, [x26], #0x10\n"
+ "ldr q19, [x25], #0x10\n"
"cmp %x[width], #0x10\n"
- "ldr q25, [x26], #0x10\n"
- "ldr q24, [x25], #0x10\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
- "zip1 v18.2d, v25.2d, v24.2d\n"
- "ldr q23, [x24], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "zip1 v17.2d, v23.2d, v22.2d\n"
- "zip2 v21.2d, v20.2d, v19.2d\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q19, [x21], #0x10\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v20.2d, v19.2d\n"
+ "ldr q24, [x24], #0x10\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip1 v17.2d, v21.2d, v16.2d\n"
+ "zip2 v21.2d, v21.2d, v16.2d\n"
"prfm pldl1keep, [x28, #0x70]\n"
"prfm pldl1keep, [x27, #0x70]\n"
- "str q18, [%x[out_ptr], #0x10]\n"
- "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip1 v16.2d, v20.2d, v19.2d\n"
+ "zip2 v20.2d, v20.2d, v19.2d\n"
"prfm pldl1keep, [x26, #0x70]\n"
"prfm pldl1keep, [x25, #0x70]\n"
- "str q17, [%x[out_ptr], #0x20]\n"
- "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v18.2d\n"
+ "zip2 v18.2d, v24.2d, v18.2d\n"
"prfm pldl1keep, [x24, #0x70]\n"
"prfm pldl1keep, [x23, #0x70]\n"
- "str q16, [%x[out_ptr], #0x30]\n"
- "zip2 v16.2d, v20.2d, v19.2d\n"
+ "str q17, [%x[out_ptr], #0x0]\n"
+ "zip1 v17.2d, v23.2d, v22.2d\n"
"prfm pldl1keep, [x22, #0x70]\n"
"prfm pldl1keep, [x21, #0x70]\n"
+ "str q16, [%x[out_ptr], #0x10]\n"
+ "zip2 v16.2d, v23.2d, v22.2d\n"
+ "str q19, [%x[out_ptr], #0x20]\n"
+ "str q17, [%x[out_ptr], #0x30]\n"
"str q21, [%x[out_ptr], #0x40]\n"
- "str q18, [%x[out_ptr], #0x50]\n"
- "str q17, [%x[out_ptr], #0x60]\n"
+ "str q20, [%x[out_ptr], #0x50]\n"
+ "str q18, [%x[out_ptr], #0x60]\n"
"str q16, [%x[out_ptr], #0x70]\n"
"add %x[out_ptr], %x[out_ptr], #0x80\n"
"bge 2b\n"
"3:" // Main loop skip
"cbz %x[width], 12f\n"
"tbz %x[width], #3, 7f\n"
- "ldr d25, [x28], #0x8\n"
- "ldr d24, [x27], #0x8\n"
- "ldr d23, [x26], #0x8\n"
- "ldr d22, [x25], #0x8\n"
- "ldr d21, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d19, [x22], #0x8\n"
- "ldr d18, [x21], #0x8\n"
+ "ldr d27, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d25, [x26], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d20, [x21], #0x8\n"
"tbz %x[width], #2, 5f\n"
- "ld1 { v25.s }[2], [x28], #0x4\n"
- "ld1 { v24.s }[2], [x27], #0x4\n"
- "ld1 { v23.s }[2], [x26], #0x4\n"
- "ld1 { v22.s }[2], [x25], #0x4\n"
- "ld1 { v21.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v19.s }[2], [x22], #0x4\n"
- "ld1 { v18.s }[2], [x21], #0x4\n"
+ "ld1 { v27.s }[2], [x28], #0x4\n"
+ "ld1 { v26.s }[2], [x27], #0x4\n"
+ "ld1 { v25.s }[2], [x26], #0x4\n"
+ "ld1 { v24.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v21.s }[2], [x22], #0x4\n"
+ "ld1 { v20.s }[2], [x21], #0x4\n"
"tbz %x[width], #1, 4f\n"
- "ld1 { v25.h }[6], [x28], #0x2\n"
- "ld1 { v24.h }[6], [x27], #0x2\n"
+ "ld1 { v27.h }[6], [x28], #0x2\n"
+ "ld1 { v26.h }[6], [x27], #0x2\n"
"mov x20, #0x2\n"
- "ld1 { v23.h }[6], [x26], #0x2\n"
- "ld1 { v22.h }[6], [x25], #0x2\n"
- "ld1 { v21.h }[6], [x24], #0x2\n"
- "ld1 { v20.h }[6], [x23], #0x2\n"
- "ld1 { v19.h }[6], [x22], #0x2\n"
- "ld1 { v18.h }[6], [x21], #0x2\n"
+ "ld1 { v25.h }[6], [x26], #0x2\n"
+ "ld1 { v24.h }[6], [x25], #0x2\n"
+ "ld1 { v23.h }[6], [x24], #0x2\n"
+ "ld1 { v22.h }[6], [x23], #0x2\n"
+ "ld1 { v21.h }[6], [x22], #0x2\n"
+ "ld1 { v20.h }[6], [x21], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v25.b }[14], [x28]\n"
- "ld1 { v24.b }[14], [x27]\n"
- "ld1 { v23.b }[14], [x26]\n"
- "ld1 { v22.b }[14], [x25]\n"
- "ld1 { v21.b }[14], [x24]\n"
- "ld1 { v20.b }[14], [x23]\n"
- "ld1 { v19.b }[14], [x22]\n"
- "ld1 { v18.b }[14], [x21]\n"
+ "ld1 { v27.b }[14], [x28]\n"
+ "ld1 { v26.b }[14], [x27]\n"
+ "ld1 { v25.b }[14], [x26]\n"
+ "ld1 { v24.b }[14], [x25]\n"
+ "ld1 { v23.b }[14], [x24]\n"
+ "ld1 { v22.b }[14], [x23]\n"
+ "ld1 { v21.b }[14], [x22]\n"
+ "ld1 { v20.b }[14], [x21]\n"
"b 11f\n"
"4:" // odd_loads_1_12
"mov x20, #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v25.b }[12], [x28]\n"
- "ld1 { v24.b }[12], [x27]\n"
- "ld1 { v23.b }[12], [x26]\n"
- "ld1 { v22.b }[12], [x25]\n"
- "ld1 { v21.b }[12], [x24]\n"
- "ld1 { v20.b }[12], [x23]\n"
- "ld1 { v19.b }[12], [x22]\n"
- "ld1 { v18.b }[12], [x21]\n"
+ "ld1 { v27.b }[12], [x28]\n"
+ "ld1 { v26.b }[12], [x27]\n"
+ "ld1 { v25.b }[12], [x26]\n"
+ "ld1 { v24.b }[12], [x25]\n"
+ "ld1 { v23.b }[12], [x24]\n"
+ "ld1 { v22.b }[12], [x23]\n"
+ "ld1 { v21.b }[12], [x22]\n"
+ "ld1 { v20.b }[12], [x21]\n"
"b 11f\n"
"5:" // odd_loads_2_8
"tbz %x[width], #1, 6f\n"
- "ld1 { v25.h }[4], [x28], #0x2\n"
- "ld1 { v24.h }[4], [x27], #0x2\n"
+ "ld1 { v27.h }[4], [x28], #0x2\n"
+ "ld1 { v26.h }[4], [x27], #0x2\n"
"mov x20, #0x2\n"
- "ld1 { v23.h }[4], [x26], #0x2\n"
- "ld1 { v22.h }[4], [x25], #0x2\n"
- "ld1 { v21.h }[4], [x24], #0x2\n"
- "ld1 { v20.h }[4], [x23], #0x2\n"
- "ld1 { v19.h }[4], [x22], #0x2\n"
- "ld1 { v18.h }[4], [x21], #0x2\n"
+ "ld1 { v25.h }[4], [x26], #0x2\n"
+ "ld1 { v24.h }[4], [x25], #0x2\n"
+ "ld1 { v23.h }[4], [x24], #0x2\n"
+ "ld1 { v22.h }[4], [x23], #0x2\n"
+ "ld1 { v21.h }[4], [x22], #0x2\n"
+ "ld1 { v20.h }[4], [x21], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v25.b }[10], [x28]\n"
- "ld1 { v24.b }[10], [x27]\n"
- "ld1 { v23.b }[10], [x26]\n"
- "ld1 { v22.b }[10], [x25]\n"
- "ld1 { v21.b }[10], [x24]\n"
- "ld1 { v20.b }[10], [x23]\n"
- "ld1 { v19.b }[10], [x22]\n"
- "ld1 { v18.b }[10], [x21]\n"
+ "ld1 { v27.b }[10], [x28]\n"
+ "ld1 { v26.b }[10], [x27]\n"
+ "ld1 { v25.b }[10], [x26]\n"
+ "ld1 { v24.b }[10], [x25]\n"
+ "ld1 { v23.b }[10], [x24]\n"
+ "ld1 { v22.b }[10], [x23]\n"
+ "ld1 { v21.b }[10], [x22]\n"
+ "ld1 { v20.b }[10], [x21]\n"
"b 11f\n"
"6:" // odd_loads_1_8
"mov x20, #0x1\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v25.b }[8], [x28]\n"
- "ld1 { v24.b }[8], [x27]\n"
+ "ld1 { v27.b }[8], [x28]\n"
+ "ld1 { v26.b }[8], [x27]\n"
"mov x20, #0x2\n"
- "ld1 { v23.b }[8], [x26]\n"
- "ld1 { v22.b }[8], [x25]\n"
- "ld1 { v21.b }[8], [x24]\n"
- "ld1 { v20.b }[8], [x23]\n"
- "ld1 { v19.b }[8], [x22]\n"
- "ld1 { v18.b }[8], [x21]\n"
+ "ld1 { v25.b }[8], [x26]\n"
+ "ld1 { v24.b }[8], [x25]\n"
+ "ld1 { v23.b }[8], [x24]\n"
+ "ld1 { v22.b }[8], [x23]\n"
+ "ld1 { v21.b }[8], [x22]\n"
+ "ld1 { v20.b }[8], [x21]\n"
"b 11f\n"
"7:" // odd_loads_4_0
"tbz %x[width], #2, 9f\n"
- "ldr s25, [x28], #0x4\n"
- "ldr s24, [x27], #0x4\n"
- "ldr s23, [x26], #0x4\n"
- "ldr s22, [x25], #0x4\n"
- "ldr s21, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s19, [x22], #0x4\n"
- "ldr s18, [x21], #0x4\n"
+ "ldr s27, [x28], #0x4\n"
+ "ldr s26, [x27], #0x4\n"
+ "ldr s25, [x26], #0x4\n"
+ "ldr s24, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
+ "ldr s22, [x23], #0x4\n"
+ "ldr s21, [x22], #0x4\n"
+ "ldr s20, [x21], #0x4\n"
"tbz %x[width], #1, 8f\n"
- "ld1 { v25.h }[2], [x28], #0x2\n"
- "ld1 { v24.h }[2], [x27], #0x2\n"
+ "ld1 { v27.h }[2], [x28], #0x2\n"
+ "ld1 { v26.h }[2], [x27], #0x2\n"
"mov x20, #0x1\n"
- "ld1 { v23.h }[2], [x26], #0x2\n"
- "ld1 { v22.h }[2], [x25], #0x2\n"
- "ld1 { v21.h }[2], [x24], #0x2\n"
- "ld1 { v20.h }[2], [x23], #0x2\n"
- "ld1 { v19.h }[2], [x22], #0x2\n"
- "ld1 { v18.h }[2], [x21], #0x2\n"
+ "ld1 { v25.h }[2], [x26], #0x2\n"
+ "ld1 { v24.h }[2], [x25], #0x2\n"
+ "ld1 { v23.h }[2], [x24], #0x2\n"
+ "ld1 { v22.h }[2], [x23], #0x2\n"
+ "ld1 { v21.h }[2], [x22], #0x2\n"
+ "ld1 { v20.h }[2], [x21], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v25.b }[6], [x28]\n"
- "ld1 { v24.b }[6], [x27]\n"
- "ld1 { v23.b }[6], [x26]\n"
- "ld1 { v22.b }[6], [x25]\n"
- "ld1 { v21.b }[6], [x24]\n"
- "ld1 { v20.b }[6], [x23]\n"
- "ld1 { v19.b }[6], [x22]\n"
- "ld1 { v18.b }[6], [x21]\n"
+ "ld1 { v27.b }[6], [x28]\n"
+ "ld1 { v26.b }[6], [x27]\n"
+ "ld1 { v25.b }[6], [x26]\n"
+ "ld1 { v24.b }[6], [x25]\n"
+ "ld1 { v23.b }[6], [x24]\n"
+ "ld1 { v22.b }[6], [x23]\n"
+ "ld1 { v21.b }[6], [x22]\n"
+ "ld1 { v20.b }[6], [x21]\n"
"b 11f\n"
"8:" // odd_loads_1_4
"mov x20, #0x1\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v25.b }[4], [x28]\n"
- "ld1 { v24.b }[4], [x27]\n"
- "ld1 { v23.b }[4], [x26]\n"
- "ld1 { v22.b }[4], [x25]\n"
- "ld1 { v21.b }[4], [x24]\n"
- "ld1 { v20.b }[4], [x23]\n"
- "ld1 { v19.b }[4], [x22]\n"
- "ld1 { v18.b }[4], [x21]\n"
+ "ld1 { v27.b }[4], [x28]\n"
+ "ld1 { v26.b }[4], [x27]\n"
+ "ld1 { v25.b }[4], [x26]\n"
+ "ld1 { v24.b }[4], [x25]\n"
+ "ld1 { v23.b }[4], [x24]\n"
+ "ld1 { v22.b }[4], [x23]\n"
+ "ld1 { v21.b }[4], [x22]\n"
+ "ld1 { v20.b }[4], [x21]\n"
"b 11f\n"
"9:" // odd_loads_2_0
"tbz %x[width], #1, 10f\n"
- "ldr h25, [x28], #0x2\n"
- "ldr h24, [x27], #0x2\n"
+ "ldr h27, [x28], #0x2\n"
+ "ldr h26, [x27], #0x2\n"
"mov x20, #0x1\n"
- "ldr h23, [x26], #0x2\n"
- "ldr h22, [x25], #0x2\n"
- "ldr h21, [x24], #0x2\n"
- "ldr h20, [x23], #0x2\n"
- "ldr h19, [x22], #0x2\n"
- "ldr h18, [x21], #0x2\n"
+ "ldr h25, [x26], #0x2\n"
+ "ldr h24, [x25], #0x2\n"
+ "ldr h23, [x24], #0x2\n"
+ "ldr h22, [x23], #0x2\n"
+ "ldr h21, [x22], #0x2\n"
+ "ldr h20, [x21], #0x2\n"
"tbz %x[width], #0, 11f\n"
- "ld1 { v25.b }[2], [x28]\n"
- "ld1 { v24.b }[2], [x27]\n"
- "ld1 { v23.b }[2], [x26]\n"
- "ld1 { v22.b }[2], [x25]\n"
- "ld1 { v21.b }[2], [x24]\n"
- "ld1 { v20.b }[2], [x23]\n"
- "ld1 { v19.b }[2], [x22]\n"
- "ld1 { v18.b }[2], [x21]\n"
+ "ld1 { v27.b }[2], [x28]\n"
+ "ld1 { v26.b }[2], [x27]\n"
+ "ld1 { v25.b }[2], [x26]\n"
+ "ld1 { v24.b }[2], [x25]\n"
+ "ld1 { v23.b }[2], [x24]\n"
+ "ld1 { v22.b }[2], [x23]\n"
+ "ld1 { v21.b }[2], [x22]\n"
+ "ld1 { v20.b }[2], [x21]\n"
"b 11f\n"
"10:" // odd_loads_1_0
- "ldr b25, [x28, #0x0]\n"
- "ldr b24, [x27, #0x0]\n"
+ "ldr b27, [x28, #0x0]\n"
+ "ldr b26, [x27, #0x0]\n"
"mov x20, #0x1\n"
- "ldr b23, [x26, #0x0]\n"
- "ldr b22, [x25, #0x0]\n"
- "ldr b21, [x24, #0x0]\n"
- "ldr b20, [x23, #0x0]\n"
- "ldr b19, [x22, #0x0]\n"
- "ldr b18, [x21, #0x0]\n"
+ "ldr b25, [x26, #0x0]\n"
+ "ldr b24, [x25, #0x0]\n"
+ "ldr b23, [x24, #0x0]\n"
+ "ldr b22, [x23, #0x0]\n"
+ "ldr b21, [x22, #0x0]\n"
+ "ldr b20, [x21, #0x0]\n"
"11:" // Odd load end
"subs x20, x20, #0x1\n"
- "zip1 v16.2d, v25.2d, v24.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip1 v16.2d, v23.2d, v22.2d\n"
- "str q16, [%x[out_ptr], #0x10]\n"
- "zip1 v17.2d, v21.2d, v20.2d\n"
- "zip1 v16.2d, v19.2d, v18.2d\n"
+ "zip1 v19.2d, v27.2d, v26.2d\n"
+ "zip1 v18.2d, v25.2d, v24.2d\n"
+ "zip1 v17.2d, v23.2d, v22.2d\n"
+ "zip1 v16.2d, v21.2d, v20.2d\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"str q17, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"beq 12f\n"
- "zip2 v16.2d, v25.2d, v24.2d\n"
- "str q16, [%x[out_ptr], #0x0]\n"
- "zip2 v16.2d, v23.2d, v22.2d\n"
- "str q16, [%x[out_ptr], #0x10]\n"
- "zip2 v17.2d, v21.2d, v20.2d\n"
- "zip2 v16.2d, v19.2d, v18.2d\n"
+ "zip2 v19.2d, v27.2d, v26.2d\n"
+ "zip2 v18.2d, v25.2d, v24.2d\n"
+ "zip2 v17.2d, v23.2d, v22.2d\n"
+ "zip2 v16.2d, v21.2d, v20.2d\n"
+ "str q19, [%x[out_ptr], #0x0]\n"
+ "str q18, [%x[out_ptr], #0x10]\n"
"str q17, [%x[out_ptr], #0x20]\n"
"str q16, [%x[out_ptr], #0x30]\n"
"add %x[out_ptr], %x[out_ptr], #0x40\n"
"12:" // Odds skip
: [out_ptr] "+&r" (out_ptr), [width] "+&r" (width)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset)
- : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
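The hunk above only renumbers the NEON registers used by this kernel's odd-width tail (v18-v25 become v20-v27, freeing v16-v19 for the zip results) and extends the clobber list to match; the control flow is untouched. That tail decomposes the leftover element count bit by bit: each `tbz` on a bit of `width` selects an 8-, 4-, 2- or 1-element load per row before the rows are zipped pairwise (zip1/zip2) into 128-bit blocks at out_ptr. A minimal scalar sketch of the same decomposition follows; the names (load_tail, row, dst) are illustrative and do not appear in the kernel.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Copy the last (width % 16) one-byte elements of a row, mirroring the
// tbz-driven 8/4/2/1-element loads in the assembly tail above.
static void load_tail(const int8_t *row, int8_t *dst, size_t width)
{
    size_t pos = 0;
    if (width & 8) { std::memcpy(dst + pos, row + pos, 8); pos += 8; } // "ldr d..." (bit #3)
    if (width & 4) { std::memcpy(dst + pos, row + pos, 4); pos += 4; } // tbz bit #2
    if (width & 2) { std::memcpy(dst + pos, row + pos, 2); pos += 2; } // tbz bit #1
    if (width & 1) { dst[pos] = row[pos]; }                            // tbz bit #0
}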
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp
index a5f4754d3d..ff171984e7 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave1VL_block2_fp32_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,103 +32,103 @@ void interleave_block<1, 2, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x22, ALL, MUL #2\n"
"sub x28, %x[width], #0x1\n"
+ "mov x27, #0x0\n"
+ "cntw x22, ALL, MUL #2\n"
"cntw x21, ALL, MUL #2\n"
"sub x20, x22, #0x1\n"
"whilelt p10.s, XZR, %x[height]\n"
"add x28, x28, x21\n"
- "ands x27, %x[width], x20\n"
+ "ands x26, %x[width], x20\n"
"udiv x28, x28, x21\n"
- "csel x27, x27, x22, NE\n"
- "mov x26, #0x0\n"
+ "csel x26, x26, x22, NE\n"
"and x25, x28, #0x1\n"
"sub x28, x28, #0x1\n"
- "add x27, x27, #0x1\n"
+ "add x26, x26, #0x1\n"
"mov x20, %x[width]\n"
"ptrue p0.b\n"
"mov x24, %x[outptr_raw]\n"
"mov x23, %x[row_offset]\n"
"cntw x22\n"
"lsr x28, x28, #0x1\n"
- "lsr x27, x27, #0x1\n"
+ "lsr x26, x26, #0x1\n"
"mov x12, #0x0\n"
- ".inst 0x25b44751 // whilelt pn9.s, x26, x20, VLx2\n"
+ ".inst 0x25b44771 // whilelt pn9.s, x27, x20, VLx2\n"
"mov x21, %x[in]\n"
"1:" // Width loop: Preamble: Loop
"ldr x20, [x21], #0x8\n"
".inst 0x25306548 // psel p8.s, p9.s/Z, p10.s[w12]\n"
- ".inst 0xa0174286 // ld1w { z6.s-z7.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
- ".inst 0xc160e0c6 // bfcvt z6.h, { z6.s-z7.s }\n"
- ".inst 0xc08000c0 // mova za0h.s[x12], p0/M, z6.s\n"
+ ".inst 0xa0174294 // ld1w { z20.s-z21.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xc160e294 // bfcvt z20.h, { z20.s-z21.s }\n"
+ ".inst 0xc0800280 // mova za0h.s[x12], p0/M, z20.s\n"
"add x12, x12, #0x1\n"
"cmp x12, x22\n"
"blt 1b\n"
"incw x23, ALL, MUL #2\n"
- "incw x26, ALL, MUL #2\n"
+ "incw x27, ALL, MUL #2\n"
"cbz x28, 5f\n"
"2:" // Width loop
"mov x20, %x[width]\n"
"mov x12, #0x0\n"
- ".inst 0x25b44751 // whilelt pn9.s, x26, x20, VLx2\n"
+ ".inst 0x25b44771 // whilelt pn9.s, x27, x20, VLx2\n"
"mov x21, %x[in]\n"
"3:" // Width loop: Odd: Loop
"ldr x20, [x21], #0x8\n"
".inst 0x25306548 // psel p8.s, p9.s/Z, p10.s[w12]\n"
- ".inst 0xa017429e // ld1w { z30.s-z31.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
- ".inst 0xc160e3de // bfcvt z30.h, { z30.s-z31.s }\n"
- ".inst 0xc08003c8 // mova za2h.s[x12], p0/M, z30.s\n"
- ".inst 0xc082800f // mova z15.s, p0/M, za0v.s[x12]\n"
+ ".inst 0xc0828018 // mova z24.s, p0/M, za0v.s[x12]\n"
+ ".inst 0xa0174294 // ld1w { z20.s-z21.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ "st1w { z24.s }, p0, [x24]\n"
+ "addvl x24, x24, #1\n"
+ ".inst 0xc160e294 // bfcvt z20.h, { z20.s-z21.s }\n"
+ ".inst 0xc0800288 // mova za2h.s[x12], p0/M, z20.s\n"
"add x12, x12, #0x1\n"
"cmp x12, x22\n"
- "st1w { z15.s }, p0, [x24]\n"
- "addvl x24, x24, #1\n"
"blt 3b\n"
- "incw x26, ALL, MUL #2\n"
+ "incw x27, ALL, MUL #2\n"
"mov x20, %x[width]\n"
"incw x23, ALL, MUL #2\n"
"mov x12, #0x0\n"
- ".inst 0x25b44751 // whilelt pn9.s, x26, x20, VLx2\n"
+ ".inst 0x25b44771 // whilelt pn9.s, x27, x20, VLx2\n"
"mov x21, %x[in]\n"
"4:" // Width loop: Even: Loop
"ldr x20, [x21], #0x8\n"
".inst 0x25306548 // psel p8.s, p9.s/Z, p10.s[w12]\n"
- ".inst 0xa0174298 // ld1w { z24.s-z25.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
- ".inst 0xc160e318 // bfcvt z24.h, { z24.s-z25.s }\n"
- ".inst 0xc0800300 // mova za0h.s[x12], p0/M, z24.s\n"
- ".inst 0xc0828110 // mova z16.s, p0/M, za2v.s[x12]\n"
+ ".inst 0xc082810c // mova z12.s, p0/M, za2v.s[x12]\n"
+ ".inst 0xa0174284 // ld1w { z4.s-z5.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ "st1w { z12.s }, p0, [x24]\n"
+ "addvl x24, x24, #1\n"
+ ".inst 0xc160e084 // bfcvt z4.h, { z4.s-z5.s }\n"
+ ".inst 0xc0800080 // mova za0h.s[x12], p0/M, z4.s\n"
"add x12, x12, #0x1\n"
"cmp x12, x22\n"
- "st1w { z16.s }, p0, [x24]\n"
- "addvl x24, x24, #1\n"
"blt 4b\n"
"subs x28, x28, #0x1\n"
"incw x23, ALL, MUL #2\n"
- "incw x26, ALL, MUL #2\n"
+ "incw x27, ALL, MUL #2\n"
"bgt 2b\n"
"5:" // Width loop: Tails
"cbnz x25, 8f\n"
"mov x20, %x[width]\n"
"mov x12, #0x0\n"
- ".inst 0x25b44751 // whilelt pn9.s, x26, x20, VLx2\n"
+ ".inst 0x25b44771 // whilelt pn9.s, x27, x20, VLx2\n"
"mov x21, %x[in]\n"
"6:" // Width loop: Tails: Even: Odd: Loop
"ldr x20, [x21], #0x8\n"
".inst 0x25306548 // psel p8.s, p9.s/Z, p10.s[w12]\n"
+ ".inst 0xc0828010 // mova z16.s, p0/M, za0v.s[x12]\n"
".inst 0xa017428e // ld1w { z14.s-z15.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ "st1w { z16.s }, p0, [x24]\n"
+ "addvl x24, x24, #1\n"
".inst 0xc160e1ce // bfcvt z14.h, { z14.s-z15.s }\n"
".inst 0xc08001c8 // mova za2h.s[x12], p0/M, z14.s\n"
- ".inst 0xc0828010 // mova z16.s, p0/M, za0v.s[x12]\n"
"add x12, x12, #0x1\n"
"cmp x12, x22\n"
- "st1w { z16.s }, p0, [x24]\n"
- "addvl x24, x24, #1\n"
"blt 6b\n"
"mov x12, #0x0\n"
"7:" // Width loop: Tails: Even: Even: Loop
".inst 0xc0828110 // mova z16.s, p0/M, za2v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x27\n"
+ "cmp x12, x26\n"
"st1w { z16.s }, p0, [x24]\n"
"addvl x24, x24, #1\n"
"blt 7b\n"
@@ -138,7 +138,7 @@ void interleave_block<1, 2, VLType::SME, false>(
"9:" // Width loop: Tails: Odd: Loop
".inst 0xc0828010 // mova z16.s, p0/M, za0v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x27\n"
+ "cmp x12, x26\n"
"st1w { z16.s }, p0, [x24]\n"
"addvl x24, x24, #1\n"
"blt 9b\n"
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp
index c1d0ac5bc7..6d3601d165 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave2VL_block2_fp32_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,126 +32,126 @@ void interleave_block<2, 2, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
+ "sub x10, %x[width], #0x1\n"
+ "mov x9, #0x0\n"
"cntw x22, ALL, MUL #2\n"
- "cntw x9\n"
- "sub x28, %x[width], #0x1\n"
+ "cntw x28\n"
"cntw x21, ALL, MUL #2\n"
"sub x20, x22, #0x1\n"
".inst 0x25207815 // ptrue pn13.b\n"
"whilelt p12.s, XZR, %x[height]\n"
- "whilelt p11.s, x9, %x[height]\n"
- "add x28, x28, x21\n"
+ "whilelt p11.s, x28, %x[height]\n"
+ "add x10, x10, x21\n"
"ands x27, %x[width], x20\n"
- "udiv x28, x28, x21\n"
+ "udiv x10, x10, x21\n"
"csel x27, x27, x22, NE\n"
- "mov x26, #0x0\n"
- "and x25, x28, #0x1\n"
- "sub x28, x28, #0x1\n"
+ "and x26, x10, #0x1\n"
+ "sub x10, x10, #0x1\n"
"add x27, x27, #0x1\n"
"mov x20, %x[width]\n"
- "mov x24, %x[in]\n"
+ "mov x25, %x[in]\n"
"ptrue p0.b\n"
- "mov x23, %x[outptr_raw]\n"
- "mov x22, %x[row_offset]\n"
- "lsr x28, x28, #0x1\n"
+ "mov x24, %x[outptr_raw]\n"
+ "mov x23, %x[row_offset]\n"
+ "lsr x10, x10, #0x1\n"
"lsr x27, x27, #0x1\n"
"mov x12, #0x0\n"
- ".inst 0x25b44752 // whilelt pn10.s, x26, x20, VLx2\n"
- "add x21, x24, x9, LSL #3\n"
+ ".inst 0x25b44532 // whilelt pn10.s, x9, x20, VLx2\n"
+ "add x22, x25, x28, LSL #3\n"
"1:" // Width loop: Preamble: Loop
- "ldr x20, [x24], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
".inst 0x25306989 // psel p9.s, p10.s/Z, p12.s[w12]\n"
".inst 0x25306968 // psel p8.s, p10.s/Z, p11.s[w12]\n"
- ".inst 0xa0164698 // ld1w { z24.s-z25.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa0164296 // ld1w { z22.s-z23.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
- ".inst 0xc160e318 // bfcvt z24.h, { z24.s-z25.s }\n"
- ".inst 0xc160e2d6 // bfcvt z22.h, { z22.s-z23.s }\n"
- ".inst 0xc0800300 // mova za0h.s[x12], p0/M, z24.s\n"
- ".inst 0xc08002c4 // mova za1h.s[x12], p0/M, z22.s\n"
+ "ldr x20, [x22], #0x8\n"
+ ".inst 0xa01746b4 // ld1w { z20.s-z21.s }, pn9.s/Z, [x21, x23, LSL #2]\n"
+ ".inst 0xa017428c // ld1w { z12.s-z13.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xc160e294 // bfcvt z20.h, { z20.s-z21.s }\n"
+ ".inst 0xc160e18c // bfcvt z12.h, { z12.s-z13.s }\n"
+ ".inst 0xc0800280 // mova za0h.s[x12], p0/M, z20.s\n"
+ ".inst 0xc0800184 // mova za1h.s[x12], p0/M, z12.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
+ "cmp x12, x28\n"
"blt 1b\n"
- "incw x22, ALL, MUL #2\n"
- "incw x26, ALL, MUL #2\n"
- "cbz x28, 5f\n"
+ "incw x23, ALL, MUL #2\n"
+ "incw x9, ALL, MUL #2\n"
+ "cbz x10, 5f\n"
"2:" // Width loop
"mov x20, %x[width]\n"
- "mov x24, %x[in]\n"
+ "mov x25, %x[in]\n"
"mov x12, #0x0\n"
- ".inst 0x25b44752 // whilelt pn10.s, x26, x20, VLx2\n"
- "add x21, x24, x9, LSL #3\n"
+ ".inst 0x25b44532 // whilelt pn10.s, x9, x20, VLx2\n"
+ "add x22, x25, x28, LSL #3\n"
"3:" // Width loop: Odd: Loop
- "ldr x20, [x24], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
".inst 0x25306989 // psel p9.s, p10.s/Z, p12.s[w12]\n"
".inst 0x25306968 // psel p8.s, p10.s/Z, p11.s[w12]\n"
- ".inst 0xa0164696 // ld1w { z22.s-z23.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016428a // ld1w { z10.s-z11.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xc0828007 // mova z7.s, p0/M, za0v.s[x12]\n"
+ "ldr x20, [x22], #0x8\n"
+ ".inst 0xc082808f // mova z15.s, p0/M, za1v.s[x12]\n"
+ ".inst 0xa01746b6 // ld1w { z22.s-z23.s }, pn9.s/Z, [x21, x23, LSL #2]\n"
+ ".inst 0xa017429a // ld1w { z26.s-z27.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xa1605707 // st1w { z7.s, z15.s }, pn13.b, [x24]\n"
+ "addvl x24, x24, #2\n"
".inst 0xc160e2d6 // bfcvt z22.h, { z22.s-z23.s }\n"
- ".inst 0xc160e14a // bfcvt z10.h, { z10.s-z11.s }\n"
+ ".inst 0xc160e35a // bfcvt z26.h, { z26.s-z27.s }\n"
".inst 0xc08002c8 // mova za2h.s[x12], p0/M, z22.s\n"
- ".inst 0xc080014c // mova za3h.s[x12], p0/M, z10.s\n"
- ".inst 0xc0828008 // mova z8.s, p0/M, za0v.s[x12]\n"
- ".inst 0xc0828089 // mova z9.s, p0/M, za1v.s[x12]\n"
+ ".inst 0xc080034c // mova za3h.s[x12], p0/M, z26.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
- ".inst 0xa06056e8 // st1w { z8.s-z9.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ "cmp x12, x28\n"
"blt 3b\n"
- "incw x26, ALL, MUL #2\n"
+ "incw x9, ALL, MUL #2\n"
"mov x20, %x[width]\n"
- "mov x24, %x[in]\n"
- "incw x22, ALL, MUL #2\n"
+ "mov x25, %x[in]\n"
+ "incw x23, ALL, MUL #2\n"
"mov x12, #0x0\n"
- ".inst 0x25b44752 // whilelt pn10.s, x26, x20, VLx2\n"
- "add x21, x24, x9, LSL #3\n"
+ ".inst 0x25b44532 // whilelt pn10.s, x9, x20, VLx2\n"
+ "add x22, x25, x28, LSL #3\n"
"4:" // Width loop: Even: Loop
- "ldr x20, [x24], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
".inst 0x25306989 // psel p9.s, p10.s/Z, p12.s[w12]\n"
".inst 0x25306968 // psel p8.s, p10.s/Z, p11.s[w12]\n"
- ".inst 0xa016469a // ld1w { z26.s-z27.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016429e // ld1w { z30.s-z31.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
- ".inst 0xc160e35a // bfcvt z26.h, { z26.s-z27.s }\n"
- ".inst 0xc160e3de // bfcvt z30.h, { z30.s-z31.s }\n"
- ".inst 0xc0800340 // mova za0h.s[x12], p0/M, z26.s\n"
- ".inst 0xc08003c4 // mova za1h.s[x12], p0/M, z30.s\n"
- ".inst 0xc0828106 // mova z6.s, p0/M, za2v.s[x12]\n"
- ".inst 0xc082818e // mova z14.s, p0/M, za3v.s[x12]\n"
+ ".inst 0xc0828108 // mova z8.s, p0/M, za2v.s[x12]\n"
+ "ldr x20, [x22], #0x8\n"
+ ".inst 0xc0828189 // mova z9.s, p0/M, za3v.s[x12]\n"
+ ".inst 0xa01746ae // ld1w { z14.s-z15.s }, pn9.s/Z, [x21, x23, LSL #2]\n"
+ ".inst 0xa017428c // ld1w { z12.s-z13.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xa0605708 // st1w { z8.s-z9.s }, pn13.b, [x24]\n"
+ "addvl x24, x24, #2\n"
+ ".inst 0xc160e1ce // bfcvt z14.h, { z14.s-z15.s }\n"
+ ".inst 0xc160e18c // bfcvt z12.h, { z12.s-z13.s }\n"
+ ".inst 0xc08001c0 // mova za0h.s[x12], p0/M, z14.s\n"
+ ".inst 0xc0800184 // mova za1h.s[x12], p0/M, z12.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
- ".inst 0xa16056e6 // st1w { z6.s, z14.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ "cmp x12, x28\n"
"blt 4b\n"
- "subs x28, x28, #0x1\n"
- "incw x22, ALL, MUL #2\n"
- "incw x26, ALL, MUL #2\n"
+ "subs x10, x10, #0x1\n"
+ "incw x23, ALL, MUL #2\n"
+ "incw x9, ALL, MUL #2\n"
"bgt 2b\n"
"5:" // Width loop: Tails
- "cbnz x25, 8f\n"
+ "cbnz x26, 8f\n"
"mov x20, %x[width]\n"
- "mov x24, %x[in]\n"
+ "mov x25, %x[in]\n"
"mov x12, #0x0\n"
- ".inst 0x25b44752 // whilelt pn10.s, x26, x20, VLx2\n"
- "add x21, x24, x9, LSL #3\n"
+ ".inst 0x25b44532 // whilelt pn10.s, x9, x20, VLx2\n"
+ "add x22, x25, x28, LSL #3\n"
"6:" // Width loop: Tails: Even: Odd: Loop
- "ldr x20, [x24], #0x8\n"
+ "ldr x21, [x25], #0x8\n"
".inst 0x25306989 // psel p9.s, p10.s/Z, p12.s[w12]\n"
".inst 0x25306968 // psel p8.s, p10.s/Z, p11.s[w12]\n"
- ".inst 0xa016468c // ld1w { z12.s-z13.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016428e // ld1w { z14.s-z15.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xc0828003 // mova z3.s, p0/M, za0v.s[x12]\n"
+ "ldr x20, [x22], #0x8\n"
+ ".inst 0xc082808b // mova z11.s, p0/M, za1v.s[x12]\n"
+ ".inst 0xa01746ac // ld1w { z12.s-z13.s }, pn9.s/Z, [x21, x23, LSL #2]\n"
+ ".inst 0xa017428e // ld1w { z14.s-z15.s }, pn8.s/Z, [x20, x23, LSL #2]\n"
+ ".inst 0xa1605703 // st1w { z3.s, z11.s }, pn13.b, [x24]\n"
+ "addvl x24, x24, #2\n"
".inst 0xc160e18c // bfcvt z12.h, { z12.s-z13.s }\n"
".inst 0xc160e1ce // bfcvt z14.h, { z14.s-z15.s }\n"
".inst 0xc0800188 // mova za2h.s[x12], p0/M, z12.s\n"
".inst 0xc08001cc // mova za3h.s[x12], p0/M, z14.s\n"
- ".inst 0xc0828007 // mova z7.s, p0/M, za0v.s[x12]\n"
- ".inst 0xc082808f // mova z15.s, p0/M, za1v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
- ".inst 0xa16056e7 // st1w { z7.s, z15.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ "cmp x12, x28\n"
"blt 6b\n"
"mov x12, #0x0\n"
"7:" // Width loop: Tails: Even: Even: Loop
@@ -159,8 +159,8 @@ void interleave_block<2, 2, VLType::SME, false>(
".inst 0xc082818f // mova z15.s, p0/M, za3v.s[x12]\n"
"add x12, x12, #0x1\n"
"cmp x12, x27\n"
- ".inst 0xa06056ee // st1w { z14.s-z15.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ ".inst 0xa060570e // st1w { z14.s-z15.s }, pn13.b, [x24]\n"
+ "addvl x24, x24, #2\n"
"blt 7b\n"
"b 10f\n"
"8:" // Width loop: Tails: Odd
@@ -170,15 +170,15 @@ void interleave_block<2, 2, VLType::SME, false>(
".inst 0xc0828095 // mova z21.s, p0/M, za1v.s[x12]\n"
"add x12, x12, #0x1\n"
"cmp x12, x27\n"
- ".inst 0xa06056f4 // st1w { z20.s-z21.s }, pn13.b, [x23]\n"
- "addvl x23, x23, #2\n"
+ ".inst 0xa0605714 // st1w { z20.s-z21.s }, pn13.b, [x24]\n"
+ "addvl x24, x24, #2\n"
"blt 9b\n"
"10:" // End
- "mov %x[outptr_raw], x23\n"
+ "mov %x[outptr_raw], x24\n"
".inst 0xd503467f // SMSTOP\n"
: [outptr_raw] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
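The two fp32-to-bf16 interleave hunks above (1VL and 2VL) hoist each ZA-to-Z move and store ahead of the next load, rename several scratch registers, and widen the clobber lists; the data path is unchanged: two consecutive fp32 vectors are loaded, narrowed to one bf16 vector by a two-register BFCVT, and staged through ZA before the interleaved store. Below is a scalar sketch of the per-element narrowing, written as simple truncation - an assumption for illustration only, since the instruction's rounding behaviour is not spelled out in this diff.

#include <cstdint>
#include <cstring>

// One scalar step of the fp32 -> bf16 narrowing performed vector-wide by
// the "bfcvt zN.h, { zN.s-zN+1.s }" instructions above. Truncation is
// assumed here; the hardware may round rather than truncate.
static uint16_t fp32_to_bf16(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    return static_cast<uint16_t>(bits >> 16); // sign, exponent, top 7 mantissa bits
}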
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp
index 03575d7ff2..a8187f78e8 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme2_interleave4VL_block2_fp32_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,63 +32,63 @@ void interleave_block<4, 2, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
+ "sub x14, %x[width], #0x1\n"
+ "mov x13, %x[in]\n"
"cntw x23, ALL, MUL #2\n"
- "cntw x10\n"
+ "cntw x11\n"
"cntw x22, ALL, MUL #2\n"
"cntw x20, ALL, MUL #3\n"
"sub x21, x23, #0x1\n"
".inst 0x25207817 // ptrue pn15.b\n"
- "whilelt p1.s, XZR, %x[height]\n"
- "whilelt p14.s, x10, %x[height]\n"
- "whilelt p13.s, x22, %x[height]\n"
- "whilelt p12.s, x20, %x[height]\n"
- "sub x9, %x[width], #0x1\n"
+ "whilelt p2.s, XZR, %x[height]\n"
+ "whilelt p1.s, x11, %x[height]\n"
+ "whilelt p14.s, x22, %x[height]\n"
+ "whilelt p13.s, x20, %x[height]\n"
"cntw x20, ALL, MUL #2\n"
- "ands x28, %x[width], x21\n"
- "mov x27, %x[in]\n"
- "add x9, x9, x20\n"
- "csel x28, x28, x23, NE\n"
- "add x26, x27, x10, LSL #3\n"
- "mov x25, #0x0\n"
- "udiv x9, x9, x20\n"
- "add x28, x28, #0x1\n"
+ "ands x10, %x[width], x21\n"
+ "add x14, x14, x20\n"
+ "csel x10, x10, x23, NE\n"
+ "add x9, x13, x11, LSL #3\n"
+ "mov x28, #0x0\n"
+ "udiv x14, x14, x20\n"
+ "add x10, x10, #0x1\n"
"mov x20, %x[width]\n"
- "add x24, x26, x10, LSL #3\n"
+ "add x27, x9, x11, LSL #3\n"
"ptrue p0.b\n"
- "mov x23, %x[outptr_raw]\n"
- "mov x22, %x[row_offset]\n"
- "sub x9, x9, #0x1\n"
- "lsr x28, x28, #0x1\n"
+ "mov x26, %x[outptr_raw]\n"
+ "mov x25, %x[row_offset]\n"
+ "sub x14, x14, #0x1\n"
+ "lsr x10, x10, #0x1\n"
"mov x12, #0x0\n"
- ".inst 0x25b44733 // whilelt pn11.s, x25, x20, VLx2\n"
- "add x21, x24, x10, LSL #3\n"
+ ".inst 0x25b44794 // whilelt pn12.s, x28, x20, VLx2\n"
+ "add x24, x27, x11, LSL #3\n"
"1:" // Width loop: Preamble: Loop
- "ldr x20, [x27], #0x8\n"
- ".inst 0x25306c28 // psel p8.s, p11.s/Z, p1.s[w12]\n"
- ".inst 0x25306dca // psel p10.s, p11.s/Z, p14.s[w12]\n"
- ".inst 0xa0164298 // ld1w { z24.s-z25.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0x25306da9 // psel p9.s, p11.s/Z, p13.s[w12]\n"
- ".inst 0x25306d88 // psel p8.s, p11.s/Z, p12.s[w12]\n"
- ".inst 0xa0164a82 // ld1w { z2.s-z3.s }, pn10.s/Z, [x20, x22, LSL #2]\n"
+ "ldr x23, [x13], #0x8\n"
+ ".inst 0x2530704b // psel p11.s, p12.s/Z, p2.s[w12]\n"
+ ".inst 0x2530702a // psel p10.s, p12.s/Z, p1.s[w12]\n"
+ "ldr x22, [x9], #0x8\n"
+ ".inst 0x253071c9 // psel p9.s, p12.s/Z, p14.s[w12]\n"
+ ".inst 0x253071a8 // psel p8.s, p12.s/Z, p13.s[w12]\n"
+ "ldr x21, [x27], #0x8\n"
"ldr x20, [x24], #0x8\n"
- ".inst 0xa016468a // ld1w { z10.s-z11.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
- ".inst 0xc160e318 // bfcvt z24.h, { z24.s-z25.s }\n"
- ".inst 0xc160e042 // bfcvt z2.h, { z2.s-z3.s }\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016428c // ld1w { z12.s-z13.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xa0194eea // ld1w { z10.s-z11.s }, pn11.s/Z, [x23, x25, LSL #2]\n"
+ ".inst 0xa0194ada // ld1w { z26.s-z27.s }, pn10.s/Z, [x22, x25, LSL #2]\n"
+ ".inst 0xa01946be // ld1w { z30.s-z31.s }, pn9.s/Z, [x21, x25, LSL #2]\n"
+ ".inst 0xa019428c // ld1w { z12.s-z13.s }, pn8.s/Z, [x20, x25, LSL #2]\n"
".inst 0xc160e14a // bfcvt z10.h, { z10.s-z11.s }\n"
+ ".inst 0xc160e35a // bfcvt z26.h, { z26.s-z27.s }\n"
+ ".inst 0xc0800140 // mova za0h.s[x12], p0/M, z10.s\n"
+ ".inst 0xc160e3de // bfcvt z30.h, { z30.s-z31.s }\n"
+ ".inst 0xc0800344 // mova za1h.s[x12], p0/M, z26.s\n"
".inst 0xc160e18c // bfcvt z12.h, { z12.s-z13.s }\n"
- ".inst 0xc0800300 // mova za0h.s[x12], p0/M, z24.s\n"
- ".inst 0xc0800044 // mova za1h.s[x12], p0/M, z2.s\n"
- ".inst 0xc0800148 // mova za2h.s[x12], p0/M, z10.s\n"
+ ".inst 0xc08003c8 // mova za2h.s[x12], p0/M, z30.s\n"
".inst 0xc080018c // mova za3h.s[x12], p0/M, z12.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
+ "cmp x12, x11\n"
"blt 1b\n"
- "incw x22, ALL, MUL #2\n"
"incw x25, ALL, MUL #2\n"
- "cbz x9, 5f\n"
+ "incw x28, ALL, MUL #2\n"
+ "cbz x14, 5f\n"
"2:" // Width loop
"mov x12, #0x0\n"
"3:" // Width loop: Store: Loop
@@ -97,44 +97,44 @@ void interleave_block<4, 2, VLType::SME, false>(
".inst 0xc0828119 // mova z25.s, p0/M, za2v.s[x12]\n"
".inst 0xc082819d // mova z29.s, p0/M, za3v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- ".inst 0xa160def1 // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x23]\n"
- "addvl x23, x23, #4\n"
+ "cmp x12, x11\n"
+ ".inst 0xa160df51 // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x26]\n"
+ "addvl x26, x26, #4\n"
"blt 3b\n"
- "mov x27, %x[in]\n"
- "add x26, x27, x10, LSL #3\n"
+ "mov x13, %x[in]\n"
"mov x20, %x[width]\n"
- "add x24, x26, x10, LSL #3\n"
+ "add x9, x13, x11, LSL #3\n"
"mov x12, #0x0\n"
- ".inst 0x25b44733 // whilelt pn11.s, x25, x20, VLx2\n"
- "add x21, x24, x10, LSL #3\n"
+ "add x27, x9, x11, LSL #3\n"
+ ".inst 0x25b44794 // whilelt pn12.s, x28, x20, VLx2\n"
+ "add x24, x27, x11, LSL #3\n"
"4:" // Width loop: Load: Loop
- "ldr x20, [x27], #0x8\n"
- ".inst 0x25306c28 // psel p8.s, p11.s/Z, p1.s[w12]\n"
- ".inst 0x25306dca // psel p10.s, p11.s/Z, p14.s[w12]\n"
- ".inst 0xa016428c // ld1w { z12.s-z13.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0x25306da9 // psel p9.s, p11.s/Z, p13.s[w12]\n"
- ".inst 0x25306d88 // psel p8.s, p11.s/Z, p12.s[w12]\n"
- ".inst 0xa0164a8e // ld1w { z14.s-z15.s }, pn10.s/Z, [x20, x22, LSL #2]\n"
+ "ldr x23, [x13], #0x8\n"
+ ".inst 0x2530704b // psel p11.s, p12.s/Z, p2.s[w12]\n"
+ ".inst 0x2530702a // psel p10.s, p12.s/Z, p1.s[w12]\n"
+ "ldr x22, [x9], #0x8\n"
+ ".inst 0x253071c9 // psel p9.s, p12.s/Z, p14.s[w12]\n"
+ ".inst 0x253071a8 // psel p8.s, p12.s/Z, p13.s[w12]\n"
+ "ldr x21, [x27], #0x8\n"
"ldr x20, [x24], #0x8\n"
- ".inst 0xa0164692 // ld1w { z18.s-z19.s }, pn9.s/Z, [x20, x22, LSL #2]\n"
+ ".inst 0xa0194eec // ld1w { z12.s-z13.s }, pn11.s/Z, [x23, x25, LSL #2]\n"
+ ".inst 0xa0194ace // ld1w { z14.s-z15.s }, pn10.s/Z, [x22, x25, LSL #2]\n"
+ ".inst 0xa01946b2 // ld1w { z18.s-z19.s }, pn9.s/Z, [x21, x25, LSL #2]\n"
+ ".inst 0xa019429e // ld1w { z30.s-z31.s }, pn8.s/Z, [x20, x25, LSL #2]\n"
".inst 0xc160e18c // bfcvt z12.h, { z12.s-z13.s }\n"
".inst 0xc160e1ce // bfcvt z14.h, { z14.s-z15.s }\n"
- "ldr x20, [x21], #0x8\n"
- ".inst 0xa016429e // ld1w { z30.s-z31.s }, pn8.s/Z, [x20, x22, LSL #2]\n"
- ".inst 0xc160e252 // bfcvt z18.h, { z18.s-z19.s }\n"
- ".inst 0xc160e3de // bfcvt z30.h, { z30.s-z31.s }\n"
".inst 0xc0800180 // mova za0h.s[x12], p0/M, z12.s\n"
+ ".inst 0xc160e252 // bfcvt z18.h, { z18.s-z19.s }\n"
".inst 0xc08001c4 // mova za1h.s[x12], p0/M, z14.s\n"
+ ".inst 0xc160e3de // bfcvt z30.h, { z30.s-z31.s }\n"
".inst 0xc0800248 // mova za2h.s[x12], p0/M, z18.s\n"
".inst 0xc08003cc // mova za3h.s[x12], p0/M, z30.s\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
+ "cmp x12, x11\n"
"blt 4b\n"
- "subs x9, x9, #0x1\n"
- "incw x22, ALL, MUL #2\n"
+ "subs x14, x14, #0x1\n"
"incw x25, ALL, MUL #2\n"
+ "incw x28, ALL, MUL #2\n"
"bgt 2b\n"
"5:" // Width loop: Tails
"mov x12, #0x0\n"
@@ -144,16 +144,16 @@ void interleave_block<4, 2, VLType::SME, false>(
".inst 0xc0828119 // mova z25.s, p0/M, za2v.s[x12]\n"
".inst 0xc082819d // mova z29.s, p0/M, za3v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x28\n"
- ".inst 0xa160def1 // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x23]\n"
- "addvl x23, x23, #4\n"
+ "cmp x12, x10\n"
+ ".inst 0xa160df51 // st1w { z17.s, z21.s, z25.s, z29.s }, pn15.b, [x26]\n"
+ "addvl x26, x26, #4\n"
"blt 6b\n"
"7:" // End
- "mov %x[outptr_raw], x23\n"
+ "mov %x[outptr_raw], x26\n"
".inst 0xd503467f // SMSTOP\n"
: [outptr_raw] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp
index 453778ae3f..bc9f68ed72 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,25 +33,25 @@ void interleave_block<1, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"inch x21\n"
"cnth x11\n"
"sub x21, x21, #0x1\n"
- "udiv x21, x21, x11\n" // n_passes = ceildiv(width, VL<T>)
- "mov x20, %x[width]\n"
"sub x10, x11, #0x1\n"
- "sub x9, x21, #0x1\n"
+ "udiv x21, x21, x11\n" // n_passes = ceildiv(width, VL<T>)
"ands x10, x20, x10\n"
+ "sub x9, x21, #0x1\n"
"sub x28, x11, #0x2\n"
"lsl x20, %x[height], #0x1\n" // height * 2
"mov x27, #0x0\n"
"mov x26, %x[in]\n"
"lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x26, #0x0]\n"
- "and x24, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "and x25, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
"csel x10, x10, x11, NE\n"
- "ldr x23, [x26, #0x8]\n"
+ "ldr x24, [x26, #0x0]\n"
"ptrue p11.h\n"
"whilelt p10.h, XZR, x20\n"
+ "ldr x23, [x26, #0x8]\n"
"mov x22, %x[row_offset]\n"
"mov x21, %x[out]\n"
"whilelt p9.h, x27, %x[width]\n"
@@ -60,119 +60,119 @@ void interleave_block<1, 1, VLType::SME, false>(
"mov x12, #0x0\n"
"cbz x28, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25286141 // psel p1.h, p8.h/Z, p10.h[w12]\n"
".inst 0x25386140 // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0560700 // ld1h { za0h.h[x12] }, p1/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe05602e1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x23, x22, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "cmp x12, x28\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25286141 // psel p1.h, p8.h/Z, p10.h[w12]\n"
".inst 0x25386140 // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
"mov x26, %x[in]\n"
+ "inch x27\n"
+ ".inst 0xe0560700 // ld1h { za0h.h[x12] }, p1/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe05602e1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x23, x22, LSL #1]\n"
- "ldr x25, [x26, #0x0]\n"
- "inch x22\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- "inch x27\n"
+ "inch x22\n"
"cbz x9, 8f\n"
"mov x20, x9\n"
"3:" // K loop: Main loop
"whilelt p8.h, x27, %x[width]\n"
- "mov x12, #0x0\n"
+ "mov x13, #0x0\n"
"cbz x28, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
+ ".inst 0x25296143 // psel p3.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25396142 // psel p2.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0x25296d21 // psel p1.h, p11.h/Z, p9.h[w13]\n"
+ ".inst 0x25396d20 // psel p0.h, p11.h/Z, p9.h[w13, #1]\n"
+ ".inst 0xe0562f08 // ld1h { za1h.h[x13] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0562ae9 // ld1h { za1h.h[x13, #1] }, p2/Z, [x23, x22, LSL #1]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
- ".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- ".inst 0xe06b82a1 // st1h { za0v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28\n"
+ ".inst 0xe07fa6a0 // st1h { za0v.h[x13] }, p1/Z, [x21, XZR, LSL #1]\n"
"add x26, x26, #0x10\n"
+ ".inst 0xe06ba2a1 // st1h { za0v.h[x13, #1] }, p0/Z, [x21, x11, LSL #1]\n"
+ "add x13, x13, #0x2\n"
"addvl x21, x21, #2\n"
+ "cmp x13, x28\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25296143 // psel p3.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25396142 // psel p2.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0x25296d21 // psel p1.h, p11.h/Z, p9.h[w13]\n"
+ ".inst 0x25396d20 // psel p0.h, p11.h/Z, p9.h[w13, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
- ".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
"whilelt p9.h, x27, %x[width]\n"
+ ".inst 0xe0562f08 // ld1h { za1h.h[x13] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
"inch x27\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0562ae9 // ld1h { za1h.h[x13, #1] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe07fa6a0 // st1h { za0v.h[x13] }, p1/Z, [x21, XZR, LSL #1]\n"
"add x26, x26, #0x10\n"
- ".inst 0xe06b82a1 // st1h { za0v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
+ ".inst 0xe06ba2a1 // st1h { za0v.h[x13, #1] }, p0/Z, [x21, x11, LSL #1]\n"
"addvl x21, x21, #2\n"
"inch x22\n"
"whilelt p8.h, x27, %x[width]\n"
- "mov x12, #0x0\n"
"cbz x28, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0x25286143 // psel p3.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25386142 // psel p2.h, p8.h/Z, p10.h[w12, #1]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+ ".inst 0xe0560f00 // ld1h { za0h.h[x12] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0560ae1 // ld1h { za0h.h[x12, #1] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe07f86a8 // st1h { za1v.h[x12] }, p1/Z, [x21, XZR, LSL #1]\n"
+ "add x26, x26, #0x10\n"
".inst 0xe06b82a9 // st1h { za1v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
"addvl x21, x21, #2\n"
+ "cmp x12, x28\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0x25286143 // psel p3.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25386142 // psel p2.h, p8.h/Z, p10.h[w12, #1]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+ "mov x26, %x[in]\n"
"whilelt p9.h, x27, %x[width]\n"
+ ".inst 0xe0560f00 // ld1h { za0h.h[x12] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
"subs x20, x20, #0x1\n"
+ "inch x27\n"
+ ".inst 0xe0560ae1 // ld1h { za0h.h[x12, #1] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe07f86a8 // st1h { za1v.h[x12] }, p1/Z, [x21, XZR, LSL #1]\n"
"add x26, x26, #0x10\n"
".inst 0xe06b82a9 // st1h { za1v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
"addvl x21, x21, #2\n"
- "inch x27\n"
"inch x22\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
+ "cbnz x25, 11f\n"
"mov x26, %x[in]\n"
"whilelt p8.h, x27, %x[width]\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
- "ldr x20, [x26, #0x0]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0xe07f86a0 // st1h { za0v.h[x12] }, p1/Z, [x21, XZR, LSL #1]\n"
+ "addvl x21, x21, #1\n"
+ "ldr x20, [x26, #0x0]\n"
+ "add x26, x26, #0x8\n"
".inst 0xe0560288 // ld1h { za1h.h[x12] }, p0/Z, [x20, x22, LSL #1]\n"
"add x12, x12, #0x1\n"
"cmp x12, x11\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
"blt 9b\n"
"whilelt p9.h, x27, %x[width]\n"
"whilelt p8.h, x27, %x[width]\n"
@@ -181,8 +181,8 @@ void interleave_block<1, 1, VLType::SME, false>(
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x10\n"
"blt 10b\n"
"whilelt p8.h, x27, %x[width]\n"
"b 13f\n"
@@ -192,15 +192,15 @@ void interleave_block<1, 1, VLType::SME, false>(
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x21\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
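The sme_interleave1VL_bf16_bf16 hunk above is likewise a reschedule-and-rename change (x24 and x25 swap roles as row pointer and odd-tail flag, and x13 joins the clobber list); the trip-count arithmetic in its commented preamble is untouched. That arithmetic, restated as plain C++ directly from the asm comments (n_passes = ceildiv(width, VL<T>), n_loops = (n_passes - 1) / 2, odd_tail = bool(n_passes & 0x1)); the struct and function names are illustrative, and a power-of-two vector length is assumed, as the kernel's VL-1 mask implies.

#include <cstddef>

struct KLoopCounts {
    size_t n_passes;  // total K-loop passes over the width
    size_t n_loops;   // main-loop iterations, two passes each
    bool   odd_tail;  // one extra unpaired pass at the end?
    size_t tail;      // elements handled by the final partial pass
};

static KLoopCounts k_loop_counts(size_t width, size_t vl)
{
    KLoopCounts c;
    c.n_passes = (width + vl - 1) / vl;  // n_passes = ceildiv(width, VL<T>)
    c.n_loops  = (c.n_passes - 1) / 2;   // main loop consumes passes in pairs
    c.odd_tail = (c.n_passes & 0x1) != 0;
    size_t rem = width & (vl - 1);       // 'ands' with a VL-1 mask, as in the asm
    c.tail     = rem ? rem : vl;         // 'csel': full vector when evenly divided
    return c;
}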
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp
index 98bdcd2fa2..793bc80524 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,28 +32,28 @@ void interleave_block<1, 2, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x22\n"
+ "mov x22, %x[width]\n"
"mov x21, %x[width]\n"
- "inch x21\n"
- "mov x20, %x[width]\n"
- "sub x11, x22, #0x1\n"
- "sub x21, x21, #0x1\n"
- "ands x11, x20, x11\n"
+ "cnth x20\n"
+ "inch x22\n"
+ "sub x11, x20, #0x1\n"
+ "sub x22, x22, #0x1\n"
+ "ands x11, x21, x11\n"
"cntw x10\n"
- "udiv x21, x21, x22\n" // n_passes = ceildiv(width, VL<T>)
- "csel x11, x11, x22, NE\n"
- "sub x9, x21, #0x1\n"
+ "udiv x22, x22, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x11, x11, x20, NE\n"
+ "sub x9, x22, #0x1\n"
"add x11, x11, #0x1\n"
"sub x28, x10, #0x2\n"
"lsl x20, %x[height], #0x1\n" // height * 2
"mov x27, #0x0\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
"lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x24, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x23, [x26, #0x8]\n"
+ "and x25, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "ldr x24, [x26, #0x0]\n"
"lsr x11, x11, #0x1\n"
"ptrue p11.s\n"
+ "ldr x23, [x26, #0x8]\n"
"whilelt p10.h, XZR, x20\n"
"mov x22, %x[row_offset]\n"
"mov x21, %x[out]\n"
@@ -63,124 +63,124 @@ void interleave_block<1, 2, VLType::SME, false>(
"mov x12, #0x0\n"
"cbz x28, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25286141 // psel p1.h, p8.h/Z, p10.h[w12]\n"
".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0560700 // ld1h { za0h.h[x12] }, p1/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe05602e2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x28, LSL #1\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "cmp x12, x28, LSL #1\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25286141 // psel p1.h, p8.h/Z, p10.h[w12]\n"
".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
"mov x26, %x[in]\n"
+ "inch x27\n"
+ ".inst 0xe0560700 // ld1h { za0h.h[x12] }, p1/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe05602e2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
- "ldr x25, [x26, #0x0]\n"
- "inch x22\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- "inch x27\n"
+ "inch x22\n"
"cbz x9, 8f\n"
"mov x20, x9\n"
"3:" // K loop: Main loop
"whilelt p8.h, x27, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ "mov x14, #0x0\n"
"cbz x28, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
- ".inst 0xe0562321 // ld1h { za0h.h[x13, #1] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25796141 // psel p1.h, p8.h/Z, p10.h[w13, #3]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe05626e3 // ld1h { za0h.h[x13, #3] }, p1/Z, [x23, x22, LSL #1]\n"
+ ".inst 0x25386143 // psel p3.h, p8.h/Z, p10.h[w12, #1]\n"
+ ".inst 0x25786142 // psel p2.h, p8.h/Z, p10.h[w12, #3]\n"
+ ".inst 0x252a6d21 // psel p1.h, p11.h/Z, p9.h[w14]\n"
+ ".inst 0x253a6d20 // psel p0.h, p11.h/Z, p9.h[w14, #1]\n"
+ ".inst 0xe0560f01 // ld1h { za0h.h[x12, #1] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0560ae3 // ld1h { za0h.h[x12, #3] }, p2/Z, [x23, x22, LSL #1]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0aa82a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28\n"
"add x26, x26, #0x10\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0aac2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ "add x14, x14, #0x2\n"
"addvl x21, x21, #2\n"
- "add x13, x13, #0x4\n"
+ "cmp x14, x28\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
- ".inst 0xe0562321 // ld1h { za0h.h[x13, #1] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25386143 // psel p3.h, p8.h/Z, p10.h[w12, #1]\n"
+ ".inst 0x25786142 // psel p2.h, p8.h/Z, p10.h[w12, #3]\n"
+ ".inst 0x252a6d21 // psel p1.h, p11.h/Z, p9.h[w14]\n"
+ ".inst 0x253a6d20 // psel p0.h, p11.h/Z, p9.h[w14, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25796141 // psel p1.h, p8.h/Z, p10.h[w13, #3]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe05626e3 // ld1h { za0h.h[x13, #3] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"whilelt p9.h, x27, %x[width]\n"
+ ".inst 0xe0560f01 // ld1h { za0h.h[x12, #1] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
"inch x27\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0560ae3 // ld1h { za0h.h[x12, #3] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- ".inst 0xe0aa82a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "addvl x21, x21, #2\n"
"inch x22\n"
+ ".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
"whilelt p8.h, x27, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ ".inst 0xe0aac2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ "addvl x21, x21, #2\n"
"cbz x28, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25296140 // psel p0.h, p8.h/Z, p10.h[w13]\n"
- ".inst 0xe0562320 // ld1h { za0h.h[x13] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25696141 // psel p1.h, p8.h/Z, p10.h[w13, #2]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe05626e2 // ld1h { za0h.h[x13, #2] }, p1/Z, [x23, x22, LSL #1]\n"
+ ".inst 0x25296143 // psel p3.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25696142 // psel p2.h, p8.h/Z, p10.h[w13, #2]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
+ ".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+ ".inst 0xe0562f00 // ld1h { za0h.h[x13] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0562ae2 // ld1h { za0h.h[x13, #2] }, p2/Z, [x23, x22, LSL #1]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x26, x26, #0x10\n"
+ "add x13, x13, #0x4\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
"addvl x21, x21, #2\n"
- "add x13, x13, #0x4\n"
+ "cmp x12, x28\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25296140 // psel p0.h, p8.h/Z, p10.h[w13]\n"
- ".inst 0xe0562320 // ld1h { za0h.h[x13] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25296143 // psel p3.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25696142 // psel p2.h, p8.h/Z, p10.h[w13, #2]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
+ ".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25696141 // psel p1.h, p8.h/Z, p10.h[w13, #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe05626e2 // ld1h { za0h.h[x13, #2] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"whilelt p9.h, x27, %x[width]\n"
+ ".inst 0xe0562f00 // ld1h { za0h.h[x13] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
"subs x20, x20, #0x1\n"
+ "inch x27\n"
+ ".inst 0xe0562ae2 // ld1h { za0h.h[x13, #2] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "inch x22\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
"addvl x21, x21, #2\n"
- "inch x27\n"
- "inch x22\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
+ "cbnz x25, 11f\n"
"mov x26, %x[in]\n"
"whilelt p8.h, x27, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x20, [x26, #0x0]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x21, x21, #1\n"
+ "ldr x20, [x26, #0x0]\n"
"cmp x12, x10\n"
- ".inst 0xe0562281 // ld1h { za0h.h[x13, #1] }, p0/Z, [x20, x22, LSL #1]\n"
"add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
+ ".inst 0xe0562281 // ld1h { za0h.h[x13, #1] }, p0/Z, [x20, x22, LSL #1]\n"
"add x13, x13, #0x2\n"
"blt 9b\n"
"whilelt p9.h, x27, %x[width]\n"
@@ -189,11 +189,11 @@ void interleave_block<1, 2, VLType::SME, false>(
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ "add x20, x20, #0x2\n"
".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x11\n"
"addvl x21, x21, #1\n"
- "add x20, x20, #0x2\n"
+ "cmp x12, x11\n"
"blt 10b\n"
"whilelt p8.h, x27, %x[width]\n"
"b 13f\n"
@@ -203,15 +203,15 @@ void interleave_block<1, 2, VLType::SME, false>(
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x11\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x11\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x21\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp
index 30c3e42aed..0e0e4e462c 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block2_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,19 +10,19 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
-#if defined(__ARM_FEATURE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME)
template <>
void interleave_block<1, 2, VLType::SME, false>(
@@ -63,11 +63,11 @@ void interleave_block<1, 2, VLType::SME, false>(
"mov x12, #0x0\n"
"cbz x28, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286143 // psel p3.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0x25686142 // psel p2.h, p8.h/Z, p10.h[w12, #2]\n"
- ".inst 0xe0560f00 // ld1h { za0h.h[x12] }, p3/Z, [x24, x22, LSL #1]\n"
+ ".inst 0x25286141 // psel p1.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
+ ".inst 0xe0560700 // ld1h { za0h.h[x12] }, p1/Z, [x24, x22, LSL #1]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe0560ae2 // ld1h { za0h.h[x12, #2] }, p2/Z, [x23, x22, LSL #1]\n"
+ ".inst 0xe05602e2 // ld1h { za0h.h[x12, #2] }, p0/Z, [x23, x22, LSL #1]\n"
"add x12, x12, #0x4\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
@@ -88,20 +88,20 @@ void interleave_block<1, 2, VLType::SME, false>(
"mov x20, x9\n"
"3:" // K loop: Main loop
"whilelt p8.h, x27, %x[width]\n"
- "mov x15, #0x0\n"
+ "mov x12, #0x0\n"
"mov x14, #0x0\n"
"cbz x28, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x253b6143 // psel p3.h, p8.h/Z, p10.h[w15, #1]\n"
- ".inst 0x257b6142 // psel p2.h, p8.h/Z, p10.h[w15, #3]\n"
+ ".inst 0x25386143 // psel p3.h, p8.h/Z, p10.h[w12, #1]\n"
+ ".inst 0x25786142 // psel p2.h, p8.h/Z, p10.h[w12, #3]\n"
".inst 0x252a6d21 // psel p1.h, p11.h/Z, p9.h[w14]\n"
".inst 0x253a6d20 // psel p0.h, p11.h/Z, p9.h[w14, #1]\n"
- ".inst 0xe0566f01 // ld1h { za0h.h[x15, #1] }, p3/Z, [x24, x22, LSL #1]\n"
+ ".inst 0xe0560f01 // ld1h { za0h.h[x12, #1] }, p3/Z, [x24, x22, LSL #1]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe0566ae3 // ld1h { za0h.h[x15, #3] }, p2/Z, [x23, x22, LSL #1]\n"
+ ".inst 0xe0560ae3 // ld1h { za0h.h[x12, #3] }, p2/Z, [x23, x22, LSL #1]\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- "add x15, x15, #0x4\n"
+ "add x12, x12, #0x4\n"
".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
".inst 0xe0aac2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x10, LSL #2]\n"
"add x14, x14, #0x2\n"
@@ -109,23 +109,23 @@ void interleave_block<1, 2, VLType::SME, false>(
"cmp x14, x28\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x253b6143 // psel p3.h, p8.h/Z, p10.h[w15, #1]\n"
- ".inst 0x257b6142 // psel p2.h, p8.h/Z, p10.h[w15, #3]\n"
+ ".inst 0x25386143 // psel p3.h, p8.h/Z, p10.h[w12, #1]\n"
+ ".inst 0x25786142 // psel p2.h, p8.h/Z, p10.h[w12, #3]\n"
".inst 0x252a6d21 // psel p1.h, p11.h/Z, p9.h[w14]\n"
".inst 0x253a6d20 // psel p0.h, p11.h/Z, p9.h[w14, #1]\n"
"mov x26, %x[in]\n"
"whilelt p9.h, x27, %x[width]\n"
- ".inst 0xe0566f01 // ld1h { za0h.h[x15, #1] }, p3/Z, [x24, x22, LSL #1]\n"
+ ".inst 0xe0560f01 // ld1h { za0h.h[x12, #1] }, p3/Z, [x24, x22, LSL #1]\n"
"ldr x24, [x26, #0x0]\n"
"inch x27\n"
"mov x13, #0x0\n"
- "whilelt p8.h, x27, %x[width]\n"
- "mov x12, #0x0\n"
- ".inst 0xe0566ae3 // ld1h { za0h.h[x15, #3] }, p2/Z, [x23, x22, LSL #1]\n"
+ ".inst 0xe0560ae3 // ld1h { za0h.h[x12, #3] }, p2/Z, [x23, x22, LSL #1]\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
"inch x22\n"
".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
+ "whilelt p8.h, x27, %x[width]\n"
+ "mov x12, #0x0\n"
".inst 0xe0aac2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x10, LSL #2]\n"
"addvl x21, x21, #2\n"
"cbz x28, 7f\n"
@@ -172,15 +172,15 @@ void interleave_block<1, 2, VLType::SME, false>(
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0x25396143 // psel p3.h, p8.h/Z, p10.h[w13, #1]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x24, [x26, #0x0]\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "add x26, x26, #0x8\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
- ".inst 0xe0562f01 // ld1h { za0h.h[x13, #1] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x20, [x26, #0x0]\n"
+ "cmp x12, x10\n"
+ "add x26, x26, #0x8\n"
+ ".inst 0xe0562281 // ld1h { za0h.h[x13, #1] }, p0/Z, [x20, x22, LSL #1]\n"
"add x13, x13, #0x2\n"
"blt 9b\n"
"whilelt p9.h, x27, %x[width]\n"
@@ -195,7 +195,7 @@ void interleave_block<1, 2, VLType::SME, false>(
"addvl x21, x21, #1\n"
"cmp x12, x11\n"
"blt 10b\n"
- "whilelt p9.h, x27, %x[width]\n"
+ "whilelt p8.h, x27, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
@@ -203,16 +203,16 @@ void interleave_block<1, 2, VLType::SME, false>(
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x11\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x11\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x21\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
-#endif // defined(__ARM_FEATURE_SVE)
+#endif // defined(ARM_COMPUTE_ENABLE_SME)
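// The labels in the hunks above ("K loop: Charge", "K loop: Main loop:
// First/Second", "K loop: Tails: Even/Odd") describe a software-pipelined K
// loop over the ZA array: one half of ZA is charged with horizontal slices
// while the other half is drained through vertical slices. A hedged C++
// model of that control flow, with illustrative names only (the real kernel
// expresses this with SME predicates, ZA tiles and branches):
#include <cstddef>
#include <functional>

inline void k_loop_model(std::size_t n_loops, bool odd_tail,
                         const std::function<void(int)> &charge, // fill one ZA half
                         const std::function<void(int)> &drain)  // store one ZA half
{
    charge(0); // "K loop: Charge": preload half 0 before entering the main loop
    for (std::size_t i = 0; i < n_loops; ++i)
    {
        charge(1); drain(0); // "Main loop: First":  fill half 1, store half 0
        charge(0); drain(1); // "Main loop: Second": fill half 0, store half 1
    }
    if (odd_tail)
    {
        drain(0); // "Tails: Odd": a single charged half remains to be stored
    }
    else
    {
        charge(1); drain(0); // "Tails: Even: First"
        drain(1);            // "Tails: Even: Second"
    }
}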
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp
index 4390bb7c7f..558a5d7637 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,16 +32,16 @@ void interleave_block<1, 4, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
"mov x23, %x[width]\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
"incb x23\n"
- "mov x20, %x[width]\n"
- "sub x10, x21, #0x1\n"
+ "sub x10, x20, #0x1\n"
"cntw x9\n"
"sub x23, x23, #0x1\n"
- "ands x10, x20, x10\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x10, x10, x21, NE\n"
+ "ands x10, x21, x10\n"
+ "udiv x23, x23, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x10, x10, x20, NE\n"
"lsl x22, %x[height], #0x1\n" // height * 2
"lsl x21, x9, #0x1\n"
"sub x20, x23, #0x1\n"
@@ -52,12 +52,12 @@ void interleave_block<1, 4, VLType::SME, false>(
"mov x27, #0x0\n"
"mov x26, %x[in]\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x26, #0x0]\n"
- "and x24, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "and x25, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
"lsr x10, x10, #0x2\n"
- "ldr x23, [x26, #0x8]\n"
+ "ldr x24, [x26, #0x0]\n"
"ptrue p11.s\n"
"zip1 p10.b, p9.b, p8.b\n"
+ "ldr x23, [x26, #0x8]\n"
"mov x22, %x[row_offset]\n"
"mov x21, %x[out]\n"
"whilelt p9.b, x27, %x[width]\n"
@@ -66,124 +66,124 @@ void interleave_block<1, 4, VLType::SME, false>(
"mov x12, #0x0\n"
"cbz x28, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x22]\n"
+ ".inst 0x25246141 // psel p1.b, p8.b/Z, p10.b[w12]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0160700 // ld1b { za0h.b[x12] }, p1/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe01602e4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x23, x22]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x28, LSL #2\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "cmp x12, x28, LSL #2\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x22]\n"
+ ".inst 0x25246141 // psel p1.b, p8.b/Z, p10.b[w12]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
"mov x26, %x[in]\n"
+ "incb x27\n"
+ ".inst 0xe0160700 // ld1b { za0h.b[x12] }, p1/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe01602e4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x23, x22]\n"
- "ldr x25, [x26, #0x0]\n"
- "incb x22\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- "incb x27\n"
+ "incb x22\n"
"cbz x20, 8f\n"
"mov x20, x20\n"
"3:" // K loop: Main loop
"whilelt p8.b, x27, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ "mov x14, #0x0\n"
"cbz x28, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
- ".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x23, x22]\n"
+ ".inst 0x25346143 // psel p3.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25746142 // psel p2.b, p8.b/Z, p10.b[w12, #6]\n"
+ ".inst 0x25266d21 // psel p1.b, p11.b/Z, p9.b[w14]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
+ ".inst 0xe0160f02 // ld1b { za0h.b[x12, #2] }, p3/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0160ae6 // ld1b { za0h.b[x12, #6] }, p2/Z, [x23, x22]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0a982a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28\n"
"add x26, x26, #0x10\n"
+ "add x12, x12, #0x8\n"
+ ".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0a9c2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x9, LSL #2]\n"
+ "add x14, x14, #0x2\n"
"addvl x21, x21, #2\n"
- "add x13, x13, #0x8\n"
+ "cmp x14, x28\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
+ ".inst 0x25346143 // psel p3.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25746142 // psel p2.b, p8.b/Z, p10.b[w12, #6]\n"
+ ".inst 0x25266d21 // psel p1.b, p11.b/Z, p9.b[w14]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"whilelt p9.b, x27, %x[width]\n"
+ ".inst 0xe0160f02 // ld1b { za0h.b[x12, #2] }, p3/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
"incb x27\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0160ae6 // ld1b { za0h.b[x12, #6] }, p2/Z, [x23, x22]\n"
+ "ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- ".inst 0xe0a982a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
- "addvl x21, x21, #2\n"
"incb x22\n"
+ ".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
"whilelt p8.b, x27, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ ".inst 0xe0a9c2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x9, LSL #2]\n"
+ "addvl x21, x21, #2\n"
"cbz x28, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162320 // ld1b { za0h.b[x13] }, p0/Z, [x25, x22]\n"
- ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x23, x22]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x25656142 // psel p2.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25246d21 // psel p1.b, p11.b/Z, p9.b[w12]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ ".inst 0xe0162f00 // ld1b { za0h.b[x13] }, p3/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0162ae4 // ld1b { za0h.b[x13, #4] }, p2/Z, [x23, x22]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x26, x26, #0x10\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
".inst 0xe0a982a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
"addvl x21, x21, #2\n"
- "add x13, x13, #0x8\n"
+ "cmp x12, x28\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162320 // ld1b { za0h.b[x13] }, p0/Z, [x25, x22]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x25656142 // psel p2.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25246d21 // psel p1.b, p11.b/Z, p9.b[w12]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"whilelt p9.b, x27, %x[width]\n"
+ ".inst 0xe0162f00 // ld1b { za0h.b[x13] }, p3/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
"subs x20, x20, #0x1\n"
+ "incb x27\n"
+ ".inst 0xe0162ae4 // ld1b { za0h.b[x13, #4] }, p2/Z, [x23, x22]\n"
+ "ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "incb x22\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
".inst 0xe0a982a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
"addvl x21, x21, #2\n"
- "incb x27\n"
- "incb x22\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
+ "cbnz x25, 11f\n"
"mov x26, %x[in]\n"
"whilelt p8.b, x27, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x20, [x26, #0x0]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x21, x21, #1\n"
+ "ldr x20, [x26, #0x0]\n"
"cmp x12, x9\n"
- ".inst 0xe0162282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x22]\n"
"add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
+ ".inst 0xe0162282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x22]\n"
"add x13, x13, #0x4\n"
"blt 9b\n"
"whilelt p9.b, x27, %x[width]\n"
@@ -192,11 +192,11 @@ void interleave_block<1, 4, VLType::SME, false>(
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ "add x20, x20, #0x4\n"
".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x10\n"
"blt 10b\n"
"whilelt p8.b, x27, %x[width]\n"
"b 13f\n"
@@ -206,15 +206,15 @@ void interleave_block<1, 4, VLType::SME, false>(
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x21\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
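// The scalar preamble of the kernel above computes its trip counts exactly as
// its inline comments state; restated as C++ for reference (vl_bytes plays
// the role of 'cntb'; streaming-mode vector lengths are powers of two, which
// is what makes the 'ands'/'csel' tail computation valid; assumes width > 0):
#include <cstddef>

struct KLoopCounts
{
    std::size_t n_passes; // ceildiv(width, VL)
    std::size_t n_loops;  // (n_passes - 1) / 2
    bool        odd_tail; // bool(n_passes & 0x1)
    std::size_t tail;     // width % VL, or a full VL when width divides evenly
};

inline KLoopCounts k_loop_counts(std::size_t width, std::size_t vl_bytes)
{
    KLoopCounts c;
    c.n_passes = (width + vl_bytes - 1) / vl_bytes; // incb/sub/udiv sequence
    c.n_loops  = (c.n_passes - 1) / 2;              // sub/lsr #0x1
    c.odd_tail = (c.n_passes & 1) != 0;             // and ..., #0x1
    const std::size_t rem = width & (vl_bytes - 1); // ands x10, width, VL-1
    c.tail = (rem != 0) ? rem : vl_bytes;           // csel ..., NE
    return c;
}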
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp
index f5ee261964..ba8be81ade 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,18 +32,18 @@ void interleave_block<1, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
"mov x23, %x[width]\n"
- "mov z18.b, #0x1\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
"incb x23\n"
- "mov x20, %x[width]\n"
+ "mov z18.b, #0x1\n"
"mov z17.s, #0x0\n"
- "sub x10, x21, #0x1\n"
+ "sub x10, x20, #0x1\n"
"cntw x9\n"
"sub x23, x23, #0x1\n"
- "ands x10, x20, x10\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x10, x10, x21, NE\n"
+ "ands x10, x21, x10\n"
+ "udiv x23, x23, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x10, x10, x20, NE\n"
"lsl x22, %x[height], #0x1\n" // height * 2
"lsl x21, x9, #0x1\n"
"sub x20, x23, #0x1\n"
@@ -51,7 +51,7 @@ void interleave_block<1, 4, VLType::SME, true>(
"whilelt p9.b, XZR, x22\n"
"whilelt p8.b, x21, x22\n"
"mov x28, #0x0\n"
- "ptrue p2.b\n"
+ "ptrue p4.b\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
"and x27, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
"lsr x10, x10, #0x2\n"
@@ -64,132 +64,132 @@ void interleave_block<1, 4, VLType::SME, true>(
"whilelt p8.b, x28, %x[width]\n"
"cbnz %x[first], 1f\n"
"addvl x24, x24, #-1\n"
- "ld1w { z17.s }, p2/Z, [x24]\n"
+ "ld1w { z17.s }, p4/Z, [x24]\n"
"1:" // K loop: Load row sums: End
"mov x23, %x[in]\n"
- "ldr x22, [x23, #0x0]\n"
"mov x12, #0x0\n"
+ "ldr x22, [x23, #0x0]\n"
"ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
"cbz x26, 3f\n"
"2:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01902c0 // ld1b { za0h.b[x12] }, p0/Z, [x22, x25]\n"
+ ".inst 0x25246141 // psel p1.b, p8.b/Z, p10.b[w12]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01906c0 // ld1b { za0h.b[x12] }, p1/Z, [x22, x25]\n"
"ldr x22, [x23, #0x0]\n"
".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x26, LSL #2\n"
"ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
+ "cmp x12, x26, LSL #2\n"
"blt 2b\n"
"3:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01902c0 // ld1b { za0h.b[x12] }, p0/Z, [x22, x25]\n"
+ ".inst 0x25246141 // psel p1.b, p8.b/Z, p10.b[w12]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
"mov x23, %x[in]\n"
- ".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
+ "incb x28\n"
+ ".inst 0xe01906c0 // ld1b { za0h.b[x12] }, p1/Z, [x22, x25]\n"
"ldr x22, [x23, #0x0]\n"
- "incb x25\n"
+ ".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
"ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
- "incb x28\n"
+ "incb x25\n"
"cbz x20, 9f\n"
"mov x20, x20\n"
"4:" // K loop: Main loop
"whilelt p8.b, x28, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ "mov x14, #0x0\n"
"cbz x26, 6f\n"
"5:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0x25346143 // psel p3.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25746142 // psel p2.b, p8.b/Z, p10.b[w12, #6]\n"
+ ".inst 0x25266d21 // psel p1.b, p11.b/Z, p9.b[w14]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
+ ".inst 0xe0190ec2 // ld1b { za0h.b[x12, #2] }, p3/Z, [x22, x25]\n"
"ldr x22, [x23, #0x0]\n"
- ".inst 0xe01922a6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- "sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xe0190aa6 // ld1b { za0h.b[x12, #6] }, p2/Z, [x21, x25]\n"
"ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828830 // mova z16.s, p2/M, za0v.s[x12, #1]\n"
- ".inst 0xe0a98301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x26\n"
- "sdot z17.s, z16.b, z18.b\n"
"add x23, x23, #0x10\n"
+ "add x12, x12, #0x8\n"
+ ".inst 0xc082d010 // mova z16.s, p4/M, za0v.s[x14]\n"
+ ".inst 0xe0bfc700 // st1w { za0v.s[x14] }, p1/Z, [x24, XZR, LSL #2]\n"
+ "sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xe0a9c301 // st1w { za0v.s[x14, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ ".inst 0xc082d030 // mova z16.s, p4/M, za0v.s[x14, #1]\n"
+ "add x14, x14, #0x2\n"
+ "cmp x14, x26\n"
"addvl x24, x24, #2\n"
- "add x13, x13, #0x8\n"
+ "sdot z17.s, z16.b, z18.b\n"
"blt 5b\n"
"6:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0xe01922a6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- "sdot z17.s, z16.b, z18.b\n"
+ ".inst 0x25346143 // psel p3.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25746142 // psel p2.b, p8.b/Z, p10.b[w12, #6]\n"
+ ".inst 0x25266d21 // psel p1.b, p11.b/Z, p9.b[w14]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
"mov x23, %x[in]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828830 // mova z16.s, p2/M, za0v.s[x12, #1]\n"
- "ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8700 // st1w { za0v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
"whilelt p9.b, x28, %x[width]\n"
+ ".inst 0xe0190ec2 // ld1b { za0h.b[x12, #2] }, p3/Z, [x22, x25]\n"
+ "ldr x22, [x23, #0x0]\n"
"incb x28\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0190aa6 // ld1b { za0h.b[x12, #6] }, p2/Z, [x21, x25]\n"
+ "ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
- ".inst 0xe0a98301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
- "sdot z17.s, z16.b, z18.b\n"
- "addvl x24, x24, #2\n"
"incb x25\n"
+ ".inst 0xc082d010 // mova z16.s, p4/M, za0v.s[x14]\n"
+ ".inst 0xe0bfc700 // st1w { za0v.s[x14] }, p1/Z, [x24, XZR, LSL #2]\n"
"whilelt p8.b, x28, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ "sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xc082d030 // mova z16.s, p4/M, za0v.s[x14, #1]\n"
+ ".inst 0xe0a9c301 // st1w { za0v.s[x14, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ "addvl x24, x24, #2\n"
+ "sdot z17.s, z16.b, z18.b\n"
"cbz x26, 8f\n"
"7:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x25]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x25656142 // psel p2.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25246d21 // psel p1.b, p11.b/Z, p9.b[w12]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ ".inst 0xe0192ec0 // ld1b { za0h.b[x13] }, p3/Z, [x22, x25]\n"
"ldr x22, [x23, #0x0]\n"
- ".inst 0xe01922a4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- "sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xe0192aa4 // ld1b { za0h.b[x13, #4] }, p2/Z, [x21, x25]\n"
"ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828930 // mova z16.s, p2/M, za2v.s[x12, #1]\n"
+ "add x23, x23, #0x10\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
+ ".inst 0xe0bf8708 // st1w { za2v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
+ "sdot z17.s, z16.b, z18.b\n"
".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ ".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
"add x12, x12, #0x2\n"
"cmp x12, x26\n"
- "sdot z17.s, z16.b, z18.b\n"
- "add x23, x23, #0x10\n"
"addvl x24, x24, #2\n"
- "add x13, x13, #0x8\n"
+ "sdot z17.s, z16.b, z18.b\n"
"blt 7b\n"
"8:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x25]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0xe01922a4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- "sdot z17.s, z16.b, z18.b\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x25656142 // psel p2.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25246d21 // psel p1.b, p11.b/Z, p9.b[w12]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
"mov x23, %x[in]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828930 // mova z16.s, p2/M, za2v.s[x12, #1]\n"
- "ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8708 // st1w { za2v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
"whilelt p9.b, x28, %x[width]\n"
+ ".inst 0xe0192ec0 // ld1b { za0h.b[x13] }, p3/Z, [x22, x25]\n"
+ "ldr x22, [x23, #0x0]\n"
"subs x20, x20, #0x1\n"
+ "incb x28\n"
+ ".inst 0xe0192aa4 // ld1b { za0h.b[x13, #4] }, p2/Z, [x21, x25]\n"
+ "ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
- ".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ "incb x25\n"
+ ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
+ ".inst 0xe0bf8708 // st1w { za2v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
"sdot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
+ ".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
"addvl x24, x24, #2\n"
- "incb x28\n"
- "incb x25\n"
+ "sdot z17.s, z16.b, z18.b\n"
"bgt 4b\n"
"9:" // K loop: Tails
"cbnz x27, 12f\n"
@@ -198,17 +198,17 @@ void interleave_block<1, 4, VLType::SME, true>(
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- "ldr x20, [x23, #0x0]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
"sdot z17.s, z16.b, z18.b\n"
- ".inst 0xe0192282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x25]\n"
+ ".inst 0xe0bf8700 // st1w { za0v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x24, x24, #1\n"
+ "ldr x20, [x23, #0x0]\n"
"cmp x12, x9\n"
"add x23, x23, #0x8\n"
- "addvl x24, x24, #1\n"
+ ".inst 0xe0192282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x25]\n"
"add x13, x13, #0x4\n"
"blt 10b\n"
"whilelt p9.b, x28, %x[width]\n"
@@ -217,13 +217,13 @@ void interleave_block<1, 4, VLType::SME, true>(
"mov x12, #0x0\n"
"11:" // K loop: Tails: Even: Second
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
+ "add x20, x20, #0x4\n"
+ "sdot z17.s, z16.b, z18.b\n"
".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "sdot z17.s, z16.b, z18.b\n"
"addvl x24, x24, #1\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x10\n"
"blt 11b\n"
"whilelt p8.b, x28, %x[width]\n"
"b 14f\n"
@@ -231,21 +231,21 @@ void interleave_block<1, 4, VLType::SME, true>(
"mov x12, #0x0\n"
"13:" // K loop: Tails: Odd: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
+ "sdot z17.s, z16.b, z18.b\n"
".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "sdot z17.s, z16.b, z18.b\n"
"addvl x24, x24, #1\n"
+ "cmp x12, x10\n"
"blt 13b\n"
"14:" // K loop: End
- "st1w { z17.s }, p2, [x24]\n"
+ "st1w { z17.s }, p4, [x24]\n"
"addvl x24, x24, #1\n"
- "mov %x[out], x24\n"
".inst 0xd503467f // SMSTOP\n"
+ "mov %x[out], x24\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
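// The "_summing" variant above interleaves exactly like the plain s8 kernel
// but also maintains running row sums: z17.s starts at zero (or is reloaded
// from just below 'out' when 'first' is zero), z18.b is a vector of ones, and
// every vector of interleaved data is folded in with 'sdot z17.s, z16.b,
// z18.b' - a dot product with ones, i.e. the sum of the four int8 values in
// each 32-bit lane. A hedged scalar model of that accumulation step (names
// are illustrative):
#include <cstddef>
#include <cstdint>

// block4 points at one stored vector of the panel: each s32 lane holds four
// consecutive K bytes of one row, so lane r contributes to row r's sum.
inline void accumulate_row_sums(int32_t *sums, const int8_t *block4,
                                std::size_t lanes)
{
    for (std::size_t r = 0; r < lanes; ++r)
    {
        sums[r] += block4[4 * r + 0] + block4[4 * r + 1]
                 + block4[4 * r + 2] + block4[4 * r + 3];
    }
}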
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp
index 76c1d053cd..6d1c1a207f 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,16 +32,16 @@ void interleave_block<1, 4, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
"mov x23, %x[width]\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
"incb x23\n"
- "mov x20, %x[width]\n"
- "sub x10, x21, #0x1\n"
+ "sub x10, x20, #0x1\n"
"cntw x9\n"
"sub x23, x23, #0x1\n"
- "ands x10, x20, x10\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x10, x10, x21, NE\n"
+ "ands x10, x21, x10\n"
+ "udiv x23, x23, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x10, x10, x20, NE\n"
"lsl x22, %x[height], #0x1\n" // height * 2
"lsl x21, x9, #0x1\n"
"sub x20, x23, #0x1\n"
@@ -52,12 +52,12 @@ void interleave_block<1, 4, VLType::SME, false>(
"mov x27, #0x0\n"
"mov x26, %x[in]\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x26, #0x0]\n"
- "and x24, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "and x25, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
"lsr x10, x10, #0x2\n"
- "ldr x23, [x26, #0x8]\n"
+ "ldr x24, [x26, #0x0]\n"
"ptrue p11.s\n"
"zip1 p10.b, p9.b, p8.b\n"
+ "ldr x23, [x26, #0x8]\n"
"mov x22, %x[row_offset]\n"
"mov x21, %x[out]\n"
"whilelt p9.b, x27, %x[width]\n"
@@ -66,124 +66,124 @@ void interleave_block<1, 4, VLType::SME, false>(
"mov x12, #0x0\n"
"cbz x28, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x22]\n"
+ ".inst 0x25246141 // psel p1.b, p8.b/Z, p10.b[w12]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0160700 // ld1b { za0h.b[x12] }, p1/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe01602e4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x23, x22]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x28, LSL #2\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "cmp x12, x28, LSL #2\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x22]\n"
+ ".inst 0x25246141 // psel p1.b, p8.b/Z, p10.b[w12]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
"mov x26, %x[in]\n"
+ "incb x27\n"
+ ".inst 0xe0160700 // ld1b { za0h.b[x12] }, p1/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe01602e4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x23, x22]\n"
- "ldr x25, [x26, #0x0]\n"
- "incb x22\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- "incb x27\n"
+ "incb x22\n"
"cbz x20, 8f\n"
"mov x20, x20\n"
"3:" // K loop: Main loop
"whilelt p8.b, x27, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ "mov x14, #0x0\n"
"cbz x28, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
- ".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x23, x22]\n"
+ ".inst 0x25346143 // psel p3.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25746142 // psel p2.b, p8.b/Z, p10.b[w12, #6]\n"
+ ".inst 0x25266d21 // psel p1.b, p11.b/Z, p9.b[w14]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
+ ".inst 0xe0160f02 // ld1b { za0h.b[x12, #2] }, p3/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0160ae6 // ld1b { za0h.b[x12, #6] }, p2/Z, [x23, x22]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0a982a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28\n"
"add x26, x26, #0x10\n"
+ "add x12, x12, #0x8\n"
+ ".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0xe0a9c2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x9, LSL #2]\n"
+ "add x14, x14, #0x2\n"
"addvl x21, x21, #2\n"
- "add x13, x13, #0x8\n"
+ "cmp x14, x28\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162322 // ld1b { za0h.b[x13, #2] }, p0/Z, [x25, x22]\n"
+ ".inst 0x25346143 // psel p3.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25746142 // psel p2.b, p8.b/Z, p10.b[w12, #6]\n"
+ ".inst 0x25266d21 // psel p1.b, p11.b/Z, p9.b[w14]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"whilelt p9.b, x27, %x[width]\n"
+ ".inst 0xe0160f02 // ld1b { za0h.b[x12, #2] }, p3/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
"incb x27\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0160ae6 // ld1b { za0h.b[x12, #6] }, p2/Z, [x23, x22]\n"
+ "ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- ".inst 0xe0a982a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
- "addvl x21, x21, #2\n"
"incb x22\n"
+ ".inst 0xe0bfc6a0 // st1w { za0v.s[x14] }, p1/Z, [x21, XZR, LSL #2]\n"
"whilelt p8.b, x27, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ ".inst 0xe0a9c2a1 // st1w { za0v.s[x14, #1] }, p0/Z, [x21, x9, LSL #2]\n"
+ "addvl x21, x21, #2\n"
"cbz x28, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162320 // ld1b { za0h.b[x13] }, p0/Z, [x25, x22]\n"
- ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x23, x22]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x25656142 // psel p2.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25246d21 // psel p1.b, p11.b/Z, p9.b[w12]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ ".inst 0xe0162f00 // ld1b { za0h.b[x13] }, p3/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0162ae4 // ld1b { za0h.b[x13, #4] }, p2/Z, [x23, x22]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x26, x26, #0x10\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
".inst 0xe0a982a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
"addvl x21, x21, #2\n"
- "add x13, x13, #0x8\n"
+ "cmp x12, x28\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162320 // ld1b { za0h.b[x13] }, p0/Z, [x25, x22]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x25656142 // psel p2.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25246d21 // psel p1.b, p11.b/Z, p9.b[w12]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x23, x22]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"whilelt p9.b, x27, %x[width]\n"
+ ".inst 0xe0162f00 // ld1b { za0h.b[x13] }, p3/Z, [x24, x22]\n"
+ "ldr x24, [x26, #0x0]\n"
"subs x20, x20, #0x1\n"
+ "incb x27\n"
+ ".inst 0xe0162ae4 // ld1b { za0h.b[x13, #4] }, p2/Z, [x23, x22]\n"
+ "ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "incb x22\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
".inst 0xe0a982a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x9, LSL #2]\n"
"addvl x21, x21, #2\n"
- "incb x27\n"
- "incb x22\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
+ "cbnz x25, 11f\n"
"mov x26, %x[in]\n"
"whilelt p8.b, x27, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x20, [x26, #0x0]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x21, x21, #1\n"
+ "ldr x20, [x26, #0x0]\n"
"cmp x12, x9\n"
- ".inst 0xe0162282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x22]\n"
"add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
+ ".inst 0xe0162282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x22]\n"
"add x13, x13, #0x4\n"
"blt 9b\n"
"whilelt p9.b, x27, %x[width]\n"
@@ -192,11 +192,11 @@ void interleave_block<1, 4, VLType::SME, false>(
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ "add x20, x20, #0x4\n"
".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x10\n"
"blt 10b\n"
"whilelt p8.b, x27, %x[width]\n"
"b 13f\n"
@@ -206,15 +206,15 @@ void interleave_block<1, 4, VLType::SME, false>(
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x21\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
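// The u8 kernel above follows the same schedule as the s8 variant; both emit
// the same <1 x VL, block 4> panel layout: each 32-bit output lane packs four
// consecutive K bytes of one row, and the panel's rows are interleaved per
// 4-byte K block. A hedged scalar reference of that layout (zeroing of
// inactive elements stands in for the predicated /Z loads; names are
// illustrative, not from this patch):
#include <cstddef>
#include <cstdint>

inline uint8_t *interleave_1vl_block4_ref(uint8_t *out,
                                          const uint8_t *const *in, // row pointers
                                          std::size_t panel_height,
                                          std::size_t row_offset,
                                          std::size_t width)
{
    for (std::size_t k = 0; k < width; k += 4)         // one 4-byte K block
    {
        for (std::size_t r = 0; r < panel_height; ++r) // every panel row
        {
            for (std::size_t b = 0; b < 4; ++b)
            {
                *out++ = (k + b < width) ? in[r][row_offset + k + b] : 0;
            }
        }
    }
    return out; // the kernels likewise hand back the advanced 'out' pointer
}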
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp
index daf2d3a100..dbcd18678b 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_block4_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,18 +32,18 @@ void interleave_block<1, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
"mov x23, %x[width]\n"
- "mov z18.b, #0x1\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
"incb x23\n"
- "mov x20, %x[width]\n"
+ "mov z18.b, #0x1\n"
"mov z17.s, #0x0\n"
- "sub x10, x21, #0x1\n"
+ "sub x10, x20, #0x1\n"
"cntw x9\n"
"sub x23, x23, #0x1\n"
- "ands x10, x20, x10\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x10, x10, x21, NE\n"
+ "ands x10, x21, x10\n"
+ "udiv x23, x23, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x10, x10, x20, NE\n"
"lsl x22, %x[height], #0x1\n" // height * 2
"lsl x21, x9, #0x1\n"
"sub x20, x23, #0x1\n"
@@ -51,7 +51,7 @@ void interleave_block<1, 4, VLType::SME, true>(
"whilelt p9.b, XZR, x22\n"
"whilelt p8.b, x21, x22\n"
"mov x28, #0x0\n"
- "ptrue p2.b\n"
+ "ptrue p4.b\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
"and x27, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
"lsr x10, x10, #0x2\n"
@@ -64,132 +64,132 @@ void interleave_block<1, 4, VLType::SME, true>(
"whilelt p8.b, x28, %x[width]\n"
"cbnz %x[first], 1f\n"
"addvl x24, x24, #-1\n"
- "ld1w { z17.s }, p2/Z, [x24]\n"
+ "ld1w { z17.s }, p4/Z, [x24]\n"
"1:" // K loop: Load row sums: End
"mov x23, %x[in]\n"
- "ldr x22, [x23, #0x0]\n"
"mov x12, #0x0\n"
+ "ldr x22, [x23, #0x0]\n"
"ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
"cbz x26, 3f\n"
"2:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01902c0 // ld1b { za0h.b[x12] }, p0/Z, [x22, x25]\n"
+ ".inst 0x25246141 // psel p1.b, p8.b/Z, p10.b[w12]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
+ ".inst 0xe01906c0 // ld1b { za0h.b[x12] }, p1/Z, [x22, x25]\n"
"ldr x22, [x23, #0x0]\n"
".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x26, LSL #2\n"
"ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
+ "cmp x12, x26, LSL #2\n"
"blt 2b\n"
"3:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01902c0 // ld1b { za0h.b[x12] }, p0/Z, [x22, x25]\n"
+ ".inst 0x25246141 // psel p1.b, p8.b/Z, p10.b[w12]\n"
".inst 0x25646140 // psel p0.b, p8.b/Z, p10.b[w12, #4]\n"
"mov x23, %x[in]\n"
- ".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
+ "incb x28\n"
+ ".inst 0xe01906c0 // ld1b { za0h.b[x12] }, p1/Z, [x22, x25]\n"
"ldr x22, [x23, #0x0]\n"
- "incb x25\n"
+ ".inst 0xe01902a4 // ld1b { za0h.b[x12, #4] }, p0/Z, [x21, x25]\n"
"ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
- "incb x28\n"
+ "incb x25\n"
"cbz x20, 9f\n"
"mov x20, x20\n"
"4:" // K loop: Main loop
"whilelt p8.b, x28, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ "mov x14, #0x0\n"
"cbz x26, 6f\n"
"5:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0x25346143 // psel p3.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25746142 // psel p2.b, p8.b/Z, p10.b[w12, #6]\n"
+ ".inst 0x25266d21 // psel p1.b, p11.b/Z, p9.b[w14]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
+ ".inst 0xe0190ec2 // ld1b { za0h.b[x12, #2] }, p3/Z, [x22, x25]\n"
"ldr x22, [x23, #0x0]\n"
- ".inst 0xe01922a6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- "udot z17.s, z16.b, z18.b\n"
+ ".inst 0xe0190aa6 // ld1b { za0h.b[x12, #6] }, p2/Z, [x21, x25]\n"
"ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828830 // mova z16.s, p2/M, za0v.s[x12, #1]\n"
- ".inst 0xe0a98301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x26\n"
- "udot z17.s, z16.b, z18.b\n"
"add x23, x23, #0x10\n"
+ "add x12, x12, #0x8\n"
+ ".inst 0xc082d010 // mova z16.s, p4/M, za0v.s[x14]\n"
+ ".inst 0xe0bfc700 // st1w { za0v.s[x14] }, p1/Z, [x24, XZR, LSL #2]\n"
+ "udot z17.s, z16.b, z18.b\n"
+ ".inst 0xe0a9c301 // st1w { za0v.s[x14, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ ".inst 0xc082d030 // mova z16.s, p4/M, za0v.s[x14, #1]\n"
+ "add x14, x14, #0x2\n"
+ "cmp x14, x26\n"
"addvl x24, x24, #2\n"
- "add x13, x13, #0x8\n"
+ "udot z17.s, z16.b, z18.b\n"
"blt 5b\n"
"6:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01922c2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x22, x25]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0xe01922a6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- "udot z17.s, z16.b, z18.b\n"
+ ".inst 0x25346143 // psel p3.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25746142 // psel p2.b, p8.b/Z, p10.b[w12, #6]\n"
+ ".inst 0x25266d21 // psel p1.b, p11.b/Z, p9.b[w14]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
"mov x23, %x[in]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828830 // mova z16.s, p2/M, za0v.s[x12, #1]\n"
- "ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8700 // st1w { za0v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
"whilelt p9.b, x28, %x[width]\n"
+ ".inst 0xe0190ec2 // ld1b { za0h.b[x12, #2] }, p3/Z, [x22, x25]\n"
+ "ldr x22, [x23, #0x0]\n"
"incb x28\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0190aa6 // ld1b { za0h.b[x12, #6] }, p2/Z, [x21, x25]\n"
+ "ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
- ".inst 0xe0a98301 // st1w { za0v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
- "udot z17.s, z16.b, z18.b\n"
- "addvl x24, x24, #2\n"
"incb x25\n"
+ ".inst 0xc082d010 // mova z16.s, p4/M, za0v.s[x14]\n"
+ ".inst 0xe0bfc700 // st1w { za0v.s[x14] }, p1/Z, [x24, XZR, LSL #2]\n"
"whilelt p8.b, x28, %x[width]\n"
- "mov x13, #0x0\n"
"mov x12, #0x0\n"
+ "udot z17.s, z16.b, z18.b\n"
+ ".inst 0xc082d030 // mova z16.s, p4/M, za0v.s[x14, #1]\n"
+ ".inst 0xe0a9c301 // st1w { za0v.s[x14, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ "addvl x24, x24, #2\n"
+ "udot z17.s, z16.b, z18.b\n"
"cbz x26, 8f\n"
"7:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x25]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x25656142 // psel p2.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25246d21 // psel p1.b, p11.b/Z, p9.b[w12]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ ".inst 0xe0192ec0 // ld1b { za0h.b[x13] }, p3/Z, [x22, x25]\n"
"ldr x22, [x23, #0x0]\n"
- ".inst 0xe01922a4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- "udot z17.s, z16.b, z18.b\n"
+ ".inst 0xe0192aa4 // ld1b { za0h.b[x13, #4] }, p2/Z, [x21, x25]\n"
"ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828930 // mova z16.s, p2/M, za2v.s[x12, #1]\n"
+ "add x23, x23, #0x10\n"
+ "add x13, x13, #0x8\n"
+ ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
+ ".inst 0xe0bf8708 // st1w { za2v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
+ "udot z17.s, z16.b, z18.b\n"
".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ ".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
"add x12, x12, #0x2\n"
"cmp x12, x26\n"
- "udot z17.s, z16.b, z18.b\n"
- "add x23, x23, #0x10\n"
"addvl x24, x24, #2\n"
- "add x13, x13, #0x8\n"
+ "udot z17.s, z16.b, z18.b\n"
"blt 7b\n"
"8:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01922c0 // ld1b { za0h.b[x13] }, p0/Z, [x22, x25]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0xe01922a4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x21, x25]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
- "udot z17.s, z16.b, z18.b\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x25656142 // psel p2.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25246d21 // psel p1.b, p11.b/Z, p9.b[w12]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
"mov x23, %x[in]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- "ldr x22, [x23, #0x0]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0828930 // mova z16.s, p2/M, za2v.s[x12, #1]\n"
- "ldr x21, [x23, #0x8]\n"
- ".inst 0xe0bf8708 // st1w { za2v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
"whilelt p9.b, x28, %x[width]\n"
+ ".inst 0xe0192ec0 // ld1b { za0h.b[x13] }, p3/Z, [x22, x25]\n"
+ "ldr x22, [x23, #0x0]\n"
"subs x20, x20, #0x1\n"
+ "incb x28\n"
+ ".inst 0xe0192aa4 // ld1b { za0h.b[x13, #4] }, p2/Z, [x21, x25]\n"
+ "ldr x21, [x23, #0x8]\n"
"add x23, x23, #0x10\n"
- ".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
+ "incb x25\n"
+ ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
+ ".inst 0xe0bf8708 // st1w { za2v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
"udot z17.s, z16.b, z18.b\n"
+ ".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
+ ".inst 0xe0a98309 // st1w { za2v.s[x12, #1] }, p0/Z, [x24, x9, LSL #2]\n"
"addvl x24, x24, #2\n"
- "incb x28\n"
- "incb x25\n"
+ "udot z17.s, z16.b, z18.b\n"
"bgt 4b\n"
"9:" // K loop: Tails
"cbnz x27, 12f\n"
@@ -198,17 +198,17 @@ void interleave_block<1, 4, VLType::SME, true>(
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- "ldr x20, [x23, #0x0]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
"udot z17.s, z16.b, z18.b\n"
- ".inst 0xe0192282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x25]\n"
+ ".inst 0xe0bf8700 // st1w { za0v.s[x12] }, p1/Z, [x24, XZR, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x24, x24, #1\n"
+ "ldr x20, [x23, #0x0]\n"
"cmp x12, x9\n"
"add x23, x23, #0x8\n"
- "addvl x24, x24, #1\n"
+ ".inst 0xe0192282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x25]\n"
"add x13, x13, #0x4\n"
"blt 10b\n"
"whilelt p9.b, x28, %x[width]\n"
@@ -217,13 +217,13 @@ void interleave_block<1, 4, VLType::SME, true>(
"mov x12, #0x0\n"
"11:" // K loop: Tails: Even: Second
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
+ "add x20, x20, #0x4\n"
+ "udot z17.s, z16.b, z18.b\n"
".inst 0xe0bf8308 // st1w { za2v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "udot z17.s, z16.b, z18.b\n"
"addvl x24, x24, #1\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x10\n"
"blt 11b\n"
"whilelt p8.b, x28, %x[width]\n"
"b 14f\n"
@@ -231,21 +231,21 @@ void interleave_block<1, 4, VLType::SME, true>(
"mov x12, #0x0\n"
"13:" // K loop: Tails: Odd: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
+ "udot z17.s, z16.b, z18.b\n"
".inst 0xe0bf8300 // st1w { za0v.s[x12] }, p0/Z, [x24, XZR, LSL #2]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
- "udot z17.s, z16.b, z18.b\n"
"addvl x24, x24, #1\n"
+ "cmp x12, x10\n"
"blt 13b\n"
"14:" // K loop: End
- "st1w { z17.s }, p2, [x24]\n"
+ "st1w { z17.s }, p4, [x24]\n"
"addvl x24, x24, #1\n"
- "mov %x[out], x24\n"
".inst 0xd503467f // SMSTOP\n"
+ "mov %x[out], x24\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
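// The row sums produced by these summing interleaves serve the usual
// zero-point correction of quantized GEMM (a general fact about s8/u8 GEMM,
// not something stated in this patch): expanding sum_k (a[k]-za)*(b[k]-zb)
// gives dot(a,b) - zb*sum(a) - za*sum(b) + K*za*zb, so a per-row sum of the
// interleaved operand lets the correction be applied per output row. A
// hedged illustration with hypothetical names:
#include <cstdint>

inline int32_t zero_point_corrected(int32_t raw_dot, // dot(a, b) over K
                                    int32_t row_sum, // sum(a) over K
                                    int32_t col_sum, // sum(b) over K
                                    int32_t depth_k, // K
                                    int32_t za,      // zero point of a
                                    int32_t zb)      // zero point of b
{
    return raw_dot - zb * row_sum - za * col_sum + depth_k * za * zb;
}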
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp
index 274f69f370..591c08dcb2 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,25 +33,25 @@ void interleave_block<1, 1, VLType::SME, false>(
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
"mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
"inch x21\n"
"cnth x11\n"
"sub x21, x21, #0x1\n"
- "udiv x21, x21, x11\n" // n_passes = ceildiv(width, VL<T>)
- "mov x20, %x[width]\n"
"sub x10, x11, #0x1\n"
- "sub x9, x21, #0x1\n"
+ "udiv x21, x21, x11\n" // n_passes = ceildiv(width, VL<T>)
"ands x10, x20, x10\n"
+ "sub x9, x21, #0x1\n"
"sub x28, x11, #0x2\n"
"lsl x20, %x[height], #0x1\n" // height * 2
"mov x27, #0x0\n"
"mov x26, %x[in]\n"
"lsr x9, x9, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x26, #0x0]\n"
- "and x24, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "and x25, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
"csel x10, x10, x11, NE\n"
- "ldr x23, [x26, #0x8]\n"
+ "ldr x24, [x26, #0x0]\n"
"ptrue p11.h\n"
"whilelt p10.h, XZR, x20\n"
+ "ldr x23, [x26, #0x8]\n"
"mov x22, %x[row_offset]\n"
"mov x21, %x[out]\n"
"whilelt p9.h, x27, %x[width]\n"
@@ -60,119 +60,119 @@ void interleave_block<1, 1, VLType::SME, false>(
"mov x12, #0x0\n"
"cbz x28, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25286141 // psel p1.h, p8.h/Z, p10.h[w12]\n"
".inst 0x25386140 // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0560700 // ld1h { za0h.h[x12] }, p1/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe05602e1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x23, x22, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "cmp x12, x28\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25286141 // psel p1.h, p8.h/Z, p10.h[w12]\n"
".inst 0x25386140 // psel p0.h, p8.h/Z, p10.h[w12, #1]\n"
"mov x26, %x[in]\n"
+ "inch x27\n"
+ ".inst 0xe0560700 // ld1h { za0h.h[x12] }, p1/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe05602e1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x23, x22, LSL #1]\n"
- "ldr x25, [x26, #0x0]\n"
- "inch x22\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- "inch x27\n"
+ "inch x22\n"
"cbz x9, 8f\n"
"mov x20, x9\n"
"3:" // K loop: Main loop
"whilelt p8.h, x27, %x[width]\n"
- "mov x12, #0x0\n"
+ "mov x13, #0x0\n"
"cbz x28, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
+ ".inst 0x25296143 // psel p3.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25396142 // psel p2.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0x25296d21 // psel p1.h, p11.h/Z, p9.h[w13]\n"
+ ".inst 0x25396d20 // psel p0.h, p11.h/Z, p9.h[w13, #1]\n"
+ ".inst 0xe0562f08 // ld1h { za1h.h[x13] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0562ae9 // ld1h { za1h.h[x13, #1] }, p2/Z, [x23, x22, LSL #1]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
- ".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
- ".inst 0xe06b82a1 // st1h { za0v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28\n"
+ ".inst 0xe07fa6a0 // st1h { za0v.h[x13] }, p1/Z, [x21, XZR, LSL #1]\n"
"add x26, x26, #0x10\n"
+ ".inst 0xe06ba2a1 // st1h { za0v.h[x13, #1] }, p0/Z, [x21, x11, LSL #1]\n"
+ "add x13, x13, #0x2\n"
"addvl x21, x21, #2\n"
+ "cmp x13, x28\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560328 // ld1h { za1h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
+ ".inst 0x25296143 // psel p3.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25396142 // psel p2.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0x25296d21 // psel p1.h, p11.h/Z, p9.h[w13]\n"
+ ".inst 0x25396d20 // psel p0.h, p11.h/Z, p9.h[w13, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e9 // ld1h { za1h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
- ".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
"whilelt p9.h, x27, %x[width]\n"
+ ".inst 0xe0562f08 // ld1h { za1h.h[x13] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
"inch x27\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0562ae9 // ld1h { za1h.h[x13, #1] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe07fa6a0 // st1h { za0v.h[x13] }, p1/Z, [x21, XZR, LSL #1]\n"
"add x26, x26, #0x10\n"
- ".inst 0xe06b82a1 // st1h { za0v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
+ ".inst 0xe06ba2a1 // st1h { za0v.h[x13, #1] }, p0/Z, [x21, x11, LSL #1]\n"
"addvl x21, x21, #2\n"
"inch x22\n"
"whilelt p8.h, x27, %x[width]\n"
- "mov x12, #0x0\n"
"cbz x28, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- ".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0x25286143 // psel p3.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25386142 // psel p2.h, p8.h/Z, p10.h[w12, #1]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+ ".inst 0xe0560f00 // ld1h { za0h.h[x12] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0560ae1 // ld1h { za0h.h[x12, #1] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe07f86a8 // st1h { za1v.h[x12] }, p1/Z, [x21, XZR, LSL #1]\n"
+ "add x26, x26, #0x10\n"
".inst 0xe06b82a9 // st1h { za1v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
"addvl x21, x21, #2\n"
+ "cmp x12, x28\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
- ".inst 0xe0560320 // ld1h { za0h.h[x12] }, p0/Z, [x25, x22, LSL #1]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25386141 // psel p1.h, p8.h/Z, p10.h[w12, #1]\n"
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe05606e1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x23, x22, LSL #1]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
+ ".inst 0x25286143 // psel p3.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25386142 // psel p2.h, p8.h/Z, p10.h[w12, #1]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
".inst 0x25386d20 // psel p0.h, p11.h/Z, p9.h[w12, #1]\n"
+ "mov x26, %x[in]\n"
"whilelt p9.h, x27, %x[width]\n"
+ ".inst 0xe0560f00 // ld1h { za0h.h[x12] }, p3/Z, [x24, x22, LSL #1]\n"
+ "ldr x24, [x26, #0x0]\n"
"subs x20, x20, #0x1\n"
+ "inch x27\n"
+ ".inst 0xe0560ae1 // ld1h { za0h.h[x12, #1] }, p2/Z, [x23, x22, LSL #1]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe07f86a8 // st1h { za1v.h[x12] }, p1/Z, [x21, XZR, LSL #1]\n"
"add x26, x26, #0x10\n"
".inst 0xe06b82a9 // st1h { za1v.h[x12, #1] }, p0/Z, [x21, x11, LSL #1]\n"
"addvl x21, x21, #2\n"
- "inch x27\n"
"inch x22\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
+ "cbnz x25, 11f\n"
"mov x26, %x[in]\n"
"whilelt p8.h, x27, %x[width]\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
- ".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
- "ldr x20, [x26, #0x0]\n"
+ ".inst 0x25286d21 // psel p1.h, p11.h/Z, p9.h[w12]\n"
".inst 0x25286140 // psel p0.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0xe07f86a0 // st1h { za0v.h[x12] }, p1/Z, [x21, XZR, LSL #1]\n"
+ "addvl x21, x21, #1\n"
+ "ldr x20, [x26, #0x0]\n"
+ "add x26, x26, #0x8\n"
".inst 0xe0560288 // ld1h { za1h.h[x12] }, p0/Z, [x20, x22, LSL #1]\n"
"add x12, x12, #0x1\n"
"cmp x12, x11\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
"blt 9b\n"
"whilelt p9.h, x27, %x[width]\n"
"whilelt p8.h, x27, %x[width]\n"
@@ -181,8 +181,8 @@ void interleave_block<1, 1, VLType::SME, false>(
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
".inst 0xe07f82a8 // st1h { za1v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x10\n"
"blt 10b\n"
"whilelt p8.h, x27, %x[width]\n"
"b 13f\n"
@@ -192,15 +192,15 @@ void interleave_block<1, 1, VLType::SME, false>(
".inst 0x25286d20 // psel p0.h, p11.h/Z, p9.h[w12]\n"
".inst 0xe07f82a0 // st1h { za0v.h[x12] }, p0/Z, [x21, XZR, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x10\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x10\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x21\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
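
A scalar model of the loop bookkeeping that this kernel's prologue computes, taken directly from the formulas in its assembly comments (ceildiv pass count, halved loop count, odd-tail flag, and the ands/csel tail length). This is an illustrative sketch, not code from the patch; vl stands in for the cnth/cntw streaming vector length.

#include <cstddef>

struct KLoopCounts {
    std::size_t n_passes; // ceildiv(width, vl), as the udiv comment states
    std::size_t n_loops;  // (n_passes - 1) / 2: main-loop iterations
    bool odd_tail;        // n_passes & 0x1: one unpaired trailing pass
    std::size_t tail;     // width % vl, or vl when width divides evenly
};

KLoopCounts k_loop_counts(std::size_t width, std::size_t vl) {
    KLoopCounts c;
    c.n_passes = (width + vl - 1) / vl;   // the inch / sub #1 / udiv sequence
    c.n_loops  = (c.n_passes - 1) / 2;    // the sub #1 / lsr #1 sequence
    c.odd_tail = (c.n_passes & 1) != 0;   // the "and ..., #0x1" instruction
    std::size_t rem = width & (vl - 1);   // ands: valid because SME streaming
                                          // vector lengths are powers of two
    c.tail = rem ? rem : vl;              // csel ..., NE
    return c;
}
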
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp
index ab290649fd..b76ec57d22 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave1VL_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,24 +32,24 @@ void interleave_block<1, 1, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x22, %x[width]\n"
- "incw x22\n"
- "cntw x10\n"
- "sub x22, x22, #0x1\n"
- "udiv x22, x22, x10\n" // n_passes = ceildiv(width, VL<T>)
"mov x21, %x[width]\n"
+ "mov x20, %x[width]\n"
+ "incw x21\n"
+ "cntw x10\n"
+ "sub x21, x21, #0x1\n"
"sub x9, x10, #0x1\n"
- "sub x20, x22, #0x1\n"
- "ands x9, x21, x9\n"
+ "udiv x21, x21, x10\n" // n_passes = ceildiv(width, VL<T>)
+ "ands x9, x20, x9\n"
+ "sub x20, x21, #0x1\n"
"sub x28, x10, #0x2\n"
"mov x27, #0x0\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x24, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x23, [x26, #0x8]\n"
+ "and x25, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "ldr x24, [x26, #0x0]\n"
"csel x9, x9, x10, NE\n"
"ptrue p11.s\n"
+ "ldr x23, [x26, #0x8]\n"
"whilelt p10.s, XZR, %x[height]\n"
"mov x22, %x[row_offset]\n"
"mov x21, %x[out]\n"
@@ -59,119 +59,119 @@ void interleave_block<1, 1, VLType::SME, false>(
"mov x12, #0x0\n"
"cbz x28, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
+ ".inst 0x25306141 // psel p1.s, p8.s/Z, p10.s[w12]\n"
".inst 0x25706140 // psel p0.s, p8.s/Z, p10.s[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
+ ".inst 0xe0960700 // ld1w { za0h.s[x12] }, p1/Z, [x24, x22, LSL #2]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe09602e1 // ld1w { za0h.s[x12, #1] }, p0/Z, [x23, x22, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ "cmp x12, x28\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
+ ".inst 0x25306141 // psel p1.s, p8.s/Z, p10.s[w12]\n"
".inst 0x25706140 // psel p0.s, p8.s/Z, p10.s[w12, #1]\n"
"mov x26, %x[in]\n"
+ "incw x27\n"
+ ".inst 0xe0960700 // ld1w { za0h.s[x12] }, p1/Z, [x24, x22, LSL #2]\n"
+ "ldr x24, [x26, #0x0]\n"
".inst 0xe09602e1 // ld1w { za0h.s[x12, #1] }, p0/Z, [x23, x22, LSL #2]\n"
- "ldr x25, [x26, #0x0]\n"
- "incw x22\n"
"ldr x23, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
- "incw x27\n"
+ "incw x22\n"
"cbz x20, 8f\n"
"mov x20, x20\n"
"3:" // K loop: Main loop
"whilelt p8.s, x27, %x[width]\n"
- "mov x12, #0x0\n"
+ "mov x13, #0x0\n"
"cbz x28, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960328 // ld1w { za2h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- ".inst 0x25706141 // psel p1.s, p8.s/Z, p10.s[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe09606e9 // ld1w { za2h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
+ ".inst 0x25316143 // psel p3.s, p8.s/Z, p10.s[w13]\n"
+ ".inst 0x25716142 // psel p2.s, p8.s/Z, p10.s[w13, #1]\n"
+ ".inst 0x25316d21 // psel p1.s, p11.s/Z, p9.s[w13]\n"
+ ".inst 0x25716d20 // psel p0.s, p11.s/Z, p9.s[w13, #1]\n"
+ ".inst 0xe0962f08 // ld1w { za2h.s[x13] }, p3/Z, [x24, x22, LSL #2]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0962ae9 // ld1w { za2h.s[x13, #1] }, p2/Z, [x23, x22, LSL #2]\n"
"ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0aa82a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28\n"
+ ".inst 0xe0bfa6a0 // st1w { za0v.s[x13] }, p1/Z, [x21, XZR, LSL #2]\n"
"add x26, x26, #0x10\n"
+ ".inst 0xe0aaa2a1 // st1w { za0v.s[x13, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ "add x13, x13, #0x2\n"
"addvl x21, x21, #2\n"
+ "cmp x13, x28\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960328 // ld1w { za2h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
+ ".inst 0x25316143 // psel p3.s, p8.s/Z, p10.s[w13]\n"
+ ".inst 0x25716142 // psel p2.s, p8.s/Z, p10.s[w13, #1]\n"
+ ".inst 0x25316d21 // psel p1.s, p11.s/Z, p9.s[w13]\n"
+ ".inst 0x25716d20 // psel p0.s, p11.s/Z, p9.s[w13, #1]\n"
"mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25706141 // psel p1.s, p8.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe09606e9 // ld1w { za2h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"whilelt p9.s, x27, %x[width]\n"
+ ".inst 0xe0962f08 // ld1w { za2h.s[x13] }, p3/Z, [x24, x22, LSL #2]\n"
+ "ldr x24, [x26, #0x0]\n"
"incw x27\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0962ae9 // ld1w { za2h.s[x13, #1] }, p2/Z, [x23, x22, LSL #2]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe0bfa6a0 // st1w { za0v.s[x13] }, p1/Z, [x21, XZR, LSL #2]\n"
"add x26, x26, #0x10\n"
- ".inst 0xe0aa82a1 // st1w { za0v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
+ ".inst 0xe0aaa2a1 // st1w { za0v.s[x13, #1] }, p0/Z, [x21, x10, LSL #2]\n"
"addvl x21, x21, #2\n"
"incw x22\n"
"whilelt p8.s, x27, %x[width]\n"
- "mov x12, #0x0\n"
"cbz x28, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- ".inst 0x25706141 // psel p1.s, p8.s/Z, p10.s[w12, #1]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe09606e1 // ld1w { za0h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0x25306143 // psel p3.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0x25706142 // psel p2.s, p8.s/Z, p10.s[w12, #1]\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0960f00 // ld1w { za0h.s[x12] }, p3/Z, [x24, x22, LSL #2]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe0960ae1 // ld1w { za0h.s[x12, #1] }, p2/Z, [x23, x22, LSL #2]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
+ "add x26, x26, #0x10\n"
".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x28\n"
- "add x26, x26, #0x10\n"
"addvl x21, x21, #2\n"
+ "cmp x12, x28\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
- ".inst 0xe0960320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x22, LSL #2]\n"
- "mov x26, %x[in]\n"
- "ldr x25, [x26, #0x0]\n"
- ".inst 0x25706141 // psel p1.s, p8.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe09606e1 // ld1w { za0h.s[x12, #1] }, p1/Z, [x23, x22, LSL #2]\n"
- "ldr x23, [x26, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
+ ".inst 0x25306143 // psel p3.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0x25706142 // psel p2.s, p8.s/Z, p10.s[w12, #1]\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "mov x26, %x[in]\n"
"whilelt p9.s, x27, %x[width]\n"
+ ".inst 0xe0960f00 // ld1w { za0h.s[x12] }, p3/Z, [x24, x22, LSL #2]\n"
+ "ldr x24, [x26, #0x0]\n"
"subs x20, x20, #0x1\n"
+ "incw x27\n"
+ ".inst 0xe0960ae1 // ld1w { za0h.s[x12, #1] }, p2/Z, [x23, x22, LSL #2]\n"
+ "ldr x23, [x26, #0x8]\n"
+ ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
"add x26, x26, #0x10\n"
".inst 0xe0aa82a9 // st1w { za2v.s[x12, #1] }, p0/Z, [x21, x10, LSL #2]\n"
"addvl x21, x21, #2\n"
- "incw x27\n"
"incw x22\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
+ "cbnz x25, 11f\n"
"mov x26, %x[in]\n"
"whilelt p8.s, x27, %x[width]\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- "ldr x20, [x26, #0x0]\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
+ "addvl x21, x21, #1\n"
+ "ldr x20, [x26, #0x0]\n"
+ "add x26, x26, #0x8\n"
".inst 0xe0960288 // ld1w { za2h.s[x12] }, p0/Z, [x20, x22, LSL #2]\n"
"add x12, x12, #0x1\n"
"cmp x12, x10\n"
- "add x26, x26, #0x8\n"
- "addvl x21, x21, #1\n"
"blt 9b\n"
"whilelt p9.s, x27, %x[width]\n"
"whilelt p8.s, x27, %x[width]\n"
@@ -180,8 +180,8 @@ void interleave_block<1, 1, VLType::SME, false>(
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x9\n"
"blt 10b\n"
"whilelt p8.s, x27, %x[width]\n"
"b 13f\n"
@@ -191,15 +191,15 @@ void interleave_block<1, 1, VLType::SME, false>(
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x9\n"
"addvl x21, x21, #1\n"
+ "cmp x12, x9\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x21\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
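
Roughly, the labelled blocks in these 1VL kernels form a double-buffered K loop over the two ZA halves (za0/za1 in the fp16 variant, za0/za2 here): "Charge" primes one half, each "Main loop" iteration retires two passes by loading one half while the other half's columns stream out, and the "Tails" blocks drain whatever remains depending on odd_tail. A sketch of that shape with stand-in callbacks, not the kernel's actual interface:

#include <cstddef>
#include <functional>

void k_loop(std::size_t n_loops, bool odd_tail,
            const std::function<void(int)> &load_rows_into,
            const std::function<void(int)> &store_columns_of) {
    load_rows_into(0);                   // "Charge": prime half 0 before the loop
    for (std::size_t l = 0; l < n_loops; ++l) {
        load_rows_into(1);               // "First":  ld1 into the other half's rows
        store_columns_of(0);             //           st1 from half 0's columns
        load_rows_into(0);               // "Second": ld1 back into half 0
        store_columns_of(1);             //           st1 from half 1's columns
    }
    if (odd_tail) {
        store_columns_of(0);             // "Tails: Odd": drain the primed half
    } else {
        store_columns_of(0);             // "Tails: Even: First", interleaved with
        load_rows_into(1);               // the last loads in the real kernel
        store_columns_of(1);             // "Tails: Even: Second"
    }
}
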
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
index dc6d12b61e..a07831e7bd 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,66 +32,66 @@ void interleave_block<2, 1, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x28\n"
- "cmp %x[height], x28\n"
- "cnth x27\n"
- "csel x28, %x[height], x28, LT\n"
- "mov x26, #0x0\n"
+ "mov x28, #0x0\n"
+ "mov x27, %x[row_offset]\n"
+ "cnth x26\n"
+ "cnth x25\n"
+ "cmp %x[height], x26\n"
"ptrue p13.s\n"
- "sub x28, x28, #0x1\n"
+ "csel x26, %x[height], x26, LT\n"
"whilelt p12.h, XZR, %x[height]\n"
- "whilelt p11.h, x27, %x[height]\n"
- "mov x25, %x[row_offset]\n"
+ "sub x26, x26, #0x1\n"
+ "whilelt p11.h, x25, %x[height]\n"
"mov x24, %x[out]\n"
- "whilelt p10.h, x26, %x[width]\n"
- "whilelt p9.h, x26, %x[width]\n"
- "whilelt p8.h, x26, %x[width]\n"
+ "whilelt p10.h, x28, %x[width]\n"
+ "whilelt p9.h, x28, %x[width]\n"
+ "whilelt p8.h, x28, %x[width]\n"
"1:" // Width loop
"add x23, %x[in], XZR, LSL #3\n"
- "add x20, %x[in], x27, LSL #3\n"
+ "add x20, %x[in], x25, LSL #3\n"
+ "mov x13, #0x0\n"
"ldr x22, [x23], #0x8\n"
- "mov x12, #0x0\n"
"ldr x21, [x20], #0x8\n"
- "cbz x28, 3f\n"
+ "cbz x26, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe05906c0 // ld1h { za0h.h[x12] }, p1/Z, [x22, x25, LSL #1]\n"
+ ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0xe05b26c0 // ld1h { za0h.h[x13] }, p1/Z, [x22, x27, LSL #1]\n"
"ldr x22, [x23], #0x8\n"
- ".inst 0xe05902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28, LSL #1\n"
+ ".inst 0xe05b22a8 // ld1h { za1h.h[x13] }, p0/Z, [x21, x27, LSL #1]\n"
+ "add x13, x13, #0x2\n"
"ldr x21, [x20], #0x8\n"
+ "cmp x13, x26, LSL #1\n"
"blt 2b\n"
"3:" // Loads: Tail
- "sub x20, %x[width], x26\n"
- ".inst 0x25286580 // psel p0.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0xe05902c0 // ld1h { za0h.h[x12] }, p0/Z, [x22, x25, LSL #1]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- "cmp x20, x27\n"
- ".inst 0xe05902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
+ "sub x20, %x[width], x28\n"
"mov x12, #0x0\n"
- "csel x20, x20, x27, LT\n"
+ "cmp x20, x25\n"
+ ".inst 0xe05b26c0 // ld1h { za0h.h[x13] }, p1/Z, [x22, x27, LSL #1]\n"
+ "csel x20, x20, x25, LT\n"
+ ".inst 0xe05b22a8 // ld1h { za1h.h[x13] }, p0/Z, [x21, x27, LSL #1]\n"
"4:" // Stores: Loop
+ ".inst 0x25287541 // psel p1.h, p13.h/Z, p10.h[w12]\n"
".inst 0x25287540 // psel p0.h, p13.h/Z, p10.h[w12]\n"
- ".inst 0xe07f8300 // st1h { za0v.h[x12] }, p0/Z, [x24, XZR, LSL #1]\n"
- ".inst 0x25287540 // psel p0.h, p13.h/Z, p10.h[w12]\n"
- ".inst 0xe07b8308 // st1h { za1v.h[x12] }, p0/Z, [x24, x27, LSL #1]\n"
+ ".inst 0xe07f8700 // st1h { za0v.h[x12] }, p1/Z, [x24, XZR, LSL #1]\n"
+ ".inst 0xe0798308 // st1h { za1v.h[x12] }, p0/Z, [x24, x25, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
"addvl x24, x24, #4\n"
+ "cmp x12, x20\n"
"blt 4b\n"
- "inch x26\n"
- "whilelt p10.h, x26, %x[width]\n"
- "whilelt p9.h, x26, %x[width]\n"
- "whilelt p8.h, x26, %x[width]\n"
- "inch x25\n"
+ "inch x28\n"
+ "inch x27\n"
+ "whilelt p10.h, x28, %x[width]\n"
+ "whilelt p9.h, x28, %x[width]\n"
+ "whilelt p8.h, x28, %x[width]\n"
"b.any 1b\n"
"mov %x[out], x24\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp
index d9189258c1..01dfecc4ef 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,269 +32,264 @@ void interleave_block<2, 2, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x22\n"
+ "mov x22, %x[width]\n"
"mov x21, %x[width]\n"
- "inch x21\n"
- "mov x20, %x[width]\n"
- "sub x17, x22, #0x1\n"
- "sub x21, x21, #0x1\n"
- "ands x17, x20, x17\n"
- "cntw x16\n"
- "udiv x21, x21, x22\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x22, NE\n"
- "sub x13, x21, #0x1\n"
- "add x17, x17, #0x1\n"
- "sub x15, x16, #0x2\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x20, x16, #0x1\n"
- "mov x14, #0x0\n"
+ "cnth x20\n"
+ "inch x22\n"
+ "sub x7, x20, #0x1\n"
+ "sub x22, x22, #0x1\n"
+ "ands x7, x21, x7\n"
+ "cntw x8\n"
+ "udiv x22, x22, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x7, x7, x20, NE\n"
+ "sub x13, x22, #0x1\n"
+ "add x7, x7, #0x1\n"
+ "sub x17, x8, #0x2\n"
+ "lsl x21, %x[height], #0x1\n" // height * 2
+ "lsl x20, x8, #0x1\n"
+ "mov x16, #0x0\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- "cntw x28, ALL, MUL #2\n"
- "cntw x27, ALL, MUL #3\n"
- "ldr x26, [x10, #0x0]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ "cntw x9, ALL, MUL #2\n"
+ "cntw x28, ALL, MUL #3\n"
+ "ldr x27, [x11, #0x0]\n"
"lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x25, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "and x26, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "ldr x25, [x10, #0x0]\n"
+ "lsr x7, x7, #0x1\n"
+ "ptrue p12.s\n"
"ldr x24, [x11, #0x8]\n"
- "lsr x17, x17, #0x1\n"
- "ptrue p13.s\n"
+ "whilelt p11.h, XZR, x21\n"
+ "whilelt p10.h, x20, x21\n"
"ldr x21, [x10, #0x8]\n"
- "whilelt p12.h, XZR, x22\n"
- "whilelt p11.h, x20, x22\n"
"mov x23, %x[row_offset]\n"
"mov x22, %x[out]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p9.h, x16, %x[width]\n"
+ "whilelt p8.h, x16, %x[width]\n"
"add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
"mov x12, #0x0\n"
- "cbz x15, 2f\n"
+ "cbz x17, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0570520 // ld1h { za0h.h[x12] }, p1/Z, [x9, x23, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0570348 // ld1h { za1h.h[x12] }, p0/Z, [x26, x23, LSL #1]\n"
- ".inst 0x25686581 // psel p1.h, p9.h/Z, p12.h[w12, #2]\n"
- ".inst 0x25686160 // psel p0.h, p8.h/Z, p11.h[w12, #2]\n"
- "ldr x26, [x10, #0x0]\n"
+ ".inst 0x25286163 // psel p3.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0x25286142 // psel p2.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25686161 // psel p1.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
+ ".inst 0xe0570f60 // ld1h { za0h.h[x12] }, p3/Z, [x27, x23, LSL #1]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0570b28 // ld1h { za1h.h[x12] }, p2/Z, [x25, x23, LSL #1]\n"
+ "ldr x25, [x10, #0x0]\n"
".inst 0xe0570702 // ld1h { za0h.h[x12, #2] }, p1/Z, [x24, x23, LSL #1]\n"
"ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
".inst 0xe05702aa // ld1h { za1h.h[x12, #2] }, p0/Z, [x21, x23, LSL #1]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x15, LSL #1\n"
"ldr x21, [x10, #0x8]\n"
"add x10, x10, #0x10\n"
+ "cmp x12, x17, LSL #1\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0570520 // ld1h { za0h.h[x12] }, p1/Z, [x9, x23, LSL #1]\n"
- ".inst 0xe0570348 // ld1h { za1h.h[x12] }, p0/Z, [x26, x23, LSL #1]\n"
- ".inst 0x25686581 // psel p1.h, p9.h/Z, p12.h[w12, #2]\n"
- ".inst 0x25686160 // psel p0.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0x25286163 // psel p3.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0x25286142 // psel p2.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25686161 // psel p1.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0570f60 // ld1h { za0h.h[x12] }, p3/Z, [x27, x23, LSL #1]\n"
+ "ldr x27, [x11, #0x0]\n"
+ "inch x16\n"
+ ".inst 0xe0570b28 // ld1h { za1h.h[x12] }, p2/Z, [x25, x23, LSL #1]\n"
+ "ldr x25, [x10, #0x0]\n"
".inst 0xe0570702 // ld1h { za0h.h[x12, #2] }, p1/Z, [x24, x23, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe05702aa // ld1h { za1h.h[x12, #2] }, p0/Z, [x21, x23, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- "inch x23\n"
- "inch x14\n"
"ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
+ ".inst 0xe05702aa // ld1h { za1h.h[x12, #2] }, p0/Z, [x21, x23, LSL #1]\n"
"ldr x21, [x10, #0x8]\n"
"add x10, x10, #0x10\n"
+ "inch x23\n"
"cbz x13, 8f\n"
"mov x20, x13\n"
"3:" // K loop: Main loop
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
- "cbz x15, 5f\n"
+ "whilelt p8.h, x16, %x[width]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "cbz x17, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- ".inst 0xe0572521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x23, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0572349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x23, LSL #1]\n"
- ".inst 0x25796580 // psel p0.h, p9.h/Z, p12.h[w13, #3]\n"
- ".inst 0x25796162 // psel p2.h, p8.h/Z, p11.h[w13, #3]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0572303 // ld1h { za0h.h[x13, #3] }, p0/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x253b6160 // psel p0.h, p8.h/Z, p11.h[w15, #1]\n"
+ ".inst 0x253b6142 // psel p2.h, p8.h/Z, p10.h[w15, #1]\n"
+ ".inst 0x257b6161 // psel p1.h, p8.h/Z, p11.h[w15, #3]\n"
+ ".inst 0x257b6143 // psel p3.h, p8.h/Z, p10.h[w15, #3]\n"
+ ".inst 0xe0576361 // ld1h { za0h.h[x15, #1] }, p0/Z, [x27, x23, LSL #1]\n"
+ ".inst 0x252a7120 // psel p0.h, p12.h/Z, p9.h[w14]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0576b29 // ld1h { za1h.h[x15, #1] }, p2/Z, [x25, x23, LSL #1]\n"
+ ".inst 0x252a7122 // psel p2.h, p12.h/Z, p9.h[w14]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0576703 // ld1h { za0h.h[x15, #3] }, p1/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x253a7121 // psel p1.h, p12.h/Z, p9.h[w14, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0572aab // ld1h { za1h.h[x13, #3] }, p2/Z, [x21, x23, LSL #1]\n"
- "ldr x21, [x10, #0x8]\n"
- ".inst 0xe0bf86c0 // st1w { za0v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86c1 // st1w { za0v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
+ ".inst 0xe0576eab // ld1h { za1h.h[x15, #3] }, p3/Z, [x21, x23, LSL #1]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bfc2c0 // st1w { za0v.s[x14] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x253a7120 // psel p0.h, p12.h/Z, p9.h[w14, #1]\n"
+ ".inst 0xe0a8cac4 // st1w { za1v.s[x14] }, p2/Z, [x22, x8, LSL #2]\n"
"add x10, x10, #0x10\n"
- "add x13, x13, #0x4\n"
- ".inst 0xe0bb82c5 // st1w { za1v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x15\n"
+ "add x15, x15, #0x4\n"
+ ".inst 0xe0a9c6c1 // st1w { za0v.s[x14, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bcc2c5 // st1w { za1v.s[x14, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "add x14, x14, #0x2\n"
"addvl x22, x22, #4\n"
+ "cmp x14, x17\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- ".inst 0xe0572521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x23, LSL #1]\n"
- ".inst 0xe0572349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x23, LSL #1]\n"
+ ".inst 0x253b6160 // psel p0.h, p8.h/Z, p11.h[w15, #1]\n"
+ ".inst 0x253b6142 // psel p2.h, p8.h/Z, p10.h[w15, #1]\n"
+ ".inst 0x257b6161 // psel p1.h, p8.h/Z, p11.h[w15, #3]\n"
+ ".inst 0x257b6143 // psel p3.h, p8.h/Z, p10.h[w15, #3]\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25796580 // psel p0.h, p9.h/Z, p12.h[w13, #3]\n"
- ".inst 0x25796161 // psel p1.h, p8.h/Z, p11.h[w13, #3]\n"
- ".inst 0xe0572303 // ld1h { za0h.h[x13, #3] }, p0/Z, [x24, x23, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe05726ab // ld1h { za1h.h[x13, #3] }, p1/Z, [x21, x23, LSL #1]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0576361 // ld1h { za0h.h[x15, #1] }, p0/Z, [x27, x23, LSL #1]\n"
+ ".inst 0x252a7120 // psel p0.h, p12.h/Z, p9.h[w14]\n"
+ "ldr x27, [x11, #0x0]\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0576b29 // ld1h { za1h.h[x15, #1] }, p2/Z, [x25, x23, LSL #1]\n"
+ ".inst 0x252a7122 // psel p2.h, p12.h/Z, p9.h[w14]\n"
+ "ldr x25, [x10, #0x0]\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0576703 // ld1h { za0h.h[x15, #3] }, p1/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x253a7121 // psel p1.h, p12.h/Z, p9.h[w14, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x21, [x10, #0x8]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08ac4 // st1w { za1v.s[x12] }, p2/Z, [x22, x16, LSL #2]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "inch x14\n"
- ".inst 0xe0bc86c1 // st1w { za0v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
"add x11, x11, #0x10\n"
+ ".inst 0xe0576eab // ld1h { za1h.h[x15, #3] }, p3/Z, [x21, x23, LSL #1]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bfc2c0 // st1w { za0v.s[x14] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x253a7120 // psel p0.h, p12.h/Z, p9.h[w14, #1]\n"
+ ".inst 0xe0a8cac4 // st1w { za1v.s[x14] }, p2/Z, [x22, x8, LSL #2]\n"
+ "whilelt p9.h, x16, %x[width]\n"
+ "inch x16\n"
+ ".inst 0xe0a9c6c1 // st1w { za0v.s[x14, #1] }, p1/Z, [x22, x9, LSL #2]\n"
"add x10, x10, #0x10\n"
- ".inst 0xe0bb82c5 // st1w { za1v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
- "addvl x22, x22, #4\n"
"inch x23\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
- "cbz x15, 7f\n"
+ ".inst 0xe0bcc2c5 // st1w { za1v.s[x14, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "addvl x22, x22, #4\n"
+ "whilelt p8.h, x16, %x[width]\n"
+ "cbz x17, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0xe0572520 // ld1h { za0h.h[x13] }, p1/Z, [x9, x23, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0572348 // ld1h { za1h.h[x13] }, p0/Z, [x26, x23, LSL #1]\n"
- ".inst 0x25696580 // psel p0.h, p9.h/Z, p12.h[w13, #2]\n"
- ".inst 0x25696162 // psel p2.h, p8.h/Z, p11.h[w13, #2]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0572302 // ld1h { za0h.h[x13, #2] }, p0/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x25296142 // psel p2.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25696161 // psel p1.h, p8.h/Z, p11.h[w13, #2]\n"
+ ".inst 0x25696143 // psel p3.h, p8.h/Z, p10.h[w13, #2]\n"
+ ".inst 0xe0572360 // ld1h { za0h.h[x13] }, p0/Z, [x27, x23, LSL #1]\n"
+ ".inst 0x25287120 // psel p0.h, p12.h/Z, p9.h[w12]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0572b28 // ld1h { za1h.h[x13] }, p2/Z, [x25, x23, LSL #1]\n"
+ ".inst 0x25287122 // psel p2.h, p12.h/Z, p9.h[w12]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0572702 // ld1h { za0h.h[x13, #2] }, p1/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x25387121 // psel p1.h, p12.h/Z, p9.h[w12, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0572aaa // ld1h { za1h.h[x13, #2] }, p2/Z, [x21, x23, LSL #1]\n"
- "ldr x21, [x10, #0x8]\n"
- ".inst 0xe0bf86c8 // st1w { za2v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082cc // st1w { za3v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
+ ".inst 0xe0572eaa // ld1h { za1h.h[x13, #2] }, p3/Z, [x21, x23, LSL #1]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x25387120 // psel p0.h, p12.h/Z, p9.h[w12, #1]\n"
+ ".inst 0xe0a88acc // st1w { za3v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
"add x10, x10, #0x10\n"
"add x13, x13, #0x4\n"
- ".inst 0xe0bb82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bc82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x28, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
"addvl x22, x22, #4\n"
+ "cmp x12, x17\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0xe0572520 // ld1h { za0h.h[x13] }, p1/Z, [x9, x23, LSL #1]\n"
- ".inst 0xe0572348 // ld1h { za1h.h[x13] }, p0/Z, [x26, x23, LSL #1]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25696580 // psel p0.h, p9.h/Z, p12.h[w13, #2]\n"
+ ".inst 0x25296142 // psel p2.h, p8.h/Z, p10.h[w13]\n"
".inst 0x25696161 // psel p1.h, p8.h/Z, p11.h[w13, #2]\n"
- ".inst 0xe0572302 // ld1h { za0h.h[x13, #2] }, p0/Z, [x24, x23, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe05726aa // ld1h { za1h.h[x13, #2] }, p1/Z, [x21, x23, LSL #1]\n"
+ ".inst 0x25696143 // psel p3.h, p8.h/Z, p10.h[w13, #2]\n"
+ "mov x11, %x[in]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0572360 // ld1h { za0h.h[x13] }, p0/Z, [x27, x23, LSL #1]\n"
+ ".inst 0x25287120 // psel p0.h, p12.h/Z, p9.h[w12]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0572b28 // ld1h { za1h.h[x13] }, p2/Z, [x25, x23, LSL #1]\n"
+ ".inst 0x25287122 // psel p2.h, p12.h/Z, p9.h[w12]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0572702 // ld1h { za0h.h[x13, #2] }, p1/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x25387121 // psel p1.h, p12.h/Z, p9.h[w12, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
+ "add x11, x11, #0x10\n"
+ ".inst 0xe0572eaa // ld1h { za1h.h[x13, #2] }, p3/Z, [x21, x23, LSL #1]\n"
"ldr x21, [x10, #0x8]\n"
".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08acc // st1w { za3v.s[x12] }, p2/Z, [x22, x16, LSL #2]\n"
- "whilelt p10.h, x14, %x[width]\n"
+ ".inst 0x25387120 // psel p0.h, p12.h/Z, p9.h[w12, #1]\n"
+ ".inst 0xe0a88acc // st1w { za3v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
+ "whilelt p9.h, x16, %x[width]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xe0bc86c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
"add x10, x10, #0x10\n"
- ".inst 0xe0bb82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
+ "inch x16\n"
+ ".inst 0xe0bc82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x28, LSL #2]\n"
"addvl x22, x22, #4\n"
- "inch x14\n"
"inch x23\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x25, 11f\n"
+ "cbnz x26, 11f\n"
"mov x11, %x[in]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p8.h, x16, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
- "ldr x21, [x11, #0x0]\n"
+ ".inst 0x25307123 // psel p3.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307122 // psel p2.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25396161 // psel p1.h, p8.h/Z, p11.h[w13, #1]\n"
+ ".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0xe0bf8ec0 // st1w { za0v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a88ac4 // st1w { za1v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
"add x12, x12, #0x1\n"
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- "ldr x20, [x11, x16, LSL #0x3]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- "cmp x12, x16\n"
+ "addvl x22, x22, #2\n"
+ "ldr x21, [x11, #0x0]\n"
+ "cmp x12, x8\n"
+ "ldr x20, [x11, x8, LSL #0x3]\n"
+ "add x11, x11, #0x8\n"
".inst 0xe05726a1 // ld1h { za0h.h[x13, #1] }, p1/Z, [x21, x23, LSL #1]\n"
".inst 0xe0572289 // ld1h { za1h.h[x13, #1] }, p0/Z, [x20, x23, LSL #1]\n"
- "add x11, x11, #0x8\n"
- "addvl x22, x22, #2\n"
"add x13, x13, #0x2\n"
"blt 9b\n"
- "whilelt p10.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p9.h, x16, %x[width]\n"
+ "whilelt p8.h, x16, %x[width]\n"
"mov x20, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082cc // st1w { za3v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
+ ".inst 0x25307121 // psel p1.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307120 // psel p0.s, p12.s/Z, p9.s[w12]\n"
+ "add x20, x20, #0x2\n"
+ ".inst 0xe0bf86c8 // st1w { za2v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a882cc // st1w { za3v.s[x12] }, p0/Z, [x22, x8, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
"addvl x22, x22, #2\n"
- "add x20, x20, #0x2\n"
+ "cmp x12, x7\n"
"blt 10b\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p8.h, x16, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
+ ".inst 0x25307121 // psel p1.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307120 // psel p0.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf86c0 // st1w { za0v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a882c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x8, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
"addvl x22, x22, #2\n"
+ "cmp x12, x7\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x22\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
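
The block2 (<2, 2>) variants above load half-word rows but store 32-bit ZA column slices (st1w), so each .s slot retires two .h elements and the tail column count gets rounded up to pairs on top of the same ceildiv bookkeeping. A sketch of that extra step, with vl_h standing for the cnth value and illustrative names only:

#include <cstddef>

std::size_t block2_tail_pairs(std::size_t width, std::size_t vl_h) {
    std::size_t tail = width & (vl_h - 1);  // ands x7, x21, x7
    if (tail == 0) tail = vl_h;             // csel x7, x7, x20, NE
    return (tail + 1) / 2;                  // add x7, x7, #0x1 ; lsr x7, x7, #0x1
}
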
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp
index ef787c89b9..b1e9226773 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block2_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,269 +32,264 @@ void interleave_block<2, 2, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x22\n"
+ "mov x22, %x[width]\n"
"mov x21, %x[width]\n"
- "inch x21\n"
- "mov x20, %x[width]\n"
- "sub x17, x22, #0x1\n"
- "sub x21, x21, #0x1\n"
- "ands x17, x20, x17\n"
- "cntw x16\n"
- "udiv x21, x21, x22\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x22, NE\n"
- "sub x13, x21, #0x1\n"
- "add x17, x17, #0x1\n"
- "sub x15, x16, #0x2\n"
- "lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x20, x16, #0x1\n"
- "mov x14, #0x0\n"
+ "cnth x20\n"
+ "inch x22\n"
+ "sub x7, x20, #0x1\n"
+ "sub x22, x22, #0x1\n"
+ "ands x7, x21, x7\n"
+ "cntw x8\n"
+ "udiv x22, x22, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x7, x7, x20, NE\n"
+ "sub x13, x22, #0x1\n"
+ "add x7, x7, #0x1\n"
+ "sub x17, x8, #0x2\n"
+ "lsl x21, %x[height], #0x1\n" // height * 2
+ "lsl x20, x8, #0x1\n"
+ "mov x16, #0x0\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- "cntw x28, ALL, MUL #2\n"
- "cntw x27, ALL, MUL #3\n"
- "ldr x26, [x10, #0x0]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ "cntw x9, ALL, MUL #2\n"
+ "cntw x28, ALL, MUL #3\n"
+ "ldr x27, [x11, #0x0]\n"
"lsr x13, x13, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x25, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "and x26, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "ldr x25, [x10, #0x0]\n"
+ "lsr x7, x7, #0x1\n"
+ "ptrue p12.s\n"
"ldr x24, [x11, #0x8]\n"
- "lsr x17, x17, #0x1\n"
- "ptrue p13.s\n"
+ "whilelt p11.h, XZR, x21\n"
+ "whilelt p10.h, x20, x21\n"
"ldr x21, [x10, #0x8]\n"
- "whilelt p12.h, XZR, x22\n"
- "whilelt p11.h, x20, x22\n"
"mov x23, %x[row_offset]\n"
"mov x22, %x[out]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p9.h, x16, %x[width]\n"
+ "whilelt p8.h, x16, %x[width]\n"
"add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
"mov x12, #0x0\n"
- "cbz x15, 2f\n"
+ "cbz x17, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0570520 // ld1h { za0h.h[x12] }, p1/Z, [x9, x23, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0570348 // ld1h { za1h.h[x12] }, p0/Z, [x26, x23, LSL #1]\n"
- ".inst 0x25686581 // psel p1.h, p9.h/Z, p12.h[w12, #2]\n"
- ".inst 0x25686160 // psel p0.h, p8.h/Z, p11.h[w12, #2]\n"
- "ldr x26, [x10, #0x0]\n"
+ ".inst 0x25286163 // psel p3.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0x25286142 // psel p2.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25686161 // psel p1.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
+ ".inst 0xe0570f60 // ld1h { za0h.h[x12] }, p3/Z, [x27, x23, LSL #1]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0570b28 // ld1h { za1h.h[x12] }, p2/Z, [x25, x23, LSL #1]\n"
+ "ldr x25, [x10, #0x0]\n"
".inst 0xe0570702 // ld1h { za0h.h[x12, #2] }, p1/Z, [x24, x23, LSL #1]\n"
"ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
".inst 0xe05702aa // ld1h { za1h.h[x12, #2] }, p0/Z, [x21, x23, LSL #1]\n"
"add x12, x12, #0x4\n"
- "cmp x12, x15, LSL #1\n"
"ldr x21, [x10, #0x8]\n"
"add x10, x10, #0x10\n"
+ "cmp x12, x17, LSL #1\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0570520 // ld1h { za0h.h[x12] }, p1/Z, [x9, x23, LSL #1]\n"
- ".inst 0xe0570348 // ld1h { za1h.h[x12] }, p0/Z, [x26, x23, LSL #1]\n"
- ".inst 0x25686581 // psel p1.h, p9.h/Z, p12.h[w12, #2]\n"
- ".inst 0x25686160 // psel p0.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0x25286163 // psel p3.h, p8.h/Z, p11.h[w12]\n"
+ ".inst 0x25286142 // psel p2.h, p8.h/Z, p10.h[w12]\n"
+ ".inst 0x25686161 // psel p1.h, p8.h/Z, p11.h[w12, #2]\n"
+ ".inst 0x25686140 // psel p0.h, p8.h/Z, p10.h[w12, #2]\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0570f60 // ld1h { za0h.h[x12] }, p3/Z, [x27, x23, LSL #1]\n"
+ "ldr x27, [x11, #0x0]\n"
+ "inch x16\n"
+ ".inst 0xe0570b28 // ld1h { za1h.h[x12] }, p2/Z, [x25, x23, LSL #1]\n"
+ "ldr x25, [x10, #0x0]\n"
".inst 0xe0570702 // ld1h { za0h.h[x12, #2] }, p1/Z, [x24, x23, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe05702aa // ld1h { za1h.h[x12, #2] }, p0/Z, [x21, x23, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- "inch x23\n"
- "inch x14\n"
"ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
+ ".inst 0xe05702aa // ld1h { za1h.h[x12, #2] }, p0/Z, [x21, x23, LSL #1]\n"
"ldr x21, [x10, #0x8]\n"
"add x10, x10, #0x10\n"
+ "inch x23\n"
"cbz x13, 8f\n"
"mov x20, x13\n"
"3:" // K loop: Main loop
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
- "cbz x15, 5f\n"
+ "whilelt p8.h, x16, %x[width]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "cbz x17, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- ".inst 0xe0572521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x23, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0572349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x23, LSL #1]\n"
- ".inst 0x25796580 // psel p0.h, p9.h/Z, p12.h[w13, #3]\n"
- ".inst 0x25796162 // psel p2.h, p8.h/Z, p11.h[w13, #3]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0572303 // ld1h { za0h.h[x13, #3] }, p0/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x253b6160 // psel p0.h, p8.h/Z, p11.h[w15, #1]\n"
+ ".inst 0x253b6142 // psel p2.h, p8.h/Z, p10.h[w15, #1]\n"
+ ".inst 0x257b6161 // psel p1.h, p8.h/Z, p11.h[w15, #3]\n"
+ ".inst 0x257b6143 // psel p3.h, p8.h/Z, p10.h[w15, #3]\n"
+ ".inst 0xe0576361 // ld1h { za0h.h[x15, #1] }, p0/Z, [x27, x23, LSL #1]\n"
+ ".inst 0x252a7120 // psel p0.h, p12.h/Z, p9.h[w14]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0576b29 // ld1h { za1h.h[x15, #1] }, p2/Z, [x25, x23, LSL #1]\n"
+ ".inst 0x252a7122 // psel p2.h, p12.h/Z, p9.h[w14]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0576703 // ld1h { za0h.h[x15, #3] }, p1/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x253a7121 // psel p1.h, p12.h/Z, p9.h[w14, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0572aab // ld1h { za1h.h[x13, #3] }, p2/Z, [x21, x23, LSL #1]\n"
- "ldr x21, [x10, #0x8]\n"
- ".inst 0xe0bf86c0 // st1w { za0v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86c1 // st1w { za0v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
+ ".inst 0xe0576eab // ld1h { za1h.h[x15, #3] }, p3/Z, [x21, x23, LSL #1]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bfc2c0 // st1w { za0v.s[x14] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x253a7120 // psel p0.h, p12.h/Z, p9.h[w14, #1]\n"
+ ".inst 0xe0a8cac4 // st1w { za1v.s[x14] }, p2/Z, [x22, x8, LSL #2]\n"
"add x10, x10, #0x10\n"
- "add x13, x13, #0x4\n"
- ".inst 0xe0bb82c5 // st1w { za1v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x15\n"
+ "add x15, x15, #0x4\n"
+ ".inst 0xe0a9c6c1 // st1w { za0v.s[x14, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bcc2c5 // st1w { za1v.s[x14, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "add x14, x14, #0x2\n"
"addvl x22, x22, #4\n"
+ "cmp x14, x17\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- ".inst 0xe0572521 // ld1h { za0h.h[x13, #1] }, p1/Z, [x9, x23, LSL #1]\n"
- ".inst 0xe0572349 // ld1h { za1h.h[x13, #1] }, p0/Z, [x26, x23, LSL #1]\n"
+ ".inst 0x253b6160 // psel p0.h, p8.h/Z, p11.h[w15, #1]\n"
+ ".inst 0x253b6142 // psel p2.h, p8.h/Z, p10.h[w15, #1]\n"
+ ".inst 0x257b6161 // psel p1.h, p8.h/Z, p11.h[w15, #3]\n"
+ ".inst 0x257b6143 // psel p3.h, p8.h/Z, p10.h[w15, #3]\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25796580 // psel p0.h, p9.h/Z, p12.h[w13, #3]\n"
- ".inst 0x25796161 // psel p1.h, p8.h/Z, p11.h[w13, #3]\n"
- ".inst 0xe0572303 // ld1h { za0h.h[x13, #3] }, p0/Z, [x24, x23, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe05726ab // ld1h { za1h.h[x13, #3] }, p1/Z, [x21, x23, LSL #1]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0576361 // ld1h { za0h.h[x15, #1] }, p0/Z, [x27, x23, LSL #1]\n"
+ ".inst 0x252a7120 // psel p0.h, p12.h/Z, p9.h[w14]\n"
+ "ldr x27, [x11, #0x0]\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0576b29 // ld1h { za1h.h[x15, #1] }, p2/Z, [x25, x23, LSL #1]\n"
+ ".inst 0x252a7122 // psel p2.h, p12.h/Z, p9.h[w14]\n"
+ "ldr x25, [x10, #0x0]\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0576703 // ld1h { za0h.h[x15, #3] }, p1/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x253a7121 // psel p1.h, p12.h/Z, p9.h[w14, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x21, [x10, #0x8]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08ac4 // st1w { za1v.s[x12] }, p2/Z, [x22, x16, LSL #2]\n"
- "whilelt p10.h, x14, %x[width]\n"
- "inch x14\n"
- ".inst 0xe0bc86c1 // st1w { za0v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
"add x11, x11, #0x10\n"
+ ".inst 0xe0576eab // ld1h { za1h.h[x15, #3] }, p3/Z, [x21, x23, LSL #1]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bfc2c0 // st1w { za0v.s[x14] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x253a7120 // psel p0.h, p12.h/Z, p9.h[w14, #1]\n"
+ ".inst 0xe0a8cac4 // st1w { za1v.s[x14] }, p2/Z, [x22, x8, LSL #2]\n"
+ "whilelt p9.h, x16, %x[width]\n"
+ "inch x16\n"
+ ".inst 0xe0a9c6c1 // st1w { za0v.s[x14, #1] }, p1/Z, [x22, x9, LSL #2]\n"
"add x10, x10, #0x10\n"
- ".inst 0xe0bb82c5 // st1w { za1v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
- "addvl x22, x22, #4\n"
"inch x23\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
- "cbz x15, 7f\n"
+ ".inst 0xe0bcc2c5 // st1w { za1v.s[x14, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "addvl x22, x22, #4\n"
+ "whilelt p8.h, x16, %x[width]\n"
+ "cbz x17, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0xe0572520 // ld1h { za0h.h[x13] }, p1/Z, [x9, x23, LSL #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0572348 // ld1h { za1h.h[x13] }, p0/Z, [x26, x23, LSL #1]\n"
- ".inst 0x25696580 // psel p0.h, p9.h/Z, p12.h[w13, #2]\n"
- ".inst 0x25696162 // psel p2.h, p8.h/Z, p11.h[w13, #2]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0572302 // ld1h { za0h.h[x13, #2] }, p0/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x25296142 // psel p2.h, p8.h/Z, p10.h[w13]\n"
+ ".inst 0x25696161 // psel p1.h, p8.h/Z, p11.h[w13, #2]\n"
+ ".inst 0x25696143 // psel p3.h, p8.h/Z, p10.h[w13, #2]\n"
+ ".inst 0xe0572360 // ld1h { za0h.h[x13] }, p0/Z, [x27, x23, LSL #1]\n"
+ ".inst 0x25287120 // psel p0.h, p12.h/Z, p9.h[w12]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0572b28 // ld1h { za1h.h[x13] }, p2/Z, [x25, x23, LSL #1]\n"
+ ".inst 0x25287122 // psel p2.h, p12.h/Z, p9.h[w12]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0572702 // ld1h { za0h.h[x13, #2] }, p1/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x25387121 // psel p1.h, p12.h/Z, p9.h[w12, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0572aaa // ld1h { za1h.h[x13, #2] }, p2/Z, [x21, x23, LSL #1]\n"
- "ldr x21, [x10, #0x8]\n"
- ".inst 0xe0bf86c8 // st1w { za2v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082cc // st1w { za3v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
+ ".inst 0xe0572eaa // ld1h { za1h.h[x13, #2] }, p3/Z, [x21, x23, LSL #1]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x25387120 // psel p0.h, p12.h/Z, p9.h[w12, #1]\n"
+ ".inst 0xe0a88acc // st1w { za3v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
"add x10, x10, #0x10\n"
"add x13, x13, #0x4\n"
- ".inst 0xe0bb82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bc82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x28, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
"addvl x22, x22, #4\n"
+ "cmp x12, x17\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0xe0572520 // ld1h { za0h.h[x13] }, p1/Z, [x9, x23, LSL #1]\n"
- ".inst 0xe0572348 // ld1h { za1h.h[x13] }, p0/Z, [x26, x23, LSL #1]\n"
- "mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0x25696580 // psel p0.h, p9.h/Z, p12.h[w13, #2]\n"
+ ".inst 0x25296142 // psel p2.h, p8.h/Z, p10.h[w13]\n"
".inst 0x25696161 // psel p1.h, p8.h/Z, p11.h[w13, #2]\n"
- ".inst 0xe0572302 // ld1h { za0h.h[x13, #2] }, p0/Z, [x24, x23, LSL #1]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe05726aa // ld1h { za1h.h[x13, #2] }, p1/Z, [x21, x23, LSL #1]\n"
+ ".inst 0x25696143 // psel p3.h, p8.h/Z, p10.h[w13, #2]\n"
+ "mov x11, %x[in]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0572360 // ld1h { za0h.h[x13] }, p0/Z, [x27, x23, LSL #1]\n"
+ ".inst 0x25287120 // psel p0.h, p12.h/Z, p9.h[w12]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0572b28 // ld1h { za1h.h[x13] }, p2/Z, [x25, x23, LSL #1]\n"
+ ".inst 0x25287122 // psel p2.h, p12.h/Z, p9.h[w12]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0572702 // ld1h { za0h.h[x13, #2] }, p1/Z, [x24, x23, LSL #1]\n"
+ ".inst 0x25387121 // psel p1.h, p12.h/Z, p9.h[w12, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
+ "add x11, x11, #0x10\n"
+ ".inst 0xe0572eaa // ld1h { za1h.h[x13, #2] }, p3/Z, [x21, x23, LSL #1]\n"
"ldr x21, [x10, #0x8]\n"
".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08acc // st1w { za3v.s[x12] }, p2/Z, [x22, x16, LSL #2]\n"
- "whilelt p10.h, x14, %x[width]\n"
+ ".inst 0x25387120 // psel p0.h, p12.h/Z, p9.h[w12, #1]\n"
+ ".inst 0xe0a88acc // st1w { za3v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
+ "whilelt p9.h, x16, %x[width]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xe0bc86c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
"add x10, x10, #0x10\n"
- ".inst 0xe0bb82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
+ "inch x16\n"
+ ".inst 0xe0bc82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x28, LSL #2]\n"
"addvl x22, x22, #4\n"
- "inch x14\n"
"inch x23\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x25, 11f\n"
+ "cbnz x26, 11f\n"
"mov x11, %x[in]\n"
- "whilelt p9.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p8.h, x16, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
- "ldr x21, [x11, #0x0]\n"
+ ".inst 0x25307123 // psel p3.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307122 // psel p2.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25396161 // psel p1.h, p8.h/Z, p11.h[w13, #1]\n"
+ ".inst 0x25396140 // psel p0.h, p8.h/Z, p10.h[w13, #1]\n"
+ ".inst 0xe0bf8ec0 // st1w { za0v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a88ac4 // st1w { za1v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
"add x12, x12, #0x1\n"
- ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
- "ldr x20, [x11, x16, LSL #0x3]\n"
- ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
- "cmp x12, x16\n"
+ "addvl x22, x22, #2\n"
+ "ldr x21, [x11, #0x0]\n"
+ "cmp x12, x8\n"
+ "ldr x20, [x11, x8, LSL #0x3]\n"
+ "add x11, x11, #0x8\n"
".inst 0xe05726a1 // ld1h { za0h.h[x13, #1] }, p1/Z, [x21, x23, LSL #1]\n"
".inst 0xe0572289 // ld1h { za1h.h[x13, #1] }, p0/Z, [x20, x23, LSL #1]\n"
- "add x11, x11, #0x8\n"
- "addvl x22, x22, #2\n"
"add x13, x13, #0x2\n"
"blt 9b\n"
- "whilelt p10.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p9.h, x16, %x[width]\n"
+ "whilelt p8.h, x16, %x[width]\n"
"mov x20, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082cc // st1w { za3v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
+ ".inst 0x25307121 // psel p1.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307120 // psel p0.s, p12.s/Z, p9.s[w12]\n"
+ "add x20, x20, #0x2\n"
+ ".inst 0xe0bf86c8 // st1w { za2v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a882cc // st1w { za3v.s[x12] }, p0/Z, [x22, x8, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
"addvl x22, x22, #2\n"
- "add x20, x20, #0x2\n"
+ "cmp x12, x7\n"
"blt 10b\n"
- "whilelt p8.h, x14, %x[width]\n"
+ "whilelt p8.h, x16, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
+ ".inst 0x25307121 // psel p1.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307120 // psel p0.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf86c0 // st1w { za0v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a882c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x8, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
"addvl x22, x22, #2\n"
+ "cmp x12, x7\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x22\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp
index 905c6b41eb..7b42b6fb93 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,265 +32,265 @@ void interleave_block<2, 4, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
"mov x23, %x[width]\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
"incb x23\n"
- "mov x20, %x[width]\n"
- "sub x17, x21, #0x1\n"
- "cntw x16\n"
+ "sub x7, x20, #0x1\n"
+ "cntw x8\n"
"sub x23, x23, #0x1\n"
- "ands x17, x20, x17\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
+ "ands x7, x21, x7\n"
+ "udiv x23, x23, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x7, x7, x20, NE\n"
"lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x16, #0x1\n"
+ "lsl x21, x8, #0x1\n"
"sub x20, x23, #0x1\n"
- "add x17, x17, #0x3\n"
- "sub x15, x16, #0x2\n"
+ "add x7, x7, #0x3\n"
+ "sub x17, x8, #0x2\n"
"whilelt p9.b, XZR, x22\n"
"whilelt p8.b, x21, x22\n"
- "mov x14, #0x0\n"
+ "mov x16, #0x0\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- "cntw x28, ALL, MUL #2\n"
- "cntw x27, ALL, MUL #3\n"
- "ldr x26, [x10, #0x0]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ "cntw x9, ALL, MUL #2\n"
+ "cntw x28, ALL, MUL #3\n"
+ "ldr x27, [x11, #0x0]\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x25, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x24, [x11, #0x8]\n"
- "lsr x17, x17, #0x2\n"
+ "and x26, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "ldr x25, [x10, #0x0]\n"
+ "lsr x7, x7, #0x2\n"
"ptrue p11.s\n"
- "ldr x23, [x10, #0x8]\n"
+ "ldr x24, [x11, #0x8]\n"
"zip1 p10.b, p9.b, p8.b\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "mov x23, %x[row_offset]\n"
+ "ldr x21, [x10, #0x8]\n"
+ "mov x22, %x[out]\n"
+ "whilelt p9.b, x16, %x[width]\n"
+ "whilelt p8.b, x16, %x[width]\n"
"add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
"mov x12, #0x0\n"
- "cbz x15, 2f\n"
+ "cbz x17, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160120 // ld1b { za0h.b[x12] }, p0/Z, [x9, x22]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0160341 // ld1b { za0h.b[x12, #1] }, p0/Z, [x26, x22]\n"
+ ".inst 0x25246143 // psel p3.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0x252c6142 // psel p2.b, p8.b/Z, p10.b[w12, #1]\n"
".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0160704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x22]\n"
+ ".inst 0xe0170f60 // ld1b { za0h.b[x12] }, p3/Z, [x27, x23]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0170b21 // ld1b { za0h.b[x12, #1] }, p2/Z, [x25, x23]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0170704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x23]\n"
"ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe01602e5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x23, x22]\n"
+ ".inst 0xe01702a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x23]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x15, LSL #2\n"
- "ldr x23, [x10, #0x8]\n"
+ "ldr x21, [x10, #0x8]\n"
"add x10, x10, #0x10\n"
+ "cmp x12, x17, LSL #2\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160120 // ld1b { za0h.b[x12] }, p0/Z, [x9, x22]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe0160341 // ld1b { za0h.b[x12, #1] }, p0/Z, [x26, x22]\n"
+ ".inst 0x25246143 // psel p3.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0x252c6142 // psel p2.b, p8.b/Z, p10.b[w12, #1]\n"
".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- ".inst 0xe0160704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x22]\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe01602e5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x23, x22]\n"
- "ldr x26, [x10, #0x0]\n"
- "incb x22\n"
- "incb x14\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0170f60 // ld1b { za0h.b[x12] }, p3/Z, [x27, x23]\n"
+ "ldr x27, [x11, #0x0]\n"
+ "incb x16\n"
+ ".inst 0xe0170b21 // ld1b { za0h.b[x12, #1] }, p2/Z, [x25, x23]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0170704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x23]\n"
"ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
- "ldr x23, [x10, #0x8]\n"
+ ".inst 0xe01702a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
"add x10, x10, #0x10\n"
+ "incb x23\n"
"cbz x20, 8f\n"
"mov x20, x20\n"
"3:" // K loop: Main loop
- "whilelt p8.b, x14, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
- "cbz x15, 5f\n"
+ "whilelt p8.b, x16, %x[width]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "cbz x17, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6142 // psel p2.b, p8.b/Z, p10.b[w13, #7]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0162306 // ld1b { za0h.b[x13, #6] }, p0/Z, [x24, x22]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25376143 // psel p3.b, p8.b/Z, p10.b[w15, #2]\n"
+ ".inst 0x253f6142 // psel p2.b, p8.b/Z, p10.b[w15, #3]\n"
+ ".inst 0x25776141 // psel p1.b, p8.b/Z, p10.b[w15, #6]\n"
+ ".inst 0x257f6140 // psel p0.b, p8.b/Z, p10.b[w15, #7]\n"
+ ".inst 0xe0176f62 // ld1b { za0h.b[x15, #2] }, p3/Z, [x27, x23]\n"
+ ".inst 0x25266d23 // psel p3.b, p11.b/Z, p9.b[w14]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0176b23 // ld1b { za0h.b[x15, #3] }, p2/Z, [x25, x23]\n"
+ ".inst 0x25266d22 // psel p2.b, p11.b/Z, p9.b[w14]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0176706 // ld1b { za0h.b[x15, #6] }, p1/Z, [x24, x23]\n"
+ ".inst 0x252e6d21 // psel p1.b, p11.b/Z, p9.b[w14, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0162ae7 // ld1b { za0h.b[x13, #7] }, p2/Z, [x23, x22]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
+ ".inst 0xe01762a7 // ld1b { za0h.b[x15, #7] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
"add x10, x10, #0x10\n"
- "add x13, x13, #0x8\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ ".inst 0xe0bfcec0 // st1w { za0v.s[x14] }, p3/Z, [x22, XZR, LSL #2]\n"
+ "add x15, x15, #0x8\n"
+ ".inst 0xe0a8cac4 // st1w { za1v.s[x14] }, p2/Z, [x22, x8, LSL #2]\n"
+ ".inst 0xe0a9c6c1 // st1w { za0v.s[x14, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bcc2c5 // st1w { za1v.s[x14, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "add x14, x14, #0x2\n"
+ "addvl x22, x22, #4\n"
+ "cmp x14, x17\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0x25376143 // psel p3.b, p8.b/Z, p10.b[w15, #2]\n"
+ ".inst 0x253f6142 // psel p2.b, p8.b/Z, p10.b[w15, #3]\n"
+ ".inst 0x25776141 // psel p1.b, p8.b/Z, p10.b[w15, #6]\n"
+ ".inst 0x257f6140 // psel p0.b, p8.b/Z, p10.b[w15, #7]\n"
"mov x11, %x[in]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162306 // ld1b { za0h.b[x13, #6] }, p0/Z, [x24, x22]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0x257d6141 // psel p1.b, p8.b/Z, p10.b[w13, #7]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e7 // ld1b { za0h.b[x13, #7] }, p1/Z, [x23, x22]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0176f62 // ld1b { za0h.b[x15, #2] }, p3/Z, [x27, x23]\n"
+ ".inst 0x25266d23 // psel p3.b, p11.b/Z, p9.b[w14]\n"
+ "ldr x27, [x11, #0x0]\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0176b23 // ld1b { za0h.b[x15, #3] }, p2/Z, [x25, x23]\n"
+ ".inst 0x25266d22 // psel p2.b, p11.b/Z, p9.b[w14]\n"
+ "ldr x25, [x10, #0x0]\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0176706 // ld1b { za0h.b[x15, #6] }, p1/Z, [x24, x23]\n"
+ ".inst 0x252e6d21 // psel p1.b, p11.b/Z, p9.b[w14, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b08aa4 // st1w { za1v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "incb x14\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
"add x11, x11, #0x10\n"
+ ".inst 0xe01762a7 // ld1b { za0h.b[x15, #7] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
+ "whilelt p9.b, x16, %x[width]\n"
+ ".inst 0xe0bfcec0 // st1w { za0v.s[x14] }, p3/Z, [x22, XZR, LSL #2]\n"
+ "incb x16\n"
"add x10, x10, #0x10\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incb x22\n"
- "whilelt p8.b, x14, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
- "cbz x15, 7f\n"
+ ".inst 0xe0a8cac4 // st1w { za1v.s[x14] }, p2/Z, [x22, x8, LSL #2]\n"
+ "incb x23\n"
+ "whilelt p8.b, x16, %x[width]\n"
+ ".inst 0xe0a9c6c1 // st1w { za0v.s[x14, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bcc2c5 // st1w { za1v.s[x14, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "addvl x22, x22, #4\n"
+ "cbz x17, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162120 // ld1b { za0h.b[x13] }, p0/Z, [x9, x22]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162341 // ld1b { za0h.b[x13, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6142 // psel p2.b, p8.b/Z, p10.b[w13, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0162304 // ld1b { za0h.b[x13, #4] }, p0/Z, [x24, x22]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
+ ".inst 0xe0172f60 // ld1b { za0h.b[x13] }, p3/Z, [x27, x23]\n"
+ ".inst 0x25246d23 // psel p3.b, p11.b/Z, p9.b[w12]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0172b21 // ld1b { za0h.b[x13, #1] }, p2/Z, [x25, x23]\n"
+ ".inst 0x25246d22 // psel p2.b, p11.b/Z, p9.b[w12]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0172704 // ld1b { za0h.b[x13, #4] }, p1/Z, [x24, x23]\n"
+ ".inst 0x252c6d21 // psel p1.b, p11.b/Z, p9.b[w12, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0162ae5 // ld1b { za0h.b[x13, #5] }, p2/Z, [x23, x22]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
+ ".inst 0xe01722a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
"add x10, x10, #0x10\n"
+ ".inst 0xe0bf8ec8 // st1w { za2v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
"add x13, x13, #0x8\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0a88acc // st1w { za3v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bc82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x28, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "addvl x22, x22, #4\n"
+ "cmp x12, x17\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162120 // ld1b { za0h.b[x13] }, p0/Z, [x9, x22]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- ".inst 0xe0162341 // ld1b { za0h.b[x13, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
"mov x11, %x[in]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162304 // ld1b { za0h.b[x13, #4] }, p0/Z, [x24, x22]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0x256d6141 // psel p1.b, p8.b/Z, p10.b[w13, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e5 // ld1b { za0h.b[x13, #5] }, p1/Z, [x23, x22]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0172f60 // ld1b { za0h.b[x13] }, p3/Z, [x27, x23]\n"
+ ".inst 0x25246d23 // psel p3.b, p11.b/Z, p9.b[w12]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0172b21 // ld1b { za0h.b[x13, #1] }, p2/Z, [x25, x23]\n"
+ ".inst 0x25246d22 // psel p2.b, p11.b/Z, p9.b[w12]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0172704 // ld1b { za0h.b[x13, #4] }, p1/Z, [x24, x23]\n"
+ ".inst 0x252c6d21 // psel p1.b, p11.b/Z, p9.b[w12, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b08aac // st1w { za3v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
"add x11, x11, #0x10\n"
+ ".inst 0xe01722a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ "whilelt p9.b, x16, %x[width]\n"
+ ".inst 0xe0bf8ec8 // st1w { za2v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"add x10, x10, #0x10\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incb x14\n"
- "incb x22\n"
+ ".inst 0xe0a88acc // st1w { za3v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
+ "incb x16\n"
+ "incb x23\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bc82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "addvl x22, x22, #4\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x25, 11f\n"
+ "cbnz x26, 11f\n"
"mov x11, %x[in]\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "whilelt p8.b, x16, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "ldr x20, [x11, #0x0]\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x22]\n"
- "ldr x20, [x11, x16, LSL #0x3]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe0162283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x22]\n"
- "cmp x12, x16\n"
+ ".inst 0xe0bf8ec0 // st1w { za0v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a88ac4 // st1w { za1v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x22, x22, #2\n"
+ "ldr x21, [x11, #0x0]\n"
+ "cmp x12, x8\n"
+ "ldr x20, [x11, x8, LSL #0x3]\n"
"add x11, x11, #0x8\n"
- "addvl x21, x21, #2\n"
+ ".inst 0xe01726a2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x21, x23]\n"
+ ".inst 0xe0172283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x23]\n"
"add x13, x13, #0x4\n"
"blt 9b\n"
- "whilelt p9.b, x14, %x[width]\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "whilelt p9.b, x16, %x[width]\n"
+ "whilelt p8.b, x16, %x[width]\n"
"mov x20, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
"add x20, x20, #0x4\n"
+ ".inst 0xe0bf86c8 // st1w { za2v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a882cc // st1w { za3v.s[x12] }, p0/Z, [x22, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x22, x22, #2\n"
+ "cmp x12, x7\n"
"blt 10b\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "whilelt p8.b, x16, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0xe0bf86c0 // st1w { za0v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a882c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x8, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
+ "addvl x22, x22, #2\n"
+ "cmp x12, x7\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x22\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
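
[Editor's note] The generated kernel above documents its trip-count arithmetic in its own inline comments: `n_passes = ceildiv(width, VL<T>)`, `n_loops = (n_passes - 1) / 2`, and `odd_tail = bool(n_passes & 0x1)`, which drive the "K loop: Main loop" / "K loop: Tails" split. A minimal scalar C++ sketch of that arithmetic, as a reading aid only — the struct and function names are illustrative, not part of the library:

    #include <cstddef>

    // Scalar sketch of the K-loop trip counts the asm computes with
    // incb/sub/udiv and lsr/and; vl_elems stands in for VL<T>.
    struct KLoopCounts {
        std::size_t n_passes; // ceildiv(width, VL<T>)
        std::size_t n_loops;  // (n_passes - 1) / 2: full main-loop iterations
        bool        odd_tail; // bool(n_passes & 0x1): one half-pass remains
    };

    inline KLoopCounts k_loop_counts(std::size_t width, std::size_t vl_elems) {
        KLoopCounts c;
        c.n_passes = (width + vl_elems - 1) / vl_elems;
        c.n_loops  = (c.n_passes - 1) / 2;
        c.odd_tail = (c.n_passes & 0x1) != 0;
        return c;
    }

An odd pass count sends the kernel to the single "Tails: Odd" store loop; an even count runs the paired "Tails: Even: First/Second" loops instead.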
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp
index c5c5af20e2..6930bf4056 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,321 +32,321 @@ void interleave_block<2, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
"mov x23, %x[width]\n"
- "mov z20.b, #0x1\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
"incb x23\n"
- "mov x20, %x[width]\n"
+ "mov z20.b, #0x1\n"
"mov z19.s, #0x0\n"
+ "sub x7, x20, #0x1\n"
+ "cntw x8\n"
"mov z18.s, #0x0\n"
- "sub x17, x21, #0x1\n"
- "cntw x16\n"
"sub x23, x23, #0x1\n"
- "ands x17, x20, x17\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
+ "ands x7, x21, x7\n"
+ "udiv x23, x23, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x7, x7, x20, NE\n"
"lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x16, #0x1\n"
+ "lsl x21, x8, #0x1\n"
"sub x20, x23, #0x1\n"
- "add x17, x17, #0x3\n"
+ "add x7, x7, #0x3\n"
"whilelt p9.b, XZR, x22\n"
"whilelt p8.b, x21, x22\n"
- "mov x15, #0x0\n"
- "cntw x14, ALL, MUL #2\n"
+ "mov x17, #0x0\n"
+ "cntw x16, ALL, MUL #2\n"
"cntw x11, ALL, MUL #3\n"
"ptrue p4.b\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
"and x10, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "lsr x17, x17, #0x2\n"
- "sub x9, x16, #0x2\n"
+ "lsr x7, x7, #0x2\n"
+ "sub x9, x8, #0x2\n"
"ptrue p11.s\n"
"zip1 p10.b, p9.b, p8.b\n"
"mov x28, %x[row_offset]\n"
"mov x27, %x[out]\n"
- "whilelt p9.b, x15, %x[width]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"cbnz %x[first], 1f\n"
"addvl x27, x27, #-2\n"
"ld1w { z19.s }, p4/Z, [x27]\n"
"ld1w { z18.s }, p4/Z, [x27, #1, MUL VL]\n"
"1:" // K loop: Load row sums: End
"mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
+ "add x25, %x[in], x8, LSL #3\n"
"ldr x24, [x26, #0x0]\n"
- "ldr x23, [x25, #0x0]\n"
"mov x12, #0x0\n"
+ "ldr x23, [x25, #0x0]\n"
"ldr x22, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
"ldr x21, [x25, #0x8]\n"
"add x25, x25, #0x10\n"
"cbz x9, 3f\n"
"2:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c02e1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x23, x28]\n"
+ ".inst 0x25246143 // psel p3.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0x252c6142 // psel p2.b, p8.b/Z, p10.b[w12, #1]\n"
".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ ".inst 0xe01c0f00 // ld1b { za0h.b[x12] }, p3/Z, [x24, x28]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe01c0ae1 // ld1b { za0h.b[x12, #1] }, p2/Z, [x23, x28]\n"
"ldr x23, [x25, #0x0]\n"
".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
"ldr x22, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x9, LSL #2\n"
"ldr x21, [x25, #0x8]\n"
"add x25, x25, #0x10\n"
+ "cmp x12, x9, LSL #2\n"
"blt 2b\n"
"3:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe01c02e1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x23, x28]\n"
+ ".inst 0x25246143 // psel p3.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0x252c6142 // psel p2.b, p8.b/Z, p10.b[w12, #1]\n"
".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- ".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
"mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
+ "add x25, %x[in], x8, LSL #3\n"
+ ".inst 0xe01c0f00 // ld1b { za0h.b[x12] }, p3/Z, [x24, x28]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
+ "incb x17\n"
+ ".inst 0xe01c0ae1 // ld1b { za0h.b[x12, #1] }, p2/Z, [x23, x28]\n"
"ldr x23, [x25, #0x0]\n"
- "incb x28\n"
- "incb x15\n"
+ ".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
"ldr x22, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ ".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
"ldr x21, [x25, #0x8]\n"
"add x25, x25, #0x10\n"
+ "incb x28\n"
"cbz x20, 9f\n"
"mov x20, x20\n"
"4:" // K loop: Main loop
- "whilelt p8.b, x15, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
+ "whilelt p8.b, x17, %x[width]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"cbz x9, 6f\n"
"5:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25376143 // psel p3.b, p8.b/Z, p10.b[w15, #2]\n"
+ ".inst 0x253f6142 // psel p2.b, p8.b/Z, p10.b[w15, #3]\n"
+ ".inst 0x25776141 // psel p1.b, p8.b/Z, p10.b[w15, #6]\n"
+ ".inst 0x257f6140 // psel p0.b, p8.b/Z, p10.b[w15, #7]\n"
+ ".inst 0xe01c6f02 // ld1b { za0h.b[x15, #2] }, p3/Z, [x24, x28]\n"
+ ".inst 0x25266d23 // psel p3.b, p11.b/Z, p9.b[w14]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6142 // psel p2.b, p8.b/Z, p10.b[w13, #7]\n"
+ ".inst 0xe01c6ae3 // ld1b { za0h.b[x15, #3] }, p2/Z, [x23, x28]\n"
+ ".inst 0x25266d22 // psel p2.b, p11.b/Z, p9.b[w14]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c22c6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x22, x28]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe01c66c6 // ld1b { za0h.b[x15, #6] }, p1/Z, [x22, x28]\n"
+ ".inst 0x252e6d21 // psel p1.b, p11.b/Z, p9.b[w14, #1]\n"
"ldr x22, [x26, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01c2aa7 // ld1b { za0h.b[x13, #7] }, p2/Z, [x21, x28]\n"
- "ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8760 // st1w { za0v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc0829011 // mova z17.s, p4/M, za0v.s[x12]\n"
- ".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
- "sdot z19.s, z17.b, z20.b\n"
- ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "sdot z18.s, z16.b, z20.b\n"
- ".inst 0xe0ae8361 // st1w { za0v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829031 // mova z17.s, p4/M, za0v.s[x12, #1]\n"
- ".inst 0xe0ab8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
- ".inst 0xc08290b0 // mova z16.s, p4/M, za1v.s[x12, #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x9\n"
"add x26, x26, #0x10\n"
+ ".inst 0xe01c62a7 // ld1b { za0h.b[x15, #7] }, p0/Z, [x21, x28]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
"add x25, x25, #0x10\n"
+ ".inst 0xe0bfcf60 // st1w { za0v.s[x14] }, p3/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0xc082d011 // mova z17.s, p4/M, za0v.s[x14]\n"
+ "add x15, x15, #0x8\n"
+ ".inst 0xc082d090 // mova z16.s, p4/M, za1v.s[x14]\n"
"sdot z19.s, z17.b, z20.b\n"
+ ".inst 0xe0a8cb64 // st1w { za1v.s[x14] }, p2/Z, [x27, x8, LSL #2]\n"
"sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xe0b0c761 // st1w { za0v.s[x14, #1] }, p1/Z, [x27, x16, LSL #2]\n"
+ ".inst 0xc082d031 // mova z17.s, p4/M, za0v.s[x14, #1]\n"
+ ".inst 0xe0abc365 // st1w { za1v.s[x14, #1] }, p0/Z, [x27, x11, LSL #2]\n"
+ ".inst 0xc082d0b0 // mova z16.s, p4/M, za1v.s[x14, #1]\n"
+ "add x14, x14, #0x2\n"
"addvl x27, x27, #4\n"
- "add x13, x13, #0x8\n"
+ "cmp x14, x9\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "sdot z18.s, z16.b, z20.b\n"
"blt 5b\n"
"6:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- ".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6140 // psel p0.b, p8.b/Z, p10.b[w13, #7]\n"
- ".inst 0xe01c26c6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x28]\n"
+ ".inst 0x25376143 // psel p3.b, p8.b/Z, p10.b[w15, #2]\n"
+ ".inst 0x253f6142 // psel p2.b, p8.b/Z, p10.b[w15, #3]\n"
+ ".inst 0x25776141 // psel p1.b, p8.b/Z, p10.b[w15, #6]\n"
+ ".inst 0x257f6140 // psel p0.b, p8.b/Z, p10.b[w15, #7]\n"
"mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
+ "add x25, %x[in], x8, LSL #3\n"
+ ".inst 0xe01c6f02 // ld1b { za0h.b[x15, #2] }, p3/Z, [x24, x28]\n"
+ ".inst 0x25266d23 // psel p3.b, p11.b/Z, p9.b[w14]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22a7 // ld1b { za0h.b[x13, #7] }, p0/Z, [x21, x28]\n"
- ".inst 0xc0829011 // mova z17.s, p4/M, za0v.s[x12]\n"
- ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
- "sdot z19.s, z17.b, z20.b\n"
- ".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
- "sdot z18.s, z16.b, z20.b\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe01c6ae3 // ld1b { za0h.b[x15, #3] }, p2/Z, [x23, x28]\n"
+ ".inst 0x25266d22 // psel p2.b, p11.b/Z, p9.b[w14]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe01c66c6 // ld1b { za0h.b[x15, #6] }, p1/Z, [x22, x28]\n"
+ ".inst 0x252e6d21 // psel p1.b, p11.b/Z, p9.b[w14, #1]\n"
"ldr x22, [x26, #0x8]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829031 // mova z17.s, p4/M, za0v.s[x12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8f60 // st1w { za0v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc08290b0 // mova z16.s, p4/M, za1v.s[x12, #1]\n"
- "whilelt p9.b, x15, %x[width]\n"
- ".inst 0xe0b08b64 // st1w { za1v.s[x12] }, p2/Z, [x27, x16, LSL #2]\n"
- "incb x15\n"
"add x26, x26, #0x10\n"
- "sdot z19.s, z17.b, z20.b\n"
- ".inst 0xe0ae8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x14, LSL #2]\n"
+ ".inst 0xe01c62a7 // ld1b { za0h.b[x15, #7] }, p0/Z, [x21, x28]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ ".inst 0xc082d011 // mova z17.s, p4/M, za0v.s[x14]\n"
+ ".inst 0xe0bfcf60 // st1w { za0v.s[x14] }, p3/Z, [x27, XZR, LSL #2]\n"
+ "incb x17\n"
"add x25, x25, #0x10\n"
- "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xc082d090 // mova z16.s, p4/M, za1v.s[x14]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ ".inst 0xe0a8cb64 // st1w { za1v.s[x14] }, p2/Z, [x27, x8, LSL #2]\n"
"incb x28\n"
- ".inst 0xe0ab8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
+ "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xe0b0c761 // st1w { za0v.s[x14, #1] }, p1/Z, [x27, x16, LSL #2]\n"
+ ".inst 0xc082d031 // mova z17.s, p4/M, za0v.s[x14, #1]\n"
+ "whilelt p8.b, x17, %x[width]\n"
+ ".inst 0xc082d0b0 // mova z16.s, p4/M, za1v.s[x14, #1]\n"
+ ".inst 0xe0abc365 // st1w { za1v.s[x14, #1] }, p0/Z, [x27, x11, LSL #2]\n"
"addvl x27, x27, #4\n"
- "whilelt p8.b, x15, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "sdot z18.s, z16.b, z20.b\n"
"cbz x9, 8f\n"
"7:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01c2300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x28]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
+ ".inst 0xe01c2f00 // ld1b { za0h.b[x13] }, p3/Z, [x24, x28]\n"
+ ".inst 0x25246d23 // psel p3.b, p11.b/Z, p9.b[w12]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22e1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6142 // psel p2.b, p8.b/Z, p10.b[w13, #5]\n"
+ ".inst 0xe01c2ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x28]\n"
+ ".inst 0x25246d22 // psel p2.b, p11.b/Z, p9.b[w12]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c22c4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x22, x28]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe01c26c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x28]\n"
+ ".inst 0x252c6d21 // psel p1.b, p11.b/Z, p9.b[w12, #1]\n"
"ldr x22, [x26, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01c2aa5 // ld1b { za0h.b[x13, #5] }, p2/Z, [x21, x28]\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0xe01c22a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x28]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8768 // st1w { za2v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe0bf8f68 // st1w { za2v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
".inst 0xc0829111 // mova z17.s, p4/M, za2v.s[x12]\n"
+ "add x13, x13, #0x8\n"
".inst 0xc0829190 // mova z16.s, p4/M, za3v.s[x12]\n"
"sdot z19.s, z17.b, z20.b\n"
- ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0a88b6c // st1w { za3v.s[x12] }, p2/Z, [x27, x8, LSL #2]\n"
"sdot z18.s, z16.b, z20.b\n"
- ".inst 0xe0ae8369 // st1w { za2v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b08769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x16, LSL #2]\n"
".inst 0xc0829131 // mova z17.s, p4/M, za2v.s[x12, #1]\n"
".inst 0xe0ab836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
".inst 0xc08291b0 // mova z16.s, p4/M, za3v.s[x12, #1]\n"
"add x12, x12, #0x2\n"
+ "addvl x27, x27, #4\n"
"cmp x12, x9\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"sdot z19.s, z17.b, z20.b\n"
"sdot z18.s, z16.b, z20.b\n"
- "addvl x27, x27, #4\n"
- "add x13, x13, #0x8\n"
"blt 7b\n"
"8:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01c2300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x28]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- ".inst 0xe01c22e1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x23, x28]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
- ".inst 0xe01c26c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x28]\n"
"mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
+ "add x25, %x[in], x8, LSL #3\n"
+ ".inst 0xe01c2f00 // ld1b { za0h.b[x13] }, p3/Z, [x24, x28]\n"
+ ".inst 0x25246d23 // psel p3.b, p11.b/Z, p9.b[w12]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x28]\n"
- ".inst 0xc0829111 // mova z17.s, p4/M, za2v.s[x12]\n"
- ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
- "sdot z19.s, z17.b, z20.b\n"
- ".inst 0xc0829190 // mova z16.s, p4/M, za3v.s[x12]\n"
- "sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xe01c2ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x28]\n"
+ ".inst 0x25246d22 // psel p2.b, p11.b/Z, p9.b[w12]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe01c26c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x28]\n"
+ ".inst 0x252c6d21 // psel p1.b, p11.b/Z, p9.b[w12, #1]\n"
"ldr x22, [x26, #0x8]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829131 // mova z17.s, p4/M, za2v.s[x12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0xe01c22a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x28]\n"
"ldr x21, [x25, #0x8]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ ".inst 0xc0829111 // mova z17.s, p4/M, za2v.s[x12]\n"
".inst 0xe0bf8f68 // st1w { za2v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc08291b0 // mova z16.s, p4/M, za3v.s[x12, #1]\n"
- "whilelt p9.b, x15, %x[width]\n"
- ".inst 0xe0b08b6c // st1w { za3v.s[x12] }, p2/Z, [x27, x16, LSL #2]\n"
"subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- "sdot z19.s, z17.b, z20.b\n"
- ".inst 0xe0ae8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x14, LSL #2]\n"
"add x25, x25, #0x10\n"
+ ".inst 0xc0829190 // mova z16.s, p4/M, za3v.s[x12]\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ ".inst 0xe0a88b6c // st1w { za3v.s[x12] }, p2/Z, [x27, x8, LSL #2]\n"
+ "incb x17\n"
"sdot z18.s, z16.b, z20.b\n"
- "incb x15\n"
+ ".inst 0xe0b08769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x16, LSL #2]\n"
+ ".inst 0xc0829131 // mova z17.s, p4/M, za2v.s[x12, #1]\n"
+ "incb x28\n"
+ ".inst 0xc08291b0 // mova z16.s, p4/M, za3v.s[x12, #1]\n"
".inst 0xe0ab836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
"addvl x27, x27, #4\n"
- "incb x28\n"
+ "sdot z19.s, z17.b, z20.b\n"
+ "sdot z18.s, z16.b, z20.b\n"
"bgt 4b\n"
"9:" // K loop: Tails
"cbnz x10, 12f\n"
"mov x26, %x[in]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
".inst 0xc0829011 // mova z17.s, p4/M, za0v.s[x12]\n"
- ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- "ldr x21, [x26, #0x0]\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
- "ldr x20, [x26, x16, LSL #0x3]\n"
- ".inst 0xe01c22a2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x28]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "cmp x12, x16\n"
+ ".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
"sdot z19.s, z17.b, z20.b\n"
"sdot z18.s, z16.b, z20.b\n"
- ".inst 0xe01c2283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x28]\n"
- "add x26, x26, #0x8\n"
+ ".inst 0xe0bf8f60 // st1w { za0v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0xe0a88b64 // st1w { za1v.s[x12] }, p2/Z, [x27, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
"addvl x27, x27, #2\n"
+ "ldr x21, [x26, #0x0]\n"
+ "cmp x12, x8\n"
+ "ldr x20, [x26, x8, LSL #0x3]\n"
+ "add x26, x26, #0x8\n"
+ ".inst 0xe01c26a2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x21, x28]\n"
+ ".inst 0xe01c2283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x28]\n"
"add x13, x13, #0x4\n"
"blt 10b\n"
- "whilelt p9.b, x15, %x[width]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"mov x20, #0x0\n"
"mov x12, #0x0\n"
"11:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xc0829111 // mova z17.s, p4/M, za2v.s[x12]\n"
- ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
".inst 0xc0829190 // mova z16.s, p4/M, za3v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
"sdot z19.s, z17.b, z20.b\n"
+ "add x20, x20, #0x4\n"
"sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xe0bf8768 // st1w { za2v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0xe0a8836c // st1w { za3v.s[x12] }, p0/Z, [x27, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
"addvl x27, x27, #2\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x7\n"
"blt 11b\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"b 14f\n"
"12:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"13:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xc0829011 // mova z17.s, p4/M, za0v.s[x12]\n"
- ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
".inst 0xc0829090 // mova z16.s, p4/M, za1v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
"sdot z19.s, z17.b, z20.b\n"
"sdot z18.s, z16.b, z20.b\n"
+ ".inst 0xe0bf8760 // st1w { za0v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0xe0a88364 // st1w { za1v.s[x12] }, p0/Z, [x27, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
"addvl x27, x27, #2\n"
+ "cmp x12, x7\n"
"blt 13b\n"
"14:" // K loop: End
"st1w { z19.s }, p4, [x27]\n"
"st1w { z18.s }, p4, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
- "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
+ "mov %x[out], x27\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
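
[Editor's note] The `_summing` variant above differs from the plain interleave only in the running sums it keeps in z19/z18 (reloaded from the output buffer when `%x[first]` is zero, stored back at "K loop: End"). Each stored vector is also dotted against an all-ones vector — `mov z20.b, #0x1` followed by `sdot z19.s, z17.b, z20.b` — which adds every group of four int8 values into one 32-bit lane. A scalar model of that one trick, assuming nothing beyond the instructions visible above:

    #include <cstdint>
    #include <cstddef>

    // Model of "sdot acc.s, data.b, ones.b": with the multiplier fixed to
    // all ones, the dot product degenerates into summing each group of
    // four signed bytes into the corresponding 32-bit accumulator lane.
    inline void sdot_with_ones(int32_t *acc, const int8_t *data,
                               std::size_t s_lanes) {
        for (std::size_t i = 0; i < s_lanes; ++i) {
            acc[i] += data[4 * i] + data[4 * i + 1] +
                      data[4 * i + 2] + data[4 * i + 3];
        }
    }

The unsigned `u8_u8` kernel that follows uses the same structure without the sum accumulation.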
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp
index ce9a0065c7..9ce93ed95c 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,265 +32,265 @@ void interleave_block<2, 4, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
"mov x23, %x[width]\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
"incb x23\n"
- "mov x20, %x[width]\n"
- "sub x17, x21, #0x1\n"
- "cntw x16\n"
+ "sub x7, x20, #0x1\n"
+ "cntw x8\n"
"sub x23, x23, #0x1\n"
- "ands x17, x20, x17\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
+ "ands x7, x21, x7\n"
+ "udiv x23, x23, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x7, x7, x20, NE\n"
"lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x16, #0x1\n"
+ "lsl x21, x8, #0x1\n"
"sub x20, x23, #0x1\n"
- "add x17, x17, #0x3\n"
- "sub x15, x16, #0x2\n"
+ "add x7, x7, #0x3\n"
+ "sub x17, x8, #0x2\n"
"whilelt p9.b, XZR, x22\n"
"whilelt p8.b, x21, x22\n"
- "mov x14, #0x0\n"
+ "mov x16, #0x0\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- "cntw x28, ALL, MUL #2\n"
- "cntw x27, ALL, MUL #3\n"
- "ldr x26, [x10, #0x0]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ "cntw x9, ALL, MUL #2\n"
+ "cntw x28, ALL, MUL #3\n"
+ "ldr x27, [x11, #0x0]\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "and x25, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "ldr x24, [x11, #0x8]\n"
- "lsr x17, x17, #0x2\n"
+ "and x26, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "ldr x25, [x10, #0x0]\n"
+ "lsr x7, x7, #0x2\n"
"ptrue p11.s\n"
- "ldr x23, [x10, #0x8]\n"
+ "ldr x24, [x11, #0x8]\n"
"zip1 p10.b, p9.b, p8.b\n"
- "mov x22, %x[row_offset]\n"
- "mov x21, %x[out]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "mov x23, %x[row_offset]\n"
+ "ldr x21, [x10, #0x8]\n"
+ "mov x22, %x[out]\n"
+ "whilelt p9.b, x16, %x[width]\n"
+ "whilelt p8.b, x16, %x[width]\n"
"add x11, x11, #0x10\n"
"add x10, x10, #0x10\n"
"mov x12, #0x0\n"
- "cbz x15, 2f\n"
+ "cbz x17, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160120 // ld1b { za0h.b[x12] }, p0/Z, [x9, x22]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0160341 // ld1b { za0h.b[x12, #1] }, p0/Z, [x26, x22]\n"
+ ".inst 0x25246143 // psel p3.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0x252c6142 // psel p2.b, p8.b/Z, p10.b[w12, #1]\n"
".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0160704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x22]\n"
+ ".inst 0xe0170f60 // ld1b { za0h.b[x12] }, p3/Z, [x27, x23]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0170b21 // ld1b { za0h.b[x12, #1] }, p2/Z, [x25, x23]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0170704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x23]\n"
"ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe01602e5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x23, x22]\n"
+ ".inst 0xe01702a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x23]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x15, LSL #2\n"
- "ldr x23, [x10, #0x8]\n"
+ "ldr x21, [x10, #0x8]\n"
"add x10, x10, #0x10\n"
+ "cmp x12, x17, LSL #2\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0160120 // ld1b { za0h.b[x12] }, p0/Z, [x9, x22]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe0160341 // ld1b { za0h.b[x12, #1] }, p0/Z, [x26, x22]\n"
+ ".inst 0x25246143 // psel p3.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0x252c6142 // psel p2.b, p8.b/Z, p10.b[w12, #1]\n"
".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- ".inst 0xe0160704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x22]\n"
"mov x11, %x[in]\n"
- "add x10, %x[in], x16, LSL #3\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe01602e5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x23, x22]\n"
- "ldr x26, [x10, #0x0]\n"
- "incb x22\n"
- "incb x14\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0170f60 // ld1b { za0h.b[x12] }, p3/Z, [x27, x23]\n"
+ "ldr x27, [x11, #0x0]\n"
+ "incb x16\n"
+ ".inst 0xe0170b21 // ld1b { za0h.b[x12, #1] }, p2/Z, [x25, x23]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0170704 // ld1b { za0h.b[x12, #4] }, p1/Z, [x24, x23]\n"
"ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
- "ldr x23, [x10, #0x8]\n"
+ ".inst 0xe01702a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
"add x10, x10, #0x10\n"
+ "incb x23\n"
"cbz x20, 8f\n"
"mov x20, x20\n"
"3:" // K loop: Main loop
- "whilelt p8.b, x14, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
- "cbz x15, 5f\n"
+ "whilelt p8.b, x16, %x[width]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
+ "cbz x17, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6142 // psel p2.b, p8.b/Z, p10.b[w13, #7]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0162306 // ld1b { za0h.b[x13, #6] }, p0/Z, [x24, x22]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25376143 // psel p3.b, p8.b/Z, p10.b[w15, #2]\n"
+ ".inst 0x253f6142 // psel p2.b, p8.b/Z, p10.b[w15, #3]\n"
+ ".inst 0x25776141 // psel p1.b, p8.b/Z, p10.b[w15, #6]\n"
+ ".inst 0x257f6140 // psel p0.b, p8.b/Z, p10.b[w15, #7]\n"
+ ".inst 0xe0176f62 // ld1b { za0h.b[x15, #2] }, p3/Z, [x27, x23]\n"
+ ".inst 0x25266d23 // psel p3.b, p11.b/Z, p9.b[w14]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0176b23 // ld1b { za0h.b[x15, #3] }, p2/Z, [x25, x23]\n"
+ ".inst 0x25266d22 // psel p2.b, p11.b/Z, p9.b[w14]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0176706 // ld1b { za0h.b[x15, #6] }, p1/Z, [x24, x23]\n"
+ ".inst 0x252e6d21 // psel p1.b, p11.b/Z, p9.b[w14, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0162ae7 // ld1b { za0h.b[x13, #7] }, p2/Z, [x23, x22]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a0 // st1w { za0v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
+ ".inst 0xe01762a7 // ld1b { za0h.b[x15, #7] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
"add x10, x10, #0x10\n"
- "add x13, x13, #0x8\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ ".inst 0xe0bfcec0 // st1w { za0v.s[x14] }, p3/Z, [x22, XZR, LSL #2]\n"
+ "add x15, x15, #0x8\n"
+ ".inst 0xe0a8cac4 // st1w { za1v.s[x14] }, p2/Z, [x22, x8, LSL #2]\n"
+ ".inst 0xe0a9c6c1 // st1w { za0v.s[x14, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bcc2c5 // st1w { za1v.s[x14, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "add x14, x14, #0x2\n"
+ "addvl x22, x22, #4\n"
+ "cmp x14, x17\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162122 // ld1b { za0h.b[x13, #2] }, p0/Z, [x9, x22]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe0162343 // ld1b { za0h.b[x13, #3] }, p0/Z, [x26, x22]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
+ ".inst 0x25376143 // psel p3.b, p8.b/Z, p10.b[w15, #2]\n"
+ ".inst 0x253f6142 // psel p2.b, p8.b/Z, p10.b[w15, #3]\n"
+ ".inst 0x25776141 // psel p1.b, p8.b/Z, p10.b[w15, #6]\n"
+ ".inst 0x257f6140 // psel p0.b, p8.b/Z, p10.b[w15, #7]\n"
"mov x11, %x[in]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162306 // ld1b { za0h.b[x13, #6] }, p0/Z, [x24, x22]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0x257d6141 // psel p1.b, p8.b/Z, p10.b[w13, #7]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e7 // ld1b { za0h.b[x13, #7] }, p1/Z, [x23, x22]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0176f62 // ld1b { za0h.b[x15, #2] }, p3/Z, [x27, x23]\n"
+ ".inst 0x25266d23 // psel p3.b, p11.b/Z, p9.b[w14]\n"
+ "ldr x27, [x11, #0x0]\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe0176b23 // ld1b { za0h.b[x15, #3] }, p2/Z, [x25, x23]\n"
+ ".inst 0x25266d22 // psel p2.b, p11.b/Z, p9.b[w14]\n"
+ "ldr x25, [x10, #0x0]\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0176706 // ld1b { za0h.b[x15, #6] }, p1/Z, [x24, x23]\n"
+ ".inst 0x252e6d21 // psel p1.b, p11.b/Z, p9.b[w14, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b08aa4 // st1w { za1v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "incb x14\n"
- ".inst 0xe0bc86a1 // st1w { za0v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
"add x11, x11, #0x10\n"
+ ".inst 0xe01762a7 // ld1b { za0h.b[x15, #7] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
+ "whilelt p9.b, x16, %x[width]\n"
+ ".inst 0xe0bfcec0 // st1w { za0v.s[x14] }, p3/Z, [x22, XZR, LSL #2]\n"
+ "incb x16\n"
"add x10, x10, #0x10\n"
- ".inst 0xe0bb82a5 // st1w { za1v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incb x22\n"
- "whilelt p8.b, x14, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
- "cbz x15, 7f\n"
+ ".inst 0xe0a8cac4 // st1w { za1v.s[x14] }, p2/Z, [x22, x8, LSL #2]\n"
+ "incb x23\n"
+ "whilelt p8.b, x16, %x[width]\n"
+ ".inst 0xe0a9c6c1 // st1w { za0v.s[x14, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bcc2c5 // st1w { za1v.s[x14, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "addvl x22, x22, #4\n"
+ "cbz x17, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162120 // ld1b { za0h.b[x13] }, p0/Z, [x9, x22]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162341 // ld1b { za0h.b[x13, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6142 // psel p2.b, p8.b/Z, p10.b[w13, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0xe0162304 // ld1b { za0h.b[x13, #4] }, p0/Z, [x24, x22]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
+ ".inst 0xe0172f60 // ld1b { za0h.b[x13] }, p3/Z, [x27, x23]\n"
+ ".inst 0x25246d23 // psel p3.b, p11.b/Z, p9.b[w12]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0172b21 // ld1b { za0h.b[x13, #1] }, p2/Z, [x25, x23]\n"
+ ".inst 0x25246d22 // psel p2.b, p11.b/Z, p9.b[w12]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0172704 // ld1b { za0h.b[x13, #4] }, p1/Z, [x24, x23]\n"
+ ".inst 0x252c6d21 // psel p1.b, p11.b/Z, p9.b[w12, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0162ae5 // ld1b { za0h.b[x13, #5] }, p2/Z, [x23, x22]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf86a8 // st1w { za2v.s[x12] }, p1/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
+ ".inst 0xe01722a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
"add x10, x10, #0x10\n"
+ ".inst 0xe0bf8ec8 // st1w { za2v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
"add x13, x13, #0x8\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
+ ".inst 0xe0a88acc // st1w { za3v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bc82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x28, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x15\n"
- "addvl x21, x21, #4\n"
+ "addvl x22, x22, #4\n"
+ "cmp x12, x17\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe0162120 // ld1b { za0h.b[x13] }, p0/Z, [x9, x22]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- ".inst 0xe0162341 // ld1b { za0h.b[x13, #1] }, p0/Z, [x26, x22]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
"mov x11, %x[in]\n"
- "ldr x9, [x11, #0x0]\n"
- ".inst 0xe0162304 // ld1b { za0h.b[x13, #4] }, p0/Z, [x24, x22]\n"
- "add x10, %x[in], x16, LSL #3\n"
- ".inst 0x256d6141 // psel p1.b, p8.b/Z, p10.b[w13, #5]\n"
- "ldr x26, [x10, #0x0]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01626e5 // ld1b { za0h.b[x13, #5] }, p1/Z, [x23, x22]\n"
+ "add x10, %x[in], x8, LSL #3\n"
+ ".inst 0xe0172f60 // ld1b { za0h.b[x13] }, p3/Z, [x27, x23]\n"
+ ".inst 0x25246d23 // psel p3.b, p11.b/Z, p9.b[w12]\n"
+ "ldr x27, [x11, #0x0]\n"
+ ".inst 0xe0172b21 // ld1b { za0h.b[x13, #1] }, p2/Z, [x25, x23]\n"
+ ".inst 0x25246d22 // psel p2.b, p11.b/Z, p9.b[w12]\n"
+ "ldr x25, [x10, #0x0]\n"
+ ".inst 0xe0172704 // ld1b { za0h.b[x13, #4] }, p1/Z, [x24, x23]\n"
+ ".inst 0x252c6d21 // psel p1.b, p11.b/Z, p9.b[w12, #1]\n"
"ldr x24, [x11, #0x8]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
- "ldr x23, [x10, #0x8]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xe0b08aac // st1w { za3v.s[x12] }, p2/Z, [x21, x16, LSL #2]\n"
- "whilelt p9.b, x14, %x[width]\n"
- "subs x20, x20, #0x1\n"
- ".inst 0xe0bc86a9 // st1w { za2v.s[x12, #1] }, p1/Z, [x21, x28, LSL #2]\n"
"add x11, x11, #0x10\n"
+ ".inst 0xe01722a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x23]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ "whilelt p9.b, x16, %x[width]\n"
+ ".inst 0xe0bf8ec8 // st1w { za2v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
+ "subs x20, x20, #0x1\n"
"add x10, x10, #0x10\n"
- ".inst 0xe0bb82ad // st1w { za3v.s[x12, #1] }, p0/Z, [x21, x27, LSL #2]\n"
- "addvl x21, x21, #4\n"
- "incb x14\n"
- "incb x22\n"
+ ".inst 0xe0a88acc // st1w { za3v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
+ "incb x16\n"
+ "incb x23\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bc82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x28, LSL #2]\n"
+ "addvl x22, x22, #4\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x25, 11f\n"
+ "cbnz x26, 11f\n"
"mov x11, %x[in]\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "whilelt p8.b, x16, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "ldr x20, [x11, #0x0]\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe0162282 // ld1b { za0h.b[x13, #2] }, p0/Z, [x20, x22]\n"
- "ldr x20, [x11, x16, LSL #0x3]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe0162283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x22]\n"
- "cmp x12, x16\n"
+ ".inst 0xe0bf8ec0 // st1w { za0v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a88ac4 // st1w { za1v.s[x12] }, p2/Z, [x22, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x22, x22, #2\n"
+ "ldr x21, [x11, #0x0]\n"
+ "cmp x12, x8\n"
+ "ldr x20, [x11, x8, LSL #0x3]\n"
"add x11, x11, #0x8\n"
- "addvl x21, x21, #2\n"
+ ".inst 0xe01726a2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x21, x23]\n"
+ ".inst 0xe0172283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x23]\n"
"add x13, x13, #0x4\n"
"blt 9b\n"
- "whilelt p9.b, x14, %x[width]\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "whilelt p9.b, x16, %x[width]\n"
+ "whilelt p8.b, x16, %x[width]\n"
"mov x20, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a8 // st1w { za2v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082ac // st1w { za3v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
"add x20, x20, #0x4\n"
+ ".inst 0xe0bf86c8 // st1w { za2v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a882cc // st1w { za3v.s[x12] }, p0/Z, [x22, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
+ "addvl x22, x22, #2\n"
+ "cmp x12, x7\n"
"blt 10b\n"
- "whilelt p8.b, x14, %x[width]\n"
+ "whilelt p8.b, x16, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf82a0 // st1w { za0v.s[x12] }, p0/Z, [x21, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0b082a4 // st1w { za1v.s[x12] }, p0/Z, [x21, x16, LSL #2]\n"
+ ".inst 0xe0bf86c0 // st1w { za0v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0a882c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x8, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x17\n"
- "addvl x21, x21, #2\n"
+ "addvl x22, x22, #2\n"
+ "cmp x12, x7\n"
"blt 12b\n"
"13:" // K loop: End
- "mov %x[out], x21\n"
+ "mov %x[out], x22\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
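A pattern common to every hunk in this patch: the regenerated kernels allocate extra scratch registers (x7 and x8 in the block above), and each __asm__ block's clobber list is extended to match, because the compiler must be told about every register the assembly scratches. A minimal standalone sketch of that rule, assuming plain GCC/Clang extended asm on AArch64 (the function and operands here are illustrative, not taken from the library):

#include <cstdint>

// Any general-purpose register that an extended-asm block uses as scratch
// must appear in the clobber list, exactly as the hunk above appends "x7"
// and "x8" once the kernel was re-allocated onto them.
uint64_t add_with_scratch(uint64_t a, uint64_t b) {
    uint64_t out;
    __asm__ __volatile__(
        "mov x7, %x[a]\n"          // x7 is used as a scratch register...
        "add %x[out], x7, %x[b]\n"
        : [out] "=r" (out)
        : [a] "r" (a), [b] "r" (b)
        : "x7"                     // ...so it must be declared clobbered
    );
    return out;
}

Leaving a used register out of the clobber list lets the register allocator keep a live value there, which is why the register renaming in these kernels is always paired with a clobber-list update.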
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp
index 7805152656..915381334e 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_block4_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,321 +32,321 @@ void interleave_block<2, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntb x21\n"
"mov x23, %x[width]\n"
- "mov z20.b, #0x1\n"
+ "mov x21, %x[width]\n"
+ "cntb x20\n"
"incb x23\n"
- "mov x20, %x[width]\n"
+ "mov z20.b, #0x1\n"
"mov z19.s, #0x0\n"
+ "sub x7, x20, #0x1\n"
+ "cntw x8\n"
"mov z18.s, #0x0\n"
- "sub x17, x21, #0x1\n"
- "cntw x16\n"
"sub x23, x23, #0x1\n"
- "ands x17, x20, x17\n"
- "udiv x23, x23, x21\n" // n_passes = ceildiv(width, VL<T>)
- "csel x17, x17, x21, NE\n"
+ "ands x7, x21, x7\n"
+ "udiv x23, x23, x20\n" // n_passes = ceildiv(width, VL<T>)
+ "csel x7, x7, x20, NE\n"
"lsl x22, %x[height], #0x1\n" // height * 2
- "lsl x21, x16, #0x1\n"
+ "lsl x21, x8, #0x1\n"
"sub x20, x23, #0x1\n"
- "add x17, x17, #0x3\n"
+ "add x7, x7, #0x3\n"
"whilelt p9.b, XZR, x22\n"
"whilelt p8.b, x21, x22\n"
- "mov x15, #0x0\n"
- "cntw x14, ALL, MUL #2\n"
+ "mov x17, #0x0\n"
+ "cntw x16, ALL, MUL #2\n"
"cntw x11, ALL, MUL #3\n"
"ptrue p4.b\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
"and x10, x23, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "lsr x17, x17, #0x2\n"
- "sub x9, x16, #0x2\n"
+ "lsr x7, x7, #0x2\n"
+ "sub x9, x8, #0x2\n"
"ptrue p11.s\n"
"zip1 p10.b, p9.b, p8.b\n"
"mov x28, %x[row_offset]\n"
"mov x27, %x[out]\n"
- "whilelt p9.b, x15, %x[width]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"cbnz %x[first], 1f\n"
"addvl x27, x27, #-2\n"
"ld1w { z19.s }, p4/Z, [x27]\n"
"ld1w { z18.s }, p4/Z, [x27, #1, MUL VL]\n"
"1:" // K loop: Load row sums: End
"mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
+ "add x25, %x[in], x8, LSL #3\n"
"ldr x24, [x26, #0x0]\n"
- "ldr x23, [x25, #0x0]\n"
"mov x12, #0x0\n"
+ "ldr x23, [x25, #0x0]\n"
"ldr x22, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
"ldr x21, [x25, #0x8]\n"
"add x25, x25, #0x10\n"
"cbz x9, 3f\n"
"2:" // K loop: Charge: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c02e1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x23, x28]\n"
+ ".inst 0x25246143 // psel p3.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0x252c6142 // psel p2.b, p8.b/Z, p10.b[w12, #1]\n"
".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
+ ".inst 0xe01c0f00 // ld1b { za0h.b[x12] }, p3/Z, [x24, x28]\n"
+ "ldr x24, [x26, #0x0]\n"
+ ".inst 0xe01c0ae1 // ld1b { za0h.b[x12, #1] }, p2/Z, [x23, x28]\n"
"ldr x23, [x25, #0x0]\n"
".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
"ldr x22, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
"add x12, x12, #0x8\n"
- "cmp x12, x9, LSL #2\n"
"ldr x21, [x25, #0x8]\n"
"add x25, x25, #0x10\n"
+ "cmp x12, x9, LSL #2\n"
"blt 2b\n"
"3:" // K loop: Charge: End
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe01c0300 // ld1b { za0h.b[x12] }, p0/Z, [x24, x28]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe01c02e1 // ld1b { za0h.b[x12, #1] }, p0/Z, [x23, x28]\n"
+ ".inst 0x25246143 // psel p3.b, p8.b/Z, p10.b[w12]\n"
+ ".inst 0x252c6142 // psel p2.b, p8.b/Z, p10.b[w12, #1]\n"
".inst 0x25646141 // psel p1.b, p8.b/Z, p10.b[w12, #4]\n"
".inst 0x256c6140 // psel p0.b, p8.b/Z, p10.b[w12, #5]\n"
- ".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
"mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
+ "add x25, %x[in], x8, LSL #3\n"
+ ".inst 0xe01c0f00 // ld1b { za0h.b[x12] }, p3/Z, [x24, x28]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
+ "incb x17\n"
+ ".inst 0xe01c0ae1 // ld1b { za0h.b[x12, #1] }, p2/Z, [x23, x28]\n"
"ldr x23, [x25, #0x0]\n"
- "incb x28\n"
- "incb x15\n"
+ ".inst 0xe01c06c4 // ld1b { za0h.b[x12, #4] }, p1/Z, [x22, x28]\n"
"ldr x22, [x26, #0x8]\n"
"add x26, x26, #0x10\n"
+ ".inst 0xe01c02a5 // ld1b { za0h.b[x12, #5] }, p0/Z, [x21, x28]\n"
"ldr x21, [x25, #0x8]\n"
"add x25, x25, #0x10\n"
+ "incb x28\n"
"cbz x20, 9f\n"
"mov x20, x20\n"
"4:" // K loop: Main loop
- "whilelt p8.b, x15, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
+ "whilelt p8.b, x17, %x[width]\n"
+ "mov x15, #0x0\n"
+ "mov x14, #0x0\n"
"cbz x9, 6f\n"
"5:" // K loop: Main loop: First: Loop
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ ".inst 0x25376143 // psel p3.b, p8.b/Z, p10.b[w15, #2]\n"
+ ".inst 0x253f6142 // psel p2.b, p8.b/Z, p10.b[w15, #3]\n"
+ ".inst 0x25776141 // psel p1.b, p8.b/Z, p10.b[w15, #6]\n"
+ ".inst 0x257f6140 // psel p0.b, p8.b/Z, p10.b[w15, #7]\n"
+ ".inst 0xe01c6f02 // ld1b { za0h.b[x15, #2] }, p3/Z, [x24, x28]\n"
+ ".inst 0x25266d23 // psel p3.b, p11.b/Z, p9.b[w14]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- ".inst 0x25756140 // psel p0.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6142 // psel p2.b, p8.b/Z, p10.b[w13, #7]\n"
+ ".inst 0xe01c6ae3 // ld1b { za0h.b[x15, #3] }, p2/Z, [x23, x28]\n"
+ ".inst 0x25266d22 // psel p2.b, p11.b/Z, p9.b[w14]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c22c6 // ld1b { za0h.b[x13, #6] }, p0/Z, [x22, x28]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe01c66c6 // ld1b { za0h.b[x15, #6] }, p1/Z, [x22, x28]\n"
+ ".inst 0x252e6d21 // psel p1.b, p11.b/Z, p9.b[w14, #1]\n"
"ldr x22, [x26, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01c2aa7 // ld1b { za0h.b[x13, #7] }, p2/Z, [x21, x28]\n"
- "ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8760 // st1w { za0v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
- ".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
- "udot z19.s, z16.b, z20.b\n"
- ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "udot z18.s, z17.b, z20.b\n"
- ".inst 0xe0ae8361 // st1w { za0v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829030 // mova z16.s, p4/M, za0v.s[x12, #1]\n"
- ".inst 0xe0ab8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
- ".inst 0xc08290b1 // mova z17.s, p4/M, za1v.s[x12, #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x9\n"
"add x26, x26, #0x10\n"
+ ".inst 0xe01c62a7 // ld1b { za0h.b[x15, #7] }, p0/Z, [x21, x28]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
"add x25, x25, #0x10\n"
+ ".inst 0xe0bfcf60 // st1w { za0v.s[x14] }, p3/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0xc082d010 // mova z16.s, p4/M, za0v.s[x14]\n"
+ "add x15, x15, #0x8\n"
+ ".inst 0xc082d091 // mova z17.s, p4/M, za1v.s[x14]\n"
"udot z19.s, z16.b, z20.b\n"
+ ".inst 0xe0a8cb64 // st1w { za1v.s[x14] }, p2/Z, [x27, x8, LSL #2]\n"
"udot z18.s, z17.b, z20.b\n"
+ ".inst 0xe0b0c761 // st1w { za0v.s[x14, #1] }, p1/Z, [x27, x16, LSL #2]\n"
+ ".inst 0xc082d030 // mova z16.s, p4/M, za0v.s[x14, #1]\n"
+ ".inst 0xe0abc365 // st1w { za1v.s[x14, #1] }, p0/Z, [x27, x11, LSL #2]\n"
+ ".inst 0xc082d0b1 // mova z17.s, p4/M, za1v.s[x14, #1]\n"
+ "add x14, x14, #0x2\n"
"addvl x27, x27, #4\n"
- "add x13, x13, #0x8\n"
+ "cmp x14, x9\n"
+ "udot z19.s, z16.b, z20.b\n"
+ "udot z18.s, z17.b, z20.b\n"
"blt 5b\n"
"6:" // K loop: Main loop: First: Tail
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xe01c2302 // ld1b { za0h.b[x13, #2] }, p0/Z, [x24, x28]\n"
- ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- ".inst 0xe01c22e3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x23, x28]\n"
- ".inst 0x25756141 // psel p1.b, p8.b/Z, p10.b[w13, #6]\n"
- ".inst 0x257d6140 // psel p0.b, p8.b/Z, p10.b[w13, #7]\n"
- ".inst 0xe01c26c6 // ld1b { za0h.b[x13, #6] }, p1/Z, [x22, x28]\n"
+ ".inst 0x25376143 // psel p3.b, p8.b/Z, p10.b[w15, #2]\n"
+ ".inst 0x253f6142 // psel p2.b, p8.b/Z, p10.b[w15, #3]\n"
+ ".inst 0x25776141 // psel p1.b, p8.b/Z, p10.b[w15, #6]\n"
+ ".inst 0x257f6140 // psel p0.b, p8.b/Z, p10.b[w15, #7]\n"
"mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
+ "add x25, %x[in], x8, LSL #3\n"
+ ".inst 0xe01c6f02 // ld1b { za0h.b[x15, #2] }, p3/Z, [x24, x28]\n"
+ ".inst 0x25266d23 // psel p3.b, p11.b/Z, p9.b[w14]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22a7 // ld1b { za0h.b[x13, #7] }, p0/Z, [x21, x28]\n"
- ".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
- ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
- "udot z19.s, z16.b, z20.b\n"
- ".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
- "udot z18.s, z17.b, z20.b\n"
+ "mov x13, #0x0\n"
+ ".inst 0xe01c6ae3 // ld1b { za0h.b[x15, #3] }, p2/Z, [x23, x28]\n"
+ ".inst 0x25266d22 // psel p2.b, p11.b/Z, p9.b[w14]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe01c66c6 // ld1b { za0h.b[x15, #6] }, p1/Z, [x22, x28]\n"
+ ".inst 0x252e6d21 // psel p1.b, p11.b/Z, p9.b[w14, #1]\n"
"ldr x22, [x26, #0x8]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829030 // mova z16.s, p4/M, za0v.s[x12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
- "ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8f60 // st1w { za0v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc08290b1 // mova z17.s, p4/M, za1v.s[x12, #1]\n"
- "whilelt p9.b, x15, %x[width]\n"
- ".inst 0xe0b08b64 // st1w { za1v.s[x12] }, p2/Z, [x27, x16, LSL #2]\n"
- "incb x15\n"
"add x26, x26, #0x10\n"
- "udot z19.s, z16.b, z20.b\n"
- ".inst 0xe0ae8761 // st1w { za0v.s[x12, #1] }, p1/Z, [x27, x14, LSL #2]\n"
+ ".inst 0xe01c62a7 // ld1b { za0h.b[x15, #7] }, p0/Z, [x21, x28]\n"
+ "ldr x21, [x25, #0x8]\n"
+ ".inst 0x252e6d20 // psel p0.b, p11.b/Z, p9.b[w14, #1]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ ".inst 0xc082d010 // mova z16.s, p4/M, za0v.s[x14]\n"
+ ".inst 0xe0bfcf60 // st1w { za0v.s[x14] }, p3/Z, [x27, XZR, LSL #2]\n"
+ "incb x17\n"
"add x25, x25, #0x10\n"
- "udot z18.s, z17.b, z20.b\n"
+ ".inst 0xc082d091 // mova z17.s, p4/M, za1v.s[x14]\n"
+ "udot z19.s, z16.b, z20.b\n"
+ ".inst 0xe0a8cb64 // st1w { za1v.s[x14] }, p2/Z, [x27, x8, LSL #2]\n"
"incb x28\n"
- ".inst 0xe0ab8365 // st1w { za1v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
+ "udot z18.s, z17.b, z20.b\n"
+ ".inst 0xe0b0c761 // st1w { za0v.s[x14, #1] }, p1/Z, [x27, x16, LSL #2]\n"
+ ".inst 0xc082d030 // mova z16.s, p4/M, za0v.s[x14, #1]\n"
+ "whilelt p8.b, x17, %x[width]\n"
+ ".inst 0xc082d0b1 // mova z17.s, p4/M, za1v.s[x14, #1]\n"
+ ".inst 0xe0abc365 // st1w { za1v.s[x14, #1] }, p0/Z, [x27, x11, LSL #2]\n"
"addvl x27, x27, #4\n"
- "whilelt p8.b, x15, %x[width]\n"
- "mov x13, #0x0\n"
- "mov x12, #0x0\n"
+ "udot z19.s, z16.b, z20.b\n"
+ "udot z18.s, z17.b, z20.b\n"
"cbz x9, 8f\n"
"7:" // K loop: Main loop: Second: Loop
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01c2300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x28]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
+ ".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
+ ".inst 0xe01c2f00 // ld1b { za0h.b[x13] }, p3/Z, [x24, x28]\n"
+ ".inst 0x25246d23 // psel p3.b, p11.b/Z, p9.b[w12]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22e1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x23, x28]\n"
- ".inst 0x25656140 // psel p0.b, p8.b/Z, p10.b[w13, #4]\n"
- ".inst 0x256d6142 // psel p2.b, p8.b/Z, p10.b[w13, #5]\n"
+ ".inst 0xe01c2ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x28]\n"
+ ".inst 0x25246d22 // psel p2.b, p11.b/Z, p9.b[w12]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0xe01c22c4 // ld1b { za0h.b[x13, #4] }, p0/Z, [x22, x28]\n"
- ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe01c26c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x28]\n"
+ ".inst 0x252c6d21 // psel p1.b, p11.b/Z, p9.b[w12, #1]\n"
"ldr x22, [x26, #0x8]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe01c2aa5 // ld1b { za0h.b[x13, #5] }, p2/Z, [x21, x28]\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0xe01c22a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x28]\n"
"ldr x21, [x25, #0x8]\n"
- ".inst 0xe0bf8768 // st1w { za2v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0xe0bf8f68 // st1w { za2v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
+ "add x13, x13, #0x8\n"
".inst 0xc0829191 // mova z17.s, p4/M, za3v.s[x12]\n"
"udot z19.s, z16.b, z20.b\n"
- ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0a88b6c // st1w { za3v.s[x12] }, p2/Z, [x27, x8, LSL #2]\n"
"udot z18.s, z17.b, z20.b\n"
- ".inst 0xe0ae8369 // st1w { za2v.s[x12, #1] }, p0/Z, [x27, x14, LSL #2]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b08769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x16, LSL #2]\n"
".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
".inst 0xe0ab836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
".inst 0xc08291b1 // mova z17.s, p4/M, za3v.s[x12, #1]\n"
"add x12, x12, #0x2\n"
+ "addvl x27, x27, #4\n"
"cmp x12, x9\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"udot z19.s, z16.b, z20.b\n"
"udot z18.s, z17.b, z20.b\n"
- "addvl x27, x27, #4\n"
- "add x13, x13, #0x8\n"
"blt 7b\n"
"8:" // K loop: Main loop: Second: Tail
- ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
- ".inst 0xe01c2300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x28]\n"
- ".inst 0x252d6140 // psel p0.b, p8.b/Z, p10.b[w13, #1]\n"
- ".inst 0xe01c22e1 // ld1b { za0h.b[x13, #1] }, p0/Z, [x23, x28]\n"
+ ".inst 0x25256143 // psel p3.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
".inst 0x25656141 // psel p1.b, p8.b/Z, p10.b[w13, #4]\n"
".inst 0x256d6140 // psel p0.b, p8.b/Z, p10.b[w13, #5]\n"
- ".inst 0xe01c26c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x28]\n"
"mov x26, %x[in]\n"
- "add x25, %x[in], x16, LSL #3\n"
+ "add x25, %x[in], x8, LSL #3\n"
+ ".inst 0xe01c2f00 // ld1b { za0h.b[x13] }, p3/Z, [x24, x28]\n"
+ ".inst 0x25246d23 // psel p3.b, p11.b/Z, p9.b[w12]\n"
"ldr x24, [x26, #0x0]\n"
- ".inst 0xe01c22a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x28]\n"
- ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
- ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
- "udot z19.s, z16.b, z20.b\n"
- ".inst 0xc0829191 // mova z17.s, p4/M, za3v.s[x12]\n"
- "udot z18.s, z17.b, z20.b\n"
+ ".inst 0xe01c2ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x28]\n"
+ ".inst 0x25246d22 // psel p2.b, p11.b/Z, p9.b[w12]\n"
"ldr x23, [x25, #0x0]\n"
- ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe01c26c4 // ld1b { za0h.b[x13, #4] }, p1/Z, [x22, x28]\n"
+ ".inst 0x252c6d21 // psel p1.b, p11.b/Z, p9.b[w12, #1]\n"
"ldr x22, [x26, #0x8]\n"
- ".inst 0x25706d21 // psel p1.s, p11.s/Z, p9.s[w12, #1]\n"
- ".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
- ".inst 0x25706d20 // psel p0.s, p11.s/Z, p9.s[w12, #1]\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0xe01c22a5 // ld1b { za0h.b[x13, #5] }, p0/Z, [x21, x28]\n"
"ldr x21, [x25, #0x8]\n"
+ ".inst 0x252c6d20 // psel p0.b, p11.b/Z, p9.b[w12, #1]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ ".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
".inst 0xe0bf8f68 // st1w { za2v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
- ".inst 0xc08291b1 // mova z17.s, p4/M, za3v.s[x12, #1]\n"
- "whilelt p9.b, x15, %x[width]\n"
- ".inst 0xe0b08b6c // st1w { za3v.s[x12] }, p2/Z, [x27, x16, LSL #2]\n"
"subs x20, x20, #0x1\n"
- "add x26, x26, #0x10\n"
- "udot z19.s, z16.b, z20.b\n"
- ".inst 0xe0ae8769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x14, LSL #2]\n"
"add x25, x25, #0x10\n"
+ ".inst 0xc0829191 // mova z17.s, p4/M, za3v.s[x12]\n"
+ "udot z19.s, z16.b, z20.b\n"
+ ".inst 0xe0a88b6c // st1w { za3v.s[x12] }, p2/Z, [x27, x8, LSL #2]\n"
+ "incb x17\n"
"udot z18.s, z17.b, z20.b\n"
- "incb x15\n"
+ ".inst 0xe0b08769 // st1w { za2v.s[x12, #1] }, p1/Z, [x27, x16, LSL #2]\n"
+ ".inst 0xc0829130 // mova z16.s, p4/M, za2v.s[x12, #1]\n"
+ "incb x28\n"
+ ".inst 0xc08291b1 // mova z17.s, p4/M, za3v.s[x12, #1]\n"
".inst 0xe0ab836d // st1w { za3v.s[x12, #1] }, p0/Z, [x27, x11, LSL #2]\n"
"addvl x27, x27, #4\n"
- "incb x28\n"
+ "udot z19.s, z16.b, z20.b\n"
+ "udot z18.s, z17.b, z20.b\n"
"bgt 4b\n"
"9:" // K loop: Tails
"cbnz x10, 12f\n"
"mov x26, %x[in]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"mov x13, #0x0\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: First
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25306d23 // psel p3.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
- ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
- "ldr x21, [x26, #0x0]\n"
- ".inst 0x25356140 // psel p0.b, p8.b/Z, p10.b[w13, #2]\n"
- ".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
- "ldr x20, [x26, x16, LSL #0x3]\n"
- ".inst 0xe01c22a2 // ld1b { za0h.b[x13, #2] }, p0/Z, [x21, x28]\n"
- "add x12, x12, #0x1\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
- "cmp x12, x16\n"
+ ".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
"udot z19.s, z16.b, z20.b\n"
"udot z18.s, z17.b, z20.b\n"
- ".inst 0xe01c2283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x28]\n"
- "add x26, x26, #0x8\n"
+ ".inst 0xe0bf8f60 // st1w { za0v.s[x12] }, p3/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0xe0a88b64 // st1w { za1v.s[x12] }, p2/Z, [x27, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
"addvl x27, x27, #2\n"
+ "ldr x21, [x26, #0x0]\n"
+ "cmp x12, x8\n"
+ "ldr x20, [x26, x8, LSL #0x3]\n"
+ "add x26, x26, #0x8\n"
+ ".inst 0xe01c26a2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x21, x28]\n"
+ ".inst 0xe01c2283 // ld1b { za0h.b[x13, #3] }, p0/Z, [x20, x28]\n"
"add x13, x13, #0x4\n"
"blt 10b\n"
- "whilelt p9.b, x15, %x[width]\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"mov x20, #0x0\n"
"mov x12, #0x0\n"
"11:" // K loop: Tails: Even: Second
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8368 // st1w { za2v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xc0829110 // mova z16.s, p4/M, za2v.s[x12]\n"
- ".inst 0xe0b0836c // st1w { za3v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
".inst 0xc0829191 // mova z17.s, p4/M, za3v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
"udot z19.s, z16.b, z20.b\n"
+ "add x20, x20, #0x4\n"
"udot z18.s, z17.b, z20.b\n"
+ ".inst 0xe0bf8768 // st1w { za2v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0xe0a8836c // st1w { za3v.s[x12] }, p0/Z, [x27, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
"addvl x27, x27, #2\n"
- "add x20, x20, #0x4\n"
+ "cmp x12, x7\n"
"blt 11b\n"
- "whilelt p8.b, x15, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"b 14f\n"
"12:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"13:" // K loop: Tails: Odd: Loop
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8360 // st1w { za0v.s[x12] }, p0/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
".inst 0xc0829010 // mova z16.s, p4/M, za0v.s[x12]\n"
- ".inst 0xe0b08364 // st1w { za1v.s[x12] }, p0/Z, [x27, x16, LSL #2]\n"
".inst 0xc0829091 // mova z17.s, p4/M, za1v.s[x12]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x17\n"
"udot z19.s, z16.b, z20.b\n"
"udot z18.s, z17.b, z20.b\n"
+ ".inst 0xe0bf8760 // st1w { za0v.s[x12] }, p1/Z, [x27, XZR, LSL #2]\n"
+ ".inst 0xe0a88364 // st1w { za1v.s[x12] }, p0/Z, [x27, x8, LSL #2]\n"
+ "add x12, x12, #0x1\n"
"addvl x27, x27, #2\n"
+ "cmp x12, x7\n"
"blt 13b\n"
"14:" // K loop: End
"st1w { z19.s }, p4, [x27]\n"
"st1w { z18.s }, p4, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
- "mov %x[out], x27\n"
".inst 0xd503467f // SMSTOP\n"
+ "mov %x[out], x27\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
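The summing variant above keeps per-row totals alongside the interleave: z20.b is preloaded with 1, and each block moved out of ZA is also fed through "udot z19.s, z16.b, z20.b", so every 32-bit accumulator lane gains the sum of four unsigned bytes. A scalar model of that step, offered as an illustrative sketch rather than the kernel itself:

#include <array>
#include <cstdint>

// Scalar model of what "udot z19.s, z16.b, z20.b" computes when z20.b is
// all ones, as in the summing interleave above: each 32-bit lane of the
// accumulator gains the sum of the corresponding four unsigned bytes,
// i.e. a running row sum.
template <size_t Lanes>
void udot_with_ones(std::array<uint32_t, Lanes>& acc,
                    const std::array<uint8_t, Lanes * 4>& bytes) {
    for (size_t lane = 0; lane < Lanes; ++lane) {
        uint32_t sum = 0;
        for (size_t i = 0; i < 4; ++i) {
            sum += bytes[lane * 4 + i] * 1u; // dot product with constant 1
        }
        acc[lane] += sum;
    }
}

Dotting with an all-ones vector is a common idiom for accumulating the byte-wise row sums needed by quantized GEMM offset correction.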
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp
index 96ab55ee06..19d87039fe 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,66 +32,66 @@ void interleave_block<2, 1, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cnth x28\n"
- "cmp %x[height], x28\n"
- "cnth x27\n"
- "csel x28, %x[height], x28, LT\n"
- "mov x26, #0x0\n"
+ "mov x28, #0x0\n"
+ "mov x27, %x[row_offset]\n"
+ "cnth x26\n"
+ "cnth x25\n"
+ "cmp %x[height], x26\n"
"ptrue p13.s\n"
- "sub x28, x28, #0x1\n"
+ "csel x26, %x[height], x26, LT\n"
"whilelt p12.h, XZR, %x[height]\n"
- "whilelt p11.h, x27, %x[height]\n"
- "mov x25, %x[row_offset]\n"
+ "sub x26, x26, #0x1\n"
+ "whilelt p11.h, x25, %x[height]\n"
"mov x24, %x[out]\n"
- "whilelt p10.h, x26, %x[width]\n"
- "whilelt p9.h, x26, %x[width]\n"
- "whilelt p8.h, x26, %x[width]\n"
+ "whilelt p10.h, x28, %x[width]\n"
+ "whilelt p9.h, x28, %x[width]\n"
+ "whilelt p8.h, x28, %x[width]\n"
"1:" // Width loop
"add x23, %x[in], XZR, LSL #3\n"
- "add x20, %x[in], x27, LSL #3\n"
+ "add x20, %x[in], x25, LSL #3\n"
+ "mov x13, #0x0\n"
"ldr x22, [x23], #0x8\n"
- "mov x12, #0x0\n"
"ldr x21, [x20], #0x8\n"
- "cbz x28, 3f\n"
+ "cbz x26, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe05906c0 // ld1h { za0h.h[x12] }, p1/Z, [x22, x25, LSL #1]\n"
+ ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0xe05b26c0 // ld1h { za0h.h[x13] }, p1/Z, [x22, x27, LSL #1]\n"
"ldr x22, [x23], #0x8\n"
- ".inst 0xe05902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x28, LSL #1\n"
+ ".inst 0xe05b22a8 // ld1h { za1h.h[x13] }, p0/Z, [x21, x27, LSL #1]\n"
+ "add x13, x13, #0x2\n"
"ldr x21, [x20], #0x8\n"
+ "cmp x13, x26, LSL #1\n"
"blt 2b\n"
"3:" // Loads: Tail
- "sub x20, %x[width], x26\n"
- ".inst 0x25286580 // psel p0.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0xe05902c0 // ld1h { za0h.h[x12] }, p0/Z, [x22, x25, LSL #1]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- "cmp x20, x27\n"
- ".inst 0xe05902a8 // ld1h { za1h.h[x12] }, p0/Z, [x21, x25, LSL #1]\n"
+ ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
+ "sub x20, %x[width], x28\n"
"mov x12, #0x0\n"
- "csel x20, x20, x27, LT\n"
+ "cmp x20, x25\n"
+ ".inst 0xe05b26c0 // ld1h { za0h.h[x13] }, p1/Z, [x22, x27, LSL #1]\n"
+ "csel x20, x20, x25, LT\n"
+ ".inst 0xe05b22a8 // ld1h { za1h.h[x13] }, p0/Z, [x21, x27, LSL #1]\n"
"4:" // Stores: Loop
+ ".inst 0x25287541 // psel p1.h, p13.h/Z, p10.h[w12]\n"
".inst 0x25287540 // psel p0.h, p13.h/Z, p10.h[w12]\n"
- ".inst 0xe07f8300 // st1h { za0v.h[x12] }, p0/Z, [x24, XZR, LSL #1]\n"
- ".inst 0x25287540 // psel p0.h, p13.h/Z, p10.h[w12]\n"
- ".inst 0xe07b8308 // st1h { za1v.h[x12] }, p0/Z, [x24, x27, LSL #1]\n"
+ ".inst 0xe07f8700 // st1h { za0v.h[x12] }, p1/Z, [x24, XZR, LSL #1]\n"
+ ".inst 0xe0798308 // st1h { za1v.h[x12] }, p0/Z, [x24, x25, LSL #1]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
"addvl x24, x24, #4\n"
+ "cmp x12, x20\n"
"blt 4b\n"
- "inch x26\n"
- "whilelt p10.h, x26, %x[width]\n"
- "whilelt p9.h, x26, %x[width]\n"
- "whilelt p8.h, x26, %x[width]\n"
- "inch x25\n"
+ "inch x28\n"
+ "inch x27\n"
+ "whilelt p10.h, x28, %x[width]\n"
+ "whilelt p9.h, x28, %x[width]\n"
+ "whilelt p8.h, x28, %x[width]\n"
"b.any 1b\n"
"mov %x[out], x24\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
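Throughout these kernels the width loop never needs a scalar tail: the counter (x28 in the fp16 kernel above) advances by one vector length per pass, and whilelt regenerates the governing predicates so that lanes at or past %x[width] are simply masked off. A scalar sketch of the whilelt semantics, illustrative only:

#include <cstdint>
#include <vector>

// Sketch of the "whilelt" predication pattern used by the width loop
// above: each pass covers one vector of elements, and the predicate
// deactivates lanes once base + lane reaches `width`, so no separate
// scalar tail loop is required.
std::vector<bool> whilelt(uint64_t base, uint64_t width, unsigned vl) {
    std::vector<bool> pred(vl);
    for (unsigned lane = 0; lane < vl; ++lane) {
        pred[lane] = (base + lane) < width; // lane active while in range
    }
    return pred;
}

The closing "b.any 1b" then re-enters the loop only while at least one predicate lane is still active.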
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp
index ac4b1b5086..68fabe3523 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave2VL_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,256 +32,251 @@ void interleave_block<2, 1, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "mov x22, %x[width]\n"
- "incw x22\n"
- "cntw x16\n"
- "sub x22, x22, #0x1\n"
- "udiv x22, x22, x16\n" // n_passes = ceildiv(width, VL<T>)
"mov x21, %x[width]\n"
- "sub x15, x16, #0x1\n"
- "sub x20, x22, #0x1\n"
- "ands x15, x21, x15\n"
- "sub x14, x16, #0x2\n"
- "mov x13, #0x0\n"
+ "mov x20, %x[width]\n"
+ "incw x21\n"
+ "cntw x17\n"
+ "sub x21, x21, #0x1\n"
+ "sub x16, x17, #0x1\n"
+ "udiv x21, x21, x17\n" // n_passes = ceildiv(width, VL<T>)
+ "ands x16, x20, x16\n"
+ "sub x20, x21, #0x1\n"
+ "sub x15, x17, #0x2\n"
+ "mov x14, #0x0\n"
"mov x11, %x[in]\n"
- "ldr x10, [x11, #0x0]\n"
- "add x9, %x[in], x16, LSL #3\n"
- "cntw x28, ALL, MUL #2\n"
- "ldr x27, [x9, #0x0]\n"
- "cntw x26, ALL, MUL #3\n"
+ "add x10, %x[in], x17, LSL #3\n"
+ "cntw x9, ALL, MUL #2\n"
+ "ldr x28, [x11, #0x0]\n"
+ "cntw x27, ALL, MUL #3\n"
"lsr x20, x20, #0x1\n" // n_loops = (n_passes - 1) / 2
- "ldr x25, [x11, #0x8]\n"
- "and x24, x22, #0x1\n" // odd_tail = bool(n_passes & 0x1)
- "csel x15, x15, x16, NE\n"
- "ldr x21, [x9, #0x8]\n"
- "ptrue p13.s\n"
- "whilelt p12.s, XZR, %x[height]\n"
- "whilelt p11.s, x16, %x[height]\n"
+ "ldr x26, [x10, #0x0]\n"
+ "and x25, x21, #0x1\n" // odd_tail = bool(n_passes & 0x1)
+ "csel x16, x16, x17, NE\n"
+ "ldr x24, [x11, #0x8]\n"
+ "ptrue p12.s\n"
+ "whilelt p11.s, XZR, %x[height]\n"
+ "ldr x21, [x10, #0x8]\n"
+ "whilelt p10.s, x17, %x[height]\n"
"mov x23, %x[row_offset]\n"
"mov x22, %x[out]\n"
- "whilelt p10.s, x13, %x[width]\n"
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
+ "whilelt p9.s, x14, %x[width]\n"
+ "whilelt p8.s, x14, %x[width]\n"
"add x11, x11, #0x10\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
"mov x12, #0x0\n"
- "cbz x14, 2f\n"
+ "cbz x15, 2f\n"
"1:" // K loop: Charge: Loop
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0970540 // ld1w { za0h.s[x12] }, p1/Z, [x10, x23, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0xe0970364 // ld1w { za1h.s[x12] }, p0/Z, [x27, x23, LSL #2]\n"
- ".inst 0x25706581 // psel p1.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706160 // psel p0.s, p8.s/Z, p11.s[w12, #1]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0xe0970721 // ld1w { za0h.s[x12, #1] }, p1/Z, [x25, x23, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
+ ".inst 0x25306163 // psel p3.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0x25306142 // psel p2.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0x25706161 // psel p1.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0x25706140 // psel p0.s, p8.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0970f80 // ld1w { za0h.s[x12] }, p3/Z, [x28, x23, LSL #2]\n"
+ "ldr x28, [x11, #0x0]\n"
+ ".inst 0xe0970b44 // ld1w { za1h.s[x12] }, p2/Z, [x26, x23, LSL #2]\n"
+ "ldr x26, [x10, #0x0]\n"
+ ".inst 0xe0970701 // ld1w { za0h.s[x12, #1] }, p1/Z, [x24, x23, LSL #2]\n"
+ "ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
".inst 0xe09702a5 // ld1w { za1h.s[x12, #1] }, p0/Z, [x21, x23, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x14\n"
- "ldr x21, [x9, #0x8]\n"
- "add x9, x9, #0x10\n"
+ "ldr x21, [x10, #0x8]\n"
+ "add x10, x10, #0x10\n"
+ "cmp x12, x15\n"
"blt 1b\n"
"2:" // K loop: Charge: End
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0970540 // ld1w { za0h.s[x12] }, p1/Z, [x10, x23, LSL #2]\n"
- ".inst 0xe0970364 // ld1w { za1h.s[x12] }, p0/Z, [x27, x23, LSL #2]\n"
- ".inst 0x25706581 // psel p1.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706160 // psel p0.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0x25306163 // psel p3.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0x25306142 // psel p2.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0x25706161 // psel p1.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0x25706140 // psel p0.s, p8.s/Z, p10.s[w12, #1]\n"
"mov x11, %x[in]\n"
- "add x9, %x[in], x16, LSL #3\n"
- ".inst 0xe0970721 // ld1w { za0h.s[x12, #1] }, p1/Z, [x25, x23, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
+ "add x10, %x[in], x17, LSL #3\n"
+ ".inst 0xe0970f80 // ld1w { za0h.s[x12] }, p3/Z, [x28, x23, LSL #2]\n"
+ "ldr x28, [x11, #0x0]\n"
+ "incw x14\n"
+ ".inst 0xe0970b44 // ld1w { za1h.s[x12] }, p2/Z, [x26, x23, LSL #2]\n"
+ "ldr x26, [x10, #0x0]\n"
+ ".inst 0xe0970701 // ld1w { za0h.s[x12, #1] }, p1/Z, [x24, x23, LSL #2]\n"
+ "ldr x24, [x11, #0x8]\n"
+ "add x11, x11, #0x10\n"
".inst 0xe09702a5 // ld1w { za1h.s[x12, #1] }, p0/Z, [x21, x23, LSL #2]\n"
- "ldr x27, [x9, #0x0]\n"
+ "ldr x21, [x10, #0x8]\n"
+ "add x10, x10, #0x10\n"
"incw x23\n"
- "incw x13\n"
- "ldr x25, [x11, #0x8]\n"
- "add x11, x11, #0x10\n"
- "ldr x21, [x9, #0x8]\n"
- "add x9, x9, #0x10\n"
"cbz x20, 8f\n"
"mov x20, x20\n"
"3:" // K loop: Main loop
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
- "mov x12, #0x0\n"
- "cbz x14, 5f\n"
+ "whilelt p8.s, x14, %x[width]\n"
+ "mov x13, #0x0\n"
+ "cbz x15, 5f\n"
"4:" // K loop: Main loop: First: Loop
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0970548 // ld1w { za2h.s[x12] }, p1/Z, [x10, x23, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0xe097036c // ld1w { za3h.s[x12] }, p0/Z, [x27, x23, LSL #2]\n"
- ".inst 0x25706580 // psel p0.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706162 // psel p2.s, p8.s/Z, p11.s[w12, #1]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0970329 // ld1w { za2h.s[x12, #1] }, p0/Z, [x25, x23, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0970aad // ld1w { za3h.s[x12, #1] }, p2/Z, [x21, x23, LSL #2]\n"
- "ldr x21, [x9, #0x8]\n"
- ".inst 0xe0bf86c0 // st1w { za0v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0x25316160 // psel p0.s, p8.s/Z, p11.s[w13]\n"
+ ".inst 0x25316142 // psel p2.s, p8.s/Z, p10.s[w13]\n"
+ ".inst 0x25716161 // psel p1.s, p8.s/Z, p11.s[w13, #1]\n"
+ ".inst 0x25716143 // psel p3.s, p8.s/Z, p10.s[w13, #1]\n"
+ ".inst 0xe0972388 // ld1w { za2h.s[x13] }, p0/Z, [x28, x23, LSL #2]\n"
+ ".inst 0x25317120 // psel p0.s, p12.s/Z, p9.s[w13]\n"
+ "ldr x28, [x11, #0x0]\n"
+ ".inst 0xe0972b4c // ld1w { za3h.s[x13] }, p2/Z, [x26, x23, LSL #2]\n"
+ ".inst 0x25317122 // psel p2.s, p12.s/Z, p9.s[w13]\n"
+ "ldr x26, [x10, #0x0]\n"
+ ".inst 0xe0972709 // ld1w { za2h.s[x13, #1] }, p1/Z, [x24, x23, LSL #2]\n"
+ ".inst 0x25717121 // psel p1.s, p12.s/Z, p9.s[w13, #1]\n"
+ "ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86c1 // st1w { za0v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
- "add x9, x9, #0x10\n"
- ".inst 0xe0ba82c5 // st1w { za1v.s[x12, #1] }, p0/Z, [x22, x26, LSL #2]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x14\n"
+ ".inst 0xe0972ead // ld1w { za3h.s[x13, #1] }, p3/Z, [x21, x23, LSL #2]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bfa2c0 // st1w { za0v.s[x13] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x25717120 // psel p0.s, p12.s/Z, p9.s[w13, #1]\n"
+ ".inst 0xe0b1aac4 // st1w { za1v.s[x13] }, p2/Z, [x22, x17, LSL #2]\n"
+ "add x10, x10, #0x10\n"
+ ".inst 0xe0a9a6c1 // st1w { za0v.s[x13, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bba2c5 // st1w { za1v.s[x13, #1] }, p0/Z, [x22, x27, LSL #2]\n"
+ "add x13, x13, #0x2\n"
"addvl x22, x22, #4\n"
+ "cmp x13, x15\n"
"blt 4b\n"
"5:" // K loop: Main loop: First: Tail
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0970548 // ld1w { za2h.s[x12] }, p1/Z, [x10, x23, LSL #2]\n"
- ".inst 0xe097036c // ld1w { za3h.s[x12] }, p0/Z, [x27, x23, LSL #2]\n"
+ ".inst 0x25316160 // psel p0.s, p8.s/Z, p11.s[w13]\n"
+ ".inst 0x25316142 // psel p2.s, p8.s/Z, p10.s[w13]\n"
+ ".inst 0x25716161 // psel p1.s, p8.s/Z, p11.s[w13, #1]\n"
+ ".inst 0x25716143 // psel p3.s, p8.s/Z, p10.s[w13, #1]\n"
"mov x11, %x[in]\n"
- "add x9, %x[in], x16, LSL #3\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0x25706580 // psel p0.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706161 // psel p1.s, p8.s/Z, p11.s[w12, #1]\n"
- ".inst 0xe0970329 // ld1w { za2h.s[x12, #1] }, p0/Z, [x25, x23, LSL #2]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe09706ad // ld1w { za3h.s[x12, #1] }, p1/Z, [x21, x23, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x21, [x9, #0x8]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08ac4 // st1w { za1v.s[x12] }, p2/Z, [x22, x16, LSL #2]\n"
- "whilelt p10.s, x13, %x[width]\n"
- "incw x13\n"
- ".inst 0xe0bc86c1 // st1w { za0v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
+ "add x10, %x[in], x17, LSL #3\n"
+ ".inst 0xe0972388 // ld1w { za2h.s[x13] }, p0/Z, [x28, x23, LSL #2]\n"
+ ".inst 0x25317120 // psel p0.s, p12.s/Z, p9.s[w13]\n"
+ "ldr x28, [x11, #0x0]\n"
+ "mov x12, #0x0\n"
+ ".inst 0xe0972b4c // ld1w { za3h.s[x13] }, p2/Z, [x26, x23, LSL #2]\n"
+ ".inst 0x25317122 // psel p2.s, p12.s/Z, p9.s[w13]\n"
+ "ldr x26, [x10, #0x0]\n"
+ ".inst 0xe0972709 // ld1w { za2h.s[x13, #1] }, p1/Z, [x24, x23, LSL #2]\n"
+ ".inst 0x25717121 // psel p1.s, p12.s/Z, p9.s[w13, #1]\n"
+ "ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
- "add x9, x9, #0x10\n"
- ".inst 0xe0ba82c5 // st1w { za1v.s[x12, #1] }, p0/Z, [x22, x26, LSL #2]\n"
- "addvl x22, x22, #4\n"
+ ".inst 0xe0972ead // ld1w { za3h.s[x13, #1] }, p3/Z, [x21, x23, LSL #2]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bfa2c0 // st1w { za0v.s[x13] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x25717120 // psel p0.s, p12.s/Z, p9.s[w13, #1]\n"
+ ".inst 0xe0b1aac4 // st1w { za1v.s[x13] }, p2/Z, [x22, x17, LSL #2]\n"
+ "whilelt p9.s, x14, %x[width]\n"
+ "incw x14\n"
+ ".inst 0xe0a9a6c1 // st1w { za0v.s[x13, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ "add x10, x10, #0x10\n"
"incw x23\n"
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
- "mov x12, #0x0\n"
- "cbz x14, 7f\n"
+ ".inst 0xe0bba2c5 // st1w { za1v.s[x13, #1] }, p0/Z, [x22, x27, LSL #2]\n"
+ "addvl x22, x22, #4\n"
+ "whilelt p8.s, x14, %x[width]\n"
+ "cbz x15, 7f\n"
"6:" // K loop: Main loop: Second: Loop
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0970540 // ld1w { za0h.s[x12] }, p1/Z, [x10, x23, LSL #2]\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0xe0970364 // ld1w { za1h.s[x12] }, p0/Z, [x27, x23, LSL #2]\n"
- ".inst 0x25706580 // psel p0.s, p9.s/Z, p12.s[w12, #1]\n"
- ".inst 0x25706162 // psel p2.s, p8.s/Z, p11.s[w12, #1]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0970321 // ld1w { za0h.s[x12, #1] }, p0/Z, [x25, x23, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0970aa5 // ld1w { za1h.s[x12, #1] }, p2/Z, [x21, x23, LSL #2]\n"
- "ldr x21, [x9, #0x8]\n"
- ".inst 0xe0bf86c8 // st1w { za2v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b082cc // st1w { za3v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
+ ".inst 0x25306142 // psel p2.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0x25706161 // psel p1.s, p8.s/Z, p11.s[w12, #1]\n"
+ ".inst 0x25706143 // psel p3.s, p8.s/Z, p10.s[w12, #1]\n"
+ ".inst 0xe0970380 // ld1w { za0h.s[x12] }, p0/Z, [x28, x23, LSL #2]\n"
+ ".inst 0x25307120 // psel p0.s, p12.s/Z, p9.s[w12]\n"
+ "ldr x28, [x11, #0x0]\n"
+ ".inst 0xe0970b44 // ld1w { za1h.s[x12] }, p2/Z, [x26, x23, LSL #2]\n"
+ ".inst 0x25307122 // psel p2.s, p12.s/Z, p9.s[w12]\n"
+ "ldr x26, [x10, #0x0]\n"
+ ".inst 0xe0970701 // ld1w { za0h.s[x12, #1] }, p1/Z, [x24, x23, LSL #2]\n"
+ ".inst 0x25707121 // psel p1.s, p12.s/Z, p9.s[w12, #1]\n"
+ "ldr x24, [x11, #0x8]\n"
"add x11, x11, #0x10\n"
- ".inst 0xe0bc86c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
- "add x9, x9, #0x10\n"
- ".inst 0xe0ba82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x26, LSL #2]\n"
+ ".inst 0xe0970ea5 // ld1w { za1h.s[x12, #1] }, p3/Z, [x21, x23, LSL #2]\n"
+ "ldr x21, [x10, #0x8]\n"
+ ".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0x25707120 // psel p0.s, p12.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b18acc // st1w { za3v.s[x12] }, p2/Z, [x22, x17, LSL #2]\n"
+ "add x10, x10, #0x10\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ ".inst 0xe0bb82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
"add x12, x12, #0x2\n"
- "cmp x12, x14\n"
"addvl x22, x22, #4\n"
+ "cmp x12, x15\n"
"blt 6b\n"
"7:" // K loop: Main loop: Second: Tail
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- ".inst 0xe0970540 // ld1w { za0h.s[x12] }, p1/Z, [x10, x23, LSL #2]\n"
- ".inst 0xe0970364 // ld1w { za1h.s[x12] }, p0/Z, [x27, x23, LSL #2]\n"
- "mov x11, %x[in]\n"
- "add x9, %x[in], x16, LSL #3\n"
- "ldr x10, [x11, #0x0]\n"
- ".inst 0x25706580 // psel p0.s, p9.s/Z, p12.s[w12, #1]\n"
+ ".inst 0x25306142 // psel p2.s, p8.s/Z, p10.s[w12]\n"
".inst 0x25706161 // psel p1.s, p8.s/Z, p11.s[w12, #1]\n"
- ".inst 0xe0970321 // ld1w { za0h.s[x12, #1] }, p0/Z, [x25, x23, LSL #2]\n"
- "ldr x27, [x9, #0x0]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe09706a5 // ld1w { za1h.s[x12, #1] }, p1/Z, [x21, x23, LSL #2]\n"
- "ldr x25, [x11, #0x8]\n"
- ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
- "ldr x21, [x9, #0x8]\n"
+ ".inst 0x25706143 // psel p3.s, p8.s/Z, p10.s[w12, #1]\n"
+ "mov x11, %x[in]\n"
+ "add x10, %x[in], x17, LSL #3\n"
+ ".inst 0xe0970380 // ld1w { za0h.s[x12] }, p0/Z, [x28, x23, LSL #2]\n"
+ ".inst 0x25307120 // psel p0.s, p12.s/Z, p9.s[w12]\n"
+ "ldr x28, [x11, #0x0]\n"
+ ".inst 0xe0970b44 // ld1w { za1h.s[x12] }, p2/Z, [x26, x23, LSL #2]\n"
+ ".inst 0x25307122 // psel p2.s, p12.s/Z, p9.s[w12]\n"
+ "ldr x26, [x10, #0x0]\n"
+ ".inst 0xe0970701 // ld1w { za0h.s[x12, #1] }, p1/Z, [x24, x23, LSL #2]\n"
+ ".inst 0x25707121 // psel p1.s, p12.s/Z, p9.s[w12, #1]\n"
+ "ldr x24, [x11, #0x8]\n"
+ "add x11, x11, #0x10\n"
+ ".inst 0xe0970ea5 // ld1w { za1h.s[x12, #1] }, p3/Z, [x21, x23, LSL #2]\n"
+ "ldr x21, [x10, #0x8]\n"
".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25707541 // psel p1.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0x25707540 // psel p0.s, p13.s/Z, p10.s[w12, #1]\n"
- ".inst 0xe0b08acc // st1w { za3v.s[x12] }, p2/Z, [x22, x16, LSL #2]\n"
- "whilelt p10.s, x13, %x[width]\n"
+ ".inst 0x25707120 // psel p0.s, p12.s/Z, p9.s[w12, #1]\n"
+ ".inst 0xe0b18acc // st1w { za3v.s[x12] }, p2/Z, [x22, x17, LSL #2]\n"
+ "whilelt p9.s, x14, %x[width]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xe0bc86c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x28, LSL #2]\n"
- "add x11, x11, #0x10\n"
- "add x9, x9, #0x10\n"
- ".inst 0xe0ba82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x26, LSL #2]\n"
+ ".inst 0xe0a986c9 // st1w { za2v.s[x12, #1] }, p1/Z, [x22, x9, LSL #2]\n"
+ "add x10, x10, #0x10\n"
+ "incw x14\n"
+ ".inst 0xe0bb82cd // st1w { za3v.s[x12, #1] }, p0/Z, [x22, x27, LSL #2]\n"
"addvl x22, x22, #4\n"
- "incw x13\n"
"incw x23\n"
"bgt 3b\n"
"8:" // K loop: Tails
- "cbnz x24, 11f\n"
+ "cbnz x25, 11f\n"
"mov x11, %x[in]\n"
- "whilelt p9.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
+ "whilelt p8.s, x14, %x[width]\n"
"mov x12, #0x0\n"
"9:" // K loop: Tails: Even: First
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
+ ".inst 0x25307123 // psel p3.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307122 // psel p2.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25306161 // psel p1.s, p8.s/Z, p11.s[w12]\n"
+ ".inst 0x25306140 // psel p0.s, p8.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8ec0 // st1w { za0v.s[x12] }, p3/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0b18ac4 // st1w { za1v.s[x12] }, p2/Z, [x22, x17, LSL #2]\n"
+ "addvl x22, x22, #2\n"
"ldr x21, [x11, #0x0]\n"
- ".inst 0x25306581 // psel p1.s, p9.s/Z, p12.s[w12]\n"
- ".inst 0x25306160 // psel p0.s, p8.s/Z, p11.s[w12]\n"
- "ldr x20, [x11, x16, LSL #0x3]\n"
- ".inst 0xe09706a8 // ld1w { za2h.s[x12] }, p1/Z, [x21, x23, LSL #2]\n"
+ "ldr x20, [x11, x17, LSL #0x3]\n"
"add x11, x11, #0x8\n"
- "addvl x22, x22, #2\n"
+ ".inst 0xe09706a8 // ld1w { za2h.s[x12] }, p1/Z, [x21, x23, LSL #2]\n"
".inst 0xe097028c // ld1w { za3h.s[x12] }, p0/Z, [x20, x23, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x16\n"
+ "cmp x12, x17\n"
"blt 9b\n"
- "whilelt p10.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
- "whilelt p8.s, x13, %x[width]\n"
+ "whilelt p9.s, x14, %x[width]\n"
+ "whilelt p8.s, x14, %x[width]\n"
"mov x12, #0x0\n"
"10:" // K loop: Tails: Even: Second
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c8 // st1w { za2v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082cc // st1w { za3v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
+ ".inst 0x25307121 // psel p1.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307120 // psel p0.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf86c8 // st1w { za2v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0b182cc // st1w { za3v.s[x12] }, p0/Z, [x22, x17, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x15\n"
"addvl x22, x22, #2\n"
+ "cmp x12, x16\n"
"blt 10b\n"
- "whilelt p8.s, x13, %x[width]\n"
+ "whilelt p8.s, x14, %x[width]\n"
"b 13f\n"
"11:" // K loop: Tails: Odd
"mov x12, #0x0\n"
"12:" // K loop: Tails: Odd: Loop
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf82c0 // st1w { za0v.s[x12] }, p0/Z, [x22, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0b082c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x16, LSL #2]\n"
+ ".inst 0x25307121 // psel p1.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0x25307120 // psel p0.s, p12.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf86c0 // st1w { za0v.s[x12] }, p1/Z, [x22, XZR, LSL #2]\n"
+ ".inst 0xe0b182c4 // st1w { za1v.s[x12] }, p0/Z, [x22, x17, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x15\n"
"addvl x22, x22, #2\n"
+ "cmp x12, x16\n"
"blt 12b\n"
"13:" // K loop: End
"mov %x[out], x22\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
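
[Editorial sketch, not part of the patch.] The K-loop tails above gate every ZA load and store with a predicate derived from WHILELT, so partial vectors at the right-hand edge of the matrix are masked out rather than handled by a separate scalar loop. A minimal scalar model of that predicate, assuming 32-bit (.s) lanes and a vector of vl lanes:

    #include <cstddef>
    #include <vector>

    // Scalar model of "whilelt pX.s, xN, %x[width]": lane i is active while
    // (base + i) < width. The psel instructions then combine this per-column
    // mask with the per-row height mask before each st1w/ld1w.
    std::vector<bool> whilelt(size_t base, size_t width, size_t vl) {
        std::vector<bool> pred(vl);
        for (size_t i = 0; i < vl; ++i) {
            pred[i] = (base + i) < width;
        }
        return pred;
    }
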
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp
index 2e53475b5c..f5c756eba6 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_bf16_bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,93 +32,93 @@ void interleave_block<4, 2, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
+ "mov x17, #0x0\n"
+ "mov x16, %x[row_offset]\n"
"cntw x15\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
+ "cntw x14\n"
+ "cntw x11, ALL, MUL #2\n"
+ "cntw x10, ALL, MUL #3\n"
+ "cmp %x[height], x15\n"
+ "cnth x9\n"
+ "csel x15, %x[height], x15, LT\n"
"whilelt p11.h, XZR, %x[height]\n"
- "whilelt p10.h, x15, %x[height]\n"
- "whilelt p9.h, x14, %x[height]\n"
- "whilelt p8.h, x13, %x[height]\n"
- "mov x11, #0x0\n"
- "cnth x10\n"
+ "whilelt p10.h, x14, %x[height]\n"
+ "whilelt p9.h, x11, %x[height]\n"
+ "whilelt p8.h, x10, %x[height]\n"
"ptrue p13.s\n"
- "sub x16, x16, #0x1\n"
+ "sub x15, x15, #0x1\n"
"zip1 p12.h, p11.h, p9.h\n"
"zip1 p11.h, p10.h, p8.h\n"
- "mov x9, %x[row_offset]\n"
"mov x28, %x[out]\n"
- "whilelt p10.h, x11, %x[width]\n"
- "whilelt p9.h, x11, %x[width]\n"
- "whilelt p8.h, x11, %x[width]\n"
+ "whilelt p10.h, x17, %x[width]\n"
+ "whilelt p9.h, x17, %x[width]\n"
+ "whilelt p8.h, x17, %x[width]\n"
"1:" // Width loop
"add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
- "add x20, %x[in], x13, LSL #3\n"
+ "add x26, %x[in], x14, LSL #3\n"
+ "add x25, %x[in], x11, LSL #3\n"
+ "add x20, %x[in], x10, LSL #3\n"
+ "ldr x24, [x27], #0x8\n"
+ "mov x13, #0x0\n"
"ldr x23, [x26], #0x8\n"
- "mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
+ "ldr x22, [x25], #0x8\n"
"ldr x21, [x20], #0x8\n"
- "cbz x16, 3f\n"
+ "cbz x15, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0490720 // ld1h { za0h.h[x12] }, p1/Z, [x25, x9, LSL #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe04902e8 // ld1h { za1h.h[x12] }, p0/Z, [x23, x9, LSL #1]\n"
- ".inst 0x25386581 // psel p1.h, p9.h/Z, p12.h[w12, #1]\n"
- ".inst 0x25386160 // psel p0.h, p8.h/Z, p11.h[w12, #1]\n"
+ ".inst 0x25296580 // psel p0.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296162 // psel p2.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe0502300 // ld1h { za0h.h[x13] }, p0/Z, [x24, x16, LSL #1]\n"
+ ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
+ "ldr x24, [x27], #0x8\n"
+ ".inst 0xe0502ae8 // ld1h { za1h.h[x13] }, p2/Z, [x23, x16, LSL #1]\n"
"ldr x23, [x26], #0x8\n"
- ".inst 0xe04906c1 // ld1h { za0h.h[x12, #1] }, p1/Z, [x22, x9, LSL #1]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe04902a9 // ld1h { za1h.h[x12, #1] }, p0/Z, [x21, x9, LSL #1]\n"
- "add x12, x12, #0x2\n"
- "cmp x12, x16, LSL #1\n"
+ ".inst 0xe05026c1 // ld1h { za0h.h[x13, #1] }, p1/Z, [x22, x16, LSL #1]\n"
+ "ldr x22, [x25], #0x8\n"
+ ".inst 0xe05022a9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x21, x16, LSL #1]\n"
+ "add x13, x13, #0x2\n"
"ldr x21, [x20], #0x8\n"
+ "cmp x13, x15, LSL #1\n"
"blt 2b\n"
"3:" // Loads: Tail
- ".inst 0x25286581 // psel p1.h, p9.h/Z, p12.h[w12]\n"
- ".inst 0x25286160 // psel p0.h, p8.h/Z, p11.h[w12]\n"
- ".inst 0xe0490720 // ld1h { za0h.h[x12] }, p1/Z, [x25, x9, LSL #1]\n"
- "sub x20, %x[width], x11\n"
- ".inst 0xe04902e8 // ld1h { za1h.h[x12] }, p0/Z, [x23, x9, LSL #1]\n"
- "cmp x20, x10\n"
- "csel x20, x20, x10, LT\n"
- ".inst 0x25386580 // psel p0.h, p9.h/Z, p12.h[w12, #1]\n"
- ".inst 0xe04902c1 // ld1h { za0h.h[x12, #1] }, p0/Z, [x22, x9, LSL #1]\n"
- ".inst 0x25386160 // psel p0.h, p8.h/Z, p11.h[w12, #1]\n"
- "add x20, x20, #0x1\n"
- ".inst 0xe04902a9 // ld1h { za1h.h[x12, #1] }, p0/Z, [x21, x9, LSL #1]\n"
+ ".inst 0x25296580 // psel p0.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296162 // psel p2.h, p8.h/Z, p11.h[w13]\n"
+ "sub x20, %x[width], x17\n"
+ ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
+ "cmp x20, x9\n"
"mov x12, #0x0\n"
+ ".inst 0xe0502300 // ld1h { za0h.h[x13] }, p0/Z, [x24, x16, LSL #1]\n"
+ ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
+ "csel x20, x20, x9, LT\n"
+ ".inst 0xe0502ae8 // ld1h { za1h.h[x13] }, p2/Z, [x23, x16, LSL #1]\n"
+ "add x20, x20, #0x1\n"
+ ".inst 0xe05026c1 // ld1h { za0h.h[x13, #1] }, p1/Z, [x22, x16, LSL #1]\n"
"lsr x20, x20, #0x1\n"
+ ".inst 0xe05022a9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x21, x16, LSL #1]\n"
"4:" // Stores: Loop
".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
- ".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0af8384 // st1w { za1v.s[x12] }, p0/Z, [x28, x15, LSL #2]\n"
+ ".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
".inst 0x25307541 // psel p1.s, p13.s/Z, p10.s[w12]\n"
+ ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
- ".inst 0xe0ae8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x14, LSL #2]\n"
- ".inst 0xe0ad838c // st1w { za3v.s[x12] }, p0/Z, [x28, x13, LSL #2]\n"
+ ".inst 0xe0ae8b84 // st1w { za1v.s[x12] }, p2/Z, [x28, x14, LSL #2]\n"
+ ".inst 0xe0ab8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x11, LSL #2]\n"
+ ".inst 0xe0aa838c // st1w { za3v.s[x12] }, p0/Z, [x28, x10, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
"addvl x28, x28, #4\n"
+ "cmp x12, x20\n"
"blt 4b\n"
- "inch x11\n"
- "whilelt p10.h, x11, %x[width]\n"
- "whilelt p9.h, x11, %x[width]\n"
- "whilelt p8.h, x11, %x[width]\n"
- "inch x9\n"
+ "inch x17\n"
+ "inch x16\n"
+ "whilelt p10.h, x17, %x[width]\n"
+ "whilelt p9.h, x17, %x[width]\n"
+ "whilelt p8.h, x17, %x[width]\n"
"b.any 1b\n"
"mov %x[out], x28\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
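
[Editorial sketch, not part of the patch.] This kernel is an SME specialisation of the generic interleave_block<4, 2, ...> template whose scalar reference implementation is removed from interleave_indirect.cpp further down in this diff. A hedged sketch of the layout it produces, assuming int_by interleaved rows and zero padding for out-of-range rows and columns, as in that generic template:

    #include <cstddef>

    // Reference layout for block-2 interleaving: for each pair of columns,
    // emit two elements from every row in turn, padding with zeros.
    template <typename T>
    void interleave_block2_ref(T *&out, const T *const *in, size_t width,
                               size_t height, size_t row_offset, size_t int_by) {
        for (size_t pos = 0; pos < width; pos += 2) {
            for (size_t row = 0; row < int_by; ++row) {
                for (size_t col = 0; col < 2; ++col) {
                    bool valid = row < height && pos + col < width;
                    *out++ = valid ? in[row][row_offset + pos + col] : T(0);
                }
            }
        }
    }
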
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp
index 268bdbb924..9e0ab463be 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block2_fp16_fp16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,19 +10,19 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
-#if defined(__ARM_FEATURE_SVE)
+#if defined(ARM_COMPUTE_ENABLE_SME)
template <>
void interleave_block<4, 2, VLType::SME, false>(
@@ -65,36 +65,36 @@ void interleave_block<4, 2, VLType::SME, false>(
"ldr x21, [x20], #0x8\n"
"cbz x15, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25296582 // psel p2.h, p9.h/Z, p12.h[w13]\n"
- ".inst 0x25296161 // psel p1.h, p8.h/Z, p11.h[w13]\n"
- ".inst 0x25396580 // psel p0.h, p9.h/Z, p12.h[w13, #1]\n"
- ".inst 0xe0502b00 // ld1h { za0h.h[x13] }, p2/Z, [x24, x16, LSL #1]\n"
- ".inst 0x25396162 // psel p2.h, p8.h/Z, p11.h[w13, #1]\n"
+ ".inst 0x25296580 // psel p0.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296162 // psel p2.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0xe0502300 // ld1h { za0h.h[x13] }, p0/Z, [x24, x16, LSL #1]\n"
+ ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
"ldr x24, [x27], #0x8\n"
- ".inst 0xe05026e8 // ld1h { za1h.h[x13] }, p1/Z, [x23, x16, LSL #1]\n"
+ ".inst 0xe0502ae8 // ld1h { za1h.h[x13] }, p2/Z, [x23, x16, LSL #1]\n"
"ldr x23, [x26], #0x8\n"
- ".inst 0xe05022c1 // ld1h { za0h.h[x13, #1] }, p0/Z, [x22, x16, LSL #1]\n"
+ ".inst 0xe05026c1 // ld1h { za0h.h[x13, #1] }, p1/Z, [x22, x16, LSL #1]\n"
"ldr x22, [x25], #0x8\n"
- ".inst 0xe0502aa9 // ld1h { za1h.h[x13, #1] }, p2/Z, [x21, x16, LSL #1]\n"
+ ".inst 0xe05022a9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x21, x16, LSL #1]\n"
"add x13, x13, #0x2\n"
"ldr x21, [x20], #0x8\n"
"cmp x13, x15, LSL #1\n"
"blt 2b\n"
"3:" // Loads: Tail
- ".inst 0x25296581 // psel p1.h, p9.h/Z, p12.h[w13]\n"
- ".inst 0x25296160 // psel p0.h, p8.h/Z, p11.h[w13]\n"
+ ".inst 0x25296580 // psel p0.h, p9.h/Z, p12.h[w13]\n"
+ ".inst 0x25296162 // psel p2.h, p8.h/Z, p11.h[w13]\n"
"sub x20, %x[width], x17\n"
- ".inst 0x25396582 // psel p2.h, p9.h/Z, p12.h[w13, #1]\n"
+ ".inst 0x25396581 // psel p1.h, p9.h/Z, p12.h[w13, #1]\n"
"cmp x20, x9\n"
"mov x12, #0x0\n"
- ".inst 0xe0502700 // ld1h { za0h.h[x13] }, p1/Z, [x24, x16, LSL #1]\n"
- ".inst 0xe05022e8 // ld1h { za1h.h[x13] }, p0/Z, [x23, x16, LSL #1]\n"
- ".inst 0x25396161 // psel p1.h, p8.h/Z, p11.h[w13, #1]\n"
+ ".inst 0xe0502300 // ld1h { za0h.h[x13] }, p0/Z, [x24, x16, LSL #1]\n"
+ ".inst 0x25396160 // psel p0.h, p8.h/Z, p11.h[w13, #1]\n"
"csel x20, x20, x9, LT\n"
+ ".inst 0xe0502ae8 // ld1h { za1h.h[x13] }, p2/Z, [x23, x16, LSL #1]\n"
"add x20, x20, #0x1\n"
- ".inst 0xe0502ac1 // ld1h { za0h.h[x13, #1] }, p2/Z, [x22, x16, LSL #1]\n"
+ ".inst 0xe05026c1 // ld1h { za0h.h[x13, #1] }, p1/Z, [x22, x16, LSL #1]\n"
"lsr x20, x20, #0x1\n"
- ".inst 0xe05026a9 // ld1h { za1h.h[x13, #1] }, p1/Z, [x21, x16, LSL #1]\n"
+ ".inst 0xe05022a9 // ld1h { za1h.h[x13, #1] }, p0/Z, [x21, x16, LSL #1]\n"
"4:" // Stores: Loop
".inst 0x25307540 // psel p0.s, p13.s/Z, p10.s[w12]\n"
".inst 0x25307542 // psel p2.s, p13.s/Z, p10.s[w12]\n"
@@ -122,4 +122,4 @@ void interleave_block<4, 2, VLType::SME, false>(
);
}
-#endif // defined(__ARM_FEATURE_SVE)
+#endif // defined(ARM_COMPUTE_ENABLE_SME)
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp
index 67dd5a9bb7..b0b3aa85c1 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,92 +32,92 @@ void interleave_block<4, 4, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
+ "mov x17, #0x0\n"
+ "mov x16, %x[row_offset]\n"
"cntw x15\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
+ "cntw x14\n"
+ "cntw x11, ALL, MUL #2\n"
+ "cntw x10, ALL, MUL #3\n"
+ "cmp %x[height], x15\n"
+ "cntb x9\n"
+ "csel x15, %x[height], x15, LT\n"
"whilelt p12.b, XZR, %x[height]\n"
- "whilelt p10.b, x15, %x[height]\n"
- "whilelt p9.b, x14, %x[height]\n"
- "whilelt p8.b, x13, %x[height]\n"
+ "whilelt p10.b, x14, %x[height]\n"
+ "whilelt p9.b, x11, %x[height]\n"
+ "whilelt p8.b, x10, %x[height]\n"
"zip1 p12.b, p12.b, p9.b\n"
"zip1 p10.b, p10.b, p8.b\n"
- "mov x11, #0x0\n"
- "cntb x10\n"
"ptrue p11.s\n"
- "sub x16, x16, #0x1\n"
+ "sub x15, x15, #0x1\n"
"zip1 p10.b, p12.b, p10.b\n"
- "mov x9, %x[row_offset]\n"
"mov x28, %x[out]\n"
- "whilelt p9.b, x11, %x[width]\n"
- "whilelt p8.b, x11, %x[width]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"1:" // Width loop
"add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
- "add x23, %x[in], x13, LSL #3\n"
- "ldr x20, [x26], #0x8\n"
- "mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
- "ldr x21, [x23], #0x8\n"
- "cbz x16, 3f\n"
+ "add x26, %x[in], x14, LSL #3\n"
+ "add x25, %x[in], x11, LSL #3\n"
+ "add x20, %x[in], x10, LSL #3\n"
+ "ldr x24, [x27], #0x8\n"
+ "mov x13, #0x0\n"
+ "ldr x23, [x26], #0x8\n"
+ "ldr x22, [x25], #0x8\n"
+ "ldr x21, [x20], #0x8\n"
+ "cbz x15, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0090320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x9]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe0090281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x9]\n"
- ".inst 0x25346141 // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0xe00906c2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x22, x9]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe00902a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x9]\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x16, LSL #2\n"
- "ldr x21, [x23], #0x8\n"
+ ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe0102300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x16]\n"
+ ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "ldr x24, [x27], #0x8\n"
+ ".inst 0xe0102ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x16]\n"
+ "ldr x23, [x26], #0x8\n"
+ ".inst 0xe01026c2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x22, x16]\n"
+ "ldr x22, [x25], #0x8\n"
+ ".inst 0xe01022a3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x21, x16]\n"
+ "add x13, x13, #0x4\n"
+ "ldr x21, [x20], #0x8\n"
+ "cmp x13, x15, LSL #2\n"
"blt 2b\n"
"3:" // Loads: Tail
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0090320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x9]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe0090281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x9]\n"
- ".inst 0x25346140 // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
- "sub x20, %x[width], x11\n"
- ".inst 0xe00902c2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x22, x9]\n"
- "cmp x20, x10\n"
- "csel x20, x20, x10, LT\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "add x20, x20, #0x3\n"
- ".inst 0xe00902a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x9]\n"
+ ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ "sub x20, %x[width], x17\n"
+ "cmp x20, x9\n"
"mov x12, #0x0\n"
+ ".inst 0xe0102300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x16]\n"
+ ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "csel x20, x20, x9, LT\n"
+ ".inst 0xe0102ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x16]\n"
+ "add x20, x20, #0x3\n"
+ ".inst 0xe01026c2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x22, x16]\n"
"lsr x20, x20, #0x2\n"
+ ".inst 0xe01022a3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x21, x16]\n"
"4:" // Stores: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0af8384 // st1w { za1v.s[x12] }, p0/Z, [x28, x15, LSL #2]\n"
+ ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0ae8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x14, LSL #2]\n"
- ".inst 0xe0ad838c // st1w { za3v.s[x12] }, p0/Z, [x28, x13, LSL #2]\n"
+ ".inst 0xe0ae8b84 // st1w { za1v.s[x12] }, p2/Z, [x28, x14, LSL #2]\n"
+ ".inst 0xe0ab8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x11, LSL #2]\n"
+ ".inst 0xe0aa838c // st1w { za3v.s[x12] }, p0/Z, [x28, x10, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
"addvl x28, x28, #4\n"
+ "cmp x12, x20\n"
"blt 4b\n"
- "incb x11\n"
- "whilelt p9.b, x11, %x[width]\n"
- "whilelt p8.b, x11, %x[width]\n"
- "incb x9\n"
+ "incb x17\n"
+ "incb x16\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"b.any 1b\n"
"mov %x[out], x28\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
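
[Editorial sketch, not part of the patch.] In the "Loads: Tail" block above, the sub/cmp/csel/add/lsr sequence computes how many 32-bit vertical slices the store loop must write: with four int8 values packed per ZA element (cntb bytes per vector), that is ceil(min(width - processed, vector_bytes) / 4). The same arithmetic in plain C++ (names are illustrative):

    #include <algorithm>
    #include <cstddef>

    // "sub x20, %x[width], x17; cmp x20, x9; csel x20, x20, x9, LT;
    //  add x20, x20, #0x3; lsr x20, x20, #0x2" as scalar code.
    size_t tail_store_count(size_t width, size_t processed, size_t vector_bytes) {
        size_t remaining = std::min(width - processed, vector_bytes);
        return (remaining + 3) / 4;
    }
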
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp
index 21d9378368..a4696816e9 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_s8_s8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,118 +32,118 @@ void interleave_block<4, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
+ "mov x17, %x[row_offset]\n"
+ "mov x16, %x[out]\n"
"cntw x15\n"
+ "cntw x14\n"
"mov z24.b, #0x1\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
"mov z23.s, #0x0\n"
+ "cntw x11, ALL, MUL #2\n"
+ "cntw x10, ALL, MUL #3\n"
"mov z22.s, #0x0\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
"mov z21.s, #0x0\n"
+ "cmp %x[height], x15\n"
+ "ptrue p3.b\n"
"mov z20.s, #0x0\n"
+ "csel x15, %x[height], x15, LT\n"
"whilelt p12.b, XZR, %x[height]\n"
- "whilelt p10.b, x15, %x[height]\n"
- "whilelt p9.b, x14, %x[height]\n"
- "whilelt p8.b, x13, %x[height]\n"
+ "whilelt p10.b, x14, %x[height]\n"
+ "whilelt p9.b, x11, %x[height]\n"
+ "whilelt p8.b, x10, %x[height]\n"
"zip1 p12.b, p12.b, p9.b\n"
"zip1 p10.b, p10.b, p8.b\n"
- "ptrue p2.b\n"
- "cntb x11\n"
+ "cntb x9\n"
"ptrue p11.s\n"
- "sub x16, x16, #0x1\n"
+ "sub x15, x15, #0x1\n"
"zip1 p10.b, p12.b, p10.b\n"
- "mov x10, %x[row_offset]\n"
- "mov x9, %x[out]\n"
"cbnz %x[first], 1f\n"
- "addvl x9, x9, #-4\n"
- "ld1w { z23.s }, p2/Z, [x9]\n"
- "ld1w { z22.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z21.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x9, #3, MUL VL]\n"
+ "addvl x16, x16, #-4\n"
+ "ld1w { z23.s }, p3/Z, [x16]\n"
+ "ld1w { z22.s }, p3/Z, [x16, #1, MUL VL]\n"
+ "ld1w { z21.s }, p3/Z, [x16, #2, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x16, #3, MUL VL]\n"
"1:" // Initialise row sums: End
"mov x28, #0x0\n"
"whilelt p9.b, x28, %x[width]\n"
"whilelt p8.b, x28, %x[width]\n"
"2:" // Width loop
"add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
- "add x23, %x[in], x13, LSL #3\n"
- "ldr x20, [x26], #0x8\n"
- "mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
- "ldr x21, [x23], #0x8\n"
- "cbz x16, 4f\n"
+ "add x26, %x[in], x14, LSL #3\n"
+ "add x25, %x[in], x11, LSL #3\n"
+ "add x20, %x[in], x10, LSL #3\n"
+ "ldr x24, [x27], #0x8\n"
+ "mov x13, #0x0\n"
+ "ldr x23, [x26], #0x8\n"
+ "ldr x22, [x25], #0x8\n"
+ "ldr x21, [x20], #0x8\n"
+ "cbz x15, 4f\n"
"3:" // Loads: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe00a0320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x10]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe00a0281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x10]\n"
- ".inst 0x25346141 // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0xe00a06c2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x22, x10]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe00a02a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x10]\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x16, LSL #2\n"
- "ldr x21, [x23], #0x8\n"
+ ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe0112300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x17]\n"
+ ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "ldr x24, [x27], #0x8\n"
+ ".inst 0xe0112ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x17]\n"
+ "ldr x23, [x26], #0x8\n"
+ ".inst 0xe01126c2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x22, x17]\n"
+ "ldr x22, [x25], #0x8\n"
+ ".inst 0xe01122a3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x21, x17]\n"
+ "add x13, x13, #0x4\n"
+ "ldr x21, [x20], #0x8\n"
+ "cmp x13, x15, LSL #2\n"
"blt 3b\n"
"4:" // Loads: Tail
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe00a0320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x10]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe00a0281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x10]\n"
- ".inst 0x25346140 // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
"sub x20, %x[width], x28\n"
- ".inst 0xe00a02c2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x22, x10]\n"
- "cmp x20, x11\n"
- "csel x20, x20, x11, LT\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "add x20, x20, #0x3\n"
- ".inst 0xe00a02a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x10]\n"
+ "cmp x20, x9\n"
"mov x12, #0x0\n"
+ ".inst 0xe0112300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x17]\n"
+ ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "csel x20, x20, x9, LT\n"
+ ".inst 0xe0112ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x17]\n"
+ "add x20, x20, #0x3\n"
+ ".inst 0xe01126c2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x22, x17]\n"
"lsr x20, x20, #0x2\n"
+ ".inst 0xe01122a3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x21, x17]\n"
"5:" // Stores: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8120 // st1w { za0v.s[x12] }, p0/Z, [x9, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0828811 // mova z17.s, p2/M, za0v.s[x12]\n"
- ".inst 0xe0af8124 // st1w { za1v.s[x12] }, p0/Z, [x9, x15, LSL #2]\n"
+ ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828c11 // mova z17.s, p3/M, za0v.s[x12]\n"
".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0828893 // mova z19.s, p2/M, za1v.s[x12]\n"
- ".inst 0xe0ae8528 // st1w { za2v.s[x12] }, p1/Z, [x9, x14, LSL #2]\n"
- ".inst 0xc0828910 // mova z16.s, p2/M, za2v.s[x12]\n"
+ ".inst 0xc0828c90 // mova z16.s, p3/M, za1v.s[x12]\n"
"sdot z23.s, z17.b, z24.b\n"
- ".inst 0xe0ad812c // st1w { za3v.s[x12] }, p0/Z, [x9, x13, LSL #2]\n"
- ".inst 0xc0828992 // mova z18.s, p2/M, za3v.s[x12]\n"
+ ".inst 0xc0828d13 // mova z19.s, p3/M, za2v.s[x12]\n"
+ "sdot z22.s, z16.b, z24.b\n"
+ ".inst 0xe0bf8200 // st1w { za0v.s[x12] }, p0/Z, [x16, XZR, LSL #2]\n"
+ ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828d92 // mova z18.s, p3/M, za3v.s[x12]\n"
+ "sdot z21.s, z19.b, z24.b\n"
+ ".inst 0xe0ae8a04 // st1w { za1v.s[x12] }, p2/Z, [x16, x14, LSL #2]\n"
+ "sdot z20.s, z18.b, z24.b\n"
+ ".inst 0xe0ab8608 // st1w { za2v.s[x12] }, p1/Z, [x16, x11, LSL #2]\n"
+ ".inst 0xe0aa820c // st1w { za3v.s[x12] }, p0/Z, [x16, x10, LSL #2]\n"
"add x12, x12, #0x1\n"
+ "addvl x16, x16, #4\n"
"cmp x12, x20\n"
- "sdot z22.s, z19.b, z24.b\n"
- "sdot z21.s, z16.b, z24.b\n"
- "addvl x9, x9, #4\n"
- "sdot z20.s, z18.b, z24.b\n"
"blt 5b\n"
"incb x28\n"
+ "incb x17\n"
"whilelt p9.b, x28, %x[width]\n"
"whilelt p8.b, x28, %x[width]\n"
- "incb x10\n"
"b.any 2b\n"
- "st1w { z23.s }, p2, [x9]\n"
- "st1w { z22.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z20.s }, p2, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "mov %x[out], x9\n"
+ "st1w { z23.s }, p3, [x16]\n"
+ "st1w { z22.s }, p3, [x16, #1, MUL VL]\n"
+ "st1w { z21.s }, p3, [x16, #2, MUL VL]\n"
+ "st1w { z20.s }, p3, [x16, #3, MUL VL]\n"
+ "addvl x16, x16, #4\n"
".inst 0xd503467f // SMSTOP\n"
+ "mov %x[out], x16\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
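
[Editorial sketch, not part of the patch.] The summing variant keeps per-row running sums while it interleaves: z24 is splatted to 1 ("mov z24.b, #0x1"), so each sdot against it adds four consecutive signed bytes of a ZA slice into one int32 accumulator lane. A scalar sketch of that dot-with-ones trick (the function name is illustrative):

    #include <cstddef>
    #include <cstdint>

    // Per-lane meaning of "sdot z23.s, z17.b, z24.b" when z24 holds 1:
    // each 32-bit lane accumulates the sum of its four signed bytes.
    void sdot_with_ones(int32_t *acc, const int8_t *src, size_t lanes) {
        for (size_t l = 0; l < lanes; ++l) {
            acc[l] += src[4 * l + 0] + src[4 * l + 1]
                    + src[4 * l + 2] + src[4 * l + 3];
        }
    }
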
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp
index f149c93293..df77398acc 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,92 +32,92 @@ void interleave_block<4, 4, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
+ "mov x17, #0x0\n"
+ "mov x16, %x[row_offset]\n"
"cntw x15\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
+ "cntw x14\n"
+ "cntw x11, ALL, MUL #2\n"
+ "cntw x10, ALL, MUL #3\n"
+ "cmp %x[height], x15\n"
+ "cntb x9\n"
+ "csel x15, %x[height], x15, LT\n"
"whilelt p12.b, XZR, %x[height]\n"
- "whilelt p10.b, x15, %x[height]\n"
- "whilelt p9.b, x14, %x[height]\n"
- "whilelt p8.b, x13, %x[height]\n"
+ "whilelt p10.b, x14, %x[height]\n"
+ "whilelt p9.b, x11, %x[height]\n"
+ "whilelt p8.b, x10, %x[height]\n"
"zip1 p12.b, p12.b, p9.b\n"
"zip1 p10.b, p10.b, p8.b\n"
- "mov x11, #0x0\n"
- "cntb x10\n"
"ptrue p11.s\n"
- "sub x16, x16, #0x1\n"
+ "sub x15, x15, #0x1\n"
"zip1 p10.b, p12.b, p10.b\n"
- "mov x9, %x[row_offset]\n"
"mov x28, %x[out]\n"
- "whilelt p9.b, x11, %x[width]\n"
- "whilelt p8.b, x11, %x[width]\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"1:" // Width loop
"add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
- "add x23, %x[in], x13, LSL #3\n"
- "ldr x20, [x26], #0x8\n"
- "mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
- "ldr x21, [x23], #0x8\n"
- "cbz x16, 3f\n"
+ "add x26, %x[in], x14, LSL #3\n"
+ "add x25, %x[in], x11, LSL #3\n"
+ "add x20, %x[in], x10, LSL #3\n"
+ "ldr x24, [x27], #0x8\n"
+ "mov x13, #0x0\n"
+ "ldr x23, [x26], #0x8\n"
+ "ldr x22, [x25], #0x8\n"
+ "ldr x21, [x20], #0x8\n"
+ "cbz x15, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0090320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x9]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe0090281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x9]\n"
- ".inst 0x25346141 // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0xe00906c2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x22, x9]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe00902a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x9]\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x16, LSL #2\n"
- "ldr x21, [x23], #0x8\n"
+ ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe0102300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x16]\n"
+ ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "ldr x24, [x27], #0x8\n"
+ ".inst 0xe0102ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x16]\n"
+ "ldr x23, [x26], #0x8\n"
+ ".inst 0xe01026c2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x22, x16]\n"
+ "ldr x22, [x25], #0x8\n"
+ ".inst 0xe01022a3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x21, x16]\n"
+ "add x13, x13, #0x4\n"
+ "ldr x21, [x20], #0x8\n"
+ "cmp x13, x15, LSL #2\n"
"blt 2b\n"
"3:" // Loads: Tail
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe0090320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x9]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe0090281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x9]\n"
- ".inst 0x25346140 // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
- "sub x20, %x[width], x11\n"
- ".inst 0xe00902c2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x22, x9]\n"
- "cmp x20, x10\n"
- "csel x20, x20, x10, LT\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "add x20, x20, #0x3\n"
- ".inst 0xe00902a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x9]\n"
+ ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ "sub x20, %x[width], x17\n"
+ "cmp x20, x9\n"
"mov x12, #0x0\n"
+ ".inst 0xe0102300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x16]\n"
+ ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "csel x20, x20, x9, LT\n"
+ ".inst 0xe0102ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x16]\n"
+ "add x20, x20, #0x3\n"
+ ".inst 0xe01026c2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x22, x16]\n"
"lsr x20, x20, #0x2\n"
+ ".inst 0xe01022a3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x21, x16]\n"
"4:" // Stores: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0af8384 // st1w { za1v.s[x12] }, p0/Z, [x28, x15, LSL #2]\n"
+ ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0ae8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x14, LSL #2]\n"
- ".inst 0xe0ad838c // st1w { za3v.s[x12] }, p0/Z, [x28, x13, LSL #2]\n"
+ ".inst 0xe0ae8b84 // st1w { za1v.s[x12] }, p2/Z, [x28, x14, LSL #2]\n"
+ ".inst 0xe0ab8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x11, LSL #2]\n"
+ ".inst 0xe0aa838c // st1w { za3v.s[x12] }, p0/Z, [x28, x10, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
"addvl x28, x28, #4\n"
+ "cmp x12, x20\n"
"blt 4b\n"
- "incb x11\n"
- "whilelt p9.b, x11, %x[width]\n"
- "whilelt p8.b, x11, %x[width]\n"
- "incb x9\n"
+ "incb x17\n"
+ "incb x16\n"
+ "whilelt p9.b, x17, %x[width]\n"
+ "whilelt p8.b, x17, %x[width]\n"
"b.any 1b\n"
"mov %x[out], x28\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp
index 252152e3da..14ab3f476b 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_block4_u8_u8_summing.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,118 +32,118 @@ void interleave_block<4, 4, VLType::SME, true>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x16\n"
+ "mov x17, %x[row_offset]\n"
+ "mov x16, %x[out]\n"
"cntw x15\n"
+ "cntw x14\n"
"mov z24.b, #0x1\n"
- "cntw x14, ALL, MUL #2\n"
- "cntw x13, ALL, MUL #3\n"
"mov z23.s, #0x0\n"
+ "cntw x11, ALL, MUL #2\n"
+ "cntw x10, ALL, MUL #3\n"
"mov z22.s, #0x0\n"
- "cmp %x[height], x16\n"
- "csel x16, %x[height], x16, LT\n"
"mov z21.s, #0x0\n"
+ "cmp %x[height], x15\n"
+ "ptrue p3.b\n"
"mov z20.s, #0x0\n"
+ "csel x15, %x[height], x15, LT\n"
"whilelt p12.b, XZR, %x[height]\n"
- "whilelt p10.b, x15, %x[height]\n"
- "whilelt p9.b, x14, %x[height]\n"
- "whilelt p8.b, x13, %x[height]\n"
+ "whilelt p10.b, x14, %x[height]\n"
+ "whilelt p9.b, x11, %x[height]\n"
+ "whilelt p8.b, x10, %x[height]\n"
"zip1 p12.b, p12.b, p9.b\n"
"zip1 p10.b, p10.b, p8.b\n"
- "ptrue p2.b\n"
- "cntb x11\n"
+ "cntb x9\n"
"ptrue p11.s\n"
- "sub x16, x16, #0x1\n"
+ "sub x15, x15, #0x1\n"
"zip1 p10.b, p12.b, p10.b\n"
- "mov x10, %x[row_offset]\n"
- "mov x9, %x[out]\n"
"cbnz %x[first], 1f\n"
- "addvl x9, x9, #-4\n"
- "ld1w { z23.s }, p2/Z, [x9]\n"
- "ld1w { z22.s }, p2/Z, [x9, #1, MUL VL]\n"
- "ld1w { z21.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x9, #3, MUL VL]\n"
+ "addvl x16, x16, #-4\n"
+ "ld1w { z23.s }, p3/Z, [x16]\n"
+ "ld1w { z22.s }, p3/Z, [x16, #1, MUL VL]\n"
+ "ld1w { z21.s }, p3/Z, [x16, #2, MUL VL]\n"
+ "ld1w { z20.s }, p3/Z, [x16, #3, MUL VL]\n"
"1:" // Initialise row sums: End
"mov x28, #0x0\n"
"whilelt p9.b, x28, %x[width]\n"
"whilelt p8.b, x28, %x[width]\n"
"2:" // Width loop
"add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x15, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x14, LSL #3\n"
- "add x23, %x[in], x13, LSL #3\n"
- "ldr x20, [x26], #0x8\n"
- "mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
- "ldr x21, [x23], #0x8\n"
- "cbz x16, 4f\n"
+ "add x26, %x[in], x14, LSL #3\n"
+ "add x25, %x[in], x11, LSL #3\n"
+ "add x20, %x[in], x10, LSL #3\n"
+ "ldr x24, [x27], #0x8\n"
+ "mov x13, #0x0\n"
+ "ldr x23, [x26], #0x8\n"
+ "ldr x22, [x25], #0x8\n"
+ "ldr x21, [x20], #0x8\n"
+ "cbz x15, 4f\n"
"3:" // Loads: Loop
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe00a0320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x10]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0xe00a0281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x10]\n"
- ".inst 0x25346141 // psel p1.b, p8.b/Z, p10.b[w12, #2]\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "ldr x20, [x26], #0x8\n"
- ".inst 0xe00a06c2 // ld1b { za0h.b[x12, #2] }, p1/Z, [x22, x10]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe00a02a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x10]\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x16, LSL #2\n"
- "ldr x21, [x23], #0x8\n"
+ ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
+ ".inst 0xe0112300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x17]\n"
+ ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "ldr x24, [x27], #0x8\n"
+ ".inst 0xe0112ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x17]\n"
+ "ldr x23, [x26], #0x8\n"
+ ".inst 0xe01126c2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x22, x17]\n"
+ "ldr x22, [x25], #0x8\n"
+ ".inst 0xe01122a3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x21, x17]\n"
+ "add x13, x13, #0x4\n"
+ "ldr x21, [x20], #0x8\n"
+ "cmp x13, x15, LSL #2\n"
"blt 3b\n"
"4:" // Loads: Tail
- ".inst 0x25246140 // psel p0.b, p8.b/Z, p10.b[w12]\n"
- ".inst 0xe00a0320 // ld1b { za0h.b[x12] }, p0/Z, [x25, x10]\n"
- ".inst 0x252c6140 // psel p0.b, p8.b/Z, p10.b[w12, #1]\n"
- ".inst 0xe00a0281 // ld1b { za0h.b[x12, #1] }, p0/Z, [x20, x10]\n"
- ".inst 0x25346140 // psel p0.b, p8.b/Z, p10.b[w12, #2]\n"
+ ".inst 0x25256140 // psel p0.b, p8.b/Z, p10.b[w13]\n"
+ ".inst 0x252d6142 // psel p2.b, p8.b/Z, p10.b[w13, #1]\n"
+ ".inst 0x25356141 // psel p1.b, p8.b/Z, p10.b[w13, #2]\n"
"sub x20, %x[width], x28\n"
- ".inst 0xe00a02c2 // ld1b { za0h.b[x12, #2] }, p0/Z, [x22, x10]\n"
- "cmp x20, x11\n"
- "csel x20, x20, x11, LT\n"
- ".inst 0x253c6140 // psel p0.b, p8.b/Z, p10.b[w12, #3]\n"
- "add x20, x20, #0x3\n"
- ".inst 0xe00a02a3 // ld1b { za0h.b[x12, #3] }, p0/Z, [x21, x10]\n"
+ "cmp x20, x9\n"
"mov x12, #0x0\n"
+ ".inst 0xe0112300 // ld1b { za0h.b[x13] }, p0/Z, [x24, x17]\n"
+ ".inst 0x253d6140 // psel p0.b, p8.b/Z, p10.b[w13, #3]\n"
+ "csel x20, x20, x9, LT\n"
+ ".inst 0xe0112ae1 // ld1b { za0h.b[x13, #1] }, p2/Z, [x23, x17]\n"
+ "add x20, x20, #0x3\n"
+ ".inst 0xe01126c2 // ld1b { za0h.b[x13, #2] }, p1/Z, [x22, x17]\n"
"lsr x20, x20, #0x2\n"
+ ".inst 0xe01122a3 // ld1b { za0h.b[x13, #3] }, p0/Z, [x21, x17]\n"
"5:" // Stores: Loop
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xe0bf8120 // st1w { za0v.s[x12] }, p0/Z, [x9, XZR, LSL #2]\n"
- ".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0828810 // mova z16.s, p2/M, za0v.s[x12]\n"
- ".inst 0xe0af8124 // st1w { za1v.s[x12] }, p0/Z, [x9, x15, LSL #2]\n"
+ ".inst 0x25306d22 // psel p2.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828c13 // mova z19.s, p3/M, za0v.s[x12]\n"
".inst 0x25306d21 // psel p1.s, p11.s/Z, p9.s[w12]\n"
+ ".inst 0xc0828c92 // mova z18.s, p3/M, za1v.s[x12]\n"
+ "udot z23.s, z19.b, z24.b\n"
+ ".inst 0xc0828d11 // mova z17.s, p3/M, za2v.s[x12]\n"
+ "udot z22.s, z18.b, z24.b\n"
+ ".inst 0xe0bf8200 // st1w { za0v.s[x12] }, p0/Z, [x16, XZR, LSL #2]\n"
".inst 0x25306d20 // psel p0.s, p11.s/Z, p9.s[w12]\n"
- ".inst 0xc0828891 // mova z17.s, p2/M, za1v.s[x12]\n"
- ".inst 0xe0ae8528 // st1w { za2v.s[x12] }, p1/Z, [x9, x14, LSL #2]\n"
- ".inst 0xc0828913 // mova z19.s, p2/M, za2v.s[x12]\n"
- "udot z23.s, z16.b, z24.b\n"
- ".inst 0xe0ad812c // st1w { za3v.s[x12] }, p0/Z, [x9, x13, LSL #2]\n"
- ".inst 0xc0828992 // mova z18.s, p2/M, za3v.s[x12]\n"
+ ".inst 0xc0828d90 // mova z16.s, p3/M, za3v.s[x12]\n"
+ "udot z21.s, z17.b, z24.b\n"
+ ".inst 0xe0ae8a04 // st1w { za1v.s[x12] }, p2/Z, [x16, x14, LSL #2]\n"
+ "udot z20.s, z16.b, z24.b\n"
+ ".inst 0xe0ab8608 // st1w { za2v.s[x12] }, p1/Z, [x16, x11, LSL #2]\n"
+ ".inst 0xe0aa820c // st1w { za3v.s[x12] }, p0/Z, [x16, x10, LSL #2]\n"
"add x12, x12, #0x1\n"
+ "addvl x16, x16, #4\n"
"cmp x12, x20\n"
- "udot z22.s, z17.b, z24.b\n"
- "udot z21.s, z19.b, z24.b\n"
- "addvl x9, x9, #4\n"
- "udot z20.s, z18.b, z24.b\n"
"blt 5b\n"
"incb x28\n"
+ "incb x17\n"
"whilelt p9.b, x28, %x[width]\n"
"whilelt p8.b, x28, %x[width]\n"
- "incb x10\n"
"b.any 2b\n"
- "st1w { z23.s }, p2, [x9]\n"
- "st1w { z22.s }, p2, [x9, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z20.s }, p2, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
- "mov %x[out], x9\n"
+ "st1w { z23.s }, p3, [x16]\n"
+ "st1w { z22.s }, p3, [x16, #1, MUL VL]\n"
+ "st1w { z21.s }, p3, [x16, #2, MUL VL]\n"
+ "st1w { z20.s }, p3, [x16, #3, MUL VL]\n"
+ "addvl x16, x16, #4\n"
".inst 0xd503467f // SMSTOP\n"
+ "mov %x[out], x16\n"
: [out] "+&r" (out)
: [first] "r" (first), [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp
index b11bb93c42..09c961f1cd 100644
--- a/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/indirect-interleaves/sme_interleave4VL_fp32_fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,92 +32,92 @@ void interleave_block<4, 1, VLType::SME, false>(
{
__asm__ __volatile__(
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x15\n"
- "cmp %x[height], x15\n"
+ "mov x16, #0x0\n"
+ "mov x15, %x[row_offset]\n"
"cntw x14\n"
- "cntw x13, ALL, MUL #2\n"
- "cntw x11, ALL, MUL #3\n"
- "csel x15, %x[height], x15, LT\n"
- "mov x10, #0x0\n"
+ "cntw x11\n"
+ "cmp %x[height], x14\n"
+ "cntw x10, ALL, MUL #2\n"
+ "cntw x9, ALL, MUL #3\n"
+ "csel x14, %x[height], x14, LT\n"
"ptrue p4.s\n"
- "sub x15, x15, #0x1\n"
+ "sub x14, x14, #0x1\n"
"whilelt p3.s, XZR, %x[height]\n"
- "whilelt p15.s, x14, %x[height]\n"
- "whilelt p14.s, x13, %x[height]\n"
- "whilelt p13.s, x11, %x[height]\n"
- "mov x9, %x[row_offset]\n"
+ "whilelt p15.s, x11, %x[height]\n"
+ "whilelt p14.s, x10, %x[height]\n"
+ "whilelt p13.s, x9, %x[height]\n"
"mov x28, %x[out]\n"
- "whilelt p12.s, x10, %x[width]\n"
- "whilelt p11.s, x10, %x[width]\n"
- "whilelt p10.s, x10, %x[width]\n"
- "whilelt p9.s, x10, %x[width]\n"
- "whilelt p8.s, x10, %x[width]\n"
+ "whilelt p12.s, x16, %x[width]\n"
+ "whilelt p11.s, x16, %x[width]\n"
+ "whilelt p10.s, x16, %x[width]\n"
+ "whilelt p9.s, x16, %x[width]\n"
+ "whilelt p8.s, x16, %x[width]\n"
"1:" // Width loop
"add x27, %x[in], XZR, LSL #3\n"
- "add x26, %x[in], x14, LSL #3\n"
- "ldr x25, [x27], #0x8\n"
- "add x24, %x[in], x13, LSL #3\n"
- "add x20, %x[in], x11, LSL #3\n"
+ "add x26, %x[in], x11, LSL #3\n"
+ "add x25, %x[in], x10, LSL #3\n"
+ "add x20, %x[in], x9, LSL #3\n"
+ "ldr x24, [x27], #0x8\n"
+ "mov x13, #0x0\n"
"ldr x23, [x26], #0x8\n"
- "mov x12, #0x0\n"
- "ldr x22, [x24], #0x8\n"
+ "ldr x22, [x25], #0x8\n"
"ldr x21, [x20], #0x8\n"
- "cbz x15, 3f\n"
+ "cbz x14, 3f\n"
"2:" // Loads: Loop
- ".inst 0x25306c60 // psel p0.s, p11.s/Z, p3.s[w12]\n"
- ".inst 0x253069e2 // psel p2.s, p10.s/Z, p15.s[w12]\n"
- ".inst 0xe0890320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x9, LSL #2]\n"
- "ldr x25, [x27], #0x8\n"
- ".inst 0x253065c1 // psel p1.s, p9.s/Z, p14.s[w12]\n"
- ".inst 0x253061a0 // psel p0.s, p8.s/Z, p13.s[w12]\n"
- ".inst 0xe0890ae4 // ld1w { za1h.s[x12] }, p2/Z, [x23, x9, LSL #2]\n"
+ ".inst 0x25316c60 // psel p0.s, p11.s/Z, p3.s[w13]\n"
+ ".inst 0x253169e2 // psel p2.s, p10.s/Z, p15.s[w13]\n"
+ ".inst 0x253165c1 // psel p1.s, p9.s/Z, p14.s[w13]\n"
+ ".inst 0xe08f2300 // ld1w { za0h.s[x13] }, p0/Z, [x24, x15, LSL #2]\n"
+ ".inst 0x253161a0 // psel p0.s, p8.s/Z, p13.s[w13]\n"
+ "ldr x24, [x27], #0x8\n"
+ ".inst 0xe08f2ae4 // ld1w { za1h.s[x13] }, p2/Z, [x23, x15, LSL #2]\n"
"ldr x23, [x26], #0x8\n"
- ".inst 0xe08906c8 // ld1w { za2h.s[x12] }, p1/Z, [x22, x9, LSL #2]\n"
- "ldr x22, [x24], #0x8\n"
- ".inst 0xe08902ac // ld1w { za3h.s[x12] }, p0/Z, [x21, x9, LSL #2]\n"
- "add x12, x12, #0x1\n"
- "cmp x12, x15\n"
+ ".inst 0xe08f26c8 // ld1w { za2h.s[x13] }, p1/Z, [x22, x15, LSL #2]\n"
+ "ldr x22, [x25], #0x8\n"
+ ".inst 0xe08f22ac // ld1w { za3h.s[x13] }, p0/Z, [x21, x15, LSL #2]\n"
+ "add x13, x13, #0x1\n"
"ldr x21, [x20], #0x8\n"
+ "cmp x13, x14\n"
"blt 2b\n"
"3:" // Loads: Tail
- "sub x20, %x[width], x10\n"
- ".inst 0x25306c60 // psel p0.s, p11.s/Z, p3.s[w12]\n"
- ".inst 0xe0890320 // ld1w { za0h.s[x12] }, p0/Z, [x25, x9, LSL #2]\n"
- ".inst 0x253069e0 // psel p0.s, p10.s/Z, p15.s[w12]\n"
- ".inst 0x253065c1 // psel p1.s, p9.s/Z, p14.s[w12]\n"
- ".inst 0xe08902e4 // ld1w { za1h.s[x12] }, p0/Z, [x23, x9, LSL #2]\n"
- ".inst 0x253061a0 // psel p0.s, p8.s/Z, p13.s[w12]\n"
- "cmp x20, x14\n"
- ".inst 0xe08906c8 // ld1w { za2h.s[x12] }, p1/Z, [x22, x9, LSL #2]\n"
- ".inst 0xe08902ac // ld1w { za3h.s[x12] }, p0/Z, [x21, x9, LSL #2]\n"
+ ".inst 0x25316c60 // psel p0.s, p11.s/Z, p3.s[w13]\n"
+ ".inst 0x253169e2 // psel p2.s, p10.s/Z, p15.s[w13]\n"
+ ".inst 0x253165c1 // psel p1.s, p9.s/Z, p14.s[w13]\n"
+ "sub x20, %x[width], x16\n"
+ "cmp x20, x11\n"
"mov x12, #0x0\n"
- "csel x20, x20, x14, LT\n"
+ ".inst 0xe08f2300 // ld1w { za0h.s[x13] }, p0/Z, [x24, x15, LSL #2]\n"
+ ".inst 0x253161a0 // psel p0.s, p8.s/Z, p13.s[w13]\n"
+ "csel x20, x20, x11, LT\n"
+ ".inst 0xe08f2ae4 // ld1w { za1h.s[x13] }, p2/Z, [x23, x15, LSL #2]\n"
+ ".inst 0xe08f26c8 // ld1w { za2h.s[x13] }, p1/Z, [x22, x15, LSL #2]\n"
+ ".inst 0xe08f22ac // ld1w { za3h.s[x13] }, p0/Z, [x21, x15, LSL #2]\n"
"4:" // Stores: Loop
".inst 0x25305180 // psel p0.s, p4.s/Z, p12.s[w12]\n"
- ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
- ".inst 0x25305180 // psel p0.s, p4.s/Z, p12.s[w12]\n"
- ".inst 0xe0ae8384 // st1w { za1v.s[x12] }, p0/Z, [x28, x14, LSL #2]\n"
+ ".inst 0x25305182 // psel p2.s, p4.s/Z, p12.s[w12]\n"
".inst 0x25305181 // psel p1.s, p4.s/Z, p12.s[w12]\n"
+ ".inst 0xe0bf8380 // st1w { za0v.s[x12] }, p0/Z, [x28, XZR, LSL #2]\n"
".inst 0x25305180 // psel p0.s, p4.s/Z, p12.s[w12]\n"
- ".inst 0xe0ad8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x13, LSL #2]\n"
- ".inst 0xe0ab838c // st1w { za3v.s[x12] }, p0/Z, [x28, x11, LSL #2]\n"
+ ".inst 0xe0ab8b84 // st1w { za1v.s[x12] }, p2/Z, [x28, x11, LSL #2]\n"
+ ".inst 0xe0aa8788 // st1w { za2v.s[x12] }, p1/Z, [x28, x10, LSL #2]\n"
+ ".inst 0xe0a9838c // st1w { za3v.s[x12] }, p0/Z, [x28, x9, LSL #2]\n"
"add x12, x12, #0x1\n"
- "cmp x12, x20\n"
"addvl x28, x28, #4\n"
+ "cmp x12, x20\n"
"blt 4b\n"
- "incw x10\n"
- "whilelt p12.s, x10, %x[width]\n"
- "whilelt p11.s, x10, %x[width]\n"
- "whilelt p10.s, x10, %x[width]\n"
- "whilelt p9.s, x10, %x[width]\n"
- "whilelt p8.s, x10, %x[width]\n"
- "incw x9\n"
+ "incw x16\n"
+ "incw x15\n"
+ "whilelt p12.s, x16, %x[width]\n"
+ "whilelt p11.s, x16, %x[width]\n"
+ "whilelt p10.s, x16, %x[width]\n"
+ "whilelt p9.s, x16, %x[width]\n"
+ "whilelt p8.s, x16, %x[width]\n"
"b.any 1b\n"
"mov %x[out], x28\n"
".inst 0xd503467f // SMSTOP\n"
: [out] "+&r" (out)
: [height] "r" (height), [in] "r" (in), [row_offset] "r" (row_offset), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
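
[Editorial sketch, not part of the patch.] Throughout these kernels %x[in] is not the matrix itself but an array of row pointers, which is what makes the interleave "indirect": the Width loop walks four pointer streams spaced one vector length of rows apart, post-incrementing each by 8 bytes per iteration ("ldr xN, [xM], #0x8"). A sketch of the addressing, assuming vl holds the cntw value:

    #include <cstddef>

    // Scalar view of "add x26, %x[in], x11, LSL #3" etc.: four row-pointer
    // streams starting 0, vl, 2*vl and 3*vl rows into the pointer array.
    void gather_row_ptrs(const float *const *in, size_t vl, const float *rows[4]) {
        rows[0] = in[0];
        rows[1] = in[vl];
        rows[2] = in[2 * vl];
        rows[3] = in[3 * vl];
    }
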
diff --git a/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp b/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
index 59591935cd..da27f31428 100644
--- a/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
+++ b/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022 Arm Limited.
+ * Copyright (c) 2020-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,245 +47,7 @@
namespace arm_gemm {
-/*
- * Core function that does heavy lifting - interleave 'int_by' rows of width 'width' together.
- *
- * 'height' indicates the actual number of rows to interleave, so if it's less than int_by then the remaining
- * entries are padded (note that this is "GEMM" padding rather than convolution padding, so there is no need to pad
- * with a particular value).
- *
- * Note that it is not expected for this templated version to ever be used - all cases that matter should be
- * explicitly specialized with an optimized implementation.
- */
-template<unsigned int height_vectors, unsigned int block, VLType vlt, bool integrate_sums, typename TIn, typename TOut>
-void interleave_block( TOut * &out, const TIn * const *in, size_t width, size_t height, size_t row_offset, bool first) {
- const unsigned int int_by = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block : 1);
-
- std::vector<int32_t> the_sums;
-
- if (integrate_sums) {
- the_sums = std::vector<int32_t>(int_by, 0);
-
- if (!first) {
- // In 'integrate sums' mode, we dump the sums at the end on each pass.
-
- // On the last pass this is correct, but on other passes it is not -
- // so on the subsequent pass we need to take the output written by
- // the previous pass as starting point for the sums, and then
- // overwrite them with new interleaved data.
- int32_t *out_int32 = reinterpret_cast<int32_t *>(out);
-
- // Rewind pointer to where we wrote out the sums last time.
- out_int32 -= int_by;
-
- // Restore the running sums.
- memcpy(the_sums.data(), out_int32, int_by * sizeof(int32_t));
-
- // Update the "real" pointer so that the next output will clobber the old sums.
- out = reinterpret_cast<TOut *>(out_int32);
- }
- }
-
- for (unsigned int pos=0; pos<width; pos+=block) {
- for (unsigned int row=0; row<int_by; row++) {
- // Row out of range - pad 'block' entries.
- if (row >= height) {
- for (unsigned int col=0; col<block; col++) {
- *out++ = 0;
- }
- continue;
- }
-
- for (unsigned int col=0; col<block; col++) {
- // Column out of range - pad a single entry
- if (pos + col >= width) {
- *out++ = 0;
- continue;
- }
-
- if (integrate_sums) {
- the_sums[row] += in[row][row_offset + pos + col];
- }
-
- *out++ = in[row][row_offset + pos + col];
- }
- }
- }
-
- if (integrate_sums) {
- int32_t *out_int32 = reinterpret_cast<int32_t *>(out);
-
- memcpy(out_int32, the_sums.data(), int_by * sizeof(int32_t));
-
- out = reinterpret_cast<TOut *>(out_int32 + int_by);
- }
-}
-
-template<unsigned int height_vectors, unsigned int block, VLType vlt, typename TOut>
-inline void FixupRowSums(TOut * &out, const int32_t row_sum_multiplier) {
- const unsigned int height = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block : 1);
-
- // If we are integrating row sums, we need to do some fix up, depending on whether the multiplier is non-zero or not.
- if (row_sum_multiplier) {
- // Non-zero: interleave_block<>() will have done the sums, so 'out' will point to the start of the
- // next block (post sums).
- // We need to go back and apply the multiplier to the computed sums. We don't need to change 'out'.
- int32_t *out_int32 = reinterpret_cast<int32_t *>(out);
-
- out_int32 -= height;
- for (unsigned int i=0; i<height; i++) {
- out_int32[i] *= row_sum_multiplier;
- }
- } else {
- // Zero: interleave_block<>() will *not* have done the sums, so 'out' will point to the start of the
- // sum block. We need to insert the (zero) sums, and advance 'out'.
- int32_t *out_int32 = reinterpret_cast<int32_t *>(out);
-
- for (unsigned int i=0; i<height; i++) {
- out_int32[i] = 0;
- }
-
- out_int32 += height;
-
- out = reinterpret_cast<TOut *>(out_int32);
- }
-}
-
-template<unsigned int height_vectors, unsigned int block, VLType vlt, typename TIn, typename TOut>
-void IndirectInterleave(TOut *out, const TIn * const * const *ptr, unsigned int stringlen,
- unsigned int rounded_stringlen, const unsigned int y0, const unsigned int ymax,
- const unsigned int k0, const unsigned int kmax, bool integrate_sums,
- const int32_t row_sum_multiplier) {
- const unsigned int height = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block : 1);
-
- // 'interleave_block' implementations are entitled to read a pointer for each row they handle from the input
- // pointer array, even for out of range rows (although they must not subsequently dereference those pointers for
- // out of range rows). This allows interleave_block to use techniques like row predication, or loading all
- // pointers and conditionally overriding the out of range ones.
-
- // This is problematic in the "pure" indirect case when we get to the last rows, where it can lead to out of
- // range reads. Avoid this with a local buffer to use in last-rows cases. Use alloca as a std::vector can be
- // expensive in highly threaded scenarios.
- const TIn **row_ptrs = reinterpret_cast<const TIn **>(alloca(height * sizeof(const TIn *)));
-
- // Figure out the starting position based on k0 (with rounded length)
- unsigned int start_string = k0 / rounded_stringlen;
- unsigned int start_stringpos = k0 % rounded_stringlen;
-
- // Process blocks of 'height' height...
- for (unsigned int ybase = y0; ybase < ymax; ybase+=height) {
- // Height to process
- unsigned int active_height = std::min(ymax - ybase, height);
-
- // Track our progress through the various strings
- unsigned int k_left = (kmax - k0);
- unsigned int string = start_string;
- unsigned int stringpos = start_stringpos;
-
- bool first = true;
-
- // Prepare to call 'interleave_block' above for each string encompassed by K range
- while (k_left > 0) {
- // Width to process - and the width we will generate (with padding)
- unsigned int in_width = std::min(k_left, stringlen - stringpos);
- unsigned int out_width = std::min(k_left, rounded_stringlen - stringpos);
-
- const TIn * const *row_base = ptr[string] + ybase;
-
- // If not all rows are valid, copy the ones that are into local array (see above comment).
- if (active_height < height) {
- for (unsigned int i=0; i<active_height; i++) {
- row_ptrs[i] = ptr[string][ybase + i];
- }
-
- row_base = row_ptrs;
- }
-
- // 'integrate_sums' is a function parameter rather than a template parameter to prevent duplicating too
- // much code. However, integrated sums make no sense for non-integral types and won't ever be
- // requested. So put a type trait check here to avoid generating pointless code.
- if (std::is_integral<TOut>::value && integrate_sums && row_sum_multiplier) {
- interleave_block<height_vectors, block, vlt, true>(out, row_base, in_width, active_height, stringpos, first);
- } else {
- interleave_block<height_vectors, block, vlt, false>(out, row_base, in_width, active_height, stringpos, first);
- }
-
- k_left -= out_width;
- string++;
- stringpos=0;
- first=false;
- }
-
- if (std::is_integral<TOut>::value && integrate_sums) {
- FixupRowSums<height_vectors, block, vlt>(out, row_sum_multiplier);
- }
- }
-}
-
-template<unsigned int height_vectors, unsigned int block, VLType vlt, typename TIn, typename TOut>
-void ConvolutionInterleave(TOut *out, const TIn *in, size_t in_stride, const convolver<TIn> &conv, const unsigned int rounded_stringlen,
- const unsigned int y0, const unsigned int ymax, const unsigned int k0, const unsigned int kmax, bool integrate_sums, const int32_t row_sum_multiplier) {
- const unsigned int height = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block : 1);
-
- auto conv_cols = conv.process_columns(in, in_stride, k0, kmax, rounded_stringlen);
-
- // Use alloca here as a std::vector can be expensive in highly threaded scenarios.
- const TIn **row_ptrs = reinterpret_cast<const TIn **>(alloca(height * sizeof(const TIn *)));
-
- for (unsigned int ybase = y0; ybase < ymax; ybase += height) {
- // How many of the rows are active - the rest will get padded in interleave_block.
- unsigned int active_height = std::min(ymax - ybase, height);
- bool first = true;
-
- auto conv_rows = conv_cols.process_rows(ybase, active_height);
-
- while (!conv_rows.finished()) {
- unsigned int width, offset;
-
- // Get next set of parameters
- std::tie(width, offset) = conv_rows.next_block(row_ptrs);
-
- // Perform the interleave
- if (std::is_integral<TOut>::value && integrate_sums && row_sum_multiplier) {
- interleave_block<height_vectors, block, vlt, true>(out, row_ptrs, width, active_height, offset, first);
- } else {
- interleave_block<height_vectors, block, vlt, false>(out, row_ptrs, width, active_height, offset, first);
- }
-
- first=false;
- }
-
- if (std::is_integral<TOut>::value && integrate_sums) {
- FixupRowSums<height_vectors, block, vlt>(out, row_sum_multiplier);
- }
- }
-}
-
-template<unsigned int height_vectors, unsigned int block, VLType vlt, typename TIn, typename TOut>
-void Interleave(TOut *out, const TIn *in, size_t in_stride, const unsigned int y0, const unsigned int ymax, const unsigned int k0, const unsigned int kmax, bool integrate_sums, const int32_t row_sum_multiplier) {
- const unsigned int height = height_vectors * (vlt == VLType::SVE ? get_vector_length<TOut>() / block : 1);
-
- // Use alloca here as a std::vector can be expensive in highly threaded scenarios.
- const TIn **row_ptrs = reinterpret_cast<const TIn **>(alloca(height * sizeof(const TIn *)));
-
- const unsigned int width=kmax-k0;
-
- for (unsigned int y=y0; y<ymax; y+=height) {
- for (unsigned int r=0; r<height; r++) {
- row_ptrs[r] = in + ((y + r) * in_stride);
- }
-
- if (std::is_integral<TOut>::value && integrate_sums && row_sum_multiplier) {
- interleave_block<height_vectors, block, vlt, true>(out, row_ptrs, width, std::min(height, ymax-y), k0, true);
- } else {
- interleave_block<height_vectors, block, vlt, false>(out, row_ptrs, width, std::min(height, ymax-y), k0, true);
- }
-
- if (std::is_integral<TOut>::value && integrate_sums) {
- FixupRowSums<height_vectors, block, vlt>(out, row_sum_multiplier);
- }
- }
-}
+#include "interleave_indirect_impl.hpp"
#include "indirect-interleaves/list.hpp"
@@ -316,6 +78,7 @@ template void Interleave<6, 1, VLType::None>(float *, const bfloat16 *, size_t,
/* AArch64 */
#ifdef __aarch64__
+
/* FP32 */
/* Arm® Neon™/SVE implementation (height 8) */
template void IndirectInterleave<8, 1, VLType::None>(float *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
@@ -330,11 +93,11 @@ template void Interleave<8, 2, VLType::None>(float *, const float *, size_t, uns
#endif // ARM_COMPUTE_ENABLE_SVE && ARM_COMPUTE_ENABLE_SVEF32MM
/* FP16 */
-#if defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#if defined(FP16_KERNELS) || defined(ARM_COMPUTE_ENABLE_FP16)
template void IndirectInterleave<8, 1, VLType::None>(__fp16 *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<8, 1, VLType::None>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void Interleave<8, 1, VLType::None>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
-#endif // FP16_KERNELS ar __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#endif // FP16_KERNELS or ARM_COMPUTE_ENABLE_FP16
template void IndirectInterleave<8, 1, VLType::None>(float *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
template void ConvolutionInterleave<8, 1, VLType::None>(float *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t);
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6.hpp
index ef175beeb7..011459c157 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,7 +44,8 @@ void a32_sgemm_8x6_a55r1(const float *, const float *, float *, int, int, int);
// structure.
class sgemm_8x6 {
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
@@ -63,7 +64,7 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 6, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 8> transforms = {};
kern_type kernel = a32_sgemm_8x6;
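[Editorial aside, not part of the diff: the operand_type split in the hunk above lets a kernel descriptor advertise different element types for its two input panels, which a single shared typedef could not express. A hypothetical descriptor (not from the library; the types chosen here are illustrative assumptions) shows the intent.]

// Sketch only: with separate lhs/rhs typedefs, a kernel class can describe
// mixed-precision inputs. The transforms member then mirrors the change to
// StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 8>.
class example_mixed_gemm_descriptor {
public:
    typedef __fp16 lhs_operand_type; // A-panel element type (assumed)
    typedef float  rhs_operand_type; // B-panel element type (assumed)
    typedef float  result_type;
};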
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp
index 8a98f667f4..95c2682bf6 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a53.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,347 +56,347 @@ void a32_sgemm_8x6_a53(const float *Apanel, const float *Bpanel, float *Cpanel,
int k = ((K+3)/4) - 1;
__asm __volatile (
- "vmov.i32 q4, #0\n"
- "vld1.32 {d0-d1}, [%[a_ptr] :64]\n"
- "vmov.i32 q5, #0\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]\n"
- "vmov.i32 q6, #0\n"
- "ldr r0, [%[a_ptr], #0x10]\n"
- "vmov.i32 q7, #0\n"
- "ldr r1, [%[a_ptr], #0x14]\n"
- "vmov.i32 q8, #0\n"
+ "vmov.i32 q4, #0\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]\n"
+ "vmov.i32 q5, #0\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]\n"
+ "vmov.i32 q6, #0\n"
+ "ldr r0, [%[a_ptr], #0x10]\n"
+ "vmov.i32 q7, #0\n"
+ "ldr r1, [%[a_ptr], #0x14]\n"
+ "vmov.i32 q8, #0\n"
ASM_PREFETCH("[%[a_ptr], #0x40]")
- "vmov.i32 q9, #0\n"
+ "vmov.i32 q9, #0\n"
ASM_PREFETCH("[%[b_ptr], #0x40]")
- "vmov.i32 q10, #0\n"
+ "vmov.i32 q10, #0\n"
ASM_PREFETCH("[%[a_ptr], #0x80]")
- "vmov.i32 q11, #0\n"
+ "vmov.i32 q11, #0\n"
ASM_PREFETCH("[%[b_ptr], #0x80]")
- "vmov.i32 q12, #0\n"
- "vmov.i32 q13, #0\n"
+ "vmov.i32 q12, #0\n"
+ "vmov.i32 q13, #0\n"
ASM_PREFETCH("[%[a_ptr], #0xC0]")
- "vmov.i32 q14, #0\n"
+ "vmov.i32 q14, #0\n"
ASM_PREFETCH("[%[b_ptr], #0XC0]")
- "vmov.i32 q15, #0\n"
- "cmp %[k], #0\n"
- "beq 6f\n"
+ "vmov.i32 q15, #0\n"
+ "cmp %[k], #0\n"
+ "beq 6f\n"
"1:\n"
// Unroll 0
- "vldr d6, [%[b_ptr], #0x10]\n"
- "vmov d2, r0, r1\n"
- "vmla.f32 q4, q2, d0[0]\n"
- "ldr r0, [%[b_ptr], #0x18]\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "ldr r1, [%[b_ptr], #0x1C]\n"
- "vmla.f32 q6, q2, d1[0]\n"
-
- "vldr d3, [%[a_ptr], #0x18]\n"
- "vmov d7, r0, r1\n"
- "vmla.f32 q7, q2, d1[1]\n"
+ "vldr d6, [%[b_ptr], #0x10]\n"
+ "vmov d2, r0, r1\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "ldr r0, [%[b_ptr], #0x18]\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "ldr r1, [%[b_ptr], #0x1C]\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+
+ "vldr d3, [%[a_ptr], #0x18]\n"
+ "vmov d7, r0, r1\n"
+ "vmla.f32 q7, q2, d1[1]\n"
ASM_PREFETCH("[%[a_ptr], #0x100]")
- "vmla.f32 q8, q2, d2[0]\n"
- "vmla.f32 q9, q2, d2[1]\n"
-
- "vldr d4, [%[b_ptr], #0x20]\n"
- "vmla.f32 q10, q3, d0[0]\n"
- "ldr r0, [%[b_ptr], #0x28]\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "ldr r1, [%[b_ptr], #0x2C]\n"
- "vmla.f32 q12, q3, d1[0]\n"
-
- "vldr d0, [%[a_ptr], #0x20]\n"
- "vmov d5, r0, r1\n"
- "vmla.f32 q13, q3, d1[1]\n"
- "ldr r0, [%[a_ptr], #0x28]\n"
- "vmla.f32 q14, q3, d2[0]\n"
- "ldr r1, [%[a_ptr], #0x2C]\n"
- "vmla.f32 q15, q3, d2[1]\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+
+ "vldr d4, [%[b_ptr], #0x20]\n"
+ "vmla.f32 q10, q3, d0[0]\n"
+ "ldr r0, [%[b_ptr], #0x28]\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "ldr r1, [%[b_ptr], #0x2C]\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+
+ "vldr d0, [%[a_ptr], #0x20]\n"
+ "vmov d5, r0, r1\n"
+ "vmla.f32 q13, q3, d1[1]\n"
+ "ldr r0, [%[a_ptr], #0x28]\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+ "ldr r1, [%[a_ptr], #0x2C]\n"
+ "vmla.f32 q15, q3, d2[1]\n"
// Unroll 1
- "vldr d6, [%[b_ptr], #0x30]\n"
- "vmov d1, r0, r1\n"
- "vmla.f32 q4, q2, d3[0]\n"
- "ldr r0, [%[b_ptr], #0x38]\n"
- "vmla.f32 q5, q2, d3[1]\n"
- "ldr r1, [%[b_ptr], #0x3C]\n"
- "vmla.f32 q6, q2, d0[0]\n"
-
- "vldr d2, [%[a_ptr], #0x30]\n"
- "vmov d7, r0, r1\n"
- "vmla.f32 q7, q2, d0[1]\n"
+ "vldr d6, [%[b_ptr], #0x30]\n"
+ "vmov d1, r0, r1\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "ldr r0, [%[b_ptr], #0x38]\n"
+ "vmla.f32 q5, q2, d3[1]\n"
+ "ldr r1, [%[b_ptr], #0x3C]\n"
+ "vmla.f32 q6, q2, d0[0]\n"
+
+ "vldr d2, [%[a_ptr], #0x30]\n"
+ "vmov d7, r0, r1\n"
+ "vmla.f32 q7, q2, d0[1]\n"
ASM_PREFETCH("[%[b_ptr], #0x100]")
- "vmla.f32 q8, q2, d1[0]\n"
- "vmla.f32 q9, q2, d1[1]\n"
-
- "vldr d4, [%[b_ptr], #0x40]\n"
- "vmla.f32 q10, q3, d3[0]\n"
- "ldr r0, [%[b_ptr], #0x48]\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "ldr r1, [%[b_ptr], #0x4C]\n"
- "vmla.f32 q12, q3, d0[0]\n"
-
- "vldr d3, [%[a_ptr], #0x38]\n"
- "vmov d5, r0, r1\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "ldr r0, [%[a_ptr], #0x40]\n"
- "vmla.f32 q14, q3, d1[0]\n"
- "ldr r1, [%[a_ptr], #0x44]\n"
- "vmla.f32 q15, q3, d1[1]\n"
+ "vmla.f32 q8, q2, d1[0]\n"
+ "vmla.f32 q9, q2, d1[1]\n"
+
+ "vldr d4, [%[b_ptr], #0x40]\n"
+ "vmla.f32 q10, q3, d3[0]\n"
+ "ldr r0, [%[b_ptr], #0x48]\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "ldr r1, [%[b_ptr], #0x4C]\n"
+ "vmla.f32 q12, q3, d0[0]\n"
+
+ "vldr d3, [%[a_ptr], #0x38]\n"
+ "vmov d5, r0, r1\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "ldr r0, [%[a_ptr], #0x40]\n"
+ "vmla.f32 q14, q3, d1[0]\n"
+ "ldr r1, [%[a_ptr], #0x44]\n"
+ "vmla.f32 q15, q3, d1[1]\n"
// Unroll 2
- "vldr d6, [%[b_ptr], #0x50]\n"
- "vmov d0, r0, r1\n"
- "vmla.f32 q4, q2, d2[0]\n"
- "ldr r0, [%[b_ptr], #0x58]\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "ldr r1, [%[b_ptr], #0x5C]\n"
- "vmla.f32 q6, q2, d3[0]\n"
-
- "vldr d1, [%[a_ptr], #0x48]\n"
- "vmov d7, r0, r1\n"
- "vmla.f32 q7, q2, d3[1]\n"
+ "vldr d6, [%[b_ptr], #0x50]\n"
+ "vmov d0, r0, r1\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "ldr r0, [%[b_ptr], #0x58]\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "ldr r1, [%[b_ptr], #0x5C]\n"
+ "vmla.f32 q6, q2, d3[0]\n"
+
+ "vldr d1, [%[a_ptr], #0x48]\n"
+ "vmov d7, r0, r1\n"
+ "vmla.f32 q7, q2, d3[1]\n"
ASM_PREFETCH("[%[a_ptr], #0x140]")
- "vmla.f32 q8, q2, d0[0]\n"
- "vmla.f32 q9, q2, d0[1]\n"
-
- "vldr d4, [%[b_ptr], #0x60]\n"
- "vmla.f32 q10, q3, d2[0]\n"
- "ldr r0, [%[b_ptr], #0x68]\n"
- "vmla.f32 q11, q3, d2[1]\n"
- "ldr r1, [%[b_ptr], #0x6C]\n"
- "vmla.f32 q12, q3, d3[0]\n"
-
- "vldr d2, [%[a_ptr], #0x50]\n"
- "vmov d5, r0, r1\n"
- "vmla.f32 q13, q3, d3[1]\n"
- "ldr r0, [%[a_ptr], #0x58]\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "ldr r1, [%[a_ptr], #0x5C]\n"
- "vmla.f32 q15, q3, d0[1]\n"
- "add %[a_ptr], %[a_ptr], #0x60\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+ "vmla.f32 q9, q2, d0[1]\n"
+
+ "vldr d4, [%[b_ptr], #0x60]\n"
+ "vmla.f32 q10, q3, d2[0]\n"
+ "ldr r0, [%[b_ptr], #0x68]\n"
+ "vmla.f32 q11, q3, d2[1]\n"
+ "ldr r1, [%[b_ptr], #0x6C]\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+
+ "vldr d2, [%[a_ptr], #0x50]\n"
+ "vmov d5, r0, r1\n"
+ "vmla.f32 q13, q3, d3[1]\n"
+ "ldr r0, [%[a_ptr], #0x58]\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "ldr r1, [%[a_ptr], #0x5C]\n"
+ "vmla.f32 q15, q3, d0[1]\n"
+ "add %[a_ptr], %[a_ptr], #0x60\n"
// Unroll 3
- "vldr d6, [%[b_ptr], #0x70]\n"
- "vmov d3, r0, r1\n"
- "vmla.f32 q4, q2, d1[0]\n"
- "ldr r0, [%[b_ptr], #0x78]\n"
- "vmla.f32 q5, q2, d1[1]\n"
- "ldr r1, [%[b_ptr], #0x7C]\n"
- "vmla.f32 q6, q2, d2[0]\n"
- "add %[b_ptr], %[b_ptr], #0x80\n"
-
- "vldr d0, [%[a_ptr], #0x00]\n"
- "vmov d7, r0, r1\n"
- "vmla.f32 q7, q2, d2[1]\n"
+ "vldr d6, [%[b_ptr], #0x70]\n"
+ "vmov d3, r0, r1\n"
+ "vmla.f32 q4, q2, d1[0]\n"
+ "ldr r0, [%[b_ptr], #0x78]\n"
+ "vmla.f32 q5, q2, d1[1]\n"
+ "ldr r1, [%[b_ptr], #0x7C]\n"
+ "vmla.f32 q6, q2, d2[0]\n"
+ "add %[b_ptr], %[b_ptr], #0x80\n"
+
+ "vldr d0, [%[a_ptr], #0x00]\n"
+ "vmov d7, r0, r1\n"
+ "vmla.f32 q7, q2, d2[1]\n"
ASM_PREFETCH("[%[b_ptr], #0xC0]")
- "vmla.f32 q8, q2, d3[0]\n"
- "vmla.f32 q9, q2, d3[1]\n"
-
- "vldr d4, [%[b_ptr], #0x00]\n"
- "vmla.f32 q10, q3, d1[0]\n"
- "ldr r0, [%[b_ptr], #0x08]\n"
- "vmla.f32 q11, q3, d1[1]\n"
- "ldr r1, [%[b_ptr], #0x0C]\n"
- "vmla.f32 q12, q3, d2[0]\n"
- "subs %[k], %[k], #1\n"
-
- "vldr d1, [%[a_ptr], #0x08]\n"
- "vmov d5, r0, r1\n"
- "vmla.f32 q13, q3, d2[1]\n"
- "ldr r0, [%[a_ptr], #0x10]\n"
- "vmla.f32 q14, q3, d3[0]\n"
- "ldr r1, [%[a_ptr], #0x14]\n"
- "vmla.f32 q15, q3, d3[1]\n"
- "bne 1b\n"
+ "vmla.f32 q8, q2, d3[0]\n"
+ "vmla.f32 q9, q2, d3[1]\n"
+
+ "vldr d4, [%[b_ptr], #0x00]\n"
+ "vmla.f32 q10, q3, d1[0]\n"
+ "ldr r0, [%[b_ptr], #0x08]\n"
+ "vmla.f32 q11, q3, d1[1]\n"
+ "ldr r1, [%[b_ptr], #0x0C]\n"
+ "vmla.f32 q12, q3, d2[0]\n"
+ "subs %[k], %[k], #1\n"
+
+ "vldr d1, [%[a_ptr], #0x08]\n"
+ "vmov d5, r0, r1\n"
+ "vmla.f32 q13, q3, d2[1]\n"
+ "ldr r0, [%[a_ptr], #0x10]\n"
+ "vmla.f32 q14, q3, d3[0]\n"
+ "ldr r1, [%[a_ptr], #0x14]\n"
+ "vmla.f32 q15, q3, d3[1]\n"
+ "bne 1b\n"
// "Tails" shows how many multiply blocks are needed at the
// end, must be 1-4 inclusive. Bail out to alternative tail
// immediately if it's 1.
"6:\n"
- "subs %[tails], %[tails], #1\n"
- "beq 3f\n"
+ "subs %[tails], %[tails], #1\n"
+ "beq 3f\n"
// Detached final iteration - for now adapt the generic
// tails rather than reimplementing for A53.
// Unroll 0
- "vmov d2, r0, r1\n"
- "add %[a_ptr], %[a_ptr], #0x18\n"
- "vmla.f32 q4, q2, d0[0]\n"
- "vld1.32 {d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "add %[b_ptr], %[b_ptr], #0x10\n"
- "vmla.f32 q6, q2, d1[0]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "vmla.f32 q7, q2, d1[1]\n"
- "vmla.f32 q8, q2, d2[0]\n"
- "subs %[tails], %[tails], #1\n"
- "vmla.f32 q9, q2, d2[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d0[0]\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "vmla.f32 q12, q3, d1[0]\n"
- "vmla.f32 q13, q3, d1[1]\n"
- "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
- "vmla.f32 q14, q3, d2[0]\n"
- "vmla.f32 q15, q3, d2[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "beq 4f\n"
+ "vmov d2, r0, r1\n"
+ "add %[a_ptr], %[a_ptr], #0x18\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "vld1.32 {d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "add %[b_ptr], %[b_ptr], #0x10\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d1[1]\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "subs %[tails], %[tails], #1\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d0[0]\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+ "vmla.f32 q13, q3, d1[1]\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+ "vmla.f32 q15, q3, d2[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "beq 4f\n"
// Unroll 1
- "vmla.f32 q4, q2, d3[0]\n"
- "vmla.f32 q5, q2, d3[1]\n"
- "subs %[tails], %[tails], #1\n"
- "vmla.f32 q6, q2, d0[0]\n"
- "vmla.f32 q7, q2, d0[1]\n"
- "vmla.f32 q8, q2, d1[0]\n"
- "vmla.f32 q9, q2, d1[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d3[0]\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q12, q3, d0[0]\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "vmla.f32 q14, q3, d1[0]\n"
- "vmla.f32 q15, q3, d1[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "beq 5f\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "vmla.f32 q5, q2, d3[1]\n"
+ "subs %[tails], %[tails], #1\n"
+ "vmla.f32 q6, q2, d0[0]\n"
+ "vmla.f32 q7, q2, d0[1]\n"
+ "vmla.f32 q8, q2, d1[0]\n"
+ "vmla.f32 q9, q2, d1[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d3[0]\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q12, q3, d0[0]\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "vmla.f32 q14, q3, d1[0]\n"
+ "vmla.f32 q15, q3, d1[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "beq 5f\n"
// Unroll 2
- "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
- "vmla.f32 q4, q2, d2[0]\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "vmla.f32 q6, q2, d3[0]\n"
- "vmla.f32 q7, q2, d3[1]\n"
- "vmla.f32 q8, q2, d0[0]\n"
- "vmla.f32 q9, q2, d0[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d2[0]\n"
- "vmla.f32 q11, q3, d2[1]\n"
- "vmla.f32 q12, q3, d3[0]\n"
- "vmla.f32 q13, q3, d3[1]\n"
- "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "vmla.f32 q15, q3, d0[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "vmla.f32 q6, q2, d3[0]\n"
+ "vmla.f32 q7, q2, d3[1]\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+ "vmla.f32 q9, q2, d0[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d2[0]\n"
+ "vmla.f32 q11, q3, d2[1]\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+ "vmla.f32 q13, q3, d3[1]\n"
+ "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "vmla.f32 q15, q3, d0[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
// Unroll 3
- "vmla.f32 q4, q2, d1[0]\n"
- "vmla.f32 q10, q3, d1[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q5, q2, d1[1]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d1[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q6, q2, d2[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d2[0]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d2[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d2[1]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d3[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d3[0]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d3[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d3[1]\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "b 2f\n"
+ "vmla.f32 q4, q2, d1[0]\n"
+ "vmla.f32 q10, q3, d1[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q5, q2, d1[1]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d1[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d2[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d2[0]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d2[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d2[1]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d3[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d3[0]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d3[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d3[1]\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "b 2f\n"
// tails==1 final tail
"3:\n"
- "vmov d2, r0, r1\n"
- "add %[b_ptr], %[b_ptr], #0x10\n"
- "vmla.f32 q4, q2, d0[0]\n"
- "add %[a_ptr], %[a_ptr], #0x18\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "vmla.f32 q6, q2, d1[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q10, q3, d0[0]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d1[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d1[1]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d1[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d2[0]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d2[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d2[1]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d2[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "b 2f\n"
+ "vmov d2, r0, r1\n"
+ "add %[b_ptr], %[b_ptr], #0x10\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "add %[a_ptr], %[a_ptr], #0x18\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q10, q3, d0[0]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d1[1]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d1[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d2[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "b 2f\n"
// tails==2 final tail
"4:\n"
- "vmla.f32 q4, q2, d3[0]\n"
- "vmla.f32 q10, q3, d3[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q5, q2, d3[1]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q6, q2, d0[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d0[0]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d0[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d1[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d1[0]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d1[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d1[1]\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "b 2f\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "vmla.f32 q10, q3, d3[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q5, q2, d3[1]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d0[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d0[0]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d0[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d1[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d1[0]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d1[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d1[1]\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "b 2f\n"
// tails==3 final tail
"5:\n"
- "vmla.f32 q4, q2, d2[0]\n"
- "vld1.32 {d0}, [%[a_ptr] :64]!\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "vmla.f32 q6, q2, d3[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q10, q3, d2[0]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d2[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d3[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d3[1]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d3[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d0[0]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d0[1]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d0[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "vld1.32 {d0}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "vmla.f32 q6, q2, d3[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q10, q3, d2[0]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d2[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d3[1]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d3[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d0[1]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d0[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
"2:\n"
- "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n"
+ "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n"
: [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr), [k] "+r" (k), [tails] "+r" (tails)
:
: "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp
index 8126826998..54e0a26843 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/a55r1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,345 +62,345 @@ void a32_sgemm_8x6_a55r1(const float *Apanel, const float *Bpanel, float *Cpanel
a_ptr = a_ptr0;
__asm __volatile (
- "vldr d0, [%[a_ptr]]\n"
- "vmov.i32 q4, #0\n"
- "vldr d1, [%[a_ptr], #0x08]\n"
- "vmov.i32 q5, #0\n"
- "vldr d4, [%[b_ptr]]\n"
- "vmov.i32 q6, #0\n"
- "vldr d5, [%[b_ptr], #0x08]\n"
- "vmov.i32 q7, #0\n"
- "vldr d2, [%[a_ptr], #0x10]\n"
- "vmov.i32 q8, #0\n"
+ "vldr d0, [%[a_ptr]]\n"
+ "vmov.i32 q4, #0\n"
+ "vldr d1, [%[a_ptr], #0x08]\n"
+ "vmov.i32 q5, #0\n"
+ "vldr d4, [%[b_ptr]]\n"
+ "vmov.i32 q6, #0\n"
+ "vldr d5, [%[b_ptr], #0x08]\n"
+ "vmov.i32 q7, #0\n"
+ "vldr d2, [%[a_ptr], #0x10]\n"
+ "vmov.i32 q8, #0\n"
ASM_PREFETCH("[%[b_ptr], #0x40]")
- "vmov.i32 q9, #0\n"
+ "vmov.i32 q9, #0\n"
ASM_PREFETCH("[%[a_ptr], #0x40]")
- "vmov.i32 q10, #0\n"
+ "vmov.i32 q10, #0\n"
ASM_PREFETCH("[%[b_ptr], #0x80]")
- "vmov.i32 q11, #0\n"
+ "vmov.i32 q11, #0\n"
ASM_PREFETCH("[%[a_ptr], #0x80]")
- "vmov.i32 q12, #0\n"
+ "vmov.i32 q12, #0\n"
ASM_PREFETCH("[%[b_ptr], #0XC0]")
- "vmov.i32 q13, #0\n"
+ "vmov.i32 q13, #0\n"
ASM_PREFETCH("[%[a_ptr], #0xC0]")
- "vmov.i32 q14, #0\n"
+ "vmov.i32 q14, #0\n"
ASM_PREFETCH("[%[b_ptr], #0x100]")
- "vmov.i32 q15, #0\n"
+ "vmov.i32 q15, #0\n"
ASM_PREFETCH("[%[a_ptr], #0x100]")
- "cmp %[k], #0\n"
+ "cmp %[k], #0\n"
ASM_PREFETCH("[%[b_ptr], #0x140]")
- "beq 6f\n"
+ "beq 6f\n"
ASM_PREFETCH("[%[b_ptr], #0x180]")
"1:\n"
// Unroll 0
- "vmla.f32 q4, q2, d0[0]\n"
- "vldr d6, [%[b_ptr], #0x10]\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "vldr d7, [%[b_ptr], #0x18]\n"
- "vmla.f32 q6, q2, d1[0]\n"
- "vldr d3, [%[a_ptr], #0x18]\n"
- "vmla.f32 q7, q2, d1[1]\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "vldr d6, [%[b_ptr], #0x10]\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "vldr d7, [%[b_ptr], #0x18]\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+ "vldr d3, [%[a_ptr], #0x18]\n"
+ "vmla.f32 q7, q2, d1[1]\n"
ASM_PREFETCH("[%[a_ptr], #0x140]")
- "vmla.f32 q8, q2, d2[0]\n"
- "subs %[k], %[k], #1\n"
- "vmla.f32 q9, q2, d2[1]\n"
- "vldr d4, [%[b_ptr], #0x20]\n"
- "vmla.f32 q10, q3, d0[0]\n"
- "vldr d5, [%[b_ptr], #0x28]\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "vldr d0, [%[a_ptr], #0x20]\n"
- "vmla.f32 q12, q3, d1[0]\n"
-
- "vmla.f32 q13, q3, d1[1]\n"
- "vldr d1, [%[a_ptr], #0x28]\n"
- "vmla.f32 q14, q3, d2[0]\n"
-
- "vmla.f32 q15, q3, d2[1]\n"
- "vldr d6, [%[b_ptr], #0x30]\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "subs %[k], %[k], #1\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+ "vldr d4, [%[b_ptr], #0x20]\n"
+ "vmla.f32 q10, q3, d0[0]\n"
+ "vldr d5, [%[b_ptr], #0x28]\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "vldr d0, [%[a_ptr], #0x20]\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+
+ "vmla.f32 q13, q3, d1[1]\n"
+ "vldr d1, [%[a_ptr], #0x28]\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+
+ "vmla.f32 q15, q3, d2[1]\n"
+ "vldr d6, [%[b_ptr], #0x30]\n"
// Unroll 1
- "vmla.f32 q4, q2, d3[0]\n"
- "vldr d7, [%[b_ptr], #0x38]\n"
- "vmla.f32 q5, q2, d3[1]\n"
- "vldr d2, [%[a_ptr], #0x30]\n"
- "vmla.f32 q6, q2, d0[0]\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "vldr d7, [%[b_ptr], #0x38]\n"
+ "vmla.f32 q5, q2, d3[1]\n"
+ "vldr d2, [%[a_ptr], #0x30]\n"
+ "vmla.f32 q6, q2, d0[0]\n"
- "vmla.f32 q7, q2, d0[1]\n"
+ "vmla.f32 q7, q2, d0[1]\n"
ASM_PREFETCH("[%[b_ptr], #0x1C0]")
- "vmla.f32 q8, q2, d1[0]\n"
+ "vmla.f32 q8, q2, d1[0]\n"
- "vmla.f32 q9, q2, d1[1]\n"
- "vldr d4, [%[b_ptr], #0x40]\n"
- "vmla.f32 q10, q3, d3[0]\n"
- "vldr d5, [%[b_ptr], #0x48]\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "vldr d3, [%[a_ptr], #0x38]\n"
- "vmla.f32 q12, q3, d0[0]\n"
+ "vmla.f32 q9, q2, d1[1]\n"
+ "vldr d4, [%[b_ptr], #0x40]\n"
+ "vmla.f32 q10, q3, d3[0]\n"
+ "vldr d5, [%[b_ptr], #0x48]\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "vldr d3, [%[a_ptr], #0x38]\n"
+ "vmla.f32 q12, q3, d0[0]\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "vldr d0, [%[a_ptr], #0x40]\n"
- "vmla.f32 q14, q3, d1[0]\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "vldr d0, [%[a_ptr], #0x40]\n"
+ "vmla.f32 q14, q3, d1[0]\n"
- "vmla.f32 q15, q3, d1[1]\n"
- "vldr d6, [%[b_ptr], #0x50]\n"
+ "vmla.f32 q15, q3, d1[1]\n"
+ "vldr d6, [%[b_ptr], #0x50]\n"
// Unroll 2
- "vmla.f32 q4, q2, d2[0]\n"
- "vldr d7, [%[b_ptr], #0x58]\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "vldr d1, [%[a_ptr], #0x48]\n"
- "vmla.f32 q6, q2, d3[0]\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "vldr d7, [%[b_ptr], #0x58]\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "vldr d1, [%[a_ptr], #0x48]\n"
+ "vmla.f32 q6, q2, d3[0]\n"
- "vmla.f32 q7, q2, d3[1]\n"
+ "vmla.f32 q7, q2, d3[1]\n"
ASM_PREFETCH("[%[a_ptr], #0x180]")
- "vmla.f32 q8, q2, d0[0]\n"
-
- "vmla.f32 q9, q2, d0[1]\n"
- "vldr d4, [%[b_ptr], #0x60]\n"
- "vmla.f32 q10, q3, d2[0]\n"
- "vldr d5, [%[b_ptr], #0x68]\n"
- "vmla.f32 q11, q3, d2[1]\n"
- "vldr d2, [%[a_ptr], #0x50]\n"
- "vmla.f32 q12, q3, d3[0]\n"
-
- "vmla.f32 q13, q3, d3[1]\n"
- "vldr d3, [%[a_ptr], #0x58]\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "add %[a_ptr], %[a_ptr], #0x60\n"
- "vmla.f32 q15, q3, d0[1]\n"
- "vldr d6, [%[b_ptr], #0x70]\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+
+ "vmla.f32 q9, q2, d0[1]\n"
+ "vldr d4, [%[b_ptr], #0x60]\n"
+ "vmla.f32 q10, q3, d2[0]\n"
+ "vldr d5, [%[b_ptr], #0x68]\n"
+ "vmla.f32 q11, q3, d2[1]\n"
+ "vldr d2, [%[a_ptr], #0x50]\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+
+ "vmla.f32 q13, q3, d3[1]\n"
+ "vldr d3, [%[a_ptr], #0x58]\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "add %[a_ptr], %[a_ptr], #0x60\n"
+ "vmla.f32 q15, q3, d0[1]\n"
+ "vldr d6, [%[b_ptr], #0x70]\n"
// Unroll 3
- "vmla.f32 q4, q2, d1[0]\n"
- "vldr d7, [%[b_ptr], #0x78]\n"
- "vmla.f32 q5, q2, d1[1]\n"
- "add %[b_ptr], %[b_ptr], #0x80\n"
- "vmla.f32 q6, q2, d2[0]\n"
- "vldr d0, [%[a_ptr], #0x00]\n"
- "vmla.f32 q7, q2, d2[1]\n"
+ "vmla.f32 q4, q2, d1[0]\n"
+ "vldr d7, [%[b_ptr], #0x78]\n"
+ "vmla.f32 q5, q2, d1[1]\n"
+ "add %[b_ptr], %[b_ptr], #0x80\n"
+ "vmla.f32 q6, q2, d2[0]\n"
+ "vldr d0, [%[a_ptr], #0x00]\n"
+ "vmla.f32 q7, q2, d2[1]\n"
ASM_PREFETCH("[%[b_ptr], #0x180]")
- "vmla.f32 q8, q2, d3[0]\n"
+ "vmla.f32 q8, q2, d3[0]\n"
- "vmla.f32 q9, q2, d3[1]\n"
- "vldr d4, [%[b_ptr], #0x00]\n"
- "vmla.f32 q10, q3, d1[0]\n"
- "vldr d5, [%[b_ptr], #0x08]\n"
- "vmla.f32 q11, q3, d1[1]\n"
- "vldr d1, [%[a_ptr], #0x08]\n"
- "vmla.f32 q12, q3, d2[0]\n"
+ "vmla.f32 q9, q2, d3[1]\n"
+ "vldr d4, [%[b_ptr], #0x00]\n"
+ "vmla.f32 q10, q3, d1[0]\n"
+ "vldr d5, [%[b_ptr], #0x08]\n"
+ "vmla.f32 q11, q3, d1[1]\n"
+ "vldr d1, [%[a_ptr], #0x08]\n"
+ "vmla.f32 q12, q3, d2[0]\n"
- "vmla.f32 q13, q3, d2[1]\n"
- "vldr d2, [%[a_ptr], #0x10]\n"
- "vmla.f32 q14, q3, d3[0]\n"
+ "vmla.f32 q13, q3, d2[1]\n"
+ "vldr d2, [%[a_ptr], #0x10]\n"
+ "vmla.f32 q14, q3, d3[0]\n"
- "vmla.f32 q15, q3, d3[1]\n"
- "bne 1b\n"
+ "vmla.f32 q15, q3, d3[1]\n"
+ "bne 1b\n"
// "Tails" shows how many multiply blocks are needed at the
// end, must be 1-4 inclusive. Bail out to alternative tail
// immediately if it's 1.
"6:\n"
- "subs %[tails], %[tails], #1\n"
- "beq 3f\n"
+ "subs %[tails], %[tails], #1\n"
+ "beq 3f\n"
// Detached final iteration
// Unroll 0
- "vmla.f32 q4, q2, d0[0]\n"
- "vldr d6, [%[b_ptr], #0x10]\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "vldr d7, [%[b_ptr], #0x18]\n"
- "vmla.f32 q6, q2, d1[0]\n"
- "vldr d3, [%[a_ptr], #0x18]\n"
- "vmla.f32 q7, q2, d1[1]\n"
- "subs %[tails], %[tails], #1\n"
- "vmla.f32 q8, q2, d2[0]\n"
- "vmla.f32 q9, q2, d2[1]\n"
- "vldr d4, [%[b_ptr], #0x20]\n"
-
- "vmla.f32 q10, q3, d0[0]\n"
- "vldr d5, [%[b_ptr], #0x28]\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "vldr d0, [%[a_ptr], #0x20]\n"
- "vmla.f32 q12, q3, d1[0]\n"
- "vmla.f32 q13, q3, d1[1]\n"
- "vldr d1, [%[a_ptr], #0x28]\n"
- "vmla.f32 q14, q3, d2[0]\n"
- "vmla.f32 q15, q3, d2[1]\n"
- "beq 4f\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "vldr d6, [%[b_ptr], #0x10]\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "vldr d7, [%[b_ptr], #0x18]\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+ "vldr d3, [%[a_ptr], #0x18]\n"
+ "vmla.f32 q7, q2, d1[1]\n"
+ "subs %[tails], %[tails], #1\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+ "vldr d4, [%[b_ptr], #0x20]\n"
+
+ "vmla.f32 q10, q3, d0[0]\n"
+ "vldr d5, [%[b_ptr], #0x28]\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "vldr d0, [%[a_ptr], #0x20]\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+ "vmla.f32 q13, q3, d1[1]\n"
+ "vldr d1, [%[a_ptr], #0x28]\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+ "vmla.f32 q15, q3, d2[1]\n"
+ "beq 4f\n"
// Unroll 1
- "vmla.f32 q4, q2, d3[0]\n"
- "vldr d6, [%[b_ptr], #0x30]\n"
- "vmla.f32 q5, q2, d3[1]\n"
- "vldr d7, [%[b_ptr], #0x38]\n"
- "vmla.f32 q6, q2, d0[0]\n"
- "vldr d2, [%[a_ptr], #0x30]\n"
- "vmla.f32 q7, q2, d0[1]\n"
- "subs %[tails], %[tails], #1\n"
- "vmla.f32 q8, q2, d1[0]\n"
-
- "vmla.f32 q9, q2, d1[1]\n"
-
- "vmla.f32 q10, q3, d3[0]\n"
- "vldr d4, [%[b_ptr], #0x40]\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "vldr d5, [%[b_ptr], #0x48]\n"
- "vmla.f32 q12, q3, d0[0]\n"
- "vldr d3, [%[a_ptr], #0x38]\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "vldr d0, [%[a_ptr], #0x40]\n"
- "vmla.f32 q14, q3, d1[0]\n"
- "vmla.f32 q15, q3, d1[1]\n"
- "beq 5f\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "vldr d6, [%[b_ptr], #0x30]\n"
+ "vmla.f32 q5, q2, d3[1]\n"
+ "vldr d7, [%[b_ptr], #0x38]\n"
+ "vmla.f32 q6, q2, d0[0]\n"
+ "vldr d2, [%[a_ptr], #0x30]\n"
+ "vmla.f32 q7, q2, d0[1]\n"
+ "subs %[tails], %[tails], #1\n"
+ "vmla.f32 q8, q2, d1[0]\n"
+
+ "vmla.f32 q9, q2, d1[1]\n"
+
+ "vmla.f32 q10, q3, d3[0]\n"
+ "vldr d4, [%[b_ptr], #0x40]\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "vldr d5, [%[b_ptr], #0x48]\n"
+ "vmla.f32 q12, q3, d0[0]\n"
+ "vldr d3, [%[a_ptr], #0x38]\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "vldr d0, [%[a_ptr], #0x40]\n"
+ "vmla.f32 q14, q3, d1[0]\n"
+ "vmla.f32 q15, q3, d1[1]\n"
+ "beq 5f\n"
// Unroll 2
- "vmla.f32 q4, q2, d2[0]\n"
- "vldr d6, [%[b_ptr], #0x50]\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "vldr d7, [%[b_ptr], #0x58]\n"
- "vmla.f32 q6, q2, d3[0]\n"
- "vldr d1, [%[a_ptr], #0x48]\n"
- "vmla.f32 q7, q2, d3[1]\n"
- "vmla.f32 q8, q2, d0[0]\n"
- "vmla.f32 q9, q2, d0[1]\n"
-
- "vmla.f32 q10, q3, d2[0]\n"
- "vldr d4, [%[b_ptr], #0x60]\n"
- "vmla.f32 q11, q3, d2[1]\n"
- "vldr d5, [%[b_ptr], #0x68]\n"
- "vmla.f32 q12, q3, d3[0]\n"
- "vldr d2, [%[a_ptr], #0x50]\n"
- "vmla.f32 q13, q3, d3[1]\n"
- "vldr d3, [%[a_ptr], #0x58]\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "vmla.f32 q15, q3, d0[1]\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "vldr d6, [%[b_ptr], #0x50]\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "vldr d7, [%[b_ptr], #0x58]\n"
+ "vmla.f32 q6, q2, d3[0]\n"
+ "vldr d1, [%[a_ptr], #0x48]\n"
+ "vmla.f32 q7, q2, d3[1]\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+ "vmla.f32 q9, q2, d0[1]\n"
+
+ "vmla.f32 q10, q3, d2[0]\n"
+ "vldr d4, [%[b_ptr], #0x60]\n"
+ "vmla.f32 q11, q3, d2[1]\n"
+ "vldr d5, [%[b_ptr], #0x68]\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+ "vldr d2, [%[a_ptr], #0x50]\n"
+ "vmla.f32 q13, q3, d3[1]\n"
+ "vldr d3, [%[a_ptr], #0x58]\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "vmla.f32 q15, q3, d0[1]\n"
// Unroll 3
- "vmla.f32 q4, q2, d1[0]\n"
- "vldr d6, [%[b_ptr], #0x70]\n"
- "vmla.f32 q5, q2, d1[1]\n"
- "vldr d7, [%[b_ptr], #0x78]\n"
- "vmla.f32 q10, q3, d1[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d1[1]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q6, q2, d2[0]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d2[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d2[1]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d2[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d3[0]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d3[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d3[1]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d3[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "add %[a_ptr], %[a_ptr], #0x60\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "add %[b_ptr], %[b_ptr], #0x80\n"
- "b 2f\n"
+ "vmla.f32 q4, q2, d1[0]\n"
+ "vldr d6, [%[b_ptr], #0x70]\n"
+ "vmla.f32 q5, q2, d1[1]\n"
+ "vldr d7, [%[b_ptr], #0x78]\n"
+ "vmla.f32 q10, q3, d1[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d1[1]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d2[0]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d2[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d2[1]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d2[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d3[0]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d3[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d3[1]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d3[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "add %[a_ptr], %[a_ptr], #0x60\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "add %[b_ptr], %[b_ptr], #0x80\n"
+ "b 2f\n"
// tails==1 final tail
"3:\n"
- "vmla.f32 q4, q2, d0[0]\n"
- "vldr d6, [%[b_ptr], #0x10]\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "vldr d7, [%[b_ptr], #0x18]\n"
- "vmla.f32 q6, q2, d1[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q10, q3, d0[0]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d1[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d1[1]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d1[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d2[0]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d2[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d2[1]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d2[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "add %[a_ptr], %[a_ptr], #0x18\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "add %[b_ptr], %[b_ptr], #0x20\n"
- "b 2f\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "vldr d6, [%[b_ptr], #0x10]\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "vldr d7, [%[b_ptr], #0x18]\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q10, q3, d0[0]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d1[1]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d1[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d2[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "add %[a_ptr], %[a_ptr], #0x18\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "add %[b_ptr], %[b_ptr], #0x20\n"
+ "b 2f\n"
// tails==2 final tail
"4:\n"
- "vmla.f32 q4, q2, d3[0]\n"
- "vldr d6, [%[b_ptr], #0x30]\n"
- "vmla.f32 q5, q2, d3[1]\n"
- "vldr d7, [%[b_ptr], #0x38]\n"
- "vmla.f32 q10, q3, d3[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q6, q2, d0[0]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d0[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d0[1]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d1[0]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d1[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d1[1]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d1[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "add %[b_ptr], %[b_ptr], #0x40\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "add %[a_ptr], %[a_ptr], #0x30\n"
- "b 2f\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "vldr d6, [%[b_ptr], #0x30]\n"
+ "vmla.f32 q5, q2, d3[1]\n"
+ "vldr d7, [%[b_ptr], #0x38]\n"
+ "vmla.f32 q10, q3, d3[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d0[0]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d0[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d0[1]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d1[0]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d1[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d1[1]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d1[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "add %[b_ptr], %[b_ptr], #0x40\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "add %[a_ptr], %[a_ptr], #0x30\n"
+ "b 2f\n"
// tails==3 final tail
"5:\n"
- "vmla.f32 q4, q2, d2[0]\n"
- "vldr d6, [%[b_ptr], #0x50]\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "vldr d7, [%[b_ptr], #0x58]\n"
- "vmla.f32 q6, q2, d3[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q10, q3, d2[0]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d2[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d3[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d3[1]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d3[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d0[0]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d0[1]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d0[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "add %[a_ptr], %[a_ptr], #0x48\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "add %[b_ptr], %[b_ptr], #0x60\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "vldr d6, [%[b_ptr], #0x50]\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "vldr d7, [%[b_ptr], #0x58]\n"
+ "vmla.f32 q6, q2, d3[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q10, q3, d2[0]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d2[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d3[1]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d3[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d0[1]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d0[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "add %[a_ptr], %[a_ptr], #0x48\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "add %[b_ptr], %[b_ptr], #0x60\n"
"2:\n"
- "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n"
+ "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n"
: [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr), [k] "+r" (k), [tails] "+r" (tails)
:
: "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp
index a7494d500c..b230dc1fb7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a32_sgemm_8x6/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -56,110 +56,110 @@ void a32_sgemm_8x6(const float *Apanel, const float *Bpanel, float *Cpanel, int
int k = ((K+3)/4) - 1;
__asm __volatile (
- "vmov.i32 q4, #0\n"
- "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
- "vmov.i32 q5, #0\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
- "vmov.i32 q6, #0\n"
+ "vmov.i32 q4, #0\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
+ "vmov.i32 q5, #0\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+ "vmov.i32 q6, #0\n"
ASM_PREFETCH("[%[a_ptr], #48]")
- "vmov.i32 q7, #0\n"
+ "vmov.i32 q7, #0\n"
ASM_PREFETCH("[%[b_ptr], #48]")
- "vmov.i32 q8, #0\n"
+ "vmov.i32 q8, #0\n"
ASM_PREFETCH("[%[a_ptr], #112]")
- "vmov.i32 q9, #0\n"
+ "vmov.i32 q9, #0\n"
ASM_PREFETCH("[%[b_ptr], #112]")
- "vmov.i32 q10, #0\n"
- "vmov.i32 q11, #0\n"
- "vmov.i32 q12, #0\n"
- "vmov.i32 q13, #0\n"
+ "vmov.i32 q10, #0\n"
+ "vmov.i32 q11, #0\n"
+ "vmov.i32 q12, #0\n"
+ "vmov.i32 q13, #0\n"
ASM_PREFETCH("[%[a_ptr], #176]")
- "vmov.i32 q14, #0\n"
+ "vmov.i32 q14, #0\n"
ASM_PREFETCH("[%[b_ptr], #176]")
- "vmov.i32 q15, #0\n"
+ "vmov.i32 q15, #0\n"
- "cmp %[k], #0\n"
- "beq 6f\n"
+ "cmp %[k], #0\n"
+ "beq 6f\n"
"1:\n"
// Unroll 0
- "vmla.f32 q4, q2, d0[0]\n"
- "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "vmla.f32 q6, q2, d1[0]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "vmla.f32 q7, q2, d1[1]\n"
- "vmla.f32 q8, q2, d2[0]\n"
- "vmla.f32 q9, q2, d2[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d0[0]\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "vmla.f32 q12, q3, d1[0]\n"
- "vmla.f32 q13, q3, d1[1]\n"
- "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
- "vmla.f32 q14, q3, d2[0]\n"
- "vmla.f32 q15, q3, d2[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d1[1]\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d0[0]\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+ "vmla.f32 q13, q3, d1[1]\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+ "vmla.f32 q15, q3, d2[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
// Unroll 1
- "vmla.f32 q4, q2, d3[0]\n"
- "subs %[k], %[k], #1\n"
- "vmla.f32 q5, q2, d3[1]\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "subs %[k], %[k], #1\n"
+ "vmla.f32 q5, q2, d3[1]\n"
ASM_PREFETCH("[%[a_ptr], #208]")
- "vmla.f32 q6, q2, d0[0]\n"
- "vmla.f32 q7, q2, d0[1]\n"
+ "vmla.f32 q6, q2, d0[0]\n"
+ "vmla.f32 q7, q2, d0[1]\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "vmla.f32 q8, q2, d1[0]\n"
- "vmla.f32 q9, q2, d1[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d3[0]\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q12, q3, d0[0]\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "vmla.f32 q14, q3, d1[0]\n"
- "vmla.f32 q15, q3, d1[1]\n"
- "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q8, q2, d1[0]\n"
+ "vmla.f32 q9, q2, d1[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d3[0]\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q12, q3, d0[0]\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "vmla.f32 q14, q3, d1[0]\n"
+ "vmla.f32 q15, q3, d1[1]\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
// Unroll 2
- "vmla.f32 q4, q2, d2[0]\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "vmla.f32 q6, q2, d3[0]\n"
- "vmla.f32 q7, q2, d3[1]\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d3[0]\n"
+ "vmla.f32 q7, q2, d3[1]\n"
ASM_PREFETCH("[%[a_ptr], #240]")
- "vmla.f32 q8, q2, d0[0]\n"
- "vmla.f32 q9, q2, d0[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+ "vmla.f32 q9, q2, d0[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
- "vmla.f32 q10, q3, d2[0]\n"
- "vmla.f32 q11, q3, d2[1]\n"
+ "vmla.f32 q10, q3, d2[0]\n"
+ "vmla.f32 q11, q3, d2[1]\n"
ASM_PREFETCH("[%[b_ptr], #208]")
- "vmla.f32 q12, q3, d3[0]\n"
- "vmla.f32 q13, q3, d3[1]\n"
- "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "vmla.f32 q15, q3, d0[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+ "vmla.f32 q13, q3, d3[1]\n"
+ "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "vmla.f32 q15, q3, d0[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
// Unroll 3
- "vmla.f32 q4, q2, d1[0]\n"
- "vmla.f32 q5, q2, d1[1]\n"
- "vmla.f32 q6, q2, d2[0]\n"
- "vmla.f32 q7, q2, d2[1]\n"
- "vmla.f32 q8, q2, d3[0]\n"
- "vmla.f32 q9, q2, d3[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d1[0]\n"
- "vmla.f32 q11, q3, d1[1]\n"
- "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
- "vmla.f32 q12, q3, d2[0]\n"
- "vmla.f32 q13, q3, d2[1]\n"
- "vmla.f32 q14, q3, d3[0]\n"
- "vmla.f32 q15, q3, d3[1]\n"
- "bne 1b\n"
+ "vmla.f32 q4, q2, d1[0]\n"
+ "vmla.f32 q5, q2, d1[1]\n"
+ "vmla.f32 q6, q2, d2[0]\n"
+ "vmla.f32 q7, q2, d2[1]\n"
+ "vmla.f32 q8, q2, d3[0]\n"
+ "vmla.f32 q9, q2, d3[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d1[0]\n"
+ "vmla.f32 q11, q3, d1[1]\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q12, q3, d2[0]\n"
+ "vmla.f32 q13, q3, d2[1]\n"
+ "vmla.f32 q14, q3, d3[0]\n"
+ "vmla.f32 q15, q3, d3[1]\n"
+ "bne 1b\n"
// Branch here if we never execute main loop.
"6:\n"
@@ -167,182 +167,182 @@ void a32_sgemm_8x6(const float *Apanel, const float *Bpanel, float *Cpanel, int
// "Tails" shows how many multiply blocks are needed at the
// end, must be 1-4 inclusive. Bail out to alternative tail
// immediately if it's 1.
- "subs %[tails], %[tails], #1\n"
- "beq 3f\n"
+ "subs %[tails], %[tails], #1\n"
+ "beq 3f\n"
// Detached final iteration
// Unroll 0
- "vmla.f32 q4, q2, d0[0]\n"
- "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "vmla.f32 q6, q2, d1[0]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "vmla.f32 q7, q2, d1[1]\n"
- "vmla.f32 q8, q2, d2[0]\n"
- "subs %[tails], %[tails], #1\n"
- "vmla.f32 q9, q2, d2[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d0[0]\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "vmla.f32 q12, q3, d1[0]\n"
- "vmla.f32 q13, q3, d1[1]\n"
- "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
- "vmla.f32 q14, q3, d2[0]\n"
- "vmla.f32 q15, q3, d2[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "beq 4f\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d1[1]\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "subs %[tails], %[tails], #1\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d0[0]\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+ "vmla.f32 q13, q3, d1[1]\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+ "vmla.f32 q15, q3, d2[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "beq 4f\n"
// Unroll 1
- "vmla.f32 q4, q2, d3[0]\n"
- "vmla.f32 q5, q2, d3[1]\n"
- "subs %[tails], %[tails], #1\n"
- "vmla.f32 q6, q2, d0[0]\n"
- "vmla.f32 q7, q2, d0[1]\n"
- "vmla.f32 q8, q2, d1[0]\n"
- "vmla.f32 q9, q2, d1[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d3[0]\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q12, q3, d0[0]\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "vmla.f32 q14, q3, d1[0]\n"
- "vmla.f32 q15, q3, d1[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "beq 5f\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "vmla.f32 q5, q2, d3[1]\n"
+ "subs %[tails], %[tails], #1\n"
+ "vmla.f32 q6, q2, d0[0]\n"
+ "vmla.f32 q7, q2, d0[1]\n"
+ "vmla.f32 q8, q2, d1[0]\n"
+ "vmla.f32 q9, q2, d1[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d3[0]\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q12, q3, d0[0]\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "vmla.f32 q14, q3, d1[0]\n"
+ "vmla.f32 q15, q3, d1[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "beq 5f\n"
// Unroll 2
- "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
- "vmla.f32 q4, q2, d2[0]\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "vmla.f32 q6, q2, d3[0]\n"
- "vmla.f32 q7, q2, d3[1]\n"
- "vmla.f32 q8, q2, d0[0]\n"
- "vmla.f32 q9, q2, d0[1]\n"
- "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
-
- "vmla.f32 q10, q3, d2[0]\n"
- "vmla.f32 q11, q3, d2[1]\n"
- "vmla.f32 q12, q3, d3[0]\n"
- "vmla.f32 q13, q3, d3[1]\n"
- "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "vmla.f32 q15, q3, d0[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vld1.32 {d0-d1}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "vmla.f32 q6, q2, d3[0]\n"
+ "vmla.f32 q7, q2, d3[1]\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+ "vmla.f32 q9, q2, d0[1]\n"
+ "vld1.32 {d4-d5}, [%[b_ptr] :128]!\n"
+
+ "vmla.f32 q10, q3, d2[0]\n"
+ "vmla.f32 q11, q3, d2[1]\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+ "vmla.f32 q13, q3, d3[1]\n"
+ "vld1.32 {d2-d3}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "vmla.f32 q15, q3, d0[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
// Unroll 3
- "vmla.f32 q4, q2, d1[0]\n"
- "vmla.f32 q10, q3, d1[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q5, q2, d1[1]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d1[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q6, q2, d2[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d2[0]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d2[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d2[1]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d3[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d3[0]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d3[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d3[1]\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "b 2f\n"
+ "vmla.f32 q4, q2, d1[0]\n"
+ "vmla.f32 q10, q3, d1[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q5, q2, d1[1]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d1[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d2[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d2[0]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d2[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d2[1]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d3[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d3[0]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d3[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d3[1]\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "b 2f\n"
// tails==1 final tail
"3:\n"
- "vmla.f32 q4, q2, d0[0]\n"
- "vld1.32 {d2}, [%[a_ptr] :64]!\n"
- "vmla.f32 q5, q2, d0[1]\n"
- "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
- "vmla.f32 q6, q2, d1[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q10, q3, d0[0]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d0[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d1[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d1[1]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d1[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d2[0]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d2[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d2[1]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d2[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "b 2f\n"
+ "vmla.f32 q4, q2, d0[0]\n"
+ "vld1.32 {d2}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q5, q2, d0[1]\n"
+ "vld1.32 {d6-d7}, [%[b_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d1[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q10, q3, d0[0]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d0[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d1[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d1[1]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d1[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d2[0]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d2[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d2[1]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d2[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "b 2f\n"
// tails==2 final tail
"4:\n"
- "vmla.f32 q4, q2, d3[0]\n"
- "vmla.f32 q10, q3, d3[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q5, q2, d3[1]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d3[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q6, q2, d0[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d0[0]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d0[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d0[1]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d1[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d1[0]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d1[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d1[1]\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
- "b 2f\n"
+ "vmla.f32 q4, q2, d3[0]\n"
+ "vmla.f32 q10, q3, d3[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q5, q2, d3[1]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d3[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q6, q2, d0[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d0[0]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d0[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d0[1]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d1[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d1[0]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d1[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d1[1]\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "b 2f\n"
// tails==3 final tail
"5:\n"
- "vmla.f32 q4, q2, d2[0]\n"
- "vld1.32 {d0}, [%[a_ptr] :64]!\n"
- "vmla.f32 q5, q2, d2[1]\n"
- "vmla.f32 q6, q2, d3[0]\n"
- "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
- "vmla.f32 q10, q3, d2[0]\n"
- "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
- "vmla.f32 q11, q3, d2[1]\n"
- "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
- "vmla.f32 q12, q3, d3[0]\n"
- "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
- "vmla.f32 q7, q2, d3[1]\n"
- "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
- "vmla.f32 q13, q3, d3[1]\n"
- "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
- "vmla.f32 q8, q2, d0[0]\n"
- "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
- "vmla.f32 q14, q3, d0[0]\n"
- "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
- "vmla.f32 q9, q2, d0[1]\n"
- "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
- "vmla.f32 q15, q3, d0[1]\n"
- "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
- "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q4, q2, d2[0]\n"
+ "vld1.32 {d0}, [%[a_ptr] :64]!\n"
+ "vmla.f32 q5, q2, d2[1]\n"
+ "vmla.f32 q6, q2, d3[0]\n"
+ "vst1.32 {d8-d9}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q10, q3, d2[0]\n"
+ "vst1.32 {d20-d21}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q11, q3, d2[1]\n"
+ "vst1.32 {d10-d11}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q12, q3, d3[0]\n"
+ "vst1.32 {d22-d23}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q7, q2, d3[1]\n"
+ "vst1.32 {d12-d13}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q13, q3, d3[1]\n"
+ "vst1.32 {d24-d25}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q8, q2, d0[0]\n"
+ "vst1.32 {d14-d15}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q14, q3, d0[0]\n"
+ "vst1.32 {d26-d27}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q9, q2, d0[1]\n"
+ "vst1.32 {d16-d17}, [%[c_ptr] :128]!\n"
+ "vmla.f32 q15, q3, d0[1]\n"
+ "vst1.32 {d28-d29}, [%[c_ptr] :128]!\n"
+ "vst1.32 {d18-d19}, [%[c_ptr] :128]!\n"
"2:\n"
- "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n"
+ "vst1.32 {d30-d31}, [%[c_ptr] :128]!\n"
: [a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr), [k] "+r" (k), [tails] "+r" (tails)
:
: "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp
index 72e414969e..fe939b1084 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,7 +82,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
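The signature change threads the LHS operand type through StdTransformsFixed explicitly instead of reusing the RHS type for both operands, which matters for fixed-format kernels whose two operands can differ. A hedged sketch of the shape of the change (template parameter names and the stand-in bf16 type are illustrative, not the library's exact declaration):

    #include <cstdint>
    using bfloat16_t = std::uint16_t; // illustrative bf16 stand-in

    // Before: StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4>
    // After:  StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4>
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned Height, unsigned Width, unsigned KBlock>
    struct StdTransformsFixedSketch { /* per-operand interleave hooks */ };

    // For this bf16->fp32 kernel both operands happen to be bf16:
    StdTransformsFixedSketch<bfloat16_t, bfloat16_t, float, 6, 16, 4> transforms_sketch{};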
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp
index 377daddae9..022a34fdcd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_bf16fp32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,18 +50,19 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
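Folding output_ptr and bias into the KernelArgs block replaces two asm input operands (the removed "mov x13, %x[output_ptr]" and "mov x15, %x[bias]" below) with loads from the args struct, freeing general-purpose registers for the allocator. The underlying pattern, as a minimal AArch64-only sketch with a simplified struct:

    #include <cstddef>
    #include <cstdint>

    struct KernelArgsSketch {      // simplified stand-in for the ka block
        void        *output_ptr;
        const float *bias;
    };

    void load_from_args(const KernelArgsSketch *args)
    {
        uintptr_t out, b;
        // Same idea as "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]"
        __asm__ volatile(
            "ldr %x[out], [%x[args], %[off_out]]\n"
            "ldr %x[b], [%x[args], %[off_bias]]\n"
            : [out] "=r"(out), [b] "=r"(b)
            : [args] "r"(args),
              [off_out] "I"(offsetof(KernelArgsSketch, output_ptr)),
              [off_bias] "I"(offsetof(KernelArgsSketch, bias)));
        (void)out; (void)b;
    }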
@@ -82,6 +83,7 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -106,19 +108,19 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"bgt 77f\n"
"beq 39f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 3f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -131,15 +133,15 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cbz x15, 4f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 16f\n"
@@ -230,8 +232,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"17:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 18f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -251,7 +253,11 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q6, [x12, #0x10]\n"
"blt 21f\n"
"20:" // Height 1: Multiply loop: Main loop head
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"trn1 v20.2d, v1.2d, v21.2d\n"
+ "trn2 v1.2d, v1.2d, v21.2d\n"
".inst 0x6e47ee88 // bfmmla v8.4s, v20.8h, v7.8h\n"
"ldr q17, [x11, #0x0]\n"
".inst 0x6e46ee8c // bfmmla v12.4s, v20.8h, v6.8h\n"
@@ -264,38 +270,37 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x9, #0x0]\n"
".inst 0x6e51ee8e // bfmmla v14.4s, v20.8h, v17.8h\n"
"ldr q17, [x9, #0x10]\n"
- "trn2 v1.2d, v1.2d, v21.2d\n"
".inst 0x6e52ee8b // bfmmla v11.4s, v20.8h, v18.8h\n"
"ldr q18, [x12, #0x20]\n"
".inst 0x6e51ee8f // bfmmla v15.4s, v20.8h, v17.8h\n"
"ldr q17, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
".inst 0x6e52ec28 // bfmmla v8.4s, v1.8h, v18.8h\n"
"ldr q18, [x11, #0x20]\n"
".inst 0x6e51ec2c // bfmmla v12.4s, v1.8h, v17.8h\n"
"ldr q17, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e52ec29 // bfmmla v9.4s, v1.8h, v18.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e51ec2d // bfmmla v13.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e52ec2a // bfmmla v10.4s, v1.8h, v18.8h\n"
"ldr q18, [x9, #0x20]\n"
".inst 0x6e51ec2e // bfmmla v14.4s, v1.8h, v17.8h\n"
"ldr q17, [x9, #0x30]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
+ "ldr q7, [x12, #0x0]\n"
".inst 0x6e51ec2f // bfmmla v15.4s, v1.8h, v17.8h\n"
"ldr q1, [x26, #0x0]\n"
- "add x12, x12, #0x40\n"
- "ldr q7, [x12, #0x0]\n"
- "add x11, x11, #0x40\n"
"ldr q6, [x12, #0x10]\n"
- "add x10, x10, #0x40\n"
- "add x9, x9, #0x40\n"
"bge 20b\n"
"21:" // Height 1: Multiply loop: Single iteration only
- "trn1 v19.2d, v1.2d, v20.2d\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "trn1 v19.2d, v1.2d, v17.2d\n"
+ "trn2 v1.2d, v1.2d, v17.2d\n"
".inst 0x6e47ee68 // bfmmla v8.4s, v19.8h, v7.8h\n"
"ldr q17, [x11, #0x0]\n"
".inst 0x6e46ee6c // bfmmla v12.4s, v19.8h, v6.8h\n"
@@ -307,61 +312,58 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e51ee6a // bfmmla v10.4s, v19.8h, v17.8h\n"
"ldr q17, [x9, #0x0]\n"
".inst 0x6e52ee6e // bfmmla v14.4s, v19.8h, v18.8h\n"
- "ldr q24, [x9, #0x10]\n"
- "trn2 v1.2d, v1.2d, v20.2d\n"
+ "ldr q25, [x9, #0x10]\n"
".inst 0x6e51ee6b // bfmmla v11.4s, v19.8h, v17.8h\n"
- "ldr q18, [x12, #0x20]\n"
- ".inst 0x6e58ee6f // bfmmla v15.4s, v19.8h, v24.8h\n"
- "ldr q17, [x12, #0x30]\n"
- ".inst 0x6e52ec28 // bfmmla v8.4s, v1.8h, v18.8h\n"
+ "ldr q17, [x12, #0x20]\n"
+ ".inst 0x6e59ee6f // bfmmla v15.4s, v19.8h, v25.8h\n"
+ "ldr q3, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ ".inst 0x6e51ec28 // bfmmla v8.4s, v1.8h, v17.8h\n"
"ldr q19, [x11, #0x20]\n"
- ".inst 0x6e51ec2c // bfmmla v12.4s, v1.8h, v17.8h\n"
+ ".inst 0x6e43ec2c // bfmmla v12.4s, v1.8h, v3.8h\n"
"ldr q17, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e53ec29 // bfmmla v9.4s, v1.8h, v19.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e51ec2d // bfmmla v13.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e52ec2a // bfmmla v10.4s, v1.8h, v18.8h\n"
"ldr q18, [x9, #0x20]\n"
".inst 0x6e51ec2e // bfmmla v14.4s, v1.8h, v17.8h\n"
"ldr q17, [x9, #0x30]\n"
- "sub x27, x27, #0x8\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
".inst 0x6e51ec2f // bfmmla v15.4s, v1.8h, v17.8h\n"
- "add x26, x26, #0x10\n"
- "add x12, x12, #0x40\n"
- "add x11, x11, #0x40\n"
- "add x10, x10, #0x40\n"
- "add x9, x9, #0x40\n"
"22:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 27f\n"
"cmp x27, #0x4\n"
"blt 24f\n"
"23:" // Height 1: Multiply loop: Odd block loop
"ldr d19, [x26], #0x8\n"
- "ldr q18, [x12, #0x0]\n"
- "trn1 v19.2d, v19.2d, v17.2d\n"
+ "ldr q20, [x12, #0x0]\n"
+ "sub x27, x27, #0x4\n"
"ldr q17, [x12, #0x10]\n"
- ".inst 0x6e52ee68 // bfmmla v8.4s, v19.8h, v18.8h\n"
+ "cmp x27, #0x4\n"
+ "add x12, x12, #0x20\n"
+ "trn1 v19.2d, v19.2d, v18.2d\n"
+ ".inst 0x6e54ee68 // bfmmla v8.4s, v19.8h, v20.8h\n"
"ldr q18, [x11, #0x0]\n"
".inst 0x6e51ee6c // bfmmla v12.4s, v19.8h, v17.8h\n"
"ldr q17, [x11, #0x10]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e52ee69 // bfmmla v9.4s, v19.8h, v18.8h\n"
"ldr q18, [x10, #0x0]\n"
".inst 0x6e51ee6d // bfmmla v13.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e52ee6a // bfmmla v10.4s, v19.8h, v18.8h\n"
"ldr q18, [x9, #0x0]\n"
".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q17, [x9, #0x10]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
"bge 23b\n"
"24:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 27f\n"
@@ -375,25 +377,25 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"26:" // Height 1: Multiply loop: Ragged operand read: Done
"ldr q20, [x12, #0x0]\n"
"ldr q18, [x12, #0x10]\n"
+ "add x12, x12, #0x20\n"
"trn1 v19.2d, v1.2d, v17.2d\n"
".inst 0x6e54ee68 // bfmmla v8.4s, v19.8h, v20.8h\n"
"ldr q17, [x11, #0x0]\n"
".inst 0x6e52ee6c // bfmmla v12.4s, v19.8h, v18.8h\n"
"ldr q18, [x11, #0x10]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e51ee69 // bfmmla v9.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x0]\n"
".inst 0x6e52ee6d // bfmmla v13.4s, v19.8h, v18.8h\n"
- "ldr q6, [x10, #0x10]\n"
+ "ldr q2, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e51ee6a // bfmmla v10.4s, v19.8h, v17.8h\n"
"ldr q18, [x9, #0x0]\n"
- ".inst 0x6e46ee6e // bfmmla v14.4s, v19.8h, v6.8h\n"
+ ".inst 0x6e42ee6e // bfmmla v14.4s, v19.8h, v2.8h\n"
"ldr q17, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
"27:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -404,9 +406,9 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
"tbz %x[flags], #1, 28f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v18.4s\n"
"fmin v9.4s, v9.4s, v18.4s\n"
@@ -477,19 +479,19 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"b 230f\n"
"39:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"40:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 41f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -502,15 +504,15 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cbz x15, 42f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 54f\n"
@@ -518,75 +520,75 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"tbz %x[flags], #0, 53f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x14, #0x10\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
"bge 51f\n"
"tbz x14, #3, 46f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
"tbz x14, #2, 44f\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
"tbz x14, #1, 43f\n"
"ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
"tbz x14, #0, 50f\n"
"ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v15.s }[2], [x26]\n"
"b 50f\n"
"43:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 50f\n"
"ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
"b 50f\n"
"44:" // Height 2: Partial accumulate: partial_2_8
"tbz x14, #1, 45f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
"tbz x14, #0, 50f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v14.s }[2], [x26]\n"
"b 50f\n"
"45:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 50f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
"b 50f\n"
"46:" // Height 2: Partial accumulate: partial_4_0
"tbz x14, #2, 48f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x14, #1, 47f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
"tbz x14, #0, 50f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 50f\n"
"47:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 50f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 50f\n"
"48:" // Height 2: Partial accumulate: partial_2_0
"tbz x14, #1, 49f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
"tbz x14, #0, 50f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 50f\n"
"49:" // Height 2: Partial accumulate: partial_1_0
"ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
"50:" // Height 2: Partial accumulate: Done
"sub x13, x13, x20\n"
@@ -596,10 +598,10 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x13, #0x10]\n"
"ldr q11, [x13, #0x20]\n"
"ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"52:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -623,8 +625,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"55:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 56f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -649,6 +651,12 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"blt 59f\n"
"58:" // Height 2: Multiply loop: Main loop head
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "ldr q2, [x25, #0x0]\n"
".inst 0x6e47ee68 // bfmmla v8.4s, v19.8h, v7.8h\n"
"ldr q18, [x11, #0x0]\n"
".inst 0x6e46ee6c // bfmmla v12.4s, v19.8h, v6.8h\n"
@@ -661,40 +669,38 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x9, #0x0]\n"
".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q17, [x9, #0x10]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
"ldr q18, [x12, #0x20]\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
"ldr q17, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
".inst 0x6e52ec28 // bfmmla v8.4s, v1.8h, v18.8h\n"
"ldr q18, [x11, #0x20]\n"
".inst 0x6e51ec2c // bfmmla v12.4s, v1.8h, v17.8h\n"
"ldr q17, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e52ec29 // bfmmla v9.4s, v1.8h, v18.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e51ec2d // bfmmla v13.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e52ec2a // bfmmla v10.4s, v1.8h, v18.8h\n"
"ldr q18, [x9, #0x20]\n"
".inst 0x6e51ec2e // bfmmla v14.4s, v1.8h, v17.8h\n"
"ldr q17, [x9, #0x30]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
- "add x12, x12, #0x40\n"
"ldr q7, [x12, #0x0]\n"
".inst 0x6e51ec2f // bfmmla v15.4s, v1.8h, v17.8h\n"
"ldr q1, [x26, #0x0]\n"
"ldr q6, [x12, #0x10]\n"
- "add x11, x11, #0x40\n"
- "add x10, x10, #0x40\n"
- "add x9, x9, #0x40\n"
"bge 58b\n"
"59:" // Height 2: Multiply loop: Single iteration only
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
".inst 0x6e47ee68 // bfmmla v8.4s, v19.8h, v7.8h\n"
"ldr q18, [x11, #0x0]\n"
".inst 0x6e46ee6c // bfmmla v12.4s, v19.8h, v6.8h\n"
@@ -707,62 +713,58 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x9, #0x0]\n"
".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q17, [x9, #0x10]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
"ldr q18, [x12, #0x20]\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
"ldr q17, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
".inst 0x6e52ec28 // bfmmla v8.4s, v1.8h, v18.8h\n"
"ldr q18, [x11, #0x20]\n"
".inst 0x6e51ec2c // bfmmla v12.4s, v1.8h, v17.8h\n"
"ldr q17, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e52ec29 // bfmmla v9.4s, v1.8h, v18.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e51ec2d // bfmmla v13.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6e52ec2a // bfmmla v10.4s, v1.8h, v18.8h\n"
"ldr q18, [x9, #0x20]\n"
".inst 0x6e51ec2e // bfmmla v14.4s, v1.8h, v17.8h\n"
"ldr q17, [x9, #0x30]\n"
- "sub x27, x27, #0x8\n"
+ "add x9, x9, #0x40\n"
".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
".inst 0x6e51ec2f // bfmmla v15.4s, v1.8h, v17.8h\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "add x12, x12, #0x40\n"
- "add x11, x11, #0x40\n"
- "add x10, x10, #0x40\n"
- "add x9, x9, #0x40\n"
"60:" // Height 2: Multiply loop: Main loop skip
"cbz x27, 65f\n"
"cmp x27, #0x4\n"
"blt 62f\n"
"61:" // Height 2: Multiply loop: Odd block loop
- "ldr d18, [x26], #0x8\n"
- "ldr d17, [x25], #0x8\n"
- "trn1 v19.2d, v18.2d, v17.2d\n"
+ "ldr d20, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"sub x27, x27, #0x4\n"
"ldr q18, [x12, #0x0]\n"
"ldr q17, [x12, #0x10]\n"
+ "cmp x27, #0x4\n"
+ "add x12, x12, #0x20\n"
+ "trn1 v19.2d, v20.2d, v19.2d\n"
".inst 0x6e52ee68 // bfmmla v8.4s, v19.8h, v18.8h\n"
- ".inst 0x6e51ee6c // bfmmla v12.4s, v19.8h, v17.8h\n"
"ldr q26, [x11, #0x0]\n"
+ ".inst 0x6e51ee6c // bfmmla v12.4s, v19.8h, v17.8h\n"
"ldr q6, [x11, #0x10]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e5aee69 // bfmmla v9.4s, v19.8h, v26.8h\n"
- ".inst 0x6e46ee6d // bfmmla v13.4s, v19.8h, v6.8h\n"
"ldr q18, [x10, #0x0]\n"
+ ".inst 0x6e46ee6d // bfmmla v13.4s, v19.8h, v6.8h\n"
"ldr q17, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e52ee6a // bfmmla v10.4s, v19.8h, v18.8h\n"
- ".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q18, [x9, #0x0]\n"
+ ".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q17, [x9, #0x10]\n"
- "cmp x27, #0x4\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
"bge 61b\n"
"62:" // Height 2: Multiply loop: Skip odd blocks
"cbz x27, 65f\n"
@@ -780,24 +782,24 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x12, #0x0]\n"
"ldr q17, [x12, #0x10]\n"
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "add x12, x12, #0x20\n"
".inst 0x6e52ee68 // bfmmla v8.4s, v19.8h, v18.8h\n"
"ldr q18, [x11, #0x0]\n"
".inst 0x6e51ee6c // bfmmla v12.4s, v19.8h, v17.8h\n"
"ldr q17, [x11, #0x10]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e52ee69 // bfmmla v9.4s, v19.8h, v18.8h\n"
- "ldr q3, [x10, #0x0]\n"
+ "ldr q30, [x10, #0x0]\n"
".inst 0x6e51ee6d // bfmmla v13.4s, v19.8h, v17.8h\n"
- "ldr q27, [x10, #0x10]\n"
- ".inst 0x6e43ee6a // bfmmla v10.4s, v19.8h, v3.8h\n"
+ "ldr q31, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e5eee6a // bfmmla v10.4s, v19.8h, v30.8h\n"
"ldr q18, [x9, #0x0]\n"
- ".inst 0x6e5bee6e // bfmmla v14.4s, v19.8h, v27.8h\n"
+ ".inst 0x6e5fee6e // bfmmla v14.4s, v19.8h, v31.8h\n"
"ldr q17, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
"65:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -806,17 +808,17 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "add x25, x13, x20, LSL #2\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "add x26, x13, x20, LSL #2\n"
"tbz %x[flags], #1, 66f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v18.4s\n"
"fmin v12.4s, v12.4s, v18.4s\n"
@@ -840,63 +842,63 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"tbz x14, #3, 70f\n"
"st1 { v7.4s }, [x13], #0x10\n"
"st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
"tbz x14, #2, 68f\n"
"st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
"tbz x14, #1, 67f\n"
"str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
+ "str d11, [x26], #0x8\n"
"tbz x14, #0, 74f\n"
"st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
+ "st1 { v11.s }[2], [x26]\n"
"b 74f\n"
"67:" // Height 2: Partial direct writeback: partial_1_12
"tbz x14, #0, 74f\n"
"str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
"b 74f\n"
"68:" // Height 2: Partial direct writeback: partial_2_8
"tbz x14, #1, 69f\n"
"str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
+ "str d10, [x26], #0x8\n"
"tbz x14, #0, 74f\n"
"st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
+ "st1 { v10.s }[2], [x26]\n"
"b 74f\n"
"69:" // Height 2: Partial direct writeback: partial_1_8
"tbz x14, #0, 74f\n"
"str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
"b 74f\n"
"70:" // Height 2: Partial direct writeback: partial_4_0
"tbz x14, #2, 72f\n"
"st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
"tbz x14, #1, 71f\n"
"str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
+ "str d9, [x26], #0x8\n"
"tbz x14, #0, 74f\n"
"st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
+ "st1 { v9.s }[2], [x26]\n"
"b 74f\n"
"71:" // Height 2: Partial direct writeback: partial_1_4
"tbz x14, #0, 74f\n"
"str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
"b 74f\n"
"72:" // Height 2: Partial direct writeback: partial_2_0
"tbz x14, #1, 73f\n"
"str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
+ "str d8, [x26], #0x8\n"
"tbz x14, #0, 74f\n"
"st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
+ "st1 { v8.s }[2], [x26]\n"
"b 74f\n"
"73:" // Height 2: Partial direct writeback: partial_1_0
"str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
"74:" // Height 2: Partial direct writeback: Done
"b 76f\n"
"75:" // Height 2: Full writeback
@@ -905,29 +907,29 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"str q13, [x13, #0x20]\n"
"str q14, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
"76:" // Height 2: Writeback done
"subs x14, x14, #0x10\n"
"bgt 40b\n"
"b 230f\n"
"77:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"78:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 79f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -940,15 +942,15 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cbz x15, 80f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -963,94 +965,94 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"80:" // Height 3: no bias
"tbz %x[flags], #0, 91f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
"cmp x14, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"bge 89f\n"
"tbz x14, #3, 84f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
"tbz x14, #2, 82f\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
"tbz x14, #1, 81f\n"
"ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
"tbz x14, #0, 88f\n"
"ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
"b 88f\n"
"81:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 88f\n"
"ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
"b 88f\n"
"82:" // Height 3: Partial accumulate: partial_2_8
"tbz x14, #1, 83f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x14, #0, 88f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
"b 88f\n"
"83:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 88f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
"b 88f\n"
"84:" // Height 3: Partial accumulate: partial_4_0
"tbz x14, #2, 86f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"tbz x14, #1, 85f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x14, #0, 88f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
"b 88f\n"
"85:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 88f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
"b 88f\n"
"86:" // Height 3: Partial accumulate: partial_2_0
"tbz x14, #1, 87f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x14, #0, 88f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
"b 88f\n"
"87:" // Height 3: Partial accumulate: partial_1_0
"ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
"88:" // Height 3: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 90f\n"
@@ -1059,14 +1061,14 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x13, #0x10]\n"
"ldr q11, [x13, #0x20]\n"
"ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
"90:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1106,8 +1108,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"93:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 94f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1137,54 +1139,54 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"96:" // Height 3: Multiply loop: Main loop head
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
- "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x6e47ef70 // bfmmla v16.4s, v27.8h, v7.8h\n"
"ldr q26, [x11, #0x0]\n"
- ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e46ef74 // bfmmla v20.4s, v27.8h, v6.8h\n"
"ldr q25, [x11, #0x10]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "trn2 v3.2d, v3.2d, v29.2d\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x0]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x10]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x9, #0x0]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x9, #0x10]\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
"ldr q26, [x12, #0x20]\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
"ldr q25, [x12, #0x30]\n"
"ldr q2, [x25, #0x0]\n"
+ "add x12, x12, #0x40\n"
".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec70 // bfmmla v16.4s, v3.8h, v26.8h\n"
"ldr q26, [x11, #0x20]\n"
".inst 0x6e59ec2c // bfmmla v12.4s, v1.8h, v25.8h\n"
- "add x12, x12, #0x40\n"
".inst 0x6e59ec74 // bfmmla v20.4s, v3.8h, v25.8h\n"
"ldr q25, [x11, #0x30]\n"
- ".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
"add x11, x11, #0x40\n"
+ ".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec71 // bfmmla v17.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e59ec2d // bfmmla v13.4s, v1.8h, v25.8h\n"
".inst 0x6e59ec75 // bfmmla v21.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0x30]\n"
- ".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
"add x10, x10, #0x40\n"
+ ".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec72 // bfmmla v18.4s, v3.8h, v26.8h\n"
"ldr q26, [x9, #0x20]\n"
".inst 0x6e59ec2e // bfmmla v14.4s, v1.8h, v25.8h\n"
@@ -1203,52 +1205,52 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"97:" // Height 3: Multiply loop: Single iteration only
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
- "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x6e47ef70 // bfmmla v16.4s, v27.8h, v7.8h\n"
"ldr q26, [x11, #0x0]\n"
- ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e46ef74 // bfmmla v20.4s, v27.8h, v6.8h\n"
"ldr q25, [x11, #0x10]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "trn2 v3.2d, v3.2d, v29.2d\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x0]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x10]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x9, #0x0]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x9, #0x10]\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
"ldr q26, [x12, #0x20]\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
"ldr q25, [x12, #0x30]\n"
- ".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
"add x12, x12, #0x40\n"
+ ".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec70 // bfmmla v16.4s, v3.8h, v26.8h\n"
"ldr q26, [x11, #0x20]\n"
".inst 0x6e59ec2c // bfmmla v12.4s, v1.8h, v25.8h\n"
".inst 0x6e59ec74 // bfmmla v20.4s, v3.8h, v25.8h\n"
"ldr q25, [x11, #0x30]\n"
- ".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
"add x11, x11, #0x40\n"
+ ".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec71 // bfmmla v17.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e59ec2d // bfmmla v13.4s, v1.8h, v25.8h\n"
".inst 0x6e59ec75 // bfmmla v21.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0x30]\n"
- ".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
"add x10, x10, #0x40\n"
+ ".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec72 // bfmmla v18.4s, v3.8h, v26.8h\n"
"ldr q26, [x9, #0x20]\n"
".inst 0x6e59ec2e // bfmmla v14.4s, v1.8h, v25.8h\n"
@@ -1264,40 +1266,40 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp x27, #0x4\n"
"blt 100f\n"
"99:" // Height 3: Multiply loop: Odd block loop
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "trn1 v28.2d, v26.2d, v25.2d\n"
- "ldr d25, [x24], #0x8\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "sub x27, x27, #0x4\n"
+ "ldr d27, [x24], #0x8\n"
"ldr q26, [x12, #0x0]\n"
- "trn1 v27.2d, v25.2d, v27.2d\n"
- ".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
+ "cmp x27, #0x4\n"
"ldr q25, [x12, #0x10]\n"
+ "add x12, x12, #0x20\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v27.2d, v29.2d\n"
+ ".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
".inst 0x6e5aef70 // bfmmla v16.4s, v27.8h, v26.8h\n"
"ldr q26, [x11, #0x0]\n"
".inst 0x6e59ef8c // bfmmla v12.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef74 // bfmmla v20.4s, v27.8h, v25.8h\n"
"ldr q25, [x11, #0x10]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "sub x27, x27, #0x4\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x0]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "cmp x27, #0x4\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "add x12, x12, #0x20\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x9, #0x0]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x11, x11, #0x20\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "add x10, x10, #0x20\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
- "add x9, x9, #0x20\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
"bge 99b\n"
"100:" // Height 3: Multiply loop: Skip odd blocks
@@ -1319,18 +1321,18 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q26, [x12, #0x0]\n"
"ldr q29, [x12, #0x10]\n"
"trn1 v28.2d, v1.2d, v2.2d\n"
+ "add x12, x12, #0x20\n"
"trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
+ ".inst 0x6e5def8c // bfmmla v12.4s, v28.8h, v29.8h\n"
".inst 0x6e5aef70 // bfmmla v16.4s, v27.8h, v26.8h\n"
"ldr q26, [x11, #0x0]\n"
- ".inst 0x6e5def8c // bfmmla v12.4s, v28.8h, v29.8h\n"
".inst 0x6e5def74 // bfmmla v20.4s, v27.8h, v29.8h\n"
"ldr q25, [x11, #0x10]\n"
- "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x0]\n"
- "add x11, x11, #0x20\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x10]\n"
@@ -1352,24 +1354,24 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 93b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x13, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 104f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v26.4s\n"
"fmin v12.4s, v12.4s, v26.4s\n"
@@ -1401,79 +1403,79 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"tbz x14, #3, 108f\n"
"st1 { v7.4s }, [x13], #0x10\n"
"st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
"tbz x14, #2, 106f\n"
"st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
"tbz x14, #1, 105f\n"
"str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x14, #0, 112f\n"
"st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
"b 112f\n"
"105:" // Height 3: Partial direct writeback: partial_1_12
"tbz x14, #0, 112f\n"
"str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
"b 112f\n"
"106:" // Height 3: Partial direct writeback: partial_2_8
"tbz x14, #1, 107f\n"
"str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x14, #0, 112f\n"
"st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
"b 112f\n"
"107:" // Height 3: Partial direct writeback: partial_1_8
"tbz x14, #0, 112f\n"
"str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
"b 112f\n"
"108:" // Height 3: Partial direct writeback: partial_4_0
"tbz x14, #2, 110f\n"
"st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
"tbz x14, #1, 109f\n"
"str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x14, #0, 112f\n"
"st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
"b 112f\n"
"109:" // Height 3: Partial direct writeback: partial_1_4
"tbz x14, #0, 112f\n"
"str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
"b 112f\n"
"110:" // Height 3: Partial direct writeback: partial_2_0
"tbz x14, #1, 111f\n"
"str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x14, #0, 112f\n"
"st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
"b 112f\n"
"111:" // Height 3: Partial direct writeback: partial_1_0
"str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
"112:" // Height 3: Partial direct writeback: Done
"b 114f\n"
"113:" // Height 3: Full writeback
@@ -1482,33 +1484,33 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"str q13, [x13, #0x20]\n"
"str q14, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"114:" // Height 3: Writeback done
"subs x14, x14, #0x10\n"
"bgt 78b\n"
"b 230f\n"
"115:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"116:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 117f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -1521,15 +1523,15 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cbz x15, 118f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -1544,111 +1546,111 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"118:" // Height 4: no bias
"tbz %x[flags], #0, 129f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"cmp x14, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"bge 127f\n"
"tbz x14, #3, 122f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
"tbz x14, #2, 120f\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
"tbz x14, #1, 119f\n"
"ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x14, #0, 126f\n"
"ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
"b 126f\n"
"119:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 126f\n"
"ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
"b 126f\n"
"120:" // Height 4: Partial accumulate: partial_2_8
"tbz x14, #1, 121f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x14, #0, 126f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
"b 126f\n"
"121:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 126f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
"b 126f\n"
"122:" // Height 4: Partial accumulate: partial_4_0
"tbz x14, #2, 124f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"tbz x14, #1, 123f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x14, #0, 126f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
"b 126f\n"
"123:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 126f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
"b 126f\n"
"124:" // Height 4: Partial accumulate: partial_2_0
"tbz x14, #1, 125f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x14, #0, 126f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
"b 126f\n"
"125:" // Height 4: Partial accumulate: partial_1_0
"ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
"126:" // Height 4: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 128f\n"
@@ -1657,18 +1659,18 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x13, #0x10]\n"
"ldr q11, [x13, #0x20]\n"
"ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"128:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1708,8 +1710,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"131:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 132f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1743,56 +1745,56 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"134:" // Height 4: Multiply loop: Main loop head
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
"sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "cmp x27, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ldr q4, [x23, #0x0]\n"
+ ".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
+ ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e47ef70 // bfmmla v16.4s, v27.8h, v7.8h\n"
"ldr q26, [x11, #0x0]\n"
- ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e46ef74 // bfmmla v20.4s, v27.8h, v6.8h\n"
"ldr q25, [x11, #0x10]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x0]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x10]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x9, #0x0]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x9, #0x10]\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
"ldr q26, [x12, #0x20]\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
"ldr q25, [x12, #0x30]\n"
- ".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
"ldr q2, [x25, #0x0]\n"
+ "add x12, x12, #0x40\n"
+ ".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec70 // bfmmla v16.4s, v3.8h, v26.8h\n"
"ldr q26, [x11, #0x20]\n"
".inst 0x6e59ec2c // bfmmla v12.4s, v1.8h, v25.8h\n"
".inst 0x6e59ec74 // bfmmla v20.4s, v3.8h, v25.8h\n"
"ldr q25, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
- "add x12, x12, #0x40\n"
".inst 0x6e5aec71 // bfmmla v17.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e59ec2d // bfmmla v13.4s, v1.8h, v25.8h\n"
- "add x11, x11, #0x40\n"
".inst 0x6e59ec75 // bfmmla v21.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0x30]\n"
- ".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
"add x10, x10, #0x40\n"
+ ".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec72 // bfmmla v18.4s, v3.8h, v26.8h\n"
"ldr q26, [x9, #0x20]\n"
".inst 0x6e59ec2e // bfmmla v14.4s, v1.8h, v25.8h\n"
@@ -1811,53 +1813,53 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"135:" // Height 4: Multiply loop: Single iteration only
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
"sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
+ ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e47ef70 // bfmmla v16.4s, v27.8h, v7.8h\n"
"ldr q26, [x11, #0x0]\n"
- ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e46ef74 // bfmmla v20.4s, v27.8h, v6.8h\n"
"ldr q25, [x11, #0x10]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x0]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x10]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x9, #0x0]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x9, #0x10]\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "add x23, x23, #0x10\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
"ldr q26, [x12, #0x20]\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
"ldr q25, [x12, #0x30]\n"
- ".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
"add x12, x12, #0x40\n"
+ ".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec70 // bfmmla v16.4s, v3.8h, v26.8h\n"
"ldr q26, [x11, #0x20]\n"
".inst 0x6e59ec2c // bfmmla v12.4s, v1.8h, v25.8h\n"
".inst 0x6e59ec74 // bfmmla v20.4s, v3.8h, v25.8h\n"
"ldr q25, [x11, #0x30]\n"
- ".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
"add x11, x11, #0x40\n"
+ ".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec71 // bfmmla v17.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e59ec2d // bfmmla v13.4s, v1.8h, v25.8h\n"
".inst 0x6e59ec75 // bfmmla v21.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0x30]\n"
- ".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
"add x10, x10, #0x40\n"
+ ".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
".inst 0x6e5aec72 // bfmmla v18.4s, v3.8h, v26.8h\n"
"ldr q26, [x9, #0x20]\n"
".inst 0x6e59ec2e // bfmmla v14.4s, v1.8h, v25.8h\n"
@@ -1873,34 +1875,34 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp x27, #0x4\n"
"blt 138f\n"
"137:" // Height 4: Multiply loop: Odd block loop
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "trn1 v28.2d, v26.2d, v25.2d\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
"sub x27, x27, #0x4\n"
- "ldr d26, [x24], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "trn1 v27.2d, v26.2d, v25.2d\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"cmp x27, #0x4\n"
"ldr q26, [x12, #0x0]\n"
"ldr q25, [x12, #0x10]\n"
+ "add x12, x12, #0x20\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v29.2d, v27.2d\n"
".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
+ ".inst 0x6e59ef8c // bfmmla v12.4s, v28.8h, v25.8h\n"
".inst 0x6e5aef70 // bfmmla v16.4s, v27.8h, v26.8h\n"
"ldr q26, [x11, #0x0]\n"
- ".inst 0x6e59ef8c // bfmmla v12.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef74 // bfmmla v20.4s, v27.8h, v25.8h\n"
"ldr q25, [x11, #0x10]\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x0]\n"
- "add x12, x12, #0x20\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x10]\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x9, #0x0]\n"
- "add x10, x10, #0x20\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x9, #0x10]\n"
@@ -1933,10 +1935,10 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q25, [x12, #0x10]\n"
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "add x12, x12, #0x20\n"
".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
".inst 0x6e5aef70 // bfmmla v16.4s, v27.8h, v26.8h\n"
"ldr q26, [x11, #0x0]\n"
- "add x12, x12, #0x20\n"
".inst 0x6e59ef8c // bfmmla v12.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef74 // bfmmla v20.4s, v27.8h, v25.8h\n"
"ldr q25, [x11, #0x10]\n"
@@ -1965,17 +1967,17 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 131b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
@@ -1985,9 +1987,9 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 142f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v26.4s\n"
"fmin v12.4s, v12.4s, v26.4s\n"
@@ -2027,95 +2029,95 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"tbz x14, #3, 146f\n"
"st1 { v7.4s }, [x13], #0x10\n"
"st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
"tbz x14, #2, 144f\n"
"st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
"tbz x14, #1, 143f\n"
"str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
"tbz x14, #0, 150f\n"
"st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
"b 150f\n"
"143:" // Height 4: Partial direct writeback: partial_1_12
"tbz x14, #0, 150f\n"
"str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
"b 150f\n"
"144:" // Height 4: Partial direct writeback: partial_2_8
"tbz x14, #1, 145f\n"
"str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
"tbz x14, #0, 150f\n"
"st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
"b 150f\n"
"145:" // Height 4: Partial direct writeback: partial_1_8
"tbz x14, #0, 150f\n"
"str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
"b 150f\n"
"146:" // Height 4: Partial direct writeback: partial_4_0
"tbz x14, #2, 148f\n"
"st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
"tbz x14, #1, 147f\n"
"str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
"tbz x14, #0, 150f\n"
"st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
"b 150f\n"
"147:" // Height 4: Partial direct writeback: partial_1_4
"tbz x14, #0, 150f\n"
"str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
"b 150f\n"
"148:" // Height 4: Partial direct writeback: partial_2_0
"tbz x14, #1, 149f\n"
"str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
"tbz x14, #0, 150f\n"
"st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
"b 150f\n"
"149:" // Height 4: Partial direct writeback: partial_1_0
"str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
"150:" // Height 4: Partial direct writeback: Done
"b 152f\n"
"151:" // Height 4: Full writeback
@@ -2124,37 +2126,37 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"str q13, [x13, #0x20]\n"
"str q14, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
"152:" // Height 4: Writeback done
"subs x14, x14, #0x10\n"
"bgt 116b\n"
"b 230f\n"
"153:" // Height 5
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"154:" // Height 5: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 155f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -2167,15 +2169,15 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cbz x15, 156f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -2198,128 +2200,128 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"156:" // Height 5: no bias
"tbz %x[flags], #0, 167f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "cmp x14, #0x10\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
"bge 165f\n"
"tbz x14, #3, 160f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
"tbz x14, #2, 158f\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v27.4s }, [x23], #0x10\n"
"tbz x14, #1, 157f\n"
"ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d6, [x23], #0x8\n"
"tbz x14, #0, 164f\n"
"ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v6.s }[2], [x23]\n"
"b 164f\n"
"157:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 164f\n"
"ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s6, [x23, #0x0]\n"
"b 164f\n"
"158:" // Height 5: Partial accumulate: partial_2_8
"tbz x14, #1, 159f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x14, #0, 164f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
"b 164f\n"
"159:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 164f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
"b 164f\n"
"160:" // Height 5: Partial accumulate: partial_4_0
"tbz x14, #2, 162f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x14, #1, 161f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x14, #0, 164f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
"b 164f\n"
"161:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 164f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
"b 164f\n"
"162:" // Height 5: Partial accumulate: partial_2_0
"tbz x14, #1, 163f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x14, #0, 164f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 164f\n"
"163:" // Height 5: Partial accumulate: partial_1_0
"ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"164:" // Height 5: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 166f\n"
@@ -2328,22 +2330,22 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x13, #0x10]\n"
"ldr q11, [x13, #0x20]\n"
"ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q25, [x23, #0x0]\n"
+ "ldr q26, [x23, #0x10]\n"
+ "ldr q27, [x23, #0x20]\n"
+ "ldr q6, [x23, #0x30]\n"
"166:" // Height 5: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2399,8 +2401,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"169:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 170f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2437,35 +2439,35 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"172:" // Height 5: Multiply loop: Main loop head
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ecc8 // bfmmla v8.4s, v6.8h, v7.8h\n"
+ "sub x27, x27, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v4.2d, v5.2d, v0.2d\n"
"trn2 v5.2d, v5.2d, v0.2d\n"
"ldr q0, [x12, #0x10]\n"
+ ".inst 0x6e47ecc8 // bfmmla v8.4s, v6.8h, v7.8h\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x11, #0x0]\n"
".inst 0x6e40eccc // bfmmla v12.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec54 // bfmmla v20.4s, v2.8h, v0.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e40ec9c // bfmmla v28.4s, v4.8h, v0.8h\n"
"ldr q0, [x11, #0x10]\n"
".inst 0x6e47ecc9 // bfmmla v9.4s, v6.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x0]\n"
- "add x25, x25, #0x10\n"
".inst 0x6e40eccd // bfmmla v13.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec55 // bfmmla v21.4s, v2.8h, v0.8h\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
".inst 0x6e40ec9d // bfmmla v29.4s, v4.8h, v0.8h\n"
"ldr q0, [x10, #0x10]\n"
".inst 0x6e47ecca // bfmmla v10.4s, v6.8h, v7.8h\n"
- "add x22, x22, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x9, #0x0]\n"
@@ -2483,17 +2485,17 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e40ec9f // bfmmla v31.4s, v4.8h, v0.8h\n"
"ldr q0, [x12, #0x30]\n"
"ldr q4, [x23, #0x0]\n"
+ "add x12, x12, #0x40\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
"ldr q6, [x11, #0x20]\n"
- "add x12, x12, #0x40\n"
".inst 0x6e40ec2c // bfmmla v12.4s, v1.8h, v0.8h\n"
".inst 0x6e40ec74 // bfmmla v20.4s, v3.8h, v0.8h\n"
".inst 0x6e40ecbc // bfmmla v28.4s, v5.8h, v0.8h\n"
"ldr q0, [x11, #0x30]\n"
- ".inst 0x6e46ec29 // bfmmla v9.4s, v1.8h, v6.8h\n"
"add x11, x11, #0x40\n"
+ ".inst 0x6e46ec29 // bfmmla v9.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec71 // bfmmla v17.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecb9 // bfmmla v25.4s, v5.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
@@ -2501,8 +2503,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e40ec75 // bfmmla v21.4s, v3.8h, v0.8h\n"
".inst 0x6e40ecbd // bfmmla v29.4s, v5.8h, v0.8h\n"
"ldr q0, [x10, #0x30]\n"
- ".inst 0x6e46ec2a // bfmmla v10.4s, v1.8h, v6.8h\n"
"add x10, x10, #0x40\n"
+ ".inst 0x6e46ec2a // bfmmla v10.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec72 // bfmmla v18.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecba // bfmmla v26.4s, v5.8h, v6.8h\n"
"ldr q6, [x9, #0x20]\n"
@@ -2525,31 +2527,31 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"173:" // Height 5: Multiply loop: Single iteration only
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ecc8 // bfmmla v8.4s, v6.8h, v7.8h\n"
+ "sub x27, x27, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v0.2d\n"
"trn2 v5.2d, v5.2d, v0.2d\n"
"ldr q0, [x12, #0x10]\n"
+ ".inst 0x6e47ecc8 // bfmmla v8.4s, v6.8h, v7.8h\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x11, #0x0]\n"
".inst 0x6e40eccc // bfmmla v12.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec54 // bfmmla v20.4s, v2.8h, v0.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e40ec9c // bfmmla v28.4s, v4.8h, v0.8h\n"
"ldr q0, [x11, #0x10]\n"
".inst 0x6e47ecc9 // bfmmla v9.4s, v6.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x0]\n"
- "add x24, x24, #0x10\n"
".inst 0x6e40eccd // bfmmla v13.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec55 // bfmmla v21.4s, v2.8h, v0.8h\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6e40ec9d // bfmmla v29.4s, v4.8h, v0.8h\n"
"ldr q0, [x10, #0x10]\n"
".inst 0x6e47ecca // bfmmla v10.4s, v6.8h, v7.8h\n"
@@ -2568,8 +2570,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e40ec57 // bfmmla v23.4s, v2.8h, v0.8h\n"
".inst 0x6e40ec9f // bfmmla v31.4s, v4.8h, v0.8h\n"
"ldr q2, [x12, #0x30]\n"
- ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
"add x12, x12, #0x40\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
"ldr q0, [x11, #0x20]\n"
@@ -2577,8 +2579,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e42ec74 // bfmmla v20.4s, v3.8h, v2.8h\n"
".inst 0x6e42ecbc // bfmmla v28.4s, v5.8h, v2.8h\n"
"ldr q2, [x11, #0x30]\n"
- ".inst 0x6e40ec29 // bfmmla v9.4s, v1.8h, v0.8h\n"
"add x11, x11, #0x40\n"
+ ".inst 0x6e40ec29 // bfmmla v9.4s, v1.8h, v0.8h\n"
".inst 0x6e40ec71 // bfmmla v17.4s, v3.8h, v0.8h\n"
".inst 0x6e40ecb9 // bfmmla v25.4s, v5.8h, v0.8h\n"
"ldr q0, [x10, #0x20]\n"
@@ -2586,8 +2588,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e42ec75 // bfmmla v21.4s, v3.8h, v2.8h\n"
".inst 0x6e42ecbd // bfmmla v29.4s, v5.8h, v2.8h\n"
"ldr q2, [x10, #0x30]\n"
- ".inst 0x6e40ec2a // bfmmla v10.4s, v1.8h, v0.8h\n"
"add x10, x10, #0x40\n"
+ ".inst 0x6e40ec2a // bfmmla v10.4s, v1.8h, v0.8h\n"
".inst 0x6e40ec72 // bfmmla v18.4s, v3.8h, v0.8h\n"
".inst 0x6e40ecba // bfmmla v26.4s, v5.8h, v0.8h\n"
"ldr q0, [x9, #0x20]\n"
@@ -2607,29 +2609,29 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp x27, #0x4\n"
"blt 176f\n"
"175:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x4\n"
+ "ldr d3, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "cmp x27, #0x4\n"
"ldr d0, [x22], #0x8\n"
"ldr q1, [x12, #0x0]\n"
- "trn1 v2.2d, v0.2d, v2.2d\n"
- ".inst 0x6e41ec88 // bfmmla v8.4s, v4.8h, v1.8h\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v3.2d, v2.2d\n"
+ "trn1 v2.2d, v0.2d, v5.2d\n"
"ldr q0, [x12, #0x10]\n"
+ "add x12, x12, #0x20\n"
+ ".inst 0x6e41ec88 // bfmmla v8.4s, v4.8h, v1.8h\n"
".inst 0x6e41ec70 // bfmmla v16.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec58 // bfmmla v24.4s, v2.8h, v1.8h\n"
"ldr q1, [x11, #0x0]\n"
".inst 0x6e40ec8c // bfmmla v12.4s, v4.8h, v0.8h\n"
".inst 0x6e40ec74 // bfmmla v20.4s, v3.8h, v0.8h\n"
- "cmp x27, #0x4\n"
- "add x12, x12, #0x20\n"
".inst 0x6e40ec5c // bfmmla v28.4s, v2.8h, v0.8h\n"
"ldr q0, [x11, #0x10]\n"
- ".inst 0x6e41ec89 // bfmmla v9.4s, v4.8h, v1.8h\n"
"add x11, x11, #0x20\n"
+ ".inst 0x6e41ec89 // bfmmla v9.4s, v4.8h, v1.8h\n"
".inst 0x6e41ec71 // bfmmla v17.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec59 // bfmmla v25.4s, v2.8h, v1.8h\n"
"ldr q1, [x10, #0x0]\n"
@@ -2637,8 +2639,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e40ec75 // bfmmla v21.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec5d // bfmmla v29.4s, v2.8h, v0.8h\n"
"ldr q0, [x10, #0x10]\n"
- ".inst 0x6e41ec8a // bfmmla v10.4s, v4.8h, v1.8h\n"
"add x10, x10, #0x20\n"
+ ".inst 0x6e41ec8a // bfmmla v10.4s, v4.8h, v1.8h\n"
".inst 0x6e41ec72 // bfmmla v18.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec5a // bfmmla v26.4s, v2.8h, v1.8h\n"
"ldr q6, [x9, #0x0]\n"
@@ -2646,8 +2648,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e40ec76 // bfmmla v22.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec5e // bfmmla v30.4s, v2.8h, v0.8h\n"
"ldr q0, [x9, #0x10]\n"
- ".inst 0x6e46ec8b // bfmmla v11.4s, v4.8h, v6.8h\n"
"add x9, x9, #0x20\n"
+ ".inst 0x6e46ec8b // bfmmla v11.4s, v4.8h, v6.8h\n"
".inst 0x6e46ec73 // bfmmla v19.4s, v3.8h, v6.8h\n"
".inst 0x6e46ec5b // bfmmla v27.4s, v2.8h, v6.8h\n"
".inst 0x6e40ec8f // bfmmla v15.4s, v4.8h, v0.8h\n"
@@ -2681,12 +2683,12 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"trn1 v3.2d, v3.2d, v4.2d\n"
"trn1 v2.2d, v5.2d, v0.2d\n"
"ldr q1, [x12, #0x10]\n"
+ "add x12, x12, #0x20\n"
".inst 0x6e46ece8 // bfmmla v8.4s, v7.8h, v6.8h\n"
".inst 0x6e46ec70 // bfmmla v16.4s, v3.8h, v6.8h\n"
".inst 0x6e46ec58 // bfmmla v24.4s, v2.8h, v6.8h\n"
"ldr q0, [x11, #0x0]\n"
".inst 0x6e41ecec // bfmmla v12.4s, v7.8h, v1.8h\n"
- "add x12, x12, #0x20\n"
".inst 0x6e41ec74 // bfmmla v20.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec5c // bfmmla v28.4s, v2.8h, v1.8h\n"
"ldr q1, [x11, #0x10]\n"
@@ -2721,20 +2723,20 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 169b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
@@ -2746,9 +2748,9 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 180f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
@@ -2796,111 +2798,111 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"tbz x14, #3, 184f\n"
"st1 { v7.4s }, [x13], #0x10\n"
"st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
"tbz x14, #2, 182f\n"
"st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
"tbz x14, #1, 181f\n"
"str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x14, #0, 188f\n"
"st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
"b 188f\n"
"181:" // Height 5: Partial direct writeback: partial_1_12
"tbz x14, #0, 188f\n"
"str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
"b 188f\n"
"182:" // Height 5: Partial direct writeback: partial_2_8
"tbz x14, #1, 183f\n"
"str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x14, #0, 188f\n"
"st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
"b 188f\n"
"183:" // Height 5: Partial direct writeback: partial_1_8
"tbz x14, #0, 188f\n"
"str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
"b 188f\n"
"184:" // Height 5: Partial direct writeback: partial_4_0
"tbz x14, #2, 186f\n"
"st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x14, #1, 185f\n"
"str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x14, #0, 188f\n"
"st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 188f\n"
"185:" // Height 5: Partial direct writeback: partial_1_4
"tbz x14, #0, 188f\n"
"str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 188f\n"
"186:" // Height 5: Partial direct writeback: partial_2_0
"tbz x14, #1, 187f\n"
"str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x14, #0, 188f\n"
"st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 188f\n"
"187:" // Height 5: Partial direct writeback: partial_1_0
"str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"188:" // Height 5: Partial direct writeback: Done
"b 190f\n"
"189:" // Height 5: Full writeback
@@ -2909,44 +2911,45 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"str q13, [x13, #0x20]\n"
"str q14, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"190:" // Height 5: Writeback done
"subs x14, x14, #0x10\n"
"bgt 154b\n"
"b 230f\n"
"191:" // Height 6
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0x18\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"192:" // Height 6: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 193f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -2959,15 +2962,15 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cbz x15, 194f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x15, x15, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -2990,145 +2993,145 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"194:" // Height 6: no bias
"tbz %x[flags], #0, 205f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "cmp x14, #0x10\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
"bge 203f\n"
"tbz x14, #3, 198f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x14, #2, 196f\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v27.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x14, #1, 195f\n"
"ldr d16, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d6, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x14, #0, 202f\n"
"ld1 { v16.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v6.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 202f\n"
"195:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 202f\n"
"ldr s16, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s6, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 202f\n"
"196:" // Height 6: Partial accumulate: partial_2_8
"tbz x14, #1, 197f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x14, #0, 202f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 202f\n"
"197:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 202f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 202f\n"
"198:" // Height 6: Partial accumulate: partial_4_0
"tbz x14, #2, 200f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x14, #1, 199f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x14, #0, 202f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 202f\n"
"199:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 202f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 202f\n"
"200:" // Height 6: Partial accumulate: partial_2_0
"tbz x14, #1, 201f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x14, #0, 202f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 202f\n"
"201:" // Height 6: Partial accumulate: partial_1_0
"ldr s9, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"202:" // Height 6: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 204f\n"
@@ -3137,26 +3140,26 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x13, #0x10]\n"
"ldr q11, [x13, #0x20]\n"
"ldr q16, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q25, [x23, #0x0]\n"
+ "ldr q26, [x23, #0x10]\n"
+ "ldr q27, [x23, #0x20]\n"
+ "ldr q6, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"204:" // Height 6: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -3212,8 +3215,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"207:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 208f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -3254,36 +3257,36 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"210:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
"cmp x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x12, #0x10]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "add x21, x21, #0x10\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x11, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q6, [x11, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x0]\n"
- "add x24, x24, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "add x21, x21, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x9, #0x0]\n"
@@ -3301,17 +3304,17 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"ldr q0, [x12, #0x30]\n"
"ldr q4, [x23, #0x0]\n"
+ "add x12, x12, #0x40\n"
".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
"ldr q6, [x11, #0x20]\n"
- "add x12, x12, #0x40\n"
".inst 0x6e40ec2c // bfmmla v12.4s, v1.8h, v0.8h\n"
".inst 0x6e40ec74 // bfmmla v20.4s, v3.8h, v0.8h\n"
".inst 0x6e40ecbc // bfmmla v28.4s, v5.8h, v0.8h\n"
"ldr q0, [x11, #0x30]\n"
- ".inst 0x6e46ec29 // bfmmla v9.4s, v1.8h, v6.8h\n"
"add x11, x11, #0x40\n"
+ ".inst 0x6e46ec29 // bfmmla v9.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec71 // bfmmla v17.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecb9 // bfmmla v25.4s, v5.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
@@ -3319,8 +3322,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e40ec75 // bfmmla v21.4s, v3.8h, v0.8h\n"
".inst 0x6e40ecbd // bfmmla v29.4s, v5.8h, v0.8h\n"
"ldr q0, [x10, #0x30]\n"
- ".inst 0x6e46ec2a // bfmmla v10.4s, v1.8h, v6.8h\n"
"add x10, x10, #0x40\n"
+ ".inst 0x6e46ec2a // bfmmla v10.4s, v1.8h, v6.8h\n"
".inst 0x6e46ec72 // bfmmla v18.4s, v3.8h, v6.8h\n"
".inst 0x6e46ecba // bfmmla v26.4s, v5.8h, v6.8h\n"
"ldr q6, [x9, #0x20]\n"
@@ -3344,32 +3347,32 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"211:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x12, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x11, #0x0]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q6, [x11, #0x10]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x0]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x10]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
@@ -3388,8 +3391,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"ldr q2, [x12, #0x30]\n"
- ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
"add x12, x12, #0x40\n"
+ ".inst 0x6e47ec28 // bfmmla v8.4s, v1.8h, v7.8h\n"
".inst 0x6e47ec70 // bfmmla v16.4s, v3.8h, v7.8h\n"
".inst 0x6e47ecb8 // bfmmla v24.4s, v5.8h, v7.8h\n"
"ldr q0, [x11, #0x20]\n"
@@ -3397,8 +3400,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e42ec74 // bfmmla v20.4s, v3.8h, v2.8h\n"
".inst 0x6e42ecbc // bfmmla v28.4s, v5.8h, v2.8h\n"
"ldr q2, [x11, #0x30]\n"
- ".inst 0x6e40ec29 // bfmmla v9.4s, v1.8h, v0.8h\n"
"add x11, x11, #0x40\n"
+ ".inst 0x6e40ec29 // bfmmla v9.4s, v1.8h, v0.8h\n"
".inst 0x6e40ec71 // bfmmla v17.4s, v3.8h, v0.8h\n"
".inst 0x6e40ecb9 // bfmmla v25.4s, v5.8h, v0.8h\n"
"ldr q0, [x10, #0x20]\n"
@@ -3406,8 +3409,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e42ec75 // bfmmla v21.4s, v3.8h, v2.8h\n"
".inst 0x6e42ecbd // bfmmla v29.4s, v5.8h, v2.8h\n"
"ldr q2, [x10, #0x30]\n"
- ".inst 0x6e40ec2a // bfmmla v10.4s, v1.8h, v0.8h\n"
"add x10, x10, #0x40\n"
+ ".inst 0x6e40ec2a // bfmmla v10.4s, v1.8h, v0.8h\n"
".inst 0x6e40ec72 // bfmmla v18.4s, v3.8h, v0.8h\n"
".inst 0x6e40ecba // bfmmla v26.4s, v5.8h, v0.8h\n"
"ldr q0, [x9, #0x20]\n"
@@ -3427,25 +3430,25 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp x27, #0x4\n"
"blt 214f\n"
"213:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x4\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d5, [x24], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
"cmp x27, #0x4\n"
- "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"ldr d0, [x21], #0x8\n"
- "trn1 v2.2d, v1.2d, v0.2d\n"
"ldr q1, [x12, #0x0]\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v5.2d, v3.2d\n"
+ "trn1 v2.2d, v2.2d, v0.2d\n"
"ldr q0, [x12, #0x10]\n"
+ "add x12, x12, #0x20\n"
".inst 0x6e41ec88 // bfmmla v8.4s, v4.8h, v1.8h\n"
".inst 0x6e41ec70 // bfmmla v16.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec58 // bfmmla v24.4s, v2.8h, v1.8h\n"
"ldr q1, [x11, #0x0]\n"
".inst 0x6e40ec8c // bfmmla v12.4s, v4.8h, v0.8h\n"
- "add x12, x12, #0x20\n"
".inst 0x6e40ec74 // bfmmla v20.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec5c // bfmmla v28.4s, v2.8h, v0.8h\n"
"ldr q0, [x11, #0x10]\n"
@@ -3503,19 +3506,19 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"ldr q0, [x12, #0x0]\n"
"trn1 v7.2d, v1.2d, v2.2d\n"
"trn1 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e40ece8 // bfmmla v8.4s, v7.8h, v0.8h\n"
"trn1 v2.2d, v5.2d, v6.2d\n"
"ldr q1, [x12, #0x10]\n"
+ "add x12, x12, #0x20\n"
+ ".inst 0x6e40ece8 // bfmmla v8.4s, v7.8h, v0.8h\n"
".inst 0x6e40ec70 // bfmmla v16.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec58 // bfmmla v24.4s, v2.8h, v0.8h\n"
"ldr q0, [x11, #0x0]\n"
".inst 0x6e41ecec // bfmmla v12.4s, v7.8h, v1.8h\n"
".inst 0x6e41ec74 // bfmmla v20.4s, v3.8h, v1.8h\n"
- "add x12, x12, #0x20\n"
".inst 0x6e41ec5c // bfmmla v28.4s, v2.8h, v1.8h\n"
"ldr q1, [x11, #0x10]\n"
- ".inst 0x6e40ece9 // bfmmla v9.4s, v7.8h, v0.8h\n"
"add x11, x11, #0x20\n"
+ ".inst 0x6e40ece9 // bfmmla v9.4s, v7.8h, v0.8h\n"
".inst 0x6e40ec71 // bfmmla v17.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec59 // bfmmla v25.4s, v2.8h, v0.8h\n"
"ldr q0, [x10, #0x0]\n"
@@ -3523,8 +3526,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e41ec75 // bfmmla v21.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec5d // bfmmla v29.4s, v2.8h, v1.8h\n"
"ldr q1, [x10, #0x10]\n"
- ".inst 0x6e40ecea // bfmmla v10.4s, v7.8h, v0.8h\n"
"add x10, x10, #0x20\n"
+ ".inst 0x6e40ecea // bfmmla v10.4s, v7.8h, v0.8h\n"
".inst 0x6e40ec72 // bfmmla v18.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec5a // bfmmla v26.4s, v2.8h, v0.8h\n"
"ldr q0, [x9, #0x0]\n"
@@ -3532,8 +3535,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
".inst 0x6e41ec76 // bfmmla v22.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec5e // bfmmla v30.4s, v2.8h, v1.8h\n"
"ldr q6, [x9, #0x10]\n"
- ".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
"add x9, x9, #0x20\n"
+ ".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
".inst 0x6e40ec73 // bfmmla v19.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec5b // bfmmla v27.4s, v2.8h, v0.8h\n"
".inst 0x6e46ecef // bfmmla v15.4s, v7.8h, v6.8h\n"
@@ -3545,21 +3548,21 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 207b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
@@ -3575,9 +3578,9 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 218f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
@@ -3633,127 +3636,127 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"tbz x14, #3, 222f\n"
"st1 { v7.4s }, [x13], #0x10\n"
"st1 { v12.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v25.4s }, [x22], #0x10\n"
"tbz x14, #2, 220f\n"
"st1 { v13.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v29.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
"tbz x14, #1, 219f\n"
"str d14, [x13], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
+ "str d27, [x22], #0x8\n"
"tbz x14, #0, 226f\n"
"st1 { v14.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
+ "st1 { v27.s }[2], [x22]\n"
"b 226f\n"
"219:" // Height 6: Partial direct writeback: partial_1_12
"tbz x14, #0, 226f\n"
"str s14, [x13, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
+ "str s27, [x22, #0x0]\n"
"b 226f\n"
"220:" // Height 6: Partial direct writeback: partial_2_8
"tbz x14, #1, 221f\n"
"str d13, [x13], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d29, [x23], #0x8\n"
+ "str d26, [x22], #0x8\n"
"tbz x14, #0, 226f\n"
"st1 { v13.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v29.s }[2], [x23]\n"
+ "st1 { v26.s }[2], [x22]\n"
"b 226f\n"
"221:" // Height 6: Partial direct writeback: partial_1_8
"tbz x14, #0, 226f\n"
"str s13, [x13, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s29, [x23, #0x0]\n"
+ "str s26, [x22, #0x0]\n"
"b 226f\n"
"222:" // Height 6: Partial direct writeback: partial_4_0
"tbz x14, #2, 224f\n"
"st1 { v7.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
"tbz x14, #1, 223f\n"
"str d12, [x13], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
+ "str d25, [x22], #0x8\n"
"tbz x14, #0, 226f\n"
"st1 { v12.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "st1 { v25.s }[2], [x22]\n"
"b 226f\n"
"223:" // Height 6: Partial direct writeback: partial_1_4
"tbz x14, #0, 226f\n"
"str s12, [x13, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
+ "str s25, [x22, #0x0]\n"
"b 226f\n"
"224:" // Height 6: Partial direct writeback: partial_2_0
"tbz x14, #1, 225f\n"
"str d7, [x13], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
"tbz x14, #0, 226f\n"
"st1 { v7.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
+ "st1 { v24.s }[2], [x22]\n"
"b 226f\n"
"225:" // Height 6: Partial direct writeback: partial_1_0
"str s7, [x13, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
+ "str s24, [x22, #0x0]\n"
"226:" // Height 6: Partial direct writeback: Done
"b 228f\n"
"227:" // Height 6: Full writeback
@@ -3762,26 +3765,26 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"str q13, [x13, #0x20]\n"
"str q14, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q23, [x22, #0x0]\n"
- "str q28, [x22, #0x10]\n"
- "str q29, [x22, #0x20]\n"
- "str q30, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
+ "str q23, [x23, #0x0]\n"
+ "str q28, [x23, #0x10]\n"
+ "str q29, [x23, #0x20]\n"
+ "str q30, [x23, #0x30]\n"
+ "str q24, [x22, #0x0]\n"
+ "str q25, [x22, #0x10]\n"
+ "str q26, [x22, #0x20]\n"
+ "str q27, [x22, #0x30]\n"
"228:" // Height 6: Writeback done
"subs x14, x14, #0x10\n"
"bgt 192b\n"
@@ -3797,8 +3800,8 @@ void a64_ffhybrid_bf16fp32_mmla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"230:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
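
Note on the constraint-list change closing this kernel: `output_ptr` and `bias` are no longer bound as inline-asm register operands; both become fields of `KernelArgs`, read inside the asm with `ldr xN, [%x[args_ptr], %[offsetof_...]]` instead of `mov xN, %x[...]`. That hands two general-purpose operands back to the compiler, consistent with the writeback row pointers shifting from x25..x21 to x26..x22 throughout the kernel. Below is a minimal AArch64 sketch of the pattern; the struct and function names are illustrative only, not the library's exact API, and the interleaved `add`/`ldr` reordering inside the loops above is instruction scheduling only, with no change in results.

    #include <cstddef>

    struct KernelArgs {
        void *output_ptr;   // previously a separate "+&r" asm operand
        const float *bias;  // previously a separate "r" asm operand
    };

    void run_kernel_sketch(void *out, const float *bias) {
        KernelArgs ka = {out, bias};
        __asm__ __volatile__(
            // Fetch pointers from the args struct rather than binding
            // them as register operands, as the updated kernels do.
            "ldr x13, [%x[args_ptr], %[off_out]]\n"
            "ldr x15, [%x[args_ptr], %[off_bias]]\n"
            :
            : [args_ptr] "r"(&ka),
              [off_out] "I"(offsetof(KernelArgs, output_ptr)),
              [off_bias] "I"(offsetof(KernelArgs, bias))
            : "cc", "memory", "x13", "x15");
    }

The same migration appears in every generated kernel touched by this diff.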
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp
index 4924b3a549..20138ffe7e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -81,7 +81,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 32, 1> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 32, 1> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
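
Note on the `transforms` hunk above: `StdTransformsFixed` now names the LHS operand type separately from the RHS operand type, where one type previously served both sides, presumably so kernels whose two inputs differ in element type can share the transform machinery. A minimal sketch of the widened signature, assuming a simplified template (the real arm_gemm class carries more parameters and members):

    template <typename TLhs, typename TRhs, typename TResult,
              unsigned int height, unsigned int width, unsigned int block_by>
    class StdTransformsFixedSketch {
        // Interleave/merge helpers would live here; keeping TLhs and
        // TRhs distinct lets the LHS and RHS sides carry independent
        // element types.
    };

    // Mirroring the hunk above:
    //   before: StdTransformsFixed<rhs_operand_type, result_type, 6, 32, 1>
    //   after:  StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 32, 1>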
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp
index 8038612200..93e5e051f8 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp16_mla_6x32/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,18 +49,19 @@ void a64_ffhybrid_fp16_mla_6x32 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const __fp16 *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -81,6 +82,7 @@ void a64_ffhybrid_fp16_mla_6x32 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -105,19 +107,19 @@ void a64_ffhybrid_fp16_mla_6x32 (
"bgt 101f\n"
"beq 51f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x18\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
"bgt 3f\n"
"cmp x14, #0x10\n"
"mov x9, x12\n"
@@ -264,8 +266,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"24:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 25f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -289,6 +291,9 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr q17, [x10, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"ldr q16, [x9, #0x0]\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"ldr q17, [x12, #0x10]\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
@@ -339,30 +344,29 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr q16, [x9, #0x60]\n"
"fmla v10.8h, v17.8h, v0.h[6]\n"
"ldr q17, [x12, #0x70]\n"
+ "add x12, x12, #0x80\n"
"fmla v11.8h, v16.8h, v0.h[6]\n"
"ldr q16, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
"fmla v8.8h, v17.8h, v0.h[7]\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v9.8h, v16.8h, v0.h[7]\n"
"ldr q16, [x9, #0x70]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "add x9, x9, #0x80\n"
"fmla v10.8h, v17.8h, v0.h[7]\n"
+ "ldr q6, [x12, #0x0]\n"
"fmla v11.8h, v16.8h, v0.h[7]\n"
- "add x26, x26, #0x10\n"
"ldr q0, [x26, #0x0]\n"
- "add x12, x12, #0x80\n"
- "ldr q6, [x12, #0x0]\n"
- "add x11, x11, #0x80\n"
"ldr q7, [x11, #0x0]\n"
- "add x10, x10, #0x80\n"
- "add x9, x9, #0x80\n"
"bge 27b\n"
"28:" // Height 1: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q17, [x10, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"ldr q16, [x9, #0x0]\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"ldr q17, [x12, #0x10]\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
@@ -413,37 +417,35 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr q16, [x9, #0x60]\n"
"fmla v10.8h, v17.8h, v0.h[6]\n"
"ldr q17, [x12, #0x70]\n"
+ "add x12, x12, #0x80\n"
"fmla v11.8h, v16.8h, v0.h[6]\n"
"ldr q16, [x11, #0x70]\n"
+ "add x11, x11, #0x80\n"
"fmla v8.8h, v17.8h, v0.h[7]\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
"fmla v9.8h, v16.8h, v0.h[7]\n"
"ldr q16, [x9, #0x70]\n"
- "sub x27, x27, #0x8\n"
+ "add x9, x9, #0x80\n"
"fmla v10.8h, v17.8h, v0.h[7]\n"
"fmla v11.8h, v16.8h, v0.h[7]\n"
- "add x26, x26, #0x10\n"
- "add x12, x12, #0x80\n"
- "add x11, x11, #0x80\n"
- "add x10, x10, #0x80\n"
- "add x9, x9, #0x80\n"
"29:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 31f\n"
"30:" // Height 1: Multiply loop: Odd block loop
"ldr h0, [x26], #0x2\n"
- "ldr q16, [x12, #0x0]\n"
- "fmla v8.8h, v16.8h, v0.h[0]\n"
+ "ldr q17, [x12, #0x0]\n"
"sub x27, x27, #0x1\n"
- "ldr q17, [x11, #0x0]\n"
- "ldr q16, [x10, #0x0]\n"
- "fmla v9.8h, v17.8h, v0.h[0]\n"
- "fmla v10.8h, v16.8h, v0.h[0]\n"
- "ldr q16, [x9, #0x0]\n"
- "fmla v11.8h, v16.8h, v0.h[0]\n"
"add x12, x12, #0x10\n"
+ "ldr q16, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
+ "fmla v8.8h, v17.8h, v0.h[0]\n"
+ "ldr q17, [x10, #0x0]\n"
"add x10, x10, #0x10\n"
+ "fmla v9.8h, v16.8h, v0.h[0]\n"
+ "ldr q16, [x9, #0x0]\n"
"add x9, x9, #0x10\n"
+ "fmla v10.8h, v17.8h, v0.h[0]\n"
+ "fmla v11.8h, v16.8h, v0.h[0]\n"
"cbnz x27, 30b\n"
"31:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -451,9 +453,9 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 24b\n"
"tbz %x[flags], #1, 32f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.8h }, [x21]\n"
"ld1r { v16.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v17.8h\n"
"fmin v9.8h, v9.8h, v17.8h\n"
@@ -572,19 +574,19 @@ void a64_ffhybrid_fp16_mla_6x32 (
"b 302f\n"
"51:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"52:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x18\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
"bgt 53f\n"
"cmp x14, #0x10\n"
"mov x9, x12\n"
@@ -597,159 +599,159 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cbz x15, 54f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x15, x15, #0x40\n"
"b 73f\n"
"54:" // Height 2: no bias
"tbz %x[flags], #0, 72f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x14, #0x20\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
"bge 71f\n"
"tbz x14, #4, 62f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
"ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
"tbz x14, #3, 58f\n"
"ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
"tbz x14, #2, 56f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"tbz x14, #1, 55f\n"
"ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
"tbz x14, #0, 70f\n"
"ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
+ "ld1 { v15.h }[6], [x26]\n"
"b 70f\n"
"55:" // Height 2: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x14, #0, 70f\n"
"ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
+ "ld1 { v15.h }[4], [x26]\n"
"b 70f\n"
"56:" // Height 2: Partial accumulate: partial_2_24
"tbz x14, #1, 57f\n"
"ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
"tbz x14, #0, 70f\n"
"ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
+ "ld1 { v15.h }[2], [x26]\n"
"b 70f\n"
"57:" // Height 2: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x14, #0, 70f\n"
"ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
"b 70f\n"
"58:" // Height 2: Partial accumulate: partial_4_16
"tbz x14, #2, 60f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"tbz x14, #1, 59f\n"
"ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
"tbz x14, #0, 70f\n"
"ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
+ "ld1 { v14.h }[6], [x26]\n"
"b 70f\n"
"59:" // Height 2: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x14, #0, 70f\n"
"ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
+ "ld1 { v14.h }[4], [x26]\n"
"b 70f\n"
"60:" // Height 2: Partial accumulate: partial_2_16
"tbz x14, #1, 61f\n"
"ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
"tbz x14, #0, 70f\n"
"ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
+ "ld1 { v14.h }[2], [x26]\n"
"b 70f\n"
"61:" // Height 2: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x14, #0, 70f\n"
"ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
"b 70f\n"
"62:" // Height 2: Partial accumulate: partial_8_0
"tbz x14, #3, 66f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
"tbz x14, #2, 64f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"tbz x14, #1, 63f\n"
"ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
"tbz x14, #0, 70f\n"
"ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
+ "ld1 { v13.h }[6], [x26]\n"
"b 70f\n"
"63:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x14, #0, 70f\n"
"ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
+ "ld1 { v13.h }[4], [x26]\n"
"b 70f\n"
"64:" // Height 2: Partial accumulate: partial_2_8
"tbz x14, #1, 65f\n"
"ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
"tbz x14, #0, 70f\n"
"ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
+ "ld1 { v13.h }[2], [x26]\n"
"b 70f\n"
"65:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x14, #0, 70f\n"
"ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
"b 70f\n"
"66:" // Height 2: Partial accumulate: partial_4_0
"tbz x14, #2, 68f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"tbz x14, #1, 67f\n"
"ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
"tbz x14, #0, 70f\n"
"ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
+ "ld1 { v12.h }[6], [x26]\n"
"b 70f\n"
"67:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x14, #0, 70f\n"
"ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
+ "ld1 { v12.h }[4], [x26]\n"
"b 70f\n"
"68:" // Height 2: Partial accumulate: partial_2_0
"tbz x14, #1, 69f\n"
"ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
"tbz x14, #0, 70f\n"
"ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
+ "ld1 { v12.h }[2], [x26]\n"
"b 70f\n"
"69:" // Height 2: Partial accumulate: partial_1_0
"ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
"70:" // Height 2: Partial accumulate: Done
"sub x13, x13, x20\n"
@@ -759,10 +761,10 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"b 73f\n"
"72:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -777,8 +779,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"74:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 75f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -809,15 +811,15 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr q16, [x9, #0x0]\n"
+ "add x26, x26, #0x10\n"
"cmp x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"fmla v14.8h, v17.8h, v1.h[0]\n"
"ldr q17, [x12, #0x10]\n"
- "add x26, x26, #0x10\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
"fmla v15.8h, v16.8h, v1.h[0]\n"
"ldr q16, [x11, #0x10]\n"
- "add x25, x25, #0x10\n"
"fmla v8.8h, v17.8h, v0.h[1]\n"
"fmla v12.8h, v17.8h, v1.h[1]\n"
"ldr q17, [x10, #0x10]\n"
@@ -918,10 +920,10 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr q16, [x9, #0x0]\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"fmla v14.8h, v17.8h, v1.h[0]\n"
"ldr q17, [x12, #0x10]\n"
- "add x25, x25, #0x10\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
"fmla v15.8h, v16.8h, v1.h[0]\n"
"ldr q16, [x11, #0x10]\n"
@@ -1019,20 +1021,20 @@ void a64_ffhybrid_fp16_mla_6x32 (
"sub x27, x27, #0x1\n"
"ldr q17, [x12, #0x0]\n"
"ldr q16, [x11, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v8.8h, v17.8h, v1.h[0]\n"
"fmla v12.8h, v17.8h, v0.h[0]\n"
"ldr q17, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
"fmla v9.8h, v16.8h, v1.h[0]\n"
"fmla v13.8h, v16.8h, v0.h[0]\n"
"ldr q16, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
"fmla v10.8h, v17.8h, v1.h[0]\n"
"fmla v14.8h, v17.8h, v0.h[0]\n"
- "add x12, x12, #0x10\n"
- "add x11, x11, #0x10\n"
"fmla v11.8h, v16.8h, v1.h[0]\n"
"fmla v15.8h, v16.8h, v0.h[0]\n"
- "add x10, x10, #0x10\n"
- "add x9, x9, #0x10\n"
"cbnz x27, 80b\n"
"81:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -1040,11 +1042,11 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 74b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
"tbz %x[flags], #1, 82f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.8h }, [x21]\n"
"ld1r { v16.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v17.8h\n"
"fmin v9.8h, v9.8h, v17.8h\n"
@@ -1068,127 +1070,127 @@ void a64_ffhybrid_fp16_mla_6x32 (
"tbz x14, #4, 90f\n"
"st1 { v8.8h }, [x13], #0x10\n"
"st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
"tbz x14, #3, 86f\n"
"st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
"tbz x14, #2, 84f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d15, [x26], #0x8\n"
"tbz x14, #1, 83f\n"
"st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
"tbz x14, #0, 98f\n"
"st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
+ "st1 { v15.h }[6], [x26]\n"
"b 98f\n"
"83:" // Height 2: Partial direct writeback: partial_1_28
"tbz x14, #0, 98f\n"
"st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
+ "st1 { v15.h }[4], [x26]\n"
"b 98f\n"
"84:" // Height 2: Partial direct writeback: partial_2_24
"tbz x14, #1, 85f\n"
"str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
+ "str s15, [x26], #0x4\n"
"tbz x14, #0, 98f\n"
"st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
+ "st1 { v15.h }[2], [x26]\n"
"b 98f\n"
"85:" // Height 2: Partial direct writeback: partial_1_24
"tbz x14, #0, 98f\n"
"str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
"b 98f\n"
"86:" // Height 2: Partial direct writeback: partial_4_16
"tbz x14, #2, 88f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d14, [x26], #0x8\n"
"tbz x14, #1, 87f\n"
"st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
"tbz x14, #0, 98f\n"
"st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
+ "st1 { v14.h }[6], [x26]\n"
"b 98f\n"
"87:" // Height 2: Partial direct writeback: partial_1_20
"tbz x14, #0, 98f\n"
"st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
+ "st1 { v14.h }[4], [x26]\n"
"b 98f\n"
"88:" // Height 2: Partial direct writeback: partial_2_16
"tbz x14, #1, 89f\n"
"str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
+ "str s14, [x26], #0x4\n"
"tbz x14, #0, 98f\n"
"st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
+ "st1 { v14.h }[2], [x26]\n"
"b 98f\n"
"89:" // Height 2: Partial direct writeback: partial_1_16
"tbz x14, #0, 98f\n"
"str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
"b 98f\n"
"90:" // Height 2: Partial direct writeback: partial_8_0
"tbz x14, #3, 94f\n"
"st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
"tbz x14, #2, 92f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x14, #1, 91f\n"
"st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
"tbz x14, #0, 98f\n"
"st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
+ "st1 { v13.h }[6], [x26]\n"
"b 98f\n"
"91:" // Height 2: Partial direct writeback: partial_1_12
"tbz x14, #0, 98f\n"
"st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
+ "st1 { v13.h }[4], [x26]\n"
"b 98f\n"
"92:" // Height 2: Partial direct writeback: partial_2_8
"tbz x14, #1, 93f\n"
"str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
+ "str s13, [x26], #0x4\n"
"tbz x14, #0, 98f\n"
"st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
+ "st1 { v13.h }[2], [x26]\n"
"b 98f\n"
"93:" // Height 2: Partial direct writeback: partial_1_8
"tbz x14, #0, 98f\n"
"str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
"b 98f\n"
"94:" // Height 2: Partial direct writeback: partial_4_0
"tbz x14, #2, 96f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x14, #1, 95f\n"
"st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
"tbz x14, #0, 98f\n"
"st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
+ "st1 { v12.h }[6], [x26]\n"
"b 98f\n"
"95:" // Height 2: Partial direct writeback: partial_1_4
"tbz x14, #0, 98f\n"
"st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
+ "st1 { v12.h }[4], [x26]\n"
"b 98f\n"
"96:" // Height 2: Partial direct writeback: partial_2_0
"tbz x14, #1, 97f\n"
"str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
+ "str s12, [x26], #0x4\n"
"tbz x14, #0, 98f\n"
"st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
+ "st1 { v12.h }[2], [x26]\n"
"b 98f\n"
"97:" // Height 2: Partial direct writeback: partial_1_0
"str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
"98:" // Height 2: Partial direct writeback: Done
"b 100f\n"
"99:" // Height 2: Full writeback
@@ -1197,29 +1199,29 @@ void a64_ffhybrid_fp16_mla_6x32 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
"100:" // Height 2: Writeback done
"subs x14, x14, #0x20\n"
"bgt 52b\n"
"b 302f\n"
"101:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"102:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x18\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
"bgt 103f\n"
"cmp x14, #0x10\n"
"mov x9, x12\n"
@@ -1232,197 +1234,197 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cbz x15, 104f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 123f\n"
"104:" // Height 3: no bias
"tbz %x[flags], #0, 122f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
"cmp x14, #0x20\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"bge 121f\n"
"tbz x14, #4, 112f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
"ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
"tbz x14, #3, 108f\n"
"ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
"tbz x14, #2, 106f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x14, #1, 105f\n"
"ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
"tbz x14, #0, 120f\n"
"ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
"b 120f\n"
"105:" // Height 3: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x14, #0, 120f\n"
"ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
"b 120f\n"
"106:" // Height 3: Partial accumulate: partial_2_24
"tbz x14, #1, 107f\n"
"ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
"tbz x14, #0, 120f\n"
"ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
"b 120f\n"
"107:" // Height 3: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x14, #0, 120f\n"
"ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
"b 120f\n"
"108:" // Height 3: Partial accumulate: partial_4_16
"tbz x14, #2, 110f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x14, #1, 109f\n"
"ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
"tbz x14, #0, 120f\n"
"ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
"b 120f\n"
"109:" // Height 3: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x14, #0, 120f\n"
"ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
"b 120f\n"
"110:" // Height 3: Partial accumulate: partial_2_16
"tbz x14, #1, 111f\n"
"ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
"tbz x14, #0, 120f\n"
"ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
"b 120f\n"
"111:" // Height 3: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x14, #0, 120f\n"
"ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
"b 120f\n"
"112:" // Height 3: Partial accumulate: partial_8_0
"tbz x14, #3, 116f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
"tbz x14, #2, 114f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x14, #1, 113f\n"
"ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
"tbz x14, #0, 120f\n"
"ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
"b 120f\n"
"113:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x14, #0, 120f\n"
"ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
"b 120f\n"
"114:" // Height 3: Partial accumulate: partial_2_8
"tbz x14, #1, 115f\n"
"ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
"tbz x14, #0, 120f\n"
"ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
"b 120f\n"
"115:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x14, #0, 120f\n"
"ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
"b 120f\n"
"116:" // Height 3: Partial accumulate: partial_4_0
"tbz x14, #2, 118f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
"tbz x14, #1, 117f\n"
"ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
"tbz x14, #0, 120f\n"
"ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
"b 120f\n"
"117:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x14, #0, 120f\n"
"ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
"b 120f\n"
"118:" // Height 3: Partial accumulate: partial_2_0
"tbz x14, #1, 119f\n"
"ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
"tbz x14, #0, 120f\n"
"ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
"b 120f\n"
"119:" // Height 3: Partial accumulate: partial_1_0
"ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
"120:" // Height 3: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 123f\n"
@@ -1431,14 +1433,14 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
"b 123f\n"
"122:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -1457,8 +1459,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"124:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 125f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1489,18 +1491,18 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"ldr q21, [x10, #0x0]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"ldr q20, [x9, #0x0]\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v10.8h, v21.8h, v0.h[0]\n"
"fmla v14.8h, v21.8h, v1.h[0]\n"
- "add x24, x24, #0x10\n"
"fmla v18.8h, v21.8h, v2.h[0]\n"
"ldr q21, [x12, #0x10]\n"
"fmla v11.8h, v20.8h, v0.h[0]\n"
@@ -1599,8 +1601,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v14.8h, v21.8h, v1.h[6]\n"
"fmla v18.8h, v21.8h, v2.h[6]\n"
"ldr q21, [x12, #0x70]\n"
- "fmla v11.8h, v20.8h, v0.h[6]\n"
"add x12, x12, #0x80\n"
+ "fmla v11.8h, v20.8h, v0.h[6]\n"
"fmla v15.8h, v20.8h, v1.h[6]\n"
"fmla v19.8h, v20.8h, v2.h[6]\n"
"ldr q20, [x11, #0x70]\n"
@@ -1609,8 +1611,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v12.8h, v21.8h, v1.h[7]\n"
"fmla v16.8h, v21.8h, v2.h[7]\n"
"ldr q21, [x10, #0x70]\n"
- "fmla v9.8h, v20.8h, v0.h[7]\n"
"add x10, x10, #0x80\n"
+ "fmla v9.8h, v20.8h, v0.h[7]\n"
"fmla v13.8h, v20.8h, v1.h[7]\n"
"fmla v17.8h, v20.8h, v2.h[7]\n"
"ldr q20, [x9, #0x70]\n"
@@ -1740,8 +1742,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v14.8h, v21.8h, v1.h[6]\n"
"fmla v18.8h, v21.8h, v2.h[6]\n"
"ldr q21, [x12, #0x70]\n"
- "fmla v11.8h, v20.8h, v0.h[6]\n"
"add x12, x12, #0x80\n"
+ "fmla v11.8h, v20.8h, v0.h[6]\n"
"fmla v15.8h, v20.8h, v1.h[6]\n"
"fmla v19.8h, v20.8h, v2.h[6]\n"
"ldr q20, [x11, #0x70]\n"
@@ -1750,8 +1752,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v12.8h, v21.8h, v1.h[7]\n"
"fmla v16.8h, v21.8h, v2.h[7]\n"
"ldr q21, [x10, #0x70]\n"
- "fmla v9.8h, v20.8h, v0.h[7]\n"
"add x10, x10, #0x80\n"
+ "fmla v9.8h, v20.8h, v0.h[7]\n"
"fmla v13.8h, v20.8h, v1.h[7]\n"
"fmla v17.8h, v20.8h, v2.h[7]\n"
"ldr q20, [x9, #0x70]\n"
@@ -1770,23 +1772,23 @@ void a64_ffhybrid_fp16_mla_6x32 (
"sub x27, x27, #0x1\n"
"ldr h0, [x24], #0x2\n"
"ldr q21, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "ldr q20, [x11, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v8.8h, v21.8h, v2.h[0]\n"
"fmla v12.8h, v21.8h, v1.h[0]\n"
- "ldr q20, [x11, #0x0]\n"
"fmla v16.8h, v21.8h, v0.h[0]\n"
"ldr q21, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
"fmla v9.8h, v20.8h, v2.h[0]\n"
"fmla v13.8h, v20.8h, v1.h[0]\n"
"fmla v17.8h, v20.8h, v0.h[0]\n"
"ldr q20, [x9, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v10.8h, v21.8h, v2.h[0]\n"
"fmla v14.8h, v21.8h, v1.h[0]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
"fmla v18.8h, v21.8h, v0.h[0]\n"
"fmla v11.8h, v20.8h, v2.h[0]\n"
- "add x9, x9, #0x10\n"
"fmla v15.8h, v20.8h, v1.h[0]\n"
"fmla v19.8h, v20.8h, v0.h[0]\n"
"cbnz x27, 130b\n"
@@ -1796,12 +1798,12 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 124b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"tbz %x[flags], #1, 132f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v21.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.8h }, [x21]\n"
"ld1r { v20.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v21.8h\n"
"fmin v9.8h, v9.8h, v21.8h\n"
@@ -1833,159 +1835,159 @@ void a64_ffhybrid_fp16_mla_6x32 (
"tbz x14, #4, 140f\n"
"st1 { v8.8h }, [x13], #0x10\n"
"st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
"tbz x14, #3, 136f\n"
"st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
"tbz x14, #2, 134f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x14, #1, 133f\n"
"st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
"tbz x14, #0, 148f\n"
"st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
"b 148f\n"
"133:" // Height 3: Partial direct writeback: partial_1_28
"tbz x14, #0, 148f\n"
"st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
"b 148f\n"
"134:" // Height 3: Partial direct writeback: partial_2_24
"tbz x14, #1, 135f\n"
"str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
"tbz x14, #0, 148f\n"
"st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
"b 148f\n"
"135:" // Height 3: Partial direct writeback: partial_1_24
"tbz x14, #0, 148f\n"
"str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
"b 148f\n"
"136:" // Height 3: Partial direct writeback: partial_4_16
"tbz x14, #2, 138f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x14, #1, 137f\n"
"st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
"tbz x14, #0, 148f\n"
"st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
"b 148f\n"
"137:" // Height 3: Partial direct writeback: partial_1_20
"tbz x14, #0, 148f\n"
"st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
"b 148f\n"
"138:" // Height 3: Partial direct writeback: partial_2_16
"tbz x14, #1, 139f\n"
"str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
"tbz x14, #0, 148f\n"
"st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
"b 148f\n"
"139:" // Height 3: Partial direct writeback: partial_1_16
"tbz x14, #0, 148f\n"
"str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
"b 148f\n"
"140:" // Height 3: Partial direct writeback: partial_8_0
"tbz x14, #3, 144f\n"
"st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
"tbz x14, #2, 142f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x14, #1, 141f\n"
"st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
"tbz x14, #0, 148f\n"
"st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
"b 148f\n"
"141:" // Height 3: Partial direct writeback: partial_1_12
"tbz x14, #0, 148f\n"
"st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
"b 148f\n"
"142:" // Height 3: Partial direct writeback: partial_2_8
"tbz x14, #1, 143f\n"
"str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
"tbz x14, #0, 148f\n"
"st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
"b 148f\n"
"143:" // Height 3: Partial direct writeback: partial_1_8
"tbz x14, #0, 148f\n"
"str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
"b 148f\n"
"144:" // Height 3: Partial direct writeback: partial_4_0
"tbz x14, #2, 146f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x14, #1, 145f\n"
"st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
"tbz x14, #0, 148f\n"
"st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
"b 148f\n"
"145:" // Height 3: Partial direct writeback: partial_1_4
"tbz x14, #0, 148f\n"
"st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
"b 148f\n"
"146:" // Height 3: Partial direct writeback: partial_2_0
"tbz x14, #1, 147f\n"
"str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
"tbz x14, #0, 148f\n"
"st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
"b 148f\n"
"147:" // Height 3: Partial direct writeback: partial_1_0
"str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
"148:" // Height 3: Partial direct writeback: Done
"b 150f\n"
"149:" // Height 3: Full writeback
@@ -1994,33 +1996,33 @@ void a64_ffhybrid_fp16_mla_6x32 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"150:" // Height 3: Writeback done
"subs x14, x14, #0x20\n"
"bgt 102b\n"
"b 302f\n"
"151:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"152:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x18\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
"bgt 153f\n"
"cmp x14, #0x10\n"
"mov x9, x12\n"
@@ -2033,18 +2035,18 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cbz x15, 154f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -2052,215 +2054,215 @@ void a64_ffhybrid_fp16_mla_6x32 (
"154:" // Height 4: no bias
"tbz %x[flags], #0, 172f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
"cmp x14, #0x20\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
+ "add x24, x25, x20, LSL #1\n"
"bge 171f\n"
"tbz x14, #4, 162f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
"ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
"tbz x14, #3, 158f\n"
"ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
"tbz x14, #2, 156f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x14, #1, 155f\n"
"ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
"tbz x14, #0, 170f\n"
"ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
"b 170f\n"
"155:" // Height 4: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x14, #0, 170f\n"
"ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
"b 170f\n"
"156:" // Height 4: Partial accumulate: partial_2_24
"tbz x14, #1, 157f\n"
"ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
"tbz x14, #0, 170f\n"
"ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
"b 170f\n"
"157:" // Height 4: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x14, #0, 170f\n"
"ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
"b 170f\n"
"158:" // Height 4: Partial accumulate: partial_4_16
"tbz x14, #2, 160f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x14, #1, 159f\n"
"ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
"tbz x14, #0, 170f\n"
"ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
"b 170f\n"
"159:" // Height 4: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x14, #0, 170f\n"
"ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
"b 170f\n"
"160:" // Height 4: Partial accumulate: partial_2_16
"tbz x14, #1, 161f\n"
"ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
"tbz x14, #0, 170f\n"
"ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
"b 170f\n"
"161:" // Height 4: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x14, #0, 170f\n"
"ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
"b 170f\n"
"162:" // Height 4: Partial accumulate: partial_8_0
"tbz x14, #3, 166f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
"tbz x14, #2, 164f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x14, #1, 163f\n"
"ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
"tbz x14, #0, 170f\n"
"ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
"b 170f\n"
"163:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x14, #0, 170f\n"
"ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
"b 170f\n"
"164:" // Height 4: Partial accumulate: partial_2_8
"tbz x14, #1, 165f\n"
"ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
"tbz x14, #0, 170f\n"
"ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
"b 170f\n"
"165:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x14, #0, 170f\n"
"ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
"b 170f\n"
"166:" // Height 4: Partial accumulate: partial_4_0
"tbz x14, #2, 168f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x14, #1, 167f\n"
"ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
"tbz x14, #0, 170f\n"
"ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
"b 170f\n"
"167:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x14, #0, 170f\n"
"ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
"b 170f\n"
"168:" // Height 4: Partial accumulate: partial_2_0
"tbz x14, #1, 169f\n"
"ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
"tbz x14, #0, 170f\n"
"ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
"b 170f\n"
"169:" // Height 4: Partial accumulate: partial_1_0
"ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
"170:" // Height 4: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 173f\n"
@@ -2269,18 +2271,18 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"b 173f\n"
"172:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -2303,8 +2305,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"174:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 175f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2339,11 +2341,11 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
"ldr q25, [x10, #0x0]\n"
- "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"add x25, x25, #0x10\n"
@@ -2689,16 +2691,16 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr h0, [x23], #0x2\n"
"ldr q25, [x12, #0x0]\n"
"ldr q24, [x11, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v8.8h, v25.8h, v3.h[0]\n"
"fmla v12.8h, v25.8h, v2.h[0]\n"
"fmla v16.8h, v25.8h, v1.h[0]\n"
"fmla v20.8h, v25.8h, v0.h[0]\n"
"ldr q25, [x10, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v9.8h, v24.8h, v3.h[0]\n"
"fmla v13.8h, v24.8h, v2.h[0]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
"fmla v17.8h, v24.8h, v1.h[0]\n"
"fmla v21.8h, v24.8h, v0.h[0]\n"
"ldr q24, [x9, #0x0]\n"
@@ -2718,13 +2720,13 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 174b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"tbz %x[flags], #1, 182f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v25.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v25.8h }, [x21]\n"
"ld1r { v24.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v25.8h\n"
"fmin v9.8h, v9.8h, v25.8h\n"
@@ -2764,191 +2766,191 @@ void a64_ffhybrid_fp16_mla_6x32 (
"tbz x14, #4, 190f\n"
"st1 { v8.8h }, [x13], #0x10\n"
"st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
"tbz x14, #3, 186f\n"
"st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
"tbz x14, #2, 184f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
"tbz x14, #1, 183f\n"
"st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
"tbz x14, #0, 198f\n"
"st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
"b 198f\n"
"183:" // Height 4: Partial direct writeback: partial_1_28
"tbz x14, #0, 198f\n"
"st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
"b 198f\n"
"184:" // Height 4: Partial direct writeback: partial_2_24
"tbz x14, #1, 185f\n"
"str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
"tbz x14, #0, 198f\n"
"st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
"b 198f\n"
"185:" // Height 4: Partial direct writeback: partial_1_24
"tbz x14, #0, 198f\n"
"str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
"b 198f\n"
"186:" // Height 4: Partial direct writeback: partial_4_16
"tbz x14, #2, 188f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
"tbz x14, #1, 187f\n"
"st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
"tbz x14, #0, 198f\n"
"st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
"b 198f\n"
"187:" // Height 4: Partial direct writeback: partial_1_20
"tbz x14, #0, 198f\n"
"st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
"b 198f\n"
"188:" // Height 4: Partial direct writeback: partial_2_16
"tbz x14, #1, 189f\n"
"str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
"tbz x14, #0, 198f\n"
"st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
"b 198f\n"
"189:" // Height 4: Partial direct writeback: partial_1_16
"tbz x14, #0, 198f\n"
"str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
"b 198f\n"
"190:" // Height 4: Partial direct writeback: partial_8_0
"tbz x14, #3, 194f\n"
"st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
"tbz x14, #2, 192f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
"tbz x14, #1, 191f\n"
"st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
"tbz x14, #0, 198f\n"
"st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
"b 198f\n"
"191:" // Height 4: Partial direct writeback: partial_1_12
"tbz x14, #0, 198f\n"
"st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
"b 198f\n"
"192:" // Height 4: Partial direct writeback: partial_2_8
"tbz x14, #1, 193f\n"
"str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
"tbz x14, #0, 198f\n"
"st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
"b 198f\n"
"193:" // Height 4: Partial direct writeback: partial_1_8
"tbz x14, #0, 198f\n"
"str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
"b 198f\n"
"194:" // Height 4: Partial direct writeback: partial_4_0
"tbz x14, #2, 196f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x14, #1, 195f\n"
"st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
"tbz x14, #0, 198f\n"
"st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
"b 198f\n"
"195:" // Height 4: Partial direct writeback: partial_1_4
"tbz x14, #0, 198f\n"
"st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
"b 198f\n"
"196:" // Height 4: Partial direct writeback: partial_2_0
"tbz x14, #1, 197f\n"
"str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
"tbz x14, #0, 198f\n"
"st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
"b 198f\n"
"197:" // Height 4: Partial direct writeback: partial_1_0
"str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
"198:" // Height 4: Partial direct writeback: Done
"b 200f\n"
"199:" // Height 4: Full writeback
@@ -2957,37 +2959,37 @@ void a64_ffhybrid_fp16_mla_6x32 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
"200:" // Height 4: Writeback done
"subs x14, x14, #0x20\n"
"bgt 152b\n"
"b 302f\n"
"201:" // Height 5
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"202:" // Height 5: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x18\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
"bgt 203f\n"
"cmp x14, #0x10\n"
"mov x9, x12\n"
@@ -3000,18 +3002,18 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cbz x15, 204f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -3023,248 +3025,248 @@ void a64_ffhybrid_fp16_mla_6x32 (
"204:" // Height 5: no bias
"tbz %x[flags], #0, 222f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "cmp x14, #0x20\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
- "cmp x14, #0x20\n"
- "add x22, x23, x20, LSL #1\n"
"bge 221f\n"
"tbz x14, #4, 212f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
"ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
+ "ld1 { v25.8h }, [x23], #0x10\n"
"tbz x14, #3, 208f\n"
"ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
+ "ld1 { v26.8h }, [x23], #0x10\n"
"tbz x14, #2, 206f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x14, #1, 205f\n"
"ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
"tbz x14, #0, 220f\n"
"ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
+ "ld1 { v27.h }[6], [x23]\n"
"b 220f\n"
"205:" // Height 5: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x14, #0, 220f\n"
"ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
+ "ld1 { v27.h }[4], [x23]\n"
"b 220f\n"
"206:" // Height 5: Partial accumulate: partial_2_24
"tbz x14, #1, 207f\n"
"ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
"tbz x14, #0, 220f\n"
"ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
+ "ld1 { v27.h }[2], [x23]\n"
"b 220f\n"
"207:" // Height 5: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x14, #0, 220f\n"
"ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
+ "ldr h27, [x23, #0x0]\n"
"b 220f\n"
"208:" // Height 5: Partial accumulate: partial_4_16
"tbz x14, #2, 210f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x14, #1, 209f\n"
"ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
+ "ld1 { v26.s }[2], [x23], #0x4\n"
"tbz x14, #0, 220f\n"
"ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
+ "ld1 { v26.h }[6], [x23]\n"
"b 220f\n"
"209:" // Height 5: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x14, #0, 220f\n"
"ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
+ "ld1 { v26.h }[4], [x23]\n"
"b 220f\n"
"210:" // Height 5: Partial accumulate: partial_2_16
"tbz x14, #1, 211f\n"
"ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
+ "ldr s26, [x23], #0x4\n"
"tbz x14, #0, 220f\n"
"ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
+ "ld1 { v26.h }[2], [x23]\n"
"b 220f\n"
"211:" // Height 5: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x14, #0, 220f\n"
"ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
+ "ldr h26, [x23, #0x0]\n"
"b 220f\n"
"212:" // Height 5: Partial accumulate: partial_8_0
"tbz x14, #3, 216f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
"tbz x14, #2, 214f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x14, #1, 213f\n"
"ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
"tbz x14, #0, 220f\n"
"ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
+ "ld1 { v25.h }[6], [x23]\n"
"b 220f\n"
"213:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x14, #0, 220f\n"
"ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
+ "ld1 { v25.h }[4], [x23]\n"
"b 220f\n"
"214:" // Height 5: Partial accumulate: partial_2_8
"tbz x14, #1, 215f\n"
"ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
"tbz x14, #0, 220f\n"
"ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v25.h }[2], [x23]\n"
"b 220f\n"
"215:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x14, #0, 220f\n"
"ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
"b 220f\n"
"216:" // Height 5: Partial accumulate: partial_4_0
"tbz x14, #2, 218f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x14, #1, 217f\n"
"ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
"tbz x14, #0, 220f\n"
"ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
+ "ld1 { v24.h }[6], [x23]\n"
"b 220f\n"
"217:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x14, #0, 220f\n"
"ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
+ "ld1 { v24.h }[4], [x23]\n"
"b 220f\n"
"218:" // Height 5: Partial accumulate: partial_2_0
"tbz x14, #1, 219f\n"
"ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
"tbz x14, #0, 220f\n"
"ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
+ "ld1 { v24.h }[2], [x23]\n"
"b 220f\n"
"219:" // Height 5: Partial accumulate: partial_1_0
"ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
+ "ldr h24, [x23, #0x0]\n"
"220:" // Height 5: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 223f\n"
@@ -3273,22 +3275,22 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
"b 223f\n"
"222:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -3315,8 +3317,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"224:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 225f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -3355,10 +3357,10 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
"add x25, x25, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
"ldr q29, [x10, #0x0]\n"
@@ -3772,19 +3774,19 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr h1, [x23], #0x2\n"
"ldr h0, [x22], #0x2\n"
"ldr q29, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "ldr q28, [x11, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v8.8h, v29.8h, v4.h[0]\n"
"fmla v12.8h, v29.8h, v3.h[0]\n"
- "ldr q28, [x11, #0x0]\n"
"fmla v16.8h, v29.8h, v2.h[0]\n"
"fmla v20.8h, v29.8h, v1.h[0]\n"
- "add x12, x12, #0x10\n"
"fmla v24.8h, v29.8h, v0.h[0]\n"
"ldr q29, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
"fmla v9.8h, v28.8h, v4.h[0]\n"
- "add x11, x11, #0x10\n"
"fmla v13.8h, v28.8h, v3.h[0]\n"
"fmla v17.8h, v28.8h, v2.h[0]\n"
- "add x10, x10, #0x10\n"
"fmla v21.8h, v28.8h, v1.h[0]\n"
"fmla v25.8h, v28.8h, v0.h[0]\n"
"ldr q28, [x9, #0x0]\n"
@@ -3806,14 +3808,14 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 224b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"tbz %x[flags], #1, 232f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v29.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v29.8h }, [x21]\n"
"ld1r { v28.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v29.8h\n"
"fmin v9.8h, v9.8h, v29.8h\n"
@@ -3861,223 +3863,223 @@ void a64_ffhybrid_fp16_mla_6x32 (
"tbz x14, #4, 240f\n"
"st1 { v8.8h }, [x13], #0x10\n"
"st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v25.8h }, [x23], #0x10\n"
"tbz x14, #3, 236f\n"
"st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
+ "st1 { v26.8h }, [x23], #0x10\n"
"tbz x14, #2, 234f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x14, #1, 233f\n"
"st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
+ "st1 { v27.s }[2], [x23], #0x4\n"
"tbz x14, #0, 248f\n"
"st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
+ "st1 { v27.h }[6], [x23]\n"
"b 248f\n"
"233:" // Height 5: Partial direct writeback: partial_1_28
"tbz x14, #0, 248f\n"
"st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
+ "st1 { v27.h }[4], [x23]\n"
"b 248f\n"
"234:" // Height 5: Partial direct writeback: partial_2_24
"tbz x14, #1, 235f\n"
"str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
+ "str s27, [x23], #0x4\n"
"tbz x14, #0, 248f\n"
"st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
+ "st1 { v27.h }[2], [x23]\n"
"b 248f\n"
"235:" // Height 5: Partial direct writeback: partial_1_24
"tbz x14, #0, 248f\n"
"str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
+ "str h27, [x23, #0x0]\n"
"b 248f\n"
"236:" // Height 5: Partial direct writeback: partial_4_16
"tbz x14, #2, 238f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x14, #1, 237f\n"
"st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
+ "st1 { v26.s }[2], [x23], #0x4\n"
"tbz x14, #0, 248f\n"
"st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
+ "st1 { v26.h }[6], [x23]\n"
"b 248f\n"
"237:" // Height 5: Partial direct writeback: partial_1_20
"tbz x14, #0, 248f\n"
"st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
+ "st1 { v26.h }[4], [x23]\n"
"b 248f\n"
"238:" // Height 5: Partial direct writeback: partial_2_16
"tbz x14, #1, 239f\n"
"str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
+ "str s26, [x23], #0x4\n"
"tbz x14, #0, 248f\n"
"st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
+ "st1 { v26.h }[2], [x23]\n"
"b 248f\n"
"239:" // Height 5: Partial direct writeback: partial_1_16
"tbz x14, #0, 248f\n"
"str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
+ "str h26, [x23, #0x0]\n"
"b 248f\n"
"240:" // Height 5: Partial direct writeback: partial_8_0
"tbz x14, #3, 244f\n"
"st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
"tbz x14, #2, 242f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x14, #1, 241f\n"
"st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
+ "st1 { v25.s }[2], [x23], #0x4\n"
"tbz x14, #0, 248f\n"
"st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
+ "st1 { v25.h }[6], [x23]\n"
"b 248f\n"
"241:" // Height 5: Partial direct writeback: partial_1_12
"tbz x14, #0, 248f\n"
"st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
+ "st1 { v25.h }[4], [x23]\n"
"b 248f\n"
"242:" // Height 5: Partial direct writeback: partial_2_8
"tbz x14, #1, 243f\n"
"str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
+ "str s25, [x23], #0x4\n"
"tbz x14, #0, 248f\n"
"st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
+ "st1 { v25.h }[2], [x23]\n"
"b 248f\n"
"243:" // Height 5: Partial direct writeback: partial_1_8
"tbz x14, #0, 248f\n"
"str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
+ "str h25, [x23, #0x0]\n"
"b 248f\n"
"244:" // Height 5: Partial direct writeback: partial_4_0
"tbz x14, #2, 246f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x14, #1, 245f\n"
"st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
"tbz x14, #0, 248f\n"
"st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
+ "st1 { v24.h }[6], [x23]\n"
"b 248f\n"
"245:" // Height 5: Partial direct writeback: partial_1_4
"tbz x14, #0, 248f\n"
"st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
+ "st1 { v24.h }[4], [x23]\n"
"b 248f\n"
"246:" // Height 5: Partial direct writeback: partial_2_0
"tbz x14, #1, 247f\n"
"str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
"tbz x14, #0, 248f\n"
"st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
+ "st1 { v24.h }[2], [x23]\n"
"b 248f\n"
"247:" // Height 5: Partial direct writeback: partial_1_0
"str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
+ "str h24, [x23, #0x0]\n"
"248:" // Height 5: Partial direct writeback: Done
"b 250f\n"
"249:" // Height 5: Full writeback
@@ -4086,44 +4088,45 @@ void a64_ffhybrid_fp16_mla_6x32 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"250:" // Height 5: Writeback done
"subs x14, x14, #0x20\n"
"bgt 202b\n"
"b 302f\n"
"251:" // Height 6
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0xc\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0xc\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"252:" // Height 6: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x18\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x18\n"
"bgt 253f\n"
"cmp x14, #0x10\n"
"mov x9, x12\n"
@@ -4136,18 +4139,18 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cbz x15, 254f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -4163,281 +4166,281 @@ void a64_ffhybrid_fp16_mla_6x32 (
"254:" // Height 6: no bias
"tbz %x[flags], #0, 272f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "cmp x14, #0x20\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
- "cmp x14, #0x20\n"
- "add x21, x22, x20, LSL #1\n"
"bge 271f\n"
"tbz x14, #4, 262f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
+ "ld1 { v28.8h }, [x22], #0x10\n"
"ld1 { v9.8h }, [x13], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
- "ld1 { v29.8h }, [x21], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
+ "ld1 { v25.8h }, [x23], #0x10\n"
+ "ld1 { v29.8h }, [x22], #0x10\n"
"tbz x14, #3, 258f\n"
"ld1 { v10.8h }, [x13], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
- "ld1 { v30.8h }, [x21], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
+ "ld1 { v26.8h }, [x23], #0x10\n"
+ "ld1 { v30.8h }, [x22], #0x10\n"
"tbz x14, #2, 256f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x14, #1, 255f\n"
"ld1 { v11.s }[2], [x13], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
- "ld1 { v31.s }[2], [x21], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
+ "ld1 { v31.s }[2], [x22], #0x4\n"
"tbz x14, #0, 270f\n"
"ld1 { v11.h }[6], [x13]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
- "ld1 { v31.h }[6], [x21]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
+ "ld1 { v27.h }[6], [x23]\n"
+ "ld1 { v31.h }[6], [x22]\n"
"b 270f\n"
"255:" // Height 6: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x14, #0, 270f\n"
"ld1 { v11.h }[4], [x13]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
- "ld1 { v31.h }[4], [x21]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
+ "ld1 { v27.h }[4], [x23]\n"
+ "ld1 { v31.h }[4], [x22]\n"
"b 270f\n"
"256:" // Height 6: Partial accumulate: partial_2_24
"tbz x14, #1, 257f\n"
"ldr s11, [x13], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
- "ldr s31, [x21], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
+ "ldr s31, [x22], #0x4\n"
"tbz x14, #0, 270f\n"
"ld1 { v11.h }[2], [x13]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
- "ld1 { v31.h }[2], [x21]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
+ "ld1 { v27.h }[2], [x23]\n"
+ "ld1 { v31.h }[2], [x22]\n"
"b 270f\n"
"257:" // Height 6: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x14, #0, 270f\n"
"ldr h11, [x13, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
- "ldr h31, [x21, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
+ "ldr h27, [x23, #0x0]\n"
+ "ldr h31, [x22, #0x0]\n"
"b 270f\n"
"258:" // Height 6: Partial accumulate: partial_4_16
"tbz x14, #2, 260f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x14, #1, 259f\n"
"ld1 { v10.s }[2], [x13], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
+ "ld1 { v26.s }[2], [x23], #0x4\n"
+ "ld1 { v30.s }[2], [x22], #0x4\n"
"tbz x14, #0, 270f\n"
"ld1 { v10.h }[6], [x13]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
- "ld1 { v30.h }[6], [x21]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
+ "ld1 { v26.h }[6], [x23]\n"
+ "ld1 { v30.h }[6], [x22]\n"
"b 270f\n"
"259:" // Height 6: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x14, #0, 270f\n"
"ld1 { v10.h }[4], [x13]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
- "ld1 { v30.h }[4], [x21]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
+ "ld1 { v26.h }[4], [x23]\n"
+ "ld1 { v30.h }[4], [x22]\n"
"b 270f\n"
"260:" // Height 6: Partial accumulate: partial_2_16
"tbz x14, #1, 261f\n"
"ldr s10, [x13], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
+ "ldr s26, [x23], #0x4\n"
+ "ldr s30, [x22], #0x4\n"
"tbz x14, #0, 270f\n"
"ld1 { v10.h }[2], [x13]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
- "ld1 { v30.h }[2], [x21]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
+ "ld1 { v26.h }[2], [x23]\n"
+ "ld1 { v30.h }[2], [x22]\n"
"b 270f\n"
"261:" // Height 6: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x14, #0, 270f\n"
"ldr h10, [x13, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
- "ldr h30, [x21, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
+ "ldr h26, [x23, #0x0]\n"
+ "ldr h30, [x22, #0x0]\n"
"b 270f\n"
"262:" // Height 6: Partial accumulate: partial_8_0
"tbz x14, #3, 266f\n"
"ld1 { v8.8h }, [x13], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
+ "ld1 { v28.8h }, [x22], #0x10\n"
"tbz x14, #2, 264f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x14, #1, 263f\n"
"ld1 { v9.s }[2], [x13], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
- "ld1 { v29.s }[2], [x21], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
+ "ld1 { v29.s }[2], [x22], #0x4\n"
"tbz x14, #0, 270f\n"
"ld1 { v9.h }[6], [x13]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
- "ld1 { v29.h }[6], [x21]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
+ "ld1 { v25.h }[6], [x23]\n"
+ "ld1 { v29.h }[6], [x22]\n"
"b 270f\n"
"263:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x14, #0, 270f\n"
"ld1 { v9.h }[4], [x13]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
- "ld1 { v29.h }[4], [x21]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
+ "ld1 { v25.h }[4], [x23]\n"
+ "ld1 { v29.h }[4], [x22]\n"
"b 270f\n"
"264:" // Height 6: Partial accumulate: partial_2_8
"tbz x14, #1, 265f\n"
"ldr s9, [x13], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
- "ldr s29, [x21], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s29, [x22], #0x4\n"
"tbz x14, #0, 270f\n"
"ld1 { v9.h }[2], [x13]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
- "ld1 { v29.h }[2], [x21]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v25.h }[2], [x23]\n"
+ "ld1 { v29.h }[2], [x22]\n"
"b 270f\n"
"265:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x14, #0, 270f\n"
"ldr h9, [x13, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
- "ldr h29, [x21, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
+ "ldr h29, [x22, #0x0]\n"
"b 270f\n"
"266:" // Height 6: Partial accumulate: partial_4_0
"tbz x14, #2, 268f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x14, #1, 267f\n"
"ld1 { v8.s }[2], [x13], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
+ "ld1 { v28.s }[2], [x22], #0x4\n"
"tbz x14, #0, 270f\n"
"ld1 { v8.h }[6], [x13]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
- "ld1 { v28.h }[6], [x21]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
+ "ld1 { v24.h }[6], [x23]\n"
+ "ld1 { v28.h }[6], [x22]\n"
"b 270f\n"
"267:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x14, #0, 270f\n"
"ld1 { v8.h }[4], [x13]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
- "ld1 { v28.h }[4], [x21]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
+ "ld1 { v24.h }[4], [x23]\n"
+ "ld1 { v28.h }[4], [x22]\n"
"b 270f\n"
"268:" // Height 6: Partial accumulate: partial_2_0
"tbz x14, #1, 269f\n"
"ldr s8, [x13], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s28, [x22], #0x4\n"
"tbz x14, #0, 270f\n"
"ld1 { v8.h }[2], [x13]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
- "ld1 { v28.h }[2], [x21]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
+ "ld1 { v24.h }[2], [x23]\n"
+ "ld1 { v28.h }[2], [x22]\n"
"b 270f\n"
"269:" // Height 6: Partial accumulate: partial_1_0
"ldr h8, [x13, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
- "ldr h28, [x21, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
+ "ldr h24, [x23, #0x0]\n"
+ "ldr h28, [x22, #0x0]\n"
"270:" // Height 6: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 273f\n"
@@ -4446,26 +4449,26 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"b 273f\n"
"272:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -4496,8 +4499,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"274:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 275f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -4540,10 +4543,10 @@ void a64_ffhybrid_fp16_mla_6x32 (
"fmla v8.8h, v6.8h, v0.h[0]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"sub x27, x27, #0x8\n"
- "cmp x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
- "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
"add x25, x25, #0x10\n"
"fmla v24.8h, v6.8h, v4.h[0]\n"
"fmla v28.8h, v6.8h, v5.h[0]\n"
@@ -5026,12 +5029,12 @@ void a64_ffhybrid_fp16_mla_6x32 (
"ldr h2, [x21], #0x2\n"
"ldr q1, [x12, #0x0]\n"
"ldr q0, [x11, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v8.8h, v1.8h, v7.h[0]\n"
"fmla v12.8h, v1.8h, v6.h[0]\n"
"fmla v16.8h, v1.8h, v5.h[0]\n"
"fmla v20.8h, v1.8h, v4.h[0]\n"
- "add x12, x12, #0x10\n"
- "add x11, x11, #0x10\n"
"fmla v24.8h, v1.8h, v3.h[0]\n"
"fmla v28.8h, v1.8h, v2.h[0]\n"
"ldr q1, [x10, #0x0]\n"
@@ -5063,15 +5066,15 @@ void a64_ffhybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 274b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"tbz %x[flags], #1, 282f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x21]\n"
"ld1r { v0.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v1.8h\n"
"fmin v9.8h, v9.8h, v1.8h\n"
@@ -5127,255 +5130,255 @@ void a64_ffhybrid_fp16_mla_6x32 (
"tbz x14, #4, 290f\n"
"st1 { v8.8h }, [x13], #0x10\n"
"st1 { v9.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
- "st1 { v29.8h }, [x21], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v25.8h }, [x23], #0x10\n"
+ "st1 { v28.8h }, [x22], #0x10\n"
+ "st1 { v29.8h }, [x22], #0x10\n"
"tbz x14, #3, 286f\n"
"st1 { v10.8h }, [x13], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
- "st1 { v30.8h }, [x21], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
+ "st1 { v26.8h }, [x23], #0x10\n"
+ "st1 { v30.8h }, [x22], #0x10\n"
"tbz x14, #2, 284f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x14, #1, 283f\n"
"st1 { v11.s }[2], [x13], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
- "st1 { v31.s }[2], [x21], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
+ "st1 { v27.s }[2], [x23], #0x4\n"
+ "st1 { v31.s }[2], [x22], #0x4\n"
"tbz x14, #0, 298f\n"
"st1 { v11.h }[6], [x13]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
- "st1 { v31.h }[6], [x21]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
+ "st1 { v27.h }[6], [x23]\n"
+ "st1 { v31.h }[6], [x22]\n"
"b 298f\n"
"283:" // Height 6: Partial direct writeback: partial_1_28
"tbz x14, #0, 298f\n"
"st1 { v11.h }[4], [x13]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
- "st1 { v31.h }[4], [x21]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
+ "st1 { v27.h }[4], [x23]\n"
+ "st1 { v31.h }[4], [x22]\n"
"b 298f\n"
"284:" // Height 6: Partial direct writeback: partial_2_24
"tbz x14, #1, 285f\n"
"str s11, [x13], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
- "str s31, [x21], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
+ "str s27, [x23], #0x4\n"
+ "str s31, [x22], #0x4\n"
"tbz x14, #0, 298f\n"
"st1 { v11.h }[2], [x13]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
- "st1 { v31.h }[2], [x21]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
+ "st1 { v27.h }[2], [x23]\n"
+ "st1 { v31.h }[2], [x22]\n"
"b 298f\n"
"285:" // Height 6: Partial direct writeback: partial_1_24
"tbz x14, #0, 298f\n"
"str h11, [x13, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
- "str h31, [x21, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
+ "str h27, [x23, #0x0]\n"
+ "str h31, [x22, #0x0]\n"
"b 298f\n"
"286:" // Height 6: Partial direct writeback: partial_4_16
"tbz x14, #2, 288f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x14, #1, 287f\n"
"st1 { v10.s }[2], [x13], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
- "st1 { v30.s }[2], [x21], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
+ "st1 { v26.s }[2], [x23], #0x4\n"
+ "st1 { v30.s }[2], [x22], #0x4\n"
"tbz x14, #0, 298f\n"
"st1 { v10.h }[6], [x13]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
- "st1 { v30.h }[6], [x21]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
+ "st1 { v26.h }[6], [x23]\n"
+ "st1 { v30.h }[6], [x22]\n"
"b 298f\n"
"287:" // Height 6: Partial direct writeback: partial_1_20
"tbz x14, #0, 298f\n"
"st1 { v10.h }[4], [x13]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
- "st1 { v30.h }[4], [x21]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
+ "st1 { v26.h }[4], [x23]\n"
+ "st1 { v30.h }[4], [x22]\n"
"b 298f\n"
"288:" // Height 6: Partial direct writeback: partial_2_16
"tbz x14, #1, 289f\n"
"str s10, [x13], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
- "str s30, [x21], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
+ "str s26, [x23], #0x4\n"
+ "str s30, [x22], #0x4\n"
"tbz x14, #0, 298f\n"
"st1 { v10.h }[2], [x13]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
- "st1 { v30.h }[2], [x21]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
+ "st1 { v26.h }[2], [x23]\n"
+ "st1 { v30.h }[2], [x22]\n"
"b 298f\n"
"289:" // Height 6: Partial direct writeback: partial_1_16
"tbz x14, #0, 298f\n"
"str h10, [x13, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
- "str h30, [x21, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
+ "str h26, [x23, #0x0]\n"
+ "str h30, [x22, #0x0]\n"
"b 298f\n"
"290:" // Height 6: Partial direct writeback: partial_8_0
"tbz x14, #3, 294f\n"
"st1 { v8.8h }, [x13], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v28.8h }, [x22], #0x10\n"
"tbz x14, #2, 292f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x14, #1, 291f\n"
"st1 { v9.s }[2], [x13], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
- "st1 { v29.s }[2], [x21], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
+ "st1 { v25.s }[2], [x23], #0x4\n"
+ "st1 { v29.s }[2], [x22], #0x4\n"
"tbz x14, #0, 298f\n"
"st1 { v9.h }[6], [x13]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
- "st1 { v29.h }[6], [x21]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
+ "st1 { v25.h }[6], [x23]\n"
+ "st1 { v29.h }[6], [x22]\n"
"b 298f\n"
"291:" // Height 6: Partial direct writeback: partial_1_12
"tbz x14, #0, 298f\n"
"st1 { v9.h }[4], [x13]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
- "st1 { v29.h }[4], [x21]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
+ "st1 { v25.h }[4], [x23]\n"
+ "st1 { v29.h }[4], [x22]\n"
"b 298f\n"
"292:" // Height 6: Partial direct writeback: partial_2_8
"tbz x14, #1, 293f\n"
"str s9, [x13], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
- "str s29, [x21], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
+ "str s25, [x23], #0x4\n"
+ "str s29, [x22], #0x4\n"
"tbz x14, #0, 298f\n"
"st1 { v9.h }[2], [x13]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
- "st1 { v29.h }[2], [x21]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
+ "st1 { v25.h }[2], [x23]\n"
+ "st1 { v29.h }[2], [x22]\n"
"b 298f\n"
"293:" // Height 6: Partial direct writeback: partial_1_8
"tbz x14, #0, 298f\n"
"str h9, [x13, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
- "str h29, [x21, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
+ "str h25, [x23, #0x0]\n"
+ "str h29, [x22, #0x0]\n"
"b 298f\n"
"294:" // Height 6: Partial direct writeback: partial_4_0
"tbz x14, #2, 296f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x14, #1, 295f\n"
"st1 { v8.s }[2], [x13], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
"tbz x14, #0, 298f\n"
"st1 { v8.h }[6], [x13]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
- "st1 { v28.h }[6], [x21]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
+ "st1 { v24.h }[6], [x23]\n"
+ "st1 { v28.h }[6], [x22]\n"
"b 298f\n"
"295:" // Height 6: Partial direct writeback: partial_1_4
"tbz x14, #0, 298f\n"
"st1 { v8.h }[4], [x13]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
- "st1 { v28.h }[4], [x21]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
+ "st1 { v24.h }[4], [x23]\n"
+ "st1 { v28.h }[4], [x22]\n"
"b 298f\n"
"296:" // Height 6: Partial direct writeback: partial_2_0
"tbz x14, #1, 297f\n"
"str s8, [x13], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
"tbz x14, #0, 298f\n"
"st1 { v8.h }[2], [x13]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
- "st1 { v28.h }[2], [x21]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
+ "st1 { v24.h }[2], [x23]\n"
+ "st1 { v28.h }[2], [x22]\n"
"b 298f\n"
"297:" // Height 6: Partial direct writeback: partial_1_0
"str h8, [x13, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
- "str h28, [x21, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
+ "str h24, [x23, #0x0]\n"
+ "str h28, [x22, #0x0]\n"
"298:" // Height 6: Partial direct writeback: Done
"b 300f\n"
"299:" // Height 6: Full writeback
@@ -5384,26 +5387,26 @@ void a64_ffhybrid_fp16_mla_6x32 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q28, [x22, #0x0]\n"
+ "str q29, [x22, #0x10]\n"
+ "str q30, [x22, #0x20]\n"
+ "str q31, [x22, #0x30]\n"
"300:" // Height 6: Writeback done
"subs x14, x14, #0x20\n"
"bgt 252b\n"
@@ -5419,8 +5422,8 @@ void a64_ffhybrid_fp16_mla_6x32 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"302:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
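[Note on the pattern above] The recurring change in this kernel (and mirrored in the fp32 variant below) is that output_ptr and bias are no longer bound to their own inline-asm register operands; they move into the KernelArgs structure and are fetched with ldr via offsetof() constants, which frees general-purpose registers and lets the six output-row pointers shift from x21-x25 up to x22-x26. Below is a minimal, self-contained sketch of that technique, not the library's actual code: KernelArgsSketch and its field are illustrative names, and the asm block is guarded so the example still builds off AArch64.

    // Sketch: pass a pointer through an args struct + offsetof() constant
    // instead of binding it to a dedicated "+r" inline-asm operand.
    #include <cstddef>
    #include <cstdio>

    struct KernelArgsSketch {
        void *output_ptr = nullptr;  // hypothetical field, standing in for ka.output_ptr
    };

    int main() {
        int out = 0;
        KernelArgsSketch ka;
        ka.output_ptr = &out;
        void *p = nullptr;
    #if defined(__aarch64__)
        __asm__ volatile(
            "ldr %[p], [%[args], %[off]]\n"  // one load replaces a dedicated register operand
            : [p] "=r"(p)
            : [args] "r"(&ka),
              [off] "I"(offsetof(KernelArgsSketch, output_ptr))
            : "memory");
    #else
        p = ka.output_ptr;  // portable fallback for non-AArch64 builds
    #endif
        std::printf("%p\n", p);
        return 0;
    }

The trade-off is one extra load per use of the pointer in exchange for a wider pool of scratch registers inside the asm body, which is why the diff can afford to renumber the output-row pointers upward by one register.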
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp
index 94fb84e409..658850e12c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -81,7 +81,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 1> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 1> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
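[Note on the header change above] The one-line edit to the .hpp widens StdTransformsFixed to take the LHS operand type explicitly instead of reusing the RHS type for both sides of the GEMM. A hedged sketch of what such an interface change looks like follows; StdTransformsFixedSketch is an invented stand-in, not the real template.

    // Sketch: a transforms type that names LHS and RHS operand types separately.
    #include <type_traits>

    template <typename TLhs, typename TRhs, typename TResult,
              unsigned Height, unsigned Width, unsigned BlockBy>
    struct StdTransformsFixedSketch {
        using lhs_type    = TLhs;
        using rhs_type    = TRhs;
        using result_type = TResult;
    };

    // Before: StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 1>
    // After:  StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 1>
    using FP32Transforms = StdTransformsFixedSketch<float, float, float, 6, 16, 1>;
    static_assert(std::is_same<FP32Transforms::lhs_type, float>::value,
                  "fp32 kernel: LHS and RHS happen to share a type");

    int main() { return 0; }

For this fp32 kernel both operand types are float, so the change is behavior-neutral here; the extra parameter matters for mixed-type kernels where LHS and RHS differ.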
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp
index b1cd6dc970..5dcaa9e5d5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32_mla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,18 +49,19 @@ void a64_ffhybrid_fp32_mla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -81,6 +82,7 @@ void a64_ffhybrid_fp32_mla_6x16 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -105,19 +107,19 @@ void a64_ffhybrid_fp32_mla_6x16 (
"bgt 69f\n"
"beq 35f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 3f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -208,8 +210,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"16:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -233,6 +235,9 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr q17, [x10, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"ldr q16, [x9, #0x0]\n"
+ "sub x27, x27, #0x4\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x8\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"ldr q17, [x12, #0x10]\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
@@ -251,30 +256,29 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr q16, [x9, #0x20]\n"
"fmla v10.4s, v17.4s, v0.s[2]\n"
"ldr q17, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
"fmla v11.4s, v16.4s, v0.s[2]\n"
"ldr q16, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"fmla v8.4s, v17.4s, v0.s[3]\n"
"ldr q17, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr q16, [x9, #0x30]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "add x9, x9, #0x40\n"
"fmla v10.4s, v17.4s, v0.s[3]\n"
+ "ldr q6, [x12, #0x0]\n"
"fmla v11.4s, v16.4s, v0.s[3]\n"
- "add x26, x26, #0x10\n"
"ldr q0, [x26, #0x0]\n"
- "add x12, x12, #0x40\n"
- "ldr q6, [x12, #0x0]\n"
- "add x11, x11, #0x40\n"
"ldr q7, [x11, #0x0]\n"
- "add x10, x10, #0x40\n"
- "add x9, x9, #0x40\n"
"bge 19b\n"
"20:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q17, [x10, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"ldr q16, [x9, #0x0]\n"
+ "sub x27, x27, #0x4\n"
+ "add x26, x26, #0x10\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"ldr q17, [x12, #0x10]\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
@@ -293,37 +297,35 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr q16, [x9, #0x20]\n"
"fmla v10.4s, v17.4s, v0.s[2]\n"
"ldr q17, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
"fmla v11.4s, v16.4s, v0.s[2]\n"
"ldr q16, [x11, #0x30]\n"
+ "add x11, x11, #0x40\n"
"fmla v8.4s, v17.4s, v0.s[3]\n"
"ldr q17, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr q16, [x9, #0x30]\n"
- "sub x27, x27, #0x4\n"
+ "add x9, x9, #0x40\n"
"fmla v10.4s, v17.4s, v0.s[3]\n"
"fmla v11.4s, v16.4s, v0.s[3]\n"
- "add x26, x26, #0x10\n"
- "add x12, x12, #0x40\n"
- "add x11, x11, #0x40\n"
- "add x10, x10, #0x40\n"
- "add x9, x9, #0x40\n"
"21:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 23f\n"
"22:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x26], #0x4\n"
- "ldr q16, [x12, #0x0]\n"
- "fmla v8.4s, v16.4s, v18.s[0]\n"
+ "ldr q17, [x12, #0x0]\n"
"sub x27, x27, #0x1\n"
- "ldr q17, [x11, #0x0]\n"
- "ldr q16, [x10, #0x0]\n"
- "fmla v9.4s, v17.4s, v18.s[0]\n"
- "fmla v10.4s, v16.4s, v18.s[0]\n"
- "ldr q16, [x9, #0x0]\n"
- "fmla v11.4s, v16.4s, v18.s[0]\n"
"add x12, x12, #0x10\n"
+ "ldr q16, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
+ "fmla v8.4s, v17.4s, v18.s[0]\n"
+ "ldr q17, [x10, #0x0]\n"
"add x10, x10, #0x10\n"
+ "fmla v9.4s, v16.4s, v18.s[0]\n"
+ "ldr q16, [x9, #0x0]\n"
"add x9, x9, #0x10\n"
+ "fmla v10.4s, v17.4s, v18.s[0]\n"
+ "fmla v11.4s, v16.4s, v18.s[0]\n"
"cbnz x27, 22b\n"
"23:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -331,9 +333,9 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 16b\n"
"tbz %x[flags], #1, 24f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v17.4s\n"
"fmin v9.4s, v9.4s, v17.4s\n"
@@ -404,19 +406,19 @@ void a64_ffhybrid_fp32_mla_6x16 (
"b 206f\n"
"35:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"36:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 37f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -429,87 +431,87 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cbz x15, 38f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x15, x15, #0x40\n"
"b 49f\n"
"38:" // Height 2: no bias
"tbz %x[flags], #0, 48f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x14, #0x10\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
"bge 47f\n"
"tbz x14, #3, 42f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
"tbz x14, #2, 40f\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
"tbz x14, #1, 39f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
"tbz x14, #0, 46f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v15.s }[2], [x26]\n"
"b 46f\n"
"39:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 46f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
"b 46f\n"
"40:" // Height 2: Partial accumulate: partial_2_8
"tbz x14, #1, 41f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
"tbz x14, #0, 46f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v14.s }[2], [x26]\n"
"b 46f\n"
"41:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 46f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
"b 46f\n"
"42:" // Height 2: Partial accumulate: partial_4_0
"tbz x14, #2, 44f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x14, #1, 43f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
"tbz x14, #0, 46f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 46f\n"
"43:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 46f\n"
"ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 46f\n"
"44:" // Height 2: Partial accumulate: partial_2_0
"tbz x14, #1, 45f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
"tbz x14, #0, 46f\n"
"ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 46f\n"
"45:" // Height 2: Partial accumulate: partial_1_0
"ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
"46:" // Height 2: Partial accumulate: Done
"sub x13, x13, x20\n"
@@ -519,10 +521,10 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"b 49f\n"
"48:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -537,8 +539,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"50:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 51f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -569,15 +571,15 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr q16, [x9, #0x0]\n"
+ "add x26, x26, #0x10\n"
"cmp x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"fmla v14.4s, v17.4s, v1.s[0]\n"
"ldr q17, [x12, #0x10]\n"
- "add x26, x26, #0x10\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
"fmla v15.4s, v16.4s, v1.s[0]\n"
"ldr q16, [x11, #0x10]\n"
- "add x25, x25, #0x10\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
"fmla v12.4s, v17.4s, v1.s[1]\n"
"ldr q17, [x10, #0x10]\n"
@@ -630,10 +632,10 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr q16, [x9, #0x0]\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"fmla v14.4s, v17.4s, v1.s[0]\n"
"ldr q17, [x12, #0x10]\n"
- "add x25, x25, #0x10\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
"fmla v15.4s, v16.4s, v1.s[0]\n"
"ldr q16, [x11, #0x10]\n"
@@ -683,20 +685,20 @@ void a64_ffhybrid_fp32_mla_6x16 (
"sub x27, x27, #0x1\n"
"ldr q17, [x12, #0x0]\n"
"ldr q16, [x11, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v8.4s, v17.4s, v19.s[0]\n"
"fmla v12.4s, v17.4s, v18.s[0]\n"
"ldr q17, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
"fmla v9.4s, v16.4s, v19.s[0]\n"
"fmla v13.4s, v16.4s, v18.s[0]\n"
"ldr q16, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
"fmla v10.4s, v17.4s, v19.s[0]\n"
"fmla v14.4s, v17.4s, v18.s[0]\n"
- "add x12, x12, #0x10\n"
- "add x11, x11, #0x10\n"
"fmla v11.4s, v16.4s, v19.s[0]\n"
"fmla v15.4s, v16.4s, v18.s[0]\n"
- "add x10, x10, #0x10\n"
- "add x9, x9, #0x10\n"
"cbnz x27, 56b\n"
"57:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -704,11 +706,11 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 50b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
"tbz %x[flags], #1, 58f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v17.4s\n"
"fmin v9.4s, v9.4s, v17.4s\n"
@@ -732,63 +734,63 @@ void a64_ffhybrid_fp32_mla_6x16 (
"tbz x14, #3, 62f\n"
"st1 { v8.4s }, [x13], #0x10\n"
"st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
"tbz x14, #2, 60f\n"
"st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
"tbz x14, #1, 59f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d15, [x26], #0x8\n"
"tbz x14, #0, 66f\n"
"st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
+ "st1 { v15.s }[2], [x26]\n"
"b 66f\n"
"59:" // Height 2: Partial direct writeback: partial_1_12
"tbz x14, #0, 66f\n"
"str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
"b 66f\n"
"60:" // Height 2: Partial direct writeback: partial_2_8
"tbz x14, #1, 61f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d14, [x26], #0x8\n"
"tbz x14, #0, 66f\n"
"st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
+ "st1 { v14.s }[2], [x26]\n"
"b 66f\n"
"61:" // Height 2: Partial direct writeback: partial_1_8
"tbz x14, #0, 66f\n"
"str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
"b 66f\n"
"62:" // Height 2: Partial direct writeback: partial_4_0
"tbz x14, #2, 64f\n"
"st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
"tbz x14, #1, 63f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x14, #0, 66f\n"
"st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
+ "st1 { v13.s }[2], [x26]\n"
"b 66f\n"
"63:" // Height 2: Partial direct writeback: partial_1_4
"tbz x14, #0, 66f\n"
"str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
"b 66f\n"
"64:" // Height 2: Partial direct writeback: partial_2_0
"tbz x14, #1, 65f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x14, #0, 66f\n"
"st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
+ "st1 { v12.s }[2], [x26]\n"
"b 66f\n"
"65:" // Height 2: Partial direct writeback: partial_1_0
"str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
"66:" // Height 2: Partial direct writeback: Done
"b 68f\n"
"67:" // Height 2: Full writeback
@@ -797,29 +799,29 @@ void a64_ffhybrid_fp32_mla_6x16 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
"68:" // Height 2: Writeback done
"subs x14, x14, #0x10\n"
"bgt 36b\n"
"b 206f\n"
"69:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"70:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 71f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -832,109 +834,109 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cbz x15, 72f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 83f\n"
"72:" // Height 3: no bias
"tbz %x[flags], #0, 82f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
"cmp x14, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"bge 81f\n"
"tbz x14, #3, 76f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"tbz x14, #2, 74f\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
"tbz x14, #1, 73f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x14, #0, 80f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
"b 80f\n"
"73:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 80f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
"b 80f\n"
"74:" // Height 3: Partial accumulate: partial_2_8
"tbz x14, #1, 75f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x14, #0, 80f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
"b 80f\n"
"75:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 80f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
"b 80f\n"
"76:" // Height 3: Partial accumulate: partial_4_0
"tbz x14, #2, 78f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
"tbz x14, #1, 77f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x14, #0, 80f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
"b 80f\n"
"77:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 80f\n"
"ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
"b 80f\n"
"78:" // Height 3: Partial accumulate: partial_2_0
"tbz x14, #1, 79f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
"tbz x14, #0, 80f\n"
"ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
"b 80f\n"
"79:" // Height 3: Partial accumulate: partial_1_0
"ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
"80:" // Height 3: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 83f\n"
@@ -943,14 +945,14 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
"b 83f\n"
"82:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -969,8 +971,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"84:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 85f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1001,18 +1003,18 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"ldr q21, [x10, #0x0]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "add x26, x26, #0x10\n"
+ "cmp x27, #0x8\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"ldr q20, [x9, #0x0]\n"
"add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"fmla v10.4s, v21.4s, v0.s[0]\n"
"fmla v14.4s, v21.4s, v1.s[0]\n"
- "add x24, x24, #0x10\n"
"fmla v18.4s, v21.4s, v2.s[0]\n"
"ldr q21, [x12, #0x10]\n"
"fmla v11.4s, v20.4s, v0.s[0]\n"
@@ -1047,8 +1049,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v14.4s, v21.4s, v1.s[2]\n"
"fmla v18.4s, v21.4s, v2.s[2]\n"
"ldr q21, [x12, #0x30]\n"
- "fmla v11.4s, v20.4s, v0.s[2]\n"
"add x12, x12, #0x40\n"
+ "fmla v11.4s, v20.4s, v0.s[2]\n"
"fmla v15.4s, v20.4s, v1.s[2]\n"
"fmla v19.4s, v20.4s, v2.s[2]\n"
"ldr q20, [x11, #0x30]\n"
@@ -1057,8 +1059,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v12.4s, v21.4s, v1.s[3]\n"
"fmla v16.4s, v21.4s, v2.s[3]\n"
"ldr q21, [x10, #0x30]\n"
- "fmla v9.4s, v20.4s, v0.s[3]\n"
"add x10, x10, #0x40\n"
+ "fmla v9.4s, v20.4s, v0.s[3]\n"
"fmla v13.4s, v20.4s, v1.s[3]\n"
"fmla v17.4s, v20.4s, v2.s[3]\n"
"ldr q20, [x9, #0x30]\n"
@@ -1124,8 +1126,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v14.4s, v21.4s, v1.s[2]\n"
"fmla v18.4s, v21.4s, v2.s[2]\n"
"ldr q21, [x12, #0x30]\n"
- "fmla v11.4s, v20.4s, v0.s[2]\n"
"add x12, x12, #0x40\n"
+ "fmla v11.4s, v20.4s, v0.s[2]\n"
"fmla v15.4s, v20.4s, v1.s[2]\n"
"fmla v19.4s, v20.4s, v2.s[2]\n"
"ldr q20, [x11, #0x30]\n"
@@ -1134,8 +1136,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v12.4s, v21.4s, v1.s[3]\n"
"fmla v16.4s, v21.4s, v2.s[3]\n"
"ldr q21, [x10, #0x30]\n"
- "fmla v9.4s, v20.4s, v0.s[3]\n"
"add x10, x10, #0x40\n"
+ "fmla v9.4s, v20.4s, v0.s[3]\n"
"fmla v13.4s, v20.4s, v1.s[3]\n"
"fmla v17.4s, v20.4s, v2.s[3]\n"
"ldr q20, [x9, #0x30]\n"
@@ -1154,23 +1156,23 @@ void a64_ffhybrid_fp32_mla_6x16 (
"sub x27, x27, #0x1\n"
"ldr s22, [x24], #0x4\n"
"ldr q21, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "ldr q20, [x11, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v8.4s, v21.4s, v24.s[0]\n"
"fmla v12.4s, v21.4s, v23.s[0]\n"
- "ldr q20, [x11, #0x0]\n"
"fmla v16.4s, v21.4s, v22.s[0]\n"
"ldr q21, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
"fmla v9.4s, v20.4s, v24.s[0]\n"
"fmla v13.4s, v20.4s, v23.s[0]\n"
"fmla v17.4s, v20.4s, v22.s[0]\n"
"ldr q20, [x9, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v10.4s, v21.4s, v24.s[0]\n"
"fmla v14.4s, v21.4s, v23.s[0]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
"fmla v18.4s, v21.4s, v22.s[0]\n"
"fmla v11.4s, v20.4s, v24.s[0]\n"
- "add x9, x9, #0x10\n"
"fmla v15.4s, v20.4s, v23.s[0]\n"
"fmla v19.4s, v20.4s, v22.s[0]\n"
"cbnz x27, 90b\n"
@@ -1180,12 +1182,12 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 84b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"tbz %x[flags], #1, 92f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v21.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.4s }, [x21]\n"
"ld1r { v20.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v21.4s\n"
"fmin v9.4s, v9.4s, v21.4s\n"
@@ -1217,79 +1219,79 @@ void a64_ffhybrid_fp32_mla_6x16 (
"tbz x14, #3, 96f\n"
"st1 { v8.4s }, [x13], #0x10\n"
"st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
"tbz x14, #2, 94f\n"
"st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
"tbz x14, #1, 93f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x14, #0, 100f\n"
"st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
"b 100f\n"
"93:" // Height 3: Partial direct writeback: partial_1_12
"tbz x14, #0, 100f\n"
"str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
"b 100f\n"
"94:" // Height 3: Partial direct writeback: partial_2_8
"tbz x14, #1, 95f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x14, #0, 100f\n"
"st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
"b 100f\n"
"95:" // Height 3: Partial direct writeback: partial_1_8
"tbz x14, #0, 100f\n"
"str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
"b 100f\n"
"96:" // Height 3: Partial direct writeback: partial_4_0
"tbz x14, #2, 98f\n"
"st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
"tbz x14, #1, 97f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x14, #0, 100f\n"
"st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
"b 100f\n"
"97:" // Height 3: Partial direct writeback: partial_1_4
"tbz x14, #0, 100f\n"
"str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
"b 100f\n"
"98:" // Height 3: Partial direct writeback: partial_2_0
"tbz x14, #1, 99f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x14, #0, 100f\n"
"st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
"b 100f\n"
"99:" // Height 3: Partial direct writeback: partial_1_0
"str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
"100:" // Height 3: Partial direct writeback: Done
"b 102f\n"
"101:" // Height 3: Full writeback
@@ -1298,33 +1300,33 @@ void a64_ffhybrid_fp32_mla_6x16 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"102:" // Height 3: Writeback done
"subs x14, x14, #0x10\n"
"bgt 70b\n"
"b 206f\n"
"103:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"104:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 105f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -1337,18 +1339,18 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cbz x15, 106f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1356,111 +1358,111 @@ void a64_ffhybrid_fp32_mla_6x16 (
"106:" // Height 4: no bias
"tbz %x[flags], #0, 116f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"cmp x14, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"bge 115f\n"
"tbz x14, #3, 110f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
"tbz x14, #2, 108f\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
"tbz x14, #1, 107f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x14, #0, 114f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
"b 114f\n"
"107:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 114f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
"b 114f\n"
"108:" // Height 4: Partial accumulate: partial_2_8
"tbz x14, #1, 109f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x14, #0, 114f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
"b 114f\n"
"109:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 114f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
"b 114f\n"
"110:" // Height 4: Partial accumulate: partial_4_0
"tbz x14, #2, 112f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"tbz x14, #1, 111f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x14, #0, 114f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
"b 114f\n"
"111:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 114f\n"
"ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
"b 114f\n"
"112:" // Height 4: Partial accumulate: partial_2_0
"tbz x14, #1, 113f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x14, #0, 114f\n"
"ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
"b 114f\n"
"113:" // Height 4: Partial accumulate: partial_1_0
"ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
"114:" // Height 4: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 117f\n"
@@ -1469,18 +1471,18 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"b 117f\n"
"116:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1503,8 +1505,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"118:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 119f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1539,11 +1541,11 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
"ldr q25, [x10, #0x0]\n"
- "add x26, x26, #0x10\n"
+ "cmp x27, #0x8\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"add x25, x25, #0x10\n"
@@ -1729,16 +1731,16 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr s26, [x23], #0x4\n"
"ldr q25, [x12, #0x0]\n"
"ldr q24, [x11, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v8.4s, v25.4s, v29.s[0]\n"
"fmla v12.4s, v25.4s, v28.s[0]\n"
"fmla v16.4s, v25.4s, v27.s[0]\n"
"fmla v20.4s, v25.4s, v26.s[0]\n"
"ldr q25, [x10, #0x0]\n"
- "add x12, x12, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v9.4s, v24.4s, v29.s[0]\n"
"fmla v13.4s, v24.4s, v28.s[0]\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
"fmla v17.4s, v24.4s, v27.s[0]\n"
"fmla v21.4s, v24.4s, v26.s[0]\n"
"ldr q24, [x9, #0x0]\n"
@@ -1758,13 +1760,13 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 118b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 126f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v25.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v25.4s }, [x21]\n"
"ld1r { v24.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v25.4s\n"
"fmin v9.4s, v9.4s, v25.4s\n"
@@ -1804,95 +1806,95 @@ void a64_ffhybrid_fp32_mla_6x16 (
"tbz x14, #3, 130f\n"
"st1 { v8.4s }, [x13], #0x10\n"
"st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
"tbz x14, #2, 128f\n"
"st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
"tbz x14, #1, 127f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
"tbz x14, #0, 134f\n"
"st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
"b 134f\n"
"127:" // Height 4: Partial direct writeback: partial_1_12
"tbz x14, #0, 134f\n"
"str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
"b 134f\n"
"128:" // Height 4: Partial direct writeback: partial_2_8
"tbz x14, #1, 129f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
"tbz x14, #0, 134f\n"
"st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
"b 134f\n"
"129:" // Height 4: Partial direct writeback: partial_1_8
"tbz x14, #0, 134f\n"
"str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
"b 134f\n"
"130:" // Height 4: Partial direct writeback: partial_4_0
"tbz x14, #2, 132f\n"
"st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
"tbz x14, #1, 131f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
"tbz x14, #0, 134f\n"
"st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
"b 134f\n"
"131:" // Height 4: Partial direct writeback: partial_1_4
"tbz x14, #0, 134f\n"
"str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
"b 134f\n"
"132:" // Height 4: Partial direct writeback: partial_2_0
"tbz x14, #1, 133f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x14, #0, 134f\n"
"st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
"b 134f\n"
"133:" // Height 4: Partial direct writeback: partial_1_0
"str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
"134:" // Height 4: Partial direct writeback: Done
"b 136f\n"
"135:" // Height 4: Full writeback
@@ -1901,37 +1903,37 @@ void a64_ffhybrid_fp32_mla_6x16 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
"136:" // Height 4: Writeback done
"subs x14, x14, #0x10\n"
"bgt 104b\n"
"b 206f\n"
"137:" // Height 5
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"138:" // Height 5: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 139f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -1944,18 +1946,18 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cbz x15, 140f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1967,128 +1969,128 @@ void a64_ffhybrid_fp32_mla_6x16 (
"140:" // Height 5: no bias
"tbz %x[flags], #0, 150f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "cmp x14, #0x10\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
"bge 149f\n"
"tbz x14, #3, 144f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x14, #2, 142f\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
"tbz x14, #1, 141f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
"b 148f\n"
"141:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 148f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
"b 148f\n"
"142:" // Height 5: Partial accumulate: partial_2_8
"tbz x14, #1, 143f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
"b 148f\n"
"143:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 148f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
"b 148f\n"
"144:" // Height 5: Partial accumulate: partial_4_0
"tbz x14, #2, 146f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"tbz x14, #1, 145f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 148f\n"
"145:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 148f\n"
"ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"b 148f\n"
"146:" // Height 5: Partial accumulate: partial_2_0
"tbz x14, #1, 147f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 148f\n"
"147:" // Height 5: Partial accumulate: partial_1_0
"ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"148:" // Height 5: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 151f\n"
@@ -2097,22 +2099,22 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
"b 151f\n"
"150:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -2139,8 +2141,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"152:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 153f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2179,10 +2181,10 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x26, x26, #0x10\n"
+ "cmp x27, #0x8\n"
"add x25, x25, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
"ldr q29, [x10, #0x0]\n"
@@ -2404,19 +2406,19 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr s31, [x23], #0x4\n"
"ldr s30, [x22], #0x4\n"
"ldr q29, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "ldr q28, [x11, #0x0]\n"
+ "add x11, x11, #0x10\n"
"fmla v8.4s, v29.4s, v2.s[0]\n"
"fmla v12.4s, v29.4s, v1.s[0]\n"
- "ldr q28, [x11, #0x0]\n"
"fmla v16.4s, v29.4s, v0.s[0]\n"
"fmla v20.4s, v29.4s, v31.s[0]\n"
- "add x12, x12, #0x10\n"
"fmla v24.4s, v29.4s, v30.s[0]\n"
"ldr q29, [x10, #0x0]\n"
+ "add x10, x10, #0x10\n"
"fmla v9.4s, v28.4s, v2.s[0]\n"
- "add x11, x11, #0x10\n"
"fmla v13.4s, v28.4s, v1.s[0]\n"
"fmla v17.4s, v28.4s, v0.s[0]\n"
- "add x10, x10, #0x10\n"
"fmla v21.4s, v28.4s, v31.s[0]\n"
"fmla v25.4s, v28.4s, v30.s[0]\n"
"ldr q28, [x9, #0x0]\n"
@@ -2438,14 +2440,14 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 152b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 160f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v29.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v29.4s }, [x21]\n"
"ld1r { v28.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v29.4s\n"
"fmin v9.4s, v9.4s, v29.4s\n"
@@ -2493,111 +2495,111 @@ void a64_ffhybrid_fp32_mla_6x16 (
"tbz x14, #3, 164f\n"
"st1 { v8.4s }, [x13], #0x10\n"
"st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
"tbz x14, #2, 162f\n"
"st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
"tbz x14, #1, 161f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x14, #0, 168f\n"
"st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
"b 168f\n"
"161:" // Height 5: Partial direct writeback: partial_1_12
"tbz x14, #0, 168f\n"
"str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
"b 168f\n"
"162:" // Height 5: Partial direct writeback: partial_2_8
"tbz x14, #1, 163f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x14, #0, 168f\n"
"st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
"b 168f\n"
"163:" // Height 5: Partial direct writeback: partial_1_8
"tbz x14, #0, 168f\n"
"str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
"b 168f\n"
"164:" // Height 5: Partial direct writeback: partial_4_0
"tbz x14, #2, 166f\n"
"st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x14, #1, 165f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x14, #0, 168f\n"
"st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 168f\n"
"165:" // Height 5: Partial direct writeback: partial_1_4
"tbz x14, #0, 168f\n"
"str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 168f\n"
"166:" // Height 5: Partial direct writeback: partial_2_0
"tbz x14, #1, 167f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x14, #0, 168f\n"
"st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 168f\n"
"167:" // Height 5: Partial direct writeback: partial_1_0
"str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"168:" // Height 5: Partial direct writeback: Done
"b 170f\n"
"169:" // Height 5: Full writeback
@@ -2606,44 +2608,45 @@ void a64_ffhybrid_fp32_mla_6x16 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"170:" // Height 5: Writeback done
"subs x14, x14, #0x10\n"
"bgt 138b\n"
"b 206f\n"
"171:" // Height 6
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0x18\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"172:" // Height 6: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0xc\n"
"add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0xc\n"
"bgt 173f\n"
"cmp x14, #0x8\n"
"mov x9, x12\n"
@@ -2656,18 +2659,18 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cbz x15, 174f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x15, x15, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x15, x15, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -2683,145 +2686,145 @@ void a64_ffhybrid_fp32_mla_6x16 (
"174:" // Height 6: no bias
"tbz %x[flags], #0, 184f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "cmp x14, #0x10\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x14, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
"bge 183f\n"
"tbz x14, #3, 178f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x14, #2, 176f\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x14, #1, 175f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x14, #0, 182f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 182f\n"
"175:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 182f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 182f\n"
"176:" // Height 6: Partial accumulate: partial_2_8
"tbz x14, #1, 177f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x14, #0, 182f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 182f\n"
"177:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 182f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 182f\n"
"178:" // Height 6: Partial accumulate: partial_4_0
"tbz x14, #2, 180f\n"
"ld1 { v8.4s }, [x13], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x14, #1, 179f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x14, #0, 182f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 182f\n"
"179:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 182f\n"
"ldr s9, [x13, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 182f\n"
"180:" // Height 6: Partial accumulate: partial_2_0
"tbz x14, #1, 181f\n"
"ldr d8, [x13], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x14, #0, 182f\n"
"ld1 { v8.s }[2], [x13]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 182f\n"
"181:" // Height 6: Partial accumulate: partial_1_0
"ldr s8, [x13, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"182:" // Height 6: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 185f\n"
@@ -2830,26 +2833,26 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr q9, [x13, #0x10]\n"
"ldr q10, [x13, #0x20]\n"
"ldr q11, [x13, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"b 185f\n"
"184:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -2880,8 +2883,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"186:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 187f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2924,10 +2927,10 @@ void a64_ffhybrid_fp32_mla_6x16 (
"fmla v8.4s, v6.4s, v0.s[0]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
- "add x26, x26, #0x10\n"
+ "cmp x27, #0x8\n"
"add x25, x25, #0x10\n"
"fmla v24.4s, v6.4s, v4.s[0]\n"
"fmla v28.4s, v6.4s, v5.s[0]\n"
@@ -3186,12 +3189,12 @@ void a64_ffhybrid_fp32_mla_6x16 (
"ldr s2, [x21], #0x4\n"
"ldr q1, [x12, #0x0]\n"
"ldr q0, [x11, #0x0]\n"
+ "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
"fmla v8.4s, v1.4s, v7.s[0]\n"
"fmla v12.4s, v1.4s, v6.s[0]\n"
"fmla v16.4s, v1.4s, v5.s[0]\n"
"fmla v20.4s, v1.4s, v4.s[0]\n"
- "add x12, x12, #0x10\n"
- "add x11, x11, #0x10\n"
"fmla v24.4s, v1.4s, v3.s[0]\n"
"fmla v28.4s, v1.4s, v2.s[0]\n"
"ldr q1, [x10, #0x0]\n"
@@ -3223,15 +3226,15 @@ void a64_ffhybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 186b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"tbz %x[flags], #1, 194f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
@@ -3287,127 +3290,127 @@ void a64_ffhybrid_fp32_mla_6x16 (
"tbz x14, #3, 198f\n"
"st1 { v8.4s }, [x13], #0x10\n"
"st1 { v9.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x22], #0x10\n"
"tbz x14, #2, 196f\n"
"st1 { v10.4s }, [x13], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
+ "st1 { v30.4s }, [x22], #0x10\n"
"tbz x14, #1, 195f\n"
"str d11, [x13], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x14, #0, 202f\n"
"st1 { v11.s }[2], [x13]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
+ "st1 { v31.s }[2], [x22]\n"
"b 202f\n"
"195:" // Height 6: Partial direct writeback: partial_1_12
"tbz x14, #0, 202f\n"
"str s11, [x13, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
+ "str s31, [x22, #0x0]\n"
"b 202f\n"
"196:" // Height 6: Partial direct writeback: partial_2_8
"tbz x14, #1, 197f\n"
"str d10, [x13], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x14, #0, 202f\n"
"st1 { v10.s }[2], [x13]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
+ "st1 { v30.s }[2], [x22]\n"
"b 202f\n"
"197:" // Height 6: Partial direct writeback: partial_1_8
"tbz x14, #0, 202f\n"
"str s10, [x13, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
+ "str s30, [x22, #0x0]\n"
"b 202f\n"
"198:" // Height 6: Partial direct writeback: partial_4_0
"tbz x14, #2, 200f\n"
"st1 { v8.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
"tbz x14, #1, 199f\n"
"str d9, [x13], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x14, #0, 202f\n"
"st1 { v9.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
"b 202f\n"
"199:" // Height 6: Partial direct writeback: partial_1_4
"tbz x14, #0, 202f\n"
"str s9, [x13, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
"b 202f\n"
"200:" // Height 6: Partial direct writeback: partial_2_0
"tbz x14, #1, 201f\n"
"str d8, [x13], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x14, #0, 202f\n"
"st1 { v8.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "st1 { v28.s }[2], [x22]\n"
"b 202f\n"
"201:" // Height 6: Partial direct writeback: partial_1_0
"str s8, [x13, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
+ "str s28, [x22, #0x0]\n"
"202:" // Height 6: Partial direct writeback: Done
"b 204f\n"
"203:" // Height 6: Full writeback
@@ -3416,26 +3419,26 @@ void a64_ffhybrid_fp32_mla_6x16 (
"str q10, [x13, #0x20]\n"
"str q11, [x13, #0x30]\n"
"add x13, x13, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q28, [x22, #0x0]\n"
+ "str q29, [x22, #0x10]\n"
+ "str q30, [x22, #0x20]\n"
+ "str q31, [x22, #0x30]\n"
"204:" // Height 6: Writeback done
"subs x14, x14, #0x10\n"
"bgt 172b\n"
@@ -3451,8 +3454,8 @@ void a64_ffhybrid_fp32_mla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp
index ac3cbf943f..73c096ca00 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp
@@ -82,16 +82,14 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 4, 24, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 24, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
if (std::is_same<T, float>::value) {
switch (ci->get_cpu_model()) {
- case CPUModel::V1:
- return { 23.64 };
default:
- return { 16.89 };
+ return { 28.48 };
}
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
index 8961e615d7..1f7804453c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,18 +50,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -82,6 +83,7 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -103,13 +105,14 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"bgt 89f\n"
"beq 45f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x14\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
@@ -117,7 +120,6 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"add x27, x28, x20, LSL #1\n"
"add x20, x27, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x14\n"
"bgt 3f\n"
"cmp x14, #0x10\n"
"mov x27, x12\n"
@@ -136,19 +138,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"cbz x15, 4f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v14.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "zip2 v15.2d, v9.2d, v9.2d\n"
- "zip1 v9.2d, v9.2d, v9.2d\n"
"ldr q12, [x15, #0x40]\n"
"ldr q13, [x15, #0x50]\n"
+ "add x15, x15, #0x60\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x15, x15, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -282,8 +284,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"mov x26, #0x0\n"
"21:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 22f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -306,28 +308,32 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"blt 25f\n"
"24:" // Height 1: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x25, x25, #0x4\n"
+ "add x12, x12, #0x20\n"
+ "cmp x25, #0x8\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q24, [x10, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
"ldr q23, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q22, [x9, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
"ldr q21, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
"ldr q24, [x28, #0x0]\n"
".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
"ldr q22, [x27, #0x0]\n"
".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x27, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
- "add x12, x12, #0x20\n"
"ldr q4, [x12, #0x0]\n"
- "add x11, x11, #0x20\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
"ldr q5, [x12, #0x10]\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
@@ -335,40 +341,36 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
"ld1 { v0.4s }, [x24], #0x10\n"
"ldr q7, [x11, #0x10]\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
- "add x28, x28, #0x20\n"
- "add x27, x27, #0x20\n"
"bge 24b\n"
"25:" // Height 1: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x25, x25, #0x4\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q22, [x10, #0x0]\n"
+ "ldr q23, [x10, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q25, [x10, #0x10]\n"
+ "ldr q22, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q21, [x9, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
"ldr q24, [x9, #0x10]\n"
- ".inst 0x6e56ec0a // bfmmla v10.4s, v0.8h, v22.8h\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e57ec0a // bfmmla v10.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x0]\n"
- ".inst 0x6e59ec10 // bfmmla v16.4s, v0.8h, v25.8h\n"
+ ".inst 0x6e56ec10 // bfmmla v16.4s, v0.8h, v22.8h\n"
"ldr q22, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e55ec0b // bfmmla v11.4s, v0.8h, v21.8h\n"
"ldr q21, [x27, #0x0]\n"
".inst 0x6e58ec11 // bfmmla v17.4s, v0.8h, v24.8h\n"
"ldr q3, [x27, #0x10]\n"
- "sub x25, x25, #0x4\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e57ec0c // bfmmla v12.4s, v0.8h, v23.8h\n"
".inst 0x6e56ec12 // bfmmla v18.4s, v0.8h, v22.8h\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
".inst 0x6e55ec0d // bfmmla v13.4s, v0.8h, v21.8h\n"
".inst 0x6e43ec13 // bfmmla v19.4s, v0.8h, v3.8h\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
- "add x28, x28, #0x20\n"
- "add x27, x27, #0x20\n"
"26:" // Height 1: Multiply loop: Main loop skip
"cbz x25, 29f\n"
"cbz x25, 29f\n"
@@ -380,37 +382,37 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"27:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr s0, [x24, #0x0]\n"
"28:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q21, [x12, #0x0]\n"
- "ldr q30, [x12, #0x10]\n"
+ "ldr q23, [x12, #0x0]\n"
+ "ldr q29, [x12, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x6e55ec08 // bfmmla v8.4s, v0.8h, v21.8h\n"
- "ldr q21, [x11, #0x0]\n"
- "ldr q22, [x11, #0x10]\n"
- ".inst 0x6e5eec0e // bfmmla v14.4s, v0.8h, v30.8h\n"
- ".inst 0x6e55ec09 // bfmmla v9.4s, v0.8h, v21.8h\n"
- "ldr q21, [x10, #0x0]\n"
+ "add x12, x12, #0x20\n"
+ "ldr q22, [x11, #0x0]\n"
+ "ldr q21, [x11, #0x10]\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x6e57ec08 // bfmmla v8.4s, v0.8h, v23.8h\n"
+ "ldr q24, [x10, #0x0]\n"
+ ".inst 0x6e5dec0e // bfmmla v14.4s, v0.8h, v29.8h\n"
"ldr q23, [x10, #0x10]\n"
- ".inst 0x6e56ec0f // bfmmla v15.4s, v0.8h, v22.8h\n"
- ".inst 0x6e55ec0a // bfmmla v10.4s, v0.8h, v21.8h\n"
- "ldr q21, [x9, #0x0]\n"
- "ldr q22, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ ".inst 0x6e56ec09 // bfmmla v9.4s, v0.8h, v22.8h\n"
+ "ldr q22, [x9, #0x0]\n"
+ ".inst 0x6e55ec0f // bfmmla v15.4s, v0.8h, v21.8h\n"
+ "ldr q21, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
+ "ldr q24, [x28, #0x0]\n"
".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
- ".inst 0x6e55ec0b // bfmmla v11.4s, v0.8h, v21.8h\n"
- "ldr q21, [x28, #0x0]\n"
"ldr q23, [x28, #0x10]\n"
- ".inst 0x6e56ec11 // bfmmla v17.4s, v0.8h, v22.8h\n"
- ".inst 0x6e55ec0c // bfmmla v12.4s, v0.8h, v21.8h\n"
+ "add x28, x28, #0x20\n"
+ ".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
"ldr q22, [x27, #0x0]\n"
+ ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ ".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
- "add x28, x28, #0x20\n"
- "add x27, x27, #0x20\n"
"29:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -423,9 +425,9 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v12.2d, v12.2d, v18.2d\n"
"uzp1 v13.2d, v13.2d, v19.2d\n"
"tbz %x[flags], #1, 30f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v22.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v22.4s }, [x21]\n"
"ld1r { v21.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v22.4s\n"
"fmin v9.4s, v9.4s, v22.4s\n"
@@ -529,13 +531,14 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"b 178f\n"
"45:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"46:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x14\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
@@ -543,7 +546,6 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"add x27, x28, x20, LSL #1\n"
"add x20, x27, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x14\n"
"bgt 47f\n"
"cmp x14, #0x10\n"
"mov x27, x12\n"
@@ -562,19 +564,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"cbz x15, 48f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v14.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "zip2 v15.2d, v9.2d, v9.2d\n"
- "zip1 v9.2d, v9.2d, v9.2d\n"
"ldr q12, [x15, #0x40]\n"
"ldr q13, [x15, #0x50]\n"
+ "add x15, x15, #0x60\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x15, x15, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -584,117 +586,117 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"tbz %x[flags], #0, 63f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x14, #0x18\n"
- "add x23, x13, x20, LSL #2\n"
+ "add x24, x13, x20, LSL #2\n"
"bge 61f\n"
"tbz x14, #4, 52f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
"ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
"tbz x14, #2, 50f\n"
"ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
"tbz x14, #1, 49f\n"
"ldr d20, [x13], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
"tbz x14, #0, 60f\n"
"ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x24]\n"
"b 60f\n"
"49:" // Height 2: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x14, #0, 60f\n"
"ldr s20, [x13, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
"b 60f\n"
"50:" // Height 2: Partial accumulate: partial_2_16
"tbz x14, #1, 51f\n"
"ldr d13, [x13], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
"tbz x14, #0, 60f\n"
"ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x24]\n"
"b 60f\n"
"51:" // Height 2: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x14, #0, 60f\n"
"ldr s13, [x13, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
"b 60f\n"
"52:" // Height 2: Partial accumulate: partial_8_0
"tbz x14, #3, 56f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
"tbz x14, #2, 54f\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
"tbz x14, #1, 53f\n"
"ldr d12, [x13], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
"tbz x14, #0, 60f\n"
"ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x24]\n"
"b 60f\n"
"53:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 60f\n"
"ldr s12, [x13, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
"b 60f\n"
"54:" // Height 2: Partial accumulate: partial_2_8
"tbz x14, #1, 55f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
"tbz x14, #0, 60f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x24]\n"
"b 60f\n"
"55:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 60f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
"b 60f\n"
"56:" // Height 2: Partial accumulate: partial_4_0
"tbz x14, #2, 58f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"tbz x14, #1, 57f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
"tbz x14, #0, 60f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 60f\n"
"57:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 60f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 60f\n"
"58:" // Height 2: Partial accumulate: partial_2_0
"tbz x14, #1, 59f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
"tbz x14, #0, 60f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 60f\n"
"59:" // Height 2: Partial accumulate: partial_1_0
"ldr s9, [x13, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
"60:" // Height 2: Partial accumulate: Done
"sub x13, x13, x20\n"
@@ -706,12 +708,12 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"ldr q12, [x13, #0x30]\n"
"ldr q13, [x13, #0x40]\n"
"ldr q20, [x13, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
"62:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -743,8 +745,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"mov x26, #0x0\n"
"65:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 66f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -771,72 +773,72 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"blt 69f\n"
"68:" // Height 2: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x25, x25, #0x4\n"
+ "add x12, x12, #0x20\n"
+ "cmp x25, #0x8\n"
+ "add x11, x11, #0x20\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
"ld1 { v1.4s }, [x23], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q30, [x10, #0x0]\n"
+ "ldr q29, [x10, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
"ldr q23, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q22, [x9, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
"ldr q21, [x9, #0x10]\n"
- ".inst 0x6e5eec0a // bfmmla v10.4s, v0.8h, v30.8h\n"
- "ldr q2, [x28, #0x0]\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e5dec0a // bfmmla v10.4s, v0.8h, v29.8h\n"
+ "ldr q30, [x28, #0x0]\n"
".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
"ldr q22, [x27, #0x0]\n"
".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x27, #0x10]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
- "add x12, x12, #0x20\n"
- ".inst 0x6e42ec0c // bfmmla v12.4s, v0.8h, v2.8h\n"
+ "add x27, x27, #0x20\n"
+ ".inst 0x6e5eec0c // bfmmla v12.4s, v0.8h, v30.8h\n"
"ldr q4, [x12, #0x0]\n"
- "add x11, x11, #0x20\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
"ldr q5, [x12, #0x10]\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
"ldr q6, [x11, #0x0]\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
"ld1 { v0.4s }, [x24], #0x10\n"
- "add x10, x10, #0x20\n"
"ldr q7, [x11, #0x10]\n"
- "add x9, x9, #0x20\n"
- "add x28, x28, #0x20\n"
- "add x27, x27, #0x20\n"
"bge 68b\n"
"69:" // Height 2: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x25, x25, #0x4\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q24, [x10, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
"ldr q23, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q22, [x9, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
"ldr q21, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
"ldr q24, [x28, #0x0]\n"
".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
"ldr q22, [x27, #0x0]\n"
".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x27, #0x10]\n"
- "sub x25, x25, #0x4\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
- "add x28, x28, #0x20\n"
- "add x27, x27, #0x20\n"
"70:" // Height 2: Multiply loop: Main loop skip
"cbz x25, 73f\n"
"cbz x25, 73f\n"
@@ -854,35 +856,35 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"ldr q24, [x12, #0x0]\n"
"ldr q23, [x12, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "add x12, x12, #0x20\n"
"ldr q22, [x11, #0x0]\n"
"ldr q21, [x11, #0x10]\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e58ec08 // bfmmla v8.4s, v0.8h, v24.8h\n"
- ".inst 0x6e57ec0e // bfmmla v14.4s, v0.8h, v23.8h\n"
"ldr q24, [x10, #0x0]\n"
+ ".inst 0x6e57ec0e // bfmmla v14.4s, v0.8h, v23.8h\n"
"ldr q23, [x10, #0x10]\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e56ec09 // bfmmla v9.4s, v0.8h, v22.8h\n"
- ".inst 0x6e55ec0f // bfmmla v15.4s, v0.8h, v21.8h\n"
"ldr q22, [x9, #0x0]\n"
+ ".inst 0x6e55ec0f // bfmmla v15.4s, v0.8h, v21.8h\n"
"ldr q21, [x9, #0x10]\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
- ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
"ldr q24, [x28, #0x0]\n"
+ ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x10]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
- ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q22, [x27, #0x0]\n"
+ ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
- "add x12, x12, #0x20\n"
- "add x11, x11, #0x20\n"
- "add x10, x10, #0x20\n"
- "add x9, x9, #0x20\n"
- "add x28, x28, #0x20\n"
- "add x27, x27, #0x20\n"
"73:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -891,21 +893,21 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
- "add x23, x13, x20, LSL #2\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
"uzp2 v11.2d, v11.2d, v17.2d\n"
+ "add x24, x13, x20, LSL #2\n"
"uzp1 v17.2d, v12.2d, v18.2d\n"
"uzp2 v12.2d, v12.2d, v18.2d\n"
"uzp1 v18.2d, v13.2d, v19.2d\n"
"uzp2 v13.2d, v13.2d, v19.2d\n"
"tbz %x[flags], #1, 74f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v22.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v22.4s }, [x21]\n"
"ld1r { v21.4s }, [x20]\n"
"fmin v4.4s, v4.4s, v22.4s\n"
"fmin v14.4s, v14.4s, v22.4s\n"
@@ -939,99 +941,99 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"st1 { v14.4s }, [x13], #0x10\n"
"st1 { v15.4s }, [x13], #0x10\n"
"st1 { v16.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v11.4s }, [x24], #0x10\n"
"tbz x14, #2, 76f\n"
"st1 { v17.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
"tbz x14, #1, 75f\n"
"str d18, [x13], #0x8\n"
- "str d13, [x23], #0x8\n"
+ "str d13, [x24], #0x8\n"
"tbz x14, #0, 86f\n"
"st1 { v18.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x24]\n"
"b 86f\n"
"75:" // Height 2: Partial direct writeback: partial_1_20
"tbz x14, #0, 86f\n"
"str s18, [x13, #0x0]\n"
- "str s13, [x23, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
"b 86f\n"
"76:" // Height 2: Partial direct writeback: partial_2_16
"tbz x14, #1, 77f\n"
"str d17, [x13], #0x8\n"
- "str d12, [x23], #0x8\n"
+ "str d12, [x24], #0x8\n"
"tbz x14, #0, 86f\n"
"st1 { v17.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x24]\n"
"b 86f\n"
"77:" // Height 2: Partial direct writeback: partial_1_16
"tbz x14, #0, 86f\n"
"str s17, [x13, #0x0]\n"
- "str s12, [x23, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
"b 86f\n"
"78:" // Height 2: Partial direct writeback: partial_8_0
"tbz x14, #3, 82f\n"
"st1 { v4.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
"tbz x14, #2, 80f\n"
"st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
"tbz x14, #1, 79f\n"
"str d16, [x13], #0x8\n"
- "str d11, [x23], #0x8\n"
+ "str d11, [x24], #0x8\n"
"tbz x14, #0, 86f\n"
"st1 { v16.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x24]\n"
"b 86f\n"
"79:" // Height 2: Partial direct writeback: partial_1_12
"tbz x14, #0, 86f\n"
"str s16, [x13, #0x0]\n"
- "str s11, [x23, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
"b 86f\n"
"80:" // Height 2: Partial direct writeback: partial_2_8
"tbz x14, #1, 81f\n"
"str d15, [x13], #0x8\n"
- "str d10, [x23], #0x8\n"
+ "str d10, [x24], #0x8\n"
"tbz x14, #0, 86f\n"
"st1 { v15.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x24]\n"
"b 86f\n"
"81:" // Height 2: Partial direct writeback: partial_1_8
"tbz x14, #0, 86f\n"
"str s15, [x13, #0x0]\n"
- "str s10, [x23, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
"b 86f\n"
"82:" // Height 2: Partial direct writeback: partial_4_0
"tbz x14, #2, 84f\n"
"st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
"tbz x14, #1, 83f\n"
"str d14, [x13], #0x8\n"
- "str d9, [x23], #0x8\n"
+ "str d9, [x24], #0x8\n"
"tbz x14, #0, 86f\n"
"st1 { v14.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x24]\n"
"b 86f\n"
"83:" // Height 2: Partial direct writeback: partial_1_4
"tbz x14, #0, 86f\n"
"str s14, [x13, #0x0]\n"
- "str s9, [x23, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
"b 86f\n"
"84:" // Height 2: Partial direct writeback: partial_2_0
"tbz x14, #1, 85f\n"
"str d4, [x13], #0x8\n"
- "str d8, [x23], #0x8\n"
+ "str d8, [x24], #0x8\n"
"tbz x14, #0, 86f\n"
"st1 { v4.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x24]\n"
"b 86f\n"
"85:" // Height 2: Partial direct writeback: partial_1_0
"str s4, [x13, #0x0]\n"
- "str s8, [x23, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
"86:" // Height 2: Partial direct writeback: Done
"b 88f\n"
"87:" // Height 2: Full writeback
@@ -1042,25 +1044,26 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"str q17, [x13, #0x40]\n"
"str q18, [x13, #0x50]\n"
"add x13, x13, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q12, [x24, #0x40]\n"
+ "str q13, [x24, #0x50]\n"
"88:" // Height 2: Writeback done
"subs x14, x14, #0x18\n"
"bgt 46b\n"
"b 178f\n"
"89:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"90:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x14\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
@@ -1068,7 +1071,6 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"add x27, x28, x20, LSL #1\n"
"add x20, x27, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x14\n"
"bgt 91f\n"
"cmp x14, #0x10\n"
"mov x27, x12\n"
@@ -1087,19 +1089,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"cbz x15, 92f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v14.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "zip2 v15.2d, v9.2d, v9.2d\n"
- "zip1 v9.2d, v9.2d, v9.2d\n"
"ldr q12, [x15, #0x40]\n"
"ldr q13, [x15, #0x50]\n"
+ "add x15, x15, #0x60\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x15, x15, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -1120,147 +1122,147 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"92:" // Height 3: no bias
"tbz %x[flags], #0, 107f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
"cmp x14, #0x18\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x24, x13, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"bge 105f\n"
"tbz x14, #4, 96f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
"ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"tbz x14, #2, 94f\n"
"ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x14, #1, 93f\n"
"ldr d20, [x13], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
- "ldr d4, [x22], #0x8\n"
+ "ldr d4, [x23], #0x8\n"
"tbz x14, #0, 104f\n"
"ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
+ "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v4.s }[2], [x23]\n"
"b 104f\n"
"93:" // Height 3: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x14, #0, 104f\n"
"ldr s20, [x13, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
+ "ldr s4, [x23, #0x0]\n"
"b 104f\n"
"94:" // Height 3: Partial accumulate: partial_2_16
"tbz x14, #1, 95f\n"
"ldr d13, [x13], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x14, #0, 104f\n"
"ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 104f\n"
"95:" // Height 3: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x14, #0, 104f\n"
"ldr s13, [x13, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"b 104f\n"
"96:" // Height 3: Partial accumulate: partial_8_0
"tbz x14, #3, 100f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
"tbz x14, #2, 98f\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
"tbz x14, #1, 97f\n"
"ldr d12, [x13], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x14, #0, 104f\n"
"ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 104f\n"
"97:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 104f\n"
"ldr s12, [x13, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"b 104f\n"
"98:" // Height 3: Partial accumulate: partial_2_8
"tbz x14, #1, 99f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
- "ldr d23, [x22], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
"tbz x14, #0, 104f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
"b 104f\n"
"99:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 104f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
"b 104f\n"
"100:" // Height 3: Partial accumulate: partial_4_0
"tbz x14, #2, 102f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"tbz x14, #1, 101f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
"tbz x14, #0, 104f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
"b 104f\n"
"101:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 104f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
"b 104f\n"
"102:" // Height 3: Partial accumulate: partial_2_0
"tbz x14, #1, 103f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
- "ldr d21, [x22], #0x8\n"
+ "ldr d21, [x23], #0x8\n"
"tbz x14, #0, 104f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v21.s }[2], [x23]\n"
"b 104f\n"
"103:" // Height 3: Partial accumulate: partial_1_0
"ldr s9, [x13, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s21, [x22, #0x0]\n"
+ "ldr s21, [x23, #0x0]\n"
"104:" // Height 3: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 106f\n"
@@ -1271,18 +1273,18 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"ldr q12, [x13, #0x30]\n"
"ldr q13, [x13, #0x40]\n"
"ldr q20, [x13, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q21, [x22, #0x0]\n"
- "ldr q22, [x22, #0x10]\n"
- "ldr q23, [x22, #0x20]\n"
- "ldr q24, [x22, #0x30]\n"
- "ldr q25, [x22, #0x40]\n"
- "ldr q4, [x22, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
+ "ldr q21, [x23, #0x0]\n"
+ "ldr q22, [x23, #0x10]\n"
+ "ldr q23, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q25, [x23, #0x40]\n"
+ "ldr q4, [x23, #0x50]\n"
"106:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -1338,8 +1340,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"mov x26, #0x0\n"
"109:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 110f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1370,42 +1372,42 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"blt 113f\n"
"112:" // Height 3: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "sub x25, x25, #0x4\n"
+ "add x12, x12, #0x20\n"
+ "cmp x25, #0x8\n"
+ "add x11, x11, #0x20\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
"ld1 { v1.4s }, [x23], #0x10\n"
- ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q4, [x10, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q5, [x10, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "sub x25, x25, #0x4\n"
- ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x9, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "cmp x25, #0x8\n"
- ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q3, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "add x12, x12, #0x20\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x0]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
- "add x11, x11, #0x20\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "add x10, x10, #0x20\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q6, [x27, #0x0]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e43ec11 // bfmmla v17.4s, v0.8h, v3.8h\n"
- "add x9, x9, #0x20\n"
".inst 0x6e43ec5d // bfmmla v29.4s, v2.8h, v3.8h\n"
"ldr q3, [x27, #0x10]\n"
- "add x28, x28, #0x20\n"
- ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
"add x27, x27, #0x20\n"
+ ".inst 0x6e44ec0c // bfmmla v12.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec58 // bfmmla v24.4s, v2.8h, v4.8h\n"
"ldr q4, [x12, #0x0]\n"
".inst 0x6e45ec12 // bfmmla v18.4s, v0.8h, v5.8h\n"
@@ -1422,35 +1424,35 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"bge 112b\n"
"113:" // Height 3: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "sub x25, x25, #0x4\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "sub x25, x25, #0x4\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q3, [x10, #0x0]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q4, [x10, #0x10]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "add x12, x12, #0x20\n"
- ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x9, #0x0]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "add x11, x11, #0x20\n"
- ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q1, [x9, #0x10]\n"
- ".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
"add x10, x10, #0x20\n"
+ "add x9, x9, #0x20\n"
+ ".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
".inst 0x6e43ec56 // bfmmla v22.4s, v2.8h, v3.8h\n"
"ldr q5, [x28, #0x0]\n"
".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
- "add x9, x9, #0x20\n"
".inst 0x6e44ec5c // bfmmla v28.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x10]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "add x28, x28, #0x20\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q3, [x27, #0x0]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e41ec11 // bfmmla v17.4s, v0.8h, v1.8h\n"
".inst 0x6e41ec5d // bfmmla v29.4s, v2.8h, v1.8h\n"
"ldr q1, [x27, #0x10]\n"
@@ -1483,41 +1485,41 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"ldr q5, [x12, #0x0]\n"
"ldr q4, [x12, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- "ldr q3, [x11, #0x0]\n"
- "ldr q1, [x11, #0x10]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- ".inst 0x6e45ec08 // bfmmla v8.4s, v0.8h, v5.8h\n"
+ "ldr q3, [x11, #0x0]\n"
+ "ldr q6, [x11, #0x10]\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e45ec54 // bfmmla v20.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e44ec5a // bfmmla v26.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e43ec55 // bfmmla v21.4s, v2.8h, v3.8h\n"
+ ".inst 0x6e46ec5b // bfmmla v27.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e45ec08 // bfmmla v8.4s, v0.8h, v5.8h\n"
"ldr q5, [x10, #0x0]\n"
".inst 0x6e44ec0e // bfmmla v14.4s, v0.8h, v4.8h\n"
- "add x12, x12, #0x20\n"
- ".inst 0x6e44ec5a // bfmmla v26.4s, v2.8h, v4.8h\n"
"ldr q4, [x10, #0x10]\n"
".inst 0x6e43ec09 // bfmmla v9.4s, v0.8h, v3.8h\n"
- "add x11, x11, #0x20\n"
- ".inst 0x6e43ec55 // bfmmla v21.4s, v2.8h, v3.8h\n"
"ldr q3, [x9, #0x0]\n"
- ".inst 0x6e41ec0f // bfmmla v15.4s, v0.8h, v1.8h\n"
- "add x10, x10, #0x20\n"
- ".inst 0x6e41ec5b // bfmmla v27.4s, v2.8h, v1.8h\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
"ldr q1, [x9, #0x10]\n"
- ".inst 0x6e45ec0a // bfmmla v10.4s, v0.8h, v5.8h\n"
+ "add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
+ ".inst 0x6e45ec0a // bfmmla v10.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec56 // bfmmla v22.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x0]\n"
".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec5c // bfmmla v28.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x10]\n"
".inst 0x6e43ec0b // bfmmla v11.4s, v0.8h, v3.8h\n"
- "add x28, x28, #0x20\n"
".inst 0x6e43ec57 // bfmmla v23.4s, v2.8h, v3.8h\n"
"ldr q3, [x27, #0x0]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e41ec11 // bfmmla v17.4s, v0.8h, v1.8h\n"
".inst 0x6e41ec5d // bfmmla v29.4s, v2.8h, v1.8h\n"
"ldr q1, [x27, #0x10]\n"
- ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
"add x27, x27, #0x20\n"
+ ".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec12 // bfmmla v18.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec5e // bfmmla v30.4s, v2.8h, v4.8h\n"
@@ -1531,16 +1533,16 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"cmp x26, x20\n"
"bne 109b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
+ "add x24, x13, x20, LSL #2\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
"uzp2 v11.2d, v11.2d, v17.2d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v17.2d, v12.2d, v18.2d\n"
"uzp2 v12.2d, v12.2d, v18.2d\n"
"uzp1 v18.2d, v13.2d, v19.2d\n"
@@ -1552,9 +1554,9 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v24.2d, v24.2d, v30.2d\n"
"uzp1 v25.2d, v25.2d, v31.2d\n"
"tbz %x[flags], #1, 118f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v4.4s, v4.4s, v1.4s\n"
"fmin v14.4s, v14.4s, v1.4s\n"
@@ -1600,126 +1602,126 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"st1 { v14.4s }, [x13], #0x10\n"
"st1 { v15.4s }, [x13], #0x10\n"
"st1 { v16.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v11.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
"tbz x14, #2, 120f\n"
"st1 { v17.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x14, #1, 119f\n"
"str d18, [x13], #0x8\n"
- "str d13, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x14, #0, 130f\n"
"st1 { v18.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 130f\n"
"119:" // Height 3: Partial direct writeback: partial_1_20
"tbz x14, #0, 130f\n"
"str s18, [x13, #0x0]\n"
- "str s13, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 130f\n"
"120:" // Height 3: Partial direct writeback: partial_2_16
"tbz x14, #1, 121f\n"
"str d17, [x13], #0x8\n"
- "str d12, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x14, #0, 130f\n"
"st1 { v17.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 130f\n"
"121:" // Height 3: Partial direct writeback: partial_1_16
"tbz x14, #0, 130f\n"
"str s17, [x13, #0x0]\n"
- "str s12, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"b 130f\n"
"122:" // Height 3: Partial direct writeback: partial_8_0
"tbz x14, #3, 126f\n"
"st1 { v4.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
"tbz x14, #2, 124f\n"
"st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
"tbz x14, #1, 123f\n"
"str d16, [x13], #0x8\n"
- "str d11, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
"tbz x14, #0, 130f\n"
"st1 { v16.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
"b 130f\n"
"123:" // Height 3: Partial direct writeback: partial_1_12
"tbz x14, #0, 130f\n"
"str s16, [x13, #0x0]\n"
- "str s11, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
"b 130f\n"
"124:" // Height 3: Partial direct writeback: partial_2_8
"tbz x14, #1, 125f\n"
"str d15, [x13], #0x8\n"
- "str d10, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
"tbz x14, #0, 130f\n"
"st1 { v15.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
"b 130f\n"
"125:" // Height 3: Partial direct writeback: partial_1_8
"tbz x14, #0, 130f\n"
"str s15, [x13, #0x0]\n"
- "str s10, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
"b 130f\n"
"126:" // Height 3: Partial direct writeback: partial_4_0
"tbz x14, #2, 128f\n"
"st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
"tbz x14, #1, 127f\n"
"str d14, [x13], #0x8\n"
- "str d9, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
"tbz x14, #0, 130f\n"
"st1 { v14.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
"b 130f\n"
"127:" // Height 3: Partial direct writeback: partial_1_4
"tbz x14, #0, 130f\n"
"str s14, [x13, #0x0]\n"
- "str s9, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
"b 130f\n"
"128:" // Height 3: Partial direct writeback: partial_2_0
"tbz x14, #1, 129f\n"
"str d4, [x13], #0x8\n"
- "str d8, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
"tbz x14, #0, 130f\n"
"st1 { v4.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
"b 130f\n"
"129:" // Height 3: Partial direct writeback: partial_1_0
"str s4, [x13, #0x0]\n"
- "str s8, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
"130:" // Height 3: Partial direct writeback: Done
"b 132f\n"
"131:" // Height 3: Full writeback
@@ -1730,34 +1732,36 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"str q17, [x13, #0x40]\n"
"str q18, [x13, #0x50]\n"
"add x13, x13, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q12, [x24, #0x40]\n"
+ "str q13, [x24, #0x50]\n"
+ "str q20, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q22, [x23, #0x20]\n"
+ "str q23, [x23, #0x30]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q25, [x23, #0x50]\n"
"132:" // Height 3: Writeback done
"subs x14, x14, #0x18\n"
"bgt 90b\n"
"b 178f\n"
"133:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0x10\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x10\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"134:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x14, #0x14\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
@@ -1765,7 +1769,6 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"add x27, x28, x20, LSL #1\n"
"add x20, x27, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x14, #0x14\n"
"bgt 135f\n"
"cmp x14, #0x10\n"
"mov x27, x12\n"
@@ -1784,19 +1787,19 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"cbz x15, 136f\n"
"ldr q8, [x15, #0x0]\n"
"ldr q9, [x15, #0x10]\n"
- "zip2 v14.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x15, #0x20]\n"
"ldr q11, [x15, #0x30]\n"
- "zip2 v15.2d, v9.2d, v9.2d\n"
- "zip1 v9.2d, v9.2d, v9.2d\n"
"ldr q12, [x15, #0x40]\n"
"ldr q13, [x15, #0x50]\n"
+ "add x15, x15, #0x60\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x15, x15, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -1817,175 +1820,175 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"136:" // Height 4: no bias
"tbz %x[flags], #0, 151f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x14, #0x18\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x13, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"bge 149f\n"
"tbz x14, #4, 140f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v27.4s }, [x22], #0x10\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v12.4s }, [x13], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x14, #2, 138f\n"
"ld1 { v13.4s }, [x13], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x14, #1, 137f\n"
"ldr d20, [x13], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
- "ldr d4, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d4, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v20.s }[2], [x13]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v4.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 148f\n"
"137:" // Height 4: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x14, #0, 148f\n"
"ldr s20, [x13, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
+ "ldr s4, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 148f\n"
"138:" // Height 4: Partial accumulate: partial_2_16
"tbz x14, #1, 139f\n"
"ldr d13, [x13], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v13.s }[2], [x13]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 148f\n"
"139:" // Height 4: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x14, #0, 148f\n"
"ldr s13, [x13, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 148f\n"
"140:" // Height 4: Partial accumulate: partial_8_0
"tbz x14, #3, 144f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x13], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v27.4s }, [x22], #0x10\n"
"tbz x14, #2, 142f\n"
"ld1 { v11.4s }, [x13], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x14, #1, 141f\n"
"ldr d12, [x13], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v12.s }[2], [x13]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 148f\n"
"141:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x14, #0, 148f\n"
"ldr s12, [x13, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 148f\n"
"142:" // Height 4: Partial accumulate: partial_2_8
"tbz x14, #1, 143f\n"
"ldr d11, [x13], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v11.s }[2], [x13]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 148f\n"
"143:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x14, #0, 148f\n"
"ldr s11, [x13, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"b 148f\n"
"144:" // Height 4: Partial accumulate: partial_4_0
"tbz x14, #2, 146f\n"
"ld1 { v9.4s }, [x13], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"tbz x14, #1, 145f\n"
"ldr d10, [x13], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d27, [x22], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v10.s }[2], [x13]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v27.s }[2], [x22]\n"
"b 148f\n"
"145:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x14, #0, 148f\n"
"ldr s10, [x13, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
+ "ldr s27, [x22, #0x0]\n"
"b 148f\n"
"146:" // Height 4: Partial accumulate: partial_2_0
"tbz x14, #1, 147f\n"
"ldr d9, [x13], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
+ "ldr d21, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
"tbz x14, #0, 148f\n"
"ld1 { v9.s }[2], [x13]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v26.s }[2], [x22]\n"
"b 148f\n"
"147:" // Height 4: Partial accumulate: partial_1_0
"ldr s9, [x13, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "ldr s21, [x23, #0x0]\n"
+ "ldr s26, [x22, #0x0]\n"
"148:" // Height 4: Partial accumulate: Done
"sub x13, x13, x20\n"
"b 150f\n"
@@ -1996,24 +1999,24 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"ldr q12, [x13, #0x30]\n"
"ldr q13, [x13, #0x40]\n"
"ldr q20, [x13, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q21, [x22, #0x0]\n"
- "ldr q22, [x22, #0x10]\n"
- "ldr q23, [x22, #0x20]\n"
- "ldr q24, [x22, #0x30]\n"
- "ldr q25, [x22, #0x40]\n"
- "ldr q4, [x22, #0x50]\n"
- "ldr q26, [x21, #0x0]\n"
- "ldr q27, [x21, #0x10]\n"
- "ldr q28, [x21, #0x20]\n"
- "ldr q29, [x21, #0x30]\n"
- "ldr q30, [x21, #0x40]\n"
- "ldr q31, [x21, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
+ "ldr q21, [x23, #0x0]\n"
+ "ldr q22, [x23, #0x10]\n"
+ "ldr q23, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q25, [x23, #0x40]\n"
+ "ldr q4, [x23, #0x50]\n"
+ "ldr q26, [x22, #0x0]\n"
+ "ldr q27, [x22, #0x10]\n"
+ "ldr q28, [x22, #0x20]\n"
+ "ldr q29, [x22, #0x30]\n"
+ "ldr q30, [x22, #0x40]\n"
+ "ldr q31, [x22, #0x50]\n"
"150:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -2069,8 +2072,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"mov x26, #0x0\n"
"153:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 154f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2107,26 +2110,26 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x25, x25, #0x4\n"
+ "add x12, x12, #0x20\n"
"cmp x25, #0x8\n"
+ "add x11, x11, #0x20\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
"ld1 { v1.4s }, [x23], #0x10\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
"ld1 { v3.4s }, [x21], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
"ldr q4, [x10, #0x0]\n"
- "add x12, x12, #0x20\n"
- ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q5, [x10, #0x10]\n"
- "add x11, x11, #0x20\n"
- ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x9, #0x0]\n"
- "add x10, x10, #0x20\n"
- ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q7, [x9, #0x10]\n"
+ "add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
@@ -2134,10 +2137,10 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q6, [x27, #0x0]\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e47ec11 // bfmmla v17.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5d // bfmmla v29.4s, v2.8h, v7.8h\n"
"ldr q7, [x27, #0x10]\n"
@@ -2162,31 +2165,31 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x25, x25, #0x4\n"
"add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "add x11, x11, #0x20\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
"ldr q3, [x10, #0x0]\n"
- ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q4, [x10, #0x10]\n"
- ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "add x10, x10, #0x20\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x9, #0x0]\n"
- ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q1, [x9, #0x10]\n"
- ".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
+ "add x10, x10, #0x20\n"
"add x9, x9, #0x20\n"
+ ".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
".inst 0x6e43ec56 // bfmmla v22.4s, v2.8h, v3.8h\n"
"ldr q5, [x28, #0x0]\n"
".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec5c // bfmmla v28.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x10]\n"
- ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
"add x28, x28, #0x20\n"
+ ".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q3, [x27, #0x0]\n"
".inst 0x6e41ec11 // bfmmla v17.4s, v0.8h, v1.8h\n"
@@ -2227,39 +2230,39 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"ldr q7, [x11, #0x0]\n"
"ldr q6, [x11, #0x10]\n"
+ "add x12, x12, #0x20\n"
+ "add x11, x11, #0x20\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e45ec08 // bfmmla v8.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec54 // bfmmla v20.4s, v2.8h, v5.8h\n"
"ldr q5, [x10, #0x0]\n"
- "add x12, x12, #0x20\n"
".inst 0x6e44ec0e // bfmmla v14.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec5a // bfmmla v26.4s, v2.8h, v4.8h\n"
"ldr q4, [x10, #0x10]\n"
- "add x11, x11, #0x20\n"
+ "add x10, x10, #0x20\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
"ldr q3, [x9, #0x0]\n"
- "add x10, x10, #0x20\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec5b // bfmmla v27.4s, v2.8h, v6.8h\n"
"ldr q1, [x9, #0x10]\n"
- "add x9, x9, #0x20\n"
".inst 0x6e45ec0a // bfmmla v10.4s, v0.8h, v5.8h\n"
+ "add x9, x9, #0x20\n"
".inst 0x6e45ec56 // bfmmla v22.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x0]\n"
".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec5c // bfmmla v28.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x10]\n"
- "add x28, x28, #0x20\n"
".inst 0x6e43ec0b // bfmmla v11.4s, v0.8h, v3.8h\n"
+ "add x28, x28, #0x20\n"
".inst 0x6e43ec57 // bfmmla v23.4s, v2.8h, v3.8h\n"
"ldr q3, [x27, #0x0]\n"
".inst 0x6e41ec11 // bfmmla v17.4s, v0.8h, v1.8h\n"
".inst 0x6e41ec5d // bfmmla v29.4s, v2.8h, v1.8h\n"
"ldr q1, [x27, #0x10]\n"
- "add x27, x27, #0x20\n"
".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
+ "add x27, x27, #0x20\n"
".inst 0x6e45ec58 // bfmmla v24.4s, v2.8h, v5.8h\n"
".inst 0x6e44ec12 // bfmmla v18.4s, v0.8h, v4.8h\n"
".inst 0x6e44ec5e // bfmmla v30.4s, v2.8h, v4.8h\n"
@@ -2273,17 +2276,17 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"cmp x26, x20\n"
"bne 153b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
+ "add x24, x13, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
"uzp2 v11.2d, v11.2d, v17.2d\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v17.2d, v12.2d, v18.2d\n"
"uzp2 v12.2d, v12.2d, v18.2d\n"
"uzp1 v18.2d, v13.2d, v19.2d\n"
@@ -2301,9 +2304,9 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v30.2d, v25.2d, v31.2d\n"
"uzp2 v25.2d, v25.2d, v31.2d\n"
"tbz %x[flags], #1, 162f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v4.4s, v4.4s, v1.4s\n"
"fmin v14.4s, v14.4s, v1.4s\n"
@@ -2361,153 +2364,153 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"st1 { v14.4s }, [x13], #0x10\n"
"st1 { v15.4s }, [x13], #0x10\n"
"st1 { v16.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v27.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "st1 { v21.4s }, [x21], #0x10\n"
- "st1 { v22.4s }, [x21], #0x10\n"
- "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v11.4s }, [x24], #0x10\n"
+ "st1 { v19.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
+ "st1 { v27.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x22], #0x10\n"
"tbz x14, #2, 164f\n"
"st1 { v17.4s }, [x13], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v29.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
"tbz x14, #1, 163f\n"
"str d18, [x13], #0x8\n"
- "str d13, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
+ "str d25, [x22], #0x8\n"
"tbz x14, #0, 174f\n"
"st1 { v18.s }[2], [x13]\n"
- "st1 { v13.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
+ "st1 { v25.s }[2], [x22]\n"
"b 174f\n"
"163:" // Height 4: Partial direct writeback: partial_1_20
"tbz x14, #0, 174f\n"
"str s18, [x13, #0x0]\n"
- "str s13, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
+ "str s25, [x22, #0x0]\n"
"b 174f\n"
"164:" // Height 4: Partial direct writeback: partial_2_16
"tbz x14, #1, 165f\n"
"str d17, [x13], #0x8\n"
- "str d12, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d29, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
"tbz x14, #0, 174f\n"
"st1 { v17.s }[2], [x13]\n"
- "st1 { v12.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v29.s }[2], [x23]\n"
+ "st1 { v24.s }[2], [x22]\n"
"b 174f\n"
"165:" // Height 4: Partial direct writeback: partial_1_16
"tbz x14, #0, 174f\n"
"str s17, [x13, #0x0]\n"
- "str s12, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s29, [x23, #0x0]\n"
+ "str s24, [x22, #0x0]\n"
"b 174f\n"
"166:" // Height 4: Partial direct writeback: partial_8_0
"tbz x14, #3, 170f\n"
"st1 { v4.4s }, [x13], #0x10\n"
"st1 { v14.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v19.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
"tbz x14, #2, 168f\n"
"st1 { v15.4s }, [x13], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v27.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v27.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
"tbz x14, #1, 167f\n"
"str d16, [x13], #0x8\n"
- "str d11, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
"tbz x14, #0, 174f\n"
"st1 { v16.s }[2], [x13]\n"
- "st1 { v11.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v23.s }[2], [x21]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
"b 174f\n"
"167:" // Height 4: Partial direct writeback: partial_1_12
"tbz x14, #0, 174f\n"
"str s16, [x13, #0x0]\n"
- "str s11, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s23, [x21, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
"b 174f\n"
"168:" // Height 4: Partial direct writeback: partial_2_8
"tbz x14, #1, 169f\n"
"str d15, [x13], #0x8\n"
- "str d10, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d22, [x21], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
"tbz x14, #0, 174f\n"
"st1 { v15.s }[2], [x13]\n"
- "st1 { v10.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v22.s }[2], [x21]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
"b 174f\n"
"169:" // Height 4: Partial direct writeback: partial_1_8
"tbz x14, #0, 174f\n"
"str s15, [x13, #0x0]\n"
- "str s10, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s22, [x21, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
"b 174f\n"
"170:" // Height 4: Partial direct writeback: partial_4_0
"tbz x14, #2, 172f\n"
"st1 { v4.4s }, [x13], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v19.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
"tbz x14, #1, 171f\n"
"str d14, [x13], #0x8\n"
- "str d9, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d21, [x21], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
"tbz x14, #0, 174f\n"
"st1 { v14.s }[2], [x13]\n"
- "st1 { v9.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v21.s }[2], [x21]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
"b 174f\n"
"171:" // Height 4: Partial direct writeback: partial_1_4
"tbz x14, #0, 174f\n"
"str s14, [x13, #0x0]\n"
- "str s9, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s21, [x21, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
"b 174f\n"
"172:" // Height 4: Partial direct writeback: partial_2_0
"tbz x14, #1, 173f\n"
"str d4, [x13], #0x8\n"
- "str d8, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "str d20, [x21], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
"tbz x14, #0, 174f\n"
"st1 { v4.s }[2], [x13]\n"
- "st1 { v8.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
- "st1 { v20.s }[2], [x21]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
"b 174f\n"
"173:" // Height 4: Partial direct writeback: partial_1_0
"str s4, [x13, #0x0]\n"
- "str s8, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
- "str s20, [x21, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
"174:" // Height 4: Partial direct writeback: Done
"b 176f\n"
"175:" // Height 4: Full writeback
@@ -2518,24 +2521,24 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"str q17, [x13, #0x40]\n"
"str q18, [x13, #0x50]\n"
"add x13, x13, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
- "str q19, [x22, #0x0]\n"
- "str q26, [x22, #0x10]\n"
- "str q27, [x22, #0x20]\n"
- "str q28, [x22, #0x30]\n"
- "str q29, [x22, #0x40]\n"
- "str q30, [x22, #0x50]\n"
- "str q20, [x21, #0x0]\n"
- "str q21, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q23, [x21, #0x30]\n"
- "str q24, [x21, #0x40]\n"
- "str q25, [x21, #0x50]\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q12, [x24, #0x40]\n"
+ "str q13, [x24, #0x50]\n"
+ "str q19, [x23, #0x0]\n"
+ "str q26, [x23, #0x10]\n"
+ "str q27, [x23, #0x20]\n"
+ "str q28, [x23, #0x30]\n"
+ "str q29, [x23, #0x40]\n"
+ "str q30, [x23, #0x50]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x22, #0x40]\n"
+ "str q25, [x22, #0x50]\n"
"176:" // Height 4: Writeback done
"subs x14, x14, #0x18\n"
"bgt 134b\n"
@@ -2551,8 +2554,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_4x24 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"178:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
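
Aside for readers of the constraint-list change just above: output_ptr and bias stop being separate inline-asm operands and travel inside the KernelArgs block instead, recovered with offsetof once inside the asm (hence the new offsetof_output_ptr and offsetof_bias entries). A minimal sketch of that pattern follows; the struct, function, and register choices are illustrative, not the library's exact definitions.

#include <cstddef>

struct KernelArgsSketch {
    void        *output_ptr = {};
    const float *bias       = {};
};

static void store_one(const KernelArgsSketch &ka, float value) {
#if defined(__aarch64__) && defined(__GNUC__)
    __asm__ __volatile__(
        // Fetch the pointer out of the args block, exactly as the kernel
        // does with %[offsetof_output_ptr] above, then store through it.
        "ldr x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        "str %s[value], [x20]\n"
        :
        : [args_ptr] "r" (&ka),
          [offsetof_output_ptr] "I" (offsetof(KernelArgsSketch, output_ptr)),
          [value] "w" (value)
        : "memory", "x20");
#else
    *static_cast<float *>(ka.output_ptr) = value;
#endif
}

Folding pointers into one args block keeps the operand list short and, presumably, frees general-purpose registers for addressing inside the kernel body.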
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16.hpp
index 98f7fc9403..376920a17b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16.hpp
@@ -82,7 +82,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
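
The transforms line above is the refactor that recurs in every kernel header in this patch: StdTransformsFixed now takes distinct LHS and RHS operand types rather than a single operand type. A cut-down, purely illustrative analogue (the real template carries more parameters and members than shown here):

template <typename TLhs, typename TRhs, typename TResult,
          unsigned int Height, unsigned int Width,
          unsigned int KBlock = 1, bool Quantized = false>
struct StdTransformsFixedSketch {
    // Distinct LHS/RHS types let a mixed kernel such as fp32bf16fp32
    // declare float on one side and bfloat16 on the other; same-type
    // kernels simply name the type twice.
    using lhs_operand_type = TLhs;
    using rhs_operand_type = TRhs;
    using result_type      = TResult;
};

// Usage mirroring the headers below (type repeated for a same-type kernel):
using Fp32Mla8x12TransformsSketch =
    StdTransformsFixedSketch<float, float, float, 8, 12, 1>;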
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16/generic.cpp
index 9ab4aa98f9..9497508289 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16/generic.cpp
@@ -50,8 +50,8 @@ void a64_ffhybrid_fp32bf16fp32_mmla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
- void *output_ptr = nullptr;
- const float *bias = nullptr;
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp
index 745f89eff6..eb08de0ade 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,7 +41,8 @@ void a64_ffinterleaved_bf16fp32_dot_8x12( ARGLIST );
class cls_a64_ffinterleaved_bf16fp32_dot_8x12
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -72,8 +73,8 @@ public:
}
- StdTransformsFixed<operand_type, result_type, 8, 12, 2> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 2, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 2> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 2, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp
index 5f4fcac690..349ad1c985 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_dot_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,17 +54,17 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x24, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x23, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x25, #0x8\n"
+ "mov %x[Apanel], x24\n"
"add x22, x23, x20, LSL #1\n"
"add x21, x22, x20, LSL #1\n"
"add x20, x21, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x25, #0x8\n"
- "mov %x[Apanel], x24\n"
"bgt 3f\n"
"cmp x25, #0x4\n"
"mov x21, x23\n"
@@ -79,12 +79,12 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
"movi v9.16b, #0x0\n"
"ldr q6, [x21, #0x0]\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
"movi v14.16b, #0x0\n"
+ "cmp x20, #0x2\n"
"movi v15.16b, #0x0\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
@@ -170,18 +170,18 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
"ldr q6, [x21, #0x0]\n"
"bge 4b\n"
"5:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "add x23, x23, #0x10\n"
".inst 0x4f40f088 // bfdot v8.4s, v4.8h, v0.h[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
".inst 0x4f60f08b // bfdot v11.4s, v4.8h, v0.h[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
".inst 0x4f40f88e // bfdot v14.4s, v4.8h, v0.h[2]\n"
".inst 0x4f60f891 // bfdot v17.4s, v4.8h, v0.h[3]\n"
- "add x23, x23, #0x10\n"
".inst 0x4f41f094 // bfdot v20.4s, v4.8h, v1.h[0]\n"
".inst 0x4f61f097 // bfdot v23.4s, v4.8h, v1.h[1]\n"
- "add x22, x22, #0x10\n"
".inst 0x4f41f89a // bfdot v26.4s, v4.8h, v1.h[2]\n"
".inst 0x4f61f89d // bfdot v29.4s, v4.8h, v1.h[3]\n"
- "add x21, x21, #0x10\n"
".inst 0x4f40f0a9 // bfdot v9.4s, v5.8h, v0.h[0]\n"
".inst 0x4f60f0ac // bfdot v12.4s, v5.8h, v0.h[1]\n"
".inst 0x4f40f8af // bfdot v15.4s, v5.8h, v0.h[2]\n"
@@ -204,8 +204,8 @@ void a64_ffinterleaved_bf16fp32_dot_8x12(
"add %x[Apanel], %x[Apanel], #0x20\n"
"ldr q2, [x23, #0x0]\n"
"ldr q1, [x22, #0x0]\n"
- ".inst 0x4f44f048 // bfdot v8.4s, v2.8h, v4.h[0]\n"
"ldr q0, [x21, #0x0]\n"
+ ".inst 0x4f44f048 // bfdot v8.4s, v2.8h, v4.h[0]\n"
".inst 0x4f64f04b // bfdot v11.4s, v2.8h, v4.h[1]\n"
".inst 0x4f44f84e // bfdot v14.4s, v2.8h, v4.h[2]\n"
".inst 0x4f64f851 // bfdot v17.4s, v2.8h, v4.h[3]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp
index 1a8b0fd630..eb382952fa 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp
@@ -41,7 +41,8 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12( ARGLIST );
class cls_a64_ffinterleaved_bf16fp32_mmla_8x12
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -72,8 +73,8 @@ public:
}
- StdTransformsFixed<operand_type, result_type, 8, 12, 4> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
@@ -88,10 +89,8 @@ public:
if (std::is_same<T, float>::value) {
switch (ci->get_cpu_model()) {
- case CPUModel::V1:
- return { 45.25, 4.29, 4.80 };
default:
- return { 29.85, 2.60, 5.49 };
+ return { 38.10, 5.23, 3.15 };
}
}
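
The hunk above retunes the scheduling heuristics rather than the kernel itself: the Neoverse-V1-specific figures disappear and the default PerformanceParameters change. A sketch of the dispatch shape, with the field names and meanings assumed for illustration:

struct PerformanceParametersSketch {
    float kernel_macs_cycle;    // assumed: sustained MACs per cycle
    float prepare_bytes_cycle;  // assumed: prepack cost per cycle
    float merge_bytes_cycle;    // assumed: writeback/merge cost per cycle
};

enum class CpuModelSketch { GENERIC, V1 };

static PerformanceParametersSketch get_params_sketch(CpuModelSketch model) {
    switch (model) {
        // The V1 special case was removed above; every model now falls
        // through to the single retuned default.
        default:
            return { 38.10f, 5.23f, 3.15f };
    }
}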
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp
index 4a1c1b5638..5331f9e652 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_bf16fp32_mmla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,17 +54,17 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x24, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x23, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x25, #0x8\n"
+ "mov %x[Apanel], x24\n"
"add x22, x23, x20, LSL #1\n"
"add x21, x22, x20, LSL #1\n"
"add x20, x21, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x25, #0x8\n"
- "mov %x[Apanel], x24\n"
"bgt 3f\n"
"cmp x25, #0x4\n"
"mov x21, x23\n"
@@ -79,14 +79,14 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
"movi v9.16b, #0x0\n"
"ldr q2, [%x[Apanel], #0x20]\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
- "add x23, x23, #0x20\n"
"movi v12.16b, #0x0\n"
+ "add x23, x23, #0x20\n"
"movi v13.16b, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v14.16b, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
+ "cmp x20, #0x2\n"
"movi v15.16b, #0x0\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
@@ -217,19 +217,19 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
"cbz x20, 6f\n"
"ldr q1, [x23, #0x0]\n"
"ldr q7, [%x[Apanel], #0x0]\n"
- ".inst 0x6e41ece8 // bfmmla v8.4s, v7.8h, v1.8h\n"
"ldr q6, [%x[Apanel], #0x10]\n"
"ldr q0, [x23, #0x10]\n"
- ".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
"ldr q5, [%x[Apanel], #0x20]\n"
"ldr q4, [%x[Apanel], #0x30]\n"
- ".inst 0x6e41ecce // bfmmla v14.4s, v6.8h, v1.8h\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
"ldr q3, [x22, #0x0]\n"
"ldr q2, [x22, #0x10]\n"
+ ".inst 0x6e41ece8 // bfmmla v8.4s, v7.8h, v1.8h\n"
+ ".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
+ ".inst 0x6e41ecce // bfmmla v14.4s, v6.8h, v1.8h\n"
".inst 0x6e40ecd1 // bfmmla v17.4s, v6.8h, v0.8h\n"
".inst 0x6e41ecb4 // bfmmla v20.4s, v5.8h, v1.8h\n"
".inst 0x6e40ecb7 // bfmmla v23.4s, v5.8h, v0.8h\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6e41ec9a // bfmmla v26.4s, v4.8h, v1.8h\n"
"ldr q1, [x21, #0x0]\n"
".inst 0x6e40ec9d // bfmmla v29.4s, v4.8h, v0.8h\n"
@@ -252,41 +252,41 @@ void a64_ffinterleaved_bf16fp32_mmla_8x12(
".inst 0x6e40ec9f // bfmmla v31.4s, v4.8h, v0.8h\n"
"6:" // multiply loop done
"subs x25, x25, #0xc\n"
- "uzp1 v0.2d, v8.2d, v11.2d\n"
+ "uzp1 v2.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v1.2d, v9.2d, v12.2d\n"
"uzp2 v9.2d, v9.2d, v12.2d\n"
- "str q0, [%x[Cpanel], #0x0]\n"
"uzp1 v0.2d, v10.2d, v13.2d\n"
"uzp2 v10.2d, v10.2d, v13.2d\n"
- "str q1, [%x[Cpanel], #0x10]\n"
- "str q0, [%x[Cpanel], #0x20]\n"
- "uzp1 v0.2d, v14.2d, v17.2d\n"
+ "str q2, [%x[Cpanel], #0x0]\n"
+ "uzp1 v3.2d, v14.2d, v17.2d\n"
"uzp2 v14.2d, v14.2d, v17.2d\n"
- "str q8, [%x[Cpanel], #0x30]\n"
+ "str q1, [%x[Cpanel], #0x10]\n"
"uzp1 v2.2d, v15.2d, v18.2d\n"
"uzp2 v15.2d, v15.2d, v18.2d\n"
- "str q9, [%x[Cpanel], #0x40]\n"
+ "str q0, [%x[Cpanel], #0x20]\n"
"uzp1 v17.2d, v16.2d, v19.2d\n"
"uzp2 v16.2d, v16.2d, v19.2d\n"
- "str q10, [%x[Cpanel], #0x50]\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
"uzp1 v1.2d, v20.2d, v23.2d\n"
"uzp2 v20.2d, v20.2d, v23.2d\n"
- "str q0, [%x[Cpanel], #0x60]\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
"uzp1 v0.2d, v21.2d, v24.2d\n"
"uzp2 v21.2d, v21.2d, v24.2d\n"
- "str q2, [%x[Cpanel], #0x70]\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
"uzp1 v23.2d, v22.2d, v25.2d\n"
"uzp2 v22.2d, v22.2d, v25.2d\n"
- "str q17, [%x[Cpanel], #0x80]\n"
+ "str q3, [%x[Cpanel], #0x60]\n"
"uzp1 v19.2d, v26.2d, v29.2d\n"
"uzp2 v26.2d, v26.2d, v29.2d\n"
- "str q14, [%x[Cpanel], #0x90]\n"
+ "str q2, [%x[Cpanel], #0x70]\n"
"uzp1 v18.2d, v27.2d, v30.2d\n"
"uzp2 v27.2d, v27.2d, v30.2d\n"
- "str q15, [%x[Cpanel], #0xa0]\n"
+ "str q17, [%x[Cpanel], #0x80]\n"
"uzp1 v17.2d, v28.2d, v31.2d\n"
"uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
"str q16, [%x[Cpanel], #0xb0]\n"
"str q1, [%x[Cpanel], #0xc0]\n"
"str q0, [%x[Cpanel], #0xd0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp
index b9b4ad54df..42136ce085 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void a64_ffinterleaved_fp16_mla_8x24( ARGLIST );
class cls_a64_ffinterleaved_fp16_mla_8x24
{
public:
- typedef __fp16 operand_type;
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
typedef __fp16 result_type;
typedef void (*kern_type)( ARGLIST );
@@ -71,8 +72,8 @@ public:
}
- StdTransformsFixed<operand_type, result_type, 8, 24, 1> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 24, 1, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 24, 1> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 24, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp
index 1e3f2f300b..2ad85ad424 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp16_mla_8x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,17 +53,17 @@ void a64_ffinterleaved_fp16_mla_8x24(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x24, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x23, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x25, #0x10\n"
+ "mov %x[Apanel], x24\n"
"add x22, x23, x20, LSL #1\n"
"add x21, x22, x20, LSL #1\n"
"add x20, x21, x20, LSL #1\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x25, #0x10\n"
- "mov %x[Apanel], x24\n"
"bgt 3f\n"
"cmp x25, #0x8\n"
"mov x21, x23\n"
@@ -77,13 +77,13 @@ void a64_ffinterleaved_fp16_mla_8x24(
"ldr q4, [x21, #0x0]\n"
"movi v9.16b, #0x0\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
+ "cmp x20, #0x2\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
"movi v18.16b, #0x0\n"
@@ -166,18 +166,18 @@ void a64_ffinterleaved_fp16_mla_8x24(
"fmla v31.8h, v1.8h, v7.h[7]\n"
"bge 4b\n"
"5:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla v8.8h, v2.8h, v0.h[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v11.8h, v2.8h, v0.h[1]\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla v14.8h, v2.8h, v0.h[2]\n"
"fmla v17.8h, v2.8h, v0.h[3]\n"
- "add x23, x23, #0x10\n"
"fmla v20.8h, v2.8h, v0.h[4]\n"
"fmla v23.8h, v2.8h, v0.h[5]\n"
- "add x22, x22, #0x10\n"
"fmla v26.8h, v2.8h, v0.h[6]\n"
"fmla v29.8h, v2.8h, v0.h[7]\n"
- "add x21, x21, #0x10\n"
"fmla v9.8h, v3.8h, v0.h[0]\n"
"fmla v12.8h, v3.8h, v0.h[1]\n"
"fmla v15.8h, v3.8h, v0.h[2]\n"
@@ -197,13 +197,13 @@ void a64_ffinterleaved_fp16_mla_8x24(
"cbz x20, 6f\n"
"ldr q3, [%x[Apanel], #0x0]\n"
"ldr q2, [x23, #0x0]\n"
- "fmla v8.8h, v2.8h, v3.h[0]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
"ldr q1, [x22, #0x0]\n"
"ldr q0, [x21, #0x0]\n"
+ "fmla v8.8h, v2.8h, v3.h[0]\n"
"fmla v11.8h, v2.8h, v3.h[1]\n"
"fmla v14.8h, v2.8h, v3.h[2]\n"
"fmla v17.8h, v2.8h, v3.h[3]\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla v20.8h, v2.8h, v3.h[4]\n"
"fmla v23.8h, v2.8h, v3.h[5]\n"
"fmla v26.8h, v2.8h, v3.h[6]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp
index c4445ba14a..bb6deaf68f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void a64_ffinterleaved_fp32_mla_8x12( ARGLIST );
class cls_a64_ffinterleaved_fp32_mla_8x12
{
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -71,8 +72,8 @@ public:
}
- StdTransformsFixed<operand_type, result_type, 8, 12, 1> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 1, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 1> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp
index 6de0a380eb..45970fdc0b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_ffinterleaved_fp32_mla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,17 +53,17 @@ void a64_ffinterleaved_fp32_mla_8x12(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x25, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x24, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x23, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cmp x25, #0x8\n"
+ "mov %x[Apanel], x24\n"
"add x22, x23, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "cmp x25, #0x8\n"
- "mov %x[Apanel], x24\n"
"bgt 3f\n"
"cmp x25, #0x4\n"
"mov x21, x23\n"
@@ -78,12 +78,12 @@ void a64_ffinterleaved_fp32_mla_8x12(
"movi v9.16b, #0x0\n"
"ldr q6, [x21, #0x0]\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x4\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
"movi v14.16b, #0x0\n"
+ "cmp x20, #0x4\n"
"movi v15.16b, #0x0\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
@@ -227,18 +227,18 @@ void a64_ffinterleaved_fp32_mla_8x12(
"fmla v31.4s, v2.4s, v7.s[3]\n"
"bge 4b\n"
"5:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "add x23, x23, #0x10\n"
"fmla v8.4s, v4.4s, v0.s[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla v11.4s, v4.4s, v0.s[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla v14.4s, v4.4s, v0.s[2]\n"
"fmla v17.4s, v4.4s, v0.s[3]\n"
- "add x23, x23, #0x10\n"
"fmla v20.4s, v4.4s, v1.s[0]\n"
"fmla v23.4s, v4.4s, v1.s[1]\n"
- "add x22, x22, #0x10\n"
"fmla v26.4s, v4.4s, v1.s[2]\n"
"fmla v29.4s, v4.4s, v1.s[3]\n"
- "add x21, x21, #0x10\n"
"fmla v9.4s, v5.4s, v0.s[0]\n"
"fmla v12.4s, v5.4s, v0.s[1]\n"
"fmla v15.4s, v5.4s, v0.s[2]\n"
@@ -262,22 +262,22 @@ void a64_ffinterleaved_fp32_mla_8x12(
"subs x20, x20, #0x1\n"
"ldr q2, [x23, #0x0]\n"
"ldr q1, [x22, #0x0]\n"
- "fmla v8.4s, v2.4s, v4.s[0]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"ldr q0, [x21, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "fmla v8.4s, v2.4s, v4.s[0]\n"
"fmla v11.4s, v2.4s, v4.s[1]\n"
"fmla v14.4s, v2.4s, v4.s[2]\n"
"fmla v17.4s, v2.4s, v4.s[3]\n"
"fmla v20.4s, v2.4s, v3.s[0]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla v23.4s, v2.4s, v3.s[1]\n"
"fmla v26.4s, v2.4s, v3.s[2]\n"
- "add x23, x23, #0x10\n"
"fmla v29.4s, v2.4s, v3.s[3]\n"
"fmla v9.4s, v1.4s, v4.s[0]\n"
- "add x22, x22, #0x10\n"
"fmla v12.4s, v1.4s, v4.s[1]\n"
"fmla v15.4s, v1.4s, v4.s[2]\n"
- "add x21, x21, #0x10\n"
"fmla v18.4s, v1.4s, v4.s[3]\n"
"fmla v21.4s, v1.4s, v3.s[0]\n"
"fmla v24.4s, v1.4s, v3.s[1]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s16_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s16_8x12.hpp
index 8bf8d8442e..8dc9112ebe 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s16_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s16_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,8 @@ void a64_gemm_s16_asimd_8x12(const int16_t *, const int16_t *, int32_t *, int, i
// structure.
class cls_a64_gemm_s16_8x12 {
public:
- typedef int16_t operand_type;
+ typedef int16_t lhs_operand_type;
+ typedef int16_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)(const int16_t *, const int16_t *, int32_t *, int, int, int);
@@ -61,8 +62,8 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 8, 12> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 1, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 1, true> transforms_quantized = {};
kern_type kernel = a64_gemm_s16_asimd_8x12;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4.hpp
index 1363b939ab..d0edcfcb5e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void a64_gemm_s8_4x4(const int8_t *, const int8_t *, int32_t *, int, int, int);
class cls_a64_gemm_s8_4x4 {
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)(const int8_t *, const int8_t *, int32_t *, int, int, int);
@@ -56,8 +57,8 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 4, 4, 16> transforms = {};
- StdTransformsFixed<operand_type, result_type, 4, 4, 16, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 16> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 16, true> transforms_quantized = {};
template<typename T>
static PerformanceParameters get_performance_parameters(const CPUInfo *ci) {
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4/generic.cpp
index 3b9a85577e..b55ac61403 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_4x4/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Arm Limited.
+ * Copyright (c) 2017, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,37 +55,37 @@ void a64_gemm_s8_4x4(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpanel
register int8x16_t b3a asm("v11");
__asm __volatile (
- "movi v16.4s, #0x0\n"
- "ldr q0, [%[a_ptr]]\n"
- "movi v17.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v18.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v19.4s, #0x0\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "movi v20.4s, #0x0\n"
- "ldr %q[b3], [%[b_ptr], #48]\n"
- "movi v21.4s, #0x0\n"
- "ldr q1, [%[a_ptr], #16]\n"
- "movi v22.4s, #0x0\n"
- "ldr q2, [%[a_ptr], #32]\n"
- "movi v23.4s, #0x0\n"
- "ldr q3, [%[a_ptr], #48]\n"
- "movi v24.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "ldr q0, [%[a_ptr]]\n"
+ "movi v17.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v18.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v19.4s, #0x0\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "movi v20.4s, #0x0\n"
+ "ldr %q[b3], [%[b_ptr], #48]\n"
+ "movi v21.4s, #0x0\n"
+ "ldr q1, [%[a_ptr], #16]\n"
+ "movi v22.4s, #0x0\n"
+ "ldr q2, [%[a_ptr], #32]\n"
+ "movi v23.4s, #0x0\n"
+ "ldr q3, [%[a_ptr], #48]\n"
+ "movi v24.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v25.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v26.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v27.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v28.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v29.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v30.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v31.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
// Loop structure optimized for A57 (after r0).
@@ -107,351 +107,351 @@ void a64_gemm_s8_4x4(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpanel
// of multiplies that need to be pulled out.
// Start of unroll 0 (first iteration)
- "smull v12.8h, v0.8b, %[b0].8b\n"
- "smull v13.8h, v0.8b, %[b1].8b\n"
+ "smull v12.8h, v0.8b, %[b0].8b\n"
+ "smull v13.8h, v0.8b, %[b1].8b\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
// Unroll 0 continuation (branch target)
"1:\n"
- "smull v14.8h, v0.8b, %[b2].8b\n"
- "subs %w[k], %w[k], #1\n"
- "smull v15.8h, v0.8b, %[b3].8b\n"
- "ldr %q[b0a], [%[b_ptr], #64]\n"
- "smlal2 v12.8h, v0.16b, %[b0].16b\n"
- "smlal2 v13.8h, v0.16b, %[b1].16b\n"
- "ldr %q[b1a], [%[b_ptr], #80]\n"
- "smlal2 v14.8h, v0.16b, %[b2].16b\n"
- "smlal2 v15.8h, v0.16b, %[b3].16b\n"
- "ldr q0, [%[a_ptr], #64]\n"
-
- "sadalp v16.4s, v12.8h\n"
- "smull v12.8h, v1.8b, %[b0].8b\n"
- "sadalp v17.4s, v13.8h\n"
- "sadalp v18.4s, v14.8h\n"
- "smull v13.8h, v1.8b, %[b1].8b\n"
- "sadalp v19.4s, v15.8h\n"
- "smull v14.8h, v1.8b, %[b2].8b\n"
- "ldr %q[b2a], [%[b_ptr], #96]\n"
- "smull v15.8h, v1.8b, %[b3].8b\n"
- "smlal2 v12.8h, v1.16b, %[b0].16b\n"
- "ldr %q[b3a], [%[b_ptr], #112]\n"
- "smlal2 v13.8h, v1.16b, %[b1].16b\n"
- "add %[b_ptr], %[b_ptr], #128\n"
- "smlal2 v14.8h, v1.16b, %[b2].16b\n"
- "smlal2 v15.8h, v1.16b, %[b3].16b\n"
- "ldr q1, [%[a_ptr], #80]\n"
-
- "sadalp v20.4s, v12.8h\n"
- "smull v12.8h, v2.8b, %[b0].8b\n"
- "sadalp v21.4s, v13.8h\n"
- "sadalp v22.4s, v14.8h\n"
- "smull v13.8h, v2.8b, %[b1].8b\n"
- "sadalp v23.4s, v15.8h\n"
- "smull v14.8h, v2.8b, %[b2].8b\n"
- "smull v15.8h, v2.8b, %[b3].8b\n"
- "smlal2 v12.8h, v2.16b, %[b0].16b\n"
+ "smull v14.8h, v0.8b, %[b2].8b\n"
+ "subs %w[k], %w[k], #1\n"
+ "smull v15.8h, v0.8b, %[b3].8b\n"
+ "ldr %q[b0a], [%[b_ptr], #64]\n"
+ "smlal2 v12.8h, v0.16b, %[b0].16b\n"
+ "smlal2 v13.8h, v0.16b, %[b1].16b\n"
+ "ldr %q[b1a], [%[b_ptr], #80]\n"
+ "smlal2 v14.8h, v0.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v0.16b, %[b3].16b\n"
+ "ldr q0, [%[a_ptr], #64]\n"
+
+ "sadalp v16.4s, v12.8h\n"
+ "smull v12.8h, v1.8b, %[b0].8b\n"
+ "sadalp v17.4s, v13.8h\n"
+ "sadalp v18.4s, v14.8h\n"
+ "smull v13.8h, v1.8b, %[b1].8b\n"
+ "sadalp v19.4s, v15.8h\n"
+ "smull v14.8h, v1.8b, %[b2].8b\n"
+ "ldr %q[b2a], [%[b_ptr], #96]\n"
+ "smull v15.8h, v1.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v1.16b, %[b0].16b\n"
+ "ldr %q[b3a], [%[b_ptr], #112]\n"
+ "smlal2 v13.8h, v1.16b, %[b1].16b\n"
+ "add %[b_ptr], %[b_ptr], #128\n"
+ "smlal2 v14.8h, v1.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v1.16b, %[b3].16b\n"
+ "ldr q1, [%[a_ptr], #80]\n"
+
+ "sadalp v20.4s, v12.8h\n"
+ "smull v12.8h, v2.8b, %[b0].8b\n"
+ "sadalp v21.4s, v13.8h\n"
+ "sadalp v22.4s, v14.8h\n"
+ "smull v13.8h, v2.8b, %[b1].8b\n"
+ "sadalp v23.4s, v15.8h\n"
+ "smull v14.8h, v2.8b, %[b2].8b\n"
+ "smull v15.8h, v2.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v2.16b, %[b0].16b\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "smlal2 v13.8h, v2.16b, %[b1].16b\n"
- "smlal2 v14.8h, v2.16b, %[b2].16b\n"
+ "smlal2 v13.8h, v2.16b, %[b1].16b\n"
+ "smlal2 v14.8h, v2.16b, %[b2].16b\n"
ASM_PREFETCH("[%[a_ptr], #320]")
- "smlal2 v15.8h, v2.16b, %[b3].16b\n"
- "ldr q2, [%[a_ptr], #96]\n"
-
- "sadalp v24.4s, v12.8h\n"
- "smull v12.8h, v3.8b, %[b0].8b\n"
- "sadalp v25.4s, v13.8h\n"
- "sadalp v26.4s, v14.8h\n"
- "smull v13.8h, v3.8b, %[b1].8b\n"
- "sadalp v27.4s, v15.8h\n"
- "smull v14.8h, v3.8b, %[b2].8b\n"
- "smull v15.8h, v3.8b, %[b3].8b\n"
- "smlal2 v12.8h, v3.16b, %[b0].16b\n"
- "ldr %q[b0], [%[b_ptr], #0]\n"
- "smlal2 v13.8h, v3.16b, %[b1].16b\n"
- "smlal2 v14.8h, v3.16b, %[b2].16b\n"
- "smlal2 v15.8h, v3.16b, %[b3].16b\n"
- "ldr q3, [%[a_ptr], #112]\n"
+ "smlal2 v15.8h, v2.16b, %[b3].16b\n"
+ "ldr q2, [%[a_ptr], #96]\n"
+
+ "sadalp v24.4s, v12.8h\n"
+ "smull v12.8h, v3.8b, %[b0].8b\n"
+ "sadalp v25.4s, v13.8h\n"
+ "sadalp v26.4s, v14.8h\n"
+ "smull v13.8h, v3.8b, %[b1].8b\n"
+ "sadalp v27.4s, v15.8h\n"
+ "smull v14.8h, v3.8b, %[b2].8b\n"
+ "smull v15.8h, v3.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v3.16b, %[b0].16b\n"
+ "ldr %q[b0], [%[b_ptr], #0]\n"
+ "smlal2 v13.8h, v3.16b, %[b1].16b\n"
+ "smlal2 v14.8h, v3.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v3.16b, %[b3].16b\n"
+ "ldr q3, [%[a_ptr], #112]\n"
// Unroll 1
- "sadalp v28.4s, v12.8h\n"
- "smull v12.8h, v0.8b, %[b0a].8b\n"
- "sadalp v29.4s, v13.8h\n"
- "sadalp v30.4s, v14.8h\n"
- "smull v13.8h, v0.8b, %[b1a].8b\n"
- "sadalp v31.4s, v15.8h\n"
- "smull v14.8h, v0.8b, %[b2a].8b\n"
- "smull v15.8h, v0.8b, %[b3a].8b\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "smlal2 v12.8h, v0.16b, %[b0a].16b\n"
- "smlal2 v13.8h, v0.16b, %[b1a].16b\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "smlal2 v14.8h, v0.16b, %[b2a].16b\n"
- "smlal2 v15.8h, v0.16b, %[b3a].16b\n"
- "ldr q0, [%[a_ptr], #128]\n"
-
- "sadalp v16.4s, v12.8h\n"
- "smull v12.8h, v1.8b, %[b0a].8b\n"
- "sadalp v17.4s, v13.8h\n"
- "sadalp v18.4s, v14.8h\n"
- "smull v13.8h, v1.8b, %[b1a].8b\n"
- "sadalp v19.4s, v15.8h\n"
- "add %[a_ptr], %[a_ptr], #128\n"
- "smull v14.8h, v1.8b, %[b2a].8b\n"
- "smull v15.8h, v1.8b, %[b3a].8b\n"
- "ldr %q[b3], [%[b_ptr], #48]\n"
- "smlal2 v12.8h, v1.16b, %[b0a].16b\n"
- "smlal2 v13.8h, v1.16b, %[b1a].16b\n"
- "smlal2 v14.8h, v1.16b, %[b2a].16b\n"
- "smlal2 v15.8h, v1.16b, %[b3a].16b\n"
- "ldr q1, [%[a_ptr], #16]\n"
-
- "sadalp v20.4s, v12.8h\n"
- "smull v12.8h, v2.8b, %[b0a].8b\n"
- "sadalp v21.4s, v13.8h\n"
- "sadalp v22.4s, v14.8h\n"
- "smull v13.8h, v2.8b, %[b1a].8b\n"
- "sadalp v23.4s, v15.8h\n"
- "smull v14.8h, v2.8b, %[b2a].8b\n"
- "smull v15.8h, v2.8b, %[b3a].8b\n"
- "smlal2 v12.8h, v2.16b, %[b0a].16b\n"
+ "sadalp v28.4s, v12.8h\n"
+ "smull v12.8h, v0.8b, %[b0a].8b\n"
+ "sadalp v29.4s, v13.8h\n"
+ "sadalp v30.4s, v14.8h\n"
+ "smull v13.8h, v0.8b, %[b1a].8b\n"
+ "sadalp v31.4s, v15.8h\n"
+ "smull v14.8h, v0.8b, %[b2a].8b\n"
+ "smull v15.8h, v0.8b, %[b3a].8b\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "smlal2 v12.8h, v0.16b, %[b0a].16b\n"
+ "smlal2 v13.8h, v0.16b, %[b1a].16b\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "smlal2 v14.8h, v0.16b, %[b2a].16b\n"
+ "smlal2 v15.8h, v0.16b, %[b3a].16b\n"
+ "ldr q0, [%[a_ptr], #128]\n"
+
+ "sadalp v16.4s, v12.8h\n"
+ "smull v12.8h, v1.8b, %[b0a].8b\n"
+ "sadalp v17.4s, v13.8h\n"
+ "sadalp v18.4s, v14.8h\n"
+ "smull v13.8h, v1.8b, %[b1a].8b\n"
+ "sadalp v19.4s, v15.8h\n"
+ "add %[a_ptr], %[a_ptr], #128\n"
+ "smull v14.8h, v1.8b, %[b2a].8b\n"
+ "smull v15.8h, v1.8b, %[b3a].8b\n"
+ "ldr %q[b3], [%[b_ptr], #48]\n"
+ "smlal2 v12.8h, v1.16b, %[b0a].16b\n"
+ "smlal2 v13.8h, v1.16b, %[b1a].16b\n"
+ "smlal2 v14.8h, v1.16b, %[b2a].16b\n"
+ "smlal2 v15.8h, v1.16b, %[b3a].16b\n"
+ "ldr q1, [%[a_ptr], #16]\n"
+
+ "sadalp v20.4s, v12.8h\n"
+ "smull v12.8h, v2.8b, %[b0a].8b\n"
+ "sadalp v21.4s, v13.8h\n"
+ "sadalp v22.4s, v14.8h\n"
+ "smull v13.8h, v2.8b, %[b1a].8b\n"
+ "sadalp v23.4s, v15.8h\n"
+ "smull v14.8h, v2.8b, %[b2a].8b\n"
+ "smull v15.8h, v2.8b, %[b3a].8b\n"
+ "smlal2 v12.8h, v2.16b, %[b0a].16b\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "smlal2 v13.8h, v2.16b, %[b1a].16b\n"
- "smlal2 v14.8h, v2.16b, %[b2a].16b\n"
+ "smlal2 v13.8h, v2.16b, %[b1a].16b\n"
+ "smlal2 v14.8h, v2.16b, %[b2a].16b\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "smlal2 v15.8h, v2.16b, %[b3a].16b\n"
- "ldr q2, [%[a_ptr], #32]\n"
-
- "sadalp v24.4s, v12.8h\n"
- "smull v12.8h, v3.8b, %[b0a].8b\n"
- "sadalp v25.4s, v13.8h\n"
- "sadalp v26.4s, v14.8h\n"
- "smull v13.8h, v3.8b, %[b1a].8b\n"
- "sadalp v27.4s, v15.8h\n"
- "smull v14.8h, v3.8b, %[b2a].8b\n"
- "smull v15.8h, v3.8b, %[b3a].8b\n"
- "smlal2 v12.8h, v3.16b, %[b0a].16b\n"
- "smlal2 v13.8h, v3.16b, %[b1a].16b\n"
- "smlal2 v14.8h, v3.16b, %[b2a].16b\n"
- "smlal2 v15.8h, v3.16b, %[b3a].16b\n"
- "ldr q3, [%[a_ptr], #48]\n"
+ "smlal2 v15.8h, v2.16b, %[b3a].16b\n"
+ "ldr q2, [%[a_ptr], #32]\n"
+
+ "sadalp v24.4s, v12.8h\n"
+ "smull v12.8h, v3.8b, %[b0a].8b\n"
+ "sadalp v25.4s, v13.8h\n"
+ "sadalp v26.4s, v14.8h\n"
+ "smull v13.8h, v3.8b, %[b1a].8b\n"
+ "sadalp v27.4s, v15.8h\n"
+ "smull v14.8h, v3.8b, %[b2a].8b\n"
+ "smull v15.8h, v3.8b, %[b3a].8b\n"
+ "smlal2 v12.8h, v3.16b, %[b0a].16b\n"
+ "smlal2 v13.8h, v3.16b, %[b1a].16b\n"
+ "smlal2 v14.8h, v3.16b, %[b2a].16b\n"
+ "smlal2 v15.8h, v3.16b, %[b3a].16b\n"
+ "ldr q3, [%[a_ptr], #48]\n"
// Start of unroll 0 for next iteration.
- "sadalp v28.4s, v12.8h\n"
- "smull v12.8h, v0.8b, %[b0].8b\n"
- "sadalp v29.4s, v13.8h\n"
- "sadalp v30.4s, v14.8h\n"
- "smull v13.8h, v0.8b, %[b1].8b\n"
- "sadalp v31.4s, v15.8h\n"
- "bne 1b\n"
+ "sadalp v28.4s, v12.8h\n"
+ "smull v12.8h, v0.8b, %[b0].8b\n"
+ "sadalp v29.4s, v13.8h\n"
+ "sadalp v30.4s, v14.8h\n"
+ "smull v13.8h, v0.8b, %[b1].8b\n"
+ "sadalp v31.4s, v15.8h\n"
+ "bne 1b\n"
// Target to use when K=1 or 2 (i.e. zero iterations of main loop)
"4:\n"
// Branch to alternative tail for odd K
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration (even K)
- "smull v14.8h, v0.8b, %[b2].8b\n"
- "smull v15.8h, v0.8b, %[b3].8b\n"
- "ldr %q[b0a], [%[b_ptr], #64]\n"
- "smlal2 v12.8h, v0.16b, %[b0].16b\n"
- "smlal2 v13.8h, v0.16b, %[b1].16b\n"
- "ldr %q[b1a], [%[b_ptr], #80]\n"
- "smlal2 v14.8h, v0.16b, %[b2].16b\n"
- "smlal2 v15.8h, v0.16b, %[b3].16b\n"
- "ldr q0, [%[a_ptr], #64]\n"
-
- "sadalp v16.4s, v12.8h\n"
- "smull v12.8h, v1.8b, %[b0].8b\n"
- "sadalp v17.4s, v13.8h\n"
- "sadalp v18.4s, v14.8h\n"
- "smull v13.8h, v1.8b, %[b1].8b\n"
- "sadalp v19.4s, v15.8h\n"
- "smull v14.8h, v1.8b, %[b2].8b\n"
- "ldr %q[b2a], [%[b_ptr], #96]\n"
- "smull v15.8h, v1.8b, %[b3].8b\n"
- "smlal2 v12.8h, v1.16b, %[b0].16b\n"
- "ldr %q[b3a], [%[b_ptr], #112]\n"
- "smlal2 v13.8h, v1.16b, %[b1].16b\n"
- "add %[b_ptr], %[b_ptr], #128\n"
- "smlal2 v14.8h, v1.16b, %[b2].16b\n"
- "smlal2 v15.8h, v1.16b, %[b3].16b\n"
- "ldr q1, [%[a_ptr], #80]\n"
-
- "sadalp v20.4s, v12.8h\n"
- "smull v12.8h, v2.8b, %[b0].8b\n"
- "sadalp v21.4s, v13.8h\n"
- "sadalp v22.4s, v14.8h\n"
- "smull v13.8h, v2.8b, %[b1].8b\n"
- "sadalp v23.4s, v15.8h\n"
- "smull v14.8h, v2.8b, %[b2].8b\n"
- "smull v15.8h, v2.8b, %[b3].8b\n"
- "smlal2 v12.8h, v2.16b, %[b0].16b\n"
- "smlal2 v13.8h, v2.16b, %[b1].16b\n"
- "smlal2 v14.8h, v2.16b, %[b2].16b\n"
- "smlal2 v15.8h, v2.16b, %[b3].16b\n"
- "ldr q2, [%[a_ptr], #96]\n"
-
- "sadalp v24.4s, v12.8h\n"
- "smull v12.8h, v3.8b, %[b0].8b\n"
- "sadalp v25.4s, v13.8h\n"
- "sadalp v26.4s, v14.8h\n"
- "smull v13.8h, v3.8b, %[b1].8b\n"
- "sadalp v27.4s, v15.8h\n"
- "smull v14.8h, v3.8b, %[b2].8b\n"
- "smull v15.8h, v3.8b, %[b3].8b\n"
- "smlal2 v12.8h, v3.16b, %[b0].16b\n"
- "smlal2 v13.8h, v3.16b, %[b1].16b\n"
- "smlal2 v14.8h, v3.16b, %[b2].16b\n"
- "smlal2 v15.8h, v3.16b, %[b3].16b\n"
- "ldr q3, [%[a_ptr], #112]\n"
+ "smull v14.8h, v0.8b, %[b2].8b\n"
+ "smull v15.8h, v0.8b, %[b3].8b\n"
+ "ldr %q[b0a], [%[b_ptr], #64]\n"
+ "smlal2 v12.8h, v0.16b, %[b0].16b\n"
+ "smlal2 v13.8h, v0.16b, %[b1].16b\n"
+ "ldr %q[b1a], [%[b_ptr], #80]\n"
+ "smlal2 v14.8h, v0.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v0.16b, %[b3].16b\n"
+ "ldr q0, [%[a_ptr], #64]\n"
+
+ "sadalp v16.4s, v12.8h\n"
+ "smull v12.8h, v1.8b, %[b0].8b\n"
+ "sadalp v17.4s, v13.8h\n"
+ "sadalp v18.4s, v14.8h\n"
+ "smull v13.8h, v1.8b, %[b1].8b\n"
+ "sadalp v19.4s, v15.8h\n"
+ "smull v14.8h, v1.8b, %[b2].8b\n"
+ "ldr %q[b2a], [%[b_ptr], #96]\n"
+ "smull v15.8h, v1.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v1.16b, %[b0].16b\n"
+ "ldr %q[b3a], [%[b_ptr], #112]\n"
+ "smlal2 v13.8h, v1.16b, %[b1].16b\n"
+ "add %[b_ptr], %[b_ptr], #128\n"
+ "smlal2 v14.8h, v1.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v1.16b, %[b3].16b\n"
+ "ldr q1, [%[a_ptr], #80]\n"
+
+ "sadalp v20.4s, v12.8h\n"
+ "smull v12.8h, v2.8b, %[b0].8b\n"
+ "sadalp v21.4s, v13.8h\n"
+ "sadalp v22.4s, v14.8h\n"
+ "smull v13.8h, v2.8b, %[b1].8b\n"
+ "sadalp v23.4s, v15.8h\n"
+ "smull v14.8h, v2.8b, %[b2].8b\n"
+ "smull v15.8h, v2.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v2.16b, %[b0].16b\n"
+ "smlal2 v13.8h, v2.16b, %[b1].16b\n"
+ "smlal2 v14.8h, v2.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v2.16b, %[b3].16b\n"
+ "ldr q2, [%[a_ptr], #96]\n"
+
+ "sadalp v24.4s, v12.8h\n"
+ "smull v12.8h, v3.8b, %[b0].8b\n"
+ "sadalp v25.4s, v13.8h\n"
+ "sadalp v26.4s, v14.8h\n"
+ "smull v13.8h, v3.8b, %[b1].8b\n"
+ "sadalp v27.4s, v15.8h\n"
+ "smull v14.8h, v3.8b, %[b2].8b\n"
+ "smull v15.8h, v3.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v3.16b, %[b0].16b\n"
+ "smlal2 v13.8h, v3.16b, %[b1].16b\n"
+ "smlal2 v14.8h, v3.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v3.16b, %[b3].16b\n"
+ "ldr q3, [%[a_ptr], #112]\n"
// Unroll 1
- "sadalp v28.4s, v12.8h\n"
- "smull v12.8h, v0.8b, %[b0a].8b\n"
- "sadalp v29.4s, v13.8h\n"
- "sadalp v30.4s, v14.8h\n"
- "smull v13.8h, v0.8b, %[b1a].8b\n"
- "sadalp v31.4s, v15.8h\n"
- "smull v14.8h, v0.8b, %[b2a].8b\n"
- "add %[a_ptr], %[a_ptr], #128\n"
- "smull v15.8h, v0.8b, %[b3a].8b\n"
- "smlal2 v12.8h, v0.16b, %[b0a].16b\n"
- "smlal2 v13.8h, v0.16b, %[b1a].16b\n"
- "smlal2 v14.8h, v0.16b, %[b2a].16b\n"
- "smlal2 v15.8h, v0.16b, %[b3a].16b\n"
-
- "sadalp v16.4s, v12.8h\n"
- "smull v12.8h, v1.8b, %[b0a].8b\n"
- "sadalp v17.4s, v13.8h\n"
- "sadalp v18.4s, v14.8h\n"
- "smull v13.8h, v1.8b, %[b1a].8b\n"
- "sadalp v19.4s, v15.8h\n"
- "smull v14.8h, v1.8b, %[b2a].8b\n"
- "smull v15.8h, v1.8b, %[b3a].8b\n"
- "smlal2 v12.8h, v1.16b, %[b0a].16b\n"
- "addp v16.4s, v16.4s, v17.4s\n"
- "smlal2 v13.8h, v1.16b, %[b1a].16b\n"
- "addp v17.4s, v18.4s, v19.4s\n"
- "smlal2 v14.8h, v1.16b, %[b2a].16b\n"
- "smlal2 v15.8h, v1.16b, %[b3a].16b\n"
-
- "sadalp v20.4s, v12.8h\n"
- "smull v12.8h, v2.8b, %[b0a].8b\n"
- "sadalp v21.4s, v13.8h\n"
- "sadalp v22.4s, v14.8h\n"
- "smull v13.8h, v2.8b, %[b1a].8b\n"
- "sadalp v23.4s, v15.8h\n"
- "addp v16.4s, v16.4s, v17.4s\n"
- "smull v14.8h, v2.8b, %[b2a].8b\n"
- "addp v18.4s, v20.4s, v21.4s\n"
- "addp v19.4s, v22.4s, v23.4s\n"
- "smull v15.8h, v2.8b, %[b3a].8b\n"
- "smlal2 v12.8h, v2.16b, %[b0a].16b\n"
- "str q16, [%[c_ptr]]\n"
- "smlal2 v13.8h, v2.16b, %[b1a].16b\n"
- "smlal2 v14.8h, v2.16b, %[b2a].16b\n"
- "smlal2 v15.8h, v2.16b, %[b3a].16b\n"
-
- "sadalp v24.4s, v12.8h\n"
- "smull v12.8h, v3.8b, %[b0a].8b\n"
- "sadalp v25.4s, v13.8h\n"
- "sadalp v26.4s, v14.8h\n"
- "smull v13.8h, v3.8b, %[b1a].8b\n"
- "sadalp v27.4s, v15.8h\n"
- "addp v17.4s, v18.4s, v19.4s\n"
- "smull v14.8h, v3.8b, %[b2a].8b\n"
- "addp v20.4s, v24.4s, v25.4s\n"
- "addp v21.4s, v26.4s, v27.4s\n"
- "smull v15.8h, v3.8b, %[b3a].8b\n"
- "smlal2 v12.8h, v3.16b, %[b0a].16b\n"
- "str q17, [%[c_ptr], #16]\n"
- "smlal2 v13.8h, v3.16b, %[b1a].16b\n"
- "smlal2 v14.8h, v3.16b, %[b2a].16b\n"
- "addp v18.4s, v20.4s, v21.4s\n"
- "smlal2 v15.8h, v3.16b, %[b3a].16b\n"
- "b 3f\n"
+ "sadalp v28.4s, v12.8h\n"
+ "smull v12.8h, v0.8b, %[b0a].8b\n"
+ "sadalp v29.4s, v13.8h\n"
+ "sadalp v30.4s, v14.8h\n"
+ "smull v13.8h, v0.8b, %[b1a].8b\n"
+ "sadalp v31.4s, v15.8h\n"
+ "smull v14.8h, v0.8b, %[b2a].8b\n"
+ "add %[a_ptr], %[a_ptr], #128\n"
+ "smull v15.8h, v0.8b, %[b3a].8b\n"
+ "smlal2 v12.8h, v0.16b, %[b0a].16b\n"
+ "smlal2 v13.8h, v0.16b, %[b1a].16b\n"
+ "smlal2 v14.8h, v0.16b, %[b2a].16b\n"
+ "smlal2 v15.8h, v0.16b, %[b3a].16b\n"
+
+ "sadalp v16.4s, v12.8h\n"
+ "smull v12.8h, v1.8b, %[b0a].8b\n"
+ "sadalp v17.4s, v13.8h\n"
+ "sadalp v18.4s, v14.8h\n"
+ "smull v13.8h, v1.8b, %[b1a].8b\n"
+ "sadalp v19.4s, v15.8h\n"
+ "smull v14.8h, v1.8b, %[b2a].8b\n"
+ "smull v15.8h, v1.8b, %[b3a].8b\n"
+ "smlal2 v12.8h, v1.16b, %[b0a].16b\n"
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "smlal2 v13.8h, v1.16b, %[b1a].16b\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "smlal2 v14.8h, v1.16b, %[b2a].16b\n"
+ "smlal2 v15.8h, v1.16b, %[b3a].16b\n"
+
+ "sadalp v20.4s, v12.8h\n"
+ "smull v12.8h, v2.8b, %[b0a].8b\n"
+ "sadalp v21.4s, v13.8h\n"
+ "sadalp v22.4s, v14.8h\n"
+ "smull v13.8h, v2.8b, %[b1a].8b\n"
+ "sadalp v23.4s, v15.8h\n"
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "smull v14.8h, v2.8b, %[b2a].8b\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "addp v19.4s, v22.4s, v23.4s\n"
+ "smull v15.8h, v2.8b, %[b3a].8b\n"
+ "smlal2 v12.8h, v2.16b, %[b0a].16b\n"
+ "str q16, [%[c_ptr]]\n"
+ "smlal2 v13.8h, v2.16b, %[b1a].16b\n"
+ "smlal2 v14.8h, v2.16b, %[b2a].16b\n"
+ "smlal2 v15.8h, v2.16b, %[b3a].16b\n"
+
+ "sadalp v24.4s, v12.8h\n"
+ "smull v12.8h, v3.8b, %[b0a].8b\n"
+ "sadalp v25.4s, v13.8h\n"
+ "sadalp v26.4s, v14.8h\n"
+ "smull v13.8h, v3.8b, %[b1a].8b\n"
+ "sadalp v27.4s, v15.8h\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "smull v14.8h, v3.8b, %[b2a].8b\n"
+ "addp v20.4s, v24.4s, v25.4s\n"
+ "addp v21.4s, v26.4s, v27.4s\n"
+ "smull v15.8h, v3.8b, %[b3a].8b\n"
+ "smlal2 v12.8h, v3.16b, %[b0a].16b\n"
+ "str q17, [%[c_ptr], #16]\n"
+ "smlal2 v13.8h, v3.16b, %[b1a].16b\n"
+ "smlal2 v14.8h, v3.16b, %[b2a].16b\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "smlal2 v15.8h, v3.16b, %[b3a].16b\n"
+ "b 3f\n"
// Detached final iteration (odd K)
"2:\n"
- "smull v14.8h, v0.8b, %[b2].8b\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "smull v15.8h, v0.8b, %[b3].8b\n"
- "add %[b_ptr], %[b_ptr], #64\n"
- "smlal2 v12.8h, v0.16b, %[b0].16b\n"
- "smlal2 v13.8h, v0.16b, %[b1].16b\n"
- "smlal2 v14.8h, v0.16b, %[b2].16b\n"
- "smlal2 v15.8h, v0.16b, %[b3].16b\n"
-
- "sadalp v16.4s, v12.8h\n"
- "smull v12.8h, v1.8b, %[b0].8b\n"
- "sadalp v17.4s, v13.8h\n"
- "sadalp v18.4s, v14.8h\n"
- "smull v13.8h, v1.8b, %[b1].8b\n"
- "sadalp v19.4s, v15.8h\n"
- "smull v14.8h, v1.8b, %[b2].8b\n"
- "smull v15.8h, v1.8b, %[b3].8b\n"
- "smlal2 v12.8h, v1.16b, %[b0].16b\n"
- "addp v16.4s, v16.4s, v17.4s\n"
- "smlal2 v13.8h, v1.16b, %[b1].16b\n"
- "addp v17.4s, v18.4s, v19.4s\n"
- "smlal2 v14.8h, v1.16b, %[b2].16b\n"
- "smlal2 v15.8h, v1.16b, %[b3].16b\n"
-
- "sadalp v20.4s, v12.8h\n"
- "smull v12.8h, v2.8b, %[b0].8b\n"
- "sadalp v21.4s, v13.8h\n"
- "sadalp v22.4s, v14.8h\n"
- "smull v13.8h, v2.8b, %[b1].8b\n"
- "sadalp v23.4s, v15.8h\n"
- "addp v16.4s, v16.4s, v17.4s\n"
- "smull v14.8h, v2.8b, %[b2].8b\n"
- "addp v18.4s, v20.4s, v21.4s\n"
- "addp v19.4s, v22.4s, v23.4s\n"
- "smull v15.8h, v2.8b, %[b3].8b\n"
- "smlal2 v12.8h, v2.16b, %[b0].16b\n"
- "str q16, [%[c_ptr]]\n"
- "smlal2 v13.8h, v2.16b, %[b1].16b\n"
- "smlal2 v14.8h, v2.16b, %[b2].16b\n"
- "smlal2 v15.8h, v2.16b, %[b3].16b\n"
-
- "sadalp v24.4s, v12.8h\n"
- "smull v12.8h, v3.8b, %[b0].8b\n"
- "sadalp v25.4s, v13.8h\n"
- "sadalp v26.4s, v14.8h\n"
- "smull v13.8h, v3.8b, %[b1].8b\n"
- "sadalp v27.4s, v15.8h\n"
- "addp v17.4s, v18.4s, v19.4s\n"
- "smull v14.8h, v3.8b, %[b2].8b\n"
- "addp v20.4s, v24.4s, v25.4s\n"
- "addp v21.4s, v26.4s, v27.4s\n"
- "smull v15.8h, v3.8b, %[b3].8b\n"
- "smlal2 v12.8h, v3.16b, %[b0].16b\n"
- "str q17, [%[c_ptr], #16]\n"
- "smlal2 v13.8h, v3.16b, %[b1].16b\n"
- "smlal2 v14.8h, v3.16b, %[b2].16b\n"
- "addp v18.4s, v20.4s, v21.4s\n"
- "smlal2 v15.8h, v3.16b, %[b3].16b\n"
+ "smull v14.8h, v0.8b, %[b2].8b\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "smull v15.8h, v0.8b, %[b3].8b\n"
+ "add %[b_ptr], %[b_ptr], #64\n"
+ "smlal2 v12.8h, v0.16b, %[b0].16b\n"
+ "smlal2 v13.8h, v0.16b, %[b1].16b\n"
+ "smlal2 v14.8h, v0.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v0.16b, %[b3].16b\n"
+
+ "sadalp v16.4s, v12.8h\n"
+ "smull v12.8h, v1.8b, %[b0].8b\n"
+ "sadalp v17.4s, v13.8h\n"
+ "sadalp v18.4s, v14.8h\n"
+ "smull v13.8h, v1.8b, %[b1].8b\n"
+ "sadalp v19.4s, v15.8h\n"
+ "smull v14.8h, v1.8b, %[b2].8b\n"
+ "smull v15.8h, v1.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v1.16b, %[b0].16b\n"
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "smlal2 v13.8h, v1.16b, %[b1].16b\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "smlal2 v14.8h, v1.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v1.16b, %[b3].16b\n"
+
+ "sadalp v20.4s, v12.8h\n"
+ "smull v12.8h, v2.8b, %[b0].8b\n"
+ "sadalp v21.4s, v13.8h\n"
+ "sadalp v22.4s, v14.8h\n"
+ "smull v13.8h, v2.8b, %[b1].8b\n"
+ "sadalp v23.4s, v15.8h\n"
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "smull v14.8h, v2.8b, %[b2].8b\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "addp v19.4s, v22.4s, v23.4s\n"
+ "smull v15.8h, v2.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v2.16b, %[b0].16b\n"
+ "str q16, [%[c_ptr]]\n"
+ "smlal2 v13.8h, v2.16b, %[b1].16b\n"
+ "smlal2 v14.8h, v2.16b, %[b2].16b\n"
+ "smlal2 v15.8h, v2.16b, %[b3].16b\n"
+
+ "sadalp v24.4s, v12.8h\n"
+ "smull v12.8h, v3.8b, %[b0].8b\n"
+ "sadalp v25.4s, v13.8h\n"
+ "sadalp v26.4s, v14.8h\n"
+ "smull v13.8h, v3.8b, %[b1].8b\n"
+ "sadalp v27.4s, v15.8h\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "smull v14.8h, v3.8b, %[b2].8b\n"
+ "addp v20.4s, v24.4s, v25.4s\n"
+ "addp v21.4s, v26.4s, v27.4s\n"
+ "smull v15.8h, v3.8b, %[b3].8b\n"
+ "smlal2 v12.8h, v3.16b, %[b0].16b\n"
+ "str q17, [%[c_ptr], #16]\n"
+ "smlal2 v13.8h, v3.16b, %[b1].16b\n"
+ "smlal2 v14.8h, v3.16b, %[b2].16b\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "smlal2 v15.8h, v3.16b, %[b3].16b\n"
"3:\n"
// Final additions
- "sadalp v28.4s, v12.8h\n"
- "str q18, [%[c_ptr], #32]\n"
- "sadalp v29.4s, v13.8h\n"
- "sadalp v30.4s, v14.8h\n"
- "sadalp v31.4s, v15.8h\n"
+ "sadalp v28.4s, v12.8h\n"
+ "str q18, [%[c_ptr], #32]\n"
+ "sadalp v29.4s, v13.8h\n"
+ "sadalp v30.4s, v14.8h\n"
+ "sadalp v31.4s, v15.8h\n"
// Horizontal reduction, phase 1
- "addp v22.4s, v28.4s, v29.4s\n"
- "addp v23.4s, v30.4s, v31.4s\n"
+ "addp v22.4s, v28.4s, v29.4s\n"
+ "addp v23.4s, v30.4s, v31.4s\n"
// Horizontal reduction, phase 2
- "addp v19.4s, v22.4s, v23.4s\n"
- "str q19, [%[c_ptr], #48]\n"
- "add %[c_ptr], %[c_ptr], #64\n"
+ "addp v19.4s, v22.4s, v23.4s\n"
+ "str q19, [%[c_ptr], #48]\n"
+ "add %[c_ptr], %[c_ptr], #64\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp
index 9af1b4df12..d0c64b2f6c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,8 @@ void a64_gemm_s8_8x12_x1(const int8_t *, const int8_t *, int32_t *, int, int, in
class cls_a64_gemm_s8_8x12 {
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)(const int8_t *, const int8_t *, int32_t *, int, int, int);
@@ -58,8 +59,8 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 8, 12, 4> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
template<typename T>
static PerformanceParameters get_performance_parameters(const CPUInfo *ci) {
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/a55r1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/a55r1.cpp
index bb5226e093..fc46781100 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/a55r1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/a55r1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,11 +111,11 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"1:\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr x20, [%[b_ptr], #40]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "subs %w[k], %w[k], #1\n"
+ "subs %w[k], %w[k], #1\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %d[a0a], [%[a_ptr], #32]\n"
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
"ins %[b2].d[1], x20\n"
@@ -123,7 +123,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[a_ptr], #40]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %d[a1a], [%[a_ptr], #48]\n"
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
"ins %[a0a].d[1], x20\n"
@@ -131,7 +131,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[a_ptr], #56]\n"
".word 0x4f80e872 // sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
".word 0x4fa0e873 // sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
".word 0x4f81e074 // sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
"ins %[a1a].d[1], x20\n"
@@ -139,7 +139,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[b_ptr], #56]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
"ins %[b0].d[1], x20\n"
@@ -155,8 +155,8 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- // Unroll 1
- "ldr %d[b2], [%[b_ptr], #80]\n"
+ // Unroll 1
+ "ldr %d[b2], [%[b_ptr], #80]\n"
".word 0x4f85e048 // sdot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
"ins %[b1].d[1], x20\n"
@@ -164,7 +164,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[b_ptr], #88]\n"
".word 0x4f85e84a // sdot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
".word 0x4fa5e84b // sdot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
- "ldr %d[a0], [%[a_ptr], #64]\n"
+ "ldr %d[a0], [%[a_ptr], #64]\n"
".word 0x4f86e04c // sdot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
"ins %[b2].d[1], x20\n"
@@ -172,7 +172,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[a_ptr], #72]\n"
".word 0x4f86e84e // sdot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
".word 0x4fa6e84f // sdot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- "ldr %d[a1], [%[a_ptr], #80]\n"
+ "ldr %d[a1], [%[a_ptr], #80]\n"
".word 0x4f85e070 // sdot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
"ins %[a0].d[1], x20\n"
@@ -180,7 +180,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[a_ptr], #88]\n"
".word 0x4f85e872 // sdot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
".word 0x4fa5e873 // sdot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
- "ldr %d[b0], [%[b_ptr], #96]\n"
+ "ldr %d[b0], [%[b_ptr], #96]\n"
".word 0x4f86e074 // sdot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
"ins %[a1].d[1], x20\n"
@@ -188,7 +188,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[b_ptr], #104]\n"
".word 0x4f86e876 // sdot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
".word 0x4fa6e877 // sdot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- "ldr %d[b1], [%[b_ptr], #112]\n"
+ "ldr %d[b1], [%[b_ptr], #112]\n"
".word 0x4f85e098 // sdot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
"ins %[b0].d[1], x20\n"
@@ -196,19 +196,19 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[b_ptr], #120]\n"
".word 0x4f85e89a // sdot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
".word 0x4fa5e89b // sdot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x4f86e09c // sdot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
ASM_PREFETCH("[%[b_ptr], #640]")
".word 0x4fa6e09d // sdot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x4f86e89e // sdot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
"ins %[b1].d[1], x20\n"
".word 0x4fa6e89f // sdot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
"ldr %d[b2], [%[b_ptr], #32]\n"
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "b.ne 1b\n"
+ "b.ne 1b\n"
// Branch here if K=1 or 2. Do the right thing for odd/even at the end.
"4:\n"
@@ -221,7 +221,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
// Even K continuation
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %d[a0a], [%[a_ptr], #32]\n"
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
"ins %[b2].d[1], x20\n"
@@ -230,7 +230,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
ASM_PREFETCHW("[%[c_ptr]]")
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %d[a1a], [%[a_ptr], #48]\n"
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
"ins %[a0a].d[1], x20\n"
@@ -238,7 +238,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
"ldr x20, [%[a_ptr], #56]\n"
".word 0x4f80e872 // sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
".word 0x4fa0e873 // sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
".word 0x4f81e074 // sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
"ins %[a1a].d[1], x20\n"
@@ -253,7 +253,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
ASM_PREFETCHW("[%[c_ptr], #128]")
".word 0x4f80e89a // sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x4fa0e89b // sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
".word 0x4f81e09c // sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
"ins %[b0].d[1], x20\n"
@@ -262,7 +262,7 @@ void a64_gemm_s8_8x12_a55r1(const int8_t *Apanel, const int8_t *Bpanel, int32_t
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
ASM_PREFETCHW("[%[c_ptr], #192]")
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %d[b2], [%[b_ptr], #80]\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
".word 0x4f85e048 // sdot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
"ins %[b1].d[1], x20\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/generic.cpp
index 7bf36a5900..f25947da26 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,63 +53,63 @@ void a64_gemm_s8_8x12(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpane
register int32x4_t a1a asm("v6");
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.4s, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.4s, #0x0\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "movi v11.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v13.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v14.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v16.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v17.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v18.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v19.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "movi v21.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
// Loop proper
"1:\n"
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x4fa1e04d // sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x4fa0e071 // sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
@@ -120,7 +120,7 @@ void a64_gemm_s8_8x12(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpane
".word 0x4fa1e075 // sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x4fa0e099 // sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
@@ -131,19 +131,19 @@ void a64_gemm_s8_8x12(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpane
".word 0x4fa1e09d // sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
".word 0x4f85e048 // sdot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
".word 0x4fa5e049 // sdot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
- "ldr %q[a0], [%[a_ptr], #64]\n"
+ "ldr %q[a0], [%[a_ptr], #64]\n"
".word 0x4f85e84a // sdot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
".word 0x4fa5e84b // sdot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
".word 0x4f86e04c // sdot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
- "ldr %q[a1], [%[a_ptr], #80]\n"
+ "ldr %q[a1], [%[a_ptr], #80]\n"
".word 0x4fa6e04d // sdot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
".word 0x4f86e84e // sdot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
".word 0x4fa6e84f // sdot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
".word 0x4f85e070 // sdot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
".word 0x4fa5e071 // sdot v17.4s, %[b1].16b, %[a0a].4b[1]\n"
@@ -154,40 +154,40 @@ void a64_gemm_s8_8x12(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpane
".word 0x4fa6e075 // sdot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
".word 0x4f86e876 // sdot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
".word 0x4fa6e877 // sdot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #112]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
".word 0x4f85e098 // sdot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
".word 0x4fa5e099 // sdot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x4f85e89a // sdot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
".word 0x4fa5e89b // sdot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x4f86e09c // sdot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
".word 0x4fa6e09d // sdot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "subs %w[k], %w[k], #1\n"
+ "subs %w[k], %w[k], #1\n"
".word 0x4f86e89e // sdot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
".word 0x4fa6e89f // sdot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
- "bne 1b\n"
+ "bne 1b\n"
// Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
"4:\n"
// Branch to alternative tail for odd K
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration (even K)
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x4fa1e04d // sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x4fa0e071 // sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
@@ -197,140 +197,140 @@ void a64_gemm_s8_8x12(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cpane
".word 0x4fa1e075 // sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x4fa0e099 // sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x4f80e89a // sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x4fa0e89b // sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
".word 0x4f81e09c // sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
".word 0x4fa1e09d // sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
".word 0x4f85e048 // sdot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
".word 0x4f85e070 // sdot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x4fa5e049 // sdot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
- "str q8, [%[c_ptr], #0]\n"
+ "str q8, [%[c_ptr], #0]\n"
".word 0x4fa5e071 // sdot v17.4s, %[b1].16b, %[a0a].4b[1]\n"
- "str q16, [%[c_ptr], #16]\n"
+ "str q16, [%[c_ptr], #16]\n"
".word 0x4f85e098 // sdot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
- "str q24, [%[c_ptr], #32]\n"
+ "str q24, [%[c_ptr], #32]\n"
".word 0x4fa5e099 // sdot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
- "str q9, [%[c_ptr], #48]\n"
+ "str q9, [%[c_ptr], #48]\n"
".word 0x4f85e84a // sdot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
- "str q17, [%[c_ptr], #64]\n"
+ "str q17, [%[c_ptr], #64]\n"
".word 0x4f85e872 // sdot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
- "str q25, [%[c_ptr], #80]\n"
+ "str q25, [%[c_ptr], #80]\n"
".word 0x4f85e89a // sdot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
- "str q10, [%[c_ptr], #96]\n"
+ "str q10, [%[c_ptr], #96]\n"
".word 0x4fa5e84b // sdot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
- "str q18, [%[c_ptr], #112]\n"
+ "str q18, [%[c_ptr], #112]\n"
".word 0x4fa5e873 // sdot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
- "str q26, [%[c_ptr], #128]\n"
+ "str q26, [%[c_ptr], #128]\n"
".word 0x4fa5e89b // sdot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "str q11, [%[c_ptr], #144]\n"
+ "str q11, [%[c_ptr], #144]\n"
".word 0x4f86e04c // sdot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
- "str q19, [%[c_ptr], #160]\n"
+ "str q19, [%[c_ptr], #160]\n"
".word 0x4f86e074 // sdot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
- "str q27, [%[c_ptr], #176]\n"
+ "str q27, [%[c_ptr], #176]\n"
".word 0x4f86e09c // sdot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q12, [%[c_ptr], #192]\n"
".word 0x4fa6e04d // sdot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
- "str q20, [%[c_ptr], #208]\n"
+ "str q20, [%[c_ptr], #208]\n"
".word 0x4fa6e075 // sdot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
- "str q28, [%[c_ptr], #224]\n"
+ "str q28, [%[c_ptr], #224]\n"
".word 0x4fa6e09d // sdot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "str q13, [%[c_ptr], #240]\n"
+ "str q13, [%[c_ptr], #240]\n"
".word 0x4f86e84e // sdot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
- "str q21, [%[c_ptr], #256]\n"
+ "str q21, [%[c_ptr], #256]\n"
".word 0x4f86e876 // sdot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
- "str q29, [%[c_ptr], #272]\n"
+ "str q29, [%[c_ptr], #272]\n"
".word 0x4f86e89e // sdot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
- "str q14, [%[c_ptr], #288]\n"
+ "str q14, [%[c_ptr], #288]\n"
".word 0x4fa6e84f // sdot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- "str q22, [%[c_ptr], #304]\n"
+ "str q22, [%[c_ptr], #304]\n"
".word 0x4fa6e877 // sdot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- "str q30, [%[c_ptr], #320]\n"
+ "str q30, [%[c_ptr], #320]\n"
".word 0x4fa6e89f // sdot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q15, [%[c_ptr], #336]\n"
- "b 3f\n"
+ "b 3f\n"
// Detached final iteration (odd K)
"2:\n"
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "str q8, [%[c_ptr], #0]\n"
+ "str q8, [%[c_ptr], #0]\n"
".word 0x4fa0e071 // sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
- "str q16, [%[c_ptr], #16]\n"
+ "str q16, [%[c_ptr], #16]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "str q24, [%[c_ptr], #32]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "str q24, [%[c_ptr], #32]\n"
".word 0x4fa0e099 // sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "str q9, [%[c_ptr], #48]\n"
+ "str q9, [%[c_ptr], #48]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "str q17, [%[c_ptr], #64]\n"
+ "str q17, [%[c_ptr], #64]\n"
".word 0x4f80e872 // sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "str q25, [%[c_ptr], #80]\n"
+ "str q25, [%[c_ptr], #80]\n"
".word 0x4f80e89a // sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "str q10, [%[c_ptr], #96]\n"
+ "str q10, [%[c_ptr], #96]\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "str q18, [%[c_ptr], #112]\n"
+ "str q18, [%[c_ptr], #112]\n"
".word 0x4fa0e873 // sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "str q26, [%[c_ptr], #128]\n"
+ "str q26, [%[c_ptr], #128]\n"
".word 0x4fa0e89b // sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "str q11, [%[c_ptr], #144]\n"
+ "str q11, [%[c_ptr], #144]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "str q19, [%[c_ptr], #160]\n"
+ "str q19, [%[c_ptr], #160]\n"
".word 0x4f81e074 // sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "str q27, [%[c_ptr], #176]\n"
+ "str q27, [%[c_ptr], #176]\n"
".word 0x4f81e09c // sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q12, [%[c_ptr], #192]\n"
".word 0x4fa1e04d // sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "str q20, [%[c_ptr], #208]\n"
+ "str q20, [%[c_ptr], #208]\n"
".word 0x4fa1e075 // sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "str q28, [%[c_ptr], #224]\n"
+ "str q28, [%[c_ptr], #224]\n"
".word 0x4fa1e09d // sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "str q13, [%[c_ptr], #240]\n"
+ "str q13, [%[c_ptr], #240]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "str q21, [%[c_ptr], #256]\n"
+ "str q21, [%[c_ptr], #256]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "str q29, [%[c_ptr], #272]\n"
+ "str q29, [%[c_ptr], #272]\n"
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "str q14, [%[c_ptr], #288]\n"
+ "str q14, [%[c_ptr], #288]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "str q22, [%[c_ptr], #304]\n"
+ "str q22, [%[c_ptr], #304]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "str q30, [%[c_ptr], #320]\n"
+ "str q30, [%[c_ptr], #320]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q15, [%[c_ptr], #336]\n"
// Common tail
"3:\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/x1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/x1.cpp
index afd2427b85..30f819d45e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/x1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12/x1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,60 +52,60 @@ void a64_gemm_s8_8x12_x1(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cp
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.4s, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.4s, #0x0\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "movi v11.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v13.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v14.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v16.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v17.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v18.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v19.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "movi v21.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
// Loop proper
"1:\n"
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x4fa1e04d // sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x4fa0e071 // sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
@@ -116,74 +116,74 @@ void a64_gemm_s8_8x12_x1(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cp
".word 0x4fa1e075 // sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x4fa0e099 // sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
ASM_PREFETCH("[%[b_ptr], #448]")
".word 0x4f80e89a // sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x4fa0e89b // sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "ldr %q[a0], [%[a_ptr], #32]\n"
+ "ldr %q[a0], [%[a_ptr], #32]\n"
".word 0x4f81e09c // sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
".word 0x4fa1e09d // sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[a1], [%[a_ptr], #48]\n"
+ "ldr %q[a1], [%[a_ptr], #48]\n"
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x4fa1e04d // sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x4fa0e071 // sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
ASM_PREFETCH("[%[b_ptr], #512]")
".word 0x4f80e872 // sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
".word 0x4fa0e873 // sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "subs %w[k], %w[k], #1\n"
+ "subs %w[k], %w[k], #1\n"
".word 0x4f81e074 // sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
".word 0x4fa1e075 // sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #112]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x4fa0e099 // sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x4f80e89a // sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x4fa0e89b // sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "ldr %q[a0], [%[a_ptr]]\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
".word 0x4f81e09c // sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x4fa1e09d // sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "bne 1b\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "bne 1b\n"
// Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
"4:\n"
// Branch to alternative tail for odd K
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration (even K)
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x4fa1e04d // sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x4fa0e071 // sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
@@ -193,142 +193,142 @@ void a64_gemm_s8_8x12_x1(const int8_t *Apanel, const int8_t *Bpanel, int32_t *Cp
".word 0x4fa1e075 // sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x4fa0e099 // sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x4f80e89a // sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x4fa0e89b // sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "ldr %q[a0], [%[a_ptr], #-32]\n"
+ "ldr %q[a0], [%[a_ptr], #-32]\n"
".word 0x4f81e09c // sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
".word 0x4fa1e09d // sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[a1], [%[a_ptr], #-16]\n"
+ "ldr %q[a1], [%[a_ptr], #-16]\n"
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "str q8, [%[c_ptr], #0]\n"
+ "str q8, [%[c_ptr], #0]\n"
".word 0x4fa0e071 // sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
- "str q16, [%[c_ptr], #16]\n"
+ "str q16, [%[c_ptr], #16]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "str q24, [%[c_ptr], #32]\n"
+ "str q24, [%[c_ptr], #32]\n"
".word 0x4fa0e099 // sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "str q9, [%[c_ptr], #48]\n"
+ "str q9, [%[c_ptr], #48]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "str q17, [%[c_ptr], #64]\n"
+ "str q17, [%[c_ptr], #64]\n"
".word 0x4f80e872 // sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "str q25, [%[c_ptr], #80]\n"
+ "str q25, [%[c_ptr], #80]\n"
".word 0x4f80e89a // sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "str q10, [%[c_ptr], #96]\n"
+ "str q10, [%[c_ptr], #96]\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "str q18, [%[c_ptr], #112]\n"
+ "str q18, [%[c_ptr], #112]\n"
".word 0x4fa0e873 // sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "str q26, [%[c_ptr], #128]\n"
+ "str q26, [%[c_ptr], #128]\n"
".word 0x4fa0e89b // sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "str q11, [%[c_ptr], #144]\n"
+ "str q11, [%[c_ptr], #144]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "str q19, [%[c_ptr], #160]\n"
+ "str q19, [%[c_ptr], #160]\n"
".word 0x4f81e074 // sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "str q27, [%[c_ptr], #176]\n"
+ "str q27, [%[c_ptr], #176]\n"
".word 0x4f81e09c // sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q12, [%[c_ptr], #192]\n"
".word 0x4fa1e04d // sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "str q20, [%[c_ptr], #208]\n"
+ "str q20, [%[c_ptr], #208]\n"
".word 0x4fa1e075 // sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "str q28, [%[c_ptr], #224]\n"
+ "str q28, [%[c_ptr], #224]\n"
".word 0x4fa1e09d // sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "str q13, [%[c_ptr], #240]\n"
+ "str q13, [%[c_ptr], #240]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "str q21, [%[c_ptr], #256]\n"
+ "str q21, [%[c_ptr], #256]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "str q29, [%[c_ptr], #272]\n"
+ "str q29, [%[c_ptr], #272]\n"
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "str q14, [%[c_ptr], #288]\n"
+ "str q14, [%[c_ptr], #288]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "str q22, [%[c_ptr], #304]\n"
+ "str q22, [%[c_ptr], #304]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "str q30, [%[c_ptr], #320]\n"
+ "str q30, [%[c_ptr], #320]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q15, [%[c_ptr], #336]\n"
- "b 3f\n"
+ "b 3f\n"
// Detached final iteration (odd K)
"2:\n"
".word 0x4f80e048 // sdot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x4f80e070 // sdot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x4fa0e049 // sdot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "str q8, [%[c_ptr], #0]\n"
+ "str q8, [%[c_ptr], #0]\n"
".word 0x4fa0e071 // sdot v17.4s, %[b1].16b, %[a0].4b[1]\n"
- "str q16, [%[c_ptr], #16]\n"
+ "str q16, [%[c_ptr], #16]\n"
".word 0x4f80e098 // sdot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "str q24, [%[c_ptr], #32]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "str q24, [%[c_ptr], #32]\n"
".word 0x4fa0e099 // sdot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "str q9, [%[c_ptr], #48]\n"
+ "str q9, [%[c_ptr], #48]\n"
".word 0x4f80e84a // sdot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "str q17, [%[c_ptr], #64]\n"
+ "str q17, [%[c_ptr], #64]\n"
".word 0x4f80e872 // sdot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "str q25, [%[c_ptr], #80]\n"
+ "str q25, [%[c_ptr], #80]\n"
".word 0x4f80e89a // sdot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "str q10, [%[c_ptr], #96]\n"
+ "str q10, [%[c_ptr], #96]\n"
".word 0x4fa0e84b // sdot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "str q18, [%[c_ptr], #112]\n"
+ "str q18, [%[c_ptr], #112]\n"
".word 0x4fa0e873 // sdot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "str q26, [%[c_ptr], #128]\n"
+ "str q26, [%[c_ptr], #128]\n"
".word 0x4fa0e89b // sdot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "str q11, [%[c_ptr], #144]\n"
+ "str q11, [%[c_ptr], #144]\n"
".word 0x4f81e04c // sdot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "str q19, [%[c_ptr], #160]\n"
+ "str q19, [%[c_ptr], #160]\n"
".word 0x4f81e074 // sdot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "str q27, [%[c_ptr], #176]\n"
+ "str q27, [%[c_ptr], #176]\n"
".word 0x4f81e09c // sdot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q12, [%[c_ptr], #192]\n"
".word 0x4fa1e04d // sdot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "str q20, [%[c_ptr], #208]\n"
+ "str q20, [%[c_ptr], #208]\n"
".word 0x4fa1e075 // sdot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "str q28, [%[c_ptr], #224]\n"
+ "str q28, [%[c_ptr], #224]\n"
".word 0x4fa1e09d // sdot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "str q13, [%[c_ptr], #240]\n"
+ "str q13, [%[c_ptr], #240]\n"
".word 0x4f81e84e // sdot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "str q21, [%[c_ptr], #256]\n"
+ "str q21, [%[c_ptr], #256]\n"
".word 0x4f81e876 // sdot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "str q29, [%[c_ptr], #272]\n"
+ "str q29, [%[c_ptr], #272]\n"
".word 0x4f81e89e // sdot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "str q14, [%[c_ptr], #288]\n"
+ "str q14, [%[c_ptr], #288]\n"
".word 0x4fa1e84f // sdot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "str q22, [%[c_ptr], #304]\n"
+ "str q22, [%[c_ptr], #304]\n"
".word 0x4fa1e877 // sdot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "str q30, [%[c_ptr], #320]\n"
+ "str q30, [%[c_ptr], #320]\n"
".word 0x4fa1e89f // sdot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q15, [%[c_ptr], #336]\n"
// Common tail
"3:\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "+w" (a0), [a1] "+w" (a1),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u16_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u16_8x12.hpp
index e49ebbd84e..af13fbd4e9 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u16_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u16_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,7 +34,8 @@ void a64_gemm_u16_asimd_8x12(const uint16_t *, const uint16_t *, uint32_t *, int
class cls_a64_gemm_u16_8x12 {
public:
- typedef uint16_t operand_type;
+ typedef uint16_t lhs_operand_type;
+ typedef uint16_t rhs_operand_type;
typedef uint32_t result_type;
typedef void (*kern_type)(const uint16_t *, const uint16_t *, uint32_t *, int, int, int);
@@ -53,8 +54,8 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 8, 12> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 1, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 1, true> transforms_quantized = {};
kern_type kernel = a64_gemm_u16_asimd_8x12;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4.hpp
index b747a1cf84..cdc902f2cc 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,7 +35,8 @@ void a64_gemm_u8_4x4(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cpa
class cls_a64_gemm_u8_4x4 {
public:
- typedef uint8_t operand_type;
+ typedef uint8_t lhs_operand_type;
+ typedef uint8_t rhs_operand_type;
typedef uint32_t result_type;
typedef void (*kern_type)(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
@@ -64,8 +65,8 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 4, 4, 16> transforms = {};
- StdTransformsFixed<operand_type, result_type, 4, 4, 16, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 16> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 16, true> transforms_quantized = {};
template<typename T>
static PerformanceParameters get_performance_parameters(const CPUInfo *ci) {
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4/generic.cpp
index 073aeab7f6..c227f21702 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_4x4/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Arm Limited.
+ * Copyright (c) 2017, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,225 +49,225 @@ void a64_gemm_u8_4x4(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cpa
register uint8x16_t b3 asm("v7");
__asm __volatile (
- "movi v16.4s, #0x0\n"
- "ldr q0, [%[a_ptr]]\n"
- "movi v17.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v18.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v19.4s, #0x0\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "movi v20.4s, #0x0\n"
- "ldr %q[b3], [%[b_ptr], #48]\n"
- "movi v21.4s, #0x0\n"
- "ldr q1, [%[a_ptr], #16]\n"
- "movi v22.4s, #0x0\n"
- "ldr q2, [%[a_ptr], #32]\n"
- "movi v23.4s, #0x0\n"
- "ldr q3, [%[a_ptr], #48]\n"
- "movi v24.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "ldr q0, [%[a_ptr]]\n"
+ "movi v17.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v18.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v19.4s, #0x0\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "movi v20.4s, #0x0\n"
+ "ldr %q[b3], [%[b_ptr], #48]\n"
+ "movi v21.4s, #0x0\n"
+ "ldr q1, [%[a_ptr], #16]\n"
+ "movi v22.4s, #0x0\n"
+ "ldr q2, [%[a_ptr], #32]\n"
+ "movi v23.4s, #0x0\n"
+ "ldr q3, [%[a_ptr], #48]\n"
+ "movi v24.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v25.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v26.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v27.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v28.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v29.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v30.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v31.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "umull v12.8h, v0.8b, %[b0].8b\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "umull v13.8h, v0.8b, %[b1].8b\n"
- "umull v14.8h, v0.8b, %[b2].8b\n"
- "add %[b_ptr], %[b_ptr], #64\n"
- "umull v15.8h, v0.8b, %[b3].8b\n"
+ "umull v12.8h, v0.8b, %[b0].8b\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "umull v13.8h, v0.8b, %[b1].8b\n"
+ "umull v14.8h, v0.8b, %[b2].8b\n"
+ "add %[b_ptr], %[b_ptr], #64\n"
+ "umull v15.8h, v0.8b, %[b3].8b\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 2f\n"
+ "cbz %w[k], 2f\n"
"1:\n"
- "uadalp v16.4s, v12.8h\n"
- "umull2 v12.8h, v0.16b, %[b0].16b\n"
- "uadalp v17.4s, v13.8h\n"
- "umull2 v13.8h, v0.16b, %[b1].16b\n"
- "uadalp v18.4s, v14.8h\n"
- "umull2 v14.8h, v0.16b, %[b2].16b\n"
- "uadalp v19.4s, v15.8h\n"
- "umull2 v15.8h, v0.16b, %[b3].16b\n"
- "ldr q0, [%[a_ptr]]\n"
-
- "uadalp v16.4s, v12.8h\n"
- "umull v12.8h, v1.8b, %[b0].8b\n"
- "uadalp v17.4s, v13.8h\n"
- "umull v13.8h, v1.8b, %[b1].8b\n"
- "subs %w[k], %w[k], #1\n"
- "uadalp v18.4s, v14.8h\n"
- "umull v14.8h, v1.8b, %[b2].8b\n"
- "uadalp v19.4s, v15.8h\n"
- "umull v15.8h, v1.8b, %[b3].8b\n"
-
- "uadalp v20.4s, v12.8h\n"
- "umull2 v12.8h, v1.16b, %[b0].16b\n"
- "uadalp v21.4s, v13.8h\n"
- "umull2 v13.8h, v1.16b, %[b1].16b\n"
+ "uadalp v16.4s, v12.8h\n"
+ "umull2 v12.8h, v0.16b, %[b0].16b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "umull2 v13.8h, v0.16b, %[b1].16b\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull2 v14.8h, v0.16b, %[b2].16b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "umull2 v15.8h, v0.16b, %[b3].16b\n"
+ "ldr q0, [%[a_ptr]]\n"
+
+ "uadalp v16.4s, v12.8h\n"
+ "umull v12.8h, v1.8b, %[b0].8b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "umull v13.8h, v1.8b, %[b1].8b\n"
+ "subs %w[k], %w[k], #1\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull v14.8h, v1.8b, %[b2].8b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "umull v15.8h, v1.8b, %[b3].8b\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull2 v12.8h, v1.16b, %[b0].16b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "umull2 v13.8h, v1.16b, %[b1].16b\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "uadalp v22.4s, v14.8h\n"
- "umull2 v14.8h, v1.16b, %[b2].16b\n"
- "uadalp v23.4s, v15.8h\n"
- "umull2 v15.8h, v1.16b, %[b3].16b\n"
- "ldr q1, [%[a_ptr], #16]\n"
-
- "uadalp v20.4s, v12.8h\n"
- "umull v12.8h, v2.8b, %[b0].8b\n"
- "uadalp v21.4s, v13.8h\n"
- "umull v13.8h, v2.8b, %[b1].8b\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull2 v14.8h, v1.16b, %[b2].16b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "umull2 v15.8h, v1.16b, %[b3].16b\n"
+ "ldr q1, [%[a_ptr], #16]\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull v12.8h, v2.8b, %[b0].8b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "umull v13.8h, v2.8b, %[b1].8b\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "uadalp v22.4s, v14.8h\n"
- "umull v14.8h, v2.8b, %[b2].8b\n"
- "uadalp v23.4s, v15.8h\n"
- "umull v15.8h, v2.8b, %[b3].8b\n"
-
- "uadalp v24.4s, v12.8h\n"
- "umull2 v12.8h, v2.16b, %[b0].16b\n"
- "uadalp v25.4s, v13.8h\n"
- "umull2 v13.8h, v2.16b, %[b1].16b\n"
- "uadalp v26.4s, v14.8h\n"
- "umull2 v14.8h, v2.16b, %[b2].16b\n"
- "uadalp v27.4s, v15.8h\n"
- "umull2 v15.8h, v2.16b, %[b3].16b\n"
- "ldr q2, [%[a_ptr], #32]\n"
-
- "uadalp v24.4s, v12.8h\n"
- "umull v12.8h, v3.8b, %[b0].8b\n"
- "uadalp v25.4s, v13.8h\n"
- "umull v13.8h, v3.8b, %[b1].8b\n"
- "uadalp v26.4s, v14.8h\n"
- "umull v14.8h, v3.8b, %[b2].8b\n"
- "uadalp v27.4s, v15.8h\n"
- "umull v15.8h, v3.8b, %[b3].8b\n"
-
- "uadalp v28.4s, v12.8h\n"
- "umull2 v12.8h, v3.16b, %[b0].16b\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "uadalp v29.4s, v13.8h\n"
- "umull2 v13.8h, v3.16b, %[b1].16b\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "uadalp v30.4s, v14.8h\n"
- "umull2 v14.8h, v3.16b, %[b2].16b\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "uadalp v31.4s, v15.8h\n"
- "umull2 v15.8h, v3.16b, %[b3].16b\n"
- "ldr %q[b3], [%[b_ptr], #48]\n"
-
- "uadalp v28.4s, v12.8h\n"
- "umull v12.8h, v0.8b, %[b0].8b\n"
- "add %[b_ptr], %[b_ptr], #64\n"
- "uadalp v29.4s, v13.8h\n"
- "umull v13.8h, v0.8b, %[b1].8b\n"
- "ldr q3, [%[a_ptr], #48]\n"
- "uadalp v30.4s, v14.8h\n"
- "umull v14.8h, v0.8b, %[b2].8b\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "uadalp v31.4s, v15.8h\n"
- "umull v15.8h, v0.8b, %[b3].8b\n"
- "bne 1b\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull v14.8h, v2.8b, %[b2].8b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "umull v15.8h, v2.8b, %[b3].8b\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull2 v12.8h, v2.16b, %[b0].16b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "umull2 v13.8h, v2.16b, %[b1].16b\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull2 v14.8h, v2.16b, %[b2].16b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "umull2 v15.8h, v2.16b, %[b3].16b\n"
+ "ldr q2, [%[a_ptr], #32]\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull v12.8h, v3.8b, %[b0].8b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "umull v13.8h, v3.8b, %[b1].8b\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull v14.8h, v3.8b, %[b2].8b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "umull v15.8h, v3.8b, %[b3].8b\n"
+
+ "uadalp v28.4s, v12.8h\n"
+ "umull2 v12.8h, v3.16b, %[b0].16b\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "uadalp v29.4s, v13.8h\n"
+ "umull2 v13.8h, v3.16b, %[b1].16b\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "uadalp v30.4s, v14.8h\n"
+ "umull2 v14.8h, v3.16b, %[b2].16b\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "uadalp v31.4s, v15.8h\n"
+ "umull2 v15.8h, v3.16b, %[b3].16b\n"
+ "ldr %q[b3], [%[b_ptr], #48]\n"
+
+ "uadalp v28.4s, v12.8h\n"
+ "umull v12.8h, v0.8b, %[b0].8b\n"
+ "add %[b_ptr], %[b_ptr], #64\n"
+ "uadalp v29.4s, v13.8h\n"
+ "umull v13.8h, v0.8b, %[b1].8b\n"
+ "ldr q3, [%[a_ptr], #48]\n"
+ "uadalp v30.4s, v14.8h\n"
+ "umull v14.8h, v0.8b, %[b2].8b\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "uadalp v31.4s, v15.8h\n"
+ "umull v15.8h, v0.8b, %[b3].8b\n"
+ "bne 1b\n"
// Branch target
"2:\n"
- "uadalp v16.4s, v12.8h\n"
- "umull2 v12.8h, v0.16b, %[b0].16b\n"
- "uadalp v17.4s, v13.8h\n"
- "umull2 v13.8h, v0.16b, %[b1].16b\n"
- "uadalp v18.4s, v14.8h\n"
- "umull2 v14.8h, v0.16b, %[b2].16b\n"
- "uadalp v19.4s, v15.8h\n"
- "umull2 v15.8h, v0.16b, %[b3].16b\n"
-
- "uadalp v16.4s, v12.8h\n"
- "umull v12.8h, v1.8b, %[b0].8b\n"
- "uadalp v17.4s, v13.8h\n"
- "umull v13.8h, v1.8b, %[b1].8b\n"
- "uadalp v18.4s, v14.8h\n"
- "umull v14.8h, v1.8b, %[b2].8b\n"
- "uadalp v19.4s, v15.8h\n"
- "umull v15.8h, v1.8b, %[b3].8b\n"
-
- "uadalp v20.4s, v12.8h\n"
- "umull2 v12.8h, v1.16b, %[b0].16b\n"
- "uadalp v21.4s, v13.8h\n"
- "umull2 v13.8h, v1.16b, %[b1].16b\n"
- "uadalp v22.4s, v14.8h\n"
- "umull2 v14.8h, v1.16b, %[b2].16b\n"
- "uadalp v23.4s, v15.8h\n"
- "umull2 v15.8h, v1.16b, %[b3].16b\n"
-
- "uadalp v20.4s, v12.8h\n"
- "umull v12.8h, v2.8b, %[b0].8b\n"
- "uadalp v21.4s, v13.8h\n"
- "umull v13.8h, v2.8b, %[b1].8b\n"
- "uadalp v22.4s, v14.8h\n"
- "umull v14.8h, v2.8b, %[b2].8b\n"
- "uadalp v23.4s, v15.8h\n"
- "umull v15.8h, v2.8b, %[b3].8b\n"
-
- "uadalp v24.4s, v12.8h\n"
- "umull2 v12.8h, v2.16b, %[b0].16b\n"
- "uadalp v25.4s, v13.8h\n"
- "umull2 v13.8h, v2.16b, %[b1].16b\n"
- "uadalp v26.4s, v14.8h\n"
- "umull2 v14.8h, v2.16b, %[b2].16b\n"
- "uadalp v27.4s, v15.8h\n"
- "umull2 v15.8h, v2.16b, %[b3].16b\n"
-
- "uadalp v24.4s, v12.8h\n"
- "umull v12.8h, v3.8b, %[b0].8b\n"
- "uadalp v25.4s, v13.8h\n"
- "umull v13.8h, v3.8b, %[b1].8b\n"
- "uadalp v26.4s, v14.8h\n"
- "umull v14.8h, v3.8b, %[b2].8b\n"
- "uadalp v27.4s, v15.8h\n"
- "umull v15.8h, v3.8b, %[b3].8b\n"
-
- "uadalp v28.4s, v12.8h\n"
- "umull2 v12.8h, v3.16b, %[b0].16b\n"
- "uadalp v29.4s, v13.8h\n"
- "umull2 v13.8h, v3.16b, %[b1].16b\n"
- "uadalp v30.4s, v14.8h\n"
- "umull2 v14.8h, v3.16b, %[b2].16b\n"
- "uadalp v31.4s, v15.8h\n"
- "umull2 v15.8h, v3.16b, %[b3].16b\n"
-
- "uadalp v28.4s, v12.8h\n"
- "uadalp v29.4s, v13.8h\n"
- "uadalp v30.4s, v14.8h\n"
- "uadalp v31.4s, v15.8h\n"
-
- "addp v16.4s, v16.4s, v17.4s\n"
- "addp v17.4s, v18.4s, v19.4s\n"
- "addp v18.4s, v20.4s, v21.4s\n"
- "addp v19.4s, v22.4s, v23.4s\n"
- "addp v20.4s, v24.4s, v25.4s\n"
- "addp v21.4s, v26.4s, v27.4s\n"
- "addp v22.4s, v28.4s, v29.4s\n"
- "addp v23.4s, v30.4s, v31.4s\n"
-
- "addp v16.4s, v16.4s, v17.4s\n"
- "addp v17.4s, v18.4s, v19.4s\n"
- "addp v18.4s, v20.4s, v21.4s\n"
- "addp v19.4s, v22.4s, v23.4s\n"
-
- "str q16, [%[c_ptr]]\n"
- "str q17, [%[c_ptr], #16]\n"
- "str q18, [%[c_ptr], #32]\n"
- "str q19, [%[c_ptr], #48]\n"
- "add %[c_ptr], %[c_ptr], #64\n"
+ "uadalp v16.4s, v12.8h\n"
+ "umull2 v12.8h, v0.16b, %[b0].16b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "umull2 v13.8h, v0.16b, %[b1].16b\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull2 v14.8h, v0.16b, %[b2].16b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "umull2 v15.8h, v0.16b, %[b3].16b\n"
+
+ "uadalp v16.4s, v12.8h\n"
+ "umull v12.8h, v1.8b, %[b0].8b\n"
+ "uadalp v17.4s, v13.8h\n"
+ "umull v13.8h, v1.8b, %[b1].8b\n"
+ "uadalp v18.4s, v14.8h\n"
+ "umull v14.8h, v1.8b, %[b2].8b\n"
+ "uadalp v19.4s, v15.8h\n"
+ "umull v15.8h, v1.8b, %[b3].8b\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull2 v12.8h, v1.16b, %[b0].16b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "umull2 v13.8h, v1.16b, %[b1].16b\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull2 v14.8h, v1.16b, %[b2].16b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "umull2 v15.8h, v1.16b, %[b3].16b\n"
+
+ "uadalp v20.4s, v12.8h\n"
+ "umull v12.8h, v2.8b, %[b0].8b\n"
+ "uadalp v21.4s, v13.8h\n"
+ "umull v13.8h, v2.8b, %[b1].8b\n"
+ "uadalp v22.4s, v14.8h\n"
+ "umull v14.8h, v2.8b, %[b2].8b\n"
+ "uadalp v23.4s, v15.8h\n"
+ "umull v15.8h, v2.8b, %[b3].8b\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull2 v12.8h, v2.16b, %[b0].16b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "umull2 v13.8h, v2.16b, %[b1].16b\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull2 v14.8h, v2.16b, %[b2].16b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "umull2 v15.8h, v2.16b, %[b3].16b\n"
+
+ "uadalp v24.4s, v12.8h\n"
+ "umull v12.8h, v3.8b, %[b0].8b\n"
+ "uadalp v25.4s, v13.8h\n"
+ "umull v13.8h, v3.8b, %[b1].8b\n"
+ "uadalp v26.4s, v14.8h\n"
+ "umull v14.8h, v3.8b, %[b2].8b\n"
+ "uadalp v27.4s, v15.8h\n"
+ "umull v15.8h, v3.8b, %[b3].8b\n"
+
+ "uadalp v28.4s, v12.8h\n"
+ "umull2 v12.8h, v3.16b, %[b0].16b\n"
+ "uadalp v29.4s, v13.8h\n"
+ "umull2 v13.8h, v3.16b, %[b1].16b\n"
+ "uadalp v30.4s, v14.8h\n"
+ "umull2 v14.8h, v3.16b, %[b2].16b\n"
+ "uadalp v31.4s, v15.8h\n"
+ "umull2 v15.8h, v3.16b, %[b3].16b\n"
+
+ "uadalp v28.4s, v12.8h\n"
+ "uadalp v29.4s, v13.8h\n"
+ "uadalp v30.4s, v14.8h\n"
+ "uadalp v31.4s, v15.8h\n"
+
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "addp v19.4s, v22.4s, v23.4s\n"
+ "addp v20.4s, v24.4s, v25.4s\n"
+ "addp v21.4s, v26.4s, v27.4s\n"
+ "addp v22.4s, v28.4s, v29.4s\n"
+ "addp v23.4s, v30.4s, v31.4s\n"
+
+ "addp v16.4s, v16.4s, v17.4s\n"
+ "addp v17.4s, v18.4s, v19.4s\n"
+ "addp v18.4s, v20.4s, v21.4s\n"
+ "addp v19.4s, v22.4s, v23.4s\n"
+
+ "str q16, [%[c_ptr]]\n"
+ "str q17, [%[c_ptr], #16]\n"
+ "str q18, [%[c_ptr], #32]\n"
+ "str q19, [%[c_ptr], #48]\n"
+ "add %[c_ptr], %[c_ptr], #64\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp
index 6d333f3449..f4e43407df 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018,2021 Arm Limited.
+ * Copyright (c) 2017-2018, 2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void a64_gemm_u8_8x12_x1(const uint8_t *, const uint8_t *, uint32_t *, int, int,
class cls_a64_gemm_u8_8x12 {
public:
- typedef uint8_t operand_type;
+ typedef uint8_t lhs_operand_type;
+ typedef uint8_t rhs_operand_type;
typedef uint32_t result_type;
typedef void (*kern_type)(const uint8_t *, const uint8_t *, uint32_t *, int, int, int);
@@ -66,8 +67,8 @@ public:
}
    // Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 8, 12, 4> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
template<typename T>
static PerformanceParameters get_performance_parameters(const CPUInfo *ci) {
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/a55r1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/a55r1.cpp
index 63869c9fd4..8203db21d0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/a55r1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/a55r1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -111,11 +111,11 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"1:\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr x20, [%[b_ptr], #40]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "subs %w[k], %w[k], #1\n"
+ "subs %w[k], %w[k], #1\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %d[a0a], [%[a_ptr], #32]\n"
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
"ins %[b2].d[1], x20\n"
@@ -123,7 +123,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[a_ptr], #40]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %d[a1a], [%[a_ptr], #48]\n"
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
"ins %[a0a].d[1], x20\n"
@@ -131,7 +131,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[a_ptr], #56]\n"
".word 0x6f80e872 // udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
".word 0x6fa0e873 // udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
".word 0x6f81e074 // udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
"ins %[a1a].d[1], x20\n"
@@ -139,7 +139,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[b_ptr], #56]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
"ins %[b0].d[1], x20\n"
@@ -155,8 +155,8 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- // Unroll 1
- "ldr %d[b2], [%[b_ptr], #80]\n"
+ // Unroll 1
+ "ldr %d[b2], [%[b_ptr], #80]\n"
".word 0x6f85e048 // udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
"ins %[b1].d[1], x20\n"
@@ -164,7 +164,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[b_ptr], #88]\n"
".word 0x6f85e84a // udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
".word 0x6fa5e84b // udot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
- "ldr %d[a0], [%[a_ptr], #64]\n"
+ "ldr %d[a0], [%[a_ptr], #64]\n"
".word 0x6f86e04c // udot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
"ins %[b2].d[1], x20\n"
@@ -172,7 +172,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[a_ptr], #72]\n"
".word 0x6f86e84e // udot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
".word 0x6fa6e84f // udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- "ldr %d[a1], [%[a_ptr], #80]\n"
+ "ldr %d[a1], [%[a_ptr], #80]\n"
".word 0x6f85e070 // udot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
"ins %[a0].d[1], x20\n"
@@ -180,7 +180,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[a_ptr], #88]\n"
".word 0x6f85e872 // udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
".word 0x6fa5e873 // udot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
- "ldr %d[b0], [%[b_ptr], #96]\n"
+ "ldr %d[b0], [%[b_ptr], #96]\n"
".word 0x6f86e074 // udot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
"ins %[a1].d[1], x20\n"
@@ -188,7 +188,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[b_ptr], #104]\n"
".word 0x6f86e876 // udot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
".word 0x6fa6e877 // udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- "ldr %d[b1], [%[b_ptr], #112]\n"
+ "ldr %d[b1], [%[b_ptr], #112]\n"
".word 0x6f85e098 // udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
"ins %[b0].d[1], x20\n"
@@ -196,19 +196,19 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[b_ptr], #120]\n"
".word 0x6f85e89a // udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
".word 0x6fa5e89b // udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x6f86e09c // udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
ASM_PREFETCH("[%[b_ptr], #640]")
".word 0x6fa6e09d // udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x6f86e89e // udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
"ins %[b1].d[1], x20\n"
".word 0x6fa6e89f // udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
"ldr %d[b2], [%[b_ptr], #32]\n"
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "b.ne 1b\n"
+ "b.ne 1b\n"
// Branch here if K=1 or 2. Do the right thing for odd/even at the end.
"4:\n"
@@ -221,7 +221,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
// Even K continuation
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %d[a0a], [%[a_ptr], #32]\n"
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
"ins %[b2].d[1], x20\n"
@@ -230,7 +230,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
ASM_PREFETCHW("[%[c_ptr]]")
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %d[a1a], [%[a_ptr], #48]\n"
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
"ins %[a0a].d[1], x20\n"
@@ -238,7 +238,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
"ldr x20, [%[a_ptr], #56]\n"
".word 0x6f80e872 // udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
".word 0x6fa0e873 // udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
".word 0x6f81e074 // udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
"ins %[a1a].d[1], x20\n"
@@ -253,7 +253,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
ASM_PREFETCHW("[%[c_ptr], #128]")
".word 0x6f80e89a // udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x6fa0e89b // udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
".word 0x6f81e09c // udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
"ins %[b0].d[1], x20\n"
@@ -262,7 +262,7 @@ void a64_gemm_u8_8x12_a55r1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
ASM_PREFETCHW("[%[c_ptr], #192]")
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %d[b2], [%[b_ptr], #80]\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
".word 0x6f85e048 // udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
"ins %[b1].d[1], x20\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/generic.cpp
index ff60cbc905..956ad9448e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -53,63 +53,63 @@ void a64_gemm_u8_8x12(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cp
register uint8x16_t a1a asm("v6");
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.4s, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.4s, #0x0\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "movi v11.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v13.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v14.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v16.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v17.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v18.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v19.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "movi v21.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
// Loop proper
"1:\n"
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x6fa1e04d // udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x6fa0e071 // udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
@@ -120,7 +120,7 @@ void a64_gemm_u8_8x12(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cp
".word 0x6fa1e075 // udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x6fa0e099 // udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
@@ -131,19 +131,19 @@ void a64_gemm_u8_8x12(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cp
".word 0x6fa1e09d // udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
".word 0x6f85e048 // udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
".word 0x6fa5e049 // udot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
- "ldr %q[a0], [%[a_ptr], #64]\n"
+ "ldr %q[a0], [%[a_ptr], #64]\n"
".word 0x6f85e84a // udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
".word 0x6fa5e84b // udot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
".word 0x6f86e04c // udot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
- "ldr %q[a1], [%[a_ptr], #80]\n"
+ "ldr %q[a1], [%[a_ptr], #80]\n"
".word 0x6fa6e04d // udot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
".word 0x6f86e84e // udot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
".word 0x6fa6e84f // udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
".word 0x6f85e070 // udot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
".word 0x6fa5e071 // udot v17.4s, %[b1].16b, %[a0a].4b[1]\n"
@@ -154,40 +154,40 @@ void a64_gemm_u8_8x12(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cp
".word 0x6fa6e075 // udot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
".word 0x6f86e876 // udot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
".word 0x6fa6e877 // udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #112]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
".word 0x6f85e098 // udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
".word 0x6fa5e099 // udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x6f85e89a // udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
".word 0x6fa5e89b // udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x6f86e09c // udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
".word 0x6fa6e09d // udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "subs %w[k], %w[k], #1\n"
+ "subs %w[k], %w[k], #1\n"
".word 0x6f86e89e // udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
".word 0x6fa6e89f // udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
- "bne 1b\n"
+ "bne 1b\n"
// Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
"4:\n"
// Branch to alternative tail for odd K
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration (even K)
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x6fa1e04d // udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x6fa0e071 // udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
@@ -197,140 +197,140 @@ void a64_gemm_u8_8x12(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t *Cp
".word 0x6fa1e075 // udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x6fa0e099 // udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x6f80e89a // udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x6fa0e89b // udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
".word 0x6f81e09c // udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
".word 0x6fa1e09d // udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
".word 0x6f85e048 // udot v8.4s , %[b0].16b, %[a0a].4b[0]\n"
".word 0x6f85e070 // udot v16.4s, %[b1].16b, %[a0a].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x6fa5e049 // udot v9.4s , %[b0].16b, %[a0a].4b[1]\n"
- "str q8, [%[c_ptr], #0]\n"
+ "str q8, [%[c_ptr], #0]\n"
".word 0x6fa5e071 // udot v17.4s, %[b1].16b, %[a0a].4b[1]\n"
- "str q16, [%[c_ptr], #16]\n"
+ "str q16, [%[c_ptr], #16]\n"
".word 0x6f85e098 // udot v24.4s, %[b2].16b, %[a0a].4b[0]\n"
- "str q24, [%[c_ptr], #32]\n"
+ "str q24, [%[c_ptr], #32]\n"
".word 0x6fa5e099 // udot v25.4s, %[b2].16b, %[a0a].4b[1]\n"
- "str q9, [%[c_ptr], #48]\n"
+ "str q9, [%[c_ptr], #48]\n"
".word 0x6f85e84a // udot v10.4s, %[b0].16b, %[a0a].4b[2]\n"
- "str q17, [%[c_ptr], #64]\n"
+ "str q17, [%[c_ptr], #64]\n"
".word 0x6f85e872 // udot v18.4s, %[b1].16b, %[a0a].4b[2]\n"
- "str q25, [%[c_ptr], #80]\n"
+ "str q25, [%[c_ptr], #80]\n"
".word 0x6f85e89a // udot v26.4s, %[b2].16b, %[a0a].4b[2]\n"
- "str q10, [%[c_ptr], #96]\n"
+ "str q10, [%[c_ptr], #96]\n"
".word 0x6fa5e84b // udot v11.4s, %[b0].16b, %[a0a].4b[3]\n"
- "str q18, [%[c_ptr], #112]\n"
+ "str q18, [%[c_ptr], #112]\n"
".word 0x6fa5e873 // udot v19.4s, %[b1].16b, %[a0a].4b[3]\n"
- "str q26, [%[c_ptr], #128]\n"
+ "str q26, [%[c_ptr], #128]\n"
".word 0x6fa5e89b // udot v27.4s, %[b2].16b, %[a0a].4b[3]\n"
- "str q11, [%[c_ptr], #144]\n"
+ "str q11, [%[c_ptr], #144]\n"
".word 0x6f86e04c // udot v12.4s, %[b0].16b, %[a1a].4b[0]\n"
- "str q19, [%[c_ptr], #160]\n"
+ "str q19, [%[c_ptr], #160]\n"
".word 0x6f86e074 // udot v20.4s, %[b1].16b, %[a1a].4b[0]\n"
- "str q27, [%[c_ptr], #176]\n"
+ "str q27, [%[c_ptr], #176]\n"
".word 0x6f86e09c // udot v28.4s, %[b2].16b, %[a1a].4b[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q12, [%[c_ptr], #192]\n"
".word 0x6fa6e04d // udot v13.4s, %[b0].16b, %[a1a].4b[1]\n"
- "str q20, [%[c_ptr], #208]\n"
+ "str q20, [%[c_ptr], #208]\n"
".word 0x6fa6e075 // udot v21.4s, %[b1].16b, %[a1a].4b[1]\n"
- "str q28, [%[c_ptr], #224]\n"
+ "str q28, [%[c_ptr], #224]\n"
".word 0x6fa6e09d // udot v29.4s, %[b2].16b, %[a1a].4b[1]\n"
- "str q13, [%[c_ptr], #240]\n"
+ "str q13, [%[c_ptr], #240]\n"
".word 0x6f86e84e // udot v14.4s, %[b0].16b, %[a1a].4b[2]\n"
- "str q21, [%[c_ptr], #256]\n"
+ "str q21, [%[c_ptr], #256]\n"
".word 0x6f86e876 // udot v22.4s, %[b1].16b, %[a1a].4b[2]\n"
- "str q29, [%[c_ptr], #272]\n"
+ "str q29, [%[c_ptr], #272]\n"
".word 0x6f86e89e // udot v30.4s, %[b2].16b, %[a1a].4b[2]\n"
- "str q14, [%[c_ptr], #288]\n"
+ "str q14, [%[c_ptr], #288]\n"
".word 0x6fa6e84f // udot v15.4s, %[b0].16b, %[a1a].4b[3]\n"
- "str q22, [%[c_ptr], #304]\n"
+ "str q22, [%[c_ptr], #304]\n"
".word 0x6fa6e877 // udot v23.4s, %[b1].16b, %[a1a].4b[3]\n"
- "str q30, [%[c_ptr], #320]\n"
+ "str q30, [%[c_ptr], #320]\n"
".word 0x6fa6e89f // udot v31.4s, %[b2].16b, %[a1a].4b[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q15, [%[c_ptr], #336]\n"
- "b 3f\n"
+ "b 3f\n"
// Detached final iteration (odd K)
"2:\n"
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "str q8, [%[c_ptr], #0]\n"
+ "str q8, [%[c_ptr], #0]\n"
".word 0x6fa0e071 // udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
- "str q16, [%[c_ptr], #16]\n"
+ "str q16, [%[c_ptr], #16]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "str q24, [%[c_ptr], #32]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "str q24, [%[c_ptr], #32]\n"
".word 0x6fa0e099 // udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "str q9, [%[c_ptr], #48]\n"
+ "str q9, [%[c_ptr], #48]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "str q17, [%[c_ptr], #64]\n"
+ "str q17, [%[c_ptr], #64]\n"
".word 0x6f80e872 // udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "str q25, [%[c_ptr], #80]\n"
+ "str q25, [%[c_ptr], #80]\n"
".word 0x6f80e89a // udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "str q10, [%[c_ptr], #96]\n"
+ "str q10, [%[c_ptr], #96]\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "str q18, [%[c_ptr], #112]\n"
+ "str q18, [%[c_ptr], #112]\n"
".word 0x6fa0e873 // udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "str q26, [%[c_ptr], #128]\n"
+ "str q26, [%[c_ptr], #128]\n"
".word 0x6fa0e89b // udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "str q11, [%[c_ptr], #144]\n"
+ "str q11, [%[c_ptr], #144]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "str q19, [%[c_ptr], #160]\n"
+ "str q19, [%[c_ptr], #160]\n"
".word 0x6f81e074 // udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "str q27, [%[c_ptr], #176]\n"
+ "str q27, [%[c_ptr], #176]\n"
".word 0x6f81e09c // udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q12, [%[c_ptr], #192]\n"
".word 0x6fa1e04d // udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "str q20, [%[c_ptr], #208]\n"
+ "str q20, [%[c_ptr], #208]\n"
".word 0x6fa1e075 // udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "str q28, [%[c_ptr], #224]\n"
+ "str q28, [%[c_ptr], #224]\n"
".word 0x6fa1e09d // udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "str q13, [%[c_ptr], #240]\n"
+ "str q13, [%[c_ptr], #240]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "str q21, [%[c_ptr], #256]\n"
+ "str q21, [%[c_ptr], #256]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "str q29, [%[c_ptr], #272]\n"
+ "str q29, [%[c_ptr], #272]\n"
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "str q14, [%[c_ptr], #288]\n"
+ "str q14, [%[c_ptr], #288]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "str q22, [%[c_ptr], #304]\n"
+ "str q22, [%[c_ptr], #304]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "str q30, [%[c_ptr], #320]\n"
+ "str q30, [%[c_ptr], #320]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q15, [%[c_ptr], #336]\n"
// Common tail
"3:\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/x1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/x1.cpp
index 1c1196b7a6..182d246d7a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/x1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12/x1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,60 +52,60 @@ void a64_gemm_u8_8x12_x1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.4s, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.4s, #0x0\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "movi v11.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v13.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v14.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v16.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v17.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v18.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v19.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "movi v21.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
// Loop proper
"1:\n"
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x6fa1e04d // udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x6fa0e071 // udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
@@ -116,74 +116,74 @@ void a64_gemm_u8_8x12_x1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t
".word 0x6fa1e075 // udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x6fa0e099 // udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
ASM_PREFETCH("[%[b_ptr], #448]")
".word 0x6f80e89a // udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x6fa0e89b // udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "ldr %q[a0], [%[a_ptr], #32]\n"
+ "ldr %q[a0], [%[a_ptr], #32]\n"
".word 0x6f81e09c // udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
".word 0x6fa1e09d // udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[a1], [%[a_ptr], #48]\n"
+ "ldr %q[a1], [%[a_ptr], #48]\n"
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x6fa1e04d // udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x6fa0e071 // udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
ASM_PREFETCH("[%[b_ptr], #512]")
".word 0x6f80e872 // udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
".word 0x6fa0e873 // udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "subs %w[k], %w[k], #1\n"
+ "subs %w[k], %w[k], #1\n"
".word 0x6f81e074 // udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
".word 0x6fa1e075 // udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #112]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x6fa0e099 // udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x6f80e89a // udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x6fa0e89b // udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "ldr %q[a0], [%[a_ptr]]\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
".word 0x6f81e09c // udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x6fa1e09d // udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "bne 1b\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "bne 1b\n"
// Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
"4:\n"
// Branch to alternative tail for odd K
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration (even K)
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
".word 0x6fa1e04d // udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x6fa0e071 // udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
@@ -193,142 +193,142 @@ void a64_gemm_u8_8x12_x1(const uint8_t *Apanel, const uint8_t *Bpanel, uint32_t
".word 0x6fa1e075 // udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
".word 0x6fa0e099 // udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
".word 0x6f80e89a // udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
".word 0x6fa0e89b // udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "ldr %q[a0], [%[a_ptr], #-32]\n"
+ "ldr %q[a0], [%[a_ptr], #-32]\n"
".word 0x6f81e09c // udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
".word 0x6fa1e09d // udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "ldr %q[a1], [%[a_ptr], #-16]\n"
+ "ldr %q[a1], [%[a_ptr], #-16]\n"
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "str q8, [%[c_ptr], #0]\n"
+ "str q8, [%[c_ptr], #0]\n"
".word 0x6fa0e071 // udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
- "str q16, [%[c_ptr], #16]\n"
+ "str q16, [%[c_ptr], #16]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "str q24, [%[c_ptr], #32]\n"
+ "str q24, [%[c_ptr], #32]\n"
".word 0x6fa0e099 // udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "str q9, [%[c_ptr], #48]\n"
+ "str q9, [%[c_ptr], #48]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "str q17, [%[c_ptr], #64]\n"
+ "str q17, [%[c_ptr], #64]\n"
".word 0x6f80e872 // udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "str q25, [%[c_ptr], #80]\n"
+ "str q25, [%[c_ptr], #80]\n"
".word 0x6f80e89a // udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "str q10, [%[c_ptr], #96]\n"
+ "str q10, [%[c_ptr], #96]\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "str q18, [%[c_ptr], #112]\n"
+ "str q18, [%[c_ptr], #112]\n"
".word 0x6fa0e873 // udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "str q26, [%[c_ptr], #128]\n"
+ "str q26, [%[c_ptr], #128]\n"
".word 0x6fa0e89b // udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "str q11, [%[c_ptr], #144]\n"
+ "str q11, [%[c_ptr], #144]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "str q19, [%[c_ptr], #160]\n"
+ "str q19, [%[c_ptr], #160]\n"
".word 0x6f81e074 // udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "str q27, [%[c_ptr], #176]\n"
+ "str q27, [%[c_ptr], #176]\n"
".word 0x6f81e09c // udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q12, [%[c_ptr], #192]\n"
".word 0x6fa1e04d // udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "str q20, [%[c_ptr], #208]\n"
+ "str q20, [%[c_ptr], #208]\n"
".word 0x6fa1e075 // udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "str q28, [%[c_ptr], #224]\n"
+ "str q28, [%[c_ptr], #224]\n"
".word 0x6fa1e09d // udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "str q13, [%[c_ptr], #240]\n"
+ "str q13, [%[c_ptr], #240]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "str q21, [%[c_ptr], #256]\n"
+ "str q21, [%[c_ptr], #256]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "str q29, [%[c_ptr], #272]\n"
+ "str q29, [%[c_ptr], #272]\n"
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "str q14, [%[c_ptr], #288]\n"
+ "str q14, [%[c_ptr], #288]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "str q22, [%[c_ptr], #304]\n"
+ "str q22, [%[c_ptr], #304]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "str q30, [%[c_ptr], #320]\n"
+ "str q30, [%[c_ptr], #320]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q15, [%[c_ptr], #336]\n"
- "b 3f\n"
+ "b 3f\n"
// Detached final iteration (odd K)
"2:\n"
".word 0x6f80e048 // udot v8.4s , %[b0].16b, %[a0].4b[0]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
".word 0x6f80e070 // udot v16.4s, %[b1].16b, %[a0].4b[0]\n"
".word 0x6fa0e049 // udot v9.4s , %[b0].16b, %[a0].4b[1]\n"
- "str q8, [%[c_ptr], #0]\n"
+ "str q8, [%[c_ptr], #0]\n"
".word 0x6fa0e071 // udot v17.4s, %[b1].16b, %[a0].4b[1]\n"
- "str q16, [%[c_ptr], #16]\n"
+ "str q16, [%[c_ptr], #16]\n"
".word 0x6f80e098 // udot v24.4s, %[b2].16b, %[a0].4b[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "str q24, [%[c_ptr], #32]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "str q24, [%[c_ptr], #32]\n"
".word 0x6fa0e099 // udot v25.4s, %[b2].16b, %[a0].4b[1]\n"
- "str q9, [%[c_ptr], #48]\n"
+ "str q9, [%[c_ptr], #48]\n"
".word 0x6f80e84a // udot v10.4s, %[b0].16b, %[a0].4b[2]\n"
- "str q17, [%[c_ptr], #64]\n"
+ "str q17, [%[c_ptr], #64]\n"
".word 0x6f80e872 // udot v18.4s, %[b1].16b, %[a0].4b[2]\n"
- "str q25, [%[c_ptr], #80]\n"
+ "str q25, [%[c_ptr], #80]\n"
".word 0x6f80e89a // udot v26.4s, %[b2].16b, %[a0].4b[2]\n"
- "str q10, [%[c_ptr], #96]\n"
+ "str q10, [%[c_ptr], #96]\n"
".word 0x6fa0e84b // udot v11.4s, %[b0].16b, %[a0].4b[3]\n"
- "str q18, [%[c_ptr], #112]\n"
+ "str q18, [%[c_ptr], #112]\n"
".word 0x6fa0e873 // udot v19.4s, %[b1].16b, %[a0].4b[3]\n"
- "str q26, [%[c_ptr], #128]\n"
+ "str q26, [%[c_ptr], #128]\n"
".word 0x6fa0e89b // udot v27.4s, %[b2].16b, %[a0].4b[3]\n"
- "str q11, [%[c_ptr], #144]\n"
+ "str q11, [%[c_ptr], #144]\n"
".word 0x6f81e04c // udot v12.4s, %[b0].16b, %[a1].4b[0]\n"
- "str q19, [%[c_ptr], #160]\n"
+ "str q19, [%[c_ptr], #160]\n"
".word 0x6f81e074 // udot v20.4s, %[b1].16b, %[a1].4b[0]\n"
- "str q27, [%[c_ptr], #176]\n"
+ "str q27, [%[c_ptr], #176]\n"
".word 0x6f81e09c // udot v28.4s, %[b2].16b, %[a1].4b[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q12, [%[c_ptr], #192]\n"
".word 0x6fa1e04d // udot v13.4s, %[b0].16b, %[a1].4b[1]\n"
- "str q20, [%[c_ptr], #208]\n"
+ "str q20, [%[c_ptr], #208]\n"
".word 0x6fa1e075 // udot v21.4s, %[b1].16b, %[a1].4b[1]\n"
- "str q28, [%[c_ptr], #224]\n"
+ "str q28, [%[c_ptr], #224]\n"
".word 0x6fa1e09d // udot v29.4s, %[b2].16b, %[a1].4b[1]\n"
- "str q13, [%[c_ptr], #240]\n"
+ "str q13, [%[c_ptr], #240]\n"
".word 0x6f81e84e // udot v14.4s, %[b0].16b, %[a1].4b[2]\n"
- "str q21, [%[c_ptr], #256]\n"
+ "str q21, [%[c_ptr], #256]\n"
".word 0x6f81e876 // udot v22.4s, %[b1].16b, %[a1].4b[2]\n"
- "str q29, [%[c_ptr], #272]\n"
+ "str q29, [%[c_ptr], #272]\n"
".word 0x6f81e89e // udot v30.4s, %[b2].16b, %[a1].4b[2]\n"
- "str q14, [%[c_ptr], #288]\n"
+ "str q14, [%[c_ptr], #288]\n"
".word 0x6fa1e84f // udot v15.4s, %[b0].16b, %[a1].4b[3]\n"
- "str q22, [%[c_ptr], #304]\n"
+ "str q22, [%[c_ptr], #304]\n"
".word 0x6fa1e877 // udot v23.4s, %[b1].16b, %[a1].4b[3]\n"
- "str q30, [%[c_ptr], #320]\n"
+ "str q30, [%[c_ptr], #320]\n"
".word 0x6fa1e89f // udot v31.4s, %[b2].16b, %[a1].4b[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q15, [%[c_ptr], #336]\n"
// Common tail
"3:\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp
index 586d6a64a4..6bc40b4ac8 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,7 +23,7 @@
*/
#pragma once
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(ARM_COMPUTE_ENABLE_FP16))
#include "../performance_parameters.hpp"
#include "../std_transforms_fixed.hpp"
@@ -41,7 +41,8 @@ void a64_hgemm_asimd_8x24_x1(const __fp16 *, const __fp16 *, __fp16 *, int, int,
// the constructor to pick a kernel implementation).
class cls_a64_hgemm_8x24 {
public:
- typedef __fp16 operand_type;
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
typedef __fp16 result_type;
typedef void (*kern_type)(const __fp16 *, const __fp16 *, __fp16 *, int, int, int);
@@ -60,7 +61,7 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 8, 24> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 24> transforms = {};
template<typename T>
static PerformanceParameters get_performance_parameters(const CPUInfo *ci) {
@@ -89,4 +90,4 @@ public:
} // namespace arm_gemm
-#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // __aarch64__ && (FP16_KERNELS || ARM_COMPUTE_ENABLE_FP16)
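
The a64_hgemm_8x24.hpp hunk above splits the single operand_type typedef into lhs_operand_type and rhs_operand_type and widens StdTransformsFixed to take both, so a kernel class can in principle declare different element types for the two packed panels; this FP16 kernel keeps both as __fp16. A simplified sketch of the resulting shape, assuming an AArch64 toolchain that supports __fp16 (template parameters reduced for illustration; the real signature may carry more):

    // Transforms now parameterised on separate LHS and RHS element types.
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned int Height, unsigned int Width>
    struct StdTransformsFixedSketch { /* packing entry points elided */ };

    struct cls_sketch {
        typedef __fp16 lhs_operand_type;
        typedef __fp16 rhs_operand_type;
        typedef __fp16 result_type;

        StdTransformsFixedSketch<lhs_operand_type, rhs_operand_type,
                                 result_type, 8, 24> transforms = {};
    };
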
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/a55r1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/a55r1.cpp
index e5728beba8..742b406438 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/a55r1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/a55r1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,8 +22,8 @@
* SOFTWARE.
*/
-// Build on AArch64 where either FP16_KERNELS is set or FP16 is explicitly supported.
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+// Build on AArch64 where either ENABLE_FP16_KERNELS is set or FP16 is explicitly supported.
+#if defined(__aarch64__) && (defined(ENABLE_FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
#include <arm_neon.h>
@@ -72,311 +72,311 @@ void a64_hgemm_asimd_8x24_a55r1(const __fp16 *Apanel, const __fp16 *Bpanel, __fp
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.8h, #0x0\n"
- "ldr %d[a0], [%[a_ptr]]\n"
- "movi v9.8h, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.8h, #0x0\n"
- "ldr %d[a1], [%[a_ptr], #8]\n"
- "movi v11.8h, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.8h, #0x0\n"
- "movi v13.8h, #0x0\n"
+ "movi v8.8h, #0x0\n"
+ "ldr %d[a0], [%[a_ptr]]\n"
+ "movi v9.8h, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.8h, #0x0\n"
+ "ldr %d[a1], [%[a_ptr], #8]\n"
+ "movi v11.8h, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.8h, #0x0\n"
+ "movi v13.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v14.8h, #0x0\n"
- "movi v15.8h, #0x0\n"
+ "movi v14.8h, #0x0\n"
+ "movi v15.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v16.8h, #0x0\n"
- "movi v17.8h, #0x0\n"
+ "movi v16.8h, #0x0\n"
+ "movi v17.8h, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v18.8h, #0x0\n"
- "movi v19.8h, #0x0\n"
+ "movi v18.8h, #0x0\n"
+ "movi v19.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v20.8h, #0x0\n"
- "movi v21.8h, #0x0\n"
+ "movi v20.8h, #0x0\n"
+ "movi v21.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v22.8h, #0x0\n"
- "movi v23.8h, #0x0\n"
+ "movi v22.8h, #0x0\n"
+ "movi v23.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v24.8h, #0x0\n"
- "movi v25.8h, #0x0\n"
- "movi v26.8h, #0x0\n"
- "movi v27.8h, #0x0\n"
- "movi v28.8h, #0x0\n"
- "movi v29.8h, #0x0\n"
- "movi v30.8h, #0x0\n"
- "movi v31.8h, #0x0\n"
+ "movi v24.8h, #0x0\n"
+ "movi v25.8h, #0x0\n"
+ "movi v26.8h, #0x0\n"
+ "movi v27.8h, #0x0\n"
+ "movi v28.8h, #0x0\n"
+ "movi v29.8h, #0x0\n"
+ "movi v30.8h, #0x0\n"
+ "movi v31.8h, #0x0\n"
// The loop is offset by these two instructions which must
// always be executed.
- "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
- "ldr %d[b2], [%[b_ptr], #32]\n"
+ "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
"1:\n"
- "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
- "ldr %d[a0a], [%[a_ptr], #16]\n"
-
- "fmla v12.8h, %[b0].8h, %[a1].h[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.8h, %[b0].8h, %[a1].h[1]\n"
- "fmla v14.8h, %[b0].8h, %[a1].h[2]\n"
- "fmla v15.8h, %[b0].8h, %[a1].h[3]\n"
- "ldr %d[a1a], [%[a_ptr], #24]\n"
-
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
-
- "fmla v20.8h, %[b1].8h, %[a1].h[0]\n"
- "fmla v21.8h, %[b1].8h, %[a1].h[1]\n"
- "ldr x20, [%[b_ptr], #56]\n"
- "fmla v22.8h, %[b1].8h, %[a1].h[2]\n"
- "fmla v23.8h, %[b1].8h, %[a1].h[3]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
-
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "ins %[b0].d[1], x20\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
- "ldr x20, [%[b_ptr], #72]\n"
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "ldr %d[a0a], [%[a_ptr], #16]\n"
+
+ "fmla v12.8h, %[b0].8h, %[a1].h[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.8h, %[b0].8h, %[a1].h[1]\n"
+ "fmla v14.8h, %[b0].8h, %[a1].h[2]\n"
+ "fmla v15.8h, %[b0].8h, %[a1].h[3]\n"
+ "ldr %d[a1a], [%[a_ptr], #24]\n"
+
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
+
+ "fmla v20.8h, %[b1].8h, %[a1].h[0]\n"
+ "fmla v21.8h, %[b1].8h, %[a1].h[1]\n"
+ "ldr x20, [%[b_ptr], #56]\n"
+ "fmla v22.8h, %[b1].8h, %[a1].h[2]\n"
+ "fmla v23.8h, %[b1].8h, %[a1].h[3]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
+
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "ins %[b0].d[1], x20\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "ldr x20, [%[b_ptr], #72]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "fmla v28.8h, %[b2].8h, %[a1].h[0]\n"
- "fmla v29.8h, %[b2].8h, %[a1].h[1]\n"
+ "fmla v28.8h, %[b2].8h, %[a1].h[0]\n"
+ "fmla v29.8h, %[b2].8h, %[a1].h[1]\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "fmla v30.8h, %[b2].8h, %[a1].h[2]\n"
- "fmla v31.8h, %[b2].8h, %[a1].h[3]\n"
- "ldr %d[b2], [%[b_ptr], #80]\n"
+ "fmla v30.8h, %[b2].8h, %[a1].h[2]\n"
+ "fmla v31.8h, %[b2].8h, %[a1].h[3]\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
// Unroll 1
- "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
- "ins %[b1].d[1], x20\n"
- "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
- "ldr x20, [%[b_ptr], #88]\n"
- "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
- "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
- "ldr %d[a0], [%[a_ptr], #32]\n"
-
- "fmla v12.8h, %[b0].8h, %[a1a].h[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.8h, %[b0].8h, %[a1a].h[1]\n"
- "fmla v14.8h, %[b0].8h, %[a1a].h[2]\n"
- "fmla v15.8h, %[b0].8h, %[a1a].h[3]\n"
- "ldr %d[a1], [%[a_ptr], #40]\n"
-
- "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
- "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
- "ldr %d[b0], [%[b_ptr], #96]\n"
-
- "fmla v20.8h, %[b1].8h, %[a1a].h[0]\n"
- "fmla v21.8h, %[b1].8h, %[a1a].h[1]\n"
- "ldr x20, [%[b_ptr], #104]\n"
- "fmla v22.8h, %[b1].8h, %[a1a].h[2]\n"
- "fmla v23.8h, %[b1].8h, %[a1a].h[3]\n"
- "ldr %d[b1], [%[b_ptr], #112]\n"
-
- "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
- "ins %[b0].d[1], x20\n"
- "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
- "ldr x20, [%[b_ptr], #120]\n"
- "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
-
- "fmla v28.8h, %[b2].8h, %[a1a].h[0]\n"
+ "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
+ "ldr x20, [%[b_ptr], #88]\n"
+ "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
+ "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
+ "ldr %d[a0], [%[a_ptr], #32]\n"
+
+ "fmla v12.8h, %[b0].8h, %[a1a].h[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.8h, %[b0].8h, %[a1a].h[1]\n"
+ "fmla v14.8h, %[b0].8h, %[a1a].h[2]\n"
+ "fmla v15.8h, %[b0].8h, %[a1a].h[3]\n"
+ "ldr %d[a1], [%[a_ptr], #40]\n"
+
+ "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
+ "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
+ "ldr %d[b0], [%[b_ptr], #96]\n"
+
+ "fmla v20.8h, %[b1].8h, %[a1a].h[0]\n"
+ "fmla v21.8h, %[b1].8h, %[a1a].h[1]\n"
+ "ldr x20, [%[b_ptr], #104]\n"
+ "fmla v22.8h, %[b1].8h, %[a1a].h[2]\n"
+ "fmla v23.8h, %[b1].8h, %[a1a].h[3]\n"
+ "ldr %d[b1], [%[b_ptr], #112]\n"
+
+ "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
+ "ins %[b0].d[1], x20\n"
+ "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
+ "ldr x20, [%[b_ptr], #120]\n"
+ "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
+
+ "fmla v28.8h, %[b2].8h, %[a1a].h[0]\n"
ASM_PREFETCH("[%[b_ptr], #448]")
- "fmla v29.8h, %[b2].8h, %[a1a].h[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v30.8h, %[b2].8h, %[a1a].h[2]\n"
- "ins %[b1].d[1], x20\n"
- "fmla v31.8h, %[b2].8h, %[a1a].h[3]\n"
- "ldr %d[b2], [%[b_ptr], #32]\n"
+ "fmla v29.8h, %[b2].8h, %[a1a].h[1]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v30.8h, %[b2].8h, %[a1a].h[2]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v31.8h, %[b2].8h, %[a1a].h[3]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
- "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
- "bne 1b\n"
+ "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
+ "bne 1b\n"
"4:\n"
// Start final iteration - branch off to "odd" code before we load a0a
- "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
- "cbnz %w[oddk], 2f\n"
+ "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
+ "cbnz %w[oddk], 2f\n"
// Even K continuation
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
- "ldr %d[a0a], [%[a_ptr], #16]\n"
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "ldr %d[a0a], [%[a_ptr], #16]\n"
- "fmla v12.8h, %[b0].8h, %[a1].h[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.8h, %[b0].8h, %[a1].h[1]\n"
+ "fmla v12.8h, %[b0].8h, %[a1].h[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.8h, %[b0].8h, %[a1].h[1]\n"
ASM_PREFETCHW("[%[c_ptr]]")
- "fmla v14.8h, %[b0].8h, %[a1].h[2]\n"
- "fmla v15.8h, %[b0].8h, %[a1].h[3]\n"
- "ldr %d[a1a], [%[a_ptr], #24]\n"
+ "fmla v14.8h, %[b0].8h, %[a1].h[2]\n"
+ "fmla v15.8h, %[b0].8h, %[a1].h[3]\n"
+ "ldr %d[a1a], [%[a_ptr], #24]\n"
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
ASM_PREFETCHW("[%[c_ptr], #64]")
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
-
- "fmla v20.8h, %[b1].8h, %[a1].h[0]\n"
- "fmla v21.8h, %[b1].8h, %[a1].h[1]\n"
- "ldr x20, [%[b_ptr], #56]\n"
- "fmla v22.8h, %[b1].8h, %[a1].h[2]\n"
- "fmla v23.8h, %[b1].8h, %[a1].h[3]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
-
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "ins %[b0].d[1], x20\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
- "ldr x20, [%[b_ptr], #72]\n"
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
+
+ "fmla v20.8h, %[b1].8h, %[a1].h[0]\n"
+ "fmla v21.8h, %[b1].8h, %[a1].h[1]\n"
+ "ldr x20, [%[b_ptr], #56]\n"
+ "fmla v22.8h, %[b1].8h, %[a1].h[2]\n"
+ "fmla v23.8h, %[b1].8h, %[a1].h[3]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
+
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "ins %[b0].d[1], x20\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "ldr x20, [%[b_ptr], #72]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
ASM_PREFETCHW("[%[c_ptr], #128]")
- "fmla v28.8h, %[b2].8h, %[a1].h[0]\n"
- "fmla v29.8h, %[b2].8h, %[a1].h[1]\n"
+ "fmla v28.8h, %[b2].8h, %[a1].h[0]\n"
+ "fmla v29.8h, %[b2].8h, %[a1].h[1]\n"
ASM_PREFETCHW("[%[c_ptr], #192]")
- "fmla v30.8h, %[b2].8h, %[a1].h[2]\n"
- "fmla v31.8h, %[b2].8h, %[a1].h[3]\n"
- "ldr %d[b2], [%[b_ptr], #80]\n"
-
- "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
- "ins %[b1].d[1], x20\n"
- "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
- "ldr x20, [%[b_ptr], #88]\n"
- "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
- "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
+ "fmla v30.8h, %[b2].8h, %[a1].h[2]\n"
+ "fmla v31.8h, %[b2].8h, %[a1].h[3]\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
+
+ "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
+ "ldr x20, [%[b_ptr], #88]\n"
+ "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
+ "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
ASM_PREFETCHW("[%[c_ptr], #256]")
- "fmla v12.8h, %[b0].8h, %[a1a].h[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.8h, %[b0].8h, %[a1a].h[1]\n"
+ "fmla v12.8h, %[b0].8h, %[a1a].h[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.8h, %[b0].8h, %[a1a].h[1]\n"
ASM_PREFETCHW("[%[c_ptr], #320]")
- "fmla v14.8h, %[b0].8h, %[a1a].h[2]\n"
- "fmla v15.8h, %[b0].8h, %[a1a].h[3]\n"
- "ldr %d[a1], [%[a_ptr], #40]\n"
+ "fmla v14.8h, %[b0].8h, %[a1a].h[2]\n"
+ "fmla v15.8h, %[b0].8h, %[a1a].h[3]\n"
+ "ldr %d[a1], [%[a_ptr], #40]\n"
- "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
+ "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
ASM_PREFETCHWL2("[%[c_ptr], #384]")
- "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
+ "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #448]")
- "fmla v20.8h, %[b1].8h, %[a1a].h[0]\n"
- "fmla v21.8h, %[b1].8h, %[a1a].h[1]\n"
+ "fmla v20.8h, %[b1].8h, %[a1a].h[0]\n"
+ "fmla v21.8h, %[b1].8h, %[a1a].h[1]\n"
ASM_PREFETCHWL2("[%[c_ptr], #512]")
- "fmla v22.8h, %[b1].8h, %[a1a].h[2]\n"
- "fmla v23.8h, %[b1].8h, %[a1a].h[3]\n"
+ "fmla v22.8h, %[b1].8h, %[a1a].h[2]\n"
+ "fmla v23.8h, %[b1].8h, %[a1a].h[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #576]")
- "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
- "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
+ "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
+ "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
ASM_PREFETCHWL2("[%[c_ptr], #640]")
- "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
+ "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #704]")
- "fmla v28.8h, %[b2].8h, %[a1a].h[0]\n"
- "fmla v29.8h, %[b2].8h, %[a1a].h[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v30.8h, %[b2].8h, %[a1a].h[2]\n"
- "fmla v31.8h, %[b2].8h, %[a1a].h[3]\n"
- "b 3f\n"
+ "fmla v28.8h, %[b2].8h, %[a1a].h[0]\n"
+ "fmla v29.8h, %[b2].8h, %[a1a].h[1]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v30.8h, %[b2].8h, %[a1a].h[2]\n"
+ "fmla v31.8h, %[b2].8h, %[a1a].h[3]\n"
+ "b 3f\n"
"2:\n"
// Odd tail
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
ASM_PREFETCHW("[%[c_ptr]]")
- "fmla v12.8h, %[b0].8h, %[a1].h[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.8h, %[b0].8h, %[a1].h[1]\n"
+ "fmla v12.8h, %[b0].8h, %[a1].h[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.8h, %[b0].8h, %[a1].h[1]\n"
ASM_PREFETCHW("[%[c_ptr], #64]")
- "fmla v14.8h, %[b0].8h, %[a1].h[2]\n"
- "add %[a_ptr], %[a_ptr], #16\n"
- "fmla v15.8h, %[b0].8h, %[a1].h[3]\n"
+ "fmla v14.8h, %[b0].8h, %[a1].h[2]\n"
+ "add %[a_ptr], %[a_ptr], #16\n"
+ "fmla v15.8h, %[b0].8h, %[a1].h[3]\n"
ASM_PREFETCHW("[%[c_ptr], #128]")
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
ASM_PREFETCHW("[%[c_ptr], #192]")
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
ASM_PREFETCHW("[%[c_ptr], #256]")
- "fmla v20.8h, %[b1].8h, %[a1].h[0]\n"
- "fmla v21.8h, %[b1].8h, %[a1].h[1]\n"
+ "fmla v20.8h, %[b1].8h, %[a1].h[0]\n"
+ "fmla v21.8h, %[b1].8h, %[a1].h[1]\n"
ASM_PREFETCHW("[%[c_ptr], #320]")
- "fmla v22.8h, %[b1].8h, %[a1].h[2]\n"
- "fmla v23.8h, %[b1].8h, %[a1].h[3]\n"
+ "fmla v22.8h, %[b1].8h, %[a1].h[2]\n"
+ "fmla v23.8h, %[b1].8h, %[a1].h[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #384]")
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
ASM_PREFETCHWL2("[%[c_ptr], #384]")
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #448]")
- "fmla v28.8h, %[b2].8h, %[a1].h[0]\n"
+ "fmla v28.8h, %[b2].8h, %[a1].h[0]\n"
ASM_PREFETCHWL2("[%[c_ptr], #512]")
- "fmla v29.8h, %[b2].8h, %[a1].h[1]\n"
+ "fmla v29.8h, %[b2].8h, %[a1].h[1]\n"
ASM_PREFETCHWL2("[%[c_ptr], #576]")
- "fmla v30.8h, %[b2].8h, %[a1].h[2]\n"
+ "fmla v30.8h, %[b2].8h, %[a1].h[2]\n"
ASM_PREFETCHWL2("[%[c_ptr], #640]")
- "fmla v31.8h, %[b2].8h, %[a1].h[3]\n"
+ "fmla v31.8h, %[b2].8h, %[a1].h[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #704]")
// Common tail
// A55 won't dual issue these stores with anything else, so
// simplest to do them all in this common code.
"3:\n"
- "str q8, [%[c_ptr]]\n"
- "str q16, [%[c_ptr], #16]\n"
- "str q24, [%[c_ptr], #32]\n"
- "str q9, [%[c_ptr], #48]\n"
- "str q17, [%[c_ptr], #64]\n"
- "str q25, [%[c_ptr], #80]\n"
- "str q10, [%[c_ptr], #96]\n"
- "str q18, [%[c_ptr], #112]\n"
- "str q26, [%[c_ptr], #128]\n"
- "str q11, [%[c_ptr], #144]\n"
- "str q19, [%[c_ptr], #160]\n"
- "str q27, [%[c_ptr], #176]\n"
- "str q12, [%[c_ptr], #192]\n"
- "str q20, [%[c_ptr], #208]\n"
- "str q28, [%[c_ptr], #224]\n"
- "str q13, [%[c_ptr], #240]\n"
- "str q21, [%[c_ptr], #256]\n"
- "str q29, [%[c_ptr], #272]\n"
- "str q14, [%[c_ptr], #288]\n"
- "str q22, [%[c_ptr], #304]\n"
- "str q30, [%[c_ptr], #320]\n"
- "str q15, [%[c_ptr], #336]\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
+ "str q8, [%[c_ptr]]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "str q10, [%[c_ptr], #96]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "str q11, [%[c_ptr], #144]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "str q12, [%[c_ptr], #192]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "str q13, [%[c_ptr], #240]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "str q14, [%[c_ptr], #288]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "str q15, [%[c_ptr], #336]\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
"5:\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "=w" (a0), [a0a] "=w" (a0a), [a1] "=w" (a1), [a1a] "=w" (a1a),
@@ -391,4 +391,4 @@ void a64_hgemm_asimd_8x24_a55r1(const __fp16 *Apanel, const __fp16 *Bpanel, __fp
} // namespace arm_gemm
-#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // __aarch64__ && (ENABLE_FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
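
Worth noting across these FP16 files: the header now keys off ARM_COMPUTE_ENABLE_FP16, while the .cpp kernels key off ENABLE_FP16_KERNELS combined with the compiler's own feature macro. The guard pattern visible in the kernel hunks is, in sketch form:

    // Kernel body compiles when targeting AArch64 and either the build
    // system opts in (ENABLE_FP16_KERNELS) or the compiler already
    // advertises native FP16 vector arithmetic.
    #if defined(__aarch64__) && \
        (defined(ENABLE_FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
    // ... __fp16 kernel definitions ...
    #endif
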
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/generic.cpp
index 23b87fa192..be97c0d2d7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,8 +22,8 @@
* SOFTWARE.
*/
-// Build on AArch64 where either FP16_KERNELS is set or FP16 is explicitly supported.
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+// Build on AArch64 where either ENABLE_FP16_KERNELS is set or FP16 is explicitly supported.
+#if defined(__aarch64__) && (defined(ENABLE_FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
#include <arm_neon.h>
@@ -67,270 +67,270 @@ void a64_hgemm_asimd_8x24(const __fp16 *Apanel, const __fp16 *Bpanel, __fp16 *Cp
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.8h, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.8h, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.8h, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v11.8h, #0x0\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "movi v12.8h, #0x0\n"
- "ldr %q[b0a], [%[b_ptr], #48]\n"
- "movi v13.8h, #0x0\n"
- "ldr %q[b1a], [%[b_ptr], #64]\n"
- "movi v14.8h, #0x0\n"
+ "movi v8.8h, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.8h, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.8h, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v11.8h, #0x0\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "movi v12.8h, #0x0\n"
+ "ldr %q[b0a], [%[b_ptr], #48]\n"
+ "movi v13.8h, #0x0\n"
+ "ldr %q[b1a], [%[b_ptr], #64]\n"
+ "movi v14.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v15.8h, #0x0\n"
+ "movi v15.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v16.8h, #0x0\n"
+ "movi v16.8h, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v17.8h, #0x0\n"
+ "movi v17.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v18.8h, #0x0\n"
+ "movi v18.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v19.8h, #0x0\n"
+ "movi v19.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.8h, #0x0\n"
- "movi v21.8h, #0x0\n"
- "movi v22.8h, #0x0\n"
- "movi v23.8h, #0x0\n"
- "movi v24.8h, #0x0\n"
- "movi v25.8h, #0x0\n"
- "movi v26.8h, #0x0\n"
- "movi v27.8h, #0x0\n"
- "movi v28.8h, #0x0\n"
- "movi v29.8h, #0x0\n"
- "movi v30.8h, #0x0\n"
- "movi v31.8h, #0x0\n"
+ "movi v20.8h, #0x0\n"
+ "movi v21.8h, #0x0\n"
+ "movi v22.8h, #0x0\n"
+ "movi v23.8h, #0x0\n"
+ "movi v24.8h, #0x0\n"
+ "movi v25.8h, #0x0\n"
+ "movi v26.8h, #0x0\n"
+ "movi v27.8h, #0x0\n"
+ "movi v28.8h, #0x0\n"
+ "movi v29.8h, #0x0\n"
+ "movi v30.8h, #0x0\n"
+ "movi v31.8h, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
"1:\n"
- "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
- "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
- "ldr %q[a0a], [%[a_ptr], #16]\n"
- "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
- "ldr %q[b2a], [%[b_ptr], #80]\n"
- "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
- "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
- "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
- "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
-
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
+ "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
+ "ldr %q[a0a], [%[a_ptr], #16]\n"
+ "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "ldr %q[b2a], [%[b_ptr], #80]\n"
+ "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
+ "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
+ "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
+ "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
+
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
- "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
- "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
- "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
-
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
+ "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
+ "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
+ "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
ASM_PREFETCH("[%[b_ptr], #288]")
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
- "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
- "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
- "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
- "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
- "ldr %q[a0], [%[a_ptr], #32]\n"
-
- "fmla v8.8h , %[b0a].8h, %[a0a].h[0]\n"
- "fmla v9.8h , %[b0a].8h, %[a0a].h[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v10.8h, %[b0a].8h, %[a0a].h[2]\n"
- "fmla v11.8h, %[b0a].8h, %[a0a].h[3]\n"
- "fmla v12.8h, %[b0a].8h, %[a0a].h[4]\n"
- "fmla v13.8h, %[b0a].8h, %[a0a].h[5]\n"
- "fmla v14.8h, %[b0a].8h, %[a0a].h[6]\n"
- "fmla v15.8h, %[b0a].8h, %[a0a].h[7]\n"
- "ldr %q[b0a], [%[b_ptr], #48]\n"
-
- "fmla v16.8h, %[b1a].8h, %[a0a].h[0]\n"
- "fmla v17.8h, %[b1a].8h, %[a0a].h[1]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
+ "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
+ "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
+ "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
+ "ldr %q[a0], [%[a_ptr], #32]\n"
+
+ "fmla v8.8h , %[b0a].8h, %[a0a].h[0]\n"
+ "fmla v9.8h , %[b0a].8h, %[a0a].h[1]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v10.8h, %[b0a].8h, %[a0a].h[2]\n"
+ "fmla v11.8h, %[b0a].8h, %[a0a].h[3]\n"
+ "fmla v12.8h, %[b0a].8h, %[a0a].h[4]\n"
+ "fmla v13.8h, %[b0a].8h, %[a0a].h[5]\n"
+ "fmla v14.8h, %[b0a].8h, %[a0a].h[6]\n"
+ "fmla v15.8h, %[b0a].8h, %[a0a].h[7]\n"
+ "ldr %q[b0a], [%[b_ptr], #48]\n"
+
+ "fmla v16.8h, %[b1a].8h, %[a0a].h[0]\n"
+ "fmla v17.8h, %[b1a].8h, %[a0a].h[1]\n"
ASM_PREFETCH("[%[b_ptr], #352]")
- "fmla v18.8h, %[b1a].8h, %[a0a].h[2]\n"
- "fmla v19.8h, %[b1a].8h, %[a0a].h[3]\n"
- "fmla v20.8h, %[b1a].8h, %[a0a].h[4]\n"
- "fmla v21.8h, %[b1a].8h, %[a0a].h[5]\n"
- "fmla v22.8h, %[b1a].8h, %[a0a].h[6]\n"
- "fmla v23.8h, %[b1a].8h, %[a0a].h[7]\n"
- "ldr %q[b1a], [%[b_ptr], #64]\n"
-
- "fmla v24.8h, %[b2a].8h, %[a0a].h[0]\n"
- "fmla v25.8h, %[b2a].8h, %[a0a].h[1]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v26.8h, %[b2a].8h, %[a0a].h[2]\n"
- "fmla v27.8h, %[b2a].8h, %[a0a].h[3]\n"
- "fmla v28.8h, %[b2a].8h, %[a0a].h[4]\n"
- "fmla v29.8h, %[b2a].8h, %[a0a].h[5]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v30.8h, %[b2a].8h, %[a0a].h[6]\n"
- "fmla v31.8h, %[b2a].8h, %[a0a].h[7]\n"
-
- "bne 1b\n"
+ "fmla v18.8h, %[b1a].8h, %[a0a].h[2]\n"
+ "fmla v19.8h, %[b1a].8h, %[a0a].h[3]\n"
+ "fmla v20.8h, %[b1a].8h, %[a0a].h[4]\n"
+ "fmla v21.8h, %[b1a].8h, %[a0a].h[5]\n"
+ "fmla v22.8h, %[b1a].8h, %[a0a].h[6]\n"
+ "fmla v23.8h, %[b1a].8h, %[a0a].h[7]\n"
+ "ldr %q[b1a], [%[b_ptr], #64]\n"
+
+ "fmla v24.8h, %[b2a].8h, %[a0a].h[0]\n"
+ "fmla v25.8h, %[b2a].8h, %[a0a].h[1]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v26.8h, %[b2a].8h, %[a0a].h[2]\n"
+ "fmla v27.8h, %[b2a].8h, %[a0a].h[3]\n"
+ "fmla v28.8h, %[b2a].8h, %[a0a].h[4]\n"
+ "fmla v29.8h, %[b2a].8h, %[a0a].h[5]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v30.8h, %[b2a].8h, %[a0a].h[6]\n"
+ "fmla v31.8h, %[b2a].8h, %[a0a].h[7]\n"
+
+ "bne 1b\n"
"4:\n"
// Jump to odd tail if necessary.
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Even tail.
- "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
+ "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
"fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
- "ldr %q[a0a], [%[a_ptr], #16]\n"
- "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
- "ldr %q[b2a], [%[b_ptr], #80]\n"
- "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
+ "ldr %q[a0a], [%[a_ptr], #16]\n"
+ "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "ldr %q[b2a], [%[b_ptr], #80]\n"
+ "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
"fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
- "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
- "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
-
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
- "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
- "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
- "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
-
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
- "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
- "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
- "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
- "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
-
- "fmla v8.8h , %[b0a].8h, %[a0a].h[0]\n"
- "fmla v16.8h, %[b1a].8h, %[a0a].h[0]\n"
- "str q8, [%[c_ptr]]\n"
- "fmla v24.8h, %[b2a].8h, %[a0a].h[0]\n"
- "str q16, [%[c_ptr], #16]\n"
-
- "fmla v9.8h , %[b0a].8h, %[a0a].h[1]\n"
- "str q24, [%[c_ptr], #32]\n"
- "fmla v17.8h, %[b1a].8h, %[a0a].h[1]\n"
- "str q9, [%[c_ptr], #48]\n"
- "fmla v25.8h, %[b2a].8h, %[a0a].h[1]\n"
- "str q17, [%[c_ptr], #64]\n"
-
- "fmla v10.8h, %[b0a].8h, %[a0a].h[2]\n"
- "str q25, [%[c_ptr], #80]\n"
- "fmla v18.8h, %[b1a].8h, %[a0a].h[2]\n"
- "str q10, [%[c_ptr], #96]\n"
- "fmla v26.8h, %[b2a].8h, %[a0a].h[2]\n"
- "str q18, [%[c_ptr], #112]\n"
-
- "fmla v11.8h, %[b0a].8h, %[a0a].h[3]\n"
- "str q26, [%[c_ptr], #128]\n"
- "fmla v19.8h, %[b1a].8h, %[a0a].h[3]\n"
- "str q11, [%[c_ptr], #144]\n"
- "fmla v27.8h, %[b2a].8h, %[a0a].h[3]\n"
- "str q19, [%[c_ptr], #160]\n"
-
- "fmla v12.8h, %[b0a].8h, %[a0a].h[4]\n"
- "str q27, [%[c_ptr], #176]\n"
- "fmla v20.8h, %[b1a].8h, %[a0a].h[4]\n"
- "str q12, [%[c_ptr], #192]\n"
- "fmla v28.8h, %[b2a].8h, %[a0a].h[4]\n"
- "str q20, [%[c_ptr], #208]\n"
-
- "fmla v13.8h, %[b0a].8h, %[a0a].h[5]\n"
- "str q28, [%[c_ptr], #224]\n"
- "fmla v21.8h, %[b1a].8h, %[a0a].h[5]\n"
- "str q13, [%[c_ptr], #240]\n"
- "fmla v29.8h, %[b2a].8h, %[a0a].h[5]\n"
- "str q21, [%[c_ptr], #256]\n"
-
- "fmla v14.8h, %[b0a].8h, %[a0a].h[6]\n"
- "str q29, [%[c_ptr], #272]\n"
- "fmla v22.8h, %[b1a].8h, %[a0a].h[6]\n"
- "str q14, [%[c_ptr], #288]\n"
- "fmla v30.8h, %[b2a].8h, %[a0a].h[6]\n"
- "str q22, [%[c_ptr], #304]\n"
-
- "fmla v15.8h, %[b0a].8h, %[a0a].h[7]\n"
- "str q30, [%[c_ptr], #320]\n"
- "fmla v23.8h, %[b1a].8h, %[a0a].h[7]\n"
- "str q15, [%[c_ptr], #336]\n"
- "fmla v31.8h, %[b2a].8h, %[a0a].h[7]\n"
- "b 3f\n"
+ "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
+ "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
+
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
+ "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
+ "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
+
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
+ "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
+ "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
+ "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
+
+ "fmla v8.8h , %[b0a].8h, %[a0a].h[0]\n"
+ "fmla v16.8h, %[b1a].8h, %[a0a].h[0]\n"
+ "str q8, [%[c_ptr]]\n"
+ "fmla v24.8h, %[b2a].8h, %[a0a].h[0]\n"
+ "str q16, [%[c_ptr], #16]\n"
+
+ "fmla v9.8h , %[b0a].8h, %[a0a].h[1]\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "fmla v17.8h, %[b1a].8h, %[a0a].h[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "fmla v25.8h, %[b2a].8h, %[a0a].h[1]\n"
+ "str q17, [%[c_ptr], #64]\n"
+
+ "fmla v10.8h, %[b0a].8h, %[a0a].h[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "fmla v18.8h, %[b1a].8h, %[a0a].h[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+ "fmla v26.8h, %[b2a].8h, %[a0a].h[2]\n"
+ "str q18, [%[c_ptr], #112]\n"
+
+ "fmla v11.8h, %[b0a].8h, %[a0a].h[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "fmla v19.8h, %[b1a].8h, %[a0a].h[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+ "fmla v27.8h, %[b2a].8h, %[a0a].h[3]\n"
+ "str q19, [%[c_ptr], #160]\n"
+
+ "fmla v12.8h, %[b0a].8h, %[a0a].h[4]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "fmla v20.8h, %[b1a].8h, %[a0a].h[4]\n"
+ "str q12, [%[c_ptr], #192]\n"
+ "fmla v28.8h, %[b2a].8h, %[a0a].h[4]\n"
+ "str q20, [%[c_ptr], #208]\n"
+
+ "fmla v13.8h, %[b0a].8h, %[a0a].h[5]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "fmla v21.8h, %[b1a].8h, %[a0a].h[5]\n"
+ "str q13, [%[c_ptr], #240]\n"
+ "fmla v29.8h, %[b2a].8h, %[a0a].h[5]\n"
+ "str q21, [%[c_ptr], #256]\n"
+
+ "fmla v14.8h, %[b0a].8h, %[a0a].h[6]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "fmla v22.8h, %[b1a].8h, %[a0a].h[6]\n"
+ "str q14, [%[c_ptr], #288]\n"
+ "fmla v30.8h, %[b2a].8h, %[a0a].h[6]\n"
+ "str q22, [%[c_ptr], #304]\n"
+
+ "fmla v15.8h, %[b0a].8h, %[a0a].h[7]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "fmla v23.8h, %[b1a].8h, %[a0a].h[7]\n"
+ "str q15, [%[c_ptr], #336]\n"
+ "fmla v31.8h, %[b2a].8h, %[a0a].h[7]\n"
+ "b 3f\n"
// Odd tail
"2:\n"
- "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "add %[a_ptr], %[a_ptr], #16\n"
- "str q8, [%[c_ptr]]\n"
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "str q16, [%[c_ptr], #16]\n"
-
- "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
- "str q24, [%[c_ptr], #32]\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
- "str q9, [%[c_ptr], #48]\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
- "str q17, [%[c_ptr], #64]\n"
-
- "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
- "str q25, [%[c_ptr], #80]\n"
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "str q10, [%[c_ptr], #96]\n"
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "str q18, [%[c_ptr], #112]\n"
-
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
- "str q26, [%[c_ptr], #128]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
- "str q11, [%[c_ptr], #144]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
- "str q19, [%[c_ptr], #160]\n"
-
- "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
- "str q27, [%[c_ptr], #176]\n"
- "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
- "str q12, [%[c_ptr], #192]\n"
- "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
- "str q20, [%[c_ptr], #208]\n"
-
- "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
- "str q28, [%[c_ptr], #224]\n"
- "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
- "str q13, [%[c_ptr], #240]\n"
- "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
- "str q21, [%[c_ptr], #256]\n"
-
- "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
- "str q29, [%[c_ptr], #272]\n"
- "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
- "str q14, [%[c_ptr], #288]\n"
- "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
- "str q22, [%[c_ptr], #304]\n"
-
- "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
- "str q30, [%[c_ptr], #320]\n"
- "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
- "str q15, [%[c_ptr], #336]\n"
- "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
+ "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "add %[a_ptr], %[a_ptr], #16\n"
+ "str q8, [%[c_ptr]]\n"
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "str q16, [%[c_ptr], #16]\n"
+
+ "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "str q17, [%[c_ptr], #64]\n"
+
+ "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "str q18, [%[c_ptr], #112]\n"
+
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "str q19, [%[c_ptr], #160]\n"
+
+ "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
+ "str q12, [%[c_ptr], #192]\n"
+ "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
+ "str q20, [%[c_ptr], #208]\n"
+
+ "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
+ "str q13, [%[c_ptr], #240]\n"
+ "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
+ "str q21, [%[c_ptr], #256]\n"
+
+ "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
+ "str q14, [%[c_ptr], #288]\n"
+ "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
+ "str q22, [%[c_ptr], #304]\n"
+
+ "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
+ "str q15, [%[c_ptr], #336]\n"
+ "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
"3:\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "+w" (a0), [a0a] "+w" (a0a),
@@ -346,4 +346,4 @@ void a64_hgemm_asimd_8x24(const __fp16 *Apanel, const __fp16 *Bpanel, __fp16 *Cp
} // namespace arm_gemm
-#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // __aarch64__ && (ENABLE_FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
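
The hunks above only re-indent the inline assembly of the 8x24 FP16 kernel; its control flow is unchanged. For orientation, a minimal scalar C++ model of that flow — a main loop unrolled over two A-columns, then an even/odd tail selected by oddk — might look as follows. The names, the scalar accumulator, and the column bookkeeping are illustrative assumptions, not the kernel's actual code:

    #include <cstddef>

    // Hypothetical scalar model of the loop structure in the kernel above:
    // the main loop consumes two columns per iteration ("1:" ... "bne 1b"),
    // and the tail after label "4:" consumes either a final pair (even case)
    // or a single leftover column (odd case, "cbnz %w[oddk], 2f").
    float dot_model(const float *a, const float *b, std::size_t ncols) {
        if (ncols == 0) return 0.0f;       // "cbz %w[k], 4f" guards k == 0
        std::size_t k    = (ncols - 1) / 2; // double-column loop iterations
        bool        oddk = (ncols % 2) != 0;

        float acc = 0.0f;                   // stands in for v8..v31
        std::size_t i = 0;
        for (std::size_t n = 0; n < k; ++n) {
            acc += a[i] * b[i]; ++i;        // column a0
            acc += a[i] * b[i]; ++i;        // column a0a
        }
        if (oddk) {
            acc += a[i] * b[i];             // odd tail: one column
        } else {
            acc += a[i] * b[i]; ++i;        // even tail: final pair,
            acc += a[i] * b[i];             // interleaved with stores
        }
        return acc;                         // stores at "3:" write the tile
    }
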
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/x1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/x1.cpp
index b47fa6a2d7..6e9349fac2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/x1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hgemm_8x24/x1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,8 +22,8 @@
* SOFTWARE.
*/
-// Build on AArch64 where either FP16_KERNELS is set or FP16 is explicitly supported.
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+// Build on AArch64 where either ENABLE_FP16_KERNELS is set or FP16 is explicitly supported.
+#if defined(__aarch64__) && (defined(ENABLE_FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
#include <arm_neon.h>
@@ -64,271 +64,271 @@ void a64_hgemm_asimd_8x24_x1(const __fp16 *Apanel, const __fp16 *Bpanel, __fp16
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.8h, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.8h, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.8h, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v11.8h, #0x0\n"
- "movi v12.8h, #0x0\n"
- "movi v13.8h, #0x0\n"
- "movi v14.8h, #0x0\n"
+ "movi v8.8h, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.8h, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.8h, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v11.8h, #0x0\n"
+ "movi v12.8h, #0x0\n"
+ "movi v13.8h, #0x0\n"
+ "movi v14.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v15.8h, #0x0\n"
+ "movi v15.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v16.8h, #0x0\n"
+ "movi v16.8h, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v17.8h, #0x0\n"
+ "movi v17.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v18.8h, #0x0\n"
+ "movi v18.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v19.8h, #0x0\n"
+ "movi v19.8h, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.8h, #0x0\n"
- "movi v21.8h, #0x0\n"
- "movi v22.8h, #0x0\n"
- "movi v23.8h, #0x0\n"
- "movi v24.8h, #0x0\n"
- "movi v25.8h, #0x0\n"
- "movi v26.8h, #0x0\n"
- "movi v27.8h, #0x0\n"
- "movi v28.8h, #0x0\n"
- "movi v29.8h, #0x0\n"
- "movi v30.8h, #0x0\n"
- "movi v31.8h, #0x0\n"
+ "movi v20.8h, #0x0\n"
+ "movi v21.8h, #0x0\n"
+ "movi v22.8h, #0x0\n"
+ "movi v23.8h, #0x0\n"
+ "movi v24.8h, #0x0\n"
+ "movi v25.8h, #0x0\n"
+ "movi v26.8h, #0x0\n"
+ "movi v27.8h, #0x0\n"
+ "movi v28.8h, #0x0\n"
+ "movi v29.8h, #0x0\n"
+ "movi v30.8h, #0x0\n"
+ "movi v31.8h, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
"1:\n"
- "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
- "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
- "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
- "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
- "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
- "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
+ "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
+ "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
+ "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
+ "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
+ "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
- "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
- "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
- "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
- "ldr %q[b1], [%[b_ptr], #-32]\n"
-
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
+ "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
+ "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
+ "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
+ "ldr %q[b1], [%[b_ptr], #-32]\n"
+
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
ASM_PREFETCH("[%[b_ptr], #288]")
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
- "ldr %q[a0a], [%[a_ptr], #16]\n"
- "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
- "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
- "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
- "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
- "ldr %q[b2], [%[b_ptr], #-16]\n"
-
- "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
- "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
- "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
- "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
- "fmla v12.8h, %[b0].8h, %[a0a].h[4]\n"
- "fmla v13.8h, %[b0].8h, %[a0a].h[5]\n"
- "fmla v14.8h, %[b0].8h, %[a0a].h[6]\n"
- "fmla v15.8h, %[b0].8h, %[a0a].h[7]\n"
- "ldr %q[b0], [%[b_ptr]]\n"
-
- "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
- "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #16]\n"
+ "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
+ "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
+ "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
+ "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
+ "ldr %q[b2], [%[b_ptr], #-16]\n"
+
+ "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
+ "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
+ "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
+ "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
+ "fmla v12.8h, %[b0].8h, %[a0a].h[4]\n"
+ "fmla v13.8h, %[b0].8h, %[a0a].h[5]\n"
+ "fmla v14.8h, %[b0].8h, %[a0a].h[6]\n"
+ "fmla v15.8h, %[b0].8h, %[a0a].h[7]\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+
+ "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
+ "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
ASM_PREFETCH("[%[b_ptr], #352]")
- "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
- "fmla v20.8h, %[b1].8h, %[a0a].h[4]\n"
- "fmla v21.8h, %[b1].8h, %[a0a].h[5]\n"
- "fmla v22.8h, %[b1].8h, %[a0a].h[6]\n"
- "fmla v23.8h, %[b1].8h, %[a0a].h[7]\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
-
- "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
- "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "fmla v28.8h, %[b2].8h, %[a0a].h[4]\n"
- "fmla v29.8h, %[b2].8h, %[a0a].h[5]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v30.8h, %[b2].8h, %[a0a].h[6]\n"
- "fmla v31.8h, %[b2].8h, %[a0a].h[7]\n"
-
- "bne 1b\n"
+ "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
+ "fmla v20.8h, %[b1].8h, %[a0a].h[4]\n"
+ "fmla v21.8h, %[b1].8h, %[a0a].h[5]\n"
+ "fmla v22.8h, %[b1].8h, %[a0a].h[6]\n"
+ "fmla v23.8h, %[b1].8h, %[a0a].h[7]\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+
+ "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
+ "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "fmla v28.8h, %[b2].8h, %[a0a].h[4]\n"
+ "fmla v29.8h, %[b2].8h, %[a0a].h[5]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v30.8h, %[b2].8h, %[a0a].h[6]\n"
+ "fmla v31.8h, %[b2].8h, %[a0a].h[7]\n"
+
+ "bne 1b\n"
"4:\n"
// Jump to odd tail if necessary.
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Even tail.
- "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
+ "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
"fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
- "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
+ "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
"fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
- "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
- "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
- "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
- "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
- "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
- "ldr %q[b1], [%[b_ptr], #-32]\n"
-
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
- "ldr %q[a0a], [%[a_ptr], #-16]\n"
- "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
- "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
- "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
- "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
- "ldr %q[b2], [%[b_ptr], #-16]\n"
-
- "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
- "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
- "str q8, [%[c_ptr]]\n"
- "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
- "str q16, [%[c_ptr], #16]\n"
-
- "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
- "str q24, [%[c_ptr], #32]\n"
- "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
- "str q9, [%[c_ptr], #48]\n"
- "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
- "str q17, [%[c_ptr], #64]\n"
-
- "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
- "str q25, [%[c_ptr], #80]\n"
- "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
- "str q10, [%[c_ptr], #96]\n"
- "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
- "str q18, [%[c_ptr], #112]\n"
-
- "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
- "str q26, [%[c_ptr], #128]\n"
- "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
- "str q11, [%[c_ptr], #144]\n"
- "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
- "str q19, [%[c_ptr], #160]\n"
-
- "fmla v12.8h, %[b0].8h, %[a0a].h[4]\n"
- "str q27, [%[c_ptr], #176]\n"
- "fmla v20.8h, %[b1].8h, %[a0a].h[4]\n"
- "str q12, [%[c_ptr], #192]\n"
- "fmla v28.8h, %[b2].8h, %[a0a].h[4]\n"
- "str q20, [%[c_ptr], #208]\n"
-
- "fmla v13.8h, %[b0].8h, %[a0a].h[5]\n"
- "str q28, [%[c_ptr], #224]\n"
- "fmla v21.8h, %[b1].8h, %[a0a].h[5]\n"
- "str q13, [%[c_ptr], #240]\n"
- "fmla v29.8h, %[b2].8h, %[a0a].h[5]\n"
- "str q21, [%[c_ptr], #256]\n"
-
- "fmla v14.8h, %[b0].8h, %[a0a].h[6]\n"
- "str q29, [%[c_ptr], #272]\n"
- "fmla v22.8h, %[b1].8h, %[a0a].h[6]\n"
- "str q14, [%[c_ptr], #288]\n"
- "fmla v30.8h, %[b2].8h, %[a0a].h[6]\n"
- "str q22, [%[c_ptr], #304]\n"
-
- "fmla v15.8h, %[b0].8h, %[a0a].h[7]\n"
- "str q30, [%[c_ptr], #320]\n"
- "fmla v23.8h, %[b1].8h, %[a0a].h[7]\n"
- "str q15, [%[c_ptr], #336]\n"
- "fmla v31.8h, %[b2].8h, %[a0a].h[7]\n"
- "b 3f\n"
+ "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
+ "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
+ "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
+ "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
+ "ldr %q[b1], [%[b_ptr], #-32]\n"
+
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #-16]\n"
+ "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
+ "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
+ "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
+ "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
+ "ldr %q[b2], [%[b_ptr], #-16]\n"
+
+ "fmla v8.8h , %[b0].8h, %[a0a].h[0]\n"
+ "fmla v16.8h, %[b1].8h, %[a0a].h[0]\n"
+ "str q8, [%[c_ptr]]\n"
+ "fmla v24.8h, %[b2].8h, %[a0a].h[0]\n"
+ "str q16, [%[c_ptr], #16]\n"
+
+ "fmla v9.8h , %[b0].8h, %[a0a].h[1]\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "fmla v17.8h, %[b1].8h, %[a0a].h[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "fmla v25.8h, %[b2].8h, %[a0a].h[1]\n"
+ "str q17, [%[c_ptr], #64]\n"
+
+ "fmla v10.8h, %[b0].8h, %[a0a].h[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "fmla v18.8h, %[b1].8h, %[a0a].h[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+ "fmla v26.8h, %[b2].8h, %[a0a].h[2]\n"
+ "str q18, [%[c_ptr], #112]\n"
+
+ "fmla v11.8h, %[b0].8h, %[a0a].h[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "fmla v19.8h, %[b1].8h, %[a0a].h[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+ "fmla v27.8h, %[b2].8h, %[a0a].h[3]\n"
+ "str q19, [%[c_ptr], #160]\n"
+
+ "fmla v12.8h, %[b0].8h, %[a0a].h[4]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "fmla v20.8h, %[b1].8h, %[a0a].h[4]\n"
+ "str q12, [%[c_ptr], #192]\n"
+ "fmla v28.8h, %[b2].8h, %[a0a].h[4]\n"
+ "str q20, [%[c_ptr], #208]\n"
+
+ "fmla v13.8h, %[b0].8h, %[a0a].h[5]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "fmla v21.8h, %[b1].8h, %[a0a].h[5]\n"
+ "str q13, [%[c_ptr], #240]\n"
+ "fmla v29.8h, %[b2].8h, %[a0a].h[5]\n"
+ "str q21, [%[c_ptr], #256]\n"
+
+ "fmla v14.8h, %[b0].8h, %[a0a].h[6]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "fmla v22.8h, %[b1].8h, %[a0a].h[6]\n"
+ "str q14, [%[c_ptr], #288]\n"
+ "fmla v30.8h, %[b2].8h, %[a0a].h[6]\n"
+ "str q22, [%[c_ptr], #304]\n"
+
+ "fmla v15.8h, %[b0].8h, %[a0a].h[7]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "fmla v23.8h, %[b1].8h, %[a0a].h[7]\n"
+ "str q15, [%[c_ptr], #336]\n"
+ "fmla v31.8h, %[b2].8h, %[a0a].h[7]\n"
+ "b 3f\n"
// Odd tail
"2:\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
- "add %[a_ptr], %[a_ptr], #16\n"
- "str q8, [%[c_ptr]]\n"
- "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
- "str q16, [%[c_ptr], #16]\n"
-
- "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
- "str q24, [%[c_ptr], #32]\n"
- "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
- "str q9, [%[c_ptr], #48]\n"
- "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
- "str q17, [%[c_ptr], #64]\n"
-
- "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
- "str q25, [%[c_ptr], #80]\n"
- "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
- "str q10, [%[c_ptr], #96]\n"
- "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
- "str q18, [%[c_ptr], #112]\n"
-
- "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
- "str q26, [%[c_ptr], #128]\n"
- "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
- "str q11, [%[c_ptr], #144]\n"
- "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
- "str q19, [%[c_ptr], #160]\n"
-
- "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
- "str q27, [%[c_ptr], #176]\n"
- "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
- "str q12, [%[c_ptr], #192]\n"
- "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
- "str q20, [%[c_ptr], #208]\n"
-
- "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
- "str q28, [%[c_ptr], #224]\n"
- "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
- "str q13, [%[c_ptr], #240]\n"
- "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
- "str q21, [%[c_ptr], #256]\n"
-
- "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
- "str q29, [%[c_ptr], #272]\n"
- "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
- "str q14, [%[c_ptr], #288]\n"
- "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
- "str q22, [%[c_ptr], #304]\n"
-
- "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
- "str q30, [%[c_ptr], #320]\n"
- "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
- "str q15, [%[c_ptr], #336]\n"
- "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v8.8h , %[b0].8h, %[a0].h[0]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "fmla v16.8h, %[b1].8h, %[a0].h[0]\n"
+ "add %[a_ptr], %[a_ptr], #16\n"
+ "str q8, [%[c_ptr]]\n"
+ "fmla v24.8h, %[b2].8h, %[a0].h[0]\n"
+ "str q16, [%[c_ptr], #16]\n"
+
+ "fmla v9.8h , %[b0].8h, %[a0].h[1]\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "fmla v17.8h, %[b1].8h, %[a0].h[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "fmla v25.8h, %[b2].8h, %[a0].h[1]\n"
+ "str q17, [%[c_ptr], #64]\n"
+
+ "fmla v10.8h, %[b0].8h, %[a0].h[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "fmla v18.8h, %[b1].8h, %[a0].h[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+ "fmla v26.8h, %[b2].8h, %[a0].h[2]\n"
+ "str q18, [%[c_ptr], #112]\n"
+
+ "fmla v11.8h, %[b0].8h, %[a0].h[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "fmla v19.8h, %[b1].8h, %[a0].h[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+ "fmla v27.8h, %[b2].8h, %[a0].h[3]\n"
+ "str q19, [%[c_ptr], #160]\n"
+
+ "fmla v12.8h, %[b0].8h, %[a0].h[4]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "fmla v20.8h, %[b1].8h, %[a0].h[4]\n"
+ "str q12, [%[c_ptr], #192]\n"
+ "fmla v28.8h, %[b2].8h, %[a0].h[4]\n"
+ "str q20, [%[c_ptr], #208]\n"
+
+ "fmla v13.8h, %[b0].8h, %[a0].h[5]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "fmla v21.8h, %[b1].8h, %[a0].h[5]\n"
+ "str q13, [%[c_ptr], #240]\n"
+ "fmla v29.8h, %[b2].8h, %[a0].h[5]\n"
+ "str q21, [%[c_ptr], #256]\n"
+
+ "fmla v14.8h, %[b0].8h, %[a0].h[6]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "fmla v22.8h, %[b1].8h, %[a0].h[6]\n"
+ "str q14, [%[c_ptr], #288]\n"
+ "fmla v30.8h, %[b2].8h, %[a0].h[6]\n"
+ "str q22, [%[c_ptr], #304]\n"
+
+ "fmla v15.8h, %[b0].8h, %[a0].h[7]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "fmla v23.8h, %[b1].8h, %[a0].h[7]\n"
+ "str q15, [%[c_ptr], #336]\n"
+ "fmla v31.8h, %[b2].8h, %[a0].h[7]\n"
"3:\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "+w" (a0), [a0a] "+w" (a0a),
@@ -343,4 +343,4 @@ void a64_hgemm_asimd_8x24_x1(const __fp16 *Apanel, const __fp16 *Bpanel, __fp16
} // namespace arm_gemm
-#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // __aarch64__ && (ENABLE_FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp
index f1427669ea..278d869afb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 2> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 2> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
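
The one-line template change above splits the single operand type into separate LHS and RHS operand types for the fixed transforms. A reduced sketch of what that extra parameter distinguishes (all names here are illustrative assumptions, not the library's definition):

    // Reduced sketch: transforms parameterised separately on LHS and RHS
    // operand types, as the signature change above implies. A panels are
    // packed as TLhs, B panels as TRhs, and results come out as TResult
    // tiles of Height x Width, blocked by KBlock along K.
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned int Height, unsigned int Width, unsigned int KBlock>
    struct TransformsSketch {
        // packing / unpacking helpers would live here
    };

    // For the bf16->fp32 kernel above, roughly:
    // TransformsSketch<bfloat16, bfloat16, float, 6, 16, 2> transforms;
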
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp
index fc323ea4fc..4494e2ac13 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,18 +48,19 @@ void a64_hybrid_bf16fp32_dot_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -80,6 +81,7 @@ void a64_hybrid_bf16fp32_dot_6x16 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
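
The change above moves output_ptr and bias out of standalone asm operands and into the ka struct, so the assembly fetches them with a single ldr from args_ptr (the %[offsetof_bias] and %[offsetof_output_ptr] loads in the hunks that follow) instead of tying up extra input registers. A minimal self-contained sketch of that pattern, with hypothetical struct and operand names:

    #include <cstddef>

    // Minimal sketch of the args-struct pattern: bundle rarely touched
    // pointers into one struct and hand the asm a single base register,
    // loading each field via offsetof. AArch64 only; names hypothetical.
    struct KernelArgsSketch {
        void        *output_ptr;
        const float *bias;
    };

    void kernel_entry_sketch(void *out, const float *bias) {
        KernelArgsSketch ka{out, bias};
        __asm__ volatile(
            "ldr x12, [%x[args], %[off_bias]]\n"  // x12 <- ka.bias
            "ldr x9,  [%x[args], %[off_out]]\n"   // x9  <- ka.output_ptr
            :
            : [args] "r"(&ka),
              [off_bias] "I"(offsetof(KernelArgsSketch, bias)),
              [off_out]  "I"(offsetof(KernelArgsSketch, output_ptr))
            : "x9", "x12", "memory");
    }
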
@@ -102,10 +104,10 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"cmp %x[M], #0x2\n"
"bgt 71f\n"
"beq 36f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x12, 3f\n"
"ldr q8, [x12, #0x0]\n"
@@ -188,8 +190,8 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"mov x28, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -213,6 +215,10 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q17, [x10, #0x20]\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f40f22a // bfdot v10.4s, v17.8h, v0.h[0]\n"
"ldr q17, [x10, #0x40]\n"
".inst 0x4f40f20b // bfdot v11.4s, v16.8h, v0.h[0]\n"
@@ -237,22 +243,21 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q17, [x10, #0xe0]\n"
".inst 0x4f60fa09 // bfdot v9.4s, v16.8h, v0.h[3]\n"
"ldr q16, [x10, #0xf0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x4f60fa2a // bfdot v10.4s, v17.8h, v0.h[3]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4f60fa0b // bfdot v11.4s, v16.8h, v0.h[3]\n"
"ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x10\n"
- "add x10, x10, #0x100\n"
- "ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f40f22a // bfdot v10.4s, v17.8h, v0.h[0]\n"
"ldr q17, [x10, #0x40]\n"
".inst 0x4f40f20b // bfdot v11.4s, v16.8h, v0.h[0]\n"
@@ -277,29 +282,26 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q17, [x10, #0xe0]\n"
".inst 0x4f60fa09 // bfdot v9.4s, v16.8h, v0.h[3]\n"
"ldr q16, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "add x10, x10, #0x100\n"
".inst 0x4f60fa2a // bfdot v10.4s, v17.8h, v0.h[3]\n"
".inst 0x4f60fa0b // bfdot v11.4s, v16.8h, v0.h[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 24f\n"
"cmp x27, #0x2\n"
"blt 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x26], #0x4\n"
- "ldr q16, [x10, #0x0]\n"
- ".inst 0x4f52f208 // bfdot v8.4s, v16.8h, v18.h[0]\n"
+ "ldr q17, [x10, #0x0]\n"
"sub x27, x27, #0x2\n"
"ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x2\n"
+ ".inst 0x4f52f228 // bfdot v8.4s, v17.8h, v18.h[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x4f52f209 // bfdot v9.4s, v16.8h, v18.h[0]\n"
- "cmp x27, #0x2\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f52f22a // bfdot v10.4s, v17.8h, v18.h[0]\n"
".inst 0x4f52f20b // bfdot v11.4s, v16.8h, v18.h[0]\n"
- "add x10, x10, #0x40\n"
"bge 21b\n"
"22:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 24f\n"
@@ -308,12 +310,12 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q17, [x10, #0x0]\n"
"ldr q16, [x10, #0x10]\n"
".inst 0x4f40f228 // bfdot v8.4s, v17.8h, v0.h[0]\n"
- ".inst 0x4f40f209 // bfdot v9.4s, v16.8h, v0.h[0]\n"
"ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f40f209 // bfdot v9.4s, v16.8h, v0.h[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f40f22a // bfdot v10.4s, v17.8h, v0.h[0]\n"
".inst 0x4f40f20b // bfdot v11.4s, v16.8h, v0.h[0]\n"
- "add x10, x10, #0x40\n"
"24:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -321,9 +323,9 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"bne 15b\n"
"prfm pstl1keep, [x9, #0x0]\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v17.4s\n"
"fmin v9.4s, v9.4s, v17.4s\n"
@@ -393,95 +395,95 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"bgt 2b\n"
"b 212f\n"
"36:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"37:" // Height 2: Column loop
"cbz x12, 38f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x12, x12, #0x40\n"
"b 49f\n"
"38:" // Height 2: no bias
"tbz %x[flags], #0, 48f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
"bge 47f\n"
"tbz x11, #3, 42f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
"tbz x11, #2, 40f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
"tbz x11, #1, 39f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
"tbz x11, #0, 46f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v15.s }[2], [x26]\n"
"b 46f\n"
"39:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 46f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
"b 46f\n"
"40:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 41f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
"tbz x11, #0, 46f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v14.s }[2], [x26]\n"
"b 46f\n"
"41:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 46f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
"b 46f\n"
"42:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 44f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x11, #1, 43f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
"tbz x11, #0, 46f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 46f\n"
"43:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 46f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 46f\n"
"44:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 45f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
"tbz x11, #0, 46f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 46f\n"
"45:" // Height 2: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
"46:" // Height 2: Partial accumulate: Done
"sub x9, x9, x20\n"
@@ -491,10 +493,10 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"b 49f\n"
"48:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -509,8 +511,8 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"mov x28, #0x0\n"
"50:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 51f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -542,22 +544,22 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f40f22a // bfdot v10.4s, v17.8h, v0.h[0]\n"
".inst 0x4f41f22e // bfdot v14.4s, v17.8h, v1.h[0]\n"
"ldr q17, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f40f20b // bfdot v11.4s, v16.8h, v0.h[0]\n"
".inst 0x4f41f20f // bfdot v15.4s, v16.8h, v1.h[0]\n"
"ldr q16, [x10, #0x50]\n"
- "cmp x27, #0x10\n"
".inst 0x4f60f228 // bfdot v8.4s, v17.8h, v0.h[1]\n"
".inst 0x4f61f22c // bfdot v12.4s, v17.8h, v1.h[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f60f209 // bfdot v9.4s, v16.8h, v0.h[1]\n"
".inst 0x4f61f20d // bfdot v13.4s, v16.8h, v1.h[1]\n"
"ldr q16, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f60f22a // bfdot v10.4s, v17.8h, v0.h[1]\n"
".inst 0x4f61f22e // bfdot v14.4s, v17.8h, v1.h[1]\n"
"ldr q17, [x10, #0x80]\n"
@@ -601,18 +603,18 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x25, x25, #0x10\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f40f22a // bfdot v10.4s, v17.8h, v0.h[0]\n"
".inst 0x4f41f22e // bfdot v14.4s, v17.8h, v1.h[0]\n"
"ldr q17, [x10, #0x40]\n"
- "sub x27, x27, #0x8\n"
".inst 0x4f40f20b // bfdot v11.4s, v16.8h, v0.h[0]\n"
".inst 0x4f41f20f // bfdot v15.4s, v16.8h, v1.h[0]\n"
"ldr q16, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f60f228 // bfdot v8.4s, v17.8h, v0.h[1]\n"
".inst 0x4f61f22c // bfdot v12.4s, v17.8h, v1.h[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f60f209 // bfdot v9.4s, v16.8h, v0.h[1]\n"
".inst 0x4f61f20d // bfdot v13.4s, v16.8h, v1.h[1]\n"
"ldr q16, [x10, #0x70]\n"
@@ -653,18 +655,18 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr s19, [x26], #0x4\n"
"ldr s18, [x25], #0x4\n"
"sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
"ldr q17, [x10, #0x0]\n"
"ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x2\n"
".inst 0x4f53f228 // bfdot v8.4s, v17.8h, v19.h[0]\n"
".inst 0x4f52f22c // bfdot v12.4s, v17.8h, v18.h[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x4f53f209 // bfdot v9.4s, v16.8h, v19.h[0]\n"
".inst 0x4f52f20d // bfdot v13.4s, v16.8h, v18.h[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f53f22a // bfdot v10.4s, v17.8h, v19.h[0]\n"
".inst 0x4f52f22e // bfdot v14.4s, v17.8h, v18.h[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x4f53f20b // bfdot v11.4s, v16.8h, v19.h[0]\n"
".inst 0x4f52f20f // bfdot v15.4s, v16.8h, v18.h[0]\n"
"bge 56b\n"
@@ -681,9 +683,9 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f40f209 // bfdot v9.4s, v16.8h, v0.h[0]\n"
".inst 0x4f41f20d // bfdot v13.4s, v16.8h, v1.h[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f40f22a // bfdot v10.4s, v17.8h, v0.h[0]\n"
".inst 0x4f41f22e // bfdot v14.4s, v17.8h, v1.h[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x4f40f20b // bfdot v11.4s, v16.8h, v0.h[0]\n"
".inst 0x4f41f20f // bfdot v15.4s, v16.8h, v1.h[0]\n"
"59:" // Height 2: Multiply loop: No odd multiplies
@@ -692,13 +694,13 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"cmp x28, x20\n"
"bne 50b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 60f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v17.4s\n"
"fmin v9.4s, v9.4s, v17.4s\n"
@@ -722,63 +724,63 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"tbz x11, #3, 64f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
"tbz x11, #2, 62f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
"tbz x11, #1, 61f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d15, [x26], #0x8\n"
"tbz x11, #0, 68f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
+ "st1 { v15.s }[2], [x26]\n"
"b 68f\n"
"61:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 68f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
"b 68f\n"
"62:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 63f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d14, [x26], #0x8\n"
"tbz x11, #0, 68f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
+ "st1 { v14.s }[2], [x26]\n"
"b 68f\n"
"63:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 68f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
"b 68f\n"
"64:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 66f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
"tbz x11, #1, 65f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x11, #0, 68f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
+ "st1 { v13.s }[2], [x26]\n"
"b 68f\n"
"65:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 68f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
"b 68f\n"
"66:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 67f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x11, #0, 68f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
+ "st1 { v12.s }[2], [x26]\n"
"b 68f\n"
"67:" // Height 2: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
"68:" // Height 2: Partial direct writeback: Done
"b 70f\n"
"69:" // Height 2: Full writeback
@@ -787,126 +789,126 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
"70:" // Height 2: Writeback done
"subs x11, x11, #0x10\n"
"bgt 37b\n"
"b 212f\n"
"71:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"72:" // Height 3: Column loop
"cbz x12, 73f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 84f\n"
"73:" // Height 3: no bias
"tbz %x[flags], #0, 83f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"bge 82f\n"
"tbz x11, #3, 77f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"tbz x11, #2, 75f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
"tbz x11, #1, 74f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x11, #0, 81f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
"b 81f\n"
"74:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 81f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
"b 81f\n"
"75:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 76f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x11, #0, 81f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
"b 81f\n"
"76:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 81f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
"b 81f\n"
"77:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 79f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
"tbz x11, #1, 78f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x11, #0, 81f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
"b 81f\n"
"78:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 81f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
"b 81f\n"
"79:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 80f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
"tbz x11, #0, 81f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
"b 81f\n"
"80:" // Height 3: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
"81:" // Height 3: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 84f\n"
@@ -915,14 +917,14 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
"b 84f\n"
"83:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -941,8 +943,8 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"mov x28, #0x0\n"
"85:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 86f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -982,18 +984,18 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
"ldr q20, [x10, #0x30]\n"
"add x24, x24, #0x10\n"
- ".inst 0x4f40f2aa // bfdot v10.4s, v21.8h, v0.h[0]\n"
- ".inst 0x4f41f2ae // bfdot v14.4s, v21.8h, v1.h[0]\n"
"cmp x27, #0x10\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f40f2aa // bfdot v10.4s, v21.8h, v0.h[0]\n"
+ ".inst 0x4f41f2ae // bfdot v14.4s, v21.8h, v1.h[0]\n"
".inst 0x4f42f2b2 // bfdot v18.4s, v21.8h, v2.h[0]\n"
"ldr q21, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f40f28b // bfdot v11.4s, v20.8h, v0.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f41f28f // bfdot v15.4s, v20.8h, v1.h[0]\n"
".inst 0x4f42f293 // bfdot v19.4s, v20.8h, v2.h[0]\n"
"ldr q20, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f60f2a8 // bfdot v8.4s, v21.8h, v0.h[1]\n"
".inst 0x4f61f2ac // bfdot v12.4s, v21.8h, v1.h[1]\n"
".inst 0x4f62f2b0 // bfdot v16.4s, v21.8h, v2.h[1]\n"
@@ -1060,14 +1062,14 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
"ldr q20, [x10, #0x30]\n"
"sub x27, x27, #0x8\n"
- ".inst 0x4f40f2aa // bfdot v10.4s, v21.8h, v0.h[0]\n"
- ".inst 0x4f41f2ae // bfdot v14.4s, v21.8h, v1.h[0]\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f40f2aa // bfdot v10.4s, v21.8h, v0.h[0]\n"
+ ".inst 0x4f41f2ae // bfdot v14.4s, v21.8h, v1.h[0]\n"
".inst 0x4f42f2b2 // bfdot v18.4s, v21.8h, v2.h[0]\n"
"ldr q21, [x10, #0x40]\n"
- ".inst 0x4f40f28b // bfdot v11.4s, v20.8h, v0.h[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f40f28b // bfdot v11.4s, v20.8h, v0.h[0]\n"
".inst 0x4f41f28f // bfdot v15.4s, v20.8h, v1.h[0]\n"
".inst 0x4f42f293 // bfdot v19.4s, v20.8h, v2.h[0]\n"
"ldr q20, [x10, #0x50]\n"
@@ -1126,12 +1128,12 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr s24, [x26], #0x4\n"
"ldr s23, [x25], #0x4\n"
"sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
"ldr s22, [x24], #0x4\n"
"ldr q21, [x10, #0x0]\n"
+ "cmp x27, #0x2\n"
+ "ldr q20, [x10, #0x10]\n"
".inst 0x4f58f2a8 // bfdot v8.4s, v21.8h, v24.h[0]\n"
".inst 0x4f57f2ac // bfdot v12.4s, v21.8h, v23.h[0]\n"
- "ldr q20, [x10, #0x10]\n"
".inst 0x4f56f2b0 // bfdot v16.4s, v21.8h, v22.h[0]\n"
"ldr q21, [x10, #0x20]\n"
".inst 0x4f58f289 // bfdot v9.4s, v20.8h, v24.h[0]\n"
@@ -1175,15 +1177,15 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"cmp x28, x20\n"
"bne 85b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 95f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v21.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.4s }, [x21]\n"
"ld1r { v20.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v21.4s\n"
"fmin v9.4s, v9.4s, v21.4s\n"
@@ -1215,79 +1217,79 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"tbz x11, #3, 99f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
"tbz x11, #2, 97f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
"tbz x11, #1, 96f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x11, #0, 103f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
"b 103f\n"
"96:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 103f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
"b 103f\n"
"97:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 98f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x11, #0, 103f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
"b 103f\n"
"98:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 103f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
"b 103f\n"
"99:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 101f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
"tbz x11, #1, 100f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x11, #0, 103f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
"b 103f\n"
"100:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 103f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
"b 103f\n"
"101:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 102f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x11, #0, 103f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
"b 103f\n"
"102:" // Height 3: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
"103:" // Height 3: Partial direct writeback: Done
"b 105f\n"
"104:" // Height 3: Full writeback
@@ -1296,39 +1298,39 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"105:" // Height 3: Writeback done
"subs x11, x11, #0x10\n"
"bgt 72b\n"
"b 212f\n"
"106:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"107:" // Height 4: Column loop
"cbz x12, 108f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1336,111 +1338,111 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"108:" // Height 4: no bias
"tbz %x[flags], #0, 118f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"bge 117f\n"
"tbz x11, #3, 112f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
"tbz x11, #2, 110f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
"tbz x11, #1, 109f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x11, #0, 116f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
"b 116f\n"
"109:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 116f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
"b 116f\n"
"110:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 111f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x11, #0, 116f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
"b 116f\n"
"111:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 116f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
"b 116f\n"
"112:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 114f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"tbz x11, #1, 113f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x11, #0, 116f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
"b 116f\n"
"113:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 116f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
"b 116f\n"
"114:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 115f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x11, #0, 116f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
"b 116f\n"
"115:" // Height 4: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
"116:" // Height 4: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 119f\n"
@@ -1449,18 +1451,18 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"b 119f\n"
"118:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1483,8 +1485,8 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"mov x28, #0x0\n"
"120:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 121f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1624,14 +1626,14 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
"add x23, x23, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
"ldr q24, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f40f32a // bfdot v10.4s, v25.8h, v0.h[0]\n"
".inst 0x4f41f32e // bfdot v14.4s, v25.8h, v1.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x8\n"
"prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f42f332 // bfdot v18.4s, v25.8h, v2.h[0]\n"
".inst 0x4f43f336 // bfdot v22.4s, v25.8h, v3.h[0]\n"
@@ -1709,9 +1711,9 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr s29, [x26], #0x4\n"
"ldr s28, [x25], #0x4\n"
"sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
"ldr s27, [x24], #0x4\n"
"ldr s26, [x23], #0x4\n"
+ "cmp x27, #0x2\n"
"ldr q25, [x10, #0x0]\n"
"ldr q24, [x10, #0x10]\n"
".inst 0x4f5df328 // bfdot v8.4s, v25.8h, v29.h[0]\n"
@@ -1768,17 +1770,17 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"cmp x28, x20\n"
"bne 120b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 130f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v25.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v25.4s }, [x21]\n"
"ld1r { v24.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v25.4s\n"
"fmin v9.4s, v9.4s, v25.4s\n"
@@ -1818,95 +1820,95 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"tbz x11, #3, 134f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
"tbz x11, #2, 132f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
"tbz x11, #1, 131f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
"tbz x11, #0, 138f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
"b 138f\n"
"131:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 138f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
"b 138f\n"
"132:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 133f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
"tbz x11, #0, 138f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
"b 138f\n"
"133:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 138f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
"b 138f\n"
"134:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 136f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
"tbz x11, #1, 135f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
"tbz x11, #0, 138f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
"b 138f\n"
"135:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 138f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
"b 138f\n"
"136:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 137f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x11, #0, 138f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
"b 138f\n"
"137:" // Height 4: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
"138:" // Height 4: Partial direct writeback: Done
"b 140f\n"
"139:" // Height 4: Full writeback
@@ -1915,43 +1917,43 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
"140:" // Height 4: Writeback done
"subs x11, x11, #0x10\n"
"bgt 107b\n"
"b 212f\n"
"141:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"142:" // Height 5: Column loop
"cbz x12, 143f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1963,128 +1965,128 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"143:" // Height 5: no bias
"tbz %x[flags], #0, 153f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
"bge 152f\n"
"tbz x11, #3, 147f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x11, #2, 145f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
"tbz x11, #1, 144f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x11, #0, 151f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
"b 151f\n"
"144:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 151f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
"b 151f\n"
"145:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 146f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x11, #0, 151f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
"b 151f\n"
"146:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 151f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
"b 151f\n"
"147:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 149f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"tbz x11, #1, 148f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x11, #0, 151f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 151f\n"
"148:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 151f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"b 151f\n"
"149:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 150f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x11, #0, 151f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 151f\n"
"150:" // Height 5: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"151:" // Height 5: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 154f\n"
@@ -2093,22 +2095,22 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
"b 154f\n"
"153:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -2135,8 +2137,8 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"mov x28, #0x0\n"
"155:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 156f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2302,12 +2304,12 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"add x22, x22, #0x10\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
- "sub x27, x27, #0x8\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
".inst 0x4f44f0f9 // bfdot v25.4s, v7.8h, v4.h[0]\n"
"ldr q28, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x8\n"
".inst 0x4f40f3aa // bfdot v10.4s, v29.8h, v0.h[0]\n"
".inst 0x4f41f3ae // bfdot v14.4s, v29.8h, v1.h[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
@@ -2402,14 +2404,14 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr s2, [x26], #0x4\n"
"ldr s1, [x25], #0x4\n"
"sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
"ldr s0, [x24], #0x4\n"
"ldr s31, [x23], #0x4\n"
+ "cmp x27, #0x2\n"
"ldr s30, [x22], #0x4\n"
"ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
".inst 0x4f42f3a8 // bfdot v8.4s, v29.8h, v2.h[0]\n"
".inst 0x4f41f3ac // bfdot v12.4s, v29.8h, v1.h[0]\n"
- "ldr q28, [x10, #0x10]\n"
".inst 0x4f40f3b0 // bfdot v16.4s, v29.8h, v0.h[0]\n"
".inst 0x4f5ff3b4 // bfdot v20.4s, v29.8h, v31.h[0]\n"
".inst 0x4f5ef3b8 // bfdot v24.4s, v29.8h, v30.h[0]\n"
@@ -2471,19 +2473,19 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"cmp x28, x20\n"
"bne 155b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 165f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v29.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v29.4s }, [x21]\n"
"ld1r { v28.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v29.4s\n"
"fmin v9.4s, v9.4s, v29.4s\n"
@@ -2531,111 +2533,111 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"tbz x11, #3, 169f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
"tbz x11, #2, 167f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
"tbz x11, #1, 166f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x11, #0, 173f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
"b 173f\n"
"166:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 173f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
"b 173f\n"
"167:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 168f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x11, #0, 173f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
"b 173f\n"
"168:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 173f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
"b 173f\n"
"169:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 171f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x11, #1, 170f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x11, #0, 173f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 173f\n"
"170:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 173f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 173f\n"
"171:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 172f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x11, #0, 173f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 173f\n"
"172:" // Height 5: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"173:" // Height 5: Partial direct writeback: Done
"b 175f\n"
"174:" // Height 5: Full writeback
@@ -2644,50 +2646,51 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"175:" // Height 5: Writeback done
"subs x11, x11, #0x10\n"
"bgt 142b\n"
"b 212f\n"
"176:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"177:" // Height 6: Column loop
"cbz x12, 178f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -2703,145 +2706,145 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"178:" // Height 6: no bias
"tbz %x[flags], #0, 188f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
"bge 187f\n"
"tbz x11, #3, 182f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x11, #2, 180f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x11, #1, 179f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x11, #0, 186f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 186f\n"
"179:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 186f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 186f\n"
"180:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 181f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x11, #0, 186f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 186f\n"
"181:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 186f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 186f\n"
"182:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 184f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x11, #1, 183f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x11, #0, 186f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 186f\n"
"183:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 186f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 186f\n"
"184:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 185f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x11, #0, 186f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 186f\n"
"185:" // Height 6: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"186:" // Height 6: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 189f\n"
@@ -2850,26 +2853,26 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"b 189f\n"
"188:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -2900,8 +2903,8 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"mov x28, #0x0\n"
"190:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 191f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -3091,18 +3094,18 @@ void a64_hybrid_bf16fp32_dot_6x16 (
".inst 0x4f40f0e9 // bfdot v9.4s, v7.8h, v0.h[0]\n"
".inst 0x4f41f0ed // bfdot v13.4s, v7.8h, v1.h[0]\n"
"add x21, x21, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f42f0f1 // bfdot v17.4s, v7.8h, v2.h[0]\n"
".inst 0x4f43f0f5 // bfdot v21.4s, v7.8h, v3.h[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f44f0f9 // bfdot v25.4s, v7.8h, v4.h[0]\n"
".inst 0x4f45f0fd // bfdot v29.4s, v7.8h, v5.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n"
".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x27, x27, #0x8\n"
"prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f42f0d2 // bfdot v18.4s, v6.8h, v2.h[0]\n"
".inst 0x4f43f0d6 // bfdot v22.4s, v6.8h, v3.h[0]\n"
@@ -3208,9 +3211,9 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"ldr s7, [x26], #0x4\n"
"ldr s6, [x25], #0x4\n"
"sub x27, x27, #0x2\n"
- "cmp x27, #0x2\n"
"ldr s5, [x24], #0x4\n"
"ldr s4, [x23], #0x4\n"
+ "cmp x27, #0x2\n"
"ldr s3, [x22], #0x4\n"
"ldr s2, [x21], #0x4\n"
"ldr q1, [x10, #0x0]\n"
@@ -3287,21 +3290,21 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"cmp x28, x20\n"
"bne 190b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 200f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
@@ -3357,127 +3360,127 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"tbz x11, #3, 204f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x22], #0x10\n"
"tbz x11, #2, 202f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
+ "st1 { v30.4s }, [x22], #0x10\n"
"tbz x11, #1, 201f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x11, #0, 208f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
+ "st1 { v31.s }[2], [x22]\n"
"b 208f\n"
"201:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 208f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
+ "str s31, [x22, #0x0]\n"
"b 208f\n"
"202:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 203f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x11, #0, 208f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
+ "st1 { v30.s }[2], [x22]\n"
"b 208f\n"
"203:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 208f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
+ "str s30, [x22, #0x0]\n"
"b 208f\n"
"204:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 206f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
"tbz x11, #1, 205f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x11, #0, 208f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
"b 208f\n"
"205:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 208f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
"b 208f\n"
"206:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 207f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x11, #0, 208f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "st1 { v28.s }[2], [x22]\n"
"b 208f\n"
"207:" // Height 6: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
+ "str s28, [x22, #0x0]\n"
"208:" // Height 6: Partial direct writeback: Done
"b 210f\n"
"209:" // Height 6: Full writeback
@@ -3486,26 +3489,26 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q28, [x22, #0x0]\n"
+ "str q29, [x22, #0x10]\n"
+ "str q30, [x22, #0x20]\n"
+ "str q31, [x22, #0x30]\n"
"210:" // Height 6: Writeback done
"subs x11, x11, #0x10\n"
"bgt 177b\n"
@@ -3521,8 +3524,8 @@ void a64_hybrid_bf16fp32_dot_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"212:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
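[Note on the pattern above] The Height blocks in this file all receive the same three mechanical changes: the output row pointers are renumbered (x25/x24/x23... become x26/x25/x24...) so each `add` that forms a row address is immediately followed by its `prfm pstl1keep`, the `sub`/`cmp` loop bookkeeping is hoisted between loads to hide latency, and the `bias`/`output_ptr` inline-asm operands are retired in favour of `KernelArgs` fields fetched through `args_ptr` with `offsetof`. The last change frees two general-purpose registers for the compiler and lets the Height 6 path write the advanced output pointer back through memory (`madd x20, ...` + `str x20, [%x[args_ptr], %[offsetof_output_ptr]]`) instead of mutating a register operand. A minimal sketch of that operand-to-struct pattern, assuming hypothetical names (KernelArgsSketch, kernel_entry_sketch) — this is not the library's real declaration, only an illustration:

#include <cstddef>

struct KernelArgsSketch {
    const float *bias = nullptr;  // was the separate [bias] "r" operand
    void *output_ptr  = nullptr;  // was the [output_ptr] "+&r" operand
};

void kernel_entry_sketch(KernelArgsSketch &ka) {
#if defined(__aarch64__)
    __asm__ __volatile__(
        // Replaces "mov x12, %x[bias]" and "mov x9, %x[output_ptr]":
        // both values are now reached through the single args pointer,
        // so only args_ptr occupies a register across the asm block.
        "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
        "ldr x9,  [%x[args_ptr], %[offsetof_output_ptr]]\n"
        : // no register outputs: updates go back through memory
        : [args_ptr] "r" (&ka),
          [offsetof_bias] "I" (offsetof(KernelArgsSketch, bias)),
          [offsetof_output_ptr] "I" (offsetof(KernelArgsSketch, output_ptr))
        : "cc", "memory", "x9", "x12");
#else
    (void)ka;  // the sketch only targets AArch64
#endif
}

The same trade-off shows in the constraint list at label 212 above: the old version carried `output_ptr` as a read-write operand and `bias` as an input, while the new one passes only `input_ptr`, `M`, and the struct offsets.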
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16.hpp
index d9e7259fa2..ee57113f9b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
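[Note on the header change] Besides the copyright-year bump, the hunk above adds an explicit LHS operand type as the first argument of `StdTransformsFixed`, which previously implied it from the RHS operand type. A minimal sketch of why the extra parameter exists, using a hypothetical `StdTransformsFixedSketch` and `uint16_t` as a stand-in for a bf16 storage type (the real template lives elsewhere in arm_gemm):

#include <cstdint>

template <typename TLhsOperand, typename TRhsOperand, typename TResult,
          unsigned int Height, unsigned int Width, unsigned int KBlock>
struct StdTransformsFixedSketch {
    // transform helpers can now be parameterised on both operand types,
    // so kernels whose LHS and RHS element types differ declare that here
};

// Before: StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4>
// After:  StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4>
using TransformsSketch =
    StdTransformsFixedSketch<std::uint16_t, std::uint16_t, float, 6, 16, 4>;

For this bf16→fp32 kernel both operand types coincide, so the change is behaviour-preserving here and matters only for mixed-type kernels elsewhere in the series.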
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp
index f6389e27d1..47a85803d0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,18 +48,19 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -80,6 +81,7 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -102,23 +104,23 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 75f\n"
"beq 38f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x12, 3f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 15f\n"
@@ -209,8 +211,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"16:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -230,7 +232,12 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q6, [x10, #0x10]\n"
"blt 20f\n"
"19:" // Height 1: Multiply loop: Main loop head
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"trn1 v20.2d, v1.2d, v21.2d\n"
+ "trn2 v1.2d, v1.2d, v21.2d\n"
".inst 0x6e47ee88 // bfmmla v8.4s, v20.8h, v7.8h\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x6e46ee8c // bfmmla v12.4s, v20.8h, v6.8h\n"
@@ -243,7 +250,6 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ee8e // bfmmla v14.4s, v20.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v21.2d\n"
".inst 0x6e52ee8b // bfmmla v11.4s, v20.8h, v18.8h\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x6e51ee8f // bfmmla v15.4s, v20.8h, v17.8h\n"
@@ -260,19 +266,19 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x6e51ec2e // bfmmla v14.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0xf0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "cmp x27, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e51ec2f // bfmmla v15.4s, v1.8h, v17.8h\n"
"ldr q1, [x26, #0x0]\n"
- "add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 19b\n"
"20:" // Height 1: Multiply loop: Single iteration only
- "trn1 v19.2d, v1.2d, v20.2d\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "trn1 v19.2d, v1.2d, v17.2d\n"
+ "trn2 v1.2d, v1.2d, v17.2d\n"
".inst 0x6e47ee68 // bfmmla v8.4s, v19.8h, v7.8h\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x6e46ee6c // bfmmla v12.4s, v19.8h, v6.8h\n"
@@ -284,17 +290,16 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e51ee6a // bfmmla v10.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x60]\n"
".inst 0x6e52ee6e // bfmmla v14.4s, v19.8h, v18.8h\n"
- "ldr q24, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v20.2d\n"
+ "ldr q25, [x10, #0x70]\n"
".inst 0x6e51ee6b // bfmmla v11.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x80]\n"
- ".inst 0x6e58ee6f // bfmmla v15.4s, v19.8h, v24.8h\n"
- "ldr q2, [x10, #0x90]\n"
+ ".inst 0x6e59ee6f // bfmmla v15.4s, v19.8h, v25.8h\n"
+ "ldr q3, [x10, #0x90]\n"
".inst 0x6e51ec28 // bfmmla v8.4s, v1.8h, v17.8h\n"
- "ldr q18, [x10, #0xa0]\n"
- ".inst 0x6e42ec2c // bfmmla v12.4s, v1.8h, v2.8h\n"
+ "ldr q19, [x10, #0xa0]\n"
+ ".inst 0x6e43ec2c // bfmmla v12.4s, v1.8h, v3.8h\n"
"ldr q17, [x10, #0xb0]\n"
- ".inst 0x6e52ec29 // bfmmla v9.4s, v1.8h, v18.8h\n"
+ ".inst 0x6e53ec29 // bfmmla v9.4s, v1.8h, v19.8h\n"
"ldr q18, [x10, #0xc0]\n"
".inst 0x6e51ec2d // bfmmla v13.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0xd0]\n"
@@ -302,22 +307,21 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x6e51ec2e // bfmmla v14.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
".inst 0x6e51ec2f // bfmmla v15.4s, v1.8h, v17.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"21:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 26f\n"
"cmp x27, #0x4\n"
"blt 23f\n"
"22:" // Height 1: Multiply loop: Odd block loop
"ldr d19, [x26], #0x8\n"
- "ldr q18, [x10, #0x0]\n"
- "trn1 v19.2d, v19.2d, v17.2d\n"
+ "ldr q20, [x10, #0x0]\n"
+ "sub x27, x27, #0x4\n"
"ldr q17, [x10, #0x10]\n"
- ".inst 0x6e52ee68 // bfmmla v8.4s, v19.8h, v18.8h\n"
+ "cmp x27, #0x4\n"
+ "trn1 v19.2d, v19.2d, v18.2d\n"
+ ".inst 0x6e54ee68 // bfmmla v8.4s, v19.8h, v20.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e51ee6c // bfmmla v12.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x30]\n"
@@ -329,11 +333,9 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
- "add x10, x10, #0x80\n"
"bge 22b\n"
"23:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 26f\n"
@@ -360,9 +362,9 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e42ee6e // bfmmla v14.4s, v19.8h, v2.8h\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
- "add x10, x10, #0x80\n"
"26:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -374,9 +376,9 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v18.4s\n"
"fmin v9.4s, v9.4s, v18.4s\n"
@@ -446,23 +448,23 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"bgt 2b\n"
"b 224f\n"
"38:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"39:" // Height 2: Column loop
"cbz x12, 40f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 52f\n"
@@ -470,75 +472,75 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"tbz %x[flags], #0, 51f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
"bge 49f\n"
"tbz x11, #3, 44f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
"tbz x11, #2, 42f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
"tbz x11, #1, 41f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
"tbz x11, #0, 48f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v15.s }[2], [x26]\n"
"b 48f\n"
"41:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 48f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
"b 48f\n"
"42:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 43f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
"tbz x11, #0, 48f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v14.s }[2], [x26]\n"
"b 48f\n"
"43:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 48f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
"b 48f\n"
"44:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 46f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x11, #1, 45f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
"tbz x11, #0, 48f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 48f\n"
"45:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 48f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 48f\n"
"46:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 47f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
"tbz x11, #0, 48f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 48f\n"
"47:" // Height 2: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
"48:" // Height 2: Partial accumulate: Done
"sub x9, x9, x20\n"
@@ -548,10 +550,10 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"50:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -575,8 +577,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"53:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 54f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -601,6 +603,14 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"blt 57f\n"
"56:" // Height 2: Multiply loop: Main loop head
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ee68 // bfmmla v8.4s, v19.8h, v7.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e46ee6c // bfmmla v12.4s, v19.8h, v6.8h\n"
@@ -613,7 +623,6 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
@@ -630,22 +639,21 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x6e51ec2e // bfmmla v14.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0xf0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x10\n"
- ".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
"add x10, x10, #0x100\n"
+ ".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
"ldr q7, [x10, #0x0]\n"
".inst 0x6e51ec2f // bfmmla v15.4s, v1.8h, v17.8h\n"
"ldr q1, [x26, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"bge 56b\n"
"57:" // Height 2: Multiply loop: Single iteration only
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e47ee68 // bfmmla v8.4s, v19.8h, v7.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e46ee6c // bfmmla v12.4s, v19.8h, v6.8h\n"
@@ -658,7 +666,6 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
@@ -675,41 +682,36 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x6e51ec2e // bfmmla v14.4s, v1.8h, v17.8h\n"
"ldr q17, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e52ec2b // bfmmla v11.4s, v1.8h, v18.8h\n"
".inst 0x6e51ec2f // bfmmla v15.4s, v1.8h, v17.8h\n"
- "sub x27, x27, #0x8\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x10, x10, #0x100\n"
"58:" // Height 2: Multiply loop: Main loop skip
"cbz x27, 63f\n"
"cmp x27, #0x4\n"
"blt 60f\n"
"59:" // Height 2: Multiply loop: Odd block loop
- "ldr d18, [x26], #0x8\n"
- "ldr d17, [x25], #0x8\n"
- "trn1 v19.2d, v18.2d, v17.2d\n"
+ "ldr d20, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"sub x27, x27, #0x4\n"
"ldr q18, [x10, #0x0]\n"
"ldr q17, [x10, #0x10]\n"
+ "cmp x27, #0x4\n"
+ "trn1 v19.2d, v20.2d, v19.2d\n"
".inst 0x6e52ee68 // bfmmla v8.4s, v19.8h, v18.8h\n"
- ".inst 0x6e51ee6c // bfmmla v12.4s, v19.8h, v17.8h\n"
"ldr q26, [x10, #0x20]\n"
- "ldr q5, [x10, #0x30]\n"
+ ".inst 0x6e51ee6c // bfmmla v12.4s, v19.8h, v17.8h\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x6e5aee69 // bfmmla v9.4s, v19.8h, v26.8h\n"
- ".inst 0x6e45ee6d // bfmmla v13.4s, v19.8h, v5.8h\n"
"ldr q18, [x10, #0x40]\n"
+ ".inst 0x6e46ee6d // bfmmla v13.4s, v19.8h, v6.8h\n"
"ldr q17, [x10, #0x50]\n"
".inst 0x6e52ee6a // bfmmla v10.4s, v19.8h, v18.8h\n"
- ".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q18, [x10, #0x60]\n"
+ ".inst 0x6e51ee6e // bfmmla v14.4s, v19.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "cmp x27, #0x4\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
- "add x10, x10, #0x80\n"
"bge 59b\n"
"60:" // Height 2: Multiply loop: Skip odd blocks
"cbz x27, 63f\n"
@@ -734,35 +736,35 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e52ee69 // bfmmla v9.4s, v19.8h, v18.8h\n"
"ldr q30, [x10, #0x40]\n"
".inst 0x6e51ee6d // bfmmla v13.4s, v19.8h, v17.8h\n"
- "ldr q26, [x10, #0x50]\n"
+ "ldr q31, [x10, #0x50]\n"
".inst 0x6e5eee6a // bfmmla v10.4s, v19.8h, v30.8h\n"
"ldr q18, [x10, #0x60]\n"
- ".inst 0x6e5aee6e // bfmmla v14.4s, v19.8h, v26.8h\n"
+ ".inst 0x6e5fee6e // bfmmla v14.4s, v19.8h, v31.8h\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e52ee6b // bfmmla v11.4s, v19.8h, v18.8h\n"
".inst 0x6e51ee6f // bfmmla v15.4s, v19.8h, v17.8h\n"
- "add x10, x10, #0x80\n"
"63:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 53b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v18.4s\n"
"fmin v12.4s, v12.4s, v18.4s\n"
@@ -786,63 +788,63 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"tbz x11, #3, 68f\n"
"st1 { v7.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
"tbz x11, #2, 66f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
"tbz x11, #1, 65f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
+ "str d11, [x26], #0x8\n"
"tbz x11, #0, 72f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
+ "st1 { v11.s }[2], [x26]\n"
"b 72f\n"
"65:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 72f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
"b 72f\n"
"66:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 67f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
+ "str d10, [x26], #0x8\n"
"tbz x11, #0, 72f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
+ "st1 { v10.s }[2], [x26]\n"
"b 72f\n"
"67:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 72f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
"b 72f\n"
"68:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 70f\n"
"st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
"tbz x11, #1, 69f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
+ "str d9, [x26], #0x8\n"
"tbz x11, #0, 72f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
+ "st1 { v9.s }[2], [x26]\n"
"b 72f\n"
"69:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 72f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
"b 72f\n"
"70:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 71f\n"
"str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
+ "str d8, [x26], #0x8\n"
"tbz x11, #0, 72f\n"
"st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
+ "st1 { v8.s }[2], [x26]\n"
"b 72f\n"
"71:" // Height 2: Partial direct writeback: partial_1_0
"str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
"72:" // Height 2: Partial direct writeback: Done
"b 74f\n"
"73:" // Height 2: Full writeback
@@ -851,32 +853,32 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
"74:" // Height 2: Writeback done
"subs x11, x11, #0x10\n"
"bgt 39b\n"
"b 224f\n"
"75:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"76:" // Height 3: Column loop
"cbz x12, 77f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -891,94 +893,94 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"77:" // Height 3: no bias
"tbz %x[flags], #0, 88f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"bge 86f\n"
"tbz x11, #3, 81f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
"tbz x11, #2, 79f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
"tbz x11, #1, 78f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
"tbz x11, #0, 85f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
"b 85f\n"
"78:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 85f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
"b 85f\n"
"79:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 80f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x11, #0, 85f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
"b 85f\n"
"80:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 85f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
"b 85f\n"
"81:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 83f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"tbz x11, #1, 82f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x11, #0, 85f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
"b 85f\n"
"82:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 85f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
"b 85f\n"
"83:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 84f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x11, #0, 85f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
"b 85f\n"
"84:" // Height 3: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
"85:" // Height 3: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 87f\n"
@@ -987,14 +989,14 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
"87:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1034,8 +1036,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"90:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 91f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1065,35 +1067,38 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"93:" // Height 3: Multiply loop: Main loop head
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
- "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x6e47ef70 // bfmmla v16.4s, v27.8h, v7.8h\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e46ef74 // bfmmla v20.4s, v27.8h, v6.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "trn2 v3.2d, v3.2d, v29.2d\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x90]\n"
"ldr q2, [x25, #0x0]\n"
@@ -1101,15 +1106,12 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e5aec70 // bfmmla v16.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x6e59ec2c // bfmmla v12.4s, v1.8h, v25.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e59ec74 // bfmmla v20.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e5aec71 // bfmmla v17.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x6e59ec2d // bfmmla v13.4s, v1.8h, v25.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e59ec75 // bfmmla v21.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0xd0]\n"
".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
@@ -1131,43 +1133,43 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"94:" // Height 3: Multiply loop: Single iteration only
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
- "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x6e47ef70 // bfmmla v16.4s, v27.8h, v7.8h\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e46ef74 // bfmmla v20.4s, v27.8h, v6.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "trn2 v3.2d, v3.2d, v29.2d\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x90]\n"
".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e5aec70 // bfmmla v16.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x6e59ec2c // bfmmla v12.4s, v1.8h, v25.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e59ec74 // bfmmla v20.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
@@ -1192,25 +1194,25 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x27, #0x4\n"
"blt 97f\n"
"96:" // Height 3: Multiply loop: Odd block loop
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "trn1 v28.2d, v26.2d, v25.2d\n"
- "ldr d25, [x24], #0x8\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "sub x27, x27, #0x4\n"
+ "ldr d27, [x24], #0x8\n"
"ldr q26, [x10, #0x0]\n"
- "trn1 v27.2d, v25.2d, v27.2d\n"
- ".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
+ "cmp x27, #0x4\n"
"ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v27.2d, v29.2d\n"
+ ".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
".inst 0x6e5aef70 // bfmmla v16.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e59ef8c // bfmmla v12.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef74 // bfmmla v20.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "sub x27, x27, #0x4\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "cmp x27, #0x4\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
@@ -1219,8 +1221,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x70]\n"
- ".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
@@ -1246,9 +1248,9 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
+ ".inst 0x6e5def8c // bfmmla v12.4s, v28.8h, v29.8h\n"
".inst 0x6e5aef70 // bfmmla v16.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e5def8c // bfmmla v12.4s, v28.8h, v29.8h\n"
".inst 0x6e5def74 // bfmmla v20.4s, v27.8h, v29.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
@@ -1274,27 +1276,27 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 90b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "uzp1 v12.2d, v9.2d, v13.2d\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 101f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v26.4s\n"
"fmin v12.4s, v12.4s, v26.4s\n"
@@ -1326,79 +1328,79 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"tbz x11, #3, 105f\n"
"st1 { v7.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
"tbz x11, #2, 103f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
"tbz x11, #1, 102f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x11, #0, 109f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
"b 109f\n"
"102:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 109f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
"b 109f\n"
"103:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 104f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x11, #0, 109f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
"b 109f\n"
"104:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 109f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
"b 109f\n"
"105:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 107f\n"
"st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
"tbz x11, #1, 106f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x11, #0, 109f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
"b 109f\n"
"106:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 109f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
"b 109f\n"
"107:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 108f\n"
"str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x11, #0, 109f\n"
"st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
"b 109f\n"
"108:" // Height 3: Partial direct writeback: partial_1_0
"str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
"109:" // Height 3: Partial direct writeback: Done
"b 111f\n"
"110:" // Height 3: Full writeback
@@ -1407,36 +1409,36 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"111:" // Height 3: Writeback done
"subs x11, x11, #0x10\n"
"bgt 76b\n"
"b 224f\n"
"112:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"113:" // Height 4: Column loop
"cbz x12, 114f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -1451,111 +1453,111 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"114:" // Height 4: no bias
"tbz %x[flags], #0, 125f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"bge 123f\n"
"tbz x11, #3, 118f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
"tbz x11, #2, 116f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
"tbz x11, #1, 115f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x11, #0, 122f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
"b 122f\n"
"115:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 122f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
"b 122f\n"
"116:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 117f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x11, #0, 122f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
"b 122f\n"
"117:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 122f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
"b 122f\n"
"118:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 120f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"tbz x11, #1, 119f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x11, #0, 122f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
"b 122f\n"
"119:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 122f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
"b 122f\n"
"120:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 121f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x11, #0, 122f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
"b 122f\n"
"121:" // Height 4: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
"122:" // Height 4: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 124f\n"
@@ -1564,18 +1566,18 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"124:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1615,8 +1617,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"127:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 128f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1650,33 +1652,38 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"130:" // Height 4: Multiply loop: Main loop head
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
"sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q4, [x23, #0x0]\n"
+ ".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
+ ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ef70 // bfmmla v16.4s, v27.8h, v7.8h\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e46ef74 // bfmmla v20.4s, v27.8h, v6.8h\n"
"ldr q25, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
@@ -1687,23 +1694,18 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e5aec70 // bfmmla v16.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x6e59ec2c // bfmmla v12.4s, v1.8h, v25.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e59ec74 // bfmmla v20.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e5aec71 // bfmmla v17.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x6e59ec2d // bfmmla v13.4s, v1.8h, v25.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e59ec75 // bfmmla v21.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0xd0]\n"
".inst 0x6e5aec2a // bfmmla v10.4s, v1.8h, v26.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e5aec72 // bfmmla v18.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0xe0]\n"
".inst 0x6e59ec2e // bfmmla v14.4s, v1.8h, v25.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e59ec76 // bfmmla v22.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
@@ -1719,48 +1721,48 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"131:" // Height 4: Multiply loop: Single iteration only
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x8\n"
+ ".inst 0x6e47ef88 // bfmmla v8.4s, v28.8h, v7.8h\n"
+ ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ef70 // bfmmla v16.4s, v27.8h, v7.8h\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e46ef8c // bfmmla v12.4s, v28.8h, v6.8h\n"
".inst 0x6e46ef74 // bfmmla v20.4s, v27.8h, v6.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aef89 // bfmmla v9.4s, v28.8h, v26.8h\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e5aef71 // bfmmla v17.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ef8d // bfmmla v13.4s, v28.8h, v25.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e59ef75 // bfmmla v21.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e5aef8a // bfmmla v10.4s, v28.8h, v26.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e5aef72 // bfmmla v18.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e59ef8e // bfmmla v14.4s, v28.8h, v25.8h\n"
- "add x23, x23, #0x10\n"
".inst 0x6e59ef76 // bfmmla v22.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x6e5aef8b // bfmmla v11.4s, v28.8h, v26.8h\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e5aef73 // bfmmla v19.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x6e59ef8f // bfmmla v15.4s, v28.8h, v25.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e59ef77 // bfmmla v23.4s, v27.8h, v25.8h\n"
"ldr q25, [x10, #0x90]\n"
".inst 0x6e5aec28 // bfmmla v8.4s, v1.8h, v26.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e5aec70 // bfmmla v16.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x6e59ec2c // bfmmla v12.4s, v1.8h, v25.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e59ec74 // bfmmla v20.4s, v3.8h, v25.8h\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x6e5aec29 // bfmmla v9.4s, v1.8h, v26.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e5aec71 // bfmmla v17.4s, v3.8h, v26.8h\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x6e59ec2d // bfmmla v13.4s, v1.8h, v25.8h\n"
@@ -1782,16 +1784,16 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x27, #0x4\n"
"blt 134f\n"
"133:" // Height 4: Multiply loop: Odd block loop
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "trn1 v28.2d, v26.2d, v25.2d\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
"sub x27, x27, #0x4\n"
- "ldr d26, [x24], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "trn1 v27.2d, v26.2d, v25.2d\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"cmp x27, #0x4\n"
"ldr q26, [x10, #0x0]\n"
"ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v29.2d, v27.2d\n"
".inst 0x6e5aef88 // bfmmla v8.4s, v28.8h, v26.8h\n"
".inst 0x6e5aef70 // bfmmla v16.4s, v27.8h, v26.8h\n"
"ldr q26, [x10, #0x20]\n"
@@ -1868,23 +1870,23 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 127b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "uzp1 v12.2d, v9.2d, v13.2d\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
@@ -1892,9 +1894,9 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 138f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v26.4s\n"
"fmin v12.4s, v12.4s, v26.4s\n"
@@ -1934,95 +1936,95 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"tbz x11, #3, 142f\n"
"st1 { v7.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
"tbz x11, #2, 140f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
"tbz x11, #1, 139f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
"tbz x11, #0, 146f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
"b 146f\n"
"139:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 146f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
"b 146f\n"
"140:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 141f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
"tbz x11, #0, 146f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
"b 146f\n"
"141:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 146f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
"b 146f\n"
"142:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 144f\n"
"st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
"tbz x11, #1, 143f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
"tbz x11, #0, 146f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
"b 146f\n"
"143:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 146f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
"b 146f\n"
"144:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 145f\n"
"str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
"tbz x11, #0, 146f\n"
"st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
"b 146f\n"
"145:" // Height 4: Partial direct writeback: partial_1_0
"str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
"146:" // Height 4: Partial direct writeback: Done
"b 148f\n"
"147:" // Height 4: Full writeback
@@ -2031,40 +2033,40 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
"148:" // Height 4: Writeback done
"subs x11, x11, #0x10\n"
"bgt 113b\n"
"b 224f\n"
"149:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"150:" // Height 5: Column loop
"cbz x12, 151f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -2087,128 +2089,128 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"151:" // Height 5: no bias
"tbz %x[flags], #0, 162f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
"bge 160f\n"
"tbz x11, #3, 155f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
"tbz x11, #2, 153f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v27.4s }, [x23], #0x10\n"
"tbz x11, #1, 152f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d6, [x23], #0x8\n"
"tbz x11, #0, 159f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v6.s }[2], [x23]\n"
"b 159f\n"
"152:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 159f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s6, [x23, #0x0]\n"
"b 159f\n"
"153:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 154f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x11, #0, 159f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
"b 159f\n"
"154:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 159f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
"b 159f\n"
"155:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 157f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x11, #1, 156f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x11, #0, 159f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
"b 159f\n"
"156:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 159f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
"b 159f\n"
"157:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 158f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x11, #0, 159f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 159f\n"
"158:" // Height 5: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"159:" // Height 5: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 161f\n"
@@ -2217,22 +2219,22 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q25, [x23, #0x0]\n"
+ "ldr q26, [x23, #0x10]\n"
+ "ldr q27, [x23, #0x20]\n"
+ "ldr q6, [x23, #0x30]\n"
"161:" // Height 5: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2288,8 +2290,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"164:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 165f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2326,51 +2328,51 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"167:" // Height 5: Multiply loop: Main loop head
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ecc8 // bfmmla v8.4s, v6.8h, v7.8h\n"
+ "sub x27, x27, #0x8\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"trn1 v4.2d, v5.2d, v0.2d\n"
"trn2 v5.2d, v5.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x6e47ecc8 // bfmmla v8.4s, v6.8h, v7.8h\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "add x22, x22, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e40eccc // bfmmla v12.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec54 // bfmmla v20.4s, v2.8h, v0.8h\n"
- "add x26, x26, #0x10\n"
".inst 0x6e40ec9c // bfmmla v28.4s, v4.8h, v0.8h\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x6e47ecc9 // bfmmla v9.4s, v6.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
- "add x24, x24, #0x10\n"
".inst 0x6e40eccd // bfmmla v13.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec55 // bfmmla v21.4s, v2.8h, v0.8h\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6e40ec9d // bfmmla v29.4s, v4.8h, v0.8h\n"
"ldr q0, [x10, #0x50]\n"
".inst 0x6e47ecca // bfmmla v10.4s, v6.8h, v7.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e40ecce // bfmmla v14.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec56 // bfmmla v22.4s, v2.8h, v0.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e40ec9e // bfmmla v30.4s, v4.8h, v0.8h\n"
"ldr q0, [x10, #0x70]\n"
".inst 0x6e47eccb // bfmmla v11.4s, v6.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e40eccf // bfmmla v15.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec57 // bfmmla v23.4s, v2.8h, v0.8h\n"
"ldr q2, [x25, #0x0]\n"
@@ -2416,47 +2418,47 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"168:" // Height 5: Multiply loop: Single iteration only
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ecc8 // bfmmla v8.4s, v6.8h, v7.8h\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"trn1 v4.2d, v5.2d, v0.2d\n"
"trn2 v5.2d, v5.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x6e47ecc8 // bfmmla v8.4s, v6.8h, v7.8h\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e40eccc // bfmmla v12.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec54 // bfmmla v20.4s, v2.8h, v0.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e40ec9c // bfmmla v28.4s, v4.8h, v0.8h\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x6e47ecc9 // bfmmla v9.4s, v6.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e40eccd // bfmmla v13.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec55 // bfmmla v21.4s, v2.8h, v0.8h\n"
- "add x22, x22, #0x10\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e40ec9d // bfmmla v29.4s, v4.8h, v0.8h\n"
"ldr q0, [x10, #0x50]\n"
".inst 0x6e47ecca // bfmmla v10.4s, v6.8h, v7.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e40ecce // bfmmla v14.4s, v6.8h, v0.8h\n"
".inst 0x6e40ec56 // bfmmla v22.4s, v2.8h, v0.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e40ec9e // bfmmla v30.4s, v4.8h, v0.8h\n"
"ldr q0, [x10, #0x70]\n"
".inst 0x6e47eccb // bfmmla v11.4s, v6.8h, v7.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
@@ -2500,24 +2502,24 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x27, #0x4\n"
"blt 171f\n"
"170:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x4\n"
+ "ldr d3, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "cmp x27, #0x4\n"
"ldr d0, [x22], #0x8\n"
"ldr q1, [x10, #0x0]\n"
- "trn1 v2.2d, v0.2d, v2.2d\n"
- ".inst 0x6e41ec88 // bfmmla v8.4s, v4.8h, v1.8h\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v3.2d, v2.2d\n"
+ "trn1 v2.2d, v0.2d, v5.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x6e41ec88 // bfmmla v8.4s, v4.8h, v1.8h\n"
".inst 0x6e41ec70 // bfmmla v16.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec58 // bfmmla v24.4s, v2.8h, v1.8h\n"
"ldr q1, [x10, #0x20]\n"
".inst 0x6e40ec8c // bfmmla v12.4s, v4.8h, v0.8h\n"
".inst 0x6e40ec74 // bfmmla v20.4s, v3.8h, v0.8h\n"
- "cmp x27, #0x4\n"
".inst 0x6e40ec5c // bfmmla v28.4s, v2.8h, v0.8h\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x6e41ec89 // bfmmla v9.4s, v4.8h, v1.8h\n"
@@ -2536,8 +2538,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e40ec76 // bfmmla v22.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec5e // bfmmla v30.4s, v2.8h, v0.8h\n"
"ldr q0, [x10, #0x70]\n"
- ".inst 0x6e46ec8b // bfmmla v11.4s, v4.8h, v6.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e46ec8b // bfmmla v11.4s, v4.8h, v6.8h\n"
".inst 0x6e46ec73 // bfmmla v19.4s, v3.8h, v6.8h\n"
".inst 0x6e46ec5b // bfmmla v27.4s, v2.8h, v6.8h\n"
".inst 0x6e40ec8f // bfmmla v15.4s, v4.8h, v0.8h\n"
@@ -2608,27 +2610,27 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 164b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
@@ -2638,9 +2640,9 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 175f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
@@ -2688,111 +2690,111 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"tbz x11, #3, 179f\n"
"st1 { v7.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
"tbz x11, #2, 177f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
"tbz x11, #1, 176f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x11, #0, 183f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
"b 183f\n"
"176:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 183f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
"b 183f\n"
"177:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 178f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x11, #0, 183f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
"b 183f\n"
"178:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 183f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
"b 183f\n"
"179:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 181f\n"
"st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x11, #1, 180f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x11, #0, 183f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 183f\n"
"180:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 183f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 183f\n"
"181:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 182f\n"
"str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x11, #0, 183f\n"
"st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 183f\n"
"182:" // Height 5: Partial direct writeback: partial_1_0
"str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"183:" // Height 5: Partial direct writeback: Done
"b 185f\n"
"184:" // Height 5: Full writeback
@@ -2801,47 +2803,48 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"185:" // Height 5: Writeback done
"subs x11, x11, #0x10\n"
"bgt 150b\n"
"b 224f\n"
"186:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"187:" // Height 6: Column loop
"cbz x12, 188f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -2864,145 +2867,145 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"188:" // Height 6: no bias
"tbz %x[flags], #0, 199f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
"bge 197f\n"
"tbz x11, #3, 192f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x11, #2, 190f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v27.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x11, #1, 189f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d6, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x11, #0, 196f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v6.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 196f\n"
"189:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 196f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s6, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 196f\n"
"190:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 191f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x11, #0, 196f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 196f\n"
"191:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 196f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 196f\n"
"192:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 194f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x11, #1, 193f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x11, #0, 196f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 196f\n"
"193:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 196f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 196f\n"
"194:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 195f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x11, #0, 196f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 196f\n"
"195:" // Height 6: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"196:" // Height 6: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 198f\n"
@@ -3011,26 +3014,26 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q25, [x23, #0x0]\n"
+ "ldr q26, [x23, #0x10]\n"
+ "ldr q27, [x23, #0x20]\n"
+ "ldr q6, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"198:" // Height 6: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -3086,8 +3089,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"201:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 202f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -3128,56 +3131,56 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"204:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x25, x25, #0x10\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "cmp x27, #0x10\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q2, [x25, #0x0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
"ldr q0, [x10, #0x90]\n"
"ldr q4, [x23, #0x0]\n"
@@ -3221,52 +3224,52 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"205:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
- "add x24, x24, #0x10\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e47ec09 // bfmmla v9.4s, v0.8h, v7.8h\n"
- "add x23, x23, #0x10\n"
".inst 0x6e47ec51 // bfmmla v17.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec99 // bfmmla v25.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x40]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e46ec0d // bfmmla v13.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e46ec9d // bfmmla v29.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e47ec0a // bfmmla v10.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e47ec52 // bfmmla v18.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9a // bfmmla v26.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e46ec0e // bfmmla v14.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec56 // bfmmla v22.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e46ec9e // bfmmla v30.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec53 // bfmmla v19.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9b // bfmmla v27.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9f // bfmmla v31.4s, v4.8h, v6.8h\n"
@@ -3307,18 +3310,18 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x27, #0x4\n"
"blt 208f\n"
"207:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x4\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d5, [x24], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
"cmp x27, #0x4\n"
- "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"ldr d0, [x21], #0x8\n"
- "trn1 v2.2d, v1.2d, v0.2d\n"
"ldr q1, [x10, #0x0]\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v5.2d, v3.2d\n"
+ "trn1 v2.2d, v2.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
".inst 0x6e41ec88 // bfmmla v8.4s, v4.8h, v1.8h\n"
".inst 0x6e41ec70 // bfmmla v16.4s, v3.8h, v1.8h\n"
@@ -3380,9 +3383,9 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"ldr q0, [x10, #0x0]\n"
"trn1 v7.2d, v1.2d, v2.2d\n"
"trn1 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e40ece8 // bfmmla v8.4s, v7.8h, v0.8h\n"
"trn1 v2.2d, v5.2d, v6.2d\n"
"ldr q1, [x10, #0x10]\n"
+ ".inst 0x6e40ece8 // bfmmla v8.4s, v7.8h, v0.8h\n"
".inst 0x6e40ec70 // bfmmla v16.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec58 // bfmmla v24.4s, v2.8h, v0.8h\n"
"ldr q0, [x10, #0x20]\n"
@@ -3406,8 +3409,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
".inst 0x6e41ec76 // bfmmla v22.4s, v3.8h, v1.8h\n"
".inst 0x6e41ec5e // bfmmla v30.4s, v2.8h, v1.8h\n"
"ldr q6, [x10, #0x70]\n"
- ".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
".inst 0x6e40ec73 // bfmmla v19.4s, v3.8h, v0.8h\n"
".inst 0x6e40ec5b // bfmmla v27.4s, v2.8h, v0.8h\n"
".inst 0x6e46ecef // bfmmla v15.4s, v7.8h, v6.8h\n"
@@ -3419,31 +3422,31 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 201b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
@@ -3455,9 +3458,9 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 212f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v7.4s, v7.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
@@ -3513,127 +3516,127 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"tbz x11, #3, 216f\n"
"st1 { v7.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v25.4s }, [x22], #0x10\n"
"tbz x11, #2, 214f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v29.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
"tbz x11, #1, 213f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
+ "str d27, [x22], #0x8\n"
"tbz x11, #0, 220f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
+ "st1 { v27.s }[2], [x22]\n"
"b 220f\n"
"213:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 220f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
+ "str s27, [x22, #0x0]\n"
"b 220f\n"
"214:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 215f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d29, [x23], #0x8\n"
+ "str d26, [x22], #0x8\n"
"tbz x11, #0, 220f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v29.s }[2], [x23]\n"
+ "st1 { v26.s }[2], [x22]\n"
"b 220f\n"
"215:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 220f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s29, [x23, #0x0]\n"
+ "str s26, [x22, #0x0]\n"
"b 220f\n"
"216:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 218f\n"
"st1 { v7.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
"tbz x11, #1, 217f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
+ "str d25, [x22], #0x8\n"
"tbz x11, #0, 220f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "st1 { v25.s }[2], [x22]\n"
"b 220f\n"
"217:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 220f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
+ "str s25, [x22, #0x0]\n"
"b 220f\n"
"218:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 219f\n"
"str d7, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
"tbz x11, #0, 220f\n"
"st1 { v7.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
+ "st1 { v24.s }[2], [x22]\n"
"b 220f\n"
"219:" // Height 6: Partial direct writeback: partial_1_0
"str s7, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
+ "str s24, [x22, #0x0]\n"
"220:" // Height 6: Partial direct writeback: Done
"b 222f\n"
"221:" // Height 6: Full writeback
@@ -3642,26 +3645,26 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q23, [x22, #0x0]\n"
- "str q28, [x22, #0x10]\n"
- "str q29, [x22, #0x20]\n"
- "str q30, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
+ "str q23, [x23, #0x0]\n"
+ "str q28, [x23, #0x10]\n"
+ "str q29, [x23, #0x20]\n"
+ "str q30, [x23, #0x30]\n"
+ "str q24, [x22, #0x0]\n"
+ "str q25, [x22, #0x10]\n"
+ "str q26, [x22, #0x20]\n"
+ "str q27, [x22, #0x30]\n"
"222:" // Height 6: Writeback done
"subs x11, x11, #0x10\n"
"bgt 187b\n"
@@ -3677,8 +3680,8 @@ void a64_hybrid_bf16fp32_mmla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"224:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
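
The operand-list change just above is the heart of this refactor: bias and output_ptr leave the inline-asm constraint list and travel inside KernelArgs instead, read back with "ldr xN, [%x[args_ptr], %[offsetof_...]]". That frees two general-purpose registers for the kernel body, which is why the accumulator-row pointers shift from x22..x25 to x23..x26 throughout the hunks above. A minimal sketch of the pattern follows; everything beyond what is visible in this diff (the output_ptr and bias fields and the offsetof "I"-constraint idiom) is an illustrative assumption, not the library's full struct.

    #include <cstddef>

    // Sketch only: the real KernelArgs carries many more members
    // (N, B_ptr, output_offset, string lengths, min/max, ...).
    struct KernelArgs {
        void *output_ptr = nullptr;   // previously a separate "+&r" asm operand
        const float *bias = nullptr;  // previously a separate "r" asm operand
    };

    // AArch64-only sketch: load the pointers from the args block instead of
    // binding them as register operands, as the rewritten prologues above do.
    void load_pointers_sketch(KernelArgs &ka) {
        __asm__ __volatile__(
            "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
            "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            :
            : [args_ptr] "r"(&ka),
              [offsetof_bias] "I"(offsetof(KernelArgs, bias)),
              [offsetof_output_ptr] "I"(offsetof(KernelArgs, output_ptr))
            : "x9", "x12", "memory");
    }
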
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp
index 8b80c25beb..12244a2e99 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 32, 1> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 32, 1> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
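
The one-line change to transforms above widens StdTransformsFixed to name the LHS operand type explicitly instead of reusing the RHS type for both sides. Inferred purely from the instantiation shown in this hunk, the primary template plausibly now has the shape sketched below; parameter names and the trailing defaults are illustrative assumptions, not the library's declaration.

    // Assumed shape, matching the member declaration in this header:
    //   StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 32, 1> transforms = {};
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned int Height, unsigned int Width, unsigned int Block = 1>
    class StdTransformsFixed {
        // ... interleave A (TLhs) and B (TRhs), merge C (TResult)
        //     at a fixed Height x Width tile ...
    };
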
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp
index b049ed45f9..aae6322b59 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void a64_hybrid_fp16_mla_6x32_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const __fp16 *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void a64_hybrid_fp16_mla_6x32_a55 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -101,10 +103,10 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"cmp %x[M], #0x2\n"
"bgt 99f\n"
"beq 50f\n"
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x7, 3f\n"
"ldr q8, [x7, #0x0]\n"
@@ -243,8 +245,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"mov x15, #0x0\n"
"23:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 24f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -266,150 +268,153 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"26:" // Height 1: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr d17, [x17, #0x20]\n"
- "ldr x20, [x17, #0x28]\n"
+ "ldr x21, [x17, #0x28]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"ldr d16, [x17, #0x30]\n"
- "mov v17.d[1], x20\n"
+ "add x13, x13, #0x10\n"
"ldr x20, [x17, #0x38]\n"
+ "sub x14, x14, #0x8\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x48]\n"
+ "ldr x22, [x13, #0x8]\n"
+ "cmp x14, #0x10\n"
"mov v16.d[1], x20\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"ldr d17, [x17, #0x40]\n"
- "ldr x20, [x17, #0x48]\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
"ldr d16, [x17, #0x50]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x58]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x68]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"mov v16.d[1], x20\n"
"fmla v8.8h, v17.8h, v0.h[1]\n"
"ldr d17, [x17, #0x60]\n"
- "ldr x20, [x17, #0x68]\n"
"fmla v9.8h, v16.8h, v0.h[1]\n"
"ldr d16, [x17, #0x70]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x78]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x88]\n"
"mov v16.d[1], x20\n"
"fmla v10.8h, v17.8h, v0.h[1]\n"
"ldr d17, [x17, #0x80]\n"
- "ldr x20, [x17, #0x88]\n"
"fmla v11.8h, v16.8h, v0.h[1]\n"
"ldr d16, [x17, #0x90]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x98]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xa8]\n"
"mov v16.d[1], x20\n"
"fmla v8.8h, v17.8h, v0.h[2]\n"
"ldr d17, [x17, #0xa0]\n"
- "ldr x20, [x17, #0xa8]\n"
"fmla v9.8h, v16.8h, v0.h[2]\n"
"ldr d16, [x17, #0xb0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0xb8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xc8]\n"
"mov v16.d[1], x20\n"
"fmla v10.8h, v17.8h, v0.h[2]\n"
"ldr d17, [x17, #0xc0]\n"
- "ldr x20, [x17, #0xc8]\n"
"fmla v11.8h, v16.8h, v0.h[2]\n"
"ldr d16, [x17, #0xd0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0xd8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xe8]\n"
"mov v16.d[1], x20\n"
"fmla v8.8h, v17.8h, v0.h[3]\n"
"ldr d17, [x17, #0xe0]\n"
- "ldr x20, [x17, #0xe8]\n"
"fmla v9.8h, v16.8h, v0.h[3]\n"
"ldr d16, [x17, #0xf0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0xf8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x108]\n"
"mov v16.d[1], x20\n"
"fmla v10.8h, v17.8h, v0.h[3]\n"
"ldr d17, [x17, #0x100]\n"
- "ldr x20, [x17, #0x108]\n"
"fmla v11.8h, v16.8h, v0.h[3]\n"
"ldr d16, [x17, #0x110]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x118]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x128]\n"
"mov v16.d[1], x20\n"
"fmla v8.8h, v17.8h, v0.h[4]\n"
"ldr d17, [x17, #0x120]\n"
- "ldr x20, [x17, #0x128]\n"
"fmla v9.8h, v16.8h, v0.h[4]\n"
"ldr d16, [x17, #0x130]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x138]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x148]\n"
"mov v16.d[1], x20\n"
"fmla v10.8h, v17.8h, v0.h[4]\n"
"ldr d17, [x17, #0x140]\n"
- "ldr x20, [x17, #0x148]\n"
"fmla v11.8h, v16.8h, v0.h[4]\n"
"ldr d16, [x17, #0x150]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x158]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x168]\n"
"mov v16.d[1], x20\n"
"fmla v8.8h, v17.8h, v0.h[5]\n"
"ldr d17, [x17, #0x160]\n"
- "ldr x20, [x17, #0x168]\n"
"fmla v9.8h, v16.8h, v0.h[5]\n"
"ldr d16, [x17, #0x170]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x178]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x188]\n"
"mov v16.d[1], x20\n"
"fmla v10.8h, v17.8h, v0.h[5]\n"
"ldr d17, [x17, #0x180]\n"
- "ldr x20, [x17, #0x188]\n"
"fmla v11.8h, v16.8h, v0.h[5]\n"
"ldr d16, [x17, #0x190]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x198]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x1a8]\n"
"mov v16.d[1], x20\n"
"fmla v8.8h, v17.8h, v0.h[6]\n"
"ldr d17, [x17, #0x1a0]\n"
- "ldr x20, [x17, #0x1a8]\n"
"fmla v9.8h, v16.8h, v0.h[6]\n"
"ldr d16, [x17, #0x1b0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x1b8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x1c8]\n"
"mov v16.d[1], x20\n"
"fmla v10.8h, v17.8h, v0.h[6]\n"
"ldr d17, [x17, #0x1c0]\n"
- "ldr x20, [x17, #0x1c8]\n"
"fmla v11.8h, v16.8h, v0.h[6]\n"
"ldr d16, [x17, #0x1d0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x1d8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x1e8]\n"
"mov v16.d[1], x20\n"
"fmla v8.8h, v17.8h, v0.h[7]\n"
"ldr d17, [x17, #0x1e0]\n"
- "ldr x20, [x17, #0x1e8]\n"
"fmla v9.8h, v16.8h, v0.h[7]\n"
"ldr d16, [x17, #0x1f0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x1f8]\n"
- "mov v16.d[1], x20\n"
- "add x13, x13, #0x10\n"
"add x17, x17, #0x200\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v16.d[1], x20\n"
"fmla v10.8h, v17.8h, v0.h[7]\n"
"ldr d6, [x17, #0x0]\n"
- "ldr x20, [x17, #0x8]\n"
"fmla v11.8h, v16.8h, v0.h[7]\n"
"ldr d0, [x13, #0x0]\n"
- "sub x14, x14, #0x8\n"
"ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x10\n"
- "ldr x21, [x13, #0x8]\n"
- "mov v6.d[1], x20\n"
"ldr x20, [x17, #0x18]\n"
- "mov v0.d[1], x21\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"bge 26b\n"
"27:" // Height 1: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q17, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"ldr q16, [x17, #0x30]\n"
+ "add x13, x13, #0x10\n"
+ "sub x14, x14, #0x8\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"ldr q17, [x17, #0x40]\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
"ldr q16, [x17, #0x50]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"fmla v8.8h, v17.8h, v0.h[1]\n"
"ldr q17, [x17, #0x60]\n"
"fmla v9.8h, v16.8h, v0.h[1]\n"
@@ -462,26 +467,23 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr q17, [x17, #0x1e0]\n"
"fmla v9.8h, v16.8h, v0.h[7]\n"
"ldr q16, [x17, #0x1f0]\n"
- "add x13, x13, #0x10\n"
- "sub x14, x14, #0x8\n"
+ "add x17, x17, #0x200\n"
"fmla v10.8h, v17.8h, v0.h[7]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"fmla v11.8h, v16.8h, v0.h[7]\n"
- "add x17, x17, #0x200\n"
"28:" // Height 1: Multiply loop: Main loop skip
"cbz x14, 30f\n"
"29:" // Height 1: Multiply loop: Odd block loop
"ldr h0, [x13], #0x2\n"
"sub x14, x14, #0x1\n"
- "ldr q16, [x17, #0x0]\n"
- "fmla v8.8h, v16.8h, v0.h[0]\n"
+ "ldr q17, [x17, #0x0]\n"
"ldr q16, [x17, #0x10]\n"
+ "fmla v8.8h, v17.8h, v0.h[0]\n"
+ "ldr q17, [x17, #0x20]\n"
"fmla v9.8h, v16.8h, v0.h[0]\n"
- "ldr q16, [x17, #0x20]\n"
- "fmla v10.8h, v16.8h, v0.h[0]\n"
"ldr q16, [x17, #0x30]\n"
- "fmla v11.8h, v16.8h, v0.h[0]\n"
"add x17, x17, #0x40\n"
+ "fmla v10.8h, v17.8h, v0.h[0]\n"
+ "fmla v11.8h, v16.8h, v0.h[0]\n"
"cbnz x14, 29b\n"
"30:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -490,14 +492,14 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"bne 23b\n"
"prfm pstl1keep, [x16, #0x0]\n"
"tbz %x[flags], #1, 31f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v16.8h\n"
- "fmin v9.8h, v9.8h, v16.8h\n"
- "fmin v10.8h, v10.8h, v16.8h\n"
- "fmin v11.8h, v11.8h, v16.8h\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.8h }, [x21]\n"
"ld1r { v16.8h }, [x20]\n"
+ "fmin v8.8h, v8.8h, v17.8h\n"
+ "fmin v9.8h, v9.8h, v17.8h\n"
+ "fmin v10.8h, v10.8h, v17.8h\n"
+ "fmin v11.8h, v11.8h, v17.8h\n"
"fmax v8.8h, v8.8h, v16.8h\n"
"fmax v9.8h, v9.8h, v16.8h\n"
"fmax v10.8h, v10.8h, v16.8h\n"
@@ -610,168 +612,168 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"bgt 2b\n"
"b 296f\n"
"50:" // Height 2
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"51:" // Height 2: Column loop
"cbz x7, 52f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
- "mov v15.16b, v11.16b\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"b 71f\n"
"52:" // Height 2: no bias
"tbz %x[flags], #0, 70f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x20\n"
- "add x25, x16, x20, LSL #1\n"
+ "add x26, x16, x20, LSL #1\n"
"bge 69f\n"
"tbz x8, #4, 60f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
"ld1 { v9.8h }, [x16], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
"tbz x8, #3, 56f\n"
"ld1 { v10.8h }, [x16], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
"tbz x8, #2, 54f\n"
"ldr d11, [x16], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"tbz x8, #1, 53f\n"
"ld1 { v11.s }[2], [x16], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"tbz x8, #0, 68f\n"
"ld1 { v11.h }[6], [x16]\n"
- "ld1 { v15.h }[6], [x25]\n"
+ "ld1 { v15.h }[6], [x26]\n"
"b 68f\n"
"53:" // Height 2: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x8, #0, 68f\n"
"ld1 { v11.h }[4], [x16]\n"
- "ld1 { v15.h }[4], [x25]\n"
+ "ld1 { v15.h }[4], [x26]\n"
"b 68f\n"
"54:" // Height 2: Partial accumulate: partial_2_24
"tbz x8, #1, 55f\n"
"ldr s11, [x16], #0x4\n"
"mov x20, #0x34\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"tbz x8, #0, 68f\n"
"ld1 { v11.h }[2], [x16]\n"
- "ld1 { v15.h }[2], [x25]\n"
+ "ld1 { v15.h }[2], [x26]\n"
"b 68f\n"
"55:" // Height 2: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x8, #0, 68f\n"
"ldr h11, [x16, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
"b 68f\n"
"56:" // Height 2: Partial accumulate: partial_4_16
"tbz x8, #2, 58f\n"
"ldr d10, [x16], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"tbz x8, #1, 57f\n"
"ld1 { v10.s }[2], [x16], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"tbz x8, #0, 68f\n"
"ld1 { v10.h }[6], [x16]\n"
- "ld1 { v14.h }[6], [x25]\n"
+ "ld1 { v14.h }[6], [x26]\n"
"b 68f\n"
"57:" // Height 2: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x8, #0, 68f\n"
"ld1 { v10.h }[4], [x16]\n"
- "ld1 { v14.h }[4], [x25]\n"
+ "ld1 { v14.h }[4], [x26]\n"
"b 68f\n"
"58:" // Height 2: Partial accumulate: partial_2_16
"tbz x8, #1, 59f\n"
"ldr s10, [x16], #0x4\n"
"mov x20, #0x24\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"tbz x8, #0, 68f\n"
"ld1 { v10.h }[2], [x16]\n"
- "ld1 { v14.h }[2], [x25]\n"
+ "ld1 { v14.h }[2], [x26]\n"
"b 68f\n"
"59:" // Height 2: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x8, #0, 68f\n"
"ldr h10, [x16, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
"b 68f\n"
"60:" // Height 2: Partial accumulate: partial_8_0
"tbz x8, #3, 64f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
"tbz x8, #2, 62f\n"
"ldr d9, [x16], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"tbz x8, #1, 61f\n"
"ld1 { v9.s }[2], [x16], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"tbz x8, #0, 68f\n"
"ld1 { v9.h }[6], [x16]\n"
- "ld1 { v13.h }[6], [x25]\n"
+ "ld1 { v13.h }[6], [x26]\n"
"b 68f\n"
"61:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x8, #0, 68f\n"
"ld1 { v9.h }[4], [x16]\n"
- "ld1 { v13.h }[4], [x25]\n"
+ "ld1 { v13.h }[4], [x26]\n"
"b 68f\n"
"62:" // Height 2: Partial accumulate: partial_2_8
"tbz x8, #1, 63f\n"
"ldr s9, [x16], #0x4\n"
"mov x20, #0x14\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"tbz x8, #0, 68f\n"
"ld1 { v9.h }[2], [x16]\n"
- "ld1 { v13.h }[2], [x25]\n"
+ "ld1 { v13.h }[2], [x26]\n"
"b 68f\n"
"63:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x8, #0, 68f\n"
"ldr h9, [x16, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
"b 68f\n"
"64:" // Height 2: Partial accumulate: partial_4_0
"tbz x8, #2, 66f\n"
"ldr d8, [x16], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"tbz x8, #1, 65f\n"
"ld1 { v8.s }[2], [x16], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"tbz x8, #0, 68f\n"
"ld1 { v8.h }[6], [x16]\n"
- "ld1 { v12.h }[6], [x25]\n"
+ "ld1 { v12.h }[6], [x26]\n"
"b 68f\n"
"65:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x8, #0, 68f\n"
"ld1 { v8.h }[4], [x16]\n"
- "ld1 { v12.h }[4], [x25]\n"
+ "ld1 { v12.h }[4], [x26]\n"
"b 68f\n"
"66:" // Height 2: Partial accumulate: partial_2_0
"tbz x8, #1, 67f\n"
"ldr s8, [x16], #0x4\n"
"mov x20, #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"tbz x8, #0, 68f\n"
"ld1 { v8.h }[2], [x16]\n"
- "ld1 { v12.h }[2], [x25]\n"
+ "ld1 { v12.h }[2], [x26]\n"
"b 68f\n"
"67:" // Height 2: Partial accumulate: partial_1_0
"ldr h8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"68:" // Height 2: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 71f\n"
@@ -780,10 +782,10 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"b 71f\n"
"70:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -798,8 +800,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"mov x15, #0x0\n"
"72:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -824,178 +826,178 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"blt 76f\n"
"75:" // Height 2: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr x21, [x17, #0x28]\n"
+ "ldr x20, [x17, #0x28]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
"ldr d17, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "ldr x20, [x17, #0x38]\n"
+ "ldr x21, [x17, #0x38]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr d16, [x17, #0x30]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x48]\n"
+ "add x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
+ "mov v16.d[1], x21\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
- "mov v16.d[1], x20\n"
"fmla v14.8h, v17.8h, v1.h[0]\n"
"ldr d17, [x17, #0x40]\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
- "ldr x20, [x17, #0x48]\n"
+ "ldr x21, [x17, #0x58]\n"
"fmla v15.8h, v16.8h, v1.h[0]\n"
"ldr d16, [x17, #0x50]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0x58]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0x68]\n"
+ "ldr x23, [x13, #0x8]\n"
+ "sub x14, x14, #0x8\n"
+ "mov v16.d[1], x21\n"
"fmla v8.8h, v17.8h, v0.h[1]\n"
- "ldr x21, [x17, #0x68]\n"
"fmla v12.8h, v17.8h, v1.h[1]\n"
"ldr d17, [x17, #0x60]\n"
"fmla v9.8h, v16.8h, v0.h[1]\n"
- "ldr x20, [x17, #0x78]\n"
+ "ldr x21, [x17, #0x78]\n"
"fmla v13.8h, v16.8h, v1.h[1]\n"
"ldr d16, [x17, #0x70]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x88]\n"
+ "ldr x22, [x12, #0x8]\n"
+ "cmp x14, #0x10\n"
+ "mov v16.d[1], x21\n"
"fmla v10.8h, v17.8h, v0.h[1]\n"
- "mov v16.d[1], x20\n"
"fmla v14.8h, v17.8h, v1.h[1]\n"
"ldr d17, [x17, #0x80]\n"
"fmla v11.8h, v16.8h, v0.h[1]\n"
- "ldr x20, [x17, #0x88]\n"
+ "ldr x21, [x17, #0x98]\n"
"fmla v15.8h, v16.8h, v1.h[1]\n"
"ldr d16, [x17, #0x90]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0x98]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0xa8]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v16.d[1], x21\n"
"fmla v8.8h, v17.8h, v0.h[2]\n"
- "ldr x21, [x17, #0xa8]\n"
"fmla v12.8h, v17.8h, v1.h[2]\n"
"ldr d17, [x17, #0xa0]\n"
"fmla v9.8h, v16.8h, v0.h[2]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "ldr x21, [x17, #0xb8]\n"
"fmla v13.8h, v16.8h, v1.h[2]\n"
"ldr d16, [x17, #0xb0]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0xc8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v16.d[1], x21\n"
"fmla v10.8h, v17.8h, v0.h[2]\n"
- "mov v16.d[1], x20\n"
"fmla v14.8h, v17.8h, v1.h[2]\n"
"ldr d17, [x17, #0xc0]\n"
"fmla v11.8h, v16.8h, v0.h[2]\n"
- "ldr x20, [x17, #0xc8]\n"
+ "ldr x21, [x17, #0xd8]\n"
"fmla v15.8h, v16.8h, v1.h[2]\n"
"ldr d16, [x17, #0xd0]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0xd8]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0xe8]\n"
+ "mov v16.d[1], x21\n"
"fmla v8.8h, v17.8h, v0.h[3]\n"
- "ldr x21, [x17, #0xe8]\n"
"fmla v12.8h, v17.8h, v1.h[3]\n"
"ldr d17, [x17, #0xe0]\n"
"fmla v9.8h, v16.8h, v0.h[3]\n"
- "ldr x20, [x17, #0xf8]\n"
+ "ldr x21, [x17, #0xf8]\n"
"fmla v13.8h, v16.8h, v1.h[3]\n"
"ldr d16, [x17, #0xf0]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x108]\n"
+ "mov v16.d[1], x21\n"
"fmla v10.8h, v17.8h, v0.h[3]\n"
- "mov v16.d[1], x20\n"
"fmla v14.8h, v17.8h, v1.h[3]\n"
"ldr d17, [x17, #0x100]\n"
"fmla v11.8h, v16.8h, v0.h[3]\n"
- "ldr x20, [x17, #0x108]\n"
+ "ldr x21, [x17, #0x118]\n"
"fmla v15.8h, v16.8h, v1.h[3]\n"
"ldr d16, [x17, #0x110]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0x118]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0x128]\n"
+ "mov v16.d[1], x21\n"
"fmla v8.8h, v17.8h, v0.h[4]\n"
- "ldr x21, [x17, #0x128]\n"
"fmla v12.8h, v17.8h, v1.h[4]\n"
"ldr d17, [x17, #0x120]\n"
"fmla v9.8h, v16.8h, v0.h[4]\n"
- "ldr x20, [x17, #0x138]\n"
+ "ldr x21, [x17, #0x138]\n"
"fmla v13.8h, v16.8h, v1.h[4]\n"
"ldr d16, [x17, #0x130]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x148]\n"
+ "mov v16.d[1], x21\n"
"fmla v10.8h, v17.8h, v0.h[4]\n"
- "mov v16.d[1], x20\n"
"fmla v14.8h, v17.8h, v1.h[4]\n"
"ldr d17, [x17, #0x140]\n"
"fmla v11.8h, v16.8h, v0.h[4]\n"
- "ldr x20, [x17, #0x148]\n"
+ "ldr x21, [x17, #0x158]\n"
"fmla v15.8h, v16.8h, v1.h[4]\n"
"ldr d16, [x17, #0x150]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0x158]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0x168]\n"
+ "mov v16.d[1], x21\n"
"fmla v8.8h, v17.8h, v0.h[5]\n"
- "ldr x21, [x17, #0x168]\n"
"fmla v12.8h, v17.8h, v1.h[5]\n"
"ldr d17, [x17, #0x160]\n"
"fmla v9.8h, v16.8h, v0.h[5]\n"
- "ldr x20, [x17, #0x178]\n"
+ "ldr x21, [x17, #0x178]\n"
"fmla v13.8h, v16.8h, v1.h[5]\n"
"ldr d16, [x17, #0x170]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x188]\n"
+ "mov v16.d[1], x21\n"
"fmla v10.8h, v17.8h, v0.h[5]\n"
- "mov v16.d[1], x20\n"
"fmla v14.8h, v17.8h, v1.h[5]\n"
"ldr d17, [x17, #0x180]\n"
"fmla v11.8h, v16.8h, v0.h[5]\n"
- "ldr x20, [x17, #0x188]\n"
+ "ldr x21, [x17, #0x198]\n"
"fmla v15.8h, v16.8h, v1.h[5]\n"
"ldr d16, [x17, #0x190]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0x198]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0x1a8]\n"
+ "mov v16.d[1], x21\n"
"fmla v8.8h, v17.8h, v0.h[6]\n"
- "ldr x21, [x17, #0x1a8]\n"
"fmla v12.8h, v17.8h, v1.h[6]\n"
"ldr d17, [x17, #0x1a0]\n"
"fmla v9.8h, v16.8h, v0.h[6]\n"
- "ldr x20, [x17, #0x1b8]\n"
+ "ldr x21, [x17, #0x1b8]\n"
"fmla v13.8h, v16.8h, v1.h[6]\n"
"ldr d16, [x17, #0x1b0]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x1c8]\n"
+ "mov v16.d[1], x21\n"
"fmla v10.8h, v17.8h, v0.h[6]\n"
- "mov v16.d[1], x20\n"
"fmla v14.8h, v17.8h, v1.h[6]\n"
"ldr d17, [x17, #0x1c0]\n"
"fmla v11.8h, v16.8h, v0.h[6]\n"
- "ldr x20, [x17, #0x1c8]\n"
+ "ldr x21, [x17, #0x1d8]\n"
"fmla v15.8h, v16.8h, v1.h[6]\n"
"ldr d16, [x17, #0x1d0]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0x1d8]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0x1e8]\n"
+ "mov v16.d[1], x21\n"
"fmla v8.8h, v17.8h, v0.h[7]\n"
- "ldr x21, [x17, #0x1e8]\n"
"fmla v12.8h, v17.8h, v1.h[7]\n"
"ldr d17, [x17, #0x1e0]\n"
"fmla v9.8h, v16.8h, v0.h[7]\n"
- "ldr x20, [x17, #0x1f8]\n"
+ "ldr x21, [x17, #0x1f8]\n"
"fmla v13.8h, v16.8h, v1.h[7]\n"
"ldr d16, [x17, #0x1f0]\n"
- "mov v17.d[1], x21\n"
- "add x13, x13, #0x10\n"
- "mov v16.d[1], x20\n"
- "add x12, x12, #0x10\n"
+ "mov v17.d[1], x20\n"
"add x17, x17, #0x200\n"
+ "ldr x20, [x17, #0x8]\n"
+ "mov v16.d[1], x21\n"
"fmla v10.8h, v17.8h, v0.h[7]\n"
"fmla v14.8h, v17.8h, v1.h[7]\n"
"ldr d6, [x17, #0x0]\n"
- "ldr x21, [x17, #0x8]\n"
"fmla v11.8h, v16.8h, v0.h[7]\n"
"ldr d0, [x13, #0x0]\n"
"fmla v15.8h, v16.8h, v1.h[7]\n"
"ldr d1, [x12, #0x0]\n"
- "sub x14, x14, #0x8\n"
"ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x10\n"
- "ldr x20, [x13, #0x8]\n"
- "mov v6.d[1], x21\n"
- "ldr x21, [x12, #0x8]\n"
- "mov v0.d[1], x20\n"
+ "mov v6.d[1], x20\n"
"ldr x20, [x17, #0x18]\n"
- "mov v1.d[1], x21\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v0.d[1], x23\n"
+ "mov v1.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x12, #0x80]\n"
"bge 75b\n"
"76:" // Height 2: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
@@ -1105,8 +1107,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"sub x14, x14, #0x1\n"
"ldr h0, [x12], #0x2\n"
"ldr q17, [x17, #0x0]\n"
- "fmla v8.8h, v17.8h, v1.h[0]\n"
"ldr q16, [x17, #0x10]\n"
+ "fmla v8.8h, v17.8h, v1.h[0]\n"
"fmla v12.8h, v17.8h, v0.h[0]\n"
"ldr q17, [x17, #0x20]\n"
"fmla v9.8h, v16.8h, v1.h[0]\n"
@@ -1124,22 +1126,22 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"cmp x15, x20\n"
"bne 72b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
"prfm pstl1keep, [x16, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add x26, x16, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 80f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v16.8h\n"
- "fmin v9.8h, v9.8h, v16.8h\n"
- "fmin v10.8h, v10.8h, v16.8h\n"
- "fmin v11.8h, v11.8h, v16.8h\n"
- "fmin v12.8h, v12.8h, v16.8h\n"
- "fmin v13.8h, v13.8h, v16.8h\n"
- "fmin v14.8h, v14.8h, v16.8h\n"
- "fmin v15.8h, v15.8h, v16.8h\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.8h }, [x21]\n"
"ld1r { v16.8h }, [x20]\n"
+ "fmin v8.8h, v8.8h, v17.8h\n"
+ "fmin v9.8h, v9.8h, v17.8h\n"
+ "fmin v10.8h, v10.8h, v17.8h\n"
+ "fmin v11.8h, v11.8h, v17.8h\n"
+ "fmin v12.8h, v12.8h, v17.8h\n"
+ "fmin v13.8h, v13.8h, v17.8h\n"
+ "fmin v14.8h, v14.8h, v17.8h\n"
+ "fmin v15.8h, v15.8h, v17.8h\n"
"fmax v8.8h, v8.8h, v16.8h\n"
"fmax v9.8h, v9.8h, v16.8h\n"
"fmax v10.8h, v10.8h, v16.8h\n"
@@ -1154,127 +1156,127 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"tbz x8, #4, 88f\n"
"st1 { v8.8h }, [x16], #0x10\n"
"st1 { v9.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
"tbz x8, #3, 84f\n"
"st1 { v10.8h }, [x16], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
"tbz x8, #2, 82f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d15, [x26], #0x8\n"
"tbz x8, #1, 81f\n"
"st1 { v11.s }[2], [x16], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
"tbz x8, #0, 96f\n"
"st1 { v11.h }[6], [x16]\n"
- "st1 { v15.h }[6], [x25]\n"
+ "st1 { v15.h }[6], [x26]\n"
"b 96f\n"
"81:" // Height 2: Partial direct writeback: partial_1_28
"tbz x8, #0, 96f\n"
"st1 { v11.h }[4], [x16]\n"
- "st1 { v15.h }[4], [x25]\n"
+ "st1 { v15.h }[4], [x26]\n"
"b 96f\n"
"82:" // Height 2: Partial direct writeback: partial_2_24
"tbz x8, #1, 83f\n"
"str s11, [x16], #0x4\n"
- "str s15, [x25], #0x4\n"
+ "str s15, [x26], #0x4\n"
"tbz x8, #0, 96f\n"
"st1 { v11.h }[2], [x16]\n"
- "st1 { v15.h }[2], [x25]\n"
+ "st1 { v15.h }[2], [x26]\n"
"b 96f\n"
"83:" // Height 2: Partial direct writeback: partial_1_24
"tbz x8, #0, 96f\n"
"str h11, [x16, #0x0]\n"
- "str h15, [x25, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
"b 96f\n"
"84:" // Height 2: Partial direct writeback: partial_4_16
"tbz x8, #2, 86f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d14, [x26], #0x8\n"
"tbz x8, #1, 85f\n"
"st1 { v10.s }[2], [x16], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
"tbz x8, #0, 96f\n"
"st1 { v10.h }[6], [x16]\n"
- "st1 { v14.h }[6], [x25]\n"
+ "st1 { v14.h }[6], [x26]\n"
"b 96f\n"
"85:" // Height 2: Partial direct writeback: partial_1_20
"tbz x8, #0, 96f\n"
"st1 { v10.h }[4], [x16]\n"
- "st1 { v14.h }[4], [x25]\n"
+ "st1 { v14.h }[4], [x26]\n"
"b 96f\n"
"86:" // Height 2: Partial direct writeback: partial_2_16
"tbz x8, #1, 87f\n"
"str s10, [x16], #0x4\n"
- "str s14, [x25], #0x4\n"
+ "str s14, [x26], #0x4\n"
"tbz x8, #0, 96f\n"
"st1 { v10.h }[2], [x16]\n"
- "st1 { v14.h }[2], [x25]\n"
+ "st1 { v14.h }[2], [x26]\n"
"b 96f\n"
"87:" // Height 2: Partial direct writeback: partial_1_16
"tbz x8, #0, 96f\n"
"str h10, [x16, #0x0]\n"
- "str h14, [x25, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
"b 96f\n"
"88:" // Height 2: Partial direct writeback: partial_8_0
"tbz x8, #3, 92f\n"
"st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
"tbz x8, #2, 90f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x8, #1, 89f\n"
"st1 { v9.s }[2], [x16], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
"tbz x8, #0, 96f\n"
"st1 { v9.h }[6], [x16]\n"
- "st1 { v13.h }[6], [x25]\n"
+ "st1 { v13.h }[6], [x26]\n"
"b 96f\n"
"89:" // Height 2: Partial direct writeback: partial_1_12
"tbz x8, #0, 96f\n"
"st1 { v9.h }[4], [x16]\n"
- "st1 { v13.h }[4], [x25]\n"
+ "st1 { v13.h }[4], [x26]\n"
"b 96f\n"
"90:" // Height 2: Partial direct writeback: partial_2_8
"tbz x8, #1, 91f\n"
"str s9, [x16], #0x4\n"
- "str s13, [x25], #0x4\n"
+ "str s13, [x26], #0x4\n"
"tbz x8, #0, 96f\n"
"st1 { v9.h }[2], [x16]\n"
- "st1 { v13.h }[2], [x25]\n"
+ "st1 { v13.h }[2], [x26]\n"
"b 96f\n"
"91:" // Height 2: Partial direct writeback: partial_1_8
"tbz x8, #0, 96f\n"
"str h9, [x16, #0x0]\n"
- "str h13, [x25, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
"b 96f\n"
"92:" // Height 2: Partial direct writeback: partial_4_0
"tbz x8, #2, 94f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x8, #1, 93f\n"
"st1 { v8.s }[2], [x16], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
"tbz x8, #0, 96f\n"
"st1 { v8.h }[6], [x16]\n"
- "st1 { v12.h }[6], [x25]\n"
+ "st1 { v12.h }[6], [x26]\n"
"b 96f\n"
"93:" // Height 2: Partial direct writeback: partial_1_4
"tbz x8, #0, 96f\n"
"st1 { v8.h }[4], [x16]\n"
- "st1 { v12.h }[4], [x25]\n"
+ "st1 { v12.h }[4], [x26]\n"
"b 96f\n"
"94:" // Height 2: Partial direct writeback: partial_2_0
"tbz x8, #1, 95f\n"
"str s8, [x16], #0x4\n"
- "str s12, [x25], #0x4\n"
+ "str s12, [x26], #0x4\n"
"tbz x8, #0, 96f\n"
"st1 { v8.h }[2], [x16]\n"
- "st1 { v12.h }[2], [x25]\n"
+ "st1 { v12.h }[2], [x26]\n"
"b 96f\n"
"95:" // Height 2: Partial direct writeback: partial_1_0
"str h8, [x16, #0x0]\n"
- "str h12, [x25, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
"96:" // Height 2: Partial direct writeback: Done
"b 98f\n"
"97:" // Height 2: Full writeback
@@ -1283,31 +1285,31 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
"98:" // Height 2: Writeback done
"subs x8, x8, #0x20\n"
"bgt 51b\n"
"b 296f\n"
"99:" // Height 3
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"100:" // Height 3: Column loop
"cbz x7, 101f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -1315,182 +1317,182 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"101:" // Height 3: no bias
"tbz %x[flags], #0, 119f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
"cmp x8, #0x20\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x26, x16, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"bge 118f\n"
"tbz x8, #4, 109f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
"ld1 { v9.8h }, [x16], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
"tbz x8, #3, 105f\n"
"ld1 { v10.8h }, [x16], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
"tbz x8, #2, 103f\n"
"ldr d11, [x16], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x8, #1, 102f\n"
"ld1 { v11.s }[2], [x16], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
"tbz x8, #0, 117f\n"
"ld1 { v11.h }[6], [x16]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
"b 117f\n"
"102:" // Height 3: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x8, #0, 117f\n"
"ld1 { v11.h }[4], [x16]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
"b 117f\n"
"103:" // Height 3: Partial accumulate: partial_2_24
"tbz x8, #1, 104f\n"
"ldr s11, [x16], #0x4\n"
"mov x20, #0x34\n"
- "ldr s15, [x25], #0x4\n"
- "ldr s19, [x24], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
"tbz x8, #0, 117f\n"
"ld1 { v11.h }[2], [x16]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
"b 117f\n"
"104:" // Height 3: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x8, #0, 117f\n"
"ldr h11, [x16, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
"b 117f\n"
"105:" // Height 3: Partial accumulate: partial_4_16
"tbz x8, #2, 107f\n"
"ldr d10, [x16], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x8, #1, 106f\n"
"ld1 { v10.s }[2], [x16], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
"tbz x8, #0, 117f\n"
"ld1 { v10.h }[6], [x16]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
"b 117f\n"
"106:" // Height 3: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x8, #0, 117f\n"
"ld1 { v10.h }[4], [x16]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
"b 117f\n"
"107:" // Height 3: Partial accumulate: partial_2_16
"tbz x8, #1, 108f\n"
"ldr s10, [x16], #0x4\n"
"mov x20, #0x24\n"
- "ldr s14, [x25], #0x4\n"
- "ldr s18, [x24], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
"tbz x8, #0, 117f\n"
"ld1 { v10.h }[2], [x16]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
"b 117f\n"
"108:" // Height 3: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x8, #0, 117f\n"
"ldr h10, [x16, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
"b 117f\n"
"109:" // Height 3: Partial accumulate: partial_8_0
"tbz x8, #3, 113f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
"tbz x8, #2, 111f\n"
"ldr d9, [x16], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x8, #1, 110f\n"
"ld1 { v9.s }[2], [x16], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
"tbz x8, #0, 117f\n"
"ld1 { v9.h }[6], [x16]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
"b 117f\n"
"110:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x8, #0, 117f\n"
"ld1 { v9.h }[4], [x16]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
"b 117f\n"
"111:" // Height 3: Partial accumulate: partial_2_8
"tbz x8, #1, 112f\n"
"ldr s9, [x16], #0x4\n"
"mov x20, #0x14\n"
- "ldr s13, [x25], #0x4\n"
- "ldr s17, [x24], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
"tbz x8, #0, 117f\n"
"ld1 { v9.h }[2], [x16]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
"b 117f\n"
"112:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x8, #0, 117f\n"
"ldr h9, [x16, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
"b 117f\n"
"113:" // Height 3: Partial accumulate: partial_4_0
"tbz x8, #2, 115f\n"
"ldr d8, [x16], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
"tbz x8, #1, 114f\n"
"ld1 { v8.s }[2], [x16], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
"tbz x8, #0, 117f\n"
"ld1 { v8.h }[6], [x16]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
"b 117f\n"
"114:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x8, #0, 117f\n"
"ld1 { v8.h }[4], [x16]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
"b 117f\n"
"115:" // Height 3: Partial accumulate: partial_2_0
"tbz x8, #1, 116f\n"
"ldr s8, [x16], #0x4\n"
"mov x20, #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
"tbz x8, #0, 117f\n"
"ld1 { v8.h }[2], [x16]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
"b 117f\n"
"116:" // Height 3: Partial accumulate: partial_1_0
"ldr h8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h12, [x25, #0x0]\n"
- "ldr h16, [x24, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
"117:" // Height 3: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 120f\n"
@@ -1499,14 +1501,14 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
"b 120f\n"
"119:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -1525,8 +1527,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"mov x15, #0x0\n"
"121:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 122f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1561,11 +1563,15 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v16.8h, v6.8h, v2.h[0]\n"
"ldr d21, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "mov v21.d[1], x21\n"
+ "add x13, x13, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr x21, [x17, #0x48]\n"
+ "add x12, x12, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"ldr d20, [x17, #0x30]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x48]\n"
+ "add x11, x11, #0x10\n"
+ "ldr x24, [x13, #0x8]\n"
"mov v20.d[1], x20\n"
"fmla v10.8h, v21.8h, v0.h[0]\n"
"fmla v14.8h, v21.8h, v1.h[0]\n"
@@ -1573,11 +1579,15 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v18.8h, v21.8h, v2.h[0]\n"
"ldr d21, [x17, #0x40]\n"
"fmla v11.8h, v20.8h, v0.h[0]\n"
- "mov v21.d[1], x21\n"
+ "ldr x23, [x12, #0x8]\n"
"fmla v15.8h, v20.8h, v1.h[0]\n"
- "ldr x21, [x17, #0x68]\n"
+ "ldr x22, [x11, #0x8]\n"
"fmla v19.8h, v20.8h, v2.h[0]\n"
"ldr d20, [x17, #0x50]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x68]\n"
+ "sub x14, x14, #0x8\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"mov v20.d[1], x20\n"
"fmla v8.8h, v21.8h, v0.h[1]\n"
"fmla v12.8h, v21.8h, v1.h[1]\n"
@@ -1585,11 +1595,14 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v16.8h, v21.8h, v2.h[1]\n"
"ldr d21, [x17, #0x60]\n"
"fmla v9.8h, v20.8h, v0.h[1]\n"
- "mov v21.d[1], x21\n"
+ "cmp x14, #0x10\n"
"fmla v13.8h, v20.8h, v1.h[1]\n"
- "ldr x21, [x17, #0x88]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v17.8h, v20.8h, v2.h[1]\n"
"ldr d20, [x17, #0x70]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"mov v20.d[1], x20\n"
"fmla v10.8h, v21.8h, v0.h[1]\n"
"fmla v14.8h, v21.8h, v1.h[1]\n"
@@ -1597,11 +1610,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v18.8h, v21.8h, v2.h[1]\n"
"ldr d21, [x17, #0x80]\n"
"fmla v11.8h, v20.8h, v0.h[1]\n"
- "mov v21.d[1], x21\n"
"fmla v15.8h, v20.8h, v1.h[1]\n"
- "ldr x21, [x17, #0xa8]\n"
"fmla v19.8h, v20.8h, v2.h[1]\n"
"ldr d20, [x17, #0x90]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xa8]\n"
"mov v20.d[1], x20\n"
"fmla v8.8h, v21.8h, v0.h[2]\n"
"fmla v12.8h, v21.8h, v1.h[2]\n"
@@ -1609,11 +1622,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v16.8h, v21.8h, v2.h[2]\n"
"ldr d21, [x17, #0xa0]\n"
"fmla v9.8h, v20.8h, v0.h[2]\n"
- "mov v21.d[1], x21\n"
"fmla v13.8h, v20.8h, v1.h[2]\n"
- "ldr x21, [x17, #0xc8]\n"
"fmla v17.8h, v20.8h, v2.h[2]\n"
"ldr d20, [x17, #0xb0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xc8]\n"
"mov v20.d[1], x20\n"
"fmla v10.8h, v21.8h, v0.h[2]\n"
"fmla v14.8h, v21.8h, v1.h[2]\n"
@@ -1621,11 +1634,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v18.8h, v21.8h, v2.h[2]\n"
"ldr d21, [x17, #0xc0]\n"
"fmla v11.8h, v20.8h, v0.h[2]\n"
- "mov v21.d[1], x21\n"
"fmla v15.8h, v20.8h, v1.h[2]\n"
- "ldr x21, [x17, #0xe8]\n"
"fmla v19.8h, v20.8h, v2.h[2]\n"
"ldr d20, [x17, #0xd0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xe8]\n"
"mov v20.d[1], x20\n"
"fmla v8.8h, v21.8h, v0.h[3]\n"
"fmla v12.8h, v21.8h, v1.h[3]\n"
@@ -1633,11 +1646,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v16.8h, v21.8h, v2.h[3]\n"
"ldr d21, [x17, #0xe0]\n"
"fmla v9.8h, v20.8h, v0.h[3]\n"
- "mov v21.d[1], x21\n"
"fmla v13.8h, v20.8h, v1.h[3]\n"
- "ldr x21, [x17, #0x108]\n"
"fmla v17.8h, v20.8h, v2.h[3]\n"
"ldr d20, [x17, #0xf0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x108]\n"
"mov v20.d[1], x20\n"
"fmla v10.8h, v21.8h, v0.h[3]\n"
"fmla v14.8h, v21.8h, v1.h[3]\n"
@@ -1645,11 +1658,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v18.8h, v21.8h, v2.h[3]\n"
"ldr d21, [x17, #0x100]\n"
"fmla v11.8h, v20.8h, v0.h[3]\n"
- "mov v21.d[1], x21\n"
"fmla v15.8h, v20.8h, v1.h[3]\n"
- "ldr x21, [x17, #0x128]\n"
"fmla v19.8h, v20.8h, v2.h[3]\n"
"ldr d20, [x17, #0x110]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x128]\n"
"mov v20.d[1], x20\n"
"fmla v8.8h, v21.8h, v0.h[4]\n"
"fmla v12.8h, v21.8h, v1.h[4]\n"
@@ -1657,11 +1670,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v16.8h, v21.8h, v2.h[4]\n"
"ldr d21, [x17, #0x120]\n"
"fmla v9.8h, v20.8h, v0.h[4]\n"
- "mov v21.d[1], x21\n"
"fmla v13.8h, v20.8h, v1.h[4]\n"
- "ldr x21, [x17, #0x148]\n"
"fmla v17.8h, v20.8h, v2.h[4]\n"
"ldr d20, [x17, #0x130]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x148]\n"
"mov v20.d[1], x20\n"
"fmla v10.8h, v21.8h, v0.h[4]\n"
"fmla v14.8h, v21.8h, v1.h[4]\n"
@@ -1669,11 +1682,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v18.8h, v21.8h, v2.h[4]\n"
"ldr d21, [x17, #0x140]\n"
"fmla v11.8h, v20.8h, v0.h[4]\n"
- "mov v21.d[1], x21\n"
"fmla v15.8h, v20.8h, v1.h[4]\n"
- "ldr x21, [x17, #0x168]\n"
"fmla v19.8h, v20.8h, v2.h[4]\n"
"ldr d20, [x17, #0x150]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x168]\n"
"mov v20.d[1], x20\n"
"fmla v8.8h, v21.8h, v0.h[5]\n"
"fmla v12.8h, v21.8h, v1.h[5]\n"
@@ -1681,11 +1694,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v16.8h, v21.8h, v2.h[5]\n"
"ldr d21, [x17, #0x160]\n"
"fmla v9.8h, v20.8h, v0.h[5]\n"
- "mov v21.d[1], x21\n"
"fmla v13.8h, v20.8h, v1.h[5]\n"
- "ldr x21, [x17, #0x188]\n"
"fmla v17.8h, v20.8h, v2.h[5]\n"
"ldr d20, [x17, #0x170]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x188]\n"
"mov v20.d[1], x20\n"
"fmla v10.8h, v21.8h, v0.h[5]\n"
"fmla v14.8h, v21.8h, v1.h[5]\n"
@@ -1693,11 +1706,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v18.8h, v21.8h, v2.h[5]\n"
"ldr d21, [x17, #0x180]\n"
"fmla v11.8h, v20.8h, v0.h[5]\n"
- "mov v21.d[1], x21\n"
"fmla v15.8h, v20.8h, v1.h[5]\n"
- "ldr x21, [x17, #0x1a8]\n"
"fmla v19.8h, v20.8h, v2.h[5]\n"
"ldr d20, [x17, #0x190]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x1a8]\n"
"mov v20.d[1], x20\n"
"fmla v8.8h, v21.8h, v0.h[6]\n"
"fmla v12.8h, v21.8h, v1.h[6]\n"
@@ -1705,11 +1718,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v16.8h, v21.8h, v2.h[6]\n"
"ldr d21, [x17, #0x1a0]\n"
"fmla v9.8h, v20.8h, v0.h[6]\n"
- "mov v21.d[1], x21\n"
"fmla v13.8h, v20.8h, v1.h[6]\n"
- "ldr x21, [x17, #0x1c8]\n"
"fmla v17.8h, v20.8h, v2.h[6]\n"
"ldr d20, [x17, #0x1b0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x1c8]\n"
"mov v20.d[1], x20\n"
"fmla v10.8h, v21.8h, v0.h[6]\n"
"fmla v14.8h, v21.8h, v1.h[6]\n"
@@ -1717,11 +1730,11 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v18.8h, v21.8h, v2.h[6]\n"
"ldr d21, [x17, #0x1c0]\n"
"fmla v11.8h, v20.8h, v0.h[6]\n"
- "mov v21.d[1], x21\n"
"fmla v15.8h, v20.8h, v1.h[6]\n"
- "ldr x21, [x17, #0x1e8]\n"
"fmla v19.8h, v20.8h, v2.h[6]\n"
"ldr d20, [x17, #0x1d0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x1e8]\n"
"mov v20.d[1], x20\n"
"fmla v8.8h, v21.8h, v0.h[7]\n"
"fmla v12.8h, v21.8h, v1.h[7]\n"
@@ -1729,40 +1742,29 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v16.8h, v21.8h, v2.h[7]\n"
"ldr d21, [x17, #0x1e0]\n"
"fmla v9.8h, v20.8h, v0.h[7]\n"
- "mov v21.d[1], x21\n"
"fmla v13.8h, v20.8h, v1.h[7]\n"
- "add x13, x13, #0x10\n"
"fmla v17.8h, v20.8h, v2.h[7]\n"
"ldr d20, [x17, #0x1f0]\n"
- "mov v20.d[1], x20\n"
- "add x12, x12, #0x10\n"
- "add x11, x11, #0x10\n"
+ "mov v21.d[1], x21\n"
"add x17, x17, #0x200\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v20.d[1], x20\n"
"fmla v10.8h, v21.8h, v0.h[7]\n"
- "ldr x20, [x17, #0x8]\n"
"fmla v14.8h, v21.8h, v1.h[7]\n"
- "ldr x23, [x13, #0x8]\n"
+ "ldr x20, [x17, #0x18]\n"
"fmla v18.8h, v21.8h, v2.h[7]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.8h, v20.8h, v0.h[7]\n"
"ldr d0, [x13, #0x0]\n"
"fmla v15.8h, v20.8h, v1.h[7]\n"
"ldr d1, [x12, #0x0]\n"
- "ldr x22, [x12, #0x8]\n"
"fmla v19.8h, v20.8h, v2.h[7]\n"
"ldr d2, [x11, #0x0]\n"
- "sub x14, x14, #0x8\n"
"ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x10\n"
- "ldr x21, [x11, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x17, #0x18]\n"
- "mov v0.d[1], x23\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v1.d[1], x22\n"
- "prfm pldl1keep, [x12, #0x80]\n"
- "mov v2.d[1], x21\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x24\n"
+ "mov v1.d[1], x23\n"
+ "mov v2.d[1], x22\n"
"mov v7.d[1], x20\n"
"bge 124b\n"
"125:" // Height 3: Multiply loop: Single iteration only
@@ -1908,8 +1910,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr h1, [x12], #0x2\n"
"ldr h0, [x11], #0x2\n"
"ldr q21, [x17, #0x0]\n"
- "fmla v8.8h, v21.8h, v2.h[0]\n"
"ldr q20, [x17, #0x10]\n"
+ "fmla v8.8h, v21.8h, v2.h[0]\n"
"fmla v12.8h, v21.8h, v1.h[0]\n"
"fmla v16.8h, v21.8h, v0.h[0]\n"
"ldr q21, [x17, #0x20]\n"
@@ -1931,28 +1933,28 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"cmp x15, x20\n"
"bne 121b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x16, #0x0]\n"
+ "add x26, x16, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 129f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v20.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v20.8h\n"
- "fmin v9.8h, v9.8h, v20.8h\n"
- "fmin v10.8h, v10.8h, v20.8h\n"
- "fmin v11.8h, v11.8h, v20.8h\n"
- "fmin v12.8h, v12.8h, v20.8h\n"
- "fmin v13.8h, v13.8h, v20.8h\n"
- "fmin v14.8h, v14.8h, v20.8h\n"
- "fmin v15.8h, v15.8h, v20.8h\n"
- "fmin v16.8h, v16.8h, v20.8h\n"
- "fmin v17.8h, v17.8h, v20.8h\n"
- "fmin v18.8h, v18.8h, v20.8h\n"
- "fmin v19.8h, v19.8h, v20.8h\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.8h }, [x21]\n"
"ld1r { v20.8h }, [x20]\n"
+ "fmin v8.8h, v8.8h, v21.8h\n"
+ "fmin v9.8h, v9.8h, v21.8h\n"
+ "fmin v10.8h, v10.8h, v21.8h\n"
+ "fmin v11.8h, v11.8h, v21.8h\n"
+ "fmin v12.8h, v12.8h, v21.8h\n"
+ "fmin v13.8h, v13.8h, v21.8h\n"
+ "fmin v14.8h, v14.8h, v21.8h\n"
+ "fmin v15.8h, v15.8h, v21.8h\n"
+ "fmin v16.8h, v16.8h, v21.8h\n"
+ "fmin v17.8h, v17.8h, v21.8h\n"
+ "fmin v18.8h, v18.8h, v21.8h\n"
+ "fmin v19.8h, v19.8h, v21.8h\n"
"fmax v8.8h, v8.8h, v20.8h\n"
"fmax v9.8h, v9.8h, v20.8h\n"
"fmax v10.8h, v10.8h, v20.8h\n"
@@ -1971,159 +1973,159 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"tbz x8, #4, 137f\n"
"st1 { v8.8h }, [x16], #0x10\n"
"st1 { v9.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
"tbz x8, #3, 133f\n"
"st1 { v10.8h }, [x16], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
"tbz x8, #2, 131f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x8, #1, 130f\n"
"st1 { v11.s }[2], [x16], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
"tbz x8, #0, 145f\n"
"st1 { v11.h }[6], [x16]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
"b 145f\n"
"130:" // Height 3: Partial direct writeback: partial_1_28
"tbz x8, #0, 145f\n"
"st1 { v11.h }[4], [x16]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
"b 145f\n"
"131:" // Height 3: Partial direct writeback: partial_2_24
"tbz x8, #1, 132f\n"
"str s11, [x16], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
"tbz x8, #0, 145f\n"
"st1 { v11.h }[2], [x16]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
"b 145f\n"
"132:" // Height 3: Partial direct writeback: partial_1_24
"tbz x8, #0, 145f\n"
"str h11, [x16, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
"b 145f\n"
"133:" // Height 3: Partial direct writeback: partial_4_16
"tbz x8, #2, 135f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x8, #1, 134f\n"
"st1 { v10.s }[2], [x16], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
"tbz x8, #0, 145f\n"
"st1 { v10.h }[6], [x16]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
"b 145f\n"
"134:" // Height 3: Partial direct writeback: partial_1_20
"tbz x8, #0, 145f\n"
"st1 { v10.h }[4], [x16]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
"b 145f\n"
"135:" // Height 3: Partial direct writeback: partial_2_16
"tbz x8, #1, 136f\n"
"str s10, [x16], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
"tbz x8, #0, 145f\n"
"st1 { v10.h }[2], [x16]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
"b 145f\n"
"136:" // Height 3: Partial direct writeback: partial_1_16
"tbz x8, #0, 145f\n"
"str h10, [x16, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
"b 145f\n"
"137:" // Height 3: Partial direct writeback: partial_8_0
"tbz x8, #3, 141f\n"
"st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
"tbz x8, #2, 139f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x8, #1, 138f\n"
"st1 { v9.s }[2], [x16], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
"tbz x8, #0, 145f\n"
"st1 { v9.h }[6], [x16]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
"b 145f\n"
"138:" // Height 3: Partial direct writeback: partial_1_12
"tbz x8, #0, 145f\n"
"st1 { v9.h }[4], [x16]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
"b 145f\n"
"139:" // Height 3: Partial direct writeback: partial_2_8
"tbz x8, #1, 140f\n"
"str s9, [x16], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
"tbz x8, #0, 145f\n"
"st1 { v9.h }[2], [x16]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
"b 145f\n"
"140:" // Height 3: Partial direct writeback: partial_1_8
"tbz x8, #0, 145f\n"
"str h9, [x16, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
"b 145f\n"
"141:" // Height 3: Partial direct writeback: partial_4_0
"tbz x8, #2, 143f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x8, #1, 142f\n"
"st1 { v8.s }[2], [x16], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
"tbz x8, #0, 145f\n"
"st1 { v8.h }[6], [x16]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
"b 145f\n"
"142:" // Height 3: Partial direct writeback: partial_1_4
"tbz x8, #0, 145f\n"
"st1 { v8.h }[4], [x16]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
"b 145f\n"
"143:" // Height 3: Partial direct writeback: partial_2_0
"tbz x8, #1, 144f\n"
"str s8, [x16], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
"tbz x8, #0, 145f\n"
"st1 { v8.h }[2], [x16]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
"b 145f\n"
"144:" // Height 3: Partial direct writeback: partial_1_0
"str h8, [x16, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
"145:" // Height 3: Partial direct writeback: Done
"b 147f\n"
"146:" // Height 3: Full writeback
@@ -2132,35 +2134,35 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"147:" // Height 3: Writeback done
"subs x8, x8, #0x20\n"
"bgt 100b\n"
"b 296f\n"
"148:" // Height 4
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"149:" // Height 4: Column loop
"cbz x7, 150f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -2172,215 +2174,215 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"150:" // Height 4: no bias
"tbz %x[flags], #0, 168f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
"cmp x8, #0x20\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x26, x16, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
+ "add x24, x25, x20, LSL #1\n"
"bge 167f\n"
"tbz x8, #4, 158f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
"ld1 { v9.8h }, [x16], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
"tbz x8, #3, 154f\n"
"ld1 { v10.8h }, [x16], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
"tbz x8, #2, 152f\n"
"ldr d11, [x16], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x8, #1, 151f\n"
"ld1 { v11.s }[2], [x16], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
"tbz x8, #0, 166f\n"
"ld1 { v11.h }[6], [x16]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
"b 166f\n"
"151:" // Height 4: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x8, #0, 166f\n"
"ld1 { v11.h }[4], [x16]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
"b 166f\n"
"152:" // Height 4: Partial accumulate: partial_2_24
"tbz x8, #1, 153f\n"
"ldr s11, [x16], #0x4\n"
"mov x20, #0x34\n"
- "ldr s15, [x25], #0x4\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
"tbz x8, #0, 166f\n"
"ld1 { v11.h }[2], [x16]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
"b 166f\n"
"153:" // Height 4: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x8, #0, 166f\n"
"ldr h11, [x16, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
"b 166f\n"
"154:" // Height 4: Partial accumulate: partial_4_16
"tbz x8, #2, 156f\n"
"ldr d10, [x16], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x8, #1, 155f\n"
"ld1 { v10.s }[2], [x16], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
"tbz x8, #0, 166f\n"
"ld1 { v10.h }[6], [x16]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
"b 166f\n"
"155:" // Height 4: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x8, #0, 166f\n"
"ld1 { v10.h }[4], [x16]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
"b 166f\n"
"156:" // Height 4: Partial accumulate: partial_2_16
"tbz x8, #1, 157f\n"
"ldr s10, [x16], #0x4\n"
"mov x20, #0x24\n"
- "ldr s14, [x25], #0x4\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
"tbz x8, #0, 166f\n"
"ld1 { v10.h }[2], [x16]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
"b 166f\n"
"157:" // Height 4: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x8, #0, 166f\n"
"ldr h10, [x16, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
"b 166f\n"
"158:" // Height 4: Partial accumulate: partial_8_0
"tbz x8, #3, 162f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
"tbz x8, #2, 160f\n"
"ldr d9, [x16], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x8, #1, 159f\n"
"ld1 { v9.s }[2], [x16], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
"tbz x8, #0, 166f\n"
"ld1 { v9.h }[6], [x16]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
"b 166f\n"
"159:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x8, #0, 166f\n"
"ld1 { v9.h }[4], [x16]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
"b 166f\n"
"160:" // Height 4: Partial accumulate: partial_2_8
"tbz x8, #1, 161f\n"
"ldr s9, [x16], #0x4\n"
"mov x20, #0x14\n"
- "ldr s13, [x25], #0x4\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
"tbz x8, #0, 166f\n"
"ld1 { v9.h }[2], [x16]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
"b 166f\n"
"161:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x8, #0, 166f\n"
"ldr h9, [x16, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
"b 166f\n"
"162:" // Height 4: Partial accumulate: partial_4_0
"tbz x8, #2, 164f\n"
"ldr d8, [x16], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x8, #1, 163f\n"
"ld1 { v8.s }[2], [x16], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
"tbz x8, #0, 166f\n"
"ld1 { v8.h }[6], [x16]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
"b 166f\n"
"163:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x8, #0, 166f\n"
"ld1 { v8.h }[4], [x16]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
"b 166f\n"
"164:" // Height 4: Partial accumulate: partial_2_0
"tbz x8, #1, 165f\n"
"ldr s8, [x16], #0x4\n"
"mov x20, #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
"tbz x8, #0, 166f\n"
"ld1 { v8.h }[2], [x16]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
"b 166f\n"
"165:" // Height 4: Partial accumulate: partial_1_0
"ldr h8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h12, [x25, #0x0]\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
"166:" // Height 4: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 169f\n"
@@ -2389,18 +2391,18 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"b 169f\n"
"168:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -2423,8 +2425,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"mov x15, #0x0\n"
"170:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 171f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2457,234 +2459,233 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"blt 174f\n"
"173:" // Height 4: Multiply loop: Main loop head
"fmla v8.8h, v6.8h, v0.h[0]\n"
- "ldr x21, [x17, #0x28]\n"
+ "ldr x20, [x17, #0x28]\n"
"fmla v12.8h, v6.8h, v1.h[0]\n"
- "ldr x20, [x17, #0x38]\n"
+ "ldr x21, [x17, #0x38]\n"
"fmla v16.8h, v6.8h, v2.h[0]\n"
"add x13, x13, #0x10\n"
"fmla v20.8h, v6.8h, v3.h[0]\n"
"ldr d25, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "mov v25.d[1], x21\n"
+ "add x12, x12, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr x21, [x17, #0x48]\n"
+ "add x11, x11, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x12, x12, #0x10\n"
+ "mov v25.d[1], x20\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"ldr d24, [x17, #0x30]\n"
- "mov v24.d[1], x20\n"
"fmla v10.8h, v25.8h, v0.h[0]\n"
+ "ldr x20, [x17, #0x48]\n"
"fmla v14.8h, v25.8h, v1.h[0]\n"
- "ldr x20, [x17, #0x58]\n"
+ "add x10, x10, #0x10\n"
+ "mov v24.d[1], x21\n"
"fmla v18.8h, v25.8h, v2.h[0]\n"
- "add x11, x11, #0x10\n"
"fmla v22.8h, v25.8h, v3.h[0]\n"
"ldr d25, [x17, #0x40]\n"
"fmla v11.8h, v24.8h, v0.h[0]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x58]\n"
"fmla v15.8h, v24.8h, v1.h[0]\n"
- "ldr x21, [x17, #0x68]\n"
+ "ldr x25, [x13, #0x8]\n"
"fmla v19.8h, v24.8h, v2.h[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v25.d[1], x20\n"
"fmla v23.8h, v24.8h, v3.h[0]\n"
"ldr d24, [x17, #0x50]\n"
- "mov v24.d[1], x20\n"
"fmla v8.8h, v25.8h, v0.h[1]\n"
+ "ldr x20, [x17, #0x68]\n"
"fmla v12.8h, v25.8h, v1.h[1]\n"
- "ldr x20, [x17, #0x78]\n"
+ "ldr x24, [x12, #0x8]\n"
+ "mov v24.d[1], x21\n"
"fmla v16.8h, v25.8h, v2.h[1]\n"
- "ldr x25, [x13, #0x8]\n"
"fmla v20.8h, v25.8h, v3.h[1]\n"
"ldr d25, [x17, #0x60]\n"
"fmla v9.8h, v24.8h, v0.h[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x78]\n"
"fmla v13.8h, v24.8h, v1.h[1]\n"
- "ldr x21, [x17, #0x88]\n"
+ "ldr x23, [x11, #0x8]\n"
"fmla v17.8h, v24.8h, v2.h[1]\n"
- "ldr x24, [x12, #0x8]\n"
+ "mov v25.d[1], x20\n"
"fmla v21.8h, v24.8h, v3.h[1]\n"
"ldr d24, [x17, #0x70]\n"
- "mov v24.d[1], x20\n"
"fmla v10.8h, v25.8h, v0.h[1]\n"
+ "ldr x20, [x17, #0x88]\n"
"fmla v14.8h, v25.8h, v1.h[1]\n"
- "ldr x20, [x17, #0x98]\n"
+ "ldr x22, [x10, #0x8]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.8h, v25.8h, v2.h[1]\n"
- "ldr x23, [x11, #0x8]\n"
"fmla v22.8h, v25.8h, v3.h[1]\n"
"ldr d25, [x17, #0x80]\n"
"fmla v11.8h, v24.8h, v0.h[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x98]\n"
"fmla v15.8h, v24.8h, v1.h[1]\n"
- "ldr x21, [x17, #0xa8]\n"
+ "sub x14, x14, #0x8\n"
"fmla v19.8h, v24.8h, v2.h[1]\n"
- "ldr x22, [x10, #0x8]\n"
+ "mov v25.d[1], x20\n"
"fmla v23.8h, v24.8h, v3.h[1]\n"
"ldr d24, [x17, #0x90]\n"
- "mov v24.d[1], x20\n"
"fmla v8.8h, v25.8h, v0.h[2]\n"
+ "ldr x20, [x17, #0xa8]\n"
"fmla v12.8h, v25.8h, v1.h[2]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "cmp x14, #0x10\n"
+ "mov v24.d[1], x21\n"
"fmla v16.8h, v25.8h, v2.h[2]\n"
- "sub x14, x14, #0x8\n"
"fmla v20.8h, v25.8h, v3.h[2]\n"
"ldr d25, [x17, #0xa0]\n"
"fmla v9.8h, v24.8h, v0.h[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xb8]\n"
"fmla v13.8h, v24.8h, v1.h[2]\n"
- "ldr x21, [x17, #0xc8]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"fmla v17.8h, v24.8h, v2.h[2]\n"
- "cmp x14, #0x10\n"
+ "mov v25.d[1], x20\n"
"fmla v21.8h, v24.8h, v3.h[2]\n"
"ldr d24, [x17, #0xb0]\n"
- "mov v24.d[1], x20\n"
"fmla v10.8h, v25.8h, v0.h[2]\n"
+ "ldr x20, [x17, #0xc8]\n"
"fmla v14.8h, v25.8h, v1.h[2]\n"
- "ldr x20, [x17, #0xd8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.8h, v25.8h, v2.h[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"fmla v22.8h, v25.8h, v3.h[2]\n"
"ldr d25, [x17, #0xc0]\n"
"fmla v11.8h, v24.8h, v0.h[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xd8]\n"
"fmla v15.8h, v24.8h, v1.h[2]\n"
- "ldr x21, [x17, #0xe8]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"fmla v19.8h, v24.8h, v2.h[2]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v25.d[1], x20\n"
"fmla v23.8h, v24.8h, v3.h[2]\n"
"ldr d24, [x17, #0xd0]\n"
- "mov v24.d[1], x20\n"
"fmla v8.8h, v25.8h, v0.h[3]\n"
+ "ldr x20, [x17, #0xe8]\n"
"fmla v12.8h, v25.8h, v1.h[3]\n"
- "ldr x20, [x17, #0xf8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
+ "mov v24.d[1], x21\n"
"fmla v16.8h, v25.8h, v2.h[3]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
"fmla v20.8h, v25.8h, v3.h[3]\n"
"ldr d25, [x17, #0xe0]\n"
"fmla v9.8h, v24.8h, v0.h[3]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xf8]\n"
"fmla v13.8h, v24.8h, v1.h[3]\n"
- "ldr x21, [x17, #0x108]\n"
"fmla v17.8h, v24.8h, v2.h[3]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "mov v25.d[1], x20\n"
"fmla v21.8h, v24.8h, v3.h[3]\n"
"ldr d24, [x17, #0xf0]\n"
- "mov v24.d[1], x20\n"
"fmla v10.8h, v25.8h, v0.h[3]\n"
+ "ldr x20, [x17, #0x108]\n"
"fmla v14.8h, v25.8h, v1.h[3]\n"
- "ldr x20, [x17, #0x118]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.8h, v25.8h, v2.h[3]\n"
"fmla v22.8h, v25.8h, v3.h[3]\n"
"ldr d25, [x17, #0x100]\n"
"fmla v11.8h, v24.8h, v0.h[3]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x118]\n"
"fmla v15.8h, v24.8h, v1.h[3]\n"
- "ldr x21, [x17, #0x128]\n"
"fmla v19.8h, v24.8h, v2.h[3]\n"
+ "mov v25.d[1], x20\n"
"fmla v23.8h, v24.8h, v3.h[3]\n"
"ldr d24, [x17, #0x110]\n"
- "mov v24.d[1], x20\n"
"fmla v8.8h, v25.8h, v0.h[4]\n"
+ "ldr x20, [x17, #0x128]\n"
"fmla v12.8h, v25.8h, v1.h[4]\n"
- "ldr x20, [x17, #0x138]\n"
+ "mov v24.d[1], x21\n"
"fmla v16.8h, v25.8h, v2.h[4]\n"
"fmla v20.8h, v25.8h, v3.h[4]\n"
"ldr d25, [x17, #0x120]\n"
"fmla v9.8h, v24.8h, v0.h[4]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x138]\n"
"fmla v13.8h, v24.8h, v1.h[4]\n"
- "ldr x21, [x17, #0x148]\n"
"fmla v17.8h, v24.8h, v2.h[4]\n"
+ "mov v25.d[1], x20\n"
"fmla v21.8h, v24.8h, v3.h[4]\n"
"ldr d24, [x17, #0x130]\n"
- "mov v24.d[1], x20\n"
"fmla v10.8h, v25.8h, v0.h[4]\n"
+ "ldr x20, [x17, #0x148]\n"
"fmla v14.8h, v25.8h, v1.h[4]\n"
- "ldr x20, [x17, #0x158]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.8h, v25.8h, v2.h[4]\n"
"fmla v22.8h, v25.8h, v3.h[4]\n"
"ldr d25, [x17, #0x140]\n"
"fmla v11.8h, v24.8h, v0.h[4]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x158]\n"
"fmla v15.8h, v24.8h, v1.h[4]\n"
- "ldr x21, [x17, #0x168]\n"
"fmla v19.8h, v24.8h, v2.h[4]\n"
+ "mov v25.d[1], x20\n"
"fmla v23.8h, v24.8h, v3.h[4]\n"
"ldr d24, [x17, #0x150]\n"
- "mov v24.d[1], x20\n"
"fmla v8.8h, v25.8h, v0.h[5]\n"
+ "ldr x20, [x17, #0x168]\n"
"fmla v12.8h, v25.8h, v1.h[5]\n"
- "ldr x20, [x17, #0x178]\n"
+ "mov v24.d[1], x21\n"
"fmla v16.8h, v25.8h, v2.h[5]\n"
"fmla v20.8h, v25.8h, v3.h[5]\n"
"ldr d25, [x17, #0x160]\n"
"fmla v9.8h, v24.8h, v0.h[5]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x178]\n"
"fmla v13.8h, v24.8h, v1.h[5]\n"
- "ldr x21, [x17, #0x188]\n"
"fmla v17.8h, v24.8h, v2.h[5]\n"
+ "mov v25.d[1], x20\n"
"fmla v21.8h, v24.8h, v3.h[5]\n"
"ldr d24, [x17, #0x170]\n"
- "mov v24.d[1], x20\n"
"fmla v10.8h, v25.8h, v0.h[5]\n"
+ "ldr x20, [x17, #0x188]\n"
"fmla v14.8h, v25.8h, v1.h[5]\n"
- "ldr x20, [x17, #0x198]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.8h, v25.8h, v2.h[5]\n"
"fmla v22.8h, v25.8h, v3.h[5]\n"
"ldr d25, [x17, #0x180]\n"
"fmla v11.8h, v24.8h, v0.h[5]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x198]\n"
"fmla v15.8h, v24.8h, v1.h[5]\n"
- "ldr x21, [x17, #0x1a8]\n"
"fmla v19.8h, v24.8h, v2.h[5]\n"
+ "mov v25.d[1], x20\n"
"fmla v23.8h, v24.8h, v3.h[5]\n"
"ldr d24, [x17, #0x190]\n"
- "mov v24.d[1], x20\n"
"fmla v8.8h, v25.8h, v0.h[6]\n"
+ "ldr x20, [x17, #0x1a8]\n"
"fmla v12.8h, v25.8h, v1.h[6]\n"
- "ldr x20, [x17, #0x1b8]\n"
+ "mov v24.d[1], x21\n"
"fmla v16.8h, v25.8h, v2.h[6]\n"
"fmla v20.8h, v25.8h, v3.h[6]\n"
"ldr d25, [x17, #0x1a0]\n"
"fmla v9.8h, v24.8h, v0.h[6]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x1b8]\n"
"fmla v13.8h, v24.8h, v1.h[6]\n"
- "ldr x21, [x17, #0x1c8]\n"
"fmla v17.8h, v24.8h, v2.h[6]\n"
+ "mov v25.d[1], x20\n"
"fmla v21.8h, v24.8h, v3.h[6]\n"
"ldr d24, [x17, #0x1b0]\n"
- "mov v24.d[1], x20\n"
"fmla v10.8h, v25.8h, v0.h[6]\n"
+ "ldr x20, [x17, #0x1c8]\n"
"fmla v14.8h, v25.8h, v1.h[6]\n"
- "ldr x20, [x17, #0x1d8]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.8h, v25.8h, v2.h[6]\n"
"fmla v22.8h, v25.8h, v3.h[6]\n"
"ldr d25, [x17, #0x1c0]\n"
"fmla v11.8h, v24.8h, v0.h[6]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x1d8]\n"
"fmla v15.8h, v24.8h, v1.h[6]\n"
- "ldr x21, [x17, #0x1e8]\n"
"fmla v19.8h, v24.8h, v2.h[6]\n"
+ "mov v25.d[1], x20\n"
"fmla v23.8h, v24.8h, v3.h[6]\n"
"ldr d24, [x17, #0x1d0]\n"
- "mov v24.d[1], x20\n"
"fmla v8.8h, v25.8h, v0.h[7]\n"
+ "ldr x20, [x17, #0x1e8]\n"
"fmla v12.8h, v25.8h, v1.h[7]\n"
- "ldr x20, [x17, #0x1f8]\n"
+ "mov v24.d[1], x21\n"
"fmla v16.8h, v25.8h, v2.h[7]\n"
"fmla v20.8h, v25.8h, v3.h[7]\n"
"ldr d25, [x17, #0x1e0]\n"
"fmla v9.8h, v24.8h, v0.h[7]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x1f8]\n"
"fmla v13.8h, v24.8h, v1.h[7]\n"
"fmla v17.8h, v24.8h, v2.h[7]\n"
+ "mov v25.d[1], x20\n"
"fmla v21.8h, v24.8h, v3.h[7]\n"
"ldr d24, [x17, #0x1f0]\n"
- "mov v24.d[1], x20\n"
"add x17, x17, #0x200\n"
"fmla v10.8h, v25.8h, v0.h[7]\n"
- "ldr x21, [x17, #0x8]\n"
"fmla v14.8h, v25.8h, v1.h[7]\n"
- "ldr x20, [x17, #0x18]\n"
+ "ldr x20, [x17, #0x8]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.8h, v25.8h, v2.h[7]\n"
"fmla v22.8h, v25.8h, v3.h[7]\n"
"ldr d6, [x17, #0x0]\n"
@@ -2697,7 +2698,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v23.8h, v24.8h, v3.h[7]\n"
"ldr d3, [x10, #0x0]\n"
"ldr d7, [x17, #0x10]\n"
- "mov v6.d[1], x21\n"
+ "mov v6.d[1], x20\n"
+ "ldr x20, [x17, #0x18]\n"
"mov v0.d[1], x25\n"
"mov v1.d[1], x24\n"
"mov v2.d[1], x23\n"
@@ -2882,8 +2884,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr h1, [x11], #0x2\n"
"ldr h0, [x10], #0x2\n"
"ldr q25, [x17, #0x0]\n"
- "fmla v8.8h, v25.8h, v3.h[0]\n"
"ldr q24, [x17, #0x10]\n"
+ "fmla v8.8h, v25.8h, v3.h[0]\n"
"fmla v12.8h, v25.8h, v2.h[0]\n"
"fmla v16.8h, v25.8h, v1.h[0]\n"
"fmla v20.8h, v25.8h, v0.h[0]\n"
@@ -2909,34 +2911,34 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"cmp x15, x20\n"
"bne 170b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"prfm pstl1keep, [x16, #0x0]\n"
+ "add x26, x16, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 178f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v24.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v24.8h\n"
- "fmin v9.8h, v9.8h, v24.8h\n"
- "fmin v10.8h, v10.8h, v24.8h\n"
- "fmin v11.8h, v11.8h, v24.8h\n"
- "fmin v12.8h, v12.8h, v24.8h\n"
- "fmin v13.8h, v13.8h, v24.8h\n"
- "fmin v14.8h, v14.8h, v24.8h\n"
- "fmin v15.8h, v15.8h, v24.8h\n"
- "fmin v16.8h, v16.8h, v24.8h\n"
- "fmin v17.8h, v17.8h, v24.8h\n"
- "fmin v18.8h, v18.8h, v24.8h\n"
- "fmin v19.8h, v19.8h, v24.8h\n"
- "fmin v20.8h, v20.8h, v24.8h\n"
- "fmin v21.8h, v21.8h, v24.8h\n"
- "fmin v22.8h, v22.8h, v24.8h\n"
- "fmin v23.8h, v23.8h, v24.8h\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v25.8h }, [x21]\n"
"ld1r { v24.8h }, [x20]\n"
+ "fmin v8.8h, v8.8h, v25.8h\n"
+ "fmin v9.8h, v9.8h, v25.8h\n"
+ "fmin v10.8h, v10.8h, v25.8h\n"
+ "fmin v11.8h, v11.8h, v25.8h\n"
+ "fmin v12.8h, v12.8h, v25.8h\n"
+ "fmin v13.8h, v13.8h, v25.8h\n"
+ "fmin v14.8h, v14.8h, v25.8h\n"
+ "fmin v15.8h, v15.8h, v25.8h\n"
+ "fmin v16.8h, v16.8h, v25.8h\n"
+ "fmin v17.8h, v17.8h, v25.8h\n"
+ "fmin v18.8h, v18.8h, v25.8h\n"
+ "fmin v19.8h, v19.8h, v25.8h\n"
+ "fmin v20.8h, v20.8h, v25.8h\n"
+ "fmin v21.8h, v21.8h, v25.8h\n"
+ "fmin v22.8h, v22.8h, v25.8h\n"
+ "fmin v23.8h, v23.8h, v25.8h\n"
"fmax v8.8h, v8.8h, v24.8h\n"
"fmax v9.8h, v9.8h, v24.8h\n"
"fmax v10.8h, v10.8h, v24.8h\n"
@@ -2959,191 +2961,191 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"tbz x8, #4, 186f\n"
"st1 { v8.8h }, [x16], #0x10\n"
"st1 { v9.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
"tbz x8, #3, 182f\n"
"st1 { v10.8h }, [x16], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
"tbz x8, #2, 180f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
"tbz x8, #1, 179f\n"
"st1 { v11.s }[2], [x16], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
"tbz x8, #0, 194f\n"
"st1 { v11.h }[6], [x16]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
"b 194f\n"
"179:" // Height 4: Partial direct writeback: partial_1_28
"tbz x8, #0, 194f\n"
"st1 { v11.h }[4], [x16]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
"b 194f\n"
"180:" // Height 4: Partial direct writeback: partial_2_24
"tbz x8, #1, 181f\n"
"str s11, [x16], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
"tbz x8, #0, 194f\n"
"st1 { v11.h }[2], [x16]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
"b 194f\n"
"181:" // Height 4: Partial direct writeback: partial_1_24
"tbz x8, #0, 194f\n"
"str h11, [x16, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
"b 194f\n"
"182:" // Height 4: Partial direct writeback: partial_4_16
"tbz x8, #2, 184f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
"tbz x8, #1, 183f\n"
"st1 { v10.s }[2], [x16], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
"tbz x8, #0, 194f\n"
"st1 { v10.h }[6], [x16]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
"b 194f\n"
"183:" // Height 4: Partial direct writeback: partial_1_20
"tbz x8, #0, 194f\n"
"st1 { v10.h }[4], [x16]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
"b 194f\n"
"184:" // Height 4: Partial direct writeback: partial_2_16
"tbz x8, #1, 185f\n"
"str s10, [x16], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
"tbz x8, #0, 194f\n"
"st1 { v10.h }[2], [x16]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
"b 194f\n"
"185:" // Height 4: Partial direct writeback: partial_1_16
"tbz x8, #0, 194f\n"
"str h10, [x16, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
"b 194f\n"
"186:" // Height 4: Partial direct writeback: partial_8_0
"tbz x8, #3, 190f\n"
"st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
"tbz x8, #2, 188f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
"tbz x8, #1, 187f\n"
"st1 { v9.s }[2], [x16], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
"tbz x8, #0, 194f\n"
"st1 { v9.h }[6], [x16]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
"b 194f\n"
"187:" // Height 4: Partial direct writeback: partial_1_12
"tbz x8, #0, 194f\n"
"st1 { v9.h }[4], [x16]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
"b 194f\n"
"188:" // Height 4: Partial direct writeback: partial_2_8
"tbz x8, #1, 189f\n"
"str s9, [x16], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
"tbz x8, #0, 194f\n"
"st1 { v9.h }[2], [x16]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
"b 194f\n"
"189:" // Height 4: Partial direct writeback: partial_1_8
"tbz x8, #0, 194f\n"
"str h9, [x16, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
"b 194f\n"
"190:" // Height 4: Partial direct writeback: partial_4_0
"tbz x8, #2, 192f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x8, #1, 191f\n"
"st1 { v8.s }[2], [x16], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
"tbz x8, #0, 194f\n"
"st1 { v8.h }[6], [x16]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
"b 194f\n"
"191:" // Height 4: Partial direct writeback: partial_1_4
"tbz x8, #0, 194f\n"
"st1 { v8.h }[4], [x16]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
"b 194f\n"
"192:" // Height 4: Partial direct writeback: partial_2_0
"tbz x8, #1, 193f\n"
"str s8, [x16], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
"tbz x8, #0, 194f\n"
"st1 { v8.h }[2], [x16]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
"b 194f\n"
"193:" // Height 4: Partial direct writeback: partial_1_0
"str h8, [x16, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
"194:" // Height 4: Partial direct writeback: Done
"b 196f\n"
"195:" // Height 4: Full writeback
@@ -3152,39 +3154,39 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
"196:" // Height 4: Writeback done
"subs x8, x8, #0x20\n"
"bgt 149b\n"
"b 296f\n"
"197:" // Height 5
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"198:" // Height 5: Column loop
"cbz x7, 199f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -3200,248 +3202,248 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"199:" // Height 5: no bias
"tbz %x[flags], #0, 217f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
+ "cmp x8, #0x20\n"
+ "add x26, x16, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
- "cmp x8, #0x20\n"
- "add x22, x23, x20, LSL #1\n"
"bge 216f\n"
"tbz x8, #4, 207f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
"ld1 { v9.8h }, [x16], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
+ "ld1 { v25.8h }, [x23], #0x10\n"
"tbz x8, #3, 203f\n"
"ld1 { v10.8h }, [x16], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
+ "ld1 { v26.8h }, [x23], #0x10\n"
"tbz x8, #2, 201f\n"
"ldr d11, [x16], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x8, #1, 200f\n"
"ld1 { v11.s }[2], [x16], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
"tbz x8, #0, 215f\n"
"ld1 { v11.h }[6], [x16]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
+ "ld1 { v27.h }[6], [x23]\n"
"b 215f\n"
"200:" // Height 5: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x8, #0, 215f\n"
"ld1 { v11.h }[4], [x16]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
+ "ld1 { v27.h }[4], [x23]\n"
"b 215f\n"
"201:" // Height 5: Partial accumulate: partial_2_24
"tbz x8, #1, 202f\n"
"ldr s11, [x16], #0x4\n"
"mov x20, #0x34\n"
- "ldr s15, [x25], #0x4\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
"tbz x8, #0, 215f\n"
"ld1 { v11.h }[2], [x16]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
+ "ld1 { v27.h }[2], [x23]\n"
"b 215f\n"
"202:" // Height 5: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x8, #0, 215f\n"
"ldr h11, [x16, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
+ "ldr h27, [x23, #0x0]\n"
"b 215f\n"
"203:" // Height 5: Partial accumulate: partial_4_16
"tbz x8, #2, 205f\n"
"ldr d10, [x16], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x8, #1, 204f\n"
"ld1 { v10.s }[2], [x16], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
+ "ld1 { v26.s }[2], [x23], #0x4\n"
"tbz x8, #0, 215f\n"
"ld1 { v10.h }[6], [x16]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
+ "ld1 { v26.h }[6], [x23]\n"
"b 215f\n"
"204:" // Height 5: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x8, #0, 215f\n"
"ld1 { v10.h }[4], [x16]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
+ "ld1 { v26.h }[4], [x23]\n"
"b 215f\n"
"205:" // Height 5: Partial accumulate: partial_2_16
"tbz x8, #1, 206f\n"
"ldr s10, [x16], #0x4\n"
"mov x20, #0x24\n"
- "ldr s14, [x25], #0x4\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
+ "ldr s26, [x23], #0x4\n"
"tbz x8, #0, 215f\n"
"ld1 { v10.h }[2], [x16]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
+ "ld1 { v26.h }[2], [x23]\n"
"b 215f\n"
"206:" // Height 5: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x8, #0, 215f\n"
"ldr h10, [x16, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
+ "ldr h26, [x23, #0x0]\n"
"b 215f\n"
"207:" // Height 5: Partial accumulate: partial_8_0
"tbz x8, #3, 211f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
"tbz x8, #2, 209f\n"
"ldr d9, [x16], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x8, #1, 208f\n"
"ld1 { v9.s }[2], [x16], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
"tbz x8, #0, 215f\n"
"ld1 { v9.h }[6], [x16]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
+ "ld1 { v25.h }[6], [x23]\n"
"b 215f\n"
"208:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x8, #0, 215f\n"
"ld1 { v9.h }[4], [x16]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
+ "ld1 { v25.h }[4], [x23]\n"
"b 215f\n"
"209:" // Height 5: Partial accumulate: partial_2_8
"tbz x8, #1, 210f\n"
"ldr s9, [x16], #0x4\n"
"mov x20, #0x14\n"
- "ldr s13, [x25], #0x4\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
"tbz x8, #0, 215f\n"
"ld1 { v9.h }[2], [x16]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v25.h }[2], [x23]\n"
"b 215f\n"
"210:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x8, #0, 215f\n"
"ldr h9, [x16, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
"b 215f\n"
"211:" // Height 5: Partial accumulate: partial_4_0
"tbz x8, #2, 213f\n"
"ldr d8, [x16], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x8, #1, 212f\n"
"ld1 { v8.s }[2], [x16], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
"tbz x8, #0, 215f\n"
"ld1 { v8.h }[6], [x16]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
+ "ld1 { v24.h }[6], [x23]\n"
"b 215f\n"
"212:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x8, #0, 215f\n"
"ld1 { v8.h }[4], [x16]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
+ "ld1 { v24.h }[4], [x23]\n"
"b 215f\n"
"213:" // Height 5: Partial accumulate: partial_2_0
"tbz x8, #1, 214f\n"
"ldr s8, [x16], #0x4\n"
"mov x20, #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
"tbz x8, #0, 215f\n"
"ld1 { v8.h }[2], [x16]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
+ "ld1 { v24.h }[2], [x23]\n"
"b 215f\n"
"214:" // Height 5: Partial accumulate: partial_1_0
"ldr h8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h12, [x25, #0x0]\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
+ "ldr h24, [x23, #0x0]\n"
"215:" // Height 5: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 218f\n"
@@ -3450,22 +3452,22 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
"b 218f\n"
"217:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -3492,8 +3494,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"mov x15, #0x0\n"
"219:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 220f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -3540,259 +3542,259 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v24.8h, v6.8h, v4.h[0]\n"
"ldr d29, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "mov v29.d[1], x21\n"
+ "add x11, x11, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr x21, [x17, #0x48]\n"
+ "add x10, x10, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x11, x11, #0x10\n"
+ "mov v29.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr x21, [x17, #0x48]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"ldr d28, [x17, #0x30]\n"
- "mov v28.d[1], x20\n"
"fmla v10.8h, v29.8h, v0.h[0]\n"
+ "add x9, x9, #0x10\n"
"fmla v14.8h, v29.8h, v1.h[0]\n"
- "ldr x20, [x17, #0x58]\n"
+ "ldr x26, [x13, #0x8]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.8h, v29.8h, v2.h[0]\n"
- "add x9, x9, #0x10\n"
"fmla v22.8h, v29.8h, v3.h[0]\n"
- "ldr x26, [x13, #0x8]\n"
+ "ldr x20, [x17, #0x58]\n"
"fmla v26.8h, v29.8h, v4.h[0]\n"
"ldr d29, [x17, #0x40]\n"
"fmla v11.8h, v28.8h, v0.h[0]\n"
- "mov v29.d[1], x21\n"
+ "ldr x25, [x12, #0x8]\n"
"fmla v15.8h, v28.8h, v1.h[0]\n"
- "ldr x21, [x17, #0x68]\n"
+ "ldr x24, [x11, #0x8]\n"
"fmla v19.8h, v28.8h, v2.h[0]\n"
- "ldr x25, [x12, #0x8]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.8h, v28.8h, v3.h[0]\n"
- "ldr x24, [x11, #0x8]\n"
+ "ldr x21, [x17, #0x68]\n"
"fmla v27.8h, v28.8h, v4.h[0]\n"
"ldr d28, [x17, #0x50]\n"
- "mov v28.d[1], x20\n"
"fmla v8.8h, v29.8h, v0.h[1]\n"
+ "ldr x23, [x10, #0x8]\n"
"fmla v12.8h, v29.8h, v1.h[1]\n"
- "ldr x20, [x17, #0x78]\n"
+ "ldr x22, [x9, #0x8]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.8h, v29.8h, v2.h[1]\n"
- "ldr x23, [x10, #0x8]\n"
"fmla v20.8h, v29.8h, v3.h[1]\n"
- "ldr x22, [x9, #0x8]\n"
+ "ldr x20, [x17, #0x78]\n"
"fmla v24.8h, v29.8h, v4.h[1]\n"
"ldr d29, [x17, #0x60]\n"
"fmla v9.8h, v28.8h, v0.h[1]\n"
- "mov v29.d[1], x21\n"
+ "sub x14, x14, #0x8\n"
"fmla v13.8h, v28.8h, v1.h[1]\n"
- "ldr x21, [x17, #0x88]\n"
+ "cmp x14, #0x10\n"
"fmla v17.8h, v28.8h, v2.h[1]\n"
- "sub x14, x14, #0x8\n"
+ "mov v29.d[1], x21\n"
"fmla v21.8h, v28.8h, v3.h[1]\n"
- "cmp x14, #0x10\n"
+ "ldr x21, [x17, #0x88]\n"
"fmla v25.8h, v28.8h, v4.h[1]\n"
"ldr d28, [x17, #0x70]\n"
- "mov v28.d[1], x20\n"
"fmla v10.8h, v29.8h, v0.h[1]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"fmla v14.8h, v29.8h, v1.h[1]\n"
- "ldr x20, [x17, #0x98]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.8h, v29.8h, v2.h[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"fmla v22.8h, v29.8h, v3.h[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x20, [x17, #0x98]\n"
"fmla v26.8h, v29.8h, v4.h[1]\n"
"ldr d29, [x17, #0x80]\n"
"fmla v11.8h, v28.8h, v0.h[1]\n"
- "mov v29.d[1], x21\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"fmla v15.8h, v28.8h, v1.h[1]\n"
- "ldr x21, [x17, #0xa8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v19.8h, v28.8h, v2.h[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.8h, v28.8h, v3.h[1]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "ldr x21, [x17, #0xa8]\n"
"fmla v27.8h, v28.8h, v4.h[1]\n"
"ldr d28, [x17, #0x90]\n"
- "mov v28.d[1], x20\n"
"fmla v8.8h, v29.8h, v0.h[2]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
"fmla v12.8h, v29.8h, v1.h[2]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.8h, v29.8h, v2.h[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
"fmla v20.8h, v29.8h, v3.h[2]\n"
+ "ldr x20, [x17, #0xb8]\n"
"fmla v24.8h, v29.8h, v4.h[2]\n"
"ldr d29, [x17, #0xa0]\n"
"fmla v9.8h, v28.8h, v0.h[2]\n"
- "mov v29.d[1], x21\n"
"fmla v13.8h, v28.8h, v1.h[2]\n"
- "ldr x21, [x17, #0xc8]\n"
"fmla v17.8h, v28.8h, v2.h[2]\n"
+ "mov v29.d[1], x21\n"
"fmla v21.8h, v28.8h, v3.h[2]\n"
+ "ldr x21, [x17, #0xc8]\n"
"fmla v25.8h, v28.8h, v4.h[2]\n"
"ldr d28, [x17, #0xb0]\n"
- "mov v28.d[1], x20\n"
"fmla v10.8h, v29.8h, v0.h[2]\n"
"fmla v14.8h, v29.8h, v1.h[2]\n"
- "ldr x20, [x17, #0xd8]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.8h, v29.8h, v2.h[2]\n"
"fmla v22.8h, v29.8h, v3.h[2]\n"
+ "ldr x20, [x17, #0xd8]\n"
"fmla v26.8h, v29.8h, v4.h[2]\n"
"ldr d29, [x17, #0xc0]\n"
"fmla v11.8h, v28.8h, v0.h[2]\n"
- "mov v29.d[1], x21\n"
"fmla v15.8h, v28.8h, v1.h[2]\n"
- "ldr x21, [x17, #0xe8]\n"
"fmla v19.8h, v28.8h, v2.h[2]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.8h, v28.8h, v3.h[2]\n"
+ "ldr x21, [x17, #0xe8]\n"
"fmla v27.8h, v28.8h, v4.h[2]\n"
"ldr d28, [x17, #0xd0]\n"
- "mov v28.d[1], x20\n"
"fmla v8.8h, v29.8h, v0.h[3]\n"
"fmla v12.8h, v29.8h, v1.h[3]\n"
- "ldr x20, [x17, #0xf8]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.8h, v29.8h, v2.h[3]\n"
"fmla v20.8h, v29.8h, v3.h[3]\n"
+ "ldr x20, [x17, #0xf8]\n"
"fmla v24.8h, v29.8h, v4.h[3]\n"
"ldr d29, [x17, #0xe0]\n"
"fmla v9.8h, v28.8h, v0.h[3]\n"
- "mov v29.d[1], x21\n"
"fmla v13.8h, v28.8h, v1.h[3]\n"
- "ldr x21, [x17, #0x108]\n"
"fmla v17.8h, v28.8h, v2.h[3]\n"
+ "mov v29.d[1], x21\n"
"fmla v21.8h, v28.8h, v3.h[3]\n"
+ "ldr x21, [x17, #0x108]\n"
"fmla v25.8h, v28.8h, v4.h[3]\n"
"ldr d28, [x17, #0xf0]\n"
- "mov v28.d[1], x20\n"
"fmla v10.8h, v29.8h, v0.h[3]\n"
"fmla v14.8h, v29.8h, v1.h[3]\n"
- "ldr x20, [x17, #0x118]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.8h, v29.8h, v2.h[3]\n"
"fmla v22.8h, v29.8h, v3.h[3]\n"
+ "ldr x20, [x17, #0x118]\n"
"fmla v26.8h, v29.8h, v4.h[3]\n"
"ldr d29, [x17, #0x100]\n"
"fmla v11.8h, v28.8h, v0.h[3]\n"
- "mov v29.d[1], x21\n"
"fmla v15.8h, v28.8h, v1.h[3]\n"
- "ldr x21, [x17, #0x128]\n"
"fmla v19.8h, v28.8h, v2.h[3]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.8h, v28.8h, v3.h[3]\n"
+ "ldr x21, [x17, #0x128]\n"
"fmla v27.8h, v28.8h, v4.h[3]\n"
"ldr d28, [x17, #0x110]\n"
- "mov v28.d[1], x20\n"
"fmla v8.8h, v29.8h, v0.h[4]\n"
"fmla v12.8h, v29.8h, v1.h[4]\n"
- "ldr x20, [x17, #0x138]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.8h, v29.8h, v2.h[4]\n"
"fmla v20.8h, v29.8h, v3.h[4]\n"
+ "ldr x20, [x17, #0x138]\n"
"fmla v24.8h, v29.8h, v4.h[4]\n"
"ldr d29, [x17, #0x120]\n"
"fmla v9.8h, v28.8h, v0.h[4]\n"
- "mov v29.d[1], x21\n"
"fmla v13.8h, v28.8h, v1.h[4]\n"
- "ldr x21, [x17, #0x148]\n"
"fmla v17.8h, v28.8h, v2.h[4]\n"
+ "mov v29.d[1], x21\n"
"fmla v21.8h, v28.8h, v3.h[4]\n"
+ "ldr x21, [x17, #0x148]\n"
"fmla v25.8h, v28.8h, v4.h[4]\n"
"ldr d28, [x17, #0x130]\n"
- "mov v28.d[1], x20\n"
"fmla v10.8h, v29.8h, v0.h[4]\n"
"fmla v14.8h, v29.8h, v1.h[4]\n"
- "ldr x20, [x17, #0x158]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.8h, v29.8h, v2.h[4]\n"
"fmla v22.8h, v29.8h, v3.h[4]\n"
+ "ldr x20, [x17, #0x158]\n"
"fmla v26.8h, v29.8h, v4.h[4]\n"
"ldr d29, [x17, #0x140]\n"
"fmla v11.8h, v28.8h, v0.h[4]\n"
- "mov v29.d[1], x21\n"
"fmla v15.8h, v28.8h, v1.h[4]\n"
- "ldr x21, [x17, #0x168]\n"
"fmla v19.8h, v28.8h, v2.h[4]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.8h, v28.8h, v3.h[4]\n"
+ "ldr x21, [x17, #0x168]\n"
"fmla v27.8h, v28.8h, v4.h[4]\n"
"ldr d28, [x17, #0x150]\n"
- "mov v28.d[1], x20\n"
"fmla v8.8h, v29.8h, v0.h[5]\n"
"fmla v12.8h, v29.8h, v1.h[5]\n"
- "ldr x20, [x17, #0x178]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.8h, v29.8h, v2.h[5]\n"
"fmla v20.8h, v29.8h, v3.h[5]\n"
+ "ldr x20, [x17, #0x178]\n"
"fmla v24.8h, v29.8h, v4.h[5]\n"
"ldr d29, [x17, #0x160]\n"
"fmla v9.8h, v28.8h, v0.h[5]\n"
- "mov v29.d[1], x21\n"
"fmla v13.8h, v28.8h, v1.h[5]\n"
- "ldr x21, [x17, #0x188]\n"
"fmla v17.8h, v28.8h, v2.h[5]\n"
+ "mov v29.d[1], x21\n"
"fmla v21.8h, v28.8h, v3.h[5]\n"
+ "ldr x21, [x17, #0x188]\n"
"fmla v25.8h, v28.8h, v4.h[5]\n"
"ldr d28, [x17, #0x170]\n"
- "mov v28.d[1], x20\n"
"fmla v10.8h, v29.8h, v0.h[5]\n"
"fmla v14.8h, v29.8h, v1.h[5]\n"
- "ldr x20, [x17, #0x198]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.8h, v29.8h, v2.h[5]\n"
"fmla v22.8h, v29.8h, v3.h[5]\n"
+ "ldr x20, [x17, #0x198]\n"
"fmla v26.8h, v29.8h, v4.h[5]\n"
"ldr d29, [x17, #0x180]\n"
"fmla v11.8h, v28.8h, v0.h[5]\n"
- "mov v29.d[1], x21\n"
"fmla v15.8h, v28.8h, v1.h[5]\n"
- "ldr x21, [x17, #0x1a8]\n"
"fmla v19.8h, v28.8h, v2.h[5]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.8h, v28.8h, v3.h[5]\n"
+ "ldr x21, [x17, #0x1a8]\n"
"fmla v27.8h, v28.8h, v4.h[5]\n"
"ldr d28, [x17, #0x190]\n"
- "mov v28.d[1], x20\n"
"fmla v8.8h, v29.8h, v0.h[6]\n"
"fmla v12.8h, v29.8h, v1.h[6]\n"
- "ldr x20, [x17, #0x1b8]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.8h, v29.8h, v2.h[6]\n"
"fmla v20.8h, v29.8h, v3.h[6]\n"
+ "ldr x20, [x17, #0x1b8]\n"
"fmla v24.8h, v29.8h, v4.h[6]\n"
"ldr d29, [x17, #0x1a0]\n"
"fmla v9.8h, v28.8h, v0.h[6]\n"
- "mov v29.d[1], x21\n"
"fmla v13.8h, v28.8h, v1.h[6]\n"
- "ldr x21, [x17, #0x1c8]\n"
"fmla v17.8h, v28.8h, v2.h[6]\n"
+ "mov v29.d[1], x21\n"
"fmla v21.8h, v28.8h, v3.h[6]\n"
+ "ldr x21, [x17, #0x1c8]\n"
"fmla v25.8h, v28.8h, v4.h[6]\n"
"ldr d28, [x17, #0x1b0]\n"
- "mov v28.d[1], x20\n"
"fmla v10.8h, v29.8h, v0.h[6]\n"
"fmla v14.8h, v29.8h, v1.h[6]\n"
- "ldr x20, [x17, #0x1d8]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.8h, v29.8h, v2.h[6]\n"
"fmla v22.8h, v29.8h, v3.h[6]\n"
+ "ldr x20, [x17, #0x1d8]\n"
"fmla v26.8h, v29.8h, v4.h[6]\n"
"ldr d29, [x17, #0x1c0]\n"
"fmla v11.8h, v28.8h, v0.h[6]\n"
- "mov v29.d[1], x21\n"
"fmla v15.8h, v28.8h, v1.h[6]\n"
- "ldr x21, [x17, #0x1e8]\n"
"fmla v19.8h, v28.8h, v2.h[6]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.8h, v28.8h, v3.h[6]\n"
+ "ldr x21, [x17, #0x1e8]\n"
"fmla v27.8h, v28.8h, v4.h[6]\n"
"ldr d28, [x17, #0x1d0]\n"
- "mov v28.d[1], x20\n"
"fmla v8.8h, v29.8h, v0.h[7]\n"
"fmla v12.8h, v29.8h, v1.h[7]\n"
- "ldr x20, [x17, #0x1f8]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.8h, v29.8h, v2.h[7]\n"
"fmla v20.8h, v29.8h, v3.h[7]\n"
+ "ldr x20, [x17, #0x1f8]\n"
"fmla v24.8h, v29.8h, v4.h[7]\n"
"ldr d29, [x17, #0x1e0]\n"
"fmla v9.8h, v28.8h, v0.h[7]\n"
- "mov v29.d[1], x21\n"
"fmla v13.8h, v28.8h, v1.h[7]\n"
"fmla v17.8h, v28.8h, v2.h[7]\n"
+ "mov v29.d[1], x21\n"
"fmla v21.8h, v28.8h, v3.h[7]\n"
"fmla v25.8h, v28.8h, v4.h[7]\n"
"ldr d28, [x17, #0x1f0]\n"
- "mov v28.d[1], x20\n"
"add x17, x17, #0x200\n"
"fmla v10.8h, v29.8h, v0.h[7]\n"
- "ldr x21, [x17, #0x8]\n"
"fmla v14.8h, v29.8h, v1.h[7]\n"
- "ldr x20, [x17, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.8h, v29.8h, v2.h[7]\n"
"fmla v22.8h, v29.8h, v3.h[7]\n"
+ "ldr x20, [x17, #0x18]\n"
"fmla v26.8h, v29.8h, v4.h[7]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.8h, v28.8h, v0.h[7]\n"
@@ -4027,8 +4029,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr h1, [x10], #0x2\n"
"ldr h0, [x9], #0x2\n"
"ldr q29, [x17, #0x0]\n"
- "fmla v8.8h, v29.8h, v4.h[0]\n"
"ldr q28, [x17, #0x10]\n"
+ "fmla v8.8h, v29.8h, v4.h[0]\n"
"fmla v12.8h, v29.8h, v3.h[0]\n"
"fmla v16.8h, v29.8h, v2.h[0]\n"
"fmla v20.8h, v29.8h, v1.h[0]\n"
@@ -4058,40 +4060,40 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"cmp x15, x20\n"
"bne 219b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"prfm pstl1keep, [x16, #0x0]\n"
+ "add x26, x16, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x23, x24, x20, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 227f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v28.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v28.8h\n"
- "fmin v9.8h, v9.8h, v28.8h\n"
- "fmin v10.8h, v10.8h, v28.8h\n"
- "fmin v11.8h, v11.8h, v28.8h\n"
- "fmin v12.8h, v12.8h, v28.8h\n"
- "fmin v13.8h, v13.8h, v28.8h\n"
- "fmin v14.8h, v14.8h, v28.8h\n"
- "fmin v15.8h, v15.8h, v28.8h\n"
- "fmin v16.8h, v16.8h, v28.8h\n"
- "fmin v17.8h, v17.8h, v28.8h\n"
- "fmin v18.8h, v18.8h, v28.8h\n"
- "fmin v19.8h, v19.8h, v28.8h\n"
- "fmin v20.8h, v20.8h, v28.8h\n"
- "fmin v21.8h, v21.8h, v28.8h\n"
- "fmin v22.8h, v22.8h, v28.8h\n"
- "fmin v23.8h, v23.8h, v28.8h\n"
- "fmin v24.8h, v24.8h, v28.8h\n"
- "fmin v25.8h, v25.8h, v28.8h\n"
- "fmin v26.8h, v26.8h, v28.8h\n"
- "fmin v27.8h, v27.8h, v28.8h\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v29.8h }, [x21]\n"
"ld1r { v28.8h }, [x20]\n"
+ "fmin v8.8h, v8.8h, v29.8h\n"
+ "fmin v9.8h, v9.8h, v29.8h\n"
+ "fmin v10.8h, v10.8h, v29.8h\n"
+ "fmin v11.8h, v11.8h, v29.8h\n"
+ "fmin v12.8h, v12.8h, v29.8h\n"
+ "fmin v13.8h, v13.8h, v29.8h\n"
+ "fmin v14.8h, v14.8h, v29.8h\n"
+ "fmin v15.8h, v15.8h, v29.8h\n"
+ "fmin v16.8h, v16.8h, v29.8h\n"
+ "fmin v17.8h, v17.8h, v29.8h\n"
+ "fmin v18.8h, v18.8h, v29.8h\n"
+ "fmin v19.8h, v19.8h, v29.8h\n"
+ "fmin v20.8h, v20.8h, v29.8h\n"
+ "fmin v21.8h, v21.8h, v29.8h\n"
+ "fmin v22.8h, v22.8h, v29.8h\n"
+ "fmin v23.8h, v23.8h, v29.8h\n"
+ "fmin v24.8h, v24.8h, v29.8h\n"
+ "fmin v25.8h, v25.8h, v29.8h\n"
+ "fmin v26.8h, v26.8h, v29.8h\n"
+ "fmin v27.8h, v27.8h, v29.8h\n"
"fmax v8.8h, v8.8h, v28.8h\n"
"fmax v9.8h, v9.8h, v28.8h\n"
"fmax v10.8h, v10.8h, v28.8h\n"
@@ -4118,223 +4120,223 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"tbz x8, #4, 235f\n"
"st1 { v8.8h }, [x16], #0x10\n"
"st1 { v9.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v25.8h }, [x23], #0x10\n"
"tbz x8, #3, 231f\n"
"st1 { v10.8h }, [x16], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
+ "st1 { v26.8h }, [x23], #0x10\n"
"tbz x8, #2, 229f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x8, #1, 228f\n"
"st1 { v11.s }[2], [x16], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
+ "st1 { v27.s }[2], [x23], #0x4\n"
"tbz x8, #0, 243f\n"
"st1 { v11.h }[6], [x16]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
+ "st1 { v27.h }[6], [x23]\n"
"b 243f\n"
"228:" // Height 5: Partial direct writeback: partial_1_28
"tbz x8, #0, 243f\n"
"st1 { v11.h }[4], [x16]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
+ "st1 { v27.h }[4], [x23]\n"
"b 243f\n"
"229:" // Height 5: Partial direct writeback: partial_2_24
"tbz x8, #1, 230f\n"
"str s11, [x16], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
+ "str s27, [x23], #0x4\n"
"tbz x8, #0, 243f\n"
"st1 { v11.h }[2], [x16]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
+ "st1 { v27.h }[2], [x23]\n"
"b 243f\n"
"230:" // Height 5: Partial direct writeback: partial_1_24
"tbz x8, #0, 243f\n"
"str h11, [x16, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
+ "str h27, [x23, #0x0]\n"
"b 243f\n"
"231:" // Height 5: Partial direct writeback: partial_4_16
"tbz x8, #2, 233f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x8, #1, 232f\n"
"st1 { v10.s }[2], [x16], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
+ "st1 { v26.s }[2], [x23], #0x4\n"
"tbz x8, #0, 243f\n"
"st1 { v10.h }[6], [x16]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
+ "st1 { v26.h }[6], [x23]\n"
"b 243f\n"
"232:" // Height 5: Partial direct writeback: partial_1_20
"tbz x8, #0, 243f\n"
"st1 { v10.h }[4], [x16]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
+ "st1 { v26.h }[4], [x23]\n"
"b 243f\n"
"233:" // Height 5: Partial direct writeback: partial_2_16
"tbz x8, #1, 234f\n"
"str s10, [x16], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
+ "str s26, [x23], #0x4\n"
"tbz x8, #0, 243f\n"
"st1 { v10.h }[2], [x16]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
+ "st1 { v26.h }[2], [x23]\n"
"b 243f\n"
"234:" // Height 5: Partial direct writeback: partial_1_16
"tbz x8, #0, 243f\n"
"str h10, [x16, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
+ "str h26, [x23, #0x0]\n"
"b 243f\n"
"235:" // Height 5: Partial direct writeback: partial_8_0
"tbz x8, #3, 239f\n"
"st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
"tbz x8, #2, 237f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x8, #1, 236f\n"
"st1 { v9.s }[2], [x16], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
+ "st1 { v25.s }[2], [x23], #0x4\n"
"tbz x8, #0, 243f\n"
"st1 { v9.h }[6], [x16]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
+ "st1 { v25.h }[6], [x23]\n"
"b 243f\n"
"236:" // Height 5: Partial direct writeback: partial_1_12
"tbz x8, #0, 243f\n"
"st1 { v9.h }[4], [x16]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
+ "st1 { v25.h }[4], [x23]\n"
"b 243f\n"
"237:" // Height 5: Partial direct writeback: partial_2_8
"tbz x8, #1, 238f\n"
"str s9, [x16], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
+ "str s25, [x23], #0x4\n"
"tbz x8, #0, 243f\n"
"st1 { v9.h }[2], [x16]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
+ "st1 { v25.h }[2], [x23]\n"
"b 243f\n"
"238:" // Height 5: Partial direct writeback: partial_1_8
"tbz x8, #0, 243f\n"
"str h9, [x16, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
+ "str h25, [x23, #0x0]\n"
"b 243f\n"
"239:" // Height 5: Partial direct writeback: partial_4_0
"tbz x8, #2, 241f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x8, #1, 240f\n"
"st1 { v8.s }[2], [x16], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
"tbz x8, #0, 243f\n"
"st1 { v8.h }[6], [x16]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
+ "st1 { v24.h }[6], [x23]\n"
"b 243f\n"
"240:" // Height 5: Partial direct writeback: partial_1_4
"tbz x8, #0, 243f\n"
"st1 { v8.h }[4], [x16]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
+ "st1 { v24.h }[4], [x23]\n"
"b 243f\n"
"241:" // Height 5: Partial direct writeback: partial_2_0
"tbz x8, #1, 242f\n"
"str s8, [x16], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
"tbz x8, #0, 243f\n"
"st1 { v8.h }[2], [x16]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
+ "st1 { v24.h }[2], [x23]\n"
"b 243f\n"
"242:" // Height 5: Partial direct writeback: partial_1_0
"str h8, [x16, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
+ "str h24, [x23, #0x0]\n"
"243:" // Height 5: Partial direct writeback: Done
"b 245f\n"
"244:" // Height 5: Full writeback
@@ -4343,22 +4345,22 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"245:" // Height 5: Writeback done
"subs x8, x8, #0x20\n"
"bgt 198b\n"
@@ -4366,23 +4368,24 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"246:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"mov x20, #0xc\n"
- "mov x7, %x[bias]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "madd x20, x21, x20, x16\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"247:" // Height 6: Column loop
"cbz x7, 248f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -4402,281 +4405,281 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"248:" // Height 6: no bias
"tbz %x[flags], #0, 266f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
+ "cmp x8, #0x20\n"
+ "add x26, x16, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
- "cmp x8, #0x20\n"
- "add x21, x22, x20, LSL #1\n"
"bge 265f\n"
"tbz x8, #4, 256f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
+ "ld1 { v28.8h }, [x22], #0x10\n"
"ld1 { v9.8h }, [x16], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
- "ld1 { v29.8h }, [x21], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
+ "ld1 { v25.8h }, [x23], #0x10\n"
+ "ld1 { v29.8h }, [x22], #0x10\n"
"tbz x8, #3, 252f\n"
"ld1 { v10.8h }, [x16], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
- "ld1 { v30.8h }, [x21], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
+ "ld1 { v26.8h }, [x23], #0x10\n"
+ "ld1 { v30.8h }, [x22], #0x10\n"
"tbz x8, #2, 250f\n"
"ldr d11, [x16], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x8, #1, 249f\n"
"ld1 { v11.s }[2], [x16], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
- "ld1 { v31.s }[2], [x21], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
+ "ld1 { v31.s }[2], [x22], #0x4\n"
"tbz x8, #0, 264f\n"
"ld1 { v11.h }[6], [x16]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
- "ld1 { v31.h }[6], [x21]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
+ "ld1 { v27.h }[6], [x23]\n"
+ "ld1 { v31.h }[6], [x22]\n"
"b 264f\n"
"249:" // Height 6: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x8, #0, 264f\n"
"ld1 { v11.h }[4], [x16]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
- "ld1 { v31.h }[4], [x21]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
+ "ld1 { v27.h }[4], [x23]\n"
+ "ld1 { v31.h }[4], [x22]\n"
"b 264f\n"
"250:" // Height 6: Partial accumulate: partial_2_24
"tbz x8, #1, 251f\n"
"ldr s11, [x16], #0x4\n"
"mov x20, #0x34\n"
- "ldr s15, [x25], #0x4\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
- "ldr s31, [x21], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
+ "ldr s31, [x22], #0x4\n"
"tbz x8, #0, 264f\n"
"ld1 { v11.h }[2], [x16]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
- "ld1 { v31.h }[2], [x21]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
+ "ld1 { v27.h }[2], [x23]\n"
+ "ld1 { v31.h }[2], [x22]\n"
"b 264f\n"
"251:" // Height 6: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x8, #0, 264f\n"
"ldr h11, [x16, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
- "ldr h31, [x21, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
+ "ldr h27, [x23, #0x0]\n"
+ "ldr h31, [x22, #0x0]\n"
"b 264f\n"
"252:" // Height 6: Partial accumulate: partial_4_16
"tbz x8, #2, 254f\n"
"ldr d10, [x16], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x8, #1, 253f\n"
"ld1 { v10.s }[2], [x16], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
+ "ld1 { v26.s }[2], [x23], #0x4\n"
+ "ld1 { v30.s }[2], [x22], #0x4\n"
"tbz x8, #0, 264f\n"
"ld1 { v10.h }[6], [x16]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
- "ld1 { v30.h }[6], [x21]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
+ "ld1 { v26.h }[6], [x23]\n"
+ "ld1 { v30.h }[6], [x22]\n"
"b 264f\n"
"253:" // Height 6: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x8, #0, 264f\n"
"ld1 { v10.h }[4], [x16]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
- "ld1 { v30.h }[4], [x21]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
+ "ld1 { v26.h }[4], [x23]\n"
+ "ld1 { v30.h }[4], [x22]\n"
"b 264f\n"
"254:" // Height 6: Partial accumulate: partial_2_16
"tbz x8, #1, 255f\n"
"ldr s10, [x16], #0x4\n"
"mov x20, #0x24\n"
- "ldr s14, [x25], #0x4\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
+ "ldr s26, [x23], #0x4\n"
+ "ldr s30, [x22], #0x4\n"
"tbz x8, #0, 264f\n"
"ld1 { v10.h }[2], [x16]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
- "ld1 { v30.h }[2], [x21]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
+ "ld1 { v26.h }[2], [x23]\n"
+ "ld1 { v30.h }[2], [x22]\n"
"b 264f\n"
"255:" // Height 6: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x8, #0, 264f\n"
"ldr h10, [x16, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
- "ldr h30, [x21, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
+ "ldr h26, [x23, #0x0]\n"
+ "ldr h30, [x22, #0x0]\n"
"b 264f\n"
"256:" // Height 6: Partial accumulate: partial_8_0
"tbz x8, #3, 260f\n"
"ld1 { v8.8h }, [x16], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
+ "ld1 { v28.8h }, [x22], #0x10\n"
"tbz x8, #2, 258f\n"
"ldr d9, [x16], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x8, #1, 257f\n"
"ld1 { v9.s }[2], [x16], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
- "ld1 { v29.s }[2], [x21], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
+ "ld1 { v29.s }[2], [x22], #0x4\n"
"tbz x8, #0, 264f\n"
"ld1 { v9.h }[6], [x16]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
- "ld1 { v29.h }[6], [x21]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
+ "ld1 { v25.h }[6], [x23]\n"
+ "ld1 { v29.h }[6], [x22]\n"
"b 264f\n"
"257:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x8, #0, 264f\n"
"ld1 { v9.h }[4], [x16]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
- "ld1 { v29.h }[4], [x21]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
+ "ld1 { v25.h }[4], [x23]\n"
+ "ld1 { v29.h }[4], [x22]\n"
"b 264f\n"
"258:" // Height 6: Partial accumulate: partial_2_8
"tbz x8, #1, 259f\n"
"ldr s9, [x16], #0x4\n"
"mov x20, #0x14\n"
- "ldr s13, [x25], #0x4\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
- "ldr s29, [x21], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s29, [x22], #0x4\n"
"tbz x8, #0, 264f\n"
"ld1 { v9.h }[2], [x16]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
- "ld1 { v29.h }[2], [x21]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v25.h }[2], [x23]\n"
+ "ld1 { v29.h }[2], [x22]\n"
"b 264f\n"
"259:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x8, #0, 264f\n"
"ldr h9, [x16, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
- "ldr h29, [x21, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
+ "ldr h29, [x22, #0x0]\n"
"b 264f\n"
"260:" // Height 6: Partial accumulate: partial_4_0
"tbz x8, #2, 262f\n"
"ldr d8, [x16], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x8, #1, 261f\n"
"ld1 { v8.s }[2], [x16], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
+ "ld1 { v28.s }[2], [x22], #0x4\n"
"tbz x8, #0, 264f\n"
"ld1 { v8.h }[6], [x16]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
- "ld1 { v28.h }[6], [x21]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
+ "ld1 { v24.h }[6], [x23]\n"
+ "ld1 { v28.h }[6], [x22]\n"
"b 264f\n"
"261:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x8, #0, 264f\n"
"ld1 { v8.h }[4], [x16]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
- "ld1 { v28.h }[4], [x21]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
+ "ld1 { v24.h }[4], [x23]\n"
+ "ld1 { v28.h }[4], [x22]\n"
"b 264f\n"
"262:" // Height 6: Partial accumulate: partial_2_0
"tbz x8, #1, 263f\n"
"ldr s8, [x16], #0x4\n"
"mov x20, #0x4\n"
- "ldr s12, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s28, [x22], #0x4\n"
"tbz x8, #0, 264f\n"
"ld1 { v8.h }[2], [x16]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
- "ld1 { v28.h }[2], [x21]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
+ "ld1 { v24.h }[2], [x23]\n"
+ "ld1 { v28.h }[2], [x22]\n"
"b 264f\n"
"263:" // Height 6: Partial accumulate: partial_1_0
"ldr h8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h12, [x25, #0x0]\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
- "ldr h28, [x21, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
+ "ldr h24, [x23, #0x0]\n"
+ "ldr h28, [x22, #0x0]\n"
"264:" // Height 6: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 267f\n"
@@ -4685,26 +4688,26 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"b 267f\n"
"266:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -4735,8 +4738,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"mov x15, #0x0\n"
"268:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 269f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -4789,290 +4792,290 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"fmla v28.8h, v6.8h, v5.h[0]\n"
"ldr d6, [x17, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x21\n"
+ "add x10, x10, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
- "ldr x21, [x17, #0x48]\n"
+ "add x9, x9, #0x10\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v6.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "add x9, x9, #0x10\n"
+ "ldr x21, [x17, #0x48]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"add x28, x28, #0x10\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x20\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
+ "ldr x27, [x13, #0x8]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "ldr x20, [x17, #0x58]\n"
+ "ldr x26, [x12, #0x8]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
- "ldr x27, [x13, #0x8]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
- "ldr x26, [x12, #0x8]\n"
+ "ldr x20, [x17, #0x58]\n"
"fmla v26.8h, v6.8h, v4.h[0]\n"
"ldr x25, [x11, #0x8]\n"
"fmla v30.8h, v6.8h, v5.h[0]\n"
"ldr d6, [x17, #0x40]\n"
"fmla v11.8h, v7.8h, v0.h[0]\n"
- "mov v6.d[1], x21\n"
+ "ldr x24, [x10, #0x8]\n"
"fmla v15.8h, v7.8h, v1.h[0]\n"
- "ldr x21, [x17, #0x68]\n"
+ "ldr x23, [x9, #0x8]\n"
"fmla v19.8h, v7.8h, v2.h[0]\n"
- "ldr x24, [x10, #0x8]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.8h, v7.8h, v3.h[0]\n"
- "ldr x23, [x9, #0x8]\n"
+ "ldr x21, [x17, #0x68]\n"
"fmla v27.8h, v7.8h, v4.h[0]\n"
"ldr x22, [x28, #0x8]\n"
"fmla v31.8h, v7.8h, v5.h[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x20\n"
"fmla v8.8h, v6.8h, v0.h[1]\n"
+ "sub x14, x14, #0x8\n"
"fmla v12.8h, v6.8h, v1.h[1]\n"
- "ldr x20, [x17, #0x78]\n"
+ "cmp x14, #0x10\n"
+ "mov v7.d[1], x20\n"
"fmla v16.8h, v6.8h, v2.h[1]\n"
- "sub x14, x14, #0x8\n"
"fmla v20.8h, v6.8h, v3.h[1]\n"
- "cmp x14, #0x10\n"
+ "ldr x20, [x17, #0x78]\n"
"fmla v24.8h, v6.8h, v4.h[1]\n"
"prfm pldl1keep, [x13, #0x80]\n"
"fmla v28.8h, v6.8h, v5.h[1]\n"
"ldr d6, [x17, #0x60]\n"
"fmla v9.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x21\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v13.8h, v7.8h, v1.h[1]\n"
- "ldr x21, [x17, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr x21, [x17, #0x88]\n"
"fmla v25.8h, v7.8h, v4.h[1]\n"
"prfm pldl1keep, [x10, #0x80]\n"
"fmla v29.8h, v7.8h, v5.h[1]\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x20\n"
"fmla v10.8h, v6.8h, v0.h[1]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
"fmla v14.8h, v6.8h, v1.h[1]\n"
- "ldr x20, [x17, #0x98]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.8h, v6.8h, v2.h[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
"fmla v22.8h, v6.8h, v3.h[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "ldr x20, [x17, #0x98]\n"
"fmla v26.8h, v6.8h, v4.h[1]\n"
"fmla v30.8h, v6.8h, v5.h[1]\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.8h, v7.8h, v0.h[1]\n"
- "mov v6.d[1], x21\n"
"fmla v15.8h, v7.8h, v1.h[1]\n"
- "ldr x21, [x17, #0xa8]\n"
"fmla v19.8h, v7.8h, v2.h[1]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.8h, v7.8h, v3.h[1]\n"
+ "ldr x21, [x17, #0xa8]\n"
"fmla v27.8h, v7.8h, v4.h[1]\n"
"fmla v31.8h, v7.8h, v5.h[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x20\n"
"fmla v8.8h, v6.8h, v0.h[2]\n"
"fmla v12.8h, v6.8h, v1.h[2]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "mov v7.d[1], x20\n"
"fmla v16.8h, v6.8h, v2.h[2]\n"
"fmla v20.8h, v6.8h, v3.h[2]\n"
+ "ldr x20, [x17, #0xb8]\n"
"fmla v24.8h, v6.8h, v4.h[2]\n"
"fmla v28.8h, v6.8h, v5.h[2]\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x21\n"
"fmla v13.8h, v7.8h, v1.h[2]\n"
- "ldr x21, [x17, #0xc8]\n"
"fmla v17.8h, v7.8h, v2.h[2]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[2]\n"
+ "ldr x21, [x17, #0xc8]\n"
"fmla v25.8h, v7.8h, v4.h[2]\n"
"fmla v29.8h, v7.8h, v5.h[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x20\n"
"fmla v10.8h, v6.8h, v0.h[2]\n"
"fmla v14.8h, v6.8h, v1.h[2]\n"
- "ldr x20, [x17, #0xd8]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.8h, v6.8h, v2.h[2]\n"
"fmla v22.8h, v6.8h, v3.h[2]\n"
+ "ldr x20, [x17, #0xd8]\n"
"fmla v26.8h, v6.8h, v4.h[2]\n"
"fmla v30.8h, v6.8h, v5.h[2]\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.8h, v7.8h, v0.h[2]\n"
- "mov v6.d[1], x21\n"
"fmla v15.8h, v7.8h, v1.h[2]\n"
- "ldr x21, [x17, #0xe8]\n"
"fmla v19.8h, v7.8h, v2.h[2]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.8h, v7.8h, v3.h[2]\n"
+ "ldr x21, [x17, #0xe8]\n"
"fmla v27.8h, v7.8h, v4.h[2]\n"
"fmla v31.8h, v7.8h, v5.h[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x20\n"
"fmla v8.8h, v6.8h, v0.h[3]\n"
"fmla v12.8h, v6.8h, v1.h[3]\n"
- "ldr x20, [x17, #0xf8]\n"
+ "mov v7.d[1], x20\n"
"fmla v16.8h, v6.8h, v2.h[3]\n"
"fmla v20.8h, v6.8h, v3.h[3]\n"
+ "ldr x20, [x17, #0xf8]\n"
"fmla v24.8h, v6.8h, v4.h[3]\n"
"fmla v28.8h, v6.8h, v5.h[3]\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x21\n"
"fmla v13.8h, v7.8h, v1.h[3]\n"
- "ldr x21, [x17, #0x108]\n"
"fmla v17.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[3]\n"
+ "ldr x21, [x17, #0x108]\n"
"fmla v25.8h, v7.8h, v4.h[3]\n"
"fmla v29.8h, v7.8h, v5.h[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x20\n"
"fmla v10.8h, v6.8h, v0.h[3]\n"
"fmla v14.8h, v6.8h, v1.h[3]\n"
- "ldr x20, [x17, #0x118]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.8h, v6.8h, v2.h[3]\n"
"fmla v22.8h, v6.8h, v3.h[3]\n"
+ "ldr x20, [x17, #0x118]\n"
"fmla v26.8h, v6.8h, v4.h[3]\n"
"fmla v30.8h, v6.8h, v5.h[3]\n"
"ldr d6, [x17, #0x100]\n"
"fmla v11.8h, v7.8h, v0.h[3]\n"
- "mov v6.d[1], x21\n"
"fmla v15.8h, v7.8h, v1.h[3]\n"
- "ldr x21, [x17, #0x128]\n"
"fmla v19.8h, v7.8h, v2.h[3]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.8h, v7.8h, v3.h[3]\n"
+ "ldr x21, [x17, #0x128]\n"
"fmla v27.8h, v7.8h, v4.h[3]\n"
"fmla v31.8h, v7.8h, v5.h[3]\n"
"ldr d7, [x17, #0x110]\n"
- "mov v7.d[1], x20\n"
"fmla v8.8h, v6.8h, v0.h[4]\n"
"fmla v12.8h, v6.8h, v1.h[4]\n"
- "ldr x20, [x17, #0x138]\n"
+ "mov v7.d[1], x20\n"
"fmla v16.8h, v6.8h, v2.h[4]\n"
"fmla v20.8h, v6.8h, v3.h[4]\n"
+ "ldr x20, [x17, #0x138]\n"
"fmla v24.8h, v6.8h, v4.h[4]\n"
"fmla v28.8h, v6.8h, v5.h[4]\n"
"ldr d6, [x17, #0x120]\n"
"fmla v9.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x21\n"
"fmla v13.8h, v7.8h, v1.h[4]\n"
- "ldr x21, [x17, #0x148]\n"
"fmla v17.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[4]\n"
+ "ldr x21, [x17, #0x148]\n"
"fmla v25.8h, v7.8h, v4.h[4]\n"
"fmla v29.8h, v7.8h, v5.h[4]\n"
"ldr d7, [x17, #0x130]\n"
- "mov v7.d[1], x20\n"
"fmla v10.8h, v6.8h, v0.h[4]\n"
"fmla v14.8h, v6.8h, v1.h[4]\n"
- "ldr x20, [x17, #0x158]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.8h, v6.8h, v2.h[4]\n"
"fmla v22.8h, v6.8h, v3.h[4]\n"
+ "ldr x20, [x17, #0x158]\n"
"fmla v26.8h, v6.8h, v4.h[4]\n"
"fmla v30.8h, v6.8h, v5.h[4]\n"
"ldr d6, [x17, #0x140]\n"
"fmla v11.8h, v7.8h, v0.h[4]\n"
- "mov v6.d[1], x21\n"
"fmla v15.8h, v7.8h, v1.h[4]\n"
- "ldr x21, [x17, #0x168]\n"
"fmla v19.8h, v7.8h, v2.h[4]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.8h, v7.8h, v3.h[4]\n"
+ "ldr x21, [x17, #0x168]\n"
"fmla v27.8h, v7.8h, v4.h[4]\n"
"fmla v31.8h, v7.8h, v5.h[4]\n"
"ldr d7, [x17, #0x150]\n"
- "mov v7.d[1], x20\n"
"fmla v8.8h, v6.8h, v0.h[5]\n"
"fmla v12.8h, v6.8h, v1.h[5]\n"
- "ldr x20, [x17, #0x178]\n"
+ "mov v7.d[1], x20\n"
"fmla v16.8h, v6.8h, v2.h[5]\n"
"fmla v20.8h, v6.8h, v3.h[5]\n"
+ "ldr x20, [x17, #0x178]\n"
"fmla v24.8h, v6.8h, v4.h[5]\n"
"fmla v28.8h, v6.8h, v5.h[5]\n"
"ldr d6, [x17, #0x160]\n"
"fmla v9.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x21\n"
"fmla v13.8h, v7.8h, v1.h[5]\n"
- "ldr x21, [x17, #0x188]\n"
"fmla v17.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[5]\n"
+ "ldr x21, [x17, #0x188]\n"
"fmla v25.8h, v7.8h, v4.h[5]\n"
"fmla v29.8h, v7.8h, v5.h[5]\n"
"ldr d7, [x17, #0x170]\n"
- "mov v7.d[1], x20\n"
"fmla v10.8h, v6.8h, v0.h[5]\n"
"fmla v14.8h, v6.8h, v1.h[5]\n"
- "ldr x20, [x17, #0x198]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.8h, v6.8h, v2.h[5]\n"
"fmla v22.8h, v6.8h, v3.h[5]\n"
+ "ldr x20, [x17, #0x198]\n"
"fmla v26.8h, v6.8h, v4.h[5]\n"
"fmla v30.8h, v6.8h, v5.h[5]\n"
"ldr d6, [x17, #0x180]\n"
"fmla v11.8h, v7.8h, v0.h[5]\n"
- "mov v6.d[1], x21\n"
"fmla v15.8h, v7.8h, v1.h[5]\n"
- "ldr x21, [x17, #0x1a8]\n"
"fmla v19.8h, v7.8h, v2.h[5]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.8h, v7.8h, v3.h[5]\n"
+ "ldr x21, [x17, #0x1a8]\n"
"fmla v27.8h, v7.8h, v4.h[5]\n"
"fmla v31.8h, v7.8h, v5.h[5]\n"
"ldr d7, [x17, #0x190]\n"
- "mov v7.d[1], x20\n"
"fmla v8.8h, v6.8h, v0.h[6]\n"
"fmla v12.8h, v6.8h, v1.h[6]\n"
- "ldr x20, [x17, #0x1b8]\n"
+ "mov v7.d[1], x20\n"
"fmla v16.8h, v6.8h, v2.h[6]\n"
"fmla v20.8h, v6.8h, v3.h[6]\n"
+ "ldr x20, [x17, #0x1b8]\n"
"fmla v24.8h, v6.8h, v4.h[6]\n"
"fmla v28.8h, v6.8h, v5.h[6]\n"
"ldr d6, [x17, #0x1a0]\n"
"fmla v9.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x21\n"
"fmla v13.8h, v7.8h, v1.h[6]\n"
- "ldr x21, [x17, #0x1c8]\n"
"fmla v17.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[6]\n"
+ "ldr x21, [x17, #0x1c8]\n"
"fmla v25.8h, v7.8h, v4.h[6]\n"
"fmla v29.8h, v7.8h, v5.h[6]\n"
"ldr d7, [x17, #0x1b0]\n"
- "mov v7.d[1], x20\n"
"fmla v10.8h, v6.8h, v0.h[6]\n"
"fmla v14.8h, v6.8h, v1.h[6]\n"
- "ldr x20, [x17, #0x1d8]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.8h, v6.8h, v2.h[6]\n"
"fmla v22.8h, v6.8h, v3.h[6]\n"
+ "ldr x20, [x17, #0x1d8]\n"
"fmla v26.8h, v6.8h, v4.h[6]\n"
"fmla v30.8h, v6.8h, v5.h[6]\n"
"ldr d6, [x17, #0x1c0]\n"
"fmla v11.8h, v7.8h, v0.h[6]\n"
- "mov v6.d[1], x21\n"
"fmla v15.8h, v7.8h, v1.h[6]\n"
- "ldr x21, [x17, #0x1e8]\n"
"fmla v19.8h, v7.8h, v2.h[6]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.8h, v7.8h, v3.h[6]\n"
+ "ldr x21, [x17, #0x1e8]\n"
"fmla v27.8h, v7.8h, v4.h[6]\n"
"fmla v31.8h, v7.8h, v5.h[6]\n"
"ldr d7, [x17, #0x1d0]\n"
- "mov v7.d[1], x20\n"
"fmla v8.8h, v6.8h, v0.h[7]\n"
"fmla v12.8h, v6.8h, v1.h[7]\n"
- "ldr x20, [x17, #0x1f8]\n"
+ "mov v7.d[1], x20\n"
"fmla v16.8h, v6.8h, v2.h[7]\n"
"fmla v20.8h, v6.8h, v3.h[7]\n"
+ "ldr x20, [x17, #0x1f8]\n"
"fmla v24.8h, v6.8h, v4.h[7]\n"
"fmla v28.8h, v6.8h, v5.h[7]\n"
"ldr d6, [x17, #0x1e0]\n"
"fmla v9.8h, v7.8h, v0.h[7]\n"
- "mov v6.d[1], x21\n"
"fmla v13.8h, v7.8h, v1.h[7]\n"
"fmla v17.8h, v7.8h, v2.h[7]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.8h, v7.8h, v3.h[7]\n"
"fmla v25.8h, v7.8h, v4.h[7]\n"
"fmla v29.8h, v7.8h, v5.h[7]\n"
"ldr d7, [x17, #0x1f0]\n"
- "mov v7.d[1], x20\n"
"add x17, x17, #0x200\n"
"fmla v10.8h, v6.8h, v0.h[7]\n"
- "ldr x21, [x17, #0x8]\n"
"fmla v14.8h, v6.8h, v1.h[7]\n"
- "ldr x20, [x17, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.8h, v6.8h, v2.h[7]\n"
"fmla v22.8h, v6.8h, v3.h[7]\n"
+ "ldr x20, [x17, #0x18]\n"
"fmla v26.8h, v6.8h, v4.h[7]\n"
"fmla v30.8h, v6.8h, v5.h[7]\n"
"ldr d6, [x17, #0x0]\n"
@@ -5346,8 +5349,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"ldr h3, [x9], #0x2\n"
"ldr h2, [x28], #0x2\n"
"ldr q1, [x17, #0x0]\n"
- "fmla v8.8h, v1.8h, v7.h[0]\n"
"ldr q0, [x17, #0x10]\n"
+ "fmla v8.8h, v1.8h, v7.h[0]\n"
"fmla v12.8h, v1.8h, v6.h[0]\n"
"fmla v16.8h, v1.8h, v5.h[0]\n"
"fmla v20.8h, v1.8h, v4.h[0]\n"
@@ -5381,46 +5384,46 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"cmp x15, x20\n"
"bne 268b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"prfm pstl1keep, [x16, #0x0]\n"
+ "add x26, x16, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x23, x24, x20, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 276f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.8h }, [x20]\n"
- "fmin v8.8h, v8.8h, v0.8h\n"
- "fmin v9.8h, v9.8h, v0.8h\n"
- "fmin v10.8h, v10.8h, v0.8h\n"
- "fmin v11.8h, v11.8h, v0.8h\n"
- "fmin v12.8h, v12.8h, v0.8h\n"
- "fmin v13.8h, v13.8h, v0.8h\n"
- "fmin v14.8h, v14.8h, v0.8h\n"
- "fmin v15.8h, v15.8h, v0.8h\n"
- "fmin v16.8h, v16.8h, v0.8h\n"
- "fmin v17.8h, v17.8h, v0.8h\n"
- "fmin v18.8h, v18.8h, v0.8h\n"
- "fmin v19.8h, v19.8h, v0.8h\n"
- "fmin v20.8h, v20.8h, v0.8h\n"
- "fmin v21.8h, v21.8h, v0.8h\n"
- "fmin v22.8h, v22.8h, v0.8h\n"
- "fmin v23.8h, v23.8h, v0.8h\n"
- "fmin v24.8h, v24.8h, v0.8h\n"
- "fmin v25.8h, v25.8h, v0.8h\n"
- "fmin v26.8h, v26.8h, v0.8h\n"
- "fmin v27.8h, v27.8h, v0.8h\n"
- "fmin v28.8h, v28.8h, v0.8h\n"
- "fmin v29.8h, v29.8h, v0.8h\n"
- "fmin v30.8h, v30.8h, v0.8h\n"
- "fmin v31.8h, v31.8h, v0.8h\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x21]\n"
"ld1r { v0.8h }, [x20]\n"
+ "fmin v8.8h, v8.8h, v1.8h\n"
+ "fmin v9.8h, v9.8h, v1.8h\n"
+ "fmin v10.8h, v10.8h, v1.8h\n"
+ "fmin v11.8h, v11.8h, v1.8h\n"
+ "fmin v12.8h, v12.8h, v1.8h\n"
+ "fmin v13.8h, v13.8h, v1.8h\n"
+ "fmin v14.8h, v14.8h, v1.8h\n"
+ "fmin v15.8h, v15.8h, v1.8h\n"
+ "fmin v16.8h, v16.8h, v1.8h\n"
+ "fmin v17.8h, v17.8h, v1.8h\n"
+ "fmin v18.8h, v18.8h, v1.8h\n"
+ "fmin v19.8h, v19.8h, v1.8h\n"
+ "fmin v20.8h, v20.8h, v1.8h\n"
+ "fmin v21.8h, v21.8h, v1.8h\n"
+ "fmin v22.8h, v22.8h, v1.8h\n"
+ "fmin v23.8h, v23.8h, v1.8h\n"
+ "fmin v24.8h, v24.8h, v1.8h\n"
+ "fmin v25.8h, v25.8h, v1.8h\n"
+ "fmin v26.8h, v26.8h, v1.8h\n"
+ "fmin v27.8h, v27.8h, v1.8h\n"
+ "fmin v28.8h, v28.8h, v1.8h\n"
+ "fmin v29.8h, v29.8h, v1.8h\n"
+ "fmin v30.8h, v30.8h, v1.8h\n"
+ "fmin v31.8h, v31.8h, v1.8h\n"
"fmax v8.8h, v8.8h, v0.8h\n"
"fmax v9.8h, v9.8h, v0.8h\n"
"fmax v10.8h, v10.8h, v0.8h\n"
@@ -5451,255 +5454,255 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"tbz x8, #4, 284f\n"
"st1 { v8.8h }, [x16], #0x10\n"
"st1 { v9.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
- "st1 { v29.8h }, [x21], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v25.8h }, [x23], #0x10\n"
+ "st1 { v28.8h }, [x22], #0x10\n"
+ "st1 { v29.8h }, [x22], #0x10\n"
"tbz x8, #3, 280f\n"
"st1 { v10.8h }, [x16], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
- "st1 { v30.8h }, [x21], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
+ "st1 { v26.8h }, [x23], #0x10\n"
+ "st1 { v30.8h }, [x22], #0x10\n"
"tbz x8, #2, 278f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x8, #1, 277f\n"
"st1 { v11.s }[2], [x16], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
- "st1 { v31.s }[2], [x21], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
+ "st1 { v27.s }[2], [x23], #0x4\n"
+ "st1 { v31.s }[2], [x22], #0x4\n"
"tbz x8, #0, 292f\n"
"st1 { v11.h }[6], [x16]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
- "st1 { v31.h }[6], [x21]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
+ "st1 { v27.h }[6], [x23]\n"
+ "st1 { v31.h }[6], [x22]\n"
"b 292f\n"
"277:" // Height 6: Partial direct writeback: partial_1_28
"tbz x8, #0, 292f\n"
"st1 { v11.h }[4], [x16]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
- "st1 { v31.h }[4], [x21]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
+ "st1 { v27.h }[4], [x23]\n"
+ "st1 { v31.h }[4], [x22]\n"
"b 292f\n"
"278:" // Height 6: Partial direct writeback: partial_2_24
"tbz x8, #1, 279f\n"
"str s11, [x16], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
- "str s31, [x21], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
+ "str s27, [x23], #0x4\n"
+ "str s31, [x22], #0x4\n"
"tbz x8, #0, 292f\n"
"st1 { v11.h }[2], [x16]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
- "st1 { v31.h }[2], [x21]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
+ "st1 { v27.h }[2], [x23]\n"
+ "st1 { v31.h }[2], [x22]\n"
"b 292f\n"
"279:" // Height 6: Partial direct writeback: partial_1_24
"tbz x8, #0, 292f\n"
"str h11, [x16, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
- "str h31, [x21, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
+ "str h27, [x23, #0x0]\n"
+ "str h31, [x22, #0x0]\n"
"b 292f\n"
"280:" // Height 6: Partial direct writeback: partial_4_16
"tbz x8, #2, 282f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x8, #1, 281f\n"
"st1 { v10.s }[2], [x16], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
- "st1 { v30.s }[2], [x21], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
+ "st1 { v26.s }[2], [x23], #0x4\n"
+ "st1 { v30.s }[2], [x22], #0x4\n"
"tbz x8, #0, 292f\n"
"st1 { v10.h }[6], [x16]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
- "st1 { v30.h }[6], [x21]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
+ "st1 { v26.h }[6], [x23]\n"
+ "st1 { v30.h }[6], [x22]\n"
"b 292f\n"
"281:" // Height 6: Partial direct writeback: partial_1_20
"tbz x8, #0, 292f\n"
"st1 { v10.h }[4], [x16]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
- "st1 { v30.h }[4], [x21]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
+ "st1 { v26.h }[4], [x23]\n"
+ "st1 { v30.h }[4], [x22]\n"
"b 292f\n"
"282:" // Height 6: Partial direct writeback: partial_2_16
"tbz x8, #1, 283f\n"
"str s10, [x16], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
- "str s30, [x21], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
+ "str s26, [x23], #0x4\n"
+ "str s30, [x22], #0x4\n"
"tbz x8, #0, 292f\n"
"st1 { v10.h }[2], [x16]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
- "st1 { v30.h }[2], [x21]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
+ "st1 { v26.h }[2], [x23]\n"
+ "st1 { v30.h }[2], [x22]\n"
"b 292f\n"
"283:" // Height 6: Partial direct writeback: partial_1_16
"tbz x8, #0, 292f\n"
"str h10, [x16, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
- "str h30, [x21, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
+ "str h26, [x23, #0x0]\n"
+ "str h30, [x22, #0x0]\n"
"b 292f\n"
"284:" // Height 6: Partial direct writeback: partial_8_0
"tbz x8, #3, 288f\n"
"st1 { v8.8h }, [x16], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v28.8h }, [x22], #0x10\n"
"tbz x8, #2, 286f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x8, #1, 285f\n"
"st1 { v9.s }[2], [x16], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
- "st1 { v29.s }[2], [x21], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
+ "st1 { v25.s }[2], [x23], #0x4\n"
+ "st1 { v29.s }[2], [x22], #0x4\n"
"tbz x8, #0, 292f\n"
"st1 { v9.h }[6], [x16]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
- "st1 { v29.h }[6], [x21]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
+ "st1 { v25.h }[6], [x23]\n"
+ "st1 { v29.h }[6], [x22]\n"
"b 292f\n"
"285:" // Height 6: Partial direct writeback: partial_1_12
"tbz x8, #0, 292f\n"
"st1 { v9.h }[4], [x16]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
- "st1 { v29.h }[4], [x21]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
+ "st1 { v25.h }[4], [x23]\n"
+ "st1 { v29.h }[4], [x22]\n"
"b 292f\n"
"286:" // Height 6: Partial direct writeback: partial_2_8
"tbz x8, #1, 287f\n"
"str s9, [x16], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
- "str s29, [x21], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
+ "str s25, [x23], #0x4\n"
+ "str s29, [x22], #0x4\n"
"tbz x8, #0, 292f\n"
"st1 { v9.h }[2], [x16]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
- "st1 { v29.h }[2], [x21]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
+ "st1 { v25.h }[2], [x23]\n"
+ "st1 { v29.h }[2], [x22]\n"
"b 292f\n"
"287:" // Height 6: Partial direct writeback: partial_1_8
"tbz x8, #0, 292f\n"
"str h9, [x16, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
- "str h29, [x21, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
+ "str h25, [x23, #0x0]\n"
+ "str h29, [x22, #0x0]\n"
"b 292f\n"
"288:" // Height 6: Partial direct writeback: partial_4_0
"tbz x8, #2, 290f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x8, #1, 289f\n"
"st1 { v8.s }[2], [x16], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
"tbz x8, #0, 292f\n"
"st1 { v8.h }[6], [x16]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
- "st1 { v28.h }[6], [x21]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
+ "st1 { v24.h }[6], [x23]\n"
+ "st1 { v28.h }[6], [x22]\n"
"b 292f\n"
"289:" // Height 6: Partial direct writeback: partial_1_4
"tbz x8, #0, 292f\n"
"st1 { v8.h }[4], [x16]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
- "st1 { v28.h }[4], [x21]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
+ "st1 { v24.h }[4], [x23]\n"
+ "st1 { v28.h }[4], [x22]\n"
"b 292f\n"
"290:" // Height 6: Partial direct writeback: partial_2_0
"tbz x8, #1, 291f\n"
"str s8, [x16], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
"tbz x8, #0, 292f\n"
"st1 { v8.h }[2], [x16]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
- "st1 { v28.h }[2], [x21]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
+ "st1 { v24.h }[2], [x23]\n"
+ "st1 { v28.h }[2], [x22]\n"
"b 292f\n"
"291:" // Height 6: Partial direct writeback: partial_1_0
"str h8, [x16, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
- "str h28, [x21, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
+ "str h24, [x23, #0x0]\n"
+ "str h28, [x22, #0x0]\n"
"292:" // Height 6: Partial direct writeback: Done
"b 294f\n"
"293:" // Height 6: Full writeback
@@ -5708,26 +5711,26 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q28, [x22, #0x0]\n"
+ "str q29, [x22, #0x10]\n"
+ "str q30, [x22, #0x20]\n"
+ "str q31, [x22, #0x30]\n"
"294:" // Height 6: Writeback done
"subs x8, x8, #0x20\n"
"bgt 247b\n"
@@ -5743,8 +5746,8 @@ void a64_hybrid_fp16_mla_6x32_a55 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"296:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp
index 8e5f600c83..4e81b724eb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void a64_hybrid_fp16_mla_6x32 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const __fp16 *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void a64_hybrid_fp16_mla_6x32 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -101,10 +103,10 @@ void a64_hybrid_fp16_mla_6x32 (
"cmp %x[M], #0x2\n"
"bgt 99f\n"
"beq 50f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x12, 3f\n"
"ldr q8, [x12, #0x0]\n"
@@ -243,8 +245,8 @@ void a64_hybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"23:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 24f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -268,6 +270,10 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q17, [x10, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "sub x27, x27, #0x8\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"ldr q17, [x10, #0x40]\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
@@ -324,22 +330,21 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q17, [x10, #0x1e0]\n"
"fmla v9.8h, v16.8h, v0.h[7]\n"
"ldr q16, [x10, #0x1f0]\n"
- "sub x27, x27, #0x8\n"
- "add x26, x26, #0x10\n"
+ "add x10, x10, #0x200\n"
"fmla v10.8h, v17.8h, v0.h[7]\n"
+ "ldr q6, [x10, #0x0]\n"
"fmla v11.8h, v16.8h, v0.h[7]\n"
"ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x10\n"
- "add x10, x10, #0x200\n"
- "ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 26b\n"
"27:" // Height 1: Multiply loop: Single iteration only
"fmla v8.8h, v6.8h, v0.h[0]\n"
"ldr q17, [x10, #0x20]\n"
"fmla v9.8h, v7.8h, v0.h[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"ldr q17, [x10, #0x40]\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
@@ -396,26 +401,23 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q17, [x10, #0x1e0]\n"
"fmla v9.8h, v16.8h, v0.h[7]\n"
"ldr q16, [x10, #0x1f0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "add x10, x10, #0x200\n"
"fmla v10.8h, v17.8h, v0.h[7]\n"
"fmla v11.8h, v16.8h, v0.h[7]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x200\n"
"28:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 30f\n"
"29:" // Height 1: Multiply loop: Odd block loop
"ldr h0, [x26], #0x2\n"
- "ldr q16, [x10, #0x0]\n"
- "fmla v8.8h, v16.8h, v0.h[0]\n"
+ "ldr q17, [x10, #0x0]\n"
"sub x27, x27, #0x1\n"
- "ldr q17, [x10, #0x10]\n"
- "ldr q16, [x10, #0x20]\n"
- "fmla v9.8h, v17.8h, v0.h[0]\n"
- "fmla v10.8h, v16.8h, v0.h[0]\n"
+ "ldr q16, [x10, #0x10]\n"
+ "fmla v8.8h, v17.8h, v0.h[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ "fmla v9.8h, v16.8h, v0.h[0]\n"
"ldr q16, [x10, #0x30]\n"
- "fmla v11.8h, v16.8h, v0.h[0]\n"
"add x10, x10, #0x40\n"
+ "fmla v10.8h, v17.8h, v0.h[0]\n"
+ "fmla v11.8h, v16.8h, v0.h[0]\n"
"cbnz x27, 29b\n"
"30:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -424,9 +426,9 @@ void a64_hybrid_fp16_mla_6x32 (
"bne 23b\n"
"prfm pstl1keep, [x9, #0x0]\n"
"tbz %x[flags], #1, 31f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.8h }, [x21]\n"
"ld1r { v16.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v17.8h\n"
"fmin v9.8h, v9.8h, v17.8h\n"
@@ -544,167 +546,167 @@ void a64_hybrid_fp16_mla_6x32 (
"bgt 2b\n"
"b 296f\n"
"50:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"51:" // Height 2: Column loop
"cbz x12, 52f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x12, x12, #0x40\n"
"b 71f\n"
"52:" // Height 2: no bias
"tbz %x[flags], #0, 70f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x20\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
"bge 69f\n"
"tbz x11, #4, 60f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
"ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
"tbz x11, #3, 56f\n"
"ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
"tbz x11, #2, 54f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"tbz x11, #1, 53f\n"
"ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
"tbz x11, #0, 68f\n"
"ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
+ "ld1 { v15.h }[6], [x26]\n"
"b 68f\n"
"53:" // Height 2: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x11, #0, 68f\n"
"ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
+ "ld1 { v15.h }[4], [x26]\n"
"b 68f\n"
"54:" // Height 2: Partial accumulate: partial_2_24
"tbz x11, #1, 55f\n"
"ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
"tbz x11, #0, 68f\n"
"ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
+ "ld1 { v15.h }[2], [x26]\n"
"b 68f\n"
"55:" // Height 2: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x11, #0, 68f\n"
"ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
"b 68f\n"
"56:" // Height 2: Partial accumulate: partial_4_16
"tbz x11, #2, 58f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"tbz x11, #1, 57f\n"
"ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
"tbz x11, #0, 68f\n"
"ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
+ "ld1 { v14.h }[6], [x26]\n"
"b 68f\n"
"57:" // Height 2: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x11, #0, 68f\n"
"ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
+ "ld1 { v14.h }[4], [x26]\n"
"b 68f\n"
"58:" // Height 2: Partial accumulate: partial_2_16
"tbz x11, #1, 59f\n"
"ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
"tbz x11, #0, 68f\n"
"ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
+ "ld1 { v14.h }[2], [x26]\n"
"b 68f\n"
"59:" // Height 2: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x11, #0, 68f\n"
"ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
"b 68f\n"
"60:" // Height 2: Partial accumulate: partial_8_0
"tbz x11, #3, 64f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
"tbz x11, #2, 62f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"tbz x11, #1, 61f\n"
"ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
"tbz x11, #0, 68f\n"
"ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
+ "ld1 { v13.h }[6], [x26]\n"
"b 68f\n"
"61:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x11, #0, 68f\n"
"ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
+ "ld1 { v13.h }[4], [x26]\n"
"b 68f\n"
"62:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 63f\n"
"ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
"tbz x11, #0, 68f\n"
"ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
+ "ld1 { v13.h }[2], [x26]\n"
"b 68f\n"
"63:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x11, #0, 68f\n"
"ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
"b 68f\n"
"64:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 66f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"tbz x11, #1, 65f\n"
"ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
"tbz x11, #0, 68f\n"
"ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
+ "ld1 { v12.h }[6], [x26]\n"
"b 68f\n"
"65:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x11, #0, 68f\n"
"ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
+ "ld1 { v12.h }[4], [x26]\n"
"b 68f\n"
"66:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 67f\n"
"ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
"tbz x11, #0, 68f\n"
"ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
+ "ld1 { v12.h }[2], [x26]\n"
"b 68f\n"
"67:" // Height 2: Partial accumulate: partial_1_0
"ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
"68:" // Height 2: Partial accumulate: Done
"sub x9, x9, x20\n"
@@ -714,10 +716,10 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"b 71f\n"
"70:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -732,8 +734,8 @@ void a64_hybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"72:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -765,22 +767,22 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"fmla v14.8h, v17.8h, v1.h[0]\n"
"ldr q17, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
"fmla v15.8h, v16.8h, v1.h[0]\n"
"ldr q16, [x10, #0x50]\n"
- "cmp x27, #0x10\n"
"fmla v8.8h, v17.8h, v0.h[1]\n"
"fmla v12.8h, v17.8h, v1.h[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v9.8h, v16.8h, v0.h[1]\n"
"fmla v13.8h, v16.8h, v1.h[1]\n"
"ldr q16, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.8h, v17.8h, v0.h[1]\n"
"fmla v14.8h, v17.8h, v1.h[1]\n"
"ldr q17, [x10, #0x80]\n"
@@ -872,18 +874,18 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v13.8h, v7.8h, v1.h[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x25, x25, #0x10\n"
+ "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.8h, v17.8h, v0.h[0]\n"
"fmla v14.8h, v17.8h, v1.h[0]\n"
"ldr q17, [x10, #0x40]\n"
- "sub x27, x27, #0x8\n"
"fmla v11.8h, v16.8h, v0.h[0]\n"
"fmla v15.8h, v16.8h, v1.h[0]\n"
"ldr q16, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v8.8h, v17.8h, v0.h[1]\n"
"fmla v12.8h, v17.8h, v1.h[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v9.8h, v16.8h, v0.h[1]\n"
"fmla v13.8h, v16.8h, v1.h[1]\n"
"ldr q16, [x10, #0x70]\n"
@@ -978,9 +980,9 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v9.8h, v16.8h, v1.h[0]\n"
"fmla v13.8h, v16.8h, v0.h[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v10.8h, v17.8h, v1.h[0]\n"
"fmla v14.8h, v17.8h, v0.h[0]\n"
- "add x10, x10, #0x40\n"
"fmla v11.8h, v16.8h, v1.h[0]\n"
"fmla v15.8h, v16.8h, v0.h[0]\n"
"cbnz x27, 78b\n"
@@ -990,13 +992,13 @@ void a64_hybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 72b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add x26, x9, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 80f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.8h }, [x21]\n"
"ld1r { v16.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v17.8h\n"
"fmin v9.8h, v9.8h, v17.8h\n"
@@ -1020,127 +1022,127 @@ void a64_hybrid_fp16_mla_6x32 (
"tbz x11, #4, 88f\n"
"st1 { v8.8h }, [x9], #0x10\n"
"st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
"tbz x11, #3, 84f\n"
"st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
"tbz x11, #2, 82f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d15, [x26], #0x8\n"
"tbz x11, #1, 81f\n"
"st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
"tbz x11, #0, 96f\n"
"st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
+ "st1 { v15.h }[6], [x26]\n"
"b 96f\n"
"81:" // Height 2: Partial direct writeback: partial_1_28
"tbz x11, #0, 96f\n"
"st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
+ "st1 { v15.h }[4], [x26]\n"
"b 96f\n"
"82:" // Height 2: Partial direct writeback: partial_2_24
"tbz x11, #1, 83f\n"
"str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
+ "str s15, [x26], #0x4\n"
"tbz x11, #0, 96f\n"
"st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
+ "st1 { v15.h }[2], [x26]\n"
"b 96f\n"
"83:" // Height 2: Partial direct writeback: partial_1_24
"tbz x11, #0, 96f\n"
"str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
"b 96f\n"
"84:" // Height 2: Partial direct writeback: partial_4_16
"tbz x11, #2, 86f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d14, [x26], #0x8\n"
"tbz x11, #1, 85f\n"
"st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
"tbz x11, #0, 96f\n"
"st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
+ "st1 { v14.h }[6], [x26]\n"
"b 96f\n"
"85:" // Height 2: Partial direct writeback: partial_1_20
"tbz x11, #0, 96f\n"
"st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
+ "st1 { v14.h }[4], [x26]\n"
"b 96f\n"
"86:" // Height 2: Partial direct writeback: partial_2_16
"tbz x11, #1, 87f\n"
"str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
+ "str s14, [x26], #0x4\n"
"tbz x11, #0, 96f\n"
"st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
+ "st1 { v14.h }[2], [x26]\n"
"b 96f\n"
"87:" // Height 2: Partial direct writeback: partial_1_16
"tbz x11, #0, 96f\n"
"str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
"b 96f\n"
"88:" // Height 2: Partial direct writeback: partial_8_0
"tbz x11, #3, 92f\n"
"st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
"tbz x11, #2, 90f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x11, #1, 89f\n"
"st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
"tbz x11, #0, 96f\n"
"st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
+ "st1 { v13.h }[6], [x26]\n"
"b 96f\n"
"89:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 96f\n"
"st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
+ "st1 { v13.h }[4], [x26]\n"
"b 96f\n"
"90:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 91f\n"
"str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
+ "str s13, [x26], #0x4\n"
"tbz x11, #0, 96f\n"
"st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
+ "st1 { v13.h }[2], [x26]\n"
"b 96f\n"
"91:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 96f\n"
"str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
"b 96f\n"
"92:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 94f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x11, #1, 93f\n"
"st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
"tbz x11, #0, 96f\n"
"st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
+ "st1 { v12.h }[6], [x26]\n"
"b 96f\n"
"93:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 96f\n"
"st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
+ "st1 { v12.h }[4], [x26]\n"
"b 96f\n"
"94:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 95f\n"
"str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
+ "str s12, [x26], #0x4\n"
"tbz x11, #0, 96f\n"
"st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
+ "st1 { v12.h }[2], [x26]\n"
"b 96f\n"
"95:" // Height 2: Partial direct writeback: partial_1_0
"str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
"96:" // Height 2: Partial direct writeback: Done
"b 98f\n"
"97:" // Height 2: Full writeback
@@ -1149,214 +1151,214 @@ void a64_hybrid_fp16_mla_6x32 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
"98:" // Height 2: Writeback done
"subs x11, x11, #0x20\n"
"bgt 51b\n"
"b 296f\n"
"99:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"100:" // Height 3: Column loop
"cbz x12, 101f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 120f\n"
"101:" // Height 3: no bias
"tbz %x[flags], #0, 119f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
"cmp x11, #0x20\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"bge 118f\n"
"tbz x11, #4, 109f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
"ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
"tbz x11, #3, 105f\n"
"ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
"tbz x11, #2, 103f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x11, #1, 102f\n"
"ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
"tbz x11, #0, 117f\n"
"ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
"b 117f\n"
"102:" // Height 3: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x11, #0, 117f\n"
"ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
"b 117f\n"
"103:" // Height 3: Partial accumulate: partial_2_24
"tbz x11, #1, 104f\n"
"ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
"tbz x11, #0, 117f\n"
"ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
"b 117f\n"
"104:" // Height 3: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x11, #0, 117f\n"
"ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
"b 117f\n"
"105:" // Height 3: Partial accumulate: partial_4_16
"tbz x11, #2, 107f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x11, #1, 106f\n"
"ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
"tbz x11, #0, 117f\n"
"ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
"b 117f\n"
"106:" // Height 3: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x11, #0, 117f\n"
"ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
"b 117f\n"
"107:" // Height 3: Partial accumulate: partial_2_16
"tbz x11, #1, 108f\n"
"ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
"tbz x11, #0, 117f\n"
"ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
"b 117f\n"
"108:" // Height 3: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x11, #0, 117f\n"
"ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
"b 117f\n"
"109:" // Height 3: Partial accumulate: partial_8_0
"tbz x11, #3, 113f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
"tbz x11, #2, 111f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x11, #1, 110f\n"
"ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
"tbz x11, #0, 117f\n"
"ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
"b 117f\n"
"110:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x11, #0, 117f\n"
"ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
"b 117f\n"
"111:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 112f\n"
"ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
"tbz x11, #0, 117f\n"
"ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
"b 117f\n"
"112:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x11, #0, 117f\n"
"ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
"b 117f\n"
"113:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 115f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
"tbz x11, #1, 114f\n"
"ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
"tbz x11, #0, 117f\n"
"ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
"b 117f\n"
"114:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x11, #0, 117f\n"
"ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
"b 117f\n"
"115:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 116f\n"
"ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
"tbz x11, #0, 117f\n"
"ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
"b 117f\n"
"116:" // Height 3: Partial accumulate: partial_1_0
"ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
"117:" // Height 3: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 120f\n"
@@ -1365,14 +1367,14 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
"b 120f\n"
"119:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -1391,8 +1393,8 @@ void a64_hybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"121:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 122f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1432,18 +1434,18 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v17.8h, v7.8h, v2.h[0]\n"
"ldr q20, [x10, #0x30]\n"
"add x24, x24, #0x10\n"
- "fmla v10.8h, v21.8h, v0.h[0]\n"
- "fmla v14.8h, v21.8h, v1.h[0]\n"
"cmp x27, #0x10\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "fmla v10.8h, v21.8h, v0.h[0]\n"
+ "fmla v14.8h, v21.8h, v1.h[0]\n"
"fmla v18.8h, v21.8h, v2.h[0]\n"
"ldr q21, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v11.8h, v20.8h, v0.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v15.8h, v20.8h, v1.h[0]\n"
"fmla v19.8h, v20.8h, v2.h[0]\n"
"ldr q20, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v8.8h, v21.8h, v0.h[1]\n"
"fmla v12.8h, v21.8h, v1.h[1]\n"
"fmla v16.8h, v21.8h, v2.h[1]\n"
@@ -1574,14 +1576,14 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v17.8h, v7.8h, v2.h[0]\n"
"ldr q20, [x10, #0x30]\n"
"sub x27, x27, #0x8\n"
- "fmla v10.8h, v21.8h, v0.h[0]\n"
- "fmla v14.8h, v21.8h, v1.h[0]\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "fmla v10.8h, v21.8h, v0.h[0]\n"
+ "fmla v14.8h, v21.8h, v1.h[0]\n"
"fmla v18.8h, v21.8h, v2.h[0]\n"
"ldr q21, [x10, #0x40]\n"
- "fmla v11.8h, v20.8h, v0.h[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ "fmla v11.8h, v20.8h, v0.h[0]\n"
"fmla v15.8h, v20.8h, v1.h[0]\n"
"fmla v19.8h, v20.8h, v2.h[0]\n"
"ldr q20, [x10, #0x50]\n"
@@ -1704,9 +1706,9 @@ void a64_hybrid_fp16_mla_6x32 (
"sub x27, x27, #0x1\n"
"ldr h0, [x24], #0x2\n"
"ldr q21, [x10, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
"fmla v8.8h, v21.8h, v2.h[0]\n"
"fmla v12.8h, v21.8h, v1.h[0]\n"
- "ldr q20, [x10, #0x10]\n"
"fmla v16.8h, v21.8h, v0.h[0]\n"
"ldr q21, [x10, #0x20]\n"
"fmla v9.8h, v20.8h, v2.h[0]\n"
@@ -1727,15 +1729,15 @@ void a64_hybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 121b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "add x26, x9, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 129f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v21.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.8h }, [x21]\n"
"ld1r { v20.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v21.8h\n"
"fmin v9.8h, v9.8h, v21.8h\n"
@@ -1767,159 +1769,159 @@ void a64_hybrid_fp16_mla_6x32 (
"tbz x11, #4, 137f\n"
"st1 { v8.8h }, [x9], #0x10\n"
"st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
"tbz x11, #3, 133f\n"
"st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
"tbz x11, #2, 131f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x11, #1, 130f\n"
"st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
"tbz x11, #0, 145f\n"
"st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
"b 145f\n"
"130:" // Height 3: Partial direct writeback: partial_1_28
"tbz x11, #0, 145f\n"
"st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
"b 145f\n"
"131:" // Height 3: Partial direct writeback: partial_2_24
"tbz x11, #1, 132f\n"
"str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
"tbz x11, #0, 145f\n"
"st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
"b 145f\n"
"132:" // Height 3: Partial direct writeback: partial_1_24
"tbz x11, #0, 145f\n"
"str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
"b 145f\n"
"133:" // Height 3: Partial direct writeback: partial_4_16
"tbz x11, #2, 135f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x11, #1, 134f\n"
"st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
"tbz x11, #0, 145f\n"
"st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
"b 145f\n"
"134:" // Height 3: Partial direct writeback: partial_1_20
"tbz x11, #0, 145f\n"
"st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
"b 145f\n"
"135:" // Height 3: Partial direct writeback: partial_2_16
"tbz x11, #1, 136f\n"
"str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
"tbz x11, #0, 145f\n"
"st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
"b 145f\n"
"136:" // Height 3: Partial direct writeback: partial_1_16
"tbz x11, #0, 145f\n"
"str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
"b 145f\n"
"137:" // Height 3: Partial direct writeback: partial_8_0
"tbz x11, #3, 141f\n"
"st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
"tbz x11, #2, 139f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x11, #1, 138f\n"
"st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
"tbz x11, #0, 145f\n"
"st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
"b 145f\n"
"138:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 145f\n"
"st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
"b 145f\n"
"139:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 140f\n"
"str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
"tbz x11, #0, 145f\n"
"st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
"b 145f\n"
"140:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 145f\n"
"str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
"b 145f\n"
"141:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 143f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x11, #1, 142f\n"
"st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
"tbz x11, #0, 145f\n"
"st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
"b 145f\n"
"142:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 145f\n"
"st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
"b 145f\n"
"143:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 144f\n"
"str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
"tbz x11, #0, 145f\n"
"st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
"b 145f\n"
"144:" // Height 3: Partial direct writeback: partial_1_0
"str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
"145:" // Height 3: Partial direct writeback: Done
"b 147f\n"
"146:" // Height 3: Full writeback
@@ -1928,39 +1930,39 @@ void a64_hybrid_fp16_mla_6x32 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"147:" // Height 3: Writeback done
"subs x11, x11, #0x20\n"
"bgt 100b\n"
"b 296f\n"
"148:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"149:" // Height 4: Column loop
"cbz x12, 150f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1968,215 +1970,215 @@ void a64_hybrid_fp16_mla_6x32 (
"150:" // Height 4: no bias
"tbz %x[flags], #0, 168f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
"cmp x11, #0x20\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
+ "add x24, x25, x20, LSL #1\n"
"bge 167f\n"
"tbz x11, #4, 158f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
"ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
"tbz x11, #3, 154f\n"
"ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
"tbz x11, #2, 152f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x11, #1, 151f\n"
"ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
"tbz x11, #0, 166f\n"
"ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
"b 166f\n"
"151:" // Height 4: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x11, #0, 166f\n"
"ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
"b 166f\n"
"152:" // Height 4: Partial accumulate: partial_2_24
"tbz x11, #1, 153f\n"
"ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
"tbz x11, #0, 166f\n"
"ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
"b 166f\n"
"153:" // Height 4: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x11, #0, 166f\n"
"ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
"b 166f\n"
"154:" // Height 4: Partial accumulate: partial_4_16
"tbz x11, #2, 156f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x11, #1, 155f\n"
"ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
"tbz x11, #0, 166f\n"
"ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
"b 166f\n"
"155:" // Height 4: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x11, #0, 166f\n"
"ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
"b 166f\n"
"156:" // Height 4: Partial accumulate: partial_2_16
"tbz x11, #1, 157f\n"
"ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
"tbz x11, #0, 166f\n"
"ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
"b 166f\n"
"157:" // Height 4: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x11, #0, 166f\n"
"ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
"b 166f\n"
"158:" // Height 4: Partial accumulate: partial_8_0
"tbz x11, #3, 162f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
"tbz x11, #2, 160f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x11, #1, 159f\n"
"ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
"tbz x11, #0, 166f\n"
"ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
"b 166f\n"
"159:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x11, #0, 166f\n"
"ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
"b 166f\n"
"160:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 161f\n"
"ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
"tbz x11, #0, 166f\n"
"ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
"b 166f\n"
"161:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x11, #0, 166f\n"
"ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
"b 166f\n"
"162:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 164f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x11, #1, 163f\n"
"ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
"tbz x11, #0, 166f\n"
"ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
"b 166f\n"
"163:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x11, #0, 166f\n"
"ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
"b 166f\n"
"164:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 165f\n"
"ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
"tbz x11, #0, 166f\n"
"ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
"b 166f\n"
"165:" // Height 4: Partial accumulate: partial_1_0
"ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
"166:" // Height 4: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 169f\n"
@@ -2185,18 +2187,18 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"b 169f\n"
"168:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -2219,8 +2221,8 @@ void a64_hybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"170:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 171f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2440,14 +2442,14 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"add x23, x23, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"ldr q24, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.8h, v25.8h, v0.h[0]\n"
"fmla v14.8h, v25.8h, v1.h[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x8\n"
"prfm pldl1keep, [x24, #0x80]\n"
"fmla v18.8h, v25.8h, v2.h[0]\n"
"fmla v22.8h, v25.8h, v3.h[0]\n"
@@ -2633,17 +2635,17 @@ void a64_hybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 170b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 178f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v25.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v25.8h }, [x21]\n"
"ld1r { v24.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v25.8h\n"
"fmin v9.8h, v9.8h, v25.8h\n"
@@ -2683,191 +2685,191 @@ void a64_hybrid_fp16_mla_6x32 (
"tbz x11, #4, 186f\n"
"st1 { v8.8h }, [x9], #0x10\n"
"st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
"tbz x11, #3, 182f\n"
"st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
"tbz x11, #2, 180f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
"tbz x11, #1, 179f\n"
"st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
"tbz x11, #0, 194f\n"
"st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
"b 194f\n"
"179:" // Height 4: Partial direct writeback: partial_1_28
"tbz x11, #0, 194f\n"
"st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
"b 194f\n"
"180:" // Height 4: Partial direct writeback: partial_2_24
"tbz x11, #1, 181f\n"
"str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
"tbz x11, #0, 194f\n"
"st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
"b 194f\n"
"181:" // Height 4: Partial direct writeback: partial_1_24
"tbz x11, #0, 194f\n"
"str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
"b 194f\n"
"182:" // Height 4: Partial direct writeback: partial_4_16
"tbz x11, #2, 184f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
"tbz x11, #1, 183f\n"
"st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
"tbz x11, #0, 194f\n"
"st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
"b 194f\n"
"183:" // Height 4: Partial direct writeback: partial_1_20
"tbz x11, #0, 194f\n"
"st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
"b 194f\n"
"184:" // Height 4: Partial direct writeback: partial_2_16
"tbz x11, #1, 185f\n"
"str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
"tbz x11, #0, 194f\n"
"st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
"b 194f\n"
"185:" // Height 4: Partial direct writeback: partial_1_16
"tbz x11, #0, 194f\n"
"str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
"b 194f\n"
"186:" // Height 4: Partial direct writeback: partial_8_0
"tbz x11, #3, 190f\n"
"st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
"tbz x11, #2, 188f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
"tbz x11, #1, 187f\n"
"st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
"tbz x11, #0, 194f\n"
"st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
"b 194f\n"
"187:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 194f\n"
"st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
"b 194f\n"
"188:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 189f\n"
"str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
"tbz x11, #0, 194f\n"
"st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
"b 194f\n"
"189:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 194f\n"
"str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
"b 194f\n"
"190:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 192f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x11, #1, 191f\n"
"st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
"tbz x11, #0, 194f\n"
"st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
"b 194f\n"
"191:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 194f\n"
"st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
"b 194f\n"
"192:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 193f\n"
"str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
"tbz x11, #0, 194f\n"
"st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
"b 194f\n"
"193:" // Height 4: Partial direct writeback: partial_1_0
"str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
"194:" // Height 4: Partial direct writeback: Done
"b 196f\n"
"195:" // Height 4: Full writeback
@@ -2876,43 +2878,43 @@ void a64_hybrid_fp16_mla_6x32 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
"196:" // Height 4: Writeback done
"subs x11, x11, #0x20\n"
"bgt 149b\n"
"b 296f\n"
"197:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"198:" // Height 5: Column loop
"cbz x12, 199f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -2924,248 +2926,248 @@ void a64_hybrid_fp16_mla_6x32 (
"199:" // Height 5: no bias
"tbz %x[flags], #0, 217f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "cmp x11, #0x20\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
- "cmp x11, #0x20\n"
- "add x22, x23, x20, LSL #1\n"
"bge 216f\n"
"tbz x11, #4, 207f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
"ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
+ "ld1 { v25.8h }, [x23], #0x10\n"
"tbz x11, #3, 203f\n"
"ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
+ "ld1 { v26.8h }, [x23], #0x10\n"
"tbz x11, #2, 201f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x11, #1, 200f\n"
"ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
"tbz x11, #0, 215f\n"
"ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
+ "ld1 { v27.h }[6], [x23]\n"
"b 215f\n"
"200:" // Height 5: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x11, #0, 215f\n"
"ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
+ "ld1 { v27.h }[4], [x23]\n"
"b 215f\n"
"201:" // Height 5: Partial accumulate: partial_2_24
"tbz x11, #1, 202f\n"
"ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
"tbz x11, #0, 215f\n"
"ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
+ "ld1 { v27.h }[2], [x23]\n"
"b 215f\n"
"202:" // Height 5: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x11, #0, 215f\n"
"ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
+ "ldr h27, [x23, #0x0]\n"
"b 215f\n"
"203:" // Height 5: Partial accumulate: partial_4_16
"tbz x11, #2, 205f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x11, #1, 204f\n"
"ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
+ "ld1 { v26.s }[2], [x23], #0x4\n"
"tbz x11, #0, 215f\n"
"ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
+ "ld1 { v26.h }[6], [x23]\n"
"b 215f\n"
"204:" // Height 5: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x11, #0, 215f\n"
"ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
+ "ld1 { v26.h }[4], [x23]\n"
"b 215f\n"
"205:" // Height 5: Partial accumulate: partial_2_16
"tbz x11, #1, 206f\n"
"ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
+ "ldr s26, [x23], #0x4\n"
"tbz x11, #0, 215f\n"
"ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
+ "ld1 { v26.h }[2], [x23]\n"
"b 215f\n"
"206:" // Height 5: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x11, #0, 215f\n"
"ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
+ "ldr h26, [x23, #0x0]\n"
"b 215f\n"
"207:" // Height 5: Partial accumulate: partial_8_0
"tbz x11, #3, 211f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
"tbz x11, #2, 209f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x11, #1, 208f\n"
"ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
"tbz x11, #0, 215f\n"
"ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
+ "ld1 { v25.h }[6], [x23]\n"
"b 215f\n"
"208:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x11, #0, 215f\n"
"ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
+ "ld1 { v25.h }[4], [x23]\n"
"b 215f\n"
"209:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 210f\n"
"ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
"tbz x11, #0, 215f\n"
"ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v25.h }[2], [x23]\n"
"b 215f\n"
"210:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x11, #0, 215f\n"
"ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
"b 215f\n"
"211:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 213f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x11, #1, 212f\n"
"ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
"tbz x11, #0, 215f\n"
"ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
+ "ld1 { v24.h }[6], [x23]\n"
"b 215f\n"
"212:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x11, #0, 215f\n"
"ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
+ "ld1 { v24.h }[4], [x23]\n"
"b 215f\n"
"213:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 214f\n"
"ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
"tbz x11, #0, 215f\n"
"ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
+ "ld1 { v24.h }[2], [x23]\n"
"b 215f\n"
"214:" // Height 5: Partial accumulate: partial_1_0
"ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
+ "ldr h24, [x23, #0x0]\n"
"215:" // Height 5: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 218f\n"
@@ -3174,22 +3176,22 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
"b 218f\n"
"217:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -3216,8 +3218,8 @@ void a64_hybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"219:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 220f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -3479,12 +3481,12 @@ void a64_hybrid_fp16_mla_6x32 (
"add x22, x22, #0x10\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
- "sub x27, x27, #0x8\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"ldr q28, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x8\n"
"fmla v10.8h, v29.8h, v0.h[0]\n"
"fmla v14.8h, v29.8h, v1.h[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
@@ -3677,9 +3679,9 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr h1, [x23], #0x2\n"
"ldr h0, [x22], #0x2\n"
"ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
"fmla v8.8h, v29.8h, v4.h[0]\n"
"fmla v12.8h, v29.8h, v3.h[0]\n"
- "ldr q28, [x10, #0x10]\n"
"fmla v16.8h, v29.8h, v2.h[0]\n"
"fmla v20.8h, v29.8h, v1.h[0]\n"
"fmla v24.8h, v29.8h, v0.h[0]\n"
@@ -3708,19 +3710,19 @@ void a64_hybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 219b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 227f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v29.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v29.8h }, [x21]\n"
"ld1r { v28.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v29.8h\n"
"fmin v9.8h, v9.8h, v29.8h\n"
@@ -3768,223 +3770,223 @@ void a64_hybrid_fp16_mla_6x32 (
"tbz x11, #4, 235f\n"
"st1 { v8.8h }, [x9], #0x10\n"
"st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v25.8h }, [x23], #0x10\n"
"tbz x11, #3, 231f\n"
"st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
+ "st1 { v26.8h }, [x23], #0x10\n"
"tbz x11, #2, 229f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x11, #1, 228f\n"
"st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
+ "st1 { v27.s }[2], [x23], #0x4\n"
"tbz x11, #0, 243f\n"
"st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
+ "st1 { v27.h }[6], [x23]\n"
"b 243f\n"
"228:" // Height 5: Partial direct writeback: partial_1_28
"tbz x11, #0, 243f\n"
"st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
+ "st1 { v27.h }[4], [x23]\n"
"b 243f\n"
"229:" // Height 5: Partial direct writeback: partial_2_24
"tbz x11, #1, 230f\n"
"str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
+ "str s27, [x23], #0x4\n"
"tbz x11, #0, 243f\n"
"st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
+ "st1 { v27.h }[2], [x23]\n"
"b 243f\n"
"230:" // Height 5: Partial direct writeback: partial_1_24
"tbz x11, #0, 243f\n"
"str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
+ "str h27, [x23, #0x0]\n"
"b 243f\n"
"231:" // Height 5: Partial direct writeback: partial_4_16
"tbz x11, #2, 233f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x11, #1, 232f\n"
"st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
+ "st1 { v26.s }[2], [x23], #0x4\n"
"tbz x11, #0, 243f\n"
"st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
+ "st1 { v26.h }[6], [x23]\n"
"b 243f\n"
"232:" // Height 5: Partial direct writeback: partial_1_20
"tbz x11, #0, 243f\n"
"st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
+ "st1 { v26.h }[4], [x23]\n"
"b 243f\n"
"233:" // Height 5: Partial direct writeback: partial_2_16
"tbz x11, #1, 234f\n"
"str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
+ "str s26, [x23], #0x4\n"
"tbz x11, #0, 243f\n"
"st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
+ "st1 { v26.h }[2], [x23]\n"
"b 243f\n"
"234:" // Height 5: Partial direct writeback: partial_1_16
"tbz x11, #0, 243f\n"
"str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
+ "str h26, [x23, #0x0]\n"
"b 243f\n"
"235:" // Height 5: Partial direct writeback: partial_8_0
"tbz x11, #3, 239f\n"
"st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
"tbz x11, #2, 237f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x11, #1, 236f\n"
"st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
+ "st1 { v25.s }[2], [x23], #0x4\n"
"tbz x11, #0, 243f\n"
"st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
+ "st1 { v25.h }[6], [x23]\n"
"b 243f\n"
"236:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 243f\n"
"st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
+ "st1 { v25.h }[4], [x23]\n"
"b 243f\n"
"237:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 238f\n"
"str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
+ "str s25, [x23], #0x4\n"
"tbz x11, #0, 243f\n"
"st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
+ "st1 { v25.h }[2], [x23]\n"
"b 243f\n"
"238:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 243f\n"
"str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
+ "str h25, [x23, #0x0]\n"
"b 243f\n"
"239:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 241f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x11, #1, 240f\n"
"st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
"tbz x11, #0, 243f\n"
"st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
+ "st1 { v24.h }[6], [x23]\n"
"b 243f\n"
"240:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 243f\n"
"st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
+ "st1 { v24.h }[4], [x23]\n"
"b 243f\n"
"241:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 242f\n"
"str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
"tbz x11, #0, 243f\n"
"st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
+ "st1 { v24.h }[2], [x23]\n"
"b 243f\n"
"242:" // Height 5: Partial direct writeback: partial_1_0
"str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
+ "str h24, [x23, #0x0]\n"
"243:" // Height 5: Partial direct writeback: Done
"b 245f\n"
"244:" // Height 5: Full writeback
@@ -3993,50 +3995,51 @@ void a64_hybrid_fp16_mla_6x32 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"245:" // Height 5: Writeback done
"subs x11, x11, #0x20\n"
"bgt 198b\n"
"b 296f\n"
"246:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0xc\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"247:" // Height 6: Column loop
"cbz x12, 248f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -4052,281 +4055,281 @@ void a64_hybrid_fp16_mla_6x32 (
"248:" // Height 6: no bias
"tbz %x[flags], #0, 266f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "cmp x11, #0x20\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
- "cmp x11, #0x20\n"
- "add x21, x22, x20, LSL #1\n"
"bge 265f\n"
"tbz x11, #4, 256f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
+ "ld1 { v28.8h }, [x22], #0x10\n"
"ld1 { v9.8h }, [x9], #0x10\n"
- "ld1 { v13.8h }, [x25], #0x10\n"
- "ld1 { v17.8h }, [x24], #0x10\n"
- "ld1 { v21.8h }, [x23], #0x10\n"
- "ld1 { v25.8h }, [x22], #0x10\n"
- "ld1 { v29.8h }, [x21], #0x10\n"
+ "ld1 { v13.8h }, [x26], #0x10\n"
+ "ld1 { v17.8h }, [x25], #0x10\n"
+ "ld1 { v21.8h }, [x24], #0x10\n"
+ "ld1 { v25.8h }, [x23], #0x10\n"
+ "ld1 { v29.8h }, [x22], #0x10\n"
"tbz x11, #3, 252f\n"
"ld1 { v10.8h }, [x9], #0x10\n"
- "ld1 { v14.8h }, [x25], #0x10\n"
- "ld1 { v18.8h }, [x24], #0x10\n"
- "ld1 { v22.8h }, [x23], #0x10\n"
- "ld1 { v26.8h }, [x22], #0x10\n"
- "ld1 { v30.8h }, [x21], #0x10\n"
+ "ld1 { v14.8h }, [x26], #0x10\n"
+ "ld1 { v18.8h }, [x25], #0x10\n"
+ "ld1 { v22.8h }, [x24], #0x10\n"
+ "ld1 { v26.8h }, [x23], #0x10\n"
+ "ld1 { v30.8h }, [x22], #0x10\n"
"tbz x11, #2, 250f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x11, #1, 249f\n"
"ld1 { v11.s }[2], [x9], #0x4\n"
- "ld1 { v15.s }[2], [x25], #0x4\n"
+ "ld1 { v15.s }[2], [x26], #0x4\n"
"mov x20, #0x3c\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "ld1 { v23.s }[2], [x23], #0x4\n"
- "ld1 { v27.s }[2], [x22], #0x4\n"
- "ld1 { v31.s }[2], [x21], #0x4\n"
+ "ld1 { v19.s }[2], [x25], #0x4\n"
+ "ld1 { v23.s }[2], [x24], #0x4\n"
+ "ld1 { v27.s }[2], [x23], #0x4\n"
+ "ld1 { v31.s }[2], [x22], #0x4\n"
"tbz x11, #0, 264f\n"
"ld1 { v11.h }[6], [x9]\n"
- "ld1 { v15.h }[6], [x25]\n"
- "ld1 { v19.h }[6], [x24]\n"
- "ld1 { v23.h }[6], [x23]\n"
- "ld1 { v27.h }[6], [x22]\n"
- "ld1 { v31.h }[6], [x21]\n"
+ "ld1 { v15.h }[6], [x26]\n"
+ "ld1 { v19.h }[6], [x25]\n"
+ "ld1 { v23.h }[6], [x24]\n"
+ "ld1 { v27.h }[6], [x23]\n"
+ "ld1 { v31.h }[6], [x22]\n"
"b 264f\n"
"249:" // Height 6: Partial accumulate: partial_1_28
"mov x20, #0x38\n"
"tbz x11, #0, 264f\n"
"ld1 { v11.h }[4], [x9]\n"
- "ld1 { v15.h }[4], [x25]\n"
- "ld1 { v19.h }[4], [x24]\n"
- "ld1 { v23.h }[4], [x23]\n"
- "ld1 { v27.h }[4], [x22]\n"
- "ld1 { v31.h }[4], [x21]\n"
+ "ld1 { v15.h }[4], [x26]\n"
+ "ld1 { v19.h }[4], [x25]\n"
+ "ld1 { v23.h }[4], [x24]\n"
+ "ld1 { v27.h }[4], [x23]\n"
+ "ld1 { v31.h }[4], [x22]\n"
"b 264f\n"
"250:" // Height 6: Partial accumulate: partial_2_24
"tbz x11, #1, 251f\n"
"ldr s11, [x9], #0x4\n"
- "ldr s15, [x25], #0x4\n"
+ "ldr s15, [x26], #0x4\n"
"mov x20, #0x34\n"
- "ldr s19, [x24], #0x4\n"
- "ldr s23, [x23], #0x4\n"
- "ldr s27, [x22], #0x4\n"
- "ldr s31, [x21], #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s23, [x24], #0x4\n"
+ "ldr s27, [x23], #0x4\n"
+ "ldr s31, [x22], #0x4\n"
"tbz x11, #0, 264f\n"
"ld1 { v11.h }[2], [x9]\n"
- "ld1 { v15.h }[2], [x25]\n"
- "ld1 { v19.h }[2], [x24]\n"
- "ld1 { v23.h }[2], [x23]\n"
- "ld1 { v27.h }[2], [x22]\n"
- "ld1 { v31.h }[2], [x21]\n"
+ "ld1 { v15.h }[2], [x26]\n"
+ "ld1 { v19.h }[2], [x25]\n"
+ "ld1 { v23.h }[2], [x24]\n"
+ "ld1 { v27.h }[2], [x23]\n"
+ "ld1 { v31.h }[2], [x22]\n"
"b 264f\n"
"251:" // Height 6: Partial accumulate: partial_1_24
"mov x20, #0x30\n"
"tbz x11, #0, 264f\n"
"ldr h11, [x9, #0x0]\n"
- "ldr h15, [x25, #0x0]\n"
- "ldr h19, [x24, #0x0]\n"
- "ldr h23, [x23, #0x0]\n"
- "ldr h27, [x22, #0x0]\n"
- "ldr h31, [x21, #0x0]\n"
+ "ldr h15, [x26, #0x0]\n"
+ "ldr h19, [x25, #0x0]\n"
+ "ldr h23, [x24, #0x0]\n"
+ "ldr h27, [x23, #0x0]\n"
+ "ldr h31, [x22, #0x0]\n"
"b 264f\n"
"252:" // Height 6: Partial accumulate: partial_4_16
"tbz x11, #2, 254f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x11, #1, 253f\n"
"ld1 { v10.s }[2], [x9], #0x4\n"
- "ld1 { v14.s }[2], [x25], #0x4\n"
+ "ld1 { v14.s }[2], [x26], #0x4\n"
"mov x20, #0x2c\n"
- "ld1 { v18.s }[2], [x24], #0x4\n"
- "ld1 { v22.s }[2], [x23], #0x4\n"
- "ld1 { v26.s }[2], [x22], #0x4\n"
- "ld1 { v30.s }[2], [x21], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v22.s }[2], [x24], #0x4\n"
+ "ld1 { v26.s }[2], [x23], #0x4\n"
+ "ld1 { v30.s }[2], [x22], #0x4\n"
"tbz x11, #0, 264f\n"
"ld1 { v10.h }[6], [x9]\n"
- "ld1 { v14.h }[6], [x25]\n"
- "ld1 { v18.h }[6], [x24]\n"
- "ld1 { v22.h }[6], [x23]\n"
- "ld1 { v26.h }[6], [x22]\n"
- "ld1 { v30.h }[6], [x21]\n"
+ "ld1 { v14.h }[6], [x26]\n"
+ "ld1 { v18.h }[6], [x25]\n"
+ "ld1 { v22.h }[6], [x24]\n"
+ "ld1 { v26.h }[6], [x23]\n"
+ "ld1 { v30.h }[6], [x22]\n"
"b 264f\n"
"253:" // Height 6: Partial accumulate: partial_1_20
"mov x20, #0x28\n"
"tbz x11, #0, 264f\n"
"ld1 { v10.h }[4], [x9]\n"
- "ld1 { v14.h }[4], [x25]\n"
- "ld1 { v18.h }[4], [x24]\n"
- "ld1 { v22.h }[4], [x23]\n"
- "ld1 { v26.h }[4], [x22]\n"
- "ld1 { v30.h }[4], [x21]\n"
+ "ld1 { v14.h }[4], [x26]\n"
+ "ld1 { v18.h }[4], [x25]\n"
+ "ld1 { v22.h }[4], [x24]\n"
+ "ld1 { v26.h }[4], [x23]\n"
+ "ld1 { v30.h }[4], [x22]\n"
"b 264f\n"
"254:" // Height 6: Partial accumulate: partial_2_16
"tbz x11, #1, 255f\n"
"ldr s10, [x9], #0x4\n"
- "ldr s14, [x25], #0x4\n"
+ "ldr s14, [x26], #0x4\n"
"mov x20, #0x24\n"
- "ldr s18, [x24], #0x4\n"
- "ldr s22, [x23], #0x4\n"
- "ldr s26, [x22], #0x4\n"
- "ldr s30, [x21], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
+ "ldr s26, [x23], #0x4\n"
+ "ldr s30, [x22], #0x4\n"
"tbz x11, #0, 264f\n"
"ld1 { v10.h }[2], [x9]\n"
- "ld1 { v14.h }[2], [x25]\n"
- "ld1 { v18.h }[2], [x24]\n"
- "ld1 { v22.h }[2], [x23]\n"
- "ld1 { v26.h }[2], [x22]\n"
- "ld1 { v30.h }[2], [x21]\n"
+ "ld1 { v14.h }[2], [x26]\n"
+ "ld1 { v18.h }[2], [x25]\n"
+ "ld1 { v22.h }[2], [x24]\n"
+ "ld1 { v26.h }[2], [x23]\n"
+ "ld1 { v30.h }[2], [x22]\n"
"b 264f\n"
"255:" // Height 6: Partial accumulate: partial_1_16
"mov x20, #0x20\n"
"tbz x11, #0, 264f\n"
"ldr h10, [x9, #0x0]\n"
- "ldr h14, [x25, #0x0]\n"
- "ldr h18, [x24, #0x0]\n"
- "ldr h22, [x23, #0x0]\n"
- "ldr h26, [x22, #0x0]\n"
- "ldr h30, [x21, #0x0]\n"
+ "ldr h14, [x26, #0x0]\n"
+ "ldr h18, [x25, #0x0]\n"
+ "ldr h22, [x24, #0x0]\n"
+ "ldr h26, [x23, #0x0]\n"
+ "ldr h30, [x22, #0x0]\n"
"b 264f\n"
"256:" // Height 6: Partial accumulate: partial_8_0
"tbz x11, #3, 260f\n"
"ld1 { v8.8h }, [x9], #0x10\n"
- "ld1 { v12.8h }, [x25], #0x10\n"
- "ld1 { v16.8h }, [x24], #0x10\n"
- "ld1 { v20.8h }, [x23], #0x10\n"
- "ld1 { v24.8h }, [x22], #0x10\n"
- "ld1 { v28.8h }, [x21], #0x10\n"
+ "ld1 { v12.8h }, [x26], #0x10\n"
+ "ld1 { v16.8h }, [x25], #0x10\n"
+ "ld1 { v20.8h }, [x24], #0x10\n"
+ "ld1 { v24.8h }, [x23], #0x10\n"
+ "ld1 { v28.8h }, [x22], #0x10\n"
"tbz x11, #2, 258f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x11, #1, 257f\n"
"ld1 { v9.s }[2], [x9], #0x4\n"
- "ld1 { v13.s }[2], [x25], #0x4\n"
+ "ld1 { v13.s }[2], [x26], #0x4\n"
"mov x20, #0x1c\n"
- "ld1 { v17.s }[2], [x24], #0x4\n"
- "ld1 { v21.s }[2], [x23], #0x4\n"
- "ld1 { v25.s }[2], [x22], #0x4\n"
- "ld1 { v29.s }[2], [x21], #0x4\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v21.s }[2], [x24], #0x4\n"
+ "ld1 { v25.s }[2], [x23], #0x4\n"
+ "ld1 { v29.s }[2], [x22], #0x4\n"
"tbz x11, #0, 264f\n"
"ld1 { v9.h }[6], [x9]\n"
- "ld1 { v13.h }[6], [x25]\n"
- "ld1 { v17.h }[6], [x24]\n"
- "ld1 { v21.h }[6], [x23]\n"
- "ld1 { v25.h }[6], [x22]\n"
- "ld1 { v29.h }[6], [x21]\n"
+ "ld1 { v13.h }[6], [x26]\n"
+ "ld1 { v17.h }[6], [x25]\n"
+ "ld1 { v21.h }[6], [x24]\n"
+ "ld1 { v25.h }[6], [x23]\n"
+ "ld1 { v29.h }[6], [x22]\n"
"b 264f\n"
"257:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x18\n"
"tbz x11, #0, 264f\n"
"ld1 { v9.h }[4], [x9]\n"
- "ld1 { v13.h }[4], [x25]\n"
- "ld1 { v17.h }[4], [x24]\n"
- "ld1 { v21.h }[4], [x23]\n"
- "ld1 { v25.h }[4], [x22]\n"
- "ld1 { v29.h }[4], [x21]\n"
+ "ld1 { v13.h }[4], [x26]\n"
+ "ld1 { v17.h }[4], [x25]\n"
+ "ld1 { v21.h }[4], [x24]\n"
+ "ld1 { v25.h }[4], [x23]\n"
+ "ld1 { v29.h }[4], [x22]\n"
"b 264f\n"
"258:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 259f\n"
"ldr s9, [x9], #0x4\n"
- "ldr s13, [x25], #0x4\n"
+ "ldr s13, [x26], #0x4\n"
"mov x20, #0x14\n"
- "ldr s17, [x24], #0x4\n"
- "ldr s21, [x23], #0x4\n"
- "ldr s25, [x22], #0x4\n"
- "ldr s29, [x21], #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s21, [x24], #0x4\n"
+ "ldr s25, [x23], #0x4\n"
+ "ldr s29, [x22], #0x4\n"
"tbz x11, #0, 264f\n"
"ld1 { v9.h }[2], [x9]\n"
- "ld1 { v13.h }[2], [x25]\n"
- "ld1 { v17.h }[2], [x24]\n"
- "ld1 { v21.h }[2], [x23]\n"
- "ld1 { v25.h }[2], [x22]\n"
- "ld1 { v29.h }[2], [x21]\n"
+ "ld1 { v13.h }[2], [x26]\n"
+ "ld1 { v17.h }[2], [x25]\n"
+ "ld1 { v21.h }[2], [x24]\n"
+ "ld1 { v25.h }[2], [x23]\n"
+ "ld1 { v29.h }[2], [x22]\n"
"b 264f\n"
"259:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x10\n"
"tbz x11, #0, 264f\n"
"ldr h9, [x9, #0x0]\n"
- "ldr h13, [x25, #0x0]\n"
- "ldr h17, [x24, #0x0]\n"
- "ldr h21, [x23, #0x0]\n"
- "ldr h25, [x22, #0x0]\n"
- "ldr h29, [x21, #0x0]\n"
+ "ldr h13, [x26, #0x0]\n"
+ "ldr h17, [x25, #0x0]\n"
+ "ldr h21, [x24, #0x0]\n"
+ "ldr h25, [x23, #0x0]\n"
+ "ldr h29, [x22, #0x0]\n"
"b 264f\n"
"260:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 262f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x11, #1, 261f\n"
"ld1 { v8.s }[2], [x9], #0x4\n"
- "ld1 { v12.s }[2], [x25], #0x4\n"
+ "ld1 { v12.s }[2], [x26], #0x4\n"
"mov x20, #0xc\n"
- "ld1 { v16.s }[2], [x24], #0x4\n"
- "ld1 { v20.s }[2], [x23], #0x4\n"
- "ld1 { v24.s }[2], [x22], #0x4\n"
- "ld1 { v28.s }[2], [x21], #0x4\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
+ "ld1 { v24.s }[2], [x23], #0x4\n"
+ "ld1 { v28.s }[2], [x22], #0x4\n"
"tbz x11, #0, 264f\n"
"ld1 { v8.h }[6], [x9]\n"
- "ld1 { v12.h }[6], [x25]\n"
- "ld1 { v16.h }[6], [x24]\n"
- "ld1 { v20.h }[6], [x23]\n"
- "ld1 { v24.h }[6], [x22]\n"
- "ld1 { v28.h }[6], [x21]\n"
+ "ld1 { v12.h }[6], [x26]\n"
+ "ld1 { v16.h }[6], [x25]\n"
+ "ld1 { v20.h }[6], [x24]\n"
+ "ld1 { v24.h }[6], [x23]\n"
+ "ld1 { v28.h }[6], [x22]\n"
"b 264f\n"
"261:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x8\n"
"tbz x11, #0, 264f\n"
"ld1 { v8.h }[4], [x9]\n"
- "ld1 { v12.h }[4], [x25]\n"
- "ld1 { v16.h }[4], [x24]\n"
- "ld1 { v20.h }[4], [x23]\n"
- "ld1 { v24.h }[4], [x22]\n"
- "ld1 { v28.h }[4], [x21]\n"
+ "ld1 { v12.h }[4], [x26]\n"
+ "ld1 { v16.h }[4], [x25]\n"
+ "ld1 { v20.h }[4], [x24]\n"
+ "ld1 { v24.h }[4], [x23]\n"
+ "ld1 { v28.h }[4], [x22]\n"
"b 264f\n"
"262:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 263f\n"
"ldr s8, [x9], #0x4\n"
- "ldr s12, [x25], #0x4\n"
+ "ldr s12, [x26], #0x4\n"
"mov x20, #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "ldr s20, [x23], #0x4\n"
- "ldr s24, [x22], #0x4\n"
- "ldr s28, [x21], #0x4\n"
+ "ldr s16, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s28, [x22], #0x4\n"
"tbz x11, #0, 264f\n"
"ld1 { v8.h }[2], [x9]\n"
- "ld1 { v12.h }[2], [x25]\n"
- "ld1 { v16.h }[2], [x24]\n"
- "ld1 { v20.h }[2], [x23]\n"
- "ld1 { v24.h }[2], [x22]\n"
- "ld1 { v28.h }[2], [x21]\n"
+ "ld1 { v12.h }[2], [x26]\n"
+ "ld1 { v16.h }[2], [x25]\n"
+ "ld1 { v20.h }[2], [x24]\n"
+ "ld1 { v24.h }[2], [x23]\n"
+ "ld1 { v28.h }[2], [x22]\n"
"b 264f\n"
"263:" // Height 6: Partial accumulate: partial_1_0
"ldr h8, [x9, #0x0]\n"
- "ldr h12, [x25, #0x0]\n"
+ "ldr h12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr h16, [x24, #0x0]\n"
- "ldr h20, [x23, #0x0]\n"
- "ldr h24, [x22, #0x0]\n"
- "ldr h28, [x21, #0x0]\n"
+ "ldr h16, [x25, #0x0]\n"
+ "ldr h20, [x24, #0x0]\n"
+ "ldr h24, [x23, #0x0]\n"
+ "ldr h28, [x22, #0x0]\n"
"264:" // Height 6: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 267f\n"
@@ -4335,26 +4338,26 @@ void a64_hybrid_fp16_mla_6x32 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"b 267f\n"
"266:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -4385,8 +4388,8 @@ void a64_hybrid_fp16_mla_6x32 (
"mov x28, #0x0\n"
"268:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 269f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -4688,18 +4691,18 @@ void a64_hybrid_fp16_mla_6x32 (
"fmla v9.8h, v7.8h, v0.h[0]\n"
"fmla v13.8h, v7.8h, v1.h[0]\n"
"add x21, x21, #0x10\n"
- "sub x27, x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v17.8h, v7.8h, v2.h[0]\n"
"fmla v21.8h, v7.8h, v3.h[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v25.8h, v7.8h, v4.h[0]\n"
"fmla v29.8h, v7.8h, v5.h[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v10.8h, v6.8h, v0.h[0]\n"
"fmla v14.8h, v6.8h, v1.h[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x27, x27, #0x8\n"
"prfm pldl1keep, [x22, #0x80]\n"
"fmla v18.8h, v6.8h, v2.h[0]\n"
"fmla v22.8h, v6.8h, v3.h[0]\n"
@@ -4955,21 +4958,21 @@ void a64_hybrid_fp16_mla_6x32 (
"cmp x28, x20\n"
"bne 268b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #1\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #1\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20, LSL #1\n"
+ "add x23, x24, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 276f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.8h }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.8h }, [x21]\n"
"ld1r { v0.8h }, [x20]\n"
"fmin v8.8h, v8.8h, v1.8h\n"
"fmin v9.8h, v9.8h, v1.8h\n"
@@ -5025,255 +5028,255 @@ void a64_hybrid_fp16_mla_6x32 (
"tbz x11, #4, 284f\n"
"st1 { v8.8h }, [x9], #0x10\n"
"st1 { v9.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v13.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v17.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v21.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v25.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
- "st1 { v29.8h }, [x21], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v13.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v17.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v21.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v25.8h }, [x23], #0x10\n"
+ "st1 { v28.8h }, [x22], #0x10\n"
+ "st1 { v29.8h }, [x22], #0x10\n"
"tbz x11, #3, 280f\n"
"st1 { v10.8h }, [x9], #0x10\n"
- "st1 { v14.8h }, [x25], #0x10\n"
- "st1 { v18.8h }, [x24], #0x10\n"
- "st1 { v22.8h }, [x23], #0x10\n"
- "st1 { v26.8h }, [x22], #0x10\n"
- "st1 { v30.8h }, [x21], #0x10\n"
+ "st1 { v14.8h }, [x26], #0x10\n"
+ "st1 { v18.8h }, [x25], #0x10\n"
+ "st1 { v22.8h }, [x24], #0x10\n"
+ "st1 { v26.8h }, [x23], #0x10\n"
+ "st1 { v30.8h }, [x22], #0x10\n"
"tbz x11, #2, 278f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x11, #1, 277f\n"
"st1 { v11.s }[2], [x9], #0x4\n"
- "st1 { v15.s }[2], [x25], #0x4\n"
- "st1 { v19.s }[2], [x24], #0x4\n"
- "st1 { v23.s }[2], [x23], #0x4\n"
- "st1 { v27.s }[2], [x22], #0x4\n"
- "st1 { v31.s }[2], [x21], #0x4\n"
+ "st1 { v15.s }[2], [x26], #0x4\n"
+ "st1 { v19.s }[2], [x25], #0x4\n"
+ "st1 { v23.s }[2], [x24], #0x4\n"
+ "st1 { v27.s }[2], [x23], #0x4\n"
+ "st1 { v31.s }[2], [x22], #0x4\n"
"tbz x11, #0, 292f\n"
"st1 { v11.h }[6], [x9]\n"
- "st1 { v15.h }[6], [x25]\n"
- "st1 { v19.h }[6], [x24]\n"
- "st1 { v23.h }[6], [x23]\n"
- "st1 { v27.h }[6], [x22]\n"
- "st1 { v31.h }[6], [x21]\n"
+ "st1 { v15.h }[6], [x26]\n"
+ "st1 { v19.h }[6], [x25]\n"
+ "st1 { v23.h }[6], [x24]\n"
+ "st1 { v27.h }[6], [x23]\n"
+ "st1 { v31.h }[6], [x22]\n"
"b 292f\n"
"277:" // Height 6: Partial direct writeback: partial_1_28
"tbz x11, #0, 292f\n"
"st1 { v11.h }[4], [x9]\n"
- "st1 { v15.h }[4], [x25]\n"
- "st1 { v19.h }[4], [x24]\n"
- "st1 { v23.h }[4], [x23]\n"
- "st1 { v27.h }[4], [x22]\n"
- "st1 { v31.h }[4], [x21]\n"
+ "st1 { v15.h }[4], [x26]\n"
+ "st1 { v19.h }[4], [x25]\n"
+ "st1 { v23.h }[4], [x24]\n"
+ "st1 { v27.h }[4], [x23]\n"
+ "st1 { v31.h }[4], [x22]\n"
"b 292f\n"
"278:" // Height 6: Partial direct writeback: partial_2_24
"tbz x11, #1, 279f\n"
"str s11, [x9], #0x4\n"
- "str s15, [x25], #0x4\n"
- "str s19, [x24], #0x4\n"
- "str s23, [x23], #0x4\n"
- "str s27, [x22], #0x4\n"
- "str s31, [x21], #0x4\n"
+ "str s15, [x26], #0x4\n"
+ "str s19, [x25], #0x4\n"
+ "str s23, [x24], #0x4\n"
+ "str s27, [x23], #0x4\n"
+ "str s31, [x22], #0x4\n"
"tbz x11, #0, 292f\n"
"st1 { v11.h }[2], [x9]\n"
- "st1 { v15.h }[2], [x25]\n"
- "st1 { v19.h }[2], [x24]\n"
- "st1 { v23.h }[2], [x23]\n"
- "st1 { v27.h }[2], [x22]\n"
- "st1 { v31.h }[2], [x21]\n"
+ "st1 { v15.h }[2], [x26]\n"
+ "st1 { v19.h }[2], [x25]\n"
+ "st1 { v23.h }[2], [x24]\n"
+ "st1 { v27.h }[2], [x23]\n"
+ "st1 { v31.h }[2], [x22]\n"
"b 292f\n"
"279:" // Height 6: Partial direct writeback: partial_1_24
"tbz x11, #0, 292f\n"
"str h11, [x9, #0x0]\n"
- "str h15, [x25, #0x0]\n"
- "str h19, [x24, #0x0]\n"
- "str h23, [x23, #0x0]\n"
- "str h27, [x22, #0x0]\n"
- "str h31, [x21, #0x0]\n"
+ "str h15, [x26, #0x0]\n"
+ "str h19, [x25, #0x0]\n"
+ "str h23, [x24, #0x0]\n"
+ "str h27, [x23, #0x0]\n"
+ "str h31, [x22, #0x0]\n"
"b 292f\n"
"280:" // Height 6: Partial direct writeback: partial_4_16
"tbz x11, #2, 282f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x11, #1, 281f\n"
"st1 { v10.s }[2], [x9], #0x4\n"
- "st1 { v14.s }[2], [x25], #0x4\n"
- "st1 { v18.s }[2], [x24], #0x4\n"
- "st1 { v22.s }[2], [x23], #0x4\n"
- "st1 { v26.s }[2], [x22], #0x4\n"
- "st1 { v30.s }[2], [x21], #0x4\n"
+ "st1 { v14.s }[2], [x26], #0x4\n"
+ "st1 { v18.s }[2], [x25], #0x4\n"
+ "st1 { v22.s }[2], [x24], #0x4\n"
+ "st1 { v26.s }[2], [x23], #0x4\n"
+ "st1 { v30.s }[2], [x22], #0x4\n"
"tbz x11, #0, 292f\n"
"st1 { v10.h }[6], [x9]\n"
- "st1 { v14.h }[6], [x25]\n"
- "st1 { v18.h }[6], [x24]\n"
- "st1 { v22.h }[6], [x23]\n"
- "st1 { v26.h }[6], [x22]\n"
- "st1 { v30.h }[6], [x21]\n"
+ "st1 { v14.h }[6], [x26]\n"
+ "st1 { v18.h }[6], [x25]\n"
+ "st1 { v22.h }[6], [x24]\n"
+ "st1 { v26.h }[6], [x23]\n"
+ "st1 { v30.h }[6], [x22]\n"
"b 292f\n"
"281:" // Height 6: Partial direct writeback: partial_1_20
"tbz x11, #0, 292f\n"
"st1 { v10.h }[4], [x9]\n"
- "st1 { v14.h }[4], [x25]\n"
- "st1 { v18.h }[4], [x24]\n"
- "st1 { v22.h }[4], [x23]\n"
- "st1 { v26.h }[4], [x22]\n"
- "st1 { v30.h }[4], [x21]\n"
+ "st1 { v14.h }[4], [x26]\n"
+ "st1 { v18.h }[4], [x25]\n"
+ "st1 { v22.h }[4], [x24]\n"
+ "st1 { v26.h }[4], [x23]\n"
+ "st1 { v30.h }[4], [x22]\n"
"b 292f\n"
"282:" // Height 6: Partial direct writeback: partial_2_16
"tbz x11, #1, 283f\n"
"str s10, [x9], #0x4\n"
- "str s14, [x25], #0x4\n"
- "str s18, [x24], #0x4\n"
- "str s22, [x23], #0x4\n"
- "str s26, [x22], #0x4\n"
- "str s30, [x21], #0x4\n"
+ "str s14, [x26], #0x4\n"
+ "str s18, [x25], #0x4\n"
+ "str s22, [x24], #0x4\n"
+ "str s26, [x23], #0x4\n"
+ "str s30, [x22], #0x4\n"
"tbz x11, #0, 292f\n"
"st1 { v10.h }[2], [x9]\n"
- "st1 { v14.h }[2], [x25]\n"
- "st1 { v18.h }[2], [x24]\n"
- "st1 { v22.h }[2], [x23]\n"
- "st1 { v26.h }[2], [x22]\n"
- "st1 { v30.h }[2], [x21]\n"
+ "st1 { v14.h }[2], [x26]\n"
+ "st1 { v18.h }[2], [x25]\n"
+ "st1 { v22.h }[2], [x24]\n"
+ "st1 { v26.h }[2], [x23]\n"
+ "st1 { v30.h }[2], [x22]\n"
"b 292f\n"
"283:" // Height 6: Partial direct writeback: partial_1_16
"tbz x11, #0, 292f\n"
"str h10, [x9, #0x0]\n"
- "str h14, [x25, #0x0]\n"
- "str h18, [x24, #0x0]\n"
- "str h22, [x23, #0x0]\n"
- "str h26, [x22, #0x0]\n"
- "str h30, [x21, #0x0]\n"
+ "str h14, [x26, #0x0]\n"
+ "str h18, [x25, #0x0]\n"
+ "str h22, [x24, #0x0]\n"
+ "str h26, [x23, #0x0]\n"
+ "str h30, [x22, #0x0]\n"
"b 292f\n"
"284:" // Height 6: Partial direct writeback: partial_8_0
"tbz x11, #3, 288f\n"
"st1 { v8.8h }, [x9], #0x10\n"
- "st1 { v12.8h }, [x25], #0x10\n"
- "st1 { v16.8h }, [x24], #0x10\n"
- "st1 { v20.8h }, [x23], #0x10\n"
- "st1 { v24.8h }, [x22], #0x10\n"
- "st1 { v28.8h }, [x21], #0x10\n"
+ "st1 { v12.8h }, [x26], #0x10\n"
+ "st1 { v16.8h }, [x25], #0x10\n"
+ "st1 { v20.8h }, [x24], #0x10\n"
+ "st1 { v24.8h }, [x23], #0x10\n"
+ "st1 { v28.8h }, [x22], #0x10\n"
"tbz x11, #2, 286f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x11, #1, 285f\n"
"st1 { v9.s }[2], [x9], #0x4\n"
- "st1 { v13.s }[2], [x25], #0x4\n"
- "st1 { v17.s }[2], [x24], #0x4\n"
- "st1 { v21.s }[2], [x23], #0x4\n"
- "st1 { v25.s }[2], [x22], #0x4\n"
- "st1 { v29.s }[2], [x21], #0x4\n"
+ "st1 { v13.s }[2], [x26], #0x4\n"
+ "st1 { v17.s }[2], [x25], #0x4\n"
+ "st1 { v21.s }[2], [x24], #0x4\n"
+ "st1 { v25.s }[2], [x23], #0x4\n"
+ "st1 { v29.s }[2], [x22], #0x4\n"
"tbz x11, #0, 292f\n"
"st1 { v9.h }[6], [x9]\n"
- "st1 { v13.h }[6], [x25]\n"
- "st1 { v17.h }[6], [x24]\n"
- "st1 { v21.h }[6], [x23]\n"
- "st1 { v25.h }[6], [x22]\n"
- "st1 { v29.h }[6], [x21]\n"
+ "st1 { v13.h }[6], [x26]\n"
+ "st1 { v17.h }[6], [x25]\n"
+ "st1 { v21.h }[6], [x24]\n"
+ "st1 { v25.h }[6], [x23]\n"
+ "st1 { v29.h }[6], [x22]\n"
"b 292f\n"
"285:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 292f\n"
"st1 { v9.h }[4], [x9]\n"
- "st1 { v13.h }[4], [x25]\n"
- "st1 { v17.h }[4], [x24]\n"
- "st1 { v21.h }[4], [x23]\n"
- "st1 { v25.h }[4], [x22]\n"
- "st1 { v29.h }[4], [x21]\n"
+ "st1 { v13.h }[4], [x26]\n"
+ "st1 { v17.h }[4], [x25]\n"
+ "st1 { v21.h }[4], [x24]\n"
+ "st1 { v25.h }[4], [x23]\n"
+ "st1 { v29.h }[4], [x22]\n"
"b 292f\n"
"286:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 287f\n"
"str s9, [x9], #0x4\n"
- "str s13, [x25], #0x4\n"
- "str s17, [x24], #0x4\n"
- "str s21, [x23], #0x4\n"
- "str s25, [x22], #0x4\n"
- "str s29, [x21], #0x4\n"
+ "str s13, [x26], #0x4\n"
+ "str s17, [x25], #0x4\n"
+ "str s21, [x24], #0x4\n"
+ "str s25, [x23], #0x4\n"
+ "str s29, [x22], #0x4\n"
"tbz x11, #0, 292f\n"
"st1 { v9.h }[2], [x9]\n"
- "st1 { v13.h }[2], [x25]\n"
- "st1 { v17.h }[2], [x24]\n"
- "st1 { v21.h }[2], [x23]\n"
- "st1 { v25.h }[2], [x22]\n"
- "st1 { v29.h }[2], [x21]\n"
+ "st1 { v13.h }[2], [x26]\n"
+ "st1 { v17.h }[2], [x25]\n"
+ "st1 { v21.h }[2], [x24]\n"
+ "st1 { v25.h }[2], [x23]\n"
+ "st1 { v29.h }[2], [x22]\n"
"b 292f\n"
"287:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 292f\n"
"str h9, [x9, #0x0]\n"
- "str h13, [x25, #0x0]\n"
- "str h17, [x24, #0x0]\n"
- "str h21, [x23, #0x0]\n"
- "str h25, [x22, #0x0]\n"
- "str h29, [x21, #0x0]\n"
+ "str h13, [x26, #0x0]\n"
+ "str h17, [x25, #0x0]\n"
+ "str h21, [x24, #0x0]\n"
+ "str h25, [x23, #0x0]\n"
+ "str h29, [x22, #0x0]\n"
"b 292f\n"
"288:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 290f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x11, #1, 289f\n"
"st1 { v8.s }[2], [x9], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
"tbz x11, #0, 292f\n"
"st1 { v8.h }[6], [x9]\n"
- "st1 { v12.h }[6], [x25]\n"
- "st1 { v16.h }[6], [x24]\n"
- "st1 { v20.h }[6], [x23]\n"
- "st1 { v24.h }[6], [x22]\n"
- "st1 { v28.h }[6], [x21]\n"
+ "st1 { v12.h }[6], [x26]\n"
+ "st1 { v16.h }[6], [x25]\n"
+ "st1 { v20.h }[6], [x24]\n"
+ "st1 { v24.h }[6], [x23]\n"
+ "st1 { v28.h }[6], [x22]\n"
"b 292f\n"
"289:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 292f\n"
"st1 { v8.h }[4], [x9]\n"
- "st1 { v12.h }[4], [x25]\n"
- "st1 { v16.h }[4], [x24]\n"
- "st1 { v20.h }[4], [x23]\n"
- "st1 { v24.h }[4], [x22]\n"
- "st1 { v28.h }[4], [x21]\n"
+ "st1 { v12.h }[4], [x26]\n"
+ "st1 { v16.h }[4], [x25]\n"
+ "st1 { v20.h }[4], [x24]\n"
+ "st1 { v24.h }[4], [x23]\n"
+ "st1 { v28.h }[4], [x22]\n"
"b 292f\n"
"290:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 291f\n"
"str s8, [x9], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
"tbz x11, #0, 292f\n"
"st1 { v8.h }[2], [x9]\n"
- "st1 { v12.h }[2], [x25]\n"
- "st1 { v16.h }[2], [x24]\n"
- "st1 { v20.h }[2], [x23]\n"
- "st1 { v24.h }[2], [x22]\n"
- "st1 { v28.h }[2], [x21]\n"
+ "st1 { v12.h }[2], [x26]\n"
+ "st1 { v16.h }[2], [x25]\n"
+ "st1 { v20.h }[2], [x24]\n"
+ "st1 { v24.h }[2], [x23]\n"
+ "st1 { v28.h }[2], [x22]\n"
"b 292f\n"
"291:" // Height 6: Partial direct writeback: partial_1_0
"str h8, [x9, #0x0]\n"
- "str h12, [x25, #0x0]\n"
- "str h16, [x24, #0x0]\n"
- "str h20, [x23, #0x0]\n"
- "str h24, [x22, #0x0]\n"
- "str h28, [x21, #0x0]\n"
+ "str h12, [x26, #0x0]\n"
+ "str h16, [x25, #0x0]\n"
+ "str h20, [x24, #0x0]\n"
+ "str h24, [x23, #0x0]\n"
+ "str h28, [x22, #0x0]\n"
"292:" // Height 6: Partial direct writeback: Done
"b 294f\n"
"293:" // Height 6: Full writeback
@@ -5282,26 +5285,26 @@ void a64_hybrid_fp16_mla_6x32 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q28, [x22, #0x0]\n"
+ "str q29, [x22, #0x10]\n"
+ "str q30, [x22, #0x20]\n"
+ "str q31, [x22, #0x30]\n"
"294:" // Height 6: Writeback done
"subs x11, x11, #0x20\n"
"bgt 247b\n"
@@ -5317,8 +5320,8 @@ void a64_hybrid_fp16_mla_6x32 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"296:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
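
The hunks above (and the fp32 hybrid kernels changed below) follow one pattern: `bias` and `output_ptr` are no longer bound as separate inline-asm register operands but are packed into the `KernelArgs` structure and loaded inside the assembly through `offsetof`, freeing register operands for the kernel body. A minimal compilable sketch of that pattern, with hypothetical names (`DemoArgs`, `demo_kernel` are not part of arm_gemm):

    // Sketch only: pointers formerly passed as "r" operands are stored
    // in the argument block and fetched with ldr via offsetof.
    #include <cstddef>

    struct DemoArgs {
        const float *bias = nullptr;
        void *output_ptr = nullptr;
    };

    static void demo_kernel(DemoArgs &ka)
    {
    #if defined(__aarch64__)
        __asm__ volatile(
            // Load the packed operands from the struct instead of taking
            // them as separate register inputs.
            "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
            "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            : /* no outputs: a real kernel would store through x9 */
            : [args_ptr] "r" (&ka),
              [offsetof_bias] "I" (offsetof(DemoArgs, bias)),
              [offsetof_output_ptr] "I" (offsetof(DemoArgs, output_ptr))
            : "x9", "x12", "memory");
    #else
        (void)ka; // non-AArch64 builds: nothing to demonstrate
    #endif
    }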
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24.hpp
index bce4de74f7..7a025a5deb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24.hpp
@@ -71,7 +71,8 @@ public:
return true;
}
- StdTransformsFixedTRB<rhs_operand_type, result_type, 4, 24, 1> transforms = {};
+ StdTransformsFixedTRB<lhs_operand_type, rhs_operand_type, result_type, 4, 24, 1> transforms = {};
+
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
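{

The one-line change above gives the transforms helper an explicit LHS operand type instead of reusing the RHS type. As a rough, hypothetical illustration of that signature change (`DemoTransforms` is a stand-in, not the library's real `StdTransformsFixedTRB` template):

    // The left-hand-side operand type is now spelled out, so kernels
    // whose LHS and RHS element types differ can share this helper.
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned int height_, unsigned int width_, unsigned int ksize_>
    struct DemoTransforms {};

    // Old shape: DemoTransforms<float, float, 4, 24, 1>  (LHS type implied)
    // New shape:
    DemoTransforms<float, float, float, 4, 24, 1> transforms_example{};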
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp
index 9ceda8fd0c..23587e6317 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void a64_hybrid_fp32_mla_4x24_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void a64_hybrid_fp32_mla_4x24_a55 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -98,10 +100,10 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"cmp %x[M], #0x2\n"
"bgt 83f\n"
"beq 42f\n"
- "mov x17, %x[bias]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x17, 3f\n"
"ldr q8, [x17, #0x0]\n"
@@ -221,8 +223,8 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"mov x13, #0x0\n"
"19:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w12, [x20, x13, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -254,47 +256,50 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v11.4s, v7.4s, v0.s[0]\n"
"ldr d16, [x15, #0x70]\n"
"mov v19.d[1], x20\n"
- "ldr x20, [x15, #0x58]\n"
- "mov v18.d[1], x20\n"
+ "ldr x22, [x15, #0x58]\n"
+ "add x11, x11, #0x10\n"
"ldr x20, [x15, #0x68]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0x78]\n"
- "mov v16.d[1], x20\n"
"fmla v12.4s, v19.4s, v0.s[0]\n"
"ldr d19, [x15, #0x80]\n"
+ "sub x12, x12, #0x4\n"
+ "ldr x21, [x15, #0x78]\n"
+ "mov v18.d[1], x22\n"
+ "mov v17.d[1], x20\n"
"ldr x20, [x15, #0x88]\n"
"fmla v13.4s, v18.4s, v0.s[0]\n"
"ldr d18, [x15, #0x90]\n"
+ "mov v16.d[1], x21\n"
+ "ldr x22, [x15, #0x98]\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
"ldr d17, [x15, #0xa0]\n"
+ "ldr x21, [x15, #0xa8]\n"
"fmla v9.4s, v16.4s, v0.s[1]\n"
"ldr d16, [x15, #0xb0]\n"
"mov v19.d[1], x20\n"
- "ldr x20, [x15, #0x98]\n"
- "mov v18.d[1], x20\n"
- "ldr x20, [x15, #0xa8]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x15, #0xb8]\n"
- "mov v16.d[1], x20\n"
+ "mov v18.d[1], x22\n"
+ "mov v17.d[1], x21\n"
"fmla v10.4s, v19.4s, v0.s[1]\n"
"ldr d19, [x15, #0xc0]\n"
- "ldr x20, [x15, #0xc8]\n"
"fmla v11.4s, v18.4s, v0.s[1]\n"
"ldr d18, [x15, #0xd0]\n"
+ "mov v16.d[1], x20\n"
+ "ldr x20, [x15, #0xc8]\n"
"fmla v12.4s, v17.4s, v0.s[1]\n"
"ldr d17, [x15, #0xe0]\n"
"fmla v13.4s, v16.4s, v0.s[1]\n"
"ldr d16, [x15, #0xf0]\n"
+ "cmp x12, #0x8\n"
+ "ldr x22, [x15, #0xd8]\n"
"mov v19.d[1], x20\n"
- "ldr x20, [x15, #0xd8]\n"
- "mov v18.d[1], x20\n"
"ldr x20, [x15, #0xe8]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0xf8]\n"
- "mov v16.d[1], x20\n"
+ "ldr x21, [x15, #0xf8]\n"
"fmla v8.4s, v19.4s, v0.s[2]\n"
"ldr d19, [x15, #0x100]\n"
+ "mov v18.d[1], x22\n"
+ "mov v17.d[1], x20\n"
"ldr x20, [x15, #0x108]\n"
+ "mov v16.d[1], x21\n"
"fmla v9.4s, v18.4s, v0.s[2]\n"
"ldr d18, [x15, #0x110]\n"
"fmla v10.4s, v17.4s, v0.s[2]\n"
@@ -302,52 +307,49 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v11.4s, v16.4s, v0.s[2]\n"
"ldr d16, [x15, #0x130]\n"
"mov v19.d[1], x20\n"
- "ldr x20, [x15, #0x118]\n"
- "mov v18.d[1], x20\n"
+ "ldr x22, [x15, #0x118]\n"
"ldr x20, [x15, #0x128]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0x138]\n"
- "mov v16.d[1], x20\n"
"fmla v12.4s, v19.4s, v0.s[2]\n"
"ldr d19, [x15, #0x140]\n"
+ "ldr x21, [x15, #0x138]\n"
+ "mov v18.d[1], x22\n"
+ "mov v17.d[1], x20\n"
"ldr x20, [x15, #0x148]\n"
"fmla v13.4s, v18.4s, v0.s[2]\n"
"ldr d18, [x15, #0x150]\n"
+ "mov v16.d[1], x21\n"
+ "ldr x22, [x15, #0x158]\n"
"fmla v8.4s, v17.4s, v0.s[3]\n"
"ldr d17, [x15, #0x160]\n"
+ "ldr x21, [x15, #0x168]\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr d16, [x15, #0x170]\n"
"mov v19.d[1], x20\n"
- "ldr x20, [x15, #0x158]\n"
- "mov v18.d[1], x20\n"
- "ldr x20, [x15, #0x168]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x15, #0x178]\n"
- "mov v16.d[1], x20\n"
- "add x11, x11, #0x10\n"
+ "mov v18.d[1], x22\n"
+ "mov v17.d[1], x21\n"
"add x15, x15, #0x180\n"
"fmla v10.4s, v19.4s, v0.s[3]\n"
"ldr d4, [x15, #0x0]\n"
- "ldr x20, [x15, #0x8]\n"
+ "mov v16.d[1], x20\n"
+ "ldr x21, [x15, #0x8]\n"
"fmla v11.4s, v18.4s, v0.s[3]\n"
"ldr d5, [x15, #0x10]\n"
+ "ldr x20, [x15, #0x18]\n"
"fmla v12.4s, v17.4s, v0.s[3]\n"
"ldr d6, [x15, #0x20]\n"
"fmla v13.4s, v16.4s, v0.s[3]\n"
"ldr d0, [x11, #0x0]\n"
- "sub x12, x12, #0x4\n"
+ "mov v4.d[1], x21\n"
"ldr d7, [x15, #0x30]\n"
- "cmp x12, #0x8\n"
- "ldr x21, [x15, #0x18]\n"
- "mov v4.d[1], x20\n"
- "ldr x20, [x15, #0x28]\n"
- "mov v5.d[1], x21\n"
+ "mov v5.d[1], x20\n"
+ "ldr x22, [x15, #0x28]\n"
"ldr x21, [x11, #0x8]\n"
- "mov v6.d[1], x20\n"
"ldr x20, [x15, #0x38]\n"
+ "mov v6.d[1], x22\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"mov v0.d[1], x21\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x11, #0x80]\n"
"bge 22b\n"
"23:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
@@ -401,21 +403,21 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"24:" // Height 1: Multiply loop: Main loop skip
"cbz x12, 26f\n"
"25:" // Height 1: Multiply loop: Odd block loop
- "ldr s17, [x11], #0x4\n"
+ "ldr s20, [x11], #0x4\n"
"sub x12, x12, #0x1\n"
- "ldr q16, [x15, #0x0]\n"
- "fmla v8.4s, v16.4s, v17.s[0]\n"
+ "ldr q17, [x15, #0x0]\n"
"ldr q16, [x15, #0x10]\n"
- "fmla v9.4s, v16.4s, v17.s[0]\n"
- "ldr q16, [x15, #0x20]\n"
- "fmla v10.4s, v16.4s, v17.s[0]\n"
- "ldr q16, [x15, #0x30]\n"
- "fmla v11.4s, v16.4s, v17.s[0]\n"
- "ldr q16, [x15, #0x40]\n"
- "fmla v12.4s, v16.4s, v17.s[0]\n"
+ "ldr q19, [x15, #0x20]\n"
+ "ldr q18, [x15, #0x30]\n"
+ "fmla v8.4s, v17.4s, v20.s[0]\n"
+ "ldr q17, [x15, #0x40]\n"
+ "fmla v9.4s, v16.4s, v20.s[0]\n"
"ldr q16, [x15, #0x50]\n"
- "fmla v13.4s, v16.4s, v17.s[0]\n"
+ "fmla v10.4s, v19.4s, v20.s[0]\n"
+ "fmla v11.4s, v18.4s, v20.s[0]\n"
"add x15, x15, #0x60\n"
+ "fmla v12.4s, v17.4s, v20.s[0]\n"
+ "fmla v13.4s, v16.4s, v20.s[0]\n"
"cbnz x12, 25b\n"
"26:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -424,16 +426,16 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"bne 19b\n"
"prfm pstl1keep, [x14, #0x0]\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v16.4s\n"
- "fmin v9.4s, v9.4s, v16.4s\n"
- "fmin v10.4s, v10.4s, v16.4s\n"
- "fmin v11.4s, v11.4s, v16.4s\n"
- "fmin v12.4s, v12.4s, v16.4s\n"
- "fmin v13.4s, v13.4s, v16.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v17.4s\n"
+ "fmin v9.4s, v9.4s, v17.4s\n"
+ "fmin v10.4s, v10.4s, v17.4s\n"
+ "fmin v11.4s, v11.4s, v17.4s\n"
+ "fmin v12.4s, v12.4s, v17.4s\n"
+ "fmin v13.4s, v13.4s, v17.4s\n"
"fmax v8.4s, v8.4s, v16.4s\n"
"fmax v9.4s, v9.4s, v16.4s\n"
"fmax v10.4s, v10.4s, v16.4s\n"
@@ -529,142 +531,142 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"bgt 2b\n"
"b 166f\n"
"42:" // Height 2
- "mov x17, %x[bias]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"43:" // Height 2: Column loop
"cbz x17, 44f\n"
"ldr q8, [x17, #0x0]\n"
- "mov v14.16b, v8.16b\n"
"ldr q9, [x17, #0x10]\n"
- "mov v15.16b, v9.16b\n"
"ldr q10, [x17, #0x20]\n"
- "mov v16.16b, v10.16b\n"
"ldr q11, [x17, #0x30]\n"
- "mov v17.16b, v11.16b\n"
+ "mov v14.16b, v8.16b\n"
"ldr q12, [x17, #0x40]\n"
- "mov v18.16b, v12.16b\n"
+ "mov v15.16b, v9.16b\n"
"ldr q13, [x17, #0x50]\n"
- "mov v19.16b, v13.16b\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v17.16b, v11.16b\n"
"add x17, x17, #0x60\n"
+ "mov v18.16b, v12.16b\n"
+ "mov v19.16b, v13.16b\n"
"b 59f\n"
"44:" // Height 2: no bias
"tbz %x[flags], #0, 58f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x16, #0x18\n"
- "add x23, x14, x20, LSL #2\n"
+ "add x24, x14, x20, LSL #2\n"
"bge 57f\n"
"tbz x16, #4, 48f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v9.4s }, [x14], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x14], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
"ld1 { v11.4s }, [x14], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
"tbz x16, #2, 46f\n"
"ld1 { v12.4s }, [x14], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
"tbz x16, #1, 45f\n"
"ldr d13, [x14], #0x8\n"
"mov x20, #0x58\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"tbz x16, #0, 56f\n"
"ld1 { v13.s }[2], [x14]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x24]\n"
"b 56f\n"
"45:" // Height 2: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x16, #0, 56f\n"
"ldr s13, [x14, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
"b 56f\n"
"46:" // Height 2: Partial accumulate: partial_2_16
"tbz x16, #1, 47f\n"
"ldr d12, [x14], #0x8\n"
"mov x20, #0x48\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"tbz x16, #0, 56f\n"
"ld1 { v12.s }[2], [x14]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x24]\n"
"b 56f\n"
"47:" // Height 2: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x16, #0, 56f\n"
"ldr s12, [x14, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
"b 56f\n"
"48:" // Height 2: Partial accumulate: partial_8_0
"tbz x16, #3, 52f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v9.4s }, [x14], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
"tbz x16, #2, 50f\n"
"ld1 { v10.4s }, [x14], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
"tbz x16, #1, 49f\n"
"ldr d11, [x14], #0x8\n"
"mov x20, #0x38\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"tbz x16, #0, 56f\n"
"ld1 { v11.s }[2], [x14]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x24]\n"
"b 56f\n"
"49:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x16, #0, 56f\n"
"ldr s11, [x14, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
"b 56f\n"
"50:" // Height 2: Partial accumulate: partial_2_8
"tbz x16, #1, 51f\n"
"ldr d10, [x14], #0x8\n"
"mov x20, #0x28\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"tbz x16, #0, 56f\n"
"ld1 { v10.s }[2], [x14]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x24]\n"
"b 56f\n"
"51:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x16, #0, 56f\n"
"ldr s10, [x14, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
"b 56f\n"
"52:" // Height 2: Partial accumulate: partial_4_0
"tbz x16, #2, 54f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"tbz x16, #1, 53f\n"
"ldr d9, [x14], #0x8\n"
"mov x20, #0x18\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"tbz x16, #0, 56f\n"
"ld1 { v9.s }[2], [x14]\n"
- "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 56f\n"
"53:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x16, #0, 56f\n"
"ldr s9, [x14, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 56f\n"
"54:" // Height 2: Partial accumulate: partial_2_0
"tbz x16, #1, 55f\n"
"ldr d8, [x14], #0x8\n"
"mov x20, #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"tbz x16, #0, 56f\n"
"ld1 { v8.s }[2], [x14]\n"
- "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 56f\n"
"55:" // Height 2: Partial accumulate: partial_1_0
"ldr s8, [x14, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"56:" // Height 2: Partial accumulate: Done
"sub x14, x14, x20\n"
"b 59f\n"
@@ -675,12 +677,12 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"ldr q11, [x14, #0x30]\n"
"ldr q12, [x14, #0x40]\n"
"ldr q13, [x14, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
"b 59f\n"
"58:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -699,8 +701,8 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"mov x13, #0x0\n"
"60:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w12, [x20, x13, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -743,15 +745,15 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v17.4s, v7.4s, v1.s[0]\n"
"ldr d20, [x15, #0x70]\n"
"mov v23.d[1], x23\n"
- "fmla v12.4s, v23.4s, v0.s[0]\n"
+ "ldr x23, [x15, #0x88]\n"
"mov v22.d[1], x22\n"
- "fmla v18.4s, v23.4s, v1.s[0]\n"
- "ldr d23, [x15, #0x80]\n"
+ "ldr x22, [x15, #0x98]\n"
"mov v21.d[1], x21\n"
+ "fmla v12.4s, v23.4s, v0.s[0]\n"
"mov v20.d[1], x20\n"
- "ldr x23, [x15, #0x88]\n"
+ "fmla v18.4s, v23.4s, v1.s[0]\n"
+ "ldr d23, [x15, #0x80]\n"
"fmla v13.4s, v22.4s, v0.s[0]\n"
- "ldr x22, [x15, #0x98]\n"
"fmla v19.4s, v22.4s, v1.s[0]\n"
"ldr d22, [x15, #0x90]\n"
"fmla v8.4s, v21.4s, v0.s[1]\n"
@@ -763,15 +765,15 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v15.4s, v20.4s, v1.s[1]\n"
"ldr d20, [x15, #0xb0]\n"
"mov v23.d[1], x23\n"
- "fmla v10.4s, v23.4s, v0.s[1]\n"
+ "ldr x23, [x15, #0xc8]\n"
"mov v22.d[1], x22\n"
- "fmla v16.4s, v23.4s, v1.s[1]\n"
- "ldr d23, [x15, #0xc0]\n"
+ "ldr x22, [x15, #0xd8]\n"
"mov v21.d[1], x21\n"
+ "fmla v10.4s, v23.4s, v0.s[1]\n"
"mov v20.d[1], x20\n"
- "ldr x23, [x15, #0xc8]\n"
+ "fmla v16.4s, v23.4s, v1.s[1]\n"
+ "ldr d23, [x15, #0xc0]\n"
"fmla v11.4s, v22.4s, v0.s[1]\n"
- "ldr x22, [x15, #0xd8]\n"
"fmla v17.4s, v22.4s, v1.s[1]\n"
"ldr d22, [x15, #0xd0]\n"
"fmla v12.4s, v21.4s, v0.s[1]\n"
@@ -783,15 +785,15 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v19.4s, v20.4s, v1.s[1]\n"
"ldr d20, [x15, #0xf0]\n"
"mov v23.d[1], x23\n"
- "fmla v8.4s, v23.4s, v0.s[2]\n"
+ "ldr x23, [x15, #0x108]\n"
"mov v22.d[1], x22\n"
- "fmla v14.4s, v23.4s, v1.s[2]\n"
- "ldr d23, [x15, #0x100]\n"
+ "ldr x22, [x15, #0x118]\n"
"mov v21.d[1], x21\n"
+ "fmla v8.4s, v23.4s, v0.s[2]\n"
"mov v20.d[1], x20\n"
- "ldr x23, [x15, #0x108]\n"
+ "fmla v14.4s, v23.4s, v1.s[2]\n"
+ "ldr d23, [x15, #0x100]\n"
"fmla v9.4s, v22.4s, v0.s[2]\n"
- "ldr x22, [x15, #0x118]\n"
"fmla v15.4s, v22.4s, v1.s[2]\n"
"ldr d22, [x15, #0x110]\n"
"fmla v10.4s, v21.4s, v0.s[2]\n"
@@ -803,15 +805,15 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v17.4s, v20.4s, v1.s[2]\n"
"ldr d20, [x15, #0x130]\n"
"mov v23.d[1], x23\n"
- "fmla v12.4s, v23.4s, v0.s[2]\n"
+ "ldr x23, [x15, #0x148]\n"
"mov v22.d[1], x22\n"
- "fmla v18.4s, v23.4s, v1.s[2]\n"
- "ldr d23, [x15, #0x140]\n"
+ "ldr x22, [x15, #0x158]\n"
"mov v21.d[1], x21\n"
+ "fmla v12.4s, v23.4s, v0.s[2]\n"
"mov v20.d[1], x20\n"
- "ldr x23, [x15, #0x148]\n"
+ "fmla v18.4s, v23.4s, v1.s[2]\n"
+ "ldr d23, [x15, #0x140]\n"
"fmla v13.4s, v22.4s, v0.s[2]\n"
- "ldr x22, [x15, #0x158]\n"
"fmla v19.4s, v22.4s, v1.s[2]\n"
"ldr d22, [x15, #0x150]\n"
"fmla v8.4s, v21.4s, v0.s[3]\n"
@@ -942,19 +944,19 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"sub x12, x12, #0x1\n"
"ldr s24, [x10], #0x4\n"
"ldr q21, [x15, #0x0]\n"
- "fmla v8.4s, v21.4s, v25.s[0]\n"
"ldr q20, [x15, #0x10]\n"
- "fmla v14.4s, v21.4s, v24.s[0]\n"
"ldr q23, [x15, #0x20]\n"
- "fmla v9.4s, v20.4s, v25.s[0]\n"
"ldr q22, [x15, #0x30]\n"
- "fmla v15.4s, v20.4s, v24.s[0]\n"
+ "fmla v8.4s, v21.4s, v25.s[0]\n"
+ "fmla v14.4s, v21.4s, v24.s[0]\n"
"ldr q21, [x15, #0x40]\n"
- "fmla v10.4s, v23.4s, v25.s[0]\n"
+ "fmla v9.4s, v20.4s, v25.s[0]\n"
+ "fmla v15.4s, v20.4s, v24.s[0]\n"
"ldr q20, [x15, #0x50]\n"
+ "fmla v10.4s, v23.4s, v25.s[0]\n"
+ "add x15, x15, #0x60\n"
"fmla v16.4s, v23.4s, v24.s[0]\n"
"fmla v11.4s, v22.4s, v25.s[0]\n"
- "add x15, x15, #0x60\n"
"fmla v17.4s, v22.4s, v24.s[0]\n"
"fmla v12.4s, v21.4s, v25.s[0]\n"
"fmla v18.4s, v21.4s, v24.s[0]\n"
@@ -967,26 +969,26 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"cmp x13, x20\n"
"bne 60b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
"prfm pstl1keep, [x14, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add x24, x14, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 68f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v20.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v20.4s\n"
- "fmin v9.4s, v9.4s, v20.4s\n"
- "fmin v10.4s, v10.4s, v20.4s\n"
- "fmin v11.4s, v11.4s, v20.4s\n"
- "fmin v12.4s, v12.4s, v20.4s\n"
- "fmin v13.4s, v13.4s, v20.4s\n"
- "fmin v14.4s, v14.4s, v20.4s\n"
- "fmin v15.4s, v15.4s, v20.4s\n"
- "fmin v16.4s, v16.4s, v20.4s\n"
- "fmin v17.4s, v17.4s, v20.4s\n"
- "fmin v18.4s, v18.4s, v20.4s\n"
- "fmin v19.4s, v19.4s, v20.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.4s }, [x21]\n"
"ld1r { v20.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v21.4s\n"
+ "fmin v9.4s, v9.4s, v21.4s\n"
+ "fmin v10.4s, v10.4s, v21.4s\n"
+ "fmin v11.4s, v11.4s, v21.4s\n"
+ "fmin v12.4s, v12.4s, v21.4s\n"
+ "fmin v13.4s, v13.4s, v21.4s\n"
+ "fmin v14.4s, v14.4s, v21.4s\n"
+ "fmin v15.4s, v15.4s, v21.4s\n"
+ "fmin v16.4s, v16.4s, v21.4s\n"
+ "fmin v17.4s, v17.4s, v21.4s\n"
+ "fmin v18.4s, v18.4s, v21.4s\n"
+ "fmin v19.4s, v19.4s, v21.4s\n"
"fmax v8.4s, v8.4s, v20.4s\n"
"fmax v9.4s, v9.4s, v20.4s\n"
"fmax v10.4s, v10.4s, v20.4s\n"
@@ -1007,99 +1009,99 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"st1 { v9.4s }, [x14], #0x10\n"
"st1 { v10.4s }, [x14], #0x10\n"
"st1 { v11.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
"tbz x16, #2, 70f\n"
"st1 { v12.4s }, [x14], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
"tbz x16, #1, 69f\n"
"str d13, [x14], #0x8\n"
- "str d19, [x23], #0x8\n"
+ "str d19, [x24], #0x8\n"
"tbz x16, #0, 80f\n"
"st1 { v13.s }[2], [x14]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x24]\n"
"b 80f\n"
"69:" // Height 2: Partial direct writeback: partial_1_20
"tbz x16, #0, 80f\n"
"str s13, [x14, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
"b 80f\n"
"70:" // Height 2: Partial direct writeback: partial_2_16
"tbz x16, #1, 71f\n"
"str d12, [x14], #0x8\n"
- "str d18, [x23], #0x8\n"
+ "str d18, [x24], #0x8\n"
"tbz x16, #0, 80f\n"
"st1 { v12.s }[2], [x14]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x24]\n"
"b 80f\n"
"71:" // Height 2: Partial direct writeback: partial_1_16
"tbz x16, #0, 80f\n"
"str s12, [x14, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
"b 80f\n"
"72:" // Height 2: Partial direct writeback: partial_8_0
"tbz x16, #3, 76f\n"
"st1 { v8.4s }, [x14], #0x10\n"
"st1 { v9.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
"tbz x16, #2, 74f\n"
"st1 { v10.4s }, [x14], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
"tbz x16, #1, 73f\n"
"str d11, [x14], #0x8\n"
- "str d17, [x23], #0x8\n"
+ "str d17, [x24], #0x8\n"
"tbz x16, #0, 80f\n"
"st1 { v11.s }[2], [x14]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x24]\n"
"b 80f\n"
"73:" // Height 2: Partial direct writeback: partial_1_12
"tbz x16, #0, 80f\n"
"str s11, [x14, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
"b 80f\n"
"74:" // Height 2: Partial direct writeback: partial_2_8
"tbz x16, #1, 75f\n"
"str d10, [x14], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d16, [x24], #0x8\n"
"tbz x16, #0, 80f\n"
"st1 { v10.s }[2], [x14]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x24]\n"
"b 80f\n"
"75:" // Height 2: Partial direct writeback: partial_1_8
"tbz x16, #0, 80f\n"
"str s10, [x14, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
"b 80f\n"
"76:" // Height 2: Partial direct writeback: partial_4_0
"tbz x16, #2, 78f\n"
"st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
"tbz x16, #1, 77f\n"
"str d9, [x14], #0x8\n"
- "str d15, [x23], #0x8\n"
+ "str d15, [x24], #0x8\n"
"tbz x16, #0, 80f\n"
"st1 { v9.s }[2], [x14]\n"
- "st1 { v15.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x24]\n"
"b 80f\n"
"77:" // Height 2: Partial direct writeback: partial_1_4
"tbz x16, #0, 80f\n"
"str s9, [x14, #0x0]\n"
- "str s15, [x23, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
"b 80f\n"
"78:" // Height 2: Partial direct writeback: partial_2_0
"tbz x16, #1, 79f\n"
"str d8, [x14], #0x8\n"
- "str d14, [x23], #0x8\n"
+ "str d14, [x24], #0x8\n"
"tbz x16, #0, 80f\n"
"st1 { v8.s }[2], [x14]\n"
- "st1 { v14.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x24]\n"
"b 80f\n"
"79:" // Height 2: Partial direct writeback: partial_1_0
"str s8, [x14, #0x0]\n"
- "str s14, [x23, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
"80:" // Height 2: Partial direct writeback: Done
"b 82f\n"
"81:" // Height 2: Full writeback
@@ -1110,37 +1112,37 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"str q12, [x14, #0x40]\n"
"str q13, [x14, #0x50]\n"
"add x14, x14, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
+ "str q14, [x24, #0x0]\n"
+ "str q15, [x24, #0x10]\n"
+ "str q16, [x24, #0x20]\n"
+ "str q17, [x24, #0x30]\n"
+ "str q18, [x24, #0x40]\n"
+ "str q19, [x24, #0x50]\n"
"82:" // Height 2: Writeback done
"subs x16, x16, #0x18\n"
"bgt 43b\n"
"b 166f\n"
"83:" // Height 3
- "mov x17, %x[bias]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"84:" // Height 3: Column loop
"cbz x17, 85f\n"
"ldr q8, [x17, #0x0]\n"
- "mov v14.16b, v8.16b\n"
"ldr q9, [x17, #0x10]\n"
- "mov v15.16b, v9.16b\n"
"ldr q10, [x17, #0x20]\n"
- "mov v16.16b, v10.16b\n"
"ldr q11, [x17, #0x30]\n"
- "mov v17.16b, v11.16b\n"
+ "mov v14.16b, v8.16b\n"
"ldr q12, [x17, #0x40]\n"
- "mov v18.16b, v12.16b\n"
+ "mov v15.16b, v9.16b\n"
"ldr q13, [x17, #0x50]\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v17.16b, v11.16b\n"
+ "add x17, x17, #0x60\n"
+ "mov v18.16b, v12.16b\n"
"mov v19.16b, v13.16b\n"
"mov v20.16b, v8.16b\n"
- "add x17, x17, #0x60\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1150,147 +1152,147 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"85:" // Height 3: no bias
"tbz %x[flags], #0, 99f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
"cmp x16, #0x18\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x24, x14, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"bge 98f\n"
"tbz x16, #4, 89f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
"ld1 { v9.4s }, [x14], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x14], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
"ld1 { v11.4s }, [x14], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
"tbz x16, #2, 87f\n"
"ld1 { v12.4s }, [x14], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"tbz x16, #1, 86f\n"
"ldr d13, [x14], #0x8\n"
"mov x20, #0x58\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x16, #0, 97f\n"
"ld1 { v13.s }[2], [x14]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 97f\n"
"86:" // Height 3: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x16, #0, 97f\n"
"ldr s13, [x14, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"b 97f\n"
"87:" // Height 3: Partial accumulate: partial_2_16
"tbz x16, #1, 88f\n"
"ldr d12, [x14], #0x8\n"
"mov x20, #0x48\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x16, #0, 97f\n"
"ld1 { v12.s }[2], [x14]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 97f\n"
"88:" // Height 3: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x16, #0, 97f\n"
"ldr s12, [x14, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"b 97f\n"
"89:" // Height 3: Partial accumulate: partial_8_0
"tbz x16, #3, 93f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
"ld1 { v9.4s }, [x14], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"tbz x16, #2, 91f\n"
"ld1 { v10.4s }, [x14], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
"tbz x16, #1, 90f\n"
"ldr d11, [x14], #0x8\n"
"mov x20, #0x38\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
"tbz x16, #0, 97f\n"
"ld1 { v11.s }[2], [x14]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
"b 97f\n"
"90:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x16, #0, 97f\n"
"ldr s11, [x14, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
"b 97f\n"
"91:" // Height 3: Partial accumulate: partial_2_8
"tbz x16, #1, 92f\n"
"ldr d10, [x14], #0x8\n"
"mov x20, #0x28\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
"tbz x16, #0, 97f\n"
"ld1 { v10.s }[2], [x14]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
"b 97f\n"
"92:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x16, #0, 97f\n"
"ldr s10, [x14, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
"b 97f\n"
"93:" // Height 3: Partial accumulate: partial_4_0
"tbz x16, #2, 95f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
"tbz x16, #1, 94f\n"
"ldr d9, [x14], #0x8\n"
"mov x20, #0x18\n"
- "ldr d15, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d21, [x23], #0x8\n"
"tbz x16, #0, 97f\n"
"ld1 { v9.s }[2], [x14]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v21.s }[2], [x23]\n"
"b 97f\n"
"94:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x16, #0, 97f\n"
"ldr s9, [x14, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s21, [x23, #0x0]\n"
"b 97f\n"
"95:" // Height 3: Partial accumulate: partial_2_0
"tbz x16, #1, 96f\n"
"ldr d8, [x14], #0x8\n"
"mov x20, #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d20, [x23], #0x8\n"
"tbz x16, #0, 97f\n"
"ld1 { v8.s }[2], [x14]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v20.s }[2], [x23]\n"
"b 97f\n"
"96:" // Height 3: Partial accumulate: partial_1_0
"ldr s8, [x14, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s14, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s20, [x23, #0x0]\n"
"97:" // Height 3: Partial accumulate: Done
"sub x14, x14, x20\n"
"b 100f\n"
@@ -1301,18 +1303,18 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"ldr q11, [x14, #0x30]\n"
"ldr q12, [x14, #0x40]\n"
"ldr q13, [x14, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x22, #0x40]\n"
- "ldr q25, [x22, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
+ "ldr q20, [x23, #0x0]\n"
+ "ldr q21, [x23, #0x10]\n"
+ "ldr q22, [x23, #0x20]\n"
+ "ldr q23, [x23, #0x30]\n"
+ "ldr q24, [x23, #0x40]\n"
+ "ldr q25, [x23, #0x50]\n"
"b 100f\n"
"99:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -1337,8 +1339,8 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"mov x13, #0x0\n"
"101:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w12, [x20, x13, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 102f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1383,23 +1385,23 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v10.4s, v6.4s, v0.s[0]\n"
"mov v29.d[1], x23\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "mov v28.d[1], x22\n"
+ "ldr x23, [x15, #0x88]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
"ldr d27, [x15, #0x60]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v27.d[1], x21\n"
+ "mov v28.d[1], x22\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
- "ldr x23, [x15, #0x88]\n"
+ "ldr x22, [x15, #0x98]\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
"ldr d26, [x15, #0x70]\n"
- "mov v26.d[1], x20\n"
+ "mov v27.d[1], x21\n"
"fmla v12.4s, v29.4s, v0.s[0]\n"
"fmla v18.4s, v29.4s, v1.s[0]\n"
- "ldr x22, [x15, #0x98]\n"
+ "ldr x21, [x15, #0xa8]\n"
+ "mov v26.d[1], x20\n"
"fmla v24.4s, v29.4s, v2.s[0]\n"
"ldr d29, [x15, #0x80]\n"
"fmla v13.4s, v28.4s, v0.s[0]\n"
- "ldr x21, [x15, #0xa8]\n"
"fmla v19.4s, v28.4s, v1.s[0]\n"
"ldr x20, [x15, #0xb8]\n"
"fmla v25.4s, v28.4s, v2.s[0]\n"
@@ -1407,23 +1409,23 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v8.4s, v27.4s, v0.s[1]\n"
"mov v29.d[1], x23\n"
"fmla v14.4s, v27.4s, v1.s[1]\n"
- "mov v28.d[1], x22\n"
+ "ldr x23, [x15, #0xc8]\n"
"fmla v20.4s, v27.4s, v2.s[1]\n"
"ldr d27, [x15, #0xa0]\n"
"fmla v9.4s, v26.4s, v0.s[1]\n"
- "mov v27.d[1], x21\n"
+ "mov v28.d[1], x22\n"
"fmla v15.4s, v26.4s, v1.s[1]\n"
- "ldr x23, [x15, #0xc8]\n"
+ "ldr x22, [x15, #0xd8]\n"
"fmla v21.4s, v26.4s, v2.s[1]\n"
"ldr d26, [x15, #0xb0]\n"
- "mov v26.d[1], x20\n"
+ "mov v27.d[1], x21\n"
"fmla v10.4s, v29.4s, v0.s[1]\n"
"fmla v16.4s, v29.4s, v1.s[1]\n"
- "ldr x22, [x15, #0xd8]\n"
+ "ldr x21, [x15, #0xe8]\n"
+ "mov v26.d[1], x20\n"
"fmla v22.4s, v29.4s, v2.s[1]\n"
"ldr d29, [x15, #0xc0]\n"
"fmla v11.4s, v28.4s, v0.s[1]\n"
- "ldr x21, [x15, #0xe8]\n"
"fmla v17.4s, v28.4s, v1.s[1]\n"
"ldr x20, [x15, #0xf8]\n"
"fmla v23.4s, v28.4s, v2.s[1]\n"
@@ -1431,23 +1433,23 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v12.4s, v27.4s, v0.s[1]\n"
"mov v29.d[1], x23\n"
"fmla v18.4s, v27.4s, v1.s[1]\n"
- "mov v28.d[1], x22\n"
+ "ldr x23, [x15, #0x108]\n"
"fmla v24.4s, v27.4s, v2.s[1]\n"
"ldr d27, [x15, #0xe0]\n"
"fmla v13.4s, v26.4s, v0.s[1]\n"
- "mov v27.d[1], x21\n"
+ "mov v28.d[1], x22\n"
"fmla v19.4s, v26.4s, v1.s[1]\n"
- "ldr x23, [x15, #0x108]\n"
+ "ldr x22, [x15, #0x118]\n"
"fmla v25.4s, v26.4s, v2.s[1]\n"
"ldr d26, [x15, #0xf0]\n"
- "mov v26.d[1], x20\n"
+ "mov v27.d[1], x21\n"
"fmla v8.4s, v29.4s, v0.s[2]\n"
"fmla v14.4s, v29.4s, v1.s[2]\n"
- "ldr x22, [x15, #0x118]\n"
+ "ldr x21, [x15, #0x128]\n"
+ "mov v26.d[1], x20\n"
"fmla v20.4s, v29.4s, v2.s[2]\n"
"ldr d29, [x15, #0x100]\n"
"fmla v9.4s, v28.4s, v0.s[2]\n"
- "ldr x21, [x15, #0x128]\n"
"fmla v15.4s, v28.4s, v1.s[2]\n"
"ldr x20, [x15, #0x138]\n"
"fmla v21.4s, v28.4s, v2.s[2]\n"
@@ -1455,23 +1457,23 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v10.4s, v27.4s, v0.s[2]\n"
"mov v29.d[1], x23\n"
"fmla v16.4s, v27.4s, v1.s[2]\n"
- "mov v28.d[1], x22\n"
+ "ldr x23, [x15, #0x148]\n"
"fmla v22.4s, v27.4s, v2.s[2]\n"
"ldr d27, [x15, #0x120]\n"
"fmla v11.4s, v26.4s, v0.s[2]\n"
- "mov v27.d[1], x21\n"
+ "mov v28.d[1], x22\n"
"fmla v17.4s, v26.4s, v1.s[2]\n"
- "ldr x23, [x15, #0x148]\n"
+ "ldr x22, [x15, #0x158]\n"
"fmla v23.4s, v26.4s, v2.s[2]\n"
"ldr d26, [x15, #0x130]\n"
- "mov v26.d[1], x20\n"
+ "mov v27.d[1], x21\n"
"fmla v12.4s, v29.4s, v0.s[2]\n"
"fmla v18.4s, v29.4s, v1.s[2]\n"
- "ldr x22, [x15, #0x158]\n"
+ "ldr x21, [x15, #0x168]\n"
+ "mov v26.d[1], x20\n"
"fmla v24.4s, v29.4s, v2.s[2]\n"
"ldr d29, [x15, #0x140]\n"
"fmla v13.4s, v28.4s, v0.s[2]\n"
- "ldr x21, [x15, #0x168]\n"
"fmla v19.4s, v28.4s, v1.s[2]\n"
"ldr x20, [x15, #0x178]\n"
"fmla v25.4s, v28.4s, v2.s[2]\n"
@@ -1479,54 +1481,54 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v8.4s, v27.4s, v0.s[3]\n"
"mov v29.d[1], x23\n"
"fmla v14.4s, v27.4s, v1.s[3]\n"
- "mov v28.d[1], x22\n"
+ "add x11, x11, #0x10\n"
"fmla v20.4s, v27.4s, v2.s[3]\n"
"ldr d27, [x15, #0x160]\n"
"fmla v9.4s, v26.4s, v0.s[3]\n"
- "mov v27.d[1], x21\n"
+ "mov v28.d[1], x22\n"
"fmla v15.4s, v26.4s, v1.s[3]\n"
- "add x11, x11, #0x10\n"
+ "add x10, x10, #0x10\n"
"fmla v21.4s, v26.4s, v2.s[3]\n"
"ldr d26, [x15, #0x170]\n"
- "mov v26.d[1], x20\n"
- "add x10, x10, #0x10\n"
+ "mov v27.d[1], x21\n"
"add x9, x9, #0x10\n"
"add x15, x15, #0x180\n"
"fmla v10.4s, v29.4s, v0.s[3]\n"
- "ldr x26, [x15, #0x8]\n"
+ "mov v26.d[1], x20\n"
"fmla v16.4s, v29.4s, v1.s[3]\n"
- "ldr x25, [x15, #0x18]\n"
"fmla v22.4s, v29.4s, v2.s[3]\n"
"ldr d4, [x15, #0x0]\n"
+ "ldr x20, [x15, #0x8]\n"
"fmla v11.4s, v28.4s, v0.s[3]\n"
- "ldr x24, [x15, #0x28]\n"
"fmla v17.4s, v28.4s, v1.s[3]\n"
- "ldr x23, [x11, #0x8]\n"
+ "ldr x25, [x15, #0x18]\n"
"fmla v23.4s, v28.4s, v2.s[3]\n"
"ldr d5, [x15, #0x10]\n"
"fmla v12.4s, v27.4s, v0.s[3]\n"
- "ldr x22, [x10, #0x8]\n"
+ "ldr x24, [x15, #0x28]\n"
"fmla v18.4s, v27.4s, v1.s[3]\n"
- "ldr x21, [x9, #0x8]\n"
+ "ldr x23, [x11, #0x8]\n"
"fmla v24.4s, v27.4s, v2.s[3]\n"
"ldr d6, [x15, #0x20]\n"
"fmla v13.4s, v26.4s, v0.s[3]\n"
"ldr d0, [x11, #0x0]\n"
"fmla v19.4s, v26.4s, v1.s[3]\n"
"ldr d1, [x10, #0x0]\n"
+ "ldr x22, [x10, #0x8]\n"
"fmla v25.4s, v26.4s, v2.s[3]\n"
"ldr d2, [x9, #0x0]\n"
- "ldr d7, [x15, #0x30]\n"
"sub x12, x12, #0x4\n"
- "ldr x20, [x15, #0x38]\n"
+ "ldr d7, [x15, #0x30]\n"
"cmp x12, #0x8\n"
- "prfm pldl1keep, [x11, #0x80]\n"
- "mov v4.d[1], x26\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "ldr x21, [x9, #0x8]\n"
+ "mov v4.d[1], x20\n"
+ "ldr x20, [x15, #0x38]\n"
"mov v5.d[1], x25\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"mov v6.d[1], x24\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"mov v0.d[1], x23\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
"mov v1.d[1], x22\n"
"mov v2.d[1], x21\n"
"mov v7.d[1], x20\n"
@@ -1640,14 +1642,14 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"ldr s31, [x10], #0x4\n"
"ldr s30, [x9], #0x4\n"
"ldr q27, [x15, #0x0]\n"
- "fmla v8.4s, v27.4s, v0.s[0]\n"
"ldr q26, [x15, #0x10]\n"
- "fmla v14.4s, v27.4s, v31.s[0]\n"
"ldr q29, [x15, #0x20]\n"
- "fmla v20.4s, v27.4s, v30.s[0]\n"
"ldr q28, [x15, #0x30]\n"
- "fmla v9.4s, v26.4s, v0.s[0]\n"
+ "fmla v8.4s, v27.4s, v0.s[0]\n"
+ "fmla v14.4s, v27.4s, v31.s[0]\n"
+ "fmla v20.4s, v27.4s, v30.s[0]\n"
"ldr q27, [x15, #0x40]\n"
+ "fmla v9.4s, v26.4s, v0.s[0]\n"
"fmla v15.4s, v26.4s, v31.s[0]\n"
"fmla v21.4s, v26.4s, v30.s[0]\n"
"ldr q26, [x15, #0x50]\n"
@@ -1671,34 +1673,34 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"cmp x13, x20\n"
"bne 101b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x14, #0x0]\n"
+ "add x24, x14, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 109f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v26.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v26.4s\n"
- "fmin v9.4s, v9.4s, v26.4s\n"
- "fmin v10.4s, v10.4s, v26.4s\n"
- "fmin v11.4s, v11.4s, v26.4s\n"
- "fmin v12.4s, v12.4s, v26.4s\n"
- "fmin v13.4s, v13.4s, v26.4s\n"
- "fmin v14.4s, v14.4s, v26.4s\n"
- "fmin v15.4s, v15.4s, v26.4s\n"
- "fmin v16.4s, v16.4s, v26.4s\n"
- "fmin v17.4s, v17.4s, v26.4s\n"
- "fmin v18.4s, v18.4s, v26.4s\n"
- "fmin v19.4s, v19.4s, v26.4s\n"
- "fmin v20.4s, v20.4s, v26.4s\n"
- "fmin v21.4s, v21.4s, v26.4s\n"
- "fmin v22.4s, v22.4s, v26.4s\n"
- "fmin v23.4s, v23.4s, v26.4s\n"
- "fmin v24.4s, v24.4s, v26.4s\n"
- "fmin v25.4s, v25.4s, v26.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v27.4s }, [x21]\n"
"ld1r { v26.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v27.4s\n"
+ "fmin v9.4s, v9.4s, v27.4s\n"
+ "fmin v10.4s, v10.4s, v27.4s\n"
+ "fmin v11.4s, v11.4s, v27.4s\n"
+ "fmin v12.4s, v12.4s, v27.4s\n"
+ "fmin v13.4s, v13.4s, v27.4s\n"
+ "fmin v14.4s, v14.4s, v27.4s\n"
+ "fmin v15.4s, v15.4s, v27.4s\n"
+ "fmin v16.4s, v16.4s, v27.4s\n"
+ "fmin v17.4s, v17.4s, v27.4s\n"
+ "fmin v18.4s, v18.4s, v27.4s\n"
+ "fmin v19.4s, v19.4s, v27.4s\n"
+ "fmin v20.4s, v20.4s, v27.4s\n"
+ "fmin v21.4s, v21.4s, v27.4s\n"
+ "fmin v22.4s, v22.4s, v27.4s\n"
+ "fmin v23.4s, v23.4s, v27.4s\n"
+ "fmin v24.4s, v24.4s, v27.4s\n"
+ "fmin v25.4s, v25.4s, v27.4s\n"
"fmax v8.4s, v8.4s, v26.4s\n"
"fmax v9.4s, v9.4s, v26.4s\n"
"fmax v10.4s, v10.4s, v26.4s\n"
@@ -1725,126 +1727,126 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"st1 { v9.4s }, [x14], #0x10\n"
"st1 { v10.4s }, [x14], #0x10\n"
"st1 { v11.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
"tbz x16, #2, 111f\n"
"st1 { v12.4s }, [x14], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x16, #1, 110f\n"
"str d13, [x14], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x16, #0, 121f\n"
"st1 { v13.s }[2], [x14]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 121f\n"
"110:" // Height 3: Partial direct writeback: partial_1_20
"tbz x16, #0, 121f\n"
"str s13, [x14, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 121f\n"
"111:" // Height 3: Partial direct writeback: partial_2_16
"tbz x16, #1, 112f\n"
"str d12, [x14], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x16, #0, 121f\n"
"st1 { v12.s }[2], [x14]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 121f\n"
"112:" // Height 3: Partial direct writeback: partial_1_16
"tbz x16, #0, 121f\n"
"str s12, [x14, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"b 121f\n"
"113:" // Height 3: Partial direct writeback: partial_8_0
"tbz x16, #3, 117f\n"
"st1 { v8.4s }, [x14], #0x10\n"
"st1 { v9.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
"tbz x16, #2, 115f\n"
"st1 { v10.4s }, [x14], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
"tbz x16, #1, 114f\n"
"str d11, [x14], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
"tbz x16, #0, 121f\n"
"st1 { v11.s }[2], [x14]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
"b 121f\n"
"114:" // Height 3: Partial direct writeback: partial_1_12
"tbz x16, #0, 121f\n"
"str s11, [x14, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
"b 121f\n"
"115:" // Height 3: Partial direct writeback: partial_2_8
"tbz x16, #1, 116f\n"
"str d10, [x14], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
"tbz x16, #0, 121f\n"
"st1 { v10.s }[2], [x14]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
"b 121f\n"
"116:" // Height 3: Partial direct writeback: partial_1_8
"tbz x16, #0, 121f\n"
"str s10, [x14, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
"b 121f\n"
"117:" // Height 3: Partial direct writeback: partial_4_0
"tbz x16, #2, 119f\n"
"st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
"tbz x16, #1, 118f\n"
"str d9, [x14], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
"tbz x16, #0, 121f\n"
"st1 { v9.s }[2], [x14]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
"b 121f\n"
"118:" // Height 3: Partial direct writeback: partial_1_4
"tbz x16, #0, 121f\n"
"str s9, [x14, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
"b 121f\n"
"119:" // Height 3: Partial direct writeback: partial_2_0
"tbz x16, #1, 120f\n"
"str d8, [x14], #0x8\n"
- "str d14, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
"tbz x16, #0, 121f\n"
"st1 { v8.s }[2], [x14]\n"
- "st1 { v14.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
"b 121f\n"
"120:" // Height 3: Partial direct writeback: partial_1_0
"str s8, [x14, #0x0]\n"
- "str s14, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
"121:" // Height 3: Partial direct writeback: Done
"b 123f\n"
"122:" // Height 3: Full writeback
@@ -1855,18 +1857,18 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"str q12, [x14, #0x40]\n"
"str q13, [x14, #0x50]\n"
"add x14, x14, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
+ "str q14, [x24, #0x0]\n"
+ "str q15, [x24, #0x10]\n"
+ "str q16, [x24, #0x20]\n"
+ "str q17, [x24, #0x30]\n"
+ "str q18, [x24, #0x40]\n"
+ "str q19, [x24, #0x50]\n"
+ "str q20, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q22, [x23, #0x20]\n"
+ "str q23, [x23, #0x30]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q25, [x23, #0x50]\n"
"123:" // Height 3: Writeback done
"subs x16, x16, #0x18\n"
"bgt 84b\n"
@@ -1874,27 +1876,28 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"124:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"mov x20, #0x10\n"
- "mov x17, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
+ "madd x20, x21, x20, x14\n"
"ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x14, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"125:" // Height 4: Column loop
"cbz x17, 126f\n"
"ldr q8, [x17, #0x0]\n"
- "mov v14.16b, v8.16b\n"
"ldr q9, [x17, #0x10]\n"
- "mov v15.16b, v9.16b\n"
"ldr q10, [x17, #0x20]\n"
- "mov v16.16b, v10.16b\n"
"ldr q11, [x17, #0x30]\n"
- "mov v17.16b, v11.16b\n"
+ "mov v14.16b, v8.16b\n"
"ldr q12, [x17, #0x40]\n"
- "mov v18.16b, v12.16b\n"
+ "mov v15.16b, v9.16b\n"
"ldr q13, [x17, #0x50]\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v17.16b, v11.16b\n"
+ "add x17, x17, #0x60\n"
+ "mov v18.16b, v12.16b\n"
"mov v19.16b, v13.16b\n"
"mov v20.16b, v8.16b\n"
- "add x17, x17, #0x60\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1910,175 +1913,175 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"126:" // Height 4: no bias
"tbz %x[flags], #0, 140f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x16, #0x18\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x14, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"bge 139f\n"
"tbz x16, #4, 130f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v9.4s }, [x14], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v27.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x14], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v11.4s }, [x14], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x16, #2, 128f\n"
"ld1 { v12.4s }, [x14], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x16, #1, 127f\n"
"ldr d13, [x14], #0x8\n"
"mov x20, #0x58\n"
- "ldr d19, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x16, #0, 138f\n"
"ld1 { v13.s }[2], [x14]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 138f\n"
"127:" // Height 4: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x16, #0, 138f\n"
"ldr s13, [x14, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 138f\n"
"128:" // Height 4: Partial accumulate: partial_2_16
"tbz x16, #1, 129f\n"
"ldr d12, [x14], #0x8\n"
"mov x20, #0x48\n"
- "ldr d18, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x16, #0, 138f\n"
"ld1 { v12.s }[2], [x14]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 138f\n"
"129:" // Height 4: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x16, #0, 138f\n"
"ldr s12, [x14, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 138f\n"
"130:" // Height 4: Partial accumulate: partial_8_0
"tbz x16, #3, 134f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v9.4s }, [x14], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v27.4s }, [x22], #0x10\n"
"tbz x16, #2, 132f\n"
"ld1 { v10.4s }, [x14], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x16, #1, 131f\n"
"ldr d11, [x14], #0x8\n"
"mov x20, #0x38\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x16, #0, 138f\n"
"ld1 { v11.s }[2], [x14]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 138f\n"
"131:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x16, #0, 138f\n"
"ldr s11, [x14, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 138f\n"
"132:" // Height 4: Partial accumulate: partial_2_8
"tbz x16, #1, 133f\n"
"ldr d10, [x14], #0x8\n"
"mov x20, #0x28\n"
- "ldr d16, [x23], #0x8\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x16, #0, 138f\n"
"ld1 { v10.s }[2], [x14]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 138f\n"
"133:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x16, #0, 138f\n"
"ldr s10, [x14, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"b 138f\n"
"134:" // Height 4: Partial accumulate: partial_4_0
"tbz x16, #2, 136f\n"
"ld1 { v8.4s }, [x14], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"tbz x16, #1, 135f\n"
"ldr d9, [x14], #0x8\n"
"mov x20, #0x18\n"
- "ldr d15, [x23], #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "ldr d21, [x23], #0x8\n"
+ "ldr d27, [x22], #0x8\n"
"tbz x16, #0, 138f\n"
"ld1 { v9.s }[2], [x14]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v27.s }[2], [x22]\n"
"b 138f\n"
"135:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x16, #0, 138f\n"
"ldr s9, [x14, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s21, [x23, #0x0]\n"
+ "ldr s27, [x22, #0x0]\n"
"b 138f\n"
"136:" // Height 4: Partial accumulate: partial_2_0
"tbz x16, #1, 137f\n"
"ldr d8, [x14], #0x8\n"
"mov x20, #0x8\n"
- "ldr d14, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "ldr d20, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
"tbz x16, #0, 138f\n"
"ld1 { v8.s }[2], [x14]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v26.s }[2], [x22]\n"
"b 138f\n"
"137:" // Height 4: Partial accumulate: partial_1_0
"ldr s8, [x14, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s14, [x23, #0x0]\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s20, [x23, #0x0]\n"
+ "ldr s26, [x22, #0x0]\n"
"138:" // Height 4: Partial accumulate: Done
"sub x14, x14, x20\n"
"b 141f\n"
@@ -2089,24 +2092,24 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"ldr q11, [x14, #0x30]\n"
"ldr q12, [x14, #0x40]\n"
"ldr q13, [x14, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x22, #0x40]\n"
- "ldr q25, [x22, #0x50]\n"
- "ldr q26, [x21, #0x0]\n"
- "ldr q27, [x21, #0x10]\n"
- "ldr q28, [x21, #0x20]\n"
- "ldr q29, [x21, #0x30]\n"
- "ldr q30, [x21, #0x40]\n"
- "ldr q31, [x21, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
+ "ldr q20, [x23, #0x0]\n"
+ "ldr q21, [x23, #0x10]\n"
+ "ldr q22, [x23, #0x20]\n"
+ "ldr q23, [x23, #0x30]\n"
+ "ldr q24, [x23, #0x40]\n"
+ "ldr q25, [x23, #0x50]\n"
+ "ldr q26, [x22, #0x0]\n"
+ "ldr q27, [x22, #0x10]\n"
+ "ldr q28, [x22, #0x20]\n"
+ "ldr q29, [x22, #0x30]\n"
+ "ldr q30, [x22, #0x40]\n"
+ "ldr q31, [x22, #0x50]\n"
"b 141f\n"
"140:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -2137,8 +2140,8 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"mov x13, #0x0\n"
"142:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w12, [x20, x13, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w12, [x20, x13, LSL #0x2]\n"
"tbz %x[flags], #3, 143f\n"
"ldr x20, [%x[input_ptr], x13, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2183,169 +2186,169 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"fmla v9.4s, v5.4s, v0.s[0]\n"
"ldr x20, [x15, #0x78]\n"
"fmla v15.4s, v5.4s, v1.s[0]\n"
- "mov v4.d[1], x23\n"
+ "add x11, x11, #0x10\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
- "ldr x23, [x15, #0x88]\n"
+ "mov v4.d[1], x23\n"
"fmla v27.4s, v5.4s, v3.s[0]\n"
"ldr d5, [x15, #0x50]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
- "mov v5.d[1], x22\n"
+ "ldr x23, [x15, #0x88]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "ldr x22, [x15, #0x98]\n"
+ "add x10, x10, #0x10\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
- "add x11, x11, #0x10\n"
+ "mov v5.d[1], x22\n"
"fmla v28.4s, v6.4s, v3.s[0]\n"
"ldr d6, [x15, #0x60]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x21\n"
+ "ldr x22, [x15, #0x98]\n"
"fmla v17.4s, v7.4s, v1.s[0]\n"
- "ldr x21, [x15, #0xa8]\n"
+ "add x9, x9, #0x10\n"
"fmla v23.4s, v7.4s, v2.s[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v6.d[1], x21\n"
"fmla v29.4s, v7.4s, v3.s[0]\n"
"ldr d7, [x15, #0x70]\n"
- "mov v7.d[1], x20\n"
"fmla v12.4s, v4.4s, v0.s[0]\n"
+ "ldr x21, [x15, #0xa8]\n"
"fmla v18.4s, v4.4s, v1.s[0]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "add x28, x28, #0x10\n"
+ "mov v7.d[1], x20\n"
"fmla v24.4s, v4.4s, v2.s[0]\n"
- "add x9, x9, #0x10\n"
"fmla v30.4s, v4.4s, v3.s[0]\n"
"ldr d4, [x15, #0x80]\n"
"fmla v13.4s, v5.4s, v0.s[0]\n"
- "mov v4.d[1], x23\n"
+ "ldr x20, [x15, #0xb8]\n"
"fmla v19.4s, v5.4s, v1.s[0]\n"
- "ldr x23, [x15, #0xc8]\n"
+ "ldr x27, [x11, #0x8]\n"
"fmla v25.4s, v5.4s, v2.s[0]\n"
- "add x28, x28, #0x10\n"
+ "mov v4.d[1], x23\n"
"fmla v31.4s, v5.4s, v3.s[0]\n"
"ldr d5, [x15, #0x90]\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
- "mov v5.d[1], x22\n"
+ "ldr x23, [x15, #0xc8]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr x22, [x15, #0xd8]\n"
+ "ldr x26, [x10, #0x8]\n"
"fmla v20.4s, v6.4s, v2.s[1]\n"
- "ldr x27, [x11, #0x8]\n"
+ "mov v5.d[1], x22\n"
"fmla v26.4s, v6.4s, v3.s[1]\n"
"ldr d6, [x15, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x21\n"
+ "ldr x22, [x15, #0xd8]\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr x21, [x15, #0xe8]\n"
+ "ldr x25, [x9, #0x8]\n"
"fmla v21.4s, v7.4s, v2.s[1]\n"
- "ldr x26, [x10, #0x8]\n"
+ "mov v6.d[1], x21\n"
"fmla v27.4s, v7.4s, v3.s[1]\n"
"ldr d7, [x15, #0xb0]\n"
- "mov v7.d[1], x20\n"
"fmla v10.4s, v4.4s, v0.s[1]\n"
+ "ldr x21, [x15, #0xe8]\n"
"fmla v16.4s, v4.4s, v1.s[1]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x24, [x28, #0x8]\n"
+ "mov v7.d[1], x20\n"
"fmla v22.4s, v4.4s, v2.s[1]\n"
- "ldr x25, [x9, #0x8]\n"
"fmla v28.4s, v4.4s, v3.s[1]\n"
"ldr d4, [x15, #0xc0]\n"
"fmla v11.4s, v5.4s, v0.s[1]\n"
- "mov v4.d[1], x23\n"
+ "ldr x20, [x15, #0xf8]\n"
"fmla v17.4s, v5.4s, v1.s[1]\n"
- "ldr x23, [x15, #0x108]\n"
+ "sub x12, x12, #0x4\n"
"fmla v23.4s, v5.4s, v2.s[1]\n"
- "ldr x24, [x28, #0x8]\n"
+ "mov v4.d[1], x23\n"
"fmla v29.4s, v5.4s, v3.s[1]\n"
"ldr d5, [x15, #0xd0]\n"
"fmla v12.4s, v6.4s, v0.s[1]\n"
- "mov v5.d[1], x22\n"
+ "ldr x23, [x15, #0x108]\n"
"fmla v18.4s, v6.4s, v1.s[1]\n"
- "ldr x22, [x15, #0x118]\n"
+ "cmp x12, #0x8\n"
"fmla v24.4s, v6.4s, v2.s[1]\n"
- "sub x12, x12, #0x4\n"
+ "mov v5.d[1], x22\n"
"fmla v30.4s, v6.4s, v3.s[1]\n"
"ldr d6, [x15, #0xe0]\n"
"fmla v13.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x21\n"
+ "ldr x22, [x15, #0x118]\n"
"fmla v19.4s, v7.4s, v1.s[1]\n"
- "ldr x21, [x15, #0x128]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"fmla v25.4s, v7.4s, v2.s[1]\n"
- "cmp x12, #0x8\n"
+ "mov v6.d[1], x21\n"
"fmla v31.4s, v7.4s, v3.s[1]\n"
"ldr d7, [x15, #0xf0]\n"
- "mov v7.d[1], x20\n"
"fmla v8.4s, v4.4s, v0.s[2]\n"
+ "ldr x21, [x15, #0x128]\n"
"fmla v14.4s, v4.4s, v1.s[2]\n"
- "ldr x20, [x15, #0x138]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
+ "mov v7.d[1], x20\n"
"fmla v20.4s, v4.4s, v2.s[2]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
"fmla v26.4s, v4.4s, v3.s[2]\n"
"ldr d4, [x15, #0x100]\n"
"fmla v9.4s, v5.4s, v0.s[2]\n"
- "mov v4.d[1], x23\n"
+ "ldr x20, [x15, #0x138]\n"
"fmla v15.4s, v5.4s, v1.s[2]\n"
- "ldr x23, [x15, #0x148]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
"fmla v21.4s, v5.4s, v2.s[2]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "mov v4.d[1], x23\n"
"fmla v27.4s, v5.4s, v3.s[2]\n"
"ldr d5, [x15, #0x110]\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
- "mov v5.d[1], x22\n"
+ "ldr x23, [x15, #0x148]\n"
"fmla v16.4s, v6.4s, v1.s[2]\n"
- "ldr x22, [x15, #0x158]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v5.d[1], x22\n"
"fmla v28.4s, v6.4s, v3.s[2]\n"
"ldr d6, [x15, #0x120]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x21\n"
+ "ldr x22, [x15, #0x158]\n"
"fmla v17.4s, v7.4s, v1.s[2]\n"
- "ldr x21, [x15, #0x168]\n"
"fmla v23.4s, v7.4s, v2.s[2]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "mov v6.d[1], x21\n"
"fmla v29.4s, v7.4s, v3.s[2]\n"
"ldr d7, [x15, #0x130]\n"
- "mov v7.d[1], x20\n"
"fmla v12.4s, v4.4s, v0.s[2]\n"
+ "ldr x21, [x15, #0x168]\n"
"fmla v18.4s, v4.4s, v1.s[2]\n"
- "ldr x20, [x15, #0x178]\n"
+ "mov v7.d[1], x20\n"
"fmla v24.4s, v4.4s, v2.s[2]\n"
"fmla v30.4s, v4.4s, v3.s[2]\n"
"ldr d4, [x15, #0x140]\n"
"fmla v13.4s, v5.4s, v0.s[2]\n"
- "mov v4.d[1], x23\n"
+ "ldr x20, [x15, #0x178]\n"
"fmla v19.4s, v5.4s, v1.s[2]\n"
"fmla v25.4s, v5.4s, v2.s[2]\n"
+ "mov v4.d[1], x23\n"
"fmla v31.4s, v5.4s, v3.s[2]\n"
"ldr d5, [x15, #0x150]\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
- "mov v5.d[1], x22\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
"fmla v20.4s, v6.4s, v2.s[3]\n"
+ "mov v5.d[1], x22\n"
"fmla v26.4s, v6.4s, v3.s[3]\n"
"ldr d6, [x15, #0x160]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "mov v6.d[1], x21\n"
"fmla v15.4s, v7.4s, v1.s[3]\n"
"fmla v21.4s, v7.4s, v2.s[3]\n"
+ "mov v6.d[1], x21\n"
"fmla v27.4s, v7.4s, v3.s[3]\n"
"ldr d7, [x15, #0x170]\n"
- "mov v7.d[1], x20\n"
"add x15, x15, #0x180\n"
"fmla v10.4s, v4.4s, v0.s[3]\n"
- "ldr x23, [x15, #0x8]\n"
"fmla v16.4s, v4.4s, v1.s[3]\n"
- "ldr x22, [x15, #0x18]\n"
+ "ldr x23, [x15, #0x8]\n"
+ "mov v7.d[1], x20\n"
"fmla v22.4s, v4.4s, v2.s[3]\n"
- "ldr x21, [x15, #0x28]\n"
"fmla v28.4s, v4.4s, v3.s[3]\n"
"ldr d4, [x15, #0x0]\n"
"fmla v11.4s, v5.4s, v0.s[3]\n"
- "ldr x20, [x15, #0x38]\n"
+ "ldr x22, [x15, #0x18]\n"
"fmla v17.4s, v5.4s, v1.s[3]\n"
- "mov v4.d[1], x23\n"
+ "ldr x21, [x15, #0x28]\n"
"fmla v23.4s, v5.4s, v2.s[3]\n"
+ "ldr x20, [x15, #0x38]\n"
"fmla v29.4s, v5.4s, v3.s[3]\n"
"ldr d5, [x15, #0x10]\n"
"fmla v12.4s, v6.4s, v0.s[3]\n"
- "mov v5.d[1], x22\n"
+ "mov v4.d[1], x23\n"
"fmla v18.4s, v6.4s, v1.s[3]\n"
"fmla v24.4s, v6.4s, v2.s[3]\n"
+ "mov v5.d[1], x22\n"
"fmla v30.4s, v6.4s, v3.s[3]\n"
"ldr d6, [x15, #0x20]\n"
"fmla v13.4s, v7.4s, v0.s[3]\n"
@@ -2500,12 +2503,12 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"ldr s5, [x9], #0x4\n"
"ldr s4, [x28], #0x4\n"
"ldr q1, [x15, #0x0]\n"
- "fmla v8.4s, v1.4s, v7.s[0]\n"
"ldr q0, [x15, #0x10]\n"
- "fmla v14.4s, v1.4s, v6.s[0]\n"
"ldr q3, [x15, #0x20]\n"
- "fmla v20.4s, v1.4s, v5.s[0]\n"
"ldr q2, [x15, #0x30]\n"
+ "fmla v8.4s, v1.4s, v7.s[0]\n"
+ "fmla v14.4s, v1.4s, v6.s[0]\n"
+ "fmla v20.4s, v1.4s, v5.s[0]\n"
"fmla v26.4s, v1.4s, v4.s[0]\n"
"ldr q1, [x15, #0x40]\n"
"fmla v9.4s, v0.4s, v7.s[0]\n"
@@ -2537,42 +2540,42 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"cmp x13, x20\n"
"bne 142b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x14, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"prfm pstl1keep, [x14, #0x0]\n"
+ "add x24, x14, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 150f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v0.4s\n"
- "fmin v9.4s, v9.4s, v0.4s\n"
- "fmin v10.4s, v10.4s, v0.4s\n"
- "fmin v11.4s, v11.4s, v0.4s\n"
- "fmin v12.4s, v12.4s, v0.4s\n"
- "fmin v13.4s, v13.4s, v0.4s\n"
- "fmin v14.4s, v14.4s, v0.4s\n"
- "fmin v15.4s, v15.4s, v0.4s\n"
- "fmin v16.4s, v16.4s, v0.4s\n"
- "fmin v17.4s, v17.4s, v0.4s\n"
- "fmin v18.4s, v18.4s, v0.4s\n"
- "fmin v19.4s, v19.4s, v0.4s\n"
- "fmin v20.4s, v20.4s, v0.4s\n"
- "fmin v21.4s, v21.4s, v0.4s\n"
- "fmin v22.4s, v22.4s, v0.4s\n"
- "fmin v23.4s, v23.4s, v0.4s\n"
- "fmin v24.4s, v24.4s, v0.4s\n"
- "fmin v25.4s, v25.4s, v0.4s\n"
- "fmin v26.4s, v26.4s, v0.4s\n"
- "fmin v27.4s, v27.4s, v0.4s\n"
- "fmin v28.4s, v28.4s, v0.4s\n"
- "fmin v29.4s, v29.4s, v0.4s\n"
- "fmin v30.4s, v30.4s, v0.4s\n"
- "fmin v31.4s, v31.4s, v0.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmin v25.4s, v25.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v1.4s\n"
+ "fmin v28.4s, v28.4s, v1.4s\n"
+ "fmin v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v1.4s\n"
+ "fmin v31.4s, v31.4s, v1.4s\n"
"fmax v8.4s, v8.4s, v0.4s\n"
"fmax v9.4s, v9.4s, v0.4s\n"
"fmax v10.4s, v10.4s, v0.4s\n"
@@ -2605,153 +2608,153 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"st1 { v9.4s }, [x14], #0x10\n"
"st1 { v10.4s }, [x14], #0x10\n"
"st1 { v11.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v27.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v27.4s }, [x22], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x22], #0x10\n"
"tbz x16, #2, 152f\n"
"st1 { v12.4s }, [x14], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v30.4s }, [x22], #0x10\n"
"tbz x16, #1, 151f\n"
"str d13, [x14], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x16, #0, 162f\n"
"st1 { v13.s }[2], [x14]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
+ "st1 { v31.s }[2], [x22]\n"
"b 162f\n"
"151:" // Height 4: Partial direct writeback: partial_1_20
"tbz x16, #0, 162f\n"
"str s13, [x14, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s31, [x22, #0x0]\n"
"b 162f\n"
"152:" // Height 4: Partial direct writeback: partial_2_16
"tbz x16, #1, 153f\n"
"str d12, [x14], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x16, #0, 162f\n"
"st1 { v12.s }[2], [x14]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "st1 { v30.s }[2], [x22]\n"
"b 162f\n"
"153:" // Height 4: Partial direct writeback: partial_1_16
"tbz x16, #0, 162f\n"
"str s12, [x14, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
+ "str s30, [x22, #0x0]\n"
"b 162f\n"
"154:" // Height 4: Partial direct writeback: partial_8_0
"tbz x16, #3, 158f\n"
"st1 { v8.4s }, [x14], #0x10\n"
"st1 { v9.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v27.4s }, [x22], #0x10\n"
"tbz x16, #2, 156f\n"
"st1 { v10.4s }, [x14], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
"tbz x16, #1, 155f\n"
"str d11, [x14], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x16, #0, 162f\n"
"st1 { v11.s }[2], [x14]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
"b 162f\n"
"155:" // Height 4: Partial direct writeback: partial_1_12
"tbz x16, #0, 162f\n"
"str s11, [x14, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
"b 162f\n"
"156:" // Height 4: Partial direct writeback: partial_2_8
"tbz x16, #1, 157f\n"
"str d10, [x14], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x16, #0, 162f\n"
"st1 { v10.s }[2], [x14]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v28.s }[2], [x22]\n"
"b 162f\n"
"157:" // Height 4: Partial direct writeback: partial_1_8
"tbz x16, #0, 162f\n"
"str s10, [x14, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s28, [x22, #0x0]\n"
"b 162f\n"
"158:" // Height 4: Partial direct writeback: partial_4_0
"tbz x16, #2, 160f\n"
"st1 { v8.4s }, [x14], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
"tbz x16, #1, 159f\n"
"str d9, [x14], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d27, [x22], #0x8\n"
"tbz x16, #0, 162f\n"
"st1 { v9.s }[2], [x14]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v27.s }[2], [x22]\n"
"b 162f\n"
"159:" // Height 4: Partial direct writeback: partial_1_4
"tbz x16, #0, 162f\n"
"str s9, [x14, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s27, [x22, #0x0]\n"
"b 162f\n"
"160:" // Height 4: Partial direct writeback: partial_2_0
"tbz x16, #1, 161f\n"
"str d8, [x14], #0x8\n"
- "str d14, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d26, [x22], #0x8\n"
"tbz x16, #0, 162f\n"
"st1 { v8.s }[2], [x14]\n"
- "st1 { v14.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v26.s }[2], [x22]\n"
"b 162f\n"
"161:" // Height 4: Partial direct writeback: partial_1_0
"str s8, [x14, #0x0]\n"
- "str s14, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s26, [x22, #0x0]\n"
"162:" // Height 4: Partial direct writeback: Done
"b 164f\n"
"163:" // Height 4: Full writeback
@@ -2762,24 +2765,24 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"str q12, [x14, #0x40]\n"
"str q13, [x14, #0x50]\n"
"add x14, x14, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
- "str q26, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q28, [x21, #0x20]\n"
- "str q29, [x21, #0x30]\n"
- "str q30, [x21, #0x40]\n"
- "str q31, [x21, #0x50]\n"
+ "str q14, [x24, #0x0]\n"
+ "str q15, [x24, #0x10]\n"
+ "str q16, [x24, #0x20]\n"
+ "str q17, [x24, #0x30]\n"
+ "str q18, [x24, #0x40]\n"
+ "str q19, [x24, #0x50]\n"
+ "str q20, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q22, [x23, #0x20]\n"
+ "str q23, [x23, #0x30]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q25, [x23, #0x50]\n"
+ "str q26, [x22, #0x0]\n"
+ "str q27, [x22, #0x10]\n"
+ "str q28, [x22, #0x20]\n"
+ "str q29, [x22, #0x30]\n"
+ "str q30, [x22, #0x40]\n"
+ "str q31, [x22, #0x50]\n"
"164:" // Height 4: Writeback done
"subs x16, x16, #0x18\n"
"bgt 125b\n"
@@ -2795,8 +2798,8 @@ void a64_hybrid_fp32_mla_4x24_a55 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"166:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
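The change running through both files here follows one pattern: `output_ptr` and `bias` leave the inline-asm operand list (see the constraint-list hunk above) and become fields of the `KernelArgs` struct, so the kernel fetches them with a plain `ldr` through `args_ptr` plus an `offsetof` immediate, freeing general-purpose registers for the scheduler. Below is a minimal standalone sketch of that pattern, assuming an AArch64 compiler; the trimmed-down struct and the `load_output_ptr` helper are illustrative only, not the library's API.

#include <cstddef>

// Hypothetical reduction of the pattern used in these kernels: arguments
// that previously arrived as dedicated asm register operands become struct
// fields, loaded on demand through the single args pointer.
struct KernelArgs {
    void *output_ptr = {};
    const float *bias = {};
};

static void *load_output_ptr(const KernelArgs &ka) {
    void *out;
    __asm__ volatile(
        // Same addressing form as the kernels above: base register plus an
        // "I"-constrained (12-bit) immediate produced by offsetof().
        "ldr %x[out], [%x[args], %[off]]\n"
        : [out] "=r"(out)
        : [args] "r"(&ka), [off] "I"(offsetof(KernelArgs, output_ptr))
        : "memory");
    return out;
}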
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp
index dbd45460e8..38acdd5054 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void a64_hybrid_fp32_mla_4x24 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void a64_hybrid_fp32_mla_4x24 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -98,10 +100,10 @@ void a64_hybrid_fp32_mla_4x24 (
"cmp %x[M], #0x2\n"
"bgt 83f\n"
"beq 42f\n"
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x10, 3f\n"
"ldr q8, [x10, #0x0]\n"
@@ -221,8 +223,8 @@ void a64_hybrid_fp32_mla_4x24 (
"mov x26, #0x0\n"
"19:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -252,10 +254,14 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q17, [x28, #0x60]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"ldr q16, [x28, #0x70]\n"
+ "sub x25, x25, #0x4\n"
+ "add x24, x24, #0x10\n"
"fmla v12.4s, v19.4s, v0.s[0]\n"
"ldr q19, [x28, #0x80]\n"
"fmla v13.4s, v18.4s, v0.s[0]\n"
"ldr q18, [x28, #0x90]\n"
+ "cmp x25, #0x8\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
"ldr q17, [x28, #0xa0]\n"
"fmla v9.4s, v16.4s, v0.s[1]\n"
@@ -284,20 +290,16 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q17, [x28, #0x160]\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr q16, [x28, #0x170]\n"
- "sub x25, x25, #0x4\n"
- "add x24, x24, #0x10\n"
- "fmla v10.4s, v19.4s, v0.s[3]\n"
- "fmla v11.4s, v18.4s, v0.s[3]\n"
- "cmp x25, #0x8\n"
"add x28, x28, #0x180\n"
+ "fmla v10.4s, v19.4s, v0.s[3]\n"
"ldr q4, [x28, #0x0]\n"
+ "fmla v11.4s, v18.4s, v0.s[3]\n"
"ldr q5, [x28, #0x10]\n"
"fmla v12.4s, v17.4s, v0.s[3]\n"
"ldr q6, [x28, #0x20]\n"
"fmla v13.4s, v16.4s, v0.s[3]\n"
"ldr q0, [x24, #0x0]\n"
"ldr q7, [x28, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"bge 22b\n"
"23:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v4.4s, v0.s[0]\n"
@@ -308,10 +310,13 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q17, [x28, #0x60]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
"ldr q16, [x28, #0x70]\n"
+ "add x24, x24, #0x10\n"
+ "sub x25, x25, #0x4\n"
"fmla v12.4s, v19.4s, v0.s[0]\n"
"ldr q19, [x28, #0x80]\n"
"fmla v13.4s, v18.4s, v0.s[0]\n"
"ldr q18, [x28, #0x90]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
"ldr q17, [x28, #0xa0]\n"
"fmla v9.4s, v16.4s, v0.s[1]\n"
@@ -340,32 +345,29 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q17, [x28, #0x160]\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr q16, [x28, #0x170]\n"
- "add x24, x24, #0x10\n"
- "sub x25, x25, #0x4\n"
+ "add x28, x28, #0x180\n"
"fmla v10.4s, v19.4s, v0.s[3]\n"
"fmla v11.4s, v18.4s, v0.s[3]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "add x28, x28, #0x180\n"
"fmla v12.4s, v17.4s, v0.s[3]\n"
"fmla v13.4s, v16.4s, v0.s[3]\n"
"24:" // Height 1: Multiply loop: Main loop skip
"cbz x25, 26f\n"
"25:" // Height 1: Multiply loop: Odd block loop
- "ldr s18, [x24], #0x4\n"
- "ldr q16, [x28, #0x0]\n"
- "fmla v8.4s, v16.4s, v18.s[0]\n"
+ "ldr s20, [x24], #0x4\n"
+ "ldr q17, [x28, #0x0]\n"
"sub x25, x25, #0x1\n"
- "ldr q17, [x28, #0x10]\n"
- "ldr q16, [x28, #0x20]\n"
- "fmla v9.4s, v17.4s, v18.s[0]\n"
- "fmla v10.4s, v16.4s, v18.s[0]\n"
- "ldr q17, [x28, #0x30]\n"
- "ldr q16, [x28, #0x40]\n"
- "fmla v11.4s, v17.4s, v18.s[0]\n"
- "fmla v12.4s, v16.4s, v18.s[0]\n"
+ "ldr q16, [x28, #0x10]\n"
+ "ldr q19, [x28, #0x20]\n"
+ "ldr q18, [x28, #0x30]\n"
+ "fmla v8.4s, v17.4s, v20.s[0]\n"
+ "ldr q17, [x28, #0x40]\n"
+ "fmla v9.4s, v16.4s, v20.s[0]\n"
"ldr q16, [x28, #0x50]\n"
- "fmla v13.4s, v16.4s, v18.s[0]\n"
"add x28, x28, #0x60\n"
+ "fmla v10.4s, v19.4s, v20.s[0]\n"
+ "fmla v11.4s, v18.4s, v20.s[0]\n"
+ "fmla v12.4s, v17.4s, v20.s[0]\n"
+ "fmla v13.4s, v16.4s, v20.s[0]\n"
"cbnz x25, 25b\n"
"26:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -374,9 +376,9 @@ void a64_hybrid_fp32_mla_4x24 (
"bne 19b\n"
"prfm pstl1keep, [x27, #0x0]\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v17.4s\n"
"fmin v9.4s, v9.4s, v17.4s\n"
@@ -479,141 +481,141 @@ void a64_hybrid_fp32_mla_4x24 (
"bgt 2b\n"
"b 166f\n"
"42:" // Height 2
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"43:" // Height 2: Column loop
"cbz x10, 44f\n"
"ldr q8, [x10, #0x0]\n"
"ldr q9, [x10, #0x10]\n"
- "mov v14.16b, v8.16b\n"
- "mov v15.16b, v9.16b\n"
"ldr q10, [x10, #0x20]\n"
"ldr q11, [x10, #0x30]\n"
- "mov v16.16b, v10.16b\n"
- "mov v17.16b, v11.16b\n"
"ldr q12, [x10, #0x40]\n"
"ldr q13, [x10, #0x50]\n"
+ "add x10, x10, #0x60\n"
+ "mov v14.16b, v8.16b\n"
+ "mov v15.16b, v9.16b\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v17.16b, v11.16b\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v13.16b\n"
- "add x10, x10, #0x60\n"
"b 59f\n"
"44:" // Height 2: no bias
"tbz %x[flags], #0, 58f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x9, #0x18\n"
- "add x23, x27, x20, LSL #2\n"
+ "add x24, x27, x20, LSL #2\n"
"bge 57f\n"
"tbz x9, #4, 48f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
"tbz x9, #2, 46f\n"
"ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
"tbz x9, #1, 45f\n"
"ldr d13, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
"tbz x9, #0, 56f\n"
"ld1 { v13.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x24]\n"
"b 56f\n"
"45:" // Height 2: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x9, #0, 56f\n"
"ldr s13, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
"b 56f\n"
"46:" // Height 2: Partial accumulate: partial_2_16
"tbz x9, #1, 47f\n"
"ldr d12, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
"tbz x9, #0, 56f\n"
"ld1 { v12.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x24]\n"
"b 56f\n"
"47:" // Height 2: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x9, #0, 56f\n"
"ldr s12, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
"b 56f\n"
"48:" // Height 2: Partial accumulate: partial_8_0
"tbz x9, #3, 52f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
"tbz x9, #2, 50f\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
"tbz x9, #1, 49f\n"
"ldr d11, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
"tbz x9, #0, 56f\n"
"ld1 { v11.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x24]\n"
"b 56f\n"
"49:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x9, #0, 56f\n"
"ldr s11, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
"b 56f\n"
"50:" // Height 2: Partial accumulate: partial_2_8
"tbz x9, #1, 51f\n"
"ldr d10, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
"tbz x9, #0, 56f\n"
"ld1 { v10.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x24]\n"
"b 56f\n"
"51:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x9, #0, 56f\n"
"ldr s10, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
"b 56f\n"
"52:" // Height 2: Partial accumulate: partial_4_0
"tbz x9, #2, 54f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"tbz x9, #1, 53f\n"
"ldr d9, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
"tbz x9, #0, 56f\n"
"ld1 { v9.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 56f\n"
"53:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x9, #0, 56f\n"
"ldr s9, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 56f\n"
"54:" // Height 2: Partial accumulate: partial_2_0
"tbz x9, #1, 55f\n"
"ldr d8, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
"tbz x9, #0, 56f\n"
"ld1 { v8.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 56f\n"
"55:" // Height 2: Partial accumulate: partial_1_0
"ldr s8, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
"56:" // Height 2: Partial accumulate: Done
"sub x27, x27, x20\n"
@@ -625,12 +627,12 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q11, [x27, #0x30]\n"
"ldr q12, [x27, #0x40]\n"
"ldr q13, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
"b 59f\n"
"58:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -649,8 +651,8 @@ void a64_hybrid_fp32_mla_4x24 (
"mov x26, #0x0\n"
"60:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -841,19 +843,19 @@ void a64_hybrid_fp32_mla_4x24 (
"sub x25, x25, #0x1\n"
"ldr q21, [x28, #0x0]\n"
"ldr q20, [x28, #0x10]\n"
- "fmla v8.4s, v21.4s, v25.s[0]\n"
- "fmla v14.4s, v21.4s, v24.s[0]\n"
"ldr q23, [x28, #0x20]\n"
"ldr q22, [x28, #0x30]\n"
+ "fmla v8.4s, v21.4s, v25.s[0]\n"
+ "fmla v14.4s, v21.4s, v24.s[0]\n"
+ "ldr q21, [x28, #0x40]\n"
"fmla v9.4s, v20.4s, v25.s[0]\n"
"fmla v15.4s, v20.4s, v24.s[0]\n"
- "ldr q21, [x28, #0x40]\n"
"ldr q20, [x28, #0x50]\n"
+ "add x28, x28, #0x60\n"
"fmla v10.4s, v23.4s, v25.s[0]\n"
"fmla v16.4s, v23.4s, v24.s[0]\n"
"fmla v11.4s, v22.4s, v25.s[0]\n"
"fmla v17.4s, v22.4s, v24.s[0]\n"
- "add x28, x28, #0x60\n"
"fmla v12.4s, v21.4s, v25.s[0]\n"
"fmla v18.4s, v21.4s, v24.s[0]\n"
"fmla v13.4s, v20.4s, v25.s[0]\n"
@@ -865,13 +867,13 @@ void a64_hybrid_fp32_mla_4x24 (
"cmp x26, x20\n"
"bne 60b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add x24, x27, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 68f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v21.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.4s }, [x21]\n"
"ld1r { v20.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v21.4s\n"
"fmin v9.4s, v9.4s, v21.4s\n"
@@ -905,99 +907,99 @@ void a64_hybrid_fp32_mla_4x24 (
"st1 { v9.4s }, [x27], #0x10\n"
"st1 { v10.4s }, [x27], #0x10\n"
"st1 { v11.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
"tbz x9, #2, 70f\n"
"st1 { v12.4s }, [x27], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
"tbz x9, #1, 69f\n"
"str d13, [x27], #0x8\n"
- "str d19, [x23], #0x8\n"
+ "str d19, [x24], #0x8\n"
"tbz x9, #0, 80f\n"
"st1 { v13.s }[2], [x27]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x24]\n"
"b 80f\n"
"69:" // Height 2: Partial direct writeback: partial_1_20
"tbz x9, #0, 80f\n"
"str s13, [x27, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
"b 80f\n"
"70:" // Height 2: Partial direct writeback: partial_2_16
"tbz x9, #1, 71f\n"
"str d12, [x27], #0x8\n"
- "str d18, [x23], #0x8\n"
+ "str d18, [x24], #0x8\n"
"tbz x9, #0, 80f\n"
"st1 { v12.s }[2], [x27]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x24]\n"
"b 80f\n"
"71:" // Height 2: Partial direct writeback: partial_1_16
"tbz x9, #0, 80f\n"
"str s12, [x27, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
"b 80f\n"
"72:" // Height 2: Partial direct writeback: partial_8_0
"tbz x9, #3, 76f\n"
"st1 { v8.4s }, [x27], #0x10\n"
"st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
"tbz x9, #2, 74f\n"
"st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
"tbz x9, #1, 73f\n"
"str d11, [x27], #0x8\n"
- "str d17, [x23], #0x8\n"
+ "str d17, [x24], #0x8\n"
"tbz x9, #0, 80f\n"
"st1 { v11.s }[2], [x27]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x24]\n"
"b 80f\n"
"73:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 80f\n"
"str s11, [x27, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
"b 80f\n"
"74:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 75f\n"
"str d10, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d16, [x24], #0x8\n"
"tbz x9, #0, 80f\n"
"st1 { v10.s }[2], [x27]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x24]\n"
"b 80f\n"
"75:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 80f\n"
"str s10, [x27, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
"b 80f\n"
"76:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 78f\n"
"st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
"tbz x9, #1, 77f\n"
"str d9, [x27], #0x8\n"
- "str d15, [x23], #0x8\n"
+ "str d15, [x24], #0x8\n"
"tbz x9, #0, 80f\n"
"st1 { v9.s }[2], [x27]\n"
- "st1 { v15.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x24]\n"
"b 80f\n"
"77:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 80f\n"
"str s9, [x27, #0x0]\n"
- "str s15, [x23, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
"b 80f\n"
"78:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 79f\n"
"str d8, [x27], #0x8\n"
- "str d14, [x23], #0x8\n"
+ "str d14, [x24], #0x8\n"
"tbz x9, #0, 80f\n"
"st1 { v8.s }[2], [x27]\n"
- "st1 { v14.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x24]\n"
"b 80f\n"
"79:" // Height 2: Partial direct writeback: partial_1_0
"str s8, [x27, #0x0]\n"
- "str s14, [x23, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
"80:" // Height 2: Partial direct writeback: Done
"b 82f\n"
"81:" // Height 2: Full writeback
@@ -1008,38 +1010,38 @@ void a64_hybrid_fp32_mla_4x24 (
"str q12, [x27, #0x40]\n"
"str q13, [x27, #0x50]\n"
"add x27, x27, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
+ "str q14, [x24, #0x0]\n"
+ "str q15, [x24, #0x10]\n"
+ "str q16, [x24, #0x20]\n"
+ "str q17, [x24, #0x30]\n"
+ "str q18, [x24, #0x40]\n"
+ "str q19, [x24, #0x50]\n"
"82:" // Height 2: Writeback done
"subs x9, x9, #0x18\n"
"bgt 43b\n"
"b 166f\n"
"83:" // Height 3
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"84:" // Height 3: Column loop
"cbz x10, 85f\n"
"ldr q8, [x10, #0x0]\n"
"ldr q9, [x10, #0x10]\n"
- "mov v14.16b, v8.16b\n"
- "mov v15.16b, v9.16b\n"
"ldr q10, [x10, #0x20]\n"
"ldr q11, [x10, #0x30]\n"
- "mov v16.16b, v10.16b\n"
- "mov v17.16b, v11.16b\n"
"ldr q12, [x10, #0x40]\n"
"ldr q13, [x10, #0x50]\n"
+ "add x10, x10, #0x60\n"
+ "mov v14.16b, v8.16b\n"
+ "mov v15.16b, v9.16b\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v17.16b, v11.16b\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v13.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
- "add x10, x10, #0x60\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
"mov v24.16b, v12.16b\n"
@@ -1048,147 +1050,147 @@ void a64_hybrid_fp32_mla_4x24 (
"85:" // Height 3: no bias
"tbz %x[flags], #0, 99f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
"cmp x9, #0x18\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x24, x27, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"bge 98f\n"
"tbz x9, #4, 89f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
"tbz x9, #2, 87f\n"
"ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"tbz x9, #1, 86f\n"
"ldr d13, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x9, #0, 97f\n"
"ld1 { v13.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 97f\n"
"86:" // Height 3: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x9, #0, 97f\n"
"ldr s13, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"b 97f\n"
"87:" // Height 3: Partial accumulate: partial_2_16
"tbz x9, #1, 88f\n"
"ldr d12, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x9, #0, 97f\n"
"ld1 { v12.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 97f\n"
"88:" // Height 3: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x9, #0, 97f\n"
"ldr s12, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"b 97f\n"
"89:" // Height 3: Partial accumulate: partial_8_0
"tbz x9, #3, 93f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"tbz x9, #2, 91f\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
"tbz x9, #1, 90f\n"
"ldr d11, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
- "ldr d23, [x22], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
"tbz x9, #0, 97f\n"
"ld1 { v11.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
"b 97f\n"
"90:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x9, #0, 97f\n"
"ldr s11, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
"b 97f\n"
"91:" // Height 3: Partial accumulate: partial_2_8
"tbz x9, #1, 92f\n"
"ldr d10, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
"tbz x9, #0, 97f\n"
"ld1 { v10.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
"b 97f\n"
"92:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x9, #0, 97f\n"
"ldr s10, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
"b 97f\n"
"93:" // Height 3: Partial accumulate: partial_4_0
"tbz x9, #2, 95f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
"tbz x9, #1, 94f\n"
"ldr d9, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
- "ldr d21, [x22], #0x8\n"
+ "ldr d21, [x23], #0x8\n"
"tbz x9, #0, 97f\n"
"ld1 { v9.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v21.s }[2], [x23]\n"
"b 97f\n"
"94:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x9, #0, 97f\n"
"ldr s9, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s21, [x23, #0x0]\n"
"b 97f\n"
"95:" // Height 3: Partial accumulate: partial_2_0
"tbz x9, #1, 96f\n"
"ldr d8, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
- "ldr d20, [x22], #0x8\n"
+ "ldr d20, [x23], #0x8\n"
"tbz x9, #0, 97f\n"
"ld1 { v8.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v20.s }[2], [x23]\n"
"b 97f\n"
"96:" // Height 3: Partial accumulate: partial_1_0
"ldr s8, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s20, [x22, #0x0]\n"
+ "ldr s20, [x23, #0x0]\n"
"97:" // Height 3: Partial accumulate: Done
"sub x27, x27, x20\n"
"b 100f\n"
@@ -1199,18 +1201,18 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q11, [x27, #0x30]\n"
"ldr q12, [x27, #0x40]\n"
"ldr q13, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x22, #0x40]\n"
- "ldr q25, [x22, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
+ "ldr q20, [x23, #0x0]\n"
+ "ldr q21, [x23, #0x10]\n"
+ "ldr q22, [x23, #0x20]\n"
+ "ldr q23, [x23, #0x30]\n"
+ "ldr q24, [x23, #0x40]\n"
+ "ldr q25, [x23, #0x50]\n"
"b 100f\n"
"99:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -1235,8 +1237,8 @@ void a64_hybrid_fp32_mla_4x24 (
"mov x26, #0x0\n"
"101:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 102f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1387,10 +1389,10 @@ void a64_hybrid_fp32_mla_4x24 (
"fmla v15.4s, v5.4s, v1.s[0]\n"
"fmla v21.4s, v5.4s, v2.s[0]\n"
"ldr q28, [x28, #0x50]\n"
- "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v16.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x25, x25, #0x4\n"
"prfm pldl1keep, [x23, #0x80]\n"
"fmla v22.4s, v6.4s, v2.s[0]\n"
"ldr q27, [x28, #0x60]\n"
@@ -1484,20 +1486,20 @@ void a64_hybrid_fp32_mla_4x24 (
"sub x25, x25, #0x1\n"
"ldr s30, [x22], #0x4\n"
"ldr q27, [x28, #0x0]\n"
- "fmla v8.4s, v27.4s, v0.s[0]\n"
- "fmla v14.4s, v27.4s, v31.s[0]\n"
"ldr q26, [x28, #0x10]\n"
"ldr q29, [x28, #0x20]\n"
- "fmla v20.4s, v27.4s, v30.s[0]\n"
- "fmla v9.4s, v26.4s, v0.s[0]\n"
"ldr q28, [x28, #0x30]\n"
+ "fmla v8.4s, v27.4s, v0.s[0]\n"
+ "fmla v14.4s, v27.4s, v31.s[0]\n"
+ "fmla v20.4s, v27.4s, v30.s[0]\n"
"ldr q27, [x28, #0x40]\n"
+ "fmla v9.4s, v26.4s, v0.s[0]\n"
"fmla v15.4s, v26.4s, v31.s[0]\n"
"fmla v21.4s, v26.4s, v30.s[0]\n"
"ldr q26, [x28, #0x50]\n"
+ "add x28, x28, #0x60\n"
"fmla v10.4s, v29.4s, v0.s[0]\n"
"fmla v16.4s, v29.4s, v31.s[0]\n"
- "add x28, x28, #0x60\n"
"fmla v22.4s, v29.4s, v30.s[0]\n"
"fmla v11.4s, v28.4s, v0.s[0]\n"
"fmla v17.4s, v28.4s, v31.s[0]\n"
@@ -1515,15 +1517,15 @@ void a64_hybrid_fp32_mla_4x24 (
"cmp x26, x20\n"
"bne 101b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x24, x27, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 109f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v27.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v27.4s }, [x21]\n"
"ld1r { v26.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v27.4s\n"
"fmin v9.4s, v9.4s, v27.4s\n"
@@ -1569,126 +1571,126 @@ void a64_hybrid_fp32_mla_4x24 (
"st1 { v9.4s }, [x27], #0x10\n"
"st1 { v10.4s }, [x27], #0x10\n"
"st1 { v11.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
"tbz x9, #2, 111f\n"
"st1 { v12.4s }, [x27], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x9, #1, 110f\n"
"str d13, [x27], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x9, #0, 121f\n"
"st1 { v13.s }[2], [x27]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 121f\n"
"110:" // Height 3: Partial direct writeback: partial_1_20
"tbz x9, #0, 121f\n"
"str s13, [x27, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 121f\n"
"111:" // Height 3: Partial direct writeback: partial_2_16
"tbz x9, #1, 112f\n"
"str d12, [x27], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x9, #0, 121f\n"
"st1 { v12.s }[2], [x27]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 121f\n"
"112:" // Height 3: Partial direct writeback: partial_1_16
"tbz x9, #0, 121f\n"
"str s12, [x27, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"b 121f\n"
"113:" // Height 3: Partial direct writeback: partial_8_0
"tbz x9, #3, 117f\n"
"st1 { v8.4s }, [x27], #0x10\n"
"st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
"tbz x9, #2, 115f\n"
"st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
"tbz x9, #1, 114f\n"
"str d11, [x27], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
"tbz x9, #0, 121f\n"
"st1 { v11.s }[2], [x27]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
"b 121f\n"
"114:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 121f\n"
"str s11, [x27, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
"b 121f\n"
"115:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 116f\n"
"str d10, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
"tbz x9, #0, 121f\n"
"st1 { v10.s }[2], [x27]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
"b 121f\n"
"116:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 121f\n"
"str s10, [x27, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
"b 121f\n"
"117:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 119f\n"
"st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
"tbz x9, #1, 118f\n"
"str d9, [x27], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
"tbz x9, #0, 121f\n"
"st1 { v9.s }[2], [x27]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
"b 121f\n"
"118:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 121f\n"
"str s9, [x27, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
"b 121f\n"
"119:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 120f\n"
"str d8, [x27], #0x8\n"
- "str d14, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
"tbz x9, #0, 121f\n"
"st1 { v8.s }[2], [x27]\n"
- "st1 { v14.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
"b 121f\n"
"120:" // Height 3: Partial direct writeback: partial_1_0
"str s8, [x27, #0x0]\n"
- "str s14, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
"121:" // Height 3: Partial direct writeback: Done
"b 123f\n"
"122:" // Height 3: Full writeback
@@ -1699,47 +1701,48 @@ void a64_hybrid_fp32_mla_4x24 (
"str q12, [x27, #0x40]\n"
"str q13, [x27, #0x50]\n"
"add x27, x27, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
+ "str q14, [x24, #0x0]\n"
+ "str q15, [x24, #0x10]\n"
+ "str q16, [x24, #0x20]\n"
+ "str q17, [x24, #0x30]\n"
+ "str q18, [x24, #0x40]\n"
+ "str q19, [x24, #0x50]\n"
+ "str q20, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q22, [x23, #0x20]\n"
+ "str q23, [x23, #0x30]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q25, [x23, #0x50]\n"
"123:" // Height 3: Writeback done
"subs x9, x9, #0x18\n"
"bgt 84b\n"
"b 166f\n"
"124:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x10\n"
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"125:" // Height 4: Column loop
"cbz x10, 126f\n"
"ldr q8, [x10, #0x0]\n"
"ldr q9, [x10, #0x10]\n"
- "mov v14.16b, v8.16b\n"
- "mov v15.16b, v9.16b\n"
"ldr q10, [x10, #0x20]\n"
"ldr q11, [x10, #0x30]\n"
- "mov v16.16b, v10.16b\n"
- "mov v17.16b, v11.16b\n"
"ldr q12, [x10, #0x40]\n"
"ldr q13, [x10, #0x50]\n"
+ "add x10, x10, #0x60\n"
+ "mov v14.16b, v8.16b\n"
+ "mov v15.16b, v9.16b\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v16.16b, v10.16b\n"
+ "mov v17.16b, v11.16b\n"
"mov v18.16b, v12.16b\n"
"mov v19.16b, v13.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
- "add x10, x10, #0x60\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
"mov v24.16b, v12.16b\n"
@@ -1754,175 +1757,175 @@ void a64_hybrid_fp32_mla_4x24 (
"126:" // Height 4: no bias
"tbz %x[flags], #0, 140f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x9, #0x18\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x27, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"bge 139f\n"
"tbz x9, #4, 130f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v27.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x9, #2, 128f\n"
"ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x9, #1, 127f\n"
"ldr d13, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x9, #0, 138f\n"
"ld1 { v13.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 138f\n"
"127:" // Height 4: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x9, #0, 138f\n"
"ldr s13, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 138f\n"
"128:" // Height 4: Partial accumulate: partial_2_16
"tbz x9, #1, 129f\n"
"ldr d12, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x9, #0, 138f\n"
"ld1 { v12.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 138f\n"
"129:" // Height 4: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x9, #0, 138f\n"
"ldr s12, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 138f\n"
"130:" // Height 4: Partial accumulate: partial_8_0
"tbz x9, #3, 134f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v27.4s }, [x22], #0x10\n"
"tbz x9, #2, 132f\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x9, #1, 131f\n"
"ldr d11, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x9, #0, 138f\n"
"ld1 { v11.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 138f\n"
"131:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x9, #0, 138f\n"
"ldr s11, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 138f\n"
"132:" // Height 4: Partial accumulate: partial_2_8
"tbz x9, #1, 133f\n"
"ldr d10, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x9, #0, 138f\n"
"ld1 { v10.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 138f\n"
"133:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x9, #0, 138f\n"
"ldr s10, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"b 138f\n"
"134:" // Height 4: Partial accumulate: partial_4_0
"tbz x9, #2, 136f\n"
"ld1 { v8.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"tbz x9, #1, 135f\n"
"ldr d9, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
+ "ldr d21, [x23], #0x8\n"
+ "ldr d27, [x22], #0x8\n"
"tbz x9, #0, 138f\n"
"ld1 { v9.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v27.s }[2], [x22]\n"
"b 138f\n"
"135:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x9, #0, 138f\n"
"ldr s9, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s21, [x23, #0x0]\n"
+ "ldr s27, [x22, #0x0]\n"
"b 138f\n"
"136:" // Height 4: Partial accumulate: partial_2_0
"tbz x9, #1, 137f\n"
"ldr d8, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
+ "ldr d20, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
"tbz x9, #0, 138f\n"
"ld1 { v8.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v20.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v26.s }[2], [x22]\n"
"b 138f\n"
"137:" // Height 4: Partial accumulate: partial_1_0
"ldr s8, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s20, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "ldr s20, [x23, #0x0]\n"
+ "ldr s26, [x22, #0x0]\n"
"138:" // Height 4: Partial accumulate: Done
"sub x27, x27, x20\n"
"b 141f\n"
@@ -1933,24 +1936,24 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr q11, [x27, #0x30]\n"
"ldr q12, [x27, #0x40]\n"
"ldr q13, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q20, [x22, #0x0]\n"
- "ldr q21, [x22, #0x10]\n"
- "ldr q22, [x22, #0x20]\n"
- "ldr q23, [x22, #0x30]\n"
- "ldr q24, [x22, #0x40]\n"
- "ldr q25, [x22, #0x50]\n"
- "ldr q26, [x21, #0x0]\n"
- "ldr q27, [x21, #0x10]\n"
- "ldr q28, [x21, #0x20]\n"
- "ldr q29, [x21, #0x30]\n"
- "ldr q30, [x21, #0x40]\n"
- "ldr q31, [x21, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
+ "ldr q20, [x23, #0x0]\n"
+ "ldr q21, [x23, #0x10]\n"
+ "ldr q22, [x23, #0x20]\n"
+ "ldr q23, [x23, #0x30]\n"
+ "ldr q24, [x23, #0x40]\n"
+ "ldr q25, [x23, #0x50]\n"
+ "ldr q26, [x22, #0x0]\n"
+ "ldr q27, [x22, #0x10]\n"
+ "ldr q28, [x22, #0x20]\n"
+ "ldr q29, [x22, #0x30]\n"
+ "ldr q30, [x22, #0x40]\n"
+ "ldr q31, [x22, #0x50]\n"
"b 141f\n"
"140:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1981,8 +1984,8 @@ void a64_hybrid_fp32_mla_4x24 (
"mov x26, #0x0\n"
"142:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 143f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2289,10 +2292,10 @@ void a64_hybrid_fp32_mla_4x24 (
"ldr s4, [x21], #0x4\n"
"ldr q1, [x28, #0x0]\n"
"ldr q0, [x28, #0x10]\n"
- "fmla v8.4s, v1.4s, v7.s[0]\n"
- "fmla v14.4s, v1.4s, v6.s[0]\n"
"ldr q3, [x28, #0x20]\n"
"ldr q2, [x28, #0x30]\n"
+ "fmla v8.4s, v1.4s, v7.s[0]\n"
+ "fmla v14.4s, v1.4s, v6.s[0]\n"
"fmla v20.4s, v1.4s, v5.s[0]\n"
"fmla v26.4s, v1.4s, v4.s[0]\n"
"ldr q1, [x28, #0x40]\n"
@@ -2325,17 +2328,17 @@ void a64_hybrid_fp32_mla_4x24 (
"cmp x26, x20\n"
"bne 142b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x27, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 150f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
@@ -2393,153 +2396,153 @@ void a64_hybrid_fp32_mla_4x24 (
"st1 { v9.4s }, [x27], #0x10\n"
"st1 { v10.4s }, [x27], #0x10\n"
"st1 { v11.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v27.4s }, [x21], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v27.4s }, [x22], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x22], #0x10\n"
"tbz x9, #2, 152f\n"
"st1 { v12.4s }, [x27], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v30.4s }, [x22], #0x10\n"
"tbz x9, #1, 151f\n"
"str d13, [x27], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x9, #0, 162f\n"
"st1 { v13.s }[2], [x27]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
+ "st1 { v31.s }[2], [x22]\n"
"b 162f\n"
"151:" // Height 4: Partial direct writeback: partial_1_20
"tbz x9, #0, 162f\n"
"str s13, [x27, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s31, [x22, #0x0]\n"
"b 162f\n"
"152:" // Height 4: Partial direct writeback: partial_2_16
"tbz x9, #1, 153f\n"
"str d12, [x27], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x9, #0, 162f\n"
"st1 { v12.s }[2], [x27]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "st1 { v30.s }[2], [x22]\n"
"b 162f\n"
"153:" // Height 4: Partial direct writeback: partial_1_16
"tbz x9, #0, 162f\n"
"str s12, [x27, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
+ "str s30, [x22, #0x0]\n"
"b 162f\n"
"154:" // Height 4: Partial direct writeback: partial_8_0
"tbz x9, #3, 158f\n"
"st1 { v8.4s }, [x27], #0x10\n"
"st1 { v9.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v15.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
- "st1 { v27.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v27.4s }, [x22], #0x10\n"
"tbz x9, #2, 156f\n"
"st1 { v10.4s }, [x27], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
"tbz x9, #1, 155f\n"
"str d11, [x27], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x9, #0, 162f\n"
"st1 { v11.s }[2], [x27]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
"b 162f\n"
"155:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 162f\n"
"str s11, [x27, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
"b 162f\n"
"156:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 157f\n"
"str d10, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x9, #0, 162f\n"
"st1 { v10.s }[2], [x27]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v28.s }[2], [x22]\n"
"b 162f\n"
"157:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 162f\n"
"str s10, [x27, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s28, [x22, #0x0]\n"
"b 162f\n"
"158:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 160f\n"
"st1 { v8.4s }, [x27], #0x10\n"
- "st1 { v14.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
"tbz x9, #1, 159f\n"
"str d9, [x27], #0x8\n"
- "str d15, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d27, [x22], #0x8\n"
"tbz x9, #0, 162f\n"
"st1 { v9.s }[2], [x27]\n"
- "st1 { v15.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v27.s }[2], [x22]\n"
"b 162f\n"
"159:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 162f\n"
"str s9, [x27, #0x0]\n"
- "str s15, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s27, [x22, #0x0]\n"
"b 162f\n"
"160:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 161f\n"
"str d8, [x27], #0x8\n"
- "str d14, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d26, [x22], #0x8\n"
"tbz x9, #0, 162f\n"
"st1 { v8.s }[2], [x27]\n"
- "st1 { v14.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v26.s }[2], [x22]\n"
"b 162f\n"
"161:" // Height 4: Partial direct writeback: partial_1_0
"str s8, [x27, #0x0]\n"
- "str s14, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s26, [x22, #0x0]\n"
"162:" // Height 4: Partial direct writeback: Done
"b 164f\n"
"163:" // Height 4: Full writeback
@@ -2550,24 +2553,24 @@ void a64_hybrid_fp32_mla_4x24 (
"str q12, [x27, #0x40]\n"
"str q13, [x27, #0x50]\n"
"add x27, x27, #0x60\n"
- "str q14, [x23, #0x0]\n"
- "str q15, [x23, #0x10]\n"
- "str q16, [x23, #0x20]\n"
- "str q17, [x23, #0x30]\n"
- "str q18, [x23, #0x40]\n"
- "str q19, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
- "str q26, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q28, [x21, #0x20]\n"
- "str q29, [x21, #0x30]\n"
- "str q30, [x21, #0x40]\n"
- "str q31, [x21, #0x50]\n"
+ "str q14, [x24, #0x0]\n"
+ "str q15, [x24, #0x10]\n"
+ "str q16, [x24, #0x20]\n"
+ "str q17, [x24, #0x30]\n"
+ "str q18, [x24, #0x40]\n"
+ "str q19, [x24, #0x50]\n"
+ "str q20, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q22, [x23, #0x20]\n"
+ "str q23, [x23, #0x30]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q25, [x23, #0x50]\n"
+ "str q26, [x22, #0x0]\n"
+ "str q27, [x22, #0x10]\n"
+ "str q28, [x22, #0x20]\n"
+ "str q29, [x22, #0x30]\n"
+ "str q30, [x22, #0x40]\n"
+ "str q31, [x22, #0x50]\n"
"164:" // Height 4: Writeback done
"subs x9, x9, #0x18\n"
"bgt 125b\n"
@@ -2583,8 +2586,8 @@ void a64_hybrid_fp32_mla_4x24 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"166:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
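
[Editorial note on the file above] The hunks in this generated kernel make two mechanical changes: the output-row pointers formerly held in x21-x23 are renumbered to x22-x24 (with the corresponding loads, stores, and prefetches updated to match), and output_ptr/bias leave the inline-asm operand list to become fields of KernelArgs, fetched through offsetof like the other arguments. Below is a minimal, compilable sketch of that struct-plus-offsetof pattern; the struct layout and function name are simplified stand-ins for illustration, not the real kernel interface.

#include <cstddef> // offsetof

// Trimmed-down illustration of the KernelArgs pattern used above: values
// that used to be separate inline-asm operands are packed into one struct,
// so the asm body needs only a single base register (args_ptr) to reach
// them, freeing general-purpose registers for the main loop.
struct KernelArgs
{
    size_t       N          = {};
    const float *B_ptr      = {};
    void        *output_ptr = {};
    const float *bias       = {};
};

void kernel_entry(void *output, const float *bias)
{
    KernelArgs ka;
    ka.output_ptr = output; // previously a "+&r" (output_ptr) asm operand
    ka.bias       = bias;   // previously an "r" (bias) asm operand
#if defined(__aarch64__)
    __asm__ __volatile__(
        // Arguments are loaded from the struct on demand, matching the
        // rewritten kernel's offsetof_bias / offsetof_output_ptr usage.
        "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
        "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        :
        : [args_ptr] "r"(&ka),
          [offsetof_bias] "I"(offsetof(KernelArgs, bias)),
          [offsetof_output_ptr] "I"(offsetof(KernelArgs, output_ptr))
        : "x7", "x16", "memory");
#endif
}
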
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp
index 7f85d2dd42..d47ab34e03 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp
@@ -71,7 +71,8 @@ public:
return true;
}
- StdTransformsFixedTRB<rhs_operand_type, result_type, 6, 16, 1> transforms = {};
+ StdTransformsFixedTRB<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 1> transforms = {};
+
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
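
[Editorial note on the hunk above] The .hpp change widens StdTransformsFixedTRB to take the LHS operand type as its own template parameter instead of reusing the RHS type, so kernels whose two operands differ in element type can describe both precisely. The sketch below is a hypothetical reduction of that parameter change; the real template lives elsewhere in arm_gemm and carries more machinery than shown here.

// Hypothetical sketch: the transform helper is now parameterised on the
// LHS and RHS operand types separately, alongside the result type and the
// fixed tile geometry.
template <typename TLhs, typename TRhs, typename TResult,
          unsigned int height, unsigned int width, unsigned int block>
struct StdTransformsFixedTRB_sketch
{
    using lhs_operand_type = TLhs;
    using rhs_operand_type = TRhs;
    using result_type      = TResult;
};

// Mirrors the updated declaration above: fp32 operands, 6x16 tile, block 1.
StdTransformsFixedTRB_sketch<float, float, float, 6, 16, 1> transforms_sketch = {};
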
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp
index ddbc840829..a315a2fe4f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void a64_hybrid_fp32_mla_6x16_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void a64_hybrid_fp32_mla_6x16_a55 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -101,10 +103,10 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 67f\n"
"beq 34f\n"
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x7, 3f\n"
"ldr q8, [x7, #0x0]\n"
@@ -187,8 +189,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"mov x15, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -210,86 +212,89 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"18:" // Height 1: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr d17, [x17, #0x20]\n"
- "ldr x20, [x17, #0x28]\n"
+ "ldr x21, [x17, #0x28]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"ldr d16, [x17, #0x30]\n"
- "mov v17.d[1], x20\n"
+ "add x13, x13, #0x10\n"
"ldr x20, [x17, #0x38]\n"
+ "sub x14, x14, #0x4\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x48]\n"
+ "ldr x22, [x13, #0x8]\n"
+ "cmp x14, #0x8\n"
"mov v16.d[1], x20\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"ldr d17, [x17, #0x40]\n"
- "ldr x20, [x17, #0x48]\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
"ldr d16, [x17, #0x50]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x58]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x68]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"mov v16.d[1], x20\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
"ldr d17, [x17, #0x60]\n"
- "ldr x20, [x17, #0x68]\n"
"fmla v9.4s, v16.4s, v0.s[1]\n"
"ldr d16, [x17, #0x70]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x78]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x88]\n"
"mov v16.d[1], x20\n"
"fmla v10.4s, v17.4s, v0.s[1]\n"
"ldr d17, [x17, #0x80]\n"
- "ldr x20, [x17, #0x88]\n"
"fmla v11.4s, v16.4s, v0.s[1]\n"
"ldr d16, [x17, #0x90]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0x98]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xa8]\n"
"mov v16.d[1], x20\n"
"fmla v8.4s, v17.4s, v0.s[2]\n"
"ldr d17, [x17, #0xa0]\n"
- "ldr x20, [x17, #0xa8]\n"
"fmla v9.4s, v16.4s, v0.s[2]\n"
"ldr d16, [x17, #0xb0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0xb8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xc8]\n"
"mov v16.d[1], x20\n"
"fmla v10.4s, v17.4s, v0.s[2]\n"
"ldr d17, [x17, #0xc0]\n"
- "ldr x20, [x17, #0xc8]\n"
"fmla v11.4s, v16.4s, v0.s[2]\n"
"ldr d16, [x17, #0xd0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0xd8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xe8]\n"
"mov v16.d[1], x20\n"
"fmla v8.4s, v17.4s, v0.s[3]\n"
"ldr d17, [x17, #0xe0]\n"
- "ldr x20, [x17, #0xe8]\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr d16, [x17, #0xf0]\n"
- "mov v17.d[1], x20\n"
"ldr x20, [x17, #0xf8]\n"
- "mov v16.d[1], x20\n"
- "add x13, x13, #0x10\n"
"add x17, x17, #0x100\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v16.d[1], x20\n"
"fmla v10.4s, v17.4s, v0.s[3]\n"
"ldr d6, [x17, #0x0]\n"
- "ldr x20, [x17, #0x8]\n"
"fmla v11.4s, v16.4s, v0.s[3]\n"
"ldr d0, [x13, #0x0]\n"
- "sub x14, x14, #0x4\n"
"ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x8\n"
- "ldr x21, [x13, #0x8]\n"
- "mov v6.d[1], x20\n"
"ldr x20, [x17, #0x18]\n"
- "mov v0.d[1], x21\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q17, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"ldr q16, [x17, #0x30]\n"
+ "add x13, x13, #0x10\n"
+ "sub x14, x14, #0x4\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"ldr q17, [x17, #0x40]\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
"ldr q16, [x17, #0x50]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
"ldr q17, [x17, #0x60]\n"
"fmla v9.4s, v16.4s, v0.s[1]\n"
@@ -310,26 +315,23 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr q17, [x17, #0xe0]\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr q16, [x17, #0xf0]\n"
- "add x13, x13, #0x10\n"
- "sub x14, x14, #0x4\n"
+ "add x17, x17, #0x100\n"
"fmla v10.4s, v17.4s, v0.s[3]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"fmla v11.4s, v16.4s, v0.s[3]\n"
- "add x17, x17, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
"cbz x14, 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
- "ldr s17, [x13], #0x4\n"
+ "ldr s18, [x13], #0x4\n"
"sub x14, x14, #0x1\n"
- "ldr q16, [x17, #0x0]\n"
- "fmla v8.4s, v16.4s, v17.s[0]\n"
+ "ldr q17, [x17, #0x0]\n"
"ldr q16, [x17, #0x10]\n"
- "fmla v9.4s, v16.4s, v17.s[0]\n"
- "ldr q16, [x17, #0x20]\n"
- "fmla v10.4s, v16.4s, v17.s[0]\n"
+ "fmla v8.4s, v17.4s, v18.s[0]\n"
+ "ldr q17, [x17, #0x20]\n"
+ "fmla v9.4s, v16.4s, v18.s[0]\n"
"ldr q16, [x17, #0x30]\n"
- "fmla v11.4s, v16.4s, v17.s[0]\n"
"add x17, x17, #0x40\n"
+ "fmla v10.4s, v17.4s, v18.s[0]\n"
+ "fmla v11.4s, v16.4s, v18.s[0]\n"
"cbnz x14, 21b\n"
"22:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -338,14 +340,14 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"bne 15b\n"
"prfm pstl1keep, [x16, #0x0]\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v16.4s\n"
- "fmin v9.4s, v9.4s, v16.4s\n"
- "fmin v10.4s, v10.4s, v16.4s\n"
- "fmin v11.4s, v11.4s, v16.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v17.4s\n"
+ "fmin v9.4s, v9.4s, v17.4s\n"
+ "fmin v10.4s, v10.4s, v17.4s\n"
+ "fmin v11.4s, v11.4s, v17.4s\n"
"fmax v8.4s, v8.4s, v16.4s\n"
"fmax v9.4s, v9.4s, v16.4s\n"
"fmax v10.4s, v10.4s, v16.4s\n"
@@ -410,96 +412,96 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"bgt 2b\n"
"b 200f\n"
"34:" // Height 2
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"35:" // Height 2: Column loop
"cbz x7, 36f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
- "mov v15.16b, v11.16b\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"b 47f\n"
"36:" // Height 2: no bias
"tbz %x[flags], #0, 46f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x10\n"
- "add x25, x16, x20, LSL #2\n"
+ "add x26, x16, x20, LSL #2\n"
"bge 45f\n"
"tbz x8, #3, 40f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"ld1 { v9.4s }, [x16], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
"tbz x8, #2, 38f\n"
"ld1 { v10.4s }, [x16], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
"tbz x8, #1, 37f\n"
"ldr d11, [x16], #0x8\n"
"mov x20, #0x38\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"tbz x8, #0, 44f\n"
"ld1 { v11.s }[2], [x16]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v15.s }[2], [x26]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x8, #0, 44f\n"
"ldr s11, [x16, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
"tbz x8, #1, 39f\n"
"ldr d10, [x16], #0x8\n"
"mov x20, #0x28\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"tbz x8, #0, 44f\n"
"ld1 { v10.s }[2], [x16]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v14.s }[2], [x26]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x8, #0, 44f\n"
"ldr s10, [x16, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
"tbz x8, #2, 42f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x8, #1, 41f\n"
"ldr d9, [x16], #0x8\n"
"mov x20, #0x18\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"tbz x8, #0, 44f\n"
"ld1 { v9.s }[2], [x16]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x8, #0, 44f\n"
"ldr s9, [x16, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
"tbz x8, #1, 43f\n"
"ldr d8, [x16], #0x8\n"
"mov x20, #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"tbz x8, #0, 44f\n"
"ld1 { v8.s }[2], [x16]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
"ldr s8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 47f\n"
@@ -508,10 +510,10 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"b 47f\n"
"46:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -526,8 +528,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"mov x15, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -552,98 +554,98 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr x21, [x17, #0x28]\n"
+ "ldr x20, [x17, #0x28]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
"ldr d17, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "ldr x20, [x17, #0x38]\n"
+ "ldr x21, [x17, #0x38]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr d16, [x17, #0x30]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x48]\n"
+ "add x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
+ "mov v16.d[1], x21\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
- "mov v16.d[1], x20\n"
"fmla v14.4s, v17.4s, v1.s[0]\n"
"ldr d17, [x17, #0x40]\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
- "ldr x20, [x17, #0x48]\n"
+ "ldr x21, [x17, #0x58]\n"
"fmla v15.4s, v16.4s, v1.s[0]\n"
"ldr d16, [x17, #0x50]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0x58]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0x68]\n"
+ "ldr x23, [x13, #0x8]\n"
+ "sub x14, x14, #0x4\n"
+ "mov v16.d[1], x21\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
- "ldr x21, [x17, #0x68]\n"
"fmla v12.4s, v17.4s, v1.s[1]\n"
"ldr d17, [x17, #0x60]\n"
"fmla v9.4s, v16.4s, v0.s[1]\n"
- "ldr x20, [x17, #0x78]\n"
+ "ldr x21, [x17, #0x78]\n"
"fmla v13.4s, v16.4s, v1.s[1]\n"
"ldr d16, [x17, #0x70]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x88]\n"
+ "ldr x22, [x12, #0x8]\n"
+ "cmp x14, #0x8\n"
+ "mov v16.d[1], x21\n"
"fmla v10.4s, v17.4s, v0.s[1]\n"
- "mov v16.d[1], x20\n"
"fmla v14.4s, v17.4s, v1.s[1]\n"
"ldr d17, [x17, #0x80]\n"
"fmla v11.4s, v16.4s, v0.s[1]\n"
- "ldr x20, [x17, #0x88]\n"
+ "ldr x21, [x17, #0x98]\n"
"fmla v15.4s, v16.4s, v1.s[1]\n"
"ldr d16, [x17, #0x90]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0x98]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0xa8]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v16.d[1], x21\n"
"fmla v8.4s, v17.4s, v0.s[2]\n"
- "ldr x21, [x17, #0xa8]\n"
"fmla v12.4s, v17.4s, v1.s[2]\n"
"ldr d17, [x17, #0xa0]\n"
"fmla v9.4s, v16.4s, v0.s[2]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "ldr x21, [x17, #0xb8]\n"
"fmla v13.4s, v16.4s, v1.s[2]\n"
"ldr d16, [x17, #0xb0]\n"
- "mov v17.d[1], x21\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0xc8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v16.d[1], x21\n"
"fmla v10.4s, v17.4s, v0.s[2]\n"
- "mov v16.d[1], x20\n"
"fmla v14.4s, v17.4s, v1.s[2]\n"
"ldr d17, [x17, #0xc0]\n"
"fmla v11.4s, v16.4s, v0.s[2]\n"
- "ldr x20, [x17, #0xc8]\n"
+ "ldr x21, [x17, #0xd8]\n"
"fmla v15.4s, v16.4s, v1.s[2]\n"
"ldr d16, [x17, #0xd0]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x17, #0xd8]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0xe8]\n"
+ "mov v16.d[1], x21\n"
"fmla v8.4s, v17.4s, v0.s[3]\n"
- "ldr x21, [x17, #0xe8]\n"
"fmla v12.4s, v17.4s, v1.s[3]\n"
"ldr d17, [x17, #0xe0]\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
- "ldr x20, [x17, #0xf8]\n"
+ "ldr x21, [x17, #0xf8]\n"
"fmla v13.4s, v16.4s, v1.s[3]\n"
"ldr d16, [x17, #0xf0]\n"
- "mov v17.d[1], x21\n"
- "add x13, x13, #0x10\n"
- "mov v16.d[1], x20\n"
- "add x12, x12, #0x10\n"
+ "mov v17.d[1], x20\n"
"add x17, x17, #0x100\n"
+ "ldr x20, [x17, #0x8]\n"
+ "mov v16.d[1], x21\n"
"fmla v10.4s, v17.4s, v0.s[3]\n"
"fmla v14.4s, v17.4s, v1.s[3]\n"
"ldr d6, [x17, #0x0]\n"
- "ldr x21, [x17, #0x8]\n"
"fmla v11.4s, v16.4s, v0.s[3]\n"
"ldr d0, [x13, #0x0]\n"
"fmla v15.4s, v16.4s, v1.s[3]\n"
"ldr d1, [x12, #0x0]\n"
- "sub x14, x14, #0x4\n"
"ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x8\n"
- "ldr x20, [x13, #0x8]\n"
- "mov v6.d[1], x21\n"
- "ldr x21, [x12, #0x8]\n"
- "mov v0.d[1], x20\n"
+ "mov v6.d[1], x20\n"
"ldr x20, [x17, #0x18]\n"
- "mov v1.d[1], x21\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v0.d[1], x23\n"
+ "mov v1.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x12, #0x80]\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
@@ -705,8 +707,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"sub x14, x14, #0x1\n"
"ldr s18, [x12], #0x4\n"
"ldr q17, [x17, #0x0]\n"
- "fmla v8.4s, v17.4s, v19.s[0]\n"
"ldr q16, [x17, #0x10]\n"
+ "fmla v8.4s, v17.4s, v19.s[0]\n"
"fmla v12.4s, v17.4s, v18.s[0]\n"
"ldr q17, [x17, #0x20]\n"
"fmla v9.4s, v16.4s, v19.s[0]\n"
@@ -724,22 +726,22 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"cmp x15, x20\n"
"bne 48b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
"prfm pstl1keep, [x16, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add x26, x16, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 56f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v16.4s\n"
- "fmin v9.4s, v9.4s, v16.4s\n"
- "fmin v10.4s, v10.4s, v16.4s\n"
- "fmin v11.4s, v11.4s, v16.4s\n"
- "fmin v12.4s, v12.4s, v16.4s\n"
- "fmin v13.4s, v13.4s, v16.4s\n"
- "fmin v14.4s, v14.4s, v16.4s\n"
- "fmin v15.4s, v15.4s, v16.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v17.4s\n"
+ "fmin v9.4s, v9.4s, v17.4s\n"
+ "fmin v10.4s, v10.4s, v17.4s\n"
+ "fmin v11.4s, v11.4s, v17.4s\n"
+ "fmin v12.4s, v12.4s, v17.4s\n"
+ "fmin v13.4s, v13.4s, v17.4s\n"
+ "fmin v14.4s, v14.4s, v17.4s\n"
+ "fmin v15.4s, v15.4s, v17.4s\n"
"fmax v8.4s, v8.4s, v16.4s\n"
"fmax v9.4s, v9.4s, v16.4s\n"
"fmax v10.4s, v10.4s, v16.4s\n"
@@ -754,63 +756,63 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"tbz x8, #3, 60f\n"
"st1 { v8.4s }, [x16], #0x10\n"
"st1 { v9.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
"tbz x8, #2, 58f\n"
"st1 { v10.4s }, [x16], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
"tbz x8, #1, 57f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d15, [x26], #0x8\n"
"tbz x8, #0, 64f\n"
"st1 { v11.s }[2], [x16]\n"
- "st1 { v15.s }[2], [x25]\n"
+ "st1 { v15.s }[2], [x26]\n"
"b 64f\n"
"57:" // Height 2: Partial direct writeback: partial_1_12
"tbz x8, #0, 64f\n"
"str s11, [x16, #0x0]\n"
- "str s15, [x25, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
"b 64f\n"
"58:" // Height 2: Partial direct writeback: partial_2_8
"tbz x8, #1, 59f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d14, [x26], #0x8\n"
"tbz x8, #0, 64f\n"
"st1 { v10.s }[2], [x16]\n"
- "st1 { v14.s }[2], [x25]\n"
+ "st1 { v14.s }[2], [x26]\n"
"b 64f\n"
"59:" // Height 2: Partial direct writeback: partial_1_8
"tbz x8, #0, 64f\n"
"str s10, [x16, #0x0]\n"
- "str s14, [x25, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
"b 64f\n"
"60:" // Height 2: Partial direct writeback: partial_4_0
"tbz x8, #2, 62f\n"
"st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
"tbz x8, #1, 61f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x8, #0, 64f\n"
"st1 { v9.s }[2], [x16]\n"
- "st1 { v13.s }[2], [x25]\n"
+ "st1 { v13.s }[2], [x26]\n"
"b 64f\n"
"61:" // Height 2: Partial direct writeback: partial_1_4
"tbz x8, #0, 64f\n"
"str s9, [x16, #0x0]\n"
- "str s13, [x25, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
"b 64f\n"
"62:" // Height 2: Partial direct writeback: partial_2_0
"tbz x8, #1, 63f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x8, #0, 64f\n"
"st1 { v8.s }[2], [x16]\n"
- "st1 { v12.s }[2], [x25]\n"
+ "st1 { v12.s }[2], [x26]\n"
"b 64f\n"
"63:" // Height 2: Partial direct writeback: partial_1_0
"str s8, [x16, #0x0]\n"
- "str s12, [x25, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
"64:" // Height 2: Partial direct writeback: Done
"b 66f\n"
"65:" // Height 2: Full writeback
@@ -819,31 +821,31 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
"66:" // Height 2: Writeback done
"subs x8, x8, #0x10\n"
"bgt 35b\n"
"b 200f\n"
"67:" // Height 3
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"68:" // Height 3: Column loop
"cbz x7, 69f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -851,94 +853,94 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"69:" // Height 3: no bias
"tbz %x[flags], #0, 79f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"bge 78f\n"
"tbz x8, #3, 73f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
"ld1 { v9.4s }, [x16], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"tbz x8, #2, 71f\n"
"ld1 { v10.4s }, [x16], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
"tbz x8, #1, 70f\n"
"ldr d11, [x16], #0x8\n"
"mov x20, #0x38\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x8, #0, 77f\n"
"ld1 { v11.s }[2], [x16]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
"b 77f\n"
"70:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x8, #0, 77f\n"
"ldr s11, [x16, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
"b 77f\n"
"71:" // Height 3: Partial accumulate: partial_2_8
"tbz x8, #1, 72f\n"
"ldr d10, [x16], #0x8\n"
"mov x20, #0x28\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x8, #0, 77f\n"
"ld1 { v10.s }[2], [x16]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
"b 77f\n"
"72:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x8, #0, 77f\n"
"ldr s10, [x16, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
"b 77f\n"
"73:" // Height 3: Partial accumulate: partial_4_0
"tbz x8, #2, 75f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
"tbz x8, #1, 74f\n"
"ldr d9, [x16], #0x8\n"
"mov x20, #0x18\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x8, #0, 77f\n"
"ld1 { v9.s }[2], [x16]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
"b 77f\n"
"74:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x8, #0, 77f\n"
"ldr s9, [x16, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
"b 77f\n"
"75:" // Height 3: Partial accumulate: partial_2_0
"tbz x8, #1, 76f\n"
"ldr d8, [x16], #0x8\n"
"mov x20, #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
"tbz x8, #0, 77f\n"
"ld1 { v8.s }[2], [x16]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
"b 77f\n"
"76:" // Height 3: Partial accumulate: partial_1_0
"ldr s8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s12, [x25, #0x0]\n"
- "ldr s16, [x24, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
"77:" // Height 3: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 80f\n"
@@ -947,14 +949,14 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
"b 80f\n"
"79:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -973,8 +975,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"mov x15, #0x0\n"
"81:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 82f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1009,11 +1011,15 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v16.4s, v6.4s, v2.s[0]\n"
"ldr d21, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "mov v21.d[1], x21\n"
+ "add x13, x13, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr x21, [x17, #0x48]\n"
+ "add x12, x12, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"ldr d20, [x17, #0x30]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x48]\n"
+ "add x11, x11, #0x10\n"
+ "ldr x24, [x13, #0x8]\n"
"mov v20.d[1], x20\n"
"fmla v10.4s, v21.4s, v0.s[0]\n"
"fmla v14.4s, v21.4s, v1.s[0]\n"
@@ -1021,11 +1027,15 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v18.4s, v21.4s, v2.s[0]\n"
"ldr d21, [x17, #0x40]\n"
"fmla v11.4s, v20.4s, v0.s[0]\n"
- "mov v21.d[1], x21\n"
+ "ldr x23, [x12, #0x8]\n"
"fmla v15.4s, v20.4s, v1.s[0]\n"
- "ldr x21, [x17, #0x68]\n"
+ "ldr x22, [x11, #0x8]\n"
"fmla v19.4s, v20.4s, v2.s[0]\n"
"ldr d20, [x17, #0x50]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x68]\n"
+ "sub x14, x14, #0x4\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"mov v20.d[1], x20\n"
"fmla v8.4s, v21.4s, v0.s[1]\n"
"fmla v12.4s, v21.4s, v1.s[1]\n"
@@ -1033,11 +1043,14 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v16.4s, v21.4s, v2.s[1]\n"
"ldr d21, [x17, #0x60]\n"
"fmla v9.4s, v20.4s, v0.s[1]\n"
- "mov v21.d[1], x21\n"
+ "cmp x14, #0x8\n"
"fmla v13.4s, v20.4s, v1.s[1]\n"
- "ldr x21, [x17, #0x88]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v17.4s, v20.4s, v2.s[1]\n"
"ldr d20, [x17, #0x70]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"mov v20.d[1], x20\n"
"fmla v10.4s, v21.4s, v0.s[1]\n"
"fmla v14.4s, v21.4s, v1.s[1]\n"
@@ -1045,11 +1058,11 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v18.4s, v21.4s, v2.s[1]\n"
"ldr d21, [x17, #0x80]\n"
"fmla v11.4s, v20.4s, v0.s[1]\n"
- "mov v21.d[1], x21\n"
"fmla v15.4s, v20.4s, v1.s[1]\n"
- "ldr x21, [x17, #0xa8]\n"
"fmla v19.4s, v20.4s, v2.s[1]\n"
"ldr d20, [x17, #0x90]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xa8]\n"
"mov v20.d[1], x20\n"
"fmla v8.4s, v21.4s, v0.s[2]\n"
"fmla v12.4s, v21.4s, v1.s[2]\n"
@@ -1057,11 +1070,11 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v16.4s, v21.4s, v2.s[2]\n"
"ldr d21, [x17, #0xa0]\n"
"fmla v9.4s, v20.4s, v0.s[2]\n"
- "mov v21.d[1], x21\n"
"fmla v13.4s, v20.4s, v1.s[2]\n"
- "ldr x21, [x17, #0xc8]\n"
"fmla v17.4s, v20.4s, v2.s[2]\n"
"ldr d20, [x17, #0xb0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xc8]\n"
"mov v20.d[1], x20\n"
"fmla v10.4s, v21.4s, v0.s[2]\n"
"fmla v14.4s, v21.4s, v1.s[2]\n"
@@ -1069,11 +1082,11 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v18.4s, v21.4s, v2.s[2]\n"
"ldr d21, [x17, #0xc0]\n"
"fmla v11.4s, v20.4s, v0.s[2]\n"
- "mov v21.d[1], x21\n"
"fmla v15.4s, v20.4s, v1.s[2]\n"
- "ldr x21, [x17, #0xe8]\n"
"fmla v19.4s, v20.4s, v2.s[2]\n"
"ldr d20, [x17, #0xd0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xe8]\n"
"mov v20.d[1], x20\n"
"fmla v8.4s, v21.4s, v0.s[3]\n"
"fmla v12.4s, v21.4s, v1.s[3]\n"
@@ -1081,40 +1094,29 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v16.4s, v21.4s, v2.s[3]\n"
"ldr d21, [x17, #0xe0]\n"
"fmla v9.4s, v20.4s, v0.s[3]\n"
- "mov v21.d[1], x21\n"
"fmla v13.4s, v20.4s, v1.s[3]\n"
- "add x13, x13, #0x10\n"
"fmla v17.4s, v20.4s, v2.s[3]\n"
"ldr d20, [x17, #0xf0]\n"
- "mov v20.d[1], x20\n"
- "add x12, x12, #0x10\n"
- "add x11, x11, #0x10\n"
+ "mov v21.d[1], x21\n"
"add x17, x17, #0x100\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v20.d[1], x20\n"
"fmla v10.4s, v21.4s, v0.s[3]\n"
- "ldr x20, [x17, #0x8]\n"
"fmla v14.4s, v21.4s, v1.s[3]\n"
- "ldr x23, [x13, #0x8]\n"
+ "ldr x20, [x17, #0x18]\n"
"fmla v18.4s, v21.4s, v2.s[3]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.4s, v20.4s, v0.s[3]\n"
"ldr d0, [x13, #0x0]\n"
"fmla v15.4s, v20.4s, v1.s[3]\n"
"ldr d1, [x12, #0x0]\n"
- "ldr x22, [x12, #0x8]\n"
"fmla v19.4s, v20.4s, v2.s[3]\n"
"ldr d2, [x11, #0x0]\n"
- "sub x14, x14, #0x4\n"
"ldr d7, [x17, #0x10]\n"
- "cmp x14, #0x8\n"
- "ldr x21, [x11, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x17, #0x18]\n"
- "mov v0.d[1], x23\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v1.d[1], x22\n"
- "prfm pldl1keep, [x12, #0x80]\n"
- "mov v2.d[1], x21\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x24\n"
+ "mov v1.d[1], x23\n"
+ "mov v2.d[1], x22\n"
"mov v7.d[1], x20\n"
"bge 84b\n"
"85:" // Height 3: Multiply loop: Single iteration only
@@ -1196,8 +1198,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr s23, [x12], #0x4\n"
"ldr s22, [x11], #0x4\n"
"ldr q21, [x17, #0x0]\n"
- "fmla v8.4s, v21.4s, v24.s[0]\n"
"ldr q20, [x17, #0x10]\n"
+ "fmla v8.4s, v21.4s, v24.s[0]\n"
"fmla v12.4s, v21.4s, v23.s[0]\n"
"fmla v16.4s, v21.4s, v22.s[0]\n"
"ldr q21, [x17, #0x20]\n"
@@ -1219,28 +1221,28 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"cmp x15, x20\n"
"bne 81b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x16, #0x0]\n"
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 89f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v20.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v20.4s\n"
- "fmin v9.4s, v9.4s, v20.4s\n"
- "fmin v10.4s, v10.4s, v20.4s\n"
- "fmin v11.4s, v11.4s, v20.4s\n"
- "fmin v12.4s, v12.4s, v20.4s\n"
- "fmin v13.4s, v13.4s, v20.4s\n"
- "fmin v14.4s, v14.4s, v20.4s\n"
- "fmin v15.4s, v15.4s, v20.4s\n"
- "fmin v16.4s, v16.4s, v20.4s\n"
- "fmin v17.4s, v17.4s, v20.4s\n"
- "fmin v18.4s, v18.4s, v20.4s\n"
- "fmin v19.4s, v19.4s, v20.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.4s }, [x21]\n"
"ld1r { v20.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v21.4s\n"
+ "fmin v9.4s, v9.4s, v21.4s\n"
+ "fmin v10.4s, v10.4s, v21.4s\n"
+ "fmin v11.4s, v11.4s, v21.4s\n"
+ "fmin v12.4s, v12.4s, v21.4s\n"
+ "fmin v13.4s, v13.4s, v21.4s\n"
+ "fmin v14.4s, v14.4s, v21.4s\n"
+ "fmin v15.4s, v15.4s, v21.4s\n"
+ "fmin v16.4s, v16.4s, v21.4s\n"
+ "fmin v17.4s, v17.4s, v21.4s\n"
+ "fmin v18.4s, v18.4s, v21.4s\n"
+ "fmin v19.4s, v19.4s, v21.4s\n"
"fmax v8.4s, v8.4s, v20.4s\n"
"fmax v9.4s, v9.4s, v20.4s\n"
"fmax v10.4s, v10.4s, v20.4s\n"
@@ -1259,79 +1261,79 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"tbz x8, #3, 93f\n"
"st1 { v8.4s }, [x16], #0x10\n"
"st1 { v9.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
"tbz x8, #2, 91f\n"
"st1 { v10.4s }, [x16], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
"tbz x8, #1, 90f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x8, #0, 97f\n"
"st1 { v11.s }[2], [x16]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
"b 97f\n"
"90:" // Height 3: Partial direct writeback: partial_1_12
"tbz x8, #0, 97f\n"
"str s11, [x16, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
"b 97f\n"
"91:" // Height 3: Partial direct writeback: partial_2_8
"tbz x8, #1, 92f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x8, #0, 97f\n"
"st1 { v10.s }[2], [x16]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
"b 97f\n"
"92:" // Height 3: Partial direct writeback: partial_1_8
"tbz x8, #0, 97f\n"
"str s10, [x16, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
"b 97f\n"
"93:" // Height 3: Partial direct writeback: partial_4_0
"tbz x8, #2, 95f\n"
"st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
"tbz x8, #1, 94f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x8, #0, 97f\n"
"st1 { v9.s }[2], [x16]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
"b 97f\n"
"94:" // Height 3: Partial direct writeback: partial_1_4
"tbz x8, #0, 97f\n"
"str s9, [x16, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
"b 97f\n"
"95:" // Height 3: Partial direct writeback: partial_2_0
"tbz x8, #1, 96f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x8, #0, 97f\n"
"st1 { v8.s }[2], [x16]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
"b 97f\n"
"96:" // Height 3: Partial direct writeback: partial_1_0
"str s8, [x16, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
"97:" // Height 3: Partial direct writeback: Done
"b 99f\n"
"98:" // Height 3: Full writeback
@@ -1340,35 +1342,35 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"99:" // Height 3: Writeback done
"subs x8, x8, #0x10\n"
"bgt 68b\n"
"b 200f\n"
"100:" // Height 4
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"101:" // Height 4: Column loop
"cbz x7, 102f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -1380,111 +1382,111 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"102:" // Height 4: no bias
"tbz %x[flags], #0, 112f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"bge 111f\n"
"tbz x8, #3, 106f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"ld1 { v9.4s }, [x16], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
"tbz x8, #2, 104f\n"
"ld1 { v10.4s }, [x16], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
"tbz x8, #1, 103f\n"
"ldr d11, [x16], #0x8\n"
"mov x20, #0x38\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x8, #0, 110f\n"
"ld1 { v11.s }[2], [x16]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
"b 110f\n"
"103:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x8, #0, 110f\n"
"ldr s11, [x16, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
"b 110f\n"
"104:" // Height 4: Partial accumulate: partial_2_8
"tbz x8, #1, 105f\n"
"ldr d10, [x16], #0x8\n"
"mov x20, #0x28\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x8, #0, 110f\n"
"ld1 { v10.s }[2], [x16]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
"b 110f\n"
"105:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x8, #0, 110f\n"
"ldr s10, [x16, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
"b 110f\n"
"106:" // Height 4: Partial accumulate: partial_4_0
"tbz x8, #2, 108f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"tbz x8, #1, 107f\n"
"ldr d9, [x16], #0x8\n"
"mov x20, #0x18\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x8, #0, 110f\n"
"ld1 { v9.s }[2], [x16]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
"b 110f\n"
"107:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x8, #0, 110f\n"
"ldr s9, [x16, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
"b 110f\n"
"108:" // Height 4: Partial accumulate: partial_2_0
"tbz x8, #1, 109f\n"
"ldr d8, [x16], #0x8\n"
"mov x20, #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x8, #0, 110f\n"
"ld1 { v8.s }[2], [x16]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
"b 110f\n"
"109:" // Height 4: Partial accumulate: partial_1_0
"ldr s8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s12, [x25, #0x0]\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
"110:" // Height 4: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 113f\n"
@@ -1493,18 +1495,18 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"b 113f\n"
"112:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1527,8 +1529,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"mov x15, #0x0\n"
"114:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 115f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1561,122 +1563,121 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"blt 118f\n"
"117:" // Height 4: Multiply loop: Main loop head
"fmla v8.4s, v6.4s, v0.s[0]\n"
- "ldr x21, [x17, #0x28]\n"
+ "ldr x20, [x17, #0x28]\n"
"fmla v12.4s, v6.4s, v1.s[0]\n"
- "ldr x20, [x17, #0x38]\n"
+ "ldr x21, [x17, #0x38]\n"
"fmla v16.4s, v6.4s, v2.s[0]\n"
"add x13, x13, #0x10\n"
"fmla v20.4s, v6.4s, v3.s[0]\n"
"ldr d25, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "mov v25.d[1], x21\n"
+ "add x12, x12, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr x21, [x17, #0x48]\n"
+ "add x11, x11, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x12, x12, #0x10\n"
+ "mov v25.d[1], x20\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"ldr d24, [x17, #0x30]\n"
- "mov v24.d[1], x20\n"
"fmla v10.4s, v25.4s, v0.s[0]\n"
+ "ldr x20, [x17, #0x48]\n"
"fmla v14.4s, v25.4s, v1.s[0]\n"
- "ldr x20, [x17, #0x58]\n"
+ "add x10, x10, #0x10\n"
+ "mov v24.d[1], x21\n"
"fmla v18.4s, v25.4s, v2.s[0]\n"
- "add x11, x11, #0x10\n"
"fmla v22.4s, v25.4s, v3.s[0]\n"
"ldr d25, [x17, #0x40]\n"
"fmla v11.4s, v24.4s, v0.s[0]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x58]\n"
"fmla v15.4s, v24.4s, v1.s[0]\n"
- "ldr x21, [x17, #0x68]\n"
+ "ldr x25, [x13, #0x8]\n"
"fmla v19.4s, v24.4s, v2.s[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v25.d[1], x20\n"
"fmla v23.4s, v24.4s, v3.s[0]\n"
"ldr d24, [x17, #0x50]\n"
- "mov v24.d[1], x20\n"
"fmla v8.4s, v25.4s, v0.s[1]\n"
+ "ldr x20, [x17, #0x68]\n"
"fmla v12.4s, v25.4s, v1.s[1]\n"
- "ldr x20, [x17, #0x78]\n"
+ "ldr x24, [x12, #0x8]\n"
+ "mov v24.d[1], x21\n"
"fmla v16.4s, v25.4s, v2.s[1]\n"
- "ldr x25, [x13, #0x8]\n"
"fmla v20.4s, v25.4s, v3.s[1]\n"
"ldr d25, [x17, #0x60]\n"
"fmla v9.4s, v24.4s, v0.s[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x78]\n"
"fmla v13.4s, v24.4s, v1.s[1]\n"
- "ldr x21, [x17, #0x88]\n"
+ "ldr x23, [x11, #0x8]\n"
"fmla v17.4s, v24.4s, v2.s[1]\n"
- "ldr x24, [x12, #0x8]\n"
+ "mov v25.d[1], x20\n"
"fmla v21.4s, v24.4s, v3.s[1]\n"
"ldr d24, [x17, #0x70]\n"
- "mov v24.d[1], x20\n"
"fmla v10.4s, v25.4s, v0.s[1]\n"
+ "ldr x20, [x17, #0x88]\n"
"fmla v14.4s, v25.4s, v1.s[1]\n"
- "ldr x20, [x17, #0x98]\n"
+ "ldr x22, [x10, #0x8]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.4s, v25.4s, v2.s[1]\n"
- "ldr x23, [x11, #0x8]\n"
"fmla v22.4s, v25.4s, v3.s[1]\n"
"ldr d25, [x17, #0x80]\n"
"fmla v11.4s, v24.4s, v0.s[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x98]\n"
"fmla v15.4s, v24.4s, v1.s[1]\n"
- "ldr x21, [x17, #0xa8]\n"
+ "sub x14, x14, #0x4\n"
"fmla v19.4s, v24.4s, v2.s[1]\n"
- "ldr x22, [x10, #0x8]\n"
+ "mov v25.d[1], x20\n"
"fmla v23.4s, v24.4s, v3.s[1]\n"
"ldr d24, [x17, #0x90]\n"
- "mov v24.d[1], x20\n"
"fmla v8.4s, v25.4s, v0.s[2]\n"
+ "ldr x20, [x17, #0xa8]\n"
"fmla v12.4s, v25.4s, v1.s[2]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "cmp x14, #0x8\n"
+ "mov v24.d[1], x21\n"
"fmla v16.4s, v25.4s, v2.s[2]\n"
- "sub x14, x14, #0x4\n"
"fmla v20.4s, v25.4s, v3.s[2]\n"
"ldr d25, [x17, #0xa0]\n"
"fmla v9.4s, v24.4s, v0.s[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xb8]\n"
"fmla v13.4s, v24.4s, v1.s[2]\n"
- "ldr x21, [x17, #0xc8]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"fmla v17.4s, v24.4s, v2.s[2]\n"
- "cmp x14, #0x8\n"
+ "mov v25.d[1], x20\n"
"fmla v21.4s, v24.4s, v3.s[2]\n"
"ldr d24, [x17, #0xb0]\n"
- "mov v24.d[1], x20\n"
"fmla v10.4s, v25.4s, v0.s[2]\n"
+ "ldr x20, [x17, #0xc8]\n"
"fmla v14.4s, v25.4s, v1.s[2]\n"
- "ldr x20, [x17, #0xd8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.4s, v25.4s, v2.s[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"fmla v22.4s, v25.4s, v3.s[2]\n"
"ldr d25, [x17, #0xc0]\n"
"fmla v11.4s, v24.4s, v0.s[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xd8]\n"
"fmla v15.4s, v24.4s, v1.s[2]\n"
- "ldr x21, [x17, #0xe8]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"fmla v19.4s, v24.4s, v2.s[2]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v25.d[1], x20\n"
"fmla v23.4s, v24.4s, v3.s[2]\n"
"ldr d24, [x17, #0xd0]\n"
- "mov v24.d[1], x20\n"
"fmla v8.4s, v25.4s, v0.s[3]\n"
+ "ldr x20, [x17, #0xe8]\n"
"fmla v12.4s, v25.4s, v1.s[3]\n"
- "ldr x20, [x17, #0xf8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
+ "mov v24.d[1], x21\n"
"fmla v16.4s, v25.4s, v2.s[3]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
"fmla v20.4s, v25.4s, v3.s[3]\n"
"ldr d25, [x17, #0xe0]\n"
"fmla v9.4s, v24.4s, v0.s[3]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xf8]\n"
"fmla v13.4s, v24.4s, v1.s[3]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
"fmla v17.4s, v24.4s, v2.s[3]\n"
+ "mov v25.d[1], x20\n"
"fmla v21.4s, v24.4s, v3.s[3]\n"
"ldr d24, [x17, #0xf0]\n"
- "mov v24.d[1], x20\n"
"add x17, x17, #0x100\n"
"fmla v10.4s, v25.4s, v0.s[3]\n"
- "ldr x21, [x17, #0x8]\n"
"fmla v14.4s, v25.4s, v1.s[3]\n"
- "ldr x20, [x17, #0x18]\n"
+ "ldr x20, [x17, #0x8]\n"
+ "mov v24.d[1], x21\n"
"fmla v18.4s, v25.4s, v2.s[3]\n"
"fmla v22.4s, v25.4s, v3.s[3]\n"
"ldr d6, [x17, #0x0]\n"
@@ -1689,7 +1690,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v23.4s, v24.4s, v3.s[3]\n"
"ldr d3, [x10, #0x0]\n"
"ldr d7, [x17, #0x10]\n"
- "mov v6.d[1], x21\n"
+ "mov v6.d[1], x20\n"
+ "ldr x20, [x17, #0x18]\n"
"mov v0.d[1], x25\n"
"mov v1.d[1], x24\n"
"mov v2.d[1], x23\n"
@@ -1794,8 +1796,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr s27, [x11], #0x4\n"
"ldr s26, [x10], #0x4\n"
"ldr q25, [x17, #0x0]\n"
- "fmla v8.4s, v25.4s, v29.s[0]\n"
"ldr q24, [x17, #0x10]\n"
+ "fmla v8.4s, v25.4s, v29.s[0]\n"
"fmla v12.4s, v25.4s, v28.s[0]\n"
"fmla v16.4s, v25.4s, v27.s[0]\n"
"fmla v20.4s, v25.4s, v26.s[0]\n"
@@ -1821,34 +1823,34 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"cmp x15, x20\n"
"bne 114b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x16, #0x0]\n"
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 122f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v24.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v24.4s\n"
- "fmin v9.4s, v9.4s, v24.4s\n"
- "fmin v10.4s, v10.4s, v24.4s\n"
- "fmin v11.4s, v11.4s, v24.4s\n"
- "fmin v12.4s, v12.4s, v24.4s\n"
- "fmin v13.4s, v13.4s, v24.4s\n"
- "fmin v14.4s, v14.4s, v24.4s\n"
- "fmin v15.4s, v15.4s, v24.4s\n"
- "fmin v16.4s, v16.4s, v24.4s\n"
- "fmin v17.4s, v17.4s, v24.4s\n"
- "fmin v18.4s, v18.4s, v24.4s\n"
- "fmin v19.4s, v19.4s, v24.4s\n"
- "fmin v20.4s, v20.4s, v24.4s\n"
- "fmin v21.4s, v21.4s, v24.4s\n"
- "fmin v22.4s, v22.4s, v24.4s\n"
- "fmin v23.4s, v23.4s, v24.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v25.4s }, [x21]\n"
"ld1r { v24.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v25.4s\n"
+ "fmin v9.4s, v9.4s, v25.4s\n"
+ "fmin v10.4s, v10.4s, v25.4s\n"
+ "fmin v11.4s, v11.4s, v25.4s\n"
+ "fmin v12.4s, v12.4s, v25.4s\n"
+ "fmin v13.4s, v13.4s, v25.4s\n"
+ "fmin v14.4s, v14.4s, v25.4s\n"
+ "fmin v15.4s, v15.4s, v25.4s\n"
+ "fmin v16.4s, v16.4s, v25.4s\n"
+ "fmin v17.4s, v17.4s, v25.4s\n"
+ "fmin v18.4s, v18.4s, v25.4s\n"
+ "fmin v19.4s, v19.4s, v25.4s\n"
+ "fmin v20.4s, v20.4s, v25.4s\n"
+ "fmin v21.4s, v21.4s, v25.4s\n"
+ "fmin v22.4s, v22.4s, v25.4s\n"
+ "fmin v23.4s, v23.4s, v25.4s\n"
"fmax v8.4s, v8.4s, v24.4s\n"
"fmax v9.4s, v9.4s, v24.4s\n"
"fmax v10.4s, v10.4s, v24.4s\n"
@@ -1871,95 +1873,95 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"tbz x8, #3, 126f\n"
"st1 { v8.4s }, [x16], #0x10\n"
"st1 { v9.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
"tbz x8, #2, 124f\n"
"st1 { v10.4s }, [x16], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
"tbz x8, #1, 123f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
"tbz x8, #0, 130f\n"
"st1 { v11.s }[2], [x16]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
"b 130f\n"
"123:" // Height 4: Partial direct writeback: partial_1_12
"tbz x8, #0, 130f\n"
"str s11, [x16, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
"b 130f\n"
"124:" // Height 4: Partial direct writeback: partial_2_8
"tbz x8, #1, 125f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
"tbz x8, #0, 130f\n"
"st1 { v10.s }[2], [x16]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
"b 130f\n"
"125:" // Height 4: Partial direct writeback: partial_1_8
"tbz x8, #0, 130f\n"
"str s10, [x16, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
"b 130f\n"
"126:" // Height 4: Partial direct writeback: partial_4_0
"tbz x8, #2, 128f\n"
"st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
"tbz x8, #1, 127f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
"tbz x8, #0, 130f\n"
"st1 { v9.s }[2], [x16]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
"b 130f\n"
"127:" // Height 4: Partial direct writeback: partial_1_4
"tbz x8, #0, 130f\n"
"str s9, [x16, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
"b 130f\n"
"128:" // Height 4: Partial direct writeback: partial_2_0
"tbz x8, #1, 129f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x8, #0, 130f\n"
"st1 { v8.s }[2], [x16]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
"b 130f\n"
"129:" // Height 4: Partial direct writeback: partial_1_0
"str s8, [x16, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
"130:" // Height 4: Partial direct writeback: Done
"b 132f\n"
"131:" // Height 4: Full writeback
@@ -1968,39 +1970,39 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
"132:" // Height 4: Writeback done
"subs x8, x8, #0x10\n"
"bgt 101b\n"
"b 200f\n"
"133:" // Height 5
- "mov x7, %x[bias]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"134:" // Height 5: Column loop
"cbz x7, 135f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -2016,128 +2018,128 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"135:" // Height 5: no bias
"tbz %x[flags], #0, 145f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
+ "cmp x8, #0x10\n"
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
"bge 144f\n"
"tbz x8, #3, 139f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"ld1 { v9.4s }, [x16], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x8, #2, 137f\n"
"ld1 { v10.4s }, [x16], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
"tbz x8, #1, 136f\n"
"ldr d11, [x16], #0x8\n"
"mov x20, #0x38\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x8, #0, 143f\n"
"ld1 { v11.s }[2], [x16]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
"b 143f\n"
"136:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x8, #0, 143f\n"
"ldr s11, [x16, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
"b 143f\n"
"137:" // Height 5: Partial accumulate: partial_2_8
"tbz x8, #1, 138f\n"
"ldr d10, [x16], #0x8\n"
"mov x20, #0x28\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x8, #0, 143f\n"
"ld1 { v10.s }[2], [x16]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
"b 143f\n"
"138:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x8, #0, 143f\n"
"ldr s10, [x16, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
"b 143f\n"
"139:" // Height 5: Partial accumulate: partial_4_0
"tbz x8, #2, 141f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"tbz x8, #1, 140f\n"
"ldr d9, [x16], #0x8\n"
"mov x20, #0x18\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x8, #0, 143f\n"
"ld1 { v9.s }[2], [x16]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 143f\n"
"140:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x8, #0, 143f\n"
"ldr s9, [x16, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"b 143f\n"
"141:" // Height 5: Partial accumulate: partial_2_0
"tbz x8, #1, 142f\n"
"ldr d8, [x16], #0x8\n"
"mov x20, #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x8, #0, 143f\n"
"ld1 { v8.s }[2], [x16]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 143f\n"
"142:" // Height 5: Partial accumulate: partial_1_0
"ldr s8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s12, [x25, #0x0]\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"143:" // Height 5: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 146f\n"
@@ -2146,22 +2148,22 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
"b 146f\n"
"145:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -2188,8 +2190,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"mov x15, #0x0\n"
"147:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 148f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2236,131 +2238,131 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v24.4s, v6.4s, v4.s[0]\n"
"ldr d29, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "mov v29.d[1], x21\n"
+ "add x11, x11, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr x21, [x17, #0x48]\n"
+ "add x10, x10, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x11, x11, #0x10\n"
+ "mov v29.d[1], x21\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr x21, [x17, #0x48]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"ldr d28, [x17, #0x30]\n"
- "mov v28.d[1], x20\n"
"fmla v10.4s, v29.4s, v0.s[0]\n"
+ "add x9, x9, #0x10\n"
"fmla v14.4s, v29.4s, v1.s[0]\n"
- "ldr x20, [x17, #0x58]\n"
+ "ldr x26, [x13, #0x8]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.4s, v29.4s, v2.s[0]\n"
- "add x9, x9, #0x10\n"
"fmla v22.4s, v29.4s, v3.s[0]\n"
- "ldr x26, [x13, #0x8]\n"
+ "ldr x20, [x17, #0x58]\n"
"fmla v26.4s, v29.4s, v4.s[0]\n"
"ldr d29, [x17, #0x40]\n"
"fmla v11.4s, v28.4s, v0.s[0]\n"
- "mov v29.d[1], x21\n"
+ "ldr x25, [x12, #0x8]\n"
"fmla v15.4s, v28.4s, v1.s[0]\n"
- "ldr x21, [x17, #0x68]\n"
+ "ldr x24, [x11, #0x8]\n"
"fmla v19.4s, v28.4s, v2.s[0]\n"
- "ldr x25, [x12, #0x8]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.4s, v28.4s, v3.s[0]\n"
- "ldr x24, [x11, #0x8]\n"
+ "ldr x21, [x17, #0x68]\n"
"fmla v27.4s, v28.4s, v4.s[0]\n"
"ldr d28, [x17, #0x50]\n"
- "mov v28.d[1], x20\n"
"fmla v8.4s, v29.4s, v0.s[1]\n"
+ "ldr x23, [x10, #0x8]\n"
"fmla v12.4s, v29.4s, v1.s[1]\n"
- "ldr x20, [x17, #0x78]\n"
+ "ldr x22, [x9, #0x8]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.4s, v29.4s, v2.s[1]\n"
- "ldr x23, [x10, #0x8]\n"
"fmla v20.4s, v29.4s, v3.s[1]\n"
- "ldr x22, [x9, #0x8]\n"
+ "ldr x20, [x17, #0x78]\n"
"fmla v24.4s, v29.4s, v4.s[1]\n"
"ldr d29, [x17, #0x60]\n"
"fmla v9.4s, v28.4s, v0.s[1]\n"
- "mov v29.d[1], x21\n"
+ "sub x14, x14, #0x4\n"
"fmla v13.4s, v28.4s, v1.s[1]\n"
- "ldr x21, [x17, #0x88]\n"
+ "cmp x14, #0x8\n"
"fmla v17.4s, v28.4s, v2.s[1]\n"
- "sub x14, x14, #0x4\n"
+ "mov v29.d[1], x21\n"
"fmla v21.4s, v28.4s, v3.s[1]\n"
- "cmp x14, #0x8\n"
+ "ldr x21, [x17, #0x88]\n"
"fmla v25.4s, v28.4s, v4.s[1]\n"
"ldr d28, [x17, #0x70]\n"
- "mov v28.d[1], x20\n"
"fmla v10.4s, v29.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"fmla v14.4s, v29.4s, v1.s[1]\n"
- "ldr x20, [x17, #0x98]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.4s, v29.4s, v2.s[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"fmla v22.4s, v29.4s, v3.s[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x20, [x17, #0x98]\n"
"fmla v26.4s, v29.4s, v4.s[1]\n"
"ldr d29, [x17, #0x80]\n"
"fmla v11.4s, v28.4s, v0.s[1]\n"
- "mov v29.d[1], x21\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"fmla v15.4s, v28.4s, v1.s[1]\n"
- "ldr x21, [x17, #0xa8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"fmla v19.4s, v28.4s, v2.s[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.4s, v28.4s, v3.s[1]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "ldr x21, [x17, #0xa8]\n"
"fmla v27.4s, v28.4s, v4.s[1]\n"
"ldr d28, [x17, #0x90]\n"
- "mov v28.d[1], x20\n"
"fmla v8.4s, v29.4s, v0.s[2]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
"fmla v12.4s, v29.4s, v1.s[2]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.4s, v29.4s, v2.s[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
"fmla v20.4s, v29.4s, v3.s[2]\n"
+ "ldr x20, [x17, #0xb8]\n"
"fmla v24.4s, v29.4s, v4.s[2]\n"
"ldr d29, [x17, #0xa0]\n"
"fmla v9.4s, v28.4s, v0.s[2]\n"
- "mov v29.d[1], x21\n"
"fmla v13.4s, v28.4s, v1.s[2]\n"
- "ldr x21, [x17, #0xc8]\n"
"fmla v17.4s, v28.4s, v2.s[2]\n"
+ "mov v29.d[1], x21\n"
"fmla v21.4s, v28.4s, v3.s[2]\n"
+ "ldr x21, [x17, #0xc8]\n"
"fmla v25.4s, v28.4s, v4.s[2]\n"
"ldr d28, [x17, #0xb0]\n"
- "mov v28.d[1], x20\n"
"fmla v10.4s, v29.4s, v0.s[2]\n"
"fmla v14.4s, v29.4s, v1.s[2]\n"
- "ldr x20, [x17, #0xd8]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.4s, v29.4s, v2.s[2]\n"
"fmla v22.4s, v29.4s, v3.s[2]\n"
+ "ldr x20, [x17, #0xd8]\n"
"fmla v26.4s, v29.4s, v4.s[2]\n"
"ldr d29, [x17, #0xc0]\n"
"fmla v11.4s, v28.4s, v0.s[2]\n"
- "mov v29.d[1], x21\n"
"fmla v15.4s, v28.4s, v1.s[2]\n"
- "ldr x21, [x17, #0xe8]\n"
"fmla v19.4s, v28.4s, v2.s[2]\n"
+ "mov v29.d[1], x21\n"
"fmla v23.4s, v28.4s, v3.s[2]\n"
+ "ldr x21, [x17, #0xe8]\n"
"fmla v27.4s, v28.4s, v4.s[2]\n"
"ldr d28, [x17, #0xd0]\n"
- "mov v28.d[1], x20\n"
"fmla v8.4s, v29.4s, v0.s[3]\n"
"fmla v12.4s, v29.4s, v1.s[3]\n"
- "ldr x20, [x17, #0xf8]\n"
+ "mov v28.d[1], x20\n"
"fmla v16.4s, v29.4s, v2.s[3]\n"
"fmla v20.4s, v29.4s, v3.s[3]\n"
+ "ldr x20, [x17, #0xf8]\n"
"fmla v24.4s, v29.4s, v4.s[3]\n"
"ldr d29, [x17, #0xe0]\n"
"fmla v9.4s, v28.4s, v0.s[3]\n"
- "mov v29.d[1], x21\n"
"fmla v13.4s, v28.4s, v1.s[3]\n"
"fmla v17.4s, v28.4s, v2.s[3]\n"
+ "mov v29.d[1], x21\n"
"fmla v21.4s, v28.4s, v3.s[3]\n"
"fmla v25.4s, v28.4s, v4.s[3]\n"
"ldr d28, [x17, #0xf0]\n"
- "mov v28.d[1], x20\n"
"add x17, x17, #0x100\n"
"fmla v10.4s, v29.4s, v0.s[3]\n"
- "ldr x21, [x17, #0x8]\n"
"fmla v14.4s, v29.4s, v1.s[3]\n"
- "ldr x20, [x17, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v28.d[1], x20\n"
"fmla v18.4s, v29.4s, v2.s[3]\n"
"fmla v22.4s, v29.4s, v3.s[3]\n"
+ "ldr x20, [x17, #0x18]\n"
"fmla v26.4s, v29.4s, v4.s[3]\n"
"ldr d6, [x17, #0x0]\n"
"fmla v11.4s, v28.4s, v0.s[3]\n"
@@ -2499,8 +2501,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr s31, [x10], #0x4\n"
"ldr s30, [x9], #0x4\n"
"ldr q29, [x17, #0x0]\n"
- "fmla v8.4s, v29.4s, v2.s[0]\n"
"ldr q28, [x17, #0x10]\n"
+ "fmla v8.4s, v29.4s, v2.s[0]\n"
"fmla v12.4s, v29.4s, v1.s[0]\n"
"fmla v16.4s, v29.4s, v0.s[0]\n"
"fmla v20.4s, v29.4s, v31.s[0]\n"
@@ -2530,40 +2532,40 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"cmp x15, x20\n"
"bne 147b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x16, #0x0]\n"
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 155f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v28.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v28.4s\n"
- "fmin v9.4s, v9.4s, v28.4s\n"
- "fmin v10.4s, v10.4s, v28.4s\n"
- "fmin v11.4s, v11.4s, v28.4s\n"
- "fmin v12.4s, v12.4s, v28.4s\n"
- "fmin v13.4s, v13.4s, v28.4s\n"
- "fmin v14.4s, v14.4s, v28.4s\n"
- "fmin v15.4s, v15.4s, v28.4s\n"
- "fmin v16.4s, v16.4s, v28.4s\n"
- "fmin v17.4s, v17.4s, v28.4s\n"
- "fmin v18.4s, v18.4s, v28.4s\n"
- "fmin v19.4s, v19.4s, v28.4s\n"
- "fmin v20.4s, v20.4s, v28.4s\n"
- "fmin v21.4s, v21.4s, v28.4s\n"
- "fmin v22.4s, v22.4s, v28.4s\n"
- "fmin v23.4s, v23.4s, v28.4s\n"
- "fmin v24.4s, v24.4s, v28.4s\n"
- "fmin v25.4s, v25.4s, v28.4s\n"
- "fmin v26.4s, v26.4s, v28.4s\n"
- "fmin v27.4s, v27.4s, v28.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v29.4s }, [x21]\n"
"ld1r { v28.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v29.4s\n"
+ "fmin v9.4s, v9.4s, v29.4s\n"
+ "fmin v10.4s, v10.4s, v29.4s\n"
+ "fmin v11.4s, v11.4s, v29.4s\n"
+ "fmin v12.4s, v12.4s, v29.4s\n"
+ "fmin v13.4s, v13.4s, v29.4s\n"
+ "fmin v14.4s, v14.4s, v29.4s\n"
+ "fmin v15.4s, v15.4s, v29.4s\n"
+ "fmin v16.4s, v16.4s, v29.4s\n"
+ "fmin v17.4s, v17.4s, v29.4s\n"
+ "fmin v18.4s, v18.4s, v29.4s\n"
+ "fmin v19.4s, v19.4s, v29.4s\n"
+ "fmin v20.4s, v20.4s, v29.4s\n"
+ "fmin v21.4s, v21.4s, v29.4s\n"
+ "fmin v22.4s, v22.4s, v29.4s\n"
+ "fmin v23.4s, v23.4s, v29.4s\n"
+ "fmin v24.4s, v24.4s, v29.4s\n"
+ "fmin v25.4s, v25.4s, v29.4s\n"
+ "fmin v26.4s, v26.4s, v29.4s\n"
+ "fmin v27.4s, v27.4s, v29.4s\n"
"fmax v8.4s, v8.4s, v28.4s\n"
"fmax v9.4s, v9.4s, v28.4s\n"
"fmax v10.4s, v10.4s, v28.4s\n"
@@ -2590,111 +2592,111 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"tbz x8, #3, 159f\n"
"st1 { v8.4s }, [x16], #0x10\n"
"st1 { v9.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
"tbz x8, #2, 157f\n"
"st1 { v10.4s }, [x16], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
"tbz x8, #1, 156f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x8, #0, 163f\n"
"st1 { v11.s }[2], [x16]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
"b 163f\n"
"156:" // Height 5: Partial direct writeback: partial_1_12
"tbz x8, #0, 163f\n"
"str s11, [x16, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
"b 163f\n"
"157:" // Height 5: Partial direct writeback: partial_2_8
"tbz x8, #1, 158f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x8, #0, 163f\n"
"st1 { v10.s }[2], [x16]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
"b 163f\n"
"158:" // Height 5: Partial direct writeback: partial_1_8
"tbz x8, #0, 163f\n"
"str s10, [x16, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
"b 163f\n"
"159:" // Height 5: Partial direct writeback: partial_4_0
"tbz x8, #2, 161f\n"
"st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x8, #1, 160f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x8, #0, 163f\n"
"st1 { v9.s }[2], [x16]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 163f\n"
"160:" // Height 5: Partial direct writeback: partial_1_4
"tbz x8, #0, 163f\n"
"str s9, [x16, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 163f\n"
"161:" // Height 5: Partial direct writeback: partial_2_0
"tbz x8, #1, 162f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x8, #0, 163f\n"
"st1 { v8.s }[2], [x16]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 163f\n"
"162:" // Height 5: Partial direct writeback: partial_1_0
"str s8, [x16, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"163:" // Height 5: Partial direct writeback: Done
"b 165f\n"
"164:" // Height 5: Full writeback
@@ -2703,22 +2705,22 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"165:" // Height 5: Writeback done
"subs x8, x8, #0x10\n"
"bgt 134b\n"
@@ -2726,23 +2728,24 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"166:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"mov x20, #0x18\n"
- "mov x7, %x[bias]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "ldr x7, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
+ "madd x20, x21, x20, x16\n"
"ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x16, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"167:" // Height 6: Column loop
"cbz x7, 168f\n"
"ldr q8, [x7, #0x0]\n"
- "mov v12.16b, v8.16b\n"
"ldr q9, [x7, #0x10]\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x7, #0x20]\n"
- "mov v14.16b, v10.16b\n"
"ldr q11, [x7, #0x30]\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
+ "add x7, x7, #0x40\n"
+ "mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
"mov v16.16b, v8.16b\n"
- "add x7, x7, #0x40\n"
"mov v17.16b, v9.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
@@ -2762,145 +2765,145 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"168:" // Height 6: no bias
"tbz %x[flags], #0, 178f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
+ "cmp x8, #0x10\n"
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x8, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
"bge 177f\n"
"tbz x8, #3, 172f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v9.4s }, [x16], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x8, #2, 170f\n"
"ld1 { v10.4s }, [x16], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x8, #1, 169f\n"
"ldr d11, [x16], #0x8\n"
"mov x20, #0x38\n"
- "ldr d15, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x8, #0, 176f\n"
"ld1 { v11.s }[2], [x16]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 176f\n"
"169:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x8, #0, 176f\n"
"ldr s11, [x16, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 176f\n"
"170:" // Height 6: Partial accumulate: partial_2_8
"tbz x8, #1, 171f\n"
"ldr d10, [x16], #0x8\n"
"mov x20, #0x28\n"
- "ldr d14, [x25], #0x8\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x8, #0, 176f\n"
"ld1 { v10.s }[2], [x16]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 176f\n"
"171:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x8, #0, 176f\n"
"ldr s10, [x16, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 176f\n"
"172:" // Height 6: Partial accumulate: partial_4_0
"tbz x8, #2, 174f\n"
"ld1 { v8.4s }, [x16], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x8, #1, 173f\n"
"ldr d9, [x16], #0x8\n"
"mov x20, #0x18\n"
- "ldr d13, [x25], #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x8, #0, 176f\n"
"ld1 { v9.s }[2], [x16]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 176f\n"
"173:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x8, #0, 176f\n"
"ldr s9, [x16, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 176f\n"
"174:" // Height 6: Partial accumulate: partial_2_0
"tbz x8, #1, 175f\n"
"ldr d8, [x16], #0x8\n"
"mov x20, #0x8\n"
- "ldr d12, [x25], #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x8, #0, 176f\n"
"ld1 { v8.s }[2], [x16]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 176f\n"
"175:" // Height 6: Partial accumulate: partial_1_0
"ldr s8, [x16, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s12, [x25, #0x0]\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"176:" // Height 6: Partial accumulate: Done
"sub x16, x16, x20\n"
"b 179f\n"
@@ -2909,26 +2912,26 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr q9, [x16, #0x10]\n"
"ldr q10, [x16, #0x20]\n"
"ldr q11, [x16, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"b 179f\n"
"178:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -2959,8 +2962,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"mov x15, #0x0\n"
"180:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 181f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -3013,146 +3016,146 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"fmla v28.4s, v6.4s, v5.s[0]\n"
"ldr d6, [x17, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x21\n"
+ "add x10, x10, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
- "ldr x21, [x17, #0x48]\n"
+ "add x9, x9, #0x10\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v6.d[1], x21\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "add x9, x9, #0x10\n"
+ "ldr x21, [x17, #0x48]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"add x28, x28, #0x10\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
"ldr d7, [x17, #0x30]\n"
- "mov v7.d[1], x20\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
+ "ldr x27, [x13, #0x8]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "ldr x20, [x17, #0x58]\n"
+ "ldr x26, [x12, #0x8]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
- "ldr x27, [x13, #0x8]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
- "ldr x26, [x12, #0x8]\n"
+ "ldr x20, [x17, #0x58]\n"
"fmla v26.4s, v6.4s, v4.s[0]\n"
"ldr x25, [x11, #0x8]\n"
"fmla v30.4s, v6.4s, v5.s[0]\n"
"ldr d6, [x17, #0x40]\n"
"fmla v11.4s, v7.4s, v0.s[0]\n"
- "mov v6.d[1], x21\n"
+ "ldr x24, [x10, #0x8]\n"
"fmla v15.4s, v7.4s, v1.s[0]\n"
- "ldr x21, [x17, #0x68]\n"
+ "ldr x23, [x9, #0x8]\n"
"fmla v19.4s, v7.4s, v2.s[0]\n"
- "ldr x24, [x10, #0x8]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.4s, v7.4s, v3.s[0]\n"
- "ldr x23, [x9, #0x8]\n"
+ "ldr x21, [x17, #0x68]\n"
"fmla v27.4s, v7.4s, v4.s[0]\n"
"ldr x22, [x28, #0x8]\n"
"fmla v31.4s, v7.4s, v5.s[0]\n"
"ldr d7, [x17, #0x50]\n"
- "mov v7.d[1], x20\n"
"fmla v8.4s, v6.4s, v0.s[1]\n"
+ "sub x14, x14, #0x4\n"
"fmla v12.4s, v6.4s, v1.s[1]\n"
- "ldr x20, [x17, #0x78]\n"
+ "cmp x14, #0x8\n"
+ "mov v7.d[1], x20\n"
"fmla v16.4s, v6.4s, v2.s[1]\n"
- "sub x14, x14, #0x4\n"
"fmla v20.4s, v6.4s, v3.s[1]\n"
- "cmp x14, #0x8\n"
+ "ldr x20, [x17, #0x78]\n"
"fmla v24.4s, v6.4s, v4.s[1]\n"
"prfm pldl1keep, [x13, #0x80]\n"
"fmla v28.4s, v6.4s, v5.s[1]\n"
"ldr d6, [x17, #0x60]\n"
"fmla v9.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x21\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"fmla v13.4s, v7.4s, v1.s[1]\n"
- "ldr x21, [x17, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.4s, v7.4s, v3.s[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr x21, [x17, #0x88]\n"
"fmla v25.4s, v7.4s, v4.s[1]\n"
"prfm pldl1keep, [x10, #0x80]\n"
"fmla v29.4s, v7.4s, v5.s[1]\n"
"ldr d7, [x17, #0x70]\n"
- "mov v7.d[1], x20\n"
"fmla v10.4s, v6.4s, v0.s[1]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
"fmla v14.4s, v6.4s, v1.s[1]\n"
- "ldr x20, [x17, #0x98]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.4s, v6.4s, v2.s[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
"fmla v22.4s, v6.4s, v3.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "ldr x20, [x17, #0x98]\n"
"fmla v26.4s, v6.4s, v4.s[1]\n"
"fmla v30.4s, v6.4s, v5.s[1]\n"
"ldr d6, [x17, #0x80]\n"
"fmla v11.4s, v7.4s, v0.s[1]\n"
- "mov v6.d[1], x21\n"
"fmla v15.4s, v7.4s, v1.s[1]\n"
- "ldr x21, [x17, #0xa8]\n"
"fmla v19.4s, v7.4s, v2.s[1]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.4s, v7.4s, v3.s[1]\n"
+ "ldr x21, [x17, #0xa8]\n"
"fmla v27.4s, v7.4s, v4.s[1]\n"
"fmla v31.4s, v7.4s, v5.s[1]\n"
"ldr d7, [x17, #0x90]\n"
- "mov v7.d[1], x20\n"
"fmla v8.4s, v6.4s, v0.s[2]\n"
"fmla v12.4s, v6.4s, v1.s[2]\n"
- "ldr x20, [x17, #0xb8]\n"
+ "mov v7.d[1], x20\n"
"fmla v16.4s, v6.4s, v2.s[2]\n"
"fmla v20.4s, v6.4s, v3.s[2]\n"
+ "ldr x20, [x17, #0xb8]\n"
"fmla v24.4s, v6.4s, v4.s[2]\n"
"fmla v28.4s, v6.4s, v5.s[2]\n"
"ldr d6, [x17, #0xa0]\n"
"fmla v9.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x21\n"
"fmla v13.4s, v7.4s, v1.s[2]\n"
- "ldr x21, [x17, #0xc8]\n"
"fmla v17.4s, v7.4s, v2.s[2]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.4s, v7.4s, v3.s[2]\n"
+ "ldr x21, [x17, #0xc8]\n"
"fmla v25.4s, v7.4s, v4.s[2]\n"
"fmla v29.4s, v7.4s, v5.s[2]\n"
"ldr d7, [x17, #0xb0]\n"
- "mov v7.d[1], x20\n"
"fmla v10.4s, v6.4s, v0.s[2]\n"
"fmla v14.4s, v6.4s, v1.s[2]\n"
- "ldr x20, [x17, #0xd8]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.4s, v6.4s, v2.s[2]\n"
"fmla v22.4s, v6.4s, v3.s[2]\n"
+ "ldr x20, [x17, #0xd8]\n"
"fmla v26.4s, v6.4s, v4.s[2]\n"
"fmla v30.4s, v6.4s, v5.s[2]\n"
"ldr d6, [x17, #0xc0]\n"
"fmla v11.4s, v7.4s, v0.s[2]\n"
- "mov v6.d[1], x21\n"
"fmla v15.4s, v7.4s, v1.s[2]\n"
- "ldr x21, [x17, #0xe8]\n"
"fmla v19.4s, v7.4s, v2.s[2]\n"
+ "mov v6.d[1], x21\n"
"fmla v23.4s, v7.4s, v3.s[2]\n"
+ "ldr x21, [x17, #0xe8]\n"
"fmla v27.4s, v7.4s, v4.s[2]\n"
"fmla v31.4s, v7.4s, v5.s[2]\n"
"ldr d7, [x17, #0xd0]\n"
- "mov v7.d[1], x20\n"
"fmla v8.4s, v6.4s, v0.s[3]\n"
"fmla v12.4s, v6.4s, v1.s[3]\n"
- "ldr x20, [x17, #0xf8]\n"
+ "mov v7.d[1], x20\n"
"fmla v16.4s, v6.4s, v2.s[3]\n"
"fmla v20.4s, v6.4s, v3.s[3]\n"
+ "ldr x20, [x17, #0xf8]\n"
"fmla v24.4s, v6.4s, v4.s[3]\n"
"fmla v28.4s, v6.4s, v5.s[3]\n"
"ldr d6, [x17, #0xe0]\n"
"fmla v9.4s, v7.4s, v0.s[3]\n"
- "mov v6.d[1], x21\n"
"fmla v13.4s, v7.4s, v1.s[3]\n"
"fmla v17.4s, v7.4s, v2.s[3]\n"
+ "mov v6.d[1], x21\n"
"fmla v21.4s, v7.4s, v3.s[3]\n"
"fmla v25.4s, v7.4s, v4.s[3]\n"
"fmla v29.4s, v7.4s, v5.s[3]\n"
"ldr d7, [x17, #0xf0]\n"
- "mov v7.d[1], x20\n"
"add x17, x17, #0x100\n"
"fmla v10.4s, v6.4s, v0.s[3]\n"
- "ldr x21, [x17, #0x8]\n"
"fmla v14.4s, v6.4s, v1.s[3]\n"
- "ldr x20, [x17, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v7.d[1], x20\n"
"fmla v18.4s, v6.4s, v2.s[3]\n"
"fmla v22.4s, v6.4s, v3.s[3]\n"
+ "ldr x20, [x17, #0x18]\n"
"fmla v26.4s, v6.4s, v4.s[3]\n"
"fmla v30.4s, v6.4s, v5.s[3]\n"
"ldr d6, [x17, #0x0]\n"
@@ -3314,8 +3317,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"ldr s3, [x9], #0x4\n"
"ldr s2, [x28], #0x4\n"
"ldr q1, [x17, #0x0]\n"
- "fmla v8.4s, v1.4s, v7.s[0]\n"
"ldr q0, [x17, #0x10]\n"
+ "fmla v8.4s, v1.4s, v7.s[0]\n"
"fmla v12.4s, v1.4s, v6.s[0]\n"
"fmla v16.4s, v1.4s, v5.s[0]\n"
"fmla v20.4s, v1.4s, v4.s[0]\n"
@@ -3349,46 +3352,46 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"cmp x15, x20\n"
"bne 180b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x16, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"prfm pstl1keep, [x16, #0x0]\n"
+ "add x26, x16, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 188f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v0.4s }, [x20]\n"
- "fmin v8.4s, v8.4s, v0.4s\n"
- "fmin v9.4s, v9.4s, v0.4s\n"
- "fmin v10.4s, v10.4s, v0.4s\n"
- "fmin v11.4s, v11.4s, v0.4s\n"
- "fmin v12.4s, v12.4s, v0.4s\n"
- "fmin v13.4s, v13.4s, v0.4s\n"
- "fmin v14.4s, v14.4s, v0.4s\n"
- "fmin v15.4s, v15.4s, v0.4s\n"
- "fmin v16.4s, v16.4s, v0.4s\n"
- "fmin v17.4s, v17.4s, v0.4s\n"
- "fmin v18.4s, v18.4s, v0.4s\n"
- "fmin v19.4s, v19.4s, v0.4s\n"
- "fmin v20.4s, v20.4s, v0.4s\n"
- "fmin v21.4s, v21.4s, v0.4s\n"
- "fmin v22.4s, v22.4s, v0.4s\n"
- "fmin v23.4s, v23.4s, v0.4s\n"
- "fmin v24.4s, v24.4s, v0.4s\n"
- "fmin v25.4s, v25.4s, v0.4s\n"
- "fmin v26.4s, v26.4s, v0.4s\n"
- "fmin v27.4s, v27.4s, v0.4s\n"
- "fmin v28.4s, v28.4s, v0.4s\n"
- "fmin v29.4s, v29.4s, v0.4s\n"
- "fmin v30.4s, v30.4s, v0.4s\n"
- "fmin v31.4s, v31.4s, v0.4s\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
+ "fmin v8.4s, v8.4s, v1.4s\n"
+ "fmin v9.4s, v9.4s, v1.4s\n"
+ "fmin v10.4s, v10.4s, v1.4s\n"
+ "fmin v11.4s, v11.4s, v1.4s\n"
+ "fmin v12.4s, v12.4s, v1.4s\n"
+ "fmin v13.4s, v13.4s, v1.4s\n"
+ "fmin v14.4s, v14.4s, v1.4s\n"
+ "fmin v15.4s, v15.4s, v1.4s\n"
+ "fmin v16.4s, v16.4s, v1.4s\n"
+ "fmin v17.4s, v17.4s, v1.4s\n"
+ "fmin v18.4s, v18.4s, v1.4s\n"
+ "fmin v19.4s, v19.4s, v1.4s\n"
+ "fmin v20.4s, v20.4s, v1.4s\n"
+ "fmin v21.4s, v21.4s, v1.4s\n"
+ "fmin v22.4s, v22.4s, v1.4s\n"
+ "fmin v23.4s, v23.4s, v1.4s\n"
+ "fmin v24.4s, v24.4s, v1.4s\n"
+ "fmin v25.4s, v25.4s, v1.4s\n"
+ "fmin v26.4s, v26.4s, v1.4s\n"
+ "fmin v27.4s, v27.4s, v1.4s\n"
+ "fmin v28.4s, v28.4s, v1.4s\n"
+ "fmin v29.4s, v29.4s, v1.4s\n"
+ "fmin v30.4s, v30.4s, v1.4s\n"
+ "fmin v31.4s, v31.4s, v1.4s\n"
"fmax v8.4s, v8.4s, v0.4s\n"
"fmax v9.4s, v9.4s, v0.4s\n"
"fmax v10.4s, v10.4s, v0.4s\n"
@@ -3419,127 +3422,127 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"tbz x8, #3, 192f\n"
"st1 { v8.4s }, [x16], #0x10\n"
"st1 { v9.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x22], #0x10\n"
"tbz x8, #2, 190f\n"
"st1 { v10.4s }, [x16], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
+ "st1 { v30.4s }, [x22], #0x10\n"
"tbz x8, #1, 189f\n"
"str d11, [x16], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x8, #0, 196f\n"
"st1 { v11.s }[2], [x16]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
+ "st1 { v31.s }[2], [x22]\n"
"b 196f\n"
"189:" // Height 6: Partial direct writeback: partial_1_12
"tbz x8, #0, 196f\n"
"str s11, [x16, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
+ "str s31, [x22, #0x0]\n"
"b 196f\n"
"190:" // Height 6: Partial direct writeback: partial_2_8
"tbz x8, #1, 191f\n"
"str d10, [x16], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x8, #0, 196f\n"
"st1 { v10.s }[2], [x16]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
+ "st1 { v30.s }[2], [x22]\n"
"b 196f\n"
"191:" // Height 6: Partial direct writeback: partial_1_8
"tbz x8, #0, 196f\n"
"str s10, [x16, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
+ "str s30, [x22, #0x0]\n"
"b 196f\n"
"192:" // Height 6: Partial direct writeback: partial_4_0
"tbz x8, #2, 194f\n"
"st1 { v8.4s }, [x16], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
"tbz x8, #1, 193f\n"
"str d9, [x16], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x8, #0, 196f\n"
"st1 { v9.s }[2], [x16]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
"b 196f\n"
"193:" // Height 6: Partial direct writeback: partial_1_4
"tbz x8, #0, 196f\n"
"str s9, [x16, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
"b 196f\n"
"194:" // Height 6: Partial direct writeback: partial_2_0
"tbz x8, #1, 195f\n"
"str d8, [x16], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x8, #0, 196f\n"
"st1 { v8.s }[2], [x16]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "st1 { v28.s }[2], [x22]\n"
"b 196f\n"
"195:" // Height 6: Partial direct writeback: partial_1_0
"str s8, [x16, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
+ "str s28, [x22, #0x0]\n"
"196:" // Height 6: Partial direct writeback: Done
"b 198f\n"
"197:" // Height 6: Full writeback
@@ -3548,26 +3551,26 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"str q10, [x16, #0x20]\n"
"str q11, [x16, #0x30]\n"
"add x16, x16, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q28, [x22, #0x0]\n"
+ "str q29, [x22, #0x10]\n"
+ "str q30, [x22, #0x20]\n"
+ "str q31, [x22, #0x30]\n"
"198:" // Height 6: Writeback done
"subs x8, x8, #0x10\n"
"bgt 167b\n"
@@ -3583,8 +3586,8 @@ void a64_hybrid_fp32_mla_6x16_a55 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"200:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
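Both this file and generic.cpp below apply the same refactor: `bias` and `output_ptr` move from standalone inline-asm operands into the `KernelArgs` block and are fetched by the assembly through `offsetof()`-based immediates (`%[offsetof_bias]`, `%[offsetof_output_ptr]`), which frees two operand registers. A self-contained sketch of the pattern, with a cut-down `Args` struct standing in for the real `KernelArgs`:

#include <cstddef>

// Sketch: read a struct field from asm via an offsetof() immediate, as the
// rewritten kernels do with "ldr x7, [%x[args_ptr], %[offsetof_bias]]".
struct Args
{
    const float *bias = nullptr;
    void *output_ptr = nullptr;
};

static const float *load_bias(const Args *args)
{
    const float *b;
    __asm__("ldr %x[b], [%x[args], %[off]]\n"
            : [b] "=r"(b)
            : [args] "r"(args), [off] "I"(offsetof(Args, bias))
            : "memory");
    return b;
}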
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp
index bb84a50282..e411da6874 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void a64_hybrid_fp32_mla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void a64_hybrid_fp32_mla_6x16 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -101,10 +103,10 @@ void a64_hybrid_fp32_mla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 67f\n"
"beq 34f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x12, 3f\n"
"ldr q8, [x12, #0x0]\n"
@@ -187,8 +189,8 @@ void a64_hybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -212,6 +214,10 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q17, [x10, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "sub x27, x27, #0x4\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"ldr q17, [x10, #0x40]\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
@@ -236,22 +242,21 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q17, [x10, #0xe0]\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr q16, [x10, #0xf0]\n"
- "sub x27, x27, #0x4\n"
- "add x26, x26, #0x10\n"
+ "add x10, x10, #0x100\n"
"fmla v10.4s, v17.4s, v0.s[3]\n"
+ "ldr q6, [x10, #0x0]\n"
"fmla v11.4s, v16.4s, v0.s[3]\n"
"ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x8\n"
- "add x10, x10, #0x100\n"
- "ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
"fmla v8.4s, v6.4s, v0.s[0]\n"
"ldr q17, [x10, #0x20]\n"
"fmla v9.4s, v7.4s, v0.s[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"ldr q17, [x10, #0x40]\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
@@ -276,26 +281,23 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q17, [x10, #0xe0]\n"
"fmla v9.4s, v16.4s, v0.s[3]\n"
"ldr q16, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x4\n"
+ "add x10, x10, #0x100\n"
"fmla v10.4s, v17.4s, v0.s[3]\n"
"fmla v11.4s, v16.4s, v0.s[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x26], #0x4\n"
- "ldr q16, [x10, #0x0]\n"
- "fmla v8.4s, v16.4s, v18.s[0]\n"
+ "ldr q17, [x10, #0x0]\n"
"sub x27, x27, #0x1\n"
- "ldr q17, [x10, #0x10]\n"
- "ldr q16, [x10, #0x20]\n"
- "fmla v9.4s, v17.4s, v18.s[0]\n"
- "fmla v10.4s, v16.4s, v18.s[0]\n"
+ "ldr q16, [x10, #0x10]\n"
+ "fmla v8.4s, v17.4s, v18.s[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ "fmla v9.4s, v16.4s, v18.s[0]\n"
"ldr q16, [x10, #0x30]\n"
- "fmla v11.4s, v16.4s, v18.s[0]\n"
"add x10, x10, #0x40\n"
+ "fmla v10.4s, v17.4s, v18.s[0]\n"
+ "fmla v11.4s, v16.4s, v18.s[0]\n"
"cbnz x27, 21b\n"
"22:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -304,9 +306,9 @@ void a64_hybrid_fp32_mla_6x16 (
"bne 15b\n"
"prfm pstl1keep, [x9, #0x0]\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v17.4s\n"
"fmin v9.4s, v9.4s, v17.4s\n"
@@ -376,95 +378,95 @@ void a64_hybrid_fp32_mla_6x16 (
"bgt 2b\n"
"b 200f\n"
"34:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"35:" // Height 2: Column loop
"cbz x12, 36f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v14.16b, v10.16b\n"
"mov v15.16b, v11.16b\n"
- "add x12, x12, #0x40\n"
"b 47f\n"
"36:" // Height 2: no bias
"tbz %x[flags], #0, 46f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
"bge 45f\n"
"tbz x11, #3, 40f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
"tbz x11, #2, 38f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
"tbz x11, #1, 37f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
"tbz x11, #0, 44f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v15.s }[2], [x26]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 44f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 39f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
"tbz x11, #0, 44f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v14.s }[2], [x26]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 44f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 42f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x11, #1, 41f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
"tbz x11, #0, 44f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 44f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 43f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
"tbz x11, #0, 44f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
"44:" // Height 2: Partial accumulate: Done
"sub x9, x9, x20\n"
@@ -474,10 +476,10 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"b 47f\n"
"46:" // Height 2: no accumulate
"movi v8.16b, #0x0\n"
@@ -492,8 +494,8 @@ void a64_hybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -525,22 +527,22 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"fmla v14.4s, v17.4s, v1.s[0]\n"
"ldr q17, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
"fmla v15.4s, v16.4s, v1.s[0]\n"
"ldr q16, [x10, #0x50]\n"
- "cmp x27, #0x8\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
"fmla v12.4s, v17.4s, v1.s[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v9.4s, v16.4s, v0.s[1]\n"
"fmla v13.4s, v16.4s, v1.s[1]\n"
"ldr q16, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.4s, v17.4s, v0.s[1]\n"
"fmla v14.4s, v17.4s, v1.s[1]\n"
"ldr q17, [x10, #0x80]\n"
@@ -584,18 +586,18 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v13.4s, v7.4s, v1.s[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x25, x25, #0x10\n"
+ "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.4s, v17.4s, v0.s[0]\n"
"fmla v14.4s, v17.4s, v1.s[0]\n"
"ldr q17, [x10, #0x40]\n"
- "sub x27, x27, #0x4\n"
"fmla v11.4s, v16.4s, v0.s[0]\n"
"fmla v15.4s, v16.4s, v1.s[0]\n"
"ldr q16, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v8.4s, v17.4s, v0.s[1]\n"
"fmla v12.4s, v17.4s, v1.s[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v9.4s, v16.4s, v0.s[1]\n"
"fmla v13.4s, v16.4s, v1.s[1]\n"
"ldr q16, [x10, #0x70]\n"
@@ -642,9 +644,9 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v9.4s, v16.4s, v19.s[0]\n"
"fmla v13.4s, v16.4s, v18.s[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
"fmla v10.4s, v17.4s, v19.s[0]\n"
"fmla v14.4s, v17.4s, v18.s[0]\n"
- "add x10, x10, #0x40\n"
"fmla v11.4s, v16.4s, v19.s[0]\n"
"fmla v15.4s, v16.4s, v18.s[0]\n"
"cbnz x27, 54b\n"
@@ -654,13 +656,13 @@ void a64_hybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 48b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 56f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v17.4s\n"
"fmin v9.4s, v9.4s, v17.4s\n"
@@ -684,63 +686,63 @@ void a64_hybrid_fp32_mla_6x16 (
"tbz x11, #3, 60f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
"tbz x11, #2, 58f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
"tbz x11, #1, 57f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
+ "str d15, [x26], #0x8\n"
"tbz x11, #0, 64f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
+ "st1 { v15.s }[2], [x26]\n"
"b 64f\n"
"57:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 64f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
"b 64f\n"
"58:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 59f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
+ "str d14, [x26], #0x8\n"
"tbz x11, #0, 64f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
+ "st1 { v14.s }[2], [x26]\n"
"b 64f\n"
"59:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 64f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
"b 64f\n"
"60:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 62f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
"tbz x11, #1, 61f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
+ "str d13, [x26], #0x8\n"
"tbz x11, #0, 64f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
+ "st1 { v13.s }[2], [x26]\n"
"b 64f\n"
"61:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 64f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
"b 64f\n"
"62:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 63f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
+ "str d12, [x26], #0x8\n"
"tbz x11, #0, 64f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
+ "st1 { v12.s }[2], [x26]\n"
"b 64f\n"
"63:" // Height 2: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
"64:" // Height 2: Partial direct writeback: Done
"b 66f\n"
"65:" // Height 2: Full writeback
@@ -749,126 +751,126 @@ void a64_hybrid_fp32_mla_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
"66:" // Height 2: Writeback done
"subs x11, x11, #0x10\n"
"bgt 35b\n"
"b 200f\n"
"67:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"68:" // Height 3: Column loop
"cbz x12, 69f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
"b 80f\n"
"69:" // Height 3: no bias
"tbz %x[flags], #0, 79f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"bge 78f\n"
"tbz x11, #3, 73f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"tbz x11, #2, 71f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
"tbz x11, #1, 70f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x11, #0, 77f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
"b 77f\n"
"70:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 77f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
"b 77f\n"
"71:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 72f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x11, #0, 77f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
"b 77f\n"
"72:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 77f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
"b 77f\n"
"73:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 75f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
"tbz x11, #1, 74f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x11, #0, 77f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
"b 77f\n"
"74:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 77f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
"b 77f\n"
"75:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 76f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
"tbz x11, #0, 77f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
"b 77f\n"
"76:" // Height 3: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
"77:" // Height 3: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 80f\n"
@@ -877,14 +879,14 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
"b 80f\n"
"79:" // Height 3: no accumulate
"movi v8.16b, #0x0\n"
@@ -903,8 +905,8 @@ void a64_hybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"81:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 82f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -944,18 +946,18 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v17.4s, v7.4s, v2.s[0]\n"
"ldr q20, [x10, #0x30]\n"
"add x24, x24, #0x10\n"
- "fmla v10.4s, v21.4s, v0.s[0]\n"
- "fmla v14.4s, v21.4s, v1.s[0]\n"
"cmp x27, #0x8\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "fmla v10.4s, v21.4s, v0.s[0]\n"
+ "fmla v14.4s, v21.4s, v1.s[0]\n"
"fmla v18.4s, v21.4s, v2.s[0]\n"
"ldr q21, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v11.4s, v20.4s, v0.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v15.4s, v20.4s, v1.s[0]\n"
"fmla v19.4s, v20.4s, v2.s[0]\n"
"ldr q20, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v8.4s, v21.4s, v0.s[1]\n"
"fmla v12.4s, v21.4s, v1.s[1]\n"
"fmla v16.4s, v21.4s, v2.s[1]\n"
@@ -1022,14 +1024,14 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v17.4s, v7.4s, v2.s[0]\n"
"ldr q20, [x10, #0x30]\n"
"sub x27, x27, #0x4\n"
- "fmla v10.4s, v21.4s, v0.s[0]\n"
- "fmla v14.4s, v21.4s, v1.s[0]\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "fmla v10.4s, v21.4s, v0.s[0]\n"
+ "fmla v14.4s, v21.4s, v1.s[0]\n"
"fmla v18.4s, v21.4s, v2.s[0]\n"
"ldr q21, [x10, #0x40]\n"
- "fmla v11.4s, v20.4s, v0.s[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ "fmla v11.4s, v20.4s, v0.s[0]\n"
"fmla v15.4s, v20.4s, v1.s[0]\n"
"fmla v19.4s, v20.4s, v2.s[0]\n"
"ldr q20, [x10, #0x50]\n"
@@ -1088,9 +1090,9 @@ void a64_hybrid_fp32_mla_6x16 (
"sub x27, x27, #0x1\n"
"ldr s22, [x24], #0x4\n"
"ldr q21, [x10, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
"fmla v8.4s, v21.4s, v24.s[0]\n"
"fmla v12.4s, v21.4s, v23.s[0]\n"
- "ldr q20, [x10, #0x10]\n"
"fmla v16.4s, v21.4s, v22.s[0]\n"
"ldr q21, [x10, #0x20]\n"
"fmla v9.4s, v20.4s, v24.s[0]\n"
@@ -1111,15 +1113,15 @@ void a64_hybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 81b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 89f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v21.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v21.4s }, [x21]\n"
"ld1r { v20.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v21.4s\n"
"fmin v9.4s, v9.4s, v21.4s\n"
@@ -1151,79 +1153,79 @@ void a64_hybrid_fp32_mla_6x16 (
"tbz x11, #3, 93f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
"tbz x11, #2, 91f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
"tbz x11, #1, 90f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x11, #0, 97f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
"b 97f\n"
"90:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 97f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
"b 97f\n"
"91:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 92f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x11, #0, 97f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
"b 97f\n"
"92:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 97f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
"b 97f\n"
"93:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 95f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
"tbz x11, #1, 94f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x11, #0, 97f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
"b 97f\n"
"94:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 97f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
"b 97f\n"
"95:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 96f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x11, #0, 97f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
"b 97f\n"
"96:" // Height 3: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
"97:" // Height 3: Partial direct writeback: Done
"b 99f\n"
"98:" // Height 3: Full writeback
@@ -1232,39 +1234,39 @@ void a64_hybrid_fp32_mla_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"99:" // Height 3: Writeback done
"subs x11, x11, #0x10\n"
"bgt 68b\n"
"b 200f\n"
"100:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"101:" // Height 4: Column loop
"cbz x12, 102f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1272,111 +1274,111 @@ void a64_hybrid_fp32_mla_6x16 (
"102:" // Height 4: no bias
"tbz %x[flags], #0, 112f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"bge 111f\n"
"tbz x11, #3, 106f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
"tbz x11, #2, 104f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
"tbz x11, #1, 103f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x11, #0, 110f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
"b 110f\n"
"103:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 110f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
"b 110f\n"
"104:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 105f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x11, #0, 110f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
"b 110f\n"
"105:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 110f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
"b 110f\n"
"106:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 108f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"tbz x11, #1, 107f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x11, #0, 110f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
"b 110f\n"
"107:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 110f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
"b 110f\n"
"108:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 109f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x11, #0, 110f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
"b 110f\n"
"109:" // Height 4: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
"110:" // Height 4: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 113f\n"
@@ -1385,18 +1387,18 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"b 113f\n"
"112:" // Height 4: no accumulate
"movi v8.16b, #0x0\n"
@@ -1419,8 +1421,8 @@ void a64_hybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"114:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 115f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1560,14 +1562,14 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"add x23, x23, #0x10\n"
- "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"ldr q24, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v10.4s, v25.4s, v0.s[0]\n"
"fmla v14.4s, v25.4s, v1.s[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x4\n"
"prfm pldl1keep, [x24, #0x80]\n"
"fmla v18.4s, v25.4s, v2.s[0]\n"
"fmla v22.4s, v25.4s, v3.s[0]\n"
@@ -1673,17 +1675,17 @@ void a64_hybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 114b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 122f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v25.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v25.4s }, [x21]\n"
"ld1r { v24.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v25.4s\n"
"fmin v9.4s, v9.4s, v25.4s\n"
@@ -1723,95 +1725,95 @@ void a64_hybrid_fp32_mla_6x16 (
"tbz x11, #3, 126f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
"tbz x11, #2, 124f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
"tbz x11, #1, 123f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
"tbz x11, #0, 130f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
"b 130f\n"
"123:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 130f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
"b 130f\n"
"124:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 125f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
"tbz x11, #0, 130f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
"b 130f\n"
"125:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 130f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
"b 130f\n"
"126:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 128f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
"tbz x11, #1, 127f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
"tbz x11, #0, 130f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
"b 130f\n"
"127:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 130f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
"b 130f\n"
"128:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 129f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x11, #0, 130f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
"b 130f\n"
"129:" // Height 4: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
"130:" // Height 4: Partial direct writeback: Done
"b 132f\n"
"131:" // Height 4: Full writeback
@@ -1820,43 +1822,43 @@ void a64_hybrid_fp32_mla_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
"132:" // Height 4: Writeback done
"subs x11, x11, #0x10\n"
"bgt 101b\n"
"b 200f\n"
"133:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"134:" // Height 5: Column loop
"cbz x12, 135f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -1868,128 +1870,128 @@ void a64_hybrid_fp32_mla_6x16 (
"135:" // Height 5: no bias
"tbz %x[flags], #0, 145f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
"bge 144f\n"
"tbz x11, #3, 139f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x11, #2, 137f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
"tbz x11, #1, 136f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x11, #0, 143f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
"b 143f\n"
"136:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 143f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
"b 143f\n"
"137:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 138f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x11, #0, 143f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
"b 143f\n"
"138:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 143f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
"b 143f\n"
"139:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 141f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"tbz x11, #1, 140f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x11, #0, 143f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 143f\n"
"140:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 143f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"b 143f\n"
"141:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 142f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x11, #0, 143f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 143f\n"
"142:" // Height 5: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"143:" // Height 5: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 146f\n"
@@ -1998,22 +2000,22 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
"b 146f\n"
"145:" // Height 5: no accumulate
"movi v8.16b, #0x0\n"
@@ -2040,8 +2042,8 @@ void a64_hybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"147:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 148f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2207,12 +2209,12 @@ void a64_hybrid_fp32_mla_6x16 (
"add x22, x22, #0x10\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
- "sub x27, x27, #0x4\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"ldr q28, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x4\n"
"fmla v10.4s, v29.4s, v0.s[0]\n"
"fmla v14.4s, v29.4s, v1.s[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
@@ -2309,9 +2311,9 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr s31, [x23], #0x4\n"
"ldr s30, [x22], #0x4\n"
"ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
"fmla v8.4s, v29.4s, v2.s[0]\n"
"fmla v12.4s, v29.4s, v1.s[0]\n"
- "ldr q28, [x10, #0x10]\n"
"fmla v16.4s, v29.4s, v0.s[0]\n"
"fmla v20.4s, v29.4s, v31.s[0]\n"
"fmla v24.4s, v29.4s, v30.s[0]\n"
@@ -2340,19 +2342,19 @@ void a64_hybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 147b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 155f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v29.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v29.4s }, [x21]\n"
"ld1r { v28.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v29.4s\n"
"fmin v9.4s, v9.4s, v29.4s\n"
@@ -2400,111 +2402,111 @@ void a64_hybrid_fp32_mla_6x16 (
"tbz x11, #3, 159f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
"tbz x11, #2, 157f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
"tbz x11, #1, 156f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x11, #0, 163f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
"b 163f\n"
"156:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 163f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
"b 163f\n"
"157:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 158f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x11, #0, 163f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
"b 163f\n"
"158:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 163f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
"b 163f\n"
"159:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 161f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x11, #1, 160f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x11, #0, 163f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 163f\n"
"160:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 163f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 163f\n"
"161:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 162f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x11, #0, 163f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 163f\n"
"162:" // Height 5: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"163:" // Height 5: Partial direct writeback: Done
"b 165f\n"
"164:" // Height 5: Full writeback
@@ -2513,50 +2515,51 @@ void a64_hybrid_fp32_mla_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"165:" // Height 5: Writeback done
"subs x11, x11, #0x10\n"
"bgt 134b\n"
"b 200f\n"
"166:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"167:" // Height 6: Column loop
"cbz x12, 168f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "mov v12.16b, v8.16b\n"
- "mov v13.16b, v9.16b\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
- "mov v14.16b, v10.16b\n"
- "mov v15.16b, v11.16b\n"
+ "add x12, x12, #0x40\n"
+ "mov v12.16b, v8.16b\n"
+ "mov v13.16b, v9.16b\n"
"mov v16.16b, v8.16b\n"
"mov v17.16b, v9.16b\n"
- "add x12, x12, #0x40\n"
+ "mov v20.16b, v8.16b\n"
+ "mov v14.16b, v10.16b\n"
+ "mov v15.16b, v11.16b\n"
"mov v18.16b, v10.16b\n"
"mov v19.16b, v11.16b\n"
- "mov v20.16b, v8.16b\n"
"mov v21.16b, v9.16b\n"
"mov v22.16b, v10.16b\n"
"mov v23.16b, v11.16b\n"
@@ -2572,145 +2575,145 @@ void a64_hybrid_fp32_mla_6x16 (
"168:" // Height 6: no bias
"tbz %x[flags], #0, 178f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
"bge 177f\n"
"tbz x11, #3, 172f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x11, #2, 170f\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x11, #1, 169f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x11, #0, 176f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 176f\n"
"169:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 176f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 176f\n"
"170:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 171f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x11, #0, 176f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 176f\n"
"171:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 176f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 176f\n"
"172:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 174f\n"
"ld1 { v8.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v16.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v16.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x11, #1, 173f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x11, #0, 176f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 176f\n"
"173:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 176f\n"
"ldr s9, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 176f\n"
"174:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 175f\n"
"ldr d8, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d16, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d16, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x11, #0, 176f\n"
"ld1 { v8.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v16.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v16.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 176f\n"
"175:" // Height 6: Partial accumulate: partial_1_0
"ldr s8, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s16, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s16, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"176:" // Height 6: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 179f\n"
@@ -2719,26 +2722,26 @@ void a64_hybrid_fp32_mla_6x16 (
"ldr q9, [x9, #0x10]\n"
"ldr q10, [x9, #0x20]\n"
"ldr q11, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q16, [x24, #0x0]\n"
- "ldr q17, [x24, #0x10]\n"
- "ldr q18, [x24, #0x20]\n"
- "ldr q19, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q24, [x22, #0x0]\n"
- "ldr q25, [x22, #0x10]\n"
- "ldr q26, [x22, #0x20]\n"
- "ldr q27, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q16, [x25, #0x0]\n"
+ "ldr q17, [x25, #0x10]\n"
+ "ldr q18, [x25, #0x20]\n"
+ "ldr q19, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q24, [x23, #0x0]\n"
+ "ldr q25, [x23, #0x10]\n"
+ "ldr q26, [x23, #0x20]\n"
+ "ldr q27, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"b 179f\n"
"178:" // Height 6: no accumulate
"movi v8.16b, #0x0\n"
@@ -2769,8 +2772,8 @@ void a64_hybrid_fp32_mla_6x16 (
"mov x28, #0x0\n"
"180:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 181f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2960,18 +2963,18 @@ void a64_hybrid_fp32_mla_6x16 (
"fmla v9.4s, v7.4s, v0.s[0]\n"
"fmla v13.4s, v7.4s, v1.s[0]\n"
"add x21, x21, #0x10\n"
- "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"fmla v17.4s, v7.4s, v2.s[0]\n"
"fmla v21.4s, v7.4s, v3.s[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v25.4s, v7.4s, v4.s[0]\n"
"fmla v29.4s, v7.4s, v5.s[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v10.4s, v6.4s, v0.s[0]\n"
"fmla v14.4s, v6.4s, v1.s[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x27, x27, #0x4\n"
"prfm pldl1keep, [x22, #0x80]\n"
"fmla v18.4s, v6.4s, v2.s[0]\n"
"fmla v22.4s, v6.4s, v3.s[0]\n"
@@ -3115,21 +3118,21 @@ void a64_hybrid_fp32_mla_6x16 (
"cmp x28, x20\n"
"bne 180b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 188f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v1.4s\n"
"fmin v9.4s, v9.4s, v1.4s\n"
@@ -3185,127 +3188,127 @@ void a64_hybrid_fp32_mla_6x16 (
"tbz x11, #3, 192f\n"
"st1 { v8.4s }, [x9], #0x10\n"
"st1 { v9.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v13.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v21.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
- "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v13.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x22], #0x10\n"
"tbz x11, #2, 190f\n"
"st1 { v10.4s }, [x9], #0x10\n"
- "st1 { v14.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
- "st1 { v22.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v30.4s }, [x21], #0x10\n"
+ "st1 { v14.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
+ "st1 { v22.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
+ "st1 { v30.4s }, [x22], #0x10\n"
"tbz x11, #1, 189f\n"
"str d11, [x9], #0x8\n"
- "str d15, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
- "str d23, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d15, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
+ "str d23, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x11, #0, 196f\n"
"st1 { v11.s }[2], [x9]\n"
- "st1 { v15.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
- "st1 { v23.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v15.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
+ "st1 { v23.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
+ "st1 { v31.s }[2], [x22]\n"
"b 196f\n"
"189:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 196f\n"
"str s11, [x9, #0x0]\n"
- "str s15, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
- "str s23, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s15, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
+ "str s23, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
+ "str s31, [x22, #0x0]\n"
"b 196f\n"
"190:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 191f\n"
"str d10, [x9], #0x8\n"
- "str d14, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
- "str d22, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d30, [x21], #0x8\n"
+ "str d14, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
+ "str d22, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d30, [x22], #0x8\n"
"tbz x11, #0, 196f\n"
"st1 { v10.s }[2], [x9]\n"
- "st1 { v14.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
- "st1 { v22.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v30.s }[2], [x21]\n"
+ "st1 { v14.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
+ "st1 { v22.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
+ "st1 { v30.s }[2], [x22]\n"
"b 196f\n"
"191:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 196f\n"
"str s10, [x9, #0x0]\n"
- "str s14, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
- "str s22, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s30, [x21, #0x0]\n"
+ "str s14, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
+ "str s22, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
+ "str s30, [x22, #0x0]\n"
"b 196f\n"
"192:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 194f\n"
"st1 { v8.4s }, [x9], #0x10\n"
- "st1 { v12.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x22], #0x10\n"
"tbz x11, #1, 193f\n"
"str d9, [x9], #0x8\n"
- "str d13, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
- "str d21, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
- "str d29, [x21], #0x8\n"
+ "str d13, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
+ "str d21, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
+ "str d29, [x22], #0x8\n"
"tbz x11, #0, 196f\n"
"st1 { v9.s }[2], [x9]\n"
- "st1 { v13.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
- "st1 { v21.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
- "st1 { v29.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
+ "st1 { v21.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
+ "st1 { v29.s }[2], [x22]\n"
"b 196f\n"
"193:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 196f\n"
"str s9, [x9, #0x0]\n"
- "str s13, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
- "str s21, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
- "str s29, [x21, #0x0]\n"
+ "str s13, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
+ "str s21, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
+ "str s29, [x22, #0x0]\n"
"b 196f\n"
"194:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 195f\n"
"str d8, [x9], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x11, #0, 196f\n"
"st1 { v8.s }[2], [x9]\n"
- "st1 { v12.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
- "st1 { v20.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
- "st1 { v28.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
+ "st1 { v20.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
+ "st1 { v28.s }[2], [x22]\n"
"b 196f\n"
"195:" // Height 6: Partial direct writeback: partial_1_0
"str s8, [x9, #0x0]\n"
- "str s12, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
- "str s20, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
- "str s28, [x21, #0x0]\n"
+ "str s12, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
+ "str s20, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
+ "str s28, [x22, #0x0]\n"
"196:" // Height 6: Partial direct writeback: Done
"b 198f\n"
"197:" // Height 6: Full writeback
@@ -3314,26 +3317,26 @@ void a64_hybrid_fp32_mla_6x16 (
"str q10, [x9, #0x20]\n"
"str q11, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q12, [x25, #0x0]\n"
- "str q13, [x25, #0x10]\n"
- "str q14, [x25, #0x20]\n"
- "str q15, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
- "str q20, [x23, #0x0]\n"
- "str q21, [x23, #0x10]\n"
- "str q22, [x23, #0x20]\n"
- "str q23, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
- "str q28, [x21, #0x0]\n"
- "str q29, [x21, #0x10]\n"
- "str q30, [x21, #0x20]\n"
- "str q31, [x21, #0x30]\n"
+ "str q12, [x26, #0x0]\n"
+ "str q13, [x26, #0x10]\n"
+ "str q14, [x26, #0x20]\n"
+ "str q15, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q21, [x24, #0x10]\n"
+ "str q22, [x24, #0x20]\n"
+ "str q23, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q28, [x22, #0x0]\n"
+ "str q29, [x22, #0x10]\n"
+ "str q30, [x22, #0x20]\n"
+ "str q31, [x22, #0x30]\n"
"198:" // Height 6: Writeback done
"subs x11, x11, #0x10\n"
"bgt 167b\n"
@@ -3349,8 +3352,8 @@ void a64_hybrid_fp32_mla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"200:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
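
Note on the recurring change in this and the following kernel diffs: `output_ptr` and `bias` leave the inline-asm operand list (the old "+&r"(output_ptr) and "r"(bias) bindings) and become fields of the `KernelArgs` block, which the assembly reloads from `[%x[args_ptr]]` via `offsetof` operands. Below is a minimal C++ sketch of that pattern, assuming simplified field types and ordering; the real struct is declared inside each generated kernel and may differ in layout.

#include <cstddef>

// Hypothetical, trimmed-down mirror of the per-call argument block. The field
// names match the offsetof() operands visible in the diff; types and order
// are assumptions.
struct KernelArgs {
    size_t num_strings = {};
    const unsigned int *string_lengths = {};
    size_t N = {};
    const float *B_ptr = {};
    size_t output_offset = {};
    size_t input_initial_col = {};
    size_t input_offset = {};
    void *output_ptr = {};   // new: previously the "+&r"(output_ptr) operand
    const float *bias = {};  // new: previously the "r"(bias) operand
    float maxval = {};
    float minval = {};
};

// Inside the asm body the pointers are now fetched, and written back, on demand:
//   "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"   // current output base
//   "madd x20, x21, x20, x9\n"                           // step past this block's rows
//   "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"  // persist for the next block
//   "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"        // bias pointer per column loop
// with the constraint list supplying
//   [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)),
//   [offsetof_bias]       "I" (offsetof(KernelArgs, bias)).
// Dropping two tied operands frees scratch registers, which is why the per-row
// output pointers in the Height 5/6 paths shift up (x25..x21 becomes x26..x22)
// and the prefetch/pointer-setup sequences are rescheduled around them.
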
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp
index 3ec02395d1..e8f7cdf329 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 8, 4, 1> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 4, 1> transforms = {};
// Default to the generic kernel
kern_type kernel=a64_hybrid_fp32_mla_8x4;
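
The one-line hunk above widens the transform-traits template with a leading LHS operand type, so kernels whose left-hand and right-hand operands are stored as different types can express that; for this pure-fp32 kernel the types coincide. A hedged sketch of the shape of the change follows; the template parameter names and roles are assumptions, and the real class carries the transform/merge helpers elided here.

// Before: StdTransformsFixed<rhs_operand_type, result_type, 8, 4, 1>
// After:  StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 4, 1>
template <typename TLhs, typename TRhs, typename TResult,
          unsigned int Height, unsigned int Width, unsigned int KBlock>
class StdTransformsFixed {
    // Operand preparation / result merge implementations elided;
    // only the added template arity matters for this diff.
};

// For a64_hybrid_fp32_mla_8x4 the operand and result typedefs are all float,
// so the member effectively instantiates as:
//   StdTransformsFixed<float, float, float, 8, 4, 1> transforms = {};
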
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp
index 236865315e..f8b117c546 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void a64_hybrid_fp32_mla_8x4_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -104,10 +106,10 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"cmp %x[M], #0x2\n"
"bgt 43f\n"
"beq 22f\n"
- "mov x3, %x[bias]\n"
+ "ldr x3, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x4, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x5, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x6, %x[output_ptr]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x3, 3f\n"
"ldr q24, [x3, #0x0]\n"
@@ -119,15 +121,15 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 6f\n"
"tbz x4, #1, 4f\n"
"ldr d24, [x6], #0x8\n"
- "mov x26, #0x8\n"
+ "mov x20, #0x8\n"
"tbz x4, #0, 5f\n"
"ld1 { v24.s }[2], [x6]\n"
"b 5f\n"
"4:" // Height 1: Partial accumulate: partial_1_0
"ldr s24, [x6, #0x0]\n"
- "mov x26, #0x0\n"
+ "mov x20, #0x0\n"
"5:" // Height 1: Partial accumulate: Done
- "sub x6, x6, x26\n"
+ "sub x6, x6, x20\n"
"b 8f\n"
"6:" // Height 1: full accumulate
"ldr q24, [x6, #0x0]\n"
@@ -137,16 +139,16 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"8:" // Height 1: setup done
"mov x7, #0x0\n"
"9:" // Height 1: String loop
- "ldr x26, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w8, [x26, x7, LSL #0x2]\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w8, [x20, x7, LSL #0x2]\n"
"tbz %x[flags], #3, 10f\n"
- "ldr x26, [%x[input_ptr], x7, LSL #0x3]\n"
- "add x26, x26, x27, LSL #3\n"
- "ldr x17, [x26, #0x0]\n"
+ "ldr x20, [%x[input_ptr], x7, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x17, [x20, #0x0]\n"
"cbnz x7, 11f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x17, x17, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x17, x17, x20, LSL #2\n"
"b 11f\n"
"10:" // Height 1: setup direct input
"mov x17, %x[input_ptr]\n"
@@ -163,59 +165,59 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"12:" // Height 1: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
"add x17, x17, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x5, x5, #0x40\n"
+ "ldr x23, [x17, #0x8]\n"
"ldr d8, [x5, #0x0]\n"
- "fmla v24.4s, v10.4s, v0.s[2]\n"
+ "sub x8, x8, #0x4\n"
+ "ldr x20, [x5, #0x8]\n"
+ "cmp x8, #0x8\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
"ldr d9, [x5, #0x10]\n"
+ "ldr x22, [x5, #0x18]\n"
+ "ldr x21, [x5, #0x28]\n"
+ "mov v8.d[1], x20\n"
+ "ldr x20, [x5, #0x38]\n"
+ "fmla v24.4s, v10.4s, v0.s[2]\n"
+ "ldr d10, [x5, #0x20]\n"
+ "prfm pldl1keep, [x17, #0x80]\n"
+ "mov v9.d[1], x22\n"
+ "mov v10.d[1], x21\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr d0, [x17, #0x0]\n"
- "sub x8, x8, #0x4\n"
- "ldr d10, [x5, #0x20]\n"
- "cmp x8, #0x8\n"
"ldr d11, [x5, #0x30]\n"
- "ldr x26, [x5, #0x8]\n"
- "mov v8.d[1], x26\n"
- "ldr x26, [x5, #0x18]\n"
- "mov v9.d[1], x26\n"
- "ldr x26, [x17, #0x8]\n"
- "mov v0.d[1], x26\n"
- "ldr x26, [x5, #0x28]\n"
- "mov v10.d[1], x26\n"
- "ldr x26, [x5, #0x38]\n"
- "mov v11.d[1], x26\n"
- "prfm pldl1keep, [x17, #0x80]\n"
+ "mov v0.d[1], x23\n"
+ "mov v11.d[1], x20\n"
"bge 12b\n"
"13:" // Height 1: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
"add x17, x17, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"sub x8, x8, #0x4\n"
- "fmla v24.4s, v10.4s, v0.s[2]\n"
"prfm pldl1keep, [x17, #0x80]\n"
- "fmla v24.4s, v11.4s, v0.s[3]\n"
"add x5, x5, #0x40\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v24.4s, v10.4s, v0.s[2]\n"
+ "fmla v24.4s, v11.4s, v0.s[3]\n"
"14:" // Height 1: Multiply loop: Main loop skip
"cbz x8, 16f\n"
"15:" // Height 1: Multiply loop: Odd block loop
"ldr s17, [x17], #0x4\n"
"sub x8, x8, #0x1\n"
"ldr q16, [x5, #0x0]\n"
- "fmla v24.4s, v16.4s, v17.s[0]\n"
"add x5, x5, #0x10\n"
+ "fmla v24.4s, v16.4s, v17.s[0]\n"
"cbnz x8, 15b\n"
"16:" // Height 1: Multiply loop: No odd multiplies
- "ldr w26, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x7, x7, #0x1\n"
- "cmp x7, x26\n"
+ "cmp x7, x20\n"
"bne 9b\n"
"prfm pstl1keep, [x6, #0x0]\n"
"tbz %x[flags], #1, 17f\n"
- "add x26, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x26]\n"
- "fmin v24.4s, v24.4s, v16.4s\n"
- "add x26, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x26]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
"17:" // Height 1: No activation
"cmp x4, #0x4\n"
@@ -237,40 +239,40 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bgt 2b\n"
"b 170f\n"
"22:" // Height 2
- "mov x3, %x[bias]\n"
+ "ldr x3, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x4, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x5, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x6, %x[output_ptr]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"23:" // Height 2: Column loop
"cbz x3, 24f\n"
"ldr q24, [x3, #0x0]\n"
- "mov v25.16b, v24.16b\n"
"add x3, x3, #0x10\n"
+ "mov v25.16b, v24.16b\n"
"b 29f\n"
"24:" // Height 2: no bias
"tbz %x[flags], #0, 28f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x4, #0x4\n"
- "add x13, x6, x26, LSL #2\n"
+ "add x28, x6, x20, LSL #2\n"
"bge 27f\n"
"tbz x4, #1, 25f\n"
"ldr d24, [x6], #0x8\n"
- "mov x26, #0x8\n"
- "ldr d25, [x13], #0x8\n"
+ "mov x20, #0x8\n"
+ "ldr d25, [x28], #0x8\n"
"tbz x4, #0, 26f\n"
"ld1 { v24.s }[2], [x6]\n"
- "ld1 { v25.s }[2], [x13]\n"
+ "ld1 { v25.s }[2], [x28]\n"
"b 26f\n"
"25:" // Height 2: Partial accumulate: partial_1_0
"ldr s24, [x6, #0x0]\n"
- "mov x26, #0x0\n"
- "ldr s25, [x13, #0x0]\n"
+ "mov x20, #0x0\n"
+ "ldr s25, [x28, #0x0]\n"
"26:" // Height 2: Partial accumulate: Done
- "sub x6, x6, x26\n"
+ "sub x6, x6, x20\n"
"b 29f\n"
"27:" // Height 2: full accumulate
"ldr q24, [x6, #0x0]\n"
- "ldr q25, [x13, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
"b 29f\n"
"28:" // Height 2: no accumulate
"movi v24.16b, #0x0\n"
@@ -278,22 +280,22 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"29:" // Height 2: setup done
"mov x7, #0x0\n"
"30:" // Height 2: String loop
- "ldr x26, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w8, [x26, x7, LSL #0x2]\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w8, [x20, x7, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
- "ldr x26, [%x[input_ptr], x7, LSL #0x3]\n"
- "add x26, x26, x27, LSL #3\n"
- "ldr x17, [x26, #0x0]\n"
- "ldr x16, [x26, #0x8]\n"
+ "ldr x20, [%x[input_ptr], x7, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x17, [x20, #0x0]\n"
+ "ldr x16, [x20, #0x8]\n"
"cbnz x7, 32f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x17, x17, x26, LSL #2\n"
- "add x16, x16, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x17, x17, x20, LSL #2\n"
+ "add x16, x16, x20, LSL #2\n"
"b 32f\n"
"31:" // Height 2: setup direct input
"mov x17, %x[input_ptr]\n"
- "add x16, x17, x27, LSL #2\n"
+ "add x16, x17, x21, LSL #2\n"
"32:" // Height 2: input setup done
"cmp x8, #0x4\n"
"blt 35f\n"
@@ -310,49 +312,49 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"add x17, x17, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
"add x16, x16, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x5, x5, #0x40\n"
+ "ldr x24, [x17, #0x8]\n"
"ldr d8, [x5, #0x0]\n"
+ "sub x8, x8, #0x4\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "ldr x20, [x5, #0x8]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"ldr d9, [x5, #0x10]\n"
+ "ldr x23, [x5, #0x18]\n"
+ "cmp x8, #0x8\n"
+ "ldr x22, [x5, #0x28]\n"
+ "mov v8.d[1], x20\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
+ "ldr x21, [x16, #0x8]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
"ldr d10, [x5, #0x20]\n"
- "ldr x27, [x5, #0x8]\n"
+ "ldr x20, [x5, #0x38]\n"
+ "mov v9.d[1], x23\n"
+ "prfm pldl1keep, [x17, #0x80]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr d0, [x17, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"ldr d1, [x16, #0x0]\n"
- "sub x8, x8, #0x4\n"
"ldr d11, [x5, #0x30]\n"
- "cmp x8, #0x8\n"
- "ldr x26, [x5, #0x18]\n"
- "mov v8.d[1], x27\n"
- "ldr x27, [x17, #0x8]\n"
- "mov v9.d[1], x26\n"
- "ldr x26, [x16, #0x8]\n"
- "mov v0.d[1], x27\n"
- "ldr x27, [x5, #0x28]\n"
- "mov v1.d[1], x26\n"
- "ldr x26, [x5, #0x38]\n"
- "mov v10.d[1], x27\n"
- "mov v11.d[1], x26\n"
- "prfm pldl1keep, [x17, #0x80]\n"
+ "mov v10.d[1], x22\n"
"prfm pldl1keep, [x16, #0x80]\n"
+ "mov v0.d[1], x24\n"
+ "mov v1.d[1], x21\n"
+ "mov v11.d[1], x20\n"
"bge 33b\n"
"34:" // Height 2: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
"add x17, x17, #0x10\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
"add x16, x16, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"sub x8, x8, #0x4\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
"prfm pldl1keep, [x17, #0x80]\n"
- "fmla v24.4s, v10.4s, v0.s[2]\n"
+ "add x5, x5, #0x40\n"
"prfm pldl1keep, [x16, #0x80]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
+ "fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "add x5, x5, #0x40\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"35:" // Height 2: Multiply loop: Main loop skip
@@ -362,26 +364,26 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"sub x8, x8, #0x1\n"
"ldr s17, [x16], #0x4\n"
"ldr q16, [x5, #0x0]\n"
+ "add x5, x5, #0x10\n"
"fmla v24.4s, v16.4s, v18.s[0]\n"
"fmla v25.4s, v16.4s, v17.s[0]\n"
- "add x5, x5, #0x10\n"
"cbnz x8, 36b\n"
"37:" // Height 2: Multiply loop: No odd multiplies
- "ldr w26, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x7, x7, #0x1\n"
- "cmp x7, x26\n"
+ "cmp x7, x20\n"
"bne 30b\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x6, #0x0]\n"
- "prfm pstl1keep, [x13, #0x0]\n"
+ "add x28, x6, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"tbz %x[flags], #1, 38f\n"
- "add x26, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x26]\n"
- "fmin v24.4s, v24.4s, v16.4s\n"
- "fmin v25.4s, v25.4s, v16.4s\n"
- "add x26, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x26]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
"fmax v25.4s, v25.4s, v16.4s\n"
"38:" // Height 2: No activation
@@ -389,65 +391,65 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 41f\n"
"tbz x4, #1, 39f\n"
"str d24, [x6], #0x8\n"
- "str d25, [x13], #0x8\n"
+ "str d25, [x28], #0x8\n"
"tbz x4, #0, 40f\n"
"st1 { v24.s }[2], [x6]\n"
- "st1 { v25.s }[2], [x13]\n"
+ "st1 { v25.s }[2], [x28]\n"
"b 40f\n"
"39:" // Height 2: Partial direct writeback: partial_1_0
"str s24, [x6, #0x0]\n"
- "str s25, [x13, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
"40:" // Height 2: Partial direct writeback: Done
"b 42f\n"
"41:" // Height 2: Full writeback
"str q24, [x6, #0x0]\n"
"add x6, x6, #0x10\n"
- "str q25, [x13, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
"42:" // Height 2: Writeback done
"subs x4, x4, #0x4\n"
"bgt 23b\n"
"b 170f\n"
"43:" // Height 3
- "mov x3, %x[bias]\n"
+ "ldr x3, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x4, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x5, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x6, %x[output_ptr]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 3: Column loop
"cbz x3, 45f\n"
"ldr q24, [x3, #0x0]\n"
+ "add x3, x3, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x3, x3, #0x10\n"
"b 50f\n"
"45:" // Height 3: no bias
"tbz %x[flags], #0, 49f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x4, #0x4\n"
- "add x12, x13, x26, LSL #2\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"bge 48f\n"
"tbz x4, #1, 46f\n"
"ldr d24, [x6], #0x8\n"
- "mov x26, #0x8\n"
- "ldr d25, [x13], #0x8\n"
- "ldr d26, [x12], #0x8\n"
+ "mov x20, #0x8\n"
+ "ldr d25, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
"tbz x4, #0, 47f\n"
"ld1 { v24.s }[2], [x6]\n"
- "ld1 { v25.s }[2], [x13]\n"
- "ld1 { v26.s }[2], [x12]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
"b 47f\n"
"46:" // Height 3: Partial accumulate: partial_1_0
"ldr s24, [x6, #0x0]\n"
- "mov x26, #0x0\n"
- "ldr s25, [x13, #0x0]\n"
- "ldr s26, [x12, #0x0]\n"
+ "mov x20, #0x0\n"
+ "ldr s25, [x28, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
"47:" // Height 3: Partial accumulate: Done
- "sub x6, x6, x26\n"
+ "sub x6, x6, x20\n"
"b 50f\n"
"48:" // Height 3: full accumulate
"ldr q24, [x6, #0x0]\n"
- "ldr q25, [x13, #0x0]\n"
- "ldr q26, [x12, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
"b 50f\n"
"49:" // Height 3: no accumulate
"movi v24.16b, #0x0\n"
@@ -456,25 +458,25 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"50:" // Height 3: setup done
"mov x7, #0x0\n"
"51:" // Height 3: String loop
- "ldr x26, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w8, [x26, x7, LSL #0x2]\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w8, [x20, x7, LSL #0x2]\n"
"tbz %x[flags], #3, 52f\n"
- "ldr x26, [%x[input_ptr], x7, LSL #0x3]\n"
- "add x26, x26, x27, LSL #3\n"
- "ldr x17, [x26, #0x0]\n"
- "ldr x16, [x26, #0x8]\n"
- "ldr x15, [x26, #0x10]\n"
+ "ldr x20, [%x[input_ptr], x7, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x17, [x20, #0x0]\n"
+ "ldr x16, [x20, #0x8]\n"
+ "ldr x15, [x20, #0x10]\n"
"cbnz x7, 53f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x17, x17, x26, LSL #2\n"
- "add x16, x16, x26, LSL #2\n"
- "add x15, x15, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x17, x17, x20, LSL #2\n"
+ "add x16, x16, x20, LSL #2\n"
+ "add x15, x15, x20, LSL #2\n"
"b 53f\n"
"52:" // Height 3: setup direct input
"mov x17, %x[input_ptr]\n"
- "add x16, x17, x27, LSL #2\n"
- "add x15, x16, x27, LSL #2\n"
+ "add x16, x17, x21, LSL #2\n"
+ "add x15, x16, x21, LSL #2\n"
"53:" // Height 3: input setup done
"cmp x8, #0x4\n"
"blt 56f\n"
@@ -494,42 +496,42 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"add x16, x16, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
"add x15, x15, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x5, x5, #0x40\n"
+ "ldr x25, [x17, #0x8]\n"
"ldr d8, [x5, #0x0]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
+ "ldr x20, [x5, #0x8]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
"ldr d9, [x5, #0x10]\n"
+ "ldr x24, [x5, #0x18]\n"
+ "sub x8, x8, #0x4\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "ldr x28, [x5, #0x8]\n"
+ "ldr x23, [x5, #0x28]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "ldr x27, [x5, #0x18]\n"
+ "ldr x22, [x16, #0x8]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"ldr d10, [x5, #0x20]\n"
- "ldr x26, [x5, #0x28]\n"
+ "ldr x21, [x15, #0x8]\n"
+ "cmp x8, #0x8\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr d0, [x17, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"ldr d1, [x16, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
"ldr d2, [x15, #0x0]\n"
- "sub x8, x8, #0x4\n"
"ldr d11, [x5, #0x30]\n"
- "cmp x8, #0x8\n"
- "ldr x9, [x17, #0x8]\n"
- "mov v8.d[1], x28\n"
- "ldr x28, [x16, #0x8]\n"
- "mov v9.d[1], x27\n"
- "ldr x27, [x15, #0x8]\n"
- "mov v10.d[1], x26\n"
- "ldr x26, [x5, #0x38]\n"
- "mov v0.d[1], x9\n"
- "mov v1.d[1], x28\n"
+ "mov v8.d[1], x20\n"
+ "ldr x20, [x5, #0x38]\n"
+ "mov v9.d[1], x24\n"
"prfm pldl1keep, [x17, #0x80]\n"
- "mov v2.d[1], x27\n"
+ "mov v10.d[1], x23\n"
"prfm pldl1keep, [x16, #0x80]\n"
- "mov v11.d[1], x26\n"
+ "mov v0.d[1], x25\n"
"prfm pldl1keep, [x15, #0x80]\n"
+ "mov v1.d[1], x22\n"
+ "mov v2.d[1], x21\n"
+ "mov v11.d[1], x20\n"
"bge 54b\n"
"55:" // Height 3: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -538,16 +540,16 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"add x16, x16, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
"add x15, x15, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"sub x8, x8, #0x4\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
"prfm pldl1keep, [x17, #0x80]\n"
- "fmla v26.4s, v9.4s, v2.s[1]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
"prfm pldl1keep, [x16, #0x80]\n"
- "fmla v24.4s, v10.4s, v0.s[2]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
+ "add x5, x5, #0x40\n"
+ "fmla v26.4s, v9.4s, v2.s[1]\n"
"prfm pldl1keep, [x15, #0x80]\n"
+ "fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "add x5, x5, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
@@ -560,30 +562,30 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr s18, [x16], #0x4\n"
"ldr s17, [x15], #0x4\n"
"ldr q16, [x5, #0x0]\n"
+ "add x5, x5, #0x10\n"
"fmla v24.4s, v16.4s, v19.s[0]\n"
"fmla v25.4s, v16.4s, v18.s[0]\n"
- "add x5, x5, #0x10\n"
"fmla v26.4s, v16.4s, v17.s[0]\n"
"cbnz x8, 57b\n"
"58:" // Height 3: Multiply loop: No odd multiplies
- "ldr w26, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x7, x7, #0x1\n"
- "cmp x7, x26\n"
+ "cmp x7, x20\n"
"bne 51b\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x6, #0x0]\n"
- "prfm pstl1keep, [x13, #0x0]\n"
- "prfm pstl1keep, [x12, #0x0]\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
"tbz %x[flags], #1, 59f\n"
- "add x26, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x26]\n"
- "fmin v24.4s, v24.4s, v16.4s\n"
- "fmin v25.4s, v25.4s, v16.4s\n"
- "fmin v26.4s, v26.4s, v16.4s\n"
- "add x26, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x26]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
"fmax v25.4s, v25.4s, v16.4s\n"
"fmax v26.4s, v26.4s, v16.4s\n"
@@ -592,75 +594,75 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 62f\n"
"tbz x4, #1, 60f\n"
"str d24, [x6], #0x8\n"
- "str d25, [x13], #0x8\n"
- "str d26, [x12], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
"tbz x4, #0, 61f\n"
"st1 { v24.s }[2], [x6]\n"
- "st1 { v25.s }[2], [x13]\n"
- "st1 { v26.s }[2], [x12]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
"b 61f\n"
"60:" // Height 3: Partial direct writeback: partial_1_0
"str s24, [x6, #0x0]\n"
- "str s25, [x13, #0x0]\n"
- "str s26, [x12, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
"61:" // Height 3: Partial direct writeback: Done
"b 63f\n"
"62:" // Height 3: Full writeback
"str q24, [x6, #0x0]\n"
"add x6, x6, #0x10\n"
- "str q25, [x13, #0x0]\n"
- "str q26, [x12, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
"63:" // Height 3: Writeback done
"subs x4, x4, #0x4\n"
"bgt 44b\n"
"b 170f\n"
"64:" // Height 4
- "mov x3, %x[bias]\n"
+ "ldr x3, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x4, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x5, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x6, %x[output_ptr]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"65:" // Height 4: Column loop
"cbz x3, 66f\n"
"ldr q24, [x3, #0x0]\n"
+ "add x3, x3, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x3, x3, #0x10\n"
"mov v27.16b, v24.16b\n"
"b 71f\n"
"66:" // Height 4: no bias
"tbz %x[flags], #0, 70f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x4, #0x4\n"
- "add x11, x12, x26, LSL #2\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "add x26, x27, x20, LSL #2\n"
"bge 69f\n"
"tbz x4, #1, 67f\n"
"ldr d24, [x6], #0x8\n"
- "mov x26, #0x8\n"
- "ldr d25, [x13], #0x8\n"
- "ldr d26, [x12], #0x8\n"
- "ldr d27, [x11], #0x8\n"
+ "mov x20, #0x8\n"
+ "ldr d25, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
"tbz x4, #0, 68f\n"
"ld1 { v24.s }[2], [x6]\n"
- "ld1 { v25.s }[2], [x13]\n"
- "ld1 { v26.s }[2], [x12]\n"
- "ld1 { v27.s }[2], [x11]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
"b 68f\n"
"67:" // Height 4: Partial accumulate: partial_1_0
"ldr s24, [x6, #0x0]\n"
- "mov x26, #0x0\n"
- "ldr s25, [x13, #0x0]\n"
- "ldr s26, [x12, #0x0]\n"
- "ldr s27, [x11, #0x0]\n"
+ "mov x20, #0x0\n"
+ "ldr s25, [x28, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
"68:" // Height 4: Partial accumulate: Done
- "sub x6, x6, x26\n"
+ "sub x6, x6, x20\n"
"b 71f\n"
"69:" // Height 4: full accumulate
"ldr q24, [x6, #0x0]\n"
- "ldr q25, [x13, #0x0]\n"
- "ldr q26, [x12, #0x0]\n"
- "ldr q27, [x11, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
"b 71f\n"
"70:" // Height 4: no accumulate
"movi v24.16b, #0x0\n"
@@ -670,28 +672,28 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"71:" // Height 4: setup done
"mov x7, #0x0\n"
"72:" // Height 4: String loop
- "ldr x26, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w8, [x26, x7, LSL #0x2]\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w8, [x20, x7, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
- "ldr x26, [%x[input_ptr], x7, LSL #0x3]\n"
- "add x26, x26, x27, LSL #3\n"
- "ldr x17, [x26, #0x0]\n"
- "ldr x16, [x26, #0x8]\n"
- "ldr x15, [x26, #0x10]\n"
- "ldr x14, [x26, #0x18]\n"
+ "ldr x20, [%x[input_ptr], x7, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x17, [x20, #0x0]\n"
+ "ldr x16, [x20, #0x8]\n"
+ "ldr x15, [x20, #0x10]\n"
+ "ldr x14, [x20, #0x18]\n"
"cbnz x7, 74f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x17, x17, x26, LSL #2\n"
- "add x16, x16, x26, LSL #2\n"
- "add x15, x15, x26, LSL #2\n"
- "add x14, x14, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x17, x17, x20, LSL #2\n"
+ "add x16, x16, x20, LSL #2\n"
+ "add x15, x15, x20, LSL #2\n"
+ "add x14, x14, x20, LSL #2\n"
"b 74f\n"
"73:" // Height 4: setup direct input
"mov x17, %x[input_ptr]\n"
- "add x16, x17, x27, LSL #2\n"
- "add x15, x16, x27, LSL #2\n"
- "add x14, x15, x27, LSL #2\n"
+ "add x16, x17, x21, LSL #2\n"
+ "add x15, x16, x21, LSL #2\n"
+ "add x14, x15, x21, LSL #2\n"
"74:" // Height 4: input setup done
"cmp x8, #0x4\n"
"blt 77f\n"
@@ -719,22 +721,22 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr d8, [x5, #0x0]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "ldr x27, [x5, #0x8]\n"
+ "ldr x21, [x5, #0x8]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"ldr d9, [x5, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "ldr x26, [x5, #0x18]\n"
+ "ldr x20, [x5, #0x18]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "ldr x11, [x5, #0x28]\n"
+ "ldr x25, [x5, #0x28]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
- "ldr x10, [x17, #0x8]\n"
+ "ldr x24, [x17, #0x8]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"ldr d10, [x5, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr d0, [x17, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"ldr d1, [x16, #0x0]\n"
- "ldr x9, [x16, #0x8]\n"
+ "ldr x23, [x16, #0x8]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
"ldr d2, [x15, #0x0]\n"
"fmla v27.4s, v11.4s, v3.s[3]\n"
@@ -742,21 +744,21 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"sub x8, x8, #0x4\n"
"ldr d11, [x5, #0x30]\n"
"cmp x8, #0x8\n"
- "ldr x28, [x15, #0x8]\n"
- "mov v8.d[1], x27\n"
- "ldr x27, [x14, #0x8]\n"
- "mov v9.d[1], x26\n"
- "ldr x26, [x5, #0x38]\n"
- "mov v10.d[1], x11\n"
+ "ldr x22, [x15, #0x8]\n"
+ "mov v8.d[1], x21\n"
+ "ldr x21, [x14, #0x8]\n"
+ "mov v9.d[1], x20\n"
+ "ldr x20, [x5, #0x38]\n"
+ "mov v10.d[1], x25\n"
"prfm pldl1keep, [x17, #0x80]\n"
- "mov v0.d[1], x10\n"
+ "mov v0.d[1], x24\n"
"prfm pldl1keep, [x16, #0x80]\n"
- "mov v1.d[1], x9\n"
- "mov v2.d[1], x28\n"
+ "mov v1.d[1], x23\n"
+ "mov v2.d[1], x22\n"
"prfm pldl1keep, [x15, #0x80]\n"
- "mov v3.d[1], x27\n"
+ "mov v3.d[1], x21\n"
"prfm pldl1keep, [x14, #0x80]\n"
- "mov v11.d[1], x26\n"
+ "mov v11.d[1], x20\n"
"bge 75b\n"
"76:" // Height 4: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -794,34 +796,34 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr s18, [x15], #0x4\n"
"ldr s17, [x14], #0x4\n"
"ldr q16, [x5, #0x0]\n"
+ "add x5, x5, #0x10\n"
"fmla v24.4s, v16.4s, v20.s[0]\n"
"fmla v25.4s, v16.4s, v19.s[0]\n"
- "add x5, x5, #0x10\n"
"fmla v26.4s, v16.4s, v18.s[0]\n"
"fmla v27.4s, v16.4s, v17.s[0]\n"
"cbnz x8, 78b\n"
"79:" // Height 4: Multiply loop: No odd multiplies
- "ldr w26, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x7, x7, #0x1\n"
- "cmp x7, x26\n"
+ "cmp x7, x20\n"
"bne 72b\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x6, #0x0]\n"
- "prfm pstl1keep, [x13, #0x0]\n"
- "prfm pstl1keep, [x12, #0x0]\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 80f\n"
- "add x26, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x26]\n"
- "fmin v24.4s, v24.4s, v16.4s\n"
- "fmin v25.4s, v25.4s, v16.4s\n"
- "fmin v26.4s, v26.4s, v16.4s\n"
- "fmin v27.4s, v27.4s, v16.4s\n"
- "add x26, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x26]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
"fmax v25.4s, v25.4s, v16.4s\n"
"fmax v26.4s, v26.4s, v16.4s\n"
@@ -831,85 +833,85 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 83f\n"
"tbz x4, #1, 81f\n"
"str d24, [x6], #0x8\n"
- "str d25, [x13], #0x8\n"
- "str d26, [x12], #0x8\n"
- "str d27, [x11], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
"tbz x4, #0, 82f\n"
"st1 { v24.s }[2], [x6]\n"
- "st1 { v25.s }[2], [x13]\n"
- "st1 { v26.s }[2], [x12]\n"
- "st1 { v27.s }[2], [x11]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
"b 82f\n"
"81:" // Height 4: Partial direct writeback: partial_1_0
"str s24, [x6, #0x0]\n"
- "str s25, [x13, #0x0]\n"
- "str s26, [x12, #0x0]\n"
- "str s27, [x11, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
"82:" // Height 4: Partial direct writeback: Done
"b 84f\n"
"83:" // Height 4: Full writeback
"str q24, [x6, #0x0]\n"
"add x6, x6, #0x10\n"
- "str q25, [x13, #0x0]\n"
- "str q26, [x12, #0x0]\n"
- "str q27, [x11, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
"84:" // Height 4: Writeback done
"subs x4, x4, #0x4\n"
"bgt 65b\n"
"b 170f\n"
"85:" // Height 5
- "mov x3, %x[bias]\n"
+ "ldr x3, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x4, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x5, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x6, %x[output_ptr]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"86:" // Height 5: Column loop
"cbz x3, 87f\n"
"ldr q24, [x3, #0x0]\n"
+ "add x3, x3, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x3, x3, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"b 92f\n"
"87:" // Height 5: no bias
"tbz %x[flags], #0, 91f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x4, #0x4\n"
- "add x10, x11, x26, LSL #2\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "add x26, x27, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"bge 90f\n"
"tbz x4, #1, 88f\n"
"ldr d24, [x6], #0x8\n"
- "mov x26, #0x8\n"
- "ldr d25, [x13], #0x8\n"
- "ldr d26, [x12], #0x8\n"
- "ldr d27, [x11], #0x8\n"
- "ldr d28, [x10], #0x8\n"
+ "mov x20, #0x8\n"
+ "ldr d25, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
"tbz x4, #0, 89f\n"
"ld1 { v24.s }[2], [x6]\n"
- "ld1 { v25.s }[2], [x13]\n"
- "ld1 { v26.s }[2], [x12]\n"
- "ld1 { v27.s }[2], [x11]\n"
- "ld1 { v28.s }[2], [x10]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v28.s }[2], [x25]\n"
"b 89f\n"
"88:" // Height 5: Partial accumulate: partial_1_0
"ldr s24, [x6, #0x0]\n"
- "mov x26, #0x0\n"
- "ldr s25, [x13, #0x0]\n"
- "ldr s26, [x12, #0x0]\n"
- "ldr s27, [x11, #0x0]\n"
- "ldr s28, [x10, #0x0]\n"
+ "mov x20, #0x0\n"
+ "ldr s25, [x28, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s28, [x25, #0x0]\n"
"89:" // Height 5: Partial accumulate: Done
- "sub x6, x6, x26\n"
+ "sub x6, x6, x20\n"
"b 92f\n"
"90:" // Height 5: full accumulate
"ldr q24, [x6, #0x0]\n"
- "ldr q25, [x13, #0x0]\n"
- "ldr q26, [x12, #0x0]\n"
- "ldr q27, [x11, #0x0]\n"
- "ldr q28, [x10, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
"b 92f\n"
"91:" // Height 5: no accumulate
"movi v24.16b, #0x0\n"
@@ -920,31 +922,31 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"92:" // Height 5: setup done
"mov x7, #0x0\n"
"93:" // Height 5: String loop
- "ldr x26, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w8, [x26, x7, LSL #0x2]\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w8, [x20, x7, LSL #0x2]\n"
"tbz %x[flags], #3, 94f\n"
- "ldr x26, [%x[input_ptr], x7, LSL #0x3]\n"
- "add x26, x26, x27, LSL #3\n"
- "ldr x17, [x26, #0x0]\n"
- "ldr x16, [x26, #0x8]\n"
- "ldr x15, [x26, #0x10]\n"
- "ldr x14, [x26, #0x18]\n"
- "ldr x13, [x26, #0x20]\n"
+ "ldr x20, [%x[input_ptr], x7, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x17, [x20, #0x0]\n"
+ "ldr x16, [x20, #0x8]\n"
+ "ldr x15, [x20, #0x10]\n"
+ "ldr x14, [x20, #0x18]\n"
+ "ldr x13, [x20, #0x20]\n"
"cbnz x7, 95f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x17, x17, x26, LSL #2\n"
- "add x16, x16, x26, LSL #2\n"
- "add x15, x15, x26, LSL #2\n"
- "add x14, x14, x26, LSL #2\n"
- "add x13, x13, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x17, x17, x20, LSL #2\n"
+ "add x16, x16, x20, LSL #2\n"
+ "add x15, x15, x20, LSL #2\n"
+ "add x14, x14, x20, LSL #2\n"
+ "add x13, x13, x20, LSL #2\n"
"b 95f\n"
"94:" // Height 5: setup direct input
"mov x17, %x[input_ptr]\n"
- "add x16, x17, x27, LSL #2\n"
- "add x15, x16, x27, LSL #2\n"
- "add x14, x15, x27, LSL #2\n"
- "add x13, x14, x27, LSL #2\n"
+ "add x16, x17, x21, LSL #2\n"
+ "add x15, x16, x21, LSL #2\n"
+ "add x14, x15, x21, LSL #2\n"
+ "add x13, x14, x21, LSL #2\n"
"95:" // Height 5: input setup done
"cmp x8, #0x4\n"
"blt 98f\n"
@@ -975,19 +977,19 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr d8, [x5, #0x0]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "ldr x27, [x5, #0x8]\n"
+ "ldr x20, [x5, #0x8]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "ldr x26, [x5, #0x18]\n"
+ "ldr x27, [x5, #0x18]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"ldr d9, [x5, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "ldr x12, [x5, #0x28]\n"
+ "ldr x26, [x5, #0x28]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "ldr x11, [x17, #0x8]\n"
+ "ldr x25, [x17, #0x8]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
- "ldr x10, [x16, #0x8]\n"
+ "ldr x24, [x16, #0x8]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "ldr x9, [x15, #0x8]\n"
+ "ldr x23, [x15, #0x8]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
"ldr d10, [x5, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
@@ -998,28 +1000,28 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr d2, [x15, #0x0]\n"
"fmla v27.4s, v11.4s, v3.s[3]\n"
"ldr d3, [x14, #0x0]\n"
- "ldr x28, [x14, #0x8]\n"
+ "ldr x22, [x14, #0x8]\n"
"fmla v28.4s, v11.4s, v4.s[3]\n"
"ldr d4, [x13, #0x0]\n"
"sub x8, x8, #0x4\n"
"ldr d11, [x5, #0x30]\n"
"cmp x8, #0x8\n"
- "mov v8.d[1], x27\n"
- "ldr x27, [x13, #0x8]\n"
- "mov v9.d[1], x26\n"
- "ldr x26, [x5, #0x38]\n"
+ "ldr x21, [x13, #0x8]\n"
+ "mov v8.d[1], x20\n"
+ "ldr x20, [x5, #0x38]\n"
+ "mov v9.d[1], x27\n"
"prfm pldl1keep, [x17, #0x80]\n"
- "mov v10.d[1], x12\n"
+ "mov v10.d[1], x26\n"
"prfm pldl1keep, [x16, #0x80]\n"
- "mov v0.d[1], x11\n"
+ "mov v0.d[1], x25\n"
"prfm pldl1keep, [x15, #0x80]\n"
- "mov v1.d[1], x10\n"
+ "mov v1.d[1], x24\n"
"prfm pldl1keep, [x14, #0x80]\n"
- "mov v2.d[1], x9\n"
- "mov v3.d[1], x28\n"
+ "mov v2.d[1], x23\n"
+ "mov v3.d[1], x22\n"
"prfm pldl1keep, [x13, #0x80]\n"
- "mov v4.d[1], x27\n"
- "mov v11.d[1], x26\n"
+ "mov v4.d[1], x21\n"
+ "mov v11.d[1], x20\n"
"bge 96b\n"
"97:" // Height 5: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -1064,38 +1066,38 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr s18, [x14], #0x4\n"
"ldr s17, [x13], #0x4\n"
"ldr q16, [x5, #0x0]\n"
+ "add x5, x5, #0x10\n"
"fmla v24.4s, v16.4s, v21.s[0]\n"
"fmla v25.4s, v16.4s, v20.s[0]\n"
- "add x5, x5, #0x10\n"
"fmla v26.4s, v16.4s, v19.s[0]\n"
"fmla v27.4s, v16.4s, v18.s[0]\n"
"fmla v28.4s, v16.4s, v17.s[0]\n"
"cbnz x8, 99b\n"
"100:" // Height 5: Multiply loop: No odd multiplies
- "ldr w26, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x7, x7, #0x1\n"
- "cmp x7, x26\n"
+ "cmp x7, x20\n"
"bne 93b\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
- "add x10, x11, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x6, #0x0]\n"
- "prfm pstl1keep, [x13, #0x0]\n"
- "prfm pstl1keep, [x12, #0x0]\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x10, #0x0]\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"tbz %x[flags], #1, 101f\n"
- "add x26, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x26]\n"
- "fmin v24.4s, v24.4s, v16.4s\n"
- "fmin v25.4s, v25.4s, v16.4s\n"
- "fmin v26.4s, v26.4s, v16.4s\n"
- "fmin v27.4s, v27.4s, v16.4s\n"
- "fmin v28.4s, v28.4s, v16.4s\n"
- "add x26, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x26]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
"fmax v25.4s, v25.4s, v16.4s\n"
"fmax v26.4s, v26.4s, v16.4s\n"
@@ -1106,95 +1108,95 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 104f\n"
"tbz x4, #1, 102f\n"
"str d24, [x6], #0x8\n"
- "str d25, [x13], #0x8\n"
- "str d26, [x12], #0x8\n"
- "str d27, [x11], #0x8\n"
- "str d28, [x10], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
+ "str d28, [x25], #0x8\n"
"tbz x4, #0, 103f\n"
"st1 { v24.s }[2], [x6]\n"
- "st1 { v25.s }[2], [x13]\n"
- "st1 { v26.s }[2], [x12]\n"
- "st1 { v27.s }[2], [x11]\n"
- "st1 { v28.s }[2], [x10]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
+ "st1 { v28.s }[2], [x25]\n"
"b 103f\n"
"102:" // Height 5: Partial direct writeback: partial_1_0
"str s24, [x6, #0x0]\n"
- "str s25, [x13, #0x0]\n"
- "str s26, [x12, #0x0]\n"
- "str s27, [x11, #0x0]\n"
- "str s28, [x10, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
+ "str s28, [x25, #0x0]\n"
"103:" // Height 5: Partial direct writeback: Done
"b 105f\n"
"104:" // Height 5: Full writeback
"str q24, [x6, #0x0]\n"
"add x6, x6, #0x10\n"
- "str q25, [x13, #0x0]\n"
- "str q26, [x12, #0x0]\n"
- "str q27, [x11, #0x0]\n"
- "str q28, [x10, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
+ "str q28, [x25, #0x0]\n"
"105:" // Height 5: Writeback done
"subs x4, x4, #0x4\n"
"bgt 86b\n"
"b 170f\n"
"106:" // Height 6
- "mov x3, %x[bias]\n"
+ "ldr x3, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x4, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x5, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x6, %x[output_ptr]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"107:" // Height 6: Column loop
"cbz x3, 108f\n"
"ldr q24, [x3, #0x0]\n"
+ "add x3, x3, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x3, x3, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
"b 113f\n"
"108:" // Height 6: no bias
"tbz %x[flags], #0, 112f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
- "add x10, x11, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x4, #0x4\n"
- "add x9, x10, x26, LSL #2\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "add x26, x27, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"bge 111f\n"
"tbz x4, #1, 109f\n"
"ldr d24, [x6], #0x8\n"
- "mov x26, #0x8\n"
- "ldr d25, [x13], #0x8\n"
- "ldr d26, [x12], #0x8\n"
- "ldr d27, [x11], #0x8\n"
- "ldr d28, [x10], #0x8\n"
- "ldr d29, [x9], #0x8\n"
+ "mov x20, #0x8\n"
+ "ldr d25, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
"tbz x4, #0, 110f\n"
"ld1 { v24.s }[2], [x6]\n"
- "ld1 { v25.s }[2], [x13]\n"
- "ld1 { v26.s }[2], [x12]\n"
- "ld1 { v27.s }[2], [x11]\n"
- "ld1 { v28.s }[2], [x10]\n"
- "ld1 { v29.s }[2], [x9]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v28.s }[2], [x25]\n"
+ "ld1 { v29.s }[2], [x24]\n"
"b 110f\n"
"109:" // Height 6: Partial accumulate: partial_1_0
"ldr s24, [x6, #0x0]\n"
- "mov x26, #0x0\n"
- "ldr s25, [x13, #0x0]\n"
- "ldr s26, [x12, #0x0]\n"
- "ldr s27, [x11, #0x0]\n"
- "ldr s28, [x10, #0x0]\n"
- "ldr s29, [x9, #0x0]\n"
+ "mov x20, #0x0\n"
+ "ldr s25, [x28, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s28, [x25, #0x0]\n"
+ "ldr s29, [x24, #0x0]\n"
"110:" // Height 6: Partial accumulate: Done
- "sub x6, x6, x26\n"
+ "sub x6, x6, x20\n"
"b 113f\n"
"111:" // Height 6: full accumulate
"ldr q24, [x6, #0x0]\n"
- "ldr q25, [x13, #0x0]\n"
- "ldr q26, [x12, #0x0]\n"
- "ldr q27, [x11, #0x0]\n"
- "ldr q28, [x10, #0x0]\n"
- "ldr q29, [x9, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q29, [x24, #0x0]\n"
"b 113f\n"
"112:" // Height 6: no accumulate
"movi v24.16b, #0x0\n"
@@ -1206,34 +1208,34 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"113:" // Height 6: setup done
"mov x7, #0x0\n"
"114:" // Height 6: String loop
- "ldr x26, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w8, [x26, x7, LSL #0x2]\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w8, [x20, x7, LSL #0x2]\n"
"tbz %x[flags], #3, 115f\n"
- "ldr x26, [%x[input_ptr], x7, LSL #0x3]\n"
- "add x26, x26, x27, LSL #3\n"
- "ldr x17, [x26, #0x0]\n"
- "ldr x16, [x26, #0x8]\n"
- "ldr x15, [x26, #0x10]\n"
- "ldr x14, [x26, #0x18]\n"
- "ldr x13, [x26, #0x20]\n"
- "ldr x12, [x26, #0x28]\n"
+ "ldr x20, [%x[input_ptr], x7, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x17, [x20, #0x0]\n"
+ "ldr x16, [x20, #0x8]\n"
+ "ldr x15, [x20, #0x10]\n"
+ "ldr x14, [x20, #0x18]\n"
+ "ldr x13, [x20, #0x20]\n"
+ "ldr x12, [x20, #0x28]\n"
"cbnz x7, 116f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x17, x17, x26, LSL #2\n"
- "add x16, x16, x26, LSL #2\n"
- "add x15, x15, x26, LSL #2\n"
- "add x14, x14, x26, LSL #2\n"
- "add x13, x13, x26, LSL #2\n"
- "add x12, x12, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x17, x17, x20, LSL #2\n"
+ "add x16, x16, x20, LSL #2\n"
+ "add x15, x15, x20, LSL #2\n"
+ "add x14, x14, x20, LSL #2\n"
+ "add x13, x13, x20, LSL #2\n"
+ "add x12, x12, x20, LSL #2\n"
"b 116f\n"
"115:" // Height 6: setup direct input
"mov x17, %x[input_ptr]\n"
- "add x16, x17, x27, LSL #2\n"
- "add x15, x16, x27, LSL #2\n"
- "add x14, x15, x27, LSL #2\n"
- "add x13, x14, x27, LSL #2\n"
- "add x12, x13, x27, LSL #2\n"
+ "add x16, x17, x21, LSL #2\n"
+ "add x15, x16, x21, LSL #2\n"
+ "add x14, x15, x21, LSL #2\n"
+ "add x13, x14, x21, LSL #2\n"
+ "add x12, x13, x21, LSL #2\n"
"116:" // Height 6: input setup done
"cmp x8, #0x4\n"
"blt 119f\n"
@@ -1277,13 +1279,13 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v24.4s, v10.4s, v0.s[2]\n"
"ldr x26, [x17, #0x8]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "ldr x11, [x16, #0x8]\n"
+ "ldr x25, [x16, #0x8]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
- "ldr x10, [x15, #0x8]\n"
+ "ldr x24, [x15, #0x8]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "sub x8, x8, #0x4\n"
+ "ldr x23, [x14, #0x8]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
- "cmp x8, #0x8\n"
+ "ldr x22, [x13, #0x8]\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
"ldr d10, [x5, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
@@ -1299,26 +1301,26 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v29.4s, v11.4s, v5.s[3]\n"
"ldr d5, [x12, #0x0]\n"
"ldr d11, [x5, #0x30]\n"
+ "sub x8, x8, #0x4\n"
+ "ldr x21, [x12, #0x8]\n"
+ "cmp x8, #0x8\n"
+ "ldr x20, [x5, #0x38]\n"
"mov v8.d[1], x9\n"
- "ldr x9, [x14, #0x8]\n"
- "mov v9.d[1], x28\n"
- "ldr x28, [x13, #0x8]\n"
- "mov v10.d[1], x27\n"
- "ldr x27, [x12, #0x8]\n"
- "mov v0.d[1], x26\n"
- "ldr x26, [x5, #0x38]\n"
- "mov v1.d[1], x11\n"
"prfm pldl1keep, [x17, #0x80]\n"
- "mov v2.d[1], x10\n"
+ "mov v9.d[1], x28\n"
"prfm pldl1keep, [x16, #0x80]\n"
- "mov v3.d[1], x9\n"
+ "mov v10.d[1], x27\n"
"prfm pldl1keep, [x15, #0x80]\n"
- "mov v4.d[1], x28\n"
+ "mov v0.d[1], x26\n"
"prfm pldl1keep, [x14, #0x80]\n"
- "mov v5.d[1], x27\n"
+ "mov v1.d[1], x25\n"
"prfm pldl1keep, [x13, #0x80]\n"
- "mov v11.d[1], x26\n"
+ "mov v2.d[1], x24\n"
"prfm pldl1keep, [x12, #0x80]\n"
+ "mov v3.d[1], x23\n"
+ "mov v4.d[1], x22\n"
+ "mov v5.d[1], x21\n"
+ "mov v11.d[1], x20\n"
"bge 117b\n"
"118:" // Height 6: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -1370,42 +1372,42 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr s18, [x13], #0x4\n"
"ldr s17, [x12], #0x4\n"
"ldr q16, [x5, #0x0]\n"
+ "add x5, x5, #0x10\n"
"fmla v24.4s, v16.4s, v22.s[0]\n"
"fmla v25.4s, v16.4s, v21.s[0]\n"
- "add x5, x5, #0x10\n"
"fmla v26.4s, v16.4s, v20.s[0]\n"
"fmla v27.4s, v16.4s, v19.s[0]\n"
"fmla v28.4s, v16.4s, v18.s[0]\n"
"fmla v29.4s, v16.4s, v17.s[0]\n"
"cbnz x8, 120b\n"
"121:" // Height 6: Multiply loop: No odd multiplies
- "ldr w26, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x7, x7, #0x1\n"
- "cmp x7, x26\n"
+ "cmp x7, x20\n"
"bne 114b\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
- "add x10, x11, x26, LSL #2\n"
- "add x9, x10, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x6, #0x0]\n"
- "prfm pstl1keep, [x13, #0x0]\n"
- "prfm pstl1keep, [x12, #0x0]\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x10, #0x0]\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 122f\n"
- "add x26, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x26]\n"
- "fmin v24.4s, v24.4s, v16.4s\n"
- "fmin v25.4s, v25.4s, v16.4s\n"
- "fmin v26.4s, v26.4s, v16.4s\n"
- "fmin v27.4s, v27.4s, v16.4s\n"
- "fmin v28.4s, v28.4s, v16.4s\n"
- "fmin v29.4s, v29.4s, v16.4s\n"
- "add x26, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x26]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
"fmax v25.4s, v25.4s, v16.4s\n"
"fmax v26.4s, v26.4s, v16.4s\n"
@@ -1417,51 +1419,51 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 125f\n"
"tbz x4, #1, 123f\n"
"str d24, [x6], #0x8\n"
- "str d25, [x13], #0x8\n"
- "str d26, [x12], #0x8\n"
- "str d27, [x11], #0x8\n"
- "str d28, [x10], #0x8\n"
- "str d29, [x9], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
+ "str d28, [x25], #0x8\n"
+ "str d29, [x24], #0x8\n"
"tbz x4, #0, 124f\n"
"st1 { v24.s }[2], [x6]\n"
- "st1 { v25.s }[2], [x13]\n"
- "st1 { v26.s }[2], [x12]\n"
- "st1 { v27.s }[2], [x11]\n"
- "st1 { v28.s }[2], [x10]\n"
- "st1 { v29.s }[2], [x9]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
+ "st1 { v28.s }[2], [x25]\n"
+ "st1 { v29.s }[2], [x24]\n"
"b 124f\n"
"123:" // Height 6: Partial direct writeback: partial_1_0
"str s24, [x6, #0x0]\n"
- "str s25, [x13, #0x0]\n"
- "str s26, [x12, #0x0]\n"
- "str s27, [x11, #0x0]\n"
- "str s28, [x10, #0x0]\n"
- "str s29, [x9, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
+ "str s28, [x25, #0x0]\n"
+ "str s29, [x24, #0x0]\n"
"124:" // Height 6: Partial direct writeback: Done
"b 126f\n"
"125:" // Height 6: Full writeback
"str q24, [x6, #0x0]\n"
"add x6, x6, #0x10\n"
- "str q25, [x13, #0x0]\n"
- "str q26, [x12, #0x0]\n"
- "str q27, [x11, #0x0]\n"
- "str q28, [x10, #0x0]\n"
- "str q29, [x9, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
+ "str q28, [x25, #0x0]\n"
+ "str q29, [x24, #0x0]\n"
"126:" // Height 6: Writeback done
"subs x4, x4, #0x4\n"
"bgt 107b\n"
"b 170f\n"
"127:" // Height 7
- "mov x3, %x[bias]\n"
+ "ldr x3, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x4, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x5, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x6, %x[output_ptr]\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"128:" // Height 7: Column loop
"cbz x3, 129f\n"
"ldr q24, [x3, #0x0]\n"
+ "add x3, x3, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x3, x3, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
@@ -1469,53 +1471,53 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 134f\n"
"129:" // Height 7: no bias
"tbz %x[flags], #0, 133f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
- "add x10, x11, x26, LSL #2\n"
- "add x9, x10, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x4, #0x4\n"
- "add x28, x9, x26, LSL #2\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "add x26, x27, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"bge 132f\n"
"tbz x4, #1, 130f\n"
"ldr d24, [x6], #0x8\n"
- "mov x26, #0x8\n"
- "ldr d25, [x13], #0x8\n"
- "ldr d26, [x12], #0x8\n"
- "ldr d27, [x11], #0x8\n"
- "ldr d28, [x10], #0x8\n"
- "ldr d29, [x9], #0x8\n"
- "ldr d30, [x28], #0x8\n"
+ "mov x20, #0x8\n"
+ "ldr d25, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d30, [x23], #0x8\n"
"tbz x4, #0, 131f\n"
"ld1 { v24.s }[2], [x6]\n"
- "ld1 { v25.s }[2], [x13]\n"
- "ld1 { v26.s }[2], [x12]\n"
- "ld1 { v27.s }[2], [x11]\n"
- "ld1 { v28.s }[2], [x10]\n"
- "ld1 { v29.s }[2], [x9]\n"
- "ld1 { v30.s }[2], [x28]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v28.s }[2], [x25]\n"
+ "ld1 { v29.s }[2], [x24]\n"
+ "ld1 { v30.s }[2], [x23]\n"
"b 131f\n"
"130:" // Height 7: Partial accumulate: partial_1_0
"ldr s24, [x6, #0x0]\n"
- "mov x26, #0x0\n"
- "ldr s25, [x13, #0x0]\n"
- "ldr s26, [x12, #0x0]\n"
- "ldr s27, [x11, #0x0]\n"
- "ldr s28, [x10, #0x0]\n"
- "ldr s29, [x9, #0x0]\n"
- "ldr s30, [x28, #0x0]\n"
+ "mov x20, #0x0\n"
+ "ldr s25, [x28, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s28, [x25, #0x0]\n"
+ "ldr s29, [x24, #0x0]\n"
+ "ldr s30, [x23, #0x0]\n"
"131:" // Height 7: Partial accumulate: Done
- "sub x6, x6, x26\n"
+ "sub x6, x6, x20\n"
"b 134f\n"
"132:" // Height 7: full accumulate
"ldr q24, [x6, #0x0]\n"
- "ldr q25, [x13, #0x0]\n"
- "ldr q26, [x12, #0x0]\n"
- "ldr q27, [x11, #0x0]\n"
- "ldr q28, [x10, #0x0]\n"
- "ldr q29, [x9, #0x0]\n"
- "ldr q30, [x28, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q29, [x24, #0x0]\n"
+ "ldr q30, [x23, #0x0]\n"
"b 134f\n"
"133:" // Height 7: no accumulate
"movi v24.16b, #0x0\n"
@@ -1528,37 +1530,37 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"134:" // Height 7: setup done
"mov x7, #0x0\n"
"135:" // Height 7: String loop
- "ldr x26, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w8, [x26, x7, LSL #0x2]\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w8, [x20, x7, LSL #0x2]\n"
"tbz %x[flags], #3, 136f\n"
- "ldr x26, [%x[input_ptr], x7, LSL #0x3]\n"
- "add x26, x26, x27, LSL #3\n"
- "ldr x17, [x26, #0x0]\n"
- "ldr x16, [x26, #0x8]\n"
- "ldr x15, [x26, #0x10]\n"
- "ldr x14, [x26, #0x18]\n"
- "ldr x13, [x26, #0x20]\n"
- "ldr x12, [x26, #0x28]\n"
- "ldr x11, [x26, #0x30]\n"
+ "ldr x20, [%x[input_ptr], x7, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x17, [x20, #0x0]\n"
+ "ldr x16, [x20, #0x8]\n"
+ "ldr x15, [x20, #0x10]\n"
+ "ldr x14, [x20, #0x18]\n"
+ "ldr x13, [x20, #0x20]\n"
+ "ldr x12, [x20, #0x28]\n"
+ "ldr x11, [x20, #0x30]\n"
"cbnz x7, 137f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x17, x17, x26, LSL #2\n"
- "add x16, x16, x26, LSL #2\n"
- "add x15, x15, x26, LSL #2\n"
- "add x14, x14, x26, LSL #2\n"
- "add x13, x13, x26, LSL #2\n"
- "add x12, x12, x26, LSL #2\n"
- "add x11, x11, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x17, x17, x20, LSL #2\n"
+ "add x16, x16, x20, LSL #2\n"
+ "add x15, x15, x20, LSL #2\n"
+ "add x14, x14, x20, LSL #2\n"
+ "add x13, x13, x20, LSL #2\n"
+ "add x12, x12, x20, LSL #2\n"
+ "add x11, x11, x20, LSL #2\n"
"b 137f\n"
"136:" // Height 7: setup direct input
"mov x17, %x[input_ptr]\n"
- "add x16, x17, x27, LSL #2\n"
- "add x15, x16, x27, LSL #2\n"
- "add x14, x15, x27, LSL #2\n"
- "add x13, x14, x27, LSL #2\n"
- "add x12, x13, x27, LSL #2\n"
- "add x11, x12, x27, LSL #2\n"
+ "add x16, x17, x21, LSL #2\n"
+ "add x15, x16, x21, LSL #2\n"
+ "add x14, x15, x21, LSL #2\n"
+ "add x13, x14, x21, LSL #2\n"
+ "add x12, x13, x21, LSL #2\n"
+ "add x11, x12, x21, LSL #2\n"
"137:" // Height 7: input setup done
"cmp x8, #0x4\n"
"blt 140f\n"
@@ -1595,27 +1597,27 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr d8, [x5, #0x0]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "ldr x26, [x5, #0x8]\n"
+ "ldr x10, [x5, #0x8]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "ldr x10, [x5, #0x18]\n"
+ "ldr x9, [x5, #0x18]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "ldr x9, [x5, #0x28]\n"
+ "ldr x28, [x5, #0x28]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "ldr x28, [x17, #0x8]\n"
+ "ldr x27, [x17, #0x8]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
"ldr d9, [x5, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "ldr x27, [x16, #0x8]\n"
+ "ldr x26, [x16, #0x8]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "sub x8, x8, #0x4\n"
+ "ldr x25, [x15, #0x8]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
- "cmp x8, #0x8\n"
+ "ldr x24, [x14, #0x8]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "mov v8.d[1], x26\n"
+ "ldr x23, [x13, #0x8]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
- "ldr x26, [x15, #0x8]\n"
+ "ldr x22, [x12, #0x8]\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
- "prfm pldl1keep, [x17, #0x80]\n"
+ "ldr x21, [x11, #0x8]\n"
"fmla v30.4s, v10.4s, v6.s[2]\n"
"ldr d10, [x5, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
@@ -1633,27 +1635,27 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v30.4s, v11.4s, v6.s[3]\n"
"ldr d6, [x11, #0x0]\n"
"ldr d11, [x5, #0x30]\n"
- "mov v9.d[1], x10\n"
- "ldr x10, [x14, #0x8]\n"
- "mov v10.d[1], x9\n"
- "ldr x9, [x13, #0x8]\n"
- "mov v0.d[1], x28\n"
- "ldr x28, [x12, #0x8]\n"
- "mov v1.d[1], x27\n"
- "ldr x27, [x11, #0x8]\n"
- "mov v2.d[1], x26\n"
- "ldr x26, [x5, #0x38]\n"
- "mov v3.d[1], x10\n"
+ "sub x8, x8, #0x4\n"
+ "ldr x20, [x5, #0x38]\n"
+ "cmp x8, #0x8\n"
+ "mov v8.d[1], x10\n"
+ "prfm pldl1keep, [x17, #0x80]\n"
"prfm pldl1keep, [x16, #0x80]\n"
- "mov v4.d[1], x9\n"
+ "mov v9.d[1], x9\n"
"prfm pldl1keep, [x15, #0x80]\n"
- "mov v5.d[1], x28\n"
+ "mov v10.d[1], x28\n"
"prfm pldl1keep, [x14, #0x80]\n"
- "mov v6.d[1], x27\n"
+ "mov v0.d[1], x27\n"
"prfm pldl1keep, [x13, #0x80]\n"
- "mov v11.d[1], x26\n"
+ "mov v1.d[1], x26\n"
"prfm pldl1keep, [x12, #0x80]\n"
+ "mov v2.d[1], x25\n"
"prfm pldl1keep, [x11, #0x80]\n"
+ "mov v3.d[1], x24\n"
+ "mov v4.d[1], x23\n"
+ "mov v5.d[1], x22\n"
+ "mov v6.d[1], x21\n"
+ "mov v11.d[1], x20\n"
"bge 138b\n"
"139:" // Height 7: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -1712,9 +1714,9 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr s18, [x12], #0x4\n"
"ldr s17, [x11], #0x4\n"
"ldr q16, [x5, #0x0]\n"
+ "add x5, x5, #0x10\n"
"fmla v24.4s, v16.4s, v23.s[0]\n"
"fmla v25.4s, v16.4s, v22.s[0]\n"
- "add x5, x5, #0x10\n"
"fmla v26.4s, v16.4s, v21.s[0]\n"
"fmla v27.4s, v16.4s, v20.s[0]\n"
"fmla v28.4s, v16.4s, v19.s[0]\n"
@@ -1722,36 +1724,36 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v30.4s, v16.4s, v17.s[0]\n"
"cbnz x8, 141b\n"
"142:" // Height 7: Multiply loop: No odd multiplies
- "ldr w26, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x7, x7, #0x1\n"
- "cmp x7, x26\n"
+ "cmp x7, x20\n"
"bne 135b\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
- "add x10, x11, x26, LSL #2\n"
- "add x9, x10, x26, LSL #2\n"
- "add x28, x9, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x6, #0x0]\n"
- "prfm pstl1keep, [x13, #0x0]\n"
- "prfm pstl1keep, [x12, #0x0]\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x10, #0x0]\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"prfm pstl1keep, [x28, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 143f\n"
- "add x26, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x26]\n"
- "fmin v24.4s, v24.4s, v16.4s\n"
- "fmin v25.4s, v25.4s, v16.4s\n"
- "fmin v26.4s, v26.4s, v16.4s\n"
- "fmin v27.4s, v27.4s, v16.4s\n"
- "fmin v28.4s, v28.4s, v16.4s\n"
- "fmin v29.4s, v29.4s, v16.4s\n"
- "fmin v30.4s, v30.4s, v16.4s\n"
- "add x26, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x26]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
"fmax v25.4s, v25.4s, v16.4s\n"
"fmax v26.4s, v26.4s, v16.4s\n"
@@ -1764,58 +1766,59 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 146f\n"
"tbz x4, #1, 144f\n"
"str d24, [x6], #0x8\n"
- "str d25, [x13], #0x8\n"
- "str d26, [x12], #0x8\n"
- "str d27, [x11], #0x8\n"
- "str d28, [x10], #0x8\n"
- "str d29, [x9], #0x8\n"
- "str d30, [x28], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
+ "str d28, [x25], #0x8\n"
+ "str d29, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
"tbz x4, #0, 145f\n"
"st1 { v24.s }[2], [x6]\n"
- "st1 { v25.s }[2], [x13]\n"
- "st1 { v26.s }[2], [x12]\n"
- "st1 { v27.s }[2], [x11]\n"
- "st1 { v28.s }[2], [x10]\n"
- "st1 { v29.s }[2], [x9]\n"
- "st1 { v30.s }[2], [x28]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
+ "st1 { v28.s }[2], [x25]\n"
+ "st1 { v29.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
"b 145f\n"
"144:" // Height 7: Partial direct writeback: partial_1_0
"str s24, [x6, #0x0]\n"
- "str s25, [x13, #0x0]\n"
- "str s26, [x12, #0x0]\n"
- "str s27, [x11, #0x0]\n"
- "str s28, [x10, #0x0]\n"
- "str s29, [x9, #0x0]\n"
- "str s30, [x28, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
+ "str s28, [x25, #0x0]\n"
+ "str s29, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
"145:" // Height 7: Partial direct writeback: Done
"b 147f\n"
"146:" // Height 7: Full writeback
"str q24, [x6, #0x0]\n"
"add x6, x6, #0x10\n"
- "str q25, [x13, #0x0]\n"
- "str q26, [x12, #0x0]\n"
- "str q27, [x11, #0x0]\n"
- "str q28, [x10, #0x0]\n"
- "str q29, [x9, #0x0]\n"
- "str q30, [x28, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
+ "str q28, [x25, #0x0]\n"
+ "str q29, [x24, #0x0]\n"
+ "str q30, [x23, #0x0]\n"
"147:" // Height 7: Writeback done
"subs x4, x4, #0x4\n"
"bgt 128b\n"
"b 170f\n"
"148:" // Height 8
- "ldr x27, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x26, #0x20\n"
- "mov x3, %x[bias]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "mov x20, #0x20\n"
+ "ldr x6, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "ldr x3, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x4, [%x[args_ptr], %[offsetof_N]]\n"
+ "madd x20, x21, x20, x6\n"
"ldr x5, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x6, %x[output_ptr]\n"
- "madd %x[output_ptr], x27, x26, %x[output_ptr]\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"149:" // Height 8: Column loop
"cbz x3, 150f\n"
"ldr q24, [x3, #0x0]\n"
+ "add x3, x3, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x3, x3, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
@@ -1824,58 +1827,58 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"b 155f\n"
"150:" // Height 8: no bias
"tbz %x[flags], #0, 154f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
- "add x10, x11, x26, LSL #2\n"
- "add x9, x10, x26, LSL #2\n"
- "add x28, x9, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x4, #0x4\n"
- "add x27, x28, x26, LSL #2\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "add x26, x27, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"bge 153f\n"
"tbz x4, #1, 151f\n"
"ldr d24, [x6], #0x8\n"
- "mov x26, #0x8\n"
- "ldr d25, [x13], #0x8\n"
- "ldr d26, [x12], #0x8\n"
- "ldr d27, [x11], #0x8\n"
- "ldr d28, [x10], #0x8\n"
- "ldr d29, [x9], #0x8\n"
- "ldr d30, [x28], #0x8\n"
- "ldr d31, [x27], #0x8\n"
+ "mov x20, #0x8\n"
+ "ldr d25, [x28], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d30, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x4, #0, 152f\n"
"ld1 { v24.s }[2], [x6]\n"
- "ld1 { v25.s }[2], [x13]\n"
- "ld1 { v26.s }[2], [x12]\n"
- "ld1 { v27.s }[2], [x11]\n"
- "ld1 { v28.s }[2], [x10]\n"
- "ld1 { v29.s }[2], [x9]\n"
- "ld1 { v30.s }[2], [x28]\n"
- "ld1 { v31.s }[2], [x27]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v28.s }[2], [x25]\n"
+ "ld1 { v29.s }[2], [x24]\n"
+ "ld1 { v30.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 152f\n"
"151:" // Height 8: Partial accumulate: partial_1_0
"ldr s24, [x6, #0x0]\n"
- "mov x26, #0x0\n"
- "ldr s25, [x13, #0x0]\n"
- "ldr s26, [x12, #0x0]\n"
- "ldr s27, [x11, #0x0]\n"
- "ldr s28, [x10, #0x0]\n"
- "ldr s29, [x9, #0x0]\n"
- "ldr s30, [x28, #0x0]\n"
- "ldr s31, [x27, #0x0]\n"
+ "mov x20, #0x0\n"
+ "ldr s25, [x28, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s28, [x25, #0x0]\n"
+ "ldr s29, [x24, #0x0]\n"
+ "ldr s30, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"152:" // Height 8: Partial accumulate: Done
- "sub x6, x6, x26\n"
+ "sub x6, x6, x20\n"
"b 155f\n"
"153:" // Height 8: full accumulate
"ldr q24, [x6, #0x0]\n"
- "ldr q25, [x13, #0x0]\n"
- "ldr q26, [x12, #0x0]\n"
- "ldr q27, [x11, #0x0]\n"
- "ldr q28, [x10, #0x0]\n"
- "ldr q29, [x9, #0x0]\n"
- "ldr q30, [x28, #0x0]\n"
- "ldr q31, [x27, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q29, [x24, #0x0]\n"
+ "ldr q30, [x23, #0x0]\n"
+ "ldr q31, [x22, #0x0]\n"
"b 155f\n"
"154:" // Height 8: no accumulate
"movi v24.16b, #0x0\n"
@@ -1889,40 +1892,40 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"155:" // Height 8: setup done
"mov x7, #0x0\n"
"156:" // Height 8: String loop
- "ldr x26, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w8, [x26, x7, LSL #0x2]\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w8, [x20, x7, LSL #0x2]\n"
"tbz %x[flags], #3, 157f\n"
- "ldr x26, [%x[input_ptr], x7, LSL #0x3]\n"
- "add x26, x26, x27, LSL #3\n"
- "ldr x17, [x26, #0x0]\n"
- "ldr x16, [x26, #0x8]\n"
- "ldr x15, [x26, #0x10]\n"
- "ldr x14, [x26, #0x18]\n"
- "ldr x13, [x26, #0x20]\n"
- "ldr x12, [x26, #0x28]\n"
- "ldr x11, [x26, #0x30]\n"
- "ldr x27, [x26, #0x38]\n"
+ "ldr x20, [%x[input_ptr], x7, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x17, [x20, #0x0]\n"
+ "ldr x16, [x20, #0x8]\n"
+ "ldr x15, [x20, #0x10]\n"
+ "ldr x14, [x20, #0x18]\n"
+ "ldr x13, [x20, #0x20]\n"
+ "ldr x12, [x20, #0x28]\n"
+ "ldr x11, [x20, #0x30]\n"
+ "ldr x9, [x20, #0x38]\n"
"cbnz x7, 158f\n"
- "ldr x26, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x17, x17, x26, LSL #2\n"
- "add x16, x16, x26, LSL #2\n"
- "add x15, x15, x26, LSL #2\n"
- "add x14, x14, x26, LSL #2\n"
- "add x13, x13, x26, LSL #2\n"
- "add x12, x12, x26, LSL #2\n"
- "add x11, x11, x26, LSL #2\n"
- "add x27, x27, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x17, x17, x20, LSL #2\n"
+ "add x16, x16, x20, LSL #2\n"
+ "add x15, x15, x20, LSL #2\n"
+ "add x14, x14, x20, LSL #2\n"
+ "add x13, x13, x20, LSL #2\n"
+ "add x12, x12, x20, LSL #2\n"
+ "add x11, x11, x20, LSL #2\n"
+ "add x9, x9, x20, LSL #2\n"
"b 158f\n"
"157:" // Height 8: setup direct input
"mov x17, %x[input_ptr]\n"
- "add x16, x17, x27, LSL #2\n"
- "add x15, x16, x27, LSL #2\n"
- "add x14, x15, x27, LSL #2\n"
- "add x13, x14, x27, LSL #2\n"
- "add x12, x13, x27, LSL #2\n"
- "add x11, x12, x27, LSL #2\n"
- "add x27, x11, x27, LSL #2\n"
+ "add x16, x17, x21, LSL #2\n"
+ "add x15, x16, x21, LSL #2\n"
+ "add x14, x15, x21, LSL #2\n"
+ "add x13, x14, x21, LSL #2\n"
+ "add x12, x13, x21, LSL #2\n"
+ "add x11, x12, x21, LSL #2\n"
+ "add x9, x11, x21, LSL #2\n"
"158:" // Height 8: input setup done
"cmp x8, #0x4\n"
"blt 161f\n"
@@ -1934,7 +1937,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr q4, [x13, #0x0]\n"
"ldr q5, [x12, #0x0]\n"
"ldr q6, [x11, #0x0]\n"
- "ldr q7, [x27, #0x0]\n"
+ "ldr q7, [x9, #0x0]\n"
"ldr q8, [x5, #0x0]\n"
"ldr q9, [x5, #0x10]\n"
"ldr q10, [x5, #0x20]\n"
@@ -1956,37 +1959,37 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v30.4s, v8.4s, v6.s[0]\n"
"add x11, x11, #0x10\n"
"fmla v31.4s, v8.4s, v7.s[0]\n"
- "add x27, x27, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"add x5, x5, #0x40\n"
"ldr d8, [x5, #0x0]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "ldr x26, [x5, #0x8]\n"
+ "ldr x22, [x5, #0x8]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "sub x8, x8, #0x4\n"
+ "ldr x28, [x5, #0x18]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "cmp x8, #0x8\n"
+ "ldr x21, [x5, #0x28]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "mov v8.d[1], x26\n"
+ "ldr x20, [x17, #0x8]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
- "ldr x26, [x5, #0x18]\n"
+ "ldr x27, [x16, #0x8]\n"
"fmla v31.4s, v9.4s, v7.s[1]\n"
"ldr d9, [x5, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "prfm pldl1keep, [x17, #0x80]\n"
+ "ldr x26, [x15, #0x8]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x16, #0x80]\n"
+ "ldr x25, [x14, #0x8]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
- "prfm pldl1keep, [x15, #0x80]\n"
+ "ldr x24, [x13, #0x8]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "mov v9.d[1], x26\n"
+ "ldr x23, [x12, #0x8]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
- "ldr x26, [x5, #0x28]\n"
+ "sub x8, x8, #0x4\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
- "prfm pldl1keep, [x14, #0x80]\n"
+ "cmp x8, #0x8\n"
"fmla v30.4s, v10.4s, v6.s[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v8.d[1], x22\n"
"fmla v31.4s, v10.4s, v7.s[2]\n"
"ldr d10, [x5, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
@@ -2003,31 +2006,31 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr d5, [x12, #0x0]\n"
"fmla v30.4s, v11.4s, v6.s[3]\n"
"ldr d6, [x11, #0x0]\n"
+ "ldr x22, [x11, #0x8]\n"
"fmla v31.4s, v11.4s, v7.s[3]\n"
- "ldr d7, [x27, #0x0]\n"
+ "ldr d7, [x9, #0x0]\n"
+ "mov v9.d[1], x28\n"
"ldr d11, [x5, #0x30]\n"
- "mov v10.d[1], x26\n"
- "ldr x26, [x17, #0x8]\n"
- "mov v0.d[1], x26\n"
- "ldr x26, [x16, #0x8]\n"
- "mov v1.d[1], x26\n"
- "ldr x26, [x15, #0x8]\n"
+ "mov v10.d[1], x21\n"
+ "ldr x21, [x9, #0x8]\n"
+ "mov v0.d[1], x20\n"
+ "ldr x20, [x5, #0x38]\n"
+ "mov v1.d[1], x27\n"
+ "prfm pldl1keep, [x17, #0x80]\n"
"mov v2.d[1], x26\n"
- "ldr x26, [x14, #0x8]\n"
- "mov v3.d[1], x26\n"
- "ldr x26, [x13, #0x8]\n"
- "mov v4.d[1], x26\n"
- "ldr x26, [x12, #0x8]\n"
- "mov v5.d[1], x26\n"
- "ldr x26, [x11, #0x8]\n"
- "mov v6.d[1], x26\n"
- "ldr x26, [x27, #0x8]\n"
- "mov v7.d[1], x26\n"
- "ldr x26, [x5, #0x38]\n"
- "mov v11.d[1], x26\n"
+ "prfm pldl1keep, [x16, #0x80]\n"
+ "mov v3.d[1], x25\n"
+ "prfm pldl1keep, [x15, #0x80]\n"
+ "mov v4.d[1], x24\n"
+ "prfm pldl1keep, [x14, #0x80]\n"
+ "mov v5.d[1], x23\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v6.d[1], x22\n"
"prfm pldl1keep, [x12, #0x80]\n"
+ "mov v7.d[1], x21\n"
"prfm pldl1keep, [x11, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "mov v11.d[1], x20\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
"bge 159b\n"
"160:" // Height 8: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -2045,7 +2048,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v30.4s, v8.4s, v6.s[0]\n"
"add x11, x11, #0x10\n"
"fmla v31.4s, v8.4s, v7.s[0]\n"
- "add x27, x27, #0x10\n"
+ "add x9, x9, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"sub x8, x8, #0x4\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
@@ -2063,7 +2066,7 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v31.4s, v9.4s, v7.s[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
"add x5, x5, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
@@ -2091,11 +2094,11 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"ldr s20, [x13], #0x4\n"
"ldr s19, [x12], #0x4\n"
"ldr s18, [x11], #0x4\n"
- "ldr s17, [x27], #0x4\n"
+ "ldr s17, [x9], #0x4\n"
"ldr q16, [x5, #0x0]\n"
+ "add x5, x5, #0x10\n"
"fmla v24.4s, v16.4s, v0.s[0]\n"
"fmla v25.4s, v16.4s, v23.s[0]\n"
- "add x5, x5, #0x10\n"
"fmla v26.4s, v16.4s, v22.s[0]\n"
"fmla v27.4s, v16.4s, v21.s[0]\n"
"fmla v28.4s, v16.4s, v20.s[0]\n"
@@ -2104,39 +2107,39 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"fmla v31.4s, v16.4s, v17.s[0]\n"
"cbnz x8, 162b\n"
"163:" // Height 8: Multiply loop: No odd multiplies
- "ldr w26, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x7, x7, #0x1\n"
- "cmp x7, x26\n"
+ "cmp x7, x20\n"
"bne 156b\n"
- "ldr x26, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x13, x6, x26, LSL #2\n"
- "add x12, x13, x26, LSL #2\n"
- "add x11, x12, x26, LSL #2\n"
- "add x10, x11, x26, LSL #2\n"
- "add x9, x10, x26, LSL #2\n"
- "add x28, x9, x26, LSL #2\n"
- "add x27, x28, x26, LSL #2\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"prfm pstl1keep, [x6, #0x0]\n"
- "prfm pstl1keep, [x13, #0x0]\n"
- "prfm pstl1keep, [x12, #0x0]\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x10, #0x0]\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "add x28, x6, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"prfm pstl1keep, [x28, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20, LSL #2\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 164f\n"
- "add x26, %x[args_ptr], %[offset_max]\n"
- "ld1r { v16.4s }, [x26]\n"
- "fmin v24.4s, v24.4s, v16.4s\n"
- "fmin v25.4s, v25.4s, v16.4s\n"
- "fmin v26.4s, v26.4s, v16.4s\n"
- "fmin v27.4s, v27.4s, v16.4s\n"
- "fmin v28.4s, v28.4s, v16.4s\n"
- "fmin v29.4s, v29.4s, v16.4s\n"
- "fmin v30.4s, v30.4s, v16.4s\n"
- "fmin v31.4s, v31.4s, v16.4s\n"
- "add x26, %x[args_ptr], %[offset_min]\n"
- "ld1r { v16.4s }, [x26]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
+ "add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "fmin v24.4s, v24.4s, v17.4s\n"
+ "fmin v25.4s, v25.4s, v17.4s\n"
+ "fmin v26.4s, v26.4s, v17.4s\n"
+ "fmin v27.4s, v27.4s, v17.4s\n"
+ "fmin v28.4s, v28.4s, v17.4s\n"
+ "fmin v29.4s, v29.4s, v17.4s\n"
+ "fmin v30.4s, v30.4s, v17.4s\n"
+ "fmin v31.4s, v31.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
"fmax v25.4s, v25.4s, v16.4s\n"
"fmax v26.4s, v26.4s, v16.4s\n"
@@ -2150,62 +2153,62 @@ void a64_hybrid_fp32_mla_8x4_a55 (
"bge 167f\n"
"tbz x4, #1, 165f\n"
"str d24, [x6], #0x8\n"
- "str d25, [x13], #0x8\n"
- "str d26, [x12], #0x8\n"
- "str d27, [x11], #0x8\n"
- "str d28, [x10], #0x8\n"
- "str d29, [x9], #0x8\n"
- "str d30, [x28], #0x8\n"
- "str d31, [x27], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
+ "str d28, [x25], #0x8\n"
+ "str d29, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x4, #0, 166f\n"
"st1 { v24.s }[2], [x6]\n"
- "st1 { v25.s }[2], [x13]\n"
- "st1 { v26.s }[2], [x12]\n"
- "st1 { v27.s }[2], [x11]\n"
- "st1 { v28.s }[2], [x10]\n"
- "st1 { v29.s }[2], [x9]\n"
- "st1 { v30.s }[2], [x28]\n"
- "st1 { v31.s }[2], [x27]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
+ "st1 { v28.s }[2], [x25]\n"
+ "st1 { v29.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
+ "st1 { v31.s }[2], [x22]\n"
"b 166f\n"
"165:" // Height 8: Partial direct writeback: partial_1_0
"str s24, [x6, #0x0]\n"
- "str s25, [x13, #0x0]\n"
- "str s26, [x12, #0x0]\n"
- "str s27, [x11, #0x0]\n"
- "str s28, [x10, #0x0]\n"
- "str s29, [x9, #0x0]\n"
- "str s30, [x28, #0x0]\n"
- "str s31, [x27, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
+ "str s28, [x25, #0x0]\n"
+ "str s29, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
+ "str s31, [x22, #0x0]\n"
"166:" // Height 8: Partial direct writeback: Done
"b 168f\n"
"167:" // Height 8: Full writeback
"str q24, [x6, #0x0]\n"
"add x6, x6, #0x10\n"
- "str q25, [x13, #0x0]\n"
- "str q26, [x12, #0x0]\n"
- "str q27, [x11, #0x0]\n"
- "str q28, [x10, #0x0]\n"
- "str q29, [x9, #0x0]\n"
- "str q30, [x28, #0x0]\n"
- "str q31, [x27, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
+ "str q28, [x25, #0x0]\n"
+ "str q29, [x24, #0x0]\n"
+ "str q30, [x23, #0x0]\n"
+ "str q31, [x22, #0x0]\n"
"168:" // Height 8: Writeback done
"subs x4, x4, #0x4\n"
"bgt 149b\n"
"subs %x[M], %x[M], #0x8\n"
"beq 170f\n"
- "ldr x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"tbz %x[flags], #3, 169f\n"
- "add x27, x27, #0x8\n"
- "str x27, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "add x21, x21, #0x8\n"
+ "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
"b 1b\n"
"169:" // Update direct input
- "mov x26, #0x20\n"
- "madd %x[input_ptr], x26, x27, %x[input_ptr]\n"
+ "mov x20, #0x20\n"
+ "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"170:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x26", "x27", "x28"
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
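
The pattern applied throughout this patch moves `bias` and the output pointer out of the asm operand list and into the kernel-arguments struct, so the kernel fetches them with `ldr xN, [%x[args_ptr], %[offsetof_...]]` instead of pinning them in dedicated registers; the freed operands let the generator renumber scratch registers into the x20-x28 range. Below is a minimal, self-contained sketch of that struct-plus-offsetof convention; the type and field names are illustrative only, not the library's actual declarations, and it assumes an AArch64 target with a GCC/Clang-style inline-asm dialect:

    #include <cstddef>
    #include <cstdint>

    struct KernelArgs {
        size_t N = {};
        const float *B_ptr = {};
        void *output_ptr = {};   // previously a separate "r" operand
        const float *bias = {};  // previously a separate "r" operand
    };

    // One struct pointer plus per-field offsetof() immediates replaces a long
    // operand list; each field is loaded on demand inside the kernel body.
    static uint64_t load_n(const KernelArgs &ka) {
        uint64_t n;
        __asm__ volatile(
            "ldr %x[n], [%x[args_ptr], %[offsetof_N]]\n"
            : [n] "=r"(n)
            : [args_ptr] "r"(&ka),
              [offsetof_N] "I"(offsetof(KernelArgs, N))
            : "memory");
        return n;
    }

Keeping the pointers in the struct also lets the Height 8 path write the advanced output pointer back with a plain store to the struct (str x20, [%x[args_ptr], %[offsetof_output_ptr]]) rather than updating a "+&r" operand, which is why output_ptr and bias drop out of the asm's input/output operand lists in this change.
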
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp
index 004e5d7f23..6401b01607 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void a64_hybrid_fp32_mla_8x4 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void a64_hybrid_fp32_mla_8x4 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -104,10 +106,10 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp %x[M], #0x2\n"
"bgt 43f\n"
"beq 22f\n"
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x14, 3f\n"
"ldr q24, [x14, #0x0]\n"
@@ -138,8 +140,8 @@ void a64_hybrid_fp32_mla_8x4 (
"mov x10, #0x0\n"
"9:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 10f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -162,37 +164,37 @@ void a64_hybrid_fp32_mla_8x4 (
"blt 13f\n"
"12:" // Height 1: Multiply loop: Main loop head
"fmla v24.4s, v8.4s, v0.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"sub x9, x9, #0x4\n"
"add x28, x28, #0x10\n"
- "fmla v24.4s, v10.4s, v0.s[2]\n"
"cmp x9, #0x8\n"
"add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"ldr q8, [x12, #0x0]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
"ldr q9, [x12, #0x10]\n"
+ "fmla v24.4s, v10.4s, v0.s[2]\n"
"ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr q0, [x28, #0x0]\n"
"ldr q11, [x12, #0x30]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"bge 12b\n"
"13:" // Height 1: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x28, x28, #0x10\n"
"sub x9, x9, #0x4\n"
+ "add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "add x12, x12, #0x40\n"
"14:" // Height 1: Multiply loop: Main loop skip
"cbz x9, 16f\n"
"15:" // Height 1: Multiply loop: Odd block loop
"ldr s17, [x28], #0x4\n"
"ldr q16, [x12, #0x0]\n"
"sub x9, x9, #0x1\n"
- "fmla v24.4s, v16.4s, v17.s[0]\n"
"add x12, x12, #0x10\n"
+ "fmla v24.4s, v16.4s, v17.s[0]\n"
"cbnz x9, 15b\n"
"16:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -201,9 +203,9 @@ void a64_hybrid_fp32_mla_8x4 (
"bne 9b\n"
"prfm pstl1keep, [x11, #0x0]\n"
"tbz %x[flags], #1, 17f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v24.4s, v24.4s, v17.4s\n"
"fmax v24.4s, v24.4s, v16.4s\n"
@@ -227,40 +229,40 @@ void a64_hybrid_fp32_mla_8x4 (
"bgt 2b\n"
"b 170f\n"
"22:" // Height 2
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"23:" // Height 2: Column loop
"cbz x14, 24f\n"
"ldr q24, [x14, #0x0]\n"
- "mov v25.16b, v24.16b\n"
"add x14, x14, #0x10\n"
+ "mov v25.16b, v24.16b\n"
"b 29f\n"
"24:" // Height 2: no bias
"tbz %x[flags], #0, 28f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x13, #0x4\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
"bge 27f\n"
"tbz x13, #1, 25f\n"
"ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
+ "ldr d25, [x28], #0x8\n"
"mov x20, #0x8\n"
"tbz x13, #0, 26f\n"
"ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
+ "ld1 { v25.s }[2], [x28]\n"
"b 26f\n"
"25:" // Height 2: Partial accumulate: partial_1_0
"ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
+ "ldr s25, [x28, #0x0]\n"
"mov x20, #0x0\n"
"26:" // Height 2: Partial accumulate: Done
"sub x11, x11, x20\n"
"b 29f\n"
"27:" // Height 2: full accumulate
"ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
"b 29f\n"
"28:" // Height 2: no accumulate
"movi v24.16b, #0x0\n"
@@ -269,8 +271,8 @@ void a64_hybrid_fp32_mla_8x4 (
"mov x10, #0x0\n"
"30:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -300,37 +302,37 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v25.4s, v8.4s, v1.s[0]\n"
"sub x9, x9, #0x4\n"
"add x28, x28, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
"add x27, x27, #0x10\n"
"cmp x9, #0x8\n"
- "fmla v24.4s, v10.4s, v0.s[2]\n"
- "fmla v25.4s, v10.4s, v1.s[2]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"ldr q8, [x12, #0x0]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
"ldr q9, [x12, #0x10]\n"
+ "fmla v24.4s, v10.4s, v0.s[2]\n"
+ "fmla v25.4s, v10.4s, v1.s[2]\n"
"ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
- "fmla v25.4s, v11.4s, v1.s[3]\n"
"ldr q0, [x28, #0x0]\n"
+ "fmla v25.4s, v11.4s, v1.s[3]\n"
"ldr q1, [x27, #0x0]\n"
"ldr q11, [x12, #0x30]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"bge 33b\n"
"34:" // Height 2: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
"fmla v25.4s, v8.4s, v1.s[0]\n"
"add x28, x28, #0x10\n"
"add x27, x27, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
"sub x9, x9, #0x4\n"
"prfm pldl1keep, [x28, #0x80]\n"
+ "add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"35:" // Height 2: Multiply loop: Main loop skip
@@ -340,9 +342,9 @@ void a64_hybrid_fp32_mla_8x4 (
"ldr s17, [x27], #0x4\n"
"sub x9, x9, #0x1\n"
"ldr q16, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v16.4s, v18.s[0]\n"
"fmla v25.4s, v16.4s, v17.s[0]\n"
- "add x12, x12, #0x10\n"
"cbnz x9, 36b\n"
"37:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -350,13 +352,13 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp x10, x20\n"
"bne 30b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
"prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x27, #0x0]\n"
+ "add x28, x11, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v24.4s, v24.4s, v17.4s\n"
"fmin v25.4s, v25.4s, v17.4s\n"
@@ -367,65 +369,65 @@ void a64_hybrid_fp32_mla_8x4 (
"bge 41f\n"
"tbz x13, #1, 39f\n"
"str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
+ "str d25, [x28], #0x8\n"
"tbz x13, #0, 40f\n"
"st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
+ "st1 { v25.s }[2], [x28]\n"
"b 40f\n"
"39:" // Height 2: Partial direct writeback: partial_1_0
"str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
"40:" // Height 2: Partial direct writeback: Done
"b 42f\n"
"41:" // Height 2: Full writeback
"str q24, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
"42:" // Height 2: Writeback done
"subs x13, x13, #0x4\n"
"bgt 23b\n"
"b 170f\n"
"43:" // Height 3
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 3: Column loop
"cbz x14, 45f\n"
"ldr q24, [x14, #0x0]\n"
+ "add x14, x14, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"b 50f\n"
"45:" // Height 3: no bias
"tbz %x[flags], #0, 49f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
"cmp x13, #0x4\n"
- "add x26, x27, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"bge 48f\n"
"tbz x13, #1, 46f\n"
"ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
+ "ldr d25, [x28], #0x8\n"
"mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
"tbz x13, #0, 47f\n"
"ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
"b 47f\n"
"46:" // Height 3: Partial accumulate: partial_1_0
"ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
+ "ldr s25, [x28, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
"47:" // Height 3: Partial accumulate: Done
"sub x11, x11, x20\n"
"b 50f\n"
"48:" // Height 3: full accumulate
"ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
"b 50f\n"
"49:" // Height 3: no accumulate
"movi v24.16b, #0x0\n"
@@ -435,8 +437,8 @@ void a64_hybrid_fp32_mla_8x4 (
"mov x10, #0x0\n"
"51:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 52f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -471,14 +473,17 @@ void a64_hybrid_fp32_mla_8x4 (
"sub x9, x9, #0x4\n"
"add x28, x28, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
- "fmla v26.4s, v9.4s, v2.s[1]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"cmp x9, #0x8\n"
"add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"ldr q8, [x12, #0x0]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
+ "fmla v26.4s, v9.4s, v2.s[1]\n"
"ldr q9, [x12, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
@@ -486,14 +491,11 @@ void a64_hybrid_fp32_mla_8x4 (
"ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr q0, [x28, #0x0]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"ldr q1, [x27, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
"ldr q2, [x26, #0x0]\n"
"ldr q11, [x12, #0x30]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 54b\n"
"55:" // Height 3: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -501,17 +503,17 @@ void a64_hybrid_fp32_mla_8x4 (
"add x28, x28, #0x10\n"
"add x27, x27, #0x10\n"
"fmla v26.4s, v8.4s, v2.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x26, x26, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"sub x9, x9, #0x4\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
@@ -524,10 +526,10 @@ void a64_hybrid_fp32_mla_8x4 (
"sub x9, x9, #0x1\n"
"ldr s17, [x26], #0x4\n"
"ldr q16, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v16.4s, v19.s[0]\n"
"fmla v25.4s, v16.4s, v18.s[0]\n"
"fmla v26.4s, v16.4s, v17.s[0]\n"
- "add x12, x12, #0x10\n"
"cbnz x9, 57b\n"
"58:" // Height 3: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -535,15 +537,15 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp x10, x20\n"
"bne 51b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x11, #0x0]\n"
+ "add x28, x11, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x27, x28, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 59f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v24.4s, v24.4s, v17.4s\n"
"fmin v25.4s, v25.4s, v17.4s\n"
@@ -556,75 +558,75 @@ void a64_hybrid_fp32_mla_8x4 (
"bge 62f\n"
"tbz x13, #1, 60f\n"
"str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
"tbz x13, #0, 61f\n"
"st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
"b 61f\n"
"60:" // Height 3: Partial direct writeback: partial_1_0
"str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
"61:" // Height 3: Partial direct writeback: Done
"b 63f\n"
"62:" // Height 3: Full writeback
"str q24, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
"63:" // Height 3: Writeback done
"subs x13, x13, #0x4\n"
"bgt 44b\n"
"b 170f\n"
"64:" // Height 4
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"65:" // Height 4: Column loop
"cbz x14, 66f\n"
"ldr q24, [x14, #0x0]\n"
+ "add x14, x14, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"b 71f\n"
"66:" // Height 4: no bias
"tbz %x[flags], #0, 70f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
"cmp x13, #0x4\n"
- "add x25, x26, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
+ "add x26, x27, x20, LSL #2\n"
"bge 69f\n"
"tbz x13, #1, 67f\n"
"ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
+ "ldr d25, [x28], #0x8\n"
"mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
"tbz x13, #0, 68f\n"
"ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
"b 68f\n"
"67:" // Height 4: Partial accumulate: partial_1_0
"ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
+ "ldr s25, [x28, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
"68:" // Height 4: Partial accumulate: Done
"sub x11, x11, x20\n"
"b 71f\n"
"69:" // Height 4: full accumulate
"ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
"b 71f\n"
"70:" // Height 4: no accumulate
"movi v24.16b, #0x0\n"
@@ -635,8 +637,8 @@ void a64_hybrid_fp32_mla_8x4 (
"mov x10, #0x0\n"
"72:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -678,23 +680,24 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v27.4s, v8.4s, v3.s[0]\n"
"add x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
"add x25, x25, #0x10\n"
"cmp x9, #0x8\n"
- "fmla v26.4s, v9.4s, v2.s[1]\n"
- "fmla v27.4s, v9.4s, v3.s[1]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"ldr q8, [x12, #0x0]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
+ "fmla v26.4s, v9.4s, v2.s[1]\n"
+ "fmla v27.4s, v9.4s, v3.s[1]\n"
"ldr q9, [x12, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"ldr q10, [x12, #0x20]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr q0, [x28, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
@@ -704,7 +707,6 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v27.4s, v11.4s, v3.s[3]\n"
"ldr q3, [x25, #0x0]\n"
"ldr q11, [x12, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"bge 75b\n"
"76:" // Height 4: Multiply loop: Single iteration only
"fmla v24.4s, v8.4s, v0.s[0]\n"
@@ -715,18 +717,18 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v27.4s, v8.4s, v3.s[0]\n"
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "sub x9, x9, #0x4\n"
+ "add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "sub x9, x9, #0x4\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
@@ -742,9 +744,9 @@ void a64_hybrid_fp32_mla_8x4 (
"ldr s18, [x26], #0x4\n"
"ldr s17, [x25], #0x4\n"
"ldr q16, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v16.4s, v20.s[0]\n"
"fmla v25.4s, v16.4s, v19.s[0]\n"
- "add x12, x12, #0x10\n"
"fmla v26.4s, v16.4s, v18.s[0]\n"
"fmla v27.4s, v16.4s, v17.s[0]\n"
"cbnz x9, 78b\n"
@@ -754,17 +756,17 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp x10, x20\n"
"bne 72b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x27, x28, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"tbz %x[flags], #1, 80f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v24.4s, v24.4s, v17.4s\n"
"fmin v25.4s, v25.4s, v17.4s\n"
@@ -779,85 +781,85 @@ void a64_hybrid_fp32_mla_8x4 (
"bge 83f\n"
"tbz x13, #1, 81f\n"
"str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
"tbz x13, #0, 82f\n"
"st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
"b 82f\n"
"81:" // Height 4: Partial direct writeback: partial_1_0
"str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
"82:" // Height 4: Partial direct writeback: Done
"b 84f\n"
"83:" // Height 4: Full writeback
"str q24, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
"84:" // Height 4: Writeback done
"subs x13, x13, #0x4\n"
"bgt 65b\n"
"b 170f\n"
"85:" // Height 5
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"86:" // Height 5: Column loop
"cbz x14, 87f\n"
"ldr q24, [x14, #0x0]\n"
+ "add x14, x14, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"b 92f\n"
"87:" // Height 5: no bias
"tbz %x[flags], #0, 91f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "cmp x13, #0x4\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
- "cmp x13, #0x4\n"
- "add x24, x25, x20, LSL #2\n"
"bge 90f\n"
"tbz x13, #1, 88f\n"
"ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
+ "ldr d25, [x28], #0x8\n"
"mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d28, [x24], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
"tbz x13, #0, 89f\n"
"ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
- "ld1 { v28.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v28.s }[2], [x25]\n"
"b 89f\n"
"88:" // Height 5: Partial accumulate: partial_1_0
"ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
+ "ldr s25, [x28, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
- "ldr s28, [x24, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s28, [x25, #0x0]\n"
"89:" // Height 5: Partial accumulate: Done
"sub x11, x11, x20\n"
"b 92f\n"
"90:" // Height 5: full accumulate
"ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q28, [x24, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
"b 92f\n"
"91:" // Height 5: no accumulate
"movi v24.16b, #0x0\n"
@@ -869,8 +871,8 @@ void a64_hybrid_fp32_mla_8x4 (
"mov x10, #0x0\n"
"93:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 94f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -917,30 +919,30 @@ void a64_hybrid_fp32_mla_8x4 (
"add x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
- "fmla v26.4s, v9.4s, v2.s[1]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"cmp x9, #0x8\n"
"add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"ldr q8, [x12, #0x0]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "fmla v26.4s, v9.4s, v2.s[1]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"ldr q9, [x12, #0x10]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
"ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr q0, [x28, #0x0]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
"ldr q1, [x27, #0x0]\n"
"fmla v26.4s, v11.4s, v2.s[3]\n"
@@ -961,21 +963,21 @@ void a64_hybrid_fp32_mla_8x4 (
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
"fmla v28.4s, v8.4s, v4.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"sub x9, x9, #0x4\n"
+ "add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
@@ -994,11 +996,11 @@ void a64_hybrid_fp32_mla_8x4 (
"ldr s18, [x25], #0x4\n"
"ldr s17, [x24], #0x4\n"
"ldr q16, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v16.4s, v21.s[0]\n"
"fmla v25.4s, v16.4s, v20.s[0]\n"
"fmla v26.4s, v16.4s, v19.s[0]\n"
"fmla v27.4s, v16.4s, v18.s[0]\n"
- "add x12, x12, #0x10\n"
"fmla v28.4s, v16.4s, v17.s[0]\n"
"cbnz x9, 99b\n"
"100:" // Height 5: Multiply loop: No odd multiplies
@@ -1007,19 +1009,19 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp x10, x20\n"
"bne 93b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x27, x28, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"tbz %x[flags], #1, 101f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v24.4s, v24.4s, v17.4s\n"
"fmin v25.4s, v25.4s, v17.4s\n"
@@ -1036,47 +1038,47 @@ void a64_hybrid_fp32_mla_8x4 (
"bge 104f\n"
"tbz x13, #1, 102f\n"
"str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
- "str d28, [x24], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
+ "str d28, [x25], #0x8\n"
"tbz x13, #0, 103f\n"
"st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
- "st1 { v28.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
+ "st1 { v28.s }[2], [x25]\n"
"b 103f\n"
"102:" // Height 5: Partial direct writeback: partial_1_0
"str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
- "str s28, [x24, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
+ "str s28, [x25, #0x0]\n"
"103:" // Height 5: Partial direct writeback: Done
"b 105f\n"
"104:" // Height 5: Full writeback
"str q24, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
- "str q28, [x24, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
+ "str q28, [x25, #0x0]\n"
"105:" // Height 5: Writeback done
"subs x13, x13, #0x4\n"
"bgt 86b\n"
"b 170f\n"
"106:" // Height 6
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"107:" // Height 6: Column loop
"cbz x14, 108f\n"
"ldr q24, [x14, #0x0]\n"
+ "add x14, x14, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
@@ -1084,47 +1086,47 @@ void a64_hybrid_fp32_mla_8x4 (
"108:" // Height 6: no bias
"tbz %x[flags], #0, 112f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "cmp x13, #0x4\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "cmp x13, #0x4\n"
- "add x23, x24, x20, LSL #2\n"
"bge 111f\n"
"tbz x13, #1, 109f\n"
"ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
+ "ldr d25, [x28], #0x8\n"
"mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d28, [x24], #0x8\n"
- "ldr d29, [x23], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
"tbz x13, #0, 110f\n"
"ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
- "ld1 { v28.s }[2], [x24]\n"
- "ld1 { v29.s }[2], [x23]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v28.s }[2], [x25]\n"
+ "ld1 { v29.s }[2], [x24]\n"
"b 110f\n"
"109:" // Height 6: Partial accumulate: partial_1_0
"ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
+ "ldr s25, [x28, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
- "ldr s28, [x24, #0x0]\n"
- "ldr s29, [x23, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s28, [x25, #0x0]\n"
+ "ldr s29, [x24, #0x0]\n"
"110:" // Height 6: Partial accumulate: Done
"sub x11, x11, x20\n"
"b 113f\n"
"111:" // Height 6: full accumulate
"ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q28, [x24, #0x0]\n"
- "ldr q29, [x23, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q29, [x24, #0x0]\n"
"b 113f\n"
"112:" // Height 6: no accumulate
"movi v24.16b, #0x0\n"
@@ -1137,8 +1139,8 @@ void a64_hybrid_fp32_mla_8x4 (
"mov x10, #0x0\n"
"114:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 115f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1192,30 +1194,30 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v29.4s, v8.4s, v5.s[0]\n"
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
"add x23, x23, #0x10\n"
"cmp x9, #0x8\n"
- "fmla v26.4s, v9.4s, v2.s[1]\n"
- "fmla v27.4s, v9.4s, v3.s[1]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
"add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"ldr q8, [x12, #0x0]\n"
+ "fmla v26.4s, v9.4s, v2.s[1]\n"
+ "fmla v27.4s, v9.4s, v3.s[1]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
"ldr q9, [x12, #0x10]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
"ldr q10, [x12, #0x20]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
"ldr q0, [x28, #0x0]\n"
"fmla v25.4s, v11.4s, v1.s[3]\n"
@@ -1243,22 +1245,22 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v29.4s, v8.4s, v5.s[0]\n"
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
+ "sub x9, x9, #0x4\n"
+ "add x12, x12, #0x40\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "sub x9, x9, #0x4\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
@@ -1280,9 +1282,9 @@ void a64_hybrid_fp32_mla_8x4 (
"ldr s18, [x24], #0x4\n"
"ldr s17, [x23], #0x4\n"
"ldr q16, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v16.4s, v22.s[0]\n"
"fmla v25.4s, v16.4s, v21.s[0]\n"
- "add x12, x12, #0x10\n"
"fmla v26.4s, v16.4s, v20.s[0]\n"
"fmla v27.4s, v16.4s, v19.s[0]\n"
"fmla v28.4s, v16.4s, v18.s[0]\n"
@@ -1294,21 +1296,21 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp x10, x20\n"
"bne 114b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x27, x28, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"tbz %x[flags], #1, 122f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v24.4s, v24.4s, v17.4s\n"
"fmin v25.4s, v25.4s, v17.4s\n"
@@ -1327,51 +1329,51 @@ void a64_hybrid_fp32_mla_8x4 (
"bge 125f\n"
"tbz x13, #1, 123f\n"
"str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
- "str d28, [x24], #0x8\n"
- "str d29, [x23], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
+ "str d28, [x25], #0x8\n"
+ "str d29, [x24], #0x8\n"
"tbz x13, #0, 124f\n"
"st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
- "st1 { v28.s }[2], [x24]\n"
- "st1 { v29.s }[2], [x23]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
+ "st1 { v28.s }[2], [x25]\n"
+ "st1 { v29.s }[2], [x24]\n"
"b 124f\n"
"123:" // Height 6: Partial direct writeback: partial_1_0
"str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
- "str s28, [x24, #0x0]\n"
- "str s29, [x23, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
+ "str s28, [x25, #0x0]\n"
+ "str s29, [x24, #0x0]\n"
"124:" // Height 6: Partial direct writeback: Done
"b 126f\n"
"125:" // Height 6: Full writeback
"str q24, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
- "str q28, [x24, #0x0]\n"
- "str q29, [x23, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
+ "str q28, [x25, #0x0]\n"
+ "str q29, [x24, #0x0]\n"
"126:" // Height 6: Writeback done
"subs x13, x13, #0x4\n"
"bgt 107b\n"
"b 170f\n"
"127:" // Height 7
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"128:" // Height 7: Column loop
"cbz x14, 129f\n"
"ldr q24, [x14, #0x0]\n"
+ "add x14, x14, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
@@ -1380,52 +1382,52 @@ void a64_hybrid_fp32_mla_8x4 (
"129:" // Height 7: no bias
"tbz %x[flags], #0, 133f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "cmp x13, #0x4\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x13, #0x4\n"
- "add x22, x23, x20, LSL #2\n"
"bge 132f\n"
"tbz x13, #1, 130f\n"
"ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
+ "ldr d25, [x28], #0x8\n"
"mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d28, [x24], #0x8\n"
- "ldr d29, [x23], #0x8\n"
- "ldr d30, [x22], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d30, [x23], #0x8\n"
"tbz x13, #0, 131f\n"
"ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
- "ld1 { v28.s }[2], [x24]\n"
- "ld1 { v29.s }[2], [x23]\n"
- "ld1 { v30.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v28.s }[2], [x25]\n"
+ "ld1 { v29.s }[2], [x24]\n"
+ "ld1 { v30.s }[2], [x23]\n"
"b 131f\n"
"130:" // Height 7: Partial accumulate: partial_1_0
"ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
+ "ldr s25, [x28, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
- "ldr s28, [x24, #0x0]\n"
- "ldr s29, [x23, #0x0]\n"
- "ldr s30, [x22, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s28, [x25, #0x0]\n"
+ "ldr s29, [x24, #0x0]\n"
+ "ldr s30, [x23, #0x0]\n"
"131:" // Height 7: Partial accumulate: Done
"sub x11, x11, x20\n"
"b 134f\n"
"132:" // Height 7: full accumulate
"ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q28, [x24, #0x0]\n"
- "ldr q29, [x23, #0x0]\n"
- "ldr q30, [x22, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q29, [x24, #0x0]\n"
+ "ldr q30, [x23, #0x0]\n"
"b 134f\n"
"133:" // Height 7: no accumulate
"movi v24.16b, #0x0\n"
@@ -1439,8 +1441,8 @@ void a64_hybrid_fp32_mla_8x4 (
"mov x10, #0x0\n"
"135:" // Height 7: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 136f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1499,32 +1501,32 @@ void a64_hybrid_fp32_mla_8x4 (
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
"fmla v30.4s, v8.4s, v6.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "fmla v26.4s, v9.4s, v2.s[1]\n"
"cmp x9, #0x8\n"
"add x12, x12, #0x40\n"
"ldr q8, [x12, #0x0]\n"
+ "fmla v26.4s, v9.4s, v2.s[1]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
"ldr q9, [x12, #0x10]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
"fmla v29.4s, v10.4s, v5.s[2]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v30.4s, v10.4s, v6.s[2]\n"
"ldr q10, [x12, #0x20]\n"
"fmla v24.4s, v11.4s, v0.s[3]\n"
@@ -1557,25 +1559,25 @@ void a64_hybrid_fp32_mla_8x4 (
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
"fmla v30.4s, v8.4s, v6.s[0]\n"
- "fmla v24.4s, v9.4s, v0.s[1]\n"
"add x22, x22, #0x10\n"
- "sub x9, x9, #0x4\n"
- "fmla v25.4s, v9.4s, v1.s[1]\n"
- "fmla v26.4s, v9.4s, v2.s[1]\n"
"prfm pldl1keep, [x28, #0x80]\n"
"prfm pldl1keep, [x27, #0x80]\n"
+ "fmla v24.4s, v9.4s, v0.s[1]\n"
+ "fmla v25.4s, v9.4s, v1.s[1]\n"
+ "sub x9, x9, #0x4\n"
+ "add x12, x12, #0x40\n"
+ "fmla v26.4s, v9.4s, v2.s[1]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "fmla v28.4s, v9.4s, v4.s[1]\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "fmla v30.4s, v9.4s, v6.s[1]\n"
"prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
+ "fmla v30.4s, v9.4s, v6.s[1]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
@@ -1600,11 +1602,11 @@ void a64_hybrid_fp32_mla_8x4 (
"ldr s18, [x23], #0x4\n"
"ldr s17, [x22], #0x4\n"
"ldr q16, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v16.4s, v23.s[0]\n"
"fmla v25.4s, v16.4s, v22.s[0]\n"
"fmla v26.4s, v16.4s, v21.s[0]\n"
"fmla v27.4s, v16.4s, v20.s[0]\n"
- "add x12, x12, #0x10\n"
"fmla v28.4s, v16.4s, v19.s[0]\n"
"fmla v29.4s, v16.4s, v18.s[0]\n"
"fmla v30.4s, v16.4s, v17.s[0]\n"
@@ -1615,23 +1617,23 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp x10, x20\n"
"bne 135b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x27, x28, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
"prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbz %x[flags], #1, 143f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v24.4s, v24.4s, v17.4s\n"
"fmin v25.4s, v25.4s, v17.4s\n"
@@ -1652,58 +1654,59 @@ void a64_hybrid_fp32_mla_8x4 (
"bge 146f\n"
"tbz x13, #1, 144f\n"
"str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
- "str d28, [x24], #0x8\n"
- "str d29, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
+ "str d28, [x25], #0x8\n"
+ "str d29, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
"tbz x13, #0, 145f\n"
"st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
- "st1 { v28.s }[2], [x24]\n"
- "st1 { v29.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
+ "st1 { v28.s }[2], [x25]\n"
+ "st1 { v29.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
"b 145f\n"
"144:" // Height 7: Partial direct writeback: partial_1_0
"str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
- "str s28, [x24, #0x0]\n"
- "str s29, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
+ "str s28, [x25, #0x0]\n"
+ "str s29, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
"145:" // Height 7: Partial direct writeback: Done
"b 147f\n"
"146:" // Height 7: Full writeback
"str q24, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
- "str q28, [x24, #0x0]\n"
- "str q29, [x23, #0x0]\n"
- "str q30, [x22, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
+ "str q28, [x25, #0x0]\n"
+ "str q29, [x24, #0x0]\n"
+ "str q30, [x23, #0x0]\n"
"147:" // Height 7: Writeback done
"subs x13, x13, #0x4\n"
"bgt 128b\n"
"b 170f\n"
"148:" // Height 8
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x20\n"
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x11\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"149:" // Height 8: Column loop
"cbz x14, 150f\n"
"ldr q24, [x14, #0x0]\n"
+ "add x14, x14, #0x10\n"
"mov v25.16b, v24.16b\n"
"mov v26.16b, v24.16b\n"
- "add x14, x14, #0x10\n"
"mov v27.16b, v24.16b\n"
"mov v28.16b, v24.16b\n"
"mov v29.16b, v24.16b\n"
@@ -1713,57 +1716,57 @@ void a64_hybrid_fp32_mla_8x4 (
"150:" // Height 8: no bias
"tbz %x[flags], #0, 154f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "cmp x13, #0x4\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x13, #0x4\n"
- "add x21, x22, x20, LSL #2\n"
"bge 153f\n"
"tbz x13, #1, 151f\n"
"ldr d24, [x11], #0x8\n"
- "ldr d25, [x27], #0x8\n"
+ "ldr d25, [x28], #0x8\n"
"mov x20, #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d27, [x25], #0x8\n"
- "ldr d28, [x24], #0x8\n"
- "ldr d29, [x23], #0x8\n"
- "ldr d30, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d26, [x27], #0x8\n"
+ "ldr d27, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d30, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x13, #0, 152f\n"
"ld1 { v24.s }[2], [x11]\n"
- "ld1 { v25.s }[2], [x27]\n"
- "ld1 { v26.s }[2], [x26]\n"
- "ld1 { v27.s }[2], [x25]\n"
- "ld1 { v28.s }[2], [x24]\n"
- "ld1 { v29.s }[2], [x23]\n"
- "ld1 { v30.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v25.s }[2], [x28]\n"
+ "ld1 { v26.s }[2], [x27]\n"
+ "ld1 { v27.s }[2], [x26]\n"
+ "ld1 { v28.s }[2], [x25]\n"
+ "ld1 { v29.s }[2], [x24]\n"
+ "ld1 { v30.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 152f\n"
"151:" // Height 8: Partial accumulate: partial_1_0
"ldr s24, [x11, #0x0]\n"
- "ldr s25, [x27, #0x0]\n"
+ "ldr s25, [x28, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s26, [x26, #0x0]\n"
- "ldr s27, [x25, #0x0]\n"
- "ldr s28, [x24, #0x0]\n"
- "ldr s29, [x23, #0x0]\n"
- "ldr s30, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s26, [x27, #0x0]\n"
+ "ldr s27, [x26, #0x0]\n"
+ "ldr s28, [x25, #0x0]\n"
+ "ldr s29, [x24, #0x0]\n"
+ "ldr s30, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"152:" // Height 8: Partial accumulate: Done
"sub x11, x11, x20\n"
"b 155f\n"
"153:" // Height 8: full accumulate
"ldr q24, [x11, #0x0]\n"
- "ldr q25, [x27, #0x0]\n"
- "ldr q26, [x26, #0x0]\n"
- "ldr q27, [x25, #0x0]\n"
- "ldr q28, [x24, #0x0]\n"
- "ldr q29, [x23, #0x0]\n"
- "ldr q30, [x22, #0x0]\n"
- "ldr q31, [x21, #0x0]\n"
+ "ldr q25, [x28, #0x0]\n"
+ "ldr q26, [x27, #0x0]\n"
+ "ldr q27, [x26, #0x0]\n"
+ "ldr q28, [x25, #0x0]\n"
+ "ldr q29, [x24, #0x0]\n"
+ "ldr q30, [x23, #0x0]\n"
+ "ldr q31, [x22, #0x0]\n"
"b 155f\n"
"154:" // Height 8: no accumulate
"movi v24.16b, #0x0\n"
@@ -1778,8 +1781,8 @@ void a64_hybrid_fp32_mla_8x4 (
"mov x10, #0x0\n"
"156:" // Height 8: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 157f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1852,10 +1855,10 @@ void a64_hybrid_fp32_mla_8x4 (
"fmla v26.4s, v9.4s, v2.s[1]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
"add x12, x12, #0x40\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
"ldr q8, [x12, #0x0]\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"prfm pldl1keep, [x27, #0x80]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
"fmla v31.4s, v9.4s, v7.s[1]\n"
@@ -1912,24 +1915,24 @@ void a64_hybrid_fp32_mla_8x4 (
"add x21, x21, #0x10\n"
"fmla v24.4s, v9.4s, v0.s[1]\n"
"fmla v25.4s, v9.4s, v1.s[1]\n"
- "sub x9, x9, #0x4\n"
"prfm pldl1keep, [x28, #0x80]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
"fmla v26.4s, v9.4s, v2.s[1]\n"
"fmla v27.4s, v9.4s, v3.s[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "sub x9, x9, #0x4\n"
+ "add x12, x12, #0x40\n"
"fmla v28.4s, v9.4s, v4.s[1]\n"
"fmla v29.4s, v9.4s, v5.s[1]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"fmla v30.4s, v9.4s, v6.s[1]\n"
"fmla v31.4s, v9.4s, v7.s[1]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"prfm pldl1keep, [x23, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
"fmla v24.4s, v10.4s, v0.s[2]\n"
"fmla v25.4s, v10.4s, v1.s[2]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"prfm pldl1keep, [x21, #0x80]\n"
- "add x12, x12, #0x40\n"
"fmla v26.4s, v10.4s, v2.s[2]\n"
"fmla v27.4s, v10.4s, v3.s[2]\n"
"fmla v28.4s, v10.4s, v4.s[2]\n"
@@ -1957,9 +1960,9 @@ void a64_hybrid_fp32_mla_8x4 (
"ldr s18, [x22], #0x4\n"
"ldr s17, [x21], #0x4\n"
"ldr q16, [x12, #0x0]\n"
+ "add x12, x12, #0x10\n"
"fmla v24.4s, v16.4s, v0.s[0]\n"
"fmla v25.4s, v16.4s, v23.s[0]\n"
- "add x12, x12, #0x10\n"
"fmla v26.4s, v16.4s, v22.s[0]\n"
"fmla v27.4s, v16.4s, v21.s[0]\n"
"fmla v28.4s, v16.4s, v20.s[0]\n"
@@ -1973,25 +1976,25 @@ void a64_hybrid_fp32_mla_8x4 (
"cmp x10, x20\n"
"bne 156b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x11, #0x0]\n"
- "add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "prfm pstl1keep, [x28, #0x0]\n"
+ "add x27, x28, x20, LSL #2\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x26, x27, x20, LSL #2\n"
"prfm pstl1keep, [x26, #0x0]\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x25, #0x0]\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbz %x[flags], #1, 164f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v17.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v17.4s }, [x21]\n"
"ld1r { v16.4s }, [x20]\n"
"fmin v24.4s, v24.4s, v17.4s\n"
"fmin v25.4s, v25.4s, v17.4s\n"
@@ -2014,44 +2017,44 @@ void a64_hybrid_fp32_mla_8x4 (
"bge 167f\n"
"tbz x13, #1, 165f\n"
"str d24, [x11], #0x8\n"
- "str d25, [x27], #0x8\n"
- "str d26, [x26], #0x8\n"
- "str d27, [x25], #0x8\n"
- "str d28, [x24], #0x8\n"
- "str d29, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d31, [x21], #0x8\n"
+ "str d25, [x28], #0x8\n"
+ "str d26, [x27], #0x8\n"
+ "str d27, [x26], #0x8\n"
+ "str d28, [x25], #0x8\n"
+ "str d29, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
+ "str d31, [x22], #0x8\n"
"tbz x13, #0, 166f\n"
"st1 { v24.s }[2], [x11]\n"
- "st1 { v25.s }[2], [x27]\n"
- "st1 { v26.s }[2], [x26]\n"
- "st1 { v27.s }[2], [x25]\n"
- "st1 { v28.s }[2], [x24]\n"
- "st1 { v29.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v31.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x28]\n"
+ "st1 { v26.s }[2], [x27]\n"
+ "st1 { v27.s }[2], [x26]\n"
+ "st1 { v28.s }[2], [x25]\n"
+ "st1 { v29.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
+ "st1 { v31.s }[2], [x22]\n"
"b 166f\n"
"165:" // Height 8: Partial direct writeback: partial_1_0
"str s24, [x11, #0x0]\n"
- "str s25, [x27, #0x0]\n"
- "str s26, [x26, #0x0]\n"
- "str s27, [x25, #0x0]\n"
- "str s28, [x24, #0x0]\n"
- "str s29, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s31, [x21, #0x0]\n"
+ "str s25, [x28, #0x0]\n"
+ "str s26, [x27, #0x0]\n"
+ "str s27, [x26, #0x0]\n"
+ "str s28, [x25, #0x0]\n"
+ "str s29, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
+ "str s31, [x22, #0x0]\n"
"166:" // Height 8: Partial direct writeback: Done
"b 168f\n"
"167:" // Height 8: Full writeback
"str q24, [x11, #0x0]\n"
"add x11, x11, #0x10\n"
- "str q25, [x27, #0x0]\n"
- "str q26, [x26, #0x0]\n"
- "str q27, [x25, #0x0]\n"
- "str q28, [x24, #0x0]\n"
- "str q29, [x23, #0x0]\n"
- "str q30, [x22, #0x0]\n"
- "str q31, [x21, #0x0]\n"
+ "str q25, [x28, #0x0]\n"
+ "str q26, [x27, #0x0]\n"
+ "str q27, [x26, #0x0]\n"
+ "str q28, [x25, #0x0]\n"
+ "str q29, [x24, #0x0]\n"
+ "str q30, [x23, #0x0]\n"
+ "str q31, [x22, #0x0]\n"
"168:" // Height 8: Writeback done
"subs x13, x13, #0x4\n"
"bgt 149b\n"
@@ -2067,8 +2070,8 @@ void a64_hybrid_fp32_mla_8x4 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"170:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
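Note on the hunks above: two mechanical transformations repeat through every Height block of a64_hybrid_fp32_mla_8x4. First, bias and output_ptr stop being inline-asm operands and become KernelArgs fields, so "mov x14, %x[bias]" / "mov x11, %x[output_ptr]" turn into ldr instructions through %[args_ptr], and the Height 8 path writes the advanced output pointer back into the struct with madd + str instead of mutating a "+&r" operand; this is what lets the constraint-list hunk at the end of the file drop [bias] and [output_ptr]. Second, the per-height row-pointer chains are renumbered one register up (x27..x21 becomes x28..x22) and the pldl1keep prefetches and pointer increments are hoisted ahead of the dependent fmla chains. A minimal sketch of the args-struct idiom, assuming a cut-down KernelArgs (the real struct carries many more fields):

    #include <cstddef>

    struct KernelArgs
    {
        const float *bias = {};   // offsetof_bias in the asm below
        void *output_ptr = {};    // offsetof_output_ptr in the asm below
    };

    void sketch(const float *bias, void *output_ptr)
    {
        KernelArgs ka;
        ka.bias       = bias;
        ka.output_ptr = output_ptr;
        __asm__ __volatile__(
            // Replaces "mov x14, %x[bias]" and "mov x11, %x[output_ptr]":
            "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
            "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            // Height-8 style write-back of the advanced pointer (the two
            // mov operands are dummies here; the real kernel uses the row
            // offset and a stride of 0x20):
            "mov x20, #0x20\n"
            "mov x21, #0x1\n"
            "madd x20, x21, x20, x11\n"
            "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            :
            : [args_ptr] "r" (&ka),
              [offsetof_bias] "I" (offsetof(KernelArgs, bias)),
              [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr))
            : "cc", "memory", "x11", "x14", "x20", "x21");
    }

Moving the pointer into memory also removes one "+&r" operand, which is one fewer register the compiler must reserve across a very long asm body.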
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24.hpp
index f31dd7afd0..453ef4888f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 4, 24, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 24, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
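Note on the header hunk: the transforms member now names the LHS operand type explicitly instead of letting rhs_operand_type stand in for both sides. Schematically (the real StdTransformsFixed lives in arm_gemm's transform headers and carries more parameters and members than shown; this is only the shape of the change, with names taken from the hunk):

    // After the patch the template takes both operand types:
    template <typename lhs_operand_type, typename rhs_operand_type,
              typename result_type,
              unsigned int height, unsigned int width, unsigned int k_unroll>
    class StdTransformsFixed; // interleave/merge helpers elided

    // Before: StdTransformsFixed<rhs_operand_type, result_type, 4, 24, 4>
    // After:  StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 24, 4>

For this kernel the two operand types genuinely differ: the fp32 LHS is converted to bf16 in flight (the bfcvtn instructions in the hunks below) while the RHS arrives already packed as bf16.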
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp
index 0e468b196a..39a34898d7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_4x24/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,18 +48,19 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
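This hunk is the C++ half of the same migration: output_ptr (and, one hunk below, bias) becomes a KernelArgs field filled in before the asm block rather than a local passed as an operand. A self-contained sketch of that setup; the GemmOutputArg shape here is an assumption reconstructed from the field accesses in the hunk (is_indirect, indirect.ptr/offset, direct.base/stride), not taken from the arm_gemm headers:

    #include <cstddef>

    // Assumed shape, inferred from the hunk's accesses.
    struct GemmOutputArg
    {
        bool is_indirect = false;
        struct { void *const *ptr; size_t offset; } indirect = {};
        struct { float *base; size_t stride; } direct = {};
    };

    struct KernelArgs
    {
        size_t output_offset = {};
        void *output_ptr = {};
        const float *bias = {};
    };

    unsigned long setup_output(KernelArgs &ka, const GemmOutputArg &output_arg)
    {
        unsigned long flags = 0;
        if (output_arg.is_indirect) {
            ka.output_ptr    = (void *)(output_arg.indirect.ptr);
            ka.output_offset = output_arg.indirect.offset;
            flags |= 0x4;    // bit 2: asm takes the indirect-output path
        } else {
            ka.output_ptr    = (void *)(output_arg.direct.base);
            ka.output_offset = output_arg.direct.stride;
        }
        return flags;
    }

Once every consumer reads these through %[args_ptr] + offsetof, the operand list can shed [bias] and [output_ptr] entirely, as the constraint-list hunk of the previous file shows.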
@@ -80,6 +81,7 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -99,27 +101,27 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"cmp %x[M], #0x2\n"
"bgt 87f\n"
"beq 44f\n"
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x10, 3f\n"
"ldr q8, [x10, #0x0]\n"
"ldr q9, [x10, #0x10]\n"
- "zip2 v14.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x10, #0x20]\n"
"ldr q11, [x10, #0x30]\n"
- "zip2 v15.2d, v9.2d, v9.2d\n"
- "zip1 v9.2d, v9.2d, v9.2d\n"
"ldr q12, [x10, #0x40]\n"
"ldr q13, [x10, #0x50]\n"
+ "add x10, x10, #0x60\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x10, x10, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -253,8 +255,8 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"mov x26, #0x0\n"
"20:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 21f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -277,6 +279,9 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"blt 24f\n"
"23:" // Height 1: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "cmp x25, #0x8\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q24, [x28, #0x40]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
@@ -293,45 +298,42 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"ldr q22, [x28, #0xa0]\n"
".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x28, #0xb0]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
- ".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
"add x28, x28, #0xc0\n"
+ ".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
"ldr q4, [x28, #0x0]\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
"ldr q5, [x28, #0x10]\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
"ldr q6, [x28, #0x20]\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
- "ldr q7, [x28, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"ld1 { v0.4s }, [x24], #0x10\n"
+ "ldr q7, [x28, #0x30]\n"
"bge 23b\n"
"24:" // Height 1: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q23, [x28, #0x40]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "ldr q25, [x28, #0x50]\n"
+ "ldr q22, [x28, #0x50]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q21, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
"ldr q24, [x28, #0x70]\n"
".inst 0x6e57ec0a // bfmmla v10.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x80]\n"
- ".inst 0x6e59ec10 // bfmmla v16.4s, v0.8h, v25.8h\n"
+ ".inst 0x6e56ec10 // bfmmla v16.4s, v0.8h, v22.8h\n"
"ldr q22, [x28, #0x90]\n"
".inst 0x6e55ec0b // bfmmla v11.4s, v0.8h, v21.8h\n"
"ldr q21, [x28, #0xa0]\n"
".inst 0x6e58ec11 // bfmmla v17.4s, v0.8h, v24.8h\n"
- "ldr q5, [x28, #0xb0]\n"
- "sub x25, x25, #0x4\n"
+ "ldr q3, [x28, #0xb0]\n"
+ "add x28, x28, #0xc0\n"
".inst 0x6e57ec0c // bfmmla v12.4s, v0.8h, v23.8h\n"
".inst 0x6e56ec12 // bfmmla v18.4s, v0.8h, v22.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "add x28, x28, #0xc0\n"
".inst 0x6e55ec0d // bfmmla v13.4s, v0.8h, v21.8h\n"
- ".inst 0x6e45ec13 // bfmmla v19.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e43ec13 // bfmmla v19.4s, v0.8h, v3.8h\n"
"25:" // Height 1: Multiply loop: Main loop skip
"cbz x25, 28f\n"
"cbz x25, 28f\n"
@@ -343,32 +345,32 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"26:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr s0, [x24, #0x0]\n"
"27:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q21, [x28, #0x0]\n"
- "ldr q1, [x28, #0x10]\n"
+ "ldr q23, [x28, #0x0]\n"
+ "ldr q29, [x28, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x6e55ec08 // bfmmla v8.4s, v0.8h, v21.8h\n"
- "ldr q21, [x28, #0x20]\n"
- "ldr q22, [x28, #0x30]\n"
- ".inst 0x6e41ec0e // bfmmla v14.4s, v0.8h, v1.8h\n"
- ".inst 0x6e55ec09 // bfmmla v9.4s, v0.8h, v21.8h\n"
- "ldr q21, [x28, #0x40]\n"
+ "ldr q22, [x28, #0x20]\n"
+ "ldr q21, [x28, #0x30]\n"
+ ".inst 0x6e57ec08 // bfmmla v8.4s, v0.8h, v23.8h\n"
+ "ldr q24, [x28, #0x40]\n"
+ ".inst 0x6e5dec0e // bfmmla v14.4s, v0.8h, v29.8h\n"
"ldr q23, [x28, #0x50]\n"
- ".inst 0x6e56ec0f // bfmmla v15.4s, v0.8h, v22.8h\n"
- ".inst 0x6e55ec0a // bfmmla v10.4s, v0.8h, v21.8h\n"
- "ldr q21, [x28, #0x60]\n"
- "ldr q22, [x28, #0x70]\n"
+ ".inst 0x6e56ec09 // bfmmla v9.4s, v0.8h, v22.8h\n"
+ "ldr q22, [x28, #0x60]\n"
+ ".inst 0x6e55ec0f // bfmmla v15.4s, v0.8h, v21.8h\n"
+ "ldr q21, [x28, #0x70]\n"
+ ".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
+ "ldr q24, [x28, #0x80]\n"
".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
- ".inst 0x6e55ec0b // bfmmla v11.4s, v0.8h, v21.8h\n"
- "ldr q21, [x28, #0x80]\n"
"ldr q23, [x28, #0x90]\n"
- ".inst 0x6e56ec11 // bfmmla v17.4s, v0.8h, v22.8h\n"
- ".inst 0x6e55ec0c // bfmmla v12.4s, v0.8h, v21.8h\n"
+ ".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
"ldr q22, [x28, #0xa0]\n"
+ ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x28, #0xb0]\n"
+ "add x28, x28, #0xc0\n"
+ ".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
- "add x28, x28, #0xc0\n"
"28:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -382,9 +384,9 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v12.2d, v12.2d, v18.2d\n"
"uzp1 v13.2d, v13.2d, v19.2d\n"
"tbz %x[flags], #1, 29f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v22.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v22.4s }, [x21]\n"
"ld1r { v21.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v22.4s\n"
"fmin v9.4s, v9.4s, v22.4s\n"
@@ -487,27 +489,27 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"bgt 2b\n"
"b 174f\n"
"44:" // Height 2
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"45:" // Height 2: Column loop
"cbz x10, 46f\n"
"ldr q8, [x10, #0x0]\n"
"ldr q9, [x10, #0x10]\n"
- "zip2 v14.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x10, #0x20]\n"
"ldr q11, [x10, #0x30]\n"
- "zip2 v15.2d, v9.2d, v9.2d\n"
- "zip1 v9.2d, v9.2d, v9.2d\n"
"ldr q12, [x10, #0x40]\n"
"ldr q13, [x10, #0x50]\n"
+ "add x10, x10, #0x60\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x10, x10, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -517,117 +519,117 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"tbz %x[flags], #0, 61f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x9, #0x18\n"
- "add x23, x27, x20, LSL #2\n"
+ "add x24, x27, x20, LSL #2\n"
"bge 59f\n"
"tbz x9, #4, 50f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
"ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
"tbz x9, #2, 48f\n"
"ld1 { v13.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
"tbz x9, #1, 47f\n"
"ldr d20, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
"tbz x9, #0, 58f\n"
"ld1 { v20.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v19.s }[2], [x24]\n"
"b 58f\n"
"47:" // Height 2: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x9, #0, 58f\n"
"ldr s20, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
"b 58f\n"
"48:" // Height 2: Partial accumulate: partial_2_16
"tbz x9, #1, 49f\n"
"ldr d13, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
"tbz x9, #0, 58f\n"
"ld1 { v13.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v18.s }[2], [x24]\n"
"b 58f\n"
"49:" // Height 2: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x9, #0, 58f\n"
"ldr s13, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
"b 58f\n"
"50:" // Height 2: Partial accumulate: partial_8_0
"tbz x9, #3, 54f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
"tbz x9, #2, 52f\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
"tbz x9, #1, 51f\n"
"ldr d12, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
"tbz x9, #0, 58f\n"
"ld1 { v12.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v17.s }[2], [x24]\n"
"b 58f\n"
"51:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x9, #0, 58f\n"
"ldr s12, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
"b 58f\n"
"52:" // Height 2: Partial accumulate: partial_2_8
"tbz x9, #1, 53f\n"
"ldr d11, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
"tbz x9, #0, 58f\n"
"ld1 { v11.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v16.s }[2], [x24]\n"
"b 58f\n"
"53:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x9, #0, 58f\n"
"ldr s11, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
"b 58f\n"
"54:" // Height 2: Partial accumulate: partial_4_0
"tbz x9, #2, 56f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
"tbz x9, #1, 55f\n"
"ldr d10, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
"tbz x9, #0, 58f\n"
"ld1 { v10.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x24]\n"
"b 58f\n"
"55:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x9, #0, 58f\n"
"ldr s10, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
"b 58f\n"
"56:" // Height 2: Partial accumulate: partial_2_0
"tbz x9, #1, 57f\n"
"ldr d9, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
"tbz x9, #0, 58f\n"
"ld1 { v9.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x24]\n"
"b 58f\n"
"57:" // Height 2: Partial accumulate: partial_1_0
"ldr s9, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
"58:" // Height 2: Partial accumulate: Done
"sub x27, x27, x20\n"
@@ -639,12 +641,12 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"ldr q12, [x27, #0x30]\n"
"ldr q13, [x27, #0x40]\n"
"ldr q20, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
"60:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -676,8 +678,8 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"mov x26, #0x0\n"
"63:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 64f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -704,41 +706,44 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"blt 67f\n"
"66:" // Height 2: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "cmp x25, #0x8\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x23], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "ldr q3, [x28, #0x40]\n"
+ "ldr q29, [x28, #0x40]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
"ldr q23, [x28, #0x50]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
"ldr q22, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
"ldr q21, [x28, #0x70]\n"
- ".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
- "ldr q1, [x28, #0x80]\n"
+ ".inst 0x6e5dec0a // bfmmla v10.4s, v0.8h, v29.8h\n"
+ "ldr q30, [x28, #0x80]\n"
".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x90]\n"
".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
"ldr q22, [x28, #0xa0]\n"
".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x28, #0xb0]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x8\n"
"add x28, x28, #0xc0\n"
- ".inst 0x6e41ec0c // bfmmla v12.4s, v0.8h, v1.8h\n"
+ ".inst 0x6e5eec0c // bfmmla v12.4s, v0.8h, v30.8h\n"
"ldr q4, [x28, #0x0]\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
"ldr q5, [x28, #0x10]\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
"ldr q6, [x28, #0x20]\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
- "ldr q7, [x28, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
"ld1 { v0.4s }, [x24], #0x10\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
+ "ldr q7, [x28, #0x30]\n"
"bge 66b\n"
"67:" // Height 2: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q24, [x28, #0x40]\n"
@@ -756,14 +761,11 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"ldr q22, [x28, #0xa0]\n"
".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x28, #0xb0]\n"
- "sub x25, x25, #0x4\n"
+ "add x28, x28, #0xc0\n"
".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
- "add x28, x28, #0xc0\n"
"68:" // Height 2: Multiply loop: Main loop skip
"cbz x25, 71f\n"
"cbz x25, 71f\n"
@@ -781,55 +783,55 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"ldr q24, [x28, #0x0]\n"
"ldr q23, [x28, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
"ldr q22, [x28, #0x20]\n"
"ldr q21, [x28, #0x30]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e58ec08 // bfmmla v8.4s, v0.8h, v24.8h\n"
- ".inst 0x6e57ec0e // bfmmla v14.4s, v0.8h, v23.8h\n"
"ldr q24, [x28, #0x40]\n"
+ ".inst 0x6e57ec0e // bfmmla v14.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x50]\n"
".inst 0x6e56ec09 // bfmmla v9.4s, v0.8h, v22.8h\n"
- ".inst 0x6e55ec0f // bfmmla v15.4s, v0.8h, v21.8h\n"
"ldr q22, [x28, #0x60]\n"
+ ".inst 0x6e55ec0f // bfmmla v15.4s, v0.8h, v21.8h\n"
"ldr q21, [x28, #0x70]\n"
".inst 0x6e58ec0a // bfmmla v10.4s, v0.8h, v24.8h\n"
- ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
"ldr q24, [x28, #0x80]\n"
+ ".inst 0x6e57ec10 // bfmmla v16.4s, v0.8h, v23.8h\n"
"ldr q23, [x28, #0x90]\n"
".inst 0x6e56ec0b // bfmmla v11.4s, v0.8h, v22.8h\n"
- ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q22, [x28, #0xa0]\n"
+ ".inst 0x6e55ec11 // bfmmla v17.4s, v0.8h, v21.8h\n"
"ldr q21, [x28, #0xb0]\n"
+ "add x28, x28, #0xc0\n"
".inst 0x6e58ec0c // bfmmla v12.4s, v0.8h, v24.8h\n"
".inst 0x6e57ec12 // bfmmla v18.4s, v0.8h, v23.8h\n"
".inst 0x6e56ec0d // bfmmla v13.4s, v0.8h, v22.8h\n"
".inst 0x6e55ec13 // bfmmla v19.4s, v0.8h, v21.8h\n"
- "add x28, x28, #0xc0\n"
"71:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
"cmp x26, x20\n"
"bne 63b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
"uzp1 v14.2d, v9.2d, v15.2d\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
+ "add x24, x27, x20, LSL #2\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
"uzp2 v11.2d, v11.2d, v17.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v17.2d, v12.2d, v18.2d\n"
"uzp2 v12.2d, v12.2d, v18.2d\n"
"uzp1 v18.2d, v13.2d, v19.2d\n"
"uzp2 v13.2d, v13.2d, v19.2d\n"
"tbz %x[flags], #1, 72f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v22.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v22.4s }, [x21]\n"
"ld1r { v21.4s }, [x20]\n"
"fmin v4.4s, v4.4s, v22.4s\n"
"fmin v14.4s, v14.4s, v22.4s\n"
@@ -863,99 +865,99 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"st1 { v14.4s }, [x27], #0x10\n"
"st1 { v15.4s }, [x27], #0x10\n"
"st1 { v16.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v11.4s }, [x24], #0x10\n"
"tbz x9, #2, 74f\n"
"st1 { v17.4s }, [x27], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
"tbz x9, #1, 73f\n"
"str d18, [x27], #0x8\n"
- "str d13, [x23], #0x8\n"
+ "str d13, [x24], #0x8\n"
"tbz x9, #0, 84f\n"
"st1 { v18.s }[2], [x27]\n"
- "st1 { v13.s }[2], [x23]\n"
+ "st1 { v13.s }[2], [x24]\n"
"b 84f\n"
"73:" // Height 2: Partial direct writeback: partial_1_20
"tbz x9, #0, 84f\n"
"str s18, [x27, #0x0]\n"
- "str s13, [x23, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
"b 84f\n"
"74:" // Height 2: Partial direct writeback: partial_2_16
"tbz x9, #1, 75f\n"
"str d17, [x27], #0x8\n"
- "str d12, [x23], #0x8\n"
+ "str d12, [x24], #0x8\n"
"tbz x9, #0, 84f\n"
"st1 { v17.s }[2], [x27]\n"
- "st1 { v12.s }[2], [x23]\n"
+ "st1 { v12.s }[2], [x24]\n"
"b 84f\n"
"75:" // Height 2: Partial direct writeback: partial_1_16
"tbz x9, #0, 84f\n"
"str s17, [x27, #0x0]\n"
- "str s12, [x23, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
"b 84f\n"
"76:" // Height 2: Partial direct writeback: partial_8_0
"tbz x9, #3, 80f\n"
"st1 { v4.4s }, [x27], #0x10\n"
"st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
"tbz x9, #2, 78f\n"
"st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
"tbz x9, #1, 77f\n"
"str d16, [x27], #0x8\n"
- "str d11, [x23], #0x8\n"
+ "str d11, [x24], #0x8\n"
"tbz x9, #0, 84f\n"
"st1 { v16.s }[2], [x27]\n"
- "st1 { v11.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x24]\n"
"b 84f\n"
"77:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 84f\n"
"str s16, [x27, #0x0]\n"
- "str s11, [x23, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
"b 84f\n"
"78:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 79f\n"
"str d15, [x27], #0x8\n"
- "str d10, [x23], #0x8\n"
+ "str d10, [x24], #0x8\n"
"tbz x9, #0, 84f\n"
"st1 { v15.s }[2], [x27]\n"
- "st1 { v10.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x24]\n"
"b 84f\n"
"79:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 84f\n"
"str s15, [x27, #0x0]\n"
- "str s10, [x23, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
"b 84f\n"
"80:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 82f\n"
"st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
"tbz x9, #1, 81f\n"
"str d14, [x27], #0x8\n"
- "str d9, [x23], #0x8\n"
+ "str d9, [x24], #0x8\n"
"tbz x9, #0, 84f\n"
"st1 { v14.s }[2], [x27]\n"
- "st1 { v9.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x24]\n"
"b 84f\n"
"81:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 84f\n"
"str s14, [x27, #0x0]\n"
- "str s9, [x23, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
"b 84f\n"
"82:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 83f\n"
"str d4, [x27], #0x8\n"
- "str d8, [x23], #0x8\n"
+ "str d8, [x24], #0x8\n"
"tbz x9, #0, 84f\n"
"st1 { v4.s }[2], [x27]\n"
- "st1 { v8.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x24]\n"
"b 84f\n"
"83:" // Height 2: Partial direct writeback: partial_1_0
"str s4, [x27, #0x0]\n"
- "str s8, [x23, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
"84:" // Height 2: Partial direct writeback: Done
"b 86f\n"
"85:" // Height 2: Full writeback
@@ -966,38 +968,38 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"str q17, [x27, #0x40]\n"
"str q18, [x27, #0x50]\n"
"add x27, x27, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q12, [x24, #0x40]\n"
+ "str q13, [x24, #0x50]\n"
"86:" // Height 2: Writeback done
"subs x9, x9, #0x18\n"
"bgt 45b\n"
"b 174f\n"
"87:" // Height 3
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"88:" // Height 3: Column loop
"cbz x10, 89f\n"
"ldr q8, [x10, #0x0]\n"
"ldr q9, [x10, #0x10]\n"
- "zip2 v14.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x10, #0x20]\n"
"ldr q11, [x10, #0x30]\n"
- "zip2 v15.2d, v9.2d, v9.2d\n"
- "zip1 v9.2d, v9.2d, v9.2d\n"
"ldr q12, [x10, #0x40]\n"
"ldr q13, [x10, #0x50]\n"
+ "add x10, x10, #0x60\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x10, x10, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -1018,147 +1020,147 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"89:" // Height 3: no bias
"tbz %x[flags], #0, 104f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
"cmp x9, #0x18\n"
- "add x22, x23, x20, LSL #2\n"
+ "add x24, x27, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"bge 102f\n"
"tbz x9, #4, 93f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
"ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
"tbz x9, #2, 91f\n"
"ld1 { v13.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x9, #1, 90f\n"
"ldr d20, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
- "ldr d4, [x22], #0x8\n"
+ "ldr d4, [x23], #0x8\n"
"tbz x9, #0, 101f\n"
"ld1 { v20.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
+ "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v4.s }[2], [x23]\n"
"b 101f\n"
"90:" // Height 3: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x9, #0, 101f\n"
"ldr s20, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
+ "ldr s4, [x23, #0x0]\n"
"b 101f\n"
"91:" // Height 3: Partial accumulate: partial_2_16
"tbz x9, #1, 92f\n"
"ldr d13, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x9, #0, 101f\n"
"ld1 { v13.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 101f\n"
"92:" // Height 3: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x9, #0, 101f\n"
"ldr s13, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"b 101f\n"
"93:" // Height 3: Partial accumulate: partial_8_0
"tbz x9, #3, 97f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
"tbz x9, #2, 95f\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
"tbz x9, #1, 94f\n"
"ldr d12, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x22], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
"tbz x9, #0, 101f\n"
"ld1 { v12.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
+ "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
"b 101f\n"
"94:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x9, #0, 101f\n"
"ldr s12, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
"b 101f\n"
"95:" // Height 3: Partial accumulate: partial_2_8
"tbz x9, #1, 96f\n"
"ldr d11, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
- "ldr d23, [x22], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
"tbz x9, #0, 101f\n"
"ld1 { v11.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
"b 101f\n"
"96:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x9, #0, 101f\n"
"ldr s11, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
"b 101f\n"
"97:" // Height 3: Partial accumulate: partial_4_0
"tbz x9, #2, 99f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
"tbz x9, #1, 98f\n"
"ldr d10, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
- "ldr d22, [x22], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
"tbz x9, #0, 101f\n"
"ld1 { v10.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
"b 101f\n"
"98:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x9, #0, 101f\n"
"ldr s10, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
"b 101f\n"
"99:" // Height 3: Partial accumulate: partial_2_0
"tbz x9, #1, 100f\n"
"ldr d9, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
- "ldr d21, [x22], #0x8\n"
+ "ldr d21, [x23], #0x8\n"
"tbz x9, #0, 101f\n"
"ld1 { v9.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v21.s }[2], [x23]\n"
"b 101f\n"
"100:" // Height 3: Partial accumulate: partial_1_0
"ldr s9, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s21, [x22, #0x0]\n"
+ "ldr s21, [x23, #0x0]\n"
"101:" // Height 3: Partial accumulate: Done
"sub x27, x27, x20\n"
"b 103f\n"
@@ -1169,18 +1171,18 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"ldr q12, [x27, #0x30]\n"
"ldr q13, [x27, #0x40]\n"
"ldr q20, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q21, [x22, #0x0]\n"
- "ldr q22, [x22, #0x10]\n"
- "ldr q23, [x22, #0x20]\n"
- "ldr q24, [x22, #0x30]\n"
- "ldr q25, [x22, #0x40]\n"
- "ldr q4, [x22, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
+ "ldr q21, [x23, #0x0]\n"
+ "ldr q22, [x23, #0x10]\n"
+ "ldr q23, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q25, [x23, #0x40]\n"
+ "ldr q4, [x23, #0x50]\n"
"103:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -1236,8 +1238,8 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"mov x26, #0x0\n"
"106:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 107f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1268,33 +1270,33 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"blt 110f\n"
"109:" // Height 3: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "sub x25, x25, #0x4\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "cmp x25, #0x8\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x23], #0x10\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q4, [x28, #0x40]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x50]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "cmp x25, #0x8\n"
- ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q3, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec5c // bfmmla v28.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x90]\n"
".inst 0x6e46ec0b // bfmmla v11.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e46ec57 // bfmmla v23.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0xa0]\n"
".inst 0x6e43ec11 // bfmmla v17.4s, v0.8h, v3.8h\n"
@@ -1318,25 +1320,25 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"bge 109b\n"
"110:" // Height 3: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "sub x25, x25, #0x4\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
"ldr q3, [x28, #0x40]\n"
".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- ".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q4, [x28, #0x50]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- ".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- ".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q1, [x28, #0x70]\n"
".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e43ec56 // bfmmla v22.4s, v2.8h, v3.8h\n"
"ldr q5, [x28, #0x80]\n"
".inst 0x6e44ec10 // bfmmla v16.4s, v0.8h, v4.8h\n"
@@ -1377,21 +1379,21 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"ldr q5, [x28, #0x0]\n"
"ldr q4, [x28, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- "ldr q3, [x28, #0x20]\n"
- "ldr q1, [x28, #0x30]\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- ".inst 0x6e45ec08 // bfmmla v8.4s, v0.8h, v5.8h\n"
+ "ldr q3, [x28, #0x20]\n"
+ "ldr q6, [x28, #0x30]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e45ec54 // bfmmla v20.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e44ec5a // bfmmla v26.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ec08 // bfmmla v8.4s, v0.8h, v5.8h\n"
"ldr q5, [x28, #0x40]\n"
".inst 0x6e44ec0e // bfmmla v14.4s, v0.8h, v4.8h\n"
- ".inst 0x6e44ec5a // bfmmla v26.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x50]\n"
".inst 0x6e43ec09 // bfmmla v9.4s, v0.8h, v3.8h\n"
".inst 0x6e43ec55 // bfmmla v21.4s, v2.8h, v3.8h\n"
"ldr q3, [x28, #0x60]\n"
- ".inst 0x6e41ec0f // bfmmla v15.4s, v0.8h, v1.8h\n"
- ".inst 0x6e41ec5b // bfmmla v27.4s, v2.8h, v1.8h\n"
+ ".inst 0x6e46ec0f // bfmmla v15.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec5b // bfmmla v27.4s, v2.8h, v6.8h\n"
"ldr q1, [x28, #0x70]\n"
".inst 0x6e45ec0a // bfmmla v10.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec56 // bfmmla v22.4s, v2.8h, v5.8h\n"
@@ -1420,19 +1422,19 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"cmp x26, x20\n"
"bne 106b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
- "uzp1 v14.2d, v9.2d, v15.2d\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "uzp1 v14.2d, v9.2d, v15.2d\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
+ "add x24, x27, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
"uzp2 v11.2d, v11.2d, v17.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v17.2d, v12.2d, v18.2d\n"
"uzp2 v12.2d, v12.2d, v18.2d\n"
"uzp1 v18.2d, v13.2d, v19.2d\n"
@@ -1444,9 +1446,9 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v24.2d, v24.2d, v30.2d\n"
"uzp1 v25.2d, v25.2d, v31.2d\n"
"tbz %x[flags], #1, 115f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v4.4s, v4.4s, v1.4s\n"
"fmin v14.4s, v14.4s, v1.4s\n"
@@ -1492,126 +1494,126 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"st1 { v14.4s }, [x27], #0x10\n"
"st1 { v15.4s }, [x27], #0x10\n"
"st1 { v16.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v11.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
"tbz x9, #2, 117f\n"
"st1 { v17.4s }, [x27], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x9, #1, 116f\n"
"str d18, [x27], #0x8\n"
- "str d13, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x9, #0, 127f\n"
"st1 { v18.s }[2], [x27]\n"
- "st1 { v13.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 127f\n"
"116:" // Height 3: Partial direct writeback: partial_1_20
"tbz x9, #0, 127f\n"
"str s18, [x27, #0x0]\n"
- "str s13, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 127f\n"
"117:" // Height 3: Partial direct writeback: partial_2_16
"tbz x9, #1, 118f\n"
"str d17, [x27], #0x8\n"
- "str d12, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x9, #0, 127f\n"
"st1 { v17.s }[2], [x27]\n"
- "st1 { v12.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 127f\n"
"118:" // Height 3: Partial direct writeback: partial_1_16
"tbz x9, #0, 127f\n"
"str s17, [x27, #0x0]\n"
- "str s12, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"b 127f\n"
"119:" // Height 3: Partial direct writeback: partial_8_0
"tbz x9, #3, 123f\n"
"st1 { v4.4s }, [x27], #0x10\n"
"st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
- "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
"tbz x9, #2, 121f\n"
"st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v22.4s }, [x23], #0x10\n"
"tbz x9, #1, 120f\n"
"str d16, [x27], #0x8\n"
- "str d11, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
"tbz x9, #0, 127f\n"
"st1 { v16.s }[2], [x27]\n"
- "st1 { v11.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
"b 127f\n"
"120:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 127f\n"
"str s16, [x27, #0x0]\n"
- "str s11, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
"b 127f\n"
"121:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 122f\n"
"str d15, [x27], #0x8\n"
- "str d10, [x23], #0x8\n"
- "str d22, [x22], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
"tbz x9, #0, 127f\n"
"st1 { v15.s }[2], [x27]\n"
- "st1 { v10.s }[2], [x23]\n"
- "st1 { v22.s }[2], [x22]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
"b 127f\n"
"122:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 127f\n"
"str s15, [x27, #0x0]\n"
- "str s10, [x23, #0x0]\n"
- "str s22, [x22, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
"b 127f\n"
"123:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 125f\n"
"st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
"tbz x9, #1, 124f\n"
"str d14, [x27], #0x8\n"
- "str d9, [x23], #0x8\n"
- "str d21, [x22], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
"tbz x9, #0, 127f\n"
"st1 { v14.s }[2], [x27]\n"
- "st1 { v9.s }[2], [x23]\n"
- "st1 { v21.s }[2], [x22]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
"b 127f\n"
"124:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 127f\n"
"str s14, [x27, #0x0]\n"
- "str s9, [x23, #0x0]\n"
- "str s21, [x22, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
"b 127f\n"
"125:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 126f\n"
"str d4, [x27], #0x8\n"
- "str d8, [x23], #0x8\n"
- "str d20, [x22], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
"tbz x9, #0, 127f\n"
"st1 { v4.s }[2], [x27]\n"
- "st1 { v8.s }[2], [x23]\n"
- "st1 { v20.s }[2], [x22]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
"b 127f\n"
"126:" // Height 3: Partial direct writeback: partial_1_0
"str s4, [x27, #0x0]\n"
- "str s8, [x23, #0x0]\n"
- "str s20, [x22, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
"127:" // Height 3: Partial direct writeback: Done
"b 129f\n"
"128:" // Height 3: Full writeback
@@ -1622,47 +1624,48 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"str q17, [x27, #0x40]\n"
"str q18, [x27, #0x50]\n"
"add x27, x27, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
- "str q20, [x22, #0x0]\n"
- "str q21, [x22, #0x10]\n"
- "str q22, [x22, #0x20]\n"
- "str q23, [x22, #0x30]\n"
- "str q24, [x22, #0x40]\n"
- "str q25, [x22, #0x50]\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q12, [x24, #0x40]\n"
+ "str q13, [x24, #0x50]\n"
+ "str q20, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q22, [x23, #0x20]\n"
+ "str q23, [x23, #0x30]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q25, [x23, #0x50]\n"
"129:" // Height 3: Writeback done
"subs x9, x9, #0x18\n"
"bgt 88b\n"
"b 174f\n"
"130:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x10\n"
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"131:" // Height 4: Column loop
"cbz x10, 132f\n"
"ldr q8, [x10, #0x0]\n"
"ldr q9, [x10, #0x10]\n"
- "zip2 v14.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x10, #0x20]\n"
"ldr q11, [x10, #0x30]\n"
- "zip2 v15.2d, v9.2d, v9.2d\n"
- "zip1 v9.2d, v9.2d, v9.2d\n"
"ldr q12, [x10, #0x40]\n"
"ldr q13, [x10, #0x50]\n"
+ "add x10, x10, #0x60\n"
+ "zip2 v14.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
+ "zip2 v15.2d, v9.2d, v9.2d\n"
+ "zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v16.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
"zip2 v17.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
- "add x10, x10, #0x60\n"
"zip2 v18.2d, v12.2d, v12.2d\n"
"zip1 v12.2d, v12.2d, v12.2d\n"
"zip2 v19.2d, v13.2d, v13.2d\n"
@@ -1683,175 +1686,175 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"132:" // Height 4: no bias
"tbz %x[flags], #0, 147f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x9, #0x18\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x27, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"bge 145f\n"
"tbz x9, #4, 136f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v27.4s }, [x22], #0x10\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v12.4s }, [x27], #0x10\n"
- "ld1 { v17.4s }, [x23], #0x10\n"
- "ld1 { v24.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v24.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x9, #2, 134f\n"
"ld1 { v13.4s }, [x27], #0x10\n"
- "ld1 { v18.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x9, #1, 133f\n"
"ldr d20, [x27], #0x8\n"
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x24], #0x8\n"
"mov x20, #0x58\n"
- "ldr d4, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d4, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x9, #0, 144f\n"
"ld1 { v20.s }[2], [x27]\n"
- "ld1 { v19.s }[2], [x23]\n"
- "ld1 { v4.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v4.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 144f\n"
"133:" // Height 4: Partial accumulate: partial_1_20
"mov x20, #0x50\n"
"tbz x9, #0, 144f\n"
"ldr s20, [x27, #0x0]\n"
- "ldr s19, [x23, #0x0]\n"
- "ldr s4, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s19, [x24, #0x0]\n"
+ "ldr s4, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 144f\n"
"134:" // Height 4: Partial accumulate: partial_2_16
"tbz x9, #1, 135f\n"
"ldr d13, [x27], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
"mov x20, #0x48\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x9, #0, 144f\n"
"ld1 { v13.s }[2], [x27]\n"
- "ld1 { v18.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 144f\n"
"135:" // Height 4: Partial accumulate: partial_1_16
"mov x20, #0x40\n"
"tbz x9, #0, 144f\n"
"ldr s13, [x27, #0x0]\n"
- "ldr s18, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s18, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 144f\n"
"136:" // Height 4: Partial accumulate: partial_8_0
"tbz x9, #3, 140f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x27], #0x10\n"
- "ld1 { v15.4s }, [x23], #0x10\n"
- "ld1 { v22.4s }, [x22], #0x10\n"
- "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v15.4s }, [x24], #0x10\n"
+ "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v27.4s }, [x22], #0x10\n"
"tbz x9, #2, 138f\n"
"ld1 { v11.4s }, [x27], #0x10\n"
- "ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v23.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v16.4s }, [x24], #0x10\n"
+ "ld1 { v23.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x9, #1, 137f\n"
"ldr d12, [x27], #0x8\n"
- "ldr d17, [x23], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x9, #0, 144f\n"
"ld1 { v12.s }[2], [x27]\n"
- "ld1 { v17.s }[2], [x23]\n"
- "ld1 { v24.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 144f\n"
"137:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x9, #0, 144f\n"
"ldr s12, [x27, #0x0]\n"
- "ldr s17, [x23, #0x0]\n"
- "ldr s24, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s17, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 144f\n"
"138:" // Height 4: Partial accumulate: partial_2_8
"tbz x9, #1, 139f\n"
"ldr d11, [x27], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"mov x20, #0x28\n"
- "ldr d23, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d23, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x9, #0, 144f\n"
"ld1 { v11.s }[2], [x27]\n"
- "ld1 { v16.s }[2], [x23]\n"
- "ld1 { v23.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v16.s }[2], [x24]\n"
+ "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 144f\n"
"139:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x9, #0, 144f\n"
"ldr s11, [x27, #0x0]\n"
- "ldr s16, [x23, #0x0]\n"
- "ldr s23, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s16, [x24, #0x0]\n"
+ "ldr s23, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"b 144f\n"
"140:" // Height 4: Partial accumulate: partial_4_0
"tbz x9, #2, 142f\n"
"ld1 { v9.4s }, [x27], #0x10\n"
- "ld1 { v14.4s }, [x23], #0x10\n"
- "ld1 { v21.4s }, [x22], #0x10\n"
- "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v26.4s }, [x22], #0x10\n"
"tbz x9, #1, 141f\n"
"ldr d10, [x27], #0x8\n"
- "ldr d15, [x23], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
"mov x20, #0x18\n"
- "ldr d22, [x22], #0x8\n"
- "ldr d27, [x21], #0x8\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d27, [x22], #0x8\n"
"tbz x9, #0, 144f\n"
"ld1 { v10.s }[2], [x27]\n"
- "ld1 { v15.s }[2], [x23]\n"
- "ld1 { v22.s }[2], [x22]\n"
- "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v27.s }[2], [x22]\n"
"b 144f\n"
"141:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x9, #0, 144f\n"
"ldr s10, [x27, #0x0]\n"
- "ldr s15, [x23, #0x0]\n"
- "ldr s22, [x22, #0x0]\n"
- "ldr s27, [x21, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s22, [x23, #0x0]\n"
+ "ldr s27, [x22, #0x0]\n"
"b 144f\n"
"142:" // Height 4: Partial accumulate: partial_2_0
"tbz x9, #1, 143f\n"
"ldr d9, [x27], #0x8\n"
- "ldr d14, [x23], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
"mov x20, #0x8\n"
- "ldr d21, [x22], #0x8\n"
- "ldr d26, [x21], #0x8\n"
+ "ldr d21, [x23], #0x8\n"
+ "ldr d26, [x22], #0x8\n"
"tbz x9, #0, 144f\n"
"ld1 { v9.s }[2], [x27]\n"
- "ld1 { v14.s }[2], [x23]\n"
- "ld1 { v21.s }[2], [x22]\n"
- "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v26.s }[2], [x22]\n"
"b 144f\n"
"143:" // Height 4: Partial accumulate: partial_1_0
"ldr s9, [x27, #0x0]\n"
- "ldr s14, [x23, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s21, [x22, #0x0]\n"
- "ldr s26, [x21, #0x0]\n"
+ "ldr s21, [x23, #0x0]\n"
+ "ldr s26, [x22, #0x0]\n"
"144:" // Height 4: Partial accumulate: Done
"sub x27, x27, x20\n"
"b 146f\n"
@@ -1862,24 +1865,24 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"ldr q12, [x27, #0x30]\n"
"ldr q13, [x27, #0x40]\n"
"ldr q20, [x27, #0x50]\n"
- "ldr q14, [x23, #0x0]\n"
- "ldr q15, [x23, #0x10]\n"
- "ldr q16, [x23, #0x20]\n"
- "ldr q17, [x23, #0x30]\n"
- "ldr q18, [x23, #0x40]\n"
- "ldr q19, [x23, #0x50]\n"
- "ldr q21, [x22, #0x0]\n"
- "ldr q22, [x22, #0x10]\n"
- "ldr q23, [x22, #0x20]\n"
- "ldr q24, [x22, #0x30]\n"
- "ldr q25, [x22, #0x40]\n"
- "ldr q4, [x22, #0x50]\n"
- "ldr q26, [x21, #0x0]\n"
- "ldr q27, [x21, #0x10]\n"
- "ldr q28, [x21, #0x20]\n"
- "ldr q29, [x21, #0x30]\n"
- "ldr q30, [x21, #0x40]\n"
- "ldr q31, [x21, #0x50]\n"
+ "ldr q14, [x24, #0x0]\n"
+ "ldr q15, [x24, #0x10]\n"
+ "ldr q16, [x24, #0x20]\n"
+ "ldr q17, [x24, #0x30]\n"
+ "ldr q18, [x24, #0x40]\n"
+ "ldr q19, [x24, #0x50]\n"
+ "ldr q21, [x23, #0x0]\n"
+ "ldr q22, [x23, #0x10]\n"
+ "ldr q23, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q25, [x23, #0x40]\n"
+ "ldr q4, [x23, #0x50]\n"
+ "ldr q26, [x22, #0x0]\n"
+ "ldr q27, [x22, #0x10]\n"
+ "ldr q28, [x22, #0x20]\n"
+ "ldr q29, [x22, #0x30]\n"
+ "ldr q30, [x22, #0x40]\n"
+ "ldr q31, [x22, #0x50]\n"
"146:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v14.2d\n"
"zip2 v14.2d, v9.2d, v14.2d\n"
@@ -1935,8 +1938,8 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"mov x26, #0x0\n"
"149:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 150f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1973,28 +1976,28 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x25, x25, #0x4\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
"cmp x25, #0x8\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x23], #0x10\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ld1 { v3.4s }, [x21], #0x10\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x40]\n"
- ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v1.4s }, [x23], #0x10\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q5, [x28, #0x50]\n"
- ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
- ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q7, [x28, #0x70]\n"
".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x21, #0x80]\n"
- "ld1 { v3.4s }, [x21], #0x10\n"
".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
"ldr q4, [x28, #0x80]\n"
".inst 0x6e45ec10 // bfmmla v16.4s, v0.8h, v5.8h\n"
@@ -2027,21 +2030,21 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x25, x25, #0x4\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e44ec08 // bfmmla v8.4s, v0.8h, v4.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e44ec54 // bfmmla v20.4s, v2.8h, v4.8h\n"
"ldr q3, [x28, #0x40]\n"
- ".inst 0x6e45ec0e // bfmmla v14.4s, v0.8h, v5.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e45ec5a // bfmmla v26.4s, v2.8h, v5.8h\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e46ec55 // bfmmla v21.4s, v2.8h, v6.8h\n"
"ldr q6, [x28, #0x60]\n"
- ".inst 0x6e47ec0f // bfmmla v15.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec5b // bfmmla v27.4s, v2.8h, v7.8h\n"
"ldr q1, [x28, #0x70]\n"
".inst 0x6e43ec0a // bfmmla v10.4s, v0.8h, v3.8h\n"
@@ -2132,23 +2135,23 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"cmp x26, x20\n"
"bne 149b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 v4.2d, v8.2d, v14.2d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v14.2d\n"
- "uzp1 v14.2d, v9.2d, v15.2d\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "uzp1 v14.2d, v9.2d, v15.2d\n"
"uzp2 v9.2d, v9.2d, v15.2d\n"
"uzp1 v15.2d, v10.2d, v16.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v10.2d, v10.2d, v16.2d\n"
+ "add x24, x27, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v16.2d, v11.2d, v17.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v11.2d, v11.2d, v17.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v17.2d, v12.2d, v18.2d\n"
"uzp2 v12.2d, v12.2d, v18.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v18.2d, v13.2d, v19.2d\n"
"uzp2 v13.2d, v13.2d, v19.2d\n"
"uzp1 v19.2d, v20.2d, v26.2d\n"
@@ -2164,9 +2167,9 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"uzp1 v30.2d, v25.2d, v31.2d\n"
"uzp2 v25.2d, v25.2d, v31.2d\n"
"tbz %x[flags], #1, 158f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v4.4s, v4.4s, v1.4s\n"
"fmin v14.4s, v14.4s, v1.4s\n"
@@ -2224,153 +2227,153 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"st1 { v14.4s }, [x27], #0x10\n"
"st1 { v15.4s }, [x27], #0x10\n"
"st1 { v16.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v11.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v27.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "st1 { v21.4s }, [x21], #0x10\n"
- "st1 { v22.4s }, [x21], #0x10\n"
- "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v11.4s }, [x24], #0x10\n"
+ "st1 { v19.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
+ "st1 { v27.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x22], #0x10\n"
"tbz x9, #2, 160f\n"
"st1 { v17.4s }, [x27], #0x10\n"
- "st1 { v12.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v29.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
"tbz x9, #1, 159f\n"
"str d18, [x27], #0x8\n"
- "str d13, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
+ "str d25, [x22], #0x8\n"
"tbz x9, #0, 170f\n"
"st1 { v18.s }[2], [x27]\n"
- "st1 { v13.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
+ "st1 { v25.s }[2], [x22]\n"
"b 170f\n"
"159:" // Height 4: Partial direct writeback: partial_1_20
"tbz x9, #0, 170f\n"
"str s18, [x27, #0x0]\n"
- "str s13, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
+ "str s25, [x22, #0x0]\n"
"b 170f\n"
"160:" // Height 4: Partial direct writeback: partial_2_16
"tbz x9, #1, 161f\n"
"str d17, [x27], #0x8\n"
- "str d12, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d29, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
"tbz x9, #0, 170f\n"
"st1 { v17.s }[2], [x27]\n"
- "st1 { v12.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v29.s }[2], [x23]\n"
+ "st1 { v24.s }[2], [x22]\n"
"b 170f\n"
"161:" // Height 4: Partial direct writeback: partial_1_16
"tbz x9, #0, 170f\n"
"str s17, [x27, #0x0]\n"
- "str s12, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s29, [x23, #0x0]\n"
+ "str s24, [x22, #0x0]\n"
"b 170f\n"
"162:" // Height 4: Partial direct writeback: partial_8_0
"tbz x9, #3, 166f\n"
"st1 { v4.4s }, [x27], #0x10\n"
"st1 { v14.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v9.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
- "st1 { v21.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v19.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
"tbz x9, #2, 164f\n"
"st1 { v15.4s }, [x27], #0x10\n"
- "st1 { v10.4s }, [x23], #0x10\n"
- "st1 { v27.4s }, [x22], #0x10\n"
- "st1 { v22.4s }, [x21], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v27.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
"tbz x9, #1, 163f\n"
"str d16, [x27], #0x8\n"
- "str d11, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d23, [x21], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
"tbz x9, #0, 170f\n"
"st1 { v16.s }[2], [x27]\n"
- "st1 { v11.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v23.s }[2], [x21]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
"b 170f\n"
"163:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 170f\n"
"str s16, [x27, #0x0]\n"
- "str s11, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s23, [x21, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
"b 170f\n"
"164:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 165f\n"
"str d15, [x27], #0x8\n"
- "str d10, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
- "str d22, [x21], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
"tbz x9, #0, 170f\n"
"st1 { v15.s }[2], [x27]\n"
- "st1 { v10.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
- "st1 { v22.s }[2], [x21]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
"b 170f\n"
"165:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 170f\n"
"str s15, [x27, #0x0]\n"
- "str s10, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
- "str s22, [x21, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
"b 170f\n"
"166:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 168f\n"
"st1 { v4.4s }, [x27], #0x10\n"
- "st1 { v8.4s }, [x23], #0x10\n"
- "st1 { v19.4s }, [x22], #0x10\n"
- "st1 { v20.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v19.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
"tbz x9, #1, 167f\n"
"str d14, [x27], #0x8\n"
- "str d9, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
- "str d21, [x21], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
"tbz x9, #0, 170f\n"
"st1 { v14.s }[2], [x27]\n"
- "st1 { v9.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
- "st1 { v21.s }[2], [x21]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
"b 170f\n"
"167:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 170f\n"
"str s14, [x27, #0x0]\n"
- "str s9, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
- "str s21, [x21, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
"b 170f\n"
"168:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 169f\n"
"str d4, [x27], #0x8\n"
- "str d8, [x23], #0x8\n"
- "str d19, [x22], #0x8\n"
- "str d20, [x21], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
"tbz x9, #0, 170f\n"
"st1 { v4.s }[2], [x27]\n"
- "st1 { v8.s }[2], [x23]\n"
- "st1 { v19.s }[2], [x22]\n"
- "st1 { v20.s }[2], [x21]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
"b 170f\n"
"169:" // Height 4: Partial direct writeback: partial_1_0
"str s4, [x27, #0x0]\n"
- "str s8, [x23, #0x0]\n"
- "str s19, [x22, #0x0]\n"
- "str s20, [x21, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
"170:" // Height 4: Partial direct writeback: Done
"b 172f\n"
"171:" // Height 4: Full writeback
@@ -2381,24 +2384,24 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"str q17, [x27, #0x40]\n"
"str q18, [x27, #0x50]\n"
"add x27, x27, #0x60\n"
- "str q8, [x23, #0x0]\n"
- "str q9, [x23, #0x10]\n"
- "str q10, [x23, #0x20]\n"
- "str q11, [x23, #0x30]\n"
- "str q12, [x23, #0x40]\n"
- "str q13, [x23, #0x50]\n"
- "str q19, [x22, #0x0]\n"
- "str q26, [x22, #0x10]\n"
- "str q27, [x22, #0x20]\n"
- "str q28, [x22, #0x30]\n"
- "str q29, [x22, #0x40]\n"
- "str q30, [x22, #0x50]\n"
- "str q20, [x21, #0x0]\n"
- "str q21, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q23, [x21, #0x30]\n"
- "str q24, [x21, #0x40]\n"
- "str q25, [x21, #0x50]\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q12, [x24, #0x40]\n"
+ "str q13, [x24, #0x50]\n"
+ "str q19, [x23, #0x0]\n"
+ "str q26, [x23, #0x10]\n"
+ "str q27, [x23, #0x20]\n"
+ "str q28, [x23, #0x30]\n"
+ "str q29, [x23, #0x40]\n"
+ "str q30, [x23, #0x50]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x22, #0x40]\n"
+ "str q25, [x22, #0x50]\n"
"172:" // Height 4: Writeback done
"subs x9, x9, #0x18\n"
"bgt 131b\n"
@@ -2414,8 +2417,8 @@ void a64_hybrid_fp32bf16fp32_mmla_4x24 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"174:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16.hpp
index 71e16d68b5..6780b76a3a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}

- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp
index 5693c3f397..d4a4f18d2b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32bf16fp32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,18 +48,19 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;

unsigned long flags=0;
- void *output_ptr;
void *input_ptr;

if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}

@@ -80,6 +81,7 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -102,23 +104,23 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"cmp %x[M], #0x2\n"
"bgt 71f\n"
"beq 36f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"cbz x12, 3f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 15f\n"
@@ -209,8 +211,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"16:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -231,6 +233,9 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"blt 20f\n"
"19:" // Height 1: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "cmp x27, #0x8\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
@@ -243,18 +248,17 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ec0e // bfmmla v14.4s, v0.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
- ".inst 0x6e52ec0b // bfmmla v11.4s, v0.8h, v18.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e52ec0b // bfmmla v11.4s, v0.8h, v18.8h\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x6e51ec0f // bfmmla v15.4s, v0.8h, v17.8h\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"ld1 { v0.4s }, [x26], #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 19b\n"
"20:" // Height 1: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
@@ -267,11 +271,9 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ec0e // bfmmla v14.4s, v0.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e52ec0b // bfmmla v11.4s, v0.8h, v18.8h\n"
".inst 0x6e51ec0f // bfmmla v15.4s, v0.8h, v17.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x80\n"
"21:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 24f\n"
"cbz x27, 24f\n"
@@ -298,9 +300,9 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ec0e // bfmmla v14.4s, v0.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e52ec0b // bfmmla v11.4s, v0.8h, v18.8h\n"
".inst 0x6e51ec0f // bfmmla v15.4s, v0.8h, v17.8h\n"
- "add x10, x10, #0x80\n"
"24:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -312,9 +314,9 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
"fmin v8.4s, v8.4s, v18.4s\n"
"fmin v9.4s, v9.4s, v18.4s\n"
@@ -384,23 +386,23 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"bgt 2b\n"
"b 212f\n"
"36:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"37:" // Height 2: Column loop
"cbz x12, 38f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"b 50f\n"
@@ -408,75 +410,75 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"tbz %x[flags], #0, 49f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x11, #0x10\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
"bge 47f\n"
"tbz x11, #3, 42f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
"tbz x11, #2, 40f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
"tbz x11, #1, 39f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
"tbz x11, #0, 46f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
+ "ld1 { v15.s }[2], [x26]\n"
"b 46f\n"
"39:" // Height 2: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 46f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
"b 46f\n"
"40:" // Height 2: Partial accumulate: partial_2_8
"tbz x11, #1, 41f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
"tbz x11, #0, 46f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
+ "ld1 { v14.s }[2], [x26]\n"
"b 46f\n"
"41:" // Height 2: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 46f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
"b 46f\n"
"42:" // Height 2: Partial accumulate: partial_4_0
"tbz x11, #2, 44f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
"tbz x11, #1, 43f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
"tbz x11, #0, 46f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
+ "ld1 { v13.s }[2], [x26]\n"
"b 46f\n"
"43:" // Height 2: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 46f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
"b 46f\n"
"44:" // Height 2: Partial accumulate: partial_2_0
"tbz x11, #1, 45f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
"tbz x11, #0, 46f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
+ "ld1 { v12.s }[2], [x26]\n"
"b 46f\n"
"45:" // Height 2: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
"46:" // Height 2: Partial accumulate: Done
"sub x9, x9, x20\n"
@@ -486,10 +488,10 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
"48:" // Height 2: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -513,8 +515,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"51:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 52f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -539,7 +541,12 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"blt 55f\n"
"54:" // Height 2: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "cmp x27, #0x8\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
@@ -552,20 +559,18 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ec0e // bfmmla v14.4s, v0.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
"add x10, x10, #0x80\n"
".inst 0x6e52ec0b // bfmmla v11.4s, v0.8h, v18.8h\n"
"ldr q6, [x10, #0x0]\n"
".inst 0x6e51ec0f // bfmmla v15.4s, v0.8h, v17.8h\n"
- "ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"ld1 { v0.4s }, [x26], #0x10\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 54b\n"
"55:" // Height 2: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
+ "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q18, [x10, #0x20]\n"
@@ -579,12 +584,9 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ec0e // bfmmla v14.4s, v0.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
- "sub x27, x27, #0x4\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e52ec0b // bfmmla v11.4s, v0.8h, v18.8h\n"
".inst 0x6e51ec0f // bfmmla v15.4s, v0.8h, v17.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x10, x10, #0x80\n"
"56:" // Height 2: Multiply loop: Main loop skip
"cbz x27, 59f\n"
"cbz x27, 59f\n"
@@ -615,30 +617,30 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e51ec0e // bfmmla v14.4s, v0.8h, v17.8h\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e52ec0b // bfmmla v11.4s, v0.8h, v18.8h\n"
".inst 0x6e51ec0f // bfmmla v15.4s, v0.8h, v17.8h\n"
- "add x10, x10, #0x80\n"
"59:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 51b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"tbz %x[flags], #1, 60f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
"fmin v6.4s, v6.4s, v18.4s\n"
"fmin v12.4s, v12.4s, v18.4s\n"
@@ -662,63 +664,63 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"tbz x11, #3, 64f\n"
"st1 { v6.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
"tbz x11, #2, 62f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
"tbz x11, #1, 61f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
+ "str d11, [x26], #0x8\n"
"tbz x11, #0, 68f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
+ "st1 { v11.s }[2], [x26]\n"
"b 68f\n"
"61:" // Height 2: Partial direct writeback: partial_1_12
"tbz x11, #0, 68f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
"b 68f\n"
"62:" // Height 2: Partial direct writeback: partial_2_8
"tbz x11, #1, 63f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
+ "str d10, [x26], #0x8\n"
"tbz x11, #0, 68f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
+ "st1 { v10.s }[2], [x26]\n"
"b 68f\n"
"63:" // Height 2: Partial direct writeback: partial_1_8
"tbz x11, #0, 68f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
"b 68f\n"
"64:" // Height 2: Partial direct writeback: partial_4_0
"tbz x11, #2, 66f\n"
"st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
"tbz x11, #1, 65f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
+ "str d9, [x26], #0x8\n"
"tbz x11, #0, 68f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
+ "st1 { v9.s }[2], [x26]\n"
"b 68f\n"
"65:" // Height 2: Partial direct writeback: partial_1_4
"tbz x11, #0, 68f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
"b 68f\n"
"66:" // Height 2: Partial direct writeback: partial_2_0
"tbz x11, #1, 67f\n"
"str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
+ "str d8, [x26], #0x8\n"
"tbz x11, #0, 68f\n"
"st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
+ "st1 { v8.s }[2], [x26]\n"
"b 68f\n"
"67:" // Height 2: Partial direct writeback: partial_1_0
"str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
"68:" // Height 2: Partial direct writeback: Done
"b 70f\n"
"69:" // Height 2: Full writeback
@@ -727,32 +729,32 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
"70:" // Height 2: Writeback done
"subs x11, x11, #0x10\n"
"bgt 37b\n"
"b 212f\n"
"71:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"72:" // Height 3: Column loop
"cbz x12, 73f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -767,94 +769,94 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"73:" // Height 3: no bias
"tbz %x[flags], #0, 84f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"bge 82f\n"
"tbz x11, #3, 77f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
"tbz x11, #2, 75f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
"tbz x11, #1, 74f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
"tbz x11, #0, 81f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
"b 81f\n"
"74:" // Height 3: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 81f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
"b 81f\n"
"75:" // Height 3: Partial accumulate: partial_2_8
"tbz x11, #1, 76f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"tbz x11, #0, 81f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
"b 81f\n"
"76:" // Height 3: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 81f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
"b 81f\n"
"77:" // Height 3: Partial accumulate: partial_4_0
"tbz x11, #2, 79f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
"tbz x11, #1, 78f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"tbz x11, #0, 81f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
"b 81f\n"
"78:" // Height 3: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 81f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
"b 81f\n"
"79:" // Height 3: Partial accumulate: partial_2_0
"tbz x11, #1, 80f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
"tbz x11, #0, 81f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
"b 81f\n"
"80:" // Height 3: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
"81:" // Height 3: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 83f\n"
@@ -863,14 +865,14 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
"83:" // Height 3: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -910,8 +912,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"86:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 87f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -940,33 +942,33 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"blt 90f\n"
"89:" // Height 3: Multiply loop: Main loop head
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x4\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "cmp x27, #0x8\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aec09 // bfmmla v9.4s, v0.8h, v26.8h\n"
- "cmp x27, #0x8\n"
".inst 0x6e5aec51 // bfmmla v17.4s, v2.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ec0d // bfmmla v13.4s, v0.8h, v25.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e59ec55 // bfmmla v21.4s, v2.8h, v25.8h\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e5aec0a // bfmmla v10.4s, v0.8h, v26.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x6e5aec52 // bfmmla v18.4s, v2.8h, v26.8h\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e59ec0e // bfmmla v14.4s, v0.8h, v25.8h\n"
".inst 0x6e59ec56 // bfmmla v22.4s, v2.8h, v25.8h\n"
"ldr q25, [x10, #0x70]\n"
"add x10, x10, #0x80\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e5aec0b // bfmmla v11.4s, v0.8h, v26.8h\n"
".inst 0x6e5aec53 // bfmmla v19.4s, v2.8h, v26.8h\n"
"ldr q6, [x10, #0x0]\n"
@@ -978,25 +980,25 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"bge 89b\n"
"90:" // Height 3: Multiply loop: Single iteration only
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "sub x27, x27, #0x4\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aec09 // bfmmla v9.4s, v0.8h, v26.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e5aec51 // bfmmla v17.4s, v2.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ec0d // bfmmla v13.4s, v0.8h, v25.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e59ec55 // bfmmla v21.4s, v2.8h, v25.8h\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e5aec0a // bfmmla v10.4s, v0.8h, v26.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e5aec52 // bfmmla v18.4s, v2.8h, v26.8h\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e59ec0e // bfmmla v14.4s, v0.8h, v25.8h\n"
@@ -1027,13 +1029,13 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q26, [x10, #0x0]\n"
"ldr q25, [x10, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- ".inst 0x6e5aec08 // bfmmla v8.4s, v0.8h, v26.8h\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x6e5aec50 // bfmmla v16.4s, v2.8h, v26.8h\n"
+ ".inst 0x6e59ec54 // bfmmla v20.4s, v2.8h, v25.8h\n"
+ ".inst 0x6e5aec08 // bfmmla v8.4s, v0.8h, v26.8h\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e59ec0c // bfmmla v12.4s, v0.8h, v25.8h\n"
- ".inst 0x6e59ec54 // bfmmla v20.4s, v2.8h, v25.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aec09 // bfmmla v9.4s, v0.8h, v26.8h\n"
".inst 0x6e5aec51 // bfmmla v17.4s, v2.8h, v26.8h\n"
@@ -1058,27 +1060,27 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 86b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "uzp1 v12.2d, v9.2d, v13.2d\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 95f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
"fmin v6.4s, v6.4s, v26.4s\n"
"fmin v12.4s, v12.4s, v26.4s\n"
@@ -1110,79 +1112,79 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"tbz x11, #3, 99f\n"
"st1 { v6.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
- "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
+ "st1 { v17.4s }, [x25], #0x10\n"
"tbz x11, #2, 97f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v18.4s }, [x25], #0x10\n"
"tbz x11, #1, 96f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d19, [x24], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d19, [x25], #0x8\n"
"tbz x11, #0, 103f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v19.s }[2], [x24]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v19.s }[2], [x25]\n"
"b 103f\n"
"96:" // Height 3: Partial direct writeback: partial_1_12
"tbz x11, #0, 103f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s19, [x24, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s19, [x25, #0x0]\n"
"b 103f\n"
"97:" // Height 3: Partial direct writeback: partial_2_8
"tbz x11, #1, 98f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d18, [x24], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d18, [x25], #0x8\n"
"tbz x11, #0, 103f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v18.s }[2], [x24]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v18.s }[2], [x25]\n"
"b 103f\n"
"98:" // Height 3: Partial direct writeback: partial_1_8
"tbz x11, #0, 103f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s18, [x24, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s18, [x25, #0x0]\n"
"b 103f\n"
"99:" // Height 3: Partial direct writeback: partial_4_0
"tbz x11, #2, 101f\n"
"st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v16.4s }, [x25], #0x10\n"
"tbz x11, #1, 100f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d17, [x24], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d17, [x25], #0x8\n"
"tbz x11, #0, 103f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v17.s }[2], [x24]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v17.s }[2], [x25]\n"
"b 103f\n"
"100:" // Height 3: Partial direct writeback: partial_1_4
"tbz x11, #0, 103f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s17, [x24, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s17, [x25, #0x0]\n"
"b 103f\n"
"101:" // Height 3: Partial direct writeback: partial_2_0
"tbz x11, #1, 102f\n"
"str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
"tbz x11, #0, 103f\n"
"st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v16.s }[2], [x24]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v16.s }[2], [x25]\n"
"b 103f\n"
"102:" // Height 3: Partial direct writeback: partial_1_0
"str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s16, [x24, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s16, [x25, #0x0]\n"
"103:" // Height 3: Partial direct writeback: Done
"b 105f\n"
"104:" // Height 3: Full writeback
@@ -1191,36 +1193,36 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q16, [x24, #0x0]\n"
- "str q17, [x24, #0x10]\n"
- "str q18, [x24, #0x20]\n"
- "str q19, [x24, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q17, [x25, #0x10]\n"
+ "str q18, [x25, #0x20]\n"
+ "str q19, [x25, #0x30]\n"
"105:" // Height 3: Writeback done
"subs x11, x11, #0x10\n"
"bgt 72b\n"
"b 212f\n"
"106:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"107:" // Height 4: Column loop
"cbz x12, 108f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -1235,111 +1237,111 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"108:" // Height 4: no bias
"tbz %x[flags], #0, 119f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"cmp x11, #0x10\n"
- "add x23, x24, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"bge 117f\n"
"tbz x11, #3, 112f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
"tbz x11, #2, 110f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
"tbz x11, #1, 109f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
"tbz x11, #0, 116f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
"b 116f\n"
"109:" // Height 4: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 116f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
"b 116f\n"
"110:" // Height 4: Partial accumulate: partial_2_8
"tbz x11, #1, 111f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
"tbz x11, #0, 116f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
"b 116f\n"
"111:" // Height 4: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 116f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
"b 116f\n"
"112:" // Height 4: Partial accumulate: partial_4_0
"tbz x11, #2, 114f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
"tbz x11, #1, 113f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
"tbz x11, #0, 116f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
"b 116f\n"
"113:" // Height 4: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 116f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
"b 116f\n"
"114:" // Height 4: Partial accumulate: partial_2_0
"tbz x11, #1, 115f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
"tbz x11, #0, 116f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
"b 116f\n"
"115:" // Height 4: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
"116:" // Height 4: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 118f\n"
@@ -1348,18 +1350,18 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
"118:" // Height 4: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1399,8 +1401,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"121:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 122f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1435,28 +1437,28 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"cmp x27, #0x8\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aec09 // bfmmla v9.4s, v0.8h, v26.8h\n"
".inst 0x6e5aec51 // bfmmla v17.4s, v2.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ec0d // bfmmla v13.4s, v0.8h, v25.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e59ec55 // bfmmla v21.4s, v2.8h, v25.8h\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e5aec0a // bfmmla v10.4s, v0.8h, v26.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x6e5aec52 // bfmmla v18.4s, v2.8h, v26.8h\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e59ec0e // bfmmla v14.4s, v0.8h, v25.8h\n"
@@ -1477,18 +1479,18 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x27, x27, #0x4\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e5aec09 // bfmmla v9.4s, v0.8h, v26.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e5aec51 // bfmmla v17.4s, v2.8h, v26.8h\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e59ec0d // bfmmla v13.4s, v0.8h, v25.8h\n"
@@ -1549,8 +1551,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e59ec0e // bfmmla v14.4s, v0.8h, v25.8h\n"
".inst 0x6e59ec56 // bfmmla v22.4s, v2.8h, v25.8h\n"
"ldr q25, [x10, #0x70]\n"
- ".inst 0x6e5aec0b // bfmmla v11.4s, v0.8h, v26.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e5aec0b // bfmmla v11.4s, v0.8h, v26.8h\n"
".inst 0x6e5aec53 // bfmmla v19.4s, v2.8h, v26.8h\n"
".inst 0x6e59ec0f // bfmmla v15.4s, v0.8h, v25.8h\n"
".inst 0x6e59ec57 // bfmmla v23.4s, v2.8h, v25.8h\n"
@@ -1560,23 +1562,23 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 121b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
- "uzp1 v12.2d, v9.2d, v13.2d\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
@@ -1584,9 +1586,9 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"tbz %x[flags], #1, 130f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
"fmin v6.4s, v6.4s, v26.4s\n"
"fmin v12.4s, v12.4s, v26.4s\n"
@@ -1626,95 +1628,95 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"tbz x11, #3, 134f\n"
"st1 { v6.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
"tbz x11, #2, 132f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
"tbz x11, #1, 131f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
"tbz x11, #0, 138f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
"b 138f\n"
"131:" // Height 4: Partial direct writeback: partial_1_12
"tbz x11, #0, 138f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
"b 138f\n"
"132:" // Height 4: Partial direct writeback: partial_2_8
"tbz x11, #1, 133f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
"tbz x11, #0, 138f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
"b 138f\n"
"133:" // Height 4: Partial direct writeback: partial_1_8
"tbz x11, #0, 138f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
"b 138f\n"
"134:" // Height 4: Partial direct writeback: partial_4_0
"tbz x11, #2, 136f\n"
"st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
"tbz x11, #1, 135f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
"tbz x11, #0, 138f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
"b 138f\n"
"135:" // Height 4: Partial direct writeback: partial_1_4
"tbz x11, #0, 138f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
"b 138f\n"
"136:" // Height 4: Partial direct writeback: partial_2_0
"tbz x11, #1, 137f\n"
"str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
"tbz x11, #0, 138f\n"
"st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
"b 138f\n"
"137:" // Height 4: Partial direct writeback: partial_1_0
"str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
"138:" // Height 4: Partial direct writeback: Done
"b 140f\n"
"139:" // Height 4: Full writeback
@@ -1723,40 +1725,40 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
"140:" // Height 4: Writeback done
"subs x11, x11, #0x10\n"
"bgt 107b\n"
"b 212f\n"
"141:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"142:" // Height 5: Column loop
"cbz x12, 143f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -1779,128 +1781,128 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"143:" // Height 5: no bias
"tbz %x[flags], #0, 154f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x22, x23, x20, LSL #2\n"
"bge 152f\n"
"tbz x11, #3, 147f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
"tbz x11, #2, 145f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v27.4s }, [x23], #0x10\n"
"tbz x11, #1, 144f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d6, [x23], #0x8\n"
"tbz x11, #0, 151f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v6.s }[2], [x23]\n"
"b 151f\n"
"144:" // Height 5: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 151f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s6, [x23, #0x0]\n"
"b 151f\n"
"145:" // Height 5: Partial accumulate: partial_2_8
"tbz x11, #1, 146f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"tbz x11, #0, 151f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
"b 151f\n"
"146:" // Height 5: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 151f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
"b 151f\n"
"147:" // Height 5: Partial accumulate: partial_4_0
"tbz x11, #2, 149f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
"tbz x11, #1, 148f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"tbz x11, #0, 151f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
"b 151f\n"
"148:" // Height 5: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 151f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
"b 151f\n"
"149:" // Height 5: Partial accumulate: partial_2_0
"tbz x11, #1, 150f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
"tbz x11, #0, 151f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
"b 151f\n"
"150:" // Height 5: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
"151:" // Height 5: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 153f\n"
@@ -1909,22 +1911,22 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q25, [x23, #0x0]\n"
+ "ldr q26, [x23, #0x10]\n"
+ "ldr q27, [x23, #0x20]\n"
+ "ldr q6, [x23, #0x30]\n"
"153:" // Height 5: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -1980,8 +1982,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"156:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 157f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2020,31 +2022,31 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x27, x27, #0x4\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
"cmp x27, #0x8\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
- ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
- "ldr q3, [x10, #0x20]\n"
+ ".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "ldr q6, [x10, #0x20]\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
- ".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q5, [x10, #0x30]\n"
- ".inst 0x6e43ec09 // bfmmla v9.4s, v0.8h, v3.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- ".inst 0x6e43ec51 // bfmmla v17.4s, v2.8h, v3.8h\n"
- ".inst 0x6e43ec99 // bfmmla v25.4s, v4.8h, v3.8h\n"
+ ".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
+ ".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
".inst 0x6e45ec55 // bfmmla v21.4s, v2.8h, v5.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e45ec9d // bfmmla v29.4s, v4.8h, v5.8h\n"
"ldr q5, [x10, #0x50]\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
@@ -2073,22 +2075,22 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x27, x27, #0x4\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ ".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
+ ".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
- ".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q3, [x10, #0x20]\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- ".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q1, [x10, #0x30]\n"
".inst 0x6e43ec09 // bfmmla v9.4s, v0.8h, v3.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e43ec51 // bfmmla v17.4s, v2.8h, v3.8h\n"
".inst 0x6e43ec99 // bfmmla v25.4s, v4.8h, v3.8h\n"
"ldr q3, [x10, #0x40]\n"
@@ -2138,16 +2140,16 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q5, [x10, #0x10]\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
- ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
- "ldr q3, [x10, #0x20]\n"
".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
- ".inst 0x6e45ec54 // bfmmla v20.4s, v2.8h, v5.8h\n"
".inst 0x6e45ec9c // bfmmla v28.4s, v4.8h, v5.8h\n"
+ ".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
+ "ldr q3, [x10, #0x20]\n"
+ ".inst 0x6e45ec54 // bfmmla v20.4s, v2.8h, v5.8h\n"
"ldr q1, [x10, #0x30]\n"
".inst 0x6e43ec09 // bfmmla v9.4s, v0.8h, v3.8h\n"
".inst 0x6e43ec51 // bfmmla v17.4s, v2.8h, v3.8h\n"
@@ -2165,8 +2167,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x6e41ec56 // bfmmla v22.4s, v2.8h, v1.8h\n"
".inst 0x6e41ec9e // bfmmla v30.4s, v4.8h, v1.8h\n"
"ldr q1, [x10, #0x70]\n"
- ".inst 0x6e43ec0b // bfmmla v11.4s, v0.8h, v3.8h\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e43ec0b // bfmmla v11.4s, v0.8h, v3.8h\n"
".inst 0x6e43ec53 // bfmmla v19.4s, v2.8h, v3.8h\n"
".inst 0x6e43ec9b // bfmmla v27.4s, v4.8h, v3.8h\n"
".inst 0x6e41ec0f // bfmmla v15.4s, v0.8h, v1.8h\n"
@@ -2178,27 +2180,27 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 156b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
@@ -2208,9 +2210,9 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 165f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v6.4s, v6.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
@@ -2258,111 +2260,111 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"tbz x11, #3, 169f\n"
"st1 { v6.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
- "st1 { v25.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
+ "st1 { v25.4s }, [x23], #0x10\n"
"tbz x11, #2, 167f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v26.4s }, [x22], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v26.4s }, [x23], #0x10\n"
"tbz x11, #1, 166f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d27, [x22], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d27, [x23], #0x8\n"
"tbz x11, #0, 173f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v27.s }[2], [x22]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v27.s }[2], [x23]\n"
"b 173f\n"
"166:" // Height 5: Partial direct writeback: partial_1_12
"tbz x11, #0, 173f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s27, [x22, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s27, [x23, #0x0]\n"
"b 173f\n"
"167:" // Height 5: Partial direct writeback: partial_2_8
"tbz x11, #1, 168f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d26, [x22], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d26, [x23], #0x8\n"
"tbz x11, #0, 173f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v26.s }[2], [x22]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v26.s }[2], [x23]\n"
"b 173f\n"
"168:" // Height 5: Partial direct writeback: partial_1_8
"tbz x11, #0, 173f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s26, [x22, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s26, [x23, #0x0]\n"
"b 173f\n"
"169:" // Height 5: Partial direct writeback: partial_4_0
"tbz x11, #2, 171f\n"
"st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v24.4s }, [x23], #0x10\n"
"tbz x11, #1, 170f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d25, [x22], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d25, [x23], #0x8\n"
"tbz x11, #0, 173f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v25.s }[2], [x22]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v25.s }[2], [x23]\n"
"b 173f\n"
"170:" // Height 5: Partial direct writeback: partial_1_4
"tbz x11, #0, 173f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s25, [x22, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s25, [x23, #0x0]\n"
"b 173f\n"
"171:" // Height 5: Partial direct writeback: partial_2_0
"tbz x11, #1, 172f\n"
"str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x11, #0, 173f\n"
"st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v24.s }[2], [x22]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v24.s }[2], [x23]\n"
"b 173f\n"
"172:" // Height 5: Partial direct writeback: partial_1_0
"str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s24, [x22, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s24, [x23, #0x0]\n"
"173:" // Height 5: Partial direct writeback: Done
"b 175f\n"
"174:" // Height 5: Full writeback
@@ -2371,47 +2373,48 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q24, [x22, #0x0]\n"
- "str q25, [x22, #0x10]\n"
- "str q26, [x22, #0x20]\n"
- "str q27, [x22, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q25, [x23, #0x10]\n"
+ "str q26, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
"175:" // Height 5: Writeback done
"subs x11, x11, #0x10\n"
"bgt 142b\n"
"b 212f\n"
"176:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"177:" // Height 6: Column loop
"cbz x12, 178f\n"
"ldr q8, [x12, #0x0]\n"
"ldr q9, [x12, #0x10]\n"
- "zip2 v12.2d, v8.2d, v8.2d\n"
- "zip1 v8.2d, v8.2d, v8.2d\n"
"ldr q10, [x12, #0x20]\n"
"ldr q11, [x12, #0x30]\n"
+ "add x12, x12, #0x40\n"
+ "zip2 v12.2d, v8.2d, v8.2d\n"
+ "zip1 v8.2d, v8.2d, v8.2d\n"
"zip2 v13.2d, v9.2d, v9.2d\n"
"zip1 v9.2d, v9.2d, v9.2d\n"
"zip2 v14.2d, v10.2d, v10.2d\n"
"zip1 v10.2d, v10.2d, v10.2d\n"
- "add x12, x12, #0x40\n"
"zip2 v15.2d, v11.2d, v11.2d\n"
"zip1 v11.2d, v11.2d, v11.2d\n"
"mov v16.16b, v8.16b\n"
@@ -2434,145 +2437,145 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"178:" // Height 6: no bias
"tbz %x[flags], #0, 189f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "add x21, x22, x20, LSL #2\n"
"bge 187f\n"
"tbz x11, #3, 182f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"ld1 { v10.4s }, [x9], #0x10\n"
- "ld1 { v13.4s }, [x25], #0x10\n"
- "ld1 { v18.4s }, [x24], #0x10\n"
- "ld1 { v21.4s }, [x23], #0x10\n"
- "ld1 { v26.4s }, [x22], #0x10\n"
- "ld1 { v29.4s }, [x21], #0x10\n"
+ "ld1 { v13.4s }, [x26], #0x10\n"
+ "ld1 { v18.4s }, [x25], #0x10\n"
+ "ld1 { v21.4s }, [x24], #0x10\n"
+ "ld1 { v26.4s }, [x23], #0x10\n"
+ "ld1 { v29.4s }, [x22], #0x10\n"
"tbz x11, #2, 180f\n"
"ld1 { v11.4s }, [x9], #0x10\n"
- "ld1 { v14.4s }, [x25], #0x10\n"
- "ld1 { v19.4s }, [x24], #0x10\n"
- "ld1 { v22.4s }, [x23], #0x10\n"
- "ld1 { v27.4s }, [x22], #0x10\n"
- "ld1 { v30.4s }, [x21], #0x10\n"
+ "ld1 { v14.4s }, [x26], #0x10\n"
+ "ld1 { v19.4s }, [x25], #0x10\n"
+ "ld1 { v22.4s }, [x24], #0x10\n"
+ "ld1 { v27.4s }, [x23], #0x10\n"
+ "ld1 { v30.4s }, [x22], #0x10\n"
"tbz x11, #1, 179f\n"
"ldr d16, [x9], #0x8\n"
- "ldr d15, [x25], #0x8\n"
+ "ldr d15, [x26], #0x8\n"
"mov x20, #0x38\n"
- "ldr d24, [x24], #0x8\n"
- "ldr d23, [x23], #0x8\n"
- "ldr d6, [x22], #0x8\n"
- "ldr d31, [x21], #0x8\n"
+ "ldr d24, [x25], #0x8\n"
+ "ldr d23, [x24], #0x8\n"
+ "ldr d6, [x23], #0x8\n"
+ "ldr d31, [x22], #0x8\n"
"tbz x11, #0, 186f\n"
"ld1 { v16.s }[2], [x9]\n"
- "ld1 { v15.s }[2], [x25]\n"
- "ld1 { v24.s }[2], [x24]\n"
- "ld1 { v23.s }[2], [x23]\n"
- "ld1 { v6.s }[2], [x22]\n"
- "ld1 { v31.s }[2], [x21]\n"
+ "ld1 { v15.s }[2], [x26]\n"
+ "ld1 { v24.s }[2], [x25]\n"
+ "ld1 { v23.s }[2], [x24]\n"
+ "ld1 { v6.s }[2], [x23]\n"
+ "ld1 { v31.s }[2], [x22]\n"
"b 186f\n"
"179:" // Height 6: Partial accumulate: partial_1_12
"mov x20, #0x30\n"
"tbz x11, #0, 186f\n"
"ldr s16, [x9, #0x0]\n"
- "ldr s15, [x25, #0x0]\n"
- "ldr s24, [x24, #0x0]\n"
- "ldr s23, [x23, #0x0]\n"
- "ldr s6, [x22, #0x0]\n"
- "ldr s31, [x21, #0x0]\n"
+ "ldr s15, [x26, #0x0]\n"
+ "ldr s24, [x25, #0x0]\n"
+ "ldr s23, [x24, #0x0]\n"
+ "ldr s6, [x23, #0x0]\n"
+ "ldr s31, [x22, #0x0]\n"
"b 186f\n"
"180:" // Height 6: Partial accumulate: partial_2_8
"tbz x11, #1, 181f\n"
"ldr d11, [x9], #0x8\n"
- "ldr d14, [x25], #0x8\n"
+ "ldr d14, [x26], #0x8\n"
"mov x20, #0x28\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d22, [x23], #0x8\n"
- "ldr d27, [x22], #0x8\n"
- "ldr d30, [x21], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d22, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "ldr d30, [x22], #0x8\n"
"tbz x11, #0, 186f\n"
"ld1 { v11.s }[2], [x9]\n"
- "ld1 { v14.s }[2], [x25]\n"
- "ld1 { v19.s }[2], [x24]\n"
- "ld1 { v22.s }[2], [x23]\n"
- "ld1 { v27.s }[2], [x22]\n"
- "ld1 { v30.s }[2], [x21]\n"
+ "ld1 { v14.s }[2], [x26]\n"
+ "ld1 { v19.s }[2], [x25]\n"
+ "ld1 { v22.s }[2], [x24]\n"
+ "ld1 { v27.s }[2], [x23]\n"
+ "ld1 { v30.s }[2], [x22]\n"
"b 186f\n"
"181:" // Height 6: Partial accumulate: partial_1_8
"mov x20, #0x20\n"
"tbz x11, #0, 186f\n"
"ldr s11, [x9, #0x0]\n"
- "ldr s14, [x25, #0x0]\n"
- "ldr s19, [x24, #0x0]\n"
- "ldr s22, [x23, #0x0]\n"
- "ldr s27, [x22, #0x0]\n"
- "ldr s30, [x21, #0x0]\n"
+ "ldr s14, [x26, #0x0]\n"
+ "ldr s19, [x25, #0x0]\n"
+ "ldr s22, [x24, #0x0]\n"
+ "ldr s27, [x23, #0x0]\n"
+ "ldr s30, [x22, #0x0]\n"
"b 186f\n"
"182:" // Height 6: Partial accumulate: partial_4_0
"tbz x11, #2, 184f\n"
"ld1 { v9.4s }, [x9], #0x10\n"
- "ld1 { v12.4s }, [x25], #0x10\n"
- "ld1 { v17.4s }, [x24], #0x10\n"
- "ld1 { v20.4s }, [x23], #0x10\n"
- "ld1 { v25.4s }, [x22], #0x10\n"
- "ld1 { v28.4s }, [x21], #0x10\n"
+ "ld1 { v12.4s }, [x26], #0x10\n"
+ "ld1 { v17.4s }, [x25], #0x10\n"
+ "ld1 { v20.4s }, [x24], #0x10\n"
+ "ld1 { v25.4s }, [x23], #0x10\n"
+ "ld1 { v28.4s }, [x22], #0x10\n"
"tbz x11, #1, 183f\n"
"ldr d10, [x9], #0x8\n"
- "ldr d13, [x25], #0x8\n"
+ "ldr d13, [x26], #0x8\n"
"mov x20, #0x18\n"
- "ldr d18, [x24], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "ldr d26, [x22], #0x8\n"
- "ldr d29, [x21], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d21, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
+ "ldr d29, [x22], #0x8\n"
"tbz x11, #0, 186f\n"
"ld1 { v10.s }[2], [x9]\n"
- "ld1 { v13.s }[2], [x25]\n"
- "ld1 { v18.s }[2], [x24]\n"
- "ld1 { v21.s }[2], [x23]\n"
- "ld1 { v26.s }[2], [x22]\n"
- "ld1 { v29.s }[2], [x21]\n"
+ "ld1 { v13.s }[2], [x26]\n"
+ "ld1 { v18.s }[2], [x25]\n"
+ "ld1 { v21.s }[2], [x24]\n"
+ "ld1 { v26.s }[2], [x23]\n"
+ "ld1 { v29.s }[2], [x22]\n"
"b 186f\n"
"183:" // Height 6: Partial accumulate: partial_1_4
"mov x20, #0x10\n"
"tbz x11, #0, 186f\n"
"ldr s10, [x9, #0x0]\n"
- "ldr s13, [x25, #0x0]\n"
- "ldr s18, [x24, #0x0]\n"
- "ldr s21, [x23, #0x0]\n"
- "ldr s26, [x22, #0x0]\n"
- "ldr s29, [x21, #0x0]\n"
+ "ldr s13, [x26, #0x0]\n"
+ "ldr s18, [x25, #0x0]\n"
+ "ldr s21, [x24, #0x0]\n"
+ "ldr s26, [x23, #0x0]\n"
+ "ldr s29, [x22, #0x0]\n"
"b 186f\n"
"184:" // Height 6: Partial accumulate: partial_2_0
"tbz x11, #1, 185f\n"
"ldr d9, [x9], #0x8\n"
- "ldr d12, [x25], #0x8\n"
+ "ldr d12, [x26], #0x8\n"
"mov x20, #0x8\n"
- "ldr d17, [x24], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "ldr d25, [x22], #0x8\n"
- "ldr d28, [x21], #0x8\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "ldr d25, [x23], #0x8\n"
+ "ldr d28, [x22], #0x8\n"
"tbz x11, #0, 186f\n"
"ld1 { v9.s }[2], [x9]\n"
- "ld1 { v12.s }[2], [x25]\n"
- "ld1 { v17.s }[2], [x24]\n"
- "ld1 { v20.s }[2], [x23]\n"
- "ld1 { v25.s }[2], [x22]\n"
- "ld1 { v28.s }[2], [x21]\n"
+ "ld1 { v12.s }[2], [x26]\n"
+ "ld1 { v17.s }[2], [x25]\n"
+ "ld1 { v20.s }[2], [x24]\n"
+ "ld1 { v25.s }[2], [x23]\n"
+ "ld1 { v28.s }[2], [x22]\n"
"b 186f\n"
"185:" // Height 6: Partial accumulate: partial_1_0
"ldr s9, [x9, #0x0]\n"
- "ldr s12, [x25, #0x0]\n"
+ "ldr s12, [x26, #0x0]\n"
"mov x20, #0x0\n"
- "ldr s17, [x24, #0x0]\n"
- "ldr s20, [x23, #0x0]\n"
- "ldr s25, [x22, #0x0]\n"
- "ldr s28, [x21, #0x0]\n"
+ "ldr s17, [x25, #0x0]\n"
+ "ldr s20, [x24, #0x0]\n"
+ "ldr s25, [x23, #0x0]\n"
+ "ldr s28, [x22, #0x0]\n"
"186:" // Height 6: Partial accumulate: Done
"sub x9, x9, x20\n"
"b 188f\n"
@@ -2581,26 +2584,26 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"ldr q10, [x9, #0x10]\n"
"ldr q11, [x9, #0x20]\n"
"ldr q16, [x9, #0x30]\n"
- "ldr q12, [x25, #0x0]\n"
- "ldr q13, [x25, #0x10]\n"
- "ldr q14, [x25, #0x20]\n"
- "ldr q15, [x25, #0x30]\n"
- "ldr q17, [x24, #0x0]\n"
- "ldr q18, [x24, #0x10]\n"
- "ldr q19, [x24, #0x20]\n"
- "ldr q24, [x24, #0x30]\n"
- "ldr q20, [x23, #0x0]\n"
- "ldr q21, [x23, #0x10]\n"
- "ldr q22, [x23, #0x20]\n"
- "ldr q23, [x23, #0x30]\n"
- "ldr q25, [x22, #0x0]\n"
- "ldr q26, [x22, #0x10]\n"
- "ldr q27, [x22, #0x20]\n"
- "ldr q6, [x22, #0x30]\n"
- "ldr q28, [x21, #0x0]\n"
- "ldr q29, [x21, #0x10]\n"
- "ldr q30, [x21, #0x20]\n"
- "ldr q31, [x21, #0x30]\n"
+ "ldr q12, [x26, #0x0]\n"
+ "ldr q13, [x26, #0x10]\n"
+ "ldr q14, [x26, #0x20]\n"
+ "ldr q15, [x26, #0x30]\n"
+ "ldr q17, [x25, #0x0]\n"
+ "ldr q18, [x25, #0x10]\n"
+ "ldr q19, [x25, #0x20]\n"
+ "ldr q24, [x25, #0x30]\n"
+ "ldr q20, [x24, #0x0]\n"
+ "ldr q21, [x24, #0x10]\n"
+ "ldr q22, [x24, #0x20]\n"
+ "ldr q23, [x24, #0x30]\n"
+ "ldr q25, [x23, #0x0]\n"
+ "ldr q26, [x23, #0x10]\n"
+ "ldr q27, [x23, #0x20]\n"
+ "ldr q6, [x23, #0x30]\n"
+ "ldr q28, [x22, #0x0]\n"
+ "ldr q29, [x22, #0x10]\n"
+ "ldr q30, [x22, #0x20]\n"
+ "ldr q31, [x22, #0x30]\n"
"188:" // Height 6: MMLA fixup
"zip1 v8.2d, v9.2d, v12.2d\n"
"zip2 v12.2d, v9.2d, v12.2d\n"
@@ -2656,8 +2659,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"mov x28, #0x0\n"
"191:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 192f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2700,36 +2703,36 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x8\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ "cmp x27, #0x8\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ld1 { v1.4s }, [x25], #0x10\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x4ea168a4 // bfcvtn2 v4.8h, v5.4s\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "ld1 { v5.4s }, [x21], #0x10\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "ld1 { v1.4s }, [x25], #0x10\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x20]\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
- "ldr q5, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q7, [x10, #0x30]\n"
".inst 0x6e46ec09 // bfmmla v9.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec51 // bfmmla v17.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- "ld1 { v3.4s }, [x23], #0x10\n"
".inst 0x6e46ec99 // bfmmla v25.4s, v4.8h, v6.8h\n"
"ldr q6, [x10, #0x40]\n"
- ".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
- "prfm pldl1keep, [x22, #0x80]\n"
- ".inst 0x6e45ec55 // bfmmla v21.4s, v2.8h, v5.8h\n"
- ".inst 0x6e45ec9d // bfmmla v29.4s, v4.8h, v5.8h\n"
+ ".inst 0x6e47ec0d // bfmmla v13.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec55 // bfmmla v21.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e47ec9d // bfmmla v29.4s, v4.8h, v7.8h\n"
"ldr q7, [x10, #0x50]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
- "ld1 { v5.4s }, [x21], #0x10\n"
".inst 0x6e46ec0a // bfmmla v10.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec52 // bfmmla v18.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9a // bfmmla v26.4s, v4.8h, v6.8h\n"
@@ -2757,24 +2760,24 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"sub x27, x27, #0x4\n"
"prfm pldl1keep, [x26, #0x80]\n"
".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
- ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4ea168a4 // bfcvtn2 v4.8h, v5.4s\n"
+ ".inst 0x6e46ec08 // bfmmla v8.4s, v0.8h, v6.8h\n"
+ ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e46ec50 // bfmmla v16.4s, v2.8h, v6.8h\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
".inst 0x6e46ec98 // bfmmla v24.4s, v4.8h, v6.8h\n"
"ldr q3, [x10, #0x20]\n"
- ".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
- "prfm pldl1keep, [x23, #0x80]\n"
- ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec9c // bfmmla v28.4s, v4.8h, v7.8h\n"
"ldr q1, [x10, #0x30]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e43ec09 // bfmmla v9.4s, v0.8h, v3.8h\n"
".inst 0x6e43ec51 // bfmmla v17.4s, v2.8h, v3.8h\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e43ec99 // bfmmla v25.4s, v4.8h, v3.8h\n"
"ldr q3, [x10, #0x40]\n"
".inst 0x6e41ec0d // bfmmla v13.4s, v0.8h, v1.8h\n"
@@ -2828,13 +2831,13 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
".inst 0x4ea16862 // bfcvtn2 v2.8h, v3.4s\n"
".inst 0x4ea168a4 // bfcvtn2 v4.8h, v5.4s\n"
+ ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e47ec50 // bfmmla v16.4s, v2.8h, v7.8h\n"
".inst 0x6e47ec98 // bfmmla v24.4s, v4.8h, v7.8h\n"
"ldr q3, [x10, #0x20]\n"
- ".inst 0x6e46ec0c // bfmmla v12.4s, v0.8h, v6.8h\n"
".inst 0x6e46ec54 // bfmmla v20.4s, v2.8h, v6.8h\n"
".inst 0x6e46ec9c // bfmmla v28.4s, v4.8h, v6.8h\n"
"ldr q1, [x10, #0x30]\n"
@@ -2867,31 +2870,31 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"cmp x28, x20\n"
"bne 191b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 v6.2d, v8.2d, v12.2d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
@@ -2903,9 +2906,9 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
"tbz %x[flags], #1, 200f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1r { v1.4s }, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1r { v1.4s }, [x21]\n"
"ld1r { v0.4s }, [x20]\n"
"fmin v6.4s, v6.4s, v1.4s\n"
"fmin v12.4s, v12.4s, v1.4s\n"
@@ -2961,127 +2964,127 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"tbz x11, #3, 204f\n"
"st1 { v6.4s }, [x9], #0x10\n"
"st1 { v12.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v9.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v20.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v17.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v28.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
- "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v9.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v20.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v17.4s }, [x24], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v28.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
+ "st1 { v25.4s }, [x22], #0x10\n"
"tbz x11, #2, 202f\n"
"st1 { v13.4s }, [x9], #0x10\n"
- "st1 { v10.4s }, [x25], #0x10\n"
- "st1 { v21.4s }, [x24], #0x10\n"
- "st1 { v18.4s }, [x23], #0x10\n"
- "st1 { v29.4s }, [x22], #0x10\n"
- "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v10.4s }, [x26], #0x10\n"
+ "st1 { v21.4s }, [x25], #0x10\n"
+ "st1 { v18.4s }, [x24], #0x10\n"
+ "st1 { v29.4s }, [x23], #0x10\n"
+ "st1 { v26.4s }, [x22], #0x10\n"
"tbz x11, #1, 201f\n"
"str d14, [x9], #0x8\n"
- "str d11, [x25], #0x8\n"
- "str d22, [x24], #0x8\n"
- "str d19, [x23], #0x8\n"
- "str d30, [x22], #0x8\n"
- "str d27, [x21], #0x8\n"
+ "str d11, [x26], #0x8\n"
+ "str d22, [x25], #0x8\n"
+ "str d19, [x24], #0x8\n"
+ "str d30, [x23], #0x8\n"
+ "str d27, [x22], #0x8\n"
"tbz x11, #0, 208f\n"
"st1 { v14.s }[2], [x9]\n"
- "st1 { v11.s }[2], [x25]\n"
- "st1 { v22.s }[2], [x24]\n"
- "st1 { v19.s }[2], [x23]\n"
- "st1 { v30.s }[2], [x22]\n"
- "st1 { v27.s }[2], [x21]\n"
+ "st1 { v11.s }[2], [x26]\n"
+ "st1 { v22.s }[2], [x25]\n"
+ "st1 { v19.s }[2], [x24]\n"
+ "st1 { v30.s }[2], [x23]\n"
+ "st1 { v27.s }[2], [x22]\n"
"b 208f\n"
"201:" // Height 6: Partial direct writeback: partial_1_12
"tbz x11, #0, 208f\n"
"str s14, [x9, #0x0]\n"
- "str s11, [x25, #0x0]\n"
- "str s22, [x24, #0x0]\n"
- "str s19, [x23, #0x0]\n"
- "str s30, [x22, #0x0]\n"
- "str s27, [x21, #0x0]\n"
+ "str s11, [x26, #0x0]\n"
+ "str s22, [x25, #0x0]\n"
+ "str s19, [x24, #0x0]\n"
+ "str s30, [x23, #0x0]\n"
+ "str s27, [x22, #0x0]\n"
"b 208f\n"
"202:" // Height 6: Partial direct writeback: partial_2_8
"tbz x11, #1, 203f\n"
"str d13, [x9], #0x8\n"
- "str d10, [x25], #0x8\n"
- "str d21, [x24], #0x8\n"
- "str d18, [x23], #0x8\n"
- "str d29, [x22], #0x8\n"
- "str d26, [x21], #0x8\n"
+ "str d10, [x26], #0x8\n"
+ "str d21, [x25], #0x8\n"
+ "str d18, [x24], #0x8\n"
+ "str d29, [x23], #0x8\n"
+ "str d26, [x22], #0x8\n"
"tbz x11, #0, 208f\n"
"st1 { v13.s }[2], [x9]\n"
- "st1 { v10.s }[2], [x25]\n"
- "st1 { v21.s }[2], [x24]\n"
- "st1 { v18.s }[2], [x23]\n"
- "st1 { v29.s }[2], [x22]\n"
- "st1 { v26.s }[2], [x21]\n"
+ "st1 { v10.s }[2], [x26]\n"
+ "st1 { v21.s }[2], [x25]\n"
+ "st1 { v18.s }[2], [x24]\n"
+ "st1 { v29.s }[2], [x23]\n"
+ "st1 { v26.s }[2], [x22]\n"
"b 208f\n"
"203:" // Height 6: Partial direct writeback: partial_1_8
"tbz x11, #0, 208f\n"
"str s13, [x9, #0x0]\n"
- "str s10, [x25, #0x0]\n"
- "str s21, [x24, #0x0]\n"
- "str s18, [x23, #0x0]\n"
- "str s29, [x22, #0x0]\n"
- "str s26, [x21, #0x0]\n"
+ "str s10, [x26, #0x0]\n"
+ "str s21, [x25, #0x0]\n"
+ "str s18, [x24, #0x0]\n"
+ "str s29, [x23, #0x0]\n"
+ "str s26, [x22, #0x0]\n"
"b 208f\n"
"204:" // Height 6: Partial direct writeback: partial_4_0
"tbz x11, #2, 206f\n"
"st1 { v6.4s }, [x9], #0x10\n"
- "st1 { v8.4s }, [x25], #0x10\n"
- "st1 { v15.4s }, [x24], #0x10\n"
- "st1 { v16.4s }, [x23], #0x10\n"
- "st1 { v23.4s }, [x22], #0x10\n"
- "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v8.4s }, [x26], #0x10\n"
+ "st1 { v15.4s }, [x25], #0x10\n"
+ "st1 { v16.4s }, [x24], #0x10\n"
+ "st1 { v23.4s }, [x23], #0x10\n"
+ "st1 { v24.4s }, [x22], #0x10\n"
"tbz x11, #1, 205f\n"
"str d12, [x9], #0x8\n"
- "str d9, [x25], #0x8\n"
- "str d20, [x24], #0x8\n"
- "str d17, [x23], #0x8\n"
- "str d28, [x22], #0x8\n"
- "str d25, [x21], #0x8\n"
+ "str d9, [x26], #0x8\n"
+ "str d20, [x25], #0x8\n"
+ "str d17, [x24], #0x8\n"
+ "str d28, [x23], #0x8\n"
+ "str d25, [x22], #0x8\n"
"tbz x11, #0, 208f\n"
"st1 { v12.s }[2], [x9]\n"
- "st1 { v9.s }[2], [x25]\n"
- "st1 { v20.s }[2], [x24]\n"
- "st1 { v17.s }[2], [x23]\n"
- "st1 { v28.s }[2], [x22]\n"
- "st1 { v25.s }[2], [x21]\n"
+ "st1 { v9.s }[2], [x26]\n"
+ "st1 { v20.s }[2], [x25]\n"
+ "st1 { v17.s }[2], [x24]\n"
+ "st1 { v28.s }[2], [x23]\n"
+ "st1 { v25.s }[2], [x22]\n"
"b 208f\n"
"205:" // Height 6: Partial direct writeback: partial_1_4
"tbz x11, #0, 208f\n"
"str s12, [x9, #0x0]\n"
- "str s9, [x25, #0x0]\n"
- "str s20, [x24, #0x0]\n"
- "str s17, [x23, #0x0]\n"
- "str s28, [x22, #0x0]\n"
- "str s25, [x21, #0x0]\n"
+ "str s9, [x26, #0x0]\n"
+ "str s20, [x25, #0x0]\n"
+ "str s17, [x24, #0x0]\n"
+ "str s28, [x23, #0x0]\n"
+ "str s25, [x22, #0x0]\n"
"b 208f\n"
"206:" // Height 6: Partial direct writeback: partial_2_0
"tbz x11, #1, 207f\n"
"str d6, [x9], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
"tbz x11, #0, 208f\n"
"st1 { v6.s }[2], [x9]\n"
- "st1 { v8.s }[2], [x25]\n"
- "st1 { v15.s }[2], [x24]\n"
- "st1 { v16.s }[2], [x23]\n"
- "st1 { v23.s }[2], [x22]\n"
- "st1 { v24.s }[2], [x21]\n"
+ "st1 { v8.s }[2], [x26]\n"
+ "st1 { v15.s }[2], [x25]\n"
+ "st1 { v16.s }[2], [x24]\n"
+ "st1 { v23.s }[2], [x23]\n"
+ "st1 { v24.s }[2], [x22]\n"
"b 208f\n"
"207:" // Height 6: Partial direct writeback: partial_1_0
"str s6, [x9, #0x0]\n"
- "str s8, [x25, #0x0]\n"
- "str s15, [x24, #0x0]\n"
- "str s16, [x23, #0x0]\n"
- "str s23, [x22, #0x0]\n"
- "str s24, [x21, #0x0]\n"
+ "str s8, [x26, #0x0]\n"
+ "str s15, [x25, #0x0]\n"
+ "str s16, [x24, #0x0]\n"
+ "str s23, [x23, #0x0]\n"
+ "str s24, [x22, #0x0]\n"
"208:" // Height 6: Partial direct writeback: Done
"b 210f\n"
"209:" // Height 6: Full writeback
@@ -3090,26 +3093,26 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"str q13, [x9, #0x20]\n"
"str q14, [x9, #0x30]\n"
"add x9, x9, #0x40\n"
- "str q8, [x25, #0x0]\n"
- "str q9, [x25, #0x10]\n"
- "str q10, [x25, #0x20]\n"
- "str q11, [x25, #0x30]\n"
- "str q15, [x24, #0x0]\n"
- "str q20, [x24, #0x10]\n"
- "str q21, [x24, #0x20]\n"
- "str q22, [x24, #0x30]\n"
- "str q16, [x23, #0x0]\n"
- "str q17, [x23, #0x10]\n"
- "str q18, [x23, #0x20]\n"
- "str q19, [x23, #0x30]\n"
- "str q23, [x22, #0x0]\n"
- "str q28, [x22, #0x10]\n"
- "str q29, [x22, #0x20]\n"
- "str q30, [x22, #0x30]\n"
- "str q24, [x21, #0x0]\n"
- "str q25, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
+ "str q8, [x26, #0x0]\n"
+ "str q9, [x26, #0x10]\n"
+ "str q10, [x26, #0x20]\n"
+ "str q11, [x26, #0x30]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q20, [x25, #0x10]\n"
+ "str q21, [x25, #0x20]\n"
+ "str q22, [x25, #0x30]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q17, [x24, #0x10]\n"
+ "str q18, [x24, #0x20]\n"
+ "str q19, [x24, #0x30]\n"
+ "str q23, [x23, #0x0]\n"
+ "str q28, [x23, #0x10]\n"
+ "str q29, [x23, #0x20]\n"
+ "str q30, [x23, #0x30]\n"
+ "str q24, [x22, #0x0]\n"
+ "str q25, [x22, #0x10]\n"
+ "str q26, [x22, #0x20]\n"
+ "str q27, [x22, #0x30]\n"
"210:" // Height 6: Writeback done
"subs x11, x11, #0x10\n"
"bgt 177b\n"
@@ -3125,8 +3128,8 @@ void a64_hybrid_fp32bf16fp32_mmla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"212:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
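
The change running through the a64_hybrid_fp32bf16fp32_mmla_6x16 hunks above is mechanical: output_ptr (and bias) stop being standalone inline-asm operands and become fields of the shared KernelArgs block, loaded and stored through [%x[args_ptr], %[offsetof_output_ptr]], so the assembly advances the output pointer in place (madd x20, x21, x20, x9 / str x20, [...]) instead of mutating a "+&r" operand. Below is a minimal sketch of that load/advance/store pattern; only the KernelArgs/offsetof usage is taken from the diff, while the function name and the reduced field layout are illustrative assumptions, not the library's API.

#include <cstddef>

// Illustrative stand-in for the kernel's argument block; the real
// KernelArgs in the diff also carries B_ptr, N, bias, string lengths,
// input/output offsets, and the min/max clamp values.
struct KernelArgs {
    void *output_ptr;
};

#if defined(__aarch64__)
// Load the output pointer from the args block, advance it, and store it
// back -- the same pattern the patched kernels use via
// %[offsetof_output_ptr] with an "I" (immediate) constraint.
void advance_output(KernelArgs &ka, long bytes) {
    void *out;
    __asm__ volatile(
        "ldr %[out], [%[args], %[off]]\n"
        "add %[out], %[out], %[bytes]\n"
        "str %[out], [%[args], %[off]]\n"
        : [out] "=&r"(out)
        : [args] "r"(&ka), [bytes] "r"(bytes),
          [off] "I"(offsetof(KernelArgs, output_ptr))
        : "memory");
}
#endif

Dropping output_ptr and bias from the operand list (visible in the constraint-list hunk at the end of the kernel) goes hand in hand with the register renames throughout the writeback code, where the row pointers shift from x25..x21 to x26..x22.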
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp
index bfc9c7e8f9..becd43516f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return false;
}
- StdTransformsFixed<rhs_operand_type, result_type, 4, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
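
The .hpp hunk above widens StdTransformsFixed to name the LHS operand type explicitly rather than reusing one type for both operands. A sketch of the signature change only; the parameter names and the empty body here are illustrative assumptions, not the library's declaration.

// Before this patch the first template argument covered both operand types:
//   StdTransformsFixed<rhs_operand_type, result_type, 4, 16, 4>
// After, the LHS type is spelled out separately:
//   StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 16, 4>
template <typename TLhs, typename TRhs, typename TResult,
          unsigned int Height, unsigned int Width, unsigned int KBlock>
class StdTransformsFixed { /* transform helpers elided */ };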
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp
index eac0e7167e..4d23660942 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -84,133 +84,133 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 61f\n"
"beq 31f\n"
- "mov x15, %x[col_bias]\n"
+ "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v15.16b, #0x1\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x13, %x[output_ptr]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x11, #0x0\n"
+ "mov x12, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w10, [x20, x11, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x20, [%x[input_ptr], x11, LSL #0x3]\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
- "ldr x9, [x20, #0x0]\n"
- "cbnz x11, 6f\n"
+ "ldr x10, [x20, #0x0]\n"
+ "cbnz x12, 6f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x9, x9, x20\n"
+ "add x10, x10, x20\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x9, %x[input_ptr]\n"
+ "mov x10, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x10, #0x10\n"
+ "cmp x11, #0x10\n"
"blt 11f\n"
- "ldr q0, [x9, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr d21, [x12, #0x70]\n"
- "ldr x20, [x12, #0x78]\n"
+ "ldr d21, [x14, #0x70]\n"
+ "ldr x20, [x14, #0x78]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr d20, [x12, #0x80]\n"
+ "ldr d20, [x14, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr d26, [x12, #0x90]\n"
+ "ldr d26, [x14, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr d25, [x12, #0xa0]\n"
+ "ldr d25, [x14, #0xa0]\n"
"mov v21.d[1], x20\n"
- "ldr x20, [x12, #0x88]\n"
+ "ldr x20, [x14, #0x88]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr d24, [x12, #0xb0]\n"
+ "ldr d24, [x14, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr d23, [x12, #0xc0]\n"
+ "ldr d23, [x14, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr d22, [x12, #0xd0]\n"
+ "ldr d22, [x14, #0xd0]\n"
".inst 0x4fa0e2b3 // sdot v19.4s, v21.16b, v0.4b[1]\n"
- "ldr d21, [x12, #0xe0]\n"
+ "ldr d21, [x14, #0xe0]\n"
"mov v20.d[1], x20\n"
- "ldr x20, [x12, #0x98]\n"
- "mov v26.d[1], x20\n"
- "ldr x20, [x12, #0xa8]\n"
- "mov v25.d[1], x20\n"
- "ldr x20, [x12, #0xb8]\n"
- "mov v24.d[1], x20\n"
- "ldr x23, [x12, #0xc8]\n"
+ "ldr x22, [x14, #0x98]\n"
+ "add x10, x10, #0x10\n"
+ "ldr x21, [x14, #0xa8]\n"
".inst 0x4f80ea90 // sdot v16.4s, v20.16b, v0.4b[2]\n"
- "ldr d20, [x12, #0xf0]\n"
+ "ldr d20, [x14, #0xf0]\n"
+ "ldr x20, [x14, #0xb8]\n"
+ "mov v26.d[1], x22\n"
+ "mov v25.d[1], x21\n"
+ "ldr x23, [x14, #0xc8]\n"
+ "ldr x22, [x14, #0xd8]\n"
".inst 0x4f80eb51 // sdot v17.4s, v26.16b, v0.4b[2]\n"
- "ldr x22, [x12, #0xd8]\n"
+ "mov v24.d[1], x20\n"
+ "ldr x21, [x14, #0xe8]\n"
+ "ldr x20, [x14, #0xf8]\n"
".inst 0x4f80eb32 // sdot v18.4s, v25.16b, v0.4b[2]\n"
- "ldr x21, [x12, #0xe8]\n"
".inst 0x4f80eb13 // sdot v19.4s, v24.16b, v0.4b[2]\n"
- "ldr x20, [x12, #0xf8]\n"
"mov v23.d[1], x23\n"
"mov v22.d[1], x22\n"
- "add x9, x9, #0x10\n"
+ "add x14, x14, #0x100\n"
"mov v21.d[1], x21\n"
- "add x12, x12, #0x100\n"
- "mov v20.d[1], x20\n"
".inst 0x4fa0eaf0 // sdot v16.4s, v23.16b, v0.4b[3]\n"
+ "mov v20.d[1], x20\n"
".inst 0x4fa0ead1 // sdot v17.4s, v22.16b, v0.4b[3]\n"
".inst 0x4fa0eab2 // sdot v18.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa0ea93 // sdot v19.4s, v20.16b, v0.4b[3]\n"
"tbnz %x[flags], #31, 8f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "ldr q0, [x9, #0x0]\n"
- "sub x10, x10, #0x10\n"
- "ldr q4, [x12, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "sub x11, x11, #0x10\n"
+ "ldr q4, [x14, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr q21, [x12, #0x70]\n"
+ "ldr q21, [x14, #0x70]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x80]\n"
+ "ldr q20, [x14, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr q26, [x12, #0x90]\n"
+ "ldr q26, [x14, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr q25, [x12, #0xa0]\n"
+ "ldr q25, [x14, #0xa0]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr q24, [x12, #0xb0]\n"
+ "ldr q24, [x14, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr q23, [x12, #0xc0]\n"
+ "ldr q23, [x14, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr q22, [x12, #0xd0]\n"
+ "ldr q22, [x14, #0xd0]\n"
".inst 0x4fa0e2b3 // sdot v19.4s, v21.16b, v0.4b[1]\n"
- "ldr q21, [x12, #0xe0]\n"
+ "ldr q21, [x14, #0xe0]\n"
".inst 0x4f80ea90 // sdot v16.4s, v20.16b, v0.4b[2]\n"
- "ldr q20, [x12, #0xf0]\n"
+ "ldr q20, [x14, #0xf0]\n"
".inst 0x4f80eb51 // sdot v17.4s, v26.16b, v0.4b[2]\n"
- "sub x10, x10, #0x10\n"
+ "sub x11, x11, #0x10\n"
".inst 0x4f80eb32 // sdot v18.4s, v25.16b, v0.4b[2]\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
".inst 0x4f80eb13 // sdot v19.4s, v24.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x4fa0eaf0 // sdot v16.4s, v23.16b, v0.4b[3]\n"
".inst 0x4fa0ead1 // sdot v17.4s, v22.16b, v0.4b[3]\n"
".inst 0x4fa0eab2 // sdot v18.4s, v21.16b, v0.4b[3]\n"
@@ -218,54 +218,54 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"tbnz %x[flags], #31, 10f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "cbz x10, 18f\n"
- "cmp x10, #0x4\n"
+ "cbz x11, 18f\n"
+ "cmp x11, #0x4\n"
"blt 14f\n"
"12:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x9], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
"tbnz %x[flags], #31, 13f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
- "ldr q20, [x12, #0x0]\n"
- "sub x10, x10, #0x4\n"
- "ldr q22, [x12, #0x10]\n"
- "cmp x10, #0x4\n"
- "ldr q21, [x12, #0x20]\n"
- ".inst 0x4f80e290 // sdot v16.4s, v20.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x30]\n"
+ "ldr q23, [x14, #0x0]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q22, [x14, #0x10]\n"
+ "cmp x11, #0x4\n"
+ "ldr q21, [x14, #0x20]\n"
+ "ldr q20, [x14, #0x30]\n"
+ ".inst 0x4f80e2f0 // sdot v16.4s, v23.16b, v0.4b[0]\n"
".inst 0x4f80e2d1 // sdot v17.4s, v22.16b, v0.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x4f80e2b2 // sdot v18.4s, v21.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x4f80e293 // sdot v19.4s, v20.16b, v0.4b[0]\n"
"bge 12b\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x10, 18f\n"
- "tbz x10, #1, 15f\n"
- "ldr h0, [x9], #0x2\n"
- "tbz x10, #0, 16f\n"
- "ld1 { v0.b }[2], [x9]\n"
+ "cbz x11, 18f\n"
+ "tbz x11, #1, 15f\n"
+ "ldr h0, [x10], #0x2\n"
+ "tbz x11, #0, 16f\n"
+ "ld1 { v0.b }[2], [x10]\n"
"b 16f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x9, #0x0]\n"
+ "ldr b0, [x10, #0x0]\n"
"16:" // Height 1: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 17f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q20, [x12, #0x0]\n"
- ".inst 0x4f80e290 // sdot v16.4s, v20.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x10]\n"
- ".inst 0x4f80e291 // sdot v17.4s, v20.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x20]\n"
- ".inst 0x4f80e292 // sdot v18.4s, v20.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x30]\n"
+ "ldr q23, [x14, #0x0]\n"
+ "ldr q22, [x14, #0x10]\n"
+ "ldr q21, [x14, #0x20]\n"
+ "ldr q20, [x14, #0x30]\n"
+ ".inst 0x4f80e2f0 // sdot v16.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x4f80e2d1 // sdot v17.4s, v22.16b, v0.4b[0]\n"
+ "add x14, x14, #0x40\n"
+ ".inst 0x4f80e2b2 // sdot v18.4s, v21.16b, v0.4b[0]\n"
".inst 0x4f80e293 // sdot v19.4s, v20.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x11, x11, #0x1\n"
- "cmp x11, x20\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x20\n"
"bne 4b\n"
"prfm pstl1keep, [x13, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
@@ -276,28 +276,28 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"addp v11.4s, v11.4s, v11.4s\n"
"mul v11.4s, v11.4s, v20.4s\n"
"19:" // Height 1: skip row sum fixup
- "ldr q23, [x15, #0x0]\n"
+ "ldr q24, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q22, [x15, #0x10]\n"
+ "ldr q23, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q21, [x15, #0x20]\n"
+ "ldr q22, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q20, [x15, #0x30]\n"
+ "ldr q21, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
- "add v16.4s, v16.4s, v23.4s\n"
- "add v17.4s, v17.4s, v22.4s\n"
- "add v18.4s, v18.4s, v21.4s\n"
- "add v19.4s, v19.4s, v20.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v20.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v20.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v24.4s\n"
+ "add v17.4s, v17.4s, v23.4s\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add v19.4s, v19.4s, v21.4s\n"
+ "add x16, x16, #0x40\n"
"sqrdmulh v16.4s, v16.4s, v20.4s\n"
"sqrdmulh v17.4s, v17.4s, v20.4s\n"
"sqrdmulh v18.4s, v18.4s, v20.4s\n"
"sqrdmulh v19.4s, v19.4s, v20.4s\n"
- "add x15, x15, #0x40\n"
"tbz %x[flags], #5, 20f\n"
"and v23.16b, v16.16b, v0.16b\n"
"and v22.16b, v17.16b, v0.16b\n"
@@ -317,67 +317,67 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v20.4s }, [x20]\n"
- "add v16.4s, v16.4s, v20.4s\n"
- "add v17.4s, v17.4s, v20.4s\n"
- "add v18.4s, v18.4s, v20.4s\n"
- "add v19.4s, v19.4s, v20.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v20.4s }, [x20]\n"
- "smin v16.4s, v16.4s, v20.4s\n"
- "smin v17.4s, v17.4s, v20.4s\n"
- "smin v18.4s, v18.4s, v20.4s\n"
- "smin v19.4s, v19.4s, v20.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v22.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v21.4s }, [x21]\n"
+ "cmp x15, #0x10\n"
"ld1r { v20.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v22.4s\n"
+ "add v17.4s, v17.4s, v22.4s\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add v19.4s, v19.4s, v22.4s\n"
+ "smin v16.4s, v16.4s, v21.4s\n"
+ "smin v17.4s, v17.4s, v21.4s\n"
+ "smin v18.4s, v18.4s, v21.4s\n"
+ "smin v19.4s, v19.4s, v21.4s\n"
"smax v16.4s, v16.4s, v20.4s\n"
"smax v17.4s, v17.4s, v20.4s\n"
"smax v18.4s, v18.4s, v20.4s\n"
"smax v19.4s, v19.4s, v20.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
- "cmp x14, #0x10\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 29f\n"
- "tbz x14, #3, 24f\n"
+ "tbz x15, #3, 24f\n"
"str d16, [x13], #0x8\n"
- "tbz x14, #2, 22f\n"
+ "tbz x15, #2, 22f\n"
"st1 { v16.s }[2], [x13], #0x4\n"
- "tbz x14, #1, 21f\n"
+ "tbz x15, #1, 21f\n"
"st1 { v16.h }[6], [x13], #0x2\n"
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[14], [x13]\n"
"b 28f\n"
"21:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[12], [x13]\n"
"b 28f\n"
"22:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x14, #1, 23f\n"
+ "tbz x15, #1, 23f\n"
"st1 { v16.h }[4], [x13], #0x2\n"
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[10], [x13]\n"
"b 28f\n"
"23:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[8], [x13]\n"
"b 28f\n"
"24:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x14, #2, 26f\n"
+ "tbz x15, #2, 26f\n"
"str s16, [x13], #0x4\n"
- "tbz x14, #1, 25f\n"
+ "tbz x15, #1, 25f\n"
"st1 { v16.h }[2], [x13], #0x2\n"
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[6], [x13]\n"
"b 28f\n"
"25:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[4], [x13]\n"
"b 28f\n"
"26:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x14, #1, 27f\n"
+ "tbz x15, #1, 27f\n"
"str h16, [x13], #0x2\n"
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[2], [x13]\n"
"b 28f\n"
"27:" // Height 1: Partial direct writeback: partial_1_0
@@ -388,18 +388,18 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"str q16, [x13, #0x0]\n"
"add x13, x13, #0x10\n"
"30:" // Height 1: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x15, x15, #0x10\n"
"bgt 2b\n"
"b 122f\n"
"31:" // Height 2
- "mov x15, %x[col_bias]\n"
+ "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v12.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v15.16b, #0x1\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"32:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -410,80 +410,80 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"33:" // Height 2: setup done
- "mov x11, #0x0\n"
+ "mov x12, #0x0\n"
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w10, [x20, x11, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 35f\n"
- "ldr x20, [%x[input_ptr], x11, LSL #0x3]\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
- "ldr x9, [x20, #0x0]\n"
- "ldr x28, [x20, #0x8]\n"
- "cbnz x11, 36f\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x9, [x20, #0x8]\n"
+ "cbnz x12, 36f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x20\n"
"add x9, x9, x20\n"
- "add x28, x28, x20\n"
"b 36f\n"
"35:" // Height 2: setup direct input
- "mov x9, %x[input_ptr]\n"
- "add x28, x9, x21\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x9, x10, x21\n"
"36:" // Height 2: input setup done
- "cmp x10, #0x10\n"
+ "cmp x11, #0x10\n"
"blt 41f\n"
- "ldr q0, [x9, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q1, [x28, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q1, [x9, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
"blt 39f\n"
"37:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x20, [x12, #0x78]\n"
+ "ldr x20, [x14, #0x78]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr d25, [x12, #0x70]\n"
+ "ldr d25, [x14, #0x70]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v25.d[1], x20\n"
+ "ldr x23, [x14, #0x88]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr d24, [x12, #0x80]\n"
+ "ldr d24, [x14, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr x23, [x12, #0x88]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr d30, [x12, #0x90]\n"
+ "ldr d30, [x14, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr x22, [x12, #0x98]\n"
+ "ldr x22, [x14, #0x98]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr d29, [x12, #0xa0]\n"
- "ldr x21, [x12, #0xa8]\n"
+ "ldr d29, [x14, #0xa0]\n"
+ "ldr x21, [x14, #0xa8]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr d28, [x12, #0xb0]\n"
- "ldr x20, [x12, #0xb8]\n"
+ "ldr d28, [x14, #0xb0]\n"
+ "ldr x20, [x14, #0xb8]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr d27, [x12, #0xc0]\n"
+ "ldr d27, [x14, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
"mov v24.d[1], x23\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr d26, [x12, #0xd0]\n"
+ "ldr d26, [x14, #0xd0]\n"
".inst 0x4fa0e333 // sdot v19.4s, v25.16b, v0.4b[1]\n"
"mov v30.d[1], x22\n"
".inst 0x4fa1e337 // sdot v23.4s, v25.16b, v1.4b[1]\n"
- "ldr d25, [x12, #0xe0]\n"
+ "ldr d25, [x14, #0xe0]\n"
"mov v29.d[1], x21\n"
- "ldr x23, [x12, #0xc8]\n"
+ "ldr x23, [x14, #0xc8]\n"
"mov v28.d[1], x20\n"
- "ldr x22, [x12, #0xd8]\n"
- "ldr x21, [x12, #0xe8]\n"
+ "ldr x22, [x14, #0xd8]\n"
+ "ldr x21, [x14, #0xe8]\n"
".inst 0x4f80eb10 // sdot v16.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb14 // sdot v20.4s, v24.16b, v1.4b[2]\n"
- "ldr d24, [x12, #0xf0]\n"
- "ldr x20, [x12, #0xf8]\n"
+ "ldr d24, [x14, #0xf0]\n"
+ "ldr x20, [x14, #0xf8]\n"
".inst 0x4f80ebd1 // sdot v17.4s, v30.16b, v0.4b[2]\n"
".inst 0x4f81ebd5 // sdot v21.4s, v30.16b, v1.4b[2]\n"
"mov v27.d[1], x23\n"
@@ -494,9 +494,9 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f80eb93 // sdot v19.4s, v28.16b, v0.4b[2]\n"
"mov v24.d[1], x20\n"
".inst 0x4f81eb97 // sdot v23.4s, v28.16b, v1.4b[2]\n"
+ "add x10, x10, #0x10\n"
"add x9, x9, #0x10\n"
- "add x28, x28, #0x10\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x4fa0eb70 // sdot v16.4s, v27.16b, v0.4b[3]\n"
".inst 0x4fa1eb74 // sdot v20.4s, v27.16b, v1.4b[3]\n"
".inst 0x4fa0eb51 // sdot v17.4s, v26.16b, v0.4b[3]\n"
@@ -509,53 +509,53 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"38:" // Height 2: Multiply loop: unique 5: skip row sum
- "ldr q0, [x9, #0x0]\n"
- "sub x10, x10, #0x10\n"
- "ldr q1, [x28, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "sub x11, x11, #0x10\n"
+ "ldr q1, [x9, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"bge 37b\n"
"39:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x10, x10, #0x10\n"
+ "sub x11, x11, #0x10\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q25, [x12, #0x70]\n"
+ "ldr q25, [x14, #0x70]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q24, [x12, #0x80]\n"
+ "ldr q24, [x14, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "add x28, x28, #0x10\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q30, [x12, #0x90]\n"
+ "ldr q30, [x14, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q29, [x12, #0xa0]\n"
+ "ldr q29, [x14, #0xa0]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q28, [x12, #0xb0]\n"
+ "ldr q28, [x14, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr q27, [x12, #0xc0]\n"
+ "ldr q27, [x14, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr q26, [x12, #0xd0]\n"
+ "ldr q26, [x14, #0xd0]\n"
".inst 0x4fa0e333 // sdot v19.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e337 // sdot v23.4s, v25.16b, v1.4b[1]\n"
- "ldr q25, [x12, #0xe0]\n"
+ "ldr q25, [x14, #0xe0]\n"
".inst 0x4f80eb10 // sdot v16.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb14 // sdot v20.4s, v24.16b, v1.4b[2]\n"
- "ldr q24, [x12, #0xf0]\n"
+ "ldr q24, [x14, #0xf0]\n"
".inst 0x4f80ebd1 // sdot v17.4s, v30.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x4f81ebd5 // sdot v21.4s, v30.16b, v1.4b[2]\n"
".inst 0x4f80ebb2 // sdot v18.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebb6 // sdot v22.4s, v29.16b, v1.4b[2]\n"
@@ -573,29 +573,29 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"40:" // Height 2: Multiply loop: unique 6: skip row sum
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"41:" // Height 2: Multiply loop: Main loop skip
- "cbz x10, 48f\n"
- "cmp x10, #0x4\n"
+ "cbz x11, 48f\n"
+ "cmp x11, #0x4\n"
"blt 44f\n"
"42:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x9], #0x4\n"
- "ldr s1, [x28], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "ldr s1, [x9], #0x4\n"
"tbnz %x[flags], #31, 43f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"43:" // Height 2: Multiply loop: unique 7: skip row sum
- "ldr q27, [x12, #0x0]\n"
- "sub x10, x10, #0x4\n"
- "ldr q26, [x12, #0x10]\n"
- "cmp x10, #0x4\n"
- "ldr q25, [x12, #0x20]\n"
+ "ldr q27, [x14, #0x0]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q26, [x14, #0x10]\n"
+ "cmp x11, #0x4\n"
+ "ldr q25, [x14, #0x20]\n"
+ "ldr q24, [x14, #0x30]\n"
".inst 0x4f80e370 // sdot v16.4s, v27.16b, v0.4b[0]\n"
- "ldr q24, [x12, #0x30]\n"
".inst 0x4f81e374 // sdot v20.4s, v27.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x4f80e351 // sdot v17.4s, v26.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x4f81e355 // sdot v21.4s, v26.16b, v1.4b[0]\n"
".inst 0x4f80e332 // sdot v18.4s, v25.16b, v0.4b[0]\n"
".inst 0x4f81e336 // sdot v22.4s, v25.16b, v1.4b[0]\n"
@@ -603,44 +603,44 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f81e317 // sdot v23.4s, v24.16b, v1.4b[0]\n"
"bge 42b\n"
"44:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x10, 48f\n"
- "tbz x10, #1, 45f\n"
- "ldr h0, [x9], #0x2\n"
- "ldr h1, [x28], #0x2\n"
- "tbz x10, #0, 46f\n"
- "ld1 { v0.b }[2], [x9]\n"
- "ld1 { v1.b }[2], [x28]\n"
+ "cbz x11, 48f\n"
+ "tbz x11, #1, 45f\n"
+ "ldr h0, [x10], #0x2\n"
+ "ldr h1, [x9], #0x2\n"
+ "tbz x11, #0, 46f\n"
+ "ld1 { v0.b }[2], [x10]\n"
+ "ld1 { v1.b }[2], [x9]\n"
"b 46f\n"
"45:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x9, #0x0]\n"
- "ldr b1, [x28, #0x0]\n"
+ "ldr b0, [x10, #0x0]\n"
+ "ldr b1, [x9, #0x0]\n"
"46:" // Height 2: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 47f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q24, [x12, #0x0]\n"
- ".inst 0x4f80e310 // sdot v16.4s, v24.16b, v0.4b[0]\n"
- "ldr q26, [x12, #0x10]\n"
- ".inst 0x4f81e314 // sdot v20.4s, v24.16b, v1.4b[0]\n"
- "ldr q25, [x12, #0x20]\n"
+ "ldr q27, [x14, #0x0]\n"
+ "ldr q26, [x14, #0x10]\n"
+ "ldr q25, [x14, #0x20]\n"
+ "ldr q24, [x14, #0x30]\n"
+ ".inst 0x4f80e370 // sdot v16.4s, v27.16b, v0.4b[0]\n"
+ ".inst 0x4f81e374 // sdot v20.4s, v27.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x4f80e351 // sdot v17.4s, v26.16b, v0.4b[0]\n"
- "ldr q24, [x12, #0x30]\n"
".inst 0x4f81e355 // sdot v21.4s, v26.16b, v1.4b[0]\n"
".inst 0x4f80e332 // sdot v18.4s, v25.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x4f81e336 // sdot v22.4s, v25.16b, v1.4b[0]\n"
".inst 0x4f80e313 // sdot v19.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e317 // sdot v23.4s, v24.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x11, x11, #0x1\n"
- "cmp x11, x20\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x20\n"
"bne 34b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20\n"
"prfm pstl1keep, [x13, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add x24, x13, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -652,28 +652,28 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"mul v11.4s, v11.4s, v24.4s\n"
"mul v12.4s, v12.4s, v24.4s\n"
"49:" // Height 2: skip row sum fixup
- "ldr q27, [x15, #0x0]\n"
+ "ldr q28, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q26, [x15, #0x10]\n"
+ "ldr q27, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q25, [x15, #0x20]\n"
+ "ldr q26, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q24, [x15, #0x30]\n"
+ "ldr q25, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "add v16.4s, v16.4s, v27.4s\n"
- "add v17.4s, v17.4s, v26.4s\n"
- "add v18.4s, v18.4s, v25.4s\n"
- "add v19.4s, v19.4s, v24.4s\n"
- "add v20.4s, v20.4s, v27.4s\n"
- "add v21.4s, v21.4s, v26.4s\n"
- "add v22.4s, v22.4s, v25.4s\n"
- "add v23.4s, v23.4s, v24.4s\n"
+ "add v16.4s, v16.4s, v28.4s\n"
+ "add v17.4s, v17.4s, v27.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
"ld1r { v24.4s }, [x20]\n"
+ "add v19.4s, v19.4s, v25.4s\n"
+ "add v20.4s, v20.4s, v28.4s\n"
+ "add v21.4s, v21.4s, v27.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v25.4s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"ld1r { v0.4s }, [x20]\n"
@@ -685,31 +685,31 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"sqrdmulh v21.4s, v21.4s, v24.4s\n"
"sqrdmulh v22.4s, v22.4s, v24.4s\n"
"sqrdmulh v23.4s, v23.4s, v24.4s\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 50f\n"
"and v24.16b, v16.16b, v0.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v24.4s\n"
"and v30.16b, v17.16b, v0.16b\n"
"and v29.16b, v18.16b, v0.16b\n"
"and v28.16b, v19.16b, v0.16b\n"
"and v27.16b, v20.16b, v0.16b\n"
"and v26.16b, v21.16b, v0.16b\n"
"and v25.16b, v22.16b, v0.16b\n"
- "and v24.16b, v23.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v27.4s, v27.4s, #0x1f\n"
"sshr v26.4s, v26.4s, #0x1f\n"
"sshr v25.4s, v25.4s, #0x1f\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v24.4s\n"
+ "and v24.16b, v23.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v30.4s\n"
"sqadd v18.4s, v18.4s, v29.4s\n"
"sqadd v19.4s, v19.4s, v28.4s\n"
"sqadd v20.4s, v20.4s, v27.4s\n"
"sqadd v21.4s, v21.4s, v26.4s\n"
"sqadd v22.4s, v22.4s, v25.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v24.4s\n"
"50:" // Height 2: no shift correction
"srshl v16.4s, v16.4s, v0.4s\n"
@@ -721,27 +721,28 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v24.4s }, [x20]\n"
- "add v16.4s, v16.4s, v24.4s\n"
- "add v17.4s, v17.4s, v24.4s\n"
- "add v18.4s, v18.4s, v24.4s\n"
- "add v19.4s, v19.4s, v24.4s\n"
- "add v20.4s, v20.4s, v24.4s\n"
- "add v21.4s, v21.4s, v24.4s\n"
- "add v22.4s, v22.4s, v24.4s\n"
- "add v23.4s, v23.4s, v24.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v24.4s }, [x20]\n"
- "smin v16.4s, v16.4s, v24.4s\n"
- "smin v17.4s, v17.4s, v24.4s\n"
- "smin v18.4s, v18.4s, v24.4s\n"
- "smin v19.4s, v19.4s, v24.4s\n"
- "smin v20.4s, v20.4s, v24.4s\n"
- "smin v21.4s, v21.4s, v24.4s\n"
- "smin v22.4s, v22.4s, v24.4s\n"
- "smin v23.4s, v23.4s, v24.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v25.4s }, [x21]\n"
+ "cmp x15, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v26.4s\n"
+ "add v17.4s, v17.4s, v26.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v26.4s\n"
+ "add v20.4s, v20.4s, v26.4s\n"
+ "add v21.4s, v21.4s, v26.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v26.4s\n"
+ "smin v16.4s, v16.4s, v25.4s\n"
+ "smin v17.4s, v17.4s, v25.4s\n"
+ "smin v18.4s, v18.4s, v25.4s\n"
+ "smin v19.4s, v19.4s, v25.4s\n"
+ "smin v20.4s, v20.4s, v25.4s\n"
+ "smin v21.4s, v21.4s, v25.4s\n"
+ "smin v22.4s, v22.4s, v25.4s\n"
+ "smin v23.4s, v23.4s, v25.4s\n"
"smax v16.4s, v16.4s, v24.4s\n"
"smax v17.4s, v17.4s, v24.4s\n"
"smax v18.4s, v18.4s, v24.4s\n"
@@ -754,88 +755,87 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"uzp1 v18.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v17.8h, v22.8h, v23.8h\n"
- "cmp x14, #0x10\n"
"uzp1 v16.16b, v16.16b, v18.16b\n"
"uzp1 v20.16b, v20.16b, v17.16b\n"
"bge 59f\n"
- "tbz x14, #3, 54f\n"
+ "tbz x15, #3, 54f\n"
"str d16, [x13], #0x8\n"
- "str d20, [x23], #0x8\n"
- "tbz x14, #2, 52f\n"
+ "str d20, [x24], #0x8\n"
+ "tbz x15, #2, 52f\n"
"st1 { v16.s }[2], [x13], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "tbz x14, #1, 51f\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "tbz x15, #1, 51f\n"
"st1 { v16.h }[6], [x13], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "tbz x14, #0, 58f\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[14], [x13]\n"
- "st1 { v20.b }[14], [x23]\n"
+ "st1 { v20.b }[14], [x24]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x14, #0, 58f\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[12], [x13]\n"
- "st1 { v20.b }[12], [x23]\n"
+ "st1 { v20.b }[12], [x24]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x14, #1, 53f\n"
+ "tbz x15, #1, 53f\n"
"st1 { v16.h }[4], [x13], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "tbz x14, #0, 58f\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[10], [x13]\n"
- "st1 { v20.b }[10], [x23]\n"
+ "st1 { v20.b }[10], [x24]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x14, #0, 58f\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[8], [x13]\n"
- "st1 { v20.b }[8], [x23]\n"
+ "st1 { v20.b }[8], [x24]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x14, #2, 56f\n"
+ "tbz x15, #2, 56f\n"
"str s16, [x13], #0x4\n"
- "str s20, [x23], #0x4\n"
- "tbz x14, #1, 55f\n"
+ "str s20, [x24], #0x4\n"
+ "tbz x15, #1, 55f\n"
"st1 { v16.h }[2], [x13], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "tbz x14, #0, 58f\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[6], [x13]\n"
- "st1 { v20.b }[6], [x23]\n"
+ "st1 { v20.b }[6], [x24]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x14, #0, 58f\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[4], [x13]\n"
- "st1 { v20.b }[4], [x23]\n"
+ "st1 { v20.b }[4], [x24]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x14, #1, 57f\n"
+ "tbz x15, #1, 57f\n"
"str h16, [x13], #0x2\n"
- "str h20, [x23], #0x2\n"
- "tbz x14, #0, 58f\n"
+ "str h20, [x24], #0x2\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[2], [x13]\n"
- "st1 { v20.b }[2], [x23]\n"
+ "st1 { v20.b }[2], [x24]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
"str b16, [x13, #0x0]\n"
- "str b20, [x23, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
"str q16, [x13, #0x0]\n"
"add x13, x13, #0x10\n"
- "str q20, [x23, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
"60:" // Height 2: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x15, x15, #0x10\n"
"bgt 32b\n"
"b 122f\n"
"61:" // Height 3
- "mov x15, %x[col_bias]\n"
+ "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v12.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v13.4s, #0x0\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
"movi v15.16b, #0x1\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"62:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -850,105 +850,105 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"63:" // Height 3: setup done
- "mov x11, #0x0\n"
+ "mov x12, #0x0\n"
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w10, [x20, x11, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 65f\n"
- "ldr x20, [%x[input_ptr], x11, LSL #0x3]\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
- "ldr x9, [x20, #0x0]\n"
- "ldr x28, [x20, #0x8]\n"
- "ldr x27, [x20, #0x10]\n"
- "cbnz x11, 66f\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x9, [x20, #0x8]\n"
+ "ldr x28, [x20, #0x10]\n"
+ "cbnz x12, 66f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x20\n"
"add x9, x9, x20\n"
"add x28, x28, x20\n"
- "add x27, x27, x20\n"
"b 66f\n"
"65:" // Height 3: setup direct input
- "mov x9, %x[input_ptr]\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x9, x10, x21\n"
"add x28, x9, x21\n"
- "add x27, x28, x21\n"
"66:" // Height 3: input setup done
- "cmp x10, #0x10\n"
+ "cmp x11, #0x10\n"
"blt 71f\n"
- "ldr q0, [x9, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q1, [x28, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q1, [x9, #0x0]\n"
+ "ldr q2, [x28, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
"blt 69f\n"
"67:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x20, [x12, #0x78]\n"
+ "ldr x20, [x14, #0x78]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr x23, [x12, #0x88]\n"
+ "ldr x23, [x14, #0x88]\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr d29, [x12, #0x70]\n"
+ "ldr d29, [x14, #0x70]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v29.d[1], x20\n"
+ "ldr x22, [x14, #0x98]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr x22, [x12, #0x98]\n"
+ "ldr x21, [x14, #0xa8]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr d28, [x12, #0x80]\n"
+ "ldr d28, [x14, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x12, #0xa8]\n"
+ "mov v29.d[1], x20\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x12, #0xb8]\n"
+ "ldr x20, [x14, #0xb8]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr d5, [x12, #0x90]\n"
+ "ldr d5, [x14, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
"mov v28.d[1], x23\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "mov v5.d[1], x22\n"
+ "ldr x23, [x14, #0xc8]\n"
".inst 0x4f82e0fb // sdot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr d4, [x12, #0xa0]\n"
+ "ldr d4, [x14, #0xa0]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
- "mov v4.d[1], x21\n"
+ "mov v5.d[1], x22\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr x23, [x12, #0xc8]\n"
+ "ldr x22, [x14, #0xd8]\n"
".inst 0x4fa2e118 // sdot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr d3, [x12, #0xb0]\n"
+ "ldr d3, [x14, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
- "mov v3.d[1], x20\n"
+ "mov v4.d[1], x21\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr x22, [x12, #0xd8]\n"
+ "ldr x21, [x14, #0xe8]\n"
".inst 0x4fa2e139 // sdot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr d31, [x12, #0xc0]\n"
+ "ldr d31, [x14, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr x21, [x12, #0xe8]\n"
+ "mov v3.d[1], x20\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr x20, [x12, #0xf8]\n"
+ "ldr x20, [x14, #0xf8]\n"
".inst 0x4fa2e15a // sdot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr d30, [x12, #0xd0]\n"
+ "ldr d30, [x14, #0xd0]\n"
".inst 0x4fa0e3b3 // sdot v19.4s, v29.16b, v0.4b[1]\n"
"mov v31.d[1], x23\n"
".inst 0x4fa1e3b7 // sdot v23.4s, v29.16b, v1.4b[1]\n"
- "mov v30.d[1], x22\n"
+ "add x10, x10, #0x10\n"
".inst 0x4fa2e3bb // sdot v27.4s, v29.16b, v2.4b[1]\n"
- "ldr d29, [x12, #0xe0]\n"
+ "ldr d29, [x14, #0xe0]\n"
".inst 0x4f80eb90 // sdot v16.4s, v28.16b, v0.4b[2]\n"
- "mov v29.d[1], x21\n"
+ "mov v30.d[1], x22\n"
".inst 0x4f81eb94 // sdot v20.4s, v28.16b, v1.4b[2]\n"
"add x9, x9, #0x10\n"
".inst 0x4f82eb98 // sdot v24.4s, v28.16b, v2.4b[2]\n"
- "ldr d28, [x12, #0xf0]\n"
+ "ldr d28, [x14, #0xf0]\n"
".inst 0x4f80e8b1 // sdot v17.4s, v5.16b, v0.4b[2]\n"
- "mov v28.d[1], x20\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f81e8b5 // sdot v21.4s, v5.16b, v1.4b[2]\n"
"add x28, x28, #0x10\n"
".inst 0x4f82e8b9 // sdot v25.4s, v5.16b, v2.4b[2]\n"
- "add x27, x27, #0x10\n"
+ "mov v28.d[1], x20\n"
".inst 0x4f80e892 // sdot v18.4s, v4.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x4f81e896 // sdot v22.4s, v4.16b, v1.4b[2]\n"
".inst 0x4f82e89a // sdot v26.4s, v4.16b, v2.4b[2]\n"
".inst 0x4f80e873 // sdot v19.4s, v3.16b, v0.4b[2]\n"
@@ -971,65 +971,65 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"68:" // Height 3: Multiply loop: unique 9: skip row sum
- "ldr q0, [x9, #0x0]\n"
- "sub x10, x10, #0x10\n"
- "ldr q1, [x28, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "sub x11, x11, #0x10\n"
+ "ldr q1, [x9, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q2, [x28, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
"prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"bge 67b\n"
"69:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x10, x10, #0x10\n"
+ "sub x11, x11, #0x10\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q29, [x12, #0x70]\n"
+ "ldr q29, [x14, #0x70]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "add x28, x28, #0x10\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q28, [x12, #0x80]\n"
+ "ldr q28, [x14, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr q5, [x12, #0x90]\n"
+ "ldr q5, [x14, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0fb // sdot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr q4, [x12, #0xa0]\n"
+ "ldr q4, [x14, #0xa0]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
".inst 0x4fa2e118 // sdot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr q3, [x12, #0xb0]\n"
+ "ldr q3, [x14, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x4fa2e139 // sdot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr q31, [x12, #0xc0]\n"
+ "ldr q31, [x14, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x4fa2e15a // sdot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr q30, [x12, #0xd0]\n"
+ "ldr q30, [x14, #0xd0]\n"
".inst 0x4fa0e3b3 // sdot v19.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3b7 // sdot v23.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3bb // sdot v27.4s, v29.16b, v2.4b[1]\n"
- "ldr q29, [x12, #0xe0]\n"
+ "ldr q29, [x14, #0xe0]\n"
".inst 0x4f80eb90 // sdot v16.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb94 // sdot v20.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb98 // sdot v24.4s, v28.16b, v2.4b[2]\n"
- "ldr q28, [x12, #0xf0]\n"
+ "ldr q28, [x14, #0xf0]\n"
".inst 0x4f80e8b1 // sdot v17.4s, v5.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x4f81e8b5 // sdot v21.4s, v5.16b, v1.4b[2]\n"
".inst 0x4f82e8b9 // sdot v25.4s, v5.16b, v2.4b[2]\n"
".inst 0x4f80e892 // sdot v18.4s, v4.16b, v0.4b[2]\n"
@@ -1055,32 +1055,32 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"70:" // Height 3: Multiply loop: unique 10: skip row sum
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
"prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"71:" // Height 3: Multiply loop: Main loop skip
- "cbz x10, 78f\n"
- "cmp x10, #0x4\n"
+ "cbz x11, 78f\n"
+ "cmp x11, #0x4\n"
"blt 74f\n"
"72:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x9], #0x4\n"
- "ldr s1, [x28], #0x4\n"
- "ldr s2, [x27], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "ldr s1, [x9], #0x4\n"
+ "ldr s2, [x28], #0x4\n"
"tbnz %x[flags], #31, 73f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"73:" // Height 3: Multiply loop: unique 11: skip row sum
- "ldr q31, [x12, #0x0]\n"
- "sub x10, x10, #0x4\n"
- "ldr q30, [x12, #0x10]\n"
- "cmp x10, #0x4\n"
- "ldr q29, [x12, #0x20]\n"
+ "ldr q31, [x14, #0x0]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q30, [x14, #0x10]\n"
+ "cmp x11, #0x4\n"
+ "ldr q29, [x14, #0x20]\n"
+ "ldr q28, [x14, #0x30]\n"
".inst 0x4f80e3f0 // sdot v16.4s, v31.16b, v0.4b[0]\n"
- "ldr q28, [x12, #0x30]\n"
".inst 0x4f81e3f4 // sdot v20.4s, v31.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x4f82e3f8 // sdot v24.4s, v31.16b, v2.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x4f80e3d1 // sdot v17.4s, v30.16b, v0.4b[0]\n"
".inst 0x4f81e3d5 // sdot v21.4s, v30.16b, v1.4b[0]\n"
".inst 0x4f82e3d9 // sdot v25.4s, v30.16b, v2.4b[0]\n"
@@ -1092,36 +1092,36 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f82e39b // sdot v27.4s, v28.16b, v2.4b[0]\n"
"bge 72b\n"
"74:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x10, 78f\n"
- "tbz x10, #1, 75f\n"
- "ldr h0, [x9], #0x2\n"
- "ldr h1, [x28], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "tbz x10, #0, 76f\n"
- "ld1 { v0.b }[2], [x9]\n"
- "ld1 { v1.b }[2], [x28]\n"
- "ld1 { v2.b }[2], [x27]\n"
+ "cbz x11, 78f\n"
+ "tbz x11, #1, 75f\n"
+ "ldr h0, [x10], #0x2\n"
+ "ldr h1, [x9], #0x2\n"
+ "ldr h2, [x28], #0x2\n"
+ "tbz x11, #0, 76f\n"
+ "ld1 { v0.b }[2], [x10]\n"
+ "ld1 { v1.b }[2], [x9]\n"
+ "ld1 { v2.b }[2], [x28]\n"
"b 76f\n"
"75:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x9, #0x0]\n"
- "ldr b1, [x28, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
+ "ldr b0, [x10, #0x0]\n"
+ "ldr b1, [x9, #0x0]\n"
+ "ldr b2, [x28, #0x0]\n"
"76:" // Height 3: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 77f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 12: skip row sum
- "ldr q28, [x12, #0x0]\n"
- ".inst 0x4f80e390 // sdot v16.4s, v28.16b, v0.4b[0]\n"
- "ldr q30, [x12, #0x10]\n"
- ".inst 0x4f81e394 // sdot v20.4s, v28.16b, v1.4b[0]\n"
- "ldr q29, [x12, #0x20]\n"
- ".inst 0x4f82e398 // sdot v24.4s, v28.16b, v2.4b[0]\n"
- "ldr q28, [x12, #0x30]\n"
+ "ldr q31, [x14, #0x0]\n"
+ "ldr q30, [x14, #0x10]\n"
+ "ldr q29, [x14, #0x20]\n"
+ "ldr q28, [x14, #0x30]\n"
+ ".inst 0x4f80e3f0 // sdot v16.4s, v31.16b, v0.4b[0]\n"
+ ".inst 0x4f81e3f4 // sdot v20.4s, v31.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
+ ".inst 0x4f82e3f8 // sdot v24.4s, v31.16b, v2.4b[0]\n"
".inst 0x4f80e3d1 // sdot v17.4s, v30.16b, v0.4b[0]\n"
".inst 0x4f81e3d5 // sdot v21.4s, v30.16b, v1.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x4f82e3d9 // sdot v25.4s, v30.16b, v2.4b[0]\n"
".inst 0x4f80e3b2 // sdot v18.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f81e3b6 // sdot v22.4s, v29.16b, v1.4b[0]\n"
@@ -1131,15 +1131,15 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f82e39b // sdot v27.4s, v28.16b, v2.4b[0]\n"
"78:" // Height 3: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x11, x11, #0x1\n"
- "cmp x11, x20\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x20\n"
"bne 64b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20\n"
- "add x22, x23, x20\n"
"prfm pstl1keep, [x13, #0x0]\n"
+ "add x24, x13, x20\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -1154,13 +1154,13 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"mul v12.4s, v12.4s, v28.4s\n"
"mul v13.4s, v13.4s, v28.4s\n"
"79:" // Height 3: skip row sum fixup
- "ldr q31, [x15, #0x0]\n"
+ "ldr q31, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q30, [x15, #0x10]\n"
+ "ldr q30, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q29, [x15, #0x20]\n"
+ "ldr q29, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q28, [x15, #0x30]\n"
+ "ldr q28, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
@@ -1183,10 +1183,11 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"add v26.4s, v26.4s, v29.4s\n"
"add v27.4s, v27.4s, v28.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v28.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v28.4s }, [x20]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"ld1r { v0.4s }, [x20]\n"
+ "add x16, x16, #0x40\n"
"sqrdmulh v16.4s, v16.4s, v28.4s\n"
"sqrdmulh v17.4s, v17.4s, v28.4s\n"
"sqrdmulh v18.4s, v18.4s, v28.4s\n"
@@ -1199,39 +1200,38 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"sqrdmulh v25.4s, v25.4s, v28.4s\n"
"sqrdmulh v26.4s, v26.4s, v28.4s\n"
"sqrdmulh v27.4s, v27.4s, v28.4s\n"
- "add x15, x15, #0x40\n"
"tbz %x[flags], #5, 80f\n"
"and v1.16b, v16.16b, v0.16b\n"
"and v31.16b, v17.16b, v0.16b\n"
"and v30.16b, v18.16b, v0.16b\n"
"and v29.16b, v19.16b, v0.16b\n"
"and v28.16b, v20.16b, v0.16b\n"
+ "and v3.16b, v21.16b, v0.16b\n"
+ "and v2.16b, v22.16b, v0.16b\n"
"sshr v1.4s, v1.4s, #0x1f\n"
"sshr v31.4s, v31.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v1.4s\n"
"sqadd v17.4s, v17.4s, v31.4s\n"
"sqadd v18.4s, v18.4s, v30.4s\n"
"sqadd v19.4s, v19.4s, v29.4s\n"
"sqadd v20.4s, v20.4s, v28.4s\n"
- "and v3.16b, v21.16b, v0.16b\n"
- "and v2.16b, v22.16b, v0.16b\n"
"and v1.16b, v23.16b, v0.16b\n"
"and v31.16b, v24.16b, v0.16b\n"
"and v30.16b, v25.16b, v0.16b\n"
"and v29.16b, v26.16b, v0.16b\n"
"and v28.16b, v27.16b, v0.16b\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v21.4s, v21.4s, v3.4s\n"
+ "sqadd v22.4s, v22.4s, v2.4s\n"
"sshr v1.4s, v1.4s, #0x1f\n"
"sshr v31.4s, v31.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v3.4s\n"
- "sqadd v22.4s, v22.4s, v2.4s\n"
"sqadd v23.4s, v23.4s, v1.4s\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"sqadd v25.4s, v25.4s, v30.4s\n"
@@ -1251,35 +1251,36 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"srshl v26.4s, v26.4s, v0.4s\n"
"srshl v27.4s, v27.4s, v0.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v28.4s }, [x20]\n"
- "add v16.4s, v16.4s, v28.4s\n"
- "add v17.4s, v17.4s, v28.4s\n"
- "add v18.4s, v18.4s, v28.4s\n"
- "add v19.4s, v19.4s, v28.4s\n"
- "add v20.4s, v20.4s, v28.4s\n"
- "add v21.4s, v21.4s, v28.4s\n"
- "add v22.4s, v22.4s, v28.4s\n"
- "add v23.4s, v23.4s, v28.4s\n"
- "add v24.4s, v24.4s, v28.4s\n"
- "add v25.4s, v25.4s, v28.4s\n"
- "add v26.4s, v26.4s, v28.4s\n"
- "add v27.4s, v27.4s, v28.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v28.4s }, [x20]\n"
- "smin v16.4s, v16.4s, v28.4s\n"
- "smin v17.4s, v17.4s, v28.4s\n"
- "smin v18.4s, v18.4s, v28.4s\n"
- "smin v19.4s, v19.4s, v28.4s\n"
- "smin v20.4s, v20.4s, v28.4s\n"
- "smin v21.4s, v21.4s, v28.4s\n"
- "smin v22.4s, v22.4s, v28.4s\n"
- "smin v23.4s, v23.4s, v28.4s\n"
- "smin v24.4s, v24.4s, v28.4s\n"
- "smin v25.4s, v25.4s, v28.4s\n"
- "smin v26.4s, v26.4s, v28.4s\n"
- "smin v27.4s, v27.4s, v28.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v30.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v29.4s }, [x21]\n"
+ "cmp x15, #0x10\n"
"ld1r { v28.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v30.4s\n"
+ "add v17.4s, v17.4s, v30.4s\n"
+ "add v18.4s, v18.4s, v30.4s\n"
+ "add v19.4s, v19.4s, v30.4s\n"
+ "add v20.4s, v20.4s, v30.4s\n"
+ "add v21.4s, v21.4s, v30.4s\n"
+ "add v22.4s, v22.4s, v30.4s\n"
+ "add v23.4s, v23.4s, v30.4s\n"
+ "add v24.4s, v24.4s, v30.4s\n"
+ "add v25.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v30.4s\n"
+ "smin v16.4s, v16.4s, v29.4s\n"
+ "smin v17.4s, v17.4s, v29.4s\n"
+ "smin v18.4s, v18.4s, v29.4s\n"
+ "smin v19.4s, v19.4s, v29.4s\n"
+ "smin v20.4s, v20.4s, v29.4s\n"
+ "smin v21.4s, v21.4s, v29.4s\n"
+ "smin v22.4s, v22.4s, v29.4s\n"
+ "smin v23.4s, v23.4s, v29.4s\n"
+ "smin v24.4s, v24.4s, v29.4s\n"
+ "smin v25.4s, v25.4s, v29.4s\n"
+ "smin v26.4s, v26.4s, v29.4s\n"
+ "smin v27.4s, v27.4s, v29.4s\n"
"smax v16.4s, v16.4s, v28.4s\n"
"smax v17.4s, v17.4s, v28.4s\n"
"smax v18.4s, v18.4s, v28.4s\n"
@@ -1298,109 +1299,109 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"uzp1 v18.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v17.8h, v26.8h, v27.8h\n"
- "cmp x14, #0x10\n"
"uzp1 v16.16b, v16.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v18.16b\n"
"uzp1 v24.16b, v24.16b, v17.16b\n"
"bge 89f\n"
- "tbz x14, #3, 84f\n"
+ "tbz x15, #3, 84f\n"
"str d16, [x13], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x14, #2, 82f\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "tbz x15, #2, 82f\n"
"st1 { v16.s }[2], [x13], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "tbz x14, #1, 81f\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "tbz x15, #1, 81f\n"
"st1 { v16.h }[6], [x13], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "tbz x14, #0, 88f\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[14], [x13]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x14, #0, 88f\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[12], [x13]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x14, #1, 83f\n"
+ "tbz x15, #1, 83f\n"
"st1 { v16.h }[4], [x13], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "tbz x14, #0, 88f\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[10], [x13]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x14, #0, 88f\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[8], [x13]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x14, #2, 86f\n"
+ "tbz x15, #2, 86f\n"
"str s16, [x13], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "tbz x14, #1, 85f\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "tbz x15, #1, 85f\n"
"st1 { v16.h }[2], [x13], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "tbz x14, #0, 88f\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[6], [x13]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x14, #0, 88f\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[4], [x13]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x14, #1, 87f\n"
+ "tbz x15, #1, 87f\n"
"str h16, [x13], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "tbz x14, #0, 88f\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[2], [x13]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
"str b16, [x13, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
"str q16, [x13, #0x0]\n"
"add x13, x13, #0x10\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"90:" // Height 3: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x15, x15, #0x10\n"
"bgt 62b\n"
"b 122f\n"
"91:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"mov x20, #0x4\n"
- "mov x15, %x[col_bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
- "movi v12.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
+ "movi v12.4s, #0x0\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "madd x20, x21, x20, x13\n"
"movi v13.4s, #0x0\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"movi v14.4s, #0x0\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"movi v15.16b, #0x1\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"92:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1419,118 +1420,118 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"93:" // Height 4: setup done
- "mov x11, #0x0\n"
+ "mov x12, #0x0\n"
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w10, [x20, x11, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 95f\n"
- "ldr x20, [%x[input_ptr], x11, LSL #0x3]\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
- "ldr x9, [x20, #0x0]\n"
- "ldr x28, [x20, #0x8]\n"
- "ldr x27, [x20, #0x10]\n"
- "ldr x26, [x20, #0x18]\n"
- "cbnz x11, 96f\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x9, [x20, #0x8]\n"
+ "ldr x28, [x20, #0x10]\n"
+ "ldr x27, [x20, #0x18]\n"
+ "cbnz x12, 96f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x20\n"
"add x9, x9, x20\n"
"add x28, x28, x20\n"
"add x27, x27, x20\n"
- "add x26, x26, x20\n"
"b 96f\n"
"95:" // Height 4: setup direct input
- "mov x9, %x[input_ptr]\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x9, x10, x21\n"
"add x28, x9, x21\n"
"add x27, x28, x21\n"
- "add x26, x27, x21\n"
"96:" // Height 4: input setup done
- "cmp x10, #0x10\n"
+ "cmp x11, #0x10\n"
"blt 101f\n"
- "ldr q0, [x9, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q1, [x28, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x26, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q1, [x9, #0x0]\n"
+ "ldr q2, [x28, #0x0]\n"
+ "ldr q3, [x27, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
"blt 99f\n"
"97:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x22, [x12, #0x78]\n"
+ "ldr x21, [x14, #0x78]\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr x21, [x12, #0x88]\n"
+ "ldr x20, [x14, #0x88]\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr x20, [x12, #0x98]\n"
+ "ldr x26, [x14, #0x98]\n"
".inst 0x4f83e09c // sdot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr d4, [x12, #0x70]\n"
+ "ldr d4, [x14, #0x70]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v4.d[1], x22\n"
+ "ldr x25, [x14, #0xa8]\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr x25, [x12, #0xa8]\n"
+ "ldr x24, [x14, #0xb8]\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr x24, [x12, #0xb8]\n"
+ "mov v4.d[1], x21\n"
".inst 0x4f83e0bd // sdot v29.4s, v5.16b, v3.4b[0]\n"
- "ldr d5, [x12, #0x80]\n"
+ "ldr d5, [x14, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
- "mov v5.d[1], x21\n"
+ "ldr x23, [x14, #0xc8]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr x23, [x12, #0xc8]\n"
+ "ldr x22, [x14, #0xd8]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr x22, [x12, #0xd8]\n"
+ "mov v5.d[1], x20\n"
".inst 0x4f83e0de // sdot v30.4s, v6.16b, v3.4b[0]\n"
- "ldr d6, [x12, #0x90]\n"
+ "ldr d6, [x14, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "ldr x21, [x14, #0xe8]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x12, #0xe8]\n"
+ "ldr x20, [x14, #0xf8]\n"
".inst 0x4f82e0fb // sdot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr x20, [x12, #0xf8]\n"
+ "mov v6.d[1], x26\n"
".inst 0x4f83e0ff // sdot v31.4s, v7.16b, v3.4b[0]\n"
- "ldr d7, [x12, #0xa0]\n"
+ "ldr d7, [x14, #0xa0]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
- "mov v7.d[1], x25\n"
+ "add x10, x10, #0x10\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
"add x9, x9, #0x10\n"
".inst 0x4fa2e118 // sdot v24.4s, v8.16b, v2.4b[1]\n"
- "add x28, x28, #0x10\n"
+ "mov v7.d[1], x25\n"
".inst 0x4fa3e11c // sdot v28.4s, v8.16b, v3.4b[1]\n"
- "ldr d8, [x12, #0xb0]\n"
+ "ldr d8, [x14, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
- "mov v8.d[1], x24\n"
+ "add x28, x28, #0x10\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
"add x27, x27, #0x10\n"
".inst 0x4fa2e139 // sdot v25.4s, v9.16b, v2.4b[1]\n"
- "add x26, x26, #0x10\n"
+ "mov v8.d[1], x24\n"
".inst 0x4fa3e13d // sdot v29.4s, v9.16b, v3.4b[1]\n"
- "ldr d9, [x12, #0xc0]\n"
+ "ldr d9, [x14, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
- "mov v9.d[1], x23\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x4fa2e15a // sdot v26.4s, v10.16b, v2.4b[1]\n"
+ "mov v9.d[1], x23\n"
".inst 0x4fa3e15e // sdot v30.4s, v10.16b, v3.4b[1]\n"
- "ldr d10, [x12, #0xd0]\n"
+ "ldr d10, [x14, #0xd0]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
- "mov v10.d[1], x22\n"
".inst 0x4fa1e097 // sdot v23.4s, v4.16b, v1.4b[1]\n"
".inst 0x4fa2e09b // sdot v27.4s, v4.16b, v2.4b[1]\n"
+ "mov v10.d[1], x22\n"
".inst 0x4fa3e09f // sdot v31.4s, v4.16b, v3.4b[1]\n"
- "ldr d4, [x12, #0xe0]\n"
+ "ldr d4, [x14, #0xe0]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
- "mov v4.d[1], x21\n"
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x4f82e8b8 // sdot v24.4s, v5.16b, v2.4b[2]\n"
+ "mov v4.d[1], x21\n"
".inst 0x4f83e8bc // sdot v28.4s, v5.16b, v3.4b[2]\n"
- "ldr d5, [x12, #0xf0]\n"
+ "ldr d5, [x14, #0xf0]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "mov v5.d[1], x20\n"
+ "add x14, x14, #0x100\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
- "add x12, x12, #0x100\n"
".inst 0x4f82e8d9 // sdot v25.4s, v6.16b, v2.4b[2]\n"
+ "mov v5.d[1], x20\n"
".inst 0x4f83e8dd // sdot v29.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f80e8f2 // sdot v18.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8f6 // sdot v22.4s, v7.16b, v1.4b[2]\n"
@@ -1562,77 +1563,77 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"98:" // Height 4: Multiply loop: unique 13: skip row sum
- "ldr q0, [x9, #0x0]\n"
- "sub x10, x10, #0x10\n"
- "ldr q1, [x28, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x26, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "sub x11, x11, #0x10\n"
+ "ldr q1, [x9, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q2, [x28, #0x0]\n"
+ "ldr q3, [x27, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
"prfm pldl1keep, [x28, #0x80]\n"
"prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 97b\n"
"99:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f80e090 // sdot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x10, x10, #0x10\n"
+ "sub x11, x11, #0x10\n"
".inst 0x4f81e094 // sdot v20.4s, v4.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
".inst 0x4f82e098 // sdot v24.4s, v4.16b, v2.4b[0]\n"
- "add x28, x28, #0x10\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f83e09c // sdot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr q4, [x12, #0x70]\n"
+ "ldr q4, [x14, #0x70]\n"
".inst 0x4f80e0b1 // sdot v17.4s, v5.16b, v0.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e0b5 // sdot v21.4s, v5.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
+ "add x27, x27, #0x10\n"
".inst 0x4f82e0b9 // sdot v25.4s, v5.16b, v2.4b[0]\n"
".inst 0x4f83e0bd // sdot v29.4s, v5.16b, v3.4b[0]\n"
- "ldr q5, [x12, #0x80]\n"
+ "ldr q5, [x14, #0x80]\n"
".inst 0x4f80e0d2 // sdot v18.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0d6 // sdot v22.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0da // sdot v26.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0de // sdot v30.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x12, #0x90]\n"
+ "ldr q6, [x14, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f7 // sdot v23.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0fb // sdot v27.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0ff // sdot v31.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x12, #0xa0]\n"
+ "ldr q7, [x14, #0xa0]\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x4fa1e114 // sdot v20.4s, v8.16b, v1.4b[1]\n"
".inst 0x4fa2e118 // sdot v24.4s, v8.16b, v2.4b[1]\n"
".inst 0x4fa3e11c // sdot v28.4s, v8.16b, v3.4b[1]\n"
- "ldr q8, [x12, #0xb0]\n"
+ "ldr q8, [x14, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x4fa1e135 // sdot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x4fa2e139 // sdot v25.4s, v9.16b, v2.4b[1]\n"
".inst 0x4fa3e13d // sdot v29.4s, v9.16b, v3.4b[1]\n"
- "ldr q9, [x12, #0xc0]\n"
+ "ldr q9, [x14, #0xc0]\n"
".inst 0x4fa0e152 // sdot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x4fa1e156 // sdot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x4fa2e15a // sdot v26.4s, v10.16b, v2.4b[1]\n"
".inst 0x4fa3e15e // sdot v30.4s, v10.16b, v3.4b[1]\n"
- "ldr q10, [x12, #0xd0]\n"
+ "ldr q10, [x14, #0xd0]\n"
".inst 0x4fa0e093 // sdot v19.4s, v4.16b, v0.4b[1]\n"
".inst 0x4fa1e097 // sdot v23.4s, v4.16b, v1.4b[1]\n"
".inst 0x4fa2e09b // sdot v27.4s, v4.16b, v2.4b[1]\n"
".inst 0x4fa3e09f // sdot v31.4s, v4.16b, v3.4b[1]\n"
- "ldr q4, [x12, #0xe0]\n"
+ "ldr q4, [x14, #0xe0]\n"
".inst 0x4f80e8b0 // sdot v16.4s, v5.16b, v0.4b[2]\n"
".inst 0x4f81e8b4 // sdot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x4f82e8b8 // sdot v24.4s, v5.16b, v2.4b[2]\n"
".inst 0x4f83e8bc // sdot v28.4s, v5.16b, v3.4b[2]\n"
- "ldr q5, [x12, #0xf0]\n"
+ "ldr q5, [x14, #0xf0]\n"
".inst 0x4f80e8d1 // sdot v17.4s, v6.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x4f81e8d5 // sdot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d9 // sdot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8dd // sdot v29.4s, v6.16b, v3.4b[2]\n"
@@ -1666,35 +1667,35 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"100:" // Height 4: Multiply loop: unique 14: skip row sum
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
"prfm pldl1keep, [x28, #0x80]\n"
"prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"101:" // Height 4: Multiply loop: Main loop skip
- "cbz x10, 108f\n"
- "cmp x10, #0x4\n"
+ "cbz x11, 108f\n"
+ "cmp x11, #0x4\n"
"blt 104f\n"
"102:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x9], #0x4\n"
- "ldr s1, [x28], #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x26], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "ldr s1, [x9], #0x4\n"
+ "ldr s2, [x28], #0x4\n"
+ "ldr s3, [x27], #0x4\n"
"tbnz %x[flags], #31, 103f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"103:" // Height 4: Multiply loop: unique 15: skip row sum
- "ldr q7, [x12, #0x0]\n"
- "sub x10, x10, #0x4\n"
- "ldr q6, [x12, #0x10]\n"
- "cmp x10, #0x4\n"
- "ldr q5, [x12, #0x20]\n"
+ "ldr q7, [x14, #0x0]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q6, [x14, #0x10]\n"
+ "cmp x11, #0x4\n"
+ "ldr q5, [x14, #0x20]\n"
+ "ldr q4, [x14, #0x30]\n"
".inst 0x4f80e0f0 // sdot v16.4s, v7.16b, v0.4b[0]\n"
- "ldr q4, [x12, #0x30]\n"
".inst 0x4f81e0f4 // sdot v20.4s, v7.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x4f83e0fc // sdot v28.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f80e0d1 // sdot v17.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0d5 // sdot v21.4s, v6.16b, v1.4b[0]\n"
@@ -1710,23 +1711,23 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f83e09f // sdot v31.4s, v4.16b, v3.4b[0]\n"
"bge 102b\n"
"104:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x10, 108f\n"
- "tbz x10, #1, 105f\n"
- "ldr h0, [x9], #0x2\n"
- "ldr h1, [x28], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x26], #0x2\n"
- "tbz x10, #0, 106f\n"
- "ld1 { v0.b }[2], [x9]\n"
- "ld1 { v1.b }[2], [x28]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x26]\n"
+ "cbz x11, 108f\n"
+ "tbz x11, #1, 105f\n"
+ "ldr h0, [x10], #0x2\n"
+ "ldr h1, [x9], #0x2\n"
+ "ldr h2, [x28], #0x2\n"
+ "ldr h3, [x27], #0x2\n"
+ "tbz x11, #0, 106f\n"
+ "ld1 { v0.b }[2], [x10]\n"
+ "ld1 { v1.b }[2], [x9]\n"
+ "ld1 { v2.b }[2], [x28]\n"
+ "ld1 { v3.b }[2], [x27]\n"
"b 106f\n"
"105:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x9, #0x0]\n"
- "ldr b1, [x28, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x26, #0x0]\n"
+ "ldr b0, [x10, #0x0]\n"
+ "ldr b1, [x9, #0x0]\n"
+ "ldr b2, [x28, #0x0]\n"
+ "ldr b3, [x27, #0x0]\n"
"106:" // Height 4: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 107f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
@@ -1734,16 +1735,16 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
".inst 0x4e8f946e // sdot v14.4s, v3.16b, v15.16b\n"
"107:" // Height 4: Multiply loop: unique 16: skip row sum
- "ldr q7, [x12, #0x0]\n"
+ "ldr q7, [x14, #0x0]\n"
+ "ldr q6, [x14, #0x10]\n"
+ "ldr q5, [x14, #0x20]\n"
+ "ldr q4, [x14, #0x30]\n"
".inst 0x4f80e0f0 // sdot v16.4s, v7.16b, v0.4b[0]\n"
- "ldr q6, [x12, #0x10]\n"
".inst 0x4f81e0f4 // sdot v20.4s, v7.16b, v1.4b[0]\n"
- "ldr q5, [x12, #0x20]\n"
+ "add x14, x14, #0x40\n"
".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
- "ldr q4, [x12, #0x30]\n"
".inst 0x4f83e0fc // sdot v28.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f80e0d1 // sdot v17.4s, v6.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x4f81e0d5 // sdot v21.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d9 // sdot v25.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0dd // sdot v29.4s, v6.16b, v3.4b[0]\n"
@@ -1757,17 +1758,17 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
".inst 0x4f83e09f // sdot v31.4s, v4.16b, v3.4b[0]\n"
"108:" // Height 4: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x11, x11, #0x1\n"
- "cmp x11, x20\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x20\n"
"bne 94b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
"prfm pstl1keep, [x13, #0x0]\n"
+ "add x24, x13, x20\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -1775,9 +1776,9 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"addp v14.4s, v14.4s, v14.4s\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1r { v0.4s }, [x20]\n"
- "neg v0.4s, v0.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "neg v0.4s, v0.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
"mul v11.4s, v11.4s, v0.4s\n"
@@ -1785,13 +1786,13 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"mul v13.4s, v13.4s, v0.4s\n"
"mul v14.4s, v14.4s, v0.4s\n"
"109:" // Height 4: skip row sum fixup
- "ldr q3, [x15, #0x0]\n"
+ "ldr q3, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q2, [x15, #0x10]\n"
+ "ldr q2, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q1, [x15, #0x20]\n"
+ "ldr q1, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q0, [x15, #0x30]\n"
+ "ldr q0, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
@@ -1822,10 +1823,11 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"add v30.4s, v30.4s, v1.4s\n"
"add v31.4s, v31.4s, v0.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v1.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v1.4s }, [x20]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"ld1r { v0.4s }, [x20]\n"
+ "add x16, x16, #0x40\n"
"sqrdmulh v16.4s, v16.4s, v1.4s\n"
"sqrdmulh v17.4s, v17.4s, v1.4s\n"
"sqrdmulh v18.4s, v18.4s, v1.4s\n"
@@ -1842,52 +1844,51 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"sqrdmulh v29.4s, v29.4s, v1.4s\n"
"sqrdmulh v30.4s, v30.4s, v1.4s\n"
"sqrdmulh v31.4s, v31.4s, v1.4s\n"
- "add x15, x15, #0x40\n"
"tbz %x[flags], #5, 110f\n"
"and v2.16b, v16.16b, v0.16b\n"
"and v1.16b, v17.16b, v0.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v2.4s\n"
- "sqadd v17.4s, v17.4s, v1.4s\n"
"and v7.16b, v18.16b, v0.16b\n"
"and v6.16b, v19.16b, v0.16b\n"
"and v5.16b, v20.16b, v0.16b\n"
"and v4.16b, v21.16b, v0.16b\n"
"and v3.16b, v22.16b, v0.16b\n"
- "and v2.16b, v23.16b, v0.16b\n"
- "and v1.16b, v24.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v2.4s\n"
+ "sqadd v17.4s, v17.4s, v1.4s\n"
+ "and v2.16b, v23.16b, v0.16b\n"
+ "and v1.16b, v24.16b, v0.16b\n"
"sqadd v18.4s, v18.4s, v7.4s\n"
"sqadd v19.4s, v19.4s, v6.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v4.4s\n"
"sqadd v22.4s, v22.4s, v3.4s\n"
- "sqadd v23.4s, v23.4s, v2.4s\n"
- "sqadd v24.4s, v24.4s, v1.4s\n"
"and v7.16b, v25.16b, v0.16b\n"
"and v6.16b, v26.16b, v0.16b\n"
"and v5.16b, v27.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"and v4.16b, v28.16b, v0.16b\n"
"and v3.16b, v29.16b, v0.16b\n"
- "and v2.16b, v30.16b, v0.16b\n"
- "and v1.16b, v31.16b, v0.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v2.4s\n"
+ "sqadd v24.4s, v24.4s, v1.4s\n"
+ "and v2.16b, v30.16b, v0.16b\n"
+ "and v1.16b, v31.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v25.4s, v25.4s, v7.4s\n"
"sqadd v26.4s, v26.4s, v6.4s\n"
"sqadd v27.4s, v27.4s, v5.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v4.4s\n"
"sqadd v29.4s, v29.4s, v3.4s\n"
"sqadd v30.4s, v30.4s, v2.4s\n"
@@ -1910,43 +1911,44 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"srshl v30.4s, v30.4s, v0.4s\n"
"srshl v31.4s, v31.4s, v0.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v0.4s }, [x20]\n"
- "add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v0.4s\n"
- "add v18.4s, v18.4s, v0.4s\n"
- "add v19.4s, v19.4s, v0.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
- "add v21.4s, v21.4s, v0.4s\n"
- "add v22.4s, v22.4s, v0.4s\n"
- "add v23.4s, v23.4s, v0.4s\n"
- "add v24.4s, v24.4s, v0.4s\n"
- "add v25.4s, v25.4s, v0.4s\n"
- "add v26.4s, v26.4s, v0.4s\n"
- "add v27.4s, v27.4s, v0.4s\n"
- "add v28.4s, v28.4s, v0.4s\n"
- "add v29.4s, v29.4s, v0.4s\n"
- "add v30.4s, v30.4s, v0.4s\n"
- "add v31.4s, v31.4s, v0.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v0.4s }, [x20]\n"
- "smin v16.4s, v16.4s, v0.4s\n"
- "smin v17.4s, v17.4s, v0.4s\n"
- "smin v18.4s, v18.4s, v0.4s\n"
- "smin v19.4s, v19.4s, v0.4s\n"
- "smin v20.4s, v20.4s, v0.4s\n"
- "smin v21.4s, v21.4s, v0.4s\n"
- "smin v22.4s, v22.4s, v0.4s\n"
- "smin v23.4s, v23.4s, v0.4s\n"
- "smin v24.4s, v24.4s, v0.4s\n"
- "smin v25.4s, v25.4s, v0.4s\n"
- "smin v26.4s, v26.4s, v0.4s\n"
- "smin v27.4s, v27.4s, v0.4s\n"
- "smin v28.4s, v28.4s, v0.4s\n"
- "smin v29.4s, v29.4s, v0.4s\n"
- "smin v30.4s, v30.4s, v0.4s\n"
- "smin v31.4s, v31.4s, v0.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v2.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v1.4s }, [x21]\n"
+ "cmp x15, #0x10\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v2.4s\n"
+ "add v17.4s, v17.4s, v2.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
+ "add v19.4s, v19.4s, v2.4s\n"
+ "add v20.4s, v20.4s, v2.4s\n"
+ "add v21.4s, v21.4s, v2.4s\n"
+ "add v22.4s, v22.4s, v2.4s\n"
+ "add v23.4s, v23.4s, v2.4s\n"
+ "add v24.4s, v24.4s, v2.4s\n"
+ "add v25.4s, v25.4s, v2.4s\n"
+ "add v26.4s, v26.4s, v2.4s\n"
+ "add v27.4s, v27.4s, v2.4s\n"
+ "add v28.4s, v28.4s, v2.4s\n"
+ "add v29.4s, v29.4s, v2.4s\n"
+ "add v30.4s, v30.4s, v2.4s\n"
+ "add v31.4s, v31.4s, v2.4s\n"
+ "smin v16.4s, v16.4s, v1.4s\n"
+ "smin v17.4s, v17.4s, v1.4s\n"
+ "smin v18.4s, v18.4s, v1.4s\n"
+ "smin v19.4s, v19.4s, v1.4s\n"
+ "smin v20.4s, v20.4s, v1.4s\n"
+ "smin v21.4s, v21.4s, v1.4s\n"
+ "smin v22.4s, v22.4s, v1.4s\n"
+ "smin v23.4s, v23.4s, v1.4s\n"
+ "smin v24.4s, v24.4s, v1.4s\n"
+ "smin v25.4s, v25.4s, v1.4s\n"
+ "smin v26.4s, v26.4s, v1.4s\n"
+ "smin v27.4s, v27.4s, v1.4s\n"
+ "smin v28.4s, v28.4s, v1.4s\n"
+ "smin v29.4s, v29.4s, v1.4s\n"
+ "smin v30.4s, v30.4s, v1.4s\n"
+ "smin v31.4s, v31.4s, v1.4s\n"
"smax v16.4s, v16.4s, v0.4s\n"
"smax v17.4s, v17.4s, v0.4s\n"
"smax v18.4s, v18.4s, v0.4s\n"
@@ -1971,110 +1973,109 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"uzp1 v18.8h, v26.8h, v27.8h\n"
"uzp1 v28.8h, v28.8h, v29.8h\n"
"uzp1 v17.8h, v30.8h, v31.8h\n"
- "cmp x14, #0x10\n"
"uzp1 v16.16b, v16.16b, v0.16b\n"
"uzp1 v20.16b, v20.16b, v19.16b\n"
"uzp1 v24.16b, v24.16b, v18.16b\n"
"uzp1 v28.16b, v28.16b, v17.16b\n"
"bge 119f\n"
- "tbz x14, #3, 114f\n"
+ "tbz x15, #3, 114f\n"
"str d16, [x13], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
- "tbz x14, #2, 112f\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
+ "tbz x15, #2, 112f\n"
"st1 { v16.s }[2], [x13], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
- "tbz x14, #1, 111f\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
+ "tbz x15, #1, 111f\n"
"st1 { v16.h }[6], [x13], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "st1 { v28.h }[6], [x21], #0x2\n"
- "tbz x14, #0, 118f\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "st1 { v28.h }[6], [x22], #0x2\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[14], [x13]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
- "st1 { v28.b }[14], [x21]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "st1 { v28.b }[14], [x22]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x14, #0, 118f\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[12], [x13]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
- "st1 { v28.b }[12], [x21]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "st1 { v28.b }[12], [x22]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x14, #1, 113f\n"
+ "tbz x15, #1, 113f\n"
"st1 { v16.h }[4], [x13], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "st1 { v28.h }[4], [x21], #0x2\n"
- "tbz x14, #0, 118f\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "st1 { v28.h }[4], [x22], #0x2\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[10], [x13]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
- "st1 { v28.b }[10], [x21]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "st1 { v28.b }[10], [x22]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x14, #0, 118f\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[8], [x13]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
- "st1 { v28.b }[8], [x21]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "st1 { v28.b }[8], [x22]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x14, #2, 116f\n"
+ "tbz x15, #2, 116f\n"
"str s16, [x13], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
- "tbz x14, #1, 115f\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
+ "tbz x15, #1, 115f\n"
"st1 { v16.h }[2], [x13], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "st1 { v28.h }[2], [x21], #0x2\n"
- "tbz x14, #0, 118f\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "st1 { v28.h }[2], [x22], #0x2\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[6], [x13]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
- "st1 { v28.b }[6], [x21]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "st1 { v28.b }[6], [x22]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x14, #0, 118f\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[4], [x13]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
- "st1 { v28.b }[4], [x21]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "st1 { v28.b }[4], [x22]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x14, #1, 117f\n"
+ "tbz x15, #1, 117f\n"
"str h16, [x13], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "str h28, [x21], #0x2\n"
- "tbz x14, #0, 118f\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "str h28, [x22], #0x2\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[2], [x13]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
- "st1 { v28.b }[2], [x21]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "st1 { v28.b }[2], [x22]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
"str b16, [x13, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
- "str b28, [x21, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "str b28, [x22, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
"str q16, [x13, #0x0]\n"
"add x13, x13, #0x10\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
- "str q28, [x21, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q28, [x22, #0x0]\n"
"120:" // Height 4: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x15, x15, #0x10\n"
"bgt 92b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 122f\n"
@@ -2088,9 +2089,9 @@ void a64_hybrid_s8qa_dot_4x16_a55 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
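Note on the change running through this file: it is mechanical. output_ptr leaves the inline-asm operand list (where it was a "+&r" read-write register) and becomes a KernelArgs field read inside the asm with "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]". That frees one register binding for the compiler, which is why the a55 variant's clobber list now includes x16 and its partial-writeback addresses shift from x21-x23 to x22-x24. A minimal sketch of the pattern follows (AArch64-only; the struct and function names are illustrative stand-ins, not the library's code):

#include <cstddef>
#include <cstdint>

struct ArgsSketch {              // hypothetical stand-in for KernelArgs
    void *output_ptr = nullptr;  // now a struct field, as in the patch
};

void store_one_byte(ArgsSketch &ka, uint8_t value) {
    __asm__ __volatile__(
        // Load the output pointer from the args struct instead of taking
        // it as a read-write register operand.
        "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        "strb %w[value], [x27]\n"
        :
        : [args_ptr] "r"(&ka),
          [offsetof_output_ptr] "I"(offsetof(ArgsSketch, output_ptr)),
          [value] "r"(value)
        : "memory", "x27");
}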
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp
index 3b773a6827..75e35a3e98 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void a64_hybrid_s8qa_dot_4x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -90,7 +90,7 @@ void a64_hybrid_s8qa_dot_4x16 (
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -100,8 +100,8 @@ void a64_hybrid_s8qa_dot_4x16 (
"mov x26, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -134,6 +134,7 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q26, [x28, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q25, [x28, #0xa0]\n"
+ "add x24, x24, #0x10\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q24, [x28, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
@@ -144,11 +145,10 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q21, [x28, #0xe0]\n"
".inst 0x4f80ea90 // sdot v16.4s, v20.16b, v0.4b[2]\n"
"ldr q20, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4f80eb51 // sdot v17.4s, v26.16b, v0.4b[2]\n"
- "add x24, x24, #0x10\n"
".inst 0x4f80eb32 // sdot v18.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f80eb13 // sdot v19.4s, v24.16b, v0.4b[2]\n"
- "add x28, x28, #0x100\n"
".inst 0x4fa0eaf0 // sdot v16.4s, v23.16b, v0.4b[3]\n"
".inst 0x4fa0ead1 // sdot v17.4s, v22.16b, v0.4b[3]\n"
".inst 0x4fa0eab2 // sdot v18.4s, v21.16b, v0.4b[3]\n"
@@ -159,9 +159,9 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q0, [x24, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q5, [x28, #0x10]\n"
"ldr q6, [x28, #0x20]\n"
+ "cmp x25, #0x20\n"
"ldr q7, [x28, #0x30]\n"
"ldr q8, [x28, #0x40]\n"
"ldr q9, [x28, #0x50]\n"
@@ -177,6 +177,8 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q26, [x28, #0x90]\n"
".inst 0x4f80e0f3 // sdot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q25, [x28, #0xa0]\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x4fa0e110 // sdot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q24, [x28, #0xb0]\n"
".inst 0x4fa0e131 // sdot v17.4s, v9.16b, v0.4b[1]\n"
@@ -187,12 +189,10 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q21, [x28, #0xe0]\n"
".inst 0x4f80ea90 // sdot v16.4s, v20.16b, v0.4b[2]\n"
"ldr q20, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x4f80eb51 // sdot v17.4s, v26.16b, v0.4b[2]\n"
- "sub x25, x25, #0x10\n"
".inst 0x4f80eb32 // sdot v18.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f80eb13 // sdot v19.4s, v24.16b, v0.4b[2]\n"
- "add x24, x24, #0x10\n"
- "add x28, x28, #0x100\n"
".inst 0x4fa0eaf0 // sdot v16.4s, v23.16b, v0.4b[3]\n"
".inst 0x4fa0ead1 // sdot v17.4s, v22.16b, v0.4b[3]\n"
".inst 0x4fa0eab2 // sdot v18.4s, v21.16b, v0.4b[3]\n"
@@ -213,14 +213,14 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q23, [x28, #0x0]\n"
"ldr q22, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
"ldr q21, [x28, #0x20]\n"
"ldr q20, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e2f0 // sdot v16.4s, v23.16b, v0.4b[0]\n"
".inst 0x4f80e2d1 // sdot v17.4s, v22.16b, v0.4b[0]\n"
".inst 0x4f80e2b2 // sdot v18.4s, v21.16b, v0.4b[0]\n"
".inst 0x4f80e293 // sdot v19.4s, v20.16b, v0.4b[0]\n"
- "add x28, x28, #0x40\n"
"bge 12b\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
"cbz x25, 18f\n"
@@ -235,15 +235,15 @@ void a64_hybrid_s8qa_dot_4x16 (
"tbnz %x[flags], #31, 17f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q21, [x28, #0x0]\n"
- "ldr q20, [x28, #0x10]\n"
- ".inst 0x4f80e2b0 // sdot v16.4s, v21.16b, v0.4b[0]\n"
- ".inst 0x4f80e291 // sdot v17.4s, v20.16b, v0.4b[0]\n"
+ "ldr q23, [x28, #0x0]\n"
+ "ldr q22, [x28, #0x10]\n"
"ldr q21, [x28, #0x20]\n"
"ldr q20, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f80e2f0 // sdot v16.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x4f80e2d1 // sdot v17.4s, v22.16b, v0.4b[0]\n"
".inst 0x4f80e2b2 // sdot v18.4s, v21.16b, v0.4b[0]\n"
".inst 0x4f80e293 // sdot v19.4s, v20.16b, v0.4b[0]\n"
- "add x28, x28, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -252,8 +252,8 @@ void a64_hybrid_s8qa_dot_4x16 (
"prfm pstl1keep, [x27, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v20.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v20.4s }, [x20]\n"
"neg v20.4s, v20.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"mul v11.4s, v11.4s, v20.4s\n"
@@ -267,16 +267,16 @@ void a64_hybrid_s8qa_dot_4x16 (
"add v18.4s, v18.4s, v11.4s\n"
"add v19.4s, v19.4s, v11.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v20.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v20.4s }, [x20]\n"
"add v16.4s, v16.4s, v24.4s\n"
"add v17.4s, v17.4s, v23.4s\n"
- "add v18.4s, v18.4s, v22.4s\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x10, x10, #0x40\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v22.4s\n"
"add v19.4s, v19.4s, v21.4s\n"
"sqrdmulh v16.4s, v16.4s, v20.4s\n"
- "add x10, x10, #0x40\n"
"sqrdmulh v17.4s, v17.4s, v20.4s\n"
"sqrdmulh v18.4s, v18.4s, v20.4s\n"
"sqrdmulh v19.4s, v19.4s, v20.4s\n"
@@ -294,21 +294,21 @@ void a64_hybrid_s8qa_dot_4x16 (
"sqadd v18.4s, v18.4s, v21.4s\n"
"sqadd v19.4s, v19.4s, v20.4s\n"
"20:" // Height 1: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v22.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v22.4s }, [x21]\n"
"ld1r { v21.4s }, [x20]\n"
- "add v16.4s, v16.4s, v22.4s\n"
- "add v17.4s, v17.4s, v22.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v20.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v22.4s\n"
+ "add v17.4s, v17.4s, v22.4s\n"
"add v18.4s, v18.4s, v22.4s\n"
"add v19.4s, v19.4s, v22.4s\n"
- "cmp x9, #0x10\n"
"smin v16.4s, v16.4s, v21.4s\n"
"smin v17.4s, v17.4s, v21.4s\n"
"smin v18.4s, v18.4s, v21.4s\n"
@@ -381,7 +381,7 @@ void a64_hybrid_s8qa_dot_4x16 (
"movi v15.16b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"32:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -395,8 +395,8 @@ void a64_hybrid_s8qa_dot_4x16 (
"mov x26, #0x0\n"
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 35f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -476,9 +476,9 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q0, [x24, #0x0]\n"
"ldr q1, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q4, [x28, #0x0]\n"
"ldr q5, [x28, #0x10]\n"
+ "cmp x25, #0x20\n"
"ldr q6, [x28, #0x20]\n"
"ldr q7, [x28, #0x30]\n"
"ldr q8, [x28, #0x40]\n"
@@ -553,14 +553,14 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q27, [x28, #0x0]\n"
"ldr q26, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
"ldr q25, [x28, #0x20]\n"
"ldr q24, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e370 // sdot v16.4s, v27.16b, v0.4b[0]\n"
".inst 0x4f81e374 // sdot v20.4s, v27.16b, v1.4b[0]\n"
".inst 0x4f80e351 // sdot v17.4s, v26.16b, v0.4b[0]\n"
".inst 0x4f81e355 // sdot v21.4s, v26.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f80e332 // sdot v18.4s, v25.16b, v0.4b[0]\n"
".inst 0x4f81e336 // sdot v22.4s, v25.16b, v1.4b[0]\n"
".inst 0x4f80e313 // sdot v19.4s, v24.16b, v0.4b[0]\n"
@@ -583,17 +583,17 @@ void a64_hybrid_s8qa_dot_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f942c // sdot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q24, [x28, #0x0]\n"
+ "ldr q27, [x28, #0x0]\n"
"ldr q26, [x28, #0x10]\n"
- ".inst 0x4f80e310 // sdot v16.4s, v24.16b, v0.4b[0]\n"
- ".inst 0x4f81e314 // sdot v20.4s, v24.16b, v1.4b[0]\n"
"ldr q25, [x28, #0x20]\n"
"ldr q24, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f80e370 // sdot v16.4s, v27.16b, v0.4b[0]\n"
+ ".inst 0x4f81e374 // sdot v20.4s, v27.16b, v1.4b[0]\n"
".inst 0x4f80e351 // sdot v17.4s, v26.16b, v0.4b[0]\n"
".inst 0x4f81e355 // sdot v21.4s, v26.16b, v1.4b[0]\n"
".inst 0x4f80e332 // sdot v18.4s, v25.16b, v0.4b[0]\n"
".inst 0x4f81e336 // sdot v22.4s, v25.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f80e313 // sdot v19.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e317 // sdot v23.4s, v24.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
@@ -602,14 +602,14 @@ void a64_hybrid_s8qa_dot_4x16 (
"cmp x26, x20\n"
"bne 34b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v24.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v24.4s }, [x20]\n"
"neg v24.4s, v24.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -627,10 +627,10 @@ void a64_hybrid_s8qa_dot_4x16 (
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v24.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v16.4s, v16.4s, v28.4s\n"
"add v17.4s, v17.4s, v27.4s\n"
@@ -652,45 +652,45 @@ void a64_hybrid_s8qa_dot_4x16 (
"sqrdmulh v23.4s, v23.4s, v24.4s\n"
"tbz %x[flags], #5, 50f\n"
"and v24.16b, v16.16b, v0.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v24.4s\n"
"and v30.16b, v17.16b, v0.16b\n"
"and v29.16b, v18.16b, v0.16b\n"
"and v28.16b, v19.16b, v0.16b\n"
"and v27.16b, v20.16b, v0.16b\n"
"and v26.16b, v21.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"and v25.16b, v22.16b, v0.16b\n"
- "and v24.16b, v23.16b, v0.16b\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v24.4s\n"
+ "and v24.16b, v23.16b, v0.16b\n"
"sshr v26.4s, v26.4s, #0x1f\n"
"sshr v25.4s, v25.4s, #0x1f\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v17.4s, v17.4s, v30.4s\n"
"sqadd v18.4s, v18.4s, v29.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v28.4s\n"
"sqadd v20.4s, v20.4s, v27.4s\n"
"sqadd v21.4s, v21.4s, v26.4s\n"
"sqadd v22.4s, v22.4s, v25.4s\n"
"sqadd v23.4s, v23.4s, v24.4s\n"
"50:" // Height 2: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"add v16.4s, v16.4s, v26.4s\n"
"add v17.4s, v17.4s, v26.4s\n"
"add v18.4s, v18.4s, v26.4s\n"
@@ -724,68 +724,68 @@ void a64_hybrid_s8qa_dot_4x16 (
"bge 59f\n"
"tbz x9, #3, 54f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x9, #2, 52f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
"tbz x9, #1, 51f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x23]\n"
+ "st1 { v20.b }[14], [x24]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 58f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x23]\n"
+ "st1 { v20.b }[12], [x24]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 53f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x23]\n"
+ "st1 { v20.b }[10], [x24]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 58f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x23]\n"
+ "st1 { v20.b }[8], [x24]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 56f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x23], #0x4\n"
+ "str s20, [x24], #0x4\n"
"tbz x9, #1, 55f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x23]\n"
+ "st1 { v20.b }[6], [x24]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 58f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x23]\n"
+ "st1 { v20.b }[4], [x24]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 57f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x23], #0x2\n"
+ "str h20, [x24], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x23]\n"
+ "st1 { v20.b }[2], [x24]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x23, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x23, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
"60:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 32b\n"
@@ -799,7 +799,7 @@ void a64_hybrid_s8qa_dot_4x16 (
"movi v15.16b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"62:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -817,8 +817,8 @@ void a64_hybrid_s8qa_dot_4x16 (
"mov x26, #0x0\n"
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 65f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -920,9 +920,9 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q0, [x24, #0x0]\n"
"ldr q1, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q2, [x22, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
+ "cmp x25, #0x20\n"
"ldr q5, [x28, #0x10]\n"
"ldr q6, [x28, #0x20]\n"
"ldr q7, [x28, #0x30]\n"
@@ -1020,14 +1020,14 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q31, [x28, #0x0]\n"
"ldr q30, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
"ldr q29, [x28, #0x20]\n"
"ldr q28, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e3f0 // sdot v16.4s, v31.16b, v0.4b[0]\n"
".inst 0x4f81e3f4 // sdot v20.4s, v31.16b, v1.4b[0]\n"
".inst 0x4f82e3f8 // sdot v24.4s, v31.16b, v2.4b[0]\n"
".inst 0x4f80e3d1 // sdot v17.4s, v30.16b, v0.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f81e3d5 // sdot v21.4s, v30.16b, v1.4b[0]\n"
".inst 0x4f82e3d9 // sdot v25.4s, v30.16b, v2.4b[0]\n"
".inst 0x4f80e3b2 // sdot v18.4s, v29.16b, v0.4b[0]\n"
@@ -1060,15 +1060,15 @@ void a64_hybrid_s8qa_dot_4x16 (
"77:" // Height 3: Multiply loop: unique 12: skip row sum
"ldr q31, [x28, #0x0]\n"
"ldr q30, [x28, #0x10]\n"
- ".inst 0x4f80e3f0 // sdot v16.4s, v31.16b, v0.4b[0]\n"
- ".inst 0x4f81e3f4 // sdot v20.4s, v31.16b, v1.4b[0]\n"
"ldr q29, [x28, #0x20]\n"
"ldr q28, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f80e3f0 // sdot v16.4s, v31.16b, v0.4b[0]\n"
+ ".inst 0x4f81e3f4 // sdot v20.4s, v31.16b, v1.4b[0]\n"
".inst 0x4f82e3f8 // sdot v24.4s, v31.16b, v2.4b[0]\n"
".inst 0x4f80e3d1 // sdot v17.4s, v30.16b, v0.4b[0]\n"
".inst 0x4f81e3d5 // sdot v21.4s, v30.16b, v1.4b[0]\n"
".inst 0x4f82e3d9 // sdot v25.4s, v30.16b, v2.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f80e3b2 // sdot v18.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f81e3b6 // sdot v22.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f82e3ba // sdot v26.4s, v29.16b, v2.4b[0]\n"
@@ -1081,16 +1081,16 @@ void a64_hybrid_s8qa_dot_4x16 (
"cmp x26, x20\n"
"bne 64b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v28.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v28.4s }, [x20]\n"
"addp v13.4s, v13.4s, v13.4s\n"
"neg v28.4s, v28.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
@@ -1111,10 +1111,10 @@ void a64_hybrid_s8qa_dot_4x16 (
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v28.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
@@ -1152,18 +1152,18 @@ void a64_hybrid_s8qa_dot_4x16 (
"and v30.16b, v18.16b, v0.16b\n"
"and v29.16b, v19.16b, v0.16b\n"
"and v28.16b, v20.16b, v0.16b\n"
+ "and v3.16b, v21.16b, v0.16b\n"
"sshr v1.4s, v1.4s, #0x1f\n"
"sshr v31.4s, v31.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
+ "and v2.16b, v22.16b, v0.16b\n"
"sqadd v16.4s, v16.4s, v1.4s\n"
"sqadd v17.4s, v17.4s, v31.4s\n"
"sqadd v18.4s, v18.4s, v30.4s\n"
"sqadd v19.4s, v19.4s, v29.4s\n"
"sqadd v20.4s, v20.4s, v28.4s\n"
- "and v3.16b, v21.16b, v0.16b\n"
- "and v2.16b, v22.16b, v0.16b\n"
"and v1.16b, v23.16b, v0.16b\n"
"and v31.16b, v24.16b, v0.16b\n"
"and v30.16b, v25.16b, v0.16b\n"
@@ -1184,21 +1184,21 @@ void a64_hybrid_s8qa_dot_4x16 (
"sqadd v26.4s, v26.4s, v29.4s\n"
"sqadd v27.4s, v27.4s, v28.4s\n"
"80:" // Height 3: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v30.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v30.4s }, [x21]\n"
"ld1r { v29.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v28.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"srshl v24.4s, v24.4s, v0.4s\n"
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
@@ -1251,102 +1251,103 @@ void a64_hybrid_s8qa_dot_4x16 (
"bge 89f\n"
"tbz x9, #3, 84f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x9, #2, 82f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
"tbz x9, #1, 81f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 88f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 83f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 88f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 86f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
"tbz x9, #1, 85f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 88f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 87f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"90:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 62b\n"
"b 122f\n"
"91:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x4\n"
"mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "movi v13.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "movi v15.16b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "movi v15.16b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"92:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1368,8 +1369,8 @@ void a64_hybrid_s8qa_dot_4x16 (
"mov x26, #0x0\n"
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 95f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1493,9 +1494,9 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q0, [x24, #0x0]\n"
"ldr q1, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q2, [x22, #0x0]\n"
"ldr q3, [x21, #0x0]\n"
+ "cmp x25, #0x20\n"
"ldr q4, [x28, #0x0]\n"
"ldr q5, [x28, #0x10]\n"
"ldr q6, [x28, #0x20]\n"
@@ -1616,14 +1617,14 @@ void a64_hybrid_s8qa_dot_4x16 (
"ldr q7, [x28, #0x0]\n"
"ldr q6, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
"ldr q5, [x28, #0x20]\n"
"ldr q4, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
".inst 0x4f80e0f0 // sdot v16.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0f4 // sdot v20.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0fc // sdot v28.4s, v7.16b, v3.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f80e0d1 // sdot v17.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0d5 // sdot v21.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d9 // sdot v25.4s, v6.16b, v2.4b[0]\n"
@@ -1664,15 +1665,15 @@ void a64_hybrid_s8qa_dot_4x16 (
"107:" // Height 4: Multiply loop: unique 16: skip row sum
"ldr q7, [x28, #0x0]\n"
"ldr q6, [x28, #0x10]\n"
- ".inst 0x4f80e0f0 // sdot v16.4s, v7.16b, v0.4b[0]\n"
- ".inst 0x4f81e0f4 // sdot v20.4s, v7.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
"ldr q4, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f80e0f0 // sdot v16.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f81e0f4 // sdot v20.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f8 // sdot v24.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0fc // sdot v28.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f80e0d1 // sdot v17.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0d5 // sdot v21.4s, v6.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x4f82e0d9 // sdot v25.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0dd // sdot v29.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f80e0b2 // sdot v18.4s, v5.16b, v0.4b[0]\n"
@@ -1689,18 +1690,18 @@ void a64_hybrid_s8qa_dot_4x16 (
"cmp x26, x20\n"
"bne 94b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x21, x22, x20\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v0.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
"neg v0.4s, v0.4s\n"
@@ -1724,10 +1725,10 @@ void a64_hybrid_s8qa_dot_4x16 (
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v1.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
@@ -1774,32 +1775,32 @@ void a64_hybrid_s8qa_dot_4x16 (
"tbz %x[flags], #5, 110f\n"
"and v2.16b, v16.16b, v0.16b\n"
"and v1.16b, v17.16b, v0.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v2.4s\n"
- "sqadd v17.4s, v17.4s, v1.4s\n"
"and v7.16b, v18.16b, v0.16b\n"
"and v6.16b, v19.16b, v0.16b\n"
"and v5.16b, v20.16b, v0.16b\n"
"and v4.16b, v21.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"and v3.16b, v22.16b, v0.16b\n"
- "and v2.16b, v23.16b, v0.16b\n"
- "and v1.16b, v24.16b, v0.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v2.4s\n"
+ "sqadd v17.4s, v17.4s, v1.4s\n"
+ "and v2.16b, v23.16b, v0.16b\n"
+ "and v1.16b, v24.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v18.4s, v18.4s, v7.4s\n"
"sqadd v19.4s, v19.4s, v6.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v4.4s\n"
"sqadd v22.4s, v22.4s, v3.4s\n"
+ "and v7.16b, v25.16b, v0.16b\n"
"sqadd v23.4s, v23.4s, v2.4s\n"
"sqadd v24.4s, v24.4s, v1.4s\n"
- "and v7.16b, v25.16b, v0.16b\n"
"and v6.16b, v26.16b, v0.16b\n"
"and v5.16b, v27.16b, v0.16b\n"
"and v4.16b, v28.16b, v0.16b\n"
@@ -1821,21 +1822,21 @@ void a64_hybrid_s8qa_dot_4x16 (
"sqadd v30.4s, v30.4s, v2.4s\n"
"sqadd v31.4s, v31.4s, v1.4s\n"
"110:" // Height 4: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v3.4s }, [x21]\n"
"ld1r { v2.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v1.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"srshl v24.4s, v24.4s, v0.4s\n"
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
@@ -1907,100 +1908,100 @@ void a64_hybrid_s8qa_dot_4x16 (
"bge 119f\n"
"tbz x9, #3, 114f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x9, #2, 112f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
"tbz x9, #1, 111f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "st1 { v28.h }[6], [x21], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "st1 { v28.h }[6], [x22], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
- "st1 { v28.b }[14], [x21]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "st1 { v28.b }[14], [x22]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 118f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
- "st1 { v28.b }[12], [x21]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "st1 { v28.b }[12], [x22]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 113f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "st1 { v28.h }[4], [x21], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "st1 { v28.h }[4], [x22], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
- "st1 { v28.b }[10], [x21]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "st1 { v28.b }[10], [x22]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 118f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
- "st1 { v28.b }[8], [x21]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "st1 { v28.b }[8], [x22]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 116f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
"tbz x9, #1, 115f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "st1 { v28.h }[2], [x21], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "st1 { v28.h }[2], [x22], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
- "st1 { v28.b }[6], [x21]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "st1 { v28.b }[6], [x22]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 118f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
- "st1 { v28.b }[4], [x21]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "st1 { v28.b }[4], [x22]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 117f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "str h28, [x21], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "str h28, [x22], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
- "st1 { v28.b }[2], [x21]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "st1 { v28.b }[2], [x22]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
- "str b28, [x21, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "str b28, [x22, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
- "str q28, [x21, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q28, [x22, #0x0]\n"
"120:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 92b\n"
@@ -2016,8 +2017,8 @@ void a64_hybrid_s8qa_dot_4x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
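The Height 4 path in this file deserves one extra remark: the old code advanced the pointer operand directly ("madd %x[output_ptr], x21, x20, %x[output_ptr]"), while the new code computes the next four-row base into x20 and stores it back with "str x20, [%x[args_ptr], %[offsetof_output_ptr]]", so the pointer survives in memory between blocks rather than in a reserved register. A hedged C++ equivalent of that arithmetic (names assumed; the stride is taken to be in bytes, which holds for the int8 output written here):

#include <cstddef>

struct ArgsSketch {                  // hypothetical stand-in for KernelArgs
    void *output_ptr = nullptr;      // current writeback base
    size_t output_offset = 0;        // per-row stride, in bytes here
};

// Equivalent of "madd x20, x21, x20, x27" followed by the str above:
// next_base = row_stride * rows + current_base, written back so the next
// block of rows (rows == 4, via "mov x20, #0x4" in the prologue) starts
// from the right place.
static inline void advance_output_base(ArgsSketch &ka, size_t rows) {
    ka.output_ptr =
        static_cast<char *>(ka.output_ptr) + ka.output_offset * rows;
}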
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16.hpp
index 55ea68d1b5..55290826d1 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsFixed<rhs_operand_type, result_type, 4, 16, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 16, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
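The one-line .hpp change above belongs to the same series: StdTransformsFixed gains the LHS operand type as a separate leading template parameter instead of reusing the RHS type, presumably so kernels whose two operand types differ can share the same helper. An illustrative skeleton only (assumed shape; the real class also declares the transform entry points):

#include <cstdint>

template <typename lhs_operand_type, typename rhs_operand_type,
          typename result_type, unsigned height, unsigned width,
          unsigned k_unroll>
class StdTransformsFixedSketch {};

// For this s8qa kernel both operand types are int8_t, so the instantiation
// is value-identical before and after the extra parameter was added.
using transforms_sketch =
    StdTransformsFixedSketch<int8_t, int8_t, int32_t, 4, 16, 8>;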
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp
index 883bd5afdd..8f70b3dc26 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_mmla_4x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void a64_hybrid_s8qa_mmla_4x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -90,7 +90,7 @@ void a64_hybrid_s8qa_mmla_4x16 (
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -104,8 +104,8 @@ void a64_hybrid_s8qa_mmla_4x16 (
"mov x26, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -130,6 +130,7 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q4, [x28, #0x60]\n"
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
+ "add x24, x24, #0x10\n"
"trn1 v0.2d, v1.2d, v27.2d\n"
".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"ldr q25, [x28, #0x70]\n"
@@ -151,9 +152,8 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e98a430 // smmla v16.4s, v1.16b, v24.16b\n"
"ldr q24, [x28, #0xf0]\n"
".inst 0x4e9ea434 // smmla v20.4s, v1.16b, v30.16b\n"
- "add x24, x24, #0x10\n"
- ".inst 0x4e9da431 // smmla v17.4s, v1.16b, v29.16b\n"
"add x28, x28, #0x100\n"
+ ".inst 0x4e9da431 // smmla v17.4s, v1.16b, v29.16b\n"
".inst 0x4e9ca435 // smmla v21.4s, v1.16b, v28.16b\n"
".inst 0x4e9ba432 // smmla v18.4s, v1.16b, v27.16b\n"
".inst 0x4e9aa436 // smmla v22.4s, v1.16b, v26.16b\n"
@@ -166,9 +166,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q1, [x24, #0x0]\n"
"ldr q5, [x28, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
+ "cmp x25, #0x20\n"
"ldr q8, [x28, #0x30]\n"
"ldr q9, [x28, #0x40]\n"
"ldr q10, [x28, #0x50]\n"
@@ -176,10 +176,12 @@ void a64_hybrid_s8qa_mmla_4x16 (
"prfm pldl1keep, [x24, #0x80]\n"
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v0.2d, v1.2d, v24.2d\n"
+ "trn2 v1.2d, v1.2d, v24.2d\n"
".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"ldr q25, [x28, #0x70]\n"
- "trn2 v1.2d, v1.2d, v24.2d\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q24, [x28, #0x80]\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
@@ -196,11 +198,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q25, [x28, #0xe0]\n"
".inst 0x4e98a430 // smmla v16.4s, v1.16b, v24.16b\n"
"ldr q24, [x28, #0xf0]\n"
- "sub x25, x25, #0x10\n"
".inst 0x4e9ea434 // smmla v20.4s, v1.16b, v30.16b\n"
- ".inst 0x4e9da431 // smmla v17.4s, v1.16b, v29.16b\n"
- "add x24, x24, #0x10\n"
"add x28, x28, #0x100\n"
+ ".inst 0x4e9da431 // smmla v17.4s, v1.16b, v29.16b\n"
".inst 0x4e9ca435 // smmla v21.4s, v1.16b, v28.16b\n"
".inst 0x4e9ba432 // smmla v18.4s, v1.16b, v27.16b\n"
".inst 0x4e9aa436 // smmla v22.4s, v1.16b, v26.16b\n"
@@ -222,24 +222,24 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
"ldr q24, [x28, #0x0]\n"
- "ldr q26, [x28, #0x10]\n"
- ".inst 0x4e98a410 // smmla v16.4s, v0.16b, v24.16b\n"
+ "ldr q30, [x28, #0x10]\n"
"sub x25, x25, #0x8\n"
- "ldr q25, [x28, #0x20]\n"
- "ldr q24, [x28, #0x30]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
"cmp x25, #0x8\n"
- ".inst 0x4e9aa414 // smmla v20.4s, v0.16b, v26.16b\n"
"ldr q27, [x28, #0x40]\n"
"ldr q26, [x28, #0x50]\n"
- ".inst 0x4e99a411 // smmla v17.4s, v0.16b, v25.16b\n"
- ".inst 0x4e98a415 // smmla v21.4s, v0.16b, v24.16b\n"
"ldr q25, [x28, #0x60]\n"
+ ".inst 0x4e98a410 // smmla v16.4s, v0.16b, v24.16b\n"
"ldr q24, [x28, #0x70]\n"
+ ".inst 0x4e9ea414 // smmla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x4e9da411 // smmla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e9ca415 // smmla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e9ba412 // smmla v18.4s, v0.16b, v27.16b\n"
".inst 0x4e9aa416 // smmla v22.4s, v0.16b, v26.16b\n"
".inst 0x4e99a413 // smmla v19.4s, v0.16b, v25.16b\n"
".inst 0x4e98a417 // smmla v23.4s, v0.16b, v24.16b\n"
- "add x28, x28, #0x80\n"
"bge 12b\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
"cbz x25, 20f\n"
@@ -267,23 +267,23 @@ void a64_hybrid_s8qa_mmla_4x16 (
"tbnz %x[flags], #31, 19f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"19:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q25, [x28, #0x0]\n"
- "ldr q24, [x28, #0x10]\n"
- ".inst 0x4e99a410 // smmla v16.4s, v0.16b, v25.16b\n"
- ".inst 0x4e98a414 // smmla v20.4s, v0.16b, v24.16b\n"
- "ldr q25, [x28, #0x20]\n"
- "ldr q24, [x28, #0x30]\n"
- ".inst 0x4e99a411 // smmla v17.4s, v0.16b, v25.16b\n"
- ".inst 0x4e98a415 // smmla v21.4s, v0.16b, v24.16b\n"
- "ldr q25, [x28, #0x40]\n"
- "ldr q24, [x28, #0x50]\n"
- ".inst 0x4e99a412 // smmla v18.4s, v0.16b, v25.16b\n"
- ".inst 0x4e98a416 // smmla v22.4s, v0.16b, v24.16b\n"
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "ldr q27, [x28, #0x40]\n"
+ "ldr q26, [x28, #0x50]\n"
"ldr q25, [x28, #0x60]\n"
+ ".inst 0x4e98a410 // smmla v16.4s, v0.16b, v24.16b\n"
"ldr q24, [x28, #0x70]\n"
+ ".inst 0x4e9ea414 // smmla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x4e9da411 // smmla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e9ca415 // smmla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e9ba412 // smmla v18.4s, v0.16b, v27.16b\n"
+ ".inst 0x4e9aa416 // smmla v22.4s, v0.16b, v26.16b\n"
".inst 0x4e99a413 // smmla v19.4s, v0.16b, v25.16b\n"
".inst 0x4e98a417 // smmla v23.4s, v0.16b, v24.16b\n"
- "add x28, x28, #0x80\n"
"20:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -297,8 +297,8 @@ void a64_hybrid_s8qa_mmla_4x16 (
"mov v23.16b, v16.16b\n"
"tbnz %x[flags], #31, 21f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v16.4s }, [x20]\n"
"neg v16.4s, v16.4s\n"
"dup v11.4s, v11.s[0]\n"
"mul v11.4s, v11.4s, v16.4s\n"
@@ -312,16 +312,16 @@ void a64_hybrid_s8qa_mmla_4x16 (
"add v18.4s, v18.4s, v11.4s\n"
"add v19.4s, v19.4s, v11.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v16.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v16.4s }, [x20]\n"
"add v23.4s, v23.4s, v24.4s\n"
"add v17.4s, v17.4s, v22.4s\n"
- "add v18.4s, v18.4s, v21.4s\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x10, x10, #0x40\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v21.4s\n"
"add v19.4s, v19.4s, v20.4s\n"
"sqrdmulh v23.4s, v23.4s, v16.4s\n"
- "add x10, x10, #0x40\n"
"sqrdmulh v17.4s, v17.4s, v16.4s\n"
"sqrdmulh v18.4s, v18.4s, v16.4s\n"
"sqrdmulh v19.4s, v19.4s, v16.4s\n"
@@ -339,21 +339,21 @@ void a64_hybrid_s8qa_mmla_4x16 (
"sqadd v18.4s, v18.4s, v20.4s\n"
"sqadd v19.4s, v19.4s, v16.4s\n"
"22:" // Height 1: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v21.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v21.4s }, [x21]\n"
"ld1r { v20.4s }, [x20]\n"
- "add v23.4s, v23.4s, v21.4s\n"
- "add v17.4s, v17.4s, v21.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v16.4s }, [x20]\n"
+ "add v23.4s, v23.4s, v21.4s\n"
+ "add v17.4s, v17.4s, v21.4s\n"
"add v18.4s, v18.4s, v21.4s\n"
"add v19.4s, v19.4s, v21.4s\n"
- "cmp x9, #0x10\n"
"smin v23.4s, v23.4s, v20.4s\n"
"smin v17.4s, v17.4s, v20.4s\n"
"smin v18.4s, v18.4s, v20.4s\n"
@@ -426,7 +426,7 @@ void a64_hybrid_s8qa_mmla_4x16 (
"movi v15.16b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"34:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -440,8 +440,8 @@ void a64_hybrid_s8qa_mmla_4x16 (
"mov x26, #0x0\n"
"36:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 37f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -471,11 +471,13 @@ void a64_hybrid_s8qa_mmla_4x16 (
"blt 41f\n"
"39:" // Height 2: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"ldr q25, [x28, #0x70]\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q24, [x28, #0x80]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
"ldr q30, [x28, #0x90]\n"
".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
@@ -491,11 +493,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e98a430 // smmla v16.4s, v1.16b, v24.16b\n"
"ldr q24, [x28, #0xf0]\n"
".inst 0x4e9ea434 // smmla v20.4s, v1.16b, v30.16b\n"
- "add x24, x24, #0x10\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e9da431 // smmla v17.4s, v1.16b, v29.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e9ca435 // smmla v21.4s, v1.16b, v28.16b\n"
- "add x28, x28, #0x100\n"
".inst 0x4e9ba432 // smmla v18.4s, v1.16b, v27.16b\n"
".inst 0x4e9aa436 // smmla v22.4s, v1.16b, v26.16b\n"
".inst 0x4e99a433 // smmla v19.4s, v1.16b, v25.16b\n"
@@ -507,9 +507,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q1, [x24, #0x0]\n"
"ldr q2, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q5, [x28, #0x0]\n"
"ldr q6, [x28, #0x10]\n"
+ "cmp x25, #0x20\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
"ldr q9, [x28, #0x40]\n"
@@ -520,11 +520,14 @@ void a64_hybrid_s8qa_mmla_4x16 (
"bge 39b\n"
"41:" // Height 2: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"ldr q25, [x28, #0x70]\n"
".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"ldr q24, [x28, #0x80]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
"ldr q30, [x28, #0x90]\n"
".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
@@ -539,14 +542,11 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q25, [x28, #0xe0]\n"
".inst 0x4e98a430 // smmla v16.4s, v1.16b, v24.16b\n"
"ldr q24, [x28, #0xf0]\n"
- "sub x25, x25, #0x10\n"
".inst 0x4e9ea434 // smmla v20.4s, v1.16b, v30.16b\n"
+ "add x28, x28, #0x100\n"
".inst 0x4e9da431 // smmla v17.4s, v1.16b, v29.16b\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
".inst 0x4e9ca435 // smmla v21.4s, v1.16b, v28.16b\n"
".inst 0x4e9ba432 // smmla v18.4s, v1.16b, v27.16b\n"
- "add x28, x28, #0x100\n"
".inst 0x4e9aa436 // smmla v22.4s, v1.16b, v26.16b\n"
".inst 0x4e99a433 // smmla v19.4s, v1.16b, v25.16b\n"
".inst 0x4e98a437 // smmla v23.4s, v1.16b, v24.16b\n"
@@ -568,24 +568,24 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"45:" // Height 2: Multiply loop: unique 7: skip row sum
"ldr q24, [x28, #0x0]\n"
- "ldr q26, [x28, #0x10]\n"
- ".inst 0x4e98a410 // smmla v16.4s, v0.16b, v24.16b\n"
+ "ldr q30, [x28, #0x10]\n"
"sub x25, x25, #0x8\n"
- "ldr q25, [x28, #0x20]\n"
- "ldr q24, [x28, #0x30]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
"cmp x25, #0x8\n"
- ".inst 0x4e9aa414 // smmla v20.4s, v0.16b, v26.16b\n"
"ldr q27, [x28, #0x40]\n"
"ldr q26, [x28, #0x50]\n"
- ".inst 0x4e99a411 // smmla v17.4s, v0.16b, v25.16b\n"
- ".inst 0x4e98a415 // smmla v21.4s, v0.16b, v24.16b\n"
"ldr q25, [x28, #0x60]\n"
+ ".inst 0x4e98a410 // smmla v16.4s, v0.16b, v24.16b\n"
"ldr q24, [x28, #0x70]\n"
+ ".inst 0x4e9ea414 // smmla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x4e9da411 // smmla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e9ca415 // smmla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e9ba412 // smmla v18.4s, v0.16b, v27.16b\n"
".inst 0x4e9aa416 // smmla v22.4s, v0.16b, v26.16b\n"
".inst 0x4e99a413 // smmla v19.4s, v0.16b, v25.16b\n"
".inst 0x4e98a417 // smmla v23.4s, v0.16b, v24.16b\n"
- "add x28, x28, #0x80\n"
"bge 44b\n"
"46:" // Height 2: Multiply loop: Skip odd blocks
"cbz x25, 52f\n"
@@ -620,23 +620,23 @@ void a64_hybrid_s8qa_mmla_4x16 (
"tbnz %x[flags], #31, 51f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
"51:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q25, [x28, #0x0]\n"
- "ldr q24, [x28, #0x10]\n"
- ".inst 0x4e99a410 // smmla v16.4s, v0.16b, v25.16b\n"
- ".inst 0x4e98a414 // smmla v20.4s, v0.16b, v24.16b\n"
- "ldr q25, [x28, #0x20]\n"
- "ldr q24, [x28, #0x30]\n"
- ".inst 0x4e99a411 // smmla v17.4s, v0.16b, v25.16b\n"
- ".inst 0x4e98a415 // smmla v21.4s, v0.16b, v24.16b\n"
- "ldr q25, [x28, #0x40]\n"
- "ldr q24, [x28, #0x50]\n"
- ".inst 0x4e99a412 // smmla v18.4s, v0.16b, v25.16b\n"
- ".inst 0x4e98a416 // smmla v22.4s, v0.16b, v24.16b\n"
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "ldr q27, [x28, #0x40]\n"
+ "ldr q26, [x28, #0x50]\n"
"ldr q25, [x28, #0x60]\n"
+ ".inst 0x4e98a410 // smmla v16.4s, v0.16b, v24.16b\n"
"ldr q24, [x28, #0x70]\n"
+ ".inst 0x4e9ea414 // smmla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x4e9da411 // smmla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e9ca415 // smmla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e9ba412 // smmla v18.4s, v0.16b, v27.16b\n"
+ ".inst 0x4e9aa416 // smmla v22.4s, v0.16b, v26.16b\n"
".inst 0x4e99a413 // smmla v19.4s, v0.16b, v25.16b\n"
".inst 0x4e98a417 // smmla v23.4s, v0.16b, v24.16b\n"
- "add x28, x28, #0x80\n"
"52:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -644,21 +644,21 @@ void a64_hybrid_s8qa_mmla_4x16 (
"bne 36b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v24.2d, v16.2d, v20.2d\n"
- "add x23, x27, x20\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"mov v23.16b, v24.16b\n"
"tbnz %x[flags], #31, 53f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v24.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v24.4s }, [x20]\n"
"neg v24.4s, v24.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
@@ -676,10 +676,10 @@ void a64_hybrid_s8qa_mmla_4x16 (
"add v16.4s, v16.4s, v12.4s\n"
"add v17.4s, v17.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v24.4s }, [x20]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v23.4s, v23.4s, v28.4s\n"
"add v20.4s, v20.4s, v27.4s\n"
@@ -701,45 +701,45 @@ void a64_hybrid_s8qa_mmla_4x16 (
"sqrdmulh v19.4s, v19.4s, v24.4s\n"
"tbz %x[flags], #5, 54f\n"
"and v24.16b, v23.16b, v0.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v24.4s\n"
"and v30.16b, v20.16b, v0.16b\n"
"and v29.16b, v21.16b, v0.16b\n"
"and v28.16b, v22.16b, v0.16b\n"
"and v27.16b, v16.16b, v0.16b\n"
"and v26.16b, v17.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"and v25.16b, v18.16b, v0.16b\n"
- "and v24.16b, v19.16b, v0.16b\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v24.4s\n"
+ "and v24.16b, v19.16b, v0.16b\n"
"sshr v26.4s, v26.4s, #0x1f\n"
"sshr v25.4s, v25.4s, #0x1f\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v30.4s\n"
"sqadd v21.4s, v21.4s, v29.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v22.4s, v22.4s, v28.4s\n"
"sqadd v16.4s, v16.4s, v27.4s\n"
"sqadd v17.4s, v17.4s, v26.4s\n"
"sqadd v18.4s, v18.4s, v25.4s\n"
"sqadd v19.4s, v19.4s, v24.4s\n"
"54:" // Height 2: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v0.4s\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"add v23.4s, v23.4s, v26.4s\n"
"add v20.4s, v20.4s, v26.4s\n"
"add v21.4s, v21.4s, v26.4s\n"
@@ -773,68 +773,68 @@ void a64_hybrid_s8qa_mmla_4x16 (
"bge 63f\n"
"tbz x9, #3, 58f\n"
"str d23, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d16, [x24], #0x8\n"
"tbz x9, #2, 56f\n"
"st1 { v23.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
"tbz x9, #1, 55f\n"
"st1 { v23.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
"tbz x9, #0, 62f\n"
"st1 { v23.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x24]\n"
"b 62f\n"
"55:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 62f\n"
"st1 { v23.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x24]\n"
"b 62f\n"
"56:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 57f\n"
"st1 { v23.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
"tbz x9, #0, 62f\n"
"st1 { v23.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x24]\n"
"b 62f\n"
"57:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 62f\n"
"st1 { v23.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x24]\n"
"b 62f\n"
"58:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 60f\n"
"str s23, [x27], #0x4\n"
- "str s16, [x23], #0x4\n"
+ "str s16, [x24], #0x4\n"
"tbz x9, #1, 59f\n"
"st1 { v23.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
"tbz x9, #0, 62f\n"
"st1 { v23.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x24]\n"
"b 62f\n"
"59:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 62f\n"
"st1 { v23.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x24]\n"
"b 62f\n"
"60:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 61f\n"
"str h23, [x27], #0x2\n"
- "str h16, [x23], #0x2\n"
+ "str h16, [x24], #0x2\n"
"tbz x9, #0, 62f\n"
"st1 { v23.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x24]\n"
"b 62f\n"
"61:" // Height 2: Partial direct writeback: partial_1_0
"str b23, [x27, #0x0]\n"
- "str b16, [x23, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
"62:" // Height 2: Partial direct writeback: Done
"b 64f\n"
"63:" // Height 2: Full writeback
"str q23, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q16, [x23, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
"64:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 34b\n"
@@ -848,7 +848,7 @@ void a64_hybrid_s8qa_mmla_4x16 (
"movi v15.16b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"66:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -870,8 +870,8 @@ void a64_hybrid_s8qa_mmla_4x16 (
"mov x26, #0x0\n"
"68:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 69f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -905,35 +905,35 @@ void a64_hybrid_s8qa_mmla_4x16 (
"71:" // Height 3: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
- "ldr q14, [x28, #0x70]\n"
- ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q5, [x28, #0x60]\n"
+ "ldr q14, [x28, #0x60]\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x4e86a45c // smmla v28.4s, v2.16b, v6.16b\n"
"ldr q4, [x28, #0x80]\n"
- ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
"ldr q7, [x28, #0x90]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
".inst 0x4e88a45d // smmla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
".inst 0x4e89a45a // smmla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x22, x22, #0x10\n"
".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45e // smmla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- ".inst 0x4e85a413 // smmla v19.4s, v0.16b, v5.16b\n"
- ".inst 0x4e85a45b // smmla v27.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e8ea413 // smmla v19.4s, v0.16b, v14.16b\n"
+ ".inst 0x4e8ea45b // smmla v27.4s, v2.16b, v14.16b\n"
"ldr q6, [x28, #0xd0]\n"
- ".inst 0x4e8ea417 // smmla v23.4s, v0.16b, v14.16b\n"
- ".inst 0x4e8ea45f // smmla v31.4s, v2.16b, v14.16b\n"
+ ".inst 0x4e85a417 // smmla v23.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85a45f // smmla v31.4s, v2.16b, v5.16b\n"
"ldr q5, [x28, #0xe0]\n"
".inst 0x4e84a430 // smmla v16.4s, v1.16b, v4.16b\n"
".inst 0x4e84a478 // smmla v24.4s, v3.16b, v4.16b\n"
@@ -962,9 +962,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q1, [x24, #0x0]\n"
"ldr q2, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q3, [x22, #0x0]\n"
"ldr q5, [x28, #0x0]\n"
+ "cmp x25, #0x20\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
@@ -977,36 +977,36 @@ void a64_hybrid_s8qa_mmla_4x16 (
"73:" // Height 3: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
- "ldr q14, [x28, #0x70]\n"
- ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q5, [x28, #0x60]\n"
+ "ldr q14, [x28, #0x60]\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x4e86a45c // smmla v28.4s, v2.16b, v6.16b\n"
"ldr q4, [x28, #0x80]\n"
- ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
"ldr q7, [x28, #0x90]\n"
- "sub x25, x25, #0x10\n"
- ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
".inst 0x4e88a45d // smmla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
".inst 0x4e89a45a // smmla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x23, x23, #0x10\n"
".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45e // smmla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- "add x22, x22, #0x10\n"
- ".inst 0x4e85a413 // smmla v19.4s, v0.16b, v5.16b\n"
- ".inst 0x4e85a45b // smmla v27.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e8ea413 // smmla v19.4s, v0.16b, v14.16b\n"
+ ".inst 0x4e8ea45b // smmla v27.4s, v2.16b, v14.16b\n"
"ldr q6, [x28, #0xd0]\n"
- ".inst 0x4e8ea417 // smmla v23.4s, v0.16b, v14.16b\n"
- ".inst 0x4e8ea45f // smmla v31.4s, v2.16b, v14.16b\n"
+ ".inst 0x4e85a417 // smmla v23.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85a45f // smmla v31.4s, v2.16b, v5.16b\n"
"ldr q5, [x28, #0xe0]\n"
".inst 0x4e84a430 // smmla v16.4s, v1.16b, v4.16b\n"
".inst 0x4e84a478 // smmla v24.4s, v3.16b, v4.16b\n"
@@ -1040,34 +1040,34 @@ void a64_hybrid_s8qa_mmla_4x16 (
"cmp x25, #0x8\n"
"blt 78f\n"
"76:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
+ "ldr d3, [x24], #0x8\n"
"ldr d0, [x23], #0x8\n"
- "trn1 v0.2d, v1.2d, v0.2d\n"
"ldr d1, [x22], #0x8\n"
+ "trn1 v0.2d, v3.2d, v0.2d\n"
"trn1 v2.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 77f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 11: skip row sum
- "ldr q3, [x28, #0x0]\n"
- "ldr q1, [x28, #0x10]\n"
- ".inst 0x4e83a410 // smmla v16.4s, v0.16b, v3.16b\n"
- ".inst 0x4e83a458 // smmla v24.4s, v2.16b, v3.16b\n"
+ "ldr q1, [x28, #0x0]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "sub x25, x25, #0x8\n"
"ldr q7, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
- "sub x25, x25, #0x8\n"
"cmp x25, #0x8\n"
"ldr q5, [x28, #0x40]\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x4e81a414 // smmla v20.4s, v0.16b, v1.16b\n"
- ".inst 0x4e81a45c // smmla v28.4s, v2.16b, v1.16b\n"
"ldr q3, [x28, #0x60]\n"
+ ".inst 0x4e81a410 // smmla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x28, #0x70]\n"
+ ".inst 0x4e88a414 // smmla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e88a45c // smmla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
".inst 0x4e86a45d // smmla v29.4s, v2.16b, v6.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
".inst 0x4e85a45a // smmla v26.4s, v2.16b, v5.16b\n"
".inst 0x4e84a416 // smmla v22.4s, v0.16b, v4.16b\n"
@@ -1120,24 +1120,24 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"83:" // Height 3: Multiply loop: unique 12: skip row sum
"ldr q1, [x28, #0x0]\n"
- "ldr q3, [x28, #0x10]\n"
- ".inst 0x4e81a410 // smmla v16.4s, v0.16b, v1.16b\n"
- ".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
- "ldr q1, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
- ".inst 0x4e83a414 // smmla v20.4s, v0.16b, v3.16b\n"
- ".inst 0x4e83a45c // smmla v28.4s, v2.16b, v3.16b\n"
"ldr q5, [x28, #0x40]\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n"
- ".inst 0x4e81a459 // smmla v25.4s, v2.16b, v1.16b\n"
"ldr q3, [x28, #0x60]\n"
+ ".inst 0x4e81a410 // smmla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x28, #0x70]\n"
+ ".inst 0x4e88a414 // smmla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e88a45c // smmla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
".inst 0x4e86a45d // smmla v29.4s, v2.16b, v6.16b\n"
".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
".inst 0x4e85a45a // smmla v26.4s, v2.16b, v5.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x4e84a416 // smmla v22.4s, v0.16b, v4.16b\n"
".inst 0x4e84a45e // smmla v30.4s, v2.16b, v4.16b\n"
".inst 0x4e83a413 // smmla v19.4s, v0.16b, v3.16b\n"
@@ -1151,18 +1151,18 @@ void a64_hybrid_s8qa_mmla_4x16 (
"bne 68b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v0.2d, v16.2d, v20.2d\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
- "uzp1 v20.2d, v17.2d, v21.2d\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v24.2d, v24.2d, v28.2d\n"
"uzp1 v25.2d, v25.2d, v29.2d\n"
"uzp1 v26.2d, v26.2d, v30.2d\n"
@@ -1170,9 +1170,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"mov v31.16b, v0.16b\n"
"tbnz %x[flags], #31, 85f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v23.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
+ "ld1r { v23.4s }, [x20]\n"
"neg v23.4s, v23.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
@@ -1192,10 +1192,10 @@ void a64_hybrid_s8qa_mmla_4x16 (
"add v16.4s, v16.4s, v12.4s\n"
"add v17.4s, v17.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v23.4s }, [x20]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
@@ -1233,18 +1233,18 @@ void a64_hybrid_s8qa_mmla_4x16 (
"and v29.16b, v21.16b, v0.16b\n"
"and v28.16b, v22.16b, v0.16b\n"
"and v23.16b, v16.16b, v0.16b\n"
+ "and v3.16b, v17.16b, v0.16b\n"
"sshr v1.4s, v1.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v23.4s, v23.4s, #0x1f\n"
+ "and v2.16b, v18.16b, v0.16b\n"
"sqadd v31.4s, v31.4s, v1.4s\n"
"sqadd v20.4s, v20.4s, v30.4s\n"
"sqadd v21.4s, v21.4s, v29.4s\n"
"sqadd v22.4s, v22.4s, v28.4s\n"
"sqadd v16.4s, v16.4s, v23.4s\n"
- "and v3.16b, v17.16b, v0.16b\n"
- "and v2.16b, v18.16b, v0.16b\n"
"and v1.16b, v19.16b, v0.16b\n"
"and v30.16b, v24.16b, v0.16b\n"
"and v29.16b, v25.16b, v0.16b\n"
@@ -1265,21 +1265,21 @@ void a64_hybrid_s8qa_mmla_4x16 (
"sqadd v26.4s, v26.4s, v28.4s\n"
"sqadd v27.4s, v27.4s, v23.4s\n"
"86:" // Height 3: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v29.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v0.4s\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v29.4s }, [x21]\n"
"ld1r { v28.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v23.4s }, [x20]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"srshl v24.4s, v24.4s, v0.4s\n"
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
@@ -1332,102 +1332,103 @@ void a64_hybrid_s8qa_mmla_4x16 (
"bge 95f\n"
"tbz x9, #3, 90f\n"
"str d31, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x9, #2, 88f\n"
"st1 { v31.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
"tbz x9, #1, 87f\n"
"st1 { v31.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
"tbz x9, #0, 94f\n"
"st1 { v31.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 94f\n"
"87:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 94f\n"
"st1 { v31.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 94f\n"
"88:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 89f\n"
"st1 { v31.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
"tbz x9, #0, 94f\n"
"st1 { v31.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 94f\n"
"89:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 94f\n"
"st1 { v31.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 94f\n"
"90:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 92f\n"
"str s31, [x27], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
"tbz x9, #1, 91f\n"
"st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
"tbz x9, #0, 94f\n"
"st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 94f\n"
"91:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 94f\n"
"st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 94f\n"
"92:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 93f\n"
"str h31, [x27], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
"tbz x9, #0, 94f\n"
"st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 94f\n"
"93:" // Height 3: Partial direct writeback: partial_1_0
"str b31, [x27, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"94:" // Height 3: Partial direct writeback: Done
"b 96f\n"
"95:" // Height 3: Full writeback
"str q31, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q16, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"96:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 66b\n"
"b 130f\n"
"97:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x4\n"
"mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "movi v13.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "movi v15.16b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "movi v15.16b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"98:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1449,8 +1450,8 @@ void a64_hybrid_s8qa_mmla_4x16 (
"mov x26, #0x0\n"
"100:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 101f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1488,28 +1489,28 @@ void a64_hybrid_s8qa_mmla_4x16 (
"103:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
"ldr q4, [x28, #0x60]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
"ldr q5, [x28, #0x70]\n"
- ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
".inst 0x4e86a45c // smmla v28.4s, v2.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
- ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
"ldr q7, [x28, #0x90]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
".inst 0x4e88a45d // smmla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x22, x22, #0x10\n"
".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
".inst 0x4e89a45a // smmla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x21, x21, #0x10\n"
".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45e // smmla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
@@ -1546,9 +1547,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"ldr q1, [x24, #0x0]\n"
"ldr q2, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q3, [x22, #0x0]\n"
"ldr q4, [x21, #0x0]\n"
+ "cmp x25, #0x20\n"
"ldr q5, [x28, #0x0]\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
@@ -1563,32 +1564,32 @@ void a64_hybrid_s8qa_mmla_4x16 (
"105:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
"sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
"ldr q4, [x28, #0x60]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4e85a410 // smmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
".inst 0x4e85a458 // smmla v24.4s, v2.16b, v5.16b\n"
"ldr q5, [x28, #0x70]\n"
- ".inst 0x4e86a414 // smmla v20.4s, v0.16b, v6.16b\n"
".inst 0x4e86a45c // smmla v28.4s, v2.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
- ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
"ldr q7, [x28, #0x90]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x4e88a415 // smmla v21.4s, v0.16b, v8.16b\n"
".inst 0x4e88a45d // smmla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x23, x23, #0x10\n"
".inst 0x4e89a412 // smmla v18.4s, v0.16b, v9.16b\n"
".inst 0x4e89a45a // smmla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x22, x22, #0x10\n"
".inst 0x4e8aa416 // smmla v22.4s, v0.16b, v10.16b\n"
".inst 0x4e8aa45e // smmla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- "add x21, x21, #0x10\n"
".inst 0x4e84a413 // smmla v19.4s, v0.16b, v4.16b\n"
".inst 0x4e84a45b // smmla v27.4s, v2.16b, v4.16b\n"
"ldr q4, [x28, #0xd0]\n"
@@ -1628,35 +1629,35 @@ void a64_hybrid_s8qa_mmla_4x16 (
"cmp x25, #0x8\n"
"blt 110f\n"
"108:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
+ "ldr d3, [x24], #0x8\n"
"ldr d0, [x23], #0x8\n"
- "trn1 v0.2d, v1.2d, v0.2d\n"
"ldr d2, [x22], #0x8\n"
"ldr d1, [x21], #0x8\n"
+ "trn1 v0.2d, v3.2d, v0.2d\n"
"trn1 v2.2d, v2.2d, v1.2d\n"
"tbnz %x[flags], #31, 109f\n"
".inst 0x4e8f940b // sdot v11.4s, v0.16b, v15.16b\n"
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"109:" // Height 4: Multiply loop: unique 15: skip row sum
- "ldr q3, [x28, #0x0]\n"
- "ldr q1, [x28, #0x10]\n"
- ".inst 0x4e83a410 // smmla v16.4s, v0.16b, v3.16b\n"
- ".inst 0x4e83a458 // smmla v24.4s, v2.16b, v3.16b\n"
+ "ldr q1, [x28, #0x0]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "sub x25, x25, #0x8\n"
"ldr q7, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
- "sub x25, x25, #0x8\n"
"cmp x25, #0x8\n"
"ldr q5, [x28, #0x40]\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x4e81a414 // smmla v20.4s, v0.16b, v1.16b\n"
- ".inst 0x4e81a45c // smmla v28.4s, v2.16b, v1.16b\n"
"ldr q3, [x28, #0x60]\n"
+ ".inst 0x4e81a410 // smmla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x28, #0x70]\n"
+ ".inst 0x4e88a414 // smmla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e88a45c // smmla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
".inst 0x4e86a45d // smmla v29.4s, v2.16b, v6.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
".inst 0x4e85a45a // smmla v26.4s, v2.16b, v5.16b\n"
".inst 0x4e84a416 // smmla v22.4s, v0.16b, v4.16b\n"
@@ -1716,24 +1717,24 @@ void a64_hybrid_s8qa_mmla_4x16 (
".inst 0x4e8f944d // sdot v13.4s, v2.16b, v15.16b\n"
"115:" // Height 4: Multiply loop: unique 16: skip row sum
"ldr q1, [x28, #0x0]\n"
- "ldr q3, [x28, #0x10]\n"
- ".inst 0x4e81a410 // smmla v16.4s, v0.16b, v1.16b\n"
- ".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
- "ldr q1, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
- ".inst 0x4e83a414 // smmla v20.4s, v0.16b, v3.16b\n"
- ".inst 0x4e83a45c // smmla v28.4s, v2.16b, v3.16b\n"
"ldr q5, [x28, #0x40]\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n"
- ".inst 0x4e81a459 // smmla v25.4s, v2.16b, v1.16b\n"
"ldr q3, [x28, #0x60]\n"
+ ".inst 0x4e81a410 // smmla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x28, #0x70]\n"
+ ".inst 0x4e88a414 // smmla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e88a45c // smmla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e87a411 // smmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a459 // smmla v25.4s, v2.16b, v7.16b\n"
".inst 0x4e86a415 // smmla v21.4s, v0.16b, v6.16b\n"
".inst 0x4e86a45d // smmla v29.4s, v2.16b, v6.16b\n"
".inst 0x4e85a412 // smmla v18.4s, v0.16b, v5.16b\n"
".inst 0x4e85a45a // smmla v26.4s, v2.16b, v5.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x4e84a416 // smmla v22.4s, v0.16b, v4.16b\n"
".inst 0x4e84a45e // smmla v30.4s, v2.16b, v4.16b\n"
".inst 0x4e83a413 // smmla v19.4s, v0.16b, v3.16b\n"
@@ -1747,22 +1748,22 @@ void a64_hybrid_s8qa_mmla_4x16 (
"bne 100b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v0.2d, v16.2d, v20.2d\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
- "uzp1 v20.2d, v17.2d, v21.2d\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
+ "add x22, x23, x20\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
"uzp2 v24.2d, v24.2d, v28.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v28.2d, v25.2d, v29.2d\n"
"uzp2 v25.2d, v25.2d, v29.2d\n"
"uzp1 v29.2d, v26.2d, v30.2d\n"
@@ -1772,9 +1773,9 @@ void a64_hybrid_s8qa_mmla_4x16 (
"mov v31.16b, v0.16b\n"
"tbnz %x[flags], #31, 117f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v0.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
"neg v0.4s, v0.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
@@ -1782,8 +1783,8 @@ void a64_hybrid_s8qa_mmla_4x16 (
"dup v13.4s, v13.s[0]\n"
"mul v11.4s, v11.4s, v0.4s\n"
"mul v12.4s, v12.4s, v0.4s\n"
- "mul v13.4s, v13.4s, v0.4s\n"
"mul v14.4s, v14.4s, v0.4s\n"
+ "mul v13.4s, v13.4s, v0.4s\n"
"117:" // Height 4: skip row sum fixup
"ldr q0, [x10, #0x0]\n"
"ldr q4, [x10, #0x10]\n"
@@ -1796,10 +1797,10 @@ void a64_hybrid_s8qa_mmla_4x16 (
"add v16.4s, v16.4s, v12.4s\n"
"add v17.4s, v17.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v1.4s }, [x20]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v23.4s, v23.4s, v13.4s\n"
"add v28.4s, v28.4s, v13.4s\n"
@@ -1846,32 +1847,32 @@ void a64_hybrid_s8qa_mmla_4x16 (
"tbz %x[flags], #5, 118f\n"
"and v2.16b, v31.16b, v0.16b\n"
"and v1.16b, v20.16b, v0.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v31.4s, v31.4s, v2.4s\n"
- "sqadd v20.4s, v20.4s, v1.4s\n"
"and v7.16b, v21.16b, v0.16b\n"
"and v6.16b, v22.16b, v0.16b\n"
"and v5.16b, v16.16b, v0.16b\n"
"and v4.16b, v17.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"and v3.16b, v18.16b, v0.16b\n"
- "and v2.16b, v19.16b, v0.16b\n"
- "and v1.16b, v23.16b, v0.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v2.4s\n"
+ "sqadd v20.4s, v20.4s, v1.4s\n"
+ "and v2.16b, v19.16b, v0.16b\n"
+ "and v1.16b, v23.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v21.4s, v21.4s, v7.4s\n"
"sqadd v22.4s, v22.4s, v6.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v5.4s\n"
"sqadd v17.4s, v17.4s, v4.4s\n"
"sqadd v18.4s, v18.4s, v3.4s\n"
+ "and v7.16b, v28.16b, v0.16b\n"
"sqadd v19.4s, v19.4s, v2.4s\n"
"sqadd v23.4s, v23.4s, v1.4s\n"
- "and v7.16b, v28.16b, v0.16b\n"
"and v6.16b, v29.16b, v0.16b\n"
"and v5.16b, v30.16b, v0.16b\n"
"and v4.16b, v24.16b, v0.16b\n"
@@ -1893,21 +1894,21 @@ void a64_hybrid_s8qa_mmla_4x16 (
"sqadd v26.4s, v26.4s, v2.4s\n"
"sqadd v27.4s, v27.4s, v1.4s\n"
"118:" // Height 4: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v0.4s\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v3.4s }, [x21]\n"
"ld1r { v2.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v1.4s }, [x20]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"srshl v28.4s, v28.4s, v0.4s\n"
"srshl v29.4s, v29.4s, v0.4s\n"
@@ -1979,100 +1980,100 @@ void a64_hybrid_s8qa_mmla_4x16 (
"bge 127f\n"
"tbz x9, #3, 122f\n"
"str d31, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
"tbz x9, #2, 120f\n"
"st1 { v31.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v23.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v23.s }[2], [x23], #0x4\n"
+ "st1 { v24.s }[2], [x22], #0x4\n"
"tbz x9, #1, 119f\n"
"st1 { v31.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v23.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "st1 { v23.h }[6], [x23], #0x2\n"
+ "st1 { v24.h }[6], [x22], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v23.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "st1 { v23.b }[14], [x23]\n"
+ "st1 { v24.b }[14], [x22]\n"
"b 126f\n"
"119:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 126f\n"
"st1 { v31.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v23.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "st1 { v23.b }[12], [x23]\n"
+ "st1 { v24.b }[12], [x22]\n"
"b 126f\n"
"120:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 121f\n"
"st1 { v31.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v23.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "st1 { v23.h }[4], [x23], #0x2\n"
+ "st1 { v24.h }[4], [x22], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v23.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "st1 { v23.b }[10], [x23]\n"
+ "st1 { v24.b }[10], [x22]\n"
"b 126f\n"
"121:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 126f\n"
"st1 { v31.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v23.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "st1 { v23.b }[8], [x23]\n"
+ "st1 { v24.b }[8], [x22]\n"
"b 126f\n"
"122:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 124f\n"
"str s31, [x27], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s23, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "str s23, [x23], #0x4\n"
+ "str s24, [x22], #0x4\n"
"tbz x9, #1, 123f\n"
"st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v23.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "st1 { v23.h }[2], [x23], #0x2\n"
+ "st1 { v24.h }[2], [x22], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v23.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "st1 { v23.b }[6], [x23]\n"
+ "st1 { v24.b }[6], [x22]\n"
"b 126f\n"
"123:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 126f\n"
"st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v23.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "st1 { v23.b }[4], [x23]\n"
+ "st1 { v24.b }[4], [x22]\n"
"b 126f\n"
"124:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 125f\n"
"str h31, [x27], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h23, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "str h23, [x23], #0x2\n"
+ "str h24, [x22], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v23.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "st1 { v23.b }[2], [x23]\n"
+ "st1 { v24.b }[2], [x22]\n"
"b 126f\n"
"125:" // Height 4: Partial direct writeback: partial_1_0
"str b31, [x27, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b23, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "str b23, [x23, #0x0]\n"
+ "str b24, [x22, #0x0]\n"
"126:" // Height 4: Partial direct writeback: Done
"b 128f\n"
"127:" // Height 4: Full writeback
"str q31, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q16, [x23, #0x0]\n"
- "str q23, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q23, [x23, #0x0]\n"
+ "str q24, [x22, #0x0]\n"
"128:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 98b\n"
@@ -2088,8 +2089,8 @@ void a64_hybrid_s8qa_mmla_4x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"130:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
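
Note on the s8qa MMLA hunks above: the kernel no longer binds output_ptr as a read-write register operand ("+&r") of the inline asm. The pointer now lives in KernelArgs and is fetched with "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]"; in the Height-4 path the advanced pointer is computed with madd into a scratch register and written back with str, instead of updating the asm operand in place. This frees a general-purpose register for the rescheduled instruction stream. A minimal sketch of the pattern, assuming only the fields visible in this diff — kernel_entry and the trimmed argument list are illustrative, not the library's API:

#include <cstddef>

struct KernelArgs {
    size_t output_offset = {};
    void  *output_ptr    = {};  // moved into the args block by this patch
};

void kernel_entry(KernelArgs &ka) {
    __asm__ __volatile__(
        // Reload the output pointer through args_ptr instead of tying up a
        // "+&r" operand for the whole kernel body:
        "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        :
        : [args_ptr] "r" (&ka),
          [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr))
        : "x27", "memory");
}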
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp
index 2b7531d1e2..51057a6ffc 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return false;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
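
Note on the StdTransformsFixed change just above: the transforms helper gains a leading LHS operand type, so a kernel can describe asymmetric operand types rather than reusing the RHS type for both sides. A sketch of the shape of the change, with placeholder member types only — the real template carries transform logic, not just typedefs:

template <typename TLhs, typename TRhs, typename TResult,
          unsigned int Height, unsigned int Width, unsigned int KBlock>
struct StdTransformsFixedSketch {
    // Before this patch one parameter doubled as both operand types:
    //   StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4>
    // After, LHS and RHS are named separately:
    //   StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4>
    using lhs_type    = TLhs;
    using rhs_type    = TRhs;
    using result_type = TResult;
};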
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp
index 38a57b0741..559b492871 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,18 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
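
The prologue hunk above shows where ka.output_ptr is populated: both the indirect case (array of row pointers plus offset) and the direct case (base plus stride) now stash the pointer in the args struct before entering the assembly. A minimal mock of that branch, assuming an output_arg layout with only the fields this diff touches (is_indirect, indirect.ptr/offset, direct.base/stride); both struct definitions here are illustrative sketches:

#include <cstddef>

struct KernelArgsSketch { size_t output_offset = {}; void *output_ptr = {}; };

struct OutputArgSketch {
    bool is_indirect;
    struct { void *ptr; size_t offset; } indirect;  // array of row pointers
    struct { void *base; size_t stride; } direct;   // contiguous output rows
};

void setup_output(KernelArgsSketch &ka, const OutputArgSketch &out,
                  unsigned long &flags) {
    if (out.is_indirect) {
        ka.output_ptr    = out.indirect.ptr;
        ka.output_offset = out.indirect.offset;
        flags |= 0x4;  // matches "flags |= 0x4" in the real prologue
    } else {
        ka.output_ptr    = out.direct.base;
        ka.output_offset = out.direct.stride;
    }
}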
@@ -97,9 +97,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x6, %x[col_bias]\n"
"ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -109,8 +109,8 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w13, [x20, x14, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -126,118 +126,118 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"blt 9f\n"
"ldr q0, [x12, #0x0]\n"
"cmp x13, #0x20\n"
- "ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
+ "ldr q6, [x16, #0x0]\n"
+ "ldr q7, [x16, #0x10]\n"
"blt 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr d17, [x15, #0x20]\n"
- "ldr x20, [x15, #0x28]\n"
+ "ldr d17, [x16, #0x20]\n"
+ "ldr x21, [x16, #0x28]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr d16, [x15, #0x30]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0x38]\n"
+ "ldr d16, [x16, #0x30]\n"
+ "add x12, x12, #0x10\n"
+ "ldr x20, [x16, #0x38]\n"
+ "sub x13, x13, #0x10\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x16, #0x48]\n"
+ "ldr x22, [x12, #0x8]\n"
+ "cmp x13, #0x20\n"
"mov v16.d[1], x20\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "ldr d17, [x15, #0x40]\n"
- "ldr x20, [x15, #0x48]\n"
+ "ldr d17, [x16, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr d16, [x15, #0x50]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr d16, [x16, #0x50]\n"
+ "ldr x20, [x16, #0x58]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x16, #0x68]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"mov v16.d[1], x20\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr d17, [x15, #0x60]\n"
- "ldr x20, [x15, #0x68]\n"
+ "ldr d17, [x16, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr d16, [x15, #0x70]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr d16, [x16, #0x70]\n"
+ "ldr x20, [x16, #0x78]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x16, #0x88]\n"
"mov v16.d[1], x20\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
- "ldr d17, [x15, #0x80]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr d17, [x16, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr d16, [x15, #0x90]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0x98]\n"
+ "ldr d16, [x16, #0x90]\n"
+ "ldr x20, [x16, #0x98]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x16, #0xa8]\n"
"mov v16.d[1], x20\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr d17, [x15, #0xa0]\n"
- "ldr x20, [x15, #0xa8]\n"
+ "ldr d17, [x16, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr d16, [x15, #0xb0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr d16, [x16, #0xb0]\n"
+ "ldr x20, [x16, #0xb8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x16, #0xc8]\n"
"mov v16.d[1], x20\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
- "ldr d17, [x15, #0xc0]\n"
- "ldr x20, [x15, #0xc8]\n"
+ "ldr d17, [x16, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr d16, [x15, #0xd0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0xd8]\n"
+ "ldr d16, [x16, #0xd0]\n"
+ "ldr x20, [x16, #0xd8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x16, #0xe8]\n"
"mov v16.d[1], x20\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr d17, [x15, #0xe0]\n"
- "ldr x20, [x15, #0xe8]\n"
+ "ldr d17, [x16, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr d16, [x15, #0xf0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x15, #0xf8]\n"
+ "ldr d16, [x16, #0xf0]\n"
+ "ldr x20, [x16, #0xf8]\n"
+ "add x16, x16, #0x100\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x16, #0x8]\n"
"mov v16.d[1], x20\n"
- "add x12, x12, #0x10\n"
- "add x15, x15, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
- "ldr d6, [x15, #0x0]\n"
- "ldr x20, [x15, #0x8]\n"
+ "ldr d6, [x16, #0x0]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
- "sub x13, x13, #0x10\n"
- "ldr d7, [x15, #0x10]\n"
- "cmp x13, #0x20\n"
- "ldr x21, [x12, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x15, #0x18]\n"
- "mov v0.d[1], x21\n"
+ "ldr d7, [x16, #0x10]\n"
+ "ldr x20, [x16, #0x18]\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x12, #0x80]\n"
"bge 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q17, [x15, #0x20]\n"
+ "ldr q17, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q16, [x15, #0x30]\n"
+ "ldr q16, [x16, #0x30]\n"
+ "add x12, x12, #0x10\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "ldr q17, [x15, #0x40]\n"
+ "ldr q17, [x16, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x15, #0x50]\n"
+ "ldr q16, [x16, #0x50]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x15, #0x60]\n"
+ "ldr q17, [x16, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x15, #0x70]\n"
+ "ldr q16, [x16, #0x70]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x15, #0x80]\n"
+ "ldr q17, [x16, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x15, #0x90]\n"
+ "ldr q16, [x16, #0x90]\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x15, #0xa0]\n"
+ "ldr q17, [x16, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x15, #0xb0]\n"
+ "ldr q16, [x16, #0xb0]\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x15, #0xc0]\n"
+ "ldr q17, [x16, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x15, #0xd0]\n"
+ "ldr q16, [x16, #0xd0]\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr q17, [x15, #0xe0]\n"
+ "ldr q17, [x16, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr q16, [x15, #0xf0]\n"
- "add x12, x12, #0x10\n"
- "sub x13, x13, #0x10\n"
+ "ldr q16, [x16, #0xf0]\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
- "add x15, x15, #0x100\n"
"9:" // Height 1: Multiply loop: Main loop skip
"cbz x13, 14f\n"
"cmp x13, #0x4\n"
@@ -245,16 +245,16 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"10:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x12], #0x4\n"
"sub x13, x13, #0x4\n"
- "ldr q16, [x15, #0x0]\n"
- ".inst 0x4f92e208 // sdot v8.4s, v16.16b, v18.4b[0]\n"
- "ldr q16, [x15, #0x10]\n"
- ".inst 0x4f92e209 // sdot v9.4s, v16.16b, v18.4b[0]\n"
- "ldr q17, [x15, #0x20]\n"
+ "ldr q17, [x16, #0x0]\n"
"cmp x13, #0x4\n"
- "ldr q16, [x15, #0x30]\n"
+ "ldr q16, [x16, #0x10]\n"
+ ".inst 0x4f92e228 // sdot v8.4s, v17.16b, v18.4b[0]\n"
+ "ldr q17, [x16, #0x20]\n"
+ ".inst 0x4f92e209 // sdot v9.4s, v16.16b, v18.4b[0]\n"
+ "ldr q16, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f92e22a // sdot v10.4s, v17.16b, v18.4b[0]\n"
".inst 0x4f92e20b // sdot v11.4s, v16.16b, v18.4b[0]\n"
- "add x15, x15, #0x40\n"
"bge 10b\n"
"11:" // Height 1: Multiply loop: Skip odd blocks
"cbz x13, 14f\n"
@@ -266,30 +266,30 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"12:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x12, #0x0]\n"
"13:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q16, [x15, #0x0]\n"
- ".inst 0x4f80e208 // sdot v8.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x15, #0x10]\n"
+ "ldr q17, [x16, #0x0]\n"
+ "ldr q16, [x16, #0x10]\n"
+ ".inst 0x4f80e228 // sdot v8.4s, v17.16b, v0.4b[0]\n"
+ "ldr q17, [x16, #0x20]\n"
".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x15, #0x20]\n"
- ".inst 0x4f80e20a // sdot v10.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x15, #0x30]\n"
+ "ldr q16, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
+ ".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "add x15, x15, #0x40\n"
"14:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x14, x14, #0x1\n"
"cmp x14, x20\n"
"bne 4b\n"
- "ldr q16, [x6, #0x0]\n"
- "add v8.4s, v8.4s, v16.4s\n"
- "ldr q16, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v16.4s\n"
- "ldr q16, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v16.4s\n"
+ "ldr q19, [x6, #0x0]\n"
+ "ldr q18, [x6, #0x10]\n"
+ "ldr q17, [x6, #0x20]\n"
"ldr q16, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v16.4s\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "add v8.4s, v8.4s, v19.4s\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add v9.4s, v9.4s, v18.4s\n"
+ "add v10.4s, v10.4s, v17.4s\n"
"add x6, x6, #0x40\n"
+ "add v11.4s, v11.4s, v16.4s\n"
"tbz %x[flags], #4, 15f\n"
"ldr q0, [x8, #0x0]\n"
"ldr q4, [x7, #0x0]\n"
@@ -303,9 +303,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add x7, x7, #0x40\n"
"b 16f\n"
"15:" // Height 1: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -337,87 +337,87 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v10.4s, v10.4s, v2.4s\n"
"srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
- "add v8.4s, v8.4s, v16.4s\n"
- "add v9.4s, v9.4s, v16.4s\n"
- "add v10.4s, v10.4s, v16.4s\n"
- "add v11.4s, v11.4s, v16.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v16.4s }, [x20]\n"
- "smin v8.4s, v8.4s, v16.4s\n"
- "smin v9.4s, v9.4s, v16.4s\n"
- "smin v10.4s, v10.4s, v16.4s\n"
- "smin v11.4s, v11.4s, v16.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v18.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "cmp x17, #0x10\n"
"ld1r { v16.4s }, [x20]\n"
+ "add v8.4s, v8.4s, v18.4s\n"
+ "add v9.4s, v9.4s, v18.4s\n"
+ "add v10.4s, v10.4s, v18.4s\n"
+ "add v11.4s, v11.4s, v18.4s\n"
+ "smin v8.4s, v8.4s, v17.4s\n"
+ "smin v9.4s, v9.4s, v17.4s\n"
+ "smin v10.4s, v10.4s, v17.4s\n"
+ "smin v11.4s, v11.4s, v17.4s\n"
"smax v8.4s, v8.4s, v16.4s\n"
"smax v9.4s, v9.4s, v16.4s\n"
"smax v10.4s, v10.4s, v16.4s\n"
"smax v11.4s, v11.4s, v16.4s\n"
"uzp1 v8.8h, v8.8h, v9.8h\n"
"uzp1 v16.8h, v10.8h, v11.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v16.16b\n"
"bge 26f\n"
- "tbz x16, #3, 21f\n"
- "str d8, [x17], #0x8\n"
- "tbz x16, #2, 19f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "tbz x16, #1, 18f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[14], [x17]\n"
+ "tbz x17, #3, 21f\n"
+ "str d8, [x15], #0x8\n"
+ "tbz x17, #2, 19f\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
+ "tbz x17, #1, 18f\n"
+ "st1 { v8.h }[6], [x15], #0x2\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[14], [x15]\n"
"b 25f\n"
"18:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[12], [x17]\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[12], [x15]\n"
"b 25f\n"
"19:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x16, #1, 20f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[10], [x17]\n"
+ "tbz x17, #1, 20f\n"
+ "st1 { v8.h }[4], [x15], #0x2\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[10], [x15]\n"
"b 25f\n"
"20:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[8], [x17]\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[8], [x15]\n"
"b 25f\n"
"21:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x16, #2, 23f\n"
- "str s8, [x17], #0x4\n"
- "tbz x16, #1, 22f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[6], [x17]\n"
+ "tbz x17, #2, 23f\n"
+ "str s8, [x15], #0x4\n"
+ "tbz x17, #1, 22f\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[6], [x15]\n"
"b 25f\n"
"22:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[4], [x17]\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[4], [x15]\n"
"b 25f\n"
"23:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x16, #1, 24f\n"
- "str h8, [x17], #0x2\n"
- "tbz x16, #0, 25f\n"
- "st1 { v8.b }[2], [x17]\n"
+ "tbz x17, #1, 24f\n"
+ "str h8, [x15], #0x2\n"
+ "tbz x17, #0, 25f\n"
+ "st1 { v8.b }[2], [x15]\n"
"b 25f\n"
"24:" // Height 1: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
+ "str b8, [x15, #0x0]\n"
"25:" // Height 1: Partial direct writeback: Done
"b 27f\n"
"26:" // Height 1: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
+ "str q8, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
"27:" // Height 1: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 2b\n"
"b 164f\n"
"28:" // Height 2
"mov x6, %x[col_bias]\n"
"ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"29:" // Height 2: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -431,8 +431,8 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"31:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w13, [x20, x14, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 32f\n"
"ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -452,154 +452,154 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr q0, [x12, #0x0]\n"
"cmp x13, #0x20\n"
"ldr q1, [x11, #0x0]\n"
- "ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
+ "ldr q6, [x16, #0x0]\n"
+ "ldr q7, [x16, #0x10]\n"
"blt 35f\n"
"34:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x15, #0x28]\n"
+ "ldr x20, [x16, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr d17, [x15, #0x20]\n"
+ "ldr d17, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr x20, [x15, #0x38]\n"
+ "ldr x21, [x16, #0x38]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr d16, [x15, #0x30]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x16, #0x30]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x16, #0x48]\n"
+ "add x12, x12, #0x10\n"
+ "add x11, x11, #0x10\n"
+ "mov v16.d[1], x21\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "mov v16.d[1], x20\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
- "ldr d17, [x15, #0x40]\n"
+ "ldr d17, [x16, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr x20, [x15, #0x48]\n"
+ "ldr x21, [x16, #0x58]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
- "ldr d16, [x15, #0x50]\n"
+ "ldr d16, [x16, #0x50]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x15, #0x58]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x16, #0x68]\n"
+ "ldr x23, [x12, #0x8]\n"
+ "sub x13, x13, #0x10\n"
+ "mov v16.d[1], x21\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr x21, [x15, #0x68]\n"
".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n"
- "ldr d17, [x15, #0x60]\n"
+ "ldr d17, [x16, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr x21, [x16, #0x78]\n"
".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n"
- "ldr d16, [x15, #0x70]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x16, #0x70]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x16, #0x88]\n"
+ "ldr x22, [x11, #0x8]\n"
+ "cmp x13, #0x20\n"
+ "mov v16.d[1], x21\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
- "mov v16.d[1], x20\n"
".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n"
- "ldr d17, [x15, #0x80]\n"
+ "ldr d17, [x16, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr x20, [x15, #0x88]\n"
+ "ldr x21, [x16, #0x98]\n"
".inst 0x4fa1e20f // sdot v15.4s, v16.16b, v1.4b[1]\n"
- "ldr d16, [x15, #0x90]\n"
+ "ldr d16, [x16, #0x90]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x15, #0x98]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x16, #0xa8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v16.d[1], x21\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr x21, [x15, #0xa8]\n"
".inst 0x4f81ea2c // sdot v12.4s, v17.16b, v1.4b[2]\n"
- "ldr d17, [x15, #0xa0]\n"
+ "ldr d17, [x16, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr x21, [x16, #0xb8]\n"
".inst 0x4f81ea0d // sdot v13.4s, v16.16b, v1.4b[2]\n"
- "ldr d16, [x15, #0xb0]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x16, #0xb0]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x16, #0xc8]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v16.d[1], x21\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
- "mov v16.d[1], x20\n"
".inst 0x4f81ea2e // sdot v14.4s, v17.16b, v1.4b[2]\n"
- "ldr d17, [x15, #0xc0]\n"
+ "ldr d17, [x16, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr x20, [x15, #0xc8]\n"
+ "ldr x21, [x16, #0xd8]\n"
".inst 0x4f81ea0f // sdot v15.4s, v16.16b, v1.4b[2]\n"
- "ldr d16, [x15, #0xd0]\n"
+ "ldr d16, [x16, #0xd0]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x15, #0xd8]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x16, #0xe8]\n"
+ "mov v16.d[1], x21\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr x21, [x15, #0xe8]\n"
".inst 0x4fa1ea2c // sdot v12.4s, v17.16b, v1.4b[3]\n"
- "ldr d17, [x15, #0xe0]\n"
+ "ldr d17, [x16, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x21, [x16, #0xf8]\n"
".inst 0x4fa1ea0d // sdot v13.4s, v16.16b, v1.4b[3]\n"
- "ldr d16, [x15, #0xf0]\n"
- "mov v17.d[1], x21\n"
- "add x12, x12, #0x10\n"
- "mov v16.d[1], x20\n"
- "add x11, x11, #0x10\n"
- "add x15, x15, #0x100\n"
+ "ldr d16, [x16, #0xf0]\n"
+ "mov v17.d[1], x20\n"
+ "add x16, x16, #0x100\n"
+ "ldr x20, [x16, #0x8]\n"
+ "mov v16.d[1], x21\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa1ea2e // sdot v14.4s, v17.16b, v1.4b[3]\n"
- "ldr d6, [x15, #0x0]\n"
- "ldr x21, [x15, #0x8]\n"
+ "ldr d6, [x16, #0x0]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1ea0f // sdot v15.4s, v16.16b, v1.4b[3]\n"
"ldr d1, [x11, #0x0]\n"
- "sub x13, x13, #0x10\n"
- "ldr d7, [x15, #0x10]\n"
- "cmp x13, #0x20\n"
- "ldr x20, [x12, #0x8]\n"
- "mov v6.d[1], x21\n"
- "ldr x21, [x11, #0x8]\n"
- "mov v0.d[1], x20\n"
- "ldr x20, [x15, #0x18]\n"
- "mov v1.d[1], x21\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr d7, [x16, #0x10]\n"
+ "mov v6.d[1], x20\n"
+ "ldr x20, [x16, #0x18]\n"
+ "mov v0.d[1], x23\n"
+ "mov v1.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x11, #0x80]\n"
"bge 34b\n"
"35:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q17, [x15, #0x20]\n"
+ "ldr q17, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q16, [x15, #0x30]\n"
+ "ldr q16, [x16, #0x30]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
"sub x13, x13, #0x10\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x15, #0x40]\n"
+ "ldr q17, [x16, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x15, #0x50]\n"
+ "ldr q16, [x16, #0x50]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x15, #0x60]\n"
+ "ldr q17, [x16, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x15, #0x70]\n"
+ "ldr q16, [x16, #0x70]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x15, #0x80]\n"
+ "ldr q17, [x16, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20f // sdot v15.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x15, #0x90]\n"
+ "ldr q16, [x16, #0x90]\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
".inst 0x4f81ea2c // sdot v12.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x15, #0xa0]\n"
+ "ldr q17, [x16, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
".inst 0x4f81ea0d // sdot v13.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x15, #0xb0]\n"
+ "ldr q16, [x16, #0xb0]\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
".inst 0x4f81ea2e // sdot v14.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x15, #0xc0]\n"
+ "ldr q17, [x16, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
".inst 0x4f81ea0f // sdot v15.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x15, #0xd0]\n"
+ "ldr q16, [x16, #0xd0]\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa1ea2c // sdot v12.4s, v17.16b, v1.4b[3]\n"
- "ldr q17, [x15, #0xe0]\n"
+ "ldr q17, [x16, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
".inst 0x4fa1ea0d // sdot v13.4s, v16.16b, v1.4b[3]\n"
- "ldr q16, [x15, #0xf0]\n"
+ "ldr q16, [x16, #0xf0]\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
- "add x15, x15, #0x100\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1ea2e // sdot v14.4s, v17.16b, v1.4b[3]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
".inst 0x4fa1ea0f // sdot v15.4s, v16.16b, v1.4b[3]\n"
@@ -612,16 +612,16 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"sub x13, x13, #0x4\n"
"ldr s18, [x11], #0x4\n"
"cmp x13, #0x4\n"
- "ldr q17, [x15, #0x0]\n"
+ "ldr q17, [x16, #0x0]\n"
+ "ldr q16, [x16, #0x10]\n"
".inst 0x4f93e228 // sdot v8.4s, v17.16b, v19.4b[0]\n"
- "ldr q16, [x15, #0x10]\n"
".inst 0x4f92e22c // sdot v12.4s, v17.16b, v18.4b[0]\n"
- "ldr q17, [x15, #0x20]\n"
+ "ldr q17, [x16, #0x20]\n"
".inst 0x4f93e209 // sdot v9.4s, v16.16b, v19.4b[0]\n"
".inst 0x4f92e20d // sdot v13.4s, v16.16b, v18.4b[0]\n"
- "ldr q16, [x15, #0x30]\n"
+ "ldr q16, [x16, #0x30]\n"
".inst 0x4f93e22a // sdot v10.4s, v17.16b, v19.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f92e22e // sdot v14.4s, v17.16b, v18.4b[0]\n"
".inst 0x4f93e20b // sdot v11.4s, v16.16b, v19.4b[0]\n"
".inst 0x4f92e20f // sdot v15.4s, v16.16b, v18.4b[0]\n"
@@ -639,16 +639,16 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr b0, [x12, #0x0]\n"
"ldr b1, [x11, #0x0]\n"
"40:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q17, [x15, #0x0]\n"
+ "ldr q17, [x16, #0x0]\n"
+ "ldr q16, [x16, #0x10]\n"
".inst 0x4f80e228 // sdot v8.4s, v17.16b, v0.4b[0]\n"
- "ldr q16, [x15, #0x10]\n"
".inst 0x4f81e22c // sdot v12.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x15, #0x20]\n"
+ "ldr q17, [x16, #0x20]\n"
".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20d // sdot v13.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x15, #0x30]\n"
+ "ldr q16, [x16, #0x30]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
@@ -658,19 +658,19 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp x14, x20\n"
"bne 31b\n"
"ldr q19, [x6, #0x0]\n"
- "add v8.4s, v8.4s, v19.4s\n"
"ldr q18, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v18.4s\n"
"ldr q17, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v17.4s\n"
"ldr q16, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v16.4s\n"
+ "add v8.4s, v8.4s, v19.4s\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x17, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "add v9.4s, v9.4s, v18.4s\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add v10.4s, v10.4s, v17.4s\n"
+ "add v11.4s, v11.4s, v16.4s\n"
"add v12.4s, v12.4s, v19.4s\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add x26, x15, x20\n"
"add v13.4s, v13.4s, v18.4s\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"add v14.4s, v14.4s, v17.4s\n"
"add v15.4s, v15.4s, v16.4s\n"
"add x6, x6, #0x40\n"
@@ -687,9 +687,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add x7, x7, #0x40\n"
"b 43f\n"
"42:" // Height 2: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -741,27 +741,28 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v14.4s, v14.4s, v2.4s\n"
"srshl v15.4s, v15.4s, v3.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
- "add v8.4s, v8.4s, v16.4s\n"
- "add v9.4s, v9.4s, v16.4s\n"
- "add v10.4s, v10.4s, v16.4s\n"
- "add v11.4s, v11.4s, v16.4s\n"
- "add v12.4s, v12.4s, v16.4s\n"
- "add v13.4s, v13.4s, v16.4s\n"
- "add v14.4s, v14.4s, v16.4s\n"
- "add v15.4s, v15.4s, v16.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v16.4s }, [x20]\n"
- "smin v8.4s, v8.4s, v16.4s\n"
- "smin v9.4s, v9.4s, v16.4s\n"
- "smin v10.4s, v10.4s, v16.4s\n"
- "smin v11.4s, v11.4s, v16.4s\n"
- "smin v12.4s, v12.4s, v16.4s\n"
- "smin v13.4s, v13.4s, v16.4s\n"
- "smin v14.4s, v14.4s, v16.4s\n"
- "smin v15.4s, v15.4s, v16.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v18.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v17.4s }, [x21]\n"
+ "cmp x17, #0x10\n"
"ld1r { v16.4s }, [x20]\n"
+ "add v8.4s, v8.4s, v18.4s\n"
+ "add v9.4s, v9.4s, v18.4s\n"
+ "add v10.4s, v10.4s, v18.4s\n"
+ "add v11.4s, v11.4s, v18.4s\n"
+ "add v12.4s, v12.4s, v18.4s\n"
+ "add v13.4s, v13.4s, v18.4s\n"
+ "add v14.4s, v14.4s, v18.4s\n"
+ "add v15.4s, v15.4s, v18.4s\n"
+ "smin v8.4s, v8.4s, v17.4s\n"
+ "smin v9.4s, v9.4s, v17.4s\n"
+ "smin v10.4s, v10.4s, v17.4s\n"
+ "smin v11.4s, v11.4s, v17.4s\n"
+ "smin v12.4s, v12.4s, v17.4s\n"
+ "smin v13.4s, v13.4s, v17.4s\n"
+ "smin v14.4s, v14.4s, v17.4s\n"
+ "smin v15.4s, v15.4s, v17.4s\n"
"smax v8.4s, v8.4s, v16.4s\n"
"smax v9.4s, v9.4s, v16.4s\n"
"smax v10.4s, v10.4s, v16.4s\n"
@@ -774,85 +775,84 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"uzp1 v17.8h, v10.8h, v11.8h\n"
"uzp1 v12.8h, v12.8h, v13.8h\n"
"uzp1 v16.8h, v14.8h, v15.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v17.16b\n"
"uzp1 v12.16b, v12.16b, v16.16b\n"
"bge 53f\n"
- "tbz x16, #3, 48f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x25], #0x8\n"
- "tbz x16, #2, 46f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "tbz x16, #1, 45f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x25]\n"
+ "tbz x17, #3, 48f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "tbz x17, #2, 46f\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "tbz x17, #1, 45f\n"
+ "st1 { v8.h }[6], [x15], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[14], [x15]\n"
+ "st1 { v12.b }[14], [x26]\n"
"b 52f\n"
"45:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x25]\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[12], [x15]\n"
+ "st1 { v12.b }[12], [x26]\n"
"b 52f\n"
"46:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x16, #1, 47f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x25]\n"
+ "tbz x17, #1, 47f\n"
+ "st1 { v8.h }[4], [x15], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[10], [x15]\n"
+ "st1 { v12.b }[10], [x26]\n"
"b 52f\n"
"47:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x25]\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[8], [x15]\n"
+ "st1 { v12.b }[8], [x26]\n"
"b 52f\n"
"48:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x16, #2, 50f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x25], #0x4\n"
- "tbz x16, #1, 49f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x25]\n"
+ "tbz x17, #2, 50f\n"
+ "str s8, [x15], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "tbz x17, #1, 49f\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[6], [x15]\n"
+ "st1 { v12.b }[6], [x26]\n"
"b 52f\n"
"49:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x25]\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[4], [x15]\n"
+ "st1 { v12.b }[4], [x26]\n"
"b 52f\n"
"50:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x16, #1, 51f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x25], #0x2\n"
- "tbz x16, #0, 52f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x25]\n"
+ "tbz x17, #1, 51f\n"
+ "str h8, [x15], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "tbz x17, #0, 52f\n"
+ "st1 { v8.b }[2], [x15]\n"
+ "st1 { v12.b }[2], [x26]\n"
"b 52f\n"
"51:" // Height 2: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x25, #0x0]\n"
+ "str b8, [x15, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
"52:" // Height 2: Partial direct writeback: Done
"b 54f\n"
"53:" // Height 2: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x25, #0x0]\n"
+ "str q8, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
+ "str q12, [x26, #0x0]\n"
"54:" // Height 2: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 29b\n"
"b 164f\n"
"55:" // Height 3
"mov x6, %x[col_bias]\n"
"ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"56:" // Height 3: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -870,8 +870,8 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"58:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w13, [x20, x14, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
"ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -895,123 +895,123 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp x13, #0x20\n"
"ldr q1, [x11, #0x0]\n"
"ldr q2, [x10, #0x0]\n"
- "ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
+ "ldr q6, [x16, #0x0]\n"
+ "ldr q7, [x16, #0x10]\n"
"blt 62f\n"
"61:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x15, #0x28]\n"
+ "ldr x21, [x16, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x38]\n"
+ "ldr x20, [x16, #0x38]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr d21, [x15, #0x20]\n"
+ "ldr d21, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v21.d[1], x21\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x15, #0x48]\n"
+ "add x11, x11, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr d20, [x15, #0x30]\n"
+ "ldr d20, [x16, #0x30]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x16, #0x48]\n"
+ "add x10, x10, #0x10\n"
+ "ldr x24, [x12, #0x8]\n"
"mov v20.d[1], x20\n"
".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x20, [x16, #0x58]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
- "ldr d21, [x15, #0x40]\n"
+ "ldr d21, [x16, #0x40]\n"
".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
- "mov v21.d[1], x21\n"
+ "ldr x23, [x11, #0x8]\n"
".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n"
- "ldr x21, [x15, #0x68]\n"
+ "ldr x22, [x10, #0x8]\n"
".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n"
- "ldr d20, [x15, #0x50]\n"
+ "ldr d20, [x16, #0x50]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x16, #0x68]\n"
+ "sub x13, x13, #0x10\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
"mov v20.d[1], x20\n"
".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr x20, [x16, #0x78]\n"
".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n"
- "ldr d21, [x15, #0x60]\n"
+ "ldr d21, [x16, #0x60]\n"
".inst 0x4fa0e289 // sdot v9.4s, v20.16b, v0.4b[1]\n"
- "mov v21.d[1], x21\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa1e28d // sdot v13.4s, v20.16b, v1.4b[1]\n"
- "ldr x21, [x15, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4fa2e291 // sdot v17.4s, v20.16b, v2.4b[1]\n"
- "ldr d20, [x15, #0x70]\n"
+ "ldr d20, [x16, #0x70]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x16, #0x88]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"mov v20.d[1], x20\n"
".inst 0x4fa0e2aa // sdot v10.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ae // sdot v14.4s, v21.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x98]\n"
+ "ldr x20, [x16, #0x98]\n"
".inst 0x4fa2e2b2 // sdot v18.4s, v21.16b, v2.4b[1]\n"
- "ldr d21, [x15, #0x80]\n"
+ "ldr d21, [x16, #0x80]\n"
".inst 0x4fa0e28b // sdot v11.4s, v20.16b, v0.4b[1]\n"
- "mov v21.d[1], x21\n"
".inst 0x4fa1e28f // sdot v15.4s, v20.16b, v1.4b[1]\n"
- "ldr x21, [x15, #0xa8]\n"
".inst 0x4fa2e293 // sdot v19.4s, v20.16b, v2.4b[1]\n"
- "ldr d20, [x15, #0x90]\n"
+ "ldr d20, [x16, #0x90]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x16, #0xa8]\n"
"mov v20.d[1], x20\n"
".inst 0x4f80eaa8 // sdot v8.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaac // sdot v12.4s, v21.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "ldr x20, [x16, #0xb8]\n"
".inst 0x4f82eab0 // sdot v16.4s, v21.16b, v2.4b[2]\n"
- "ldr d21, [x15, #0xa0]\n"
+ "ldr d21, [x16, #0xa0]\n"
".inst 0x4f80ea89 // sdot v9.4s, v20.16b, v0.4b[2]\n"
- "mov v21.d[1], x21\n"
".inst 0x4f81ea8d // sdot v13.4s, v20.16b, v1.4b[2]\n"
- "ldr x21, [x15, #0xc8]\n"
".inst 0x4f82ea91 // sdot v17.4s, v20.16b, v2.4b[2]\n"
- "ldr d20, [x15, #0xb0]\n"
+ "ldr d20, [x16, #0xb0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x16, #0xc8]\n"
"mov v20.d[1], x20\n"
".inst 0x4f80eaaa // sdot v10.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaae // sdot v14.4s, v21.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xd8]\n"
+ "ldr x20, [x16, #0xd8]\n"
".inst 0x4f82eab2 // sdot v18.4s, v21.16b, v2.4b[2]\n"
- "ldr d21, [x15, #0xc0]\n"
+ "ldr d21, [x16, #0xc0]\n"
".inst 0x4f80ea8b // sdot v11.4s, v20.16b, v0.4b[2]\n"
- "mov v21.d[1], x21\n"
".inst 0x4f81ea8f // sdot v15.4s, v20.16b, v1.4b[2]\n"
- "ldr x21, [x15, #0xe8]\n"
".inst 0x4f82ea93 // sdot v19.4s, v20.16b, v2.4b[2]\n"
- "ldr d20, [x15, #0xd0]\n"
+ "ldr d20, [x16, #0xd0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x16, #0xe8]\n"
"mov v20.d[1], x20\n"
".inst 0x4fa0eaa8 // sdot v8.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa1eaac // sdot v12.4s, v21.16b, v1.4b[3]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "ldr x20, [x16, #0xf8]\n"
".inst 0x4fa2eab0 // sdot v16.4s, v21.16b, v2.4b[3]\n"
- "ldr d21, [x15, #0xe0]\n"
+ "ldr d21, [x16, #0xe0]\n"
".inst 0x4fa0ea89 // sdot v9.4s, v20.16b, v0.4b[3]\n"
- "mov v21.d[1], x21\n"
".inst 0x4fa1ea8d // sdot v13.4s, v20.16b, v1.4b[3]\n"
- "add x12, x12, #0x10\n"
".inst 0x4fa2ea91 // sdot v17.4s, v20.16b, v2.4b[3]\n"
- "ldr d20, [x15, #0xf0]\n"
+ "ldr d20, [x16, #0xf0]\n"
+ "mov v21.d[1], x21\n"
+ "add x16, x16, #0x100\n"
+ "ldr x21, [x16, #0x8]\n"
"mov v20.d[1], x20\n"
- "add x11, x11, #0x10\n"
- "add x10, x10, #0x10\n"
- "add x15, x15, #0x100\n"
".inst 0x4fa0eaaa // sdot v10.4s, v21.16b, v0.4b[3]\n"
- "ldr x20, [x15, #0x8]\n"
".inst 0x4fa1eaae // sdot v14.4s, v21.16b, v1.4b[3]\n"
- "ldr x23, [x12, #0x8]\n"
+ "ldr x20, [x16, #0x18]\n"
".inst 0x4fa2eab2 // sdot v18.4s, v21.16b, v2.4b[3]\n"
- "ldr d6, [x15, #0x0]\n"
+ "ldr d6, [x16, #0x0]\n"
".inst 0x4fa0ea8b // sdot v11.4s, v20.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1ea8f // sdot v15.4s, v20.16b, v1.4b[3]\n"
"ldr d1, [x11, #0x0]\n"
- "ldr x22, [x11, #0x8]\n"
".inst 0x4fa2ea93 // sdot v19.4s, v20.16b, v2.4b[3]\n"
"ldr d2, [x10, #0x0]\n"
- "sub x13, x13, #0x10\n"
- "ldr d7, [x15, #0x10]\n"
- "cmp x13, #0x20\n"
- "ldr x21, [x10, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x15, #0x18]\n"
- "mov v0.d[1], x23\n"
- "prfm pldl1keep, [x12, #0x80]\n"
- "mov v1.d[1], x22\n"
- "prfm pldl1keep, [x11, #0x80]\n"
- "mov v2.d[1], x21\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "ldr d7, [x16, #0x10]\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x24\n"
+ "mov v1.d[1], x23\n"
+ "mov v2.d[1], x22\n"
"mov v7.d[1], x20\n"
"bge 61b\n"
"62:" // Height 3: Multiply loop: Single iteration only
@@ -1020,66 +1020,66 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q21, [x15, #0x20]\n"
+ "ldr q21, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x10, x10, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"sub x13, x13, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q20, [x15, #0x30]\n"
+ "ldr q20, [x16, #0x30]\n"
".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x15, #0x40]\n"
+ "ldr q21, [x16, #0x40]\n"
".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x15, #0x50]\n"
+ "ldr q20, [x16, #0x50]\n"
".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x15, #0x60]\n"
+ "ldr q21, [x16, #0x60]\n"
".inst 0x4fa0e289 // sdot v9.4s, v20.16b, v0.4b[1]\n"
".inst 0x4fa1e28d // sdot v13.4s, v20.16b, v1.4b[1]\n"
".inst 0x4fa2e291 // sdot v17.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x15, #0x70]\n"
+ "ldr q20, [x16, #0x70]\n"
".inst 0x4fa0e2aa // sdot v10.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ae // sdot v14.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b2 // sdot v18.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x15, #0x80]\n"
+ "ldr q21, [x16, #0x80]\n"
".inst 0x4fa0e28b // sdot v11.4s, v20.16b, v0.4b[1]\n"
".inst 0x4fa1e28f // sdot v15.4s, v20.16b, v1.4b[1]\n"
".inst 0x4fa2e293 // sdot v19.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x15, #0x90]\n"
+ "ldr q20, [x16, #0x90]\n"
".inst 0x4f80eaa8 // sdot v8.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaac // sdot v12.4s, v21.16b, v1.4b[2]\n"
".inst 0x4f82eab0 // sdot v16.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x15, #0xa0]\n"
+ "ldr q21, [x16, #0xa0]\n"
".inst 0x4f80ea89 // sdot v9.4s, v20.16b, v0.4b[2]\n"
".inst 0x4f81ea8d // sdot v13.4s, v20.16b, v1.4b[2]\n"
".inst 0x4f82ea91 // sdot v17.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x15, #0xb0]\n"
+ "ldr q20, [x16, #0xb0]\n"
".inst 0x4f80eaaa // sdot v10.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaae // sdot v14.4s, v21.16b, v1.4b[2]\n"
".inst 0x4f82eab2 // sdot v18.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x15, #0xc0]\n"
+ "ldr q21, [x16, #0xc0]\n"
".inst 0x4f80ea8b // sdot v11.4s, v20.16b, v0.4b[2]\n"
".inst 0x4f81ea8f // sdot v15.4s, v20.16b, v1.4b[2]\n"
".inst 0x4f82ea93 // sdot v19.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x15, #0xd0]\n"
+ "ldr q20, [x16, #0xd0]\n"
".inst 0x4fa0eaa8 // sdot v8.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa1eaac // sdot v12.4s, v21.16b, v1.4b[3]\n"
".inst 0x4fa2eab0 // sdot v16.4s, v21.16b, v2.4b[3]\n"
- "ldr q21, [x15, #0xe0]\n"
+ "ldr q21, [x16, #0xe0]\n"
".inst 0x4fa0ea89 // sdot v9.4s, v20.16b, v0.4b[3]\n"
".inst 0x4fa1ea8d // sdot v13.4s, v20.16b, v1.4b[3]\n"
".inst 0x4fa2ea91 // sdot v17.4s, v20.16b, v2.4b[3]\n"
- "ldr q20, [x15, #0xf0]\n"
+ "ldr q20, [x16, #0xf0]\n"
".inst 0x4fa0eaaa // sdot v10.4s, v21.16b, v0.4b[3]\n"
- "add x15, x15, #0x100\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1eaae // sdot v14.4s, v21.16b, v1.4b[3]\n"
".inst 0x4fa2eab2 // sdot v18.4s, v21.16b, v2.4b[3]\n"
".inst 0x4fa0ea8b // sdot v11.4s, v20.16b, v0.4b[3]\n"
@@ -1095,18 +1095,18 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr s23, [x11], #0x4\n"
"cmp x13, #0x4\n"
"ldr s22, [x10], #0x4\n"
- "ldr q21, [x15, #0x0]\n"
+ "ldr q21, [x16, #0x0]\n"
+ "ldr q20, [x16, #0x10]\n"
".inst 0x4f98e2a8 // sdot v8.4s, v21.16b, v24.4b[0]\n"
- "ldr q20, [x15, #0x10]\n"
".inst 0x4f97e2ac // sdot v12.4s, v21.16b, v23.4b[0]\n"
".inst 0x4f96e2b0 // sdot v16.4s, v21.16b, v22.4b[0]\n"
- "ldr q21, [x15, #0x20]\n"
+ "ldr q21, [x16, #0x20]\n"
".inst 0x4f98e289 // sdot v9.4s, v20.16b, v24.4b[0]\n"
".inst 0x4f97e28d // sdot v13.4s, v20.16b, v23.4b[0]\n"
".inst 0x4f96e291 // sdot v17.4s, v20.16b, v22.4b[0]\n"
- "ldr q20, [x15, #0x30]\n"
+ "ldr q20, [x16, #0x30]\n"
".inst 0x4f98e2aa // sdot v10.4s, v21.16b, v24.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f97e2ae // sdot v14.4s, v21.16b, v23.4b[0]\n"
".inst 0x4f96e2b2 // sdot v18.4s, v21.16b, v22.4b[0]\n"
".inst 0x4f98e28b // sdot v11.4s, v20.16b, v24.4b[0]\n"
@@ -1129,18 +1129,18 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr b1, [x11, #0x0]\n"
"ldr b2, [x10, #0x0]\n"
"67:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q21, [x15, #0x0]\n"
+ "ldr q21, [x16, #0x0]\n"
+ "ldr q20, [x16, #0x10]\n"
".inst 0x4f80e2a8 // sdot v8.4s, v21.16b, v0.4b[0]\n"
- "ldr q20, [x15, #0x10]\n"
".inst 0x4f81e2ac // sdot v12.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b0 // sdot v16.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x15, #0x20]\n"
+ "ldr q21, [x16, #0x20]\n"
".inst 0x4f80e289 // sdot v9.4s, v20.16b, v0.4b[0]\n"
".inst 0x4f81e28d // sdot v13.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e291 // sdot v17.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x15, #0x30]\n"
+ "ldr q20, [x16, #0x30]\n"
".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
@@ -1152,21 +1152,21 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp x14, x20\n"
"bne 58b\n"
"ldr q23, [x6, #0x0]\n"
- "add v8.4s, v8.4s, v23.4s\n"
"ldr q22, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v22.4s\n"
"ldr q21, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v21.4s\n"
"ldr q20, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v20.4s\n"
+ "add v8.4s, v8.4s, v23.4s\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x17, x20\n"
- "add x24, x25, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add v9.4s, v9.4s, v22.4s\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add v10.4s, v10.4s, v21.4s\n"
+ "add v11.4s, v11.4s, v20.4s\n"
"add v12.4s, v12.4s, v23.4s\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add x26, x15, x20\n"
"add v13.4s, v13.4s, v22.4s\n"
+ "add x25, x26, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"add v14.4s, v14.4s, v21.4s\n"
"add v15.4s, v15.4s, v20.4s\n"
"add v16.4s, v16.4s, v23.4s\n"
@@ -1187,9 +1187,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add x7, x7, #0x40\n"
"b 70f\n"
"69:" // Height 3: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -1261,35 +1261,36 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v18.4s, v18.4s, v2.4s\n"
"srshl v19.4s, v19.4s, v3.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v20.4s }, [x20]\n"
- "add v8.4s, v8.4s, v20.4s\n"
- "add v9.4s, v9.4s, v20.4s\n"
- "add v10.4s, v10.4s, v20.4s\n"
- "add v11.4s, v11.4s, v20.4s\n"
- "add v12.4s, v12.4s, v20.4s\n"
- "add v13.4s, v13.4s, v20.4s\n"
- "add v14.4s, v14.4s, v20.4s\n"
- "add v15.4s, v15.4s, v20.4s\n"
- "add v16.4s, v16.4s, v20.4s\n"
- "add v17.4s, v17.4s, v20.4s\n"
- "add v18.4s, v18.4s, v20.4s\n"
- "add v19.4s, v19.4s, v20.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v20.4s }, [x20]\n"
- "smin v8.4s, v8.4s, v20.4s\n"
- "smin v9.4s, v9.4s, v20.4s\n"
- "smin v10.4s, v10.4s, v20.4s\n"
- "smin v11.4s, v11.4s, v20.4s\n"
- "smin v12.4s, v12.4s, v20.4s\n"
- "smin v13.4s, v13.4s, v20.4s\n"
- "smin v14.4s, v14.4s, v20.4s\n"
- "smin v15.4s, v15.4s, v20.4s\n"
- "smin v16.4s, v16.4s, v20.4s\n"
- "smin v17.4s, v17.4s, v20.4s\n"
- "smin v18.4s, v18.4s, v20.4s\n"
- "smin v19.4s, v19.4s, v20.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v22.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v21.4s }, [x21]\n"
+ "cmp x17, #0x10\n"
"ld1r { v20.4s }, [x20]\n"
+ "add v8.4s, v8.4s, v22.4s\n"
+ "add v9.4s, v9.4s, v22.4s\n"
+ "add v10.4s, v10.4s, v22.4s\n"
+ "add v11.4s, v11.4s, v22.4s\n"
+ "add v12.4s, v12.4s, v22.4s\n"
+ "add v13.4s, v13.4s, v22.4s\n"
+ "add v14.4s, v14.4s, v22.4s\n"
+ "add v15.4s, v15.4s, v22.4s\n"
+ "add v16.4s, v16.4s, v22.4s\n"
+ "add v17.4s, v17.4s, v22.4s\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add v19.4s, v19.4s, v22.4s\n"
+ "smin v8.4s, v8.4s, v21.4s\n"
+ "smin v9.4s, v9.4s, v21.4s\n"
+ "smin v10.4s, v10.4s, v21.4s\n"
+ "smin v11.4s, v11.4s, v21.4s\n"
+ "smin v12.4s, v12.4s, v21.4s\n"
+ "smin v13.4s, v13.4s, v21.4s\n"
+ "smin v14.4s, v14.4s, v21.4s\n"
+ "smin v15.4s, v15.4s, v21.4s\n"
+ "smin v16.4s, v16.4s, v21.4s\n"
+ "smin v17.4s, v17.4s, v21.4s\n"
+ "smin v18.4s, v18.4s, v21.4s\n"
+ "smin v19.4s, v19.4s, v21.4s\n"
"smax v8.4s, v8.4s, v20.4s\n"
"smax v9.4s, v9.4s, v20.4s\n"
"smax v10.4s, v10.4s, v20.4s\n"
@@ -1308,102 +1309,101 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"uzp1 v20.8h, v14.8h, v15.8h\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v21.16b\n"
"uzp1 v12.16b, v12.16b, v20.16b\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 80f\n"
- "tbz x16, #3, 75f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "tbz x16, #2, 73f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "tbz x16, #1, 72f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
+ "tbz x17, #3, 75f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "tbz x17, #2, 73f\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "tbz x17, #1, 72f\n"
+ "st1 { v8.h }[6], [x15], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[14], [x15]\n"
+ "st1 { v12.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
"b 79f\n"
"72:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[12], [x15]\n"
+ "st1 { v12.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
"b 79f\n"
"73:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x16, #1, 74f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
+ "tbz x17, #1, 74f\n"
+ "st1 { v8.h }[4], [x15], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[10], [x15]\n"
+ "st1 { v12.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
"b 79f\n"
"74:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[8], [x15]\n"
+ "st1 { v12.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
"b 79f\n"
"75:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x16, #2, 77f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "tbz x16, #1, 76f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
+ "tbz x17, #2, 77f\n"
+ "str s8, [x15], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "tbz x17, #1, 76f\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[6], [x15]\n"
+ "st1 { v12.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
"b 79f\n"
"76:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[4], [x15]\n"
+ "st1 { v12.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
"b 79f\n"
"77:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x16, #1, 78f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "tbz x16, #0, 79f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
+ "tbz x17, #1, 78f\n"
+ "str h8, [x15], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "tbz x17, #0, 79f\n"
+ "st1 { v8.b }[2], [x15]\n"
+ "st1 { v12.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
"b 79f\n"
"78:" // Height 3: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
+ "str b8, [x15, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
"79:" // Height 3: Partial direct writeback: Done
"b 81f\n"
"80:" // Height 3: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
+ "str q8, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
+ "str q12, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
"81:" // Height 3: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 56b\n"
"b 164f\n"
"82:" // Height 4
"mov x6, %x[col_bias]\n"
"ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"83:" // Height 4: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -1425,8 +1425,8 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"85:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w13, [x20, x14, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 86f\n"
"ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1454,130 +1454,129 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr q1, [x11, #0x0]\n"
"ldr q2, [x10, #0x0]\n"
"ldr q3, [x9, #0x0]\n"
- "ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
+ "ldr q6, [x16, #0x0]\n"
+ "ldr q7, [x16, #0x10]\n"
"blt 89f\n"
"88:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x15, #0x28]\n"
+ "ldr x20, [x16, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x38]\n"
+ "ldr x21, [x16, #0x38]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr d25, [x15, #0x20]\n"
+ "ldr d25, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v25.d[1], x21\n"
+ "add x11, x11, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x15, #0x48]\n"
+ "add x10, x10, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x11, x11, #0x10\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr d24, [x15, #0x30]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x16, #0x30]\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
+ "ldr x20, [x16, #0x48]\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x58]\n"
+ "add x9, x9, #0x10\n"
+ "mov v24.d[1], x21\n"
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
- "add x10, x10, #0x10\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
- "ldr d25, [x15, #0x40]\n"
+ "ldr d25, [x16, #0x40]\n"
".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x16, #0x58]\n"
".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n"
- "ldr x21, [x15, #0x68]\n"
+ "ldr x25, [x12, #0x8]\n"
".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n"
- "ldr d24, [x15, #0x50]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x16, #0x50]\n"
".inst 0x4fa0e328 // sdot v8.4s, v25.16b, v0.4b[1]\n"
+ "ldr x20, [x16, #0x68]\n"
".inst 0x4fa1e32c // sdot v12.4s, v25.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr x24, [x11, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4fa2e330 // sdot v16.4s, v25.16b, v2.4b[1]\n"
- "ldr x25, [x12, #0x8]\n"
".inst 0x4fa3e334 // sdot v20.4s, v25.16b, v3.4b[1]\n"
- "ldr d25, [x15, #0x60]\n"
+ "ldr d25, [x16, #0x60]\n"
".inst 0x4fa0e309 // sdot v9.4s, v24.16b, v0.4b[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x16, #0x78]\n"
".inst 0x4fa1e30d // sdot v13.4s, v24.16b, v1.4b[1]\n"
- "ldr x21, [x15, #0x88]\n"
+ "ldr x23, [x10, #0x8]\n"
".inst 0x4fa2e311 // sdot v17.4s, v24.16b, v2.4b[1]\n"
- "ldr x24, [x11, #0x8]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4fa3e315 // sdot v21.4s, v24.16b, v3.4b[1]\n"
- "ldr d24, [x15, #0x70]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x16, #0x70]\n"
".inst 0x4fa0e32a // sdot v10.4s, v25.16b, v0.4b[1]\n"
+ "ldr x20, [x16, #0x88]\n"
".inst 0x4fa1e32e // sdot v14.4s, v25.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x98]\n"
+ "ldr x22, [x9, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4fa2e332 // sdot v18.4s, v25.16b, v2.4b[1]\n"
- "ldr x23, [x10, #0x8]\n"
".inst 0x4fa3e336 // sdot v22.4s, v25.16b, v3.4b[1]\n"
- "ldr d25, [x15, #0x80]\n"
+ "ldr d25, [x16, #0x80]\n"
".inst 0x4fa0e30b // sdot v11.4s, v24.16b, v0.4b[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x16, #0x98]\n"
".inst 0x4fa1e30f // sdot v15.4s, v24.16b, v1.4b[1]\n"
- "ldr x21, [x15, #0xa8]\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4fa2e313 // sdot v19.4s, v24.16b, v2.4b[1]\n"
- "ldr x22, [x9, #0x8]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4fa3e317 // sdot v23.4s, v24.16b, v3.4b[1]\n"
- "ldr d24, [x15, #0x90]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x16, #0x90]\n"
".inst 0x4f80eb28 // sdot v8.4s, v25.16b, v0.4b[2]\n"
+ "ldr x20, [x16, #0xa8]\n"
".inst 0x4f81eb2c // sdot v12.4s, v25.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "cmp x13, #0x20\n"
+ "mov v24.d[1], x21\n"
".inst 0x4f82eb30 // sdot v16.4s, v25.16b, v2.4b[2]\n"
- "sub x13, x13, #0x10\n"
".inst 0x4f83eb34 // sdot v20.4s, v25.16b, v3.4b[2]\n"
- "ldr d25, [x15, #0xa0]\n"
+ "ldr d25, [x16, #0xa0]\n"
".inst 0x4f80eb09 // sdot v9.4s, v24.16b, v0.4b[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x16, #0xb8]\n"
".inst 0x4f81eb0d // sdot v13.4s, v24.16b, v1.4b[2]\n"
- "ldr x21, [x15, #0xc8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f82eb11 // sdot v17.4s, v24.16b, v2.4b[2]\n"
- "cmp x13, #0x20\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f83eb15 // sdot v21.4s, v24.16b, v3.4b[2]\n"
- "ldr d24, [x15, #0xb0]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x16, #0xb0]\n"
".inst 0x4f80eb2a // sdot v10.4s, v25.16b, v0.4b[2]\n"
+ "ldr x20, [x16, #0xc8]\n"
".inst 0x4f81eb2e // sdot v14.4s, v25.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xd8]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4f82eb32 // sdot v18.4s, v25.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f83eb36 // sdot v22.4s, v25.16b, v3.4b[2]\n"
- "ldr d25, [x15, #0xc0]\n"
+ "ldr d25, [x16, #0xc0]\n"
".inst 0x4f80eb0b // sdot v11.4s, v24.16b, v0.4b[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x16, #0xd8]\n"
".inst 0x4f81eb0f // sdot v15.4s, v24.16b, v1.4b[2]\n"
- "ldr x21, [x15, #0xe8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4f82eb13 // sdot v19.4s, v24.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f83eb17 // sdot v23.4s, v24.16b, v3.4b[2]\n"
- "ldr d24, [x15, #0xd0]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x16, #0xd0]\n"
".inst 0x4fa0eb28 // sdot v8.4s, v25.16b, v0.4b[3]\n"
+ "ldr x20, [x16, #0xe8]\n"
".inst 0x4fa1eb2c // sdot v12.4s, v25.16b, v1.4b[3]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4fa2eb30 // sdot v16.4s, v25.16b, v2.4b[3]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4fa3eb34 // sdot v20.4s, v25.16b, v3.4b[3]\n"
- "ldr d25, [x15, #0xe0]\n"
+ "ldr d25, [x16, #0xe0]\n"
".inst 0x4fa0eb09 // sdot v9.4s, v24.16b, v0.4b[3]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x16, #0xf8]\n"
".inst 0x4fa1eb0d // sdot v13.4s, v24.16b, v1.4b[3]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4fa2eb11 // sdot v17.4s, v24.16b, v2.4b[3]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4fa3eb15 // sdot v21.4s, v24.16b, v3.4b[3]\n"
- "ldr d24, [x15, #0xf0]\n"
- "mov v24.d[1], x20\n"
- "add x15, x15, #0x100\n"
+ "ldr d24, [x16, #0xf0]\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa0eb2a // sdot v10.4s, v25.16b, v0.4b[3]\n"
- "ldr x21, [x15, #0x8]\n"
".inst 0x4fa1eb2e // sdot v14.4s, v25.16b, v1.4b[3]\n"
- "ldr x20, [x15, #0x18]\n"
+ "ldr x20, [x16, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4fa2eb32 // sdot v18.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb36 // sdot v22.4s, v25.16b, v3.4b[3]\n"
- "ldr d6, [x15, #0x0]\n"
+ "ldr d6, [x16, #0x0]\n"
".inst 0x4fa0eb0b // sdot v11.4s, v24.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1eb0f // sdot v15.4s, v24.16b, v1.4b[3]\n"
@@ -1586,8 +1585,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr d2, [x10, #0x0]\n"
".inst 0x4fa3eb17 // sdot v23.4s, v24.16b, v3.4b[3]\n"
"ldr d3, [x9, #0x0]\n"
- "ldr d7, [x15, #0x10]\n"
- "mov v6.d[1], x21\n"
+ "ldr d7, [x16, #0x10]\n"
+ "mov v6.d[1], x20\n"
+ "ldr x20, [x16, #0x18]\n"
"mov v0.d[1], x25\n"
"mov v1.d[1], x24\n"
"mov v2.d[1], x23\n"
@@ -1602,7 +1602,7 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"add x10, x10, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q25, [x15, #0x20]\n"
+ "ldr q25, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x9, x9, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -1610,7 +1610,7 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q24, [x15, #0x30]\n"
+ "ldr q24, [x16, #0x30]\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
@@ -1618,64 +1618,64 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
"prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x15, #0x40]\n"
+ "ldr q25, [x16, #0x40]\n"
".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n"
".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n"
".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x15, #0x50]\n"
+ "ldr q24, [x16, #0x50]\n"
".inst 0x4fa0e328 // sdot v8.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e32c // sdot v12.4s, v25.16b, v1.4b[1]\n"
".inst 0x4fa2e330 // sdot v16.4s, v25.16b, v2.4b[1]\n"
".inst 0x4fa3e334 // sdot v20.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x15, #0x60]\n"
+ "ldr q25, [x16, #0x60]\n"
".inst 0x4fa0e309 // sdot v9.4s, v24.16b, v0.4b[1]\n"
".inst 0x4fa1e30d // sdot v13.4s, v24.16b, v1.4b[1]\n"
".inst 0x4fa2e311 // sdot v17.4s, v24.16b, v2.4b[1]\n"
".inst 0x4fa3e315 // sdot v21.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x15, #0x70]\n"
+ "ldr q24, [x16, #0x70]\n"
".inst 0x4fa0e32a // sdot v10.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e32e // sdot v14.4s, v25.16b, v1.4b[1]\n"
".inst 0x4fa2e332 // sdot v18.4s, v25.16b, v2.4b[1]\n"
".inst 0x4fa3e336 // sdot v22.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x15, #0x80]\n"
+ "ldr q25, [x16, #0x80]\n"
".inst 0x4fa0e30b // sdot v11.4s, v24.16b, v0.4b[1]\n"
".inst 0x4fa1e30f // sdot v15.4s, v24.16b, v1.4b[1]\n"
".inst 0x4fa2e313 // sdot v19.4s, v24.16b, v2.4b[1]\n"
".inst 0x4fa3e317 // sdot v23.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x15, #0x90]\n"
+ "ldr q24, [x16, #0x90]\n"
".inst 0x4f80eb28 // sdot v8.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f81eb2c // sdot v12.4s, v25.16b, v1.4b[2]\n"
".inst 0x4f82eb30 // sdot v16.4s, v25.16b, v2.4b[2]\n"
".inst 0x4f83eb34 // sdot v20.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x15, #0xa0]\n"
+ "ldr q25, [x16, #0xa0]\n"
".inst 0x4f80eb09 // sdot v9.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb0d // sdot v13.4s, v24.16b, v1.4b[2]\n"
".inst 0x4f82eb11 // sdot v17.4s, v24.16b, v2.4b[2]\n"
".inst 0x4f83eb15 // sdot v21.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x15, #0xb0]\n"
+ "ldr q24, [x16, #0xb0]\n"
".inst 0x4f80eb2a // sdot v10.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f81eb2e // sdot v14.4s, v25.16b, v1.4b[2]\n"
".inst 0x4f82eb32 // sdot v18.4s, v25.16b, v2.4b[2]\n"
".inst 0x4f83eb36 // sdot v22.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x15, #0xc0]\n"
+ "ldr q25, [x16, #0xc0]\n"
".inst 0x4f80eb0b // sdot v11.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb0f // sdot v15.4s, v24.16b, v1.4b[2]\n"
".inst 0x4f82eb13 // sdot v19.4s, v24.16b, v2.4b[2]\n"
".inst 0x4f83eb17 // sdot v23.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x15, #0xd0]\n"
+ "ldr q24, [x16, #0xd0]\n"
".inst 0x4fa0eb28 // sdot v8.4s, v25.16b, v0.4b[3]\n"
".inst 0x4fa1eb2c // sdot v12.4s, v25.16b, v1.4b[3]\n"
".inst 0x4fa2eb30 // sdot v16.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb34 // sdot v20.4s, v25.16b, v3.4b[3]\n"
- "ldr q25, [x15, #0xe0]\n"
+ "ldr q25, [x16, #0xe0]\n"
".inst 0x4fa0eb09 // sdot v9.4s, v24.16b, v0.4b[3]\n"
".inst 0x4fa1eb0d // sdot v13.4s, v24.16b, v1.4b[3]\n"
".inst 0x4fa2eb11 // sdot v17.4s, v24.16b, v2.4b[3]\n"
".inst 0x4fa3eb15 // sdot v21.4s, v24.16b, v3.4b[3]\n"
- "ldr q24, [x15, #0xf0]\n"
+ "ldr q24, [x16, #0xf0]\n"
".inst 0x4fa0eb2a // sdot v10.4s, v25.16b, v0.4b[3]\n"
- "add x15, x15, #0x100\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1eb2e // sdot v14.4s, v25.16b, v1.4b[3]\n"
".inst 0x4fa2eb32 // sdot v18.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb36 // sdot v22.4s, v25.16b, v3.4b[3]\n"
@@ -1694,20 +1694,20 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp x13, #0x4\n"
"ldr s27, [x10], #0x4\n"
"ldr s26, [x9], #0x4\n"
- "ldr q25, [x15, #0x0]\n"
+ "ldr q25, [x16, #0x0]\n"
+ "ldr q24, [x16, #0x10]\n"
".inst 0x4f9de328 // sdot v8.4s, v25.16b, v29.4b[0]\n"
- "ldr q24, [x15, #0x10]\n"
".inst 0x4f9ce32c // sdot v12.4s, v25.16b, v28.4b[0]\n"
".inst 0x4f9be330 // sdot v16.4s, v25.16b, v27.4b[0]\n"
".inst 0x4f9ae334 // sdot v20.4s, v25.16b, v26.4b[0]\n"
- "ldr q25, [x15, #0x20]\n"
+ "ldr q25, [x16, #0x20]\n"
".inst 0x4f9de309 // sdot v9.4s, v24.16b, v29.4b[0]\n"
".inst 0x4f9ce30d // sdot v13.4s, v24.16b, v28.4b[0]\n"
".inst 0x4f9be311 // sdot v17.4s, v24.16b, v27.4b[0]\n"
".inst 0x4f9ae315 // sdot v21.4s, v24.16b, v26.4b[0]\n"
- "ldr q24, [x15, #0x30]\n"
+ "ldr q24, [x16, #0x30]\n"
".inst 0x4f9de32a // sdot v10.4s, v25.16b, v29.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f9ce32e // sdot v14.4s, v25.16b, v28.4b[0]\n"
".inst 0x4f9be332 // sdot v18.4s, v25.16b, v27.4b[0]\n"
".inst 0x4f9ae336 // sdot v22.4s, v25.16b, v26.4b[0]\n"
@@ -1735,20 +1735,20 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr b2, [x10, #0x0]\n"
"ldr b3, [x9, #0x0]\n"
"94:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q25, [x15, #0x0]\n"
+ "ldr q25, [x16, #0x0]\n"
+ "ldr q24, [x16, #0x10]\n"
".inst 0x4f80e328 // sdot v8.4s, v25.16b, v0.4b[0]\n"
- "ldr q24, [x15, #0x10]\n"
".inst 0x4f81e32c // sdot v12.4s, v25.16b, v1.4b[0]\n"
".inst 0x4f82e330 // sdot v16.4s, v25.16b, v2.4b[0]\n"
".inst 0x4f83e334 // sdot v20.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x15, #0x20]\n"
+ "ldr q25, [x16, #0x20]\n"
".inst 0x4f80e309 // sdot v9.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e30d // sdot v13.4s, v24.16b, v1.4b[0]\n"
".inst 0x4f82e311 // sdot v17.4s, v24.16b, v2.4b[0]\n"
".inst 0x4f83e315 // sdot v21.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x15, #0x30]\n"
+ "ldr q24, [x16, #0x30]\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
@@ -1762,24 +1762,24 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp x14, x20\n"
"bne 85b\n"
"ldr q27, [x6, #0x0]\n"
- "add v8.4s, v8.4s, v27.4s\n"
"ldr q26, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v26.4s\n"
"ldr q25, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v25.4s\n"
"ldr q24, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v24.4s\n"
+ "add v8.4s, v8.4s, v27.4s\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x17, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "add v9.4s, v9.4s, v26.4s\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add v10.4s, v10.4s, v25.4s\n"
+ "add v11.4s, v11.4s, v24.4s\n"
"add v12.4s, v12.4s, v27.4s\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add x26, x15, x20\n"
"add v13.4s, v13.4s, v26.4s\n"
+ "add x25, x26, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"prfm pstl1keep, [x24, #0x0]\n"
"add v14.4s, v14.4s, v25.4s\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"add v15.4s, v15.4s, v24.4s\n"
"add v16.4s, v16.4s, v27.4s\n"
"add v17.4s, v17.4s, v26.4s\n"
@@ -1803,9 +1803,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add x7, x7, #0x40\n"
"b 97f\n"
"96:" // Height 4: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -1897,43 +1897,44 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v22.4s, v22.4s, v2.4s\n"
"srshl v23.4s, v23.4s, v3.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v24.4s }, [x20]\n"
- "add v8.4s, v8.4s, v24.4s\n"
- "add v9.4s, v9.4s, v24.4s\n"
- "add v10.4s, v10.4s, v24.4s\n"
- "add v11.4s, v11.4s, v24.4s\n"
- "add v12.4s, v12.4s, v24.4s\n"
- "add v13.4s, v13.4s, v24.4s\n"
- "add v14.4s, v14.4s, v24.4s\n"
- "add v15.4s, v15.4s, v24.4s\n"
- "add v16.4s, v16.4s, v24.4s\n"
- "add v17.4s, v17.4s, v24.4s\n"
- "add v18.4s, v18.4s, v24.4s\n"
- "add v19.4s, v19.4s, v24.4s\n"
- "add v20.4s, v20.4s, v24.4s\n"
- "add v21.4s, v21.4s, v24.4s\n"
- "add v22.4s, v22.4s, v24.4s\n"
- "add v23.4s, v23.4s, v24.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v24.4s }, [x20]\n"
- "smin v8.4s, v8.4s, v24.4s\n"
- "smin v9.4s, v9.4s, v24.4s\n"
- "smin v10.4s, v10.4s, v24.4s\n"
- "smin v11.4s, v11.4s, v24.4s\n"
- "smin v12.4s, v12.4s, v24.4s\n"
- "smin v13.4s, v13.4s, v24.4s\n"
- "smin v14.4s, v14.4s, v24.4s\n"
- "smin v15.4s, v15.4s, v24.4s\n"
- "smin v16.4s, v16.4s, v24.4s\n"
- "smin v17.4s, v17.4s, v24.4s\n"
- "smin v18.4s, v18.4s, v24.4s\n"
- "smin v19.4s, v19.4s, v24.4s\n"
- "smin v20.4s, v20.4s, v24.4s\n"
- "smin v21.4s, v21.4s, v24.4s\n"
- "smin v22.4s, v22.4s, v24.4s\n"
- "smin v23.4s, v23.4s, v24.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v25.4s }, [x21]\n"
+ "cmp x17, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
+ "add v8.4s, v8.4s, v26.4s\n"
+ "add v9.4s, v9.4s, v26.4s\n"
+ "add v10.4s, v10.4s, v26.4s\n"
+ "add v11.4s, v11.4s, v26.4s\n"
+ "add v12.4s, v12.4s, v26.4s\n"
+ "add v13.4s, v13.4s, v26.4s\n"
+ "add v14.4s, v14.4s, v26.4s\n"
+ "add v15.4s, v15.4s, v26.4s\n"
+ "add v16.4s, v16.4s, v26.4s\n"
+ "add v17.4s, v17.4s, v26.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v26.4s\n"
+ "add v20.4s, v20.4s, v26.4s\n"
+ "add v21.4s, v21.4s, v26.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v26.4s\n"
+ "smin v8.4s, v8.4s, v25.4s\n"
+ "smin v9.4s, v9.4s, v25.4s\n"
+ "smin v10.4s, v10.4s, v25.4s\n"
+ "smin v11.4s, v11.4s, v25.4s\n"
+ "smin v12.4s, v12.4s, v25.4s\n"
+ "smin v13.4s, v13.4s, v25.4s\n"
+ "smin v14.4s, v14.4s, v25.4s\n"
+ "smin v15.4s, v15.4s, v25.4s\n"
+ "smin v16.4s, v16.4s, v25.4s\n"
+ "smin v17.4s, v17.4s, v25.4s\n"
+ "smin v18.4s, v18.4s, v25.4s\n"
+ "smin v19.4s, v19.4s, v25.4s\n"
+ "smin v20.4s, v20.4s, v25.4s\n"
+ "smin v21.4s, v21.4s, v25.4s\n"
+ "smin v22.4s, v22.4s, v25.4s\n"
+ "smin v23.4s, v23.4s, v25.4s\n"
"smax v8.4s, v8.4s, v24.4s\n"
"smax v9.4s, v9.4s, v24.4s\n"
"smax v10.4s, v10.4s, v24.4s\n"
@@ -1958,119 +1959,118 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"uzp1 v18.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v17.8h, v22.8h, v23.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v25.16b\n"
"uzp1 v12.16b, v12.16b, v24.16b\n"
"uzp1 v16.16b, v16.16b, v18.16b\n"
"uzp1 v20.16b, v20.16b, v17.16b\n"
"bge 107f\n"
- "tbz x16, #3, 102f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "tbz x16, #2, 100f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "tbz x16, #1, 99f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
- "st1 { v20.b }[14], [x23]\n"
+ "tbz x17, #3, 102f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "tbz x17, #2, 100f\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "tbz x17, #1, 99f\n"
+ "st1 { v8.h }[6], [x15], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[14], [x15]\n"
+ "st1 { v12.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
+ "st1 { v20.b }[14], [x24]\n"
"b 106f\n"
"99:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
- "st1 { v20.b }[12], [x23]\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[12], [x15]\n"
+ "st1 { v12.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
+ "st1 { v20.b }[12], [x24]\n"
"b 106f\n"
"100:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x16, #1, 101f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
- "st1 { v20.b }[10], [x23]\n"
+ "tbz x17, #1, 101f\n"
+ "st1 { v8.h }[4], [x15], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[10], [x15]\n"
+ "st1 { v12.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
+ "st1 { v20.b }[10], [x24]\n"
"b 106f\n"
"101:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
- "st1 { v20.b }[8], [x23]\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[8], [x15]\n"
+ "st1 { v12.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
+ "st1 { v20.b }[8], [x24]\n"
"b 106f\n"
"102:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x16, #2, 104f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "tbz x16, #1, 103f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
- "st1 { v20.b }[6], [x23]\n"
+ "tbz x17, #2, 104f\n"
+ "str s8, [x15], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "tbz x17, #1, 103f\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[6], [x15]\n"
+ "st1 { v12.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
+ "st1 { v20.b }[6], [x24]\n"
"b 106f\n"
"103:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
- "st1 { v20.b }[4], [x23]\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[4], [x15]\n"
+ "st1 { v12.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
+ "st1 { v20.b }[4], [x24]\n"
"b 106f\n"
"104:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x16, #1, 105f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "str h20, [x23], #0x2\n"
- "tbz x16, #0, 106f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
- "st1 { v20.b }[2], [x23]\n"
+ "tbz x17, #1, 105f\n"
+ "str h8, [x15], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "tbz x17, #0, 106f\n"
+ "st1 { v8.b }[2], [x15]\n"
+ "st1 { v12.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
+ "st1 { v20.b }[2], [x24]\n"
"b 106f\n"
"105:" // Height 4: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
- "str b20, [x23, #0x0]\n"
+ "str b8, [x15, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
"106:" // Height 4: Partial direct writeback: Done
"b 108f\n"
"107:" // Height 4: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
- "str q20, [x23, #0x0]\n"
+ "str q8, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
+ "str q12, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
"108:" // Height 4: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 83b\n"
"b 164f\n"
"109:" // Height 5
"mov x6, %x[col_bias]\n"
"ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"110:" // Height 5: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2096,8 +2096,8 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"112:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w13, [x20, x14, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 113f\n"
"ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2129,148 +2129,148 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr q2, [x10, #0x0]\n"
"ldr q3, [x9, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
- "ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
+ "ldr q6, [x16, #0x0]\n"
+ "ldr q7, [x16, #0x10]\n"
"blt 116f\n"
"115:" // Height 5: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x15, #0x28]\n"
+ "ldr x21, [x16, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x38]\n"
+ "ldr x20, [x16, #0x38]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr d29, [x15, #0x20]\n"
+ "ldr d29, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v29.d[1], x21\n"
+ "add x10, x10, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x15, #0x48]\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "ldr x21, [x16, #0x48]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr d28, [x15, #0x30]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x16, #0x30]\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x26, [x12, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4f82e3b2 // sdot v18.4s, v29.16b, v2.4b[0]\n"
- "add x28, x28, #0x10\n"
".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n"
- "ldr x26, [x12, #0x8]\n"
+ "ldr x20, [x16, #0x58]\n"
".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n"
- "ldr d29, [x15, #0x40]\n"
+ "ldr d29, [x16, #0x40]\n"
".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n"
- "mov v29.d[1], x21\n"
+ "ldr x25, [x11, #0x8]\n"
".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n"
- "ldr x21, [x15, #0x68]\n"
+ "ldr x24, [x10, #0x8]\n"
".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n"
- "ldr x25, [x11, #0x8]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n"
- "ldr x24, [x10, #0x8]\n"
+ "ldr x21, [x16, #0x68]\n"
".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n"
- "ldr d28, [x15, #0x50]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x16, #0x50]\n"
".inst 0x4fa0e3a8 // sdot v8.4s, v29.16b, v0.4b[1]\n"
+ "ldr x23, [x9, #0x8]\n"
".inst 0x4fa1e3ac // sdot v12.4s, v29.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x78]\n"
+ "ldr x22, [x28, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4fa2e3b0 // sdot v16.4s, v29.16b, v2.4b[1]\n"
- "ldr x23, [x9, #0x8]\n"
".inst 0x4fa3e3b4 // sdot v20.4s, v29.16b, v3.4b[1]\n"
- "ldr x22, [x28, #0x8]\n"
+ "ldr x20, [x16, #0x78]\n"
".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n"
- "ldr d29, [x15, #0x60]\n"
+ "ldr d29, [x16, #0x60]\n"
".inst 0x4fa0e389 // sdot v9.4s, v28.16b, v0.4b[1]\n"
- "mov v29.d[1], x21\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4fa1e38d // sdot v13.4s, v28.16b, v1.4b[1]\n"
- "ldr x21, [x15, #0x88]\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa2e391 // sdot v17.4s, v28.16b, v2.4b[1]\n"
- "sub x13, x13, #0x10\n"
+ "mov v29.d[1], x21\n"
".inst 0x4fa3e395 // sdot v21.4s, v28.16b, v3.4b[1]\n"
- "cmp x13, #0x20\n"
+ "ldr x21, [x16, #0x88]\n"
".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n"
- "ldr d28, [x15, #0x70]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x16, #0x70]\n"
".inst 0x4fa0e3aa // sdot v10.4s, v29.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4fa1e3ae // sdot v14.4s, v29.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x98]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4fa2e3b2 // sdot v18.4s, v29.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4fa3e3b6 // sdot v22.4s, v29.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr x20, [x16, #0x98]\n"
".inst 0x4fa4e3ba // sdot v26.4s, v29.16b, v4.4b[1]\n"
- "ldr d29, [x15, #0x80]\n"
+ "ldr d29, [x16, #0x80]\n"
".inst 0x4fa0e38b // sdot v11.4s, v28.16b, v0.4b[1]\n"
- "mov v29.d[1], x21\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4fa1e38f // sdot v15.4s, v28.16b, v1.4b[1]\n"
- "ldr x21, [x15, #0xa8]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4fa2e393 // sdot v19.4s, v28.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4fa3e397 // sdot v23.4s, v28.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr x21, [x16, #0xa8]\n"
".inst 0x4fa4e39b // sdot v27.4s, v28.16b, v4.4b[1]\n"
- "ldr d28, [x15, #0x90]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x16, #0x90]\n"
".inst 0x4f80eba8 // sdot v8.4s, v29.16b, v0.4b[2]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f81ebac // sdot v12.4s, v29.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4f82ebb0 // sdot v16.4s, v29.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f83ebb4 // sdot v20.4s, v29.16b, v3.4b[2]\n"
+ "ldr x20, [x16, #0xb8]\n"
".inst 0x4f84ebb8 // sdot v24.4s, v29.16b, v4.4b[2]\n"
- "ldr d29, [x15, #0xa0]\n"
+ "ldr d29, [x16, #0xa0]\n"
".inst 0x4f80eb89 // sdot v9.4s, v28.16b, v0.4b[2]\n"
- "mov v29.d[1], x21\n"
".inst 0x4f81eb8d // sdot v13.4s, v28.16b, v1.4b[2]\n"
- "ldr x21, [x15, #0xc8]\n"
".inst 0x4f82eb91 // sdot v17.4s, v28.16b, v2.4b[2]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f83eb95 // sdot v21.4s, v28.16b, v3.4b[2]\n"
+ "ldr x21, [x16, #0xc8]\n"
".inst 0x4f84eb99 // sdot v25.4s, v28.16b, v4.4b[2]\n"
- "ldr d28, [x15, #0xb0]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x16, #0xb0]\n"
".inst 0x4f80ebaa // sdot v10.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebae // sdot v14.4s, v29.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xd8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4f82ebb2 // sdot v18.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb6 // sdot v22.4s, v29.16b, v3.4b[2]\n"
+ "ldr x20, [x16, #0xd8]\n"
".inst 0x4f84ebba // sdot v26.4s, v29.16b, v4.4b[2]\n"
- "ldr d29, [x15, #0xc0]\n"
+ "ldr d29, [x16, #0xc0]\n"
".inst 0x4f80eb8b // sdot v11.4s, v28.16b, v0.4b[2]\n"
- "mov v29.d[1], x21\n"
".inst 0x4f81eb8f // sdot v15.4s, v28.16b, v1.4b[2]\n"
- "ldr x21, [x15, #0xe8]\n"
".inst 0x4f82eb93 // sdot v19.4s, v28.16b, v2.4b[2]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f83eb97 // sdot v23.4s, v28.16b, v3.4b[2]\n"
+ "ldr x21, [x16, #0xe8]\n"
".inst 0x4f84eb9b // sdot v27.4s, v28.16b, v4.4b[2]\n"
- "ldr d28, [x15, #0xd0]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x16, #0xd0]\n"
".inst 0x4fa0eba8 // sdot v8.4s, v29.16b, v0.4b[3]\n"
".inst 0x4fa1ebac // sdot v12.4s, v29.16b, v1.4b[3]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4fa2ebb0 // sdot v16.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb4 // sdot v20.4s, v29.16b, v3.4b[3]\n"
+ "ldr x20, [x16, #0xf8]\n"
".inst 0x4fa4ebb8 // sdot v24.4s, v29.16b, v4.4b[3]\n"
- "ldr d29, [x15, #0xe0]\n"
+ "ldr d29, [x16, #0xe0]\n"
".inst 0x4fa0eb89 // sdot v9.4s, v28.16b, v0.4b[3]\n"
- "mov v29.d[1], x21\n"
".inst 0x4fa1eb8d // sdot v13.4s, v28.16b, v1.4b[3]\n"
".inst 0x4fa2eb91 // sdot v17.4s, v28.16b, v2.4b[3]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4fa3eb95 // sdot v21.4s, v28.16b, v3.4b[3]\n"
".inst 0x4fa4eb99 // sdot v25.4s, v28.16b, v4.4b[3]\n"
- "ldr d28, [x15, #0xf0]\n"
- "mov v28.d[1], x20\n"
- "add x15, x15, #0x100\n"
+ "ldr d28, [x16, #0xf0]\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa0ebaa // sdot v10.4s, v29.16b, v0.4b[3]\n"
- "ldr x21, [x15, #0x8]\n"
".inst 0x4fa1ebae // sdot v14.4s, v29.16b, v1.4b[3]\n"
- "ldr x20, [x15, #0x18]\n"
+ "ldr x21, [x16, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4fa2ebb2 // sdot v18.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb6 // sdot v22.4s, v29.16b, v3.4b[3]\n"
+ "ldr x20, [x16, #0x18]\n"
".inst 0x4fa4ebba // sdot v26.4s, v29.16b, v4.4b[3]\n"
- "ldr d6, [x15, #0x0]\n"
+ "ldr d6, [x16, #0x0]\n"
".inst 0x4fa0eb8b // sdot v11.4s, v28.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1eb8f // sdot v15.4s, v28.16b, v1.4b[3]\n"
@@ -2281,7 +2281,7 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr d3, [x9, #0x0]\n"
".inst 0x4fa4eb9b // sdot v27.4s, v28.16b, v4.4b[3]\n"
"ldr d4, [x28, #0x0]\n"
- "ldr d7, [x15, #0x10]\n"
+ "ldr d7, [x16, #0x10]\n"
"mov v6.d[1], x21\n"
"mov v0.d[1], x26\n"
"mov v1.d[1], x25\n"
@@ -2300,7 +2300,7 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"add x9, x9, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q29, [x15, #0x20]\n"
+ "ldr q29, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x28, x28, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -2310,7 +2310,7 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q28, [x15, #0x30]\n"
+ "ldr q28, [x16, #0x30]\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
@@ -2319,75 +2319,75 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n"
".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x15, #0x40]\n"
+ "ldr q29, [x16, #0x40]\n"
".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n"
".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x15, #0x50]\n"
+ "ldr q28, [x16, #0x50]\n"
".inst 0x4fa0e3a8 // sdot v8.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3ac // sdot v12.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3b0 // sdot v16.4s, v29.16b, v2.4b[1]\n"
".inst 0x4fa3e3b4 // sdot v20.4s, v29.16b, v3.4b[1]\n"
".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x15, #0x60]\n"
+ "ldr q29, [x16, #0x60]\n"
".inst 0x4fa0e389 // sdot v9.4s, v28.16b, v0.4b[1]\n"
".inst 0x4fa1e38d // sdot v13.4s, v28.16b, v1.4b[1]\n"
".inst 0x4fa2e391 // sdot v17.4s, v28.16b, v2.4b[1]\n"
".inst 0x4fa3e395 // sdot v21.4s, v28.16b, v3.4b[1]\n"
".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x15, #0x70]\n"
+ "ldr q28, [x16, #0x70]\n"
".inst 0x4fa0e3aa // sdot v10.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3ae // sdot v14.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3b2 // sdot v18.4s, v29.16b, v2.4b[1]\n"
".inst 0x4fa3e3b6 // sdot v22.4s, v29.16b, v3.4b[1]\n"
".inst 0x4fa4e3ba // sdot v26.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x15, #0x80]\n"
+ "ldr q29, [x16, #0x80]\n"
".inst 0x4fa0e38b // sdot v11.4s, v28.16b, v0.4b[1]\n"
".inst 0x4fa1e38f // sdot v15.4s, v28.16b, v1.4b[1]\n"
".inst 0x4fa2e393 // sdot v19.4s, v28.16b, v2.4b[1]\n"
".inst 0x4fa3e397 // sdot v23.4s, v28.16b, v3.4b[1]\n"
".inst 0x4fa4e39b // sdot v27.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x15, #0x90]\n"
+ "ldr q28, [x16, #0x90]\n"
".inst 0x4f80eba8 // sdot v8.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebac // sdot v12.4s, v29.16b, v1.4b[2]\n"
".inst 0x4f82ebb0 // sdot v16.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb4 // sdot v20.4s, v29.16b, v3.4b[2]\n"
".inst 0x4f84ebb8 // sdot v24.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x15, #0xa0]\n"
+ "ldr q29, [x16, #0xa0]\n"
".inst 0x4f80eb89 // sdot v9.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb8d // sdot v13.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb91 // sdot v17.4s, v28.16b, v2.4b[2]\n"
".inst 0x4f83eb95 // sdot v21.4s, v28.16b, v3.4b[2]\n"
".inst 0x4f84eb99 // sdot v25.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x15, #0xb0]\n"
+ "ldr q28, [x16, #0xb0]\n"
".inst 0x4f80ebaa // sdot v10.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebae // sdot v14.4s, v29.16b, v1.4b[2]\n"
".inst 0x4f82ebb2 // sdot v18.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb6 // sdot v22.4s, v29.16b, v3.4b[2]\n"
".inst 0x4f84ebba // sdot v26.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x15, #0xc0]\n"
+ "ldr q29, [x16, #0xc0]\n"
".inst 0x4f80eb8b // sdot v11.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb8f // sdot v15.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb93 // sdot v19.4s, v28.16b, v2.4b[2]\n"
".inst 0x4f83eb97 // sdot v23.4s, v28.16b, v3.4b[2]\n"
".inst 0x4f84eb9b // sdot v27.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x15, #0xd0]\n"
+ "ldr q28, [x16, #0xd0]\n"
".inst 0x4fa0eba8 // sdot v8.4s, v29.16b, v0.4b[3]\n"
".inst 0x4fa1ebac // sdot v12.4s, v29.16b, v1.4b[3]\n"
".inst 0x4fa2ebb0 // sdot v16.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb4 // sdot v20.4s, v29.16b, v3.4b[3]\n"
".inst 0x4fa4ebb8 // sdot v24.4s, v29.16b, v4.4b[3]\n"
- "ldr q29, [x15, #0xe0]\n"
+ "ldr q29, [x16, #0xe0]\n"
".inst 0x4fa0eb89 // sdot v9.4s, v28.16b, v0.4b[3]\n"
".inst 0x4fa1eb8d // sdot v13.4s, v28.16b, v1.4b[3]\n"
".inst 0x4fa2eb91 // sdot v17.4s, v28.16b, v2.4b[3]\n"
".inst 0x4fa3eb95 // sdot v21.4s, v28.16b, v3.4b[3]\n"
".inst 0x4fa4eb99 // sdot v25.4s, v28.16b, v4.4b[3]\n"
- "ldr q28, [x15, #0xf0]\n"
+ "ldr q28, [x16, #0xf0]\n"
".inst 0x4fa0ebaa // sdot v10.4s, v29.16b, v0.4b[3]\n"
- "add x15, x15, #0x100\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1ebae // sdot v14.4s, v29.16b, v1.4b[3]\n"
".inst 0x4fa2ebb2 // sdot v18.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb6 // sdot v22.4s, v29.16b, v3.4b[3]\n"
@@ -2409,22 +2409,22 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr s0, [x10], #0x4\n"
"ldr s31, [x9], #0x4\n"
"ldr s30, [x28], #0x4\n"
- "ldr q29, [x15, #0x0]\n"
+ "ldr q29, [x16, #0x0]\n"
+ "ldr q28, [x16, #0x10]\n"
".inst 0x4f82e3a8 // sdot v8.4s, v29.16b, v2.4b[0]\n"
- "ldr q28, [x15, #0x10]\n"
".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f80e3b0 // sdot v16.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f9fe3b4 // sdot v20.4s, v29.16b, v31.4b[0]\n"
".inst 0x4f9ee3b8 // sdot v24.4s, v29.16b, v30.4b[0]\n"
- "ldr q29, [x15, #0x20]\n"
+ "ldr q29, [x16, #0x20]\n"
".inst 0x4f82e389 // sdot v9.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f81e38d // sdot v13.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f80e391 // sdot v17.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f9fe395 // sdot v21.4s, v28.16b, v31.4b[0]\n"
".inst 0x4f9ee399 // sdot v25.4s, v28.16b, v30.4b[0]\n"
- "ldr q28, [x15, #0x30]\n"
+ "ldr q28, [x16, #0x30]\n"
".inst 0x4f82e3aa // sdot v10.4s, v29.16b, v2.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f80e3b2 // sdot v18.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f9fe3b6 // sdot v22.4s, v29.16b, v31.4b[0]\n"
@@ -2457,22 +2457,22 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr b3, [x9, #0x0]\n"
"ldr b4, [x28, #0x0]\n"
"121:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q29, [x15, #0x0]\n"
+ "ldr q29, [x16, #0x0]\n"
+ "ldr q28, [x16, #0x10]\n"
".inst 0x4f80e3a8 // sdot v8.4s, v29.16b, v0.4b[0]\n"
- "ldr q28, [x15, #0x10]\n"
".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f82e3b0 // sdot v16.4s, v29.16b, v2.4b[0]\n"
".inst 0x4f83e3b4 // sdot v20.4s, v29.16b, v3.4b[0]\n"
".inst 0x4f84e3b8 // sdot v24.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x15, #0x20]\n"
+ "ldr q29, [x16, #0x20]\n"
".inst 0x4f80e389 // sdot v9.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f81e38d // sdot v13.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f82e391 // sdot v17.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f83e395 // sdot v21.4s, v28.16b, v3.4b[0]\n"
".inst 0x4f84e399 // sdot v25.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x15, #0x30]\n"
+ "ldr q28, [x16, #0x30]\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f82e3b2 // sdot v18.4s, v29.16b, v2.4b[0]\n"
".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n"
@@ -2488,26 +2488,26 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp x14, x20\n"
"bne 112b\n"
"ldr q31, [x6, #0x0]\n"
- "add v8.4s, v8.4s, v31.4s\n"
"ldr q30, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v30.4s\n"
"ldr q29, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v29.4s\n"
"ldr q28, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v28.4s\n"
+ "add v8.4s, v8.4s, v31.4s\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x17, x20\n"
+ "add v9.4s, v9.4s, v30.4s\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add v10.4s, v10.4s, v29.4s\n"
+ "add v11.4s, v11.4s, v28.4s\n"
+ "add v12.4s, v12.4s, v31.4s\n"
+ "add x26, x15, x20\n"
+ "add v13.4s, v13.4s, v30.4s\n"
+ "add x25, x26, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "add v12.4s, v12.4s, v31.4s\n"
+ "add x23, x24, x20\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add v13.4s, v13.4s, v30.4s\n"
"prfm pstl1keep, [x23, #0x0]\n"
"add v14.4s, v14.4s, v29.4s\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"add v15.4s, v15.4s, v28.4s\n"
"add v16.4s, v16.4s, v31.4s\n"
"add v17.4s, v17.4s, v30.4s\n"
@@ -2535,9 +2535,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add x7, x7, #0x40\n"
"b 124f\n"
"123:" // Height 5: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -2649,51 +2649,52 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v26.4s, v26.4s, v2.4s\n"
"srshl v27.4s, v27.4s, v3.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v28.4s }, [x20]\n"
- "add v8.4s, v8.4s, v28.4s\n"
- "add v9.4s, v9.4s, v28.4s\n"
- "add v10.4s, v10.4s, v28.4s\n"
- "add v11.4s, v11.4s, v28.4s\n"
- "add v12.4s, v12.4s, v28.4s\n"
- "add v13.4s, v13.4s, v28.4s\n"
- "add v14.4s, v14.4s, v28.4s\n"
- "add v15.4s, v15.4s, v28.4s\n"
- "add v16.4s, v16.4s, v28.4s\n"
- "add v17.4s, v17.4s, v28.4s\n"
- "add v18.4s, v18.4s, v28.4s\n"
- "add v19.4s, v19.4s, v28.4s\n"
- "add v20.4s, v20.4s, v28.4s\n"
- "add v21.4s, v21.4s, v28.4s\n"
- "add v22.4s, v22.4s, v28.4s\n"
- "add v23.4s, v23.4s, v28.4s\n"
- "add v24.4s, v24.4s, v28.4s\n"
- "add v25.4s, v25.4s, v28.4s\n"
- "add v26.4s, v26.4s, v28.4s\n"
- "add v27.4s, v27.4s, v28.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v28.4s }, [x20]\n"
- "smin v8.4s, v8.4s, v28.4s\n"
- "smin v9.4s, v9.4s, v28.4s\n"
- "smin v10.4s, v10.4s, v28.4s\n"
- "smin v11.4s, v11.4s, v28.4s\n"
- "smin v12.4s, v12.4s, v28.4s\n"
- "smin v13.4s, v13.4s, v28.4s\n"
- "smin v14.4s, v14.4s, v28.4s\n"
- "smin v15.4s, v15.4s, v28.4s\n"
- "smin v16.4s, v16.4s, v28.4s\n"
- "smin v17.4s, v17.4s, v28.4s\n"
- "smin v18.4s, v18.4s, v28.4s\n"
- "smin v19.4s, v19.4s, v28.4s\n"
- "smin v20.4s, v20.4s, v28.4s\n"
- "smin v21.4s, v21.4s, v28.4s\n"
- "smin v22.4s, v22.4s, v28.4s\n"
- "smin v23.4s, v23.4s, v28.4s\n"
- "smin v24.4s, v24.4s, v28.4s\n"
- "smin v25.4s, v25.4s, v28.4s\n"
- "smin v26.4s, v26.4s, v28.4s\n"
- "smin v27.4s, v27.4s, v28.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v30.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v29.4s }, [x21]\n"
+ "cmp x17, #0x10\n"
"ld1r { v28.4s }, [x20]\n"
+ "add v8.4s, v8.4s, v30.4s\n"
+ "add v9.4s, v9.4s, v30.4s\n"
+ "add v10.4s, v10.4s, v30.4s\n"
+ "add v11.4s, v11.4s, v30.4s\n"
+ "add v12.4s, v12.4s, v30.4s\n"
+ "add v13.4s, v13.4s, v30.4s\n"
+ "add v14.4s, v14.4s, v30.4s\n"
+ "add v15.4s, v15.4s, v30.4s\n"
+ "add v16.4s, v16.4s, v30.4s\n"
+ "add v17.4s, v17.4s, v30.4s\n"
+ "add v18.4s, v18.4s, v30.4s\n"
+ "add v19.4s, v19.4s, v30.4s\n"
+ "add v20.4s, v20.4s, v30.4s\n"
+ "add v21.4s, v21.4s, v30.4s\n"
+ "add v22.4s, v22.4s, v30.4s\n"
+ "add v23.4s, v23.4s, v30.4s\n"
+ "add v24.4s, v24.4s, v30.4s\n"
+ "add v25.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v30.4s\n"
+ "smin v8.4s, v8.4s, v29.4s\n"
+ "smin v9.4s, v9.4s, v29.4s\n"
+ "smin v10.4s, v10.4s, v29.4s\n"
+ "smin v11.4s, v11.4s, v29.4s\n"
+ "smin v12.4s, v12.4s, v29.4s\n"
+ "smin v13.4s, v13.4s, v29.4s\n"
+ "smin v14.4s, v14.4s, v29.4s\n"
+ "smin v15.4s, v15.4s, v29.4s\n"
+ "smin v16.4s, v16.4s, v29.4s\n"
+ "smin v17.4s, v17.4s, v29.4s\n"
+ "smin v18.4s, v18.4s, v29.4s\n"
+ "smin v19.4s, v19.4s, v29.4s\n"
+ "smin v20.4s, v20.4s, v29.4s\n"
+ "smin v21.4s, v21.4s, v29.4s\n"
+ "smin v22.4s, v22.4s, v29.4s\n"
+ "smin v23.4s, v23.4s, v29.4s\n"
+ "smin v24.4s, v24.4s, v29.4s\n"
+ "smin v25.4s, v25.4s, v29.4s\n"
+ "smin v26.4s, v26.4s, v29.4s\n"
+ "smin v27.4s, v27.4s, v29.4s\n"
"smax v8.4s, v8.4s, v28.4s\n"
"smax v9.4s, v9.4s, v28.4s\n"
"smax v10.4s, v10.4s, v28.4s\n"
@@ -2724,139 +2725,139 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"uzp1 v18.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v17.8h, v26.8h, v27.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v29.16b\n"
"uzp1 v12.16b, v12.16b, v28.16b\n"
"uzp1 v16.16b, v16.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v18.16b\n"
"uzp1 v24.16b, v24.16b, v17.16b\n"
"bge 134f\n"
- "tbz x16, #3, 129f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x16, #2, 127f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "tbz x16, #1, 126f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "tbz x17, #3, 129f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "tbz x17, #2, 127f\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "tbz x17, #1, 126f\n"
+ "st1 { v8.h }[6], [x15], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[14], [x15]\n"
+ "st1 { v12.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 133f\n"
"126:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[12], [x15]\n"
+ "st1 { v12.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 133f\n"
"127:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x16, #1, 128f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "tbz x17, #1, 128f\n"
+ "st1 { v8.h }[4], [x15], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[10], [x15]\n"
+ "st1 { v12.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 133f\n"
"128:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[8], [x15]\n"
+ "st1 { v12.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 133f\n"
"129:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x16, #2, 131f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "tbz x16, #1, 130f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "tbz x17, #2, 131f\n"
+ "str s8, [x15], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "tbz x17, #1, 130f\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[6], [x15]\n"
+ "st1 { v12.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 133f\n"
"130:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[4], [x15]\n"
+ "st1 { v12.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 133f\n"
"131:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x16, #1, 132f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "tbz x16, #0, 133f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "tbz x17, #1, 132f\n"
+ "str h8, [x15], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "tbz x17, #0, 133f\n"
+ "st1 { v8.b }[2], [x15]\n"
+ "st1 { v12.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 133f\n"
"132:" // Height 5: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b8, [x15, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"133:" // Height 5: Partial direct writeback: Done
"b 135f\n"
"134:" // Height 5: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q8, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
+ "str q12, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"135:" // Height 5: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 110b\n"
"b 164f\n"
"136:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"mov x20, #0x6\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x6, %x[col_bias]\n"
"ldr x7, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_N]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x15\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"137:" // Height 6: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2886,8 +2887,8 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"mov x14, #0x0\n"
"139:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w13, [x20, x14, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w13, [x20, x14, LSL #0x2]\n"
"tbz %x[flags], #3, 140f\n"
"ldr x20, [%x[input_ptr], x14, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2923,14 +2924,14 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr q3, [x9, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
"ldr q5, [x27, #0x0]\n"
- "ldr q6, [x15, #0x0]\n"
- "ldr q7, [x15, #0x10]\n"
+ "ldr q6, [x16, #0x0]\n"
+ "ldr q7, [x16, #0x10]\n"
"blt 143f\n"
"142:" // Height 6: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x15, #0x28]\n"
+ "ldr x21, [x16, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x38]\n"
+ "ldr x20, [x16, #0x38]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2938,149 +2939,150 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
"add x10, x10, #0x10\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x15, #0x20]\n"
+ "ldr d6, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x21\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x15, #0x48]\n"
+ "add x28, x28, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "mov v6.d[1], x21\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "add x28, x28, #0x10\n"
+ "ldr x21, [x16, #0x48]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
"add x27, x27, #0x10\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr d7, [x15, #0x30]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x16, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x26, [x12, #0x8]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x15, #0x58]\n"
+ "ldr x25, [x11, #0x8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr x26, [x12, #0x8]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x25, [x11, #0x8]\n"
+ "ldr x20, [x16, #0x58]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
"ldr x24, [x10, #0x8]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x15, #0x40]\n"
+ "ldr d6, [x16, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x21\n"
+ "ldr x23, [x9, #0x8]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x15, #0x68]\n"
+ "ldr x22, [x28, #0x8]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x23, [x9, #0x8]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "sub x13, x13, #0x10\n"
+ "ldr x21, [x16, #0x68]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
- "cmp x13, #0x20\n"
+ "sub x13, x13, #0x10\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr d7, [x15, #0x50]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x16, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "cmp x13, #0x20\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x78]\n"
- ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
"prfm pldl1keep, [x12, #0x80]\n"
+ "mov v7.d[1], x20\n"
+ ".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr x20, [x16, #0x78]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr d6, [x15, #0x60]\n"
+ "ldr d6, [x16, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x21\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x21, [x15, #0x88]\n"
- ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
"prfm pldl1keep, [x9, #0x80]\n"
+ ".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "ldr x21, [x16, #0x88]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr d7, [x15, #0x70]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x16, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x27, #0x80]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x20, [x15, #0x98]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
+ "ldr x20, [x16, #0x98]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr d6, [x15, #0x80]\n"
+ "ldr d6, [x16, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x21\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x21, [x15, #0xa8]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
+ "ldr x21, [x16, #0xa8]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr d7, [x15, #0x90]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x16, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xb8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x20, [x16, #0xb8]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr d6, [x15, #0xa0]\n"
+ "ldr d6, [x16, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x21\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x21, [x15, #0xc8]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x21, [x16, #0xc8]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr d7, [x15, #0xb0]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x16, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x20, [x15, #0xd8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x20, [x16, #0xd8]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr d6, [x15, #0xc0]\n"
+ "ldr d6, [x16, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x21\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x21, [x15, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x21, [x16, #0xe8]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr d7, [x15, #0xd0]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x16, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x20, [x15, #0xf8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x20, [x16, #0xf8]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr d6, [x15, #0xe0]\n"
+ "ldr d6, [x16, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x21\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
- "ldr x22, [x28, #0x8]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
+ "ldr x21, [x27, #0x8]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr d7, [x15, #0xf0]\n"
- "mov v7.d[1], x20\n"
- "add x15, x15, #0x100\n"
+ "ldr d7, [x16, #0xf0]\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x20, [x15, #0x8]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
+ "ldr x20, [x16, #0x8]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8de // sdot v30.4s, v6.16b, v5.4b[3]\n"
- "ldr d6, [x15, #0x0]\n"
+ "ldr d6, [x16, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"ldr d0, [x12, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
@@ -3093,11 +3095,10 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr d4, [x28, #0x0]\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
"ldr d5, [x27, #0x0]\n"
- "ldr d7, [x15, #0x10]\n"
+ "ldr d7, [x16, #0x10]\n"
"mov v6.d[1], x20\n"
- "ldr x21, [x27, #0x8]\n"
+ "ldr x20, [x16, #0x18]\n"
"mov v0.d[1], x26\n"
- "ldr x20, [x15, #0x18]\n"
"mov v1.d[1], x25\n"
"mov v2.d[1], x24\n"
"mov v3.d[1], x23\n"
@@ -3117,7 +3118,7 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
"add x28, x28, #0x10\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x15, #0x20]\n"
+ "ldr q6, [x16, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x27, x27, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -3129,7 +3130,7 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x15, #0x30]\n"
+ "ldr q7, [x16, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
"prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
@@ -3139,86 +3140,86 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x15, #0x40]\n"
+ "ldr q6, [x16, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x15, #0x50]\n"
+ "ldr q7, [x16, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x15, #0x60]\n"
+ "ldr q6, [x16, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x15, #0x70]\n"
+ "ldr q7, [x16, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x15, #0x80]\n"
+ "ldr q6, [x16, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x15, #0x90]\n"
+ "ldr q7, [x16, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x15, #0xa0]\n"
+ "ldr q6, [x16, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x15, #0xb0]\n"
+ "ldr q7, [x16, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x15, #0xc0]\n"
+ "ldr q6, [x16, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x15, #0xd0]\n"
+ "ldr q7, [x16, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x15, #0xe0]\n"
+ "ldr q6, [x16, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x15, #0xf0]\n"
+ "ldr q7, [x16, #0xf0]\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "add x15, x15, #0x100\n"
+ "add x16, x16, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
@@ -3243,24 +3244,24 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr s4, [x9], #0x4\n"
"ldr s3, [x28], #0x4\n"
"ldr s2, [x27], #0x4\n"
- "ldr q1, [x15, #0x0]\n"
+ "ldr q1, [x16, #0x0]\n"
+ "ldr q0, [x16, #0x10]\n"
".inst 0x4f87e028 // sdot v8.4s, v1.16b, v7.4b[0]\n"
- "ldr q0, [x15, #0x10]\n"
".inst 0x4f86e02c // sdot v12.4s, v1.16b, v6.4b[0]\n"
".inst 0x4f85e030 // sdot v16.4s, v1.16b, v5.4b[0]\n"
".inst 0x4f84e034 // sdot v20.4s, v1.16b, v4.4b[0]\n"
".inst 0x4f83e038 // sdot v24.4s, v1.16b, v3.4b[0]\n"
".inst 0x4f82e03c // sdot v28.4s, v1.16b, v2.4b[0]\n"
- "ldr q1, [x15, #0x20]\n"
+ "ldr q1, [x16, #0x20]\n"
".inst 0x4f87e009 // sdot v9.4s, v0.16b, v7.4b[0]\n"
".inst 0x4f86e00d // sdot v13.4s, v0.16b, v6.4b[0]\n"
".inst 0x4f85e011 // sdot v17.4s, v0.16b, v5.4b[0]\n"
".inst 0x4f84e015 // sdot v21.4s, v0.16b, v4.4b[0]\n"
".inst 0x4f83e019 // sdot v25.4s, v0.16b, v3.4b[0]\n"
".inst 0x4f82e01d // sdot v29.4s, v0.16b, v2.4b[0]\n"
- "ldr q0, [x15, #0x30]\n"
+ "ldr q0, [x16, #0x30]\n"
".inst 0x4f87e02a // sdot v10.4s, v1.16b, v7.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f86e02e // sdot v14.4s, v1.16b, v6.4b[0]\n"
".inst 0x4f85e032 // sdot v18.4s, v1.16b, v5.4b[0]\n"
".inst 0x4f84e036 // sdot v22.4s, v1.16b, v4.4b[0]\n"
@@ -3298,24 +3299,24 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"ldr b4, [x28, #0x0]\n"
"ldr b5, [x27, #0x0]\n"
"148:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x15, #0x0]\n"
+ "ldr q7, [x16, #0x0]\n"
+ "ldr q6, [x16, #0x10]\n"
".inst 0x4f80e0e8 // sdot v8.4s, v7.16b, v0.4b[0]\n"
- "ldr q6, [x15, #0x10]\n"
".inst 0x4f81e0ec // sdot v12.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f0 // sdot v16.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f4 // sdot v20.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f8 // sdot v24.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fc // sdot v28.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x15, #0x20]\n"
+ "ldr q7, [x16, #0x20]\n"
".inst 0x4f80e0c9 // sdot v9.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cd // sdot v13.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d1 // sdot v17.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d5 // sdot v21.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d9 // sdot v25.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dd // sdot v29.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x15, #0x30]\n"
+ "ldr q6, [x16, #0x30]\n"
".inst 0x4f80e0ea // sdot v10.4s, v7.16b, v0.4b[0]\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
".inst 0x4f81e0ee // sdot v14.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f2 // sdot v18.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f6 // sdot v22.4s, v7.16b, v3.4b[0]\n"
@@ -3333,30 +3334,30 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"cmp x14, x20\n"
"bne 139b\n"
"ldr q3, [x6, #0x0]\n"
- "add v8.4s, v8.4s, v3.4s\n"
"ldr q2, [x6, #0x10]\n"
- "add v9.4s, v9.4s, v2.4s\n"
"ldr q1, [x6, #0x20]\n"
- "add v10.4s, v10.4s, v1.4s\n"
"ldr q0, [x6, #0x30]\n"
- "add v11.4s, v11.4s, v0.4s\n"
+ "add v8.4s, v8.4s, v3.4s\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x17, x20\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "add v9.4s, v9.4s, v2.4s\n"
+ "prfm pstl1keep, [x15, #0x0]\n"
+ "add v10.4s, v10.4s, v1.4s\n"
+ "add v11.4s, v11.4s, v0.4s\n"
"add v12.4s, v12.4s, v3.4s\n"
- "prfm pstl1keep, [x25, #0x0]\n"
+ "add x26, x15, x20\n"
"add v13.4s, v13.4s, v2.4s\n"
+ "add x25, x26, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "add x24, x25, x20\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
+ "add x23, x24, x20\n"
"prfm pstl1keep, [x24, #0x0]\n"
- "add v14.4s, v14.4s, v1.4s\n"
+ "add x22, x23, x20\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "add v15.4s, v15.4s, v0.4s\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add v14.4s, v14.4s, v1.4s\n"
+ "add v15.4s, v15.4s, v0.4s\n"
"add v16.4s, v16.4s, v3.4s\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"add v17.4s, v17.4s, v2.4s\n"
"add v18.4s, v18.4s, v1.4s\n"
"add v19.4s, v19.4s, v0.4s\n"
@@ -3386,9 +3387,9 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"add x7, x7, #0x40\n"
"b 151f\n"
"150:" // Height 6: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -3520,59 +3521,60 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"srshl v30.4s, v30.4s, v2.4s\n"
"srshl v31.4s, v31.4s, v3.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v0.4s }, [x20]\n"
- "add v8.4s, v8.4s, v0.4s\n"
- "add v9.4s, v9.4s, v0.4s\n"
- "add v10.4s, v10.4s, v0.4s\n"
- "add v11.4s, v11.4s, v0.4s\n"
- "add v12.4s, v12.4s, v0.4s\n"
- "add v13.4s, v13.4s, v0.4s\n"
- "add v14.4s, v14.4s, v0.4s\n"
- "add v15.4s, v15.4s, v0.4s\n"
- "add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v0.4s\n"
- "add v18.4s, v18.4s, v0.4s\n"
- "add v19.4s, v19.4s, v0.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
- "add v21.4s, v21.4s, v0.4s\n"
- "add v22.4s, v22.4s, v0.4s\n"
- "add v23.4s, v23.4s, v0.4s\n"
- "add v24.4s, v24.4s, v0.4s\n"
- "add v25.4s, v25.4s, v0.4s\n"
- "add v26.4s, v26.4s, v0.4s\n"
- "add v27.4s, v27.4s, v0.4s\n"
- "add v28.4s, v28.4s, v0.4s\n"
- "add v29.4s, v29.4s, v0.4s\n"
- "add v30.4s, v30.4s, v0.4s\n"
- "add v31.4s, v31.4s, v0.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v0.4s }, [x20]\n"
- "smin v8.4s, v8.4s, v0.4s\n"
- "smin v9.4s, v9.4s, v0.4s\n"
- "smin v10.4s, v10.4s, v0.4s\n"
- "smin v11.4s, v11.4s, v0.4s\n"
- "smin v12.4s, v12.4s, v0.4s\n"
- "smin v13.4s, v13.4s, v0.4s\n"
- "smin v14.4s, v14.4s, v0.4s\n"
- "smin v15.4s, v15.4s, v0.4s\n"
- "smin v16.4s, v16.4s, v0.4s\n"
- "smin v17.4s, v17.4s, v0.4s\n"
- "smin v18.4s, v18.4s, v0.4s\n"
- "smin v19.4s, v19.4s, v0.4s\n"
- "smin v20.4s, v20.4s, v0.4s\n"
- "smin v21.4s, v21.4s, v0.4s\n"
- "smin v22.4s, v22.4s, v0.4s\n"
- "smin v23.4s, v23.4s, v0.4s\n"
- "smin v24.4s, v24.4s, v0.4s\n"
- "smin v25.4s, v25.4s, v0.4s\n"
- "smin v26.4s, v26.4s, v0.4s\n"
- "smin v27.4s, v27.4s, v0.4s\n"
- "smin v28.4s, v28.4s, v0.4s\n"
- "smin v29.4s, v29.4s, v0.4s\n"
- "smin v30.4s, v30.4s, v0.4s\n"
- "smin v31.4s, v31.4s, v0.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v2.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v1.4s }, [x21]\n"
+ "cmp x17, #0x10\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v8.4s, v8.4s, v2.4s\n"
+ "add v9.4s, v9.4s, v2.4s\n"
+ "add v10.4s, v10.4s, v2.4s\n"
+ "add v11.4s, v11.4s, v2.4s\n"
+ "add v12.4s, v12.4s, v2.4s\n"
+ "add v13.4s, v13.4s, v2.4s\n"
+ "add v14.4s, v14.4s, v2.4s\n"
+ "add v15.4s, v15.4s, v2.4s\n"
+ "add v16.4s, v16.4s, v2.4s\n"
+ "add v17.4s, v17.4s, v2.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
+ "add v19.4s, v19.4s, v2.4s\n"
+ "add v20.4s, v20.4s, v2.4s\n"
+ "add v21.4s, v21.4s, v2.4s\n"
+ "add v22.4s, v22.4s, v2.4s\n"
+ "add v23.4s, v23.4s, v2.4s\n"
+ "add v24.4s, v24.4s, v2.4s\n"
+ "add v25.4s, v25.4s, v2.4s\n"
+ "add v26.4s, v26.4s, v2.4s\n"
+ "add v27.4s, v27.4s, v2.4s\n"
+ "add v28.4s, v28.4s, v2.4s\n"
+ "add v29.4s, v29.4s, v2.4s\n"
+ "add v30.4s, v30.4s, v2.4s\n"
+ "add v31.4s, v31.4s, v2.4s\n"
+ "smin v8.4s, v8.4s, v1.4s\n"
+ "smin v9.4s, v9.4s, v1.4s\n"
+ "smin v10.4s, v10.4s, v1.4s\n"
+ "smin v11.4s, v11.4s, v1.4s\n"
+ "smin v12.4s, v12.4s, v1.4s\n"
+ "smin v13.4s, v13.4s, v1.4s\n"
+ "smin v14.4s, v14.4s, v1.4s\n"
+ "smin v15.4s, v15.4s, v1.4s\n"
+ "smin v16.4s, v16.4s, v1.4s\n"
+ "smin v17.4s, v17.4s, v1.4s\n"
+ "smin v18.4s, v18.4s, v1.4s\n"
+ "smin v19.4s, v19.4s, v1.4s\n"
+ "smin v20.4s, v20.4s, v1.4s\n"
+ "smin v21.4s, v21.4s, v1.4s\n"
+ "smin v22.4s, v22.4s, v1.4s\n"
+ "smin v23.4s, v23.4s, v1.4s\n"
+ "smin v24.4s, v24.4s, v1.4s\n"
+ "smin v25.4s, v25.4s, v1.4s\n"
+ "smin v26.4s, v26.4s, v1.4s\n"
+ "smin v27.4s, v27.4s, v1.4s\n"
+ "smin v28.4s, v28.4s, v1.4s\n"
+ "smin v29.4s, v29.4s, v1.4s\n"
+ "smin v30.4s, v30.4s, v1.4s\n"
+ "smin v31.4s, v31.4s, v1.4s\n"
"smax v8.4s, v8.4s, v0.4s\n"
"smax v9.4s, v9.4s, v0.4s\n"
"smax v10.4s, v10.4s, v0.4s\n"
@@ -3609,7 +3611,6 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"uzp1 v18.8h, v26.8h, v27.8h\n"
"uzp1 v28.8h, v28.8h, v29.8h\n"
"uzp1 v17.8h, v30.8h, v31.8h\n"
- "cmp x16, #0x10\n"
"uzp1 v8.16b, v8.16b, v2.16b\n"
"uzp1 v12.16b, v12.16b, v1.16b\n"
"uzp1 v16.16b, v16.16b, v0.16b\n"
@@ -3617,136 +3618,136 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"uzp1 v24.16b, v24.16b, v18.16b\n"
"uzp1 v28.16b, v28.16b, v17.16b\n"
"bge 161f\n"
- "tbz x16, #3, 156f\n"
- "str d8, [x17], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
- "tbz x16, #2, 154f\n"
- "st1 { v8.s }[2], [x17], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
- "tbz x16, #1, 153f\n"
- "st1 { v8.h }[6], [x17], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "st1 { v28.h }[6], [x21], #0x2\n"
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[14], [x17]\n"
- "st1 { v12.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
- "st1 { v28.b }[14], [x21]\n"
+ "tbz x17, #3, 156f\n"
+ "str d8, [x15], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
+ "tbz x17, #2, 154f\n"
+ "st1 { v8.s }[2], [x15], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
+ "tbz x17, #1, 153f\n"
+ "st1 { v8.h }[6], [x15], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "st1 { v28.h }[6], [x22], #0x2\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[14], [x15]\n"
+ "st1 { v12.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "st1 { v28.b }[14], [x22]\n"
"b 160f\n"
"153:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[12], [x17]\n"
- "st1 { v12.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
- "st1 { v28.b }[12], [x21]\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[12], [x15]\n"
+ "st1 { v12.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "st1 { v28.b }[12], [x22]\n"
"b 160f\n"
"154:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x16, #1, 155f\n"
- "st1 { v8.h }[4], [x17], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "st1 { v28.h }[4], [x21], #0x2\n"
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[10], [x17]\n"
- "st1 { v12.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
- "st1 { v28.b }[10], [x21]\n"
+ "tbz x17, #1, 155f\n"
+ "st1 { v8.h }[4], [x15], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "st1 { v28.h }[4], [x22], #0x2\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[10], [x15]\n"
+ "st1 { v12.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "st1 { v28.b }[10], [x22]\n"
"b 160f\n"
"155:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[8], [x17]\n"
- "st1 { v12.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
- "st1 { v28.b }[8], [x21]\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[8], [x15]\n"
+ "st1 { v12.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "st1 { v28.b }[8], [x22]\n"
"b 160f\n"
"156:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x16, #2, 158f\n"
- "str s8, [x17], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
- "tbz x16, #1, 157f\n"
- "st1 { v8.h }[2], [x17], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "st1 { v28.h }[2], [x21], #0x2\n"
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[6], [x17]\n"
- "st1 { v12.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
- "st1 { v28.b }[6], [x21]\n"
+ "tbz x17, #2, 158f\n"
+ "str s8, [x15], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
+ "tbz x17, #1, 157f\n"
+ "st1 { v8.h }[2], [x15], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "st1 { v28.h }[2], [x22], #0x2\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[6], [x15]\n"
+ "st1 { v12.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "st1 { v28.b }[6], [x22]\n"
"b 160f\n"
"157:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[4], [x17]\n"
- "st1 { v12.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
- "st1 { v28.b }[4], [x21]\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[4], [x15]\n"
+ "st1 { v12.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "st1 { v28.b }[4], [x22]\n"
"b 160f\n"
"158:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x16, #1, 159f\n"
- "str h8, [x17], #0x2\n"
- "str h12, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "str h28, [x21], #0x2\n"
- "tbz x16, #0, 160f\n"
- "st1 { v8.b }[2], [x17]\n"
- "st1 { v12.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
- "st1 { v28.b }[2], [x21]\n"
+ "tbz x17, #1, 159f\n"
+ "str h8, [x15], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "str h28, [x22], #0x2\n"
+ "tbz x17, #0, 160f\n"
+ "st1 { v8.b }[2], [x15]\n"
+ "st1 { v12.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "st1 { v28.b }[2], [x22]\n"
"b 160f\n"
"159:" // Height 6: Partial direct writeback: partial_1_0
- "str b8, [x17, #0x0]\n"
- "str b12, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
- "str b28, [x21, #0x0]\n"
+ "str b8, [x15, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "str b28, [x22, #0x0]\n"
"160:" // Height 6: Partial direct writeback: Done
"b 162f\n"
"161:" // Height 6: Full writeback
- "str q8, [x17, #0x0]\n"
- "add x17, x17, #0x10\n"
- "str q12, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
- "str q28, [x21, #0x0]\n"
+ "str q8, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
+ "str q12, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q28, [x22, #0x0]\n"
"162:" // Height 6: Writeback done
- "subs x16, x16, #0x10\n"
+ "subs x17, x17, #0x10\n"
"bgt 137b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 164f\n"
@@ -3760,8 +3761,8 @@ void a64_hybrid_s8qs_dot_6x16_a55 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"164:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp
index f3942328a6..dbff7baee7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,18 @@ void a64_hybrid_s8qs_dot_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
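
The hunk above shows the pattern this patch applies across every kernel variant: the output pointer moves out of the inline-asm operand list and into the KernelArgs struct, so the assembly loads it on demand through an offsetof() immediate instead of tying up a general-purpose register operand for the entire asm body. A minimal sketch of that idiom, with illustrative names (Args, demo) that are not from the library:

    #include <cstddef>

    struct Args {
        void *output_ptr;  // carried in the struct, not bound as a "+&r" operand
    };

    void demo(Args &ka) {
        __asm__ __volatile__(
            // Load the pointer only where it is needed; the register that
            // previously held it is free for the rest of the asm body.
            "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            "str xzr, [x9]\n"  // placeholder store through the loaded pointer
            :
            : [args_ptr] "r" (&ka),
              [offsetof_output_ptr] "I" (offsetof(Args, output_ptr))
            : "memory", "x9");
    }

This mirrors the constraint-list change at the a55 function exit above, where output_ptr disappears from the "+&r" outputs and offsetof_output_ptr joins the "I" inputs.
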
@@ -97,9 +97,9 @@ void a64_hybrid_s8qs_dot_6x16 (
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -109,8 +109,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"mov x28, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -125,102 +125,102 @@ void a64_hybrid_s8qs_dot_6x16 (
"cmp x27, #0x10\n"
"blt 9f\n"
"ldr q0, [x26, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
"cmp x27, #0x20\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "ldr q17, [x9, #0x40]\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x9, #0x60]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x9, #0x70]\n"
+ "ldr q16, [x10, #0x70]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x9, #0x80]\n"
+ "ldr q17, [x10, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x9, #0x90]\n"
+ "ldr q16, [x10, #0x90]\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x9, #0xa0]\n"
+ "ldr q17, [x10, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x9, #0xb0]\n"
+ "ldr q16, [x10, #0xb0]\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x9, #0xc0]\n"
+ "ldr q17, [x10, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x9, #0xd0]\n"
+ "ldr q16, [x10, #0xd0]\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr q17, [x9, #0xe0]\n"
+ "ldr q17, [x10, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr q16, [x9, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
"ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x20\n"
- "add x9, x9, #0x100\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "ldr q17, [x9, #0x40]\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x9, #0x60]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x9, #0x70]\n"
+ "ldr q16, [x10, #0x70]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x9, #0x80]\n"
+ "ldr q17, [x10, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x9, #0x90]\n"
+ "ldr q16, [x10, #0x90]\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x9, #0xa0]\n"
+ "ldr q17, [x10, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x9, #0xb0]\n"
+ "ldr q16, [x10, #0xb0]\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x9, #0xc0]\n"
+ "ldr q17, [x10, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x9, #0xd0]\n"
+ "ldr q16, [x10, #0xd0]\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr q17, [x9, #0xe0]\n"
+ "ldr q17, [x10, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr q16, [x9, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x9, x9, #0x100\n"
"9:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 14f\n"
"cmp x27, #0x4\n"
"blt 11f\n"
"10:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x26], #0x4\n"
- "ldr q16, [x9, #0x0]\n"
- ".inst 0x4f92e208 // sdot v8.4s, v16.16b, v18.4b[0]\n"
+ "ldr q17, [x10, #0x0]\n"
"sub x27, x27, #0x4\n"
- "ldr q16, [x9, #0x10]\n"
- "ldr q17, [x9, #0x20]\n"
- ".inst 0x4f92e209 // sdot v9.4s, v16.16b, v18.4b[0]\n"
+ "ldr q16, [x10, #0x10]\n"
"cmp x27, #0x4\n"
- "ldr q16, [x9, #0x30]\n"
+ ".inst 0x4f92e228 // sdot v8.4s, v17.16b, v18.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f92e209 // sdot v9.4s, v16.16b, v18.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f92e22a // sdot v10.4s, v17.16b, v18.4b[0]\n"
".inst 0x4f92e20b // sdot v11.4s, v16.16b, v18.4b[0]\n"
- "add x9, x9, #0x40\n"
"bge 10b\n"
"11:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 14f\n"
@@ -232,30 +232,30 @@ void a64_hybrid_s8qs_dot_6x16 (
"12:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x26, #0x0]\n"
"13:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q17, [x9, #0x0]\n"
- "ldr q16, [x9, #0x10]\n"
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q16, [x10, #0x10]\n"
".inst 0x4f80e228 // sdot v8.4s, v17.16b, v0.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
- "ldr q17, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "add x9, x9, #0x40\n"
"14:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 4b\n"
- "ldr q17, [x14, #0x0]\n"
- "ldr q16, [x14, #0x10]\n"
- "add v8.4s, v8.4s, v17.4s\n"
- "add v9.4s, v9.4s, v16.4s\n"
+ "ldr q19, [x14, #0x0]\n"
+ "ldr q18, [x14, #0x10]\n"
"ldr q17, [x14, #0x20]\n"
"ldr q16, [x14, #0x30]\n"
+ "add x14, x14, #0x40\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add v8.4s, v8.4s, v19.4s\n"
+ "add v9.4s, v9.4s, v18.4s\n"
"add v10.4s, v10.4s, v17.4s\n"
"add v11.4s, v11.4s, v16.4s\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x14, x14, #0x40\n"
"tbz %x[flags], #4, 15f\n"
"ldr q0, [x12, #0x0]\n"
"ldr q4, [x13, #0x0]\n"
@@ -269,9 +269,9 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x13, x13, #0x40\n"
"b 16f\n"
"15:" // Height 1: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -298,21 +298,21 @@ void a64_hybrid_s8qs_dot_6x16 (
"sqadd v10.4s, v10.4s, v17.4s\n"
"sqadd v11.4s, v11.4s, v16.4s\n"
"17:" // Height 1: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
- "add v8.4s, v8.4s, v18.4s\n"
- "add v9.4s, v9.4s, v18.4s\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v16.4s }, [x20]\n"
+ "add v8.4s, v8.4s, v18.4s\n"
+ "add v9.4s, v9.4s, v18.4s\n"
"add v10.4s, v10.4s, v18.4s\n"
"add v11.4s, v11.4s, v18.4s\n"
- "cmp x10, #0x10\n"
"smin v8.4s, v8.4s, v17.4s\n"
"smin v9.4s, v9.4s, v17.4s\n"
"smin v10.4s, v10.4s, v17.4s\n"
@@ -325,65 +325,65 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v16.8h, v10.8h, v11.8h\n"
"uzp1 v8.16b, v8.16b, v16.16b\n"
"bge 26f\n"
- "tbz x10, #3, 21f\n"
- "str d8, [x11], #0x8\n"
- "tbz x10, #2, 19f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "tbz x10, #1, 18f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "tbz x10, #0, 25f\n"
- "st1 { v8.b }[14], [x11]\n"
+ "tbz x11, #3, 21f\n"
+ "str d8, [x9], #0x8\n"
+ "tbz x11, #2, 19f\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "tbz x11, #1, 18f\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "tbz x11, #0, 25f\n"
+ "st1 { v8.b }[14], [x9]\n"
"b 25f\n"
"18:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x10, #0, 25f\n"
- "st1 { v8.b }[12], [x11]\n"
+ "tbz x11, #0, 25f\n"
+ "st1 { v8.b }[12], [x9]\n"
"b 25f\n"
"19:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x10, #1, 20f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "tbz x10, #0, 25f\n"
- "st1 { v8.b }[10], [x11]\n"
+ "tbz x11, #1, 20f\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "tbz x11, #0, 25f\n"
+ "st1 { v8.b }[10], [x9]\n"
"b 25f\n"
"20:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x10, #0, 25f\n"
- "st1 { v8.b }[8], [x11]\n"
+ "tbz x11, #0, 25f\n"
+ "st1 { v8.b }[8], [x9]\n"
"b 25f\n"
"21:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x10, #2, 23f\n"
- "str s8, [x11], #0x4\n"
- "tbz x10, #1, 22f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "tbz x10, #0, 25f\n"
- "st1 { v8.b }[6], [x11]\n"
+ "tbz x11, #2, 23f\n"
+ "str s8, [x9], #0x4\n"
+ "tbz x11, #1, 22f\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "tbz x11, #0, 25f\n"
+ "st1 { v8.b }[6], [x9]\n"
"b 25f\n"
"22:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x10, #0, 25f\n"
- "st1 { v8.b }[4], [x11]\n"
+ "tbz x11, #0, 25f\n"
+ "st1 { v8.b }[4], [x9]\n"
"b 25f\n"
"23:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x10, #1, 24f\n"
- "str h8, [x11], #0x2\n"
- "tbz x10, #0, 25f\n"
- "st1 { v8.b }[2], [x11]\n"
+ "tbz x11, #1, 24f\n"
+ "str h8, [x9], #0x2\n"
+ "tbz x11, #0, 25f\n"
+ "st1 { v8.b }[2], [x9]\n"
"b 25f\n"
"24:" // Height 1: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
"25:" // Height 1: Partial direct writeback: Done
"b 27f\n"
"26:" // Height 1: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
"27:" // Height 1: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 2b\n"
"b 164f\n"
"28:" // Height 2
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"29:" // Height 2: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -397,8 +397,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"mov x28, #0x0\n"
"31:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 32f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -418,117 +418,117 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q0, [x26, #0x0]\n"
"ldr q1, [x25, #0x0]\n"
"cmp x27, #0x20\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 35f\n"
"34:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
"sub x27, x27, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x9, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "ldr q17, [x10, #0x40]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x9, #0x50]\n"
- "cmp x27, #0x20\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x9, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x9, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q16, [x10, #0x70]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x9, #0x80]\n"
+ "ldr q17, [x10, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20f // sdot v15.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x9, #0x90]\n"
+ "ldr q16, [x10, #0x90]\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
".inst 0x4f81ea2c // sdot v12.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x9, #0xa0]\n"
+ "ldr q17, [x10, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
".inst 0x4f81ea0d // sdot v13.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x9, #0xb0]\n"
+ "ldr q16, [x10, #0xb0]\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
".inst 0x4f81ea2e // sdot v14.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x9, #0xc0]\n"
+ "ldr q17, [x10, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
".inst 0x4f81ea0f // sdot v15.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x9, #0xd0]\n"
+ "ldr q16, [x10, #0xd0]\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa1ea2c // sdot v12.4s, v17.16b, v1.4b[3]\n"
- "ldr q17, [x9, #0xe0]\n"
+ "ldr q17, [x10, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
".inst 0x4fa1ea0d // sdot v13.4s, v16.16b, v1.4b[3]\n"
- "ldr q16, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa1ea2e // sdot v14.4s, v17.16b, v1.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
"ldr q0, [x26, #0x0]\n"
".inst 0x4fa1ea0f // sdot v15.4s, v16.16b, v1.4b[3]\n"
"ldr q1, [x25, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 34b\n"
"35:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
"add x26, x26, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
"add x25, x25, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x9, #0x40]\n"
- "sub x27, x27, #0x10\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x9, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x9, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x9, #0x70]\n"
+ "ldr q16, [x10, #0x70]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x9, #0x80]\n"
+ "ldr q17, [x10, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20f // sdot v15.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x9, #0x90]\n"
+ "ldr q16, [x10, #0x90]\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
".inst 0x4f81ea2c // sdot v12.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x9, #0xa0]\n"
+ "ldr q17, [x10, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
".inst 0x4f81ea0d // sdot v13.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x9, #0xb0]\n"
+ "ldr q16, [x10, #0xb0]\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
".inst 0x4f81ea2e // sdot v14.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x9, #0xc0]\n"
+ "ldr q17, [x10, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
".inst 0x4f81ea0f // sdot v15.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x9, #0xd0]\n"
+ "ldr q16, [x10, #0xd0]\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa1ea2c // sdot v12.4s, v17.16b, v1.4b[3]\n"
- "ldr q17, [x9, #0xe0]\n"
+ "ldr q17, [x10, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
".inst 0x4fa1ea0d // sdot v13.4s, v16.16b, v1.4b[3]\n"
- "ldr q16, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa1ea2e // sdot v14.4s, v17.16b, v1.4b[3]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
@@ -541,18 +541,18 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr s19, [x26], #0x4\n"
"ldr s18, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q16, [x10, #0x10]\n"
"cmp x27, #0x4\n"
- "ldr q17, [x9, #0x0]\n"
- "ldr q16, [x9, #0x10]\n"
".inst 0x4f93e228 // sdot v8.4s, v17.16b, v19.4b[0]\n"
".inst 0x4f92e22c // sdot v12.4s, v17.16b, v18.4b[0]\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4f93e209 // sdot v9.4s, v16.16b, v19.4b[0]\n"
".inst 0x4f92e20d // sdot v13.4s, v16.16b, v18.4b[0]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f93e22a // sdot v10.4s, v17.16b, v19.4b[0]\n"
".inst 0x4f92e22e // sdot v14.4s, v17.16b, v18.4b[0]\n"
- "add x9, x9, #0x40\n"
".inst 0x4f93e20b // sdot v11.4s, v16.16b, v19.4b[0]\n"
".inst 0x4f92e20f // sdot v15.4s, v16.16b, v18.4b[0]\n"
"bge 37b\n"
@@ -569,17 +569,17 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr b0, [x26, #0x0]\n"
"ldr b1, [x25, #0x0]\n"
"40:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q17, [x9, #0x0]\n"
- "ldr q16, [x9, #0x10]\n"
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q16, [x10, #0x10]\n"
".inst 0x4f80e228 // sdot v8.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f81e22c // sdot v12.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20d // sdot v13.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
- "add x9, x9, #0x40\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
"41:" // Height 2: Multiply loop: No odd multiplies
@@ -589,20 +589,20 @@ void a64_hybrid_s8qs_dot_6x16 (
"bne 31b\n"
"ldr q19, [x14, #0x0]\n"
"ldr q18, [x14, #0x10]\n"
- "add v8.4s, v8.4s, v19.4s\n"
- "add v9.4s, v9.4s, v18.4s\n"
"ldr q17, [x14, #0x20]\n"
"ldr q16, [x14, #0x30]\n"
- "add v10.4s, v10.4s, v17.4s\n"
- "add v11.4s, v11.4s, v16.4s\n"
+ "add x14, x14, #0x40\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x11, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add v8.4s, v8.4s, v19.4s\n"
+ "add v9.4s, v9.4s, v18.4s\n"
"add v12.4s, v12.4s, v19.4s\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"add v13.4s, v13.4s, v18.4s\n"
+ "add v10.4s, v10.4s, v17.4s\n"
+ "add v11.4s, v11.4s, v16.4s\n"
+ "add x26, x9, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"add v14.4s, v14.4s, v17.4s\n"
- "add x14, x14, #0x40\n"
"add v15.4s, v15.4s, v16.4s\n"
"tbz %x[flags], #4, 42f\n"
"ldr q0, [x12, #0x0]\n"
@@ -617,9 +617,9 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x13, x13, #0x40\n"
"b 43f\n"
"42:" // Height 2: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -646,11 +646,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v19.4s\n"
+ "and v19.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v18.4s\n"
+ "and v18.16b, v13.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v17.4s\n"
"sqadd v11.4s, v11.4s, v16.4s\n"
- "and v19.16b, v12.16b, v0.16b\n"
- "and v18.16b, v13.16b, v1.16b\n"
"and v17.16b, v14.16b, v2.16b\n"
"and v16.16b, v15.16b, v3.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
@@ -662,21 +662,21 @@ void a64_hybrid_s8qs_dot_6x16 (
"sqadd v14.4s, v14.4s, v17.4s\n"
"sqadd v15.4s, v15.4s, v16.4s\n"
"44:" // Height 2: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v16.4s }, [x20]\n"
"srshl v14.4s, v14.4s, v2.4s\n"
"srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"add v8.4s, v8.4s, v18.4s\n"
"add v9.4s, v9.4s, v18.4s\n"
"add v10.4s, v10.4s, v18.4s\n"
@@ -708,81 +708,81 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v8.16b, v8.16b, v17.16b\n"
"uzp1 v12.16b, v12.16b, v16.16b\n"
"bge 53f\n"
- "tbz x10, #3, 48f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x25], #0x8\n"
- "tbz x10, #2, 46f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "tbz x10, #1, 45f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "tbz x10, #0, 52f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x25]\n"
+ "tbz x11, #3, 48f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "tbz x11, #2, 46f\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "tbz x11, #1, 45f\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "tbz x11, #0, 52f\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x26]\n"
"b 52f\n"
"45:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x10, #0, 52f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x25]\n"
+ "tbz x11, #0, 52f\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x26]\n"
"b 52f\n"
"46:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x10, #1, 47f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "tbz x10, #0, 52f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x25]\n"
+ "tbz x11, #1, 47f\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "tbz x11, #0, 52f\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x26]\n"
"b 52f\n"
"47:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x10, #0, 52f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x25]\n"
+ "tbz x11, #0, 52f\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x26]\n"
"b 52f\n"
"48:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x10, #2, 50f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x25], #0x4\n"
- "tbz x10, #1, 49f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "tbz x10, #0, 52f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x25]\n"
+ "tbz x11, #2, 50f\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "tbz x11, #1, 49f\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "tbz x11, #0, 52f\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x26]\n"
"b 52f\n"
"49:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x10, #0, 52f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x25]\n"
+ "tbz x11, #0, 52f\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x26]\n"
"b 52f\n"
"50:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x10, #1, 51f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x25], #0x2\n"
- "tbz x10, #0, 52f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x25]\n"
+ "tbz x11, #1, 51f\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "tbz x11, #0, 52f\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x26]\n"
"b 52f\n"
"51:" // Height 2: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x25, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
"52:" // Height 2: Partial direct writeback: Done
"b 54f\n"
"53:" // Height 2: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x25, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x26, #0x0]\n"
"54:" // Height 2: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 29b\n"
"b 164f\n"
"55:" // Height 3
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"56:" // Height 3: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -800,8 +800,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"mov x28, #0x0\n"
"58:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -825,8 +825,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q1, [x25, #0x0]\n"
"cmp x27, #0x20\n"
"ldr q2, [x24, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 62f\n"
"61:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
@@ -834,77 +834,77 @@ void a64_hybrid_s8qs_dot_6x16 (
"sub x27, x27, #0x10\n"
"add x26, x26, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q21, [x9, #0x20]\n"
+ "ldr q21, [x10, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x25, x25, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q20, [x9, #0x30]\n"
+ "ldr q20, [x10, #0x30]\n"
"add x24, x24, #0x10\n"
- ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
- ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
"cmp x27, #0x20\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x9, #0x40]\n"
+ "ldr q21, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x9, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q20, [x10, #0x50]\n"
".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x9, #0x60]\n"
+ "ldr q21, [x10, #0x60]\n"
".inst 0x4fa0e289 // sdot v9.4s, v20.16b, v0.4b[1]\n"
".inst 0x4fa1e28d // sdot v13.4s, v20.16b, v1.4b[1]\n"
".inst 0x4fa2e291 // sdot v17.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x9, #0x70]\n"
+ "ldr q20, [x10, #0x70]\n"
".inst 0x4fa0e2aa // sdot v10.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ae // sdot v14.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b2 // sdot v18.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x9, #0x80]\n"
+ "ldr q21, [x10, #0x80]\n"
".inst 0x4fa0e28b // sdot v11.4s, v20.16b, v0.4b[1]\n"
".inst 0x4fa1e28f // sdot v15.4s, v20.16b, v1.4b[1]\n"
".inst 0x4fa2e293 // sdot v19.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x9, #0x90]\n"
+ "ldr q20, [x10, #0x90]\n"
".inst 0x4f80eaa8 // sdot v8.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaac // sdot v12.4s, v21.16b, v1.4b[2]\n"
".inst 0x4f82eab0 // sdot v16.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x9, #0xa0]\n"
+ "ldr q21, [x10, #0xa0]\n"
".inst 0x4f80ea89 // sdot v9.4s, v20.16b, v0.4b[2]\n"
".inst 0x4f81ea8d // sdot v13.4s, v20.16b, v1.4b[2]\n"
".inst 0x4f82ea91 // sdot v17.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x9, #0xb0]\n"
+ "ldr q20, [x10, #0xb0]\n"
".inst 0x4f80eaaa // sdot v10.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaae // sdot v14.4s, v21.16b, v1.4b[2]\n"
".inst 0x4f82eab2 // sdot v18.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x9, #0xc0]\n"
+ "ldr q21, [x10, #0xc0]\n"
".inst 0x4f80ea8b // sdot v11.4s, v20.16b, v0.4b[2]\n"
".inst 0x4f81ea8f // sdot v15.4s, v20.16b, v1.4b[2]\n"
".inst 0x4f82ea93 // sdot v19.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x9, #0xd0]\n"
+ "ldr q20, [x10, #0xd0]\n"
".inst 0x4fa0eaa8 // sdot v8.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa1eaac // sdot v12.4s, v21.16b, v1.4b[3]\n"
".inst 0x4fa2eab0 // sdot v16.4s, v21.16b, v2.4b[3]\n"
- "ldr q21, [x9, #0xe0]\n"
+ "ldr q21, [x10, #0xe0]\n"
".inst 0x4fa0ea89 // sdot v9.4s, v20.16b, v0.4b[3]\n"
".inst 0x4fa1ea8d // sdot v13.4s, v20.16b, v1.4b[3]\n"
".inst 0x4fa2ea91 // sdot v17.4s, v20.16b, v2.4b[3]\n"
- "ldr q20, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q20, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0eaaa // sdot v10.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa1eaae // sdot v14.4s, v21.16b, v1.4b[3]\n"
".inst 0x4fa2eab2 // sdot v18.4s, v21.16b, v2.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4fa0ea8b // sdot v11.4s, v20.16b, v0.4b[3]\n"
"ldr q0, [x26, #0x0]\n"
".inst 0x4fa1ea8f // sdot v15.4s, v20.16b, v1.4b[3]\n"
"ldr q1, [x25, #0x0]\n"
".inst 0x4fa2ea93 // sdot v19.4s, v20.16b, v2.4b[3]\n"
"ldr q2, [x24, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 61b\n"
"62:" // Height 3: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
@@ -912,65 +912,65 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x26, x26, #0x10\n"
"add x25, x25, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q21, [x9, #0x20]\n"
+ "ldr q21, [x10, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x24, x24, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q20, [x9, #0x30]\n"
+ "ldr q20, [x10, #0x30]\n"
"sub x27, x27, #0x10\n"
- ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
- ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x9, #0x40]\n"
- ".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
+ "ldr q21, [x10, #0x40]\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x9, #0x50]\n"
+ "ldr q20, [x10, #0x50]\n"
".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x9, #0x60]\n"
+ "ldr q21, [x10, #0x60]\n"
".inst 0x4fa0e289 // sdot v9.4s, v20.16b, v0.4b[1]\n"
".inst 0x4fa1e28d // sdot v13.4s, v20.16b, v1.4b[1]\n"
".inst 0x4fa2e291 // sdot v17.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x9, #0x70]\n"
+ "ldr q20, [x10, #0x70]\n"
".inst 0x4fa0e2aa // sdot v10.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ae // sdot v14.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b2 // sdot v18.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x9, #0x80]\n"
+ "ldr q21, [x10, #0x80]\n"
".inst 0x4fa0e28b // sdot v11.4s, v20.16b, v0.4b[1]\n"
".inst 0x4fa1e28f // sdot v15.4s, v20.16b, v1.4b[1]\n"
".inst 0x4fa2e293 // sdot v19.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x9, #0x90]\n"
+ "ldr q20, [x10, #0x90]\n"
".inst 0x4f80eaa8 // sdot v8.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaac // sdot v12.4s, v21.16b, v1.4b[2]\n"
".inst 0x4f82eab0 // sdot v16.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x9, #0xa0]\n"
+ "ldr q21, [x10, #0xa0]\n"
".inst 0x4f80ea89 // sdot v9.4s, v20.16b, v0.4b[2]\n"
".inst 0x4f81ea8d // sdot v13.4s, v20.16b, v1.4b[2]\n"
".inst 0x4f82ea91 // sdot v17.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x9, #0xb0]\n"
+ "ldr q20, [x10, #0xb0]\n"
".inst 0x4f80eaaa // sdot v10.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaae // sdot v14.4s, v21.16b, v1.4b[2]\n"
".inst 0x4f82eab2 // sdot v18.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x9, #0xc0]\n"
+ "ldr q21, [x10, #0xc0]\n"
".inst 0x4f80ea8b // sdot v11.4s, v20.16b, v0.4b[2]\n"
".inst 0x4f81ea8f // sdot v15.4s, v20.16b, v1.4b[2]\n"
".inst 0x4f82ea93 // sdot v19.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x9, #0xd0]\n"
+ "ldr q20, [x10, #0xd0]\n"
".inst 0x4fa0eaa8 // sdot v8.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa1eaac // sdot v12.4s, v21.16b, v1.4b[3]\n"
".inst 0x4fa2eab0 // sdot v16.4s, v21.16b, v2.4b[3]\n"
- "ldr q21, [x9, #0xe0]\n"
+ "ldr q21, [x10, #0xe0]\n"
".inst 0x4fa0ea89 // sdot v9.4s, v20.16b, v0.4b[3]\n"
".inst 0x4fa1ea8d // sdot v13.4s, v20.16b, v1.4b[3]\n"
".inst 0x4fa2ea91 // sdot v17.4s, v20.16b, v2.4b[3]\n"
- "ldr q20, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q20, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0eaaa // sdot v10.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa1eaae // sdot v14.4s, v21.16b, v1.4b[3]\n"
".inst 0x4fa2eab2 // sdot v18.4s, v21.16b, v2.4b[3]\n"
@@ -985,19 +985,19 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr s24, [x26], #0x4\n"
"ldr s23, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s22, [x24], #0x4\n"
- "ldr q21, [x9, #0x0]\n"
+ "ldr q21, [x10, #0x0]\n"
+ "cmp x27, #0x4\n"
+ "ldr q20, [x10, #0x10]\n"
".inst 0x4f98e2a8 // sdot v8.4s, v21.16b, v24.4b[0]\n"
".inst 0x4f97e2ac // sdot v12.4s, v21.16b, v23.4b[0]\n"
- "ldr q20, [x9, #0x10]\n"
".inst 0x4f96e2b0 // sdot v16.4s, v21.16b, v22.4b[0]\n"
- "ldr q21, [x9, #0x20]\n"
+ "ldr q21, [x10, #0x20]\n"
".inst 0x4f98e289 // sdot v9.4s, v20.16b, v24.4b[0]\n"
".inst 0x4f97e28d // sdot v13.4s, v20.16b, v23.4b[0]\n"
".inst 0x4f96e291 // sdot v17.4s, v20.16b, v22.4b[0]\n"
- "ldr q20, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q20, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f98e2aa // sdot v10.4s, v21.16b, v24.4b[0]\n"
".inst 0x4f97e2ae // sdot v14.4s, v21.16b, v23.4b[0]\n"
".inst 0x4f96e2b2 // sdot v18.4s, v21.16b, v22.4b[0]\n"
@@ -1021,17 +1021,17 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr b1, [x25, #0x0]\n"
"ldr b2, [x24, #0x0]\n"
"67:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q21, [x9, #0x0]\n"
- "ldr q20, [x9, #0x10]\n"
+ "ldr q21, [x10, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
".inst 0x4f80e2a8 // sdot v8.4s, v21.16b, v0.4b[0]\n"
".inst 0x4f81e2ac // sdot v12.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b0 // sdot v16.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x9, #0x20]\n"
+ "ldr q21, [x10, #0x20]\n"
".inst 0x4f80e289 // sdot v9.4s, v20.16b, v0.4b[0]\n"
".inst 0x4f81e28d // sdot v13.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e291 // sdot v17.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q20, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
@@ -1045,23 +1045,23 @@ void a64_hybrid_s8qs_dot_6x16 (
"bne 58b\n"
"ldr q23, [x14, #0x0]\n"
"ldr q22, [x14, #0x10]\n"
- "add v8.4s, v8.4s, v23.4s\n"
- "add v9.4s, v9.4s, v22.4s\n"
"ldr q21, [x14, #0x20]\n"
"ldr q20, [x14, #0x30]\n"
- "add v10.4s, v10.4s, v21.4s\n"
- "add v11.4s, v11.4s, v20.4s\n"
+ "add x14, x14, #0x40\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x11, x20\n"
- "add x24, x25, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add v8.4s, v8.4s, v23.4s\n"
+ "add v9.4s, v9.4s, v22.4s\n"
"add v12.4s, v12.4s, v23.4s\n"
"add v13.4s, v13.4s, v22.4s\n"
+ "add v10.4s, v10.4s, v21.4s\n"
+ "add v11.4s, v11.4s, v20.4s\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"add v14.4s, v14.4s, v21.4s\n"
"add v15.4s, v15.4s, v20.4s\n"
- "add x14, x14, #0x40\n"
"add v16.4s, v16.4s, v23.4s\n"
"add v17.4s, v17.4s, v22.4s\n"
"add v18.4s, v18.4s, v21.4s\n"
@@ -1079,9 +1079,9 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x13, x13, #0x40\n"
"b 70f\n"
"69:" // Height 3: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -1112,11 +1112,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v21.4s, v21.4s, #0x1f\n"
"sshr v20.4s, v20.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v23.4s\n"
+ "and v23.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v22.4s\n"
+ "and v22.16b, v13.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v21.4s\n"
"sqadd v11.4s, v11.4s, v20.4s\n"
- "and v23.16b, v12.16b, v0.16b\n"
- "and v22.16b, v13.16b, v1.16b\n"
"and v21.16b, v14.16b, v2.16b\n"
"and v20.16b, v15.16b, v3.16b\n"
"sshr v23.4s, v23.4s, #0x1f\n"
@@ -1124,11 +1124,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v21.4s, v21.4s, #0x1f\n"
"sshr v20.4s, v20.4s, #0x1f\n"
"sqadd v12.4s, v12.4s, v23.4s\n"
+ "and v23.16b, v16.16b, v0.16b\n"
"sqadd v13.4s, v13.4s, v22.4s\n"
+ "and v22.16b, v17.16b, v1.16b\n"
"sqadd v14.4s, v14.4s, v21.4s\n"
"sqadd v15.4s, v15.4s, v20.4s\n"
- "and v23.16b, v16.16b, v0.16b\n"
- "and v22.16b, v17.16b, v1.16b\n"
"and v21.16b, v18.16b, v2.16b\n"
"and v20.16b, v19.16b, v3.16b\n"
"sshr v23.4s, v23.4s, #0x1f\n"
@@ -1140,21 +1140,21 @@ void a64_hybrid_s8qs_dot_6x16 (
"sqadd v18.4s, v18.4s, v21.4s\n"
"sqadd v19.4s, v19.4s, v20.4s\n"
"71:" // Height 3: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v22.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v22.4s }, [x21]\n"
"ld1r { v21.4s }, [x20]\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v20.4s }, [x20]\n"
"srshl v14.4s, v14.4s, v2.4s\n"
"srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v1.4s\n"
"srshl v18.4s, v18.4s, v2.4s\n"
@@ -1205,97 +1205,97 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v12.16b, v12.16b, v20.16b\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 80f\n"
- "tbz x10, #3, 75f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "tbz x10, #2, 73f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "tbz x10, #1, 72f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "tbz x10, #0, 79f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
+ "tbz x11, #3, 75f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "tbz x11, #2, 73f\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "tbz x11, #1, 72f\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "tbz x11, #0, 79f\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
"b 79f\n"
"72:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x10, #0, 79f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
+ "tbz x11, #0, 79f\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
"b 79f\n"
"73:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x10, #1, 74f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "tbz x10, #0, 79f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
+ "tbz x11, #1, 74f\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "tbz x11, #0, 79f\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
"b 79f\n"
"74:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x10, #0, 79f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
+ "tbz x11, #0, 79f\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
"b 79f\n"
"75:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x10, #2, 77f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "tbz x10, #1, 76f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "tbz x10, #0, 79f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
+ "tbz x11, #2, 77f\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "tbz x11, #1, 76f\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "tbz x11, #0, 79f\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
"b 79f\n"
"76:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x10, #0, 79f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
+ "tbz x11, #0, 79f\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
"b 79f\n"
"77:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x10, #1, 78f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "tbz x10, #0, 79f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
+ "tbz x11, #1, 78f\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "tbz x11, #0, 79f\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
"b 79f\n"
"78:" // Height 3: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
"79:" // Height 3: Partial direct writeback: Done
"b 81f\n"
"80:" // Height 3: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
"81:" // Height 3: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 56b\n"
"b 164f\n"
"82:" // Height 4
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"83:" // Height 4: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -1317,8 +1317,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"mov x28, #0x0\n"
"85:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 86f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1346,8 +1346,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"cmp x27, #0x20\n"
"ldr q2, [x24, #0x0]\n"
"ldr q3, [x23, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 89f\n"
"88:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
@@ -1356,7 +1356,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x26, x26, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q25, [x9, #0x20]\n"
+ "ldr q25, [x10, #0x20]\n"
"add x25, x25, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -1364,7 +1364,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x23, x23, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q24, [x9, #0x30]\n"
+ "ldr q24, [x10, #0x30]\n"
"cmp x27, #0x20\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
@@ -1372,70 +1372,70 @@ void a64_hybrid_s8qs_dot_6x16 (
"prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
"prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n"
"prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n"
".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4fa0e328 // sdot v8.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e32c // sdot v12.4s, v25.16b, v1.4b[1]\n"
".inst 0x4fa2e330 // sdot v16.4s, v25.16b, v2.4b[1]\n"
".inst 0x4fa3e334 // sdot v20.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4fa0e309 // sdot v9.4s, v24.16b, v0.4b[1]\n"
".inst 0x4fa1e30d // sdot v13.4s, v24.16b, v1.4b[1]\n"
".inst 0x4fa2e311 // sdot v17.4s, v24.16b, v2.4b[1]\n"
".inst 0x4fa3e315 // sdot v21.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x9, #0x70]\n"
+ "ldr q24, [x10, #0x70]\n"
".inst 0x4fa0e32a // sdot v10.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e32e // sdot v14.4s, v25.16b, v1.4b[1]\n"
".inst 0x4fa2e332 // sdot v18.4s, v25.16b, v2.4b[1]\n"
".inst 0x4fa3e336 // sdot v22.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x9, #0x80]\n"
+ "ldr q25, [x10, #0x80]\n"
".inst 0x4fa0e30b // sdot v11.4s, v24.16b, v0.4b[1]\n"
".inst 0x4fa1e30f // sdot v15.4s, v24.16b, v1.4b[1]\n"
".inst 0x4fa2e313 // sdot v19.4s, v24.16b, v2.4b[1]\n"
".inst 0x4fa3e317 // sdot v23.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x9, #0x90]\n"
+ "ldr q24, [x10, #0x90]\n"
".inst 0x4f80eb28 // sdot v8.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f81eb2c // sdot v12.4s, v25.16b, v1.4b[2]\n"
".inst 0x4f82eb30 // sdot v16.4s, v25.16b, v2.4b[2]\n"
".inst 0x4f83eb34 // sdot v20.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x9, #0xa0]\n"
+ "ldr q25, [x10, #0xa0]\n"
".inst 0x4f80eb09 // sdot v9.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb0d // sdot v13.4s, v24.16b, v1.4b[2]\n"
".inst 0x4f82eb11 // sdot v17.4s, v24.16b, v2.4b[2]\n"
".inst 0x4f83eb15 // sdot v21.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x9, #0xb0]\n"
+ "ldr q24, [x10, #0xb0]\n"
".inst 0x4f80eb2a // sdot v10.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f81eb2e // sdot v14.4s, v25.16b, v1.4b[2]\n"
".inst 0x4f82eb32 // sdot v18.4s, v25.16b, v2.4b[2]\n"
".inst 0x4f83eb36 // sdot v22.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x9, #0xc0]\n"
+ "ldr q25, [x10, #0xc0]\n"
".inst 0x4f80eb0b // sdot v11.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb0f // sdot v15.4s, v24.16b, v1.4b[2]\n"
".inst 0x4f82eb13 // sdot v19.4s, v24.16b, v2.4b[2]\n"
".inst 0x4f83eb17 // sdot v23.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x9, #0xd0]\n"
+ "ldr q24, [x10, #0xd0]\n"
".inst 0x4fa0eb28 // sdot v8.4s, v25.16b, v0.4b[3]\n"
".inst 0x4fa1eb2c // sdot v12.4s, v25.16b, v1.4b[3]\n"
".inst 0x4fa2eb30 // sdot v16.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb34 // sdot v20.4s, v25.16b, v3.4b[3]\n"
- "ldr q25, [x9, #0xe0]\n"
+ "ldr q25, [x10, #0xe0]\n"
".inst 0x4fa0eb09 // sdot v9.4s, v24.16b, v0.4b[3]\n"
".inst 0x4fa1eb0d // sdot v13.4s, v24.16b, v1.4b[3]\n"
".inst 0x4fa2eb11 // sdot v17.4s, v24.16b, v2.4b[3]\n"
".inst 0x4fa3eb15 // sdot v21.4s, v24.16b, v3.4b[3]\n"
- "ldr q24, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q24, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0eb2a // sdot v10.4s, v25.16b, v0.4b[3]\n"
".inst 0x4fa1eb2e // sdot v14.4s, v25.16b, v1.4b[3]\n"
".inst 0x4fa2eb32 // sdot v18.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb36 // sdot v22.4s, v25.16b, v3.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4fa0eb0b // sdot v11.4s, v24.16b, v0.4b[3]\n"
"ldr q0, [x26, #0x0]\n"
".inst 0x4fa1eb0f // sdot v15.4s, v24.16b, v1.4b[3]\n"
@@ -1444,7 +1444,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q2, [x24, #0x0]\n"
".inst 0x4fa3eb17 // sdot v23.4s, v24.16b, v3.4b[3]\n"
"ldr q3, [x23, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 88b\n"
"89:" // Height 4: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
@@ -1453,80 +1453,80 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x25, x25, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q25, [x9, #0x20]\n"
+ "ldr q25, [x10, #0x20]\n"
"add x24, x24, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"add x23, x23, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q24, [x9, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q24, [x10, #0x30]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
"prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n"
".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n"
".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4fa0e328 // sdot v8.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e32c // sdot v12.4s, v25.16b, v1.4b[1]\n"
".inst 0x4fa2e330 // sdot v16.4s, v25.16b, v2.4b[1]\n"
".inst 0x4fa3e334 // sdot v20.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4fa0e309 // sdot v9.4s, v24.16b, v0.4b[1]\n"
".inst 0x4fa1e30d // sdot v13.4s, v24.16b, v1.4b[1]\n"
".inst 0x4fa2e311 // sdot v17.4s, v24.16b, v2.4b[1]\n"
".inst 0x4fa3e315 // sdot v21.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x9, #0x70]\n"
+ "ldr q24, [x10, #0x70]\n"
".inst 0x4fa0e32a // sdot v10.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e32e // sdot v14.4s, v25.16b, v1.4b[1]\n"
".inst 0x4fa2e332 // sdot v18.4s, v25.16b, v2.4b[1]\n"
".inst 0x4fa3e336 // sdot v22.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x9, #0x80]\n"
+ "ldr q25, [x10, #0x80]\n"
".inst 0x4fa0e30b // sdot v11.4s, v24.16b, v0.4b[1]\n"
".inst 0x4fa1e30f // sdot v15.4s, v24.16b, v1.4b[1]\n"
".inst 0x4fa2e313 // sdot v19.4s, v24.16b, v2.4b[1]\n"
".inst 0x4fa3e317 // sdot v23.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x9, #0x90]\n"
+ "ldr q24, [x10, #0x90]\n"
".inst 0x4f80eb28 // sdot v8.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f81eb2c // sdot v12.4s, v25.16b, v1.4b[2]\n"
".inst 0x4f82eb30 // sdot v16.4s, v25.16b, v2.4b[2]\n"
".inst 0x4f83eb34 // sdot v20.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x9, #0xa0]\n"
+ "ldr q25, [x10, #0xa0]\n"
".inst 0x4f80eb09 // sdot v9.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb0d // sdot v13.4s, v24.16b, v1.4b[2]\n"
".inst 0x4f82eb11 // sdot v17.4s, v24.16b, v2.4b[2]\n"
".inst 0x4f83eb15 // sdot v21.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x9, #0xb0]\n"
+ "ldr q24, [x10, #0xb0]\n"
".inst 0x4f80eb2a // sdot v10.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f81eb2e // sdot v14.4s, v25.16b, v1.4b[2]\n"
".inst 0x4f82eb32 // sdot v18.4s, v25.16b, v2.4b[2]\n"
".inst 0x4f83eb36 // sdot v22.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x9, #0xc0]\n"
+ "ldr q25, [x10, #0xc0]\n"
".inst 0x4f80eb0b // sdot v11.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb0f // sdot v15.4s, v24.16b, v1.4b[2]\n"
".inst 0x4f82eb13 // sdot v19.4s, v24.16b, v2.4b[2]\n"
".inst 0x4f83eb17 // sdot v23.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x9, #0xd0]\n"
+ "ldr q24, [x10, #0xd0]\n"
".inst 0x4fa0eb28 // sdot v8.4s, v25.16b, v0.4b[3]\n"
".inst 0x4fa1eb2c // sdot v12.4s, v25.16b, v1.4b[3]\n"
".inst 0x4fa2eb30 // sdot v16.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb34 // sdot v20.4s, v25.16b, v3.4b[3]\n"
- "ldr q25, [x9, #0xe0]\n"
+ "ldr q25, [x10, #0xe0]\n"
".inst 0x4fa0eb09 // sdot v9.4s, v24.16b, v0.4b[3]\n"
".inst 0x4fa1eb0d // sdot v13.4s, v24.16b, v1.4b[3]\n"
".inst 0x4fa2eb11 // sdot v17.4s, v24.16b, v2.4b[3]\n"
".inst 0x4fa3eb15 // sdot v21.4s, v24.16b, v3.4b[3]\n"
- "ldr q24, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q24, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0eb2a // sdot v10.4s, v25.16b, v0.4b[3]\n"
".inst 0x4fa1eb2e // sdot v14.4s, v25.16b, v1.4b[3]\n"
".inst 0x4fa2eb32 // sdot v18.4s, v25.16b, v2.4b[3]\n"
@@ -1543,22 +1543,22 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr s29, [x26], #0x4\n"
"ldr s28, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s27, [x24], #0x4\n"
"ldr s26, [x23], #0x4\n"
- "ldr q25, [x9, #0x0]\n"
- "ldr q24, [x9, #0x10]\n"
+ "cmp x27, #0x4\n"
+ "ldr q25, [x10, #0x0]\n"
+ "ldr q24, [x10, #0x10]\n"
".inst 0x4f9de328 // sdot v8.4s, v25.16b, v29.4b[0]\n"
".inst 0x4f9ce32c // sdot v12.4s, v25.16b, v28.4b[0]\n"
".inst 0x4f9be330 // sdot v16.4s, v25.16b, v27.4b[0]\n"
".inst 0x4f9ae334 // sdot v20.4s, v25.16b, v26.4b[0]\n"
- "ldr q25, [x9, #0x20]\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4f9de309 // sdot v9.4s, v24.16b, v29.4b[0]\n"
".inst 0x4f9ce30d // sdot v13.4s, v24.16b, v28.4b[0]\n"
".inst 0x4f9be311 // sdot v17.4s, v24.16b, v27.4b[0]\n"
".inst 0x4f9ae315 // sdot v21.4s, v24.16b, v26.4b[0]\n"
- "ldr q24, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q24, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f9de32a // sdot v10.4s, v25.16b, v29.4b[0]\n"
".inst 0x4f9ce32e // sdot v14.4s, v25.16b, v28.4b[0]\n"
".inst 0x4f9be332 // sdot v18.4s, v25.16b, v27.4b[0]\n"
@@ -1587,19 +1587,19 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr b2, [x24, #0x0]\n"
"ldr b3, [x23, #0x0]\n"
"94:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q25, [x9, #0x0]\n"
- "ldr q24, [x9, #0x10]\n"
+ "ldr q25, [x10, #0x0]\n"
+ "ldr q24, [x10, #0x10]\n"
".inst 0x4f80e328 // sdot v8.4s, v25.16b, v0.4b[0]\n"
".inst 0x4f81e32c // sdot v12.4s, v25.16b, v1.4b[0]\n"
".inst 0x4f82e330 // sdot v16.4s, v25.16b, v2.4b[0]\n"
".inst 0x4f83e334 // sdot v20.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x9, #0x20]\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4f80e309 // sdot v9.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e30d // sdot v13.4s, v24.16b, v1.4b[0]\n"
".inst 0x4f82e311 // sdot v17.4s, v24.16b, v2.4b[0]\n"
".inst 0x4f83e315 // sdot v21.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q24, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
@@ -1615,24 +1615,24 @@ void a64_hybrid_s8qs_dot_6x16 (
"bne 85b\n"
"ldr q27, [x14, #0x0]\n"
"ldr q26, [x14, #0x10]\n"
- "add v8.4s, v8.4s, v27.4s\n"
- "add v9.4s, v9.4s, v26.4s\n"
"ldr q25, [x14, #0x20]\n"
"ldr q24, [x14, #0x30]\n"
+ "add x14, x14, #0x40\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add v8.4s, v8.4s, v27.4s\n"
+ "add v9.4s, v9.4s, v26.4s\n"
+ "add v12.4s, v12.4s, v27.4s\n"
+ "add v13.4s, v13.4s, v26.4s\n"
"add v10.4s, v10.4s, v25.4s\n"
"add v11.4s, v11.4s, v24.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x11, x20\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
"add x24, x25, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x23, x24, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "add v12.4s, v12.4s, v27.4s\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "add v13.4s, v13.4s, v26.4s\n"
"add v14.4s, v14.4s, v25.4s\n"
- "add x14, x14, #0x40\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"add v15.4s, v15.4s, v24.4s\n"
"add v16.4s, v16.4s, v27.4s\n"
"add v17.4s, v17.4s, v26.4s\n"
@@ -1655,9 +1655,9 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x13, x13, #0x40\n"
"b 97f\n"
"96:" // Height 4: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -1692,11 +1692,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v25.4s, v25.4s, #0x1f\n"
"sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v27.4s\n"
+ "and v27.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v26.4s\n"
+ "and v26.16b, v13.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v25.4s\n"
"sqadd v11.4s, v11.4s, v24.4s\n"
- "and v27.16b, v12.16b, v0.16b\n"
- "and v26.16b, v13.16b, v1.16b\n"
"and v25.16b, v14.16b, v2.16b\n"
"and v24.16b, v15.16b, v3.16b\n"
"sshr v27.4s, v27.4s, #0x1f\n"
@@ -1704,11 +1704,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v25.4s, v25.4s, #0x1f\n"
"sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v12.4s, v12.4s, v27.4s\n"
+ "and v27.16b, v16.16b, v0.16b\n"
"sqadd v13.4s, v13.4s, v26.4s\n"
+ "and v26.16b, v17.16b, v1.16b\n"
"sqadd v14.4s, v14.4s, v25.4s\n"
"sqadd v15.4s, v15.4s, v24.4s\n"
- "and v27.16b, v16.16b, v0.16b\n"
- "and v26.16b, v17.16b, v1.16b\n"
"and v25.16b, v18.16b, v2.16b\n"
"and v24.16b, v19.16b, v3.16b\n"
"sshr v27.4s, v27.4s, #0x1f\n"
@@ -1716,11 +1716,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v25.4s, v25.4s, #0x1f\n"
"sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v27.4s\n"
+ "and v27.16b, v20.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v26.4s\n"
+ "and v26.16b, v21.16b, v1.16b\n"
"sqadd v18.4s, v18.4s, v25.4s\n"
"sqadd v19.4s, v19.4s, v24.4s\n"
- "and v27.16b, v20.16b, v0.16b\n"
- "and v26.16b, v21.16b, v1.16b\n"
"and v25.16b, v22.16b, v2.16b\n"
"and v24.16b, v23.16b, v3.16b\n"
"sshr v27.4s, v27.4s, #0x1f\n"
@@ -1732,21 +1732,21 @@ void a64_hybrid_s8qs_dot_6x16 (
"sqadd v22.4s, v22.4s, v25.4s\n"
"sqadd v23.4s, v23.4s, v24.4s\n"
"98:" // Height 4: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
"srshl v14.4s, v14.4s, v2.4s\n"
"srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v1.4s\n"
"srshl v18.4s, v18.4s, v2.4s\n"
@@ -1816,113 +1816,113 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v16.16b, v16.16b, v18.16b\n"
"uzp1 v20.16b, v20.16b, v17.16b\n"
"bge 107f\n"
- "tbz x10, #3, 102f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "tbz x10, #2, 100f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "tbz x10, #1, 99f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "tbz x10, #0, 106f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
- "st1 { v20.b }[14], [x23]\n"
+ "tbz x11, #3, 102f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "tbz x11, #2, 100f\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "tbz x11, #1, 99f\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "tbz x11, #0, 106f\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
+ "st1 { v20.b }[14], [x24]\n"
"b 106f\n"
"99:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x10, #0, 106f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
- "st1 { v20.b }[12], [x23]\n"
+ "tbz x11, #0, 106f\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
+ "st1 { v20.b }[12], [x24]\n"
"b 106f\n"
"100:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x10, #1, 101f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "tbz x10, #0, 106f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
- "st1 { v20.b }[10], [x23]\n"
+ "tbz x11, #1, 101f\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "tbz x11, #0, 106f\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
+ "st1 { v20.b }[10], [x24]\n"
"b 106f\n"
"101:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x10, #0, 106f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
- "st1 { v20.b }[8], [x23]\n"
+ "tbz x11, #0, 106f\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
+ "st1 { v20.b }[8], [x24]\n"
"b 106f\n"
"102:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x10, #2, 104f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "tbz x10, #1, 103f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "tbz x10, #0, 106f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
- "st1 { v20.b }[6], [x23]\n"
+ "tbz x11, #2, 104f\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "tbz x11, #1, 103f\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "tbz x11, #0, 106f\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
+ "st1 { v20.b }[6], [x24]\n"
"b 106f\n"
"103:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x10, #0, 106f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
- "st1 { v20.b }[4], [x23]\n"
+ "tbz x11, #0, 106f\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
+ "st1 { v20.b }[4], [x24]\n"
"b 106f\n"
"104:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x10, #1, 105f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "str h20, [x23], #0x2\n"
- "tbz x10, #0, 106f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
- "st1 { v20.b }[2], [x23]\n"
+ "tbz x11, #1, 105f\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "tbz x11, #0, 106f\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
+ "st1 { v20.b }[2], [x24]\n"
"b 106f\n"
"105:" // Height 4: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
- "str b20, [x23, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
"106:" // Height 4: Partial direct writeback: Done
"b 108f\n"
"107:" // Height 4: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
- "str q20, [x23, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
"108:" // Height 4: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 83b\n"
"b 164f\n"
"109:" // Height 5
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"110:" // Height 5: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -1948,8 +1948,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"mov x28, #0x0\n"
"112:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 113f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1981,8 +1981,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q2, [x24, #0x0]\n"
"ldr q3, [x23, #0x0]\n"
"ldr q4, [x22, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 116f\n"
"115:" // Height 5: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
@@ -1994,7 +1994,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x25, x25, #0x10\n"
"add x24, x24, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q29, [x9, #0x20]\n"
+ "ldr q29, [x10, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x23, x23, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -2003,7 +2003,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"cmp x27, #0x20\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q28, [x9, #0x30]\n"
+ "ldr q28, [x10, #0x30]\n"
"prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
@@ -2014,80 +2014,80 @@ void a64_hybrid_s8qs_dot_6x16 (
"prfm pldl1keep, [x23, #0x80]\n"
"prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x9, #0x40]\n"
+ "ldr q29, [x10, #0x40]\n"
".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n"
".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x9, #0x50]\n"
+ "ldr q28, [x10, #0x50]\n"
".inst 0x4fa0e3a8 // sdot v8.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3ac // sdot v12.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3b0 // sdot v16.4s, v29.16b, v2.4b[1]\n"
".inst 0x4fa3e3b4 // sdot v20.4s, v29.16b, v3.4b[1]\n"
".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x9, #0x60]\n"
+ "ldr q29, [x10, #0x60]\n"
".inst 0x4fa0e389 // sdot v9.4s, v28.16b, v0.4b[1]\n"
".inst 0x4fa1e38d // sdot v13.4s, v28.16b, v1.4b[1]\n"
".inst 0x4fa2e391 // sdot v17.4s, v28.16b, v2.4b[1]\n"
".inst 0x4fa3e395 // sdot v21.4s, v28.16b, v3.4b[1]\n"
".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x9, #0x70]\n"
+ "ldr q28, [x10, #0x70]\n"
".inst 0x4fa0e3aa // sdot v10.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3ae // sdot v14.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3b2 // sdot v18.4s, v29.16b, v2.4b[1]\n"
".inst 0x4fa3e3b6 // sdot v22.4s, v29.16b, v3.4b[1]\n"
".inst 0x4fa4e3ba // sdot v26.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x9, #0x80]\n"
+ "ldr q29, [x10, #0x80]\n"
".inst 0x4fa0e38b // sdot v11.4s, v28.16b, v0.4b[1]\n"
".inst 0x4fa1e38f // sdot v15.4s, v28.16b, v1.4b[1]\n"
".inst 0x4fa2e393 // sdot v19.4s, v28.16b, v2.4b[1]\n"
".inst 0x4fa3e397 // sdot v23.4s, v28.16b, v3.4b[1]\n"
".inst 0x4fa4e39b // sdot v27.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x9, #0x90]\n"
+ "ldr q28, [x10, #0x90]\n"
".inst 0x4f80eba8 // sdot v8.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebac // sdot v12.4s, v29.16b, v1.4b[2]\n"
".inst 0x4f82ebb0 // sdot v16.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb4 // sdot v20.4s, v29.16b, v3.4b[2]\n"
".inst 0x4f84ebb8 // sdot v24.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x9, #0xa0]\n"
+ "ldr q29, [x10, #0xa0]\n"
".inst 0x4f80eb89 // sdot v9.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb8d // sdot v13.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb91 // sdot v17.4s, v28.16b, v2.4b[2]\n"
".inst 0x4f83eb95 // sdot v21.4s, v28.16b, v3.4b[2]\n"
".inst 0x4f84eb99 // sdot v25.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x9, #0xb0]\n"
+ "ldr q28, [x10, #0xb0]\n"
".inst 0x4f80ebaa // sdot v10.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebae // sdot v14.4s, v29.16b, v1.4b[2]\n"
".inst 0x4f82ebb2 // sdot v18.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb6 // sdot v22.4s, v29.16b, v3.4b[2]\n"
".inst 0x4f84ebba // sdot v26.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x9, #0xc0]\n"
+ "ldr q29, [x10, #0xc0]\n"
".inst 0x4f80eb8b // sdot v11.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb8f // sdot v15.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb93 // sdot v19.4s, v28.16b, v2.4b[2]\n"
".inst 0x4f83eb97 // sdot v23.4s, v28.16b, v3.4b[2]\n"
".inst 0x4f84eb9b // sdot v27.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x9, #0xd0]\n"
+ "ldr q28, [x10, #0xd0]\n"
".inst 0x4fa0eba8 // sdot v8.4s, v29.16b, v0.4b[3]\n"
".inst 0x4fa1ebac // sdot v12.4s, v29.16b, v1.4b[3]\n"
".inst 0x4fa2ebb0 // sdot v16.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb4 // sdot v20.4s, v29.16b, v3.4b[3]\n"
".inst 0x4fa4ebb8 // sdot v24.4s, v29.16b, v4.4b[3]\n"
- "ldr q29, [x9, #0xe0]\n"
+ "ldr q29, [x10, #0xe0]\n"
".inst 0x4fa0eb89 // sdot v9.4s, v28.16b, v0.4b[3]\n"
".inst 0x4fa1eb8d // sdot v13.4s, v28.16b, v1.4b[3]\n"
".inst 0x4fa2eb91 // sdot v17.4s, v28.16b, v2.4b[3]\n"
".inst 0x4fa3eb95 // sdot v21.4s, v28.16b, v3.4b[3]\n"
".inst 0x4fa4eb99 // sdot v25.4s, v28.16b, v4.4b[3]\n"
- "ldr q28, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q28, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0ebaa // sdot v10.4s, v29.16b, v0.4b[3]\n"
".inst 0x4fa1ebae // sdot v14.4s, v29.16b, v1.4b[3]\n"
".inst 0x4fa2ebb2 // sdot v18.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb6 // sdot v22.4s, v29.16b, v3.4b[3]\n"
".inst 0x4fa4ebba // sdot v26.4s, v29.16b, v4.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4fa0eb8b // sdot v11.4s, v28.16b, v0.4b[3]\n"
"ldr q0, [x26, #0x0]\n"
".inst 0x4fa1eb8f // sdot v15.4s, v28.16b, v1.4b[3]\n"
@@ -2098,7 +2098,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q3, [x23, #0x0]\n"
".inst 0x4fa4eb9b // sdot v27.4s, v28.16b, v4.4b[3]\n"
"ldr q4, [x22, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 115b\n"
"116:" // Height 5: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
@@ -2110,17 +2110,17 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x24, x24, #0x10\n"
"add x23, x23, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q29, [x9, #0x20]\n"
+ "ldr q29, [x10, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x22, x22, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q28, [x9, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q28, [x10, #0x30]\n"
+ "sub x27, x27, #0x10\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
@@ -2129,74 +2129,74 @@ void a64_hybrid_s8qs_dot_6x16 (
".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n"
"prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x9, #0x40]\n"
+ "ldr q29, [x10, #0x40]\n"
".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n"
".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x9, #0x50]\n"
+ "ldr q28, [x10, #0x50]\n"
".inst 0x4fa0e3a8 // sdot v8.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3ac // sdot v12.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3b0 // sdot v16.4s, v29.16b, v2.4b[1]\n"
".inst 0x4fa3e3b4 // sdot v20.4s, v29.16b, v3.4b[1]\n"
".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x9, #0x60]\n"
+ "ldr q29, [x10, #0x60]\n"
".inst 0x4fa0e389 // sdot v9.4s, v28.16b, v0.4b[1]\n"
".inst 0x4fa1e38d // sdot v13.4s, v28.16b, v1.4b[1]\n"
".inst 0x4fa2e391 // sdot v17.4s, v28.16b, v2.4b[1]\n"
".inst 0x4fa3e395 // sdot v21.4s, v28.16b, v3.4b[1]\n"
".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x9, #0x70]\n"
+ "ldr q28, [x10, #0x70]\n"
".inst 0x4fa0e3aa // sdot v10.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3ae // sdot v14.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3b2 // sdot v18.4s, v29.16b, v2.4b[1]\n"
".inst 0x4fa3e3b6 // sdot v22.4s, v29.16b, v3.4b[1]\n"
".inst 0x4fa4e3ba // sdot v26.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x9, #0x80]\n"
+ "ldr q29, [x10, #0x80]\n"
".inst 0x4fa0e38b // sdot v11.4s, v28.16b, v0.4b[1]\n"
".inst 0x4fa1e38f // sdot v15.4s, v28.16b, v1.4b[1]\n"
".inst 0x4fa2e393 // sdot v19.4s, v28.16b, v2.4b[1]\n"
".inst 0x4fa3e397 // sdot v23.4s, v28.16b, v3.4b[1]\n"
".inst 0x4fa4e39b // sdot v27.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x9, #0x90]\n"
+ "ldr q28, [x10, #0x90]\n"
".inst 0x4f80eba8 // sdot v8.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebac // sdot v12.4s, v29.16b, v1.4b[2]\n"
".inst 0x4f82ebb0 // sdot v16.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb4 // sdot v20.4s, v29.16b, v3.4b[2]\n"
".inst 0x4f84ebb8 // sdot v24.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x9, #0xa0]\n"
+ "ldr q29, [x10, #0xa0]\n"
".inst 0x4f80eb89 // sdot v9.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb8d // sdot v13.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb91 // sdot v17.4s, v28.16b, v2.4b[2]\n"
".inst 0x4f83eb95 // sdot v21.4s, v28.16b, v3.4b[2]\n"
".inst 0x4f84eb99 // sdot v25.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x9, #0xb0]\n"
+ "ldr q28, [x10, #0xb0]\n"
".inst 0x4f80ebaa // sdot v10.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebae // sdot v14.4s, v29.16b, v1.4b[2]\n"
".inst 0x4f82ebb2 // sdot v18.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb6 // sdot v22.4s, v29.16b, v3.4b[2]\n"
".inst 0x4f84ebba // sdot v26.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x9, #0xc0]\n"
+ "ldr q29, [x10, #0xc0]\n"
".inst 0x4f80eb8b // sdot v11.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb8f // sdot v15.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb93 // sdot v19.4s, v28.16b, v2.4b[2]\n"
".inst 0x4f83eb97 // sdot v23.4s, v28.16b, v3.4b[2]\n"
".inst 0x4f84eb9b // sdot v27.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x9, #0xd0]\n"
+ "ldr q28, [x10, #0xd0]\n"
".inst 0x4fa0eba8 // sdot v8.4s, v29.16b, v0.4b[3]\n"
".inst 0x4fa1ebac // sdot v12.4s, v29.16b, v1.4b[3]\n"
".inst 0x4fa2ebb0 // sdot v16.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb4 // sdot v20.4s, v29.16b, v3.4b[3]\n"
".inst 0x4fa4ebb8 // sdot v24.4s, v29.16b, v4.4b[3]\n"
- "ldr q29, [x9, #0xe0]\n"
+ "ldr q29, [x10, #0xe0]\n"
".inst 0x4fa0eb89 // sdot v9.4s, v28.16b, v0.4b[3]\n"
".inst 0x4fa1eb8d // sdot v13.4s, v28.16b, v1.4b[3]\n"
".inst 0x4fa2eb91 // sdot v17.4s, v28.16b, v2.4b[3]\n"
".inst 0x4fa3eb95 // sdot v21.4s, v28.16b, v3.4b[3]\n"
".inst 0x4fa4eb99 // sdot v25.4s, v28.16b, v4.4b[3]\n"
- "ldr q28, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q28, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0ebaa // sdot v10.4s, v29.16b, v0.4b[3]\n"
".inst 0x4fa1ebae // sdot v14.4s, v29.16b, v1.4b[3]\n"
".inst 0x4fa2ebb2 // sdot v18.4s, v29.16b, v2.4b[3]\n"
@@ -2215,25 +2215,25 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr s2, [x26], #0x4\n"
"ldr s1, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s0, [x24], #0x4\n"
"ldr s31, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
"ldr s30, [x22], #0x4\n"
- "ldr q29, [x9, #0x0]\n"
+ "ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
".inst 0x4f82e3a8 // sdot v8.4s, v29.16b, v2.4b[0]\n"
".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n"
- "ldr q28, [x9, #0x10]\n"
".inst 0x4f80e3b0 // sdot v16.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f9fe3b4 // sdot v20.4s, v29.16b, v31.4b[0]\n"
".inst 0x4f9ee3b8 // sdot v24.4s, v29.16b, v30.4b[0]\n"
- "ldr q29, [x9, #0x20]\n"
+ "ldr q29, [x10, #0x20]\n"
".inst 0x4f82e389 // sdot v9.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f81e38d // sdot v13.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f80e391 // sdot v17.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f9fe395 // sdot v21.4s, v28.16b, v31.4b[0]\n"
".inst 0x4f9ee399 // sdot v25.4s, v28.16b, v30.4b[0]\n"
- "ldr q28, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q28, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f82e3aa // sdot v10.4s, v29.16b, v2.4b[0]\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f80e3b2 // sdot v18.4s, v29.16b, v0.4b[0]\n"
@@ -2267,21 +2267,21 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr b3, [x23, #0x0]\n"
"ldr b4, [x22, #0x0]\n"
"121:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q29, [x9, #0x0]\n"
- "ldr q28, [x9, #0x10]\n"
+ "ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
".inst 0x4f80e3a8 // sdot v8.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f82e3b0 // sdot v16.4s, v29.16b, v2.4b[0]\n"
".inst 0x4f83e3b4 // sdot v20.4s, v29.16b, v3.4b[0]\n"
".inst 0x4f84e3b8 // sdot v24.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x9, #0x20]\n"
+ "ldr q29, [x10, #0x20]\n"
".inst 0x4f80e389 // sdot v9.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f81e38d // sdot v13.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f82e391 // sdot v17.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f83e395 // sdot v21.4s, v28.16b, v3.4b[0]\n"
".inst 0x4f84e399 // sdot v25.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q28, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f82e3b2 // sdot v18.4s, v29.16b, v2.4b[0]\n"
@@ -2299,28 +2299,28 @@ void a64_hybrid_s8qs_dot_6x16 (
"bne 112b\n"
"ldr q31, [x14, #0x0]\n"
"ldr q30, [x14, #0x10]\n"
- "add v8.4s, v8.4s, v31.4s\n"
- "add v9.4s, v9.4s, v30.4s\n"
"ldr q29, [x14, #0x20]\n"
"ldr q28, [x14, #0x30]\n"
+ "add x14, x14, #0x40\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add v8.4s, v8.4s, v31.4s\n"
+ "add v9.4s, v9.4s, v30.4s\n"
+ "add v12.4s, v12.4s, v31.4s\n"
+ "add v13.4s, v13.4s, v30.4s\n"
"add v10.4s, v10.4s, v29.4s\n"
"add v11.4s, v11.4s, v28.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x11, x20\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
"add x24, x25, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "add v12.4s, v12.4s, v31.4s\n"
- "add v13.4s, v13.4s, v30.4s\n"
"add v14.4s, v14.4s, v29.4s\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"add v15.4s, v15.4s, v28.4s\n"
- "add x14, x14, #0x40\n"
"add v16.4s, v16.4s, v31.4s\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"add v17.4s, v17.4s, v30.4s\n"
"add v18.4s, v18.4s, v29.4s\n"
"add v19.4s, v19.4s, v28.4s\n"
@@ -2345,9 +2345,9 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x13, x13, #0x40\n"
"b 124f\n"
"123:" // Height 5: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -2386,11 +2386,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v31.4s\n"
+ "and v31.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v30.4s\n"
+ "and v30.16b, v13.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v29.4s\n"
"sqadd v11.4s, v11.4s, v28.4s\n"
- "and v31.16b, v12.16b, v0.16b\n"
- "and v30.16b, v13.16b, v1.16b\n"
"and v29.16b, v14.16b, v2.16b\n"
"and v28.16b, v15.16b, v3.16b\n"
"sshr v31.4s, v31.4s, #0x1f\n"
@@ -2398,11 +2398,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sqadd v12.4s, v12.4s, v31.4s\n"
+ "and v31.16b, v16.16b, v0.16b\n"
"sqadd v13.4s, v13.4s, v30.4s\n"
+ "and v30.16b, v17.16b, v1.16b\n"
"sqadd v14.4s, v14.4s, v29.4s\n"
"sqadd v15.4s, v15.4s, v28.4s\n"
- "and v31.16b, v16.16b, v0.16b\n"
- "and v30.16b, v17.16b, v1.16b\n"
"and v29.16b, v18.16b, v2.16b\n"
"and v28.16b, v19.16b, v3.16b\n"
"sshr v31.4s, v31.4s, #0x1f\n"
@@ -2410,11 +2410,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v31.4s\n"
+ "and v31.16b, v20.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v30.4s\n"
+ "and v30.16b, v21.16b, v1.16b\n"
"sqadd v18.4s, v18.4s, v29.4s\n"
"sqadd v19.4s, v19.4s, v28.4s\n"
- "and v31.16b, v20.16b, v0.16b\n"
- "and v30.16b, v21.16b, v1.16b\n"
"and v29.16b, v22.16b, v2.16b\n"
"and v28.16b, v23.16b, v3.16b\n"
"sshr v31.4s, v31.4s, #0x1f\n"
@@ -2422,11 +2422,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v31.4s\n"
+ "and v31.16b, v24.16b, v0.16b\n"
"sqadd v21.4s, v21.4s, v30.4s\n"
+ "and v30.16b, v25.16b, v1.16b\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"sqadd v23.4s, v23.4s, v28.4s\n"
- "and v31.16b, v24.16b, v0.16b\n"
- "and v30.16b, v25.16b, v1.16b\n"
"and v29.16b, v26.16b, v2.16b\n"
"and v28.16b, v27.16b, v3.16b\n"
"sshr v31.4s, v31.4s, #0x1f\n"
@@ -2438,21 +2438,21 @@ void a64_hybrid_s8qs_dot_6x16 (
"sqadd v26.4s, v26.4s, v29.4s\n"
"sqadd v27.4s, v27.4s, v28.4s\n"
"125:" // Height 5: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v30.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v30.4s }, [x21]\n"
"ld1r { v29.4s }, [x20]\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v28.4s }, [x20]\n"
"srshl v14.4s, v14.4s, v2.4s\n"
"srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v1.4s\n"
"srshl v18.4s, v18.4s, v2.4s\n"
@@ -2541,132 +2541,133 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v20.16b, v20.16b, v18.16b\n"
"uzp1 v24.16b, v24.16b, v17.16b\n"
"bge 134f\n"
- "tbz x10, #3, 129f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x10, #2, 127f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "tbz x10, #1, 126f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "tbz x10, #0, 133f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "tbz x11, #3, 129f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "tbz x11, #2, 127f\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "tbz x11, #1, 126f\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "tbz x11, #0, 133f\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 133f\n"
"126:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x10, #0, 133f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "tbz x11, #0, 133f\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 133f\n"
"127:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x10, #1, 128f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "tbz x10, #0, 133f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "tbz x11, #1, 128f\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "tbz x11, #0, 133f\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 133f\n"
"128:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x10, #0, 133f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "tbz x11, #0, 133f\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 133f\n"
"129:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x10, #2, 131f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "tbz x10, #1, 130f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "tbz x10, #0, 133f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "tbz x11, #2, 131f\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "tbz x11, #1, 130f\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "tbz x11, #0, 133f\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 133f\n"
"130:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x10, #0, 133f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "tbz x11, #0, 133f\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 133f\n"
"131:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x10, #1, 132f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "tbz x10, #0, 133f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "tbz x11, #1, 132f\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "tbz x11, #0, 133f\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 133f\n"
"132:" // Height 5: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"133:" // Height 5: Partial direct writeback: Done
"b 135f\n"
"134:" // Height 5: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"135:" // Height 5: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 110b\n"
"b 164f\n"
"136:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x6\n"
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"137:" // Height 6: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2696,8 +2697,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"mov x28, #0x0\n"
"139:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 140f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2733,8 +2734,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q3, [x23, #0x0]\n"
"ldr q4, [x22, #0x0]\n"
"ldr q5, [x21, #0x0]\n"
- "ldr q6, [x9, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
"blt 143f\n"
"142:" // Height 6: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
@@ -2747,7 +2748,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x24, x24, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x10, #0x20]\n"
"add x23, x23, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -2759,7 +2760,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
+ "ldr q7, [x10, #0x30]\n"
"prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
@@ -2771,92 +2772,92 @@ void a64_hybrid_s8qs_dot_6x16 (
"prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x10, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x10, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x10, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x10, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x10, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x10, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x10, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x10, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x10, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x10, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8de // sdot v30.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x9, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"ldr q0, [x26, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
@@ -2869,7 +2870,7 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr q4, [x22, #0x0]\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
"ldr q5, [x21, #0x0]\n"
- "ldr q7, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x10]\n"
"bge 142b\n"
"143:" // Height 6: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
@@ -2882,108 +2883,108 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x23, x23, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x20]\n"
+ "ldr q6, [x10, #0x20]\n"
"add x22, x22, #0x10\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
"prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x40]\n"
+ "ldr q6, [x10, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x50]\n"
+ "ldr q7, [x10, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x9, #0x60]\n"
+ "ldr q6, [x10, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x9, #0x70]\n"
+ "ldr q7, [x10, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x9, #0x80]\n"
+ "ldr q6, [x10, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x9, #0x90]\n"
+ "ldr q7, [x10, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x10, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x9, #0xb0]\n"
+ "ldr q7, [x10, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x10, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x9, #0xd0]\n"
+ "ldr q7, [x10, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x10, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q7, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
@@ -3004,28 +3005,28 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr s7, [x26], #0x4\n"
"ldr s6, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s5, [x24], #0x4\n"
"ldr s4, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
"ldr s3, [x22], #0x4\n"
"ldr s2, [x21], #0x4\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q0, [x9, #0x10]\n"
+ "ldr q1, [x10, #0x0]\n"
+ "ldr q0, [x10, #0x10]\n"
".inst 0x4f87e028 // sdot v8.4s, v1.16b, v7.4b[0]\n"
".inst 0x4f86e02c // sdot v12.4s, v1.16b, v6.4b[0]\n"
".inst 0x4f85e030 // sdot v16.4s, v1.16b, v5.4b[0]\n"
".inst 0x4f84e034 // sdot v20.4s, v1.16b, v4.4b[0]\n"
".inst 0x4f83e038 // sdot v24.4s, v1.16b, v3.4b[0]\n"
".inst 0x4f82e03c // sdot v28.4s, v1.16b, v2.4b[0]\n"
- "ldr q1, [x9, #0x20]\n"
+ "ldr q1, [x10, #0x20]\n"
".inst 0x4f87e009 // sdot v9.4s, v0.16b, v7.4b[0]\n"
".inst 0x4f86e00d // sdot v13.4s, v0.16b, v6.4b[0]\n"
".inst 0x4f85e011 // sdot v17.4s, v0.16b, v5.4b[0]\n"
".inst 0x4f84e015 // sdot v21.4s, v0.16b, v4.4b[0]\n"
".inst 0x4f83e019 // sdot v25.4s, v0.16b, v3.4b[0]\n"
".inst 0x4f82e01d // sdot v29.4s, v0.16b, v2.4b[0]\n"
- "ldr q0, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q0, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f87e02a // sdot v10.4s, v1.16b, v7.4b[0]\n"
".inst 0x4f86e02e // sdot v14.4s, v1.16b, v6.4b[0]\n"
".inst 0x4f85e032 // sdot v18.4s, v1.16b, v5.4b[0]\n"
@@ -3064,23 +3065,23 @@ void a64_hybrid_s8qs_dot_6x16 (
"ldr b4, [x22, #0x0]\n"
"ldr b5, [x21, #0x0]\n"
"148:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
".inst 0x4f80e0e8 // sdot v8.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ec // sdot v12.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f0 // sdot v16.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f4 // sdot v20.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f8 // sdot v24.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fc // sdot v28.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x4f80e0c9 // sdot v9.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cd // sdot v13.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d1 // sdot v17.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d5 // sdot v21.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d9 // sdot v25.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dd // sdot v29.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x9, #0x30]\n"
- "add x9, x9, #0x40\n"
+ "ldr q6, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e0ea // sdot v10.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ee // sdot v14.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f2 // sdot v18.4s, v7.16b, v2.4b[0]\n"
@@ -3100,32 +3101,32 @@ void a64_hybrid_s8qs_dot_6x16 (
"bne 139b\n"
"ldr q3, [x14, #0x0]\n"
"ldr q2, [x14, #0x10]\n"
- "add v8.4s, v8.4s, v3.4s\n"
- "add v9.4s, v9.4s, v2.4s\n"
"ldr q1, [x14, #0x20]\n"
"ldr q0, [x14, #0x30]\n"
+ "add x14, x14, #0x40\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add v8.4s, v8.4s, v3.4s\n"
+ "add v9.4s, v9.4s, v2.4s\n"
+ "add v12.4s, v12.4s, v3.4s\n"
+ "add v13.4s, v13.4s, v2.4s\n"
"add v10.4s, v10.4s, v1.4s\n"
"add v11.4s, v11.4s, v0.4s\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x11, x20\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
"add x24, x25, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "add v12.4s, v12.4s, v3.4s\n"
- "prfm pstl1keep, [x21, #0x0]\n"
- "add v13.4s, v13.4s, v2.4s\n"
"add v14.4s, v14.4s, v1.4s\n"
- "add x14, x14, #0x40\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"add v15.4s, v15.4s, v0.4s\n"
"add v16.4s, v16.4s, v3.4s\n"
+ "add x22, x23, x20\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"add v17.4s, v17.4s, v2.4s\n"
"add v18.4s, v18.4s, v1.4s\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"add v19.4s, v19.4s, v0.4s\n"
"add v20.4s, v20.4s, v3.4s\n"
"add v21.4s, v21.4s, v2.4s\n"
@@ -3152,9 +3153,9 @@ void a64_hybrid_s8qs_dot_6x16 (
"add x13, x13, #0x40\n"
"b 151f\n"
"150:" // Height 6: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -3197,11 +3198,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v7.4s\n"
+ "and v7.16b, v12.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v6.4s\n"
+ "and v6.16b, v13.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v5.4s\n"
"sqadd v11.4s, v11.4s, v4.4s\n"
- "and v7.16b, v12.16b, v0.16b\n"
- "and v6.16b, v13.16b, v1.16b\n"
"and v5.16b, v14.16b, v2.16b\n"
"and v4.16b, v15.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3209,11 +3210,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v12.4s, v12.4s, v7.4s\n"
+ "and v7.16b, v16.16b, v0.16b\n"
"sqadd v13.4s, v13.4s, v6.4s\n"
+ "and v6.16b, v17.16b, v1.16b\n"
"sqadd v14.4s, v14.4s, v5.4s\n"
"sqadd v15.4s, v15.4s, v4.4s\n"
- "and v7.16b, v16.16b, v0.16b\n"
- "and v6.16b, v17.16b, v1.16b\n"
"and v5.16b, v18.16b, v2.16b\n"
"and v4.16b, v19.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3221,11 +3222,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v7.4s\n"
+ "and v7.16b, v20.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v6.4s\n"
+ "and v6.16b, v21.16b, v1.16b\n"
"sqadd v18.4s, v18.4s, v5.4s\n"
"sqadd v19.4s, v19.4s, v4.4s\n"
- "and v7.16b, v20.16b, v0.16b\n"
- "and v6.16b, v21.16b, v1.16b\n"
"and v5.16b, v22.16b, v2.16b\n"
"and v4.16b, v23.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3233,11 +3234,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v7.4s\n"
+ "and v7.16b, v24.16b, v0.16b\n"
"sqadd v21.4s, v21.4s, v6.4s\n"
+ "and v6.16b, v25.16b, v1.16b\n"
"sqadd v22.4s, v22.4s, v5.4s\n"
"sqadd v23.4s, v23.4s, v4.4s\n"
- "and v7.16b, v24.16b, v0.16b\n"
- "and v6.16b, v25.16b, v1.16b\n"
"and v5.16b, v26.16b, v2.16b\n"
"and v4.16b, v27.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3245,11 +3246,11 @@ void a64_hybrid_s8qs_dot_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v24.4s, v24.4s, v7.4s\n"
+ "and v7.16b, v28.16b, v0.16b\n"
"sqadd v25.4s, v25.4s, v6.4s\n"
+ "and v6.16b, v29.16b, v1.16b\n"
"sqadd v26.4s, v26.4s, v5.4s\n"
"sqadd v27.4s, v27.4s, v4.4s\n"
- "and v7.16b, v28.16b, v0.16b\n"
- "and v6.16b, v29.16b, v1.16b\n"
"and v5.16b, v30.16b, v2.16b\n"
"and v4.16b, v31.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3261,21 +3262,21 @@ void a64_hybrid_s8qs_dot_6x16 (
"sqadd v30.4s, v30.4s, v5.4s\n"
"sqadd v31.4s, v31.4s, v4.4s\n"
"152:" // Height 6: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v6.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v6.4s }, [x21]\n"
"ld1r { v5.4s }, [x20]\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
"srshl v12.4s, v12.4s, v0.4s\n"
"srshl v13.4s, v13.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v4.4s }, [x20]\n"
"srshl v14.4s, v14.4s, v2.4s\n"
"srshl v15.4s, v15.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v1.4s\n"
"srshl v18.4s, v18.4s, v2.4s\n"
@@ -3383,136 +3384,136 @@ void a64_hybrid_s8qs_dot_6x16 (
"uzp1 v24.16b, v24.16b, v18.16b\n"
"uzp1 v28.16b, v28.16b, v17.16b\n"
"bge 161f\n"
- "tbz x10, #3, 156f\n"
- "str d8, [x11], #0x8\n"
- "str d12, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
- "tbz x10, #2, 154f\n"
- "st1 { v8.s }[2], [x11], #0x4\n"
- "st1 { v12.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
- "tbz x10, #1, 153f\n"
- "st1 { v8.h }[6], [x11], #0x2\n"
- "st1 { v12.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "st1 { v28.h }[6], [x21], #0x2\n"
- "tbz x10, #0, 160f\n"
- "st1 { v8.b }[14], [x11]\n"
- "st1 { v12.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
- "st1 { v28.b }[14], [x21]\n"
+ "tbz x11, #3, 156f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
+ "tbz x11, #2, 154f\n"
+ "st1 { v8.s }[2], [x9], #0x4\n"
+ "st1 { v12.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
+ "tbz x11, #1, 153f\n"
+ "st1 { v8.h }[6], [x9], #0x2\n"
+ "st1 { v12.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "st1 { v28.h }[6], [x22], #0x2\n"
+ "tbz x11, #0, 160f\n"
+ "st1 { v8.b }[14], [x9]\n"
+ "st1 { v12.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "st1 { v28.b }[14], [x22]\n"
"b 160f\n"
"153:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x10, #0, 160f\n"
- "st1 { v8.b }[12], [x11]\n"
- "st1 { v12.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
- "st1 { v28.b }[12], [x21]\n"
+ "tbz x11, #0, 160f\n"
+ "st1 { v8.b }[12], [x9]\n"
+ "st1 { v12.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "st1 { v28.b }[12], [x22]\n"
"b 160f\n"
"154:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x10, #1, 155f\n"
- "st1 { v8.h }[4], [x11], #0x2\n"
- "st1 { v12.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "st1 { v28.h }[4], [x21], #0x2\n"
- "tbz x10, #0, 160f\n"
- "st1 { v8.b }[10], [x11]\n"
- "st1 { v12.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
- "st1 { v28.b }[10], [x21]\n"
+ "tbz x11, #1, 155f\n"
+ "st1 { v8.h }[4], [x9], #0x2\n"
+ "st1 { v12.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "st1 { v28.h }[4], [x22], #0x2\n"
+ "tbz x11, #0, 160f\n"
+ "st1 { v8.b }[10], [x9]\n"
+ "st1 { v12.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "st1 { v28.b }[10], [x22]\n"
"b 160f\n"
"155:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x10, #0, 160f\n"
- "st1 { v8.b }[8], [x11]\n"
- "st1 { v12.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
- "st1 { v28.b }[8], [x21]\n"
+ "tbz x11, #0, 160f\n"
+ "st1 { v8.b }[8], [x9]\n"
+ "st1 { v12.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "st1 { v28.b }[8], [x22]\n"
"b 160f\n"
"156:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x10, #2, 158f\n"
- "str s8, [x11], #0x4\n"
- "str s12, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
- "tbz x10, #1, 157f\n"
- "st1 { v8.h }[2], [x11], #0x2\n"
- "st1 { v12.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "st1 { v28.h }[2], [x21], #0x2\n"
- "tbz x10, #0, 160f\n"
- "st1 { v8.b }[6], [x11]\n"
- "st1 { v12.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
- "st1 { v28.b }[6], [x21]\n"
+ "tbz x11, #2, 158f\n"
+ "str s8, [x9], #0x4\n"
+ "str s12, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
+ "tbz x11, #1, 157f\n"
+ "st1 { v8.h }[2], [x9], #0x2\n"
+ "st1 { v12.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "st1 { v28.h }[2], [x22], #0x2\n"
+ "tbz x11, #0, 160f\n"
+ "st1 { v8.b }[6], [x9]\n"
+ "st1 { v12.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "st1 { v28.b }[6], [x22]\n"
"b 160f\n"
"157:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x10, #0, 160f\n"
- "st1 { v8.b }[4], [x11]\n"
- "st1 { v12.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
- "st1 { v28.b }[4], [x21]\n"
+ "tbz x11, #0, 160f\n"
+ "st1 { v8.b }[4], [x9]\n"
+ "st1 { v12.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "st1 { v28.b }[4], [x22]\n"
"b 160f\n"
"158:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x10, #1, 159f\n"
- "str h8, [x11], #0x2\n"
- "str h12, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "str h28, [x21], #0x2\n"
- "tbz x10, #0, 160f\n"
- "st1 { v8.b }[2], [x11]\n"
- "st1 { v12.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
- "st1 { v28.b }[2], [x21]\n"
+ "tbz x11, #1, 159f\n"
+ "str h8, [x9], #0x2\n"
+ "str h12, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "str h28, [x22], #0x2\n"
+ "tbz x11, #0, 160f\n"
+ "st1 { v8.b }[2], [x9]\n"
+ "st1 { v12.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "st1 { v28.b }[2], [x22]\n"
"b 160f\n"
"159:" // Height 6: Partial direct writeback: partial_1_0
- "str b8, [x11, #0x0]\n"
- "str b12, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
- "str b28, [x21, #0x0]\n"
+ "str b8, [x9, #0x0]\n"
+ "str b12, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "str b28, [x22, #0x0]\n"
"160:" // Height 6: Partial direct writeback: Done
"b 162f\n"
"161:" // Height 6: Full writeback
- "str q8, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q12, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
- "str q28, [x21, #0x0]\n"
+ "str q8, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q12, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q28, [x22, #0x0]\n"
"162:" // Height 6: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 137b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 164f\n"
@@ -3526,8 +3527,8 @@ void a64_hybrid_s8qs_dot_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"164:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
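
The operand-list change just above shows the pattern applied throughout this patch: output_ptr is no longer passed to the inline assembly as its own "+&r" operand; it now lives in the KernelArgs structure and the kernel loads it through args_ptr with an offsetof-based immediate, matching the new [offsetof_output_ptr] constraint. Below is a minimal, AArch64-only sketch of that access pattern with hypothetical names (KernelArgs here is a stand-in and kernel_stub is illustrative, not part of this patch):

#include <cstddef>

struct KernelArgs {
    size_t output_offset = 0;
    void  *output_ptr   = nullptr;  // moved into the struct by this patch
};

void kernel_stub(void *out_base) {
    KernelArgs ka;
    ka.output_ptr = out_base;
    void *p;
    // The generated kernels now do the equivalent of:
    //   "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]"
    // instead of "mov x11, %x[output_ptr]".
    __asm__ volatile(
        "ldr %x[p], [%x[args], %[off]]\n"
        : [p] "=r"(p)
        : [args] "r"(&ka), [off] "I"(offsetof(KernelArgs, output_ptr))
        : "memory");
    (void)p;
}

Dropping the dedicated output_ptr operand frees an input register, which lines up with the x9/x10/x11 register reshuffle visible in the hunks above.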
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16.hpp
index d0d5f1b80d..494370ade7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
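
The one-line template change in this header follows the same interface update applied across the patch: StdTransformsFixed now takes distinct LHS and RHS operand types rather than a single operand type. A hedged sketch of the shape of that signature change (parameter names are assumed; only the added template parameter is taken from the diff):

// Before: a single operand type covered both sides of the GEMM.
//   template <typename TOperand, typename TResult,
//             unsigned Height, unsigned Width, unsigned KBlock>
//   struct StdTransformsFixed;

// After: LHS and RHS operand types are separate parameters.
template <typename TLhs, typename TRhs, typename TResult,
          unsigned Height, unsigned Width, unsigned KBlock>
struct StdTransformsFixed { /* block transforms, elided */ };

// Instantiation mirroring the kernel class above:
//   StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 8> transforms = {};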
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp
index 0771829d37..867bcded1f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,18 @@ void a64_hybrid_s8qs_mmla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -97,9 +97,9 @@ void a64_hybrid_s8qs_mmla_6x16 (
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -113,8 +113,8 @@ void a64_hybrid_s8qs_mmla_6x16 (
"mov x28, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -129,115 +129,115 @@ void a64_hybrid_s8qs_mmla_6x16 (
"cmp x27, #0x10\n"
"blt 9f\n"
"ldr q1, [x26, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"cmp x27, #0x20\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
"blt 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"trn1 v18.2d, v1.2d, v21.2d\n"
+ "trn2 v1.2d, v1.2d, v21.2d\n"
".inst 0x4e87a648 // smmla v8.4s, v18.16b, v7.16b\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4e86a64c // smmla v12.4s, v18.16b, v6.16b\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
".inst 0x4e91a649 // smmla v9.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x40]\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4e90a64d // smmla v13.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4e91a64a // smmla v10.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x60]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4e90a64e // smmla v14.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x70]\n"
- "trn2 v1.2d, v1.2d, v21.2d\n"
+ "ldr q16, [x10, #0x70]\n"
".inst 0x4e91a64b // smmla v11.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x80]\n"
+ "ldr q17, [x10, #0x80]\n"
".inst 0x4e90a64f // smmla v15.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x90]\n"
+ "ldr q16, [x10, #0x90]\n"
".inst 0x4e91a428 // smmla v8.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xa0]\n"
+ "ldr q17, [x10, #0xa0]\n"
".inst 0x4e90a42c // smmla v12.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xb0]\n"
+ "ldr q16, [x10, #0xb0]\n"
".inst 0x4e91a429 // smmla v9.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xc0]\n"
+ "ldr q17, [x10, #0xc0]\n"
".inst 0x4e90a42d // smmla v13.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xd0]\n"
+ "ldr q16, [x10, #0xd0]\n"
".inst 0x4e91a42a // smmla v10.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xe0]\n"
+ "ldr q17, [x10, #0xe0]\n"
".inst 0x4e90a42e // smmla v14.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "cmp x27, #0x20\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e91a42b // smmla v11.4s, v1.16b, v17.16b\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x4e90a42f // smmla v15.4s, v1.16b, v16.16b\n"
"ldr q1, [x26, #0x0]\n"
- "add x9, x9, #0x100\n"
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q6, [x10, #0x10]\n"
"bge 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
- "trn1 v18.2d, v1.2d, v19.2d\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "trn1 v18.2d, v1.2d, v16.2d\n"
+ "trn2 v1.2d, v1.2d, v16.2d\n"
".inst 0x4e87a648 // smmla v8.4s, v18.16b, v7.16b\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4e86a64c // smmla v12.4s, v18.16b, v6.16b\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
".inst 0x4e91a649 // smmla v9.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x40]\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4e90a64d // smmla v13.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4e91a64a // smmla v10.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x60]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4e90a64e // smmla v14.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x70]\n"
- "trn2 v1.2d, v1.2d, v19.2d\n"
+ "ldr q16, [x10, #0x70]\n"
".inst 0x4e91a64b // smmla v11.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x80]\n"
+ "ldr q17, [x10, #0x80]\n"
".inst 0x4e90a64f // smmla v15.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x90]\n"
+ "ldr q16, [x10, #0x90]\n"
".inst 0x4e91a428 // smmla v8.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xa0]\n"
+ "ldr q17, [x10, #0xa0]\n"
".inst 0x4e90a42c // smmla v12.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xb0]\n"
+ "ldr q16, [x10, #0xb0]\n"
".inst 0x4e91a429 // smmla v9.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xc0]\n"
+ "ldr q17, [x10, #0xc0]\n"
".inst 0x4e90a42d // smmla v13.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xd0]\n"
+ "ldr q16, [x10, #0xd0]\n"
".inst 0x4e91a42a // smmla v10.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xe0]\n"
+ "ldr q17, [x10, #0xe0]\n"
".inst 0x4e90a42e // smmla v14.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e91a42b // smmla v11.4s, v1.16b, v17.16b\n"
".inst 0x4e90a42f // smmla v15.4s, v1.16b, v16.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x9, x9, #0x100\n"
"9:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 16f\n"
"cmp x27, #0x8\n"
"blt 11f\n"
"10:" // Height 1: Multiply loop: Odd block loop
"ldr d18, [x26], #0x8\n"
- "ldr q17, [x9, #0x0]\n"
- "trn1 v18.2d, v18.2d, v16.2d\n"
- "ldr q31, [x9, #0x10]\n"
- ".inst 0x4e91a648 // smmla v8.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x20]\n"
- ".inst 0x4e9fa64c // smmla v12.4s, v18.16b, v31.16b\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q19, [x10, #0x0]\n"
+ "sub x27, x27, #0x8\n"
+ "ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x8\n"
+ "trn1 v18.2d, v18.2d, v17.2d\n"
+ ".inst 0x4e93a648 // smmla v8.4s, v18.16b, v19.16b\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4e90a64c // smmla v12.4s, v18.16b, v16.16b\n"
+ "ldr q16, [x10, #0x30]\n"
".inst 0x4e91a649 // smmla v9.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x40]\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4e90a64d // smmla v13.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4e91a64a // smmla v10.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x60]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4e90a64e // smmla v14.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x70]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ "ldr q16, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e91a64b // smmla v11.4s, v18.16b, v17.16b\n"
".inst 0x4e90a64f // smmla v15.4s, v18.16b, v16.16b\n"
- "add x9, x9, #0x80\n"
"bge 10b\n"
"11:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 16f\n"
@@ -261,24 +261,24 @@ void a64_hybrid_s8qs_mmla_6x16 (
"14:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr b1, [x26, #0x0]\n"
"15:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q17, [x9, #0x0]\n"
- "ldr q19, [x9, #0x10]\n"
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q19, [x10, #0x10]\n"
"trn1 v18.2d, v1.2d, v16.2d\n"
".inst 0x4e91a648 // smmla v8.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4e93a64c // smmla v12.4s, v18.16b, v19.16b\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
".inst 0x4e91a649 // smmla v9.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x40]\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4e90a64d // smmla v13.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4e91a64a // smmla v10.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x60]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4e90a64e // smmla v14.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x70]\n"
+ "ldr q16, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e91a64b // smmla v11.4s, v18.16b, v17.16b\n"
".inst 0x4e90a64f // smmla v15.4s, v18.16b, v16.16b\n"
- "add x9, x9, #0x80\n"
"16:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -292,11 +292,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q16, [x14, #0x30]\n"
"uzp1 v10.2d, v10.2d, v14.2d\n"
"uzp1 v11.2d, v11.2d, v15.2d\n"
- "mov v15.16b, v8.16b\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "add v15.4s, v15.4s, v19.4s\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"add x14, x14, #0x40\n"
+ "mov v15.16b, v8.16b\n"
"add v9.4s, v9.4s, v18.4s\n"
+ "add v15.4s, v15.4s, v19.4s\n"
"add v10.4s, v10.4s, v17.4s\n"
"add v11.4s, v11.4s, v16.4s\n"
"tbz %x[flags], #4, 17f\n"
@@ -312,9 +312,9 @@ void a64_hybrid_s8qs_mmla_6x16 (
"add x13, x13, #0x40\n"
"b 18f\n"
"17:" // Height 1: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -328,34 +328,34 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sqrdmulh v10.4s, v10.4s, v6.4s\n"
"sqrdmulh v11.4s, v11.4s, v7.4s\n"
"tbz %x[flags], #5, 19f\n"
- "and v17.16b, v15.16b, v0.16b\n"
- "and v16.16b, v9.16b, v1.16b\n"
- "and v25.16b, v10.16b, v2.16b\n"
- "and v18.16b, v11.16b, v3.16b\n"
+ "and v18.16b, v15.16b, v0.16b\n"
+ "and v17.16b, v9.16b, v1.16b\n"
+ "and v26.16b, v10.16b, v2.16b\n"
+ "and v16.16b, v11.16b, v3.16b\n"
+ "sshr v18.4s, v18.4s, #0x1f\n"
"sshr v17.4s, v17.4s, #0x1f\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
- "sshr v25.4s, v25.4s, #0x1f\n"
- "sshr v18.4s, v18.4s, #0x1f\n"
- "sqadd v15.4s, v15.4s, v17.4s\n"
- "sqadd v9.4s, v9.4s, v16.4s\n"
- "sqadd v10.4s, v10.4s, v25.4s\n"
- "sqadd v11.4s, v11.4s, v18.4s\n"
+ "sqadd v15.4s, v15.4s, v18.4s\n"
+ "sqadd v9.4s, v9.4s, v17.4s\n"
+ "sqadd v10.4s, v10.4s, v26.4s\n"
+ "sqadd v11.4s, v11.4s, v16.4s\n"
"19:" // Height 1: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v15.4s, v15.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
- "srshl v10.4s, v10.4s, v2.4s\n"
- "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
- "add v15.4s, v15.4s, v18.4s\n"
- "add v9.4s, v9.4s, v18.4s\n"
+ "srshl v10.4s, v10.4s, v2.4s\n"
+ "srshl v11.4s, v11.4s, v3.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v16.4s }, [x20]\n"
+ "add v15.4s, v15.4s, v18.4s\n"
+ "add v9.4s, v9.4s, v18.4s\n"
"add v10.4s, v10.4s, v18.4s\n"
"add v11.4s, v11.4s, v18.4s\n"
- "cmp x10, #0x10\n"
"smin v15.4s, v15.4s, v17.4s\n"
"smin v9.4s, v9.4s, v17.4s\n"
"smin v10.4s, v10.4s, v17.4s\n"
@@ -368,65 +368,65 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v16.8h, v10.8h, v11.8h\n"
"uzp1 v15.16b, v15.16b, v16.16b\n"
"bge 28f\n"
- "tbz x10, #3, 23f\n"
- "str d15, [x11], #0x8\n"
- "tbz x10, #2, 21f\n"
- "st1 { v15.s }[2], [x11], #0x4\n"
- "tbz x10, #1, 20f\n"
- "st1 { v15.h }[6], [x11], #0x2\n"
- "tbz x10, #0, 27f\n"
- "st1 { v15.b }[14], [x11]\n"
+ "tbz x11, #3, 23f\n"
+ "str d15, [x9], #0x8\n"
+ "tbz x11, #2, 21f\n"
+ "st1 { v15.s }[2], [x9], #0x4\n"
+ "tbz x11, #1, 20f\n"
+ "st1 { v15.h }[6], [x9], #0x2\n"
+ "tbz x11, #0, 27f\n"
+ "st1 { v15.b }[14], [x9]\n"
"b 27f\n"
"20:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x10, #0, 27f\n"
- "st1 { v15.b }[12], [x11]\n"
+ "tbz x11, #0, 27f\n"
+ "st1 { v15.b }[12], [x9]\n"
"b 27f\n"
"21:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x10, #1, 22f\n"
- "st1 { v15.h }[4], [x11], #0x2\n"
- "tbz x10, #0, 27f\n"
- "st1 { v15.b }[10], [x11]\n"
+ "tbz x11, #1, 22f\n"
+ "st1 { v15.h }[4], [x9], #0x2\n"
+ "tbz x11, #0, 27f\n"
+ "st1 { v15.b }[10], [x9]\n"
"b 27f\n"
"22:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x10, #0, 27f\n"
- "st1 { v15.b }[8], [x11]\n"
+ "tbz x11, #0, 27f\n"
+ "st1 { v15.b }[8], [x9]\n"
"b 27f\n"
"23:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x10, #2, 25f\n"
- "str s15, [x11], #0x4\n"
- "tbz x10, #1, 24f\n"
- "st1 { v15.h }[2], [x11], #0x2\n"
- "tbz x10, #0, 27f\n"
- "st1 { v15.b }[6], [x11]\n"
+ "tbz x11, #2, 25f\n"
+ "str s15, [x9], #0x4\n"
+ "tbz x11, #1, 24f\n"
+ "st1 { v15.h }[2], [x9], #0x2\n"
+ "tbz x11, #0, 27f\n"
+ "st1 { v15.b }[6], [x9]\n"
"b 27f\n"
"24:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x10, #0, 27f\n"
- "st1 { v15.b }[4], [x11]\n"
+ "tbz x11, #0, 27f\n"
+ "st1 { v15.b }[4], [x9]\n"
"b 27f\n"
"25:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x10, #1, 26f\n"
- "str h15, [x11], #0x2\n"
- "tbz x10, #0, 27f\n"
- "st1 { v15.b }[2], [x11]\n"
+ "tbz x11, #1, 26f\n"
+ "str h15, [x9], #0x2\n"
+ "tbz x11, #0, 27f\n"
+ "st1 { v15.b }[2], [x9]\n"
"b 27f\n"
"26:" // Height 1: Partial direct writeback: partial_1_0
- "str b15, [x11, #0x0]\n"
+ "str b15, [x9, #0x0]\n"
"27:" // Height 1: Partial direct writeback: Done
"b 29f\n"
"28:" // Height 1: Full writeback
- "str q15, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
+ "str q15, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
"29:" // Height 1: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 2b\n"
"b 176f\n"
"30:" // Height 2
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"31:" // Height 2: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -440,8 +440,8 @@ void a64_hybrid_s8qs_mmla_6x16 (
"mov x28, #0x0\n"
"33:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 34f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -461,120 +461,120 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q1, [x26, #0x0]\n"
"ldr q2, [x25, #0x0]\n"
"cmp x27, #0x20\n"
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
"blt 37f\n"
"36:" // Height 2: Multiply loop: Main loop head
"trn1 v18.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a648 // smmla v8.4s, v18.16b, v7.16b\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q16, [x10, #0x20]\n"
".inst 0x4e86a64c // smmla v12.4s, v18.16b, v6.16b\n"
- "ldr q16, [x9, #0x30]\n"
- ".inst 0x4e91a649 // smmla v9.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x40]\n"
- ".inst 0x4e90a64d // smmla v13.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x50]\n"
- ".inst 0x4e91a64a // smmla v10.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x60]\n"
- ".inst 0x4e90a64e // smmla v14.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e91a64b // smmla v11.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x80]\n"
- ".inst 0x4e90a64f // smmla v15.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x90]\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e90a649 // smmla v9.4s, v18.16b, v16.16b\n"
+ "ldr q16, [x10, #0x40]\n"
+ ".inst 0x4e91a64d // smmla v13.4s, v18.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e90a64a // smmla v10.4s, v18.16b, v16.16b\n"
+ "ldr q16, [x10, #0x60]\n"
+ ".inst 0x4e91a64e // smmla v14.4s, v18.16b, v17.16b\n"
+ "ldr q31, [x10, #0x70]\n"
+ ".inst 0x4e90a64b // smmla v11.4s, v18.16b, v16.16b\n"
+ "ldr q17, [x10, #0x80]\n"
+ ".inst 0x4e9fa64f // smmla v15.4s, v18.16b, v31.16b\n"
+ "ldr q16, [x10, #0x90]\n"
".inst 0x4e91a428 // smmla v8.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xa0]\n"
+ "ldr q17, [x10, #0xa0]\n"
".inst 0x4e90a42c // smmla v12.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xb0]\n"
+ "ldr q16, [x10, #0xb0]\n"
".inst 0x4e91a429 // smmla v9.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xc0]\n"
+ "ldr q17, [x10, #0xc0]\n"
".inst 0x4e90a42d // smmla v13.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xd0]\n"
+ "ldr q16, [x10, #0xd0]\n"
".inst 0x4e91a42a // smmla v10.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xe0]\n"
+ "ldr q17, [x10, #0xe0]\n"
".inst 0x4e90a42e // smmla v14.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e91a42b // smmla v11.4s, v1.16b, v17.16b\n"
- "add x9, x9, #0x100\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x4e90a42f // smmla v15.4s, v1.16b, v16.16b\n"
"ldr q1, [x26, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q6, [x10, #0x10]\n"
"bge 36b\n"
"37:" // Height 2: Multiply loop: Single iteration only
"trn1 v18.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a648 // smmla v8.4s, v18.16b, v7.16b\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4e86a64c // smmla v12.4s, v18.16b, v6.16b\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
".inst 0x4e91a649 // smmla v9.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x40]\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4e90a64d // smmla v13.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4e91a64a // smmla v10.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x60]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4e90a64e // smmla v14.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
+ "ldr q16, [x10, #0x70]\n"
".inst 0x4e91a64b // smmla v11.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x80]\n"
+ "ldr q17, [x10, #0x80]\n"
".inst 0x4e90a64f // smmla v15.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x90]\n"
+ "ldr q16, [x10, #0x90]\n"
".inst 0x4e91a428 // smmla v8.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xa0]\n"
+ "ldr q17, [x10, #0xa0]\n"
".inst 0x4e90a42c // smmla v12.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xb0]\n"
+ "ldr q16, [x10, #0xb0]\n"
".inst 0x4e91a429 // smmla v9.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xc0]\n"
+ "ldr q17, [x10, #0xc0]\n"
".inst 0x4e90a42d // smmla v13.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xd0]\n"
+ "ldr q16, [x10, #0xd0]\n"
".inst 0x4e91a42a // smmla v10.4s, v1.16b, v17.16b\n"
- "ldr q17, [x9, #0xe0]\n"
+ "ldr q17, [x10, #0xe0]\n"
".inst 0x4e90a42e // smmla v14.4s, v1.16b, v16.16b\n"
- "ldr q16, [x9, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e91a42b // smmla v11.4s, v1.16b, v17.16b\n"
".inst 0x4e90a42f // smmla v15.4s, v1.16b, v16.16b\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x9, x9, #0x100\n"
"38:" // Height 2: Multiply loop: Main loop skip
"cbz x27, 45f\n"
"cmp x27, #0x8\n"
"blt 40f\n"
"39:" // Height 2: Multiply loop: Odd block loop
- "ldr d17, [x26], #0x8\n"
- "ldr d16, [x25], #0x8\n"
- "trn1 v18.2d, v17.2d, v16.2d\n"
+ "ldr d19, [x26], #0x8\n"
+ "ldr d18, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr q17, [x9, #0x0]\n"
- "ldr q16, [x9, #0x10]\n"
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x8\n"
+ "trn1 v18.2d, v19.2d, v18.2d\n"
".inst 0x4e91a648 // smmla v8.4s, v18.16b, v17.16b\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4e90a64c // smmla v12.4s, v18.16b, v16.16b\n"
- "ldr q17, [x9, #0x20]\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
".inst 0x4e91a649 // smmla v9.4s, v18.16b, v17.16b\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4e90a64d // smmla v13.4s, v18.16b, v16.16b\n"
- "ldr q17, [x9, #0x40]\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4e91a64a // smmla v10.4s, v18.16b, v17.16b\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4e90a64e // smmla v14.4s, v18.16b, v16.16b\n"
- "ldr q17, [x9, #0x60]\n"
- "ldr q16, [x9, #0x70]\n"
- "cmp x27, #0x8\n"
+ "ldr q16, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e91a64b // smmla v11.4s, v18.16b, v17.16b\n"
".inst 0x4e90a64f // smmla v15.4s, v18.16b, v16.16b\n"
- "add x9, x9, #0x80\n"
"bge 39b\n"
"40:" // Height 2: Multiply loop: Skip odd blocks
"cbz x27, 45f\n"
@@ -605,24 +605,24 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr b1, [x26, #0x0]\n"
"ldr b2, [x25, #0x0]\n"
"44:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q17, [x9, #0x0]\n"
- "ldr q16, [x9, #0x10]\n"
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q16, [x10, #0x10]\n"
"trn1 v18.2d, v1.2d, v2.2d\n"
".inst 0x4e91a648 // smmla v8.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x20]\n"
+ "ldr q17, [x10, #0x20]\n"
".inst 0x4e90a64c // smmla v12.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x30]\n"
+ "ldr q16, [x10, #0x30]\n"
".inst 0x4e91a649 // smmla v9.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x40]\n"
+ "ldr q17, [x10, #0x40]\n"
".inst 0x4e90a64d // smmla v13.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x50]\n"
+ "ldr q16, [x10, #0x50]\n"
".inst 0x4e91a64a // smmla v10.4s, v18.16b, v17.16b\n"
- "ldr q17, [x9, #0x60]\n"
+ "ldr q17, [x10, #0x60]\n"
".inst 0x4e90a64e // smmla v14.4s, v18.16b, v16.16b\n"
- "ldr q16, [x9, #0x70]\n"
+ "ldr q16, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e91a64b // smmla v11.4s, v18.16b, v17.16b\n"
".inst 0x4e90a64f // smmla v15.4s, v18.16b, v16.16b\n"
- "add x9, x9, #0x80\n"
"45:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -639,18 +639,18 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
- "add x25, x11, x20\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "mov v15.16b, v17.16b\n"
- "add v15.4s, v15.4s, v19.4s\n"
"add x14, x14, #0x40\n"
+ "mov v15.16b, v17.16b\n"
"add v12.4s, v12.4s, v18.4s\n"
+ "add x26, x9, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"add v13.4s, v13.4s, v5.4s\n"
- "add v14.4s, v14.4s, v16.4s\n"
"add v8.4s, v8.4s, v19.4s\n"
+ "add v15.4s, v15.4s, v19.4s\n"
+ "add v14.4s, v14.4s, v16.4s\n"
"add v9.4s, v9.4s, v18.4s\n"
"add v10.4s, v10.4s, v5.4s\n"
"add v11.4s, v11.4s, v16.4s\n"
@@ -667,9 +667,9 @@ void a64_hybrid_s8qs_mmla_6x16 (
"add x13, x13, #0x40\n"
"b 47f\n"
"46:" // Height 2: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -696,11 +696,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v17.4s, v17.4s, #0x1f\n"
"sshr v16.4s, v16.4s, #0x1f\n"
"sqadd v15.4s, v15.4s, v19.4s\n"
+ "and v19.16b, v8.16b, v0.16b\n"
"sqadd v12.4s, v12.4s, v18.4s\n"
+ "and v18.16b, v9.16b, v1.16b\n"
"sqadd v13.4s, v13.4s, v17.4s\n"
"sqadd v14.4s, v14.4s, v16.4s\n"
- "and v19.16b, v8.16b, v0.16b\n"
- "and v18.16b, v9.16b, v1.16b\n"
"and v17.16b, v10.16b, v2.16b\n"
"and v16.16b, v11.16b, v3.16b\n"
"sshr v19.4s, v19.4s, #0x1f\n"
@@ -712,21 +712,21 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sqadd v10.4s, v10.4s, v17.4s\n"
"sqadd v11.4s, v11.4s, v16.4s\n"
"48:" // Height 2: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v18.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v15.4s, v15.4s, v0.4s\n"
"srshl v12.4s, v12.4s, v1.4s\n"
- "srshl v13.4s, v13.4s, v2.4s\n"
- "srshl v14.4s, v14.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v18.4s }, [x21]\n"
"ld1r { v17.4s }, [x20]\n"
+ "srshl v13.4s, v13.4s, v2.4s\n"
+ "srshl v14.4s, v14.4s, v3.4s\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v16.4s }, [x20]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"add v15.4s, v15.4s, v18.4s\n"
"add v12.4s, v12.4s, v18.4s\n"
"add v13.4s, v13.4s, v18.4s\n"
@@ -758,81 +758,81 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v15.16b, v15.16b, v17.16b\n"
"uzp1 v8.16b, v8.16b, v16.16b\n"
"bge 57f\n"
- "tbz x10, #3, 52f\n"
- "str d15, [x11], #0x8\n"
- "str d8, [x25], #0x8\n"
- "tbz x10, #2, 50f\n"
- "st1 { v15.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x25], #0x4\n"
- "tbz x10, #1, 49f\n"
- "st1 { v15.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x25], #0x2\n"
- "tbz x10, #0, 56f\n"
- "st1 { v15.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x25]\n"
+ "tbz x11, #3, 52f\n"
+ "str d15, [x9], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "tbz x11, #2, 50f\n"
+ "st1 { v15.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x26], #0x4\n"
+ "tbz x11, #1, 49f\n"
+ "st1 { v15.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x26], #0x2\n"
+ "tbz x11, #0, 56f\n"
+ "st1 { v15.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x26]\n"
"b 56f\n"
"49:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x10, #0, 56f\n"
- "st1 { v15.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x25]\n"
+ "tbz x11, #0, 56f\n"
+ "st1 { v15.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x26]\n"
"b 56f\n"
"50:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x10, #1, 51f\n"
- "st1 { v15.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x25], #0x2\n"
- "tbz x10, #0, 56f\n"
- "st1 { v15.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x25]\n"
+ "tbz x11, #1, 51f\n"
+ "st1 { v15.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x26], #0x2\n"
+ "tbz x11, #0, 56f\n"
+ "st1 { v15.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x26]\n"
"b 56f\n"
"51:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x10, #0, 56f\n"
- "st1 { v15.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x25]\n"
+ "tbz x11, #0, 56f\n"
+ "st1 { v15.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x26]\n"
"b 56f\n"
"52:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x10, #2, 54f\n"
- "str s15, [x11], #0x4\n"
- "str s8, [x25], #0x4\n"
- "tbz x10, #1, 53f\n"
- "st1 { v15.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x25], #0x2\n"
- "tbz x10, #0, 56f\n"
- "st1 { v15.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x25]\n"
+ "tbz x11, #2, 54f\n"
+ "str s15, [x9], #0x4\n"
+ "str s8, [x26], #0x4\n"
+ "tbz x11, #1, 53f\n"
+ "st1 { v15.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x26], #0x2\n"
+ "tbz x11, #0, 56f\n"
+ "st1 { v15.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x26]\n"
"b 56f\n"
"53:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x10, #0, 56f\n"
- "st1 { v15.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x25]\n"
+ "tbz x11, #0, 56f\n"
+ "st1 { v15.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x26]\n"
"b 56f\n"
"54:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x10, #1, 55f\n"
- "str h15, [x11], #0x2\n"
- "str h8, [x25], #0x2\n"
- "tbz x10, #0, 56f\n"
- "st1 { v15.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x25]\n"
+ "tbz x11, #1, 55f\n"
+ "str h15, [x9], #0x2\n"
+ "str h8, [x26], #0x2\n"
+ "tbz x11, #0, 56f\n"
+ "st1 { v15.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x26]\n"
"b 56f\n"
"55:" // Height 2: Partial direct writeback: partial_1_0
- "str b15, [x11, #0x0]\n"
- "str b8, [x25, #0x0]\n"
+ "str b15, [x9, #0x0]\n"
+ "str b8, [x26, #0x0]\n"
"56:" // Height 2: Partial direct writeback: Done
"b 58f\n"
"57:" // Height 2: Full writeback
- "str q15, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x25, #0x0]\n"
+ "str q15, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x26, #0x0]\n"
"58:" // Height 2: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 31b\n"
"b 176f\n"
"59:" // Height 3
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"60:" // Height 3: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -854,8 +854,8 @@ void a64_hybrid_s8qs_mmla_6x16 (
"mov x28, #0x0\n"
"62:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 63f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -879,130 +879,130 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q2, [x25, #0x0]\n"
"cmp x27, #0x20\n"
"ldr q3, [x24, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
"blt 66f\n"
"65:" // Height 3: Multiply loop: Main loop head
"trn1 v27.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "trn1 v26.2d, v3.2d, v24.2d\n"
".inst 0x4e87a768 // smmla v8.4s, v27.16b, v7.16b\n"
- "trn1 v26.2d, v3.2d, v28.2d\n"
- ".inst 0x4e87a750 // smmla v16.4s, v26.16b, v7.16b\n"
- "ldr q25, [x9, #0x20]\n"
".inst 0x4e86a76c // smmla v12.4s, v27.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn2 v3.2d, v3.2d, v24.2d\n"
+ ".inst 0x4e87a750 // smmla v16.4s, v26.16b, v7.16b\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4e86a754 // smmla v20.4s, v26.16b, v6.16b\n"
- "ldr q24, [x9, #0x30]\n"
+ "ldr q24, [x10, #0x30]\n"
".inst 0x4e99a769 // smmla v9.4s, v27.16b, v25.16b\n"
- "trn2 v3.2d, v3.2d, v28.2d\n"
".inst 0x4e99a751 // smmla v17.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
".inst 0x4e98a76d // smmla v13.4s, v27.16b, v24.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e98a755 // smmla v21.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4e99a76a // smmla v10.4s, v27.16b, v25.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e99a752 // smmla v18.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4e98a76e // smmla v14.4s, v27.16b, v24.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e98a756 // smmla v22.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x70]\n"
+ "ldr q24, [x10, #0x70]\n"
".inst 0x4e99a76b // smmla v11.4s, v27.16b, v25.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e99a753 // smmla v19.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x80]\n"
+ "ldr q25, [x10, #0x80]\n"
".inst 0x4e98a76f // smmla v15.4s, v27.16b, v24.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e98a757 // smmla v23.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x90]\n"
+ "ldr q24, [x10, #0x90]\n"
"ldr q2, [x25, #0x0]\n"
".inst 0x4e99a428 // smmla v8.4s, v1.16b, v25.16b\n"
".inst 0x4e99a470 // smmla v16.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xa0]\n"
+ "ldr q25, [x10, #0xa0]\n"
".inst 0x4e98a42c // smmla v12.4s, v1.16b, v24.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e98a474 // smmla v20.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xb0]\n"
+ "ldr q24, [x10, #0xb0]\n"
".inst 0x4e99a429 // smmla v9.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e99a471 // smmla v17.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xc0]\n"
+ "ldr q25, [x10, #0xc0]\n"
".inst 0x4e98a42d // smmla v13.4s, v1.16b, v24.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e98a475 // smmla v21.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xd0]\n"
+ "ldr q24, [x10, #0xd0]\n"
".inst 0x4e99a42a // smmla v10.4s, v1.16b, v25.16b\n"
".inst 0x4e99a472 // smmla v18.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xe0]\n"
+ "ldr q25, [x10, #0xe0]\n"
".inst 0x4e98a42e // smmla v14.4s, v1.16b, v24.16b\n"
".inst 0x4e98a476 // smmla v22.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q24, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e99a42b // smmla v11.4s, v1.16b, v25.16b\n"
".inst 0x4e99a473 // smmla v19.4s, v3.16b, v25.16b\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x4e98a42f // smmla v15.4s, v1.16b, v24.16b\n"
"ldr q1, [x26, #0x0]\n"
".inst 0x4e98a477 // smmla v23.4s, v3.16b, v24.16b\n"
"ldr q3, [x24, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
"bge 65b\n"
"66:" // Height 3: Multiply loop: Single iteration only
"trn1 v27.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn1 v26.2d, v3.2d, v24.2d\n"
".inst 0x4e87a768 // smmla v8.4s, v27.16b, v7.16b\n"
- "trn1 v26.2d, v3.2d, v25.2d\n"
- ".inst 0x4e87a750 // smmla v16.4s, v26.16b, v7.16b\n"
- "ldr q24, [x9, #0x20]\n"
".inst 0x4e86a76c // smmla v12.4s, v27.16b, v6.16b\n"
+ "trn2 v3.2d, v3.2d, v24.2d\n"
+ ".inst 0x4e87a750 // smmla v16.4s, v26.16b, v7.16b\n"
+ "ldr q24, [x10, #0x20]\n"
".inst 0x4e86a754 // smmla v20.4s, v26.16b, v6.16b\n"
- "ldr q0, [x9, #0x30]\n"
+ "ldr q4, [x10, #0x30]\n"
".inst 0x4e98a769 // smmla v9.4s, v27.16b, v24.16b\n"
- "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x4e98a751 // smmla v17.4s, v26.16b, v24.16b\n"
- "ldr q25, [x9, #0x40]\n"
- ".inst 0x4e80a76d // smmla v13.4s, v27.16b, v0.16b\n"
- "add x26, x26, #0x10\n"
- ".inst 0x4e80a755 // smmla v21.4s, v26.16b, v0.16b\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q25, [x10, #0x40]\n"
+ ".inst 0x4e84a76d // smmla v13.4s, v27.16b, v4.16b\n"
+ ".inst 0x4e84a755 // smmla v21.4s, v26.16b, v4.16b\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4e99a76a // smmla v10.4s, v27.16b, v25.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e99a752 // smmla v18.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4e98a76e // smmla v14.4s, v27.16b, v24.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e98a756 // smmla v22.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x70]\n"
+ "ldr q24, [x10, #0x70]\n"
".inst 0x4e99a76b // smmla v11.4s, v27.16b, v25.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e99a753 // smmla v19.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x80]\n"
+ "ldr q25, [x10, #0x80]\n"
".inst 0x4e98a76f // smmla v15.4s, v27.16b, v24.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e98a757 // smmla v23.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x90]\n"
+ "ldr q24, [x10, #0x90]\n"
".inst 0x4e99a428 // smmla v8.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e99a470 // smmla v16.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xa0]\n"
+ "ldr q25, [x10, #0xa0]\n"
".inst 0x4e98a42c // smmla v12.4s, v1.16b, v24.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e98a474 // smmla v20.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xb0]\n"
+ "ldr q24, [x10, #0xb0]\n"
".inst 0x4e99a429 // smmla v9.4s, v1.16b, v25.16b\n"
".inst 0x4e99a471 // smmla v17.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xc0]\n"
+ "ldr q25, [x10, #0xc0]\n"
".inst 0x4e98a42d // smmla v13.4s, v1.16b, v24.16b\n"
".inst 0x4e98a475 // smmla v21.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xd0]\n"
+ "ldr q24, [x10, #0xd0]\n"
".inst 0x4e99a42a // smmla v10.4s, v1.16b, v25.16b\n"
".inst 0x4e99a472 // smmla v18.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xe0]\n"
+ "ldr q25, [x10, #0xe0]\n"
".inst 0x4e98a42e // smmla v14.4s, v1.16b, v24.16b\n"
".inst 0x4e98a476 // smmla v22.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q24, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e99a42b // smmla v11.4s, v1.16b, v25.16b\n"
".inst 0x4e99a473 // smmla v19.4s, v3.16b, v25.16b\n"
".inst 0x4e98a42f // smmla v15.4s, v1.16b, v24.16b\n"
@@ -1012,35 +1012,35 @@ void a64_hybrid_s8qs_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 69f\n"
"68:" // Height 3: Multiply loop: Odd block loop
- "ldr d25, [x26], #0x8\n"
- "ldr d24, [x25], #0x8\n"
- "trn1 v27.2d, v25.2d, v24.2d\n"
- "ldr d24, [x24], #0x8\n"
- "ldr q25, [x9, #0x0]\n"
- "trn1 v26.2d, v24.2d, v26.2d\n"
+ "ldr d29, [x26], #0x8\n"
+ "ldr d27, [x25], #0x8\n"
+ "sub x27, x27, #0x8\n"
+ "ldr d26, [x24], #0x8\n"
+ "ldr q25, [x10, #0x0]\n"
+ "cmp x27, #0x8\n"
+ "ldr q24, [x10, #0x10]\n"
+ "trn1 v27.2d, v29.2d, v27.2d\n"
+ "trn1 v26.2d, v26.2d, v28.2d\n"
".inst 0x4e99a768 // smmla v8.4s, v27.16b, v25.16b\n"
- "ldr q24, [x9, #0x10]\n"
".inst 0x4e99a750 // smmla v16.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x20]\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4e98a76c // smmla v12.4s, v27.16b, v24.16b\n"
".inst 0x4e98a754 // smmla v20.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x30]\n"
+ "ldr q24, [x10, #0x30]\n"
".inst 0x4e99a769 // smmla v9.4s, v27.16b, v25.16b\n"
- "sub x27, x27, #0x8\n"
".inst 0x4e99a751 // smmla v17.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
".inst 0x4e98a76d // smmla v13.4s, v27.16b, v24.16b\n"
- "cmp x27, #0x8\n"
".inst 0x4e98a755 // smmla v21.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4e99a76a // smmla v10.4s, v27.16b, v25.16b\n"
".inst 0x4e99a752 // smmla v18.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4e98a76e // smmla v14.4s, v27.16b, v24.16b\n"
".inst 0x4e98a756 // smmla v22.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x70]\n"
+ "ldr q24, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e99a76b // smmla v11.4s, v27.16b, v25.16b\n"
- "add x9, x9, #0x80\n"
".inst 0x4e99a753 // smmla v19.4s, v26.16b, v25.16b\n"
".inst 0x4e98a76f // smmla v15.4s, v27.16b, v24.16b\n"
".inst 0x4e98a757 // smmla v23.4s, v26.16b, v24.16b\n"
@@ -1081,29 +1081,29 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr b2, [x25, #0x0]\n"
"ldr b3, [x24, #0x0]\n"
"73:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q25, [x9, #0x0]\n"
- "ldr q28, [x9, #0x10]\n"
+ "ldr q25, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
"trn1 v27.2d, v1.2d, v2.2d\n"
"trn1 v26.2d, v3.2d, v24.2d\n"
".inst 0x4e99a768 // smmla v8.4s, v27.16b, v25.16b\n"
- ".inst 0x4e99a750 // smmla v16.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x20]\n"
".inst 0x4e9ca76c // smmla v12.4s, v27.16b, v28.16b\n"
+ ".inst 0x4e99a750 // smmla v16.4s, v26.16b, v25.16b\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4e9ca754 // smmla v20.4s, v26.16b, v28.16b\n"
- "ldr q24, [x9, #0x30]\n"
+ "ldr q24, [x10, #0x30]\n"
".inst 0x4e99a769 // smmla v9.4s, v27.16b, v25.16b\n"
".inst 0x4e99a751 // smmla v17.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
".inst 0x4e98a76d // smmla v13.4s, v27.16b, v24.16b\n"
".inst 0x4e98a755 // smmla v21.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4e99a76a // smmla v10.4s, v27.16b, v25.16b\n"
".inst 0x4e99a752 // smmla v18.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4e98a76e // smmla v14.4s, v27.16b, v24.16b\n"
".inst 0x4e98a756 // smmla v22.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q24, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e99a76b // smmla v11.4s, v27.16b, v25.16b\n"
".inst 0x4e99a753 // smmla v19.4s, v26.16b, v25.16b\n"
".inst 0x4e98a76f // smmla v15.4s, v27.16b, v24.16b\n"
@@ -1124,23 +1124,23 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
- "add x25, x11, x20\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "add x24, x25, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "add x14, x14, #0x40\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
"uzp1 v19.2d, v19.2d, v23.2d\n"
- "add x14, x14, #0x40\n"
+ "add x26, x9, x20\n"
"mov v23.16b, v26.16b\n"
- "add v23.4s, v23.4s, v28.4s\n"
+ "add x25, x26, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"add v12.4s, v12.4s, v27.4s\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"add v13.4s, v13.4s, v25.4s\n"
"add v14.4s, v14.4s, v24.4s\n"
+ "add v23.4s, v23.4s, v28.4s\n"
"add v8.4s, v8.4s, v28.4s\n"
"add v9.4s, v9.4s, v27.4s\n"
"add v10.4s, v10.4s, v25.4s\n"
@@ -1162,9 +1162,9 @@ void a64_hybrid_s8qs_mmla_6x16 (
"add x13, x13, #0x40\n"
"b 76f\n"
"75:" // Height 3: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -1195,11 +1195,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v21.4s, v21.4s, #0x1f\n"
"sshr v20.4s, v20.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v24.4s\n"
+ "and v24.16b, v8.16b, v0.16b\n"
"sqadd v12.4s, v12.4s, v22.4s\n"
+ "and v22.16b, v9.16b, v1.16b\n"
"sqadd v13.4s, v13.4s, v21.4s\n"
"sqadd v14.4s, v14.4s, v20.4s\n"
- "and v24.16b, v8.16b, v0.16b\n"
- "and v22.16b, v9.16b, v1.16b\n"
"and v21.16b, v10.16b, v2.16b\n"
"and v20.16b, v11.16b, v3.16b\n"
"sshr v24.4s, v24.4s, #0x1f\n"
@@ -1207,11 +1207,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v21.4s, v21.4s, #0x1f\n"
"sshr v20.4s, v20.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v24.4s\n"
+ "and v24.16b, v16.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v22.4s\n"
+ "and v22.16b, v17.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v21.4s\n"
"sqadd v11.4s, v11.4s, v20.4s\n"
- "and v24.16b, v16.16b, v0.16b\n"
- "and v22.16b, v17.16b, v1.16b\n"
"and v21.16b, v18.16b, v2.16b\n"
"and v20.16b, v19.16b, v3.16b\n"
"sshr v24.4s, v24.4s, #0x1f\n"
@@ -1223,21 +1223,21 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sqadd v18.4s, v18.4s, v21.4s\n"
"sqadd v19.4s, v19.4s, v20.4s\n"
"77:" // Height 3: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v22.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"srshl v12.4s, v12.4s, v1.4s\n"
- "srshl v13.4s, v13.4s, v2.4s\n"
- "srshl v14.4s, v14.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v22.4s }, [x21]\n"
"ld1r { v21.4s }, [x20]\n"
+ "srshl v13.4s, v13.4s, v2.4s\n"
+ "srshl v14.4s, v14.4s, v3.4s\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v20.4s }, [x20]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v1.4s\n"
"srshl v18.4s, v18.4s, v2.4s\n"
@@ -1288,97 +1288,97 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v8.16b, v8.16b, v20.16b\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 86f\n"
- "tbz x10, #3, 81f\n"
- "str d23, [x11], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d16, [x24], #0x8\n"
- "tbz x10, #2, 79f\n"
- "st1 { v23.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x25], #0x4\n"
- "st1 { v16.s }[2], [x24], #0x4\n"
- "tbz x10, #1, 78f\n"
- "st1 { v23.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x25], #0x2\n"
- "st1 { v16.h }[6], [x24], #0x2\n"
- "tbz x10, #0, 85f\n"
- "st1 { v23.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x25]\n"
- "st1 { v16.b }[14], [x24]\n"
+ "tbz x11, #3, 81f\n"
+ "str d23, [x9], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d16, [x25], #0x8\n"
+ "tbz x11, #2, 79f\n"
+ "st1 { v23.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x26], #0x4\n"
+ "st1 { v16.s }[2], [x25], #0x4\n"
+ "tbz x11, #1, 78f\n"
+ "st1 { v23.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x26], #0x2\n"
+ "st1 { v16.h }[6], [x25], #0x2\n"
+ "tbz x11, #0, 85f\n"
+ "st1 { v23.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x26]\n"
+ "st1 { v16.b }[14], [x25]\n"
"b 85f\n"
"78:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x10, #0, 85f\n"
- "st1 { v23.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x25]\n"
- "st1 { v16.b }[12], [x24]\n"
+ "tbz x11, #0, 85f\n"
+ "st1 { v23.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x26]\n"
+ "st1 { v16.b }[12], [x25]\n"
"b 85f\n"
"79:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x10, #1, 80f\n"
- "st1 { v23.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x25], #0x2\n"
- "st1 { v16.h }[4], [x24], #0x2\n"
- "tbz x10, #0, 85f\n"
- "st1 { v23.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x25]\n"
- "st1 { v16.b }[10], [x24]\n"
+ "tbz x11, #1, 80f\n"
+ "st1 { v23.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x26], #0x2\n"
+ "st1 { v16.h }[4], [x25], #0x2\n"
+ "tbz x11, #0, 85f\n"
+ "st1 { v23.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x26]\n"
+ "st1 { v16.b }[10], [x25]\n"
"b 85f\n"
"80:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x10, #0, 85f\n"
- "st1 { v23.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x25]\n"
- "st1 { v16.b }[8], [x24]\n"
+ "tbz x11, #0, 85f\n"
+ "st1 { v23.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x26]\n"
+ "st1 { v16.b }[8], [x25]\n"
"b 85f\n"
"81:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x10, #2, 83f\n"
- "str s23, [x11], #0x4\n"
- "str s8, [x25], #0x4\n"
- "str s16, [x24], #0x4\n"
- "tbz x10, #1, 82f\n"
- "st1 { v23.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x25], #0x2\n"
- "st1 { v16.h }[2], [x24], #0x2\n"
- "tbz x10, #0, 85f\n"
- "st1 { v23.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x25]\n"
- "st1 { v16.b }[6], [x24]\n"
+ "tbz x11, #2, 83f\n"
+ "str s23, [x9], #0x4\n"
+ "str s8, [x26], #0x4\n"
+ "str s16, [x25], #0x4\n"
+ "tbz x11, #1, 82f\n"
+ "st1 { v23.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x26], #0x2\n"
+ "st1 { v16.h }[2], [x25], #0x2\n"
+ "tbz x11, #0, 85f\n"
+ "st1 { v23.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x26]\n"
+ "st1 { v16.b }[6], [x25]\n"
"b 85f\n"
"82:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x10, #0, 85f\n"
- "st1 { v23.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x25]\n"
- "st1 { v16.b }[4], [x24]\n"
+ "tbz x11, #0, 85f\n"
+ "st1 { v23.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x26]\n"
+ "st1 { v16.b }[4], [x25]\n"
"b 85f\n"
"83:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x10, #1, 84f\n"
- "str h23, [x11], #0x2\n"
- "str h8, [x25], #0x2\n"
- "str h16, [x24], #0x2\n"
- "tbz x10, #0, 85f\n"
- "st1 { v23.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x25]\n"
- "st1 { v16.b }[2], [x24]\n"
+ "tbz x11, #1, 84f\n"
+ "str h23, [x9], #0x2\n"
+ "str h8, [x26], #0x2\n"
+ "str h16, [x25], #0x2\n"
+ "tbz x11, #0, 85f\n"
+ "st1 { v23.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x26]\n"
+ "st1 { v16.b }[2], [x25]\n"
"b 85f\n"
"84:" // Height 3: Partial direct writeback: partial_1_0
- "str b23, [x11, #0x0]\n"
- "str b8, [x25, #0x0]\n"
- "str b16, [x24, #0x0]\n"
+ "str b23, [x9, #0x0]\n"
+ "str b8, [x26, #0x0]\n"
+ "str b16, [x25, #0x0]\n"
"85:" // Height 3: Partial direct writeback: Done
"b 87f\n"
"86:" // Height 3: Full writeback
- "str q23, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x25, #0x0]\n"
- "str q16, [x24, #0x0]\n"
+ "str q23, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x26, #0x0]\n"
+ "str q16, [x25, #0x0]\n"
"87:" // Height 3: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 60b\n"
"b 176f\n"
"88:" // Height 4
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"89:" // Height 4: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -1400,8 +1400,8 @@ void a64_hybrid_s8qs_mmla_6x16 (
"mov x28, #0x0\n"
"91:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 92f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1429,135 +1429,135 @@ void a64_hybrid_s8qs_mmla_6x16 (
"cmp x27, #0x20\n"
"ldr q3, [x24, #0x0]\n"
"ldr q4, [x23, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
"blt 95f\n"
"94:" // Height 4: Multiply loop: Main loop head
"trn1 v27.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a768 // smmla v8.4s, v27.16b, v7.16b\n"
"sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"trn1 v26.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a750 // smmla v16.4s, v26.16b, v7.16b\n"
- "ldr q25, [x9, #0x20]\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q4, [x23, #0x0]\n"
+ ".inst 0x4e87a768 // smmla v8.4s, v27.16b, v7.16b\n"
".inst 0x4e86a76c // smmla v12.4s, v27.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4e87a750 // smmla v16.4s, v26.16b, v7.16b\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4e86a754 // smmla v20.4s, v26.16b, v6.16b\n"
- "ldr q24, [x9, #0x30]\n"
+ "ldr q24, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e99a769 // smmla v9.4s, v27.16b, v25.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e99a751 // smmla v17.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
".inst 0x4e98a76d // smmla v13.4s, v27.16b, v24.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e98a755 // smmla v21.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4e99a76a // smmla v10.4s, v27.16b, v25.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e99a752 // smmla v18.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4e98a76e // smmla v14.4s, v27.16b, v24.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e98a756 // smmla v22.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x70]\n"
+ "ldr q24, [x10, #0x70]\n"
".inst 0x4e99a76b // smmla v11.4s, v27.16b, v25.16b\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x4e99a753 // smmla v19.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x80]\n"
+ "ldr q25, [x10, #0x80]\n"
".inst 0x4e98a76f // smmla v15.4s, v27.16b, v24.16b\n"
".inst 0x4e98a757 // smmla v23.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x90]\n"
+ "ldr q24, [x10, #0x90]\n"
"ldr q2, [x25, #0x0]\n"
".inst 0x4e99a428 // smmla v8.4s, v1.16b, v25.16b\n"
".inst 0x4e99a470 // smmla v16.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xa0]\n"
+ "ldr q25, [x10, #0xa0]\n"
".inst 0x4e98a42c // smmla v12.4s, v1.16b, v24.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e98a474 // smmla v20.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xb0]\n"
+ "ldr q24, [x10, #0xb0]\n"
".inst 0x4e99a429 // smmla v9.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e99a471 // smmla v17.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xc0]\n"
+ "ldr q25, [x10, #0xc0]\n"
".inst 0x4e98a42d // smmla v13.4s, v1.16b, v24.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e98a475 // smmla v21.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xd0]\n"
+ "ldr q24, [x10, #0xd0]\n"
".inst 0x4e99a42a // smmla v10.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e99a472 // smmla v18.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xe0]\n"
+ "ldr q25, [x10, #0xe0]\n"
".inst 0x4e98a42e // smmla v14.4s, v1.16b, v24.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e98a476 // smmla v22.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q24, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e99a42b // smmla v11.4s, v1.16b, v25.16b\n"
".inst 0x4e99a473 // smmla v19.4s, v3.16b, v25.16b\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x4e98a42f // smmla v15.4s, v1.16b, v24.16b\n"
"ldr q1, [x26, #0x0]\n"
".inst 0x4e98a477 // smmla v23.4s, v3.16b, v24.16b\n"
"ldr q3, [x24, #0x0]\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
"bge 94b\n"
"95:" // Height 4: Multiply loop: Single iteration only
"trn1 v27.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a768 // smmla v8.4s, v27.16b, v7.16b\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v26.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a750 // smmla v16.4s, v26.16b, v7.16b\n"
- "ldr q25, [x9, #0x20]\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ ".inst 0x4e87a768 // smmla v8.4s, v27.16b, v7.16b\n"
".inst 0x4e86a76c // smmla v12.4s, v27.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4e87a750 // smmla v16.4s, v26.16b, v7.16b\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4e86a754 // smmla v20.4s, v26.16b, v6.16b\n"
- "ldr q24, [x9, #0x30]\n"
+ "ldr q24, [x10, #0x30]\n"
".inst 0x4e99a769 // smmla v9.4s, v27.16b, v25.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e99a751 // smmla v17.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
".inst 0x4e98a76d // smmla v13.4s, v27.16b, v24.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e98a755 // smmla v21.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4e99a76a // smmla v10.4s, v27.16b, v25.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e99a752 // smmla v18.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4e98a76e // smmla v14.4s, v27.16b, v24.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e98a756 // smmla v22.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x70]\n"
+ "ldr q24, [x10, #0x70]\n"
".inst 0x4e99a76b // smmla v11.4s, v27.16b, v25.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e99a753 // smmla v19.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x80]\n"
+ "ldr q25, [x10, #0x80]\n"
".inst 0x4e98a76f // smmla v15.4s, v27.16b, v24.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e98a757 // smmla v23.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x90]\n"
+ "ldr q24, [x10, #0x90]\n"
".inst 0x4e99a428 // smmla v8.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e99a470 // smmla v16.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xa0]\n"
+ "ldr q25, [x10, #0xa0]\n"
".inst 0x4e98a42c // smmla v12.4s, v1.16b, v24.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e98a474 // smmla v20.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xb0]\n"
+ "ldr q24, [x10, #0xb0]\n"
".inst 0x4e99a429 // smmla v9.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e99a471 // smmla v17.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xc0]\n"
+ "ldr q25, [x10, #0xc0]\n"
".inst 0x4e98a42d // smmla v13.4s, v1.16b, v24.16b\n"
".inst 0x4e98a475 // smmla v21.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xd0]\n"
+ "ldr q24, [x10, #0xd0]\n"
".inst 0x4e99a42a // smmla v10.4s, v1.16b, v25.16b\n"
".inst 0x4e99a472 // smmla v18.4s, v3.16b, v25.16b\n"
- "ldr q25, [x9, #0xe0]\n"
+ "ldr q25, [x10, #0xe0]\n"
".inst 0x4e98a42e // smmla v14.4s, v1.16b, v24.16b\n"
".inst 0x4e98a476 // smmla v22.4s, v3.16b, v24.16b\n"
- "ldr q24, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q24, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e99a42b // smmla v11.4s, v1.16b, v25.16b\n"
".inst 0x4e99a473 // smmla v19.4s, v3.16b, v25.16b\n"
".inst 0x4e98a42f // smmla v15.4s, v1.16b, v24.16b\n"
@@ -1567,35 +1567,35 @@ void a64_hybrid_s8qs_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 98f\n"
"97:" // Height 4: Multiply loop: Odd block loop
- "ldr d25, [x26], #0x8\n"
- "ldr d24, [x25], #0x8\n"
- "trn1 v27.2d, v25.2d, v24.2d\n"
+ "ldr d29, [x26], #0x8\n"
+ "ldr d27, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr d25, [x24], #0x8\n"
- "ldr d24, [x23], #0x8\n"
- "trn1 v26.2d, v25.2d, v24.2d\n"
+ "ldr d28, [x24], #0x8\n"
+ "ldr d26, [x23], #0x8\n"
"cmp x27, #0x8\n"
- "ldr q25, [x9, #0x0]\n"
- "ldr q24, [x9, #0x10]\n"
+ "ldr q25, [x10, #0x0]\n"
+ "ldr q24, [x10, #0x10]\n"
+ "trn1 v27.2d, v29.2d, v27.2d\n"
+ "trn1 v26.2d, v28.2d, v26.2d\n"
".inst 0x4e99a768 // smmla v8.4s, v27.16b, v25.16b\n"
".inst 0x4e99a750 // smmla v16.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x20]\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4e98a76c // smmla v12.4s, v27.16b, v24.16b\n"
".inst 0x4e98a754 // smmla v20.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x30]\n"
+ "ldr q24, [x10, #0x30]\n"
".inst 0x4e99a769 // smmla v9.4s, v27.16b, v25.16b\n"
".inst 0x4e99a751 // smmla v17.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
".inst 0x4e98a76d // smmla v13.4s, v27.16b, v24.16b\n"
".inst 0x4e98a755 // smmla v21.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4e99a76a // smmla v10.4s, v27.16b, v25.16b\n"
".inst 0x4e99a752 // smmla v18.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4e98a76e // smmla v14.4s, v27.16b, v24.16b\n"
".inst 0x4e98a756 // smmla v22.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q24, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e99a76b // smmla v11.4s, v27.16b, v25.16b\n"
".inst 0x4e99a753 // smmla v19.4s, v26.16b, v25.16b\n"
".inst 0x4e98a76f // smmla v15.4s, v27.16b, v24.16b\n"
@@ -1644,29 +1644,29 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr b3, [x24, #0x0]\n"
"ldr b4, [x23, #0x0]\n"
"102:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q25, [x9, #0x0]\n"
- "ldr q24, [x9, #0x10]\n"
+ "ldr q25, [x10, #0x0]\n"
+ "ldr q24, [x10, #0x10]\n"
"trn1 v27.2d, v1.2d, v2.2d\n"
"trn1 v26.2d, v3.2d, v4.2d\n"
".inst 0x4e99a768 // smmla v8.4s, v27.16b, v25.16b\n"
".inst 0x4e99a750 // smmla v16.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x20]\n"
+ "ldr q25, [x10, #0x20]\n"
".inst 0x4e98a76c // smmla v12.4s, v27.16b, v24.16b\n"
".inst 0x4e98a754 // smmla v20.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x30]\n"
+ "ldr q24, [x10, #0x30]\n"
".inst 0x4e99a769 // smmla v9.4s, v27.16b, v25.16b\n"
".inst 0x4e99a751 // smmla v17.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x40]\n"
+ "ldr q25, [x10, #0x40]\n"
".inst 0x4e98a76d // smmla v13.4s, v27.16b, v24.16b\n"
".inst 0x4e98a755 // smmla v21.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x50]\n"
+ "ldr q24, [x10, #0x50]\n"
".inst 0x4e99a76a // smmla v10.4s, v27.16b, v25.16b\n"
".inst 0x4e99a752 // smmla v18.4s, v26.16b, v25.16b\n"
- "ldr q25, [x9, #0x60]\n"
+ "ldr q25, [x10, #0x60]\n"
".inst 0x4e98a76e // smmla v14.4s, v27.16b, v24.16b\n"
".inst 0x4e98a756 // smmla v22.4s, v26.16b, v24.16b\n"
- "ldr q24, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q24, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e99a76b // smmla v11.4s, v27.16b, v25.16b\n"
".inst 0x4e99a753 // smmla v19.4s, v26.16b, v25.16b\n"
".inst 0x4e98a76f // smmla v15.4s, v27.16b, v24.16b\n"
@@ -1687,29 +1687,29 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
- "add x25, x11, x20\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "add x14, x14, #0x40\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x11, #0x0]\n"
- "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add x26, x9, x20\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
- "add x14, x14, #0x40\n"
+ "add x25, x26, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
+ "add x24, x25, x20\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"mov v23.16b, v26.16b\n"
- "add v23.4s, v23.4s, v28.4s\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"add v12.4s, v12.4s, v27.4s\n"
"add v13.4s, v13.4s, v25.4s\n"
"add v14.4s, v14.4s, v24.4s\n"
+ "add v23.4s, v23.4s, v28.4s\n"
"add v8.4s, v8.4s, v28.4s\n"
"add v9.4s, v9.4s, v27.4s\n"
"add v10.4s, v10.4s, v25.4s\n"
@@ -1735,9 +1735,9 @@ void a64_hybrid_s8qs_mmla_6x16 (
"add x13, x13, #0x40\n"
"b 105f\n"
"104:" // Height 4: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -1772,11 +1772,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v25.4s, v25.4s, #0x1f\n"
"sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v27.4s\n"
+ "and v27.16b, v8.16b, v0.16b\n"
"sqadd v12.4s, v12.4s, v26.4s\n"
+ "and v26.16b, v9.16b, v1.16b\n"
"sqadd v13.4s, v13.4s, v25.4s\n"
"sqadd v14.4s, v14.4s, v24.4s\n"
- "and v27.16b, v8.16b, v0.16b\n"
- "and v26.16b, v9.16b, v1.16b\n"
"and v25.16b, v10.16b, v2.16b\n"
"and v24.16b, v11.16b, v3.16b\n"
"sshr v27.4s, v27.4s, #0x1f\n"
@@ -1784,11 +1784,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v25.4s, v25.4s, #0x1f\n"
"sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v27.4s\n"
+ "and v27.16b, v15.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v26.4s\n"
+ "and v26.16b, v20.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v25.4s\n"
"sqadd v11.4s, v11.4s, v24.4s\n"
- "and v27.16b, v15.16b, v0.16b\n"
- "and v26.16b, v20.16b, v1.16b\n"
"and v25.16b, v21.16b, v2.16b\n"
"and v24.16b, v22.16b, v3.16b\n"
"sshr v27.4s, v27.4s, #0x1f\n"
@@ -1796,11 +1796,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v25.4s, v25.4s, #0x1f\n"
"sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v15.4s, v15.4s, v27.4s\n"
+ "and v27.16b, v16.16b, v0.16b\n"
"sqadd v20.4s, v20.4s, v26.4s\n"
+ "and v26.16b, v17.16b, v1.16b\n"
"sqadd v21.4s, v21.4s, v25.4s\n"
"sqadd v22.4s, v22.4s, v24.4s\n"
- "and v27.16b, v16.16b, v0.16b\n"
- "and v26.16b, v17.16b, v1.16b\n"
"and v25.16b, v18.16b, v2.16b\n"
"and v24.16b, v19.16b, v3.16b\n"
"sshr v27.4s, v27.4s, #0x1f\n"
@@ -1812,21 +1812,21 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sqadd v18.4s, v18.4s, v25.4s\n"
"sqadd v19.4s, v19.4s, v24.4s\n"
"106:" // Height 4: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"srshl v12.4s, v12.4s, v1.4s\n"
- "srshl v13.4s, v13.4s, v2.4s\n"
- "srshl v14.4s, v14.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
+ "srshl v13.4s, v13.4s, v2.4s\n"
+ "srshl v14.4s, v14.4s, v3.4s\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"srshl v15.4s, v15.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v1.4s\n"
"srshl v21.4s, v21.4s, v2.4s\n"
@@ -1896,113 +1896,113 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v15.16b, v15.16b, v20.16b\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 115f\n"
- "tbz x10, #3, 110f\n"
- "str d23, [x11], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "tbz x10, #2, 108f\n"
- "st1 { v23.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x25], #0x4\n"
- "st1 { v15.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "tbz x10, #1, 107f\n"
- "st1 { v23.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x25], #0x2\n"
- "st1 { v15.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "tbz x10, #0, 114f\n"
- "st1 { v23.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x25]\n"
- "st1 { v15.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
+ "tbz x11, #3, 110f\n"
+ "str d23, [x9], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "tbz x11, #2, 108f\n"
+ "st1 { v23.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x26], #0x4\n"
+ "st1 { v15.s }[2], [x25], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "tbz x11, #1, 107f\n"
+ "st1 { v23.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x26], #0x2\n"
+ "st1 { v15.h }[6], [x25], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "tbz x11, #0, 114f\n"
+ "st1 { v23.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x26]\n"
+ "st1 { v15.b }[14], [x25]\n"
+ "st1 { v16.b }[14], [x24]\n"
"b 114f\n"
"107:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x10, #0, 114f\n"
- "st1 { v23.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x25]\n"
- "st1 { v15.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
+ "tbz x11, #0, 114f\n"
+ "st1 { v23.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x26]\n"
+ "st1 { v15.b }[12], [x25]\n"
+ "st1 { v16.b }[12], [x24]\n"
"b 114f\n"
"108:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x10, #1, 109f\n"
- "st1 { v23.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x25], #0x2\n"
- "st1 { v15.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "tbz x10, #0, 114f\n"
- "st1 { v23.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x25]\n"
- "st1 { v15.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
+ "tbz x11, #1, 109f\n"
+ "st1 { v23.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x26], #0x2\n"
+ "st1 { v15.h }[4], [x25], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "tbz x11, #0, 114f\n"
+ "st1 { v23.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x26]\n"
+ "st1 { v15.b }[10], [x25]\n"
+ "st1 { v16.b }[10], [x24]\n"
"b 114f\n"
"109:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x10, #0, 114f\n"
- "st1 { v23.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x25]\n"
- "st1 { v15.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
+ "tbz x11, #0, 114f\n"
+ "st1 { v23.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x26]\n"
+ "st1 { v15.b }[8], [x25]\n"
+ "st1 { v16.b }[8], [x24]\n"
"b 114f\n"
"110:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x10, #2, 112f\n"
- "str s23, [x11], #0x4\n"
- "str s8, [x25], #0x4\n"
- "str s15, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "tbz x10, #1, 111f\n"
- "st1 { v23.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x25], #0x2\n"
- "st1 { v15.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "tbz x10, #0, 114f\n"
- "st1 { v23.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x25]\n"
- "st1 { v15.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
+ "tbz x11, #2, 112f\n"
+ "str s23, [x9], #0x4\n"
+ "str s8, [x26], #0x4\n"
+ "str s15, [x25], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "tbz x11, #1, 111f\n"
+ "st1 { v23.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x26], #0x2\n"
+ "st1 { v15.h }[2], [x25], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "tbz x11, #0, 114f\n"
+ "st1 { v23.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x26]\n"
+ "st1 { v15.b }[6], [x25]\n"
+ "st1 { v16.b }[6], [x24]\n"
"b 114f\n"
"111:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x10, #0, 114f\n"
- "st1 { v23.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x25]\n"
- "st1 { v15.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
+ "tbz x11, #0, 114f\n"
+ "st1 { v23.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x26]\n"
+ "st1 { v15.b }[4], [x25]\n"
+ "st1 { v16.b }[4], [x24]\n"
"b 114f\n"
"112:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x10, #1, 113f\n"
- "str h23, [x11], #0x2\n"
- "str h8, [x25], #0x2\n"
- "str h15, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "tbz x10, #0, 114f\n"
- "st1 { v23.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x25]\n"
- "st1 { v15.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
+ "tbz x11, #1, 113f\n"
+ "str h23, [x9], #0x2\n"
+ "str h8, [x26], #0x2\n"
+ "str h15, [x25], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "tbz x11, #0, 114f\n"
+ "st1 { v23.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x26]\n"
+ "st1 { v15.b }[2], [x25]\n"
+ "st1 { v16.b }[2], [x24]\n"
"b 114f\n"
"113:" // Height 4: Partial direct writeback: partial_1_0
- "str b23, [x11, #0x0]\n"
- "str b8, [x25, #0x0]\n"
- "str b15, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
+ "str b23, [x9, #0x0]\n"
+ "str b8, [x26, #0x0]\n"
+ "str b15, [x25, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
"114:" // Height 4: Partial direct writeback: Done
"b 116f\n"
"115:" // Height 4: Full writeback
- "str q23, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x25, #0x0]\n"
- "str q15, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
+ "str q23, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x26, #0x0]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
"116:" // Height 4: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 89b\n"
"b 176f\n"
"117:" // Height 5
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"118:" // Height 5: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2032,8 +2032,8 @@ void a64_hybrid_s8qs_mmla_6x16 (
"mov x28, #0x0\n"
"120:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 121f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2065,91 +2065,91 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q3, [x24, #0x0]\n"
"ldr q4, [x23, #0x0]\n"
"ldr q5, [x22, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 124f\n"
"123:" // Height 5: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "sub x27, x27, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "add x22, x22, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
- "add x24, x24, #0x10\n"
+ "ldr q7, [x10, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x10, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x10, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x10, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "ldr q7, [x10, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
"ldr q2, [x25, #0x0]\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q0, [x9, #0x90]\n"
+ "ldr q0, [x10, #0x90]\n"
"ldr q4, [x23, #0x0]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x10, #0xa0]\n"
".inst 0x4e80a42c // smmla v12.4s, v1.16b, v0.16b\n"
".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4bc // smmla v28.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xb0]\n"
+ "ldr q0, [x10, #0xb0]\n"
".inst 0x4e86a429 // smmla v9.4s, v1.16b, v6.16b\n"
".inst 0x4e86a471 // smmla v17.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4b9 // smmla v25.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x10, #0xc0]\n"
".inst 0x4e80a42d // smmla v13.4s, v1.16b, v0.16b\n"
".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4bd // smmla v29.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xd0]\n"
+ "ldr q0, [x10, #0xd0]\n"
".inst 0x4e86a42a // smmla v10.4s, v1.16b, v6.16b\n"
".inst 0x4e86a472 // smmla v18.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4ba // smmla v26.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x10, #0xe0]\n"
".inst 0x4e80a42e // smmla v14.4s, v1.16b, v0.16b\n"
".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4be // smmla v30.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q0, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e86a42b // smmla v11.4s, v1.16b, v6.16b\n"
".inst 0x4e86a473 // smmla v19.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bb // smmla v27.4s, v5.16b, v6.16b\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x4e80a42f // smmla v15.4s, v1.16b, v0.16b\n"
"ldr q1, [x26, #0x0]\n"
".inst 0x4e80a477 // smmla v23.4s, v3.16b, v0.16b\n"
@@ -2160,79 +2160,79 @@ void a64_hybrid_s8qs_mmla_6x16 (
"124:" // Height 5: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x10, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x10, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x10, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x10, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
+ "ldr q7, [x10, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q0, [x9, #0x90]\n"
+ "ldr q0, [x10, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q2, [x9, #0xa0]\n"
+ "ldr q2, [x10, #0xa0]\n"
".inst 0x4e80a42c // smmla v12.4s, v1.16b, v0.16b\n"
".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4bc // smmla v28.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xb0]\n"
+ "ldr q0, [x10, #0xb0]\n"
".inst 0x4e82a429 // smmla v9.4s, v1.16b, v2.16b\n"
".inst 0x4e82a471 // smmla v17.4s, v3.16b, v2.16b\n"
".inst 0x4e82a4b9 // smmla v25.4s, v5.16b, v2.16b\n"
- "ldr q2, [x9, #0xc0]\n"
+ "ldr q2, [x10, #0xc0]\n"
".inst 0x4e80a42d // smmla v13.4s, v1.16b, v0.16b\n"
".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4bd // smmla v29.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xd0]\n"
+ "ldr q0, [x10, #0xd0]\n"
".inst 0x4e82a42a // smmla v10.4s, v1.16b, v2.16b\n"
".inst 0x4e82a472 // smmla v18.4s, v3.16b, v2.16b\n"
".inst 0x4e82a4ba // smmla v26.4s, v5.16b, v2.16b\n"
- "ldr q2, [x9, #0xe0]\n"
+ "ldr q2, [x10, #0xe0]\n"
".inst 0x4e80a42e // smmla v14.4s, v1.16b, v0.16b\n"
".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4be // smmla v30.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q0, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e82a42b // smmla v11.4s, v1.16b, v2.16b\n"
".inst 0x4e82a473 // smmla v19.4s, v3.16b, v2.16b\n"
".inst 0x4e82a4bb // smmla v27.4s, v5.16b, v2.16b\n"
@@ -2244,44 +2244,44 @@ void a64_hybrid_s8qs_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 127f\n"
"126:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
+ "ldr d3, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "cmp x27, #0x8\n"
"ldr d0, [x22], #0x8\n"
- "ldr q1, [x9, #0x0]\n"
- "trn1 v2.2d, v0.2d, v2.2d\n"
+ "ldr q1, [x10, #0x0]\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v3.2d, v2.2d\n"
+ "trn1 v2.2d, v0.2d, v5.2d\n"
+ "ldr q0, [x10, #0x10]\n"
".inst 0x4e81a488 // smmla v8.4s, v4.16b, v1.16b\n"
- "ldr q0, [x9, #0x10]\n"
".inst 0x4e81a470 // smmla v16.4s, v3.16b, v1.16b\n"
".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
- "ldr q1, [x9, #0x20]\n"
+ "ldr q1, [x10, #0x20]\n"
".inst 0x4e80a48c // smmla v12.4s, v4.16b, v0.16b\n"
".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
- "cmp x27, #0x8\n"
".inst 0x4e80a45c // smmla v28.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x30]\n"
+ "ldr q0, [x10, #0x30]\n"
".inst 0x4e81a489 // smmla v9.4s, v4.16b, v1.16b\n"
".inst 0x4e81a471 // smmla v17.4s, v3.16b, v1.16b\n"
".inst 0x4e81a459 // smmla v25.4s, v2.16b, v1.16b\n"
- "ldr q1, [x9, #0x40]\n"
+ "ldr q1, [x10, #0x40]\n"
".inst 0x4e80a48d // smmla v13.4s, v4.16b, v0.16b\n"
".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45d // smmla v29.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x50]\n"
+ "ldr q0, [x10, #0x50]\n"
".inst 0x4e81a48a // smmla v10.4s, v4.16b, v1.16b\n"
".inst 0x4e81a472 // smmla v18.4s, v3.16b, v1.16b\n"
".inst 0x4e81a45a // smmla v26.4s, v2.16b, v1.16b\n"
- "ldr q1, [x9, #0x60]\n"
+ "ldr q1, [x10, #0x60]\n"
".inst 0x4e80a48e // smmla v14.4s, v4.16b, v0.16b\n"
".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45e // smmla v30.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x70]\n"
+ "ldr q0, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e81a48b // smmla v11.4s, v4.16b, v1.16b\n"
- "add x9, x9, #0x80\n"
".inst 0x4e81a473 // smmla v19.4s, v3.16b, v1.16b\n"
".inst 0x4e81a45b // smmla v27.4s, v2.16b, v1.16b\n"
".inst 0x4e80a48f // smmla v15.4s, v4.16b, v0.16b\n"
@@ -2338,36 +2338,36 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr b4, [x23, #0x0]\n"
"ldr b5, [x22, #0x0]\n"
"131:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn1 v3.2d, v3.2d, v4.2d\n"
"trn1 v2.2d, v5.2d, v0.2d\n"
- "ldr q0, [x9, #0x10]\n"
+ "ldr q0, [x10, #0x10]\n"
".inst 0x4e87a4c8 // smmla v8.4s, v6.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
- "ldr q1, [x9, #0x20]\n"
+ "ldr q1, [x10, #0x20]\n"
".inst 0x4e80a4cc // smmla v12.4s, v6.16b, v0.16b\n"
".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45c // smmla v28.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x30]\n"
+ "ldr q0, [x10, #0x30]\n"
".inst 0x4e81a4c9 // smmla v9.4s, v6.16b, v1.16b\n"
".inst 0x4e81a471 // smmla v17.4s, v3.16b, v1.16b\n"
".inst 0x4e81a459 // smmla v25.4s, v2.16b, v1.16b\n"
- "ldr q1, [x9, #0x40]\n"
+ "ldr q1, [x10, #0x40]\n"
".inst 0x4e80a4cd // smmla v13.4s, v6.16b, v0.16b\n"
".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45d // smmla v29.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x50]\n"
+ "ldr q0, [x10, #0x50]\n"
".inst 0x4e81a4ca // smmla v10.4s, v6.16b, v1.16b\n"
".inst 0x4e81a472 // smmla v18.4s, v3.16b, v1.16b\n"
".inst 0x4e81a45a // smmla v26.4s, v2.16b, v1.16b\n"
- "ldr q1, [x9, #0x60]\n"
+ "ldr q1, [x10, #0x60]\n"
".inst 0x4e80a4ce // smmla v14.4s, v6.16b, v0.16b\n"
".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45e // smmla v30.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q0, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e81a4cb // smmla v11.4s, v6.16b, v1.16b\n"
".inst 0x4e81a473 // smmla v19.4s, v3.16b, v1.16b\n"
".inst 0x4e81a45b // smmla v27.4s, v2.16b, v1.16b\n"
@@ -2388,37 +2388,37 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x11, x20\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
+ "add x14, x14, #0x40\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
- "add x22, x23, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
+ "add x26, x9, x20\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "add x25, x26, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
+ "add x24, x25, x20\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
- "add x14, x14, #0x40\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v24.2d, v24.2d, v28.2d\n"
"uzp1 v25.2d, v25.2d, v29.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v26.2d, v26.2d, v30.2d\n"
"uzp1 v27.2d, v27.2d, v31.2d\n"
"mov v31.16b, v2.16b\n"
- "add v31.4s, v31.4s, v4.4s\n"
"add v12.4s, v12.4s, v3.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
"add v14.4s, v14.4s, v0.4s\n"
+ "add v31.4s, v31.4s, v4.4s\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v3.4s\n"
"add v10.4s, v10.4s, v1.4s\n"
@@ -2448,9 +2448,9 @@ void a64_hybrid_s8qs_mmla_6x16 (
"add x13, x13, #0x40\n"
"b 134f\n"
"133:" // Height 5: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -2489,11 +2489,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v23.4s, v23.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v30.4s\n"
+ "and v30.16b, v8.16b, v0.16b\n"
"sqadd v12.4s, v12.4s, v29.4s\n"
+ "and v29.16b, v9.16b, v1.16b\n"
"sqadd v13.4s, v13.4s, v28.4s\n"
"sqadd v14.4s, v14.4s, v23.4s\n"
- "and v30.16b, v8.16b, v0.16b\n"
- "and v29.16b, v9.16b, v1.16b\n"
"and v28.16b, v10.16b, v2.16b\n"
"and v23.16b, v11.16b, v3.16b\n"
"sshr v30.4s, v30.4s, #0x1f\n"
@@ -2501,11 +2501,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v23.4s, v23.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v30.4s\n"
+ "and v30.16b, v15.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v29.4s\n"
+ "and v29.16b, v20.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v28.4s\n"
"sqadd v11.4s, v11.4s, v23.4s\n"
- "and v30.16b, v15.16b, v0.16b\n"
- "and v29.16b, v20.16b, v1.16b\n"
"and v28.16b, v21.16b, v2.16b\n"
"and v23.16b, v22.16b, v3.16b\n"
"sshr v30.4s, v30.4s, #0x1f\n"
@@ -2513,11 +2513,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v23.4s, v23.4s, #0x1f\n"
"sqadd v15.4s, v15.4s, v30.4s\n"
+ "and v30.16b, v16.16b, v0.16b\n"
"sqadd v20.4s, v20.4s, v29.4s\n"
+ "and v29.16b, v17.16b, v1.16b\n"
"sqadd v21.4s, v21.4s, v28.4s\n"
"sqadd v22.4s, v22.4s, v23.4s\n"
- "and v30.16b, v16.16b, v0.16b\n"
- "and v29.16b, v17.16b, v1.16b\n"
"and v28.16b, v18.16b, v2.16b\n"
"and v23.16b, v19.16b, v3.16b\n"
"sshr v30.4s, v30.4s, #0x1f\n"
@@ -2525,11 +2525,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v23.4s, v23.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v30.4s\n"
+ "and v30.16b, v24.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v29.4s\n"
+ "and v29.16b, v25.16b, v1.16b\n"
"sqadd v18.4s, v18.4s, v28.4s\n"
"sqadd v19.4s, v19.4s, v23.4s\n"
- "and v30.16b, v24.16b, v0.16b\n"
- "and v29.16b, v25.16b, v1.16b\n"
"and v28.16b, v26.16b, v2.16b\n"
"and v23.16b, v27.16b, v3.16b\n"
"sshr v30.4s, v30.4s, #0x1f\n"
@@ -2541,21 +2541,21 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sqadd v26.4s, v26.4s, v28.4s\n"
"sqadd v27.4s, v27.4s, v23.4s\n"
"135:" // Height 5: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v29.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
"srshl v12.4s, v12.4s, v1.4s\n"
- "srshl v13.4s, v13.4s, v2.4s\n"
- "srshl v14.4s, v14.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v29.4s }, [x21]\n"
"ld1r { v28.4s }, [x20]\n"
+ "srshl v13.4s, v13.4s, v2.4s\n"
+ "srshl v14.4s, v14.4s, v3.4s\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v23.4s }, [x20]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"srshl v15.4s, v15.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v1.4s\n"
"srshl v21.4s, v21.4s, v2.4s\n"
@@ -2644,132 +2644,133 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v16.16b, v16.16b, v18.16b\n"
"uzp1 v24.16b, v24.16b, v17.16b\n"
"bge 144f\n"
- "tbz x10, #3, 139f\n"
- "str d31, [x11], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x10, #2, 137f\n"
- "st1 { v31.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x25], #0x4\n"
- "st1 { v15.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "tbz x10, #1, 136f\n"
- "st1 { v31.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x25], #0x2\n"
- "st1 { v15.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "tbz x10, #0, 143f\n"
- "st1 { v31.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x25]\n"
- "st1 { v15.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "tbz x11, #3, 139f\n"
+ "str d31, [x9], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "tbz x11, #2, 137f\n"
+ "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x26], #0x4\n"
+ "st1 { v15.s }[2], [x25], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "tbz x11, #1, 136f\n"
+ "st1 { v31.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x26], #0x2\n"
+ "st1 { v15.h }[6], [x25], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "tbz x11, #0, 143f\n"
+ "st1 { v31.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x26]\n"
+ "st1 { v15.b }[14], [x25]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 143f\n"
"136:" // Height 5: Partial direct writeback: partial_1_12
- "tbz x10, #0, 143f\n"
- "st1 { v31.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x25]\n"
- "st1 { v15.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "tbz x11, #0, 143f\n"
+ "st1 { v31.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x26]\n"
+ "st1 { v15.b }[12], [x25]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 143f\n"
"137:" // Height 5: Partial direct writeback: partial_2_8
- "tbz x10, #1, 138f\n"
- "st1 { v31.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x25], #0x2\n"
- "st1 { v15.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "tbz x10, #0, 143f\n"
- "st1 { v31.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x25]\n"
- "st1 { v15.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "tbz x11, #1, 138f\n"
+ "st1 { v31.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x26], #0x2\n"
+ "st1 { v15.h }[4], [x25], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "tbz x11, #0, 143f\n"
+ "st1 { v31.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x26]\n"
+ "st1 { v15.b }[10], [x25]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 143f\n"
"138:" // Height 5: Partial direct writeback: partial_1_8
- "tbz x10, #0, 143f\n"
- "st1 { v31.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x25]\n"
- "st1 { v15.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "tbz x11, #0, 143f\n"
+ "st1 { v31.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x26]\n"
+ "st1 { v15.b }[8], [x25]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 143f\n"
"139:" // Height 5: Partial direct writeback: partial_4_0
- "tbz x10, #2, 141f\n"
- "str s31, [x11], #0x4\n"
- "str s8, [x25], #0x4\n"
- "str s15, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "tbz x10, #1, 140f\n"
- "st1 { v31.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x25], #0x2\n"
- "st1 { v15.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "tbz x10, #0, 143f\n"
- "st1 { v31.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x25]\n"
- "st1 { v15.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "tbz x11, #2, 141f\n"
+ "str s31, [x9], #0x4\n"
+ "str s8, [x26], #0x4\n"
+ "str s15, [x25], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "tbz x11, #1, 140f\n"
+ "st1 { v31.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x26], #0x2\n"
+ "st1 { v15.h }[2], [x25], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "tbz x11, #0, 143f\n"
+ "st1 { v31.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x26]\n"
+ "st1 { v15.b }[6], [x25]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 143f\n"
"140:" // Height 5: Partial direct writeback: partial_1_4
- "tbz x10, #0, 143f\n"
- "st1 { v31.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x25]\n"
- "st1 { v15.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "tbz x11, #0, 143f\n"
+ "st1 { v31.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x26]\n"
+ "st1 { v15.b }[4], [x25]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 143f\n"
"141:" // Height 5: Partial direct writeback: partial_2_0
- "tbz x10, #1, 142f\n"
- "str h31, [x11], #0x2\n"
- "str h8, [x25], #0x2\n"
- "str h15, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "tbz x10, #0, 143f\n"
- "st1 { v31.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x25]\n"
- "st1 { v15.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "tbz x11, #1, 142f\n"
+ "str h31, [x9], #0x2\n"
+ "str h8, [x26], #0x2\n"
+ "str h15, [x25], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "tbz x11, #0, 143f\n"
+ "st1 { v31.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x26]\n"
+ "st1 { v15.b }[2], [x25]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 143f\n"
"142:" // Height 5: Partial direct writeback: partial_1_0
- "str b31, [x11, #0x0]\n"
- "str b8, [x25, #0x0]\n"
- "str b15, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b31, [x9, #0x0]\n"
+ "str b8, [x26, #0x0]\n"
+ "str b15, [x25, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"143:" // Height 5: Partial direct writeback: Done
"b 145f\n"
"144:" // Height 5: Full writeback
- "str q31, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x25, #0x0]\n"
- "str q15, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q31, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x26, #0x0]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"145:" // Height 5: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 118b\n"
"b 176f\n"
"146:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x6\n"
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"147:" // Height 6: Column loop
"movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
@@ -2799,8 +2800,8 @@ void a64_hybrid_s8qs_mmla_6x16 (
"mov x28, #0x0\n"
"149:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 150f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2836,93 +2837,93 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr q4, [x23, #0x0]\n"
"ldr q5, [x22, #0x0]\n"
"ldr q6, [x21, #0x0]\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"blt 153f\n"
"152:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
"sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
- "add x23, x23, #0x10\n"
+ "ldr q7, [x10, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x10, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q7, [x10, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x10, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "ldr q7, [x10, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
"ldr q2, [x25, #0x0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q0, [x9, #0x90]\n"
+ "ldr q0, [x10, #0x90]\n"
"ldr q4, [x23, #0x0]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q6, [x9, #0xa0]\n"
+ "ldr q6, [x10, #0xa0]\n"
".inst 0x4e80a42c // smmla v12.4s, v1.16b, v0.16b\n"
".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4bc // smmla v28.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xb0]\n"
+ "ldr q0, [x10, #0xb0]\n"
".inst 0x4e86a429 // smmla v9.4s, v1.16b, v6.16b\n"
".inst 0x4e86a471 // smmla v17.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4b9 // smmla v25.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xc0]\n"
+ "ldr q6, [x10, #0xc0]\n"
".inst 0x4e80a42d // smmla v13.4s, v1.16b, v0.16b\n"
".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4bd // smmla v29.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xd0]\n"
+ "ldr q0, [x10, #0xd0]\n"
".inst 0x4e86a42a // smmla v10.4s, v1.16b, v6.16b\n"
".inst 0x4e86a472 // smmla v18.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4ba // smmla v26.4s, v5.16b, v6.16b\n"
- "ldr q6, [x9, #0xe0]\n"
+ "ldr q6, [x10, #0xe0]\n"
".inst 0x4e80a42e // smmla v14.4s, v1.16b, v0.16b\n"
".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4be // smmla v30.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q0, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e86a42b // smmla v11.4s, v1.16b, v6.16b\n"
".inst 0x4e86a473 // smmla v19.4s, v3.16b, v6.16b\n"
".inst 0x4e86a4bb // smmla v27.4s, v5.16b, v6.16b\n"
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x4e80a42f // smmla v15.4s, v1.16b, v0.16b\n"
"ldr q1, [x26, #0x0]\n"
".inst 0x4e80a477 // smmla v23.4s, v3.16b, v0.16b\n"
@@ -2934,81 +2935,81 @@ void a64_hybrid_s8qs_mmla_6x16 (
"153:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
- "ldr q6, [x9, #0x10]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x20]\n"
+ "ldr q7, [x10, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x30]\n"
+ "ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x40]\n"
- "add x22, x22, #0x10\n"
+ "ldr q7, [x10, #0x40]\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x50]\n"
+ "ldr q6, [x10, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q7, [x10, #0x60]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
- "ldr q6, [x9, #0x70]\n"
+ "ldr q6, [x10, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
- "ldr q7, [x9, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
+ "ldr q7, [x10, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
- "ldr q0, [x9, #0x90]\n"
+ "ldr q0, [x10, #0x90]\n"
".inst 0x4e87a428 // smmla v8.4s, v1.16b, v7.16b\n"
".inst 0x4e87a470 // smmla v16.4s, v3.16b, v7.16b\n"
".inst 0x4e87a4b8 // smmla v24.4s, v5.16b, v7.16b\n"
- "ldr q2, [x9, #0xa0]\n"
+ "ldr q2, [x10, #0xa0]\n"
".inst 0x4e80a42c // smmla v12.4s, v1.16b, v0.16b\n"
".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4bc // smmla v28.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xb0]\n"
+ "ldr q0, [x10, #0xb0]\n"
".inst 0x4e82a429 // smmla v9.4s, v1.16b, v2.16b\n"
".inst 0x4e82a471 // smmla v17.4s, v3.16b, v2.16b\n"
".inst 0x4e82a4b9 // smmla v25.4s, v5.16b, v2.16b\n"
- "ldr q2, [x9, #0xc0]\n"
+ "ldr q2, [x10, #0xc0]\n"
".inst 0x4e80a42d // smmla v13.4s, v1.16b, v0.16b\n"
".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4bd // smmla v29.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xd0]\n"
+ "ldr q0, [x10, #0xd0]\n"
".inst 0x4e82a42a // smmla v10.4s, v1.16b, v2.16b\n"
".inst 0x4e82a472 // smmla v18.4s, v3.16b, v2.16b\n"
".inst 0x4e82a4ba // smmla v26.4s, v5.16b, v2.16b\n"
- "ldr q2, [x9, #0xe0]\n"
+ "ldr q2, [x10, #0xe0]\n"
".inst 0x4e80a42e // smmla v14.4s, v1.16b, v0.16b\n"
".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
".inst 0x4e80a4be // smmla v30.4s, v5.16b, v0.16b\n"
- "ldr q0, [x9, #0xf0]\n"
- "add x9, x9, #0x100\n"
+ "ldr q0, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e82a42b // smmla v11.4s, v1.16b, v2.16b\n"
".inst 0x4e82a473 // smmla v19.4s, v3.16b, v2.16b\n"
".inst 0x4e82a4bb // smmla v27.4s, v5.16b, v2.16b\n"
@@ -3020,44 +3021,44 @@ void a64_hybrid_s8qs_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 156f\n"
"155:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d5, [x24], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
"cmp x27, #0x8\n"
- "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"ldr d0, [x21], #0x8\n"
- "trn1 v2.2d, v1.2d, v0.2d\n"
- "ldr q1, [x9, #0x0]\n"
- "ldr q0, [x9, #0x10]\n"
+ "ldr q1, [x10, #0x0]\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v5.2d, v3.2d\n"
+ "trn1 v2.2d, v2.2d, v0.2d\n"
+ "ldr q0, [x10, #0x10]\n"
".inst 0x4e81a488 // smmla v8.4s, v4.16b, v1.16b\n"
".inst 0x4e81a470 // smmla v16.4s, v3.16b, v1.16b\n"
".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
- "ldr q1, [x9, #0x20]\n"
+ "ldr q1, [x10, #0x20]\n"
".inst 0x4e80a48c // smmla v12.4s, v4.16b, v0.16b\n"
".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45c // smmla v28.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x30]\n"
+ "ldr q0, [x10, #0x30]\n"
".inst 0x4e81a489 // smmla v9.4s, v4.16b, v1.16b\n"
".inst 0x4e81a471 // smmla v17.4s, v3.16b, v1.16b\n"
".inst 0x4e81a459 // smmla v25.4s, v2.16b, v1.16b\n"
- "ldr q1, [x9, #0x40]\n"
+ "ldr q1, [x10, #0x40]\n"
".inst 0x4e80a48d // smmla v13.4s, v4.16b, v0.16b\n"
".inst 0x4e80a475 // smmla v21.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45d // smmla v29.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x50]\n"
+ "ldr q0, [x10, #0x50]\n"
".inst 0x4e81a48a // smmla v10.4s, v4.16b, v1.16b\n"
".inst 0x4e81a472 // smmla v18.4s, v3.16b, v1.16b\n"
".inst 0x4e81a45a // smmla v26.4s, v2.16b, v1.16b\n"
- "ldr q1, [x9, #0x60]\n"
+ "ldr q1, [x10, #0x60]\n"
".inst 0x4e80a48e // smmla v14.4s, v4.16b, v0.16b\n"
".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45e // smmla v30.4s, v2.16b, v0.16b\n"
- "ldr q0, [x9, #0x70]\n"
- "add x9, x9, #0x80\n"
+ "ldr q0, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e81a48b // smmla v11.4s, v4.16b, v1.16b\n"
".inst 0x4e81a473 // smmla v19.4s, v3.16b, v1.16b\n"
".inst 0x4e81a45b // smmla v27.4s, v2.16b, v1.16b\n"
@@ -3122,37 +3123,37 @@ void a64_hybrid_s8qs_mmla_6x16 (
"ldr b5, [x22, #0x0]\n"
"ldr b6, [x21, #0x0]\n"
"160:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x9, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
"trn1 v2.2d, v1.2d, v2.2d\n"
"trn1 v4.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a448 // smmla v8.4s, v2.16b, v7.16b\n"
"trn1 v3.2d, v5.2d, v6.2d\n"
- "ldr q0, [x9, #0x10]\n"
+ "ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e87a448 // smmla v8.4s, v2.16b, v7.16b\n"
".inst 0x4e87a490 // smmla v16.4s, v4.16b, v7.16b\n"
".inst 0x4e87a478 // smmla v24.4s, v3.16b, v7.16b\n"
- "ldr q1, [x9, #0x20]\n"
+ "ldr q1, [x10, #0x20]\n"
".inst 0x4e80a44c // smmla v12.4s, v2.16b, v0.16b\n"
".inst 0x4e80a494 // smmla v20.4s, v4.16b, v0.16b\n"
".inst 0x4e80a47c // smmla v28.4s, v3.16b, v0.16b\n"
- "ldr q0, [x9, #0x30]\n"
+ "ldr q0, [x10, #0x30]\n"
".inst 0x4e81a449 // smmla v9.4s, v2.16b, v1.16b\n"
".inst 0x4e81a491 // smmla v17.4s, v4.16b, v1.16b\n"
".inst 0x4e81a479 // smmla v25.4s, v3.16b, v1.16b\n"
- "ldr q1, [x9, #0x40]\n"
+ "ldr q1, [x10, #0x40]\n"
".inst 0x4e80a44d // smmla v13.4s, v2.16b, v0.16b\n"
".inst 0x4e80a495 // smmla v21.4s, v4.16b, v0.16b\n"
".inst 0x4e80a47d // smmla v29.4s, v3.16b, v0.16b\n"
- "ldr q0, [x9, #0x50]\n"
+ "ldr q0, [x10, #0x50]\n"
".inst 0x4e81a44a // smmla v10.4s, v2.16b, v1.16b\n"
".inst 0x4e81a492 // smmla v18.4s, v4.16b, v1.16b\n"
".inst 0x4e81a47a // smmla v26.4s, v3.16b, v1.16b\n"
- "ldr q1, [x9, #0x60]\n"
+ "ldr q1, [x10, #0x60]\n"
".inst 0x4e80a44e // smmla v14.4s, v2.16b, v0.16b\n"
".inst 0x4e80a496 // smmla v22.4s, v4.16b, v0.16b\n"
".inst 0x4e80a47e // smmla v30.4s, v3.16b, v0.16b\n"
- "ldr q0, [x9, #0x70]\n"
+ "ldr q0, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e81a44b // smmla v11.4s, v2.16b, v1.16b\n"
- "add x9, x9, #0x80\n"
".inst 0x4e81a493 // smmla v19.4s, v4.16b, v1.16b\n"
".inst 0x4e81a47b // smmla v27.4s, v3.16b, v1.16b\n"
".inst 0x4e80a44f // smmla v15.4s, v2.16b, v0.16b\n"
@@ -3172,32 +3173,32 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x11, x20\n"
- "add x24, x25, x20\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "add x14, x14, #0x40\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "add x21, x22, x20\n"
- "prfm pstl1keep, [x11, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x25, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "add x24, x25, x20\n"
+ "prfm pstl1keep, [x26, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x25, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
- "add x14, x14, #0x40\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
+ "add x22, x23, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
"uzp2 v24.2d, v24.2d, v28.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v28.2d, v25.2d, v29.2d\n"
"uzp2 v25.2d, v25.2d, v29.2d\n"
"uzp1 v29.2d, v26.2d, v30.2d\n"
@@ -3205,10 +3206,10 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v30.2d, v27.2d, v31.2d\n"
"uzp2 v27.2d, v27.2d, v31.2d\n"
"mov v31.16b, v2.16b\n"
- "add v31.4s, v31.4s, v4.4s\n"
"add v12.4s, v12.4s, v3.4s\n"
"add v13.4s, v13.4s, v1.4s\n"
"add v14.4s, v14.4s, v0.4s\n"
+ "add v31.4s, v31.4s, v4.4s\n"
"add v8.4s, v8.4s, v4.4s\n"
"add v9.4s, v9.4s, v3.4s\n"
"add v10.4s, v10.4s, v1.4s\n"
@@ -3242,9 +3243,9 @@ void a64_hybrid_s8qs_mmla_6x16 (
"add x13, x13, #0x40\n"
"b 163f\n"
"162:" // Height 6: per layer parameters
- "add x20, %x[qp], %[per_layer_right_shift]\n"
- "ld1r { v0.4s }, [x20]\n"
+ "add x21, %x[qp], %[per_layer_right_shift]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "ld1r { v0.4s }, [x21]\n"
"ld1r { v4.4s }, [x20]\n"
"mov v1.16b, v0.16b\n"
"mov v5.16b, v4.16b\n"
@@ -3287,11 +3288,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v31.4s, v31.4s, v7.4s\n"
+ "and v7.16b, v8.16b, v0.16b\n"
"sqadd v12.4s, v12.4s, v6.4s\n"
+ "and v6.16b, v9.16b, v1.16b\n"
"sqadd v13.4s, v13.4s, v5.4s\n"
"sqadd v14.4s, v14.4s, v4.4s\n"
- "and v7.16b, v8.16b, v0.16b\n"
- "and v6.16b, v9.16b, v1.16b\n"
"and v5.16b, v10.16b, v2.16b\n"
"and v4.16b, v11.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3299,11 +3300,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v8.4s, v8.4s, v7.4s\n"
+ "and v7.16b, v15.16b, v0.16b\n"
"sqadd v9.4s, v9.4s, v6.4s\n"
+ "and v6.16b, v20.16b, v1.16b\n"
"sqadd v10.4s, v10.4s, v5.4s\n"
"sqadd v11.4s, v11.4s, v4.4s\n"
- "and v7.16b, v15.16b, v0.16b\n"
- "and v6.16b, v20.16b, v1.16b\n"
"and v5.16b, v21.16b, v2.16b\n"
"and v4.16b, v22.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3311,11 +3312,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v15.4s, v15.4s, v7.4s\n"
+ "and v7.16b, v16.16b, v0.16b\n"
"sqadd v20.4s, v20.4s, v6.4s\n"
+ "and v6.16b, v17.16b, v1.16b\n"
"sqadd v21.4s, v21.4s, v5.4s\n"
"sqadd v22.4s, v22.4s, v4.4s\n"
- "and v7.16b, v16.16b, v0.16b\n"
- "and v6.16b, v17.16b, v1.16b\n"
"and v5.16b, v18.16b, v2.16b\n"
"and v4.16b, v19.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3323,11 +3324,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v7.4s\n"
+ "and v7.16b, v23.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v6.4s\n"
+ "and v6.16b, v28.16b, v1.16b\n"
"sqadd v18.4s, v18.4s, v5.4s\n"
"sqadd v19.4s, v19.4s, v4.4s\n"
- "and v7.16b, v23.16b, v0.16b\n"
- "and v6.16b, v28.16b, v1.16b\n"
"and v5.16b, v29.16b, v2.16b\n"
"and v4.16b, v30.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3335,11 +3336,11 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v7.4s\n"
+ "and v7.16b, v24.16b, v0.16b\n"
"sqadd v28.4s, v28.4s, v6.4s\n"
+ "and v6.16b, v25.16b, v1.16b\n"
"sqadd v29.4s, v29.4s, v5.4s\n"
"sqadd v30.4s, v30.4s, v4.4s\n"
- "and v7.16b, v24.16b, v0.16b\n"
- "and v6.16b, v25.16b, v1.16b\n"
"and v5.16b, v26.16b, v2.16b\n"
"and v4.16b, v27.16b, v3.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
@@ -3351,21 +3352,21 @@ void a64_hybrid_s8qs_mmla_6x16 (
"sqadd v26.4s, v26.4s, v5.4s\n"
"sqadd v27.4s, v27.4s, v4.4s\n"
"164:" // Height 6: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v6.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
"srshl v12.4s, v12.4s, v1.4s\n"
- "srshl v13.4s, v13.4s, v2.4s\n"
- "srshl v14.4s, v14.4s, v3.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v6.4s }, [x21]\n"
"ld1r { v5.4s }, [x20]\n"
+ "srshl v13.4s, v13.4s, v2.4s\n"
+ "srshl v14.4s, v14.4s, v3.4s\n"
"srshl v8.4s, v8.4s, v0.4s\n"
"srshl v9.4s, v9.4s, v1.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x11, #0x10\n"
"ld1r { v4.4s }, [x20]\n"
"srshl v10.4s, v10.4s, v2.4s\n"
"srshl v11.4s, v11.4s, v3.4s\n"
- "cmp x10, #0x10\n"
"srshl v15.4s, v15.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v1.4s\n"
"srshl v21.4s, v21.4s, v2.4s\n"
@@ -3473,136 +3474,136 @@ void a64_hybrid_s8qs_mmla_6x16 (
"uzp1 v23.16b, v23.16b, v18.16b\n"
"uzp1 v24.16b, v24.16b, v17.16b\n"
"bge 173f\n"
- "tbz x10, #3, 168f\n"
- "str d31, [x11], #0x8\n"
- "str d8, [x25], #0x8\n"
- "str d15, [x24], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
- "tbz x10, #2, 166f\n"
- "st1 { v31.s }[2], [x11], #0x4\n"
- "st1 { v8.s }[2], [x25], #0x4\n"
- "st1 { v15.s }[2], [x24], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v23.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
- "tbz x10, #1, 165f\n"
- "st1 { v31.h }[6], [x11], #0x2\n"
- "st1 { v8.h }[6], [x25], #0x2\n"
- "st1 { v15.h }[6], [x24], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v23.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
- "tbz x10, #0, 172f\n"
- "st1 { v31.b }[14], [x11]\n"
- "st1 { v8.b }[14], [x25]\n"
- "st1 { v15.b }[14], [x24]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v23.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "tbz x11, #3, 168f\n"
+ "str d31, [x9], #0x8\n"
+ "str d8, [x26], #0x8\n"
+ "str d15, [x25], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
+ "tbz x11, #2, 166f\n"
+ "st1 { v31.s }[2], [x9], #0x4\n"
+ "st1 { v8.s }[2], [x26], #0x4\n"
+ "st1 { v15.s }[2], [x25], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v23.s }[2], [x23], #0x4\n"
+ "st1 { v24.s }[2], [x22], #0x4\n"
+ "tbz x11, #1, 165f\n"
+ "st1 { v31.h }[6], [x9], #0x2\n"
+ "st1 { v8.h }[6], [x26], #0x2\n"
+ "st1 { v15.h }[6], [x25], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "st1 { v23.h }[6], [x23], #0x2\n"
+ "st1 { v24.h }[6], [x22], #0x2\n"
+ "tbz x11, #0, 172f\n"
+ "st1 { v31.b }[14], [x9]\n"
+ "st1 { v8.b }[14], [x26]\n"
+ "st1 { v15.b }[14], [x25]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "st1 { v23.b }[14], [x23]\n"
+ "st1 { v24.b }[14], [x22]\n"
"b 172f\n"
"165:" // Height 6: Partial direct writeback: partial_1_12
- "tbz x10, #0, 172f\n"
- "st1 { v31.b }[12], [x11]\n"
- "st1 { v8.b }[12], [x25]\n"
- "st1 { v15.b }[12], [x24]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v23.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "tbz x11, #0, 172f\n"
+ "st1 { v31.b }[12], [x9]\n"
+ "st1 { v8.b }[12], [x26]\n"
+ "st1 { v15.b }[12], [x25]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "st1 { v23.b }[12], [x23]\n"
+ "st1 { v24.b }[12], [x22]\n"
"b 172f\n"
"166:" // Height 6: Partial direct writeback: partial_2_8
- "tbz x10, #1, 167f\n"
- "st1 { v31.h }[4], [x11], #0x2\n"
- "st1 { v8.h }[4], [x25], #0x2\n"
- "st1 { v15.h }[4], [x24], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v23.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
- "tbz x10, #0, 172f\n"
- "st1 { v31.b }[10], [x11]\n"
- "st1 { v8.b }[10], [x25]\n"
- "st1 { v15.b }[10], [x24]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v23.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "tbz x11, #1, 167f\n"
+ "st1 { v31.h }[4], [x9], #0x2\n"
+ "st1 { v8.h }[4], [x26], #0x2\n"
+ "st1 { v15.h }[4], [x25], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "st1 { v23.h }[4], [x23], #0x2\n"
+ "st1 { v24.h }[4], [x22], #0x2\n"
+ "tbz x11, #0, 172f\n"
+ "st1 { v31.b }[10], [x9]\n"
+ "st1 { v8.b }[10], [x26]\n"
+ "st1 { v15.b }[10], [x25]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "st1 { v23.b }[10], [x23]\n"
+ "st1 { v24.b }[10], [x22]\n"
"b 172f\n"
"167:" // Height 6: Partial direct writeback: partial_1_8
- "tbz x10, #0, 172f\n"
- "st1 { v31.b }[8], [x11]\n"
- "st1 { v8.b }[8], [x25]\n"
- "st1 { v15.b }[8], [x24]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v23.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "tbz x11, #0, 172f\n"
+ "st1 { v31.b }[8], [x9]\n"
+ "st1 { v8.b }[8], [x26]\n"
+ "st1 { v15.b }[8], [x25]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "st1 { v23.b }[8], [x23]\n"
+ "st1 { v24.b }[8], [x22]\n"
"b 172f\n"
"168:" // Height 6: Partial direct writeback: partial_4_0
- "tbz x10, #2, 170f\n"
- "str s31, [x11], #0x4\n"
- "str s8, [x25], #0x4\n"
- "str s15, [x24], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s23, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
- "tbz x10, #1, 169f\n"
- "st1 { v31.h }[2], [x11], #0x2\n"
- "st1 { v8.h }[2], [x25], #0x2\n"
- "st1 { v15.h }[2], [x24], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v23.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
- "tbz x10, #0, 172f\n"
- "st1 { v31.b }[6], [x11]\n"
- "st1 { v8.b }[6], [x25]\n"
- "st1 { v15.b }[6], [x24]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v23.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "tbz x11, #2, 170f\n"
+ "str s31, [x9], #0x4\n"
+ "str s8, [x26], #0x4\n"
+ "str s15, [x25], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "str s23, [x23], #0x4\n"
+ "str s24, [x22], #0x4\n"
+ "tbz x11, #1, 169f\n"
+ "st1 { v31.h }[2], [x9], #0x2\n"
+ "st1 { v8.h }[2], [x26], #0x2\n"
+ "st1 { v15.h }[2], [x25], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "st1 { v23.h }[2], [x23], #0x2\n"
+ "st1 { v24.h }[2], [x22], #0x2\n"
+ "tbz x11, #0, 172f\n"
+ "st1 { v31.b }[6], [x9]\n"
+ "st1 { v8.b }[6], [x26]\n"
+ "st1 { v15.b }[6], [x25]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "st1 { v23.b }[6], [x23]\n"
+ "st1 { v24.b }[6], [x22]\n"
"b 172f\n"
"169:" // Height 6: Partial direct writeback: partial_1_4
- "tbz x10, #0, 172f\n"
- "st1 { v31.b }[4], [x11]\n"
- "st1 { v8.b }[4], [x25]\n"
- "st1 { v15.b }[4], [x24]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v23.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "tbz x11, #0, 172f\n"
+ "st1 { v31.b }[4], [x9]\n"
+ "st1 { v8.b }[4], [x26]\n"
+ "st1 { v15.b }[4], [x25]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "st1 { v23.b }[4], [x23]\n"
+ "st1 { v24.b }[4], [x22]\n"
"b 172f\n"
"170:" // Height 6: Partial direct writeback: partial_2_0
- "tbz x10, #1, 171f\n"
- "str h31, [x11], #0x2\n"
- "str h8, [x25], #0x2\n"
- "str h15, [x24], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h23, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
- "tbz x10, #0, 172f\n"
- "st1 { v31.b }[2], [x11]\n"
- "st1 { v8.b }[2], [x25]\n"
- "st1 { v15.b }[2], [x24]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v23.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "tbz x11, #1, 171f\n"
+ "str h31, [x9], #0x2\n"
+ "str h8, [x26], #0x2\n"
+ "str h15, [x25], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "str h23, [x23], #0x2\n"
+ "str h24, [x22], #0x2\n"
+ "tbz x11, #0, 172f\n"
+ "st1 { v31.b }[2], [x9]\n"
+ "st1 { v8.b }[2], [x26]\n"
+ "st1 { v15.b }[2], [x25]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "st1 { v23.b }[2], [x23]\n"
+ "st1 { v24.b }[2], [x22]\n"
"b 172f\n"
"171:" // Height 6: Partial direct writeback: partial_1_0
- "str b31, [x11, #0x0]\n"
- "str b8, [x25, #0x0]\n"
- "str b15, [x24, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b23, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b31, [x9, #0x0]\n"
+ "str b8, [x26, #0x0]\n"
+ "str b15, [x25, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "str b23, [x23, #0x0]\n"
+ "str b24, [x22, #0x0]\n"
"172:" // Height 6: Partial direct writeback: Done
"b 174f\n"
"173:" // Height 6: Full writeback
- "str q31, [x11, #0x0]\n"
- "add x11, x11, #0x10\n"
- "str q8, [x25, #0x0]\n"
- "str q15, [x24, #0x0]\n"
- "str q16, [x23, #0x0]\n"
- "str q23, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q31, [x9, #0x0]\n"
+ "add x9, x9, #0x10\n"
+ "str q8, [x26, #0x0]\n"
+ "str q15, [x25, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q23, [x23, #0x0]\n"
+ "str q24, [x22, #0x0]\n"
"174:" // Height 6: Writeback done
- "subs x10, x10, #0x10\n"
+ "subs x11, x11, #0x10\n"
"bgt 147b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 176f\n"
@@ -3616,8 +3617,8 @@ void a64_hybrid_s8qs_mmla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"176:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp
index a02fbe8f28..47b6861f5b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp
index 289d38c3b6..55629a38d0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -87,72 +87,72 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"bgt 69f\n"
"beq 35f\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 12f\n"
"cmp x8, #0x10\n"
"bge 11f\n"
"tbz x8, #3, 6f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"tbz x8, #2, 4f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"tbz x8, #1, 3f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"tbz x8, #0, 10f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"b 10f\n"
"3:" // Height 1: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 10f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"b 10f\n"
"4:" // Height 1: Partial accumulate: partial_2_8
"tbz x8, #1, 5f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"tbz x8, #0, 10f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"b 10f\n"
"5:" // Height 1: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 10f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"b 10f\n"
"6:" // Height 1: Partial accumulate: partial_4_0
"tbz x8, #2, 8f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"tbz x8, #1, 7f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"tbz x8, #0, 10f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"b 10f\n"
"7:" // Height 1: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 10f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"b 10f\n"
"8:" // Height 1: Partial accumulate: partial_2_0
"tbz x8, #1, 9f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"tbz x8, #0, 10f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"b 10f\n"
"9:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"10:" // Height 1: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 13f\n"
"11:" // Height 1: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"b 13f\n"
"12:" // Height 1: no accumulate
"movi v8.4s, #0x0\n"
@@ -163,8 +163,8 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"mov x15, #0x0\n"
"14:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 15f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -180,118 +180,118 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"blt 19f\n"
"ldr q0, [x13, #0x0]\n"
"cmp x14, #0x20\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 18f\n"
"17:" // Height 1: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr d17, [x16, #0x20]\n"
- "ldr x20, [x16, #0x28]\n"
+ "ldr d17, [x17, #0x20]\n"
+ "ldr x21, [x17, #0x28]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr d16, [x16, #0x30]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr d16, [x17, #0x30]\n"
+ "add x13, x13, #0x10\n"
+ "ldr x20, [x17, #0x38]\n"
+ "sub x14, x14, #0x10\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x48]\n"
+ "ldr x22, [x13, #0x8]\n"
+ "cmp x14, #0x20\n"
"mov v16.d[1], x20\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "ldr d17, [x16, #0x40]\n"
- "ldr x20, [x16, #0x48]\n"
+ "ldr d17, [x17, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr d16, [x16, #0x50]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x58]\n"
+ "ldr d16, [x17, #0x50]\n"
+ "ldr x20, [x17, #0x58]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x68]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"mov v16.d[1], x20\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr d17, [x16, #0x60]\n"
- "ldr x20, [x16, #0x68]\n"
+ "ldr d17, [x17, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr d16, [x16, #0x70]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr d16, [x17, #0x70]\n"
+ "ldr x20, [x17, #0x78]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x88]\n"
"mov v16.d[1], x20\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
- "ldr d17, [x16, #0x80]\n"
- "ldr x20, [x16, #0x88]\n"
+ "ldr d17, [x17, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr d16, [x16, #0x90]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x98]\n"
+ "ldr d16, [x17, #0x90]\n"
+ "ldr x20, [x17, #0x98]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xa8]\n"
"mov v16.d[1], x20\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr d17, [x16, #0xa0]\n"
- "ldr x20, [x16, #0xa8]\n"
+ "ldr d17, [x17, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr d16, [x16, #0xb0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0xb8]\n"
+ "ldr d16, [x17, #0xb0]\n"
+ "ldr x20, [x17, #0xb8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xc8]\n"
"mov v16.d[1], x20\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
- "ldr d17, [x16, #0xc0]\n"
- "ldr x20, [x16, #0xc8]\n"
+ "ldr d17, [x17, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr d16, [x16, #0xd0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0xd8]\n"
+ "ldr d16, [x17, #0xd0]\n"
+ "ldr x20, [x17, #0xd8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xe8]\n"
"mov v16.d[1], x20\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr d17, [x16, #0xe0]\n"
- "ldr x20, [x16, #0xe8]\n"
+ "ldr d17, [x17, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr d16, [x16, #0xf0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0xf8]\n"
+ "ldr d16, [x17, #0xf0]\n"
+ "ldr x20, [x17, #0xf8]\n"
+ "add x17, x17, #0x100\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x8]\n"
"mov v16.d[1], x20\n"
- "add x13, x13, #0x10\n"
- "add x16, x16, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
- "ldr x20, [x16, #0x8]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x21, [x13, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x16, #0x18]\n"
- "mov v0.d[1], x21\n"
+ "ldr d7, [x17, #0x10]\n"
+ "ldr x20, [x17, #0x18]\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"bge 17b\n"
"18:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
+ "add x13, x13, #0x10\n"
+ "sub x14, x14, #0x10\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "ldr q17, [x16, #0x40]\n"
+ "ldr q17, [x17, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x50]\n"
+ "ldr q16, [x17, #0x50]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x16, #0x60]\n"
+ "ldr q17, [x17, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x16, #0x70]\n"
+ "ldr q16, [x17, #0x70]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x16, #0x80]\n"
+ "ldr q17, [x17, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x16, #0x90]\n"
+ "ldr q16, [x17, #0x90]\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x16, #0xa0]\n"
+ "ldr q17, [x17, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x16, #0xb0]\n"
+ "ldr q16, [x17, #0xb0]\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x16, #0xc0]\n"
+ "ldr q17, [x17, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x16, #0xd0]\n"
+ "ldr q16, [x17, #0xd0]\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr q17, [x16, #0xe0]\n"
+ "ldr q17, [x17, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr q16, [x16, #0xf0]\n"
- "add x13, x13, #0x10\n"
- "sub x14, x14, #0x10\n"
+ "ldr q16, [x17, #0xf0]\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
"19:" // Height 1: Multiply loop: Main loop skip
"cbz x14, 24f\n"
"cmp x14, #0x4\n"
@@ -299,16 +299,16 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"20:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x13], #0x4\n"
"sub x14, x14, #0x4\n"
- "ldr q16, [x16, #0x0]\n"
- ".inst 0x4f92e208 // sdot v8.4s, v16.16b, v18.4b[0]\n"
- "ldr q16, [x16, #0x10]\n"
- ".inst 0x4f92e209 // sdot v9.4s, v16.16b, v18.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x0]\n"
"cmp x14, #0x4\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x10]\n"
+ ".inst 0x4f92e228 // sdot v8.4s, v17.16b, v18.4b[0]\n"
+ "ldr q17, [x17, #0x20]\n"
+ ".inst 0x4f92e209 // sdot v9.4s, v16.16b, v18.4b[0]\n"
+ "ldr q16, [x17, #0x30]\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f92e22a // sdot v10.4s, v17.16b, v18.4b[0]\n"
".inst 0x4f92e20b // sdot v11.4s, v16.16b, v18.4b[0]\n"
- "add x16, x16, #0x40\n"
"bge 20b\n"
"21:" // Height 1: Multiply loop: Skip odd blocks
"cbz x14, 24f\n"
@@ -320,165 +320,165 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"22:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x13, #0x0]\n"
"23:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q16, [x16, #0x0]\n"
- ".inst 0x4f80e208 // sdot v8.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x10]\n"
+ "ldr q17, [x17, #0x0]\n"
+ "ldr q16, [x17, #0x10]\n"
+ ".inst 0x4f80e228 // sdot v8.4s, v17.16b, v0.4b[0]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x20]\n"
- ".inst 0x4f80e20a // sdot v10.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
+ "add x17, x17, #0x40\n"
+ ".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
"24:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x15, x15, #0x1\n"
"cmp x15, x20\n"
"bne 14b\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
"bge 33f\n"
"tbz x8, #3, 28f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"tbz x8, #2, 26f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"tbz x8, #1, 25f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"tbz x8, #0, 32f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"b 32f\n"
"25:" // Height 1: Partial direct writeback: partial_1_12
"tbz x8, #0, 32f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"b 32f\n"
"26:" // Height 1: Partial direct writeback: partial_2_8
"tbz x8, #1, 27f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"tbz x8, #0, 32f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"b 32f\n"
"27:" // Height 1: Partial direct writeback: partial_1_8
"tbz x8, #0, 32f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"b 32f\n"
"28:" // Height 1: Partial direct writeback: partial_4_0
"tbz x8, #2, 30f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"tbz x8, #1, 29f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"tbz x8, #0, 32f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"b 32f\n"
"29:" // Height 1: Partial direct writeback: partial_1_4
"tbz x8, #0, 32f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"b 32f\n"
"30:" // Height 1: Partial direct writeback: partial_2_0
"tbz x8, #1, 31f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"tbz x8, #0, 32f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"b 32f\n"
"31:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"32:" // Height 1: Partial direct writeback: Done
"b 34f\n"
"33:" // Height 1: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"34:" // Height 1: Writeback done
"subs x8, x8, #0x10\n"
"bgt 2b\n"
"b 206f\n"
"35:" // Height 2
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"36:" // Height 2: Column loop
"tbz %x[flags], #0, 46f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x10\n"
- "add x24, x17, x20, LSL #2\n"
+ "add x24, x16, x20, LSL #2\n"
"bge 45f\n"
"tbz x8, #3, 40f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"tbz x8, #2, 38f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"tbz x8, #1, 37f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 44f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
"tbz x8, #1, 39f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 44f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
"tbz x8, #2, 42f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"tbz x8, #1, 41f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 44f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
"tbz x8, #1, 43f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 47f\n"
"45:" // Height 2: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -497,8 +497,8 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"mov x15, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -518,154 +518,154 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr q0, [x13, #0x0]\n"
"cmp x14, #0x20\n"
"ldr q1, [x12, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x20, [x17, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr d17, [x16, #0x20]\n"
+ "ldr d17, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x21, [x17, #0x38]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr d16, [x16, #0x30]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x17, #0x30]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x48]\n"
+ "add x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
+ "mov v16.d[1], x21\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "mov v16.d[1], x20\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
- "ldr d17, [x16, #0x40]\n"
+ "ldr d17, [x17, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr x20, [x16, #0x48]\n"
+ "ldr x21, [x17, #0x58]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
- "ldr d16, [x16, #0x50]\n"
+ "ldr d16, [x17, #0x50]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x58]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0x68]\n"
+ "ldr x23, [x13, #0x8]\n"
+ "sub x14, x14, #0x10\n"
+ "mov v16.d[1], x21\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr x21, [x16, #0x68]\n"
".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n"
- "ldr d17, [x16, #0x60]\n"
+ "ldr d17, [x17, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr x21, [x17, #0x78]\n"
".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n"
- "ldr d16, [x16, #0x70]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x17, #0x70]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x88]\n"
+ "ldr x22, [x12, #0x8]\n"
+ "cmp x14, #0x20\n"
+ "mov v16.d[1], x21\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
- "mov v16.d[1], x20\n"
".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n"
- "ldr d17, [x16, #0x80]\n"
+ "ldr d17, [x17, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr x20, [x16, #0x88]\n"
+ "ldr x21, [x17, #0x98]\n"
".inst 0x4fa1e20f // sdot v15.4s, v16.16b, v1.4b[1]\n"
- "ldr d16, [x16, #0x90]\n"
+ "ldr d16, [x17, #0x90]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x98]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0xa8]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v16.d[1], x21\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr x21, [x16, #0xa8]\n"
".inst 0x4f81ea2c // sdot v12.4s, v17.16b, v1.4b[2]\n"
- "ldr d17, [x16, #0xa0]\n"
+ "ldr d17, [x17, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "ldr x21, [x17, #0xb8]\n"
".inst 0x4f81ea0d // sdot v13.4s, v16.16b, v1.4b[2]\n"
- "ldr d16, [x16, #0xb0]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x17, #0xb0]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0xc8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v16.d[1], x21\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
- "mov v16.d[1], x20\n"
".inst 0x4f81ea2e // sdot v14.4s, v17.16b, v1.4b[2]\n"
- "ldr d17, [x16, #0xc0]\n"
+ "ldr d17, [x17, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr x20, [x16, #0xc8]\n"
+ "ldr x21, [x17, #0xd8]\n"
".inst 0x4f81ea0f // sdot v15.4s, v16.16b, v1.4b[2]\n"
- "ldr d16, [x16, #0xd0]\n"
+ "ldr d16, [x17, #0xd0]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x16, #0xd8]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0xe8]\n"
+ "mov v16.d[1], x21\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr x21, [x16, #0xe8]\n"
".inst 0x4fa1ea2c // sdot v12.4s, v17.16b, v1.4b[3]\n"
- "ldr d17, [x16, #0xe0]\n"
+ "ldr d17, [x17, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "ldr x21, [x17, #0xf8]\n"
".inst 0x4fa1ea0d // sdot v13.4s, v16.16b, v1.4b[3]\n"
- "ldr d16, [x16, #0xf0]\n"
- "mov v17.d[1], x21\n"
- "add x13, x13, #0x10\n"
- "mov v16.d[1], x20\n"
- "add x12, x12, #0x10\n"
- "add x16, x16, #0x100\n"
+ "ldr d16, [x17, #0xf0]\n"
+ "mov v17.d[1], x20\n"
+ "add x17, x17, #0x100\n"
+ "ldr x20, [x17, #0x8]\n"
+ "mov v16.d[1], x21\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa1ea2e // sdot v14.4s, v17.16b, v1.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
- "ldr x21, [x16, #0x8]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x4fa1ea0f // sdot v15.4s, v16.16b, v1.4b[3]\n"
"ldr d1, [x12, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x20, [x13, #0x8]\n"
- "mov v6.d[1], x21\n"
- "ldr x21, [x12, #0x8]\n"
- "mov v0.d[1], x20\n"
- "ldr x20, [x16, #0x18]\n"
- "mov v1.d[1], x21\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr d7, [x17, #0x10]\n"
+ "mov v6.d[1], x20\n"
+ "ldr x20, [x17, #0x18]\n"
+ "mov v0.d[1], x23\n"
+ "mov v1.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x12, #0x80]\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"add x13, x13, #0x10\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
"sub x14, x14, #0x10\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x16, #0x40]\n"
+ "ldr q17, [x17, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
"prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x16, #0x50]\n"
+ "ldr q16, [x17, #0x50]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x16, #0x60]\n"
+ "ldr q17, [x17, #0x60]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x16, #0x70]\n"
+ "ldr q16, [x17, #0x70]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x16, #0x80]\n"
+ "ldr q17, [x17, #0x80]\n"
".inst 0x4fa0e20b // sdot v11.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20f // sdot v15.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x16, #0x90]\n"
+ "ldr q16, [x17, #0x90]\n"
".inst 0x4f80ea28 // sdot v8.4s, v17.16b, v0.4b[2]\n"
".inst 0x4f81ea2c // sdot v12.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x16, #0xa0]\n"
+ "ldr q17, [x17, #0xa0]\n"
".inst 0x4f80ea09 // sdot v9.4s, v16.16b, v0.4b[2]\n"
".inst 0x4f81ea0d // sdot v13.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x16, #0xb0]\n"
+ "ldr q16, [x17, #0xb0]\n"
".inst 0x4f80ea2a // sdot v10.4s, v17.16b, v0.4b[2]\n"
".inst 0x4f81ea2e // sdot v14.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x16, #0xc0]\n"
+ "ldr q17, [x17, #0xc0]\n"
".inst 0x4f80ea0b // sdot v11.4s, v16.16b, v0.4b[2]\n"
".inst 0x4f81ea0f // sdot v15.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x16, #0xd0]\n"
+ "ldr q16, [x17, #0xd0]\n"
".inst 0x4fa0ea28 // sdot v8.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa1ea2c // sdot v12.4s, v17.16b, v1.4b[3]\n"
- "ldr q17, [x16, #0xe0]\n"
+ "ldr q17, [x17, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
".inst 0x4fa1ea0d // sdot v13.4s, v16.16b, v1.4b[3]\n"
- "ldr q16, [x16, #0xf0]\n"
+ "ldr q16, [x17, #0xf0]\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa1ea2e // sdot v14.4s, v17.16b, v1.4b[3]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
".inst 0x4fa1ea0f // sdot v15.4s, v16.16b, v1.4b[3]\n"
@@ -678,16 +678,16 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"sub x14, x14, #0x4\n"
"ldr s18, [x12], #0x4\n"
"cmp x14, #0x4\n"
- "ldr q17, [x16, #0x0]\n"
+ "ldr q17, [x17, #0x0]\n"
+ "ldr q16, [x17, #0x10]\n"
".inst 0x4f93e228 // sdot v8.4s, v17.16b, v19.4b[0]\n"
- "ldr q16, [x16, #0x10]\n"
".inst 0x4f92e22c // sdot v12.4s, v17.16b, v18.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x4f93e209 // sdot v9.4s, v16.16b, v19.4b[0]\n"
".inst 0x4f92e20d // sdot v13.4s, v16.16b, v18.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
".inst 0x4f93e22a // sdot v10.4s, v17.16b, v19.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f92e22e // sdot v14.4s, v17.16b, v18.4b[0]\n"
".inst 0x4f93e20b // sdot v11.4s, v16.16b, v19.4b[0]\n"
".inst 0x4f92e20f // sdot v15.4s, v16.16b, v18.4b[0]\n"
@@ -705,16 +705,16 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr b0, [x13, #0x0]\n"
"ldr b1, [x12, #0x0]\n"
"57:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q17, [x16, #0x0]\n"
+ "ldr q17, [x17, #0x0]\n"
+ "ldr q16, [x17, #0x10]\n"
".inst 0x4f80e228 // sdot v8.4s, v17.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x10]\n"
".inst 0x4f81e22c // sdot v12.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20d // sdot v13.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
@@ -724,79 +724,79 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 48b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"bge 67f\n"
"tbz x8, #3, 62f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"tbz x8, #2, 60f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"tbz x8, #1, 59f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"tbz x8, #0, 66f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"b 66f\n"
"59:" // Height 2: Partial direct writeback: partial_1_12
"tbz x8, #0, 66f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"b 66f\n"
"60:" // Height 2: Partial direct writeback: partial_2_8
"tbz x8, #1, 61f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"tbz x8, #0, 66f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"b 66f\n"
"61:" // Height 2: Partial direct writeback: partial_1_8
"tbz x8, #0, 66f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"b 66f\n"
"62:" // Height 2: Partial direct writeback: partial_4_0
"tbz x8, #2, 64f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"tbz x8, #1, 63f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"tbz x8, #0, 66f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"b 66f\n"
"63:" // Height 2: Partial direct writeback: partial_1_4
"tbz x8, #0, 66f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"b 66f\n"
"64:" // Height 2: Partial direct writeback: partial_2_0
"tbz x8, #1, 65f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"tbz x8, #0, 66f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"b 66f\n"
"65:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"66:" // Height 2: Partial direct writeback: Done
"b 68f\n"
"67:" // Height 2: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -807,107 +807,107 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 206f\n"
"69:" // Height 3
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"70:" // Height 3: Column loop
"tbz %x[flags], #0, 80f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
"cmp x8, #0x10\n"
+ "add x24, x16, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"bge 79f\n"
"tbz x8, #3, 74f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"tbz x8, #2, 72f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"tbz x8, #1, 71f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"ldr d19, [x23], #0x8\n"
"tbz x8, #0, 78f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"ld1 { v19.s }[2], [x23]\n"
"b 78f\n"
"71:" // Height 3: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 78f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"b 78f\n"
"72:" // Height 3: Partial accumulate: partial_2_8
"tbz x8, #1, 73f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"ldr d18, [x23], #0x8\n"
"tbz x8, #0, 78f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"ld1 { v18.s }[2], [x23]\n"
"b 78f\n"
"73:" // Height 3: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 78f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"b 78f\n"
"74:" // Height 3: Partial accumulate: partial_4_0
"tbz x8, #2, 76f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"tbz x8, #1, 75f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"ldr d17, [x23], #0x8\n"
"tbz x8, #0, 78f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"ld1 { v17.s }[2], [x23]\n"
"b 78f\n"
"75:" // Height 3: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 78f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"b 78f\n"
"76:" // Height 3: Partial accumulate: partial_2_0
"tbz x8, #1, 77f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"ldr d16, [x23], #0x8\n"
"tbz x8, #0, 78f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"ld1 { v16.s }[2], [x23]\n"
"b 78f\n"
"77:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"78:" // Height 3: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 81f\n"
"79:" // Height 3: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -934,8 +934,8 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"mov x15, #0x0\n"
"82:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 83f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -959,123 +959,123 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"cmp x14, #0x20\n"
"ldr q1, [x12, #0x0]\n"
"ldr q2, [x11, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 86f\n"
"85:" // Height 3: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x21, [x17, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x20, [x17, #0x38]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr d21, [x16, #0x20]\n"
+ "ldr d21, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v21.d[1], x21\n"
+ "add x13, x13, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x48]\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr d20, [x16, #0x30]\n"
+ "ldr d20, [x17, #0x30]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x48]\n"
+ "add x11, x11, #0x10\n"
+ "ldr x24, [x13, #0x8]\n"
"mov v20.d[1], x20\n"
".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x58]\n"
+ "ldr x20, [x17, #0x58]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
- "ldr d21, [x16, #0x40]\n"
+ "ldr d21, [x17, #0x40]\n"
".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
- "mov v21.d[1], x21\n"
+ "ldr x23, [x12, #0x8]\n"
".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x68]\n"
+ "ldr x22, [x11, #0x8]\n"
".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n"
- "ldr d20, [x16, #0x50]\n"
+ "ldr d20, [x17, #0x50]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x68]\n"
+ "sub x14, x14, #0x10\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"mov v20.d[1], x20\n"
".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr x20, [x17, #0x78]\n"
".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n"
- "ldr d21, [x16, #0x60]\n"
+ "ldr d21, [x17, #0x60]\n"
".inst 0x4fa0e289 // sdot v9.4s, v20.16b, v0.4b[1]\n"
- "mov v21.d[1], x21\n"
+ "cmp x14, #0x20\n"
".inst 0x4fa1e28d // sdot v13.4s, v20.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0x88]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4fa2e291 // sdot v17.4s, v20.16b, v2.4b[1]\n"
- "ldr d20, [x16, #0x70]\n"
+ "ldr d20, [x17, #0x70]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"mov v20.d[1], x20\n"
".inst 0x4fa0e2aa // sdot v10.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ae // sdot v14.4s, v21.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x98]\n"
+ "ldr x20, [x17, #0x98]\n"
".inst 0x4fa2e2b2 // sdot v18.4s, v21.16b, v2.4b[1]\n"
- "ldr d21, [x16, #0x80]\n"
+ "ldr d21, [x17, #0x80]\n"
".inst 0x4fa0e28b // sdot v11.4s, v20.16b, v0.4b[1]\n"
- "mov v21.d[1], x21\n"
".inst 0x4fa1e28f // sdot v15.4s, v20.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0xa8]\n"
".inst 0x4fa2e293 // sdot v19.4s, v20.16b, v2.4b[1]\n"
- "ldr d20, [x16, #0x90]\n"
+ "ldr d20, [x17, #0x90]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xa8]\n"
"mov v20.d[1], x20\n"
".inst 0x4f80eaa8 // sdot v8.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaac // sdot v12.4s, v21.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "ldr x20, [x17, #0xb8]\n"
".inst 0x4f82eab0 // sdot v16.4s, v21.16b, v2.4b[2]\n"
- "ldr d21, [x16, #0xa0]\n"
+ "ldr d21, [x17, #0xa0]\n"
".inst 0x4f80ea89 // sdot v9.4s, v20.16b, v0.4b[2]\n"
- "mov v21.d[1], x21\n"
".inst 0x4f81ea8d // sdot v13.4s, v20.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xc8]\n"
".inst 0x4f82ea91 // sdot v17.4s, v20.16b, v2.4b[2]\n"
- "ldr d20, [x16, #0xb0]\n"
+ "ldr d20, [x17, #0xb0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xc8]\n"
"mov v20.d[1], x20\n"
".inst 0x4f80eaaa // sdot v10.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaae // sdot v14.4s, v21.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xd8]\n"
+ "ldr x20, [x17, #0xd8]\n"
".inst 0x4f82eab2 // sdot v18.4s, v21.16b, v2.4b[2]\n"
- "ldr d21, [x16, #0xc0]\n"
+ "ldr d21, [x17, #0xc0]\n"
".inst 0x4f80ea8b // sdot v11.4s, v20.16b, v0.4b[2]\n"
- "mov v21.d[1], x21\n"
".inst 0x4f81ea8f // sdot v15.4s, v20.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xe8]\n"
".inst 0x4f82ea93 // sdot v19.4s, v20.16b, v2.4b[2]\n"
- "ldr d20, [x16, #0xd0]\n"
+ "ldr d20, [x17, #0xd0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xe8]\n"
"mov v20.d[1], x20\n"
".inst 0x4fa0eaa8 // sdot v8.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa1eaac // sdot v12.4s, v21.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "ldr x20, [x17, #0xf8]\n"
".inst 0x4fa2eab0 // sdot v16.4s, v21.16b, v2.4b[3]\n"
- "ldr d21, [x16, #0xe0]\n"
+ "ldr d21, [x17, #0xe0]\n"
".inst 0x4fa0ea89 // sdot v9.4s, v20.16b, v0.4b[3]\n"
- "mov v21.d[1], x21\n"
".inst 0x4fa1ea8d // sdot v13.4s, v20.16b, v1.4b[3]\n"
- "add x13, x13, #0x10\n"
".inst 0x4fa2ea91 // sdot v17.4s, v20.16b, v2.4b[3]\n"
- "ldr d20, [x16, #0xf0]\n"
+ "ldr d20, [x17, #0xf0]\n"
+ "mov v21.d[1], x21\n"
+ "add x17, x17, #0x100\n"
+ "ldr x21, [x17, #0x8]\n"
"mov v20.d[1], x20\n"
- "add x12, x12, #0x10\n"
- "add x11, x11, #0x10\n"
- "add x16, x16, #0x100\n"
".inst 0x4fa0eaaa // sdot v10.4s, v21.16b, v0.4b[3]\n"
- "ldr x20, [x16, #0x8]\n"
".inst 0x4fa1eaae // sdot v14.4s, v21.16b, v1.4b[3]\n"
- "ldr x23, [x13, #0x8]\n"
+ "ldr x20, [x17, #0x18]\n"
".inst 0x4fa2eab2 // sdot v18.4s, v21.16b, v2.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x4fa0ea8b // sdot v11.4s, v20.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x4fa1ea8f // sdot v15.4s, v20.16b, v1.4b[3]\n"
"ldr d1, [x12, #0x0]\n"
- "ldr x22, [x12, #0x8]\n"
".inst 0x4fa2ea93 // sdot v19.4s, v20.16b, v2.4b[3]\n"
"ldr d2, [x11, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x21, [x11, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x16, #0x18]\n"
- "mov v0.d[1], x23\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v1.d[1], x22\n"
- "prfm pldl1keep, [x12, #0x80]\n"
- "mov v2.d[1], x21\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr d7, [x17, #0x10]\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x24\n"
+ "mov v1.d[1], x23\n"
+ "mov v2.d[1], x22\n"
"mov v7.d[1], x20\n"
"bge 85b\n"
"86:" // Height 3: Multiply loop: Single iteration only
@@ -1084,66 +1084,66 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q21, [x16, #0x20]\n"
+ "ldr q21, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"sub x14, x14, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q20, [x16, #0x30]\n"
+ "ldr q20, [x17, #0x30]\n"
".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
"prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x16, #0x40]\n"
+ "ldr q21, [x17, #0x40]\n"
".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x16, #0x50]\n"
+ "ldr q20, [x17, #0x50]\n"
".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x16, #0x60]\n"
+ "ldr q21, [x17, #0x60]\n"
".inst 0x4fa0e289 // sdot v9.4s, v20.16b, v0.4b[1]\n"
".inst 0x4fa1e28d // sdot v13.4s, v20.16b, v1.4b[1]\n"
".inst 0x4fa2e291 // sdot v17.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x16, #0x70]\n"
+ "ldr q20, [x17, #0x70]\n"
".inst 0x4fa0e2aa // sdot v10.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ae // sdot v14.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b2 // sdot v18.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x16, #0x80]\n"
+ "ldr q21, [x17, #0x80]\n"
".inst 0x4fa0e28b // sdot v11.4s, v20.16b, v0.4b[1]\n"
".inst 0x4fa1e28f // sdot v15.4s, v20.16b, v1.4b[1]\n"
".inst 0x4fa2e293 // sdot v19.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x16, #0x90]\n"
+ "ldr q20, [x17, #0x90]\n"
".inst 0x4f80eaa8 // sdot v8.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaac // sdot v12.4s, v21.16b, v1.4b[2]\n"
".inst 0x4f82eab0 // sdot v16.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x16, #0xa0]\n"
+ "ldr q21, [x17, #0xa0]\n"
".inst 0x4f80ea89 // sdot v9.4s, v20.16b, v0.4b[2]\n"
".inst 0x4f81ea8d // sdot v13.4s, v20.16b, v1.4b[2]\n"
".inst 0x4f82ea91 // sdot v17.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x16, #0xb0]\n"
+ "ldr q20, [x17, #0xb0]\n"
".inst 0x4f80eaaa // sdot v10.4s, v21.16b, v0.4b[2]\n"
".inst 0x4f81eaae // sdot v14.4s, v21.16b, v1.4b[2]\n"
".inst 0x4f82eab2 // sdot v18.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x16, #0xc0]\n"
+ "ldr q21, [x17, #0xc0]\n"
".inst 0x4f80ea8b // sdot v11.4s, v20.16b, v0.4b[2]\n"
".inst 0x4f81ea8f // sdot v15.4s, v20.16b, v1.4b[2]\n"
".inst 0x4f82ea93 // sdot v19.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x16, #0xd0]\n"
+ "ldr q20, [x17, #0xd0]\n"
".inst 0x4fa0eaa8 // sdot v8.4s, v21.16b, v0.4b[3]\n"
".inst 0x4fa1eaac // sdot v12.4s, v21.16b, v1.4b[3]\n"
".inst 0x4fa2eab0 // sdot v16.4s, v21.16b, v2.4b[3]\n"
- "ldr q21, [x16, #0xe0]\n"
+ "ldr q21, [x17, #0xe0]\n"
".inst 0x4fa0ea89 // sdot v9.4s, v20.16b, v0.4b[3]\n"
".inst 0x4fa1ea8d // sdot v13.4s, v20.16b, v1.4b[3]\n"
".inst 0x4fa2ea91 // sdot v17.4s, v20.16b, v2.4b[3]\n"
- "ldr q20, [x16, #0xf0]\n"
+ "ldr q20, [x17, #0xf0]\n"
".inst 0x4fa0eaaa // sdot v10.4s, v21.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa1eaae // sdot v14.4s, v21.16b, v1.4b[3]\n"
".inst 0x4fa2eab2 // sdot v18.4s, v21.16b, v2.4b[3]\n"
".inst 0x4fa0ea8b // sdot v11.4s, v20.16b, v0.4b[3]\n"
@@ -1159,18 +1159,18 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr s23, [x12], #0x4\n"
"cmp x14, #0x4\n"
"ldr s22, [x11], #0x4\n"
- "ldr q21, [x16, #0x0]\n"
+ "ldr q21, [x17, #0x0]\n"
+ "ldr q20, [x17, #0x10]\n"
".inst 0x4f98e2a8 // sdot v8.4s, v21.16b, v24.4b[0]\n"
- "ldr q20, [x16, #0x10]\n"
".inst 0x4f97e2ac // sdot v12.4s, v21.16b, v23.4b[0]\n"
".inst 0x4f96e2b0 // sdot v16.4s, v21.16b, v22.4b[0]\n"
- "ldr q21, [x16, #0x20]\n"
+ "ldr q21, [x17, #0x20]\n"
".inst 0x4f98e289 // sdot v9.4s, v20.16b, v24.4b[0]\n"
".inst 0x4f97e28d // sdot v13.4s, v20.16b, v23.4b[0]\n"
".inst 0x4f96e291 // sdot v17.4s, v20.16b, v22.4b[0]\n"
- "ldr q20, [x16, #0x30]\n"
+ "ldr q20, [x17, #0x30]\n"
".inst 0x4f98e2aa // sdot v10.4s, v21.16b, v24.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f97e2ae // sdot v14.4s, v21.16b, v23.4b[0]\n"
".inst 0x4f96e2b2 // sdot v18.4s, v21.16b, v22.4b[0]\n"
".inst 0x4f98e28b // sdot v11.4s, v20.16b, v24.4b[0]\n"
@@ -1193,18 +1193,18 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr b1, [x12, #0x0]\n"
"ldr b2, [x11, #0x0]\n"
"91:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q21, [x16, #0x0]\n"
+ "ldr q21, [x17, #0x0]\n"
+ "ldr q20, [x17, #0x10]\n"
".inst 0x4f80e2a8 // sdot v8.4s, v21.16b, v0.4b[0]\n"
- "ldr q20, [x16, #0x10]\n"
".inst 0x4f81e2ac // sdot v12.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b0 // sdot v16.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x16, #0x20]\n"
+ "ldr q21, [x17, #0x20]\n"
".inst 0x4f80e289 // sdot v9.4s, v20.16b, v0.4b[0]\n"
".inst 0x4f81e28d // sdot v13.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e291 // sdot v17.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x16, #0x30]\n"
+ "ldr q20, [x17, #0x30]\n"
".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
@@ -1216,97 +1216,97 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 82b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
"bge 101f\n"
"tbz x8, #3, 96f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v17.4s }, [x23], #0x10\n"
"tbz x8, #2, 94f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"tbz x8, #1, 93f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"str d19, [x23], #0x8\n"
"tbz x8, #0, 100f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"b 100f\n"
"93:" // Height 3: Partial direct writeback: partial_1_12
"tbz x8, #0, 100f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"str s19, [x23, #0x0]\n"
"b 100f\n"
"94:" // Height 3: Partial direct writeback: partial_2_8
"tbz x8, #1, 95f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"str d18, [x23], #0x8\n"
"tbz x8, #0, 100f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"st1 { v18.s }[2], [x23]\n"
"b 100f\n"
"95:" // Height 3: Partial direct writeback: partial_1_8
"tbz x8, #0, 100f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"str s18, [x23, #0x0]\n"
"b 100f\n"
"96:" // Height 3: Partial direct writeback: partial_4_0
"tbz x8, #2, 98f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"tbz x8, #1, 97f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"str d17, [x23], #0x8\n"
"tbz x8, #0, 100f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"st1 { v17.s }[2], [x23]\n"
"b 100f\n"
"97:" // Height 3: Partial direct writeback: partial_1_4
"tbz x8, #0, 100f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"str s17, [x23, #0x0]\n"
"b 100f\n"
"98:" // Height 3: Partial direct writeback: partial_2_0
"tbz x8, #1, 99f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"str d16, [x23], #0x8\n"
"tbz x8, #0, 100f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"b 100f\n"
"99:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"str s16, [x23, #0x0]\n"
"100:" // Height 3: Partial direct writeback: Done
"b 102f\n"
"101:" // Height 3: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -1321,38 +1321,38 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 206f\n"
"103:" // Height 4
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"104:" // Height 4: Column loop
"tbz %x[flags], #0, 114f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"cmp x8, #0x10\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
"bge 113f\n"
"tbz x8, #3, 108f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
"tbz x8, #2, 106f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
"tbz x8, #1, 105f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"ldr d19, [x23], #0x8\n"
"ldr d23, [x22], #0x8\n"
"tbz x8, #0, 112f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"ld1 { v19.s }[2], [x23]\n"
"ld1 { v23.s }[2], [x22]\n"
@@ -1360,20 +1360,20 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"105:" // Height 4: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 112f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"ldr s23, [x22, #0x0]\n"
"b 112f\n"
"106:" // Height 4: Partial accumulate: partial_2_8
"tbz x8, #1, 107f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"ldr d18, [x23], #0x8\n"
"ldr d22, [x22], #0x8\n"
"tbz x8, #0, 112f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"ld1 { v18.s }[2], [x23]\n"
"ld1 { v22.s }[2], [x22]\n"
@@ -1381,25 +1381,25 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"107:" // Height 4: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 112f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"ldr s22, [x22, #0x0]\n"
"b 112f\n"
"108:" // Height 4: Partial accumulate: partial_4_0
"tbz x8, #2, 110f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"tbz x8, #1, 109f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"ldr d17, [x23], #0x8\n"
"ldr d21, [x22], #0x8\n"
"tbz x8, #0, 112f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"ld1 { v17.s }[2], [x23]\n"
"ld1 { v21.s }[2], [x22]\n"
@@ -1407,38 +1407,38 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"109:" // Height 4: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 112f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"ldr s21, [x22, #0x0]\n"
"b 112f\n"
"110:" // Height 4: Partial accumulate: partial_2_0
"tbz x8, #1, 111f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"ldr d16, [x23], #0x8\n"
"ldr d20, [x22], #0x8\n"
"tbz x8, #0, 112f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"ld1 { v16.s }[2], [x23]\n"
"ld1 { v20.s }[2], [x22]\n"
"b 112f\n"
"111:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"ldr s20, [x22, #0x0]\n"
"112:" // Height 4: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 115f\n"
"113:" // Height 4: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -1473,8 +1473,8 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"mov x15, #0x0\n"
"116:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 117f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1502,130 +1502,129 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr q1, [x12, #0x0]\n"
"ldr q2, [x11, #0x0]\n"
"ldr q3, [x10, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 120f\n"
"119:" // Height 4: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x20, [x17, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x21, [x17, #0x38]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"add x13, x13, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr d25, [x16, #0x20]\n"
+ "ldr d25, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v25.d[1], x21\n"
+ "add x12, x12, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x48]\n"
+ "add x11, x11, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr d24, [x16, #0x30]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0x30]\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
+ "ldr x20, [x17, #0x48]\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x58]\n"
+ "add x10, x10, #0x10\n"
+ "mov v24.d[1], x21\n"
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
- "add x11, x11, #0x10\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
- "ldr d25, [x16, #0x40]\n"
+ "ldr d25, [x17, #0x40]\n"
".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x58]\n"
".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x68]\n"
+ "ldr x25, [x13, #0x8]\n"
".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n"
- "ldr d24, [x16, #0x50]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0x50]\n"
".inst 0x4fa0e328 // sdot v8.4s, v25.16b, v0.4b[1]\n"
+ "ldr x20, [x17, #0x68]\n"
".inst 0x4fa1e32c // sdot v12.4s, v25.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr x24, [x12, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4fa2e330 // sdot v16.4s, v25.16b, v2.4b[1]\n"
- "ldr x25, [x13, #0x8]\n"
".inst 0x4fa3e334 // sdot v20.4s, v25.16b, v3.4b[1]\n"
- "ldr d25, [x16, #0x60]\n"
+ "ldr d25, [x17, #0x60]\n"
".inst 0x4fa0e309 // sdot v9.4s, v24.16b, v0.4b[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x78]\n"
".inst 0x4fa1e30d // sdot v13.4s, v24.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0x88]\n"
+ "ldr x23, [x11, #0x8]\n"
".inst 0x4fa2e311 // sdot v17.4s, v24.16b, v2.4b[1]\n"
- "ldr x24, [x12, #0x8]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4fa3e315 // sdot v21.4s, v24.16b, v3.4b[1]\n"
- "ldr d24, [x16, #0x70]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0x70]\n"
".inst 0x4fa0e32a // sdot v10.4s, v25.16b, v0.4b[1]\n"
+ "ldr x20, [x17, #0x88]\n"
".inst 0x4fa1e32e // sdot v14.4s, v25.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x98]\n"
+ "ldr x22, [x10, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4fa2e332 // sdot v18.4s, v25.16b, v2.4b[1]\n"
- "ldr x23, [x11, #0x8]\n"
".inst 0x4fa3e336 // sdot v22.4s, v25.16b, v3.4b[1]\n"
- "ldr d25, [x16, #0x80]\n"
+ "ldr d25, [x17, #0x80]\n"
".inst 0x4fa0e30b // sdot v11.4s, v24.16b, v0.4b[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x98]\n"
".inst 0x4fa1e30f // sdot v15.4s, v24.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0xa8]\n"
+ "sub x14, x14, #0x10\n"
".inst 0x4fa2e313 // sdot v19.4s, v24.16b, v2.4b[1]\n"
- "ldr x22, [x10, #0x8]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4fa3e317 // sdot v23.4s, v24.16b, v3.4b[1]\n"
- "ldr d24, [x16, #0x90]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0x90]\n"
".inst 0x4f80eb28 // sdot v8.4s, v25.16b, v0.4b[2]\n"
+ "ldr x20, [x17, #0xa8]\n"
".inst 0x4f81eb2c // sdot v12.4s, v25.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "cmp x14, #0x20\n"
+ "mov v24.d[1], x21\n"
".inst 0x4f82eb30 // sdot v16.4s, v25.16b, v2.4b[2]\n"
- "sub x14, x14, #0x10\n"
".inst 0x4f83eb34 // sdot v20.4s, v25.16b, v3.4b[2]\n"
- "ldr d25, [x16, #0xa0]\n"
+ "ldr d25, [x17, #0xa0]\n"
".inst 0x4f80eb09 // sdot v9.4s, v24.16b, v0.4b[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xb8]\n"
".inst 0x4f81eb0d // sdot v13.4s, v24.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xc8]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4f82eb11 // sdot v17.4s, v24.16b, v2.4b[2]\n"
- "cmp x14, #0x20\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f83eb15 // sdot v21.4s, v24.16b, v3.4b[2]\n"
- "ldr d24, [x16, #0xb0]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0xb0]\n"
".inst 0x4f80eb2a // sdot v10.4s, v25.16b, v0.4b[2]\n"
+ "ldr x20, [x17, #0xc8]\n"
".inst 0x4f81eb2e // sdot v14.4s, v25.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xd8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4f82eb32 // sdot v18.4s, v25.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4f83eb36 // sdot v22.4s, v25.16b, v3.4b[2]\n"
- "ldr d25, [x16, #0xc0]\n"
+ "ldr d25, [x17, #0xc0]\n"
".inst 0x4f80eb0b // sdot v11.4s, v24.16b, v0.4b[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xd8]\n"
".inst 0x4f81eb0f // sdot v15.4s, v24.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xe8]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4f82eb13 // sdot v19.4s, v24.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4f83eb17 // sdot v23.4s, v24.16b, v3.4b[2]\n"
- "ldr d24, [x16, #0xd0]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0xd0]\n"
".inst 0x4fa0eb28 // sdot v8.4s, v25.16b, v0.4b[3]\n"
+ "ldr x20, [x17, #0xe8]\n"
".inst 0x4fa1eb2c // sdot v12.4s, v25.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4fa2eb30 // sdot v16.4s, v25.16b, v2.4b[3]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4fa3eb34 // sdot v20.4s, v25.16b, v3.4b[3]\n"
- "ldr d25, [x16, #0xe0]\n"
+ "ldr d25, [x17, #0xe0]\n"
".inst 0x4fa0eb09 // sdot v9.4s, v24.16b, v0.4b[3]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xf8]\n"
".inst 0x4fa1eb0d // sdot v13.4s, v24.16b, v1.4b[3]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4fa2eb11 // sdot v17.4s, v24.16b, v2.4b[3]\n"
+ "mov v25.d[1], x20\n"
".inst 0x4fa3eb15 // sdot v21.4s, v24.16b, v3.4b[3]\n"
- "ldr d24, [x16, #0xf0]\n"
- "mov v24.d[1], x20\n"
- "add x16, x16, #0x100\n"
+ "ldr d24, [x17, #0xf0]\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa0eb2a // sdot v10.4s, v25.16b, v0.4b[3]\n"
- "ldr x21, [x16, #0x8]\n"
".inst 0x4fa1eb2e // sdot v14.4s, v25.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x20, [x17, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x4fa2eb32 // sdot v18.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb36 // sdot v22.4s, v25.16b, v3.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x4fa0eb0b // sdot v11.4s, v24.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x4fa1eb0f // sdot v15.4s, v24.16b, v1.4b[3]\n"
@@ -1634,8 +1633,9 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr d2, [x11, #0x0]\n"
".inst 0x4fa3eb17 // sdot v23.4s, v24.16b, v3.4b[3]\n"
"ldr d3, [x10, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
- "mov v6.d[1], x21\n"
+ "ldr d7, [x17, #0x10]\n"
+ "mov v6.d[1], x20\n"
+ "ldr x20, [x17, #0x18]\n"
"mov v0.d[1], x25\n"
"mov v1.d[1], x24\n"
"mov v2.d[1], x23\n"
@@ -1650,7 +1650,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q25, [x16, #0x20]\n"
+ "ldr q25, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x10, x10, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -1658,7 +1658,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
"prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q24, [x16, #0x30]\n"
+ "ldr q24, [x17, #0x30]\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
@@ -1666,64 +1666,64 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x16, #0x40]\n"
+ "ldr q25, [x17, #0x40]\n"
".inst 0x4f80e30b // sdot v11.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e30f // sdot v15.4s, v24.16b, v1.4b[0]\n"
".inst 0x4f82e313 // sdot v19.4s, v24.16b, v2.4b[0]\n"
".inst 0x4f83e317 // sdot v23.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x16, #0x50]\n"
+ "ldr q24, [x17, #0x50]\n"
".inst 0x4fa0e328 // sdot v8.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e32c // sdot v12.4s, v25.16b, v1.4b[1]\n"
".inst 0x4fa2e330 // sdot v16.4s, v25.16b, v2.4b[1]\n"
".inst 0x4fa3e334 // sdot v20.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x16, #0x60]\n"
+ "ldr q25, [x17, #0x60]\n"
".inst 0x4fa0e309 // sdot v9.4s, v24.16b, v0.4b[1]\n"
".inst 0x4fa1e30d // sdot v13.4s, v24.16b, v1.4b[1]\n"
".inst 0x4fa2e311 // sdot v17.4s, v24.16b, v2.4b[1]\n"
".inst 0x4fa3e315 // sdot v21.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x16, #0x70]\n"
+ "ldr q24, [x17, #0x70]\n"
".inst 0x4fa0e32a // sdot v10.4s, v25.16b, v0.4b[1]\n"
".inst 0x4fa1e32e // sdot v14.4s, v25.16b, v1.4b[1]\n"
".inst 0x4fa2e332 // sdot v18.4s, v25.16b, v2.4b[1]\n"
".inst 0x4fa3e336 // sdot v22.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x16, #0x80]\n"
+ "ldr q25, [x17, #0x80]\n"
".inst 0x4fa0e30b // sdot v11.4s, v24.16b, v0.4b[1]\n"
".inst 0x4fa1e30f // sdot v15.4s, v24.16b, v1.4b[1]\n"
".inst 0x4fa2e313 // sdot v19.4s, v24.16b, v2.4b[1]\n"
".inst 0x4fa3e317 // sdot v23.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x16, #0x90]\n"
+ "ldr q24, [x17, #0x90]\n"
".inst 0x4f80eb28 // sdot v8.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f81eb2c // sdot v12.4s, v25.16b, v1.4b[2]\n"
".inst 0x4f82eb30 // sdot v16.4s, v25.16b, v2.4b[2]\n"
".inst 0x4f83eb34 // sdot v20.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x16, #0xa0]\n"
+ "ldr q25, [x17, #0xa0]\n"
".inst 0x4f80eb09 // sdot v9.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb0d // sdot v13.4s, v24.16b, v1.4b[2]\n"
".inst 0x4f82eb11 // sdot v17.4s, v24.16b, v2.4b[2]\n"
".inst 0x4f83eb15 // sdot v21.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x16, #0xb0]\n"
+ "ldr q24, [x17, #0xb0]\n"
".inst 0x4f80eb2a // sdot v10.4s, v25.16b, v0.4b[2]\n"
".inst 0x4f81eb2e // sdot v14.4s, v25.16b, v1.4b[2]\n"
".inst 0x4f82eb32 // sdot v18.4s, v25.16b, v2.4b[2]\n"
".inst 0x4f83eb36 // sdot v22.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x16, #0xc0]\n"
+ "ldr q25, [x17, #0xc0]\n"
".inst 0x4f80eb0b // sdot v11.4s, v24.16b, v0.4b[2]\n"
".inst 0x4f81eb0f // sdot v15.4s, v24.16b, v1.4b[2]\n"
".inst 0x4f82eb13 // sdot v19.4s, v24.16b, v2.4b[2]\n"
".inst 0x4f83eb17 // sdot v23.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x16, #0xd0]\n"
+ "ldr q24, [x17, #0xd0]\n"
".inst 0x4fa0eb28 // sdot v8.4s, v25.16b, v0.4b[3]\n"
".inst 0x4fa1eb2c // sdot v12.4s, v25.16b, v1.4b[3]\n"
".inst 0x4fa2eb30 // sdot v16.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb34 // sdot v20.4s, v25.16b, v3.4b[3]\n"
- "ldr q25, [x16, #0xe0]\n"
+ "ldr q25, [x17, #0xe0]\n"
".inst 0x4fa0eb09 // sdot v9.4s, v24.16b, v0.4b[3]\n"
".inst 0x4fa1eb0d // sdot v13.4s, v24.16b, v1.4b[3]\n"
".inst 0x4fa2eb11 // sdot v17.4s, v24.16b, v2.4b[3]\n"
".inst 0x4fa3eb15 // sdot v21.4s, v24.16b, v3.4b[3]\n"
- "ldr q24, [x16, #0xf0]\n"
+ "ldr q24, [x17, #0xf0]\n"
".inst 0x4fa0eb2a // sdot v10.4s, v25.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa1eb2e // sdot v14.4s, v25.16b, v1.4b[3]\n"
".inst 0x4fa2eb32 // sdot v18.4s, v25.16b, v2.4b[3]\n"
".inst 0x4fa3eb36 // sdot v22.4s, v25.16b, v3.4b[3]\n"
@@ -1742,20 +1742,20 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"cmp x14, #0x4\n"
"ldr s27, [x11], #0x4\n"
"ldr s26, [x10], #0x4\n"
- "ldr q25, [x16, #0x0]\n"
+ "ldr q25, [x17, #0x0]\n"
+ "ldr q24, [x17, #0x10]\n"
".inst 0x4f9de328 // sdot v8.4s, v25.16b, v29.4b[0]\n"
- "ldr q24, [x16, #0x10]\n"
".inst 0x4f9ce32c // sdot v12.4s, v25.16b, v28.4b[0]\n"
".inst 0x4f9be330 // sdot v16.4s, v25.16b, v27.4b[0]\n"
".inst 0x4f9ae334 // sdot v20.4s, v25.16b, v26.4b[0]\n"
- "ldr q25, [x16, #0x20]\n"
+ "ldr q25, [x17, #0x20]\n"
".inst 0x4f9de309 // sdot v9.4s, v24.16b, v29.4b[0]\n"
".inst 0x4f9ce30d // sdot v13.4s, v24.16b, v28.4b[0]\n"
".inst 0x4f9be311 // sdot v17.4s, v24.16b, v27.4b[0]\n"
".inst 0x4f9ae315 // sdot v21.4s, v24.16b, v26.4b[0]\n"
- "ldr q24, [x16, #0x30]\n"
+ "ldr q24, [x17, #0x30]\n"
".inst 0x4f9de32a // sdot v10.4s, v25.16b, v29.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f9ce32e // sdot v14.4s, v25.16b, v28.4b[0]\n"
".inst 0x4f9be332 // sdot v18.4s, v25.16b, v27.4b[0]\n"
".inst 0x4f9ae336 // sdot v22.4s, v25.16b, v26.4b[0]\n"
@@ -1783,20 +1783,20 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr b2, [x11, #0x0]\n"
"ldr b3, [x10, #0x0]\n"
"125:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q25, [x16, #0x0]\n"
+ "ldr q25, [x17, #0x0]\n"
+ "ldr q24, [x17, #0x10]\n"
".inst 0x4f80e328 // sdot v8.4s, v25.16b, v0.4b[0]\n"
- "ldr q24, [x16, #0x10]\n"
".inst 0x4f81e32c // sdot v12.4s, v25.16b, v1.4b[0]\n"
".inst 0x4f82e330 // sdot v16.4s, v25.16b, v2.4b[0]\n"
".inst 0x4f83e334 // sdot v20.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x16, #0x20]\n"
+ "ldr q25, [x17, #0x20]\n"
".inst 0x4f80e309 // sdot v9.4s, v24.16b, v0.4b[0]\n"
".inst 0x4f81e30d // sdot v13.4s, v24.16b, v1.4b[0]\n"
".inst 0x4f82e311 // sdot v17.4s, v24.16b, v2.4b[0]\n"
".inst 0x4f83e315 // sdot v21.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x16, #0x30]\n"
+ "ldr q24, [x17, #0x30]\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
@@ -1810,18 +1810,18 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 116b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
"bge 135f\n"
"tbz x8, #3, 130f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
@@ -1829,96 +1829,96 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"st1 { v20.4s }, [x22], #0x10\n"
"st1 { v21.4s }, [x22], #0x10\n"
"tbz x8, #2, 128f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"st1 { v22.4s }, [x22], #0x10\n"
"tbz x8, #1, 127f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"str d19, [x23], #0x8\n"
"str d23, [x22], #0x8\n"
"tbz x8, #0, 134f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v23.s }[2], [x22]\n"
"b 134f\n"
"127:" // Height 4: Partial direct writeback: partial_1_12
"tbz x8, #0, 134f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"str s19, [x23, #0x0]\n"
"str s23, [x22, #0x0]\n"
"b 134f\n"
"128:" // Height 4: Partial direct writeback: partial_2_8
"tbz x8, #1, 129f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"str d18, [x23], #0x8\n"
"str d22, [x22], #0x8\n"
"tbz x8, #0, 134f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"st1 { v18.s }[2], [x23]\n"
"st1 { v22.s }[2], [x22]\n"
"b 134f\n"
"129:" // Height 4: Partial direct writeback: partial_1_8
"tbz x8, #0, 134f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"str s18, [x23, #0x0]\n"
"str s22, [x22, #0x0]\n"
"b 134f\n"
"130:" // Height 4: Partial direct writeback: partial_4_0
"tbz x8, #2, 132f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"tbz x8, #1, 131f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"str d17, [x23], #0x8\n"
"str d21, [x22], #0x8\n"
"tbz x8, #0, 134f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"st1 { v17.s }[2], [x23]\n"
"st1 { v21.s }[2], [x22]\n"
"b 134f\n"
"131:" // Height 4: Partial direct writeback: partial_1_4
"tbz x8, #0, 134f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"str s17, [x23, #0x0]\n"
"str s21, [x22, #0x0]\n"
"b 134f\n"
"132:" // Height 4: Partial direct writeback: partial_2_0
"tbz x8, #1, 133f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"str d16, [x23], #0x8\n"
"str d20, [x22], #0x8\n"
"tbz x8, #0, 134f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
"b 134f\n"
"133:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"str s16, [x23, #0x0]\n"
"str s20, [x22, #0x0]\n"
"134:" // Height 4: Partial direct writeback: Done
"b 136f\n"
"135:" // Height 4: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -1937,43 +1937,43 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 206f\n"
"137:" // Height 5
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"138:" // Height 5: Column loop
"tbz %x[flags], #0, 148f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
+ "cmp x8, #0x10\n"
+ "add x24, x16, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x8, #0x10\n"
"add x21, x22, x20, LSL #2\n"
"bge 147f\n"
"tbz x8, #3, 142f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
"ld1 { v25.4s }, [x21], #0x10\n"
"tbz x8, #2, 140f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
"ld1 { v26.4s }, [x21], #0x10\n"
"tbz x8, #1, 139f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"ldr d19, [x23], #0x8\n"
"ldr d23, [x22], #0x8\n"
"ldr d27, [x21], #0x8\n"
"tbz x8, #0, 146f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"ld1 { v19.s }[2], [x23]\n"
"ld1 { v23.s }[2], [x22]\n"
@@ -1982,7 +1982,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"139:" // Height 5: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 146f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"ldr s23, [x22, #0x0]\n"
@@ -1990,14 +1990,14 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 146f\n"
"140:" // Height 5: Partial accumulate: partial_2_8
"tbz x8, #1, 141f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"ldr d18, [x23], #0x8\n"
"ldr d22, [x22], #0x8\n"
"ldr d26, [x21], #0x8\n"
"tbz x8, #0, 146f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"ld1 { v18.s }[2], [x23]\n"
"ld1 { v22.s }[2], [x22]\n"
@@ -2006,7 +2006,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"141:" // Height 5: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 146f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"ldr s22, [x22, #0x0]\n"
@@ -2014,20 +2014,20 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 146f\n"
"142:" // Height 5: Partial accumulate: partial_4_0
"tbz x8, #2, 144f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v24.4s }, [x21], #0x10\n"
"tbz x8, #1, 143f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"ldr d17, [x23], #0x8\n"
"ldr d21, [x22], #0x8\n"
"ldr d25, [x21], #0x8\n"
"tbz x8, #0, 146f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"ld1 { v17.s }[2], [x23]\n"
"ld1 { v21.s }[2], [x22]\n"
@@ -2036,7 +2036,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"143:" // Height 5: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 146f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"ldr s21, [x22, #0x0]\n"
@@ -2044,34 +2044,34 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 146f\n"
"144:" // Height 5: Partial accumulate: partial_2_0
"tbz x8, #1, 145f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"ldr d16, [x23], #0x8\n"
"ldr d20, [x22], #0x8\n"
"ldr d24, [x21], #0x8\n"
"tbz x8, #0, 146f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"ld1 { v16.s }[2], [x23]\n"
"ld1 { v20.s }[2], [x22]\n"
"ld1 { v24.s }[2], [x21]\n"
"b 146f\n"
"145:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"ldr s20, [x22, #0x0]\n"
"ldr s24, [x21, #0x0]\n"
"146:" // Height 5: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 149f\n"
"147:" // Height 5: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -2114,8 +2114,8 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"mov x15, #0x0\n"
"150:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 151f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2147,148 +2147,148 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr q2, [x11, #0x0]\n"
"ldr q3, [x10, #0x0]\n"
"ldr q4, [x9, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 154f\n"
"153:" // Height 5: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x21, [x17, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x20, [x17, #0x38]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"add x13, x13, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr d29, [x16, #0x20]\n"
+ "ldr d29, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v29.d[1], x21\n"
+ "add x11, x11, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x48]\n"
+ "add x10, x10, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x11, x11, #0x10\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr x21, [x17, #0x48]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr d28, [x16, #0x30]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0x30]\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x58]\n"
+ "ldr x26, [x13, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4f82e3b2 // sdot v18.4s, v29.16b, v2.4b[0]\n"
- "add x9, x9, #0x10\n"
".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n"
- "ldr x26, [x13, #0x8]\n"
+ "ldr x20, [x17, #0x58]\n"
".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n"
- "ldr d29, [x16, #0x40]\n"
+ "ldr d29, [x17, #0x40]\n"
".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n"
- "mov v29.d[1], x21\n"
+ "ldr x25, [x12, #0x8]\n"
".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x68]\n"
+ "ldr x24, [x11, #0x8]\n"
".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n"
- "ldr x25, [x12, #0x8]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n"
- "ldr x24, [x11, #0x8]\n"
+ "ldr x21, [x17, #0x68]\n"
".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n"
- "ldr d28, [x16, #0x50]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0x50]\n"
".inst 0x4fa0e3a8 // sdot v8.4s, v29.16b, v0.4b[1]\n"
+ "ldr x23, [x10, #0x8]\n"
".inst 0x4fa1e3ac // sdot v12.4s, v29.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr x22, [x9, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4fa2e3b0 // sdot v16.4s, v29.16b, v2.4b[1]\n"
- "ldr x23, [x10, #0x8]\n"
".inst 0x4fa3e3b4 // sdot v20.4s, v29.16b, v3.4b[1]\n"
- "ldr x22, [x9, #0x8]\n"
+ "ldr x20, [x17, #0x78]\n"
".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n"
- "ldr d29, [x16, #0x60]\n"
+ "ldr d29, [x17, #0x60]\n"
".inst 0x4fa0e389 // sdot v9.4s, v28.16b, v0.4b[1]\n"
- "mov v29.d[1], x21\n"
+ "sub x14, x14, #0x10\n"
".inst 0x4fa1e38d // sdot v13.4s, v28.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0x88]\n"
+ "cmp x14, #0x20\n"
".inst 0x4fa2e391 // sdot v17.4s, v28.16b, v2.4b[1]\n"
- "sub x14, x14, #0x10\n"
+ "mov v29.d[1], x21\n"
".inst 0x4fa3e395 // sdot v21.4s, v28.16b, v3.4b[1]\n"
- "cmp x14, #0x20\n"
+ "ldr x21, [x17, #0x88]\n"
".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n"
- "ldr d28, [x16, #0x70]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0x70]\n"
".inst 0x4fa0e3aa // sdot v10.4s, v29.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4fa1e3ae // sdot v14.4s, v29.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x98]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4fa2e3b2 // sdot v18.4s, v29.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4fa3e3b6 // sdot v22.4s, v29.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x20, [x17, #0x98]\n"
".inst 0x4fa4e3ba // sdot v26.4s, v29.16b, v4.4b[1]\n"
- "ldr d29, [x16, #0x80]\n"
+ "ldr d29, [x17, #0x80]\n"
".inst 0x4fa0e38b // sdot v11.4s, v28.16b, v0.4b[1]\n"
- "mov v29.d[1], x21\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4fa1e38f // sdot v15.4s, v28.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0xa8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4fa2e393 // sdot v19.4s, v28.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4fa3e397 // sdot v23.4s, v28.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "ldr x21, [x17, #0xa8]\n"
".inst 0x4fa4e39b // sdot v27.4s, v28.16b, v4.4b[1]\n"
- "ldr d28, [x16, #0x90]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0x90]\n"
".inst 0x4f80eba8 // sdot v8.4s, v29.16b, v0.4b[2]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4f81ebac // sdot v12.4s, v29.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4f82ebb0 // sdot v16.4s, v29.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4f83ebb4 // sdot v20.4s, v29.16b, v3.4b[2]\n"
+ "ldr x20, [x17, #0xb8]\n"
".inst 0x4f84ebb8 // sdot v24.4s, v29.16b, v4.4b[2]\n"
- "ldr d29, [x16, #0xa0]\n"
+ "ldr d29, [x17, #0xa0]\n"
".inst 0x4f80eb89 // sdot v9.4s, v28.16b, v0.4b[2]\n"
- "mov v29.d[1], x21\n"
".inst 0x4f81eb8d // sdot v13.4s, v28.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xc8]\n"
".inst 0x4f82eb91 // sdot v17.4s, v28.16b, v2.4b[2]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f83eb95 // sdot v21.4s, v28.16b, v3.4b[2]\n"
+ "ldr x21, [x17, #0xc8]\n"
".inst 0x4f84eb99 // sdot v25.4s, v28.16b, v4.4b[2]\n"
- "ldr d28, [x16, #0xb0]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0xb0]\n"
".inst 0x4f80ebaa // sdot v10.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebae // sdot v14.4s, v29.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xd8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4f82ebb2 // sdot v18.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb6 // sdot v22.4s, v29.16b, v3.4b[2]\n"
+ "ldr x20, [x17, #0xd8]\n"
".inst 0x4f84ebba // sdot v26.4s, v29.16b, v4.4b[2]\n"
- "ldr d29, [x16, #0xc0]\n"
+ "ldr d29, [x17, #0xc0]\n"
".inst 0x4f80eb8b // sdot v11.4s, v28.16b, v0.4b[2]\n"
- "mov v29.d[1], x21\n"
".inst 0x4f81eb8f // sdot v15.4s, v28.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xe8]\n"
".inst 0x4f82eb93 // sdot v19.4s, v28.16b, v2.4b[2]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4f83eb97 // sdot v23.4s, v28.16b, v3.4b[2]\n"
+ "ldr x21, [x17, #0xe8]\n"
".inst 0x4f84eb9b // sdot v27.4s, v28.16b, v4.4b[2]\n"
- "ldr d28, [x16, #0xd0]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0xd0]\n"
".inst 0x4fa0eba8 // sdot v8.4s, v29.16b, v0.4b[3]\n"
".inst 0x4fa1ebac // sdot v12.4s, v29.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4fa2ebb0 // sdot v16.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb4 // sdot v20.4s, v29.16b, v3.4b[3]\n"
+ "ldr x20, [x17, #0xf8]\n"
".inst 0x4fa4ebb8 // sdot v24.4s, v29.16b, v4.4b[3]\n"
- "ldr d29, [x16, #0xe0]\n"
+ "ldr d29, [x17, #0xe0]\n"
".inst 0x4fa0eb89 // sdot v9.4s, v28.16b, v0.4b[3]\n"
- "mov v29.d[1], x21\n"
".inst 0x4fa1eb8d // sdot v13.4s, v28.16b, v1.4b[3]\n"
".inst 0x4fa2eb91 // sdot v17.4s, v28.16b, v2.4b[3]\n"
+ "mov v29.d[1], x21\n"
".inst 0x4fa3eb95 // sdot v21.4s, v28.16b, v3.4b[3]\n"
".inst 0x4fa4eb99 // sdot v25.4s, v28.16b, v4.4b[3]\n"
- "ldr d28, [x16, #0xf0]\n"
- "mov v28.d[1], x20\n"
- "add x16, x16, #0x100\n"
+ "ldr d28, [x17, #0xf0]\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa0ebaa // sdot v10.4s, v29.16b, v0.4b[3]\n"
- "ldr x21, [x16, #0x8]\n"
".inst 0x4fa1ebae // sdot v14.4s, v29.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x4fa2ebb2 // sdot v18.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb6 // sdot v22.4s, v29.16b, v3.4b[3]\n"
+ "ldr x20, [x17, #0x18]\n"
".inst 0x4fa4ebba // sdot v26.4s, v29.16b, v4.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x4fa0eb8b // sdot v11.4s, v28.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x4fa1eb8f // sdot v15.4s, v28.16b, v1.4b[3]\n"
@@ -2299,7 +2299,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr d3, [x10, #0x0]\n"
".inst 0x4fa4eb9b // sdot v27.4s, v28.16b, v4.4b[3]\n"
"ldr d4, [x9, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
+ "ldr d7, [x17, #0x10]\n"
"mov v6.d[1], x21\n"
"mov v0.d[1], x26\n"
"mov v1.d[1], x25\n"
@@ -2318,7 +2318,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
"add x10, x10, #0x10\n"
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q29, [x16, #0x20]\n"
+ "ldr q29, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x9, x9, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -2328,7 +2328,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q28, [x16, #0x30]\n"
+ "ldr q28, [x17, #0x30]\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
@@ -2337,75 +2337,75 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n"
".inst 0x4f84e3ba // sdot v26.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x16, #0x40]\n"
+ "ldr q29, [x17, #0x40]\n"
".inst 0x4f80e38b // sdot v11.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f81e38f // sdot v15.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f82e393 // sdot v19.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f83e397 // sdot v23.4s, v28.16b, v3.4b[0]\n"
".inst 0x4f84e39b // sdot v27.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x16, #0x50]\n"
+ "ldr q28, [x17, #0x50]\n"
".inst 0x4fa0e3a8 // sdot v8.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3ac // sdot v12.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3b0 // sdot v16.4s, v29.16b, v2.4b[1]\n"
".inst 0x4fa3e3b4 // sdot v20.4s, v29.16b, v3.4b[1]\n"
".inst 0x4fa4e3b8 // sdot v24.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x16, #0x60]\n"
+ "ldr q29, [x17, #0x60]\n"
".inst 0x4fa0e389 // sdot v9.4s, v28.16b, v0.4b[1]\n"
".inst 0x4fa1e38d // sdot v13.4s, v28.16b, v1.4b[1]\n"
".inst 0x4fa2e391 // sdot v17.4s, v28.16b, v2.4b[1]\n"
".inst 0x4fa3e395 // sdot v21.4s, v28.16b, v3.4b[1]\n"
".inst 0x4fa4e399 // sdot v25.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x16, #0x70]\n"
+ "ldr q28, [x17, #0x70]\n"
".inst 0x4fa0e3aa // sdot v10.4s, v29.16b, v0.4b[1]\n"
".inst 0x4fa1e3ae // sdot v14.4s, v29.16b, v1.4b[1]\n"
".inst 0x4fa2e3b2 // sdot v18.4s, v29.16b, v2.4b[1]\n"
".inst 0x4fa3e3b6 // sdot v22.4s, v29.16b, v3.4b[1]\n"
".inst 0x4fa4e3ba // sdot v26.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x16, #0x80]\n"
+ "ldr q29, [x17, #0x80]\n"
".inst 0x4fa0e38b // sdot v11.4s, v28.16b, v0.4b[1]\n"
".inst 0x4fa1e38f // sdot v15.4s, v28.16b, v1.4b[1]\n"
".inst 0x4fa2e393 // sdot v19.4s, v28.16b, v2.4b[1]\n"
".inst 0x4fa3e397 // sdot v23.4s, v28.16b, v3.4b[1]\n"
".inst 0x4fa4e39b // sdot v27.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x16, #0x90]\n"
+ "ldr q28, [x17, #0x90]\n"
".inst 0x4f80eba8 // sdot v8.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebac // sdot v12.4s, v29.16b, v1.4b[2]\n"
".inst 0x4f82ebb0 // sdot v16.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb4 // sdot v20.4s, v29.16b, v3.4b[2]\n"
".inst 0x4f84ebb8 // sdot v24.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x16, #0xa0]\n"
+ "ldr q29, [x17, #0xa0]\n"
".inst 0x4f80eb89 // sdot v9.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb8d // sdot v13.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb91 // sdot v17.4s, v28.16b, v2.4b[2]\n"
".inst 0x4f83eb95 // sdot v21.4s, v28.16b, v3.4b[2]\n"
".inst 0x4f84eb99 // sdot v25.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x16, #0xb0]\n"
+ "ldr q28, [x17, #0xb0]\n"
".inst 0x4f80ebaa // sdot v10.4s, v29.16b, v0.4b[2]\n"
".inst 0x4f81ebae // sdot v14.4s, v29.16b, v1.4b[2]\n"
".inst 0x4f82ebb2 // sdot v18.4s, v29.16b, v2.4b[2]\n"
".inst 0x4f83ebb6 // sdot v22.4s, v29.16b, v3.4b[2]\n"
".inst 0x4f84ebba // sdot v26.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x16, #0xc0]\n"
+ "ldr q29, [x17, #0xc0]\n"
".inst 0x4f80eb8b // sdot v11.4s, v28.16b, v0.4b[2]\n"
".inst 0x4f81eb8f // sdot v15.4s, v28.16b, v1.4b[2]\n"
".inst 0x4f82eb93 // sdot v19.4s, v28.16b, v2.4b[2]\n"
".inst 0x4f83eb97 // sdot v23.4s, v28.16b, v3.4b[2]\n"
".inst 0x4f84eb9b // sdot v27.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x16, #0xd0]\n"
+ "ldr q28, [x17, #0xd0]\n"
".inst 0x4fa0eba8 // sdot v8.4s, v29.16b, v0.4b[3]\n"
".inst 0x4fa1ebac // sdot v12.4s, v29.16b, v1.4b[3]\n"
".inst 0x4fa2ebb0 // sdot v16.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb4 // sdot v20.4s, v29.16b, v3.4b[3]\n"
".inst 0x4fa4ebb8 // sdot v24.4s, v29.16b, v4.4b[3]\n"
- "ldr q29, [x16, #0xe0]\n"
+ "ldr q29, [x17, #0xe0]\n"
".inst 0x4fa0eb89 // sdot v9.4s, v28.16b, v0.4b[3]\n"
".inst 0x4fa1eb8d // sdot v13.4s, v28.16b, v1.4b[3]\n"
".inst 0x4fa2eb91 // sdot v17.4s, v28.16b, v2.4b[3]\n"
".inst 0x4fa3eb95 // sdot v21.4s, v28.16b, v3.4b[3]\n"
".inst 0x4fa4eb99 // sdot v25.4s, v28.16b, v4.4b[3]\n"
- "ldr q28, [x16, #0xf0]\n"
+ "ldr q28, [x17, #0xf0]\n"
".inst 0x4fa0ebaa // sdot v10.4s, v29.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa1ebae // sdot v14.4s, v29.16b, v1.4b[3]\n"
".inst 0x4fa2ebb2 // sdot v18.4s, v29.16b, v2.4b[3]\n"
".inst 0x4fa3ebb6 // sdot v22.4s, v29.16b, v3.4b[3]\n"
@@ -2427,22 +2427,22 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr s0, [x11], #0x4\n"
"ldr s31, [x10], #0x4\n"
"ldr s30, [x9], #0x4\n"
- "ldr q29, [x16, #0x0]\n"
+ "ldr q29, [x17, #0x0]\n"
+ "ldr q28, [x17, #0x10]\n"
".inst 0x4f82e3a8 // sdot v8.4s, v29.16b, v2.4b[0]\n"
- "ldr q28, [x16, #0x10]\n"
".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f80e3b0 // sdot v16.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f9fe3b4 // sdot v20.4s, v29.16b, v31.4b[0]\n"
".inst 0x4f9ee3b8 // sdot v24.4s, v29.16b, v30.4b[0]\n"
- "ldr q29, [x16, #0x20]\n"
+ "ldr q29, [x17, #0x20]\n"
".inst 0x4f82e389 // sdot v9.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f81e38d // sdot v13.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f80e391 // sdot v17.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f9fe395 // sdot v21.4s, v28.16b, v31.4b[0]\n"
".inst 0x4f9ee399 // sdot v25.4s, v28.16b, v30.4b[0]\n"
- "ldr q28, [x16, #0x30]\n"
+ "ldr q28, [x17, #0x30]\n"
".inst 0x4f82e3aa // sdot v10.4s, v29.16b, v2.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f80e3b2 // sdot v18.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f9fe3b6 // sdot v22.4s, v29.16b, v31.4b[0]\n"
@@ -2475,22 +2475,22 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr b3, [x10, #0x0]\n"
"ldr b4, [x9, #0x0]\n"
"159:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q29, [x16, #0x0]\n"
+ "ldr q29, [x17, #0x0]\n"
+ "ldr q28, [x17, #0x10]\n"
".inst 0x4f80e3a8 // sdot v8.4s, v29.16b, v0.4b[0]\n"
- "ldr q28, [x16, #0x10]\n"
".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f82e3b0 // sdot v16.4s, v29.16b, v2.4b[0]\n"
".inst 0x4f83e3b4 // sdot v20.4s, v29.16b, v3.4b[0]\n"
".inst 0x4f84e3b8 // sdot v24.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x16, #0x20]\n"
+ "ldr q29, [x17, #0x20]\n"
".inst 0x4f80e389 // sdot v9.4s, v28.16b, v0.4b[0]\n"
".inst 0x4f81e38d // sdot v13.4s, v28.16b, v1.4b[0]\n"
".inst 0x4f82e391 // sdot v17.4s, v28.16b, v2.4b[0]\n"
".inst 0x4f83e395 // sdot v21.4s, v28.16b, v3.4b[0]\n"
".inst 0x4f84e399 // sdot v25.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x16, #0x30]\n"
+ "ldr q28, [x17, #0x30]\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
".inst 0x4f82e3b2 // sdot v18.4s, v29.16b, v2.4b[0]\n"
".inst 0x4f83e3b6 // sdot v22.4s, v29.16b, v3.4b[0]\n"
@@ -2506,20 +2506,20 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 150b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
"prfm pstl1keep, [x21, #0x0]\n"
"bge 169f\n"
"tbz x8, #3, 164f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
@@ -2529,19 +2529,19 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"st1 { v24.4s }, [x21], #0x10\n"
"st1 { v25.4s }, [x21], #0x10\n"
"tbz x8, #2, 162f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"st1 { v22.4s }, [x22], #0x10\n"
"st1 { v26.4s }, [x21], #0x10\n"
"tbz x8, #1, 161f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"str d19, [x23], #0x8\n"
"str d23, [x22], #0x8\n"
"str d27, [x21], #0x8\n"
"tbz x8, #0, 168f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v23.s }[2], [x22]\n"
@@ -2549,7 +2549,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 168f\n"
"161:" // Height 5: Partial direct writeback: partial_1_12
"tbz x8, #0, 168f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"str s19, [x23, #0x0]\n"
"str s23, [x22, #0x0]\n"
@@ -2557,13 +2557,13 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 168f\n"
"162:" // Height 5: Partial direct writeback: partial_2_8
"tbz x8, #1, 163f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"str d18, [x23], #0x8\n"
"str d22, [x22], #0x8\n"
"str d26, [x21], #0x8\n"
"tbz x8, #0, 168f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"st1 { v18.s }[2], [x23]\n"
"st1 { v22.s }[2], [x22]\n"
@@ -2571,7 +2571,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 168f\n"
"163:" // Height 5: Partial direct writeback: partial_1_8
"tbz x8, #0, 168f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"str s18, [x23, #0x0]\n"
"str s22, [x22, #0x0]\n"
@@ -2579,19 +2579,19 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 168f\n"
"164:" // Height 5: Partial direct writeback: partial_4_0
"tbz x8, #2, 166f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"st1 { v24.4s }, [x21], #0x10\n"
"tbz x8, #1, 165f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"str d17, [x23], #0x8\n"
"str d21, [x22], #0x8\n"
"str d25, [x21], #0x8\n"
"tbz x8, #0, 168f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"st1 { v17.s }[2], [x23]\n"
"st1 { v21.s }[2], [x22]\n"
@@ -2599,7 +2599,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 168f\n"
"165:" // Height 5: Partial direct writeback: partial_1_4
"tbz x8, #0, 168f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"str s17, [x23, #0x0]\n"
"str s21, [x22, #0x0]\n"
@@ -2607,20 +2607,20 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 168f\n"
"166:" // Height 5: Partial direct writeback: partial_2_0
"tbz x8, #1, 167f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"str d16, [x23], #0x8\n"
"str d20, [x22], #0x8\n"
"str d24, [x21], #0x8\n"
"tbz x8, #0, 168f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
"st1 { v24.s }[2], [x21]\n"
"b 168f\n"
"167:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"str s16, [x23, #0x0]\n"
"str s20, [x22, #0x0]\n"
@@ -2628,11 +2628,11 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"168:" // Height 5: Partial direct writeback: Done
"b 170f\n"
"169:" // Height 5: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -2656,42 +2656,43 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"171:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"mov x20, #0x18\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x16\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"172:" // Height 6: Column loop
"tbz %x[flags], #0, 182f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
+ "cmp x8, #0x10\n"
+ "add x24, x16, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "cmp x8, #0x10\n"
"add x20, x21, x20, LSL #2\n"
"bge 181f\n"
"tbz x8, #3, 176f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v24.4s }, [x21], #0x10\n"
"ld1 { v28.4s }, [x20], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
"ld1 { v25.4s }, [x21], #0x10\n"
"ld1 { v29.4s }, [x20], #0x10\n"
"tbz x8, #2, 174f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
"ld1 { v26.4s }, [x21], #0x10\n"
"ld1 { v30.4s }, [x20], #0x10\n"
"tbz x8, #1, 173f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"ldr d19, [x23], #0x8\n"
@@ -2699,7 +2700,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr d27, [x21], #0x8\n"
"ldr d31, [x20], #0x8\n"
"tbz x8, #0, 180f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"ld1 { v19.s }[2], [x23]\n"
"ld1 { v23.s }[2], [x22]\n"
@@ -2709,7 +2710,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"173:" // Height 6: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 180f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"ldr s23, [x22, #0x0]\n"
@@ -2718,7 +2719,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 180f\n"
"174:" // Height 6: Partial accumulate: partial_2_8
"tbz x8, #1, 175f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"ldr d18, [x23], #0x8\n"
@@ -2726,7 +2727,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr d26, [x21], #0x8\n"
"ldr d30, [x20], #0x8\n"
"tbz x8, #0, 180f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"ld1 { v18.s }[2], [x23]\n"
"ld1 { v22.s }[2], [x22]\n"
@@ -2736,7 +2737,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"175:" // Height 6: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 180f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"ldr s22, [x22, #0x0]\n"
@@ -2745,14 +2746,14 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 180f\n"
"176:" // Height 6: Partial accumulate: partial_4_0
"tbz x8, #2, 178f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v24.4s }, [x21], #0x10\n"
"ld1 { v28.4s }, [x20], #0x10\n"
"tbz x8, #1, 177f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"ldr d17, [x23], #0x8\n"
@@ -2760,7 +2761,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr d25, [x21], #0x8\n"
"ldr d29, [x20], #0x8\n"
"tbz x8, #0, 180f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"ld1 { v17.s }[2], [x23]\n"
"ld1 { v21.s }[2], [x22]\n"
@@ -2770,7 +2771,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"177:" // Height 6: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 180f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"ldr s21, [x22, #0x0]\n"
@@ -2779,7 +2780,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 180f\n"
"178:" // Height 6: Partial accumulate: partial_2_0
"tbz x8, #1, 179f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"ldr d16, [x23], #0x8\n"
@@ -2787,7 +2788,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr d24, [x21], #0x8\n"
"ldr d28, [x20], #0x8\n"
"tbz x8, #0, 180f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"ld1 { v16.s }[2], [x23]\n"
"ld1 { v20.s }[2], [x22]\n"
@@ -2795,7 +2796,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ld1 { v28.s }[2], [x20]\n"
"b 180f\n"
"179:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
@@ -2803,13 +2804,13 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr s24, [x21, #0x0]\n"
"ldr s28, [x20, #0x0]\n"
"180:" // Height 6: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 183f\n"
"181:" // Height 6: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -2860,8 +2861,8 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"mov x15, #0x0\n"
"184:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 185f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2897,14 +2898,14 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr q3, [x10, #0x0]\n"
"ldr q4, [x9, #0x0]\n"
"ldr q5, [x28, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 188f\n"
"187:" // Height 6: Multiply loop: Main loop head
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x21, [x17, #0x28]\n"
".inst 0x4f81e0cc // sdot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x20, [x17, #0x38]\n"
".inst 0x4f82e0d0 // sdot v16.4s, v6.16b, v2.4b[0]\n"
"add x13, x13, #0x10\n"
".inst 0x4f83e0d4 // sdot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2912,151 +2913,151 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x16, #0x20]\n"
+ "ldr d6, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x21\n"
+ "add x10, x10, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x48]\n"
+ "add x9, x9, #0x10\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v6.d[1], x21\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "ldr x21, [x17, #0x48]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
"add x28, x28, #0x10\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x27, [x13, #0x8]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x58]\n"
+ "ldr x26, [x12, #0x8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr x27, [x13, #0x8]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x26, [x12, #0x8]\n"
+ "ldr x20, [x17, #0x58]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
"ldr x25, [x11, #0x8]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x16, #0x40]\n"
+ "ldr d6, [x17, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x21\n"
+ "ldr x24, [x10, #0x8]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x68]\n"
+ "ldr x23, [x9, #0x8]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x24, [x10, #0x8]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr x23, [x9, #0x8]\n"
+ "ldr x21, [x17, #0x68]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
"ldr x22, [x28, #0x8]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
+ "sub x14, x14, #0x10\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "cmp x14, #0x20\n"
+ "mov v7.d[1], x20\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
- "sub x14, x14, #0x10\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
- "cmp x14, #0x20\n"
+ "ldr x20, [x17, #0x78]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
"prfm pldl1keep, [x13, #0x80]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr d6, [x16, #0x60]\n"
+ "ldr d6, [x17, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x21\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr x21, [x17, #0x88]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x98]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "ldr x20, [x17, #0x98]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr d6, [x16, #0x80]\n"
+ "ldr d6, [x17, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x21\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0xa8]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
+ "ldr x21, [x17, #0xa8]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x20, [x17, #0xb8]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr d6, [x16, #0xa0]\n"
+ "ldr d6, [x17, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x21\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xc8]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x21, [x17, #0xc8]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xd8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x20, [x17, #0xd8]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr d6, [x16, #0xc0]\n"
+ "ldr d6, [x17, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x21\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xe8]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x21, [x17, #0xe8]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x20, [x17, #0xf8]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr d6, [x16, #0xe0]\n"
+ "ldr d6, [x17, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x21\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x21\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x20\n"
- "add x16, x16, #0x100\n"
+ "ldr d7, [x17, #0xf0]\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x21, [x16, #0x8]\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
+ "ldr x20, [x17, #0x18]\n"
".inst 0x4fa4e8da // sdot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8de // sdot v30.4s, v6.16b, v5.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x4fa0e8eb // sdot v11.4s, v7.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x4fa1e8ef // sdot v15.4s, v7.16b, v1.4b[3]\n"
@@ -3069,7 +3070,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr d4, [x9, #0x0]\n"
".inst 0x4fa5e8ff // sdot v31.4s, v7.16b, v5.4b[3]\n"
"ldr d5, [x28, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
+ "ldr d7, [x17, #0x10]\n"
"mov v6.d[1], x21\n"
"mov v0.d[1], x27\n"
"mov v1.d[1], x26\n"
@@ -3091,7 +3092,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f84e0d8 // sdot v24.4s, v6.16b, v4.4b[0]\n"
"add x9, x9, #0x10\n"
".inst 0x4f85e0dc // sdot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x16, #0x20]\n"
+ "ldr q6, [x17, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"add x28, x28, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
@@ -3103,7 +3104,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x16, #0x30]\n"
+ "ldr q7, [x17, #0x30]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
@@ -3113,86 +3114,86 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0da // sdot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0de // sdot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x16, #0x40]\n"
+ "ldr q6, [x17, #0x40]\n"
".inst 0x4f80e0eb // sdot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ef // sdot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f3 // sdot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f7 // sdot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0fb // sdot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0ff // sdot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x16, #0x50]\n"
+ "ldr q7, [x17, #0x50]\n"
".inst 0x4fa0e0c8 // sdot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0cc // sdot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d0 // sdot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d4 // sdot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0d8 // sdot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0dc // sdot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x16, #0x60]\n"
+ "ldr q6, [x17, #0x60]\n"
".inst 0x4fa0e0e9 // sdot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ed // sdot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f1 // sdot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f5 // sdot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0f9 // sdot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0fd // sdot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x16, #0x70]\n"
+ "ldr q7, [x17, #0x70]\n"
".inst 0x4fa0e0ca // sdot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x4fa1e0ce // sdot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x4fa2e0d2 // sdot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x4fa3e0d6 // sdot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x4fa4e0da // sdot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x4fa5e0de // sdot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x16, #0x80]\n"
+ "ldr q6, [x17, #0x80]\n"
".inst 0x4fa0e0eb // sdot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x4fa1e0ef // sdot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x4fa2e0f3 // sdot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x4fa3e0f7 // sdot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x4fa4e0fb // sdot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x4fa5e0ff // sdot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x16, #0x90]\n"
+ "ldr q7, [x17, #0x90]\n"
".inst 0x4f80e8c8 // sdot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8cc // sdot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d0 // sdot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d4 // sdot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8d8 // sdot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8dc // sdot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x16, #0xa0]\n"
+ "ldr q6, [x17, #0xa0]\n"
".inst 0x4f80e8e9 // sdot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ed // sdot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f1 // sdot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f5 // sdot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8f9 // sdot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8fd // sdot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x16, #0xb0]\n"
+ "ldr q7, [x17, #0xb0]\n"
".inst 0x4f80e8ca // sdot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x4f81e8ce // sdot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x4f82e8d2 // sdot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x4f83e8d6 // sdot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x4f84e8da // sdot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x4f85e8de // sdot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x16, #0xc0]\n"
+ "ldr q6, [x17, #0xc0]\n"
".inst 0x4f80e8eb // sdot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x4f81e8ef // sdot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x4f82e8f3 // sdot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x4f83e8f7 // sdot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x4f84e8fb // sdot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x4f85e8ff // sdot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x16, #0xd0]\n"
+ "ldr q7, [x17, #0xd0]\n"
".inst 0x4fa0e8c8 // sdot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x4fa1e8cc // sdot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d0 // sdot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d4 // sdot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x4fa4e8d8 // sdot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x4fa5e8dc // sdot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x16, #0xe0]\n"
+ "ldr q6, [x17, #0xe0]\n"
".inst 0x4fa0e8e9 // sdot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x4fa1e8ed // sdot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x4fa2e8f1 // sdot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x4fa3e8f5 // sdot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x4fa4e8f9 // sdot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x4fa5e8fd // sdot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x16, #0xf0]\n"
+ "ldr q7, [x17, #0xf0]\n"
".inst 0x4fa0e8ca // sdot v10.4s, v6.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x4fa1e8ce // sdot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x4fa2e8d2 // sdot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x4fa3e8d6 // sdot v22.4s, v6.16b, v3.4b[3]\n"
@@ -3217,24 +3218,24 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr s4, [x10], #0x4\n"
"ldr s3, [x9], #0x4\n"
"ldr s2, [x28], #0x4\n"
- "ldr q1, [x16, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
+ "ldr q0, [x17, #0x10]\n"
".inst 0x4f87e028 // sdot v8.4s, v1.16b, v7.4b[0]\n"
- "ldr q0, [x16, #0x10]\n"
".inst 0x4f86e02c // sdot v12.4s, v1.16b, v6.4b[0]\n"
".inst 0x4f85e030 // sdot v16.4s, v1.16b, v5.4b[0]\n"
".inst 0x4f84e034 // sdot v20.4s, v1.16b, v4.4b[0]\n"
".inst 0x4f83e038 // sdot v24.4s, v1.16b, v3.4b[0]\n"
".inst 0x4f82e03c // sdot v28.4s, v1.16b, v2.4b[0]\n"
- "ldr q1, [x16, #0x20]\n"
+ "ldr q1, [x17, #0x20]\n"
".inst 0x4f87e009 // sdot v9.4s, v0.16b, v7.4b[0]\n"
".inst 0x4f86e00d // sdot v13.4s, v0.16b, v6.4b[0]\n"
".inst 0x4f85e011 // sdot v17.4s, v0.16b, v5.4b[0]\n"
".inst 0x4f84e015 // sdot v21.4s, v0.16b, v4.4b[0]\n"
".inst 0x4f83e019 // sdot v25.4s, v0.16b, v3.4b[0]\n"
".inst 0x4f82e01d // sdot v29.4s, v0.16b, v2.4b[0]\n"
- "ldr q0, [x16, #0x30]\n"
+ "ldr q0, [x17, #0x30]\n"
".inst 0x4f87e02a // sdot v10.4s, v1.16b, v7.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f86e02e // sdot v14.4s, v1.16b, v6.4b[0]\n"
".inst 0x4f85e032 // sdot v18.4s, v1.16b, v5.4b[0]\n"
".inst 0x4f84e036 // sdot v22.4s, v1.16b, v4.4b[0]\n"
@@ -3272,24 +3273,24 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"ldr b4, [x9, #0x0]\n"
"ldr b5, [x28, #0x0]\n"
"193:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x16, #0x0]\n"
+ "ldr q7, [x17, #0x0]\n"
+ "ldr q6, [x17, #0x10]\n"
".inst 0x4f80e0e8 // sdot v8.4s, v7.16b, v0.4b[0]\n"
- "ldr q6, [x16, #0x10]\n"
".inst 0x4f81e0ec // sdot v12.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f0 // sdot v16.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f4 // sdot v20.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f8 // sdot v24.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fc // sdot v28.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x16, #0x20]\n"
+ "ldr q7, [x17, #0x20]\n"
".inst 0x4f80e0c9 // sdot v9.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0cd // sdot v13.4s, v6.16b, v1.4b[0]\n"
".inst 0x4f82e0d1 // sdot v17.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d5 // sdot v21.4s, v6.16b, v3.4b[0]\n"
".inst 0x4f84e0d9 // sdot v25.4s, v6.16b, v4.4b[0]\n"
".inst 0x4f85e0dd // sdot v29.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x16, #0x30]\n"
+ "ldr q6, [x17, #0x30]\n"
".inst 0x4f80e0ea // sdot v10.4s, v7.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x4f81e0ee // sdot v14.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f2 // sdot v18.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f6 // sdot v22.4s, v7.16b, v3.4b[0]\n"
@@ -3307,22 +3308,22 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 184b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x20, x21, x20, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
"prfm pstl1keep, [x20, #0x0]\n"
"bge 203f\n"
"tbz x8, #3, 198f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
@@ -3334,21 +3335,21 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"st1 { v28.4s }, [x20], #0x10\n"
"st1 { v29.4s }, [x20], #0x10\n"
"tbz x8, #2, 196f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"st1 { v22.4s }, [x22], #0x10\n"
"st1 { v26.4s }, [x21], #0x10\n"
"st1 { v30.4s }, [x20], #0x10\n"
"tbz x8, #1, 195f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"str d19, [x23], #0x8\n"
"str d23, [x22], #0x8\n"
"str d27, [x21], #0x8\n"
"str d31, [x20], #0x8\n"
"tbz x8, #0, 202f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v23.s }[2], [x22]\n"
@@ -3357,7 +3358,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 202f\n"
"195:" // Height 6: Partial direct writeback: partial_1_12
"tbz x8, #0, 202f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"str s19, [x23, #0x0]\n"
"str s23, [x22, #0x0]\n"
@@ -3366,14 +3367,14 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 202f\n"
"196:" // Height 6: Partial direct writeback: partial_2_8
"tbz x8, #1, 197f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"str d18, [x23], #0x8\n"
"str d22, [x22], #0x8\n"
"str d26, [x21], #0x8\n"
"str d30, [x20], #0x8\n"
"tbz x8, #0, 202f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"st1 { v18.s }[2], [x23]\n"
"st1 { v22.s }[2], [x22]\n"
@@ -3382,7 +3383,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 202f\n"
"197:" // Height 6: Partial direct writeback: partial_1_8
"tbz x8, #0, 202f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"str s18, [x23, #0x0]\n"
"str s22, [x22, #0x0]\n"
@@ -3391,21 +3392,21 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 202f\n"
"198:" // Height 6: Partial direct writeback: partial_4_0
"tbz x8, #2, 200f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"st1 { v24.4s }, [x21], #0x10\n"
"st1 { v28.4s }, [x20], #0x10\n"
"tbz x8, #1, 199f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"str d17, [x23], #0x8\n"
"str d21, [x22], #0x8\n"
"str d25, [x21], #0x8\n"
"str d29, [x20], #0x8\n"
"tbz x8, #0, 202f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"st1 { v17.s }[2], [x23]\n"
"st1 { v21.s }[2], [x22]\n"
@@ -3414,7 +3415,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 202f\n"
"199:" // Height 6: Partial direct writeback: partial_1_4
"tbz x8, #0, 202f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"str s17, [x23, #0x0]\n"
"str s21, [x22, #0x0]\n"
@@ -3423,14 +3424,14 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"b 202f\n"
"200:" // Height 6: Partial direct writeback: partial_2_0
"tbz x8, #1, 201f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"str d16, [x23], #0x8\n"
"str d20, [x22], #0x8\n"
"str d24, [x21], #0x8\n"
"str d28, [x20], #0x8\n"
"tbz x8, #0, 202f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
@@ -3438,7 +3439,7 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"st1 { v28.s }[2], [x20]\n"
"b 202f\n"
"201:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"str s16, [x23, #0x0]\n"
"str s20, [x22, #0x0]\n"
@@ -3447,11 +3448,11 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"202:" // Height 6: Partial direct writeback: Done
"b 204f\n"
"203:" // Height 6: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -3487,8 +3488,8 @@ void a64_hybrid_s8s32_dot_6x16_a55 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
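(Editor's note on the pattern this diff records, before the same change repeats for generic.cpp below: output_ptr moves out of the inline-asm operand list into the KernelArgs struct. Each height block now fetches it with ldr through an offsetof() immediate, and the Height 6 path writes the advanced pointer back with str instead of madd'ing a "+&r" operand in place, which is why the constraint list at the end of each kernel shrinks to [M] and [input_ptr] and gains offsetof_output_ptr. In the a55 variant this also swaps the B_ptr/output pointer registers between x16 and x17; the remaining hunks are instruction rescheduling of the generated kernels. A minimal sketch of the load/update/store idiom follows, assuming hypothetical names Args, out_ptr, and bump_output; it is not the library's kernel, and it assembles only for AArch64 targets.)

#include <cstddef>
#include <cstdint>

#ifdef __aarch64__
// Hypothetical stand-in for KernelArgs; not the library's type.
struct Args {
    void *out_ptr; // previously carried as a "+&r" inline-asm operand
};

// Load the saved pointer through the args struct, advance it, store it back.
// The offsetof() value travels as an "I" immediate, the same way the kernels
// pass offsetof(KernelArgs, output_ptr).
static void bump_output(Args &args, uint64_t bytes) {
    __asm__ __volatile__(
        "ldr x20, [%x[args], %[off]]\n" // fetch args.out_ptr
        "add x20, x20, %x[bytes]\n"     // advance past the block just written
        "str x20, [%x[args], %[off]]\n" // persist it for the next height block
        :
        : [args] "r"(&args), [off] "I"(offsetof(Args, out_ptr)),
          [bytes] "r"(bytes)
        : "memory", "x20");
}

int main() {
    alignas(16) static uint8_t buf[256];
    Args a{buf};
    bump_output(a, 0x40); // mimic "add x16, x16, #0x40" after a full writeback
    return static_cast<int>(static_cast<uint8_t *>(a.out_ptr) - buf); // 64
}
#endif // __aarch64__

(Keeping the pointer in the struct means the surrounding C++ no longer has to hold output_ptr live in a register across the asm block, and the update becomes an explicit memory round-trip that later height blocks can reload.)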
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp
index 452d647bb4..0950d7d950 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void a64_hybrid_s8s32_dot_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -88,7 +88,7 @@ void a64_hybrid_s8s32_dot_6x16 (
"beq 35f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 12f\n"
"cmp x11, #0x10\n"
@@ -163,8 +163,8 @@ void a64_hybrid_s8s32_dot_6x16 (
"mov x28, #0x0\n"
"14:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 15f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -188,6 +188,10 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr q17, [x10, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
"ldr q17, [x10, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
@@ -212,22 +216,21 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr q17, [x10, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
"ldr q16, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
"ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x20\n"
- "add x10, x10, #0x100\n"
- "ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 17b\n"
"18:" // Height 1: Multiply loop: Single iteration only
".inst 0x4f80e0c8 // sdot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
"ldr q17, [x10, #0x40]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
@@ -252,29 +255,26 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr q17, [x10, #0xe0]\n"
".inst 0x4fa0ea09 // sdot v9.4s, v16.16b, v0.4b[3]\n"
"ldr q16, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x4fa0ea2a // sdot v10.4s, v17.16b, v0.4b[3]\n"
".inst 0x4fa0ea0b // sdot v11.4s, v16.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"19:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 24f\n"
"cmp x27, #0x4\n"
"blt 21f\n"
"20:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x26], #0x4\n"
- "ldr q16, [x10, #0x0]\n"
- ".inst 0x4f92e208 // sdot v8.4s, v16.16b, v18.4b[0]\n"
+ "ldr q17, [x10, #0x0]\n"
"sub x27, x27, #0x4\n"
"ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x4\n"
+ ".inst 0x4f92e228 // sdot v8.4s, v17.16b, v18.4b[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x4f92e209 // sdot v9.4s, v16.16b, v18.4b[0]\n"
- "cmp x27, #0x4\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f92e22a // sdot v10.4s, v17.16b, v18.4b[0]\n"
".inst 0x4f92e20b // sdot v11.4s, v16.16b, v18.4b[0]\n"
- "add x10, x10, #0x40\n"
"bge 20b\n"
"21:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 24f\n"
@@ -289,12 +289,12 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr q17, [x10, #0x0]\n"
"ldr q16, [x10, #0x10]\n"
".inst 0x4f80e228 // sdot v8.4s, v17.16b, v0.4b[0]\n"
- ".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
"ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
- "add x10, x10, #0x40\n"
"24:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -362,7 +362,7 @@ void a64_hybrid_s8s32_dot_6x16 (
"35:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"36:" // Height 2: Column loop
"tbz %x[flags], #0, 46f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -463,8 +463,8 @@ void a64_hybrid_s8s32_dot_6x16 (
"mov x28, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -496,22 +496,22 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
"ldr q17, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x50]\n"
- "cmp x27, #0x20\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n"
"ldr q16, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4fa0e22a // sdot v10.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22e // sdot v14.4s, v17.16b, v1.4b[1]\n"
"ldr q17, [x10, #0x80]\n"
@@ -555,18 +555,18 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x25, x25, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
"ldr q17, [x10, #0x40]\n"
- "sub x27, x27, #0x10\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4fa0e228 // sdot v8.4s, v17.16b, v0.4b[1]\n"
".inst 0x4fa1e22c // sdot v12.4s, v17.16b, v1.4b[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4fa0e209 // sdot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x4fa1e20d // sdot v13.4s, v16.16b, v1.4b[1]\n"
"ldr q16, [x10, #0x70]\n"
@@ -607,18 +607,18 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr s19, [x26], #0x4\n"
"ldr s18, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr q17, [x10, #0x0]\n"
"ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x4\n"
".inst 0x4f93e228 // sdot v8.4s, v17.16b, v19.4b[0]\n"
".inst 0x4f92e22c // sdot v12.4s, v17.16b, v18.4b[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x4f93e209 // sdot v9.4s, v16.16b, v19.4b[0]\n"
".inst 0x4f92e20d // sdot v13.4s, v16.16b, v18.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f93e22a // sdot v10.4s, v17.16b, v19.4b[0]\n"
".inst 0x4f92e22e // sdot v14.4s, v17.16b, v18.4b[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x4f93e20b // sdot v11.4s, v16.16b, v19.4b[0]\n"
".inst 0x4f92e20f // sdot v15.4s, v16.16b, v18.4b[0]\n"
"bge 54b\n"
@@ -643,9 +643,9 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f80e209 // sdot v9.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20d // sdot v13.4s, v16.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x4f80e22a // sdot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x4f81e22e // sdot v14.4s, v17.16b, v1.4b[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x4f80e20b // sdot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x4f81e20f // sdot v15.4s, v16.16b, v1.4b[0]\n"
"58:" // Height 2: Multiply loop: No odd multiplies
@@ -654,9 +654,9 @@ void a64_hybrid_s8s32_dot_6x16 (
"cmp x28, x20\n"
"bne 48b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"bge 67f\n"
"tbz x11, #3, 62f\n"
@@ -738,12 +738,12 @@ void a64_hybrid_s8s32_dot_6x16 (
"69:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"70:" // Height 3: Column loop
"tbz %x[flags], #0, 80f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"bge 79f\n"
"tbz x11, #3, 74f\n"
@@ -864,8 +864,8 @@ void a64_hybrid_s8s32_dot_6x16 (
"mov x28, #0x0\n"
"82:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 83f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -905,18 +905,18 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
"ldr q20, [x10, #0x30]\n"
"add x24, x24, #0x10\n"
- ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
- ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
"cmp x27, #0x20\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
"ldr q21, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n"
"ldr q20, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4fa0e2a8 // sdot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x4fa1e2ac // sdot v12.4s, v21.16b, v1.4b[1]\n"
".inst 0x4fa2e2b0 // sdot v16.4s, v21.16b, v2.4b[1]\n"
@@ -983,14 +983,14 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
"ldr q20, [x10, #0x30]\n"
"sub x27, x27, #0x10\n"
- ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
- ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f80e2aa // sdot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f81e2ae // sdot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x4f82e2b2 // sdot v18.4s, v21.16b, v2.4b[0]\n"
"ldr q21, [x10, #0x40]\n"
- ".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f80e28b // sdot v11.4s, v20.16b, v0.4b[0]\n"
".inst 0x4f81e28f // sdot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x4f82e293 // sdot v19.4s, v20.16b, v2.4b[0]\n"
"ldr q20, [x10, #0x50]\n"
@@ -1049,12 +1049,12 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr s24, [x26], #0x4\n"
"ldr s23, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s22, [x24], #0x4\n"
"ldr q21, [x10, #0x0]\n"
+ "cmp x27, #0x4\n"
+ "ldr q20, [x10, #0x10]\n"
".inst 0x4f98e2a8 // sdot v8.4s, v21.16b, v24.4b[0]\n"
".inst 0x4f97e2ac // sdot v12.4s, v21.16b, v23.4b[0]\n"
- "ldr q20, [x10, #0x10]\n"
".inst 0x4f96e2b0 // sdot v16.4s, v21.16b, v22.4b[0]\n"
"ldr q21, [x10, #0x20]\n"
".inst 0x4f98e289 // sdot v9.4s, v20.16b, v24.4b[0]\n"
@@ -1108,11 +1108,11 @@ void a64_hybrid_s8s32_dot_6x16 (
"cmp x28, x20\n"
"bne 82b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"bge 101f\n"
"tbz x11, #3, 96f\n"
@@ -1214,13 +1214,13 @@ void a64_hybrid_s8s32_dot_6x16 (
"103:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"104:" // Height 4: Column loop
"tbz %x[flags], #0, 114f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x22, x23, x20, LSL #2\n"
"bge 113f\n"
"tbz x11, #3, 108f\n"
@@ -1365,8 +1365,8 @@ void a64_hybrid_s8s32_dot_6x16 (
"mov x28, #0x0\n"
"116:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 117f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1506,14 +1506,14 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"add x23, x23, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
"ldr q24, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f80e32a // sdot v10.4s, v25.16b, v0.4b[0]\n"
".inst 0x4f81e32e // sdot v14.4s, v25.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f82e332 // sdot v18.4s, v25.16b, v2.4b[0]\n"
".inst 0x4f83e336 // sdot v22.4s, v25.16b, v3.4b[0]\n"
@@ -1591,9 +1591,9 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr s29, [x26], #0x4\n"
"ldr s28, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s27, [x24], #0x4\n"
"ldr s26, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
"ldr q25, [x10, #0x0]\n"
"ldr q24, [x10, #0x10]\n"
".inst 0x4f9de328 // sdot v8.4s, v25.16b, v29.4b[0]\n"
@@ -1662,13 +1662,13 @@ void a64_hybrid_s8s32_dot_6x16 (
"cmp x28, x20\n"
"bne 116b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
"bge 135f\n"
"tbz x11, #3, 130f\n"
@@ -1790,14 +1790,14 @@ void a64_hybrid_s8s32_dot_6x16 (
"137:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"138:" // Height 5: Column loop
"tbz %x[flags], #0, 148f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x21, x22, x20, LSL #2\n"
"bge 147f\n"
"tbz x11, #3, 142f\n"
@@ -1966,8 +1966,8 @@ void a64_hybrid_s8s32_dot_6x16 (
"mov x28, #0x0\n"
"150:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 151f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2133,12 +2133,12 @@ void a64_hybrid_s8s32_dot_6x16 (
"add x22, x22, #0x10\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
- "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
"ldr q28, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
".inst 0x4f80e3aa // sdot v10.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f81e3ae // sdot v14.4s, v29.16b, v1.4b[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
@@ -2233,14 +2233,14 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr s2, [x26], #0x4\n"
"ldr s1, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s0, [x24], #0x4\n"
"ldr s31, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
"ldr s30, [x22], #0x4\n"
"ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
".inst 0x4f82e3a8 // sdot v8.4s, v29.16b, v2.4b[0]\n"
".inst 0x4f81e3ac // sdot v12.4s, v29.16b, v1.4b[0]\n"
- "ldr q28, [x10, #0x10]\n"
".inst 0x4f80e3b0 // sdot v16.4s, v29.16b, v0.4b[0]\n"
".inst 0x4f9fe3b4 // sdot v20.4s, v29.16b, v31.4b[0]\n"
".inst 0x4f9ee3b8 // sdot v24.4s, v29.16b, v30.4b[0]\n"
@@ -2316,15 +2316,15 @@ void a64_hybrid_s8s32_dot_6x16 (
"cmp x28, x20\n"
"bne 150b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x9, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "cmp x11, #0x10\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
"bge 169f\n"
"tbz x11, #3, 164f\n"
@@ -2465,19 +2465,20 @@ void a64_hybrid_s8s32_dot_6x16 (
"b 206f\n"
"171:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"172:" // Height 6: Column loop
"tbz %x[flags], #0, 182f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x20, x21, x20, LSL #2\n"
"bge 181f\n"
"tbz x11, #3, 176f\n"
@@ -2670,8 +2671,8 @@ void a64_hybrid_s8s32_dot_6x16 (
"mov x28, #0x0\n"
"184:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 185f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2861,18 +2862,18 @@ void a64_hybrid_s8s32_dot_6x16 (
".inst 0x4f80e0e9 // sdot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x4f81e0ed // sdot v13.4s, v7.16b, v1.4b[0]\n"
"add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4f82e0f1 // sdot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x4f83e0f5 // sdot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4f84e0f9 // sdot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x4f85e0fd // sdot v29.4s, v7.16b, v5.4b[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4f80e0ca // sdot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x4f81e0ce // sdot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4f82e0d2 // sdot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x4f83e0d6 // sdot v22.4s, v6.16b, v3.4b[0]\n"
@@ -2978,9 +2979,9 @@ void a64_hybrid_s8s32_dot_6x16 (
"ldr s7, [x26], #0x4\n"
"ldr s6, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s5, [x24], #0x4\n"
"ldr s4, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
"ldr s3, [x22], #0x4\n"
"ldr s2, [x21], #0x4\n"
"ldr q1, [x10, #0x0]\n"
@@ -3073,16 +3074,16 @@ void a64_hybrid_s8s32_dot_6x16 (
"cmp x28, x20\n"
"bne 184b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"add x24, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"add x22, x23, x20, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"add x20, x21, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"prfm pstl1keep, [x21, #0x0]\n"
"prfm pstl1keep, [x20, #0x0]\n"
"bge 203f\n"
@@ -3253,8 +3254,8 @@ void a64_hybrid_s8s32_dot_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
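
The change repeated across every kernel in this patch: output_ptr moves from a bare local, tied up as a "+&r" output operand for the whole asm body, into the KernelArgs block, so each Height path re-loads it with an ldr through args_ptr and the asm operand list shrinks accordingly. A minimal sketch of the mechanism follows; KernelArgs and run_height_block here are illustrative stand-ins with most fields omitted, not the library's exact declarations.

#include <cstddef>

// Sketch only: the real KernelArgs carries several more fields.
struct KernelArgs {
    size_t output_offset = {};
    void  *output_ptr    = {};   // now initialised before entering the asm
};

void run_height_block(void *out_base, size_t stride) {
    KernelArgs ka;
    ka.output_ptr    = out_base;
    ka.output_offset = stride;
    __asm__ __volatile__(
        // x9 <- ka.output_ptr: re-loaded at each height block instead of
        // occupying a dedicated register operand across the whole kernel.
        "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        :
        : [args_ptr] "r" (&ka),
          [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr))
        : "memory", "x9");
}
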
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16.hpp
index 4905ba5656..c5170553d8 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
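
Each kernel header in the patch takes the matching one-line interface change: StdTransformsFixed now names the LHS operand type explicitly instead of reusing the RHS type for both sides. A sketch of the shape of the change, with illustrative parameter names (the real declaration, which may carry further parameters, lives in arm_gemm's transform headers):

// before: StdTransformsFixed<TOperand, TResult, height, width, block>
// after:  StdTransformsFixed<TInput, TOperand, TResult, height, width, block>
template<typename TInput,     // LHS operand type, newly explicit
         typename TOperand,   // RHS operand type
         typename TResult,
         unsigned int height, unsigned int width, unsigned int block>
class StdTransformsFixed;
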
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
index f8a76b5244..c6e982e20d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void a64_hybrid_s8s32_mmla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -88,7 +88,7 @@ void a64_hybrid_s8s32_mmla_6x16 (
"beq 38f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 13f\n"
"cmp x11, #0x10\n"
@@ -176,8 +176,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
"mov x28, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -197,7 +197,12 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q6, [x10, #0x10]\n"
"blt 19f\n"
"18:" // Height 1: Multiply loop: Main loop head
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"trn1 v19.2d, v1.2d, v20.2d\n"
+ "trn2 v1.2d, v1.2d, v20.2d\n"
".inst 0x4e87a668 // smmla v8.4s, v19.16b, v7.16b\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x4e86a66c // smmla v12.4s, v19.16b, v6.16b\n"
@@ -210,7 +215,6 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v20.2d\n"
".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
@@ -227,39 +231,38 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x4e91a42e // smmla v14.4s, v1.16b, v17.16b\n"
"ldr q17, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "cmp x27, #0x20\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x4e91a42f // smmla v15.4s, v1.16b, v17.16b\n"
"ldr q1, [x26, #0x0]\n"
- "add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
- "trn1 v20.2d, v1.2d, v21.2d\n"
- ".inst 0x4e87a688 // smmla v8.4s, v20.16b, v7.16b\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "trn1 v19.2d, v1.2d, v17.2d\n"
+ "trn2 v1.2d, v1.2d, v17.2d\n"
+ ".inst 0x4e87a668 // smmla v8.4s, v19.16b, v7.16b\n"
"ldr q18, [x10, #0x20]\n"
- ".inst 0x4e86a68c // smmla v12.4s, v20.16b, v6.16b\n"
+ ".inst 0x4e86a66c // smmla v12.4s, v19.16b, v6.16b\n"
"ldr q17, [x10, #0x30]\n"
- ".inst 0x4e92a689 // smmla v9.4s, v20.16b, v18.16b\n"
+ ".inst 0x4e92a669 // smmla v9.4s, v19.16b, v18.16b\n"
"ldr q18, [x10, #0x40]\n"
- ".inst 0x4e91a68d // smmla v13.4s, v20.16b, v17.16b\n"
+ ".inst 0x4e91a66d // smmla v13.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x50]\n"
- ".inst 0x4e92a68a // smmla v10.4s, v20.16b, v18.16b\n"
- "ldr q19, [x10, #0x60]\n"
- ".inst 0x4e91a68e // smmla v14.4s, v20.16b, v17.16b\n"
+ ".inst 0x4e92a66a // smmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q20, [x10, #0x60]\n"
+ ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
"ldr q18, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v21.2d\n"
- ".inst 0x4e93a68b // smmla v11.4s, v20.16b, v19.16b\n"
+ ".inst 0x4e94a66b // smmla v11.4s, v19.16b, v20.16b\n"
"ldr q17, [x10, #0x80]\n"
- ".inst 0x4e92a68f // smmla v15.4s, v20.16b, v18.16b\n"
- "ldr q19, [x10, #0x90]\n"
+ ".inst 0x4e92a66f // smmla v15.4s, v19.16b, v18.16b\n"
+ "ldr q20, [x10, #0x90]\n"
".inst 0x4e91a428 // smmla v8.4s, v1.16b, v17.16b\n"
"ldr q18, [x10, #0xa0]\n"
- ".inst 0x4e93a42c // smmla v12.4s, v1.16b, v19.16b\n"
+ ".inst 0x4e94a42c // smmla v12.4s, v1.16b, v20.16b\n"
"ldr q17, [x10, #0xb0]\n"
".inst 0x4e92a429 // smmla v9.4s, v1.16b, v18.16b\n"
"ldr q18, [x10, #0xc0]\n"
@@ -269,22 +272,21 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x4e91a42e // smmla v14.4s, v1.16b, v17.16b\n"
"ldr q17, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
".inst 0x4e91a42f // smmla v15.4s, v1.16b, v17.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 27f\n"
"cmp x27, #0x8\n"
"blt 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
"ldr d19, [x26], #0x8\n"
- "ldr q18, [x10, #0x0]\n"
- "trn1 v19.2d, v19.2d, v17.2d\n"
+ "ldr q20, [x10, #0x0]\n"
+ "sub x27, x27, #0x8\n"
"ldr q17, [x10, #0x10]\n"
- ".inst 0x4e92a668 // smmla v8.4s, v19.16b, v18.16b\n"
+ "cmp x27, #0x8\n"
+ "trn1 v19.2d, v19.2d, v18.2d\n"
+ ".inst 0x4e94a668 // smmla v8.4s, v19.16b, v20.16b\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x4e91a66c // smmla v12.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x30]\n"
@@ -296,11 +298,9 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
- "add x10, x10, #0x80\n"
"bge 21b\n"
"22:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 27f\n"
@@ -324,24 +324,24 @@ void a64_hybrid_s8s32_mmla_6x16 (
"25:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr b1, [x26, #0x0]\n"
"26:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q23, [x10, #0x0]\n"
- "ldr q18, [x10, #0x10]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
"trn1 v19.2d, v1.2d, v17.2d\n"
- ".inst 0x4e97a668 // smmla v8.4s, v19.16b, v23.16b\n"
+ ".inst 0x4e98a668 // smmla v8.4s, v19.16b, v24.16b\n"
"ldr q17, [x10, #0x20]\n"
- ".inst 0x4e92a66c // smmla v12.4s, v19.16b, v18.16b\n"
- "ldr q31, [x10, #0x30]\n"
+ ".inst 0x4e94a66c // smmla v12.4s, v19.16b, v20.16b\n"
+ "ldr q0, [x10, #0x30]\n"
".inst 0x4e91a669 // smmla v9.4s, v19.16b, v17.16b\n"
"ldr q20, [x10, #0x40]\n"
- ".inst 0x4e9fa66d // smmla v13.4s, v19.16b, v31.16b\n"
+ ".inst 0x4e80a66d // smmla v13.4s, v19.16b, v0.16b\n"
"ldr q17, [x10, #0x50]\n"
".inst 0x4e94a66a // smmla v10.4s, v19.16b, v20.16b\n"
"ldr q18, [x10, #0x60]\n"
".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
- "add x10, x10, #0x80\n"
"27:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -413,7 +413,7 @@ void a64_hybrid_s8s32_mmla_6x16 (
"38:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"39:" // Height 2: Column loop
"tbz %x[flags], #0, 50f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -523,8 +523,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
"mov x28, #0x0\n"
"52:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 53f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -549,6 +549,14 @@ void a64_hybrid_s8s32_mmla_6x16 (
"blt 56f\n"
"55:" // Height 2: Multiply loop: Main loop head
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a668 // smmla v8.4s, v19.16b, v7.16b\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x4e86a66c // smmla v12.4s, v19.16b, v6.16b\n"
@@ -561,7 +569,6 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
@@ -578,22 +585,21 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x4e91a42e // smmla v14.4s, v1.16b, v17.16b\n"
"ldr q17, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- ".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
"add x10, x10, #0x100\n"
+ ".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
"ldr q7, [x10, #0x0]\n"
".inst 0x4e91a42f // smmla v15.4s, v1.16b, v17.16b\n"
"ldr q1, [x26, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"bge 55b\n"
"56:" // Height 2: Multiply loop: Single iteration only
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e87a668 // smmla v8.4s, v19.16b, v7.16b\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x4e86a66c // smmla v12.4s, v19.16b, v6.16b\n"
@@ -606,7 +612,6 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
@@ -623,41 +628,36 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x4e91a42e // smmla v14.4s, v1.16b, v17.16b\n"
"ldr q17, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x4e92a42b // smmla v11.4s, v1.16b, v18.16b\n"
".inst 0x4e91a42f // smmla v15.4s, v1.16b, v17.16b\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x10, x10, #0x100\n"
"57:" // Height 2: Multiply loop: Main loop skip
"cbz x27, 64f\n"
"cmp x27, #0x8\n"
"blt 59f\n"
"58:" // Height 2: Multiply loop: Odd block loop
- "ldr d18, [x26], #0x8\n"
- "ldr d17, [x25], #0x8\n"
- "trn1 v19.2d, v18.2d, v17.2d\n"
+ "ldr d20, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr q17, [x10, #0x0]\n"
- "ldr q22, [x10, #0x10]\n"
- ".inst 0x4e91a668 // smmla v8.4s, v19.16b, v17.16b\n"
- ".inst 0x4e96a66c // smmla v12.4s, v19.16b, v22.16b\n"
- "ldr q1, [x10, #0x20]\n"
+ "ldr q18, [x10, #0x0]\n"
+ "ldr q17, [x10, #0x10]\n"
+ "cmp x27, #0x8\n"
+ "trn1 v22.2d, v20.2d, v19.2d\n"
+ ".inst 0x4e92a6c8 // smmla v8.4s, v22.16b, v18.16b\n"
+ "ldr q2, [x10, #0x20]\n"
+ ".inst 0x4e91a6cc // smmla v12.4s, v22.16b, v17.16b\n"
"ldr q17, [x10, #0x30]\n"
- ".inst 0x4e81a669 // smmla v9.4s, v19.16b, v1.16b\n"
- ".inst 0x4e91a66d // smmla v13.4s, v19.16b, v17.16b\n"
+ ".inst 0x4e82a6c9 // smmla v9.4s, v22.16b, v2.16b\n"
"ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91a6cd // smmla v13.4s, v22.16b, v17.16b\n"
"ldr q17, [x10, #0x50]\n"
- ".inst 0x4e92a66a // smmla v10.4s, v19.16b, v18.16b\n"
- ".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
+ ".inst 0x4e92a6ca // smmla v10.4s, v22.16b, v18.16b\n"
"ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91a6ce // smmla v14.4s, v22.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "cmp x27, #0x8\n"
- ".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
- ".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
"add x10, x10, #0x80\n"
+ ".inst 0x4e92a6cb // smmla v11.4s, v22.16b, v18.16b\n"
+ ".inst 0x4e91a6cf // smmla v15.4s, v22.16b, v17.16b\n"
"bge 58b\n"
"59:" // Height 2: Multiply loop: Skip odd blocks
"cbz x27, 64f\n"
@@ -703,27 +703,27 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x4e91a66e // smmla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x4e92a66b // smmla v11.4s, v19.16b, v18.16b\n"
".inst 0x4e91a66f // smmla v15.4s, v19.16b, v17.16b\n"
- "add x10, x10, #0x80\n"
"64:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 52b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"bge 73f\n"
"tbz x11, #3, 68f\n"
"st1 { v7.4s }, [x9], #0x10\n"
@@ -804,12 +804,12 @@ void a64_hybrid_s8s32_mmla_6x16 (
"75:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"76:" // Height 3: Column loop
"tbz %x[flags], #0, 87f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"bge 85f\n"
"tbz x11, #3, 80f\n"
@@ -951,8 +951,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
"mov x28, #0x0\n"
"89:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 90f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -982,35 +982,38 @@ void a64_hybrid_s8s32_mmla_6x16 (
"92:" // Height 3: Multiply loop: Main loop head
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
- "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x4e87a770 // smmla v16.4s, v27.16b, v7.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
".inst 0x4e86a774 // smmla v20.4s, v27.16b, v6.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
- "trn2 v3.2d, v3.2d, v29.2d\n"
".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x90]\n"
"ldr q2, [x25, #0x0]\n"
@@ -1018,15 +1021,12 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e9aa470 // smmla v16.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x4e99a42c // smmla v12.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e99a474 // smmla v20.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x4e9aa429 // smmla v9.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e9aa471 // smmla v17.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x4e99a42d // smmla v13.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e99a475 // smmla v21.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xd0]\n"
".inst 0x4e9aa42a // smmla v10.4s, v1.16b, v26.16b\n"
@@ -1048,43 +1048,43 @@ void a64_hybrid_s8s32_mmla_6x16 (
"93:" // Height 3: Multiply loop: Single iteration only
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
- "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x4e87a770 // smmla v16.4s, v27.16b, v7.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
".inst 0x4e86a774 // smmla v20.4s, v27.16b, v6.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
- "trn2 v3.2d, v3.2d, v29.2d\n"
".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x90]\n"
".inst 0x4e9aa428 // smmla v8.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e9aa470 // smmla v16.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x4e99a42c // smmla v12.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e99a474 // smmla v20.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x4e9aa429 // smmla v9.4s, v1.16b, v26.16b\n"
@@ -1109,25 +1109,25 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 96f\n"
"95:" // Height 3: Multiply loop: Odd block loop
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "trn1 v28.2d, v26.2d, v25.2d\n"
- "ldr d25, [x24], #0x8\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "sub x27, x27, #0x8\n"
+ "ldr d27, [x24], #0x8\n"
"ldr q26, [x10, #0x0]\n"
- "trn1 v27.2d, v25.2d, v27.2d\n"
- ".inst 0x4e9aa788 // smmla v8.4s, v28.16b, v26.16b\n"
+ "cmp x27, #0x8\n"
"ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v27.2d, v29.2d\n"
+ ".inst 0x4e9aa788 // smmla v8.4s, v28.16b, v26.16b\n"
".inst 0x4e9aa770 // smmla v16.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x4e99a78c // smmla v12.4s, v28.16b, v25.16b\n"
".inst 0x4e99a774 // smmla v20.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
- "sub x27, x27, #0x8\n"
".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
- "cmp x27, #0x8\n"
".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
@@ -1136,8 +1136,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
- ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
"add x10, x10, #0x80\n"
+ ".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
@@ -1183,9 +1183,9 @@ void a64_hybrid_s8s32_mmla_6x16 (
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x4e9aa788 // smmla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9da78c // smmla v12.4s, v28.16b, v29.16b\n"
".inst 0x4e9aa770 // smmla v16.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x4e9da78c // smmla v12.4s, v28.16b, v29.16b\n"
".inst 0x4e9da774 // smmla v20.4s, v27.16b, v29.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
@@ -1211,20 +1211,20 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x28, x20\n"
"bne 89b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "uzp1 v7.2d, v8.2d, v12.2d\n"
"cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
@@ -1329,13 +1329,13 @@ void a64_hybrid_s8s32_mmla_6x16 (
"112:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"113:" // Height 4: Column loop
"tbz %x[flags], #0, 124f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x22, x23, x20, LSL #2\n"
"bge 122f\n"
"tbz x11, #3, 117f\n"
@@ -1497,8 +1497,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
"mov x28, #0x0\n"
"126:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 127f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1532,33 +1532,38 @@ void a64_hybrid_s8s32_mmla_6x16 (
"129:" // Height 4: Multiply loop: Main loop head
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
"sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q4, [x23, #0x0]\n"
+ ".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
+ ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a770 // smmla v16.4s, v27.16b, v7.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
".inst 0x4e86a774 // smmla v20.4s, v27.16b, v6.16b\n"
"ldr q25, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
@@ -1569,23 +1574,18 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e9aa470 // smmla v16.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x4e99a42c // smmla v12.4s, v1.16b, v25.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e99a474 // smmla v20.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x4e9aa429 // smmla v9.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e9aa471 // smmla v17.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x4e99a42d // smmla v13.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e99a475 // smmla v21.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xd0]\n"
".inst 0x4e9aa42a // smmla v10.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e9aa472 // smmla v18.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xe0]\n"
".inst 0x4e99a42e // smmla v14.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e99a476 // smmla v22.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
@@ -1601,48 +1601,48 @@ void a64_hybrid_s8s32_mmla_6x16 (
"130:" // Height 4: Multiply loop: Single iteration only
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ ".inst 0x4e87a788 // smmla v8.4s, v28.16b, v7.16b\n"
+ ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a770 // smmla v16.4s, v27.16b, v7.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x4e86a78c // smmla v12.4s, v28.16b, v6.16b\n"
".inst 0x4e86a774 // smmla v20.4s, v27.16b, v6.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x4e9aa789 // smmla v9.4s, v28.16b, v26.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x4e9aa771 // smmla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x4e99a78d // smmla v13.4s, v28.16b, v25.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e99a775 // smmla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x4e9aa78a // smmla v10.4s, v28.16b, v26.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e9aa772 // smmla v18.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x4e99a78e // smmla v14.4s, v28.16b, v25.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e99a776 // smmla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x4e9aa78b // smmla v11.4s, v28.16b, v26.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e9aa773 // smmla v19.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x4e99a78f // smmla v15.4s, v28.16b, v25.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e99a777 // smmla v23.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x90]\n"
".inst 0x4e9aa428 // smmla v8.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e9aa470 // smmla v16.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x4e99a42c // smmla v12.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e99a474 // smmla v20.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x4e9aa429 // smmla v9.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e9aa471 // smmla v17.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x4e99a42d // smmla v13.4s, v1.16b, v25.16b\n"
@@ -1664,16 +1664,16 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 133f\n"
"132:" // Height 4: Multiply loop: Odd block loop
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "trn1 v28.2d, v26.2d, v25.2d\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr d26, [x24], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "trn1 v27.2d, v26.2d, v25.2d\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"cmp x27, #0x8\n"
"ldr q26, [x10, #0x0]\n"
"ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v29.2d, v27.2d\n"
".inst 0x4e9aa788 // smmla v8.4s, v28.16b, v26.16b\n"
".inst 0x4e9aa770 // smmla v16.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x20]\n"
@@ -1774,24 +1774,24 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x28, x20\n"
"bne 126b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "uzp1 v13.2d, v10.2d, v14.2d\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
@@ -1918,14 +1918,14 @@ void a64_hybrid_s8s32_mmla_6x16 (
"149:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"150:" // Height 5: Column loop
"tbz %x[flags], #0, 161f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x21, x22, x20, LSL #2\n"
"bge 159f\n"
"tbz x11, #3, 154f\n"
@@ -2123,8 +2123,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
"mov x28, #0x0\n"
"163:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 164f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2161,51 +2161,51 @@ void a64_hybrid_s8s32_mmla_6x16 (
"166:" // Height 5: Multiply loop: Main loop head
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a4c8 // smmla v8.4s, v6.16b, v7.16b\n"
+ "sub x27, x27, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"trn1 v4.2d, v5.2d, v0.2d\n"
"trn2 v5.2d, v5.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e87a4c8 // smmla v8.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "add x22, x22, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x4e80a4cc // smmla v12.4s, v6.16b, v0.16b\n"
".inst 0x4e80a454 // smmla v20.4s, v2.16b, v0.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x4e80a49c // smmla v28.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x4e87a4c9 // smmla v9.4s, v6.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
- "add x24, x24, #0x10\n"
".inst 0x4e80a4cd // smmla v13.4s, v6.16b, v0.16b\n"
".inst 0x4e80a455 // smmla v21.4s, v2.16b, v0.16b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4e80a49d // smmla v29.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x50]\n"
".inst 0x4e87a4ca // smmla v10.4s, v6.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e80a4ce // smmla v14.4s, v6.16b, v0.16b\n"
".inst 0x4e80a456 // smmla v22.4s, v2.16b, v0.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e80a49e // smmla v30.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x70]\n"
".inst 0x4e87a4cb // smmla v11.4s, v6.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e80a4cf // smmla v15.4s, v6.16b, v0.16b\n"
".inst 0x4e80a457 // smmla v23.4s, v2.16b, v0.16b\n"
"ldr q2, [x25, #0x0]\n"
@@ -2251,47 +2251,47 @@ void a64_hybrid_s8s32_mmla_6x16 (
"167:" // Height 5: Multiply loop: Single iteration only
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a4c8 // smmla v8.4s, v6.16b, v7.16b\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"trn1 v4.2d, v5.2d, v0.2d\n"
"trn2 v5.2d, v5.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e87a4c8 // smmla v8.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x4e80a4cc // smmla v12.4s, v6.16b, v0.16b\n"
".inst 0x4e80a454 // smmla v20.4s, v2.16b, v0.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e80a49c // smmla v28.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x4e87a4c9 // smmla v9.4s, v6.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
".inst 0x4e80a4cd // smmla v13.4s, v6.16b, v0.16b\n"
".inst 0x4e80a455 // smmla v21.4s, v2.16b, v0.16b\n"
- "add x22, x22, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e80a49d // smmla v29.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x50]\n"
".inst 0x4e87a4ca // smmla v10.4s, v6.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e80a4ce // smmla v14.4s, v6.16b, v0.16b\n"
".inst 0x4e80a456 // smmla v22.4s, v2.16b, v0.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e80a49e // smmla v30.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x70]\n"
".inst 0x4e87a4cb // smmla v11.4s, v6.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
@@ -2335,24 +2335,24 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 170f\n"
"169:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
+ "ldr d3, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "cmp x27, #0x8\n"
"ldr d0, [x22], #0x8\n"
"ldr q1, [x10, #0x0]\n"
- "trn1 v2.2d, v0.2d, v2.2d\n"
- ".inst 0x4e81a488 // smmla v8.4s, v4.16b, v1.16b\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v3.2d, v2.2d\n"
+ "trn1 v2.2d, v0.2d, v5.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e81a488 // smmla v8.4s, v4.16b, v1.16b\n"
".inst 0x4e81a470 // smmla v16.4s, v3.16b, v1.16b\n"
".inst 0x4e81a458 // smmla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x10, #0x20]\n"
".inst 0x4e80a48c // smmla v12.4s, v4.16b, v0.16b\n"
".inst 0x4e80a474 // smmla v20.4s, v3.16b, v0.16b\n"
- "cmp x27, #0x8\n"
".inst 0x4e80a45c // smmla v28.4s, v2.16b, v0.16b\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x4e81a489 // smmla v9.4s, v4.16b, v1.16b\n"
@@ -2371,8 +2371,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e80a476 // smmla v22.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45e // smmla v30.4s, v2.16b, v0.16b\n"
"ldr q0, [x10, #0x70]\n"
- ".inst 0x4e86a48b // smmla v11.4s, v4.16b, v6.16b\n"
"add x10, x10, #0x80\n"
+ ".inst 0x4e86a48b // smmla v11.4s, v4.16b, v6.16b\n"
".inst 0x4e86a473 // smmla v19.4s, v3.16b, v6.16b\n"
".inst 0x4e86a45b // smmla v27.4s, v2.16b, v6.16b\n"
".inst 0x4e80a48f // smmla v15.4s, v4.16b, v0.16b\n"
@@ -2471,28 +2471,28 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x28, x20\n"
"bne 163b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "cmp x11, #0x10\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "uzp1 v13.2d, v10.2d, v14.2d\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
@@ -2640,19 +2640,20 @@ void a64_hybrid_s8s32_mmla_6x16 (
"b 224f\n"
"186:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"187:" // Height 6: Column loop
"tbz %x[flags], #0, 198f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x20, x21, x20, LSL #2\n"
"bge 196f\n"
"tbz x11, #3, 191f\n"
@@ -2870,8 +2871,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
"mov x28, #0x0\n"
"200:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 201f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2912,56 +2913,56 @@ void a64_hybrid_s8s32_mmla_6x16 (
"203:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
"sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
"ldr q2, [x25, #0x0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
"ldr q0, [x10, #0x90]\n"
"ldr q4, [x23, #0x0]\n"
@@ -3005,52 +3006,52 @@ void a64_hybrid_s8s32_mmla_6x16 (
"204:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4e87a450 // smmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e87a498 // smmla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x4e86a40c // smmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e86a454 // smmla v20.4s, v2.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x4e86a49c // smmla v28.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e87a409 // smmla v9.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x4e87a451 // smmla v17.4s, v2.16b, v7.16b\n"
".inst 0x4e87a499 // smmla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
- "add x22, x22, #0x10\n"
".inst 0x4e86a40d // smmla v13.4s, v0.16b, v6.16b\n"
".inst 0x4e86a455 // smmla v21.4s, v2.16b, v6.16b\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x4e86a49d // smmla v29.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x4e87a40a // smmla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x4e87a452 // smmla v18.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49a // smmla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x4e86a40e // smmla v14.4s, v0.16b, v6.16b\n"
".inst 0x4e86a456 // smmla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x4e86a49e // smmla v30.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x4e87a453 // smmla v19.4s, v2.16b, v7.16b\n"
".inst 0x4e87a49b // smmla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x4e86a40f // smmla v15.4s, v0.16b, v6.16b\n"
".inst 0x4e86a457 // smmla v23.4s, v2.16b, v6.16b\n"
".inst 0x4e86a49f // smmla v31.4s, v4.16b, v6.16b\n"
@@ -3091,18 +3092,18 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 207f\n"
"206:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d5, [x24], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
"cmp x27, #0x8\n"
- "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"ldr d0, [x21], #0x8\n"
- "trn1 v2.2d, v1.2d, v0.2d\n"
"ldr q1, [x10, #0x0]\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v5.2d, v3.2d\n"
+ "trn1 v2.2d, v2.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
".inst 0x4e81a488 // smmla v8.4s, v4.16b, v1.16b\n"
".inst 0x4e81a470 // smmla v16.4s, v3.16b, v1.16b\n"
@@ -3196,9 +3197,9 @@ void a64_hybrid_s8s32_mmla_6x16 (
"ldr q0, [x10, #0x0]\n"
"trn1 v7.2d, v1.2d, v2.2d\n"
"trn1 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x4e80a4e8 // smmla v8.4s, v7.16b, v0.16b\n"
"trn1 v2.2d, v5.2d, v6.2d\n"
"ldr q1, [x10, #0x10]\n"
+ ".inst 0x4e80a4e8 // smmla v8.4s, v7.16b, v0.16b\n"
".inst 0x4e80a470 // smmla v16.4s, v3.16b, v0.16b\n"
".inst 0x4e80a458 // smmla v24.4s, v2.16b, v0.16b\n"
"ldr q0, [x10, #0x20]\n"
@@ -3222,8 +3223,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
".inst 0x4e81a476 // smmla v22.4s, v3.16b, v1.16b\n"
".inst 0x4e81a45e // smmla v30.4s, v2.16b, v1.16b\n"
"ldr q6, [x10, #0x70]\n"
- ".inst 0x4e80a4eb // smmla v11.4s, v7.16b, v0.16b\n"
"add x10, x10, #0x80\n"
+ ".inst 0x4e80a4eb // smmla v11.4s, v7.16b, v0.16b\n"
".inst 0x4e80a473 // smmla v19.4s, v3.16b, v0.16b\n"
".inst 0x4e80a45b // smmla v27.4s, v2.16b, v0.16b\n"
".inst 0x4e86a4ef // smmla v15.4s, v7.16b, v6.16b\n"
@@ -3235,32 +3236,32 @@ void a64_hybrid_s8s32_mmla_6x16 (
"cmp x28, x20\n"
"bne 200b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x20, x21, x20, LSL #2\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
- "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
@@ -3439,8 +3440,8 @@ void a64_hybrid_s8s32_mmla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"224:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
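
Beyond the ldr swap, the Height 6 paths can no longer advance %[output_ptr] in place, so the advanced base is computed into a scratch register and written back through the args block, as in the lines above:

    "madd x20, x21, x20, x9\n"                           // x9 + output_offset * 0x18
    "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"  // persist for the next pass

The remaining bulk of these hunks appears to be scheduling-only: the same sdot/smmla products accumulate into the same v8-v31 accumulators, while scratch vector registers are renumbered and the bookkeeping (pointer adds, sub/cmp on x27, prfm prefetches) is hoisted ahead of or interleaved differently with the multiply chains.
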
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp
index 14aba00788..b81e2e8593 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return false;
}
- StdTransformsFixed<rhs_operand_type, result_type, 4, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp
index 00d063b426..90b196735a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -84,133 +84,133 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"cmp %x[M], #0x2\n"
"bgt 61f\n"
"beq 31f\n"
- "mov x15, %x[col_bias]\n"
+ "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v15.16b, #0x1\n"
"bic %x[flags], %x[flags], #0x80000000\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x13, %x[output_ptr]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
"movi v19.4s, #0x0\n"
"3:" // Height 1: setup done
- "mov x11, #0x0\n"
+ "mov x12, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w10, [x20, x11, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
- "ldr x20, [%x[input_ptr], x11, LSL #0x3]\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
- "ldr x9, [x20, #0x0]\n"
- "cbnz x11, 6f\n"
+ "ldr x10, [x20, #0x0]\n"
+ "cbnz x12, 6f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
- "add x9, x9, x20\n"
+ "add x10, x10, x20\n"
"b 6f\n"
"5:" // Height 1: setup direct input
- "mov x9, %x[input_ptr]\n"
+ "mov x10, %x[input_ptr]\n"
"6:" // Height 1: input setup done
- "cmp x10, #0x10\n"
+ "cmp x11, #0x10\n"
"blt 11f\n"
- "ldr q0, [x9, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr d21, [x12, #0x70]\n"
- "ldr x20, [x12, #0x78]\n"
+ "ldr d21, [x14, #0x70]\n"
+ "ldr x20, [x14, #0x78]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr d20, [x12, #0x80]\n"
+ "ldr d20, [x14, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr d26, [x12, #0x90]\n"
+ "ldr d26, [x14, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr d25, [x12, #0xa0]\n"
+ "ldr d25, [x14, #0xa0]\n"
"mov v21.d[1], x20\n"
- "ldr x20, [x12, #0x88]\n"
+ "ldr x20, [x14, #0x88]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr d24, [x12, #0xb0]\n"
+ "ldr d24, [x14, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr d23, [x12, #0xc0]\n"
+ "ldr d23, [x14, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr d22, [x12, #0xd0]\n"
+ "ldr d22, [x14, #0xd0]\n"
".inst 0x6fa0e2b3 // udot v19.4s, v21.16b, v0.4b[1]\n"
- "ldr d21, [x12, #0xe0]\n"
+ "ldr d21, [x14, #0xe0]\n"
"mov v20.d[1], x20\n"
- "ldr x20, [x12, #0x98]\n"
- "mov v26.d[1], x20\n"
- "ldr x20, [x12, #0xa8]\n"
- "mov v25.d[1], x20\n"
- "ldr x20, [x12, #0xb8]\n"
- "mov v24.d[1], x20\n"
- "ldr x23, [x12, #0xc8]\n"
+ "ldr x22, [x14, #0x98]\n"
+ "add x10, x10, #0x10\n"
+ "ldr x21, [x14, #0xa8]\n"
".inst 0x6f80ea90 // udot v16.4s, v20.16b, v0.4b[2]\n"
- "ldr d20, [x12, #0xf0]\n"
+ "ldr d20, [x14, #0xf0]\n"
+ "ldr x20, [x14, #0xb8]\n"
+ "mov v26.d[1], x22\n"
+ "mov v25.d[1], x21\n"
+ "ldr x23, [x14, #0xc8]\n"
+ "ldr x22, [x14, #0xd8]\n"
".inst 0x6f80eb51 // udot v17.4s, v26.16b, v0.4b[2]\n"
- "ldr x22, [x12, #0xd8]\n"
+ "mov v24.d[1], x20\n"
+ "ldr x21, [x14, #0xe8]\n"
+ "ldr x20, [x14, #0xf8]\n"
".inst 0x6f80eb32 // udot v18.4s, v25.16b, v0.4b[2]\n"
- "ldr x21, [x12, #0xe8]\n"
".inst 0x6f80eb13 // udot v19.4s, v24.16b, v0.4b[2]\n"
- "ldr x20, [x12, #0xf8]\n"
"mov v23.d[1], x23\n"
"mov v22.d[1], x22\n"
- "add x9, x9, #0x10\n"
+ "add x14, x14, #0x100\n"
"mov v21.d[1], x21\n"
- "add x12, x12, #0x100\n"
- "mov v20.d[1], x20\n"
".inst 0x6fa0eaf0 // udot v16.4s, v23.16b, v0.4b[3]\n"
+ "mov v20.d[1], x20\n"
".inst 0x6fa0ead1 // udot v17.4s, v22.16b, v0.4b[3]\n"
".inst 0x6fa0eab2 // udot v18.4s, v21.16b, v0.4b[3]\n"
".inst 0x6fa0ea93 // udot v19.4s, v20.16b, v0.4b[3]\n"
"tbnz %x[flags], #31, 8f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
- "ldr q0, [x9, #0x0]\n"
- "sub x10, x10, #0x10\n"
- "ldr q4, [x12, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "sub x11, x11, #0x10\n"
+ "ldr q4, [x14, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr q21, [x12, #0x70]\n"
+ "ldr q21, [x14, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x80]\n"
+ "ldr q20, [x14, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr q26, [x12, #0x90]\n"
+ "ldr q26, [x14, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr q25, [x12, #0xa0]\n"
+ "ldr q25, [x14, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "ldr q24, [x12, #0xb0]\n"
+ "ldr q24, [x14, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "ldr q23, [x12, #0xc0]\n"
+ "ldr q23, [x14, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr q22, [x12, #0xd0]\n"
+ "ldr q22, [x14, #0xd0]\n"
".inst 0x6fa0e2b3 // udot v19.4s, v21.16b, v0.4b[1]\n"
- "ldr q21, [x12, #0xe0]\n"
+ "ldr q21, [x14, #0xe0]\n"
".inst 0x6f80ea90 // udot v16.4s, v20.16b, v0.4b[2]\n"
- "ldr q20, [x12, #0xf0]\n"
+ "ldr q20, [x14, #0xf0]\n"
".inst 0x6f80eb51 // udot v17.4s, v26.16b, v0.4b[2]\n"
- "sub x10, x10, #0x10\n"
+ "sub x11, x11, #0x10\n"
".inst 0x6f80eb32 // udot v18.4s, v25.16b, v0.4b[2]\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
".inst 0x6f80eb13 // udot v19.4s, v24.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x6fa0eaf0 // udot v16.4s, v23.16b, v0.4b[3]\n"
".inst 0x6fa0ead1 // udot v17.4s, v22.16b, v0.4b[3]\n"
".inst 0x6fa0eab2 // udot v18.4s, v21.16b, v0.4b[3]\n"
@@ -218,54 +218,54 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"tbnz %x[flags], #31, 10f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"10:" // Height 1: Multiply loop: unique 2: skip row sum
- "prfm pldl1keep, [x9, #0x80]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"11:" // Height 1: Multiply loop: Main loop skip
- "cbz x10, 18f\n"
- "cmp x10, #0x4\n"
+ "cbz x11, 18f\n"
+ "cmp x11, #0x4\n"
"blt 14f\n"
"12:" // Height 1: Multiply loop: Odd block loop
- "ldr s0, [x9], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
"tbnz %x[flags], #31, 13f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
- "ldr q20, [x12, #0x0]\n"
- "sub x10, x10, #0x4\n"
- "ldr q22, [x12, #0x10]\n"
- "cmp x10, #0x4\n"
- "ldr q21, [x12, #0x20]\n"
- ".inst 0x6f80e290 // udot v16.4s, v20.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x30]\n"
+ "ldr q23, [x14, #0x0]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q22, [x14, #0x10]\n"
+ "cmp x11, #0x4\n"
+ "ldr q21, [x14, #0x20]\n"
+ "ldr q20, [x14, #0x30]\n"
+ ".inst 0x6f80e2f0 // udot v16.4s, v23.16b, v0.4b[0]\n"
".inst 0x6f80e2d1 // udot v17.4s, v22.16b, v0.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x6f80e2b2 // udot v18.4s, v21.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x6f80e293 // udot v19.4s, v20.16b, v0.4b[0]\n"
"bge 12b\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
- "cbz x10, 18f\n"
- "tbz x10, #1, 15f\n"
- "ldr h0, [x9], #0x2\n"
- "tbz x10, #0, 16f\n"
- "ld1 { v0.b }[2], [x9]\n"
+ "cbz x11, 18f\n"
+ "tbz x11, #1, 15f\n"
+ "ldr h0, [x10], #0x2\n"
+ "tbz x11, #0, 16f\n"
+ "ld1 { v0.b }[2], [x10]\n"
"b 16f\n"
"15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x9, #0x0]\n"
+ "ldr b0, [x10, #0x0]\n"
"16:" // Height 1: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 17f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q20, [x12, #0x0]\n"
- ".inst 0x6f80e290 // udot v16.4s, v20.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x10]\n"
- ".inst 0x6f80e291 // udot v17.4s, v20.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x20]\n"
- ".inst 0x6f80e292 // udot v18.4s, v20.16b, v0.4b[0]\n"
- "ldr q20, [x12, #0x30]\n"
+ "ldr q23, [x14, #0x0]\n"
+ "ldr q22, [x14, #0x10]\n"
+ "ldr q21, [x14, #0x20]\n"
+ "ldr q20, [x14, #0x30]\n"
+ ".inst 0x6f80e2f0 // udot v16.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x6f80e2d1 // udot v17.4s, v22.16b, v0.4b[0]\n"
+ "add x14, x14, #0x40\n"
+ ".inst 0x6f80e2b2 // udot v18.4s, v21.16b, v0.4b[0]\n"
".inst 0x6f80e293 // udot v19.4s, v20.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x11, x11, #0x1\n"
- "cmp x11, x20\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x20\n"
"bne 4b\n"
"prfm pstl1keep, [x13, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
@@ -276,28 +276,28 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"addp v11.4s, v11.4s, v11.4s\n"
"mul v11.4s, v11.4s, v20.4s\n"
"19:" // Height 1: skip row sum fixup
- "ldr q23, [x15, #0x0]\n"
+ "ldr q24, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q22, [x15, #0x10]\n"
+ "ldr q23, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q21, [x15, #0x20]\n"
+ "ldr q22, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q20, [x15, #0x30]\n"
+ "ldr q21, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
- "add v16.4s, v16.4s, v23.4s\n"
- "add v17.4s, v17.4s, v22.4s\n"
- "add v18.4s, v18.4s, v21.4s\n"
- "add v19.4s, v19.4s, v20.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v20.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v20.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v24.4s\n"
+ "add v17.4s, v17.4s, v23.4s\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add v19.4s, v19.4s, v21.4s\n"
+ "add x16, x16, #0x40\n"
"sqrdmulh v16.4s, v16.4s, v20.4s\n"
"sqrdmulh v17.4s, v17.4s, v20.4s\n"
"sqrdmulh v18.4s, v18.4s, v20.4s\n"
"sqrdmulh v19.4s, v19.4s, v20.4s\n"
- "add x15, x15, #0x40\n"
"tbz %x[flags], #5, 20f\n"
"and v23.16b, v16.16b, v0.16b\n"
"and v22.16b, v17.16b, v0.16b\n"
@@ -317,67 +317,67 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v20.4s }, [x20]\n"
- "add v16.4s, v16.4s, v20.4s\n"
- "add v17.4s, v17.4s, v20.4s\n"
- "add v18.4s, v18.4s, v20.4s\n"
- "add v19.4s, v19.4s, v20.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v20.4s }, [x20]\n"
- "smin v16.4s, v16.4s, v20.4s\n"
- "smin v17.4s, v17.4s, v20.4s\n"
- "smin v18.4s, v18.4s, v20.4s\n"
- "smin v19.4s, v19.4s, v20.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v22.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v21.4s }, [x21]\n"
+ "cmp x15, #0x10\n"
"ld1r { v20.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v22.4s\n"
+ "add v17.4s, v17.4s, v22.4s\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add v19.4s, v19.4s, v22.4s\n"
+ "smin v16.4s, v16.4s, v21.4s\n"
+ "smin v17.4s, v17.4s, v21.4s\n"
+ "smin v18.4s, v18.4s, v21.4s\n"
+ "smin v19.4s, v19.4s, v21.4s\n"
"smax v16.4s, v16.4s, v20.4s\n"
"smax v17.4s, v17.4s, v20.4s\n"
"smax v18.4s, v18.4s, v20.4s\n"
"smax v19.4s, v19.4s, v20.4s\n"
"uzp1 v16.8h, v16.8h, v17.8h\n"
"uzp1 v17.8h, v18.8h, v19.8h\n"
- "cmp x14, #0x10\n"
"uzp1 v16.16b, v16.16b, v17.16b\n"
"bge 29f\n"
- "tbz x14, #3, 24f\n"
+ "tbz x15, #3, 24f\n"
"str d16, [x13], #0x8\n"
- "tbz x14, #2, 22f\n"
+ "tbz x15, #2, 22f\n"
"st1 { v16.s }[2], [x13], #0x4\n"
- "tbz x14, #1, 21f\n"
+ "tbz x15, #1, 21f\n"
"st1 { v16.h }[6], [x13], #0x2\n"
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[14], [x13]\n"
"b 28f\n"
"21:" // Height 1: Partial direct writeback: partial_1_12
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[12], [x13]\n"
"b 28f\n"
"22:" // Height 1: Partial direct writeback: partial_2_8
- "tbz x14, #1, 23f\n"
+ "tbz x15, #1, 23f\n"
"st1 { v16.h }[4], [x13], #0x2\n"
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[10], [x13]\n"
"b 28f\n"
"23:" // Height 1: Partial direct writeback: partial_1_8
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[8], [x13]\n"
"b 28f\n"
"24:" // Height 1: Partial direct writeback: partial_4_0
- "tbz x14, #2, 26f\n"
+ "tbz x15, #2, 26f\n"
"str s16, [x13], #0x4\n"
- "tbz x14, #1, 25f\n"
+ "tbz x15, #1, 25f\n"
"st1 { v16.h }[2], [x13], #0x2\n"
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[6], [x13]\n"
"b 28f\n"
"25:" // Height 1: Partial direct writeback: partial_1_4
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[4], [x13]\n"
"b 28f\n"
"26:" // Height 1: Partial direct writeback: partial_2_0
- "tbz x14, #1, 27f\n"
+ "tbz x15, #1, 27f\n"
"str h16, [x13], #0x2\n"
- "tbz x14, #0, 28f\n"
+ "tbz x15, #0, 28f\n"
"st1 { v16.b }[2], [x13]\n"
"b 28f\n"
"27:" // Height 1: Partial direct writeback: partial_1_0
@@ -388,18 +388,18 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"str q16, [x13, #0x0]\n"
"add x13, x13, #0x10\n"
"30:" // Height 1: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x15, x15, #0x10\n"
"bgt 2b\n"
"b 122f\n"
"31:" // Height 2
- "mov x15, %x[col_bias]\n"
+ "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v12.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v15.16b, #0x1\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"32:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -410,80 +410,80 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"movi v22.4s, #0x0\n"
"movi v23.4s, #0x0\n"
"33:" // Height 2: setup done
- "mov x11, #0x0\n"
+ "mov x12, #0x0\n"
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w10, [x20, x11, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 35f\n"
- "ldr x20, [%x[input_ptr], x11, LSL #0x3]\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
- "ldr x9, [x20, #0x0]\n"
- "ldr x28, [x20, #0x8]\n"
- "cbnz x11, 36f\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x9, [x20, #0x8]\n"
+ "cbnz x12, 36f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x20\n"
"add x9, x9, x20\n"
- "add x28, x28, x20\n"
"b 36f\n"
"35:" // Height 2: setup direct input
- "mov x9, %x[input_ptr]\n"
- "add x28, x9, x21\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x9, x10, x21\n"
"36:" // Height 2: input setup done
- "cmp x10, #0x10\n"
+ "cmp x11, #0x10\n"
"blt 41f\n"
- "ldr q0, [x9, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q1, [x28, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q1, [x9, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
"blt 39f\n"
"37:" // Height 2: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x20, [x12, #0x78]\n"
+ "ldr x20, [x14, #0x78]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr d25, [x12, #0x70]\n"
+ "ldr d25, [x14, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v25.d[1], x20\n"
+ "ldr x23, [x14, #0x88]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr d24, [x12, #0x80]\n"
+ "ldr d24, [x14, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr x23, [x12, #0x88]\n"
+ "mov v25.d[1], x20\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr d30, [x12, #0x90]\n"
+ "ldr d30, [x14, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "ldr x22, [x12, #0x98]\n"
+ "ldr x22, [x14, #0x98]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr d29, [x12, #0xa0]\n"
- "ldr x21, [x12, #0xa8]\n"
+ "ldr d29, [x14, #0xa0]\n"
+ "ldr x21, [x14, #0xa8]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr d28, [x12, #0xb0]\n"
- "ldr x20, [x12, #0xb8]\n"
+ "ldr d28, [x14, #0xb0]\n"
+ "ldr x20, [x14, #0xb8]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr d27, [x12, #0xc0]\n"
+ "ldr d27, [x14, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
"mov v24.d[1], x23\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr d26, [x12, #0xd0]\n"
+ "ldr d26, [x14, #0xd0]\n"
".inst 0x6fa0e333 // udot v19.4s, v25.16b, v0.4b[1]\n"
"mov v30.d[1], x22\n"
".inst 0x6fa1e337 // udot v23.4s, v25.16b, v1.4b[1]\n"
- "ldr d25, [x12, #0xe0]\n"
+ "ldr d25, [x14, #0xe0]\n"
"mov v29.d[1], x21\n"
- "ldr x23, [x12, #0xc8]\n"
+ "ldr x23, [x14, #0xc8]\n"
"mov v28.d[1], x20\n"
- "ldr x22, [x12, #0xd8]\n"
- "ldr x21, [x12, #0xe8]\n"
+ "ldr x22, [x14, #0xd8]\n"
+ "ldr x21, [x14, #0xe8]\n"
".inst 0x6f80eb10 // udot v16.4s, v24.16b, v0.4b[2]\n"
".inst 0x6f81eb14 // udot v20.4s, v24.16b, v1.4b[2]\n"
- "ldr d24, [x12, #0xf0]\n"
- "ldr x20, [x12, #0xf8]\n"
+ "ldr d24, [x14, #0xf0]\n"
+ "ldr x20, [x14, #0xf8]\n"
".inst 0x6f80ebd1 // udot v17.4s, v30.16b, v0.4b[2]\n"
".inst 0x6f81ebd5 // udot v21.4s, v30.16b, v1.4b[2]\n"
"mov v27.d[1], x23\n"
@@ -494,9 +494,9 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f80eb93 // udot v19.4s, v28.16b, v0.4b[2]\n"
"mov v24.d[1], x20\n"
".inst 0x6f81eb97 // udot v23.4s, v28.16b, v1.4b[2]\n"
+ "add x10, x10, #0x10\n"
"add x9, x9, #0x10\n"
- "add x28, x28, #0x10\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x6fa0eb70 // udot v16.4s, v27.16b, v0.4b[3]\n"
".inst 0x6fa1eb74 // udot v20.4s, v27.16b, v1.4b[3]\n"
".inst 0x6fa0eb51 // udot v17.4s, v26.16b, v0.4b[3]\n"
@@ -509,53 +509,53 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"38:" // Height 2: Multiply loop: unique 5: skip row sum
- "ldr q0, [x9, #0x0]\n"
- "sub x10, x10, #0x10\n"
- "ldr q1, [x28, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "sub x11, x11, #0x10\n"
+ "ldr q1, [x9, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"bge 37b\n"
"39:" // Height 2: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x10, x10, #0x10\n"
+ "sub x11, x11, #0x10\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr q25, [x12, #0x70]\n"
+ "ldr q25, [x14, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr q24, [x12, #0x80]\n"
+ "ldr q24, [x14, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "add x28, x28, #0x10\n"
+ "add x9, x9, #0x10\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr q30, [x12, #0x90]\n"
+ "ldr q30, [x14, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr q29, [x12, #0xa0]\n"
+ "ldr q29, [x14, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr q28, [x12, #0xb0]\n"
+ "ldr q28, [x14, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr q27, [x12, #0xc0]\n"
+ "ldr q27, [x14, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr q26, [x12, #0xd0]\n"
+ "ldr q26, [x14, #0xd0]\n"
".inst 0x6fa0e333 // udot v19.4s, v25.16b, v0.4b[1]\n"
".inst 0x6fa1e337 // udot v23.4s, v25.16b, v1.4b[1]\n"
- "ldr q25, [x12, #0xe0]\n"
+ "ldr q25, [x14, #0xe0]\n"
".inst 0x6f80eb10 // udot v16.4s, v24.16b, v0.4b[2]\n"
".inst 0x6f81eb14 // udot v20.4s, v24.16b, v1.4b[2]\n"
- "ldr q24, [x12, #0xf0]\n"
+ "ldr q24, [x14, #0xf0]\n"
".inst 0x6f80ebd1 // udot v17.4s, v30.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x6f81ebd5 // udot v21.4s, v30.16b, v1.4b[2]\n"
".inst 0x6f80ebb2 // udot v18.4s, v29.16b, v0.4b[2]\n"
".inst 0x6f81ebb6 // udot v22.4s, v29.16b, v1.4b[2]\n"
@@ -573,29 +573,29 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"40:" // Height 2: Multiply loop: unique 6: skip row sum
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
"41:" // Height 2: Multiply loop: Main loop skip
- "cbz x10, 48f\n"
- "cmp x10, #0x4\n"
+ "cbz x11, 48f\n"
+ "cmp x11, #0x4\n"
"blt 44f\n"
"42:" // Height 2: Multiply loop: Odd block loop
- "ldr s0, [x9], #0x4\n"
- "ldr s1, [x28], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "ldr s1, [x9], #0x4\n"
"tbnz %x[flags], #31, 43f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"43:" // Height 2: Multiply loop: unique 7: skip row sum
- "ldr q27, [x12, #0x0]\n"
- "sub x10, x10, #0x4\n"
- "ldr q26, [x12, #0x10]\n"
- "cmp x10, #0x4\n"
- "ldr q25, [x12, #0x20]\n"
+ "ldr q27, [x14, #0x0]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q26, [x14, #0x10]\n"
+ "cmp x11, #0x4\n"
+ "ldr q25, [x14, #0x20]\n"
+ "ldr q24, [x14, #0x30]\n"
".inst 0x6f80e370 // udot v16.4s, v27.16b, v0.4b[0]\n"
- "ldr q24, [x12, #0x30]\n"
".inst 0x6f81e374 // udot v20.4s, v27.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x6f80e351 // udot v17.4s, v26.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x6f81e355 // udot v21.4s, v26.16b, v1.4b[0]\n"
".inst 0x6f80e332 // udot v18.4s, v25.16b, v0.4b[0]\n"
".inst 0x6f81e336 // udot v22.4s, v25.16b, v1.4b[0]\n"
@@ -603,44 +603,44 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f81e317 // udot v23.4s, v24.16b, v1.4b[0]\n"
"bge 42b\n"
"44:" // Height 2: Multiply loop: Skip odd blocks
- "cbz x10, 48f\n"
- "tbz x10, #1, 45f\n"
- "ldr h0, [x9], #0x2\n"
- "ldr h1, [x28], #0x2\n"
- "tbz x10, #0, 46f\n"
- "ld1 { v0.b }[2], [x9]\n"
- "ld1 { v1.b }[2], [x28]\n"
+ "cbz x11, 48f\n"
+ "tbz x11, #1, 45f\n"
+ "ldr h0, [x10], #0x2\n"
+ "ldr h1, [x9], #0x2\n"
+ "tbz x11, #0, 46f\n"
+ "ld1 { v0.b }[2], [x10]\n"
+ "ld1 { v1.b }[2], [x9]\n"
"b 46f\n"
"45:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x9, #0x0]\n"
- "ldr b1, [x28, #0x0]\n"
+ "ldr b0, [x10, #0x0]\n"
+ "ldr b1, [x9, #0x0]\n"
"46:" // Height 2: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 47f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q24, [x12, #0x0]\n"
- ".inst 0x6f80e310 // udot v16.4s, v24.16b, v0.4b[0]\n"
- "ldr q26, [x12, #0x10]\n"
- ".inst 0x6f81e314 // udot v20.4s, v24.16b, v1.4b[0]\n"
- "ldr q25, [x12, #0x20]\n"
+ "ldr q27, [x14, #0x0]\n"
+ "ldr q26, [x14, #0x10]\n"
+ "ldr q25, [x14, #0x20]\n"
+ "ldr q24, [x14, #0x30]\n"
+ ".inst 0x6f80e370 // udot v16.4s, v27.16b, v0.4b[0]\n"
+ ".inst 0x6f81e374 // udot v20.4s, v27.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x6f80e351 // udot v17.4s, v26.16b, v0.4b[0]\n"
- "ldr q24, [x12, #0x30]\n"
".inst 0x6f81e355 // udot v21.4s, v26.16b, v1.4b[0]\n"
".inst 0x6f80e332 // udot v18.4s, v25.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x6f81e336 // udot v22.4s, v25.16b, v1.4b[0]\n"
".inst 0x6f80e313 // udot v19.4s, v24.16b, v0.4b[0]\n"
".inst 0x6f81e317 // udot v23.4s, v24.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x11, x11, #0x1\n"
- "cmp x11, x20\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x20\n"
"bne 34b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20\n"
"prfm pstl1keep, [x13, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add x24, x13, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -652,28 +652,28 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"mul v11.4s, v11.4s, v24.4s\n"
"mul v12.4s, v12.4s, v24.4s\n"
"49:" // Height 2: skip row sum fixup
- "ldr q27, [x15, #0x0]\n"
+ "ldr q28, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q26, [x15, #0x10]\n"
+ "ldr q27, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q25, [x15, #0x20]\n"
+ "ldr q26, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q24, [x15, #0x30]\n"
+ "ldr q25, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "add v16.4s, v16.4s, v27.4s\n"
- "add v17.4s, v17.4s, v26.4s\n"
- "add v18.4s, v18.4s, v25.4s\n"
- "add v19.4s, v19.4s, v24.4s\n"
- "add v20.4s, v20.4s, v27.4s\n"
- "add v21.4s, v21.4s, v26.4s\n"
- "add v22.4s, v22.4s, v25.4s\n"
- "add v23.4s, v23.4s, v24.4s\n"
+ "add v16.4s, v16.4s, v28.4s\n"
+ "add v17.4s, v17.4s, v27.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
"ld1r { v24.4s }, [x20]\n"
+ "add v19.4s, v19.4s, v25.4s\n"
+ "add v20.4s, v20.4s, v28.4s\n"
+ "add v21.4s, v21.4s, v27.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v25.4s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"ld1r { v0.4s }, [x20]\n"
@@ -685,31 +685,31 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"sqrdmulh v21.4s, v21.4s, v24.4s\n"
"sqrdmulh v22.4s, v22.4s, v24.4s\n"
"sqrdmulh v23.4s, v23.4s, v24.4s\n"
- "add x15, x15, #0x40\n"
+ "add x16, x16, #0x40\n"
"tbz %x[flags], #5, 50f\n"
"and v24.16b, v16.16b, v0.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v24.4s\n"
"and v30.16b, v17.16b, v0.16b\n"
"and v29.16b, v18.16b, v0.16b\n"
"and v28.16b, v19.16b, v0.16b\n"
"and v27.16b, v20.16b, v0.16b\n"
"and v26.16b, v21.16b, v0.16b\n"
"and v25.16b, v22.16b, v0.16b\n"
- "and v24.16b, v23.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v27.4s, v27.4s, #0x1f\n"
"sshr v26.4s, v26.4s, #0x1f\n"
"sshr v25.4s, v25.4s, #0x1f\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v24.4s\n"
+ "and v24.16b, v23.16b, v0.16b\n"
"sqadd v17.4s, v17.4s, v30.4s\n"
"sqadd v18.4s, v18.4s, v29.4s\n"
"sqadd v19.4s, v19.4s, v28.4s\n"
"sqadd v20.4s, v20.4s, v27.4s\n"
"sqadd v21.4s, v21.4s, v26.4s\n"
"sqadd v22.4s, v22.4s, v25.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v23.4s, v23.4s, v24.4s\n"
"50:" // Height 2: no shift correction
"srshl v16.4s, v16.4s, v0.4s\n"
@@ -721,27 +721,28 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v24.4s }, [x20]\n"
- "add v16.4s, v16.4s, v24.4s\n"
- "add v17.4s, v17.4s, v24.4s\n"
- "add v18.4s, v18.4s, v24.4s\n"
- "add v19.4s, v19.4s, v24.4s\n"
- "add v20.4s, v20.4s, v24.4s\n"
- "add v21.4s, v21.4s, v24.4s\n"
- "add v22.4s, v22.4s, v24.4s\n"
- "add v23.4s, v23.4s, v24.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v24.4s }, [x20]\n"
- "smin v16.4s, v16.4s, v24.4s\n"
- "smin v17.4s, v17.4s, v24.4s\n"
- "smin v18.4s, v18.4s, v24.4s\n"
- "smin v19.4s, v19.4s, v24.4s\n"
- "smin v20.4s, v20.4s, v24.4s\n"
- "smin v21.4s, v21.4s, v24.4s\n"
- "smin v22.4s, v22.4s, v24.4s\n"
- "smin v23.4s, v23.4s, v24.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v25.4s }, [x21]\n"
+ "cmp x15, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v26.4s\n"
+ "add v17.4s, v17.4s, v26.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v26.4s\n"
+ "add v20.4s, v20.4s, v26.4s\n"
+ "add v21.4s, v21.4s, v26.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v26.4s\n"
+ "smin v16.4s, v16.4s, v25.4s\n"
+ "smin v17.4s, v17.4s, v25.4s\n"
+ "smin v18.4s, v18.4s, v25.4s\n"
+ "smin v19.4s, v19.4s, v25.4s\n"
+ "smin v20.4s, v20.4s, v25.4s\n"
+ "smin v21.4s, v21.4s, v25.4s\n"
+ "smin v22.4s, v22.4s, v25.4s\n"
+ "smin v23.4s, v23.4s, v25.4s\n"
"smax v16.4s, v16.4s, v24.4s\n"
"smax v17.4s, v17.4s, v24.4s\n"
"smax v18.4s, v18.4s, v24.4s\n"
@@ -754,88 +755,87 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"uzp1 v18.8h, v18.8h, v19.8h\n"
"uzp1 v20.8h, v20.8h, v21.8h\n"
"uzp1 v17.8h, v22.8h, v23.8h\n"
- "cmp x14, #0x10\n"
"uzp1 v16.16b, v16.16b, v18.16b\n"
"uzp1 v20.16b, v20.16b, v17.16b\n"
"bge 59f\n"
- "tbz x14, #3, 54f\n"
+ "tbz x15, #3, 54f\n"
"str d16, [x13], #0x8\n"
- "str d20, [x23], #0x8\n"
- "tbz x14, #2, 52f\n"
+ "str d20, [x24], #0x8\n"
+ "tbz x15, #2, 52f\n"
"st1 { v16.s }[2], [x13], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "tbz x14, #1, 51f\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "tbz x15, #1, 51f\n"
"st1 { v16.h }[6], [x13], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "tbz x14, #0, 58f\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[14], [x13]\n"
- "st1 { v20.b }[14], [x23]\n"
+ "st1 { v20.b }[14], [x24]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
- "tbz x14, #0, 58f\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[12], [x13]\n"
- "st1 { v20.b }[12], [x23]\n"
+ "st1 { v20.b }[12], [x24]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
- "tbz x14, #1, 53f\n"
+ "tbz x15, #1, 53f\n"
"st1 { v16.h }[4], [x13], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "tbz x14, #0, 58f\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[10], [x13]\n"
- "st1 { v20.b }[10], [x23]\n"
+ "st1 { v20.b }[10], [x24]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
- "tbz x14, #0, 58f\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[8], [x13]\n"
- "st1 { v20.b }[8], [x23]\n"
+ "st1 { v20.b }[8], [x24]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
- "tbz x14, #2, 56f\n"
+ "tbz x15, #2, 56f\n"
"str s16, [x13], #0x4\n"
- "str s20, [x23], #0x4\n"
- "tbz x14, #1, 55f\n"
+ "str s20, [x24], #0x4\n"
+ "tbz x15, #1, 55f\n"
"st1 { v16.h }[2], [x13], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "tbz x14, #0, 58f\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[6], [x13]\n"
- "st1 { v20.b }[6], [x23]\n"
+ "st1 { v20.b }[6], [x24]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
- "tbz x14, #0, 58f\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[4], [x13]\n"
- "st1 { v20.b }[4], [x23]\n"
+ "st1 { v20.b }[4], [x24]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
- "tbz x14, #1, 57f\n"
+ "tbz x15, #1, 57f\n"
"str h16, [x13], #0x2\n"
- "str h20, [x23], #0x2\n"
- "tbz x14, #0, 58f\n"
+ "str h20, [x24], #0x2\n"
+ "tbz x15, #0, 58f\n"
"st1 { v16.b }[2], [x13]\n"
- "st1 { v20.b }[2], [x23]\n"
+ "st1 { v20.b }[2], [x24]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
"str b16, [x13, #0x0]\n"
- "str b20, [x23, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
"str q16, [x13, #0x0]\n"
"add x13, x13, #0x10\n"
- "str q20, [x23, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
"60:" // Height 2: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x15, x15, #0x10\n"
"bgt 32b\n"
"b 122f\n"
"61:" // Height 3
- "mov x15, %x[col_bias]\n"
+ "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v12.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"movi v13.4s, #0x0\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
"movi v15.16b, #0x1\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"62:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -850,105 +850,105 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"movi v26.4s, #0x0\n"
"movi v27.4s, #0x0\n"
"63:" // Height 3: setup done
- "mov x11, #0x0\n"
+ "mov x12, #0x0\n"
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w10, [x20, x11, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 65f\n"
- "ldr x20, [%x[input_ptr], x11, LSL #0x3]\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
- "ldr x9, [x20, #0x0]\n"
- "ldr x28, [x20, #0x8]\n"
- "ldr x27, [x20, #0x10]\n"
- "cbnz x11, 66f\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x9, [x20, #0x8]\n"
+ "ldr x28, [x20, #0x10]\n"
+ "cbnz x12, 66f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x20\n"
"add x9, x9, x20\n"
"add x28, x28, x20\n"
- "add x27, x27, x20\n"
"b 66f\n"
"65:" // Height 3: setup direct input
- "mov x9, %x[input_ptr]\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x9, x10, x21\n"
"add x28, x9, x21\n"
- "add x27, x28, x21\n"
"66:" // Height 3: input setup done
- "cmp x10, #0x10\n"
+ "cmp x11, #0x10\n"
"blt 71f\n"
- "ldr q0, [x9, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q1, [x28, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q1, [x9, #0x0]\n"
+ "ldr q2, [x28, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
"blt 69f\n"
"67:" // Height 3: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x20, [x12, #0x78]\n"
+ "ldr x20, [x14, #0x78]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr x23, [x12, #0x88]\n"
+ "ldr x23, [x14, #0x88]\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr d29, [x12, #0x70]\n"
+ "ldr d29, [x14, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v29.d[1], x20\n"
+ "ldr x22, [x14, #0x98]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr x22, [x12, #0x98]\n"
+ "ldr x21, [x14, #0xa8]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr d28, [x12, #0x80]\n"
+ "ldr d28, [x14, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x12, #0xa8]\n"
+ "mov v29.d[1], x20\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x12, #0xb8]\n"
+ "ldr x20, [x14, #0xb8]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr d5, [x12, #0x90]\n"
+ "ldr d5, [x14, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
"mov v28.d[1], x23\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "mov v5.d[1], x22\n"
+ "ldr x23, [x14, #0xc8]\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr d4, [x12, #0xa0]\n"
+ "ldr d4, [x14, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "mov v4.d[1], x21\n"
+ "mov v5.d[1], x22\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
- "ldr x23, [x12, #0xc8]\n"
+ "ldr x22, [x14, #0xd8]\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr d3, [x12, #0xb0]\n"
+ "ldr d3, [x14, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "mov v3.d[1], x20\n"
+ "mov v4.d[1], x21\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
- "ldr x22, [x12, #0xd8]\n"
+ "ldr x21, [x14, #0xe8]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr d31, [x12, #0xc0]\n"
+ "ldr d31, [x14, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "ldr x21, [x12, #0xe8]\n"
+ "mov v3.d[1], x20\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
- "ldr x20, [x12, #0xf8]\n"
+ "ldr x20, [x14, #0xf8]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr d30, [x12, #0xd0]\n"
+ "ldr d30, [x14, #0xd0]\n"
".inst 0x6fa0e3b3 // udot v19.4s, v29.16b, v0.4b[1]\n"
"mov v31.d[1], x23\n"
".inst 0x6fa1e3b7 // udot v23.4s, v29.16b, v1.4b[1]\n"
- "mov v30.d[1], x22\n"
+ "add x10, x10, #0x10\n"
".inst 0x6fa2e3bb // udot v27.4s, v29.16b, v2.4b[1]\n"
- "ldr d29, [x12, #0xe0]\n"
+ "ldr d29, [x14, #0xe0]\n"
".inst 0x6f80eb90 // udot v16.4s, v28.16b, v0.4b[2]\n"
- "mov v29.d[1], x21\n"
+ "mov v30.d[1], x22\n"
".inst 0x6f81eb94 // udot v20.4s, v28.16b, v1.4b[2]\n"
"add x9, x9, #0x10\n"
".inst 0x6f82eb98 // udot v24.4s, v28.16b, v2.4b[2]\n"
- "ldr d28, [x12, #0xf0]\n"
+ "ldr d28, [x14, #0xf0]\n"
".inst 0x6f80e8b1 // udot v17.4s, v5.16b, v0.4b[2]\n"
- "mov v28.d[1], x20\n"
+ "mov v29.d[1], x21\n"
".inst 0x6f81e8b5 // udot v21.4s, v5.16b, v1.4b[2]\n"
"add x28, x28, #0x10\n"
".inst 0x6f82e8b9 // udot v25.4s, v5.16b, v2.4b[2]\n"
- "add x27, x27, #0x10\n"
+ "mov v28.d[1], x20\n"
".inst 0x6f80e892 // udot v18.4s, v4.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x6f81e896 // udot v22.4s, v4.16b, v1.4b[2]\n"
".inst 0x6f82e89a // udot v26.4s, v4.16b, v2.4b[2]\n"
".inst 0x6f80e873 // udot v19.4s, v3.16b, v0.4b[2]\n"
@@ -971,65 +971,65 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"68:" // Height 3: Multiply loop: unique 9: skip row sum
- "ldr q0, [x9, #0x0]\n"
- "sub x10, x10, #0x10\n"
- "ldr q1, [x28, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "sub x11, x11, #0x10\n"
+ "ldr q1, [x9, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q2, [x28, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
"prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"bge 67b\n"
"69:" // Height 3: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x10, x10, #0x10\n"
+ "sub x11, x11, #0x10\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr q29, [x12, #0x70]\n"
+ "ldr q29, [x14, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "add x28, x28, #0x10\n"
+ "add x9, x9, #0x10\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr q28, [x12, #0x80]\n"
+ "ldr q28, [x14, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr q5, [x12, #0x90]\n"
+ "ldr q5, [x14, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr q4, [x12, #0xa0]\n"
+ "ldr q4, [x14, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
- "ldr q3, [x12, #0xb0]\n"
+ "ldr q3, [x14, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
- "ldr q31, [x12, #0xc0]\n"
+ "ldr q31, [x14, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
- "ldr q30, [x12, #0xd0]\n"
+ "ldr q30, [x14, #0xd0]\n"
".inst 0x6fa0e3b3 // udot v19.4s, v29.16b, v0.4b[1]\n"
".inst 0x6fa1e3b7 // udot v23.4s, v29.16b, v1.4b[1]\n"
".inst 0x6fa2e3bb // udot v27.4s, v29.16b, v2.4b[1]\n"
- "ldr q29, [x12, #0xe0]\n"
+ "ldr q29, [x14, #0xe0]\n"
".inst 0x6f80eb90 // udot v16.4s, v28.16b, v0.4b[2]\n"
".inst 0x6f81eb94 // udot v20.4s, v28.16b, v1.4b[2]\n"
".inst 0x6f82eb98 // udot v24.4s, v28.16b, v2.4b[2]\n"
- "ldr q28, [x12, #0xf0]\n"
+ "ldr q28, [x14, #0xf0]\n"
".inst 0x6f80e8b1 // udot v17.4s, v5.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x6f81e8b5 // udot v21.4s, v5.16b, v1.4b[2]\n"
".inst 0x6f82e8b9 // udot v25.4s, v5.16b, v2.4b[2]\n"
".inst 0x6f80e892 // udot v18.4s, v4.16b, v0.4b[2]\n"
@@ -1055,32 +1055,32 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"70:" // Height 3: Multiply loop: unique 10: skip row sum
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
"prfm pldl1keep, [x28, #0x80]\n"
- "prfm pldl1keep, [x27, #0x80]\n"
"71:" // Height 3: Multiply loop: Main loop skip
- "cbz x10, 78f\n"
- "cmp x10, #0x4\n"
+ "cbz x11, 78f\n"
+ "cmp x11, #0x4\n"
"blt 74f\n"
"72:" // Height 3: Multiply loop: Odd block loop
- "ldr s0, [x9], #0x4\n"
- "ldr s1, [x28], #0x4\n"
- "ldr s2, [x27], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "ldr s1, [x9], #0x4\n"
+ "ldr s2, [x28], #0x4\n"
"tbnz %x[flags], #31, 73f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"73:" // Height 3: Multiply loop: unique 11: skip row sum
- "ldr q31, [x12, #0x0]\n"
- "sub x10, x10, #0x4\n"
- "ldr q30, [x12, #0x10]\n"
- "cmp x10, #0x4\n"
- "ldr q29, [x12, #0x20]\n"
+ "ldr q31, [x14, #0x0]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q30, [x14, #0x10]\n"
+ "cmp x11, #0x4\n"
+ "ldr q29, [x14, #0x20]\n"
+ "ldr q28, [x14, #0x30]\n"
".inst 0x6f80e3f0 // udot v16.4s, v31.16b, v0.4b[0]\n"
- "ldr q28, [x12, #0x30]\n"
".inst 0x6f81e3f4 // udot v20.4s, v31.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x6f82e3f8 // udot v24.4s, v31.16b, v2.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x6f80e3d1 // udot v17.4s, v30.16b, v0.4b[0]\n"
".inst 0x6f81e3d5 // udot v21.4s, v30.16b, v1.4b[0]\n"
".inst 0x6f82e3d9 // udot v25.4s, v30.16b, v2.4b[0]\n"
@@ -1092,36 +1092,36 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f82e39b // udot v27.4s, v28.16b, v2.4b[0]\n"
"bge 72b\n"
"74:" // Height 3: Multiply loop: Skip odd blocks
- "cbz x10, 78f\n"
- "tbz x10, #1, 75f\n"
- "ldr h0, [x9], #0x2\n"
- "ldr h1, [x28], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "tbz x10, #0, 76f\n"
- "ld1 { v0.b }[2], [x9]\n"
- "ld1 { v1.b }[2], [x28]\n"
- "ld1 { v2.b }[2], [x27]\n"
+ "cbz x11, 78f\n"
+ "tbz x11, #1, 75f\n"
+ "ldr h0, [x10], #0x2\n"
+ "ldr h1, [x9], #0x2\n"
+ "ldr h2, [x28], #0x2\n"
+ "tbz x11, #0, 76f\n"
+ "ld1 { v0.b }[2], [x10]\n"
+ "ld1 { v1.b }[2], [x9]\n"
+ "ld1 { v2.b }[2], [x28]\n"
"b 76f\n"
"75:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x9, #0x0]\n"
- "ldr b1, [x28, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
+ "ldr b0, [x10, #0x0]\n"
+ "ldr b1, [x9, #0x0]\n"
+ "ldr b2, [x28, #0x0]\n"
"76:" // Height 3: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 77f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 12: skip row sum
- "ldr q28, [x12, #0x0]\n"
- ".inst 0x6f80e390 // udot v16.4s, v28.16b, v0.4b[0]\n"
- "ldr q30, [x12, #0x10]\n"
- ".inst 0x6f81e394 // udot v20.4s, v28.16b, v1.4b[0]\n"
- "ldr q29, [x12, #0x20]\n"
- ".inst 0x6f82e398 // udot v24.4s, v28.16b, v2.4b[0]\n"
- "ldr q28, [x12, #0x30]\n"
+ "ldr q31, [x14, #0x0]\n"
+ "ldr q30, [x14, #0x10]\n"
+ "ldr q29, [x14, #0x20]\n"
+ "ldr q28, [x14, #0x30]\n"
+ ".inst 0x6f80e3f0 // udot v16.4s, v31.16b, v0.4b[0]\n"
+ ".inst 0x6f81e3f4 // udot v20.4s, v31.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
+ ".inst 0x6f82e3f8 // udot v24.4s, v31.16b, v2.4b[0]\n"
".inst 0x6f80e3d1 // udot v17.4s, v30.16b, v0.4b[0]\n"
".inst 0x6f81e3d5 // udot v21.4s, v30.16b, v1.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x6f82e3d9 // udot v25.4s, v30.16b, v2.4b[0]\n"
".inst 0x6f80e3b2 // udot v18.4s, v29.16b, v0.4b[0]\n"
".inst 0x6f81e3b6 // udot v22.4s, v29.16b, v1.4b[0]\n"
@@ -1131,15 +1131,15 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f82e39b // udot v27.4s, v28.16b, v2.4b[0]\n"
"78:" // Height 3: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x11, x11, #0x1\n"
- "cmp x11, x20\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x20\n"
"bne 64b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20\n"
- "add x22, x23, x20\n"
"prfm pstl1keep, [x13, #0x0]\n"
+ "add x24, x13, x20\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -1154,13 +1154,13 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"mul v12.4s, v12.4s, v28.4s\n"
"mul v13.4s, v13.4s, v28.4s\n"
"79:" // Height 3: skip row sum fixup
- "ldr q31, [x15, #0x0]\n"
+ "ldr q31, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q30, [x15, #0x10]\n"
+ "ldr q30, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q29, [x15, #0x20]\n"
+ "ldr q29, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q28, [x15, #0x30]\n"
+ "ldr q28, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
@@ -1183,10 +1183,11 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"add v26.4s, v26.4s, v29.4s\n"
"add v27.4s, v27.4s, v28.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v28.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v28.4s }, [x20]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"ld1r { v0.4s }, [x20]\n"
+ "add x16, x16, #0x40\n"
"sqrdmulh v16.4s, v16.4s, v28.4s\n"
"sqrdmulh v17.4s, v17.4s, v28.4s\n"
"sqrdmulh v18.4s, v18.4s, v28.4s\n"
@@ -1199,39 +1200,38 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"sqrdmulh v25.4s, v25.4s, v28.4s\n"
"sqrdmulh v26.4s, v26.4s, v28.4s\n"
"sqrdmulh v27.4s, v27.4s, v28.4s\n"
- "add x15, x15, #0x40\n"
"tbz %x[flags], #5, 80f\n"
"and v1.16b, v16.16b, v0.16b\n"
"and v31.16b, v17.16b, v0.16b\n"
"and v30.16b, v18.16b, v0.16b\n"
"and v29.16b, v19.16b, v0.16b\n"
"and v28.16b, v20.16b, v0.16b\n"
+ "and v3.16b, v21.16b, v0.16b\n"
+ "and v2.16b, v22.16b, v0.16b\n"
"sshr v1.4s, v1.4s, #0x1f\n"
"sshr v31.4s, v31.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v1.4s\n"
"sqadd v17.4s, v17.4s, v31.4s\n"
"sqadd v18.4s, v18.4s, v30.4s\n"
"sqadd v19.4s, v19.4s, v29.4s\n"
"sqadd v20.4s, v20.4s, v28.4s\n"
- "and v3.16b, v21.16b, v0.16b\n"
- "and v2.16b, v22.16b, v0.16b\n"
"and v1.16b, v23.16b, v0.16b\n"
"and v31.16b, v24.16b, v0.16b\n"
"and v30.16b, v25.16b, v0.16b\n"
"and v29.16b, v26.16b, v0.16b\n"
"and v28.16b, v27.16b, v0.16b\n"
- "sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
+ "sqadd v21.4s, v21.4s, v3.4s\n"
+ "sqadd v22.4s, v22.4s, v2.4s\n"
"sshr v1.4s, v1.4s, #0x1f\n"
"sshr v31.4s, v31.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
- "sqadd v21.4s, v21.4s, v3.4s\n"
- "sqadd v22.4s, v22.4s, v2.4s\n"
"sqadd v23.4s, v23.4s, v1.4s\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"sqadd v25.4s, v25.4s, v30.4s\n"
@@ -1251,35 +1251,36 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"srshl v26.4s, v26.4s, v0.4s\n"
"srshl v27.4s, v27.4s, v0.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v28.4s }, [x20]\n"
- "add v16.4s, v16.4s, v28.4s\n"
- "add v17.4s, v17.4s, v28.4s\n"
- "add v18.4s, v18.4s, v28.4s\n"
- "add v19.4s, v19.4s, v28.4s\n"
- "add v20.4s, v20.4s, v28.4s\n"
- "add v21.4s, v21.4s, v28.4s\n"
- "add v22.4s, v22.4s, v28.4s\n"
- "add v23.4s, v23.4s, v28.4s\n"
- "add v24.4s, v24.4s, v28.4s\n"
- "add v25.4s, v25.4s, v28.4s\n"
- "add v26.4s, v26.4s, v28.4s\n"
- "add v27.4s, v27.4s, v28.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v28.4s }, [x20]\n"
- "smin v16.4s, v16.4s, v28.4s\n"
- "smin v17.4s, v17.4s, v28.4s\n"
- "smin v18.4s, v18.4s, v28.4s\n"
- "smin v19.4s, v19.4s, v28.4s\n"
- "smin v20.4s, v20.4s, v28.4s\n"
- "smin v21.4s, v21.4s, v28.4s\n"
- "smin v22.4s, v22.4s, v28.4s\n"
- "smin v23.4s, v23.4s, v28.4s\n"
- "smin v24.4s, v24.4s, v28.4s\n"
- "smin v25.4s, v25.4s, v28.4s\n"
- "smin v26.4s, v26.4s, v28.4s\n"
- "smin v27.4s, v27.4s, v28.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v30.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v29.4s }, [x21]\n"
+ "cmp x15, #0x10\n"
"ld1r { v28.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v30.4s\n"
+ "add v17.4s, v17.4s, v30.4s\n"
+ "add v18.4s, v18.4s, v30.4s\n"
+ "add v19.4s, v19.4s, v30.4s\n"
+ "add v20.4s, v20.4s, v30.4s\n"
+ "add v21.4s, v21.4s, v30.4s\n"
+ "add v22.4s, v22.4s, v30.4s\n"
+ "add v23.4s, v23.4s, v30.4s\n"
+ "add v24.4s, v24.4s, v30.4s\n"
+ "add v25.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v30.4s\n"
+ "smin v16.4s, v16.4s, v29.4s\n"
+ "smin v17.4s, v17.4s, v29.4s\n"
+ "smin v18.4s, v18.4s, v29.4s\n"
+ "smin v19.4s, v19.4s, v29.4s\n"
+ "smin v20.4s, v20.4s, v29.4s\n"
+ "smin v21.4s, v21.4s, v29.4s\n"
+ "smin v22.4s, v22.4s, v29.4s\n"
+ "smin v23.4s, v23.4s, v29.4s\n"
+ "smin v24.4s, v24.4s, v29.4s\n"
+ "smin v25.4s, v25.4s, v29.4s\n"
+ "smin v26.4s, v26.4s, v29.4s\n"
+ "smin v27.4s, v27.4s, v29.4s\n"
"smax v16.4s, v16.4s, v28.4s\n"
"smax v17.4s, v17.4s, v28.4s\n"
"smax v18.4s, v18.4s, v28.4s\n"
@@ -1298,109 +1299,109 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"uzp1 v18.8h, v22.8h, v23.8h\n"
"uzp1 v24.8h, v24.8h, v25.8h\n"
"uzp1 v17.8h, v26.8h, v27.8h\n"
- "cmp x14, #0x10\n"
"uzp1 v16.16b, v16.16b, v19.16b\n"
"uzp1 v20.16b, v20.16b, v18.16b\n"
"uzp1 v24.16b, v24.16b, v17.16b\n"
"bge 89f\n"
- "tbz x14, #3, 84f\n"
+ "tbz x15, #3, 84f\n"
"str d16, [x13], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "tbz x14, #2, 82f\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "tbz x15, #2, 82f\n"
"st1 { v16.s }[2], [x13], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "tbz x14, #1, 81f\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "tbz x15, #1, 81f\n"
"st1 { v16.h }[6], [x13], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "tbz x14, #0, 88f\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[14], [x13]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
- "tbz x14, #0, 88f\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[12], [x13]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
- "tbz x14, #1, 83f\n"
+ "tbz x15, #1, 83f\n"
"st1 { v16.h }[4], [x13], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "tbz x14, #0, 88f\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[10], [x13]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
- "tbz x14, #0, 88f\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[8], [x13]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
- "tbz x14, #2, 86f\n"
+ "tbz x15, #2, 86f\n"
"str s16, [x13], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "tbz x14, #1, 85f\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "tbz x15, #1, 85f\n"
"st1 { v16.h }[2], [x13], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "tbz x14, #0, 88f\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[6], [x13]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
- "tbz x14, #0, 88f\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[4], [x13]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
- "tbz x14, #1, 87f\n"
+ "tbz x15, #1, 87f\n"
"str h16, [x13], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "tbz x14, #0, 88f\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "tbz x15, #0, 88f\n"
"st1 { v16.b }[2], [x13]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
"str b16, [x13, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
"str q16, [x13, #0x0]\n"
"add x13, x13, #0x10\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"90:" // Height 3: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x15, x15, #0x10\n"
"bgt 62b\n"
"b 122f\n"
"91:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"mov x20, #0x4\n"
- "mov x15, %x[col_bias]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x16, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
- "movi v12.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
+ "movi v12.4s, #0x0\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_N]]\n"
+ "madd x20, x21, x20, x13\n"
"movi v13.4s, #0x0\n"
- "ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"movi v14.4s, #0x0\n"
- "ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_B_ptr]]\n"
"movi v15.16b, #0x1\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"92:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1419,118 +1420,118 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"movi v30.4s, #0x0\n"
"movi v31.4s, #0x0\n"
"93:" // Height 4: setup done
- "mov x11, #0x0\n"
+ "mov x12, #0x0\n"
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w10, [x20, x11, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w11, [x20, x12, LSL #0x2]\n"
"tbz %x[flags], #3, 95f\n"
- "ldr x20, [%x[input_ptr], x11, LSL #0x3]\n"
+ "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
- "ldr x9, [x20, #0x0]\n"
- "ldr x28, [x20, #0x8]\n"
- "ldr x27, [x20, #0x10]\n"
- "ldr x26, [x20, #0x18]\n"
- "cbnz x11, 96f\n"
+ "ldr x10, [x20, #0x0]\n"
+ "ldr x9, [x20, #0x8]\n"
+ "ldr x28, [x20, #0x10]\n"
+ "ldr x27, [x20, #0x18]\n"
+ "cbnz x12, 96f\n"
"ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x10, x10, x20\n"
"add x9, x9, x20\n"
"add x28, x28, x20\n"
"add x27, x27, x20\n"
- "add x26, x26, x20\n"
"b 96f\n"
"95:" // Height 4: setup direct input
- "mov x9, %x[input_ptr]\n"
+ "mov x10, %x[input_ptr]\n"
+ "add x9, x10, x21\n"
"add x28, x9, x21\n"
"add x27, x28, x21\n"
- "add x26, x27, x21\n"
"96:" // Height 4: input setup done
- "cmp x10, #0x10\n"
+ "cmp x11, #0x10\n"
"blt 101f\n"
- "ldr q0, [x9, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q1, [x28, #0x0]\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x26, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q1, [x9, #0x0]\n"
+ "ldr q2, [x28, #0x0]\n"
+ "ldr q3, [x27, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
"blt 99f\n"
"97:" // Height 4: Multiply loop: Main loop head
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "ldr x22, [x12, #0x78]\n"
+ "ldr x21, [x14, #0x78]\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "ldr x21, [x12, #0x88]\n"
+ "ldr x20, [x14, #0x88]\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "ldr x20, [x12, #0x98]\n"
+ "ldr x26, [x14, #0x98]\n"
".inst 0x6f83e09c // udot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr d4, [x12, #0x70]\n"
+ "ldr d4, [x14, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "mov v4.d[1], x22\n"
+ "ldr x25, [x14, #0xa8]\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "ldr x25, [x12, #0xa8]\n"
+ "ldr x24, [x14, #0xb8]\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
- "ldr x24, [x12, #0xb8]\n"
+ "mov v4.d[1], x21\n"
".inst 0x6f83e0bd // udot v29.4s, v5.16b, v3.4b[0]\n"
- "ldr d5, [x12, #0x80]\n"
+ "ldr d5, [x14, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
- "mov v5.d[1], x21\n"
+ "ldr x23, [x14, #0xc8]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
- "ldr x23, [x12, #0xc8]\n"
+ "ldr x22, [x14, #0xd8]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
- "ldr x22, [x12, #0xd8]\n"
+ "mov v5.d[1], x20\n"
".inst 0x6f83e0de // udot v30.4s, v6.16b, v3.4b[0]\n"
- "ldr d6, [x12, #0x90]\n"
+ "ldr d6, [x14, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x20\n"
+ "ldr x21, [x14, #0xe8]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x12, #0xe8]\n"
+ "ldr x20, [x14, #0xf8]\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
- "ldr x20, [x12, #0xf8]\n"
+ "mov v6.d[1], x26\n"
".inst 0x6f83e0ff // udot v31.4s, v7.16b, v3.4b[0]\n"
- "ldr d7, [x12, #0xa0]\n"
+ "ldr d7, [x14, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
- "mov v7.d[1], x25\n"
+ "add x10, x10, #0x10\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
"add x9, x9, #0x10\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
- "add x28, x28, #0x10\n"
+ "mov v7.d[1], x25\n"
".inst 0x6fa3e11c // udot v28.4s, v8.16b, v3.4b[1]\n"
- "ldr d8, [x12, #0xb0]\n"
+ "ldr d8, [x14, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
- "mov v8.d[1], x24\n"
+ "add x28, x28, #0x10\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
"add x27, x27, #0x10\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
- "add x26, x26, #0x10\n"
+ "mov v8.d[1], x24\n"
".inst 0x6fa3e13d // udot v29.4s, v9.16b, v3.4b[1]\n"
- "ldr d9, [x12, #0xc0]\n"
+ "ldr d9, [x14, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
- "mov v9.d[1], x23\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
+ "mov v9.d[1], x23\n"
".inst 0x6fa3e15e // udot v30.4s, v10.16b, v3.4b[1]\n"
- "ldr d10, [x12, #0xd0]\n"
+ "ldr d10, [x14, #0xd0]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
- "mov v10.d[1], x22\n"
".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
".inst 0x6fa2e09b // udot v27.4s, v4.16b, v2.4b[1]\n"
+ "mov v10.d[1], x22\n"
".inst 0x6fa3e09f // udot v31.4s, v4.16b, v3.4b[1]\n"
- "ldr d4, [x12, #0xe0]\n"
+ "ldr d4, [x14, #0xe0]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
- "mov v4.d[1], x21\n"
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
+ "mov v4.d[1], x21\n"
".inst 0x6f83e8bc // udot v28.4s, v5.16b, v3.4b[2]\n"
- "ldr d5, [x12, #0xf0]\n"
+ "ldr d5, [x14, #0xf0]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "mov v5.d[1], x20\n"
+ "add x14, x14, #0x100\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
- "add x12, x12, #0x100\n"
".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
+ "mov v5.d[1], x20\n"
".inst 0x6f83e8dd // udot v29.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f80e8f2 // udot v18.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8f6 // udot v22.4s, v7.16b, v1.4b[2]\n"
@@ -1562,77 +1563,77 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"98:" // Height 4: Multiply loop: unique 13: skip row sum
- "ldr q0, [x9, #0x0]\n"
- "sub x10, x10, #0x10\n"
- "ldr q1, [x28, #0x0]\n"
- "cmp x10, #0x20\n"
- "ldr q2, [x27, #0x0]\n"
- "ldr q3, [x26, #0x0]\n"
- "ldr q4, [x12, #0x0]\n"
- "ldr q5, [x12, #0x10]\n"
- "ldr q6, [x12, #0x20]\n"
- "ldr q7, [x12, #0x30]\n"
- "ldr q8, [x12, #0x40]\n"
- "ldr q9, [x12, #0x50]\n"
- "ldr q10, [x12, #0x60]\n"
+ "ldr q0, [x10, #0x0]\n"
+ "sub x11, x11, #0x10\n"
+ "ldr q1, [x9, #0x0]\n"
+ "cmp x11, #0x20\n"
+ "ldr q2, [x28, #0x0]\n"
+ "ldr q3, [x27, #0x0]\n"
+ "ldr q4, [x14, #0x0]\n"
+ "ldr q5, [x14, #0x10]\n"
+ "ldr q6, [x14, #0x20]\n"
+ "ldr q7, [x14, #0x30]\n"
+ "ldr q8, [x14, #0x40]\n"
+ "ldr q9, [x14, #0x50]\n"
+ "ldr q10, [x14, #0x60]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
"prfm pldl1keep, [x28, #0x80]\n"
"prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 97b\n"
"99:" // Height 4: Multiply loop: Single iteration only
".inst 0x6f80e090 // udot v16.4s, v4.16b, v0.4b[0]\n"
- "sub x10, x10, #0x10\n"
+ "sub x11, x11, #0x10\n"
".inst 0x6f81e094 // udot v20.4s, v4.16b, v1.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "add x10, x10, #0x10\n"
".inst 0x6f82e098 // udot v24.4s, v4.16b, v2.4b[0]\n"
- "add x28, x28, #0x10\n"
+ "add x9, x9, #0x10\n"
".inst 0x6f83e09c // udot v28.4s, v4.16b, v3.4b[0]\n"
- "ldr q4, [x12, #0x70]\n"
+ "ldr q4, [x14, #0x70]\n"
".inst 0x6f80e0b1 // udot v17.4s, v5.16b, v0.4b[0]\n"
- "add x27, x27, #0x10\n"
+ "add x28, x28, #0x10\n"
".inst 0x6f81e0b5 // udot v21.4s, v5.16b, v1.4b[0]\n"
- "add x26, x26, #0x10\n"
+ "add x27, x27, #0x10\n"
".inst 0x6f82e0b9 // udot v25.4s, v5.16b, v2.4b[0]\n"
".inst 0x6f83e0bd // udot v29.4s, v5.16b, v3.4b[0]\n"
- "ldr q5, [x12, #0x80]\n"
+ "ldr q5, [x14, #0x80]\n"
".inst 0x6f80e0d2 // udot v18.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d6 // udot v22.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0da // udot v26.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0de // udot v30.4s, v6.16b, v3.4b[0]\n"
- "ldr q6, [x12, #0x90]\n"
+ "ldr q6, [x14, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f7 // udot v23.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0fb // udot v27.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0ff // udot v31.4s, v7.16b, v3.4b[0]\n"
- "ldr q7, [x12, #0xa0]\n"
+ "ldr q7, [x14, #0xa0]\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
".inst 0x6fa1e114 // udot v20.4s, v8.16b, v1.4b[1]\n"
".inst 0x6fa2e118 // udot v24.4s, v8.16b, v2.4b[1]\n"
".inst 0x6fa3e11c // udot v28.4s, v8.16b, v3.4b[1]\n"
- "ldr q8, [x12, #0xb0]\n"
+ "ldr q8, [x14, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
".inst 0x6fa1e135 // udot v21.4s, v9.16b, v1.4b[1]\n"
".inst 0x6fa2e139 // udot v25.4s, v9.16b, v2.4b[1]\n"
".inst 0x6fa3e13d // udot v29.4s, v9.16b, v3.4b[1]\n"
- "ldr q9, [x12, #0xc0]\n"
+ "ldr q9, [x14, #0xc0]\n"
".inst 0x6fa0e152 // udot v18.4s, v10.16b, v0.4b[1]\n"
".inst 0x6fa1e156 // udot v22.4s, v10.16b, v1.4b[1]\n"
".inst 0x6fa2e15a // udot v26.4s, v10.16b, v2.4b[1]\n"
".inst 0x6fa3e15e // udot v30.4s, v10.16b, v3.4b[1]\n"
- "ldr q10, [x12, #0xd0]\n"
+ "ldr q10, [x14, #0xd0]\n"
".inst 0x6fa0e093 // udot v19.4s, v4.16b, v0.4b[1]\n"
".inst 0x6fa1e097 // udot v23.4s, v4.16b, v1.4b[1]\n"
".inst 0x6fa2e09b // udot v27.4s, v4.16b, v2.4b[1]\n"
".inst 0x6fa3e09f // udot v31.4s, v4.16b, v3.4b[1]\n"
- "ldr q4, [x12, #0xe0]\n"
+ "ldr q4, [x14, #0xe0]\n"
".inst 0x6f80e8b0 // udot v16.4s, v5.16b, v0.4b[2]\n"
".inst 0x6f81e8b4 // udot v20.4s, v5.16b, v1.4b[2]\n"
".inst 0x6f82e8b8 // udot v24.4s, v5.16b, v2.4b[2]\n"
".inst 0x6f83e8bc // udot v28.4s, v5.16b, v3.4b[2]\n"
- "ldr q5, [x12, #0xf0]\n"
+ "ldr q5, [x14, #0xf0]\n"
".inst 0x6f80e8d1 // udot v17.4s, v6.16b, v0.4b[2]\n"
- "add x12, x12, #0x100\n"
+ "add x14, x14, #0x100\n"
".inst 0x6f81e8d5 // udot v21.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d9 // udot v25.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8dd // udot v29.4s, v6.16b, v3.4b[2]\n"
@@ -1666,35 +1667,35 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"100:" // Height 4: Multiply loop: unique 14: skip row sum
+ "prfm pldl1keep, [x10, #0x80]\n"
"prfm pldl1keep, [x9, #0x80]\n"
"prfm pldl1keep, [x28, #0x80]\n"
"prfm pldl1keep, [x27, #0x80]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"101:" // Height 4: Multiply loop: Main loop skip
- "cbz x10, 108f\n"
- "cmp x10, #0x4\n"
+ "cbz x11, 108f\n"
+ "cmp x11, #0x4\n"
"blt 104f\n"
"102:" // Height 4: Multiply loop: Odd block loop
- "ldr s0, [x9], #0x4\n"
- "ldr s1, [x28], #0x4\n"
- "ldr s2, [x27], #0x4\n"
- "ldr s3, [x26], #0x4\n"
+ "ldr s0, [x10], #0x4\n"
+ "ldr s1, [x9], #0x4\n"
+ "ldr s2, [x28], #0x4\n"
+ "ldr s3, [x27], #0x4\n"
"tbnz %x[flags], #31, 103f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"103:" // Height 4: Multiply loop: unique 15: skip row sum
- "ldr q7, [x12, #0x0]\n"
- "sub x10, x10, #0x4\n"
- "ldr q6, [x12, #0x10]\n"
- "cmp x10, #0x4\n"
- "ldr q5, [x12, #0x20]\n"
+ "ldr q7, [x14, #0x0]\n"
+ "sub x11, x11, #0x4\n"
+ "ldr q6, [x14, #0x10]\n"
+ "cmp x11, #0x4\n"
+ "ldr q5, [x14, #0x20]\n"
+ "ldr q4, [x14, #0x30]\n"
".inst 0x6f80e0f0 // udot v16.4s, v7.16b, v0.4b[0]\n"
- "ldr q4, [x12, #0x30]\n"
".inst 0x6f81e0f4 // udot v20.4s, v7.16b, v1.4b[0]\n"
+ "add x14, x14, #0x40\n"
".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x6f83e0fc // udot v28.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f80e0d1 // udot v17.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d5 // udot v21.4s, v6.16b, v1.4b[0]\n"
@@ -1710,23 +1711,23 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f83e09f // udot v31.4s, v4.16b, v3.4b[0]\n"
"bge 102b\n"
"104:" // Height 4: Multiply loop: Skip odd blocks
- "cbz x10, 108f\n"
- "tbz x10, #1, 105f\n"
- "ldr h0, [x9], #0x2\n"
- "ldr h1, [x28], #0x2\n"
- "ldr h2, [x27], #0x2\n"
- "ldr h3, [x26], #0x2\n"
- "tbz x10, #0, 106f\n"
- "ld1 { v0.b }[2], [x9]\n"
- "ld1 { v1.b }[2], [x28]\n"
- "ld1 { v2.b }[2], [x27]\n"
- "ld1 { v3.b }[2], [x26]\n"
+ "cbz x11, 108f\n"
+ "tbz x11, #1, 105f\n"
+ "ldr h0, [x10], #0x2\n"
+ "ldr h1, [x9], #0x2\n"
+ "ldr h2, [x28], #0x2\n"
+ "ldr h3, [x27], #0x2\n"
+ "tbz x11, #0, 106f\n"
+ "ld1 { v0.b }[2], [x10]\n"
+ "ld1 { v1.b }[2], [x9]\n"
+ "ld1 { v2.b }[2], [x28]\n"
+ "ld1 { v3.b }[2], [x27]\n"
"b 106f\n"
"105:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
- "ldr b0, [x9, #0x0]\n"
- "ldr b1, [x28, #0x0]\n"
- "ldr b2, [x27, #0x0]\n"
- "ldr b3, [x26, #0x0]\n"
+ "ldr b0, [x10, #0x0]\n"
+ "ldr b1, [x9, #0x0]\n"
+ "ldr b2, [x28, #0x0]\n"
+ "ldr b3, [x27, #0x0]\n"
"106:" // Height 4: Multiply loop: Ragged operand read: Done
"tbnz %x[flags], #31, 107f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
@@ -1734,16 +1735,16 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
"107:" // Height 4: Multiply loop: unique 16: skip row sum
- "ldr q7, [x12, #0x0]\n"
+ "ldr q7, [x14, #0x0]\n"
+ "ldr q6, [x14, #0x10]\n"
+ "ldr q5, [x14, #0x20]\n"
+ "ldr q4, [x14, #0x30]\n"
".inst 0x6f80e0f0 // udot v16.4s, v7.16b, v0.4b[0]\n"
- "ldr q6, [x12, #0x10]\n"
".inst 0x6f81e0f4 // udot v20.4s, v7.16b, v1.4b[0]\n"
- "ldr q5, [x12, #0x20]\n"
+ "add x14, x14, #0x40\n"
".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
- "ldr q4, [x12, #0x30]\n"
".inst 0x6f83e0fc // udot v28.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f80e0d1 // udot v17.4s, v6.16b, v0.4b[0]\n"
- "add x12, x12, #0x40\n"
".inst 0x6f81e0d5 // udot v21.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d9 // udot v25.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0dd // udot v29.4s, v6.16b, v3.4b[0]\n"
@@ -1757,17 +1758,17 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
".inst 0x6f83e09f // udot v31.4s, v4.16b, v3.4b[0]\n"
"108:" // Height 4: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
- "add x11, x11, #0x1\n"
- "cmp x11, x20\n"
+ "add x12, x12, #0x1\n"
+ "cmp x12, x20\n"
"bne 94b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
"prfm pstl1keep, [x13, #0x0]\n"
+ "add x24, x13, x20\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -1775,9 +1776,9 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"addp v14.4s, v14.4s, v14.4s\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1r { v0.4s }, [x20]\n"
- "neg v0.4s, v0.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "neg v0.4s, v0.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
"mul v11.4s, v11.4s, v0.4s\n"
@@ -1785,13 +1786,13 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"mul v13.4s, v13.4s, v0.4s\n"
"mul v14.4s, v14.4s, v0.4s\n"
"109:" // Height 4: skip row sum fixup
- "ldr q3, [x15, #0x0]\n"
+ "ldr q3, [x16, #0x0]\n"
"add v16.4s, v16.4s, v11.4s\n"
- "ldr q2, [x15, #0x10]\n"
+ "ldr q2, [x16, #0x10]\n"
"add v17.4s, v17.4s, v11.4s\n"
- "ldr q1, [x15, #0x20]\n"
+ "ldr q1, [x16, #0x20]\n"
"add v18.4s, v18.4s, v11.4s\n"
- "ldr q0, [x15, #0x30]\n"
+ "ldr q0, [x16, #0x30]\n"
"add v19.4s, v19.4s, v11.4s\n"
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
@@ -1822,10 +1823,11 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"add v30.4s, v30.4s, v1.4s\n"
"add v31.4s, v31.4s, v0.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v1.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v1.4s }, [x20]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"ld1r { v0.4s }, [x20]\n"
+ "add x16, x16, #0x40\n"
"sqrdmulh v16.4s, v16.4s, v1.4s\n"
"sqrdmulh v17.4s, v17.4s, v1.4s\n"
"sqrdmulh v18.4s, v18.4s, v1.4s\n"
@@ -1842,52 +1844,51 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"sqrdmulh v29.4s, v29.4s, v1.4s\n"
"sqrdmulh v30.4s, v30.4s, v1.4s\n"
"sqrdmulh v31.4s, v31.4s, v1.4s\n"
- "add x15, x15, #0x40\n"
"tbz %x[flags], #5, 110f\n"
"and v2.16b, v16.16b, v0.16b\n"
"and v1.16b, v17.16b, v0.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v2.4s\n"
- "sqadd v17.4s, v17.4s, v1.4s\n"
"and v7.16b, v18.16b, v0.16b\n"
"and v6.16b, v19.16b, v0.16b\n"
"and v5.16b, v20.16b, v0.16b\n"
"and v4.16b, v21.16b, v0.16b\n"
"and v3.16b, v22.16b, v0.16b\n"
- "and v2.16b, v23.16b, v0.16b\n"
- "and v1.16b, v24.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v2.4s\n"
+ "sqadd v17.4s, v17.4s, v1.4s\n"
+ "and v2.16b, v23.16b, v0.16b\n"
+ "and v1.16b, v24.16b, v0.16b\n"
"sqadd v18.4s, v18.4s, v7.4s\n"
"sqadd v19.4s, v19.4s, v6.4s\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v4.4s\n"
"sqadd v22.4s, v22.4s, v3.4s\n"
- "sqadd v23.4s, v23.4s, v2.4s\n"
- "sqadd v24.4s, v24.4s, v1.4s\n"
"and v7.16b, v25.16b, v0.16b\n"
"and v6.16b, v26.16b, v0.16b\n"
"and v5.16b, v27.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"and v4.16b, v28.16b, v0.16b\n"
"and v3.16b, v29.16b, v0.16b\n"
- "and v2.16b, v30.16b, v0.16b\n"
- "and v1.16b, v31.16b, v0.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v2.4s\n"
+ "sqadd v24.4s, v24.4s, v1.4s\n"
+ "and v2.16b, v30.16b, v0.16b\n"
+ "and v1.16b, v31.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v25.4s, v25.4s, v7.4s\n"
"sqadd v26.4s, v26.4s, v6.4s\n"
"sqadd v27.4s, v27.4s, v5.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v28.4s, v28.4s, v4.4s\n"
"sqadd v29.4s, v29.4s, v3.4s\n"
"sqadd v30.4s, v30.4s, v2.4s\n"
@@ -1910,43 +1911,44 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"srshl v30.4s, v30.4s, v0.4s\n"
"srshl v31.4s, v31.4s, v0.4s\n"
"add x20, %x[qp], %[c_offset]\n"
- "ld1r { v0.4s }, [x20]\n"
- "add v16.4s, v16.4s, v0.4s\n"
- "add v17.4s, v17.4s, v0.4s\n"
- "add v18.4s, v18.4s, v0.4s\n"
- "add v19.4s, v19.4s, v0.4s\n"
- "add v20.4s, v20.4s, v0.4s\n"
- "add v21.4s, v21.4s, v0.4s\n"
- "add v22.4s, v22.4s, v0.4s\n"
- "add v23.4s, v23.4s, v0.4s\n"
- "add v24.4s, v24.4s, v0.4s\n"
- "add v25.4s, v25.4s, v0.4s\n"
- "add v26.4s, v26.4s, v0.4s\n"
- "add v27.4s, v27.4s, v0.4s\n"
- "add v28.4s, v28.4s, v0.4s\n"
- "add v29.4s, v29.4s, v0.4s\n"
- "add v30.4s, v30.4s, v0.4s\n"
- "add v31.4s, v31.4s, v0.4s\n"
- "add x20, %x[qp], %[maxval]\n"
- "ld1r { v0.4s }, [x20]\n"
- "smin v16.4s, v16.4s, v0.4s\n"
- "smin v17.4s, v17.4s, v0.4s\n"
- "smin v18.4s, v18.4s, v0.4s\n"
- "smin v19.4s, v19.4s, v0.4s\n"
- "smin v20.4s, v20.4s, v0.4s\n"
- "smin v21.4s, v21.4s, v0.4s\n"
- "smin v22.4s, v22.4s, v0.4s\n"
- "smin v23.4s, v23.4s, v0.4s\n"
- "smin v24.4s, v24.4s, v0.4s\n"
- "smin v25.4s, v25.4s, v0.4s\n"
- "smin v26.4s, v26.4s, v0.4s\n"
- "smin v27.4s, v27.4s, v0.4s\n"
- "smin v28.4s, v28.4s, v0.4s\n"
- "smin v29.4s, v29.4s, v0.4s\n"
- "smin v30.4s, v30.4s, v0.4s\n"
- "smin v31.4s, v31.4s, v0.4s\n"
+ "add x21, %x[qp], %[maxval]\n"
+ "ld1r { v2.4s }, [x20]\n"
"add x20, %x[qp], %[minval]\n"
+ "ld1r { v1.4s }, [x21]\n"
+ "cmp x15, #0x10\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v2.4s\n"
+ "add v17.4s, v17.4s, v2.4s\n"
+ "add v18.4s, v18.4s, v2.4s\n"
+ "add v19.4s, v19.4s, v2.4s\n"
+ "add v20.4s, v20.4s, v2.4s\n"
+ "add v21.4s, v21.4s, v2.4s\n"
+ "add v22.4s, v22.4s, v2.4s\n"
+ "add v23.4s, v23.4s, v2.4s\n"
+ "add v24.4s, v24.4s, v2.4s\n"
+ "add v25.4s, v25.4s, v2.4s\n"
+ "add v26.4s, v26.4s, v2.4s\n"
+ "add v27.4s, v27.4s, v2.4s\n"
+ "add v28.4s, v28.4s, v2.4s\n"
+ "add v29.4s, v29.4s, v2.4s\n"
+ "add v30.4s, v30.4s, v2.4s\n"
+ "add v31.4s, v31.4s, v2.4s\n"
+ "smin v16.4s, v16.4s, v1.4s\n"
+ "smin v17.4s, v17.4s, v1.4s\n"
+ "smin v18.4s, v18.4s, v1.4s\n"
+ "smin v19.4s, v19.4s, v1.4s\n"
+ "smin v20.4s, v20.4s, v1.4s\n"
+ "smin v21.4s, v21.4s, v1.4s\n"
+ "smin v22.4s, v22.4s, v1.4s\n"
+ "smin v23.4s, v23.4s, v1.4s\n"
+ "smin v24.4s, v24.4s, v1.4s\n"
+ "smin v25.4s, v25.4s, v1.4s\n"
+ "smin v26.4s, v26.4s, v1.4s\n"
+ "smin v27.4s, v27.4s, v1.4s\n"
+ "smin v28.4s, v28.4s, v1.4s\n"
+ "smin v29.4s, v29.4s, v1.4s\n"
+ "smin v30.4s, v30.4s, v1.4s\n"
+ "smin v31.4s, v31.4s, v1.4s\n"
"smax v16.4s, v16.4s, v0.4s\n"
"smax v17.4s, v17.4s, v0.4s\n"
"smax v18.4s, v18.4s, v0.4s\n"
@@ -1971,110 +1973,109 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"uzp1 v18.8h, v26.8h, v27.8h\n"
"uzp1 v28.8h, v28.8h, v29.8h\n"
"uzp1 v17.8h, v30.8h, v31.8h\n"
- "cmp x14, #0x10\n"
"uzp1 v16.16b, v16.16b, v0.16b\n"
"uzp1 v20.16b, v20.16b, v19.16b\n"
"uzp1 v24.16b, v24.16b, v18.16b\n"
"uzp1 v28.16b, v28.16b, v17.16b\n"
"bge 119f\n"
- "tbz x14, #3, 114f\n"
+ "tbz x15, #3, 114f\n"
"str d16, [x13], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
- "tbz x14, #2, 112f\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
+ "tbz x15, #2, 112f\n"
"st1 { v16.s }[2], [x13], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
- "tbz x14, #1, 111f\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
+ "tbz x15, #1, 111f\n"
"st1 { v16.h }[6], [x13], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "st1 { v28.h }[6], [x21], #0x2\n"
- "tbz x14, #0, 118f\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "st1 { v28.h }[6], [x22], #0x2\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[14], [x13]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
- "st1 { v28.b }[14], [x21]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "st1 { v28.b }[14], [x22]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
- "tbz x14, #0, 118f\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[12], [x13]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
- "st1 { v28.b }[12], [x21]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "st1 { v28.b }[12], [x22]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
- "tbz x14, #1, 113f\n"
+ "tbz x15, #1, 113f\n"
"st1 { v16.h }[4], [x13], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "st1 { v28.h }[4], [x21], #0x2\n"
- "tbz x14, #0, 118f\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "st1 { v28.h }[4], [x22], #0x2\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[10], [x13]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
- "st1 { v28.b }[10], [x21]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "st1 { v28.b }[10], [x22]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
- "tbz x14, #0, 118f\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[8], [x13]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
- "st1 { v28.b }[8], [x21]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "st1 { v28.b }[8], [x22]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
- "tbz x14, #2, 116f\n"
+ "tbz x15, #2, 116f\n"
"str s16, [x13], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
- "tbz x14, #1, 115f\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
+ "tbz x15, #1, 115f\n"
"st1 { v16.h }[2], [x13], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "st1 { v28.h }[2], [x21], #0x2\n"
- "tbz x14, #0, 118f\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "st1 { v28.h }[2], [x22], #0x2\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[6], [x13]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
- "st1 { v28.b }[6], [x21]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "st1 { v28.b }[6], [x22]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
- "tbz x14, #0, 118f\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[4], [x13]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
- "st1 { v28.b }[4], [x21]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "st1 { v28.b }[4], [x22]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
- "tbz x14, #1, 117f\n"
+ "tbz x15, #1, 117f\n"
"str h16, [x13], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "str h28, [x21], #0x2\n"
- "tbz x14, #0, 118f\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "str h28, [x22], #0x2\n"
+ "tbz x15, #0, 118f\n"
"st1 { v16.b }[2], [x13]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
- "st1 { v28.b }[2], [x21]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "st1 { v28.b }[2], [x22]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
"str b16, [x13, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
- "str b28, [x21, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "str b28, [x22, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
"str q16, [x13, #0x0]\n"
"add x13, x13, #0x10\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
- "str q28, [x21, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q28, [x22, #0x0]\n"
"120:" // Height 4: Writeback done
- "subs x14, x14, #0x10\n"
+ "subs x15, x15, #0x10\n"
"bgt 92b\n"
"subs %x[M], %x[M], #0x4\n"
"beq 122f\n"
@@ -2088,9 +2089,9 @@ void a64_hybrid_u8qa_dot_4x16_a55 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
index ebe583b5d4..23315f3c0c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void a64_hybrid_u8qa_dot_4x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -90,7 +90,7 @@ void a64_hybrid_u8qa_dot_4x16 (
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -100,8 +100,8 @@ void a64_hybrid_u8qa_dot_4x16 (
"mov x26, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -134,6 +134,7 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q26, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q25, [x28, #0xa0]\n"
+ "add x24, x24, #0x10\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q24, [x28, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
@@ -144,11 +145,10 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q21, [x28, #0xe0]\n"
".inst 0x6f80ea90 // udot v16.4s, v20.16b, v0.4b[2]\n"
"ldr q20, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x6f80eb51 // udot v17.4s, v26.16b, v0.4b[2]\n"
- "add x24, x24, #0x10\n"
".inst 0x6f80eb32 // udot v18.4s, v25.16b, v0.4b[2]\n"
".inst 0x6f80eb13 // udot v19.4s, v24.16b, v0.4b[2]\n"
- "add x28, x28, #0x100\n"
".inst 0x6fa0eaf0 // udot v16.4s, v23.16b, v0.4b[3]\n"
".inst 0x6fa0ead1 // udot v17.4s, v22.16b, v0.4b[3]\n"
".inst 0x6fa0eab2 // udot v18.4s, v21.16b, v0.4b[3]\n"
@@ -159,9 +159,9 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q0, [x24, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q5, [x28, #0x10]\n"
"ldr q6, [x28, #0x20]\n"
+ "cmp x25, #0x20\n"
"ldr q7, [x28, #0x30]\n"
"ldr q8, [x28, #0x40]\n"
"ldr q9, [x28, #0x50]\n"
@@ -177,6 +177,8 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q26, [x28, #0x90]\n"
".inst 0x6f80e0f3 // udot v19.4s, v7.16b, v0.4b[0]\n"
"ldr q25, [x28, #0xa0]\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
".inst 0x6fa0e110 // udot v16.4s, v8.16b, v0.4b[1]\n"
"ldr q24, [x28, #0xb0]\n"
".inst 0x6fa0e131 // udot v17.4s, v9.16b, v0.4b[1]\n"
@@ -187,12 +189,10 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q21, [x28, #0xe0]\n"
".inst 0x6f80ea90 // udot v16.4s, v20.16b, v0.4b[2]\n"
"ldr q20, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
".inst 0x6f80eb51 // udot v17.4s, v26.16b, v0.4b[2]\n"
- "sub x25, x25, #0x10\n"
".inst 0x6f80eb32 // udot v18.4s, v25.16b, v0.4b[2]\n"
".inst 0x6f80eb13 // udot v19.4s, v24.16b, v0.4b[2]\n"
- "add x24, x24, #0x10\n"
- "add x28, x28, #0x100\n"
".inst 0x6fa0eaf0 // udot v16.4s, v23.16b, v0.4b[3]\n"
".inst 0x6fa0ead1 // udot v17.4s, v22.16b, v0.4b[3]\n"
".inst 0x6fa0eab2 // udot v18.4s, v21.16b, v0.4b[3]\n"
@@ -213,14 +213,14 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q23, [x28, #0x0]\n"
"ldr q22, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
"ldr q21, [x28, #0x20]\n"
"ldr q20, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
".inst 0x6f80e2f0 // udot v16.4s, v23.16b, v0.4b[0]\n"
".inst 0x6f80e2d1 // udot v17.4s, v22.16b, v0.4b[0]\n"
".inst 0x6f80e2b2 // udot v18.4s, v21.16b, v0.4b[0]\n"
".inst 0x6f80e293 // udot v19.4s, v20.16b, v0.4b[0]\n"
- "add x28, x28, #0x40\n"
"bge 12b\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
"cbz x25, 18f\n"
@@ -235,15 +235,15 @@ void a64_hybrid_u8qa_dot_4x16 (
"tbnz %x[flags], #31, 17f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"17:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q21, [x28, #0x0]\n"
- "ldr q20, [x28, #0x10]\n"
- ".inst 0x6f80e2b0 // udot v16.4s, v21.16b, v0.4b[0]\n"
- ".inst 0x6f80e291 // udot v17.4s, v20.16b, v0.4b[0]\n"
+ "ldr q23, [x28, #0x0]\n"
+ "ldr q22, [x28, #0x10]\n"
"ldr q21, [x28, #0x20]\n"
"ldr q20, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6f80e2f0 // udot v16.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x6f80e2d1 // udot v17.4s, v22.16b, v0.4b[0]\n"
".inst 0x6f80e2b2 // udot v18.4s, v21.16b, v0.4b[0]\n"
".inst 0x6f80e293 // udot v19.4s, v20.16b, v0.4b[0]\n"
- "add x28, x28, #0x40\n"
"18:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -252,8 +252,8 @@ void a64_hybrid_u8qa_dot_4x16 (
"prfm pstl1keep, [x27, #0x0]\n"
"tbnz %x[flags], #31, 19f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v20.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v20.4s }, [x20]\n"
"neg v20.4s, v20.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"mul v11.4s, v11.4s, v20.4s\n"
@@ -267,16 +267,16 @@ void a64_hybrid_u8qa_dot_4x16 (
"add v18.4s, v18.4s, v11.4s\n"
"add v19.4s, v19.4s, v11.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v20.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v20.4s }, [x20]\n"
"add v16.4s, v16.4s, v24.4s\n"
"add v17.4s, v17.4s, v23.4s\n"
- "add v18.4s, v18.4s, v22.4s\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x10, x10, #0x40\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v22.4s\n"
"add v19.4s, v19.4s, v21.4s\n"
"sqrdmulh v16.4s, v16.4s, v20.4s\n"
- "add x10, x10, #0x40\n"
"sqrdmulh v17.4s, v17.4s, v20.4s\n"
"sqrdmulh v18.4s, v18.4s, v20.4s\n"
"sqrdmulh v19.4s, v19.4s, v20.4s\n"
@@ -294,21 +294,21 @@ void a64_hybrid_u8qa_dot_4x16 (
"sqadd v18.4s, v18.4s, v21.4s\n"
"sqadd v19.4s, v19.4s, v20.4s\n"
"20:" // Height 1: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v22.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v22.4s }, [x21]\n"
"ld1r { v21.4s }, [x20]\n"
- "add v16.4s, v16.4s, v22.4s\n"
- "add v17.4s, v17.4s, v22.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v20.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v22.4s\n"
+ "add v17.4s, v17.4s, v22.4s\n"
"add v18.4s, v18.4s, v22.4s\n"
"add v19.4s, v19.4s, v22.4s\n"
- "cmp x9, #0x10\n"
"smin v16.4s, v16.4s, v21.4s\n"
"smin v17.4s, v17.4s, v21.4s\n"
"smin v18.4s, v18.4s, v21.4s\n"
@@ -381,7 +381,7 @@ void a64_hybrid_u8qa_dot_4x16 (
"movi v15.16b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"32:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -395,8 +395,8 @@ void a64_hybrid_u8qa_dot_4x16 (
"mov x26, #0x0\n"
"34:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 35f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -476,9 +476,9 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q0, [x24, #0x0]\n"
"ldr q1, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q4, [x28, #0x0]\n"
"ldr q5, [x28, #0x10]\n"
+ "cmp x25, #0x20\n"
"ldr q6, [x28, #0x20]\n"
"ldr q7, [x28, #0x30]\n"
"ldr q8, [x28, #0x40]\n"
@@ -553,14 +553,14 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q27, [x28, #0x0]\n"
"ldr q26, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
"ldr q25, [x28, #0x20]\n"
"ldr q24, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
".inst 0x6f80e370 // udot v16.4s, v27.16b, v0.4b[0]\n"
".inst 0x6f81e374 // udot v20.4s, v27.16b, v1.4b[0]\n"
".inst 0x6f80e351 // udot v17.4s, v26.16b, v0.4b[0]\n"
".inst 0x6f81e355 // udot v21.4s, v26.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f80e332 // udot v18.4s, v25.16b, v0.4b[0]\n"
".inst 0x6f81e336 // udot v22.4s, v25.16b, v1.4b[0]\n"
".inst 0x6f80e313 // udot v19.4s, v24.16b, v0.4b[0]\n"
@@ -583,17 +583,17 @@ void a64_hybrid_u8qa_dot_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
"47:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q24, [x28, #0x0]\n"
+ "ldr q27, [x28, #0x0]\n"
"ldr q26, [x28, #0x10]\n"
- ".inst 0x6f80e310 // udot v16.4s, v24.16b, v0.4b[0]\n"
- ".inst 0x6f81e314 // udot v20.4s, v24.16b, v1.4b[0]\n"
"ldr q25, [x28, #0x20]\n"
"ldr q24, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6f80e370 // udot v16.4s, v27.16b, v0.4b[0]\n"
+ ".inst 0x6f81e374 // udot v20.4s, v27.16b, v1.4b[0]\n"
".inst 0x6f80e351 // udot v17.4s, v26.16b, v0.4b[0]\n"
".inst 0x6f81e355 // udot v21.4s, v26.16b, v1.4b[0]\n"
".inst 0x6f80e332 // udot v18.4s, v25.16b, v0.4b[0]\n"
".inst 0x6f81e336 // udot v22.4s, v25.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f80e313 // udot v19.4s, v24.16b, v0.4b[0]\n"
".inst 0x6f81e317 // udot v23.4s, v24.16b, v1.4b[0]\n"
"48:" // Height 2: Multiply loop: No odd multiplies
@@ -602,14 +602,14 @@ void a64_hybrid_u8qa_dot_4x16 (
"cmp x26, x20\n"
"bne 34b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"tbnz %x[flags], #31, 49f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v24.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v24.4s }, [x20]\n"
"neg v24.4s, v24.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
@@ -627,10 +627,10 @@ void a64_hybrid_u8qa_dot_4x16 (
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v24.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v16.4s, v16.4s, v28.4s\n"
"add v17.4s, v17.4s, v27.4s\n"
@@ -652,45 +652,45 @@ void a64_hybrid_u8qa_dot_4x16 (
"sqrdmulh v23.4s, v23.4s, v24.4s\n"
"tbz %x[flags], #5, 50f\n"
"and v24.16b, v16.16b, v0.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v24.4s\n"
"and v30.16b, v17.16b, v0.16b\n"
"and v29.16b, v18.16b, v0.16b\n"
"and v28.16b, v19.16b, v0.16b\n"
"and v27.16b, v20.16b, v0.16b\n"
"and v26.16b, v21.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"and v25.16b, v22.16b, v0.16b\n"
- "and v24.16b, v23.16b, v0.16b\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v24.4s\n"
+ "and v24.16b, v23.16b, v0.16b\n"
"sshr v26.4s, v26.4s, #0x1f\n"
"sshr v25.4s, v25.4s, #0x1f\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v17.4s, v17.4s, v30.4s\n"
"sqadd v18.4s, v18.4s, v29.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v19.4s, v19.4s, v28.4s\n"
"sqadd v20.4s, v20.4s, v27.4s\n"
"sqadd v21.4s, v21.4s, v26.4s\n"
"sqadd v22.4s, v22.4s, v25.4s\n"
"sqadd v23.4s, v23.4s, v24.4s\n"
"50:" // Height 2: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"add v16.4s, v16.4s, v26.4s\n"
"add v17.4s, v17.4s, v26.4s\n"
"add v18.4s, v18.4s, v26.4s\n"
@@ -724,68 +724,68 @@ void a64_hybrid_u8qa_dot_4x16 (
"bge 59f\n"
"tbz x9, #3, 54f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x23], #0x8\n"
+ "str d20, [x24], #0x8\n"
"tbz x9, #2, 52f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
"tbz x9, #1, 51f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x23]\n"
+ "st1 { v20.b }[14], [x24]\n"
"b 58f\n"
"51:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 58f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x23]\n"
+ "st1 { v20.b }[12], [x24]\n"
"b 58f\n"
"52:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 53f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x23]\n"
+ "st1 { v20.b }[10], [x24]\n"
"b 58f\n"
"53:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 58f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x23]\n"
+ "st1 { v20.b }[8], [x24]\n"
"b 58f\n"
"54:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 56f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x23], #0x4\n"
+ "str s20, [x24], #0x4\n"
"tbz x9, #1, 55f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x23]\n"
+ "st1 { v20.b }[6], [x24]\n"
"b 58f\n"
"55:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 58f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x23]\n"
+ "st1 { v20.b }[4], [x24]\n"
"b 58f\n"
"56:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 57f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x23], #0x2\n"
+ "str h20, [x24], #0x2\n"
"tbz x9, #0, 58f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x23]\n"
+ "st1 { v20.b }[2], [x24]\n"
"b 58f\n"
"57:" // Height 2: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x23, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
"58:" // Height 2: Partial direct writeback: Done
"b 60f\n"
"59:" // Height 2: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x23, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
"60:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 32b\n"
@@ -799,7 +799,7 @@ void a64_hybrid_u8qa_dot_4x16 (
"movi v15.16b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"62:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -817,8 +817,8 @@ void a64_hybrid_u8qa_dot_4x16 (
"mov x26, #0x0\n"
"64:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 65f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -920,9 +920,9 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q0, [x24, #0x0]\n"
"ldr q1, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q2, [x22, #0x0]\n"
"ldr q4, [x28, #0x0]\n"
+ "cmp x25, #0x20\n"
"ldr q5, [x28, #0x10]\n"
"ldr q6, [x28, #0x20]\n"
"ldr q7, [x28, #0x30]\n"
@@ -1020,14 +1020,14 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q31, [x28, #0x0]\n"
"ldr q30, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
"ldr q29, [x28, #0x20]\n"
"ldr q28, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
".inst 0x6f80e3f0 // udot v16.4s, v31.16b, v0.4b[0]\n"
".inst 0x6f81e3f4 // udot v20.4s, v31.16b, v1.4b[0]\n"
".inst 0x6f82e3f8 // udot v24.4s, v31.16b, v2.4b[0]\n"
".inst 0x6f80e3d1 // udot v17.4s, v30.16b, v0.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f81e3d5 // udot v21.4s, v30.16b, v1.4b[0]\n"
".inst 0x6f82e3d9 // udot v25.4s, v30.16b, v2.4b[0]\n"
".inst 0x6f80e3b2 // udot v18.4s, v29.16b, v0.4b[0]\n"
@@ -1060,15 +1060,15 @@ void a64_hybrid_u8qa_dot_4x16 (
"77:" // Height 3: Multiply loop: unique 12: skip row sum
"ldr q31, [x28, #0x0]\n"
"ldr q30, [x28, #0x10]\n"
- ".inst 0x6f80e3f0 // udot v16.4s, v31.16b, v0.4b[0]\n"
- ".inst 0x6f81e3f4 // udot v20.4s, v31.16b, v1.4b[0]\n"
"ldr q29, [x28, #0x20]\n"
"ldr q28, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6f80e3f0 // udot v16.4s, v31.16b, v0.4b[0]\n"
+ ".inst 0x6f81e3f4 // udot v20.4s, v31.16b, v1.4b[0]\n"
".inst 0x6f82e3f8 // udot v24.4s, v31.16b, v2.4b[0]\n"
".inst 0x6f80e3d1 // udot v17.4s, v30.16b, v0.4b[0]\n"
".inst 0x6f81e3d5 // udot v21.4s, v30.16b, v1.4b[0]\n"
".inst 0x6f82e3d9 // udot v25.4s, v30.16b, v2.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f80e3b2 // udot v18.4s, v29.16b, v0.4b[0]\n"
".inst 0x6f81e3b6 // udot v22.4s, v29.16b, v1.4b[0]\n"
".inst 0x6f82e3ba // udot v26.4s, v29.16b, v2.4b[0]\n"
@@ -1081,16 +1081,16 @@ void a64_hybrid_u8qa_dot_4x16 (
"cmp x26, x20\n"
"bne 64b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"tbnz %x[flags], #31, 79f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v28.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v28.4s }, [x20]\n"
"addp v13.4s, v13.4s, v13.4s\n"
"neg v28.4s, v28.4s\n"
"addp v11.4s, v11.4s, v11.4s\n"
@@ -1111,10 +1111,10 @@ void a64_hybrid_u8qa_dot_4x16 (
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v28.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
@@ -1152,18 +1152,18 @@ void a64_hybrid_u8qa_dot_4x16 (
"and v30.16b, v18.16b, v0.16b\n"
"and v29.16b, v19.16b, v0.16b\n"
"and v28.16b, v20.16b, v0.16b\n"
+ "and v3.16b, v21.16b, v0.16b\n"
"sshr v1.4s, v1.4s, #0x1f\n"
"sshr v31.4s, v31.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
+ "and v2.16b, v22.16b, v0.16b\n"
"sqadd v16.4s, v16.4s, v1.4s\n"
"sqadd v17.4s, v17.4s, v31.4s\n"
"sqadd v18.4s, v18.4s, v30.4s\n"
"sqadd v19.4s, v19.4s, v29.4s\n"
"sqadd v20.4s, v20.4s, v28.4s\n"
- "and v3.16b, v21.16b, v0.16b\n"
- "and v2.16b, v22.16b, v0.16b\n"
"and v1.16b, v23.16b, v0.16b\n"
"and v31.16b, v24.16b, v0.16b\n"
"and v30.16b, v25.16b, v0.16b\n"
@@ -1184,21 +1184,21 @@ void a64_hybrid_u8qa_dot_4x16 (
"sqadd v26.4s, v26.4s, v29.4s\n"
"sqadd v27.4s, v27.4s, v28.4s\n"
"80:" // Height 3: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v30.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v30.4s }, [x21]\n"
"ld1r { v29.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v28.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"srshl v24.4s, v24.4s, v0.4s\n"
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
@@ -1251,102 +1251,103 @@ void a64_hybrid_u8qa_dot_4x16 (
"bge 89f\n"
"tbz x9, #3, 84f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x9, #2, 82f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
"tbz x9, #1, 81f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 88f\n"
"81:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 88f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 88f\n"
"82:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 83f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 88f\n"
"83:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 88f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 88f\n"
"84:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 86f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
"tbz x9, #1, 85f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 88f\n"
"85:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 88f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 88f\n"
"86:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 87f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
"tbz x9, #0, 88f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 88f\n"
"87:" // Height 3: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"88:" // Height 3: Partial direct writeback: Done
"b 90f\n"
"89:" // Height 3: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"90:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 62b\n"
"b 122f\n"
"91:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x4\n"
"mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "movi v13.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "movi v15.16b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "movi v15.16b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"92:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1368,8 +1369,8 @@ void a64_hybrid_u8qa_dot_4x16 (
"mov x26, #0x0\n"
"94:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 95f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1493,9 +1494,9 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q0, [x24, #0x0]\n"
"ldr q1, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q2, [x22, #0x0]\n"
"ldr q3, [x21, #0x0]\n"
+ "cmp x25, #0x20\n"
"ldr q4, [x28, #0x0]\n"
"ldr q5, [x28, #0x10]\n"
"ldr q6, [x28, #0x20]\n"
@@ -1616,14 +1617,14 @@ void a64_hybrid_u8qa_dot_4x16 (
"ldr q7, [x28, #0x0]\n"
"ldr q6, [x28, #0x10]\n"
"sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
"ldr q5, [x28, #0x20]\n"
"ldr q4, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
".inst 0x6f80e0f0 // udot v16.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0f4 // udot v20.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0fc // udot v28.4s, v7.16b, v3.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f80e0d1 // udot v17.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d5 // udot v21.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d9 // udot v25.4s, v6.16b, v2.4b[0]\n"
@@ -1664,15 +1665,15 @@ void a64_hybrid_u8qa_dot_4x16 (
"107:" // Height 4: Multiply loop: unique 16: skip row sum
"ldr q7, [x28, #0x0]\n"
"ldr q6, [x28, #0x10]\n"
- ".inst 0x6f80e0f0 // udot v16.4s, v7.16b, v0.4b[0]\n"
- ".inst 0x6f81e0f4 // udot v20.4s, v7.16b, v1.4b[0]\n"
"ldr q5, [x28, #0x20]\n"
"ldr q4, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x6f80e0f0 // udot v16.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x6f81e0f4 // udot v20.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f8 // udot v24.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0fc // udot v28.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f80e0d1 // udot v17.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0d5 // udot v21.4s, v6.16b, v1.4b[0]\n"
- "add x28, x28, #0x40\n"
".inst 0x6f82e0d9 // udot v25.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0dd // udot v29.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f80e0b2 // udot v18.4s, v5.16b, v0.4b[0]\n"
@@ -1689,18 +1690,18 @@ void a64_hybrid_u8qa_dot_4x16 (
"cmp x26, x20\n"
"bne 94b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "add x21, x22, x20\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20\n"
"prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"tbnz %x[flags], #31, 109f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v0.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
"addp v13.4s, v13.4s, v13.4s\n"
"addp v14.4s, v14.4s, v14.4s\n"
"neg v0.4s, v0.4s\n"
@@ -1724,10 +1725,10 @@ void a64_hybrid_u8qa_dot_4x16 (
"add v20.4s, v20.4s, v12.4s\n"
"add v21.4s, v21.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v1.4s }, [x20]\n"
"add v22.4s, v22.4s, v12.4s\n"
"add v23.4s, v23.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
@@ -1774,32 +1775,32 @@ void a64_hybrid_u8qa_dot_4x16 (
"tbz %x[flags], #5, 110f\n"
"and v2.16b, v16.16b, v0.16b\n"
"and v1.16b, v17.16b, v0.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v16.4s, v16.4s, v2.4s\n"
- "sqadd v17.4s, v17.4s, v1.4s\n"
"and v7.16b, v18.16b, v0.16b\n"
"and v6.16b, v19.16b, v0.16b\n"
"and v5.16b, v20.16b, v0.16b\n"
"and v4.16b, v21.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"and v3.16b, v22.16b, v0.16b\n"
- "and v2.16b, v23.16b, v0.16b\n"
- "and v1.16b, v24.16b, v0.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v2.4s\n"
+ "sqadd v17.4s, v17.4s, v1.4s\n"
+ "and v2.16b, v23.16b, v0.16b\n"
+ "and v1.16b, v24.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v18.4s, v18.4s, v7.4s\n"
"sqadd v19.4s, v19.4s, v6.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v5.4s\n"
"sqadd v21.4s, v21.4s, v4.4s\n"
"sqadd v22.4s, v22.4s, v3.4s\n"
+ "and v7.16b, v25.16b, v0.16b\n"
"sqadd v23.4s, v23.4s, v2.4s\n"
"sqadd v24.4s, v24.4s, v1.4s\n"
- "and v7.16b, v25.16b, v0.16b\n"
"and v6.16b, v26.16b, v0.16b\n"
"and v5.16b, v27.16b, v0.16b\n"
"and v4.16b, v28.16b, v0.16b\n"
@@ -1821,21 +1822,21 @@ void a64_hybrid_u8qa_dot_4x16 (
"sqadd v30.4s, v30.4s, v2.4s\n"
"sqadd v31.4s, v31.4s, v1.4s\n"
"110:" // Height 4: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v3.4s }, [x21]\n"
"ld1r { v2.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
"srshl v21.4s, v21.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v1.4s }, [x20]\n"
"srshl v22.4s, v22.4s, v0.4s\n"
"srshl v23.4s, v23.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"srshl v24.4s, v24.4s, v0.4s\n"
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
@@ -1907,100 +1908,100 @@ void a64_hybrid_u8qa_dot_4x16 (
"bge 119f\n"
"tbz x9, #3, 114f\n"
"str d16, [x27], #0x8\n"
- "str d20, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
- "str d28, [x21], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
"tbz x9, #2, 112f\n"
"st1 { v16.s }[2], [x27], #0x4\n"
- "st1 { v20.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
- "st1 { v28.s }[2], [x21], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
"tbz x9, #1, 111f\n"
"st1 { v16.h }[6], [x27], #0x2\n"
- "st1 { v20.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
- "st1 { v28.h }[6], [x21], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "st1 { v28.h }[6], [x22], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[14], [x27]\n"
- "st1 { v20.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
- "st1 { v28.b }[14], [x21]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "st1 { v28.b }[14], [x22]\n"
"b 118f\n"
"111:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 118f\n"
"st1 { v16.b }[12], [x27]\n"
- "st1 { v20.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
- "st1 { v28.b }[12], [x21]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "st1 { v28.b }[12], [x22]\n"
"b 118f\n"
"112:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 113f\n"
"st1 { v16.h }[4], [x27], #0x2\n"
- "st1 { v20.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
- "st1 { v28.h }[4], [x21], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "st1 { v28.h }[4], [x22], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[10], [x27]\n"
- "st1 { v20.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
- "st1 { v28.b }[10], [x21]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "st1 { v28.b }[10], [x22]\n"
"b 118f\n"
"113:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 118f\n"
"st1 { v16.b }[8], [x27]\n"
- "st1 { v20.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
- "st1 { v28.b }[8], [x21]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "st1 { v28.b }[8], [x22]\n"
"b 118f\n"
"114:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 116f\n"
"str s16, [x27], #0x4\n"
- "str s20, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
- "str s28, [x21], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
"tbz x9, #1, 115f\n"
"st1 { v16.h }[2], [x27], #0x2\n"
- "st1 { v20.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
- "st1 { v28.h }[2], [x21], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "st1 { v28.h }[2], [x22], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[6], [x27]\n"
- "st1 { v20.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
- "st1 { v28.b }[6], [x21]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "st1 { v28.b }[6], [x22]\n"
"b 118f\n"
"115:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 118f\n"
"st1 { v16.b }[4], [x27]\n"
- "st1 { v20.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
- "st1 { v28.b }[4], [x21]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "st1 { v28.b }[4], [x22]\n"
"b 118f\n"
"116:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 117f\n"
"str h16, [x27], #0x2\n"
- "str h20, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
- "str h28, [x21], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "str h28, [x22], #0x2\n"
"tbz x9, #0, 118f\n"
"st1 { v16.b }[2], [x27]\n"
- "st1 { v20.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
- "st1 { v28.b }[2], [x21]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "st1 { v28.b }[2], [x22]\n"
"b 118f\n"
"117:" // Height 4: Partial direct writeback: partial_1_0
"str b16, [x27, #0x0]\n"
- "str b20, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
- "str b28, [x21, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "str b28, [x22, #0x0]\n"
"118:" // Height 4: Partial direct writeback: Done
"b 120f\n"
"119:" // Height 4: Full writeback
"str q16, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q20, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
- "str q28, [x21, #0x0]\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q28, [x22, #0x0]\n"
"120:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 92b\n"
@@ -2016,8 +2017,8 @@ void a64_hybrid_u8qa_dot_4x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"122:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16.hpp
index 17e7405a0a..84f6ed0553 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsFixed<rhs_operand_type, result_type, 4, 16, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 16, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
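
The one-line hunk above widens the transforms type: the old instantiation passed only the RHS operand type and the result type, while the new one names the LHS operand type as a separate leading parameter. A minimal sketch of such a signature change, assuming nothing about StdTransformsFixed beyond what the hunk shows (TransformsSketch and its contents are illustrative names, not the library's):

    #include <cstdint>

    // Before: Sketch<rhs_type, result_type, 4, 16, 8>   (LHS type implied)
    // After:  Sketch<lhs_type, rhs_type, result_type, 4, 16, 8>
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned Height, unsigned Width, unsigned KBlock>
    struct TransformsSketch {
        // The added leading parameter lets the LHS and RHS element types
        // differ per instantiation instead of being assumed identical.
    };

    // For example, an 8-bit kernel with 32-bit accumulation:
    TransformsSketch<uint8_t, uint8_t, int32_t, 4, 16, 8> transforms_sketch = {};
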
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp
index 1335b355ef..2d3af7f9c3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_mmla_4x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void a64_hybrid_u8qa_mmla_4x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
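
This hunk, repeated across these generated kernels, moves the output pointer out of the inline-asm operand list and into the KernelArgs block that the assembly already reaches through [args_ptr]; the following hunks replace "mov x27, %x[output_ptr]" with "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]", and the Height-4 setup stores the advanced pointer back with an str rather than updating a bound register. A standalone sketch of the pattern, assuming AArch64 with GCC/Clang extended asm (ArgsSketch and load_output_ptr are illustrative names, not the library's):

    #include <cstddef>

    struct ArgsSketch {
        size_t output_offset = {};
        void  *output_ptr    = {};  // carried in the struct, not as an asm operand
    };

    #if defined(__aarch64__)
    void *load_output_ptr(const ArgsSketch &ka) {
        void *p;
        __asm__ volatile(
            // Base register plus a constant offsetof immediate, in place of
            // binding the pointer itself as a "+&r" register operand.
            "ldr %x[p], [%x[args], %[off]]\n"
            : [p] "=r" (p)
            : [args] "r" (&ka), [off] "I" (offsetof(ArgsSketch, output_ptr))
            : "memory");
        return p;
    }
    #endif

Dropping the bound operand gives the register allocator one more general-purpose register around the kernel body, which is consistent with the writeback addresses shifting from x23/x22/x21 to x24/x23/x22 in the store hunks above and below.
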
@@ -90,7 +90,7 @@ void a64_hybrid_u8qa_mmla_4x16 (
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -104,8 +104,8 @@ void a64_hybrid_u8qa_mmla_4x16 (
"mov x26, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -130,6 +130,7 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q4, [x28, #0x60]\n"
"blt 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
+ "add x24, x24, #0x10\n"
"trn1 v0.2d, v1.2d, v27.2d\n"
".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"ldr q25, [x28, #0x70]\n"
@@ -151,9 +152,8 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e98a430 // ummla v16.4s, v1.16b, v24.16b\n"
"ldr q24, [x28, #0xf0]\n"
".inst 0x6e9ea434 // ummla v20.4s, v1.16b, v30.16b\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n"
"add x28, x28, #0x100\n"
+ ".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n"
".inst 0x6e9ca435 // ummla v21.4s, v1.16b, v28.16b\n"
".inst 0x6e9ba432 // ummla v18.4s, v1.16b, v27.16b\n"
".inst 0x6e9aa436 // ummla v22.4s, v1.16b, v26.16b\n"
@@ -166,9 +166,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q1, [x24, #0x0]\n"
"ldr q5, [x28, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
+ "cmp x25, #0x20\n"
"ldr q8, [x28, #0x30]\n"
"ldr q9, [x28, #0x40]\n"
"ldr q10, [x28, #0x50]\n"
@@ -176,10 +176,12 @@ void a64_hybrid_u8qa_mmla_4x16 (
"prfm pldl1keep, [x24, #0x80]\n"
"bge 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v0.2d, v1.2d, v24.2d\n"
+ "trn2 v1.2d, v1.2d, v24.2d\n"
".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"ldr q25, [x28, #0x70]\n"
- "trn2 v1.2d, v1.2d, v24.2d\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q24, [x28, #0x80]\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
@@ -196,11 +198,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q25, [x28, #0xe0]\n"
".inst 0x6e98a430 // ummla v16.4s, v1.16b, v24.16b\n"
"ldr q24, [x28, #0xf0]\n"
- "sub x25, x25, #0x10\n"
".inst 0x6e9ea434 // ummla v20.4s, v1.16b, v30.16b\n"
- ".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n"
- "add x24, x24, #0x10\n"
"add x28, x28, #0x100\n"
+ ".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n"
".inst 0x6e9ca435 // ummla v21.4s, v1.16b, v28.16b\n"
".inst 0x6e9ba432 // ummla v18.4s, v1.16b, v27.16b\n"
".inst 0x6e9aa436 // ummla v22.4s, v1.16b, v26.16b\n"
@@ -222,24 +222,24 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"13:" // Height 1: Multiply loop: unique 3: skip row sum
"ldr q24, [x28, #0x0]\n"
- "ldr q26, [x28, #0x10]\n"
- ".inst 0x6e98a410 // ummla v16.4s, v0.16b, v24.16b\n"
+ "ldr q30, [x28, #0x10]\n"
"sub x25, x25, #0x8\n"
- "ldr q25, [x28, #0x20]\n"
- "ldr q24, [x28, #0x30]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
"cmp x25, #0x8\n"
- ".inst 0x6e9aa414 // ummla v20.4s, v0.16b, v26.16b\n"
"ldr q27, [x28, #0x40]\n"
"ldr q26, [x28, #0x50]\n"
- ".inst 0x6e99a411 // ummla v17.4s, v0.16b, v25.16b\n"
- ".inst 0x6e98a415 // ummla v21.4s, v0.16b, v24.16b\n"
"ldr q25, [x28, #0x60]\n"
+ ".inst 0x6e98a410 // ummla v16.4s, v0.16b, v24.16b\n"
"ldr q24, [x28, #0x70]\n"
+ ".inst 0x6e9ea414 // ummla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x6e9da411 // ummla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x6e9ca415 // ummla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x6e9ba412 // ummla v18.4s, v0.16b, v27.16b\n"
".inst 0x6e9aa416 // ummla v22.4s, v0.16b, v26.16b\n"
".inst 0x6e99a413 // ummla v19.4s, v0.16b, v25.16b\n"
".inst 0x6e98a417 // ummla v23.4s, v0.16b, v24.16b\n"
- "add x28, x28, #0x80\n"
"bge 12b\n"
"14:" // Height 1: Multiply loop: Skip odd blocks
"cbz x25, 20f\n"
@@ -267,23 +267,23 @@ void a64_hybrid_u8qa_mmla_4x16 (
"tbnz %x[flags], #31, 19f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"19:" // Height 1: Multiply loop: unique 4: skip row sum
- "ldr q25, [x28, #0x0]\n"
- "ldr q24, [x28, #0x10]\n"
- ".inst 0x6e99a410 // ummla v16.4s, v0.16b, v25.16b\n"
- ".inst 0x6e98a414 // ummla v20.4s, v0.16b, v24.16b\n"
- "ldr q25, [x28, #0x20]\n"
- "ldr q24, [x28, #0x30]\n"
- ".inst 0x6e99a411 // ummla v17.4s, v0.16b, v25.16b\n"
- ".inst 0x6e98a415 // ummla v21.4s, v0.16b, v24.16b\n"
- "ldr q25, [x28, #0x40]\n"
- "ldr q24, [x28, #0x50]\n"
- ".inst 0x6e99a412 // ummla v18.4s, v0.16b, v25.16b\n"
- ".inst 0x6e98a416 // ummla v22.4s, v0.16b, v24.16b\n"
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "ldr q27, [x28, #0x40]\n"
+ "ldr q26, [x28, #0x50]\n"
"ldr q25, [x28, #0x60]\n"
+ ".inst 0x6e98a410 // ummla v16.4s, v0.16b, v24.16b\n"
"ldr q24, [x28, #0x70]\n"
+ ".inst 0x6e9ea414 // ummla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x6e9da411 // ummla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x6e9ca415 // ummla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x6e9ba412 // ummla v18.4s, v0.16b, v27.16b\n"
+ ".inst 0x6e9aa416 // ummla v22.4s, v0.16b, v26.16b\n"
".inst 0x6e99a413 // ummla v19.4s, v0.16b, v25.16b\n"
".inst 0x6e98a417 // ummla v23.4s, v0.16b, v24.16b\n"
- "add x28, x28, #0x80\n"
"20:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -297,8 +297,8 @@ void a64_hybrid_u8qa_mmla_4x16 (
"mov v23.16b, v16.16b\n"
"tbnz %x[flags], #31, 21f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v16.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v16.4s }, [x20]\n"
"neg v16.4s, v16.4s\n"
"dup v11.4s, v11.s[0]\n"
"mul v11.4s, v11.4s, v16.4s\n"
@@ -312,16 +312,16 @@ void a64_hybrid_u8qa_mmla_4x16 (
"add v18.4s, v18.4s, v11.4s\n"
"add v19.4s, v19.4s, v11.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
- "ld1r { v16.4s }, [x20]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v16.4s }, [x20]\n"
"add v23.4s, v23.4s, v24.4s\n"
"add v17.4s, v17.4s, v22.4s\n"
- "add v18.4s, v18.4s, v21.4s\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x10, x10, #0x40\n"
"ld1r { v0.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v21.4s\n"
"add v19.4s, v19.4s, v20.4s\n"
"sqrdmulh v23.4s, v23.4s, v16.4s\n"
- "add x10, x10, #0x40\n"
"sqrdmulh v17.4s, v17.4s, v16.4s\n"
"sqrdmulh v18.4s, v18.4s, v16.4s\n"
"sqrdmulh v19.4s, v19.4s, v16.4s\n"
@@ -339,21 +339,21 @@ void a64_hybrid_u8qa_mmla_4x16 (
"sqadd v18.4s, v18.4s, v20.4s\n"
"sqadd v19.4s, v19.4s, v16.4s\n"
"22:" // Height 1: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v21.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
- "srshl v18.4s, v18.4s, v0.4s\n"
- "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v21.4s }, [x21]\n"
"ld1r { v20.4s }, [x20]\n"
- "add v23.4s, v23.4s, v21.4s\n"
- "add v17.4s, v17.4s, v21.4s\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v16.4s }, [x20]\n"
+ "add v23.4s, v23.4s, v21.4s\n"
+ "add v17.4s, v17.4s, v21.4s\n"
"add v18.4s, v18.4s, v21.4s\n"
"add v19.4s, v19.4s, v21.4s\n"
- "cmp x9, #0x10\n"
"smin v23.4s, v23.4s, v20.4s\n"
"smin v17.4s, v17.4s, v20.4s\n"
"smin v18.4s, v18.4s, v20.4s\n"
@@ -426,7 +426,7 @@ void a64_hybrid_u8qa_mmla_4x16 (
"movi v15.16b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"34:" // Height 2: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -440,8 +440,8 @@ void a64_hybrid_u8qa_mmla_4x16 (
"mov x26, #0x0\n"
"36:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 37f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -471,11 +471,13 @@ void a64_hybrid_u8qa_mmla_4x16 (
"blt 41f\n"
"39:" // Height 2: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"ldr q25, [x28, #0x70]\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q24, [x28, #0x80]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
"ldr q30, [x28, #0x90]\n"
".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
@@ -491,11 +493,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e98a430 // ummla v16.4s, v1.16b, v24.16b\n"
"ldr q24, [x28, #0xf0]\n"
".inst 0x6e9ea434 // ummla v20.4s, v1.16b, v30.16b\n"
- "add x24, x24, #0x10\n"
+ "add x28, x28, #0x100\n"
".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x6e9ca435 // ummla v21.4s, v1.16b, v28.16b\n"
- "add x28, x28, #0x100\n"
".inst 0x6e9ba432 // ummla v18.4s, v1.16b, v27.16b\n"
".inst 0x6e9aa436 // ummla v22.4s, v1.16b, v26.16b\n"
".inst 0x6e99a433 // ummla v19.4s, v1.16b, v25.16b\n"
@@ -507,9 +507,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q1, [x24, #0x0]\n"
"ldr q2, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q5, [x28, #0x0]\n"
"ldr q6, [x28, #0x10]\n"
+ "cmp x25, #0x20\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
"ldr q9, [x28, #0x40]\n"
@@ -520,11 +520,14 @@ void a64_hybrid_u8qa_mmla_4x16 (
"bge 39b\n"
"41:" // Height 2: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"ldr q25, [x28, #0x70]\n"
".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"ldr q24, [x28, #0x80]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
"ldr q30, [x28, #0x90]\n"
".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
@@ -539,14 +542,11 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q25, [x28, #0xe0]\n"
".inst 0x6e98a430 // ummla v16.4s, v1.16b, v24.16b\n"
"ldr q24, [x28, #0xf0]\n"
- "sub x25, x25, #0x10\n"
".inst 0x6e9ea434 // ummla v20.4s, v1.16b, v30.16b\n"
+ "add x28, x28, #0x100\n"
".inst 0x6e9da431 // ummla v17.4s, v1.16b, v29.16b\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
".inst 0x6e9ca435 // ummla v21.4s, v1.16b, v28.16b\n"
".inst 0x6e9ba432 // ummla v18.4s, v1.16b, v27.16b\n"
- "add x28, x28, #0x100\n"
".inst 0x6e9aa436 // ummla v22.4s, v1.16b, v26.16b\n"
".inst 0x6e99a433 // ummla v19.4s, v1.16b, v25.16b\n"
".inst 0x6e98a437 // ummla v23.4s, v1.16b, v24.16b\n"
@@ -568,24 +568,24 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"45:" // Height 2: Multiply loop: unique 7: skip row sum
"ldr q24, [x28, #0x0]\n"
- "ldr q26, [x28, #0x10]\n"
- ".inst 0x6e98a410 // ummla v16.4s, v0.16b, v24.16b\n"
+ "ldr q30, [x28, #0x10]\n"
"sub x25, x25, #0x8\n"
- "ldr q25, [x28, #0x20]\n"
- "ldr q24, [x28, #0x30]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
"cmp x25, #0x8\n"
- ".inst 0x6e9aa414 // ummla v20.4s, v0.16b, v26.16b\n"
"ldr q27, [x28, #0x40]\n"
"ldr q26, [x28, #0x50]\n"
- ".inst 0x6e99a411 // ummla v17.4s, v0.16b, v25.16b\n"
- ".inst 0x6e98a415 // ummla v21.4s, v0.16b, v24.16b\n"
"ldr q25, [x28, #0x60]\n"
+ ".inst 0x6e98a410 // ummla v16.4s, v0.16b, v24.16b\n"
"ldr q24, [x28, #0x70]\n"
+ ".inst 0x6e9ea414 // ummla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x6e9da411 // ummla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x6e9ca415 // ummla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x6e9ba412 // ummla v18.4s, v0.16b, v27.16b\n"
".inst 0x6e9aa416 // ummla v22.4s, v0.16b, v26.16b\n"
".inst 0x6e99a413 // ummla v19.4s, v0.16b, v25.16b\n"
".inst 0x6e98a417 // ummla v23.4s, v0.16b, v24.16b\n"
- "add x28, x28, #0x80\n"
"bge 44b\n"
"46:" // Height 2: Multiply loop: Skip odd blocks
"cbz x25, 52f\n"
@@ -620,23 +620,23 @@ void a64_hybrid_u8qa_mmla_4x16 (
"tbnz %x[flags], #31, 51f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
"51:" // Height 2: Multiply loop: unique 8: skip row sum
- "ldr q25, [x28, #0x0]\n"
- "ldr q24, [x28, #0x10]\n"
- ".inst 0x6e99a410 // ummla v16.4s, v0.16b, v25.16b\n"
- ".inst 0x6e98a414 // ummla v20.4s, v0.16b, v24.16b\n"
- "ldr q25, [x28, #0x20]\n"
- "ldr q24, [x28, #0x30]\n"
- ".inst 0x6e99a411 // ummla v17.4s, v0.16b, v25.16b\n"
- ".inst 0x6e98a415 // ummla v21.4s, v0.16b, v24.16b\n"
- "ldr q25, [x28, #0x40]\n"
- "ldr q24, [x28, #0x50]\n"
- ".inst 0x6e99a412 // ummla v18.4s, v0.16b, v25.16b\n"
- ".inst 0x6e98a416 // ummla v22.4s, v0.16b, v24.16b\n"
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "ldr q27, [x28, #0x40]\n"
+ "ldr q26, [x28, #0x50]\n"
"ldr q25, [x28, #0x60]\n"
+ ".inst 0x6e98a410 // ummla v16.4s, v0.16b, v24.16b\n"
"ldr q24, [x28, #0x70]\n"
+ ".inst 0x6e9ea414 // ummla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x6e9da411 // ummla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x6e9ca415 // ummla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x6e9ba412 // ummla v18.4s, v0.16b, v27.16b\n"
+ ".inst 0x6e9aa416 // ummla v22.4s, v0.16b, v26.16b\n"
".inst 0x6e99a413 // ummla v19.4s, v0.16b, v25.16b\n"
".inst 0x6e98a417 // ummla v23.4s, v0.16b, v24.16b\n"
- "add x28, x28, #0x80\n"
"52:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -644,21 +644,21 @@ void a64_hybrid_u8qa_mmla_4x16 (
"bne 36b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v24.2d, v16.2d, v20.2d\n"
- "add x23, x27, x20\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"mov v23.16b, v24.16b\n"
"tbnz %x[flags], #31, 53f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v24.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v24.4s }, [x20]\n"
"neg v24.4s, v24.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
@@ -676,10 +676,10 @@ void a64_hybrid_u8qa_mmla_4x16 (
"add v16.4s, v16.4s, v12.4s\n"
"add v17.4s, v17.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v24.4s }, [x20]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v23.4s, v23.4s, v28.4s\n"
"add v20.4s, v20.4s, v27.4s\n"
@@ -701,45 +701,45 @@ void a64_hybrid_u8qa_mmla_4x16 (
"sqrdmulh v19.4s, v19.4s, v24.4s\n"
"tbz %x[flags], #5, 54f\n"
"and v24.16b, v23.16b, v0.16b\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
- "sqadd v23.4s, v23.4s, v24.4s\n"
"and v30.16b, v20.16b, v0.16b\n"
"and v29.16b, v21.16b, v0.16b\n"
"and v28.16b, v22.16b, v0.16b\n"
"and v27.16b, v16.16b, v0.16b\n"
"and v26.16b, v17.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"and v25.16b, v18.16b, v0.16b\n"
- "and v24.16b, v19.16b, v0.16b\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v24.4s\n"
+ "and v24.16b, v19.16b, v0.16b\n"
"sshr v26.4s, v26.4s, #0x1f\n"
"sshr v25.4s, v25.4s, #0x1f\n"
- "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v20.4s, v20.4s, v30.4s\n"
"sqadd v21.4s, v21.4s, v29.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
"sqadd v22.4s, v22.4s, v28.4s\n"
"sqadd v16.4s, v16.4s, v27.4s\n"
"sqadd v17.4s, v17.4s, v26.4s\n"
"sqadd v18.4s, v18.4s, v25.4s\n"
"sqadd v19.4s, v19.4s, v24.4s\n"
"54:" // Height 2: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v26.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v0.4s\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x21]\n"
"ld1r { v25.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v24.4s }, [x20]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"add v23.4s, v23.4s, v26.4s\n"
"add v20.4s, v20.4s, v26.4s\n"
"add v21.4s, v21.4s, v26.4s\n"
@@ -773,68 +773,68 @@ void a64_hybrid_u8qa_mmla_4x16 (
"bge 63f\n"
"tbz x9, #3, 58f\n"
"str d23, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
+ "str d16, [x24], #0x8\n"
"tbz x9, #2, 56f\n"
"st1 { v23.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
"tbz x9, #1, 55f\n"
"st1 { v23.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
"tbz x9, #0, 62f\n"
"st1 { v23.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x23]\n"
+ "st1 { v16.b }[14], [x24]\n"
"b 62f\n"
"55:" // Height 2: Partial direct writeback: partial_1_12
"tbz x9, #0, 62f\n"
"st1 { v23.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x23]\n"
+ "st1 { v16.b }[12], [x24]\n"
"b 62f\n"
"56:" // Height 2: Partial direct writeback: partial_2_8
"tbz x9, #1, 57f\n"
"st1 { v23.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
"tbz x9, #0, 62f\n"
"st1 { v23.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x23]\n"
+ "st1 { v16.b }[10], [x24]\n"
"b 62f\n"
"57:" // Height 2: Partial direct writeback: partial_1_8
"tbz x9, #0, 62f\n"
"st1 { v23.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x23]\n"
+ "st1 { v16.b }[8], [x24]\n"
"b 62f\n"
"58:" // Height 2: Partial direct writeback: partial_4_0
"tbz x9, #2, 60f\n"
"str s23, [x27], #0x4\n"
- "str s16, [x23], #0x4\n"
+ "str s16, [x24], #0x4\n"
"tbz x9, #1, 59f\n"
"st1 { v23.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
"tbz x9, #0, 62f\n"
"st1 { v23.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x23]\n"
+ "st1 { v16.b }[6], [x24]\n"
"b 62f\n"
"59:" // Height 2: Partial direct writeback: partial_1_4
"tbz x9, #0, 62f\n"
"st1 { v23.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x23]\n"
+ "st1 { v16.b }[4], [x24]\n"
"b 62f\n"
"60:" // Height 2: Partial direct writeback: partial_2_0
"tbz x9, #1, 61f\n"
"str h23, [x27], #0x2\n"
- "str h16, [x23], #0x2\n"
+ "str h16, [x24], #0x2\n"
"tbz x9, #0, 62f\n"
"st1 { v23.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x23]\n"
+ "st1 { v16.b }[2], [x24]\n"
"b 62f\n"
"61:" // Height 2: Partial direct writeback: partial_1_0
"str b23, [x27, #0x0]\n"
- "str b16, [x23, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
"62:" // Height 2: Partial direct writeback: Done
"b 64f\n"
"63:" // Height 2: Full writeback
"str q23, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q16, [x23, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
"64:" // Height 2: Writeback done
"subs x9, x9, #0x10\n"
"bgt 34b\n"
@@ -848,7 +848,7 @@ void a64_hybrid_u8qa_mmla_4x16 (
"movi v15.16b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"66:" // Height 3: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -870,8 +870,8 @@ void a64_hybrid_u8qa_mmla_4x16 (
"mov x26, #0x0\n"
"68:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 69f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -905,35 +905,35 @@ void a64_hybrid_u8qa_mmla_4x16 (
"71:" // Height 3: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
- "ldr q14, [x28, #0x70]\n"
- ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q5, [x28, #0x60]\n"
+ "ldr q14, [x28, #0x60]\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n"
"ldr q4, [x28, #0x80]\n"
- ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
"ldr q7, [x28, #0x90]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
".inst 0x6e88a45d // ummla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
".inst 0x6e89a45a // ummla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- ".inst 0x6e85a413 // ummla v19.4s, v0.16b, v5.16b\n"
- ".inst 0x6e85a45b // ummla v27.4s, v2.16b, v5.16b\n"
+ ".inst 0x6e8ea413 // ummla v19.4s, v0.16b, v14.16b\n"
+ ".inst 0x6e8ea45b // ummla v27.4s, v2.16b, v14.16b\n"
"ldr q6, [x28, #0xd0]\n"
- ".inst 0x6e8ea417 // ummla v23.4s, v0.16b, v14.16b\n"
- ".inst 0x6e8ea45f // ummla v31.4s, v2.16b, v14.16b\n"
+ ".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e85a45f // ummla v31.4s, v2.16b, v5.16b\n"
"ldr q5, [x28, #0xe0]\n"
".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n"
".inst 0x6e84a478 // ummla v24.4s, v3.16b, v4.16b\n"
@@ -962,9 +962,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q1, [x24, #0x0]\n"
"ldr q2, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q3, [x22, #0x0]\n"
"ldr q5, [x28, #0x0]\n"
+ "cmp x25, #0x20\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
"ldr q8, [x28, #0x30]\n"
@@ -977,36 +977,36 @@ void a64_hybrid_u8qa_mmla_4x16 (
"73:" // Height 3: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
- ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
- "ldr q14, [x28, #0x70]\n"
- ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- "ldr q5, [x28, #0x60]\n"
+ "ldr q14, [x28, #0x60]\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0x70]\n"
".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n"
"ldr q4, [x28, #0x80]\n"
- ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
"ldr q7, [x28, #0x90]\n"
- "sub x25, x25, #0x10\n"
- ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
".inst 0x6e88a45d // ummla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
".inst 0x6e89a45a // ummla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- "add x22, x22, #0x10\n"
- ".inst 0x6e85a413 // ummla v19.4s, v0.16b, v5.16b\n"
- ".inst 0x6e85a45b // ummla v27.4s, v2.16b, v5.16b\n"
+ ".inst 0x6e8ea413 // ummla v19.4s, v0.16b, v14.16b\n"
+ ".inst 0x6e8ea45b // ummla v27.4s, v2.16b, v14.16b\n"
"ldr q6, [x28, #0xd0]\n"
- ".inst 0x6e8ea417 // ummla v23.4s, v0.16b, v14.16b\n"
- ".inst 0x6e8ea45f // ummla v31.4s, v2.16b, v14.16b\n"
+ ".inst 0x6e85a417 // ummla v23.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e85a45f // ummla v31.4s, v2.16b, v5.16b\n"
"ldr q5, [x28, #0xe0]\n"
".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n"
".inst 0x6e84a478 // ummla v24.4s, v3.16b, v4.16b\n"
@@ -1040,34 +1040,34 @@ void a64_hybrid_u8qa_mmla_4x16 (
"cmp x25, #0x8\n"
"blt 78f\n"
"76:" // Height 3: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
+ "ldr d3, [x24], #0x8\n"
"ldr d0, [x23], #0x8\n"
- "trn1 v0.2d, v1.2d, v0.2d\n"
"ldr d1, [x22], #0x8\n"
+ "trn1 v0.2d, v3.2d, v0.2d\n"
"trn1 v2.2d, v1.2d, v2.2d\n"
"tbnz %x[flags], #31, 77f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"77:" // Height 3: Multiply loop: unique 11: skip row sum
- "ldr q3, [x28, #0x0]\n"
- "ldr q1, [x28, #0x10]\n"
- ".inst 0x6e83a410 // ummla v16.4s, v0.16b, v3.16b\n"
- ".inst 0x6e83a458 // ummla v24.4s, v2.16b, v3.16b\n"
+ "ldr q1, [x28, #0x0]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "sub x25, x25, #0x8\n"
"ldr q7, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
- "sub x25, x25, #0x8\n"
"cmp x25, #0x8\n"
"ldr q5, [x28, #0x40]\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x6e81a414 // ummla v20.4s, v0.16b, v1.16b\n"
- ".inst 0x6e81a45c // ummla v28.4s, v2.16b, v1.16b\n"
"ldr q3, [x28, #0x60]\n"
+ ".inst 0x6e81a410 // ummla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x28, #0x70]\n"
+ ".inst 0x6e88a414 // ummla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e88a45c // ummla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n"
".inst 0x6e84a416 // ummla v22.4s, v0.16b, v4.16b\n"
@@ -1120,24 +1120,24 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"83:" // Height 3: Multiply loop: unique 12: skip row sum
"ldr q1, [x28, #0x0]\n"
- "ldr q3, [x28, #0x10]\n"
- ".inst 0x6e81a410 // ummla v16.4s, v0.16b, v1.16b\n"
- ".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n"
- "ldr q1, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
- ".inst 0x6e83a414 // ummla v20.4s, v0.16b, v3.16b\n"
- ".inst 0x6e83a45c // ummla v28.4s, v2.16b, v3.16b\n"
"ldr q5, [x28, #0x40]\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x6e81a411 // ummla v17.4s, v0.16b, v1.16b\n"
- ".inst 0x6e81a459 // ummla v25.4s, v2.16b, v1.16b\n"
"ldr q3, [x28, #0x60]\n"
+ ".inst 0x6e81a410 // ummla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x28, #0x70]\n"
+ ".inst 0x6e88a414 // ummla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e88a45c // ummla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n"
".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x6e84a416 // ummla v22.4s, v0.16b, v4.16b\n"
".inst 0x6e84a45e // ummla v30.4s, v2.16b, v4.16b\n"
".inst 0x6e83a413 // ummla v19.4s, v0.16b, v3.16b\n"
@@ -1151,18 +1151,18 @@ void a64_hybrid_u8qa_mmla_4x16 (
"bne 68b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v0.2d, v16.2d, v20.2d\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
- "uzp1 v20.2d, v17.2d, v21.2d\n"
"prfm pstl1keep, [x27, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v24.2d, v24.2d, v28.2d\n"
"uzp1 v25.2d, v25.2d, v29.2d\n"
"uzp1 v26.2d, v26.2d, v30.2d\n"
@@ -1170,9 +1170,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"mov v31.16b, v0.16b\n"
"tbnz %x[flags], #31, 85f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v23.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
+ "ld1r { v23.4s }, [x20]\n"
"neg v23.4s, v23.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
@@ -1192,10 +1192,10 @@ void a64_hybrid_u8qa_mmla_4x16 (
"add v16.4s, v16.4s, v12.4s\n"
"add v17.4s, v17.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v23.4s }, [x20]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v24.4s, v24.4s, v13.4s\n"
"add v25.4s, v25.4s, v13.4s\n"
@@ -1233,18 +1233,18 @@ void a64_hybrid_u8qa_mmla_4x16 (
"and v29.16b, v21.16b, v0.16b\n"
"and v28.16b, v22.16b, v0.16b\n"
"and v23.16b, v16.16b, v0.16b\n"
+ "and v3.16b, v17.16b, v0.16b\n"
"sshr v1.4s, v1.4s, #0x1f\n"
"sshr v30.4s, v30.4s, #0x1f\n"
"sshr v29.4s, v29.4s, #0x1f\n"
"sshr v28.4s, v28.4s, #0x1f\n"
"sshr v23.4s, v23.4s, #0x1f\n"
+ "and v2.16b, v18.16b, v0.16b\n"
"sqadd v31.4s, v31.4s, v1.4s\n"
"sqadd v20.4s, v20.4s, v30.4s\n"
"sqadd v21.4s, v21.4s, v29.4s\n"
"sqadd v22.4s, v22.4s, v28.4s\n"
"sqadd v16.4s, v16.4s, v23.4s\n"
- "and v3.16b, v17.16b, v0.16b\n"
- "and v2.16b, v18.16b, v0.16b\n"
"and v1.16b, v19.16b, v0.16b\n"
"and v30.16b, v24.16b, v0.16b\n"
"and v29.16b, v25.16b, v0.16b\n"
@@ -1265,21 +1265,21 @@ void a64_hybrid_u8qa_mmla_4x16 (
"sqadd v26.4s, v26.4s, v28.4s\n"
"sqadd v27.4s, v27.4s, v23.4s\n"
"86:" // Height 3: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v29.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v0.4s\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v29.4s }, [x21]\n"
"ld1r { v28.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v23.4s }, [x20]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"srshl v24.4s, v24.4s, v0.4s\n"
"srshl v25.4s, v25.4s, v0.4s\n"
"srshl v26.4s, v26.4s, v0.4s\n"
@@ -1332,102 +1332,103 @@ void a64_hybrid_u8qa_mmla_4x16 (
"bge 95f\n"
"tbz x9, #3, 90f\n"
"str d31, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d24, [x22], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
"tbz x9, #2, 88f\n"
"st1 { v31.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v24.s }[2], [x22], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
"tbz x9, #1, 87f\n"
"st1 { v31.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v24.h }[6], [x22], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
"tbz x9, #0, 94f\n"
"st1 { v31.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v24.b }[14], [x22]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
"b 94f\n"
"87:" // Height 3: Partial direct writeback: partial_1_12
"tbz x9, #0, 94f\n"
"st1 { v31.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v24.b }[12], [x22]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
"b 94f\n"
"88:" // Height 3: Partial direct writeback: partial_2_8
"tbz x9, #1, 89f\n"
"st1 { v31.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v24.h }[4], [x22], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
"tbz x9, #0, 94f\n"
"st1 { v31.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v24.b }[10], [x22]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
"b 94f\n"
"89:" // Height 3: Partial direct writeback: partial_1_8
"tbz x9, #0, 94f\n"
"st1 { v31.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v24.b }[8], [x22]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
"b 94f\n"
"90:" // Height 3: Partial direct writeback: partial_4_0
"tbz x9, #2, 92f\n"
"str s31, [x27], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s24, [x22], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
"tbz x9, #1, 91f\n"
"st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v24.h }[2], [x22], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
"tbz x9, #0, 94f\n"
"st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v24.b }[6], [x22]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
"b 94f\n"
"91:" // Height 3: Partial direct writeback: partial_1_4
"tbz x9, #0, 94f\n"
"st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v24.b }[4], [x22]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
"b 94f\n"
"92:" // Height 3: Partial direct writeback: partial_2_0
"tbz x9, #1, 93f\n"
"str h31, [x27], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h24, [x22], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
"tbz x9, #0, 94f\n"
"st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v24.b }[2], [x22]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
"b 94f\n"
"93:" // Height 3: Partial direct writeback: partial_1_0
"str b31, [x27, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b24, [x22, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
"94:" // Height 3: Partial direct writeback: Done
"b 96f\n"
"95:" // Height 3: Full writeback
"str q31, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q16, [x23, #0x0]\n"
- "str q24, [x22, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
"96:" // Height 3: Writeback done
"subs x9, x9, #0x10\n"
"bgt 66b\n"
"b 130f\n"
"97:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x4\n"
"mov x10, %x[col_bias]\n"
"movi v11.4s, #0x0\n"
"movi v12.4s, #0x0\n"
- "movi v13.4s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
"movi v14.4s, #0x0\n"
- "movi v15.16b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "movi v15.16b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"98:" // Height 4: Column loop
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -1449,8 +1450,8 @@ void a64_hybrid_u8qa_mmla_4x16 (
"mov x26, #0x0\n"
"100:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 101f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1488,28 +1489,28 @@ void a64_hybrid_u8qa_mmla_4x16 (
"103:" // Height 4: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
"ldr q4, [x28, #0x60]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
"ldr q5, [x28, #0x70]\n"
- ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
- ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
"ldr q7, [x28, #0x90]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
".inst 0x6e88a45d // ummla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
".inst 0x6e89a45a // ummla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x21, x21, #0x10\n"
".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
@@ -1546,9 +1547,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"ldr q1, [x24, #0x0]\n"
"ldr q2, [x23, #0x0]\n"
"sub x25, x25, #0x10\n"
- "cmp x25, #0x20\n"
"ldr q3, [x22, #0x0]\n"
"ldr q4, [x21, #0x0]\n"
+ "cmp x25, #0x20\n"
"ldr q5, [x28, #0x0]\n"
"ldr q6, [x28, #0x10]\n"
"ldr q7, [x28, #0x20]\n"
@@ -1563,32 +1564,32 @@ void a64_hybrid_u8qa_mmla_4x16 (
"105:" // Height 4: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
"sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
"ldr q4, [x28, #0x60]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x6e85a410 // ummla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
".inst 0x6e85a458 // ummla v24.4s, v2.16b, v5.16b\n"
"ldr q5, [x28, #0x70]\n"
- ".inst 0x6e86a414 // ummla v20.4s, v0.16b, v6.16b\n"
".inst 0x6e86a45c // ummla v28.4s, v2.16b, v6.16b\n"
"ldr q6, [x28, #0x80]\n"
- ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
"ldr q7, [x28, #0x90]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6e88a415 // ummla v21.4s, v0.16b, v8.16b\n"
".inst 0x6e88a45d // ummla v29.4s, v2.16b, v8.16b\n"
"ldr q8, [x28, #0xa0]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e89a412 // ummla v18.4s, v0.16b, v9.16b\n"
".inst 0x6e89a45a // ummla v26.4s, v2.16b, v9.16b\n"
"ldr q9, [x28, #0xb0]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e8aa416 // ummla v22.4s, v0.16b, v10.16b\n"
".inst 0x6e8aa45e // ummla v30.4s, v2.16b, v10.16b\n"
"ldr q10, [x28, #0xc0]\n"
- "add x21, x21, #0x10\n"
".inst 0x6e84a413 // ummla v19.4s, v0.16b, v4.16b\n"
".inst 0x6e84a45b // ummla v27.4s, v2.16b, v4.16b\n"
"ldr q4, [x28, #0xd0]\n"
@@ -1628,35 +1629,35 @@ void a64_hybrid_u8qa_mmla_4x16 (
"cmp x25, #0x8\n"
"blt 110f\n"
"108:" // Height 4: Multiply loop: Odd block loop
- "ldr d1, [x24], #0x8\n"
+ "ldr d3, [x24], #0x8\n"
"ldr d0, [x23], #0x8\n"
- "trn1 v0.2d, v1.2d, v0.2d\n"
"ldr d2, [x22], #0x8\n"
"ldr d1, [x21], #0x8\n"
+ "trn1 v0.2d, v3.2d, v0.2d\n"
"trn1 v2.2d, v2.2d, v1.2d\n"
"tbnz %x[flags], #31, 109f\n"
".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"109:" // Height 4: Multiply loop: unique 15: skip row sum
- "ldr q3, [x28, #0x0]\n"
- "ldr q1, [x28, #0x10]\n"
- ".inst 0x6e83a410 // ummla v16.4s, v0.16b, v3.16b\n"
- ".inst 0x6e83a458 // ummla v24.4s, v2.16b, v3.16b\n"
+ "ldr q1, [x28, #0x0]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "sub x25, x25, #0x8\n"
"ldr q7, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
- "sub x25, x25, #0x8\n"
"cmp x25, #0x8\n"
"ldr q5, [x28, #0x40]\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x6e81a414 // ummla v20.4s, v0.16b, v1.16b\n"
- ".inst 0x6e81a45c // ummla v28.4s, v2.16b, v1.16b\n"
"ldr q3, [x28, #0x60]\n"
+ ".inst 0x6e81a410 // ummla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x28, #0x70]\n"
+ ".inst 0x6e88a414 // ummla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e88a45c // ummla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n"
".inst 0x6e84a416 // ummla v22.4s, v0.16b, v4.16b\n"
@@ -1716,24 +1717,24 @@ void a64_hybrid_u8qa_mmla_4x16 (
".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
"115:" // Height 4: Multiply loop: unique 16: skip row sum
"ldr q1, [x28, #0x0]\n"
- "ldr q3, [x28, #0x10]\n"
- ".inst 0x6e81a410 // ummla v16.4s, v0.16b, v1.16b\n"
- ".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n"
- "ldr q1, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
"ldr q6, [x28, #0x30]\n"
- ".inst 0x6e83a414 // ummla v20.4s, v0.16b, v3.16b\n"
- ".inst 0x6e83a45c // ummla v28.4s, v2.16b, v3.16b\n"
"ldr q5, [x28, #0x40]\n"
"ldr q4, [x28, #0x50]\n"
- ".inst 0x6e81a411 // ummla v17.4s, v0.16b, v1.16b\n"
- ".inst 0x6e81a459 // ummla v25.4s, v2.16b, v1.16b\n"
"ldr q3, [x28, #0x60]\n"
+ ".inst 0x6e81a410 // ummla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x28, #0x70]\n"
+ ".inst 0x6e88a414 // ummla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x6e88a45c // ummla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x6e87a411 // ummla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e87a459 // ummla v25.4s, v2.16b, v7.16b\n"
".inst 0x6e86a415 // ummla v21.4s, v0.16b, v6.16b\n"
".inst 0x6e86a45d // ummla v29.4s, v2.16b, v6.16b\n"
".inst 0x6e85a412 // ummla v18.4s, v0.16b, v5.16b\n"
".inst 0x6e85a45a // ummla v26.4s, v2.16b, v5.16b\n"
- "add x28, x28, #0x80\n"
".inst 0x6e84a416 // ummla v22.4s, v0.16b, v4.16b\n"
".inst 0x6e84a45e // ummla v30.4s, v2.16b, v4.16b\n"
".inst 0x6e83a413 // ummla v19.4s, v0.16b, v3.16b\n"
@@ -1747,22 +1748,22 @@ void a64_hybrid_u8qa_mmla_4x16 (
"bne 100b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 v0.2d, v16.2d, v20.2d\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
- "add x21, x22, x20\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
- "uzp1 v20.2d, v17.2d, v21.2d\n"
"prfm pstl1keep, [x27, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
+ "add x22, x23, x20\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
"uzp2 v24.2d, v24.2d, v28.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v28.2d, v25.2d, v29.2d\n"
"uzp2 v25.2d, v25.2d, v29.2d\n"
"uzp1 v29.2d, v26.2d, v30.2d\n"
@@ -1772,9 +1773,9 @@ void a64_hybrid_u8qa_mmla_4x16 (
"mov v31.16b, v0.16b\n"
"tbnz %x[flags], #31, 117f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1r { v0.4s }, [x20]\n"
"addp v11.4s, v11.4s, v11.4s\n"
"addp v13.4s, v13.4s, v13.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
"neg v0.4s, v0.4s\n"
"dup v12.4s, v11.s[3]\n"
"dup v11.4s, v11.s[0]\n"
@@ -1782,8 +1783,8 @@ void a64_hybrid_u8qa_mmla_4x16 (
"dup v13.4s, v13.s[0]\n"
"mul v11.4s, v11.4s, v0.4s\n"
"mul v12.4s, v12.4s, v0.4s\n"
- "mul v13.4s, v13.4s, v0.4s\n"
"mul v14.4s, v14.4s, v0.4s\n"
+ "mul v13.4s, v13.4s, v0.4s\n"
"117:" // Height 4: skip row sum fixup
"ldr q0, [x10, #0x0]\n"
"ldr q4, [x10, #0x10]\n"
@@ -1796,10 +1797,10 @@ void a64_hybrid_u8qa_mmla_4x16 (
"add v16.4s, v16.4s, v12.4s\n"
"add v17.4s, v17.4s, v12.4s\n"
"add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
"ld1r { v1.4s }, [x20]\n"
"add v18.4s, v18.4s, v12.4s\n"
"add v19.4s, v19.4s, v12.4s\n"
- "orr %x[flags], %x[flags], #0x80000000\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
"add v23.4s, v23.4s, v13.4s\n"
"add v28.4s, v28.4s, v13.4s\n"
@@ -1846,32 +1847,32 @@ void a64_hybrid_u8qa_mmla_4x16 (
"tbz %x[flags], #5, 118f\n"
"and v2.16b, v31.16b, v0.16b\n"
"and v1.16b, v20.16b, v0.16b\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
- "sqadd v31.4s, v31.4s, v2.4s\n"
- "sqadd v20.4s, v20.4s, v1.4s\n"
"and v7.16b, v21.16b, v0.16b\n"
"and v6.16b, v22.16b, v0.16b\n"
"and v5.16b, v16.16b, v0.16b\n"
"and v4.16b, v17.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"and v3.16b, v18.16b, v0.16b\n"
- "and v2.16b, v19.16b, v0.16b\n"
- "and v1.16b, v23.16b, v0.16b\n"
"sshr v7.4s, v7.4s, #0x1f\n"
"sshr v6.4s, v6.4s, #0x1f\n"
"sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v2.4s\n"
+ "sqadd v20.4s, v20.4s, v1.4s\n"
+ "and v2.16b, v19.16b, v0.16b\n"
+ "and v1.16b, v23.16b, v0.16b\n"
"sshr v4.4s, v4.4s, #0x1f\n"
"sshr v3.4s, v3.4s, #0x1f\n"
- "sshr v2.4s, v2.4s, #0x1f\n"
- "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v21.4s, v21.4s, v7.4s\n"
"sqadd v22.4s, v22.4s, v6.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
"sqadd v16.4s, v16.4s, v5.4s\n"
"sqadd v17.4s, v17.4s, v4.4s\n"
"sqadd v18.4s, v18.4s, v3.4s\n"
+ "and v7.16b, v28.16b, v0.16b\n"
"sqadd v19.4s, v19.4s, v2.4s\n"
"sqadd v23.4s, v23.4s, v1.4s\n"
- "and v7.16b, v28.16b, v0.16b\n"
"and v6.16b, v29.16b, v0.16b\n"
"and v5.16b, v30.16b, v0.16b\n"
"and v4.16b, v24.16b, v0.16b\n"
@@ -1893,21 +1894,21 @@ void a64_hybrid_u8qa_mmla_4x16 (
"sqadd v26.4s, v26.4s, v2.4s\n"
"sqadd v27.4s, v27.4s, v1.4s\n"
"118:" // Height 4: no shift correction
- "add x20, %x[qp], %[c_offset]\n"
- "ld1r { v3.4s }, [x20]\n"
+ "add x21, %x[qp], %[c_offset]\n"
"srshl v31.4s, v31.4s, v0.4s\n"
"srshl v20.4s, v20.4s, v0.4s\n"
- "srshl v21.4s, v21.4s, v0.4s\n"
- "srshl v22.4s, v22.4s, v0.4s\n"
"add x20, %x[qp], %[maxval]\n"
+ "ld1r { v3.4s }, [x21]\n"
"ld1r { v2.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
"srshl v16.4s, v16.4s, v0.4s\n"
"srshl v17.4s, v17.4s, v0.4s\n"
"add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
"ld1r { v1.4s }, [x20]\n"
"srshl v18.4s, v18.4s, v0.4s\n"
"srshl v19.4s, v19.4s, v0.4s\n"
- "cmp x9, #0x10\n"
"srshl v23.4s, v23.4s, v0.4s\n"
"srshl v28.4s, v28.4s, v0.4s\n"
"srshl v29.4s, v29.4s, v0.4s\n"
@@ -1979,100 +1980,100 @@ void a64_hybrid_u8qa_mmla_4x16 (
"bge 127f\n"
"tbz x9, #3, 122f\n"
"str d31, [x27], #0x8\n"
- "str d16, [x23], #0x8\n"
- "str d23, [x22], #0x8\n"
- "str d24, [x21], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
"tbz x9, #2, 120f\n"
"st1 { v31.s }[2], [x27], #0x4\n"
- "st1 { v16.s }[2], [x23], #0x4\n"
- "st1 { v23.s }[2], [x22], #0x4\n"
- "st1 { v24.s }[2], [x21], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v23.s }[2], [x23], #0x4\n"
+ "st1 { v24.s }[2], [x22], #0x4\n"
"tbz x9, #1, 119f\n"
"st1 { v31.h }[6], [x27], #0x2\n"
- "st1 { v16.h }[6], [x23], #0x2\n"
- "st1 { v23.h }[6], [x22], #0x2\n"
- "st1 { v24.h }[6], [x21], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "st1 { v23.h }[6], [x23], #0x2\n"
+ "st1 { v24.h }[6], [x22], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[14], [x27]\n"
- "st1 { v16.b }[14], [x23]\n"
- "st1 { v23.b }[14], [x22]\n"
- "st1 { v24.b }[14], [x21]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "st1 { v23.b }[14], [x23]\n"
+ "st1 { v24.b }[14], [x22]\n"
"b 126f\n"
"119:" // Height 4: Partial direct writeback: partial_1_12
"tbz x9, #0, 126f\n"
"st1 { v31.b }[12], [x27]\n"
- "st1 { v16.b }[12], [x23]\n"
- "st1 { v23.b }[12], [x22]\n"
- "st1 { v24.b }[12], [x21]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "st1 { v23.b }[12], [x23]\n"
+ "st1 { v24.b }[12], [x22]\n"
"b 126f\n"
"120:" // Height 4: Partial direct writeback: partial_2_8
"tbz x9, #1, 121f\n"
"st1 { v31.h }[4], [x27], #0x2\n"
- "st1 { v16.h }[4], [x23], #0x2\n"
- "st1 { v23.h }[4], [x22], #0x2\n"
- "st1 { v24.h }[4], [x21], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "st1 { v23.h }[4], [x23], #0x2\n"
+ "st1 { v24.h }[4], [x22], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[10], [x27]\n"
- "st1 { v16.b }[10], [x23]\n"
- "st1 { v23.b }[10], [x22]\n"
- "st1 { v24.b }[10], [x21]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "st1 { v23.b }[10], [x23]\n"
+ "st1 { v24.b }[10], [x22]\n"
"b 126f\n"
"121:" // Height 4: Partial direct writeback: partial_1_8
"tbz x9, #0, 126f\n"
"st1 { v31.b }[8], [x27]\n"
- "st1 { v16.b }[8], [x23]\n"
- "st1 { v23.b }[8], [x22]\n"
- "st1 { v24.b }[8], [x21]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "st1 { v23.b }[8], [x23]\n"
+ "st1 { v24.b }[8], [x22]\n"
"b 126f\n"
"122:" // Height 4: Partial direct writeback: partial_4_0
"tbz x9, #2, 124f\n"
"str s31, [x27], #0x4\n"
- "str s16, [x23], #0x4\n"
- "str s23, [x22], #0x4\n"
- "str s24, [x21], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "str s23, [x23], #0x4\n"
+ "str s24, [x22], #0x4\n"
"tbz x9, #1, 123f\n"
"st1 { v31.h }[2], [x27], #0x2\n"
- "st1 { v16.h }[2], [x23], #0x2\n"
- "st1 { v23.h }[2], [x22], #0x2\n"
- "st1 { v24.h }[2], [x21], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "st1 { v23.h }[2], [x23], #0x2\n"
+ "st1 { v24.h }[2], [x22], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[6], [x27]\n"
- "st1 { v16.b }[6], [x23]\n"
- "st1 { v23.b }[6], [x22]\n"
- "st1 { v24.b }[6], [x21]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "st1 { v23.b }[6], [x23]\n"
+ "st1 { v24.b }[6], [x22]\n"
"b 126f\n"
"123:" // Height 4: Partial direct writeback: partial_1_4
"tbz x9, #0, 126f\n"
"st1 { v31.b }[4], [x27]\n"
- "st1 { v16.b }[4], [x23]\n"
- "st1 { v23.b }[4], [x22]\n"
- "st1 { v24.b }[4], [x21]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "st1 { v23.b }[4], [x23]\n"
+ "st1 { v24.b }[4], [x22]\n"
"b 126f\n"
"124:" // Height 4: Partial direct writeback: partial_2_0
"tbz x9, #1, 125f\n"
"str h31, [x27], #0x2\n"
- "str h16, [x23], #0x2\n"
- "str h23, [x22], #0x2\n"
- "str h24, [x21], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "str h23, [x23], #0x2\n"
+ "str h24, [x22], #0x2\n"
"tbz x9, #0, 126f\n"
"st1 { v31.b }[2], [x27]\n"
- "st1 { v16.b }[2], [x23]\n"
- "st1 { v23.b }[2], [x22]\n"
- "st1 { v24.b }[2], [x21]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "st1 { v23.b }[2], [x23]\n"
+ "st1 { v24.b }[2], [x22]\n"
"b 126f\n"
"125:" // Height 4: Partial direct writeback: partial_1_0
"str b31, [x27, #0x0]\n"
- "str b16, [x23, #0x0]\n"
- "str b23, [x22, #0x0]\n"
- "str b24, [x21, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "str b23, [x23, #0x0]\n"
+ "str b24, [x22, #0x0]\n"
"126:" // Height 4: Partial direct writeback: Done
"b 128f\n"
"127:" // Height 4: Full writeback
"str q31, [x27, #0x0]\n"
"add x27, x27, #0x10\n"
- "str q16, [x23, #0x0]\n"
- "str q23, [x22, #0x0]\n"
- "str q24, [x21, #0x0]\n"
+ "str q16, [x24, #0x0]\n"
+ "str q23, [x23, #0x0]\n"
+ "str q24, [x22, #0x0]\n"
"128:" // Height 4: Writeback done
"subs x9, x9, #0x10\n"
"bgt 98b\n"
@@ -2088,8 +2089,8 @@ void a64_hybrid_u8qa_mmla_4x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"130:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
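
A note on the hunk above: the revised asm operand lists drop output_ptr as a "+&r" inout register and instead pass it through the KernelArgs block, loading it inside the asm via the new [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)) input operand. Below is a minimal sketch of that pattern, not the library's code; the Args struct, the store_zero name, and the x20 scratch register are illustrative only:

    #include <cstddef>  // offsetof

    struct Args {
        void *output_ptr;  // illustrative stand-in for KernelArgs::output_ptr
    };

    // AArch64-only sketch: fetch a pointer out of an argument struct inside
    // inline asm using an offsetof() immediate, then store through it.
    void store_zero(Args *args) {
        __asm__ __volatile__(
            "ldr x20, [%x[args], %[off_out]]\n"  // x20 = args->output_ptr
            "str xzr, [x20]\n"                   // *output_ptr = 0
            :
            : [args] "r" (args), [off_out] "I" (offsetof(Args, output_ptr))
            : "memory", "x20");
    }

Routing the pointer through the argument block frees a general-purpose register across the long asm body, which is consistent with the writeback addresses in this hunk shifting from x21..x23 to x22..x24.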
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16.hpp
new file mode 100644
index 0000000000..be1947effc
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16.hpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<uint8_t>, \
+ size_t, size_t, \
+ const int8_t *, \
+ IndirectOutputArg<uint8_t>, \
+ const Requantize32 *, const int32_t *, unsigned int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_hybrid_u8s8qa_dot_4x16( ARGLIST );
+
+class cls_a64_hybrid_u8s8qa_dot_4x16
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef uint8_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 4;
+ }
+
+ static constexpr unsigned int out_width()
+ {
+ return 16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return false;
+ }
+
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 16, 4> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ case CPUModel::A55r1:
+ return { 7.5301 };
+ case CPUModel::A510:
+ return { 14.81 };
+ case CPUModel::V1:
+ return { 44.54 };
+ default:
+ return { 27.5482 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_hybrid_u8s8qa_dot_4x16;
+ cls_a64_hybrid_u8s8qa_dot_4x16(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
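
The header above declares the new hybrid kernel with a uint8_t LHS, an int8_t RHS, a 4x16 output tile, and a k_unroll of 4. The generic.cpp added next implements it with SUDOT, the signed-by-unsigned 8-bit dot product from FEAT_I8MM, emitted as raw .inst words with the mnemonic kept in a trailing comment so the file assembles even where the assembler predates I8MM. As a hedged sketch of what each ".inst ... // sudot" pair computes, expressed with the corresponding ACLE intrinsic (compile for an i8mm-capable target; the function name is illustrative):

    #include <arm_neon.h>

    // Mirrors "sudot vd.4s, vn.16b, vm.4b[0]": for each 32-bit lane i,
    //   acc[i] += sum over j=0..3 of (int8_t)b[4*i + j] * (uint8_t)a[j]
    int32x4_t sudot_lane0(int32x4_t acc, int8x16_t b, uint8x16_t a)
    {
        return vsudotq_laneq_s32(acc, b, a, 0);
    }

In the kernel body that follows, the b operand holds packed int8 weights and the a operand holds uint8 activations selected four bytes at a time by lane, so v16..v31 accumulate int32 partial sums; the epilogue then requantizes them back to uint8 via sqrdmulh/srshl, the c_offset add, the minval/maxval clamp, and uzp1 narrowing.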
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16/generic.cpp
new file mode 100644
index 0000000000..e5ca848fb9
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_dot_4x16/generic.cpp
@@ -0,0 +1,2027 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <cstdint>
+
+namespace arm_gemm {
+
+void a64_hybrid_u8s8qa_dot_4x16 (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<uint8_t> A_arg,
+ size_t M, size_t N, const int8_t *B_ptr, IndirectOutputArg<uint8_t> output_arg,
+ const Requantize32 *qp, const int32_t *col_bias, unsigned int
+)
+{
+ struct KernelArgs {
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const int8_t *B_ptr = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ void *output_ptr = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ ka.output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ if (qp->c_offset > qp->minval) {
+ flags |= 0x20;
+ }
+ __asm__ __volatile__(
+ "1:" // Row loop
+ "cmp %x[M], #0x4\n"
+ "bge 91f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 61f\n"
+ "beq 31f\n"
+ "mov x10, %x[col_bias]\n"
+ "movi v11.4s, #0x0\n"
+ "movi v15.16b, #0x1\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "2:" // Height 1: Column loop
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "3:" // Height 1: setup done
+ "mov x26, #0x0\n"
+ "4:" // Height 1: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 5f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "cbnz x26, 6f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "b 6f\n"
+ "5:" // Height 1: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "6:" // Height 1: input setup done
+ "cmp x25, #0x10\n"
+ "blt 11f\n"
+ "ldr q0, [x24, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q8, [x28, #0x40]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "blt 9f\n"
+ "7:" // Height 1: Multiply loop: Main loop head
+ ".inst 0x4f00f090 // sudot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q21, [x28, #0x70]\n"
+ ".inst 0x4f00f0b1 // sudot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q20, [x28, #0x80]\n"
+ ".inst 0x4f00f0d2 // sudot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q26, [x28, #0x90]\n"
+ ".inst 0x4f00f0f3 // sudot v19.4s, v7.16b, v0.4b[0]\n"
+ "ldr q25, [x28, #0xa0]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f20f110 // sudot v16.4s, v8.16b, v0.4b[1]\n"
+ "ldr q24, [x28, #0xb0]\n"
+ ".inst 0x4f20f131 // sudot v17.4s, v9.16b, v0.4b[1]\n"
+ "ldr q23, [x28, #0xc0]\n"
+ ".inst 0x4f20f152 // sudot v18.4s, v10.16b, v0.4b[1]\n"
+ "ldr q22, [x28, #0xd0]\n"
+ ".inst 0x4f20f2b3 // sudot v19.4s, v21.16b, v0.4b[1]\n"
+ "ldr q21, [x28, #0xe0]\n"
+ ".inst 0x4f00fa90 // sudot v16.4s, v20.16b, v0.4b[2]\n"
+ "ldr q20, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4f00fb51 // sudot v17.4s, v26.16b, v0.4b[2]\n"
+ ".inst 0x4f00fb32 // sudot v18.4s, v25.16b, v0.4b[2]\n"
+ ".inst 0x4f00fb13 // sudot v19.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x4f20faf0 // sudot v16.4s, v23.16b, v0.4b[3]\n"
+ ".inst 0x4f20fad1 // sudot v17.4s, v22.16b, v0.4b[3]\n"
+ ".inst 0x4f20fab2 // sudot v18.4s, v21.16b, v0.4b[3]\n"
+ ".inst 0x4f20fa93 // sudot v19.4s, v20.16b, v0.4b[3]\n"
+ "tbnz %x[flags], #31, 8f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ "8:" // Height 1: Multiply loop: unique 1: skip row sum
+ "ldr q0, [x24, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
+ "sub x25, x25, #0x10\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "cmp x25, #0x20\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q8, [x28, #0x40]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "bge 7b\n"
+ "9:" // Height 1: Multiply loop: Single iteration only
+ ".inst 0x4f00f090 // sudot v16.4s, v4.16b, v0.4b[0]\n"
+ "ldr q21, [x28, #0x70]\n"
+ ".inst 0x4f00f0b1 // sudot v17.4s, v5.16b, v0.4b[0]\n"
+ "ldr q20, [x28, #0x80]\n"
+ ".inst 0x4f00f0d2 // sudot v18.4s, v6.16b, v0.4b[0]\n"
+ "ldr q26, [x28, #0x90]\n"
+ ".inst 0x4f00f0f3 // sudot v19.4s, v7.16b, v0.4b[0]\n"
+ "ldr q25, [x28, #0xa0]\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f20f110 // sudot v16.4s, v8.16b, v0.4b[1]\n"
+ "ldr q24, [x28, #0xb0]\n"
+ ".inst 0x4f20f131 // sudot v17.4s, v9.16b, v0.4b[1]\n"
+ "ldr q23, [x28, #0xc0]\n"
+ ".inst 0x4f20f152 // sudot v18.4s, v10.16b, v0.4b[1]\n"
+ "ldr q22, [x28, #0xd0]\n"
+ ".inst 0x4f20f2b3 // sudot v19.4s, v21.16b, v0.4b[1]\n"
+ "ldr q21, [x28, #0xe0]\n"
+ ".inst 0x4f00fa90 // sudot v16.4s, v20.16b, v0.4b[2]\n"
+ "ldr q20, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4f00fb51 // sudot v17.4s, v26.16b, v0.4b[2]\n"
+ ".inst 0x4f00fb32 // sudot v18.4s, v25.16b, v0.4b[2]\n"
+ ".inst 0x4f00fb13 // sudot v19.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x4f20faf0 // sudot v16.4s, v23.16b, v0.4b[3]\n"
+ ".inst 0x4f20fad1 // sudot v17.4s, v22.16b, v0.4b[3]\n"
+ ".inst 0x4f20fab2 // sudot v18.4s, v21.16b, v0.4b[3]\n"
+ ".inst 0x4f20fa93 // sudot v19.4s, v20.16b, v0.4b[3]\n"
+ "tbnz %x[flags], #31, 10f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ "10:" // Height 1: Multiply loop: unique 2: skip row sum
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "11:" // Height 1: Multiply loop: Main loop skip
+ "cbz x25, 18f\n"
+ "cmp x25, #0x4\n"
+ "blt 14f\n"
+ "12:" // Height 1: Multiply loop: Odd block loop
+ "ldr s0, [x24], #0x4\n"
+ "tbnz %x[flags], #31, 13f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ "13:" // Height 1: Multiply loop: unique 3: skip row sum
+ "ldr q23, [x28, #0x0]\n"
+ "ldr q22, [x28, #0x10]\n"
+ "sub x25, x25, #0x4\n"
+ "ldr q21, [x28, #0x20]\n"
+ "ldr q20, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f00f2f0 // sudot v16.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x4f00f2d1 // sudot v17.4s, v22.16b, v0.4b[0]\n"
+ ".inst 0x4f00f2b2 // sudot v18.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f00f293 // sudot v19.4s, v20.16b, v0.4b[0]\n"
+ "bge 12b\n"
+ "14:" // Height 1: Multiply loop: Skip odd blocks
+ "cbz x25, 18f\n"
+ "tbz x25, #1, 15f\n"
+ "ldr h0, [x24], #0x2\n"
+ "tbz x25, #0, 16f\n"
+ "ld1 { v0.b }[2], [x24]\n"
+ "b 16f\n"
+ "15:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x24, #0x0]\n"
+ "16:" // Height 1: Multiply loop: Ragged operand read: Done
+ "tbnz %x[flags], #31, 17f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ "17:" // Height 1: Multiply loop: unique 4: skip row sum
+ "ldr q23, [x28, #0x0]\n"
+ "ldr q22, [x28, #0x10]\n"
+ "ldr q21, [x28, #0x20]\n"
+ "ldr q20, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f00f2f0 // sudot v16.4s, v23.16b, v0.4b[0]\n"
+ ".inst 0x4f00f2d1 // sudot v17.4s, v22.16b, v0.4b[0]\n"
+ ".inst 0x4f00f2b2 // sudot v18.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f00f293 // sudot v19.4s, v20.16b, v0.4b[0]\n"
+ "18:" // Height 1: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 4b\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "tbnz %x[flags], #31, 19f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v20.4s }, [x20]\n"
+ "neg v20.4s, v20.4s\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "mul v11.4s, v11.4s, v20.4s\n"
+ "19:" // Height 1: skip row sum fixup
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q23, [x10, #0x10]\n"
+ "add v16.4s, v16.4s, v11.4s\n"
+ "add v17.4s, v17.4s, v11.4s\n"
+ "ldr q22, [x10, #0x20]\n"
+ "ldr q21, [x10, #0x30]\n"
+ "add v18.4s, v18.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v11.4s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v20.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v24.4s\n"
+ "add v17.4s, v17.4s, v23.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x10, x10, #0x40\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add v19.4s, v19.4s, v21.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v20.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v20.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v20.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v20.4s\n"
+ "tbz %x[flags], #5, 20f\n"
+ "and v23.16b, v16.16b, v0.16b\n"
+ "and v22.16b, v17.16b, v0.16b\n"
+ "and v21.16b, v18.16b, v0.16b\n"
+ "and v20.16b, v19.16b, v0.16b\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v23.4s\n"
+ "sqadd v17.4s, v17.4s, v22.4s\n"
+ "sqadd v18.4s, v18.4s, v21.4s\n"
+ "sqadd v19.4s, v19.4s, v20.4s\n"
+ "20:" // Height 1: no shift correction
+ "add x21, %x[qp], %[c_offset]\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v0.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v22.4s }, [x21]\n"
+ "ld1r { v21.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
+ "ld1r { v20.4s }, [x20]\n"
+ "add v16.4s, v16.4s, v22.4s\n"
+ "add v17.4s, v17.4s, v22.4s\n"
+ "add v18.4s, v18.4s, v22.4s\n"
+ "add v19.4s, v19.4s, v22.4s\n"
+ "smin v16.4s, v16.4s, v21.4s\n"
+ "smin v17.4s, v17.4s, v21.4s\n"
+ "smin v18.4s, v18.4s, v21.4s\n"
+ "smin v19.4s, v19.4s, v21.4s\n"
+ "smax v16.4s, v16.4s, v20.4s\n"
+ "smax v17.4s, v17.4s, v20.4s\n"
+ "smax v18.4s, v18.4s, v20.4s\n"
+ "smax v19.4s, v19.4s, v20.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "uzp1 v16.16b, v16.16b, v17.16b\n"
+ "bge 29f\n"
+ "tbz x9, #3, 24f\n"
+ "str d16, [x27], #0x8\n"
+ "tbz x9, #2, 22f\n"
+ "st1 { v16.s }[2], [x27], #0x4\n"
+ "tbz x9, #1, 21f\n"
+ "st1 { v16.h }[6], [x27], #0x2\n"
+ "tbz x9, #0, 28f\n"
+ "st1 { v16.b }[14], [x27]\n"
+ "b 28f\n"
+ "21:" // Height 1: Partial direct writeback: partial_1_12
+ "tbz x9, #0, 28f\n"
+ "st1 { v16.b }[12], [x27]\n"
+ "b 28f\n"
+ "22:" // Height 1: Partial direct writeback: partial_2_8
+ "tbz x9, #1, 23f\n"
+ "st1 { v16.h }[4], [x27], #0x2\n"
+ "tbz x9, #0, 28f\n"
+ "st1 { v16.b }[10], [x27]\n"
+ "b 28f\n"
+ "23:" // Height 1: Partial direct writeback: partial_1_8
+ "tbz x9, #0, 28f\n"
+ "st1 { v16.b }[8], [x27]\n"
+ "b 28f\n"
+ "24:" // Height 1: Partial direct writeback: partial_4_0
+ "tbz x9, #2, 26f\n"
+ "str s16, [x27], #0x4\n"
+ "tbz x9, #1, 25f\n"
+ "st1 { v16.h }[2], [x27], #0x2\n"
+ "tbz x9, #0, 28f\n"
+ "st1 { v16.b }[6], [x27]\n"
+ "b 28f\n"
+ "25:" // Height 1: Partial direct writeback: partial_1_4
+ "tbz x9, #0, 28f\n"
+ "st1 { v16.b }[4], [x27]\n"
+ "b 28f\n"
+ "26:" // Height 1: Partial direct writeback: partial_2_0
+ "tbz x9, #1, 27f\n"
+ "str h16, [x27], #0x2\n"
+ "tbz x9, #0, 28f\n"
+ "st1 { v16.b }[2], [x27]\n"
+ "b 28f\n"
+ "27:" // Height 1: Partial direct writeback: partial_1_0
+ "str b16, [x27, #0x0]\n"
+ "28:" // Height 1: Partial direct writeback: Done
+ "b 30f\n"
+ "29:" // Height 1: Full writeback
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "30:" // Height 1: Writeback done
+ "subs x9, x9, #0x10\n"
+ "bgt 2b\n"
+ "b 122f\n"
+ "31:" // Height 2
+ "mov x10, %x[col_bias]\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "movi v15.16b, #0x1\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "32:" // Height 2: Column loop
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "33:" // Height 2: setup done
+ "mov x26, #0x0\n"
+ "34:" // Height 2: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 35f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "cbnz x26, 36f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "b 36f\n"
+ "35:" // Height 2: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "36:" // Height 2: input setup done
+ "cmp x25, #0x10\n"
+ "blt 41f\n"
+ "ldr q0, [x24, #0x0]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q4, [x28, #0x0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q8, [x28, #0x40]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "blt 39f\n"
+ "37:" // Height 2: Multiply loop: Main loop head
+ ".inst 0x4f00f090 // sudot v16.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4f01f094 // sudot v20.4s, v4.16b, v1.4b[0]\n"
+ "ldr q25, [x28, #0x70]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f00f0b1 // sudot v17.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0b5 // sudot v21.4s, v5.16b, v1.4b[0]\n"
+ "ldr q24, [x28, #0x80]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f00f0d2 // sudot v18.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0d6 // sudot v22.4s, v6.16b, v1.4b[0]\n"
+ "ldr q30, [x28, #0x90]\n"
+ ".inst 0x4f00f0f3 // sudot v19.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0f7 // sudot v23.4s, v7.16b, v1.4b[0]\n"
+ "ldr q29, [x28, #0xa0]\n"
+ ".inst 0x4f20f110 // sudot v16.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x4f21f114 // sudot v20.4s, v8.16b, v1.4b[1]\n"
+ "ldr q28, [x28, #0xb0]\n"
+ ".inst 0x4f20f131 // sudot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4f21f135 // sudot v21.4s, v9.16b, v1.4b[1]\n"
+ "ldr q27, [x28, #0xc0]\n"
+ ".inst 0x4f20f152 // sudot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f21f156 // sudot v22.4s, v10.16b, v1.4b[1]\n"
+ "ldr q26, [x28, #0xd0]\n"
+ ".inst 0x4f20f333 // sudot v19.4s, v25.16b, v0.4b[1]\n"
+ ".inst 0x4f21f337 // sudot v23.4s, v25.16b, v1.4b[1]\n"
+ "ldr q25, [x28, #0xe0]\n"
+ ".inst 0x4f00fb10 // sudot v16.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb14 // sudot v20.4s, v24.16b, v1.4b[2]\n"
+ "ldr q24, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4f00fbd1 // sudot v17.4s, v30.16b, v0.4b[2]\n"
+ ".inst 0x4f01fbd5 // sudot v21.4s, v30.16b, v1.4b[2]\n"
+ ".inst 0x4f00fbb2 // sudot v18.4s, v29.16b, v0.4b[2]\n"
+ ".inst 0x4f01fbb6 // sudot v22.4s, v29.16b, v1.4b[2]\n"
+ ".inst 0x4f00fb93 // sudot v19.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb97 // sudot v23.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x4f20fb70 // sudot v16.4s, v27.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb74 // sudot v20.4s, v27.16b, v1.4b[3]\n"
+ ".inst 0x4f20fb51 // sudot v17.4s, v26.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb55 // sudot v21.4s, v26.16b, v1.4b[3]\n"
+ ".inst 0x4f20fb32 // sudot v18.4s, v25.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb36 // sudot v22.4s, v25.16b, v1.4b[3]\n"
+ ".inst 0x4f20fb13 // sudot v19.4s, v24.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb17 // sudot v23.4s, v24.16b, v1.4b[3]\n"
+ "tbnz %x[flags], #31, 38f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ "38:" // Height 2: Multiply loop: unique 5: skip row sum
+ "ldr q0, [x24, #0x0]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "sub x25, x25, #0x10\n"
+ "ldr q4, [x28, #0x0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "cmp x25, #0x20\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q8, [x28, #0x40]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "bge 37b\n"
+ "39:" // Height 2: Multiply loop: Single iteration only
+ ".inst 0x4f00f090 // sudot v16.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4f01f094 // sudot v20.4s, v4.16b, v1.4b[0]\n"
+ "ldr q25, [x28, #0x70]\n"
+ "sub x25, x25, #0x10\n"
+ ".inst 0x4f00f0b1 // sudot v17.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0b5 // sudot v21.4s, v5.16b, v1.4b[0]\n"
+ "ldr q24, [x28, #0x80]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f00f0d2 // sudot v18.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0d6 // sudot v22.4s, v6.16b, v1.4b[0]\n"
+ "ldr q30, [x28, #0x90]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f00f0f3 // sudot v19.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0f7 // sudot v23.4s, v7.16b, v1.4b[0]\n"
+ "ldr q29, [x28, #0xa0]\n"
+ ".inst 0x4f20f110 // sudot v16.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x4f21f114 // sudot v20.4s, v8.16b, v1.4b[1]\n"
+ "ldr q28, [x28, #0xb0]\n"
+ ".inst 0x4f20f131 // sudot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4f21f135 // sudot v21.4s, v9.16b, v1.4b[1]\n"
+ "ldr q27, [x28, #0xc0]\n"
+ ".inst 0x4f20f152 // sudot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f21f156 // sudot v22.4s, v10.16b, v1.4b[1]\n"
+ "ldr q26, [x28, #0xd0]\n"
+ ".inst 0x4f20f333 // sudot v19.4s, v25.16b, v0.4b[1]\n"
+ ".inst 0x4f21f337 // sudot v23.4s, v25.16b, v1.4b[1]\n"
+ "ldr q25, [x28, #0xe0]\n"
+ ".inst 0x4f00fb10 // sudot v16.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb14 // sudot v20.4s, v24.16b, v1.4b[2]\n"
+ "ldr q24, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4f00fbd1 // sudot v17.4s, v30.16b, v0.4b[2]\n"
+ ".inst 0x4f01fbd5 // sudot v21.4s, v30.16b, v1.4b[2]\n"
+ ".inst 0x4f00fbb2 // sudot v18.4s, v29.16b, v0.4b[2]\n"
+ ".inst 0x4f01fbb6 // sudot v22.4s, v29.16b, v1.4b[2]\n"
+ ".inst 0x4f00fb93 // sudot v19.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb97 // sudot v23.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x4f20fb70 // sudot v16.4s, v27.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb74 // sudot v20.4s, v27.16b, v1.4b[3]\n"
+ ".inst 0x4f20fb51 // sudot v17.4s, v26.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb55 // sudot v21.4s, v26.16b, v1.4b[3]\n"
+ ".inst 0x4f20fb32 // sudot v18.4s, v25.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb36 // sudot v22.4s, v25.16b, v1.4b[3]\n"
+ ".inst 0x4f20fb13 // sudot v19.4s, v24.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb17 // sudot v23.4s, v24.16b, v1.4b[3]\n"
+ "tbnz %x[flags], #31, 40f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ "40:" // Height 2: Multiply loop: unique 6: skip row sum
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "41:" // Height 2: Multiply loop: Main loop skip
+ "cbz x25, 48f\n"
+ "cmp x25, #0x4\n"
+ "blt 44f\n"
+ "42:" // Height 2: Multiply loop: Odd block loop
+ "ldr s0, [x24], #0x4\n"
+ "ldr s1, [x23], #0x4\n"
+ "tbnz %x[flags], #31, 43f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ "43:" // Height 2: Multiply loop: unique 7: skip row sum
+ "ldr q27, [x28, #0x0]\n"
+ "ldr q26, [x28, #0x10]\n"
+ "sub x25, x25, #0x4\n"
+ "ldr q25, [x28, #0x20]\n"
+ "ldr q24, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f00f370 // sudot v16.4s, v27.16b, v0.4b[0]\n"
+ ".inst 0x4f01f374 // sudot v20.4s, v27.16b, v1.4b[0]\n"
+ ".inst 0x4f00f351 // sudot v17.4s, v26.16b, v0.4b[0]\n"
+ ".inst 0x4f01f355 // sudot v21.4s, v26.16b, v1.4b[0]\n"
+ ".inst 0x4f00f332 // sudot v18.4s, v25.16b, v0.4b[0]\n"
+ ".inst 0x4f01f336 // sudot v22.4s, v25.16b, v1.4b[0]\n"
+ ".inst 0x4f00f313 // sudot v19.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x4f01f317 // sudot v23.4s, v24.16b, v1.4b[0]\n"
+ "bge 42b\n"
+ "44:" // Height 2: Multiply loop: Skip odd blocks
+ "cbz x25, 48f\n"
+ "tbz x25, #1, 45f\n"
+ "ldr h0, [x24], #0x2\n"
+ "ldr h1, [x23], #0x2\n"
+ "tbz x25, #0, 46f\n"
+ "ld1 { v0.b }[2], [x24]\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "b 46f\n"
+ "45:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x24, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "46:" // Height 2: Multiply loop: Ragged operand read: Done
+ "tbnz %x[flags], #31, 47f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ "47:" // Height 2: Multiply loop: unique 8: skip row sum
+ "ldr q27, [x28, #0x0]\n"
+ "ldr q26, [x28, #0x10]\n"
+ "ldr q25, [x28, #0x20]\n"
+ "ldr q24, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f00f370 // sudot v16.4s, v27.16b, v0.4b[0]\n"
+ ".inst 0x4f01f374 // sudot v20.4s, v27.16b, v1.4b[0]\n"
+ ".inst 0x4f00f351 // sudot v17.4s, v26.16b, v0.4b[0]\n"
+ ".inst 0x4f01f355 // sudot v21.4s, v26.16b, v1.4b[0]\n"
+ ".inst 0x4f00f332 // sudot v18.4s, v25.16b, v0.4b[0]\n"
+ ".inst 0x4f01f336 // sudot v22.4s, v25.16b, v1.4b[0]\n"
+ ".inst 0x4f00f313 // sudot v19.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x4f01f317 // sudot v23.4s, v24.16b, v1.4b[0]\n"
+ "48:" // Height 2: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 34b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "tbnz %x[flags], #31, 49f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v24.4s }, [x20]\n"
+ "neg v24.4s, v24.4s\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "addp v12.4s, v12.4s, v12.4s\n"
+ "mul v11.4s, v11.4s, v24.4s\n"
+ "mul v12.4s, v12.4s, v24.4s\n"
+ "49:" // Height 2: skip row sum fixup
+ "ldr q28, [x10, #0x0]\n"
+ "ldr q27, [x10, #0x10]\n"
+ "add v16.4s, v16.4s, v11.4s\n"
+ "add v17.4s, v17.4s, v11.4s\n"
+ "ldr q26, [x10, #0x20]\n"
+ "ldr q25, [x10, #0x30]\n"
+ "add v18.4s, v18.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v12.4s\n"
+ "add v21.4s, v21.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v24.4s }, [x20]\n"
+ "add v22.4s, v22.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add v16.4s, v16.4s, v28.4s\n"
+ "add v17.4s, v17.4s, v27.4s\n"
+ "add x10, x10, #0x40\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v25.4s\n"
+ "add v20.4s, v20.4s, v28.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v21.4s, v21.4s, v27.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v25.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v24.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v24.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v24.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v24.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v24.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v24.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v24.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v24.4s\n"
+ "tbz %x[flags], #5, 50f\n"
+ "and v24.16b, v16.16b, v0.16b\n"
+ "and v30.16b, v17.16b, v0.16b\n"
+ "and v29.16b, v18.16b, v0.16b\n"
+ "and v28.16b, v19.16b, v0.16b\n"
+ "and v27.16b, v20.16b, v0.16b\n"
+ "and v26.16b, v21.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "and v25.16b, v22.16b, v0.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v24.4s\n"
+ "and v24.16b, v23.16b, v0.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v30.4s\n"
+ "sqadd v18.4s, v18.4s, v29.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "sqadd v19.4s, v19.4s, v28.4s\n"
+ "sqadd v20.4s, v20.4s, v27.4s\n"
+ "sqadd v21.4s, v21.4s, v26.4s\n"
+ "sqadd v22.4s, v22.4s, v25.4s\n"
+ "sqadd v23.4s, v23.4s, v24.4s\n"
+ "50:" // Height 2: no shift correction
+ "add x21, %x[qp], %[c_offset]\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v0.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x21]\n"
+ "ld1r { v25.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
+ "ld1r { v24.4s }, [x20]\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "add v16.4s, v16.4s, v26.4s\n"
+ "add v17.4s, v17.4s, v26.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v26.4s\n"
+ "add v20.4s, v20.4s, v26.4s\n"
+ "add v21.4s, v21.4s, v26.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v23.4s, v23.4s, v26.4s\n"
+ "smin v16.4s, v16.4s, v25.4s\n"
+ "smin v17.4s, v17.4s, v25.4s\n"
+ "smin v18.4s, v18.4s, v25.4s\n"
+ "smin v19.4s, v19.4s, v25.4s\n"
+ "smin v20.4s, v20.4s, v25.4s\n"
+ "smin v21.4s, v21.4s, v25.4s\n"
+ "smin v22.4s, v22.4s, v25.4s\n"
+ "smin v23.4s, v23.4s, v25.4s\n"
+ "smax v16.4s, v16.4s, v24.4s\n"
+ "smax v17.4s, v17.4s, v24.4s\n"
+ "smax v18.4s, v18.4s, v24.4s\n"
+ "smax v19.4s, v19.4s, v24.4s\n"
+ "smax v20.4s, v20.4s, v24.4s\n"
+ "smax v21.4s, v21.4s, v24.4s\n"
+ "smax v22.4s, v22.4s, v24.4s\n"
+ "smax v23.4s, v23.4s, v24.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v18.8h, v18.8h, v19.8h\n"
+ "uzp1 v20.8h, v20.8h, v21.8h\n"
+ "uzp1 v17.8h, v22.8h, v23.8h\n"
+ "uzp1 v16.16b, v16.16b, v18.16b\n"
+ "uzp1 v20.16b, v20.16b, v17.16b\n"
+ "bge 59f\n"
+ "tbz x9, #3, 54f\n"
+ "str d16, [x27], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "tbz x9, #2, 52f\n"
+ "st1 { v16.s }[2], [x27], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "tbz x9, #1, 51f\n"
+ "st1 { v16.h }[6], [x27], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "tbz x9, #0, 58f\n"
+ "st1 { v16.b }[14], [x27]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "b 58f\n"
+ "51:" // Height 2: Partial direct writeback: partial_1_12
+ "tbz x9, #0, 58f\n"
+ "st1 { v16.b }[12], [x27]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "b 58f\n"
+ "52:" // Height 2: Partial direct writeback: partial_2_8
+ "tbz x9, #1, 53f\n"
+ "st1 { v16.h }[4], [x27], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "tbz x9, #0, 58f\n"
+ "st1 { v16.b }[10], [x27]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "b 58f\n"
+ "53:" // Height 2: Partial direct writeback: partial_1_8
+ "tbz x9, #0, 58f\n"
+ "st1 { v16.b }[8], [x27]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "b 58f\n"
+ "54:" // Height 2: Partial direct writeback: partial_4_0
+ "tbz x9, #2, 56f\n"
+ "str s16, [x27], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "tbz x9, #1, 55f\n"
+ "st1 { v16.h }[2], [x27], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "tbz x9, #0, 58f\n"
+ "st1 { v16.b }[6], [x27]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "b 58f\n"
+ "55:" // Height 2: Partial direct writeback: partial_1_4
+ "tbz x9, #0, 58f\n"
+ "st1 { v16.b }[4], [x27]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "b 58f\n"
+ "56:" // Height 2: Partial direct writeback: partial_2_0
+ "tbz x9, #1, 57f\n"
+ "str h16, [x27], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "tbz x9, #0, 58f\n"
+ "st1 { v16.b }[2], [x27]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "b 58f\n"
+ "57:" // Height 2: Partial direct writeback: partial_1_0
+ "str b16, [x27, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "58:" // Height 2: Partial direct writeback: Done
+ "b 60f\n"
+ "59:" // Height 2: Full writeback
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "str q20, [x24, #0x0]\n"
+ "60:" // Height 2: Writeback done
+ "subs x9, x9, #0x10\n"
+ "bgt 32b\n"
+ "b 122f\n"
+ "61:" // Height 3
+ "mov x10, %x[col_bias]\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "movi v13.4s, #0x0\n"
+ "movi v15.16b, #0x1\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "62:" // Height 3: Column loop
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "63:" // Height 3: setup done
+ "mov x26, #0x0\n"
+ "64:" // Height 3: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 65f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "cbnz x26, 66f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "b 66f\n"
+ "65:" // Height 3: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "66:" // Height 3: input setup done
+ "cmp x25, #0x10\n"
+ "blt 71f\n"
+ "ldr q0, [x24, #0x0]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q2, [x22, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q8, [x28, #0x40]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "blt 69f\n"
+ "67:" // Height 3: Multiply loop: Main loop head
+ ".inst 0x4f00f090 // sudot v16.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4f01f094 // sudot v20.4s, v4.16b, v1.4b[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f02f098 // sudot v24.4s, v4.16b, v2.4b[0]\n"
+ "ldr q29, [x28, #0x70]\n"
+ ".inst 0x4f00f0b1 // sudot v17.4s, v5.16b, v0.4b[0]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4f01f0b5 // sudot v21.4s, v5.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0b9 // sudot v25.4s, v5.16b, v2.4b[0]\n"
+ "ldr q28, [x28, #0x80]\n"
+ ".inst 0x4f00f0d2 // sudot v18.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0d6 // sudot v22.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0da // sudot v26.4s, v6.16b, v2.4b[0]\n"
+ "ldr q5, [x28, #0x90]\n"
+ ".inst 0x4f00f0f3 // sudot v19.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0f7 // sudot v23.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0fb // sudot v27.4s, v7.16b, v2.4b[0]\n"
+ "ldr q4, [x28, #0xa0]\n"
+ ".inst 0x4f20f110 // sudot v16.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x4f21f114 // sudot v20.4s, v8.16b, v1.4b[1]\n"
+ ".inst 0x4f22f118 // sudot v24.4s, v8.16b, v2.4b[1]\n"
+ "ldr q3, [x28, #0xb0]\n"
+ ".inst 0x4f20f131 // sudot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4f21f135 // sudot v21.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x4f22f139 // sudot v25.4s, v9.16b, v2.4b[1]\n"
+ "ldr q31, [x28, #0xc0]\n"
+ ".inst 0x4f20f152 // sudot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f21f156 // sudot v22.4s, v10.16b, v1.4b[1]\n"
+ ".inst 0x4f22f15a // sudot v26.4s, v10.16b, v2.4b[1]\n"
+ "ldr q30, [x28, #0xd0]\n"
+ ".inst 0x4f20f3b3 // sudot v19.4s, v29.16b, v0.4b[1]\n"
+ ".inst 0x4f21f3b7 // sudot v23.4s, v29.16b, v1.4b[1]\n"
+ ".inst 0x4f22f3bb // sudot v27.4s, v29.16b, v2.4b[1]\n"
+ "ldr q29, [x28, #0xe0]\n"
+ ".inst 0x4f00fb90 // sudot v16.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb94 // sudot v20.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb98 // sudot v24.4s, v28.16b, v2.4b[2]\n"
+ "ldr q28, [x28, #0xf0]\n"
+ ".inst 0x4f00f8b1 // sudot v17.4s, v5.16b, v0.4b[2]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4f01f8b5 // sudot v21.4s, v5.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8b9 // sudot v25.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x4f00f892 // sudot v18.4s, v4.16b, v0.4b[2]\n"
+ ".inst 0x4f01f896 // sudot v22.4s, v4.16b, v1.4b[2]\n"
+ ".inst 0x4f02f89a // sudot v26.4s, v4.16b, v2.4b[2]\n"
+ ".inst 0x4f00f873 // sudot v19.4s, v3.16b, v0.4b[2]\n"
+ ".inst 0x4f01f877 // sudot v23.4s, v3.16b, v1.4b[2]\n"
+ ".inst 0x4f02f87b // sudot v27.4s, v3.16b, v2.4b[2]\n"
+ ".inst 0x4f20fbf0 // sudot v16.4s, v31.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbf4 // sudot v20.4s, v31.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbf8 // sudot v24.4s, v31.16b, v2.4b[3]\n"
+ ".inst 0x4f20fbd1 // sudot v17.4s, v30.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbd5 // sudot v21.4s, v30.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbd9 // sudot v25.4s, v30.16b, v2.4b[3]\n"
+ ".inst 0x4f20fbb2 // sudot v18.4s, v29.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbb6 // sudot v22.4s, v29.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbba // sudot v26.4s, v29.16b, v2.4b[3]\n"
+ ".inst 0x4f20fb93 // sudot v19.4s, v28.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb97 // sudot v23.4s, v28.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb9b // sudot v27.4s, v28.16b, v2.4b[3]\n"
+ "tbnz %x[flags], #31, 68f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ "68:" // Height 3: Multiply loop: unique 9: skip row sum
+ "ldr q0, [x24, #0x0]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "sub x25, x25, #0x10\n"
+ "ldr q2, [x22, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q8, [x28, #0x40]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "bge 67b\n"
+ "69:" // Height 3: Multiply loop: Single iteration only
+ ".inst 0x4f00f090 // sudot v16.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4f01f094 // sudot v20.4s, v4.16b, v1.4b[0]\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f02f098 // sudot v24.4s, v4.16b, v2.4b[0]\n"
+ "ldr q29, [x28, #0x70]\n"
+ ".inst 0x4f00f0b1 // sudot v17.4s, v5.16b, v0.4b[0]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f01f0b5 // sudot v21.4s, v5.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0b9 // sudot v25.4s, v5.16b, v2.4b[0]\n"
+ "ldr q28, [x28, #0x80]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4f00f0d2 // sudot v18.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0d6 // sudot v22.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0da // sudot v26.4s, v6.16b, v2.4b[0]\n"
+ "ldr q5, [x28, #0x90]\n"
+ ".inst 0x4f00f0f3 // sudot v19.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0f7 // sudot v23.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0fb // sudot v27.4s, v7.16b, v2.4b[0]\n"
+ "ldr q4, [x28, #0xa0]\n"
+ ".inst 0x4f20f110 // sudot v16.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x4f21f114 // sudot v20.4s, v8.16b, v1.4b[1]\n"
+ ".inst 0x4f22f118 // sudot v24.4s, v8.16b, v2.4b[1]\n"
+ "ldr q3, [x28, #0xb0]\n"
+ ".inst 0x4f20f131 // sudot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4f21f135 // sudot v21.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x4f22f139 // sudot v25.4s, v9.16b, v2.4b[1]\n"
+ "ldr q31, [x28, #0xc0]\n"
+ ".inst 0x4f20f152 // sudot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f21f156 // sudot v22.4s, v10.16b, v1.4b[1]\n"
+ ".inst 0x4f22f15a // sudot v26.4s, v10.16b, v2.4b[1]\n"
+ "ldr q30, [x28, #0xd0]\n"
+ ".inst 0x4f20f3b3 // sudot v19.4s, v29.16b, v0.4b[1]\n"
+ ".inst 0x4f21f3b7 // sudot v23.4s, v29.16b, v1.4b[1]\n"
+ ".inst 0x4f22f3bb // sudot v27.4s, v29.16b, v2.4b[1]\n"
+ "ldr q29, [x28, #0xe0]\n"
+ ".inst 0x4f00fb90 // sudot v16.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb94 // sudot v20.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb98 // sudot v24.4s, v28.16b, v2.4b[2]\n"
+ "ldr q28, [x28, #0xf0]\n"
+ ".inst 0x4f00f8b1 // sudot v17.4s, v5.16b, v0.4b[2]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4f01f8b5 // sudot v21.4s, v5.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8b9 // sudot v25.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x4f00f892 // sudot v18.4s, v4.16b, v0.4b[2]\n"
+ ".inst 0x4f01f896 // sudot v22.4s, v4.16b, v1.4b[2]\n"
+ ".inst 0x4f02f89a // sudot v26.4s, v4.16b, v2.4b[2]\n"
+ ".inst 0x4f00f873 // sudot v19.4s, v3.16b, v0.4b[2]\n"
+ ".inst 0x4f01f877 // sudot v23.4s, v3.16b, v1.4b[2]\n"
+ ".inst 0x4f02f87b // sudot v27.4s, v3.16b, v2.4b[2]\n"
+ ".inst 0x4f20fbf0 // sudot v16.4s, v31.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbf4 // sudot v20.4s, v31.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbf8 // sudot v24.4s, v31.16b, v2.4b[3]\n"
+ ".inst 0x4f20fbd1 // sudot v17.4s, v30.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbd5 // sudot v21.4s, v30.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbd9 // sudot v25.4s, v30.16b, v2.4b[3]\n"
+ ".inst 0x4f20fbb2 // sudot v18.4s, v29.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbb6 // sudot v22.4s, v29.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbba // sudot v26.4s, v29.16b, v2.4b[3]\n"
+ ".inst 0x4f20fb93 // sudot v19.4s, v28.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb97 // sudot v23.4s, v28.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb9b // sudot v27.4s, v28.16b, v2.4b[3]\n"
+ "tbnz %x[flags], #31, 70f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ "70:" // Height 3: Multiply loop: unique 10: skip row sum
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "71:" // Height 3: Multiply loop: Main loop skip
+ "cbz x25, 78f\n"
+ "cmp x25, #0x4\n"
+ "blt 74f\n"
+ "72:" // Height 3: Multiply loop: Odd block loop
+ "ldr s0, [x24], #0x4\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "tbnz %x[flags], #31, 73f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ "73:" // Height 3: Multiply loop: unique 11: skip row sum
+ "ldr q31, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "sub x25, x25, #0x4\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f00f3f0 // sudot v16.4s, v31.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3f4 // sudot v20.4s, v31.16b, v1.4b[0]\n"
+ ".inst 0x4f02f3f8 // sudot v24.4s, v31.16b, v2.4b[0]\n"
+ ".inst 0x4f00f3d1 // sudot v17.4s, v30.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3d5 // sudot v21.4s, v30.16b, v1.4b[0]\n"
+ ".inst 0x4f02f3d9 // sudot v25.4s, v30.16b, v2.4b[0]\n"
+ ".inst 0x4f00f3b2 // sudot v18.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3b6 // sudot v22.4s, v29.16b, v1.4b[0]\n"
+ ".inst 0x4f02f3ba // sudot v26.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x4f00f393 // sudot v19.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x4f01f397 // sudot v23.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x4f02f39b // sudot v27.4s, v28.16b, v2.4b[0]\n"
+ "bge 72b\n"
+ "74:" // Height 3: Multiply loop: Skip odd blocks
+ "cbz x25, 78f\n"
+ "tbz x25, #1, 75f\n"
+ "ldr h0, [x24], #0x2\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h2, [x22], #0x2\n"
+ "tbz x25, #0, 76f\n"
+ "ld1 { v0.b }[2], [x24]\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "ld1 { v2.b }[2], [x22]\n"
+ "b 76f\n"
+ "75:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x24, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "ldr b2, [x22, #0x0]\n"
+ "76:" // Height 3: Multiply loop: Ragged operand read: Done
+ "tbnz %x[flags], #31, 77f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ "77:" // Height 3: Multiply loop: unique 12: skip row sum
+ "ldr q31, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f00f3f0 // sudot v16.4s, v31.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3f4 // sudot v20.4s, v31.16b, v1.4b[0]\n"
+ ".inst 0x4f02f3f8 // sudot v24.4s, v31.16b, v2.4b[0]\n"
+ ".inst 0x4f00f3d1 // sudot v17.4s, v30.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3d5 // sudot v21.4s, v30.16b, v1.4b[0]\n"
+ ".inst 0x4f02f3d9 // sudot v25.4s, v30.16b, v2.4b[0]\n"
+ ".inst 0x4f00f3b2 // sudot v18.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3b6 // sudot v22.4s, v29.16b, v1.4b[0]\n"
+ ".inst 0x4f02f3ba // sudot v26.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x4f00f393 // sudot v19.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x4f01f397 // sudot v23.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x4f02f39b // sudot v27.4s, v28.16b, v2.4b[0]\n"
+ "78:" // Height 3: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 64b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "tbnz %x[flags], #31, 79f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v28.4s }, [x20]\n"
+ "addp v13.4s, v13.4s, v13.4s\n"
+ "neg v28.4s, v28.4s\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "addp v12.4s, v12.4s, v12.4s\n"
+ "addp v13.4s, v13.4s, v13.4s\n"
+ "mul v11.4s, v11.4s, v28.4s\n"
+ "mul v12.4s, v12.4s, v28.4s\n"
+ "mul v13.4s, v13.4s, v28.4s\n"
+ "79:" // Height 3: skip row sum fixup
+ "ldr q0, [x10, #0x0]\n"
+ "ldr q31, [x10, #0x10]\n"
+ "add v16.4s, v16.4s, v11.4s\n"
+ "add v17.4s, v17.4s, v11.4s\n"
+ "ldr q30, [x10, #0x20]\n"
+ "ldr q29, [x10, #0x30]\n"
+ "add v18.4s, v18.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v12.4s\n"
+ "add v21.4s, v21.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v28.4s }, [x20]\n"
+ "add v22.4s, v22.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add v24.4s, v24.4s, v13.4s\n"
+ "add v25.4s, v25.4s, v13.4s\n"
+ "add x10, x10, #0x40\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "add v27.4s, v27.4s, v13.4s\n"
+ "add v16.4s, v16.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v31.4s\n"
+ "add v18.4s, v18.4s, v30.4s\n"
+ "add v19.4s, v19.4s, v29.4s\n"
+ "add v20.4s, v20.4s, v0.4s\n"
+ "add v21.4s, v21.4s, v31.4s\n"
+ "add v22.4s, v22.4s, v30.4s\n"
+ "add v23.4s, v23.4s, v29.4s\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v25.4s, v25.4s, v31.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v29.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v28.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v28.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v28.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v28.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v28.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v28.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v28.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v28.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v28.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v28.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v28.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v28.4s\n"
+ "tbz %x[flags], #5, 80f\n"
+ "and v1.16b, v16.16b, v0.16b\n"
+ "and v31.16b, v17.16b, v0.16b\n"
+ "and v30.16b, v18.16b, v0.16b\n"
+ "and v29.16b, v19.16b, v0.16b\n"
+ "and v28.16b, v20.16b, v0.16b\n"
+ "and v3.16b, v21.16b, v0.16b\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "and v2.16b, v22.16b, v0.16b\n"
+ "sqadd v16.4s, v16.4s, v1.4s\n"
+ "sqadd v17.4s, v17.4s, v31.4s\n"
+ "sqadd v18.4s, v18.4s, v30.4s\n"
+ "sqadd v19.4s, v19.4s, v29.4s\n"
+ "sqadd v20.4s, v20.4s, v28.4s\n"
+ "and v1.16b, v23.16b, v0.16b\n"
+ "and v31.16b, v24.16b, v0.16b\n"
+ "and v30.16b, v25.16b, v0.16b\n"
+ "and v29.16b, v26.16b, v0.16b\n"
+ "and v28.16b, v27.16b, v0.16b\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sshr v31.4s, v31.4s, #0x1f\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sqadd v21.4s, v21.4s, v3.4s\n"
+ "sqadd v22.4s, v22.4s, v2.4s\n"
+ "sqadd v23.4s, v23.4s, v1.4s\n"
+ "sqadd v24.4s, v24.4s, v31.4s\n"
+ "sqadd v25.4s, v25.4s, v30.4s\n"
+ "sqadd v26.4s, v26.4s, v29.4s\n"
+ "sqadd v27.4s, v27.4s, v28.4s\n"
+ "80:" // Height 3: no shift correction
+ "add x21, %x[qp], %[c_offset]\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v0.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v30.4s }, [x21]\n"
+ "ld1r { v29.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
+ "ld1r { v28.4s }, [x20]\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "add v16.4s, v16.4s, v30.4s\n"
+ "add v17.4s, v17.4s, v30.4s\n"
+ "add v18.4s, v18.4s, v30.4s\n"
+ "add v19.4s, v19.4s, v30.4s\n"
+ "add v20.4s, v20.4s, v30.4s\n"
+ "add v21.4s, v21.4s, v30.4s\n"
+ "add v22.4s, v22.4s, v30.4s\n"
+ "add v23.4s, v23.4s, v30.4s\n"
+ "add v24.4s, v24.4s, v30.4s\n"
+ "add v25.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v30.4s\n"
+ "add v27.4s, v27.4s, v30.4s\n"
+ "smin v16.4s, v16.4s, v29.4s\n"
+ "smin v17.4s, v17.4s, v29.4s\n"
+ "smin v18.4s, v18.4s, v29.4s\n"
+ "smin v19.4s, v19.4s, v29.4s\n"
+ "smin v20.4s, v20.4s, v29.4s\n"
+ "smin v21.4s, v21.4s, v29.4s\n"
+ "smin v22.4s, v22.4s, v29.4s\n"
+ "smin v23.4s, v23.4s, v29.4s\n"
+ "smin v24.4s, v24.4s, v29.4s\n"
+ "smin v25.4s, v25.4s, v29.4s\n"
+ "smin v26.4s, v26.4s, v29.4s\n"
+ "smin v27.4s, v27.4s, v29.4s\n"
+ "smax v16.4s, v16.4s, v28.4s\n"
+ "smax v17.4s, v17.4s, v28.4s\n"
+ "smax v18.4s, v18.4s, v28.4s\n"
+ "smax v19.4s, v19.4s, v28.4s\n"
+ "smax v20.4s, v20.4s, v28.4s\n"
+ "smax v21.4s, v21.4s, v28.4s\n"
+ "smax v22.4s, v22.4s, v28.4s\n"
+ "smax v23.4s, v23.4s, v28.4s\n"
+ "smax v24.4s, v24.4s, v28.4s\n"
+ "smax v25.4s, v25.4s, v28.4s\n"
+ "smax v26.4s, v26.4s, v28.4s\n"
+ "smax v27.4s, v27.4s, v28.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v19.8h, v18.8h, v19.8h\n"
+ "uzp1 v20.8h, v20.8h, v21.8h\n"
+ "uzp1 v18.8h, v22.8h, v23.8h\n"
+ "uzp1 v24.8h, v24.8h, v25.8h\n"
+ "uzp1 v17.8h, v26.8h, v27.8h\n"
+ "uzp1 v16.16b, v16.16b, v19.16b\n"
+ "uzp1 v20.16b, v20.16b, v18.16b\n"
+ "uzp1 v24.16b, v24.16b, v17.16b\n"
+ "bge 89f\n"
+ "tbz x9, #3, 84f\n"
+ "str d16, [x27], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "tbz x9, #2, 82f\n"
+ "st1 { v16.s }[2], [x27], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "tbz x9, #1, 81f\n"
+ "st1 { v16.h }[6], [x27], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "tbz x9, #0, 88f\n"
+ "st1 { v16.b }[14], [x27]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "b 88f\n"
+ "81:" // Height 3: Partial direct writeback: partial_1_12
+ "tbz x9, #0, 88f\n"
+ "st1 { v16.b }[12], [x27]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "b 88f\n"
+ "82:" // Height 3: Partial direct writeback: partial_2_8
+ "tbz x9, #1, 83f\n"
+ "st1 { v16.h }[4], [x27], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "tbz x9, #0, 88f\n"
+ "st1 { v16.b }[10], [x27]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "b 88f\n"
+ "83:" // Height 3: Partial direct writeback: partial_1_8
+ "tbz x9, #0, 88f\n"
+ "st1 { v16.b }[8], [x27]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "b 88f\n"
+ "84:" // Height 3: Partial direct writeback: partial_4_0
+ "tbz x9, #2, 86f\n"
+ "str s16, [x27], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "tbz x9, #1, 85f\n"
+ "st1 { v16.h }[2], [x27], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "tbz x9, #0, 88f\n"
+ "st1 { v16.b }[6], [x27]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "b 88f\n"
+ "85:" // Height 3: Partial direct writeback: partial_1_4
+ "tbz x9, #0, 88f\n"
+ "st1 { v16.b }[4], [x27]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "b 88f\n"
+ "86:" // Height 3: Partial direct writeback: partial_2_0
+ "tbz x9, #1, 87f\n"
+ "str h16, [x27], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "tbz x9, #0, 88f\n"
+ "st1 { v16.b }[2], [x27]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "b 88f\n"
+ "87:" // Height 3: Partial direct writeback: partial_1_0
+ "str b16, [x27, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "88:" // Height 3: Partial direct writeback: Done
+ "b 90f\n"
+ "89:" // Height 3: Full writeback
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "90:" // Height 3: Writeback done
+ "subs x9, x9, #0x10\n"
+ "bgt 62b\n"
+ "b 122f\n"
+ "91:" // Height 4
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x20, #0x4\n"
+ "mov x10, %x[col_bias]\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x27\n"
+ "movi v15.16b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "92:" // Height 4: Column loop
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ "93:" // Height 4: setup done
+ "mov x26, #0x0\n"
+ "94:" // Height 4: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 95f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "ldr x21, [x20, #0x18]\n"
+ "cbnz x26, 96f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "add x21, x21, x20\n"
+ "b 96f\n"
+ "95:" // Height 4: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
+ "96:" // Height 4: input setup done
+ "cmp x25, #0x10\n"
+ "blt 101f\n"
+ "ldr q0, [x24, #0x0]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q2, [x22, #0x0]\n"
+ "ldr q3, [x21, #0x0]\n"
+ "ldr q4, [x28, #0x0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q8, [x28, #0x40]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "blt 99f\n"
+ "97:" // Height 4: Multiply loop: Main loop head
+ ".inst 0x4f00f090 // sudot v16.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4f01f094 // sudot v20.4s, v4.16b, v1.4b[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f02f098 // sudot v24.4s, v4.16b, v2.4b[0]\n"
+ ".inst 0x4f03f09c // sudot v28.4s, v4.16b, v3.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4f00f0b1 // sudot v17.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0b5 // sudot v21.4s, v5.16b, v1.4b[0]\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4f02f0b9 // sudot v25.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0bd // sudot v29.4s, v5.16b, v3.4b[0]\n"
+ "ldr q5, [x28, #0x80]\n"
+ ".inst 0x4f00f0d2 // sudot v18.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0d6 // sudot v22.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0da // sudot v26.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0de // sudot v30.4s, v6.16b, v3.4b[0]\n"
+ "ldr q6, [x28, #0x90]\n"
+ ".inst 0x4f00f0f3 // sudot v19.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0f7 // sudot v23.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0fb // sudot v27.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0ff // sudot v31.4s, v7.16b, v3.4b[0]\n"
+ "ldr q7, [x28, #0xa0]\n"
+ ".inst 0x4f20f110 // sudot v16.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x4f21f114 // sudot v20.4s, v8.16b, v1.4b[1]\n"
+ ".inst 0x4f22f118 // sudot v24.4s, v8.16b, v2.4b[1]\n"
+ ".inst 0x4f23f11c // sudot v28.4s, v8.16b, v3.4b[1]\n"
+ "ldr q8, [x28, #0xb0]\n"
+ ".inst 0x4f20f131 // sudot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4f21f135 // sudot v21.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x4f22f139 // sudot v25.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x4f23f13d // sudot v29.4s, v9.16b, v3.4b[1]\n"
+ "ldr q9, [x28, #0xc0]\n"
+ ".inst 0x4f20f152 // sudot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f21f156 // sudot v22.4s, v10.16b, v1.4b[1]\n"
+ ".inst 0x4f22f15a // sudot v26.4s, v10.16b, v2.4b[1]\n"
+ ".inst 0x4f23f15e // sudot v30.4s, v10.16b, v3.4b[1]\n"
+ "ldr q10, [x28, #0xd0]\n"
+ ".inst 0x4f20f093 // sudot v19.4s, v4.16b, v0.4b[1]\n"
+ ".inst 0x4f21f097 // sudot v23.4s, v4.16b, v1.4b[1]\n"
+ ".inst 0x4f22f09b // sudot v27.4s, v4.16b, v2.4b[1]\n"
+ ".inst 0x4f23f09f // sudot v31.4s, v4.16b, v3.4b[1]\n"
+ "ldr q4, [x28, #0xe0]\n"
+ ".inst 0x4f00f8b0 // sudot v16.4s, v5.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8b4 // sudot v20.4s, v5.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8b8 // sudot v24.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8bc // sudot v28.4s, v5.16b, v3.4b[2]\n"
+ "ldr q5, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4f00f8d1 // sudot v17.4s, v6.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8d5 // sudot v21.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8d9 // sudot v25.4s, v6.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8dd // sudot v29.4s, v6.16b, v3.4b[2]\n"
+ ".inst 0x4f00f8f2 // sudot v18.4s, v7.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8f6 // sudot v22.4s, v7.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8fa // sudot v26.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8fe // sudot v30.4s, v7.16b, v3.4b[2]\n"
+ ".inst 0x4f00f913 // sudot v19.4s, v8.16b, v0.4b[2]\n"
+ ".inst 0x4f01f917 // sudot v23.4s, v8.16b, v1.4b[2]\n"
+ ".inst 0x4f02f91b // sudot v27.4s, v8.16b, v2.4b[2]\n"
+ ".inst 0x4f03f91f // sudot v31.4s, v8.16b, v3.4b[2]\n"
+ ".inst 0x4f20f930 // sudot v16.4s, v9.16b, v0.4b[3]\n"
+ ".inst 0x4f21f934 // sudot v20.4s, v9.16b, v1.4b[3]\n"
+ ".inst 0x4f22f938 // sudot v24.4s, v9.16b, v2.4b[3]\n"
+ ".inst 0x4f23f93c // sudot v28.4s, v9.16b, v3.4b[3]\n"
+ ".inst 0x4f20f951 // sudot v17.4s, v10.16b, v0.4b[3]\n"
+ ".inst 0x4f21f955 // sudot v21.4s, v10.16b, v1.4b[3]\n"
+ ".inst 0x4f22f959 // sudot v25.4s, v10.16b, v2.4b[3]\n"
+ ".inst 0x4f23f95d // sudot v29.4s, v10.16b, v3.4b[3]\n"
+ ".inst 0x4f20f892 // sudot v18.4s, v4.16b, v0.4b[3]\n"
+ ".inst 0x4f21f896 // sudot v22.4s, v4.16b, v1.4b[3]\n"
+ ".inst 0x4f22f89a // sudot v26.4s, v4.16b, v2.4b[3]\n"
+ ".inst 0x4f23f89e // sudot v30.4s, v4.16b, v3.4b[3]\n"
+ ".inst 0x4f20f8b3 // sudot v19.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8b7 // sudot v23.4s, v5.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8bb // sudot v27.4s, v5.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8bf // sudot v31.4s, v5.16b, v3.4b[3]\n"
+ "tbnz %x[flags], #31, 98f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ ".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
+ "98:" // Height 4: Multiply loop: unique 13: skip row sum
+ "ldr q0, [x24, #0x0]\n"
+ "ldr q1, [x23, #0x0]\n"
+ "sub x25, x25, #0x10\n"
+ "ldr q2, [x22, #0x0]\n"
+ "ldr q3, [x21, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q4, [x28, #0x0]\n"
+ "ldr q5, [x28, #0x10]\n"
+ "ldr q6, [x28, #0x20]\n"
+ "ldr q7, [x28, #0x30]\n"
+ "ldr q8, [x28, #0x40]\n"
+ "ldr q9, [x28, #0x50]\n"
+ "ldr q10, [x28, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "bge 97b\n"
+ "99:" // Height 4: Multiply loop: Single iteration only
+ ".inst 0x4f00f090 // sudot v16.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4f01f094 // sudot v20.4s, v4.16b, v1.4b[0]\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f02f098 // sudot v24.4s, v4.16b, v2.4b[0]\n"
+ ".inst 0x4f03f09c // sudot v28.4s, v4.16b, v3.4b[0]\n"
+ "ldr q4, [x28, #0x70]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f00f0b1 // sudot v17.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0b5 // sudot v21.4s, v5.16b, v1.4b[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4f02f0b9 // sudot v25.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0bd // sudot v29.4s, v5.16b, v3.4b[0]\n"
+ "ldr q5, [x28, #0x80]\n"
+ ".inst 0x4f00f0d2 // sudot v18.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0d6 // sudot v22.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0da // sudot v26.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0de // sudot v30.4s, v6.16b, v3.4b[0]\n"
+ "ldr q6, [x28, #0x90]\n"
+ ".inst 0x4f00f0f3 // sudot v19.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0f7 // sudot v23.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0fb // sudot v27.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0ff // sudot v31.4s, v7.16b, v3.4b[0]\n"
+ "ldr q7, [x28, #0xa0]\n"
+ ".inst 0x4f20f110 // sudot v16.4s, v8.16b, v0.4b[1]\n"
+ ".inst 0x4f21f114 // sudot v20.4s, v8.16b, v1.4b[1]\n"
+ ".inst 0x4f22f118 // sudot v24.4s, v8.16b, v2.4b[1]\n"
+ ".inst 0x4f23f11c // sudot v28.4s, v8.16b, v3.4b[1]\n"
+ "ldr q8, [x28, #0xb0]\n"
+ ".inst 0x4f20f131 // sudot v17.4s, v9.16b, v0.4b[1]\n"
+ ".inst 0x4f21f135 // sudot v21.4s, v9.16b, v1.4b[1]\n"
+ ".inst 0x4f22f139 // sudot v25.4s, v9.16b, v2.4b[1]\n"
+ ".inst 0x4f23f13d // sudot v29.4s, v9.16b, v3.4b[1]\n"
+ "ldr q9, [x28, #0xc0]\n"
+ ".inst 0x4f20f152 // sudot v18.4s, v10.16b, v0.4b[1]\n"
+ ".inst 0x4f21f156 // sudot v22.4s, v10.16b, v1.4b[1]\n"
+ ".inst 0x4f22f15a // sudot v26.4s, v10.16b, v2.4b[1]\n"
+ ".inst 0x4f23f15e // sudot v30.4s, v10.16b, v3.4b[1]\n"
+ "ldr q10, [x28, #0xd0]\n"
+ ".inst 0x4f20f093 // sudot v19.4s, v4.16b, v0.4b[1]\n"
+ ".inst 0x4f21f097 // sudot v23.4s, v4.16b, v1.4b[1]\n"
+ ".inst 0x4f22f09b // sudot v27.4s, v4.16b, v2.4b[1]\n"
+ ".inst 0x4f23f09f // sudot v31.4s, v4.16b, v3.4b[1]\n"
+ "ldr q4, [x28, #0xe0]\n"
+ ".inst 0x4f00f8b0 // sudot v16.4s, v5.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8b4 // sudot v20.4s, v5.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8b8 // sudot v24.4s, v5.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8bc // sudot v28.4s, v5.16b, v3.4b[2]\n"
+ "ldr q5, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4f00f8d1 // sudot v17.4s, v6.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8d5 // sudot v21.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8d9 // sudot v25.4s, v6.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8dd // sudot v29.4s, v6.16b, v3.4b[2]\n"
+ ".inst 0x4f00f8f2 // sudot v18.4s, v7.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8f6 // sudot v22.4s, v7.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8fa // sudot v26.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8fe // sudot v30.4s, v7.16b, v3.4b[2]\n"
+ ".inst 0x4f00f913 // sudot v19.4s, v8.16b, v0.4b[2]\n"
+ ".inst 0x4f01f917 // sudot v23.4s, v8.16b, v1.4b[2]\n"
+ ".inst 0x4f02f91b // sudot v27.4s, v8.16b, v2.4b[2]\n"
+ ".inst 0x4f03f91f // sudot v31.4s, v8.16b, v3.4b[2]\n"
+ ".inst 0x4f20f930 // sudot v16.4s, v9.16b, v0.4b[3]\n"
+ ".inst 0x4f21f934 // sudot v20.4s, v9.16b, v1.4b[3]\n"
+ ".inst 0x4f22f938 // sudot v24.4s, v9.16b, v2.4b[3]\n"
+ ".inst 0x4f23f93c // sudot v28.4s, v9.16b, v3.4b[3]\n"
+ ".inst 0x4f20f951 // sudot v17.4s, v10.16b, v0.4b[3]\n"
+ ".inst 0x4f21f955 // sudot v21.4s, v10.16b, v1.4b[3]\n"
+ ".inst 0x4f22f959 // sudot v25.4s, v10.16b, v2.4b[3]\n"
+ ".inst 0x4f23f95d // sudot v29.4s, v10.16b, v3.4b[3]\n"
+ ".inst 0x4f20f892 // sudot v18.4s, v4.16b, v0.4b[3]\n"
+ ".inst 0x4f21f896 // sudot v22.4s, v4.16b, v1.4b[3]\n"
+ ".inst 0x4f22f89a // sudot v26.4s, v4.16b, v2.4b[3]\n"
+ ".inst 0x4f23f89e // sudot v30.4s, v4.16b, v3.4b[3]\n"
+ ".inst 0x4f20f8b3 // sudot v19.4s, v5.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8b7 // sudot v23.4s, v5.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8bb // sudot v27.4s, v5.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8bf // sudot v31.4s, v5.16b, v3.4b[3]\n"
+ "tbnz %x[flags], #31, 100f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ ".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
+ "100:" // Height 4: Multiply loop: unique 14: skip row sum
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "101:" // Height 4: Multiply loop: Main loop skip
+ "cbz x25, 108f\n"
+ "cmp x25, #0x4\n"
+ "blt 104f\n"
+ "102:" // Height 4: Multiply loop: Odd block loop
+ "ldr s0, [x24], #0x4\n"
+ "ldr s1, [x23], #0x4\n"
+ "ldr s2, [x22], #0x4\n"
+ "ldr s3, [x21], #0x4\n"
+ "tbnz %x[flags], #31, 103f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ ".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
+ "103:" // Height 4: Multiply loop: unique 15: skip row sum
+ "ldr q7, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "sub x25, x25, #0x4\n"
+ "ldr q5, [x28, #0x20]\n"
+ "ldr q4, [x28, #0x30]\n"
+ "cmp x25, #0x4\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f00f0f0 // sudot v16.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0f4 // sudot v20.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f8 // sudot v24.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0fc // sudot v28.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x4f00f0d1 // sudot v17.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0d5 // sudot v21.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0d9 // sudot v25.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0dd // sudot v29.4s, v6.16b, v3.4b[0]\n"
+ ".inst 0x4f00f0b2 // sudot v18.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0b6 // sudot v22.4s, v5.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0ba // sudot v26.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0be // sudot v30.4s, v5.16b, v3.4b[0]\n"
+ ".inst 0x4f00f093 // sudot v19.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4f01f097 // sudot v23.4s, v4.16b, v1.4b[0]\n"
+ ".inst 0x4f02f09b // sudot v27.4s, v4.16b, v2.4b[0]\n"
+ ".inst 0x4f03f09f // sudot v31.4s, v4.16b, v3.4b[0]\n"
+ "bge 102b\n"
+ "104:" // Height 4: Multiply loop: Skip odd blocks
+ "cbz x25, 108f\n"
+ "tbz x25, #1, 105f\n"
+ "ldr h0, [x24], #0x2\n"
+ "ldr h1, [x23], #0x2\n"
+ "ldr h2, [x22], #0x2\n"
+ "ldr h3, [x21], #0x2\n"
+ "tbz x25, #0, 106f\n"
+ "ld1 { v0.b }[2], [x24]\n"
+ "ld1 { v1.b }[2], [x23]\n"
+ "ld1 { v2.b }[2], [x22]\n"
+ "ld1 { v3.b }[2], [x21]\n"
+ "b 106f\n"
+ "105:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x24, #0x0]\n"
+ "ldr b1, [x23, #0x0]\n"
+ "ldr b2, [x22, #0x0]\n"
+ "ldr b3, [x21, #0x0]\n"
+ "106:" // Height 4: Multiply loop: Ragged operand read: Done
+ "tbnz %x[flags], #31, 107f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942c // udot v12.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ ".inst 0x6e8f946e // udot v14.4s, v3.16b, v15.16b\n"
+ "107:" // Height 4: Multiply loop: unique 16: skip row sum
+ "ldr q7, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "ldr q5, [x28, #0x20]\n"
+ "ldr q4, [x28, #0x30]\n"
+ "add x28, x28, #0x40\n"
+ ".inst 0x4f00f0f0 // sudot v16.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0f4 // sudot v20.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f8 // sudot v24.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0fc // sudot v28.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x4f00f0d1 // sudot v17.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0d5 // sudot v21.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0d9 // sudot v25.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0dd // sudot v29.4s, v6.16b, v3.4b[0]\n"
+ ".inst 0x4f00f0b2 // sudot v18.4s, v5.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0b6 // sudot v22.4s, v5.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0ba // sudot v26.4s, v5.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0be // sudot v30.4s, v5.16b, v3.4b[0]\n"
+ ".inst 0x4f00f093 // sudot v19.4s, v4.16b, v0.4b[0]\n"
+ ".inst 0x4f01f097 // sudot v23.4s, v4.16b, v1.4b[0]\n"
+ ".inst 0x4f02f09b // sudot v27.4s, v4.16b, v2.4b[0]\n"
+ ".inst 0x4f03f09f // sudot v31.4s, v4.16b, v3.4b[0]\n"
+ "108:" // Height 4: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 94b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "add x24, x27, x20\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "tbnz %x[flags], #31, 109f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "addp v12.4s, v12.4s, v12.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "addp v13.4s, v13.4s, v13.4s\n"
+ "addp v14.4s, v14.4s, v14.4s\n"
+ "neg v0.4s, v0.4s\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "addp v12.4s, v12.4s, v12.4s\n"
+ "addp v13.4s, v13.4s, v13.4s\n"
+ "addp v14.4s, v14.4s, v14.4s\n"
+ "mul v11.4s, v11.4s, v0.4s\n"
+ "mul v12.4s, v12.4s, v0.4s\n"
+ "mul v13.4s, v13.4s, v0.4s\n"
+ "mul v14.4s, v14.4s, v0.4s\n"
+ "109:" // Height 4: skip row sum fixup
+ "ldr q0, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "add v16.4s, v16.4s, v11.4s\n"
+ "add v17.4s, v17.4s, v11.4s\n"
+ "ldr q3, [x10, #0x20]\n"
+ "ldr q2, [x10, #0x30]\n"
+ "add v18.4s, v18.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v12.4s\n"
+ "add v21.4s, v21.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "add v22.4s, v22.4s, v12.4s\n"
+ "add v23.4s, v23.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add v24.4s, v24.4s, v13.4s\n"
+ "add v25.4s, v25.4s, v13.4s\n"
+ "add x10, x10, #0x40\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "add v27.4s, v27.4s, v13.4s\n"
+ "add v28.4s, v28.4s, v14.4s\n"
+ "add v29.4s, v29.4s, v14.4s\n"
+ "add v30.4s, v30.4s, v14.4s\n"
+ "add v31.4s, v31.4s, v14.4s\n"
+ "add v16.4s, v16.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "add v18.4s, v18.4s, v3.4s\n"
+ "add v19.4s, v19.4s, v2.4s\n"
+ "add v20.4s, v20.4s, v0.4s\n"
+ "add v21.4s, v21.4s, v4.4s\n"
+ "add v22.4s, v22.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v2.4s\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v2.4s\n"
+ "add v28.4s, v28.4s, v0.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v29.4s, v29.4s, v4.4s\n"
+ "add v30.4s, v30.4s, v3.4s\n"
+ "add v31.4s, v31.4s, v2.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v1.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v1.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v1.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v1.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v1.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v1.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v1.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v1.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v1.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v1.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v1.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v1.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v1.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v1.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v1.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v1.4s\n"
+ "tbz %x[flags], #5, 110f\n"
+ "and v2.16b, v16.16b, v0.16b\n"
+ "and v1.16b, v17.16b, v0.16b\n"
+ "and v7.16b, v18.16b, v0.16b\n"
+ "and v6.16b, v19.16b, v0.16b\n"
+ "and v5.16b, v20.16b, v0.16b\n"
+ "and v4.16b, v21.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "and v3.16b, v22.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v2.4s\n"
+ "sqadd v17.4s, v17.4s, v1.4s\n"
+ "and v2.16b, v23.16b, v0.16b\n"
+ "and v1.16b, v24.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v18.4s, v18.4s, v7.4s\n"
+ "sqadd v19.4s, v19.4s, v6.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v5.4s\n"
+ "sqadd v21.4s, v21.4s, v4.4s\n"
+ "sqadd v22.4s, v22.4s, v3.4s\n"
+ "and v7.16b, v25.16b, v0.16b\n"
+ "sqadd v23.4s, v23.4s, v2.4s\n"
+ "sqadd v24.4s, v24.4s, v1.4s\n"
+ "and v6.16b, v26.16b, v0.16b\n"
+ "and v5.16b, v27.16b, v0.16b\n"
+ "and v4.16b, v28.16b, v0.16b\n"
+ "and v3.16b, v29.16b, v0.16b\n"
+ "and v2.16b, v30.16b, v0.16b\n"
+ "and v1.16b, v31.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v25.4s, v25.4s, v7.4s\n"
+ "sqadd v26.4s, v26.4s, v6.4s\n"
+ "sqadd v27.4s, v27.4s, v5.4s\n"
+ "sqadd v28.4s, v28.4s, v4.4s\n"
+ "sqadd v29.4s, v29.4s, v3.4s\n"
+ "sqadd v30.4s, v30.4s, v2.4s\n"
+ "sqadd v31.4s, v31.4s, v1.4s\n"
+ "110:" // Height 4: no shift correction
+ "add x21, %x[qp], %[c_offset]\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v0.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v3.4s }, [x21]\n"
+ "ld1r { v2.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "srshl v28.4s, v28.4s, v0.4s\n"
+ "srshl v29.4s, v29.4s, v0.4s\n"
+ "srshl v30.4s, v30.4s, v0.4s\n"
+ "srshl v31.4s, v31.4s, v0.4s\n"
+ "add v16.4s, v16.4s, v3.4s\n"
+ "add v17.4s, v17.4s, v3.4s\n"
+ "add v18.4s, v18.4s, v3.4s\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v20.4s, v20.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v3.4s\n"
+ "add v22.4s, v22.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
+ "add v24.4s, v24.4s, v3.4s\n"
+ "add v25.4s, v25.4s, v3.4s\n"
+ "add v26.4s, v26.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v3.4s\n"
+ "add v28.4s, v28.4s, v3.4s\n"
+ "add v29.4s, v29.4s, v3.4s\n"
+ "add v30.4s, v30.4s, v3.4s\n"
+ "add v31.4s, v31.4s, v3.4s\n"
+ "smin v16.4s, v16.4s, v2.4s\n"
+ "smin v17.4s, v17.4s, v2.4s\n"
+ "smin v18.4s, v18.4s, v2.4s\n"
+ "smin v19.4s, v19.4s, v2.4s\n"
+ "smin v20.4s, v20.4s, v2.4s\n"
+ "smin v21.4s, v21.4s, v2.4s\n"
+ "smin v22.4s, v22.4s, v2.4s\n"
+ "smin v23.4s, v23.4s, v2.4s\n"
+ "smin v24.4s, v24.4s, v2.4s\n"
+ "smin v25.4s, v25.4s, v2.4s\n"
+ "smin v26.4s, v26.4s, v2.4s\n"
+ "smin v27.4s, v27.4s, v2.4s\n"
+ "smin v28.4s, v28.4s, v2.4s\n"
+ "smin v29.4s, v29.4s, v2.4s\n"
+ "smin v30.4s, v30.4s, v2.4s\n"
+ "smin v31.4s, v31.4s, v2.4s\n"
+ "smax v16.4s, v16.4s, v1.4s\n"
+ "smax v17.4s, v17.4s, v1.4s\n"
+ "smax v18.4s, v18.4s, v1.4s\n"
+ "smax v19.4s, v19.4s, v1.4s\n"
+ "smax v20.4s, v20.4s, v1.4s\n"
+ "smax v21.4s, v21.4s, v1.4s\n"
+ "smax v22.4s, v22.4s, v1.4s\n"
+ "smax v23.4s, v23.4s, v1.4s\n"
+ "smax v24.4s, v24.4s, v1.4s\n"
+ "smax v25.4s, v25.4s, v1.4s\n"
+ "smax v26.4s, v26.4s, v1.4s\n"
+ "smax v27.4s, v27.4s, v1.4s\n"
+ "smax v28.4s, v28.4s, v1.4s\n"
+ "smax v29.4s, v29.4s, v1.4s\n"
+ "smax v30.4s, v30.4s, v1.4s\n"
+ "smax v31.4s, v31.4s, v1.4s\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v0.8h, v18.8h, v19.8h\n"
+ "uzp1 v20.8h, v20.8h, v21.8h\n"
+ "uzp1 v19.8h, v22.8h, v23.8h\n"
+ "uzp1 v24.8h, v24.8h, v25.8h\n"
+ "uzp1 v18.8h, v26.8h, v27.8h\n"
+ "uzp1 v28.8h, v28.8h, v29.8h\n"
+ "uzp1 v17.8h, v30.8h, v31.8h\n"
+ "uzp1 v16.16b, v16.16b, v0.16b\n"
+ "uzp1 v20.16b, v20.16b, v19.16b\n"
+ "uzp1 v24.16b, v24.16b, v18.16b\n"
+ "uzp1 v28.16b, v28.16b, v17.16b\n"
+ "bge 119f\n"
+ "tbz x9, #3, 114f\n"
+ "str d16, [x27], #0x8\n"
+ "str d20, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "str d28, [x22], #0x8\n"
+ "tbz x9, #2, 112f\n"
+ "st1 { v16.s }[2], [x27], #0x4\n"
+ "st1 { v20.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "st1 { v28.s }[2], [x22], #0x4\n"
+ "tbz x9, #1, 111f\n"
+ "st1 { v16.h }[6], [x27], #0x2\n"
+ "st1 { v20.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "st1 { v28.h }[6], [x22], #0x2\n"
+ "tbz x9, #0, 118f\n"
+ "st1 { v16.b }[14], [x27]\n"
+ "st1 { v20.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "st1 { v28.b }[14], [x22]\n"
+ "b 118f\n"
+ "111:" // Height 4: Partial direct writeback: partial_1_12
+ "tbz x9, #0, 118f\n"
+ "st1 { v16.b }[12], [x27]\n"
+ "st1 { v20.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "st1 { v28.b }[12], [x22]\n"
+ "b 118f\n"
+ "112:" // Height 4: Partial direct writeback: partial_2_8
+ "tbz x9, #1, 113f\n"
+ "st1 { v16.h }[4], [x27], #0x2\n"
+ "st1 { v20.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "st1 { v28.h }[4], [x22], #0x2\n"
+ "tbz x9, #0, 118f\n"
+ "st1 { v16.b }[10], [x27]\n"
+ "st1 { v20.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "st1 { v28.b }[10], [x22]\n"
+ "b 118f\n"
+ "113:" // Height 4: Partial direct writeback: partial_1_8
+ "tbz x9, #0, 118f\n"
+ "st1 { v16.b }[8], [x27]\n"
+ "st1 { v20.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "st1 { v28.b }[8], [x22]\n"
+ "b 118f\n"
+ "114:" // Height 4: Partial direct writeback: partial_4_0
+ "tbz x9, #2, 116f\n"
+ "str s16, [x27], #0x4\n"
+ "str s20, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "str s28, [x22], #0x4\n"
+ "tbz x9, #1, 115f\n"
+ "st1 { v16.h }[2], [x27], #0x2\n"
+ "st1 { v20.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "st1 { v28.h }[2], [x22], #0x2\n"
+ "tbz x9, #0, 118f\n"
+ "st1 { v16.b }[6], [x27]\n"
+ "st1 { v20.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "st1 { v28.b }[6], [x22]\n"
+ "b 118f\n"
+ "115:" // Height 4: Partial direct writeback: partial_1_4
+ "tbz x9, #0, 118f\n"
+ "st1 { v16.b }[4], [x27]\n"
+ "st1 { v20.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "st1 { v28.b }[4], [x22]\n"
+ "b 118f\n"
+ "116:" // Height 4: Partial direct writeback: partial_2_0
+ "tbz x9, #1, 117f\n"
+ "str h16, [x27], #0x2\n"
+ "str h20, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "str h28, [x22], #0x2\n"
+ "tbz x9, #0, 118f\n"
+ "st1 { v16.b }[2], [x27]\n"
+ "st1 { v20.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "st1 { v28.b }[2], [x22]\n"
+ "b 118f\n"
+ "117:" // Height 4: Partial direct writeback: partial_1_0
+ "str b16, [x27, #0x0]\n"
+ "str b20, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "str b28, [x22, #0x0]\n"
+ "118:" // Height 4: Partial direct writeback: Done
+ "b 120f\n"
+ "119:" // Height 4: Full writeback
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "str q20, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "str q28, [x22, #0x0]\n"
+ "120:" // Height 4: Writeback done
+ "subs x9, x9, #0x10\n"
+ "bgt 92b\n"
+ "subs %x[M], %x[M], #0x4\n"
+ "beq 122f\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 121f\n"
+ "add x21, x21, #0x4\n"
+ "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "121:" // Update direct input
+ "mov x20, #0x4\n"
+ "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "b 1b\n"
+ "122:" // Exit
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16.hpp
new file mode 100644
index 0000000000..ee1297fc7b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16.hpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<uint8_t>, \
+ size_t, size_t, \
+ const int8_t *, \
+ IndirectOutputArg<uint8_t>, \
+ const Requantize32 *, const int32_t *, unsigned int
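+// In order these are: num_strings, string_lengths, A_arg, M, N, B_ptr,
+// output_arg, qp and col_bias, matching the names in the generic.cpp
+// definition; the trailing unsigned int is unnamed (and unused) there.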
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_hybrid_u8s8qa_mmla_4x16( ARGLIST );
+
+class cls_a64_hybrid_u8s8qa_mmla_4x16
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef uint8_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 4;
+ }
+
+ static unsigned int out_width()
+ {
+ return 16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 8;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return false;
+ }
+
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 4, 16, 8> transforms = {};
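+ // The per-CPUModel figures below are estimated MACs/cycle throughput
+ // scores consumed by arm_gemm's kernel selection heuristics; the
+ // default case covers cores without a dedicated measurement.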
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 47.74 };
+ case CPUModel::A510:
+ return { 27.99 };
+ case CPUModel::V1:
+ return { 62.26 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_hybrid_u8s8qa_mmla_4x16;
+ cls_a64_hybrid_u8s8qa_mmla_4x16(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16/generic.cpp
new file mode 100644
index 0000000000..00b9db05c0
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8qa_mmla_4x16/generic.cpp
@@ -0,0 +1,2099 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <cstdint>
+
+namespace arm_gemm {
+
+void a64_hybrid_u8s8qa_mmla_4x16 (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<uint8_t> A_arg,
+ size_t M, size_t N, const int8_t *B_ptr, IndirectOutputArg<uint8_t> output_arg,
+ const Requantize32 *qp, const int32_t *col_bias, unsigned int
+)
+{
+ struct KernelArgs {
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const int8_t *B_ptr = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ void *output_ptr = {};
+ } ka;
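+ // The assembly reads these fields through the offsetof_* immediates in
+ // its operand list, so the struct layout and the instruction stream
+ // stay in sync without hand-maintained offsets.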
+
+ unsigned long flags=0;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ ka.output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ if (qp->c_offset > qp->minval) {
+ flags |= 0x20;
+ }
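+ // Flag bits consumed by the assembly: 0x4 marks an indirect output and
+ // 0x8 an indirect input ("tbz %x[flags], #3"); 0x20, set just above,
+ // enables the rounding-shift correction path ("tbz %x[flags], #5");
+ // bit 31 is cleared at each height block's entry and set during
+ // requantization, so later column strips skip re-accumulating the row
+ // sums ("tbnz %x[flags], #31").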
+ __asm__ __volatile__(
+ "1:" // Row loop
+ "cmp %x[M], #0x4\n"
+ "bge 97f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 65f\n"
+ "beq 33f\n"
+ "mov x10, %x[col_bias]\n"
+ "movi v11.4s, #0x0\n"
+ "movi v15.16b, #0x1\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "2:" // Height 1: Column loop
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "3:" // Height 1: setup done
+ "mov x26, #0x0\n"
+ "4:" // Height 1: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 5f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "cbnz x26, 6f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "b 6f\n"
+ "5:" // Height 1: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "6:" // Height 1: input setup done
+ "cmp x25, #0x10\n"
+ "blt 11f\n"
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q5, [x28, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q6, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x30]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x60]\n"
+ "blt 9f\n"
+ "7:" // Height 1: Multiply loop: Main loop head
+ "add x24, x24, #0x10\n"
+ "trn1 v0.2d, v1.2d, v27.2d\n"
+ ".inst 0x4e85ac10 // usmmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q25, [x28, #0x70]\n"
+ "trn2 v1.2d, v1.2d, v27.2d\n"
+ ".inst 0x4e86ac14 // usmmla v20.4s, v0.16b, v6.16b\n"
+ "ldr q24, [x28, #0x80]\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ "ldr q30, [x28, #0x90]\n"
+ ".inst 0x4e88ac15 // usmmla v21.4s, v0.16b, v8.16b\n"
+ "ldr q29, [x28, #0xa0]\n"
+ ".inst 0x4e89ac12 // usmmla v18.4s, v0.16b, v9.16b\n"
+ "ldr q28, [x28, #0xb0]\n"
+ ".inst 0x4e8aac16 // usmmla v22.4s, v0.16b, v10.16b\n"
+ "ldr q27, [x28, #0xc0]\n"
+ ".inst 0x4e84ac13 // usmmla v19.4s, v0.16b, v4.16b\n"
+ "ldr q26, [x28, #0xd0]\n"
+ ".inst 0x4e99ac17 // usmmla v23.4s, v0.16b, v25.16b\n"
+ "ldr q25, [x28, #0xe0]\n"
+ ".inst 0x4e98ac30 // usmmla v16.4s, v1.16b, v24.16b\n"
+ "ldr q24, [x28, #0xf0]\n"
+ ".inst 0x4e9eac34 // usmmla v20.4s, v1.16b, v30.16b\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4e9dac31 // usmmla v17.4s, v1.16b, v29.16b\n"
+ ".inst 0x4e9cac35 // usmmla v21.4s, v1.16b, v28.16b\n"
+ ".inst 0x4e9bac32 // usmmla v18.4s, v1.16b, v27.16b\n"
+ ".inst 0x4e9aac36 // usmmla v22.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e99ac33 // usmmla v19.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e98ac37 // usmmla v23.4s, v1.16b, v24.16b\n"
+ "tbnz %x[flags], #31, 8f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
+ "8:" // Height 1: Multiply loop: unique 1: skip row sum
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q5, [x28, #0x0]\n"
+ "sub x25, x25, #0x10\n"
+ "ldr q6, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "cmp x25, #0x20\n"
+ "ldr q8, [x28, #0x30]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "bge 7b\n"
+ "9:" // Height 1: Multiply loop: Single iteration only
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "trn1 v0.2d, v1.2d, v24.2d\n"
+ "trn2 v1.2d, v1.2d, v24.2d\n"
+ ".inst 0x4e85ac10 // usmmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q25, [x28, #0x70]\n"
+ ".inst 0x4e86ac14 // usmmla v20.4s, v0.16b, v6.16b\n"
+ "ldr q24, [x28, #0x80]\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ "ldr q30, [x28, #0x90]\n"
+ ".inst 0x4e88ac15 // usmmla v21.4s, v0.16b, v8.16b\n"
+ "ldr q29, [x28, #0xa0]\n"
+ ".inst 0x4e89ac12 // usmmla v18.4s, v0.16b, v9.16b\n"
+ "ldr q28, [x28, #0xb0]\n"
+ ".inst 0x4e8aac16 // usmmla v22.4s, v0.16b, v10.16b\n"
+ "ldr q27, [x28, #0xc0]\n"
+ ".inst 0x4e84ac13 // usmmla v19.4s, v0.16b, v4.16b\n"
+ "ldr q26, [x28, #0xd0]\n"
+ ".inst 0x4e99ac17 // usmmla v23.4s, v0.16b, v25.16b\n"
+ "ldr q25, [x28, #0xe0]\n"
+ ".inst 0x4e98ac30 // usmmla v16.4s, v1.16b, v24.16b\n"
+ "ldr q24, [x28, #0xf0]\n"
+ ".inst 0x4e9eac34 // usmmla v20.4s, v1.16b, v30.16b\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4e9dac31 // usmmla v17.4s, v1.16b, v29.16b\n"
+ ".inst 0x4e9cac35 // usmmla v21.4s, v1.16b, v28.16b\n"
+ ".inst 0x4e9bac32 // usmmla v18.4s, v1.16b, v27.16b\n"
+ ".inst 0x4e9aac36 // usmmla v22.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e99ac33 // usmmla v19.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e98ac37 // usmmla v23.4s, v1.16b, v24.16b\n"
+ "tbnz %x[flags], #31, 10f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
+ "10:" // Height 1: Multiply loop: unique 2: skip row sum
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "11:" // Height 1: Multiply loop: Main loop skip
+ "cbz x25, 20f\n"
+ "cmp x25, #0x8\n"
+ "blt 14f\n"
+ "12:" // Height 1: Multiply loop: Odd block loop
+ "ldr d25, [x24], #0x8\n"
+ "trn1 v0.2d, v25.2d, v24.2d\n"
+ "tbnz %x[flags], #31, 13f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ "13:" // Height 1: Multiply loop: unique 3: skip row sum
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "sub x25, x25, #0x8\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "cmp x25, #0x8\n"
+ "ldr q27, [x28, #0x40]\n"
+ "ldr q26, [x28, #0x50]\n"
+ "ldr q25, [x28, #0x60]\n"
+ ".inst 0x4e98ac10 // usmmla v16.4s, v0.16b, v24.16b\n"
+ "ldr q24, [x28, #0x70]\n"
+ ".inst 0x4e9eac14 // usmmla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x4e9dac11 // usmmla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e9cac15 // usmmla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e9bac12 // usmmla v18.4s, v0.16b, v27.16b\n"
+ ".inst 0x4e9aac16 // usmmla v22.4s, v0.16b, v26.16b\n"
+ ".inst 0x4e99ac13 // usmmla v19.4s, v0.16b, v25.16b\n"
+ ".inst 0x4e98ac17 // usmmla v23.4s, v0.16b, v24.16b\n"
+ "bge 12b\n"
+ "14:" // Height 1: Multiply loop: Skip odd blocks
+ "cbz x25, 20f\n"
+ "tbz x25, #2, 16f\n"
+ "ldr s1, [x24], #0x4\n"
+ "tbz x25, #1, 15f\n"
+ "ld1 { v1.h }[2], [x24], #0x2\n"
+ "tbz x25, #0, 18f\n"
+ "ld1 { v1.b }[6], [x24]\n"
+ "b 18f\n"
+ "15:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x25, #0, 18f\n"
+ "ld1 { v1.b }[4], [x24]\n"
+ "b 18f\n"
+ "16:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x25, #1, 17f\n"
+ "ldr h1, [x24], #0x2\n"
+ "tbz x25, #0, 18f\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "b 18f\n"
+ "17:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x24, #0x0]\n"
+ "18:" // Height 1: Multiply loop: Ragged operand read: Done
+ "trn1 v0.2d, v1.2d, v24.2d\n"
+ "tbnz %x[flags], #31, 19f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ "19:" // Height 1: Multiply loop: unique 4: skip row sum
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "ldr q27, [x28, #0x40]\n"
+ "ldr q26, [x28, #0x50]\n"
+ "ldr q25, [x28, #0x60]\n"
+ ".inst 0x4e98ac10 // usmmla v16.4s, v0.16b, v24.16b\n"
+ "ldr q24, [x28, #0x70]\n"
+ ".inst 0x4e9eac14 // usmmla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x4e9dac11 // usmmla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e9cac15 // usmmla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e9bac12 // usmmla v18.4s, v0.16b, v27.16b\n"
+ ".inst 0x4e9aac16 // usmmla v22.4s, v0.16b, v26.16b\n"
+ ".inst 0x4e99ac13 // usmmla v19.4s, v0.16b, v25.16b\n"
+ ".inst 0x4e98ac17 // usmmla v23.4s, v0.16b, v24.16b\n"
+ "20:" // Height 1: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 4b\n"
+ "uzp1 v16.2d, v16.2d, v20.2d\n"
+ "uzp1 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "uzp1 v18.2d, v18.2d, v22.2d\n"
+ "uzp1 v19.2d, v19.2d, v23.2d\n"
+ "mov v23.16b, v16.16b\n"
+ "tbnz %x[flags], #31, 21f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "neg v16.4s, v16.4s\n"
+ "dup v11.4s, v11.s[0]\n"
+ "mul v11.4s, v11.4s, v16.4s\n"
+ "21:" // Height 1: skip row sum fixup
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q22, [x10, #0x10]\n"
+ "add v23.4s, v23.4s, v11.4s\n"
+ "add v17.4s, v17.4s, v11.4s\n"
+ "ldr q21, [x10, #0x20]\n"
+ "ldr q20, [x10, #0x30]\n"
+ "add v18.4s, v18.4s, v11.4s\n"
+ "add v19.4s, v19.4s, v11.4s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "add v23.4s, v23.4s, v24.4s\n"
+ "add v17.4s, v17.4s, v22.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add x10, x10, #0x40\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v21.4s\n"
+ "add v19.4s, v19.4s, v20.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v16.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v16.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v16.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v16.4s\n"
+ "tbz %x[flags], #5, 22f\n"
+ "and v22.16b, v23.16b, v0.16b\n"
+ "and v21.16b, v17.16b, v0.16b\n"
+ "and v20.16b, v18.16b, v0.16b\n"
+ "and v16.16b, v19.16b, v0.16b\n"
+ "sshr v22.4s, v22.4s, #0x1f\n"
+ "sshr v21.4s, v21.4s, #0x1f\n"
+ "sshr v20.4s, v20.4s, #0x1f\n"
+ "sshr v16.4s, v16.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v22.4s\n"
+ "sqadd v17.4s, v17.4s, v21.4s\n"
+ "sqadd v18.4s, v18.4s, v20.4s\n"
+ "sqadd v19.4s, v19.4s, v16.4s\n"
+ "22:" // Height 1: no shift correction
+ "add x21, %x[qp], %[c_offset]\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v0.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v21.4s }, [x21]\n"
+ "ld1r { v20.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
+ "ld1r { v16.4s }, [x20]\n"
+ "add v23.4s, v23.4s, v21.4s\n"
+ "add v17.4s, v17.4s, v21.4s\n"
+ "add v18.4s, v18.4s, v21.4s\n"
+ "add v19.4s, v19.4s, v21.4s\n"
+ "smin v23.4s, v23.4s, v20.4s\n"
+ "smin v17.4s, v17.4s, v20.4s\n"
+ "smin v18.4s, v18.4s, v20.4s\n"
+ "smin v19.4s, v19.4s, v20.4s\n"
+ "smax v23.4s, v23.4s, v16.4s\n"
+ "smax v17.4s, v17.4s, v16.4s\n"
+ "smax v18.4s, v18.4s, v16.4s\n"
+ "smax v19.4s, v19.4s, v16.4s\n"
+ "uzp1 v23.8h, v23.8h, v17.8h\n"
+ "uzp1 v16.8h, v18.8h, v19.8h\n"
+ "uzp1 v23.16b, v23.16b, v16.16b\n"
+ "bge 31f\n"
+ "tbz x9, #3, 26f\n"
+ "str d23, [x27], #0x8\n"
+ "tbz x9, #2, 24f\n"
+ "st1 { v23.s }[2], [x27], #0x4\n"
+ "tbz x9, #1, 23f\n"
+ "st1 { v23.h }[6], [x27], #0x2\n"
+ "tbz x9, #0, 30f\n"
+ "st1 { v23.b }[14], [x27]\n"
+ "b 30f\n"
+ "23:" // Height 1: Partial direct writeback: partial_1_12
+ "tbz x9, #0, 30f\n"
+ "st1 { v23.b }[12], [x27]\n"
+ "b 30f\n"
+ "24:" // Height 1: Partial direct writeback: partial_2_8
+ "tbz x9, #1, 25f\n"
+ "st1 { v23.h }[4], [x27], #0x2\n"
+ "tbz x9, #0, 30f\n"
+ "st1 { v23.b }[10], [x27]\n"
+ "b 30f\n"
+ "25:" // Height 1: Partial direct writeback: partial_1_8
+ "tbz x9, #0, 30f\n"
+ "st1 { v23.b }[8], [x27]\n"
+ "b 30f\n"
+ "26:" // Height 1: Partial direct writeback: partial_4_0
+ "tbz x9, #2, 28f\n"
+ "str s23, [x27], #0x4\n"
+ "tbz x9, #1, 27f\n"
+ "st1 { v23.h }[2], [x27], #0x2\n"
+ "tbz x9, #0, 30f\n"
+ "st1 { v23.b }[6], [x27]\n"
+ "b 30f\n"
+ "27:" // Height 1: Partial direct writeback: partial_1_4
+ "tbz x9, #0, 30f\n"
+ "st1 { v23.b }[4], [x27]\n"
+ "b 30f\n"
+ "28:" // Height 1: Partial direct writeback: partial_2_0
+ "tbz x9, #1, 29f\n"
+ "str h23, [x27], #0x2\n"
+ "tbz x9, #0, 30f\n"
+ "st1 { v23.b }[2], [x27]\n"
+ "b 30f\n"
+ "29:" // Height 1: Partial direct writeback: partial_1_0
+ "str b23, [x27, #0x0]\n"
+ "30:" // Height 1: Partial direct writeback: Done
+ "b 32f\n"
+ "31:" // Height 1: Full writeback
+ "str q23, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "32:" // Height 1: Writeback done
+ "subs x9, x9, #0x10\n"
+ "bgt 2b\n"
+ "b 130f\n"
+ "33:" // Height 2
+ "mov x10, %x[col_bias]\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "movi v15.16b, #0x1\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "34:" // Height 2: Column loop
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "35:" // Height 2: setup done
+ "mov x26, #0x0\n"
+ "36:" // Height 2: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 37f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "cbnz x26, 38f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "b 38f\n"
+ "37:" // Height 2: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "38:" // Height 2: input setup done
+ "cmp x25, #0x10\n"
+ "blt 43f\n"
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q5, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x30]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x60]\n"
+ "blt 41f\n"
+ "39:" // Height 2: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4e85ac10 // usmmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q25, [x28, #0x70]\n"
+ ".inst 0x4e86ac14 // usmmla v20.4s, v0.16b, v6.16b\n"
+ "ldr q24, [x28, #0x80]\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ "ldr q30, [x28, #0x90]\n"
+ ".inst 0x4e88ac15 // usmmla v21.4s, v0.16b, v8.16b\n"
+ "ldr q29, [x28, #0xa0]\n"
+ ".inst 0x4e89ac12 // usmmla v18.4s, v0.16b, v9.16b\n"
+ "ldr q28, [x28, #0xb0]\n"
+ ".inst 0x4e8aac16 // usmmla v22.4s, v0.16b, v10.16b\n"
+ "ldr q27, [x28, #0xc0]\n"
+ ".inst 0x4e84ac13 // usmmla v19.4s, v0.16b, v4.16b\n"
+ "ldr q26, [x28, #0xd0]\n"
+ ".inst 0x4e99ac17 // usmmla v23.4s, v0.16b, v25.16b\n"
+ "ldr q25, [x28, #0xe0]\n"
+ ".inst 0x4e98ac30 // usmmla v16.4s, v1.16b, v24.16b\n"
+ "ldr q24, [x28, #0xf0]\n"
+ ".inst 0x4e9eac34 // usmmla v20.4s, v1.16b, v30.16b\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4e9dac31 // usmmla v17.4s, v1.16b, v29.16b\n"
+ ".inst 0x4e9cac35 // usmmla v21.4s, v1.16b, v28.16b\n"
+ ".inst 0x4e9bac32 // usmmla v18.4s, v1.16b, v27.16b\n"
+ ".inst 0x4e9aac36 // usmmla v22.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e99ac33 // usmmla v19.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e98ac37 // usmmla v23.4s, v1.16b, v24.16b\n"
+ "tbnz %x[flags], #31, 40f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
+ "40:" // Height 2: Multiply loop: unique 5: skip row sum
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "sub x25, x25, #0x10\n"
+ "ldr q5, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "cmp x25, #0x20\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x30]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "ldr q4, [x28, #0x60]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "bge 39b\n"
+ "41:" // Height 2: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4e85ac10 // usmmla v16.4s, v0.16b, v5.16b\n"
+ "ldr q25, [x28, #0x70]\n"
+ ".inst 0x4e86ac14 // usmmla v20.4s, v0.16b, v6.16b\n"
+ "ldr q24, [x28, #0x80]\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ "ldr q30, [x28, #0x90]\n"
+ ".inst 0x4e88ac15 // usmmla v21.4s, v0.16b, v8.16b\n"
+ "ldr q29, [x28, #0xa0]\n"
+ ".inst 0x4e89ac12 // usmmla v18.4s, v0.16b, v9.16b\n"
+ "ldr q28, [x28, #0xb0]\n"
+ ".inst 0x4e8aac16 // usmmla v22.4s, v0.16b, v10.16b\n"
+ "ldr q27, [x28, #0xc0]\n"
+ ".inst 0x4e84ac13 // usmmla v19.4s, v0.16b, v4.16b\n"
+ "ldr q26, [x28, #0xd0]\n"
+ ".inst 0x4e99ac17 // usmmla v23.4s, v0.16b, v25.16b\n"
+ "ldr q25, [x28, #0xe0]\n"
+ ".inst 0x4e98ac30 // usmmla v16.4s, v1.16b, v24.16b\n"
+ "ldr q24, [x28, #0xf0]\n"
+ ".inst 0x4e9eac34 // usmmla v20.4s, v1.16b, v30.16b\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4e9dac31 // usmmla v17.4s, v1.16b, v29.16b\n"
+ ".inst 0x4e9cac35 // usmmla v21.4s, v1.16b, v28.16b\n"
+ ".inst 0x4e9bac32 // usmmla v18.4s, v1.16b, v27.16b\n"
+ ".inst 0x4e9aac36 // usmmla v22.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e99ac33 // usmmla v19.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e98ac37 // usmmla v23.4s, v1.16b, v24.16b\n"
+ "tbnz %x[flags], #31, 42f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
+ "42:" // Height 2: Multiply loop: unique 6: skip row sum
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "43:" // Height 2: Multiply loop: Main loop skip
+ "cbz x25, 52f\n"
+ "cmp x25, #0x8\n"
+ "blt 46f\n"
+ "44:" // Height 2: Multiply loop: Odd block loop
+ "ldr d25, [x24], #0x8\n"
+ "ldr d24, [x23], #0x8\n"
+ "trn1 v0.2d, v25.2d, v24.2d\n"
+ "tbnz %x[flags], #31, 45f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ "45:" // Height 2: Multiply loop: unique 7: skip row sum
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "sub x25, x25, #0x8\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "cmp x25, #0x8\n"
+ "ldr q27, [x28, #0x40]\n"
+ "ldr q26, [x28, #0x50]\n"
+ "ldr q25, [x28, #0x60]\n"
+ ".inst 0x4e98ac10 // usmmla v16.4s, v0.16b, v24.16b\n"
+ "ldr q24, [x28, #0x70]\n"
+ ".inst 0x4e9eac14 // usmmla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x4e9dac11 // usmmla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e9cac15 // usmmla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e9bac12 // usmmla v18.4s, v0.16b, v27.16b\n"
+ ".inst 0x4e9aac16 // usmmla v22.4s, v0.16b, v26.16b\n"
+ ".inst 0x4e99ac13 // usmmla v19.4s, v0.16b, v25.16b\n"
+ ".inst 0x4e98ac17 // usmmla v23.4s, v0.16b, v24.16b\n"
+ "bge 44b\n"
+ "46:" // Height 2: Multiply loop: Skip odd blocks
+ "cbz x25, 52f\n"
+ "tbz x25, #2, 48f\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "tbz x25, #1, 47f\n"
+ "ld1 { v1.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "tbz x25, #0, 50f\n"
+ "ld1 { v1.b }[6], [x24]\n"
+ "ld1 { v2.b }[6], [x23]\n"
+ "b 50f\n"
+ "47:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x25, #0, 50f\n"
+ "ld1 { v1.b }[4], [x24]\n"
+ "ld1 { v2.b }[4], [x23]\n"
+ "b 50f\n"
+ "48:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x25, #1, 49f\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "tbz x25, #0, 50f\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "b 50f\n"
+ "49:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "50:" // Height 2: Multiply loop: Ragged operand read: Done
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "tbnz %x[flags], #31, 51f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ "51:" // Height 2: Multiply loop: unique 8: skip row sum
+ "ldr q24, [x28, #0x0]\n"
+ "ldr q30, [x28, #0x10]\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q28, [x28, #0x30]\n"
+ "ldr q27, [x28, #0x40]\n"
+ "ldr q26, [x28, #0x50]\n"
+ "ldr q25, [x28, #0x60]\n"
+ ".inst 0x4e98ac10 // usmmla v16.4s, v0.16b, v24.16b\n"
+ "ldr q24, [x28, #0x70]\n"
+ ".inst 0x4e9eac14 // usmmla v20.4s, v0.16b, v30.16b\n"
+ ".inst 0x4e9dac11 // usmmla v17.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e9cac15 // usmmla v21.4s, v0.16b, v28.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e9bac12 // usmmla v18.4s, v0.16b, v27.16b\n"
+ ".inst 0x4e9aac16 // usmmla v22.4s, v0.16b, v26.16b\n"
+ ".inst 0x4e99ac13 // usmmla v19.4s, v0.16b, v25.16b\n"
+ ".inst 0x4e98ac17 // usmmla v23.4s, v0.16b, v24.16b\n"
+ "52:" // Height 2: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 36b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 v24.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "mov v23.16b, v24.16b\n"
+ "tbnz %x[flags], #31, 53f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "ld1r { v24.4s }, [x20]\n"
+ "neg v24.4s, v24.4s\n"
+ "dup v12.4s, v11.s[3]\n"
+ "dup v11.4s, v11.s[0]\n"
+ "mul v11.4s, v11.4s, v24.4s\n"
+ "mul v12.4s, v12.4s, v24.4s\n"
+ "53:" // Height 2: skip row sum fixup
+ "ldr q28, [x10, #0x0]\n"
+ "ldr q27, [x10, #0x10]\n"
+ "add v23.4s, v23.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v11.4s\n"
+ "ldr q26, [x10, #0x20]\n"
+ "ldr q25, [x10, #0x30]\n"
+ "add v21.4s, v21.4s, v11.4s\n"
+ "add v22.4s, v22.4s, v11.4s\n"
+ "add v16.4s, v16.4s, v12.4s\n"
+ "add v17.4s, v17.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v24.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v12.4s\n"
+ "add v19.4s, v19.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add v23.4s, v23.4s, v28.4s\n"
+ "add v20.4s, v20.4s, v27.4s\n"
+ "add x10, x10, #0x40\n"
+ "add v21.4s, v21.4s, v26.4s\n"
+ "add v22.4s, v22.4s, v25.4s\n"
+ "add v16.4s, v16.4s, v28.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v17.4s, v17.4s, v27.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v25.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v24.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v24.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v24.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v24.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v24.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v24.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v24.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v24.4s\n"
+ "tbz %x[flags], #5, 54f\n"
+ "and v24.16b, v23.16b, v0.16b\n"
+ "and v30.16b, v20.16b, v0.16b\n"
+ "and v29.16b, v21.16b, v0.16b\n"
+ "and v28.16b, v22.16b, v0.16b\n"
+ "and v27.16b, v16.16b, v0.16b\n"
+ "and v26.16b, v17.16b, v0.16b\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "and v25.16b, v18.16b, v0.16b\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sshr v27.4s, v27.4s, #0x1f\n"
+ "sqadd v23.4s, v23.4s, v24.4s\n"
+ "and v24.16b, v19.16b, v0.16b\n"
+ "sshr v26.4s, v26.4s, #0x1f\n"
+ "sshr v25.4s, v25.4s, #0x1f\n"
+ "sqadd v20.4s, v20.4s, v30.4s\n"
+ "sqadd v21.4s, v21.4s, v29.4s\n"
+ "sshr v24.4s, v24.4s, #0x1f\n"
+ "sqadd v22.4s, v22.4s, v28.4s\n"
+ "sqadd v16.4s, v16.4s, v27.4s\n"
+ "sqadd v17.4s, v17.4s, v26.4s\n"
+ "sqadd v18.4s, v18.4s, v25.4s\n"
+ "sqadd v19.4s, v19.4s, v24.4s\n"
+ "54:" // Height 2: no shift correction
+ "add x21, %x[qp], %[c_offset]\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v26.4s }, [x21]\n"
+ "ld1r { v25.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v0.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
+ "ld1r { v24.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "add v23.4s, v23.4s, v26.4s\n"
+ "add v20.4s, v20.4s, v26.4s\n"
+ "add v21.4s, v21.4s, v26.4s\n"
+ "add v22.4s, v22.4s, v26.4s\n"
+ "add v16.4s, v16.4s, v26.4s\n"
+ "add v17.4s, v17.4s, v26.4s\n"
+ "add v18.4s, v18.4s, v26.4s\n"
+ "add v19.4s, v19.4s, v26.4s\n"
+ "smin v23.4s, v23.4s, v25.4s\n"
+ "smin v20.4s, v20.4s, v25.4s\n"
+ "smin v21.4s, v21.4s, v25.4s\n"
+ "smin v22.4s, v22.4s, v25.4s\n"
+ "smin v16.4s, v16.4s, v25.4s\n"
+ "smin v17.4s, v17.4s, v25.4s\n"
+ "smin v18.4s, v18.4s, v25.4s\n"
+ "smin v19.4s, v19.4s, v25.4s\n"
+ "smax v23.4s, v23.4s, v24.4s\n"
+ "smax v20.4s, v20.4s, v24.4s\n"
+ "smax v21.4s, v21.4s, v24.4s\n"
+ "smax v22.4s, v22.4s, v24.4s\n"
+ "smax v16.4s, v16.4s, v24.4s\n"
+ "smax v17.4s, v17.4s, v24.4s\n"
+ "smax v18.4s, v18.4s, v24.4s\n"
+ "smax v19.4s, v19.4s, v24.4s\n"
+ "uzp1 v23.8h, v23.8h, v20.8h\n"
+ "uzp1 v20.8h, v21.8h, v22.8h\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v17.8h, v18.8h, v19.8h\n"
+ "uzp1 v23.16b, v23.16b, v20.16b\n"
+ "uzp1 v16.16b, v16.16b, v17.16b\n"
+ "bge 63f\n"
+ "tbz x9, #3, 58f\n"
+ "str d23, [x27], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "tbz x9, #2, 56f\n"
+ "st1 { v23.s }[2], [x27], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "tbz x9, #1, 55f\n"
+ "st1 { v23.h }[6], [x27], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "tbz x9, #0, 62f\n"
+ "st1 { v23.b }[14], [x27]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "b 62f\n"
+ "55:" // Height 2: Partial direct writeback: partial_1_12
+ "tbz x9, #0, 62f\n"
+ "st1 { v23.b }[12], [x27]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "b 62f\n"
+ "56:" // Height 2: Partial direct writeback: partial_2_8
+ "tbz x9, #1, 57f\n"
+ "st1 { v23.h }[4], [x27], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "tbz x9, #0, 62f\n"
+ "st1 { v23.b }[10], [x27]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "b 62f\n"
+ "57:" // Height 2: Partial direct writeback: partial_1_8
+ "tbz x9, #0, 62f\n"
+ "st1 { v23.b }[8], [x27]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "b 62f\n"
+ "58:" // Height 2: Partial direct writeback: partial_4_0
+ "tbz x9, #2, 60f\n"
+ "str s23, [x27], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "tbz x9, #1, 59f\n"
+ "st1 { v23.h }[2], [x27], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "tbz x9, #0, 62f\n"
+ "st1 { v23.b }[6], [x27]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "b 62f\n"
+ "59:" // Height 2: Partial direct writeback: partial_1_4
+ "tbz x9, #0, 62f\n"
+ "st1 { v23.b }[4], [x27]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "b 62f\n"
+ "60:" // Height 2: Partial direct writeback: partial_2_0
+ "tbz x9, #1, 61f\n"
+ "str h23, [x27], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "tbz x9, #0, 62f\n"
+ "st1 { v23.b }[2], [x27]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "b 62f\n"
+ "61:" // Height 2: Partial direct writeback: partial_1_0
+ "str b23, [x27, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "62:" // Height 2: Partial direct writeback: Done
+ "b 64f\n"
+ "63:" // Height 2: Full writeback
+ "str q23, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "str q16, [x24, #0x0]\n"
+ "64:" // Height 2: Writeback done
+ "subs x9, x9, #0x10\n"
+ "bgt 34b\n"
+ "b 130f\n"
+ "65:" // Height 3
+ "mov x10, %x[col_bias]\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "movi v13.4s, #0x0\n"
+ "movi v15.16b, #0x1\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "66:" // Height 3: Column loop
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ "67:" // Height 3: setup done
+ "mov x26, #0x0\n"
+ "68:" // Height 3: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 69f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "cbnz x26, 70f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "b 70f\n"
+ "69:" // Height 3: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "70:" // Height 3: input setup done
+ "cmp x25, #0x10\n"
+ "blt 75f\n"
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q5, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x30]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "blt 73f\n"
+ "71:" // Height 3: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q14, [x28, #0x60]\n"
+ ".inst 0x4e85ac10 // usmmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86ac14 // usmmla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e88ac15 // usmmla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e85ac58 // usmmla v24.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0x70]\n"
+ ".inst 0x4e86ac5c // usmmla v28.4s, v2.16b, v6.16b\n"
+ "ldr q4, [x28, #0x80]\n"
+ ".inst 0x4e89ac12 // usmmla v18.4s, v0.16b, v9.16b\n"
+ ".inst 0x4e87ac59 // usmmla v25.4s, v2.16b, v7.16b\n"
+ "ldr q7, [x28, #0x90]\n"
+ ".inst 0x4e88ac5d // usmmla v29.4s, v2.16b, v8.16b\n"
+ "ldr q8, [x28, #0xa0]\n"
+ ".inst 0x4e89ac5a // usmmla v26.4s, v2.16b, v9.16b\n"
+ "ldr q9, [x28, #0xb0]\n"
+ ".inst 0x4e8aac16 // usmmla v22.4s, v0.16b, v10.16b\n"
+ ".inst 0x4e8aac5e // usmmla v30.4s, v2.16b, v10.16b\n"
+ "ldr q10, [x28, #0xc0]\n"
+ ".inst 0x4e8eac13 // usmmla v19.4s, v0.16b, v14.16b\n"
+ ".inst 0x4e8eac5b // usmmla v27.4s, v2.16b, v14.16b\n"
+ "ldr q6, [x28, #0xd0]\n"
+ ".inst 0x4e85ac17 // usmmla v23.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85ac5f // usmmla v31.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0xe0]\n"
+ ".inst 0x4e84ac30 // usmmla v16.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e84ac78 // usmmla v24.4s, v3.16b, v4.16b\n"
+ "ldr q4, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4e87ac34 // usmmla v20.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e87ac7c // usmmla v28.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e88ac31 // usmmla v17.4s, v1.16b, v8.16b\n"
+ ".inst 0x4e88ac79 // usmmla v25.4s, v3.16b, v8.16b\n"
+ ".inst 0x4e89ac35 // usmmla v21.4s, v1.16b, v9.16b\n"
+ ".inst 0x4e89ac7d // usmmla v29.4s, v3.16b, v9.16b\n"
+ ".inst 0x4e8aac32 // usmmla v18.4s, v1.16b, v10.16b\n"
+ ".inst 0x4e8aac7a // usmmla v26.4s, v3.16b, v10.16b\n"
+ ".inst 0x4e86ac36 // usmmla v22.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac7e // usmmla v30.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e85ac33 // usmmla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e85ac7b // usmmla v27.4s, v3.16b, v5.16b\n"
+ ".inst 0x4e84ac37 // usmmla v23.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e84ac7f // usmmla v31.4s, v3.16b, v4.16b\n"
+ "tbnz %x[flags], #31, 72f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f946d // udot v13.4s, v3.16b, v15.16b\n"
+ "72:" // Height 3: Multiply loop: unique 9: skip row sum
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "sub x25, x25, #0x10\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q5, [x28, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q6, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x30]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "bge 71b\n"
+ "73:" // Height 3: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q14, [x28, #0x60]\n"
+ ".inst 0x4e85ac10 // usmmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86ac14 // usmmla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e88ac15 // usmmla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e85ac58 // usmmla v24.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0x70]\n"
+ ".inst 0x4e86ac5c // usmmla v28.4s, v2.16b, v6.16b\n"
+ "ldr q4, [x28, #0x80]\n"
+ ".inst 0x4e89ac12 // usmmla v18.4s, v0.16b, v9.16b\n"
+ ".inst 0x4e87ac59 // usmmla v25.4s, v2.16b, v7.16b\n"
+ "ldr q7, [x28, #0x90]\n"
+ ".inst 0x4e88ac5d // usmmla v29.4s, v2.16b, v8.16b\n"
+ "ldr q8, [x28, #0xa0]\n"
+ ".inst 0x4e89ac5a // usmmla v26.4s, v2.16b, v9.16b\n"
+ "ldr q9, [x28, #0xb0]\n"
+ ".inst 0x4e8aac16 // usmmla v22.4s, v0.16b, v10.16b\n"
+ ".inst 0x4e8aac5e // usmmla v30.4s, v2.16b, v10.16b\n"
+ "ldr q10, [x28, #0xc0]\n"
+ ".inst 0x4e8eac13 // usmmla v19.4s, v0.16b, v14.16b\n"
+ ".inst 0x4e8eac5b // usmmla v27.4s, v2.16b, v14.16b\n"
+ "ldr q6, [x28, #0xd0]\n"
+ ".inst 0x4e85ac17 // usmmla v23.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85ac5f // usmmla v31.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0xe0]\n"
+ ".inst 0x4e84ac30 // usmmla v16.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e84ac78 // usmmla v24.4s, v3.16b, v4.16b\n"
+ "ldr q4, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4e87ac34 // usmmla v20.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e87ac7c // usmmla v28.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e88ac31 // usmmla v17.4s, v1.16b, v8.16b\n"
+ ".inst 0x4e88ac79 // usmmla v25.4s, v3.16b, v8.16b\n"
+ ".inst 0x4e89ac35 // usmmla v21.4s, v1.16b, v9.16b\n"
+ ".inst 0x4e89ac7d // usmmla v29.4s, v3.16b, v9.16b\n"
+ ".inst 0x4e8aac32 // usmmla v18.4s, v1.16b, v10.16b\n"
+ ".inst 0x4e8aac7a // usmmla v26.4s, v3.16b, v10.16b\n"
+ ".inst 0x4e86ac36 // usmmla v22.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac7e // usmmla v30.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e85ac33 // usmmla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e85ac7b // usmmla v27.4s, v3.16b, v5.16b\n"
+ ".inst 0x4e84ac37 // usmmla v23.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e84ac7f // usmmla v31.4s, v3.16b, v4.16b\n"
+ "tbnz %x[flags], #31, 74f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f946d // udot v13.4s, v3.16b, v15.16b\n"
+ "74:" // Height 3: Multiply loop: unique 10: skip row sum
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "75:" // Height 3: Multiply loop: Main loop skip
+ "cbz x25, 84f\n"
+ "cmp x25, #0x8\n"
+ "blt 78f\n"
+ "76:" // Height 3: Multiply loop: Odd block loop
+ "ldr d3, [x24], #0x8\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "trn1 v0.2d, v3.2d, v0.2d\n"
+ "trn1 v2.2d, v1.2d, v2.2d\n"
+ "tbnz %x[flags], #31, 77f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ "77:" // Height 3: Multiply loop: unique 11: skip row sum
+ "ldr q1, [x28, #0x0]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "sub x25, x25, #0x8\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "cmp x25, #0x8\n"
+ "ldr q5, [x28, #0x40]\n"
+ "ldr q4, [x28, #0x50]\n"
+ "ldr q3, [x28, #0x60]\n"
+ ".inst 0x4e81ac10 // usmmla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81ac58 // usmmla v24.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x28, #0x70]\n"
+ ".inst 0x4e88ac14 // usmmla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e88ac5c // usmmla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac59 // usmmla v25.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e86ac15 // usmmla v21.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac5d // usmmla v29.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e85ac12 // usmmla v18.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85ac5a // usmmla v26.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84ac16 // usmmla v22.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e84ac5e // usmmla v30.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e83ac13 // usmmla v19.4s, v0.16b, v3.16b\n"
+ ".inst 0x4e83ac5b // usmmla v27.4s, v2.16b, v3.16b\n"
+ ".inst 0x4e81ac17 // usmmla v23.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81ac5f // usmmla v31.4s, v2.16b, v1.16b\n"
+ "bge 76b\n"
+ "78:" // Height 3: Multiply loop: Skip odd blocks
+ "cbz x25, 84f\n"
+ "tbz x25, #2, 80f\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "tbz x25, #1, 79f\n"
+ "ld1 { v1.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "ld1 { v3.h }[2], [x22], #0x2\n"
+ "tbz x25, #0, 82f\n"
+ "ld1 { v1.b }[6], [x24]\n"
+ "ld1 { v2.b }[6], [x23]\n"
+ "ld1 { v3.b }[6], [x22]\n"
+ "b 82f\n"
+ "79:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x25, #0, 82f\n"
+ "ld1 { v1.b }[4], [x24]\n"
+ "ld1 { v2.b }[4], [x23]\n"
+ "ld1 { v3.b }[4], [x22]\n"
+ "b 82f\n"
+ "80:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x25, #1, 81f\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "tbz x25, #0, 82f\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
+ "b 82f\n"
+ "81:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
+ "82:" // Height 3: Multiply loop: Ragged operand read: Done
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "tbnz %x[flags], #31, 83f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ "83:" // Height 3: Multiply loop: unique 12: skip row sum
+ "ldr q1, [x28, #0x0]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "ldr q5, [x28, #0x40]\n"
+ "ldr q4, [x28, #0x50]\n"
+ "ldr q3, [x28, #0x60]\n"
+ ".inst 0x4e81ac10 // usmmla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81ac58 // usmmla v24.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x28, #0x70]\n"
+ ".inst 0x4e88ac14 // usmmla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e88ac5c // usmmla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac59 // usmmla v25.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e86ac15 // usmmla v21.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac5d // usmmla v29.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e85ac12 // usmmla v18.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85ac5a // usmmla v26.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84ac16 // usmmla v22.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e84ac5e // usmmla v30.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e83ac13 // usmmla v19.4s, v0.16b, v3.16b\n"
+ ".inst 0x4e83ac5b // usmmla v27.4s, v2.16b, v3.16b\n"
+ ".inst 0x4e81ac17 // usmmla v23.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81ac5f // usmmla v31.4s, v2.16b, v1.16b\n"
+ "84:" // Height 3: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 68b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 v0.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "uzp1 v24.2d, v24.2d, v28.2d\n"
+ "uzp1 v25.2d, v25.2d, v29.2d\n"
+ "uzp1 v26.2d, v26.2d, v30.2d\n"
+ "uzp1 v27.2d, v27.2d, v31.2d\n"
+ "mov v31.16b, v0.16b\n"
+ "tbnz %x[flags], #31, 85f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "addp v13.4s, v13.4s, v13.4s\n"
+ "ld1r { v23.4s }, [x20]\n"
+ "neg v23.4s, v23.4s\n"
+ "dup v12.4s, v11.s[3]\n"
+ "dup v11.4s, v11.s[0]\n"
+ "dup v13.4s, v13.s[0]\n"
+ "mul v11.4s, v11.4s, v23.4s\n"
+ "mul v12.4s, v12.4s, v23.4s\n"
+ "mul v13.4s, v13.4s, v23.4s\n"
+ "85:" // Height 3: skip row sum fixup
+ "ldr q0, [x10, #0x0]\n"
+ "ldr q30, [x10, #0x10]\n"
+ "add v31.4s, v31.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v11.4s\n"
+ "ldr q29, [x10, #0x20]\n"
+ "ldr q28, [x10, #0x30]\n"
+ "add v21.4s, v21.4s, v11.4s\n"
+ "add v22.4s, v22.4s, v11.4s\n"
+ "add v16.4s, v16.4s, v12.4s\n"
+ "add v17.4s, v17.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v23.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v12.4s\n"
+ "add v19.4s, v19.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add v24.4s, v24.4s, v13.4s\n"
+ "add v25.4s, v25.4s, v13.4s\n"
+ "add x10, x10, #0x40\n"
+ "add v26.4s, v26.4s, v13.4s\n"
+ "add v27.4s, v27.4s, v13.4s\n"
+ "add v31.4s, v31.4s, v0.4s\n"
+ "add v20.4s, v20.4s, v30.4s\n"
+ "add v21.4s, v21.4s, v29.4s\n"
+ "add v22.4s, v22.4s, v28.4s\n"
+ "add v16.4s, v16.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v30.4s\n"
+ "add v18.4s, v18.4s, v29.4s\n"
+ "add v19.4s, v19.4s, v28.4s\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v25.4s, v25.4s, v30.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "add v27.4s, v27.4s, v28.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v23.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v23.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v23.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v23.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v23.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v23.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v23.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v23.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v23.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v23.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v23.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v23.4s\n"
+ "tbz %x[flags], #5, 86f\n"
+ "and v1.16b, v31.16b, v0.16b\n"
+ "and v30.16b, v20.16b, v0.16b\n"
+ "and v29.16b, v21.16b, v0.16b\n"
+ "and v28.16b, v22.16b, v0.16b\n"
+ "and v23.16b, v16.16b, v0.16b\n"
+ "and v3.16b, v17.16b, v0.16b\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "and v2.16b, v18.16b, v0.16b\n"
+ "sqadd v31.4s, v31.4s, v1.4s\n"
+ "sqadd v20.4s, v20.4s, v30.4s\n"
+ "sqadd v21.4s, v21.4s, v29.4s\n"
+ "sqadd v22.4s, v22.4s, v28.4s\n"
+ "sqadd v16.4s, v16.4s, v23.4s\n"
+ "and v1.16b, v19.16b, v0.16b\n"
+ "and v30.16b, v24.16b, v0.16b\n"
+ "and v29.16b, v25.16b, v0.16b\n"
+ "and v28.16b, v26.16b, v0.16b\n"
+ "and v23.16b, v27.16b, v0.16b\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sshr v30.4s, v30.4s, #0x1f\n"
+ "sshr v29.4s, v29.4s, #0x1f\n"
+ "sshr v28.4s, v28.4s, #0x1f\n"
+ "sshr v23.4s, v23.4s, #0x1f\n"
+ "sqadd v17.4s, v17.4s, v3.4s\n"
+ "sqadd v18.4s, v18.4s, v2.4s\n"
+ "sqadd v19.4s, v19.4s, v1.4s\n"
+ "sqadd v24.4s, v24.4s, v30.4s\n"
+ "sqadd v25.4s, v25.4s, v29.4s\n"
+ "sqadd v26.4s, v26.4s, v28.4s\n"
+ "sqadd v27.4s, v27.4s, v23.4s\n"
+ "86:" // Height 3: no shift correction
+ "add x21, %x[qp], %[c_offset]\n"
+ "srshl v31.4s, v31.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v29.4s }, [x21]\n"
+ "ld1r { v28.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v0.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
+ "ld1r { v23.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "add v31.4s, v31.4s, v29.4s\n"
+ "add v20.4s, v20.4s, v29.4s\n"
+ "add v21.4s, v21.4s, v29.4s\n"
+ "add v22.4s, v22.4s, v29.4s\n"
+ "add v16.4s, v16.4s, v29.4s\n"
+ "add v17.4s, v17.4s, v29.4s\n"
+ "add v18.4s, v18.4s, v29.4s\n"
+ "add v19.4s, v19.4s, v29.4s\n"
+ "add v24.4s, v24.4s, v29.4s\n"
+ "add v25.4s, v25.4s, v29.4s\n"
+ "add v26.4s, v26.4s, v29.4s\n"
+ "add v27.4s, v27.4s, v29.4s\n"
+ "smin v31.4s, v31.4s, v28.4s\n"
+ "smin v20.4s, v20.4s, v28.4s\n"
+ "smin v21.4s, v21.4s, v28.4s\n"
+ "smin v22.4s, v22.4s, v28.4s\n"
+ "smin v16.4s, v16.4s, v28.4s\n"
+ "smin v17.4s, v17.4s, v28.4s\n"
+ "smin v18.4s, v18.4s, v28.4s\n"
+ "smin v19.4s, v19.4s, v28.4s\n"
+ "smin v24.4s, v24.4s, v28.4s\n"
+ "smin v25.4s, v25.4s, v28.4s\n"
+ "smin v26.4s, v26.4s, v28.4s\n"
+ "smin v27.4s, v27.4s, v28.4s\n"
+ "smax v31.4s, v31.4s, v23.4s\n"
+ "smax v20.4s, v20.4s, v23.4s\n"
+ "smax v21.4s, v21.4s, v23.4s\n"
+ "smax v22.4s, v22.4s, v23.4s\n"
+ "smax v16.4s, v16.4s, v23.4s\n"
+ "smax v17.4s, v17.4s, v23.4s\n"
+ "smax v18.4s, v18.4s, v23.4s\n"
+ "smax v19.4s, v19.4s, v23.4s\n"
+ "smax v24.4s, v24.4s, v23.4s\n"
+ "smax v25.4s, v25.4s, v23.4s\n"
+ "smax v26.4s, v26.4s, v23.4s\n"
+ "smax v27.4s, v27.4s, v23.4s\n"
+ "uzp1 v31.8h, v31.8h, v20.8h\n"
+ "uzp1 v20.8h, v21.8h, v22.8h\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v18.8h, v18.8h, v19.8h\n"
+ "uzp1 v24.8h, v24.8h, v25.8h\n"
+ "uzp1 v17.8h, v26.8h, v27.8h\n"
+ "uzp1 v31.16b, v31.16b, v20.16b\n"
+ "uzp1 v16.16b, v16.16b, v18.16b\n"
+ "uzp1 v24.16b, v24.16b, v17.16b\n"
+ "bge 95f\n"
+ "tbz x9, #3, 90f\n"
+ "str d31, [x27], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d24, [x23], #0x8\n"
+ "tbz x9, #2, 88f\n"
+ "st1 { v31.s }[2], [x27], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v24.s }[2], [x23], #0x4\n"
+ "tbz x9, #1, 87f\n"
+ "st1 { v31.h }[6], [x27], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "st1 { v24.h }[6], [x23], #0x2\n"
+ "tbz x9, #0, 94f\n"
+ "st1 { v31.b }[14], [x27]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "st1 { v24.b }[14], [x23]\n"
+ "b 94f\n"
+ "87:" // Height 3: Partial direct writeback: partial_1_12
+ "tbz x9, #0, 94f\n"
+ "st1 { v31.b }[12], [x27]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "st1 { v24.b }[12], [x23]\n"
+ "b 94f\n"
+ "88:" // Height 3: Partial direct writeback: partial_2_8
+ "tbz x9, #1, 89f\n"
+ "st1 { v31.h }[4], [x27], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "st1 { v24.h }[4], [x23], #0x2\n"
+ "tbz x9, #0, 94f\n"
+ "st1 { v31.b }[10], [x27]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "st1 { v24.b }[10], [x23]\n"
+ "b 94f\n"
+ "89:" // Height 3: Partial direct writeback: partial_1_8
+ "tbz x9, #0, 94f\n"
+ "st1 { v31.b }[8], [x27]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "st1 { v24.b }[8], [x23]\n"
+ "b 94f\n"
+ "90:" // Height 3: Partial direct writeback: partial_4_0
+ "tbz x9, #2, 92f\n"
+ "str s31, [x27], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "str s24, [x23], #0x4\n"
+ "tbz x9, #1, 91f\n"
+ "st1 { v31.h }[2], [x27], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "st1 { v24.h }[2], [x23], #0x2\n"
+ "tbz x9, #0, 94f\n"
+ "st1 { v31.b }[6], [x27]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "st1 { v24.b }[6], [x23]\n"
+ "b 94f\n"
+ "91:" // Height 3: Partial direct writeback: partial_1_4
+ "tbz x9, #0, 94f\n"
+ "st1 { v31.b }[4], [x27]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "st1 { v24.b }[4], [x23]\n"
+ "b 94f\n"
+ "92:" // Height 3: Partial direct writeback: partial_2_0
+ "tbz x9, #1, 93f\n"
+ "str h31, [x27], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "str h24, [x23], #0x2\n"
+ "tbz x9, #0, 94f\n"
+ "st1 { v31.b }[2], [x27]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "st1 { v24.b }[2], [x23]\n"
+ "b 94f\n"
+ "93:" // Height 3: Partial direct writeback: partial_1_0
+ "str b31, [x27, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "str b24, [x23, #0x0]\n"
+ "94:" // Height 3: Partial direct writeback: Done
+ "b 96f\n"
+ "95:" // Height 3: Full writeback
+ "str q31, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "str q16, [x24, #0x0]\n"
+ "str q24, [x23, #0x0]\n"
+ "96:" // Height 3: Writeback done
+ "subs x9, x9, #0x10\n"
+ "bgt 66b\n"
+ "b 130f\n"
+ "97:" // Height 4
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x20, #0x4\n"
+ "mov x10, %x[col_bias]\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x27\n"
+ "movi v15.16b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "98:" // Height 4: Column loop
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ "99:" // Height 4: setup done
+ "mov x26, #0x0\n"
+ "100:" // Height 4: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 101f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "ldr x21, [x20, #0x18]\n"
+ "cbnz x26, 102f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "add x21, x21, x20\n"
+ "b 102f\n"
+ "101:" // Height 4: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
+ "102:" // Height 4: input setup done
+ "cmp x25, #0x10\n"
+ "blt 107f\n"
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "ldr q5, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x30]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "blt 105f\n"
+ "103:" // Height 4: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q4, [x28, #0x60]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4e85ac10 // usmmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86ac14 // usmmla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e88ac15 // usmmla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e85ac58 // usmmla v24.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0x70]\n"
+ ".inst 0x4e86ac5c // usmmla v28.4s, v2.16b, v6.16b\n"
+ "ldr q6, [x28, #0x80]\n"
+ ".inst 0x4e87ac59 // usmmla v25.4s, v2.16b, v7.16b\n"
+ "ldr q7, [x28, #0x90]\n"
+ ".inst 0x4e88ac5d // usmmla v29.4s, v2.16b, v8.16b\n"
+ "ldr q8, [x28, #0xa0]\n"
+ ".inst 0x4e89ac12 // usmmla v18.4s, v0.16b, v9.16b\n"
+ ".inst 0x4e89ac5a // usmmla v26.4s, v2.16b, v9.16b\n"
+ "ldr q9, [x28, #0xb0]\n"
+ ".inst 0x4e8aac16 // usmmla v22.4s, v0.16b, v10.16b\n"
+ ".inst 0x4e8aac5e // usmmla v30.4s, v2.16b, v10.16b\n"
+ "ldr q10, [x28, #0xc0]\n"
+ ".inst 0x4e84ac13 // usmmla v19.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e84ac5b // usmmla v27.4s, v2.16b, v4.16b\n"
+ "ldr q4, [x28, #0xd0]\n"
+ ".inst 0x4e85ac17 // usmmla v23.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85ac5f // usmmla v31.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0xe0]\n"
+ ".inst 0x4e86ac30 // usmmla v16.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac78 // usmmla v24.4s, v3.16b, v6.16b\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4e87ac34 // usmmla v20.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e87ac7c // usmmla v28.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e88ac31 // usmmla v17.4s, v1.16b, v8.16b\n"
+ ".inst 0x4e88ac79 // usmmla v25.4s, v3.16b, v8.16b\n"
+ ".inst 0x4e89ac35 // usmmla v21.4s, v1.16b, v9.16b\n"
+ ".inst 0x4e89ac7d // usmmla v29.4s, v3.16b, v9.16b\n"
+ ".inst 0x4e8aac32 // usmmla v18.4s, v1.16b, v10.16b\n"
+ ".inst 0x4e8aac7a // usmmla v26.4s, v3.16b, v10.16b\n"
+ ".inst 0x4e84ac36 // usmmla v22.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e84ac7e // usmmla v30.4s, v3.16b, v4.16b\n"
+ ".inst 0x4e85ac33 // usmmla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e85ac7b // usmmla v27.4s, v3.16b, v5.16b\n"
+ ".inst 0x4e86ac37 // usmmla v23.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac7f // usmmla v31.4s, v3.16b, v6.16b\n"
+ "tbnz %x[flags], #31, 104f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f946d // udot v13.4s, v3.16b, v15.16b\n"
+ "104:" // Height 4: Multiply loop: unique 13: skip row sum
+ "ldr q1, [x24, #0x0]\n"
+ "ldr q2, [x23, #0x0]\n"
+ "sub x25, x25, #0x10\n"
+ "ldr q3, [x22, #0x0]\n"
+ "ldr q4, [x21, #0x0]\n"
+ "cmp x25, #0x20\n"
+ "ldr q5, [x28, #0x0]\n"
+ "ldr q6, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q8, [x28, #0x30]\n"
+ "ldr q9, [x28, #0x40]\n"
+ "ldr q10, [x28, #0x50]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "bge 103b\n"
+ "105:" // Height 4: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "ldr q4, [x28, #0x60]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4e85ac10 // usmmla v16.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e86ac14 // usmmla v20.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e88ac15 // usmmla v21.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e85ac58 // usmmla v24.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0x70]\n"
+ ".inst 0x4e86ac5c // usmmla v28.4s, v2.16b, v6.16b\n"
+ "ldr q6, [x28, #0x80]\n"
+ ".inst 0x4e87ac59 // usmmla v25.4s, v2.16b, v7.16b\n"
+ "ldr q7, [x28, #0x90]\n"
+ ".inst 0x4e88ac5d // usmmla v29.4s, v2.16b, v8.16b\n"
+ "ldr q8, [x28, #0xa0]\n"
+ ".inst 0x4e89ac12 // usmmla v18.4s, v0.16b, v9.16b\n"
+ ".inst 0x4e89ac5a // usmmla v26.4s, v2.16b, v9.16b\n"
+ "ldr q9, [x28, #0xb0]\n"
+ ".inst 0x4e8aac16 // usmmla v22.4s, v0.16b, v10.16b\n"
+ ".inst 0x4e8aac5e // usmmla v30.4s, v2.16b, v10.16b\n"
+ "ldr q10, [x28, #0xc0]\n"
+ ".inst 0x4e84ac13 // usmmla v19.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e84ac5b // usmmla v27.4s, v2.16b, v4.16b\n"
+ "ldr q4, [x28, #0xd0]\n"
+ ".inst 0x4e85ac17 // usmmla v23.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85ac5f // usmmla v31.4s, v2.16b, v5.16b\n"
+ "ldr q5, [x28, #0xe0]\n"
+ ".inst 0x4e86ac30 // usmmla v16.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac78 // usmmla v24.4s, v3.16b, v6.16b\n"
+ "ldr q6, [x28, #0xf0]\n"
+ "add x28, x28, #0x100\n"
+ ".inst 0x4e87ac34 // usmmla v20.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e87ac7c // usmmla v28.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e88ac31 // usmmla v17.4s, v1.16b, v8.16b\n"
+ ".inst 0x4e88ac79 // usmmla v25.4s, v3.16b, v8.16b\n"
+ ".inst 0x4e89ac35 // usmmla v21.4s, v1.16b, v9.16b\n"
+ ".inst 0x4e89ac7d // usmmla v29.4s, v3.16b, v9.16b\n"
+ ".inst 0x4e8aac32 // usmmla v18.4s, v1.16b, v10.16b\n"
+ ".inst 0x4e8aac7a // usmmla v26.4s, v3.16b, v10.16b\n"
+ ".inst 0x4e84ac36 // usmmla v22.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e84ac7e // usmmla v30.4s, v3.16b, v4.16b\n"
+ ".inst 0x4e85ac33 // usmmla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e85ac7b // usmmla v27.4s, v3.16b, v5.16b\n"
+ ".inst 0x4e86ac37 // usmmla v23.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac7f // usmmla v31.4s, v3.16b, v6.16b\n"
+ "tbnz %x[flags], #31, 106f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ ".inst 0x6e8f942b // udot v11.4s, v1.16b, v15.16b\n"
+ ".inst 0x6e8f946d // udot v13.4s, v3.16b, v15.16b\n"
+ "106:" // Height 4: Multiply loop: unique 14: skip row sum
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ "107:" // Height 4: Multiply loop: Main loop skip
+ "cbz x25, 116f\n"
+ "cmp x25, #0x8\n"
+ "blt 110f\n"
+ "108:" // Height 4: Multiply loop: Odd block loop
+ "ldr d3, [x24], #0x8\n"
+ "ldr d0, [x23], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
+ "ldr d1, [x21], #0x8\n"
+ "trn1 v0.2d, v3.2d, v0.2d\n"
+ "trn1 v2.2d, v2.2d, v1.2d\n"
+ "tbnz %x[flags], #31, 109f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ "109:" // Height 4: Multiply loop: unique 15: skip row sum
+ "ldr q1, [x28, #0x0]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "sub x25, x25, #0x8\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "cmp x25, #0x8\n"
+ "ldr q5, [x28, #0x40]\n"
+ "ldr q4, [x28, #0x50]\n"
+ "ldr q3, [x28, #0x60]\n"
+ ".inst 0x4e81ac10 // usmmla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81ac58 // usmmla v24.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x28, #0x70]\n"
+ ".inst 0x4e88ac14 // usmmla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e88ac5c // usmmla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac59 // usmmla v25.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e86ac15 // usmmla v21.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac5d // usmmla v29.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e85ac12 // usmmla v18.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85ac5a // usmmla v26.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84ac16 // usmmla v22.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e84ac5e // usmmla v30.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e83ac13 // usmmla v19.4s, v0.16b, v3.16b\n"
+ ".inst 0x4e83ac5b // usmmla v27.4s, v2.16b, v3.16b\n"
+ ".inst 0x4e81ac17 // usmmla v23.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81ac5f // usmmla v31.4s, v2.16b, v1.16b\n"
+ "bge 108b\n"
+ "110:" // Height 4: Multiply loop: Skip odd blocks
+ "cbz x25, 116f\n"
+ "tbz x25, #2, 112f\n"
+ "ldr s1, [x24], #0x4\n"
+ "ldr s2, [x23], #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s9, [x21], #0x4\n"
+ "tbz x25, #1, 111f\n"
+ "ld1 { v1.h }[2], [x24], #0x2\n"
+ "ld1 { v2.h }[2], [x23], #0x2\n"
+ "ld1 { v3.h }[2], [x22], #0x2\n"
+ "ld1 { v9.h }[2], [x21], #0x2\n"
+ "tbz x25, #0, 114f\n"
+ "ld1 { v1.b }[6], [x24]\n"
+ "ld1 { v2.b }[6], [x23]\n"
+ "ld1 { v3.b }[6], [x22]\n"
+ "ld1 { v9.b }[6], [x21]\n"
+ "b 114f\n"
+ "111:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x25, #0, 114f\n"
+ "ld1 { v1.b }[4], [x24]\n"
+ "ld1 { v2.b }[4], [x23]\n"
+ "ld1 { v3.b }[4], [x22]\n"
+ "ld1 { v9.b }[4], [x21]\n"
+ "b 114f\n"
+ "112:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x25, #1, 113f\n"
+ "ldr h1, [x24], #0x2\n"
+ "ldr h2, [x23], #0x2\n"
+ "ldr h3, [x22], #0x2\n"
+ "ldr h9, [x21], #0x2\n"
+ "tbz x25, #0, 114f\n"
+ "ld1 { v1.b }[2], [x24]\n"
+ "ld1 { v2.b }[2], [x23]\n"
+ "ld1 { v3.b }[2], [x22]\n"
+ "ld1 { v9.b }[2], [x21]\n"
+ "b 114f\n"
+ "113:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x24, #0x0]\n"
+ "ldr b2, [x23, #0x0]\n"
+ "ldr b3, [x22, #0x0]\n"
+ "ldr b9, [x21, #0x0]\n"
+ "114:" // Height 4: Multiply loop: Ragged operand read: Done
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn1 v2.2d, v3.2d, v9.2d\n"
+ "tbnz %x[flags], #31, 115f\n"
+ ".inst 0x6e8f940b // udot v11.4s, v0.16b, v15.16b\n"
+ ".inst 0x6e8f944d // udot v13.4s, v2.16b, v15.16b\n"
+ "115:" // Height 4: Multiply loop: unique 16: skip row sum
+ "ldr q1, [x28, #0x0]\n"
+ "ldr q8, [x28, #0x10]\n"
+ "ldr q7, [x28, #0x20]\n"
+ "ldr q6, [x28, #0x30]\n"
+ "ldr q5, [x28, #0x40]\n"
+ "ldr q4, [x28, #0x50]\n"
+ "ldr q3, [x28, #0x60]\n"
+ ".inst 0x4e81ac10 // usmmla v16.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81ac58 // usmmla v24.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x28, #0x70]\n"
+ ".inst 0x4e88ac14 // usmmla v20.4s, v0.16b, v8.16b\n"
+ ".inst 0x4e88ac5c // usmmla v28.4s, v2.16b, v8.16b\n"
+ "add x28, x28, #0x80\n"
+ ".inst 0x4e87ac11 // usmmla v17.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac59 // usmmla v25.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e86ac15 // usmmla v21.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac5d // usmmla v29.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e85ac12 // usmmla v18.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e85ac5a // usmmla v26.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84ac16 // usmmla v22.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e84ac5e // usmmla v30.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e83ac13 // usmmla v19.4s, v0.16b, v3.16b\n"
+ ".inst 0x4e83ac5b // usmmla v27.4s, v2.16b, v3.16b\n"
+ ".inst 0x4e81ac17 // usmmla v23.4s, v0.16b, v1.16b\n"
+ ".inst 0x4e81ac5f // usmmla v31.4s, v2.16b, v1.16b\n"
+ "116:" // Height 4: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 100b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 v0.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x27, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
+ "add x22, x23, x20\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "uzp1 v23.2d, v24.2d, v28.2d\n"
+ "uzp2 v24.2d, v24.2d, v28.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "uzp1 v28.2d, v25.2d, v29.2d\n"
+ "uzp2 v25.2d, v25.2d, v29.2d\n"
+ "uzp1 v29.2d, v26.2d, v30.2d\n"
+ "uzp2 v26.2d, v26.2d, v30.2d\n"
+ "uzp1 v30.2d, v27.2d, v31.2d\n"
+ "uzp2 v27.2d, v27.2d, v31.2d\n"
+ "mov v31.16b, v0.16b\n"
+ "tbnz %x[flags], #31, 117f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "addp v11.4s, v11.4s, v11.4s\n"
+ "addp v13.4s, v13.4s, v13.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "neg v0.4s, v0.4s\n"
+ "dup v12.4s, v11.s[3]\n"
+ "dup v11.4s, v11.s[0]\n"
+ "dup v14.4s, v13.s[3]\n"
+ "dup v13.4s, v13.s[0]\n"
+ "mul v11.4s, v11.4s, v0.4s\n"
+ "mul v12.4s, v12.4s, v0.4s\n"
+ "mul v14.4s, v14.4s, v0.4s\n"
+ "mul v13.4s, v13.4s, v0.4s\n"
+ "117:" // Height 4: skip row sum fixup
+ "ldr q0, [x10, #0x0]\n"
+ "ldr q4, [x10, #0x10]\n"
+ "add v31.4s, v31.4s, v11.4s\n"
+ "add v20.4s, v20.4s, v11.4s\n"
+ "ldr q3, [x10, #0x20]\n"
+ "ldr q2, [x10, #0x30]\n"
+ "add v21.4s, v21.4s, v11.4s\n"
+ "add v22.4s, v22.4s, v11.4s\n"
+ "add v16.4s, v16.4s, v12.4s\n"
+ "add v17.4s, v17.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "add v18.4s, v18.4s, v12.4s\n"
+ "add v19.4s, v19.4s, v12.4s\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add v23.4s, v23.4s, v13.4s\n"
+ "add v28.4s, v28.4s, v13.4s\n"
+ "add x10, x10, #0x40\n"
+ "add v29.4s, v29.4s, v13.4s\n"
+ "add v30.4s, v30.4s, v13.4s\n"
+ "add v24.4s, v24.4s, v14.4s\n"
+ "add v25.4s, v25.4s, v14.4s\n"
+ "add v26.4s, v26.4s, v14.4s\n"
+ "add v27.4s, v27.4s, v14.4s\n"
+ "add v31.4s, v31.4s, v0.4s\n"
+ "add v20.4s, v20.4s, v4.4s\n"
+ "add v21.4s, v21.4s, v3.4s\n"
+ "add v22.4s, v22.4s, v2.4s\n"
+ "add v16.4s, v16.4s, v0.4s\n"
+ "add v17.4s, v17.4s, v4.4s\n"
+ "add v18.4s, v18.4s, v3.4s\n"
+ "add v19.4s, v19.4s, v2.4s\n"
+ "add v23.4s, v23.4s, v0.4s\n"
+ "add v28.4s, v28.4s, v4.4s\n"
+ "add v29.4s, v29.4s, v3.4s\n"
+ "add v30.4s, v30.4s, v2.4s\n"
+ "add v24.4s, v24.4s, v0.4s\n"
+ "ld1r { v0.4s }, [x20]\n"
+ "add v25.4s, v25.4s, v4.4s\n"
+ "add v26.4s, v26.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v2.4s\n"
+ "sqrdmulh v31.4s, v31.4s, v1.4s\n"
+ "sqrdmulh v20.4s, v20.4s, v1.4s\n"
+ "sqrdmulh v21.4s, v21.4s, v1.4s\n"
+ "sqrdmulh v22.4s, v22.4s, v1.4s\n"
+ "sqrdmulh v16.4s, v16.4s, v1.4s\n"
+ "sqrdmulh v17.4s, v17.4s, v1.4s\n"
+ "sqrdmulh v18.4s, v18.4s, v1.4s\n"
+ "sqrdmulh v19.4s, v19.4s, v1.4s\n"
+ "sqrdmulh v23.4s, v23.4s, v1.4s\n"
+ "sqrdmulh v28.4s, v28.4s, v1.4s\n"
+ "sqrdmulh v29.4s, v29.4s, v1.4s\n"
+ "sqrdmulh v30.4s, v30.4s, v1.4s\n"
+ "sqrdmulh v24.4s, v24.4s, v1.4s\n"
+ "sqrdmulh v25.4s, v25.4s, v1.4s\n"
+ "sqrdmulh v26.4s, v26.4s, v1.4s\n"
+ "sqrdmulh v27.4s, v27.4s, v1.4s\n"
+ "tbz %x[flags], #5, 118f\n"
+ "and v2.16b, v31.16b, v0.16b\n"
+ "and v1.16b, v20.16b, v0.16b\n"
+ "and v7.16b, v21.16b, v0.16b\n"
+ "and v6.16b, v22.16b, v0.16b\n"
+ "and v5.16b, v16.16b, v0.16b\n"
+ "and v4.16b, v17.16b, v0.16b\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "and v3.16b, v18.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sqadd v31.4s, v31.4s, v2.4s\n"
+ "sqadd v20.4s, v20.4s, v1.4s\n"
+ "and v2.16b, v19.16b, v0.16b\n"
+ "and v1.16b, v23.16b, v0.16b\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sqadd v21.4s, v21.4s, v7.4s\n"
+ "sqadd v22.4s, v22.4s, v6.4s\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v16.4s, v16.4s, v5.4s\n"
+ "sqadd v17.4s, v17.4s, v4.4s\n"
+ "sqadd v18.4s, v18.4s, v3.4s\n"
+ "and v7.16b, v28.16b, v0.16b\n"
+ "sqadd v19.4s, v19.4s, v2.4s\n"
+ "sqadd v23.4s, v23.4s, v1.4s\n"
+ "and v6.16b, v29.16b, v0.16b\n"
+ "and v5.16b, v30.16b, v0.16b\n"
+ "and v4.16b, v24.16b, v0.16b\n"
+ "and v3.16b, v25.16b, v0.16b\n"
+ "and v2.16b, v26.16b, v0.16b\n"
+ "and v1.16b, v27.16b, v0.16b\n"
+ "sshr v7.4s, v7.4s, #0x1f\n"
+ "sshr v6.4s, v6.4s, #0x1f\n"
+ "sshr v5.4s, v5.4s, #0x1f\n"
+ "sshr v4.4s, v4.4s, #0x1f\n"
+ "sshr v3.4s, v3.4s, #0x1f\n"
+ "sshr v2.4s, v2.4s, #0x1f\n"
+ "sshr v1.4s, v1.4s, #0x1f\n"
+ "sqadd v28.4s, v28.4s, v7.4s\n"
+ "sqadd v29.4s, v29.4s, v6.4s\n"
+ "sqadd v30.4s, v30.4s, v5.4s\n"
+ "sqadd v24.4s, v24.4s, v4.4s\n"
+ "sqadd v25.4s, v25.4s, v3.4s\n"
+ "sqadd v26.4s, v26.4s, v2.4s\n"
+ "sqadd v27.4s, v27.4s, v1.4s\n"
+ "118:" // Height 4: no shift correction
+ "add x21, %x[qp], %[c_offset]\n"
+ "srshl v31.4s, v31.4s, v0.4s\n"
+ "srshl v20.4s, v20.4s, v0.4s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1r { v3.4s }, [x21]\n"
+ "ld1r { v2.4s }, [x20]\n"
+ "srshl v21.4s, v21.4s, v0.4s\n"
+ "srshl v22.4s, v22.4s, v0.4s\n"
+ "srshl v16.4s, v16.4s, v0.4s\n"
+ "srshl v17.4s, v17.4s, v0.4s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "cmp x9, #0x10\n"
+ "ld1r { v1.4s }, [x20]\n"
+ "srshl v18.4s, v18.4s, v0.4s\n"
+ "srshl v19.4s, v19.4s, v0.4s\n"
+ "srshl v23.4s, v23.4s, v0.4s\n"
+ "srshl v28.4s, v28.4s, v0.4s\n"
+ "srshl v29.4s, v29.4s, v0.4s\n"
+ "srshl v30.4s, v30.4s, v0.4s\n"
+ "srshl v24.4s, v24.4s, v0.4s\n"
+ "srshl v25.4s, v25.4s, v0.4s\n"
+ "srshl v26.4s, v26.4s, v0.4s\n"
+ "srshl v27.4s, v27.4s, v0.4s\n"
+ "add v31.4s, v31.4s, v3.4s\n"
+ "add v20.4s, v20.4s, v3.4s\n"
+ "add v21.4s, v21.4s, v3.4s\n"
+ "add v22.4s, v22.4s, v3.4s\n"
+ "add v16.4s, v16.4s, v3.4s\n"
+ "add v17.4s, v17.4s, v3.4s\n"
+ "add v18.4s, v18.4s, v3.4s\n"
+ "add v19.4s, v19.4s, v3.4s\n"
+ "add v23.4s, v23.4s, v3.4s\n"
+ "add v28.4s, v28.4s, v3.4s\n"
+ "add v29.4s, v29.4s, v3.4s\n"
+ "add v30.4s, v30.4s, v3.4s\n"
+ "add v24.4s, v24.4s, v3.4s\n"
+ "add v25.4s, v25.4s, v3.4s\n"
+ "add v26.4s, v26.4s, v3.4s\n"
+ "add v27.4s, v27.4s, v3.4s\n"
+ "smin v31.4s, v31.4s, v2.4s\n"
+ "smin v20.4s, v20.4s, v2.4s\n"
+ "smin v21.4s, v21.4s, v2.4s\n"
+ "smin v22.4s, v22.4s, v2.4s\n"
+ "smin v16.4s, v16.4s, v2.4s\n"
+ "smin v17.4s, v17.4s, v2.4s\n"
+ "smin v18.4s, v18.4s, v2.4s\n"
+ "smin v19.4s, v19.4s, v2.4s\n"
+ "smin v23.4s, v23.4s, v2.4s\n"
+ "smin v28.4s, v28.4s, v2.4s\n"
+ "smin v29.4s, v29.4s, v2.4s\n"
+ "smin v30.4s, v30.4s, v2.4s\n"
+ "smin v24.4s, v24.4s, v2.4s\n"
+ "smin v25.4s, v25.4s, v2.4s\n"
+ "smin v26.4s, v26.4s, v2.4s\n"
+ "smin v27.4s, v27.4s, v2.4s\n"
+ "smax v31.4s, v31.4s, v1.4s\n"
+ "smax v20.4s, v20.4s, v1.4s\n"
+ "smax v21.4s, v21.4s, v1.4s\n"
+ "smax v22.4s, v22.4s, v1.4s\n"
+ "smax v16.4s, v16.4s, v1.4s\n"
+ "smax v17.4s, v17.4s, v1.4s\n"
+ "smax v18.4s, v18.4s, v1.4s\n"
+ "smax v19.4s, v19.4s, v1.4s\n"
+ "smax v23.4s, v23.4s, v1.4s\n"
+ "smax v28.4s, v28.4s, v1.4s\n"
+ "smax v29.4s, v29.4s, v1.4s\n"
+ "smax v30.4s, v30.4s, v1.4s\n"
+ "smax v24.4s, v24.4s, v1.4s\n"
+ "smax v25.4s, v25.4s, v1.4s\n"
+ "smax v26.4s, v26.4s, v1.4s\n"
+ "smax v27.4s, v27.4s, v1.4s\n"
+ "uzp1 v31.8h, v31.8h, v20.8h\n"
+ "uzp1 v20.8h, v21.8h, v22.8h\n"
+ "uzp1 v16.8h, v16.8h, v17.8h\n"
+ "uzp1 v19.8h, v18.8h, v19.8h\n"
+ "uzp1 v23.8h, v23.8h, v28.8h\n"
+ "uzp1 v18.8h, v29.8h, v30.8h\n"
+ "uzp1 v24.8h, v24.8h, v25.8h\n"
+ "uzp1 v17.8h, v26.8h, v27.8h\n"
+ "uzp1 v31.16b, v31.16b, v20.16b\n"
+ "uzp1 v16.16b, v16.16b, v19.16b\n"
+ "uzp1 v23.16b, v23.16b, v18.16b\n"
+ "uzp1 v24.16b, v24.16b, v17.16b\n"
+ "bge 127f\n"
+ "tbz x9, #3, 122f\n"
+ "str d31, [x27], #0x8\n"
+ "str d16, [x24], #0x8\n"
+ "str d23, [x23], #0x8\n"
+ "str d24, [x22], #0x8\n"
+ "tbz x9, #2, 120f\n"
+ "st1 { v31.s }[2], [x27], #0x4\n"
+ "st1 { v16.s }[2], [x24], #0x4\n"
+ "st1 { v23.s }[2], [x23], #0x4\n"
+ "st1 { v24.s }[2], [x22], #0x4\n"
+ "tbz x9, #1, 119f\n"
+ "st1 { v31.h }[6], [x27], #0x2\n"
+ "st1 { v16.h }[6], [x24], #0x2\n"
+ "st1 { v23.h }[6], [x23], #0x2\n"
+ "st1 { v24.h }[6], [x22], #0x2\n"
+ "tbz x9, #0, 126f\n"
+ "st1 { v31.b }[14], [x27]\n"
+ "st1 { v16.b }[14], [x24]\n"
+ "st1 { v23.b }[14], [x23]\n"
+ "st1 { v24.b }[14], [x22]\n"
+ "b 126f\n"
+ "119:" // Height 4: Partial direct writeback: partial_1_12
+ "tbz x9, #0, 126f\n"
+ "st1 { v31.b }[12], [x27]\n"
+ "st1 { v16.b }[12], [x24]\n"
+ "st1 { v23.b }[12], [x23]\n"
+ "st1 { v24.b }[12], [x22]\n"
+ "b 126f\n"
+ "120:" // Height 4: Partial direct writeback: partial_2_8
+ "tbz x9, #1, 121f\n"
+ "st1 { v31.h }[4], [x27], #0x2\n"
+ "st1 { v16.h }[4], [x24], #0x2\n"
+ "st1 { v23.h }[4], [x23], #0x2\n"
+ "st1 { v24.h }[4], [x22], #0x2\n"
+ "tbz x9, #0, 126f\n"
+ "st1 { v31.b }[10], [x27]\n"
+ "st1 { v16.b }[10], [x24]\n"
+ "st1 { v23.b }[10], [x23]\n"
+ "st1 { v24.b }[10], [x22]\n"
+ "b 126f\n"
+ "121:" // Height 4: Partial direct writeback: partial_1_8
+ "tbz x9, #0, 126f\n"
+ "st1 { v31.b }[8], [x27]\n"
+ "st1 { v16.b }[8], [x24]\n"
+ "st1 { v23.b }[8], [x23]\n"
+ "st1 { v24.b }[8], [x22]\n"
+ "b 126f\n"
+ "122:" // Height 4: Partial direct writeback: partial_4_0
+ "tbz x9, #2, 124f\n"
+ "str s31, [x27], #0x4\n"
+ "str s16, [x24], #0x4\n"
+ "str s23, [x23], #0x4\n"
+ "str s24, [x22], #0x4\n"
+ "tbz x9, #1, 123f\n"
+ "st1 { v31.h }[2], [x27], #0x2\n"
+ "st1 { v16.h }[2], [x24], #0x2\n"
+ "st1 { v23.h }[2], [x23], #0x2\n"
+ "st1 { v24.h }[2], [x22], #0x2\n"
+ "tbz x9, #0, 126f\n"
+ "st1 { v31.b }[6], [x27]\n"
+ "st1 { v16.b }[6], [x24]\n"
+ "st1 { v23.b }[6], [x23]\n"
+ "st1 { v24.b }[6], [x22]\n"
+ "b 126f\n"
+ "123:" // Height 4: Partial direct writeback: partial_1_4
+ "tbz x9, #0, 126f\n"
+ "st1 { v31.b }[4], [x27]\n"
+ "st1 { v16.b }[4], [x24]\n"
+ "st1 { v23.b }[4], [x23]\n"
+ "st1 { v24.b }[4], [x22]\n"
+ "b 126f\n"
+ "124:" // Height 4: Partial direct writeback: partial_2_0
+ "tbz x9, #1, 125f\n"
+ "str h31, [x27], #0x2\n"
+ "str h16, [x24], #0x2\n"
+ "str h23, [x23], #0x2\n"
+ "str h24, [x22], #0x2\n"
+ "tbz x9, #0, 126f\n"
+ "st1 { v31.b }[2], [x27]\n"
+ "st1 { v16.b }[2], [x24]\n"
+ "st1 { v23.b }[2], [x23]\n"
+ "st1 { v24.b }[2], [x22]\n"
+ "b 126f\n"
+ "125:" // Height 4: Partial direct writeback: partial_1_0
+ "str b31, [x27, #0x0]\n"
+ "str b16, [x24, #0x0]\n"
+ "str b23, [x23, #0x0]\n"
+ "str b24, [x22, #0x0]\n"
+ "126:" // Height 4: Partial direct writeback: Done
+ "b 128f\n"
+ "127:" // Height 4: Full writeback
+ "str q31, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "str q16, [x24, #0x0]\n"
+ "str q23, [x23, #0x0]\n"
+ "str q24, [x22, #0x0]\n"
+ "128:" // Height 4: Writeback done
+ "subs x9, x9, #0x10\n"
+ "bgt 98b\n"
+ "subs %x[M], %x[M], #0x4\n"
+ "beq 130f\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 129f\n"
+ "add x21, x21, #0x4\n"
+ "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "129:" // Update direct input
+ "mov x20, #0x4\n"
+ "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "b 1b\n"
+ "130:" // Exit
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16.hpp
new file mode 100644
index 0000000000..4f963124cb
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16.hpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<uint8_t>, \
+ size_t, size_t, \
+ const int8_t *, \
+ IndirectOutputArg<int32_t>, \
+ const int32_t *, Activation, bool
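+
+// ARGLIST mirrors the kernel signature: number of strings, string lengths,
+// the (possibly indirect) A operand, M, N, the B pointer, the output operand,
+// a bias pointer (unnamed and unused by this variant), the activation, and
+// the accumulate flag.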
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_hybrid_u8s8s32_dot_6x16( ARGLIST );
+
+class cls_a64_hybrid_u8s8s32_dot_6x16
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+
+ static unsigned int out_width()
+ {
+ return 16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
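+ // Rough per-CPU throughput estimates, consumed by the GEMM method-selection
+ // heuristics when choosing between candidate kernels.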
+ if (std::is_same<T, uint32_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 31.63 };
+ case CPUModel::A510:
+ return { 15.89 };
+ case CPUModel::V1:
+ return { 53.87 };
+ case CPUModel::A55r1:
+ return { 9.217 };
+ }
+ }
+
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ case CPUModel::A55r1:
+ return { 9.5238, 2.0799, 0.2279 };
+ default:
+ return { 29.6736, 11.4025, 0.5591 };
+ case CPUModel::A510:
+ return { 16.65, 3.92, 0.48 };
+ case CPUModel::V1:
+ return { 42.62, 16.32, 0.83 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_hybrid_u8s8s32_dot_6x16;
+ cls_a64_hybrid_u8s8s32_dot_6x16(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16/generic.cpp
new file mode 100644
index 0000000000..074a9585d8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_dot_6x16/generic.cpp
@@ -0,0 +1,3264 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+
+namespace arm_gemm {
+
+void a64_hybrid_u8s8s32_dot_6x16 (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<uint8_t> A_arg,
+ size_t M, size_t N, const int8_t *B_ptr, IndirectOutputArg<int32_t> output_arg,
+ const int32_t *, Activation, bool accumulate
+)
+{
+ struct KernelArgs {
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const int8_t *B_ptr = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ void *output_ptr = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ ka.output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
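+ // flags summary: bit 0 = accumulate into the existing output, bit 2 =
+ // indirect output addressing, bit 3 = indirect input addressing.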
+ __asm__ __volatile__(
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 171f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 137f\n"
+ "beq 103f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 69f\n"
+ "beq 35f\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "2:" // Height 1: Column loop
+ "tbz %x[flags], #0, 12f\n"
+ "cmp x11, #0x10\n"
+ "bge 11f\n"
+ "tbz x11, #3, 6f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "tbz x11, #2, 4f\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "tbz x11, #1, 3f\n"
+ "ldr d11, [x9], #0x8\n"
+ "mov x25, #0x38\n"
+ "tbz x11, #0, 10f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "b 10f\n"
+ "3:" // Height 1: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 10f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "b 10f\n"
+ "4:" // Height 1: Partial accumulate: partial_2_8
+ "tbz x11, #1, 5f\n"
+ "ldr d10, [x9], #0x8\n"
+ "mov x25, #0x28\n"
+ "tbz x11, #0, 10f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "b 10f\n"
+ "5:" // Height 1: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 10f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "b 10f\n"
+ "6:" // Height 1: Partial accumulate: partial_4_0
+ "tbz x11, #2, 8f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "tbz x11, #1, 7f\n"
+ "ldr d9, [x9], #0x8\n"
+ "mov x25, #0x18\n"
+ "tbz x11, #0, 10f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "b 10f\n"
+ "7:" // Height 1: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 10f\n"
+ "ldr s9, [x9, #0x0]\n"
+ "b 10f\n"
+ "8:" // Height 1: Partial accumulate: partial_2_0
+ "tbz x11, #1, 9f\n"
+ "ldr d8, [x9], #0x8\n"
+ "mov x25, #0x8\n"
+ "tbz x11, #0, 10f\n"
+ "ld1 { v8.s }[2], [x9]\n"
+ "b 10f\n"
+ "9:" // Height 1: Partial accumulate: partial_1_0
+ "ldr s8, [x9, #0x0]\n"
+ "mov x25, #0x0\n"
+ "10:" // Height 1: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 13f\n"
+ "11:" // Height 1: full accumulate
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "b 13f\n"
+ "12:" // Height 1: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "13:" // Height 1: setup done
+ "mov x28, #0x0\n"
+ "14:" // Height 1: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 15f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "cbnz x28, 16f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "b 16f\n"
+ "15:" // Height 1: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "16:" // Height 1: input setup done
+ "cmp x27, #0x10\n"
+ "blt 19f\n"
+ "ldr q0, [x26, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 18f\n"
+ "17:" // Height 1: Multiply loop: Main loop head
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4f00f22a // sudot v10.4s, v17.16b, v0.4b[0]\n"
+ "ldr q17, [x10, #0x40]\n"
+ ".inst 0x4f00f20b // sudot v11.4s, v16.16b, v0.4b[0]\n"
+ "ldr q16, [x10, #0x50]\n"
+ ".inst 0x4f20f228 // sudot v8.4s, v17.16b, v0.4b[1]\n"
+ "ldr q17, [x10, #0x60]\n"
+ ".inst 0x4f20f209 // sudot v9.4s, v16.16b, v0.4b[1]\n"
+ "ldr q16, [x10, #0x70]\n"
+ ".inst 0x4f20f22a // sudot v10.4s, v17.16b, v0.4b[1]\n"
+ "ldr q17, [x10, #0x80]\n"
+ ".inst 0x4f20f20b // sudot v11.4s, v16.16b, v0.4b[1]\n"
+ "ldr q16, [x10, #0x90]\n"
+ ".inst 0x4f00fa28 // sudot v8.4s, v17.16b, v0.4b[2]\n"
+ "ldr q17, [x10, #0xa0]\n"
+ ".inst 0x4f00fa09 // sudot v9.4s, v16.16b, v0.4b[2]\n"
+ "ldr q16, [x10, #0xb0]\n"
+ ".inst 0x4f00fa2a // sudot v10.4s, v17.16b, v0.4b[2]\n"
+ "ldr q17, [x10, #0xc0]\n"
+ ".inst 0x4f00fa0b // sudot v11.4s, v16.16b, v0.4b[2]\n"
+ "ldr q16, [x10, #0xd0]\n"
+ ".inst 0x4f20fa28 // sudot v8.4s, v17.16b, v0.4b[3]\n"
+ "ldr q17, [x10, #0xe0]\n"
+ ".inst 0x4f20fa09 // sudot v9.4s, v16.16b, v0.4b[3]\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20fa2a // sudot v10.4s, v17.16b, v0.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x4f20fa0b // sudot v11.4s, v16.16b, v0.4b[3]\n"
+ "ldr q0, [x26, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "bge 17b\n"
+ "18:" // Height 1: Multiply loop: Single iteration only
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4f00f22a // sudot v10.4s, v17.16b, v0.4b[0]\n"
+ "ldr q17, [x10, #0x40]\n"
+ ".inst 0x4f00f20b // sudot v11.4s, v16.16b, v0.4b[0]\n"
+ "ldr q16, [x10, #0x50]\n"
+ ".inst 0x4f20f228 // sudot v8.4s, v17.16b, v0.4b[1]\n"
+ "ldr q17, [x10, #0x60]\n"
+ ".inst 0x4f20f209 // sudot v9.4s, v16.16b, v0.4b[1]\n"
+ "ldr q16, [x10, #0x70]\n"
+ ".inst 0x4f20f22a // sudot v10.4s, v17.16b, v0.4b[1]\n"
+ "ldr q17, [x10, #0x80]\n"
+ ".inst 0x4f20f20b // sudot v11.4s, v16.16b, v0.4b[1]\n"
+ "ldr q16, [x10, #0x90]\n"
+ ".inst 0x4f00fa28 // sudot v8.4s, v17.16b, v0.4b[2]\n"
+ "ldr q17, [x10, #0xa0]\n"
+ ".inst 0x4f00fa09 // sudot v9.4s, v16.16b, v0.4b[2]\n"
+ "ldr q16, [x10, #0xb0]\n"
+ ".inst 0x4f00fa2a // sudot v10.4s, v17.16b, v0.4b[2]\n"
+ "ldr q17, [x10, #0xc0]\n"
+ ".inst 0x4f00fa0b // sudot v11.4s, v16.16b, v0.4b[2]\n"
+ "ldr q16, [x10, #0xd0]\n"
+ ".inst 0x4f20fa28 // sudot v8.4s, v17.16b, v0.4b[3]\n"
+ "ldr q17, [x10, #0xe0]\n"
+ ".inst 0x4f20fa09 // sudot v9.4s, v16.16b, v0.4b[3]\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20fa2a // sudot v10.4s, v17.16b, v0.4b[3]\n"
+ ".inst 0x4f20fa0b // sudot v11.4s, v16.16b, v0.4b[3]\n"
+ "19:" // Height 1: Multiply loop: Main loop skip
+ "cbz x27, 24f\n"
+ "cmp x27, #0x4\n"
+ "blt 21f\n"
+ "20:" // Height 1: Multiply loop: Odd block loop
+ "ldr s18, [x26], #0x4\n"
+ "ldr q17, [x10, #0x0]\n"
+ "sub x27, x27, #0x4\n"
+ "ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x4\n"
+ ".inst 0x4f12f228 // sudot v8.4s, v17.16b, v18.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f12f209 // sudot v9.4s, v16.16b, v18.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f12f22a // sudot v10.4s, v17.16b, v18.4b[0]\n"
+ ".inst 0x4f12f20b // sudot v11.4s, v16.16b, v18.4b[0]\n"
+ "bge 20b\n"
+ "21:" // Height 1: Multiply loop: Skip odd blocks
+ "cbz x27, 24f\n"
+ "tbz x27, #1, 22f\n"
+ "ldr h0, [x26], #0x2\n"
+ "tbz x27, #0, 23f\n"
+ "ld1 { v0.b }[2], [x26]\n"
+ "b 23f\n"
+ "22:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x26, #0x0]\n"
+ "23:" // Height 1: Multiply loop: Ragged operand read: Done
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q16, [x10, #0x10]\n"
+ ".inst 0x4f00f228 // sudot v8.4s, v17.16b, v0.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f00f209 // sudot v9.4s, v16.16b, v0.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f00f22a // sudot v10.4s, v17.16b, v0.4b[0]\n"
+ ".inst 0x4f00f20b // sudot v11.4s, v16.16b, v0.4b[0]\n"
+ "24:" // Height 1: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 14b\n"
+ "cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "bge 33f\n"
+ "tbz x11, #3, 28f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v9.4s }, [x9], #0x10\n"
+ "tbz x11, #2, 26f\n"
+ "st1 { v10.4s }, [x9], #0x10\n"
+ "tbz x11, #1, 25f\n"
+ "str d11, [x9], #0x8\n"
+ "tbz x11, #0, 32f\n"
+ "st1 { v11.s }[2], [x9]\n"
+ "b 32f\n"
+ "25:" // Height 1: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 32f\n"
+ "str s11, [x9, #0x0]\n"
+ "b 32f\n"
+ "26:" // Height 1: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 27f\n"
+ "str d10, [x9], #0x8\n"
+ "tbz x11, #0, 32f\n"
+ "st1 { v10.s }[2], [x9]\n"
+ "b 32f\n"
+ "27:" // Height 1: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 32f\n"
+ "str s10, [x9, #0x0]\n"
+ "b 32f\n"
+ "28:" // Height 1: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 30f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "tbz x11, #1, 29f\n"
+ "str d9, [x9], #0x8\n"
+ "tbz x11, #0, 32f\n"
+ "st1 { v9.s }[2], [x9]\n"
+ "b 32f\n"
+ "29:" // Height 1: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 32f\n"
+ "str s9, [x9, #0x0]\n"
+ "b 32f\n"
+ "30:" // Height 1: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 31f\n"
+ "str d8, [x9], #0x8\n"
+ "tbz x11, #0, 32f\n"
+ "st1 { v8.s }[2], [x9]\n"
+ "b 32f\n"
+ "31:" // Height 1: Partial direct writeback: partial_1_0
+ "str s8, [x9, #0x0]\n"
+ "32:" // Height 1: Partial direct writeback: Done
+ "b 34f\n"
+ "33:" // Height 1: Full writeback
+ "str q8, [x9, #0x0]\n"
+ "str q9, [x9, #0x10]\n"
+ "str q10, [x9, #0x20]\n"
+ "str q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "34:" // Height 1: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 2b\n"
+ "b 206f\n"
+ "35:" // Height 2
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "36:" // Height 2: Column loop
+ "tbz %x[flags], #0, 46f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "bge 45f\n"
+ "tbz x11, #3, 40f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "tbz x11, #2, 38f\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "tbz x11, #1, 37f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "tbz x11, #0, 44f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "b 44f\n"
+ "37:" // Height 2: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 44f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "b 44f\n"
+ "38:" // Height 2: Partial accumulate: partial_2_8
+ "tbz x11, #1, 39f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "tbz x11, #0, 44f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "b 44f\n"
+ "39:" // Height 2: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 44f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "b 44f\n"
+ "40:" // Height 2: Partial accumulate: partial_4_0
+ "tbz x11, #2, 42f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "tbz x11, #1, 41f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "tbz x11, #0, 44f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "b 44f\n"
+ "41:" // Height 2: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 44f\n"
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "b 44f\n"
+ "42:" // Height 2: Partial accumulate: partial_2_0
+ "tbz x11, #1, 43f\n"
+ "ldr d8, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "tbz x11, #0, 44f\n"
+ "ld1 { v8.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "b 44f\n"
+ "43:" // Height 2: Partial accumulate: partial_1_0
+ "ldr s8, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "44:" // Height 2: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 47f\n"
+ "45:" // Height 2: full accumulate
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "b 47f\n"
+ "46:" // Height 2: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "47:" // Height 2: setup done
+ "mov x28, #0x0\n"
+ "48:" // Height 2: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 49f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "cbnz x28, 50f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "b 50f\n"
+ "49:" // Height 2: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "50:" // Height 2: input setup done
+ "cmp x27, #0x10\n"
+ "blt 53f\n"
+ "ldr q0, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 52f\n"
+ "51:" // Height 2: Multiply loop: Main loop head
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ "sub x27, x27, #0x10\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4f00f22a // sudot v10.4s, v17.16b, v0.4b[0]\n"
+ ".inst 0x4f01f22e // sudot v14.4s, v17.16b, v1.4b[0]\n"
+ "ldr q17, [x10, #0x40]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f00f20b // sudot v11.4s, v16.16b, v0.4b[0]\n"
+ ".inst 0x4f01f20f // sudot v15.4s, v16.16b, v1.4b[0]\n"
+ "ldr q16, [x10, #0x50]\n"
+ ".inst 0x4f20f228 // sudot v8.4s, v17.16b, v0.4b[1]\n"
+ ".inst 0x4f21f22c // sudot v12.4s, v17.16b, v1.4b[1]\n"
+ "ldr q17, [x10, #0x60]\n"
+ ".inst 0x4f20f209 // sudot v9.4s, v16.16b, v0.4b[1]\n"
+ ".inst 0x4f21f20d // sudot v13.4s, v16.16b, v1.4b[1]\n"
+ "ldr q16, [x10, #0x70]\n"
+ ".inst 0x4f20f22a // sudot v10.4s, v17.16b, v0.4b[1]\n"
+ ".inst 0x4f21f22e // sudot v14.4s, v17.16b, v1.4b[1]\n"
+ "ldr q17, [x10, #0x80]\n"
+ ".inst 0x4f20f20b // sudot v11.4s, v16.16b, v0.4b[1]\n"
+ ".inst 0x4f21f20f // sudot v15.4s, v16.16b, v1.4b[1]\n"
+ "ldr q16, [x10, #0x90]\n"
+ ".inst 0x4f00fa28 // sudot v8.4s, v17.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa2c // sudot v12.4s, v17.16b, v1.4b[2]\n"
+ "ldr q17, [x10, #0xa0]\n"
+ ".inst 0x4f00fa09 // sudot v9.4s, v16.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa0d // sudot v13.4s, v16.16b, v1.4b[2]\n"
+ "ldr q16, [x10, #0xb0]\n"
+ ".inst 0x4f00fa2a // sudot v10.4s, v17.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa2e // sudot v14.4s, v17.16b, v1.4b[2]\n"
+ "ldr q17, [x10, #0xc0]\n"
+ ".inst 0x4f00fa0b // sudot v11.4s, v16.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa0f // sudot v15.4s, v16.16b, v1.4b[2]\n"
+ "ldr q16, [x10, #0xd0]\n"
+ ".inst 0x4f20fa28 // sudot v8.4s, v17.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa2c // sudot v12.4s, v17.16b, v1.4b[3]\n"
+ "ldr q17, [x10, #0xe0]\n"
+ ".inst 0x4f20fa09 // sudot v9.4s, v16.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa0d // sudot v13.4s, v16.16b, v1.4b[3]\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20fa2a // sudot v10.4s, v17.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa2e // sudot v14.4s, v17.16b, v1.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x4f20fa0b // sudot v11.4s, v16.16b, v0.4b[3]\n"
+ "ldr q0, [x26, #0x0]\n"
+ ".inst 0x4f21fa0f // sudot v15.4s, v16.16b, v1.4b[3]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "bge 51b\n"
+ "52:" // Height 2: Multiply loop: Single iteration only
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x25, x25, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f00f22a // sudot v10.4s, v17.16b, v0.4b[0]\n"
+ ".inst 0x4f01f22e // sudot v14.4s, v17.16b, v1.4b[0]\n"
+ "ldr q17, [x10, #0x40]\n"
+ ".inst 0x4f00f20b // sudot v11.4s, v16.16b, v0.4b[0]\n"
+ ".inst 0x4f01f20f // sudot v15.4s, v16.16b, v1.4b[0]\n"
+ "ldr q16, [x10, #0x50]\n"
+ ".inst 0x4f20f228 // sudot v8.4s, v17.16b, v0.4b[1]\n"
+ ".inst 0x4f21f22c // sudot v12.4s, v17.16b, v1.4b[1]\n"
+ "ldr q17, [x10, #0x60]\n"
+ ".inst 0x4f20f209 // sudot v9.4s, v16.16b, v0.4b[1]\n"
+ ".inst 0x4f21f20d // sudot v13.4s, v16.16b, v1.4b[1]\n"
+ "ldr q16, [x10, #0x70]\n"
+ ".inst 0x4f20f22a // sudot v10.4s, v17.16b, v0.4b[1]\n"
+ ".inst 0x4f21f22e // sudot v14.4s, v17.16b, v1.4b[1]\n"
+ "ldr q17, [x10, #0x80]\n"
+ ".inst 0x4f20f20b // sudot v11.4s, v16.16b, v0.4b[1]\n"
+ ".inst 0x4f21f20f // sudot v15.4s, v16.16b, v1.4b[1]\n"
+ "ldr q16, [x10, #0x90]\n"
+ ".inst 0x4f00fa28 // sudot v8.4s, v17.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa2c // sudot v12.4s, v17.16b, v1.4b[2]\n"
+ "ldr q17, [x10, #0xa0]\n"
+ ".inst 0x4f00fa09 // sudot v9.4s, v16.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa0d // sudot v13.4s, v16.16b, v1.4b[2]\n"
+ "ldr q16, [x10, #0xb0]\n"
+ ".inst 0x4f00fa2a // sudot v10.4s, v17.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa2e // sudot v14.4s, v17.16b, v1.4b[2]\n"
+ "ldr q17, [x10, #0xc0]\n"
+ ".inst 0x4f00fa0b // sudot v11.4s, v16.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa0f // sudot v15.4s, v16.16b, v1.4b[2]\n"
+ "ldr q16, [x10, #0xd0]\n"
+ ".inst 0x4f20fa28 // sudot v8.4s, v17.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa2c // sudot v12.4s, v17.16b, v1.4b[3]\n"
+ "ldr q17, [x10, #0xe0]\n"
+ ".inst 0x4f20fa09 // sudot v9.4s, v16.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa0d // sudot v13.4s, v16.16b, v1.4b[3]\n"
+ "ldr q16, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20fa2a // sudot v10.4s, v17.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa2e // sudot v14.4s, v17.16b, v1.4b[3]\n"
+ ".inst 0x4f20fa0b // sudot v11.4s, v16.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa0f // sudot v15.4s, v16.16b, v1.4b[3]\n"
+ "53:" // Height 2: Multiply loop: Main loop skip
+ "cbz x27, 58f\n"
+ "cmp x27, #0x4\n"
+ "blt 55f\n"
+ "54:" // Height 2: Multiply loop: Odd block loop
+ "ldr s19, [x26], #0x4\n"
+ "ldr s18, [x25], #0x4\n"
+ "sub x27, x27, #0x4\n"
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x4\n"
+ ".inst 0x4f13f228 // sudot v8.4s, v17.16b, v19.4b[0]\n"
+ ".inst 0x4f12f22c // sudot v12.4s, v17.16b, v18.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f13f209 // sudot v9.4s, v16.16b, v19.4b[0]\n"
+ ".inst 0x4f12f20d // sudot v13.4s, v16.16b, v18.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f13f22a // sudot v10.4s, v17.16b, v19.4b[0]\n"
+ ".inst 0x4f12f22e // sudot v14.4s, v17.16b, v18.4b[0]\n"
+ ".inst 0x4f13f20b // sudot v11.4s, v16.16b, v19.4b[0]\n"
+ ".inst 0x4f12f20f // sudot v15.4s, v16.16b, v18.4b[0]\n"
+ "bge 54b\n"
+ "55:" // Height 2: Multiply loop: Skip odd blocks
+ "cbz x27, 58f\n"
+ "tbz x27, #1, 56f\n"
+ "ldr h0, [x26], #0x2\n"
+ "ldr h1, [x25], #0x2\n"
+ "tbz x27, #0, 57f\n"
+ "ld1 { v0.b }[2], [x26]\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "b 57f\n"
+ "56:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x26, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "57:" // Height 2: Multiply loop: Ragged operand read: Done
+ "ldr q17, [x10, #0x0]\n"
+ "ldr q16, [x10, #0x10]\n"
+ ".inst 0x4f00f228 // sudot v8.4s, v17.16b, v0.4b[0]\n"
+ ".inst 0x4f01f22c // sudot v12.4s, v17.16b, v1.4b[0]\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4f00f209 // sudot v9.4s, v16.16b, v0.4b[0]\n"
+ ".inst 0x4f01f20d // sudot v13.4s, v16.16b, v1.4b[0]\n"
+ "ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f00f22a // sudot v10.4s, v17.16b, v0.4b[0]\n"
+ ".inst 0x4f01f22e // sudot v14.4s, v17.16b, v1.4b[0]\n"
+ ".inst 0x4f00f20b // sudot v11.4s, v16.16b, v0.4b[0]\n"
+ ".inst 0x4f01f20f // sudot v15.4s, v16.16b, v1.4b[0]\n"
+ "58:" // Height 2: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 48b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "bge 67f\n"
+ "tbz x11, #3, 62f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "tbz x11, #2, 60f\n"
+ "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "tbz x11, #1, 59f\n"
+ "str d11, [x9], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "tbz x11, #0, 66f\n"
+ "st1 { v11.s }[2], [x9]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "b 66f\n"
+ "59:" // Height 2: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 66f\n"
+ "str s11, [x9, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "b 66f\n"
+ "60:" // Height 2: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 61f\n"
+ "str d10, [x9], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "tbz x11, #0, 66f\n"
+ "st1 { v10.s }[2], [x9]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "b 66f\n"
+ "61:" // Height 2: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 66f\n"
+ "str s10, [x9, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "b 66f\n"
+ "62:" // Height 2: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 64f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "tbz x11, #1, 63f\n"
+ "str d9, [x9], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "tbz x11, #0, 66f\n"
+ "st1 { v9.s }[2], [x9]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "b 66f\n"
+ "63:" // Height 2: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 66f\n"
+ "str s9, [x9, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "b 66f\n"
+ "64:" // Height 2: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 65f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "tbz x11, #0, 66f\n"
+ "st1 { v8.s }[2], [x9]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "b 66f\n"
+ "65:" // Height 2: Partial direct writeback: partial_1_0
+ "str s8, [x9, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "66:" // Height 2: Partial direct writeback: Done
+ "b 68f\n"
+ "67:" // Height 2: Full writeback
+ "str q8, [x9, #0x0]\n"
+ "str q9, [x9, #0x10]\n"
+ "str q10, [x9, #0x20]\n"
+ "str q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "68:" // Height 2: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 36b\n"
+ "b 206f\n"
+ "69:" // Height 3
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "70:" // Height 3: Column loop
+ "tbz %x[flags], #0, 80f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "bge 79f\n"
+ "tbz x11, #3, 74f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "tbz x11, #2, 72f\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "tbz x11, #1, 71f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "tbz x11, #0, 78f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "b 78f\n"
+ "71:" // Height 3: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 78f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "b 78f\n"
+ "72:" // Height 3: Partial accumulate: partial_2_8
+ "tbz x11, #1, 73f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz x11, #0, 78f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "b 78f\n"
+ "73:" // Height 3: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 78f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "b 78f\n"
+ "74:" // Height 3: Partial accumulate: partial_4_0
+ "tbz x11, #2, 76f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "tbz x11, #1, 75f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "tbz x11, #0, 78f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "b 78f\n"
+ "75:" // Height 3: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 78f\n"
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "b 78f\n"
+ "76:" // Height 3: Partial accumulate: partial_2_0
+ "tbz x11, #1, 77f\n"
+ "ldr d8, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "tbz x11, #0, 78f\n"
+ "ld1 { v8.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "b 78f\n"
+ "77:" // Height 3: Partial accumulate: partial_1_0
+ "ldr s8, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "78:" // Height 3: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 81f\n"
+ "79:" // Height 3: full accumulate
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "b 81f\n"
+ "80:" // Height 3: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "81:" // Height 3: setup done
+ "mov x28, #0x0\n"
+ "82:" // Height 3: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 83f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "cbnz x28, 84f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "b 84f\n"
+ "83:" // Height 3: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "84:" // Height 3: input setup done
+ "cmp x27, #0x10\n"
+ "blt 87f\n"
+ "ldr q0, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q2, [x24, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 86f\n"
+ "85:" // Height 3: Multiply loop: Main loop head
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0x4f02f0d0 // sudot v16.4s, v6.16b, v2.4b[0]\n"
+ "ldr q21, [x10, #0x20]\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f1 // sudot v17.4s, v7.16b, v2.4b[0]\n"
+ "ldr q20, [x10, #0x30]\n"
+ "add x24, x24, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f00f2aa // sudot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f01f2ae // sudot v14.4s, v21.16b, v1.4b[0]\n"
+ ".inst 0x4f02f2b2 // sudot v18.4s, v21.16b, v2.4b[0]\n"
+ "ldr q21, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f00f28b // sudot v11.4s, v20.16b, v0.4b[0]\n"
+ ".inst 0x4f01f28f // sudot v15.4s, v20.16b, v1.4b[0]\n"
+ ".inst 0x4f02f293 // sudot v19.4s, v20.16b, v2.4b[0]\n"
+ "ldr q20, [x10, #0x50]\n"
+ ".inst 0x4f20f2a8 // sudot v8.4s, v21.16b, v0.4b[1]\n"
+ ".inst 0x4f21f2ac // sudot v12.4s, v21.16b, v1.4b[1]\n"
+ ".inst 0x4f22f2b0 // sudot v16.4s, v21.16b, v2.4b[1]\n"
+ "ldr q21, [x10, #0x60]\n"
+ ".inst 0x4f20f289 // sudot v9.4s, v20.16b, v0.4b[1]\n"
+ ".inst 0x4f21f28d // sudot v13.4s, v20.16b, v1.4b[1]\n"
+ ".inst 0x4f22f291 // sudot v17.4s, v20.16b, v2.4b[1]\n"
+ "ldr q20, [x10, #0x70]\n"
+ ".inst 0x4f20f2aa // sudot v10.4s, v21.16b, v0.4b[1]\n"
+ ".inst 0x4f21f2ae // sudot v14.4s, v21.16b, v1.4b[1]\n"
+ ".inst 0x4f22f2b2 // sudot v18.4s, v21.16b, v2.4b[1]\n"
+ "ldr q21, [x10, #0x80]\n"
+ ".inst 0x4f20f28b // sudot v11.4s, v20.16b, v0.4b[1]\n"
+ ".inst 0x4f21f28f // sudot v15.4s, v20.16b, v1.4b[1]\n"
+ ".inst 0x4f22f293 // sudot v19.4s, v20.16b, v2.4b[1]\n"
+ "ldr q20, [x10, #0x90]\n"
+ ".inst 0x4f00faa8 // sudot v8.4s, v21.16b, v0.4b[2]\n"
+ ".inst 0x4f01faac // sudot v12.4s, v21.16b, v1.4b[2]\n"
+ ".inst 0x4f02fab0 // sudot v16.4s, v21.16b, v2.4b[2]\n"
+ "ldr q21, [x10, #0xa0]\n"
+ ".inst 0x4f00fa89 // sudot v9.4s, v20.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa8d // sudot v13.4s, v20.16b, v1.4b[2]\n"
+ ".inst 0x4f02fa91 // sudot v17.4s, v20.16b, v2.4b[2]\n"
+ "ldr q20, [x10, #0xb0]\n"
+ ".inst 0x4f00faaa // sudot v10.4s, v21.16b, v0.4b[2]\n"
+ ".inst 0x4f01faae // sudot v14.4s, v21.16b, v1.4b[2]\n"
+ ".inst 0x4f02fab2 // sudot v18.4s, v21.16b, v2.4b[2]\n"
+ "ldr q21, [x10, #0xc0]\n"
+ ".inst 0x4f00fa8b // sudot v11.4s, v20.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa8f // sudot v15.4s, v20.16b, v1.4b[2]\n"
+ ".inst 0x4f02fa93 // sudot v19.4s, v20.16b, v2.4b[2]\n"
+ "ldr q20, [x10, #0xd0]\n"
+ ".inst 0x4f20faa8 // sudot v8.4s, v21.16b, v0.4b[3]\n"
+ ".inst 0x4f21faac // sudot v12.4s, v21.16b, v1.4b[3]\n"
+ ".inst 0x4f22fab0 // sudot v16.4s, v21.16b, v2.4b[3]\n"
+ "ldr q21, [x10, #0xe0]\n"
+ ".inst 0x4f20fa89 // sudot v9.4s, v20.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa8d // sudot v13.4s, v20.16b, v1.4b[3]\n"
+ ".inst 0x4f22fa91 // sudot v17.4s, v20.16b, v2.4b[3]\n"
+ "ldr q20, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20faaa // sudot v10.4s, v21.16b, v0.4b[3]\n"
+ ".inst 0x4f21faae // sudot v14.4s, v21.16b, v1.4b[3]\n"
+ ".inst 0x4f22fab2 // sudot v18.4s, v21.16b, v2.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x4f20fa8b // sudot v11.4s, v20.16b, v0.4b[3]\n"
+ "ldr q0, [x26, #0x0]\n"
+ ".inst 0x4f21fa8f // sudot v15.4s, v20.16b, v1.4b[3]\n"
+ "ldr q1, [x25, #0x0]\n"
+ ".inst 0x4f22fa93 // sudot v19.4s, v20.16b, v2.4b[3]\n"
+ "ldr q2, [x24, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "bge 85b\n"
+ "86:" // Height 3: Multiply loop: Single iteration only
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x4f02f0d0 // sudot v16.4s, v6.16b, v2.4b[0]\n"
+ "ldr q21, [x10, #0x20]\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f1 // sudot v17.4s, v7.16b, v2.4b[0]\n"
+ "ldr q20, [x10, #0x30]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f00f2aa // sudot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f01f2ae // sudot v14.4s, v21.16b, v1.4b[0]\n"
+ ".inst 0x4f02f2b2 // sudot v18.4s, v21.16b, v2.4b[0]\n"
+ "ldr q21, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f00f28b // sudot v11.4s, v20.16b, v0.4b[0]\n"
+ ".inst 0x4f01f28f // sudot v15.4s, v20.16b, v1.4b[0]\n"
+ ".inst 0x4f02f293 // sudot v19.4s, v20.16b, v2.4b[0]\n"
+ "ldr q20, [x10, #0x50]\n"
+ ".inst 0x4f20f2a8 // sudot v8.4s, v21.16b, v0.4b[1]\n"
+ ".inst 0x4f21f2ac // sudot v12.4s, v21.16b, v1.4b[1]\n"
+ ".inst 0x4f22f2b0 // sudot v16.4s, v21.16b, v2.4b[1]\n"
+ "ldr q21, [x10, #0x60]\n"
+ ".inst 0x4f20f289 // sudot v9.4s, v20.16b, v0.4b[1]\n"
+ ".inst 0x4f21f28d // sudot v13.4s, v20.16b, v1.4b[1]\n"
+ ".inst 0x4f22f291 // sudot v17.4s, v20.16b, v2.4b[1]\n"
+ "ldr q20, [x10, #0x70]\n"
+ ".inst 0x4f20f2aa // sudot v10.4s, v21.16b, v0.4b[1]\n"
+ ".inst 0x4f21f2ae // sudot v14.4s, v21.16b, v1.4b[1]\n"
+ ".inst 0x4f22f2b2 // sudot v18.4s, v21.16b, v2.4b[1]\n"
+ "ldr q21, [x10, #0x80]\n"
+ ".inst 0x4f20f28b // sudot v11.4s, v20.16b, v0.4b[1]\n"
+ ".inst 0x4f21f28f // sudot v15.4s, v20.16b, v1.4b[1]\n"
+ ".inst 0x4f22f293 // sudot v19.4s, v20.16b, v2.4b[1]\n"
+ "ldr q20, [x10, #0x90]\n"
+ ".inst 0x4f00faa8 // sudot v8.4s, v21.16b, v0.4b[2]\n"
+ ".inst 0x4f01faac // sudot v12.4s, v21.16b, v1.4b[2]\n"
+ ".inst 0x4f02fab0 // sudot v16.4s, v21.16b, v2.4b[2]\n"
+ "ldr q21, [x10, #0xa0]\n"
+ ".inst 0x4f00fa89 // sudot v9.4s, v20.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa8d // sudot v13.4s, v20.16b, v1.4b[2]\n"
+ ".inst 0x4f02fa91 // sudot v17.4s, v20.16b, v2.4b[2]\n"
+ "ldr q20, [x10, #0xb0]\n"
+ ".inst 0x4f00faaa // sudot v10.4s, v21.16b, v0.4b[2]\n"
+ ".inst 0x4f01faae // sudot v14.4s, v21.16b, v1.4b[2]\n"
+ ".inst 0x4f02fab2 // sudot v18.4s, v21.16b, v2.4b[2]\n"
+ "ldr q21, [x10, #0xc0]\n"
+ ".inst 0x4f00fa8b // sudot v11.4s, v20.16b, v0.4b[2]\n"
+ ".inst 0x4f01fa8f // sudot v15.4s, v20.16b, v1.4b[2]\n"
+ ".inst 0x4f02fa93 // sudot v19.4s, v20.16b, v2.4b[2]\n"
+ "ldr q20, [x10, #0xd0]\n"
+ ".inst 0x4f20faa8 // sudot v8.4s, v21.16b, v0.4b[3]\n"
+ ".inst 0x4f21faac // sudot v12.4s, v21.16b, v1.4b[3]\n"
+ ".inst 0x4f22fab0 // sudot v16.4s, v21.16b, v2.4b[3]\n"
+ "ldr q21, [x10, #0xe0]\n"
+ ".inst 0x4f20fa89 // sudot v9.4s, v20.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa8d // sudot v13.4s, v20.16b, v1.4b[3]\n"
+ ".inst 0x4f22fa91 // sudot v17.4s, v20.16b, v2.4b[3]\n"
+ "ldr q20, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20faaa // sudot v10.4s, v21.16b, v0.4b[3]\n"
+ ".inst 0x4f21faae // sudot v14.4s, v21.16b, v1.4b[3]\n"
+ ".inst 0x4f22fab2 // sudot v18.4s, v21.16b, v2.4b[3]\n"
+ ".inst 0x4f20fa8b // sudot v11.4s, v20.16b, v0.4b[3]\n"
+ ".inst 0x4f21fa8f // sudot v15.4s, v20.16b, v1.4b[3]\n"
+ ".inst 0x4f22fa93 // sudot v19.4s, v20.16b, v2.4b[3]\n"
+ "87:" // Height 3: Multiply loop: Main loop skip
+ "cbz x27, 92f\n"
+ "cmp x27, #0x4\n"
+ "blt 89f\n"
+ "88:" // Height 3: Multiply loop: Odd block loop
+ "ldr s24, [x26], #0x4\n"
+ "ldr s23, [x25], #0x4\n"
+ "sub x27, x27, #0x4\n"
+ "ldr s22, [x24], #0x4\n"
+ "ldr q21, [x10, #0x0]\n"
+ "cmp x27, #0x4\n"
+ "ldr q20, [x10, #0x10]\n"
+ ".inst 0x4f18f2a8 // sudot v8.4s, v21.16b, v24.4b[0]\n"
+ ".inst 0x4f17f2ac // sudot v12.4s, v21.16b, v23.4b[0]\n"
+ ".inst 0x4f16f2b0 // sudot v16.4s, v21.16b, v22.4b[0]\n"
+ "ldr q21, [x10, #0x20]\n"
+ ".inst 0x4f18f289 // sudot v9.4s, v20.16b, v24.4b[0]\n"
+ ".inst 0x4f17f28d // sudot v13.4s, v20.16b, v23.4b[0]\n"
+ ".inst 0x4f16f291 // sudot v17.4s, v20.16b, v22.4b[0]\n"
+ "ldr q20, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f18f2aa // sudot v10.4s, v21.16b, v24.4b[0]\n"
+ ".inst 0x4f17f2ae // sudot v14.4s, v21.16b, v23.4b[0]\n"
+ ".inst 0x4f16f2b2 // sudot v18.4s, v21.16b, v22.4b[0]\n"
+ ".inst 0x4f18f28b // sudot v11.4s, v20.16b, v24.4b[0]\n"
+ ".inst 0x4f17f28f // sudot v15.4s, v20.16b, v23.4b[0]\n"
+ ".inst 0x4f16f293 // sudot v19.4s, v20.16b, v22.4b[0]\n"
+ "bge 88b\n"
+ "89:" // Height 3: Multiply loop: Skip odd blocks
+ "cbz x27, 92f\n"
+ "tbz x27, #1, 90f\n"
+ "ldr h0, [x26], #0x2\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "tbz x27, #0, 91f\n"
+ "ld1 { v0.b }[2], [x26]\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "b 91f\n"
+ "90:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x26, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "91:" // Height 3: Multiply loop: Ragged operand read: Done
+ "ldr q21, [x10, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
+ ".inst 0x4f00f2a8 // sudot v8.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f01f2ac // sudot v12.4s, v21.16b, v1.4b[0]\n"
+ ".inst 0x4f02f2b0 // sudot v16.4s, v21.16b, v2.4b[0]\n"
+ "ldr q21, [x10, #0x20]\n"
+ ".inst 0x4f00f289 // sudot v9.4s, v20.16b, v0.4b[0]\n"
+ ".inst 0x4f01f28d // sudot v13.4s, v20.16b, v1.4b[0]\n"
+ ".inst 0x4f02f291 // sudot v17.4s, v20.16b, v2.4b[0]\n"
+ "ldr q20, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f00f2aa // sudot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x4f01f2ae // sudot v14.4s, v21.16b, v1.4b[0]\n"
+ ".inst 0x4f02f2b2 // sudot v18.4s, v21.16b, v2.4b[0]\n"
+ ".inst 0x4f00f28b // sudot v11.4s, v20.16b, v0.4b[0]\n"
+ ".inst 0x4f01f28f // sudot v15.4s, v20.16b, v1.4b[0]\n"
+ ".inst 0x4f02f293 // sudot v19.4s, v20.16b, v2.4b[0]\n"
+ "92:" // Height 3: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 82b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "bge 101f\n"
+ "tbz x11, #3, 96f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "tbz x11, #2, 94f\n"
+ "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "tbz x11, #1, 93f\n"
+ "str d11, [x9], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "tbz x11, #0, 100f\n"
+ "st1 { v11.s }[2], [x9]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "b 100f\n"
+ "93:" // Height 3: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 100f\n"
+ "str s11, [x9, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "b 100f\n"
+ "94:" // Height 3: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 95f\n"
+ "str d10, [x9], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "tbz x11, #0, 100f\n"
+ "st1 { v10.s }[2], [x9]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "b 100f\n"
+ "95:" // Height 3: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 100f\n"
+ "str s10, [x9, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "b 100f\n"
+ "96:" // Height 3: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 98f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "tbz x11, #1, 97f\n"
+ "str d9, [x9], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "tbz x11, #0, 100f\n"
+ "st1 { v9.s }[2], [x9]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "b 100f\n"
+ "97:" // Height 3: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 100f\n"
+ "str s9, [x9, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "b 100f\n"
+ "98:" // Height 3: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 99f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "tbz x11, #0, 100f\n"
+ "st1 { v8.s }[2], [x9]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "b 100f\n"
+ "99:" // Height 3: Partial direct writeback: partial_1_0
+ "str s8, [x9, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "100:" // Height 3: Partial direct writeback: Done
+ "b 102f\n"
+ "101:" // Height 3: Full writeback
+ "str q8, [x9, #0x0]\n"
+ "str q9, [x9, #0x10]\n"
+ "str q10, [x9, #0x20]\n"
+ "str q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "102:" // Height 3: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 70b\n"
+ "b 206f\n"
+ "103:" // Height 4
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "104:" // Height 4: Column loop
+ "tbz %x[flags], #0, 114f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "bge 113f\n"
+ "tbz x11, #3, 108f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "tbz x11, #2, 106f\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "tbz x11, #1, 105f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "tbz x11, #0, 112f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "b 112f\n"
+ "105:" // Height 4: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 112f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "b 112f\n"
+ "106:" // Height 4: Partial accumulate: partial_2_8
+ "tbz x11, #1, 107f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "tbz x11, #0, 112f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "b 112f\n"
+ "107:" // Height 4: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 112f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "b 112f\n"
+ "108:" // Height 4: Partial accumulate: partial_4_0
+ "tbz x11, #2, 110f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "tbz x11, #1, 109f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "tbz x11, #0, 112f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "b 112f\n"
+ "109:" // Height 4: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 112f\n"
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "b 112f\n"
+ "110:" // Height 4: Partial accumulate: partial_2_0
+ "tbz x11, #1, 111f\n"
+ "ldr d8, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "tbz x11, #0, 112f\n"
+ "ld1 { v8.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "b 112f\n"
+ "111:" // Height 4: Partial accumulate: partial_1_0
+ "ldr s8, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "112:" // Height 4: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 115f\n"
+ "113:" // Height 4: full accumulate
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "b 115f\n"
+ "114:" // Height 4: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "115:" // Height 4: setup done
+ "mov x28, #0x0\n"
+ "116:" // Height 4: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 117f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "cbnz x28, 118f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "b 118f\n"
+ "117:" // Height 4: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "118:" // Height 4: input setup done
+ "cmp x27, #0x10\n"
+ "blt 121f\n"
+ "ldr q0, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q2, [x24, #0x0]\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 120f\n"
+ "119:" // Height 4: Multiply loop: Main loop head
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0x4f02f0d0 // sudot v16.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d4 // sudot v20.4s, v6.16b, v3.4b[0]\n"
+ "ldr q25, [x10, #0x20]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f02f0f1 // sudot v17.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0f5 // sudot v21.4s, v7.16b, v3.4b[0]\n"
+ "ldr q24, [x10, #0x30]\n"
+ "cmp x27, #0x20\n"
+ ".inst 0x4f00f32a // sudot v10.4s, v25.16b, v0.4b[0]\n"
+ ".inst 0x4f01f32e // sudot v14.4s, v25.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f02f332 // sudot v18.4s, v25.16b, v2.4b[0]\n"
+ ".inst 0x4f03f336 // sudot v22.4s, v25.16b, v3.4b[0]\n"
+ "ldr q25, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f00f30b // sudot v11.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x4f01f30f // sudot v15.4s, v24.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4f02f313 // sudot v19.4s, v24.16b, v2.4b[0]\n"
+ ".inst 0x4f03f317 // sudot v23.4s, v24.16b, v3.4b[0]\n"
+ "ldr q24, [x10, #0x50]\n"
+ ".inst 0x4f20f328 // sudot v8.4s, v25.16b, v0.4b[1]\n"
+ ".inst 0x4f21f32c // sudot v12.4s, v25.16b, v1.4b[1]\n"
+ ".inst 0x4f22f330 // sudot v16.4s, v25.16b, v2.4b[1]\n"
+ ".inst 0x4f23f334 // sudot v20.4s, v25.16b, v3.4b[1]\n"
+ "ldr q25, [x10, #0x60]\n"
+ ".inst 0x4f20f309 // sudot v9.4s, v24.16b, v0.4b[1]\n"
+ ".inst 0x4f21f30d // sudot v13.4s, v24.16b, v1.4b[1]\n"
+ ".inst 0x4f22f311 // sudot v17.4s, v24.16b, v2.4b[1]\n"
+ ".inst 0x4f23f315 // sudot v21.4s, v24.16b, v3.4b[1]\n"
+ "ldr q24, [x10, #0x70]\n"
+ ".inst 0x4f20f32a // sudot v10.4s, v25.16b, v0.4b[1]\n"
+ ".inst 0x4f21f32e // sudot v14.4s, v25.16b, v1.4b[1]\n"
+ ".inst 0x4f22f332 // sudot v18.4s, v25.16b, v2.4b[1]\n"
+ ".inst 0x4f23f336 // sudot v22.4s, v25.16b, v3.4b[1]\n"
+ "ldr q25, [x10, #0x80]\n"
+ ".inst 0x4f20f30b // sudot v11.4s, v24.16b, v0.4b[1]\n"
+ ".inst 0x4f21f30f // sudot v15.4s, v24.16b, v1.4b[1]\n"
+ ".inst 0x4f22f313 // sudot v19.4s, v24.16b, v2.4b[1]\n"
+ ".inst 0x4f23f317 // sudot v23.4s, v24.16b, v3.4b[1]\n"
+ "ldr q24, [x10, #0x90]\n"
+ ".inst 0x4f00fb28 // sudot v8.4s, v25.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb2c // sudot v12.4s, v25.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb30 // sudot v16.4s, v25.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb34 // sudot v20.4s, v25.16b, v3.4b[2]\n"
+ "ldr q25, [x10, #0xa0]\n"
+ ".inst 0x4f00fb09 // sudot v9.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb0d // sudot v13.4s, v24.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb11 // sudot v17.4s, v24.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb15 // sudot v21.4s, v24.16b, v3.4b[2]\n"
+ "ldr q24, [x10, #0xb0]\n"
+ ".inst 0x4f00fb2a // sudot v10.4s, v25.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb2e // sudot v14.4s, v25.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb32 // sudot v18.4s, v25.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb36 // sudot v22.4s, v25.16b, v3.4b[2]\n"
+ "ldr q25, [x10, #0xc0]\n"
+ ".inst 0x4f00fb0b // sudot v11.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb0f // sudot v15.4s, v24.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb13 // sudot v19.4s, v24.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb17 // sudot v23.4s, v24.16b, v3.4b[2]\n"
+ "ldr q24, [x10, #0xd0]\n"
+ ".inst 0x4f20fb28 // sudot v8.4s, v25.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb2c // sudot v12.4s, v25.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb30 // sudot v16.4s, v25.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb34 // sudot v20.4s, v25.16b, v3.4b[3]\n"
+ "ldr q25, [x10, #0xe0]\n"
+ ".inst 0x4f20fb09 // sudot v9.4s, v24.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb0d // sudot v13.4s, v24.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb11 // sudot v17.4s, v24.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb15 // sudot v21.4s, v24.16b, v3.4b[3]\n"
+ "ldr q24, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20fb2a // sudot v10.4s, v25.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb2e // sudot v14.4s, v25.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb32 // sudot v18.4s, v25.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb36 // sudot v22.4s, v25.16b, v3.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x4f20fb0b // sudot v11.4s, v24.16b, v0.4b[3]\n"
+ "ldr q0, [x26, #0x0]\n"
+ ".inst 0x4f21fb0f // sudot v15.4s, v24.16b, v1.4b[3]\n"
+ "ldr q1, [x25, #0x0]\n"
+ ".inst 0x4f22fb13 // sudot v19.4s, v24.16b, v2.4b[3]\n"
+ "ldr q2, [x24, #0x0]\n"
+ ".inst 0x4f23fb17 // sudot v23.4s, v24.16b, v3.4b[3]\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "bge 119b\n"
+ "120:" // Height 4: Multiply loop: Single iteration only
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x4f02f0d0 // sudot v16.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d4 // sudot v20.4s, v6.16b, v3.4b[0]\n"
+ "ldr q25, [x10, #0x20]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4f02f0f1 // sudot v17.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0f5 // sudot v21.4s, v7.16b, v3.4b[0]\n"
+ "ldr q24, [x10, #0x30]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f00f32a // sudot v10.4s, v25.16b, v0.4b[0]\n"
+ ".inst 0x4f01f32e // sudot v14.4s, v25.16b, v1.4b[0]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f02f332 // sudot v18.4s, v25.16b, v2.4b[0]\n"
+ ".inst 0x4f03f336 // sudot v22.4s, v25.16b, v3.4b[0]\n"
+ "ldr q25, [x10, #0x40]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4f00f30b // sudot v11.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x4f01f30f // sudot v15.4s, v24.16b, v1.4b[0]\n"
+ ".inst 0x4f02f313 // sudot v19.4s, v24.16b, v2.4b[0]\n"
+ ".inst 0x4f03f317 // sudot v23.4s, v24.16b, v3.4b[0]\n"
+ "ldr q24, [x10, #0x50]\n"
+ ".inst 0x4f20f328 // sudot v8.4s, v25.16b, v0.4b[1]\n"
+ ".inst 0x4f21f32c // sudot v12.4s, v25.16b, v1.4b[1]\n"
+ ".inst 0x4f22f330 // sudot v16.4s, v25.16b, v2.4b[1]\n"
+ ".inst 0x4f23f334 // sudot v20.4s, v25.16b, v3.4b[1]\n"
+ "ldr q25, [x10, #0x60]\n"
+ ".inst 0x4f20f309 // sudot v9.4s, v24.16b, v0.4b[1]\n"
+ ".inst 0x4f21f30d // sudot v13.4s, v24.16b, v1.4b[1]\n"
+ ".inst 0x4f22f311 // sudot v17.4s, v24.16b, v2.4b[1]\n"
+ ".inst 0x4f23f315 // sudot v21.4s, v24.16b, v3.4b[1]\n"
+ "ldr q24, [x10, #0x70]\n"
+ ".inst 0x4f20f32a // sudot v10.4s, v25.16b, v0.4b[1]\n"
+ ".inst 0x4f21f32e // sudot v14.4s, v25.16b, v1.4b[1]\n"
+ ".inst 0x4f22f332 // sudot v18.4s, v25.16b, v2.4b[1]\n"
+ ".inst 0x4f23f336 // sudot v22.4s, v25.16b, v3.4b[1]\n"
+ "ldr q25, [x10, #0x80]\n"
+ ".inst 0x4f20f30b // sudot v11.4s, v24.16b, v0.4b[1]\n"
+ ".inst 0x4f21f30f // sudot v15.4s, v24.16b, v1.4b[1]\n"
+ ".inst 0x4f22f313 // sudot v19.4s, v24.16b, v2.4b[1]\n"
+ ".inst 0x4f23f317 // sudot v23.4s, v24.16b, v3.4b[1]\n"
+ "ldr q24, [x10, #0x90]\n"
+ ".inst 0x4f00fb28 // sudot v8.4s, v25.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb2c // sudot v12.4s, v25.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb30 // sudot v16.4s, v25.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb34 // sudot v20.4s, v25.16b, v3.4b[2]\n"
+ "ldr q25, [x10, #0xa0]\n"
+ ".inst 0x4f00fb09 // sudot v9.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb0d // sudot v13.4s, v24.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb11 // sudot v17.4s, v24.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb15 // sudot v21.4s, v24.16b, v3.4b[2]\n"
+ "ldr q24, [x10, #0xb0]\n"
+ ".inst 0x4f00fb2a // sudot v10.4s, v25.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb2e // sudot v14.4s, v25.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb32 // sudot v18.4s, v25.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb36 // sudot v22.4s, v25.16b, v3.4b[2]\n"
+ "ldr q25, [x10, #0xc0]\n"
+ ".inst 0x4f00fb0b // sudot v11.4s, v24.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb0f // sudot v15.4s, v24.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb13 // sudot v19.4s, v24.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb17 // sudot v23.4s, v24.16b, v3.4b[2]\n"
+ "ldr q24, [x10, #0xd0]\n"
+ ".inst 0x4f20fb28 // sudot v8.4s, v25.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb2c // sudot v12.4s, v25.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb30 // sudot v16.4s, v25.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb34 // sudot v20.4s, v25.16b, v3.4b[3]\n"
+ "ldr q25, [x10, #0xe0]\n"
+ ".inst 0x4f20fb09 // sudot v9.4s, v24.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb0d // sudot v13.4s, v24.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb11 // sudot v17.4s, v24.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb15 // sudot v21.4s, v24.16b, v3.4b[3]\n"
+ "ldr q24, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20fb2a // sudot v10.4s, v25.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb2e // sudot v14.4s, v25.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb32 // sudot v18.4s, v25.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb36 // sudot v22.4s, v25.16b, v3.4b[3]\n"
+ ".inst 0x4f20fb0b // sudot v11.4s, v24.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb0f // sudot v15.4s, v24.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb13 // sudot v19.4s, v24.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb17 // sudot v23.4s, v24.16b, v3.4b[3]\n"
+ "121:" // Height 4: Multiply loop: Main loop skip
+ "cbz x27, 126f\n"
+ "cmp x27, #0x4\n"
+ "blt 123f\n"
+ "122:" // Height 4: Multiply loop: Odd block loop
+ "ldr s29, [x26], #0x4\n"
+ "ldr s28, [x25], #0x4\n"
+ "sub x27, x27, #0x4\n"
+ "ldr s27, [x24], #0x4\n"
+ "ldr s26, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
+ "ldr q25, [x10, #0x0]\n"
+ "ldr q24, [x10, #0x10]\n"
+ ".inst 0x4f1df328 // sudot v8.4s, v25.16b, v29.4b[0]\n"
+ ".inst 0x4f1cf32c // sudot v12.4s, v25.16b, v28.4b[0]\n"
+ ".inst 0x4f1bf330 // sudot v16.4s, v25.16b, v27.4b[0]\n"
+ ".inst 0x4f1af334 // sudot v20.4s, v25.16b, v26.4b[0]\n"
+ "ldr q25, [x10, #0x20]\n"
+ ".inst 0x4f1df309 // sudot v9.4s, v24.16b, v29.4b[0]\n"
+ ".inst 0x4f1cf30d // sudot v13.4s, v24.16b, v28.4b[0]\n"
+ ".inst 0x4f1bf311 // sudot v17.4s, v24.16b, v27.4b[0]\n"
+ ".inst 0x4f1af315 // sudot v21.4s, v24.16b, v26.4b[0]\n"
+ "ldr q24, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f1df32a // sudot v10.4s, v25.16b, v29.4b[0]\n"
+ ".inst 0x4f1cf32e // sudot v14.4s, v25.16b, v28.4b[0]\n"
+ ".inst 0x4f1bf332 // sudot v18.4s, v25.16b, v27.4b[0]\n"
+ ".inst 0x4f1af336 // sudot v22.4s, v25.16b, v26.4b[0]\n"
+ ".inst 0x4f1df30b // sudot v11.4s, v24.16b, v29.4b[0]\n"
+ ".inst 0x4f1cf30f // sudot v15.4s, v24.16b, v28.4b[0]\n"
+ ".inst 0x4f1bf313 // sudot v19.4s, v24.16b, v27.4b[0]\n"
+ ".inst 0x4f1af317 // sudot v23.4s, v24.16b, v26.4b[0]\n"
+ "bge 122b\n"
+ "123:" // Height 4: Multiply loop: Skip odd blocks
+ "cbz x27, 126f\n"
+ "tbz x27, #1, 124f\n"
+ "ldr h0, [x26], #0x2\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "tbz x27, #0, 125f\n"
+ "ld1 { v0.b }[2], [x26]\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "b 125f\n"
+ "124:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x26, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "125:" // Height 4: Multiply loop: Ragged operand read: Done
+ "ldr q25, [x10, #0x0]\n"
+ "ldr q24, [x10, #0x10]\n"
+ ".inst 0x4f00f328 // sudot v8.4s, v25.16b, v0.4b[0]\n"
+ ".inst 0x4f01f32c // sudot v12.4s, v25.16b, v1.4b[0]\n"
+ ".inst 0x4f02f330 // sudot v16.4s, v25.16b, v2.4b[0]\n"
+ ".inst 0x4f03f334 // sudot v20.4s, v25.16b, v3.4b[0]\n"
+ "ldr q25, [x10, #0x20]\n"
+ ".inst 0x4f00f309 // sudot v9.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x4f01f30d // sudot v13.4s, v24.16b, v1.4b[0]\n"
+ ".inst 0x4f02f311 // sudot v17.4s, v24.16b, v2.4b[0]\n"
+ ".inst 0x4f03f315 // sudot v21.4s, v24.16b, v3.4b[0]\n"
+ "ldr q24, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f00f32a // sudot v10.4s, v25.16b, v0.4b[0]\n"
+ ".inst 0x4f01f32e // sudot v14.4s, v25.16b, v1.4b[0]\n"
+ ".inst 0x4f02f332 // sudot v18.4s, v25.16b, v2.4b[0]\n"
+ ".inst 0x4f03f336 // sudot v22.4s, v25.16b, v3.4b[0]\n"
+ ".inst 0x4f00f30b // sudot v11.4s, v24.16b, v0.4b[0]\n"
+ ".inst 0x4f01f30f // sudot v15.4s, v24.16b, v1.4b[0]\n"
+ ".inst 0x4f02f313 // sudot v19.4s, v24.16b, v2.4b[0]\n"
+ ".inst 0x4f03f317 // sudot v23.4s, v24.16b, v3.4b[0]\n"
+ "126:" // Height 4: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 116b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "bge 135f\n"
+ "tbz x11, #3, 130f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "tbz x11, #2, 128f\n"
+ "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "tbz x11, #1, 127f\n"
+ "str d11, [x9], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "tbz x11, #0, 134f\n"
+ "st1 { v11.s }[2], [x9]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "b 134f\n"
+ "127:" // Height 4: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 134f\n"
+ "str s11, [x9, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "b 134f\n"
+ "128:" // Height 4: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 129f\n"
+ "str d10, [x9], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "tbz x11, #0, 134f\n"
+ "st1 { v10.s }[2], [x9]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "b 134f\n"
+ "129:" // Height 4: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 134f\n"
+ "str s10, [x9, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "b 134f\n"
+ "130:" // Height 4: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 132f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "tbz x11, #1, 131f\n"
+ "str d9, [x9], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "tbz x11, #0, 134f\n"
+ "st1 { v9.s }[2], [x9]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "b 134f\n"
+ "131:" // Height 4: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 134f\n"
+ "str s9, [x9, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "b 134f\n"
+ "132:" // Height 4: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 133f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "tbz x11, #0, 134f\n"
+ "st1 { v8.s }[2], [x9]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "b 134f\n"
+ "133:" // Height 4: Partial direct writeback: partial_1_0
+ "str s8, [x9, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "134:" // Height 4: Partial direct writeback: Done
+ "b 136f\n"
+ "135:" // Height 4: Full writeback
+ "str q8, [x9, #0x0]\n"
+ "str q9, [x9, #0x10]\n"
+ "str q10, [x9, #0x20]\n"
+ "str q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "136:" // Height 4: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 104b\n"
+ "b 206f\n"
+ "137:" // Height 5
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "138:" // Height 5: Column loop
+ "tbz %x[flags], #0, 148f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "bge 147f\n"
+ "tbz x11, #3, 142f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "tbz x11, #2, 140f\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "tbz x11, #1, 139f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "tbz x11, #0, 146f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "b 146f\n"
+ "139:" // Height 5: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 146f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "b 146f\n"
+ "140:" // Height 5: Partial accumulate: partial_2_8
+ "tbz x11, #1, 141f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "tbz x11, #0, 146f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "b 146f\n"
+ "141:" // Height 5: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 146f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "b 146f\n"
+ "142:" // Height 5: Partial accumulate: partial_4_0
+ "tbz x11, #2, 144f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "tbz x11, #1, 143f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x11, #0, 146f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "b 146f\n"
+ "143:" // Height 5: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 146f\n"
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "b 146f\n"
+ "144:" // Height 5: Partial accumulate: partial_2_0
+ "tbz x11, #1, 145f\n"
+ "ldr d8, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "tbz x11, #0, 146f\n"
+ "ld1 { v8.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "b 146f\n"
+ "145:" // Height 5: Partial accumulate: partial_1_0
+ "ldr s8, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "146:" // Height 5: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 149f\n"
+ "147:" // Height 5: full accumulate
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "b 149f\n"
+ "148:" // Height 5: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "149:" // Height 5: setup done
+ "mov x28, #0x0\n"
+ "150:" // Height 5: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 151f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "cbnz x28, 152f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "b 152f\n"
+ "151:" // Height 5: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "152:" // Height 5: input setup done
+ "cmp x27, #0x10\n"
+ "blt 155f\n"
+ "ldr q0, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q2, [x24, #0x0]\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 154f\n"
+ "153:" // Height 5: Multiply loop: Main loop head
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0x4f02f0d0 // sudot v16.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d4 // sudot v20.4s, v6.16b, v3.4b[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f04f0d8 // sudot v24.4s, v6.16b, v4.4b[0]\n"
+ "ldr q29, [x10, #0x20]\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f1 // sudot v17.4s, v7.16b, v2.4b[0]\n"
+ "add x22, x22, #0x10\n"
+ "cmp x27, #0x20\n"
+ ".inst 0x4f03f0f5 // sudot v21.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x4f04f0f9 // sudot v25.4s, v7.16b, v4.4b[0]\n"
+ "ldr q28, [x10, #0x30]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4f00f3aa // sudot v10.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3ae // sudot v14.4s, v29.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f02f3b2 // sudot v18.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x4f03f3b6 // sudot v22.4s, v29.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4f04f3ba // sudot v26.4s, v29.16b, v4.4b[0]\n"
+ "ldr q29, [x10, #0x40]\n"
+ ".inst 0x4f00f38b // sudot v11.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x4f01f38f // sudot v15.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x4f02f393 // sudot v19.4s, v28.16b, v2.4b[0]\n"
+ ".inst 0x4f03f397 // sudot v23.4s, v28.16b, v3.4b[0]\n"
+ ".inst 0x4f04f39b // sudot v27.4s, v28.16b, v4.4b[0]\n"
+ "ldr q28, [x10, #0x50]\n"
+ ".inst 0x4f20f3a8 // sudot v8.4s, v29.16b, v0.4b[1]\n"
+ ".inst 0x4f21f3ac // sudot v12.4s, v29.16b, v1.4b[1]\n"
+ ".inst 0x4f22f3b0 // sudot v16.4s, v29.16b, v2.4b[1]\n"
+ ".inst 0x4f23f3b4 // sudot v20.4s, v29.16b, v3.4b[1]\n"
+ ".inst 0x4f24f3b8 // sudot v24.4s, v29.16b, v4.4b[1]\n"
+ "ldr q29, [x10, #0x60]\n"
+ ".inst 0x4f20f389 // sudot v9.4s, v28.16b, v0.4b[1]\n"
+ ".inst 0x4f21f38d // sudot v13.4s, v28.16b, v1.4b[1]\n"
+ ".inst 0x4f22f391 // sudot v17.4s, v28.16b, v2.4b[1]\n"
+ ".inst 0x4f23f395 // sudot v21.4s, v28.16b, v3.4b[1]\n"
+ ".inst 0x4f24f399 // sudot v25.4s, v28.16b, v4.4b[1]\n"
+ "ldr q28, [x10, #0x70]\n"
+ ".inst 0x4f20f3aa // sudot v10.4s, v29.16b, v0.4b[1]\n"
+ ".inst 0x4f21f3ae // sudot v14.4s, v29.16b, v1.4b[1]\n"
+ ".inst 0x4f22f3b2 // sudot v18.4s, v29.16b, v2.4b[1]\n"
+ ".inst 0x4f23f3b6 // sudot v22.4s, v29.16b, v3.4b[1]\n"
+ ".inst 0x4f24f3ba // sudot v26.4s, v29.16b, v4.4b[1]\n"
+ "ldr q29, [x10, #0x80]\n"
+ ".inst 0x4f20f38b // sudot v11.4s, v28.16b, v0.4b[1]\n"
+ ".inst 0x4f21f38f // sudot v15.4s, v28.16b, v1.4b[1]\n"
+ ".inst 0x4f22f393 // sudot v19.4s, v28.16b, v2.4b[1]\n"
+ ".inst 0x4f23f397 // sudot v23.4s, v28.16b, v3.4b[1]\n"
+ ".inst 0x4f24f39b // sudot v27.4s, v28.16b, v4.4b[1]\n"
+ "ldr q28, [x10, #0x90]\n"
+ ".inst 0x4f00fba8 // sudot v8.4s, v29.16b, v0.4b[2]\n"
+ ".inst 0x4f01fbac // sudot v12.4s, v29.16b, v1.4b[2]\n"
+ ".inst 0x4f02fbb0 // sudot v16.4s, v29.16b, v2.4b[2]\n"
+ ".inst 0x4f03fbb4 // sudot v20.4s, v29.16b, v3.4b[2]\n"
+ ".inst 0x4f04fbb8 // sudot v24.4s, v29.16b, v4.4b[2]\n"
+ "ldr q29, [x10, #0xa0]\n"
+ ".inst 0x4f00fb89 // sudot v9.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb8d // sudot v13.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb91 // sudot v17.4s, v28.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb95 // sudot v21.4s, v28.16b, v3.4b[2]\n"
+ ".inst 0x4f04fb99 // sudot v25.4s, v28.16b, v4.4b[2]\n"
+ "ldr q28, [x10, #0xb0]\n"
+ ".inst 0x4f00fbaa // sudot v10.4s, v29.16b, v0.4b[2]\n"
+ ".inst 0x4f01fbae // sudot v14.4s, v29.16b, v1.4b[2]\n"
+ ".inst 0x4f02fbb2 // sudot v18.4s, v29.16b, v2.4b[2]\n"
+ ".inst 0x4f03fbb6 // sudot v22.4s, v29.16b, v3.4b[2]\n"
+ ".inst 0x4f04fbba // sudot v26.4s, v29.16b, v4.4b[2]\n"
+ "ldr q29, [x10, #0xc0]\n"
+ ".inst 0x4f00fb8b // sudot v11.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb8f // sudot v15.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb93 // sudot v19.4s, v28.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb97 // sudot v23.4s, v28.16b, v3.4b[2]\n"
+ ".inst 0x4f04fb9b // sudot v27.4s, v28.16b, v4.4b[2]\n"
+ "ldr q28, [x10, #0xd0]\n"
+ ".inst 0x4f20fba8 // sudot v8.4s, v29.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbac // sudot v12.4s, v29.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbb0 // sudot v16.4s, v29.16b, v2.4b[3]\n"
+ ".inst 0x4f23fbb4 // sudot v20.4s, v29.16b, v3.4b[3]\n"
+ ".inst 0x4f24fbb8 // sudot v24.4s, v29.16b, v4.4b[3]\n"
+ "ldr q29, [x10, #0xe0]\n"
+ ".inst 0x4f20fb89 // sudot v9.4s, v28.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb8d // sudot v13.4s, v28.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb91 // sudot v17.4s, v28.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb95 // sudot v21.4s, v28.16b, v3.4b[3]\n"
+ ".inst 0x4f24fb99 // sudot v25.4s, v28.16b, v4.4b[3]\n"
+ "ldr q28, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20fbaa // sudot v10.4s, v29.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbae // sudot v14.4s, v29.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbb2 // sudot v18.4s, v29.16b, v2.4b[3]\n"
+ ".inst 0x4f23fbb6 // sudot v22.4s, v29.16b, v3.4b[3]\n"
+ ".inst 0x4f24fbba // sudot v26.4s, v29.16b, v4.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x4f20fb8b // sudot v11.4s, v28.16b, v0.4b[3]\n"
+ "ldr q0, [x26, #0x0]\n"
+ ".inst 0x4f21fb8f // sudot v15.4s, v28.16b, v1.4b[3]\n"
+ "ldr q1, [x25, #0x0]\n"
+ ".inst 0x4f22fb93 // sudot v19.4s, v28.16b, v2.4b[3]\n"
+ "ldr q2, [x24, #0x0]\n"
+ ".inst 0x4f23fb97 // sudot v23.4s, v28.16b, v3.4b[3]\n"
+ "ldr q3, [x23, #0x0]\n"
+ ".inst 0x4f24fb9b // sudot v27.4s, v28.16b, v4.4b[3]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "bge 153b\n"
+ "154:" // Height 5: Multiply loop: Single iteration only
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x4f02f0d0 // sudot v16.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d4 // sudot v20.4s, v6.16b, v3.4b[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f04f0d8 // sudot v24.4s, v6.16b, v4.4b[0]\n"
+ "ldr q29, [x10, #0x20]\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f1 // sudot v17.4s, v7.16b, v2.4b[0]\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f03f0f5 // sudot v21.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x4f04f0f9 // sudot v25.4s, v7.16b, v4.4b[0]\n"
+ "ldr q28, [x10, #0x30]\n"
+ "sub x27, x27, #0x10\n"
+ ".inst 0x4f00f3aa // sudot v10.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3ae // sudot v14.4s, v29.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4f02f3b2 // sudot v18.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x4f03f3b6 // sudot v22.4s, v29.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4f04f3ba // sudot v26.4s, v29.16b, v4.4b[0]\n"
+ "ldr q29, [x10, #0x40]\n"
+ ".inst 0x4f00f38b // sudot v11.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x4f01f38f // sudot v15.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x4f02f393 // sudot v19.4s, v28.16b, v2.4b[0]\n"
+ ".inst 0x4f03f397 // sudot v23.4s, v28.16b, v3.4b[0]\n"
+ ".inst 0x4f04f39b // sudot v27.4s, v28.16b, v4.4b[0]\n"
+ "ldr q28, [x10, #0x50]\n"
+ ".inst 0x4f20f3a8 // sudot v8.4s, v29.16b, v0.4b[1]\n"
+ ".inst 0x4f21f3ac // sudot v12.4s, v29.16b, v1.4b[1]\n"
+ ".inst 0x4f22f3b0 // sudot v16.4s, v29.16b, v2.4b[1]\n"
+ ".inst 0x4f23f3b4 // sudot v20.4s, v29.16b, v3.4b[1]\n"
+ ".inst 0x4f24f3b8 // sudot v24.4s, v29.16b, v4.4b[1]\n"
+ "ldr q29, [x10, #0x60]\n"
+ ".inst 0x4f20f389 // sudot v9.4s, v28.16b, v0.4b[1]\n"
+ ".inst 0x4f21f38d // sudot v13.4s, v28.16b, v1.4b[1]\n"
+ ".inst 0x4f22f391 // sudot v17.4s, v28.16b, v2.4b[1]\n"
+ ".inst 0x4f23f395 // sudot v21.4s, v28.16b, v3.4b[1]\n"
+ ".inst 0x4f24f399 // sudot v25.4s, v28.16b, v4.4b[1]\n"
+ "ldr q28, [x10, #0x70]\n"
+ ".inst 0x4f20f3aa // sudot v10.4s, v29.16b, v0.4b[1]\n"
+ ".inst 0x4f21f3ae // sudot v14.4s, v29.16b, v1.4b[1]\n"
+ ".inst 0x4f22f3b2 // sudot v18.4s, v29.16b, v2.4b[1]\n"
+ ".inst 0x4f23f3b6 // sudot v22.4s, v29.16b, v3.4b[1]\n"
+ ".inst 0x4f24f3ba // sudot v26.4s, v29.16b, v4.4b[1]\n"
+ "ldr q29, [x10, #0x80]\n"
+ ".inst 0x4f20f38b // sudot v11.4s, v28.16b, v0.4b[1]\n"
+ ".inst 0x4f21f38f // sudot v15.4s, v28.16b, v1.4b[1]\n"
+ ".inst 0x4f22f393 // sudot v19.4s, v28.16b, v2.4b[1]\n"
+ ".inst 0x4f23f397 // sudot v23.4s, v28.16b, v3.4b[1]\n"
+ ".inst 0x4f24f39b // sudot v27.4s, v28.16b, v4.4b[1]\n"
+ "ldr q28, [x10, #0x90]\n"
+ ".inst 0x4f00fba8 // sudot v8.4s, v29.16b, v0.4b[2]\n"
+ ".inst 0x4f01fbac // sudot v12.4s, v29.16b, v1.4b[2]\n"
+ ".inst 0x4f02fbb0 // sudot v16.4s, v29.16b, v2.4b[2]\n"
+ ".inst 0x4f03fbb4 // sudot v20.4s, v29.16b, v3.4b[2]\n"
+ ".inst 0x4f04fbb8 // sudot v24.4s, v29.16b, v4.4b[2]\n"
+ "ldr q29, [x10, #0xa0]\n"
+ ".inst 0x4f00fb89 // sudot v9.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb8d // sudot v13.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb91 // sudot v17.4s, v28.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb95 // sudot v21.4s, v28.16b, v3.4b[2]\n"
+ ".inst 0x4f04fb99 // sudot v25.4s, v28.16b, v4.4b[2]\n"
+ "ldr q28, [x10, #0xb0]\n"
+ ".inst 0x4f00fbaa // sudot v10.4s, v29.16b, v0.4b[2]\n"
+ ".inst 0x4f01fbae // sudot v14.4s, v29.16b, v1.4b[2]\n"
+ ".inst 0x4f02fbb2 // sudot v18.4s, v29.16b, v2.4b[2]\n"
+ ".inst 0x4f03fbb6 // sudot v22.4s, v29.16b, v3.4b[2]\n"
+ ".inst 0x4f04fbba // sudot v26.4s, v29.16b, v4.4b[2]\n"
+ "ldr q29, [x10, #0xc0]\n"
+ ".inst 0x4f00fb8b // sudot v11.4s, v28.16b, v0.4b[2]\n"
+ ".inst 0x4f01fb8f // sudot v15.4s, v28.16b, v1.4b[2]\n"
+ ".inst 0x4f02fb93 // sudot v19.4s, v28.16b, v2.4b[2]\n"
+ ".inst 0x4f03fb97 // sudot v23.4s, v28.16b, v3.4b[2]\n"
+ ".inst 0x4f04fb9b // sudot v27.4s, v28.16b, v4.4b[2]\n"
+ "ldr q28, [x10, #0xd0]\n"
+ ".inst 0x4f20fba8 // sudot v8.4s, v29.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbac // sudot v12.4s, v29.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbb0 // sudot v16.4s, v29.16b, v2.4b[3]\n"
+ ".inst 0x4f23fbb4 // sudot v20.4s, v29.16b, v3.4b[3]\n"
+ ".inst 0x4f24fbb8 // sudot v24.4s, v29.16b, v4.4b[3]\n"
+ "ldr q29, [x10, #0xe0]\n"
+ ".inst 0x4f20fb89 // sudot v9.4s, v28.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb8d // sudot v13.4s, v28.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb91 // sudot v17.4s, v28.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb95 // sudot v21.4s, v28.16b, v3.4b[3]\n"
+ ".inst 0x4f24fb99 // sudot v25.4s, v28.16b, v4.4b[3]\n"
+ "ldr q28, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20fbaa // sudot v10.4s, v29.16b, v0.4b[3]\n"
+ ".inst 0x4f21fbae // sudot v14.4s, v29.16b, v1.4b[3]\n"
+ ".inst 0x4f22fbb2 // sudot v18.4s, v29.16b, v2.4b[3]\n"
+ ".inst 0x4f23fbb6 // sudot v22.4s, v29.16b, v3.4b[3]\n"
+ ".inst 0x4f24fbba // sudot v26.4s, v29.16b, v4.4b[3]\n"
+ ".inst 0x4f20fb8b // sudot v11.4s, v28.16b, v0.4b[3]\n"
+ ".inst 0x4f21fb8f // sudot v15.4s, v28.16b, v1.4b[3]\n"
+ ".inst 0x4f22fb93 // sudot v19.4s, v28.16b, v2.4b[3]\n"
+ ".inst 0x4f23fb97 // sudot v23.4s, v28.16b, v3.4b[3]\n"
+ ".inst 0x4f24fb9b // sudot v27.4s, v28.16b, v4.4b[3]\n"
+ "155:" // Height 5: Multiply loop: Main loop skip
+ "cbz x27, 160f\n"
+ "cmp x27, #0x4\n"
+ "blt 157f\n"
+ "156:" // Height 5: Multiply loop: Odd block loop
+ "ldr s2, [x26], #0x4\n"
+ "ldr s1, [x25], #0x4\n"
+ "sub x27, x27, #0x4\n"
+ "ldr s0, [x24], #0x4\n"
+ "ldr s31, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
+ "ldr s30, [x22], #0x4\n"
+ "ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
+ ".inst 0x4f02f3a8 // sudot v8.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x4f01f3ac // sudot v12.4s, v29.16b, v1.4b[0]\n"
+ ".inst 0x4f00f3b0 // sudot v16.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x4f1ff3b4 // sudot v20.4s, v29.16b, v31.4b[0]\n"
+ ".inst 0x4f1ef3b8 // sudot v24.4s, v29.16b, v30.4b[0]\n"
+ "ldr q29, [x10, #0x20]\n"
+ ".inst 0x4f02f389 // sudot v9.4s, v28.16b, v2.4b[0]\n"
+ ".inst 0x4f01f38d // sudot v13.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x4f00f391 // sudot v17.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x4f1ff395 // sudot v21.4s, v28.16b, v31.4b[0]\n"
+ ".inst 0x4f1ef399 // sudot v25.4s, v28.16b, v30.4b[0]\n"
+ "ldr q28, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f02f3aa // sudot v10.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x4f01f3ae // sudot v14.4s, v29.16b, v1.4b[0]\n"
+ ".inst 0x4f00f3b2 // sudot v18.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x4f1ff3b6 // sudot v22.4s, v29.16b, v31.4b[0]\n"
+ ".inst 0x4f1ef3ba // sudot v26.4s, v29.16b, v30.4b[0]\n"
+ ".inst 0x4f02f38b // sudot v11.4s, v28.16b, v2.4b[0]\n"
+ ".inst 0x4f01f38f // sudot v15.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x4f00f393 // sudot v19.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x4f1ff397 // sudot v23.4s, v28.16b, v31.4b[0]\n"
+ ".inst 0x4f1ef39b // sudot v27.4s, v28.16b, v30.4b[0]\n"
+ "bge 156b\n"
+ "157:" // Height 5: Multiply loop: Skip odd blocks
+ "cbz x27, 160f\n"
+ "tbz x27, #1, 158f\n"
+ "ldr h0, [x26], #0x2\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "tbz x27, #0, 159f\n"
+ "ld1 { v0.b }[2], [x26]\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "b 159f\n"
+ "158:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x26, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "159:" // Height 5: Multiply loop: Ragged operand read: Done
+ "ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
+ ".inst 0x4f00f3a8 // sudot v8.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3ac // sudot v12.4s, v29.16b, v1.4b[0]\n"
+ ".inst 0x4f02f3b0 // sudot v16.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x4f03f3b4 // sudot v20.4s, v29.16b, v3.4b[0]\n"
+ ".inst 0x4f04f3b8 // sudot v24.4s, v29.16b, v4.4b[0]\n"
+ "ldr q29, [x10, #0x20]\n"
+ ".inst 0x4f00f389 // sudot v9.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x4f01f38d // sudot v13.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x4f02f391 // sudot v17.4s, v28.16b, v2.4b[0]\n"
+ ".inst 0x4f03f395 // sudot v21.4s, v28.16b, v3.4b[0]\n"
+ ".inst 0x4f04f399 // sudot v25.4s, v28.16b, v4.4b[0]\n"
+ "ldr q28, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f00f3aa // sudot v10.4s, v29.16b, v0.4b[0]\n"
+ ".inst 0x4f01f3ae // sudot v14.4s, v29.16b, v1.4b[0]\n"
+ ".inst 0x4f02f3b2 // sudot v18.4s, v29.16b, v2.4b[0]\n"
+ ".inst 0x4f03f3b6 // sudot v22.4s, v29.16b, v3.4b[0]\n"
+ ".inst 0x4f04f3ba // sudot v26.4s, v29.16b, v4.4b[0]\n"
+ ".inst 0x4f00f38b // sudot v11.4s, v28.16b, v0.4b[0]\n"
+ ".inst 0x4f01f38f // sudot v15.4s, v28.16b, v1.4b[0]\n"
+ ".inst 0x4f02f393 // sudot v19.4s, v28.16b, v2.4b[0]\n"
+ ".inst 0x4f03f397 // sudot v23.4s, v28.16b, v3.4b[0]\n"
+ ".inst 0x4f04f39b // sudot v27.4s, v28.16b, v4.4b[0]\n"
+ "160:" // Height 5: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 150b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "bge 169f\n"
+ "tbz x11, #3, 164f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "tbz x11, #2, 162f\n"
+ "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "tbz x11, #1, 161f\n"
+ "str d11, [x9], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "tbz x11, #0, 168f\n"
+ "st1 { v11.s }[2], [x9]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "b 168f\n"
+ "161:" // Height 5: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 168f\n"
+ "str s11, [x9, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "b 168f\n"
+ "162:" // Height 5: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 163f\n"
+ "str d10, [x9], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "tbz x11, #0, 168f\n"
+ "st1 { v10.s }[2], [x9]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "b 168f\n"
+ "163:" // Height 5: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 168f\n"
+ "str s10, [x9, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "b 168f\n"
+ "164:" // Height 5: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 166f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "tbz x11, #1, 165f\n"
+ "str d9, [x9], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x11, #0, 168f\n"
+ "st1 { v9.s }[2], [x9]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "b 168f\n"
+ "165:" // Height 5: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 168f\n"
+ "str s9, [x9, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "b 168f\n"
+ "166:" // Height 5: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 167f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x11, #0, 168f\n"
+ "st1 { v8.s }[2], [x9]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "b 168f\n"
+ "167:" // Height 5: Partial direct writeback: partial_1_0
+ "str s8, [x9, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "168:" // Height 5: Partial direct writeback: Done
+ "b 170f\n"
+ "169:" // Height 5: Full writeback
+ "str q8, [x9, #0x0]\n"
+ "str q9, [x9, #0x10]\n"
+ "str q10, [x9, #0x20]\n"
+ "str q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "170:" // Height 5: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 138b\n"
+ "b 206f\n"
+ "171:" // Height 6
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "172:" // Height 6: Column loop
+ "tbz %x[flags], #0, 182f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "bge 181f\n"
+ "tbz x11, #3, 176f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
+ "tbz x11, #2, 174f\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x11, #1, 173f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x11, #0, 180f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
+ "b 180f\n"
+ "173:" // Height 6: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 180f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
+ "b 180f\n"
+ "174:" // Height 6: Partial accumulate: partial_2_8
+ "tbz x11, #1, 175f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x11, #0, 180f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
+ "b 180f\n"
+ "175:" // Height 6: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 180f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
+ "b 180f\n"
+ "176:" // Height 6: Partial accumulate: partial_4_0
+ "tbz x11, #2, 178f\n"
+ "ld1 { v8.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v16.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v24.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "tbz x11, #1, 177f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x11, #0, 180f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
+ "b 180f\n"
+ "177:" // Height 6: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 180f\n"
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
+ "b 180f\n"
+ "178:" // Height 6: Partial accumulate: partial_2_0
+ "tbz x11, #1, 179f\n"
+ "ldr d8, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "ldr d16, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d24, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x11, #0, 180f\n"
+ "ld1 { v8.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v16.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v24.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
+ "b 180f\n"
+ "179:" // Height 6: Partial accumulate: partial_1_0
+ "ldr s8, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "ldr s16, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s24, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
+ "180:" // Height 6: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 183f\n"
+ "181:" // Height 6: full accumulate
+ "ldr q8, [x9, #0x0]\n"
+ "ldr q9, [x9, #0x10]\n"
+ "ldr q10, [x9, #0x20]\n"
+ "ldr q11, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q16, [x23, #0x0]\n"
+ "ldr q17, [x23, #0x10]\n"
+ "ldr q18, [x23, #0x20]\n"
+ "ldr q19, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q24, [x21, #0x0]\n"
+ "ldr q25, [x21, #0x10]\n"
+ "ldr q26, [x21, #0x20]\n"
+ "ldr q27, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
+ "b 183f\n"
+ "182:" // Height 6: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ "183:" // Height 6: setup done
+ "mov x28, #0x0\n"
+ "184:" // Height 6: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 185f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x21, [x20, #0x28]\n"
+ "cbnz x28, 186f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "add x21, x21, x20\n"
+ "b 186f\n"
+ "185:" // Height 6: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
+ "186:" // Height 6: input setup done
+ "cmp x27, #0x10\n"
+ "blt 189f\n"
+ "ldr q0, [x26, #0x0]\n"
+ "ldr q1, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q2, [x24, #0x0]\n"
+ "ldr q3, [x23, #0x0]\n"
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "ldr q6, [x10, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "blt 188f\n"
+ "187:" // Height 6: Multiply loop: Main loop head
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0x4f02f0d0 // sudot v16.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d4 // sudot v20.4s, v6.16b, v3.4b[0]\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x4f04f0d8 // sudot v24.4s, v6.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0dc // sudot v28.4s, v6.16b, v5.4b[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x4f02f0f1 // sudot v17.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0f5 // sudot v21.4s, v7.16b, v3.4b[0]\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4f04f0f9 // sudot v25.4s, v7.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0fd // sudot v29.4s, v7.16b, v5.4b[0]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4f00f0ca // sudot v10.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ce // sudot v14.4s, v6.16b, v1.4b[0]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4f02f0d2 // sudot v18.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d6 // sudot v22.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ ".inst 0x4f04f0da // sudot v26.4s, v6.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0de // sudot v30.4s, v6.16b, v5.4b[0]\n"
+ "ldr q6, [x10, #0x40]\n"
+ ".inst 0x4f00f0eb // sudot v11.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ef // sudot v15.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f3 // sudot v19.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0f7 // sudot v23.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x4f04f0fb // sudot v27.4s, v7.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0ff // sudot v31.4s, v7.16b, v5.4b[0]\n"
+ "ldr q7, [x10, #0x50]\n"
+ ".inst 0x4f20f0c8 // sudot v8.4s, v6.16b, v0.4b[1]\n"
+ ".inst 0x4f21f0cc // sudot v12.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x4f22f0d0 // sudot v16.4s, v6.16b, v2.4b[1]\n"
+ ".inst 0x4f23f0d4 // sudot v20.4s, v6.16b, v3.4b[1]\n"
+ ".inst 0x4f24f0d8 // sudot v24.4s, v6.16b, v4.4b[1]\n"
+ ".inst 0x4f25f0dc // sudot v28.4s, v6.16b, v5.4b[1]\n"
+ "ldr q6, [x10, #0x60]\n"
+ ".inst 0x4f20f0e9 // sudot v9.4s, v7.16b, v0.4b[1]\n"
+ ".inst 0x4f21f0ed // sudot v13.4s, v7.16b, v1.4b[1]\n"
+ ".inst 0x4f22f0f1 // sudot v17.4s, v7.16b, v2.4b[1]\n"
+ ".inst 0x4f23f0f5 // sudot v21.4s, v7.16b, v3.4b[1]\n"
+ ".inst 0x4f24f0f9 // sudot v25.4s, v7.16b, v4.4b[1]\n"
+ ".inst 0x4f25f0fd // sudot v29.4s, v7.16b, v5.4b[1]\n"
+ "ldr q7, [x10, #0x70]\n"
+ ".inst 0x4f20f0ca // sudot v10.4s, v6.16b, v0.4b[1]\n"
+ ".inst 0x4f21f0ce // sudot v14.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x4f22f0d2 // sudot v18.4s, v6.16b, v2.4b[1]\n"
+ ".inst 0x4f23f0d6 // sudot v22.4s, v6.16b, v3.4b[1]\n"
+ ".inst 0x4f24f0da // sudot v26.4s, v6.16b, v4.4b[1]\n"
+ ".inst 0x4f25f0de // sudot v30.4s, v6.16b, v5.4b[1]\n"
+ "ldr q6, [x10, #0x80]\n"
+ ".inst 0x4f20f0eb // sudot v11.4s, v7.16b, v0.4b[1]\n"
+ ".inst 0x4f21f0ef // sudot v15.4s, v7.16b, v1.4b[1]\n"
+ ".inst 0x4f22f0f3 // sudot v19.4s, v7.16b, v2.4b[1]\n"
+ ".inst 0x4f23f0f7 // sudot v23.4s, v7.16b, v3.4b[1]\n"
+ ".inst 0x4f24f0fb // sudot v27.4s, v7.16b, v4.4b[1]\n"
+ ".inst 0x4f25f0ff // sudot v31.4s, v7.16b, v5.4b[1]\n"
+ "ldr q7, [x10, #0x90]\n"
+ ".inst 0x4f00f8c8 // sudot v8.4s, v6.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8cc // sudot v12.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8d0 // sudot v16.4s, v6.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8d4 // sudot v20.4s, v6.16b, v3.4b[2]\n"
+ ".inst 0x4f04f8d8 // sudot v24.4s, v6.16b, v4.4b[2]\n"
+ ".inst 0x4f05f8dc // sudot v28.4s, v6.16b, v5.4b[2]\n"
+ "ldr q6, [x10, #0xa0]\n"
+ ".inst 0x4f00f8e9 // sudot v9.4s, v7.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8ed // sudot v13.4s, v7.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8f1 // sudot v17.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8f5 // sudot v21.4s, v7.16b, v3.4b[2]\n"
+ ".inst 0x4f04f8f9 // sudot v25.4s, v7.16b, v4.4b[2]\n"
+ ".inst 0x4f05f8fd // sudot v29.4s, v7.16b, v5.4b[2]\n"
+ "ldr q7, [x10, #0xb0]\n"
+ ".inst 0x4f00f8ca // sudot v10.4s, v6.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8ce // sudot v14.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8d2 // sudot v18.4s, v6.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8d6 // sudot v22.4s, v6.16b, v3.4b[2]\n"
+ ".inst 0x4f04f8da // sudot v26.4s, v6.16b, v4.4b[2]\n"
+ ".inst 0x4f05f8de // sudot v30.4s, v6.16b, v5.4b[2]\n"
+ "ldr q6, [x10, #0xc0]\n"
+ ".inst 0x4f00f8eb // sudot v11.4s, v7.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8ef // sudot v15.4s, v7.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8f3 // sudot v19.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8f7 // sudot v23.4s, v7.16b, v3.4b[2]\n"
+ ".inst 0x4f04f8fb // sudot v27.4s, v7.16b, v4.4b[2]\n"
+ ".inst 0x4f05f8ff // sudot v31.4s, v7.16b, v5.4b[2]\n"
+ "ldr q7, [x10, #0xd0]\n"
+ ".inst 0x4f20f8c8 // sudot v8.4s, v6.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8cc // sudot v12.4s, v6.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8d0 // sudot v16.4s, v6.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8d4 // sudot v20.4s, v6.16b, v3.4b[3]\n"
+ ".inst 0x4f24f8d8 // sudot v24.4s, v6.16b, v4.4b[3]\n"
+ ".inst 0x4f25f8dc // sudot v28.4s, v6.16b, v5.4b[3]\n"
+ "ldr q6, [x10, #0xe0]\n"
+ ".inst 0x4f20f8e9 // sudot v9.4s, v7.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8ed // sudot v13.4s, v7.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8f1 // sudot v17.4s, v7.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8f5 // sudot v21.4s, v7.16b, v3.4b[3]\n"
+ ".inst 0x4f24f8f9 // sudot v25.4s, v7.16b, v4.4b[3]\n"
+ ".inst 0x4f25f8fd // sudot v29.4s, v7.16b, v5.4b[3]\n"
+ "ldr q7, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20f8ca // sudot v10.4s, v6.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8ce // sudot v14.4s, v6.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8d2 // sudot v18.4s, v6.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8d6 // sudot v22.4s, v6.16b, v3.4b[3]\n"
+ ".inst 0x4f24f8da // sudot v26.4s, v6.16b, v4.4b[3]\n"
+ ".inst 0x4f25f8de // sudot v30.4s, v6.16b, v5.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
+ ".inst 0x4f20f8eb // sudot v11.4s, v7.16b, v0.4b[3]\n"
+ "ldr q0, [x26, #0x0]\n"
+ ".inst 0x4f21f8ef // sudot v15.4s, v7.16b, v1.4b[3]\n"
+ "ldr q1, [x25, #0x0]\n"
+ ".inst 0x4f22f8f3 // sudot v19.4s, v7.16b, v2.4b[3]\n"
+ "ldr q2, [x24, #0x0]\n"
+ ".inst 0x4f23f8f7 // sudot v23.4s, v7.16b, v3.4b[3]\n"
+ "ldr q3, [x23, #0x0]\n"
+ ".inst 0x4f24f8fb // sudot v27.4s, v7.16b, v4.4b[3]\n"
+ "ldr q4, [x22, #0x0]\n"
+ ".inst 0x4f25f8ff // sudot v31.4s, v7.16b, v5.4b[3]\n"
+ "ldr q5, [x21, #0x0]\n"
+ "ldr q7, [x10, #0x10]\n"
+ "bge 187b\n"
+ "188:" // Height 6: Multiply loop: Single iteration only
+ ".inst 0x4f00f0c8 // sudot v8.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cc // sudot v12.4s, v6.16b, v1.4b[0]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x4f02f0d0 // sudot v16.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d4 // sudot v20.4s, v6.16b, v3.4b[0]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4f04f0d8 // sudot v24.4s, v6.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0dc // sudot v28.4s, v6.16b, v5.4b[0]\n"
+ "ldr q6, [x10, #0x20]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4f00f0e9 // sudot v9.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ed // sudot v13.4s, v7.16b, v1.4b[0]\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4f02f0f1 // sudot v17.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0f5 // sudot v21.4s, v7.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4f04f0f9 // sudot v25.4s, v7.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0fd // sudot v29.4s, v7.16b, v5.4b[0]\n"
+ "ldr q7, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4f00f0ca // sudot v10.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ce // sudot v14.4s, v6.16b, v1.4b[0]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4f02f0d2 // sudot v18.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d6 // sudot v22.4s, v6.16b, v3.4b[0]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ ".inst 0x4f04f0da // sudot v26.4s, v6.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0de // sudot v30.4s, v6.16b, v5.4b[0]\n"
+ "ldr q6, [x10, #0x40]\n"
+ ".inst 0x4f00f0eb // sudot v11.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ef // sudot v15.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f3 // sudot v19.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0f7 // sudot v23.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x4f04f0fb // sudot v27.4s, v7.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0ff // sudot v31.4s, v7.16b, v5.4b[0]\n"
+ "ldr q7, [x10, #0x50]\n"
+ ".inst 0x4f20f0c8 // sudot v8.4s, v6.16b, v0.4b[1]\n"
+ ".inst 0x4f21f0cc // sudot v12.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x4f22f0d0 // sudot v16.4s, v6.16b, v2.4b[1]\n"
+ ".inst 0x4f23f0d4 // sudot v20.4s, v6.16b, v3.4b[1]\n"
+ ".inst 0x4f24f0d8 // sudot v24.4s, v6.16b, v4.4b[1]\n"
+ ".inst 0x4f25f0dc // sudot v28.4s, v6.16b, v5.4b[1]\n"
+ "ldr q6, [x10, #0x60]\n"
+ ".inst 0x4f20f0e9 // sudot v9.4s, v7.16b, v0.4b[1]\n"
+ ".inst 0x4f21f0ed // sudot v13.4s, v7.16b, v1.4b[1]\n"
+ ".inst 0x4f22f0f1 // sudot v17.4s, v7.16b, v2.4b[1]\n"
+ ".inst 0x4f23f0f5 // sudot v21.4s, v7.16b, v3.4b[1]\n"
+ ".inst 0x4f24f0f9 // sudot v25.4s, v7.16b, v4.4b[1]\n"
+ ".inst 0x4f25f0fd // sudot v29.4s, v7.16b, v5.4b[1]\n"
+ "ldr q7, [x10, #0x70]\n"
+ ".inst 0x4f20f0ca // sudot v10.4s, v6.16b, v0.4b[1]\n"
+ ".inst 0x4f21f0ce // sudot v14.4s, v6.16b, v1.4b[1]\n"
+ ".inst 0x4f22f0d2 // sudot v18.4s, v6.16b, v2.4b[1]\n"
+ ".inst 0x4f23f0d6 // sudot v22.4s, v6.16b, v3.4b[1]\n"
+ ".inst 0x4f24f0da // sudot v26.4s, v6.16b, v4.4b[1]\n"
+ ".inst 0x4f25f0de // sudot v30.4s, v6.16b, v5.4b[1]\n"
+ "ldr q6, [x10, #0x80]\n"
+ ".inst 0x4f20f0eb // sudot v11.4s, v7.16b, v0.4b[1]\n"
+ ".inst 0x4f21f0ef // sudot v15.4s, v7.16b, v1.4b[1]\n"
+ ".inst 0x4f22f0f3 // sudot v19.4s, v7.16b, v2.4b[1]\n"
+ ".inst 0x4f23f0f7 // sudot v23.4s, v7.16b, v3.4b[1]\n"
+ ".inst 0x4f24f0fb // sudot v27.4s, v7.16b, v4.4b[1]\n"
+ ".inst 0x4f25f0ff // sudot v31.4s, v7.16b, v5.4b[1]\n"
+ "ldr q7, [x10, #0x90]\n"
+ ".inst 0x4f00f8c8 // sudot v8.4s, v6.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8cc // sudot v12.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8d0 // sudot v16.4s, v6.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8d4 // sudot v20.4s, v6.16b, v3.4b[2]\n"
+ ".inst 0x4f04f8d8 // sudot v24.4s, v6.16b, v4.4b[2]\n"
+ ".inst 0x4f05f8dc // sudot v28.4s, v6.16b, v5.4b[2]\n"
+ "ldr q6, [x10, #0xa0]\n"
+ ".inst 0x4f00f8e9 // sudot v9.4s, v7.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8ed // sudot v13.4s, v7.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8f1 // sudot v17.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8f5 // sudot v21.4s, v7.16b, v3.4b[2]\n"
+ ".inst 0x4f04f8f9 // sudot v25.4s, v7.16b, v4.4b[2]\n"
+ ".inst 0x4f05f8fd // sudot v29.4s, v7.16b, v5.4b[2]\n"
+ "ldr q7, [x10, #0xb0]\n"
+ ".inst 0x4f00f8ca // sudot v10.4s, v6.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8ce // sudot v14.4s, v6.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8d2 // sudot v18.4s, v6.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8d6 // sudot v22.4s, v6.16b, v3.4b[2]\n"
+ ".inst 0x4f04f8da // sudot v26.4s, v6.16b, v4.4b[2]\n"
+ ".inst 0x4f05f8de // sudot v30.4s, v6.16b, v5.4b[2]\n"
+ "ldr q6, [x10, #0xc0]\n"
+ ".inst 0x4f00f8eb // sudot v11.4s, v7.16b, v0.4b[2]\n"
+ ".inst 0x4f01f8ef // sudot v15.4s, v7.16b, v1.4b[2]\n"
+ ".inst 0x4f02f8f3 // sudot v19.4s, v7.16b, v2.4b[2]\n"
+ ".inst 0x4f03f8f7 // sudot v23.4s, v7.16b, v3.4b[2]\n"
+ ".inst 0x4f04f8fb // sudot v27.4s, v7.16b, v4.4b[2]\n"
+ ".inst 0x4f05f8ff // sudot v31.4s, v7.16b, v5.4b[2]\n"
+ "ldr q7, [x10, #0xd0]\n"
+ ".inst 0x4f20f8c8 // sudot v8.4s, v6.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8cc // sudot v12.4s, v6.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8d0 // sudot v16.4s, v6.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8d4 // sudot v20.4s, v6.16b, v3.4b[3]\n"
+ ".inst 0x4f24f8d8 // sudot v24.4s, v6.16b, v4.4b[3]\n"
+ ".inst 0x4f25f8dc // sudot v28.4s, v6.16b, v5.4b[3]\n"
+ "ldr q6, [x10, #0xe0]\n"
+ ".inst 0x4f20f8e9 // sudot v9.4s, v7.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8ed // sudot v13.4s, v7.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8f1 // sudot v17.4s, v7.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8f5 // sudot v21.4s, v7.16b, v3.4b[3]\n"
+ ".inst 0x4f24f8f9 // sudot v25.4s, v7.16b, v4.4b[3]\n"
+ ".inst 0x4f25f8fd // sudot v29.4s, v7.16b, v5.4b[3]\n"
+ "ldr q7, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4f20f8ca // sudot v10.4s, v6.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8ce // sudot v14.4s, v6.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8d2 // sudot v18.4s, v6.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8d6 // sudot v22.4s, v6.16b, v3.4b[3]\n"
+ ".inst 0x4f24f8da // sudot v26.4s, v6.16b, v4.4b[3]\n"
+ ".inst 0x4f25f8de // sudot v30.4s, v6.16b, v5.4b[3]\n"
+ ".inst 0x4f20f8eb // sudot v11.4s, v7.16b, v0.4b[3]\n"
+ ".inst 0x4f21f8ef // sudot v15.4s, v7.16b, v1.4b[3]\n"
+ ".inst 0x4f22f8f3 // sudot v19.4s, v7.16b, v2.4b[3]\n"
+ ".inst 0x4f23f8f7 // sudot v23.4s, v7.16b, v3.4b[3]\n"
+ ".inst 0x4f24f8fb // sudot v27.4s, v7.16b, v4.4b[3]\n"
+ ".inst 0x4f25f8ff // sudot v31.4s, v7.16b, v5.4b[3]\n"
+ "189:" // Height 6: Multiply loop: Main loop skip
+ "cbz x27, 194f\n"
+ "cmp x27, #0x4\n"
+ "blt 191f\n"
+ "190:" // Height 6: Multiply loop: Odd block loop
+ "ldr s7, [x26], #0x4\n"
+ "ldr s6, [x25], #0x4\n"
+ "sub x27, x27, #0x4\n"
+ "ldr s5, [x24], #0x4\n"
+ "ldr s4, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
+ "ldr s3, [x22], #0x4\n"
+ "ldr s2, [x21], #0x4\n"
+ "ldr q1, [x10, #0x0]\n"
+ "ldr q0, [x10, #0x10]\n"
+ ".inst 0x4f07f028 // sudot v8.4s, v1.16b, v7.4b[0]\n"
+ ".inst 0x4f06f02c // sudot v12.4s, v1.16b, v6.4b[0]\n"
+ ".inst 0x4f05f030 // sudot v16.4s, v1.16b, v5.4b[0]\n"
+ ".inst 0x4f04f034 // sudot v20.4s, v1.16b, v4.4b[0]\n"
+ ".inst 0x4f03f038 // sudot v24.4s, v1.16b, v3.4b[0]\n"
+ ".inst 0x4f02f03c // sudot v28.4s, v1.16b, v2.4b[0]\n"
+ "ldr q1, [x10, #0x20]\n"
+ ".inst 0x4f07f009 // sudot v9.4s, v0.16b, v7.4b[0]\n"
+ ".inst 0x4f06f00d // sudot v13.4s, v0.16b, v6.4b[0]\n"
+ ".inst 0x4f05f011 // sudot v17.4s, v0.16b, v5.4b[0]\n"
+ ".inst 0x4f04f015 // sudot v21.4s, v0.16b, v4.4b[0]\n"
+ ".inst 0x4f03f019 // sudot v25.4s, v0.16b, v3.4b[0]\n"
+ ".inst 0x4f02f01d // sudot v29.4s, v0.16b, v2.4b[0]\n"
+ "ldr q0, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f07f02a // sudot v10.4s, v1.16b, v7.4b[0]\n"
+ ".inst 0x4f06f02e // sudot v14.4s, v1.16b, v6.4b[0]\n"
+ ".inst 0x4f05f032 // sudot v18.4s, v1.16b, v5.4b[0]\n"
+ ".inst 0x4f04f036 // sudot v22.4s, v1.16b, v4.4b[0]\n"
+ ".inst 0x4f03f03a // sudot v26.4s, v1.16b, v3.4b[0]\n"
+ ".inst 0x4f02f03e // sudot v30.4s, v1.16b, v2.4b[0]\n"
+ ".inst 0x4f07f00b // sudot v11.4s, v0.16b, v7.4b[0]\n"
+ ".inst 0x4f06f00f // sudot v15.4s, v0.16b, v6.4b[0]\n"
+ ".inst 0x4f05f013 // sudot v19.4s, v0.16b, v5.4b[0]\n"
+ ".inst 0x4f04f017 // sudot v23.4s, v0.16b, v4.4b[0]\n"
+ ".inst 0x4f03f01b // sudot v27.4s, v0.16b, v3.4b[0]\n"
+ ".inst 0x4f02f01f // sudot v31.4s, v0.16b, v2.4b[0]\n"
+ "bge 190b\n"
+ "191:" // Height 6: Multiply loop: Skip odd blocks
+ "cbz x27, 194f\n"
+ "tbz x27, #1, 192f\n"
+ "ldr h0, [x26], #0x2\n"
+ "ldr h1, [x25], #0x2\n"
+ "ldr h2, [x24], #0x2\n"
+ "ldr h3, [x23], #0x2\n"
+ "ldr h4, [x22], #0x2\n"
+ "ldr h5, [x21], #0x2\n"
+ "tbz x27, #0, 193f\n"
+ "ld1 { v0.b }[2], [x26]\n"
+ "ld1 { v1.b }[2], [x25]\n"
+ "ld1 { v2.b }[2], [x24]\n"
+ "ld1 { v3.b }[2], [x23]\n"
+ "ld1 { v4.b }[2], [x22]\n"
+ "ld1 { v5.b }[2], [x21]\n"
+ "b 193f\n"
+ "192:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b0, [x26, #0x0]\n"
+ "ldr b1, [x25, #0x0]\n"
+ "ldr b2, [x24, #0x0]\n"
+ "ldr b3, [x23, #0x0]\n"
+ "ldr b4, [x22, #0x0]\n"
+ "ldr b5, [x21, #0x0]\n"
+ "193:" // Height 6: Multiply loop: Ragged operand read: Done
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
+ ".inst 0x4f00f0e8 // sudot v8.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ec // sudot v12.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f0 // sudot v16.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0f4 // sudot v20.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x4f04f0f8 // sudot v24.4s, v7.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0fc // sudot v28.4s, v7.16b, v5.4b[0]\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x4f00f0c9 // sudot v9.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cd // sudot v13.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0d1 // sudot v17.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d5 // sudot v21.4s, v6.16b, v3.4b[0]\n"
+ ".inst 0x4f04f0d9 // sudot v25.4s, v6.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0dd // sudot v29.4s, v6.16b, v5.4b[0]\n"
+ "ldr q6, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
+ ".inst 0x4f00f0ea // sudot v10.4s, v7.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0ee // sudot v14.4s, v7.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0f2 // sudot v18.4s, v7.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0f6 // sudot v22.4s, v7.16b, v3.4b[0]\n"
+ ".inst 0x4f04f0fa // sudot v26.4s, v7.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0fe // sudot v30.4s, v7.16b, v5.4b[0]\n"
+ ".inst 0x4f00f0cb // sudot v11.4s, v6.16b, v0.4b[0]\n"
+ ".inst 0x4f01f0cf // sudot v15.4s, v6.16b, v1.4b[0]\n"
+ ".inst 0x4f02f0d3 // sudot v19.4s, v6.16b, v2.4b[0]\n"
+ ".inst 0x4f03f0d7 // sudot v23.4s, v6.16b, v3.4b[0]\n"
+ ".inst 0x4f04f0db // sudot v27.4s, v6.16b, v4.4b[0]\n"
+ ".inst 0x4f05f0df // sudot v31.4s, v6.16b, v5.4b[0]\n"
+ "194:" // Height 6: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 184b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "bge 203f\n"
+ "tbz x11, #3, 198f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v9.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v13.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v21.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "st1 { v29.4s }, [x20], #0x10\n"
+ "tbz x11, #2, 196f\n"
+ "st1 { v10.4s }, [x9], #0x10\n"
+ "st1 { v14.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "st1 { v22.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "st1 { v30.4s }, [x20], #0x10\n"
+ "tbz x11, #1, 195f\n"
+ "str d11, [x9], #0x8\n"
+ "str d15, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "str d23, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "str d31, [x20], #0x8\n"
+ "tbz x11, #0, 202f\n"
+ "st1 { v11.s }[2], [x9]\n"
+ "st1 { v15.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "st1 { v23.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "st1 { v31.s }[2], [x20]\n"
+ "b 202f\n"
+ "195:" // Height 6: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 202f\n"
+ "str s11, [x9, #0x0]\n"
+ "str s15, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "str s23, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "str s31, [x20, #0x0]\n"
+ "b 202f\n"
+ "196:" // Height 6: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 197f\n"
+ "str d10, [x9], #0x8\n"
+ "str d14, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "str d22, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "str d30, [x20], #0x8\n"
+ "tbz x11, #0, 202f\n"
+ "st1 { v10.s }[2], [x9]\n"
+ "st1 { v14.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "st1 { v22.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "st1 { v30.s }[2], [x20]\n"
+ "b 202f\n"
+ "197:" // Height 6: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 202f\n"
+ "str s10, [x9, #0x0]\n"
+ "str s14, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "str s22, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "str s30, [x20, #0x0]\n"
+ "b 202f\n"
+ "198:" // Height 6: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 200f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x20], #0x10\n"
+ "tbz x11, #1, 199f\n"
+ "str d9, [x9], #0x8\n"
+ "str d13, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "str d21, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "str d29, [x20], #0x8\n"
+ "tbz x11, #0, 202f\n"
+ "st1 { v9.s }[2], [x9]\n"
+ "st1 { v13.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "st1 { v21.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "st1 { v29.s }[2], [x20]\n"
+ "b 202f\n"
+ "199:" // Height 6: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 202f\n"
+ "str s9, [x9, #0x0]\n"
+ "str s13, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "str s21, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "str s29, [x20, #0x0]\n"
+ "b 202f\n"
+ "200:" // Height 6: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 201f\n"
+ "str d8, [x9], #0x8\n"
+ "str d12, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "str d20, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "str d28, [x20], #0x8\n"
+ "tbz x11, #0, 202f\n"
+ "st1 { v8.s }[2], [x9]\n"
+ "st1 { v12.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "st1 { v20.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "st1 { v28.s }[2], [x20]\n"
+ "b 202f\n"
+ "201:" // Height 6: Partial direct writeback: partial_1_0
+ "str s8, [x9, #0x0]\n"
+ "str s12, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "str s20, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "str s28, [x20, #0x0]\n"
+ "202:" // Height 6: Partial direct writeback: Done
+ "b 204f\n"
+ "203:" // Height 6: Full writeback
+ "str q8, [x9, #0x0]\n"
+ "str q9, [x9, #0x10]\n"
+ "str q10, [x9, #0x20]\n"
+ "str q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q12, [x24, #0x0]\n"
+ "str q13, [x24, #0x10]\n"
+ "str q14, [x24, #0x20]\n"
+ "str q15, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q20, [x22, #0x0]\n"
+ "str q21, [x22, #0x10]\n"
+ "str q22, [x22, #0x20]\n"
+ "str q23, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "str q28, [x20, #0x0]\n"
+ "str q29, [x20, #0x10]\n"
+ "str q30, [x20, #0x20]\n"
+ "str q31, [x20, #0x30]\n"
+ "204:" // Height 6: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 172b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 206f\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 205f\n"
+ "add x21, x21, #0x6\n"
+ "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "205:" // Update direct input
+ "mov x20, #0x6\n"
+ "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "b 1b\n"
+ "206:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
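The writeback tails above all follow one pattern: tbz tests on bits 3..0 of
the remaining column count peel off stores of 8, 4, 2 and then 1 words per
output row, handling any 0-15 column remainder by binary decomposition
rather than a per-column loop. A minimal C++ sketch of the idea for a single
row (illustrative only; the helper name is not part of the patch):

    #include <cstdint>
    #include <cstring>

    // Store rem (< 16) int32 lanes of one accumulator row, mirroring the
    // tbz/st1/str cascade in the generated assembly.
    static void store_tail(int32_t *out, const int32_t *acc, unsigned rem) {
        if (rem & 8) { std::memcpy(out, acc, 8 * sizeof(int32_t)); out += 8; acc += 8; }
        if (rem & 4) { std::memcpy(out, acc, 4 * sizeof(int32_t)); out += 4; acc += 4; }
        if (rem & 2) { std::memcpy(out, acc, 2 * sizeof(int32_t)); out += 2; acc += 2; }
        if (rem & 1) { *out = *acc; }
    }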
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16.hpp
new file mode 100644
index 0000000000..feda7d707a
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16.hpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<uint8_t>, \
+ size_t, size_t, \
+ const int8_t *, \
+ IndirectOutputArg<int32_t>, \
+ const int32_t *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_hybrid_u8s8s32_mmla_6x16( ARGLIST );
+
+class cls_a64_hybrid_u8s8s32_mmla_6x16
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+
+ static constexpr unsigned int out_width()
+ {
+ return 16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 8;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 8> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, uint32_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 55.05 };
+ case CPUModel::A510:
+ return { 30.34 };
+ case CPUModel::V1:
+ return { 83.77 };
+ }
+ }
+
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 55.31, 15.72, 0.62 };
+ case CPUModel::A510:
+ return { 33.64, 3.92, 0.48 };
+ case CPUModel::V1:
+ return { 63.94, 16.18, 0.83 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_hybrid_u8s8s32_mmla_6x16;
+ cls_a64_hybrid_u8s8s32_mmla_6x16(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
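The class above is the kernel's registration shim: the assembly works on an
output tile of 6 rows by 16 int32 columns, consumes K in multiples of 8 (the
usmmla depth), and the per-CPU numbers from get_performance_parameters() are
cycle estimates used to rank this kernel against other candidates. A hedged
sketch of what the blocking parameters imply for a caller (the helper below
is illustrative, not library API):

    #include <cstddef>

    struct TileCounts { std::size_t m_tiles, n_tiles, k_rounded; };

    // Tile counts for an M x N x K problem under this kernel's blocking.
    static TileCounts tiles_for(std::size_t M, std::size_t N, std::size_t K) {
        return {
            (M + 6 - 1) / 6,        // out_height() == 6 rows per pass
            (N + 16 - 1) / 16,      // out_width() == 16 columns per pass
            ((K + 8 - 1) / 8) * 8   // k_unroll() == 8: pad K to the MMLA depth
        };
    }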
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16/generic.cpp
new file mode 100644
index 0000000000..32fa470d9e
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8s8s32_mmla_6x16/generic.cpp
@@ -0,0 +1,3450 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+
+namespace arm_gemm {
+
+void a64_hybrid_u8s8s32_mmla_6x16 (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<uint8_t> A_arg,
+ size_t M, size_t N, const int8_t *B_ptr, IndirectOutputArg<int32_t> output_arg,
+ const int32_t *, Activation, bool accumulate
+)
+{
+ struct KernelArgs {
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const int8_t *B_ptr = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ void *output_ptr = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ ka.output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
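+ // Flag bits consumed by the assembly below: bit 0 (0x1) accumulate into the
+ // existing output, bit 2 (0x4) indirect output, bit 3 (0x8) indirect input;
+ // the code tests bits 0 and 3 directly with tbz.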
+ __asm__ __volatile__(
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 186f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 149f\n"
+ "beq 112f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 75f\n"
+ "beq 38f\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "2:" // Height 1: Column loop
+ "tbz %x[flags], #0, 13f\n"
+ "cmp x11, #0x10\n"
+ "bge 11f\n"
+ "tbz x11, #3, 6f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "tbz x11, #2, 4f\n"
+ "ld1 { v11.4s }, [x9], #0x10\n"
+ "tbz x11, #1, 3f\n"
+ "ldr d16, [x9], #0x8\n"
+ "mov x25, #0x38\n"
+ "tbz x11, #0, 10f\n"
+ "ld1 { v16.s }[2], [x9]\n"
+ "b 10f\n"
+ "3:" // Height 1: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 10f\n"
+ "ldr s16, [x9, #0x0]\n"
+ "b 10f\n"
+ "4:" // Height 1: Partial accumulate: partial_2_8
+ "tbz x11, #1, 5f\n"
+ "ldr d11, [x9], #0x8\n"
+ "mov x25, #0x28\n"
+ "tbz x11, #0, 10f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "b 10f\n"
+ "5:" // Height 1: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 10f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "b 10f\n"
+ "6:" // Height 1: Partial accumulate: partial_4_0
+ "tbz x11, #2, 8f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "tbz x11, #1, 7f\n"
+ "ldr d10, [x9], #0x8\n"
+ "mov x25, #0x18\n"
+ "tbz x11, #0, 10f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "b 10f\n"
+ "7:" // Height 1: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 10f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "b 10f\n"
+ "8:" // Height 1: Partial accumulate: partial_2_0
+ "tbz x11, #1, 9f\n"
+ "ldr d9, [x9], #0x8\n"
+ "mov x25, #0x8\n"
+ "tbz x11, #0, 10f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "b 10f\n"
+ "9:" // Height 1: Partial accumulate: partial_1_0
+ "ldr s9, [x9, #0x0]\n"
+ "mov x25, #0x0\n"
+ "10:" // Height 1: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 12f\n"
+ "11:" // Height 1: full accumulate
+ "ldr q9, [x9, #0x0]\n"
+ "ldr q10, [x9, #0x10]\n"
+ "ldr q11, [x9, #0x20]\n"
+ "ldr q16, [x9, #0x30]\n"
+ "12:" // Height 1: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "b 14f\n"
+ "13:" // Height 1: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "14:" // Height 1: setup done
+ "mov x28, #0x0\n"
+ "15:" // Height 1: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 16f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "cbnz x28, 17f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "b 17f\n"
+ "16:" // Height 1: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "17:" // Height 1: input setup done
+ "cmp x27, #0x10\n"
+ "blt 20f\n"
+ "ldr q1, [x26, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q6, [x10, #0x10]\n"
+ "blt 19f\n"
+ "18:" // Height 1: Multiply loop: Main loop head
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "trn1 v19.2d, v1.2d, v20.2d\n"
+ "trn2 v1.2d, v1.2d, v20.2d\n"
+ ".inst 0x4e87ae68 // usmmla v8.4s, v19.16b, v7.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e86ae6c // usmmla v12.4s, v19.16b, v6.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92ae69 // usmmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91ae6d // usmmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92ae6a // usmmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91ae6e // usmmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ ".inst 0x4e92ae6b // usmmla v11.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x80]\n"
+ ".inst 0x4e91ae6f // usmmla v15.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x90]\n"
+ ".inst 0x4e92ac28 // usmmla v8.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xa0]\n"
+ ".inst 0x4e91ac2c // usmmla v12.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xb0]\n"
+ ".inst 0x4e92ac29 // usmmla v9.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xc0]\n"
+ ".inst 0x4e91ac2d // usmmla v13.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xd0]\n"
+ ".inst 0x4e92ac2a // usmmla v10.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xe0]\n"
+ ".inst 0x4e91ac2e // usmmla v14.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e92ac2b // usmmla v11.4s, v1.16b, v18.16b\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x4e91ac2f // usmmla v15.4s, v1.16b, v17.16b\n"
+ "ldr q1, [x26, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "bge 18b\n"
+ "19:" // Height 1: Multiply loop: Single iteration only
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "trn1 v19.2d, v1.2d, v17.2d\n"
+ "trn2 v1.2d, v1.2d, v17.2d\n"
+ ".inst 0x4e87ae68 // usmmla v8.4s, v19.16b, v7.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e86ae6c // usmmla v12.4s, v19.16b, v6.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92ae69 // usmmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91ae6d // usmmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92ae6a // usmmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q20, [x10, #0x60]\n"
+ ".inst 0x4e91ae6e // usmmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q18, [x10, #0x70]\n"
+ ".inst 0x4e94ae6b // usmmla v11.4s, v19.16b, v20.16b\n"
+ "ldr q17, [x10, #0x80]\n"
+ ".inst 0x4e92ae6f // usmmla v15.4s, v19.16b, v18.16b\n"
+ "ldr q20, [x10, #0x90]\n"
+ ".inst 0x4e91ac28 // usmmla v8.4s, v1.16b, v17.16b\n"
+ "ldr q18, [x10, #0xa0]\n"
+ ".inst 0x4e94ac2c // usmmla v12.4s, v1.16b, v20.16b\n"
+ "ldr q17, [x10, #0xb0]\n"
+ ".inst 0x4e92ac29 // usmmla v9.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xc0]\n"
+ ".inst 0x4e91ac2d // usmmla v13.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xd0]\n"
+ ".inst 0x4e92ac2a // usmmla v10.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xe0]\n"
+ ".inst 0x4e91ac2e // usmmla v14.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e92ac2b // usmmla v11.4s, v1.16b, v18.16b\n"
+ ".inst 0x4e91ac2f // usmmla v15.4s, v1.16b, v17.16b\n"
+ "20:" // Height 1: Multiply loop: Main loop skip
+ "cbz x27, 27f\n"
+ "cmp x27, #0x8\n"
+ "blt 22f\n"
+ "21:" // Height 1: Multiply loop: Odd block loop
+ "ldr d19, [x26], #0x8\n"
+ "ldr q20, [x10, #0x0]\n"
+ "sub x27, x27, #0x8\n"
+ "ldr q17, [x10, #0x10]\n"
+ "cmp x27, #0x8\n"
+ "trn1 v19.2d, v19.2d, v18.2d\n"
+ ".inst 0x4e94ae68 // usmmla v8.4s, v19.16b, v20.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e91ae6c // usmmla v12.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92ae69 // usmmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91ae6d // usmmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92ae6a // usmmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91ae6e // usmmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e92ae6b // usmmla v11.4s, v19.16b, v18.16b\n"
+ ".inst 0x4e91ae6f // usmmla v15.4s, v19.16b, v17.16b\n"
+ "bge 21b\n"
+ "22:" // Height 1: Multiply loop: Skip odd blocks
+ "cbz x27, 27f\n"
+ "tbz x27, #2, 24f\n"
+ "ldr s1, [x26], #0x4\n"
+ "tbz x27, #1, 23f\n"
+ "ld1 { v1.h }[2], [x26], #0x2\n"
+ "tbz x27, #0, 26f\n"
+ "ld1 { v1.b }[6], [x26]\n"
+ "b 26f\n"
+ "23:" // Height 1: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x27, #0, 26f\n"
+ "ld1 { v1.b }[4], [x26]\n"
+ "b 26f\n"
+ "24:" // Height 1: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x27, #1, 25f\n"
+ "ldr h1, [x26], #0x2\n"
+ "tbz x27, #0, 26f\n"
+ "ld1 { v1.b }[2], [x26]\n"
+ "b 26f\n"
+ "25:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x26, #0x0]\n"
+ "26:" // Height 1: Multiply loop: Ragged operand read: Done
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
+ "trn1 v19.2d, v1.2d, v17.2d\n"
+ ".inst 0x4e98ae68 // usmmla v8.4s, v19.16b, v24.16b\n"
+ "ldr q17, [x10, #0x20]\n"
+ ".inst 0x4e94ae6c // usmmla v12.4s, v19.16b, v20.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e91ae69 // usmmla v9.4s, v19.16b, v17.16b\n"
+ "ldr q20, [x10, #0x40]\n"
+ ".inst 0x4e80ae6d // usmmla v13.4s, v19.16b, v0.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e94ae6a // usmmla v10.4s, v19.16b, v20.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91ae6e // usmmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e92ae6b // usmmla v11.4s, v19.16b, v18.16b\n"
+ ".inst 0x4e91ae6f // usmmla v15.4s, v19.16b, v17.16b\n"
+ "27:" // Height 1: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 15b\n"
+ "cmp x11, #0x10\n"
+ "uzp1 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v10.2d, v10.2d, v14.2d\n"
+ "uzp1 v11.2d, v11.2d, v15.2d\n"
+ "bge 36f\n"
+ "tbz x11, #3, 31f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "st1 { v9.4s }, [x9], #0x10\n"
+ "tbz x11, #2, 29f\n"
+ "st1 { v10.4s }, [x9], #0x10\n"
+ "tbz x11, #1, 28f\n"
+ "str d11, [x9], #0x8\n"
+ "tbz x11, #0, 35f\n"
+ "st1 { v11.s }[2], [x9]\n"
+ "b 35f\n"
+ "28:" // Height 1: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 35f\n"
+ "str s11, [x9, #0x0]\n"
+ "b 35f\n"
+ "29:" // Height 1: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 30f\n"
+ "str d10, [x9], #0x8\n"
+ "tbz x11, #0, 35f\n"
+ "st1 { v10.s }[2], [x9]\n"
+ "b 35f\n"
+ "30:" // Height 1: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 35f\n"
+ "str s10, [x9, #0x0]\n"
+ "b 35f\n"
+ "31:" // Height 1: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 33f\n"
+ "st1 { v8.4s }, [x9], #0x10\n"
+ "tbz x11, #1, 32f\n"
+ "str d9, [x9], #0x8\n"
+ "tbz x11, #0, 35f\n"
+ "st1 { v9.s }[2], [x9]\n"
+ "b 35f\n"
+ "32:" // Height 1: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 35f\n"
+ "str s9, [x9, #0x0]\n"
+ "b 35f\n"
+ "33:" // Height 1: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 34f\n"
+ "str d8, [x9], #0x8\n"
+ "tbz x11, #0, 35f\n"
+ "st1 { v8.s }[2], [x9]\n"
+ "b 35f\n"
+ "34:" // Height 1: Partial direct writeback: partial_1_0
+ "str s8, [x9, #0x0]\n"
+ "35:" // Height 1: Partial direct writeback: Done
+ "b 37f\n"
+ "36:" // Height 1: Full writeback
+ "str q8, [x9, #0x0]\n"
+ "str q9, [x9, #0x10]\n"
+ "str q10, [x9, #0x20]\n"
+ "str q11, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "37:" // Height 1: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 2b\n"
+ "b 224f\n"
+ "38:" // Height 2
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "39:" // Height 2: Column loop
+ "tbz %x[flags], #0, 50f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "bge 48f\n"
+ "tbz x11, #3, 43f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "tbz x11, #2, 41f\n"
+ "ld1 { v11.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "tbz x11, #1, 40f\n"
+ "ldr d16, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "tbz x11, #0, 47f\n"
+ "ld1 { v16.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "b 47f\n"
+ "40:" // Height 2: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 47f\n"
+ "ldr s16, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "b 47f\n"
+ "41:" // Height 2: Partial accumulate: partial_2_8
+ "tbz x11, #1, 42f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "tbz x11, #0, 47f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "b 47f\n"
+ "42:" // Height 2: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 47f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "b 47f\n"
+ "43:" // Height 2: Partial accumulate: partial_4_0
+ "tbz x11, #2, 45f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "tbz x11, #1, 44f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "tbz x11, #0, 47f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "b 47f\n"
+ "44:" // Height 2: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 47f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "b 47f\n"
+ "45:" // Height 2: Partial accumulate: partial_2_0
+ "tbz x11, #1, 46f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "tbz x11, #0, 47f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "b 47f\n"
+ "46:" // Height 2: Partial accumulate: partial_1_0
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "47:" // Height 2: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 49f\n"
+ "48:" // Height 2: full accumulate
+ "ldr q9, [x9, #0x0]\n"
+ "ldr q10, [x9, #0x10]\n"
+ "ldr q11, [x9, #0x20]\n"
+ "ldr q16, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "49:" // Height 2: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "b 51f\n"
+ "50:" // Height 2: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "51:" // Height 2: setup done
+ "mov x28, #0x0\n"
+ "52:" // Height 2: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 53f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "cbnz x28, 54f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "b 54f\n"
+ "53:" // Height 2: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "54:" // Height 2: input setup done
+ "cmp x27, #0x10\n"
+ "blt 57f\n"
+ "ldr q1, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "blt 56f\n"
+ "55:" // Height 2: Multiply loop: Main loop head
+ "trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4e87ae68 // usmmla v8.4s, v19.16b, v7.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e86ae6c // usmmla v12.4s, v19.16b, v6.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92ae69 // usmmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91ae6d // usmmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92ae6a // usmmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91ae6e // usmmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ ".inst 0x4e92ae6b // usmmla v11.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x80]\n"
+ ".inst 0x4e91ae6f // usmmla v15.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x90]\n"
+ ".inst 0x4e92ac28 // usmmla v8.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xa0]\n"
+ ".inst 0x4e91ac2c // usmmla v12.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xb0]\n"
+ ".inst 0x4e92ac29 // usmmla v9.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xc0]\n"
+ ".inst 0x4e91ac2d // usmmla v13.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xd0]\n"
+ ".inst 0x4e92ac2a // usmmla v10.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xe0]\n"
+ ".inst 0x4e91ac2e // usmmla v14.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e92ac2b // usmmla v11.4s, v1.16b, v18.16b\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x4e91ac2f // usmmla v15.4s, v1.16b, v17.16b\n"
+ "ldr q1, [x26, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "bge 55b\n"
+ "56:" // Height 2: Multiply loop: Single iteration only
+ "trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4e87ae68 // usmmla v8.4s, v19.16b, v7.16b\n"
+ "ldr q18, [x10, #0x20]\n"
+ ".inst 0x4e86ae6c // usmmla v12.4s, v19.16b, v6.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e92ae69 // usmmla v9.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91ae6d // usmmla v13.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92ae6a // usmmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91ae6e // usmmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ ".inst 0x4e92ae6b // usmmla v11.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x80]\n"
+ ".inst 0x4e91ae6f // usmmla v15.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x90]\n"
+ ".inst 0x4e92ac28 // usmmla v8.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xa0]\n"
+ ".inst 0x4e91ac2c // usmmla v12.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xb0]\n"
+ ".inst 0x4e92ac29 // usmmla v9.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xc0]\n"
+ ".inst 0x4e91ac2d // usmmla v13.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xd0]\n"
+ ".inst 0x4e92ac2a // usmmla v10.4s, v1.16b, v18.16b\n"
+ "ldr q18, [x10, #0xe0]\n"
+ ".inst 0x4e91ac2e // usmmla v14.4s, v1.16b, v17.16b\n"
+ "ldr q17, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e92ac2b // usmmla v11.4s, v1.16b, v18.16b\n"
+ ".inst 0x4e91ac2f // usmmla v15.4s, v1.16b, v17.16b\n"
+ "57:" // Height 2: Multiply loop: Main loop skip
+ "cbz x27, 64f\n"
+ "cmp x27, #0x8\n"
+ "blt 59f\n"
+ "58:" // Height 2: Multiply loop: Odd block loop
+ "ldr d20, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
+ "sub x27, x27, #0x8\n"
+ "ldr q18, [x10, #0x0]\n"
+ "ldr q17, [x10, #0x10]\n"
+ "cmp x27, #0x8\n"
+ "trn1 v22.2d, v20.2d, v19.2d\n"
+ ".inst 0x4e92aec8 // usmmla v8.4s, v22.16b, v18.16b\n"
+ "ldr q2, [x10, #0x20]\n"
+ ".inst 0x4e91aecc // usmmla v12.4s, v22.16b, v17.16b\n"
+ "ldr q17, [x10, #0x30]\n"
+ ".inst 0x4e82aec9 // usmmla v9.4s, v22.16b, v2.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e91aecd // usmmla v13.4s, v22.16b, v17.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92aeca // usmmla v10.4s, v22.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91aece // usmmla v14.4s, v22.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e92aecb // usmmla v11.4s, v22.16b, v18.16b\n"
+ ".inst 0x4e91aecf // usmmla v15.4s, v22.16b, v17.16b\n"
+ "bge 58b\n"
+ "59:" // Height 2: Multiply loop: Skip odd blocks
+ "cbz x27, 64f\n"
+ "tbz x27, #2, 61f\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "tbz x27, #1, 60f\n"
+ "ld1 { v1.h }[2], [x26], #0x2\n"
+ "ld1 { v2.h }[2], [x25], #0x2\n"
+ "tbz x27, #0, 63f\n"
+ "ld1 { v1.b }[6], [x26]\n"
+ "ld1 { v2.b }[6], [x25]\n"
+ "b 63f\n"
+ "60:" // Height 2: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x27, #0, 63f\n"
+ "ld1 { v1.b }[4], [x26]\n"
+ "ld1 { v2.b }[4], [x25]\n"
+ "b 63f\n"
+ "61:" // Height 2: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x27, #1, 62f\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h2, [x25], #0x2\n"
+ "tbz x27, #0, 63f\n"
+ "ld1 { v1.b }[2], [x26]\n"
+ "ld1 { v2.b }[2], [x25]\n"
+ "b 63f\n"
+ "62:" // Height 2: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x26, #0x0]\n"
+ "ldr b2, [x25, #0x0]\n"
+ "63:" // Height 2: Multiply loop: Ragged operand read: Done
+ "ldr q18, [x10, #0x0]\n"
+ "ldr q17, [x10, #0x10]\n"
+ "trn1 v19.2d, v1.2d, v2.2d\n"
+ ".inst 0x4e92ae68 // usmmla v8.4s, v19.16b, v18.16b\n"
+ "ldr q5, [x10, #0x20]\n"
+ ".inst 0x4e91ae6c // usmmla v12.4s, v19.16b, v17.16b\n"
+ "ldr q21, [x10, #0x30]\n"
+ ".inst 0x4e85ae69 // usmmla v9.4s, v19.16b, v5.16b\n"
+ "ldr q18, [x10, #0x40]\n"
+ ".inst 0x4e95ae6d // usmmla v13.4s, v19.16b, v21.16b\n"
+ "ldr q17, [x10, #0x50]\n"
+ ".inst 0x4e92ae6a // usmmla v10.4s, v19.16b, v18.16b\n"
+ "ldr q18, [x10, #0x60]\n"
+ ".inst 0x4e91ae6e // usmmla v14.4s, v19.16b, v17.16b\n"
+ "ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e92ae6b // usmmla v11.4s, v19.16b, v18.16b\n"
+ ".inst 0x4e91ae6f // usmmla v15.4s, v19.16b, v17.16b\n"
+ "64:" // Height 2: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 52b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "bge 73f\n"
+ "tbz x11, #3, 68f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "tbz x11, #2, 66f\n"
+ "st1 { v13.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "tbz x11, #1, 65f\n"
+ "str d14, [x9], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "tbz x11, #0, 72f\n"
+ "st1 { v14.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "b 72f\n"
+ "65:" // Height 2: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 72f\n"
+ "str s14, [x9, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "b 72f\n"
+ "66:" // Height 2: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 67f\n"
+ "str d13, [x9], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "tbz x11, #0, 72f\n"
+ "st1 { v13.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "b 72f\n"
+ "67:" // Height 2: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 72f\n"
+ "str s13, [x9, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "b 72f\n"
+ "68:" // Height 2: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 70f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "tbz x11, #1, 69f\n"
+ "str d12, [x9], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "tbz x11, #0, 72f\n"
+ "st1 { v12.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "b 72f\n"
+ "69:" // Height 2: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 72f\n"
+ "str s12, [x9, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "b 72f\n"
+ "70:" // Height 2: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 71f\n"
+ "str d7, [x9], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "tbz x11, #0, 72f\n"
+ "st1 { v7.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "b 72f\n"
+ "71:" // Height 2: Partial direct writeback: partial_1_0
+ "str s7, [x9, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "72:" // Height 2: Partial direct writeback: Done
+ "b 74f\n"
+ "73:" // Height 2: Full writeback
+ "str q7, [x9, #0x0]\n"
+ "str q12, [x9, #0x10]\n"
+ "str q13, [x9, #0x20]\n"
+ "str q14, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "74:" // Height 2: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 39b\n"
+ "b 224f\n"
+ "75:" // Height 3
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "76:" // Height 3: Column loop
+ "tbz %x[flags], #0, 87f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "bge 85f\n"
+ "tbz x11, #3, 80f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "tbz x11, #2, 78f\n"
+ "ld1 { v11.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "tbz x11, #1, 77f\n"
+ "ldr d16, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "tbz x11, #0, 84f\n"
+ "ld1 { v16.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "b 84f\n"
+ "77:" // Height 3: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 84f\n"
+ "ldr s16, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "b 84f\n"
+ "78:" // Height 3: Partial accumulate: partial_2_8
+ "tbz x11, #1, 79f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "tbz x11, #0, 84f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "b 84f\n"
+ "79:" // Height 3: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 84f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "b 84f\n"
+ "80:" // Height 3: Partial accumulate: partial_4_0
+ "tbz x11, #2, 82f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "tbz x11, #1, 81f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "tbz x11, #0, 84f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "b 84f\n"
+ "81:" // Height 3: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 84f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "b 84f\n"
+ "82:" // Height 3: Partial accumulate: partial_2_0
+ "tbz x11, #1, 83f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "tbz x11, #0, 84f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "b 84f\n"
+ "83:" // Height 3: Partial accumulate: partial_1_0
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "84:" // Height 3: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 86f\n"
+ "85:" // Height 3: full accumulate
+ "ldr q9, [x9, #0x0]\n"
+ "ldr q10, [x9, #0x10]\n"
+ "ldr q11, [x9, #0x20]\n"
+ "ldr q16, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "86:" // Height 3: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "zip1 v16.2d, v17.2d, v20.2d\n"
+ "zip2 v20.2d, v17.2d, v20.2d\n"
+ "zip1 v17.2d, v18.2d, v21.2d\n"
+ "zip2 v21.2d, v18.2d, v21.2d\n"
+ "zip1 v18.2d, v19.2d, v22.2d\n"
+ "zip2 v22.2d, v19.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v23.2d\n"
+ "zip2 v23.2d, v24.2d, v23.2d\n"
+ "b 88f\n"
+ "87:" // Height 3: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "88:" // Height 3: setup done
+ "mov x28, #0x0\n"
+ "89:" // Height 3: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 90f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "cbnz x28, 91f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "b 91f\n"
+ "90:" // Height 3: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "91:" // Height 3: input setup done
+ "cmp x27, #0x10\n"
+ "blt 94f\n"
+ "ldr q1, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "blt 93f\n"
+ "92:" // Height 3: Multiply loop: Main loop head
+ "trn1 v28.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
+ ".inst 0x4e87af88 // usmmla v8.4s, v28.16b, v7.16b\n"
+ ".inst 0x4e86af8c // usmmla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
+ ".inst 0x4e87af70 // usmmla v16.4s, v27.16b, v7.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e86af74 // usmmla v20.4s, v27.16b, v6.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aaf89 // usmmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf71 // usmmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99af8d // usmmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af75 // usmmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aaf8a // usmmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf72 // usmmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99af8e // usmmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af76 // usmmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aaf8b // usmmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf73 // usmmla v19.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x80]\n"
+ ".inst 0x4e99af8f // usmmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af77 // usmmla v23.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x90]\n"
+ "ldr q2, [x25, #0x0]\n"
+ ".inst 0x4e9aac28 // usmmla v8.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac70 // usmmla v16.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xa0]\n"
+ ".inst 0x4e99ac2c // usmmla v12.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac74 // usmmla v20.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xb0]\n"
+ ".inst 0x4e9aac29 // usmmla v9.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac71 // usmmla v17.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xc0]\n"
+ ".inst 0x4e99ac2d // usmmla v13.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac75 // usmmla v21.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xd0]\n"
+ ".inst 0x4e9aac2a // usmmla v10.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac72 // usmmla v18.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xe0]\n"
+ ".inst 0x4e99ac2e // usmmla v14.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac76 // usmmla v22.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e9aac2b // usmmla v11.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac73 // usmmla v19.4s, v3.16b, v26.16b\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x4e99ac2f // usmmla v15.4s, v1.16b, v25.16b\n"
+ "ldr q1, [x26, #0x0]\n"
+ ".inst 0x4e99ac77 // usmmla v23.4s, v3.16b, v25.16b\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "bge 92b\n"
+ "93:" // Height 3: Multiply loop: Single iteration only
+ "trn1 v28.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
+ ".inst 0x4e87af88 // usmmla v8.4s, v28.16b, v7.16b\n"
+ ".inst 0x4e86af8c // usmmla v12.4s, v28.16b, v6.16b\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
+ ".inst 0x4e87af70 // usmmla v16.4s, v27.16b, v7.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e86af74 // usmmla v20.4s, v27.16b, v6.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aaf89 // usmmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf71 // usmmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99af8d // usmmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af75 // usmmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aaf8a // usmmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf72 // usmmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99af8e // usmmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af76 // usmmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aaf8b // usmmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf73 // usmmla v19.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x80]\n"
+ ".inst 0x4e99af8f // usmmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af77 // usmmla v23.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x90]\n"
+ ".inst 0x4e9aac28 // usmmla v8.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac70 // usmmla v16.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xa0]\n"
+ ".inst 0x4e99ac2c // usmmla v12.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac74 // usmmla v20.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xb0]\n"
+ ".inst 0x4e9aac29 // usmmla v9.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac71 // usmmla v17.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xc0]\n"
+ ".inst 0x4e99ac2d // usmmla v13.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac75 // usmmla v21.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xd0]\n"
+ ".inst 0x4e9aac2a // usmmla v10.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac72 // usmmla v18.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xe0]\n"
+ ".inst 0x4e99ac2e // usmmla v14.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac76 // usmmla v22.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e9aac2b // usmmla v11.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac73 // usmmla v19.4s, v3.16b, v26.16b\n"
+ ".inst 0x4e99ac2f // usmmla v15.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac77 // usmmla v23.4s, v3.16b, v25.16b\n"
+ "94:" // Height 3: Multiply loop: Main loop skip
+ "cbz x27, 101f\n"
+ "cmp x27, #0x8\n"
+ "blt 96f\n"
+ "95:" // Height 3: Multiply loop: Odd block loop
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "sub x27, x27, #0x8\n"
+ "ldr d27, [x24], #0x8\n"
+ "ldr q26, [x10, #0x0]\n"
+ "cmp x27, #0x8\n"
+ "ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v27.2d, v29.2d\n"
+ ".inst 0x4e9aaf88 // usmmla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf70 // usmmla v16.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e99af8c // usmmla v12.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af74 // usmmla v20.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aaf89 // usmmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf71 // usmmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99af8d // usmmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af75 // usmmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aaf8a // usmmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf72 // usmmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99af8e // usmmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af76 // usmmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e9aaf8b // usmmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf73 // usmmla v19.4s, v27.16b, v26.16b\n"
+ ".inst 0x4e99af8f // usmmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af77 // usmmla v23.4s, v27.16b, v25.16b\n"
+ "bge 95b\n"
+ "96:" // Height 3: Multiply loop: Skip odd blocks
+ "cbz x27, 101f\n"
+ "tbz x27, #2, 98f\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "tbz x27, #1, 97f\n"
+ "ld1 { v1.h }[2], [x26], #0x2\n"
+ "ld1 { v2.h }[2], [x25], #0x2\n"
+ "ld1 { v3.h }[2], [x24], #0x2\n"
+ "tbz x27, #0, 100f\n"
+ "ld1 { v1.b }[6], [x26]\n"
+ "ld1 { v2.b }[6], [x25]\n"
+ "ld1 { v3.b }[6], [x24]\n"
+ "b 100f\n"
+ "97:" // Height 3: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x27, #0, 100f\n"
+ "ld1 { v1.b }[4], [x26]\n"
+ "ld1 { v2.b }[4], [x25]\n"
+ "ld1 { v3.b }[4], [x24]\n"
+ "b 100f\n"
+ "98:" // Height 3: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x27, #1, 99f\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h2, [x25], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "tbz x27, #0, 100f\n"
+ "ld1 { v1.b }[2], [x26]\n"
+ "ld1 { v2.b }[2], [x25]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "b 100f\n"
+ "99:" // Height 3: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x26, #0x0]\n"
+ "ldr b2, [x25, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "100:" // Height 3: Multiply loop: Ragged operand read: Done
+ "ldr q26, [x10, #0x0]\n"
+ "ldr q29, [x10, #0x10]\n"
+ "trn1 v28.2d, v1.2d, v2.2d\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
+ ".inst 0x4e9aaf88 // usmmla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9daf8c // usmmla v12.4s, v28.16b, v29.16b\n"
+ ".inst 0x4e9aaf70 // usmmla v16.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e9daf74 // usmmla v20.4s, v27.16b, v29.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aaf89 // usmmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf71 // usmmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99af8d // usmmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af75 // usmmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aaf8a // usmmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf72 // usmmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99af8e // usmmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af76 // usmmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e9aaf8b // usmmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf73 // usmmla v19.4s, v27.16b, v26.16b\n"
+ ".inst 0x4e99af8f // usmmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af77 // usmmla v23.4s, v27.16b, v25.16b\n"
+ "101:" // Height 3: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 89b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "uzp1 v16.2d, v16.2d, v20.2d\n"
+ "uzp1 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v18.2d, v18.2d, v22.2d\n"
+ "uzp1 v19.2d, v19.2d, v23.2d\n"
+ "bge 110f\n"
+ "tbz x11, #3, 105f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "st1 { v17.4s }, [x23], #0x10\n"
+ "tbz x11, #2, 103f\n"
+ "st1 { v13.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v18.4s }, [x23], #0x10\n"
+ "tbz x11, #1, 102f\n"
+ "str d14, [x9], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d19, [x23], #0x8\n"
+ "tbz x11, #0, 109f\n"
+ "st1 { v14.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v19.s }[2], [x23]\n"
+ "b 109f\n"
+ "102:" // Height 3: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 109f\n"
+ "str s14, [x9, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s19, [x23, #0x0]\n"
+ "b 109f\n"
+ "103:" // Height 3: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 104f\n"
+ "str d13, [x9], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d18, [x23], #0x8\n"
+ "tbz x11, #0, 109f\n"
+ "st1 { v13.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v18.s }[2], [x23]\n"
+ "b 109f\n"
+ "104:" // Height 3: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 109f\n"
+ "str s13, [x9, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s18, [x23, #0x0]\n"
+ "b 109f\n"
+ "105:" // Height 3: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 107f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v16.4s }, [x23], #0x10\n"
+ "tbz x11, #1, 106f\n"
+ "str d12, [x9], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d17, [x23], #0x8\n"
+ "tbz x11, #0, 109f\n"
+ "st1 { v12.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v17.s }[2], [x23]\n"
+ "b 109f\n"
+ "106:" // Height 3: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 109f\n"
+ "str s12, [x9, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s17, [x23, #0x0]\n"
+ "b 109f\n"
+ "107:" // Height 3: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 108f\n"
+ "str d7, [x9], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d16, [x23], #0x8\n"
+ "tbz x11, #0, 109f\n"
+ "st1 { v7.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v16.s }[2], [x23]\n"
+ "b 109f\n"
+ "108:" // Height 3: Partial direct writeback: partial_1_0
+ "str s7, [x9, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
+ "109:" // Height 3: Partial direct writeback: Done
+ "b 111f\n"
+ "110:" // Height 3: Full writeback
+ "str q7, [x9, #0x0]\n"
+ "str q12, [x9, #0x10]\n"
+ "str q13, [x9, #0x20]\n"
+ "str q14, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q18, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "111:" // Height 3: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 76b\n"
+ "b 224f\n"
+ "112:" // Height 4
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "113:" // Height 4: Column loop
+ "tbz %x[flags], #0, 124f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "bge 122f\n"
+ "tbz x11, #3, 117f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "tbz x11, #2, 115f\n"
+ "ld1 { v11.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "tbz x11, #1, 114f\n"
+ "ldr d16, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "tbz x11, #0, 121f\n"
+ "ld1 { v16.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "b 121f\n"
+ "114:" // Height 4: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 121f\n"
+ "ldr s16, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "b 121f\n"
+ "115:" // Height 4: Partial accumulate: partial_2_8
+ "tbz x11, #1, 116f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "tbz x11, #0, 121f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "b 121f\n"
+ "116:" // Height 4: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 121f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "b 121f\n"
+ "117:" // Height 4: Partial accumulate: partial_4_0
+ "tbz x11, #2, 119f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "tbz x11, #1, 118f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "tbz x11, #0, 121f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "b 121f\n"
+ "118:" // Height 4: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 121f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "b 121f\n"
+ "119:" // Height 4: Partial accumulate: partial_2_0
+ "tbz x11, #1, 120f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "tbz x11, #0, 121f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "b 121f\n"
+ "120:" // Height 4: Partial accumulate: partial_1_0
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "121:" // Height 4: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 123f\n"
+ "122:" // Height 4: full accumulate
+ "ldr q9, [x9, #0x0]\n"
+ "ldr q10, [x9, #0x10]\n"
+ "ldr q11, [x9, #0x20]\n"
+ "ldr q16, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "123:" // Height 4: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "zip1 v16.2d, v17.2d, v20.2d\n"
+ "zip2 v20.2d, v17.2d, v20.2d\n"
+ "zip1 v17.2d, v18.2d, v21.2d\n"
+ "zip2 v21.2d, v18.2d, v21.2d\n"
+ "zip1 v18.2d, v19.2d, v22.2d\n"
+ "zip2 v22.2d, v19.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v23.2d\n"
+ "zip2 v23.2d, v24.2d, v23.2d\n"
+ "b 125f\n"
+ "124:" // Height 4: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "125:" // Height 4: setup done
+ "mov x28, #0x0\n"
+ "126:" // Height 4: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 127f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "cbnz x28, 128f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "b 128f\n"
+ "127:" // Height 4: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "128:" // Height 4: input setup done
+ "cmp x27, #0x10\n"
+ "blt 131f\n"
+ "ldr q1, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "blt 130f\n"
+ "129:" // Height 4: Multiply loop: Main loop head
+ "trn1 v28.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q4, [x23, #0x0]\n"
+ ".inst 0x4e87af88 // usmmla v8.4s, v28.16b, v7.16b\n"
+ ".inst 0x4e86af8c // usmmla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4e87af70 // usmmla v16.4s, v27.16b, v7.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e86af74 // usmmla v20.4s, v27.16b, v6.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4e9aaf89 // usmmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf71 // usmmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99af8d // usmmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af75 // usmmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aaf8a // usmmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf72 // usmmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99af8e // usmmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af76 // usmmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aaf8b // usmmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf73 // usmmla v19.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x80]\n"
+ ".inst 0x4e99af8f // usmmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af77 // usmmla v23.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x90]\n"
+ "ldr q2, [x25, #0x0]\n"
+ ".inst 0x4e9aac28 // usmmla v8.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac70 // usmmla v16.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xa0]\n"
+ ".inst 0x4e99ac2c // usmmla v12.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac74 // usmmla v20.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xb0]\n"
+ ".inst 0x4e9aac29 // usmmla v9.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac71 // usmmla v17.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xc0]\n"
+ ".inst 0x4e99ac2d // usmmla v13.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac75 // usmmla v21.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xd0]\n"
+ ".inst 0x4e9aac2a // usmmla v10.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac72 // usmmla v18.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xe0]\n"
+ ".inst 0x4e99ac2e // usmmla v14.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac76 // usmmla v22.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e9aac2b // usmmla v11.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac73 // usmmla v19.4s, v3.16b, v26.16b\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x4e99ac2f // usmmla v15.4s, v1.16b, v25.16b\n"
+ "ldr q1, [x26, #0x0]\n"
+ ".inst 0x4e99ac77 // usmmla v23.4s, v3.16b, v25.16b\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q6, [x10, #0x10]\n"
+ "bge 129b\n"
+ "130:" // Height 4: Multiply loop: Single iteration only
+ "trn1 v28.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ ".inst 0x4e87af88 // usmmla v8.4s, v28.16b, v7.16b\n"
+ ".inst 0x4e86af8c // usmmla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4e87af70 // usmmla v16.4s, v27.16b, v7.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e86af74 // usmmla v20.4s, v27.16b, v6.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aaf89 // usmmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf71 // usmmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99af8d // usmmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af75 // usmmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aaf8a // usmmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf72 // usmmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99af8e // usmmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af76 // usmmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ ".inst 0x4e9aaf8b // usmmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf73 // usmmla v19.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x80]\n"
+ ".inst 0x4e99af8f // usmmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af77 // usmmla v23.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x90]\n"
+ ".inst 0x4e9aac28 // usmmla v8.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac70 // usmmla v16.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xa0]\n"
+ ".inst 0x4e99ac2c // usmmla v12.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac74 // usmmla v20.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xb0]\n"
+ ".inst 0x4e9aac29 // usmmla v9.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac71 // usmmla v17.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xc0]\n"
+ ".inst 0x4e99ac2d // usmmla v13.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac75 // usmmla v21.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xd0]\n"
+ ".inst 0x4e9aac2a // usmmla v10.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac72 // usmmla v18.4s, v3.16b, v26.16b\n"
+ "ldr q26, [x10, #0xe0]\n"
+ ".inst 0x4e99ac2e // usmmla v14.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac76 // usmmla v22.4s, v3.16b, v25.16b\n"
+ "ldr q25, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e9aac2b // usmmla v11.4s, v1.16b, v26.16b\n"
+ ".inst 0x4e9aac73 // usmmla v19.4s, v3.16b, v26.16b\n"
+ ".inst 0x4e99ac2f // usmmla v15.4s, v1.16b, v25.16b\n"
+ ".inst 0x4e99ac77 // usmmla v23.4s, v3.16b, v25.16b\n"
+ "131:" // Height 4: Multiply loop: Main loop skip
+ "cbz x27, 138f\n"
+ "cmp x27, #0x8\n"
+ "blt 133f\n"
+ "132:" // Height 4: Multiply loop: Odd block loop
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "sub x27, x27, #0x8\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
+ "cmp x27, #0x8\n"
+ "ldr q26, [x10, #0x0]\n"
+ "ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v29.2d, v27.2d\n"
+ ".inst 0x4e9aaf88 // usmmla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf70 // usmmla v16.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e99af8c // usmmla v12.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af74 // usmmla v20.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aaf89 // usmmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf71 // usmmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99af8d // usmmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af75 // usmmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aaf8a // usmmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf72 // usmmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99af8e // usmmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af76 // usmmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e9aaf8b // usmmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf73 // usmmla v19.4s, v27.16b, v26.16b\n"
+ ".inst 0x4e99af8f // usmmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af77 // usmmla v23.4s, v27.16b, v25.16b\n"
+ "bge 132b\n"
+ "133:" // Height 4: Multiply loop: Skip odd blocks
+ "cbz x27, 138f\n"
+ "tbz x27, #2, 135f\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x23], #0x4\n"
+ "tbz x27, #1, 134f\n"
+ "ld1 { v1.h }[2], [x26], #0x2\n"
+ "ld1 { v2.h }[2], [x25], #0x2\n"
+ "ld1 { v3.h }[2], [x24], #0x2\n"
+ "ld1 { v4.h }[2], [x23], #0x2\n"
+ "tbz x27, #0, 137f\n"
+ "ld1 { v1.b }[6], [x26]\n"
+ "ld1 { v2.b }[6], [x25]\n"
+ "ld1 { v3.b }[6], [x24]\n"
+ "ld1 { v4.b }[6], [x23]\n"
+ "b 137f\n"
+ "134:" // Height 4: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x27, #0, 137f\n"
+ "ld1 { v1.b }[4], [x26]\n"
+ "ld1 { v2.b }[4], [x25]\n"
+ "ld1 { v3.b }[4], [x24]\n"
+ "ld1 { v4.b }[4], [x23]\n"
+ "b 137f\n"
+ "135:" // Height 4: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x27, #1, 136f\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h2, [x25], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x23], #0x2\n"
+ "tbz x27, #0, 137f\n"
+ "ld1 { v1.b }[2], [x26]\n"
+ "ld1 { v2.b }[2], [x25]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x23]\n"
+ "b 137f\n"
+ "136:" // Height 4: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x26, #0x0]\n"
+ "ldr b2, [x25, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x23, #0x0]\n"
+ "137:" // Height 4: Multiply loop: Ragged operand read: Done
+ "ldr q26, [x10, #0x0]\n"
+ "ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v1.2d, v2.2d\n"
+ "trn1 v27.2d, v3.2d, v4.2d\n"
+ ".inst 0x4e9aaf88 // usmmla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf70 // usmmla v16.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x20]\n"
+ ".inst 0x4e99af8c // usmmla v12.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af74 // usmmla v20.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x30]\n"
+ ".inst 0x4e9aaf89 // usmmla v9.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf71 // usmmla v17.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x40]\n"
+ ".inst 0x4e99af8d // usmmla v13.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af75 // usmmla v21.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x50]\n"
+ ".inst 0x4e9aaf8a // usmmla v10.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf72 // usmmla v18.4s, v27.16b, v26.16b\n"
+ "ldr q26, [x10, #0x60]\n"
+ ".inst 0x4e99af8e // usmmla v14.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af76 // usmmla v22.4s, v27.16b, v25.16b\n"
+ "ldr q25, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e9aaf8b // usmmla v11.4s, v28.16b, v26.16b\n"
+ ".inst 0x4e9aaf73 // usmmla v19.4s, v27.16b, v26.16b\n"
+ ".inst 0x4e99af8f // usmmla v15.4s, v28.16b, v25.16b\n"
+ ".inst 0x4e99af77 // usmmla v23.4s, v27.16b, v25.16b\n"
+ "138:" // Height 4: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 126b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "uzp1 v15.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "bge 147f\n"
+ "tbz x11, #3, 142f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "tbz x11, #2, 140f\n"
+ "st1 { v13.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "tbz x11, #1, 139f\n"
+ "str d14, [x9], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "tbz x11, #0, 146f\n"
+ "st1 { v14.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "b 146f\n"
+ "139:" // Height 4: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 146f\n"
+ "str s14, [x9, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "b 146f\n"
+ "140:" // Height 4: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 141f\n"
+ "str d13, [x9], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "tbz x11, #0, 146f\n"
+ "st1 { v13.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "b 146f\n"
+ "141:" // Height 4: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 146f\n"
+ "str s13, [x9, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "b 146f\n"
+ "142:" // Height 4: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 144f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "tbz x11, #1, 143f\n"
+ "str d12, [x9], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "tbz x11, #0, 146f\n"
+ "st1 { v12.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "b 146f\n"
+ "143:" // Height 4: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 146f\n"
+ "str s12, [x9, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "b 146f\n"
+ "144:" // Height 4: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 145f\n"
+ "str d7, [x9], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "tbz x11, #0, 146f\n"
+ "st1 { v7.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "b 146f\n"
+ "145:" // Height 4: Partial direct writeback: partial_1_0
+ "str s7, [x9, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "146:" // Height 4: Partial direct writeback: Done
+ "b 148f\n"
+ "147:" // Height 4: Full writeback
+ "str q7, [x9, #0x0]\n"
+ "str q12, [x9, #0x10]\n"
+ "str q13, [x9, #0x20]\n"
+ "str q14, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "148:" // Height 4: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 113b\n"
+ "b 224f\n"
+ "149:" // Height 5
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "150:" // Height 5: Column loop
+ "tbz %x[flags], #0, 161f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "bge 159f\n"
+ "tbz x11, #3, 154f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "tbz x11, #2, 152f\n"
+ "ld1 { v11.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "tbz x11, #1, 151f\n"
+ "ldr d16, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
+ "tbz x11, #0, 158f\n"
+ "ld1 { v16.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
+ "b 158f\n"
+ "151:" // Height 5: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 158f\n"
+ "ldr s16, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
+ "b 158f\n"
+ "152:" // Height 5: Partial accumulate: partial_2_8
+ "tbz x11, #1, 153f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "tbz x11, #0, 158f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "b 158f\n"
+ "153:" // Height 5: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 158f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "b 158f\n"
+ "154:" // Height 5: Partial accumulate: partial_4_0
+ "tbz x11, #2, 156f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "tbz x11, #1, 155f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "tbz x11, #0, 158f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "b 158f\n"
+ "155:" // Height 5: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 158f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "b 158f\n"
+ "156:" // Height 5: Partial accumulate: partial_2_0
+ "tbz x11, #1, 157f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "tbz x11, #0, 158f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "b 158f\n"
+ "157:" // Height 5: Partial accumulate: partial_1_0
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "158:" // Height 5: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 160f\n"
+ "159:" // Height 5: full accumulate
+ "ldr q9, [x9, #0x0]\n"
+ "ldr q10, [x9, #0x10]\n"
+ "ldr q11, [x9, #0x20]\n"
+ "ldr q16, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
+ "160:" // Height 5: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "zip1 v16.2d, v17.2d, v20.2d\n"
+ "zip2 v20.2d, v17.2d, v20.2d\n"
+ "zip1 v17.2d, v18.2d, v21.2d\n"
+ "zip2 v21.2d, v18.2d, v21.2d\n"
+ "zip1 v18.2d, v19.2d, v22.2d\n"
+ "zip2 v22.2d, v19.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v23.2d\n"
+ "zip2 v23.2d, v24.2d, v23.2d\n"
+ "zip1 v24.2d, v25.2d, v28.2d\n"
+ "zip2 v28.2d, v25.2d, v28.2d\n"
+ "zip1 v25.2d, v26.2d, v29.2d\n"
+ "zip2 v29.2d, v26.2d, v29.2d\n"
+ "zip1 v26.2d, v27.2d, v30.2d\n"
+ "zip2 v30.2d, v27.2d, v30.2d\n"
+ "zip1 v27.2d, v6.2d, v31.2d\n"
+ "zip2 v31.2d, v6.2d, v31.2d\n"
+ "b 162f\n"
+ "161:" // Height 5: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ "162:" // Height 5: setup done
+ "mov x28, #0x0\n"
+ "163:" // Height 5: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 164f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "cbnz x28, 165f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "b 165f\n"
+ "164:" // Height 5: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "165:" // Height 5: input setup done
+ "cmp x27, #0x10\n"
+ "blt 168f\n"
+ "ldr q1, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
+ "ldr q5, [x22, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 167f\n"
+ "166:" // Height 5: Multiply loop: Main loop head
+ "trn1 v6.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "trn1 v4.2d, v5.2d, v0.2d\n"
+ "trn2 v5.2d, v5.2d, v0.2d\n"
+ "ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e87acc8 // usmmla v8.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87ac50 // usmmla v16.4s, v2.16b, v7.16b\n"
+ "add x22, x22, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4e87ac98 // usmmla v24.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x4e80accc // usmmla v12.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80ac54 // usmmla v20.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e80ac9c // usmmla v28.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e87acc9 // usmmla v9.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87ac51 // usmmla v17.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac99 // usmmla v25.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x40]\n"
+ ".inst 0x4e80accd // usmmla v13.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80ac55 // usmmla v21.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e80ac9d // usmmla v29.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x50]\n"
+ ".inst 0x4e87acca // usmmla v10.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87ac52 // usmmla v18.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac9a // usmmla v26.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x60]\n"
+ ".inst 0x4e80acce // usmmla v14.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80ac56 // usmmla v22.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e80ac9e // usmmla v30.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x70]\n"
+ ".inst 0x4e87accb // usmmla v11.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87ac53 // usmmla v19.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac9b // usmmla v27.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x80]\n"
+ ".inst 0x4e80accf // usmmla v15.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80ac57 // usmmla v23.4s, v2.16b, v0.16b\n"
+ "ldr q2, [x25, #0x0]\n"
+ ".inst 0x4e80ac9f // usmmla v31.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x90]\n"
+ "ldr q4, [x23, #0x0]\n"
+ ".inst 0x4e87ac28 // usmmla v8.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e87ac70 // usmmla v16.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e87acb8 // usmmla v24.4s, v5.16b, v7.16b\n"
+ "ldr q6, [x10, #0xa0]\n"
+ ".inst 0x4e80ac2c // usmmla v12.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac74 // usmmla v20.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acbc // usmmla v28.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xb0]\n"
+ ".inst 0x4e86ac29 // usmmla v9.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac71 // usmmla v17.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86acb9 // usmmla v25.4s, v5.16b, v6.16b\n"
+ "ldr q6, [x10, #0xc0]\n"
+ ".inst 0x4e80ac2d // usmmla v13.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac75 // usmmla v21.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acbd // usmmla v29.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xd0]\n"
+ ".inst 0x4e86ac2a // usmmla v10.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac72 // usmmla v18.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86acba // usmmla v26.4s, v5.16b, v6.16b\n"
+ "ldr q6, [x10, #0xe0]\n"
+ ".inst 0x4e80ac2e // usmmla v14.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac76 // usmmla v22.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acbe // usmmla v30.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e86ac2b // usmmla v11.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac73 // usmmla v19.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86acbb // usmmla v27.4s, v5.16b, v6.16b\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x4e80ac2f // usmmla v15.4s, v1.16b, v0.16b\n"
+ "ldr q1, [x26, #0x0]\n"
+ ".inst 0x4e80ac77 // usmmla v23.4s, v3.16b, v0.16b\n"
+ "ldr q3, [x24, #0x0]\n"
+ ".inst 0x4e80acbf // usmmla v31.4s, v5.16b, v0.16b\n"
+ "ldr q5, [x22, #0x0]\n"
+ "bge 166b\n"
+ "167:" // Height 5: Multiply loop: Single iteration only
+ "trn1 v6.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "trn1 v4.2d, v5.2d, v0.2d\n"
+ "trn2 v5.2d, v5.2d, v0.2d\n"
+ "ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e87acc8 // usmmla v8.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87ac50 // usmmla v16.4s, v2.16b, v7.16b\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4e87ac98 // usmmla v24.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x4e80accc // usmmla v12.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80ac54 // usmmla v20.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e80ac9c // usmmla v28.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e87acc9 // usmmla v9.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87ac51 // usmmla v17.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac99 // usmmla v25.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x40]\n"
+ ".inst 0x4e80accd // usmmla v13.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80ac55 // usmmla v21.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e80ac9d // usmmla v29.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x50]\n"
+ ".inst 0x4e87acca // usmmla v10.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87ac52 // usmmla v18.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac9a // usmmla v26.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x60]\n"
+ ".inst 0x4e80acce // usmmla v14.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80ac56 // usmmla v22.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e80ac9e // usmmla v30.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x10, #0x70]\n"
+ ".inst 0x4e87accb // usmmla v11.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e87ac53 // usmmla v19.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac9b // usmmla v27.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x80]\n"
+ ".inst 0x4e80accf // usmmla v15.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e80ac57 // usmmla v23.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e80ac9f // usmmla v31.4s, v4.16b, v0.16b\n"
+ "ldr q2, [x10, #0x90]\n"
+ ".inst 0x4e87ac28 // usmmla v8.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e87ac70 // usmmla v16.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e87acb8 // usmmla v24.4s, v5.16b, v7.16b\n"
+ "ldr q0, [x10, #0xa0]\n"
+ ".inst 0x4e82ac2c // usmmla v12.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82ac74 // usmmla v20.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82acbc // usmmla v28.4s, v5.16b, v2.16b\n"
+ "ldr q2, [x10, #0xb0]\n"
+ ".inst 0x4e80ac29 // usmmla v9.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac71 // usmmla v17.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acb9 // usmmla v25.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xc0]\n"
+ ".inst 0x4e82ac2d // usmmla v13.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82ac75 // usmmla v21.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82acbd // usmmla v29.4s, v5.16b, v2.16b\n"
+ "ldr q2, [x10, #0xd0]\n"
+ ".inst 0x4e80ac2a // usmmla v10.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac72 // usmmla v18.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acba // usmmla v26.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xe0]\n"
+ ".inst 0x4e82ac2e // usmmla v14.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82ac76 // usmmla v22.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82acbe // usmmla v30.4s, v5.16b, v2.16b\n"
+ "ldr q6, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e80ac2b // usmmla v11.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac73 // usmmla v19.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acbb // usmmla v27.4s, v5.16b, v0.16b\n"
+ ".inst 0x4e86ac2f // usmmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac77 // usmmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86acbf // usmmla v31.4s, v5.16b, v6.16b\n"
+ "168:" // Height 5: Multiply loop: Main loop skip
+ "cbz x27, 175f\n"
+ "cmp x27, #0x8\n"
+ "blt 170f\n"
+ "169:" // Height 5: Multiply loop: Odd block loop
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
+ "sub x27, x27, #0x8\n"
+ "ldr d3, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "cmp x27, #0x8\n"
+ "ldr d0, [x22], #0x8\n"
+ "ldr q1, [x10, #0x0]\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v3.2d, v2.2d\n"
+ "trn1 v2.2d, v0.2d, v5.2d\n"
+ "ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e81ac88 // usmmla v8.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81ac70 // usmmla v16.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac58 // usmmla v24.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x20]\n"
+ ".inst 0x4e80ac8c // usmmla v12.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80ac74 // usmmla v20.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5c // usmmla v28.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e81ac89 // usmmla v9.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81ac71 // usmmla v17.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac59 // usmmla v25.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x40]\n"
+ ".inst 0x4e80ac8d // usmmla v13.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80ac75 // usmmla v21.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5d // usmmla v29.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x50]\n"
+ ".inst 0x4e81ac8a // usmmla v10.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81ac72 // usmmla v18.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac5a // usmmla v26.4s, v2.16b, v1.16b\n"
+ "ldr q6, [x10, #0x60]\n"
+ ".inst 0x4e80ac8e // usmmla v14.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80ac76 // usmmla v22.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5e // usmmla v30.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e86ac8b // usmmla v11.4s, v4.16b, v6.16b\n"
+ ".inst 0x4e86ac73 // usmmla v19.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86ac5b // usmmla v27.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80ac8f // usmmla v15.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80ac77 // usmmla v23.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5f // usmmla v31.4s, v2.16b, v0.16b\n"
+ "bge 169b\n"
+ "170:" // Height 5: Multiply loop: Skip odd blocks
+ "cbz x27, 175f\n"
+ "tbz x27, #2, 172f\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x22], #0x4\n"
+ "tbz x27, #1, 171f\n"
+ "ld1 { v1.h }[2], [x26], #0x2\n"
+ "ld1 { v2.h }[2], [x25], #0x2\n"
+ "ld1 { v3.h }[2], [x24], #0x2\n"
+ "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v5.h }[2], [x22], #0x2\n"
+ "tbz x27, #0, 174f\n"
+ "ld1 { v1.b }[6], [x26]\n"
+ "ld1 { v2.b }[6], [x25]\n"
+ "ld1 { v3.b }[6], [x24]\n"
+ "ld1 { v4.b }[6], [x23]\n"
+ "ld1 { v5.b }[6], [x22]\n"
+ "b 174f\n"
+ "171:" // Height 5: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x27, #0, 174f\n"
+ "ld1 { v1.b }[4], [x26]\n"
+ "ld1 { v2.b }[4], [x25]\n"
+ "ld1 { v3.b }[4], [x24]\n"
+ "ld1 { v4.b }[4], [x23]\n"
+ "ld1 { v5.b }[4], [x22]\n"
+ "b 174f\n"
+ "172:" // Height 5: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x27, #1, 173f\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h2, [x25], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x23], #0x2\n"
+ "ldr h5, [x22], #0x2\n"
+ "tbz x27, #0, 174f\n"
+ "ld1 { v1.b }[2], [x26]\n"
+ "ld1 { v2.b }[2], [x25]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x23]\n"
+ "ld1 { v5.b }[2], [x22]\n"
+ "b 174f\n"
+ "173:" // Height 5: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x26, #0x0]\n"
+ "ldr b2, [x25, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x23, #0x0]\n"
+ "ldr b5, [x22, #0x0]\n"
+ "174:" // Height 5: Multiply loop: Ragged operand read: Done
+ "ldr q6, [x10, #0x0]\n"
+ "trn1 v7.2d, v1.2d, v2.2d\n"
+ "trn1 v3.2d, v3.2d, v4.2d\n"
+ "trn1 v2.2d, v5.2d, v0.2d\n"
+ "ldr q1, [x10, #0x10]\n"
+ ".inst 0x4e86ace8 // usmmla v8.4s, v7.16b, v6.16b\n"
+ ".inst 0x4e86ac70 // usmmla v16.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86ac58 // usmmla v24.4s, v2.16b, v6.16b\n"
+ "ldr q0, [x10, #0x20]\n"
+ ".inst 0x4e81acec // usmmla v12.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81ac74 // usmmla v20.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac5c // usmmla v28.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x30]\n"
+ ".inst 0x4e80ace9 // usmmla v9.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80ac71 // usmmla v17.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac59 // usmmla v25.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x40]\n"
+ ".inst 0x4e81aced // usmmla v13.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81ac75 // usmmla v21.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac5d // usmmla v29.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x50]\n"
+ ".inst 0x4e80acea // usmmla v10.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80ac72 // usmmla v18.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5a // usmmla v26.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x60]\n"
+ ".inst 0x4e81acee // usmmla v14.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81ac76 // usmmla v22.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac5e // usmmla v30.4s, v2.16b, v1.16b\n"
+ "ldr q6, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e80aceb // usmmla v11.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80ac73 // usmmla v19.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5b // usmmla v27.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e86acef // usmmla v15.4s, v7.16b, v6.16b\n"
+ ".inst 0x4e86ac77 // usmmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86ac5f // usmmla v31.4s, v2.16b, v6.16b\n"
+ "175:" // Height 5: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 163b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "uzp1 v15.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "uzp1 v24.2d, v24.2d, v28.2d\n"
+ "uzp1 v25.2d, v25.2d, v29.2d\n"
+ "uzp1 v26.2d, v26.2d, v30.2d\n"
+ "uzp1 v27.2d, v27.2d, v31.2d\n"
+ "bge 184f\n"
+ "tbz x11, #3, 179f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "st1 { v25.4s }, [x21], #0x10\n"
+ "tbz x11, #2, 177f\n"
+ "st1 { v13.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v26.4s }, [x21], #0x10\n"
+ "tbz x11, #1, 176f\n"
+ "str d14, [x9], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d27, [x21], #0x8\n"
+ "tbz x11, #0, 183f\n"
+ "st1 { v14.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v27.s }[2], [x21]\n"
+ "b 183f\n"
+ "176:" // Height 5: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 183f\n"
+ "str s14, [x9, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s27, [x21, #0x0]\n"
+ "b 183f\n"
+ "177:" // Height 5: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 178f\n"
+ "str d13, [x9], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d26, [x21], #0x8\n"
+ "tbz x11, #0, 183f\n"
+ "st1 { v13.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v26.s }[2], [x21]\n"
+ "b 183f\n"
+ "178:" // Height 5: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 183f\n"
+ "str s13, [x9, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s26, [x21, #0x0]\n"
+ "b 183f\n"
+ "179:" // Height 5: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 181f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v24.4s }, [x21], #0x10\n"
+ "tbz x11, #1, 180f\n"
+ "str d12, [x9], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d25, [x21], #0x8\n"
+ "tbz x11, #0, 183f\n"
+ "st1 { v12.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v25.s }[2], [x21]\n"
+ "b 183f\n"
+ "180:" // Height 5: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 183f\n"
+ "str s12, [x9, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s25, [x21, #0x0]\n"
+ "b 183f\n"
+ "181:" // Height 5: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 182f\n"
+ "str d7, [x9], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d24, [x21], #0x8\n"
+ "tbz x11, #0, 183f\n"
+ "st1 { v7.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v24.s }[2], [x21]\n"
+ "b 183f\n"
+ "182:" // Height 5: Partial direct writeback: partial_1_0
+ "str s7, [x9, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s24, [x21, #0x0]\n"
+ "183:" // Height 5: Partial direct writeback: Done
+ "b 185f\n"
+ "184:" // Height 5: Full writeback
+ "str q7, [x9, #0x0]\n"
+ "str q12, [x9, #0x10]\n"
+ "str q13, [x9, #0x20]\n"
+ "str q14, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q24, [x21, #0x0]\n"
+ "str q25, [x21, #0x10]\n"
+ "str q26, [x21, #0x20]\n"
+ "str q27, [x21, #0x30]\n"
+ "185:" // Height 5: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 150b\n"
+ "b 224f\n"
+ "186:" // Height 6
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "187:" // Height 6: Column loop
+ "tbz %x[flags], #0, 198f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "bge 196f\n"
+ "tbz x11, #3, 191f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "ld1 { v10.4s }, [x9], #0x10\n"
+ "ld1 { v13.4s }, [x24], #0x10\n"
+ "ld1 { v18.4s }, [x23], #0x10\n"
+ "ld1 { v21.4s }, [x22], #0x10\n"
+ "ld1 { v26.4s }, [x21], #0x10\n"
+ "ld1 { v29.4s }, [x20], #0x10\n"
+ "tbz x11, #2, 189f\n"
+ "ld1 { v11.4s }, [x9], #0x10\n"
+ "ld1 { v14.4s }, [x24], #0x10\n"
+ "ld1 { v19.4s }, [x23], #0x10\n"
+ "ld1 { v22.4s }, [x22], #0x10\n"
+ "ld1 { v27.4s }, [x21], #0x10\n"
+ "ld1 { v30.4s }, [x20], #0x10\n"
+ "tbz x11, #1, 188f\n"
+ "ldr d16, [x9], #0x8\n"
+ "ldr d15, [x24], #0x8\n"
+ "mov x25, #0x38\n"
+ "ldr d24, [x23], #0x8\n"
+ "ldr d23, [x22], #0x8\n"
+ "ldr d6, [x21], #0x8\n"
+ "ldr d31, [x20], #0x8\n"
+ "tbz x11, #0, 195f\n"
+ "ld1 { v16.s }[2], [x9]\n"
+ "ld1 { v15.s }[2], [x24]\n"
+ "ld1 { v24.s }[2], [x23]\n"
+ "ld1 { v23.s }[2], [x22]\n"
+ "ld1 { v6.s }[2], [x21]\n"
+ "ld1 { v31.s }[2], [x20]\n"
+ "b 195f\n"
+ "188:" // Height 6: Partial accumulate: partial_1_12
+ "mov x25, #0x30\n"
+ "tbz x11, #0, 195f\n"
+ "ldr s16, [x9, #0x0]\n"
+ "ldr s15, [x24, #0x0]\n"
+ "ldr s24, [x23, #0x0]\n"
+ "ldr s23, [x22, #0x0]\n"
+ "ldr s6, [x21, #0x0]\n"
+ "ldr s31, [x20, #0x0]\n"
+ "b 195f\n"
+ "189:" // Height 6: Partial accumulate: partial_2_8
+ "tbz x11, #1, 190f\n"
+ "ldr d11, [x9], #0x8\n"
+ "ldr d14, [x24], #0x8\n"
+ "mov x25, #0x28\n"
+ "ldr d19, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "ldr d27, [x21], #0x8\n"
+ "ldr d30, [x20], #0x8\n"
+ "tbz x11, #0, 195f\n"
+ "ld1 { v11.s }[2], [x9]\n"
+ "ld1 { v14.s }[2], [x24]\n"
+ "ld1 { v19.s }[2], [x23]\n"
+ "ld1 { v22.s }[2], [x22]\n"
+ "ld1 { v27.s }[2], [x21]\n"
+ "ld1 { v30.s }[2], [x20]\n"
+ "b 195f\n"
+ "190:" // Height 6: Partial accumulate: partial_1_8
+ "mov x25, #0x20\n"
+ "tbz x11, #0, 195f\n"
+ "ldr s11, [x9, #0x0]\n"
+ "ldr s14, [x24, #0x0]\n"
+ "ldr s19, [x23, #0x0]\n"
+ "ldr s22, [x22, #0x0]\n"
+ "ldr s27, [x21, #0x0]\n"
+ "ldr s30, [x20, #0x0]\n"
+ "b 195f\n"
+ "191:" // Height 6: Partial accumulate: partial_4_0
+ "tbz x11, #2, 193f\n"
+ "ld1 { v9.4s }, [x9], #0x10\n"
+ "ld1 { v12.4s }, [x24], #0x10\n"
+ "ld1 { v17.4s }, [x23], #0x10\n"
+ "ld1 { v20.4s }, [x22], #0x10\n"
+ "ld1 { v25.4s }, [x21], #0x10\n"
+ "ld1 { v28.4s }, [x20], #0x10\n"
+ "tbz x11, #1, 192f\n"
+ "ldr d10, [x9], #0x8\n"
+ "ldr d13, [x24], #0x8\n"
+ "mov x25, #0x18\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d26, [x21], #0x8\n"
+ "ldr d29, [x20], #0x8\n"
+ "tbz x11, #0, 195f\n"
+ "ld1 { v10.s }[2], [x9]\n"
+ "ld1 { v13.s }[2], [x24]\n"
+ "ld1 { v18.s }[2], [x23]\n"
+ "ld1 { v21.s }[2], [x22]\n"
+ "ld1 { v26.s }[2], [x21]\n"
+ "ld1 { v29.s }[2], [x20]\n"
+ "b 195f\n"
+ "192:" // Height 6: Partial accumulate: partial_1_4
+ "mov x25, #0x10\n"
+ "tbz x11, #0, 195f\n"
+ "ldr s10, [x9, #0x0]\n"
+ "ldr s13, [x24, #0x0]\n"
+ "ldr s18, [x23, #0x0]\n"
+ "ldr s21, [x22, #0x0]\n"
+ "ldr s26, [x21, #0x0]\n"
+ "ldr s29, [x20, #0x0]\n"
+ "b 195f\n"
+ "193:" // Height 6: Partial accumulate: partial_2_0
+ "tbz x11, #1, 194f\n"
+ "ldr d9, [x9], #0x8\n"
+ "ldr d12, [x24], #0x8\n"
+ "mov x25, #0x8\n"
+ "ldr d17, [x23], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d25, [x21], #0x8\n"
+ "ldr d28, [x20], #0x8\n"
+ "tbz x11, #0, 195f\n"
+ "ld1 { v9.s }[2], [x9]\n"
+ "ld1 { v12.s }[2], [x24]\n"
+ "ld1 { v17.s }[2], [x23]\n"
+ "ld1 { v20.s }[2], [x22]\n"
+ "ld1 { v25.s }[2], [x21]\n"
+ "ld1 { v28.s }[2], [x20]\n"
+ "b 195f\n"
+ "194:" // Height 6: Partial accumulate: partial_1_0
+ "ldr s9, [x9, #0x0]\n"
+ "ldr s12, [x24, #0x0]\n"
+ "mov x25, #0x0\n"
+ "ldr s17, [x23, #0x0]\n"
+ "ldr s20, [x22, #0x0]\n"
+ "ldr s25, [x21, #0x0]\n"
+ "ldr s28, [x20, #0x0]\n"
+ "195:" // Height 6: Partial accumulate: Done
+ "sub x9, x9, x25\n"
+ "b 197f\n"
+ "196:" // Height 6: full accumulate
+ "ldr q9, [x9, #0x0]\n"
+ "ldr q10, [x9, #0x10]\n"
+ "ldr q11, [x9, #0x20]\n"
+ "ldr q16, [x9, #0x30]\n"
+ "ldr q12, [x24, #0x0]\n"
+ "ldr q13, [x24, #0x10]\n"
+ "ldr q14, [x24, #0x20]\n"
+ "ldr q15, [x24, #0x30]\n"
+ "ldr q17, [x23, #0x0]\n"
+ "ldr q18, [x23, #0x10]\n"
+ "ldr q19, [x23, #0x20]\n"
+ "ldr q24, [x23, #0x30]\n"
+ "ldr q20, [x22, #0x0]\n"
+ "ldr q21, [x22, #0x10]\n"
+ "ldr q22, [x22, #0x20]\n"
+ "ldr q23, [x22, #0x30]\n"
+ "ldr q25, [x21, #0x0]\n"
+ "ldr q26, [x21, #0x10]\n"
+ "ldr q27, [x21, #0x20]\n"
+ "ldr q6, [x21, #0x30]\n"
+ "ldr q28, [x20, #0x0]\n"
+ "ldr q29, [x20, #0x10]\n"
+ "ldr q30, [x20, #0x20]\n"
+ "ldr q31, [x20, #0x30]\n"
+ "197:" // Height 6: MMLA fixup
+ "zip1 v8.2d, v9.2d, v12.2d\n"
+ "zip2 v12.2d, v9.2d, v12.2d\n"
+ "zip1 v9.2d, v10.2d, v13.2d\n"
+ "zip2 v13.2d, v10.2d, v13.2d\n"
+ "zip1 v10.2d, v11.2d, v14.2d\n"
+ "zip2 v14.2d, v11.2d, v14.2d\n"
+ "zip1 v11.2d, v16.2d, v15.2d\n"
+ "zip2 v15.2d, v16.2d, v15.2d\n"
+ "zip1 v16.2d, v17.2d, v20.2d\n"
+ "zip2 v20.2d, v17.2d, v20.2d\n"
+ "zip1 v17.2d, v18.2d, v21.2d\n"
+ "zip2 v21.2d, v18.2d, v21.2d\n"
+ "zip1 v18.2d, v19.2d, v22.2d\n"
+ "zip2 v22.2d, v19.2d, v22.2d\n"
+ "zip1 v19.2d, v24.2d, v23.2d\n"
+ "zip2 v23.2d, v24.2d, v23.2d\n"
+ "zip1 v24.2d, v25.2d, v28.2d\n"
+ "zip2 v28.2d, v25.2d, v28.2d\n"
+ "zip1 v25.2d, v26.2d, v29.2d\n"
+ "zip2 v29.2d, v26.2d, v29.2d\n"
+ "zip1 v26.2d, v27.2d, v30.2d\n"
+ "zip2 v30.2d, v27.2d, v30.2d\n"
+ "zip1 v27.2d, v6.2d, v31.2d\n"
+ "zip2 v31.2d, v6.2d, v31.2d\n"
+ "b 199f\n"
+ "198:" // Height 6: no accumulate
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "movi v12.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ "199:" // Height 6: setup done
+ "mov x28, #0x0\n"
+ "200:" // Height 6: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 201f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x21, [x20, #0x28]\n"
+ "cbnz x28, 202f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "add x21, x21, x20\n"
+ "b 202f\n"
+ "201:" // Height 6: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
+ "202:" // Height 6: input setup done
+ "cmp x27, #0x10\n"
+ "blt 205f\n"
+ "ldr q1, [x26, #0x0]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "cmp x27, #0x20\n"
+ "ldr q3, [x24, #0x0]\n"
+ "ldr q4, [x23, #0x0]\n"
+ "ldr q5, [x22, #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
+ "ldr q7, [x10, #0x0]\n"
+ "blt 204f\n"
+ "203:" // Height 6: Multiply loop: Main loop head
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "trn1 v4.2d, v5.2d, v6.2d\n"
+ "trn2 v5.2d, v5.2d, v6.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x4e87ac08 // usmmla v8.4s, v0.16b, v7.16b\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x4e87ac50 // usmmla v16.4s, v2.16b, v7.16b\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x4e87ac98 // usmmla v24.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x4e86ac0c // usmmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4e86ac54 // usmmla v20.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86ac9c // usmmla v28.4s, v4.16b, v6.16b\n"
+ "ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ ".inst 0x4e87ac09 // usmmla v9.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac51 // usmmla v17.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac99 // usmmla v25.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x40]\n"
+ ".inst 0x4e86ac0d // usmmla v13.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac55 // usmmla v21.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86ac9d // usmmla v29.4s, v4.16b, v6.16b\n"
+ "ldr q6, [x10, #0x50]\n"
+ ".inst 0x4e87ac0a // usmmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac52 // usmmla v18.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac9a // usmmla v26.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x60]\n"
+ ".inst 0x4e86ac0e // usmmla v14.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac56 // usmmla v22.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86ac9e // usmmla v30.4s, v4.16b, v6.16b\n"
+ "ldr q6, [x10, #0x70]\n"
+ ".inst 0x4e87ac0b // usmmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac53 // usmmla v19.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac9b // usmmla v27.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x80]\n"
+ ".inst 0x4e86ac0f // usmmla v15.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac57 // usmmla v23.4s, v2.16b, v6.16b\n"
+ "ldr q2, [x25, #0x0]\n"
+ ".inst 0x4e86ac9f // usmmla v31.4s, v4.16b, v6.16b\n"
+ "ldr q0, [x10, #0x90]\n"
+ "ldr q4, [x23, #0x0]\n"
+ ".inst 0x4e87ac28 // usmmla v8.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e87ac70 // usmmla v16.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e87acb8 // usmmla v24.4s, v5.16b, v7.16b\n"
+ "ldr q6, [x10, #0xa0]\n"
+ ".inst 0x4e80ac2c // usmmla v12.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac74 // usmmla v20.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acbc // usmmla v28.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xb0]\n"
+ ".inst 0x4e86ac29 // usmmla v9.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac71 // usmmla v17.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86acb9 // usmmla v25.4s, v5.16b, v6.16b\n"
+ "ldr q6, [x10, #0xc0]\n"
+ ".inst 0x4e80ac2d // usmmla v13.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac75 // usmmla v21.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acbd // usmmla v29.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xd0]\n"
+ ".inst 0x4e86ac2a // usmmla v10.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac72 // usmmla v18.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86acba // usmmla v26.4s, v5.16b, v6.16b\n"
+ "ldr q6, [x10, #0xe0]\n"
+ ".inst 0x4e80ac2e // usmmla v14.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac76 // usmmla v22.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acbe // usmmla v30.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e86ac2b // usmmla v11.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac73 // usmmla v19.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86acbb // usmmla v27.4s, v5.16b, v6.16b\n"
+ "ldr q7, [x10, #0x0]\n"
+ ".inst 0x4e80ac2f // usmmla v15.4s, v1.16b, v0.16b\n"
+ "ldr q1, [x26, #0x0]\n"
+ ".inst 0x4e80ac77 // usmmla v23.4s, v3.16b, v0.16b\n"
+ "ldr q3, [x24, #0x0]\n"
+ ".inst 0x4e80acbf // usmmla v31.4s, v5.16b, v0.16b\n"
+ "ldr q5, [x22, #0x0]\n"
+ "ldr q6, [x21, #0x0]\n"
+ "bge 203b\n"
+ "204:" // Height 6: Multiply loop: Single iteration only
+ "trn1 v0.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "trn1 v2.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "trn1 v4.2d, v5.2d, v6.2d\n"
+ "trn2 v5.2d, v5.2d, v6.2d\n"
+ "ldr q6, [x10, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x4e87ac08 // usmmla v8.4s, v0.16b, v7.16b\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x4e87ac50 // usmmla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ ".inst 0x4e87ac98 // usmmla v24.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x20]\n"
+ ".inst 0x4e86ac0c // usmmla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ ".inst 0x4e86ac54 // usmmla v20.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86ac9c // usmmla v28.4s, v4.16b, v6.16b\n"
+ "ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
+ ".inst 0x4e87ac09 // usmmla v9.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac51 // usmmla v17.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac99 // usmmla v25.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x40]\n"
+ ".inst 0x4e86ac0d // usmmla v13.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac55 // usmmla v21.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86ac9d // usmmla v29.4s, v4.16b, v6.16b\n"
+ "ldr q6, [x10, #0x50]\n"
+ ".inst 0x4e87ac0a // usmmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac52 // usmmla v18.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac9a // usmmla v26.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x60]\n"
+ ".inst 0x4e86ac0e // usmmla v14.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac56 // usmmla v22.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86ac9e // usmmla v30.4s, v4.16b, v6.16b\n"
+ "ldr q6, [x10, #0x70]\n"
+ ".inst 0x4e87ac0b // usmmla v11.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87ac53 // usmmla v19.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e87ac9b // usmmla v27.4s, v4.16b, v7.16b\n"
+ "ldr q7, [x10, #0x80]\n"
+ ".inst 0x4e86ac0f // usmmla v15.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e86ac57 // usmmla v23.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e86ac9f // usmmla v31.4s, v4.16b, v6.16b\n"
+ "ldr q2, [x10, #0x90]\n"
+ ".inst 0x4e87ac28 // usmmla v8.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e87ac70 // usmmla v16.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e87acb8 // usmmla v24.4s, v5.16b, v7.16b\n"
+ "ldr q0, [x10, #0xa0]\n"
+ ".inst 0x4e82ac2c // usmmla v12.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82ac74 // usmmla v20.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82acbc // usmmla v28.4s, v5.16b, v2.16b\n"
+ "ldr q2, [x10, #0xb0]\n"
+ ".inst 0x4e80ac29 // usmmla v9.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac71 // usmmla v17.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acb9 // usmmla v25.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xc0]\n"
+ ".inst 0x4e82ac2d // usmmla v13.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82ac75 // usmmla v21.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82acbd // usmmla v29.4s, v5.16b, v2.16b\n"
+ "ldr q2, [x10, #0xd0]\n"
+ ".inst 0x4e80ac2a // usmmla v10.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac72 // usmmla v18.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acba // usmmla v26.4s, v5.16b, v0.16b\n"
+ "ldr q0, [x10, #0xe0]\n"
+ ".inst 0x4e82ac2e // usmmla v14.4s, v1.16b, v2.16b\n"
+ ".inst 0x4e82ac76 // usmmla v22.4s, v3.16b, v2.16b\n"
+ ".inst 0x4e82acbe // usmmla v30.4s, v5.16b, v2.16b\n"
+ "ldr q6, [x10, #0xf0]\n"
+ "add x10, x10, #0x100\n"
+ ".inst 0x4e80ac2b // usmmla v11.4s, v1.16b, v0.16b\n"
+ ".inst 0x4e80ac73 // usmmla v19.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80acbb // usmmla v27.4s, v5.16b, v0.16b\n"
+ ".inst 0x4e86ac2f // usmmla v15.4s, v1.16b, v6.16b\n"
+ ".inst 0x4e86ac77 // usmmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86acbf // usmmla v31.4s, v5.16b, v6.16b\n"
+ "205:" // Height 6: Multiply loop: Main loop skip
+ "cbz x27, 212f\n"
+ "cmp x27, #0x8\n"
+ "blt 207f\n"
+ "206:" // Height 6: Multiply loop: Odd block loop
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
+ "sub x27, x27, #0x8\n"
+ "ldr d5, [x24], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
+ "cmp x27, #0x8\n"
+ "ldr d2, [x22], #0x8\n"
+ "ldr d0, [x21], #0x8\n"
+ "ldr q1, [x10, #0x0]\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v5.2d, v3.2d\n"
+ "trn1 v2.2d, v2.2d, v0.2d\n"
+ "ldr q0, [x10, #0x10]\n"
+ ".inst 0x4e81ac88 // usmmla v8.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81ac70 // usmmla v16.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac58 // usmmla v24.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x20]\n"
+ ".inst 0x4e80ac8c // usmmla v12.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80ac74 // usmmla v20.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5c // usmmla v28.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x30]\n"
+ ".inst 0x4e81ac89 // usmmla v9.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81ac71 // usmmla v17.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac59 // usmmla v25.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x40]\n"
+ ".inst 0x4e80ac8d // usmmla v13.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80ac75 // usmmla v21.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5d // usmmla v29.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x50]\n"
+ ".inst 0x4e81ac8a // usmmla v10.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e81ac72 // usmmla v18.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac5a // usmmla v26.4s, v2.16b, v1.16b\n"
+ "ldr q6, [x10, #0x60]\n"
+ ".inst 0x4e80ac8e // usmmla v14.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80ac76 // usmmla v22.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5e // usmmla v30.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e86ac8b // usmmla v11.4s, v4.16b, v6.16b\n"
+ ".inst 0x4e86ac73 // usmmla v19.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86ac5b // usmmla v27.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e80ac8f // usmmla v15.4s, v4.16b, v0.16b\n"
+ ".inst 0x4e80ac77 // usmmla v23.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5f // usmmla v31.4s, v2.16b, v0.16b\n"
+ "bge 206b\n"
+ "207:" // Height 6: Multiply loop: Skip odd blocks
+ "cbz x27, 212f\n"
+ "tbz x27, #2, 209f\n"
+ "ldr s1, [x26], #0x4\n"
+ "ldr s2, [x25], #0x4\n"
+ "ldr s3, [x24], #0x4\n"
+ "ldr s4, [x23], #0x4\n"
+ "ldr s5, [x22], #0x4\n"
+ "ldr s6, [x21], #0x4\n"
+ "tbz x27, #1, 208f\n"
+ "ld1 { v1.h }[2], [x26], #0x2\n"
+ "ld1 { v2.h }[2], [x25], #0x2\n"
+ "ld1 { v3.h }[2], [x24], #0x2\n"
+ "ld1 { v4.h }[2], [x23], #0x2\n"
+ "ld1 { v5.h }[2], [x22], #0x2\n"
+ "ld1 { v6.h }[2], [x21], #0x2\n"
+ "tbz x27, #0, 211f\n"
+ "ld1 { v1.b }[6], [x26]\n"
+ "ld1 { v2.b }[6], [x25]\n"
+ "ld1 { v3.b }[6], [x24]\n"
+ "ld1 { v4.b }[6], [x23]\n"
+ "ld1 { v5.b }[6], [x22]\n"
+ "ld1 { v6.b }[6], [x21]\n"
+ "b 211f\n"
+ "208:" // Height 6: Multiply loop: Ragged operand read: partial_1_4
+ "tbz x27, #0, 211f\n"
+ "ld1 { v1.b }[4], [x26]\n"
+ "ld1 { v2.b }[4], [x25]\n"
+ "ld1 { v3.b }[4], [x24]\n"
+ "ld1 { v4.b }[4], [x23]\n"
+ "ld1 { v5.b }[4], [x22]\n"
+ "ld1 { v6.b }[4], [x21]\n"
+ "b 211f\n"
+ "209:" // Height 6: Multiply loop: Ragged operand read: partial_2_0
+ "tbz x27, #1, 210f\n"
+ "ldr h1, [x26], #0x2\n"
+ "ldr h2, [x25], #0x2\n"
+ "ldr h3, [x24], #0x2\n"
+ "ldr h4, [x23], #0x2\n"
+ "ldr h5, [x22], #0x2\n"
+ "ldr h6, [x21], #0x2\n"
+ "tbz x27, #0, 211f\n"
+ "ld1 { v1.b }[2], [x26]\n"
+ "ld1 { v2.b }[2], [x25]\n"
+ "ld1 { v3.b }[2], [x24]\n"
+ "ld1 { v4.b }[2], [x23]\n"
+ "ld1 { v5.b }[2], [x22]\n"
+ "ld1 { v6.b }[2], [x21]\n"
+ "b 211f\n"
+ "210:" // Height 6: Multiply loop: Ragged operand read: partial_1_0
+ "ldr b1, [x26, #0x0]\n"
+ "ldr b2, [x25, #0x0]\n"
+ "ldr b3, [x24, #0x0]\n"
+ "ldr b4, [x23, #0x0]\n"
+ "ldr b5, [x22, #0x0]\n"
+ "ldr b6, [x21, #0x0]\n"
+ "211:" // Height 6: Multiply loop: Ragged operand read: Done
+ "ldr q0, [x10, #0x0]\n"
+ "trn1 v7.2d, v1.2d, v2.2d\n"
+ "trn1 v3.2d, v3.2d, v4.2d\n"
+ "trn1 v2.2d, v5.2d, v6.2d\n"
+ "ldr q1, [x10, #0x10]\n"
+ ".inst 0x4e80ace8 // usmmla v8.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80ac70 // usmmla v16.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac58 // usmmla v24.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x20]\n"
+ ".inst 0x4e81acec // usmmla v12.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81ac74 // usmmla v20.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac5c // usmmla v28.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x30]\n"
+ ".inst 0x4e80ace9 // usmmla v9.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80ac71 // usmmla v17.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac59 // usmmla v25.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x40]\n"
+ ".inst 0x4e81aced // usmmla v13.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81ac75 // usmmla v21.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac5d // usmmla v29.4s, v2.16b, v1.16b\n"
+ "ldr q1, [x10, #0x50]\n"
+ ".inst 0x4e80acea // usmmla v10.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80ac72 // usmmla v18.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5a // usmmla v26.4s, v2.16b, v0.16b\n"
+ "ldr q0, [x10, #0x60]\n"
+ ".inst 0x4e81acee // usmmla v14.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e81ac76 // usmmla v22.4s, v3.16b, v1.16b\n"
+ ".inst 0x4e81ac5e // usmmla v30.4s, v2.16b, v1.16b\n"
+ "ldr q6, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
+ ".inst 0x4e80aceb // usmmla v11.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e80ac73 // usmmla v19.4s, v3.16b, v0.16b\n"
+ ".inst 0x4e80ac5b // usmmla v27.4s, v2.16b, v0.16b\n"
+ ".inst 0x4e86acef // usmmla v15.4s, v7.16b, v6.16b\n"
+ ".inst 0x4e86ac77 // usmmla v23.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e86ac5f // usmmla v31.4s, v2.16b, v6.16b\n"
+ "212:" // Height 6: Multiply loop: No odd multiplies
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 200b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
+ "uzp2 v8.2d, v8.2d, v12.2d\n"
+ "uzp1 v12.2d, v9.2d, v13.2d\n"
+ "uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
+ "uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "uzp1 v14.2d, v11.2d, v15.2d\n"
+ "uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "uzp1 v15.2d, v16.2d, v20.2d\n"
+ "uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x20, x21, x20, LSL #2\n"
+ "uzp1 v20.2d, v17.2d, v21.2d\n"
+ "uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "uzp1 v21.2d, v18.2d, v22.2d\n"
+ "uzp2 v18.2d, v18.2d, v22.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
+ "uzp1 v22.2d, v19.2d, v23.2d\n"
+ "uzp2 v19.2d, v19.2d, v23.2d\n"
+ "uzp1 v23.2d, v24.2d, v28.2d\n"
+ "uzp2 v24.2d, v24.2d, v28.2d\n"
+ "uzp1 v28.2d, v25.2d, v29.2d\n"
+ "uzp2 v25.2d, v25.2d, v29.2d\n"
+ "uzp1 v29.2d, v26.2d, v30.2d\n"
+ "uzp2 v26.2d, v26.2d, v30.2d\n"
+ "uzp1 v30.2d, v27.2d, v31.2d\n"
+ "uzp2 v27.2d, v27.2d, v31.2d\n"
+ "bge 221f\n"
+ "tbz x11, #3, 216f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v12.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v9.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v20.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v17.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v28.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "st1 { v25.4s }, [x20], #0x10\n"
+ "tbz x11, #2, 214f\n"
+ "st1 { v13.4s }, [x9], #0x10\n"
+ "st1 { v10.4s }, [x24], #0x10\n"
+ "st1 { v21.4s }, [x23], #0x10\n"
+ "st1 { v18.4s }, [x22], #0x10\n"
+ "st1 { v29.4s }, [x21], #0x10\n"
+ "st1 { v26.4s }, [x20], #0x10\n"
+ "tbz x11, #1, 213f\n"
+ "str d14, [x9], #0x8\n"
+ "str d11, [x24], #0x8\n"
+ "str d22, [x23], #0x8\n"
+ "str d19, [x22], #0x8\n"
+ "str d30, [x21], #0x8\n"
+ "str d27, [x20], #0x8\n"
+ "tbz x11, #0, 220f\n"
+ "st1 { v14.s }[2], [x9]\n"
+ "st1 { v11.s }[2], [x24]\n"
+ "st1 { v22.s }[2], [x23]\n"
+ "st1 { v19.s }[2], [x22]\n"
+ "st1 { v30.s }[2], [x21]\n"
+ "st1 { v27.s }[2], [x20]\n"
+ "b 220f\n"
+ "213:" // Height 6: Partial direct writeback: partial_1_12
+ "tbz x11, #0, 220f\n"
+ "str s14, [x9, #0x0]\n"
+ "str s11, [x24, #0x0]\n"
+ "str s22, [x23, #0x0]\n"
+ "str s19, [x22, #0x0]\n"
+ "str s30, [x21, #0x0]\n"
+ "str s27, [x20, #0x0]\n"
+ "b 220f\n"
+ "214:" // Height 6: Partial direct writeback: partial_2_8
+ "tbz x11, #1, 215f\n"
+ "str d13, [x9], #0x8\n"
+ "str d10, [x24], #0x8\n"
+ "str d21, [x23], #0x8\n"
+ "str d18, [x22], #0x8\n"
+ "str d29, [x21], #0x8\n"
+ "str d26, [x20], #0x8\n"
+ "tbz x11, #0, 220f\n"
+ "st1 { v13.s }[2], [x9]\n"
+ "st1 { v10.s }[2], [x24]\n"
+ "st1 { v21.s }[2], [x23]\n"
+ "st1 { v18.s }[2], [x22]\n"
+ "st1 { v29.s }[2], [x21]\n"
+ "st1 { v26.s }[2], [x20]\n"
+ "b 220f\n"
+ "215:" // Height 6: Partial direct writeback: partial_1_8
+ "tbz x11, #0, 220f\n"
+ "str s13, [x9, #0x0]\n"
+ "str s10, [x24, #0x0]\n"
+ "str s21, [x23, #0x0]\n"
+ "str s18, [x22, #0x0]\n"
+ "str s29, [x21, #0x0]\n"
+ "str s26, [x20, #0x0]\n"
+ "b 220f\n"
+ "216:" // Height 6: Partial direct writeback: partial_4_0
+ "tbz x11, #2, 218f\n"
+ "st1 { v7.4s }, [x9], #0x10\n"
+ "st1 { v8.4s }, [x24], #0x10\n"
+ "st1 { v15.4s }, [x23], #0x10\n"
+ "st1 { v16.4s }, [x22], #0x10\n"
+ "st1 { v23.4s }, [x21], #0x10\n"
+ "st1 { v24.4s }, [x20], #0x10\n"
+ "tbz x11, #1, 217f\n"
+ "str d12, [x9], #0x8\n"
+ "str d9, [x24], #0x8\n"
+ "str d20, [x23], #0x8\n"
+ "str d17, [x22], #0x8\n"
+ "str d28, [x21], #0x8\n"
+ "str d25, [x20], #0x8\n"
+ "tbz x11, #0, 220f\n"
+ "st1 { v12.s }[2], [x9]\n"
+ "st1 { v9.s }[2], [x24]\n"
+ "st1 { v20.s }[2], [x23]\n"
+ "st1 { v17.s }[2], [x22]\n"
+ "st1 { v28.s }[2], [x21]\n"
+ "st1 { v25.s }[2], [x20]\n"
+ "b 220f\n"
+ "217:" // Height 6: Partial direct writeback: partial_1_4
+ "tbz x11, #0, 220f\n"
+ "str s12, [x9, #0x0]\n"
+ "str s9, [x24, #0x0]\n"
+ "str s20, [x23, #0x0]\n"
+ "str s17, [x22, #0x0]\n"
+ "str s28, [x21, #0x0]\n"
+ "str s25, [x20, #0x0]\n"
+ "b 220f\n"
+ "218:" // Height 6: Partial direct writeback: partial_2_0
+ "tbz x11, #1, 219f\n"
+ "str d7, [x9], #0x8\n"
+ "str d8, [x24], #0x8\n"
+ "str d15, [x23], #0x8\n"
+ "str d16, [x22], #0x8\n"
+ "str d23, [x21], #0x8\n"
+ "str d24, [x20], #0x8\n"
+ "tbz x11, #0, 220f\n"
+ "st1 { v7.s }[2], [x9]\n"
+ "st1 { v8.s }[2], [x24]\n"
+ "st1 { v15.s }[2], [x23]\n"
+ "st1 { v16.s }[2], [x22]\n"
+ "st1 { v23.s }[2], [x21]\n"
+ "st1 { v24.s }[2], [x20]\n"
+ "b 220f\n"
+ "219:" // Height 6: Partial direct writeback: partial_1_0
+ "str s7, [x9, #0x0]\n"
+ "str s8, [x24, #0x0]\n"
+ "str s15, [x23, #0x0]\n"
+ "str s16, [x22, #0x0]\n"
+ "str s23, [x21, #0x0]\n"
+ "str s24, [x20, #0x0]\n"
+ "220:" // Height 6: Partial direct writeback: Done
+ "b 222f\n"
+ "221:" // Height 6: Full writeback
+ "str q7, [x9, #0x0]\n"
+ "str q12, [x9, #0x10]\n"
+ "str q13, [x9, #0x20]\n"
+ "str q14, [x9, #0x30]\n"
+ "add x9, x9, #0x40\n"
+ "str q8, [x24, #0x0]\n"
+ "str q9, [x24, #0x10]\n"
+ "str q10, [x24, #0x20]\n"
+ "str q11, [x24, #0x30]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q16, [x22, #0x0]\n"
+ "str q17, [x22, #0x10]\n"
+ "str q18, [x22, #0x20]\n"
+ "str q19, [x22, #0x30]\n"
+ "str q23, [x21, #0x0]\n"
+ "str q28, [x21, #0x10]\n"
+ "str q29, [x21, #0x20]\n"
+ "str q30, [x21, #0x30]\n"
+ "str q24, [x20, #0x0]\n"
+ "str q25, [x20, #0x10]\n"
+ "str q26, [x20, #0x20]\n"
+ "str q27, [x20, #0x30]\n"
+ "222:" // Height 6: Writeback done
+ "subs x11, x11, #0x10\n"
+ "bgt 187b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 224f\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 223f\n"
+ "add x21, x21, #0x6\n"
+ "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "223:" // Update direct input
+ "mov x20, #0x6\n"
+ "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "b 1b\n"
+ "224:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
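
Note on the kernel above: the ".inst 0x4e..ac.. // usmmla" words are raw encodings of USMMLA, the FEAT_I8MM mixed-sign 8-bit matrix multiply-accumulate. Each instruction multiplies a 2x8 block of unsigned bytes by an 8x2 block of signed bytes and accumulates a 2x2 int32 tile; emitting the encoding directly, with the mnemonic kept in the comment, lets the file assemble with toolchains that predate I8MM. The surrounding "trn1" instructions interleave two input rows into the 2x8 operand layout, and the "uzp1"/"uzp2" pairs before writeback split the interleaved accumulator row pairs back into plain rows. As a minimal sketch only (not part of the patch), the same accumulate step can be written with the ACLE intrinsic when the compiler targets I8MM (__ARM_FEATURE_MATMUL_INT8):

    // Sketch, assuming an aarch64 compiler with +i8mm enabled.
    #include <arm_neon.h>

    // acc += a (2x8, unsigned bytes) * b (8x2, signed bytes),
    // where acc holds a 2x2 int32 tile -- one USMMLA instruction.
    static inline int32x4_t usmmla_tile(int32x4_t acc, uint8x16_t a, int8x16_t b)
    {
        return vusmmlaq_s32(acc, a, b);
    }
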
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp
index 38bb7c646d..4bb4c31577 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
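
The one-line template change in the hunk above gives StdTransformsFixed a separate LHS operand type ahead of the RHS type. A plausible reading, consistent with the mixed-sign (u8 x s8) usmmla kernels added elsewhere in this patch, is that the two GEMM operands no longer share a single element type, so the transforms helper must be parameterised on both. A hypothetical sketch of the shape of the change (parameter names are guesses; only the extra leading type parameter comes from the diff):

    // Widened transforms template -- names assumed, not taken from the header.
    template<typename TLhs, typename TRhs, typename TResult,
             unsigned int height, unsigned int width, unsigned int k_unroll>
    class StdTransformsFixed;
    // previously: StdTransformsFixed<TOperand, TResult, height, width, k_unroll>
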
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp
index 7f0fad7fa7..4ec75191b9 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
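
This hunk, repeated for each hybrid kernel the patch touches, moves the output pointer out of the asm operand list and into the KernelArgs block: the assembly now reloads it with "ldr ... [%x[args_ptr], %[offsetof_output_ptr]]" at the top of each height block instead of tying up a dedicated input operand for the whole asm body, and the freed register lets the B-panel pointer move from x16 to x17 throughout the hunks that follow. A minimal aarch64-only sketch of the pattern, with hypothetical names rather than the library's real interface:

    // Sketch: pass one args struct and index it with offsetof() inside the
    // asm, so no general-purpose register is pinned as an input operand.
    #include <cstddef>

    struct KernelArgs {
        const void *B_ptr;
        void       *output_ptr;
    };

    void kernel(const KernelArgs &ka)
    {
        __asm__ __volatile__(
            "ldr x17, [%x[args], %[off_B]]\n"   // B panel pointer
            "ldr x16, [%x[args], %[off_out]]\n" // output pointer, reloaded on demand
            :
            : [args] "r" (&ka),
              [off_B] "I" (offsetof(KernelArgs, B_ptr)),
              [off_out] "I" (offsetof(KernelArgs, output_ptr))
            : "x16", "x17", "memory");
    }

The trade is one extra load per column/height block against a register the generated schedule can reuse across the hot loops.
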
@@ -87,72 +87,72 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"bgt 69f\n"
"beq 35f\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 12f\n"
"cmp x8, #0x10\n"
"bge 11f\n"
"tbz x8, #3, 6f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"tbz x8, #2, 4f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"tbz x8, #1, 3f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"tbz x8, #0, 10f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"b 10f\n"
"3:" // Height 1: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 10f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"b 10f\n"
"4:" // Height 1: Partial accumulate: partial_2_8
"tbz x8, #1, 5f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"tbz x8, #0, 10f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"b 10f\n"
"5:" // Height 1: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 10f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"b 10f\n"
"6:" // Height 1: Partial accumulate: partial_4_0
"tbz x8, #2, 8f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"tbz x8, #1, 7f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"tbz x8, #0, 10f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"b 10f\n"
"7:" // Height 1: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 10f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"b 10f\n"
"8:" // Height 1: Partial accumulate: partial_2_0
"tbz x8, #1, 9f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"tbz x8, #0, 10f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"b 10f\n"
"9:" // Height 1: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"10:" // Height 1: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 13f\n"
"11:" // Height 1: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"b 13f\n"
"12:" // Height 1: no accumulate
"movi v8.4s, #0x0\n"
@@ -163,8 +163,8 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"mov x15, #0x0\n"
"14:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 15f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -180,118 +180,118 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"blt 19f\n"
"ldr q0, [x13, #0x0]\n"
"cmp x14, #0x20\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 18f\n"
"17:" // Height 1: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr d17, [x16, #0x20]\n"
- "ldr x20, [x16, #0x28]\n"
+ "ldr d17, [x17, #0x20]\n"
+ "ldr x21, [x17, #0x28]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr d16, [x16, #0x30]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr d16, [x17, #0x30]\n"
+ "add x13, x13, #0x10\n"
+ "ldr x20, [x17, #0x38]\n"
+ "sub x14, x14, #0x10\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x48]\n"
+ "ldr x22, [x13, #0x8]\n"
+ "cmp x14, #0x20\n"
"mov v16.d[1], x20\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
- "ldr d17, [x16, #0x40]\n"
- "ldr x20, [x16, #0x48]\n"
+ "ldr d17, [x17, #0x40]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr d16, [x16, #0x50]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x58]\n"
+ "ldr d16, [x17, #0x50]\n"
+ "ldr x20, [x17, #0x58]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x68]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"mov v16.d[1], x20\n"
".inst 0x6fa0e228 // udot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr d17, [x16, #0x60]\n"
- "ldr x20, [x16, #0x68]\n"
+ "ldr d17, [x17, #0x60]\n"
".inst 0x6fa0e209 // udot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr d16, [x16, #0x70]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr d16, [x17, #0x70]\n"
+ "ldr x20, [x17, #0x78]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x88]\n"
"mov v16.d[1], x20\n"
".inst 0x6fa0e22a // udot v10.4s, v17.16b, v0.4b[1]\n"
- "ldr d17, [x16, #0x80]\n"
- "ldr x20, [x16, #0x88]\n"
+ "ldr d17, [x17, #0x80]\n"
".inst 0x6fa0e20b // udot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr d16, [x16, #0x90]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x98]\n"
+ "ldr d16, [x17, #0x90]\n"
+ "ldr x20, [x17, #0x98]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xa8]\n"
"mov v16.d[1], x20\n"
".inst 0x6f80ea28 // udot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr d17, [x16, #0xa0]\n"
- "ldr x20, [x16, #0xa8]\n"
+ "ldr d17, [x17, #0xa0]\n"
".inst 0x6f80ea09 // udot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr d16, [x16, #0xb0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0xb8]\n"
+ "ldr d16, [x17, #0xb0]\n"
+ "ldr x20, [x17, #0xb8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xc8]\n"
"mov v16.d[1], x20\n"
".inst 0x6f80ea2a // udot v10.4s, v17.16b, v0.4b[2]\n"
- "ldr d17, [x16, #0xc0]\n"
- "ldr x20, [x16, #0xc8]\n"
+ "ldr d17, [x17, #0xc0]\n"
".inst 0x6f80ea0b // udot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr d16, [x16, #0xd0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0xd8]\n"
+ "ldr d16, [x17, #0xd0]\n"
+ "ldr x20, [x17, #0xd8]\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0xe8]\n"
"mov v16.d[1], x20\n"
".inst 0x6fa0ea28 // udot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr d17, [x16, #0xe0]\n"
- "ldr x20, [x16, #0xe8]\n"
+ "ldr d17, [x17, #0xe0]\n"
".inst 0x6fa0ea09 // udot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr d16, [x16, #0xf0]\n"
- "mov v17.d[1], x20\n"
- "ldr x20, [x16, #0xf8]\n"
+ "ldr d16, [x17, #0xf0]\n"
+ "ldr x20, [x17, #0xf8]\n"
+ "add x17, x17, #0x100\n"
+ "mov v17.d[1], x21\n"
+ "ldr x21, [x17, #0x8]\n"
"mov v16.d[1], x20\n"
- "add x13, x13, #0x10\n"
- "add x16, x16, #0x100\n"
".inst 0x6fa0ea2a // udot v10.4s, v17.16b, v0.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
- "ldr x20, [x16, #0x8]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x6fa0ea0b // udot v11.4s, v16.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x21, [x13, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x16, #0x18]\n"
- "mov v0.d[1], x21\n"
+ "ldr d7, [x17, #0x10]\n"
+ "ldr x20, [x17, #0x18]\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x13, #0x80]\n"
"bge 17b\n"
"18:" // Height 1: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
+ "add x13, x13, #0x10\n"
+ "sub x14, x14, #0x10\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
- "ldr q17, [x16, #0x40]\n"
+ "ldr q17, [x17, #0x40]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x50]\n"
+ "ldr q16, [x17, #0x50]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6fa0e228 // udot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x16, #0x60]\n"
+ "ldr q17, [x17, #0x60]\n"
".inst 0x6fa0e209 // udot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x16, #0x70]\n"
+ "ldr q16, [x17, #0x70]\n"
".inst 0x6fa0e22a // udot v10.4s, v17.16b, v0.4b[1]\n"
- "ldr q17, [x16, #0x80]\n"
+ "ldr q17, [x17, #0x80]\n"
".inst 0x6fa0e20b // udot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr q16, [x16, #0x90]\n"
+ "ldr q16, [x17, #0x90]\n"
".inst 0x6f80ea28 // udot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x16, #0xa0]\n"
+ "ldr q17, [x17, #0xa0]\n"
".inst 0x6f80ea09 // udot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x16, #0xb0]\n"
+ "ldr q16, [x17, #0xb0]\n"
".inst 0x6f80ea2a // udot v10.4s, v17.16b, v0.4b[2]\n"
- "ldr q17, [x16, #0xc0]\n"
+ "ldr q17, [x17, #0xc0]\n"
".inst 0x6f80ea0b // udot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr q16, [x16, #0xd0]\n"
+ "ldr q16, [x17, #0xd0]\n"
".inst 0x6fa0ea28 // udot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr q17, [x16, #0xe0]\n"
+ "ldr q17, [x17, #0xe0]\n"
".inst 0x6fa0ea09 // udot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr q16, [x16, #0xf0]\n"
- "add x13, x13, #0x10\n"
- "sub x14, x14, #0x10\n"
+ "ldr q16, [x17, #0xf0]\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa0ea2a // udot v10.4s, v17.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6fa0ea0b // udot v11.4s, v16.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
"19:" // Height 1: Multiply loop: Main loop skip
"cbz x14, 24f\n"
"cmp x14, #0x4\n"
@@ -299,16 +299,16 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"20:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x13], #0x4\n"
"sub x14, x14, #0x4\n"
- "ldr q16, [x16, #0x0]\n"
- ".inst 0x6f92e208 // udot v8.4s, v16.16b, v18.4b[0]\n"
- "ldr q16, [x16, #0x10]\n"
- ".inst 0x6f92e209 // udot v9.4s, v16.16b, v18.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x0]\n"
"cmp x14, #0x4\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x10]\n"
+ ".inst 0x6f92e228 // udot v8.4s, v17.16b, v18.4b[0]\n"
+ "ldr q17, [x17, #0x20]\n"
+ ".inst 0x6f92e209 // udot v9.4s, v16.16b, v18.4b[0]\n"
+ "ldr q16, [x17, #0x30]\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f92e22a // udot v10.4s, v17.16b, v18.4b[0]\n"
".inst 0x6f92e20b // udot v11.4s, v16.16b, v18.4b[0]\n"
- "add x16, x16, #0x40\n"
"bge 20b\n"
"21:" // Height 1: Multiply loop: Skip odd blocks
"cbz x14, 24f\n"
@@ -320,165 +320,165 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"22:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr b0, [x13, #0x0]\n"
"23:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q16, [x16, #0x0]\n"
- ".inst 0x6f80e208 // udot v8.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x10]\n"
+ "ldr q17, [x17, #0x0]\n"
+ "ldr q16, [x17, #0x10]\n"
+ ".inst 0x6f80e228 // udot v8.4s, v17.16b, v0.4b[0]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x6f80e209 // udot v9.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x20]\n"
- ".inst 0x6f80e20a // udot v10.4s, v16.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
+ "add x17, x17, #0x40\n"
+ ".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
"24:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x15, x15, #0x1\n"
"cmp x15, x20\n"
"bne 14b\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
"bge 33f\n"
"tbz x8, #3, 28f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"tbz x8, #2, 26f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"tbz x8, #1, 25f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"tbz x8, #0, 32f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"b 32f\n"
"25:" // Height 1: Partial direct writeback: partial_1_12
"tbz x8, #0, 32f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"b 32f\n"
"26:" // Height 1: Partial direct writeback: partial_2_8
"tbz x8, #1, 27f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"tbz x8, #0, 32f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"b 32f\n"
"27:" // Height 1: Partial direct writeback: partial_1_8
"tbz x8, #0, 32f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"b 32f\n"
"28:" // Height 1: Partial direct writeback: partial_4_0
"tbz x8, #2, 30f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"tbz x8, #1, 29f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"tbz x8, #0, 32f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"b 32f\n"
"29:" // Height 1: Partial direct writeback: partial_1_4
"tbz x8, #0, 32f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"b 32f\n"
"30:" // Height 1: Partial direct writeback: partial_2_0
"tbz x8, #1, 31f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"tbz x8, #0, 32f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"b 32f\n"
"31:" // Height 1: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"32:" // Height 1: Partial direct writeback: Done
"b 34f\n"
"33:" // Height 1: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"34:" // Height 1: Writeback done
"subs x8, x8, #0x10\n"
"bgt 2b\n"
"b 206f\n"
"35:" // Height 2
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"36:" // Height 2: Column loop
"tbz %x[flags], #0, 46f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"cmp x8, #0x10\n"
- "add x24, x17, x20, LSL #2\n"
+ "add x24, x16, x20, LSL #2\n"
"bge 45f\n"
"tbz x8, #3, 40f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"tbz x8, #2, 38f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"tbz x8, #1, 37f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"b 44f\n"
"37:" // Height 2: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 44f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"b 44f\n"
"38:" // Height 2: Partial accumulate: partial_2_8
"tbz x8, #1, 39f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"b 44f\n"
"39:" // Height 2: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 44f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"b 44f\n"
"40:" // Height 2: Partial accumulate: partial_4_0
"tbz x8, #2, 42f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"tbz x8, #1, 41f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"b 44f\n"
"41:" // Height 2: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 44f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"b 44f\n"
"42:" // Height 2: Partial accumulate: partial_2_0
"tbz x8, #1, 43f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"tbz x8, #0, 44f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"b 44f\n"
"43:" // Height 2: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"44:" // Height 2: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 47f\n"
"45:" // Height 2: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -497,8 +497,8 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"mov x15, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -518,154 +518,154 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr q0, [x13, #0x0]\n"
"cmp x14, #0x20\n"
"ldr q1, [x12, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 52f\n"
"51:" // Height 2: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x20, [x17, #0x28]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr d17, [x16, #0x20]\n"
+ "ldr d17, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x21, [x17, #0x38]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr d16, [x16, #0x30]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x17, #0x30]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x48]\n"
+ "add x13, x13, #0x10\n"
+ "add x12, x12, #0x10\n"
+ "mov v16.d[1], x21\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
- "mov v16.d[1], x20\n"
".inst 0x6f81e22e // udot v14.4s, v17.16b, v1.4b[0]\n"
- "ldr d17, [x16, #0x40]\n"
+ "ldr d17, [x17, #0x40]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
- "ldr x20, [x16, #0x48]\n"
+ "ldr x21, [x17, #0x58]\n"
".inst 0x6f81e20f // udot v15.4s, v16.16b, v1.4b[0]\n"
- "ldr d16, [x16, #0x50]\n"
+ "ldr d16, [x17, #0x50]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x58]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0x68]\n"
+ "ldr x23, [x13, #0x8]\n"
+ "sub x14, x14, #0x10\n"
+ "mov v16.d[1], x21\n"
".inst 0x6fa0e228 // udot v8.4s, v17.16b, v0.4b[1]\n"
- "ldr x21, [x16, #0x68]\n"
".inst 0x6fa1e22c // udot v12.4s, v17.16b, v1.4b[1]\n"
- "ldr d17, [x16, #0x60]\n"
+ "ldr d17, [x17, #0x60]\n"
".inst 0x6fa0e209 // udot v9.4s, v16.16b, v0.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr x21, [x17, #0x78]\n"
".inst 0x6fa1e20d // udot v13.4s, v16.16b, v1.4b[1]\n"
- "ldr d16, [x16, #0x70]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x17, #0x70]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0x88]\n"
+ "ldr x22, [x12, #0x8]\n"
+ "cmp x14, #0x20\n"
+ "mov v16.d[1], x21\n"
".inst 0x6fa0e22a // udot v10.4s, v17.16b, v0.4b[1]\n"
- "mov v16.d[1], x20\n"
".inst 0x6fa1e22e // udot v14.4s, v17.16b, v1.4b[1]\n"
- "ldr d17, [x16, #0x80]\n"
+ "ldr d17, [x17, #0x80]\n"
".inst 0x6fa0e20b // udot v11.4s, v16.16b, v0.4b[1]\n"
- "ldr x20, [x16, #0x88]\n"
+ "ldr x21, [x17, #0x98]\n"
".inst 0x6fa1e20f // udot v15.4s, v16.16b, v1.4b[1]\n"
- "ldr d16, [x16, #0x90]\n"
+ "ldr d16, [x17, #0x90]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x16, #0x98]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0xa8]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
+ "mov v16.d[1], x21\n"
".inst 0x6f80ea28 // udot v8.4s, v17.16b, v0.4b[2]\n"
- "ldr x21, [x16, #0xa8]\n"
".inst 0x6f81ea2c // udot v12.4s, v17.16b, v1.4b[2]\n"
- "ldr d17, [x16, #0xa0]\n"
+ "ldr d17, [x17, #0xa0]\n"
".inst 0x6f80ea09 // udot v9.4s, v16.16b, v0.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "ldr x21, [x17, #0xb8]\n"
".inst 0x6f81ea0d // udot v13.4s, v16.16b, v1.4b[2]\n"
- "ldr d16, [x16, #0xb0]\n"
- "mov v17.d[1], x21\n"
+ "ldr d16, [x17, #0xb0]\n"
+ "mov v17.d[1], x20\n"
+ "ldr x20, [x17, #0xc8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v16.d[1], x21\n"
".inst 0x6f80ea2a // udot v10.4s, v17.16b, v0.4b[2]\n"
- "mov v16.d[1], x20\n"
".inst 0x6f81ea2e // udot v14.4s, v17.16b, v1.4b[2]\n"
- "ldr d17, [x16, #0xc0]\n"
+ "ldr d17, [x17, #0xc0]\n"
".inst 0x6f80ea0b // udot v11.4s, v16.16b, v0.4b[2]\n"
- "ldr x20, [x16, #0xc8]\n"
+ "ldr x21, [x17, #0xd8]\n"
".inst 0x6f81ea0f // udot v15.4s, v16.16b, v1.4b[2]\n"
- "ldr d16, [x16, #0xd0]\n"
+ "ldr d16, [x17, #0xd0]\n"
"mov v17.d[1], x20\n"
- "ldr x20, [x16, #0xd8]\n"
- "mov v16.d[1], x20\n"
+ "ldr x20, [x17, #0xe8]\n"
+ "mov v16.d[1], x21\n"
".inst 0x6fa0ea28 // udot v8.4s, v17.16b, v0.4b[3]\n"
- "ldr x21, [x16, #0xe8]\n"
".inst 0x6fa1ea2c // udot v12.4s, v17.16b, v1.4b[3]\n"
- "ldr d17, [x16, #0xe0]\n"
+ "ldr d17, [x17, #0xe0]\n"
".inst 0x6fa0ea09 // udot v9.4s, v16.16b, v0.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "ldr x21, [x17, #0xf8]\n"
".inst 0x6fa1ea0d // udot v13.4s, v16.16b, v1.4b[3]\n"
- "ldr d16, [x16, #0xf0]\n"
- "mov v17.d[1], x21\n"
- "add x13, x13, #0x10\n"
- "mov v16.d[1], x20\n"
- "add x12, x12, #0x10\n"
- "add x16, x16, #0x100\n"
+ "ldr d16, [x17, #0xf0]\n"
+ "mov v17.d[1], x20\n"
+ "add x17, x17, #0x100\n"
+ "ldr x20, [x17, #0x8]\n"
+ "mov v16.d[1], x21\n"
".inst 0x6fa0ea2a // udot v10.4s, v17.16b, v0.4b[3]\n"
".inst 0x6fa1ea2e // udot v14.4s, v17.16b, v1.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
- "ldr x21, [x16, #0x8]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x6fa0ea0b // udot v11.4s, v16.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x6fa1ea0f // udot v15.4s, v16.16b, v1.4b[3]\n"
"ldr d1, [x12, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x20, [x13, #0x8]\n"
- "mov v6.d[1], x21\n"
- "ldr x21, [x12, #0x8]\n"
- "mov v0.d[1], x20\n"
- "ldr x20, [x16, #0x18]\n"
- "mov v1.d[1], x21\n"
- "prfm pldl1keep, [x13, #0x80]\n"
+ "ldr d7, [x17, #0x10]\n"
+ "mov v6.d[1], x20\n"
+ "ldr x20, [x17, #0x18]\n"
+ "mov v0.d[1], x23\n"
+ "mov v1.d[1], x22\n"
"mov v7.d[1], x20\n"
- "prfm pldl1keep, [x12, #0x80]\n"
"bge 51b\n"
"52:" // Height 2: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"add x13, x13, #0x10\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
"sub x14, x14, #0x10\n"
".inst 0x6f81e22e // udot v14.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x16, #0x40]\n"
+ "ldr q17, [x17, #0x40]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
"prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6f81e20f // udot v15.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x16, #0x50]\n"
+ "ldr q16, [x17, #0x50]\n"
".inst 0x6fa0e228 // udot v8.4s, v17.16b, v0.4b[1]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6fa1e22c // udot v12.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x16, #0x60]\n"
+ "ldr q17, [x17, #0x60]\n"
".inst 0x6fa0e209 // udot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x6fa1e20d // udot v13.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x16, #0x70]\n"
+ "ldr q16, [x17, #0x70]\n"
".inst 0x6fa0e22a // udot v10.4s, v17.16b, v0.4b[1]\n"
".inst 0x6fa1e22e // udot v14.4s, v17.16b, v1.4b[1]\n"
- "ldr q17, [x16, #0x80]\n"
+ "ldr q17, [x17, #0x80]\n"
".inst 0x6fa0e20b // udot v11.4s, v16.16b, v0.4b[1]\n"
".inst 0x6fa1e20f // udot v15.4s, v16.16b, v1.4b[1]\n"
- "ldr q16, [x16, #0x90]\n"
+ "ldr q16, [x17, #0x90]\n"
".inst 0x6f80ea28 // udot v8.4s, v17.16b, v0.4b[2]\n"
".inst 0x6f81ea2c // udot v12.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x16, #0xa0]\n"
+ "ldr q17, [x17, #0xa0]\n"
".inst 0x6f80ea09 // udot v9.4s, v16.16b, v0.4b[2]\n"
".inst 0x6f81ea0d // udot v13.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x16, #0xb0]\n"
+ "ldr q16, [x17, #0xb0]\n"
".inst 0x6f80ea2a // udot v10.4s, v17.16b, v0.4b[2]\n"
".inst 0x6f81ea2e // udot v14.4s, v17.16b, v1.4b[2]\n"
- "ldr q17, [x16, #0xc0]\n"
+ "ldr q17, [x17, #0xc0]\n"
".inst 0x6f80ea0b // udot v11.4s, v16.16b, v0.4b[2]\n"
".inst 0x6f81ea0f // udot v15.4s, v16.16b, v1.4b[2]\n"
- "ldr q16, [x16, #0xd0]\n"
+ "ldr q16, [x17, #0xd0]\n"
".inst 0x6fa0ea28 // udot v8.4s, v17.16b, v0.4b[3]\n"
".inst 0x6fa1ea2c // udot v12.4s, v17.16b, v1.4b[3]\n"
- "ldr q17, [x16, #0xe0]\n"
+ "ldr q17, [x17, #0xe0]\n"
".inst 0x6fa0ea09 // udot v9.4s, v16.16b, v0.4b[3]\n"
".inst 0x6fa1ea0d // udot v13.4s, v16.16b, v1.4b[3]\n"
- "ldr q16, [x16, #0xf0]\n"
+ "ldr q16, [x17, #0xf0]\n"
".inst 0x6fa0ea2a // udot v10.4s, v17.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa1ea2e // udot v14.4s, v17.16b, v1.4b[3]\n"
".inst 0x6fa0ea0b // udot v11.4s, v16.16b, v0.4b[3]\n"
".inst 0x6fa1ea0f // udot v15.4s, v16.16b, v1.4b[3]\n"
@@ -678,16 +678,16 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"sub x14, x14, #0x4\n"
"ldr s18, [x12], #0x4\n"
"cmp x14, #0x4\n"
- "ldr q17, [x16, #0x0]\n"
+ "ldr q17, [x17, #0x0]\n"
+ "ldr q16, [x17, #0x10]\n"
".inst 0x6f93e228 // udot v8.4s, v17.16b, v19.4b[0]\n"
- "ldr q16, [x16, #0x10]\n"
".inst 0x6f92e22c // udot v12.4s, v17.16b, v18.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x6f93e209 // udot v9.4s, v16.16b, v19.4b[0]\n"
".inst 0x6f92e20d // udot v13.4s, v16.16b, v18.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
".inst 0x6f93e22a // udot v10.4s, v17.16b, v19.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f92e22e // udot v14.4s, v17.16b, v18.4b[0]\n"
".inst 0x6f93e20b // udot v11.4s, v16.16b, v19.4b[0]\n"
".inst 0x6f92e20f // udot v15.4s, v16.16b, v18.4b[0]\n"
@@ -705,16 +705,16 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr b0, [x13, #0x0]\n"
"ldr b1, [x12, #0x0]\n"
"57:" // Height 2: Multiply loop: Ragged operand read: Done
- "ldr q17, [x16, #0x0]\n"
+ "ldr q17, [x17, #0x0]\n"
+ "ldr q16, [x17, #0x10]\n"
".inst 0x6f80e228 // udot v8.4s, v17.16b, v0.4b[0]\n"
- "ldr q16, [x16, #0x10]\n"
".inst 0x6f81e22c // udot v12.4s, v17.16b, v1.4b[0]\n"
- "ldr q17, [x16, #0x20]\n"
+ "ldr q17, [x17, #0x20]\n"
".inst 0x6f80e209 // udot v9.4s, v16.16b, v0.4b[0]\n"
".inst 0x6f81e20d // udot v13.4s, v16.16b, v1.4b[0]\n"
- "ldr q16, [x16, #0x30]\n"
+ "ldr q16, [x17, #0x30]\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f81e22e // udot v14.4s, v17.16b, v1.4b[0]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x6f81e20f // udot v15.4s, v16.16b, v1.4b[0]\n"
@@ -724,79 +724,79 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 48b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"bge 67f\n"
"tbz x8, #3, 62f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"tbz x8, #2, 60f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"tbz x8, #1, 59f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"tbz x8, #0, 66f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"b 66f\n"
"59:" // Height 2: Partial direct writeback: partial_1_12
"tbz x8, #0, 66f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"b 66f\n"
"60:" // Height 2: Partial direct writeback: partial_2_8
"tbz x8, #1, 61f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"tbz x8, #0, 66f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"b 66f\n"
"61:" // Height 2: Partial direct writeback: partial_1_8
"tbz x8, #0, 66f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"b 66f\n"
"62:" // Height 2: Partial direct writeback: partial_4_0
"tbz x8, #2, 64f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"tbz x8, #1, 63f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"tbz x8, #0, 66f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"b 66f\n"
"63:" // Height 2: Partial direct writeback: partial_1_4
"tbz x8, #0, 66f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"b 66f\n"
"64:" // Height 2: Partial direct writeback: partial_2_0
"tbz x8, #1, 65f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"tbz x8, #0, 66f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"b 66f\n"
"65:" // Height 2: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"66:" // Height 2: Partial direct writeback: Done
"b 68f\n"
"67:" // Height 2: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -807,107 +807,107 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 206f\n"
"69:" // Height 3
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"70:" // Height 3: Column loop
"tbz %x[flags], #0, 80f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
"cmp x8, #0x10\n"
+ "add x24, x16, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"bge 79f\n"
"tbz x8, #3, 74f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"tbz x8, #2, 72f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"tbz x8, #1, 71f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"ldr d19, [x23], #0x8\n"
"tbz x8, #0, 78f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"ld1 { v19.s }[2], [x23]\n"
"b 78f\n"
"71:" // Height 3: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 78f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"b 78f\n"
"72:" // Height 3: Partial accumulate: partial_2_8
"tbz x8, #1, 73f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"ldr d18, [x23], #0x8\n"
"tbz x8, #0, 78f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"ld1 { v18.s }[2], [x23]\n"
"b 78f\n"
"73:" // Height 3: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 78f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"b 78f\n"
"74:" // Height 3: Partial accumulate: partial_4_0
"tbz x8, #2, 76f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"tbz x8, #1, 75f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"ldr d17, [x23], #0x8\n"
"tbz x8, #0, 78f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"ld1 { v17.s }[2], [x23]\n"
"b 78f\n"
"75:" // Height 3: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 78f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"b 78f\n"
"76:" // Height 3: Partial accumulate: partial_2_0
"tbz x8, #1, 77f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"ldr d16, [x23], #0x8\n"
"tbz x8, #0, 78f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"ld1 { v16.s }[2], [x23]\n"
"b 78f\n"
"77:" // Height 3: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"78:" // Height 3: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 81f\n"
"79:" // Height 3: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -934,8 +934,8 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"mov x15, #0x0\n"
"82:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 83f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -959,123 +959,123 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"cmp x14, #0x20\n"
"ldr q1, [x12, #0x0]\n"
"ldr q2, [x11, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 86f\n"
"85:" // Height 3: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x21, [x17, #0x28]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x20, [x17, #0x38]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr d21, [x16, #0x20]\n"
+ "ldr d21, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v21.d[1], x21\n"
+ "add x13, x13, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x48]\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr d20, [x16, #0x30]\n"
+ "ldr d20, [x17, #0x30]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x48]\n"
+ "add x11, x11, #0x10\n"
+ "ldr x24, [x13, #0x8]\n"
"mov v20.d[1], x20\n"
".inst 0x6f80e2aa // udot v10.4s, v21.16b, v0.4b[0]\n"
".inst 0x6f81e2ae // udot v14.4s, v21.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x58]\n"
+ "ldr x20, [x17, #0x58]\n"
".inst 0x6f82e2b2 // udot v18.4s, v21.16b, v2.4b[0]\n"
- "ldr d21, [x16, #0x40]\n"
+ "ldr d21, [x17, #0x40]\n"
".inst 0x6f80e28b // udot v11.4s, v20.16b, v0.4b[0]\n"
- "mov v21.d[1], x21\n"
+ "ldr x23, [x12, #0x8]\n"
".inst 0x6f81e28f // udot v15.4s, v20.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x68]\n"
+ "ldr x22, [x11, #0x8]\n"
".inst 0x6f82e293 // udot v19.4s, v20.16b, v2.4b[0]\n"
- "ldr d20, [x16, #0x50]\n"
+ "ldr d20, [x17, #0x50]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x68]\n"
+ "sub x14, x14, #0x10\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
"mov v20.d[1], x20\n"
".inst 0x6fa0e2a8 // udot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x6fa1e2ac // udot v12.4s, v21.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr x20, [x17, #0x78]\n"
".inst 0x6fa2e2b0 // udot v16.4s, v21.16b, v2.4b[1]\n"
- "ldr d21, [x16, #0x60]\n"
+ "ldr d21, [x17, #0x60]\n"
".inst 0x6fa0e289 // udot v9.4s, v20.16b, v0.4b[1]\n"
- "mov v21.d[1], x21\n"
+ "cmp x14, #0x20\n"
".inst 0x6fa1e28d // udot v13.4s, v20.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0x88]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6fa2e291 // udot v17.4s, v20.16b, v2.4b[1]\n"
- "ldr d20, [x16, #0x70]\n"
+ "ldr d20, [x17, #0x70]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
"mov v20.d[1], x20\n"
".inst 0x6fa0e2aa // udot v10.4s, v21.16b, v0.4b[1]\n"
".inst 0x6fa1e2ae // udot v14.4s, v21.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x98]\n"
+ "ldr x20, [x17, #0x98]\n"
".inst 0x6fa2e2b2 // udot v18.4s, v21.16b, v2.4b[1]\n"
- "ldr d21, [x16, #0x80]\n"
+ "ldr d21, [x17, #0x80]\n"
".inst 0x6fa0e28b // udot v11.4s, v20.16b, v0.4b[1]\n"
- "mov v21.d[1], x21\n"
".inst 0x6fa1e28f // udot v15.4s, v20.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0xa8]\n"
".inst 0x6fa2e293 // udot v19.4s, v20.16b, v2.4b[1]\n"
- "ldr d20, [x16, #0x90]\n"
+ "ldr d20, [x17, #0x90]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xa8]\n"
"mov v20.d[1], x20\n"
".inst 0x6f80eaa8 // udot v8.4s, v21.16b, v0.4b[2]\n"
".inst 0x6f81eaac // udot v12.4s, v21.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "ldr x20, [x17, #0xb8]\n"
".inst 0x6f82eab0 // udot v16.4s, v21.16b, v2.4b[2]\n"
- "ldr d21, [x16, #0xa0]\n"
+ "ldr d21, [x17, #0xa0]\n"
".inst 0x6f80ea89 // udot v9.4s, v20.16b, v0.4b[2]\n"
- "mov v21.d[1], x21\n"
".inst 0x6f81ea8d // udot v13.4s, v20.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xc8]\n"
".inst 0x6f82ea91 // udot v17.4s, v20.16b, v2.4b[2]\n"
- "ldr d20, [x16, #0xb0]\n"
+ "ldr d20, [x17, #0xb0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xc8]\n"
"mov v20.d[1], x20\n"
".inst 0x6f80eaaa // udot v10.4s, v21.16b, v0.4b[2]\n"
".inst 0x6f81eaae // udot v14.4s, v21.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xd8]\n"
+ "ldr x20, [x17, #0xd8]\n"
".inst 0x6f82eab2 // udot v18.4s, v21.16b, v2.4b[2]\n"
- "ldr d21, [x16, #0xc0]\n"
+ "ldr d21, [x17, #0xc0]\n"
".inst 0x6f80ea8b // udot v11.4s, v20.16b, v0.4b[2]\n"
- "mov v21.d[1], x21\n"
".inst 0x6f81ea8f // udot v15.4s, v20.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xe8]\n"
".inst 0x6f82ea93 // udot v19.4s, v20.16b, v2.4b[2]\n"
- "ldr d20, [x16, #0xd0]\n"
+ "ldr d20, [x17, #0xd0]\n"
+ "mov v21.d[1], x21\n"
+ "ldr x21, [x17, #0xe8]\n"
"mov v20.d[1], x20\n"
".inst 0x6fa0eaa8 // udot v8.4s, v21.16b, v0.4b[3]\n"
".inst 0x6fa1eaac // udot v12.4s, v21.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "ldr x20, [x17, #0xf8]\n"
".inst 0x6fa2eab0 // udot v16.4s, v21.16b, v2.4b[3]\n"
- "ldr d21, [x16, #0xe0]\n"
+ "ldr d21, [x17, #0xe0]\n"
".inst 0x6fa0ea89 // udot v9.4s, v20.16b, v0.4b[3]\n"
- "mov v21.d[1], x21\n"
".inst 0x6fa1ea8d // udot v13.4s, v20.16b, v1.4b[3]\n"
- "add x13, x13, #0x10\n"
".inst 0x6fa2ea91 // udot v17.4s, v20.16b, v2.4b[3]\n"
- "ldr d20, [x16, #0xf0]\n"
+ "ldr d20, [x17, #0xf0]\n"
+ "mov v21.d[1], x21\n"
+ "add x17, x17, #0x100\n"
+ "ldr x21, [x17, #0x8]\n"
"mov v20.d[1], x20\n"
- "add x12, x12, #0x10\n"
- "add x11, x11, #0x10\n"
- "add x16, x16, #0x100\n"
".inst 0x6fa0eaaa // udot v10.4s, v21.16b, v0.4b[3]\n"
- "ldr x20, [x16, #0x8]\n"
".inst 0x6fa1eaae // udot v14.4s, v21.16b, v1.4b[3]\n"
- "ldr x23, [x13, #0x8]\n"
+ "ldr x20, [x17, #0x18]\n"
".inst 0x6fa2eab2 // udot v18.4s, v21.16b, v2.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x6fa0ea8b // udot v11.4s, v20.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x6fa1ea8f // udot v15.4s, v20.16b, v1.4b[3]\n"
"ldr d1, [x12, #0x0]\n"
- "ldr x22, [x12, #0x8]\n"
".inst 0x6fa2ea93 // udot v19.4s, v20.16b, v2.4b[3]\n"
"ldr d2, [x11, #0x0]\n"
- "sub x14, x14, #0x10\n"
- "ldr d7, [x16, #0x10]\n"
- "cmp x14, #0x20\n"
- "ldr x21, [x11, #0x8]\n"
- "mov v6.d[1], x20\n"
- "ldr x20, [x16, #0x18]\n"
- "mov v0.d[1], x23\n"
- "prfm pldl1keep, [x13, #0x80]\n"
- "mov v1.d[1], x22\n"
- "prfm pldl1keep, [x12, #0x80]\n"
- "mov v2.d[1], x21\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr d7, [x17, #0x10]\n"
+ "mov v6.d[1], x21\n"
+ "mov v0.d[1], x24\n"
+ "mov v1.d[1], x23\n"
+ "mov v2.d[1], x22\n"
"mov v7.d[1], x20\n"
"bge 85b\n"
"86:" // Height 3: Multiply loop: Single iteration only
@@ -1084,66 +1084,66 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
- "ldr q21, [x16, #0x20]\n"
+ "ldr q21, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
"sub x14, x14, #0x10\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "ldr q20, [x16, #0x30]\n"
+ "ldr q20, [x17, #0x30]\n"
".inst 0x6f80e2aa // udot v10.4s, v21.16b, v0.4b[0]\n"
"prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6f81e2ae // udot v14.4s, v21.16b, v1.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f82e2b2 // udot v18.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x16, #0x40]\n"
+ "ldr q21, [x17, #0x40]\n"
".inst 0x6f80e28b // udot v11.4s, v20.16b, v0.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x6f81e28f // udot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x6f82e293 // udot v19.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x16, #0x50]\n"
+ "ldr q20, [x17, #0x50]\n"
".inst 0x6fa0e2a8 // udot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x6fa1e2ac // udot v12.4s, v21.16b, v1.4b[1]\n"
".inst 0x6fa2e2b0 // udot v16.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x16, #0x60]\n"
+ "ldr q21, [x17, #0x60]\n"
".inst 0x6fa0e289 // udot v9.4s, v20.16b, v0.4b[1]\n"
".inst 0x6fa1e28d // udot v13.4s, v20.16b, v1.4b[1]\n"
".inst 0x6fa2e291 // udot v17.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x16, #0x70]\n"
+ "ldr q20, [x17, #0x70]\n"
".inst 0x6fa0e2aa // udot v10.4s, v21.16b, v0.4b[1]\n"
".inst 0x6fa1e2ae // udot v14.4s, v21.16b, v1.4b[1]\n"
".inst 0x6fa2e2b2 // udot v18.4s, v21.16b, v2.4b[1]\n"
- "ldr q21, [x16, #0x80]\n"
+ "ldr q21, [x17, #0x80]\n"
".inst 0x6fa0e28b // udot v11.4s, v20.16b, v0.4b[1]\n"
".inst 0x6fa1e28f // udot v15.4s, v20.16b, v1.4b[1]\n"
".inst 0x6fa2e293 // udot v19.4s, v20.16b, v2.4b[1]\n"
- "ldr q20, [x16, #0x90]\n"
+ "ldr q20, [x17, #0x90]\n"
".inst 0x6f80eaa8 // udot v8.4s, v21.16b, v0.4b[2]\n"
".inst 0x6f81eaac // udot v12.4s, v21.16b, v1.4b[2]\n"
".inst 0x6f82eab0 // udot v16.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x16, #0xa0]\n"
+ "ldr q21, [x17, #0xa0]\n"
".inst 0x6f80ea89 // udot v9.4s, v20.16b, v0.4b[2]\n"
".inst 0x6f81ea8d // udot v13.4s, v20.16b, v1.4b[2]\n"
".inst 0x6f82ea91 // udot v17.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x16, #0xb0]\n"
+ "ldr q20, [x17, #0xb0]\n"
".inst 0x6f80eaaa // udot v10.4s, v21.16b, v0.4b[2]\n"
".inst 0x6f81eaae // udot v14.4s, v21.16b, v1.4b[2]\n"
".inst 0x6f82eab2 // udot v18.4s, v21.16b, v2.4b[2]\n"
- "ldr q21, [x16, #0xc0]\n"
+ "ldr q21, [x17, #0xc0]\n"
".inst 0x6f80ea8b // udot v11.4s, v20.16b, v0.4b[2]\n"
".inst 0x6f81ea8f // udot v15.4s, v20.16b, v1.4b[2]\n"
".inst 0x6f82ea93 // udot v19.4s, v20.16b, v2.4b[2]\n"
- "ldr q20, [x16, #0xd0]\n"
+ "ldr q20, [x17, #0xd0]\n"
".inst 0x6fa0eaa8 // udot v8.4s, v21.16b, v0.4b[3]\n"
".inst 0x6fa1eaac // udot v12.4s, v21.16b, v1.4b[3]\n"
".inst 0x6fa2eab0 // udot v16.4s, v21.16b, v2.4b[3]\n"
- "ldr q21, [x16, #0xe0]\n"
+ "ldr q21, [x17, #0xe0]\n"
".inst 0x6fa0ea89 // udot v9.4s, v20.16b, v0.4b[3]\n"
".inst 0x6fa1ea8d // udot v13.4s, v20.16b, v1.4b[3]\n"
".inst 0x6fa2ea91 // udot v17.4s, v20.16b, v2.4b[3]\n"
- "ldr q20, [x16, #0xf0]\n"
+ "ldr q20, [x17, #0xf0]\n"
".inst 0x6fa0eaaa // udot v10.4s, v21.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa1eaae // udot v14.4s, v21.16b, v1.4b[3]\n"
".inst 0x6fa2eab2 // udot v18.4s, v21.16b, v2.4b[3]\n"
".inst 0x6fa0ea8b // udot v11.4s, v20.16b, v0.4b[3]\n"
@@ -1159,18 +1159,18 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr s23, [x12], #0x4\n"
"cmp x14, #0x4\n"
"ldr s22, [x11], #0x4\n"
- "ldr q21, [x16, #0x0]\n"
+ "ldr q21, [x17, #0x0]\n"
+ "ldr q20, [x17, #0x10]\n"
".inst 0x6f98e2a8 // udot v8.4s, v21.16b, v24.4b[0]\n"
- "ldr q20, [x16, #0x10]\n"
".inst 0x6f97e2ac // udot v12.4s, v21.16b, v23.4b[0]\n"
".inst 0x6f96e2b0 // udot v16.4s, v21.16b, v22.4b[0]\n"
- "ldr q21, [x16, #0x20]\n"
+ "ldr q21, [x17, #0x20]\n"
".inst 0x6f98e289 // udot v9.4s, v20.16b, v24.4b[0]\n"
".inst 0x6f97e28d // udot v13.4s, v20.16b, v23.4b[0]\n"
".inst 0x6f96e291 // udot v17.4s, v20.16b, v22.4b[0]\n"
- "ldr q20, [x16, #0x30]\n"
+ "ldr q20, [x17, #0x30]\n"
".inst 0x6f98e2aa // udot v10.4s, v21.16b, v24.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f97e2ae // udot v14.4s, v21.16b, v23.4b[0]\n"
".inst 0x6f96e2b2 // udot v18.4s, v21.16b, v22.4b[0]\n"
".inst 0x6f98e28b // udot v11.4s, v20.16b, v24.4b[0]\n"
@@ -1193,18 +1193,18 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr b1, [x12, #0x0]\n"
"ldr b2, [x11, #0x0]\n"
"91:" // Height 3: Multiply loop: Ragged operand read: Done
- "ldr q21, [x16, #0x0]\n"
+ "ldr q21, [x17, #0x0]\n"
+ "ldr q20, [x17, #0x10]\n"
".inst 0x6f80e2a8 // udot v8.4s, v21.16b, v0.4b[0]\n"
- "ldr q20, [x16, #0x10]\n"
".inst 0x6f81e2ac // udot v12.4s, v21.16b, v1.4b[0]\n"
".inst 0x6f82e2b0 // udot v16.4s, v21.16b, v2.4b[0]\n"
- "ldr q21, [x16, #0x20]\n"
+ "ldr q21, [x17, #0x20]\n"
".inst 0x6f80e289 // udot v9.4s, v20.16b, v0.4b[0]\n"
".inst 0x6f81e28d // udot v13.4s, v20.16b, v1.4b[0]\n"
".inst 0x6f82e291 // udot v17.4s, v20.16b, v2.4b[0]\n"
- "ldr q20, [x16, #0x30]\n"
+ "ldr q20, [x17, #0x30]\n"
".inst 0x6f80e2aa // udot v10.4s, v21.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f81e2ae // udot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x6f82e2b2 // udot v18.4s, v21.16b, v2.4b[0]\n"
".inst 0x6f80e28b // udot v11.4s, v20.16b, v0.4b[0]\n"
@@ -1216,97 +1216,97 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 82b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"prfm pstl1keep, [x23, #0x0]\n"
"bge 101f\n"
"tbz x8, #3, 96f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v17.4s }, [x23], #0x10\n"
"tbz x8, #2, 94f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"tbz x8, #1, 93f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"str d19, [x23], #0x8\n"
"tbz x8, #0, 100f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"b 100f\n"
"93:" // Height 3: Partial direct writeback: partial_1_12
"tbz x8, #0, 100f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"str s19, [x23, #0x0]\n"
"b 100f\n"
"94:" // Height 3: Partial direct writeback: partial_2_8
"tbz x8, #1, 95f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"str d18, [x23], #0x8\n"
"tbz x8, #0, 100f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"st1 { v18.s }[2], [x23]\n"
"b 100f\n"
"95:" // Height 3: Partial direct writeback: partial_1_8
"tbz x8, #0, 100f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"str s18, [x23, #0x0]\n"
"b 100f\n"
"96:" // Height 3: Partial direct writeback: partial_4_0
"tbz x8, #2, 98f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"tbz x8, #1, 97f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"str d17, [x23], #0x8\n"
"tbz x8, #0, 100f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"st1 { v17.s }[2], [x23]\n"
"b 100f\n"
"97:" // Height 3: Partial direct writeback: partial_1_4
"tbz x8, #0, 100f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"str s17, [x23, #0x0]\n"
"b 100f\n"
"98:" // Height 3: Partial direct writeback: partial_2_0
"tbz x8, #1, 99f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"str d16, [x23], #0x8\n"
"tbz x8, #0, 100f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"b 100f\n"
"99:" // Height 3: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"str s16, [x23, #0x0]\n"
"100:" // Height 3: Partial direct writeback: Done
"b 102f\n"
"101:" // Height 3: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -1321,38 +1321,38 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 206f\n"
"103:" // Height 4
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"104:" // Height 4: Column loop
"tbz %x[flags], #0, 114f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"cmp x8, #0x10\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
"bge 113f\n"
"tbz x8, #3, 108f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
"tbz x8, #2, 106f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
"tbz x8, #1, 105f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"ldr d19, [x23], #0x8\n"
"ldr d23, [x22], #0x8\n"
"tbz x8, #0, 112f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"ld1 { v19.s }[2], [x23]\n"
"ld1 { v23.s }[2], [x22]\n"
@@ -1360,20 +1360,20 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"105:" // Height 4: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 112f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"ldr s23, [x22, #0x0]\n"
"b 112f\n"
"106:" // Height 4: Partial accumulate: partial_2_8
"tbz x8, #1, 107f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"ldr d18, [x23], #0x8\n"
"ldr d22, [x22], #0x8\n"
"tbz x8, #0, 112f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"ld1 { v18.s }[2], [x23]\n"
"ld1 { v22.s }[2], [x22]\n"
@@ -1381,25 +1381,25 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"107:" // Height 4: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 112f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"ldr s22, [x22, #0x0]\n"
"b 112f\n"
"108:" // Height 4: Partial accumulate: partial_4_0
"tbz x8, #2, 110f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"tbz x8, #1, 109f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"ldr d17, [x23], #0x8\n"
"ldr d21, [x22], #0x8\n"
"tbz x8, #0, 112f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"ld1 { v17.s }[2], [x23]\n"
"ld1 { v21.s }[2], [x22]\n"
@@ -1407,38 +1407,38 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"109:" // Height 4: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 112f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"ldr s21, [x22, #0x0]\n"
"b 112f\n"
"110:" // Height 4: Partial accumulate: partial_2_0
"tbz x8, #1, 111f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"ldr d16, [x23], #0x8\n"
"ldr d20, [x22], #0x8\n"
"tbz x8, #0, 112f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"ld1 { v16.s }[2], [x23]\n"
"ld1 { v20.s }[2], [x22]\n"
"b 112f\n"
"111:" // Height 4: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"ldr s20, [x22, #0x0]\n"
"112:" // Height 4: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 115f\n"
"113:" // Height 4: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -1473,8 +1473,8 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"mov x15, #0x0\n"
"116:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 117f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1502,130 +1502,129 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr q1, [x12, #0x0]\n"
"ldr q2, [x11, #0x0]\n"
"ldr q3, [x10, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 120f\n"
"119:" // Height 4: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x20, [x17, #0x28]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x21, [x17, #0x38]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
"add x13, x13, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr d25, [x16, #0x20]\n"
+ "ldr d25, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v25.d[1], x21\n"
+ "add x12, x12, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x48]\n"
+ "add x11, x11, #0x10\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "add x12, x12, #0x10\n"
+ "mov v25.d[1], x20\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr d24, [x16, #0x30]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0x30]\n"
".inst 0x6f80e32a // udot v10.4s, v25.16b, v0.4b[0]\n"
+ "ldr x20, [x17, #0x48]\n"
".inst 0x6f81e32e // udot v14.4s, v25.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x58]\n"
+ "add x10, x10, #0x10\n"
+ "mov v24.d[1], x21\n"
".inst 0x6f82e332 // udot v18.4s, v25.16b, v2.4b[0]\n"
- "add x11, x11, #0x10\n"
".inst 0x6f83e336 // udot v22.4s, v25.16b, v3.4b[0]\n"
- "ldr d25, [x16, #0x40]\n"
+ "ldr d25, [x17, #0x40]\n"
".inst 0x6f80e30b // udot v11.4s, v24.16b, v0.4b[0]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x58]\n"
".inst 0x6f81e30f // udot v15.4s, v24.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x68]\n"
+ "ldr x25, [x13, #0x8]\n"
".inst 0x6f82e313 // udot v19.4s, v24.16b, v2.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v25.d[1], x20\n"
".inst 0x6f83e317 // udot v23.4s, v24.16b, v3.4b[0]\n"
- "ldr d24, [x16, #0x50]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0x50]\n"
".inst 0x6fa0e328 // udot v8.4s, v25.16b, v0.4b[1]\n"
+ "ldr x20, [x17, #0x68]\n"
".inst 0x6fa1e32c // udot v12.4s, v25.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr x24, [x12, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x6fa2e330 // udot v16.4s, v25.16b, v2.4b[1]\n"
- "ldr x25, [x13, #0x8]\n"
".inst 0x6fa3e334 // udot v20.4s, v25.16b, v3.4b[1]\n"
- "ldr d25, [x16, #0x60]\n"
+ "ldr d25, [x17, #0x60]\n"
".inst 0x6fa0e309 // udot v9.4s, v24.16b, v0.4b[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x78]\n"
".inst 0x6fa1e30d // udot v13.4s, v24.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0x88]\n"
+ "ldr x23, [x11, #0x8]\n"
".inst 0x6fa2e311 // udot v17.4s, v24.16b, v2.4b[1]\n"
- "ldr x24, [x12, #0x8]\n"
+ "mov v25.d[1], x20\n"
".inst 0x6fa3e315 // udot v21.4s, v24.16b, v3.4b[1]\n"
- "ldr d24, [x16, #0x70]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0x70]\n"
".inst 0x6fa0e32a // udot v10.4s, v25.16b, v0.4b[1]\n"
+ "ldr x20, [x17, #0x88]\n"
".inst 0x6fa1e32e // udot v14.4s, v25.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x98]\n"
+ "ldr x22, [x10, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x6fa2e332 // udot v18.4s, v25.16b, v2.4b[1]\n"
- "ldr x23, [x11, #0x8]\n"
".inst 0x6fa3e336 // udot v22.4s, v25.16b, v3.4b[1]\n"
- "ldr d25, [x16, #0x80]\n"
+ "ldr d25, [x17, #0x80]\n"
".inst 0x6fa0e30b // udot v11.4s, v24.16b, v0.4b[1]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0x98]\n"
".inst 0x6fa1e30f // udot v15.4s, v24.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0xa8]\n"
+ "sub x14, x14, #0x10\n"
".inst 0x6fa2e313 // udot v19.4s, v24.16b, v2.4b[1]\n"
- "ldr x22, [x10, #0x8]\n"
+ "mov v25.d[1], x20\n"
".inst 0x6fa3e317 // udot v23.4s, v24.16b, v3.4b[1]\n"
- "ldr d24, [x16, #0x90]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0x90]\n"
".inst 0x6f80eb28 // udot v8.4s, v25.16b, v0.4b[2]\n"
+ "ldr x20, [x17, #0xa8]\n"
".inst 0x6f81eb2c // udot v12.4s, v25.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "cmp x14, #0x20\n"
+ "mov v24.d[1], x21\n"
".inst 0x6f82eb30 // udot v16.4s, v25.16b, v2.4b[2]\n"
- "sub x14, x14, #0x10\n"
".inst 0x6f83eb34 // udot v20.4s, v25.16b, v3.4b[2]\n"
- "ldr d25, [x16, #0xa0]\n"
+ "ldr d25, [x17, #0xa0]\n"
".inst 0x6f80eb09 // udot v9.4s, v24.16b, v0.4b[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xb8]\n"
".inst 0x6f81eb0d // udot v13.4s, v24.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xc8]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6f82eb11 // udot v17.4s, v24.16b, v2.4b[2]\n"
- "cmp x14, #0x20\n"
+ "mov v25.d[1], x20\n"
".inst 0x6f83eb15 // udot v21.4s, v24.16b, v3.4b[2]\n"
- "ldr d24, [x16, #0xb0]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0xb0]\n"
".inst 0x6f80eb2a // udot v10.4s, v25.16b, v0.4b[2]\n"
+ "ldr x20, [x17, #0xc8]\n"
".inst 0x6f81eb2e // udot v14.4s, v25.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xd8]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v24.d[1], x21\n"
".inst 0x6f82eb32 // udot v18.4s, v25.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6f83eb36 // udot v22.4s, v25.16b, v3.4b[2]\n"
- "ldr d25, [x16, #0xc0]\n"
+ "ldr d25, [x17, #0xc0]\n"
".inst 0x6f80eb0b // udot v11.4s, v24.16b, v0.4b[2]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xd8]\n"
".inst 0x6f81eb0f // udot v15.4s, v24.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xe8]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x6f82eb13 // udot v19.4s, v24.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v25.d[1], x20\n"
".inst 0x6f83eb17 // udot v23.4s, v24.16b, v3.4b[2]\n"
- "ldr d24, [x16, #0xd0]\n"
- "mov v24.d[1], x20\n"
+ "ldr d24, [x17, #0xd0]\n"
".inst 0x6fa0eb28 // udot v8.4s, v25.16b, v0.4b[3]\n"
+ "ldr x20, [x17, #0xe8]\n"
".inst 0x6fa1eb2c // udot v12.4s, v25.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
+ "mov v24.d[1], x21\n"
".inst 0x6fa2eb30 // udot v16.4s, v25.16b, v2.4b[3]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x6fa3eb34 // udot v20.4s, v25.16b, v3.4b[3]\n"
- "ldr d25, [x16, #0xe0]\n"
+ "ldr d25, [x17, #0xe0]\n"
".inst 0x6fa0eb09 // udot v9.4s, v24.16b, v0.4b[3]\n"
- "mov v25.d[1], x21\n"
+ "ldr x21, [x17, #0xf8]\n"
".inst 0x6fa1eb0d // udot v13.4s, v24.16b, v1.4b[3]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
".inst 0x6fa2eb11 // udot v17.4s, v24.16b, v2.4b[3]\n"
+ "mov v25.d[1], x20\n"
".inst 0x6fa3eb15 // udot v21.4s, v24.16b, v3.4b[3]\n"
- "ldr d24, [x16, #0xf0]\n"
- "mov v24.d[1], x20\n"
- "add x16, x16, #0x100\n"
+ "ldr d24, [x17, #0xf0]\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa0eb2a // udot v10.4s, v25.16b, v0.4b[3]\n"
- "ldr x21, [x16, #0x8]\n"
".inst 0x6fa1eb2e // udot v14.4s, v25.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x20, [x17, #0x8]\n"
+ "mov v24.d[1], x21\n"
".inst 0x6fa2eb32 // udot v18.4s, v25.16b, v2.4b[3]\n"
".inst 0x6fa3eb36 // udot v22.4s, v25.16b, v3.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x6fa0eb0b // udot v11.4s, v24.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x6fa1eb0f // udot v15.4s, v24.16b, v1.4b[3]\n"
@@ -1634,8 +1633,9 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr d2, [x11, #0x0]\n"
".inst 0x6fa3eb17 // udot v23.4s, v24.16b, v3.4b[3]\n"
"ldr d3, [x10, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
- "mov v6.d[1], x21\n"
+ "ldr d7, [x17, #0x10]\n"
+ "mov v6.d[1], x20\n"
+ "ldr x20, [x17, #0x18]\n"
"mov v0.d[1], x25\n"
"mov v1.d[1], x24\n"
"mov v2.d[1], x23\n"
@@ -1650,7 +1650,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
- "ldr q25, [x16, #0x20]\n"
+ "ldr q25, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"add x10, x10, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
@@ -1658,7 +1658,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
"prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "ldr q24, [x16, #0x30]\n"
+ "ldr q24, [x17, #0x30]\n"
".inst 0x6f80e32a // udot v10.4s, v25.16b, v0.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f81e32e // udot v14.4s, v25.16b, v1.4b[0]\n"
@@ -1666,64 +1666,64 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f82e332 // udot v18.4s, v25.16b, v2.4b[0]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x6f83e336 // udot v22.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x16, #0x40]\n"
+ "ldr q25, [x17, #0x40]\n"
".inst 0x6f80e30b // udot v11.4s, v24.16b, v0.4b[0]\n"
".inst 0x6f81e30f // udot v15.4s, v24.16b, v1.4b[0]\n"
".inst 0x6f82e313 // udot v19.4s, v24.16b, v2.4b[0]\n"
".inst 0x6f83e317 // udot v23.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x16, #0x50]\n"
+ "ldr q24, [x17, #0x50]\n"
".inst 0x6fa0e328 // udot v8.4s, v25.16b, v0.4b[1]\n"
".inst 0x6fa1e32c // udot v12.4s, v25.16b, v1.4b[1]\n"
".inst 0x6fa2e330 // udot v16.4s, v25.16b, v2.4b[1]\n"
".inst 0x6fa3e334 // udot v20.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x16, #0x60]\n"
+ "ldr q25, [x17, #0x60]\n"
".inst 0x6fa0e309 // udot v9.4s, v24.16b, v0.4b[1]\n"
".inst 0x6fa1e30d // udot v13.4s, v24.16b, v1.4b[1]\n"
".inst 0x6fa2e311 // udot v17.4s, v24.16b, v2.4b[1]\n"
".inst 0x6fa3e315 // udot v21.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x16, #0x70]\n"
+ "ldr q24, [x17, #0x70]\n"
".inst 0x6fa0e32a // udot v10.4s, v25.16b, v0.4b[1]\n"
".inst 0x6fa1e32e // udot v14.4s, v25.16b, v1.4b[1]\n"
".inst 0x6fa2e332 // udot v18.4s, v25.16b, v2.4b[1]\n"
".inst 0x6fa3e336 // udot v22.4s, v25.16b, v3.4b[1]\n"
- "ldr q25, [x16, #0x80]\n"
+ "ldr q25, [x17, #0x80]\n"
".inst 0x6fa0e30b // udot v11.4s, v24.16b, v0.4b[1]\n"
".inst 0x6fa1e30f // udot v15.4s, v24.16b, v1.4b[1]\n"
".inst 0x6fa2e313 // udot v19.4s, v24.16b, v2.4b[1]\n"
".inst 0x6fa3e317 // udot v23.4s, v24.16b, v3.4b[1]\n"
- "ldr q24, [x16, #0x90]\n"
+ "ldr q24, [x17, #0x90]\n"
".inst 0x6f80eb28 // udot v8.4s, v25.16b, v0.4b[2]\n"
".inst 0x6f81eb2c // udot v12.4s, v25.16b, v1.4b[2]\n"
".inst 0x6f82eb30 // udot v16.4s, v25.16b, v2.4b[2]\n"
".inst 0x6f83eb34 // udot v20.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x16, #0xa0]\n"
+ "ldr q25, [x17, #0xa0]\n"
".inst 0x6f80eb09 // udot v9.4s, v24.16b, v0.4b[2]\n"
".inst 0x6f81eb0d // udot v13.4s, v24.16b, v1.4b[2]\n"
".inst 0x6f82eb11 // udot v17.4s, v24.16b, v2.4b[2]\n"
".inst 0x6f83eb15 // udot v21.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x16, #0xb0]\n"
+ "ldr q24, [x17, #0xb0]\n"
".inst 0x6f80eb2a // udot v10.4s, v25.16b, v0.4b[2]\n"
".inst 0x6f81eb2e // udot v14.4s, v25.16b, v1.4b[2]\n"
".inst 0x6f82eb32 // udot v18.4s, v25.16b, v2.4b[2]\n"
".inst 0x6f83eb36 // udot v22.4s, v25.16b, v3.4b[2]\n"
- "ldr q25, [x16, #0xc0]\n"
+ "ldr q25, [x17, #0xc0]\n"
".inst 0x6f80eb0b // udot v11.4s, v24.16b, v0.4b[2]\n"
".inst 0x6f81eb0f // udot v15.4s, v24.16b, v1.4b[2]\n"
".inst 0x6f82eb13 // udot v19.4s, v24.16b, v2.4b[2]\n"
".inst 0x6f83eb17 // udot v23.4s, v24.16b, v3.4b[2]\n"
- "ldr q24, [x16, #0xd0]\n"
+ "ldr q24, [x17, #0xd0]\n"
".inst 0x6fa0eb28 // udot v8.4s, v25.16b, v0.4b[3]\n"
".inst 0x6fa1eb2c // udot v12.4s, v25.16b, v1.4b[3]\n"
".inst 0x6fa2eb30 // udot v16.4s, v25.16b, v2.4b[3]\n"
".inst 0x6fa3eb34 // udot v20.4s, v25.16b, v3.4b[3]\n"
- "ldr q25, [x16, #0xe0]\n"
+ "ldr q25, [x17, #0xe0]\n"
".inst 0x6fa0eb09 // udot v9.4s, v24.16b, v0.4b[3]\n"
".inst 0x6fa1eb0d // udot v13.4s, v24.16b, v1.4b[3]\n"
".inst 0x6fa2eb11 // udot v17.4s, v24.16b, v2.4b[3]\n"
".inst 0x6fa3eb15 // udot v21.4s, v24.16b, v3.4b[3]\n"
- "ldr q24, [x16, #0xf0]\n"
+ "ldr q24, [x17, #0xf0]\n"
".inst 0x6fa0eb2a // udot v10.4s, v25.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa1eb2e // udot v14.4s, v25.16b, v1.4b[3]\n"
".inst 0x6fa2eb32 // udot v18.4s, v25.16b, v2.4b[3]\n"
".inst 0x6fa3eb36 // udot v22.4s, v25.16b, v3.4b[3]\n"
@@ -1742,20 +1742,20 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"cmp x14, #0x4\n"
"ldr s27, [x11], #0x4\n"
"ldr s26, [x10], #0x4\n"
- "ldr q25, [x16, #0x0]\n"
+ "ldr q25, [x17, #0x0]\n"
+ "ldr q24, [x17, #0x10]\n"
".inst 0x6f9de328 // udot v8.4s, v25.16b, v29.4b[0]\n"
- "ldr q24, [x16, #0x10]\n"
".inst 0x6f9ce32c // udot v12.4s, v25.16b, v28.4b[0]\n"
".inst 0x6f9be330 // udot v16.4s, v25.16b, v27.4b[0]\n"
".inst 0x6f9ae334 // udot v20.4s, v25.16b, v26.4b[0]\n"
- "ldr q25, [x16, #0x20]\n"
+ "ldr q25, [x17, #0x20]\n"
".inst 0x6f9de309 // udot v9.4s, v24.16b, v29.4b[0]\n"
".inst 0x6f9ce30d // udot v13.4s, v24.16b, v28.4b[0]\n"
".inst 0x6f9be311 // udot v17.4s, v24.16b, v27.4b[0]\n"
".inst 0x6f9ae315 // udot v21.4s, v24.16b, v26.4b[0]\n"
- "ldr q24, [x16, #0x30]\n"
+ "ldr q24, [x17, #0x30]\n"
".inst 0x6f9de32a // udot v10.4s, v25.16b, v29.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f9ce32e // udot v14.4s, v25.16b, v28.4b[0]\n"
".inst 0x6f9be332 // udot v18.4s, v25.16b, v27.4b[0]\n"
".inst 0x6f9ae336 // udot v22.4s, v25.16b, v26.4b[0]\n"
@@ -1783,20 +1783,20 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr b2, [x11, #0x0]\n"
"ldr b3, [x10, #0x0]\n"
"125:" // Height 4: Multiply loop: Ragged operand read: Done
- "ldr q25, [x16, #0x0]\n"
+ "ldr q25, [x17, #0x0]\n"
+ "ldr q24, [x17, #0x10]\n"
".inst 0x6f80e328 // udot v8.4s, v25.16b, v0.4b[0]\n"
- "ldr q24, [x16, #0x10]\n"
".inst 0x6f81e32c // udot v12.4s, v25.16b, v1.4b[0]\n"
".inst 0x6f82e330 // udot v16.4s, v25.16b, v2.4b[0]\n"
".inst 0x6f83e334 // udot v20.4s, v25.16b, v3.4b[0]\n"
- "ldr q25, [x16, #0x20]\n"
+ "ldr q25, [x17, #0x20]\n"
".inst 0x6f80e309 // udot v9.4s, v24.16b, v0.4b[0]\n"
".inst 0x6f81e30d // udot v13.4s, v24.16b, v1.4b[0]\n"
".inst 0x6f82e311 // udot v17.4s, v24.16b, v2.4b[0]\n"
".inst 0x6f83e315 // udot v21.4s, v24.16b, v3.4b[0]\n"
- "ldr q24, [x16, #0x30]\n"
+ "ldr q24, [x17, #0x30]\n"
".inst 0x6f80e32a // udot v10.4s, v25.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f81e32e // udot v14.4s, v25.16b, v1.4b[0]\n"
".inst 0x6f82e332 // udot v18.4s, v25.16b, v2.4b[0]\n"
".inst 0x6f83e336 // udot v22.4s, v25.16b, v3.4b[0]\n"
@@ -1810,18 +1810,18 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 116b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"prfm pstl1keep, [x22, #0x0]\n"
"bge 135f\n"
"tbz x8, #3, 130f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
@@ -1829,96 +1829,96 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"st1 { v20.4s }, [x22], #0x10\n"
"st1 { v21.4s }, [x22], #0x10\n"
"tbz x8, #2, 128f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"st1 { v22.4s }, [x22], #0x10\n"
"tbz x8, #1, 127f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"str d19, [x23], #0x8\n"
"str d23, [x22], #0x8\n"
"tbz x8, #0, 134f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v23.s }[2], [x22]\n"
"b 134f\n"
"127:" // Height 4: Partial direct writeback: partial_1_12
"tbz x8, #0, 134f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"str s19, [x23, #0x0]\n"
"str s23, [x22, #0x0]\n"
"b 134f\n"
"128:" // Height 4: Partial direct writeback: partial_2_8
"tbz x8, #1, 129f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"str d18, [x23], #0x8\n"
"str d22, [x22], #0x8\n"
"tbz x8, #0, 134f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"st1 { v18.s }[2], [x23]\n"
"st1 { v22.s }[2], [x22]\n"
"b 134f\n"
"129:" // Height 4: Partial direct writeback: partial_1_8
"tbz x8, #0, 134f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"str s18, [x23, #0x0]\n"
"str s22, [x22, #0x0]\n"
"b 134f\n"
"130:" // Height 4: Partial direct writeback: partial_4_0
"tbz x8, #2, 132f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"tbz x8, #1, 131f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"str d17, [x23], #0x8\n"
"str d21, [x22], #0x8\n"
"tbz x8, #0, 134f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"st1 { v17.s }[2], [x23]\n"
"st1 { v21.s }[2], [x22]\n"
"b 134f\n"
"131:" // Height 4: Partial direct writeback: partial_1_4
"tbz x8, #0, 134f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"str s17, [x23, #0x0]\n"
"str s21, [x22, #0x0]\n"
"b 134f\n"
"132:" // Height 4: Partial direct writeback: partial_2_0
"tbz x8, #1, 133f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"str d16, [x23], #0x8\n"
"str d20, [x22], #0x8\n"
"tbz x8, #0, 134f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
"b 134f\n"
"133:" // Height 4: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"str s16, [x23, #0x0]\n"
"str s20, [x22, #0x0]\n"
"134:" // Height 4: Partial direct writeback: Done
"b 136f\n"
"135:" // Height 4: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -1937,43 +1937,43 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 206f\n"
"137:" // Height 5
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"138:" // Height 5: Column loop
"tbz %x[flags], #0, 148f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
+ "cmp x8, #0x10\n"
+ "add x24, x16, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x8, #0x10\n"
"add x21, x22, x20, LSL #2\n"
"bge 147f\n"
"tbz x8, #3, 142f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v24.4s }, [x21], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
"ld1 { v25.4s }, [x21], #0x10\n"
"tbz x8, #2, 140f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
"ld1 { v26.4s }, [x21], #0x10\n"
"tbz x8, #1, 139f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"ldr d19, [x23], #0x8\n"
"ldr d23, [x22], #0x8\n"
"ldr d27, [x21], #0x8\n"
"tbz x8, #0, 146f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"ld1 { v19.s }[2], [x23]\n"
"ld1 { v23.s }[2], [x22]\n"
@@ -1982,7 +1982,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"139:" // Height 5: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 146f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"ldr s23, [x22, #0x0]\n"
@@ -1990,14 +1990,14 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 146f\n"
"140:" // Height 5: Partial accumulate: partial_2_8
"tbz x8, #1, 141f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"ldr d18, [x23], #0x8\n"
"ldr d22, [x22], #0x8\n"
"ldr d26, [x21], #0x8\n"
"tbz x8, #0, 146f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"ld1 { v18.s }[2], [x23]\n"
"ld1 { v22.s }[2], [x22]\n"
@@ -2006,7 +2006,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"141:" // Height 5: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 146f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"ldr s22, [x22, #0x0]\n"
@@ -2014,20 +2014,20 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 146f\n"
"142:" // Height 5: Partial accumulate: partial_4_0
"tbz x8, #2, 144f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v24.4s }, [x21], #0x10\n"
"tbz x8, #1, 143f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"ldr d17, [x23], #0x8\n"
"ldr d21, [x22], #0x8\n"
"ldr d25, [x21], #0x8\n"
"tbz x8, #0, 146f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"ld1 { v17.s }[2], [x23]\n"
"ld1 { v21.s }[2], [x22]\n"
@@ -2036,7 +2036,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"143:" // Height 5: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 146f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"ldr s21, [x22, #0x0]\n"
@@ -2044,34 +2044,34 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 146f\n"
"144:" // Height 5: Partial accumulate: partial_2_0
"tbz x8, #1, 145f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"ldr d16, [x23], #0x8\n"
"ldr d20, [x22], #0x8\n"
"ldr d24, [x21], #0x8\n"
"tbz x8, #0, 146f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"ld1 { v16.s }[2], [x23]\n"
"ld1 { v20.s }[2], [x22]\n"
"ld1 { v24.s }[2], [x21]\n"
"b 146f\n"
"145:" // Height 5: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
"ldr s20, [x22, #0x0]\n"
"ldr s24, [x21, #0x0]\n"
"146:" // Height 5: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 149f\n"
"147:" // Height 5: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -2114,8 +2114,8 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"mov x15, #0x0\n"
"150:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 151f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2147,148 +2147,148 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr q2, [x11, #0x0]\n"
"ldr q3, [x10, #0x0]\n"
"ldr q4, [x9, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 154f\n"
"153:" // Height 5: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x21, [x17, #0x28]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x20, [x17, #0x38]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
"add x13, x13, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
"add x12, x12, #0x10\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr d29, [x16, #0x20]\n"
+ "ldr d29, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v29.d[1], x21\n"
+ "add x11, x11, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x48]\n"
+ "add x10, x10, #0x10\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "add x11, x11, #0x10\n"
+ "mov v29.d[1], x21\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "ldr x21, [x17, #0x48]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr d28, [x16, #0x30]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0x30]\n"
".inst 0x6f80e3aa // udot v10.4s, v29.16b, v0.4b[0]\n"
+ "add x9, x9, #0x10\n"
".inst 0x6f81e3ae // udot v14.4s, v29.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x58]\n"
+ "ldr x26, [x13, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x6f82e3b2 // udot v18.4s, v29.16b, v2.4b[0]\n"
- "add x9, x9, #0x10\n"
".inst 0x6f83e3b6 // udot v22.4s, v29.16b, v3.4b[0]\n"
- "ldr x26, [x13, #0x8]\n"
+ "ldr x20, [x17, #0x58]\n"
".inst 0x6f84e3ba // udot v26.4s, v29.16b, v4.4b[0]\n"
- "ldr d29, [x16, #0x40]\n"
+ "ldr d29, [x17, #0x40]\n"
".inst 0x6f80e38b // udot v11.4s, v28.16b, v0.4b[0]\n"
- "mov v29.d[1], x21\n"
+ "ldr x25, [x12, #0x8]\n"
".inst 0x6f81e38f // udot v15.4s, v28.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x68]\n"
+ "ldr x24, [x11, #0x8]\n"
".inst 0x6f82e393 // udot v19.4s, v28.16b, v2.4b[0]\n"
- "ldr x25, [x12, #0x8]\n"
+ "mov v29.d[1], x21\n"
".inst 0x6f83e397 // udot v23.4s, v28.16b, v3.4b[0]\n"
- "ldr x24, [x11, #0x8]\n"
+ "ldr x21, [x17, #0x68]\n"
".inst 0x6f84e39b // udot v27.4s, v28.16b, v4.4b[0]\n"
- "ldr d28, [x16, #0x50]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0x50]\n"
".inst 0x6fa0e3a8 // udot v8.4s, v29.16b, v0.4b[1]\n"
+ "ldr x23, [x10, #0x8]\n"
".inst 0x6fa1e3ac // udot v12.4s, v29.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "ldr x22, [x9, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x6fa2e3b0 // udot v16.4s, v29.16b, v2.4b[1]\n"
- "ldr x23, [x10, #0x8]\n"
".inst 0x6fa3e3b4 // udot v20.4s, v29.16b, v3.4b[1]\n"
- "ldr x22, [x9, #0x8]\n"
+ "ldr x20, [x17, #0x78]\n"
".inst 0x6fa4e3b8 // udot v24.4s, v29.16b, v4.4b[1]\n"
- "ldr d29, [x16, #0x60]\n"
+ "ldr d29, [x17, #0x60]\n"
".inst 0x6fa0e389 // udot v9.4s, v28.16b, v0.4b[1]\n"
- "mov v29.d[1], x21\n"
+ "sub x14, x14, #0x10\n"
".inst 0x6fa1e38d // udot v13.4s, v28.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0x88]\n"
+ "cmp x14, #0x20\n"
".inst 0x6fa2e391 // udot v17.4s, v28.16b, v2.4b[1]\n"
- "sub x14, x14, #0x10\n"
+ "mov v29.d[1], x21\n"
".inst 0x6fa3e395 // udot v21.4s, v28.16b, v3.4b[1]\n"
- "cmp x14, #0x20\n"
+ "ldr x21, [x17, #0x88]\n"
".inst 0x6fa4e399 // udot v25.4s, v28.16b, v4.4b[1]\n"
- "ldr d28, [x16, #0x70]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0x70]\n"
".inst 0x6fa0e3aa // udot v10.4s, v29.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6fa1e3ae // udot v14.4s, v29.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x98]\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v28.d[1], x20\n"
".inst 0x6fa2e3b2 // udot v18.4s, v29.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6fa3e3b6 // udot v22.4s, v29.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "ldr x20, [x17, #0x98]\n"
".inst 0x6fa4e3ba // udot v26.4s, v29.16b, v4.4b[1]\n"
- "ldr d29, [x16, #0x80]\n"
+ "ldr d29, [x17, #0x80]\n"
".inst 0x6fa0e38b // udot v11.4s, v28.16b, v0.4b[1]\n"
- "mov v29.d[1], x21\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x6fa1e38f // udot v15.4s, v28.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0xa8]\n"
+ "prfm pldl1keep, [x10, #0x80]\n"
".inst 0x6fa2e393 // udot v19.4s, v28.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "mov v29.d[1], x21\n"
".inst 0x6fa3e397 // udot v23.4s, v28.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x10, #0x80]\n"
+ "ldr x21, [x17, #0xa8]\n"
".inst 0x6fa4e39b // udot v27.4s, v28.16b, v4.4b[1]\n"
- "ldr d28, [x16, #0x90]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0x90]\n"
".inst 0x6f80eba8 // udot v8.4s, v29.16b, v0.4b[2]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x6f81ebac // udot v12.4s, v29.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x6f82ebb0 // udot v16.4s, v29.16b, v2.4b[2]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x6f83ebb4 // udot v20.4s, v29.16b, v3.4b[2]\n"
+ "ldr x20, [x17, #0xb8]\n"
".inst 0x6f84ebb8 // udot v24.4s, v29.16b, v4.4b[2]\n"
- "ldr d29, [x16, #0xa0]\n"
+ "ldr d29, [x17, #0xa0]\n"
".inst 0x6f80eb89 // udot v9.4s, v28.16b, v0.4b[2]\n"
- "mov v29.d[1], x21\n"
".inst 0x6f81eb8d // udot v13.4s, v28.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xc8]\n"
".inst 0x6f82eb91 // udot v17.4s, v28.16b, v2.4b[2]\n"
+ "mov v29.d[1], x21\n"
".inst 0x6f83eb95 // udot v21.4s, v28.16b, v3.4b[2]\n"
+ "ldr x21, [x17, #0xc8]\n"
".inst 0x6f84eb99 // udot v25.4s, v28.16b, v4.4b[2]\n"
- "ldr d28, [x16, #0xb0]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0xb0]\n"
".inst 0x6f80ebaa // udot v10.4s, v29.16b, v0.4b[2]\n"
".inst 0x6f81ebae // udot v14.4s, v29.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xd8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x6f82ebb2 // udot v18.4s, v29.16b, v2.4b[2]\n"
".inst 0x6f83ebb6 // udot v22.4s, v29.16b, v3.4b[2]\n"
+ "ldr x20, [x17, #0xd8]\n"
".inst 0x6f84ebba // udot v26.4s, v29.16b, v4.4b[2]\n"
- "ldr d29, [x16, #0xc0]\n"
+ "ldr d29, [x17, #0xc0]\n"
".inst 0x6f80eb8b // udot v11.4s, v28.16b, v0.4b[2]\n"
- "mov v29.d[1], x21\n"
".inst 0x6f81eb8f // udot v15.4s, v28.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xe8]\n"
".inst 0x6f82eb93 // udot v19.4s, v28.16b, v2.4b[2]\n"
+ "mov v29.d[1], x21\n"
".inst 0x6f83eb97 // udot v23.4s, v28.16b, v3.4b[2]\n"
+ "ldr x21, [x17, #0xe8]\n"
".inst 0x6f84eb9b // udot v27.4s, v28.16b, v4.4b[2]\n"
- "ldr d28, [x16, #0xd0]\n"
- "mov v28.d[1], x20\n"
+ "ldr d28, [x17, #0xd0]\n"
".inst 0x6fa0eba8 // udot v8.4s, v29.16b, v0.4b[3]\n"
".inst 0x6fa1ebac // udot v12.4s, v29.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x6fa2ebb0 // udot v16.4s, v29.16b, v2.4b[3]\n"
".inst 0x6fa3ebb4 // udot v20.4s, v29.16b, v3.4b[3]\n"
+ "ldr x20, [x17, #0xf8]\n"
".inst 0x6fa4ebb8 // udot v24.4s, v29.16b, v4.4b[3]\n"
- "ldr d29, [x16, #0xe0]\n"
+ "ldr d29, [x17, #0xe0]\n"
".inst 0x6fa0eb89 // udot v9.4s, v28.16b, v0.4b[3]\n"
- "mov v29.d[1], x21\n"
".inst 0x6fa1eb8d // udot v13.4s, v28.16b, v1.4b[3]\n"
".inst 0x6fa2eb91 // udot v17.4s, v28.16b, v2.4b[3]\n"
+ "mov v29.d[1], x21\n"
".inst 0x6fa3eb95 // udot v21.4s, v28.16b, v3.4b[3]\n"
".inst 0x6fa4eb99 // udot v25.4s, v28.16b, v4.4b[3]\n"
- "ldr d28, [x16, #0xf0]\n"
- "mov v28.d[1], x20\n"
- "add x16, x16, #0x100\n"
+ "ldr d28, [x17, #0xf0]\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa0ebaa // udot v10.4s, v29.16b, v0.4b[3]\n"
- "ldr x21, [x16, #0x8]\n"
".inst 0x6fa1ebae // udot v14.4s, v29.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v28.d[1], x20\n"
".inst 0x6fa2ebb2 // udot v18.4s, v29.16b, v2.4b[3]\n"
".inst 0x6fa3ebb6 // udot v22.4s, v29.16b, v3.4b[3]\n"
+ "ldr x20, [x17, #0x18]\n"
".inst 0x6fa4ebba // udot v26.4s, v29.16b, v4.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x6fa0eb8b // udot v11.4s, v28.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x6fa1eb8f // udot v15.4s, v28.16b, v1.4b[3]\n"
@@ -2299,7 +2299,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr d3, [x10, #0x0]\n"
".inst 0x6fa4eb9b // udot v27.4s, v28.16b, v4.4b[3]\n"
"ldr d4, [x9, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
+ "ldr d7, [x17, #0x10]\n"
"mov v6.d[1], x21\n"
"mov v0.d[1], x26\n"
"mov v1.d[1], x25\n"
@@ -2318,7 +2318,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
"add x10, x10, #0x10\n"
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
- "ldr q29, [x16, #0x20]\n"
+ "ldr q29, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"add x9, x9, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
@@ -2328,7 +2328,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
"prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
- "ldr q28, [x16, #0x30]\n"
+ "ldr q28, [x17, #0x30]\n"
".inst 0x6f80e3aa // udot v10.4s, v29.16b, v0.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x6f81e3ae // udot v14.4s, v29.16b, v1.4b[0]\n"
@@ -2337,75 +2337,75 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"prfm pldl1keep, [x9, #0x80]\n"
".inst 0x6f83e3b6 // udot v22.4s, v29.16b, v3.4b[0]\n"
".inst 0x6f84e3ba // udot v26.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x16, #0x40]\n"
+ "ldr q29, [x17, #0x40]\n"
".inst 0x6f80e38b // udot v11.4s, v28.16b, v0.4b[0]\n"
".inst 0x6f81e38f // udot v15.4s, v28.16b, v1.4b[0]\n"
".inst 0x6f82e393 // udot v19.4s, v28.16b, v2.4b[0]\n"
".inst 0x6f83e397 // udot v23.4s, v28.16b, v3.4b[0]\n"
".inst 0x6f84e39b // udot v27.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x16, #0x50]\n"
+ "ldr q28, [x17, #0x50]\n"
".inst 0x6fa0e3a8 // udot v8.4s, v29.16b, v0.4b[1]\n"
".inst 0x6fa1e3ac // udot v12.4s, v29.16b, v1.4b[1]\n"
".inst 0x6fa2e3b0 // udot v16.4s, v29.16b, v2.4b[1]\n"
".inst 0x6fa3e3b4 // udot v20.4s, v29.16b, v3.4b[1]\n"
".inst 0x6fa4e3b8 // udot v24.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x16, #0x60]\n"
+ "ldr q29, [x17, #0x60]\n"
".inst 0x6fa0e389 // udot v9.4s, v28.16b, v0.4b[1]\n"
".inst 0x6fa1e38d // udot v13.4s, v28.16b, v1.4b[1]\n"
".inst 0x6fa2e391 // udot v17.4s, v28.16b, v2.4b[1]\n"
".inst 0x6fa3e395 // udot v21.4s, v28.16b, v3.4b[1]\n"
".inst 0x6fa4e399 // udot v25.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x16, #0x70]\n"
+ "ldr q28, [x17, #0x70]\n"
".inst 0x6fa0e3aa // udot v10.4s, v29.16b, v0.4b[1]\n"
".inst 0x6fa1e3ae // udot v14.4s, v29.16b, v1.4b[1]\n"
".inst 0x6fa2e3b2 // udot v18.4s, v29.16b, v2.4b[1]\n"
".inst 0x6fa3e3b6 // udot v22.4s, v29.16b, v3.4b[1]\n"
".inst 0x6fa4e3ba // udot v26.4s, v29.16b, v4.4b[1]\n"
- "ldr q29, [x16, #0x80]\n"
+ "ldr q29, [x17, #0x80]\n"
".inst 0x6fa0e38b // udot v11.4s, v28.16b, v0.4b[1]\n"
".inst 0x6fa1e38f // udot v15.4s, v28.16b, v1.4b[1]\n"
".inst 0x6fa2e393 // udot v19.4s, v28.16b, v2.4b[1]\n"
".inst 0x6fa3e397 // udot v23.4s, v28.16b, v3.4b[1]\n"
".inst 0x6fa4e39b // udot v27.4s, v28.16b, v4.4b[1]\n"
- "ldr q28, [x16, #0x90]\n"
+ "ldr q28, [x17, #0x90]\n"
".inst 0x6f80eba8 // udot v8.4s, v29.16b, v0.4b[2]\n"
".inst 0x6f81ebac // udot v12.4s, v29.16b, v1.4b[2]\n"
".inst 0x6f82ebb0 // udot v16.4s, v29.16b, v2.4b[2]\n"
".inst 0x6f83ebb4 // udot v20.4s, v29.16b, v3.4b[2]\n"
".inst 0x6f84ebb8 // udot v24.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x16, #0xa0]\n"
+ "ldr q29, [x17, #0xa0]\n"
".inst 0x6f80eb89 // udot v9.4s, v28.16b, v0.4b[2]\n"
".inst 0x6f81eb8d // udot v13.4s, v28.16b, v1.4b[2]\n"
".inst 0x6f82eb91 // udot v17.4s, v28.16b, v2.4b[2]\n"
".inst 0x6f83eb95 // udot v21.4s, v28.16b, v3.4b[2]\n"
".inst 0x6f84eb99 // udot v25.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x16, #0xb0]\n"
+ "ldr q28, [x17, #0xb0]\n"
".inst 0x6f80ebaa // udot v10.4s, v29.16b, v0.4b[2]\n"
".inst 0x6f81ebae // udot v14.4s, v29.16b, v1.4b[2]\n"
".inst 0x6f82ebb2 // udot v18.4s, v29.16b, v2.4b[2]\n"
".inst 0x6f83ebb6 // udot v22.4s, v29.16b, v3.4b[2]\n"
".inst 0x6f84ebba // udot v26.4s, v29.16b, v4.4b[2]\n"
- "ldr q29, [x16, #0xc0]\n"
+ "ldr q29, [x17, #0xc0]\n"
".inst 0x6f80eb8b // udot v11.4s, v28.16b, v0.4b[2]\n"
".inst 0x6f81eb8f // udot v15.4s, v28.16b, v1.4b[2]\n"
".inst 0x6f82eb93 // udot v19.4s, v28.16b, v2.4b[2]\n"
".inst 0x6f83eb97 // udot v23.4s, v28.16b, v3.4b[2]\n"
".inst 0x6f84eb9b // udot v27.4s, v28.16b, v4.4b[2]\n"
- "ldr q28, [x16, #0xd0]\n"
+ "ldr q28, [x17, #0xd0]\n"
".inst 0x6fa0eba8 // udot v8.4s, v29.16b, v0.4b[3]\n"
".inst 0x6fa1ebac // udot v12.4s, v29.16b, v1.4b[3]\n"
".inst 0x6fa2ebb0 // udot v16.4s, v29.16b, v2.4b[3]\n"
".inst 0x6fa3ebb4 // udot v20.4s, v29.16b, v3.4b[3]\n"
".inst 0x6fa4ebb8 // udot v24.4s, v29.16b, v4.4b[3]\n"
- "ldr q29, [x16, #0xe0]\n"
+ "ldr q29, [x17, #0xe0]\n"
".inst 0x6fa0eb89 // udot v9.4s, v28.16b, v0.4b[3]\n"
".inst 0x6fa1eb8d // udot v13.4s, v28.16b, v1.4b[3]\n"
".inst 0x6fa2eb91 // udot v17.4s, v28.16b, v2.4b[3]\n"
".inst 0x6fa3eb95 // udot v21.4s, v28.16b, v3.4b[3]\n"
".inst 0x6fa4eb99 // udot v25.4s, v28.16b, v4.4b[3]\n"
- "ldr q28, [x16, #0xf0]\n"
+ "ldr q28, [x17, #0xf0]\n"
".inst 0x6fa0ebaa // udot v10.4s, v29.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa1ebae // udot v14.4s, v29.16b, v1.4b[3]\n"
".inst 0x6fa2ebb2 // udot v18.4s, v29.16b, v2.4b[3]\n"
".inst 0x6fa3ebb6 // udot v22.4s, v29.16b, v3.4b[3]\n"
@@ -2427,22 +2427,22 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr s0, [x11], #0x4\n"
"ldr s31, [x10], #0x4\n"
"ldr s30, [x9], #0x4\n"
- "ldr q29, [x16, #0x0]\n"
+ "ldr q29, [x17, #0x0]\n"
+ "ldr q28, [x17, #0x10]\n"
".inst 0x6f82e3a8 // udot v8.4s, v29.16b, v2.4b[0]\n"
- "ldr q28, [x16, #0x10]\n"
".inst 0x6f81e3ac // udot v12.4s, v29.16b, v1.4b[0]\n"
".inst 0x6f80e3b0 // udot v16.4s, v29.16b, v0.4b[0]\n"
".inst 0x6f9fe3b4 // udot v20.4s, v29.16b, v31.4b[0]\n"
".inst 0x6f9ee3b8 // udot v24.4s, v29.16b, v30.4b[0]\n"
- "ldr q29, [x16, #0x20]\n"
+ "ldr q29, [x17, #0x20]\n"
".inst 0x6f82e389 // udot v9.4s, v28.16b, v2.4b[0]\n"
".inst 0x6f81e38d // udot v13.4s, v28.16b, v1.4b[0]\n"
".inst 0x6f80e391 // udot v17.4s, v28.16b, v0.4b[0]\n"
".inst 0x6f9fe395 // udot v21.4s, v28.16b, v31.4b[0]\n"
".inst 0x6f9ee399 // udot v25.4s, v28.16b, v30.4b[0]\n"
- "ldr q28, [x16, #0x30]\n"
+ "ldr q28, [x17, #0x30]\n"
".inst 0x6f82e3aa // udot v10.4s, v29.16b, v2.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f81e3ae // udot v14.4s, v29.16b, v1.4b[0]\n"
".inst 0x6f80e3b2 // udot v18.4s, v29.16b, v0.4b[0]\n"
".inst 0x6f9fe3b6 // udot v22.4s, v29.16b, v31.4b[0]\n"
@@ -2475,22 +2475,22 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr b3, [x10, #0x0]\n"
"ldr b4, [x9, #0x0]\n"
"159:" // Height 5: Multiply loop: Ragged operand read: Done
- "ldr q29, [x16, #0x0]\n"
+ "ldr q29, [x17, #0x0]\n"
+ "ldr q28, [x17, #0x10]\n"
".inst 0x6f80e3a8 // udot v8.4s, v29.16b, v0.4b[0]\n"
- "ldr q28, [x16, #0x10]\n"
".inst 0x6f81e3ac // udot v12.4s, v29.16b, v1.4b[0]\n"
".inst 0x6f82e3b0 // udot v16.4s, v29.16b, v2.4b[0]\n"
".inst 0x6f83e3b4 // udot v20.4s, v29.16b, v3.4b[0]\n"
".inst 0x6f84e3b8 // udot v24.4s, v29.16b, v4.4b[0]\n"
- "ldr q29, [x16, #0x20]\n"
+ "ldr q29, [x17, #0x20]\n"
".inst 0x6f80e389 // udot v9.4s, v28.16b, v0.4b[0]\n"
".inst 0x6f81e38d // udot v13.4s, v28.16b, v1.4b[0]\n"
".inst 0x6f82e391 // udot v17.4s, v28.16b, v2.4b[0]\n"
".inst 0x6f83e395 // udot v21.4s, v28.16b, v3.4b[0]\n"
".inst 0x6f84e399 // udot v25.4s, v28.16b, v4.4b[0]\n"
- "ldr q28, [x16, #0x30]\n"
+ "ldr q28, [x17, #0x30]\n"
".inst 0x6f80e3aa // udot v10.4s, v29.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f81e3ae // udot v14.4s, v29.16b, v1.4b[0]\n"
".inst 0x6f82e3b2 // udot v18.4s, v29.16b, v2.4b[0]\n"
".inst 0x6f83e3b6 // udot v22.4s, v29.16b, v3.4b[0]\n"
@@ -2506,20 +2506,20 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 150b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
"prfm pstl1keep, [x21, #0x0]\n"
"bge 169f\n"
"tbz x8, #3, 164f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
@@ -2529,19 +2529,19 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"st1 { v24.4s }, [x21], #0x10\n"
"st1 { v25.4s }, [x21], #0x10\n"
"tbz x8, #2, 162f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"st1 { v22.4s }, [x22], #0x10\n"
"st1 { v26.4s }, [x21], #0x10\n"
"tbz x8, #1, 161f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"str d19, [x23], #0x8\n"
"str d23, [x22], #0x8\n"
"str d27, [x21], #0x8\n"
"tbz x8, #0, 168f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v23.s }[2], [x22]\n"
@@ -2549,7 +2549,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 168f\n"
"161:" // Height 5: Partial direct writeback: partial_1_12
"tbz x8, #0, 168f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"str s19, [x23, #0x0]\n"
"str s23, [x22, #0x0]\n"
@@ -2557,13 +2557,13 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 168f\n"
"162:" // Height 5: Partial direct writeback: partial_2_8
"tbz x8, #1, 163f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"str d18, [x23], #0x8\n"
"str d22, [x22], #0x8\n"
"str d26, [x21], #0x8\n"
"tbz x8, #0, 168f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"st1 { v18.s }[2], [x23]\n"
"st1 { v22.s }[2], [x22]\n"
@@ -2571,7 +2571,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 168f\n"
"163:" // Height 5: Partial direct writeback: partial_1_8
"tbz x8, #0, 168f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"str s18, [x23, #0x0]\n"
"str s22, [x22, #0x0]\n"
@@ -2579,19 +2579,19 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 168f\n"
"164:" // Height 5: Partial direct writeback: partial_4_0
"tbz x8, #2, 166f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"st1 { v24.4s }, [x21], #0x10\n"
"tbz x8, #1, 165f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"str d17, [x23], #0x8\n"
"str d21, [x22], #0x8\n"
"str d25, [x21], #0x8\n"
"tbz x8, #0, 168f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"st1 { v17.s }[2], [x23]\n"
"st1 { v21.s }[2], [x22]\n"
@@ -2599,7 +2599,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 168f\n"
"165:" // Height 5: Partial direct writeback: partial_1_4
"tbz x8, #0, 168f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"str s17, [x23, #0x0]\n"
"str s21, [x22, #0x0]\n"
@@ -2607,20 +2607,20 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 168f\n"
"166:" // Height 5: Partial direct writeback: partial_2_0
"tbz x8, #1, 167f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"str d16, [x23], #0x8\n"
"str d20, [x22], #0x8\n"
"str d24, [x21], #0x8\n"
"tbz x8, #0, 168f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
"st1 { v24.s }[2], [x21]\n"
"b 168f\n"
"167:" // Height 5: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"str s16, [x23, #0x0]\n"
"str s20, [x22, #0x0]\n"
@@ -2628,11 +2628,11 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"168:" // Height 5: Partial direct writeback: Done
"b 170f\n"
"169:" // Height 5: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -2656,42 +2656,43 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"171:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"mov x20, #0x18\n"
+ "ldr x16, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"ldr x8, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x17, %x[output_ptr]\n"
- "ldr x16, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "ldr x17, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x16\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"172:" // Height 6: Column loop
"tbz %x[flags], #0, 182f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
+ "cmp x8, #0x10\n"
+ "add x24, x16, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "cmp x8, #0x10\n"
"add x20, x21, x20, LSL #2\n"
"bge 181f\n"
"tbz x8, #3, 176f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v24.4s }, [x21], #0x10\n"
"ld1 { v28.4s }, [x20], #0x10\n"
- "ld1 { v9.4s }, [x17], #0x10\n"
+ "ld1 { v9.4s }, [x16], #0x10\n"
"ld1 { v13.4s }, [x24], #0x10\n"
"ld1 { v17.4s }, [x23], #0x10\n"
"ld1 { v21.4s }, [x22], #0x10\n"
"ld1 { v25.4s }, [x21], #0x10\n"
"ld1 { v29.4s }, [x20], #0x10\n"
"tbz x8, #2, 174f\n"
- "ld1 { v10.4s }, [x17], #0x10\n"
+ "ld1 { v10.4s }, [x16], #0x10\n"
"ld1 { v14.4s }, [x24], #0x10\n"
"ld1 { v18.4s }, [x23], #0x10\n"
"ld1 { v22.4s }, [x22], #0x10\n"
"ld1 { v26.4s }, [x21], #0x10\n"
"ld1 { v30.4s }, [x20], #0x10\n"
"tbz x8, #1, 173f\n"
- "ldr d11, [x17], #0x8\n"
+ "ldr d11, [x16], #0x8\n"
"mov x25, #0x38\n"
"ldr d15, [x24], #0x8\n"
"ldr d19, [x23], #0x8\n"
@@ -2699,7 +2700,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr d27, [x21], #0x8\n"
"ldr d31, [x20], #0x8\n"
"tbz x8, #0, 180f\n"
- "ld1 { v11.s }[2], [x17]\n"
+ "ld1 { v11.s }[2], [x16]\n"
"ld1 { v15.s }[2], [x24]\n"
"ld1 { v19.s }[2], [x23]\n"
"ld1 { v23.s }[2], [x22]\n"
@@ -2709,7 +2710,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"173:" // Height 6: Partial accumulate: partial_1_12
"mov x25, #0x30\n"
"tbz x8, #0, 180f\n"
- "ldr s11, [x17, #0x0]\n"
+ "ldr s11, [x16, #0x0]\n"
"ldr s15, [x24, #0x0]\n"
"ldr s19, [x23, #0x0]\n"
"ldr s23, [x22, #0x0]\n"
@@ -2718,7 +2719,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 180f\n"
"174:" // Height 6: Partial accumulate: partial_2_8
"tbz x8, #1, 175f\n"
- "ldr d10, [x17], #0x8\n"
+ "ldr d10, [x16], #0x8\n"
"mov x25, #0x28\n"
"ldr d14, [x24], #0x8\n"
"ldr d18, [x23], #0x8\n"
@@ -2726,7 +2727,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr d26, [x21], #0x8\n"
"ldr d30, [x20], #0x8\n"
"tbz x8, #0, 180f\n"
- "ld1 { v10.s }[2], [x17]\n"
+ "ld1 { v10.s }[2], [x16]\n"
"ld1 { v14.s }[2], [x24]\n"
"ld1 { v18.s }[2], [x23]\n"
"ld1 { v22.s }[2], [x22]\n"
@@ -2736,7 +2737,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"175:" // Height 6: Partial accumulate: partial_1_8
"mov x25, #0x20\n"
"tbz x8, #0, 180f\n"
- "ldr s10, [x17, #0x0]\n"
+ "ldr s10, [x16, #0x0]\n"
"ldr s14, [x24, #0x0]\n"
"ldr s18, [x23, #0x0]\n"
"ldr s22, [x22, #0x0]\n"
@@ -2745,14 +2746,14 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 180f\n"
"176:" // Height 6: Partial accumulate: partial_4_0
"tbz x8, #2, 178f\n"
- "ld1 { v8.4s }, [x17], #0x10\n"
+ "ld1 { v8.4s }, [x16], #0x10\n"
"ld1 { v12.4s }, [x24], #0x10\n"
"ld1 { v16.4s }, [x23], #0x10\n"
"ld1 { v20.4s }, [x22], #0x10\n"
"ld1 { v24.4s }, [x21], #0x10\n"
"ld1 { v28.4s }, [x20], #0x10\n"
"tbz x8, #1, 177f\n"
- "ldr d9, [x17], #0x8\n"
+ "ldr d9, [x16], #0x8\n"
"mov x25, #0x18\n"
"ldr d13, [x24], #0x8\n"
"ldr d17, [x23], #0x8\n"
@@ -2760,7 +2761,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr d25, [x21], #0x8\n"
"ldr d29, [x20], #0x8\n"
"tbz x8, #0, 180f\n"
- "ld1 { v9.s }[2], [x17]\n"
+ "ld1 { v9.s }[2], [x16]\n"
"ld1 { v13.s }[2], [x24]\n"
"ld1 { v17.s }[2], [x23]\n"
"ld1 { v21.s }[2], [x22]\n"
@@ -2770,7 +2771,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"177:" // Height 6: Partial accumulate: partial_1_4
"mov x25, #0x10\n"
"tbz x8, #0, 180f\n"
- "ldr s9, [x17, #0x0]\n"
+ "ldr s9, [x16, #0x0]\n"
"ldr s13, [x24, #0x0]\n"
"ldr s17, [x23, #0x0]\n"
"ldr s21, [x22, #0x0]\n"
@@ -2779,7 +2780,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 180f\n"
"178:" // Height 6: Partial accumulate: partial_2_0
"tbz x8, #1, 179f\n"
- "ldr d8, [x17], #0x8\n"
+ "ldr d8, [x16], #0x8\n"
"mov x25, #0x8\n"
"ldr d12, [x24], #0x8\n"
"ldr d16, [x23], #0x8\n"
@@ -2787,7 +2788,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr d24, [x21], #0x8\n"
"ldr d28, [x20], #0x8\n"
"tbz x8, #0, 180f\n"
- "ld1 { v8.s }[2], [x17]\n"
+ "ld1 { v8.s }[2], [x16]\n"
"ld1 { v12.s }[2], [x24]\n"
"ld1 { v16.s }[2], [x23]\n"
"ld1 { v20.s }[2], [x22]\n"
@@ -2795,7 +2796,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ld1 { v28.s }[2], [x20]\n"
"b 180f\n"
"179:" // Height 6: Partial accumulate: partial_1_0
- "ldr s8, [x17, #0x0]\n"
+ "ldr s8, [x16, #0x0]\n"
"mov x25, #0x0\n"
"ldr s12, [x24, #0x0]\n"
"ldr s16, [x23, #0x0]\n"
@@ -2803,13 +2804,13 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr s24, [x21, #0x0]\n"
"ldr s28, [x20, #0x0]\n"
"180:" // Height 6: Partial accumulate: Done
- "sub x17, x17, x25\n"
+ "sub x16, x16, x25\n"
"b 183f\n"
"181:" // Height 6: full accumulate
- "ldr q8, [x17, #0x0]\n"
- "ldr q9, [x17, #0x10]\n"
- "ldr q10, [x17, #0x20]\n"
- "ldr q11, [x17, #0x30]\n"
+ "ldr q8, [x16, #0x0]\n"
+ "ldr q9, [x16, #0x10]\n"
+ "ldr q10, [x16, #0x20]\n"
+ "ldr q11, [x16, #0x30]\n"
"ldr q12, [x24, #0x0]\n"
"ldr q13, [x24, #0x10]\n"
"ldr q14, [x24, #0x20]\n"
@@ -2860,8 +2861,8 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"mov x15, #0x0\n"
"184:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w14, [x20, x15, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w14, [x20, x15, LSL #0x2]\n"
"tbz %x[flags], #3, 185f\n"
"ldr x20, [%x[input_ptr], x15, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2897,14 +2898,14 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr q3, [x10, #0x0]\n"
"ldr q4, [x9, #0x0]\n"
"ldr q5, [x28, #0x0]\n"
- "ldr q6, [x16, #0x0]\n"
- "ldr q7, [x16, #0x10]\n"
+ "ldr q6, [x17, #0x0]\n"
+ "ldr q7, [x17, #0x10]\n"
"blt 188f\n"
"187:" // Height 6: Multiply loop: Main loop head
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
- "ldr x21, [x16, #0x28]\n"
+ "ldr x21, [x17, #0x28]\n"
".inst 0x6f81e0cc // udot v12.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x38]\n"
+ "ldr x20, [x17, #0x38]\n"
".inst 0x6f82e0d0 // udot v16.4s, v6.16b, v2.4b[0]\n"
"add x13, x13, #0x10\n"
".inst 0x6f83e0d4 // udot v20.4s, v6.16b, v3.4b[0]\n"
@@ -2912,151 +2913,151 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
"add x11, x11, #0x10\n"
".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x16, #0x20]\n"
+ "ldr d6, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x21\n"
+ "add x10, x10, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x48]\n"
+ "add x9, x9, #0x10\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "add x10, x10, #0x10\n"
+ "mov v6.d[1], x21\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "add x9, x9, #0x10\n"
+ "ldr x21, [x17, #0x48]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
"add x28, x28, #0x10\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr d7, [x16, #0x30]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
+ "ldr x27, [x13, #0x8]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "ldr x20, [x16, #0x58]\n"
+ "ldr x26, [x12, #0x8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
- "ldr x27, [x13, #0x8]\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
- "ldr x26, [x12, #0x8]\n"
+ "ldr x20, [x17, #0x58]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
"ldr x25, [x11, #0x8]\n"
".inst 0x6f85e0de // udot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr d6, [x16, #0x40]\n"
+ "ldr d6, [x17, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
- "mov v6.d[1], x21\n"
+ "ldr x24, [x10, #0x8]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
- "ldr x21, [x16, #0x68]\n"
+ "ldr x23, [x9, #0x8]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
- "ldr x24, [x10, #0x8]\n"
+ "mov v6.d[1], x21\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
- "ldr x23, [x9, #0x8]\n"
+ "ldr x21, [x17, #0x68]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
"ldr x22, [x28, #0x8]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr d7, [x16, #0x50]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
+ "sub x14, x14, #0x10\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x78]\n"
+ "cmp x14, #0x20\n"
+ "mov v7.d[1], x20\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
- "sub x14, x14, #0x10\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
- "cmp x14, #0x20\n"
+ "ldr x20, [x17, #0x78]\n"
".inst 0x6fa4e0d8 // udot v24.4s, v6.16b, v4.4b[1]\n"
"prfm pldl1keep, [x13, #0x80]\n"
".inst 0x6fa5e0dc // udot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr d6, [x16, #0x60]\n"
+ "ldr d6, [x17, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x21\n"
+ "prfm pldl1keep, [x12, #0x80]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0x88]\n"
+ "prfm pldl1keep, [x11, #0x80]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x12, #0x80]\n"
+ "mov v6.d[1], x21\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x11, #0x80]\n"
+ "ldr x21, [x17, #0x88]\n"
".inst 0x6fa4e0f9 // udot v25.4s, v7.16b, v4.4b[1]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x6fa5e0fd // udot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr d7, [x16, #0x70]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
+ "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
- "ldr x20, [x16, #0x98]\n"
+ "prfm pldl1keep, [x28, #0x80]\n"
+ "mov v7.d[1], x20\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
- "prfm pldl1keep, [x9, #0x80]\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
- "prfm pldl1keep, [x28, #0x80]\n"
+ "ldr x20, [x17, #0x98]\n"
".inst 0x6fa4e0da // udot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x6fa5e0de // udot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr d6, [x16, #0x80]\n"
+ "ldr d6, [x17, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
- "mov v6.d[1], x21\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
- "ldr x21, [x16, #0xa8]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
+ "mov v6.d[1], x21\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
+ "ldr x21, [x17, #0xa8]\n"
".inst 0x6fa4e0fb // udot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x6fa5e0ff // udot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr d7, [x16, #0x90]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xb8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
+ "ldr x20, [x17, #0xb8]\n"
".inst 0x6f84e8d8 // udot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8dc // udot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr d6, [x16, #0xa0]\n"
+ "ldr d6, [x17, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x21\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xc8]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x21\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
+ "ldr x21, [x17, #0xc8]\n"
".inst 0x6f84e8f9 // udot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8fd // udot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr d7, [x16, #0xb0]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
- "ldr x20, [x16, #0xd8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
+ "ldr x20, [x17, #0xd8]\n"
".inst 0x6f84e8da // udot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8de // udot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr d6, [x16, #0xc0]\n"
+ "ldr d6, [x17, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
- "mov v6.d[1], x21\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
- "ldr x21, [x16, #0xe8]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
+ "mov v6.d[1], x21\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
+ "ldr x21, [x17, #0xe8]\n"
".inst 0x6f84e8fb // udot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8ff // udot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr d7, [x16, #0xd0]\n"
- "mov v7.d[1], x20\n"
+ "ldr d7, [x17, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0xf8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
+ "ldr x20, [x17, #0xf8]\n"
".inst 0x6fa4e8d8 // udot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x6fa5e8dc // udot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr d6, [x16, #0xe0]\n"
+ "ldr d6, [x17, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
- "mov v6.d[1], x21\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
+ "mov v6.d[1], x21\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8f9 // udot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x6fa5e8fd // udot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr d7, [x16, #0xf0]\n"
- "mov v7.d[1], x20\n"
- "add x16, x16, #0x100\n"
+ "ldr d7, [x17, #0xf0]\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
- "ldr x21, [x16, #0x8]\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
- "ldr x20, [x16, #0x18]\n"
+ "ldr x21, [x17, #0x8]\n"
+ "mov v7.d[1], x20\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d6 // udot v22.4s, v6.16b, v3.4b[3]\n"
+ "ldr x20, [x17, #0x18]\n"
".inst 0x6fa4e8da // udot v26.4s, v6.16b, v4.4b[3]\n"
".inst 0x6fa5e8de // udot v30.4s, v6.16b, v5.4b[3]\n"
- "ldr d6, [x16, #0x0]\n"
+ "ldr d6, [x17, #0x0]\n"
".inst 0x6fa0e8eb // udot v11.4s, v7.16b, v0.4b[3]\n"
"ldr d0, [x13, #0x0]\n"
".inst 0x6fa1e8ef // udot v15.4s, v7.16b, v1.4b[3]\n"
@@ -3069,7 +3070,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr d4, [x9, #0x0]\n"
".inst 0x6fa5e8ff // udot v31.4s, v7.16b, v5.4b[3]\n"
"ldr d5, [x28, #0x0]\n"
- "ldr d7, [x16, #0x10]\n"
+ "ldr d7, [x17, #0x10]\n"
"mov v6.d[1], x21\n"
"mov v0.d[1], x27\n"
"mov v1.d[1], x26\n"
@@ -3091,7 +3092,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f84e0d8 // udot v24.4s, v6.16b, v4.4b[0]\n"
"add x9, x9, #0x10\n"
".inst 0x6f85e0dc // udot v28.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x16, #0x20]\n"
+ "ldr q6, [x17, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"add x28, x28, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
@@ -3103,7 +3104,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
"prfm pldl1keep, [x11, #0x80]\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x16, #0x30]\n"
+ "ldr q7, [x17, #0x30]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
"prfm pldl1keep, [x10, #0x80]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
@@ -3113,86 +3114,86 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f84e0da // udot v26.4s, v6.16b, v4.4b[0]\n"
".inst 0x6f85e0de // udot v30.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x16, #0x40]\n"
+ "ldr q6, [x17, #0x40]\n"
".inst 0x6f80e0eb // udot v11.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ef // udot v15.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f3 // udot v19.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f7 // udot v23.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0fb // udot v27.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0ff // udot v31.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x16, #0x50]\n"
+ "ldr q7, [x17, #0x50]\n"
".inst 0x6fa0e0c8 // udot v8.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0cc // udot v12.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d0 // udot v16.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d4 // udot v20.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0d8 // udot v24.4s, v6.16b, v4.4b[1]\n"
".inst 0x6fa5e0dc // udot v28.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x16, #0x60]\n"
+ "ldr q6, [x17, #0x60]\n"
".inst 0x6fa0e0e9 // udot v9.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ed // udot v13.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f1 // udot v17.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f5 // udot v21.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0f9 // udot v25.4s, v7.16b, v4.4b[1]\n"
".inst 0x6fa5e0fd // udot v29.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x16, #0x70]\n"
+ "ldr q7, [x17, #0x70]\n"
".inst 0x6fa0e0ca // udot v10.4s, v6.16b, v0.4b[1]\n"
".inst 0x6fa1e0ce // udot v14.4s, v6.16b, v1.4b[1]\n"
".inst 0x6fa2e0d2 // udot v18.4s, v6.16b, v2.4b[1]\n"
".inst 0x6fa3e0d6 // udot v22.4s, v6.16b, v3.4b[1]\n"
".inst 0x6fa4e0da // udot v26.4s, v6.16b, v4.4b[1]\n"
".inst 0x6fa5e0de // udot v30.4s, v6.16b, v5.4b[1]\n"
- "ldr q6, [x16, #0x80]\n"
+ "ldr q6, [x17, #0x80]\n"
".inst 0x6fa0e0eb // udot v11.4s, v7.16b, v0.4b[1]\n"
".inst 0x6fa1e0ef // udot v15.4s, v7.16b, v1.4b[1]\n"
".inst 0x6fa2e0f3 // udot v19.4s, v7.16b, v2.4b[1]\n"
".inst 0x6fa3e0f7 // udot v23.4s, v7.16b, v3.4b[1]\n"
".inst 0x6fa4e0fb // udot v27.4s, v7.16b, v4.4b[1]\n"
".inst 0x6fa5e0ff // udot v31.4s, v7.16b, v5.4b[1]\n"
- "ldr q7, [x16, #0x90]\n"
+ "ldr q7, [x17, #0x90]\n"
".inst 0x6f80e8c8 // udot v8.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8cc // udot v12.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d0 // udot v16.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d4 // udot v20.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8d8 // udot v24.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8dc // udot v28.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x16, #0xa0]\n"
+ "ldr q6, [x17, #0xa0]\n"
".inst 0x6f80e8e9 // udot v9.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ed // udot v13.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f1 // udot v17.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f5 // udot v21.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8f9 // udot v25.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8fd // udot v29.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x16, #0xb0]\n"
+ "ldr q7, [x17, #0xb0]\n"
".inst 0x6f80e8ca // udot v10.4s, v6.16b, v0.4b[2]\n"
".inst 0x6f81e8ce // udot v14.4s, v6.16b, v1.4b[2]\n"
".inst 0x6f82e8d2 // udot v18.4s, v6.16b, v2.4b[2]\n"
".inst 0x6f83e8d6 // udot v22.4s, v6.16b, v3.4b[2]\n"
".inst 0x6f84e8da // udot v26.4s, v6.16b, v4.4b[2]\n"
".inst 0x6f85e8de // udot v30.4s, v6.16b, v5.4b[2]\n"
- "ldr q6, [x16, #0xc0]\n"
+ "ldr q6, [x17, #0xc0]\n"
".inst 0x6f80e8eb // udot v11.4s, v7.16b, v0.4b[2]\n"
".inst 0x6f81e8ef // udot v15.4s, v7.16b, v1.4b[2]\n"
".inst 0x6f82e8f3 // udot v19.4s, v7.16b, v2.4b[2]\n"
".inst 0x6f83e8f7 // udot v23.4s, v7.16b, v3.4b[2]\n"
".inst 0x6f84e8fb // udot v27.4s, v7.16b, v4.4b[2]\n"
".inst 0x6f85e8ff // udot v31.4s, v7.16b, v5.4b[2]\n"
- "ldr q7, [x16, #0xd0]\n"
+ "ldr q7, [x17, #0xd0]\n"
".inst 0x6fa0e8c8 // udot v8.4s, v6.16b, v0.4b[3]\n"
".inst 0x6fa1e8cc // udot v12.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d0 // udot v16.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d4 // udot v20.4s, v6.16b, v3.4b[3]\n"
".inst 0x6fa4e8d8 // udot v24.4s, v6.16b, v4.4b[3]\n"
".inst 0x6fa5e8dc // udot v28.4s, v6.16b, v5.4b[3]\n"
- "ldr q6, [x16, #0xe0]\n"
+ "ldr q6, [x17, #0xe0]\n"
".inst 0x6fa0e8e9 // udot v9.4s, v7.16b, v0.4b[3]\n"
".inst 0x6fa1e8ed // udot v13.4s, v7.16b, v1.4b[3]\n"
".inst 0x6fa2e8f1 // udot v17.4s, v7.16b, v2.4b[3]\n"
".inst 0x6fa3e8f5 // udot v21.4s, v7.16b, v3.4b[3]\n"
".inst 0x6fa4e8f9 // udot v25.4s, v7.16b, v4.4b[3]\n"
".inst 0x6fa5e8fd // udot v29.4s, v7.16b, v5.4b[3]\n"
- "ldr q7, [x16, #0xf0]\n"
+ "ldr q7, [x17, #0xf0]\n"
".inst 0x6fa0e8ca // udot v10.4s, v6.16b, v0.4b[3]\n"
- "add x16, x16, #0x100\n"
+ "add x17, x17, #0x100\n"
".inst 0x6fa1e8ce // udot v14.4s, v6.16b, v1.4b[3]\n"
".inst 0x6fa2e8d2 // udot v18.4s, v6.16b, v2.4b[3]\n"
".inst 0x6fa3e8d6 // udot v22.4s, v6.16b, v3.4b[3]\n"
@@ -3217,24 +3218,24 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr s4, [x10], #0x4\n"
"ldr s3, [x9], #0x4\n"
"ldr s2, [x28], #0x4\n"
- "ldr q1, [x16, #0x0]\n"
+ "ldr q1, [x17, #0x0]\n"
+ "ldr q0, [x17, #0x10]\n"
".inst 0x6f87e028 // udot v8.4s, v1.16b, v7.4b[0]\n"
- "ldr q0, [x16, #0x10]\n"
".inst 0x6f86e02c // udot v12.4s, v1.16b, v6.4b[0]\n"
".inst 0x6f85e030 // udot v16.4s, v1.16b, v5.4b[0]\n"
".inst 0x6f84e034 // udot v20.4s, v1.16b, v4.4b[0]\n"
".inst 0x6f83e038 // udot v24.4s, v1.16b, v3.4b[0]\n"
".inst 0x6f82e03c // udot v28.4s, v1.16b, v2.4b[0]\n"
- "ldr q1, [x16, #0x20]\n"
+ "ldr q1, [x17, #0x20]\n"
".inst 0x6f87e009 // udot v9.4s, v0.16b, v7.4b[0]\n"
".inst 0x6f86e00d // udot v13.4s, v0.16b, v6.4b[0]\n"
".inst 0x6f85e011 // udot v17.4s, v0.16b, v5.4b[0]\n"
".inst 0x6f84e015 // udot v21.4s, v0.16b, v4.4b[0]\n"
".inst 0x6f83e019 // udot v25.4s, v0.16b, v3.4b[0]\n"
".inst 0x6f82e01d // udot v29.4s, v0.16b, v2.4b[0]\n"
- "ldr q0, [x16, #0x30]\n"
+ "ldr q0, [x17, #0x30]\n"
".inst 0x6f87e02a // udot v10.4s, v1.16b, v7.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f86e02e // udot v14.4s, v1.16b, v6.4b[0]\n"
".inst 0x6f85e032 // udot v18.4s, v1.16b, v5.4b[0]\n"
".inst 0x6f84e036 // udot v22.4s, v1.16b, v4.4b[0]\n"
@@ -3272,24 +3273,24 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"ldr b4, [x9, #0x0]\n"
"ldr b5, [x28, #0x0]\n"
"193:" // Height 6: Multiply loop: Ragged operand read: Done
- "ldr q7, [x16, #0x0]\n"
+ "ldr q7, [x17, #0x0]\n"
+ "ldr q6, [x17, #0x10]\n"
".inst 0x6f80e0e8 // udot v8.4s, v7.16b, v0.4b[0]\n"
- "ldr q6, [x16, #0x10]\n"
".inst 0x6f81e0ec // udot v12.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f0 // udot v16.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f4 // udot v20.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0f8 // udot v24.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0fc // udot v28.4s, v7.16b, v5.4b[0]\n"
- "ldr q7, [x16, #0x20]\n"
+ "ldr q7, [x17, #0x20]\n"
".inst 0x6f80e0c9 // udot v9.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0cd // udot v13.4s, v6.16b, v1.4b[0]\n"
".inst 0x6f82e0d1 // udot v17.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d5 // udot v21.4s, v6.16b, v3.4b[0]\n"
".inst 0x6f84e0d9 // udot v25.4s, v6.16b, v4.4b[0]\n"
".inst 0x6f85e0dd // udot v29.4s, v6.16b, v5.4b[0]\n"
- "ldr q6, [x16, #0x30]\n"
+ "ldr q6, [x17, #0x30]\n"
".inst 0x6f80e0ea // udot v10.4s, v7.16b, v0.4b[0]\n"
- "add x16, x16, #0x40\n"
+ "add x17, x17, #0x40\n"
".inst 0x6f81e0ee // udot v14.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f2 // udot v18.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f6 // udot v22.4s, v7.16b, v3.4b[0]\n"
@@ -3307,22 +3308,22 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"cmp x15, x20\n"
"bne 184b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x17, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"cmp x8, #0x10\n"
- "prfm pstl1keep, [x17, #0x0]\n"
+ "prfm pstl1keep, [x16, #0x0]\n"
+ "add x24, x16, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x20, x21, x20, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
"prfm pstl1keep, [x20, #0x0]\n"
"bge 203f\n"
"tbz x8, #3, 198f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
- "st1 { v9.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
+ "st1 { v9.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v13.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
@@ -3334,21 +3335,21 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"st1 { v28.4s }, [x20], #0x10\n"
"st1 { v29.4s }, [x20], #0x10\n"
"tbz x8, #2, 196f\n"
- "st1 { v10.4s }, [x17], #0x10\n"
+ "st1 { v10.4s }, [x16], #0x10\n"
"st1 { v14.4s }, [x24], #0x10\n"
"st1 { v18.4s }, [x23], #0x10\n"
"st1 { v22.4s }, [x22], #0x10\n"
"st1 { v26.4s }, [x21], #0x10\n"
"st1 { v30.4s }, [x20], #0x10\n"
"tbz x8, #1, 195f\n"
- "str d11, [x17], #0x8\n"
+ "str d11, [x16], #0x8\n"
"str d15, [x24], #0x8\n"
"str d19, [x23], #0x8\n"
"str d23, [x22], #0x8\n"
"str d27, [x21], #0x8\n"
"str d31, [x20], #0x8\n"
"tbz x8, #0, 202f\n"
- "st1 { v11.s }[2], [x17]\n"
+ "st1 { v11.s }[2], [x16]\n"
"st1 { v15.s }[2], [x24]\n"
"st1 { v19.s }[2], [x23]\n"
"st1 { v23.s }[2], [x22]\n"
@@ -3357,7 +3358,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 202f\n"
"195:" // Height 6: Partial direct writeback: partial_1_12
"tbz x8, #0, 202f\n"
- "str s11, [x17, #0x0]\n"
+ "str s11, [x16, #0x0]\n"
"str s15, [x24, #0x0]\n"
"str s19, [x23, #0x0]\n"
"str s23, [x22, #0x0]\n"
@@ -3366,14 +3367,14 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 202f\n"
"196:" // Height 6: Partial direct writeback: partial_2_8
"tbz x8, #1, 197f\n"
- "str d10, [x17], #0x8\n"
+ "str d10, [x16], #0x8\n"
"str d14, [x24], #0x8\n"
"str d18, [x23], #0x8\n"
"str d22, [x22], #0x8\n"
"str d26, [x21], #0x8\n"
"str d30, [x20], #0x8\n"
"tbz x8, #0, 202f\n"
- "st1 { v10.s }[2], [x17]\n"
+ "st1 { v10.s }[2], [x16]\n"
"st1 { v14.s }[2], [x24]\n"
"st1 { v18.s }[2], [x23]\n"
"st1 { v22.s }[2], [x22]\n"
@@ -3382,7 +3383,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 202f\n"
"197:" // Height 6: Partial direct writeback: partial_1_8
"tbz x8, #0, 202f\n"
- "str s10, [x17, #0x0]\n"
+ "str s10, [x16, #0x0]\n"
"str s14, [x24, #0x0]\n"
"str s18, [x23, #0x0]\n"
"str s22, [x22, #0x0]\n"
@@ -3391,21 +3392,21 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 202f\n"
"198:" // Height 6: Partial direct writeback: partial_4_0
"tbz x8, #2, 200f\n"
- "st1 { v8.4s }, [x17], #0x10\n"
+ "st1 { v8.4s }, [x16], #0x10\n"
"st1 { v12.4s }, [x24], #0x10\n"
"st1 { v16.4s }, [x23], #0x10\n"
"st1 { v20.4s }, [x22], #0x10\n"
"st1 { v24.4s }, [x21], #0x10\n"
"st1 { v28.4s }, [x20], #0x10\n"
"tbz x8, #1, 199f\n"
- "str d9, [x17], #0x8\n"
+ "str d9, [x16], #0x8\n"
"str d13, [x24], #0x8\n"
"str d17, [x23], #0x8\n"
"str d21, [x22], #0x8\n"
"str d25, [x21], #0x8\n"
"str d29, [x20], #0x8\n"
"tbz x8, #0, 202f\n"
- "st1 { v9.s }[2], [x17]\n"
+ "st1 { v9.s }[2], [x16]\n"
"st1 { v13.s }[2], [x24]\n"
"st1 { v17.s }[2], [x23]\n"
"st1 { v21.s }[2], [x22]\n"
@@ -3414,7 +3415,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 202f\n"
"199:" // Height 6: Partial direct writeback: partial_1_4
"tbz x8, #0, 202f\n"
- "str s9, [x17, #0x0]\n"
+ "str s9, [x16, #0x0]\n"
"str s13, [x24, #0x0]\n"
"str s17, [x23, #0x0]\n"
"str s21, [x22, #0x0]\n"
@@ -3423,14 +3424,14 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"b 202f\n"
"200:" // Height 6: Partial direct writeback: partial_2_0
"tbz x8, #1, 201f\n"
- "str d8, [x17], #0x8\n"
+ "str d8, [x16], #0x8\n"
"str d12, [x24], #0x8\n"
"str d16, [x23], #0x8\n"
"str d20, [x22], #0x8\n"
"str d24, [x21], #0x8\n"
"str d28, [x20], #0x8\n"
"tbz x8, #0, 202f\n"
- "st1 { v8.s }[2], [x17]\n"
+ "st1 { v8.s }[2], [x16]\n"
"st1 { v12.s }[2], [x24]\n"
"st1 { v16.s }[2], [x23]\n"
"st1 { v20.s }[2], [x22]\n"
@@ -3438,7 +3439,7 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"st1 { v28.s }[2], [x20]\n"
"b 202f\n"
"201:" // Height 6: Partial direct writeback: partial_1_0
- "str s8, [x17, #0x0]\n"
+ "str s8, [x16, #0x0]\n"
"str s12, [x24, #0x0]\n"
"str s16, [x23, #0x0]\n"
"str s20, [x22, #0x0]\n"
@@ -3447,11 +3448,11 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"202:" // Height 6: Partial direct writeback: Done
"b 204f\n"
"203:" // Height 6: Full writeback
- "str q8, [x17, #0x0]\n"
- "str q9, [x17, #0x10]\n"
- "str q10, [x17, #0x20]\n"
- "str q11, [x17, #0x30]\n"
- "add x17, x17, #0x40\n"
+ "str q8, [x16, #0x0]\n"
+ "str q9, [x16, #0x10]\n"
+ "str q10, [x16, #0x20]\n"
+ "str q11, [x16, #0x30]\n"
+ "add x16, x16, #0x40\n"
"str q12, [x24, #0x0]\n"
"str q13, [x24, #0x10]\n"
"str q14, [x24, #0x20]\n"
@@ -3487,8 +3488,8 @@ void a64_hybrid_u8u32_dot_6x16_a55 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
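(Note on the hunks above: the recurring edit in both the A55-tuned and generic u8u32 dot kernels is the same refactor — `output_ptr` leaves the inline-asm operand list (`[output_ptr] "+&r" (output_ptr)`) and becomes a field of `KernelArgs`, loaded inside the asm via `ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]`. That frees one general-purpose register operand, which is why the A55 variant can also swap roles so the output pointer lives in x16 and the B pointer in x17, and why loads around each dot-product block are rescheduled. A minimal, self-contained sketch of the pattern follows; the names `Args` and `fill_u32` are illustrative, not part of the patch, and the snippet assumes an AArch64 compiler.)

// Hypothetical sketch: pass a pointer to inline asm through an args struct +
// offsetof (the patched pattern) instead of a "+&r" register operand.
#include <cstddef>
#include <cstdint>

struct Args {
    uint32_t *output_ptr;  // loaded inside the asm; no register operand needed
    size_t    n;           // element count
};

void fill_u32(uint32_t *out, size_t n, uint32_t value) {
    Args ka{out, n};
    __asm__ __volatile__(
        // Was (pre-patch shape): "mov x9, %x[output_ptr]" with output_ptr
        // bound as a read-write register operand.
        "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        "ldr x10, [%x[args_ptr], %[offsetof_n]]\n"
        "cbz x10, 2f\n"
        "1:\n"
        "str %w[value], [x9], #4\n"  // post-indexed store, 4 bytes per element
        "subs x10, x10, #1\n"
        "bne 1b\n"
        "2:\n"
        :
        : [args_ptr] "r"(&ka), [value] "r"(value),
          [offsetof_output_ptr] "I"(offsetof(Args, output_ptr)),
          [offsetof_n] "I"(offsetof(Args, n))
        : "cc", "memory", "x9", "x10");
}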
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp
index 849c680843..8b7f5afb7e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void a64_hybrid_u8u32_dot_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -88,7 +88,7 @@ void a64_hybrid_u8u32_dot_6x16 (
"beq 35f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 12f\n"
"cmp x11, #0x10\n"
@@ -163,8 +163,8 @@ void a64_hybrid_u8u32_dot_6x16 (
"mov x28, #0x0\n"
"14:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 15f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -188,6 +188,10 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr q17, [x10, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
"ldr q17, [x10, #0x40]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
@@ -212,22 +216,21 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr q17, [x10, #0xe0]\n"
".inst 0x6fa0ea09 // udot v9.4s, v16.16b, v0.4b[3]\n"
"ldr q16, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x6fa0ea2a // udot v10.4s, v17.16b, v0.4b[3]\n"
+ "ldr q6, [x10, #0x0]\n"
".inst 0x6fa0ea0b // udot v11.4s, v16.16b, v0.4b[3]\n"
"ldr q0, [x26, #0x0]\n"
- "cmp x27, #0x20\n"
- "add x10, x10, #0x100\n"
- "ldr q6, [x10, #0x0]\n"
"ldr q7, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 17b\n"
"18:" // Height 1: Multiply loop: Single iteration only
".inst 0x6f80e0c8 // udot v8.4s, v6.16b, v0.4b[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
"ldr q17, [x10, #0x40]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
@@ -252,29 +255,26 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr q17, [x10, #0xe0]\n"
".inst 0x6fa0ea09 // udot v9.4s, v16.16b, v0.4b[3]\n"
"ldr q16, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x6fa0ea2a // udot v10.4s, v17.16b, v0.4b[3]\n"
".inst 0x6fa0ea0b // udot v11.4s, v16.16b, v0.4b[3]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"19:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 24f\n"
"cmp x27, #0x4\n"
"blt 21f\n"
"20:" // Height 1: Multiply loop: Odd block loop
"ldr s18, [x26], #0x4\n"
- "ldr q16, [x10, #0x0]\n"
- ".inst 0x6f92e208 // udot v8.4s, v16.16b, v18.4b[0]\n"
+ "ldr q17, [x10, #0x0]\n"
"sub x27, x27, #0x4\n"
"ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x4\n"
+ ".inst 0x6f92e228 // udot v8.4s, v17.16b, v18.4b[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x6f92e209 // udot v9.4s, v16.16b, v18.4b[0]\n"
- "cmp x27, #0x4\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f92e22a // udot v10.4s, v17.16b, v18.4b[0]\n"
".inst 0x6f92e20b // udot v11.4s, v16.16b, v18.4b[0]\n"
- "add x10, x10, #0x40\n"
"bge 20b\n"
"21:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 24f\n"
@@ -289,12 +289,12 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr q17, [x10, #0x0]\n"
"ldr q16, [x10, #0x10]\n"
".inst 0x6f80e228 // udot v8.4s, v17.16b, v0.4b[0]\n"
- ".inst 0x6f80e209 // udot v9.4s, v16.16b, v0.4b[0]\n"
"ldr q17, [x10, #0x20]\n"
+ ".inst 0x6f80e209 // udot v9.4s, v16.16b, v0.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
- "add x10, x10, #0x40\n"
"24:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -362,7 +362,7 @@ void a64_hybrid_u8u32_dot_6x16 (
"35:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"36:" // Height 2: Column loop
"tbz %x[flags], #0, 46f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -463,8 +463,8 @@ void a64_hybrid_u8u32_dot_6x16 (
"mov x28, #0x0\n"
"48:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 49f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -496,22 +496,22 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x6f81e22e // udot v14.4s, v17.16b, v1.4b[0]\n"
"ldr q17, [x10, #0x40]\n"
- "add x25, x25, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x6f81e20f // udot v15.4s, v16.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x50]\n"
- "cmp x27, #0x20\n"
".inst 0x6fa0e228 // udot v8.4s, v17.16b, v0.4b[1]\n"
".inst 0x6fa1e22c // udot v12.4s, v17.16b, v1.4b[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6fa0e209 // udot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x6fa1e20d // udot v13.4s, v16.16b, v1.4b[1]\n"
"ldr q16, [x10, #0x70]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6fa0e22a // udot v10.4s, v17.16b, v0.4b[1]\n"
".inst 0x6fa1e22e // udot v14.4s, v17.16b, v1.4b[1]\n"
"ldr q17, [x10, #0x80]\n"
@@ -555,18 +555,18 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
"add x25, x25, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x6f81e22e // udot v14.4s, v17.16b, v1.4b[0]\n"
"ldr q17, [x10, #0x40]\n"
- "sub x27, x27, #0x10\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x6f81e20f // udot v15.4s, v16.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x50]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6fa0e228 // udot v8.4s, v17.16b, v0.4b[1]\n"
".inst 0x6fa1e22c // udot v12.4s, v17.16b, v1.4b[1]\n"
"ldr q17, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6fa0e209 // udot v9.4s, v16.16b, v0.4b[1]\n"
".inst 0x6fa1e20d // udot v13.4s, v16.16b, v1.4b[1]\n"
"ldr q16, [x10, #0x70]\n"
@@ -607,18 +607,18 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr s19, [x26], #0x4\n"
"ldr s18, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr q17, [x10, #0x0]\n"
"ldr q16, [x10, #0x10]\n"
+ "cmp x27, #0x4\n"
".inst 0x6f93e228 // udot v8.4s, v17.16b, v19.4b[0]\n"
".inst 0x6f92e22c // udot v12.4s, v17.16b, v18.4b[0]\n"
"ldr q17, [x10, #0x20]\n"
".inst 0x6f93e209 // udot v9.4s, v16.16b, v19.4b[0]\n"
".inst 0x6f92e20d // udot v13.4s, v16.16b, v18.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f93e22a // udot v10.4s, v17.16b, v19.4b[0]\n"
".inst 0x6f92e22e // udot v14.4s, v17.16b, v18.4b[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x6f93e20b // udot v11.4s, v16.16b, v19.4b[0]\n"
".inst 0x6f92e20f // udot v15.4s, v16.16b, v18.4b[0]\n"
"bge 54b\n"
@@ -643,9 +643,9 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f80e209 // udot v9.4s, v16.16b, v0.4b[0]\n"
".inst 0x6f81e20d // udot v13.4s, v16.16b, v1.4b[0]\n"
"ldr q16, [x10, #0x30]\n"
+ "add x10, x10, #0x40\n"
".inst 0x6f80e22a // udot v10.4s, v17.16b, v0.4b[0]\n"
".inst 0x6f81e22e // udot v14.4s, v17.16b, v1.4b[0]\n"
- "add x10, x10, #0x40\n"
".inst 0x6f80e20b // udot v11.4s, v16.16b, v0.4b[0]\n"
".inst 0x6f81e20f // udot v15.4s, v16.16b, v1.4b[0]\n"
"58:" // Height 2: Multiply loop: No odd multiplies
@@ -654,9 +654,9 @@ void a64_hybrid_u8u32_dot_6x16 (
"cmp x28, x20\n"
"bne 48b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
"bge 67f\n"
"tbz x11, #3, 62f\n"
@@ -738,12 +738,12 @@ void a64_hybrid_u8u32_dot_6x16 (
"69:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"70:" // Height 3: Column loop
"tbz %x[flags], #0, 80f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"bge 79f\n"
"tbz x11, #3, 74f\n"
@@ -864,8 +864,8 @@ void a64_hybrid_u8u32_dot_6x16 (
"mov x28, #0x0\n"
"82:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 83f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -905,18 +905,18 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
"ldr q20, [x10, #0x30]\n"
"add x24, x24, #0x10\n"
- ".inst 0x6f80e2aa // udot v10.4s, v21.16b, v0.4b[0]\n"
- ".inst 0x6f81e2ae // udot v14.4s, v21.16b, v1.4b[0]\n"
"cmp x27, #0x20\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x6f80e2aa // udot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x6f81e2ae // udot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x6f82e2b2 // udot v18.4s, v21.16b, v2.4b[0]\n"
"ldr q21, [x10, #0x40]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f80e28b // udot v11.4s, v20.16b, v0.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f81e28f // udot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x6f82e293 // udot v19.4s, v20.16b, v2.4b[0]\n"
"ldr q20, [x10, #0x50]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6fa0e2a8 // udot v8.4s, v21.16b, v0.4b[1]\n"
".inst 0x6fa1e2ac // udot v12.4s, v21.16b, v1.4b[1]\n"
".inst 0x6fa2e2b0 // udot v16.4s, v21.16b, v2.4b[1]\n"
@@ -983,14 +983,14 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
"ldr q20, [x10, #0x30]\n"
"sub x27, x27, #0x10\n"
- ".inst 0x6f80e2aa // udot v10.4s, v21.16b, v0.4b[0]\n"
- ".inst 0x6f81e2ae // udot v14.4s, v21.16b, v1.4b[0]\n"
"prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x6f80e2aa // udot v10.4s, v21.16b, v0.4b[0]\n"
+ ".inst 0x6f81e2ae // udot v14.4s, v21.16b, v1.4b[0]\n"
".inst 0x6f82e2b2 // udot v18.4s, v21.16b, v2.4b[0]\n"
"ldr q21, [x10, #0x40]\n"
- ".inst 0x6f80e28b // udot v11.4s, v20.16b, v0.4b[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
+ ".inst 0x6f80e28b // udot v11.4s, v20.16b, v0.4b[0]\n"
".inst 0x6f81e28f // udot v15.4s, v20.16b, v1.4b[0]\n"
".inst 0x6f82e293 // udot v19.4s, v20.16b, v2.4b[0]\n"
"ldr q20, [x10, #0x50]\n"
@@ -1049,12 +1049,12 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr s24, [x26], #0x4\n"
"ldr s23, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s22, [x24], #0x4\n"
"ldr q21, [x10, #0x0]\n"
+ "cmp x27, #0x4\n"
+ "ldr q20, [x10, #0x10]\n"
".inst 0x6f98e2a8 // udot v8.4s, v21.16b, v24.4b[0]\n"
".inst 0x6f97e2ac // udot v12.4s, v21.16b, v23.4b[0]\n"
- "ldr q20, [x10, #0x10]\n"
".inst 0x6f96e2b0 // udot v16.4s, v21.16b, v22.4b[0]\n"
"ldr q21, [x10, #0x20]\n"
".inst 0x6f98e289 // udot v9.4s, v20.16b, v24.4b[0]\n"
@@ -1108,11 +1108,11 @@ void a64_hybrid_u8u32_dot_6x16 (
"cmp x28, x20\n"
"bne 82b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
"bge 101f\n"
"tbz x11, #3, 96f\n"
@@ -1214,13 +1214,13 @@ void a64_hybrid_u8u32_dot_6x16 (
"103:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"104:" // Height 4: Column loop
"tbz %x[flags], #0, 114f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x22, x23, x20, LSL #2\n"
"bge 113f\n"
"tbz x11, #3, 108f\n"
@@ -1365,8 +1365,8 @@ void a64_hybrid_u8u32_dot_6x16 (
"mov x28, #0x0\n"
"116:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 117f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1506,14 +1506,14 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
"add x23, x23, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
"ldr q24, [x10, #0x30]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f80e32a // udot v10.4s, v25.16b, v0.4b[0]\n"
".inst 0x6f81e32e // udot v14.4s, v25.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f82e332 // udot v18.4s, v25.16b, v2.4b[0]\n"
".inst 0x6f83e336 // udot v22.4s, v25.16b, v3.4b[0]\n"
@@ -1591,9 +1591,9 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr s29, [x26], #0x4\n"
"ldr s28, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s27, [x24], #0x4\n"
"ldr s26, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
"ldr q25, [x10, #0x0]\n"
"ldr q24, [x10, #0x10]\n"
".inst 0x6f9de328 // udot v8.4s, v25.16b, v29.4b[0]\n"
@@ -1662,13 +1662,13 @@ void a64_hybrid_u8u32_dot_6x16 (
"cmp x28, x20\n"
"bne 116b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
+ "add x24, x9, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
"bge 135f\n"
"tbz x11, #3, 130f\n"
@@ -1790,14 +1790,14 @@ void a64_hybrid_u8u32_dot_6x16 (
"137:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"138:" // Height 5: Column loop
"tbz %x[flags], #0, 148f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x21, x22, x20, LSL #2\n"
"bge 147f\n"
"tbz x11, #3, 142f\n"
@@ -1966,8 +1966,8 @@ void a64_hybrid_u8u32_dot_6x16 (
"mov x28, #0x0\n"
"150:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 151f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2133,12 +2133,12 @@ void a64_hybrid_u8u32_dot_6x16 (
"add x22, x22, #0x10\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
- "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
"ldr q28, [x10, #0x30]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
".inst 0x6f80e3aa // udot v10.4s, v29.16b, v0.4b[0]\n"
".inst 0x6f81e3ae // udot v14.4s, v29.16b, v1.4b[0]\n"
"prfm pldl1keep, [x24, #0x80]\n"
@@ -2233,14 +2233,14 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr s2, [x26], #0x4\n"
"ldr s1, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s0, [x24], #0x4\n"
"ldr s31, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
"ldr s30, [x22], #0x4\n"
"ldr q29, [x10, #0x0]\n"
+ "ldr q28, [x10, #0x10]\n"
".inst 0x6f82e3a8 // udot v8.4s, v29.16b, v2.4b[0]\n"
".inst 0x6f81e3ac // udot v12.4s, v29.16b, v1.4b[0]\n"
- "ldr q28, [x10, #0x10]\n"
".inst 0x6f80e3b0 // udot v16.4s, v29.16b, v0.4b[0]\n"
".inst 0x6f9fe3b4 // udot v20.4s, v29.16b, v31.4b[0]\n"
".inst 0x6f9ee3b8 // udot v24.4s, v29.16b, v30.4b[0]\n"
@@ -2316,15 +2316,15 @@ void a64_hybrid_u8u32_dot_6x16 (
"cmp x28, x20\n"
"bne 150b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "add x24, x9, x20, LSL #2\n"
"prfm pstl1keep, [x24, #0x0]\n"
+ "add x23, x24, x20, LSL #2\n"
"prfm pstl1keep, [x23, #0x0]\n"
- "cmp x11, #0x10\n"
+ "add x22, x23, x20, LSL #2\n"
"prfm pstl1keep, [x22, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"prfm pstl1keep, [x21, #0x0]\n"
"bge 169f\n"
"tbz x11, #3, 164f\n"
@@ -2465,19 +2465,20 @@ void a64_hybrid_u8u32_dot_6x16 (
"b 206f\n"
"171:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"172:" // Height 6: Column loop
"tbz %x[flags], #0, 182f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x20, x21, x20, LSL #2\n"
"bge 181f\n"
"tbz x11, #3, 176f\n"
@@ -2670,8 +2671,8 @@ void a64_hybrid_u8u32_dot_6x16 (
"mov x28, #0x0\n"
"184:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 185f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2861,18 +2862,18 @@ void a64_hybrid_u8u32_dot_6x16 (
".inst 0x6f80e0e9 // udot v9.4s, v7.16b, v0.4b[0]\n"
".inst 0x6f81e0ed // udot v13.4s, v7.16b, v1.4b[0]\n"
"add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6f82e0f1 // udot v17.4s, v7.16b, v2.4b[0]\n"
".inst 0x6f83e0f5 // udot v21.4s, v7.16b, v3.4b[0]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6f84e0f9 // udot v25.4s, v7.16b, v4.4b[0]\n"
".inst 0x6f85e0fd // udot v29.4s, v7.16b, v5.4b[0]\n"
"ldr q7, [x10, #0x30]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6f80e0ca // udot v10.4s, v6.16b, v0.4b[0]\n"
".inst 0x6f81e0ce // udot v14.4s, v6.16b, v1.4b[0]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
+ "sub x27, x27, #0x10\n"
"prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6f82e0d2 // udot v18.4s, v6.16b, v2.4b[0]\n"
".inst 0x6f83e0d6 // udot v22.4s, v6.16b, v3.4b[0]\n"
@@ -2978,9 +2979,9 @@ void a64_hybrid_u8u32_dot_6x16 (
"ldr s7, [x26], #0x4\n"
"ldr s6, [x25], #0x4\n"
"sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"ldr s5, [x24], #0x4\n"
"ldr s4, [x23], #0x4\n"
+ "cmp x27, #0x4\n"
"ldr s3, [x22], #0x4\n"
"ldr s2, [x21], #0x4\n"
"ldr q1, [x10, #0x0]\n"
@@ -3073,16 +3074,16 @@ void a64_hybrid_u8u32_dot_6x16 (
"cmp x28, x20\n"
"bne 184b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"add x24, x9, x20, LSL #2\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"add x23, x24, x20, LSL #2\n"
- "prfm pstl1keep, [x9, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"add x22, x23, x20, LSL #2\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"add x21, x22, x20, LSL #2\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"add x20, x21, x20, LSL #2\n"
- "cmp x11, #0x10\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"prfm pstl1keep, [x21, #0x0]\n"
"prfm pstl1keep, [x20, #0x0]\n"
"bge 203f\n"
@@ -3253,8 +3254,8 @@ void a64_hybrid_u8u32_dot_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"206:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16.hpp
index e360452108..baa4e28e88 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return true;
}
- StdTransformsFixed<rhs_operand_type, result_type, 6, 16, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
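(Note on the hpp hunk above: `StdTransformsFixed` gains an explicit LHS operand type as its first template argument, where previously a single operand type covered both sides of the GEMM. A hedged illustration of that kind of interface widening follows — `TransformsSketch` and the alias are invented names, and the real class carries strategy details this omits.)

// Sketch only: give LHS and RHS independent element types in a transforms
// helper, instead of deriving both sides from one operand type.
#include <cstdint>

template <typename LhsT, typename RhsT, typename ResultT,
          unsigned Height, unsigned Width, unsigned KBlock>
struct TransformsSketch {
    using lhs_operand_type = LhsT;   // new: explicit LHS element type
    using rhs_operand_type = RhsT;
    using result_type      = ResultT;
};

// Shape matching the patched declaration
// StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 6, 16, 8>:
using U8U32Mmla6x16Transforms =
    TransformsSketch<uint8_t, uint8_t, uint32_t, 6, 16, 8>;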
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp
index 364f388e79..790a350838 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_mmla_6x16/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void a64_hybrid_u8u32_mmla_6x16 (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -88,7 +88,7 @@ void a64_hybrid_u8u32_mmla_6x16 (
"beq 38f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"tbz %x[flags], #0, 13f\n"
"cmp x11, #0x10\n"
@@ -176,8 +176,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
"mov x28, #0x0\n"
"15:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -197,7 +197,12 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q6, [x10, #0x10]\n"
"blt 19f\n"
"18:" // Height 1: Multiply loop: Main loop head
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
"trn1 v19.2d, v1.2d, v20.2d\n"
+ "trn2 v1.2d, v1.2d, v20.2d\n"
".inst 0x6e87a668 // ummla v8.4s, v19.16b, v7.16b\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e86a66c // ummla v12.4s, v19.16b, v6.16b\n"
@@ -210,7 +215,6 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e91a66e // ummla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v20.2d\n"
".inst 0x6e92a66b // ummla v11.4s, v19.16b, v18.16b\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x6e91a66f // ummla v15.4s, v19.16b, v17.16b\n"
@@ -227,39 +231,38 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x6e91a42e // ummla v14.4s, v1.16b, v17.16b\n"
"ldr q17, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "cmp x27, #0x20\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e92a42b // ummla v11.4s, v1.16b, v18.16b\n"
+ "ldr q7, [x10, #0x0]\n"
".inst 0x6e91a42f // ummla v15.4s, v1.16b, v17.16b\n"
"ldr q1, [x26, #0x0]\n"
- "add x10, x10, #0x100\n"
- "ldr q7, [x10, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
"bge 18b\n"
"19:" // Height 1: Multiply loop: Single iteration only
- "trn1 v20.2d, v1.2d, v21.2d\n"
- ".inst 0x6e87a688 // ummla v8.4s, v20.16b, v7.16b\n"
+ "add x26, x26, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "trn1 v19.2d, v1.2d, v17.2d\n"
+ "trn2 v1.2d, v1.2d, v17.2d\n"
+ ".inst 0x6e87a668 // ummla v8.4s, v19.16b, v7.16b\n"
"ldr q18, [x10, #0x20]\n"
- ".inst 0x6e86a68c // ummla v12.4s, v20.16b, v6.16b\n"
+ ".inst 0x6e86a66c // ummla v12.4s, v19.16b, v6.16b\n"
"ldr q17, [x10, #0x30]\n"
- ".inst 0x6e92a689 // ummla v9.4s, v20.16b, v18.16b\n"
+ ".inst 0x6e92a669 // ummla v9.4s, v19.16b, v18.16b\n"
"ldr q18, [x10, #0x40]\n"
- ".inst 0x6e91a68d // ummla v13.4s, v20.16b, v17.16b\n"
+ ".inst 0x6e91a66d // ummla v13.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x50]\n"
- ".inst 0x6e92a68a // ummla v10.4s, v20.16b, v18.16b\n"
- "ldr q19, [x10, #0x60]\n"
- ".inst 0x6e91a68e // ummla v14.4s, v20.16b, v17.16b\n"
+ ".inst 0x6e92a66a // ummla v10.4s, v19.16b, v18.16b\n"
+ "ldr q20, [x10, #0x60]\n"
+ ".inst 0x6e91a66e // ummla v14.4s, v19.16b, v17.16b\n"
"ldr q18, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v21.2d\n"
- ".inst 0x6e93a68b // ummla v11.4s, v20.16b, v19.16b\n"
+ ".inst 0x6e94a66b // ummla v11.4s, v19.16b, v20.16b\n"
"ldr q17, [x10, #0x80]\n"
- ".inst 0x6e92a68f // ummla v15.4s, v20.16b, v18.16b\n"
- "ldr q19, [x10, #0x90]\n"
+ ".inst 0x6e92a66f // ummla v15.4s, v19.16b, v18.16b\n"
+ "ldr q20, [x10, #0x90]\n"
".inst 0x6e91a428 // ummla v8.4s, v1.16b, v17.16b\n"
"ldr q18, [x10, #0xa0]\n"
- ".inst 0x6e93a42c // ummla v12.4s, v1.16b, v19.16b\n"
+ ".inst 0x6e94a42c // ummla v12.4s, v1.16b, v20.16b\n"
"ldr q17, [x10, #0xb0]\n"
".inst 0x6e92a429 // ummla v9.4s, v1.16b, v18.16b\n"
"ldr q18, [x10, #0xc0]\n"
@@ -269,22 +272,21 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x6e91a42e // ummla v14.4s, v1.16b, v17.16b\n"
"ldr q17, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "sub x27, x27, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e92a42b // ummla v11.4s, v1.16b, v18.16b\n"
".inst 0x6e91a42f // ummla v15.4s, v1.16b, v17.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "add x10, x10, #0x100\n"
"20:" // Height 1: Multiply loop: Main loop skip
"cbz x27, 27f\n"
"cmp x27, #0x8\n"
"blt 22f\n"
"21:" // Height 1: Multiply loop: Odd block loop
"ldr d19, [x26], #0x8\n"
- "ldr q18, [x10, #0x0]\n"
- "trn1 v19.2d, v19.2d, v17.2d\n"
+ "ldr q20, [x10, #0x0]\n"
+ "sub x27, x27, #0x8\n"
"ldr q17, [x10, #0x10]\n"
- ".inst 0x6e92a668 // ummla v8.4s, v19.16b, v18.16b\n"
+ "cmp x27, #0x8\n"
+ "trn1 v19.2d, v19.2d, v18.2d\n"
+ ".inst 0x6e94a668 // ummla v8.4s, v19.16b, v20.16b\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e91a66c // ummla v12.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x30]\n"
@@ -296,11 +298,9 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e91a66e // ummla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e92a66b // ummla v11.4s, v19.16b, v18.16b\n"
".inst 0x6e91a66f // ummla v15.4s, v19.16b, v17.16b\n"
- "add x10, x10, #0x80\n"
"bge 21b\n"
"22:" // Height 1: Multiply loop: Skip odd blocks
"cbz x27, 27f\n"
@@ -324,24 +324,24 @@ void a64_hybrid_u8u32_mmla_6x16 (
"25:" // Height 1: Multiply loop: Ragged operand read: partial_1_0
"ldr b1, [x26, #0x0]\n"
"26:" // Height 1: Multiply loop: Ragged operand read: Done
- "ldr q23, [x10, #0x0]\n"
- "ldr q18, [x10, #0x10]\n"
+ "ldr q24, [x10, #0x0]\n"
+ "ldr q20, [x10, #0x10]\n"
"trn1 v19.2d, v1.2d, v17.2d\n"
- ".inst 0x6e97a668 // ummla v8.4s, v19.16b, v23.16b\n"
+ ".inst 0x6e98a668 // ummla v8.4s, v19.16b, v24.16b\n"
"ldr q17, [x10, #0x20]\n"
- ".inst 0x6e92a66c // ummla v12.4s, v19.16b, v18.16b\n"
- "ldr q31, [x10, #0x30]\n"
+ ".inst 0x6e94a66c // ummla v12.4s, v19.16b, v20.16b\n"
+ "ldr q0, [x10, #0x30]\n"
".inst 0x6e91a669 // ummla v9.4s, v19.16b, v17.16b\n"
"ldr q20, [x10, #0x40]\n"
- ".inst 0x6e9fa66d // ummla v13.4s, v19.16b, v31.16b\n"
+ ".inst 0x6e80a66d // ummla v13.4s, v19.16b, v0.16b\n"
"ldr q17, [x10, #0x50]\n"
".inst 0x6e94a66a // ummla v10.4s, v19.16b, v20.16b\n"
"ldr q18, [x10, #0x60]\n"
".inst 0x6e91a66e // ummla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e92a66b // ummla v11.4s, v19.16b, v18.16b\n"
".inst 0x6e91a66f // ummla v15.4s, v19.16b, v17.16b\n"
- "add x10, x10, #0x80\n"
"27:" // Height 1: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -413,7 +413,7 @@ void a64_hybrid_u8u32_mmla_6x16 (
"38:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"39:" // Height 2: Column loop
"tbz %x[flags], #0, 50f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
@@ -523,8 +523,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
"mov x28, #0x0\n"
"52:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 53f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -549,6 +549,14 @@ void a64_hybrid_u8u32_mmla_6x16 (
"blt 56f\n"
"55:" // Height 2: Multiply loop: Main loop head
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "ldr q2, [x25, #0x0]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e87a668 // ummla v8.4s, v19.16b, v7.16b\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e86a66c // ummla v12.4s, v19.16b, v6.16b\n"
@@ -561,7 +569,6 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e91a66e // ummla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e92a66b // ummla v11.4s, v19.16b, v18.16b\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x6e91a66f // ummla v15.4s, v19.16b, v17.16b\n"
@@ -578,22 +585,21 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x6e91a42e // ummla v14.4s, v1.16b, v17.16b\n"
"ldr q17, [x10, #0xf0]\n"
- "sub x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "ldr q2, [x25, #0x0]\n"
- "cmp x27, #0x20\n"
- ".inst 0x6e92a42b // ummla v11.4s, v1.16b, v18.16b\n"
"add x10, x10, #0x100\n"
+ ".inst 0x6e92a42b // ummla v11.4s, v1.16b, v18.16b\n"
"ldr q7, [x10, #0x0]\n"
".inst 0x6e91a42f // ummla v15.4s, v1.16b, v17.16b\n"
"ldr q1, [x26, #0x0]\n"
"ldr q6, [x10, #0x10]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
"bge 55b\n"
"56:" // Height 2: Multiply loop: Single iteration only
"trn1 v19.2d, v1.2d, v2.2d\n"
+ "trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e87a668 // ummla v8.4s, v19.16b, v7.16b\n"
"ldr q18, [x10, #0x20]\n"
".inst 0x6e86a66c // ummla v12.4s, v19.16b, v6.16b\n"
@@ -606,7 +612,6 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e91a66e // ummla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "trn2 v1.2d, v1.2d, v2.2d\n"
".inst 0x6e92a66b // ummla v11.4s, v19.16b, v18.16b\n"
"ldr q18, [x10, #0x80]\n"
".inst 0x6e91a66f // ummla v15.4s, v19.16b, v17.16b\n"
@@ -623,41 +628,36 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0xe0]\n"
".inst 0x6e91a42e // ummla v14.4s, v1.16b, v17.16b\n"
"ldr q17, [x10, #0xf0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "add x10, x10, #0x100\n"
".inst 0x6e92a42b // ummla v11.4s, v1.16b, v18.16b\n"
".inst 0x6e91a42f // ummla v15.4s, v1.16b, v17.16b\n"
- "sub x27, x27, #0x10\n"
- "prfm pldl1keep, [x26, #0x80]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "add x10, x10, #0x100\n"
"57:" // Height 2: Multiply loop: Main loop skip
"cbz x27, 64f\n"
"cmp x27, #0x8\n"
"blt 59f\n"
"58:" // Height 2: Multiply loop: Odd block loop
- "ldr d18, [x26], #0x8\n"
- "ldr d17, [x25], #0x8\n"
- "trn1 v19.2d, v18.2d, v17.2d\n"
+ "ldr d20, [x26], #0x8\n"
+ "ldr d19, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr q17, [x10, #0x0]\n"
- "ldr q22, [x10, #0x10]\n"
- ".inst 0x6e91a668 // ummla v8.4s, v19.16b, v17.16b\n"
- ".inst 0x6e96a66c // ummla v12.4s, v19.16b, v22.16b\n"
- "ldr q1, [x10, #0x20]\n"
+ "ldr q18, [x10, #0x0]\n"
+ "ldr q17, [x10, #0x10]\n"
+ "cmp x27, #0x8\n"
+ "trn1 v22.2d, v20.2d, v19.2d\n"
+ ".inst 0x6e92a6c8 // ummla v8.4s, v22.16b, v18.16b\n"
+ "ldr q2, [x10, #0x20]\n"
+ ".inst 0x6e91a6cc // ummla v12.4s, v22.16b, v17.16b\n"
"ldr q17, [x10, #0x30]\n"
- ".inst 0x6e81a669 // ummla v9.4s, v19.16b, v1.16b\n"
- ".inst 0x6e91a66d // ummla v13.4s, v19.16b, v17.16b\n"
+ ".inst 0x6e82a6c9 // ummla v9.4s, v22.16b, v2.16b\n"
"ldr q18, [x10, #0x40]\n"
+ ".inst 0x6e91a6cd // ummla v13.4s, v22.16b, v17.16b\n"
"ldr q17, [x10, #0x50]\n"
- ".inst 0x6e92a66a // ummla v10.4s, v19.16b, v18.16b\n"
- ".inst 0x6e91a66e // ummla v14.4s, v19.16b, v17.16b\n"
+ ".inst 0x6e92a6ca // ummla v10.4s, v22.16b, v18.16b\n"
"ldr q18, [x10, #0x60]\n"
+ ".inst 0x6e91a6ce // ummla v14.4s, v22.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
- "cmp x27, #0x8\n"
- ".inst 0x6e92a66b // ummla v11.4s, v19.16b, v18.16b\n"
- ".inst 0x6e91a66f // ummla v15.4s, v19.16b, v17.16b\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e92a6cb // ummla v11.4s, v22.16b, v18.16b\n"
+ ".inst 0x6e91a6cf // ummla v15.4s, v22.16b, v17.16b\n"
"bge 58b\n"
"59:" // Height 2: Multiply loop: Skip odd blocks
"cbz x27, 64f\n"
@@ -703,27 +703,27 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q18, [x10, #0x60]\n"
".inst 0x6e91a66e // ummla v14.4s, v19.16b, v17.16b\n"
"ldr q17, [x10, #0x70]\n"
+ "add x10, x10, #0x80\n"
".inst 0x6e92a66b // ummla v11.4s, v19.16b, v18.16b\n"
".inst 0x6e91a66f // ummla v15.4s, v19.16b, v17.16b\n"
- "add x10, x10, #0x80\n"
"64:" // Height 2: Multiply loop: No odd multiplies
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 52b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"bge 73f\n"
"tbz x11, #3, 68f\n"
"st1 { v7.4s }, [x9], #0x10\n"
@@ -804,12 +804,12 @@ void a64_hybrid_u8u32_mmla_6x16 (
"75:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"76:" // Height 3: Column loop
"tbz %x[flags], #0, 87f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
"cmp x11, #0x10\n"
+ "add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"bge 85f\n"
"tbz x11, #3, 80f\n"
@@ -951,8 +951,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
"mov x28, #0x0\n"
"89:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 90f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -982,35 +982,38 @@ void a64_hybrid_u8u32_mmla_6x16 (
"92:" // Height 3: Multiply loop: Main loop head
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e87a788 // ummla v8.4s, v28.16b, v7.16b\n"
- "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x6e86a78c // ummla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x6e87a770 // ummla v16.4s, v27.16b, v7.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e86a78c // ummla v12.4s, v28.16b, v6.16b\n"
".inst 0x6e86a774 // ummla v20.4s, v27.16b, v6.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e9aa789 // ummla v9.4s, v28.16b, v26.16b\n"
- "trn2 v3.2d, v3.2d, v29.2d\n"
".inst 0x6e9aa771 // ummla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e99a78d // ummla v13.4s, v28.16b, v25.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e99a775 // ummla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e9aa78a // ummla v10.4s, v28.16b, v26.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x6e9aa772 // ummla v18.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e99a78e // ummla v14.4s, v28.16b, v25.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e99a776 // ummla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x6e9aa78b // ummla v11.4s, v28.16b, v26.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e9aa773 // ummla v19.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x6e99a78f // ummla v15.4s, v28.16b, v25.16b\n"
- "cmp x27, #0x20\n"
".inst 0x6e99a777 // ummla v23.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x90]\n"
"ldr q2, [x25, #0x0]\n"
@@ -1018,15 +1021,12 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e9aa470 // ummla v16.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x6e99a42c // ummla v12.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e99a474 // ummla v20.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x6e9aa429 // ummla v9.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e9aa471 // ummla v17.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x6e99a42d // ummla v13.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e99a475 // ummla v21.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xd0]\n"
".inst 0x6e9aa42a // ummla v10.4s, v1.16b, v26.16b\n"
@@ -1048,43 +1048,43 @@ void a64_hybrid_u8u32_mmla_6x16 (
"93:" // Height 3: Multiply loop: Single iteration only
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e87a788 // ummla v8.4s, v28.16b, v7.16b\n"
- "trn1 v27.2d, v3.2d, v29.2d\n"
+ ".inst 0x6e86a78c // ummla v12.4s, v28.16b, v6.16b\n"
+ "trn2 v3.2d, v3.2d, v25.2d\n"
".inst 0x6e87a770 // ummla v16.4s, v27.16b, v7.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e86a78c // ummla v12.4s, v28.16b, v6.16b\n"
".inst 0x6e86a774 // ummla v20.4s, v27.16b, v6.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e9aa789 // ummla v9.4s, v28.16b, v26.16b\n"
- "trn2 v3.2d, v3.2d, v29.2d\n"
".inst 0x6e9aa771 // ummla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e99a78d // ummla v13.4s, v28.16b, v25.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x6e99a775 // ummla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e9aa78a // ummla v10.4s, v28.16b, v26.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e9aa772 // ummla v18.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e99a78e // ummla v14.4s, v28.16b, v25.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e99a776 // ummla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x6e9aa78b // ummla v11.4s, v28.16b, v26.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e9aa773 // ummla v19.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x6e99a78f // ummla v15.4s, v28.16b, v25.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e99a777 // ummla v23.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x90]\n"
".inst 0x6e9aa428 // ummla v8.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e9aa470 // ummla v16.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x6e99a42c // ummla v12.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e99a474 // ummla v20.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x6e9aa429 // ummla v9.4s, v1.16b, v26.16b\n"
@@ -1109,25 +1109,25 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 96f\n"
"95:" // Height 3: Multiply loop: Odd block loop
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "trn1 v28.2d, v26.2d, v25.2d\n"
- "ldr d25, [x24], #0x8\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
+ "sub x27, x27, #0x8\n"
+ "ldr d27, [x24], #0x8\n"
"ldr q26, [x10, #0x0]\n"
- "trn1 v27.2d, v25.2d, v27.2d\n"
- ".inst 0x6e9aa788 // ummla v8.4s, v28.16b, v26.16b\n"
+ "cmp x27, #0x8\n"
"ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v27.2d, v29.2d\n"
+ ".inst 0x6e9aa788 // ummla v8.4s, v28.16b, v26.16b\n"
".inst 0x6e9aa770 // ummla v16.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x20]\n"
".inst 0x6e99a78c // ummla v12.4s, v28.16b, v25.16b\n"
".inst 0x6e99a774 // ummla v20.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e9aa789 // ummla v9.4s, v28.16b, v26.16b\n"
- "sub x27, x27, #0x8\n"
".inst 0x6e9aa771 // ummla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e99a78d // ummla v13.4s, v28.16b, v25.16b\n"
- "cmp x27, #0x8\n"
".inst 0x6e99a775 // ummla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e9aa78a // ummla v10.4s, v28.16b, v26.16b\n"
@@ -1136,8 +1136,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e99a78e // ummla v14.4s, v28.16b, v25.16b\n"
".inst 0x6e99a776 // ummla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
- ".inst 0x6e9aa78b // ummla v11.4s, v28.16b, v26.16b\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e9aa78b // ummla v11.4s, v28.16b, v26.16b\n"
".inst 0x6e9aa773 // ummla v19.4s, v27.16b, v26.16b\n"
".inst 0x6e99a78f // ummla v15.4s, v28.16b, v25.16b\n"
".inst 0x6e99a777 // ummla v23.4s, v27.16b, v25.16b\n"
@@ -1183,9 +1183,9 @@ void a64_hybrid_u8u32_mmla_6x16 (
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn1 v27.2d, v3.2d, v25.2d\n"
".inst 0x6e9aa788 // ummla v8.4s, v28.16b, v26.16b\n"
+ ".inst 0x6e9da78c // ummla v12.4s, v28.16b, v29.16b\n"
".inst 0x6e9aa770 // ummla v16.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e9da78c // ummla v12.4s, v28.16b, v29.16b\n"
".inst 0x6e9da774 // ummla v20.4s, v27.16b, v29.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e9aa789 // ummla v9.4s, v28.16b, v26.16b\n"
@@ -1211,20 +1211,20 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp x28, x20\n"
"bne 89b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "uzp1 v7.2d, v8.2d, v12.2d\n"
"cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v16.2d, v16.2d, v20.2d\n"
"uzp1 v17.2d, v17.2d, v21.2d\n"
"uzp1 v18.2d, v18.2d, v22.2d\n"
@@ -1329,13 +1329,13 @@ void a64_hybrid_u8u32_mmla_6x16 (
"112:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"113:" // Height 4: Column loop
"tbz %x[flags], #0, 124f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x22, x23, x20, LSL #2\n"
"bge 122f\n"
"tbz x11, #3, 117f\n"
@@ -1497,8 +1497,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
"mov x28, #0x0\n"
"126:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 127f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1532,33 +1532,38 @@ void a64_hybrid_u8u32_mmla_6x16 (
"129:" // Height 4: Multiply loop: Main loop head
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a788 // ummla v8.4s, v28.16b, v7.16b\n"
"sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "ldr q4, [x23, #0x0]\n"
+ ".inst 0x6e87a788 // ummla v8.4s, v28.16b, v7.16b\n"
+ ".inst 0x6e86a78c // ummla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e87a770 // ummla v16.4s, v27.16b, v7.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e86a78c // ummla v12.4s, v28.16b, v6.16b\n"
".inst 0x6e86a774 // ummla v20.4s, v27.16b, v6.16b\n"
"ldr q25, [x10, #0x30]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e9aa789 // ummla v9.4s, v28.16b, v26.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e9aa771 // ummla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e99a78d // ummla v13.4s, v28.16b, v25.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x6e99a775 // ummla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e9aa78a // ummla v10.4s, v28.16b, v26.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e9aa772 // ummla v18.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e99a78e // ummla v14.4s, v28.16b, v25.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e99a776 // ummla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x6e9aa78b // ummla v11.4s, v28.16b, v26.16b\n"
- "add x23, x23, #0x10\n"
- "ldr q4, [x23, #0x0]\n"
".inst 0x6e9aa773 // ummla v19.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x6e99a78f // ummla v15.4s, v28.16b, v25.16b\n"
@@ -1569,23 +1574,18 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e9aa470 // ummla v16.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x6e99a42c // ummla v12.4s, v1.16b, v25.16b\n"
- "cmp x27, #0x20\n"
".inst 0x6e99a474 // ummla v20.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x6e9aa429 // ummla v9.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e9aa471 // ummla v17.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x6e99a42d // ummla v13.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e99a475 // ummla v21.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xd0]\n"
".inst 0x6e9aa42a // ummla v10.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e9aa472 // ummla v18.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xe0]\n"
".inst 0x6e99a42e // ummla v14.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e99a476 // ummla v22.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xf0]\n"
"add x10, x10, #0x100\n"
@@ -1601,48 +1601,48 @@ void a64_hybrid_u8u32_mmla_6x16 (
"130:" // Height 4: Multiply loop: Single iteration only
"trn1 v28.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a788 // ummla v8.4s, v28.16b, v7.16b\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v27.2d, v3.2d, v4.2d\n"
+ "trn2 v3.2d, v3.2d, v4.2d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ ".inst 0x6e87a788 // ummla v8.4s, v28.16b, v7.16b\n"
+ ".inst 0x6e86a78c // ummla v12.4s, v28.16b, v6.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a770 // ummla v16.4s, v27.16b, v7.16b\n"
"ldr q26, [x10, #0x20]\n"
- ".inst 0x6e86a78c // ummla v12.4s, v28.16b, v6.16b\n"
".inst 0x6e86a774 // ummla v20.4s, v27.16b, v6.16b\n"
"ldr q25, [x10, #0x30]\n"
".inst 0x6e9aa789 // ummla v9.4s, v28.16b, v26.16b\n"
- "trn2 v3.2d, v3.2d, v4.2d\n"
".inst 0x6e9aa771 // ummla v17.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x40]\n"
".inst 0x6e99a78d // ummla v13.4s, v28.16b, v25.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e99a775 // ummla v21.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x50]\n"
".inst 0x6e9aa78a // ummla v10.4s, v28.16b, v26.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e9aa772 // ummla v18.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x60]\n"
".inst 0x6e99a78e // ummla v14.4s, v28.16b, v25.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x6e99a776 // ummla v22.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x70]\n"
".inst 0x6e9aa78b // ummla v11.4s, v28.16b, v26.16b\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e9aa773 // ummla v19.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x80]\n"
".inst 0x6e99a78f // ummla v15.4s, v28.16b, v25.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e99a777 // ummla v23.4s, v27.16b, v25.16b\n"
"ldr q25, [x10, #0x90]\n"
".inst 0x6e9aa428 // ummla v8.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e9aa470 // ummla v16.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xa0]\n"
".inst 0x6e99a42c // ummla v12.4s, v1.16b, v25.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e99a474 // ummla v20.4s, v3.16b, v25.16b\n"
"ldr q25, [x10, #0xb0]\n"
".inst 0x6e9aa429 // ummla v9.4s, v1.16b, v26.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e9aa471 // ummla v17.4s, v3.16b, v26.16b\n"
"ldr q26, [x10, #0xc0]\n"
".inst 0x6e99a42d // ummla v13.4s, v1.16b, v25.16b\n"
@@ -1664,16 +1664,16 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 133f\n"
"132:" // Height 4: Multiply loop: Odd block loop
- "ldr d26, [x26], #0x8\n"
- "ldr d25, [x25], #0x8\n"
- "trn1 v28.2d, v26.2d, v25.2d\n"
+ "ldr d30, [x26], #0x8\n"
+ "ldr d28, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr d26, [x24], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "trn1 v27.2d, v26.2d, v25.2d\n"
+ "ldr d29, [x24], #0x8\n"
+ "ldr d27, [x23], #0x8\n"
"cmp x27, #0x8\n"
"ldr q26, [x10, #0x0]\n"
"ldr q25, [x10, #0x10]\n"
+ "trn1 v28.2d, v30.2d, v28.2d\n"
+ "trn1 v27.2d, v29.2d, v27.2d\n"
".inst 0x6e9aa788 // ummla v8.4s, v28.16b, v26.16b\n"
".inst 0x6e9aa770 // ummla v16.4s, v27.16b, v26.16b\n"
"ldr q26, [x10, #0x20]\n"
@@ -1774,24 +1774,24 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp x28, x20\n"
"bne 126b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
- "uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
"cmp x11, #0x10\n"
+ "uzp1 v7.2d, v8.2d, v12.2d\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "uzp1 v13.2d, v10.2d, v14.2d\n"
"prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
@@ -1918,14 +1918,14 @@ void a64_hybrid_u8u32_mmla_6x16 (
"149:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"150:" // Height 5: Column loop
"tbz %x[flags], #0, 161f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x21, x22, x20, LSL #2\n"
"bge 159f\n"
"tbz x11, #3, 154f\n"
@@ -2123,8 +2123,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
"mov x28, #0x0\n"
"163:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 164f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2161,51 +2161,51 @@ void a64_hybrid_u8u32_mmla_6x16 (
"166:" // Height 5: Multiply loop: Main loop head
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a4c8 // ummla v8.4s, v6.16b, v7.16b\n"
+ "sub x27, x27, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"trn1 v4.2d, v5.2d, v0.2d\n"
"trn2 v5.2d, v5.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x6e87a4c8 // ummla v8.4s, v6.16b, v7.16b\n"
+ ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
+ "add x22, x22, #0x10\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e80a4cc // ummla v12.4s, v6.16b, v0.16b\n"
".inst 0x6e80a454 // ummla v20.4s, v2.16b, v0.16b\n"
- "add x26, x26, #0x10\n"
".inst 0x6e80a49c // ummla v28.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x6e87a4c9 // ummla v9.4s, v6.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
- "add x24, x24, #0x10\n"
".inst 0x6e80a4cd // ummla v13.4s, v6.16b, v0.16b\n"
".inst 0x6e80a455 // ummla v21.4s, v2.16b, v0.16b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6e80a49d // ummla v29.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x50]\n"
".inst 0x6e87a4ca // ummla v10.4s, v6.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e80a4ce // ummla v14.4s, v6.16b, v0.16b\n"
".inst 0x6e80a456 // ummla v22.4s, v2.16b, v0.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e80a49e // ummla v30.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x70]\n"
".inst 0x6e87a4cb // ummla v11.4s, v6.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e80a4cf // ummla v15.4s, v6.16b, v0.16b\n"
".inst 0x6e80a457 // ummla v23.4s, v2.16b, v0.16b\n"
"ldr q2, [x25, #0x0]\n"
@@ -2251,47 +2251,47 @@ void a64_hybrid_u8u32_mmla_6x16 (
"167:" // Height 5: Multiply loop: Single iteration only
"trn1 v6.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a4c8 // ummla v8.4s, v6.16b, v7.16b\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
"trn1 v4.2d, v5.2d, v0.2d\n"
"trn2 v5.2d, v5.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x6e87a4c8 // ummla v8.4s, v6.16b, v7.16b\n"
+ ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e80a4cc // ummla v12.4s, v6.16b, v0.16b\n"
".inst 0x6e80a454 // ummla v20.4s, v2.16b, v0.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e80a49c // ummla v28.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x6e87a4c9 // ummla v9.4s, v6.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e80a4cd // ummla v13.4s, v6.16b, v0.16b\n"
".inst 0x6e80a455 // ummla v21.4s, v2.16b, v0.16b\n"
- "add x22, x22, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e80a49d // ummla v29.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x50]\n"
".inst 0x6e87a4ca // ummla v10.4s, v6.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e80a4ce // ummla v14.4s, v6.16b, v0.16b\n"
".inst 0x6e80a456 // ummla v22.4s, v2.16b, v0.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e80a49e // ummla v30.4s, v4.16b, v0.16b\n"
"ldr q0, [x10, #0x70]\n"
".inst 0x6e87a4cb // ummla v11.4s, v6.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
@@ -2335,24 +2335,24 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 170f\n"
"169:" // Height 5: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
+ "ldr d3, [x24], #0x8\n"
+ "ldr d2, [x23], #0x8\n"
+ "cmp x27, #0x8\n"
"ldr d0, [x22], #0x8\n"
"ldr q1, [x10, #0x0]\n"
- "trn1 v2.2d, v0.2d, v2.2d\n"
- ".inst 0x6e81a488 // ummla v8.4s, v4.16b, v1.16b\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v3.2d, v2.2d\n"
+ "trn1 v2.2d, v0.2d, v5.2d\n"
"ldr q0, [x10, #0x10]\n"
+ ".inst 0x6e81a488 // ummla v8.4s, v4.16b, v1.16b\n"
".inst 0x6e81a470 // ummla v16.4s, v3.16b, v1.16b\n"
".inst 0x6e81a458 // ummla v24.4s, v2.16b, v1.16b\n"
"ldr q1, [x10, #0x20]\n"
".inst 0x6e80a48c // ummla v12.4s, v4.16b, v0.16b\n"
".inst 0x6e80a474 // ummla v20.4s, v3.16b, v0.16b\n"
- "cmp x27, #0x8\n"
".inst 0x6e80a45c // ummla v28.4s, v2.16b, v0.16b\n"
"ldr q0, [x10, #0x30]\n"
".inst 0x6e81a489 // ummla v9.4s, v4.16b, v1.16b\n"
@@ -2371,8 +2371,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e80a476 // ummla v22.4s, v3.16b, v0.16b\n"
".inst 0x6e80a45e // ummla v30.4s, v2.16b, v0.16b\n"
"ldr q0, [x10, #0x70]\n"
- ".inst 0x6e86a48b // ummla v11.4s, v4.16b, v6.16b\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e86a48b // ummla v11.4s, v4.16b, v6.16b\n"
".inst 0x6e86a473 // ummla v19.4s, v3.16b, v6.16b\n"
".inst 0x6e86a45b // ummla v27.4s, v2.16b, v6.16b\n"
".inst 0x6e80a48f // ummla v15.4s, v4.16b, v0.16b\n"
@@ -2471,28 +2471,28 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp x28, x20\n"
"bne 163b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "cmp x11, #0x10\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
- "uzp1 v13.2d, v10.2d, v14.2d\n"
"prfm pstl1keep, [x9, #0x0]\n"
+ "uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x24, #0x0]\n"
- "prfm pstl1keep, [x23, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x22, #0x0]\n"
- "prfm pstl1keep, [x21, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
@@ -2640,19 +2640,20 @@ void a64_hybrid_u8u32_mmla_6x16 (
"b 224f\n"
"186:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"187:" // Height 6: Column loop
"tbz %x[flags], #0, 198f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "cmp x11, #0x10\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"add x20, x21, x20, LSL #2\n"
"bge 196f\n"
"tbz x11, #3, 191f\n"
@@ -2870,8 +2871,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
"mov x28, #0x0\n"
"200:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 201f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2912,56 +2913,56 @@ void a64_hybrid_u8u32_mmla_6x16 (
"203:" // Height 6: Multiply loop: Main loop head
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
"sub x27, x27, #0x10\n"
+ "add x26, x26, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
+ "cmp x27, #0x20\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "add x25, x25, #0x10\n"
".inst 0x6e86a49c // ummla v28.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
- "add x23, x23, #0x10\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "add x22, x22, #0x10\n"
- "add x21, x21, #0x10\n"
".inst 0x6e86a49d // ummla v29.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "cmp x27, #0x20\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x25, #0x80]\n"
- "prfm pldl1keep, [x24, #0x80]\n"
".inst 0x6e86a49e // ummla v30.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
"ldr q2, [x25, #0x0]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e86a49f // ummla v31.4s, v4.16b, v6.16b\n"
"ldr q0, [x10, #0x90]\n"
"ldr q4, [x23, #0x0]\n"
@@ -3005,52 +3006,52 @@ void a64_hybrid_u8u32_mmla_6x16 (
"204:" // Height 6: Multiply loop: Single iteration only
"trn1 v0.2d, v1.2d, v2.2d\n"
"trn2 v1.2d, v1.2d, v2.2d\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
"add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"trn1 v2.2d, v3.2d, v4.2d\n"
"trn2 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
- "add x25, x25, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"trn1 v4.2d, v5.2d, v6.2d\n"
"trn2 v5.2d, v5.2d, v6.2d\n"
"ldr q6, [x10, #0x10]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ "add x21, x21, #0x10\n"
+ "prfm pldl1keep, [x26, #0x80]\n"
+ "prfm pldl1keep, [x25, #0x80]\n"
+ ".inst 0x6e87a450 // ummla v16.4s, v2.16b, v7.16b\n"
+ "prfm pldl1keep, [x24, #0x80]\n"
+ "sub x27, x27, #0x10\n"
+ "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e87a498 // ummla v24.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x20]\n"
".inst 0x6e86a40c // ummla v12.4s, v0.16b, v6.16b\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e86a454 // ummla v20.4s, v2.16b, v6.16b\n"
- "add x24, x24, #0x10\n"
".inst 0x6e86a49c // ummla v28.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x30]\n"
+ "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e87a409 // ummla v9.4s, v0.16b, v7.16b\n"
- "add x23, x23, #0x10\n"
".inst 0x6e87a451 // ummla v17.4s, v2.16b, v7.16b\n"
".inst 0x6e87a499 // ummla v25.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x40]\n"
- "add x22, x22, #0x10\n"
".inst 0x6e86a40d // ummla v13.4s, v0.16b, v6.16b\n"
".inst 0x6e86a455 // ummla v21.4s, v2.16b, v6.16b\n"
- "add x21, x21, #0x10\n"
- "sub x27, x27, #0x10\n"
".inst 0x6e86a49d // ummla v29.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x50]\n"
".inst 0x6e87a40a // ummla v10.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x26, #0x80]\n"
".inst 0x6e87a452 // ummla v18.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49a // ummla v26.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x60]\n"
- "prfm pldl1keep, [x25, #0x80]\n"
".inst 0x6e86a40e // ummla v14.4s, v0.16b, v6.16b\n"
".inst 0x6e86a456 // ummla v22.4s, v2.16b, v6.16b\n"
- "prfm pldl1keep, [x24, #0x80]\n"
- "prfm pldl1keep, [x23, #0x80]\n"
".inst 0x6e86a49e // ummla v30.4s, v4.16b, v6.16b\n"
"ldr q6, [x10, #0x70]\n"
".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
- "prfm pldl1keep, [x22, #0x80]\n"
".inst 0x6e87a453 // ummla v19.4s, v2.16b, v7.16b\n"
".inst 0x6e87a49b // ummla v27.4s, v4.16b, v7.16b\n"
"ldr q7, [x10, #0x80]\n"
- "prfm pldl1keep, [x21, #0x80]\n"
".inst 0x6e86a40f // ummla v15.4s, v0.16b, v6.16b\n"
".inst 0x6e86a457 // ummla v23.4s, v2.16b, v6.16b\n"
".inst 0x6e86a49f // ummla v31.4s, v4.16b, v6.16b\n"
@@ -3091,18 +3092,18 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp x27, #0x8\n"
"blt 207f\n"
"206:" // Height 6: Multiply loop: Odd block loop
- "ldr d1, [x26], #0x8\n"
- "ldr d0, [x25], #0x8\n"
- "trn1 v4.2d, v1.2d, v0.2d\n"
+ "ldr d6, [x26], #0x8\n"
+ "ldr d4, [x25], #0x8\n"
"sub x27, x27, #0x8\n"
- "ldr d1, [x24], #0x8\n"
- "ldr d0, [x23], #0x8\n"
- "trn1 v3.2d, v1.2d, v0.2d\n"
+ "ldr d5, [x24], #0x8\n"
+ "ldr d3, [x23], #0x8\n"
"cmp x27, #0x8\n"
- "ldr d1, [x22], #0x8\n"
+ "ldr d2, [x22], #0x8\n"
"ldr d0, [x21], #0x8\n"
- "trn1 v2.2d, v1.2d, v0.2d\n"
"ldr q1, [x10, #0x0]\n"
+ "trn1 v4.2d, v6.2d, v4.2d\n"
+ "trn1 v3.2d, v5.2d, v3.2d\n"
+ "trn1 v2.2d, v2.2d, v0.2d\n"
"ldr q0, [x10, #0x10]\n"
".inst 0x6e81a488 // ummla v8.4s, v4.16b, v1.16b\n"
".inst 0x6e81a470 // ummla v16.4s, v3.16b, v1.16b\n"
@@ -3196,9 +3197,9 @@ void a64_hybrid_u8u32_mmla_6x16 (
"ldr q0, [x10, #0x0]\n"
"trn1 v7.2d, v1.2d, v2.2d\n"
"trn1 v3.2d, v3.2d, v4.2d\n"
- ".inst 0x6e80a4e8 // ummla v8.4s, v7.16b, v0.16b\n"
"trn1 v2.2d, v5.2d, v6.2d\n"
"ldr q1, [x10, #0x10]\n"
+ ".inst 0x6e80a4e8 // ummla v8.4s, v7.16b, v0.16b\n"
".inst 0x6e80a470 // ummla v16.4s, v3.16b, v0.16b\n"
".inst 0x6e80a458 // ummla v24.4s, v2.16b, v0.16b\n"
"ldr q0, [x10, #0x20]\n"
@@ -3222,8 +3223,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
".inst 0x6e81a476 // ummla v22.4s, v3.16b, v1.16b\n"
".inst 0x6e81a45e // ummla v30.4s, v2.16b, v1.16b\n"
"ldr q6, [x10, #0x70]\n"
- ".inst 0x6e80a4eb // ummla v11.4s, v7.16b, v0.16b\n"
"add x10, x10, #0x80\n"
+ ".inst 0x6e80a4eb // ummla v11.4s, v7.16b, v0.16b\n"
".inst 0x6e80a473 // ummla v19.4s, v3.16b, v0.16b\n"
".inst 0x6e80a45b // ummla v27.4s, v2.16b, v0.16b\n"
".inst 0x6e86a4ef // ummla v15.4s, v7.16b, v6.16b\n"
@@ -3235,32 +3236,32 @@ void a64_hybrid_u8u32_mmla_6x16 (
"cmp x28, x20\n"
"bne 200b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
+ "cmp x11, #0x10\n"
"uzp1 v7.2d, v8.2d, v12.2d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 v8.2d, v8.2d, v12.2d\n"
"uzp1 v12.2d, v9.2d, v13.2d\n"
- "add x20, x21, x20, LSL #2\n"
- "cmp x11, #0x10\n"
"uzp2 v9.2d, v9.2d, v13.2d\n"
+ "prfm pstl1keep, [x9, #0x0]\n"
"uzp1 v13.2d, v10.2d, v14.2d\n"
"uzp2 v10.2d, v10.2d, v14.2d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 v14.2d, v11.2d, v15.2d\n"
- "prfm pstl1keep, [x9, #0x0]\n"
- "prfm pstl1keep, [x24, #0x0]\n"
"uzp2 v11.2d, v11.2d, v15.2d\n"
+ "prfm pstl1keep, [x24, #0x0]\n"
+ "add x21, x22, x20, LSL #2\n"
"uzp1 v15.2d, v16.2d, v20.2d\n"
- "prfm pstl1keep, [x23, #0x0]\n"
- "prfm pstl1keep, [x22, #0x0]\n"
"uzp2 v16.2d, v16.2d, v20.2d\n"
+ "prfm pstl1keep, [x23, #0x0]\n"
+ "add x20, x21, x20, LSL #2\n"
"uzp1 v20.2d, v17.2d, v21.2d\n"
- "prfm pstl1keep, [x21, #0x0]\n"
- "prfm pstl1keep, [x20, #0x0]\n"
"uzp2 v17.2d, v17.2d, v21.2d\n"
+ "prfm pstl1keep, [x22, #0x0]\n"
"uzp1 v21.2d, v18.2d, v22.2d\n"
"uzp2 v18.2d, v18.2d, v22.2d\n"
+ "prfm pstl1keep, [x21, #0x0]\n"
+ "prfm pstl1keep, [x20, #0x0]\n"
"uzp1 v22.2d, v19.2d, v23.2d\n"
"uzp2 v19.2d, v19.2d, v23.2d\n"
"uzp1 v23.2d, v24.2d, v28.2d\n"
@@ -3439,8 +3440,8 @@ void a64_hybrid_u8u32_mmla_6x16 (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"224:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12.hpp
index 25c5bf1b44..13188f0e4d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void a64_interleaved_bf16fp32_dot_8x12( ARGLIST );
class cls_a64_interleaved_bf16fp32_dot_8x12
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsFixed<operand_type, result_type, 8, 12, 2> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 2, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 2> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 2, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp
index 5684f464b6..ccfd19db36 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_dot_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,30 +55,30 @@ void a64_interleaved_bf16fp32_dot_8x12(
"ldr q4, [x22, #0x0]\n"
"ldr q5, [x22, #0x10]\n"
"mov %x[Apanel], x21\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
- "movi v8.16b, #0x0\n"
"ldr q6, [x22, #0x20]\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "movi v8.16b, #0x0\n"
"movi v9.16b, #0x0\n"
- "prfm pldl1keep, [%x[Apanel], #0x0]\n"
"movi v10.16b, #0x0\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
"movi v11.16b, #0x0\n"
- "prfm pldl1keep, [x22, #0x0]\n"
+ "cmp x20, #0x2\n"
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
- "prfm pldl1keep, [x22, #0x40]\n"
+ "prfm pldl1keep, [%x[Apanel], #0x0]\n"
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
- "prfm pldl1keep, [%x[Apanel], #0x40]\n"
+ "prfm pldl1keep, [x22, #0x0]\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
- "prfm pldl1keep, [x22, #0x80]\n"
+ "prfm pldl1keep, [x22, #0x40]\n"
"movi v18.16b, #0x0\n"
"movi v19.16b, #0x0\n"
+ "prfm pldl1keep, [%x[Apanel], #0x40]\n"
"movi v20.16b, #0x0\n"
"movi v21.16b, #0x0\n"
+ "prfm pldl1keep, [x22, #0x80]\n"
"movi v22.16b, #0x0\n"
"movi v23.16b, #0x0\n"
"movi v24.16b, #0x0\n"
@@ -159,9 +159,9 @@ void a64_interleaved_bf16fp32_dot_8x12(
"bge 3b\n"
"4:" // main loop skip
"add %x[Apanel], %x[Apanel], #0x20\n"
+ "add x22, x22, #0x30\n"
".inst 0x4f40f088 // bfdot v8.4s, v4.8h, v0.h[0]\n"
".inst 0x4f60f08b // bfdot v11.4s, v4.8h, v0.h[1]\n"
- "add x22, x22, #0x30\n"
".inst 0x4f40f88e // bfdot v14.4s, v4.8h, v0.h[2]\n"
".inst 0x4f60f891 // bfdot v17.4s, v4.8h, v0.h[3]\n"
".inst 0x4f41f094 // bfdot v20.4s, v4.8h, v1.h[0]\n"
@@ -190,13 +190,13 @@ void a64_interleaved_bf16fp32_dot_8x12(
"add %x[Apanel], %x[Apanel], #0x20\n"
"ldr q2, [x22, #0x0]\n"
"ldr q1, [x22, #0x10]\n"
- ".inst 0x4f44f048 // bfdot v8.4s, v2.8h, v4.h[0]\n"
"ldr q0, [x22, #0x20]\n"
+ "add x22, x22, #0x30\n"
+ ".inst 0x4f44f048 // bfdot v8.4s, v2.8h, v4.h[0]\n"
".inst 0x4f64f04b // bfdot v11.4s, v2.8h, v4.h[1]\n"
".inst 0x4f44f84e // bfdot v14.4s, v2.8h, v4.h[2]\n"
".inst 0x4f64f851 // bfdot v17.4s, v2.8h, v4.h[3]\n"
".inst 0x4f43f054 // bfdot v20.4s, v2.8h, v3.h[0]\n"
- "add x22, x22, #0x30\n"
".inst 0x4f63f057 // bfdot v23.4s, v2.8h, v3.h[1]\n"
".inst 0x4f43f85a // bfdot v26.4s, v2.8h, v3.h[2]\n"
".inst 0x4f63f85d // bfdot v29.4s, v2.8h, v3.h[3]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12.hpp
index 66c2b92a34..7a270b6082 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2023 Arm Limited.
+ * Copyright (c) 2019-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,7 +41,8 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510( ARGLIST );
class cls_a64_interleaved_bf16fp32_mmla_8x12
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -63,8 +64,8 @@ public:
}
- StdTransformsFixed<operand_type, result_type, 8, 12, 4> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 4> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 4, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp
index bab687a9b4..1513d378ca 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/a510.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,18 +54,18 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
"2:" // Width loop
"ldp q4, q5, [x22], #0x20\n"
"mov %x[Apanel], x21\n"
- "ld1 { v0.8h }, [%x[Apanel]], #0x10\n"
- "ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
- "movi v8.16b, #0x0\n"
- "ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "movi v8.16b, #0x0\n"
"movi v9.16b, #0x0\n"
"movi v10.16b, #0x0\n"
"movi v11.16b, #0x0\n"
+ "ld1 { v0.8h }, [%x[Apanel]], #0x10\n"
"movi v12.16b, #0x0\n"
"movi v13.16b, #0x0\n"
+ "ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
+ "cmp x20, #0x2\n"
"movi v14.16b, #0x0\n"
+ "ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
"movi v15.16b, #0x0\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
@@ -97,7 +97,7 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
".inst 0x6e44ecda // bfmmla v26.4s, v6.8h, v4.8h\n"
"cmp x20, #0x2\n"
".inst 0x6e45ecdd // bfmmla v29.4s, v6.8h, v5.8h\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q5, q4, [x22], #0x20\n"
".inst 0x6e43ec09 // bfmmla v9.4s, v0.8h, v3.8h\n"
".inst 0x6e47ec0c // bfmmla v12.4s, v0.8h, v7.8h\n"
".inst 0x6e43ec2f // bfmmla v15.4s, v1.8h, v3.8h\n"
@@ -106,28 +106,28 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
".inst 0x6e47ec58 // bfmmla v24.4s, v2.8h, v7.8h\n"
".inst 0x6e43ecdb // bfmmla v27.4s, v6.8h, v3.8h\n"
".inst 0x6e47ecde // bfmmla v30.4s, v6.8h, v7.8h\n"
- "ldp q7, q3, [x22], #0x20\n"
- ".inst 0x6e44ec0a // bfmmla v10.4s, v0.8h, v4.8h\n"
- ".inst 0x6e45ec0d // bfmmla v13.4s, v0.8h, v5.8h\n"
+ "ldp q3, q7, [x22], #0x20\n"
+ ".inst 0x6e45ec0a // bfmmla v10.4s, v0.8h, v5.8h\n"
+ ".inst 0x6e44ec0d // bfmmla v13.4s, v0.8h, v4.8h\n"
"ld1 { v0.8h }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e44ec30 // bfmmla v16.4s, v1.8h, v4.8h\n"
- ".inst 0x6e45ec33 // bfmmla v19.4s, v1.8h, v5.8h\n"
+ ".inst 0x6e45ec30 // bfmmla v16.4s, v1.8h, v5.8h\n"
+ ".inst 0x6e44ec33 // bfmmla v19.4s, v1.8h, v4.8h\n"
+ ".inst 0x6e45ec56 // bfmmla v22.4s, v2.8h, v5.8h\n"
"ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e44ec56 // bfmmla v22.4s, v2.8h, v4.8h\n"
- ".inst 0x6e45ec59 // bfmmla v25.4s, v2.8h, v5.8h\n"
+ ".inst 0x6e44ec59 // bfmmla v25.4s, v2.8h, v4.8h\n"
+ ".inst 0x6e45ecdc // bfmmla v28.4s, v6.8h, v5.8h\n"
"ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e44ecdc // bfmmla v28.4s, v6.8h, v4.8h\n"
- ".inst 0x6e45ecdf // bfmmla v31.4s, v6.8h, v5.8h\n"
+ ".inst 0x6e44ecdf // bfmmla v31.4s, v6.8h, v4.8h\n"
+ ".inst 0x6e43ec08 // bfmmla v8.4s, v0.8h, v3.8h\n"
"ld1 { v6.8h }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e47ec08 // bfmmla v8.4s, v0.8h, v7.8h\n"
+ ".inst 0x6e47ec0b // bfmmla v11.4s, v0.8h, v7.8h\n"
"ldp q4, q5, [x22], #0x20\n"
- ".inst 0x6e43ec0b // bfmmla v11.4s, v0.8h, v3.8h\n"
- ".inst 0x6e47ec2e // bfmmla v14.4s, v1.8h, v7.8h\n"
- ".inst 0x6e43ec31 // bfmmla v17.4s, v1.8h, v3.8h\n"
- ".inst 0x6e47ec54 // bfmmla v20.4s, v2.8h, v7.8h\n"
- ".inst 0x6e43ec57 // bfmmla v23.4s, v2.8h, v3.8h\n"
- ".inst 0x6e47ecda // bfmmla v26.4s, v6.8h, v7.8h\n"
- ".inst 0x6e43ecdd // bfmmla v29.4s, v6.8h, v3.8h\n"
+ ".inst 0x6e43ec2e // bfmmla v14.4s, v1.8h, v3.8h\n"
+ ".inst 0x6e47ec31 // bfmmla v17.4s, v1.8h, v7.8h\n"
+ ".inst 0x6e43ec54 // bfmmla v20.4s, v2.8h, v3.8h\n"
+ ".inst 0x6e47ec57 // bfmmla v23.4s, v2.8h, v7.8h\n"
+ ".inst 0x6e43ecda // bfmmla v26.4s, v6.8h, v3.8h\n"
+ ".inst 0x6e47ecdd // bfmmla v29.4s, v6.8h, v7.8h\n"
"ldp q7, q3, [x22], #0x20\n"
".inst 0x6e44ec09 // bfmmla v9.4s, v0.8h, v4.8h\n"
".inst 0x6e45ec0c // bfmmla v12.4s, v0.8h, v5.8h\n"
@@ -143,11 +143,11 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
"ld1 { v0.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e47ec30 // bfmmla v16.4s, v1.8h, v7.8h\n"
".inst 0x6e43ec33 // bfmmla v19.4s, v1.8h, v3.8h\n"
- "ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e47ec56 // bfmmla v22.4s, v2.8h, v7.8h\n"
+ "ld1 { v1.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e43ec59 // bfmmla v25.4s, v2.8h, v3.8h\n"
- "ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e47ecdc // bfmmla v28.4s, v6.8h, v7.8h\n"
+ "ld1 { v2.8h }, [%x[Apanel]], #0x10\n"
".inst 0x6e43ecdf // bfmmla v31.4s, v6.8h, v3.8h\n"
"bge 3b\n"
"4:" // main loop skip
@@ -182,9 +182,9 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
"ldp q1, q0, [x22], #0x20\n"
"ld1 { v7.8h }, [%x[Apanel]], #0x10\n"
"ld1 { v6.8h }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e41ece8 // bfmmla v8.4s, v7.8h, v1.8h\n"
"ld1 { v5.8h }, [%x[Apanel]], #0x10\n"
"ld1 { v4.8h }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e41ece8 // bfmmla v8.4s, v7.8h, v1.8h\n"
".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
"ldp q3, q2, [x22], #0x20\n"
".inst 0x6e41ecce // bfmmla v14.4s, v6.8h, v1.8h\n"
@@ -212,41 +212,41 @@ void a64_interleaved_bf16fp32_mmla_8x12_a510(
".inst 0x6e40ec9f // bfmmla v31.4s, v4.8h, v0.8h\n"
"5:" // multiply loop done
"subs x23, x23, #0x1\n"
- "uzp1 v0.2d, v8.2d, v11.2d\n"
+ "uzp1 v2.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v1.2d, v9.2d, v12.2d\n"
"uzp2 v9.2d, v9.2d, v12.2d\n"
- "str q0, [%x[Cpanel], #0x0]\n"
"uzp1 v0.2d, v10.2d, v13.2d\n"
"uzp2 v10.2d, v10.2d, v13.2d\n"
- "str q1, [%x[Cpanel], #0x10]\n"
- "str q0, [%x[Cpanel], #0x20]\n"
- "uzp1 v0.2d, v14.2d, v17.2d\n"
+ "str q2, [%x[Cpanel], #0x0]\n"
+ "uzp1 v3.2d, v14.2d, v17.2d\n"
"uzp2 v14.2d, v14.2d, v17.2d\n"
- "str q8, [%x[Cpanel], #0x30]\n"
+ "str q1, [%x[Cpanel], #0x10]\n"
"uzp1 v2.2d, v15.2d, v18.2d\n"
"uzp2 v15.2d, v15.2d, v18.2d\n"
- "str q9, [%x[Cpanel], #0x40]\n"
+ "str q0, [%x[Cpanel], #0x20]\n"
"uzp1 v17.2d, v16.2d, v19.2d\n"
"uzp2 v16.2d, v16.2d, v19.2d\n"
- "str q10, [%x[Cpanel], #0x50]\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
"uzp1 v1.2d, v20.2d, v23.2d\n"
"uzp2 v20.2d, v20.2d, v23.2d\n"
- "str q0, [%x[Cpanel], #0x60]\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
"uzp1 v0.2d, v21.2d, v24.2d\n"
"uzp2 v21.2d, v21.2d, v24.2d\n"
- "str q2, [%x[Cpanel], #0x70]\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
"uzp1 v23.2d, v22.2d, v25.2d\n"
"uzp2 v22.2d, v22.2d, v25.2d\n"
- "str q17, [%x[Cpanel], #0x80]\n"
+ "str q3, [%x[Cpanel], #0x60]\n"
"uzp1 v19.2d, v26.2d, v29.2d\n"
"uzp2 v26.2d, v26.2d, v29.2d\n"
- "str q14, [%x[Cpanel], #0x90]\n"
+ "str q2, [%x[Cpanel], #0x70]\n"
"uzp1 v18.2d, v27.2d, v30.2d\n"
"uzp2 v27.2d, v27.2d, v30.2d\n"
- "str q15, [%x[Cpanel], #0xa0]\n"
+ "str q17, [%x[Cpanel], #0x80]\n"
"uzp1 v17.2d, v28.2d, v31.2d\n"
"uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
"str q16, [%x[Cpanel], #0xb0]\n"
"str q1, [%x[Cpanel], #0xc0]\n"
"str q0, [%x[Cpanel], #0xd0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp
index 8485820c7c..f4493c6855 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,21 +55,21 @@ void a64_interleaved_bf16fp32_mmla_8x12(
"ldr q4, [x22, #0x0]\n"
"ldr q5, [x22, #0x10]\n"
"mov %x[Apanel], x21\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
- "movi v8.16b, #0x0\n"
- "ldr q2, [%x[Apanel], #0x20]\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "movi v8.16b, #0x0\n"
"movi v9.16b, #0x0\n"
"movi v10.16b, #0x0\n"
- "add x22, x22, #0x20\n"
"movi v11.16b, #0x0\n"
+ "add x22, x22, #0x20\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
"movi v12.16b, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
+ "cmp x20, #0x2\n"
"movi v13.16b, #0x0\n"
"movi v14.16b, #0x0\n"
"movi v15.16b, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v16.16b, #0x0\n"
"movi v17.16b, #0x0\n"
"movi v18.16b, #0x0\n"
@@ -196,19 +196,19 @@ void a64_interleaved_bf16fp32_mmla_8x12(
"cbz x20, 5f\n"
"ldr q1, [x22, #0x0]\n"
"ldr q7, [%x[Apanel], #0x0]\n"
- ".inst 0x6e41ece8 // bfmmla v8.4s, v7.8h, v1.8h\n"
"ldr q6, [%x[Apanel], #0x10]\n"
"ldr q0, [x22, #0x10]\n"
- ".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
"ldr q5, [%x[Apanel], #0x20]\n"
"ldr q4, [%x[Apanel], #0x30]\n"
- ".inst 0x6e41ecce // bfmmla v14.4s, v6.8h, v1.8h\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
"ldr q3, [x22, #0x20]\n"
"ldr q2, [x22, #0x30]\n"
+ ".inst 0x6e41ece8 // bfmmla v8.4s, v7.8h, v1.8h\n"
+ ".inst 0x6e40eceb // bfmmla v11.4s, v7.8h, v0.8h\n"
+ ".inst 0x6e41ecce // bfmmla v14.4s, v6.8h, v1.8h\n"
".inst 0x6e40ecd1 // bfmmla v17.4s, v6.8h, v0.8h\n"
".inst 0x6e41ecb4 // bfmmla v20.4s, v5.8h, v1.8h\n"
".inst 0x6e40ecb7 // bfmmla v23.4s, v5.8h, v0.8h\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6e41ec9a // bfmmla v26.4s, v4.8h, v1.8h\n"
"ldr q1, [x22, #0x40]\n"
".inst 0x6e40ec9d // bfmmla v29.4s, v4.8h, v0.8h\n"
@@ -216,8 +216,8 @@ void a64_interleaved_bf16fp32_mmla_8x12(
".inst 0x6e43ece9 // bfmmla v9.4s, v7.8h, v3.8h\n"
".inst 0x6e42ecec // bfmmla v12.4s, v7.8h, v2.8h\n"
".inst 0x6e43eccf // bfmmla v15.4s, v6.8h, v3.8h\n"
- ".inst 0x6e42ecd2 // bfmmla v18.4s, v6.8h, v2.8h\n"
"add x22, x22, #0x60\n"
+ ".inst 0x6e42ecd2 // bfmmla v18.4s, v6.8h, v2.8h\n"
".inst 0x6e43ecb5 // bfmmla v21.4s, v5.8h, v3.8h\n"
".inst 0x6e42ecb8 // bfmmla v24.4s, v5.8h, v2.8h\n"
".inst 0x6e43ec9b // bfmmla v27.4s, v4.8h, v3.8h\n"
@@ -232,41 +232,41 @@ void a64_interleaved_bf16fp32_mmla_8x12(
".inst 0x6e40ec9f // bfmmla v31.4s, v4.8h, v0.8h\n"
"5:" // multiply loop done
"subs x23, x23, #0x1\n"
- "uzp1 v0.2d, v8.2d, v11.2d\n"
+ "uzp1 v2.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v1.2d, v9.2d, v12.2d\n"
"uzp2 v9.2d, v9.2d, v12.2d\n"
- "str q0, [%x[Cpanel], #0x0]\n"
"uzp1 v0.2d, v10.2d, v13.2d\n"
"uzp2 v10.2d, v10.2d, v13.2d\n"
- "str q1, [%x[Cpanel], #0x10]\n"
- "str q0, [%x[Cpanel], #0x20]\n"
- "uzp1 v0.2d, v14.2d, v17.2d\n"
+ "str q2, [%x[Cpanel], #0x0]\n"
+ "uzp1 v3.2d, v14.2d, v17.2d\n"
"uzp2 v14.2d, v14.2d, v17.2d\n"
- "str q8, [%x[Cpanel], #0x30]\n"
+ "str q1, [%x[Cpanel], #0x10]\n"
"uzp1 v2.2d, v15.2d, v18.2d\n"
"uzp2 v15.2d, v15.2d, v18.2d\n"
- "str q9, [%x[Cpanel], #0x40]\n"
+ "str q0, [%x[Cpanel], #0x20]\n"
"uzp1 v17.2d, v16.2d, v19.2d\n"
"uzp2 v16.2d, v16.2d, v19.2d\n"
- "str q10, [%x[Cpanel], #0x50]\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
"uzp1 v1.2d, v20.2d, v23.2d\n"
"uzp2 v20.2d, v20.2d, v23.2d\n"
- "str q0, [%x[Cpanel], #0x60]\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
"uzp1 v0.2d, v21.2d, v24.2d\n"
"uzp2 v21.2d, v21.2d, v24.2d\n"
- "str q2, [%x[Cpanel], #0x70]\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
"uzp1 v23.2d, v22.2d, v25.2d\n"
"uzp2 v22.2d, v22.2d, v25.2d\n"
- "str q17, [%x[Cpanel], #0x80]\n"
+ "str q3, [%x[Cpanel], #0x60]\n"
"uzp1 v19.2d, v26.2d, v29.2d\n"
"uzp2 v26.2d, v26.2d, v29.2d\n"
- "str q14, [%x[Cpanel], #0x90]\n"
+ "str q2, [%x[Cpanel], #0x70]\n"
"uzp1 v18.2d, v27.2d, v30.2d\n"
"uzp2 v27.2d, v27.2d, v30.2d\n"
- "str q15, [%x[Cpanel], #0xa0]\n"
+ "str q17, [%x[Cpanel], #0x80]\n"
"uzp1 v17.2d, v28.2d, v31.2d\n"
"uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
"str q16, [%x[Cpanel], #0xb0]\n"
"str q1, [%x[Cpanel], #0xc0]\n"
"str q0, [%x[Cpanel], #0xd0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12.hpp
index 37a54fcfab..7c26bfa682 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void a64_interleaved_s8s32_mmla_8x12_a510( ARGLIST );
class cls_a64_interleaved_s8s32_mmla_8x12
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsFixed<operand_type, result_type, 8, 12, 8> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 8, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 8, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
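[Annotation, not part of the patch] The operand_type split seen in this header repeats across every kernel class in the patch. With a single operand_type, the packing transforms are forced to use the same element type for both panels; separate lhs/rhs typedefs are what allow the mixed-signedness u8s8s32 (USMMLA) kernel added later in this patch to exist. A hedged sketch of the idea, with a hypothetical stand-in for StdTransformsFixed:

    #include <cstdint>

    // Illustrative simplification, not the library's actual template.
    template <typename TLhs, typename TRhs, typename TResult>
    struct StdTransformsFixedSketch {
        // pack/transform routines would use TLhs for the A panel, TRhs for B
    };

    using SameSign  = StdTransformsFixedSketch<int8_t,  int8_t, int32_t>;  // s8s32 kernels
    using MixedSign = StdTransformsFixedSketch<uint8_t, int8_t, int32_t>;  // u8s8s32 (usmmla)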
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp
index c1d37383df..ba169540c2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/a510.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,18 +54,18 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
"2:" // Width loop
"ldp q4, q5, [x22], #0x20\n"
"mov %x[Apanel], x21\n"
- "ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
- "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
- "movi v8.4s, #0x0\n"
- "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
+ "ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
"movi v12.4s, #0x0\n"
"movi v13.4s, #0x0\n"
+ "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ "cmp x20, #0x2\n"
"movi v14.4s, #0x0\n"
+ "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
"movi v15.4s, #0x0\n"
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -97,7 +97,7 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
".inst 0x4e84a4da // smmla v26.4s, v6.16b, v4.16b\n"
"cmp x20, #0x2\n"
".inst 0x4e85a4dd // smmla v29.4s, v6.16b, v5.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q5, q4, [x22], #0x20\n"
".inst 0x4e83a409 // smmla v9.4s, v0.16b, v3.16b\n"
".inst 0x4e87a40c // smmla v12.4s, v0.16b, v7.16b\n"
".inst 0x4e83a42f // smmla v15.4s, v1.16b, v3.16b\n"
@@ -106,28 +106,28 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
".inst 0x4e87a458 // smmla v24.4s, v2.16b, v7.16b\n"
".inst 0x4e83a4db // smmla v27.4s, v6.16b, v3.16b\n"
".inst 0x4e87a4de // smmla v30.4s, v6.16b, v7.16b\n"
- "ldp q7, q3, [x22], #0x20\n"
- ".inst 0x4e84a40a // smmla v10.4s, v0.16b, v4.16b\n"
- ".inst 0x4e85a40d // smmla v13.4s, v0.16b, v5.16b\n"
+ "ldp q3, q7, [x22], #0x20\n"
+ ".inst 0x4e85a40a // smmla v10.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e84a40d // smmla v13.4s, v0.16b, v4.16b\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x4e84a430 // smmla v16.4s, v1.16b, v4.16b\n"
- ".inst 0x4e85a433 // smmla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e85a430 // smmla v16.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e84a433 // smmla v19.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85a456 // smmla v22.4s, v2.16b, v5.16b\n"
"ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x4e84a456 // smmla v22.4s, v2.16b, v4.16b\n"
- ".inst 0x4e85a459 // smmla v25.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84a459 // smmla v25.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e85a4dc // smmla v28.4s, v6.16b, v5.16b\n"
"ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x4e84a4dc // smmla v28.4s, v6.16b, v4.16b\n"
- ".inst 0x4e85a4df // smmla v31.4s, v6.16b, v5.16b\n"
+ ".inst 0x4e84a4df // smmla v31.4s, v6.16b, v4.16b\n"
+ ".inst 0x4e83a408 // smmla v8.4s, v0.16b, v3.16b\n"
"ld1 { v6.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x4e87a408 // smmla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e87a40b // smmla v11.4s, v0.16b, v7.16b\n"
"ldp q4, q5, [x22], #0x20\n"
- ".inst 0x4e83a40b // smmla v11.4s, v0.16b, v3.16b\n"
- ".inst 0x4e87a42e // smmla v14.4s, v1.16b, v7.16b\n"
- ".inst 0x4e83a431 // smmla v17.4s, v1.16b, v3.16b\n"
- ".inst 0x4e87a454 // smmla v20.4s, v2.16b, v7.16b\n"
- ".inst 0x4e83a457 // smmla v23.4s, v2.16b, v3.16b\n"
- ".inst 0x4e87a4da // smmla v26.4s, v6.16b, v7.16b\n"
- ".inst 0x4e83a4dd // smmla v29.4s, v6.16b, v3.16b\n"
+ ".inst 0x4e83a42e // smmla v14.4s, v1.16b, v3.16b\n"
+ ".inst 0x4e87a431 // smmla v17.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e83a454 // smmla v20.4s, v2.16b, v3.16b\n"
+ ".inst 0x4e87a457 // smmla v23.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e83a4da // smmla v26.4s, v6.16b, v3.16b\n"
+ ".inst 0x4e87a4dd // smmla v29.4s, v6.16b, v7.16b\n"
"ldp q7, q3, [x22], #0x20\n"
".inst 0x4e84a409 // smmla v9.4s, v0.16b, v4.16b\n"
".inst 0x4e85a40c // smmla v12.4s, v0.16b, v5.16b\n"
@@ -143,11 +143,11 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e87a430 // smmla v16.4s, v1.16b, v7.16b\n"
".inst 0x4e83a433 // smmla v19.4s, v1.16b, v3.16b\n"
- "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e87a456 // smmla v22.4s, v2.16b, v7.16b\n"
+ "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e83a459 // smmla v25.4s, v2.16b, v3.16b\n"
- "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e87a4dc // smmla v28.4s, v6.16b, v7.16b\n"
+ "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
".inst 0x4e83a4df // smmla v31.4s, v6.16b, v3.16b\n"
"bge 3b\n"
"4:" // main loop skip
@@ -182,9 +182,9 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
"ldp q1, q0, [x22], #0x20\n"
"ld1 { v7.16b }, [%x[Apanel]], #0x10\n"
"ld1 { v6.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x4e81a4e8 // smmla v8.4s, v7.16b, v1.16b\n"
"ld1 { v5.16b }, [%x[Apanel]], #0x10\n"
"ld1 { v4.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x4e81a4e8 // smmla v8.4s, v7.16b, v1.16b\n"
".inst 0x4e80a4eb // smmla v11.4s, v7.16b, v0.16b\n"
"ldp q3, q2, [x22], #0x20\n"
".inst 0x4e81a4ce // smmla v14.4s, v6.16b, v1.16b\n"
@@ -212,41 +212,41 @@ void a64_interleaved_s8s32_mmla_8x12_a510(
".inst 0x4e80a49f // smmla v31.4s, v4.16b, v0.16b\n"
"5:" // multiply loop done
"subs x23, x23, #0x1\n"
- "uzp1 v0.2d, v8.2d, v11.2d\n"
+ "uzp1 v2.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v1.2d, v9.2d, v12.2d\n"
"uzp2 v9.2d, v9.2d, v12.2d\n"
- "str q0, [%x[Cpanel], #0x0]\n"
"uzp1 v0.2d, v10.2d, v13.2d\n"
"uzp2 v10.2d, v10.2d, v13.2d\n"
- "str q1, [%x[Cpanel], #0x10]\n"
- "str q0, [%x[Cpanel], #0x20]\n"
- "uzp1 v0.2d, v14.2d, v17.2d\n"
+ "str q2, [%x[Cpanel], #0x0]\n"
+ "uzp1 v3.2d, v14.2d, v17.2d\n"
"uzp2 v14.2d, v14.2d, v17.2d\n"
- "str q8, [%x[Cpanel], #0x30]\n"
+ "str q1, [%x[Cpanel], #0x10]\n"
"uzp1 v2.2d, v15.2d, v18.2d\n"
"uzp2 v15.2d, v15.2d, v18.2d\n"
- "str q9, [%x[Cpanel], #0x40]\n"
+ "str q0, [%x[Cpanel], #0x20]\n"
"uzp1 v17.2d, v16.2d, v19.2d\n"
"uzp2 v16.2d, v16.2d, v19.2d\n"
- "str q10, [%x[Cpanel], #0x50]\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
"uzp1 v1.2d, v20.2d, v23.2d\n"
"uzp2 v20.2d, v20.2d, v23.2d\n"
- "str q0, [%x[Cpanel], #0x60]\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
"uzp1 v0.2d, v21.2d, v24.2d\n"
"uzp2 v21.2d, v21.2d, v24.2d\n"
- "str q2, [%x[Cpanel], #0x70]\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
"uzp1 v23.2d, v22.2d, v25.2d\n"
"uzp2 v22.2d, v22.2d, v25.2d\n"
- "str q17, [%x[Cpanel], #0x80]\n"
+ "str q3, [%x[Cpanel], #0x60]\n"
"uzp1 v19.2d, v26.2d, v29.2d\n"
"uzp2 v26.2d, v26.2d, v29.2d\n"
- "str q14, [%x[Cpanel], #0x90]\n"
+ "str q2, [%x[Cpanel], #0x70]\n"
"uzp1 v18.2d, v27.2d, v30.2d\n"
"uzp2 v27.2d, v27.2d, v30.2d\n"
- "str q15, [%x[Cpanel], #0xa0]\n"
+ "str q17, [%x[Cpanel], #0x80]\n"
"uzp1 v17.2d, v28.2d, v31.2d\n"
"uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
"str q16, [%x[Cpanel], #0xb0]\n"
"str q1, [%x[Cpanel], #0xc0]\n"
"str q0, [%x[Cpanel], #0xd0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp
index a097dc358a..63c6277719 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,21 +55,21 @@ void a64_interleaved_s8s32_mmla_8x12(
"ldr q4, [x22, #0x0]\n"
"ldr q5, [x22, #0x10]\n"
"mov %x[Apanel], x21\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
- "movi v8.4s, #0x0\n"
- "ldr q2, [%x[Apanel], #0x20]\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
"movi v10.4s, #0x0\n"
- "add x22, x22, #0x20\n"
"movi v11.4s, #0x0\n"
+ "add x22, x22, #0x20\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
"movi v12.4s, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
+ "cmp x20, #0x2\n"
"movi v13.4s, #0x0\n"
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
@@ -196,19 +196,19 @@ void a64_interleaved_s8s32_mmla_8x12(
"cbz x20, 5f\n"
"ldr q1, [x22, #0x0]\n"
"ldr q7, [%x[Apanel], #0x0]\n"
- ".inst 0x4e81a4e8 // smmla v8.4s, v7.16b, v1.16b\n"
"ldr q6, [%x[Apanel], #0x10]\n"
"ldr q0, [x22, #0x10]\n"
- ".inst 0x4e80a4eb // smmla v11.4s, v7.16b, v0.16b\n"
"ldr q5, [%x[Apanel], #0x20]\n"
"ldr q4, [%x[Apanel], #0x30]\n"
- ".inst 0x4e81a4ce // smmla v14.4s, v6.16b, v1.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
"ldr q3, [x22, #0x20]\n"
"ldr q2, [x22, #0x30]\n"
+ ".inst 0x4e81a4e8 // smmla v8.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e80a4eb // smmla v11.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e81a4ce // smmla v14.4s, v6.16b, v1.16b\n"
".inst 0x4e80a4d1 // smmla v17.4s, v6.16b, v0.16b\n"
".inst 0x4e81a4b4 // smmla v20.4s, v5.16b, v1.16b\n"
".inst 0x4e80a4b7 // smmla v23.4s, v5.16b, v0.16b\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x4e81a49a // smmla v26.4s, v4.16b, v1.16b\n"
"ldr q1, [x22, #0x40]\n"
".inst 0x4e80a49d // smmla v29.4s, v4.16b, v0.16b\n"
@@ -216,8 +216,8 @@ void a64_interleaved_s8s32_mmla_8x12(
".inst 0x4e83a4e9 // smmla v9.4s, v7.16b, v3.16b\n"
".inst 0x4e82a4ec // smmla v12.4s, v7.16b, v2.16b\n"
".inst 0x4e83a4cf // smmla v15.4s, v6.16b, v3.16b\n"
- ".inst 0x4e82a4d2 // smmla v18.4s, v6.16b, v2.16b\n"
"add x22, x22, #0x60\n"
+ ".inst 0x4e82a4d2 // smmla v18.4s, v6.16b, v2.16b\n"
".inst 0x4e83a4b5 // smmla v21.4s, v5.16b, v3.16b\n"
".inst 0x4e82a4b8 // smmla v24.4s, v5.16b, v2.16b\n"
".inst 0x4e83a49b // smmla v27.4s, v4.16b, v3.16b\n"
@@ -232,41 +232,41 @@ void a64_interleaved_s8s32_mmla_8x12(
".inst 0x4e80a49f // smmla v31.4s, v4.16b, v0.16b\n"
"5:" // multiply loop done
"subs x23, x23, #0x1\n"
- "uzp1 v0.2d, v8.2d, v11.2d\n"
+ "uzp1 v2.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v1.2d, v9.2d, v12.2d\n"
"uzp2 v9.2d, v9.2d, v12.2d\n"
- "str q0, [%x[Cpanel], #0x0]\n"
"uzp1 v0.2d, v10.2d, v13.2d\n"
"uzp2 v10.2d, v10.2d, v13.2d\n"
- "str q1, [%x[Cpanel], #0x10]\n"
- "str q0, [%x[Cpanel], #0x20]\n"
- "uzp1 v0.2d, v14.2d, v17.2d\n"
+ "str q2, [%x[Cpanel], #0x0]\n"
+ "uzp1 v3.2d, v14.2d, v17.2d\n"
"uzp2 v14.2d, v14.2d, v17.2d\n"
- "str q8, [%x[Cpanel], #0x30]\n"
+ "str q1, [%x[Cpanel], #0x10]\n"
"uzp1 v2.2d, v15.2d, v18.2d\n"
"uzp2 v15.2d, v15.2d, v18.2d\n"
- "str q9, [%x[Cpanel], #0x40]\n"
+ "str q0, [%x[Cpanel], #0x20]\n"
"uzp1 v17.2d, v16.2d, v19.2d\n"
"uzp2 v16.2d, v16.2d, v19.2d\n"
- "str q10, [%x[Cpanel], #0x50]\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
"uzp1 v1.2d, v20.2d, v23.2d\n"
"uzp2 v20.2d, v20.2d, v23.2d\n"
- "str q0, [%x[Cpanel], #0x60]\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
"uzp1 v0.2d, v21.2d, v24.2d\n"
"uzp2 v21.2d, v21.2d, v24.2d\n"
- "str q2, [%x[Cpanel], #0x70]\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
"uzp1 v23.2d, v22.2d, v25.2d\n"
"uzp2 v22.2d, v22.2d, v25.2d\n"
- "str q17, [%x[Cpanel], #0x80]\n"
+ "str q3, [%x[Cpanel], #0x60]\n"
"uzp1 v19.2d, v26.2d, v29.2d\n"
"uzp2 v26.2d, v26.2d, v29.2d\n"
- "str q14, [%x[Cpanel], #0x90]\n"
+ "str q2, [%x[Cpanel], #0x70]\n"
"uzp1 v18.2d, v27.2d, v30.2d\n"
"uzp2 v27.2d, v27.2d, v30.2d\n"
- "str q15, [%x[Cpanel], #0xa0]\n"
+ "str q17, [%x[Cpanel], #0x80]\n"
"uzp1 v17.2d, v28.2d, v31.2d\n"
"uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
"str q16, [%x[Cpanel], #0xb0]\n"
"str q1, [%x[Cpanel], #0xc0]\n"
"str q0, [%x[Cpanel], #0xd0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12.hpp
new file mode 100644
index 0000000000..c22303ce06
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12.hpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef __aarch64__
+
+#include "../std_transforms_fixed.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const uint8_t *, const int8_t *, \
+ int32_t *, int, int, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void a64_interleaved_u8s8s32_mmla_8x12( ARGLIST );
+
+class cls_a64_interleaved_u8s8s32_mmla_8x12
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return 12;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 8;
+ }
+
+
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 8, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, uint32_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 62.58, 4.06, 8.02 };
+ case CPUModel::A510:
+ return { 47.83, 3.59, 3.72 };
+ case CPUModel::V1:
+ return { 111.52, 4.97, 10.80 };
+ }
+ }
+
+
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 62.57, 4.10, 0.51 };
+ case CPUModel::A510:
+ return { 47.66, 2.47, 0.29 };
+ case CPUModel::V1:
+ return { 75.54, 8.06, 0.63 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=a64_interleaved_u8s8s32_mmla_8x12;
+ cls_a64_interleaved_u8s8s32_mmla_8x12(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // __aarch64__
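[Annotation, not part of the patch] The blocking parameters in this new header describe an 8x12 int32 output tile consumed 8 K-elements at a time, matching the 8-deep MMLA dot product. A back-of-envelope check with illustrative names (these constants are read off the header, the helper is hypothetical):

    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t out_height = 8, out_width = 12, k_unroll = 8;

    // 8 * 12 * 4 bytes = 384 = 0x180, which is exactly the per-tile Cpanel
    // advance ("add %x[Cpanel], %x[Cpanel], #0x180") in the generic.cpp below.
    constexpr std::size_t c_tile_bytes = out_height * out_width * sizeof(int32_t);

    // K is consumed k_unroll elements at a time; panels are padded accordingly.
    constexpr std::size_t rounded_k(std::size_t k) {
        return (k + k_unroll - 1) / k_unroll * k_unroll;
    }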
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12/generic.cpp
new file mode 100644
index 0000000000..b7f2e0c04b
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8s8s32_mmla_8x12/generic.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_gemm {
+
+void a64_interleaved_u8s8s32_mmla_8x12(
+ const uint8_t *Apanel,
+ const int8_t *Bpanel,
+ int32_t *Cpanel,
+ int ablocks,
+ int bblocks,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const int8_t *Bpanel = {};
+ size_t bblocks = {};
+ } ka;
+
+ ka.K = (K/8) - 1;
+ ka.Bpanel = Bpanel;
+ ka.bblocks = bblocks;
+
+ __asm__ __volatile__(
+ "1:" // Height loop
+ "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "mov x21, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr q4, [x22, #0x0]\n"
+ "ldr q5, [x22, #0x10]\n"
+ "mov %x[Apanel], x21\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "movi v8.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "movi v11.4s, #0x0\n"
+ "add x22, x22, #0x20\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
+ "movi v12.4s, #0x0\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
+ "cmp x20, #0x2\n"
+ "movi v13.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
+ "movi v16.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
+ "blt 4f\n"
+ "3:" // main loop head
+ "ldr q6, [%x[Apanel], #0x0]\n"
+ "ldr q7, [x22, #0x0]\n"
+ ".inst 0x4e84ac08 // usmmla v8.4s, v0.16b, v4.16b\n"
+ "ldr q3, [x22, #0x10]\n"
+ ".inst 0x4e85ac0b // usmmla v11.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e84ac2e // usmmla v14.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85ac31 // usmmla v17.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e84ac54 // usmmla v20.4s, v2.16b, v4.16b\n"
+ "sub x20, x20, #0x2\n"
+ ".inst 0x4e85ac57 // usmmla v23.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84acda // usmmla v26.4s, v6.16b, v4.16b\n"
+ "ldr q4, [x22, #0x20]\n"
+ ".inst 0x4e85acdd // usmmla v29.4s, v6.16b, v5.16b\n"
+ "ldr q5, [x22, #0x30]\n"
+ ".inst 0x4e87ac09 // usmmla v9.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e83ac0c // usmmla v12.4s, v0.16b, v3.16b\n"
+ ".inst 0x4e87ac2f // usmmla v15.4s, v1.16b, v7.16b\n"
+ "cmp x20, #0x2\n"
+ ".inst 0x4e83ac32 // usmmla v18.4s, v1.16b, v3.16b\n"
+ ".inst 0x4e87ac55 // usmmla v21.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e83ac58 // usmmla v24.4s, v2.16b, v3.16b\n"
+ ".inst 0x4e87acdb // usmmla v27.4s, v6.16b, v7.16b\n"
+ "ldr q7, [x22, #0x40]\n"
+ ".inst 0x4e83acde // usmmla v30.4s, v6.16b, v3.16b\n"
+ "ldr q3, [x22, #0x50]\n"
+ ".inst 0x4e84ac0a // usmmla v10.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e85ac0d // usmmla v13.4s, v0.16b, v5.16b\n"
+ "ldr q0, [%x[Apanel], #0x10]\n"
+ ".inst 0x4e84ac30 // usmmla v16.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85ac33 // usmmla v19.4s, v1.16b, v5.16b\n"
+ "ldr q1, [%x[Apanel], #0x20]\n"
+ ".inst 0x4e84ac56 // usmmla v22.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e85ac59 // usmmla v25.4s, v2.16b, v5.16b\n"
+ "ldr q2, [%x[Apanel], #0x30]\n"
+ ".inst 0x4e84acdc // usmmla v28.4s, v6.16b, v4.16b\n"
+ "ldr q4, [x22, #0x60]\n"
+ ".inst 0x4e85acdf // usmmla v31.4s, v6.16b, v5.16b\n"
+ "ldr q6, [%x[Apanel], #0x40]\n"
+ "ldr q5, [x22, #0x70]\n"
+ ".inst 0x4e87ac08 // usmmla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e83ac0b // usmmla v11.4s, v0.16b, v3.16b\n"
+ ".inst 0x4e87ac2e // usmmla v14.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e83ac31 // usmmla v17.4s, v1.16b, v3.16b\n"
+ ".inst 0x4e87ac54 // usmmla v20.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e83ac57 // usmmla v23.4s, v2.16b, v3.16b\n"
+ ".inst 0x4e87acda // usmmla v26.4s, v6.16b, v7.16b\n"
+ "ldr q7, [x22, #0x80]\n"
+ ".inst 0x4e83acdd // usmmla v29.4s, v6.16b, v3.16b\n"
+ "ldr q3, [x22, #0x90]\n"
+ ".inst 0x4e84ac09 // usmmla v9.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e85ac0c // usmmla v12.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e84ac2f // usmmla v15.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85ac32 // usmmla v18.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e84ac55 // usmmla v21.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e85ac58 // usmmla v24.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84acdb // usmmla v27.4s, v6.16b, v4.16b\n"
+ "ldr q4, [x22, #0xa0]\n"
+ ".inst 0x4e85acde // usmmla v30.4s, v6.16b, v5.16b\n"
+ "ldr q5, [x22, #0xb0]\n"
+ ".inst 0x4e87ac0a // usmmla v10.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e83ac0d // usmmla v13.4s, v0.16b, v3.16b\n"
+ "ldr q0, [%x[Apanel], #0x50]\n"
+ ".inst 0x4e87ac30 // usmmla v16.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e83ac33 // usmmla v19.4s, v1.16b, v3.16b\n"
+ "ldr q1, [%x[Apanel], #0x60]\n"
+ ".inst 0x4e87ac56 // usmmla v22.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e83ac59 // usmmla v25.4s, v2.16b, v3.16b\n"
+ "ldr q2, [%x[Apanel], #0x70]\n"
+ ".inst 0x4e87acdc // usmmla v28.4s, v6.16b, v7.16b\n"
+ ".inst 0x4e83acdf // usmmla v31.4s, v6.16b, v3.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x80\n"
+ "add x22, x22, #0xc0\n"
+ "bge 3b\n"
+ "4:" // main loop skip
+ "ldr q3, [%x[Apanel], #0x0]\n"
+ "ldr q6, [x22, #0x0]\n"
+ ".inst 0x4e84ac08 // usmmla v8.4s, v0.16b, v4.16b\n"
+ "ldr q7, [x22, #0x10]\n"
+ ".inst 0x4e85ac0b // usmmla v11.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e84ac2e // usmmla v14.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85ac31 // usmmla v17.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e84ac54 // usmmla v20.4s, v2.16b, v4.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x4e85ac57 // usmmla v23.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84ac7a // usmmla v26.4s, v3.16b, v4.16b\n"
+ "ldr q4, [x22, #0x20]\n"
+ ".inst 0x4e85ac7d // usmmla v29.4s, v3.16b, v5.16b\n"
+ "ldr q5, [x22, #0x30]\n"
+ ".inst 0x4e86ac09 // usmmla v9.4s, v0.16b, v6.16b\n"
+ ".inst 0x4e87ac0c // usmmla v12.4s, v0.16b, v7.16b\n"
+ ".inst 0x4e86ac2f // usmmla v15.4s, v1.16b, v6.16b\n"
+ "add x22, x22, #0x40\n"
+ ".inst 0x4e87ac32 // usmmla v18.4s, v1.16b, v7.16b\n"
+ ".inst 0x4e86ac55 // usmmla v21.4s, v2.16b, v6.16b\n"
+ ".inst 0x4e87ac58 // usmmla v24.4s, v2.16b, v7.16b\n"
+ ".inst 0x4e86ac7b // usmmla v27.4s, v3.16b, v6.16b\n"
+ ".inst 0x4e87ac7e // usmmla v30.4s, v3.16b, v7.16b\n"
+ ".inst 0x4e84ac0a // usmmla v10.4s, v0.16b, v4.16b\n"
+ ".inst 0x4e85ac0d // usmmla v13.4s, v0.16b, v5.16b\n"
+ ".inst 0x4e84ac30 // usmmla v16.4s, v1.16b, v4.16b\n"
+ ".inst 0x4e85ac33 // usmmla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x4e84ac56 // usmmla v22.4s, v2.16b, v4.16b\n"
+ ".inst 0x4e85ac59 // usmmla v25.4s, v2.16b, v5.16b\n"
+ ".inst 0x4e84ac7c // usmmla v28.4s, v3.16b, v4.16b\n"
+ ".inst 0x4e85ac7f // usmmla v31.4s, v3.16b, v5.16b\n"
+ "cbz x20, 5f\n"
+ "ldr q1, [x22, #0x0]\n"
+ "ldr q7, [%x[Apanel], #0x0]\n"
+ "ldr q6, [%x[Apanel], #0x10]\n"
+ "ldr q0, [x22, #0x10]\n"
+ "ldr q5, [%x[Apanel], #0x20]\n"
+ "ldr q4, [%x[Apanel], #0x30]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ "ldr q3, [x22, #0x20]\n"
+ "ldr q2, [x22, #0x30]\n"
+ ".inst 0x4e81ace8 // usmmla v8.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e80aceb // usmmla v11.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e81acce // usmmla v14.4s, v6.16b, v1.16b\n"
+ ".inst 0x4e80acd1 // usmmla v17.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e81acb4 // usmmla v20.4s, v5.16b, v1.16b\n"
+ ".inst 0x4e80acb7 // usmmla v23.4s, v5.16b, v0.16b\n"
+ ".inst 0x4e81ac9a // usmmla v26.4s, v4.16b, v1.16b\n"
+ "ldr q1, [x22, #0x40]\n"
+ ".inst 0x4e80ac9d // usmmla v29.4s, v4.16b, v0.16b\n"
+ "ldr q0, [x22, #0x50]\n"
+ ".inst 0x4e83ace9 // usmmla v9.4s, v7.16b, v3.16b\n"
+ ".inst 0x4e82acec // usmmla v12.4s, v7.16b, v2.16b\n"
+ ".inst 0x4e83accf // usmmla v15.4s, v6.16b, v3.16b\n"
+ "add x22, x22, #0x60\n"
+ ".inst 0x4e82acd2 // usmmla v18.4s, v6.16b, v2.16b\n"
+ ".inst 0x4e83acb5 // usmmla v21.4s, v5.16b, v3.16b\n"
+ ".inst 0x4e82acb8 // usmmla v24.4s, v5.16b, v2.16b\n"
+ ".inst 0x4e83ac9b // usmmla v27.4s, v4.16b, v3.16b\n"
+ ".inst 0x4e82ac9e // usmmla v30.4s, v4.16b, v2.16b\n"
+ ".inst 0x4e81acea // usmmla v10.4s, v7.16b, v1.16b\n"
+ ".inst 0x4e80aced // usmmla v13.4s, v7.16b, v0.16b\n"
+ ".inst 0x4e81acd0 // usmmla v16.4s, v6.16b, v1.16b\n"
+ ".inst 0x4e80acd3 // usmmla v19.4s, v6.16b, v0.16b\n"
+ ".inst 0x4e81acb6 // usmmla v22.4s, v5.16b, v1.16b\n"
+ ".inst 0x4e80acb9 // usmmla v25.4s, v5.16b, v0.16b\n"
+ ".inst 0x4e81ac9c // usmmla v28.4s, v4.16b, v1.16b\n"
+ ".inst 0x4e80ac9f // usmmla v31.4s, v4.16b, v0.16b\n"
+ "5:" // multiply loop done
+ "subs x23, x23, #0x1\n"
+ "uzp1 v2.2d, v8.2d, v11.2d\n"
+ "uzp2 v8.2d, v8.2d, v11.2d\n"
+ "uzp1 v1.2d, v9.2d, v12.2d\n"
+ "uzp2 v9.2d, v9.2d, v12.2d\n"
+ "uzp1 v0.2d, v10.2d, v13.2d\n"
+ "uzp2 v10.2d, v10.2d, v13.2d\n"
+ "str q2, [%x[Cpanel], #0x0]\n"
+ "uzp1 v3.2d, v14.2d, v17.2d\n"
+ "uzp2 v14.2d, v14.2d, v17.2d\n"
+ "str q1, [%x[Cpanel], #0x10]\n"
+ "uzp1 v2.2d, v15.2d, v18.2d\n"
+ "uzp2 v15.2d, v15.2d, v18.2d\n"
+ "str q0, [%x[Cpanel], #0x20]\n"
+ "uzp1 v17.2d, v16.2d, v19.2d\n"
+ "uzp2 v16.2d, v16.2d, v19.2d\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
+ "uzp1 v1.2d, v20.2d, v23.2d\n"
+ "uzp2 v20.2d, v20.2d, v23.2d\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
+ "uzp1 v0.2d, v21.2d, v24.2d\n"
+ "uzp2 v21.2d, v21.2d, v24.2d\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
+ "uzp1 v23.2d, v22.2d, v25.2d\n"
+ "uzp2 v22.2d, v22.2d, v25.2d\n"
+ "str q3, [%x[Cpanel], #0x60]\n"
+ "uzp1 v19.2d, v26.2d, v29.2d\n"
+ "uzp2 v26.2d, v26.2d, v29.2d\n"
+ "str q2, [%x[Cpanel], #0x70]\n"
+ "uzp1 v18.2d, v27.2d, v30.2d\n"
+ "uzp2 v27.2d, v27.2d, v30.2d\n"
+ "str q17, [%x[Cpanel], #0x80]\n"
+ "uzp1 v17.2d, v28.2d, v31.2d\n"
+ "uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
+ "str q16, [%x[Cpanel], #0xb0]\n"
+ "str q1, [%x[Cpanel], #0xc0]\n"
+ "str q0, [%x[Cpanel], #0xd0]\n"
+ "str q23, [%x[Cpanel], #0xe0]\n"
+ "str q20, [%x[Cpanel], #0xf0]\n"
+ "str q21, [%x[Cpanel], #0x100]\n"
+ "str q22, [%x[Cpanel], #0x110]\n"
+ "str q19, [%x[Cpanel], #0x120]\n"
+ "str q18, [%x[Cpanel], #0x130]\n"
+ "str q17, [%x[Cpanel], #0x140]\n"
+ "str q26, [%x[Cpanel], #0x150]\n"
+ "str q27, [%x[Cpanel], #0x160]\n"
+ "str q28, [%x[Cpanel], #0x170]\n"
+ "add %x[Cpanel], %x[Cpanel], #0x180\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23"
+ );
+}
+
+} // namespace arm_gemm
+#endif // __aarch64__
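[Annotation, not part of the patch] The new kernel body is built from USMMLA (FEAT_I8MM), emitted as raw ".inst" words with the mnemonic in a comment, presumably so the file still assembles with toolchains that predate the mnemonic. Each usmmla multiplies a 2x8 block of unsigned bytes from the first source by an 8x2 block of signed bytes from the second (held as two 8-byte columns) and accumulates a 2x2 int32 tile. A scalar model of one instruction, illustrative rather than the kernel's actual code:

    #include <cstdint>

    // acc: 2x2 int32 tile; a: 2 rows of 8 uint8; b: 2 columns of 8 int8,
    // each column stored contiguously as the register layout requires.
    void usmmla_model(int32_t acc[2][2], const uint8_t a[2][8], const int8_t b[2][8]) {
        for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 2; ++j)
                for (int k = 0; k < 8; ++k)
                    acc[i][j] += int32_t(a[i][k]) * int32_t(b[j][k]);
    }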
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12.hpp
index 0088557b8d..922438a255 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void a64_interleaved_u8u32_mmla_8x12_a510( ARGLIST );
class cls_a64_interleaved_u8u32_mmla_8x12
{
public:
- typedef uint8_t operand_type;
+ typedef uint8_t lhs_operand_type;
+ typedef uint8_t rhs_operand_type;
typedef uint32_t result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsFixed<operand_type, result_type, 8, 12, 8> transforms = {};
- StdTransformsFixed<operand_type, result_type, 8, 12, 8, true> transforms_quantized = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 8> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 12, 8, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp
index 54c51954c8..8affb0ea86 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/a510.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,18 +54,18 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
"2:" // Width loop
"ldp q4, q5, [x22], #0x20\n"
"mov %x[Apanel], x21\n"
- "ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
- "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
- "movi v8.4s, #0x0\n"
- "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
"movi v10.4s, #0x0\n"
"movi v11.4s, #0x0\n"
+ "ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
"movi v12.4s, #0x0\n"
"movi v13.4s, #0x0\n"
+ "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
+ "cmp x20, #0x2\n"
"movi v14.4s, #0x0\n"
+ "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
"movi v15.4s, #0x0\n"
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
@@ -97,7 +97,7 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
".inst 0x6e84a4da // ummla v26.4s, v6.16b, v4.16b\n"
"cmp x20, #0x2\n"
".inst 0x6e85a4dd // ummla v29.4s, v6.16b, v5.16b\n"
- "ldp q4, q5, [x22], #0x20\n"
+ "ldp q5, q4, [x22], #0x20\n"
".inst 0x6e83a409 // ummla v9.4s, v0.16b, v3.16b\n"
".inst 0x6e87a40c // ummla v12.4s, v0.16b, v7.16b\n"
".inst 0x6e83a42f // ummla v15.4s, v1.16b, v3.16b\n"
@@ -106,28 +106,28 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
".inst 0x6e87a458 // ummla v24.4s, v2.16b, v7.16b\n"
".inst 0x6e83a4db // ummla v27.4s, v6.16b, v3.16b\n"
".inst 0x6e87a4de // ummla v30.4s, v6.16b, v7.16b\n"
- "ldp q7, q3, [x22], #0x20\n"
- ".inst 0x6e84a40a // ummla v10.4s, v0.16b, v4.16b\n"
- ".inst 0x6e85a40d // ummla v13.4s, v0.16b, v5.16b\n"
+ "ldp q3, q7, [x22], #0x20\n"
+ ".inst 0x6e85a40a // ummla v10.4s, v0.16b, v5.16b\n"
+ ".inst 0x6e84a40d // ummla v13.4s, v0.16b, v4.16b\n"
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e84a430 // ummla v16.4s, v1.16b, v4.16b\n"
- ".inst 0x6e85a433 // ummla v19.4s, v1.16b, v5.16b\n"
+ ".inst 0x6e85a430 // ummla v16.4s, v1.16b, v5.16b\n"
+ ".inst 0x6e84a433 // ummla v19.4s, v1.16b, v4.16b\n"
+ ".inst 0x6e85a456 // ummla v22.4s, v2.16b, v5.16b\n"
"ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e84a456 // ummla v22.4s, v2.16b, v4.16b\n"
- ".inst 0x6e85a459 // ummla v25.4s, v2.16b, v5.16b\n"
+ ".inst 0x6e84a459 // ummla v25.4s, v2.16b, v4.16b\n"
+ ".inst 0x6e85a4dc // ummla v28.4s, v6.16b, v5.16b\n"
"ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e84a4dc // ummla v28.4s, v6.16b, v4.16b\n"
- ".inst 0x6e85a4df // ummla v31.4s, v6.16b, v5.16b\n"
+ ".inst 0x6e84a4df // ummla v31.4s, v6.16b, v4.16b\n"
+ ".inst 0x6e83a408 // ummla v8.4s, v0.16b, v3.16b\n"
"ld1 { v6.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e87a408 // ummla v8.4s, v0.16b, v7.16b\n"
+ ".inst 0x6e87a40b // ummla v11.4s, v0.16b, v7.16b\n"
"ldp q4, q5, [x22], #0x20\n"
- ".inst 0x6e83a40b // ummla v11.4s, v0.16b, v3.16b\n"
- ".inst 0x6e87a42e // ummla v14.4s, v1.16b, v7.16b\n"
- ".inst 0x6e83a431 // ummla v17.4s, v1.16b, v3.16b\n"
- ".inst 0x6e87a454 // ummla v20.4s, v2.16b, v7.16b\n"
- ".inst 0x6e83a457 // ummla v23.4s, v2.16b, v3.16b\n"
- ".inst 0x6e87a4da // ummla v26.4s, v6.16b, v7.16b\n"
- ".inst 0x6e83a4dd // ummla v29.4s, v6.16b, v3.16b\n"
+ ".inst 0x6e83a42e // ummla v14.4s, v1.16b, v3.16b\n"
+ ".inst 0x6e87a431 // ummla v17.4s, v1.16b, v7.16b\n"
+ ".inst 0x6e83a454 // ummla v20.4s, v2.16b, v3.16b\n"
+ ".inst 0x6e87a457 // ummla v23.4s, v2.16b, v7.16b\n"
+ ".inst 0x6e83a4da // ummla v26.4s, v6.16b, v3.16b\n"
+ ".inst 0x6e87a4dd // ummla v29.4s, v6.16b, v7.16b\n"
"ldp q7, q3, [x22], #0x20\n"
".inst 0x6e84a409 // ummla v9.4s, v0.16b, v4.16b\n"
".inst 0x6e85a40c // ummla v12.4s, v0.16b, v5.16b\n"
@@ -143,11 +143,11 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
"ld1 { v0.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e87a430 // ummla v16.4s, v1.16b, v7.16b\n"
".inst 0x6e83a433 // ummla v19.4s, v1.16b, v3.16b\n"
- "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e87a456 // ummla v22.4s, v2.16b, v7.16b\n"
+ "ld1 { v1.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e83a459 // ummla v25.4s, v2.16b, v3.16b\n"
- "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e87a4dc // ummla v28.4s, v6.16b, v7.16b\n"
+ "ld1 { v2.16b }, [%x[Apanel]], #0x10\n"
".inst 0x6e83a4df // ummla v31.4s, v6.16b, v3.16b\n"
"bge 3b\n"
"4:" // main loop skip
@@ -182,9 +182,9 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
"ldp q1, q0, [x22], #0x20\n"
"ld1 { v7.16b }, [%x[Apanel]], #0x10\n"
"ld1 { v6.16b }, [%x[Apanel]], #0x10\n"
- ".inst 0x6e81a4e8 // ummla v8.4s, v7.16b, v1.16b\n"
"ld1 { v5.16b }, [%x[Apanel]], #0x10\n"
"ld1 { v4.16b }, [%x[Apanel]], #0x10\n"
+ ".inst 0x6e81a4e8 // ummla v8.4s, v7.16b, v1.16b\n"
".inst 0x6e80a4eb // ummla v11.4s, v7.16b, v0.16b\n"
"ldp q3, q2, [x22], #0x20\n"
".inst 0x6e81a4ce // ummla v14.4s, v6.16b, v1.16b\n"
@@ -212,41 +212,41 @@ void a64_interleaved_u8u32_mmla_8x12_a510(
".inst 0x6e80a49f // ummla v31.4s, v4.16b, v0.16b\n"
"5:" // multiply loop done
"subs x23, x23, #0x1\n"
- "uzp1 v0.2d, v8.2d, v11.2d\n"
+ "uzp1 v2.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v1.2d, v9.2d, v12.2d\n"
"uzp2 v9.2d, v9.2d, v12.2d\n"
- "str q0, [%x[Cpanel], #0x0]\n"
"uzp1 v0.2d, v10.2d, v13.2d\n"
"uzp2 v10.2d, v10.2d, v13.2d\n"
- "str q1, [%x[Cpanel], #0x10]\n"
- "str q0, [%x[Cpanel], #0x20]\n"
- "uzp1 v0.2d, v14.2d, v17.2d\n"
+ "str q2, [%x[Cpanel], #0x0]\n"
+ "uzp1 v3.2d, v14.2d, v17.2d\n"
"uzp2 v14.2d, v14.2d, v17.2d\n"
- "str q8, [%x[Cpanel], #0x30]\n"
+ "str q1, [%x[Cpanel], #0x10]\n"
"uzp1 v2.2d, v15.2d, v18.2d\n"
"uzp2 v15.2d, v15.2d, v18.2d\n"
- "str q9, [%x[Cpanel], #0x40]\n"
+ "str q0, [%x[Cpanel], #0x20]\n"
"uzp1 v17.2d, v16.2d, v19.2d\n"
"uzp2 v16.2d, v16.2d, v19.2d\n"
- "str q10, [%x[Cpanel], #0x50]\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
"uzp1 v1.2d, v20.2d, v23.2d\n"
"uzp2 v20.2d, v20.2d, v23.2d\n"
- "str q0, [%x[Cpanel], #0x60]\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
"uzp1 v0.2d, v21.2d, v24.2d\n"
"uzp2 v21.2d, v21.2d, v24.2d\n"
- "str q2, [%x[Cpanel], #0x70]\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
"uzp1 v23.2d, v22.2d, v25.2d\n"
"uzp2 v22.2d, v22.2d, v25.2d\n"
- "str q17, [%x[Cpanel], #0x80]\n"
+ "str q3, [%x[Cpanel], #0x60]\n"
"uzp1 v19.2d, v26.2d, v29.2d\n"
"uzp2 v26.2d, v26.2d, v29.2d\n"
- "str q14, [%x[Cpanel], #0x90]\n"
+ "str q2, [%x[Cpanel], #0x70]\n"
"uzp1 v18.2d, v27.2d, v30.2d\n"
"uzp2 v27.2d, v27.2d, v30.2d\n"
- "str q15, [%x[Cpanel], #0xa0]\n"
+ "str q17, [%x[Cpanel], #0x80]\n"
"uzp1 v17.2d, v28.2d, v31.2d\n"
"uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
"str q16, [%x[Cpanel], #0xb0]\n"
"str q1, [%x[Cpanel], #0xc0]\n"
"str q0, [%x[Cpanel], #0xd0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp
index 30260b9c29..747572ef84 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, 2023 Arm Limited.
+ * Copyright (c) 2019-2020, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,21 +55,21 @@ void a64_interleaved_u8u32_mmla_8x12(
"ldr q4, [x22, #0x0]\n"
"ldr q5, [x22, #0x10]\n"
"mov %x[Apanel], x21\n"
- "ldr q0, [%x[Apanel], #0x0]\n"
- "ldr q1, [%x[Apanel], #0x10]\n"
- "movi v8.4s, #0x0\n"
- "ldr q2, [%x[Apanel], #0x20]\n"
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
+ "movi v8.4s, #0x0\n"
"movi v9.4s, #0x0\n"
"movi v10.4s, #0x0\n"
- "add x22, x22, #0x20\n"
"movi v11.4s, #0x0\n"
+ "add x22, x22, #0x20\n"
+ "ldr q0, [%x[Apanel], #0x0]\n"
+ "ldr q1, [%x[Apanel], #0x10]\n"
"movi v12.4s, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
+ "ldr q2, [%x[Apanel], #0x20]\n"
+ "cmp x20, #0x2\n"
"movi v13.4s, #0x0\n"
"movi v14.4s, #0x0\n"
"movi v15.4s, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"movi v16.4s, #0x0\n"
"movi v17.4s, #0x0\n"
"movi v18.4s, #0x0\n"
@@ -196,19 +196,19 @@ void a64_interleaved_u8u32_mmla_8x12(
"cbz x20, 5f\n"
"ldr q1, [x22, #0x0]\n"
"ldr q7, [%x[Apanel], #0x0]\n"
- ".inst 0x6e81a4e8 // ummla v8.4s, v7.16b, v1.16b\n"
"ldr q6, [%x[Apanel], #0x10]\n"
"ldr q0, [x22, #0x10]\n"
- ".inst 0x6e80a4eb // ummla v11.4s, v7.16b, v0.16b\n"
"ldr q5, [%x[Apanel], #0x20]\n"
"ldr q4, [%x[Apanel], #0x30]\n"
- ".inst 0x6e81a4ce // ummla v14.4s, v6.16b, v1.16b\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
"ldr q3, [x22, #0x20]\n"
"ldr q2, [x22, #0x30]\n"
+ ".inst 0x6e81a4e8 // ummla v8.4s, v7.16b, v1.16b\n"
+ ".inst 0x6e80a4eb // ummla v11.4s, v7.16b, v0.16b\n"
+ ".inst 0x6e81a4ce // ummla v14.4s, v6.16b, v1.16b\n"
".inst 0x6e80a4d1 // ummla v17.4s, v6.16b, v0.16b\n"
".inst 0x6e81a4b4 // ummla v20.4s, v5.16b, v1.16b\n"
".inst 0x6e80a4b7 // ummla v23.4s, v5.16b, v0.16b\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6e81a49a // ummla v26.4s, v4.16b, v1.16b\n"
"ldr q1, [x22, #0x40]\n"
".inst 0x6e80a49d // ummla v29.4s, v4.16b, v0.16b\n"
@@ -216,8 +216,8 @@ void a64_interleaved_u8u32_mmla_8x12(
".inst 0x6e83a4e9 // ummla v9.4s, v7.16b, v3.16b\n"
".inst 0x6e82a4ec // ummla v12.4s, v7.16b, v2.16b\n"
".inst 0x6e83a4cf // ummla v15.4s, v6.16b, v3.16b\n"
- ".inst 0x6e82a4d2 // ummla v18.4s, v6.16b, v2.16b\n"
"add x22, x22, #0x60\n"
+ ".inst 0x6e82a4d2 // ummla v18.4s, v6.16b, v2.16b\n"
".inst 0x6e83a4b5 // ummla v21.4s, v5.16b, v3.16b\n"
".inst 0x6e82a4b8 // ummla v24.4s, v5.16b, v2.16b\n"
".inst 0x6e83a49b // ummla v27.4s, v4.16b, v3.16b\n"
@@ -232,41 +232,41 @@ void a64_interleaved_u8u32_mmla_8x12(
".inst 0x6e80a49f // ummla v31.4s, v4.16b, v0.16b\n"
"5:" // multiply loop done
"subs x23, x23, #0x1\n"
- "uzp1 v0.2d, v8.2d, v11.2d\n"
+ "uzp1 v2.2d, v8.2d, v11.2d\n"
"uzp2 v8.2d, v8.2d, v11.2d\n"
"uzp1 v1.2d, v9.2d, v12.2d\n"
"uzp2 v9.2d, v9.2d, v12.2d\n"
- "str q0, [%x[Cpanel], #0x0]\n"
"uzp1 v0.2d, v10.2d, v13.2d\n"
"uzp2 v10.2d, v10.2d, v13.2d\n"
- "str q1, [%x[Cpanel], #0x10]\n"
- "str q0, [%x[Cpanel], #0x20]\n"
- "uzp1 v0.2d, v14.2d, v17.2d\n"
+ "str q2, [%x[Cpanel], #0x0]\n"
+ "uzp1 v3.2d, v14.2d, v17.2d\n"
"uzp2 v14.2d, v14.2d, v17.2d\n"
- "str q8, [%x[Cpanel], #0x30]\n"
+ "str q1, [%x[Cpanel], #0x10]\n"
"uzp1 v2.2d, v15.2d, v18.2d\n"
"uzp2 v15.2d, v15.2d, v18.2d\n"
- "str q9, [%x[Cpanel], #0x40]\n"
+ "str q0, [%x[Cpanel], #0x20]\n"
"uzp1 v17.2d, v16.2d, v19.2d\n"
"uzp2 v16.2d, v16.2d, v19.2d\n"
- "str q10, [%x[Cpanel], #0x50]\n"
+ "str q8, [%x[Cpanel], #0x30]\n"
"uzp1 v1.2d, v20.2d, v23.2d\n"
"uzp2 v20.2d, v20.2d, v23.2d\n"
- "str q0, [%x[Cpanel], #0x60]\n"
+ "str q9, [%x[Cpanel], #0x40]\n"
"uzp1 v0.2d, v21.2d, v24.2d\n"
"uzp2 v21.2d, v21.2d, v24.2d\n"
- "str q2, [%x[Cpanel], #0x70]\n"
+ "str q10, [%x[Cpanel], #0x50]\n"
"uzp1 v23.2d, v22.2d, v25.2d\n"
"uzp2 v22.2d, v22.2d, v25.2d\n"
- "str q17, [%x[Cpanel], #0x80]\n"
+ "str q3, [%x[Cpanel], #0x60]\n"
"uzp1 v19.2d, v26.2d, v29.2d\n"
"uzp2 v26.2d, v26.2d, v29.2d\n"
- "str q14, [%x[Cpanel], #0x90]\n"
+ "str q2, [%x[Cpanel], #0x70]\n"
"uzp1 v18.2d, v27.2d, v30.2d\n"
"uzp2 v27.2d, v27.2d, v30.2d\n"
- "str q15, [%x[Cpanel], #0xa0]\n"
+ "str q17, [%x[Cpanel], #0x80]\n"
"uzp1 v17.2d, v28.2d, v31.2d\n"
"uzp2 v28.2d, v28.2d, v31.2d\n"
+ "str q14, [%x[Cpanel], #0x90]\n"
+ "str q15, [%x[Cpanel], #0xa0]\n"
"str q16, [%x[Cpanel], #0xb0]\n"
"str q1, [%x[Cpanel], #0xc0]\n"
"str q0, [%x[Cpanel], #0xd0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp
index 19acfe8ae9..4707a17adb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp
@@ -49,7 +49,8 @@ void a64_sgemm_asimd_8x12_x1(const float *, const float *, float *, int, int, in
// structure.
class cls_a64_sgemm_8x12 {
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
@@ -68,7 +69,7 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixedTRB<operand_type, result_type, 8, 12> transforms = {};
+ StdTransformsFixedTRB<lhs_operand_type, rhs_operand_type, result_type, 8, 12> transforms = {};
template<typename T>
static PerformanceParameters get_performance_parameters(const CPUInfo *ci) {
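[Annotation, not part of the patch] The a53.cpp hunks below appear to change only the indentation of the assembly strings (the copyright bump is the substantive edit), so the kernel logic is untouched. The kernel's characteristic pattern, a q-register filled by a d-load plus an x-register load plus ins, is the usual Cortex-A53 scheduling idiom: splitting one 128-bit load into two 64-bit pieces that the in-order core can issue around the fmla stream. A scalar model under that assumption, with hypothetical names:

    #include <cstdint>
    #include <cstring>

    struct Q { uint64_t d[2]; };  // one 128-bit NEON register

    // Functionally equivalent to a single "ldr q" of 16 bytes, but mirrors
    // the split sequence: ldr %d[r], [p]; ldr x20, [p, #8]; ins %[r].d[1], x20
    Q load_q_split(const void *p) {
        Q q;
        std::memcpy(&q.d[0], p, 8);
        std::memcpy(&q.d[1], static_cast<const char *>(p) + 8, 8);
        return q;
    }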
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a53.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a53.cpp
index f4b6e7b70f..b9a2a3a3ef 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a53.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a53.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,312 +54,312 @@ void a64_sgemm_asimd_8x12_a53(const float *Apanel, const float *Bpanel, float *C
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.4s, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.4s, #0x0\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "movi v11.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v13.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v14.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v16.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v17.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v18.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v19.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "movi v21.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
"1:\n"
// Unroll 0
- "ldr %d[b2], [%[b_ptr], #32]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
"nop\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
-
- "ldr %d[a0a], [%[a_ptr], #32]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "ldr x20, [%[a_ptr], #40]\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
-
- "ldr %d[a1a], [%[a_ptr], #48]\n"
- "ins %[a0a].d[1], x20\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "ldr x20, [%[a_ptr], #56]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
-
- "ldr %d[b0], [%[b_ptr], #48]\n"
- "ins %[a1a].d[1], x20\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "ldr x20, [%[b_ptr], #56]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "ldr x20, [%[a_ptr], #40]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
+ "ins %[a0a].d[1], x20\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "ldr x20, [%[a_ptr], #56]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+
+ "ldr %d[b0], [%[b_ptr], #48]\n"
+ "ins %[a1a].d[1], x20\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "ldr x20, [%[b_ptr], #56]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
ASM_PREFETCH("[%[a_ptr], #320]")
- "ins %[b0].d[1], x20\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "ins %[b0].d[1], x20\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
ASM_PREFETCH("[%[b_ptr], #448]")
"nop\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
"nop\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "ldr x20, [%[b_ptr], #72]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "ldr x20, [%[b_ptr], #72]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
ASM_PREFETCH("[%[b_ptr], #512]")
- "ins %[b1].d[1], x20\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
// Unroll 1
- "ldr %d[b2], [%[b_ptr], #80]\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
"nop\n"
- "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
- "ldr x20, [%[b_ptr], #88]\n"
- "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
- "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
-
- "ldr %d[a0], [%[a_ptr], #64]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
- "ldr x20, [%[a_ptr], #72]\n"
- "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
-
- "ldr %d[a1], [%[a_ptr], #80]\n"
- "ins %[a0].d[1], x20\n"
- "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
- "ldr x20, [%[a_ptr], #88]\n"
- "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
- "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
-
- "ldr %d[b0], [%[b_ptr], #96]\n"
- "ins %[a1].d[1], x20\n"
- "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
- "ldr x20, [%[b_ptr], #104]\n"
- "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+ "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+ "ldr x20, [%[b_ptr], #88]\n"
+ "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
+ "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+
+ "ldr %d[a0], [%[a_ptr], #64]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+ "ldr x20, [%[a_ptr], #72]\n"
+ "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
+
+ "ldr %d[a1], [%[a_ptr], #80]\n"
+ "ins %[a0].d[1], x20\n"
+ "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+ "ldr x20, [%[a_ptr], #88]\n"
+ "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+
+ "ldr %d[b0], [%[b_ptr], #96]\n"
+ "ins %[a1].d[1], x20\n"
+ "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[b_ptr], #104]\n"
+ "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
"nop\n"
- "ins %[b0].d[1], x20\n"
- "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+ "ins %[b0].d[1], x20\n"
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
"nop\n"
"nop\n"
- "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
- "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+ "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+ "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
- "ldr %d[b1], [%[b_ptr], #112]\n"
+ "ldr %d[b1], [%[b_ptr], #112]\n"
"nop\n"
- "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
- "ldr x20, [%[b_ptr], #120]\n"
- "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+ "ldr x20, [%[b_ptr], #120]\n"
+ "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
"nop\n"
- "ins %[b1].d[1], x20\n"
- "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
- "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+ "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
- "bne 1b\n"
+ "bne 1b\n"
// Branch here if K=1 or 2. Do the right thing for odd/even at the end.
"4:\n"
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration. (even K)
- "ldr %d[b2], [%[b_ptr], #32]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
"nop\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
-
- "ldr %d[a0a], [%[a_ptr], #32]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "ldr x20, [%[a_ptr], #40]\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
-
- "ldr %d[a1a], [%[a_ptr], #48]\n"
- "ins %[a0a].d[1], x20\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "ldr x20, [%[a_ptr], #56]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
-
- "ldr %d[b0], [%[b_ptr], #48]\n"
- "ins %[a1a].d[1], x20\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "ldr x20, [%[b_ptr], #56]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
-
- "ins %[b0].d[1], x20\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "ldr x20, [%[a_ptr], #40]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
+ "ins %[a0a].d[1], x20\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "ldr x20, [%[a_ptr], #56]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+
+ "ldr %d[b0], [%[b_ptr], #48]\n"
+ "ins %[a1a].d[1], x20\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "ldr x20, [%[b_ptr], #56]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+
+ "ins %[b0].d[1], x20\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
"nop\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
"nop\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "ldr x20, [%[b_ptr], #72]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "ldr x20, [%[b_ptr], #72]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "ins %[b1].d[1], x20\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "ldr %d[b2], [%[b_ptr], #80]\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
"nop\n"
- "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
- "ldr x20, [%[b_ptr], #88]\n"
- "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
- "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
-
- "ins %[b2].d[1], x20\n"
- "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
- "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
- "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
- "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
- "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
- "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
- "b 3f\n"
+ "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+ "ldr x20, [%[b_ptr], #88]\n"
+ "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
+ "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+
+ "ins %[b2].d[1], x20\n"
+ "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
+ "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+ "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+ "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+ "b 3f\n"
// Detached final iteration. (odd K)
"2:\n"
- "ldr %d[b2], [%[b_ptr], #32]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
"nop\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
-
- "ins %[b2].d[1], x20\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+
+ "ins %[b2].d[1], x20\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
// Common tail
"3:\n"
- "str q8, [%[c_ptr]]\n"
- "str q16, [%[c_ptr], #16]\n"
- "str q24, [%[c_ptr], #32]\n"
- "str q9, [%[c_ptr], #48]\n"
- "str q17, [%[c_ptr], #64]\n"
- "str q25, [%[c_ptr], #80]\n"
- "str q10, [%[c_ptr], #96]\n"
- "str q18, [%[c_ptr], #112]\n"
- "str q26, [%[c_ptr], #128]\n"
- "str q11, [%[c_ptr], #144]\n"
- "str q19, [%[c_ptr], #160]\n"
- "str q27, [%[c_ptr], #176]\n"
- "str q12, [%[c_ptr], #192]\n"
- "str q20, [%[c_ptr], #208]\n"
- "str q28, [%[c_ptr], #224]\n"
- "str q13, [%[c_ptr], #240]\n"
- "str q21, [%[c_ptr], #256]\n"
- "str q29, [%[c_ptr], #272]\n"
- "str q14, [%[c_ptr], #288]\n"
- "str q22, [%[c_ptr], #304]\n"
- "str q30, [%[c_ptr], #320]\n"
- "str q15, [%[c_ptr], #336]\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q8, [%[c_ptr]]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "str q10, [%[c_ptr], #96]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "str q11, [%[c_ptr], #144]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "str q12, [%[c_ptr], #192]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "str q13, [%[c_ptr], #240]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "str q14, [%[c_ptr], #288]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "str q15, [%[c_ptr], #336]\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55.cpp
index 5f86da8ef3..4303d1346c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,313 +54,313 @@ void a64_sgemm_asimd_8x12_a55(const float *Apanel, const float *Bpanel, float *C
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.4s, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.4s, #0x0\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "movi v11.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v13.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v14.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v16.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v17.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v18.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v19.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "movi v21.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
"1:\n"
// Unroll 0
- "ldr %d[b2], [%[b_ptr], #32]\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "subs %w[k], %w[k], #1\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "subs %w[k], %w[k], #1\n"
- "ldr %d[a0a], [%[a_ptr], #32]\n"
- "ins %[b2].d[1], x20\n"
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
+ "ins %[b2].d[1], x20\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "ldr x20, [%[a_ptr], #40]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "ldr x20, [%[a_ptr], #40]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "ldr %d[a1a], [%[a_ptr], #48]\n"
- "ins %[a0a].d[1], x20\n"
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
+ "ins %[a0a].d[1], x20\n"
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "ldr x20, [%[a_ptr], #56]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "ldr x20, [%[a_ptr], #56]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
- "ins %[a1a].d[1], x20\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
+ "ins %[a1a].d[1], x20\n"
ASM_PREFETCH("[%[a_ptr], #320]")
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "ldr x20, [%[b_ptr], #56]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
-
- "ldr %d[b1], [%[b_ptr], #64]\n"
- "ins %[b0].d[1], x20\n"
-
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "ldr x20, [%[b_ptr], #72]\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "ldr x20, [%[b_ptr], #56]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+
+ "ldr %d[b1], [%[b_ptr], #64]\n"
+ "ins %[b0].d[1], x20\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "ldr x20, [%[b_ptr], #72]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
ASM_PREFETCH("[%[b_ptr], #448]")
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
ASM_PREFETCH("[%[b_ptr], #512]")
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
// Unroll 1
- "ldr %d[b2], [%[b_ptr], #80]\n"
- "ins %[b1].d[1], x20\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
+ "ins %[b1].d[1], x20\n"
- "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
- "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
- "ldr x20, [%[b_ptr], #88]\n"
- "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+ "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+ "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[b_ptr], #88]\n"
+ "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
- "ldr %d[a0], [%[a_ptr], #64]\n"
- "ins %[b2].d[1], x20\n"
+ "ldr %d[a0], [%[a_ptr], #64]\n"
+ "ins %[b2].d[1], x20\n"
- "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
- "ldr x20, [%[a_ptr], #72]\n"
- "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
+ "ldr x20, [%[a_ptr], #72]\n"
+ "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
- "ldr %d[a1], [%[a_ptr], #80]\n"
- "ins %[a0].d[1], x20\n"
+ "ldr %d[a1], [%[a_ptr], #80]\n"
+ "ins %[a0].d[1], x20\n"
- "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
- "ldr x20, [%[a_ptr], #88]\n"
- "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[a_ptr], #88]\n"
+ "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
- "ldr %d[b0], [%[b_ptr], #96]\n"
- "ins %[a1].d[1], x20\n"
+ "ldr %d[b0], [%[b_ptr], #96]\n"
+ "ins %[a1].d[1], x20\n"
- "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
- "ldr x20, [%[b_ptr], #104]\n"
- "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+ "ldr x20, [%[b_ptr], #104]\n"
+ "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
- "ldr %d[b1], [%[b_ptr], #112]\n"
- "ins %[b0].d[1], x20\n"
+ "ldr %d[b1], [%[b_ptr], #112]\n"
+ "ins %[b0].d[1], x20\n"
- "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
- "ldr x20, [%[b_ptr], #120]\n"
- "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[b_ptr], #120]\n"
+ "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
- "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+ "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
- "ldr %d[b2], [%[b_ptr], #32]\n"
- "ins %[b1].d[1], x20\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
+ "ins %[b1].d[1], x20\n"
- "bne 1b\n"
+ "bne 1b\n"
// Branch here if K=1 or 2. Do the right thing for odd/even at the end.
"4:\n"
- "cbnz %w[oddk], 2f\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "cbnz %w[oddk], 2f\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
// Detached final iteration. (even K)
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
-
- "ldr %d[a0a], [%[a_ptr], #32]\n"
- "ins %[b2].d[1], x20\n"
-
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "ldr x20, [%[a_ptr], #40]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
-
- "ldr %d[a1a], [%[a_ptr], #48]\n"
- "ins %[a0a].d[1], x20\n"
-
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "ldr x20, [%[a_ptr], #56]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
-
- "ldr %d[b0], [%[b_ptr], #48]\n"
- "ins %[a1a].d[1], x20\n"
-
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "ldr x20, [%[b_ptr], #56]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
-
- "ldr %d[b1], [%[b_ptr], #64]\n"
- "ins %[b0].d[1], x20\n"
-
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "ldr x20, [%[b_ptr], #72]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
-
- "ldr %d[b2], [%[b_ptr], #80]\n"
- "ins %[b1].d[1], x20\n"
-
- "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
- "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
- "ldr x20, [%[b_ptr], #88]\n"
- "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
-
- "ins %[b2].d[1], x20\n"
- "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
- "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
- "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
- "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
- "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
- "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
- "b 3f\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
+ "ins %[b2].d[1], x20\n"
+
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "ldr x20, [%[a_ptr], #40]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
+ "ins %[a0a].d[1], x20\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "ldr x20, [%[a_ptr], #56]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+
+ "ldr %d[b0], [%[b_ptr], #48]\n"
+ "ins %[a1a].d[1], x20\n"
+
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "ldr x20, [%[b_ptr], #56]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+
+ "ldr %d[b1], [%[b_ptr], #64]\n"
+ "ins %[b0].d[1], x20\n"
+
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "ldr x20, [%[b_ptr], #72]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+
+ "ldr %d[b2], [%[b_ptr], #80]\n"
+ "ins %[b1].d[1], x20\n"
+
+ "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+ "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[b_ptr], #88]\n"
+ "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+
+ "ins %[b2].d[1], x20\n"
+ "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
+ "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+ "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+ "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+ "b 3f\n"
// Detached final iteration. (odd K)
"2:\n"
- "ldr %d[b2], [%[b_ptr], #32]\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
-
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
// Common tail
"3:\n"
- "str q8, [%[c_ptr]]\n"
- "str q16, [%[c_ptr], #16]\n"
- "str q24, [%[c_ptr], #32]\n"
- "str q9, [%[c_ptr], #48]\n"
- "str q17, [%[c_ptr], #64]\n"
- "str q25, [%[c_ptr], #80]\n"
- "str q10, [%[c_ptr], #96]\n"
- "str q18, [%[c_ptr], #112]\n"
- "str q26, [%[c_ptr], #128]\n"
- "str q11, [%[c_ptr], #144]\n"
- "str q19, [%[c_ptr], #160]\n"
- "str q27, [%[c_ptr], #176]\n"
- "str q12, [%[c_ptr], #192]\n"
- "str q20, [%[c_ptr], #208]\n"
- "str q28, [%[c_ptr], #224]\n"
- "str q13, [%[c_ptr], #240]\n"
- "str q21, [%[c_ptr], #256]\n"
- "str q29, [%[c_ptr], #272]\n"
- "str q14, [%[c_ptr], #288]\n"
- "str q22, [%[c_ptr], #304]\n"
- "str q30, [%[c_ptr], #320]\n"
- "str q15, [%[c_ptr], #336]\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q8, [%[c_ptr]]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "str q10, [%[c_ptr], #96]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "str q11, [%[c_ptr], #144]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "str q12, [%[c_ptr], #192]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "str q13, [%[c_ptr], #240]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "str q14, [%[c_ptr], #288]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "str q15, [%[c_ptr], #336]\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55r1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55r1.cpp
index 7709ad1be6..fdbbe6b749 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55r1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/a55r1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -100,275 +100,275 @@ void a64_sgemm_asimd_8x12_a55r1(const float *Apanel, const float *Bpanel, float
// The loop is offset by these two instructions which must
// always be executed.
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "ldr %d[b2], [%[b_ptr], #32]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
"1:\n"
// Unroll 0
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "ldr %d[a0a], [%[a_ptr], #32]\n"
-
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "ldr x20, [%[a_ptr], #40]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "ldr %d[a1a], [%[a_ptr], #48]\n"
-
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "ins %[a0a].d[1], x20\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "ldr x20, [%[a_ptr], #56]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
-
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "ins %[a1a].d[1], x20\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "ldr x20, [%[b_ptr], #56]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "ins %[b0].d[1], x20\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "ldr x20, [%[b_ptr], #72]\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
+
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "ldr x20, [%[a_ptr], #40]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "ins %[a0a].d[1], x20\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "ldr x20, [%[a_ptr], #56]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
+
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "ins %[a1a].d[1], x20\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "ldr x20, [%[b_ptr], #56]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "ins %[b0].d[1], x20\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "ldr x20, [%[b_ptr], #72]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
ASM_PREFETCH("[%[a_ptr], #448]")
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
ASM_PREFETCH("[%[b_ptr], #576]")
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
// Unroll 1
- "ldr %d[b2], [%[b_ptr], #80]\n"
-
- "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
- "ins %[b1].d[1], x20\n"
- "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
- "ldr x20, [%[b_ptr], #88]\n"
- "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
- "ldr %d[a0], [%[a_ptr], #64]\n"
-
- "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
- "ldr x20, [%[a_ptr], #72]\n"
- "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
- "ldr %d[a1], [%[a_ptr], #80]\n"
-
- "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
- "ins %[a0].d[1], x20\n"
- "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
- "ldr x20, [%[a_ptr], #88]\n"
- "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
- "ldr %d[b0], [%[b_ptr], #96]\n"
-
- "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
- "ins %[a1].d[1], x20\n"
- "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
- "ldr x20, [%[b_ptr], #104]\n"
- "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
- "ldr %d[b1], [%[b_ptr], #112]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
- "ins %[b0].d[1], x20\n"
- "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
- "ldr x20, [%[b_ptr], #120]\n"
- "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
-
- "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
-
- "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
+
+ "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[b_ptr], #88]\n"
+ "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+ "ldr %d[a0], [%[a_ptr], #64]\n"
+
+ "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
+ "ldr x20, [%[a_ptr], #72]\n"
+ "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+ "ldr %d[a1], [%[a_ptr], #80]\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+ "ins %[a0].d[1], x20\n"
+ "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[a_ptr], #88]\n"
+ "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+ "ldr %d[b0], [%[b_ptr], #96]\n"
+
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+ "ins %[a1].d[1], x20\n"
+ "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+ "ldr x20, [%[b_ptr], #104]\n"
+ "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+ "ldr %d[b1], [%[b_ptr], #112]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+ "ins %[b0].d[1], x20\n"
+ "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[b_ptr], #120]\n"
+ "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+
+ "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+
+ "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
ASM_PREFETCH("[%[b_ptr], #640]")
- "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
- "ins %[b1].d[1], x20\n"
- "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
- "ldr %d[b2], [%[b_ptr], #32]\n"
+ "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+ "ldr %d[b2], [%[b_ptr], #32]\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "b.ne 1b\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "b.ne 1b\n"
// Branch here if K=1 or 2. Do the right thing for odd/even at the end.
"4:\n"
- // Start final iteration - branch off to "odd" code before we load a0a.
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr x20, [%[b_ptr], #40]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "cbnz %w[oddk], 2f\n"
+ // Start final iteration - branch off to "odd" code before we load a0a.
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "ldr x20, [%[b_ptr], #40]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "cbnz %w[oddk], 2f\n"
// Even K continuation
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "ldr %d[a0a], [%[a_ptr], #32]\n"
-
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "ldr x20, [%[a_ptr], #40]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "ldr %d[a0a], [%[a_ptr], #32]\n"
+
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "ldr x20, [%[a_ptr], #40]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
ASM_PREFETCHW("[%[c_ptr]]")
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "ldr %d[a1a], [%[a_ptr], #48]\n"
-
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "ins %[a0a].d[1], x20\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "ldr x20, [%[a_ptr], #56]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "ldr %d[b0], [%[b_ptr], #48]\n"
-
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "ins %[a1a].d[1], x20\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "ldr x20, [%[b_ptr], #56]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "ldr %d[a1a], [%[a_ptr], #48]\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "ins %[a0a].d[1], x20\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "ldr x20, [%[a_ptr], #56]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "ldr %d[b0], [%[b_ptr], #48]\n"
+
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "ins %[a1a].d[1], x20\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "ldr x20, [%[b_ptr], #56]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
ASM_PREFETCHW("[%[c_ptr], #64]")
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
ASM_PREFETCHW("[%[c_ptr], #128]")
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "ldr %d[b1], [%[b_ptr], #64]\n"
-
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "ins %[b0].d[1], x20\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "ldr x20, [%[b_ptr], #72]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "ldr %d[b1], [%[b_ptr], #64]\n"
+
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "ins %[b0].d[1], x20\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "ldr x20, [%[b_ptr], #72]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
ASM_PREFETCHW("[%[c_ptr], #192]")
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "ldr %d[b2], [%[b_ptr], #80]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ldr %d[b2], [%[b_ptr], #80]\n"
- "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
- "ins %[b1].d[1], x20\n"
- "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
- "ldr x20, [%[b_ptr], #88]\n"
- "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
- "ins %[b2].d[1], x20\n"
+ "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+ "ins %[b1].d[1], x20\n"
+ "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
+ "ldr x20, [%[b_ptr], #88]\n"
+ "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+ "ins %[b2].d[1], x20\n"
- "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+ "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
ASM_PREFETCHW("[%[c_ptr], #256]")
- "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+ "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
+ "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
ASM_PREFETCHW("[%[c_ptr], #320]")
- "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
- "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+ "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+ "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
ASM_PREFETCHWL2("[%[c_ptr], #384]")
- "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
- "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+ "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
ASM_PREFETCHWL2("[%[c_ptr], #448]")
- "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+ "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
ASM_PREFETCHWL2("[%[c_ptr], #512]")
- "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+ "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #576]")
- "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
- "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+ "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+ "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
ASM_PREFETCHWL2("[%[c_ptr], #640]")
- "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+ "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
ASM_PREFETCHWL2("[%[c_ptr], #704]")
- "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
- "b 3f\n"
+ "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+ "b 3f\n"
// Odd K continuation
"2:\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
ASM_PREFETCHW("[%[c_ptr]]")
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "ins %[b2].d[1], x20\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "ins %[b2].d[1], x20\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
ASM_PREFETCHW("[%[c_ptr], #64]")
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
ASM_PREFETCHW("[%[c_ptr], #128]")
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
ASM_PREFETCHW("[%[c_ptr], #192]")
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
ASM_PREFETCHW("[%[c_ptr], #256]")
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
ASM_PREFETCHW("[%[c_ptr], #320]")
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #384]")
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
ASM_PREFETCHWL2("[%[c_ptr], #448]")
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
ASM_PREFETCHWL2("[%[c_ptr], #512]")
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
ASM_PREFETCHWL2("[%[c_ptr], #576]")
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
ASM_PREFETCHWL2("[%[c_ptr], #640]")
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
ASM_PREFETCHWL2("[%[c_ptr], #704]")
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
// Common tail
"3:\n"
- "str q8, [%[c_ptr]]\n"
- "str q16, [%[c_ptr], #16]\n"
- "str q24, [%[c_ptr], #32]\n"
- "str q9, [%[c_ptr], #48]\n"
- "str q17, [%[c_ptr], #64]\n"
- "str q25, [%[c_ptr], #80]\n"
- "str q10, [%[c_ptr], #96]\n"
- "str q18, [%[c_ptr], #112]\n"
- "str q26, [%[c_ptr], #128]\n"
- "str q11, [%[c_ptr], #144]\n"
- "str q19, [%[c_ptr], #160]\n"
- "str q27, [%[c_ptr], #176]\n"
- "str q12, [%[c_ptr], #192]\n"
- "str q20, [%[c_ptr], #208]\n"
- "str q28, [%[c_ptr], #224]\n"
- "str q13, [%[c_ptr], #240]\n"
- "str q21, [%[c_ptr], #256]\n"
- "str q29, [%[c_ptr], #272]\n"
- "str q14, [%[c_ptr], #288]\n"
- "str q22, [%[c_ptr], #304]\n"
- "str q30, [%[c_ptr], #320]\n"
- "str q15, [%[c_ptr], #336]\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q8, [%[c_ptr]]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "str q10, [%[c_ptr], #96]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "str q11, [%[c_ptr], #144]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "str q12, [%[c_ptr], #192]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "str q13, [%[c_ptr], #240]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "str q14, [%[c_ptr], #288]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "str q15, [%[c_ptr], #336]\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/generic.cpp
index dc72095a9b..5e1cce3233 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,281 +64,281 @@ void a64_sgemm_asimd_8x12(const float *Apanel, const float *Bpanel, float *Cpane
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.4s, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.4s, #0x0\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "movi v11.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v13.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v14.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v16.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v17.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v18.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v19.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "movi v21.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
// Loop proper
"1:\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
ASM_PREFETCH("[%[a_ptr], #320]")
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
ASM_PREFETCH("[%[b_ptr], #448]")
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
-
- "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
- "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
- "ldr %q[a0], [%[a_ptr], #64]\n"
- "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
- "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
- "ldr %q[a1], [%[a_ptr], #80]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+
+ "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+ "fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
+ "ldr %q[a0], [%[a_ptr], #64]\n"
+ "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+ "ldr %q[a1], [%[a_ptr], #80]\n"
"fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
+ "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
- "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+ "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
ASM_PREFETCH("[%[b_ptr], #512]")
- "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
- "ldr %q[b1], [%[b_ptr], #112]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
- "bne 1b\n"
+ "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+ "bne 1b\n"
// Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
"4:\n"
// Branch to alternative tail for odd K
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration (even K)
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
"fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "ldr %q[a0a], [%[a_ptr], #32]\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "ldr %q[a0a], [%[a_ptr], #32]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
"fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "ldr %q[a1a], [%[a_ptr], #48]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
-
- "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
- "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
+ "ldr %q[a1a], [%[a_ptr], #48]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+
+ "fmla v8.4s , %[b0].4s, %[a0a].s[0]\n"
+ "fmla v16.4s, %[b1].4s, %[a0a].s[0]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
"fmla v9.4s , %[b0].4s, %[a0a].s[1]\n"
- "str q8, [%[c_ptr], #0]\n"
- "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
- "str q16, [%[c_ptr], #16]\n"
- "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
- "str q24, [%[c_ptr], #32]\n"
-
- "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
- "str q9, [%[c_ptr], #48]\n"
- "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
- "str q17, [%[c_ptr], #64]\n"
- "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
- "str q25, [%[c_ptr], #80]\n"
- "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
- "str q10, [%[c_ptr], #96]\n"
-
- "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
- "str q18, [%[c_ptr], #112]\n"
- "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
- "str q26, [%[c_ptr], #128]\n"
- "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
- "str q11, [%[c_ptr], #144]\n"
-
- "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
- "str q19, [%[c_ptr], #160]\n"
- "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
- "str q27, [%[c_ptr], #176]\n"
- "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q8, [%[c_ptr], #0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0a].s[1]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "fmla v24.4s, %[b2].4s, %[a0a].s[0]\n"
+ "str q24, [%[c_ptr], #32]\n"
+
+ "fmla v25.4s, %[b2].4s, %[a0a].s[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "fmla v10.4s, %[b0].4s, %[a0a].s[2]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "fmla v18.4s, %[b1].4s, %[a0a].s[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "fmla v26.4s, %[b2].4s, %[a0a].s[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+
+ "fmla v11.4s, %[b0].4s, %[a0a].s[3]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "fmla v19.4s, %[b1].4s, %[a0a].s[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "fmla v27.4s, %[b2].4s, %[a0a].s[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+
+ "fmla v12.4s, %[b0].4s, %[a1a].s[0]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "fmla v20.4s, %[b1].4s, %[a1a].s[0]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "fmla v28.4s, %[b2].4s, %[a1a].s[0]\n"
+ "str q12, [%[c_ptr], #192]\n"
"fmla v13.4s, %[b0].4s, %[a1a].s[1]\n"
- "str q20, [%[c_ptr], #208]\n"
- "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
- "str q28, [%[c_ptr], #224]\n"
- "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
- "str q13, [%[c_ptr], #240]\n"
-
- "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
- "str q21, [%[c_ptr], #256]\n"
- "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
- "str q29, [%[c_ptr], #272]\n"
- "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
- "str q14, [%[c_ptr], #288]\n"
-
- "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
- "str q22, [%[c_ptr], #304]\n"
- "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
- "str q30, [%[c_ptr], #320]\n"
- "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
- "str q15, [%[c_ptr], #336]\n"
-
- "b 3f\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "fmla v21.4s, %[b1].4s, %[a1a].s[1]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "fmla v29.4s, %[b2].4s, %[a1a].s[1]\n"
+ "str q13, [%[c_ptr], #240]\n"
+
+ "fmla v14.4s, %[b0].4s, %[a1a].s[2]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "fmla v22.4s, %[b1].4s, %[a1a].s[2]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "fmla v30.4s, %[b2].4s, %[a1a].s[2]\n"
+ "str q14, [%[c_ptr], #288]\n"
+
+ "fmla v15.4s, %[b0].4s, %[a1a].s[3]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "fmla v23.4s, %[b1].4s, %[a1a].s[3]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "fmla v31.4s, %[b2].4s, %[a1a].s[3]\n"
+ "str q15, [%[c_ptr], #336]\n"
+
+ "b 3f\n"
// Detached final iteration (odd K)
"2:\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
"fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "str q8, [%[c_ptr], #0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "str q16, [%[c_ptr], #16]\n"
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "str q24, [%[c_ptr], #32]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "str q9, [%[c_ptr], #48]\n"
-
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "str q17, [%[c_ptr], #64]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "str q25, [%[c_ptr], #80]\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "str q10, [%[c_ptr], #96]\n"
-
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "str q18, [%[c_ptr], #112]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "str q26, [%[c_ptr], #128]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "str q11, [%[c_ptr], #144]\n"
-
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "str q19, [%[c_ptr], #160]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "str q27, [%[c_ptr], #176]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q8, [%[c_ptr], #0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "str q12, [%[c_ptr], #192]\n"
"fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "str q20, [%[c_ptr], #208]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "str q28, [%[c_ptr], #224]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "str q13, [%[c_ptr], #240]\n"
-
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "str q21, [%[c_ptr], #256]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "str q29, [%[c_ptr], #272]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "str q14, [%[c_ptr], #288]\n"
-
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "str q22, [%[c_ptr], #304]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "str q30, [%[c_ptr], #320]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "str q13, [%[c_ptr], #240]\n"
+
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "str q14, [%[c_ptr], #288]\n"
+
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "str q15, [%[c_ptr], #336]\n"
// Common tail
"3:\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "+w" (a0), [a1] "+w" (a1), [a0a] "+w" (a0a), [a1a] "+w" (a1a),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/x1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/x1.cpp
index 89f8ac2d6c..1567b05d3e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/x1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12/x1.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -62,281 +62,281 @@ void a64_sgemm_asimd_8x12_x1(const float *Apanel, const float *Bpanel, float *Cp
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.4s, #0x0\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "movi v9.4s, #0x0\n"
- "ldr %q[b0], [%[b_ptr]]\n"
- "movi v10.4s, #0x0\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "movi v11.4s, #0x0\n"
- "ldr %q[b1], [%[b_ptr], #16]\n"
- "movi v12.4s, #0x0\n"
+ "movi v8.4s, #0x0\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "movi v9.4s, #0x0\n"
+ "ldr %q[b0], [%[b_ptr]]\n"
+ "movi v10.4s, #0x0\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "movi v11.4s, #0x0\n"
+ "ldr %q[b1], [%[b_ptr], #16]\n"
+ "movi v12.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
- "movi v13.4s, #0x0\n"
+ "movi v13.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #64]")
- "movi v14.4s, #0x0\n"
+ "movi v14.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.4s, #0x0\n"
+ "movi v15.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #128]")
- "movi v16.4s, #0x0\n"
+ "movi v16.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #192]")
- "movi v17.4s, #0x0\n"
+ "movi v17.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #256]")
- "movi v18.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #192]")
- "movi v19.4s, #0x0\n"
+ "movi v19.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #320]")
- "movi v20.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
ASM_PREFETCH("[%[a_ptr], #256]")
- "movi v21.4s, #0x0\n"
+ "movi v21.4s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #384]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
+ "movi v22.4s, #0x0\n"
+ "movi v23.4s, #0x0\n"
+ "movi v24.4s, #0x0\n"
+ "movi v25.4s, #0x0\n"
+ "movi v26.4s, #0x0\n"
+ "movi v27.4s, #0x0\n"
+ "movi v28.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ "movi v30.4s, #0x0\n"
+ "movi v31.4s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
// Loop proper
"1:\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
ASM_PREFETCH("[%[a_ptr], #320]")
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
ASM_PREFETCH("[%[b_ptr], #448]")
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "ldr %q[a0], [%[a_ptr], #32]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "ldr %q[a1], [%[a_ptr], #48]\n"
-
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr %q[b2], [%[b_ptr], #80]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "ldr %q[a0], [%[a_ptr], #32]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ldr %q[a1], [%[a_ptr], #48]\n"
+
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
+ "ldr %q[b2], [%[b_ptr], #80]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
"fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "ldr %q[b0], [%[b_ptr], #96]\n"
-
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "ldr %q[b0], [%[b_ptr], #96]\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
ASM_PREFETCH("[%[b_ptr], #512]")
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "ldr %q[b1], [%[b_ptr], #112]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "ldr %q[a0], [%[a_ptr]]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "ldr %q[a1], [%[a_ptr], #16]\n"
- "bne 1b\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "ldr %q[b1], [%[b_ptr], #112]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "ldr %q[a0], [%[a_ptr]]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ldr %q[a1], [%[a_ptr], #16]\n"
+ "bne 1b\n"
// Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
"4:\n"
// Branch to alternative tail for odd K
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration (even K)
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
"fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
"fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "ldr %q[b0], [%[b_ptr], #48]\n"
-
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "ldr %q[b1], [%[b_ptr], #64]\n"
-
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "add %[a_ptr], %[a_ptr], #64\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "ldr %q[a0], [%[a_ptr], #-32]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "add %[b_ptr], %[b_ptr], #96\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "ldr %q[a1], [%[a_ptr], #-16]\n"
-
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
- "ldr %q[b2], [%[b_ptr], #-16]\n"
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "ldr %q[b0], [%[b_ptr], #48]\n"
+
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "ldr %q[b1], [%[b_ptr], #64]\n"
+
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #64\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "ldr %q[a0], [%[a_ptr], #-32]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "add %[b_ptr], %[b_ptr], #96\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "ldr %q[a1], [%[a_ptr], #-16]\n"
+
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "ldr %q[b2], [%[b_ptr], #-16]\n"
"fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "str q8, [%[c_ptr], #0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "str q16, [%[c_ptr], #16]\n"
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "str q24, [%[c_ptr], #32]\n"
-
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "str q9, [%[c_ptr], #48]\n"
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "str q17, [%[c_ptr], #64]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "str q25, [%[c_ptr], #80]\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "str q10, [%[c_ptr], #96]\n"
-
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "str q18, [%[c_ptr], #112]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "str q26, [%[c_ptr], #128]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "str q11, [%[c_ptr], #144]\n"
-
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "str q19, [%[c_ptr], #160]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "str q27, [%[c_ptr], #176]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q8, [%[c_ptr], #0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "str q24, [%[c_ptr], #32]\n"
+
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "str q12, [%[c_ptr], #192]\n"
"fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "str q20, [%[c_ptr], #208]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "str q28, [%[c_ptr], #224]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "str q13, [%[c_ptr], #240]\n"
-
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "str q21, [%[c_ptr], #256]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "str q29, [%[c_ptr], #272]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "str q14, [%[c_ptr], #288]\n"
-
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "str q22, [%[c_ptr], #304]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "str q30, [%[c_ptr], #320]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "str q15, [%[c_ptr], #336]\n"
-
- "b 3f\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "str q13, [%[c_ptr], #240]\n"
+
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "str q14, [%[c_ptr], #288]\n"
+
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "str q15, [%[c_ptr], #336]\n"
+
+ "b 3f\n"
// Detached final iteration (odd K)
"2:\n"
- "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
- "ldr %q[b2], [%[b_ptr], #32]\n"
- "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
+ "fmla v8.4s , %[b0].4s, %[a0].s[0]\n"
+ "ldr %q[b2], [%[b_ptr], #32]\n"
+ "fmla v16.4s, %[b1].4s, %[a0].s[0]\n"
"fmla v9.4s , %[b0].4s, %[a0].s[1]\n"
- "str q8, [%[c_ptr], #0]\n"
- "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
- "str q16, [%[c_ptr], #16]\n"
- "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
- "add %[a_ptr], %[a_ptr], #32\n"
- "str q24, [%[c_ptr], #32]\n"
- "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
- "str q9, [%[c_ptr], #48]\n"
-
- "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
- "str q17, [%[c_ptr], #64]\n"
- "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
- "str q25, [%[c_ptr], #80]\n"
- "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
- "str q10, [%[c_ptr], #96]\n"
-
- "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
- "str q18, [%[c_ptr], #112]\n"
- "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
- "str q26, [%[c_ptr], #128]\n"
- "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
- "str q11, [%[c_ptr], #144]\n"
-
- "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
- "str q19, [%[c_ptr], #160]\n"
- "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
- "str q27, [%[c_ptr], #176]\n"
- "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
- "str q12, [%[c_ptr], #192]\n"
+ "str q8, [%[c_ptr], #0]\n"
+ "fmla v17.4s, %[b1].4s, %[a0].s[1]\n"
+ "str q16, [%[c_ptr], #16]\n"
+ "fmla v24.4s, %[b2].4s, %[a0].s[0]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
+ "add %[a_ptr], %[a_ptr], #32\n"
+ "str q24, [%[c_ptr], #32]\n"
+ "fmla v25.4s, %[b2].4s, %[a0].s[1]\n"
+ "str q9, [%[c_ptr], #48]\n"
+
+ "fmla v10.4s, %[b0].4s, %[a0].s[2]\n"
+ "str q17, [%[c_ptr], #64]\n"
+ "fmla v18.4s, %[b1].4s, %[a0].s[2]\n"
+ "str q25, [%[c_ptr], #80]\n"
+ "fmla v26.4s, %[b2].4s, %[a0].s[2]\n"
+ "str q10, [%[c_ptr], #96]\n"
+
+ "fmla v11.4s, %[b0].4s, %[a0].s[3]\n"
+ "str q18, [%[c_ptr], #112]\n"
+ "fmla v19.4s, %[b1].4s, %[a0].s[3]\n"
+ "str q26, [%[c_ptr], #128]\n"
+ "fmla v27.4s, %[b2].4s, %[a0].s[3]\n"
+ "str q11, [%[c_ptr], #144]\n"
+
+ "fmla v12.4s, %[b0].4s, %[a1].s[0]\n"
+ "str q19, [%[c_ptr], #160]\n"
+ "fmla v20.4s, %[b1].4s, %[a1].s[0]\n"
+ "str q27, [%[c_ptr], #176]\n"
+ "fmla v28.4s, %[b2].4s, %[a1].s[0]\n"
+ "str q12, [%[c_ptr], #192]\n"
"fmla v13.4s, %[b0].4s, %[a1].s[1]\n"
- "str q20, [%[c_ptr], #208]\n"
- "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
- "str q28, [%[c_ptr], #224]\n"
- "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
- "str q13, [%[c_ptr], #240]\n"
-
- "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
- "str q21, [%[c_ptr], #256]\n"
- "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
- "str q29, [%[c_ptr], #272]\n"
- "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
- "str q14, [%[c_ptr], #288]\n"
-
- "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
- "str q22, [%[c_ptr], #304]\n"
- "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
- "str q30, [%[c_ptr], #320]\n"
- "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
- "str q15, [%[c_ptr], #336]\n"
+ "str q20, [%[c_ptr], #208]\n"
+ "fmla v21.4s, %[b1].4s, %[a1].s[1]\n"
+ "str q28, [%[c_ptr], #224]\n"
+ "fmla v29.4s, %[b2].4s, %[a1].s[1]\n"
+ "str q13, [%[c_ptr], #240]\n"
+
+ "fmla v14.4s, %[b0].4s, %[a1].s[2]\n"
+ "str q21, [%[c_ptr], #256]\n"
+ "fmla v22.4s, %[b1].4s, %[a1].s[2]\n"
+ "str q29, [%[c_ptr], #272]\n"
+ "fmla v30.4s, %[b2].4s, %[a1].s[2]\n"
+ "str q14, [%[c_ptr], #288]\n"
+
+ "fmla v15.4s, %[b0].4s, %[a1].s[3]\n"
+ "str q22, [%[c_ptr], #304]\n"
+ "fmla v23.4s, %[b1].4s, %[a1].s[3]\n"
+ "str q30, [%[c_ptr], #320]\n"
+ "fmla v31.4s, %[b2].4s, %[a1].s[3]\n"
+ "str q15, [%[c_ptr], #336]\n"
// Common tail
"3:\n"
- "str q23, [%[c_ptr], #352]\n"
- "str q31, [%[c_ptr], #368]\n"
- "add %[c_ptr], %[c_ptr], #384\n"
+ "str q23, [%[c_ptr], #352]\n"
+ "str q31, [%[c_ptr], #368]\n"
+ "add %[c_ptr], %[c_ptr], #384\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
[a0] "+w" (a0), [a1] "+w" (a1),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp
index c1318a2a06..2fed3264ab 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,7 +42,8 @@ void a64_sgemm_asimd_8x6(const float *, const float *, float *, int, int, int);
// structure.
class cls_a64_sgemm_8x6 {
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
@@ -61,7 +62,7 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsFixed<operand_type, result_type, 8, 6, 1> transforms = {};
+ StdTransformsFixed<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 1> transforms = {};
kern_type kernel=a64_sgemm_asimd_8x6;
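
[Editorial note: the header change above splits the single operand_type typedef into lhs_operand_type and rhs_operand_type and threads both through StdTransformsFixed. A plausible reading is that this lets kernel classes describe GEMMs whose two operands have different element types; same-type kernels such as this one simply name float twice. The toy template below sketches that idea — ToyTransforms and its members are assumptions for illustration, not the library's StdTransformsFixed.]

    // Minimal sketch: separate LHS/RHS types allow per-operand panel sizing.
    #include <cstddef>

    template <typename TLhs, typename TRhs, typename TResult,
              unsigned Height, unsigned Width, unsigned KUnroll>
    struct ToyTransforms {
        // Panel sizes may differ once sizeof(TLhs) != sizeof(TRhs) is possible.
        static std::size_t lhs_panel_bytes(std::size_t K) { return Height * KUnroll * K * sizeof(TLhs); }
        static std::size_t rhs_panel_bytes(std::size_t K) { return Width  * KUnroll * K * sizeof(TRhs); }
    };

    // Same-type instantiation, mirroring the 8x6 kernel's transforms member:
    using SgemmTransforms = ToyTransforms<float, float, float, 8, 6, 1>;
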
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp
index 9b81374d2d..fb5044684f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x6/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,305 +64,305 @@ void a64_sgemm_asimd_8x6(const float *Apanel, const float *Bpanel, float *Cpanel
__asm __volatile (
// Initialize result registers, load initial operands, prime prefetches.
- "movi v8.2s, #0x0\n"
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "movi v9.2s, #0x0\n"
- "movi v10.2s, #0x0\n"
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
- "movi v11.2s, #0x0\n"
- "movi v12.2s, #0x0\n"
- "movi v13.2s, #0x0\n"
- "movi v14.2s, #0x0\n"
+ "movi v8.2s, #0x0\n"
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "movi v9.2s, #0x0\n"
+ "movi v10.2s, #0x0\n"
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "movi v11.2s, #0x0\n"
+ "movi v12.2s, #0x0\n"
+ "movi v13.2s, #0x0\n"
+ "movi v14.2s, #0x0\n"
ASM_PREFETCH("[%[b_ptr], #64]")
ASM_PREFETCHU("[%[a_ptr], #52]")
ASM_PREFETCHU("[%[a_ptr], #116]")
ASM_PREFETCH("[%[b_ptr], #128]")
- "movi v15.2s, #0x0\n"
- "movi v16.2s, #0x0\n"
- "movi v17.2s, #0x0\n"
- "movi v18.2s, #0x0\n"
- "movi v19.2s, #0x0\n"
- "movi v20.2s, #0x0\n"
- "movi v21.2s, #0x0\n"
- "movi v22.2s, #0x0\n"
- "movi v23.2s, #0x0\n"
- "movi v24.2s, #0x0\n"
- "movi v25.2s, #0x0\n"
- "movi v26.2s, #0x0\n"
- "movi v27.2s, #0x0\n"
- "movi v28.2s, #0x0\n"
- "movi v29.2s, #0x0\n"
- "movi v30.2s, #0x0\n"
- "movi v31.2s, #0x0\n"
+ "movi v15.2s, #0x0\n"
+ "movi v16.2s, #0x0\n"
+ "movi v17.2s, #0x0\n"
+ "movi v18.2s, #0x0\n"
+ "movi v19.2s, #0x0\n"
+ "movi v20.2s, #0x0\n"
+ "movi v21.2s, #0x0\n"
+ "movi v22.2s, #0x0\n"
+ "movi v23.2s, #0x0\n"
+ "movi v24.2s, #0x0\n"
+ "movi v25.2s, #0x0\n"
+ "movi v26.2s, #0x0\n"
+ "movi v27.2s, #0x0\n"
+ "movi v28.2s, #0x0\n"
+ "movi v29.2s, #0x0\n"
+ "movi v30.2s, #0x0\n"
+ "movi v31.2s, #0x0\n"
// Skip loop if we are doing zero iterations of it.
- "cbz %w[k], 4f\n"
+ "cbz %w[k], 4f\n"
// Loop proper
"1:\n"
- "ldr %d[b0], [%[b_ptr], #0]\n"
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
- "ldr %d[b1], [%[b_ptr], #8]\n"
- "fmla v8.2s , %[b0].2s, %[a0].2s\n"
- "fmla v9.2s , %[b0].2s, %[a1].2s\n"
- "fmla v10.2s, %[b0].2s, %[a2].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v16.2s, %[b1].2s, %[a0].2s\n"
- "fmla v17.2s, %[b1].2s, %[a1].2s\n"
- "fmla v11.2s, %[b0].2s, %[a3].2s\n"
-
- "ldr %d[b2], [%[b_ptr], #16]\n"
- "fmla v18.2s, %[b1].2s, %[a2].2s\n"
- "fmla v19.2s, %[b1].2s, %[a3].2s\n"
- "fmla v24.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "fmla v25.2s, %[b2].2s, %[a1].2s\n"
- "fmla v26.2s, %[b2].2s, %[a2].2s\n"
- "fmla v27.2s, %[b2].2s, %[a3].2s\n"
-
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
- "fmla v12.2s, %[b0].2s, %[a0].2s\n"
- "fmla v20.2s, %[b1].2s, %[a0].2s\n"
- "fmla v28.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
- "fmla v13.2s, %[b0].2s, %[a1].2s\n"
- "fmla v21.2s, %[b1].2s, %[a1].2s\n"
- "fmla v29.2s, %[b2].2s, %[a1].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v14.2s, %[b0].2s, %[a2].2s\n"
- "fmla v22.2s, %[b1].2s, %[a2].2s\n"
- "fmla v30.2s, %[b2].2s, %[a2].2s\n"
-
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "fmla v15.2s, %[b0].2s, %[a3].2s\n"
- "fmla v23.2s, %[b1].2s, %[a3].2s\n"
- "fmla v31.2s, %[b2].2s, %[a3].2s\n"
-
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "ldr %d[b0], [%[b_ptr], #0]\n"
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+ "ldr %d[b1], [%[b_ptr], #8]\n"
+ "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+ "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+ "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+ "ldr %d[b2], [%[b_ptr], #16]\n"
+ "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+ "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+ "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+ "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+ "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+ "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+ "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+ "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "subs %w[k], %w[k], #1\n"
+ "subs %w[k], %w[k], #1\n"
ASM_PREFETCHU("[%[a_ptr], #156]")
- "ldr %d[b0], [%[b_ptr], #24]\n"
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
-
- "ldr %d[b1], [%[b_ptr], #32]\n"
- "fmla v8.2s , %[b0].2s, %[a0].2s\n"
- "fmla v9.2s , %[b0].2s, %[a1].2s\n"
- "fmla v10.2s, %[b0].2s, %[a2].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v16.2s, %[b1].2s, %[a0].2s\n"
- "fmla v17.2s, %[b1].2s, %[a1].2s\n"
- "fmla v11.2s, %[b0].2s, %[a3].2s\n"
-
- "ldr %d[b2], [%[b_ptr], #40]\n"
- "fmla v18.2s, %[b1].2s, %[a2].2s\n"
- "fmla v19.2s, %[b1].2s, %[a3].2s\n"
- "fmla v24.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "fmla v25.2s, %[b2].2s, %[a1].2s\n"
- "fmla v26.2s, %[b2].2s, %[a2].2s\n"
- "fmla v27.2s, %[b2].2s, %[a3].2s\n"
-
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
- "fmla v12.2s, %[b0].2s, %[a0].2s\n"
- "fmla v20.2s, %[b1].2s, %[a0].2s\n"
- "fmla v28.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
- "fmla v13.2s, %[b0].2s, %[a1].2s\n"
- "fmla v21.2s, %[b1].2s, %[a1].2s\n"
- "fmla v29.2s, %[b2].2s, %[a1].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v14.2s, %[b0].2s, %[a2].2s\n"
- "fmla v22.2s, %[b1].2s, %[a2].2s\n"
- "fmla v30.2s, %[b2].2s, %[a2].2s\n"
-
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "fmla v15.2s, %[b0].2s, %[a3].2s\n"
- "fmla v23.2s, %[b1].2s, %[a3].2s\n"
- "fmla v31.2s, %[b2].2s, %[a3].2s\n"
-
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
- "add %[b_ptr], %[b_ptr], #48\n"
+ "ldr %d[b0], [%[b_ptr], #24]\n"
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+
+ "ldr %d[b1], [%[b_ptr], #32]\n"
+ "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+ "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+ "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+ "ldr %d[b2], [%[b_ptr], #40]\n"
+ "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+ "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+ "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+ "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+ "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+ "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+ "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+ "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
ASM_PREFETCHU("[%[a_ptr], #188]")
- "bne 1b\n"
+ "bne 1b\n"
// Target to use when K is 1 or 2 (i.e. zero iterations of main loop)
"4:\n"
ASM_PREFETCH("[%[c_ptr]]")
ASM_PREFETCH("[%[c_ptr], #64]")
- "ldr %d[b0], [%[b_ptr]]\n"
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+ "ldr %d[b0], [%[b_ptr]]\n"
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
// Branch to alternative tail for odd K
- "cbnz %w[oddk], 2f\n"
+ "cbnz %w[oddk], 2f\n"
// Detached final iteration (even K)
- "ldr %d[b1], [%[b_ptr], #8]\n"
- "fmla v8.2s , %[b0].2s, %[a0].2s\n"
- "fmla v9.2s , %[b0].2s, %[a1].2s\n"
- "fmla v10.2s, %[b0].2s, %[a2].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v16.2s, %[b1].2s, %[a0].2s\n"
- "fmla v17.2s, %[b1].2s, %[a1].2s\n"
- "fmla v11.2s, %[b0].2s, %[a3].2s\n"
-
- "ldr %d[b2], [%[b_ptr], #16]\n"
- "fmla v18.2s, %[b1].2s, %[a2].2s\n"
- "fmla v19.2s, %[b1].2s, %[a3].2s\n"
- "fmla v24.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "fmla v25.2s, %[b2].2s, %[a1].2s\n"
- "fmla v26.2s, %[b2].2s, %[a2].2s\n"
- "fmla v27.2s, %[b2].2s, %[a3].2s\n"
-
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
- "fmla v12.2s, %[b0].2s, %[a0].2s\n"
- "fmla v20.2s, %[b1].2s, %[a0].2s\n"
- "fmla v28.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
- "fmla v13.2s, %[b0].2s, %[a1].2s\n"
- "fmla v21.2s, %[b1].2s, %[a1].2s\n"
- "fmla v29.2s, %[b2].2s, %[a1].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v14.2s, %[b0].2s, %[a2].2s\n"
- "fmla v22.2s, %[b1].2s, %[a2].2s\n"
- "fmla v30.2s, %[b2].2s, %[a2].2s\n"
-
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "fmla v15.2s, %[b0].2s, %[a3].2s\n"
- "fmla v23.2s, %[b1].2s, %[a3].2s\n"
- "fmla v31.2s, %[b2].2s, %[a3].2s\n"
-
- "ldr %d[b0], [%[b_ptr], #24]\n"
- "add %[b_ptr], %[b_ptr], #48\n"
+ "ldr %d[b1], [%[b_ptr], #8]\n"
+ "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+ "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+ "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+ "ldr %d[b2], [%[b_ptr], #16]\n"
+ "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+ "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+ "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+ "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+ "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+ "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+ "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+ "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+ "ldr %d[b0], [%[b_ptr], #24]\n"
+ "add %[b_ptr], %[b_ptr], #48\n"
ASM_PREFETCH("[%[b_ptr], #128]")
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
-
- "ldr %d[b1], [%[b_ptr], #-16]\n"
- "fmla v8.2s , %[b0].2s, %[a0].2s\n"
- "fmla v9.2s , %[b0].2s, %[a1].2s\n"
- "fmla v10.2s, %[b0].2s, %[a2].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v16.2s, %[b1].2s, %[a0].2s\n"
- "fmla v17.2s, %[b1].2s, %[a1].2s\n"
- "fmla v11.2s, %[b0].2s, %[a3].2s\n"
-
- "ldr %d[b2], [%[b_ptr], #-8]\n"
- "fmla v18.2s, %[b1].2s, %[a2].2s\n"
- "fmla v19.2s, %[b1].2s, %[a3].2s\n"
- "fmla v24.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "fmla v25.2s, %[b2].2s, %[a1].2s\n"
- "fmla v26.2s, %[b2].2s, %[a2].2s\n"
- "fmla v27.2s, %[b2].2s, %[a3].2s\n"
-
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
- "fmla v12.2s, %[b0].2s, %[a0].2s\n"
- "fmla v20.2s, %[b1].2s, %[a0].2s\n"
- "fmla v28.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
- "fmla v13.2s, %[b0].2s, %[a1].2s\n"
- "fmla v21.2s, %[b1].2s, %[a1].2s\n"
- "fmla v29.2s, %[b2].2s, %[a1].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v14.2s, %[b0].2s, %[a2].2s\n"
- "fmla v22.2s, %[b1].2s, %[a2].2s\n"
- "fmla v30.2s, %[b2].2s, %[a2].2s\n"
-
- "fmla v15.2s, %[b0].2s, %[a3].2s\n"
- "fmla v23.2s, %[b1].2s, %[a3].2s\n"
- "fmla v31.2s, %[b2].2s, %[a3].2s\n"
-
- "b 3f\n"
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+
+ "ldr %d[b1], [%[b_ptr], #-16]\n"
+ "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+ "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+ "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+ "ldr %d[b2], [%[b_ptr], #-8]\n"
+ "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+ "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+ "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+ "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+ "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+ "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+ "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+ "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+ "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+ "b 3f\n"
// Detached final iteration (odd K)
"2:\n"
- "ldr %d[b1], [%[b_ptr], #8]\n"
- "fmla v8.2s , %[b0].2s, %[a0].2s\n"
- "fmla v9.2s , %[b0].2s, %[a1].2s\n"
- "fmla v10.2s, %[b0].2s, %[a2].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v16.2s, %[b1].2s, %[a0].2s\n"
- "fmla v17.2s, %[b1].2s, %[a1].2s\n"
- "fmla v11.2s, %[b0].2s, %[a3].2s\n"
-
- "ldr %d[b2], [%[b_ptr], #16]\n"
- "fmla v18.2s, %[b1].2s, %[a2].2s\n"
- "fmla v19.2s, %[b1].2s, %[a3].2s\n"
- "fmla v24.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
- "fmla v25.2s, %[b2].2s, %[a1].2s\n"
- "fmla v26.2s, %[b2].2s, %[a2].2s\n"
- "fmla v27.2s, %[b2].2s, %[a3].2s\n"
-
- "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
- "fmla v12.2s, %[b0].2s, %[a0].2s\n"
- "fmla v20.2s, %[b1].2s, %[a0].2s\n"
- "fmla v28.2s, %[b2].2s, %[a0].2s\n"
-
- "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
- "fmla v13.2s, %[b0].2s, %[a1].2s\n"
- "fmla v21.2s, %[b1].2s, %[a1].2s\n"
- "fmla v29.2s, %[b2].2s, %[a1].2s\n"
-
- "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
- "fmla v14.2s, %[b0].2s, %[a2].2s\n"
- "fmla v22.2s, %[b1].2s, %[a2].2s\n"
- "fmla v30.2s, %[b2].2s, %[a2].2s\n"
-
- "fmla v15.2s, %[b0].2s, %[a3].2s\n"
- "fmla v23.2s, %[b1].2s, %[a3].2s\n"
- "fmla v31.2s, %[b2].2s, %[a3].2s\n"
-
- "add %[b_ptr], %[b_ptr], #24\n"
+ "ldr %d[b1], [%[b_ptr], #8]\n"
+ "fmla v8.2s , %[b0].2s, %[a0].2s\n"
+ "fmla v9.2s , %[b0].2s, %[a1].2s\n"
+ "fmla v10.2s, %[b0].2s, %[a2].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v16.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v17.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v11.2s, %[b0].2s, %[a3].2s\n"
+
+ "ldr %d[b2], [%[b_ptr], #16]\n"
+ "fmla v18.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v19.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v24.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a0].2s }, [%[a_ptr]], #4\n"
+ "fmla v25.2s, %[b2].2s, %[a1].2s\n"
+ "fmla v26.2s, %[b2].2s, %[a2].2s\n"
+ "fmla v27.2s, %[b2].2s, %[a3].2s\n"
+
+ "ld1r { %[a1].2s }, [%[a_ptr]], #4\n"
+ "fmla v12.2s, %[b0].2s, %[a0].2s\n"
+ "fmla v20.2s, %[b1].2s, %[a0].2s\n"
+ "fmla v28.2s, %[b2].2s, %[a0].2s\n"
+
+ "ld1r { %[a2].2s }, [%[a_ptr]], #4\n"
+ "fmla v13.2s, %[b0].2s, %[a1].2s\n"
+ "fmla v21.2s, %[b1].2s, %[a1].2s\n"
+ "fmla v29.2s, %[b2].2s, %[a1].2s\n"
+
+ "ld1r { %[a3].2s }, [%[a_ptr]], #4\n"
+ "fmla v14.2s, %[b0].2s, %[a2].2s\n"
+ "fmla v22.2s, %[b1].2s, %[a2].2s\n"
+ "fmla v30.2s, %[b2].2s, %[a2].2s\n"
+
+ "fmla v15.2s, %[b0].2s, %[a3].2s\n"
+ "fmla v23.2s, %[b1].2s, %[a3].2s\n"
+ "fmla v31.2s, %[b2].2s, %[a3].2s\n"
+
+ "add %[b_ptr], %[b_ptr], #24\n"
// Common tail
"3:\n"
- "str d8, [%[c_ptr], #0]\n"
- "str d16, [%[c_ptr], #8]\n"
- "str d24, [%[c_ptr], #16]\n"
- "str d9, [%[c_ptr], #24]\n"
- "str d17, [%[c_ptr], #32]\n"
- "str d25, [%[c_ptr], #40]\n"
- "str d10, [%[c_ptr], #48]\n"
- "str d18, [%[c_ptr], #56]\n"
- "str d26, [%[c_ptr], #64]\n"
- "str d11, [%[c_ptr], #72]\n"
- "str d19, [%[c_ptr], #80]\n"
- "str d27, [%[c_ptr], #88]\n"
- "str d12, [%[c_ptr], #96]\n"
- "str d20, [%[c_ptr], #104]\n"
- "str d28, [%[c_ptr], #112]\n"
- "str d13, [%[c_ptr], #120]\n"
- "str d21, [%[c_ptr], #128]\n"
- "str d29, [%[c_ptr], #136]\n"
- "str d14, [%[c_ptr], #144]\n"
- "str d22, [%[c_ptr], #152]\n"
- "str d30, [%[c_ptr], #160]\n"
- "str d15, [%[c_ptr], #168]\n"
- "str d23, [%[c_ptr], #176]\n"
- "str d31, [%[c_ptr], #184]\n"
- "add %[c_ptr], %[c_ptr], #192\n"
+ "str d8, [%[c_ptr], #0]\n"
+ "str d16, [%[c_ptr], #8]\n"
+ "str d24, [%[c_ptr], #16]\n"
+ "str d9, [%[c_ptr], #24]\n"
+ "str d17, [%[c_ptr], #32]\n"
+ "str d25, [%[c_ptr], #40]\n"
+ "str d10, [%[c_ptr], #48]\n"
+ "str d18, [%[c_ptr], #56]\n"
+ "str d26, [%[c_ptr], #64]\n"
+ "str d11, [%[c_ptr], #72]\n"
+ "str d19, [%[c_ptr], #80]\n"
+ "str d27, [%[c_ptr], #88]\n"
+ "str d12, [%[c_ptr], #96]\n"
+ "str d20, [%[c_ptr], #104]\n"
+ "str d28, [%[c_ptr], #112]\n"
+ "str d13, [%[c_ptr], #120]\n"
+ "str d21, [%[c_ptr], #128]\n"
+ "str d29, [%[c_ptr], #136]\n"
+ "str d14, [%[c_ptr], #144]\n"
+ "str d22, [%[c_ptr], #152]\n"
+ "str d30, [%[c_ptr], #160]\n"
+ "str d15, [%[c_ptr], #168]\n"
+ "str d23, [%[c_ptr], #176]\n"
+ "str d31, [%[c_ptr], #184]\n"
+ "add %[c_ptr], %[c_ptr], #192\n"
:
[a_ptr] "+r" (a_ptr), [b_ptr] "+r" (b_ptr), [c_ptr] "+r" (c_ptr),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_pretransposed/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_pretransposed/generic.cpp
index 0640cece0d..3616f39f2a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_pretransposed/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_pretransposed/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018 Arm Limited.
+ * Copyright (c) 2017-2018, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -170,28 +170,28 @@ void a64_sgemv_pretransposed(const float *A, int lda, const float *X, float *Y,
x0 = vld1q_f32(x_ptr);
__asm __volatile (
- "ldr q2, [%[a_ptr], #0]\n"
- "ldr q3, [%[a_ptr], #16]\n"
- "ldr q4, [%[a_ptr], #32]\n"
- "ldr q5, [%[a_ptr], #48]\n"
- "ldr q6, [%[a_ptr], #64]\n"
- "ldr q7, [%[a_ptr], #80]\n"
- "ldr q8, [%[a_ptr], #96]\n"
- "ldr q9, [%[a_ptr], #112]\n"
- "ldr q10, [%[a_ptr], #128]\n"
- "ldr q11, [%[a_ptr], #144]\n"
- "ldr q12, [%[a_ptr], #160]\n"
- "ldr q13, [%[a_ptr], #176]\n"
- "ldr q14, [%[a_ptr], #192]\n"
- "ldr q15, [%[a_ptr], #208]\n"
- "ldr q16, [%[a_ptr], #224]\n"
- "ldr q17, [%[a_ptr], #240]\n"
- "ldr q18, [%[a_ptr], #256]\n"
- "ldr q19, [%[a_ptr], #272]\n"
- "ldr q20, [%[a_ptr], #288]\n"
- "ldr q21, [%[a_ptr], #304]\n"
- "ldr q22, [%[a_ptr], #320]\n"
- "ldr q23, [%[a_ptr], #336]\n"
+ "ldr q2, [%[a_ptr], #0]\n"
+ "ldr q3, [%[a_ptr], #16]\n"
+ "ldr q4, [%[a_ptr], #32]\n"
+ "ldr q5, [%[a_ptr], #48]\n"
+ "ldr q6, [%[a_ptr], #64]\n"
+ "ldr q7, [%[a_ptr], #80]\n"
+ "ldr q8, [%[a_ptr], #96]\n"
+ "ldr q9, [%[a_ptr], #112]\n"
+ "ldr q10, [%[a_ptr], #128]\n"
+ "ldr q11, [%[a_ptr], #144]\n"
+ "ldr q12, [%[a_ptr], #160]\n"
+ "ldr q13, [%[a_ptr], #176]\n"
+ "ldr q14, [%[a_ptr], #192]\n"
+ "ldr q15, [%[a_ptr], #208]\n"
+ "ldr q16, [%[a_ptr], #224]\n"
+ "ldr q17, [%[a_ptr], #240]\n"
+ "ldr q18, [%[a_ptr], #256]\n"
+ "ldr q19, [%[a_ptr], #272]\n"
+ "ldr q20, [%[a_ptr], #288]\n"
+ "ldr q21, [%[a_ptr], #304]\n"
+ "ldr q22, [%[a_ptr], #320]\n"
+ "ldr q23, [%[a_ptr], #336]\n"
ASM_PREFETCH("[%[a_ptr], #384]")
ASM_PREFETCH("[%[a_ptr], #448]")
ASM_PREFETCH("[%[a_ptr], #512]")
@@ -218,305 +218,305 @@ void a64_sgemv_pretransposed(const float *A, int lda, const float *X, float *Y,
ASM_PREFETCH("[%[a_ptr], #1856]")
ASM_PREFETCH("[%[a_ptr], #1920]")
ASM_PREFETCH("[%[a_ptr], #1984]")
- "add %[a_ptr], %[a_ptr], #352\n"
+ "add %[a_ptr], %[a_ptr], #352\n"
- "cbz %w[k], 2f\n"
+ "cbz %w[k], 2f\n"
"1:\n"
// Unroll 0
- "fmla %[r0].4s, v2.4s, %[x0].s[0]\n"
- "ldr %q[x0a], [%[x_ptr], #16]\n"
- "fmla %[r1].4s, v3.4s, %[x0].s[0]\n"
- "ldr q3, [%[a_ptr], #0]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla %[r2].4s, v4.4s, %[x0].s[0]\n"
- "ldr q4, [%[a_ptr], #16]\n"
- "fmla %[r3].4s, v5.4s, %[x0].s[0]\n"
- "ldr q5, [%[a_ptr], #32]\n"
- "add %[x_ptr], %[x_ptr], #32\n"
+ "fmla %[r0].4s, v2.4s, %[x0].s[0]\n"
+ "ldr %q[x0a], [%[x_ptr], #16]\n"
+ "fmla %[r1].4s, v3.4s, %[x0].s[0]\n"
+ "ldr q3, [%[a_ptr], #0]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla %[r2].4s, v4.4s, %[x0].s[0]\n"
+ "ldr q4, [%[a_ptr], #16]\n"
+ "fmla %[r3].4s, v5.4s, %[x0].s[0]\n"
+ "ldr q5, [%[a_ptr], #32]\n"
+ "add %[x_ptr], %[x_ptr], #32\n"
ASM_PREFETCH("[%[a_ptr], #1664]")
- "fmla %[r4].4s, v6.4s, %[x0].s[0]\n"
- "ldr q6, [%[a_ptr], #48]\n"
- "fmla %[r5].4s, v7.4s, %[x0].s[0]\n"
- "ldr q7, [%[a_ptr], #64]\n"
- "fmla %[r6].4s, v8.4s, %[x0].s[0]\n"
- "ldr q8, [%[a_ptr], #80]\n"
- "fmla %[r7].4s, v9.4s, %[x0].s[0]\n"
- "ldr q9, [%[a_ptr], #96]\n"
+ "fmla %[r4].4s, v6.4s, %[x0].s[0]\n"
+ "ldr q6, [%[a_ptr], #48]\n"
+ "fmla %[r5].4s, v7.4s, %[x0].s[0]\n"
+ "ldr q7, [%[a_ptr], #64]\n"
+ "fmla %[r6].4s, v8.4s, %[x0].s[0]\n"
+ "ldr q8, [%[a_ptr], #80]\n"
+ "fmla %[r7].4s, v9.4s, %[x0].s[0]\n"
+ "ldr q9, [%[a_ptr], #96]\n"
ASM_PREFETCH("[%[a_ptr], #1728]")
// Unroll 1
- "fmla %[r0].4s, v10.4s, %[x0].s[1]\n"
- "ldr q10, [%[a_ptr], #112]\n"
- "fmla %[r1].4s, v11.4s, %[x0].s[1]\n"
- "ldr q11, [%[a_ptr], #128]\n"
- "fmla %[r2].4s, v12.4s, %[x0].s[1]\n"
- "ldr q12, [%[a_ptr], #144]\n"
- "fmla %[r3].4s, v13.4s, %[x0].s[1]\n"
- "ldr q13, [%[a_ptr], #160]\n"
+ "fmla %[r0].4s, v10.4s, %[x0].s[1]\n"
+ "ldr q10, [%[a_ptr], #112]\n"
+ "fmla %[r1].4s, v11.4s, %[x0].s[1]\n"
+ "ldr q11, [%[a_ptr], #128]\n"
+ "fmla %[r2].4s, v12.4s, %[x0].s[1]\n"
+ "ldr q12, [%[a_ptr], #144]\n"
+ "fmla %[r3].4s, v13.4s, %[x0].s[1]\n"
+ "ldr q13, [%[a_ptr], #160]\n"
ASM_PREFETCH("[%[a_ptr], #1792]")
- "fmla %[r4].4s, v14.4s, %[x0].s[1]\n"
- "ldr q14, [%[a_ptr], #176]\n"
- "fmla %[r5].4s, v15.4s, %[x0].s[1]\n"
- "ldr q15, [%[a_ptr], #192]\n"
- "fmla %[r6].4s, v16.4s, %[x0].s[1]\n"
- "ldr q16, [%[a_ptr], #208]\n"
- "fmla %[r7].4s, v17.4s, %[x0].s[1]\n"
- "ldr q17, [%[a_ptr], #224]\n"
+ "fmla %[r4].4s, v14.4s, %[x0].s[1]\n"
+ "ldr q14, [%[a_ptr], #176]\n"
+ "fmla %[r5].4s, v15.4s, %[x0].s[1]\n"
+ "ldr q15, [%[a_ptr], #192]\n"
+ "fmla %[r6].4s, v16.4s, %[x0].s[1]\n"
+ "ldr q16, [%[a_ptr], #208]\n"
+ "fmla %[r7].4s, v17.4s, %[x0].s[1]\n"
+ "ldr q17, [%[a_ptr], #224]\n"
ASM_PREFETCH("[%[a_ptr], #1856]")
// Unroll 2
- "fmla %[r0].4s, v18.4s, %[x0].s[2]\n"
- "ldr q18, [%[a_ptr], #240]\n"
- "fmla %[r1].4s, v19.4s, %[x0].s[2]\n"
- "ldr q19, [%[a_ptr], #256]\n"
- "fmla %[r2].4s, v20.4s, %[x0].s[2]\n"
- "ldr q20, [%[a_ptr], #272]\n"
- "fmla %[r3].4s, v21.4s, %[x0].s[2]\n"
- "ldr q21, [%[a_ptr], #288]\n"
+ "fmla %[r0].4s, v18.4s, %[x0].s[2]\n"
+ "ldr q18, [%[a_ptr], #240]\n"
+ "fmla %[r1].4s, v19.4s, %[x0].s[2]\n"
+ "ldr q19, [%[a_ptr], #256]\n"
+ "fmla %[r2].4s, v20.4s, %[x0].s[2]\n"
+ "ldr q20, [%[a_ptr], #272]\n"
+ "fmla %[r3].4s, v21.4s, %[x0].s[2]\n"
+ "ldr q21, [%[a_ptr], #288]\n"
ASM_PREFETCH("[%[a_ptr], #1920]")
- "fmla %[r4].4s, v22.4s, %[x0].s[2]\n"
- "ldr q22, [%[a_ptr], #304]\n"
- "fmla %[r5].4s, v23.4s, %[x0].s[2]\n"
- "ldr q23, [%[a_ptr], #320]\n"
- "fmla %[r6].4s, v3.4s, %[x0].s[2]\n"
- "ldr q2, [%[a_ptr], #336]\n"
- "ldr q3, [%[a_ptr], #352]\n"
- "fmla %[r7].4s, v4.4s, %[x0].s[2]\n"
- "ldr q4, [%[a_ptr], #368]\n"
+ "fmla %[r4].4s, v22.4s, %[x0].s[2]\n"
+ "ldr q22, [%[a_ptr], #304]\n"
+ "fmla %[r5].4s, v23.4s, %[x0].s[2]\n"
+ "ldr q23, [%[a_ptr], #320]\n"
+ "fmla %[r6].4s, v3.4s, %[x0].s[2]\n"
+ "ldr q2, [%[a_ptr], #336]\n"
+ "ldr q3, [%[a_ptr], #352]\n"
+ "fmla %[r7].4s, v4.4s, %[x0].s[2]\n"
+ "ldr q4, [%[a_ptr], #368]\n"
ASM_PREFETCH("[%[a_ptr], #1984]")
// Unroll 3
- "fmla %[r0].4s, v5.4s, %[x0].s[3]\n"
- "ldr q5, [%[a_ptr], #384]\n"
- "fmla %[r1].4s, v6.4s, %[x0].s[3]\n"
- "ldr q6, [%[a_ptr], #400]\n"
- "fmla %[r2].4s, v7.4s, %[x0].s[3]\n"
- "ldr q7, [%[a_ptr], #416]\n"
- "fmla %[r3].4s, v8.4s, %[x0].s[3]\n"
+ "fmla %[r0].4s, v5.4s, %[x0].s[3]\n"
+ "ldr q5, [%[a_ptr], #384]\n"
+ "fmla %[r1].4s, v6.4s, %[x0].s[3]\n"
+ "ldr q6, [%[a_ptr], #400]\n"
+ "fmla %[r2].4s, v7.4s, %[x0].s[3]\n"
+ "ldr q7, [%[a_ptr], #416]\n"
+ "fmla %[r3].4s, v8.4s, %[x0].s[3]\n"
ASM_PREFETCH("[%[a_ptr], #2048]")
- "ldr q8, [%[a_ptr], #432]\n"
- "fmla %[r4].4s, v9.4s, %[x0].s[3]\n"
- "ldr q9, [%[a_ptr], #448]\n"
- "fmla %[r5].4s, v10.4s, %[x0].s[3]\n"
- "ldr q10, [%[a_ptr], #464]\n"
- "fmla %[r6].4s, v11.4s, %[x0].s[3]\n"
- "ldr q11, [%[a_ptr], #480]\n"
- "fmla %[r7].4s, v12.4s, %[x0].s[3]\n"
- "ldr q12, [%[a_ptr], #496]\n"
+ "ldr q8, [%[a_ptr], #432]\n"
+ "fmla %[r4].4s, v9.4s, %[x0].s[3]\n"
+ "ldr q9, [%[a_ptr], #448]\n"
+ "fmla %[r5].4s, v10.4s, %[x0].s[3]\n"
+ "ldr q10, [%[a_ptr], #464]\n"
+ "fmla %[r6].4s, v11.4s, %[x0].s[3]\n"
+ "ldr q11, [%[a_ptr], #480]\n"
+ "fmla %[r7].4s, v12.4s, %[x0].s[3]\n"
+ "ldr q12, [%[a_ptr], #496]\n"
ASM_PREFETCH("[%[a_ptr], #2112]")
// Unroll 4
- "fmla %[r0].4s, v13.4s, %[x0a].s[0]\n"
- "ldr %q[x0], [%[x_ptr]]\n"
- "fmla %[r1].4s, v14.4s, %[x0a].s[0]\n"
- "ldr q14, [%[a_ptr], #512]\n"
- "fmla %[r2].4s, v15.4s, %[x0a].s[0]\n"
- "ldr q15, [%[a_ptr], #528]\n"
- "fmla %[r3].4s, v16.4s, %[x0a].s[0]\n"
+ "fmla %[r0].4s, v13.4s, %[x0a].s[0]\n"
+ "ldr %q[x0], [%[x_ptr]]\n"
+ "fmla %[r1].4s, v14.4s, %[x0a].s[0]\n"
+ "ldr q14, [%[a_ptr], #512]\n"
+ "fmla %[r2].4s, v15.4s, %[x0a].s[0]\n"
+ "ldr q15, [%[a_ptr], #528]\n"
+ "fmla %[r3].4s, v16.4s, %[x0a].s[0]\n"
ASM_PREFETCH("[%[a_ptr], #2176]")
- "ldr q16, [%[a_ptr], #544]\n"
- "fmla %[r4].4s, v17.4s, %[x0a].s[0]\n"
- "ldr q17, [%[a_ptr], #560]\n"
- "fmla %[r5].4s, v18.4s, %[x0a].s[0]\n"
- "ldr q18, [%[a_ptr], #576]\n"
- "fmla %[r6].4s, v19.4s, %[x0a].s[0]\n"
- "ldr q19, [%[a_ptr], #592]\n"
- "fmla %[r7].4s, v20.4s, %[x0a].s[0]\n"
- "ldr q20, [%[a_ptr], #608]\n"
+ "ldr q16, [%[a_ptr], #544]\n"
+ "fmla %[r4].4s, v17.4s, %[x0a].s[0]\n"
+ "ldr q17, [%[a_ptr], #560]\n"
+ "fmla %[r5].4s, v18.4s, %[x0a].s[0]\n"
+ "ldr q18, [%[a_ptr], #576]\n"
+ "fmla %[r6].4s, v19.4s, %[x0a].s[0]\n"
+ "ldr q19, [%[a_ptr], #592]\n"
+ "fmla %[r7].4s, v20.4s, %[x0a].s[0]\n"
+ "ldr q20, [%[a_ptr], #608]\n"
ASM_PREFETCH("[%[a_ptr], #2240]")
// Unroll 5
- "fmla %[r0].4s, v21.4s, %[x0a].s[1]\n"
- "ldr q21, [%[a_ptr], #624]\n"
- "fmla %[r1].4s, v22.4s, %[x0a].s[1]\n"
- "ldr q22, [%[a_ptr], #640]\n"
- "fmla %[r2].4s, v23.4s, %[x0a].s[1]\n"
- "ldr q23, [%[a_ptr], #656]\n"
- "fmla %[r3].4s, v2.4s, %[x0a].s[1]\n"
- "ldr q2, [%[a_ptr], #672]\n"
+ "fmla %[r0].4s, v21.4s, %[x0a].s[1]\n"
+ "ldr q21, [%[a_ptr], #624]\n"
+ "fmla %[r1].4s, v22.4s, %[x0a].s[1]\n"
+ "ldr q22, [%[a_ptr], #640]\n"
+ "fmla %[r2].4s, v23.4s, %[x0a].s[1]\n"
+ "ldr q23, [%[a_ptr], #656]\n"
+ "fmla %[r3].4s, v2.4s, %[x0a].s[1]\n"
+ "ldr q2, [%[a_ptr], #672]\n"
ASM_PREFETCH("[%[a_ptr], #2304]")
- "fmla %[r4].4s, v3.4s, %[x0a].s[1]\n"
- "ldr q3, [%[a_ptr], #688]\n"
- "fmla %[r5].4s, v4.4s, %[x0a].s[1]\n"
- "ldr q4, [%[a_ptr], #704]\n"
- "fmla %[r6].4s, v5.4s, %[x0a].s[1]\n"
- "ldr q5, [%[a_ptr], #720]\n"
- "fmla %[r7].4s, v6.4s, %[x0a].s[1]\n"
- "ldr q6, [%[a_ptr], #736]\n"
+ "fmla %[r4].4s, v3.4s, %[x0a].s[1]\n"
+ "ldr q3, [%[a_ptr], #688]\n"
+ "fmla %[r5].4s, v4.4s, %[x0a].s[1]\n"
+ "ldr q4, [%[a_ptr], #704]\n"
+ "fmla %[r6].4s, v5.4s, %[x0a].s[1]\n"
+ "ldr q5, [%[a_ptr], #720]\n"
+ "fmla %[r7].4s, v6.4s, %[x0a].s[1]\n"
+ "ldr q6, [%[a_ptr], #736]\n"
ASM_PREFETCH("[%[a_ptr], #2368]")
// Unroll 6
- "fmla %[r0].4s, v7.4s, %[x0a].s[2]\n"
- "ldr q7, [%[a_ptr], #752]\n"
- "fmla %[r1].4s, v8.4s, %[x0a].s[2]\n"
- "ldr q8, [%[a_ptr], #768]\n"
- "fmla %[r2].4s, v9.4s, %[x0a].s[2]\n"
- "ldr q9, [%[a_ptr], #784]\n"
- "fmla %[r3].4s, v10.4s, %[x0a].s[2]\n"
- "ldr q10, [%[a_ptr], #800]\n"
+ "fmla %[r0].4s, v7.4s, %[x0a].s[2]\n"
+ "ldr q7, [%[a_ptr], #752]\n"
+ "fmla %[r1].4s, v8.4s, %[x0a].s[2]\n"
+ "ldr q8, [%[a_ptr], #768]\n"
+ "fmla %[r2].4s, v9.4s, %[x0a].s[2]\n"
+ "ldr q9, [%[a_ptr], #784]\n"
+ "fmla %[r3].4s, v10.4s, %[x0a].s[2]\n"
+ "ldr q10, [%[a_ptr], #800]\n"
ASM_PREFETCH("[%[a_ptr], #2432]")
- "fmla %[r4].4s, v11.4s, %[x0a].s[2]\n"
- "ldr q11, [%[a_ptr], #816]\n"
- "fmla %[r5].4s, v12.4s, %[x0a].s[2]\n"
- "ldr q12, [%[a_ptr], #832]\n"
- "fmla %[r6].4s, v14.4s, %[x0a].s[2]\n"
- "ldr q13, [%[a_ptr], #848]\n"
- "ldr q14, [%[a_ptr], #864]\n"
- "fmla %[r7].4s, v15.4s, %[x0a].s[2]\n"
- "ldr q15, [%[a_ptr], #880]\n"
+ "fmla %[r4].4s, v11.4s, %[x0a].s[2]\n"
+ "ldr q11, [%[a_ptr], #816]\n"
+ "fmla %[r5].4s, v12.4s, %[x0a].s[2]\n"
+ "ldr q12, [%[a_ptr], #832]\n"
+ "fmla %[r6].4s, v14.4s, %[x0a].s[2]\n"
+ "ldr q13, [%[a_ptr], #848]\n"
+ "ldr q14, [%[a_ptr], #864]\n"
+ "fmla %[r7].4s, v15.4s, %[x0a].s[2]\n"
+ "ldr q15, [%[a_ptr], #880]\n"
ASM_PREFETCH("[%[a_ptr], #2496]")
// Unroll 7
- "fmla %[r0].4s, v16.4s, %[x0a].s[3]\n"
- "ldr q16, [%[a_ptr], #896]\n"
- "fmla %[r1].4s, v17.4s, %[x0a].s[3]\n"
- "ldr q17, [%[a_ptr], #912]\n"
- "fmla %[r2].4s, v18.4s, %[x0a].s[3]\n"
- "ldr q18, [%[a_ptr], #928]\n"
- "fmla %[r3].4s, v19.4s, %[x0a].s[3]\n"
+ "fmla %[r0].4s, v16.4s, %[x0a].s[3]\n"
+ "ldr q16, [%[a_ptr], #896]\n"
+ "fmla %[r1].4s, v17.4s, %[x0a].s[3]\n"
+ "ldr q17, [%[a_ptr], #912]\n"
+ "fmla %[r2].4s, v18.4s, %[x0a].s[3]\n"
+ "ldr q18, [%[a_ptr], #928]\n"
+ "fmla %[r3].4s, v19.4s, %[x0a].s[3]\n"
ASM_PREFETCH("[%[a_ptr], #2560]")
- "ldr q19, [%[a_ptr], #944]\n"
- "fmla %[r4].4s, v20.4s, %[x0a].s[3]\n"
- "ldr q20, [%[a_ptr], #960]\n"
- "fmla %[r5].4s, v21.4s, %[x0a].s[3]\n"
- "ldr q21, [%[a_ptr], #976]\n"
- "add %[a_ptr], %[a_ptr], #1024\n"
- "fmla %[r6].4s, v22.4s, %[x0a].s[3]\n"
- "ldr q22, [%[a_ptr], #-32]\n"
- "fmla %[r7].4s, v23.4s, %[x0a].s[3]\n"
- "ldr q23, [%[a_ptr], #-16]\n"
+ "ldr q19, [%[a_ptr], #944]\n"
+ "fmla %[r4].4s, v20.4s, %[x0a].s[3]\n"
+ "ldr q20, [%[a_ptr], #960]\n"
+ "fmla %[r5].4s, v21.4s, %[x0a].s[3]\n"
+ "ldr q21, [%[a_ptr], #976]\n"
+ "add %[a_ptr], %[a_ptr], #1024\n"
+ "fmla %[r6].4s, v22.4s, %[x0a].s[3]\n"
+ "ldr q22, [%[a_ptr], #-32]\n"
+ "fmla %[r7].4s, v23.4s, %[x0a].s[3]\n"
+ "ldr q23, [%[a_ptr], #-16]\n"
ASM_PREFETCH("[%[a_ptr], #1600]")
- "bne 1b\n"
+ "bne 1b\n"
// Detached final iteration
"2:\n"
// Unroll 0
- "fmla %[r0].4s, v2.4s, %[x0].s[0]\n"
- "ldr %q[x0a], [%[x_ptr], #16]\n"
- "fmla %[r1].4s, v3.4s, %[x0].s[0]\n"
- "ldr q3, [%[a_ptr], #0]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla %[r2].4s, v4.4s, %[x0].s[0]\n"
- "ldr q4, [%[a_ptr], #16]\n"
- "fmla %[r3].4s, v5.4s, %[x0].s[0]\n"
- "ldr q5, [%[a_ptr], #32]\n"
- "add %[x_ptr], %[x_ptr], #32\n"
- "fmla %[r4].4s, v6.4s, %[x0].s[0]\n"
- "ldr q6, [%[a_ptr], #48]\n"
- "fmla %[r5].4s, v7.4s, %[x0].s[0]\n"
- "ldr q7, [%[a_ptr], #64]\n"
- "fmla %[r6].4s, v8.4s, %[x0].s[0]\n"
- "ldr q8, [%[a_ptr], #80]\n"
- "fmla %[r7].4s, v9.4s, %[x0].s[0]\n"
- "ldr q9, [%[a_ptr], #96]\n"
+ "fmla %[r0].4s, v2.4s, %[x0].s[0]\n"
+ "ldr %q[x0a], [%[x_ptr], #16]\n"
+ "fmla %[r1].4s, v3.4s, %[x0].s[0]\n"
+ "ldr q3, [%[a_ptr], #0]\n"
+ "subs %w[k], %w[k], #1\n"
+ "fmla %[r2].4s, v4.4s, %[x0].s[0]\n"
+ "ldr q4, [%[a_ptr], #16]\n"
+ "fmla %[r3].4s, v5.4s, %[x0].s[0]\n"
+ "ldr q5, [%[a_ptr], #32]\n"
+ "add %[x_ptr], %[x_ptr], #32\n"
+ "fmla %[r4].4s, v6.4s, %[x0].s[0]\n"
+ "ldr q6, [%[a_ptr], #48]\n"
+ "fmla %[r5].4s, v7.4s, %[x0].s[0]\n"
+ "ldr q7, [%[a_ptr], #64]\n"
+ "fmla %[r6].4s, v8.4s, %[x0].s[0]\n"
+ "ldr q8, [%[a_ptr], #80]\n"
+ "fmla %[r7].4s, v9.4s, %[x0].s[0]\n"
+ "ldr q9, [%[a_ptr], #96]\n"
// Unroll 1
- "fmla %[r0].4s, v10.4s, %[x0].s[1]\n"
- "ldr q10, [%[a_ptr], #112]\n"
- "fmla %[r1].4s, v11.4s, %[x0].s[1]\n"
- "ldr q11, [%[a_ptr], #128]\n"
- "fmla %[r2].4s, v12.4s, %[x0].s[1]\n"
- "ldr q12, [%[a_ptr], #144]\n"
- "fmla %[r3].4s, v13.4s, %[x0].s[1]\n"
- "ldr q13, [%[a_ptr], #160]\n"
- "fmla %[r4].4s, v14.4s, %[x0].s[1]\n"
- "ldr q14, [%[a_ptr], #176]\n"
- "fmla %[r5].4s, v15.4s, %[x0].s[1]\n"
- "ldr q15, [%[a_ptr], #192]\n"
- "fmla %[r6].4s, v16.4s, %[x0].s[1]\n"
- "ldr q16, [%[a_ptr], #208]\n"
- "fmla %[r7].4s, v17.4s, %[x0].s[1]\n"
- "ldr q17, [%[a_ptr], #224]\n"
+ "fmla %[r0].4s, v10.4s, %[x0].s[1]\n"
+ "ldr q10, [%[a_ptr], #112]\n"
+ "fmla %[r1].4s, v11.4s, %[x0].s[1]\n"
+ "ldr q11, [%[a_ptr], #128]\n"
+ "fmla %[r2].4s, v12.4s, %[x0].s[1]\n"
+ "ldr q12, [%[a_ptr], #144]\n"
+ "fmla %[r3].4s, v13.4s, %[x0].s[1]\n"
+ "ldr q13, [%[a_ptr], #160]\n"
+ "fmla %[r4].4s, v14.4s, %[x0].s[1]\n"
+ "ldr q14, [%[a_ptr], #176]\n"
+ "fmla %[r5].4s, v15.4s, %[x0].s[1]\n"
+ "ldr q15, [%[a_ptr], #192]\n"
+ "fmla %[r6].4s, v16.4s, %[x0].s[1]\n"
+ "ldr q16, [%[a_ptr], #208]\n"
+ "fmla %[r7].4s, v17.4s, %[x0].s[1]\n"
+ "ldr q17, [%[a_ptr], #224]\n"
// Unroll 2
- "fmla %[r0].4s, v18.4s, %[x0].s[2]\n"
- "ldr q18, [%[a_ptr], #240]\n"
- "fmla %[r1].4s, v19.4s, %[x0].s[2]\n"
- "ldr q19, [%[a_ptr], #256]\n"
- "fmla %[r2].4s, v20.4s, %[x0].s[2]\n"
- "ldr q20, [%[a_ptr], #272]\n"
- "fmla %[r3].4s, v21.4s, %[x0].s[2]\n"
- "ldr q21, [%[a_ptr], #288]\n"
- "fmla %[r4].4s, v22.4s, %[x0].s[2]\n"
- "ldr q22, [%[a_ptr], #304]\n"
- "fmla %[r5].4s, v23.4s, %[x0].s[2]\n"
- "ldr q23, [%[a_ptr], #320]\n"
- "fmla %[r6].4s, v3.4s, %[x0].s[2]\n"
- "ldr q2, [%[a_ptr], #336]\n"
- "ldr q3, [%[a_ptr], #352]\n"
- "fmla %[r7].4s, v4.4s, %[x0].s[2]\n"
- "ldr q4, [%[a_ptr], #368]\n"
+ "fmla %[r0].4s, v18.4s, %[x0].s[2]\n"
+ "ldr q18, [%[a_ptr], #240]\n"
+ "fmla %[r1].4s, v19.4s, %[x0].s[2]\n"
+ "ldr q19, [%[a_ptr], #256]\n"
+ "fmla %[r2].4s, v20.4s, %[x0].s[2]\n"
+ "ldr q20, [%[a_ptr], #272]\n"
+ "fmla %[r3].4s, v21.4s, %[x0].s[2]\n"
+ "ldr q21, [%[a_ptr], #288]\n"
+ "fmla %[r4].4s, v22.4s, %[x0].s[2]\n"
+ "ldr q22, [%[a_ptr], #304]\n"
+ "fmla %[r5].4s, v23.4s, %[x0].s[2]\n"
+ "ldr q23, [%[a_ptr], #320]\n"
+ "fmla %[r6].4s, v3.4s, %[x0].s[2]\n"
+ "ldr q2, [%[a_ptr], #336]\n"
+ "ldr q3, [%[a_ptr], #352]\n"
+ "fmla %[r7].4s, v4.4s, %[x0].s[2]\n"
+ "ldr q4, [%[a_ptr], #368]\n"
// Unroll 3
- "fmla %[r0].4s, v5.4s, %[x0].s[3]\n"
- "ldr q5, [%[a_ptr], #384]\n"
- "fmla %[r1].4s, v6.4s, %[x0].s[3]\n"
- "ldr q6, [%[a_ptr], #400]\n"
- "fmla %[r2].4s, v7.4s, %[x0].s[3]\n"
- "ldr q7, [%[a_ptr], #416]\n"
- "fmla %[r3].4s, v8.4s, %[x0].s[3]\n"
- "ldr q8, [%[a_ptr], #432]\n"
- "fmla %[r4].4s, v9.4s, %[x0].s[3]\n"
- "ldr q9, [%[a_ptr], #448]\n"
- "fmla %[r5].4s, v10.4s, %[x0].s[3]\n"
- "ldr q10, [%[a_ptr], #464]\n"
- "fmla %[r6].4s, v11.4s, %[x0].s[3]\n"
- "ldr q11, [%[a_ptr], #480]\n"
- "fmla %[r7].4s, v12.4s, %[x0].s[3]\n"
- "ldr q12, [%[a_ptr], #496]\n"
+ "fmla %[r0].4s, v5.4s, %[x0].s[3]\n"
+ "ldr q5, [%[a_ptr], #384]\n"
+ "fmla %[r1].4s, v6.4s, %[x0].s[3]\n"
+ "ldr q6, [%[a_ptr], #400]\n"
+ "fmla %[r2].4s, v7.4s, %[x0].s[3]\n"
+ "ldr q7, [%[a_ptr], #416]\n"
+ "fmla %[r3].4s, v8.4s, %[x0].s[3]\n"
+ "ldr q8, [%[a_ptr], #432]\n"
+ "fmla %[r4].4s, v9.4s, %[x0].s[3]\n"
+ "ldr q9, [%[a_ptr], #448]\n"
+ "fmla %[r5].4s, v10.4s, %[x0].s[3]\n"
+ "ldr q10, [%[a_ptr], #464]\n"
+ "fmla %[r6].4s, v11.4s, %[x0].s[3]\n"
+ "ldr q11, [%[a_ptr], #480]\n"
+ "fmla %[r7].4s, v12.4s, %[x0].s[3]\n"
+ "ldr q12, [%[a_ptr], #496]\n"
// Unroll 4
- "fmla %[r0].4s, v13.4s, %[x0a].s[0]\n"
- "fmla %[r1].4s, v14.4s, %[x0a].s[0]\n"
- "ldr q14, [%[a_ptr], #512]\n"
- "fmla %[r2].4s, v15.4s, %[x0a].s[0]\n"
- "ldr q15, [%[a_ptr], #528]\n"
- "fmla %[r3].4s, v16.4s, %[x0a].s[0]\n"
- "ldr q16, [%[a_ptr], #544]\n"
- "fmla %[r4].4s, v17.4s, %[x0a].s[0]\n"
- "ldr q17, [%[a_ptr], #560]\n"
- "fmla %[r5].4s, v18.4s, %[x0a].s[0]\n"
- "ldr q18, [%[a_ptr], #576]\n"
- "fmla %[r6].4s, v19.4s, %[x0a].s[0]\n"
- "ldr q19, [%[a_ptr], #592]\n"
- "fmla %[r7].4s, v20.4s, %[x0a].s[0]\n"
- "ldr q20, [%[a_ptr], #608]\n"
+ "fmla %[r0].4s, v13.4s, %[x0a].s[0]\n"
+ "fmla %[r1].4s, v14.4s, %[x0a].s[0]\n"
+ "ldr q14, [%[a_ptr], #512]\n"
+ "fmla %[r2].4s, v15.4s, %[x0a].s[0]\n"
+ "ldr q15, [%[a_ptr], #528]\n"
+ "fmla %[r3].4s, v16.4s, %[x0a].s[0]\n"
+ "ldr q16, [%[a_ptr], #544]\n"
+ "fmla %[r4].4s, v17.4s, %[x0a].s[0]\n"
+ "ldr q17, [%[a_ptr], #560]\n"
+ "fmla %[r5].4s, v18.4s, %[x0a].s[0]\n"
+ "ldr q18, [%[a_ptr], #576]\n"
+ "fmla %[r6].4s, v19.4s, %[x0a].s[0]\n"
+ "ldr q19, [%[a_ptr], #592]\n"
+ "fmla %[r7].4s, v20.4s, %[x0a].s[0]\n"
+ "ldr q20, [%[a_ptr], #608]\n"
// Unroll 5
- "fmla %[r0].4s, v21.4s, %[x0a].s[1]\n"
- "ldr q21, [%[a_ptr], #624]\n"
- "fmla %[r1].4s, v22.4s, %[x0a].s[1]\n"
- "ldr q22, [%[a_ptr], #640]\n"
- "fmla %[r2].4s, v23.4s, %[x0a].s[1]\n"
- "ldr q23, [%[a_ptr], #656]\n"
- "fmla %[r3].4s, v2.4s, %[x0a].s[1]\n"
- "add %[a_ptr], %[a_ptr], #672\n"
- "fmla %[r4].4s, v3.4s, %[x0a].s[1]\n"
- "fmla %[r5].4s, v4.4s, %[x0a].s[1]\n"
- "fmla %[r6].4s, v5.4s, %[x0a].s[1]\n"
- "fmla %[r7].4s, v6.4s, %[x0a].s[1]\n"
+ "fmla %[r0].4s, v21.4s, %[x0a].s[1]\n"
+ "ldr q21, [%[a_ptr], #624]\n"
+ "fmla %[r1].4s, v22.4s, %[x0a].s[1]\n"
+ "ldr q22, [%[a_ptr], #640]\n"
+ "fmla %[r2].4s, v23.4s, %[x0a].s[1]\n"
+ "ldr q23, [%[a_ptr], #656]\n"
+ "fmla %[r3].4s, v2.4s, %[x0a].s[1]\n"
+ "add %[a_ptr], %[a_ptr], #672\n"
+ "fmla %[r4].4s, v3.4s, %[x0a].s[1]\n"
+ "fmla %[r5].4s, v4.4s, %[x0a].s[1]\n"
+ "fmla %[r6].4s, v5.4s, %[x0a].s[1]\n"
+ "fmla %[r7].4s, v6.4s, %[x0a].s[1]\n"
// Unroll 6
- "fmla %[r0].4s, v7.4s, %[x0a].s[2]\n"
- "fmla %[r1].4s, v8.4s, %[x0a].s[2]\n"
- "fmla %[r2].4s, v9.4s, %[x0a].s[2]\n"
- "fmla %[r3].4s, v10.4s, %[x0a].s[2]\n"
- "fmla %[r4].4s, v11.4s, %[x0a].s[2]\n"
- "fmla %[r5].4s, v12.4s, %[x0a].s[2]\n"
- "fmla %[r6].4s, v14.4s, %[x0a].s[2]\n"
- "fmla %[r7].4s, v15.4s, %[x0a].s[2]\n"
+ "fmla %[r0].4s, v7.4s, %[x0a].s[2]\n"
+ "fmla %[r1].4s, v8.4s, %[x0a].s[2]\n"
+ "fmla %[r2].4s, v9.4s, %[x0a].s[2]\n"
+ "fmla %[r3].4s, v10.4s, %[x0a].s[2]\n"
+ "fmla %[r4].4s, v11.4s, %[x0a].s[2]\n"
+ "fmla %[r5].4s, v12.4s, %[x0a].s[2]\n"
+ "fmla %[r6].4s, v14.4s, %[x0a].s[2]\n"
+ "fmla %[r7].4s, v15.4s, %[x0a].s[2]\n"
// Unroll 7
- "fmla %[r0].4s, v16.4s, %[x0a].s[3]\n"
- "fmla %[r1].4s, v17.4s, %[x0a].s[3]\n"
- "fmla %[r2].4s, v18.4s, %[x0a].s[3]\n"
- "fmla %[r3].4s, v19.4s, %[x0a].s[3]\n"
- "fmla %[r4].4s, v20.4s, %[x0a].s[3]\n"
- "fmla %[r5].4s, v21.4s, %[x0a].s[3]\n"
- "fmla %[r6].4s, v22.4s, %[x0a].s[3]\n"
- "fmla %[r7].4s, v23.4s, %[x0a].s[3]\n"
+ "fmla %[r0].4s, v16.4s, %[x0a].s[3]\n"
+ "fmla %[r1].4s, v17.4s, %[x0a].s[3]\n"
+ "fmla %[r2].4s, v18.4s, %[x0a].s[3]\n"
+ "fmla %[r3].4s, v19.4s, %[x0a].s[3]\n"
+ "fmla %[r4].4s, v20.4s, %[x0a].s[3]\n"
+ "fmla %[r5].4s, v21.4s, %[x0a].s[3]\n"
+ "fmla %[r6].4s, v22.4s, %[x0a].s[3]\n"
+ "fmla %[r7].4s, v23.4s, %[x0a].s[3]\n"
:
[a_ptr] "+r" (a_ptr), [x_ptr] "+r" (x_ptr),
[x0] "+w" (x0), [x0a] "+w" (x0a), [k] "+r" (k),
@@ -532,53 +532,53 @@ void a64_sgemv_pretransposed(const float *A, int lda, const float *X, float *Y,
int l=(M%8)-1;
__asm __volatile (
- "ldr q2, [%[a_ptr], #0]\n"
- "ldr q3, [%[a_ptr], #16]\n"
- "ldr q4, [%[a_ptr], #32]\n"
- "ldr q5, [%[a_ptr], #48]\n"
- "ldr q6, [%[a_ptr], #64]\n"
- "ldr q7, [%[a_ptr], #80]\n"
- "ldr q8, [%[a_ptr], #96]\n"
- "ldr q9, [%[a_ptr], #112]\n"
- "ldr %s[x0], [%[x_ptr]]\n"
- "add %[a_ptr], %[a_ptr], #128\n"
- "add %[x_ptr], %[x_ptr], #4\n"
-
- "cbz %w[l], 2f\n"
+ "ldr q2, [%[a_ptr], #0]\n"
+ "ldr q3, [%[a_ptr], #16]\n"
+ "ldr q4, [%[a_ptr], #32]\n"
+ "ldr q5, [%[a_ptr], #48]\n"
+ "ldr q6, [%[a_ptr], #64]\n"
+ "ldr q7, [%[a_ptr], #80]\n"
+ "ldr q8, [%[a_ptr], #96]\n"
+ "ldr q9, [%[a_ptr], #112]\n"
+ "ldr %s[x0], [%[x_ptr]]\n"
+ "add %[a_ptr], %[a_ptr], #128\n"
+ "add %[x_ptr], %[x_ptr], #4\n"
+
+ "cbz %w[l], 2f\n"
"1:\n"
- "fmla %[r0].4s, v2.4s, %[x0].s[0]\n"
- "ldr q2, [%[a_ptr], #0]\n"
- "subs %w[l], %w[l], #1\n"
- "fmla %[r1].4s, v3.4s, %[x0].s[0]\n"
- "ldr q3, [%[a_ptr], #16]\n"
- "fmla %[r2].4s, v4.4s, %[x0].s[0]\n"
- "ldr q4, [%[a_ptr], #32]\n"
- "fmla %[r3].4s, v5.4s, %[x0].s[0]\n"
- "ldr q5, [%[a_ptr], #48]\n"
- "fmla %[r4].4s, v6.4s, %[x0].s[0]\n"
- "ldr q6, [%[a_ptr], #64]\n"
- "fmla %[r5].4s, v7.4s, %[x0].s[0]\n"
- "ldr q7, [%[a_ptr], #80]\n"
- "fmla %[r6].4s, v8.4s, %[x0].s[0]\n"
- "ldr q8, [%[a_ptr], #96]\n"
- "fmla %[r7].4s, v9.4s, %[x0].s[0]\n"
- "ldr q9, [%[a_ptr], #112]\n"
- "ldr %s[x0], [%[x_ptr]]\n"
- "add %[a_ptr], %[a_ptr], #128\n"
- "add %[x_ptr], %[x_ptr], #4\n"
- "bne 1b\n"
+ "fmla %[r0].4s, v2.4s, %[x0].s[0]\n"
+ "ldr q2, [%[a_ptr], #0]\n"
+ "subs %w[l], %w[l], #1\n"
+ "fmla %[r1].4s, v3.4s, %[x0].s[0]\n"
+ "ldr q3, [%[a_ptr], #16]\n"
+ "fmla %[r2].4s, v4.4s, %[x0].s[0]\n"
+ "ldr q4, [%[a_ptr], #32]\n"
+ "fmla %[r3].4s, v5.4s, %[x0].s[0]\n"
+ "ldr q5, [%[a_ptr], #48]\n"
+ "fmla %[r4].4s, v6.4s, %[x0].s[0]\n"
+ "ldr q6, [%[a_ptr], #64]\n"
+ "fmla %[r5].4s, v7.4s, %[x0].s[0]\n"
+ "ldr q7, [%[a_ptr], #80]\n"
+ "fmla %[r6].4s, v8.4s, %[x0].s[0]\n"
+ "ldr q8, [%[a_ptr], #96]\n"
+ "fmla %[r7].4s, v9.4s, %[x0].s[0]\n"
+ "ldr q9, [%[a_ptr], #112]\n"
+ "ldr %s[x0], [%[x_ptr]]\n"
+ "add %[a_ptr], %[a_ptr], #128\n"
+ "add %[x_ptr], %[x_ptr], #4\n"
+ "bne 1b\n"
"2:\n"
- "fmla %[r0].4s, v2.4s, %[x0].s[0]\n"
- "fmla %[r1].4s, v3.4s, %[x0].s[0]\n"
- "fmla %[r2].4s, v4.4s, %[x0].s[0]\n"
- "fmla %[r3].4s, v5.4s, %[x0].s[0]\n"
- "fmla %[r4].4s, v6.4s, %[x0].s[0]\n"
- "fmla %[r5].4s, v7.4s, %[x0].s[0]\n"
- "fmla %[r6].4s, v8.4s, %[x0].s[0]\n"
- "fmla %[r7].4s, v9.4s, %[x0].s[0]\n"
+ "fmla %[r0].4s, v2.4s, %[x0].s[0]\n"
+ "fmla %[r1].4s, v3.4s, %[x0].s[0]\n"
+ "fmla %[r2].4s, v4.4s, %[x0].s[0]\n"
+ "fmla %[r3].4s, v5.4s, %[x0].s[0]\n"
+ "fmla %[r4].4s, v6.4s, %[x0].s[0]\n"
+ "fmla %[r5].4s, v7.4s, %[x0].s[0]\n"
+ "fmla %[r6].4s, v8.4s, %[x0].s[0]\n"
+ "fmla %[r7].4s, v9.4s, %[x0].s[0]\n"
:
[a_ptr] "+r" (a_ptr), [x_ptr] "+r" (x_ptr),
[x0] "+w" (x0), [l] "+r" (l),
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp
index 5f7252f019..b8b93bf31f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,7 +72,7 @@ public:
return true;
}
- StdTransformsFixed<operand_type, result_type, 6, 4, 1> transforms = {};
+ StdTransformsFixed<operand_type, operand_type, result_type, 6, 4, 1> transforms = {};
// Default to the generic kernel
kern_type kernel=a64_smallK_hybrid_fp32_mla_6x4;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp
index a8e0c24eae..72f517fe35 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -72,7 +72,7 @@ public:
return true;
}
- StdTransformsFixed<operand_type, result_type, 8, 4, 1> transforms = {};
+ StdTransformsFixed<operand_type, operand_type, result_type, 8, 4, 1> transforms = {};
// Default to the generic kernel
kern_type kernel=a64_smallK_hybrid_fp32_mla_8x4;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp
index abf0eda008..6fdca066b1 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -73,7 +73,7 @@ public:
return false;
}
- StdTransformsFixed<operand_type, result_type, 6, 4, 4> transforms = {};
+ StdTransformsFixed<operand_type, operand_type, result_type, 6, 4, 4> transforms = {};
// Default to the generic kernel
kern_type kernel=a64_smallK_hybrid_s8s32_dot_6x4;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp
index 9f9c2a49db..27d3e2310c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -73,7 +73,7 @@ public:
return false;
}
- StdTransformsFixed<operand_type, result_type, 8, 4, 4> transforms = {};
+ StdTransformsFixed<operand_type, operand_type, result_type, 8, 4, 4> transforms = {};
// Default to the generic kernel
kern_type kernel=a64_smallK_hybrid_s8s32_dot_8x4;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_6x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_6x4.hpp
index 5d48a52d42..082ca66e01 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_6x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_6x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -73,7 +73,7 @@ public:
return false;
}
- StdTransformsFixed<operand_type, result_type, 6, 4, 4> transforms = {};
+ StdTransformsFixed<operand_type, operand_type, result_type, 6, 4, 4> transforms = {};
// Default to the generic kernel
kern_type kernel=a64_smallK_hybrid_u8u32_dot_6x4;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_8x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_8x4.hpp
index 942f94b0bf..866b97a316 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_8x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_8x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -73,7 +73,7 @@ public:
return false;
}
- StdTransformsFixed<operand_type, result_type, 8, 4, 4> transforms = {};
+ StdTransformsFixed<operand_type, operand_type, result_type, 8, 4, 4> transforms = {};
// Default to the generic kernel
kern_type kernel=a64_smallK_hybrid_u8u32_dot_8x4;
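Note on the six smallK hybrid headers above: they all take the same mechanical edit — `StdTransformsFixed` gains one more leading type parameter, with `operand_type` now passed twice, alongside the 2024 copyright bump. A minimal compilable sketch of the arity change follows; the template and parameter names are stand-ins, and the reading that the two slots are the per-operand (A and B) types is an inference, since std_transforms_fixed.hpp itself is not shown in this diff.

    // Stand-in for arm_gemm's StdTransformsFixed; only the template arity is
    // taken from the diff, the parameter names are illustrative guesses.
    template<typename TOperandA, typename TOperandB, typename TResult,
             unsigned int Height, unsigned int Width, unsigned int KUnroll>
    struct StdTransformsFixedSketch {};

    struct SmallKHybridSketch {
        typedef float operand_type;   // as in the fp32 6x4/8x4 variants
        typedef float result_type;
        // Pre-patch:  StdTransformsFixed<operand_type, result_type, 6, 4, 1>
        // Post-patch: the operand type appears twice, so the two GEMM operands
        // could take distinct types without another signature change
        // (inferred motivation -- not confirmed by this diff).
        StdTransformsFixedSketch<operand_type, operand_type, result_type, 6, 4, 1> transforms = {};
    };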
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
index db29e42ef1..b102e1dea4 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_bf16fp32_dot_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,21 +64,21 @@ void sme2_gemv_bf16fp32_dot_16VL (
__asm__ __volatile__(
"ptrue p8.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x28, ALL, MUL #4\n"
- "add x27, %x[N], x28\n"
- "sub x27, x27, #0x1\n"
- "udiv x27, x27, x28\n"
- "add x22, x27, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x28\n"
- "mul x22, x22, %x[K]\n"
"mov x9, #0x0\n"
- "mov x26, %x[B_ptr]\n"
+ "cntw x28, ALL, MUL #4\n"
+ "mov x27, %x[B_ptr]\n"
+ "add x26, %x[N], x28\n"
"mov x25, %x[output_ptr]\n"
+ "sub x26, x26, #0x1\n"
"ptrue p1.b\n"
+ "udiv x26, x26, x28\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "lsl x22, x22, #0x1\n"
+ "add x22, x26, #0x3\n"
"mov x21, #0x1\n"
+ "and x22, x22, #0xfffffffffffffffc\n"
+ "mul x22, x22, x28\n"
+ "mul x22, x22, %x[K]\n"
+ "lsl x22, x22, #0x1\n"
"1:" // RHS size check loop
"cmp x22, #0x200000\n"
"blt 2f\n"
@@ -92,13 +92,13 @@ void sme2_gemv_bf16fp32_dot_16VL (
"lsl x21, x21, #0x16\n"
"orr x22, x22, x20\n"
"orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ ".inst 0xf8b64b7a // rprfm pldonce, x22, [x27]\n"
"3:" // RHS prefetch exit
"mov x24, %x[bias]\n"
"4:" // Column loop
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"bge 28f\n"
- "cmp x27, #0x2\n"
+ "cmp x26, #0x2\n"
"bgt 20f\n"
"beq 12f\n"
"mov x23, %x[A_ptr]\n"
@@ -108,8 +108,8 @@ void sme2_gemv_bf16fp32_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 5f\n"
- ".inst 0xa040c718 // ld1w { z24.s-z27.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042f00 // mova za.d[x9, #0], { z24.d-z27.d }\n"
+ ".inst 0xa040c714 // ld1w { z20.s-z23.s }, pn9.b/Z, [x24]\n"
+ ".inst 0xc0042e80 // mova za.d[x9, #0], { z20.d-z23.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -118,59 +118,59 @@ void sme2_gemv_bf16fp32_dot_16VL (
"ble 8f\n"
"7:" // Width 1: Multiply loop: Main loop head
"whilelt p0.h, XZR, x22\n"
- "ld1rqh { z8.h }, p0/Z, [x23]\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
"sub x22, x22, #0x8\n"
- ".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc158b298 // bfdot za.s[x9, 0], { z20.h-z23.h }, z8.h[0]\n"
- "addvl x26, x26, #16\n"
- "cmp x22, #0x8\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc158b498 // bfdot za.s[x9, 0], { z4.h-z7.h }, z8.h[1]\n"
- "addvl x26, x26, #16\n"
"add x23, x23, #0x10\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc158bb98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z8.h[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc158bf18 // bfdot za.s[x9, 0], { z24.h-z27.h }, z8.h[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ "cmp x22, #0x8\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc152b398 // bfdot za.s[x9, 0], { z28.h-z31.h }, z2.h[0]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc152b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z2.h[1]\n"
+ ".inst 0xc152bb18 // bfdot za.s[x9, 0], { z24.h-z27.h }, z2.h[2]\n"
+ ".inst 0xc152bd98 // bfdot za.s[x9, 0], { z12.h-z15.h }, z2.h[3]\n"
"bgt 7b\n"
"8:" // Width 1: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x22\n"
- "ld1rqh { z11.h }, p0/Z, [x23]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc15bb398 // bfdot za.s[x9, 0], { z28.h-z31.h }, z11.h[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b198 // bfdot za.s[x9, 0], { z12.h-z15.h }, z3.h[0]\n"
"ble 9f\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15bb598 // bfdot za.s[x9, 0], { z12.h-z15.h }, z11.h[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b498 // bfdot za.s[x9, 0], { z4.h-z7.h }, z3.h[1]\n"
"ble 9f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15bba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z11.h[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z3.h[2]\n"
"ble 9f\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15bbc18 // bfdot za.s[x9, 0], { z0.h-z3.h }, z11.h[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153bd18 // bfdot za.s[x9, 0], { z8.h-z11.h }, z3.h[3]\n"
"9:" // Width 1: Multiply loop: multiply skip
"tbz %x[flags], #1, 10f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z3.s }, p1/Z, [x21]\n"
- "ld1rw { z29.s }, p1/Z, [x20]\n"
- ".inst 0xc1bdc868 // fclamp { z8.s-z11.s }, z3.s, z29.s\n"
- ".inst 0xa060c328 // st1w { z8.s-z11.s }, p8, [x25]\n"
+ ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
+ "ld1rw { z23.s }, p1/Z, [x21]\n"
+ "ld1rw { z22.s }, p1/Z, [x20]\n"
+ ".inst 0xc1b6cae0 // fclamp { z0.s-z3.s }, z23.s, z22.s\n"
+ ".inst 0xa060c320 // st1w { z0.s-z3.s }, p8, [x25]\n"
"addvl x25, x25, #4\n"
"b 11f\n"
"10:" // Width 1: No activation
- ".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
- ".inst 0xa060c32c // st1w { z12.s-z15.s }, p8, [x25]\n"
+ ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
+ ".inst 0xa060c320 // st1w { z0.s-z3.s }, p8, [x25]\n"
"addvl x25, x25, #4\n"
"11:" // Width 1: Output done
"b 36f\n"
@@ -182,10 +182,10 @@ void sme2_gemv_bf16fp32_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 13f\n"
- ".inst 0xa040c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c714 // ld1w { z20.s-z23.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042e81 // mova za.d[x9, #1], { z20.d-z23.d }\n"
+ ".inst 0xa040c71c // ld1w { z28.s-z31.s }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc0042f80 // mova za.d[x9, #0], { z28.d-z31.d }\n"
+ ".inst 0xc0042c81 // mova za.d[x9, #1], { z4.d-z7.d }\n"
"b 14f\n"
"13:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -194,80 +194,80 @@ void sme2_gemv_bf16fp32_dot_16VL (
"ble 16f\n"
"15:" // Width 2: Multiply loop: Main loop head
"whilelt p0.h, XZR, x22\n"
- "ld1rqh { z9.h }, p0/Z, [x23]\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
"sub x22, x22, #0x8\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc159b198 // bfdot za.s[x9, 0], { z12.h-z15.h }, z9.h[0]\n"
+ "ld1rqh { z1.h }, p0/Z, [x23]\n"
"cmp x22, #0x8\n"
"add x23, x23, #0x10\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc159b099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z9.h[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc159b598 // bfdot za.s[x9, 0], { z12.h-z15.h }, z9.h[1]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc159b499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z9.h[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc159bb18 // bfdot za.s[x9, 0], { z24.h-z27.h }, z9.h[2]\n"
- ".inst 0xa041a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc159b819 // bfdot za.s[x9, 1], { z0.h-z3.h }, z9.h[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc159bc18 // bfdot za.s[x9, 0], { z0.h-z3.h }, z9.h[3]\n"
- ".inst 0xa041a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc159bf99 // bfdot za.s[x9, 1], { z28.h-z31.h }, z9.h[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc151b398 // bfdot za.s[x9, 0], { z28.h-z31.h }, z1.h[0]\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc151b199 // bfdot za.s[x9, 1], { z12.h-z15.h }, z1.h[0]\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc151b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z1.h[1]\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc151b719 // bfdot za.s[x9, 1], { z24.h-z27.h }, z1.h[1]\n"
+ ".inst 0xc151bb98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z1.h[2]\n"
+ ".inst 0xc151b999 // bfdot za.s[x9, 1], { z12.h-z15.h }, z1.h[2]\n"
+ ".inst 0xc151bd18 // bfdot za.s[x9, 0], { z8.h-z11.h }, z1.h[3]\n"
+ ".inst 0xc151be99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z1.h[3]\n"
"bgt 15b\n"
"16:" // Width 2: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x22\n"
- "ld1rqh { z11.h }, p0/Z, [x23]\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc15bb198 // bfdot za.s[x9, 0], { z12.h-z15.h }, z11.h[0]\n"
- ".inst 0xa041a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bb019 // bfdot za.s[x9, 1], { z0.h-z3.h }, z11.h[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b218 // bfdot za.s[x9, 0], { z16.h-z19.h }, z3.h[0]\n"
+ ".inst 0xc153b399 // bfdot za.s[x9, 1], { z28.h-z31.h }, z3.h[0]\n"
"ble 17f\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15bb718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z11.h[1]\n"
- ".inst 0xa041a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bb419 // bfdot za.s[x9, 1], { z0.h-z3.h }, z11.h[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b698 // bfdot za.s[x9, 0], { z20.h-z23.h }, z3.h[1]\n"
+ ".inst 0xc153b619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z3.h[1]\n"
"ble 17f\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15bb998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z11.h[2]\n"
- ".inst 0xa041a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bbb99 // bfdot za.s[x9, 1], { z28.h-z31.h }, z11.h[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b898 // bfdot za.s[x9, 0], { z4.h-z7.h }, z3.h[2]\n"
+ ".inst 0xc153ba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z3.h[2]\n"
"ble 17f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15bbe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z11.h[3]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bbe99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z11.h[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z3.h[3]\n"
+ ".inst 0xc153bd99 // bfdot za.s[x9, 1], { z12.h-z15.h }, z3.h[3]\n"
"17:" // Width 2: Multiply loop: multiply skip
"tbz %x[flags], #1, 18f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
- "ld1rw { z9.s }, p1/Z, [x21]\n"
- ".inst 0xc0062c24 // mova { z4.d-z7.d }, za.d[x9, #1]\n"
- "ld1rw { z8.s }, p1/Z, [x20]\n"
- ".inst 0xc1a8c920 // fclamp { z0.s-z3.s }, z9.s, z8.s\n"
- ".inst 0xa060c720 // st1w { z0.s-z3.s }, pn9.b, [x25]\n"
- ".inst 0xc1a8c924 // fclamp { z4.s-z7.s }, z9.s, z8.s\n"
- ".inst 0xa061c324 // st1w { z4.s-z7.s }, p8, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c28 // mova { z8.d-z11.d }, za.d[x9, #1]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
+ "ld1rw { z23.s }, p1/Z, [x20]\n"
+ ".inst 0xc1b7ca24 // fclamp { z4.s-z7.s }, z17.s, z23.s\n"
+ ".inst 0xc1b7ca28 // fclamp { z8.s-z11.s }, z17.s, z23.s\n"
+ ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c328 // st1w { z8.s-z11.s }, p8, [x25, #0x4, MUL VL]\n"
"addvl x25, x25, #8\n"
"b 19f\n"
"18:" // Width 2: No activation
- ".inst 0xc0062c10 // mova { z16.d-z19.d }, za.d[x9, #0]\n"
- ".inst 0xa060c730 // st1w { z16.s-z19.s }, pn9.b, [x25]\n"
- ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
- ".inst 0xa061c32c // st1w { z12.s-z15.s }, p8, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c30 // mova { z16.d-z19.d }, za.d[x9, #1]\n"
+ ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c330 // st1w { z16.s-z19.s }, p8, [x25, #0x4, MUL VL]\n"
"addvl x25, x25, #8\n"
"19:" // Width 2: Output done
"b 36f\n"
@@ -280,12 +280,12 @@ void sme2_gemv_bf16fp32_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 21f\n"
- ".inst 0xa040c718 // ld1w { z24.s-z27.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042f00 // mova za.d[x9, #0], { z24.d-z27.d }\n"
- ".inst 0xa041c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042c81 // mova za.d[x9, #1], { z4.d-z7.d }\n"
- ".inst 0xa042c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042e02 // mova za.d[x9, #2], { z16.d-z19.d }\n"
+ ".inst 0xa040c708 // ld1w { z8.s-z11.s }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xc0042d00 // mova za.d[x9, #0], { z8.d-z11.d }\n"
+ ".inst 0xc0042c01 // mova za.d[x9, #1], { z0.d-z3.d }\n"
+ ".inst 0xc0042c82 // mova za.d[x9, #2], { z4.d-z7.d }\n"
"b 22f\n"
"21:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -294,101 +294,101 @@ void sme2_gemv_bf16fp32_dot_16VL (
"ble 24f\n"
"23:" // Width 3: Multiply loop: Main loop head
"whilelt p0.h, XZR, x22\n"
- "ld1rqh { z15.h }, p0/Z, [x23]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
"sub x22, x22, #0x8\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fb018 // bfdot za.s[x9, 0], { z0.h-z3.h }, z15.h[0]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"cmp x22, #0x8\n"
"add x23, x23, #0x10\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z15.h[0]\n"
- ".inst 0xa042a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fb01a // bfdot za.s[x9, 2], { z0.h-z3.h }, z15.h[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fb698 // bfdot za.s[x9, 0], { z20.h-z23.h }, z15.h[1]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb699 // bfdot za.s[x9, 1], { z20.h-z23.h }, z15.h[1]\n"
- ".inst 0xa042a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fb51a // bfdot za.s[x9, 2], { z8.h-z11.h }, z15.h[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fbb18 // bfdot za.s[x9, 0], { z24.h-z27.h }, z15.h[2]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb919 // bfdot za.s[x9, 1], { z8.h-z11.h }, z15.h[2]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z15.h[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fbe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z15.h[3]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fbe19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z15.h[3]\n"
- ".inst 0xa042a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fbd1a // bfdot za.s[x9, 2], { z8.h-z11.h }, z15.h[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b198 // bfdot za.s[x9, 0], { z12.h-z15.h }, z3.h[0]\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153b119 // bfdot za.s[x9, 1], { z8.h-z11.h }, z3.h[0]\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153b09a // bfdot za.s[x9, 2], { z4.h-z7.h }, z3.h[0]\n"
+ ".inst 0xa042a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153b618 // bfdot za.s[x9, 0], { z16.h-z19.h }, z3.h[1]\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153b699 // bfdot za.s[x9, 1], { z20.h-z23.h }, z3.h[1]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b71a // bfdot za.s[x9, 2], { z24.h-z27.h }, z3.h[1]\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153b998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z3.h[2]\n"
+ ".inst 0xa042a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153bb99 // bfdot za.s[x9, 1], { z28.h-z31.h }, z3.h[2]\n"
+ ".inst 0xc153b91a // bfdot za.s[x9, 2], { z8.h-z11.h }, z3.h[2]\n"
+ ".inst 0xc153bc98 // bfdot za.s[x9, 0], { z4.h-z7.h }, z3.h[3]\n"
+ ".inst 0xc153be99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z3.h[3]\n"
+ ".inst 0xc153bf1a // bfdot za.s[x9, 2], { z24.h-z27.h }, z3.h[3]\n"
"bgt 23b\n"
"24:" // Width 3: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x22\n"
- "ld1rqh { z11.h }, p0/Z, [x23]\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc15bb398 // bfdot za.s[x9, 0], { z28.h-z31.h }, z11.h[0]\n"
- ".inst 0xa041a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bb019 // bfdot za.s[x9, 1], { z0.h-z3.h }, z11.h[0]\n"
- ".inst 0xa042a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bb29a // bfdot za.s[x9, 2], { z20.h-z23.h }, z11.h[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b298 // bfdot za.s[x9, 0], { z20.h-z23.h }, z3.h[0]\n"
+ ".inst 0xc153b199 // bfdot za.s[x9, 1], { z12.h-z15.h }, z3.h[0]\n"
+ ".inst 0xc153b09a // bfdot za.s[x9, 2], { z4.h-z7.h }, z3.h[0]\n"
"ble 25f\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15bb598 // bfdot za.s[x9, 0], { z12.h-z15.h }, z11.h[1]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bb499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z11.h[1]\n"
- ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bb79a // bfdot za.s[x9, 2], { z28.h-z31.h }, z11.h[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b698 // bfdot za.s[x9, 0], { z20.h-z23.h }, z3.h[1]\n"
+ ".inst 0xc153b519 // bfdot za.s[x9, 1], { z8.h-z11.h }, z3.h[1]\n"
+ ".inst 0xc153b61a // bfdot za.s[x9, 2], { z16.h-z19.h }, z3.h[1]\n"
"ble 25f\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15bb898 // bfdot za.s[x9, 0], { z4.h-z7.h }, z11.h[2]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bba99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z11.h[2]\n"
- ".inst 0xa042a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bb99a // bfdot za.s[x9, 2], { z12.h-z15.h }, z11.h[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153bb98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z3.h[2]\n"
+ ".inst 0xc153bb19 // bfdot za.s[x9, 1], { z24.h-z27.h }, z3.h[2]\n"
+ ".inst 0xc153b99a // bfdot za.s[x9, 2], { z12.h-z15.h }, z3.h[2]\n"
"ble 25f\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15bbd98 // bfdot za.s[x9, 0], { z12.h-z15.h }, z11.h[3]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bbe99 // bfdot za.s[x9, 1], { z20.h-z23.h }, z11.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bbe1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z11.h[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153bd18 // bfdot za.s[x9, 0], { z8.h-z11.h }, z3.h[3]\n"
+ ".inst 0xc153bf99 // bfdot za.s[x9, 1], { z28.h-z31.h }, z3.h[3]\n"
+ ".inst 0xc153bd9a // bfdot za.s[x9, 2], { z12.h-z15.h }, z3.h[3]\n"
"25:" // Width 3: Multiply loop: multiply skip
"tbz %x[flags], #1, 26f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
- "ld1rw { z17.s }, p1/Z, [x21]\n"
- ".inst 0xc0062c28 // mova { z8.d-z11.d }, za.d[x9, #1]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
- ".inst 0xc1b0ca24 // fclamp { z4.s-z7.s }, z17.s, z16.s\n"
- ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
- ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
- ".inst 0xc1b0ca28 // fclamp { z8.s-z11.s }, z17.s, z16.s\n"
- ".inst 0xa061c728 // st1w { z8.s-z11.s }, pn9.b, [x25, #0x4, MUL VL]\n"
- ".inst 0xc1b0ca2c // fclamp { z12.s-z15.s }, z17.s, z16.s\n"
- ".inst 0xa062c32c // st1w { z12.s-z15.s }, p8, [x25, #0x8, MUL VL]\n"
+ ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ "ld1rw { z21.s }, p1/Z, [x21]\n"
+ ".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+ "ld1rw { z20.s }, p1/Z, [x20]\n"
+ ".inst 0xc1b4caa8 // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c330 // st1w { z16.s-z19.s }, p8, [x25, #0x8, MUL VL]\n"
"addvl x25, x25, #12\n"
"b 27f\n"
"26:" // Width 3: No activation
- ".inst 0xc0062c14 // mova { z20.d-z23.d }, za.d[x9, #0]\n"
- ".inst 0xa060c734 // st1w { z20.s-z23.s }, pn9.b, [x25]\n"
+ ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c5c // mova { z28.d-z31.d }, za.d[x9, #2]\n"
+ ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
".inst 0xa061c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x4, MUL VL]\n"
- ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
- ".inst 0xa062c32c // st1w { z12.s-z15.s }, p8, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa062c33c // st1w { z28.s-z31.s }, p8, [x25, #0x8, MUL VL]\n"
"addvl x25, x25, #12\n"
"27:" // Width 3: Output done
"b 36f\n"
@@ -402,14 +402,14 @@ void sme2_gemv_bf16fp32_dot_16VL (
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 29f\n"
".inst 0xa040c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
- ".inst 0xa042c70c // ld1w { z12.s-z15.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042d82 // mova za.d[x9, #2], { z12.d-z15.d }\n"
+ ".inst 0xa041c70c // ld1w { z12.s-z15.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042c71c // ld1w { z28.s-z31.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
".inst 0xa043c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0xc, MUL VL]\n"
- ".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
+ ".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
"addvl x24, x24, #16\n"
+ ".inst 0xc0042d81 // mova za.d[x9, #1], { z12.d-z15.d }\n"
+ ".inst 0xc0042f82 // mova za.d[x9, #2], { z28.d-z31.d }\n"
+ ".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
"b 30f\n"
"29:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -418,125 +418,125 @@ void sme2_gemv_bf16fp32_dot_16VL (
"ble 32f\n"
"31:" // Width 4: Multiply loop: Main loop head
"whilelt p0.h, XZR, x22\n"
- "ld1rqh { z8.h }, p0/Z, [x23]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
"sub x22, x22, #0x8\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc158b218 // bfdot za.s[x9, 0], { z16.h-z19.h }, z8.h[0]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"cmp x22, #0x8\n"
"add x23, x23, #0x10\n"
- ".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc158b199 // bfdot za.s[x9, 1], { z12.h-z15.h }, z8.h[0]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc158b21a // bfdot za.s[x9, 2], { z16.h-z19.h }, z8.h[0]\n"
- ".inst 0xa043a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc158b19b // bfdot za.s[x9, 3], { z12.h-z15.h }, z8.h[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc158b598 // bfdot za.s[x9, 0], { z12.h-z15.h }, z8.h[1]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc158b699 // bfdot za.s[x9, 1], { z20.h-z23.h }, z8.h[1]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc158b61a // bfdot za.s[x9, 2], { z16.h-z19.h }, z8.h[1]\n"
- ".inst 0xa043a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc158b69b // bfdot za.s[x9, 3], { z20.h-z23.h }, z8.h[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc158b898 // bfdot za.s[x9, 0], { z4.h-z7.h }, z8.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc158ba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z8.h[2]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc158ba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z8.h[2]\n"
- ".inst 0xa043a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc158b81b // bfdot za.s[x9, 3], { z0.h-z3.h }, z8.h[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc158be98 // bfdot za.s[x9, 0], { z20.h-z23.h }, z8.h[3]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc158be19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z8.h[3]\n"
- ".inst 0xa042a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc158bc9a // bfdot za.s[x9, 2], { z4.h-z7.h }, z8.h[3]\n"
- ".inst 0xa043a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc158be9b // bfdot za.s[x9, 3], { z20.h-z23.h }, z8.h[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153b198 // bfdot za.s[x9, 0], { z12.h-z15.h }, z3.h[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b299 // bfdot za.s[x9, 1], { z20.h-z23.h }, z3.h[0]\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153b21a // bfdot za.s[x9, 2], { z16.h-z19.h }, z3.h[0]\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153b11b // bfdot za.s[x9, 3], { z8.h-z11.h }, z3.h[0]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153b718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z3.h[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b599 // bfdot za.s[x9, 1], { z12.h-z15.h }, z3.h[1]\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153b51a // bfdot za.s[x9, 2], { z8.h-z11.h }, z3.h[1]\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153b49b // bfdot za.s[x9, 3], { z4.h-z7.h }, z3.h[1]\n"
+ ".inst 0xa042a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153bb18 // bfdot za.s[x9, 0], { z24.h-z27.h }, z3.h[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b919 // bfdot za.s[x9, 1], { z8.h-z11.h }, z3.h[2]\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153ba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z3.h[2]\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153b89b // bfdot za.s[x9, 3], { z4.h-z7.h }, z3.h[2]\n"
+ ".inst 0xa042a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153bd18 // bfdot za.s[x9, 0], { z8.h-z11.h }, z3.h[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153bf99 // bfdot za.s[x9, 1], { z28.h-z31.h }, z3.h[3]\n"
+ ".inst 0xc153bd9a // bfdot za.s[x9, 2], { z12.h-z15.h }, z3.h[3]\n"
+ ".inst 0xc153be9b // bfdot za.s[x9, 3], { z20.h-z23.h }, z3.h[3]\n"
"bgt 31b\n"
"32:" // Width 4: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x22\n"
- "ld1rqh { z11.h }, p0/Z, [x23]\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc15bb218 // bfdot za.s[x9, 0], { z16.h-z19.h }, z11.h[0]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bb299 // bfdot za.s[x9, 1], { z20.h-z23.h }, z11.h[0]\n"
- ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bb39a // bfdot za.s[x9, 2], { z28.h-z31.h }, z11.h[0]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15bb21b // bfdot za.s[x9, 3], { z16.h-z19.h }, z11.h[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153b218 // bfdot za.s[x9, 0], { z16.h-z19.h }, z3.h[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b199 // bfdot za.s[x9, 1], { z12.h-z15.h }, z3.h[0]\n"
+ ".inst 0xc153b39a // bfdot za.s[x9, 2], { z28.h-z31.h }, z3.h[0]\n"
+ ".inst 0xc153b29b // bfdot za.s[x9, 3], { z20.h-z23.h }, z3.h[0]\n"
"ble 33f\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15bb418 // bfdot za.s[x9, 0], { z0.h-z3.h }, z11.h[1]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bb619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z11.h[1]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bb61a // bfdot za.s[x9, 2], { z16.h-z19.h }, z11.h[1]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15bb61b // bfdot za.s[x9, 3], { z16.h-z19.h }, z11.h[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153b598 // bfdot za.s[x9, 0], { z12.h-z15.h }, z3.h[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z3.h[1]\n"
+ ".inst 0xc153b71a // bfdot za.s[x9, 2], { z24.h-z27.h }, z3.h[1]\n"
+ ".inst 0xc153b69b // bfdot za.s[x9, 3], { z20.h-z23.h }, z3.h[1]\n"
"ble 33f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15bba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z11.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z11.h[2]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z11.h[2]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15bba1b // bfdot za.s[x9, 3], { z16.h-z19.h }, z11.h[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153b998 // bfdot za.s[x9, 0], { z12.h-z15.h }, z3.h[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153b919 // bfdot za.s[x9, 1], { z8.h-z11.h }, z3.h[2]\n"
+ ".inst 0xc153ba9a // bfdot za.s[x9, 2], { z20.h-z23.h }, z3.h[2]\n"
+ ".inst 0xc153ba1b // bfdot za.s[x9, 3], { z16.h-z19.h }, z3.h[2]\n"
"ble 33f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15bbe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z11.h[3]\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bbf19 // bfdot za.s[x9, 1], { z24.h-z27.h }, z11.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bbe1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z11.h[3]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15bbe1b // bfdot za.s[x9, 3], { z16.h-z19.h }, z11.h[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153bd98 // bfdot za.s[x9, 0], { z12.h-z15.h }, z3.h[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153bd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z3.h[3]\n"
+ ".inst 0xc153be9a // bfdot za.s[x9, 2], { z20.h-z23.h }, z3.h[3]\n"
+ ".inst 0xc153be1b // bfdot za.s[x9, 3], { z16.h-z19.h }, z3.h[3]\n"
"33:" // Width 4: Multiply loop: multiply skip
"tbz %x[flags], #1, 34f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
"ld1rw { z21.s }, p1/Z, [x21]\n"
- ".inst 0xc0062c38 // mova { z24.d-z27.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
"ld1rw { z20.s }, p1/Z, [x20]\n"
- ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
- ".inst 0xc0062c40 // mova { z0.d-z3.d }, za.d[x9, #2]\n"
- ".inst 0xa060c72c // st1w { z12.s-z15.s }, pn9.b, [x25]\n"
- ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
".inst 0xc0062c70 // mova { z16.d-z19.d }, za.d[x9, #3]\n"
- ".inst 0xa061c738 // st1w { z24.s-z27.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc1b4caa4 // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
".inst 0xc1b4caa0 // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
- ".inst 0xa062c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xa063c330 // st1w { z16.s-z19.s }, p8, [x25, #0xc, MUL VL]\n"
"addvl x25, x25, #16\n"
"b 35f\n"
"34:" // Width 4: No activation
".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
- ".inst 0xa060c72c // st1w { z12.s-z15.s }, pn9.b, [x25]\n"
- ".inst 0xc0062c30 // mova { z16.d-z19.d }, za.d[x9, #1]\n"
- ".inst 0xa061c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa062c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xc0062c64 // mova { z4.d-z7.d }, za.d[x9, #3]\n"
+ ".inst 0xa060c72c // st1w { z12.s-z15.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xa063c324 // st1w { z4.s-z7.s }, p8, [x25, #0xc, MUL VL]\n"
"addvl x25, x25, #16\n"
"35:" // Width 4: Output done
- "subs x27, x27, #0x4\n"
+ "subs x26, x26, #0x4\n"
"sub %x[N], %x[N], x28, LSL #2\n"
"bgt 4b\n"
"36:" // Exit
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL.hpp
new file mode 100644
index 0000000000..a473be77f1
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL.hpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+#include "../std_transforms_sme.hpp"
+
+#define ARGLIST \
+ const __fp16 *, const __fp16 *, \
+ __fp16 *, size_t, size_t, \
+ const __fp16 *, Activation, bool
+
+namespace arm_gemm
+{
+void sme2_gemv_fp16_mla_16VL( ARGLIST );
+
+class cls_sme2_gemv_fp16_mla_16VL
+{
+public:
+ typedef __fp16 operand_type;
+ typedef __fp16 result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ static unsigned int out_width()
+ {
+ return sme::get_vector_length<__fp16>() * 16;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 1;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return false;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
+
+ StdTransformsSME<operand_type, result_type, 1, 16, 1> transforms = {};
+
+
+ // Default to the generic kernel
+ kern_type kernel=sme2_gemv_fp16_mla_16VL;
+ cls_sme2_gemv_fp16_mla_16VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2)
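Note on the new header above: out_width() encodes the "16VL" in the kernel name — sixteen SVE vectors of __fp16 per output block, so block width scales with the hardware vector length. A small sketch, assuming get_vector_length<__fp16>() returns the element count of one vector (vector bytes / 2):

    // sketch_out_width mirrors out_width() under the stated assumption;
    // vl_bytes is the SVE vector length in bytes (e.g. 32 on a 256-bit part).
    constexpr unsigned int sketch_out_width(unsigned int vl_bytes) {
        const unsigned int elems = vl_bytes / 2u; // __fp16 is two bytes wide
        return elems * 16u;                       // "16VL": sixteen vectors per block
    }
    static_assert(sketch_out_width(32) == 256, "256-bit SVE -> 256 fp16 outputs per block");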
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL/generic.cpp
new file mode 100644
index 0000000000..4d18cc4670
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp16_mla_16VL/generic.cpp
@@ -0,0 +1,776 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <limits>
+
+namespace arm_gemm {
+
+void sme2_gemv_fp16_mla_16VL (
+ const __fp16 *A_ptr, const __fp16 *B_ptr, __fp16 *output_ptr,
+ size_t N, size_t K,
+ const __fp16 *bias, Activation act, bool
+)
+{
+ struct KernelArgs {
+ __fp16 maxval = static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ __fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity());
+ const __fp16 *B_ptr = {};
+ size_t output_offset = {};
+ unsigned int input_initial_col = {};
+ } ka;
+
+ unsigned long flags=0;
+ ka.B_ptr = B_ptr;
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ ka.maxval = static_cast<__fp16>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ ka.minval = 0;
+ flags |= 0x2;
+ break;
+ }
+ __asm__ __volatile__(
+ "ptrue p8.b\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "mov x9, #0x0\n"
+ "cnth x28, ALL, MUL #4\n"
+ "mov x27, %x[B_ptr]\n"
+ "add x26, %x[N], x28\n"
+ "mov x25, %x[output_ptr]\n"
+ "sub x26, x26, #0x1\n"
+ "ptrue p1.b\n"
+ "udiv x26, x26, x28\n"
+ ".inst 0x25207811 // ptrue pn9.b\n"
+ "add x22, x26, #0x3\n"
+ "mov x21, #0x1\n"
+ "and x22, x22, #0xfffffffffffffffc\n"
+ "mul x22, x22, x28\n"
+ "mul x22, x22, %x[K]\n"
+ "lsl x22, x22, #0x1\n"
+ "1:" // RHS size check loop
+ "cmp x22, #0x200000\n"
+ "blt 2f\n"
+ "tbnz x22, #0, 3f\n"
+ "lsr x22, x22, #0x1\n"
+ "lsl x21, x21, #0x1\n"
+ "b 1b\n"
+ "2:" // RHS do prefetch
+ "lsl x20, x22, #0x26\n"
+ "sub x21, x21, #0x1\n"
+ "lsl x21, x21, #0x16\n"
+ "orr x22, x22, x20\n"
+ "orr x22, x22, x21\n"
+ ".inst 0xf8b64b7a // rprfm pldonce, x22, [x27]\n"
+ "3:" // RHS prefetch exit
+ "mov x24, %x[bias]\n"
+ "4:" // Column loop
+ "cmp x26, #0x4\n"
+ "bge 28f\n"
+ "cmp x26, #0x2\n"
+ "bgt 20f\n"
+ "beq 12f\n"
+ "mov x23, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x1\n"
+ "mov x20, %x[N]\n"
+ "mov x22, %x[K]\n"
+ ".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
+ ".inst 0x257467f0 // whilelt p8.h, XZR, x20, VLx4\n"
+ "cbz x24, 5f\n"
+ ".inst 0xa040a708 // ld1h { z8.h-z11.h }, pn9.b/Z, [x24]\n"
+ ".inst 0xc0042d00 // mova za.d[x9, #0], { z8.d-z11.d }\n"
+ "b 6f\n"
+ "5:" // Width 1: no bias
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "6:" // Width 1: setup done
+ "cmp x22, #0x8\n"
+ "ble 8f\n"
+ "7:" // Width 1: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x22\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "sub x22, x22, #0x8\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ "cmp x22, #0x8\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc113b280 // fmla za.h[x9, 0], { z20.h-z23.h }, z3.h[0]\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc113b388 // fmla za.h[x9, 0], { z28.h-z31.h }, z3.h[1]\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc113b500 // fmla za.h[x9, 0], { z8.h-z11.h }, z3.h[2]\n"
+ ".inst 0xc113b488 // fmla za.h[x9, 0], { z4.h-z7.h }, z3.h[3]\n"
+ ".inst 0xc113ba80 // fmla za.h[x9, 0], { z20.h-z23.h }, z3.h[4]\n"
+ ".inst 0xc113b988 // fmla za.h[x9, 0], { z12.h-z15.h }, z3.h[5]\n"
+ ".inst 0xc113bf80 // fmla za.h[x9, 0], { z28.h-z31.h }, z3.h[6]\n"
+ ".inst 0xc113be08 // fmla za.h[x9, 0], { z16.h-z19.h }, z3.h[7]\n"
+ "bgt 7b\n"
+ "8:" // Width 1: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x22\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "ld1rqh { z15.h }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb280 // fmla za.h[x9, 0], { z20.h-z23.h }, z15.h[0]\n"
+ "ble 9f\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb208 // fmla za.h[x9, 0], { z16.h-z19.h }, z15.h[1]\n"
+ "ble 9f\n"
+ ".inst 0xa040a761 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb400 // fmla za.h[x9, 0], { z0.h-z3.h }, z15.h[2]\n"
+ "ble 9f\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb788 // fmla za.h[x9, 0], { z28.h-z31.h }, z15.h[3]\n"
+ "ble 9f\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb880 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[4]\n"
+ "ble 9f\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fba08 // fmla za.h[x9, 0], { z16.h-z19.h }, z15.h[5]\n"
+ "ble 9f\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbc80 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[6]\n"
+ "ble 9f\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbf88 // fmla za.h[x9, 0], { z28.h-z31.h }, z15.h[7]\n"
+ "9:" // Width 1: Multiply loop: multiply skip
+ "tbz %x[flags], #1, 10f\n"
+ "add x21, %x[args_ptr], %[offset_min]\n"
+ "add x20, %x[args_ptr], %[offset_max]\n"
+ ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+ "ld1rh { z4.h }, p1/Z, [x21]\n"
+ "ld1rh { z21.h }, p1/Z, [x20]\n"
+ ".inst 0xc175c888 // fclamp { z8.h-z11.h }, z4.h, z21.h\n"
+ ".inst 0xa060a328 // st1h { z8.h-z11.h }, p8, [x25]\n"
+ "addvl x25, x25, #4\n"
+ "b 11f\n"
+ "10:" // Width 1: No activation
+ ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
+ ".inst 0xa060a320 // st1h { z0.h-z3.h }, p8, [x25]\n"
+ "addvl x25, x25, #4\n"
+ "11:" // Width 1: Output done
+ "b 36f\n"
+ "12:" // Width 2
+ "mov x23, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x1\n"
+ "sub x20, %x[N], x28\n"
+ "mov x22, %x[K]\n"
+ ".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
+ ".inst 0x257467f0 // whilelt p8.h, XZR, x20, VLx4\n"
+ "cbz x24, 13f\n"
+ ".inst 0xa040a71c // ld1h { z28.h-z31.h }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041a70c // ld1h { z12.h-z15.h }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc0042f80 // mova za.d[x9, #0], { z28.d-z31.d }\n"
+ ".inst 0xc0042d81 // mova za.d[x9, #1], { z12.d-z15.d }\n"
+ "b 14f\n"
+ "13:" // Width 2: no bias
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "14:" // Width 2: setup done
+ "cmp x22, #0x8\n"
+ "ble 16f\n"
+ "15:" // Width 2: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x22\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ "sub x22, x22, #0x8\n"
+ "ld1rqh { z3.h }, p0/Z, [x23]\n"
+ "cmp x22, #0x8\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc113b100 // fmla za.h[x9, 0], { z8.h-z11.h }, z3.h[0]\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc113b301 // fmla za.h[x9, 1], { z24.h-z27.h }, z3.h[0]\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc113b288 // fmla za.h[x9, 0], { z20.h-z23.h }, z3.h[1]\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc113b209 // fmla za.h[x9, 1], { z16.h-z19.h }, z3.h[1]\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc113b500 // fmla za.h[x9, 0], { z8.h-z11.h }, z3.h[2]\n"
+ ".inst 0xa041a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc113b581 // fmla za.h[x9, 1], { z12.h-z15.h }, z3.h[2]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc113b688 // fmla za.h[x9, 0], { z20.h-z23.h }, z3.h[3]\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc113b709 // fmla za.h[x9, 1], { z24.h-z27.h }, z3.h[3]\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc113bb80 // fmla za.h[x9, 0], { z28.h-z31.h }, z3.h[4]\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc113b881 // fmla za.h[x9, 1], { z4.h-z7.h }, z3.h[4]\n"
+ ".inst 0xc113b988 // fmla za.h[x9, 0], { z12.h-z15.h }, z3.h[5]\n"
+ ".inst 0xc113ba09 // fmla za.h[x9, 1], { z16.h-z19.h }, z3.h[5]\n"
+ ".inst 0xc113bd00 // fmla za.h[x9, 0], { z8.h-z11.h }, z3.h[6]\n"
+ ".inst 0xc113be81 // fmla za.h[x9, 1], { z20.h-z23.h }, z3.h[6]\n"
+ ".inst 0xc113bf08 // fmla za.h[x9, 0], { z24.h-z27.h }, z3.h[7]\n"
+ ".inst 0xc113bf89 // fmla za.h[x9, 1], { z28.h-z31.h }, z3.h[7]\n"
+ "bgt 15b\n"
+ "16:" // Width 2: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x22\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "ld1rqh { z15.h }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb380 // fmla za.h[x9, 0], { z28.h-z31.h }, z15.h[0]\n"
+ ".inst 0xc11fb101 // fmla za.h[x9, 1], { z8.h-z11.h }, z15.h[0]\n"
+ "ble 17f\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb088 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[1]\n"
+ ".inst 0xc11fb289 // fmla za.h[x9, 1], { z20.h-z23.h }, z15.h[1]\n"
+ "ble 17f\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb600 // fmla za.h[x9, 0], { z16.h-z19.h }, z15.h[2]\n"
+ ".inst 0xc11fb501 // fmla za.h[x9, 1], { z8.h-z11.h }, z15.h[2]\n"
+ "ble 17f\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb508 // fmla za.h[x9, 0], { z8.h-z11.h }, z15.h[3]\n"
+ ".inst 0xc11fb789 // fmla za.h[x9, 1], { z28.h-z31.h }, z15.h[3]\n"
+ "ble 17f\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb880 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[4]\n"
+ ".inst 0xc11fba01 // fmla za.h[x9, 1], { z16.h-z19.h }, z15.h[4]\n"
+ "ble 17f\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbb08 // fmla za.h[x9, 0], { z24.h-z27.h }, z15.h[5]\n"
+ ".inst 0xc11fb909 // fmla za.h[x9, 1], { z8.h-z11.h }, z15.h[5]\n"
+ "ble 17f\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbf80 // fmla za.h[x9, 0], { z28.h-z31.h }, z15.h[6]\n"
+ ".inst 0xc11fbd01 // fmla za.h[x9, 1], { z8.h-z11.h }, z15.h[6]\n"
+ "ble 17f\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbf08 // fmla za.h[x9, 0], { z24.h-z27.h }, z15.h[7]\n"
+ ".inst 0xc11fbf89 // fmla za.h[x9, 1], { z28.h-z31.h }, z15.h[7]\n"
+ "17:" // Width 2: Multiply loop: multiply skip
+ "tbz %x[flags], #1, 18f\n"
+ "add x21, %x[args_ptr], %[offset_min]\n"
+ "add x20, %x[args_ptr], %[offset_max]\n"
+ ".inst 0xc0062c10 // mova { z16.d-z19.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c3c // mova { z28.d-z31.d }, za.d[x9, #1]\n"
+ "ld1rh { z15.h }, p1/Z, [x21]\n"
+ "ld1rh { z27.h }, p1/Z, [x20]\n"
+ ".inst 0xc17bc9f0 // fclamp { z16.h-z19.h }, z15.h, z27.h\n"
+ ".inst 0xc17bc9fc // fclamp { z28.h-z31.h }, z15.h, z27.h\n"
+ ".inst 0xa060a730 // st1h { z16.h-z19.h }, pn9.b, [x25]\n"
+ ".inst 0xa061a33c // st1h { z28.h-z31.h }, p8, [x25, #0x4, MUL VL]\n"
+ "addvl x25, x25, #8\n"
+ "b 19f\n"
+ "18:" // Width 2: No activation
+ ".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c3c // mova { z28.d-z31.d }, za.d[x9, #1]\n"
+ ".inst 0xa060a72c // st1h { z12.h-z15.h }, pn9.b, [x25]\n"
+ ".inst 0xa061a33c // st1h { z28.h-z31.h }, p8, [x25, #0x4, MUL VL]\n"
+ "addvl x25, x25, #8\n"
+ "19:" // Width 2: Output done
+ "b 36f\n"
+ "20:" // Width 3
+ "mov x20, #0x2\n"
+ "mov x23, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x1\n"
+ "msub x20, x28, x20, %x[N]\n"
+ "mov x22, %x[K]\n"
+ ".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
+ ".inst 0x257467f0 // whilelt p8.h, XZR, x20, VLx4\n"
+ "cbz x24, 21f\n"
+ ".inst 0xa040a700 // ld1h { z0.h-z3.h }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041a710 // ld1h { z16.h-z19.h }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042a708 // ld1h { z8.h-z11.h }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
+ ".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
+ ".inst 0xc0042d02 // mova za.d[x9, #2], { z8.d-z11.d }\n"
+ "b 22f\n"
+ "21:" // Width 3: no bias
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "22:" // Width 3: setup done
+ "cmp x22, #0x8\n"
+ "ble 24f\n"
+ "23:" // Width 3: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x22\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
+ "sub x22, x22, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x23]\n"
+ "cmp x22, #0x8\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc110b180 // fmla za.h[x9, 0], { z12.h-z15.h }, z0.h[0]\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc110b281 // fmla za.h[x9, 1], { z20.h-z23.h }, z0.h[0]\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc110b102 // fmla za.h[x9, 2], { z8.h-z11.h }, z0.h[0]\n"
+ ".inst 0xa042a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc110b088 // fmla za.h[x9, 0], { z4.h-z7.h }, z0.h[1]\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc110b309 // fmla za.h[x9, 1], { z24.h-z27.h }, z0.h[1]\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc110b18a // fmla za.h[x9, 2], { z12.h-z15.h }, z0.h[1]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc110b680 // fmla za.h[x9, 0], { z20.h-z23.h }, z0.h[2]\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc110b781 // fmla za.h[x9, 1], { z28.h-z31.h }, z0.h[2]\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc110b502 // fmla za.h[x9, 2], { z8.h-z11.h }, z0.h[2]\n"
+ ".inst 0xa042a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc110b608 // fmla za.h[x9, 0], { z16.h-z19.h }, z0.h[3]\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc110b709 // fmla za.h[x9, 1], { z24.h-z27.h }, z0.h[3]\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc110b48a // fmla za.h[x9, 2], { z4.h-z7.h }, z0.h[3]\n"
+ ".inst 0xa042a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc110bb80 // fmla za.h[x9, 0], { z28.h-z31.h }, z0.h[4]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc110b901 // fmla za.h[x9, 1], { z8.h-z11.h }, z0.h[4]\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc110b882 // fmla za.h[x9, 2], { z4.h-z7.h }, z0.h[4]\n"
+ ".inst 0xa042a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc110b988 // fmla za.h[x9, 0], { z12.h-z15.h }, z0.h[5]\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc110b909 // fmla za.h[x9, 1], { z8.h-z11.h }, z0.h[5]\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc110bb0a // fmla za.h[x9, 2], { z24.h-z27.h }, z0.h[5]\n"
+ ".inst 0xa042a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc110be80 // fmla za.h[x9, 0], { z20.h-z23.h }, z0.h[6]\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc110be01 // fmla za.h[x9, 1], { z16.h-z19.h }, z0.h[6]\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc110bd82 // fmla za.h[x9, 2], { z12.h-z15.h }, z0.h[6]\n"
+ ".inst 0xa042a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc110bc88 // fmla za.h[x9, 0], { z4.h-z7.h }, z0.h[7]\n"
+ ".inst 0xc110be09 // fmla za.h[x9, 1], { z16.h-z19.h }, z0.h[7]\n"
+ ".inst 0xc110be8a // fmla za.h[x9, 2], { z20.h-z23.h }, z0.h[7]\n"
+ "bgt 23b\n"
+ "24:" // Width 3: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x22\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "ld1rqh { z15.h }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb080 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[0]\n"
+ ".inst 0xc11fb301 // fmla za.h[x9, 1], { z24.h-z27.h }, z15.h[0]\n"
+ ".inst 0xc11fb382 // fmla za.h[x9, 2], { z28.h-z31.h }, z15.h[0]\n"
+ "ble 25f\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb308 // fmla za.h[x9, 0], { z24.h-z27.h }, z15.h[1]\n"
+ ".inst 0xc11fb089 // fmla za.h[x9, 1], { z4.h-z7.h }, z15.h[1]\n"
+ ".inst 0xc11fb10a // fmla za.h[x9, 2], { z8.h-z11.h }, z15.h[1]\n"
+ "ble 25f\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb780 // fmla za.h[x9, 0], { z28.h-z31.h }, z15.h[2]\n"
+ ".inst 0xc11fb601 // fmla za.h[x9, 1], { z16.h-z19.h }, z15.h[2]\n"
+ ".inst 0xc11fb502 // fmla za.h[x9, 2], { z8.h-z11.h }, z15.h[2]\n"
+ "ble 25f\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb488 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[3]\n"
+ ".inst 0xc11fb709 // fmla za.h[x9, 1], { z24.h-z27.h }, z15.h[3]\n"
+ ".inst 0xc11fb60a // fmla za.h[x9, 2], { z16.h-z19.h }, z15.h[3]\n"
+ "ble 25f\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb900 // fmla za.h[x9, 0], { z8.h-z11.h }, z15.h[4]\n"
+ ".inst 0xc11fbb01 // fmla za.h[x9, 1], { z24.h-z27.h }, z15.h[4]\n"
+ ".inst 0xc11fba02 // fmla za.h[x9, 2], { z16.h-z19.h }, z15.h[4]\n"
+ "ble 25f\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb908 // fmla za.h[x9, 0], { z8.h-z11.h }, z15.h[5]\n"
+ ".inst 0xc11fba09 // fmla za.h[x9, 1], { z16.h-z19.h }, z15.h[5]\n"
+ ".inst 0xc11fbb0a // fmla za.h[x9, 2], { z24.h-z27.h }, z15.h[5]\n"
+ "ble 25f\n"
+ ".inst 0xa040a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbd00 // fmla za.h[x9, 0], { z8.h-z11.h }, z15.h[6]\n"
+ ".inst 0xc11fbe81 // fmla za.h[x9, 1], { z20.h-z23.h }, z15.h[6]\n"
+ ".inst 0xc11fbf02 // fmla za.h[x9, 2], { z24.h-z27.h }, z15.h[6]\n"
+ "ble 25f\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbf88 // fmla za.h[x9, 0], { z28.h-z31.h }, z15.h[7]\n"
+ ".inst 0xc11fbd09 // fmla za.h[x9, 1], { z8.h-z11.h }, z15.h[7]\n"
+ ".inst 0xc11fbe8a // fmla za.h[x9, 2], { z20.h-z23.h }, z15.h[7]\n"
+ "25:" // Width 3: Multiply loop: multiply skip
+ "tbz %x[flags], #1, 26f\n"
+ "add x21, %x[args_ptr], %[offset_min]\n"
+ "add x20, %x[args_ptr], %[offset_max]\n"
+ ".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
+ "ld1rh { z17.h }, p1/Z, [x21]\n"
+ ".inst 0xc0062c44 // mova { z4.d-z7.d }, za.d[x9, #2]\n"
+ "ld1rh { z16.h }, p1/Z, [x20]\n"
+ ".inst 0xc170ca2c // fclamp { z12.h-z15.h }, z17.h, z16.h\n"
+ ".inst 0xc170ca20 // fclamp { z0.h-z3.h }, z17.h, z16.h\n"
+ ".inst 0xa060a72c // st1h { z12.h-z15.h }, pn9.b, [x25]\n"
+ ".inst 0xc170ca24 // fclamp { z4.h-z7.h }, z17.h, z16.h\n"
+ ".inst 0xa061a720 // st1h { z0.h-z3.h }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062a324 // st1h { z4.h-z7.h }, p8, [x25, #0x8, MUL VL]\n"
+ "addvl x25, x25, #12\n"
+ "b 27f\n"
+ "26:" // Width 3: No activation
+ ".inst 0xc0062c10 // mova { z16.d-z19.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c38 // mova { z24.d-z27.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c54 // mova { z20.d-z23.d }, za.d[x9, #2]\n"
+ ".inst 0xa060a730 // st1h { z16.h-z19.h }, pn9.b, [x25]\n"
+ ".inst 0xa061a738 // st1h { z24.h-z27.h }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062a334 // st1h { z20.h-z23.h }, p8, [x25, #0x8, MUL VL]\n"
+ "addvl x25, x25, #12\n"
+ "27:" // Width 3: Output done
+ "b 36f\n"
+ "28:" // Width 4
+ "mov x20, #0x3\n"
+ "mov x23, %x[A_ptr]\n"
+ "lsl x21, %x[K], #0x1\n"
+ "msub x20, x28, x20, %x[N]\n"
+ "mov x22, %x[K]\n"
+ ".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
+ ".inst 0x257467f0 // whilelt p8.h, XZR, x20, VLx4\n"
+ "cbz x24, 29f\n"
+ ".inst 0xa040a714 // ld1h { z20.h-z23.h }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041a704 // ld1h { z4.h-z7.h }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042a708 // ld1h { z8.h-z11.h }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa043a700 // ld1h { z0.h-z3.h }, pn9.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xc0042e80 // mova za.d[x9, #0], { z20.d-z23.d }\n"
+ "addvl x24, x24, #16\n"
+ ".inst 0xc0042c81 // mova za.d[x9, #1], { z4.d-z7.d }\n"
+ ".inst 0xc0042d02 // mova za.d[x9, #2], { z8.d-z11.d }\n"
+ ".inst 0xc0042c03 // mova za.d[x9, #3], { z0.d-z3.d }\n"
+ "b 30f\n"
+ "29:" // Width 4: no bias
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "30:" // Width 4: setup done
+ "cmp x22, #0x8\n"
+ "ble 32f\n"
+ "31:" // Width 4: Multiply loop: Main loop head
+ "whilelt p0.h, XZR, x22\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ "sub x22, x22, #0x8\n"
+ "ld1rqh { z4.h }, p0/Z, [x23]\n"
+ "cmp x22, #0x8\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc114b300 // fmla za.h[x9, 0], { z24.h-z27.h }, z4.h[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc114b181 // fmla za.h[x9, 1], { z12.h-z15.h }, z4.h[0]\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc114b382 // fmla za.h[x9, 2], { z28.h-z31.h }, z4.h[0]\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc114b103 // fmla za.h[x9, 3], { z8.h-z11.h }, z4.h[0]\n"
+ ".inst 0xa042a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc114b208 // fmla za.h[x9, 0], { z16.h-z19.h }, z4.h[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc114b189 // fmla za.h[x9, 1], { z12.h-z15.h }, z4.h[1]\n"
+ ".inst 0xa040a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc114b30a // fmla za.h[x9, 2], { z24.h-z27.h }, z4.h[1]\n"
+ ".inst 0xa041a761 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc114b10b // fmla za.h[x9, 3], { z8.h-z11.h }, z4.h[1]\n"
+ ".inst 0xa042a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc114b580 // fmla za.h[x9, 0], { z12.h-z15.h }, z4.h[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc114b401 // fmla za.h[x9, 1], { z0.h-z3.h }, z4.h[2]\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc114b702 // fmla za.h[x9, 2], { z24.h-z27.h }, z4.h[2]\n"
+ ".inst 0xa041a761 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc114b503 // fmla za.h[x9, 3], { z8.h-z11.h }, z4.h[2]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc114b788 // fmla za.h[x9, 0], { z28.h-z31.h }, z4.h[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc114b409 // fmla za.h[x9, 1], { z0.h-z3.h }, z4.h[3]\n"
+ ".inst 0xa040a761 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc114b50a // fmla za.h[x9, 2], { z8.h-z11.h }, z4.h[3]\n"
+ ".inst 0xa041a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc114b58b // fmla za.h[x9, 3], { z12.h-z15.h }, z4.h[3]\n"
+ ".inst 0xa042a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc114b800 // fmla za.h[x9, 0], { z0.h-z3.h }, z4.h[4]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc114bb01 // fmla za.h[x9, 1], { z24.h-z27.h }, z4.h[4]\n"
+ ".inst 0xa040a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc114b982 // fmla za.h[x9, 2], { z12.h-z15.h }, z4.h[4]\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc114b903 // fmla za.h[x9, 3], { z8.h-z11.h }, z4.h[4]\n"
+ ".inst 0xa042a761 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc114ba88 // fmla za.h[x9, 0], { z20.h-z23.h }, z4.h[5]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc114ba09 // fmla za.h[x9, 1], { z16.h-z19.h }, z4.h[5]\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc114b80a // fmla za.h[x9, 2], { z0.h-z3.h }, z4.h[5]\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc114b98b // fmla za.h[x9, 3], { z12.h-z15.h }, z4.h[5]\n"
+ ".inst 0xa042a761 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc114bf00 // fmla za.h[x9, 0], { z24.h-z27.h }, z4.h[6]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc114be01 // fmla za.h[x9, 1], { z16.h-z19.h }, z4.h[6]\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xc114bc02 // fmla za.h[x9, 2], { z0.h-z3.h }, z4.h[6]\n"
+ ".inst 0xa041a76d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc114bd03 // fmla za.h[x9, 3], { z8.h-z11.h }, z4.h[6]\n"
+ ".inst 0xa042a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc114be08 // fmla za.h[x9, 0], { z16.h-z19.h }, z4.h[7]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc114bd89 // fmla za.h[x9, 1], { z12.h-z15.h }, z4.h[7]\n"
+ ".inst 0xc114be8a // fmla za.h[x9, 2], { z20.h-z23.h }, z4.h[7]\n"
+ ".inst 0xc114bd0b // fmla za.h[x9, 3], { z8.h-z11.h }, z4.h[7]\n"
+ "bgt 31b\n"
+ "32:" // Width 4: Multiply loop: Single iteration only
+ "whilelt p0.h, XZR, x22\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ "ld1rqh { z15.h }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ ".inst 0xa041a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a761 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc11fb080 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb201 // fmla za.h[x9, 1], { z16.h-z19.h }, z15.h[0]\n"
+ ".inst 0xc11fb102 // fmla za.h[x9, 2], { z8.h-z11.h }, z15.h[0]\n"
+ ".inst 0xc11fb003 // fmla za.h[x9, 3], { z0.h-z3.h }, z15.h[0]\n"
+ "ble 33f\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc11fb088 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb389 // fmla za.h[x9, 1], { z28.h-z31.h }, z15.h[1]\n"
+ ".inst 0xc11fb10a // fmla za.h[x9, 2], { z8.h-z11.h }, z15.h[1]\n"
+ ".inst 0xc11fb20b // fmla za.h[x9, 3], { z16.h-z19.h }, z15.h[1]\n"
+ "ble 33f\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc11fb480 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb501 // fmla za.h[x9, 1], { z8.h-z11.h }, z15.h[2]\n"
+ ".inst 0xc11fb682 // fmla za.h[x9, 2], { z20.h-z23.h }, z15.h[2]\n"
+ ".inst 0xc11fb603 // fmla za.h[x9, 3], { z16.h-z19.h }, z15.h[2]\n"
+ "ble 33f\n"
+ ".inst 0xa040a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a761 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc11fb788 // fmla za.h[x9, 0], { z28.h-z31.h }, z15.h[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fb509 // fmla za.h[x9, 1], { z8.h-z11.h }, z15.h[3]\n"
+ ".inst 0xc11fb48a // fmla za.h[x9, 2], { z4.h-z7.h }, z15.h[3]\n"
+ ".inst 0xc11fb40b // fmla za.h[x9, 3], { z0.h-z3.h }, z15.h[3]\n"
+ "ble 33f\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc11fbb00 // fmla za.h[x9, 0], { z24.h-z27.h }, z15.h[4]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fba81 // fmla za.h[x9, 1], { z20.h-z23.h }, z15.h[4]\n"
+ ".inst 0xc11fba02 // fmla za.h[x9, 2], { z16.h-z19.h }, z15.h[4]\n"
+ ".inst 0xc11fb883 // fmla za.h[x9, 3], { z4.h-z7.h }, z15.h[4]\n"
+ "ble 33f\n"
+ ".inst 0xa040a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc11fba08 // fmla za.h[x9, 0], { z16.h-z19.h }, z15.h[5]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbb89 // fmla za.h[x9, 1], { z28.h-z31.h }, z15.h[5]\n"
+ ".inst 0xc11fba8a // fmla za.h[x9, 2], { z20.h-z23.h }, z15.h[5]\n"
+ ".inst 0xc11fb88b // fmla za.h[x9, 3], { z4.h-z7.h }, z15.h[5]\n"
+ "ble 33f\n"
+ ".inst 0xa040a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27]\n"
+ "subs x22, x22, #0x1\n"
+ ".inst 0xa041a769 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a771 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc11fbc80 // fmla za.h[x9, 0], { z4.h-z7.h }, z15.h[6]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbd01 // fmla za.h[x9, 1], { z8.h-z11.h }, z15.h[6]\n"
+ ".inst 0xc11fbe82 // fmla za.h[x9, 2], { z20.h-z23.h }, z15.h[6]\n"
+ ".inst 0xc11fbe03 // fmla za.h[x9, 3], { z16.h-z19.h }, z15.h[6]\n"
+ "ble 33f\n"
+ ".inst 0xa040a779 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041a77d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042a765 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043a775 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc11fbf08 // fmla za.h[x9, 0], { z24.h-z27.h }, z15.h[7]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc11fbf89 // fmla za.h[x9, 1], { z28.h-z31.h }, z15.h[7]\n"
+ ".inst 0xc11fbc8a // fmla za.h[x9, 2], { z4.h-z7.h }, z15.h[7]\n"
+ ".inst 0xc11fbe8b // fmla za.h[x9, 3], { z20.h-z23.h }, z15.h[7]\n"
+ "33:" // Width 4: Multiply loop: multiply skip
+ "tbz %x[flags], #1, 34f\n"
+ "add x21, %x[args_ptr], %[offset_min]\n"
+ "add x20, %x[args_ptr], %[offset_max]\n"
+ ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ "ld1rh { z17.h }, p1/Z, [x21]\n"
+ ".inst 0xc0062c40 // mova { z0.d-z3.d }, za.d[x9, #2]\n"
+ "ld1rh { z16.h }, p1/Z, [x20]\n"
+ ".inst 0xc0062c68 // mova { z8.d-z11.d }, za.d[x9, #3]\n"
+ ".inst 0xc170ca24 // fclamp { z4.h-z7.h }, z17.h, z16.h\n"
+ ".inst 0xc170ca2c // fclamp { z12.h-z15.h }, z17.h, z16.h\n"
+ ".inst 0xc170ca20 // fclamp { z0.h-z3.h }, z17.h, z16.h\n"
+ ".inst 0xa060a724 // st1h { z4.h-z7.h }, pn9.b, [x25]\n"
+ ".inst 0xa061a72c // st1h { z12.h-z15.h }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc170ca28 // fclamp { z8.h-z11.h }, z17.h, z16.h\n"
+ ".inst 0xa062a720 // st1h { z0.h-z3.h }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa063a328 // st1h { z8.h-z11.h }, p8, [x25, #0xc, MUL VL]\n"
+ "addvl x25, x25, #16\n"
+ "b 35f\n"
+ "34:" // Width 4: No activation
+ ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+ ".inst 0xc0062c60 // mova { z0.d-z3.d }, za.d[x9, #3]\n"
+ ".inst 0xa060a728 // st1h { z8.h-z11.h }, pn9.b, [x25]\n"
+ ".inst 0xa061a72c // st1h { z12.h-z15.h }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062a730 // st1h { z16.h-z19.h }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa063a320 // st1h { z0.h-z3.h }, p8, [x25, #0xc, MUL VL]\n"
+ "addvl x25, x25, #16\n"
+ "35:" // Width 4: Output done
+ "subs x26, x26, #0x4\n"
+ "sub %x[N], %x[N], x28, LSL #2\n"
+ "bgt 4b\n"
+ "36:" // Exit
+ ".inst 0xd503467f // SMSTOP\n"
+ "ptrue p8.b\n"
+ : [N] "+&r" (N)
+ : [A_ptr] "r" (A_ptr), [B_ptr] "r" (B_ptr), [K] "r" (K), [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [output_ptr] "r" (output_ptr)
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+
+#endif // defined(ARM_COMPUTE_ENABLE_SME2)
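For orientation before the next file: the SME2 GEMV kernels in this patch compute out = a · B (+ bias) with an optional min/max activation clamp, accumulating into the ZA tile. The "Width 1" through "Width 4" paths keep one to four VLx4 column blocks of the output live in za[x9, #0]..za[x9, #3], and the whilelt-governed predicate (p8) masks the ragged final columns of the last block. Below is a minimal scalar C++ sketch of the same arithmetic; it assumes a plain K-major layout for B rather than the interleaved packed layout the real kernels consume, uses float throughout (the fp16 kernel above accumulates in half precision), and all names are hypothetical rather than the library's API:

    #include <algorithm>
    #include <cstddef>

    // Scalar reference for the GEMV these kernels implement:
    // out[n] = clamp(bias[n] + sum_k a[k] * b[k][n], minval, maxval)
    void gemv_reference(const float *a, const float *b, const float *bias,
                        float *out, size_t K, size_t N,
                        bool clamp, float minval, float maxval) {
        for (size_t n = 0; n < N; ++n) {
            // ZA is seeded with the bias when present, zeroed otherwise
            // (the "cbz x24, ..." / "zero { zad0..zad7 }" paths above).
            float acc = bias ? bias[n] : 0.0f;
            for (size_t k = 0; k < K; ++k) {
                // Corresponds to the fmla-into-ZA accumulation; the real
                // kernel broadcasts a[k] across lanes via ld1rqh/ld1rqw and
                // multiplies four consecutive B vectors per instruction.
                acc += a[k] * b[k * N + n];
            }
            // The optional fclamp applied when bit 1 of [flags] is set.
            out[n] = clamp ? std::min(std::max(acc, minval), maxval) : acc;
        }
    }

The four width-specialised paths exist because ZA can hold several VLx4 accumulator groups at once: processing up to four output blocks per pass amortises the reload of the A vector, while the single-iteration tails handle K values that are not a multiple of the unroll depth.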
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
index d2c260536d..aec02fa337 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32_mla_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -63,21 +63,21 @@ void sme2_gemv_fp32_mla_16VL (
__asm__ __volatile__(
"ptrue p8.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x28, ALL, MUL #4\n"
- "add x27, %x[N], x28\n"
- "sub x27, x27, #0x1\n"
- "udiv x27, x27, x28\n"
- "add x22, x27, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x28\n"
- "mul x22, x22, %x[K]\n"
"mov x9, #0x0\n"
- "mov x26, %x[B_ptr]\n"
+ "cntw x28, ALL, MUL #4\n"
+ "mov x27, %x[B_ptr]\n"
+ "add x26, %x[N], x28\n"
"mov x25, %x[output_ptr]\n"
+ "sub x26, x26, #0x1\n"
"ptrue p1.b\n"
+ "udiv x26, x26, x28\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "lsl x22, x22, #0x2\n"
+ "add x22, x26, #0x3\n"
"mov x21, #0x1\n"
+ "and x22, x22, #0xfffffffffffffffc\n"
+ "mul x22, x22, x28\n"
+ "mul x22, x22, %x[K]\n"
+ "lsl x22, x22, #0x2\n"
"1:" // RHS size check loop
"cmp x22, #0x200000\n"
"blt 2f\n"
@@ -91,13 +91,13 @@ void sme2_gemv_fp32_mla_16VL (
"lsl x21, x21, #0x16\n"
"orr x22, x22, x20\n"
"orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ ".inst 0xf8b64b7a // rprfm pldonce, x22, [x27]\n"
"3:" // RHS prefetch exit
"mov x24, %x[bias]\n"
"4:" // Column loop
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"bge 28f\n"
- "cmp x27, #0x2\n"
+ "cmp x26, #0x2\n"
"bgt 20f\n"
"beq 12f\n"
"mov x23, %x[A_ptr]\n"
@@ -107,8 +107,8 @@ void sme2_gemv_fp32_mla_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 5f\n"
- ".inst 0xa040c718 // ld1w { z24.s-z27.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042f00 // mova za.d[x9, #0], { z24.d-z27.d }\n"
+ ".inst 0xa040c714 // ld1w { z20.s-z23.s }, pn9.b/Z, [x24]\n"
+ ".inst 0xc0042e80 // mova za.d[x9, #0], { z20.d-z23.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -117,59 +117,59 @@ void sme2_gemv_fp32_mla_16VL (
"ble 8f\n"
"7:" // Width 1: Multiply loop: Main loop head
"whilelt p0.s, XZR, x22\n"
- "ld1rqw { z8.s }, p0/Z, [x23]\n"
+ ".inst 0xa040c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ "ld1rqw { z2.s }, p0/Z, [x23]\n"
"sub x22, x22, #0x4\n"
- ".inst 0xa040c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc158a280 // fmla za.s[x9, 0], { z20.s-z23.s }, z8.s[0]\n"
- "addvl x26, x26, #16\n"
- "cmp x22, #0x4\n"
- ".inst 0xa040c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc158a480 // fmla za.s[x9, 0], { z4.s-z7.s }, z8.s[1]\n"
- "addvl x26, x26, #16\n"
"add x23, x23, #0x10\n"
- ".inst 0xa040c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc158ab80 // fmla za.s[x9, 0], { z28.s-z31.s }, z8.s[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc158af00 // fmla za.s[x9, 0], { z24.s-z27.s }, z8.s[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ "cmp x22, #0x4\n"
+ ".inst 0xa040c779 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc152a380 // fmla za.s[x9, 0], { z28.s-z31.s }, z2.s[0]\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc152a600 // fmla za.s[x9, 0], { z16.s-z19.s }, z2.s[1]\n"
+ ".inst 0xc152ab00 // fmla za.s[x9, 0], { z24.s-z27.s }, z2.s[2]\n"
+ ".inst 0xc152ad80 // fmla za.s[x9, 0], { z12.s-z15.s }, z2.s[3]\n"
"bgt 7b\n"
"8:" // Width 1: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x22\n"
- "ld1rqw { z11.s }, p0/Z, [x23]\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xa040c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc15ba380 // fmla za.s[x9, 0], { z28.s-z31.s }, z11.s[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a180 // fmla za.s[x9, 0], { z12.s-z15.s }, z3.s[0]\n"
"ble 9f\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c765 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc15ba580 // fmla za.s[x9, 0], { z12.s-z15.s }, z11.s[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a480 // fmla za.s[x9, 0], { z4.s-z7.s }, z3.s[1]\n"
"ble 9f\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc15baa00 // fmla za.s[x9, 0], { z16.s-z19.s }, z11.s[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a980 // fmla za.s[x9, 0], { z12.s-z15.s }, z3.s[2]\n"
"ble 9f\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc15bac00 // fmla za.s[x9, 0], { z0.s-z3.s }, z11.s[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153ad00 // fmla za.s[x9, 0], { z8.s-z11.s }, z3.s[3]\n"
"9:" // Width 1: Multiply loop: multiply skip
"tbz %x[flags], #1, 10f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- "ld1rw { z3.s }, p1/Z, [x21]\n"
- "ld1rw { z29.s }, p1/Z, [x20]\n"
- ".inst 0xc1bdc868 // fclamp { z8.s-z11.s }, z3.s, z29.s\n"
- ".inst 0xa060c328 // st1w { z8.s-z11.s }, p8, [x25]\n"
+ ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
+ "ld1rw { z23.s }, p1/Z, [x21]\n"
+ "ld1rw { z22.s }, p1/Z, [x20]\n"
+ ".inst 0xc1b6cae0 // fclamp { z0.s-z3.s }, z23.s, z22.s\n"
+ ".inst 0xa060c320 // st1w { z0.s-z3.s }, p8, [x25]\n"
"addvl x25, x25, #4\n"
"b 11f\n"
"10:" // Width 1: No activation
- ".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
- ".inst 0xa060c32c // st1w { z12.s-z15.s }, p8, [x25]\n"
+ ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
+ ".inst 0xa060c320 // st1w { z0.s-z3.s }, p8, [x25]\n"
"addvl x25, x25, #4\n"
"11:" // Width 1: Output done
"b 36f\n"
@@ -181,10 +181,10 @@ void sme2_gemv_fp32_mla_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 13f\n"
- ".inst 0xa040c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c714 // ld1w { z20.s-z23.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042e81 // mova za.d[x9, #1], { z20.d-z23.d }\n"
+ ".inst 0xa040c71c // ld1w { z28.s-z31.s }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc0042f80 // mova za.d[x9, #0], { z28.d-z31.d }\n"
+ ".inst 0xc0042c81 // mova za.d[x9, #1], { z4.d-z7.d }\n"
"b 14f\n"
"13:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -193,80 +193,80 @@ void sme2_gemv_fp32_mla_16VL (
"ble 16f\n"
"15:" // Width 2: Multiply loop: Main loop head
"whilelt p0.s, XZR, x22\n"
- "ld1rqw { z9.s }, p0/Z, [x23]\n"
+ ".inst 0xa040c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27]\n"
"sub x22, x22, #0x4\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc159a180 // fmla za.s[x9, 0], { z12.s-z15.s }, z9.s[0]\n"
+ "ld1rqw { z1.s }, p0/Z, [x23]\n"
"cmp x22, #0x4\n"
"add x23, x23, #0x10\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc159a081 // fmla za.s[x9, 1], { z4.s-z7.s }, z9.s[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc159a580 // fmla za.s[x9, 0], { z12.s-z15.s }, z9.s[1]\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc159a481 // fmla za.s[x9, 1], { z4.s-z7.s }, z9.s[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc159ab00 // fmla za.s[x9, 0], { z24.s-z27.s }, z9.s[2]\n"
- ".inst 0xa041c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc159a801 // fmla za.s[x9, 1], { z0.s-z3.s }, z9.s[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc159ac00 // fmla za.s[x9, 0], { z0.s-z3.s }, z9.s[3]\n"
- ".inst 0xa041c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc159af81 // fmla za.s[x9, 1], { z28.s-z31.s }, z9.s[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xc151a380 // fmla za.s[x9, 0], { z28.s-z31.s }, z1.s[0]\n"
+ ".inst 0xa041c779 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc151a181 // fmla za.s[x9, 1], { z12.s-z15.s }, z1.s[0]\n"
+ ".inst 0xa040c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xc151a600 // fmla za.s[x9, 0], { z16.s-z19.s }, z1.s[1]\n"
+ ".inst 0xa041c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc151a701 // fmla za.s[x9, 1], { z24.s-z27.s }, z1.s[1]\n"
+ ".inst 0xc151ab80 // fmla za.s[x9, 0], { z28.s-z31.s }, z1.s[2]\n"
+ ".inst 0xc151a981 // fmla za.s[x9, 1], { z12.s-z15.s }, z1.s[2]\n"
+ ".inst 0xc151ad00 // fmla za.s[x9, 0], { z8.s-z11.s }, z1.s[3]\n"
+ ".inst 0xc151ae81 // fmla za.s[x9, 1], { z20.s-z23.s }, z1.s[3]\n"
"bgt 15b\n"
"16:" // Width 2: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x22\n"
- "ld1rqw { z11.s }, p0/Z, [x23]\n"
+ ".inst 0xa040c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc15ba180 // fmla za.s[x9, 0], { z12.s-z15.s }, z11.s[0]\n"
- ".inst 0xa041c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15ba001 // fmla za.s[x9, 1], { z0.s-z3.s }, z11.s[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a200 // fmla za.s[x9, 0], { z16.s-z19.s }, z3.s[0]\n"
+ ".inst 0xc153a381 // fmla za.s[x9, 1], { z28.s-z31.s }, z3.s[0]\n"
"ble 17f\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc15ba700 // fmla za.s[x9, 0], { z24.s-z27.s }, z11.s[1]\n"
- ".inst 0xa041c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15ba401 // fmla za.s[x9, 1], { z0.s-z3.s }, z11.s[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a680 // fmla za.s[x9, 0], { z20.s-z23.s }, z3.s[1]\n"
+ ".inst 0xc153a601 // fmla za.s[x9, 1], { z16.s-z19.s }, z3.s[1]\n"
"ble 17f\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c765 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc15ba980 // fmla za.s[x9, 0], { z12.s-z15.s }, z11.s[2]\n"
- ".inst 0xa041c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bab81 // fmla za.s[x9, 1], { z28.s-z31.s }, z11.s[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a880 // fmla za.s[x9, 0], { z4.s-z7.s }, z3.s[2]\n"
+ ".inst 0xc153aa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z3.s[2]\n"
"ble 17f\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc15bae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z11.s[3]\n"
- ".inst 0xa041c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bae81 // fmla za.s[x9, 1], { z20.s-z23.s }, z11.s[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153af80 // fmla za.s[x9, 0], { z28.s-z31.s }, z3.s[3]\n"
+ ".inst 0xc153ad81 // fmla za.s[x9, 1], { z12.s-z15.s }, z3.s[3]\n"
"17:" // Width 2: Multiply loop: multiply skip
"tbz %x[flags], #1, 18f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
- "ld1rw { z9.s }, p1/Z, [x21]\n"
- ".inst 0xc0062c24 // mova { z4.d-z7.d }, za.d[x9, #1]\n"
- "ld1rw { z8.s }, p1/Z, [x20]\n"
- ".inst 0xc1a8c920 // fclamp { z0.s-z3.s }, z9.s, z8.s\n"
- ".inst 0xa060c720 // st1w { z0.s-z3.s }, pn9.b, [x25]\n"
- ".inst 0xc1a8c924 // fclamp { z4.s-z7.s }, z9.s, z8.s\n"
- ".inst 0xa061c324 // st1w { z4.s-z7.s }, p8, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c28 // mova { z8.d-z11.d }, za.d[x9, #1]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
+ "ld1rw { z23.s }, p1/Z, [x20]\n"
+ ".inst 0xc1b7ca24 // fclamp { z4.s-z7.s }, z17.s, z23.s\n"
+ ".inst 0xc1b7ca28 // fclamp { z8.s-z11.s }, z17.s, z23.s\n"
+ ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c328 // st1w { z8.s-z11.s }, p8, [x25, #0x4, MUL VL]\n"
"addvl x25, x25, #8\n"
"b 19f\n"
"18:" // Width 2: No activation
- ".inst 0xc0062c10 // mova { z16.d-z19.d }, za.d[x9, #0]\n"
- ".inst 0xa060c730 // st1w { z16.s-z19.s }, pn9.b, [x25]\n"
- ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
- ".inst 0xa061c32c // st1w { z12.s-z15.s }, p8, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c30 // mova { z16.d-z19.d }, za.d[x9, #1]\n"
+ ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c330 // st1w { z16.s-z19.s }, p8, [x25, #0x4, MUL VL]\n"
"addvl x25, x25, #8\n"
"19:" // Width 2: Output done
"b 36f\n"
@@ -279,12 +279,12 @@ void sme2_gemv_fp32_mla_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 21f\n"
- ".inst 0xa040c718 // ld1w { z24.s-z27.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042f00 // mova za.d[x9, #0], { z24.d-z27.d }\n"
- ".inst 0xa041c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042c81 // mova za.d[x9, #1], { z4.d-z7.d }\n"
- ".inst 0xa042c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042e02 // mova za.d[x9, #2], { z16.d-z19.d }\n"
+ ".inst 0xa040c708 // ld1w { z8.s-z11.s }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xc0042d00 // mova za.d[x9, #0], { z8.d-z11.d }\n"
+ ".inst 0xc0042c01 // mova za.d[x9, #1], { z0.d-z3.d }\n"
+ ".inst 0xc0042c82 // mova za.d[x9, #2], { z4.d-z7.d }\n"
"b 22f\n"
"21:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -293,101 +293,101 @@ void sme2_gemv_fp32_mla_16VL (
"ble 24f\n"
"23:" // Width 3: Multiply loop: Main loop head
"whilelt p0.s, XZR, x22\n"
- "ld1rqw { z15.s }, p0/Z, [x23]\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
"sub x22, x22, #0x4\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fa000 // fmla za.s[x9, 0], { z0.s-z3.s }, z15.s[0]\n"
+ "ld1rqw { z3.s }, p0/Z, [x23]\n"
"cmp x22, #0x4\n"
"add x23, x23, #0x10\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fa081 // fmla za.s[x9, 1], { z4.s-z7.s }, z15.s[0]\n"
- ".inst 0xa042c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fa002 // fmla za.s[x9, 2], { z0.s-z3.s }, z15.s[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fa680 // fmla za.s[x9, 0], { z20.s-z23.s }, z15.s[1]\n"
- ".inst 0xa041c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fa681 // fmla za.s[x9, 1], { z20.s-z23.s }, z15.s[1]\n"
- ".inst 0xa042c749 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fa502 // fmla za.s[x9, 2], { z8.s-z11.s }, z15.s[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fab00 // fmla za.s[x9, 0], { z24.s-z27.s }, z15.s[2]\n"
- ".inst 0xa041c749 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fa901 // fmla za.s[x9, 1], { z8.s-z11.s }, z15.s[2]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15faa02 // fmla za.s[x9, 2], { z16.s-z19.s }, z15.s[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z15.s[3]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fae01 // fmla za.s[x9, 1], { z16.s-z19.s }, z15.s[3]\n"
- ".inst 0xa042c749 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fad02 // fmla za.s[x9, 2], { z8.s-z11.s }, z15.s[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c765 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a180 // fmla za.s[x9, 0], { z12.s-z15.s }, z3.s[0]\n"
+ ".inst 0xa040c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153a101 // fmla za.s[x9, 1], { z8.s-z11.s }, z3.s[0]\n"
+ ".inst 0xa041c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153a082 // fmla za.s[x9, 2], { z4.s-z7.s }, z3.s[0]\n"
+ ".inst 0xa042c779 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153a600 // fmla za.s[x9, 0], { z16.s-z19.s }, z3.s[1]\n"
+ ".inst 0xa041c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153a681 // fmla za.s[x9, 1], { z20.s-z23.s }, z3.s[1]\n"
+ ".inst 0xa042c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a702 // fmla za.s[x9, 2], { z24.s-z27.s }, z3.s[1]\n"
+ ".inst 0xa040c765 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153a980 // fmla za.s[x9, 0], { z12.s-z15.s }, z3.s[2]\n"
+ ".inst 0xa042c779 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153ab81 // fmla za.s[x9, 1], { z28.s-z31.s }, z3.s[2]\n"
+ ".inst 0xc153a902 // fmla za.s[x9, 2], { z8.s-z11.s }, z3.s[2]\n"
+ ".inst 0xc153ac80 // fmla za.s[x9, 0], { z4.s-z7.s }, z3.s[3]\n"
+ ".inst 0xc153ae81 // fmla za.s[x9, 1], { z20.s-z23.s }, z3.s[3]\n"
+ ".inst 0xc153af02 // fmla za.s[x9, 2], { z24.s-z27.s }, z3.s[3]\n"
"bgt 23b\n"
"24:" // Width 3: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x22\n"
- "ld1rqw { z11.s }, p0/Z, [x23]\n"
+ ".inst 0xa040c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xa040c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc15ba380 // fmla za.s[x9, 0], { z28.s-z31.s }, z11.s[0]\n"
- ".inst 0xa041c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15ba001 // fmla za.s[x9, 1], { z0.s-z3.s }, z11.s[0]\n"
- ".inst 0xa042c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15ba282 // fmla za.s[x9, 2], { z20.s-z23.s }, z11.s[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c765 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a280 // fmla za.s[x9, 0], { z20.s-z23.s }, z3.s[0]\n"
+ ".inst 0xc153a181 // fmla za.s[x9, 1], { z12.s-z15.s }, z3.s[0]\n"
+ ".inst 0xc153a082 // fmla za.s[x9, 2], { z4.s-z7.s }, z3.s[0]\n"
"ble 25f\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc15ba580 // fmla za.s[x9, 0], { z12.s-z15.s }, z11.s[1]\n"
- ".inst 0xa041c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15ba481 // fmla za.s[x9, 1], { z4.s-z7.s }, z11.s[1]\n"
- ".inst 0xa042c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15ba782 // fmla za.s[x9, 2], { z28.s-z31.s }, z11.s[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a680 // fmla za.s[x9, 0], { z20.s-z23.s }, z3.s[1]\n"
+ ".inst 0xc153a501 // fmla za.s[x9, 1], { z8.s-z11.s }, z3.s[1]\n"
+ ".inst 0xc153a602 // fmla za.s[x9, 2], { z16.s-z19.s }, z3.s[1]\n"
"ble 25f\n"
- ".inst 0xa040c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc15ba880 // fmla za.s[x9, 0], { z4.s-z7.s }, z11.s[2]\n"
- ".inst 0xa041c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15baa81 // fmla za.s[x9, 1], { z20.s-z23.s }, z11.s[2]\n"
- ".inst 0xa042c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15ba982 // fmla za.s[x9, 2], { z12.s-z15.s }, z11.s[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c779 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153ab80 // fmla za.s[x9, 0], { z28.s-z31.s }, z3.s[2]\n"
+ ".inst 0xc153ab01 // fmla za.s[x9, 1], { z24.s-z27.s }, z3.s[2]\n"
+ ".inst 0xc153a982 // fmla za.s[x9, 2], { z12.s-z15.s }, z3.s[2]\n"
"ble 25f\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc15bad80 // fmla za.s[x9, 0], { z12.s-z15.s }, z11.s[3]\n"
- ".inst 0xa041c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15bae81 // fmla za.s[x9, 1], { z20.s-z23.s }, z11.s[3]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bae02 // fmla za.s[x9, 2], { z16.s-z19.s }, z11.s[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153ad00 // fmla za.s[x9, 0], { z8.s-z11.s }, z3.s[3]\n"
+ ".inst 0xc153af81 // fmla za.s[x9, 1], { z28.s-z31.s }, z3.s[3]\n"
+ ".inst 0xc153ad82 // fmla za.s[x9, 2], { z12.s-z15.s }, z3.s[3]\n"
"25:" // Width 3: Multiply loop: multiply skip
"tbz %x[flags], #1, 26f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
- "ld1rw { z17.s }, p1/Z, [x21]\n"
- ".inst 0xc0062c28 // mova { z8.d-z11.d }, za.d[x9, #1]\n"
- "ld1rw { z16.s }, p1/Z, [x20]\n"
- ".inst 0xc1b0ca24 // fclamp { z4.s-z7.s }, z17.s, z16.s\n"
- ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
- ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
- ".inst 0xc1b0ca28 // fclamp { z8.s-z11.s }, z17.s, z16.s\n"
- ".inst 0xa061c728 // st1w { z8.s-z11.s }, pn9.b, [x25, #0x4, MUL VL]\n"
- ".inst 0xc1b0ca2c // fclamp { z12.s-z15.s }, z17.s, z16.s\n"
- ".inst 0xa062c32c // st1w { z12.s-z15.s }, p8, [x25, #0x8, MUL VL]\n"
+ ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ "ld1rw { z21.s }, p1/Z, [x21]\n"
+ ".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+ "ld1rw { z20.s }, p1/Z, [x20]\n"
+ ".inst 0xc1b4caa8 // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c330 // st1w { z16.s-z19.s }, p8, [x25, #0x8, MUL VL]\n"
"addvl x25, x25, #12\n"
"b 27f\n"
"26:" // Width 3: No activation
- ".inst 0xc0062c14 // mova { z20.d-z23.d }, za.d[x9, #0]\n"
- ".inst 0xa060c734 // st1w { z20.s-z23.s }, pn9.b, [x25]\n"
+ ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c5c // mova { z28.d-z31.d }, za.d[x9, #2]\n"
+ ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
".inst 0xa061c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x4, MUL VL]\n"
- ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
- ".inst 0xa062c32c // st1w { z12.s-z15.s }, p8, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa062c33c // st1w { z28.s-z31.s }, p8, [x25, #0x8, MUL VL]\n"
"addvl x25, x25, #12\n"
"27:" // Width 3: Output done
"b 36f\n"
@@ -401,14 +401,14 @@ void sme2_gemv_fp32_mla_16VL (
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 29f\n"
".inst 0xa040c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
- ".inst 0xa041c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
- ".inst 0xa042c70c // ld1w { z12.s-z15.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042d82 // mova za.d[x9, #2], { z12.d-z15.d }\n"
+ ".inst 0xa041c70c // ld1w { z12.s-z15.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042c71c // ld1w { z28.s-z31.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
".inst 0xa043c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0xc, MUL VL]\n"
- ".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
+ ".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
"addvl x24, x24, #16\n"
+ ".inst 0xc0042d81 // mova za.d[x9, #1], { z12.d-z15.d }\n"
+ ".inst 0xc0042f82 // mova za.d[x9, #2], { z28.d-z31.d }\n"
+ ".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
"b 30f\n"
"29:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -417,125 +417,125 @@ void sme2_gemv_fp32_mla_16VL (
"ble 32f\n"
"31:" // Width 4: Multiply loop: Main loop head
"whilelt p0.s, XZR, x22\n"
- "ld1rqw { z8.s }, p0/Z, [x23]\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
"sub x22, x22, #0x4\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc158a200 // fmla za.s[x9, 0], { z16.s-z19.s }, z8.s[0]\n"
+ "ld1rqw { z3.s }, p0/Z, [x23]\n"
"cmp x22, #0x4\n"
"add x23, x23, #0x10\n"
- ".inst 0xa041c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc158a181 // fmla za.s[x9, 1], { z12.s-z15.s }, z8.s[0]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc158a202 // fmla za.s[x9, 2], { z16.s-z19.s }, z8.s[0]\n"
- ".inst 0xa043c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc158a183 // fmla za.s[x9, 3], { z12.s-z15.s }, z8.s[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c74d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc158a580 // fmla za.s[x9, 0], { z12.s-z15.s }, z8.s[1]\n"
- ".inst 0xa041c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc158a681 // fmla za.s[x9, 1], { z20.s-z23.s }, z8.s[1]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc158a602 // fmla za.s[x9, 2], { z16.s-z19.s }, z8.s[1]\n"
- ".inst 0xa043c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc158a683 // fmla za.s[x9, 3], { z20.s-z23.s }, z8.s[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc158a880 // fmla za.s[x9, 0], { z4.s-z7.s }, z8.s[2]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc158aa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z8.s[2]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc158aa02 // fmla za.s[x9, 2], { z16.s-z19.s }, z8.s[2]\n"
- ".inst 0xa043c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc158a803 // fmla za.s[x9, 3], { z0.s-z3.s }, z8.s[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc158ae80 // fmla za.s[x9, 0], { z20.s-z23.s }, z8.s[3]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc158ae01 // fmla za.s[x9, 1], { z16.s-z19.s }, z8.s[3]\n"
- ".inst 0xa042c745 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc158ac82 // fmla za.s[x9, 2], { z4.s-z7.s }, z8.s[3]\n"
- ".inst 0xa043c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc158ae83 // fmla za.s[x9, 3], { z20.s-z23.s }, z8.s[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153a180 // fmla za.s[x9, 0], { z12.s-z15.s }, z3.s[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a281 // fmla za.s[x9, 1], { z20.s-z23.s }, z3.s[0]\n"
+ ".inst 0xa040c779 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153a202 // fmla za.s[x9, 2], { z16.s-z19.s }, z3.s[0]\n"
+ ".inst 0xa041c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153a103 // fmla za.s[x9, 3], { z8.s-z11.s }, z3.s[0]\n"
+ ".inst 0xa042c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043c765 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153a700 // fmla za.s[x9, 0], { z24.s-z27.s }, z3.s[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a581 // fmla za.s[x9, 1], { z12.s-z15.s }, z3.s[1]\n"
+ ".inst 0xa040c779 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153a502 // fmla za.s[x9, 2], { z8.s-z11.s }, z3.s[1]\n"
+ ".inst 0xa041c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153a483 // fmla za.s[x9, 3], { z4.s-z7.s }, z3.s[1]\n"
+ ".inst 0xa042c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043c765 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153ab00 // fmla za.s[x9, 0], { z24.s-z27.s }, z3.s[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a901 // fmla za.s[x9, 1], { z8.s-z11.s }, z3.s[2]\n"
+ ".inst 0xa040c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xc153aa02 // fmla za.s[x9, 2], { z16.s-z19.s }, z3.s[2]\n"
+ ".inst 0xa041c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc153a883 // fmla za.s[x9, 3], { z4.s-z7.s }, z3.s[2]\n"
+ ".inst 0xa042c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153ad00 // fmla za.s[x9, 0], { z8.s-z11.s }, z3.s[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153af81 // fmla za.s[x9, 1], { z28.s-z31.s }, z3.s[3]\n"
+ ".inst 0xc153ad82 // fmla za.s[x9, 2], { z12.s-z15.s }, z3.s[3]\n"
+ ".inst 0xc153ae83 // fmla za.s[x9, 3], { z20.s-z23.s }, z3.s[3]\n"
"bgt 31b\n"
"32:" // Width 4: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x22\n"
- "ld1rqw { z11.s }, p0/Z, [x23]\n"
+ ".inst 0xa040c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc15ba200 // fmla za.s[x9, 0], { z16.s-z19.s }, z11.s[0]\n"
- ".inst 0xa041c755 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15ba281 // fmla za.s[x9, 1], { z20.s-z23.s }, z11.s[0]\n"
- ".inst 0xa042c75d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15ba382 // fmla za.s[x9, 2], { z28.s-z31.s }, z11.s[0]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15ba203 // fmla za.s[x9, 3], { z16.s-z19.s }, z11.s[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c77d // ldnt1w { z28.s-z31.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153a200 // fmla za.s[x9, 0], { z16.s-z19.s }, z3.s[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a181 // fmla za.s[x9, 1], { z12.s-z15.s }, z3.s[0]\n"
+ ".inst 0xc153a382 // fmla za.s[x9, 2], { z28.s-z31.s }, z3.s[0]\n"
+ ".inst 0xc153a283 // fmla za.s[x9, 3], { z20.s-z23.s }, z3.s[0]\n"
"ble 33f\n"
- ".inst 0xa040c741 // ldnt1w { z0.s-z3.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc15ba400 // fmla za.s[x9, 0], { z0.s-z3.s }, z11.s[1]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15ba601 // fmla za.s[x9, 1], { z16.s-z19.s }, z11.s[1]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15ba602 // fmla za.s[x9, 2], { z16.s-z19.s }, z11.s[1]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15ba603 // fmla za.s[x9, 3], { z16.s-z19.s }, z11.s[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c765 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c779 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153a580 // fmla za.s[x9, 0], { z12.s-z15.s }, z3.s[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a481 // fmla za.s[x9, 1], { z4.s-z7.s }, z3.s[1]\n"
+ ".inst 0xc153a702 // fmla za.s[x9, 2], { z24.s-z27.s }, z3.s[1]\n"
+ ".inst 0xc153a683 // fmla za.s[x9, 3], { z20.s-z23.s }, z3.s[1]\n"
"ble 33f\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
"subs x22, x22, #0x1\n"
- ".inst 0xc15baa00 // fmla za.s[x9, 0], { z16.s-z19.s }, z11.s[2]\n"
- ".inst 0xa041c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15baa01 // fmla za.s[x9, 1], { z16.s-z19.s }, z11.s[2]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15baa02 // fmla za.s[x9, 2], { z16.s-z19.s }, z11.s[2]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15baa03 // fmla za.s[x9, 3], { z16.s-z19.s }, z11.s[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa041c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153a980 // fmla za.s[x9, 0], { z12.s-z15.s }, z3.s[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153a901 // fmla za.s[x9, 1], { z8.s-z11.s }, z3.s[2]\n"
+ ".inst 0xc153aa82 // fmla za.s[x9, 2], { z20.s-z23.s }, z3.s[2]\n"
+ ".inst 0xc153aa03 // fmla za.s[x9, 3], { z16.s-z19.s }, z3.s[2]\n"
"ble 33f\n"
- ".inst 0xa040c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26]\n"
- ".inst 0xc15bae00 // fmla za.s[x9, 0], { z16.s-z19.s }, z11.s[3]\n"
- ".inst 0xa041c759 // ldnt1w { z24.s-z27.s }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15baf01 // fmla za.s[x9, 1], { z24.s-z27.s }, z11.s[3]\n"
- ".inst 0xa042c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15bae02 // fmla za.s[x9, 2], { z16.s-z19.s }, z11.s[3]\n"
- ".inst 0xa043c751 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15bae03 // fmla za.s[x9, 3], { z16.s-z19.s }, z11.s[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa040c76d // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa041c769 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042c775 // ldnt1w { z20.s-z23.s }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043c771 // ldnt1w { z16.s-z19.s }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc153ad80 // fmla za.s[x9, 0], { z12.s-z15.s }, z3.s[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc153ad01 // fmla za.s[x9, 1], { z8.s-z11.s }, z3.s[3]\n"
+ ".inst 0xc153ae82 // fmla za.s[x9, 2], { z20.s-z23.s }, z3.s[3]\n"
+ ".inst 0xc153ae03 // fmla za.s[x9, 3], { z16.s-z19.s }, z3.s[3]\n"
"33:" // Width 4: Multiply loop: multiply skip
"tbz %x[flags], #1, 34f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
"ld1rw { z21.s }, p1/Z, [x21]\n"
- ".inst 0xc0062c38 // mova { z24.d-z27.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
"ld1rw { z20.s }, p1/Z, [x20]\n"
- ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
- ".inst 0xc0062c40 // mova { z0.d-z3.d }, za.d[x9, #2]\n"
- ".inst 0xa060c72c // st1w { z12.s-z15.s }, pn9.b, [x25]\n"
- ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
".inst 0xc0062c70 // mova { z16.d-z19.d }, za.d[x9, #3]\n"
- ".inst 0xa061c738 // st1w { z24.s-z27.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc1b4caa4 // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
".inst 0xc1b4caa0 // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
- ".inst 0xa062c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x8, MUL VL]\n"
+ ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xa063c330 // st1w { z16.s-z19.s }, p8, [x25, #0xc, MUL VL]\n"
"addvl x25, x25, #16\n"
"b 35f\n"
"34:" // Width 4: No activation
".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
- ".inst 0xa060c72c // st1w { z12.s-z15.s }, pn9.b, [x25]\n"
- ".inst 0xc0062c30 // mova { z16.d-z19.d }, za.d[x9, #1]\n"
- ".inst 0xa061c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
- ".inst 0xa062c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xc0062c64 // mova { z4.d-z7.d }, za.d[x9, #3]\n"
+ ".inst 0xa060c72c // st1w { z12.s-z15.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xa063c324 // st1w { z4.s-z7.s }, p8, [x25, #0xc, MUL VL]\n"
"addvl x25, x25, #16\n"
"35:" // Width 4: Output done
- "subs x27, x27, #0x4\n"
+ "subs x26, x26, #0x4\n"
"sub %x[N], %x[N], x28, LSL #2\n"
"bgt 4b\n"
"36:" // Exit
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
index c6fa11016f..7ed04da9df 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_fp32bf16fp32_dot_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,22 +64,22 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
__asm__ __volatile__(
"ptrue p8.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x10, ALL, MUL #4\n"
- "add x28, %x[N], x10\n"
- "sub x28, x28, #0x1\n"
- "udiv x28, x28, x10\n"
- "add x22, x28, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x10\n"
- "mul x22, x22, %x[K]\n"
- "mov x9, #0x0\n"
- "mov x27, #0x4\n"
+ "mov x10, #0x0\n"
+ "cntw x9, ALL, MUL #4\n"
+ "mov x28, #0x4\n"
+ "add x27, %x[N], x9\n"
"mov x26, %x[B_ptr]\n"
+ "sub x27, x27, #0x1\n"
"mov x25, %x[output_ptr]\n"
+ "udiv x27, x27, x9\n"
"ptrue p2.b\n"
+ "add x22, x27, #0x3\n"
".inst 0x25207811 // ptrue pn9.b\n"
- "lsl x22, x22, #0x1\n"
+ "and x22, x22, #0xfffffffffffffffc\n"
"mov x21, #0x1\n"
+ "mul x22, x22, x9\n"
+ "mul x22, x22, %x[K]\n"
+ "lsl x22, x22, #0x1\n"
"1:" // RHS size check loop
"cmp x22, #0x200000\n"
"blt 2f\n"
@@ -97,9 +97,9 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
"3:" // RHS prefetch exit
"mov x24, %x[bias]\n"
"4:" // Column loop
- "cmp x28, #0x4\n"
+ "cmp x27, #0x4\n"
"bge 28f\n"
- "cmp x28, #0x2\n"
+ "cmp x27, #0x2\n"
"bgt 20f\n"
"beq 12f\n"
"mov x23, %x[A_ptr]\n"
@@ -110,7 +110,7 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 5f\n"
".inst 0xa040c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042e00 // mova za.d[x9, #0], { z16.d-z19.d }\n"
+ ".inst 0xc0044e00 // mova za.d[x10, #0], { z16.d-z19.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -119,72 +119,72 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
"ble 8f\n"
"7:" // Width 1: Multiply loop: Main loop head
"whilelt p1.s, XZR, x22\n"
- "whilelt p0.s, x27, x22\n"
- "ld1rqw { z10.s }, p1/Z, [x23]\n"
- ".inst 0x658aa94a // bfcvt z10.h, p2/M, z10.s\n"
- "ld1rqw { z16.s }, p0/Z, [x23, #16]\n"
- ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
- "uzp1 z10.h, z10.h, z10.h\n"
- "sub x22, x22, #0x8\n"
- "uzp1 z16.h, z16.h, z16.h\n"
- "trn1 z10.d, z10.d, z16.d\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xc15ab198 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[0]\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "whilelt p0.s, x28, x22\n"
+ ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z8.s }, p1/Z, [x23]\n"
"addvl x26, x26, #16\n"
+ "sub x22, x22, #0x8\n"
+ "ld1rqw { z11.s }, p0/Z, [x23, #16]\n"
"cmp x22, #0x8\n"
- ".inst 0xc15ab598 // bfdot za.s[x9, 0], { z12.h-z15.h }, z10.h[1]\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- "addvl x26, x26, #16\n"
"add x23, x23, #0x20\n"
- ".inst 0xc15ab818 // bfdot za.s[x9, 0], { z0.h-z3.h }, z10.h[2]\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
+ "addvl x26, x26, #16\n"
+ ".inst 0x658aa908 // bfcvt z8.h, p2/M, z8.s\n"
+ ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "addvl x26, x26, #16\n"
+ ".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
+ ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15abf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z10.h[3]\n"
+ "uzp1 z8.h, z8.h, z8.h\n"
+ "uzp1 z11.h, z11.h, z11.h\n"
+ "trn1 z8.d, z8.d, z11.d\n"
+ ".inst 0xc158d098 // bfdot za.s[x10, 0], { z4.h-z7.h }, z8.h[0]\n"
+ ".inst 0xc158d698 // bfdot za.s[x10, 0], { z20.h-z23.h }, z8.h[1]\n"
+ ".inst 0xc158da18 // bfdot za.s[x10, 0], { z16.h-z19.h }, z8.h[2]\n"
+ ".inst 0xc158dd98 // bfdot za.s[x10, 0], { z12.h-z15.h }, z8.h[3]\n"
"bgt 7b\n"
"8:" // Width 1: Multiply loop: Single iteration only
"whilelt p1.s, XZR, x22\n"
- "whilelt p0.s, x27, x22\n"
- "ld1rqw { z15.s }, p1/Z, [x23]\n"
- ".inst 0x658aa9ef // bfcvt z15.h, p2/M, z15.s\n"
- "ld1rqw { z17.s }, p0/Z, [x23, #16]\n"
- ".inst 0x658aaa31 // bfcvt z17.h, p2/M, z17.s\n"
- "uzp1 z15.h, z15.h, z15.h\n"
+ "whilelt p0.s, x28, x22\n"
+ ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p1/Z, [x23]\n"
"subs x22, x22, #0x2\n"
- "uzp1 z17.h, z17.h, z17.h\n"
- "trn1 z15.d, z15.d, z17.d\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- "add x23, x23, #0x20\n"
- ".inst 0xc15fb218 // bfdot za.s[x9, 0], { z16.h-z19.h }, z15.h[0]\n"
"addvl x26, x26, #16\n"
+ "ld1rqw { z24.s }, p0/Z, [x23, #16]\n"
+ "add x23, x23, #0x20\n"
+ ".inst 0x658aa863 // bfcvt z3.h, p2/M, z3.s\n"
+ ".inst 0x658aab18 // bfcvt z24.h, p2/M, z24.s\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ "uzp1 z24.h, z24.h, z24.h\n"
+ "trn1 z3.d, z3.d, z24.d\n"
+ ".inst 0xc153d198 // bfdot za.s[x10, 0], { z12.h-z15.h }, z3.h[0]\n"
"ble 9f\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15fb418 // bfdot za.s[x9, 0], { z0.h-z3.h }, z15.h[1]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153d598 // bfdot za.s[x10, 0], { z12.h-z15.h }, z3.h[1]\n"
"ble 9f\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15fb898 // bfdot za.s[x9, 0], { z4.h-z7.h }, z15.h[2]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153d998 // bfdot za.s[x10, 0], { z12.h-z15.h }, z3.h[2]\n"
"ble 9f\n"
- ".inst 0xa040a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fbd18 // bfdot za.s[x9, 0], { z8.h-z11.h }, z15.h[3]\n"
+ ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153df98 // bfdot za.s[x10, 0], { z28.h-z31.h }, z3.h[3]\n"
"9:" // Width 1: Multiply loop: multiply skip
"tbz %x[flags], #1, 10f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
- "ld1rw { z8.s }, p2/Z, [x21]\n"
- "ld1rw { z26.s }, p2/Z, [x20]\n"
- ".inst 0xc1bac900 // fclamp { z0.s-z3.s }, z8.s, z26.s\n"
- ".inst 0xa060c320 // st1w { z0.s-z3.s }, p8, [x25]\n"
+ ".inst 0xc0064c1c // mova { z28.d-z31.d }, za.d[x10, #0]\n"
+ "ld1rw { z4.s }, p2/Z, [x21]\n"
+ "ld1rw { z18.s }, p2/Z, [x20]\n"
+ ".inst 0xc1b2c89c // fclamp { z28.s-z31.s }, z4.s, z18.s\n"
+ ".inst 0xa060c33c // st1w { z28.s-z31.s }, p8, [x25]\n"
"addvl x25, x25, #4\n"
"b 11f\n"
"10:" // Width 1: No activation
- ".inst 0xc0062c04 // mova { z4.d-z7.d }, za.d[x9, #0]\n"
+ ".inst 0xc0064c04 // mova { z4.d-z7.d }, za.d[x10, #0]\n"
".inst 0xa060c324 // st1w { z4.s-z7.s }, p8, [x25]\n"
"addvl x25, x25, #4\n"
"11:" // Width 1: Output done
@@ -192,15 +192,15 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
"12:" // Width 2
"mov x23, %x[A_ptr]\n"
"lsl x21, %x[K], #0x2\n"
- "sub x20, %x[N], x10\n"
+ "sub x20, %x[N], x9\n"
"mov x22, %x[K]\n"
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 13f\n"
- ".inst 0xa040c718 // ld1w { z24.s-z27.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042f00 // mova za.d[x9, #0], { z24.d-z27.d }\n"
- ".inst 0xa041c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
+ ".inst 0xa040c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc0044e00 // mova za.d[x10, #0], { z16.d-z19.d }\n"
+ ".inst 0xc0044c01 // mova za.d[x10, #1], { z0.d-z3.d }\n"
"b 14f\n"
"13:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -209,94 +209,94 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
"ble 16f\n"
"15:" // Width 2: Multiply loop: Main loop head
"whilelt p1.s, XZR, x22\n"
- "whilelt p0.s, x27, x22\n"
- "ld1rqw { z13.s }, p1/Z, [x23]\n"
- ".inst 0x658aa9ad // bfcvt z13.h, p2/M, z13.s\n"
- "ld1rqw { z27.s }, p0/Z, [x23, #16]\n"
- ".inst 0x658aab7b // bfcvt z27.h, p2/M, z27.s\n"
- "uzp1 z13.h, z13.h, z13.h\n"
- "sub x22, x22, #0x8\n"
- "uzp1 z27.h, z27.h, z27.h\n"
- "trn1 z13.d, z13.d, z27.d\n"
+ "whilelt p0.s, x28, x22\n"
".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z8.s }, p1/Z, [x23]\n"
+ "sub x22, x22, #0x8\n"
+ "ld1rqw { z9.s }, p0/Z, [x23, #16]\n"
"cmp x22, #0x8\n"
- ".inst 0xa041a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15db298 // bfdot za.s[x9, 0], { z20.h-z23.h }, z13.h[0]\n"
- "addvl x26, x26, #16\n"
"add x23, x23, #0x20\n"
- ".inst 0xc15db019 // bfdot za.s[x9, 1], { z0.h-z3.h }, z13.h[0]\n"
- ".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15db698 // bfdot za.s[x9, 0], { z20.h-z23.h }, z13.h[1]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15db719 // bfdot za.s[x9, 1], { z24.h-z27.h }, z13.h[1]\n"
- ".inst 0xa040a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26]\n"
+ ".inst 0x658aa908 // bfcvt z8.h, p2/M, z8.s\n"
+ ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
+ ".inst 0x658aa929 // bfcvt z9.h, p2/M, z9.s\n"
".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15db918 // bfdot za.s[x9, 0], { z8.h-z11.h }, z13.h[2]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15dba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z13.h[2]\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15dbc18 // bfdot za.s[x9, 0], { z0.h-z3.h }, z13.h[3]\n"
+ ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15dbc99 // bfdot za.s[x9, 1], { z4.h-z7.h }, z13.h[3]\n"
+ "uzp1 z8.h, z8.h, z8.h\n"
+ ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ "uzp1 z9.h, z9.h, z9.h\n"
+ "trn1 z8.d, z8.d, z9.d\n"
+ ".inst 0xc158d298 // bfdot za.s[x10, 0], { z20.h-z23.h }, z8.h[0]\n"
+ ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ ".inst 0xc158d319 // bfdot za.s[x10, 1], { z24.h-z27.h }, z8.h[0]\n"
+ ".inst 0xc158d418 // bfdot za.s[x10, 0], { z0.h-z3.h }, z8.h[1]\n"
+ ".inst 0xc158d619 // bfdot za.s[x10, 1], { z16.h-z19.h }, z8.h[1]\n"
+ ".inst 0xc158d898 // bfdot za.s[x10, 0], { z4.h-z7.h }, z8.h[2]\n"
+ ".inst 0xc158d999 // bfdot za.s[x10, 1], { z12.h-z15.h }, z8.h[2]\n"
+ ".inst 0xc158df98 // bfdot za.s[x10, 0], { z28.h-z31.h }, z8.h[3]\n"
+ ".inst 0xc158de99 // bfdot za.s[x10, 1], { z20.h-z23.h }, z8.h[3]\n"
"bgt 15b\n"
"16:" // Width 2: Multiply loop: Single iteration only
"whilelt p1.s, XZR, x22\n"
- "whilelt p0.s, x27, x22\n"
- "ld1rqw { z15.s }, p1/Z, [x23]\n"
- ".inst 0x658aa9ef // bfcvt z15.h, p2/M, z15.s\n"
- "ld1rqw { z5.s }, p0/Z, [x23, #16]\n"
- ".inst 0x658aa8a5 // bfcvt z5.h, p2/M, z5.s\n"
- "uzp1 z15.h, z15.h, z15.h\n"
+ "whilelt p0.s, x28, x22\n"
+ ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p1/Z, [x23]\n"
"subs x22, x22, #0x2\n"
- "uzp1 z5.h, z5.h, z5.h\n"
- "trn1 z15.d, z15.d, z5.d\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z16.s }, p0/Z, [x23, #16]\n"
"add x23, x23, #0x20\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb218 // bfdot za.s[x9, 0], { z16.h-z19.h }, z15.h[0]\n"
+ ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15fb319 // bfdot za.s[x9, 1], { z24.h-z27.h }, z15.h[0]\n"
+ ".inst 0x658aa863 // bfcvt z3.h, p2/M, z3.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ "uzp1 z16.h, z16.h, z16.h\n"
+ "trn1 z3.d, z3.d, z16.d\n"
+ ".inst 0xc153d318 // bfdot za.s[x10, 0], { z24.h-z27.h }, z3.h[0]\n"
+ ".inst 0xc153d099 // bfdot za.s[x10, 1], { z4.h-z7.h }, z3.h[0]\n"
"ble 17f\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15fb798 // bfdot za.s[x9, 0], { z28.h-z31.h }, z15.h[1]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z15.h[1]\n"
+ ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153d498 // bfdot za.s[x10, 0], { z4.h-z7.h }, z3.h[1]\n"
+ ".inst 0xc153d719 // bfdot za.s[x10, 1], { z24.h-z27.h }, z3.h[1]\n"
"ble 17f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15fba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z15.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z15.h[2]\n"
+ ".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153d898 // bfdot za.s[x10, 0], { z4.h-z7.h }, z3.h[2]\n"
+ ".inst 0xc153d999 // bfdot za.s[x10, 1], { z12.h-z15.h }, z3.h[2]\n"
"ble 17f\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fbf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z15.h[3]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fbd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z15.h[3]\n"
+ ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153dc98 // bfdot za.s[x10, 0], { z4.h-z7.h }, z3.h[3]\n"
+ ".inst 0xc153de99 // bfdot za.s[x10, 1], { z20.h-z23.h }, z3.h[3]\n"
"17:" // Width 2: Multiply loop: multiply skip
"tbz %x[flags], #1, 18f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c14 // mova { z20.d-z23.d }, za.d[x9, #0]\n"
- "ld1rw { z11.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ ".inst 0xc0064c08 // mova { z8.d-z11.d }, za.d[x10, #0]\n"
+ ".inst 0xc0064c2c // mova { z12.d-z15.d }, za.d[x10, #1]\n"
+ "ld1rw { z3.s }, p2/Z, [x21]\n"
"ld1rw { z28.s }, p2/Z, [x20]\n"
- ".inst 0xc1bcc974 // fclamp { z20.s-z23.s }, z11.s, z28.s\n"
- ".inst 0xa060c734 // st1w { z20.s-z23.s }, pn9.b, [x25]\n"
- ".inst 0xc1bcc96c // fclamp { z12.s-z15.s }, z11.s, z28.s\n"
+ ".inst 0xc1bcc868 // fclamp { z8.s-z11.s }, z3.s, z28.s\n"
+ ".inst 0xc1bcc86c // fclamp { z12.s-z15.s }, z3.s, z28.s\n"
+ ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
".inst 0xa061c32c // st1w { z12.s-z15.s }, p8, [x25, #0x4, MUL VL]\n"
"addvl x25, x25, #8\n"
"b 19f\n"
"18:" // Width 2: No activation
- ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
- ".inst 0xa060c720 // st1w { z0.s-z3.s }, pn9.b, [x25]\n"
- ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
- ".inst 0xa061c320 // st1w { z0.s-z3.s }, p8, [x25, #0x4, MUL VL]\n"
+ ".inst 0xc0064c04 // mova { z4.d-z7.d }, za.d[x10, #0]\n"
+ ".inst 0xc0064c38 // mova { z24.d-z27.d }, za.d[x10, #1]\n"
+ ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c338 // st1w { z24.s-z27.s }, p8, [x25, #0x4, MUL VL]\n"
"addvl x25, x25, #8\n"
"19:" // Width 2: Output done
"b 36f\n"
@@ -304,17 +304,17 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
"mov x20, #0x2\n"
"mov x23, %x[A_ptr]\n"
"lsl x21, %x[K], #0x2\n"
- "msub x20, x10, x20, %x[N]\n"
+ "msub x20, x9, x20, %x[N]\n"
"mov x22, %x[K]\n"
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 21f\n"
- ".inst 0xa040c71c // ld1w { z28.s-z31.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042f80 // mova za.d[x9, #0], { z28.d-z31.d }\n"
- ".inst 0xa041c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042c81 // mova za.d[x9, #1], { z4.d-z7.d }\n"
- ".inst 0xa042c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042c82 // mova za.d[x9, #2], { z4.d-z7.d }\n"
+ ".inst 0xa040c700 // ld1w { z0.s-z3.s }, pn9.b/Z, [x24]\n"
+ ".inst 0xa041c718 // ld1w { z24.s-z27.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042c71c // ld1w { z28.s-z31.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xc0044c00 // mova za.d[x10, #0], { z0.d-z3.d }\n"
+ ".inst 0xc0044f01 // mova za.d[x10, #1], { z24.d-z27.d }\n"
+ ".inst 0xc0044f82 // mova za.d[x10, #2], { z28.d-z31.d }\n"
"b 22f\n"
"21:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -323,114 +323,114 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
"ble 24f\n"
"23:" // Width 3: Multiply loop: Main loop head
"whilelt p1.s, XZR, x22\n"
- "whilelt p0.s, x27, x22\n"
- "ld1rqw { z14.s }, p1/Z, [x23]\n"
- ".inst 0x658aa9ce // bfcvt z14.h, p2/M, z14.s\n"
- "ld1rqw { z16.s }, p0/Z, [x23, #16]\n"
- ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
- "uzp1 z14.h, z14.h, z14.h\n"
+ "whilelt p0.s, x28, x22\n"
+ ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p1/Z, [x23]\n"
"sub x22, x22, #0x8\n"
- "uzp1 z16.h, z16.h, z16.h\n"
- "trn1 z14.d, z14.d, z16.d\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z0.s }, p0/Z, [x23, #16]\n"
"cmp x22, #0x8\n"
- ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15eb098 // bfdot za.s[x9, 0], { z4.h-z7.h }, z14.h[0]\n"
"add x23, x23, #0x20\n"
- ".inst 0xa042a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15eb319 // bfdot za.s[x9, 1], { z24.h-z27.h }, z14.h[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xc15eb01a // bfdot za.s[x9, 2], { z0.h-z3.h }, z14.h[0]\n"
- ".inst 0xa040a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15eb518 // bfdot za.s[x9, 0], { z8.h-z11.h }, z14.h[1]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15eb499 // bfdot za.s[x9, 1], { z4.h-z7.h }, z14.h[1]\n"
+ ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0x658aa863 // bfcvt z3.h, p2/M, z3.s\n"
+ ".inst 0xa042a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15eb61a // bfdot za.s[x9, 2], { z16.h-z19.h }, z14.h[1]\n"
- ".inst 0xa040a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26]\n"
+ ".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
+ ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
".inst 0xa041a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15eb818 // bfdot za.s[x9, 0], { z0.h-z3.h }, z14.h[2]\n"
- ".inst 0xa042a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15ebb99 // bfdot za.s[x9, 1], { z28.h-z31.h }, z14.h[2]\n"
+ ".inst 0xa042a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15eb81a // bfdot za.s[x9, 2], { z0.h-z3.h }, z14.h[2]\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15ebf18 // bfdot za.s[x9, 0], { z24.h-z27.h }, z14.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15ebf99 // bfdot za.s[x9, 1], { z28.h-z31.h }, z14.h[3]\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ ".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "trn1 z3.d, z3.d, z0.d\n"
+ ".inst 0xc153d198 // bfdot za.s[x10, 0], { z12.h-z15.h }, z3.h[0]\n"
+ ".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xc153d319 // bfdot za.s[x10, 1], { z24.h-z27.h }, z3.h[0]\n"
+ ".inst 0xa042a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15ebe1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z14.h[3]\n"
+ ".inst 0xc153d09a // bfdot za.s[x10, 2], { z4.h-z7.h }, z3.h[0]\n"
+ ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xc153d618 // bfdot za.s[x10, 0], { z16.h-z19.h }, z3.h[1]\n"
+ ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xc153d799 // bfdot za.s[x10, 1], { z28.h-z31.h }, z3.h[1]\n"
+ ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ ".inst 0xc153d51a // bfdot za.s[x10, 2], { z8.h-z11.h }, z3.h[1]\n"
+ ".inst 0xc153da98 // bfdot za.s[x10, 0], { z20.h-z23.h }, z3.h[2]\n"
+ ".inst 0xc153d999 // bfdot za.s[x10, 1], { z12.h-z15.h }, z3.h[2]\n"
+ ".inst 0xc153db1a // bfdot za.s[x10, 2], { z24.h-z27.h }, z3.h[2]\n"
+ ".inst 0xc153dc98 // bfdot za.s[x10, 0], { z4.h-z7.h }, z3.h[3]\n"
+ ".inst 0xc153de19 // bfdot za.s[x10, 1], { z16.h-z19.h }, z3.h[3]\n"
+ ".inst 0xc153df9a // bfdot za.s[x10, 2], { z28.h-z31.h }, z3.h[3]\n"
"bgt 23b\n"
"24:" // Width 3: Multiply loop: Single iteration only
"whilelt p1.s, XZR, x22\n"
- "whilelt p0.s, x27, x22\n"
- "ld1rqw { z15.s }, p1/Z, [x23]\n"
- ".inst 0x658aa9ef // bfcvt z15.h, p2/M, z15.s\n"
- "ld1rqw { z31.s }, p0/Z, [x23, #16]\n"
- ".inst 0x658aabff // bfcvt z31.h, p2/M, z31.s\n"
- "uzp1 z15.h, z15.h, z15.h\n"
+ "whilelt p0.s, x28, x22\n"
+ ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p1/Z, [x23]\n"
"subs x22, x22, #0x2\n"
- "uzp1 z31.h, z31.h, z31.h\n"
- "trn1 z15.d, z15.d, z31.d\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z20.s }, p0/Z, [x23, #16]\n"
"add x23, x23, #0x20\n"
- ".inst 0xa041a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb218 // bfdot za.s[x9, 0], { z16.h-z19.h }, z15.h[0]\n"
- ".inst 0xa042a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fb019 // bfdot za.s[x9, 1], { z0.h-z3.h }, z15.h[0]\n"
+ ".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0x658aa863 // bfcvt z3.h, p2/M, z3.s\n"
+ ".inst 0xa042a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15fb09a // bfdot za.s[x9, 2], { z4.h-z7.h }, z15.h[0]\n"
+ ".inst 0x658aaa94 // bfcvt z20.h, p2/M, z20.s\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ "uzp1 z20.h, z20.h, z20.h\n"
+ "trn1 z3.d, z3.d, z20.d\n"
+ ".inst 0xc153d098 // bfdot za.s[x10, 0], { z4.h-z7.h }, z3.h[0]\n"
+ ".inst 0xc153d199 // bfdot za.s[x10, 1], { z12.h-z15.h }, z3.h[0]\n"
+ ".inst 0xc153d31a // bfdot za.s[x10, 2], { z24.h-z27.h }, z3.h[0]\n"
"ble 25f\n"
".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15fb698 // bfdot za.s[x9, 0], { z20.h-z23.h }, z15.h[1]\n"
- ".inst 0xa041a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb699 // bfdot za.s[x9, 1], { z20.h-z23.h }, z15.h[1]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fb61a // bfdot za.s[x9, 2], { z16.h-z19.h }, z15.h[1]\n"
+ ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153d698 // bfdot za.s[x10, 0], { z20.h-z23.h }, z3.h[1]\n"
+ ".inst 0xc153d619 // bfdot za.s[x10, 1], { z16.h-z19.h }, z3.h[1]\n"
+ ".inst 0xc153d79a // bfdot za.s[x10, 2], { z28.h-z31.h }, z3.h[1]\n"
"ble 25f\n"
- ".inst 0xa040a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15fb898 // bfdot za.s[x9, 0], { z4.h-z7.h }, z15.h[2]\n"
- ".inst 0xa041a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb819 // bfdot za.s[x9, 1], { z0.h-z3.h }, z15.h[2]\n"
- ".inst 0xa042a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fbb1a // bfdot za.s[x9, 2], { z24.h-z27.h }, z15.h[2]\n"
+ ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa042a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153db98 // bfdot za.s[x10, 0], { z28.h-z31.h }, z3.h[2]\n"
+ ".inst 0xc153d919 // bfdot za.s[x10, 1], { z8.h-z11.h }, z3.h[2]\n"
+ ".inst 0xc153d99a // bfdot za.s[x10, 2], { z12.h-z15.h }, z3.h[2]\n"
"ble 25f\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fbf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z15.h[3]\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fbd19 // bfdot za.s[x9, 1], { z8.h-z11.h }, z15.h[3]\n"
- ".inst 0xa042a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fbc9a // bfdot za.s[x9, 2], { z4.h-z7.h }, z15.h[3]\n"
+ ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa042a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153dd98 // bfdot za.s[x10, 0], { z12.h-z15.h }, z3.h[3]\n"
+ ".inst 0xc153df19 // bfdot za.s[x10, 1], { z24.h-z27.h }, z3.h[3]\n"
+ ".inst 0xc153dd1a // bfdot za.s[x10, 2], { z8.h-z11.h }, z3.h[3]\n"
"25:" // Width 3: Multiply loop: multiply skip
"tbz %x[flags], #1, 26f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+ ".inst 0xc0064c08 // mova { z8.d-z11.d }, za.d[x10, #0]\n"
+ ".inst 0xc0064c20 // mova { z0.d-z3.d }, za.d[x10, #1]\n"
"ld1rw { z17.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c24 // mova { z4.d-z7.d }, za.d[x9, #1]\n"
+ ".inst 0xc0064c44 // mova { z4.d-z7.d }, za.d[x10, #2]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
- ".inst 0xc1b0ca3c // fclamp { z28.s-z31.s }, z17.s, z16.s\n"
- ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
- ".inst 0xa060c73c // st1w { z28.s-z31.s }, pn9.b, [x25]\n"
+ ".inst 0xc1b0ca28 // fclamp { z8.s-z11.s }, z17.s, z16.s\n"
+ ".inst 0xc1b0ca20 // fclamp { z0.s-z3.s }, z17.s, z16.s\n"
".inst 0xc1b0ca24 // fclamp { z4.s-z7.s }, z17.s, z16.s\n"
- ".inst 0xa061c724 // st1w { z4.s-z7.s }, pn9.b, [x25, #0x4, MUL VL]\n"
- ".inst 0xc1b0ca2c // fclamp { z12.s-z15.s }, z17.s, z16.s\n"
- ".inst 0xa062c32c // st1w { z12.s-z15.s }, p8, [x25, #0x8, MUL VL]\n"
+ ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c324 // st1w { z4.s-z7.s }, p8, [x25, #0x8, MUL VL]\n"
"addvl x25, x25, #12\n"
"b 27f\n"
"26:" // Width 3: No activation
- ".inst 0xc0062c00 // mova { z0.d-z3.d }, za.d[x9, #0]\n"
- ".inst 0xa060c720 // st1w { z0.s-z3.s }, pn9.b, [x25]\n"
- ".inst 0xc0062c30 // mova { z16.d-z19.d }, za.d[x9, #1]\n"
- ".inst 0xa061c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x4, MUL VL]\n"
- ".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+ ".inst 0xc0064c08 // mova { z8.d-z11.d }, za.d[x10, #0]\n"
+ ".inst 0xc0064c20 // mova { z0.d-z3.d }, za.d[x10, #1]\n"
+ ".inst 0xc0064c50 // mova { z16.d-z19.d }, za.d[x10, #2]\n"
+ ".inst 0xa060c728 // st1w { z8.s-z11.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x4, MUL VL]\n"
".inst 0xa062c330 // st1w { z16.s-z19.s }, p8, [x25, #0x8, MUL VL]\n"
"addvl x25, x25, #12\n"
"27:" // Width 3: Output done
@@ -439,20 +439,20 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
"mov x20, #0x3\n"
"mov x23, %x[A_ptr]\n"
"lsl x21, %x[K], #0x2\n"
- "msub x20, x10, x20, %x[N]\n"
+ "msub x20, x9, x20, %x[N]\n"
"mov x22, %x[K]\n"
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
".inst 0x25b467f0 // whilelt p8.s, XZR, x20, VLx4\n"
"cbz x24, 29f\n"
".inst 0xa040c70c // ld1w { z12.s-z15.s }, pn9.b/Z, [x24]\n"
- ".inst 0xc0042d80 // mova za.d[x9, #0], { z12.d-z15.d }\n"
- ".inst 0xa041c70c // ld1w { z12.s-z15.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042d81 // mova za.d[x9, #1], { z12.d-z15.d }\n"
- ".inst 0xa042c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042e02 // mova za.d[x9, #2], { z16.d-z19.d }\n"
- ".inst 0xa043c714 // ld1w { z20.s-z23.s }, pn9.b/Z, [x24, #0xc, MUL VL]\n"
- ".inst 0xc0042e83 // mova za.d[x9, #3], { z20.d-z23.d }\n"
+ ".inst 0xa041c704 // ld1w { z4.s-z7.s }, pn9.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042c714 // ld1w { z20.s-z23.s }, pn9.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xa043c710 // ld1w { z16.s-z19.s }, pn9.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xc0044d80 // mova za.d[x10, #0], { z12.d-z15.d }\n"
"addvl x24, x24, #16\n"
+ ".inst 0xc0044c81 // mova za.d[x10, #1], { z4.d-z7.d }\n"
+ ".inst 0xc0044e82 // mova za.d[x10, #2], { z20.d-z23.d }\n"
+ ".inst 0xc0044e03 // mova za.d[x10, #3], { z16.d-z19.d }\n"
"b 30f\n"
"29:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -461,140 +461,140 @@ void sme2_gemv_fp32bf16fp32_dot_16VL (
"ble 32f\n"
"31:" // Width 4: Multiply loop: Main loop head
"whilelt p1.s, XZR, x22\n"
- "whilelt p0.s, x27, x22\n"
- "ld1rqw { z6.s }, p1/Z, [x23]\n"
- ".inst 0x658aa8c6 // bfcvt z6.h, p2/M, z6.s\n"
- "ld1rqw { z16.s }, p0/Z, [x23, #16]\n"
- ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
- "uzp1 z6.h, z6.h, z6.h\n"
+ "whilelt p0.s, x28, x22\n"
+ ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z7.s }, p1/Z, [x23]\n"
"sub x22, x22, #0x8\n"
- "uzp1 z16.h, z16.h, z16.h\n"
- "trn1 z6.d, z6.d, z16.d\n"
- ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z4.s }, p0/Z, [x23, #16]\n"
"cmp x22, #0x8\n"
- ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc156b198 // bfdot za.s[x9, 0], { z12.h-z15.h }, z6.h[0]\n"
"add x23, x23, #0x20\n"
- ".inst 0xa042a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc156b119 // bfdot za.s[x9, 1], { z8.h-z11.h }, z6.h[0]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc156b19a // bfdot za.s[x9, 2], { z12.h-z15.h }, z6.h[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xc156b21b // bfdot za.s[x9, 3], { z16.h-z19.h }, z6.h[0]\n"
- ".inst 0xa040a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26]\n"
".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc156b518 // bfdot za.s[x9, 0], { z8.h-z11.h }, z6.h[1]\n"
- ".inst 0xa042a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc156b599 // bfdot za.s[x9, 1], { z12.h-z15.h }, z6.h[1]\n"
- ".inst 0xa043a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc156b41a // bfdot za.s[x9, 2], { z0.h-z3.h }, z6.h[1]\n"
+ ".inst 0x658aa8e7 // bfcvt z7.h, p2/M, z7.s\n"
+ ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0x658aa884 // bfcvt z4.h, p2/M, z4.s\n"
+ ".inst 0xa043a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc156b69b // bfdot za.s[x9, 3], { z20.h-z23.h }, z6.h[1]\n"
- ".inst 0xa040a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc156b918 // bfdot za.s[x9, 0], { z8.h-z11.h }, z6.h[2]\n"
- ".inst 0xa042a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc156b999 // bfdot za.s[x9, 1], { z12.h-z15.h }, z6.h[2]\n"
- ".inst 0xa043a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc156b91a // bfdot za.s[x9, 2], { z8.h-z11.h }, z6.h[2]\n"
+ ".inst 0xa040a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa041a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ "uzp1 z7.h, z7.h, z7.h\n"
+ ".inst 0xa042a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ "uzp1 z4.h, z4.h, z4.h\n"
+ "trn1 z7.d, z7.d, z4.d\n"
+ ".inst 0xc157d218 // bfdot za.s[x10, 0], { z16.h-z19.h }, z7.h[0]\n"
+ ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc156ba9b // bfdot za.s[x9, 3], { z20.h-z23.h }, z6.h[2]\n"
- ".inst 0xa040a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26]\n"
- ".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc156bf98 // bfdot za.s[x9, 0], { z28.h-z31.h }, z6.h[3]\n"
+ ".inst 0xc157d199 // bfdot za.s[x10, 1], { z12.h-z15.h }, z7.h[0]\n"
+ ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xc157d39a // bfdot za.s[x10, 2], { z28.h-z31.h }, z7.h[0]\n"
+ ".inst 0xa041a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xc157d31b // bfdot za.s[x10, 3], { z24.h-z27.h }, z7.h[0]\n"
".inst 0xa042a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc156bd99 // bfdot za.s[x9, 1], { z12.h-z15.h }, z6.h[3]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc156bf1a // bfdot za.s[x9, 2], { z24.h-z27.h }, z6.h[3]\n"
+ ".inst 0xc157d698 // bfdot za.s[x10, 0], { z20.h-z23.h }, z7.h[1]\n"
+ ".inst 0xa043a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc156be1b // bfdot za.s[x9, 3], { z16.h-z19.h }, z6.h[3]\n"
+ ".inst 0xc157d519 // bfdot za.s[x10, 1], { z8.h-z11.h }, z7.h[1]\n"
+ ".inst 0xa040a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xc157d41a // bfdot za.s[x10, 2], { z0.h-z3.h }, z7.h[1]\n"
+ ".inst 0xa041a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xc157d61b // bfdot za.s[x10, 3], { z16.h-z19.h }, z7.h[1]\n"
+ ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xc157d998 // bfdot za.s[x10, 0], { z12.h-z15.h }, z7.h[2]\n"
+ ".inst 0xa043a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ "addvl x26, x26, #16\n"
+ ".inst 0xc157db99 // bfdot za.s[x10, 1], { z28.h-z31.h }, z7.h[2]\n"
+ ".inst 0xc157db1a // bfdot za.s[x10, 2], { z24.h-z27.h }, z7.h[2]\n"
+ ".inst 0xc157da9b // bfdot za.s[x10, 3], { z20.h-z23.h }, z7.h[2]\n"
+ ".inst 0xc157dd18 // bfdot za.s[x10, 0], { z8.h-z11.h }, z7.h[3]\n"
+ ".inst 0xc157dc19 // bfdot za.s[x10, 1], { z0.h-z3.h }, z7.h[3]\n"
+ ".inst 0xc157de1a // bfdot za.s[x10, 2], { z16.h-z19.h }, z7.h[3]\n"
+ ".inst 0xc157dd9b // bfdot za.s[x10, 3], { z12.h-z15.h }, z7.h[3]\n"
"bgt 31b\n"
"32:" // Width 4: Multiply loop: Single iteration only
"whilelt p1.s, XZR, x22\n"
- "whilelt p0.s, x27, x22\n"
- "ld1rqw { z15.s }, p1/Z, [x23]\n"
- ".inst 0x658aa9ef // bfcvt z15.h, p2/M, z15.s\n"
- "ld1rqw { z16.s }, p0/Z, [x23, #16]\n"
- ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
- "uzp1 z15.h, z15.h, z15.h\n"
- "subs x22, x22, #0x2\n"
- "uzp1 z16.h, z16.h, z16.h\n"
- "trn1 z15.d, z15.d, z16.d\n"
+ "whilelt p0.s, x28, x22\n"
".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
+ "ld1rqw { z3.s }, p1/Z, [x23]\n"
+ "subs x22, x22, #0x2\n"
+ "ld1rqw { z16.s }, p0/Z, [x23, #16]\n"
"add x23, x23, #0x20\n"
- ".inst 0xa041a745 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb318 // bfdot za.s[x9, 0], { z24.h-z27.h }, z15.h[0]\n"
- ".inst 0xa042a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fb099 // bfdot za.s[x9, 1], { z4.h-z7.h }, z15.h[0]\n"
- ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15fb01a // bfdot za.s[x9, 2], { z0.h-z3.h }, z15.h[0]\n"
+ ".inst 0xa041a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0x658aa863 // bfcvt z3.h, p2/M, z3.s\n"
+ ".inst 0xa042a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ ".inst 0xa043a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
"addvl x26, x26, #16\n"
- ".inst 0xc15fb21b // bfdot za.s[x9, 3], { z16.h-z19.h }, z15.h[0]\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ "uzp1 z16.h, z16.h, z16.h\n"
+ "trn1 z3.d, z3.d, z16.d\n"
+ ".inst 0xc153d318 // bfdot za.s[x10, 0], { z24.h-z27.h }, z3.h[0]\n"
+ ".inst 0xc153d399 // bfdot za.s[x10, 1], { z28.h-z31.h }, z3.h[0]\n"
+ ".inst 0xc153d11a // bfdot za.s[x10, 2], { z8.h-z11.h }, z3.h[0]\n"
+ ".inst 0xc153d19b // bfdot za.s[x10, 3], { z12.h-z15.h }, z3.h[0]\n"
"ble 33f\n"
- ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15fb718 // bfdot za.s[x9, 0], { z24.h-z27.h }, z15.h[1]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fb619 // bfdot za.s[x9, 1], { z16.h-z19.h }, z15.h[1]\n"
- ".inst 0xa042a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fb69a // bfdot za.s[x9, 2], { z20.h-z23.h }, z15.h[1]\n"
- ".inst 0xa043a741 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15fb41b // bfdot za.s[x9, 3], { z0.h-z3.h }, z15.h[1]\n"
+ ".inst 0xa041a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa042a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa043a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xc153d598 // bfdot za.s[x10, 0], { z12.h-z15.h }, z3.h[1]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153d799 // bfdot za.s[x10, 1], { z28.h-z31.h }, z3.h[1]\n"
+ ".inst 0xc153d51a // bfdot za.s[x10, 2], { z8.h-z11.h }, z3.h[1]\n"
+ ".inst 0xc153d71b // bfdot za.s[x10, 3], { z24.h-z27.h }, z3.h[1]\n"
"ble 33f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa040a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26]\n"
"subs x22, x22, #0x2\n"
- ".inst 0xc15fba18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z15.h[2]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fba19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z15.h[2]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fba1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z15.h[2]\n"
- ".inst 0xa043a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15fba9b // bfdot za.s[x9, 3], { z20.h-z23.h }, z15.h[2]\n"
+ ".inst 0xa041a74d // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa042a75d // ldnt1h { z28.h-z31.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
+ ".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
+ ".inst 0xc153db18 // bfdot za.s[x10, 0], { z24.h-z27.h }, z3.h[2]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153d999 // bfdot za.s[x10, 1], { z12.h-z15.h }, z3.h[2]\n"
+ ".inst 0xc153db9a // bfdot za.s[x10, 2], { z28.h-z31.h }, z3.h[2]\n"
+ ".inst 0xc153da1b // bfdot za.s[x10, 3], { z16.h-z19.h }, z3.h[2]\n"
"ble 33f\n"
- ".inst 0xa040a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26]\n"
- ".inst 0xc15fbe18 // bfdot za.s[x9, 0], { z16.h-z19.h }, z15.h[3]\n"
- ".inst 0xa041a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc15fbe19 // bfdot za.s[x9, 1], { z16.h-z19.h }, z15.h[3]\n"
- ".inst 0xa042a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc15fbe1a // bfdot za.s[x9, 2], { z16.h-z19.h }, z15.h[3]\n"
+ ".inst 0xa040a749 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x26]\n"
+ ".inst 0xa041a759 // ldnt1h { z24.h-z27.h }, pn9.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa042a755 // ldnt1h { z20.h-z23.h }, pn9.b/Z, [x26, #0x8, MUL VL]\n"
".inst 0xa043a751 // ldnt1h { z16.h-z19.h }, pn9.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc15fbe1b // bfdot za.s[x9, 3], { z16.h-z19.h }, z15.h[3]\n"
+ ".inst 0xc153dd18 // bfdot za.s[x10, 0], { z8.h-z11.h }, z3.h[3]\n"
"addvl x26, x26, #16\n"
+ ".inst 0xc153df19 // bfdot za.s[x10, 1], { z24.h-z27.h }, z3.h[3]\n"
+ ".inst 0xc153de9a // bfdot za.s[x10, 2], { z20.h-z23.h }, z3.h[3]\n"
+ ".inst 0xc153de1b // bfdot za.s[x10, 3], { z16.h-z19.h }, z3.h[3]\n"
"33:" // Width 4: Multiply loop: multiply skip
"tbz %x[flags], #1, 34f\n"
"add x21, %x[args_ptr], %[offset_min]\n"
"add x20, %x[args_ptr], %[offset_max]\n"
- ".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
+ ".inst 0xc0064c04 // mova { z4.d-z7.d }, za.d[x10, #0]\n"
+ ".inst 0xc0064c28 // mova { z8.d-z11.d }, za.d[x10, #1]\n"
"ld1rw { z21.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c38 // mova { z24.d-z27.d }, za.d[x9, #1]\n"
+ ".inst 0xc0064c4c // mova { z12.d-z15.d }, za.d[x10, #2]\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
+ ".inst 0xc0064c70 // mova { z16.d-z19.d }, za.d[x10, #3]\n"
+ ".inst 0xc1b4caa4 // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4caa8 // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
- ".inst 0xc0062c40 // mova { z0.d-z3.d }, za.d[x9, #2]\n"
- ".inst 0xa060c72c // st1w { z12.s-z15.s }, pn9.b, [x25]\n"
- ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
- ".inst 0xc0062c70 // mova { z16.d-z19.d }, za.d[x9, #3]\n"
- ".inst 0xa061c738 // st1w { z24.s-z27.s }, pn9.b, [x25, #0x4, MUL VL]\n"
- ".inst 0xc1b4caa0 // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
- ".inst 0xa062c720 // st1w { z0.s-z3.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xa060c724 // st1w { z4.s-z7.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c728 // st1w { z8.s-z11.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xa063c330 // st1w { z16.s-z19.s }, p8, [x25, #0xc, MUL VL]\n"
"addvl x25, x25, #16\n"
"b 35f\n"
"34:" // Width 4: No activation
- ".inst 0xc0062c10 // mova { z16.d-z19.d }, za.d[x9, #0]\n"
- ".inst 0xa060c730 // st1w { z16.s-z19.s }, pn9.b, [x25]\n"
- ".inst 0xc0062c30 // mova { z16.d-z19.d }, za.d[x9, #1]\n"
- ".inst 0xa061c730 // st1w { z16.s-z19.s }, pn9.b, [x25, #0x4, MUL VL]\n"
- ".inst 0xc0062c54 // mova { z20.d-z23.d }, za.d[x9, #2]\n"
- ".inst 0xa062c734 // st1w { z20.s-z23.s }, pn9.b, [x25, #0x8, MUL VL]\n"
- ".inst 0xc0062c78 // mova { z24.d-z27.d }, za.d[x9, #3]\n"
+ ".inst 0xc0064c00 // mova { z0.d-z3.d }, za.d[x10, #0]\n"
+ ".inst 0xc0064c24 // mova { z4.d-z7.d }, za.d[x10, #1]\n"
+ ".inst 0xc0064c4c // mova { z12.d-z15.d }, za.d[x10, #2]\n"
+ ".inst 0xc0064c78 // mova { z24.d-z27.d }, za.d[x10, #3]\n"
+ ".inst 0xa060c720 // st1w { z0.s-z3.s }, pn9.b, [x25]\n"
+ ".inst 0xa061c724 // st1w { z4.s-z7.s }, pn9.b, [x25, #0x4, MUL VL]\n"
+ ".inst 0xa062c72c // st1w { z12.s-z15.s }, pn9.b, [x25, #0x8, MUL VL]\n"
".inst 0xa063c338 // st1w { z24.s-z27.s }, p8, [x25, #0xc, MUL VL]\n"
"addvl x25, x25, #16\n"
"35:" // Width 4: Output done
- "subs x28, x28, #0x4\n"
- "sub %x[N], %x[N], x10, LSL #2\n"
+ "subs x27, x27, #0x4\n"
+ "sub %x[N], %x[N], x9, LSL #2\n"
"bgt 4b\n"
"36:" // Exit
".inst 0xd503467f // SMSTOP\n"
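(Aside, not part of the diff: the hunk above ends a widening-bf16 GEMV kernel — ldnt1h loads of bf16 operands, bfdot accumulation into ZA in fp32, and an optional fclamp min/max activation before st1w stores. A rough scalar C++ model of that inner computation, purely illustrative and not the library's code:

#include <algorithm>
#include <cstdint>
#include <cstring>

// Each bf16 value is the top 16 bits of an IEEE-754 float; bfdot widens the
// products to fp32 before accumulating, and fclamp bounds the result.
static float bf16_to_f32(uint16_t h)
{
    uint32_t bits = (uint32_t)h << 16;
    float f;
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

static float gemv_bf16_dot(const uint16_t* a, const uint16_t* b, int k,
                           float acc, float minval, float maxval)
{
    for (int i = 0; i < k; ++i)
        acc += bf16_to_f32(a[i]) * bf16_to_f32(b[i]); // bfdot: widening MAC
    return std::min(std::max(acc, minval), maxval);   // fclamp activation
}

The register renumbering in the hunk (x9 -> x10 for the ZA slice index, x26 -> x27 and so on) reorders loads ahead of the dependent bfdot/mova instructions without changing this arithmetic.)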
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp
index 86bd8aeb04..1a7cc1e70e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_s8qa_dot_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,20 +52,20 @@ void sme2_gemv_s8qa_dot_16VL (
__asm__ __volatile__(
"ptrue p8.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x28, ALL, MUL #4\n"
- "add x27, %x[N], x28\n"
- "sub x27, x27, #0x1\n"
- "udiv x27, x27, x28\n"
- "add x22, x27, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x28\n"
"mov x9, #0x0\n"
- "mov x26, %x[B_ptr]\n"
+ "cntw x28, ALL, MUL #4\n"
+ "mov x27, %x[B_ptr]\n"
+ "add x26, %x[N], x28\n"
"mov x25, %x[output_ptr]\n"
+ "sub x26, x26, #0x1\n"
"ptrue p2.b\n"
+ "udiv x26, x26, x28\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "mul x22, x22, %x[K]\n"
+ "add x22, x26, #0x3\n"
"mov x21, #0x1\n"
+ "and x22, x22, #0xfffffffffffffffc\n"
+ "mul x22, x22, x28\n"
+ "mul x22, x22, %x[K]\n"
"1:" // RHS size check loop
"cmp x22, #0x200000\n"
"blt 2f\n"
@@ -79,16 +79,16 @@ void sme2_gemv_s8qa_dot_16VL (
"lsl x21, x21, #0x16\n"
"orr x22, x22, x20\n"
"orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ ".inst 0xf8b64b7a // rprfm pldonce, x22, [x27]\n"
"3:" // RHS prefetch exit
"mov x24, %x[col_bias]\n"
- "mov z28.s, #0x0\n"
- "mov z29.b, #0x1\n"
+ "mov z11.s, #0x0\n"
+ "mov z8.b, #0x1\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"4:" // Column loop
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"bge 34f\n"
- "cmp x27, #0x2\n"
+ "cmp x26, #0x2\n"
"bgt 24f\n"
"beq 14f\n"
"mov x23, %x[A_ptr]\n"
@@ -98,8 +98,8 @@ void sme2_gemv_s8qa_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
"whilelt p1.b, XZR, x20\n"
"cbz x24, 5f\n"
- ".inst 0xa040c300 // ld1w { z0.s-z3.s }, pn8.b/Z, [x24]\n"
- ".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
+ ".inst 0xa040c30c // ld1w { z12.s-z15.s }, pn8.b/Z, [x24]\n"
+ ".inst 0xc0042d80 // mova za.d[x9, #0], { z12.d-z15.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -108,82 +108,82 @@ void sme2_gemv_s8qa_dot_16VL (
"ble 9f\n"
"7:" // Width 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b2a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b5a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b9a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bda0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b020 // sdot za.s[x9, 0], { z0.b-z3.b }, z9.b[0]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b720 // sdot za.s[x9, 0], { z24.b-z27.b }, z9.b[1]\n"
+ ".inst 0xc159baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z9.b[2]\n"
+ ".inst 0xc159bfa0 // sdot za.s[x9, 0], { z28.b-z31.b }, z9.b[3]\n"
"tbnz %x[flags], #31, 8f\n"
- "sdot z28.s, z1.b, z29.b\n"
+ "sdot z11.s, z9.b, z8.b\n"
"8:" // Width 1: Multiply loop: unique 1: skip row sum
"sub x22, x22, #0x10\n"
"cmp x22, #0x10\n"
"bgt 7b\n"
"9:" // Width 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc151b1a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b320 // sdot za.s[x9, 0], { z24.b-z27.b }, z9.b[0]\n"
"ble 10f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b420 // sdot za.s[x9, 0], { z0.b-z3.b }, z9.b[1]\n"
"ble 10f\n"
- ".inst 0xa0408349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b920 // sdot za.s[x9, 0], { z8.b-z11.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bba0 // sdot za.s[x9, 0], { z28.b-z31.b }, z9.b[2]\n"
"ble 10f\n"
- ".inst 0xa0408349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bd20 // sdot za.s[x9, 0], { z8.b-z11.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159be20 // sdot za.s[x9, 0], { z16.b-z19.b }, z9.b[3]\n"
"10:" // Width 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
- "sdot z28.s, z1.b, z29.b\n"
+ "sdot z11.s, z9.b, z8.b\n"
"11:" // Width 1: Multiply loop: unique 2: skip row sum
"tbnz %x[flags], #31, 12f\n"
"add x21, %x[qp], %[b_offset]\n"
"mov x20, #0x4\n"
- "ld1rw { z26.s }, p2/Z, [x21]\n"
- "neg z26.s, p2/M, z26.s\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"whilelt p0.s, XZR, x20\n"
- "saddv d28, p0, z28.s\n"
- "mov z28.s, z28.s[0]\n"
- "mul z28.s, p2/M, z28.s, z26.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "saddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "neg z17.s, p2/M, z17.s\n"
+ "mul z11.s, p2/M, z11.s, z17.s\n"
"12:" // Width 1: skip row sum fixup
- ".inst 0xc0904b80 // addha za0.s, p2/M, p2/M, z28.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z1.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904960 // addha za0.s, p2/M, p2/M, z11.s\n"
+ "add x21, %x[qp], %[per_layer_mul]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
- ".inst 0xc0904b81 // addha za1.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z7.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0xc0904961 // addha za1.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x22, %x[qp], %[c_offset]\n"
"add x21, %x[qp], %[minval]\n"
- ".inst 0xc0904b82 // addha za2.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904962 // addha za2.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z6.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[maxval]\n"
- ".inst 0xc0904b83 // addha za3.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z16.s }, p2/Z, [x21]\n"
+ ".inst 0xc0904963 // addha za3.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z7.s }, p2/Z, [x22]\n"
+ "ld1rw { z30.s }, p2/Z, [x21]\n"
+ "ld1rw { z23.s }, p2/Z, [x20]\n"
".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
- ".inst 0xc1a1ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
- ".inst 0xc1a7aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
- "ld1rw { z30.s }, p2/Z, [x20]\n"
- ".inst 0xc1a2ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
- ".inst 0xc1bece0c // sclamp { z12.s-z15.s }, z16.s, z30.s\n"
+ ".inst 0xc1aaac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z10.s\n"
+ ".inst 0xc1a6aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+ ".inst 0xc1a7ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
+ ".inst 0xc1b7cfcc // sclamp { z12.s-z15.s }, z30.s, z23.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
"uzp1 z19.h, z14.h, z15.h\n"
"uzp1 z12.b, z12.b, z19.b\n"
@@ -199,10 +199,10 @@ void sme2_gemv_s8qa_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
"whilelt p1.b, XZR, x20\n"
"cbz x24, 15f\n"
- ".inst 0xa040c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24]\n"
- ".inst 0xc0042e00 // mova za.d[x9, #0], { z16.d-z19.d }\n"
- ".inst 0xa041c318 // ld1w { z24.s-z27.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042f01 // mova za.d[x9, #1], { z24.d-z27.d }\n"
+ ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ ".inst 0xa041c300 // ld1w { z0.s-z3.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
+ ".inst 0xc0042c01 // mova za.d[x9, #1], { z0.d-z3.d }\n"
"b 16f\n"
"15:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -211,111 +211,111 @@ void sme2_gemv_s8qa_dot_16VL (
"ble 19f\n"
"17:" // Width 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b1a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[0]\n"
- ".inst 0xa0418359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b321 // sdot za.s[x9, 1], { z24.b-z27.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b620 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b6a1 // sdot za.s[x9, 1], { z20.b-z23.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b9a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[2]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b9a1 // sdot za.s[x9, 1], { z12.b-z15.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bca0 // sdot za.s[x9, 0], { z4.b-z7.b }, z1.b[3]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151bd21 // sdot za.s[x9, 1], { z8.b-z11.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa0408371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b2a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z9.b[0]\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b321 // sdot za.s[x9, 1], { z24.b-z27.b }, z9.b[0]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b620 // sdot za.s[x9, 0], { z16.b-z19.b }, z9.b[1]\n"
+ ".inst 0xa0418371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b6a1 // sdot za.s[x9, 1], { z20.b-z23.b }, z9.b[1]\n"
+ ".inst 0xc159bba0 // sdot za.s[x9, 0], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xc159b821 // sdot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xc159bda0 // sdot za.s[x9, 0], { z12.b-z15.b }, z9.b[3]\n"
+ ".inst 0xc159be21 // sdot za.s[x9, 1], { z16.b-z19.b }, z9.b[3]\n"
"tbnz %x[flags], #31, 18f\n"
- "sdot z28.s, z1.b, z29.b\n"
+ "sdot z11.s, z9.b, z8.b\n"
"18:" // Width 2: Multiply loop: unique 3: skip row sum
"sub x22, x22, #0x10\n"
"cmp x22, #0x10\n"
"bgt 17b\n"
"19:" // Width 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xa0408359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc151b320 // sdot za.s[x9, 0], { z24.b-z27.b }, z1.b[0]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b221 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b020 // sdot za.s[x9, 0], { z0.b-z3.b }, z9.b[0]\n"
+ ".inst 0xc159b321 // sdot za.s[x9, 1], { z24.b-z27.b }, z9.b[0]\n"
"ble 20f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b6a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z1.b[1]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b5a1 // sdot za.s[x9, 1], { z12.b-z15.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b620 // sdot za.s[x9, 0], { z16.b-z19.b }, z9.b[1]\n"
+ ".inst 0xc159b721 // sdot za.s[x9, 1], { z24.b-z27.b }, z9.b[1]\n"
"ble 20f\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b9a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[2]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b9a1 // sdot za.s[x9, 1], { z12.b-z15.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bba0 // sdot za.s[x9, 0], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xc159b821 // sdot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
"ble 20f\n"
- ".inst 0xa0408359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bf20 // sdot za.s[x9, 0], { z24.b-z27.b }, z1.b[3]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151bd21 // sdot za.s[x9, 1], { z8.b-z11.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159bf21 // sdot za.s[x9, 1], { z24.b-z27.b }, z9.b[3]\n"
"20:" // Width 2: Multiply loop: multiply skip
"tbnz %x[flags], #31, 21f\n"
- "sdot z28.s, z1.b, z29.b\n"
+ "sdot z11.s, z9.b, z8.b\n"
"21:" // Width 2: Multiply loop: unique 4: skip row sum
"tbnz %x[flags], #31, 22f\n"
"add x21, %x[qp], %[b_offset]\n"
"mov x20, #0x4\n"
- "ld1rw { z16.s }, p2/Z, [x21]\n"
- "neg z16.s, p2/M, z16.s\n"
+ "ld1rw { z1.s }, p2/Z, [x21]\n"
"whilelt p0.s, XZR, x20\n"
- "saddv d28, p0, z28.s\n"
- "mov z28.s, z28.s[0]\n"
- "mul z28.s, p2/M, z28.s, z16.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "saddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "neg z1.s, p2/M, z1.s\n"
+ "mul z11.s, p2/M, z11.s, z1.s\n"
"22:" // Width 2: skip row sum fixup
- ".inst 0xc0904b80 // addha za0.s, p2/M, p2/M, z28.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904960 // addha za0.s, p2/M, p2/M, z11.s\n"
+ "add x21, %x[qp], %[per_layer_mul]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
- ".inst 0xc0904b81 // addha za1.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0xc0904961 // addha za1.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z2.s }, p2/Z, [x21]\n"
+ "add x22, %x[qp], %[c_offset]\n"
"add x21, %x[qp], %[minval]\n"
- ".inst 0xc0904b82 // addha za2.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z9.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904962 // addha za2.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z3.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[maxval]\n"
- ".inst 0xc0904b83 // addha za3.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z16.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c18 // mova { z24.d-z27.d }, za.d[x9, #0]\n"
- ".inst 0xc1a6ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
- ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
- ".inst 0xc1a6ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
- ".inst 0xc1a5aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
- "ld1rw { z21.s }, p2/Z, [x20]\n"
- ".inst 0xc1a5aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
- ".inst 0xc1a9ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z9.s\n"
- ".inst 0xc1a9ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
- ".inst 0xc1b5ce18 // sclamp { z24.s-z27.s }, z16.s, z21.s\n"
- ".inst 0xc1b5ce00 // sclamp { z0.s-z3.s }, z16.s, z21.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "uzp1 z9.h, z26.h, z27.h\n"
- "uzp1 z0.h, z0.h, z1.h\n"
- "uzp1 z26.h, z2.h, z3.h\n"
- "uzp1 z24.b, z24.b, z9.b\n"
- "st1b { z24.b }, p2, [x25]\n"
- "uzp1 z0.b, z0.b, z26.b\n"
- "st1b { z0.b }, p1, [x25, #1, MUL VL]\n"
+ ".inst 0xc0904963 // addha za3.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z9.s }, p2/Z, [x22]\n"
+ "ld1rw { z6.s }, p2/Z, [x21]\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
+ ".inst 0xc0062c14 // mova { z20.d-z23.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ ".inst 0xc1a2ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z2.s\n"
+ ".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
+ ".inst 0xc1a3aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z3.s\n"
+ ".inst 0xc1a3aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+ ".inst 0xc1a9ab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z9.s\n"
+ ".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+ ".inst 0xc1bdccd4 // sclamp { z20.s-z23.s }, z6.s, z29.s\n"
+ ".inst 0xc1bdcccc // sclamp { z12.s-z15.s }, z6.s, z29.s\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z16.h, z22.h, z23.h\n"
+ "uzp1 z12.h, z12.h, z13.h\n"
+ "uzp1 z24.h, z14.h, z15.h\n"
+ "uzp1 z20.b, z20.b, z16.b\n"
+ "uzp1 z12.b, z12.b, z24.b\n"
+ "st1b { z20.b }, p2, [x25]\n"
+ "st1b { z12.b }, p1, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
"23:" // Width 2: Output done
"b 44f\n"
@@ -328,12 +328,12 @@ void sme2_gemv_s8qa_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
"whilelt p1.b, XZR, x20\n"
"cbz x24, 25f\n"
- ".inst 0xa040c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24]\n"
- ".inst 0xc0042e00 // mova za.d[x9, #0], { z16.d-z19.d }\n"
- ".inst 0xa041c30c // ld1w { z12.s-z15.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042d81 // mova za.d[x9, #1], { z12.d-z15.d }\n"
- ".inst 0xa042c318 // ld1w { z24.s-z27.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042f02 // mova za.d[x9, #2], { z24.d-z27.d }\n"
+ ".inst 0xa040c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24]\n"
+ ".inst 0xa041c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042c30c // ld1w { z12.s-z15.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xc0042e80 // mova za.d[x9, #0], { z20.d-z23.d }\n"
+ ".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
+ ".inst 0xc0042d82 // mova za.d[x9, #2], { z12.d-z15.d }\n"
"b 26f\n"
"25:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -342,136 +342,136 @@ void sme2_gemv_s8qa_dot_16VL (
"ble 29f\n"
"27:" // Width 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b221 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b1a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b5a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b5a1 // sdot za.s[x9, 1], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa0428355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b6a2 // sdot za.s[x9, 2], { z20.b-z23.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b920 // sdot za.s[x9, 0], { z8.b-z11.b }, z1.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b8a1 // sdot za.s[x9, 1], { z4.b-z7.b }, z1.b[2]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151ba22 // sdot za.s[x9, 2], { z16.b-z19.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bf20 // sdot za.s[x9, 0], { z24.b-z27.b }, z1.b[3]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151bca1 // sdot za.s[x9, 1], { z4.b-z7.b }, z1.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151be22 // sdot za.s[x9, 2], { z16.b-z19.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b3a0 // sdot za.s[x9, 0], { z28.b-z31.b }, z9.b[0]\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b221 // sdot za.s[x9, 1], { z16.b-z19.b }, z9.b[0]\n"
+ ".inst 0xa041837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b1a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z9.b[0]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b720 // sdot za.s[x9, 0], { z24.b-z27.b }, z9.b[1]\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b7a1 // sdot za.s[x9, 1], { z28.b-z31.b }, z9.b[1]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z9.b[1]\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b9a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z9.b[2]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b821 // sdot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xc159bba2 // sdot za.s[x9, 2], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xc159bea0 // sdot za.s[x9, 0], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159bf21 // sdot za.s[x9, 1], { z24.b-z27.b }, z9.b[3]\n"
+ ".inst 0xc159be22 // sdot za.s[x9, 2], { z16.b-z19.b }, z9.b[3]\n"
"tbnz %x[flags], #31, 28f\n"
- "sdot z28.s, z1.b, z29.b\n"
+ "sdot z11.s, z9.b, z8.b\n"
"28:" // Width 3: Multiply loop: unique 5: skip row sum
"sub x22, x22, #0x10\n"
"cmp x22, #0x10\n"
"bgt 27b\n"
"29:" // Width 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa040836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc151b2a0 // sdot za.s[x9, 0], { z20.b-z23.b }, z1.b[0]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b221 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b222 // sdot za.s[x9, 2], { z16.b-z19.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b1a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z9.b[0]\n"
+ ".inst 0xc159b021 // sdot za.s[x9, 1], { z0.b-z3.b }, z9.b[0]\n"
+ ".inst 0xc159b3a2 // sdot za.s[x9, 2], { z28.b-z31.b }, z9.b[0]\n"
"ble 30f\n"
- ".inst 0xa0408359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408365 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b720 // sdot za.s[x9, 0], { z24.b-z27.b }, z1.b[1]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b5a1 // sdot za.s[x9, 1], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b4a0 // sdot za.s[x9, 0], { z4.b-z7.b }, z9.b[1]\n"
+ ".inst 0xc159b721 // sdot za.s[x9, 1], { z24.b-z27.b }, z9.b[1]\n"
+ ".inst 0xc159b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z9.b[1]\n"
"ble 30f\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151ba20 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151ba21 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0428355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151baa2 // sdot za.s[x9, 2], { z20.b-z23.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bba0 // sdot za.s[x9, 0], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xc159b821 // sdot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xc159bb22 // sdot za.s[x9, 2], { z24.b-z27.b }, z9.b[2]\n"
"ble 30f\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bda0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[3]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151be21 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[3]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151bda2 // sdot za.s[x9, 2], { z12.b-z15.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bc20 // sdot za.s[x9, 0], { z0.b-z3.b }, z9.b[3]\n"
+ ".inst 0xc159bea1 // sdot za.s[x9, 1], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159be22 // sdot za.s[x9, 2], { z16.b-z19.b }, z9.b[3]\n"
"30:" // Width 3: Multiply loop: multiply skip
"tbnz %x[flags], #31, 31f\n"
- "sdot z28.s, z1.b, z29.b\n"
+ "sdot z11.s, z9.b, z8.b\n"
"31:" // Width 3: Multiply loop: unique 6: skip row sum
"tbnz %x[flags], #31, 32f\n"
"add x21, %x[qp], %[b_offset]\n"
"mov x20, #0x4\n"
"ld1rw { z16.s }, p2/Z, [x21]\n"
- "neg z16.s, p2/M, z16.s\n"
"whilelt p0.s, XZR, x20\n"
- "saddv d28, p0, z28.s\n"
- "mov z28.s, z28.s[0]\n"
- "mul z28.s, p2/M, z28.s, z16.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "saddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "neg z16.s, p2/M, z16.s\n"
+ "mul z11.s, p2/M, z11.s, z16.s\n"
"32:" // Width 3: skip row sum fixup
- ".inst 0xc0904b80 // addha za0.s, p2/M, p2/M, z28.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904960 // addha za0.s, p2/M, p2/M, z11.s\n"
+ "add x21, %x[qp], %[per_layer_mul]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
- ".inst 0xc0904b81 // addha za1.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z1.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0xc0904961 // addha za1.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z2.s }, p2/Z, [x21]\n"
+ "add x22, %x[qp], %[c_offset]\n"
"add x21, %x[qp], %[minval]\n"
- ".inst 0xc0904b82 // addha za2.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z3.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904962 // addha za2.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z1.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[maxval]\n"
- ".inst 0xc0904b83 // addha za3.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z16.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xc1a2ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z2.s\n"
- ".inst 0xc0062c24 // mova { z4.d-z7.d }, za.d[x9, #1]\n"
- ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
- ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
+ ".inst 0xc0904963 // addha za3.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z0.s }, p2/Z, [x22]\n"
+ "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z20.s }, p2/Z, [x20]\n"
+ ".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+ ".inst 0xc1a2ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
- ".inst 0xc1a1aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z1.s\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
- ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
+ ".inst 0xc1a1aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
- ".inst 0xc1a3ab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z3.s\n"
- ".inst 0xc1a3ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- ".inst 0xc1a3ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
- ".inst 0xc1a0ce08 // sclamp { z8.s-z11.s }, z16.s, z0.s\n"
- ".inst 0xc1a0ce04 // sclamp { z4.s-z7.s }, z16.s, z0.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
- ".inst 0xc1a0ce0c // sclamp { z12.s-z15.s }, z16.s, z0.s\n"
- "uzp1 z18.h, z10.h, z11.h\n"
- "uzp1 z4.h, z4.h, z5.h\n"
- "uzp1 z17.h, z6.h, z7.h\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a0ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
+ ".inst 0xc1a0ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
+ ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4ceac // sclamp { z12.s-z15.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4ceb0 // sclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "uzp1 z28.h, z28.h, z29.h\n"
+ "uzp1 z21.h, z30.h, z31.h\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "uzp1 z16.h, z14.h, z15.h\n"
- "uzp1 z8.b, z8.b, z18.b\n"
- "st1b { z8.b }, p2, [x25]\n"
- "uzp1 z4.b, z4.b, z17.b\n"
- "st1b { z4.b }, p2, [x25, #1, MUL VL]\n"
- "uzp1 z12.b, z12.b, z16.b\n"
- "st1b { z12.b }, p1, [x25, #2, MUL VL]\n"
+ "uzp1 z20.h, z14.h, z15.h\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
+ "uzp1 z28.b, z28.b, z21.b\n"
+ "uzp1 z12.b, z12.b, z20.b\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z12.b }, p2, [x25, #1, MUL VL]\n"
+ "st1b { z16.b }, p1, [x25, #2, MUL VL]\n"
"addvl x25, x25, #3\n"
"33:" // Width 3: Output done
"b 44f\n"
@@ -484,15 +484,15 @@ void sme2_gemv_s8qa_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
"whilelt p1.b, XZR, x20\n"
"cbz x24, 35f\n"
- ".inst 0xa040c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24]\n"
- ".inst 0xc0042e80 // mova za.d[x9, #0], { z20.d-z23.d }\n"
- ".inst 0xa041c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
+ ".inst 0xa040c300 // ld1w { z0.s-z3.s }, pn8.b/Z, [x24]\n"
+ ".inst 0xa041c30c // ld1w { z12.s-z15.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
".inst 0xa042c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042e02 // mova za.d[x9, #2], { z16.d-z19.d }\n"
- ".inst 0xa043c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0xc, MUL VL]\n"
- ".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
+ ".inst 0xa043c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
"addvl x24, x24, #16\n"
+ ".inst 0xc0042d81 // mova za.d[x9, #1], { z12.d-z15.d }\n"
+ ".inst 0xc0042e02 // mova za.d[x9, #2], { z16.d-z19.d }\n"
+ ".inst 0xc0042e83 // mova za.d[x9, #3], { z20.d-z23.d }\n"
"b 36f\n"
"35:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -501,164 +501,164 @@ void sme2_gemv_s8qa_dot_16VL (
"ble 39f\n"
"37:" // Width 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b220 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b221 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b1a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z1.b[0]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151b1a3 // sdot za.s[x9, 3], { z12.b-z15.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b620 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b5a1 // sdot za.s[x9, 1], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151b623 // sdot za.s[x9, 3], { z16.b-z19.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151ba20 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b9a1 // sdot za.s[x9, 1], { z12.b-z15.b }, z1.b[2]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151ba22 // sdot za.s[x9, 2], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151ba23 // sdot za.s[x9, 3], { z16.b-z19.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bda0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[3]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151bda1 // sdot za.s[x9, 1], { z12.b-z15.b }, z1.b[3]\n"
- ".inst 0xa0428359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151bf22 // sdot za.s[x9, 2], { z24.b-z27.b }, z1.b[3]\n"
- ".inst 0xa0438345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151bca3 // sdot za.s[x9, 3], { z4.b-z7.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159b020 // sdot za.s[x9, 0], { z0.b-z3.b }, z9.b[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b2a1 // sdot za.s[x9, 1], { z20.b-z23.b }, z9.b[0]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b3a2 // sdot za.s[x9, 2], { z28.b-z31.b }, z9.b[0]\n"
+ ".inst 0xa041837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b323 // sdot za.s[x9, 3], { z24.b-z27.b }, z9.b[0]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159b420 // sdot za.s[x9, 0], { z0.b-z3.b }, z9.b[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b7a1 // sdot za.s[x9, 1], { z28.b-z31.b }, z9.b[1]\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b622 // sdot za.s[x9, 2], { z16.b-z19.b }, z9.b[1]\n"
+ ".inst 0xa041837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b5a3 // sdot za.s[x9, 3], { z12.b-z15.b }, z9.b[1]\n"
+ ".inst 0xa0428361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159baa0 // sdot za.s[x9, 0], { z20.b-z23.b }, z9.b[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bba1 // sdot za.s[x9, 1], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b822 // sdot za.s[x9, 2], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b9a3 // sdot za.s[x9, 3], { z12.b-z15.b }, z9.b[2]\n"
+ ".inst 0xa0428361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159bf20 // sdot za.s[x9, 0], { z24.b-z27.b }, z9.b[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bea1 // sdot za.s[x9, 1], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159bc22 // sdot za.s[x9, 2], { z0.b-z3.b }, z9.b[3]\n"
+ ".inst 0xc159be23 // sdot za.s[x9, 3], { z16.b-z19.b }, z9.b[3]\n"
"tbnz %x[flags], #31, 38f\n"
- "sdot z28.s, z1.b, z29.b\n"
+ "sdot z11.s, z9.b, z8.b\n"
"38:" // Width 4: Multiply loop: unique 7: skip row sum
"sub x22, x22, #0x10\n"
"cmp x22, #0x10\n"
"bgt 37b\n"
"39:" // Width 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc151b1a0 // sdot za.s[x9, 0], { z12.b-z15.b }, z1.b[0]\n"
- ".inst 0xa0418359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b321 // sdot za.s[x9, 1], { z24.b-z27.b }, z1.b[0]\n"
- ".inst 0xa0428349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b122 // sdot za.s[x9, 2], { z8.b-z11.b }, z1.b[0]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151b223 // sdot za.s[x9, 3], { z16.b-z19.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159b020 // sdot za.s[x9, 0], { z0.b-z3.b }, z9.b[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b2a1 // sdot za.s[x9, 1], { z20.b-z23.b }, z9.b[0]\n"
+ ".inst 0xc159b3a2 // sdot za.s[x9, 2], { z28.b-z31.b }, z9.b[0]\n"
+ ".inst 0xc159b223 // sdot za.s[x9, 3], { z16.b-z19.b }, z9.b[0]\n"
"ble 40f\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b620 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b621 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b5a2 // sdot za.s[x9, 2], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa0438355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151b6a3 // sdot za.s[x9, 3], { z20.b-z23.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159b420 // sdot za.s[x9, 0], { z0.b-z3.b }, z9.b[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b721 // sdot za.s[x9, 1], { z24.b-z27.b }, z9.b[1]\n"
+ ".inst 0xc159b7a2 // sdot za.s[x9, 2], { z28.b-z31.b }, z9.b[1]\n"
+ ".inst 0xc159b623 // sdot za.s[x9, 3], { z16.b-z19.b }, z9.b[1]\n"
"ble 40f\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151ba20 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151ba21 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151ba22 // sdot za.s[x9, 2], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151ba23 // sdot za.s[x9, 3], { z16.b-z19.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159bb20 // sdot za.s[x9, 0], { z24.b-z27.b }, z9.b[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b821 // sdot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xc159baa2 // sdot za.s[x9, 2], { z20.b-z23.b }, z9.b[2]\n"
+ ".inst 0xc159b9a3 // sdot za.s[x9, 3], { z12.b-z15.b }, z9.b[2]\n"
"ble 40f\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151be20 // sdot za.s[x9, 0], { z16.b-z19.b }, z1.b[3]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151be21 // sdot za.s[x9, 1], { z16.b-z19.b }, z1.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151be22 // sdot za.s[x9, 2], { z16.b-z19.b }, z1.b[3]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151be23 // sdot za.s[x9, 3], { z16.b-z19.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa041837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159bf20 // sdot za.s[x9, 0], { z24.b-z27.b }, z9.b[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bfa1 // sdot za.s[x9, 1], { z28.b-z31.b }, z9.b[3]\n"
+ ".inst 0xc159bea2 // sdot za.s[x9, 2], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159be23 // sdot za.s[x9, 3], { z16.b-z19.b }, z9.b[3]\n"
"40:" // Width 4: Multiply loop: multiply skip
"tbnz %x[flags], #31, 41f\n"
- "sdot z28.s, z1.b, z29.b\n"
+ "sdot z11.s, z9.b, z8.b\n"
"41:" // Width 4: Multiply loop: unique 8: skip row sum
"tbnz %x[flags], #31, 42f\n"
"add x21, %x[qp], %[b_offset]\n"
"mov x20, #0x4\n"
"ld1rw { z16.s }, p2/Z, [x21]\n"
- "neg z16.s, p2/M, z16.s\n"
"whilelt p0.s, XZR, x20\n"
- "saddv d28, p0, z28.s\n"
- "mov z28.s, z28.s[0]\n"
- "mul z28.s, p2/M, z28.s, z16.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "saddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "neg z16.s, p2/M, z16.s\n"
+ "mul z11.s, p2/M, z11.s, z16.s\n"
"42:" // Width 4: skip row sum fixup
- ".inst 0xc0904b80 // addha za0.s, p2/M, p2/M, z28.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z11.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904960 // addha za0.s, p2/M, p2/M, z11.s\n"
+ "add x21, %x[qp], %[per_layer_mul]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
- ".inst 0xc0904b81 // addha za1.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z7.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0xc0904961 // addha za1.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x22, %x[qp], %[c_offset]\n"
"add x21, %x[qp], %[minval]\n"
- ".inst 0xc0904b82 // addha za2.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904962 // addha za2.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z12.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[maxval]\n"
- ".inst 0xc0904b83 // addha za3.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z3.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c18 // mova { z24.d-z27.d }, za.d[x9, #0]\n"
- ".inst 0xc1abac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z11.s\n"
- ".inst 0xc0062c30 // mova { z16.d-z19.d }, za.d[x9, #1]\n"
- ".inst 0xc1abac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
- ".inst 0xc0062c54 // mova { z20.d-z23.d }, za.d[x9, #2]\n"
- ".inst 0xc1abac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
- ".inst 0xc0062c6c // mova { z12.d-z15.d }, za.d[x9, #3]\n"
- ".inst 0xc1abac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z11.s\n"
- ".inst 0xc1a7aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
- "ld1rw { z31.s }, p2/Z, [x20]\n"
- ".inst 0xc1a7aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
- ".inst 0xc1a7aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
- ".inst 0xc1a7aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
- ".inst 0xc1a6ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
- ".inst 0xc1a6ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- ".inst 0xc1a6ab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z6.s\n"
- ".inst 0xc1a6ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
- ".inst 0xc1bfcc78 // sclamp { z24.s-z27.s }, z3.s, z31.s\n"
- ".inst 0xc1bfcc70 // sclamp { z16.s-z19.s }, z3.s, z31.s\n"
+ ".inst 0xc0904963 // addha za3.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z13.s }, p2/Z, [x22]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
+ "ld1rw { z18.s }, p2/Z, [x20]\n"
+ ".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c58 // mova { z24.d-z27.d }, za.d[x9, #2]\n"
+ ".inst 0xc0062c74 // mova { z20.d-z23.d }, za.d[x9, #3]\n"
+ ".inst 0xc1aaac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
+ ".inst 0xc1aaac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z10.s\n"
+ ".inst 0xc1aaac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z10.s\n"
+ ".inst 0xc1aaac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1acaa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z12.s\n"
+ ".inst 0xc1acaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z12.s\n"
+ ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
+ ".inst 0xc1acaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z12.s\n"
+ ".inst 0xc1adab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z13.s\n"
+ ".inst 0xc1adab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc1adab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z13.s\n"
+ ".inst 0xc1adab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z13.s\n"
+ ".inst 0xc1b2ce3c // sclamp { z28.s-z31.s }, z17.s, z18.s\n"
+ ".inst 0xc1b2ce20 // sclamp { z0.s-z3.s }, z17.s, z18.s\n"
+ ".inst 0xc1b2ce38 // sclamp { z24.s-z27.s }, z17.s, z18.s\n"
+ ".inst 0xc1b2ce34 // sclamp { z20.s-z23.s }, z17.s, z18.s\n"
+ "uzp1 z28.h, z28.h, z29.h\n"
+ "uzp1 z14.h, z30.h, z31.h\n"
+ "uzp1 z0.h, z0.h, z1.h\n"
+ "uzp1 z18.h, z2.h, z3.h\n"
"uzp1 z24.h, z24.h, z25.h\n"
- ".inst 0xc1bfcc74 // sclamp { z20.s-z23.s }, z3.s, z31.s\n"
- ".inst 0xc1bfcc6c // sclamp { z12.s-z15.s }, z3.s, z31.s\n"
- "uzp1 z25.h, z26.h, z27.h\n"
- "uzp1 z16.h, z16.h, z17.h\n"
- "uzp1 z18.h, z18.h, z19.h\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "uzp1 z17.h, z22.h, z23.h\n"
- "uzp1 z12.h, z12.h, z13.h\n"
- "uzp1 z30.h, z14.h, z15.h\n"
- "uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p2, [x25]\n"
- "uzp1 z16.b, z16.b, z18.b\n"
- "st1b { z16.b }, p2, [x25, #1, MUL VL]\n"
- "uzp1 z20.b, z20.b, z17.b\n"
- "uzp1 z12.b, z12.b, z30.b\n"
- "st1b { z20.b }, p2, [x25, #2, MUL VL]\n"
- "st1b { z12.b }, p1, [x25, #3, MUL VL]\n"
+ "uzp1 z16.h, z22.h, z23.h\n"
+ "uzp1 z28.b, z28.b, z14.b\n"
+ "uzp1 z0.b, z0.b, z18.b\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "uzp1 z20.b, z20.b, z16.b\n"
+ "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z0.b }, p2, [x25, #1, MUL VL]\n"
+ "st1b { z24.b }, p2, [x25, #2, MUL VL]\n"
+ "st1b { z20.b }, p1, [x25, #3, MUL VL]\n"
"addvl x25, x25, #4\n"
"43:" // Width 4: Output done
- "subs x27, x27, #0x4\n"
+ "subs x26, x26, #0x4\n"
"sub %x[N], %x[N], x28, LSL #2\n"
"bgt 4b\n"
"44:" // Exit
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp
index 093feee6ce..1cbaf00052 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_gemv_u8qa_dot_16VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,20 +52,20 @@ void sme2_gemv_u8qa_dot_16VL (
__asm__ __volatile__(
"ptrue p8.b\n"
".inst 0xd503477f // SMSTART ZA\n"
- "cntw x28, ALL, MUL #4\n"
- "add x27, %x[N], x28\n"
- "sub x27, x27, #0x1\n"
- "udiv x27, x27, x28\n"
- "add x22, x27, #0x3\n"
- "and x22, x22, #0xfffffffffffffffc\n"
- "mul x22, x22, x28\n"
"mov x9, #0x0\n"
- "mov x26, %x[B_ptr]\n"
+ "cntw x28, ALL, MUL #4\n"
+ "mov x27, %x[B_ptr]\n"
+ "add x26, %x[N], x28\n"
"mov x25, %x[output_ptr]\n"
+ "sub x26, x26, #0x1\n"
"ptrue p2.b\n"
+ "udiv x26, x26, x28\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "mul x22, x22, %x[K]\n"
+ "add x22, x26, #0x3\n"
"mov x21, #0x1\n"
+ "and x22, x22, #0xfffffffffffffffc\n"
+ "mul x22, x22, x28\n"
+ "mul x22, x22, %x[K]\n"
"1:" // RHS size check loop
"cmp x22, #0x200000\n"
"blt 2f\n"
@@ -79,16 +79,16 @@ void sme2_gemv_u8qa_dot_16VL (
"lsl x21, x21, #0x16\n"
"orr x22, x22, x20\n"
"orr x22, x22, x21\n"
- ".inst 0xf8b64b5a // rprfm pldonce, x22, [x26]\n"
+ ".inst 0xf8b64b7a // rprfm pldonce, x22, [x27]\n"
"3:" // RHS prefetch exit
"mov x24, %x[col_bias]\n"
- "mov z28.s, #0x0\n"
- "mov z29.b, #0x1\n"
+ "mov z11.s, #0x0\n"
+ "mov z8.b, #0x1\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"4:" // Column loop
- "cmp x27, #0x4\n"
+ "cmp x26, #0x4\n"
"bge 34f\n"
- "cmp x27, #0x2\n"
+ "cmp x26, #0x2\n"
"bgt 24f\n"
"beq 14f\n"
"mov x23, %x[A_ptr]\n"
@@ -98,8 +98,8 @@ void sme2_gemv_u8qa_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
"whilelt p1.b, XZR, x20\n"
"cbz x24, 5f\n"
- ".inst 0xa040c300 // ld1w { z0.s-z3.s }, pn8.b/Z, [x24]\n"
- ".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
+ ".inst 0xa040c30c // ld1w { z12.s-z15.s }, pn8.b/Z, [x24]\n"
+ ".inst 0xc0042d80 // mova za.d[x9, #0], { z12.d-z15.d }\n"
"b 6f\n"
"5:" // Width 1: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -108,82 +108,82 @@ void sme2_gemv_u8qa_dot_16VL (
"ble 9f\n"
"7:" // Width 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b2b0 // udot za.s[x9, 0], { z20.b-z23.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b5b0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b9b0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bdb0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b030 // udot za.s[x9, 0], { z0.b-z3.b }, z9.b[0]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b730 // udot za.s[x9, 0], { z24.b-z27.b }, z9.b[1]\n"
+ ".inst 0xc159bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z9.b[2]\n"
+ ".inst 0xc159bfb0 // udot za.s[x9, 0], { z28.b-z31.b }, z9.b[3]\n"
"tbnz %x[flags], #31, 8f\n"
- "udot z28.s, z1.b, z29.b\n"
+ "udot z11.s, z9.b, z8.b\n"
"8:" // Width 1: Multiply loop: unique 1: skip row sum
"sub x22, x22, #0x10\n"
"cmp x22, #0x10\n"
"bgt 7b\n"
"9:" // Width 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc151b1b0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b330 // udot za.s[x9, 0], { z24.b-z27.b }, z9.b[0]\n"
"ble 10f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b430 // udot za.s[x9, 0], { z0.b-z3.b }, z9.b[1]\n"
"ble 10f\n"
- ".inst 0xa0408349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b930 // udot za.s[x9, 0], { z8.b-z11.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bbb0 // udot za.s[x9, 0], { z28.b-z31.b }, z9.b[2]\n"
"ble 10f\n"
- ".inst 0xa0408349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bd30 // udot za.s[x9, 0], { z8.b-z11.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159be30 // udot za.s[x9, 0], { z16.b-z19.b }, z9.b[3]\n"
"10:" // Width 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
- "udot z28.s, z1.b, z29.b\n"
+ "udot z11.s, z9.b, z8.b\n"
"11:" // Width 1: Multiply loop: unique 2: skip row sum
"tbnz %x[flags], #31, 12f\n"
"add x21, %x[qp], %[b_offset]\n"
"mov x20, #0x4\n"
- "ld1rw { z26.s }, p2/Z, [x21]\n"
- "neg z26.s, p2/M, z26.s\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"whilelt p0.s, XZR, x20\n"
- "uaddv d28, p0, z28.s\n"
- "mov z28.s, z28.s[0]\n"
- "mul z28.s, p2/M, z28.s, z26.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "uaddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "neg z17.s, p2/M, z17.s\n"
+ "mul z11.s, p2/M, z11.s, z17.s\n"
"12:" // Width 1: skip row sum fixup
- ".inst 0xc0904b80 // addha za0.s, p2/M, p2/M, z28.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z1.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904960 // addha za0.s, p2/M, p2/M, z11.s\n"
+ "add x21, %x[qp], %[per_layer_mul]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
- ".inst 0xc0904b81 // addha za1.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z7.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0xc0904961 // addha za1.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x22, %x[qp], %[c_offset]\n"
"add x21, %x[qp], %[minval]\n"
- ".inst 0xc0904b82 // addha za2.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904962 // addha za2.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z6.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[maxval]\n"
- ".inst 0xc0904b83 // addha za3.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z16.s }, p2/Z, [x21]\n"
+ ".inst 0xc0904963 // addha za3.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z7.s }, p2/Z, [x22]\n"
+ "ld1rw { z30.s }, p2/Z, [x21]\n"
+ "ld1rw { z23.s }, p2/Z, [x20]\n"
".inst 0xc0062c0c // mova { z12.d-z15.d }, za.d[x9, #0]\n"
- ".inst 0xc1a1ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
- ".inst 0xc1a7aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
- "ld1rw { z30.s }, p2/Z, [x20]\n"
- ".inst 0xc1a2ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
- ".inst 0xc1bece0c // sclamp { z12.s-z15.s }, z16.s, z30.s\n"
+ ".inst 0xc1aaac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z10.s\n"
+ ".inst 0xc1a6aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
+ ".inst 0xc1a7ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
+ ".inst 0xc1b7cfcc // sclamp { z12.s-z15.s }, z30.s, z23.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
"uzp1 z19.h, z14.h, z15.h\n"
"uzp1 z12.b, z12.b, z19.b\n"
@@ -199,10 +199,10 @@ void sme2_gemv_u8qa_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
"whilelt p1.b, XZR, x20\n"
"cbz x24, 15f\n"
- ".inst 0xa040c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24]\n"
- ".inst 0xc0042e00 // mova za.d[x9, #0], { z16.d-z19.d }\n"
- ".inst 0xa041c318 // ld1w { z24.s-z27.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042f01 // mova za.d[x9, #1], { z24.d-z27.d }\n"
+ ".inst 0xa040c304 // ld1w { z4.s-z7.s }, pn8.b/Z, [x24]\n"
+ ".inst 0xa041c300 // ld1w { z0.s-z3.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xc0042c80 // mova za.d[x9, #0], { z4.d-z7.d }\n"
+ ".inst 0xc0042c01 // mova za.d[x9, #1], { z0.d-z3.d }\n"
"b 16f\n"
"15:" // Width 2: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -211,111 +211,111 @@ void sme2_gemv_u8qa_dot_16VL (
"ble 19f\n"
"17:" // Width 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b1b0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[0]\n"
- ".inst 0xa0418359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b331 // udot za.s[x9, 1], { z24.b-z27.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b630 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa0418355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b6b1 // udot za.s[x9, 1], { z20.b-z23.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b9b0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[2]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b9b1 // udot za.s[x9, 1], { z12.b-z15.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bcb0 // udot za.s[x9, 0], { z4.b-z7.b }, z1.b[3]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151bd31 // udot za.s[x9, 1], { z8.b-z11.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa0408371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b2b0 // udot za.s[x9, 0], { z20.b-z23.b }, z9.b[0]\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b331 // udot za.s[x9, 1], { z24.b-z27.b }, z9.b[0]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b630 // udot za.s[x9, 0], { z16.b-z19.b }, z9.b[1]\n"
+ ".inst 0xa0418371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b6b1 // udot za.s[x9, 1], { z20.b-z23.b }, z9.b[1]\n"
+ ".inst 0xc159bbb0 // udot za.s[x9, 0], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xc159b831 // udot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xc159bdb0 // udot za.s[x9, 0], { z12.b-z15.b }, z9.b[3]\n"
+ ".inst 0xc159be31 // udot za.s[x9, 1], { z16.b-z19.b }, z9.b[3]\n"
"tbnz %x[flags], #31, 18f\n"
- "udot z28.s, z1.b, z29.b\n"
+ "udot z11.s, z9.b, z8.b\n"
"18:" // Width 2: Multiply loop: unique 3: skip row sum
"sub x22, x22, #0x10\n"
"cmp x22, #0x10\n"
"bgt 17b\n"
"19:" // Width 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xa0408359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc151b330 // udot za.s[x9, 0], { z24.b-z27.b }, z1.b[0]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b231 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b030 // udot za.s[x9, 0], { z0.b-z3.b }, z9.b[0]\n"
+ ".inst 0xc159b331 // udot za.s[x9, 1], { z24.b-z27.b }, z9.b[0]\n"
"ble 20f\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b6b0 // udot za.s[x9, 0], { z20.b-z23.b }, z1.b[1]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b5b1 // udot za.s[x9, 1], { z12.b-z15.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b630 // udot za.s[x9, 0], { z16.b-z19.b }, z9.b[1]\n"
+ ".inst 0xc159b731 // udot za.s[x9, 1], { z24.b-z27.b }, z9.b[1]\n"
"ble 20f\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b9b0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[2]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b9b1 // udot za.s[x9, 1], { z12.b-z15.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bbb0 // udot za.s[x9, 0], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xc159b831 // udot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
"ble 20f\n"
- ".inst 0xa0408359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bf30 // udot za.s[x9, 0], { z24.b-z27.b }, z1.b[3]\n"
- ".inst 0xa0418349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151bd31 // udot za.s[x9, 1], { z8.b-z11.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159bf31 // udot za.s[x9, 1], { z24.b-z27.b }, z9.b[3]\n"
"20:" // Width 2: Multiply loop: multiply skip
"tbnz %x[flags], #31, 21f\n"
- "udot z28.s, z1.b, z29.b\n"
+ "udot z11.s, z9.b, z8.b\n"
"21:" // Width 2: Multiply loop: unique 4: skip row sum
"tbnz %x[flags], #31, 22f\n"
"add x21, %x[qp], %[b_offset]\n"
"mov x20, #0x4\n"
- "ld1rw { z16.s }, p2/Z, [x21]\n"
- "neg z16.s, p2/M, z16.s\n"
+ "ld1rw { z1.s }, p2/Z, [x21]\n"
"whilelt p0.s, XZR, x20\n"
- "uaddv d28, p0, z28.s\n"
- "mov z28.s, z28.s[0]\n"
- "mul z28.s, p2/M, z28.s, z16.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "uaddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "neg z1.s, p2/M, z1.s\n"
+ "mul z11.s, p2/M, z11.s, z1.s\n"
"22:" // Width 2: skip row sum fixup
- ".inst 0xc0904b80 // addha za0.s, p2/M, p2/M, z28.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904960 // addha za0.s, p2/M, p2/M, z11.s\n"
+ "add x21, %x[qp], %[per_layer_mul]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
- ".inst 0xc0904b81 // addha za1.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z5.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0xc0904961 // addha za1.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z2.s }, p2/Z, [x21]\n"
+ "add x22, %x[qp], %[c_offset]\n"
"add x21, %x[qp], %[minval]\n"
- ".inst 0xc0904b82 // addha za2.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z9.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904962 // addha za2.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z3.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[maxval]\n"
- ".inst 0xc0904b83 // addha za3.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z16.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c18 // mova { z24.d-z27.d }, za.d[x9, #0]\n"
- ".inst 0xc1a6ac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
- ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
- ".inst 0xc1a6ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z6.s\n"
- ".inst 0xc1a5aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z5.s\n"
- "ld1rw { z21.s }, p2/Z, [x20]\n"
- ".inst 0xc1a5aa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z5.s\n"
- ".inst 0xc1a9ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z9.s\n"
- ".inst 0xc1a9ab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
- ".inst 0xc1b5ce18 // sclamp { z24.s-z27.s }, z16.s, z21.s\n"
- ".inst 0xc1b5ce00 // sclamp { z0.s-z3.s }, z16.s, z21.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "uzp1 z9.h, z26.h, z27.h\n"
- "uzp1 z0.h, z0.h, z1.h\n"
- "uzp1 z26.h, z2.h, z3.h\n"
- "uzp1 z24.b, z24.b, z9.b\n"
- "st1b { z24.b }, p2, [x25]\n"
- "uzp1 z0.b, z0.b, z26.b\n"
- "st1b { z0.b }, p1, [x25, #1, MUL VL]\n"
+ ".inst 0xc0904963 // addha za3.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z9.s }, p2/Z, [x22]\n"
+ "ld1rw { z6.s }, p2/Z, [x21]\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
+ ".inst 0xc0062c14 // mova { z20.d-z23.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ ".inst 0xc1a2ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z2.s\n"
+ ".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
+ ".inst 0xc1a3aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z3.s\n"
+ ".inst 0xc1a3aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
+ ".inst 0xc1a9ab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z9.s\n"
+ ".inst 0xc1a9ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z9.s\n"
+ ".inst 0xc1bdccd4 // sclamp { z20.s-z23.s }, z6.s, z29.s\n"
+ ".inst 0xc1bdcccc // sclamp { z12.s-z15.s }, z6.s, z29.s\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z16.h, z22.h, z23.h\n"
+ "uzp1 z12.h, z12.h, z13.h\n"
+ "uzp1 z24.h, z14.h, z15.h\n"
+ "uzp1 z20.b, z20.b, z16.b\n"
+ "uzp1 z12.b, z12.b, z24.b\n"
+ "st1b { z20.b }, p2, [x25]\n"
+ "st1b { z12.b }, p1, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
"23:" // Width 2: Output done
"b 44f\n"
@@ -328,12 +328,12 @@ void sme2_gemv_u8qa_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
"whilelt p1.b, XZR, x20\n"
"cbz x24, 25f\n"
- ".inst 0xa040c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24]\n"
- ".inst 0xc0042e00 // mova za.d[x9, #0], { z16.d-z19.d }\n"
- ".inst 0xa041c30c // ld1w { z12.s-z15.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042d81 // mova za.d[x9, #1], { z12.d-z15.d }\n"
- ".inst 0xa042c318 // ld1w { z24.s-z27.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042f02 // mova za.d[x9, #2], { z24.d-z27.d }\n"
+ ".inst 0xa040c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24]\n"
+ ".inst 0xa041c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
+ ".inst 0xa042c30c // ld1w { z12.s-z15.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
+ ".inst 0xc0042e80 // mova za.d[x9, #0], { z20.d-z23.d }\n"
+ ".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
+ ".inst 0xc0042d82 // mova za.d[x9, #2], { z12.d-z15.d }\n"
"b 26f\n"
"25:" // Width 3: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -342,136 +342,136 @@ void sme2_gemv_u8qa_dot_16VL (
"ble 29f\n"
"27:" // Width 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b230 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b231 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b1b2 // udot za.s[x9, 2], { z12.b-z15.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b5b0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b5b1 // udot za.s[x9, 1], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa0428355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b6b2 // udot za.s[x9, 2], { z20.b-z23.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b930 // udot za.s[x9, 0], { z8.b-z11.b }, z1.b[2]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b8b1 // udot za.s[x9, 1], { z4.b-z7.b }, z1.b[2]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151ba32 // udot za.s[x9, 2], { z16.b-z19.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bf30 // udot za.s[x9, 0], { z24.b-z27.b }, z1.b[3]\n"
- ".inst 0xa0418345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151bcb1 // udot za.s[x9, 1], { z4.b-z7.b }, z1.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151be32 // udot za.s[x9, 2], { z16.b-z19.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b3b0 // udot za.s[x9, 0], { z28.b-z31.b }, z9.b[0]\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b231 // udot za.s[x9, 1], { z16.b-z19.b }, z9.b[0]\n"
+ ".inst 0xa041837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b1b2 // udot za.s[x9, 2], { z12.b-z15.b }, z9.b[0]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xa040836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b730 // udot za.s[x9, 0], { z24.b-z27.b }, z9.b[1]\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b7b1 // udot za.s[x9, 1], { z28.b-z31.b }, z9.b[1]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b632 // udot za.s[x9, 2], { z16.b-z19.b }, z9.b[1]\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b9b0 // udot za.s[x9, 0], { z12.b-z15.b }, z9.b[2]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b831 // udot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xc159bbb2 // udot za.s[x9, 2], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xc159beb0 // udot za.s[x9, 0], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159bf31 // udot za.s[x9, 1], { z24.b-z27.b }, z9.b[3]\n"
+ ".inst 0xc159be32 // udot za.s[x9, 2], { z16.b-z19.b }, z9.b[3]\n"
"tbnz %x[flags], #31, 28f\n"
- "udot z28.s, z1.b, z29.b\n"
+ "udot z11.s, z9.b, z8.b\n"
"28:" // Width 3: Multiply loop: unique 5: skip row sum
"sub x22, x22, #0x10\n"
"cmp x22, #0x10\n"
"bgt 27b\n"
"29:" // Width 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa040836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xa0408355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc151b2b0 // udot za.s[x9, 0], { z20.b-z23.b }, z1.b[0]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b231 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b232 // udot za.s[x9, 2], { z16.b-z19.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b1b0 // udot za.s[x9, 0], { z12.b-z15.b }, z9.b[0]\n"
+ ".inst 0xc159b031 // udot za.s[x9, 1], { z0.b-z3.b }, z9.b[0]\n"
+ ".inst 0xc159b3b2 // udot za.s[x9, 2], { z28.b-z31.b }, z9.b[0]\n"
"ble 30f\n"
- ".inst 0xa0408359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408365 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b730 // udot za.s[x9, 0], { z24.b-z27.b }, z1.b[1]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b5b1 // udot za.s[x9, 1], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b632 // udot za.s[x9, 2], { z16.b-z19.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b4b0 // udot za.s[x9, 0], { z4.b-z7.b }, z9.b[1]\n"
+ ".inst 0xc159b731 // udot za.s[x9, 1], { z24.b-z27.b }, z9.b[1]\n"
+ ".inst 0xc159b632 // udot za.s[x9, 2], { z16.b-z19.b }, z9.b[1]\n"
"ble 30f\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa040837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151ba30 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151ba31 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0428355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151bab2 // udot za.s[x9, 2], { z20.b-z23.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bbb0 // udot za.s[x9, 0], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xc159b831 // udot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xc159bb32 // udot za.s[x9, 2], { z24.b-z27.b }, z9.b[2]\n"
"ble 30f\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bdb0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[3]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151be31 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[3]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151bdb2 // udot za.s[x9, 2], { z12.b-z15.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bc30 // udot za.s[x9, 0], { z0.b-z3.b }, z9.b[3]\n"
+ ".inst 0xc159beb1 // udot za.s[x9, 1], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159be32 // udot za.s[x9, 2], { z16.b-z19.b }, z9.b[3]\n"
"30:" // Width 3: Multiply loop: multiply skip
"tbnz %x[flags], #31, 31f\n"
- "udot z28.s, z1.b, z29.b\n"
+ "udot z11.s, z9.b, z8.b\n"
"31:" // Width 3: Multiply loop: unique 6: skip row sum
"tbnz %x[flags], #31, 32f\n"
"add x21, %x[qp], %[b_offset]\n"
"mov x20, #0x4\n"
"ld1rw { z16.s }, p2/Z, [x21]\n"
- "neg z16.s, p2/M, z16.s\n"
"whilelt p0.s, XZR, x20\n"
- "uaddv d28, p0, z28.s\n"
- "mov z28.s, z28.s[0]\n"
- "mul z28.s, p2/M, z28.s, z16.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "uaddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "neg z16.s, p2/M, z16.s\n"
+ "mul z11.s, p2/M, z11.s, z16.s\n"
"32:" // Width 3: skip row sum fixup
- ".inst 0xc0904b80 // addha za0.s, p2/M, p2/M, z28.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904960 // addha za0.s, p2/M, p2/M, z11.s\n"
+ "add x21, %x[qp], %[per_layer_mul]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
- ".inst 0xc0904b81 // addha za1.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z1.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0xc0904961 // addha za1.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z2.s }, p2/Z, [x21]\n"
+ "add x22, %x[qp], %[c_offset]\n"
"add x21, %x[qp], %[minval]\n"
- ".inst 0xc0904b82 // addha za2.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z3.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904962 // addha za2.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z1.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[maxval]\n"
- ".inst 0xc0904b83 // addha za3.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z16.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c08 // mova { z8.d-z11.d }, za.d[x9, #0]\n"
- ".inst 0xc1a2ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z2.s\n"
- ".inst 0xc0062c24 // mova { z4.d-z7.d }, za.d[x9, #1]\n"
- ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
- ".inst 0xc0062c4c // mova { z12.d-z15.d }, za.d[x9, #2]\n"
+ ".inst 0xc0904963 // addha za3.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z0.s }, p2/Z, [x22]\n"
+ "ld1rw { z21.s }, p2/Z, [x21]\n"
+ "ld1rw { z20.s }, p2/Z, [x20]\n"
+ ".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c2c // mova { z12.d-z15.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c50 // mova { z16.d-z19.d }, za.d[x9, #2]\n"
+ ".inst 0xc1a2ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
- ".inst 0xc1a1aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z1.s\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
- ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
+ ".inst 0xc1a1aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
- ".inst 0xc1a3ab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z3.s\n"
- ".inst 0xc1a3ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- ".inst 0xc1a3ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z3.s\n"
- ".inst 0xc1a0ce08 // sclamp { z8.s-z11.s }, z16.s, z0.s\n"
- ".inst 0xc1a0ce04 // sclamp { z4.s-z7.s }, z16.s, z0.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
- ".inst 0xc1a0ce0c // sclamp { z12.s-z15.s }, z16.s, z0.s\n"
- "uzp1 z18.h, z10.h, z11.h\n"
- "uzp1 z4.h, z4.h, z5.h\n"
- "uzp1 z17.h, z6.h, z7.h\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a0ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
+ ".inst 0xc1a0ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
+ ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4ceac // sclamp { z12.s-z15.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4ceb0 // sclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "uzp1 z28.h, z28.h, z29.h\n"
+ "uzp1 z21.h, z30.h, z31.h\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "uzp1 z16.h, z14.h, z15.h\n"
- "uzp1 z8.b, z8.b, z18.b\n"
- "st1b { z8.b }, p2, [x25]\n"
- "uzp1 z4.b, z4.b, z17.b\n"
- "st1b { z4.b }, p2, [x25, #1, MUL VL]\n"
- "uzp1 z12.b, z12.b, z16.b\n"
- "st1b { z12.b }, p1, [x25, #2, MUL VL]\n"
+ "uzp1 z20.h, z14.h, z15.h\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
+ "uzp1 z28.b, z28.b, z21.b\n"
+ "uzp1 z12.b, z12.b, z20.b\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z12.b }, p2, [x25, #1, MUL VL]\n"
+ "st1b { z16.b }, p1, [x25, #2, MUL VL]\n"
"addvl x25, x25, #3\n"
"33:" // Width 3: Output done
"b 44f\n"
@@ -484,15 +484,15 @@ void sme2_gemv_u8qa_dot_16VL (
".inst 0xf8b54af8 // rprfm pldmany, x21, [x23]\n"
"whilelt p1.b, XZR, x20\n"
"cbz x24, 35f\n"
- ".inst 0xa040c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24]\n"
- ".inst 0xc0042e80 // mova za.d[x9, #0], { z20.d-z23.d }\n"
- ".inst 0xa041c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
- ".inst 0xc0042e01 // mova za.d[x9, #1], { z16.d-z19.d }\n"
+ ".inst 0xa040c300 // ld1w { z0.s-z3.s }, pn8.b/Z, [x24]\n"
+ ".inst 0xa041c30c // ld1w { z12.s-z15.s }, pn8.b/Z, [x24, #0x4, MUL VL]\n"
".inst 0xa042c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0x8, MUL VL]\n"
- ".inst 0xc0042e02 // mova za.d[x9, #2], { z16.d-z19.d }\n"
- ".inst 0xa043c310 // ld1w { z16.s-z19.s }, pn8.b/Z, [x24, #0xc, MUL VL]\n"
- ".inst 0xc0042e03 // mova za.d[x9, #3], { z16.d-z19.d }\n"
+ ".inst 0xa043c314 // ld1w { z20.s-z23.s }, pn8.b/Z, [x24, #0xc, MUL VL]\n"
+ ".inst 0xc0042c00 // mova za.d[x9, #0], { z0.d-z3.d }\n"
"addvl x24, x24, #16\n"
+ ".inst 0xc0042d81 // mova za.d[x9, #1], { z12.d-z15.d }\n"
+ ".inst 0xc0042e02 // mova za.d[x9, #2], { z16.d-z19.d }\n"
+ ".inst 0xc0042e83 // mova za.d[x9, #3], { z20.d-z23.d }\n"
"b 36f\n"
"35:" // Width 4: no bias
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
@@ -501,164 +501,164 @@ void sme2_gemv_u8qa_dot_16VL (
"ble 39f\n"
"37:" // Width 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b230 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b231 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[0]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b1b2 // udot za.s[x9, 2], { z12.b-z15.b }, z1.b[0]\n"
- ".inst 0xa043834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151b1b3 // udot za.s[x9, 3], { z12.b-z15.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151b630 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b5b1 // udot za.s[x9, 1], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b632 // udot za.s[x9, 2], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151b633 // udot za.s[x9, 3], { z16.b-z19.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151ba30 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b9b1 // udot za.s[x9, 1], { z12.b-z15.b }, z1.b[2]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151ba32 // udot za.s[x9, 2], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151ba33 // udot za.s[x9, 3], { z16.b-z19.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151bdb0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[3]\n"
- ".inst 0xa041834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151bdb1 // udot za.s[x9, 1], { z12.b-z15.b }, z1.b[3]\n"
- ".inst 0xa0428359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151bf32 // udot za.s[x9, 2], { z24.b-z27.b }, z1.b[3]\n"
- ".inst 0xa0438345 // ldnt1b { z4.b-z7.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151bcb3 // udot za.s[x9, 3], { z4.b-z7.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159b030 // udot za.s[x9, 0], { z0.b-z3.b }, z9.b[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b2b1 // udot za.s[x9, 1], { z20.b-z23.b }, z9.b[0]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b3b2 // udot za.s[x9, 2], { z28.b-z31.b }, z9.b[0]\n"
+ ".inst 0xa041837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b333 // udot za.s[x9, 3], { z24.b-z27.b }, z9.b[0]\n"
+ ".inst 0xa0428371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159b430 // udot za.s[x9, 0], { z0.b-z3.b }, z9.b[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b7b1 // udot za.s[x9, 1], { z28.b-z31.b }, z9.b[1]\n"
+ ".inst 0xa0408375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b632 // udot za.s[x9, 2], { z16.b-z19.b }, z9.b[1]\n"
+ ".inst 0xa041837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b5b3 // udot za.s[x9, 3], { z12.b-z15.b }, z9.b[1]\n"
+ ".inst 0xa0428361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159bab0 // udot za.s[x9, 0], { z20.b-z23.b }, z9.b[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bbb1 // udot za.s[x9, 1], { z28.b-z31.b }, z9.b[2]\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xc159b832 // udot za.s[x9, 2], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xc159b9b3 // udot za.s[x9, 3], { z12.b-z15.b }, z9.b[2]\n"
+ ".inst 0xa0428361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159bf30 // udot za.s[x9, 0], { z24.b-z27.b }, z9.b[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159beb1 // udot za.s[x9, 1], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159bc32 // udot za.s[x9, 2], { z0.b-z3.b }, z9.b[3]\n"
+ ".inst 0xc159be33 // udot za.s[x9, 3], { z16.b-z19.b }, z9.b[3]\n"
"tbnz %x[flags], #31, 38f\n"
- "udot z28.s, z1.b, z29.b\n"
+ "udot z11.s, z9.b, z8.b\n"
"38:" // Width 4: Multiply loop: unique 7: skip row sum
"sub x22, x22, #0x10\n"
"cmp x22, #0x10\n"
"bgt 37b\n"
"39:" // Width 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x22\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xa040834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26]\n"
+ "ld1rqb { z9.b }, p0/Z, [x23]\n"
"add x23, x23, #0x10\n"
- ".inst 0xc151b1b0 // udot za.s[x9, 0], { z12.b-z15.b }, z1.b[0]\n"
- ".inst 0xa0418359 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b331 // udot za.s[x9, 1], { z24.b-z27.b }, z1.b[0]\n"
- ".inst 0xa0428349 // ldnt1b { z8.b-z11.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b132 // udot za.s[x9, 2], { z8.b-z11.b }, z1.b[0]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151b233 // udot za.s[x9, 3], { z16.b-z19.b }, z1.b[0]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159b030 // udot za.s[x9, 0], { z0.b-z3.b }, z9.b[0]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b2b1 // udot za.s[x9, 1], { z20.b-z23.b }, z9.b[0]\n"
+ ".inst 0xc159b3b2 // udot za.s[x9, 2], { z28.b-z31.b }, z9.b[0]\n"
+ ".inst 0xc159b233 // udot za.s[x9, 3], { z16.b-z19.b }, z9.b[0]\n"
"ble 40f\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151b630 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151b631 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[1]\n"
- ".inst 0xa042834d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151b5b2 // udot za.s[x9, 2], { z12.b-z15.b }, z1.b[1]\n"
- ".inst 0xa0438355 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151b6b3 // udot za.s[x9, 3], { z20.b-z23.b }, z1.b[1]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa042837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159b430 // udot za.s[x9, 0], { z0.b-z3.b }, z9.b[1]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b731 // udot za.s[x9, 1], { z24.b-z27.b }, z9.b[1]\n"
+ ".inst 0xc159b7b2 // udot za.s[x9, 2], { z28.b-z31.b }, z9.b[1]\n"
+ ".inst 0xc159b633 // udot za.s[x9, 3], { z16.b-z19.b }, z9.b[1]\n"
"ble 40f\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
"subs x22, x22, #0x4\n"
- ".inst 0xc151ba30 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151ba31 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151ba32 // udot za.s[x9, 2], { z16.b-z19.b }, z1.b[2]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151ba33 // udot za.s[x9, 3], { z16.b-z19.b }, z1.b[2]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0418361 // ldnt1b { z0.b-z3.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa043836d // ldnt1b { z12.b-z15.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159bb30 // udot za.s[x9, 0], { z24.b-z27.b }, z9.b[2]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159b831 // udot za.s[x9, 1], { z0.b-z3.b }, z9.b[2]\n"
+ ".inst 0xc159bab2 // udot za.s[x9, 2], { z20.b-z23.b }, z9.b[2]\n"
+ ".inst 0xc159b9b3 // udot za.s[x9, 3], { z12.b-z15.b }, z9.b[2]\n"
"ble 40f\n"
- ".inst 0xa0408351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26]\n"
- ".inst 0xc151be30 // udot za.s[x9, 0], { z16.b-z19.b }, z1.b[3]\n"
- ".inst 0xa0418351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
- ".inst 0xc151be31 // udot za.s[x9, 1], { z16.b-z19.b }, z1.b[3]\n"
- ".inst 0xa0428351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0x8, MUL VL]\n"
- ".inst 0xc151be32 // udot za.s[x9, 2], { z16.b-z19.b }, z1.b[3]\n"
- ".inst 0xa0438351 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x26, #0xc, MUL VL]\n"
- ".inst 0xc151be33 // udot za.s[x9, 3], { z16.b-z19.b }, z1.b[3]\n"
- "addvl x26, x26, #16\n"
+ ".inst 0xa0408379 // ldnt1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa041837d // ldnt1b { z28.b-z31.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0428375 // ldnt1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ ".inst 0xa0438371 // ldnt1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xc159bf30 // udot za.s[x9, 0], { z24.b-z27.b }, z9.b[3]\n"
+ "addvl x27, x27, #16\n"
+ ".inst 0xc159bfb1 // udot za.s[x9, 1], { z28.b-z31.b }, z9.b[3]\n"
+ ".inst 0xc159beb2 // udot za.s[x9, 2], { z20.b-z23.b }, z9.b[3]\n"
+ ".inst 0xc159be33 // udot za.s[x9, 3], { z16.b-z19.b }, z9.b[3]\n"
"40:" // Width 4: Multiply loop: multiply skip
"tbnz %x[flags], #31, 41f\n"
- "udot z28.s, z1.b, z29.b\n"
+ "udot z11.s, z9.b, z8.b\n"
"41:" // Width 4: Multiply loop: unique 8: skip row sum
"tbnz %x[flags], #31, 42f\n"
"add x21, %x[qp], %[b_offset]\n"
"mov x20, #0x4\n"
"ld1rw { z16.s }, p2/Z, [x21]\n"
- "neg z16.s, p2/M, z16.s\n"
"whilelt p0.s, XZR, x20\n"
- "uaddv d28, p0, z28.s\n"
- "mov z28.s, z28.s[0]\n"
- "mul z28.s, p2/M, z28.s, z16.s\n"
"orr %x[flags], %x[flags], #0x80000000\n"
+ "uaddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "neg z16.s, p2/M, z16.s\n"
+ "mul z11.s, p2/M, z11.s, z16.s\n"
"42:" // Width 4: skip row sum fixup
- ".inst 0xc0904b80 // addha za0.s, p2/M, p2/M, z28.s\n"
- "add x20, %x[qp], %[per_layer_mul]\n"
- "ld1rw { z11.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904960 // addha za0.s, p2/M, p2/M, z11.s\n"
+ "add x21, %x[qp], %[per_layer_mul]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
- ".inst 0xc0904b81 // addha za1.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z7.s }, p2/Z, [x20]\n"
- "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0xc0904961 // addha za1.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z10.s }, p2/Z, [x21]\n"
+ "add x22, %x[qp], %[c_offset]\n"
"add x21, %x[qp], %[minval]\n"
- ".inst 0xc0904b82 // addha za2.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z6.s }, p2/Z, [x20]\n"
+ ".inst 0xc0904962 // addha za2.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z12.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[maxval]\n"
- ".inst 0xc0904b83 // addha za3.s, p2/M, p2/M, z28.s\n"
- "ld1rw { z3.s }, p2/Z, [x21]\n"
- ".inst 0xc0062c18 // mova { z24.d-z27.d }, za.d[x9, #0]\n"
- ".inst 0xc1abac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z11.s\n"
- ".inst 0xc0062c30 // mova { z16.d-z19.d }, za.d[x9, #1]\n"
- ".inst 0xc1abac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
- ".inst 0xc0062c54 // mova { z20.d-z23.d }, za.d[x9, #2]\n"
- ".inst 0xc1abac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z11.s\n"
- ".inst 0xc0062c6c // mova { z12.d-z15.d }, za.d[x9, #3]\n"
- ".inst 0xc1abac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z11.s\n"
- ".inst 0xc1a7aa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z7.s\n"
- "ld1rw { z31.s }, p2/Z, [x20]\n"
- ".inst 0xc1a7aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z7.s\n"
- ".inst 0xc1a7aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z7.s\n"
- ".inst 0xc1a7aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z7.s\n"
- ".inst 0xc1a6ab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z6.s\n"
- ".inst 0xc1a6ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z6.s\n"
- ".inst 0xc1a6ab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z6.s\n"
- ".inst 0xc1a6ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z6.s\n"
- ".inst 0xc1bfcc78 // sclamp { z24.s-z27.s }, z3.s, z31.s\n"
- ".inst 0xc1bfcc70 // sclamp { z16.s-z19.s }, z3.s, z31.s\n"
+ ".inst 0xc0904963 // addha za3.s, p2/M, p2/M, z11.s\n"
+ "ld1rw { z13.s }, p2/Z, [x22]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
+ "ld1rw { z18.s }, p2/Z, [x20]\n"
+ ".inst 0xc0062c1c // mova { z28.d-z31.d }, za.d[x9, #0]\n"
+ ".inst 0xc0062c20 // mova { z0.d-z3.d }, za.d[x9, #1]\n"
+ ".inst 0xc0062c58 // mova { z24.d-z27.d }, za.d[x9, #2]\n"
+ ".inst 0xc0062c74 // mova { z20.d-z23.d }, za.d[x9, #3]\n"
+ ".inst 0xc1aaac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
+ ".inst 0xc1aaac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z10.s\n"
+ ".inst 0xc1aaac18 // sqdmulh { z24.s-z27.s }, { z24.s-z27.s }, z10.s\n"
+ ".inst 0xc1aaac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z10.s\n"
+ ".inst 0xc1acaa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z12.s\n"
+ ".inst 0xc1acaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z12.s\n"
+ ".inst 0xc1acaa38 // srshl { z24.s-z27.s }, { z24.s-z27.s }, z12.s\n"
+ ".inst 0xc1acaa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z12.s\n"
+ ".inst 0xc1adab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z13.s\n"
+ ".inst 0xc1adab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z13.s\n"
+ ".inst 0xc1adab18 // add { z24.s-z27.s }, { z24.s-z27.s }, z13.s\n"
+ ".inst 0xc1adab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z13.s\n"
+ ".inst 0xc1b2ce3c // sclamp { z28.s-z31.s }, z17.s, z18.s\n"
+ ".inst 0xc1b2ce20 // sclamp { z0.s-z3.s }, z17.s, z18.s\n"
+ ".inst 0xc1b2ce38 // sclamp { z24.s-z27.s }, z17.s, z18.s\n"
+ ".inst 0xc1b2ce34 // sclamp { z20.s-z23.s }, z17.s, z18.s\n"
+ "uzp1 z28.h, z28.h, z29.h\n"
+ "uzp1 z14.h, z30.h, z31.h\n"
+ "uzp1 z0.h, z0.h, z1.h\n"
+ "uzp1 z18.h, z2.h, z3.h\n"
"uzp1 z24.h, z24.h, z25.h\n"
- ".inst 0xc1bfcc74 // sclamp { z20.s-z23.s }, z3.s, z31.s\n"
- ".inst 0xc1bfcc6c // sclamp { z12.s-z15.s }, z3.s, z31.s\n"
- "uzp1 z25.h, z26.h, z27.h\n"
- "uzp1 z16.h, z16.h, z17.h\n"
- "uzp1 z18.h, z18.h, z19.h\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "uzp1 z17.h, z22.h, z23.h\n"
- "uzp1 z12.h, z12.h, z13.h\n"
- "uzp1 z30.h, z14.h, z15.h\n"
- "uzp1 z24.b, z24.b, z25.b\n"
- "st1b { z24.b }, p2, [x25]\n"
- "uzp1 z16.b, z16.b, z18.b\n"
- "st1b { z16.b }, p2, [x25, #1, MUL VL]\n"
- "uzp1 z20.b, z20.b, z17.b\n"
- "uzp1 z12.b, z12.b, z30.b\n"
- "st1b { z20.b }, p2, [x25, #2, MUL VL]\n"
- "st1b { z12.b }, p1, [x25, #3, MUL VL]\n"
+ "uzp1 z16.h, z22.h, z23.h\n"
+ "uzp1 z28.b, z28.b, z14.b\n"
+ "uzp1 z0.b, z0.b, z18.b\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "uzp1 z20.b, z20.b, z16.b\n"
+ "st1b { z28.b }, p2, [x25]\n"
+ "st1b { z0.b }, p2, [x25, #1, MUL VL]\n"
+ "st1b { z24.b }, p2, [x25, #2, MUL VL]\n"
+ "st1b { z20.b }, p1, [x25, #3, MUL VL]\n"
"addvl x25, x25, #4\n"
"43:" // Width 4: Output done
- "subs x27, x27, #0x4\n"
+ "subs x26, x26, #0x4\n"
"sub %x[N], %x[N], x28, LSL #2\n"
"bgt 4b\n"
"44:" // Exit
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp
index edfb362aab..db4f25bbfa 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
class cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL;
- StdTransformsSME<operand_type, result_type, 1, 4, 2> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 1, 4, 2> transforms = {};
cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp
index 8105300cb7..87ba6d4819 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
B(B), kstride_bytes(roundup(K, 2) * sizeof(bfloat16)),
C(C), ldcb(ldc * sizeof(float)),
M(M), N(N), K(K),
- n_loops(((K / 2) - 1) / 2), n_tail_iters(((K / 2) - 1) % 2),
min(-std::numeric_limits<float>::infinity()),
max(std::numeric_limits<float>::infinity()),
bias(bias),
@@ -88,12 +87,13 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
const long kstride_bytes;
float *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
float min = -std::numeric_limits<float>::infinity();
float max = std::numeric_limits<float>::infinity();
const float *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -112,17 +112,17 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14]\n"
- ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa040c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14]\n"
".inst 0xa041c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
- ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
".inst 0xa042c5d4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
- ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w11, [%x[args], %[offsetof_M]]\n"
@@ -137,103 +137,103 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "fmov z6.s, #1.0\n"
- ".inst 0xa009c29d // ldnt1w { z28.s-z31.s }, p8/Z, [x20, x9, LSL #2]\n"
- ".inst 0x809c00c0 // fmopa za0.s, p0/M, p0/M, z6.s, z28.s\n"
- ".inst 0x809d00c1 // fmopa za1.s, p0/M, p0/M, z6.s, z29.s\n"
- ".inst 0x809e00c2 // fmopa za2.s, p0/M, p0/M, z6.s, z30.s\n"
- ".inst 0x809f00c3 // fmopa za3.s, p0/M, p0/M, z6.s, z31.s\n"
+ "fmov z15.s, #1.0\n"
+ ".inst 0xa109c280 // ld1w { z0.s, z4.s, z8.s, z12.s }, p8/Z, [x20, x9, LSL #2]\n"
+ ".inst 0x808001e0 // fmopa za0.s, p0/M, p0/M, z15.s, z0.s\n"
+ ".inst 0x808401e1 // fmopa za1.s, p0/M, p0/M, z15.s, z4.s\n"
+ ".inst 0x808801e2 // fmopa za2.s, p0/M, p0/M, z15.s, z8.s\n"
+ ".inst 0x808c01e3 // fmopa za3.s, p0/M, p0/M, z15.s, z12.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x9\n"
"mov x21, x10\n"
"incw x20, ALL, MUL #4\n"
"incw x21\n"
"cmp x20, x28\n"
- "csel x21, x10, x21, LT\n"
"mov x20, x15\n"
+ "csel x21, x10, x21, LT\n"
"bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
"cmp x21, x11\n"
"csel x15, x20, x15, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x1\n"
"lsr x20, x20, #0x1\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x9, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- "ld1h { z28.h }, p0/Z, [x26]\n"
- ".inst 0xa040a6e9 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x23]\n"
- "ld1h { z22.h }, p0/Z, [x26, #1, MUL VL]\n"
- ".inst 0xa041a6ed // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1h { z30.h }, p0/Z, [x26, #2, MUL VL]\n"
- ".inst 0xa042a6e5 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1h { z20.h }, p0/Z, [x26, #3, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x9, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1h { z20.h }, p0/Z, [x26]\n"
+ ".inst 0xa140a6f3 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23]\n"
+ "ld1h { z4.h }, p0/Z, [x26, #1, MUL VL]\n"
+ ".inst 0xa041a6ec // ld1h { z12.h-z15.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1h { z29.h }, p0/Z, [x26, #2, MUL VL]\n"
+ ".inst 0xa142a6f2 // ld1h { z18.h, z22.h, z26.h, z30.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x26, #3, MUL VL]\n"
"addvl x26, x26, #4\n"
- ".inst 0xa143a6fb // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ ".inst 0xa043a6e8 // ld1h { z8.h-z11.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x81880380 // bfmopa za0.s, p0/M, p0/M, z28.h, z8.h\n"
- "subs x22, x22, #0x1\n"
- ".inst 0x81890381 // bfmopa za1.s, p0/M, p0/M, z28.h, z9.h\n"
- ".inst 0x818a0382 // bfmopa za2.s, p0/M, p0/M, z28.h, z10.h\n"
- ".inst 0x818b0383 // bfmopa za3.s, p0/M, p0/M, z28.h, z11.h\n"
- "ld1h { z28.h }, p0/Z, [x26]\n"
- ".inst 0x818c02c0 // bfmopa za0.s, p0/M, p0/M, z22.h, z12.h\n"
- ".inst 0xa040a6e9 // ldnt1h { z8.h-z11.h }, pn9.b/Z, [x23]\n"
- ".inst 0x818d02c1 // bfmopa za1.s, p0/M, p0/M, z22.h, z13.h\n"
- ".inst 0x818e02c2 // bfmopa za2.s, p0/M, p0/M, z22.h, z14.h\n"
- ".inst 0x818f02c3 // bfmopa za3.s, p0/M, p0/M, z22.h, z15.h\n"
- "ld1h { z22.h }, p0/Z, [x26, #1, MUL VL]\n"
- ".inst 0x818403c0 // bfmopa za0.s, p0/M, p0/M, z30.h, z4.h\n"
- ".inst 0xa041a6ed // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0x818503c1 // bfmopa za1.s, p0/M, p0/M, z30.h, z5.h\n"
- ".inst 0x818603c2 // bfmopa za2.s, p0/M, p0/M, z30.h, z6.h\n"
- ".inst 0x818703c3 // bfmopa za3.s, p0/M, p0/M, z30.h, z7.h\n"
- "ld1h { z30.h }, p0/Z, [x26, #2, MUL VL]\n"
- ".inst 0xa042a6e5 // ldnt1h { z4.h-z7.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
".inst 0x81930280 // bfmopa za0.s, p0/M, p0/M, z20.h, z19.h\n"
+ "subs x21, x21, #0x1\n"
".inst 0x81970281 // bfmopa za1.s, p0/M, p0/M, z20.h, z23.h\n"
".inst 0x819b0282 // bfmopa za2.s, p0/M, p0/M, z20.h, z27.h\n"
".inst 0x819f0283 // bfmopa za3.s, p0/M, p0/M, z20.h, z31.h\n"
- "ld1h { z20.h }, p0/Z, [x26, #3, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x26]\n"
+ ".inst 0x818c0080 // bfmopa za0.s, p0/M, p0/M, z4.h, z12.h\n"
+ ".inst 0xa140a6f3 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23]\n"
+ ".inst 0x818d0081 // bfmopa za1.s, p0/M, p0/M, z4.h, z13.h\n"
+ ".inst 0x818e0082 // bfmopa za2.s, p0/M, p0/M, z4.h, z14.h\n"
+ ".inst 0x818f0083 // bfmopa za3.s, p0/M, p0/M, z4.h, z15.h\n"
+ "ld1h { z4.h }, p0/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x819203a0 // bfmopa za0.s, p0/M, p0/M, z29.h, z18.h\n"
+ ".inst 0xa041a6ec // ld1h { z12.h-z15.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x819603a1 // bfmopa za1.s, p0/M, p0/M, z29.h, z22.h\n"
+ ".inst 0x819a03a2 // bfmopa za2.s, p0/M, p0/M, z29.h, z26.h\n"
+ ".inst 0x819e03a3 // bfmopa za3.s, p0/M, p0/M, z29.h, z30.h\n"
+ "ld1h { z29.h }, p0/Z, [x26, #2, MUL VL]\n"
+ ".inst 0xa142a6f2 // ld1h { z18.h, z22.h, z26.h, z30.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ ".inst 0x81880040 // bfmopa za0.s, p0/M, p0/M, z2.h, z8.h\n"
+ ".inst 0x81890041 // bfmopa za1.s, p0/M, p0/M, z2.h, z9.h\n"
+ ".inst 0x818a0042 // bfmopa za2.s, p0/M, p0/M, z2.h, z10.h\n"
+ ".inst 0x818b0043 // bfmopa za3.s, p0/M, p0/M, z2.h, z11.h\n"
+ "ld1h { z2.h }, p0/Z, [x26, #3, MUL VL]\n"
"addvl x26, x26, #4\n"
- ".inst 0xa143a6fb // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ ".inst 0xa043a6e8 // ld1h { z8.h-z11.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x81880380 // bfmopa za0.s, p0/M, p0/M, z28.h, z8.h\n"
- ".inst 0x81890381 // bfmopa za1.s, p0/M, p0/M, z28.h, z9.h\n"
- ".inst 0x818a0382 // bfmopa za2.s, p0/M, p0/M, z28.h, z10.h\n"
- ".inst 0x818b0383 // bfmopa za3.s, p0/M, p0/M, z28.h, z11.h\n"
- ".inst 0x818c02c0 // bfmopa za0.s, p0/M, p0/M, z22.h, z12.h\n"
- ".inst 0x818d02c1 // bfmopa za1.s, p0/M, p0/M, z22.h, z13.h\n"
- ".inst 0x818e02c2 // bfmopa za2.s, p0/M, p0/M, z22.h, z14.h\n"
- ".inst 0x818f02c3 // bfmopa za3.s, p0/M, p0/M, z22.h, z15.h\n"
- ".inst 0x818403c0 // bfmopa za0.s, p0/M, p0/M, z30.h, z4.h\n"
- ".inst 0x818503c1 // bfmopa za1.s, p0/M, p0/M, z30.h, z5.h\n"
- ".inst 0x818603c2 // bfmopa za2.s, p0/M, p0/M, z30.h, z6.h\n"
- ".inst 0x818703c3 // bfmopa za3.s, p0/M, p0/M, z30.h, z7.h\n"
".inst 0x81930280 // bfmopa za0.s, p0/M, p0/M, z20.h, z19.h\n"
".inst 0x81970281 // bfmopa za1.s, p0/M, p0/M, z20.h, z23.h\n"
".inst 0x819b0282 // bfmopa za2.s, p0/M, p0/M, z20.h, z27.h\n"
".inst 0x819f0283 // bfmopa za3.s, p0/M, p0/M, z20.h, z31.h\n"
+ ".inst 0x818c0080 // bfmopa za0.s, p0/M, p0/M, z4.h, z12.h\n"
+ ".inst 0x818d0081 // bfmopa za1.s, p0/M, p0/M, z4.h, z13.h\n"
+ ".inst 0x818e0082 // bfmopa za2.s, p0/M, p0/M, z4.h, z14.h\n"
+ ".inst 0x818f0083 // bfmopa za3.s, p0/M, p0/M, z4.h, z15.h\n"
+ ".inst 0x819203a0 // bfmopa za0.s, p0/M, p0/M, z29.h, z18.h\n"
+ ".inst 0x819603a1 // bfmopa za1.s, p0/M, p0/M, z29.h, z22.h\n"
+ ".inst 0x819a03a2 // bfmopa za2.s, p0/M, p0/M, z29.h, z26.h\n"
+ ".inst 0x819e03a3 // bfmopa za3.s, p0/M, p0/M, z29.h, z30.h\n"
+ ".inst 0x81880040 // bfmopa za0.s, p0/M, p0/M, z2.h, z8.h\n"
+ ".inst 0x81890041 // bfmopa za1.s, p0/M, p0/M, z2.h, z9.h\n"
+ ".inst 0x818a0042 // bfmopa za2.s, p0/M, p0/M, z2.h, z10.h\n"
+ ".inst 0x818b0043 // bfmopa za3.s, p0/M, p0/M, z2.h, z11.h\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1h { z8.h }, p0/Z, [x26]\n"
- "subs x21, x21, #0x1\n"
+ "ld1h { z26.h }, p0/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
"addvl x26, x26, #1\n"
".inst 0xa140a6e3 // ld1h { z3.h, z7.h, z11.h, z15.h }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #4\n"
- ".inst 0x81830100 // bfmopa za0.s, p0/M, p0/M, z8.h, z3.h\n"
- ".inst 0x81870101 // bfmopa za1.s, p0/M, p0/M, z8.h, z7.h\n"
- ".inst 0x818b0102 // bfmopa za2.s, p0/M, p0/M, z8.h, z11.h\n"
- ".inst 0x818f0103 // bfmopa za3.s, p0/M, p0/M, z8.h, z15.h\n"
+ ".inst 0x81830340 // bfmopa za0.s, p0/M, p0/M, z26.h, z3.h\n"
+ ".inst 0x81870341 // bfmopa za1.s, p0/M, p0/M, z26.h, z7.h\n"
+ ".inst 0x818b0342 // bfmopa za2.s, p0/M, p0/M, z26.h, z11.h\n"
+ ".inst 0x818f0343 // bfmopa za3.s, p0/M, p0/M, z26.h, z15.h\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x15, #1, 14f\n"
@@ -241,25 +241,25 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5d4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa040c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14]\n"
".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
- ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5d4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa060c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
+ ".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa061c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5a0 // st1w { z0.s-z3.s }, pn9.b, [x13, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
- "addvl x14, x14, #16\n"
- ".inst 0xa061c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
- ".inst 0xa062c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0x8, MUL VL]\n"
- ".inst 0xa063c5a0 // st1w { z0.s-z3.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa063c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0xc, MUL VL]\n"
"addvl x13, x13, #16\n"
"blt 11b\n"
"b 24f\n"
@@ -267,31 +267,31 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xa060c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
+ ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
- ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa061c5b8 // st1w { z24.s-z27.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5ac // st1w { z12.s-z15.s }, pn9.b, [x13]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"cmp x12, x20\n"
".inst 0xa062c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0x8, MUL VL]\n"
- ".inst 0xa063c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa063c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
"addvl x13, x13, #16\n"
"blt 13b\n"
"b 24f\n"
"14:" // Store to output array
"ldr x25, [%x[args], %[offsetof_C]]\n"
- "add x25, x25, x9, LSL #2\n" // C += n
"sub x24, x11, x10\n"
"ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "add x25, x25, x9, LSL #2\n" // C += n
"madd x25, x10, x23, x25\n" // C += m * ldc
"tbz x15, #2, 18f\n"
"cntw x20\n"
+ "mov x12, #0x0\n"
"cmp x24, x20\n"
"csel x22, x24, x20, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
@@ -301,30 +301,30 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
"add x25, x25, x23\n"
+ "add x12, x12, #0x4\n"
".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
"add x25, x25, x23\n"
- "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
"add x25, x25, x23\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa160c323 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
"cbz x20, 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa160c330 // st1w { z16.s, z20.s, z24.s, z28.s }, p8, [x25]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"beq 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xa160c331 // st1w { z17.s, z21.s, z25.s, z29.s }, p8, [x25]\n"
+ ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"beq 17f\n"
- ".inst 0xa160c332 // st1w { z18.s, z22.s, z26.s, z30.s }, p8, [x25]\n"
+ ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
"subs x24, x24, x22\n"
@@ -332,29 +332,29 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
"b 22f\n"
"18:" // Store to output array: Skip activation: End
"cntw x20\n"
- "cmp x24, x20\n"
"ld1rw { z1.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
+ "cmp x24, x20\n"
+ "ld1rw { z0.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"csel x20, x24, x20, LT\n"
"lsr x21, x20, #0x2\n"
- "ld1rw { z0.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 20f\n"
"19:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
+ ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
".inst 0xc1a0c83c // fclamp { z28.s-z31.s }, z1.s, z0.s\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa160c330 // st1w { z16.s, z20.s, z24.s, z28.s }, p8, [x25]\n"
"add x25, x25, x23\n"
- "add x12, x12, #0x4\n"
".inst 0xa160c331 // st1w { z17.s, z21.s, z25.s, z29.s }, p8, [x25]\n"
"add x25, x25, x23\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa160c332 // st1w { z18.s, z22.s, z26.s, z30.s }, p8, [x25]\n"
"add x25, x25, x23\n"
".inst 0xa160c333 // st1w { z19.s, z23.s, z27.s, z31.s }, p8, [x25]\n"
@@ -364,13 +364,13 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
"cbz x20, 21f\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
+ ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
".inst 0xc1a0c83c // fclamp { z28.s-z31.s }, z1.s, z0.s\n"
- "subs x20, x20, #0x1\n"
".inst 0xa160c330 // st1w { z16.s, z20.s, z24.s, z28.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"beq 21f\n"
@@ -385,25 +385,25 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"23:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa042c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x14, x14, #16\n"
"blt 23b\n"
"24:" // End block
"incw x9, ALL, MUL #4\n"
"cmp x9, x28\n"
"blt 3b\n"
"incw x10\n"
- "cmp x10, x11\n"
"mov x9, #0x0\n"
+ "cmp x10, x11\n"
"mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp
index ca7b0573fc..7f681b2734 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
class cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL;
- StdTransformsSME<operand_type, result_type, 2, 2, 2> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 2, 2, 2> transforms = {};
cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp
index 20c1de9418..8b3c6d7fec 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
B(B), kstride_bytes(roundup(K, 2) * sizeof(bfloat16)),
C(C), ldcb(ldc * sizeof(float)),
M(M), N(N), K(K),
- n_loops(((K / 2) - 1) / 2), n_tail_iters(((K / 2) - 1) % 2),
min(-std::numeric_limits<float>::infinity()),
max(std::numeric_limits<float>::infinity()),
bias(bias),
@@ -88,12 +87,13 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
const long kstride_bytes;
float *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
float min = -std::numeric_limits<float>::infinity();
float max = std::numeric_limits<float>::infinity();
const float *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -112,17 +112,17 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa042c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -137,103 +137,103 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "fmov z12.s, #1.0\n"
- ".inst 0xa10a4289 // ldnt1w { z1.s, z9.s }, p8/Z, [x20, x10, LSL #2]\n"
- ".inst 0x80810180 // fmopa za0.s, p0/M, p0/M, z12.s, z1.s\n"
- ".inst 0x80890181 // fmopa za1.s, p0/M, p0/M, z12.s, z9.s\n"
- ".inst 0x80810182 // fmopa za2.s, p0/M, p0/M, z12.s, z1.s\n"
- ".inst 0x80890183 // fmopa za3.s, p0/M, p0/M, z12.s, z9.s\n"
+ "fmov z17.s, #1.0\n"
+ ".inst 0xa00a428a // ld1w { z10.s-z11.s }, p8/Z, [x20, x10, LSL #2]\n"
+ ".inst 0x808a0220 // fmopa za0.s, p0/M, p0/M, z17.s, z10.s\n"
+ ".inst 0x808b0221 // fmopa za1.s, p0/M, p0/M, z17.s, z11.s\n"
+ ".inst 0x808a0222 // fmopa za2.s, p0/M, p0/M, z17.s, z10.s\n"
+ ".inst 0x808b0223 // fmopa za3.s, p0/M, p0/M, z17.s, z11.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20, ALL, MUL #2\n"
"incw x21, ALL, MUL #2\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x1\n"
"lsr x20, x20, #0x1\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0402772 // ld1h { z18.h-z19.h }, pn9.b/Z, [x27]\n"
- ".inst 0xa04026e3 // ldnt1h { z2.h-z3.h }, pn9.b/Z, [x23]\n"
- ".inst 0xa0412764 // ld1h { z4.h-z5.h }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa04126fb // ldnt1h { z26.h-z27.h }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa042276a // ld1h { z10.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04226f5 // ldnt1h { z20.h-z21.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa0432766 // ld1h { z6.h-z7.h }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0402776 // ld1h { z22.h-z23.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa14026e7 // ld1h { z7.h, z15.h }, pn9.b/Z, [x23]\n"
+ ".inst 0xa1412766 // ld1h { z6.h, z14.h }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa04126f4 // ld1h { z20.h-z21.h }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa1422762 // ld1h { z2.h, z10.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14226e3 // ld1h { z3.h, z11.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1432761 // ld1h { z1.h, z9.h }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa04326e9 // ldnt1h { z8.h-z9.h }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa04326e4 // ld1h { z4.h-z5.h }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x81820240 // bfmopa za0.s, p0/M, p0/M, z18.h, z2.h\n"
- "subs x22, x22, #0x1\n"
- ".inst 0x81830241 // bfmopa za1.s, p0/M, p0/M, z18.h, z3.h\n"
- ".inst 0x81820262 // bfmopa za2.s, p0/M, p0/M, z19.h, z2.h\n"
- ".inst 0x81830263 // bfmopa za3.s, p0/M, p0/M, z19.h, z3.h\n"
- ".inst 0xa0402772 // ld1h { z18.h-z19.h }, pn9.b/Z, [x27]\n"
- ".inst 0x819a0080 // bfmopa za0.s, p0/M, p0/M, z4.h, z26.h\n"
- ".inst 0xa04026e3 // ldnt1h { z2.h-z3.h }, pn9.b/Z, [x23]\n"
- ".inst 0x819b0081 // bfmopa za1.s, p0/M, p0/M, z4.h, z27.h\n"
- ".inst 0x819a00a2 // bfmopa za2.s, p0/M, p0/M, z5.h, z26.h\n"
- ".inst 0x819b00a3 // bfmopa za3.s, p0/M, p0/M, z5.h, z27.h\n"
- ".inst 0xa0412764 // ld1h { z4.h-z5.h }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0x81940140 // bfmopa za0.s, p0/M, p0/M, z10.h, z20.h\n"
- ".inst 0xa04126fb // ldnt1h { z26.h-z27.h }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0x81950141 // bfmopa za1.s, p0/M, p0/M, z10.h, z21.h\n"
- ".inst 0x81940162 // bfmopa za2.s, p0/M, p0/M, z11.h, z20.h\n"
- ".inst 0x81950163 // bfmopa za3.s, p0/M, p0/M, z11.h, z21.h\n"
- ".inst 0xa042276a // ld1h { z10.h-z11.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04226f5 // ldnt1h { z20.h-z21.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0x818800c0 // bfmopa za0.s, p0/M, p0/M, z6.h, z8.h\n"
- ".inst 0x818900c1 // bfmopa za1.s, p0/M, p0/M, z6.h, z9.h\n"
- ".inst 0x818800e2 // bfmopa za2.s, p0/M, p0/M, z7.h, z8.h\n"
- ".inst 0x818900e3 // bfmopa za3.s, p0/M, p0/M, z7.h, z9.h\n"
- ".inst 0xa0432766 // ld1h { z6.h-z7.h }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ ".inst 0x818702c0 // bfmopa za0.s, p0/M, p0/M, z22.h, z7.h\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0x818f02c1 // bfmopa za1.s, p0/M, p0/M, z22.h, z15.h\n"
+ ".inst 0x818702e2 // bfmopa za2.s, p0/M, p0/M, z23.h, z7.h\n"
+ ".inst 0x818f02e3 // bfmopa za3.s, p0/M, p0/M, z23.h, z15.h\n"
+ ".inst 0xa0402776 // ld1h { z22.h-z23.h }, pn9.b/Z, [x27]\n"
+ ".inst 0x819400c0 // bfmopa za0.s, p0/M, p0/M, z6.h, z20.h\n"
+ ".inst 0xa14026e7 // ld1h { z7.h, z15.h }, pn9.b/Z, [x23]\n"
+ ".inst 0x819500c1 // bfmopa za1.s, p0/M, p0/M, z6.h, z21.h\n"
+ ".inst 0x819401c2 // bfmopa za2.s, p0/M, p0/M, z14.h, z20.h\n"
+ ".inst 0x819501c3 // bfmopa za3.s, p0/M, p0/M, z14.h, z21.h\n"
+ ".inst 0xa1412766 // ld1h { z6.h, z14.h }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0x81830040 // bfmopa za0.s, p0/M, p0/M, z2.h, z3.h\n"
+ ".inst 0xa04126f4 // ld1h { z20.h-z21.h }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0x818b0041 // bfmopa za1.s, p0/M, p0/M, z2.h, z11.h\n"
+ ".inst 0x81830142 // bfmopa za2.s, p0/M, p0/M, z10.h, z3.h\n"
+ ".inst 0x818b0143 // bfmopa za3.s, p0/M, p0/M, z10.h, z11.h\n"
+ ".inst 0xa1422762 // ld1h { z2.h, z10.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14226e3 // ld1h { z3.h, z11.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x81840020 // bfmopa za0.s, p0/M, p0/M, z1.h, z4.h\n"
+ ".inst 0x81850021 // bfmopa za1.s, p0/M, p0/M, z1.h, z5.h\n"
+ ".inst 0x81840122 // bfmopa za2.s, p0/M, p0/M, z9.h, z4.h\n"
+ ".inst 0x81850123 // bfmopa za3.s, p0/M, p0/M, z9.h, z5.h\n"
+ ".inst 0xa1432761 // ld1h { z1.h, z9.h }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa04326e9 // ldnt1h { z8.h-z9.h }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa04326e4 // ld1h { z4.h-z5.h }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x81820240 // bfmopa za0.s, p0/M, p0/M, z18.h, z2.h\n"
- ".inst 0x81830241 // bfmopa za1.s, p0/M, p0/M, z18.h, z3.h\n"
- ".inst 0x81820262 // bfmopa za2.s, p0/M, p0/M, z19.h, z2.h\n"
- ".inst 0x81830263 // bfmopa za3.s, p0/M, p0/M, z19.h, z3.h\n"
- ".inst 0x819a0080 // bfmopa za0.s, p0/M, p0/M, z4.h, z26.h\n"
- ".inst 0x819b0081 // bfmopa za1.s, p0/M, p0/M, z4.h, z27.h\n"
- ".inst 0x819a00a2 // bfmopa za2.s, p0/M, p0/M, z5.h, z26.h\n"
- ".inst 0x819b00a3 // bfmopa za3.s, p0/M, p0/M, z5.h, z27.h\n"
- ".inst 0x81940140 // bfmopa za0.s, p0/M, p0/M, z10.h, z20.h\n"
- ".inst 0x81950141 // bfmopa za1.s, p0/M, p0/M, z10.h, z21.h\n"
- ".inst 0x81940162 // bfmopa za2.s, p0/M, p0/M, z11.h, z20.h\n"
- ".inst 0x81950163 // bfmopa za3.s, p0/M, p0/M, z11.h, z21.h\n"
- ".inst 0x818800c0 // bfmopa za0.s, p0/M, p0/M, z6.h, z8.h\n"
- ".inst 0x818900c1 // bfmopa za1.s, p0/M, p0/M, z6.h, z9.h\n"
- ".inst 0x818800e2 // bfmopa za2.s, p0/M, p0/M, z7.h, z8.h\n"
- ".inst 0x818900e3 // bfmopa za3.s, p0/M, p0/M, z7.h, z9.h\n"
+ ".inst 0x818702c0 // bfmopa za0.s, p0/M, p0/M, z22.h, z7.h\n"
+ ".inst 0x818f02c1 // bfmopa za1.s, p0/M, p0/M, z22.h, z15.h\n"
+ ".inst 0x818702e2 // bfmopa za2.s, p0/M, p0/M, z23.h, z7.h\n"
+ ".inst 0x818f02e3 // bfmopa za3.s, p0/M, p0/M, z23.h, z15.h\n"
+ ".inst 0x819400c0 // bfmopa za0.s, p0/M, p0/M, z6.h, z20.h\n"
+ ".inst 0x819500c1 // bfmopa za1.s, p0/M, p0/M, z6.h, z21.h\n"
+ ".inst 0x819401c2 // bfmopa za2.s, p0/M, p0/M, z14.h, z20.h\n"
+ ".inst 0x819501c3 // bfmopa za3.s, p0/M, p0/M, z14.h, z21.h\n"
+ ".inst 0x81830040 // bfmopa za0.s, p0/M, p0/M, z2.h, z3.h\n"
+ ".inst 0x818b0041 // bfmopa za1.s, p0/M, p0/M, z2.h, z11.h\n"
+ ".inst 0x81830142 // bfmopa za2.s, p0/M, p0/M, z10.h, z3.h\n"
+ ".inst 0x818b0143 // bfmopa za3.s, p0/M, p0/M, z10.h, z11.h\n"
+ ".inst 0x81840020 // bfmopa za0.s, p0/M, p0/M, z1.h, z4.h\n"
+ ".inst 0x81850021 // bfmopa za1.s, p0/M, p0/M, z1.h, z5.h\n"
+ ".inst 0x81840122 // bfmopa za2.s, p0/M, p0/M, z9.h, z4.h\n"
+ ".inst 0x81850123 // bfmopa za3.s, p0/M, p0/M, z9.h, z5.h\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa040277e // ld1h { z30.h-z31.h }, pn9.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa040276a // ld1h { z10.h-z11.h }, pn9.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #2\n"
- ".inst 0xa14026e5 // ld1h { z5.h, z13.h }, pn9.b/Z, [x23]\n"
+ ".inst 0xa04026ee // ld1h { z14.h-z15.h }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #2\n"
- ".inst 0x818503c0 // bfmopa za0.s, p0/M, p0/M, z30.h, z5.h\n"
- ".inst 0x818d03c1 // bfmopa za1.s, p0/M, p0/M, z30.h, z13.h\n"
- ".inst 0x818503e2 // bfmopa za2.s, p0/M, p0/M, z31.h, z5.h\n"
- ".inst 0x818d03e3 // bfmopa za3.s, p0/M, p0/M, z31.h, z13.h\n"
+ ".inst 0x818e0140 // bfmopa za0.s, p0/M, p0/M, z10.h, z14.h\n"
+ ".inst 0x818f0141 // bfmopa za1.s, p0/M, p0/M, z10.h, z15.h\n"
+ ".inst 0x818e0162 // bfmopa za2.s, p0/M, p0/M, z11.h, z14.h\n"
+ ".inst 0x818f0163 // bfmopa za3.s, p0/M, p0/M, z11.h, z15.h\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x16, #1, 14f\n"
@@ -241,24 +241,24 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
+ ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa060c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xa061c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
".inst 0xa063c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
@@ -267,31 +267,31 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
- ".inst 0xa060c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14]\n"
- ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
- ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa061c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 30f\n"
"14:" // Store to output array
"ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10, LSL #2\n" // C += n
"sub x25, x13, x11\n"
"ldr x24, [%x[args], %[offsetof_ldcb]]\n"
+ "add x26, x26, x10, LSL #2\n" // C += n
"madd x26, x11, x24, x26\n" // C += m * ldc
"tbz x16, #2, 21f\n"
"cntw x23\n"
+ "mov x12, #0x0\n"
"cmp x25, x23\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
@@ -299,36 +299,36 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
"add x26, x26, x24\n"
+ "add x12, x12, #0x4\n"
".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
"cbz x20, 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa1604354 // st1w { z20.s, z28.s }, p8, [x26]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xa1604340 // st1w { z0.s, z8.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xa1604355 // st1w { z21.s, z29.s }, p8, [x26]\n"
+ ".inst 0xa1604341 // st1w { z1.s, z9.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 17f\n"
- ".inst 0xa1604356 // st1w { z22.s, z30.s }, p8, [x26]\n"
+ ".inst 0xa1604342 // st1w { z2.s, z10.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 21f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 19f\n"
"18:" // Store to output array: Skip activation: Accumulator row 1 loop
@@ -336,28 +336,28 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
"add x26, x26, x24\n"
+ "add x12, x12, #0x4\n"
".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"blt 18b\n"
"19:" // Store to output array: Skip activation: Accumulator row 1 oddments
"cbz x20, 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
+ ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
+ ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 20f\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"20:" // Store to output array: Skip activation: Accumulator row 1 oddments: End
"subs x25, x25, x22\n"
@@ -365,37 +365,37 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
"b 28f\n"
"21:" // Store to output array: Skip activation: End
"cntw x23\n"
+ "ld1rw { z21.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
"cmp x25, x23\n"
- "ld1rw { z1.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "ld1rw { z20.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "ld1rw { z0.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 23f\n"
"22:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c83c // fclamp { z28.s-z31.s }, z1.s, z0.s\n"
- ".inst 0xa1604354 // st1w { z20.s, z28.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604355 // st1w { z21.s, z29.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xa1604356 // st1w { z22.s, z30.s }, p8, [x26]\n"
+ ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xa1604357 // st1w { z23.s, z31.s }, p8, [x26]\n"
+ ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"blt 22b\n"
"23:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 24f\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
"subs x20, x20, #0x1\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 24f\n"
@@ -409,34 +409,34 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
"subs x25, x25, x22\n"
"beq 28f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x23, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 26f\n"
"25:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
+ ".inst 0xc1b4caa0 // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4caa8 // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ ".inst 0xa1604340 // st1w { z0.s, z8.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
+ ".inst 0xa1604341 // st1w { z1.s, z9.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604342 // st1w { z2.s, z10.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604343 // st1w { z3.s, z11.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"blt 25b\n"
"26:" // Store to output array: Accumulator row 1 oddments
"cbz x20, 27f\n"
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
"subs x20, x20, #0x1\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 27f\n"
@@ -451,25 +451,25 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"29:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 29b\n"
"30:" // End block
"incw x10, ALL, MUL #2\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp
index 7b31d6d2db..3c1dff268f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
class cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const bfloat16 *const A, const bfloat16 *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL;
- StdTransformsSME<operand_type, result_type, 4, 1, 2> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 4, 1, 2> transforms = {};
cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp
index 70c94d32a3..b4b94f305e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
B(B), kstride_bytes(roundup(K, 2) * sizeof(bfloat16)),
C(C), ldcb(ldc * sizeof(float)),
M(M), N(N), K(K),
- n_loops(((K / 2) - 1) / 2), n_tail_iters(((K / 2) - 1) % 2),
min(-std::numeric_limits<float>::infinity()),
max(std::numeric_limits<float>::infinity()),
bias(bias),
@@ -88,12 +87,13 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
const long kstride_bytes;
float *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
float min = -std::numeric_limits<float>::infinity();
float max = std::numeric_limits<float>::infinity();
const float *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -112,17 +112,17 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa042c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -137,103 +137,103 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "fmov z11.s, #1.0\n"
- "ldnt1w { z13.s }, p0/Z, [x20, x10, LSL #2]\n"
- ".inst 0x808d2560 // fmopa za0.s, p1/M, p1/M, z11.s, z13.s\n"
- ".inst 0x808d2561 // fmopa za1.s, p1/M, p1/M, z11.s, z13.s\n"
- ".inst 0x808d2562 // fmopa za2.s, p1/M, p1/M, z11.s, z13.s\n"
- ".inst 0x808d2563 // fmopa za3.s, p1/M, p1/M, z11.s, z13.s\n"
+ "fmov z6.s, #1.0\n"
+ "ld1w { z26.s }, p0/Z, [x20, x10, LSL #2]\n"
+ ".inst 0x809a24c0 // fmopa za0.s, p1/M, p1/M, z6.s, z26.s\n"
+ ".inst 0x809a24c1 // fmopa za1.s, p1/M, p1/M, z6.s, z26.s\n"
+ ".inst 0x809a24c2 // fmopa za2.s, p1/M, p1/M, z6.s, z26.s\n"
+ ".inst 0x809a24c3 // fmopa za3.s, p1/M, p1/M, z6.s, z26.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20\n"
"incw x21, ALL, MUL #4\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x1\n"
"lsr x20, x20, #0x1\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa140a360 // ld1h { z0.h, z4.h, z8.h, z12.h }, pn8.b/Z, [x27]\n"
- "ldnt1h { z19.h }, p1/Z, [x23]\n"
- ".inst 0xa141a371 // ld1h { z17.h, z21.h, z25.h, z29.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1h { z22.h }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa142a370 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1h { z23.h }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa143a363 // ld1h { z3.h, z7.h, z11.h, z15.h }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa140a363 // ld1h { z3.h, z7.h, z11.h, z15.h }, pn8.b/Z, [x27]\n"
+ "ld1h { z13.h }, p1/Z, [x23]\n"
+ ".inst 0xa141a372 // ld1h { z18.h, z22.h, z26.h, z30.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "ld1h { z21.h }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa142a373 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa143a370 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1h { z2.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z2.h }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x81932400 // bfmopa za0.s, p1/M, p1/M, z0.h, z19.h\n"
- "subs x22, x22, #0x1\n"
- ".inst 0x81932481 // bfmopa za1.s, p1/M, p1/M, z4.h, z19.h\n"
- ".inst 0x81932502 // bfmopa za2.s, p1/M, p1/M, z8.h, z19.h\n"
- ".inst 0x81932583 // bfmopa za3.s, p1/M, p1/M, z12.h, z19.h\n"
- ".inst 0xa140a360 // ld1h { z0.h, z4.h, z8.h, z12.h }, pn8.b/Z, [x27]\n"
- ".inst 0x81962620 // bfmopa za0.s, p1/M, p1/M, z17.h, z22.h\n"
- "ldnt1h { z19.h }, p1/Z, [x23]\n"
- ".inst 0x819626a1 // bfmopa za1.s, p1/M, p1/M, z21.h, z22.h\n"
- ".inst 0x81962722 // bfmopa za2.s, p1/M, p1/M, z25.h, z22.h\n"
- ".inst 0x819627a3 // bfmopa za3.s, p1/M, p1/M, z29.h, z22.h\n"
- ".inst 0xa141a371 // ld1h { z17.h, z21.h, z25.h, z29.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0x81972600 // bfmopa za0.s, p1/M, p1/M, z16.h, z23.h\n"
- "ldnt1h { z22.h }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0x81972681 // bfmopa za1.s, p1/M, p1/M, z20.h, z23.h\n"
- ".inst 0x81972702 // bfmopa za2.s, p1/M, p1/M, z24.h, z23.h\n"
- ".inst 0x81972783 // bfmopa za3.s, p1/M, p1/M, z28.h, z23.h\n"
- ".inst 0xa142a370 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1h { z23.h }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0x81822460 // bfmopa za0.s, p1/M, p1/M, z3.h, z2.h\n"
- ".inst 0x818224e1 // bfmopa za1.s, p1/M, p1/M, z7.h, z2.h\n"
- ".inst 0x81822562 // bfmopa za2.s, p1/M, p1/M, z11.h, z2.h\n"
- ".inst 0x818225e3 // bfmopa za3.s, p1/M, p1/M, z15.h, z2.h\n"
- ".inst 0xa143a363 // ld1h { z3.h, z7.h, z11.h, z15.h }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0x818d2460 // bfmopa za0.s, p1/M, p1/M, z3.h, z13.h\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0x818d24e1 // bfmopa za1.s, p1/M, p1/M, z7.h, z13.h\n"
+ ".inst 0x818d2562 // bfmopa za2.s, p1/M, p1/M, z11.h, z13.h\n"
+ ".inst 0x818d25e3 // bfmopa za3.s, p1/M, p1/M, z15.h, z13.h\n"
+ ".inst 0xa140a363 // ld1h { z3.h, z7.h, z11.h, z15.h }, pn8.b/Z, [x27]\n"
+ ".inst 0x81952640 // bfmopa za0.s, p1/M, p1/M, z18.h, z21.h\n"
+ "ld1h { z13.h }, p1/Z, [x23]\n"
+ ".inst 0x819526c1 // bfmopa za1.s, p1/M, p1/M, z22.h, z21.h\n"
+ ".inst 0x81952742 // bfmopa za2.s, p1/M, p1/M, z26.h, z21.h\n"
+ ".inst 0x819527c3 // bfmopa za3.s, p1/M, p1/M, z30.h, z21.h\n"
+ ".inst 0xa141a372 // ld1h { z18.h, z22.h, z26.h, z30.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0x81912660 // bfmopa za0.s, p1/M, p1/M, z19.h, z17.h\n"
+ "ld1h { z21.h }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0x819126e1 // bfmopa za1.s, p1/M, p1/M, z23.h, z17.h\n"
+ ".inst 0x81912762 // bfmopa za2.s, p1/M, p1/M, z27.h, z17.h\n"
+ ".inst 0x819127e3 // bfmopa za3.s, p1/M, p1/M, z31.h, z17.h\n"
+ ".inst 0xa142a373 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0x81822600 // bfmopa za0.s, p1/M, p1/M, z16.h, z2.h\n"
+ ".inst 0x81822681 // bfmopa za1.s, p1/M, p1/M, z20.h, z2.h\n"
+ ".inst 0x81822702 // bfmopa za2.s, p1/M, p1/M, z24.h, z2.h\n"
+ ".inst 0x81822783 // bfmopa za3.s, p1/M, p1/M, z28.h, z2.h\n"
+ ".inst 0xa143a370 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1h { z2.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z2.h }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x81932400 // bfmopa za0.s, p1/M, p1/M, z0.h, z19.h\n"
- ".inst 0x81932481 // bfmopa za1.s, p1/M, p1/M, z4.h, z19.h\n"
- ".inst 0x81932502 // bfmopa za2.s, p1/M, p1/M, z8.h, z19.h\n"
- ".inst 0x81932583 // bfmopa za3.s, p1/M, p1/M, z12.h, z19.h\n"
- ".inst 0x81962620 // bfmopa za0.s, p1/M, p1/M, z17.h, z22.h\n"
- ".inst 0x819626a1 // bfmopa za1.s, p1/M, p1/M, z21.h, z22.h\n"
- ".inst 0x81962722 // bfmopa za2.s, p1/M, p1/M, z25.h, z22.h\n"
- ".inst 0x819627a3 // bfmopa za3.s, p1/M, p1/M, z29.h, z22.h\n"
- ".inst 0x81972600 // bfmopa za0.s, p1/M, p1/M, z16.h, z23.h\n"
- ".inst 0x81972681 // bfmopa za1.s, p1/M, p1/M, z20.h, z23.h\n"
- ".inst 0x81972702 // bfmopa za2.s, p1/M, p1/M, z24.h, z23.h\n"
- ".inst 0x81972783 // bfmopa za3.s, p1/M, p1/M, z28.h, z23.h\n"
- ".inst 0x81822460 // bfmopa za0.s, p1/M, p1/M, z3.h, z2.h\n"
- ".inst 0x818224e1 // bfmopa za1.s, p1/M, p1/M, z7.h, z2.h\n"
- ".inst 0x81822562 // bfmopa za2.s, p1/M, p1/M, z11.h, z2.h\n"
- ".inst 0x818225e3 // bfmopa za3.s, p1/M, p1/M, z15.h, z2.h\n"
+ ".inst 0x818d2460 // bfmopa za0.s, p1/M, p1/M, z3.h, z13.h\n"
+ ".inst 0x818d24e1 // bfmopa za1.s, p1/M, p1/M, z7.h, z13.h\n"
+ ".inst 0x818d2562 // bfmopa za2.s, p1/M, p1/M, z11.h, z13.h\n"
+ ".inst 0x818d25e3 // bfmopa za3.s, p1/M, p1/M, z15.h, z13.h\n"
+ ".inst 0x81952640 // bfmopa za0.s, p1/M, p1/M, z18.h, z21.h\n"
+ ".inst 0x819526c1 // bfmopa za1.s, p1/M, p1/M, z22.h, z21.h\n"
+ ".inst 0x81952742 // bfmopa za2.s, p1/M, p1/M, z26.h, z21.h\n"
+ ".inst 0x819527c3 // bfmopa za3.s, p1/M, p1/M, z30.h, z21.h\n"
+ ".inst 0x81912660 // bfmopa za0.s, p1/M, p1/M, z19.h, z17.h\n"
+ ".inst 0x819126e1 // bfmopa za1.s, p1/M, p1/M, z23.h, z17.h\n"
+ ".inst 0x81912762 // bfmopa za2.s, p1/M, p1/M, z27.h, z17.h\n"
+ ".inst 0x819127e3 // bfmopa za3.s, p1/M, p1/M, z31.h, z17.h\n"
+ ".inst 0x81822600 // bfmopa za0.s, p1/M, p1/M, z16.h, z2.h\n"
+ ".inst 0x81822681 // bfmopa za1.s, p1/M, p1/M, z20.h, z2.h\n"
+ ".inst 0x81822702 // bfmopa za2.s, p1/M, p1/M, z24.h, z2.h\n"
+ ".inst 0x81822783 // bfmopa za3.s, p1/M, p1/M, z28.h, z2.h\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa140a373 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn8.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa140a370 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn8.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #4\n"
- "ld1h { z11.h }, p1/Z, [x23]\n"
+ "ld1h { z2.h }, p1/Z, [x23]\n"
"addvl x23, x23, #1\n"
- ".inst 0x818b2660 // bfmopa za0.s, p1/M, p1/M, z19.h, z11.h\n"
- ".inst 0x818b26e1 // bfmopa za1.s, p1/M, p1/M, z23.h, z11.h\n"
- ".inst 0x818b2762 // bfmopa za2.s, p1/M, p1/M, z27.h, z11.h\n"
- ".inst 0x818b27e3 // bfmopa za3.s, p1/M, p1/M, z31.h, z11.h\n"
+ ".inst 0x81822600 // bfmopa za0.s, p1/M, p1/M, z16.h, z2.h\n"
+ ".inst 0x81822681 // bfmopa za1.s, p1/M, p1/M, z20.h, z2.h\n"
+ ".inst 0x81822702 // bfmopa za2.s, p1/M, p1/M, z24.h, z2.h\n"
+ ".inst 0x81822783 // bfmopa za3.s, p1/M, p1/M, z28.h, z2.h\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x16, #1, 14f\n"
@@ -241,25 +241,25 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
- ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa042c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa043c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
+ ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa060c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14]\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 42f\n"
@@ -267,148 +267,148 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa060c1cc // st1w { z12.s-z15.s }, pn8.b, [x14]\n"
- ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
- ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa061c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
+ ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
+ ".inst 0xa060c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 42f\n"
"14:" // Store to output array
"ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10, LSL #2\n" // C += n
"sub x25, x13, x11\n"
"ldr x24, [%x[args], %[offsetof_ldcb]]\n"
+ "add x26, x26, x10, LSL #2\n" // C += n
"madd x26, x11, x24, x26\n" // C += m * ldc
"tbz x16, #2, 27f\n"
"cntw x23\n"
+ "mov x12, #0x0\n"
"cmp x25, x23\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z12.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z7.s }, p0, [x26]\n"
+ "st1w { z13.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z14.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z15.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
"cbz x20, 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ "st1w { z0.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 17f\n"
"subs x20, x20, #0x1\n"
- "st1w { z5.s }, p0, [x26]\n"
+ "st1w { z1.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 17f\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z2.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 19f\n"
"18:" // Store to output array: Skip activation: Accumulator row 1 loop
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- "st1w { z8.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z9.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z10.s }, p0, [x26]\n"
+ "st1w { z12.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z11.s }, p0, [x26]\n"
+ "st1w { z13.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z14.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z15.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 18b\n"
"19:" // Store to output array: Skip activation: Accumulator row 1 oddments
"cbz x20, 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- "st1w { z24.s }, p0, [x26]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 20f\n"
"subs x20, x20, #0x1\n"
- "st1w { z25.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 20f\n"
- "st1w { z26.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"20:" // Store to output array: Skip activation: Accumulator row 1 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 22f\n"
"21:" // Store to output array: Skip activation: Accumulator row 2 loop
- ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z8.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z7.s }, p0, [x26]\n"
+ "st1w { z9.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z10.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z11.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 21b\n"
"22:" // Store to output array: Skip activation: Accumulator row 2 oddments
"cbz x20, 23f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- "st1w { z12.s }, p0, [x26]\n"
+ ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 23f\n"
"subs x20, x20, #0x1\n"
- "st1w { z13.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 23f\n"
- "st1w { z14.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"23:" // Store to output array: Skip activation: Accumulator row 2 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 25f\n"
"24:" // Store to output array: Skip activation: Accumulator row 3 loop
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ "add x12, x12, #0x4\n"
"st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
+ "cmp x12, x21, LSL #2\n"
"st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
"st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
"st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 24b\n"
@@ -431,63 +431,63 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
"b 40f\n"
"27:" // Store to output array: Skip activation: End
"cntw x23\n"
- "cmp x25, x23\n"
"ld1rw { z21.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
+ "cmp x25, x23\n"
+ "ld1rw { z20.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "ld1rw { z20.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 29f\n"
"28:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
- ".inst 0xc1b4cabc // fclamp { z28.s-z31.s }, z21.s, z20.s\n"
- "st1w { z28.s }, p0, [x26]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1w { z29.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
- "st1w { z30.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z31.s }, p0, [x26]\n"
+ "st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 28b\n"
"29:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 30f\n"
- ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1b4cabc // fclamp { z28.s-z31.s }, z21.s, z20.s\n"
- "st1w { z28.s }, p0, [x26]\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 30f\n"
"subs x20, x20, #0x1\n"
- "st1w { z29.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 30f\n"
- "st1w { z30.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"30:" // Store to output array: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 40f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 32f\n"
"31:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc1b4caa4 // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
- "st1w { z4.s }, p0, [x26]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1w { z5.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z7.s }, p0, [x26]\n"
+ "st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 31b\n"
"32:" // Store to output array: Accumulator row 1 oddments
@@ -508,100 +508,100 @@ void sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL(const bfloat16 *const A, con
"subs x25, x25, x22\n"
"beq 40f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 35f\n"
"34:" // Store to output array: Accumulator row 2 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ "add x12, x12, #0x4\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "cmp x12, x21, LSL #2\n"
"st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
"st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
"st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 34b\n"
"35:" // Store to output array: Accumulator row 2 oddments
"cbz x20, 36f\n"
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
- "st1w { z16.s }, p0, [x26]\n"
+ ".inst 0xc1b4cabc // fclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ "st1w { z28.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 36f\n"
"subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
+ "st1w { z29.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 36f\n"
- "st1w { z18.s }, p0, [x26]\n"
+ "st1w { z30.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"36:" // Store to output array: Accumulator row 2 oddments: End
"subs x25, x25, x22\n"
"beq 40f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x23, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 38f\n"
"37:" // Store to output array: Accumulator row 3 loop
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ "add x12, x12, #0x4\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "cmp x12, x21, LSL #2\n"
"st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
"st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
"st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 37b\n"
"38:" // Store to output array: Accumulator row 3 oddments
"cbz x20, 39f\n"
- ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
- "st1w { z16.s }, p0, [x26]\n"
+ ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
+ "st1w { z12.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 39f\n"
"subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
+ "st1w { z13.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 39f\n"
- "st1w { z18.s }, p0, [x26]\n"
+ "st1w { z14.s }, p0, [x26]\n"
"39:" // Store to output array: Accumulator row 3 oddments: End
"40:" // Store to output array: End
"tbz x16, #0, 42f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"41:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 41b\n"
"42:" // End block
"incw x10\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp
index a9196958c7..82aaa4da49 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,7 +23,7 @@
*/
#pragma once
-#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
#include "../std_transforms_sme.hpp"
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
class cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL
{
public:
- typedef __fp16 operand_type;
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
typedef __fp16 result_type;
typedef void (*kern_type)(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL;
- StdTransformsSME<operand_type, result_type, 1, 4, 2> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 1, 4, 2> transforms = {};
cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const CPUInfo *)
{
@@ -90,4 +91,4 @@ public:
} // namespace arm_gemm
-#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // ARM_COMPUTE_ENABLE_SME2
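The fp16fp32fp16 kernel headers in this diff (1VLx4VL here, 2VLx2VL and 4VLx1VL below) all receive the same two mechanical changes: the build guard moves from the SVE flag to ARM_COMPUTE_ENABLE_SME2, so these SME2 kernels are compiled only when SME2 support is actually enabled, and the single operand_type typedef is split into per-side typedefs, presumably so the surrounding framework can describe kernels whose LHS and RHS operand types differ. A trimmed sketch of the resulting header shape (only the lines the diff touches are shown):

    #ifdef ARM_COMPUTE_ENABLE_SME2  // was ARM_COMPUTE_ENABLE_SVE

    #include "../std_transforms_sme.hpp"

    class cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL
    {
    public:
        typedef __fp16 lhs_operand_type;  // A-side operand; replaces the old operand_type
        typedef __fp16 rhs_operand_type;  // B-side operand, now named independently
        typedef __fp16 result_type;

        // The transform helper stays keyed on the LHS type only:
        StdTransformsSME<lhs_operand_type, result_type, 1, 4, 2> transforms = {};
    };

    #endif // ARM_COMPUTE_ENABLE_SME2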
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp
index ad10ce7993..832fd0998a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,18 +10,18 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
-#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
#include "arm_gemm.hpp"
@@ -89,6 +89,7 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
const __fp16 *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -108,14 +109,14 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
".inst 0xa040c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
- ".inst 0xa041c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
- ".inst 0xa042c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
- ".inst 0xa043c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xa041c568 // ld1w { z8.s-z11.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa042c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
"addvl x11, x11, #16\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
"blt 1b\n"
@@ -132,17 +133,17 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
".inst 0x257a4770 // whilelt pn8.h, x27, x26, VLx2\n"
- "fmov z6.h, #0.0\n"
- "fmov z19.h, #1.0\n"
- ".inst 0xa01b2295 // ldnt1h { z20.h-z21.h }, p8/Z, [x20, x27, LSL #1]\n"
- "zip1 z23.h, z20.h, z6.h\n"
- "zip2 z12.h, z20.h, z6.h\n"
- "zip1 z16.h, z21.h, z6.h\n"
- "zip2 z8.h, z21.h, z6.h\n"
- ".inst 0x81b70260 // fmopa za0.s, p0/M, p0/M, z19.h, z23.h\n"
- ".inst 0x81ac0261 // fmopa za1.s, p0/M, p0/M, z19.h, z12.h\n"
- ".inst 0x81b00262 // fmopa za2.s, p0/M, p0/M, z19.h, z16.h\n"
- ".inst 0x81a80263 // fmopa za3.s, p0/M, p0/M, z19.h, z8.h\n"
+ "fmov z29.h, #0.0\n"
+ "fmov z2.h, #1.0\n"
+ ".inst 0xa01b229f // ldnt1h { z30.h-z31.h }, p8/Z, [x20, x27, LSL #1]\n"
+ "zip1 z22.h, z30.h, z29.h\n"
+ "zip2 z30.h, z30.h, z29.h\n"
+ "zip1 z20.h, z31.h, z29.h\n"
+ "zip2 z19.h, z31.h, z29.h\n"
+ ".inst 0x81b60040 // fmopa za0.s, p0/M, p0/M, z2.h, z22.h\n"
+ ".inst 0x81be0041 // fmopa za1.s, p0/M, p0/M, z2.h, z30.h\n"
+ ".inst 0x81b40042 // fmopa za2.s, p0/M, p0/M, z2.h, z20.h\n"
+ ".inst 0x81b30043 // fmopa za3.s, p0/M, p0/M, z2.h, z19.h\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x27\n"
"mov x21, x28\n"
@@ -161,79 +162,79 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
"add x20, x20, #0x1\n"
"lsr x20, x20, #0x1\n"
"lsr x21, x20, #0x2\n"
- "and x20, x20, #0x3\n"
"madd x23, x27, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
"cbz x21, 8f\n"
"subs x21, x21, #0x1\n"
- "ld1h { z21.h }, p0/Z, [x24]\n"
- ".inst 0xa140a6f8 // ldnt1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x23]\n"
- "ld1h { z29.h }, p0/Z, [x24, #1, MUL VL]\n"
- ".inst 0xa041a6ed // ldnt1h { z12.h-z15.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1h { z4.h }, p0/Z, [x24, #2, MUL VL]\n"
- ".inst 0xa042a6e1 // ldnt1h { z0.h-z3.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1h { z25.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x24]\n"
+ ".inst 0xa040a6f0 // ld1h { z16.h-z19.h }, pn9.b/Z, [x23]\n"
+ "ld1h { z31.h }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0xa141a6e2 // ld1h { z2.h, z6.h, z10.h, z14.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1h { z28.h }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa042a6f8 // ld1h { z24.h-z27.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1h { z22.h }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- ".inst 0xa143a6fb // ldnt1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ ".inst 0xa143a6e1 // ld1h { z1.h, z5.h, z9.h, z13.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x81b002a0 // fmopa za0.s, p0/M, p0/M, z21.h, z16.h\n"
+ ".inst 0x81b00280 // fmopa za0.s, p0/M, p0/M, z20.h, z16.h\n"
"subs x21, x21, #0x1\n"
- ".inst 0x81b402a1 // fmopa za1.s, p0/M, p0/M, z21.h, z20.h\n"
- ".inst 0x81b802a2 // fmopa za2.s, p0/M, p0/M, z21.h, z24.h\n"
- ".inst 0x81bc02a3 // fmopa za3.s, p0/M, p0/M, z21.h, z28.h\n"
- "ld1h { z21.h }, p0/Z, [x24]\n"
- ".inst 0x81ac03a0 // fmopa za0.s, p0/M, p0/M, z29.h, z12.h\n"
- ".inst 0xa140a6f0 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x23]\n"
- ".inst 0x81ad03a1 // fmopa za1.s, p0/M, p0/M, z29.h, z13.h\n"
- ".inst 0x81ae03a2 // fmopa za2.s, p0/M, p0/M, z29.h, z14.h\n"
- ".inst 0x81af03a3 // fmopa za3.s, p0/M, p0/M, z29.h, z15.h\n"
- "ld1h { z29.h }, p0/Z, [x24, #1, MUL VL]\n"
- ".inst 0x81a00080 // fmopa za0.s, p0/M, p0/M, z4.h, z0.h\n"
- ".inst 0xa041a6ec // ld1h { z12.h-z15.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0x81a10081 // fmopa za1.s, p0/M, p0/M, z4.h, z1.h\n"
- ".inst 0x81a20082 // fmopa za2.s, p0/M, p0/M, z4.h, z2.h\n"
- ".inst 0x81a30083 // fmopa za3.s, p0/M, p0/M, z4.h, z3.h\n"
- "ld1h { z4.h }, p0/Z, [x24, #2, MUL VL]\n"
- ".inst 0xa042a6e0 // ld1h { z0.h-z3.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- ".inst 0x81b30320 // fmopa za0.s, p0/M, p0/M, z25.h, z19.h\n"
- ".inst 0x81b70321 // fmopa za1.s, p0/M, p0/M, z25.h, z23.h\n"
- ".inst 0x81bb0322 // fmopa za2.s, p0/M, p0/M, z25.h, z27.h\n"
- ".inst 0x81bf0323 // fmopa za3.s, p0/M, p0/M, z25.h, z31.h\n"
- "ld1h { z25.h }, p0/Z, [x24, #3, MUL VL]\n"
+ ".inst 0x81b10281 // fmopa za1.s, p0/M, p0/M, z20.h, z17.h\n"
+ ".inst 0x81b20282 // fmopa za2.s, p0/M, p0/M, z20.h, z18.h\n"
+ ".inst 0x81b30283 // fmopa za3.s, p0/M, p0/M, z20.h, z19.h\n"
+ "ld1h { z20.h }, p0/Z, [x24]\n"
+ ".inst 0x81a203e0 // fmopa za0.s, p0/M, p0/M, z31.h, z2.h\n"
+ ".inst 0xa040a6f0 // ld1h { z16.h-z19.h }, pn9.b/Z, [x23]\n"
+ ".inst 0x81a603e1 // fmopa za1.s, p0/M, p0/M, z31.h, z6.h\n"
+ ".inst 0x81aa03e2 // fmopa za2.s, p0/M, p0/M, z31.h, z10.h\n"
+ ".inst 0x81ae03e3 // fmopa za3.s, p0/M, p0/M, z31.h, z14.h\n"
+ "ld1h { z31.h }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x81b80380 // fmopa za0.s, p0/M, p0/M, z28.h, z24.h\n"
+ ".inst 0xa141a6e2 // ld1h { z2.h, z6.h, z10.h, z14.h }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x81b90381 // fmopa za1.s, p0/M, p0/M, z28.h, z25.h\n"
+ ".inst 0x81ba0382 // fmopa za2.s, p0/M, p0/M, z28.h, z26.h\n"
+ ".inst 0x81bb0383 // fmopa za3.s, p0/M, p0/M, z28.h, z27.h\n"
+ "ld1h { z28.h }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa042a6f8 // ld1h { z24.h-z27.h }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ ".inst 0x81a102c0 // fmopa za0.s, p0/M, p0/M, z22.h, z1.h\n"
+ ".inst 0x81a502c1 // fmopa za1.s, p0/M, p0/M, z22.h, z5.h\n"
+ ".inst 0x81a902c2 // fmopa za2.s, p0/M, p0/M, z22.h, z9.h\n"
+ ".inst 0x81ad02c3 // fmopa za3.s, p0/M, p0/M, z22.h, z13.h\n"
+ "ld1h { z22.h }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- ".inst 0xa143a6f3 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ ".inst 0xa143a6e1 // ld1h { z1.h, z5.h, z9.h, z13.h }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x81b002a0 // fmopa za0.s, p0/M, p0/M, z21.h, z16.h\n"
- ".inst 0x81b402a1 // fmopa za1.s, p0/M, p0/M, z21.h, z20.h\n"
- ".inst 0x81b802a2 // fmopa za2.s, p0/M, p0/M, z21.h, z24.h\n"
- ".inst 0x81bc02a3 // fmopa za3.s, p0/M, p0/M, z21.h, z28.h\n"
- ".inst 0x81ac03a0 // fmopa za0.s, p0/M, p0/M, z29.h, z12.h\n"
- ".inst 0x81ad03a1 // fmopa za1.s, p0/M, p0/M, z29.h, z13.h\n"
- ".inst 0x81ae03a2 // fmopa za2.s, p0/M, p0/M, z29.h, z14.h\n"
- ".inst 0x81af03a3 // fmopa za3.s, p0/M, p0/M, z29.h, z15.h\n"
- ".inst 0x81a00080 // fmopa za0.s, p0/M, p0/M, z4.h, z0.h\n"
- ".inst 0x81a10081 // fmopa za1.s, p0/M, p0/M, z4.h, z1.h\n"
- ".inst 0x81a20082 // fmopa za2.s, p0/M, p0/M, z4.h, z2.h\n"
- ".inst 0x81a30083 // fmopa za3.s, p0/M, p0/M, z4.h, z3.h\n"
- ".inst 0x81b30320 // fmopa za0.s, p0/M, p0/M, z25.h, z19.h\n"
- ".inst 0x81b70321 // fmopa za1.s, p0/M, p0/M, z25.h, z23.h\n"
- ".inst 0x81bb0322 // fmopa za2.s, p0/M, p0/M, z25.h, z27.h\n"
- ".inst 0x81bf0323 // fmopa za3.s, p0/M, p0/M, z25.h, z31.h\n"
+ ".inst 0x81b00280 // fmopa za0.s, p0/M, p0/M, z20.h, z16.h\n"
+ ".inst 0x81b10281 // fmopa za1.s, p0/M, p0/M, z20.h, z17.h\n"
+ ".inst 0x81b20282 // fmopa za2.s, p0/M, p0/M, z20.h, z18.h\n"
+ ".inst 0x81b30283 // fmopa za3.s, p0/M, p0/M, z20.h, z19.h\n"
+ ".inst 0x81a203e0 // fmopa za0.s, p0/M, p0/M, z31.h, z2.h\n"
+ ".inst 0x81a603e1 // fmopa za1.s, p0/M, p0/M, z31.h, z6.h\n"
+ ".inst 0x81aa03e2 // fmopa za2.s, p0/M, p0/M, z31.h, z10.h\n"
+ ".inst 0x81ae03e3 // fmopa za3.s, p0/M, p0/M, z31.h, z14.h\n"
+ ".inst 0x81b80380 // fmopa za0.s, p0/M, p0/M, z28.h, z24.h\n"
+ ".inst 0x81b90381 // fmopa za1.s, p0/M, p0/M, z28.h, z25.h\n"
+ ".inst 0x81ba0382 // fmopa za2.s, p0/M, p0/M, z28.h, z26.h\n"
+ ".inst 0x81bb0383 // fmopa za3.s, p0/M, p0/M, z28.h, z27.h\n"
+ ".inst 0x81a102c0 // fmopa za0.s, p0/M, p0/M, z22.h, z1.h\n"
+ ".inst 0x81a502c1 // fmopa za1.s, p0/M, p0/M, z22.h, z5.h\n"
+ ".inst 0x81a902c2 // fmopa za2.s, p0/M, p0/M, z22.h, z9.h\n"
+ ".inst 0x81ad02c3 // fmopa za3.s, p0/M, p0/M, z22.h, z13.h\n"
"8:" // K oddments
"cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1h { z21.h }, p0/Z, [x24]\n"
+ "ld1h { z10.h }, p0/Z, [x24]\n"
"subs x20, x20, #0x1\n"
"addvl x24, x24, #1\n"
- ".inst 0xa140a6f0 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x23]\n"
+ ".inst 0xa140a6f3 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #4\n"
- ".inst 0x81b002a0 // fmopa za0.s, p0/M, p0/M, z21.h, z16.h\n"
- ".inst 0x81b402a1 // fmopa za1.s, p0/M, p0/M, z21.h, z20.h\n"
- ".inst 0x81b802a2 // fmopa za2.s, p0/M, p0/M, z21.h, z24.h\n"
- ".inst 0x81bc02a3 // fmopa za3.s, p0/M, p0/M, z21.h, z28.h\n"
+ ".inst 0x81b30140 // fmopa za0.s, p0/M, p0/M, z10.h, z19.h\n"
+ ".inst 0x81b70141 // fmopa za1.s, p0/M, p0/M, z10.h, z23.h\n"
+ ".inst 0x81bb0142 // fmopa za2.s, p0/M, p0/M, z10.h, z27.h\n"
+ ".inst 0x81bf0143 // fmopa za3.s, p0/M, p0/M, z10.h, z31.h\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x13, #1, 14f\n"
@@ -241,21 +242,21 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11]\n"
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xa040c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xa041c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa041c560 // ld1w { z0.s-z3.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa042c568 // ld1w { z8.s-z11.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
- ".inst 0xa043c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
- ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa042c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c568 // ld1w { z8.s-z11.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
"addvl x11, x11, #16\n"
- ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa060c540 // st1w { z0.s-z3.s }, pn9.b, [x10]\n"
- ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa060c554 // st1w { z20.s-z23.s }, pn9.b, [x10]\n"
+ ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
".inst 0xa061c558 // st1w { z24.s-z27.s }, pn9.b, [x10, #0x4, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
".inst 0xa062c544 // st1w { z4.s-z7.s }, pn9.b, [x10, #0x8, MUL VL]\n"
"cmp x12, x20\n"
@@ -267,16 +268,16 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa060c540 // st1w { z0.s-z3.s }, pn9.b, [x10]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c548 // st1w { z8.s-z11.s }, pn9.b, [x10]\n"
"add x12, x12, #0x4\n"
- ".inst 0xa061c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xa061c544 // st1w { z4.s-z7.s }, pn9.b, [x10, #0x4, MUL VL]\n"
"cmp x12, x20\n"
".inst 0xa062c54c // st1w { z12.s-z15.s }, pn9.b, [x10, #0x8, MUL VL]\n"
- ".inst 0xa063c544 // st1w { z4.s-z7.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ ".inst 0xa063c550 // st1w { z16.s-z19.s }, pn9.b, [x10, #0xc, MUL VL]\n"
"addvl x10, x10, #16\n"
"blt 13b\n"
"b 18f\n"
@@ -284,22 +285,22 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
"ldr x23, [%x[args], %[offsetof_C]]\n"
"sub x22, x9, x28\n"
"cntw x21\n"
- "ld1rh { z17.h }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "ld1rh { z21.h }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
"ldr x20, [%x[args], %[offsetof_ldcb]]\n"
".inst 0x257a4770 // whilelt pn8.h, x27, x26, VLx2\n"
"cmp x22, x21\n"
- "ld1rh { z16.h }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+ "ld1rh { z20.h }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"mov x12, #0x0\n"
"csel x22, x22, x21, LT\n"
"add x23, x23, x27, LSL #1\n" // C += n
"madd x23, x28, x20, x23\n" // C += m * ldc
"15:" // Store to output array: Accumulator loop
- ".inst 0xc0060414 // mova { z20.b-z23.b }, za0h.b[x12, 0:3]\n"
+ ".inst 0xc0060410 // mova { z16.b-z19.b }, za0h.b[x12, 0:3]\n"
"add x12, x12, #0x4\n"
- ".inst 0xc120e28e // fcvt z14.h, { z20.s-z21.s }\n"
- ".inst 0xc120e2cf // fcvt z15.h, { z22.s-z23.s }\n"
+ ".inst 0xc120e20e // fcvt z14.h, { z16.s-z17.s }\n"
+ ".inst 0xc120e24f // fcvt z15.h, { z18.s-z19.s }\n"
"cmp x12, x22, LSL #2\n"
- ".inst 0xc170c22e // fclamp { z14.h-z15.h }, z17.h, z16.h\n"
+ ".inst 0xc174c2ae // fclamp { z14.h-z15.h }, z21.h, z20.h\n"
".inst 0xa06022ee // st1h { z14.h-z15.h }, p8, [x23]\n"
"add x23, x23, x20\n"
"blt 15b\n"
@@ -308,15 +309,15 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
"mov x12, #0x0\n"
"cntw x20\n"
"17:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
- ".inst 0xa041c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
- ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
- ".inst 0xa043c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
- ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa040c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa041c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa042c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
"addvl x11, x11, #16\n"
- ".inst 0xc0840681 // mova za1h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
"blt 17b\n"
@@ -338,4 +339,4 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_1VLx4VL(const __fp16 *const A, c
} // namespace arm_gemm
-#endif // __ARM_FEATURE_SVE
+#endif // ARM_COMPUTE_ENABLE_SME2
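The bias block above (between labels 3 and 5) seeds the fp32 ZA tiles from an fp16 bias without an explicit add. It materialises a vector of ones (fmov z2.h, #1.0), interleaves the loaded bias with zeros (zip1/zip2 against z29.h = 0.0), and issues one widening FMOPA per tile. The widening fp16 FMOPA accumulates za[r][c] += a[2r]*b[2c] + a[2r+1]*b[2c+1], so with a = {1, 1, ...} and b = {bias[0], 0, bias[1], 0, ...} every accumulator row receives exactly bias[c]. A scalar model of the net effect (names illustrative; __fp16 assumes an AArch64 toolchain; ZA is zeroed immediately before this block):

    #include <cstddef>

    // Scalar model of the bias-broadcast trick: with lhs all ones and rhs the
    // bias interleaved with zeros, the widening outer product
    //   za[r][c] += lhs[2r]*rhs[2c] + lhs[2r+1]*rhs[2c+1]
    // collapses to za[r][c] += bias[c] for every row r.
    void seed_bias(float *za, std::size_t rows, std::size_t cols, const __fp16 *bias)
    {
        for (std::size_t r = 0; r < rows; ++r)
            for (std::size_t c = 0; c < cols; ++c)
                za[r * cols + c] = static_cast<float>(bias[c]); // ZA held zeros beforehand
    }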
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp
index 5bd34b2ca0..66d32acda7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,7 +23,7 @@
*/
#pragma once
-#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
#include "../std_transforms_sme.hpp"
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL(const __fp16 *const A, c
class cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL
{
public:
- typedef __fp16 operand_type;
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
typedef __fp16 result_type;
typedef void (*kern_type)(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL;
- StdTransformsSME<operand_type, result_type, 2, 2, 2> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 2, 2, 2> transforms = {};
cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL(const CPUInfo *)
{
@@ -90,4 +91,4 @@ public:
} // namespace arm_gemm
-#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // ARM_COMPUTE_ENABLE_SME2
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp
index 5c48f953e8..23e053c0f5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,18 +10,18 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
-#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
#include "arm_gemm.hpp"
@@ -89,6 +89,7 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL(const __fp16 *const A, c
const __fp16 *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -97,61 +98,61 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL(const __fp16 *const A, c
KernelArgs args(A, B, C, ldc, M, N, K, bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207810 // ptrue pn8.b\n"
- "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x16, #0, 2f\n"
+ "ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
- ".inst 0xa041c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- "addvl x15, x15, #16\n"
- ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa040c1d8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x14]\n"
+ ".inst 0xa041c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa042c1cc // ld1w { z12.s-z15.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c1d0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840681 // mova za1h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w13, [%x[args], %[offsetof_M]]\n"
- "mov x11, #0x0\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
"mov x10, #0x0\n"
- "ldr w9, [%x[args], %[offsetof_N]]\n"
- "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "mov x9, #0x0\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x27, x28\n"
- "tbnz x16, #0, 4f\n"
+ "mov x26, x27\n"
+ "tbnz x15, #0, 4f\n"
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "whilelt p0.h, x10, x9\n"
- "fmov z10.h, #0.0\n"
- "fmov z11.h, #1.0\n"
- "ld1h { z18.h }, p0/Z, [x20, x10, LSL #1]\n"
- "zip1 z2.h, z18.h, z10.h\n"
- "zip2 z19.h, z18.h, z10.h\n"
- ".inst 0x81a22560 // fmopa za0.s, p1/M, p1/M, z11.h, z2.h\n"
- ".inst 0x81b32561 // fmopa za1.s, p1/M, p1/M, z11.h, z19.h\n"
- ".inst 0x81a22562 // fmopa za2.s, p1/M, p1/M, z11.h, z2.h\n"
- ".inst 0x81b32563 // fmopa za3.s, p1/M, p1/M, z11.h, z19.h\n"
+ "whilelt p0.h, x9, x28\n"
+ "fmov z7.h, #0.0\n"
+ "fmov z19.h, #1.0\n"
+ "ld1h { z20.h }, p0/Z, [x20, x9, LSL #1]\n"
+ "zip1 z21.h, z20.h, z7.h\n"
+ "zip2 z30.h, z20.h, z7.h\n"
+ ".inst 0x81b52660 // fmopa za0.s, p1/M, p1/M, z19.h, z21.h\n"
+ ".inst 0x81be2661 // fmopa za1.s, p1/M, p1/M, z19.h, z30.h\n"
+ ".inst 0x81b52662 // fmopa za2.s, p1/M, p1/M, z19.h, z21.h\n"
+ ".inst 0x81be2663 // fmopa za3.s, p1/M, p1/M, z19.h, z30.h\n"
"4:" // Prepare accumulators: Test for last block
- "mov x20, x10\n"
- "mov x21, x11\n"
+ "mov x20, x9\n"
+ "mov x21, x10\n"
"incw x20, ALL, MUL #2\n"
"incw x21, ALL, MUL #2\n"
- "cmp x20, x9\n"
- "mov x20, x16\n"
- "csel x21, x11, x21, LT\n"
- "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
- "cmp x21, x13\n"
- "csel x16, x20, x16, LT\n"
+ "cmp x20, x28\n"
+ "mov x20, x15\n"
+ "csel x21, x10, x21, LT\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x21, x11\n"
+ "csel x15, x20, x15, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
"ldr x23, [%x[args], %[offsetof_B]]\n"
@@ -159,294 +160,185 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_2VLx2VL(const __fp16 *const A, c
"add x20, x20, #0x1\n"
"lsr x20, x20, #0x1\n"
"lsr x21, x20, #0x2\n"
+ "madd x23, x9, x22, x23\n" // bptr = B + n * kstride_bytes
"and x20, x20, #0x3\n"
- "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
"cbz x21, 8f\n"
"subs x21, x21, #0x1\n"
- ".inst 0xa0402374 // ld1h { z20.h-z21.h }, pn8.b/Z, [x27]\n"
- ".inst 0xa14022ed // ldnt1h { z5.h, z13.h }, pn8.b/Z, [x23]\n"
- ".inst 0xa041236a // ld1h { z10.h-z11.h }, pn8.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa14122ec // ldnt1h { z4.h, z12.h }, pn8.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa0422372 // ld1h { z18.h-z19.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04222fb // ldnt1h { z26.h-z27.h }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa1432366 // ld1h { z6.h, z14.h }, pn8.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa04322f9 // ldnt1h { z24.h-z25.h }, pn8.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa0402344 // ld1h { z4.h-z5.h }, pn8.b/Z, [x26]\n"
+ ".inst 0xa14022f1 // ld1h { z17.h, z25.h }, pn8.b/Z, [x23]\n"
+ ".inst 0xa0412352 // ld1h { z18.h-z19.h }, pn8.b/Z, [x26, #0x2, MUL VL]\n"
+ ".inst 0xa14122e3 // ld1h { z3.h, z11.h }, pn8.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa042234c // ld1h { z12.h-z13.h }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa04222fc // ld1h { z28.h-z29.h }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1432347 // ld1h { z7.h, z15.h }, pn8.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14322f7 // ld1h { z23.h, z31.h }, pn8.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x81a52680 // fmopa za0.s, p1/M, p1/M, z20.h, z5.h\n"
+ ".inst 0x81b12480 // fmopa za0.s, p1/M, p1/M, z4.h, z17.h\n"
"subs x21, x21, #0x1\n"
- ".inst 0x81ad2681 // fmopa za1.s, p1/M, p1/M, z20.h, z13.h\n"
- ".inst 0x81a526a2 // fmopa za2.s, p1/M, p1/M, z21.h, z5.h\n"
- ".inst 0x81ad26a3 // fmopa za3.s, p1/M, p1/M, z21.h, z13.h\n"
- ".inst 0xa0402374 // ld1h { z20.h-z21.h }, pn8.b/Z, [x27]\n"
- ".inst 0x81a42540 // fmopa za0.s, p1/M, p1/M, z10.h, z4.h\n"
- ".inst 0xa14022e5 // ld1h { z5.h, z13.h }, pn8.b/Z, [x23]\n"
- ".inst 0x81ac2541 // fmopa za1.s, p1/M, p1/M, z10.h, z12.h\n"
- ".inst 0x81a42562 // fmopa za2.s, p1/M, p1/M, z11.h, z4.h\n"
- ".inst 0x81ac2563 // fmopa za3.s, p1/M, p1/M, z11.h, z12.h\n"
- ".inst 0xa041236a // ld1h { z10.h-z11.h }, pn8.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0x81ba2640 // fmopa za0.s, p1/M, p1/M, z18.h, z26.h\n"
- ".inst 0xa14122e4 // ld1h { z4.h, z12.h }, pn8.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0x81bb2641 // fmopa za1.s, p1/M, p1/M, z18.h, z27.h\n"
- ".inst 0x81ba2662 // fmopa za2.s, p1/M, p1/M, z19.h, z26.h\n"
- ".inst 0x81bb2663 // fmopa za3.s, p1/M, p1/M, z19.h, z27.h\n"
- ".inst 0xa0422372 // ld1h { z18.h-z19.h }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04222fa // ld1h { z26.h-z27.h }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0x81b824c0 // fmopa za0.s, p1/M, p1/M, z6.h, z24.h\n"
- ".inst 0x81b924c1 // fmopa za1.s, p1/M, p1/M, z6.h, z25.h\n"
- ".inst 0x81b825c2 // fmopa za2.s, p1/M, p1/M, z14.h, z24.h\n"
- ".inst 0x81b925c3 // fmopa za3.s, p1/M, p1/M, z14.h, z25.h\n"
- ".inst 0xa1432366 // ld1h { z6.h, z14.h }, pn8.b/Z, [x27, #0x6, MUL VL]\n"
- "addvl x27, x27, #8\n"
- ".inst 0xa04322f8 // ld1h { z24.h-z25.h }, pn8.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0x81b92481 // fmopa za1.s, p1/M, p1/M, z4.h, z25.h\n"
+ ".inst 0x81b124a2 // fmopa za2.s, p1/M, p1/M, z5.h, z17.h\n"
+ ".inst 0x81b924a3 // fmopa za3.s, p1/M, p1/M, z5.h, z25.h\n"
+ ".inst 0xa0402344 // ld1h { z4.h-z5.h }, pn8.b/Z, [x26]\n"
+ ".inst 0x81a32640 // fmopa za0.s, p1/M, p1/M, z18.h, z3.h\n"
+ ".inst 0xa14022f1 // ld1h { z17.h, z25.h }, pn8.b/Z, [x23]\n"
+ ".inst 0x81ab2641 // fmopa za1.s, p1/M, p1/M, z18.h, z11.h\n"
+ ".inst 0x81a32662 // fmopa za2.s, p1/M, p1/M, z19.h, z3.h\n"
+ ".inst 0x81ab2663 // fmopa za3.s, p1/M, p1/M, z19.h, z11.h\n"
+ ".inst 0xa0412352 // ld1h { z18.h-z19.h }, pn8.b/Z, [x26, #0x2, MUL VL]\n"
+ ".inst 0x81bc2580 // fmopa za0.s, p1/M, p1/M, z12.h, z28.h\n"
+ ".inst 0xa14122e3 // ld1h { z3.h, z11.h }, pn8.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0x81bd2581 // fmopa za1.s, p1/M, p1/M, z12.h, z29.h\n"
+ ".inst 0x81bc25a2 // fmopa za2.s, p1/M, p1/M, z13.h, z28.h\n"
+ ".inst 0x81bd25a3 // fmopa za3.s, p1/M, p1/M, z13.h, z29.h\n"
+ ".inst 0xa042234c // ld1h { z12.h-z13.h }, pn8.b/Z, [x26, #0x4, MUL VL]\n"
+ ".inst 0xa04222fc // ld1h { z28.h-z29.h }, pn8.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x81b724e0 // fmopa za0.s, p1/M, p1/M, z7.h, z23.h\n"
+ ".inst 0x81bf24e1 // fmopa za1.s, p1/M, p1/M, z7.h, z31.h\n"
+ ".inst 0x81b725e2 // fmopa za2.s, p1/M, p1/M, z15.h, z23.h\n"
+ ".inst 0x81bf25e3 // fmopa za3.s, p1/M, p1/M, z15.h, z31.h\n"
+ ".inst 0xa1432347 // ld1h { z7.h, z15.h }, pn8.b/Z, [x26, #0x6, MUL VL]\n"
+ "addvl x26, x26, #8\n"
+ ".inst 0xa14322f7 // ld1h { z23.h, z31.h }, pn8.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x81a52680 // fmopa za0.s, p1/M, p1/M, z20.h, z5.h\n"
- ".inst 0x81ad2681 // fmopa za1.s, p1/M, p1/M, z20.h, z13.h\n"
- ".inst 0x81a526a2 // fmopa za2.s, p1/M, p1/M, z21.h, z5.h\n"
- ".inst 0x81ad26a3 // fmopa za3.s, p1/M, p1/M, z21.h, z13.h\n"
- ".inst 0x81a42540 // fmopa za0.s, p1/M, p1/M, z10.h, z4.h\n"
- ".inst 0x81ac2541 // fmopa za1.s, p1/M, p1/M, z10.h, z12.h\n"
- ".inst 0x81a42562 // fmopa za2.s, p1/M, p1/M, z11.h, z4.h\n"
- ".inst 0x81ac2563 // fmopa za3.s, p1/M, p1/M, z11.h, z12.h\n"
- ".inst 0x81ba2640 // fmopa za0.s, p1/M, p1/M, z18.h, z26.h\n"
- ".inst 0x81bb2641 // fmopa za1.s, p1/M, p1/M, z18.h, z27.h\n"
- ".inst 0x81ba2662 // fmopa za2.s, p1/M, p1/M, z19.h, z26.h\n"
- ".inst 0x81bb2663 // fmopa za3.s, p1/M, p1/M, z19.h, z27.h\n"
- ".inst 0x81b824c0 // fmopa za0.s, p1/M, p1/M, z6.h, z24.h\n"
- ".inst 0x81b924c1 // fmopa za1.s, p1/M, p1/M, z6.h, z25.h\n"
- ".inst 0x81b825c2 // fmopa za2.s, p1/M, p1/M, z14.h, z24.h\n"
- ".inst 0x81b925c3 // fmopa za3.s, p1/M, p1/M, z14.h, z25.h\n"
+ ".inst 0x81b12480 // fmopa za0.s, p1/M, p1/M, z4.h, z17.h\n"
+ ".inst 0x81b92481 // fmopa za1.s, p1/M, p1/M, z4.h, z25.h\n"
+ ".inst 0x81b124a2 // fmopa za2.s, p1/M, p1/M, z5.h, z17.h\n"
+ ".inst 0x81b924a3 // fmopa za3.s, p1/M, p1/M, z5.h, z25.h\n"
+ ".inst 0x81a32640 // fmopa za0.s, p1/M, p1/M, z18.h, z3.h\n"
+ ".inst 0x81ab2641 // fmopa za1.s, p1/M, p1/M, z18.h, z11.h\n"
+ ".inst 0x81a32662 // fmopa za2.s, p1/M, p1/M, z19.h, z3.h\n"
+ ".inst 0x81ab2663 // fmopa za3.s, p1/M, p1/M, z19.h, z11.h\n"
+ ".inst 0x81bc2580 // fmopa za0.s, p1/M, p1/M, z12.h, z28.h\n"
+ ".inst 0x81bd2581 // fmopa za1.s, p1/M, p1/M, z12.h, z29.h\n"
+ ".inst 0x81bc25a2 // fmopa za2.s, p1/M, p1/M, z13.h, z28.h\n"
+ ".inst 0x81bd25a3 // fmopa za3.s, p1/M, p1/M, z13.h, z29.h\n"
+ ".inst 0x81b724e0 // fmopa za0.s, p1/M, p1/M, z7.h, z23.h\n"
+ ".inst 0x81bf24e1 // fmopa za1.s, p1/M, p1/M, z7.h, z31.h\n"
+ ".inst 0x81b725e2 // fmopa za2.s, p1/M, p1/M, z15.h, z23.h\n"
+ ".inst 0x81bf25e3 // fmopa za3.s, p1/M, p1/M, z15.h, z31.h\n"
"8:" // K oddments
"cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa0402374 // ld1h { z20.h-z21.h }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0402346 // ld1h { z6.h-z7.h }, pn8.b/Z, [x26]\n"
"subs x20, x20, #0x1\n"
- "addvl x27, x27, #2\n"
- ".inst 0xa14022e5 // ld1h { z5.h, z13.h }, pn8.b/Z, [x23]\n"
+ "addvl x26, x26, #2\n"
+ ".inst 0xa04022e0 // ld1h { z0.h-z1.h }, pn8.b/Z, [x23]\n"
"addvl x23, x23, #2\n"
- ".inst 0x81a52680 // fmopa za0.s, p1/M, p1/M, z20.h, z5.h\n"
- ".inst 0x81ad2681 // fmopa za1.s, p1/M, p1/M, z20.h, z13.h\n"
- ".inst 0x81a526a2 // fmopa za2.s, p1/M, p1/M, z21.h, z5.h\n"
- ".inst 0x81ad26a3 // fmopa za3.s, p1/M, p1/M, z21.h, z13.h\n"
+ ".inst 0x81a024c0 // fmopa za0.s, p1/M, p1/M, z6.h, z0.h\n"
+ ".inst 0x81a124c1 // fmopa za1.s, p1/M, p1/M, z6.h, z1.h\n"
+ ".inst 0x81a024e2 // fmopa za2.s, p1/M, p1/M, z7.h, z0.h\n"
+ ".inst 0x81a124e3 // fmopa za3.s, p1/M, p1/M, z7.h, z1.h\n"
"bgt 9b\n"
"10:" // K oddments: End
- "tbz x16, #1, 14f\n"
- "tbz x16, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa042c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- "addvl x15, x15, #16\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa060c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14]\n"
- ".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa061c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa040c1d4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x14]\n"
+ ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ ".inst 0xa041c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
+ ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
+ ".inst 0xa042c1dc // ld1w { z28.s-z31.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c1c0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa060c1ac // st1w { z12.s-z15.s }, pn8.b, [x13]\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa061c1b0 // st1w { z16.s-z19.s }, pn8.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
"add x12, x12, #0x4\n"
- ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa062c1b8 // st1w { z24.s-z27.s }, pn8.b, [x13, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa063c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ ".inst 0xa063c1a8 // st1w { z8.s-z11.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 11b\n"
- "b 23f\n"
+ "b 18f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
- ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xa060c1dc // st1w { z28.s-z31.s }, pn8.b, [x14]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c1a8 // st1w { z8.s-z11.s }, pn8.b, [x13]\n"
"add x12, x12, #0x4\n"
- ".inst 0xa061c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c1ac // st1w { z12.s-z15.s }, pn8.b, [x13, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0xc, MUL VL]\n"
- "addvl x14, x14, #16\n"
+ ".inst 0xa062c1bc // st1w { z28.s-z31.s }, pn8.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c1b0 // st1w { z16.s-z19.s }, pn8.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
- "b 23f\n"
+ "b 18f\n"
"14:" // Store to output array
- "ldr x26, [%x[args], %[offsetof_C]]\n"
- "sub x25, x13, x11\n"
- "cntw x24\n"
- "ld1rh { z20.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
- "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
- "whilelt p0.h, x10, x9\n"
- "cmp x25, x24\n"
- "ld1rh { z19.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
- "csel x22, x25, x24, LT\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "sub x24, x11, x10\n"
+ "cntw x23, ALL, MUL #2\n"
+ "ld1rh { z18.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+ "whilelt p0.h, x9, x28\n"
+ "cmp x24, x23\n"
+ "ld1rh { z17.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"mov x12, #0x0\n"
- "add x26, x26, x10, LSL #1\n" // C += n
- "lsr x21, x22, #0x2\n"
- "madd x26, x11, x23, x26\n" // C += m * ldc
- "and x20, x22, #0x3\n"
- "cbz x21, 16f\n"
- "15:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- "fcvt z8.h, p1/m, z8.s\n"
- "fcvt z9.h, p1/m, z9.s\n"
- "fcvt z10.h, p1/m, z10.s\n"
- "fcvt z11.h, p1/m, z11.s\n"
+ "mov x21, #0x0\n"
+ "add x25, x25, x9, LSL #1\n" // C += n
+ "mov x20, #0x2\n"
+ "madd x25, x10, x22, x25\n" // C += m * ldc
+ "csel x24, x24, x23, LT\n"
+ "15:" // Store to output array: Accumulator loop
+ ".inst 0xc006000e // mova { z14.b-z15.b }, za0h.b[x12, 0:1]\n"
"add x12, x12, #0x4\n"
- "fcvt z28.h, p1/m, z28.s\n"
- "fcvt z29.h, p1/m, z29.s\n"
- "cmp x12, x21, LSL #2\n"
- "fcvt z30.h, p1/m, z30.s\n"
- "fcvt z31.h, p1/m, z31.s\n"
- ".inst 0xc173ca88 // fclamp { z8.h-z11.h }, z20.h, z19.h\n"
- ".inst 0xc173ca9c // fclamp { z28.h-z31.h }, z20.h, z19.h\n"
- "uzp1 z16.h, z8.h, z28.h\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "uzp1 z18.h, z9.h, z29.h\n"
- "uzp1 z17.h, z10.h, z30.h\n"
- "uzp1 z16.h, z11.h, z31.h\n"
- "st1h { z18.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1h { z17.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ "cmp x12, x23, LSL #1\n"
+ "add x21, x21, #0x1\n"
+ ".inst 0xc120e1d0 // fcvt z16.h, { z14.s-z15.s }\n"
+ "csel x12, x12, x20, LT\n"
+ "cmp x21, x24\n"
+ ".inst 0x64712650 // fclamp z16.h, z18.h, z17.h\n"
+ "st1h { z16.h }, p0, [x25]\n"
+ "add x25, x25, x22\n"
"blt 15b\n"
- "16:" // Store to output array: Accumulator row 0 oddments
- "cbz x20, 17f\n"
- ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- "fcvt z8.h, p1/m, z8.s\n"
- "fcvt z9.h, p1/m, z9.s\n"
- "fcvt z10.h, p1/m, z10.s\n"
- "fcvt z11.h, p1/m, z11.s\n"
- "subs x20, x20, #0x1\n"
- "fcvt z12.h, p1/m, z12.s\n"
- "fcvt z13.h, p1/m, z13.s\n"
- "fcvt z14.h, p1/m, z14.s\n"
- "fcvt z15.h, p1/m, z15.s\n"
- ".inst 0xc173ca88 // fclamp { z8.h-z11.h }, z20.h, z19.h\n"
- ".inst 0xc173ca8c // fclamp { z12.h-z15.h }, z20.h, z19.h\n"
- "uzp1 z16.h, z8.h, z12.h\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "beq 17f\n"
- "subs x20, x20, #0x1\n"
- "uzp1 z16.h, z9.h, z13.h\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "beq 17f\n"
- "uzp1 z16.h, z10.h, z14.h\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "17:" // Store to output array: Accumulator row 0 oddments: End
- "subs x25, x25, x22\n"
- "beq 21f\n"
- "whilelt p0.h, x10, x9\n"
- "cmp x25, x24\n"
- "csel x20, x25, x24, LT\n"
- "mov x12, #0x0\n"
- "lsr x21, x20, #0x2\n"
- "and x20, x20, #0x3\n"
- "cbz x21, 19f\n"
- "18:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- "fcvt z0.h, p1/m, z0.s\n"
- "fcvt z1.h, p1/m, z1.s\n"
- "fcvt z2.h, p1/m, z2.s\n"
- "fcvt z3.h, p1/m, z3.s\n"
- "add x12, x12, #0x4\n"
- "fcvt z28.h, p1/m, z28.s\n"
- "fcvt z29.h, p1/m, z29.s\n"
- "cmp x12, x21, LSL #2\n"
- "fcvt z30.h, p1/m, z30.s\n"
- "fcvt z31.h, p1/m, z31.s\n"
- ".inst 0xc173ca80 // fclamp { z0.h-z3.h }, z20.h, z19.h\n"
- ".inst 0xc173ca9c // fclamp { z28.h-z31.h }, z20.h, z19.h\n"
- "uzp1 z16.h, z0.h, z28.h\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "uzp1 z18.h, z1.h, z29.h\n"
- "uzp1 z17.h, z2.h, z30.h\n"
- "uzp1 z16.h, z3.h, z31.h\n"
- "st1h { z18.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1h { z17.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "blt 18b\n"
- "19:" // Store to output array: Accumulator row 1 oddments
- "cbz x20, 20f\n"
- ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
- ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- "fcvt z28.h, p1/m, z28.s\n"
- "fcvt z29.h, p1/m, z29.s\n"
- "fcvt z30.h, p1/m, z30.s\n"
- "fcvt z31.h, p1/m, z31.s\n"
- "subs x20, x20, #0x1\n"
- "fcvt z12.h, p1/m, z12.s\n"
- "fcvt z13.h, p1/m, z13.s\n"
- "fcvt z14.h, p1/m, z14.s\n"
- "fcvt z15.h, p1/m, z15.s\n"
- ".inst 0xc173ca9c // fclamp { z28.h-z31.h }, z20.h, z19.h\n"
- ".inst 0xc173ca8c // fclamp { z12.h-z15.h }, z20.h, z19.h\n"
- "uzp1 z16.h, z28.h, z12.h\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "beq 20f\n"
- "subs x20, x20, #0x1\n"
- "uzp1 z16.h, z29.h, z13.h\n"
- "st1h { z16.h }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "beq 20f\n"
- "uzp1 z16.h, z30.h, z14.h\n"
- "st1h { z16.h }, p0, [x26]\n"
- "20:" // Store to output array: Accumulator row 1 oddments: End
- "21:" // Store to output array: End
- "tbz x16, #0, 23f\n"
+ "16:" // Store to output array: End
+ "tbz x15, #0, 18f\n"
"mov x12, #0x0\n"
"cntw x20\n"
- "22:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
- ".inst 0xa041c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xa042c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- "addvl x15, x15, #16\n"
- ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
+ "17:" // Store to output array: Refill accumulators: Loop
+ ".inst 0xa040c1cc // ld1w { z12.s-z15.s }, pn8.b/Z, [x14]\n"
+ ".inst 0xa041c1c4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa042c1c8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c1dc // ld1w { z28.s-z31.s }, pn8.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "blt 22b\n"
- "23:" // End block
- "incw x10, ALL, MUL #2\n"
- "cmp x10, x9\n"
+ "blt 17b\n"
+ "18:" // End block
+ "incw x9, ALL, MUL #2\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x11, ALL, MUL #2\n"
- "mov x10, #0x0\n"
- "cmp x11, x13\n"
- "mov x28, x27\n"
+ "incw x10, ALL, MUL #2\n"
+ "mov x9, #0x0\n"
+ "cmp x10, x11\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
} // namespace arm_gemm
-#endif // __ARM_FEATURE_SVE
+#endif // ARM_COMPUTE_ENABLE_SME2
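The 2VLx2VL rewrite above goes beyond rescheduling. The whole general-purpose register map shifts down by one (x16→x15, x15→x14, ..., x27→x26), so x16 — the AArch64 IP0 scratch register — drops out of the kernel and out of the clobber list. The output path is also restructured: instead of reading fp32 quads from two tiles, converting lane by lane (fcvt z.h, p1/m, z.s) and re-interleaving with uzp1, the new code reads a ZA row pair, narrows it with the two-register SME2 FCVT (fcvt z16.h, { z14.s-z15.s }) and clamps once, collapsing the separate row-0/row-1 loops and their oddment tails into the single accumulator loop at label 15. A scalar model of the per-element store step (names illustrative; __fp16 assumes an AArch64 toolchain):

    #include <algorithm>
    #include <cstddef>

    // Scalar model of the reworked fp16 store: each fp32 accumulator is
    // narrowed to fp16, clamped to the activation bounds, then stored;
    // the assembly does this a whole vector of elements at a time.
    void store_fp16_row(__fp16 *out, const float *acc, std::size_t n,
                        __fp16 act_min, __fp16 act_max)
    {
        for (std::size_t i = 0; i < n; ++i)
        {
            __fp16 h = static_cast<__fp16>(acc[i]);      // fcvt z16.h, { z14.s-z15.s }
            h = std::min(std::max(h, act_min), act_max); // fclamp z16.h, z18.h, z17.h
            out[i] = h;                                  // st1h { z16.h }, p0, [x25]
        }
    }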
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp
index 05029f04b0..f63eb30efd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,7 +23,7 @@
*/
#pragma once
-#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
#include "../std_transforms_sme.hpp"
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
class cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL
{
public:
- typedef __fp16 operand_type;
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
typedef __fp16 result_type;
typedef void (*kern_type)(const __fp16 *const A, const __fp16 *const B, __fp16 *const C, int ldc, const int M, const int N, const int K, const __fp16 *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL;
- StdTransformsSME<operand_type, result_type, 4, 1, 2> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 4, 1, 2> transforms = {};
cls_sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const CPUInfo *)
{
@@ -90,4 +91,4 @@ public:
} // namespace arm_gemm
-#endif // ARM_COMPUTE_ENABLE_SVE
+#endif // ARM_COMPUTE_ENABLE_SME2
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp
index 8728cff31d..3d98d3fe16 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,18 +10,18 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
-#ifdef __ARM_FEATURE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
#include "arm_gemm.hpp"
@@ -89,6 +89,7 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
const __fp16 *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -107,15 +108,15 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
- ".inst 0xa041c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xa042c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xa043c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
"addvl x15, x15, #16\n"
- ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xc0840681 // mova za1h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
"blt 1b\n"
@@ -133,14 +134,14 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
"whilelt p0.h, x10, x9\n"
- "fmov z5.h, #0.0\n"
- "fmov z18.h, #1.0\n"
- "ld1h { z31.h }, p0/Z, [x20, x10, LSL #1]\n"
- "zip1 z15.h, z31.h, z5.h\n"
- ".inst 0x81af2640 // fmopa za0.s, p1/M, p1/M, z18.h, z15.h\n"
- ".inst 0x81af2641 // fmopa za1.s, p1/M, p1/M, z18.h, z15.h\n"
- ".inst 0x81af2642 // fmopa za2.s, p1/M, p1/M, z18.h, z15.h\n"
- ".inst 0x81af2643 // fmopa za3.s, p1/M, p1/M, z18.h, z15.h\n"
+ "fmov z13.h, #0.0\n"
+ "fmov z27.h, #1.0\n"
+ "ld1h { z14.h }, p0/Z, [x20, x10, LSL #1]\n"
+ "zip1 z30.h, z14.h, z13.h\n"
+ ".inst 0x81be2760 // fmopa za0.s, p1/M, p1/M, z27.h, z30.h\n"
+ ".inst 0x81be2761 // fmopa za1.s, p1/M, p1/M, z27.h, z30.h\n"
+ ".inst 0x81be2762 // fmopa za2.s, p1/M, p1/M, z27.h, z30.h\n"
+ ".inst 0x81be2763 // fmopa za3.s, p1/M, p1/M, z27.h, z30.h\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
@@ -159,79 +160,79 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
"add x20, x20, #0x1\n"
"lsr x20, x20, #0x1\n"
"lsr x21, x20, #0x2\n"
- "and x20, x20, #0x3\n"
"madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
"cbz x21, 8f\n"
"subs x21, x21, #0x1\n"
- ".inst 0xa140a773 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x27]\n"
- "ldnt1h { z17.h }, p1/Z, [x23]\n"
- ".inst 0xa041a76c // ld1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1h { z26.h }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa042a760 // ld1h { z0.h-z3.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1h { z30.h }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa143a770 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xa040a778 // ld1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ "ld1h { z1.h }, p1/Z, [x23]\n"
+ ".inst 0xa041a764 // ld1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa042a77c // ld1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa043a76c // ld1h { z12.h-z15.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1h { z18.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z0.h }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x81b12660 // fmopa za0.s, p1/M, p1/M, z19.h, z17.h\n"
+ ".inst 0x81a12700 // fmopa za0.s, p1/M, p1/M, z24.h, z1.h\n"
"subs x21, x21, #0x1\n"
- ".inst 0x81b126e1 // fmopa za1.s, p1/M, p1/M, z23.h, z17.h\n"
- ".inst 0x81b12762 // fmopa za2.s, p1/M, p1/M, z27.h, z17.h\n"
- ".inst 0x81b127e3 // fmopa za3.s, p1/M, p1/M, z31.h, z17.h\n"
- ".inst 0xa140a773 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x27]\n"
- ".inst 0x81ba2580 // fmopa za0.s, p1/M, p1/M, z12.h, z26.h\n"
- "ld1h { z17.h }, p1/Z, [x23]\n"
- ".inst 0x81ba25a1 // fmopa za1.s, p1/M, p1/M, z13.h, z26.h\n"
- ".inst 0x81ba25c2 // fmopa za2.s, p1/M, p1/M, z14.h, z26.h\n"
- ".inst 0x81ba25e3 // fmopa za3.s, p1/M, p1/M, z15.h, z26.h\n"
- ".inst 0xa041a76c // ld1h { z12.h-z15.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0x81be2400 // fmopa za0.s, p1/M, p1/M, z0.h, z30.h\n"
- "ld1h { z26.h }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0x81be2421 // fmopa za1.s, p1/M, p1/M, z1.h, z30.h\n"
- ".inst 0x81be2442 // fmopa za2.s, p1/M, p1/M, z2.h, z30.h\n"
- ".inst 0x81be2463 // fmopa za3.s, p1/M, p1/M, z3.h, z30.h\n"
- ".inst 0xa042a760 // ld1h { z0.h-z3.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
- "ld1h { z30.h }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0x81b22600 // fmopa za0.s, p1/M, p1/M, z16.h, z18.h\n"
- ".inst 0x81b22681 // fmopa za1.s, p1/M, p1/M, z20.h, z18.h\n"
- ".inst 0x81b22702 // fmopa za2.s, p1/M, p1/M, z24.h, z18.h\n"
- ".inst 0x81b22783 // fmopa za3.s, p1/M, p1/M, z28.h, z18.h\n"
- ".inst 0xa143a770 // ld1h { z16.h, z20.h, z24.h, z28.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0x81a12721 // fmopa za1.s, p1/M, p1/M, z25.h, z1.h\n"
+ ".inst 0x81a12742 // fmopa za2.s, p1/M, p1/M, z26.h, z1.h\n"
+ ".inst 0x81a12763 // fmopa za3.s, p1/M, p1/M, z27.h, z1.h\n"
+ ".inst 0xa040a778 // ld1h { z24.h-z27.h }, pn9.b/Z, [x27]\n"
+ ".inst 0x81b72480 // fmopa za0.s, p1/M, p1/M, z4.h, z23.h\n"
+ "ld1h { z1.h }, p1/Z, [x23]\n"
+ ".inst 0x81b724a1 // fmopa za1.s, p1/M, p1/M, z5.h, z23.h\n"
+ ".inst 0x81b724c2 // fmopa za2.s, p1/M, p1/M, z6.h, z23.h\n"
+ ".inst 0x81b724e3 // fmopa za3.s, p1/M, p1/M, z7.h, z23.h\n"
+ ".inst 0xa041a764 // ld1h { z4.h-z7.h }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0x81b32780 // fmopa za0.s, p1/M, p1/M, z28.h, z19.h\n"
+ "ld1h { z23.h }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0x81b327a1 // fmopa za1.s, p1/M, p1/M, z29.h, z19.h\n"
+ ".inst 0x81b327c2 // fmopa za2.s, p1/M, p1/M, z30.h, z19.h\n"
+ ".inst 0x81b327e3 // fmopa za3.s, p1/M, p1/M, z31.h, z19.h\n"
+ ".inst 0xa042a77c // ld1h { z28.h-z31.h }, pn9.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0x81a02580 // fmopa za0.s, p1/M, p1/M, z12.h, z0.h\n"
+ ".inst 0x81a025a1 // fmopa za1.s, p1/M, p1/M, z13.h, z0.h\n"
+ ".inst 0x81a025c2 // fmopa za2.s, p1/M, p1/M, z14.h, z0.h\n"
+ ".inst 0x81a025e3 // fmopa za3.s, p1/M, p1/M, z15.h, z0.h\n"
+ ".inst 0xa043a76c // ld1h { z12.h-z15.h }, pn9.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ld1h { z18.h }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z0.h }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x81b12660 // fmopa za0.s, p1/M, p1/M, z19.h, z17.h\n"
- ".inst 0x81b126e1 // fmopa za1.s, p1/M, p1/M, z23.h, z17.h\n"
- ".inst 0x81b12762 // fmopa za2.s, p1/M, p1/M, z27.h, z17.h\n"
- ".inst 0x81b127e3 // fmopa za3.s, p1/M, p1/M, z31.h, z17.h\n"
- ".inst 0x81ba2580 // fmopa za0.s, p1/M, p1/M, z12.h, z26.h\n"
- ".inst 0x81ba25a1 // fmopa za1.s, p1/M, p1/M, z13.h, z26.h\n"
- ".inst 0x81ba25c2 // fmopa za2.s, p1/M, p1/M, z14.h, z26.h\n"
- ".inst 0x81ba25e3 // fmopa za3.s, p1/M, p1/M, z15.h, z26.h\n"
- ".inst 0x81be2400 // fmopa za0.s, p1/M, p1/M, z0.h, z30.h\n"
- ".inst 0x81be2421 // fmopa za1.s, p1/M, p1/M, z1.h, z30.h\n"
- ".inst 0x81be2442 // fmopa za2.s, p1/M, p1/M, z2.h, z30.h\n"
- ".inst 0x81be2463 // fmopa za3.s, p1/M, p1/M, z3.h, z30.h\n"
- ".inst 0x81b22600 // fmopa za0.s, p1/M, p1/M, z16.h, z18.h\n"
- ".inst 0x81b22681 // fmopa za1.s, p1/M, p1/M, z20.h, z18.h\n"
- ".inst 0x81b22702 // fmopa za2.s, p1/M, p1/M, z24.h, z18.h\n"
- ".inst 0x81b22783 // fmopa za3.s, p1/M, p1/M, z28.h, z18.h\n"
+ ".inst 0x81a12700 // fmopa za0.s, p1/M, p1/M, z24.h, z1.h\n"
+ ".inst 0x81a12721 // fmopa za1.s, p1/M, p1/M, z25.h, z1.h\n"
+ ".inst 0x81a12742 // fmopa za2.s, p1/M, p1/M, z26.h, z1.h\n"
+ ".inst 0x81a12763 // fmopa za3.s, p1/M, p1/M, z27.h, z1.h\n"
+ ".inst 0x81b72480 // fmopa za0.s, p1/M, p1/M, z4.h, z23.h\n"
+ ".inst 0x81b724a1 // fmopa za1.s, p1/M, p1/M, z5.h, z23.h\n"
+ ".inst 0x81b724c2 // fmopa za2.s, p1/M, p1/M, z6.h, z23.h\n"
+ ".inst 0x81b724e3 // fmopa za3.s, p1/M, p1/M, z7.h, z23.h\n"
+ ".inst 0x81b32780 // fmopa za0.s, p1/M, p1/M, z28.h, z19.h\n"
+ ".inst 0x81b327a1 // fmopa za1.s, p1/M, p1/M, z29.h, z19.h\n"
+ ".inst 0x81b327c2 // fmopa za2.s, p1/M, p1/M, z30.h, z19.h\n"
+ ".inst 0x81b327e3 // fmopa za3.s, p1/M, p1/M, z31.h, z19.h\n"
+ ".inst 0x81a02580 // fmopa za0.s, p1/M, p1/M, z12.h, z0.h\n"
+ ".inst 0x81a025a1 // fmopa za1.s, p1/M, p1/M, z13.h, z0.h\n"
+ ".inst 0x81a025c2 // fmopa za2.s, p1/M, p1/M, z14.h, z0.h\n"
+ ".inst 0x81a025e3 // fmopa za3.s, p1/M, p1/M, z15.h, z0.h\n"
"8:" // K oddments
"cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa140a773 // ld1h { z19.h, z23.h, z27.h, z31.h }, pn9.b/Z, [x27]\n"
+ ".inst 0xa040a768 // ld1h { z8.h-z11.h }, pn9.b/Z, [x27]\n"
"subs x20, x20, #0x1\n"
"addvl x27, x27, #4\n"
- "ld1h { z17.h }, p1/Z, [x23]\n"
+ "ld1h { z12.h }, p1/Z, [x23]\n"
"addvl x23, x23, #1\n"
- ".inst 0x81b12660 // fmopa za0.s, p1/M, p1/M, z19.h, z17.h\n"
- ".inst 0x81b126e1 // fmopa za1.s, p1/M, p1/M, z23.h, z17.h\n"
- ".inst 0x81b12762 // fmopa za2.s, p1/M, p1/M, z27.h, z17.h\n"
- ".inst 0x81b127e3 // fmopa za3.s, p1/M, p1/M, z31.h, z17.h\n"
+ ".inst 0x81ac2500 // fmopa za0.s, p1/M, p1/M, z8.h, z12.h\n"
+ ".inst 0x81ac2521 // fmopa za1.s, p1/M, p1/M, z9.h, z12.h\n"
+ ".inst 0x81ac2542 // fmopa za2.s, p1/M, p1/M, z10.h, z12.h\n"
+ ".inst 0x81ac2563 // fmopa za3.s, p1/M, p1/M, z11.h, z12.h\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x16, #1, 14f\n"
@@ -239,25 +240,25 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xa040c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
- ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xa042c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa041c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
"addvl x15, x15, #16\n"
- ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa060c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14]\n"
- ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa060c5dc // st1w { z28.s-z31.s }, pn9.b, [x14]\n"
+ ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
".inst 0xa061c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0x4, MUL VL]\n"
".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
"add x12, x12, #0x4\n"
- ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa062c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa063c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 29f\n"
@@ -265,16 +266,16 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
- ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa060c5dc // st1w { z28.s-z31.s }, pn9.b, [x14]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14]\n"
"add x12, x12, #0x4\n"
- ".inst 0xa061c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa061c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 29f\n"
@@ -282,11 +283,11 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
"ldr x26, [%x[args], %[offsetof_C]]\n"
"sub x25, x13, x11\n"
"cntw x24\n"
- "ld1rh { z29.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "ld1rh { z21.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
"ldr x23, [%x[args], %[offsetof_ldcb]]\n"
"whilelt p0.s, x10, x9\n"
"cmp x25, x24\n"
- "ld1rh { z28.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+ "ld1rh { z20.h }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"csel x22, x25, x24, LT\n"
"mov x12, #0x0\n"
"add x26, x26, x10, LSL #1\n" // C += n
@@ -295,21 +296,21 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
"add x12, x12, #0x4\n"
- "fcvt z0.h, p1/m, z0.s\n"
- "fcvt z1.h, p1/m, z1.s\n"
- "fcvt z2.h, p1/m, z2.s\n"
- "fcvt z3.h, p1/m, z3.s\n"
+ "fcvt z28.h, p1/m, z28.s\n"
+ "fcvt z29.h, p1/m, z29.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc17ccba0 // fclamp { z0.h-z3.h }, z29.h, z28.h\n"
- "st1h { z0.s }, p0, [x26]\n"
+ "fcvt z30.h, p1/m, z30.s\n"
+ "fcvt z31.h, p1/m, z31.s\n"
+ ".inst 0xc174cabc // fclamp { z28.h-z31.h }, z21.h, z20.h\n"
+ "st1h { z28.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z1.s }, p0, [x26]\n"
+ "st1h { z29.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z2.s }, p0, [x26]\n"
+ "st1h { z30.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z3.s }, p0, [x26]\n"
+ "st1h { z31.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"blt 15b\n"
"16:" // Store to output array: Accumulator row 0 oddments
@@ -320,7 +321,7 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
"fcvt z17.h, p1/m, z17.s\n"
"fcvt z18.h, p1/m, z18.s\n"
"fcvt z19.h, p1/m, z19.s\n"
- ".inst 0xc17ccbb0 // fclamp { z16.h-z19.h }, z29.h, z28.h\n"
+ ".inst 0xc174cab0 // fclamp { z16.h-z19.h }, z21.h, z20.h\n"
"st1h { z16.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 17f\n"
@@ -333,155 +334,152 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
"17:" // Store to output array: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x24\n"
- "csel x22, x25, x24, LT\n"
"mov x12, #0x0\n"
+ "csel x22, x25, x24, LT\n"
"lsr x21, x22, #0x2\n"
"and x20, x22, #0x3\n"
"cbz x21, 19f\n"
"18:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
"add x12, x12, #0x4\n"
- "fcvt z24.h, p1/m, z24.s\n"
- "fcvt z25.h, p1/m, z25.s\n"
- "fcvt z26.h, p1/m, z26.s\n"
- "fcvt z27.h, p1/m, z27.s\n"
+ "fcvt z0.h, p1/m, z0.s\n"
+ "fcvt z1.h, p1/m, z1.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc17ccbb8 // fclamp { z24.h-z27.h }, z29.h, z28.h\n"
- "st1h { z24.s }, p0, [x26]\n"
+ "fcvt z2.h, p1/m, z2.s\n"
+ "fcvt z3.h, p1/m, z3.s\n"
+ ".inst 0xc174caa0 // fclamp { z0.h-z3.h }, z21.h, z20.h\n"
+ "st1h { z0.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z25.s }, p0, [x26]\n"
+ "st1h { z1.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z26.s }, p0, [x26]\n"
+ "st1h { z2.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z27.s }, p0, [x26]\n"
+ "st1h { z3.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"blt 18b\n"
"19:" // Store to output array: Accumulator row 1 oddments
"cbz x20, 20f\n"
- ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
"subs x20, x20, #0x1\n"
- "fcvt z0.h, p1/m, z0.s\n"
- "fcvt z1.h, p1/m, z1.s\n"
- "fcvt z2.h, p1/m, z2.s\n"
- "fcvt z3.h, p1/m, z3.s\n"
- ".inst 0xc17ccba0 // fclamp { z0.h-z3.h }, z29.h, z28.h\n"
- "st1h { z0.s }, p0, [x26]\n"
+ "fcvt z24.h, p1/m, z24.s\n"
+ "fcvt z25.h, p1/m, z25.s\n"
+ "fcvt z26.h, p1/m, z26.s\n"
+ "fcvt z27.h, p1/m, z27.s\n"
+ ".inst 0xc174cab8 // fclamp { z24.h-z27.h }, z21.h, z20.h\n"
+ "st1h { z24.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 20f\n"
"subs x20, x20, #0x1\n"
- "st1h { z1.s }, p0, [x26]\n"
+ "st1h { z25.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 20f\n"
- "st1h { z2.s }, p0, [x26]\n"
+ "st1h { z26.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"20:" // Store to output array: Accumulator row 1 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x24\n"
- "csel x22, x25, x24, LT\n"
"mov x12, #0x0\n"
+ "csel x22, x25, x24, LT\n"
"lsr x21, x22, #0x2\n"
"and x20, x22, #0x3\n"
"cbz x21, 22f\n"
"21:" // Store to output array: Accumulator row 2 loop
- ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
"add x12, x12, #0x4\n"
- "fcvt z20.h, p1/m, z20.s\n"
- "fcvt z21.h, p1/m, z21.s\n"
- "fcvt z22.h, p1/m, z22.s\n"
- "fcvt z23.h, p1/m, z23.s\n"
+ "fcvt z16.h, p1/m, z16.s\n"
+ "fcvt z17.h, p1/m, z17.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc17ccbb4 // fclamp { z20.h-z23.h }, z29.h, z28.h\n"
- "st1h { z20.s }, p0, [x26]\n"
+ "fcvt z18.h, p1/m, z18.s\n"
+ "fcvt z19.h, p1/m, z19.s\n"
+ ".inst 0xc174cab0 // fclamp { z16.h-z19.h }, z21.h, z20.h\n"
+ "st1h { z16.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z21.s }, p0, [x26]\n"
+ "st1h { z17.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z22.s }, p0, [x26]\n"
+ "st1h { z18.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z23.s }, p0, [x26]\n"
+ "st1h { z19.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"blt 21b\n"
"22:" // Store to output array: Accumulator row 2 oddments
"cbz x20, 23f\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
"subs x20, x20, #0x1\n"
- "fcvt z12.h, p1/m, z12.s\n"
- "fcvt z13.h, p1/m, z13.s\n"
- "fcvt z14.h, p1/m, z14.s\n"
- "fcvt z15.h, p1/m, z15.s\n"
- ".inst 0xc17ccbac // fclamp { z12.h-z15.h }, z29.h, z28.h\n"
- "st1h { z12.s }, p0, [x26]\n"
+ "fcvt z28.h, p1/m, z28.s\n"
+ "fcvt z29.h, p1/m, z29.s\n"
+ "fcvt z30.h, p1/m, z30.s\n"
+ "fcvt z31.h, p1/m, z31.s\n"
+ ".inst 0xc174cabc // fclamp { z28.h-z31.h }, z21.h, z20.h\n"
+ "st1h { z28.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 23f\n"
"subs x20, x20, #0x1\n"
- "st1h { z13.s }, p0, [x26]\n"
+ "st1h { z29.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 23f\n"
- "st1h { z14.s }, p0, [x26]\n"
+ "st1h { z30.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"23:" // Store to output array: Accumulator row 2 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x24\n"
- "csel x20, x25, x24, LT\n"
"mov x12, #0x0\n"
+ "csel x20, x25, x24, LT\n"
"lsr x21, x20, #0x2\n"
"and x20, x20, #0x3\n"
"cbz x21, 25f\n"
"24:" // Store to output array: Accumulator row 3 loop
- ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
"add x12, x12, #0x4\n"
- "fcvt z4.h, p1/m, z4.s\n"
- "fcvt z5.h, p1/m, z5.s\n"
- "fcvt z6.h, p1/m, z6.s\n"
- "fcvt z7.h, p1/m, z7.s\n"
+ "fcvt z28.h, p1/m, z28.s\n"
+ "fcvt z29.h, p1/m, z29.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc17ccba4 // fclamp { z4.h-z7.h }, z29.h, z28.h\n"
- "st1h { z4.s }, p0, [x26]\n"
+ "fcvt z30.h, p1/m, z30.s\n"
+ "fcvt z31.h, p1/m, z31.s\n"
+ ".inst 0xc174cabc // fclamp { z28.h-z31.h }, z21.h, z20.h\n"
+ "st1h { z28.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z5.s }, p0, [x26]\n"
+ "st1h { z29.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z6.s }, p0, [x26]\n"
+ "st1h { z30.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "st1h { z7.s }, p0, [x26]\n"
+ "st1h { z31.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"blt 24b\n"
"25:" // Store to output array: Accumulator row 3 oddments
"cbz x20, 26f\n"
- ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
"subs x20, x20, #0x1\n"
- "fcvt z4.h, p1/m, z4.s\n"
- "fcvt z5.h, p1/m, z5.s\n"
- "fcvt z6.h, p1/m, z6.s\n"
- "fcvt z7.h, p1/m, z7.s\n"
- ".inst 0xc17ccba4 // fclamp { z4.h-z7.h }, z29.h, z28.h\n"
- "st1h { z4.s }, p0, [x26]\n"
+ "fcvt z28.h, p1/m, z28.s\n"
+ "fcvt z29.h, p1/m, z29.s\n"
+ "fcvt z30.h, p1/m, z30.s\n"
+ "fcvt z31.h, p1/m, z31.s\n"
+ ".inst 0xc174cabc // fclamp { z28.h-z31.h }, z21.h, z20.h\n"
+ "st1h { z28.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 26f\n"
"subs x20, x20, #0x1\n"
- "st1h { z5.s }, p0, [x26]\n"
+ "st1h { z29.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 26f\n"
- "st1h { z6.s }, p0, [x26]\n"
+ "st1h { z30.s }, p0, [x26]\n"
"26:" // Store to output array: Accumulator row 3 oddments: End
"27:" // Store to output array: End
"tbz x16, #0, 29f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"28:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xa042c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xa043c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
"addvl x15, x15, #16\n"
".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
"blt 28b\n"
@@ -503,4 +501,4 @@ void sme2_interleaved_nomerge_fp16fp32fp16_mopa_4VLx1VL(const __fp16 *const A, c
} // namespace arm_gemm
-#endif // __ARM_FEATURE_SVE
+#endif // ARM_COMPUTE_ENABLE_SME2
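A second recurring change in the assembly bodies: non-temporal loads of the B panel (ldnt1h above, ldnt1w in the fp32 kernels below) become plain ld1h/ld1w. The patch does not state the motivation, so the following is only an intrinsics-level analogue of the before/after, assuming an SVE-capable toolchain; the kernels themselves emit raw .inst encodings, not intrinsics:

    #include <arm_sve.h>

    svfloat16_t load_b_column(const float16_t *bptr) {
        svbool_t pg = svptrue_b16();
        // Before: svldnt1_f16(pg, bptr) -- non-temporal hint, discouraging cache retention.
        // After: the hint is dropped and a regular predicated load is used.
        return svld1_f16(pg, bptr);
    }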
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp
index bf3de2118e..e3e2a0639f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
class cls_sme2_interleaved_nomerge_fp32_mopa_1VLx4VL
{
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_fp32_mopa_1VLx4VL;
- StdTransformsSME<operand_type, result_type, 1, 4, 1> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 1, 4, 1> transforms = {};
cls_sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp
index 97be758bd6..1f9f08e401 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
B(B), kstride_bytes(K * sizeof(float)),
C(C), ldcb(ldc * sizeof(float)),
M(M), N(N), K(K),
- n_loops((K - 1) / 2), n_tail_iters((K - 1) % 2),
min(-std::numeric_limits<float>::infinity()),
max(std::numeric_limits<float>::infinity()),
bias(bias),
@@ -88,12 +87,13 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
const long kstride_bytes;
float *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
float min = -std::numeric_limits<float>::infinity();
float max = std::numeric_limits<float>::infinity();
const float *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -112,17 +112,17 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14]\n"
- ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa040c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14]\n"
".inst 0xa041c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
- ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
".inst 0xa042c5d4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
- ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x14, x14, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w11, [%x[args], %[offsetof_M]]\n"
@@ -137,101 +137,101 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "fmov z6.s, #1.0\n"
- ".inst 0xa009c29d // ldnt1w { z28.s-z31.s }, p8/Z, [x20, x9, LSL #2]\n"
- ".inst 0x809c00c0 // fmopa za0.s, p0/M, p0/M, z6.s, z28.s\n"
- ".inst 0x809d00c1 // fmopa za1.s, p0/M, p0/M, z6.s, z29.s\n"
- ".inst 0x809e00c2 // fmopa za2.s, p0/M, p0/M, z6.s, z30.s\n"
- ".inst 0x809f00c3 // fmopa za3.s, p0/M, p0/M, z6.s, z31.s\n"
+ "fmov z15.s, #1.0\n"
+ ".inst 0xa109c280 // ld1w { z0.s, z4.s, z8.s, z12.s }, p8/Z, [x20, x9, LSL #2]\n"
+ ".inst 0x808001e0 // fmopa za0.s, p0/M, p0/M, z15.s, z0.s\n"
+ ".inst 0x808401e1 // fmopa za1.s, p0/M, p0/M, z15.s, z4.s\n"
+ ".inst 0x808801e2 // fmopa za2.s, p0/M, p0/M, z15.s, z8.s\n"
+ ".inst 0x808c01e3 // fmopa za3.s, p0/M, p0/M, z15.s, z12.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x9\n"
"mov x21, x10\n"
"incw x20, ALL, MUL #4\n"
"incw x21\n"
"cmp x20, x28\n"
- "csel x21, x10, x21, LT\n"
"mov x20, x15\n"
+ "csel x21, x10, x21, LT\n"
"bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
"cmp x21, x11\n"
"csel x15, x20, x15, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
- "lsr x23, x20, #0x2\n"
- "and x22, x20, #0x3\n"
- "ldr x21, [%x[args], %[offsetof_B]]\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x21, x9, x20, x21\n" // bptr = B + n * kstride_bytes
- "cbz x23, 8f\n"
- "subs x23, x23, #0x1\n"
- "ld1w { z28.s }, p0/Z, [x26]\n"
- ".inst 0xa040c6a9 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x21]\n"
- "ld1w { z22.s }, p0/Z, [x26, #1, MUL VL]\n"
- ".inst 0xa041c6ad // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x21, #0x4, MUL VL]\n"
- "ld1w { z30.s }, p0/Z, [x26, #2, MUL VL]\n"
- ".inst 0xa042c6a5 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x21, #0x8, MUL VL]\n"
- "ld1w { z20.s }, p0/Z, [x26, #3, MUL VL]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "madd x23, x9, x22, x23\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1w { z20.s }, p0/Z, [x26]\n"
+ ".inst 0xa140c6f3 // ld1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x23]\n"
+ "ld1w { z4.s }, p0/Z, [x26, #1, MUL VL]\n"
+ ".inst 0xa041c6ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1w { z29.s }, p0/Z, [x26, #2, MUL VL]\n"
+ ".inst 0xa142c6f2 // ld1w { z18.s, z22.s, z26.s, z30.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x26, #3, MUL VL]\n"
"addvl x26, x26, #4\n"
- ".inst 0xa143c6bb // ldnt1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x21, #0xc, MUL VL]\n"
- "addvl x21, x21, #16\n"
+ ".inst 0xa043c6e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ "addvl x23, x23, #16\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x80880380 // fmopa za0.s, p0/M, p0/M, z28.s, z8.s\n"
- "subs x23, x23, #0x1\n"
- ".inst 0x80890381 // fmopa za1.s, p0/M, p0/M, z28.s, z9.s\n"
- ".inst 0x808a0382 // fmopa za2.s, p0/M, p0/M, z28.s, z10.s\n"
- ".inst 0x808b0383 // fmopa za3.s, p0/M, p0/M, z28.s, z11.s\n"
- "ld1w { z28.s }, p0/Z, [x26]\n"
- ".inst 0x808c02c0 // fmopa za0.s, p0/M, p0/M, z22.s, z12.s\n"
- ".inst 0xa040c6a9 // ldnt1w { z8.s-z11.s }, pn9.b/Z, [x21]\n"
- ".inst 0x808d02c1 // fmopa za1.s, p0/M, p0/M, z22.s, z13.s\n"
- ".inst 0x808e02c2 // fmopa za2.s, p0/M, p0/M, z22.s, z14.s\n"
- ".inst 0x808f02c3 // fmopa za3.s, p0/M, p0/M, z22.s, z15.s\n"
- "ld1w { z22.s }, p0/Z, [x26, #1, MUL VL]\n"
- ".inst 0x808403c0 // fmopa za0.s, p0/M, p0/M, z30.s, z4.s\n"
- ".inst 0xa041c6ad // ldnt1w { z12.s-z15.s }, pn9.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0x808503c1 // fmopa za1.s, p0/M, p0/M, z30.s, z5.s\n"
- ".inst 0x808603c2 // fmopa za2.s, p0/M, p0/M, z30.s, z6.s\n"
- ".inst 0x808703c3 // fmopa za3.s, p0/M, p0/M, z30.s, z7.s\n"
- "ld1w { z30.s }, p0/Z, [x26, #2, MUL VL]\n"
- ".inst 0xa042c6a5 // ldnt1w { z4.s-z7.s }, pn9.b/Z, [x21, #0x8, MUL VL]\n"
".inst 0x80930280 // fmopa za0.s, p0/M, p0/M, z20.s, z19.s\n"
+ "subs x21, x21, #0x1\n"
".inst 0x80970281 // fmopa za1.s, p0/M, p0/M, z20.s, z23.s\n"
".inst 0x809b0282 // fmopa za2.s, p0/M, p0/M, z20.s, z27.s\n"
".inst 0x809f0283 // fmopa za3.s, p0/M, p0/M, z20.s, z31.s\n"
- "ld1w { z20.s }, p0/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z20.s }, p0/Z, [x26]\n"
+ ".inst 0x808c0080 // fmopa za0.s, p0/M, p0/M, z4.s, z12.s\n"
+ ".inst 0xa140c6f3 // ld1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x23]\n"
+ ".inst 0x808d0081 // fmopa za1.s, p0/M, p0/M, z4.s, z13.s\n"
+ ".inst 0x808e0082 // fmopa za2.s, p0/M, p0/M, z4.s, z14.s\n"
+ ".inst 0x808f0083 // fmopa za3.s, p0/M, p0/M, z4.s, z15.s\n"
+ "ld1w { z4.s }, p0/Z, [x26, #1, MUL VL]\n"
+ ".inst 0x809203a0 // fmopa za0.s, p0/M, p0/M, z29.s, z18.s\n"
+ ".inst 0xa041c6ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x809603a1 // fmopa za1.s, p0/M, p0/M, z29.s, z22.s\n"
+ ".inst 0x809a03a2 // fmopa za2.s, p0/M, p0/M, z29.s, z26.s\n"
+ ".inst 0x809e03a3 // fmopa za3.s, p0/M, p0/M, z29.s, z30.s\n"
+ "ld1w { z29.s }, p0/Z, [x26, #2, MUL VL]\n"
+ ".inst 0xa142c6f2 // ld1w { z18.s, z22.s, z26.s, z30.s }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ ".inst 0x80880040 // fmopa za0.s, p0/M, p0/M, z2.s, z8.s\n"
+ ".inst 0x80890041 // fmopa za1.s, p0/M, p0/M, z2.s, z9.s\n"
+ ".inst 0x808a0042 // fmopa za2.s, p0/M, p0/M, z2.s, z10.s\n"
+ ".inst 0x808b0043 // fmopa za3.s, p0/M, p0/M, z2.s, z11.s\n"
+ "ld1w { z2.s }, p0/Z, [x26, #3, MUL VL]\n"
"addvl x26, x26, #4\n"
- ".inst 0xa143c6bb // ldnt1w { z19.s, z23.s, z27.s, z31.s }, pn9.b/Z, [x21, #0xc, MUL VL]\n"
- "addvl x21, x21, #16\n"
+ ".inst 0xa043c6e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ "addvl x23, x23, #16\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x80880380 // fmopa za0.s, p0/M, p0/M, z28.s, z8.s\n"
- ".inst 0x80890381 // fmopa za1.s, p0/M, p0/M, z28.s, z9.s\n"
- ".inst 0x808a0382 // fmopa za2.s, p0/M, p0/M, z28.s, z10.s\n"
- ".inst 0x808b0383 // fmopa za3.s, p0/M, p0/M, z28.s, z11.s\n"
- ".inst 0x808c02c0 // fmopa za0.s, p0/M, p0/M, z22.s, z12.s\n"
- ".inst 0x808d02c1 // fmopa za1.s, p0/M, p0/M, z22.s, z13.s\n"
- ".inst 0x808e02c2 // fmopa za2.s, p0/M, p0/M, z22.s, z14.s\n"
- ".inst 0x808f02c3 // fmopa za3.s, p0/M, p0/M, z22.s, z15.s\n"
- ".inst 0x808403c0 // fmopa za0.s, p0/M, p0/M, z30.s, z4.s\n"
- ".inst 0x808503c1 // fmopa za1.s, p0/M, p0/M, z30.s, z5.s\n"
- ".inst 0x808603c2 // fmopa za2.s, p0/M, p0/M, z30.s, z6.s\n"
- ".inst 0x808703c3 // fmopa za3.s, p0/M, p0/M, z30.s, z7.s\n"
".inst 0x80930280 // fmopa za0.s, p0/M, p0/M, z20.s, z19.s\n"
".inst 0x80970281 // fmopa za1.s, p0/M, p0/M, z20.s, z23.s\n"
".inst 0x809b0282 // fmopa za2.s, p0/M, p0/M, z20.s, z27.s\n"
".inst 0x809f0283 // fmopa za3.s, p0/M, p0/M, z20.s, z31.s\n"
+ ".inst 0x808c0080 // fmopa za0.s, p0/M, p0/M, z4.s, z12.s\n"
+ ".inst 0x808d0081 // fmopa za1.s, p0/M, p0/M, z4.s, z13.s\n"
+ ".inst 0x808e0082 // fmopa za2.s, p0/M, p0/M, z4.s, z14.s\n"
+ ".inst 0x808f0083 // fmopa za3.s, p0/M, p0/M, z4.s, z15.s\n"
+ ".inst 0x809203a0 // fmopa za0.s, p0/M, p0/M, z29.s, z18.s\n"
+ ".inst 0x809603a1 // fmopa za1.s, p0/M, p0/M, z29.s, z22.s\n"
+ ".inst 0x809a03a2 // fmopa za2.s, p0/M, p0/M, z29.s, z26.s\n"
+ ".inst 0x809e03a3 // fmopa za3.s, p0/M, p0/M, z29.s, z30.s\n"
+ ".inst 0x80880040 // fmopa za0.s, p0/M, p0/M, z2.s, z8.s\n"
+ ".inst 0x80890041 // fmopa za1.s, p0/M, p0/M, z2.s, z9.s\n"
+ ".inst 0x808a0042 // fmopa za2.s, p0/M, p0/M, z2.s, z10.s\n"
+ ".inst 0x808b0043 // fmopa za3.s, p0/M, p0/M, z2.s, z11.s\n"
"8:" // K oddments
- "cbz x22, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1w { z8.s }, p0/Z, [x26]\n"
- "subs x22, x22, #0x1\n"
+ "ld1w { z26.s }, p0/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
"addvl x26, x26, #1\n"
- ".inst 0xa140c6a3 // ld1w { z3.s, z7.s, z11.s, z15.s }, pn9.b/Z, [x21]\n"
- "addvl x21, x21, #4\n"
- ".inst 0x80830100 // fmopa za0.s, p0/M, p0/M, z8.s, z3.s\n"
- ".inst 0x80870101 // fmopa za1.s, p0/M, p0/M, z8.s, z7.s\n"
- ".inst 0x808b0102 // fmopa za2.s, p0/M, p0/M, z8.s, z11.s\n"
- ".inst 0x808f0103 // fmopa za3.s, p0/M, p0/M, z8.s, z15.s\n"
+ ".inst 0xa140c6e3 // ld1w { z3.s, z7.s, z11.s, z15.s }, pn9.b/Z, [x23]\n"
+ "addvl x23, x23, #4\n"
+ ".inst 0x80830340 // fmopa za0.s, p0/M, p0/M, z26.s, z3.s\n"
+ ".inst 0x80870341 // fmopa za1.s, p0/M, p0/M, z26.s, z7.s\n"
+ ".inst 0x808b0342 // fmopa za2.s, p0/M, p0/M, z26.s, z11.s\n"
+ ".inst 0x808f0343 // fmopa za3.s, p0/M, p0/M, z26.s, z15.s\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x15, #1, 14f\n"
@@ -239,25 +239,25 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5d4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa040c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14]\n"
".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
- ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5d4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa060c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
+ ".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa061c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5a0 // st1w { z0.s-z3.s }, pn9.b, [x13, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
- "addvl x14, x14, #16\n"
- ".inst 0xa061c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
- ".inst 0xa062c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0x8, MUL VL]\n"
- ".inst 0xa063c5a0 // st1w { z0.s-z3.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa063c5bc // st1w { z28.s-z31.s }, pn9.b, [x13, #0xc, MUL VL]\n"
"addvl x13, x13, #16\n"
"blt 11b\n"
"b 24f\n"
@@ -265,31 +265,31 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xa060c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13]\n"
+ ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
- ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa061c5b8 // st1w { z24.s-z27.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5ac // st1w { z12.s-z15.s }, pn9.b, [x13]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"cmp x12, x20\n"
".inst 0xa062c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0x8, MUL VL]\n"
- ".inst 0xa063c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa063c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
"addvl x13, x13, #16\n"
"blt 13b\n"
"b 24f\n"
"14:" // Store to output array
"ldr x25, [%x[args], %[offsetof_C]]\n"
- "add x25, x25, x9, LSL #2\n" // C += n
"sub x24, x11, x10\n"
"ldr x23, [%x[args], %[offsetof_ldcb]]\n"
+ "add x25, x25, x9, LSL #2\n" // C += n
"madd x25, x10, x23, x25\n" // C += m * ldc
"tbz x15, #2, 18f\n"
"cntw x20\n"
+ "mov x12, #0x0\n"
"cmp x24, x20\n"
"csel x22, x24, x20, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
@@ -299,30 +299,30 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
"add x25, x25, x23\n"
+ "add x12, x12, #0x4\n"
".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
"add x25, x25, x23\n"
- "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
"add x25, x25, x23\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa160c323 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
"cbz x20, 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa160c330 // st1w { z16.s, z20.s, z24.s, z28.s }, p8, [x25]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xa160c320 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"beq 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xa160c331 // st1w { z17.s, z21.s, z25.s, z29.s }, p8, [x25]\n"
+ ".inst 0xa160c321 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"beq 17f\n"
- ".inst 0xa160c332 // st1w { z18.s, z22.s, z26.s, z30.s }, p8, [x25]\n"
+ ".inst 0xa160c322 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
"subs x24, x24, x22\n"
@@ -330,29 +330,29 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
"b 22f\n"
"18:" // Store to output array: Skip activation: End
"cntw x20\n"
- "cmp x24, x20\n"
"ld1rw { z1.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
+ "cmp x24, x20\n"
+ "ld1rw { z0.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"csel x20, x24, x20, LT\n"
"lsr x21, x20, #0x2\n"
- "ld1rw { z0.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 20f\n"
"19:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
+ ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
".inst 0xc1a0c83c // fclamp { z28.s-z31.s }, z1.s, z0.s\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa160c330 // st1w { z16.s, z20.s, z24.s, z28.s }, p8, [x25]\n"
"add x25, x25, x23\n"
- "add x12, x12, #0x4\n"
".inst 0xa160c331 // st1w { z17.s, z21.s, z25.s, z29.s }, p8, [x25]\n"
"add x25, x25, x23\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa160c332 // st1w { z18.s, z22.s, z26.s, z30.s }, p8, [x25]\n"
"add x25, x25, x23\n"
".inst 0xa160c333 // st1w { z19.s, z23.s, z27.s, z31.s }, p8, [x25]\n"
@@ -362,13 +362,13 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
"cbz x20, 21f\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
+ ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
+ "subs x20, x20, #0x1\n"
".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
".inst 0xc1a0c83c // fclamp { z28.s-z31.s }, z1.s, z0.s\n"
- "subs x20, x20, #0x1\n"
".inst 0xa160c330 // st1w { z16.s, z20.s, z24.s, z28.s }, p8, [x25]\n"
"add x25, x25, x23\n"
"beq 21f\n"
@@ -383,25 +383,25 @@ void sme2_interleaved_nomerge_fp32_mopa_1VLx4VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"23:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa042c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x14, x14, #16\n"
"blt 23b\n"
"24:" // End block
"incw x9, ALL, MUL #4\n"
"cmp x9, x28\n"
"blt 3b\n"
"incw x10\n"
- "cmp x10, x11\n"
"mov x9, #0x0\n"
+ "cmp x10, x11\n"
"mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
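These generic.cpp hunks also drop the host-side n_loops((K - 1) / 2) and n_tail_iters((K - 1) % 2) members: the assembly now derives its own trip counts directly from K (the lsr #0x2 / and #0x3 pairs in the hunks above), so the struct fields were dead weight. A host-side restatement of what the asm computes, as a sketch (register names refer to the hunks above):

    #include <cstdint>

    struct KSplit { uint64_t k_loops, k_oddments; };

    // fp32: "lsr x21, x20, #0x2" / "and x20, x20, #0x3"
    KSplit split_k_fp32(uint64_t K) {
        return { K >> 2,   // full K-loop iterations, four outer products each
                 K & 3 };  // leftover "K oddments", handled one per iteration
    }

    // The fp16 kernel first rounds K up to whole element pairs
    // ("add x20, x20, #0x1" / "lsr x20, x20, #0x1") before the same split.
    KSplit split_k_fp16(uint64_t K) {
        const uint64_t pairs = (K + 1) >> 1;
        return { pairs >> 2, pairs & 3 };
    }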
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp
index 9bc1f83100..275399748a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
class cls_sme2_interleaved_nomerge_fp32_mopa_2VLx2VL
{
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_fp32_mopa_2VLx2VL;
- StdTransformsSME<operand_type, result_type, 2, 2, 1> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 2, 2, 1> transforms = {};
cls_sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const CPUInfo *)
{
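Taken together, the three headers in this stretch differ only in tile shape: the trailing StdTransformsSME parameters are <4, 1, 2> for the fp16 4VLx1VL kernel, <1, 4, 1> for 1VLx4VL, and <2, 2, 1> here. A compact reading of that mapping (an inference from the hunks, not documented API semantics):

    // Rows/columns are in units of SVE vector lengths (VL); the last field
    // appears to be a K-unroll factor (2 for fp16, matching its pair packing).
    struct TileShape { int out_rows_vl; int out_cols_vl; int k_unroll; };

    constexpr TileShape mopa_4VLx1VL{4, 1, 2}; // fp16fp32fp16
    constexpr TileShape mopa_1VLx4VL{1, 4, 1}; // fp32
    constexpr TileShape mopa_2VLx2VL{2, 2, 1}; // fp32 (this file)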
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp
index 3c475044e2..45fcc7a860 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
B(B), kstride_bytes(K * sizeof(float)),
C(C), ldcb(ldc * sizeof(float)),
M(M), N(N), K(K),
- n_loops((K - 1) / 2), n_tail_iters((K - 1) % 2),
min(-std::numeric_limits<float>::infinity()),
max(std::numeric_limits<float>::infinity()),
bias(bias),
@@ -88,12 +87,13 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
const long kstride_bytes;
float *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
float min = -std::numeric_limits<float>::infinity();
float max = std::numeric_limits<float>::infinity();
const float *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -112,17 +112,17 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa042c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -137,101 +137,101 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "fmov z12.s, #1.0\n"
- ".inst 0xa10a4289 // ldnt1w { z1.s, z9.s }, p8/Z, [x20, x10, LSL #2]\n"
- ".inst 0x80810180 // fmopa za0.s, p0/M, p0/M, z12.s, z1.s\n"
- ".inst 0x80890181 // fmopa za1.s, p0/M, p0/M, z12.s, z9.s\n"
- ".inst 0x80810182 // fmopa za2.s, p0/M, p0/M, z12.s, z1.s\n"
- ".inst 0x80890183 // fmopa za3.s, p0/M, p0/M, z12.s, z9.s\n"
+ "fmov z17.s, #1.0\n"
+ ".inst 0xa00a428a // ld1w { z10.s-z11.s }, p8/Z, [x20, x10, LSL #2]\n"
+ ".inst 0x808a0220 // fmopa za0.s, p0/M, p0/M, z17.s, z10.s\n"
+ ".inst 0x808b0221 // fmopa za1.s, p0/M, p0/M, z17.s, z11.s\n"
+ ".inst 0x808a0222 // fmopa za2.s, p0/M, p0/M, z17.s, z10.s\n"
+ ".inst 0x808b0223 // fmopa za3.s, p0/M, p0/M, z17.s, z11.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20, ALL, MUL #2\n"
"incw x21, ALL, MUL #2\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
- "lsr x23, x20, #0x2\n"
- "and x22, x20, #0x3\n"
- "ldr x21, [%x[args], %[offsetof_B]]\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x21, x10, x20, x21\n" // bptr = B + n * kstride_bytes
- "cbz x23, 8f\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xa0404772 // ld1w { z18.s-z19.s }, pn9.b/Z, [x27]\n"
- ".inst 0xa04046a3 // ldnt1w { z2.s-z3.s }, pn9.b/Z, [x21]\n"
- ".inst 0xa0414764 // ld1w { z4.s-z5.s }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa04146bb // ldnt1w { z26.s-z27.s }, pn9.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0xa042476a // ld1w { z10.s-z11.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04246b5 // ldnt1w { z20.s-z21.s }, pn9.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0xa0434766 // ld1w { z6.s-z7.s }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0404776 // ld1w { z22.s-z23.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa14046e7 // ld1w { z7.s, z15.s }, pn9.b/Z, [x23]\n"
+ ".inst 0xa1414766 // ld1w { z6.s, z14.s }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa04146f4 // ld1w { z20.s-z21.s }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa1424762 // ld1w { z2.s, z10.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14246e3 // ld1w { z3.s, z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1434761 // ld1w { z1.s, z9.s }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa04346a9 // ldnt1w { z8.s-z9.s }, pn9.b/Z, [x21, #0x6, MUL VL]\n"
- "addvl x21, x21, #8\n"
+ ".inst 0xa04346e4 // ld1w { z4.s-z5.s }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ "addvl x23, x23, #8\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x80820240 // fmopa za0.s, p0/M, p0/M, z18.s, z2.s\n"
- "subs x23, x23, #0x1\n"
- ".inst 0x80830241 // fmopa za1.s, p0/M, p0/M, z18.s, z3.s\n"
- ".inst 0x80820262 // fmopa za2.s, p0/M, p0/M, z19.s, z2.s\n"
- ".inst 0x80830263 // fmopa za3.s, p0/M, p0/M, z19.s, z3.s\n"
- ".inst 0xa0404772 // ld1w { z18.s-z19.s }, pn9.b/Z, [x27]\n"
- ".inst 0x809a0080 // fmopa za0.s, p0/M, p0/M, z4.s, z26.s\n"
- ".inst 0xa04046a3 // ldnt1w { z2.s-z3.s }, pn9.b/Z, [x21]\n"
- ".inst 0x809b0081 // fmopa za1.s, p0/M, p0/M, z4.s, z27.s\n"
- ".inst 0x809a00a2 // fmopa za2.s, p0/M, p0/M, z5.s, z26.s\n"
- ".inst 0x809b00a3 // fmopa za3.s, p0/M, p0/M, z5.s, z27.s\n"
- ".inst 0xa0414764 // ld1w { z4.s-z5.s }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0x80940140 // fmopa za0.s, p0/M, p0/M, z10.s, z20.s\n"
- ".inst 0xa04146bb // ldnt1w { z26.s-z27.s }, pn9.b/Z, [x21, #0x2, MUL VL]\n"
- ".inst 0x80950141 // fmopa za1.s, p0/M, p0/M, z10.s, z21.s\n"
- ".inst 0x80940162 // fmopa za2.s, p0/M, p0/M, z11.s, z20.s\n"
- ".inst 0x80950163 // fmopa za3.s, p0/M, p0/M, z11.s, z21.s\n"
- ".inst 0xa042476a // ld1w { z10.s-z11.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa04246b5 // ldnt1w { z20.s-z21.s }, pn9.b/Z, [x21, #0x4, MUL VL]\n"
- ".inst 0x808800c0 // fmopa za0.s, p0/M, p0/M, z6.s, z8.s\n"
- ".inst 0x808900c1 // fmopa za1.s, p0/M, p0/M, z6.s, z9.s\n"
- ".inst 0x808800e2 // fmopa za2.s, p0/M, p0/M, z7.s, z8.s\n"
- ".inst 0x808900e3 // fmopa za3.s, p0/M, p0/M, z7.s, z9.s\n"
- ".inst 0xa0434766 // ld1w { z6.s-z7.s }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ ".inst 0x808702c0 // fmopa za0.s, p0/M, p0/M, z22.s, z7.s\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0x808f02c1 // fmopa za1.s, p0/M, p0/M, z22.s, z15.s\n"
+ ".inst 0x808702e2 // fmopa za2.s, p0/M, p0/M, z23.s, z7.s\n"
+ ".inst 0x808f02e3 // fmopa za3.s, p0/M, p0/M, z23.s, z15.s\n"
+ ".inst 0xa0404776 // ld1w { z22.s-z23.s }, pn9.b/Z, [x27]\n"
+ ".inst 0x809400c0 // fmopa za0.s, p0/M, p0/M, z6.s, z20.s\n"
+ ".inst 0xa14046e7 // ld1w { z7.s, z15.s }, pn9.b/Z, [x23]\n"
+ ".inst 0x809500c1 // fmopa za1.s, p0/M, p0/M, z6.s, z21.s\n"
+ ".inst 0x809401c2 // fmopa za2.s, p0/M, p0/M, z14.s, z20.s\n"
+ ".inst 0x809501c3 // fmopa za3.s, p0/M, p0/M, z14.s, z21.s\n"
+ ".inst 0xa1414766 // ld1w { z6.s, z14.s }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0x80830040 // fmopa za0.s, p0/M, p0/M, z2.s, z3.s\n"
+ ".inst 0xa04146f4 // ld1w { z20.s-z21.s }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0x808b0041 // fmopa za1.s, p0/M, p0/M, z2.s, z11.s\n"
+ ".inst 0x80830142 // fmopa za2.s, p0/M, p0/M, z10.s, z3.s\n"
+ ".inst 0x808b0143 // fmopa za3.s, p0/M, p0/M, z10.s, z11.s\n"
+ ".inst 0xa1424762 // ld1w { z2.s, z10.s }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14246e3 // ld1w { z3.s, z11.s }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0x80840020 // fmopa za0.s, p0/M, p0/M, z1.s, z4.s\n"
+ ".inst 0x80850021 // fmopa za1.s, p0/M, p0/M, z1.s, z5.s\n"
+ ".inst 0x80840122 // fmopa za2.s, p0/M, p0/M, z9.s, z4.s\n"
+ ".inst 0x80850123 // fmopa za3.s, p0/M, p0/M, z9.s, z5.s\n"
+ ".inst 0xa1434761 // ld1w { z1.s, z9.s }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa04346a9 // ldnt1w { z8.s-z9.s }, pn9.b/Z, [x21, #0x6, MUL VL]\n"
- "addvl x21, x21, #8\n"
+ ".inst 0xa04346e4 // ld1w { z4.s-z5.s }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ "addvl x23, x23, #8\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x80820240 // fmopa za0.s, p0/M, p0/M, z18.s, z2.s\n"
- ".inst 0x80830241 // fmopa za1.s, p0/M, p0/M, z18.s, z3.s\n"
- ".inst 0x80820262 // fmopa za2.s, p0/M, p0/M, z19.s, z2.s\n"
- ".inst 0x80830263 // fmopa za3.s, p0/M, p0/M, z19.s, z3.s\n"
- ".inst 0x809a0080 // fmopa za0.s, p0/M, p0/M, z4.s, z26.s\n"
- ".inst 0x809b0081 // fmopa za1.s, p0/M, p0/M, z4.s, z27.s\n"
- ".inst 0x809a00a2 // fmopa za2.s, p0/M, p0/M, z5.s, z26.s\n"
- ".inst 0x809b00a3 // fmopa za3.s, p0/M, p0/M, z5.s, z27.s\n"
- ".inst 0x80940140 // fmopa za0.s, p0/M, p0/M, z10.s, z20.s\n"
- ".inst 0x80950141 // fmopa za1.s, p0/M, p0/M, z10.s, z21.s\n"
- ".inst 0x80940162 // fmopa za2.s, p0/M, p0/M, z11.s, z20.s\n"
- ".inst 0x80950163 // fmopa za3.s, p0/M, p0/M, z11.s, z21.s\n"
- ".inst 0x808800c0 // fmopa za0.s, p0/M, p0/M, z6.s, z8.s\n"
- ".inst 0x808900c1 // fmopa za1.s, p0/M, p0/M, z6.s, z9.s\n"
- ".inst 0x808800e2 // fmopa za2.s, p0/M, p0/M, z7.s, z8.s\n"
- ".inst 0x808900e3 // fmopa za3.s, p0/M, p0/M, z7.s, z9.s\n"
+ ".inst 0x808702c0 // fmopa za0.s, p0/M, p0/M, z22.s, z7.s\n"
+ ".inst 0x808f02c1 // fmopa za1.s, p0/M, p0/M, z22.s, z15.s\n"
+ ".inst 0x808702e2 // fmopa za2.s, p0/M, p0/M, z23.s, z7.s\n"
+ ".inst 0x808f02e3 // fmopa za3.s, p0/M, p0/M, z23.s, z15.s\n"
+ ".inst 0x809400c0 // fmopa za0.s, p0/M, p0/M, z6.s, z20.s\n"
+ ".inst 0x809500c1 // fmopa za1.s, p0/M, p0/M, z6.s, z21.s\n"
+ ".inst 0x809401c2 // fmopa za2.s, p0/M, p0/M, z14.s, z20.s\n"
+ ".inst 0x809501c3 // fmopa za3.s, p0/M, p0/M, z14.s, z21.s\n"
+ ".inst 0x80830040 // fmopa za0.s, p0/M, p0/M, z2.s, z3.s\n"
+ ".inst 0x808b0041 // fmopa za1.s, p0/M, p0/M, z2.s, z11.s\n"
+ ".inst 0x80830142 // fmopa za2.s, p0/M, p0/M, z10.s, z3.s\n"
+ ".inst 0x808b0143 // fmopa za3.s, p0/M, p0/M, z10.s, z11.s\n"
+ ".inst 0x80840020 // fmopa za0.s, p0/M, p0/M, z1.s, z4.s\n"
+ ".inst 0x80850021 // fmopa za1.s, p0/M, p0/M, z1.s, z5.s\n"
+ ".inst 0x80840122 // fmopa za2.s, p0/M, p0/M, z9.s, z4.s\n"
+ ".inst 0x80850123 // fmopa za3.s, p0/M, p0/M, z9.s, z5.s\n"
"8:" // K oddments
- "cbz x22, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa040477e // ld1w { z30.s-z31.s }, pn9.b/Z, [x27]\n"
- "subs x22, x22, #0x1\n"
+ ".inst 0xa040476a // ld1w { z10.s-z11.s }, pn9.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #2\n"
- ".inst 0xa14046a5 // ld1w { z5.s, z13.s }, pn9.b/Z, [x21]\n"
- "addvl x21, x21, #2\n"
- ".inst 0x808503c0 // fmopa za0.s, p0/M, p0/M, z30.s, z5.s\n"
- ".inst 0x808d03c1 // fmopa za1.s, p0/M, p0/M, z30.s, z13.s\n"
- ".inst 0x808503e2 // fmopa za2.s, p0/M, p0/M, z31.s, z5.s\n"
- ".inst 0x808d03e3 // fmopa za3.s, p0/M, p0/M, z31.s, z13.s\n"
+ ".inst 0xa04046ee // ld1w { z14.s-z15.s }, pn9.b/Z, [x23]\n"
+ "addvl x23, x23, #2\n"
+ ".inst 0x808e0140 // fmopa za0.s, p0/M, p0/M, z10.s, z14.s\n"
+ ".inst 0x808f0141 // fmopa za1.s, p0/M, p0/M, z10.s, z15.s\n"
+ ".inst 0x808e0162 // fmopa za2.s, p0/M, p0/M, z11.s, z14.s\n"
+ ".inst 0x808f0163 // fmopa za3.s, p0/M, p0/M, z11.s, z15.s\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x16, #1, 14f\n"
@@ -239,24 +239,24 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
+ ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa060c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xa061c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
".inst 0xa063c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
@@ -265,31 +265,31 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
- ".inst 0xa060c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14]\n"
- ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
- ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa061c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 30f\n"
"14:" // Store to output array
"ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10, LSL #2\n" // C += n
"sub x25, x13, x11\n"
"ldr x24, [%x[args], %[offsetof_ldcb]]\n"
+ "add x26, x26, x10, LSL #2\n" // C += n
"madd x26, x11, x24, x26\n" // C += m * ldc
"tbz x16, #2, 21f\n"
"cntw x23\n"
+ "mov x12, #0x0\n"
"cmp x25, x23\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
@@ -297,36 +297,36 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
"add x26, x26, x24\n"
+ "add x12, x12, #0x4\n"
".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
"cbz x20, 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa1604354 // st1w { z20.s, z28.s }, p8, [x26]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xa1604340 // st1w { z0.s, z8.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xa1604355 // st1w { z21.s, z29.s }, p8, [x26]\n"
+ ".inst 0xa1604341 // st1w { z1.s, z9.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 17f\n"
- ".inst 0xa1604356 // st1w { z22.s, z30.s }, p8, [x26]\n"
+ ".inst 0xa1604342 // st1w { z2.s, z10.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 21f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 19f\n"
"18:" // Store to output array: Skip activation: Accumulator row 1 loop
@@ -334,28 +334,28 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
"add x26, x26, x24\n"
+ "add x12, x12, #0x4\n"
".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"blt 18b\n"
"19:" // Store to output array: Skip activation: Accumulator row 1 oddments
"cbz x20, 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
+ ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
+ ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 20f\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"20:" // Store to output array: Skip activation: Accumulator row 1 oddments: End
"subs x25, x25, x22\n"
@@ -363,37 +363,37 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
"b 28f\n"
"21:" // Store to output array: Skip activation: End
"cntw x23\n"
+ "ld1rw { z21.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
"cmp x25, x23\n"
- "ld1rw { z1.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "ld1rw { z20.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "ld1rw { z0.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 23f\n"
"22:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c83c // fclamp { z28.s-z31.s }, z1.s, z0.s\n"
- ".inst 0xa1604354 // st1w { z20.s, z28.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604355 // st1w { z21.s, z29.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xa1604356 // st1w { z22.s, z30.s }, p8, [x26]\n"
+ ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xa1604357 // st1w { z23.s, z31.s }, p8, [x26]\n"
+ ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"blt 22b\n"
"23:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 24f\n"
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
"subs x20, x20, #0x1\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 24f\n"
@@ -407,34 +407,34 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
"subs x25, x25, x22\n"
"beq 28f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x23, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 26f\n"
"25:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
- ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
+ ".inst 0xc1b4caa0 // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4caa8 // fclamp { z8.s-z11.s }, z21.s, z20.s\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
- "add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ ".inst 0xa1604340 // st1w { z0.s, z8.s }, p8, [x26]\n"
"add x26, x26, x24\n"
- ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
+ ".inst 0xa1604341 // st1w { z1.s, z9.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604342 // st1w { z2.s, z10.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604343 // st1w { z3.s, z11.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"blt 25b\n"
"26:" // Store to output array: Accumulator row 1 oddments
"cbz x20, 27f\n"
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
- ".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
"subs x20, x20, #0x1\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
"add x26, x26, x24\n"
"beq 27f\n"
@@ -449,25 +449,25 @@ void sme2_interleaved_nomerge_fp32_mopa_2VLx2VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"29:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 29b\n"
"30:" // End block
"incw x10, ALL, MUL #2\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp
index 165e25dd8f..453505a227 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
class cls_sme2_interleaved_nomerge_fp32_mopa_4VLx1VL
{
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const float *const A, const float *const B, float *const C, int ldc, const int M, const int N, const int K, const float *const bias, const Activation act, bool accumulate, float *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_fp32_mopa_4VLx1VL;
- StdTransformsSME<operand_type, result_type, 4, 1, 1> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 4, 1, 1> transforms = {};
cls_sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp
index ae1f812442..63f93b9b5b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
B(B), kstride_bytes(K * sizeof(float)),
C(C), ldcb(ldc * sizeof(float)),
M(M), N(N), K(K),
- n_loops((K - 1) / 2), n_tail_iters((K - 1) % 2),
min(-std::numeric_limits<float>::infinity()),
max(std::numeric_limits<float>::infinity()),
bias(bias),
@@ -88,12 +87,13 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
const long kstride_bytes;
float *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
float min = -std::numeric_limits<float>::infinity();
float max = std::numeric_limits<float>::infinity();
const float *const bias;
+
float *const accumulator_buffer;
uint64_t flags;
};
@@ -112,17 +112,17 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa042c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -137,101 +137,101 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "fmov z11.s, #1.0\n"
- "ldnt1w { z13.s }, p0/Z, [x20, x10, LSL #2]\n"
- ".inst 0x808d2560 // fmopa za0.s, p1/M, p1/M, z11.s, z13.s\n"
- ".inst 0x808d2561 // fmopa za1.s, p1/M, p1/M, z11.s, z13.s\n"
- ".inst 0x808d2562 // fmopa za2.s, p1/M, p1/M, z11.s, z13.s\n"
- ".inst 0x808d2563 // fmopa za3.s, p1/M, p1/M, z11.s, z13.s\n"
+ "fmov z6.s, #1.0\n"
+ "ld1w { z26.s }, p0/Z, [x20, x10, LSL #2]\n"
+ ".inst 0x809a24c0 // fmopa za0.s, p1/M, p1/M, z6.s, z26.s\n"
+ ".inst 0x809a24c1 // fmopa za1.s, p1/M, p1/M, z6.s, z26.s\n"
+ ".inst 0x809a24c2 // fmopa za2.s, p1/M, p1/M, z6.s, z26.s\n"
+ ".inst 0x809a24c3 // fmopa za3.s, p1/M, p1/M, z6.s, z26.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20\n"
"incw x21, ALL, MUL #4\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
- "lsr x23, x20, #0x2\n"
- "and x22, x20, #0x3\n"
- "ldr x21, [%x[args], %[offsetof_B]]\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x21, x10, x20, x21\n" // bptr = B + n * kstride_bytes
- "cbz x23, 8f\n"
- "subs x23, x23, #0x1\n"
- ".inst 0xa140c360 // ld1w { z0.s, z4.s, z8.s, z12.s }, pn8.b/Z, [x27]\n"
- "ldnt1w { z19.s }, p1/Z, [x21]\n"
- ".inst 0xa141c371 // ld1w { z17.s, z21.s, z25.s, z29.s }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1w { z22.s }, p1/Z, [x21, #1, MUL VL]\n"
- ".inst 0xa142c370 // ld1w { z16.s, z20.s, z24.s, z28.s }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1w { z23.s }, p1/Z, [x21, #2, MUL VL]\n"
- ".inst 0xa143c363 // ld1w { z3.s, z7.s, z11.s, z15.s }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa140c363 // ld1w { z3.s, z7.s, z11.s, z15.s }, pn8.b/Z, [x27]\n"
+ "ld1w { z13.s }, p1/Z, [x23]\n"
+ ".inst 0xa141c372 // ld1w { z18.s, z22.s, z26.s, z30.s }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "ld1w { z21.s }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa142c373 // ld1w { z19.s, z23.s, z27.s, z31.s }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa143c370 // ld1w { z16.s, z20.s, z24.s, z28.s }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1w { z2.s }, p1/Z, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #4\n"
+ "ld1w { z2.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0x80932400 // fmopa za0.s, p1/M, p1/M, z0.s, z19.s\n"
- "subs x23, x23, #0x1\n"
- ".inst 0x80932481 // fmopa za1.s, p1/M, p1/M, z4.s, z19.s\n"
- ".inst 0x80932502 // fmopa za2.s, p1/M, p1/M, z8.s, z19.s\n"
- ".inst 0x80932583 // fmopa za3.s, p1/M, p1/M, z12.s, z19.s\n"
- ".inst 0xa140c360 // ld1w { z0.s, z4.s, z8.s, z12.s }, pn8.b/Z, [x27]\n"
- ".inst 0x80962620 // fmopa za0.s, p1/M, p1/M, z17.s, z22.s\n"
- "ldnt1w { z19.s }, p1/Z, [x21]\n"
- ".inst 0x809626a1 // fmopa za1.s, p1/M, p1/M, z21.s, z22.s\n"
- ".inst 0x80962722 // fmopa za2.s, p1/M, p1/M, z25.s, z22.s\n"
- ".inst 0x809627a3 // fmopa za3.s, p1/M, p1/M, z29.s, z22.s\n"
- ".inst 0xa141c371 // ld1w { z17.s, z21.s, z25.s, z29.s }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0x80972600 // fmopa za0.s, p1/M, p1/M, z16.s, z23.s\n"
- "ldnt1w { z22.s }, p1/Z, [x21, #1, MUL VL]\n"
- ".inst 0x80972681 // fmopa za1.s, p1/M, p1/M, z20.s, z23.s\n"
- ".inst 0x80972702 // fmopa za2.s, p1/M, p1/M, z24.s, z23.s\n"
- ".inst 0x80972783 // fmopa za3.s, p1/M, p1/M, z28.s, z23.s\n"
- ".inst 0xa142c370 // ld1w { z16.s, z20.s, z24.s, z28.s }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1w { z23.s }, p1/Z, [x21, #2, MUL VL]\n"
- ".inst 0x80822460 // fmopa za0.s, p1/M, p1/M, z3.s, z2.s\n"
- ".inst 0x808224e1 // fmopa za1.s, p1/M, p1/M, z7.s, z2.s\n"
- ".inst 0x80822562 // fmopa za2.s, p1/M, p1/M, z11.s, z2.s\n"
- ".inst 0x808225e3 // fmopa za3.s, p1/M, p1/M, z15.s, z2.s\n"
- ".inst 0xa143c363 // ld1w { z3.s, z7.s, z11.s, z15.s }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0x808d2460 // fmopa za0.s, p1/M, p1/M, z3.s, z13.s\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0x808d24e1 // fmopa za1.s, p1/M, p1/M, z7.s, z13.s\n"
+ ".inst 0x808d2562 // fmopa za2.s, p1/M, p1/M, z11.s, z13.s\n"
+ ".inst 0x808d25e3 // fmopa za3.s, p1/M, p1/M, z15.s, z13.s\n"
+ ".inst 0xa140c363 // ld1w { z3.s, z7.s, z11.s, z15.s }, pn8.b/Z, [x27]\n"
+ ".inst 0x80952640 // fmopa za0.s, p1/M, p1/M, z18.s, z21.s\n"
+ "ld1w { z13.s }, p1/Z, [x23]\n"
+ ".inst 0x809526c1 // fmopa za1.s, p1/M, p1/M, z22.s, z21.s\n"
+ ".inst 0x80952742 // fmopa za2.s, p1/M, p1/M, z26.s, z21.s\n"
+ ".inst 0x809527c3 // fmopa za3.s, p1/M, p1/M, z30.s, z21.s\n"
+ ".inst 0xa141c372 // ld1w { z18.s, z22.s, z26.s, z30.s }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0x80912660 // fmopa za0.s, p1/M, p1/M, z19.s, z17.s\n"
+ "ld1w { z21.s }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0x809126e1 // fmopa za1.s, p1/M, p1/M, z23.s, z17.s\n"
+ ".inst 0x80912762 // fmopa za2.s, p1/M, p1/M, z27.s, z17.s\n"
+ ".inst 0x809127e3 // fmopa za3.s, p1/M, p1/M, z31.s, z17.s\n"
+ ".inst 0xa142c373 // ld1w { z19.s, z23.s, z27.s, z31.s }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0x80822600 // fmopa za0.s, p1/M, p1/M, z16.s, z2.s\n"
+ ".inst 0x80822681 // fmopa za1.s, p1/M, p1/M, z20.s, z2.s\n"
+ ".inst 0x80822702 // fmopa za2.s, p1/M, p1/M, z24.s, z2.s\n"
+ ".inst 0x80822783 // fmopa za3.s, p1/M, p1/M, z28.s, z2.s\n"
+ ".inst 0xa143c370 // ld1w { z16.s, z20.s, z24.s, z28.s }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1w { z2.s }, p1/Z, [x21, #3, MUL VL]\n"
- "addvl x21, x21, #4\n"
+ "ld1w { z2.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0x80932400 // fmopa za0.s, p1/M, p1/M, z0.s, z19.s\n"
- ".inst 0x80932481 // fmopa za1.s, p1/M, p1/M, z4.s, z19.s\n"
- ".inst 0x80932502 // fmopa za2.s, p1/M, p1/M, z8.s, z19.s\n"
- ".inst 0x80932583 // fmopa za3.s, p1/M, p1/M, z12.s, z19.s\n"
- ".inst 0x80962620 // fmopa za0.s, p1/M, p1/M, z17.s, z22.s\n"
- ".inst 0x809626a1 // fmopa za1.s, p1/M, p1/M, z21.s, z22.s\n"
- ".inst 0x80962722 // fmopa za2.s, p1/M, p1/M, z25.s, z22.s\n"
- ".inst 0x809627a3 // fmopa za3.s, p1/M, p1/M, z29.s, z22.s\n"
- ".inst 0x80972600 // fmopa za0.s, p1/M, p1/M, z16.s, z23.s\n"
- ".inst 0x80972681 // fmopa za1.s, p1/M, p1/M, z20.s, z23.s\n"
- ".inst 0x80972702 // fmopa za2.s, p1/M, p1/M, z24.s, z23.s\n"
- ".inst 0x80972783 // fmopa za3.s, p1/M, p1/M, z28.s, z23.s\n"
- ".inst 0x80822460 // fmopa za0.s, p1/M, p1/M, z3.s, z2.s\n"
- ".inst 0x808224e1 // fmopa za1.s, p1/M, p1/M, z7.s, z2.s\n"
- ".inst 0x80822562 // fmopa za2.s, p1/M, p1/M, z11.s, z2.s\n"
- ".inst 0x808225e3 // fmopa za3.s, p1/M, p1/M, z15.s, z2.s\n"
+ ".inst 0x808d2460 // fmopa za0.s, p1/M, p1/M, z3.s, z13.s\n"
+ ".inst 0x808d24e1 // fmopa za1.s, p1/M, p1/M, z7.s, z13.s\n"
+ ".inst 0x808d2562 // fmopa za2.s, p1/M, p1/M, z11.s, z13.s\n"
+ ".inst 0x808d25e3 // fmopa za3.s, p1/M, p1/M, z15.s, z13.s\n"
+ ".inst 0x80952640 // fmopa za0.s, p1/M, p1/M, z18.s, z21.s\n"
+ ".inst 0x809526c1 // fmopa za1.s, p1/M, p1/M, z22.s, z21.s\n"
+ ".inst 0x80952742 // fmopa za2.s, p1/M, p1/M, z26.s, z21.s\n"
+ ".inst 0x809527c3 // fmopa za3.s, p1/M, p1/M, z30.s, z21.s\n"
+ ".inst 0x80912660 // fmopa za0.s, p1/M, p1/M, z19.s, z17.s\n"
+ ".inst 0x809126e1 // fmopa za1.s, p1/M, p1/M, z23.s, z17.s\n"
+ ".inst 0x80912762 // fmopa za2.s, p1/M, p1/M, z27.s, z17.s\n"
+ ".inst 0x809127e3 // fmopa za3.s, p1/M, p1/M, z31.s, z17.s\n"
+ ".inst 0x80822600 // fmopa za0.s, p1/M, p1/M, z16.s, z2.s\n"
+ ".inst 0x80822681 // fmopa za1.s, p1/M, p1/M, z20.s, z2.s\n"
+ ".inst 0x80822702 // fmopa za2.s, p1/M, p1/M, z24.s, z2.s\n"
+ ".inst 0x80822783 // fmopa za3.s, p1/M, p1/M, z28.s, z2.s\n"
"8:" // K oddments
- "cbz x22, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa140c373 // ld1w { z19.s, z23.s, z27.s, z31.s }, pn8.b/Z, [x27]\n"
- "subs x22, x22, #0x1\n"
+ ".inst 0xa140c370 // ld1w { z16.s, z20.s, z24.s, z28.s }, pn8.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #4\n"
- "ld1w { z11.s }, p1/Z, [x21]\n"
- "addvl x21, x21, #1\n"
- ".inst 0x808b2660 // fmopa za0.s, p1/M, p1/M, z19.s, z11.s\n"
- ".inst 0x808b26e1 // fmopa za1.s, p1/M, p1/M, z23.s, z11.s\n"
- ".inst 0x808b2762 // fmopa za2.s, p1/M, p1/M, z27.s, z11.s\n"
- ".inst 0x808b27e3 // fmopa za3.s, p1/M, p1/M, z31.s, z11.s\n"
+ "ld1w { z2.s }, p1/Z, [x23]\n"
+ "addvl x23, x23, #1\n"
+ ".inst 0x80822600 // fmopa za0.s, p1/M, p1/M, z16.s, z2.s\n"
+ ".inst 0x80822681 // fmopa za1.s, p1/M, p1/M, z20.s, z2.s\n"
+ ".inst 0x80822702 // fmopa za2.s, p1/M, p1/M, z24.s, z2.s\n"
+ ".inst 0x80822783 // fmopa za3.s, p1/M, p1/M, z28.s, z2.s\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x16, #1, 14f\n"
@@ -239,25 +239,25 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
- ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa042c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa043c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
+ ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
+ ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa060c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14]\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 42f\n"
@@ -265,148 +265,148 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa060c1cc // st1w { z12.s-z15.s }, pn8.b, [x14]\n"
- ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
- ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa061c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
+ ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
+ ".inst 0xa060c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 42f\n"
"14:" // Store to output array
"ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10, LSL #2\n" // C += n
"sub x25, x13, x11\n"
"ldr x24, [%x[args], %[offsetof_ldcb]]\n"
+ "add x26, x26, x10, LSL #2\n" // C += n
"madd x26, x11, x24, x26\n" // C += m * ldc
"tbz x16, #2, 27f\n"
"cntw x23\n"
+ "mov x12, #0x0\n"
"cmp x25, x23\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Skip activation: Accumulator row 0 loop
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z12.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z7.s }, p0, [x26]\n"
+ "st1w { z13.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z14.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z15.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 15b\n"
"16:" // Store to output array: Skip activation: Accumulator row 0 oddments
"cbz x20, 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ "st1w { z0.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 17f\n"
"subs x20, x20, #0x1\n"
- "st1w { z5.s }, p0, [x26]\n"
+ "st1w { z1.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 17f\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z2.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"17:" // Store to output array: Skip activation: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 19f\n"
"18:" // Store to output array: Skip activation: Accumulator row 1 loop
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- "st1w { z8.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z9.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z10.s }, p0, [x26]\n"
+ "st1w { z12.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z11.s }, p0, [x26]\n"
+ "st1w { z13.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z14.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z15.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 18b\n"
"19:" // Store to output array: Skip activation: Accumulator row 1 oddments
"cbz x20, 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
- "st1w { z24.s }, p0, [x26]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 20f\n"
"subs x20, x20, #0x1\n"
- "st1w { z25.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 20f\n"
- "st1w { z26.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"20:" // Store to output array: Skip activation: Accumulator row 1 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 22f\n"
"21:" // Store to output array: Skip activation: Accumulator row 2 loop
- ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
- "st1w { z5.s }, p0, [x26]\n"
- "add x26, x26, x24\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z8.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z7.s }, p0, [x26]\n"
+ "st1w { z9.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z10.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z11.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 21b\n"
"22:" // Store to output array: Skip activation: Accumulator row 2 oddments
"cbz x20, 23f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- "st1w { z12.s }, p0, [x26]\n"
+ ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 23f\n"
"subs x20, x20, #0x1\n"
- "st1w { z13.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 23f\n"
- "st1w { z14.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"23:" // Store to output array: Skip activation: Accumulator row 2 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 25f\n"
"24:" // Store to output array: Skip activation: Accumulator row 3 loop
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ "add x12, x12, #0x4\n"
"st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
+ "cmp x12, x21, LSL #2\n"
"st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
"st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
"st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 24b\n"
@@ -429,63 +429,63 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
"b 40f\n"
"27:" // Store to output array: Skip activation: End
"cntw x23\n"
- "cmp x25, x23\n"
"ld1rw { z21.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
+ "cmp x25, x23\n"
+ "ld1rw { z20.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "ld1rw { z20.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 29f\n"
"28:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
- ".inst 0xc1b4cabc // fclamp { z28.s-z31.s }, z21.s, z20.s\n"
- "st1w { z28.s }, p0, [x26]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1w { z29.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
- "st1w { z30.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z31.s }, p0, [x26]\n"
+ "st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 28b\n"
"29:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 30f\n"
- ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1b4cabc // fclamp { z28.s-z31.s }, z21.s, z20.s\n"
- "st1w { z28.s }, p0, [x26]\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 30f\n"
"subs x20, x20, #0x1\n"
- "st1w { z29.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 30f\n"
- "st1w { z30.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"30:" // Store to output array: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 40f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 32f\n"
"31:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc1b4caa4 // fclamp { z4.s-z7.s }, z21.s, z20.s\n"
- "st1w { z4.s }, p0, [x26]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1w { z5.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
- "st1w { z7.s }, p0, [x26]\n"
+ "st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 31b\n"
"32:" // Store to output array: Accumulator row 1 oddments
@@ -506,100 +506,100 @@ void sme2_interleaved_nomerge_fp32_mopa_4VLx1VL(const float *const A, const floa
"subs x25, x25, x22\n"
"beq 40f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 35f\n"
"34:" // Store to output array: Accumulator row 2 loop
".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ "add x12, x12, #0x4\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "cmp x12, x21, LSL #2\n"
"st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
"st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
"st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 34b\n"
"35:" // Store to output array: Accumulator row 2 oddments
"cbz x20, 36f\n"
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ ".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
- "st1w { z16.s }, p0, [x26]\n"
+ ".inst 0xc1b4cabc // fclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ "st1w { z28.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 36f\n"
"subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
+ "st1w { z29.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 36f\n"
- "st1w { z18.s }, p0, [x26]\n"
+ "st1w { z30.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"36:" // Store to output array: Accumulator row 2 oddments: End
"subs x25, x25, x22\n"
"beq 40f\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x23, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 38f\n"
"37:" // Store to output array: Accumulator row 3 loop
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ "add x12, x12, #0x4\n"
".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "cmp x12, x21, LSL #2\n"
"st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "add x12, x12, #0x4\n"
"st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "cmp x12, x21, LSL #2\n"
"st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 37b\n"
"38:" // Store to output array: Accumulator row 3 oddments
"cbz x20, 39f\n"
- ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
- "st1w { z16.s }, p0, [x26]\n"
+ ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
+ "st1w { z12.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 39f\n"
"subs x20, x20, #0x1\n"
- "st1w { z17.s }, p0, [x26]\n"
+ "st1w { z13.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 39f\n"
- "st1w { z18.s }, p0, [x26]\n"
+ "st1w { z14.s }, p0, [x26]\n"
"39:" // Store to output array: Accumulator row 3 oddments: End
"40:" // Store to output array: End
"tbz x16, #0, 42f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"41:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 41b\n"
"42:" // End block
"incw x10\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp
index 7b3cc77867..b70bef3bbe 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
class cls_sme2_interleaved_nomerge_s8q_mopa_1VLx4VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int8_t result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
@@ -60,7 +61,7 @@ public:
static constexpr bool supports_accumulate()
{
- return false;
+ return true;
}
static constexpr bool supports_bias()
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8q_mopa_1VLx4VL;
- StdTransformsSME<operand_type, result_type, 1, 4, 4, true> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 1, 4, 4, true> transforms = {};
cls_sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
index aba677b158..56d1a13a72 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
C(C), ldcb(ldc * sizeof(int8_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+ min(0), max(0),
bias(bias), n_0(n_0),
accumulator_buffer(accumulator_buffer),
@@ -74,13 +74,14 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
const long kstride_bytes;
int8_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
- int32_t min = std::numeric_limits<int8_t>::min();
- int32_t max = std::numeric_limits<int8_t>::max();
+ const long M, N, K;
+ int32_t min;
+ int32_t max;
const int32_t *const bias;
const int n_0;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
@@ -89,131 +90,131 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x14, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
+ "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x14, #0, 2f\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13]\n"
- ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c5a8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
- ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa042c5a8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
- ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa043c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa041c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa042c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x13, x13, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w10, [%x[args], %[offsetof_M]]\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
+ "mov x10, #0x0\n"
"mov x9, #0x0\n"
- "mov x28, #0x0\n"
- "ldr w27, [%x[args], %[offsetof_N]]\n"
- "ldr x26, [%x[args], %[offsetof_A]]\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x25, x26\n"
- ".inst 0x25bb6790 // whilelt pn8.s, x28, x27, VLx4\n"
- "tbnz x14, #0, 4f\n"
+ "mov x26, x27\n"
+ ".inst 0x25bc6530 // whilelt pn8.s, x9, x28, VLx4\n"
+ "tbnz x15, #0, 4f\n"
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- ".inst 0xa11cc289 // ldnt1w { z1.s, z5.s, z9.s, z13.s }, p8/Z, [x20, x28, LSL #2]\n"
- ".inst 0xc0902420 // addha za0.s, p1/M, p1/M, z1.s\n"
- ".inst 0xc09024a1 // addha za1.s, p1/M, p1/M, z5.s\n"
- ".inst 0xc0902522 // addha za2.s, p1/M, p1/M, z9.s\n"
- ".inst 0xc09025a3 // addha za3.s, p1/M, p1/M, z13.s\n"
+ ".inst 0xa009c290 // ld1w { z16.s-z19.s }, p8/Z, [x20, x9, LSL #2]\n"
+ ".inst 0xc0902600 // addha za0.s, p1/M, p1/M, z16.s\n"
+ ".inst 0xc0902621 // addha za1.s, p1/M, p1/M, z17.s\n"
+ ".inst 0xc0902642 // addha za2.s, p1/M, p1/M, z18.s\n"
+ ".inst 0xc0902663 // addha za3.s, p1/M, p1/M, z19.s\n"
"4:" // Prepare accumulators: Test for last block
- "mov x20, x28\n"
- "mov x21, x9\n"
+ "mov x20, x9\n"
+ "mov x21, x10\n"
"incw x20, ALL, MUL #4\n"
"incw x21\n"
- "cmp x20, x27\n"
- "csel x21, x9, x21, LT\n"
- "mov x20, x14\n"
- "bfm x14, XZR, #0x0, #0x0 // bfc x14, #0x0, #0x1\n"
- "cmp x21, x10\n"
- "csel x14, x20, x14, LT\n"
+ "cmp x20, x28\n"
+ "mov x20, x15\n"
+ "csel x21, x10, x21, LT\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x21, x11\n"
+ "csel x15, x20, x15, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x28, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- "ld1b { z20.b }, p1/Z, [x25]\n"
- ".inst 0xa04086e5 // ldnt1b { z4.b-z7.b }, pn9.b/Z, [x23]\n"
- "ld1b { z11.b }, p1/Z, [x25, #1, MUL VL]\n"
- ".inst 0xa04186f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1b { z2.b }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0xa04286fd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1b { z14.b }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0xa04386f1 // ldnt1b { z16.b-z19.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x9, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z5.b }, p1/Z, [x26]\n"
+ ".inst 0xa14086e0 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x23]\n"
+ "ld1b { z31.b }, p1/Z, [x26, #1, MUL VL]\n"
+ ".inst 0xa14186f2 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1b { z1.b }, p1/Z, [x26, #2, MUL VL]\n"
+ ".inst 0xa14286f0 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1b { z6.b }, p1/Z, [x26, #3, MUL VL]\n"
+ "addvl x26, x26, #4\n"
+ ".inst 0xa14386e3 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa0842680 // smopa za0.s, p1/M, p1/M, z20.b, z4.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0852681 // smopa za1.s, p1/M, p1/M, z20.b, z5.b\n"
- ".inst 0xa0862682 // smopa za2.s, p1/M, p1/M, z20.b, z6.b\n"
- ".inst 0xa0872683 // smopa za3.s, p1/M, p1/M, z20.b, z7.b\n"
- "ld1b { z20.b }, p1/Z, [x25]\n"
- ".inst 0xa0982560 // smopa za0.s, p1/M, p1/M, z11.b, z24.b\n"
- ".inst 0xa04086e5 // ldnt1b { z4.b-z7.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa0992561 // smopa za1.s, p1/M, p1/M, z11.b, z25.b\n"
- ".inst 0xa09a2562 // smopa za2.s, p1/M, p1/M, z11.b, z26.b\n"
- ".inst 0xa09b2563 // smopa za3.s, p1/M, p1/M, z11.b, z27.b\n"
- "ld1b { z11.b }, p1/Z, [x25, #1, MUL VL]\n"
- ".inst 0xa09c2440 // smopa za0.s, p1/M, p1/M, z2.b, z28.b\n"
- ".inst 0xa04186f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa09d2441 // smopa za1.s, p1/M, p1/M, z2.b, z29.b\n"
- ".inst 0xa09e2442 // smopa za2.s, p1/M, p1/M, z2.b, z30.b\n"
- ".inst 0xa09f2443 // smopa za3.s, p1/M, p1/M, z2.b, z31.b\n"
- "ld1b { z2.b }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0xa04286fd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- ".inst 0xa09025c0 // smopa za0.s, p1/M, p1/M, z14.b, z16.b\n"
- ".inst 0xa09125c1 // smopa za1.s, p1/M, p1/M, z14.b, z17.b\n"
- ".inst 0xa09225c2 // smopa za2.s, p1/M, p1/M, z14.b, z18.b\n"
- ".inst 0xa09325c3 // smopa za3.s, p1/M, p1/M, z14.b, z19.b\n"
- "ld1b { z14.b }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0xa04386f1 // ldnt1b { z16.b-z19.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ ".inst 0xa08024a0 // smopa za0.s, p1/M, p1/M, z5.b, z0.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa08424a1 // smopa za1.s, p1/M, p1/M, z5.b, z4.b\n"
+ ".inst 0xa08824a2 // smopa za2.s, p1/M, p1/M, z5.b, z8.b\n"
+ ".inst 0xa08c24a3 // smopa za3.s, p1/M, p1/M, z5.b, z12.b\n"
+ "ld1b { z5.b }, p1/Z, [x26]\n"
+ ".inst 0xa09227e0 // smopa za0.s, p1/M, p1/M, z31.b, z18.b\n"
+ ".inst 0xa14086e0 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa09627e1 // smopa za1.s, p1/M, p1/M, z31.b, z22.b\n"
+ ".inst 0xa09a27e2 // smopa za2.s, p1/M, p1/M, z31.b, z26.b\n"
+ ".inst 0xa09e27e3 // smopa za3.s, p1/M, p1/M, z31.b, z30.b\n"
+ "ld1b { z31.b }, p1/Z, [x26, #1, MUL VL]\n"
+ ".inst 0xa0902420 // smopa za0.s, p1/M, p1/M, z1.b, z16.b\n"
+ ".inst 0xa14186f2 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0942421 // smopa za1.s, p1/M, p1/M, z1.b, z20.b\n"
+ ".inst 0xa0982422 // smopa za2.s, p1/M, p1/M, z1.b, z24.b\n"
+ ".inst 0xa09c2423 // smopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
+ "ld1b { z1.b }, p1/Z, [x26, #2, MUL VL]\n"
+ ".inst 0xa14286f0 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ ".inst 0xa08324c0 // smopa za0.s, p1/M, p1/M, z6.b, z3.b\n"
+ ".inst 0xa08724c1 // smopa za1.s, p1/M, p1/M, z6.b, z7.b\n"
+ ".inst 0xa08b24c2 // smopa za2.s, p1/M, p1/M, z6.b, z11.b\n"
+ ".inst 0xa08f24c3 // smopa za3.s, p1/M, p1/M, z6.b, z15.b\n"
+ "ld1b { z6.b }, p1/Z, [x26, #3, MUL VL]\n"
+ "addvl x26, x26, #4\n"
+ ".inst 0xa14386e3 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa0842680 // smopa za0.s, p1/M, p1/M, z20.b, z4.b\n"
- ".inst 0xa0852681 // smopa za1.s, p1/M, p1/M, z20.b, z5.b\n"
- ".inst 0xa0862682 // smopa za2.s, p1/M, p1/M, z20.b, z6.b\n"
- ".inst 0xa0872683 // smopa za3.s, p1/M, p1/M, z20.b, z7.b\n"
- ".inst 0xa0982560 // smopa za0.s, p1/M, p1/M, z11.b, z24.b\n"
- ".inst 0xa0992561 // smopa za1.s, p1/M, p1/M, z11.b, z25.b\n"
- ".inst 0xa09a2562 // smopa za2.s, p1/M, p1/M, z11.b, z26.b\n"
- ".inst 0xa09b2563 // smopa za3.s, p1/M, p1/M, z11.b, z27.b\n"
- ".inst 0xa09c2440 // smopa za0.s, p1/M, p1/M, z2.b, z28.b\n"
- ".inst 0xa09d2441 // smopa za1.s, p1/M, p1/M, z2.b, z29.b\n"
- ".inst 0xa09e2442 // smopa za2.s, p1/M, p1/M, z2.b, z30.b\n"
- ".inst 0xa09f2443 // smopa za3.s, p1/M, p1/M, z2.b, z31.b\n"
- ".inst 0xa09025c0 // smopa za0.s, p1/M, p1/M, z14.b, z16.b\n"
- ".inst 0xa09125c1 // smopa za1.s, p1/M, p1/M, z14.b, z17.b\n"
- ".inst 0xa09225c2 // smopa za2.s, p1/M, p1/M, z14.b, z18.b\n"
- ".inst 0xa09325c3 // smopa za3.s, p1/M, p1/M, z14.b, z19.b\n"
+ ".inst 0xa08024a0 // smopa za0.s, p1/M, p1/M, z5.b, z0.b\n"
+ ".inst 0xa08424a1 // smopa za1.s, p1/M, p1/M, z5.b, z4.b\n"
+ ".inst 0xa08824a2 // smopa za2.s, p1/M, p1/M, z5.b, z8.b\n"
+ ".inst 0xa08c24a3 // smopa za3.s, p1/M, p1/M, z5.b, z12.b\n"
+ ".inst 0xa09227e0 // smopa za0.s, p1/M, p1/M, z31.b, z18.b\n"
+ ".inst 0xa09627e1 // smopa za1.s, p1/M, p1/M, z31.b, z22.b\n"
+ ".inst 0xa09a27e2 // smopa za2.s, p1/M, p1/M, z31.b, z26.b\n"
+ ".inst 0xa09e27e3 // smopa za3.s, p1/M, p1/M, z31.b, z30.b\n"
+ ".inst 0xa0902420 // smopa za0.s, p1/M, p1/M, z1.b, z16.b\n"
+ ".inst 0xa0942421 // smopa za1.s, p1/M, p1/M, z1.b, z20.b\n"
+ ".inst 0xa0982422 // smopa za2.s, p1/M, p1/M, z1.b, z24.b\n"
+ ".inst 0xa09c2423 // smopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
+ ".inst 0xa08324c0 // smopa za0.s, p1/M, p1/M, z6.b, z3.b\n"
+ ".inst 0xa08724c1 // smopa za1.s, p1/M, p1/M, z6.b, z7.b\n"
+ ".inst 0xa08b24c2 // smopa za2.s, p1/M, p1/M, z6.b, z11.b\n"
+ ".inst 0xa08f24c3 // smopa za3.s, p1/M, p1/M, z6.b, z15.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1b { z16.b }, p1/Z, [x25]\n"
- "subs x21, x21, #0x1\n"
- "addvl x25, x25, #1\n"
+ "ld1b { z16.b }, p1/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #1\n"
".inst 0xa04086e4 // ld1b { z4.b-z7.b }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #4\n"
".inst 0xa0842600 // smopa za0.s, p1/M, p1/M, z16.b, z4.b\n"
@@ -222,182 +223,182 @@ void sme2_interleaved_nomerge_s8q_mopa_1VLx4VL(const int8_t *const A, const int8
".inst 0xa0872603 // smopa za3.s, p1/M, p1/M, z16.b, z7.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- "ld1w { z15.s }, p1/Z, [x25]\n"
- "addvl x25, x25, #1\n"
+ "ld1w { z15.s }, p1/Z, [x26]\n"
+ "addvl x26, x26, #1\n"
".inst 0xc09125e0 // addva za0.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e1 // addva za1.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e2 // addva za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e3 // addva za3.s, p1/M, p1/M, z15.s\n"
- "tbz x14, #1, 14f\n"
- "tbz x14, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5a0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x13]\n"
- ".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
- ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c5a0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
- ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa040c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa042c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa060c5bc // st1w { z28.s-z31.s }, pn9.b, [x13]\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa061c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5b4 // st1w { z20.s-z23.s }, pn9.b, [x13, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c578 // st1w { z24.s-z27.s }, pn9.b, [x11]\n"
+ ".inst 0xa063c5b8 // st1w { z24.s-z27.s }, pn9.b, [x13, #0xc, MUL VL]\n"
"addvl x13, x13, #16\n"
- ".inst 0xa061c564 // st1w { z4.s-z7.s }, pn9.b, [x11, #0x4, MUL VL]\n"
- ".inst 0xa062c574 // st1w { z20.s-z23.s }, pn9.b, [x11, #0x8, MUL VL]\n"
- ".inst 0xa063c560 // st1w { z0.s-z3.s }, pn9.b, [x11, #0xc, MUL VL]\n"
- "addvl x11, x11, #16\n"
"blt 11b\n"
"b 21f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- ".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xa060c564 // st1w { z4.s-z7.s }, pn9.b, [x11]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
- ".inst 0xa061c574 // st1w { z20.s-z23.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c56c // st1w { z12.s-z15.s }, pn9.b, [x11, #0x8, MUL VL]\n"
- ".inst 0xa063c568 // st1w { z8.s-z11.s }, pn9.b, [x11, #0xc, MUL VL]\n"
- "addvl x11, x11, #16\n"
+ ".inst 0xa062c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 21f\n"
"14:" // Store to output array
- "ldr x24, [%x[args], %[offsetof_C]]\n"
- "add x24, x24, x28\n" // C += n
- "sub x23, x10, x9\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "sub x24, x11, x10\n"
"ld1rw { z4.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
- "madd x24, x9, x22, x24\n" // C += m * ldc
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
"ld1rw { z5.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ld1rw { z6.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "add x25, x25, x9\n" // C += n
"ld1rw { z7.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z12.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z13.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z14.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z15.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
+ "ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "ld1rw { z3.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "ld1rw { z8.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
"ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
- "tbz x14, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x28\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa040c284 // ld1w { z4.s-z7.s }, p8/Z, [x20]\n"
+ "tbz x15, #2, 15f\n"
+ "ldr w22, [%x[args], %[offsetof_n_0]]\n"
+ "ldr x21, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
"ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa040c28c // ld1w { z12.s-z15.s }, p8/Z, [x20]\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x22, LSL #2\n"
+ "add x20, x20, x22, LSL #2\n"
+ ".inst 0xa040c2a4 // ld1w { z4.s-z7.s }, p8/Z, [x21]\n"
+ ".inst 0xa040c280 // ld1w { z0.s-z3.s }, p8/Z, [x20]\n"
"15:" // Store to output array: Load per-channel parameters: End
"cntw x20\n"
- "whilelt p0.b, x28, x27\n"
- "cmp x23, x20\n"
- "csel x20, x23, x20, LT\n"
- "lsr x21, x20, #0x1\n"
+ "whilelt p0.b, x9, x28\n"
+ "cmp x24, x20\n"
"mov x12, #0x0\n"
+ "csel x20, x24, x20, LT\n"
+ "lsr x21, x20, #0x1\n"
"and x20, x20, #0x1\n"
"cbz x21, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc086001a // mova { z26.s-z27.s }, za0h.s[x12, 0:1]\n"
- ".inst 0xc086005c // mova { z28.s-z29.s }, za1h.s[x12, 0:1]\n"
- ".inst 0xc1a4a41a // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z4.s\n"
- ".inst 0xc0860096 // mova { z22.s-z23.s }, za2h.s[x12, 0:1]\n"
- ".inst 0xc08600d0 // mova { z16.s-z17.s }, za3h.s[x12, 0:1]\n"
- ".inst 0xc1a5a41c // sqdmulh { z28.s-z29.s }, { z28.s-z29.s }, z5.s\n"
- ".inst 0xc1a6a416 // sqdmulh { z22.s-z23.s }, { z22.s-z23.s }, z6.s\n"
+ ".inst 0xc0860010 // mova { z16.s-z17.s }, za0h.s[x12, 0:1]\n"
+ ".inst 0xc086005e // mova { z30.s-z31.s }, za1h.s[x12, 0:1]\n"
+ ".inst 0xc086009a // mova { z26.s-z27.s }, za2h.s[x12, 0:1]\n"
+ ".inst 0xc08600cc // mova { z12.s-z13.s }, za3h.s[x12, 0:1]\n"
+ ".inst 0xc1a4a410 // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z4.s\n"
+ ".inst 0xc1a5a41e // sqdmulh { z30.s-z31.s }, { z30.s-z31.s }, z5.s\n"
"add x12, x12, #0x2\n"
+ ".inst 0xc1a6a41a // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z6.s\n"
"cmp x12, x21, LSL #1\n"
- ".inst 0xc1a7a410 // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z7.s\n"
- ".inst 0xc1aca23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z12.s\n"
- ".inst 0xc1ada23c // srshl { z28.s-z29.s }, { z28.s-z29.s }, z13.s\n"
- ".inst 0xc1aea236 // srshl { z22.s-z23.s }, { z22.s-z23.s }, z14.s\n"
- ".inst 0xc1afa230 // srshl { z16.s-z17.s }, { z16.s-z17.s }, z15.s\n"
- ".inst 0xc1a0a31a // add { z26.s-z27.s }, { z26.s-z27.s }, z0.s\n"
- ".inst 0xc1a0a31c // add { z28.s-z29.s }, { z28.s-z29.s }, z0.s\n"
- ".inst 0xc1a0a316 // add { z22.s-z23.s }, { z22.s-z23.s }, z0.s\n"
- ".inst 0xc1a0a310 // add { z16.s-z17.s }, { z16.s-z17.s }, z0.s\n"
- ".inst 0xc1b4c6ba // sclamp { z26.s-z27.s }, z21.s, z20.s\n"
- ".inst 0xc1b4c6bc // sclamp { z28.s-z29.s }, z21.s, z20.s\n"
- "uzp1 z19.b, z26.b, z28.b\n"
- ".inst 0xc1b4c6b6 // sclamp { z22.s-z23.s }, z21.s, z20.s\n"
+ ".inst 0xc1a7a40c // sqdmulh { z12.s-z13.s }, { z12.s-z13.s }, z7.s\n"
+ ".inst 0xc1a0a230 // srshl { z16.s-z17.s }, { z16.s-z17.s }, z0.s\n"
+ ".inst 0xc1a1a23e // srshl { z30.s-z31.s }, { z30.s-z31.s }, z1.s\n"
+ ".inst 0xc1a2a23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z2.s\n"
+ ".inst 0xc1a3a22c // srshl { z12.s-z13.s }, { z12.s-z13.s }, z3.s\n"
+ ".inst 0xc1a8a310 // add { z16.s-z17.s }, { z16.s-z17.s }, z8.s\n"
+ ".inst 0xc1a8a31e // add { z30.s-z31.s }, { z30.s-z31.s }, z8.s\n"
+ ".inst 0xc1a8a31a // add { z26.s-z27.s }, { z26.s-z27.s }, z8.s\n"
+ ".inst 0xc1a8a30c // add { z12.s-z13.s }, { z12.s-z13.s }, z8.s\n"
".inst 0xc1b4c6b0 // sclamp { z16.s-z17.s }, z21.s, z20.s\n"
- "uzp1 z16.b, z22.b, z16.b\n"
- "uzp1 z18.b, z27.b, z29.b\n"
- "uzp1 z17.b, z23.b, z17.b\n"
- "uzp1 z16.b, z19.b, z16.b\n"
- "st1b { z16.b }, p0, [x24]\n"
- "add x24, x24, x22\n"
- "uzp1 z16.b, z18.b, z17.b\n"
- "st1b { z16.b }, p0, [x24]\n"
- "add x24, x24, x22\n"
+ ".inst 0xc1b4c6be // sclamp { z30.s-z31.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4c6ba // sclamp { z26.s-z27.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4c6ac // sclamp { z12.s-z13.s }, z21.s, z20.s\n"
+ "uzp1 z19.b, z16.b, z30.b\n"
+ "uzp1 z18.b, z17.b, z31.b\n"
+ "uzp1 z17.b, z26.b, z12.b\n"
+ "uzp1 z16.b, z27.b, z13.b\n"
+ "uzp1 z17.b, z19.b, z17.b\n"
+ "uzp1 z16.b, z18.b, z16.b\n"
+ "st1b { z17.b }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z16.b }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 18f\n"
".inst 0xc086000a // mova { z10.s-z11.s }, za0h.s[x12, 0:1]\n"
- ".inst 0xc0860058 // mova { z24.s-z25.s }, za1h.s[x12, 0:1]\n"
+ ".inst 0xc086005a // mova { z26.s-z27.s }, za1h.s[x12, 0:1]\n"
+ ".inst 0xc086008e // mova { z14.s-z15.s }, za2h.s[x12, 0:1]\n"
+ ".inst 0xc08600d6 // mova { z22.s-z23.s }, za3h.s[x12, 0:1]\n"
".inst 0xc1a4a40a // sqdmulh { z10.s-z11.s }, { z10.s-z11.s }, z4.s\n"
- ".inst 0xc086009a // mova { z26.s-z27.s }, za2h.s[x12, 0:1]\n"
- ".inst 0xc08600de // mova { z30.s-z31.s }, za3h.s[x12, 0:1]\n"
- ".inst 0xc1a5a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z5.s\n"
- ".inst 0xc1a6a41a // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z6.s\n"
- ".inst 0xc1a7a41e // sqdmulh { z30.s-z31.s }, { z30.s-z31.s }, z7.s\n"
- ".inst 0xc1aca22a // srshl { z10.s-z11.s }, { z10.s-z11.s }, z12.s\n"
- ".inst 0xc1ada238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z13.s\n"
- ".inst 0xc1aea23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z14.s\n"
- ".inst 0xc1afa23e // srshl { z30.s-z31.s }, { z30.s-z31.s }, z15.s\n"
- ".inst 0xc1a0a30a // add { z10.s-z11.s }, { z10.s-z11.s }, z0.s\n"
- ".inst 0xc1a0a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
- ".inst 0xc1a0a31a // add { z26.s-z27.s }, { z26.s-z27.s }, z0.s\n"
- ".inst 0xc1a0a31e // add { z30.s-z31.s }, { z30.s-z31.s }, z0.s\n"
+ ".inst 0xc1a5a41a // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z5.s\n"
+ ".inst 0xc1a6a40e // sqdmulh { z14.s-z15.s }, { z14.s-z15.s }, z6.s\n"
+ ".inst 0xc1a7a416 // sqdmulh { z22.s-z23.s }, { z22.s-z23.s }, z7.s\n"
+ ".inst 0xc1a0a22a // srshl { z10.s-z11.s }, { z10.s-z11.s }, z0.s\n"
+ ".inst 0xc1a1a23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z1.s\n"
+ ".inst 0xc1a2a22e // srshl { z14.s-z15.s }, { z14.s-z15.s }, z2.s\n"
+ ".inst 0xc1a3a236 // srshl { z22.s-z23.s }, { z22.s-z23.s }, z3.s\n"
+ ".inst 0xc1a8a30a // add { z10.s-z11.s }, { z10.s-z11.s }, z8.s\n"
+ ".inst 0xc1a8a31a // add { z26.s-z27.s }, { z26.s-z27.s }, z8.s\n"
+ ".inst 0xc1a8a30e // add { z14.s-z15.s }, { z14.s-z15.s }, z8.s\n"
+ ".inst 0xc1a8a316 // add { z22.s-z23.s }, { z22.s-z23.s }, z8.s\n"
".inst 0xc1b4c6aa // sclamp { z10.s-z11.s }, z21.s, z20.s\n"
- ".inst 0xc1b4c6b8 // sclamp { z24.s-z25.s }, z21.s, z20.s\n"
- "uzp1 z17.b, z10.b, z24.b\n"
".inst 0xc1b4c6ba // sclamp { z26.s-z27.s }, z21.s, z20.s\n"
- ".inst 0xc1b4c6be // sclamp { z30.s-z31.s }, z21.s, z20.s\n"
- "uzp1 z16.b, z26.b, z30.b\n"
+ ".inst 0xc1b4c6ae // sclamp { z14.s-z15.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4c6b6 // sclamp { z22.s-z23.s }, z21.s, z20.s\n"
+ "uzp1 z17.b, z10.b, z26.b\n"
+ "uzp1 z16.b, z14.b, z22.b\n"
"uzp1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p0, [x24]\n"
+ "st1b { z16.b }, p0, [x25]\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
"19:" // Store to output array: End
- "tbz x14, #0, 21f\n"
+ "tbz x15, #0, 21f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"20:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5bc // ld1w { z28.s-z31.s }, pn9.b/Z, [x13]\n"
- ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
- ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa043c5a0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa040c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa042c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x13, x13, #16\n"
"blt 20b\n"
"21:" // End block
- "incw x28, ALL, MUL #4\n"
- "cmp x28, x27\n"
+ "incw x9, ALL, MUL #4\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x9\n"
- "cmp x9, x10\n"
- "mov x28, #0x0\n"
- "mov x26, x25\n"
+ "incw x10\n"
+ "mov x9, #0x0\n"
+ "cmp x10, x11\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
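
Note: the store blocks above (labels 14-18) all apply the same per-element requantize sequence — sqdmulh by the per-layer or per-channel multiplier, srshl by the right shift, add of the c_offset, sclamp to [minval, maxval], then uzp1 narrowing to bytes. A scalar C++ sketch of that sequence, assuming the shift is supplied as a non-negative right-shift amount and omitting sqdmulh saturation (the function and parameter names are illustrative, not from the source):

    #include <algorithm>
    #include <cstdint>

    // Scalar model of the SME2 store-path requantization (illustrative only).
    inline int8_t requantize(int32_t acc, int32_t mul, int32_t shift,
                             int32_t c_offset, int32_t minval, int32_t maxval) {
      int64_t v = ((int64_t)acc * mul) >> 31;                  // sqdmulh: (2*a*b) >> 32
      if (shift > 0) v = (v + (1LL << (shift - 1))) >> shift;  // srshl: rounding right shift
      v += c_offset;                                           // add c_offset
      v = std::min<int64_t>(std::max<int64_t>(v, minval), maxval); // sclamp
      return (int8_t)v;                                        // uzp1 narrows to bytes
    }
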
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp
index 79990f72e5..68b43328a2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
class cls_sme2_interleaved_nomerge_s8q_mopa_2VLx2VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int8_t result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
@@ -60,7 +61,7 @@ public:
static constexpr bool supports_accumulate()
{
- return false;
+ return true;
}
static constexpr bool supports_bias()
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8q_mopa_2VLx2VL;
- StdTransformsSME<operand_type, result_type, 2, 2, 4, true> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 2, 2, 4, true> transforms = {};
cls_sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const CPUInfo *)
{
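
Note: the operand-typedef split in these header hunks lets a strategy describe different element types for the two GEMM inputs; for the s8q kernels both sides remain int8_t. A minimal, hypothetical illustration of the new contract (ExampleStrategy is not from the source):

    #include <cstdint>
    #include <type_traits>

    struct ExampleStrategy {
      typedef int8_t lhs_operand_type; // was: typedef int8_t operand_type;
      typedef int8_t rhs_operand_type; // new: may differ for mixed-type kernels
    };

    static_assert(std::is_same<ExampleStrategy::lhs_operand_type,
                               ExampleStrategy::rhs_operand_type>::value,
                  "the s8q MOPA kernels use int8_t on both sides");
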
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
index 7033de5fe3..8831a224ad 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
C(C), ldcb(ldc * sizeof(int8_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+ min(0), max(0),
bias(bias), n_0(n_0),
accumulator_buffer(accumulator_buffer),
@@ -74,13 +74,14 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
const long kstride_bytes;
int8_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
- int32_t min = std::numeric_limits<int8_t>::min();
- int32_t max = std::numeric_limits<int8_t>::max();
+ const long M, N, K;
+ int32_t min;
+ int32_t max;
const int32_t *const bias;
const int n_0;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
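
Note: dropping n_loops and n_tail_iters from KernelArgs corresponds to the rewritten label-5 sequence below, where the kernel now derives its unrolled-loop and oddment counts from K directly in assembly. A sketch of that arithmetic, with illustrative names (k_blocks, k_main, and k_odd are not from the source):

    // "add x20, x20, #0x3; lsr x20, x20, #0x2" — round K up to groups of
    // four int8 values (the smopa reduction depth)
    long k_blocks = (K + 3) / 4;
    // "lsr x21, x20, #0x2" — iterations covered by the 4x-unrolled K loop
    long k_main = k_blocks >> 2;
    // "and x20, x20, #0x3" — remaining "oddment" iterations
    long k_odd = k_blocks & 3;
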
@@ -99,17 +100,17 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa041c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -124,108 +125,108 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- ".inst 0xa00a4299 // ldnt1w { z24.s-z25.s }, p8/Z, [x20, x10, LSL #2]\n"
- ".inst 0xc0902700 // addha za0.s, p1/M, p1/M, z24.s\n"
- ".inst 0xc0902721 // addha za1.s, p1/M, p1/M, z25.s\n"
- ".inst 0xc0902702 // addha za2.s, p1/M, p1/M, z24.s\n"
- ".inst 0xc0902723 // addha za3.s, p1/M, p1/M, z25.s\n"
+ ".inst 0xa10a4294 // ld1w { z20.s, z28.s }, p8/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xc0902680 // addha za0.s, p1/M, p1/M, z20.s\n"
+ ".inst 0xc0902781 // addha za1.s, p1/M, p1/M, z28.s\n"
+ ".inst 0xc0902682 // addha za2.s, p1/M, p1/M, z20.s\n"
+ ".inst 0xc0902783 // addha za3.s, p1/M, p1/M, z28.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20, ALL, MUL #2\n"
"incw x21, ALL, MUL #2\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1400763 // ld1b { z3.b, z11.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa14006f9 // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa1410774 // ld1b { z20.b, z28.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa04106f7 // ldnt1b { z22.b-z23.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa1420775 // ld1b { z21.b, z29.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206f8 // ldnt1b { z16.b, z24.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa1430765 // ld1b { z5.b, z13.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0400778 // ld1b { z24.b-z25.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa14006f7 // ld1b { z23.b, z31.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa1410776 // ld1b { z22.b, z30.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa04106fa // ld1b { z26.b-z27.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0420766 // ld1b { z6.b-z7.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14206e0 // ld1b { z0.b, z8.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa043077c // ld1b { z28.b-z29.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa14306ef // ldnt1b { z7.b, z15.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa04306ec // ld1b { z12.b-z13.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa0912460 // smopa za0.s, p1/M, p1/M, z3.b, z17.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0992461 // smopa za1.s, p1/M, p1/M, z3.b, z25.b\n"
- ".inst 0xa0912562 // smopa za2.s, p1/M, p1/M, z11.b, z17.b\n"
- ".inst 0xa0992563 // smopa za3.s, p1/M, p1/M, z11.b, z25.b\n"
- ".inst 0xa1400763 // ld1b { z3.b, z11.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa0962680 // smopa za0.s, p1/M, p1/M, z20.b, z22.b\n"
- ".inst 0xa14006f9 // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa0972681 // smopa za1.s, p1/M, p1/M, z20.b, z23.b\n"
- ".inst 0xa0962782 // smopa za2.s, p1/M, p1/M, z28.b, z22.b\n"
- ".inst 0xa0972783 // smopa za3.s, p1/M, p1/M, z28.b, z23.b\n"
- ".inst 0xa1410774 // ld1b { z20.b, z28.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa09026a0 // smopa za0.s, p1/M, p1/M, z21.b, z16.b\n"
- ".inst 0xa04106f7 // ldnt1b { z22.b-z23.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa09826a1 // smopa za1.s, p1/M, p1/M, z21.b, z24.b\n"
- ".inst 0xa09027a2 // smopa za2.s, p1/M, p1/M, z29.b, z16.b\n"
- ".inst 0xa09827a3 // smopa za3.s, p1/M, p1/M, z29.b, z24.b\n"
- ".inst 0xa1420775 // ld1b { z21.b, z29.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206f8 // ldnt1b { z16.b, z24.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa08724a0 // smopa za0.s, p1/M, p1/M, z5.b, z7.b\n"
- ".inst 0xa08f24a1 // smopa za1.s, p1/M, p1/M, z5.b, z15.b\n"
- ".inst 0xa08725a2 // smopa za2.s, p1/M, p1/M, z13.b, z7.b\n"
- ".inst 0xa08f25a3 // smopa za3.s, p1/M, p1/M, z13.b, z15.b\n"
- ".inst 0xa1430765 // ld1b { z5.b, z13.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ ".inst 0xa0972700 // smopa za0.s, p1/M, p1/M, z24.b, z23.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa09f2701 // smopa za1.s, p1/M, p1/M, z24.b, z31.b\n"
+ ".inst 0xa0972722 // smopa za2.s, p1/M, p1/M, z25.b, z23.b\n"
+ ".inst 0xa09f2723 // smopa za3.s, p1/M, p1/M, z25.b, z31.b\n"
+ ".inst 0xa0400778 // ld1b { z24.b-z25.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa09a26c0 // smopa za0.s, p1/M, p1/M, z22.b, z26.b\n"
+ ".inst 0xa14006f7 // ld1b { z23.b, z31.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa09b26c1 // smopa za1.s, p1/M, p1/M, z22.b, z27.b\n"
+ ".inst 0xa09a27c2 // smopa za2.s, p1/M, p1/M, z30.b, z26.b\n"
+ ".inst 0xa09b27c3 // smopa za3.s, p1/M, p1/M, z30.b, z27.b\n"
+ ".inst 0xa1410776 // ld1b { z22.b, z30.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa08024c0 // smopa za0.s, p1/M, p1/M, z6.b, z0.b\n"
+ ".inst 0xa04106fa // ld1b { z26.b-z27.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa08824c1 // smopa za1.s, p1/M, p1/M, z6.b, z8.b\n"
+ ".inst 0xa08024e2 // smopa za2.s, p1/M, p1/M, z7.b, z0.b\n"
+ ".inst 0xa08824e3 // smopa za3.s, p1/M, p1/M, z7.b, z8.b\n"
+ ".inst 0xa0420766 // ld1b { z6.b-z7.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14206e0 // ld1b { z0.b, z8.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa08c2780 // smopa za0.s, p1/M, p1/M, z28.b, z12.b\n"
+ ".inst 0xa08d2781 // smopa za1.s, p1/M, p1/M, z28.b, z13.b\n"
+ ".inst 0xa08c27a2 // smopa za2.s, p1/M, p1/M, z29.b, z12.b\n"
+ ".inst 0xa08d27a3 // smopa za3.s, p1/M, p1/M, z29.b, z13.b\n"
+ ".inst 0xa043077c // ld1b { z28.b-z29.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa14306ef // ldnt1b { z7.b, z15.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa04306ec // ld1b { z12.b-z13.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa0912460 // smopa za0.s, p1/M, p1/M, z3.b, z17.b\n"
- ".inst 0xa0992461 // smopa za1.s, p1/M, p1/M, z3.b, z25.b\n"
- ".inst 0xa0912562 // smopa za2.s, p1/M, p1/M, z11.b, z17.b\n"
- ".inst 0xa0992563 // smopa za3.s, p1/M, p1/M, z11.b, z25.b\n"
- ".inst 0xa0962680 // smopa za0.s, p1/M, p1/M, z20.b, z22.b\n"
- ".inst 0xa0972681 // smopa za1.s, p1/M, p1/M, z20.b, z23.b\n"
- ".inst 0xa0962782 // smopa za2.s, p1/M, p1/M, z28.b, z22.b\n"
- ".inst 0xa0972783 // smopa za3.s, p1/M, p1/M, z28.b, z23.b\n"
- ".inst 0xa09026a0 // smopa za0.s, p1/M, p1/M, z21.b, z16.b\n"
- ".inst 0xa09826a1 // smopa za1.s, p1/M, p1/M, z21.b, z24.b\n"
- ".inst 0xa09027a2 // smopa za2.s, p1/M, p1/M, z29.b, z16.b\n"
- ".inst 0xa09827a3 // smopa za3.s, p1/M, p1/M, z29.b, z24.b\n"
- ".inst 0xa08724a0 // smopa za0.s, p1/M, p1/M, z5.b, z7.b\n"
- ".inst 0xa08f24a1 // smopa za1.s, p1/M, p1/M, z5.b, z15.b\n"
- ".inst 0xa08725a2 // smopa za2.s, p1/M, p1/M, z13.b, z7.b\n"
- ".inst 0xa08f25a3 // smopa za3.s, p1/M, p1/M, z13.b, z15.b\n"
+ ".inst 0xa0972700 // smopa za0.s, p1/M, p1/M, z24.b, z23.b\n"
+ ".inst 0xa09f2701 // smopa za1.s, p1/M, p1/M, z24.b, z31.b\n"
+ ".inst 0xa0972722 // smopa za2.s, p1/M, p1/M, z25.b, z23.b\n"
+ ".inst 0xa09f2723 // smopa za3.s, p1/M, p1/M, z25.b, z31.b\n"
+ ".inst 0xa09a26c0 // smopa za0.s, p1/M, p1/M, z22.b, z26.b\n"
+ ".inst 0xa09b26c1 // smopa za1.s, p1/M, p1/M, z22.b, z27.b\n"
+ ".inst 0xa09a27c2 // smopa za2.s, p1/M, p1/M, z30.b, z26.b\n"
+ ".inst 0xa09b27c3 // smopa za3.s, p1/M, p1/M, z30.b, z27.b\n"
+ ".inst 0xa08024c0 // smopa za0.s, p1/M, p1/M, z6.b, z0.b\n"
+ ".inst 0xa08824c1 // smopa za1.s, p1/M, p1/M, z6.b, z8.b\n"
+ ".inst 0xa08024e2 // smopa za2.s, p1/M, p1/M, z7.b, z0.b\n"
+ ".inst 0xa08824e3 // smopa za3.s, p1/M, p1/M, z7.b, z8.b\n"
+ ".inst 0xa08c2780 // smopa za0.s, p1/M, p1/M, z28.b, z12.b\n"
+ ".inst 0xa08d2781 // smopa za1.s, p1/M, p1/M, z28.b, z13.b\n"
+ ".inst 0xa08c27a2 // smopa za2.s, p1/M, p1/M, z29.b, z12.b\n"
+ ".inst 0xa08d27a3 // smopa za3.s, p1/M, p1/M, z29.b, z13.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa1400773 // ld1b { z19.b, z27.b }, pn9.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa0400762 // ld1b { z2.b-z3.b }, pn9.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #2\n"
".inst 0xa04006f0 // ld1b { z16.b-z17.b }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #2\n"
- ".inst 0xa0902660 // smopa za0.s, p1/M, p1/M, z19.b, z16.b\n"
- ".inst 0xa0912661 // smopa za1.s, p1/M, p1/M, z19.b, z17.b\n"
- ".inst 0xa0902762 // smopa za2.s, p1/M, p1/M, z27.b, z16.b\n"
- ".inst 0xa0912763 // smopa za3.s, p1/M, p1/M, z27.b, z17.b\n"
+ ".inst 0xa0902440 // smopa za0.s, p1/M, p1/M, z2.b, z16.b\n"
+ ".inst 0xa0912441 // smopa za1.s, p1/M, p1/M, z2.b, z17.b\n"
+ ".inst 0xa0902462 // smopa za2.s, p1/M, p1/M, z3.b, z16.b\n"
+ ".inst 0xa0912463 // smopa za3.s, p1/M, p1/M, z3.b, z17.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- ".inst 0xa040476e // ld1w { z14.s-z15.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa1404767 // ld1w { z7.s, z15.s }, pn9.b/Z, [x27]\n"
"addvl x27, x27, #2\n"
- ".inst 0xc09125c0 // addva za0.s, p1/M, p1/M, z14.s\n"
- ".inst 0xc09125c1 // addva za1.s, p1/M, p1/M, z14.s\n"
+ ".inst 0xc09124e0 // addva za0.s, p1/M, p1/M, z7.s\n"
+ ".inst 0xc09124e1 // addva za1.s, p1/M, p1/M, z7.s\n"
".inst 0xc09125e2 // addva za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e3 // addva za3.s, p1/M, p1/M, z15.s\n"
"tbz x16, #1, 14f\n"
@@ -233,25 +234,25 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa041c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
- ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa060c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14]\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa061c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 24f\n"
@@ -259,71 +260,71 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa060c5cc // st1w { z12.s-z15.s }, pn9.b, [x14]\n"
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
- ".inst 0xa061c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5dc // st1w { z28.s-z31.s }, pn9.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 24f\n"
"14:" // Store to output array
"ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10\n" // C += n
"sub x25, x13, x11\n"
- "ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "ld1rw { z8.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ldr x24, [%x[args], %[offsetof_ldcb]]\n"
+ "ld1rw { z9.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "ld1rw { z10.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "add x26, x26, x10\n" // C += n
+ "ld1rw { z11.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"madd x26, x11, x24, x26\n" // C += m * ldc
- "ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z3.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z14.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
- "ld1rw { z25.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z24.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z15.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+ "ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
"tbz x16, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x10\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa0404280 // ld1w { z0.s-z1.s }, p8/Z, [x20]\n"
+ "ldr w22, [%x[args], %[offsetof_n_0]]\n"
+ "ldr x21, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
"ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa0404282 // ld1w { z2.s-z3.s }, p8/Z, [x20]\n"
+ "add x22, x22, x10\n"
+ "add x21, x21, x22, LSL #2\n"
+ "add x20, x20, x22, LSL #2\n"
+ ".inst 0xa04042a8 // ld1w { z8.s-z9.s }, p8/Z, [x21]\n"
+ ".inst 0xa040428a // ld1w { z10.s-z11.s }, p8/Z, [x20]\n"
"15:" // Store to output array: Load per-channel parameters: End
"cntw x23\n"
"whilelt p0.h, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xc1a0ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1a1ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z1.s\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
"add x12, x12, #0x4\n"
+ ".inst 0xc1a9ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a2aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
- ".inst 0xc1a3aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z3.s\n"
- ".inst 0xc1aeab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
- ".inst 0xc1aeab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z14.s\n"
- ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
- ".inst 0xc1b8cf28 // sclamp { z8.s-z11.s }, z25.s, z24.s\n"
- "uzp1 z16.h, z4.h, z8.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ ".inst 0xc1aaaa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
+ ".inst 0xc1abaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z11.s\n"
+ ".inst 0xc1afab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z15.s\n"
+ ".inst 0xc1afab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z15.s\n"
+ ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cea0 // sclamp { z0.s-z3.s }, z21.s, z20.s\n"
+ "uzp1 z19.h, z28.h, z0.h\n"
+ "uzp1 z18.h, z29.h, z1.h\n"
+ "uzp1 z17.h, z30.h, z2.h\n"
+ "uzp1 z16.h, z31.h, z3.h\n"
+ "st1b { z19.h }, p0, [x26]\n"
"add x26, x26, x24\n"
- "uzp1 z16.h, z5.h, z9.h\n"
- "uzp1 z17.h, z6.h, z10.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ "st1b { z18.h }, p0, [x26]\n"
"add x26, x26, x24\n"
- "uzp1 z16.h, z7.h, z11.h\n"
"st1b { z17.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1b { z16.h }, p0, [x26]\n"
@@ -331,60 +332,59 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 18f\n"
- ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc1a0ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z0.s\n"
- ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
+ ".inst 0xc1a8ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a2aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z2.s\n"
- ".inst 0xc1a3aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- ".inst 0xc1aeab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z14.s\n"
- ".inst 0xc1aeab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
- ".inst 0xc1b8cf28 // sclamp { z8.s-z11.s }, z25.s, z24.s\n"
- ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
- "uzp1 z16.h, z8.h, z4.h\n"
+ ".inst 0xc1a9ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z9.s\n"
+ ".inst 0xc1aaaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+ ".inst 0xc1abaa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
+ ".inst 0xc1afab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z15.s\n"
+ ".inst 0xc1afab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z15.s\n"
+ ".inst 0xc1b4cea4 // sclamp { z4.s-z7.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ "uzp1 z16.h, z4.h, z28.h\n"
"st1b { z16.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 18f\n"
"subs x20, x20, #0x1\n"
- "uzp1 z16.h, z9.h, z5.h\n"
+ "uzp1 z16.h, z5.h, z29.h\n"
"st1b { z16.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 18f\n"
- "uzp1 z16.h, z10.h, z6.h\n"
+ "uzp1 z16.h, z6.h, z30.h\n"
"st1b { z16.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 22f\n"
- "whilelt p0.h, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x23, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 20f\n"
"19:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
- ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
- ".inst 0xc1a0ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1a1ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z1.s\n"
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xc1a8ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
"add x12, x12, #0x4\n"
+ ".inst 0xc1a9ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z9.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a2aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
- ".inst 0xc1a3aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z3.s\n"
- ".inst 0xc1aeab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
- ".inst 0xc1aeab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z14.s\n"
- ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
- ".inst 0xc1b8cf34 // sclamp { z20.s-z23.s }, z25.s, z24.s\n"
- "uzp1 z16.h, z4.h, z20.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ ".inst 0xc1aaaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z10.s\n"
+ ".inst 0xc1abaa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
+ ".inst 0xc1afab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z15.s\n"
+ ".inst 0xc1afab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z15.s\n"
+ ".inst 0xc1b4cea0 // sclamp { z0.s-z3.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ "uzp1 z19.h, z0.h, z28.h\n"
+ "uzp1 z18.h, z1.h, z29.h\n"
+ "uzp1 z17.h, z2.h, z30.h\n"
+ "uzp1 z16.h, z3.h, z31.h\n"
+ "st1b { z19.h }, p0, [x26]\n"
"add x26, x26, x24\n"
- "uzp1 z16.h, z5.h, z21.h\n"
- "uzp1 z17.h, z6.h, z22.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ "st1b { z18.h }, p0, [x26]\n"
"add x26, x26, x24\n"
- "uzp1 z16.h, z7.h, z23.h\n"
"st1b { z17.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1b { z16.h }, p0, [x26]\n"
@@ -394,15 +394,15 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
"cbz x20, 21f\n"
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xc1a0ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1a1ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a8ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a2aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
- ".inst 0xc1a3aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
- ".inst 0xc1aeab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
- ".inst 0xc1aeab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z14.s\n"
- ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
- ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+ ".inst 0xc1a9ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
+ ".inst 0xc1aaaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+ ".inst 0xc1abaa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc1afab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z15.s\n"
+ ".inst 0xc1afab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z15.s\n"
+ ".inst 0xc1b4cea4 // sclamp { z4.s-z7.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4ceb0 // sclamp { z16.s-z19.s }, z21.s, z20.s\n"
"uzp1 z16.h, z4.h, z16.h\n"
"st1b { z16.h }, p0, [x26]\n"
"add x26, x26, x24\n"
@@ -420,25 +420,25 @@ void sme2_interleaved_nomerge_s8q_mopa_2VLx2VL(const int8_t *const A, const int8
"mov x12, #0x0\n"
"cntw x20\n"
"23:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 23b\n"
"24:" // End block
"incw x10, ALL, MUL #2\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp
index ef39cbbb28..51fc52f7b7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
class cls_sme2_interleaved_nomerge_s8q_mopa_4VLx1VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int8_t result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
@@ -60,7 +61,7 @@ public:
static constexpr bool supports_accumulate()
{
- return false;
+ return true;
}
static constexpr bool supports_bias()
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8q_mopa_4VLx1VL;
- StdTransformsSME<operand_type, result_type, 4, 1, 4, true> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 4, 1, 4, true> transforms = {};
cls_sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
index 4601f05501..df9a866b6d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
C(C), ldcb(ldc * sizeof(int8_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+ min(0), max(0),
bias(bias), n_0(n_0),
accumulator_buffer(accumulator_buffer),
@@ -74,13 +74,14 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
const long kstride_bytes;
int8_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
- int32_t min = std::numeric_limits<int8_t>::min();
- int32_t max = std::numeric_limits<int8_t>::max();
+ const long M, N, K;
+ int32_t min;
+ int32_t max;
const int32_t *const bias;
const int n_0;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
@@ -99,17 +100,17 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa042c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa040c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -124,95 +125,95 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "ldnt1w { z8.s }, p0/Z, [x20, x10, LSL #2]\n"
- ".inst 0xc0902500 // addha za0.s, p1/M, p1/M, z8.s\n"
- ".inst 0xc0902501 // addha za1.s, p1/M, p1/M, z8.s\n"
- ".inst 0xc0902502 // addha za2.s, p1/M, p1/M, z8.s\n"
- ".inst 0xc0902503 // addha za3.s, p1/M, p1/M, z8.s\n"
+ "ld1w { z6.s }, p0/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xc09024c0 // addha za0.s, p1/M, p1/M, z6.s\n"
+ ".inst 0xc09024c1 // addha za1.s, p1/M, p1/M, z6.s\n"
+ ".inst 0xc09024c2 // addha za2.s, p1/M, p1/M, z6.s\n"
+ ".inst 0xc09024c3 // addha za3.s, p1/M, p1/M, z6.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20\n"
"incw x21, ALL, MUL #4\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0408364 // ld1b { z4.b-z7.b }, pn8.b/Z, [x27]\n"
- "ldnt1b { z14.b }, p1/Z, [x23]\n"
- ".inst 0xa0418374 // ld1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1b { z31.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa0428378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z13.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa0438368 // ld1b { z8.b-z11.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1408360 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn8.b/Z, [x27]\n"
+ "ld1b { z29.b }, p1/Z, [x23]\n"
+ ".inst 0xa1418361 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "ld1b { z19.b }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa1428363 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa0438378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1b { z29.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1b { z31.b }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa08e2480 // smopa za0.s, p1/M, p1/M, z4.b, z14.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa08e24a1 // smopa za1.s, p1/M, p1/M, z5.b, z14.b\n"
- ".inst 0xa08e24c2 // smopa za2.s, p1/M, p1/M, z6.b, z14.b\n"
- ".inst 0xa08e24e3 // smopa za3.s, p1/M, p1/M, z7.b, z14.b\n"
- ".inst 0xa0408364 // ld1b { z4.b-z7.b }, pn8.b/Z, [x27]\n"
- ".inst 0xa09f2680 // smopa za0.s, p1/M, p1/M, z20.b, z31.b\n"
- "ldnt1b { z14.b }, p1/Z, [x23]\n"
- ".inst 0xa09f26a1 // smopa za1.s, p1/M, p1/M, z21.b, z31.b\n"
- ".inst 0xa09f26c2 // smopa za2.s, p1/M, p1/M, z22.b, z31.b\n"
- ".inst 0xa09f26e3 // smopa za3.s, p1/M, p1/M, z23.b, z31.b\n"
- ".inst 0xa0418374 // ld1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa08d2700 // smopa za0.s, p1/M, p1/M, z24.b, z13.b\n"
- "ldnt1b { z31.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa08d2721 // smopa za1.s, p1/M, p1/M, z25.b, z13.b\n"
- ".inst 0xa08d2742 // smopa za2.s, p1/M, p1/M, z26.b, z13.b\n"
- ".inst 0xa08d2763 // smopa za3.s, p1/M, p1/M, z27.b, z13.b\n"
- ".inst 0xa0428378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z13.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa09d2500 // smopa za0.s, p1/M, p1/M, z8.b, z29.b\n"
- ".inst 0xa09d2521 // smopa za1.s, p1/M, p1/M, z9.b, z29.b\n"
- ".inst 0xa09d2542 // smopa za2.s, p1/M, p1/M, z10.b, z29.b\n"
- ".inst 0xa09d2563 // smopa za3.s, p1/M, p1/M, z11.b, z29.b\n"
- ".inst 0xa0438368 // ld1b { z8.b-z11.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xa09d2400 // smopa za0.s, p1/M, p1/M, z0.b, z29.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa09d2481 // smopa za1.s, p1/M, p1/M, z4.b, z29.b\n"
+ ".inst 0xa09d2502 // smopa za2.s, p1/M, p1/M, z8.b, z29.b\n"
+ ".inst 0xa09d2583 // smopa za3.s, p1/M, p1/M, z12.b, z29.b\n"
+ ".inst 0xa1408360 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0932420 // smopa za0.s, p1/M, p1/M, z1.b, z19.b\n"
+ "ld1b { z29.b }, p1/Z, [x23]\n"
+ ".inst 0xa09324a1 // smopa za1.s, p1/M, p1/M, z5.b, z19.b\n"
+ ".inst 0xa0932522 // smopa za2.s, p1/M, p1/M, z9.b, z19.b\n"
+ ".inst 0xa09325a3 // smopa za3.s, p1/M, p1/M, z13.b, z19.b\n"
+ ".inst 0xa1418361 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0942460 // smopa za0.s, p1/M, p1/M, z3.b, z20.b\n"
+ "ld1b { z19.b }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa09424e1 // smopa za1.s, p1/M, p1/M, z7.b, z20.b\n"
+ ".inst 0xa0942562 // smopa za2.s, p1/M, p1/M, z11.b, z20.b\n"
+ ".inst 0xa09425e3 // smopa za3.s, p1/M, p1/M, z15.b, z20.b\n"
+ ".inst 0xa1428363 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa09f2700 // smopa za0.s, p1/M, p1/M, z24.b, z31.b\n"
+ ".inst 0xa09f2721 // smopa za1.s, p1/M, p1/M, z25.b, z31.b\n"
+ ".inst 0xa09f2742 // smopa za2.s, p1/M, p1/M, z26.b, z31.b\n"
+ ".inst 0xa09f2763 // smopa za3.s, p1/M, p1/M, z27.b, z31.b\n"
+ ".inst 0xa0438378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1b { z29.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1b { z31.b }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa08e2480 // smopa za0.s, p1/M, p1/M, z4.b, z14.b\n"
- ".inst 0xa08e24a1 // smopa za1.s, p1/M, p1/M, z5.b, z14.b\n"
- ".inst 0xa08e24c2 // smopa za2.s, p1/M, p1/M, z6.b, z14.b\n"
- ".inst 0xa08e24e3 // smopa za3.s, p1/M, p1/M, z7.b, z14.b\n"
- ".inst 0xa09f2680 // smopa za0.s, p1/M, p1/M, z20.b, z31.b\n"
- ".inst 0xa09f26a1 // smopa za1.s, p1/M, p1/M, z21.b, z31.b\n"
- ".inst 0xa09f26c2 // smopa za2.s, p1/M, p1/M, z22.b, z31.b\n"
- ".inst 0xa09f26e3 // smopa za3.s, p1/M, p1/M, z23.b, z31.b\n"
- ".inst 0xa08d2700 // smopa za0.s, p1/M, p1/M, z24.b, z13.b\n"
- ".inst 0xa08d2721 // smopa za1.s, p1/M, p1/M, z25.b, z13.b\n"
- ".inst 0xa08d2742 // smopa za2.s, p1/M, p1/M, z26.b, z13.b\n"
- ".inst 0xa08d2763 // smopa za3.s, p1/M, p1/M, z27.b, z13.b\n"
- ".inst 0xa09d2500 // smopa za0.s, p1/M, p1/M, z8.b, z29.b\n"
- ".inst 0xa09d2521 // smopa za1.s, p1/M, p1/M, z9.b, z29.b\n"
- ".inst 0xa09d2542 // smopa za2.s, p1/M, p1/M, z10.b, z29.b\n"
- ".inst 0xa09d2563 // smopa za3.s, p1/M, p1/M, z11.b, z29.b\n"
+ ".inst 0xa09d2400 // smopa za0.s, p1/M, p1/M, z0.b, z29.b\n"
+ ".inst 0xa09d2481 // smopa za1.s, p1/M, p1/M, z4.b, z29.b\n"
+ ".inst 0xa09d2502 // smopa za2.s, p1/M, p1/M, z8.b, z29.b\n"
+ ".inst 0xa09d2583 // smopa za3.s, p1/M, p1/M, z12.b, z29.b\n"
+ ".inst 0xa0932420 // smopa za0.s, p1/M, p1/M, z1.b, z19.b\n"
+ ".inst 0xa09324a1 // smopa za1.s, p1/M, p1/M, z5.b, z19.b\n"
+ ".inst 0xa0932522 // smopa za2.s, p1/M, p1/M, z9.b, z19.b\n"
+ ".inst 0xa09325a3 // smopa za3.s, p1/M, p1/M, z13.b, z19.b\n"
+ ".inst 0xa0942460 // smopa za0.s, p1/M, p1/M, z3.b, z20.b\n"
+ ".inst 0xa09424e1 // smopa za1.s, p1/M, p1/M, z7.b, z20.b\n"
+ ".inst 0xa0942562 // smopa za2.s, p1/M, p1/M, z11.b, z20.b\n"
+ ".inst 0xa09425e3 // smopa za3.s, p1/M, p1/M, z15.b, z20.b\n"
+ ".inst 0xa09f2700 // smopa za0.s, p1/M, p1/M, z24.b, z31.b\n"
+ ".inst 0xa09f2721 // smopa za1.s, p1/M, p1/M, z25.b, z31.b\n"
+ ".inst 0xa09f2742 // smopa za2.s, p1/M, p1/M, z26.b, z31.b\n"
+ ".inst 0xa09f2763 // smopa za3.s, p1/M, p1/M, z27.b, z31.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #4\n"
"ld1b { z15.b }, p1/Z, [x23]\n"
"addvl x23, x23, #1\n"
@@ -233,25 +234,25 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
- ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa042c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa060c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14]\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa061c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 30f\n"
@@ -259,56 +260,56 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa060c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14]\n"
- ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 30f\n"
"14:" // Store to output array
"ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10\n" // C += n
"sub x25, x13, x11\n"
"ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
"ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
- "ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+ "add x26, x26, x10\n" // C += n
+ "ld1rw { z25.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+ "madd x26, x11, x24, x26\n" // C += m * ldc
+ "ld1rw { z24.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
"tbz x16, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x10\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- "ld1w { z2.s }, p0/Z, [x20]\n"
+ "ldr w22, [%x[args], %[offsetof_n_0]]\n"
+ "ldr x21, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
"ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
+ "add x22, x22, x10\n"
+ "add x21, x21, x22, LSL #2\n"
+ "add x20, x20, x22, LSL #2\n"
+ "ld1w { z2.s }, p0/Z, [x21]\n"
"ld1w { z1.s }, p0/Z, [x20]\n"
"15:" // Store to output array: Load per-channel parameters: End
"cntw x23\n"
"whilelt p0.s, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
"add x12, x12, #0x4\n"
- ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
"cmp x12, x21, LSL #2\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
- ".inst 0xc1b4ceb0 // sclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
"st1b { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1b { z17.s }, p0, [x26]\n"
@@ -320,56 +321,55 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 18f\n"
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
- ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
- ".inst 0xc1b4ceb0 // sclamp { z16.s-z19.s }, z21.s, z20.s\n"
- "st1b { z16.s }, p0, [x26]\n"
+ ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
+ ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a0ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
+ "st1b { z4.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 18f\n"
"subs x20, x20, #0x1\n"
- "st1b { z17.s }, p0, [x26]\n"
+ "st1b { z5.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 18f\n"
- "st1b { z18.s }, p0, [x26]\n"
+ "st1b { z6.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 20f\n"
"19:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
"add x12, x12, #0x4\n"
- ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a0ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1b4cea4 // sclamp { z4.s-z7.s }, z21.s, z20.s\n"
- "st1b { z4.s }, p0, [x26]\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
+ ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+ "st1b { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z5.s }, p0, [x26]\n"
+ "st1b { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z6.s }, p0, [x26]\n"
+ "st1b { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z7.s }, p0, [x26]\n"
+ "st1b { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 19b\n"
"20:" // Store to output array: Accumulator row 1 oddments
"cbz x20, 21f\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
"subs x20, x20, #0x1\n"
+ ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
".inst 0xc1a0ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1b4cea4 // sclamp { z4.s-z7.s }, z21.s, z20.s\n"
+ ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
"st1b { z4.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 21f\n"
@@ -382,115 +382,113 @@ void sme2_interleaved_nomerge_s8q_mopa_4VLx1VL(const int8_t *const A, const int8
"21:" // Store to output array: Accumulator row 1 oddments: End
"subs x25, x25, x22\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 23f\n"
"22:" // Store to output array: Accumulator row 2 loop
- ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
- ".inst 0xc1a2ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z2.s\n"
+ ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
"add x12, x12, #0x4\n"
- ".inst 0xc1a1aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z1.s\n"
+ ".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a0ab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z0.s\n"
- ".inst 0xc1b4cea8 // sclamp { z8.s-z11.s }, z21.s, z20.s\n"
- "st1b { z8.s }, p0, [x26]\n"
+ ".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+ ".inst 0xc1a0ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc1b8cf2c // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
+ "st1b { z12.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z9.s }, p0, [x26]\n"
+ "st1b { z13.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z10.s }, p0, [x26]\n"
+ "st1b { z14.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z11.s }, p0, [x26]\n"
+ "st1b { z15.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 22b\n"
"23:" // Store to output array: Accumulator row 2 oddments
"cbz x20, 24f\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
+ ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
- ".inst 0xc1a0ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
- ".inst 0xc1b4ceac // sclamp { z12.s-z15.s }, z21.s, z20.s\n"
- "st1b { z12.s }, p0, [x26]\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
+ ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+ "st1b { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 24f\n"
"subs x20, x20, #0x1\n"
- "st1b { z13.s }, p0, [x26]\n"
+ "st1b { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 24f\n"
- "st1b { z14.s }, p0, [x26]\n"
+ "st1b { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"24:" // Store to output array: Accumulator row 2 oddments: End
"subs x25, x25, x22\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x23, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 26f\n"
"25:" // Store to output array: Accumulator row 3 loop
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xc1a2ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
"add x12, x12, #0x4\n"
- ".inst 0xc1a1aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a0ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
- ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
- "st1b { z28.s }, p0, [x26]\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
+ ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+ "st1b { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z29.s }, p0, [x26]\n"
+ "st1b { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z30.s }, p0, [x26]\n"
+ "st1b { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z31.s }, p0, [x26]\n"
+ "st1b { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 25b\n"
"26:" // Store to output array: Accumulator row 3 oddments
"cbz x20, 27f\n"
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xc1a2ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
+ ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a1aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
- ".inst 0xc1a0ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
- ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
- "st1b { z28.s }, p0, [x26]\n"
+ ".inst 0xc1a2ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z2.s\n"
+ ".inst 0xc1a1aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z1.s\n"
+ ".inst 0xc1a0ab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z0.s\n"
+ ".inst 0xc1b8cf34 // sclamp { z20.s-z23.s }, z25.s, z24.s\n"
+ "st1b { z20.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 27f\n"
"subs x20, x20, #0x1\n"
- "st1b { z29.s }, p0, [x26]\n"
+ "st1b { z21.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 27f\n"
- "st1b { z30.s }, p0, [x26]\n"
+ "st1b { z22.s }, p0, [x26]\n"
"27:" // Store to output array: Accumulator row 3 oddments: End
"28:" // Store to output array: End
"tbz x16, #0, 30f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"29:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 29b\n"
"30:" // End block
"incw x10\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp
index 7792192856..fe8f5383bd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL(const int8_t *const A, const
class cls_sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL;
- StdTransformsSME<operand_type, result_type, 1, 4, 4> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 1, 4, 4> transforms = {};
cls_sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL(const CPUInfo *)
{
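The operand_type -> lhs_operand_type/rhs_operand_type split above repeats for every kernel class touched by this patch. A minimal sketch of the new class shape (illustrative name, member set trimmed to the typedefs):

#include <cstdint>

class cls_example_mopa_kernel
{
public:
    // One typedef per GEMM operand instead of a shared operand_type, so the
    // surrounding templates can describe mixed-type LHS/RHS kernels; for the
    // s8 kernels in this patch both remain int8_t, and the StdTransformsSME
    // instantiation now keys on the LHS type.
    typedef int8_t lhs_operand_type;
    typedef int8_t rhs_operand_type;
    typedef float  result_type;
};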
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp
index 4b26a6578c..bf60c61fc0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp
@@ -40,7 +40,8 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL(const int8_t *const A, const
const int8_t *const B,
float *const C, const int ldc,
const int M, const int N, const int K,
- const int32_t *const bias, const float *const late_bias, const Activation act,
+ const int32_t *const bias,
+ const DequantizeFloat &, const float *const late_bias, const Activation act,
bool accumulate,
int32_t *const accumulator_buffer
) : A(A),
@@ -94,7 +95,7 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL(const int8_t *const A, const
};
// Construct arguments for this kernel
- KernelArgs args(A, B, C, ldc, M, N, K, bias, late_bias, act, accumulate, accumulator_buffer);
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, dq, late_bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
"ldr x13, [%x[args], %[offsetof_flags]]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp
index df2c9c0ca3..edb6737dc7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL(const int8_t *const A, const
class cls_sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL;
- StdTransformsSME<operand_type, result_type, 2, 2, 4> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 2, 2, 4> transforms = {};
cls_sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp
index 1631fae8e9..ff649f11f6 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp
@@ -40,7 +40,8 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL(const int8_t *const A, const
const int8_t *const B,
float *const C, const int ldc,
const int M, const int N, const int K,
- const int32_t *const bias, const float *const late_bias, const Activation act,
+ const int32_t *const bias,
+ const DequantizeFloat &, const float *const late_bias, const Activation act,
bool accumulate,
int32_t *const accumulator_buffer
) : A(A),
@@ -94,7 +95,7 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL(const int8_t *const A, const
};
// Construct arguments for this kernel
- KernelArgs args(A, B, C, ldc, M, N, K, bias, late_bias, act, accumulate, accumulator_buffer);
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, dq, late_bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
"ldr x16, [%x[args], %[offsetof_flags]]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp
index 70952f4f03..112f5ef0e8 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL(const int8_t *const A, const
class cls_sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL;
- StdTransformsSME<operand_type, result_type, 4, 1, 4> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 4, 1, 4> transforms = {};
cls_sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp
index bafb16bca8..a08ea8311a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp
@@ -40,7 +40,8 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL(const int8_t *const A, const
const int8_t *const B,
float *const C, const int ldc,
const int M, const int N, const int K,
- const int32_t *const bias, const float *const late_bias, const Activation act,
+ const int32_t *const bias,
+ const DequantizeFloat &, const float *const late_bias, const Activation act,
bool accumulate,
int32_t *const accumulator_buffer
) : A(A),
@@ -94,7 +95,7 @@ void sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL(const int8_t *const A, const
};
// Construct arguments for this kernel
- KernelArgs args(A, B, C, ldc, M, N, K, bias, late_bias, act, accumulate, accumulator_buffer);
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, dq, late_bias, act, accumulate, accumulator_buffer);
__asm__ __volatile__(
"ldr x16, [%x[args], %[offsetof_flags]]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp
index b9d8b60c8d..0c8de041cb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
class cls_sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation, bool accumulate, int32_t *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL;
- StdTransformsSME<operand_type, result_type, 1, 4, 4> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 1, 4, 4> transforms = {};
cls_sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
index d11faa634d..a643fb265b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
C(C), ldcb(ldc * sizeof(int32_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
bias(bias),
accumulator_buffer(accumulator_buffer),
@@ -69,10 +68,11 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
const long kstride_bytes;
int32_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
const int32_t *const bias;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
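Dropping n_loops/n_tail_iters from KernelArgs works because the retuned prologue (label 5 in each kernel) now derives the unroll/tail split in registers: K is rounded up to a multiple of 4, then split with an lsr/and pair. A C++ restatement of that arithmetic, under the same 4-byte blocking assumption:

struct KLoopSplit { long loops; long oddments; };

// K is consumed in 4-byte groups by the s8 smopa; the main loop is
// unrolled four groups deep and the remainder runs as oddments.
inline KLoopSplit split_k(long K)
{
    const long k_blocks = (K + 3) >> 2;      // add x20, x20, #0x3 ; lsr x20, #0x2
    return { k_blocks >> 2, k_blocks & 3 };  // lsr x21, x20, #0x2 ; and x20, #0x3
}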
@@ -92,16 +92,16 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
".inst 0xa040c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11]\n"
- ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
- ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
- ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa041c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
".inst 0xa043c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
+ "addvl x11, x11, #16\n"
+ ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x11, x11, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w9, [%x[args], %[offsetof_M]]\n"
@@ -116,102 +116,102 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- ".inst 0xa11bc29b // ldnt1w { z19.s, z23.s, z27.s, z31.s }, p8/Z, [x20, x27, LSL #2]\n"
- ".inst 0xc0900260 // addha za0.s, p0/M, p0/M, z19.s\n"
- ".inst 0xc09002e1 // addha za1.s, p0/M, p0/M, z23.s\n"
- ".inst 0xc0900362 // addha za2.s, p0/M, p0/M, z27.s\n"
- ".inst 0xc09003e3 // addha za3.s, p0/M, p0/M, z31.s\n"
+ ".inst 0xa01bc288 // ld1w { z8.s-z11.s }, p8/Z, [x20, x27, LSL #2]\n"
+ ".inst 0xc0900100 // addha za0.s, p0/M, p0/M, z8.s\n"
+ ".inst 0xc0900121 // addha za1.s, p0/M, p0/M, z9.s\n"
+ ".inst 0xc0900142 // addha za2.s, p0/M, p0/M, z10.s\n"
+ ".inst 0xc0900163 // addha za3.s, p0/M, p0/M, z11.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x27\n"
"mov x21, x28\n"
"incw x20, ALL, MUL #4\n"
"incw x21\n"
"cmp x20, x26\n"
- "csel x21, x28, x21, LT\n"
"mov x20, x13\n"
+ "csel x21, x28, x21, LT\n"
"bfm x13, XZR, #0x0, #0x0 // bfc x13, #0x0, #0x1\n"
"cmp x21, x9\n"
"csel x13, x20, x13, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x27, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- "ld1b { z30.b }, p0/Z, [x24]\n"
- ".inst 0xa04086e1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x23]\n"
- "ld1b { z21.b }, p0/Z, [x24, #1, MUL VL]\n"
- ".inst 0xa04186f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1b { z28.b }, p0/Z, [x24, #2, MUL VL]\n"
- ".inst 0xa04286e5 // ldnt1b { z4.b-z7.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1b { z11.b }, p0/Z, [x24, #3, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x27, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z21.b }, p0/Z, [x24]\n"
+ ".inst 0xa04086f8 // ld1b { z24.b-z27.b }, pn9.b/Z, [x23]\n"
+ "ld1b { z6.b }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0xa14186e1 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1b { z31.b }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa14286e3 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1b { z23.b }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- ".inst 0xa04386f1 // ldnt1b { z16.b-z19.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ ".inst 0xa14386e0 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa08003c0 // smopa za0.s, p0/M, p0/M, z30.b, z0.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa08103c1 // smopa za1.s, p0/M, p0/M, z30.b, z1.b\n"
- ".inst 0xa08203c2 // smopa za2.s, p0/M, p0/M, z30.b, z2.b\n"
- ".inst 0xa08303c3 // smopa za3.s, p0/M, p0/M, z30.b, z3.b\n"
- "ld1b { z30.b }, p0/Z, [x24]\n"
".inst 0xa09802a0 // smopa za0.s, p0/M, p0/M, z21.b, z24.b\n"
- ".inst 0xa04086e1 // ldnt1b { z0.b-z3.b }, pn9.b/Z, [x23]\n"
+ "subs x21, x21, #0x1\n"
".inst 0xa09902a1 // smopa za1.s, p0/M, p0/M, z21.b, z25.b\n"
".inst 0xa09a02a2 // smopa za2.s, p0/M, p0/M, z21.b, z26.b\n"
".inst 0xa09b02a3 // smopa za3.s, p0/M, p0/M, z21.b, z27.b\n"
- "ld1b { z21.b }, p0/Z, [x24, #1, MUL VL]\n"
- ".inst 0xa0840380 // smopa za0.s, p0/M, p0/M, z28.b, z4.b\n"
- ".inst 0xa04186f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa0850381 // smopa za1.s, p0/M, p0/M, z28.b, z5.b\n"
- ".inst 0xa0860382 // smopa za2.s, p0/M, p0/M, z28.b, z6.b\n"
- ".inst 0xa0870383 // smopa za3.s, p0/M, p0/M, z28.b, z7.b\n"
- "ld1b { z28.b }, p0/Z, [x24, #2, MUL VL]\n"
- ".inst 0xa04286e5 // ldnt1b { z4.b-z7.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- ".inst 0xa0900160 // smopa za0.s, p0/M, p0/M, z11.b, z16.b\n"
- ".inst 0xa0910161 // smopa za1.s, p0/M, p0/M, z11.b, z17.b\n"
- ".inst 0xa0920162 // smopa za2.s, p0/M, p0/M, z11.b, z18.b\n"
- ".inst 0xa0930163 // smopa za3.s, p0/M, p0/M, z11.b, z19.b\n"
- "ld1b { z11.b }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1b { z21.b }, p0/Z, [x24]\n"
+ ".inst 0xa08100c0 // smopa za0.s, p0/M, p0/M, z6.b, z1.b\n"
+ ".inst 0xa04086f8 // ld1b { z24.b-z27.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa08500c1 // smopa za1.s, p0/M, p0/M, z6.b, z5.b\n"
+ ".inst 0xa08900c2 // smopa za2.s, p0/M, p0/M, z6.b, z9.b\n"
+ ".inst 0xa08d00c3 // smopa za3.s, p0/M, p0/M, z6.b, z13.b\n"
+ "ld1b { z6.b }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0xa08303e0 // smopa za0.s, p0/M, p0/M, z31.b, z3.b\n"
+ ".inst 0xa14186e1 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa08703e1 // smopa za1.s, p0/M, p0/M, z31.b, z7.b\n"
+ ".inst 0xa08b03e2 // smopa za2.s, p0/M, p0/M, z31.b, z11.b\n"
+ ".inst 0xa08f03e3 // smopa za3.s, p0/M, p0/M, z31.b, z15.b\n"
+ "ld1b { z31.b }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa14286e3 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ ".inst 0xa08002e0 // smopa za0.s, p0/M, p0/M, z23.b, z0.b\n"
+ ".inst 0xa08402e1 // smopa za1.s, p0/M, p0/M, z23.b, z4.b\n"
+ ".inst 0xa08802e2 // smopa za2.s, p0/M, p0/M, z23.b, z8.b\n"
+ ".inst 0xa08c02e3 // smopa za3.s, p0/M, p0/M, z23.b, z12.b\n"
+ "ld1b { z23.b }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- ".inst 0xa04386f1 // ldnt1b { z16.b-z19.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ ".inst 0xa14386e0 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa08003c0 // smopa za0.s, p0/M, p0/M, z30.b, z0.b\n"
- ".inst 0xa08103c1 // smopa za1.s, p0/M, p0/M, z30.b, z1.b\n"
- ".inst 0xa08203c2 // smopa za2.s, p0/M, p0/M, z30.b, z2.b\n"
- ".inst 0xa08303c3 // smopa za3.s, p0/M, p0/M, z30.b, z3.b\n"
".inst 0xa09802a0 // smopa za0.s, p0/M, p0/M, z21.b, z24.b\n"
".inst 0xa09902a1 // smopa za1.s, p0/M, p0/M, z21.b, z25.b\n"
".inst 0xa09a02a2 // smopa za2.s, p0/M, p0/M, z21.b, z26.b\n"
".inst 0xa09b02a3 // smopa za3.s, p0/M, p0/M, z21.b, z27.b\n"
- ".inst 0xa0840380 // smopa za0.s, p0/M, p0/M, z28.b, z4.b\n"
- ".inst 0xa0850381 // smopa za1.s, p0/M, p0/M, z28.b, z5.b\n"
- ".inst 0xa0860382 // smopa za2.s, p0/M, p0/M, z28.b, z6.b\n"
- ".inst 0xa0870383 // smopa za3.s, p0/M, p0/M, z28.b, z7.b\n"
- ".inst 0xa0900160 // smopa za0.s, p0/M, p0/M, z11.b, z16.b\n"
- ".inst 0xa0910161 // smopa za1.s, p0/M, p0/M, z11.b, z17.b\n"
- ".inst 0xa0920162 // smopa za2.s, p0/M, p0/M, z11.b, z18.b\n"
- ".inst 0xa0930163 // smopa za3.s, p0/M, p0/M, z11.b, z19.b\n"
+ ".inst 0xa08100c0 // smopa za0.s, p0/M, p0/M, z6.b, z1.b\n"
+ ".inst 0xa08500c1 // smopa za1.s, p0/M, p0/M, z6.b, z5.b\n"
+ ".inst 0xa08900c2 // smopa za2.s, p0/M, p0/M, z6.b, z9.b\n"
+ ".inst 0xa08d00c3 // smopa za3.s, p0/M, p0/M, z6.b, z13.b\n"
+ ".inst 0xa08303e0 // smopa za0.s, p0/M, p0/M, z31.b, z3.b\n"
+ ".inst 0xa08703e1 // smopa za1.s, p0/M, p0/M, z31.b, z7.b\n"
+ ".inst 0xa08b03e2 // smopa za2.s, p0/M, p0/M, z31.b, z11.b\n"
+ ".inst 0xa08f03e3 // smopa za3.s, p0/M, p0/M, z31.b, z15.b\n"
+ ".inst 0xa08002e0 // smopa za0.s, p0/M, p0/M, z23.b, z0.b\n"
+ ".inst 0xa08402e1 // smopa za1.s, p0/M, p0/M, z23.b, z4.b\n"
+ ".inst 0xa08802e2 // smopa za2.s, p0/M, p0/M, z23.b, z8.b\n"
+ ".inst 0xa08c02e3 // smopa za3.s, p0/M, p0/M, z23.b, z12.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1b { z22.b }, p0/Z, [x24]\n"
- "subs x21, x21, #0x1\n"
+ "ld1b { z14.b }, p0/Z, [x24]\n"
+ "subs x20, x20, #0x1\n"
"addvl x24, x24, #1\n"
- ".inst 0xa14086f1 // ld1b { z17.b, z21.b, z25.b, z29.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa14086e1 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #4\n"
- ".inst 0xa09102c0 // smopa za0.s, p0/M, p0/M, z22.b, z17.b\n"
- ".inst 0xa09502c1 // smopa za1.s, p0/M, p0/M, z22.b, z21.b\n"
- ".inst 0xa09902c2 // smopa za2.s, p0/M, p0/M, z22.b, z25.b\n"
- ".inst 0xa09d02c3 // smopa za3.s, p0/M, p0/M, z22.b, z29.b\n"
+ ".inst 0xa08101c0 // smopa za0.s, p0/M, p0/M, z14.b, z1.b\n"
+ ".inst 0xa08501c1 // smopa za1.s, p0/M, p0/M, z14.b, z5.b\n"
+ ".inst 0xa08901c2 // smopa za2.s, p0/M, p0/M, z14.b, z9.b\n"
+ ".inst 0xa08d01c3 // smopa za3.s, p0/M, p0/M, z14.b, z13.b\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x13, #1, 14f\n"
@@ -219,25 +219,25 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11]\n"
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa041c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ ".inst 0xa041c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
".inst 0xc086045c // mova { z28.s-z31.s }, za2h.s[x12]\n"
- ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xa042c560 // ld1w { z0.s-z3.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
- ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa043c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
- "add x12, x12, #0x4\n"
- "cmp x12, x20\n"
- ".inst 0xa060c544 // st1w { z4.s-z7.s }, pn9.b, [x10]\n"
+ ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
+ ".inst 0xa042c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
"addvl x11, x11, #16\n"
- ".inst 0xa061c54c // st1w { z12.s-z15.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa060c548 // st1w { z8.s-z11.s }, pn9.b, [x10]\n"
+ ".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa061c550 // st1w { z16.s-z19.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
+ "add x12, x12, #0x4\n"
".inst 0xa062c55c // st1w { z28.s-z31.s }, pn9.b, [x10, #0x8, MUL VL]\n"
- ".inst 0xa063c550 // st1w { z16.s-z19.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa063c540 // st1w { z0.s-z3.s }, pn9.b, [x10, #0xc, MUL VL]\n"
"addvl x10, x10, #16\n"
"blt 11b\n"
"b 20f\n"
@@ -245,16 +245,16 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa060c54c // st1w { z12.s-z15.s }, pn9.b, [x10]\n"
- ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
- ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
- ".inst 0xa061c544 // st1w { z4.s-z7.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c544 // st1w { z4.s-z7.s }, pn9.b, [x10]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c540 // st1w { z0.s-z3.s }, pn9.b, [x10, #0x8, MUL VL]\n"
- ".inst 0xa063c558 // st1w { z24.s-z27.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ ".inst 0xa062c558 // st1w { z24.s-z27.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ ".inst 0xa063c550 // st1w { z16.s-z19.s }, pn9.b, [x10, #0xc, MUL VL]\n"
"addvl x10, x10, #16\n"
"blt 13b\n"
"b 20f\n"
@@ -264,11 +264,11 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
"cntw x20\n"
"ldr x22, [%x[args], %[offsetof_ldcb]]\n"
"cmp x21, x20\n"
+ "mov x12, #0x0\n"
"csel x20, x21, x20, LT\n"
"add x23, x23, x27, LSL #2\n" // C += n
"lsr x21, x20, #0x2\n"
"madd x23, x28, x22, x23\n" // C += m * ldc
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Accumulator row 0 loop
@@ -278,55 +278,55 @@ void sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL(const int8_t *const A, const in
".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
".inst 0xa160c2e0 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x23]\n"
"add x23, x23, x22\n"
+ "add x12, x12, #0x4\n"
".inst 0xa160c2e1 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x23]\n"
"add x23, x23, x22\n"
- "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa160c2e2 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x23]\n"
"add x23, x23, x22\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa160c2e3 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x23]\n"
"add x23, x23, x22\n"
"blt 15b\n"
"16:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xa160c2f0 // st1w { z16.s, z20.s, z24.s, z28.s }, p8, [x23]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xa160c2e0 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x23]\n"
"add x23, x23, x22\n"
"beq 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xa160c2f1 // st1w { z17.s, z21.s, z25.s, z29.s }, p8, [x23]\n"
+ ".inst 0xa160c2e1 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x23]\n"
"add x23, x23, x22\n"
"beq 17f\n"
- ".inst 0xa160c2f2 // st1w { z18.s, z22.s, z26.s, z30.s }, p8, [x23]\n"
+ ".inst 0xa160c2e2 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x23]\n"
"17:" // Store to output array: Accumulator row 0 oddments: End
"18:" // Store to output array: End
"tbz x13, #0, 20f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"19:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c568 // ld1w { z8.s-z11.s }, pn9.b/Z, [x11]\n"
- ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa041c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c570 // ld1w { z16.s-z19.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c560 // ld1w { z0.s-z3.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa041c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa042c564 // ld1w { z4.s-z7.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
".inst 0xa043c568 // ld1w { z8.s-z11.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x11, x11, #16\n"
+ ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x11, x11, #16\n"
"blt 19b\n"
"20:" // End block
"incw x27, ALL, MUL #4\n"
"cmp x27, x26\n"
"blt 3b\n"
"incw x28\n"
- "cmp x28, x9\n"
"mov x27, #0x0\n"
+ "cmp x28, x9\n"
"mov x25, x24\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp
index f05d2cf215..074a8819f9 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
class cls_sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation, bool accumulate, int32_t *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL;
- StdTransformsSME<operand_type, result_type, 2, 2, 4> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 2, 2, 4> transforms = {};
cls_sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
index 47de894306..ae14cd7d50 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
C(C), ldcb(ldc * sizeof(int32_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
bias(bias),
accumulator_buffer(accumulator_buffer),
@@ -69,10 +68,11 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
const long kstride_bytes;
int32_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
const int32_t *const bias;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
@@ -91,17 +91,17 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa041c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa042c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa043c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -116,102 +116,102 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- ".inst 0xa00a4295 // ldnt1w { z20.s-z21.s }, p8/Z, [x20, x10, LSL #2]\n"
- ".inst 0xc0900280 // addha za0.s, p0/M, p0/M, z20.s\n"
- ".inst 0xc09002a1 // addha za1.s, p0/M, p0/M, z21.s\n"
- ".inst 0xc0900282 // addha za2.s, p0/M, p0/M, z20.s\n"
- ".inst 0xc09002a3 // addha za3.s, p0/M, p0/M, z21.s\n"
+ ".inst 0xa10a4296 // ld1w { z22.s, z30.s }, p8/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xc09002c0 // addha za0.s, p0/M, p0/M, z22.s\n"
+ ".inst 0xc09003c1 // addha za1.s, p0/M, p0/M, z30.s\n"
+ ".inst 0xc09002c2 // addha za2.s, p0/M, p0/M, z22.s\n"
+ ".inst 0xc09003c3 // addha za3.s, p0/M, p0/M, z30.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20, ALL, MUL #2\n"
"incw x21, ALL, MUL #2\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa040077c // ld1b { z28.b-z29.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa14006e8 // ldnt1b { z0.b, z8.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa0410762 // ld1b { z2.b-z3.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa14106ff // ldnt1b { z23.b, z31.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa042076e // ld1b { z14.b-z15.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206f8 // ldnt1b { z16.b, z24.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa0430764 // ld1b { z4.b-z5.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1400766 // ld1b { z6.b, z14.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa04006e2 // ld1b { z2.b-z3.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa041077a // ld1b { z26.b-z27.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa04106f6 // ld1b { z22.b-z23.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa1420765 // ld1b { z5.b, z13.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14206f5 // ld1b { z21.b, z29.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0430760 // ld1b { z0.b-z1.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa04306f5 // ldnt1b { z20.b-z21.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa14306f1 // ld1b { z17.b, z25.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa0800380 // smopa za0.s, p0/M, p0/M, z28.b, z0.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0880381 // smopa za1.s, p0/M, p0/M, z28.b, z8.b\n"
- ".inst 0xa08003a2 // smopa za2.s, p0/M, p0/M, z29.b, z0.b\n"
- ".inst 0xa08803a3 // smopa za3.s, p0/M, p0/M, z29.b, z8.b\n"
- ".inst 0xa040077c // ld1b { z28.b-z29.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa0970040 // smopa za0.s, p0/M, p0/M, z2.b, z23.b\n"
- ".inst 0xa14006e8 // ldnt1b { z0.b, z8.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa09f0041 // smopa za1.s, p0/M, p0/M, z2.b, z31.b\n"
- ".inst 0xa0970062 // smopa za2.s, p0/M, p0/M, z3.b, z23.b\n"
- ".inst 0xa09f0063 // smopa za3.s, p0/M, p0/M, z3.b, z31.b\n"
- ".inst 0xa0410762 // ld1b { z2.b-z3.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa09001c0 // smopa za0.s, p0/M, p0/M, z14.b, z16.b\n"
- ".inst 0xa14106ff // ldnt1b { z23.b, z31.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa09801c1 // smopa za1.s, p0/M, p0/M, z14.b, z24.b\n"
- ".inst 0xa09001e2 // smopa za2.s, p0/M, p0/M, z15.b, z16.b\n"
- ".inst 0xa09801e3 // smopa za3.s, p0/M, p0/M, z15.b, z24.b\n"
- ".inst 0xa042076e // ld1b { z14.b-z15.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206f8 // ldnt1b { z16.b, z24.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa0940080 // smopa za0.s, p0/M, p0/M, z4.b, z20.b\n"
- ".inst 0xa0950081 // smopa za1.s, p0/M, p0/M, z4.b, z21.b\n"
- ".inst 0xa09400a2 // smopa za2.s, p0/M, p0/M, z5.b, z20.b\n"
- ".inst 0xa09500a3 // smopa za3.s, p0/M, p0/M, z5.b, z21.b\n"
- ".inst 0xa0430764 // ld1b { z4.b-z5.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ ".inst 0xa08200c0 // smopa za0.s, p0/M, p0/M, z6.b, z2.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa08300c1 // smopa za1.s, p0/M, p0/M, z6.b, z3.b\n"
+ ".inst 0xa08201c2 // smopa za2.s, p0/M, p0/M, z14.b, z2.b\n"
+ ".inst 0xa08301c3 // smopa za3.s, p0/M, p0/M, z14.b, z3.b\n"
+ ".inst 0xa1400766 // ld1b { z6.b, z14.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa0960340 // smopa za0.s, p0/M, p0/M, z26.b, z22.b\n"
+ ".inst 0xa04006e2 // ld1b { z2.b-z3.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa0970341 // smopa za1.s, p0/M, p0/M, z26.b, z23.b\n"
+ ".inst 0xa0960362 // smopa za2.s, p0/M, p0/M, z27.b, z22.b\n"
+ ".inst 0xa0970363 // smopa za3.s, p0/M, p0/M, z27.b, z23.b\n"
+ ".inst 0xa041077a // ld1b { z26.b-z27.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa09500a0 // smopa za0.s, p0/M, p0/M, z5.b, z21.b\n"
+ ".inst 0xa04106f6 // ld1b { z22.b-z23.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa09d00a1 // smopa za1.s, p0/M, p0/M, z5.b, z29.b\n"
+ ".inst 0xa09501a2 // smopa za2.s, p0/M, p0/M, z13.b, z21.b\n"
+ ".inst 0xa09d01a3 // smopa za3.s, p0/M, p0/M, z13.b, z29.b\n"
+ ".inst 0xa1420765 // ld1b { z5.b, z13.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14206f5 // ld1b { z21.b, z29.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0910000 // smopa za0.s, p0/M, p0/M, z0.b, z17.b\n"
+ ".inst 0xa0990001 // smopa za1.s, p0/M, p0/M, z0.b, z25.b\n"
+ ".inst 0xa0910022 // smopa za2.s, p0/M, p0/M, z1.b, z17.b\n"
+ ".inst 0xa0990023 // smopa za3.s, p0/M, p0/M, z1.b, z25.b\n"
+ ".inst 0xa0430760 // ld1b { z0.b-z1.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa04306f5 // ldnt1b { z20.b-z21.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa14306f1 // ld1b { z17.b, z25.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa0800380 // smopa za0.s, p0/M, p0/M, z28.b, z0.b\n"
- ".inst 0xa0880381 // smopa za1.s, p0/M, p0/M, z28.b, z8.b\n"
- ".inst 0xa08003a2 // smopa za2.s, p0/M, p0/M, z29.b, z0.b\n"
- ".inst 0xa08803a3 // smopa za3.s, p0/M, p0/M, z29.b, z8.b\n"
- ".inst 0xa0970040 // smopa za0.s, p0/M, p0/M, z2.b, z23.b\n"
- ".inst 0xa09f0041 // smopa za1.s, p0/M, p0/M, z2.b, z31.b\n"
- ".inst 0xa0970062 // smopa za2.s, p0/M, p0/M, z3.b, z23.b\n"
- ".inst 0xa09f0063 // smopa za3.s, p0/M, p0/M, z3.b, z31.b\n"
- ".inst 0xa09001c0 // smopa za0.s, p0/M, p0/M, z14.b, z16.b\n"
- ".inst 0xa09801c1 // smopa za1.s, p0/M, p0/M, z14.b, z24.b\n"
- ".inst 0xa09001e2 // smopa za2.s, p0/M, p0/M, z15.b, z16.b\n"
- ".inst 0xa09801e3 // smopa za3.s, p0/M, p0/M, z15.b, z24.b\n"
- ".inst 0xa0940080 // smopa za0.s, p0/M, p0/M, z4.b, z20.b\n"
- ".inst 0xa0950081 // smopa za1.s, p0/M, p0/M, z4.b, z21.b\n"
- ".inst 0xa09400a2 // smopa za2.s, p0/M, p0/M, z5.b, z20.b\n"
- ".inst 0xa09500a3 // smopa za3.s, p0/M, p0/M, z5.b, z21.b\n"
+ ".inst 0xa08200c0 // smopa za0.s, p0/M, p0/M, z6.b, z2.b\n"
+ ".inst 0xa08300c1 // smopa za1.s, p0/M, p0/M, z6.b, z3.b\n"
+ ".inst 0xa08201c2 // smopa za2.s, p0/M, p0/M, z14.b, z2.b\n"
+ ".inst 0xa08301c3 // smopa za3.s, p0/M, p0/M, z14.b, z3.b\n"
+ ".inst 0xa0960340 // smopa za0.s, p0/M, p0/M, z26.b, z22.b\n"
+ ".inst 0xa0970341 // smopa za1.s, p0/M, p0/M, z26.b, z23.b\n"
+ ".inst 0xa0960362 // smopa za2.s, p0/M, p0/M, z27.b, z22.b\n"
+ ".inst 0xa0970363 // smopa za3.s, p0/M, p0/M, z27.b, z23.b\n"
+ ".inst 0xa09500a0 // smopa za0.s, p0/M, p0/M, z5.b, z21.b\n"
+ ".inst 0xa09d00a1 // smopa za1.s, p0/M, p0/M, z5.b, z29.b\n"
+ ".inst 0xa09501a2 // smopa za2.s, p0/M, p0/M, z13.b, z21.b\n"
+ ".inst 0xa09d01a3 // smopa za3.s, p0/M, p0/M, z13.b, z29.b\n"
+ ".inst 0xa0910000 // smopa za0.s, p0/M, p0/M, z0.b, z17.b\n"
+ ".inst 0xa0990001 // smopa za1.s, p0/M, p0/M, z0.b, z25.b\n"
+ ".inst 0xa0910022 // smopa za2.s, p0/M, p0/M, z1.b, z17.b\n"
+ ".inst 0xa0990023 // smopa za3.s, p0/M, p0/M, z1.b, z25.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa1400774 // ld1b { z20.b, z28.b }, pn9.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa1400777 // ld1b { z23.b, z31.b }, pn9.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #2\n"
".inst 0xa14006e7 // ld1b { z7.b, z15.b }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #2\n"
- ".inst 0xa0870280 // smopa za0.s, p0/M, p0/M, z20.b, z7.b\n"
- ".inst 0xa08f0281 // smopa za1.s, p0/M, p0/M, z20.b, z15.b\n"
- ".inst 0xa0870382 // smopa za2.s, p0/M, p0/M, z28.b, z7.b\n"
- ".inst 0xa08f0383 // smopa za3.s, p0/M, p0/M, z28.b, z15.b\n"
+ ".inst 0xa08702e0 // smopa za0.s, p0/M, p0/M, z23.b, z7.b\n"
+ ".inst 0xa08f02e1 // smopa za1.s, p0/M, p0/M, z23.b, z15.b\n"
+ ".inst 0xa08703e2 // smopa za2.s, p0/M, p0/M, z31.b, z7.b\n"
+ ".inst 0xa08f03e3 // smopa za3.s, p0/M, p0/M, z31.b, z15.b\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x16, #1, 14f\n"
@@ -219,25 +219,25 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
- ".inst 0xa042c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa060c5dc // st1w { z28.s-z31.s }, pn9.b, [x14]\n"
+ ".inst 0xc0840682 // mova za2h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa061c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x4, MUL VL]\n"
".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 23f\n"
@@ -245,16 +245,16 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa060c5cc // st1w { z12.s-z15.s }, pn9.b, [x14]\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa061c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 23f\n"
@@ -264,25 +264,25 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
"cntw x24\n"
"ldr x23, [%x[args], %[offsetof_ldcb]]\n"
"cmp x25, x24\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x24, LT\n"
"add x26, x26, x10, LSL #2\n" // C += n
"lsr x21, x22, #0x2\n"
"madd x26, x11, x23, x26\n" // C += m * ldc
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
- "add x26, x26, x23\n"
- ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
+ ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
"add x26, x26, x23\n"
"add x12, x12, #0x4\n"
- ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
+ ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
"add x26, x26, x23\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
+ ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ "add x26, x26, x23\n"
+ ".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
"add x26, x26, x23\n"
"blt 15b\n"
"16:" // Store to output array: Accumulator row 0 oddments
@@ -303,9 +303,9 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
"subs x25, x25, x22\n"
"beq 21f\n"
"cmp x25, x24\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x24, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 19f\n"
"18:" // Store to output array: Accumulator row 1 loop
@@ -313,53 +313,53 @@ void sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL(const int8_t *const A, const in
".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
"add x26, x26, x23\n"
+ "add x12, x12, #0x4\n"
".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
"add x26, x26, x23\n"
- "add x12, x12, #0x4\n"
+ "cmp x12, x21, LSL #2\n"
".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
"add x26, x26, x23\n"
- "cmp x12, x21, LSL #2\n"
".inst 0xa1604353 // st1w { z19.s, z27.s }, p8, [x26]\n"
"add x26, x26, x23\n"
"blt 18b\n"
"19:" // Store to output array: Accumulator row 1 oddments
"cbz x20, 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
- ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
- ".inst 0xa1604340 // st1w { z0.s, z8.s }, p8, [x26]\n"
+ ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
"add x26, x26, x23\n"
"beq 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xa1604341 // st1w { z1.s, z9.s }, p8, [x26]\n"
+ ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
"add x26, x26, x23\n"
"beq 20f\n"
- ".inst 0xa1604342 // st1w { z2.s, z10.s }, p8, [x26]\n"
+ ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
"20:" // Store to output array: Accumulator row 1 oddments: End
"21:" // Store to output array: End
"tbz x16, #0, 23f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"22:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa041c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
".inst 0xa042c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
".inst 0xa043c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 22b\n"
"23:" // End block
"incw x10, ALL, MUL #2\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp
index ce10ab30e7..6b1dca0e2a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
class cls_sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, int32_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Activation, bool accumulate, int32_t *const accumulator_buffer);
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL;
- StdTransformsSME<operand_type, result_type, 4, 1, 4> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 4, 1, 4> transforms = {};
cls_sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const CPUInfo *)
{
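Note (not part of the patch): the hpp hunks above and below replace the single operand_type typedef with an lhs_operand_type/rhs_operand_type pair. The two types are identical here, but the split lets the same class shape describe kernels whose A and B operands differ. A minimal sketch of the pattern, with illustrative struct names:

#include <cstdint>

struct kernel_before {
    typedef int8_t operand_type;      // one type shared by both operands
};

struct kernel_after {
    typedef int8_t lhs_operand_type;  // A-matrix element type
    typedef int8_t rhs_operand_type;  // B-matrix element type
};

static_assert(sizeof(kernel_after::lhs_operand_type) == 1, "int8 operands");

int main() { return 0; }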
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
index a23c44b7da..03c19c46f5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,7 +48,6 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
C(C), ldcb(ldc * sizeof(int32_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
bias(bias),
accumulator_buffer(accumulator_buffer),
@@ -69,10 +68,11 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
const long kstride_bytes;
int32_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
+ const long M, N, K;
const int32_t *const bias;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
@@ -91,17 +91,17 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa041c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840481 // mova za1h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -116,102 +116,102 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "ldnt1w { z17.s }, p0/Z, [x20, x10, LSL #2]\n"
- ".inst 0xc0902620 // addha za0.s, p1/M, p1/M, z17.s\n"
- ".inst 0xc0902621 // addha za1.s, p1/M, p1/M, z17.s\n"
- ".inst 0xc0902622 // addha za2.s, p1/M, p1/M, z17.s\n"
- ".inst 0xc0902623 // addha za3.s, p1/M, p1/M, z17.s\n"
+ "ld1w { z1.s }, p0/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xc0902420 // addha za0.s, p1/M, p1/M, z1.s\n"
+ ".inst 0xc0902421 // addha za1.s, p1/M, p1/M, z1.s\n"
+ ".inst 0xc0902422 // addha za2.s, p1/M, p1/M, z1.s\n"
+ ".inst 0xc0902423 // addha za3.s, p1/M, p1/M, z1.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20\n"
"incw x21, ALL, MUL #4\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
- "ldnt1b { z12.b }, p1/Z, [x23]\n"
- ".inst 0xa1418370 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1b { z5.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa1428363 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z4.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa1438362 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1408370 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x27]\n"
+ "ld1b { z0.b }, p1/Z, [x23]\n"
+ ".inst 0xa041836c // ld1b { z12.b-z15.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "ld1b { z10.b }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa1428371 // ld1b { z17.b, z21.b, z25.b, z29.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1b { z18.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa1438373 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1b { z19.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1b { z7.b }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa08c2640 // smopa za0.s, p1/M, p1/M, z18.b, z12.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa08c26c1 // smopa za1.s, p1/M, p1/M, z22.b, z12.b\n"
- ".inst 0xa08c2742 // smopa za2.s, p1/M, p1/M, z26.b, z12.b\n"
- ".inst 0xa08c27c3 // smopa za3.s, p1/M, p1/M, z30.b, z12.b\n"
- ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
- ".inst 0xa0852600 // smopa za0.s, p1/M, p1/M, z16.b, z5.b\n"
- "ldnt1b { z12.b }, p1/Z, [x23]\n"
- ".inst 0xa0852681 // smopa za1.s, p1/M, p1/M, z20.b, z5.b\n"
- ".inst 0xa0852702 // smopa za2.s, p1/M, p1/M, z24.b, z5.b\n"
- ".inst 0xa0852783 // smopa za3.s, p1/M, p1/M, z28.b, z5.b\n"
- ".inst 0xa1418370 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa0842460 // smopa za0.s, p1/M, p1/M, z3.b, z4.b\n"
- "ldnt1b { z5.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa08424e1 // smopa za1.s, p1/M, p1/M, z7.b, z4.b\n"
- ".inst 0xa0842562 // smopa za2.s, p1/M, p1/M, z11.b, z4.b\n"
- ".inst 0xa08425e3 // smopa za3.s, p1/M, p1/M, z15.b, z4.b\n"
- ".inst 0xa1428363 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z4.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa0932440 // smopa za0.s, p1/M, p1/M, z2.b, z19.b\n"
- ".inst 0xa09324c1 // smopa za1.s, p1/M, p1/M, z6.b, z19.b\n"
- ".inst 0xa0932542 // smopa za2.s, p1/M, p1/M, z10.b, z19.b\n"
- ".inst 0xa09325c3 // smopa za3.s, p1/M, p1/M, z14.b, z19.b\n"
- ".inst 0xa1438362 // ld1b { z2.b, z6.b, z10.b, z14.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xa0802600 // smopa za0.s, p1/M, p1/M, z16.b, z0.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0802681 // smopa za1.s, p1/M, p1/M, z20.b, z0.b\n"
+ ".inst 0xa0802702 // smopa za2.s, p1/M, p1/M, z24.b, z0.b\n"
+ ".inst 0xa0802783 // smopa za3.s, p1/M, p1/M, z28.b, z0.b\n"
+ ".inst 0xa1408370 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa08a2580 // smopa za0.s, p1/M, p1/M, z12.b, z10.b\n"
+ "ld1b { z0.b }, p1/Z, [x23]\n"
+ ".inst 0xa08a25a1 // smopa za1.s, p1/M, p1/M, z13.b, z10.b\n"
+ ".inst 0xa08a25c2 // smopa za2.s, p1/M, p1/M, z14.b, z10.b\n"
+ ".inst 0xa08a25e3 // smopa za3.s, p1/M, p1/M, z15.b, z10.b\n"
+ ".inst 0xa041836c // ld1b { z12.b-z15.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa0922620 // smopa za0.s, p1/M, p1/M, z17.b, z18.b\n"
+ "ld1b { z10.b }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa09226a1 // smopa za1.s, p1/M, p1/M, z21.b, z18.b\n"
+ ".inst 0xa0922722 // smopa za2.s, p1/M, p1/M, z25.b, z18.b\n"
+ ".inst 0xa09227a3 // smopa za3.s, p1/M, p1/M, z29.b, z18.b\n"
+ ".inst 0xa1428371 // ld1b { z17.b, z21.b, z25.b, z29.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1b { z18.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa0872660 // smopa za0.s, p1/M, p1/M, z19.b, z7.b\n"
+ ".inst 0xa08726e1 // smopa za1.s, p1/M, p1/M, z23.b, z7.b\n"
+ ".inst 0xa0872762 // smopa za2.s, p1/M, p1/M, z27.b, z7.b\n"
+ ".inst 0xa08727e3 // smopa za3.s, p1/M, p1/M, z31.b, z7.b\n"
+ ".inst 0xa1438373 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1b { z19.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1b { z7.b }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa08c2640 // smopa za0.s, p1/M, p1/M, z18.b, z12.b\n"
- ".inst 0xa08c26c1 // smopa za1.s, p1/M, p1/M, z22.b, z12.b\n"
- ".inst 0xa08c2742 // smopa za2.s, p1/M, p1/M, z26.b, z12.b\n"
- ".inst 0xa08c27c3 // smopa za3.s, p1/M, p1/M, z30.b, z12.b\n"
- ".inst 0xa0852600 // smopa za0.s, p1/M, p1/M, z16.b, z5.b\n"
- ".inst 0xa0852681 // smopa za1.s, p1/M, p1/M, z20.b, z5.b\n"
- ".inst 0xa0852702 // smopa za2.s, p1/M, p1/M, z24.b, z5.b\n"
- ".inst 0xa0852783 // smopa za3.s, p1/M, p1/M, z28.b, z5.b\n"
- ".inst 0xa0842460 // smopa za0.s, p1/M, p1/M, z3.b, z4.b\n"
- ".inst 0xa08424e1 // smopa za1.s, p1/M, p1/M, z7.b, z4.b\n"
- ".inst 0xa0842562 // smopa za2.s, p1/M, p1/M, z11.b, z4.b\n"
- ".inst 0xa08425e3 // smopa za3.s, p1/M, p1/M, z15.b, z4.b\n"
- ".inst 0xa0932440 // smopa za0.s, p1/M, p1/M, z2.b, z19.b\n"
- ".inst 0xa09324c1 // smopa za1.s, p1/M, p1/M, z6.b, z19.b\n"
- ".inst 0xa0932542 // smopa za2.s, p1/M, p1/M, z10.b, z19.b\n"
- ".inst 0xa09325c3 // smopa za3.s, p1/M, p1/M, z14.b, z19.b\n"
+ ".inst 0xa0802600 // smopa za0.s, p1/M, p1/M, z16.b, z0.b\n"
+ ".inst 0xa0802681 // smopa za1.s, p1/M, p1/M, z20.b, z0.b\n"
+ ".inst 0xa0802702 // smopa za2.s, p1/M, p1/M, z24.b, z0.b\n"
+ ".inst 0xa0802783 // smopa za3.s, p1/M, p1/M, z28.b, z0.b\n"
+ ".inst 0xa08a2580 // smopa za0.s, p1/M, p1/M, z12.b, z10.b\n"
+ ".inst 0xa08a25a1 // smopa za1.s, p1/M, p1/M, z13.b, z10.b\n"
+ ".inst 0xa08a25c2 // smopa za2.s, p1/M, p1/M, z14.b, z10.b\n"
+ ".inst 0xa08a25e3 // smopa za3.s, p1/M, p1/M, z15.b, z10.b\n"
+ ".inst 0xa0922620 // smopa za0.s, p1/M, p1/M, z17.b, z18.b\n"
+ ".inst 0xa09226a1 // smopa za1.s, p1/M, p1/M, z21.b, z18.b\n"
+ ".inst 0xa0922722 // smopa za2.s, p1/M, p1/M, z25.b, z18.b\n"
+ ".inst 0xa09227a3 // smopa za3.s, p1/M, p1/M, z29.b, z18.b\n"
+ ".inst 0xa0872660 // smopa za0.s, p1/M, p1/M, z19.b, z7.b\n"
+ ".inst 0xa08726e1 // smopa za1.s, p1/M, p1/M, z23.b, z7.b\n"
+ ".inst 0xa0872762 // smopa za2.s, p1/M, p1/M, z27.b, z7.b\n"
+ ".inst 0xa08727e3 // smopa za3.s, p1/M, p1/M, z31.b, z7.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa0408368 // ld1b { z8.b-z11.b }, pn8.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #4\n"
"ld1b { z15.b }, p1/Z, [x23]\n"
"addvl x23, x23, #1\n"
- ".inst 0xa08f2500 // smopa za0.s, p1/M, p1/M, z8.b, z15.b\n"
- ".inst 0xa08f2521 // smopa za1.s, p1/M, p1/M, z9.b, z15.b\n"
- ".inst 0xa08f2542 // smopa za2.s, p1/M, p1/M, z10.b, z15.b\n"
- ".inst 0xa08f2563 // smopa za3.s, p1/M, p1/M, z11.b, z15.b\n"
+ ".inst 0xa08f2640 // smopa za0.s, p1/M, p1/M, z18.b, z15.b\n"
+ ".inst 0xa08f26c1 // smopa za1.s, p1/M, p1/M, z22.b, z15.b\n"
+ ".inst 0xa08f2742 // smopa za2.s, p1/M, p1/M, z26.b, z15.b\n"
+ ".inst 0xa08f27c3 // smopa za3.s, p1/M, p1/M, z30.b, z15.b\n"
"bgt 9b\n"
"10:" // K oddments: End
"tbz x16, #1, 14f\n"
@@ -219,25 +219,25 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
- ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
+ ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
+ ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa060c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14]\n"
".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa061c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 29f\n"
@@ -247,14 +247,14 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
"13:" // Store to partial result buffer: Store only: Loop
".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa060c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14]\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa061c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860464 // mova { z4.s-z7.s }, za3h.s[x12]\n"
+ ".inst 0xa060c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
".inst 0xa062c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 29f\n"
@@ -264,94 +264,94 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
"cntw x24\n"
"ldr x23, [%x[args], %[offsetof_ldcb]]\n"
"cmp x25, x24\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x24, LT\n"
"add x26, x26, x10, LSL #2\n" // C += n
"lsr x21, x22, #0x2\n"
"madd x26, x11, x23, x26\n" // C += m * ldc
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 16f\n"
"15:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- "st1w { z8.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1w { z9.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z10.s }, p0, [x26]\n"
+ "st1w { z12.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z11.s }, p0, [x26]\n"
+ "st1w { z13.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1w { z14.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1w { z15.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"blt 15b\n"
"16:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 17f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- "st1w { z4.s }, p0, [x26]\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ "st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 17f\n"
"subs x20, x20, #0x1\n"
- "st1w { z5.s }, p0, [x26]\n"
+ "st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 17f\n"
- "st1w { z6.s }, p0, [x26]\n"
+ "st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"17:" // Store to output array: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x24\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x24, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 19f\n"
"18:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
- "st1w { z16.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1w { z17.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z18.s }, p0, [x26]\n"
+ "st1w { z0.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z19.s }, p0, [x26]\n"
+ "st1w { z1.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1w { z2.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1w { z3.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"blt 18b\n"
"19:" // Store to output array: Accumulator row 1 oddments
"cbz x20, 20f\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- "st1w { z20.s }, p0, [x26]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ "st1w { z12.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 20f\n"
"subs x20, x20, #0x1\n"
- "st1w { z21.s }, p0, [x26]\n"
+ "st1w { z13.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"beq 20f\n"
- "st1w { z22.s }, p0, [x26]\n"
+ "st1w { z14.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"20:" // Store to output array: Accumulator row 1 oddments: End
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x24\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x24, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 22f\n"
"21:" // Store to output array: Accumulator row 2 loop
- ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
- "st1w { z24.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
- "st1w { z25.s }, p0, [x26]\n"
- "add x26, x26, x23\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
"add x12, x12, #0x4\n"
- "st1w { z26.s }, p0, [x26]\n"
+ "st1w { z8.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"cmp x12, x21, LSL #2\n"
- "st1w { z27.s }, p0, [x26]\n"
+ "st1w { z9.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1w { z10.s }, p0, [x26]\n"
+ "add x26, x26, x23\n"
+ "st1w { z11.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"blt 21b\n"
"22:" // Store to output array: Accumulator row 2 oddments
@@ -371,21 +371,21 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
"subs x25, x25, x22\n"
"beq 27f\n"
"cmp x25, x24\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x24, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 25f\n"
"24:" // Store to output array: Accumulator row 3 loop
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ "add x12, x12, #0x4\n"
"st1w { z16.s }, p0, [x26]\n"
"add x26, x26, x23\n"
+ "cmp x12, x21, LSL #2\n"
"st1w { z17.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "add x12, x12, #0x4\n"
"st1w { z18.s }, p0, [x26]\n"
"add x26, x26, x23\n"
- "cmp x12, x21, LSL #2\n"
"st1w { z19.s }, p0, [x26]\n"
"add x26, x26, x23\n"
"blt 24b\n"
@@ -407,25 +407,25 @@ void sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL(const int8_t *const A, const in
"mov x12, #0x0\n"
"cntw x20\n"
"28:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa040c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15]\n"
".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa042c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 28b\n"
"29:" // End block
"incw x10\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp
index fb84883913..f384e0f491 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
class cls_sme2_interleaved_nomerge_u8q_mopa_1VLx4VL
{
public:
- typedef uint8_t operand_type;
+ typedef uint8_t lhs_operand_type;
+ typedef uint8_t rhs_operand_type;
typedef uint8_t result_type;
typedef void (*kern_type)(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
@@ -60,7 +61,7 @@ public:
static constexpr bool supports_accumulate()
{
- return false;
+ return true;
}
static constexpr bool supports_bias()
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_u8q_mopa_1VLx4VL;
- StdTransformsSME<operand_type, result_type, 1, 4, 4, true> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 1, 4, 4, true> transforms = {};
cls_sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const CPUInfo *)
{
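Note (not part of the patch): this hpp hunk also flips supports_accumulate() from false to true for the quantized u8q kernel. A minimal sketch of how a caller-side selector can use such a compile-time capability flag; the class and function names here are illustrative, not the framework's actual dispatch code:

#include <iostream>

struct KernelTraits {
    static constexpr bool supports_accumulate() { return true; }
};

template <typename T>
void choose_kernel(bool need_accumulate)
{
    // Skip kernels that cannot accumulate into C when the caller needs it.
    if (need_accumulate && !T::supports_accumulate()) {
        std::cout << "kernel rejected\n";
        return;
    }
    std::cout << "kernel selected\n";
}

int main() { choose_kernel<KernelTraits>(true); }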
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp
index 96247d2db5..0482a5ea0f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_1VLx4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
B(B), kstride_bytes(roundup(K, 4) * sizeof(uint8_t)),
C(C), ldcb(ldc * sizeof(uint8_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+ min(0), max(0),
bias(bias), n_0(n_0),
accumulator_buffer(accumulator_buffer),
@@ -74,13 +74,14 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
const long kstride_bytes;
uint8_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
- int32_t min = std::numeric_limits<uint8_t>::min();
- int32_t max = std::numeric_limits<uint8_t>::max();
+ const long M, N, K;
+ int32_t min;
+ int32_t max;
const int32_t *const bias;
const int n_0;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
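Note (not part of the patch): the min/max members above now default to zero rather than std::numeric_limits<uint8_t>; the clamp bounds the kernel actually applies appear to come from the Requantize32 block, loaded by the ld1rw of minval/maxval further down before the sclamp step. A minimal sketch of that clamping, with illustrative names:

#include <algorithm>
#include <cstdint>
#include <iostream>

struct Requantize32Params {
    int32_t minval = 0;    // e.g. std::numeric_limits<uint8_t>::min()
    int32_t maxval = 255;  // e.g. std::numeric_limits<uint8_t>::max()
};

int32_t clamp_result(int32_t v, const Requantize32Params &rq)
{
    return std::clamp(v, rq.minval, rq.maxval);  // mirrors the in-register sclamp
}

int main()
{
    Requantize32Params rq;
    std::cout << clamp_result(300, rq) << "\n";  // prints 255
}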
@@ -89,131 +90,131 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
KernelArgs args(A, B, C, ldc, M, N, K, bias, rq, n_0, accumulate, accumulator_buffer);
__asm__ __volatile__(
- "ldr x14, [%x[args], %[offsetof_flags]]\n"
+ "ldr x15, [%x[args], %[offsetof_flags]]\n"
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p1.b\n"
".inst 0x25207811 // ptrue pn9.b\n"
+ "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
"ldr x13, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
- "tbz x14, #0, 2f\n"
+ "tbz x15, #0, 2f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5a4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x13]\n"
- ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa041c5a8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
- ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa042c5a8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
- ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa043c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa041c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa042c5dc // ld1w { z28.s-z31.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x13, x13, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
- "ldr w10, [%x[args], %[offsetof_M]]\n"
+ "ldr w11, [%x[args], %[offsetof_M]]\n"
+ "mov x10, #0x0\n"
"mov x9, #0x0\n"
- "mov x28, #0x0\n"
- "ldr w27, [%x[args], %[offsetof_N]]\n"
- "ldr x26, [%x[args], %[offsetof_A]]\n"
+ "ldr w28, [%x[args], %[offsetof_N]]\n"
+ "ldr x27, [%x[args], %[offsetof_A]]\n"
"3:" // M and N loop
- "mov x25, x26\n"
- ".inst 0x25bb6790 // whilelt pn8.s, x28, x27, VLx4\n"
- "tbnz x14, #0, 4f\n"
+ "mov x26, x27\n"
+ ".inst 0x25bc6530 // whilelt pn8.s, x9, x28, VLx4\n"
+ "tbnz x15, #0, 4f\n"
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- ".inst 0xa11cc289 // ldnt1w { z1.s, z5.s, z9.s, z13.s }, p8/Z, [x20, x28, LSL #2]\n"
- ".inst 0xc0902420 // addha za0.s, p1/M, p1/M, z1.s\n"
- ".inst 0xc09024a1 // addha za1.s, p1/M, p1/M, z5.s\n"
- ".inst 0xc0902522 // addha za2.s, p1/M, p1/M, z9.s\n"
- ".inst 0xc09025a3 // addha za3.s, p1/M, p1/M, z13.s\n"
+ ".inst 0xa009c290 // ld1w { z16.s-z19.s }, p8/Z, [x20, x9, LSL #2]\n"
+ ".inst 0xc0902600 // addha za0.s, p1/M, p1/M, z16.s\n"
+ ".inst 0xc0902621 // addha za1.s, p1/M, p1/M, z17.s\n"
+ ".inst 0xc0902642 // addha za2.s, p1/M, p1/M, z18.s\n"
+ ".inst 0xc0902663 // addha za3.s, p1/M, p1/M, z19.s\n"
"4:" // Prepare accumulators: Test for last block
- "mov x20, x28\n"
- "mov x21, x9\n"
+ "mov x20, x9\n"
+ "mov x21, x10\n"
"incw x20, ALL, MUL #4\n"
"incw x21\n"
- "cmp x20, x27\n"
- "csel x21, x9, x21, LT\n"
- "mov x20, x14\n"
- "bfm x14, XZR, #0x0, #0x0 // bfc x14, #0x0, #0x1\n"
- "cmp x21, x10\n"
- "csel x14, x20, x14, LT\n"
+ "cmp x20, x28\n"
+ "mov x20, x15\n"
+ "csel x21, x10, x21, LT\n"
+ "bfm x15, XZR, #0x0, #0x0 // bfc x15, #0x0, #0x1\n"
+ "cmp x21, x11\n"
+ "csel x15, x20, x15, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x28, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- "ld1b { z20.b }, p1/Z, [x25]\n"
- ".inst 0xa04086e5 // ldnt1b { z4.b-z7.b }, pn9.b/Z, [x23]\n"
- "ld1b { z11.b }, p1/Z, [x25, #1, MUL VL]\n"
- ".inst 0xa04186f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- "ld1b { z2.b }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0xa04286fd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- "ld1b { z14.b }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0xa04386f1 // ldnt1b { z16.b-z19.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x9, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z5.b }, p1/Z, [x26]\n"
+ ".inst 0xa14086e0 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x23]\n"
+ "ld1b { z31.b }, p1/Z, [x26, #1, MUL VL]\n"
+ ".inst 0xa14186f2 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1b { z1.b }, p1/Z, [x26, #2, MUL VL]\n"
+ ".inst 0xa14286f0 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1b { z6.b }, p1/Z, [x26, #3, MUL VL]\n"
+ "addvl x26, x26, #4\n"
+ ".inst 0xa14386e3 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa1a42680 // umopa za0.s, p1/M, p1/M, z20.b, z4.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1a52681 // umopa za1.s, p1/M, p1/M, z20.b, z5.b\n"
- ".inst 0xa1a62682 // umopa za2.s, p1/M, p1/M, z20.b, z6.b\n"
- ".inst 0xa1a72683 // umopa za3.s, p1/M, p1/M, z20.b, z7.b\n"
- "ld1b { z20.b }, p1/Z, [x25]\n"
- ".inst 0xa1b82560 // umopa za0.s, p1/M, p1/M, z11.b, z24.b\n"
- ".inst 0xa04086e5 // ldnt1b { z4.b-z7.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa1b92561 // umopa za1.s, p1/M, p1/M, z11.b, z25.b\n"
- ".inst 0xa1ba2562 // umopa za2.s, p1/M, p1/M, z11.b, z26.b\n"
- ".inst 0xa1bb2563 // umopa za3.s, p1/M, p1/M, z11.b, z27.b\n"
- "ld1b { z11.b }, p1/Z, [x25, #1, MUL VL]\n"
- ".inst 0xa1bc2440 // umopa za0.s, p1/M, p1/M, z2.b, z28.b\n"
- ".inst 0xa04186f9 // ldnt1b { z24.b-z27.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa1bd2441 // umopa za1.s, p1/M, p1/M, z2.b, z29.b\n"
- ".inst 0xa1be2442 // umopa za2.s, p1/M, p1/M, z2.b, z30.b\n"
- ".inst 0xa1bf2443 // umopa za3.s, p1/M, p1/M, z2.b, z31.b\n"
- "ld1b { z2.b }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0xa04286fd // ldnt1b { z28.b-z31.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
- ".inst 0xa1b025c0 // umopa za0.s, p1/M, p1/M, z14.b, z16.b\n"
- ".inst 0xa1b125c1 // umopa za1.s, p1/M, p1/M, z14.b, z17.b\n"
- ".inst 0xa1b225c2 // umopa za2.s, p1/M, p1/M, z14.b, z18.b\n"
- ".inst 0xa1b325c3 // umopa za3.s, p1/M, p1/M, z14.b, z19.b\n"
- "ld1b { z14.b }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0xa04386f1 // ldnt1b { z16.b-z19.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ ".inst 0xa1a024a0 // umopa za0.s, p1/M, p1/M, z5.b, z0.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1a424a1 // umopa za1.s, p1/M, p1/M, z5.b, z4.b\n"
+ ".inst 0xa1a824a2 // umopa za2.s, p1/M, p1/M, z5.b, z8.b\n"
+ ".inst 0xa1ac24a3 // umopa za3.s, p1/M, p1/M, z5.b, z12.b\n"
+ "ld1b { z5.b }, p1/Z, [x26]\n"
+ ".inst 0xa1b227e0 // umopa za0.s, p1/M, p1/M, z31.b, z18.b\n"
+ ".inst 0xa14086e0 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa1b627e1 // umopa za1.s, p1/M, p1/M, z31.b, z22.b\n"
+ ".inst 0xa1ba27e2 // umopa za2.s, p1/M, p1/M, z31.b, z26.b\n"
+ ".inst 0xa1be27e3 // umopa za3.s, p1/M, p1/M, z31.b, z30.b\n"
+ "ld1b { z31.b }, p1/Z, [x26, #1, MUL VL]\n"
+ ".inst 0xa1b02420 // umopa za0.s, p1/M, p1/M, z1.b, z16.b\n"
+ ".inst 0xa14186f2 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1b42421 // umopa za1.s, p1/M, p1/M, z1.b, z20.b\n"
+ ".inst 0xa1b82422 // umopa za2.s, p1/M, p1/M, z1.b, z24.b\n"
+ ".inst 0xa1bc2423 // umopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
+ "ld1b { z1.b }, p1/Z, [x26, #2, MUL VL]\n"
+ ".inst 0xa14286f0 // ld1b { z16.b, z20.b, z24.b, z28.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ ".inst 0xa1a324c0 // umopa za0.s, p1/M, p1/M, z6.b, z3.b\n"
+ ".inst 0xa1a724c1 // umopa za1.s, p1/M, p1/M, z6.b, z7.b\n"
+ ".inst 0xa1ab24c2 // umopa za2.s, p1/M, p1/M, z6.b, z11.b\n"
+ ".inst 0xa1af24c3 // umopa za3.s, p1/M, p1/M, z6.b, z15.b\n"
+ "ld1b { z6.b }, p1/Z, [x26, #3, MUL VL]\n"
+ "addvl x26, x26, #4\n"
+ ".inst 0xa14386e3 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
"addvl x23, x23, #16\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa1a42680 // umopa za0.s, p1/M, p1/M, z20.b, z4.b\n"
- ".inst 0xa1a52681 // umopa za1.s, p1/M, p1/M, z20.b, z5.b\n"
- ".inst 0xa1a62682 // umopa za2.s, p1/M, p1/M, z20.b, z6.b\n"
- ".inst 0xa1a72683 // umopa za3.s, p1/M, p1/M, z20.b, z7.b\n"
- ".inst 0xa1b82560 // umopa za0.s, p1/M, p1/M, z11.b, z24.b\n"
- ".inst 0xa1b92561 // umopa za1.s, p1/M, p1/M, z11.b, z25.b\n"
- ".inst 0xa1ba2562 // umopa za2.s, p1/M, p1/M, z11.b, z26.b\n"
- ".inst 0xa1bb2563 // umopa za3.s, p1/M, p1/M, z11.b, z27.b\n"
- ".inst 0xa1bc2440 // umopa za0.s, p1/M, p1/M, z2.b, z28.b\n"
- ".inst 0xa1bd2441 // umopa za1.s, p1/M, p1/M, z2.b, z29.b\n"
- ".inst 0xa1be2442 // umopa za2.s, p1/M, p1/M, z2.b, z30.b\n"
- ".inst 0xa1bf2443 // umopa za3.s, p1/M, p1/M, z2.b, z31.b\n"
- ".inst 0xa1b025c0 // umopa za0.s, p1/M, p1/M, z14.b, z16.b\n"
- ".inst 0xa1b125c1 // umopa za1.s, p1/M, p1/M, z14.b, z17.b\n"
- ".inst 0xa1b225c2 // umopa za2.s, p1/M, p1/M, z14.b, z18.b\n"
- ".inst 0xa1b325c3 // umopa za3.s, p1/M, p1/M, z14.b, z19.b\n"
+ ".inst 0xa1a024a0 // umopa za0.s, p1/M, p1/M, z5.b, z0.b\n"
+ ".inst 0xa1a424a1 // umopa za1.s, p1/M, p1/M, z5.b, z4.b\n"
+ ".inst 0xa1a824a2 // umopa za2.s, p1/M, p1/M, z5.b, z8.b\n"
+ ".inst 0xa1ac24a3 // umopa za3.s, p1/M, p1/M, z5.b, z12.b\n"
+ ".inst 0xa1b227e0 // umopa za0.s, p1/M, p1/M, z31.b, z18.b\n"
+ ".inst 0xa1b627e1 // umopa za1.s, p1/M, p1/M, z31.b, z22.b\n"
+ ".inst 0xa1ba27e2 // umopa za2.s, p1/M, p1/M, z31.b, z26.b\n"
+ ".inst 0xa1be27e3 // umopa za3.s, p1/M, p1/M, z31.b, z30.b\n"
+ ".inst 0xa1b02420 // umopa za0.s, p1/M, p1/M, z1.b, z16.b\n"
+ ".inst 0xa1b42421 // umopa za1.s, p1/M, p1/M, z1.b, z20.b\n"
+ ".inst 0xa1b82422 // umopa za2.s, p1/M, p1/M, z1.b, z24.b\n"
+ ".inst 0xa1bc2423 // umopa za3.s, p1/M, p1/M, z1.b, z28.b\n"
+ ".inst 0xa1a324c0 // umopa za0.s, p1/M, p1/M, z6.b, z3.b\n"
+ ".inst 0xa1a724c1 // umopa za1.s, p1/M, p1/M, z6.b, z7.b\n"
+ ".inst 0xa1ab24c2 // umopa za2.s, p1/M, p1/M, z6.b, z11.b\n"
+ ".inst 0xa1af24c3 // umopa za3.s, p1/M, p1/M, z6.b, z15.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- "ld1b { z16.b }, p1/Z, [x25]\n"
- "subs x21, x21, #0x1\n"
- "addvl x25, x25, #1\n"
+ "ld1b { z16.b }, p1/Z, [x26]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x26, x26, #1\n"
".inst 0xa04086e4 // ld1b { z4.b-z7.b }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #4\n"
".inst 0xa1a42600 // umopa za0.s, p1/M, p1/M, z16.b, z4.b\n"
@@ -222,182 +223,182 @@ void sme2_interleaved_nomerge_u8q_mopa_1VLx4VL(const uint8_t *const A, const uin
".inst 0xa1a72603 // umopa za3.s, p1/M, p1/M, z16.b, z7.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- "ld1w { z15.s }, p1/Z, [x25]\n"
- "addvl x25, x25, #1\n"
+ "ld1w { z15.s }, p1/Z, [x26]\n"
+ "addvl x26, x26, #1\n"
".inst 0xc09125e0 // addva za0.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e1 // addva za1.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e2 // addva za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e3 // addva za3.s, p1/M, p1/M, z15.s\n"
- "tbz x14, #1, 14f\n"
- "tbz x14, #0, 12f\n"
+ "tbz x15, #1, 14f\n"
+ "tbz x15, #0, 12f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5a0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x13]\n"
- ".inst 0xc0860418 // mova { z24.s-z27.s }, za0h.s[x12]\n"
- ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xa041c5a0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
- ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa040c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5d0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa042c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa060c5bc // st1w { z28.s-z31.s }, pn9.b, [x13]\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa061c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13, #0x4, MUL VL]\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5b4 // st1w { z20.s-z23.s }, pn9.b, [x13, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c578 // st1w { z24.s-z27.s }, pn9.b, [x11]\n"
+ ".inst 0xa063c5b8 // st1w { z24.s-z27.s }, pn9.b, [x13, #0xc, MUL VL]\n"
"addvl x13, x13, #16\n"
- ".inst 0xa061c564 // st1w { z4.s-z7.s }, pn9.b, [x11, #0x4, MUL VL]\n"
- ".inst 0xa062c574 // st1w { z20.s-z23.s }, pn9.b, [x11, #0x8, MUL VL]\n"
- ".inst 0xa063c560 // st1w { z0.s-z3.s }, pn9.b, [x11, #0xc, MUL VL]\n"
- "addvl x11, x11, #16\n"
"blt 11b\n"
"b 21f\n"
"12:" // Store to partial result buffer: Store only
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- ".inst 0xc0860434 // mova { z20.s-z23.s }, za1h.s[x12]\n"
- ".inst 0xa060c564 // st1w { z4.s-z7.s }, pn9.b, [x11]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
- ".inst 0xa061c574 // st1w { z20.s-z23.s }, pn9.b, [x11, #0x4, MUL VL]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5a8 // st1w { z8.s-z11.s }, pn9.b, [x13]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5a4 // st1w { z4.s-z7.s }, pn9.b, [x13, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c56c // st1w { z12.s-z15.s }, pn9.b, [x11, #0x8, MUL VL]\n"
- ".inst 0xa063c568 // st1w { z8.s-z11.s }, pn9.b, [x11, #0xc, MUL VL]\n"
- "addvl x11, x11, #16\n"
+ ".inst 0xa062c5ac // st1w { z12.s-z15.s }, pn9.b, [x13, #0x8, MUL VL]\n"
+ ".inst 0xa063c5b0 // st1w { z16.s-z19.s }, pn9.b, [x13, #0xc, MUL VL]\n"
+ "addvl x13, x13, #16\n"
"blt 13b\n"
"b 21f\n"
"14:" // Store to output array
- "ldr x24, [%x[args], %[offsetof_C]]\n"
- "add x24, x24, x28\n" // C += n
- "sub x23, x10, x9\n"
+ "ldr x25, [%x[args], %[offsetof_C]]\n"
+ "sub x24, x11, x10\n"
"ld1rw { z4.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
- "madd x24, x9, x22, x24\n" // C += m * ldc
+ "ldr x23, [%x[args], %[offsetof_ldcb]]\n"
"ld1rw { z5.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ld1rw { z6.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "add x25, x25, x9\n" // C += n
"ld1rw { z7.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z12.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z13.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z14.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z15.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+ "madd x25, x10, x23, x25\n" // C += m * ldc
+ "ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "ld1rw { z3.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "ld1rw { z8.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
"ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
"ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
- "tbz x14, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x28\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa040c284 // ld1w { z4.s-z7.s }, p8/Z, [x20]\n"
+ "tbz x15, #2, 15f\n"
+ "ldr w22, [%x[args], %[offsetof_n_0]]\n"
+ "ldr x21, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
"ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa040c28c // ld1w { z12.s-z15.s }, p8/Z, [x20]\n"
+ "add x22, x22, x9\n"
+ "add x21, x21, x22, LSL #2\n"
+ "add x20, x20, x22, LSL #2\n"
+ ".inst 0xa040c2a4 // ld1w { z4.s-z7.s }, p8/Z, [x21]\n"
+ ".inst 0xa040c280 // ld1w { z0.s-z3.s }, p8/Z, [x20]\n"
"15:" // Store to output array: Load per-channel parameters: End
"cntw x20\n"
- "whilelt p0.b, x28, x27\n"
- "cmp x23, x20\n"
- "csel x20, x23, x20, LT\n"
- "lsr x21, x20, #0x1\n"
+ "whilelt p0.b, x9, x28\n"
+ "cmp x24, x20\n"
"mov x12, #0x0\n"
+ "csel x20, x24, x20, LT\n"
+ "lsr x21, x20, #0x1\n"
"and x20, x20, #0x1\n"
"cbz x21, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc086001a // mova { z26.s-z27.s }, za0h.s[x12, 0:1]\n"
- ".inst 0xc086005c // mova { z28.s-z29.s }, za1h.s[x12, 0:1]\n"
- ".inst 0xc1a4a41a // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z4.s\n"
- ".inst 0xc0860096 // mova { z22.s-z23.s }, za2h.s[x12, 0:1]\n"
- ".inst 0xc08600d0 // mova { z16.s-z17.s }, za3h.s[x12, 0:1]\n"
- ".inst 0xc1a5a41c // sqdmulh { z28.s-z29.s }, { z28.s-z29.s }, z5.s\n"
- ".inst 0xc1a6a416 // sqdmulh { z22.s-z23.s }, { z22.s-z23.s }, z6.s\n"
+ ".inst 0xc0860010 // mova { z16.s-z17.s }, za0h.s[x12, 0:1]\n"
+ ".inst 0xc086005e // mova { z30.s-z31.s }, za1h.s[x12, 0:1]\n"
+ ".inst 0xc086009a // mova { z26.s-z27.s }, za2h.s[x12, 0:1]\n"
+ ".inst 0xc08600cc // mova { z12.s-z13.s }, za3h.s[x12, 0:1]\n"
+ ".inst 0xc1a4a410 // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z4.s\n"
+ ".inst 0xc1a5a41e // sqdmulh { z30.s-z31.s }, { z30.s-z31.s }, z5.s\n"
"add x12, x12, #0x2\n"
+ ".inst 0xc1a6a41a // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z6.s\n"
"cmp x12, x21, LSL #1\n"
- ".inst 0xc1a7a410 // sqdmulh { z16.s-z17.s }, { z16.s-z17.s }, z7.s\n"
- ".inst 0xc1aca23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z12.s\n"
- ".inst 0xc1ada23c // srshl { z28.s-z29.s }, { z28.s-z29.s }, z13.s\n"
- ".inst 0xc1aea236 // srshl { z22.s-z23.s }, { z22.s-z23.s }, z14.s\n"
- ".inst 0xc1afa230 // srshl { z16.s-z17.s }, { z16.s-z17.s }, z15.s\n"
- ".inst 0xc1a0a31a // add { z26.s-z27.s }, { z26.s-z27.s }, z0.s\n"
- ".inst 0xc1a0a31c // add { z28.s-z29.s }, { z28.s-z29.s }, z0.s\n"
- ".inst 0xc1a0a316 // add { z22.s-z23.s }, { z22.s-z23.s }, z0.s\n"
- ".inst 0xc1a0a310 // add { z16.s-z17.s }, { z16.s-z17.s }, z0.s\n"
- ".inst 0xc1b4c6ba // sclamp { z26.s-z27.s }, z21.s, z20.s\n"
- ".inst 0xc1b4c6bc // sclamp { z28.s-z29.s }, z21.s, z20.s\n"
- "uzp1 z19.b, z26.b, z28.b\n"
- ".inst 0xc1b4c6b6 // sclamp { z22.s-z23.s }, z21.s, z20.s\n"
+ ".inst 0xc1a7a40c // sqdmulh { z12.s-z13.s }, { z12.s-z13.s }, z7.s\n"
+ ".inst 0xc1a0a230 // srshl { z16.s-z17.s }, { z16.s-z17.s }, z0.s\n"
+ ".inst 0xc1a1a23e // srshl { z30.s-z31.s }, { z30.s-z31.s }, z1.s\n"
+ ".inst 0xc1a2a23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z2.s\n"
+ ".inst 0xc1a3a22c // srshl { z12.s-z13.s }, { z12.s-z13.s }, z3.s\n"
+ ".inst 0xc1a8a310 // add { z16.s-z17.s }, { z16.s-z17.s }, z8.s\n"
+ ".inst 0xc1a8a31e // add { z30.s-z31.s }, { z30.s-z31.s }, z8.s\n"
+ ".inst 0xc1a8a31a // add { z26.s-z27.s }, { z26.s-z27.s }, z8.s\n"
+ ".inst 0xc1a8a30c // add { z12.s-z13.s }, { z12.s-z13.s }, z8.s\n"
".inst 0xc1b4c6b0 // sclamp { z16.s-z17.s }, z21.s, z20.s\n"
- "uzp1 z16.b, z22.b, z16.b\n"
- "uzp1 z18.b, z27.b, z29.b\n"
- "uzp1 z17.b, z23.b, z17.b\n"
- "uzp1 z16.b, z19.b, z16.b\n"
- "st1b { z16.b }, p0, [x24]\n"
- "add x24, x24, x22\n"
- "uzp1 z16.b, z18.b, z17.b\n"
- "st1b { z16.b }, p0, [x24]\n"
- "add x24, x24, x22\n"
+ ".inst 0xc1b4c6be // sclamp { z30.s-z31.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4c6ba // sclamp { z26.s-z27.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4c6ac // sclamp { z12.s-z13.s }, z21.s, z20.s\n"
+ "uzp1 z19.b, z16.b, z30.b\n"
+ "uzp1 z18.b, z17.b, z31.b\n"
+ "uzp1 z17.b, z26.b, z12.b\n"
+ "uzp1 z16.b, z27.b, z13.b\n"
+ "uzp1 z17.b, z19.b, z17.b\n"
+ "uzp1 z16.b, z18.b, z16.b\n"
+ "st1b { z17.b }, p0, [x25]\n"
+ "add x25, x25, x23\n"
+ "st1b { z16.b }, p0, [x25]\n"
+ "add x25, x25, x23\n"
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 18f\n"
".inst 0xc086000a // mova { z10.s-z11.s }, za0h.s[x12, 0:1]\n"
- ".inst 0xc0860058 // mova { z24.s-z25.s }, za1h.s[x12, 0:1]\n"
+ ".inst 0xc086005a // mova { z26.s-z27.s }, za1h.s[x12, 0:1]\n"
+ ".inst 0xc086008e // mova { z14.s-z15.s }, za2h.s[x12, 0:1]\n"
+ ".inst 0xc08600d6 // mova { z22.s-z23.s }, za3h.s[x12, 0:1]\n"
".inst 0xc1a4a40a // sqdmulh { z10.s-z11.s }, { z10.s-z11.s }, z4.s\n"
- ".inst 0xc086009a // mova { z26.s-z27.s }, za2h.s[x12, 0:1]\n"
- ".inst 0xc08600de // mova { z30.s-z31.s }, za3h.s[x12, 0:1]\n"
- ".inst 0xc1a5a418 // sqdmulh { z24.s-z25.s }, { z24.s-z25.s }, z5.s\n"
- ".inst 0xc1a6a41a // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z6.s\n"
- ".inst 0xc1a7a41e // sqdmulh { z30.s-z31.s }, { z30.s-z31.s }, z7.s\n"
- ".inst 0xc1aca22a // srshl { z10.s-z11.s }, { z10.s-z11.s }, z12.s\n"
- ".inst 0xc1ada238 // srshl { z24.s-z25.s }, { z24.s-z25.s }, z13.s\n"
- ".inst 0xc1aea23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z14.s\n"
- ".inst 0xc1afa23e // srshl { z30.s-z31.s }, { z30.s-z31.s }, z15.s\n"
- ".inst 0xc1a0a30a // add { z10.s-z11.s }, { z10.s-z11.s }, z0.s\n"
- ".inst 0xc1a0a318 // add { z24.s-z25.s }, { z24.s-z25.s }, z0.s\n"
- ".inst 0xc1a0a31a // add { z26.s-z27.s }, { z26.s-z27.s }, z0.s\n"
- ".inst 0xc1a0a31e // add { z30.s-z31.s }, { z30.s-z31.s }, z0.s\n"
+ ".inst 0xc1a5a41a // sqdmulh { z26.s-z27.s }, { z26.s-z27.s }, z5.s\n"
+ ".inst 0xc1a6a40e // sqdmulh { z14.s-z15.s }, { z14.s-z15.s }, z6.s\n"
+ ".inst 0xc1a7a416 // sqdmulh { z22.s-z23.s }, { z22.s-z23.s }, z7.s\n"
+ ".inst 0xc1a0a22a // srshl { z10.s-z11.s }, { z10.s-z11.s }, z0.s\n"
+ ".inst 0xc1a1a23a // srshl { z26.s-z27.s }, { z26.s-z27.s }, z1.s\n"
+ ".inst 0xc1a2a22e // srshl { z14.s-z15.s }, { z14.s-z15.s }, z2.s\n"
+ ".inst 0xc1a3a236 // srshl { z22.s-z23.s }, { z22.s-z23.s }, z3.s\n"
+ ".inst 0xc1a8a30a // add { z10.s-z11.s }, { z10.s-z11.s }, z8.s\n"
+ ".inst 0xc1a8a31a // add { z26.s-z27.s }, { z26.s-z27.s }, z8.s\n"
+ ".inst 0xc1a8a30e // add { z14.s-z15.s }, { z14.s-z15.s }, z8.s\n"
+ ".inst 0xc1a8a316 // add { z22.s-z23.s }, { z22.s-z23.s }, z8.s\n"
".inst 0xc1b4c6aa // sclamp { z10.s-z11.s }, z21.s, z20.s\n"
- ".inst 0xc1b4c6b8 // sclamp { z24.s-z25.s }, z21.s, z20.s\n"
- "uzp1 z17.b, z10.b, z24.b\n"
".inst 0xc1b4c6ba // sclamp { z26.s-z27.s }, z21.s, z20.s\n"
- ".inst 0xc1b4c6be // sclamp { z30.s-z31.s }, z21.s, z20.s\n"
- "uzp1 z16.b, z26.b, z30.b\n"
+ ".inst 0xc1b4c6ae // sclamp { z14.s-z15.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4c6b6 // sclamp { z22.s-z23.s }, z21.s, z20.s\n"
+ "uzp1 z17.b, z10.b, z26.b\n"
+ "uzp1 z16.b, z14.b, z22.b\n"
"uzp1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p0, [x24]\n"
+ "st1b { z16.b }, p0, [x25]\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
"19:" // Store to output array: End
- "tbz x14, #0, 21f\n"
+ "tbz x15, #0, 21f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"20:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5bc // ld1w { z28.s-z31.s }, pn9.b/Z, [x13]\n"
- ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c5b0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x13, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5ac // ld1w { z12.s-z15.s }, pn9.b/Z, [x13, #0x8, MUL VL]\n"
- ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xa043c5a0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x13, #0xc, MUL VL]\n"
+ ".inst 0xa040c5d8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x14]\n"
+ ".inst 0xa041c5cc // ld1w { z12.s-z15.s }, pn9.b/Z, [x14, #0x4, MUL VL]\n"
+ ".inst 0xa042c5c4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa043c5c0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x14, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x14, x14, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x13, x13, #16\n"
"blt 20b\n"
"21:" // End block
- "incw x28, ALL, MUL #4\n"
- "cmp x28, x27\n"
+ "incw x9, ALL, MUL #4\n"
+ "cmp x9, x28\n"
"blt 3b\n"
- "incw x9\n"
- "cmp x9, x10\n"
- "mov x28, #0x0\n"
- "mov x26, x25\n"
+ "incw x10\n"
+ "mov x9, #0x0\n"
+ "cmp x10, x11\n"
+ "mov x27, x26\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
:
: [args] "r" (&args), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_Requantize32_c_offset] "I" (offsetof(Requantize32, c_offset)), [offsetof_Requantize32_maxval] "I" (offsetof(Requantize32, maxval)), [offsetof_Requantize32_minval] "I" (offsetof(Requantize32, minval)), [offsetof_Requantize32_per_channel_muls] "I" (offsetof(Requantize32, per_channel_muls)), [offsetof_Requantize32_per_channel_right_shifts] "I" (offsetof(Requantize32, per_channel_right_shifts)), [offsetof_Requantize32_per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [offsetof_Requantize32_per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb)), [offsetof_n_0] "I" (offsetof(KernelArgs, n_0)), [rq] "r" (&rq)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp
index f8c375f9f5..a2621e85f4 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
class cls_sme2_interleaved_nomerge_u8q_mopa_2VLx2VL
{
public:
- typedef uint8_t operand_type;
+ typedef uint8_t lhs_operand_type;
+ typedef uint8_t rhs_operand_type;
typedef uint8_t result_type;
typedef void (*kern_type)(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
@@ -60,7 +61,7 @@ public:
static constexpr bool supports_accumulate()
{
- return false;
+ return true;
}
static constexpr bool supports_bias()
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_u8q_mopa_2VLx2VL;
- StdTransformsSME<operand_type, result_type, 2, 2, 4, true> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 2, 2, 4, true> transforms = {};
cls_sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp
index 9a59799529..51b93d3636 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_2VLx2VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
B(B), kstride_bytes(roundup(K, 4) * sizeof(uint8_t)),
C(C), ldcb(ldc * sizeof(uint8_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+ min(0), max(0),
bias(bias), n_0(n_0),
accumulator_buffer(accumulator_buffer),
@@ -74,13 +74,14 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
const long kstride_bytes;
uint8_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
- int32_t min = std::numeric_limits<uint8_t>::min();
- int32_t max = std::numeric_limits<uint8_t>::max();
+ const long M, N, K;
+ int32_t min;
+ int32_t max;
const int32_t *const bias;
const int n_0;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
@@ -99,17 +100,17 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
- ".inst 0xa041c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840501 // mova za1h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -124,108 +125,108 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- ".inst 0xa00a4299 // ldnt1w { z24.s-z25.s }, p8/Z, [x20, x10, LSL #2]\n"
- ".inst 0xc0902700 // addha za0.s, p1/M, p1/M, z24.s\n"
- ".inst 0xc0902721 // addha za1.s, p1/M, p1/M, z25.s\n"
- ".inst 0xc0902702 // addha za2.s, p1/M, p1/M, z24.s\n"
- ".inst 0xc0902723 // addha za3.s, p1/M, p1/M, z25.s\n"
+ ".inst 0xa10a4294 // ld1w { z20.s, z28.s }, p8/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xc0902680 // addha za0.s, p1/M, p1/M, z20.s\n"
+ ".inst 0xc0902781 // addha za1.s, p1/M, p1/M, z28.s\n"
+ ".inst 0xc0902682 // addha za2.s, p1/M, p1/M, z20.s\n"
+ ".inst 0xc0902783 // addha za3.s, p1/M, p1/M, z28.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20, ALL, MUL #2\n"
"incw x21, ALL, MUL #2\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1400763 // ld1b { z3.b, z11.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa14006f9 // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa1410774 // ld1b { z20.b, z28.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa04106f7 // ldnt1b { z22.b-z23.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa1420775 // ld1b { z21.b, z29.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206f8 // ldnt1b { z16.b, z24.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa1430765 // ld1b { z5.b, z13.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0400778 // ld1b { z24.b-z25.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa14006f7 // ld1b { z23.b, z31.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa1410776 // ld1b { z22.b, z30.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa04106fa // ld1b { z26.b-z27.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa0420766 // ld1b { z6.b-z7.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14206e0 // ld1b { z0.b, z8.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa043077c // ld1b { z28.b-z29.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa14306ef // ldnt1b { z7.b, z15.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa04306ec // ld1b { z12.b-z13.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa1b12460 // umopa za0.s, p1/M, p1/M, z3.b, z17.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1b92461 // umopa za1.s, p1/M, p1/M, z3.b, z25.b\n"
- ".inst 0xa1b12562 // umopa za2.s, p1/M, p1/M, z11.b, z17.b\n"
- ".inst 0xa1b92563 // umopa za3.s, p1/M, p1/M, z11.b, z25.b\n"
- ".inst 0xa1400763 // ld1b { z3.b, z11.b }, pn9.b/Z, [x27]\n"
- ".inst 0xa1b62680 // umopa za0.s, p1/M, p1/M, z20.b, z22.b\n"
- ".inst 0xa14006f9 // ldnt1b { z17.b, z25.b }, pn9.b/Z, [x23]\n"
- ".inst 0xa1b72681 // umopa za1.s, p1/M, p1/M, z20.b, z23.b\n"
- ".inst 0xa1b62782 // umopa za2.s, p1/M, p1/M, z28.b, z22.b\n"
- ".inst 0xa1b72783 // umopa za3.s, p1/M, p1/M, z28.b, z23.b\n"
- ".inst 0xa1410774 // ld1b { z20.b, z28.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
- ".inst 0xa1b026a0 // umopa za0.s, p1/M, p1/M, z21.b, z16.b\n"
- ".inst 0xa04106f7 // ldnt1b { z22.b-z23.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
- ".inst 0xa1b826a1 // umopa za1.s, p1/M, p1/M, z21.b, z24.b\n"
- ".inst 0xa1b027a2 // umopa za2.s, p1/M, p1/M, z29.b, z16.b\n"
- ".inst 0xa1b827a3 // umopa za3.s, p1/M, p1/M, z29.b, z24.b\n"
- ".inst 0xa1420775 // ld1b { z21.b, z29.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa14206f8 // ldnt1b { z16.b, z24.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
- ".inst 0xa1a724a0 // umopa za0.s, p1/M, p1/M, z5.b, z7.b\n"
- ".inst 0xa1af24a1 // umopa za1.s, p1/M, p1/M, z5.b, z15.b\n"
- ".inst 0xa1a725a2 // umopa za2.s, p1/M, p1/M, z13.b, z7.b\n"
- ".inst 0xa1af25a3 // umopa za3.s, p1/M, p1/M, z13.b, z15.b\n"
- ".inst 0xa1430765 // ld1b { z5.b, z13.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ ".inst 0xa1b72700 // umopa za0.s, p1/M, p1/M, z24.b, z23.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1bf2701 // umopa za1.s, p1/M, p1/M, z24.b, z31.b\n"
+ ".inst 0xa1b72722 // umopa za2.s, p1/M, p1/M, z25.b, z23.b\n"
+ ".inst 0xa1bf2723 // umopa za3.s, p1/M, p1/M, z25.b, z31.b\n"
+ ".inst 0xa0400778 // ld1b { z24.b-z25.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa1ba26c0 // umopa za0.s, p1/M, p1/M, z22.b, z26.b\n"
+ ".inst 0xa14006f7 // ld1b { z23.b, z31.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa1bb26c1 // umopa za1.s, p1/M, p1/M, z22.b, z27.b\n"
+ ".inst 0xa1ba27c2 // umopa za2.s, p1/M, p1/M, z30.b, z26.b\n"
+ ".inst 0xa1bb27c3 // umopa za3.s, p1/M, p1/M, z30.b, z27.b\n"
+ ".inst 0xa1410776 // ld1b { z22.b, z30.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa1a024c0 // umopa za0.s, p1/M, p1/M, z6.b, z0.b\n"
+ ".inst 0xa04106fa // ld1b { z26.b-z27.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa1a824c1 // umopa za1.s, p1/M, p1/M, z6.b, z8.b\n"
+ ".inst 0xa1a024e2 // umopa za2.s, p1/M, p1/M, z7.b, z0.b\n"
+ ".inst 0xa1a824e3 // umopa za3.s, p1/M, p1/M, z7.b, z8.b\n"
+ ".inst 0xa0420766 // ld1b { z6.b-z7.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14206e0 // ld1b { z0.b, z8.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1ac2780 // umopa za0.s, p1/M, p1/M, z28.b, z12.b\n"
+ ".inst 0xa1ad2781 // umopa za1.s, p1/M, p1/M, z28.b, z13.b\n"
+ ".inst 0xa1ac27a2 // umopa za2.s, p1/M, p1/M, z29.b, z12.b\n"
+ ".inst 0xa1ad27a3 // umopa za3.s, p1/M, p1/M, z29.b, z13.b\n"
+ ".inst 0xa043077c // ld1b { z28.b-z29.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
"addvl x27, x27, #8\n"
- ".inst 0xa14306ef // ldnt1b { z7.b, z15.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ ".inst 0xa04306ec // ld1b { z12.b-z13.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
"addvl x23, x23, #8\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa1b12460 // umopa za0.s, p1/M, p1/M, z3.b, z17.b\n"
- ".inst 0xa1b92461 // umopa za1.s, p1/M, p1/M, z3.b, z25.b\n"
- ".inst 0xa1b12562 // umopa za2.s, p1/M, p1/M, z11.b, z17.b\n"
- ".inst 0xa1b92563 // umopa za3.s, p1/M, p1/M, z11.b, z25.b\n"
- ".inst 0xa1b62680 // umopa za0.s, p1/M, p1/M, z20.b, z22.b\n"
- ".inst 0xa1b72681 // umopa za1.s, p1/M, p1/M, z20.b, z23.b\n"
- ".inst 0xa1b62782 // umopa za2.s, p1/M, p1/M, z28.b, z22.b\n"
- ".inst 0xa1b72783 // umopa za3.s, p1/M, p1/M, z28.b, z23.b\n"
- ".inst 0xa1b026a0 // umopa za0.s, p1/M, p1/M, z21.b, z16.b\n"
- ".inst 0xa1b826a1 // umopa za1.s, p1/M, p1/M, z21.b, z24.b\n"
- ".inst 0xa1b027a2 // umopa za2.s, p1/M, p1/M, z29.b, z16.b\n"
- ".inst 0xa1b827a3 // umopa za3.s, p1/M, p1/M, z29.b, z24.b\n"
- ".inst 0xa1a724a0 // umopa za0.s, p1/M, p1/M, z5.b, z7.b\n"
- ".inst 0xa1af24a1 // umopa za1.s, p1/M, p1/M, z5.b, z15.b\n"
- ".inst 0xa1a725a2 // umopa za2.s, p1/M, p1/M, z13.b, z7.b\n"
- ".inst 0xa1af25a3 // umopa za3.s, p1/M, p1/M, z13.b, z15.b\n"
+ ".inst 0xa1b72700 // umopa za0.s, p1/M, p1/M, z24.b, z23.b\n"
+ ".inst 0xa1bf2701 // umopa za1.s, p1/M, p1/M, z24.b, z31.b\n"
+ ".inst 0xa1b72722 // umopa za2.s, p1/M, p1/M, z25.b, z23.b\n"
+ ".inst 0xa1bf2723 // umopa za3.s, p1/M, p1/M, z25.b, z31.b\n"
+ ".inst 0xa1ba26c0 // umopa za0.s, p1/M, p1/M, z22.b, z26.b\n"
+ ".inst 0xa1bb26c1 // umopa za1.s, p1/M, p1/M, z22.b, z27.b\n"
+ ".inst 0xa1ba27c2 // umopa za2.s, p1/M, p1/M, z30.b, z26.b\n"
+ ".inst 0xa1bb27c3 // umopa za3.s, p1/M, p1/M, z30.b, z27.b\n"
+ ".inst 0xa1a024c0 // umopa za0.s, p1/M, p1/M, z6.b, z0.b\n"
+ ".inst 0xa1a824c1 // umopa za1.s, p1/M, p1/M, z6.b, z8.b\n"
+ ".inst 0xa1a024e2 // umopa za2.s, p1/M, p1/M, z7.b, z0.b\n"
+ ".inst 0xa1a824e3 // umopa za3.s, p1/M, p1/M, z7.b, z8.b\n"
+ ".inst 0xa1ac2780 // umopa za0.s, p1/M, p1/M, z28.b, z12.b\n"
+ ".inst 0xa1ad2781 // umopa za1.s, p1/M, p1/M, z28.b, z13.b\n"
+ ".inst 0xa1ac27a2 // umopa za2.s, p1/M, p1/M, z29.b, z12.b\n"
+ ".inst 0xa1ad27a3 // umopa za3.s, p1/M, p1/M, z29.b, z13.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
- ".inst 0xa1400773 // ld1b { z19.b, z27.b }, pn9.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
+ ".inst 0xa0400762 // ld1b { z2.b-z3.b }, pn9.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #2\n"
".inst 0xa04006f0 // ld1b { z16.b-z17.b }, pn9.b/Z, [x23]\n"
"addvl x23, x23, #2\n"
- ".inst 0xa1b02660 // umopa za0.s, p1/M, p1/M, z19.b, z16.b\n"
- ".inst 0xa1b12661 // umopa za1.s, p1/M, p1/M, z19.b, z17.b\n"
- ".inst 0xa1b02762 // umopa za2.s, p1/M, p1/M, z27.b, z16.b\n"
- ".inst 0xa1b12763 // umopa za3.s, p1/M, p1/M, z27.b, z17.b\n"
+ ".inst 0xa1b02440 // umopa za0.s, p1/M, p1/M, z2.b, z16.b\n"
+ ".inst 0xa1b12441 // umopa za1.s, p1/M, p1/M, z2.b, z17.b\n"
+ ".inst 0xa1b02462 // umopa za2.s, p1/M, p1/M, z3.b, z16.b\n"
+ ".inst 0xa1b12463 // umopa za3.s, p1/M, p1/M, z3.b, z17.b\n"
"bgt 9b\n"
"10:" // K oddments: End
- ".inst 0xa040476e // ld1w { z14.s-z15.s }, pn9.b/Z, [x27]\n"
+ ".inst 0xa1404767 // ld1w { z7.s, z15.s }, pn9.b/Z, [x27]\n"
"addvl x27, x27, #2\n"
- ".inst 0xc09125c0 // addva za0.s, p1/M, p1/M, z14.s\n"
- ".inst 0xc09125c1 // addva za1.s, p1/M, p1/M, z14.s\n"
+ ".inst 0xc09124e0 // addva za0.s, p1/M, p1/M, z7.s\n"
+ ".inst 0xc09124e1 // addva za1.s, p1/M, p1/M, z7.s\n"
".inst 0xc09125e2 // addva za2.s, p1/M, p1/M, z15.s\n"
".inst 0xc09125e3 // addva za3.s, p1/M, p1/M, z15.s\n"
"tbz x16, #1, 14f\n"
@@ -233,25 +234,25 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa041c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
- ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa043c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840783 // mova za3h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa060c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14]\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xa061c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c5dc // st1w { z28.s-z31.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 24f\n"
@@ -259,71 +260,71 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc086040c // mova { z12.s-z15.s }, za0h.s[x12]\n"
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xa060c5cc // st1w { z12.s-z15.s }, pn9.b, [x14]\n"
- ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
- ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
- ".inst 0xa061c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5dc // st1w { z28.s-z31.s }, pn9.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c5d4 // st1w { z20.s-z23.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 24f\n"
"14:" // Store to output array
"ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10\n" // C += n
"sub x25, x13, x11\n"
- "ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "ld1rw { z8.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ldr x24, [%x[args], %[offsetof_ldcb]]\n"
+ "ld1rw { z9.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
+ "ld1rw { z10.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
+ "add x26, x26, x10\n" // C += n
+ "ld1rw { z11.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"madd x26, x11, x24, x26\n" // C += m * ldc
- "ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
- "ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z3.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
- "ld1rw { z14.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
- "ld1rw { z25.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z24.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+ "ld1rw { z15.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
+ "ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+ "ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
"tbz x16, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x10\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa0404280 // ld1w { z0.s-z1.s }, p8/Z, [x20]\n"
+ "ldr w22, [%x[args], %[offsetof_n_0]]\n"
+ "ldr x21, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
"ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
- ".inst 0xa0404282 // ld1w { z2.s-z3.s }, p8/Z, [x20]\n"
+ "add x22, x22, x10\n"
+ "add x21, x21, x22, LSL #2\n"
+ "add x20, x20, x22, LSL #2\n"
+ ".inst 0xa04042a8 // ld1w { z8.s-z9.s }, p8/Z, [x21]\n"
+ ".inst 0xa040428a // ld1w { z10.s-z11.s }, p8/Z, [x20]\n"
"15:" // Store to output array: Load per-channel parameters: End
"cntw x23\n"
"whilelt p0.h, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
- ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
- ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
- ".inst 0xc1a0ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1a1ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z1.s\n"
+ ".inst 0xc086041c // mova { z28.s-z31.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xc1a8ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z8.s\n"
"add x12, x12, #0x4\n"
+ ".inst 0xc1a9ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z9.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a2aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
- ".inst 0xc1a3aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z3.s\n"
- ".inst 0xc1aeab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
- ".inst 0xc1aeab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z14.s\n"
- ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
- ".inst 0xc1b8cf28 // sclamp { z8.s-z11.s }, z25.s, z24.s\n"
- "uzp1 z16.h, z4.h, z8.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ ".inst 0xc1aaaa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z10.s\n"
+ ".inst 0xc1abaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z11.s\n"
+ ".inst 0xc1afab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z15.s\n"
+ ".inst 0xc1afab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z15.s\n"
+ ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cea0 // sclamp { z0.s-z3.s }, z21.s, z20.s\n"
+ "uzp1 z19.h, z28.h, z0.h\n"
+ "uzp1 z18.h, z29.h, z1.h\n"
+ "uzp1 z17.h, z30.h, z2.h\n"
+ "uzp1 z16.h, z31.h, z3.h\n"
+ "st1b { z19.h }, p0, [x26]\n"
"add x26, x26, x24\n"
- "uzp1 z16.h, z5.h, z9.h\n"
- "uzp1 z17.h, z6.h, z10.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ "st1b { z18.h }, p0, [x26]\n"
"add x26, x26, x24\n"
- "uzp1 z16.h, z7.h, z11.h\n"
"st1b { z17.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1b { z16.h }, p0, [x26]\n"
@@ -331,60 +332,59 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 18f\n"
- ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc1a0ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z0.s\n"
- ".inst 0xc1a1ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
+ ".inst 0xc1a8ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a2aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z2.s\n"
- ".inst 0xc1a3aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z3.s\n"
- ".inst 0xc1aeab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z14.s\n"
- ".inst 0xc1aeab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
- ".inst 0xc1b8cf28 // sclamp { z8.s-z11.s }, z25.s, z24.s\n"
- ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
- "uzp1 z16.h, z8.h, z4.h\n"
+ ".inst 0xc1a9ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z9.s\n"
+ ".inst 0xc1aaaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+ ".inst 0xc1abaa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
+ ".inst 0xc1afab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z15.s\n"
+ ".inst 0xc1afab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z15.s\n"
+ ".inst 0xc1b4cea4 // sclamp { z4.s-z7.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ "uzp1 z16.h, z4.h, z28.h\n"
"st1b { z16.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 18f\n"
"subs x20, x20, #0x1\n"
- "uzp1 z16.h, z9.h, z5.h\n"
+ "uzp1 z16.h, z5.h, z29.h\n"
"st1b { z16.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 18f\n"
- "uzp1 z16.h, z10.h, z6.h\n"
+ "uzp1 z16.h, z6.h, z30.h\n"
"st1b { z16.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 22f\n"
- "whilelt p0.h, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x23, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 20f\n"
"19:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
- ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
- ".inst 0xc1a0ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1a1ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z1.s\n"
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xc1a8ac00 // sqdmulh { z0.s-z3.s }, { z0.s-z3.s }, z8.s\n"
"add x12, x12, #0x4\n"
+ ".inst 0xc1a9ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z9.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a2aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
- ".inst 0xc1a3aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z3.s\n"
- ".inst 0xc1aeab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
- ".inst 0xc1aeab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z14.s\n"
- ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
- ".inst 0xc1b8cf34 // sclamp { z20.s-z23.s }, z25.s, z24.s\n"
- "uzp1 z16.h, z4.h, z20.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ ".inst 0xc1aaaa20 // srshl { z0.s-z3.s }, { z0.s-z3.s }, z10.s\n"
+ ".inst 0xc1abaa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z11.s\n"
+ ".inst 0xc1afab00 // add { z0.s-z3.s }, { z0.s-z3.s }, z15.s\n"
+ ".inst 0xc1afab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z15.s\n"
+ ".inst 0xc1b4cea0 // sclamp { z0.s-z3.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ "uzp1 z19.h, z0.h, z28.h\n"
+ "uzp1 z18.h, z1.h, z29.h\n"
+ "uzp1 z17.h, z2.h, z30.h\n"
+ "uzp1 z16.h, z3.h, z31.h\n"
+ "st1b { z19.h }, p0, [x26]\n"
"add x26, x26, x24\n"
- "uzp1 z16.h, z5.h, z21.h\n"
- "uzp1 z17.h, z6.h, z22.h\n"
- "st1b { z16.h }, p0, [x26]\n"
+ "st1b { z18.h }, p0, [x26]\n"
"add x26, x26, x24\n"
- "uzp1 z16.h, z7.h, z23.h\n"
"st1b { z17.h }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1b { z16.h }, p0, [x26]\n"
@@ -394,15 +394,15 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
"cbz x20, 21f\n"
".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
- ".inst 0xc1a0ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1a1ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a8ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z8.s\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a2aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
- ".inst 0xc1a3aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z3.s\n"
- ".inst 0xc1aeab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z14.s\n"
- ".inst 0xc1aeab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z14.s\n"
- ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
- ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+ ".inst 0xc1a9ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z9.s\n"
+ ".inst 0xc1aaaa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z10.s\n"
+ ".inst 0xc1abaa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z11.s\n"
+ ".inst 0xc1afab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z15.s\n"
+ ".inst 0xc1afab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z15.s\n"
+ ".inst 0xc1b4cea4 // sclamp { z4.s-z7.s }, z21.s, z20.s\n"
+ ".inst 0xc1b4ceb0 // sclamp { z16.s-z19.s }, z21.s, z20.s\n"
"uzp1 z16.h, z4.h, z16.h\n"
"st1b { z16.h }, p0, [x26]\n"
"add x26, x26, x24\n"
@@ -420,25 +420,25 @@ void sme2_interleaved_nomerge_u8q_mopa_2VLx2VL(const uint8_t *const A, const uin
"mov x12, #0x0\n"
"cntw x20\n"
"23:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 23b\n"
"24:" // End block
"incw x10, ALL, MUL #2\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #2\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp
index 04d19324c5..dbf62cbb8a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -37,7 +37,8 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
class cls_sme2_interleaved_nomerge_u8q_mopa_4VLx1VL
{
public:
- typedef uint8_t operand_type;
+ typedef uint8_t lhs_operand_type;
+ typedef uint8_t rhs_operand_type;
typedef uint8_t result_type;
typedef void (*kern_type)(const uint8_t *const A, const uint8_t *const B, uint8_t *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const Requantize32 &rq, const int n_0, bool accumulate, int32_t *const accumulator_buffer);
@@ -60,7 +61,7 @@ public:
static constexpr bool supports_accumulate()
{
- return false;
+ return true;
}
static constexpr bool supports_bias()
@@ -81,7 +82,7 @@ public:
// Default to the generic kernel
kern_type kernel = sme2_interleaved_nomerge_u8q_mopa_4VLx1VL;
- StdTransformsSME<operand_type, result_type, 4, 1, 4, true> transforms = {};
+ StdTransformsSME<lhs_operand_type, result_type, 4, 1, 4, true> transforms = {};
cls_sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const CPUInfo *)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp
index 0f3346e65e..775a3bf3d2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_u8q_mopa_4VLx1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,7 +49,7 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
B(B), kstride_bytes(roundup(K, 4) * sizeof(uint8_t)),
C(C), ldcb(ldc * sizeof(uint8_t)),
M(M), N(N), K(K),
- n_loops(((K / 4) - 1) / 2), n_tail_iters(((K / 4) - 1) % 2),
+ min(0), max(0),
bias(bias), n_0(n_0),
accumulator_buffer(accumulator_buffer),
@@ -74,13 +74,14 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
const long kstride_bytes;
uint8_t *const C;
const long ldcb;
- const long M, N, K, n_loops, n_tail_iters;
- int32_t min = std::numeric_limits<uint8_t>::min();
- int32_t max = std::numeric_limits<uint8_t>::max();
+ const long M, N, K;
+ int32_t min;
+ int32_t max;
const int32_t *const bias;
const int n_0;
+
int32_t *const accumulator_buffer;
uint64_t flags;
};
@@ -99,17 +100,17 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
"mov x12, #0x0\n"
"cntw x20\n"
"1:" // Initial accumulator load from buffer: Loop
- ".inst 0xa040c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840600 // mova za0h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa041c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840701 // mova za1h.s[x12], { z24.s-z27.s }\n"
- ".inst 0xa042c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xa040c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 1b\n"
"2:" // Initial accumulator load from buffer: End
"ldr w13, [%x[args], %[offsetof_M]]\n"
@@ -124,95 +125,95 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
"ldr x20, [%x[args], %[offsetof_bias]]\n"
".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
"cbz x20, 5f\n"
- "ldnt1w { z8.s }, p0/Z, [x20, x10, LSL #2]\n"
- ".inst 0xc0902500 // addha za0.s, p1/M, p1/M, z8.s\n"
- ".inst 0xc0902501 // addha za1.s, p1/M, p1/M, z8.s\n"
- ".inst 0xc0902502 // addha za2.s, p1/M, p1/M, z8.s\n"
- ".inst 0xc0902503 // addha za3.s, p1/M, p1/M, z8.s\n"
+ "ld1w { z6.s }, p0/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xc09024c0 // addha za0.s, p1/M, p1/M, z6.s\n"
+ ".inst 0xc09024c1 // addha za1.s, p1/M, p1/M, z6.s\n"
+ ".inst 0xc09024c2 // addha za2.s, p1/M, p1/M, z6.s\n"
+ ".inst 0xc09024c3 // addha za3.s, p1/M, p1/M, z6.s\n"
"4:" // Prepare accumulators: Test for last block
"mov x20, x10\n"
"mov x21, x11\n"
"incw x20\n"
"incw x21, ALL, MUL #4\n"
"cmp x20, x9\n"
- "csel x21, x11, x21, LT\n"
"mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
"bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
"cmp x21, x13\n"
"csel x16, x20, x16, LT\n"
"5:" // Prepare accumulators: End
"ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
"add x20, x20, #0x3\n"
"lsr x20, x20, #0x2\n"
- "ldr x23, [%x[args], %[offsetof_B]]\n"
- "lsr x22, x20, #0x2\n"
- "and x21, x20, #0x3\n"
- "ldr x20, [%x[args], %[offsetof_kstride_bytes]]\n"
- "madd x23, x10, x20, x23\n" // bptr = B + n * kstride_bytes
- "cbz x22, 8f\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa0408364 // ld1b { z4.b-z7.b }, pn8.b/Z, [x27]\n"
- "ldnt1b { z14.b }, p1/Z, [x23]\n"
- ".inst 0xa0418374 // ld1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- "ldnt1b { z31.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa0428378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z13.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa0438368 // ld1b { z8.b-z11.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1408360 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn8.b/Z, [x27]\n"
+ "ld1b { z29.b }, p1/Z, [x23]\n"
+ ".inst 0xa1418361 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "ld1b { z19.b }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa1428363 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa0438378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1b { z29.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1b { z31.b }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"ble 7f\n"
"6:" // K loop
- ".inst 0xa1ae2480 // umopa za0.s, p1/M, p1/M, z4.b, z14.b\n"
- "subs x22, x22, #0x1\n"
- ".inst 0xa1ae24a1 // umopa za1.s, p1/M, p1/M, z5.b, z14.b\n"
- ".inst 0xa1ae24c2 // umopa za2.s, p1/M, p1/M, z6.b, z14.b\n"
- ".inst 0xa1ae24e3 // umopa za3.s, p1/M, p1/M, z7.b, z14.b\n"
- ".inst 0xa0408364 // ld1b { z4.b-z7.b }, pn8.b/Z, [x27]\n"
- ".inst 0xa1bf2680 // umopa za0.s, p1/M, p1/M, z20.b, z31.b\n"
- "ldnt1b { z14.b }, p1/Z, [x23]\n"
- ".inst 0xa1bf26a1 // umopa za1.s, p1/M, p1/M, z21.b, z31.b\n"
- ".inst 0xa1bf26c2 // umopa za2.s, p1/M, p1/M, z22.b, z31.b\n"
- ".inst 0xa1bf26e3 // umopa za3.s, p1/M, p1/M, z23.b, z31.b\n"
- ".inst 0xa0418374 // ld1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
- ".inst 0xa1ad2700 // umopa za0.s, p1/M, p1/M, z24.b, z13.b\n"
- "ldnt1b { z31.b }, p1/Z, [x23, #1, MUL VL]\n"
- ".inst 0xa1ad2721 // umopa za1.s, p1/M, p1/M, z25.b, z13.b\n"
- ".inst 0xa1ad2742 // umopa za2.s, p1/M, p1/M, z26.b, z13.b\n"
- ".inst 0xa1ad2763 // umopa za3.s, p1/M, p1/M, z27.b, z13.b\n"
- ".inst 0xa0428378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
- "ldnt1b { z13.b }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0xa1bd2500 // umopa za0.s, p1/M, p1/M, z8.b, z29.b\n"
- ".inst 0xa1bd2521 // umopa za1.s, p1/M, p1/M, z9.b, z29.b\n"
- ".inst 0xa1bd2542 // umopa za2.s, p1/M, p1/M, z10.b, z29.b\n"
- ".inst 0xa1bd2563 // umopa za3.s, p1/M, p1/M, z11.b, z29.b\n"
- ".inst 0xa0438368 // ld1b { z8.b-z11.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ ".inst 0xa1bd2400 // umopa za0.s, p1/M, p1/M, z0.b, z29.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1bd2481 // umopa za1.s, p1/M, p1/M, z4.b, z29.b\n"
+ ".inst 0xa1bd2502 // umopa za2.s, p1/M, p1/M, z8.b, z29.b\n"
+ ".inst 0xa1bd2583 // umopa za3.s, p1/M, p1/M, z12.b, z29.b\n"
+ ".inst 0xa1408360 // ld1b { z0.b, z4.b, z8.b, z12.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa1b32420 // umopa za0.s, p1/M, p1/M, z1.b, z19.b\n"
+ "ld1b { z29.b }, p1/Z, [x23]\n"
+ ".inst 0xa1b324a1 // umopa za1.s, p1/M, p1/M, z5.b, z19.b\n"
+ ".inst 0xa1b32522 // umopa za2.s, p1/M, p1/M, z9.b, z19.b\n"
+ ".inst 0xa1b325a3 // umopa za3.s, p1/M, p1/M, z13.b, z19.b\n"
+ ".inst 0xa1418361 // ld1b { z1.b, z5.b, z9.b, z13.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa1b42460 // umopa za0.s, p1/M, p1/M, z3.b, z20.b\n"
+ "ld1b { z19.b }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa1b424e1 // umopa za1.s, p1/M, p1/M, z7.b, z20.b\n"
+ ".inst 0xa1b42562 // umopa za2.s, p1/M, p1/M, z11.b, z20.b\n"
+ ".inst 0xa1b425e3 // umopa za3.s, p1/M, p1/M, z15.b, z20.b\n"
+ ".inst 0xa1428363 // ld1b { z3.b, z7.b, z11.b, z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa1bf2700 // umopa za0.s, p1/M, p1/M, z24.b, z31.b\n"
+ ".inst 0xa1bf2721 // umopa za1.s, p1/M, p1/M, z25.b, z31.b\n"
+ ".inst 0xa1bf2742 // umopa za2.s, p1/M, p1/M, z26.b, z31.b\n"
+ ".inst 0xa1bf2763 // umopa za3.s, p1/M, p1/M, z27.b, z31.b\n"
+ ".inst 0xa0438378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
"addvl x27, x27, #16\n"
- "ldnt1b { z29.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1b { z31.b }, p1/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
"bgt 6b\n"
"7:" // K loop tail
- ".inst 0xa1ae2480 // umopa za0.s, p1/M, p1/M, z4.b, z14.b\n"
- ".inst 0xa1ae24a1 // umopa za1.s, p1/M, p1/M, z5.b, z14.b\n"
- ".inst 0xa1ae24c2 // umopa za2.s, p1/M, p1/M, z6.b, z14.b\n"
- ".inst 0xa1ae24e3 // umopa za3.s, p1/M, p1/M, z7.b, z14.b\n"
- ".inst 0xa1bf2680 // umopa za0.s, p1/M, p1/M, z20.b, z31.b\n"
- ".inst 0xa1bf26a1 // umopa za1.s, p1/M, p1/M, z21.b, z31.b\n"
- ".inst 0xa1bf26c2 // umopa za2.s, p1/M, p1/M, z22.b, z31.b\n"
- ".inst 0xa1bf26e3 // umopa za3.s, p1/M, p1/M, z23.b, z31.b\n"
- ".inst 0xa1ad2700 // umopa za0.s, p1/M, p1/M, z24.b, z13.b\n"
- ".inst 0xa1ad2721 // umopa za1.s, p1/M, p1/M, z25.b, z13.b\n"
- ".inst 0xa1ad2742 // umopa za2.s, p1/M, p1/M, z26.b, z13.b\n"
- ".inst 0xa1ad2763 // umopa za3.s, p1/M, p1/M, z27.b, z13.b\n"
- ".inst 0xa1bd2500 // umopa za0.s, p1/M, p1/M, z8.b, z29.b\n"
- ".inst 0xa1bd2521 // umopa za1.s, p1/M, p1/M, z9.b, z29.b\n"
- ".inst 0xa1bd2542 // umopa za2.s, p1/M, p1/M, z10.b, z29.b\n"
- ".inst 0xa1bd2563 // umopa za3.s, p1/M, p1/M, z11.b, z29.b\n"
+ ".inst 0xa1bd2400 // umopa za0.s, p1/M, p1/M, z0.b, z29.b\n"
+ ".inst 0xa1bd2481 // umopa za1.s, p1/M, p1/M, z4.b, z29.b\n"
+ ".inst 0xa1bd2502 // umopa za2.s, p1/M, p1/M, z8.b, z29.b\n"
+ ".inst 0xa1bd2583 // umopa za3.s, p1/M, p1/M, z12.b, z29.b\n"
+ ".inst 0xa1b32420 // umopa za0.s, p1/M, p1/M, z1.b, z19.b\n"
+ ".inst 0xa1b324a1 // umopa za1.s, p1/M, p1/M, z5.b, z19.b\n"
+ ".inst 0xa1b32522 // umopa za2.s, p1/M, p1/M, z9.b, z19.b\n"
+ ".inst 0xa1b325a3 // umopa za3.s, p1/M, p1/M, z13.b, z19.b\n"
+ ".inst 0xa1b42460 // umopa za0.s, p1/M, p1/M, z3.b, z20.b\n"
+ ".inst 0xa1b424e1 // umopa za1.s, p1/M, p1/M, z7.b, z20.b\n"
+ ".inst 0xa1b42562 // umopa za2.s, p1/M, p1/M, z11.b, z20.b\n"
+ ".inst 0xa1b425e3 // umopa za3.s, p1/M, p1/M, z15.b, z20.b\n"
+ ".inst 0xa1bf2700 // umopa za0.s, p1/M, p1/M, z24.b, z31.b\n"
+ ".inst 0xa1bf2721 // umopa za1.s, p1/M, p1/M, z25.b, z31.b\n"
+ ".inst 0xa1bf2742 // umopa za2.s, p1/M, p1/M, z26.b, z31.b\n"
+ ".inst 0xa1bf2763 // umopa za3.s, p1/M, p1/M, z27.b, z31.b\n"
"8:" // K oddments
- "cbz x21, 10f\n"
+ "cbz x20, 10f\n"
"9:" // K oddments: Loop
".inst 0xa1408372 // ld1b { z18.b, z22.b, z26.b, z30.b }, pn8.b/Z, [x27]\n"
- "subs x21, x21, #0x1\n"
+ "subs x20, x20, #0x1\n"
"addvl x27, x27, #4\n"
"ld1b { z15.b }, p1/Z, [x23]\n"
"addvl x23, x23, #1\n"
@@ -233,25 +234,25 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
"mov x12, #0x0\n"
"cntw x20\n"
"11:" // Store to partial result buffer: Store and refill: Loop
- ".inst 0xa040c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
- ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
- ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
- ".inst 0xa042c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
- ".inst 0xa043c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
- ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xa040c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xc0860414 // mova { z20.s-z23.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa042c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840480 // mova za0h.s[x12], { z4.s-z7.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa060c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14]\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa061c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa062c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14, #0x8, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa060c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14]\n"
- "addvl x15, x15, #16\n"
- ".inst 0xa061c1dc // st1w { z28.s-z31.s }, pn8.b, [x14, #0x4, MUL VL]\n"
- ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa063c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 11b\n"
"b 30f\n"
@@ -259,56 +260,56 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
"mov x12, #0x0\n"
"cntw x20\n"
"13:" // Store to partial result buffer: Store only: Loop
- ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
- ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
- ".inst 0xa060c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14]\n"
- ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
- ".inst 0xc0860460 // mova { z0.s-z3.s }, za3h.s[x12]\n"
- ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc0860420 // mova { z0.s-z3.s }, za1h.s[x12]\n"
+ ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14]\n"
"add x12, x12, #0x4\n"
+ ".inst 0xa061c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0x4, MUL VL]\n"
"cmp x12, x20\n"
- ".inst 0xa062c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x8, MUL VL]\n"
- ".inst 0xa063c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14, #0xc, MUL VL]\n"
"addvl x14, x14, #16\n"
"blt 13b\n"
"b 30f\n"
"14:" // Store to output array
"ldr x26, [%x[args], %[offsetof_C]]\n"
- "add x26, x26, x10\n" // C += n
"sub x25, x13, x11\n"
"ld1rw { z2.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_mul]]\n"
"ldr x24, [%x[args], %[offsetof_ldcb]]\n"
- "madd x26, x11, x24, x26\n" // C += m * ldc
"ld1rw { z1.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_per_layer_right_shift]]\n"
"ld1rw { z0.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_c_offset]]\n"
- "ld1rw { z21.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
- "ld1rw { z20.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
+ "add x26, x26, x10\n" // C += n
+ "ld1rw { z25.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_minval]]\n"
+ "madd x26, x11, x24, x26\n" // C += m * ldc
+ "ld1rw { z24.s }, p1/Z, [%x[rq], %[offsetof_Requantize32_maxval]]\n"
"tbz x16, #2, 15f\n"
- "ldr w21, [%x[args], %[offsetof_n_0]]\n"
- "add x21, x21, x10\n"
- "ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
- "add x20, x20, x21, LSL #2\n"
- "ld1w { z2.s }, p0/Z, [x20]\n"
+ "ldr w22, [%x[args], %[offsetof_n_0]]\n"
+ "ldr x21, [%x[rq], %[offsetof_Requantize32_per_channel_muls]]\n"
"ldr x20, [%x[rq], %[offsetof_Requantize32_per_channel_right_shifts]]\n"
- "add x20, x20, x21, LSL #2\n"
+ "add x22, x22, x10\n"
+ "add x21, x21, x22, LSL #2\n"
+ "add x20, x20, x22, LSL #2\n"
+ "ld1w { z2.s }, p0/Z, [x21]\n"
"ld1w { z1.s }, p0/Z, [x20]\n"
"15:" // Store to output array: Load per-channel parameters: End
"cntw x23\n"
"whilelt p0.s, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 17f\n"
"16:" // Store to output array: Accumulator row 0 loop
".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
"add x12, x12, #0x4\n"
- ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
"cmp x12, x21, LSL #2\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
- ".inst 0xc1b4ceb0 // sclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
"st1b { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"st1b { z17.s }, p0, [x26]\n"
@@ -320,56 +321,55 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
"blt 16b\n"
"17:" // Store to output array: Accumulator row 0 oddments
"cbz x20, 18f\n"
- ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
- ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
- ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
- ".inst 0xc1b4ceb0 // sclamp { z16.s-z19.s }, z21.s, z20.s\n"
- "st1b { z16.s }, p0, [x26]\n"
+ ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
+ ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a0ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
+ ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
+ "st1b { z4.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 18f\n"
"subs x20, x20, #0x1\n"
- "st1b { z17.s }, p0, [x26]\n"
+ "st1b { z5.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 18f\n"
- "st1b { z18.s }, p0, [x26]\n"
+ "st1b { z6.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"18:" // Store to output array: Accumulator row 0 oddments: End
"subs x25, x25, x22\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 20f\n"
"19:" // Store to output array: Accumulator row 1 loop
- ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
"add x12, x12, #0x4\n"
- ".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a0ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1b4cea4 // sclamp { z4.s-z7.s }, z21.s, z20.s\n"
- "st1b { z4.s }, p0, [x26]\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
+ ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+ "st1b { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z5.s }, p0, [x26]\n"
+ "st1b { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z6.s }, p0, [x26]\n"
+ "st1b { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z7.s }, p0, [x26]\n"
+ "st1b { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 19b\n"
"20:" // Store to output array: Accumulator row 1 oddments
"cbz x20, 21f\n"
".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
- ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
"subs x20, x20, #0x1\n"
+ ".inst 0xc1a2ac04 // sqdmulh { z4.s-z7.s }, { z4.s-z7.s }, z2.s\n"
".inst 0xc1a1aa24 // srshl { z4.s-z7.s }, { z4.s-z7.s }, z1.s\n"
".inst 0xc1a0ab04 // add { z4.s-z7.s }, { z4.s-z7.s }, z0.s\n"
- ".inst 0xc1b4cea4 // sclamp { z4.s-z7.s }, z21.s, z20.s\n"
+ ".inst 0xc1b8cf24 // sclamp { z4.s-z7.s }, z25.s, z24.s\n"
"st1b { z4.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 21f\n"
@@ -382,115 +382,113 @@ void sme2_interleaved_nomerge_u8q_mopa_4VLx1VL(const uint8_t *const A, const uin
"21:" // Store to output array: Accumulator row 1 oddments: End
"subs x25, x25, x22\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x22, x25, x23, LT\n"
"lsr x21, x22, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x22, #0x3\n"
"cbz x21, 23f\n"
"22:" // Store to output array: Accumulator row 2 loop
- ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
- ".inst 0xc1a2ac08 // sqdmulh { z8.s-z11.s }, { z8.s-z11.s }, z2.s\n"
+ ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
"add x12, x12, #0x4\n"
- ".inst 0xc1a1aa28 // srshl { z8.s-z11.s }, { z8.s-z11.s }, z1.s\n"
+ ".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a0ab08 // add { z8.s-z11.s }, { z8.s-z11.s }, z0.s\n"
- ".inst 0xc1b4cea8 // sclamp { z8.s-z11.s }, z21.s, z20.s\n"
- "st1b { z8.s }, p0, [x26]\n"
+ ".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
+ ".inst 0xc1a0ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
+ ".inst 0xc1b8cf2c // sclamp { z12.s-z15.s }, z25.s, z24.s\n"
+ "st1b { z12.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z9.s }, p0, [x26]\n"
+ "st1b { z13.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z10.s }, p0, [x26]\n"
+ "st1b { z14.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z11.s }, p0, [x26]\n"
+ "st1b { z15.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 22b\n"
"23:" // Store to output array: Accumulator row 2 oddments
"cbz x20, 24f\n"
- ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
- ".inst 0xc1a2ac0c // sqdmulh { z12.s-z15.s }, { z12.s-z15.s }, z2.s\n"
+ ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a1aa2c // srshl { z12.s-z15.s }, { z12.s-z15.s }, z1.s\n"
- ".inst 0xc1a0ab0c // add { z12.s-z15.s }, { z12.s-z15.s }, z0.s\n"
- ".inst 0xc1b4ceac // sclamp { z12.s-z15.s }, z21.s, z20.s\n"
- "st1b { z12.s }, p0, [x26]\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
+ ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+ "st1b { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 24f\n"
"subs x20, x20, #0x1\n"
- "st1b { z13.s }, p0, [x26]\n"
+ "st1b { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 24f\n"
- "st1b { z14.s }, p0, [x26]\n"
+ "st1b { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"24:" // Store to output array: Accumulator row 2 oddments: End
"subs x25, x25, x22\n"
"beq 28f\n"
- "whilelt p0.s, x10, x9\n"
"cmp x25, x23\n"
+ "mov x12, #0x0\n"
"csel x20, x25, x23, LT\n"
"lsr x21, x20, #0x2\n"
- "mov x12, #0x0\n"
"and x20, x20, #0x3\n"
"cbz x21, 26f\n"
"25:" // Store to output array: Accumulator row 3 loop
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xc1a2ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
"add x12, x12, #0x4\n"
- ".inst 0xc1a1aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
+ ".inst 0xc1a2ac10 // sqdmulh { z16.s-z19.s }, { z16.s-z19.s }, z2.s\n"
"cmp x12, x21, LSL #2\n"
- ".inst 0xc1a0ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
- ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
- "st1b { z28.s }, p0, [x26]\n"
+ ".inst 0xc1a1aa30 // srshl { z16.s-z19.s }, { z16.s-z19.s }, z1.s\n"
+ ".inst 0xc1a0ab10 // add { z16.s-z19.s }, { z16.s-z19.s }, z0.s\n"
+ ".inst 0xc1b8cf30 // sclamp { z16.s-z19.s }, z25.s, z24.s\n"
+ "st1b { z16.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z29.s }, p0, [x26]\n"
+ "st1b { z17.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z30.s }, p0, [x26]\n"
+ "st1b { z18.s }, p0, [x26]\n"
"add x26, x26, x24\n"
- "st1b { z31.s }, p0, [x26]\n"
+ "st1b { z19.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"blt 25b\n"
"26:" // Store to output array: Accumulator row 3 oddments
"cbz x20, 27f\n"
- ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
- ".inst 0xc1a2ac1c // sqdmulh { z28.s-z31.s }, { z28.s-z31.s }, z2.s\n"
+ ".inst 0xc0860474 // mova { z20.s-z23.s }, za3h.s[x12]\n"
"subs x20, x20, #0x1\n"
- ".inst 0xc1a1aa3c // srshl { z28.s-z31.s }, { z28.s-z31.s }, z1.s\n"
- ".inst 0xc1a0ab1c // add { z28.s-z31.s }, { z28.s-z31.s }, z0.s\n"
- ".inst 0xc1b4cebc // sclamp { z28.s-z31.s }, z21.s, z20.s\n"
- "st1b { z28.s }, p0, [x26]\n"
+ ".inst 0xc1a2ac14 // sqdmulh { z20.s-z23.s }, { z20.s-z23.s }, z2.s\n"
+ ".inst 0xc1a1aa34 // srshl { z20.s-z23.s }, { z20.s-z23.s }, z1.s\n"
+ ".inst 0xc1a0ab14 // add { z20.s-z23.s }, { z20.s-z23.s }, z0.s\n"
+ ".inst 0xc1b8cf34 // sclamp { z20.s-z23.s }, z25.s, z24.s\n"
+ "st1b { z20.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 27f\n"
"subs x20, x20, #0x1\n"
- "st1b { z29.s }, p0, [x26]\n"
+ "st1b { z21.s }, p0, [x26]\n"
"add x26, x26, x24\n"
"beq 27f\n"
- "st1b { z30.s }, p0, [x26]\n"
+ "st1b { z22.s }, p0, [x26]\n"
"27:" // Store to output array: Accumulator row 3 oddments: End
"28:" // Store to output array: End
"tbz x16, #0, 30f\n"
"mov x12, #0x0\n"
"cntw x20\n"
"29:" // Store to output array: Refill accumulators: Loop
- ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
- ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
- ".inst 0xa041c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
- ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
- ".inst 0xa042c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
- ".inst 0xc0840602 // mova za2h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa040c1f8 // ld1w { z24.s-z27.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
".inst 0xa043c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840700 // mova za0h.s[x12], { z24.s-z27.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
".inst 0xc0840403 // mova za3h.s[x12], { z0.s-z3.s }\n"
"add x12, x12, #0x4\n"
"cmp x12, x20\n"
- "addvl x15, x15, #16\n"
"blt 29b\n"
"30:" // End block
"incw x10\n"
"cmp x10, x9\n"
"blt 3b\n"
"incw x11, ALL, MUL #4\n"
- "cmp x11, x13\n"
"mov x10, #0x0\n"
+ "cmp x11, x13\n"
"mov x28, x27\n"
"blt 3b\n"
".inst 0xd503467f // SMSTOP\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp
index 1ce169d562..ac21a980d3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,7 +82,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 8, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 8, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp
index 9136e32567..61c38db3cb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_bf16fp32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,18 +50,19 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -82,6 +83,7 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -107,15 +109,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"bgt 29f\n"
"beq 15f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -123,12 +125,12 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"mov x11, x12\n"
"3:" // Height 1: B setup done
@@ -143,26 +145,26 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cbz x15, 4f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 6f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 5f\n"
- "ld1w { z16.s }, p4/Z, [x13]\n"
+ "ld1w { z19.s }, p4/Z, [x13]\n"
"ld1w { z18.s }, p3/Z, [x13, #1, MUL VL]\n"
- "zip1 z8.d, z16.d, z12.d\n"
- "zip2 z12.d, z16.d, z12.d\n"
"ld1w { z17.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
+ "zip1 z8.d, z19.d, z12.d\n"
+ "zip2 z12.d, z19.d, z12.d\n"
"zip1 z9.d, z18.d, z13.d\n"
"zip2 z13.d, z18.d, z13.d\n"
"zip1 z10.d, z17.d, z14.d\n"
@@ -183,8 +185,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"7:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 8f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -200,96 +202,96 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z20.h }, p0/Z, [x26]\n"
- "trn1 z18.d, z20.d, z19.d\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z20.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "trn1 z19.d, z20.d, z18.d\n"
+ "trn2 z20.d, z20.d, z18.d\n"
+ ".inst 0x6471e668 // bfmmla z8.s, z19.h, z17.h\n"
+ "ld1h { z1.h }, p5/Z, [x11]\n"
+ ".inst 0x6470e66c // bfmmla z12.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
+ ".inst 0x6461e669 // bfmmla z9.s, z19.h, z1.h\n"
+ "ld1h { z18.h }, p5/Z, [x10]\n"
+ ".inst 0x6470e66d // bfmmla z13.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
+ ".inst 0x6472e66a // bfmmla z10.s, z19.h, z18.h\n"
"ld1h { z17.h }, p5/Z, [x9]\n"
+ ".inst 0x6470e66e // bfmmla z14.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
- "trn2 z20.d, z20.d, z19.d\n"
- ".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
- ".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
+ ".inst 0x6471e66b // bfmmla z11.s, z19.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x12, #2, MUL VL]\n"
+ ".inst 0x6470e66f // bfmmla z15.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
".inst 0x6471e688 // bfmmla z8.s, z20.h, z17.h\n"
- ".inst 0x6470e68c // bfmmla z12.s, z20.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x11, #2, MUL VL]\n"
+ ".inst 0x6470e68c // bfmmla z12.s, z20.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "addvl x11, x11, #4\n"
".inst 0x6471e689 // bfmmla z9.s, z20.h, z17.h\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6470e68d // bfmmla z13.s, z20.h, z16.h\n"
- "ld1h { z16.h }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1h { z17.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6470e68a // bfmmla z10.s, z20.h, z16.h\n"
- ".inst 0x6471e68e // bfmmla z14.s, z20.h, z17.h\n"
+ "ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
+ ".inst 0x6471e68a // bfmmla z10.s, z20.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x9, #2, MUL VL]\n"
+ ".inst 0x6470e68e // bfmmla z14.s, z20.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x9, #3, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
+ "addvl x9, x9, #4\n"
".inst 0x6471e68b // bfmmla z11.s, z20.h, z17.h\n"
".inst 0x6470e68f // bfmmla z15.s, z20.h, z16.h\n"
- "add x26, x26, #0x10\n"
- "addvl x12, x12, #4\n"
- "addvl x11, x11, #4\n"
- "addvl x10, x10, #4\n"
- "addvl x9, x9, #4\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "trn1 z18.d, z1.d, z19.d\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
+ "addvl x12, x12, #2\n"
+ "ld1rqh { z1.h }, p0/Z, [x26]\n"
+ "trn1 z18.d, z1.d, z19.d\n"
".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x11]\n"
+ ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x9]\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "addvl x9, x9, #2\n"
".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
- "addvl x12, x12, #2\n"
- "addvl x11, x11, #2\n"
- "addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
"ble 12f\n"
- "ld1h { z17.h }, p5/Z, [x12]\n"
- "ld1h { z16.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6471e428 // bfmmla z8.s, z1.h, z17.h\n"
- ".inst 0x6470e42c // bfmmla z12.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x11]\n"
- "ld1h { z16.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6471e429 // bfmmla z9.s, z1.h, z17.h\n"
- ".inst 0x6470e42d // bfmmla z13.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6471e42a // bfmmla z10.s, z1.h, z17.h\n"
- ".inst 0x6470e42e // bfmmla z14.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x9]\n"
- "ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6471e42b // bfmmla z11.s, z1.h, z17.h\n"
- ".inst 0x6470e42f // bfmmla z15.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x12]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #1, MUL VL]\n"
"addvl x12, x12, #2\n"
+ ".inst 0x6470e428 // bfmmla z8.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x11]\n"
+ ".inst 0x6471e42c // bfmmla z12.s, z1.h, z17.h\n"
+ "ld1h { z17.h }, p5/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #2\n"
+ ".inst 0x6470e429 // bfmmla z9.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x10]\n"
+ ".inst 0x6471e42d // bfmmla z13.s, z1.h, z17.h\n"
+ "ld1h { z17.h }, p5/Z, [x10, #1, MUL VL]\n"
"addvl x10, x10, #2\n"
+ ".inst 0x6470e42a // bfmmla z10.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x9]\n"
+ ".inst 0x6471e42e // bfmmla z14.s, z1.h, z17.h\n"
+ "ld1h { z0.h }, p5/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
+ ".inst 0x6470e42b // bfmmla z11.s, z1.h, z16.h\n"
+ ".inst 0x6460e42f // bfmmla z15.s, z1.h, z0.h\n"
"12:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -300,14 +302,14 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z10.d, z10.d, z14.d\n"
"uzp1 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 13f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z21.s\n"
- "fmin z9.s, p5/M, z9.s, z21.s\n"
- "fmin z10.s, p5/M, z10.s, z21.s\n"
- "fmin z11.s, p5/M, z11.s, z21.s\n"
+ "fmin z8.s, p5/M, z8.s, z17.s\n"
+ "fmin z9.s, p5/M, z9.s, z17.s\n"
+ "fmin z10.s, p5/M, z10.s, z17.s\n"
+ "fmin z11.s, p5/M, z11.s, z17.s\n"
"fmax z8.s, p5/M, z8.s, z16.s\n"
"fmax z9.s, p5/M, z9.s, z16.s\n"
"fmax z10.s, p5/M, z10.s, z16.s\n"
@@ -325,15 +327,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 86f\n"
"15:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"16:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -341,12 +343,12 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 17f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"mov x11, x12\n"
"17:" // Height 2: B setup done
@@ -361,38 +363,38 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cbz x15, 18f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 20f\n"
"18:" // Height 2: no bias
"tbz %x[flags], #0, 19f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z18.s }, p4/Z, [x13]\n"
+ "ld1w { z16.s }, p3/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x13, #2, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x13, #3, MUL VL]\n"
"add x20, x13, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x13]\n"
- "ld1w { z18.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z17.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
- "zip1 z9.d, z18.d, z13.d\n"
- "zip2 z13.d, z18.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x20, #3, MUL VL]\n"
- "zip1 z10.d, z17.d, z14.d\n"
- "zip2 z14.d, z17.d, z14.d\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z8.d, z18.d, z12.d\n"
+ "zip2 z12.d, z18.d, z12.d\n"
+ "zip1 z9.d, z16.d, z13.d\n"
+ "zip2 z13.d, z16.d, z13.d\n"
+ "zip1 z10.d, z5.d, z14.d\n"
+ "zip2 z14.d, z5.d, z14.d\n"
+ "zip1 z11.d, z17.d, z15.d\n"
+ "zip2 z15.d, z17.d, z15.d\n"
"b 20f\n"
"19:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -407,8 +409,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"21:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 22f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -427,99 +429,99 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ble 25f\n"
"24:" // Height 2: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z20.h }, p0/Z, [x26]\n"
- "ld1rqh { z19.h }, p0/Z, [x25]\n"
- "trn1 z18.d, z20.d, z19.d\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
- "ld1h { z16.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z20.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z19.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z16.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "trn1 z18.d, z19.d, z16.d\n"
+ "trn2 z19.d, z19.d, z16.d\n"
".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x11]\n"
+ ".inst 0x6474e64c // bfmmla z12.s, z18.h, z20.h\n"
"ld1h { z16.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x9]\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
- "trn2 z20.d, z20.d, z19.d\n"
".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
- ".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x12, #2, MUL VL]\n"
+ ".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x12, #3, MUL VL]\n"
- ".inst 0x6471e688 // bfmmla z8.s, z20.h, z17.h\n"
- ".inst 0x6470e68c // bfmmla z12.s, z20.h, z16.h\n"
+ "addvl x12, x12, #4\n"
+ ".inst 0x6471e668 // bfmmla z8.s, z19.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x11, #2, MUL VL]\n"
+ ".inst 0x6470e66c // bfmmla z12.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x11, #3, MUL VL]\n"
- ".inst 0x6471e689 // bfmmla z9.s, z20.h, z17.h\n"
- ".inst 0x6470e68d // bfmmla z13.s, z20.h, z16.h\n"
+ "addvl x11, x11, #4\n"
+ ".inst 0x6471e669 // bfmmla z9.s, z19.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6470e66d // bfmmla z13.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6471e68a // bfmmla z10.s, z20.h, z17.h\n"
- ".inst 0x6470e68e // bfmmla z14.s, z20.h, z16.h\n"
+ "addvl x10, x10, #4\n"
+ ".inst 0x6471e66a // bfmmla z10.s, z19.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x9, #2, MUL VL]\n"
+ ".inst 0x6470e66e // bfmmla z14.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x9, #3, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
- ".inst 0x6471e68b // bfmmla z11.s, z20.h, z17.h\n"
- ".inst 0x6470e68f // bfmmla z15.s, z20.h, z16.h\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
- "addvl x12, x12, #4\n"
- "addvl x11, x11, #4\n"
- "addvl x10, x10, #4\n"
"addvl x9, x9, #4\n"
+ ".inst 0x6471e66b // bfmmla z11.s, z19.h, z17.h\n"
+ ".inst 0x6470e66f // bfmmla z15.s, z19.h, z16.h\n"
"bgt 24b\n"
"25:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z17.h }, p5/Z, [x12]\n"
+ "ld1h { z16.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
+ "addvl x12, x12, #2\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
"ld1rqh { z19.h }, p0/Z, [x25]\n"
"trn1 z18.d, z1.d, z19.d\n"
- "ld1h { z17.h }, p5/Z, [x12]\n"
- "ld1h { z16.h }, p5/Z, [x12, #1, MUL VL]\n"
".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x11]\n"
+ ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x9]\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "addvl x9, x9, #2\n"
".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
- "addvl x12, x12, #2\n"
- "addvl x11, x11, #2\n"
- "addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
"ble 26f\n"
- "ld1h { z17.h }, p5/Z, [x12]\n"
- "ld1h { z16.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6471e428 // bfmmla z8.s, z1.h, z17.h\n"
- ".inst 0x6470e42c // bfmmla z12.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x11]\n"
- "ld1h { z16.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6471e429 // bfmmla z9.s, z1.h, z17.h\n"
- ".inst 0x6470e42d // bfmmla z13.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6471e42a // bfmmla z10.s, z1.h, z17.h\n"
- ".inst 0x6470e42e // bfmmla z14.s, z1.h, z16.h\n"
- "ld1h { z22.h }, p5/Z, [x9]\n"
- "ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6476e42b // bfmmla z11.s, z1.h, z22.h\n"
- ".inst 0x6470e42f // bfmmla z15.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x12]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #1, MUL VL]\n"
"addvl x12, x12, #2\n"
+ ".inst 0x6470e428 // bfmmla z8.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x11]\n"
+ ".inst 0x6471e42c // bfmmla z12.s, z1.h, z17.h\n"
+ "ld1h { z17.h }, p5/Z, [x11, #1, MUL VL]\n"
"addvl x11, x11, #2\n"
+ ".inst 0x6470e429 // bfmmla z9.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x10]\n"
+ ".inst 0x6471e42d // bfmmla z13.s, z1.h, z17.h\n"
+ "ld1h { z26.h }, p5/Z, [x10, #1, MUL VL]\n"
"addvl x10, x10, #2\n"
+ ".inst 0x6470e42a // bfmmla z10.s, z1.h, z16.h\n"
+ "ld1h { z17.h }, p5/Z, [x9]\n"
+ ".inst 0x647ae42e // bfmmla z14.s, z1.h, z26.h\n"
+ "ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
+ ".inst 0x6471e42b // bfmmla z11.s, z1.h, z17.h\n"
+ ".inst 0x6470e42f // bfmmla z15.s, z1.h, z16.h\n"
"26:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -528,17 +530,17 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x25, x13, x20, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x26, x13, x20, LSL #2\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z17.s\n"
"fmin z12.s, p5/M, z12.s, z17.s\n"
@@ -562,10 +564,10 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
"28:" // Height 2: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -573,15 +575,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 86f\n"
"29:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"30:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -589,12 +591,12 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 31f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"mov x11, x12\n"
"31:" // Height 3: B setup done
@@ -609,15 +611,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cbz x15, 32f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -632,36 +634,36 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"32:" // Height 3: no bias
"tbz %x[flags], #0, 33f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z26.s }, p4/Z, [x13]\n"
+ "ld1w { z25.s }, p3/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x13, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"add x21, x13, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x13]\n"
- "ld1w { z17.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x21, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x20]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "zip1 z8.d, z26.d, z12.d\n"
+ "zip2 z12.d, z26.d, z12.d\n"
+ "ld1w { z2.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z9.d, z25.d, z13.d\n"
+ "zip2 z13.d, z25.d, z13.d\n"
+ "zip1 z10.d, z24.d, z14.d\n"
+ "zip2 z14.d, z24.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
- "zip1 z19.d, z24.d, z23.d\n"
- "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z19.d, z2.d, z23.d\n"
+ "zip2 z23.d, z2.d, z23.d\n"
"b 34f\n"
"33:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -684,8 +686,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"35:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 36f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -707,130 +709,130 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ble 39f\n"
"38:" // Height 3: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z30.h }, p0/Z, [x26]\n"
+ "ld1h { z25.h }, p5/Z, [x12]\n"
+ "ld1h { z30.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z29.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z24.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z28.h }, p0/Z, [x24]\n"
- "trn1 z27.d, z30.d, z24.d\n"
- "trn2 z30.d, z30.d, z24.d\n"
- "ld1h { z25.h }, p5/Z, [x12]\n"
- "trn1 z26.d, z28.d, z29.d\n"
- "ld1h { z24.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "trn1 z27.d, z29.d, z24.d\n"
+ "trn2 z29.d, z29.d, z24.d\n"
+ "trn1 z26.d, z28.d, z31.d\n"
+ "trn2 z28.d, z28.d, z31.d\n"
".inst 0x6479e768 // bfmmla z8.s, z27.h, z25.h\n"
+ ".inst 0x647ee76c // bfmmla z12.s, z27.h, z30.h\n"
".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e76c // bfmmla z12.s, z27.h, z24.h\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x11]\n"
+ ".inst 0x647ee754 // bfmmla z20.s, z26.h, z30.h\n"
"ld1h { z24.h }, p5/Z, [x11, #1, MUL VL]\n"
".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "trn2 z28.d, z28.d, z29.d\n"
".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x9]\n"
- "cmp x27, #0x8\n"
".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x9, #1, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x12, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x12, #3, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6479e7c8 // bfmmla z8.s, z30.h, z25.h\n"
+ "addvl x12, x12, #4\n"
+ ".inst 0x6479e7a8 // bfmmla z8.s, z29.h, z25.h\n"
".inst 0x6479e790 // bfmmla z16.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x11, #2, MUL VL]\n"
- "addvl x12, x12, #4\n"
- ".inst 0x6478e7cc // bfmmla z12.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ac // bfmmla z12.s, z29.h, z24.h\n"
".inst 0x6478e794 // bfmmla z20.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x11, #3, MUL VL]\n"
"addvl x11, x11, #4\n"
- ".inst 0x6479e7c9 // bfmmla z9.s, z30.h, z25.h\n"
+ ".inst 0x6479e7a9 // bfmmla z9.s, z29.h, z25.h\n"
".inst 0x6479e791 // bfmmla z17.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6478e7cd // bfmmla z13.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ad // bfmmla z13.s, z29.h, z24.h\n"
".inst 0x6478e795 // bfmmla z21.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- ".inst 0x6479e7ca // bfmmla z10.s, z30.h, z25.h\n"
+ ".inst 0x6479e7aa // bfmmla z10.s, z29.h, z25.h\n"
".inst 0x6479e792 // bfmmla z18.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x9, #2, MUL VL]\n"
- ".inst 0x6478e7ce // bfmmla z14.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ae // bfmmla z14.s, z29.h, z24.h\n"
".inst 0x6478e796 // bfmmla z22.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- ".inst 0x6479e7cb // bfmmla z11.s, z30.h, z25.h\n"
+ ".inst 0x6479e7ab // bfmmla z11.s, z29.h, z25.h\n"
".inst 0x6479e793 // bfmmla z19.s, z28.h, z25.h\n"
- ".inst 0x6478e7cf // bfmmla z15.s, z30.h, z24.h\n"
+ ".inst 0x6478e7af // bfmmla z15.s, z29.h, z24.h\n"
".inst 0x6478e797 // bfmmla z23.s, z28.h, z24.h\n"
"bgt 38b\n"
"39:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z25.h }, p5/Z, [x12]\n"
+ "ld1h { z28.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
+ "addvl x12, x12, #2\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
"ld1rqh { z24.h }, p0/Z, [x25]\n"
"ld1rqh { z3.h }, p0/Z, [x24]\n"
"trn1 z27.d, z1.d, z24.d\n"
"trn2 z1.d, z1.d, z24.d\n"
- "ld1h { z25.h }, p5/Z, [x12]\n"
- "trn1 z26.d, z3.d, z28.d\n"
- "ld1h { z24.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "trn1 z26.d, z3.d, z29.d\n"
".inst 0x6479e768 // bfmmla z8.s, z27.h, z25.h\n"
+ ".inst 0x647ce76c // bfmmla z12.s, z27.h, z28.h\n"
+ "trn2 z3.d, z3.d, z29.d\n"
".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e76c // bfmmla z12.s, z27.h, z24.h\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x11]\n"
+ ".inst 0x647ce754 // bfmmla z20.s, z26.h, z28.h\n"
"ld1h { z24.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "trn2 z3.d, z3.d, z28.d\n"
+ "addvl x10, x10, #2\n"
".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #2\n"
".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x9, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "addvl x9, x9, #2\n"
".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
- "addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"ble 40f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
".inst 0x6479e428 // bfmmla z8.s, z1.h, z25.h\n"
".inst 0x6479e470 // bfmmla z16.s, z3.h, z25.h\n"
+ "ld1h { z25.h }, p5/Z, [x11]\n"
".inst 0x6478e42c // bfmmla z12.s, z1.h, z24.h\n"
".inst 0x6478e474 // bfmmla z20.s, z3.h, z24.h\n"
- "ld1h { z25.h }, p5/Z, [x11]\n"
"ld1h { z24.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6479e429 // bfmmla z9.s, z1.h, z25.h\n"
".inst 0x6479e471 // bfmmla z17.s, z3.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #2\n"
".inst 0x6478e42d // bfmmla z13.s, z1.h, z24.h\n"
".inst 0x6478e475 // bfmmla z21.s, z3.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
".inst 0x6479e42a // bfmmla z10.s, z1.h, z25.h\n"
".inst 0x6479e472 // bfmmla z18.s, z3.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #2\n"
".inst 0x6478e42e // bfmmla z14.s, z1.h, z24.h\n"
".inst 0x6478e476 // bfmmla z22.s, z3.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x9, #1, MUL VL]\n"
@@ -845,24 +847,24 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 35b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x13, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 z16.d, z16.d, z20.d\n"
"uzp1 z17.d, z17.d, z21.d\n"
"uzp1 z18.d, z18.d, z22.d\n"
"uzp1 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 41f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z25.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z25.s }, p5/Z, [x21]\n"
"ld1rw { z24.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z25.s\n"
"fmin z12.s, p5/M, z12.s, z25.s\n"
@@ -894,14 +896,14 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
"42:" // Height 3: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -909,15 +911,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 86f\n"
"43:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -925,12 +927,12 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 45f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"mov x11, x12\n"
"45:" // Height 4: B setup done
@@ -945,15 +947,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cbz x15, 46f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -968,37 +970,37 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"46:" // Height 4: no bias
"tbz %x[flags], #0, 47f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x13]\n"
+ "ld1w { z22.s }, p3/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x13, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"add x22, x13, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x13]\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x22, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x21]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x20]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -1025,8 +1027,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"49:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1051,133 +1053,133 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ble 53f\n"
"52:" // Height 4: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z30.h }, p0/Z, [x26]\n"
- "ld1rqh { z24.h }, p0/Z, [x25]\n"
- "trn1 z29.d, z30.d, z24.d\n"
+ "ld1h { z31.h }, p5/Z, [x12]\n"
+ "ld1h { z30.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z29.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z25.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z28.h }, p0/Z, [x24]\n"
- "ld1rqh { z27.h }, p0/Z, [x23]\n"
- "trn2 z30.d, z30.d, z24.d\n"
- "trn1 z26.d, z28.d, z27.d\n"
- "ld1h { z25.h }, p5/Z, [x12]\n"
- "ld1h { z24.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6479e7a8 // bfmmla z8.s, z29.h, z25.h\n"
- ".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e7ac // bfmmla z12.s, z29.h, z24.h\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqh { z24.h }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z27.d, z29.d, z25.d\n"
+ "trn2 z29.d, z29.d, z25.d\n"
+ "trn1 z26.d, z28.d, z24.d\n"
+ "trn2 z28.d, z28.d, z24.d\n"
+ ".inst 0x647fe768 // bfmmla z8.s, z27.h, z31.h\n"
+ ".inst 0x647ee76c // bfmmla z12.s, z27.h, z30.h\n"
+ ".inst 0x647fe750 // bfmmla z16.s, z26.h, z31.h\n"
"ld1h { z25.h }, p5/Z, [x11]\n"
+ ".inst 0x647ee754 // bfmmla z20.s, z26.h, z30.h\n"
"ld1h { z24.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6479e7a9 // bfmmla z9.s, z29.h, z25.h\n"
+ ".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "trn2 z28.d, z28.d, z27.d\n"
- ".inst 0x6478e7ad // bfmmla z13.s, z29.h, z24.h\n"
+ ".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- ".inst 0x6479e7aa // bfmmla z10.s, z29.h, z25.h\n"
+ ".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x9]\n"
- "cmp x27, #0x8\n"
- ".inst 0x6478e7ae // bfmmla z14.s, z29.h, z24.h\n"
+ ".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x9, #1, MUL VL]\n"
- "add x26, x26, #0x10\n"
- ".inst 0x6479e7ab // bfmmla z11.s, z29.h, z25.h\n"
+ ".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x12, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x6478e7af // bfmmla z15.s, z29.h, z24.h\n"
+ ".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x12, #3, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6479e7c8 // bfmmla z8.s, z30.h, z25.h\n"
+ "addvl x12, x12, #4\n"
+ ".inst 0x6479e7a8 // bfmmla z8.s, z29.h, z25.h\n"
".inst 0x6479e790 // bfmmla z16.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x11, #2, MUL VL]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x6478e7cc // bfmmla z12.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ac // bfmmla z12.s, z29.h, z24.h\n"
".inst 0x6478e794 // bfmmla z20.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x12, x12, #4\n"
- ".inst 0x6479e7c9 // bfmmla z9.s, z30.h, z25.h\n"
+ "addvl x11, x11, #4\n"
+ ".inst 0x6479e7a9 // bfmmla z9.s, z29.h, z25.h\n"
".inst 0x6479e791 // bfmmla z17.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "addvl x11, x11, #4\n"
- ".inst 0x6478e7cd // bfmmla z13.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ad // bfmmla z13.s, z29.h, z24.h\n"
".inst 0x6478e795 // bfmmla z21.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
- ".inst 0x6479e7ca // bfmmla z10.s, z30.h, z25.h\n"
+ ".inst 0x6479e7aa // bfmmla z10.s, z29.h, z25.h\n"
".inst 0x6479e792 // bfmmla z18.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x9, #2, MUL VL]\n"
- ".inst 0x6478e7ce // bfmmla z14.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ae // bfmmla z14.s, z29.h, z24.h\n"
".inst 0x6478e796 // bfmmla z22.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- ".inst 0x6479e7cb // bfmmla z11.s, z30.h, z25.h\n"
+ ".inst 0x6479e7ab // bfmmla z11.s, z29.h, z25.h\n"
".inst 0x6479e793 // bfmmla z19.s, z28.h, z25.h\n"
- ".inst 0x6478e7cf // bfmmla z15.s, z30.h, z24.h\n"
+ ".inst 0x6478e7af // bfmmla z15.s, z29.h, z24.h\n"
".inst 0x6478e797 // bfmmla z23.s, z28.h, z24.h\n"
"bgt 52b\n"
"53:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z29.h }, p5/Z, [x12]\n"
+ "ld1h { z28.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
+ "addvl x12, x12, #2\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z24.h }, p0/Z, [x25]\n"
- "trn1 z28.d, z1.d, z24.d\n"
+ "ld1rqh { z25.h }, p0/Z, [x25]\n"
"ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z27.h }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z24.d\n"
- "trn1 z26.d, z3.d, z27.d\n"
- "ld1h { z25.h }, p5/Z, [x12]\n"
- "ld1h { z24.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6479e788 // bfmmla z8.s, z28.h, z25.h\n"
- ".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e78c // bfmmla z12.s, z28.h, z24.h\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
+ "ld1rqh { z24.h }, p0/Z, [x23]\n"
+ "trn1 z27.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ "trn1 z26.d, z3.d, z24.d\n"
+ ".inst 0x647de768 // bfmmla z8.s, z27.h, z29.h\n"
+ ".inst 0x647ce76c // bfmmla z12.s, z27.h, z28.h\n"
+ "trn2 z3.d, z3.d, z24.d\n"
+ ".inst 0x647de750 // bfmmla z16.s, z26.h, z29.h\n"
"ld1h { z25.h }, p5/Z, [x11]\n"
+ ".inst 0x647ce754 // bfmmla z20.s, z26.h, z28.h\n"
"ld1h { z24.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6479e789 // bfmmla z9.s, z28.h, z25.h\n"
+ "addvl x11, x11, #2\n"
+ ".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x4\n"
- ".inst 0x6478e78d // bfmmla z13.s, z28.h, z24.h\n"
+ ".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "trn2 z3.d, z3.d, z27.d\n"
- ".inst 0x6479e78a // bfmmla z10.s, z28.h, z25.h\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #2\n"
- ".inst 0x6478e78e // bfmmla z14.s, z28.h, z24.h\n"
+ ".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x9, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
- ".inst 0x6479e78b // bfmmla z11.s, z28.h, z25.h\n"
- ".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
- "addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
- ".inst 0x6478e78f // bfmmla z15.s, z28.h, z24.h\n"
+ ".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
+ ".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
+ ".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"ble 54f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
".inst 0x6479e428 // bfmmla z8.s, z1.h, z25.h\n"
".inst 0x6479e470 // bfmmla z16.s, z3.h, z25.h\n"
+ "ld1h { z25.h }, p5/Z, [x11]\n"
".inst 0x6478e42c // bfmmla z12.s, z1.h, z24.h\n"
".inst 0x6478e474 // bfmmla z20.s, z3.h, z24.h\n"
- "ld1h { z25.h }, p5/Z, [x11]\n"
"ld1h { z24.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
".inst 0x6479e429 // bfmmla z9.s, z1.h, z25.h\n"
".inst 0x6479e471 // bfmmla z17.s, z3.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #2\n"
".inst 0x6478e42d // bfmmla z13.s, z1.h, z24.h\n"
".inst 0x6478e475 // bfmmla z21.s, z3.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
+ "addvl x10, x10, #2\n"
".inst 0x6479e42a // bfmmla z10.s, z1.h, z25.h\n"
".inst 0x6479e472 // bfmmla z18.s, z3.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #2\n"
".inst 0x6478e42e // bfmmla z14.s, z1.h, z24.h\n"
".inst 0x6478e476 // bfmmla z22.s, z3.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x9, #1, MUL VL]\n"
@@ -1192,17 +1194,17 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 49b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
"uzp1 z20.d, z17.d, z21.d\n"
@@ -1212,9 +1214,9 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 55f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z24.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z24.s }, p5/Z, [x21]\n"
"ld1rw { z23.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z24.s\n"
"fmin z12.s, p5/M, z12.s, z24.s\n"
@@ -1254,18 +1256,18 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
"56:" // Height 4: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1273,15 +1275,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 86f\n"
"57:" // Height 5
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"58:" // Height 5: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -1289,12 +1291,12 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 59f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 59f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 59f\n"
"mov x11, x12\n"
"59:" // Height 5: B setup done
@@ -1309,15 +1311,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cbz x15, 60f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1340,46 +1342,46 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"60:" // Height 5: no bias
"tbz %x[flags], #0, 61f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x13]\n"
+ "ld1w { z22.s }, p3/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x13, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"add x23, x13, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x13]\n"
"add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x13, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x22]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x21]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z17.d, z18.d, z21.d\n"
- "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x20]\n"
- "zip1 z18.d, z19.d, z22.d\n"
- "zip2 z22.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1418,8 +1420,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"63:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 64f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1447,120 +1449,120 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ble 67f\n"
"66:" // Height 5: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z1.h }, p5/Z, [x12]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z6.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z3.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z7.h }, p0/Z, [x24]\n"
"ld1rqh { z2.h }, p0/Z, [x23]\n"
- "trn1 z5.d, z6.d, z1.d\n"
- "trn2 z6.d, z6.d, z1.d\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "ld1rqh { z5.h }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
"trn1 z3.d, z7.d, z2.d\n"
"trn2 z7.d, z7.d, z2.d\n"
- "ld1h { z1.h }, p5/Z, [x12]\n"
- "trn1 z2.d, z4.d, z0.d\n"
- "trn2 z4.d, z4.d, z0.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
"ld1h { z0.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6461e4a8 // bfmmla z8.s, z5.h, z1.h\n"
+ ".inst 0x6461e488 // bfmmla z8.s, z4.h, z1.h\n"
".inst 0x6461e470 // bfmmla z16.s, z3.h, z1.h\n"
".inst 0x6461e458 // bfmmla z24.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x11]\n"
- "sub x27, x27, #0x8\n"
- ".inst 0x6460e4ac // bfmmla z12.s, z5.h, z0.h\n"
+ ".inst 0x6460e48c // bfmmla z12.s, z4.h, z0.h\n"
".inst 0x6460e474 // bfmmla z20.s, z3.h, z0.h\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
".inst 0x6460e45c // bfmmla z28.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
- "add x25, x25, #0x10\n"
+ ".inst 0x6461e489 // bfmmla z9.s, z4.h, z1.h\n"
".inst 0x6461e471 // bfmmla z17.s, z3.h, z1.h\n"
".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6460e4ad // bfmmla z13.s, z5.h, z0.h\n"
+ ".inst 0x6460e48d // bfmmla z13.s, z4.h, z0.h\n"
".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6460e45d // bfmmla z29.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6461e4aa // bfmmla z10.s, z5.h, z1.h\n"
+ ".inst 0x6461e48a // bfmmla z10.s, z4.h, z1.h\n"
".inst 0x6461e472 // bfmmla z18.s, z3.h, z1.h\n"
".inst 0x6461e45a // bfmmla z26.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x9]\n"
- ".inst 0x6460e4ae // bfmmla z14.s, z5.h, z0.h\n"
+ ".inst 0x6460e48e // bfmmla z14.s, z4.h, z0.h\n"
".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e45e // bfmmla z30.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
+ ".inst 0x6461e48b // bfmmla z11.s, z4.h, z1.h\n"
".inst 0x6461e473 // bfmmla z19.s, z3.h, z1.h\n"
".inst 0x6461e45b // bfmmla z27.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x12, #2, MUL VL]\n"
- ".inst 0x6460e4af // bfmmla z15.s, z5.h, z0.h\n"
+ ".inst 0x6460e48f // bfmmla z15.s, z4.h, z0.h\n"
".inst 0x6460e477 // bfmmla z23.s, z3.h, z0.h\n"
".inst 0x6460e45f // bfmmla z31.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x12, #3, MUL VL]\n"
- ".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
"addvl x12, x12, #4\n"
+ ".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
".inst 0x6461e4f0 // bfmmla z16.s, z7.h, z1.h\n"
- ".inst 0x6461e498 // bfmmla z24.s, z4.h, z1.h\n"
+ ".inst 0x6461e4b8 // bfmmla z24.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x11, #2, MUL VL]\n"
".inst 0x6460e4cc // bfmmla z12.s, z6.h, z0.h\n"
".inst 0x6460e4f4 // bfmmla z20.s, z7.h, z0.h\n"
- ".inst 0x6460e49c // bfmmla z28.s, z4.h, z0.h\n"
+ ".inst 0x6460e4bc // bfmmla z28.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x11, #3, MUL VL]\n"
- ".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
"addvl x11, x11, #4\n"
+ ".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
".inst 0x6461e4f1 // bfmmla z17.s, z7.h, z1.h\n"
- ".inst 0x6461e499 // bfmmla z25.s, z4.h, z1.h\n"
+ ".inst 0x6461e4b9 // bfmmla z25.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6460e4cd // bfmmla z13.s, z6.h, z0.h\n"
".inst 0x6460e4f5 // bfmmla z21.s, z7.h, z0.h\n"
- ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
+ ".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
"addvl x10, x10, #4\n"
+ ".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
".inst 0x6461e4f2 // bfmmla z18.s, z7.h, z1.h\n"
- ".inst 0x6461e49a // bfmmla z26.s, z4.h, z1.h\n"
+ ".inst 0x6461e4ba // bfmmla z26.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x9, #2, MUL VL]\n"
".inst 0x6460e4ce // bfmmla z14.s, z6.h, z0.h\n"
".inst 0x6460e4f6 // bfmmla z22.s, z7.h, z0.h\n"
- ".inst 0x6460e49e // bfmmla z30.s, z4.h, z0.h\n"
+ ".inst 0x6460e4be // bfmmla z30.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x9, #3, MUL VL]\n"
- ".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
"addvl x9, x9, #4\n"
+ ".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
".inst 0x6461e4f3 // bfmmla z19.s, z7.h, z1.h\n"
- ".inst 0x6461e49b // bfmmla z27.s, z4.h, z1.h\n"
+ ".inst 0x6461e4bb // bfmmla z27.s, z5.h, z1.h\n"
".inst 0x6460e4cf // bfmmla z15.s, z6.h, z0.h\n"
".inst 0x6460e4f7 // bfmmla z23.s, z7.h, z0.h\n"
- ".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
+ ".inst 0x6460e4bf // bfmmla z31.s, z5.h, z0.h\n"
"bgt 66b\n"
"67:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z2.h }, p5/Z, [x12]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z4.h }, p0/Z, [x25]\n"
+ "ld1rqh { z6.h }, p0/Z, [x25]\n"
"ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z2.h }, p0/Z, [x23]\n"
- "trn1 z7.d, z1.d, z4.d\n"
- "trn2 z1.d, z1.d, z4.d\n"
+ "ld1rqh { z4.h }, p0/Z, [x23]\n"
"ld1rqh { z5.h }, p0/Z, [x22]\n"
- "trn1 z6.d, z3.d, z2.d\n"
- "trn2 z3.d, z3.d, z2.d\n"
- "ld1h { z2.h }, p5/Z, [x12]\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
"ld1h { z0.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
".inst 0x6462e4e8 // bfmmla z8.s, z7.h, z2.h\n"
".inst 0x6462e4d0 // bfmmla z16.s, z6.h, z2.h\n"
".inst 0x6462e498 // bfmmla z24.s, z4.h, z2.h\n"
"ld1h { z2.h }, p5/Z, [x11]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6460e4ec // bfmmla z12.s, z7.h, z0.h\n"
".inst 0x6460e4d4 // bfmmla z20.s, z6.h, z0.h\n"
- "addvl x12, x12, #2\n"
".inst 0x6460e49c // bfmmla z28.s, z4.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6462e4e9 // bfmmla z9.s, z7.h, z2.h\n"
"addvl x11, x11, #2\n"
+ ".inst 0x6462e4e9 // bfmmla z9.s, z7.h, z2.h\n"
".inst 0x6462e4d1 // bfmmla z17.s, z6.h, z2.h\n"
".inst 0x6462e499 // bfmmla z25.s, z4.h, z2.h\n"
"ld1h { z2.h }, p5/Z, [x10]\n"
@@ -1568,8 +1570,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6460e4d5 // bfmmla z21.s, z6.h, z0.h\n"
".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6462e4ea // bfmmla z10.s, z7.h, z2.h\n"
"addvl x10, x10, #2\n"
+ ".inst 0x6462e4ea // bfmmla z10.s, z7.h, z2.h\n"
".inst 0x6462e4d2 // bfmmla z18.s, z6.h, z2.h\n"
".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
"ld1h { z2.h }, p5/Z, [x9]\n"
@@ -1577,8 +1579,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6460e4d6 // bfmmla z22.s, z6.h, z0.h\n"
".inst 0x6460e49e // bfmmla z30.s, z4.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6462e4eb // bfmmla z11.s, z7.h, z2.h\n"
"addvl x9, x9, #2\n"
+ ".inst 0x6462e4eb // bfmmla z11.s, z7.h, z2.h\n"
".inst 0x6462e4d3 // bfmmla z19.s, z6.h, z2.h\n"
".inst 0x6462e49b // bfmmla z27.s, z4.h, z2.h\n"
".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
@@ -1587,12 +1589,12 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ble 68f\n"
"ld1h { z2.h }, p5/Z, [x12]\n"
"ld1h { z0.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
".inst 0x6462e428 // bfmmla z8.s, z1.h, z2.h\n"
".inst 0x6462e470 // bfmmla z16.s, z3.h, z2.h\n"
".inst 0x6462e4b8 // bfmmla z24.s, z5.h, z2.h\n"
- ".inst 0x6460e42c // bfmmla z12.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x11]\n"
- "addvl x12, x12, #2\n"
+ ".inst 0x6460e42c // bfmmla z12.s, z1.h, z0.h\n"
".inst 0x6460e474 // bfmmla z20.s, z3.h, z0.h\n"
".inst 0x6460e4bc // bfmmla z28.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x11, #1, MUL VL]\n"
@@ -1600,8 +1602,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6462e429 // bfmmla z9.s, z1.h, z2.h\n"
".inst 0x6462e471 // bfmmla z17.s, z3.h, z2.h\n"
".inst 0x6462e4b9 // bfmmla z25.s, z5.h, z2.h\n"
- ".inst 0x6460e42d // bfmmla z13.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x10]\n"
+ ".inst 0x6460e42d // bfmmla z13.s, z1.h, z0.h\n"
".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
@@ -1609,8 +1611,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6462e42a // bfmmla z10.s, z1.h, z2.h\n"
".inst 0x6462e472 // bfmmla z18.s, z3.h, z2.h\n"
".inst 0x6462e4ba // bfmmla z26.s, z5.h, z2.h\n"
- ".inst 0x6460e42e // bfmmla z14.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x9]\n"
+ ".inst 0x6460e42e // bfmmla z14.s, z1.h, z0.h\n"
".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e4be // bfmmla z30.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x9, #1, MUL VL]\n"
@@ -1627,20 +1629,20 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 63b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1652,9 +1654,9 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z26.d, z26.d, z30.d\n"
"uzp1 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 69f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x21]\n"
"ld1rw { z23.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z0.s\n"
"fmin z12.s, p5/M, z12.s, z0.s\n"
@@ -1702,22 +1704,22 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
"70:" // Height 5: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1725,18 +1727,19 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"b 86f\n"
"71:" // Height 6
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0x18\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"72:" // Height 6: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -1744,12 +1747,12 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 73f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 73f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 73f\n"
"mov x11, x12\n"
"73:" // Height 6: B setup done
@@ -1764,15 +1767,15 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cbz x15, 74f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x15, x15, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1795,54 +1798,54 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"74:" // Height 6: no bias
"tbz %x[flags], #0, 75f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x13]\n"
+ "ld1w { z22.s }, p3/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x13, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"add x24, x13, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "ld1w { z17.s }, p4/Z, [x13]\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z18.s }, p3/Z, [x13, #1, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x13, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x13, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
- "zip1 z8.d, z17.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "zip2 z12.d, z17.d, z12.d\n"
- "zip1 z9.d, z18.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x23]\n"
- "zip2 z13.d, z18.d, z13.d\n"
- "zip1 z10.d, z20.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip2 z14.d, z20.d, z14.d\n"
- "zip1 z11.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x22]\n"
- "zip2 z15.d, z16.d, z15.d\n"
- "zip1 z16.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip2 z20.d, z17.d, z20.d\n"
- "zip1 z17.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x21]\n"
- "zip2 z21.d, z18.d, z21.d\n"
- "zip1 z18.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip2 z22.d, z19.d, z22.d\n"
- "zip1 z19.d, z24.d, z23.d\n"
"ld1w { z0.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z28.s }, p4/Z, [x20]\n"
- "zip2 z23.d, z24.d, z23.d\n"
- "zip1 z24.d, z25.d, z28.d\n"
"ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1878,8 +1881,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"77:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 78f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1910,123 +1913,123 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ble 81f\n"
"80:" // Height 6: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z7.h }, p0/Z, [x26]\n"
- "ld1rqh { z0.h }, p0/Z, [x25]\n"
- "trn1 z6.d, z7.d, z0.d\n"
- "ld1rqh { z5.h }, p0/Z, [x24]\n"
- "ld1rqh { z1.h }, p0/Z, [x23]\n"
- "trn2 z7.d, z7.d, z0.d\n"
- "trn1 z4.d, z5.d, z1.d\n"
- "ld1rqh { z3.h }, p0/Z, [x22]\n"
- "ld1rqh { z0.h }, p0/Z, [x21]\n"
- "trn2 z5.d, z5.d, z1.d\n"
- "trn1 z2.d, z3.d, z0.d\n"
- "trn2 z3.d, z3.d, z0.d\n"
"ld1h { z1.h }, p5/Z, [x12]\n"
- "ld1h { z0.h }, p5/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
- ".inst 0x6461e490 // bfmmla z16.s, z4.h, z1.h\n"
- ".inst 0x6461e458 // bfmmla z24.s, z2.h, z1.h\n"
- "ld1h { z1.h }, p5/Z, [x11]\n"
"sub x27, x27, #0x8\n"
- ".inst 0x6460e4cc // bfmmla z12.s, z6.h, z0.h\n"
- ".inst 0x6460e494 // bfmmla z20.s, z4.h, z0.h\n"
"cmp x27, #0x8\n"
+ "ld1rqh { z6.h }, p0/Z, [x26]\n"
"add x26, x26, #0x10\n"
- ".inst 0x6460e45c // bfmmla z28.s, z2.h, z0.h\n"
- "ld1h { z0.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
+ "ld1rqh { z3.h }, p0/Z, [x25]\n"
"add x25, x25, #0x10\n"
- ".inst 0x6461e491 // bfmmla z17.s, z4.h, z1.h\n"
- ".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
- "ld1h { z1.h }, p5/Z, [x10]\n"
+ "ld1rqh { z7.h }, p0/Z, [x24]\n"
"add x24, x24, #0x10\n"
- ".inst 0x6460e4cd // bfmmla z13.s, z6.h, z0.h\n"
- ".inst 0x6460e495 // bfmmla z21.s, z4.h, z0.h\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z5.h }, p0/Z, [x22]\n"
+ "ld1rqh { z0.h }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
+ "add x21, x21, #0x10\n"
+ "trn1 z3.d, z7.d, z2.d\n"
+ "trn2 z7.d, z7.d, z2.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1h { z0.h }, p5/Z, [x12, #1, MUL VL]\n"
+ ".inst 0x6461e488 // bfmmla z8.s, z4.h, z1.h\n"
+ ".inst 0x6461e470 // bfmmla z16.s, z3.h, z1.h\n"
+ ".inst 0x6461e458 // bfmmla z24.s, z2.h, z1.h\n"
+ "ld1h { z1.h }, p5/Z, [x11]\n"
+ ".inst 0x6460e48c // bfmmla z12.s, z4.h, z0.h\n"
+ ".inst 0x6460e474 // bfmmla z20.s, z3.h, z0.h\n"
+ ".inst 0x6460e45c // bfmmla z28.s, z2.h, z0.h\n"
+ "ld1h { z0.h }, p5/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6461e489 // bfmmla z9.s, z4.h, z1.h\n"
+ ".inst 0x6461e471 // bfmmla z17.s, z3.h, z1.h\n"
+ ".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
+ "ld1h { z1.h }, p5/Z, [x10]\n"
+ ".inst 0x6460e48d // bfmmla z13.s, z4.h, z0.h\n"
+ ".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
".inst 0x6460e45d // bfmmla z29.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
- "add x21, x21, #0x10\n"
- ".inst 0x6461e492 // bfmmla z18.s, z4.h, z1.h\n"
+ ".inst 0x6461e48a // bfmmla z10.s, z4.h, z1.h\n"
+ ".inst 0x6461e472 // bfmmla z18.s, z3.h, z1.h\n"
".inst 0x6461e45a // bfmmla z26.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x9]\n"
- ".inst 0x6460e4ce // bfmmla z14.s, z6.h, z0.h\n"
- ".inst 0x6460e496 // bfmmla z22.s, z4.h, z0.h\n"
+ ".inst 0x6460e48e // bfmmla z14.s, z4.h, z0.h\n"
+ ".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e45e // bfmmla z30.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
- ".inst 0x6461e493 // bfmmla z19.s, z4.h, z1.h\n"
+ ".inst 0x6461e48b // bfmmla z11.s, z4.h, z1.h\n"
+ ".inst 0x6461e473 // bfmmla z19.s, z3.h, z1.h\n"
".inst 0x6461e45b // bfmmla z27.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x12, #2, MUL VL]\n"
- ".inst 0x6460e4cf // bfmmla z15.s, z6.h, z0.h\n"
- ".inst 0x6460e497 // bfmmla z23.s, z4.h, z0.h\n"
+ ".inst 0x6460e48f // bfmmla z15.s, z4.h, z0.h\n"
+ ".inst 0x6460e477 // bfmmla z23.s, z3.h, z0.h\n"
".inst 0x6460e45f // bfmmla z31.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x12, #3, MUL VL]\n"
- ".inst 0x6461e4e8 // bfmmla z8.s, z7.h, z1.h\n"
"addvl x12, x12, #4\n"
- ".inst 0x6461e4b0 // bfmmla z16.s, z5.h, z1.h\n"
- ".inst 0x6461e478 // bfmmla z24.s, z3.h, z1.h\n"
+ ".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
+ ".inst 0x6461e4f0 // bfmmla z16.s, z7.h, z1.h\n"
+ ".inst 0x6461e4b8 // bfmmla z24.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x11, #2, MUL VL]\n"
- ".inst 0x6460e4ec // bfmmla z12.s, z7.h, z0.h\n"
- ".inst 0x6460e4b4 // bfmmla z20.s, z5.h, z0.h\n"
- ".inst 0x6460e47c // bfmmla z28.s, z3.h, z0.h\n"
+ ".inst 0x6460e4cc // bfmmla z12.s, z6.h, z0.h\n"
+ ".inst 0x6460e4f4 // bfmmla z20.s, z7.h, z0.h\n"
+ ".inst 0x6460e4bc // bfmmla z28.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x11, #3, MUL VL]\n"
- ".inst 0x6461e4e9 // bfmmla z9.s, z7.h, z1.h\n"
"addvl x11, x11, #4\n"
- ".inst 0x6461e4b1 // bfmmla z17.s, z5.h, z1.h\n"
- ".inst 0x6461e479 // bfmmla z25.s, z3.h, z1.h\n"
+ ".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
+ ".inst 0x6461e4f1 // bfmmla z17.s, z7.h, z1.h\n"
+ ".inst 0x6461e4b9 // bfmmla z25.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6460e4ed // bfmmla z13.s, z7.h, z0.h\n"
- ".inst 0x6460e4b5 // bfmmla z21.s, z5.h, z0.h\n"
- ".inst 0x6460e47d // bfmmla z29.s, z3.h, z0.h\n"
+ ".inst 0x6460e4cd // bfmmla z13.s, z6.h, z0.h\n"
+ ".inst 0x6460e4f5 // bfmmla z21.s, z7.h, z0.h\n"
+ ".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6461e4ea // bfmmla z10.s, z7.h, z1.h\n"
"addvl x10, x10, #4\n"
- ".inst 0x6461e4b2 // bfmmla z18.s, z5.h, z1.h\n"
- ".inst 0x6461e47a // bfmmla z26.s, z3.h, z1.h\n"
+ ".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
+ ".inst 0x6461e4f2 // bfmmla z18.s, z7.h, z1.h\n"
+ ".inst 0x6461e4ba // bfmmla z26.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x9, #2, MUL VL]\n"
- ".inst 0x6460e4ee // bfmmla z14.s, z7.h, z0.h\n"
- ".inst 0x6460e4b6 // bfmmla z22.s, z5.h, z0.h\n"
- ".inst 0x6460e47e // bfmmla z30.s, z3.h, z0.h\n"
+ ".inst 0x6460e4ce // bfmmla z14.s, z6.h, z0.h\n"
+ ".inst 0x6460e4f6 // bfmmla z22.s, z7.h, z0.h\n"
+ ".inst 0x6460e4be // bfmmla z30.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x9, #3, MUL VL]\n"
- ".inst 0x6461e4eb // bfmmla z11.s, z7.h, z1.h\n"
"addvl x9, x9, #4\n"
- ".inst 0x6461e4b3 // bfmmla z19.s, z5.h, z1.h\n"
- ".inst 0x6461e47b // bfmmla z27.s, z3.h, z1.h\n"
- ".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
- ".inst 0x6460e4b7 // bfmmla z23.s, z5.h, z0.h\n"
- ".inst 0x6460e47f // bfmmla z31.s, z3.h, z0.h\n"
+ ".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
+ ".inst 0x6461e4f3 // bfmmla z19.s, z7.h, z1.h\n"
+ ".inst 0x6461e4bb // bfmmla z27.s, z5.h, z1.h\n"
+ ".inst 0x6460e4cf // bfmmla z15.s, z6.h, z0.h\n"
+ ".inst 0x6460e4f7 // bfmmla z23.s, z7.h, z0.h\n"
+ ".inst 0x6460e4bf // bfmmla z31.s, z5.h, z0.h\n"
"bgt 80b\n"
"81:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z2.h }, p5/Z, [x12]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z0.h }, p0/Z, [x25]\n"
- "trn1 z7.d, z1.d, z0.d\n"
+ "ld1rqh { z6.h }, p0/Z, [x25]\n"
"ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z2.h }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z0.d\n"
- "trn1 z6.d, z3.d, z2.d\n"
+ "ld1rqh { z4.h }, p0/Z, [x23]\n"
"ld1rqh { z5.h }, p0/Z, [x22]\n"
"ld1rqh { z0.h }, p0/Z, [x21]\n"
- "trn2 z3.d, z3.d, z2.d\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
- "ld1h { z2.h }, p5/Z, [x12]\n"
"ld1h { z0.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
".inst 0x6462e4e8 // bfmmla z8.s, z7.h, z2.h\n"
".inst 0x6462e4d0 // bfmmla z16.s, z6.h, z2.h\n"
".inst 0x6462e498 // bfmmla z24.s, z4.h, z2.h\n"
"ld1h { z2.h }, p5/Z, [x11]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6460e4ec // bfmmla z12.s, z7.h, z0.h\n"
".inst 0x6460e4d4 // bfmmla z20.s, z6.h, z0.h\n"
- "addvl x12, x12, #2\n"
".inst 0x6460e49c // bfmmla z28.s, z4.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6462e4e9 // bfmmla z9.s, z7.h, z2.h\n"
"addvl x11, x11, #2\n"
+ ".inst 0x6462e4e9 // bfmmla z9.s, z7.h, z2.h\n"
".inst 0x6462e4d1 // bfmmla z17.s, z6.h, z2.h\n"
".inst 0x6462e499 // bfmmla z25.s, z4.h, z2.h\n"
"ld1h { z2.h }, p5/Z, [x10]\n"
@@ -2034,8 +2037,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6460e4d5 // bfmmla z21.s, z6.h, z0.h\n"
".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6462e4ea // bfmmla z10.s, z7.h, z2.h\n"
"addvl x10, x10, #2\n"
+ ".inst 0x6462e4ea // bfmmla z10.s, z7.h, z2.h\n"
".inst 0x6462e4d2 // bfmmla z18.s, z6.h, z2.h\n"
".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
"ld1h { z2.h }, p5/Z, [x9]\n"
@@ -2043,8 +2046,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6460e4d6 // bfmmla z22.s, z6.h, z0.h\n"
".inst 0x6460e49e // bfmmla z30.s, z4.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6462e4eb // bfmmla z11.s, z7.h, z2.h\n"
"addvl x9, x9, #2\n"
+ ".inst 0x6462e4eb // bfmmla z11.s, z7.h, z2.h\n"
".inst 0x6462e4d3 // bfmmla z19.s, z6.h, z2.h\n"
".inst 0x6462e49b // bfmmla z27.s, z4.h, z2.h\n"
".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
@@ -2053,12 +2056,12 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"ble 82f\n"
"ld1h { z2.h }, p5/Z, [x12]\n"
"ld1h { z0.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
".inst 0x6462e428 // bfmmla z8.s, z1.h, z2.h\n"
".inst 0x6462e470 // bfmmla z16.s, z3.h, z2.h\n"
".inst 0x6462e4b8 // bfmmla z24.s, z5.h, z2.h\n"
- ".inst 0x6460e42c // bfmmla z12.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x11]\n"
- "addvl x12, x12, #2\n"
+ ".inst 0x6460e42c // bfmmla z12.s, z1.h, z0.h\n"
".inst 0x6460e474 // bfmmla z20.s, z3.h, z0.h\n"
".inst 0x6460e4bc // bfmmla z28.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x11, #1, MUL VL]\n"
@@ -2066,8 +2069,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6462e429 // bfmmla z9.s, z1.h, z2.h\n"
".inst 0x6462e471 // bfmmla z17.s, z3.h, z2.h\n"
".inst 0x6462e4b9 // bfmmla z25.s, z5.h, z2.h\n"
- ".inst 0x6460e42d // bfmmla z13.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x10]\n"
+ ".inst 0x6460e42d // bfmmla z13.s, z1.h, z0.h\n"
".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
@@ -2075,8 +2078,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
".inst 0x6462e42a // bfmmla z10.s, z1.h, z2.h\n"
".inst 0x6462e472 // bfmmla z18.s, z3.h, z2.h\n"
".inst 0x6462e4ba // bfmmla z26.s, z5.h, z2.h\n"
- ".inst 0x6460e42e // bfmmla z14.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x9]\n"
+ ".inst 0x6460e42e // bfmmla z14.s, z1.h, z0.h\n"
".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e4be // bfmmla z30.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x9, #1, MUL VL]\n"
@@ -2093,21 +2096,21 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 77b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -2123,9 +2126,9 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"uzp1 z30.d, z27.d, z31.d\n"
"uzp2 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 83f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x21]\n"
"ld1rw { z0.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
@@ -2181,26 +2184,26 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z23.s }, p4, [x22]\n"
- "st1w { z28.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z23.s }, p4, [x23]\n"
+ "st1w { z28.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x22]\n"
+ "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
"84:" // Height 6: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -2217,8 +2220,8 @@ void sve_ffhybrid_bf16fp32_mmla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"86:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
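
Note on the operand-list change just above: it is the same refactor repeated across every ffhybrid kernel in this patch. `output_ptr` and `bias` stop being inline-asm operands (the old `"+&r"`/`"r"` bindings pinned general-purpose registers for the entire asm block) and become fields of the per-call `KernelArgs` struct, which the assembly reads back through `offsetof` immediates — hence the new `ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]` and `ldr x15, [%x[args_ptr], %[offsetof_bias]]` sequences throughout the kernel bodies. A minimal sketch of the pattern follows; the struct fields shown, the `float` bias type, and the helper name `load_output_ptr` are illustrative, not the library's declarations, and the inline asm is AArch64-only.

#include <cstddef>

// Sketch only -- just enough of a KernelArgs-style block to show the
// pattern; the real struct in the patch carries many more fields.
struct KernelArgs
{
    size_t output_offset = {};
    void *output_ptr = {};   // formerly the [output_ptr] "+&r" asm operand
    const float *bias = {};  // formerly the [bias] "r" operand (element type varies per kernel)
};

// AArch64-only sketch of the load the generated kernels now emit, e.g.
// "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]".
void *load_output_ptr(const KernelArgs &ka)
{
    void *out;
    __asm__ volatile(
        "ldr %x[out], [%x[args_ptr], %[offsetof_output_ptr]]\n"
        : [out] "=r" (out)
        : [args_ptr] "r" (&ka),
          [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr))
        : "memory");
    return out;
}

A plausible motive, though the patch does not state one, is register pressure: with z0-z31 and x9-x28 already clobbered, freeing two pinned GPRs and letting the Height-6 path advance the output pointer in memory (`madd x21, x20, x21, x13` followed by `str x21, [%x[args_ptr], %[offsetof_output_ptr]]`) instead of in a reserved register gives the surrounding code room.
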
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp
index c42ad7e879..44b766c2d5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,7 +82,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 1> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
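
For context on the one-line change in the hunk above: `StdTransformsSVE` gains the kernel's LHS operand type as a new leading template parameter, so the transform set no longer has to assume the LHS type equals the RHS type. A hypothetical reduction of the signature is sketched below; the parameter names and their meanings are guesses from this call site, not the library's real definition.

// Illustrative only -- not the library's StdTransformsSVE declaration.
template <typename TLhsOperand, typename TRhsOperand, typename TResult,
          unsigned int OutHeight, unsigned int OutWidthVL, unsigned int BlockBy>
class StdTransformsSVESketch
{
    // Transform/interleave helpers would be parameterised on both operand
    // types, so a kernel whose LHS and RHS inputs need distinct handling
    // can express that explicitly instead of the LHS type being implied
    // by the RHS type, as the pre-patch three-type signature did.
};

The same mechanical edit presumably repeats in the other kernel headers touched by this patch.
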
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
index 66601bd312..406f20bad7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,18 +49,19 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const __fp16 *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -81,6 +82,7 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -106,15 +108,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"bgt 27f\n"
"beq 14f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -122,12 +124,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"mov x11, x12\n"
"3:" // Height 1: B setup done
@@ -162,8 +164,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"7:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 8f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -181,41 +183,41 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"ld1h { z7.h }, p4/Z, [x11]\n"
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop
+ "addvl x12, x12, #1\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z17.h }, p4/Z, [x10]\n"
- "ld1h { z16.h }, p4/Z, [x9]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
- "fmla z10.h, p4/M, z17.h, z0.h\n"
- "fmla z11.h, p4/M, z16.h, z0.h\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "ld1h { z16.h }, p4/Z, [x9]\n"
"add x26, x26, #0x2\n"
"subs x27, x27, #0x1\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "ld1h { z6.h }, p4/Z, [x12]\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
+ "ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1h { z7.h }, p4/Z, [x11]\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z17.h }, p4/Z, [x10]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z16.h }, p4/Z, [x9]\n"
"add x28, x28, #0x1\n"
- "cmp x28, x20\n"
- "fmla z10.h, p4/M, z17.h, z0.h\n"
- "fmla z11.h, p4/M, z16.h, z0.h\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "cmp x28, x20\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, p4/M, z17.h, z0.h\n"
+ "fmla z11.h, p4/M, z16.h, z0.h\n"
"bne 7b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z17.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z17.h }, p4/Z, [x21]\n"
"ld1rh { z16.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z17.h\n"
"fmin z9.h, p4/M, z9.h, z17.h\n"
@@ -238,15 +240,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 80f\n"
"14:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -254,12 +256,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 16f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 16f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 16f\n"
"mov x11, x12\n"
"16:" // Height 2: B setup done
@@ -274,22 +276,22 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"cbz x15, 17f\n"
"ld1h { z8.h }, p4/Z, [x15]\n"
"ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x15, x15, #4\n"
"b 19f\n"
"17:" // Height 2: no bias
"tbz %x[flags], #0, 18f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x13, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x13]\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x20, x13, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x20]\n"
"ld1h { z13.h }, p2/Z, [x20, #1, MUL VL]\n"
"ld1h { z14.h }, p1/Z, [x20, #2, MUL VL]\n"
@@ -308,8 +310,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"20:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 21f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -331,26 +333,26 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"ld1h { z7.h }, p4/Z, [x11]\n"
"ble 24f\n"
"23:" // Height 2: Multiply loop: Main loop
+ "addvl x12, x12, #1\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
"ld1h { z17.h }, p4/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"ld1h { z16.h }, p4/Z, [x9]\n"
- "addvl x11, x11, #1\n"
"add x26, x26, #0x2\n"
"subs x27, x27, #0x1\n"
+ "add x25, x25, #0x2\n"
+ "addvl x10, x10, #1\n"
"fmla z10.h, p4/M, z17.h, z0.h\n"
"fmla z14.h, p4/M, z17.h, z1.h\n"
- "add x25, x25, #0x2\n"
+ "addvl x9, x9, #1\n"
+ "ld1h { z6.h }, p4/Z, [x12]\n"
"fmla z11.h, p4/M, z16.h, z0.h\n"
- "fmla z15.h, p4/M, z16.h, z1.h\n"
- "addvl x10, x10, #1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
+ "fmla z15.h, p4/M, z16.h, z1.h\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
- "addvl x9, x9, #1\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
"ld1h { z7.h }, p4/Z, [x11]\n"
"bgt 23b\n"
"24:" // Height 2: Multiply loop: Main loop skip
@@ -362,22 +364,22 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z13.h, p4/M, z7.h, z1.h\n"
"ld1h { z16.h }, p4/Z, [x9]\n"
"add x28, x28, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"cmp x28, x20\n"
+ "addvl x10, x10, #1\n"
"fmla z10.h, p4/M, z17.h, z0.h\n"
"fmla z14.h, p4/M, z17.h, z1.h\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z11.h, p4/M, z16.h, z0.h\n"
"fmla z15.h, p4/M, z16.h, z1.h\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"bne 20b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z17.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z17.h }, p4/Z, [x21]\n"
"ld1rh { z16.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z17.h\n"
"fmin z9.h, p4/M, z9.h, z17.h\n"
@@ -401,10 +403,10 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -412,15 +414,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 80f\n"
"27:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -428,12 +430,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 29f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 29f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 29f\n"
"mov x11, x12\n"
"29:" // Height 3: B setup done
@@ -448,27 +450,27 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"cbz x15, 30f\n"
"ld1h { z8.h }, p4/Z, [x15]\n"
"ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 32f\n"
"30:" // Height 3: no bias
"tbz %x[flags], #0, 31f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x13, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x13]\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x21, x13, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x21]\n"
"ld1h { z13.h }, p2/Z, [x21, #1, MUL VL]\n"
"ld1h { z14.h }, p1/Z, [x21, #2, MUL VL]\n"
@@ -495,8 +497,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"33:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 34f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -522,13 +524,13 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"ld1h { z7.h }, p4/Z, [x11]\n"
"ble 37f\n"
"36:" // Height 3: Multiply loop: Main loop
+ "addvl x12, x12, #1\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z21.h }, p4/Z, [x10]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"add x26, x26, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
@@ -536,18 +538,18 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"subs x27, x27, #0x1\n"
"add x25, x25, #0x2\n"
"add x24, x24, #0x2\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, p4/M, z21.h, z0.h\n"
"fmla z14.h, p4/M, z21.h, z1.h\n"
"fmla z18.h, p4/M, z21.h, z2.h\n"
+ "ld1h { z6.h }, p4/Z, [x12]\n"
"fmla z11.h, p4/M, z20.h, z0.h\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
+ "ld1rh { z0.h }, p4/Z, [x26]\n"
"fmla z15.h, p4/M, z20.h, z1.h\n"
"fmla z19.h, p4/M, z20.h, z2.h\n"
- "ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
"ld1h { z7.h }, p4/Z, [x11]\n"
"bgt 36b\n"
"37:" // Height 3: Multiply loop: Main loop skip
@@ -556,30 +558,30 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z12.h, p4/M, z6.h, z1.h\n"
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z21.h }, p4/Z, [x10]\n"
- "cmp x28, x20\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "addvl x12, x12, #1\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"ld1h { z20.h }, p4/Z, [x9]\n"
- "addvl x12, x12, #1\n"
- "fmla z10.h, p4/M, z21.h, z0.h\n"
- "fmla z14.h, p4/M, z21.h, z1.h\n"
"addvl x11, x11, #1\n"
+ "cmp x28, x20\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.h, p4/M, z21.h, z0.h\n"
+ "fmla z14.h, p4/M, z21.h, z1.h\n"
"fmla z18.h, p4/M, z21.h, z2.h\n"
"fmla z11.h, p4/M, z20.h, z0.h\n"
- "addvl x9, x9, #1\n"
"fmla z15.h, p4/M, z20.h, z1.h\n"
"fmla z19.h, p4/M, z20.h, z2.h\n"
"bne 33b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z21.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z21.h }, p4/Z, [x21]\n"
"ld1rh { z20.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z21.h\n"
"fmin z9.h, p4/M, z9.h, z21.h\n"
@@ -611,14 +613,14 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x25, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -626,15 +628,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 80f\n"
"40:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -642,12 +644,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 42f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 42f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 42f\n"
"mov x11, x12\n"
"42:" // Height 4: B setup done
@@ -662,18 +664,18 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"cbz x15, 43f\n"
"ld1h { z8.h }, p4/Z, [x15]\n"
"ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -681,13 +683,13 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"43:" // Height 4: no bias
"tbz %x[flags], #0, 44f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x13, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x13]\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x22, x13, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x22]\n"
"ld1h { z13.h }, p2/Z, [x22, #1, MUL VL]\n"
"ld1h { z14.h }, p1/Z, [x22, #2, MUL VL]\n"
@@ -722,8 +724,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -753,9 +755,9 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"ld1h { z7.h }, p4/Z, [x11]\n"
"ble 50f\n"
"49:" // Height 4: Multiply loop: Main loop
+ "addvl x12, x12, #1\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
"fmla z12.h, p4/M, z6.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
@@ -773,9 +775,9 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z10.h, p4/M, z25.h, z0.h\n"
"fmla z14.h, p4/M, z25.h, z1.h\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.h, p4/M, z25.h, z2.h\n"
"fmla z22.h, p4/M, z25.h, z3.h\n"
- "addvl x9, x9, #1\n"
"ld1h { z6.h }, p4/Z, [x12]\n"
"fmla z11.h, p4/M, z24.h, z0.h\n"
"fmla z15.h, p4/M, z24.h, z1.h\n"
@@ -795,18 +797,18 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
"ld1h { z25.h }, p4/Z, [x10]\n"
- "cmp x28, x20\n"
+ "addvl x12, x12, #1\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"ld1h { z24.h }, p4/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "cmp x28, x20\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, p4/M, z25.h, z0.h\n"
"fmla z14.h, p4/M, z25.h, z1.h\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, p4/M, z25.h, z2.h\n"
"fmla z22.h, p4/M, z25.h, z3.h\n"
"fmla z11.h, p4/M, z24.h, z0.h\n"
@@ -815,13 +817,13 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z23.h, p4/M, z24.h, z3.h\n"
"bne 46b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z25.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z25.h }, p4/Z, [x21]\n"
"ld1rh { z24.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z25.h\n"
"fmin z9.h, p4/M, z9.h, z25.h\n"
@@ -861,18 +863,18 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x24]\n"
+ "st1h { z21.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x24, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -880,15 +882,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 80f\n"
"53:" // Height 5
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -896,12 +898,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 55f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 55f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 55f\n"
"mov x11, x12\n"
"55:" // Height 5: B setup done
@@ -916,18 +918,18 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"cbz x15, 56f\n"
"ld1h { z8.h }, p4/Z, [x15]\n"
"ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -939,16 +941,16 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"56:" // Height 5: no bias
"tbz %x[flags], #0, 57f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x13]\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x23, x13, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x23]\n"
"ld1h { z13.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z14.h }, p1/Z, [x23, #2, MUL VL]\n"
"ld1h { z15.h }, p0/Z, [x23, #3, MUL VL]\n"
"ld1h { z16.h }, p3/Z, [x22]\n"
@@ -989,8 +991,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"59:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 60f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1033,8 +1035,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x26, x26, #0x2\n"
"subs x27, x27, #0x1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z29.h }, p4/Z, [x10]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"add x25, x25, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
@@ -1044,23 +1046,23 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z25.h, p4/M, z7.h, z4.h\n"
"ld1h { z28.h }, p4/Z, [x9]\n"
"add x22, x22, #0x2\n"
- "fmla z10.h, p4/M, z29.h, z0.h\n"
- "fmla z14.h, p4/M, z29.h, z1.h\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, p4/M, z29.h, z0.h\n"
+ "fmla z14.h, p4/M, z29.h, z1.h\n"
"fmla z18.h, p4/M, z29.h, z2.h\n"
"fmla z22.h, p4/M, z29.h, z3.h\n"
"fmla z26.h, p4/M, z29.h, z4.h\n"
+ "ld1h { z6.h }, p4/Z, [x12]\n"
"fmla z11.h, p4/M, z28.h, z0.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x12]\n"
"fmla z15.h, p4/M, z28.h, z1.h\n"
- "fmla z19.h, p4/M, z28.h, z2.h\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
+ "fmla z19.h, p4/M, z28.h, z2.h\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
"fmla z23.h, p4/M, z28.h, z3.h\n"
- "fmla z27.h, p4/M, z28.h, z4.h\n"
"ld1rh { z3.h }, p4/Z, [x23]\n"
+ "fmla z27.h, p4/M, z28.h, z4.h\n"
"ld1rh { z4.h }, p4/Z, [x22]\n"
"ld1h { z7.h }, p4/Z, [x11]\n"
"bgt 62b\n"
@@ -1071,15 +1073,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "cmp x28, x20\n"
"addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z29.h }, p4/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
- "addvl x10, x10, #1\n"
+ "cmp x28, x20\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
"ld1h { z28.h }, p4/Z, [x9]\n"
@@ -1096,14 +1098,14 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z27.h, p4/M, z28.h, z4.h\n"
"bne 59b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z29.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z29.h }, p4/Z, [x21]\n"
"ld1rh { z28.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z29.h\n"
"fmin z9.h, p4/M, z9.h, z29.h\n"
@@ -1151,22 +1153,22 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x22]\n"
- "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x24]\n"
+ "st1h { z21.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x23]\n"
+ "st1h { z25.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x23, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1174,18 +1176,19 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"b 80f\n"
"66:" // Height 6
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0xc\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0xc\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -1193,12 +1196,12 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 68f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 68f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 68f\n"
"mov x11, x12\n"
"68:" // Height 6: B setup done
@@ -1213,18 +1216,18 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"cbz x15, 69f\n"
"ld1h { z8.h }, p4/Z, [x15]\n"
"ld1h { z9.h }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1240,17 +1243,17 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"69:" // Height 6: no bias
"tbz %x[flags], #0, 70f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x13, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x13]\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x13, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z11.h }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x24, x13, x20, LSL #1\n"
+ "add x23, x24, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x24]\n"
"ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
"ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
"ld1h { z16.h }, p3/Z, [x23]\n"
@@ -1299,8 +1302,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"72:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1362,9 +1365,9 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z29.h, p4/M, z7.h, z5.h\n"
"ld1h { z7.h }, p4/Z, [x9]\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, p4/M, z6.h, z0.h\n"
"fmla z14.h, p4/M, z6.h, z1.h\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, p4/M, z6.h, z2.h\n"
"fmla z22.h, p4/M, z6.h, z3.h\n"
"fmla z26.h, p4/M, z6.h, z4.h\n"
@@ -1391,15 +1394,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "cmp x28, x20\n"
"addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z28.h, p4/M, z6.h, z5.h\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
- "addvl x10, x10, #1\n"
+ "cmp x28, x20\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
@@ -1420,15 +1423,15 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"fmla z31.h, p4/M, z7.h, z5.h\n"
"bne 72b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p4/Z, [x21]\n"
"ld1rh { z0.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
@@ -1484,26 +1487,26 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x22]\n"
- "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p0, [x22, #3, MUL VL]\n"
- "st1h { z28.h }, p3, [x21]\n"
- "st1h { z29.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z30.h }, p1, [x21, #2, MUL VL]\n"
- "st1h { z31.h }, p0, [x21, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x24]\n"
+ "st1h { z21.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x23]\n"
+ "st1h { z25.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z28.h }, p3, [x22]\n"
+ "st1h { z29.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z30.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z31.h }, p0, [x22, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1520,8 +1523,8 @@ void sve_ffhybrid_fp16_mla_6x4VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp
index 842db1a4fc..78a84fd89b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp16_mla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,18 +49,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const __fp16 *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -81,6 +82,7 @@ void sve_ffhybrid_fp16_mla_6x4VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -106,15 +108,15 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"bgt 29f\n"
"beq 15f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -122,12 +124,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"mov x11, x12\n"
"3:" // Height 1: B setup done
@@ -162,8 +164,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"7:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 8f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -179,201 +181,201 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1h { z16.h }, p5/Z, [x12]\n"
- "fmla z8.h, z16.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "fmla z8.h, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"fmla z9.h, z16.h, z0.h[0]\n"
- "ld1h { z16.h }, p5/Z, [x10]\n"
- "fmla z10.h, z16.h, z0.h[0]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
+ "fmla z10.h, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #1, MUL VL]\n"
"fmla z11.h, z16.h, z0.h[0]\n"
- "ld1h { z16.h }, p5/Z, [x12, #1, MUL VL]\n"
- "fmla z8.h, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z8.h, z17.h, z0.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[1]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z10.h, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z10.h, z17.h, z0.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #2, MUL VL]\n"
"fmla z11.h, z16.h, z0.h[1]\n"
- "ld1h { z16.h }, p5/Z, [x12, #2, MUL VL]\n"
- "fmla z8.h, z16.h, z0.h[2]\n"
"ld1h { z16.h }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z8.h, z17.h, z0.h[2]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[2]\n"
- "ld1h { z16.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z10.h, z16.h, z0.h[2]\n"
"ld1h { z16.h }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z10.h, z17.h, z0.h[2]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #3, MUL VL]\n"
"fmla z11.h, z16.h, z0.h[2]\n"
- "ld1h { z16.h }, p5/Z, [x12, #3, MUL VL]\n"
- "fmla z8.h, z16.h, z0.h[3]\n"
"ld1h { z16.h }, p5/Z, [x11, #3, MUL VL]\n"
+ "fmla z8.h, z17.h, z0.h[3]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[3]\n"
- "ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "fmla z10.h, z16.h, z0.h[3]\n"
"ld1h { z16.h }, p5/Z, [x9, #3, MUL VL]\n"
+ "fmla z10.h, z17.h, z0.h[3]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #4, MUL VL]\n"
"fmla z11.h, z16.h, z0.h[3]\n"
- "ld1h { z16.h }, p5/Z, [x12, #4, MUL VL]\n"
- "fmla z8.h, z16.h, z0.h[4]\n"
"ld1h { z16.h }, p5/Z, [x11, #4, MUL VL]\n"
+ "fmla z8.h, z17.h, z0.h[4]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[4]\n"
- "ld1h { z16.h }, p5/Z, [x10, #4, MUL VL]\n"
- "fmla z10.h, z16.h, z0.h[4]\n"
"ld1h { z16.h }, p5/Z, [x9, #4, MUL VL]\n"
+ "fmla z10.h, z17.h, z0.h[4]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #5, MUL VL]\n"
"fmla z11.h, z16.h, z0.h[4]\n"
- "ld1h { z16.h }, p5/Z, [x12, #5, MUL VL]\n"
- "fmla z8.h, z16.h, z0.h[5]\n"
"ld1h { z16.h }, p5/Z, [x11, #5, MUL VL]\n"
+ "fmla z8.h, z17.h, z0.h[5]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[5]\n"
- "ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
- "fmla z10.h, z16.h, z0.h[5]\n"
"ld1h { z16.h }, p5/Z, [x9, #5, MUL VL]\n"
+ "fmla z10.h, z17.h, z0.h[5]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #6, MUL VL]\n"
"fmla z11.h, z16.h, z0.h[5]\n"
- "ld1h { z16.h }, p5/Z, [x12, #6, MUL VL]\n"
- "fmla z8.h, z16.h, z0.h[6]\n"
"ld1h { z16.h }, p5/Z, [x11, #6, MUL VL]\n"
+ "fmla z8.h, z17.h, z0.h[6]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[6]\n"
- "ld1h { z16.h }, p5/Z, [x10, #6, MUL VL]\n"
- "fmla z10.h, z16.h, z0.h[6]\n"
"ld1h { z16.h }, p5/Z, [x9, #6, MUL VL]\n"
+ "fmla z10.h, z17.h, z0.h[6]\n"
+ "ld1h { z17.h }, p5/Z, [x12, #7, MUL VL]\n"
+ "addvl x12, x12, #8\n"
"fmla z11.h, z16.h, z0.h[6]\n"
- "ld1h { z16.h }, p5/Z, [x12, #7, MUL VL]\n"
- "fmla z8.h, z16.h, z0.h[7]\n"
"ld1h { z16.h }, p5/Z, [x11, #7, MUL VL]\n"
- "fmla z9.h, z16.h, z0.h[7]\n"
+ "addvl x11, x11, #8\n"
+ "fmla z8.h, z17.h, z0.h[7]\n"
"ld1h { z17.h }, p5/Z, [x10, #7, MUL VL]\n"
- "sub x27, x27, #0x8\n"
+ "addvl x10, x10, #8\n"
+ "fmla z9.h, z16.h, z0.h[7]\n"
"ld1h { z16.h }, p5/Z, [x9, #7, MUL VL]\n"
- "cmp x27, #0x8\n"
+ "addvl x9, x9, #8\n"
"fmla z10.h, z17.h, z0.h[7]\n"
"fmla z11.h, z16.h, z0.h[7]\n"
- "add x26, x26, #0x10\n"
- "addvl x12, x12, #8\n"
- "addvl x11, x11, #8\n"
- "addvl x10, x10, #8\n"
- "addvl x9, x9, #8\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1h { z16.h }, p5/Z, [x12]\n"
- "fmla z8.h, z16.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
- "fmla z9.h, z16.h, z0.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"subs x27, x27, #0x1\n"
- "ld1h { z16.h }, p5/Z, [x9]\n"
- "fmla z10.h, z17.h, z0.h[0]\n"
- "fmla z11.h, z16.h, z0.h[0]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ "fmla z8.h, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.h, z16.h, z0.h[0]\n"
+ "ld1h { z16.h }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, z17.h, z0.h[0]\n"
+ "fmla z11.h, z16.h, z0.h[0]\n"
"ble 12f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
- "fmla z8.h, z17.h, z0.h[1]\n"
- "fmla z9.h, z16.h, z0.h[1]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x9]\n"
"subs x27, x27, #0x1\n"
- "fmla z10.h, z17.h, z0.h[1]\n"
- "fmla z11.h, z16.h, z0.h[1]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "fmla z8.h, z17.h, z0.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.h, z16.h, z0.h[1]\n"
+ "ld1h { z16.h }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, z17.h, z0.h[1]\n"
+ "fmla z11.h, z16.h, z0.h[1]\n"
"ble 12f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
- "fmla z8.h, z17.h, z0.h[2]\n"
- "fmla z9.h, z16.h, z0.h[2]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x9]\n"
"subs x27, x27, #0x1\n"
- "fmla z10.h, z17.h, z0.h[2]\n"
- "fmla z11.h, z16.h, z0.h[2]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "fmla z8.h, z17.h, z0.h[2]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.h, z16.h, z0.h[2]\n"
+ "ld1h { z16.h }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, z17.h, z0.h[2]\n"
+ "fmla z11.h, z16.h, z0.h[2]\n"
"ble 12f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
- "fmla z8.h, z17.h, z0.h[3]\n"
- "fmla z9.h, z16.h, z0.h[3]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x9]\n"
"subs x27, x27, #0x1\n"
- "fmla z10.h, z17.h, z0.h[3]\n"
- "fmla z11.h, z16.h, z0.h[3]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "fmla z8.h, z17.h, z0.h[3]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.h, z16.h, z0.h[3]\n"
+ "ld1h { z16.h }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, z17.h, z0.h[3]\n"
+ "fmla z11.h, z16.h, z0.h[3]\n"
"ble 12f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
- "fmla z8.h, z17.h, z0.h[4]\n"
- "fmla z9.h, z16.h, z0.h[4]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x9]\n"
"subs x27, x27, #0x1\n"
- "fmla z10.h, z17.h, z0.h[4]\n"
- "fmla z11.h, z16.h, z0.h[4]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "fmla z8.h, z17.h, z0.h[4]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.h, z16.h, z0.h[4]\n"
+ "ld1h { z16.h }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, z17.h, z0.h[4]\n"
+ "fmla z11.h, z16.h, z0.h[4]\n"
"ble 12f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
- "fmla z8.h, z17.h, z0.h[5]\n"
- "fmla z9.h, z16.h, z0.h[5]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x9]\n"
"subs x27, x27, #0x1\n"
- "fmla z10.h, z17.h, z0.h[5]\n"
- "fmla z11.h, z16.h, z0.h[5]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "fmla z8.h, z17.h, z0.h[5]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.h, z16.h, z0.h[5]\n"
+ "ld1h { z16.h }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, z17.h, z0.h[5]\n"
+ "fmla z11.h, z16.h, z0.h[5]\n"
"ble 12f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
- "fmla z8.h, z17.h, z0.h[6]\n"
- "fmla z9.h, z16.h, z0.h[6]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x9]\n"
"subs x27, x27, #0x1\n"
- "fmla z10.h, z17.h, z0.h[6]\n"
- "fmla z11.h, z16.h, z0.h[6]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "fmla z8.h, z17.h, z0.h[6]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.h, z16.h, z0.h[6]\n"
+ "ld1h { z16.h }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.h, z17.h, z0.h[6]\n"
+ "fmla z11.h, z16.h, z0.h[6]\n"
"ble 12f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z17.h, z0.h[7]\n"
- "fmla z9.h, z16.h, z0.h[7]\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z16.h, z0.h[7]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[7]\n"
"fmla z11.h, z16.h, z0.h[7]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"12:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 7b\n"
"tbz %x[flags], #1, 13f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z17.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z17.h }, p5/Z, [x21]\n"
"ld1rh { z16.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z17.h\n"
"fmin z9.h, p5/M, z9.h, z17.h\n"
@@ -396,15 +398,15 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"b 86f\n"
"15:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"16:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -412,12 +414,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 17f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"mov x11, x12\n"
"17:" // Height 2: B setup done
@@ -432,22 +434,22 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cbz x15, 18f\n"
"ld1h { z8.h }, p5/Z, [x15]\n"
"ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x15, x15, #4\n"
"b 20f\n"
"18:" // Height 2: no bias
"tbz %x[flags], #0, 19f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x13, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x13]\n"
"ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x20, x13, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x20]\n"
"ld1h { z13.h }, p3/Z, [x20, #1, MUL VL]\n"
"ld1h { z14.h }, p2/Z, [x20, #2, MUL VL]\n"
@@ -466,8 +468,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"21:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 22f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -486,29 +488,29 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 25f\n"
"24:" // Height 2: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z0.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z1.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"fmla z8.h, z17.h, z1.h[0]\n"
"fmla z12.h, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"fmla z9.h, z16.h, z1.h[0]\n"
"fmla z13.h, z16.h, z0.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
"fmla z10.h, z17.h, z1.h[0]\n"
"fmla z14.h, z17.h, z0.h[0]\n"
"ld1h { z17.h }, p5/Z, [x12, #1, MUL VL]\n"
- "cmp x27, #0x8\n"
"fmla z11.h, z16.h, z1.h[0]\n"
"fmla z15.h, z16.h, z0.h[0]\n"
"ld1h { z16.h }, p5/Z, [x11, #1, MUL VL]\n"
- "add x26, x26, #0x10\n"
"fmla z8.h, z17.h, z1.h[1]\n"
"fmla z12.h, z17.h, z0.h[1]\n"
"ld1h { z17.h }, p5/Z, [x10, #1, MUL VL]\n"
- "add x25, x25, #0x10\n"
"fmla z9.h, z16.h, z1.h[1]\n"
"fmla z13.h, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x9, #1, MUL VL]\n"
@@ -595,161 +597,161 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"bgt 24b\n"
"25:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
"fmla z8.h, z17.h, z0.h[0]\n"
"fmla z12.h, z17.h, z1.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z16.h, z0.h[0]\n"
"fmla z13.h, z16.h, z1.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[0]\n"
"fmla z14.h, z17.h, z1.h[0]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z11.h, z16.h, z0.h[0]\n"
"fmla z15.h, z16.h, z1.h[0]\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z17.h, z0.h[1]\n"
"fmla z12.h, z17.h, z1.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z16.h, z0.h[1]\n"
"fmla z13.h, z16.h, z1.h[1]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[1]\n"
"fmla z14.h, z17.h, z1.h[1]\n"
- "addvl x12, x12, #1\n"
"fmla z11.h, z16.h, z0.h[1]\n"
"fmla z15.h, z16.h, z1.h[1]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z17.h, z0.h[2]\n"
"fmla z12.h, z17.h, z1.h[2]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z16.h, z0.h[2]\n"
"fmla z13.h, z16.h, z1.h[2]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[2]\n"
"fmla z14.h, z17.h, z1.h[2]\n"
- "addvl x12, x12, #1\n"
"fmla z11.h, z16.h, z0.h[2]\n"
"fmla z15.h, z16.h, z1.h[2]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z17.h, z0.h[3]\n"
"fmla z12.h, z17.h, z1.h[3]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z16.h, z0.h[3]\n"
"fmla z13.h, z16.h, z1.h[3]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[3]\n"
"fmla z14.h, z17.h, z1.h[3]\n"
- "addvl x12, x12, #1\n"
"fmla z11.h, z16.h, z0.h[3]\n"
"fmla z15.h, z16.h, z1.h[3]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z17.h, z0.h[4]\n"
"fmla z12.h, z17.h, z1.h[4]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z16.h, z0.h[4]\n"
"fmla z13.h, z16.h, z1.h[4]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[4]\n"
"fmla z14.h, z17.h, z1.h[4]\n"
- "addvl x12, x12, #1\n"
"fmla z11.h, z16.h, z0.h[4]\n"
"fmla z15.h, z16.h, z1.h[4]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z17.h, z0.h[5]\n"
"fmla z12.h, z17.h, z1.h[5]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z16.h, z0.h[5]\n"
"fmla z13.h, z16.h, z1.h[5]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[5]\n"
"fmla z14.h, z17.h, z1.h[5]\n"
- "addvl x12, x12, #1\n"
"fmla z11.h, z16.h, z0.h[5]\n"
"fmla z15.h, z16.h, z1.h[5]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z17.h, z0.h[6]\n"
"fmla z12.h, z17.h, z1.h[6]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z16.h, z0.h[6]\n"
"fmla z13.h, z16.h, z1.h[6]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[6]\n"
"fmla z14.h, z17.h, z1.h[6]\n"
- "addvl x12, x12, #1\n"
"fmla z11.h, z16.h, z0.h[6]\n"
"fmla z15.h, z16.h, z1.h[6]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1h { z17.h }, p5/Z, [x12]\n"
"ld1h { z16.h }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z17.h, z0.h[7]\n"
"fmla z12.h, z17.h, z1.h[7]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z16.h, z0.h[7]\n"
"fmla z13.h, z16.h, z1.h[7]\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z17.h, z0.h[7]\n"
"fmla z14.h, z17.h, z1.h[7]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z11.h, z16.h, z0.h[7]\n"
"fmla z15.h, z16.h, z1.h[7]\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"26:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 21b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z17.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z17.h }, p5/Z, [x21]\n"
"ld1rh { z16.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z17.h\n"
"fmin z9.h, p5/M, z9.h, z17.h\n"
@@ -773,10 +775,10 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
"28:" // Height 2: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -784,15 +786,15 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"b 86f\n"
"29:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"30:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -800,12 +802,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 31f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"mov x11, x12\n"
"31:" // Height 3: B setup done
@@ -820,27 +822,27 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cbz x15, 32f\n"
"ld1h { z8.h }, p5/Z, [x15]\n"
"ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 34f\n"
"32:" // Height 3: no bias
"tbz %x[flags], #0, 33f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x13, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x13]\n"
"ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x21, x13, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x21]\n"
"ld1h { z13.h }, p3/Z, [x21, #1, MUL VL]\n"
"ld1h { z14.h }, p2/Z, [x21, #2, MUL VL]\n"
@@ -867,8 +869,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"35:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 36f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -890,126 +892,126 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 39f\n"
"38:" // Height 3: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z21.h }, p5/Z, [x12]\n"
+ "ld1h { z20.h }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z2.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x24]\n"
- "ld1h { z21.h }, p5/Z, [x12]\n"
+ "add x24, x24, #0x10\n"
"fmla z8.h, z21.h, z2.h[0]\n"
"fmla z12.h, z21.h, z1.h[0]\n"
- "ld1h { z20.h }, p5/Z, [x11]\n"
- "fmla z16.h, z21.h, z0.h[0]\n"
"fmla z9.h, z20.h, z2.h[0]\n"
- "ld1h { z21.h }, p5/Z, [x10]\n"
"fmla z13.h, z20.h, z1.h[0]\n"
+ "fmla z16.h, z21.h, z0.h[0]\n"
+ "ld1h { z21.h }, p5/Z, [x10]\n"
"fmla z17.h, z20.h, z0.h[0]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "cmp x27, #0x8\n"
"fmla z10.h, z21.h, z2.h[0]\n"
"fmla z14.h, z21.h, z1.h[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"fmla z18.h, z21.h, z0.h[0]\n"
- "fmla z11.h, z20.h, z2.h[0]\n"
"ld1h { z21.h }, p5/Z, [x12, #1, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "fmla z11.h, z20.h, z2.h[0]\n"
"fmla z15.h, z20.h, z1.h[0]\n"
"fmla z19.h, z20.h, z0.h[0]\n"
"ld1h { z20.h }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[1]\n"
"fmla z12.h, z21.h, z1.h[1]\n"
"fmla z16.h, z21.h, z0.h[1]\n"
- "fmla z9.h, z20.h, z2.h[1]\n"
"ld1h { z21.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[1]\n"
"fmla z13.h, z20.h, z1.h[1]\n"
"fmla z17.h, z20.h, z0.h[1]\n"
"ld1h { z20.h }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[1]\n"
"fmla z14.h, z21.h, z1.h[1]\n"
"fmla z18.h, z21.h, z0.h[1]\n"
- "fmla z11.h, z20.h, z2.h[1]\n"
"ld1h { z21.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "fmla z11.h, z20.h, z2.h[1]\n"
"fmla z15.h, z20.h, z1.h[1]\n"
"fmla z19.h, z20.h, z0.h[1]\n"
"ld1h { z20.h }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[2]\n"
"fmla z12.h, z21.h, z1.h[2]\n"
"fmla z16.h, z21.h, z0.h[2]\n"
- "fmla z9.h, z20.h, z2.h[2]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[2]\n"
"fmla z13.h, z20.h, z1.h[2]\n"
"fmla z17.h, z20.h, z0.h[2]\n"
"ld1h { z20.h }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[2]\n"
"fmla z14.h, z21.h, z1.h[2]\n"
"fmla z18.h, z21.h, z0.h[2]\n"
- "fmla z11.h, z20.h, z2.h[2]\n"
"ld1h { z21.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "fmla z11.h, z20.h, z2.h[2]\n"
"fmla z15.h, z20.h, z1.h[2]\n"
"fmla z19.h, z20.h, z0.h[2]\n"
"ld1h { z20.h }, p5/Z, [x11, #3, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[3]\n"
"fmla z12.h, z21.h, z1.h[3]\n"
"fmla z16.h, z21.h, z0.h[3]\n"
- "fmla z9.h, z20.h, z2.h[3]\n"
"ld1h { z21.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[3]\n"
"fmla z13.h, z20.h, z1.h[3]\n"
"fmla z17.h, z20.h, z0.h[3]\n"
"ld1h { z20.h }, p5/Z, [x9, #3, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[3]\n"
"fmla z14.h, z21.h, z1.h[3]\n"
"fmla z18.h, z21.h, z0.h[3]\n"
- "fmla z11.h, z20.h, z2.h[3]\n"
"ld1h { z21.h }, p5/Z, [x12, #4, MUL VL]\n"
+ "fmla z11.h, z20.h, z2.h[3]\n"
"fmla z15.h, z20.h, z1.h[3]\n"
"fmla z19.h, z20.h, z0.h[3]\n"
"ld1h { z20.h }, p5/Z, [x11, #4, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[4]\n"
"fmla z12.h, z21.h, z1.h[4]\n"
"fmla z16.h, z21.h, z0.h[4]\n"
- "fmla z9.h, z20.h, z2.h[4]\n"
"ld1h { z21.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[4]\n"
"fmla z13.h, z20.h, z1.h[4]\n"
"fmla z17.h, z20.h, z0.h[4]\n"
"ld1h { z20.h }, p5/Z, [x9, #4, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[4]\n"
"fmla z14.h, z21.h, z1.h[4]\n"
"fmla z18.h, z21.h, z0.h[4]\n"
- "fmla z11.h, z20.h, z2.h[4]\n"
"ld1h { z21.h }, p5/Z, [x12, #5, MUL VL]\n"
+ "fmla z11.h, z20.h, z2.h[4]\n"
"fmla z15.h, z20.h, z1.h[4]\n"
"fmla z19.h, z20.h, z0.h[4]\n"
"ld1h { z20.h }, p5/Z, [x11, #5, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[5]\n"
"fmla z12.h, z21.h, z1.h[5]\n"
"fmla z16.h, z21.h, z0.h[5]\n"
- "fmla z9.h, z20.h, z2.h[5]\n"
"ld1h { z21.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[5]\n"
"fmla z13.h, z20.h, z1.h[5]\n"
"fmla z17.h, z20.h, z0.h[5]\n"
"ld1h { z20.h }, p5/Z, [x9, #5, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[5]\n"
"fmla z14.h, z21.h, z1.h[5]\n"
"fmla z18.h, z21.h, z0.h[5]\n"
- "fmla z11.h, z20.h, z2.h[5]\n"
"ld1h { z21.h }, p5/Z, [x12, #6, MUL VL]\n"
+ "fmla z11.h, z20.h, z2.h[5]\n"
"fmla z15.h, z20.h, z1.h[5]\n"
"fmla z19.h, z20.h, z0.h[5]\n"
"ld1h { z20.h }, p5/Z, [x11, #6, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[6]\n"
"fmla z12.h, z21.h, z1.h[6]\n"
"fmla z16.h, z21.h, z0.h[6]\n"
- "fmla z9.h, z20.h, z2.h[6]\n"
"ld1h { z21.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[6]\n"
"fmla z13.h, z20.h, z1.h[6]\n"
"fmla z17.h, z20.h, z0.h[6]\n"
"ld1h { z20.h }, p5/Z, [x9, #6, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[6]\n"
"fmla z14.h, z21.h, z1.h[6]\n"
"fmla z18.h, z21.h, z0.h[6]\n"
- "fmla z11.h, z20.h, z2.h[6]\n"
"ld1h { z21.h }, p5/Z, [x12, #7, MUL VL]\n"
"addvl x12, x12, #8\n"
+ "fmla z11.h, z20.h, z2.h[6]\n"
"fmla z15.h, z20.h, z1.h[6]\n"
"fmla z19.h, z20.h, z0.h[6]\n"
"ld1h { z20.h }, p5/Z, [x11, #7, MUL VL]\n"
@@ -1017,9 +1019,9 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z8.h, z21.h, z2.h[7]\n"
"fmla z12.h, z21.h, z1.h[7]\n"
"fmla z16.h, z21.h, z0.h[7]\n"
- "fmla z9.h, z20.h, z2.h[7]\n"
"ld1h { z21.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #8\n"
+ "fmla z9.h, z20.h, z2.h[7]\n"
"fmla z13.h, z20.h, z1.h[7]\n"
"fmla z17.h, z20.h, z0.h[7]\n"
"ld1h { z20.h }, p5/Z, [x9, #7, MUL VL]\n"
@@ -1033,179 +1035,179 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"bgt 38b\n"
"39:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z21.h }, p5/Z, [x12]\n"
+ "ld1h { z20.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1h { z21.h }, p5/Z, [x12]\n"
"fmla z8.h, z21.h, z0.h[0]\n"
"fmla z12.h, z21.h, z1.h[0]\n"
- "ld1h { z20.h }, p5/Z, [x11]\n"
- "fmla z16.h, z21.h, z2.h[0]\n"
"fmla z9.h, z20.h, z0.h[0]\n"
- "ld1h { z21.h }, p5/Z, [x10]\n"
"fmla z13.h, z20.h, z1.h[0]\n"
+ "fmla z16.h, z21.h, z2.h[0]\n"
+ "ld1h { z21.h }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z17.h, z20.h, z2.h[0]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z21.h, z0.h[0]\n"
"fmla z14.h, z21.h, z1.h[0]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.h, z21.h, z2.h[0]\n"
"fmla z11.h, z20.h, z0.h[0]\n"
- "addvl x9, x9, #1\n"
"fmla z15.h, z20.h, z1.h[0]\n"
"fmla z19.h, z20.h, z2.h[0]\n"
"ble 40f\n"
"ld1h { z21.h }, p5/Z, [x12]\n"
"ld1h { z20.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z21.h, z0.h[1]\n"
"fmla z12.h, z21.h, z1.h[1]\n"
"fmla z16.h, z21.h, z2.h[1]\n"
- "fmla z9.h, z20.h, z0.h[1]\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z20.h, z0.h[1]\n"
"fmla z13.h, z20.h, z1.h[1]\n"
"fmla z17.h, z20.h, z2.h[1]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z21.h, z0.h[1]\n"
"fmla z14.h, z21.h, z1.h[1]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.h, z21.h, z2.h[1]\n"
"fmla z11.h, z20.h, z0.h[1]\n"
- "addvl x9, x9, #1\n"
"fmla z15.h, z20.h, z1.h[1]\n"
"fmla z19.h, z20.h, z2.h[1]\n"
"ble 40f\n"
"ld1h { z21.h }, p5/Z, [x12]\n"
"ld1h { z20.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z21.h, z0.h[2]\n"
"fmla z12.h, z21.h, z1.h[2]\n"
"fmla z16.h, z21.h, z2.h[2]\n"
- "fmla z9.h, z20.h, z0.h[2]\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z20.h, z0.h[2]\n"
"fmla z13.h, z20.h, z1.h[2]\n"
"fmla z17.h, z20.h, z2.h[2]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z21.h, z0.h[2]\n"
"fmla z14.h, z21.h, z1.h[2]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.h, z21.h, z2.h[2]\n"
"fmla z11.h, z20.h, z0.h[2]\n"
- "addvl x9, x9, #1\n"
"fmla z15.h, z20.h, z1.h[2]\n"
"fmla z19.h, z20.h, z2.h[2]\n"
"ble 40f\n"
"ld1h { z21.h }, p5/Z, [x12]\n"
"ld1h { z20.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z21.h, z0.h[3]\n"
"fmla z12.h, z21.h, z1.h[3]\n"
"fmla z16.h, z21.h, z2.h[3]\n"
- "fmla z9.h, z20.h, z0.h[3]\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z20.h, z0.h[3]\n"
"fmla z13.h, z20.h, z1.h[3]\n"
"fmla z17.h, z20.h, z2.h[3]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z21.h, z0.h[3]\n"
"fmla z14.h, z21.h, z1.h[3]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.h, z21.h, z2.h[3]\n"
"fmla z11.h, z20.h, z0.h[3]\n"
- "addvl x9, x9, #1\n"
"fmla z15.h, z20.h, z1.h[3]\n"
"fmla z19.h, z20.h, z2.h[3]\n"
"ble 40f\n"
"ld1h { z21.h }, p5/Z, [x12]\n"
"ld1h { z20.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z21.h, z0.h[4]\n"
"fmla z12.h, z21.h, z1.h[4]\n"
"fmla z16.h, z21.h, z2.h[4]\n"
- "fmla z9.h, z20.h, z0.h[4]\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z20.h, z0.h[4]\n"
"fmla z13.h, z20.h, z1.h[4]\n"
"fmla z17.h, z20.h, z2.h[4]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z21.h, z0.h[4]\n"
"fmla z14.h, z21.h, z1.h[4]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.h, z21.h, z2.h[4]\n"
"fmla z11.h, z20.h, z0.h[4]\n"
- "addvl x9, x9, #1\n"
"fmla z15.h, z20.h, z1.h[4]\n"
"fmla z19.h, z20.h, z2.h[4]\n"
"ble 40f\n"
"ld1h { z21.h }, p5/Z, [x12]\n"
"ld1h { z20.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z21.h, z0.h[5]\n"
"fmla z12.h, z21.h, z1.h[5]\n"
"fmla z16.h, z21.h, z2.h[5]\n"
- "fmla z9.h, z20.h, z0.h[5]\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z20.h, z0.h[5]\n"
"fmla z13.h, z20.h, z1.h[5]\n"
"fmla z17.h, z20.h, z2.h[5]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z21.h, z0.h[5]\n"
"fmla z14.h, z21.h, z1.h[5]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.h, z21.h, z2.h[5]\n"
"fmla z11.h, z20.h, z0.h[5]\n"
- "addvl x9, x9, #1\n"
"fmla z15.h, z20.h, z1.h[5]\n"
"fmla z19.h, z20.h, z2.h[5]\n"
"ble 40f\n"
"ld1h { z21.h }, p5/Z, [x12]\n"
"ld1h { z20.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z21.h, z0.h[6]\n"
"fmla z12.h, z21.h, z1.h[6]\n"
"fmla z16.h, z21.h, z2.h[6]\n"
- "fmla z9.h, z20.h, z0.h[6]\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z20.h, z0.h[6]\n"
"fmla z13.h, z20.h, z1.h[6]\n"
"fmla z17.h, z20.h, z2.h[6]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z21.h, z0.h[6]\n"
"fmla z14.h, z21.h, z1.h[6]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.h, z21.h, z2.h[6]\n"
"fmla z11.h, z20.h, z0.h[6]\n"
- "addvl x9, x9, #1\n"
"fmla z15.h, z20.h, z1.h[6]\n"
"fmla z19.h, z20.h, z2.h[6]\n"
"ble 40f\n"
"ld1h { z21.h }, p5/Z, [x12]\n"
"ld1h { z20.h }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z21.h, z0.h[7]\n"
"fmla z12.h, z21.h, z1.h[7]\n"
"fmla z16.h, z21.h, z2.h[7]\n"
- "fmla z9.h, z20.h, z0.h[7]\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.h, z20.h, z0.h[7]\n"
"fmla z13.h, z20.h, z1.h[7]\n"
"fmla z17.h, z20.h, z2.h[7]\n"
"ld1h { z20.h }, p5/Z, [x9]\n"
- "addvl x11, x11, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z21.h, z0.h[7]\n"
"fmla z14.h, z21.h, z1.h[7]\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, z21.h, z2.h[7]\n"
"fmla z11.h, z20.h, z0.h[7]\n"
"fmla z15.h, z20.h, z1.h[7]\n"
@@ -1216,12 +1218,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 35b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"tbz %x[flags], #1, 41f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z21.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z21.h }, p5/Z, [x21]\n"
"ld1rh { z20.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z21.h\n"
"fmin z9.h, p5/M, z9.h, z21.h\n"
@@ -1253,14 +1255,14 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x25]\n"
+ "st1h { z17.h }, p3, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x25, #3, MUL VL]\n"
"42:" // Height 3: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1268,15 +1270,15 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"b 86f\n"
"43:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -1284,12 +1286,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 45f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"mov x11, x12\n"
"45:" // Height 4: B setup done
@@ -1304,18 +1306,18 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cbz x15, 46f\n"
"ld1h { z8.h }, p5/Z, [x15]\n"
"ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1323,13 +1325,13 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"46:" // Height 4: no bias
"tbz %x[flags], #0, 47f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x13, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x13]\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x22, x13, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x22]\n"
"ld1h { z13.h }, p3/Z, [x22, #1, MUL VL]\n"
"ld1h { z14.h }, p2/Z, [x22, #2, MUL VL]\n"
@@ -1364,8 +1366,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"49:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1390,25 +1392,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 53f\n"
"52:" // Height 4: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z25.h }, p5/Z, [x12]\n"
+ "ld1h { z24.h }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z3.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z2.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1h { z25.h }, p5/Z, [x12]\n"
- "ld1h { z24.h }, p5/Z, [x11]\n"
+ "add x23, x23, #0x10\n"
"fmla z8.h, z25.h, z3.h[0]\n"
"fmla z12.h, z25.h, z2.h[0]\n"
+ "fmla z9.h, z24.h, z3.h[0]\n"
+ "fmla z13.h, z24.h, z2.h[0]\n"
"fmla z16.h, z25.h, z1.h[0]\n"
"fmla z20.h, z25.h, z0.h[0]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "add x25, x25, #0x10\n"
- "fmla z9.h, z24.h, z3.h[0]\n"
- "fmla z13.h, z24.h, z2.h[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"fmla z17.h, z24.h, z1.h[0]\n"
"fmla z21.h, z24.h, z0.h[0]\n"
"ld1h { z24.h }, p5/Z, [x9]\n"
@@ -1567,22 +1569,22 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"bgt 52b\n"
"53:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z25.h }, p5/Z, [x12]\n"
+ "ld1h { z24.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1h { z25.h }, p5/Z, [x12]\n"
- "ld1h { z24.h }, p5/Z, [x11]\n"
"fmla z8.h, z25.h, z0.h[0]\n"
"fmla z12.h, z25.h, z1.h[0]\n"
+ "fmla z9.h, z24.h, z0.h[0]\n"
+ "fmla z13.h, z24.h, z1.h[0]\n"
"fmla z16.h, z25.h, z2.h[0]\n"
"fmla z20.h, z25.h, z3.h[0]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
- "fmla z9.h, z24.h, z0.h[0]\n"
- "fmla z13.h, z24.h, z1.h[0]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"fmla z17.h, z24.h, z2.h[0]\n"
"fmla z21.h, z24.h, z3.h[0]\n"
@@ -1599,23 +1601,23 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 54f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z25.h, z0.h[1]\n"
"fmla z12.h, z25.h, z1.h[1]\n"
"fmla z16.h, z25.h, z2.h[1]\n"
"fmla z20.h, z25.h, z3.h[1]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z24.h, z0.h[1]\n"
"fmla z13.h, z24.h, z1.h[1]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z17.h, z24.h, z2.h[1]\n"
"fmla z21.h, z24.h, z3.h[1]\n"
"ld1h { z24.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z25.h, z0.h[1]\n"
"fmla z14.h, z25.h, z1.h[1]\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, z25.h, z2.h[1]\n"
"fmla z22.h, z25.h, z3.h[1]\n"
"fmla z11.h, z24.h, z0.h[1]\n"
@@ -1625,23 +1627,23 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 54f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z25.h, z0.h[2]\n"
"fmla z12.h, z25.h, z1.h[2]\n"
"fmla z16.h, z25.h, z2.h[2]\n"
"fmla z20.h, z25.h, z3.h[2]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z24.h, z0.h[2]\n"
"fmla z13.h, z24.h, z1.h[2]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z17.h, z24.h, z2.h[2]\n"
"fmla z21.h, z24.h, z3.h[2]\n"
"ld1h { z24.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z25.h, z0.h[2]\n"
"fmla z14.h, z25.h, z1.h[2]\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, z25.h, z2.h[2]\n"
"fmla z22.h, z25.h, z3.h[2]\n"
"fmla z11.h, z24.h, z0.h[2]\n"
@@ -1651,23 +1653,23 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 54f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z25.h, z0.h[3]\n"
"fmla z12.h, z25.h, z1.h[3]\n"
"fmla z16.h, z25.h, z2.h[3]\n"
"fmla z20.h, z25.h, z3.h[3]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z24.h, z0.h[3]\n"
"fmla z13.h, z24.h, z1.h[3]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z17.h, z24.h, z2.h[3]\n"
"fmla z21.h, z24.h, z3.h[3]\n"
"ld1h { z24.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z25.h, z0.h[3]\n"
"fmla z14.h, z25.h, z1.h[3]\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, z25.h, z2.h[3]\n"
"fmla z22.h, z25.h, z3.h[3]\n"
"fmla z11.h, z24.h, z0.h[3]\n"
@@ -1677,23 +1679,23 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 54f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z25.h, z0.h[4]\n"
"fmla z12.h, z25.h, z1.h[4]\n"
"fmla z16.h, z25.h, z2.h[4]\n"
"fmla z20.h, z25.h, z3.h[4]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z24.h, z0.h[4]\n"
"fmla z13.h, z24.h, z1.h[4]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z17.h, z24.h, z2.h[4]\n"
"fmla z21.h, z24.h, z3.h[4]\n"
"ld1h { z24.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z25.h, z0.h[4]\n"
"fmla z14.h, z25.h, z1.h[4]\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, z25.h, z2.h[4]\n"
"fmla z22.h, z25.h, z3.h[4]\n"
"fmla z11.h, z24.h, z0.h[4]\n"
@@ -1703,23 +1705,23 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 54f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z25.h, z0.h[5]\n"
"fmla z12.h, z25.h, z1.h[5]\n"
"fmla z16.h, z25.h, z2.h[5]\n"
"fmla z20.h, z25.h, z3.h[5]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z24.h, z0.h[5]\n"
"fmla z13.h, z24.h, z1.h[5]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z17.h, z24.h, z2.h[5]\n"
"fmla z21.h, z24.h, z3.h[5]\n"
"ld1h { z24.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z25.h, z0.h[5]\n"
"fmla z14.h, z25.h, z1.h[5]\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, z25.h, z2.h[5]\n"
"fmla z22.h, z25.h, z3.h[5]\n"
"fmla z11.h, z24.h, z0.h[5]\n"
@@ -1729,23 +1731,23 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 54f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z25.h, z0.h[6]\n"
"fmla z12.h, z25.h, z1.h[6]\n"
"fmla z16.h, z25.h, z2.h[6]\n"
"fmla z20.h, z25.h, z3.h[6]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z24.h, z0.h[6]\n"
"fmla z13.h, z24.h, z1.h[6]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z17.h, z24.h, z2.h[6]\n"
"fmla z21.h, z24.h, z3.h[6]\n"
"ld1h { z24.h }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.h, z25.h, z0.h[6]\n"
"fmla z14.h, z25.h, z1.h[6]\n"
- "addvl x9, x9, #1\n"
"fmla z18.h, z25.h, z2.h[6]\n"
"fmla z22.h, z25.h, z3.h[6]\n"
"fmla z11.h, z24.h, z0.h[6]\n"
@@ -1755,16 +1757,16 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 54f\n"
"ld1h { z25.h }, p5/Z, [x12]\n"
"ld1h { z24.h }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z25.h, z0.h[7]\n"
"fmla z12.h, z25.h, z1.h[7]\n"
"fmla z16.h, z25.h, z2.h[7]\n"
"fmla z20.h, z25.h, z3.h[7]\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z24.h, z0.h[7]\n"
"fmla z13.h, z24.h, z1.h[7]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z17.h, z24.h, z2.h[7]\n"
"fmla z21.h, z24.h, z3.h[7]\n"
"ld1h { z24.h }, p5/Z, [x9]\n"
@@ -1783,13 +1785,13 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 49b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"tbz %x[flags], #1, 55f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z25.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z25.h }, p5/Z, [x21]\n"
"ld1rh { z24.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z25.h\n"
"fmin z9.h, p5/M, z9.h, z25.h\n"
@@ -1829,18 +1831,18 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x25]\n"
+ "st1h { z17.h }, p3, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x24]\n"
+ "st1h { z21.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x24, #3, MUL VL]\n"
"56:" // Height 4: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1848,15 +1850,15 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"b 86f\n"
"57:" // Height 5
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"58:" // Height 5: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -1864,12 +1866,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 59f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 59f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 59f\n"
"mov x11, x12\n"
"59:" // Height 5: B setup done
@@ -1884,18 +1886,18 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cbz x15, 60f\n"
"ld1h { z8.h }, p5/Z, [x15]\n"
"ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1907,16 +1909,16 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"60:" // Height 5: no bias
"tbz %x[flags], #0, 61f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x13]\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
"ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x23, x13, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x23]\n"
"ld1h { z13.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z14.h }, p2/Z, [x23, #2, MUL VL]\n"
"ld1h { z15.h }, p1/Z, [x23, #3, MUL VL]\n"
"ld1h { z16.h }, p4/Z, [x22]\n"
@@ -1957,8 +1959,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"63:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 64f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1986,29 +1988,29 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 67f\n"
"66:" // Height 5: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z29.h }, p5/Z, [x12]\n"
+ "ld1h { z28.h }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z4.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z3.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x22]\n"
- "ld1h { z29.h }, p5/Z, [x12]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z8.h, z29.h, z4.h[0]\n"
"fmla z12.h, z29.h, z3.h[0]\n"
- "ld1h { z28.h }, p5/Z, [x11]\n"
+ "fmla z9.h, z28.h, z4.h[0]\n"
"fmla z16.h, z29.h, z2.h[0]\n"
"fmla z20.h, z29.h, z1.h[0]\n"
- "add x25, x25, #0x10\n"
"fmla z24.h, z29.h, z0.h[0]\n"
- "fmla z9.h, z28.h, z4.h[0]\n"
- "ld1h { z29.h }, p5/Z, [x10]\n"
- "add x24, x24, #0x10\n"
"fmla z13.h, z28.h, z3.h[0]\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
"fmla z17.h, z28.h, z2.h[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z21.h, z28.h, z1.h[0]\n"
"fmla z25.h, z28.h, z0.h[0]\n"
"ld1h { z28.h }, p5/Z, [x9]\n"
@@ -2017,8 +2019,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[0]\n"
"fmla z22.h, z29.h, z1.h[0]\n"
"fmla z26.h, z29.h, z0.h[0]\n"
- "fmla z11.h, z28.h, z4.h[0]\n"
"ld1h { z29.h }, p5/Z, [x12, #1, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[0]\n"
"fmla z15.h, z28.h, z3.h[0]\n"
"fmla z19.h, z28.h, z2.h[0]\n"
"fmla z23.h, z28.h, z1.h[0]\n"
@@ -2029,8 +2031,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[1]\n"
"fmla z20.h, z29.h, z1.h[1]\n"
"fmla z24.h, z29.h, z0.h[1]\n"
- "fmla z9.h, z28.h, z4.h[1]\n"
"ld1h { z29.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[1]\n"
"fmla z13.h, z28.h, z3.h[1]\n"
"fmla z17.h, z28.h, z2.h[1]\n"
"fmla z21.h, z28.h, z1.h[1]\n"
@@ -2041,8 +2043,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[1]\n"
"fmla z22.h, z29.h, z1.h[1]\n"
"fmla z26.h, z29.h, z0.h[1]\n"
- "fmla z11.h, z28.h, z4.h[1]\n"
"ld1h { z29.h }, p5/Z, [x12, #2, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[1]\n"
"fmla z15.h, z28.h, z3.h[1]\n"
"fmla z19.h, z28.h, z2.h[1]\n"
"fmla z23.h, z28.h, z1.h[1]\n"
@@ -2053,8 +2055,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[2]\n"
"fmla z20.h, z29.h, z1.h[2]\n"
"fmla z24.h, z29.h, z0.h[2]\n"
- "fmla z9.h, z28.h, z4.h[2]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[2]\n"
"fmla z13.h, z28.h, z3.h[2]\n"
"fmla z17.h, z28.h, z2.h[2]\n"
"fmla z21.h, z28.h, z1.h[2]\n"
@@ -2065,8 +2067,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[2]\n"
"fmla z22.h, z29.h, z1.h[2]\n"
"fmla z26.h, z29.h, z0.h[2]\n"
- "fmla z11.h, z28.h, z4.h[2]\n"
"ld1h { z29.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[2]\n"
"fmla z15.h, z28.h, z3.h[2]\n"
"fmla z19.h, z28.h, z2.h[2]\n"
"fmla z23.h, z28.h, z1.h[2]\n"
@@ -2077,8 +2079,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[3]\n"
"fmla z20.h, z29.h, z1.h[3]\n"
"fmla z24.h, z29.h, z0.h[3]\n"
- "fmla z9.h, z28.h, z4.h[3]\n"
"ld1h { z29.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[3]\n"
"fmla z13.h, z28.h, z3.h[3]\n"
"fmla z17.h, z28.h, z2.h[3]\n"
"fmla z21.h, z28.h, z1.h[3]\n"
@@ -2089,8 +2091,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[3]\n"
"fmla z22.h, z29.h, z1.h[3]\n"
"fmla z26.h, z29.h, z0.h[3]\n"
- "fmla z11.h, z28.h, z4.h[3]\n"
"ld1h { z29.h }, p5/Z, [x12, #4, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[3]\n"
"fmla z15.h, z28.h, z3.h[3]\n"
"fmla z19.h, z28.h, z2.h[3]\n"
"fmla z23.h, z28.h, z1.h[3]\n"
@@ -2101,8 +2103,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[4]\n"
"fmla z20.h, z29.h, z1.h[4]\n"
"fmla z24.h, z29.h, z0.h[4]\n"
- "fmla z9.h, z28.h, z4.h[4]\n"
"ld1h { z29.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[4]\n"
"fmla z13.h, z28.h, z3.h[4]\n"
"fmla z17.h, z28.h, z2.h[4]\n"
"fmla z21.h, z28.h, z1.h[4]\n"
@@ -2113,8 +2115,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[4]\n"
"fmla z22.h, z29.h, z1.h[4]\n"
"fmla z26.h, z29.h, z0.h[4]\n"
- "fmla z11.h, z28.h, z4.h[4]\n"
"ld1h { z29.h }, p5/Z, [x12, #5, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[4]\n"
"fmla z15.h, z28.h, z3.h[4]\n"
"fmla z19.h, z28.h, z2.h[4]\n"
"fmla z23.h, z28.h, z1.h[4]\n"
@@ -2125,8 +2127,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[5]\n"
"fmla z20.h, z29.h, z1.h[5]\n"
"fmla z24.h, z29.h, z0.h[5]\n"
- "fmla z9.h, z28.h, z4.h[5]\n"
"ld1h { z29.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[5]\n"
"fmla z13.h, z28.h, z3.h[5]\n"
"fmla z17.h, z28.h, z2.h[5]\n"
"fmla z21.h, z28.h, z1.h[5]\n"
@@ -2137,8 +2139,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[5]\n"
"fmla z22.h, z29.h, z1.h[5]\n"
"fmla z26.h, z29.h, z0.h[5]\n"
- "fmla z11.h, z28.h, z4.h[5]\n"
"ld1h { z29.h }, p5/Z, [x12, #6, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[5]\n"
"fmla z15.h, z28.h, z3.h[5]\n"
"fmla z19.h, z28.h, z2.h[5]\n"
"fmla z23.h, z28.h, z1.h[5]\n"
@@ -2149,8 +2151,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[6]\n"
"fmla z20.h, z29.h, z1.h[6]\n"
"fmla z24.h, z29.h, z0.h[6]\n"
- "fmla z9.h, z28.h, z4.h[6]\n"
"ld1h { z29.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[6]\n"
"fmla z13.h, z28.h, z3.h[6]\n"
"fmla z17.h, z28.h, z2.h[6]\n"
"fmla z21.h, z28.h, z1.h[6]\n"
@@ -2161,30 +2163,30 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[6]\n"
"fmla z22.h, z29.h, z1.h[6]\n"
"fmla z26.h, z29.h, z0.h[6]\n"
- "fmla z11.h, z28.h, z4.h[6]\n"
"ld1h { z29.h }, p5/Z, [x12, #7, MUL VL]\n"
"addvl x12, x12, #8\n"
+ "fmla z11.h, z28.h, z4.h[6]\n"
"fmla z15.h, z28.h, z3.h[6]\n"
"fmla z19.h, z28.h, z2.h[6]\n"
"fmla z23.h, z28.h, z1.h[6]\n"
"fmla z27.h, z28.h, z0.h[6]\n"
"ld1h { z28.h }, p5/Z, [x11, #7, MUL VL]\n"
- "addvl x11, x11, #8\n"
"fmla z8.h, z29.h, z4.h[7]\n"
+ "addvl x11, x11, #8\n"
"fmla z12.h, z29.h, z3.h[7]\n"
"fmla z16.h, z29.h, z2.h[7]\n"
"fmla z20.h, z29.h, z1.h[7]\n"
"fmla z24.h, z29.h, z0.h[7]\n"
- "fmla z9.h, z28.h, z4.h[7]\n"
"ld1h { z29.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #8\n"
+ "fmla z9.h, z28.h, z4.h[7]\n"
"fmla z13.h, z28.h, z3.h[7]\n"
"fmla z17.h, z28.h, z2.h[7]\n"
"fmla z21.h, z28.h, z1.h[7]\n"
"fmla z25.h, z28.h, z0.h[7]\n"
"ld1h { z28.h }, p5/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
"fmla z10.h, z29.h, z4.h[7]\n"
+ "addvl x9, x9, #8\n"
"fmla z14.h, z29.h, z3.h[7]\n"
"fmla z18.h, z29.h, z2.h[7]\n"
"fmla z22.h, z29.h, z1.h[7]\n"
@@ -2197,25 +2199,25 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"bgt 66b\n"
"67:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z29.h }, p5/Z, [x12]\n"
+ "ld1h { z28.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
"ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1h { z29.h }, p5/Z, [x12]\n"
"fmla z8.h, z29.h, z0.h[0]\n"
"fmla z12.h, z29.h, z1.h[0]\n"
- "ld1h { z28.h }, p5/Z, [x11]\n"
+ "fmla z9.h, z28.h, z0.h[0]\n"
+ "fmla z13.h, z28.h, z1.h[0]\n"
"fmla z16.h, z29.h, z2.h[0]\n"
"fmla z20.h, z29.h, z3.h[0]\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z29.h, z4.h[0]\n"
- "fmla z9.h, z28.h, z0.h[0]\n"
- "ld1h { z29.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
- "fmla z13.h, z28.h, z1.h[0]\n"
"fmla z17.h, z28.h, z2.h[0]\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
"fmla z21.h, z28.h, z3.h[0]\n"
"fmla z25.h, z28.h, z4.h[0]\n"
@@ -2234,19 +2236,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 68f\n"
"ld1h { z29.h }, p5/Z, [x12]\n"
"ld1h { z28.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z29.h, z0.h[1]\n"
"fmla z12.h, z29.h, z1.h[1]\n"
"fmla z16.h, z29.h, z2.h[1]\n"
"fmla z20.h, z29.h, z3.h[1]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z29.h, z4.h[1]\n"
"fmla z9.h, z28.h, z0.h[1]\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z28.h, z1.h[1]\n"
"fmla z17.h, z28.h, z2.h[1]\n"
- "addvl x10, x10, #1\n"
"fmla z21.h, z28.h, z3.h[1]\n"
"fmla z25.h, z28.h, z4.h[1]\n"
"ld1h { z28.h }, p5/Z, [x9]\n"
@@ -2264,19 +2266,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 68f\n"
"ld1h { z29.h }, p5/Z, [x12]\n"
"ld1h { z28.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z29.h, z0.h[2]\n"
"fmla z12.h, z29.h, z1.h[2]\n"
"fmla z16.h, z29.h, z2.h[2]\n"
"fmla z20.h, z29.h, z3.h[2]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z29.h, z4.h[2]\n"
"fmla z9.h, z28.h, z0.h[2]\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z28.h, z1.h[2]\n"
"fmla z17.h, z28.h, z2.h[2]\n"
- "addvl x10, x10, #1\n"
"fmla z21.h, z28.h, z3.h[2]\n"
"fmla z25.h, z28.h, z4.h[2]\n"
"ld1h { z28.h }, p5/Z, [x9]\n"
@@ -2294,19 +2296,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 68f\n"
"ld1h { z29.h }, p5/Z, [x12]\n"
"ld1h { z28.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z29.h, z0.h[3]\n"
"fmla z12.h, z29.h, z1.h[3]\n"
"fmla z16.h, z29.h, z2.h[3]\n"
"fmla z20.h, z29.h, z3.h[3]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z29.h, z4.h[3]\n"
"fmla z9.h, z28.h, z0.h[3]\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z28.h, z1.h[3]\n"
"fmla z17.h, z28.h, z2.h[3]\n"
- "addvl x10, x10, #1\n"
"fmla z21.h, z28.h, z3.h[3]\n"
"fmla z25.h, z28.h, z4.h[3]\n"
"ld1h { z28.h }, p5/Z, [x9]\n"
@@ -2324,19 +2326,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 68f\n"
"ld1h { z29.h }, p5/Z, [x12]\n"
"ld1h { z28.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z29.h, z0.h[4]\n"
"fmla z12.h, z29.h, z1.h[4]\n"
"fmla z16.h, z29.h, z2.h[4]\n"
"fmla z20.h, z29.h, z3.h[4]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z29.h, z4.h[4]\n"
"fmla z9.h, z28.h, z0.h[4]\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z28.h, z1.h[4]\n"
"fmla z17.h, z28.h, z2.h[4]\n"
- "addvl x10, x10, #1\n"
"fmla z21.h, z28.h, z3.h[4]\n"
"fmla z25.h, z28.h, z4.h[4]\n"
"ld1h { z28.h }, p5/Z, [x9]\n"
@@ -2354,19 +2356,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 68f\n"
"ld1h { z29.h }, p5/Z, [x12]\n"
"ld1h { z28.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z29.h, z0.h[5]\n"
"fmla z12.h, z29.h, z1.h[5]\n"
"fmla z16.h, z29.h, z2.h[5]\n"
"fmla z20.h, z29.h, z3.h[5]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z29.h, z4.h[5]\n"
"fmla z9.h, z28.h, z0.h[5]\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z28.h, z1.h[5]\n"
"fmla z17.h, z28.h, z2.h[5]\n"
- "addvl x10, x10, #1\n"
"fmla z21.h, z28.h, z3.h[5]\n"
"fmla z25.h, z28.h, z4.h[5]\n"
"ld1h { z28.h }, p5/Z, [x9]\n"
@@ -2384,19 +2386,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 68f\n"
"ld1h { z29.h }, p5/Z, [x12]\n"
"ld1h { z28.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z29.h, z0.h[6]\n"
"fmla z12.h, z29.h, z1.h[6]\n"
"fmla z16.h, z29.h, z2.h[6]\n"
"fmla z20.h, z29.h, z3.h[6]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z29.h, z4.h[6]\n"
"fmla z9.h, z28.h, z0.h[6]\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z13.h, z28.h, z1.h[6]\n"
"fmla z17.h, z28.h, z2.h[6]\n"
- "addvl x10, x10, #1\n"
"fmla z21.h, z28.h, z3.h[6]\n"
"fmla z25.h, z28.h, z4.h[6]\n"
"ld1h { z28.h }, p5/Z, [x9]\n"
@@ -2414,12 +2416,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 68f\n"
"ld1h { z29.h }, p5/Z, [x12]\n"
"ld1h { z28.h }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z29.h, z0.h[7]\n"
"fmla z12.h, z29.h, z1.h[7]\n"
"fmla z16.h, z29.h, z2.h[7]\n"
"fmla z20.h, z29.h, z3.h[7]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z24.h, z29.h, z4.h[7]\n"
"fmla z9.h, z28.h, z0.h[7]\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
@@ -2446,14 +2448,14 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 63b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"tbz %x[flags], #1, 69f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z29.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z29.h }, p5/Z, [x21]\n"
"ld1rh { z28.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z29.h\n"
"fmin z9.h, p5/M, z9.h, z29.h\n"
@@ -2501,22 +2503,22 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z25.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x25]\n"
+ "st1h { z17.h }, p3, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x24]\n"
+ "st1h { z21.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x23]\n"
+ "st1h { z25.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x23, #3, MUL VL]\n"
"70:" // Height 5: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -2524,18 +2526,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"b 86f\n"
"71:" // Height 6
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0xc\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0xc\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"72:" // Height 6: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #1\n"
"cnth x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
"add x20, x9, x20, LSL #1\n"
@@ -2543,12 +2546,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 73f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 73f\n"
"dech x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 73f\n"
"mov x11, x12\n"
"73:" // Height 6: B setup done
@@ -2563,18 +2566,18 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cbz x15, 74f\n"
"ld1h { z8.h }, p5/Z, [x15]\n"
"ld1h { z9.h }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x15, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -2590,17 +2593,17 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"74:" // Height 6: no bias
"tbz %x[flags], #0, 75f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x13, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x13]\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"ld1h { z9.h }, p3/Z, [x13, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x13, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z11.h }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x24, x13, x20, LSL #1\n"
+ "add x23, x24, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x24]\n"
"ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
"ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
"ld1h { z16.h }, p4/Z, [x23]\n"
@@ -2649,8 +2652,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"77:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 78f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2681,29 +2684,29 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 81f\n"
"80:" // Height 6: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z1.h }, p5/Z, [x12]\n"
+ "ld1h { z0.h }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z7.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z6.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z5.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z4.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
"ld1rqh { z3.h }, p0/Z, [x22]\n"
"ld1rqh { z2.h }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1h { z1.h }, p5/Z, [x12]\n"
- "ld1h { z0.h }, p5/Z, [x11]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z8.h, z1.h, z7.h[0]\n"
"fmla z12.h, z1.h, z6.h[0]\n"
+ "add x21, x21, #0x10\n"
"fmla z16.h, z1.h, z5.h[0]\n"
"fmla z20.h, z1.h, z4.h[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z24.h, z1.h, z3.h[0]\n"
"fmla z28.h, z1.h, z2.h[0]\n"
"ld1h { z1.h }, p5/Z, [x10]\n"
- "add x21, x21, #0x10\n"
"fmla z9.h, z0.h, z7.h[0]\n"
"fmla z13.h, z0.h, z6.h[0]\n"
"fmla z17.h, z0.h, z5.h[0]\n"
@@ -2926,27 +2929,27 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"bgt 80b\n"
"81:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z7.h }, p5/Z, [x12]\n"
+ "ld1h { z6.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
"ld1rqh { z4.h }, p0/Z, [x22]\n"
"ld1rqh { z5.h }, p0/Z, [x21]\n"
- "ld1h { z7.h }, p5/Z, [x12]\n"
- "ld1h { z6.h }, p5/Z, [x11]\n"
"fmla z8.h, z7.h, z0.h[0]\n"
"fmla z12.h, z7.h, z1.h[0]\n"
+ "fmla z9.h, z6.h, z0.h[0]\n"
+ "fmla z13.h, z6.h, z1.h[0]\n"
"fmla z16.h, z7.h, z2.h[0]\n"
"fmla z20.h, z7.h, z3.h[0]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z24.h, z7.h, z4.h[0]\n"
"fmla z28.h, z7.h, z5.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
- "fmla z9.h, z6.h, z0.h[0]\n"
- "fmla z13.h, z6.h, z1.h[0]\n"
"fmla z17.h, z6.h, z2.h[0]\n"
"fmla z21.h, z6.h, z3.h[0]\n"
"fmla z25.h, z6.h, z4.h[0]\n"
@@ -2968,19 +2971,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 82f\n"
"ld1h { z7.h }, p5/Z, [x12]\n"
"ld1h { z6.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z7.h, z0.h[1]\n"
"fmla z12.h, z7.h, z1.h[1]\n"
"fmla z16.h, z7.h, z2.h[1]\n"
"fmla z20.h, z7.h, z3.h[1]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z7.h, z4.h[1]\n"
"fmla z28.h, z7.h, z5.h[1]\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z6.h, z0.h[1]\n"
"fmla z13.h, z6.h, z1.h[1]\n"
- "addvl x10, x10, #1\n"
"fmla z17.h, z6.h, z2.h[1]\n"
"fmla z21.h, z6.h, z3.h[1]\n"
"fmla z25.h, z6.h, z4.h[1]\n"
@@ -3002,19 +3005,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 82f\n"
"ld1h { z7.h }, p5/Z, [x12]\n"
"ld1h { z6.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z7.h, z0.h[2]\n"
"fmla z12.h, z7.h, z1.h[2]\n"
"fmla z16.h, z7.h, z2.h[2]\n"
"fmla z20.h, z7.h, z3.h[2]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z7.h, z4.h[2]\n"
"fmla z28.h, z7.h, z5.h[2]\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z6.h, z0.h[2]\n"
"fmla z13.h, z6.h, z1.h[2]\n"
- "addvl x10, x10, #1\n"
"fmla z17.h, z6.h, z2.h[2]\n"
"fmla z21.h, z6.h, z3.h[2]\n"
"fmla z25.h, z6.h, z4.h[2]\n"
@@ -3036,19 +3039,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 82f\n"
"ld1h { z7.h }, p5/Z, [x12]\n"
"ld1h { z6.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z7.h, z0.h[3]\n"
"fmla z12.h, z7.h, z1.h[3]\n"
"fmla z16.h, z7.h, z2.h[3]\n"
"fmla z20.h, z7.h, z3.h[3]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z7.h, z4.h[3]\n"
"fmla z28.h, z7.h, z5.h[3]\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z6.h, z0.h[3]\n"
"fmla z13.h, z6.h, z1.h[3]\n"
- "addvl x10, x10, #1\n"
"fmla z17.h, z6.h, z2.h[3]\n"
"fmla z21.h, z6.h, z3.h[3]\n"
"fmla z25.h, z6.h, z4.h[3]\n"
@@ -3070,19 +3073,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 82f\n"
"ld1h { z7.h }, p5/Z, [x12]\n"
"ld1h { z6.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z7.h, z0.h[4]\n"
"fmla z12.h, z7.h, z1.h[4]\n"
"fmla z16.h, z7.h, z2.h[4]\n"
"fmla z20.h, z7.h, z3.h[4]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z7.h, z4.h[4]\n"
"fmla z28.h, z7.h, z5.h[4]\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z6.h, z0.h[4]\n"
"fmla z13.h, z6.h, z1.h[4]\n"
- "addvl x10, x10, #1\n"
"fmla z17.h, z6.h, z2.h[4]\n"
"fmla z21.h, z6.h, z3.h[4]\n"
"fmla z25.h, z6.h, z4.h[4]\n"
@@ -3104,19 +3107,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 82f\n"
"ld1h { z7.h }, p5/Z, [x12]\n"
"ld1h { z6.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z7.h, z0.h[5]\n"
"fmla z12.h, z7.h, z1.h[5]\n"
"fmla z16.h, z7.h, z2.h[5]\n"
"fmla z20.h, z7.h, z3.h[5]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z7.h, z4.h[5]\n"
"fmla z28.h, z7.h, z5.h[5]\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z6.h, z0.h[5]\n"
"fmla z13.h, z6.h, z1.h[5]\n"
- "addvl x10, x10, #1\n"
"fmla z17.h, z6.h, z2.h[5]\n"
"fmla z21.h, z6.h, z3.h[5]\n"
"fmla z25.h, z6.h, z4.h[5]\n"
@@ -3138,19 +3141,19 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 82f\n"
"ld1h { z7.h }, p5/Z, [x12]\n"
"ld1h { z6.h }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z7.h, z0.h[6]\n"
"fmla z12.h, z7.h, z1.h[6]\n"
"fmla z16.h, z7.h, z2.h[6]\n"
"fmla z20.h, z7.h, z3.h[6]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.h, z7.h, z4.h[6]\n"
"fmla z28.h, z7.h, z5.h[6]\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.h, z6.h, z0.h[6]\n"
"fmla z13.h, z6.h, z1.h[6]\n"
- "addvl x10, x10, #1\n"
"fmla z17.h, z6.h, z2.h[6]\n"
"fmla z21.h, z6.h, z3.h[6]\n"
"fmla z25.h, z6.h, z4.h[6]\n"
@@ -3172,12 +3175,12 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"ble 82f\n"
"ld1h { z7.h }, p5/Z, [x12]\n"
"ld1h { z6.h }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.h, z7.h, z0.h[7]\n"
"fmla z12.h, z7.h, z1.h[7]\n"
"fmla z16.h, z7.h, z2.h[7]\n"
"fmla z20.h, z7.h, z3.h[7]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z24.h, z7.h, z4.h[7]\n"
"fmla z28.h, z7.h, z5.h[7]\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
@@ -3208,15 +3211,15 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 77b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #1\n"
+ "add x26, x13, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"tbz %x[flags], #1, 83f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p5/Z, [x21]\n"
"ld1rh { z0.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z1.h\n"
"fmin z9.h, p5/M, z9.h, z1.h\n"
@@ -3272,26 +3275,26 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x13, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z25.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p1, [x22, #3, MUL VL]\n"
- "st1h { z28.h }, p4, [x21]\n"
- "st1h { z29.h }, p3, [x21, #1, MUL VL]\n"
- "st1h { z30.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z31.h }, p1, [x21, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x25]\n"
+ "st1h { z17.h }, p3, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x24]\n"
+ "st1h { z21.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x23]\n"
+ "st1h { z25.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z28.h }, p4, [x22]\n"
+ "st1h { z29.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z30.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z31.h }, p1, [x22, #3, MUL VL]\n"
"84:" // Height 6: Writeback done
"dech x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -3308,8 +3311,8 @@ void sve_ffhybrid_fp16_mla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"86:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
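
Worth pausing on the shape of this refactor before the next file: throughout these hunks, output_ptr and bias stop being standalone inline-asm register operands (the [output_ptr] "+&r" output and [bias] "r" input disappear from the constraint lists, as in the "86: Exit" block just above) and move into the KernelArgs block that the assembly already receives through args_ptr. The kernel then materialises the pointers itself with ldr x13/x15, [%x[args_ptr], %[offsetof_...]], and the Height-6 path persists the advanced output pointer with an str back into the struct instead of updating an operand in place. Below is a minimal, self-contained sketch of that mechanism; the struct and function names are placeholders for illustration, not the library's real ones.

#include <cstddef>
#include <cstdio>

// Placeholder for the kernel-argument block: pointers that used to be
// separate asm operands become ordinary struct members, reachable from
// the single args_ptr operand via offsetof().
struct KernelArgsSketch
{
    const float *bias = nullptr;
    void *output_ptr = nullptr;
};

static void load_pointers_from_args(KernelArgsSketch &ka)
{
#if defined(__aarch64__)
    __asm__ __volatile__(
        // The asm loads the pointers itself, so the compiler no longer
        // has to pin two general-purpose registers across the kernel.
        "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
        "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        // A tail block can persist an advanced pointer by storing it back:
        "str x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        :
        : [args_ptr] "r"(&ka),
          [offsetof_bias] "I"(offsetof(KernelArgsSketch, bias)),
          [offsetof_output_ptr] "I"(offsetof(KernelArgsSketch, output_ptr))
        : "cc", "memory", "x13", "x15");
#else
    (void)ka; // the sketch only exercises the AArch64 path
#endif
}

int main()
{
    float bias_value = 0.0f;
    float out_value = 0.0f;
    KernelArgsSketch ka;
    ka.bias = &bias_value;
    ka.output_ptr = &out_value;
    load_pointers_from_args(ka);
    std::printf("bias=%p output=%p\n", (const void *)ka.bias, ka.output_ptr);
    return 0;
}

The practical payoff is visible in the shrunken operand list at the "86: Exit" label above: output_ptr no longer needs a read-write "+&r" constraint and bias drops out of the inputs entirely, at the cost of one extra ldr per pointer at each height's entry point.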
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp
index 3a93a2f7c8..847103646c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -82,7 +82,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 1> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
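
One line of genuine API churn hides in the .hpp hunk above: StdTransformsSVE now takes the LHS operand type as a leading template argument instead of implying it from the RHS type, which lets the transform machinery distinguish the A-matrix element type from the B-matrix one when the two differ. A hypothetical reduction of that pattern, with the class name and members assumed purely for illustration:

// Hypothetical stand-in: the only point illustrated is the widened
// parameter list, with the left-hand operand's element type explicit.
template <typename TLhs, typename TRhs, typename TResult,
          int OutHeight, int OutWidth, int KBlock>
struct StdTransformsSketch
{
    static constexpr int out_height() { return OutHeight; }
    static constexpr int out_width() { return OutWidth; }
};

// Matching the instantiation in the fp32 kernel header above:
using Fp32Transforms = StdTransformsSketch<float, float, float, 6, 4, 1>;

static_assert(Fp32Transforms::out_height() == 6, "six rows per tile");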
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
index 8e4fd4388e..51e2b3722a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,18 +49,19 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -81,6 +82,7 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -106,15 +108,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"bgt 27f\n"
"beq 14f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -122,12 +124,12 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"mov x11, x12\n"
"3:" // Height 1: B setup done
@@ -162,8 +164,8 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"7:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 8f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -181,41 +183,41 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"ld1w { z7.s }, p4/Z, [x11]\n"
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop
+ "addvl x12, x12, #1\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z17.s }, p4/Z, [x10]\n"
- "ld1w { z16.s }, p4/Z, [x9]\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
- "fmla z10.s, p4/M, z17.s, z0.s\n"
- "fmla z11.s, p4/M, z16.s, z0.s\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "ld1w { z16.s }, p4/Z, [x9]\n"
"add x26, x26, #0x4\n"
"subs x27, x27, #0x1\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "fmla z10.s, p4/M, z17.s, z0.s\n"
+ "ld1w { z6.s }, p4/Z, [x12]\n"
+ "fmla z11.s, p4/M, z16.s, z0.s\n"
+ "ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1w { z7.s }, p4/Z, [x11]\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z17.s }, p4/Z, [x10]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z16.s }, p4/Z, [x9]\n"
"add x28, x28, #0x1\n"
- "cmp x28, x20\n"
- "fmla z10.s, p4/M, z17.s, z0.s\n"
- "fmla z11.s, p4/M, z16.s, z0.s\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
+ "cmp x28, x20\n"
"addvl x9, x9, #1\n"
+ "fmla z10.s, p4/M, z17.s, z0.s\n"
+ "fmla z11.s, p4/M, z16.s, z0.s\n"
"bne 7b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p4/Z, [x21]\n"
"ld1rw { z16.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z17.s\n"
"fmin z9.s, p4/M, z9.s, z17.s\n"
@@ -238,15 +240,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"b 80f\n"
"14:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -254,12 +256,12 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 16f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 16f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 16f\n"
"mov x11, x12\n"
"16:" // Height 2: B setup done
@@ -274,22 +276,22 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"cbz x15, 17f\n"
"ld1w { z8.s }, p4/Z, [x15]\n"
"ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x15, x15, #4\n"
"b 19f\n"
"17:" // Height 2: no bias
"tbz %x[flags], #0, 18f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x13, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x13]\n"
"ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x20, x13, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x20]\n"
"ld1w { z13.s }, p2/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x20, #2, MUL VL]\n"
@@ -308,8 +310,8 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"20:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 21f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -331,26 +333,26 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"ld1w { z7.s }, p4/Z, [x11]\n"
"ble 24f\n"
"23:" // Height 2: Multiply loop: Main loop
+ "addvl x12, x12, #1\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
"ld1w { z17.s }, p4/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"ld1w { z16.s }, p4/Z, [x9]\n"
- "addvl x11, x11, #1\n"
"add x26, x26, #0x4\n"
"subs x27, x27, #0x1\n"
+ "add x25, x25, #0x4\n"
+ "addvl x10, x10, #1\n"
"fmla z10.s, p4/M, z17.s, z0.s\n"
"fmla z14.s, p4/M, z17.s, z1.s\n"
- "add x25, x25, #0x4\n"
+ "addvl x9, x9, #1\n"
+ "ld1w { z6.s }, p4/Z, [x12]\n"
"fmla z11.s, p4/M, z16.s, z0.s\n"
- "fmla z15.s, p4/M, z16.s, z1.s\n"
- "addvl x10, x10, #1\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
+ "fmla z15.s, p4/M, z16.s, z1.s\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
- "addvl x9, x9, #1\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
"ld1w { z7.s }, p4/Z, [x11]\n"
"bgt 23b\n"
"24:" // Height 2: Multiply loop: Main loop skip
@@ -362,22 +364,22 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z13.s, p4/M, z7.s, z1.s\n"
"ld1w { z16.s }, p4/Z, [x9]\n"
"add x28, x28, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"cmp x28, x20\n"
+ "addvl x10, x10, #1\n"
"fmla z10.s, p4/M, z17.s, z0.s\n"
"fmla z14.s, p4/M, z17.s, z1.s\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z11.s, p4/M, z16.s, z0.s\n"
"fmla z15.s, p4/M, z16.s, z1.s\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"bne 20b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p4/Z, [x21]\n"
"ld1rw { z16.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z17.s\n"
"fmin z9.s, p4/M, z9.s, z17.s\n"
@@ -401,10 +403,10 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -412,15 +414,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"b 80f\n"
"27:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -428,12 +430,12 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 29f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 29f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 29f\n"
"mov x11, x12\n"
"29:" // Height 3: B setup done
@@ -448,27 +450,27 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"cbz x15, 30f\n"
"ld1w { z8.s }, p4/Z, [x15]\n"
"ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 32f\n"
"30:" // Height 3: no bias
"tbz %x[flags], #0, 31f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x13, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x13]\n"
"ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x21, x13, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x21]\n"
"ld1w { z13.s }, p2/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x21, #2, MUL VL]\n"
@@ -495,8 +497,8 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"33:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 34f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -522,13 +524,13 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"ld1w { z7.s }, p4/Z, [x11]\n"
"ble 37f\n"
"36:" // Height 3: Multiply loop: Main loop
+ "addvl x12, x12, #1\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z21.s }, p4/Z, [x10]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"add x26, x26, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
@@ -536,18 +538,18 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"subs x27, x27, #0x1\n"
"add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
+ "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, p4/M, z21.s, z0.s\n"
"fmla z14.s, p4/M, z21.s, z1.s\n"
"fmla z18.s, p4/M, z21.s, z2.s\n"
+ "ld1w { z6.s }, p4/Z, [x12]\n"
"fmla z11.s, p4/M, z20.s, z0.s\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
+ "ld1rw { z0.s }, p4/Z, [x26]\n"
"fmla z15.s, p4/M, z20.s, z1.s\n"
"fmla z19.s, p4/M, z20.s, z2.s\n"
- "ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
"ld1w { z7.s }, p4/Z, [x11]\n"
"bgt 36b\n"
"37:" // Height 3: Multiply loop: Main loop skip
@@ -556,30 +558,30 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z12.s, p4/M, z6.s, z1.s\n"
"add x28, x28, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z21.s }, p4/Z, [x10]\n"
- "cmp x28, x20\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "addvl x12, x12, #1\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"ld1w { z20.s }, p4/Z, [x9]\n"
- "addvl x12, x12, #1\n"
- "fmla z10.s, p4/M, z21.s, z0.s\n"
- "fmla z14.s, p4/M, z21.s, z1.s\n"
"addvl x11, x11, #1\n"
+ "cmp x28, x20\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
+ "fmla z10.s, p4/M, z21.s, z0.s\n"
+ "fmla z14.s, p4/M, z21.s, z1.s\n"
"fmla z18.s, p4/M, z21.s, z2.s\n"
"fmla z11.s, p4/M, z20.s, z0.s\n"
- "addvl x9, x9, #1\n"
"fmla z15.s, p4/M, z20.s, z1.s\n"
"fmla z19.s, p4/M, z20.s, z2.s\n"
"bne 33b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z21.s }, p4/Z, [x21]\n"
"ld1rw { z20.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z21.s\n"
"fmin z9.s, p4/M, z9.s, z21.s\n"
@@ -611,14 +613,14 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x25, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -626,15 +628,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"b 80f\n"
"40:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -642,12 +644,12 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 42f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 42f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 42f\n"
"mov x11, x12\n"
"42:" // Height 4: B setup done
@@ -662,18 +664,18 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"cbz x15, 43f\n"
"ld1w { z8.s }, p4/Z, [x15]\n"
"ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -681,13 +683,13 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"43:" // Height 4: no bias
"tbz %x[flags], #0, 44f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x13, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x13]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x22, x13, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x22]\n"
"ld1w { z13.s }, p2/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x22, #2, MUL VL]\n"
@@ -722,8 +724,8 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -753,9 +755,9 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"ld1w { z7.s }, p4/Z, [x11]\n"
"ble 50f\n"
"49:" // Height 4: Multiply loop: Main loop
+ "addvl x12, x12, #1\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
"fmla z12.s, p4/M, z6.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
@@ -773,9 +775,9 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z10.s, p4/M, z25.s, z0.s\n"
"fmla z14.s, p4/M, z25.s, z1.s\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z18.s, p4/M, z25.s, z2.s\n"
"fmla z22.s, p4/M, z25.s, z3.s\n"
- "addvl x9, x9, #1\n"
"ld1w { z6.s }, p4/Z, [x12]\n"
"fmla z11.s, p4/M, z24.s, z0.s\n"
"fmla z15.s, p4/M, z24.s, z1.s\n"
@@ -795,18 +797,18 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
"ld1w { z25.s }, p4/Z, [x10]\n"
- "cmp x28, x20\n"
+ "addvl x12, x12, #1\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"ld1w { z24.s }, p4/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "cmp x28, x20\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, p4/M, z25.s, z0.s\n"
"fmla z14.s, p4/M, z25.s, z1.s\n"
- "addvl x9, x9, #1\n"
"fmla z18.s, p4/M, z25.s, z2.s\n"
"fmla z22.s, p4/M, z25.s, z3.s\n"
"fmla z11.s, p4/M, z24.s, z0.s\n"
@@ -815,13 +817,13 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z23.s, p4/M, z24.s, z3.s\n"
"bne 46b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z25.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z25.s }, p4/Z, [x21]\n"
"ld1rw { z24.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z25.s\n"
"fmin z9.s, p4/M, z9.s, z25.s\n"
@@ -861,18 +863,18 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x24]\n"
+ "st1w { z21.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x24, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -880,15 +882,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"b 80f\n"
"53:" // Height 5
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -896,12 +898,12 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 55f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 55f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 55f\n"
"mov x11, x12\n"
"55:" // Height 5: B setup done
@@ -916,18 +918,18 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"cbz x15, 56f\n"
"ld1w { z8.s }, p4/Z, [x15]\n"
"ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -939,16 +941,16 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"56:" // Height 5: no bias
"tbz %x[flags], #0, 57f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x13]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x23, x13, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x23]\n"
"ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p3/Z, [x22]\n"
@@ -989,8 +991,8 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"59:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 60f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1033,8 +1035,8 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"add x26, x26, #0x4\n"
"subs x27, x27, #0x1\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z29.s }, p4/Z, [x10]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"add x25, x25, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
@@ -1044,23 +1046,23 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z25.s, p4/M, z7.s, z4.s\n"
"ld1w { z28.s }, p4/Z, [x9]\n"
"add x22, x22, #0x4\n"
- "fmla z10.s, p4/M, z29.s, z0.s\n"
- "fmla z14.s, p4/M, z29.s, z1.s\n"
"addvl x10, x10, #1\n"
"addvl x9, x9, #1\n"
+ "fmla z10.s, p4/M, z29.s, z0.s\n"
+ "fmla z14.s, p4/M, z29.s, z1.s\n"
"fmla z18.s, p4/M, z29.s, z2.s\n"
"fmla z22.s, p4/M, z29.s, z3.s\n"
"fmla z26.s, p4/M, z29.s, z4.s\n"
+ "ld1w { z6.s }, p4/Z, [x12]\n"
"fmla z11.s, p4/M, z28.s, z0.s\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x12]\n"
"fmla z15.s, p4/M, z28.s, z1.s\n"
- "fmla z19.s, p4/M, z28.s, z2.s\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
+ "fmla z19.s, p4/M, z28.s, z2.s\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"fmla z23.s, p4/M, z28.s, z3.s\n"
- "fmla z27.s, p4/M, z28.s, z4.s\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
+ "fmla z27.s, p4/M, z28.s, z4.s\n"
"ld1rw { z4.s }, p4/Z, [x22]\n"
"ld1w { z7.s }, p4/Z, [x11]\n"
"bgt 62b\n"
@@ -1071,15 +1073,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "cmp x28, x20\n"
"addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z29.s }, p4/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
+ "addvl x10, x10, #1\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
- "addvl x10, x10, #1\n"
+ "cmp x28, x20\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
"ld1w { z28.s }, p4/Z, [x9]\n"
@@ -1096,14 +1098,14 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z27.s, p4/M, z28.s, z4.s\n"
"bne 59b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z29.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z29.s }, p4/Z, [x21]\n"
"ld1rw { z28.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z29.s\n"
"fmin z9.s, p4/M, z9.s, z29.s\n"
@@ -1151,22 +1153,22 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x22]\n"
- "st1w { z25.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x24]\n"
+ "st1w { z21.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x23]\n"
+ "st1w { z25.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x23, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1174,18 +1176,19 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"b 80f\n"
"66:" // Height 6
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0x18\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -1193,12 +1196,12 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 68f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 68f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 68f\n"
"mov x11, x12\n"
"68:" // Height 6: B setup done
@@ -1213,18 +1216,18 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"cbz x15, 69f\n"
"ld1w { z8.s }, p4/Z, [x15]\n"
"ld1w { z9.s }, p4/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1240,17 +1243,17 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"69:" // Height 6: no bias
"tbz %x[flags], #0, 70f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x13, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x13]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x13, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p0/Z, [x13, #3, MUL VL]\n"
+ "add x24, x13, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x24]\n"
"ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p3/Z, [x23]\n"
@@ -1299,8 +1302,8 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"72:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 73f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1362,9 +1365,9 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z29.s, p4/M, z7.s, z5.s\n"
"ld1w { z7.s }, p4/Z, [x9]\n"
"addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, p4/M, z6.s, z0.s\n"
"fmla z14.s, p4/M, z6.s, z1.s\n"
- "addvl x9, x9, #1\n"
"fmla z18.s, p4/M, z6.s, z2.s\n"
"fmla z22.s, p4/M, z6.s, z3.s\n"
"fmla z26.s, p4/M, z6.s, z4.s\n"
@@ -1391,15 +1394,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "cmp x28, x20\n"
"addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z28.s, p4/M, z6.s, z5.s\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
- "addvl x10, x10, #1\n"
+ "cmp x28, x20\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
@@ -1420,15 +1423,15 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"fmla z31.s, p4/M, z7.s, z5.s\n"
"bne 72b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p4/Z, [x21]\n"
"ld1rw { z0.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
@@ -1484,26 +1487,26 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x22]\n"
- "st1w { z25.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p3, [x21]\n"
- "st1w { z29.s }, p2, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x24]\n"
+ "st1w { z21.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x23]\n"
+ "st1w { z25.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z28.s }, p3, [x22]\n"
+ "st1w { z29.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z30.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z31.s }, p0, [x22, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1520,8 +1523,8 @@ void sve_ffhybrid_fp32_mla_6x4VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
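
[Editor's note on the recurring pattern in the hunks above and below: the `bias` and `output_ptr` values move out of the inline-asm operand list and into the `KernelArgs` block, so each Height section now reloads them with `ldr xN, [%x[args_ptr], %[offsetof_...]]` instead of receiving them in pinned registers (`mov x15, %x[bias]` / `mov x13, %x[output_ptr]`); the Height-6 prologue likewise advances the output pointer with `madd` + `str` back into the struct rather than updating an asm output operand. The remaining `-`/`+` churn is instruction rescheduling from regenerating the kernels. A minimal, self-contained sketch of the operand-to-struct idiom follows; the function name and the trimmed two-field struct are illustrative only, not taken from the source.]

    #include <cstddef>

    // Sketch of the migration shown in this diff: pointers formerly bound
    // as asm operands are stored in an args struct and loaded on demand.
    // Offsets are passed via "I" (immediate) constraints, matching the
    // [offsetof_bias] / [offsetof_output_ptr] operands the patch adds.
    struct KernelArgs
    {
        void *output_ptr = {};
        const float *bias = {};
    };

    void kernel_entry_sketch(void *output_ptr, const float *bias)
    {
        KernelArgs ka;
        ka.output_ptr = output_ptr; // was: "+&r" output operand
        ka.bias = bias;             // was: "r" input operand

        __asm__ __volatile__(
            // before: "mov x13, %x[output_ptr]" / "mov x15, %x[bias]"
            // after:  reload from the args block at each use site
            "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
            :
            : [args_ptr] "r" (&ka),
              [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)),
              [offsetof_bias] "I" (offsetof(KernelArgs, bias))
            : "x13", "x15", "memory");
    }

[Freeing the two operand registers is what allows the writeback paths to renumber onto x26 and shortens the asm's register footprint; the kernel can also persist an updated output pointer across Height sections by storing it back into the struct, which a read-only `"r"` operand could not express.]
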
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
index b1ab31e618..299dec5b3c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32_mla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -49,18 +49,19 @@ void sve_ffhybrid_fp32_mla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -81,6 +82,7 @@ void sve_ffhybrid_fp32_mla_6x4VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -106,15 +108,15 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"bgt 29f\n"
"beq 15f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -122,12 +124,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"mov x11, x12\n"
"3:" // Height 1: B setup done
@@ -162,8 +164,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"7:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 8f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -179,113 +181,113 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1w { z16.s }, p5/Z, [x12]\n"
- "fmla z8.s, z16.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "fmla z8.s, z17.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
"fmla z9.s, z16.s, z0.s[0]\n"
- "ld1w { z16.s }, p5/Z, [x10]\n"
- "fmla z10.s, z16.s, z0.s[0]\n"
"ld1w { z16.s }, p5/Z, [x9]\n"
+ "fmla z10.s, z17.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x12, #1, MUL VL]\n"
"fmla z11.s, z16.s, z0.s[0]\n"
- "ld1w { z16.s }, p5/Z, [x12, #1, MUL VL]\n"
- "fmla z8.s, z16.s, z0.s[1]\n"
"ld1w { z16.s }, p5/Z, [x11, #1, MUL VL]\n"
+ "fmla z8.s, z17.s, z0.s[1]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z9.s, z16.s, z0.s[1]\n"
- "ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z10.s, z16.s, z0.s[1]\n"
"ld1w { z16.s }, p5/Z, [x9, #1, MUL VL]\n"
+ "fmla z10.s, z17.s, z0.s[1]\n"
+ "ld1w { z17.s }, p5/Z, [x12, #2, MUL VL]\n"
"fmla z11.s, z16.s, z0.s[1]\n"
- "ld1w { z16.s }, p5/Z, [x12, #2, MUL VL]\n"
- "fmla z8.s, z16.s, z0.s[2]\n"
"ld1w { z16.s }, p5/Z, [x11, #2, MUL VL]\n"
+ "fmla z8.s, z17.s, z0.s[2]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z16.s, z0.s[2]\n"
- "ld1w { z16.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z10.s, z16.s, z0.s[2]\n"
"ld1w { z16.s }, p5/Z, [x9, #2, MUL VL]\n"
+ "fmla z10.s, z17.s, z0.s[2]\n"
+ "ld1w { z17.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
"fmla z11.s, z16.s, z0.s[2]\n"
- "ld1w { z16.s }, p5/Z, [x12, #3, MUL VL]\n"
- "fmla z8.s, z16.s, z0.s[3]\n"
"ld1w { z16.s }, p5/Z, [x11, #3, MUL VL]\n"
- "fmla z9.s, z16.s, z0.s[3]\n"
+ "addvl x11, x11, #4\n"
+ "fmla z8.s, z17.s, z0.s[3]\n"
"ld1w { z17.s }, p5/Z, [x10, #3, MUL VL]\n"
- "sub x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
+ "fmla z9.s, z16.s, z0.s[3]\n"
"ld1w { z16.s }, p5/Z, [x9, #3, MUL VL]\n"
- "cmp x27, #0x4\n"
+ "addvl x9, x9, #4\n"
"fmla z10.s, z17.s, z0.s[3]\n"
"fmla z11.s, z16.s, z0.s[3]\n"
- "add x26, x26, #0x10\n"
- "addvl x12, x12, #4\n"
- "addvl x11, x11, #4\n"
- "addvl x10, x10, #4\n"
- "addvl x9, x9, #4\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1w { z16.s }, p5/Z, [x12]\n"
- "fmla z8.s, z16.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
- "fmla z9.s, z16.s, z0.s[0]\n"
- "ld1w { z17.s }, p5/Z, [x10]\n"
"subs x27, x27, #0x1\n"
- "ld1w { z16.s }, p5/Z, [x9]\n"
- "fmla z10.s, z17.s, z0.s[0]\n"
- "fmla z11.s, z16.s, z0.s[0]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "fmla z8.s, z17.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.s, z16.s, z0.s[0]\n"
+ "ld1w { z16.s }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.s, z17.s, z0.s[0]\n"
+ "fmla z11.s, z16.s, z0.s[0]\n"
"ble 12f\n"
"ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
- "fmla z8.s, z17.s, z0.s[1]\n"
- "fmla z9.s, z16.s, z0.s[1]\n"
- "ld1w { z17.s }, p5/Z, [x10]\n"
- "ld1w { z16.s }, p5/Z, [x9]\n"
"subs x27, x27, #0x1\n"
- "fmla z10.s, z17.s, z0.s[1]\n"
- "fmla z11.s, z16.s, z0.s[1]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "fmla z8.s, z17.s, z0.s[1]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.s, z16.s, z0.s[1]\n"
+ "ld1w { z16.s }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.s, z17.s, z0.s[1]\n"
+ "fmla z11.s, z16.s, z0.s[1]\n"
"ble 12f\n"
"ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
- "fmla z8.s, z17.s, z0.s[2]\n"
- "fmla z9.s, z16.s, z0.s[2]\n"
- "ld1w { z17.s }, p5/Z, [x10]\n"
- "ld1w { z16.s }, p5/Z, [x9]\n"
"subs x27, x27, #0x1\n"
- "fmla z10.s, z17.s, z0.s[2]\n"
- "fmla z11.s, z16.s, z0.s[2]\n"
"addvl x12, x12, #1\n"
"addvl x11, x11, #1\n"
+ "fmla z8.s, z17.s, z0.s[2]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
+ "fmla z9.s, z16.s, z0.s[2]\n"
+ "ld1w { z16.s }, p5/Z, [x9]\n"
"addvl x9, x9, #1\n"
+ "fmla z10.s, z17.s, z0.s[2]\n"
+ "fmla z11.s, z16.s, z0.s[2]\n"
"ble 12f\n"
"ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z17.s, z0.s[3]\n"
- "fmla z9.s, z16.s, z0.s[3]\n"
"ld1w { z17.s }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.s, z16.s, z0.s[3]\n"
"ld1w { z16.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z17.s, z0.s[3]\n"
"fmla z11.s, z16.s, z0.s[3]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"12:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 7b\n"
"tbz %x[flags], #1, 13f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z17.s\n"
"fmin z9.s, p5/M, z9.s, z17.s\n"
@@ -308,15 +310,15 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"b 86f\n"
"15:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"16:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -324,12 +326,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 17f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"mov x11, x12\n"
"17:" // Height 2: B setup done
@@ -344,22 +346,22 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cbz x15, 18f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x15, x15, #4\n"
"b 20f\n"
"18:" // Height 2: no bias
"tbz %x[flags], #0, 19f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x13, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x13]\n"
"ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x20, x13, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
@@ -378,8 +380,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"21:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 22f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -398,29 +400,29 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 25f\n"
"24:" // Height 2: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z1.s }, p0/Z, [x26]\n"
- "ld1rqw { z0.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
"ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"fmla z8.s, z17.s, z1.s[0]\n"
"fmla z12.s, z17.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
"fmla z9.s, z16.s, z1.s[0]\n"
"fmla z13.s, z16.s, z0.s[0]\n"
- "ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x9]\n"
"fmla z10.s, z17.s, z1.s[0]\n"
"fmla z14.s, z17.s, z0.s[0]\n"
"ld1w { z17.s }, p5/Z, [x12, #1, MUL VL]\n"
- "cmp x27, #0x4\n"
"fmla z11.s, z16.s, z1.s[0]\n"
"fmla z15.s, z16.s, z0.s[0]\n"
"ld1w { z16.s }, p5/Z, [x11, #1, MUL VL]\n"
- "add x26, x26, #0x10\n"
"fmla z8.s, z17.s, z1.s[1]\n"
"fmla z12.s, z17.s, z0.s[1]\n"
"ld1w { z17.s }, p5/Z, [x10, #1, MUL VL]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z16.s, z1.s[1]\n"
"fmla z13.s, z16.s, z0.s[1]\n"
"ld1w { z16.s }, p5/Z, [x9, #1, MUL VL]\n"
@@ -459,89 +461,89 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"bgt 24b\n"
"25:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
+ "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "ld1rqw { z1.s }, p0/Z, [x25]\n"
"fmla z8.s, z17.s, z0.s[0]\n"
"fmla z12.s, z17.s, z1.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z16.s, z0.s[0]\n"
"fmla z13.s, z16.s, z1.s[0]\n"
- "ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z17.s, z0.s[0]\n"
"fmla z14.s, z17.s, z1.s[0]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z11.s, z16.s, z0.s[0]\n"
"fmla z15.s, z16.s, z1.s[0]\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z17.s, z0.s[1]\n"
"fmla z12.s, z17.s, z1.s[1]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z16.s, z0.s[1]\n"
"fmla z13.s, z16.s, z1.s[1]\n"
- "ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z17.s, z0.s[1]\n"
"fmla z14.s, z17.s, z1.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z11.s, z16.s, z0.s[1]\n"
"fmla z15.s, z16.s, z1.s[1]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z17.s, z0.s[2]\n"
"fmla z12.s, z17.s, z1.s[2]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z16.s, z0.s[2]\n"
"fmla z13.s, z16.s, z1.s[2]\n"
- "ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x9]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z17.s, z0.s[2]\n"
"fmla z14.s, z17.s, z1.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z11.s, z16.s, z0.s[2]\n"
"fmla z15.s, z16.s, z1.s[2]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"ble 26f\n"
"ld1w { z17.s }, p5/Z, [x12]\n"
"ld1w { z16.s }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z17.s, z0.s[3]\n"
"fmla z12.s, z17.s, z1.s[3]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z16.s, z0.s[3]\n"
"fmla z13.s, z16.s, z1.s[3]\n"
- "ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x9]\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z17.s, z0.s[3]\n"
"fmla z14.s, z17.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z11.s, z16.s, z0.s[3]\n"
"fmla z15.s, z16.s, z1.s[3]\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"26:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 21b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z17.s\n"
"fmin z9.s, p5/M, z9.s, z17.s\n"
@@ -565,10 +567,10 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
"28:" // Height 2: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -576,15 +578,15 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"b 86f\n"
"29:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"30:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -592,12 +594,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 31f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"mov x11, x12\n"
"31:" // Height 3: B setup done
@@ -612,27 +614,27 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cbz x15, 32f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 34f\n"
"32:" // Height 3: no bias
"tbz %x[flags], #0, 33f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x13, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x13]\n"
"ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x21, x13, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
@@ -659,8 +661,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"35:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 36f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -682,62 +684,62 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 39f\n"
"38:" // Height 3: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z21.s }, p5/Z, [x12]\n"
+ "ld1w { z20.s }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1w { z21.s }, p5/Z, [x12]\n"
+ "add x24, x24, #0x10\n"
"fmla z8.s, z21.s, z2.s[0]\n"
"fmla z12.s, z21.s, z1.s[0]\n"
- "ld1w { z20.s }, p5/Z, [x11]\n"
- "fmla z16.s, z21.s, z0.s[0]\n"
"fmla z9.s, z20.s, z2.s[0]\n"
- "ld1w { z21.s }, p5/Z, [x10]\n"
"fmla z13.s, z20.s, z1.s[0]\n"
+ "fmla z16.s, z21.s, z0.s[0]\n"
+ "ld1w { z21.s }, p5/Z, [x10]\n"
"fmla z17.s, z20.s, z0.s[0]\n"
"ld1w { z20.s }, p5/Z, [x9]\n"
- "cmp x27, #0x4\n"
"fmla z10.s, z21.s, z2.s[0]\n"
"fmla z14.s, z21.s, z1.s[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"fmla z18.s, z21.s, z0.s[0]\n"
- "fmla z11.s, z20.s, z2.s[0]\n"
"ld1w { z21.s }, p5/Z, [x12, #1, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "fmla z11.s, z20.s, z2.s[0]\n"
"fmla z15.s, z20.s, z1.s[0]\n"
"fmla z19.s, z20.s, z0.s[0]\n"
"ld1w { z20.s }, p5/Z, [x11, #1, MUL VL]\n"
"fmla z8.s, z21.s, z2.s[1]\n"
"fmla z12.s, z21.s, z1.s[1]\n"
"fmla z16.s, z21.s, z0.s[1]\n"
- "fmla z9.s, z20.s, z2.s[1]\n"
"ld1w { z21.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.s, z20.s, z2.s[1]\n"
"fmla z13.s, z20.s, z1.s[1]\n"
"fmla z17.s, z20.s, z0.s[1]\n"
"ld1w { z20.s }, p5/Z, [x9, #1, MUL VL]\n"
"fmla z10.s, z21.s, z2.s[1]\n"
"fmla z14.s, z21.s, z1.s[1]\n"
"fmla z18.s, z21.s, z0.s[1]\n"
- "fmla z11.s, z20.s, z2.s[1]\n"
"ld1w { z21.s }, p5/Z, [x12, #2, MUL VL]\n"
+ "fmla z11.s, z20.s, z2.s[1]\n"
"fmla z15.s, z20.s, z1.s[1]\n"
"fmla z19.s, z20.s, z0.s[1]\n"
"ld1w { z20.s }, p5/Z, [x11, #2, MUL VL]\n"
"fmla z8.s, z21.s, z2.s[2]\n"
"fmla z12.s, z21.s, z1.s[2]\n"
"fmla z16.s, z21.s, z0.s[2]\n"
- "fmla z9.s, z20.s, z2.s[2]\n"
"ld1w { z21.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z20.s, z2.s[2]\n"
"fmla z13.s, z20.s, z1.s[2]\n"
"fmla z17.s, z20.s, z0.s[2]\n"
"ld1w { z20.s }, p5/Z, [x9, #2, MUL VL]\n"
"fmla z10.s, z21.s, z2.s[2]\n"
"fmla z14.s, z21.s, z1.s[2]\n"
"fmla z18.s, z21.s, z0.s[2]\n"
- "fmla z11.s, z20.s, z2.s[2]\n"
"ld1w { z21.s }, p5/Z, [x12, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "fmla z11.s, z20.s, z2.s[2]\n"
"fmla z15.s, z20.s, z1.s[2]\n"
"fmla z19.s, z20.s, z0.s[2]\n"
"ld1w { z20.s }, p5/Z, [x11, #3, MUL VL]\n"
@@ -745,9 +747,9 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z8.s, z21.s, z2.s[3]\n"
"fmla z12.s, z21.s, z1.s[3]\n"
"fmla z16.s, z21.s, z0.s[3]\n"
- "fmla z9.s, z20.s, z2.s[3]\n"
"ld1w { z21.s }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
+ "fmla z9.s, z20.s, z2.s[3]\n"
"fmla z13.s, z20.s, z1.s[3]\n"
"fmla z17.s, z20.s, z0.s[3]\n"
"ld1w { z20.s }, p5/Z, [x9, #3, MUL VL]\n"
@@ -761,91 +763,91 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"bgt 38b\n"
"39:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z21.s }, p5/Z, [x12]\n"
+ "ld1w { z20.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1w { z21.s }, p5/Z, [x12]\n"
"fmla z8.s, z21.s, z0.s[0]\n"
"fmla z12.s, z21.s, z1.s[0]\n"
- "ld1w { z20.s }, p5/Z, [x11]\n"
- "fmla z16.s, z21.s, z2.s[0]\n"
"fmla z9.s, z20.s, z0.s[0]\n"
- "ld1w { z21.s }, p5/Z, [x10]\n"
"fmla z13.s, z20.s, z1.s[0]\n"
+ "fmla z16.s, z21.s, z2.s[0]\n"
+ "ld1w { z21.s }, p5/Z, [x10]\n"
+ "addvl x10, x10, #1\n"
"fmla z17.s, z20.s, z2.s[0]\n"
"ld1w { z20.s }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z21.s, z0.s[0]\n"
"fmla z14.s, z21.s, z1.s[0]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.s, z21.s, z2.s[0]\n"
"fmla z11.s, z20.s, z0.s[0]\n"
- "addvl x9, x9, #1\n"
"fmla z15.s, z20.s, z1.s[0]\n"
"fmla z19.s, z20.s, z2.s[0]\n"
"ble 40f\n"
"ld1w { z21.s }, p5/Z, [x12]\n"
"ld1w { z20.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z21.s, z0.s[1]\n"
"fmla z12.s, z21.s, z1.s[1]\n"
"fmla z16.s, z21.s, z2.s[1]\n"
- "fmla z9.s, z20.s, z0.s[1]\n"
"ld1w { z21.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.s, z20.s, z0.s[1]\n"
"fmla z13.s, z20.s, z1.s[1]\n"
"fmla z17.s, z20.s, z2.s[1]\n"
"ld1w { z20.s }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z21.s, z0.s[1]\n"
"fmla z14.s, z21.s, z1.s[1]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.s, z21.s, z2.s[1]\n"
"fmla z11.s, z20.s, z0.s[1]\n"
- "addvl x9, x9, #1\n"
"fmla z15.s, z20.s, z1.s[1]\n"
"fmla z19.s, z20.s, z2.s[1]\n"
"ble 40f\n"
"ld1w { z21.s }, p5/Z, [x12]\n"
"ld1w { z20.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z21.s, z0.s[2]\n"
"fmla z12.s, z21.s, z1.s[2]\n"
"fmla z16.s, z21.s, z2.s[2]\n"
- "fmla z9.s, z20.s, z0.s[2]\n"
"ld1w { z21.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.s, z20.s, z0.s[2]\n"
"fmla z13.s, z20.s, z1.s[2]\n"
"fmla z17.s, z20.s, z2.s[2]\n"
"ld1w { z20.s }, p5/Z, [x9]\n"
- "addvl x12, x12, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z21.s, z0.s[2]\n"
"fmla z14.s, z21.s, z1.s[2]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z18.s, z21.s, z2.s[2]\n"
"fmla z11.s, z20.s, z0.s[2]\n"
- "addvl x9, x9, #1\n"
"fmla z15.s, z20.s, z1.s[2]\n"
"fmla z19.s, z20.s, z2.s[2]\n"
"ble 40f\n"
"ld1w { z21.s }, p5/Z, [x12]\n"
"ld1w { z20.s }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z21.s, z0.s[3]\n"
"fmla z12.s, z21.s, z1.s[3]\n"
"fmla z16.s, z21.s, z2.s[3]\n"
- "fmla z9.s, z20.s, z0.s[3]\n"
"ld1w { z21.s }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "addvl x10, x10, #1\n"
+ "fmla z9.s, z20.s, z0.s[3]\n"
"fmla z13.s, z20.s, z1.s[3]\n"
"fmla z17.s, z20.s, z2.s[3]\n"
"ld1w { z20.s }, p5/Z, [x9]\n"
- "addvl x11, x11, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z21.s, z0.s[3]\n"
"fmla z14.s, z21.s, z1.s[3]\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
"fmla z18.s, z21.s, z2.s[3]\n"
"fmla z11.s, z20.s, z0.s[3]\n"
"fmla z15.s, z20.s, z1.s[3]\n"
@@ -856,12 +858,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 35b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"tbz %x[flags], #1, 41f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z21.s }, p5/Z, [x21]\n"
"ld1rw { z20.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z21.s\n"
"fmin z9.s, p5/M, z9.s, z21.s\n"
@@ -893,14 +895,14 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
"42:" // Height 3: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -908,15 +910,15 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"b 86f\n"
"43:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -924,12 +926,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 45f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"mov x11, x12\n"
"45:" // Height 4: B setup done
@@ -944,18 +946,18 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cbz x15, 46f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -963,13 +965,13 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"46:" // Height 4: no bias
"tbz %x[flags], #0, 47f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x13, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x13]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x22, x13, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
@@ -1004,8 +1006,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"49:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1030,25 +1032,25 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 53f\n"
"52:" // Height 4: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z25.s }, p5/Z, [x12]\n"
+ "ld1w { z24.s }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z3.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqw { z2.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
- "ld1w { z25.s }, p5/Z, [x12]\n"
- "ld1w { z24.s }, p5/Z, [x11]\n"
+ "add x23, x23, #0x10\n"
"fmla z8.s, z25.s, z3.s[0]\n"
"fmla z12.s, z25.s, z2.s[0]\n"
+ "fmla z9.s, z24.s, z3.s[0]\n"
+ "fmla z13.s, z24.s, z2.s[0]\n"
"fmla z16.s, z25.s, z1.s[0]\n"
"fmla z20.s, z25.s, z0.s[0]\n"
"ld1w { z25.s }, p5/Z, [x10]\n"
- "add x25, x25, #0x10\n"
- "fmla z9.s, z24.s, z3.s[0]\n"
- "fmla z13.s, z24.s, z2.s[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"fmla z17.s, z24.s, z1.s[0]\n"
"fmla z21.s, z24.s, z0.s[0]\n"
"ld1w { z24.s }, p5/Z, [x9]\n"
@@ -1127,22 +1129,22 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"bgt 52b\n"
"53:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z25.s }, p5/Z, [x12]\n"
+ "ld1w { z24.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1rqw { z3.s }, p0/Z, [x23]\n"
- "ld1w { z25.s }, p5/Z, [x12]\n"
- "ld1w { z24.s }, p5/Z, [x11]\n"
"fmla z8.s, z25.s, z0.s[0]\n"
"fmla z12.s, z25.s, z1.s[0]\n"
+ "fmla z9.s, z24.s, z0.s[0]\n"
+ "fmla z13.s, z24.s, z1.s[0]\n"
"fmla z16.s, z25.s, z2.s[0]\n"
"fmla z20.s, z25.s, z3.s[0]\n"
"ld1w { z25.s }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
- "fmla z9.s, z24.s, z0.s[0]\n"
- "fmla z13.s, z24.s, z1.s[0]\n"
- "addvl x11, x11, #1\n"
"addvl x10, x10, #1\n"
"fmla z17.s, z24.s, z2.s[0]\n"
"fmla z21.s, z24.s, z3.s[0]\n"
@@ -1159,23 +1161,23 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 54f\n"
"ld1w { z25.s }, p5/Z, [x12]\n"
"ld1w { z24.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z25.s, z0.s[1]\n"
"fmla z12.s, z25.s, z1.s[1]\n"
"fmla z16.s, z25.s, z2.s[1]\n"
"fmla z20.s, z25.s, z3.s[1]\n"
"ld1w { z25.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z24.s, z0.s[1]\n"
"fmla z13.s, z24.s, z1.s[1]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z17.s, z24.s, z2.s[1]\n"
"fmla z21.s, z24.s, z3.s[1]\n"
"ld1w { z24.s }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z25.s, z0.s[1]\n"
"fmla z14.s, z25.s, z1.s[1]\n"
- "addvl x9, x9, #1\n"
"fmla z18.s, z25.s, z2.s[1]\n"
"fmla z22.s, z25.s, z3.s[1]\n"
"fmla z11.s, z24.s, z0.s[1]\n"
@@ -1185,23 +1187,23 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 54f\n"
"ld1w { z25.s }, p5/Z, [x12]\n"
"ld1w { z24.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z25.s, z0.s[2]\n"
"fmla z12.s, z25.s, z1.s[2]\n"
"fmla z16.s, z25.s, z2.s[2]\n"
"fmla z20.s, z25.s, z3.s[2]\n"
"ld1w { z25.s }, p5/Z, [x10]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z24.s, z0.s[2]\n"
"fmla z13.s, z24.s, z1.s[2]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z17.s, z24.s, z2.s[2]\n"
"fmla z21.s, z24.s, z3.s[2]\n"
"ld1w { z24.s }, p5/Z, [x9]\n"
- "addvl x10, x10, #1\n"
+ "addvl x9, x9, #1\n"
"fmla z10.s, z25.s, z0.s[2]\n"
"fmla z14.s, z25.s, z1.s[2]\n"
- "addvl x9, x9, #1\n"
"fmla z18.s, z25.s, z2.s[2]\n"
"fmla z22.s, z25.s, z3.s[2]\n"
"fmla z11.s, z24.s, z0.s[2]\n"
@@ -1211,16 +1213,16 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 54f\n"
"ld1w { z25.s }, p5/Z, [x12]\n"
"ld1w { z24.s }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z25.s, z0.s[3]\n"
"fmla z12.s, z25.s, z1.s[3]\n"
"fmla z16.s, z25.s, z2.s[3]\n"
"fmla z20.s, z25.s, z3.s[3]\n"
"ld1w { z25.s }, p5/Z, [x10]\n"
- "addvl x12, x12, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z24.s, z0.s[3]\n"
"fmla z13.s, z24.s, z1.s[3]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
"fmla z17.s, z24.s, z2.s[3]\n"
"fmla z21.s, z24.s, z3.s[3]\n"
"ld1w { z24.s }, p5/Z, [x9]\n"
@@ -1239,13 +1241,13 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 49b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 55f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z25.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z25.s }, p5/Z, [x21]\n"
"ld1rw { z24.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z25.s\n"
"fmin z9.s, p5/M, z9.s, z25.s\n"
@@ -1285,18 +1287,18 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
"56:" // Height 4: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1304,15 +1306,15 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"b 86f\n"
"57:" // Height 5
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"58:" // Height 5: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -1320,12 +1322,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 59f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 59f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 59f\n"
"mov x11, x12\n"
"59:" // Height 5: B setup done
@@ -1340,18 +1342,18 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cbz x15, 60f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1363,16 +1365,16 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"60:" // Height 5: no bias
"tbz %x[flags], #0, 61f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x13]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x23, x13, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x22]\n"
@@ -1413,8 +1415,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"63:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 64f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1442,29 +1444,29 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 67f\n"
"66:" // Height 5: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z29.s }, p5/Z, [x12]\n"
+ "ld1w { z28.s }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z4.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqw { z3.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x22]\n"
- "ld1w { z29.s }, p5/Z, [x12]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z8.s, z29.s, z4.s[0]\n"
"fmla z12.s, z29.s, z3.s[0]\n"
- "ld1w { z28.s }, p5/Z, [x11]\n"
+ "fmla z9.s, z28.s, z4.s[0]\n"
"fmla z16.s, z29.s, z2.s[0]\n"
"fmla z20.s, z29.s, z1.s[0]\n"
- "add x25, x25, #0x10\n"
"fmla z24.s, z29.s, z0.s[0]\n"
- "fmla z9.s, z28.s, z4.s[0]\n"
- "ld1w { z29.s }, p5/Z, [x10]\n"
- "add x24, x24, #0x10\n"
"fmla z13.s, z28.s, z3.s[0]\n"
+ "ld1w { z29.s }, p5/Z, [x10]\n"
"fmla z17.s, z28.s, z2.s[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z21.s, z28.s, z1.s[0]\n"
"fmla z25.s, z28.s, z0.s[0]\n"
"ld1w { z28.s }, p5/Z, [x9]\n"
@@ -1473,8 +1475,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z18.s, z29.s, z2.s[0]\n"
"fmla z22.s, z29.s, z1.s[0]\n"
"fmla z26.s, z29.s, z0.s[0]\n"
- "fmla z11.s, z28.s, z4.s[0]\n"
"ld1w { z29.s }, p5/Z, [x12, #1, MUL VL]\n"
+ "fmla z11.s, z28.s, z4.s[0]\n"
"fmla z15.s, z28.s, z3.s[0]\n"
"fmla z19.s, z28.s, z2.s[0]\n"
"fmla z23.s, z28.s, z1.s[0]\n"
@@ -1485,8 +1487,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z16.s, z29.s, z2.s[1]\n"
"fmla z20.s, z29.s, z1.s[1]\n"
"fmla z24.s, z29.s, z0.s[1]\n"
- "fmla z9.s, z28.s, z4.s[1]\n"
"ld1w { z29.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.s, z28.s, z4.s[1]\n"
"fmla z13.s, z28.s, z3.s[1]\n"
"fmla z17.s, z28.s, z2.s[1]\n"
"fmla z21.s, z28.s, z1.s[1]\n"
@@ -1497,8 +1499,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z18.s, z29.s, z2.s[1]\n"
"fmla z22.s, z29.s, z1.s[1]\n"
"fmla z26.s, z29.s, z0.s[1]\n"
- "fmla z11.s, z28.s, z4.s[1]\n"
"ld1w { z29.s }, p5/Z, [x12, #2, MUL VL]\n"
+ "fmla z11.s, z28.s, z4.s[1]\n"
"fmla z15.s, z28.s, z3.s[1]\n"
"fmla z19.s, z28.s, z2.s[1]\n"
"fmla z23.s, z28.s, z1.s[1]\n"
@@ -1509,8 +1511,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z16.s, z29.s, z2.s[2]\n"
"fmla z20.s, z29.s, z1.s[2]\n"
"fmla z24.s, z29.s, z0.s[2]\n"
- "fmla z9.s, z28.s, z4.s[2]\n"
"ld1w { z29.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z28.s, z4.s[2]\n"
"fmla z13.s, z28.s, z3.s[2]\n"
"fmla z17.s, z28.s, z2.s[2]\n"
"fmla z21.s, z28.s, z1.s[2]\n"
@@ -1521,30 +1523,30 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"fmla z18.s, z29.s, z2.s[2]\n"
"fmla z22.s, z29.s, z1.s[2]\n"
"fmla z26.s, z29.s, z0.s[2]\n"
- "fmla z11.s, z28.s, z4.s[2]\n"
"ld1w { z29.s }, p5/Z, [x12, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
+ "fmla z11.s, z28.s, z4.s[2]\n"
"fmla z15.s, z28.s, z3.s[2]\n"
"fmla z19.s, z28.s, z2.s[2]\n"
"fmla z23.s, z28.s, z1.s[2]\n"
"fmla z27.s, z28.s, z0.s[2]\n"
"ld1w { z28.s }, p5/Z, [x11, #3, MUL VL]\n"
- "addvl x11, x11, #4\n"
"fmla z8.s, z29.s, z4.s[3]\n"
+ "addvl x11, x11, #4\n"
"fmla z12.s, z29.s, z3.s[3]\n"
"fmla z16.s, z29.s, z2.s[3]\n"
"fmla z20.s, z29.s, z1.s[3]\n"
"fmla z24.s, z29.s, z0.s[3]\n"
- "fmla z9.s, z28.s, z4.s[3]\n"
"ld1w { z29.s }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
+ "fmla z9.s, z28.s, z4.s[3]\n"
"fmla z13.s, z28.s, z3.s[3]\n"
"fmla z17.s, z28.s, z2.s[3]\n"
"fmla z21.s, z28.s, z1.s[3]\n"
"fmla z25.s, z28.s, z0.s[3]\n"
"ld1w { z28.s }, p5/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"fmla z10.s, z29.s, z4.s[3]\n"
+ "addvl x9, x9, #4\n"
"fmla z14.s, z29.s, z3.s[3]\n"
"fmla z18.s, z29.s, z2.s[3]\n"
"fmla z22.s, z29.s, z1.s[3]\n"
@@ -1557,25 +1559,25 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"bgt 66b\n"
"67:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z29.s }, p5/Z, [x12]\n"
+ "ld1w { z28.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1rqw { z3.s }, p0/Z, [x23]\n"
"ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1w { z29.s }, p5/Z, [x12]\n"
"fmla z8.s, z29.s, z0.s[0]\n"
"fmla z12.s, z29.s, z1.s[0]\n"
- "ld1w { z28.s }, p5/Z, [x11]\n"
+ "fmla z9.s, z28.s, z0.s[0]\n"
+ "fmla z13.s, z28.s, z1.s[0]\n"
"fmla z16.s, z29.s, z2.s[0]\n"
"fmla z20.s, z29.s, z3.s[0]\n"
- "addvl x12, x12, #1\n"
"fmla z24.s, z29.s, z4.s[0]\n"
- "fmla z9.s, z28.s, z0.s[0]\n"
- "ld1w { z29.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
- "fmla z13.s, z28.s, z1.s[0]\n"
"fmla z17.s, z28.s, z2.s[0]\n"
+ "ld1w { z29.s }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
"fmla z21.s, z28.s, z3.s[0]\n"
"fmla z25.s, z28.s, z4.s[0]\n"
@@ -1594,19 +1596,19 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 68f\n"
"ld1w { z29.s }, p5/Z, [x12]\n"
"ld1w { z28.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z29.s, z0.s[1]\n"
"fmla z12.s, z29.s, z1.s[1]\n"
"fmla z16.s, z29.s, z2.s[1]\n"
"fmla z20.s, z29.s, z3.s[1]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.s, z29.s, z4.s[1]\n"
"fmla z9.s, z28.s, z0.s[1]\n"
"ld1w { z29.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z13.s, z28.s, z1.s[1]\n"
"fmla z17.s, z28.s, z2.s[1]\n"
- "addvl x10, x10, #1\n"
"fmla z21.s, z28.s, z3.s[1]\n"
"fmla z25.s, z28.s, z4.s[1]\n"
"ld1w { z28.s }, p5/Z, [x9]\n"
@@ -1624,19 +1626,19 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 68f\n"
"ld1w { z29.s }, p5/Z, [x12]\n"
"ld1w { z28.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z29.s, z0.s[2]\n"
"fmla z12.s, z29.s, z1.s[2]\n"
"fmla z16.s, z29.s, z2.s[2]\n"
"fmla z20.s, z29.s, z3.s[2]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.s, z29.s, z4.s[2]\n"
"fmla z9.s, z28.s, z0.s[2]\n"
"ld1w { z29.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z13.s, z28.s, z1.s[2]\n"
"fmla z17.s, z28.s, z2.s[2]\n"
- "addvl x10, x10, #1\n"
"fmla z21.s, z28.s, z3.s[2]\n"
"fmla z25.s, z28.s, z4.s[2]\n"
"ld1w { z28.s }, p5/Z, [x9]\n"
@@ -1654,12 +1656,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 68f\n"
"ld1w { z29.s }, p5/Z, [x12]\n"
"ld1w { z28.s }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z29.s, z0.s[3]\n"
"fmla z12.s, z29.s, z1.s[3]\n"
"fmla z16.s, z29.s, z2.s[3]\n"
"fmla z20.s, z29.s, z3.s[3]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z24.s, z29.s, z4.s[3]\n"
"fmla z9.s, z28.s, z0.s[3]\n"
"ld1w { z29.s }, p5/Z, [x10]\n"
@@ -1686,14 +1688,14 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 63b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 69f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z29.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z29.s }, p5/Z, [x21]\n"
"ld1rw { z28.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z29.s\n"
"fmin z9.s, p5/M, z9.s, z29.s\n"
@@ -1741,22 +1743,22 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
"70:" // Height 5: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -1764,18 +1766,19 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"b 86f\n"
"71:" // Height 6
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0x18\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x18\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"72:" // Height 6: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
- "add x11, x12, x20, LSL #2\n"
"cntw x21, ALL, MUL #3\n"
+ "add x11, x12, x20, LSL #2\n"
"add x10, x11, x20, LSL #2\n"
"add x9, x10, x20, LSL #2\n"
"add x20, x9, x20, LSL #2\n"
@@ -1783,12 +1786,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 73f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 73f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 73f\n"
"mov x11, x12\n"
"73:" // Height 6: B setup done
@@ -1803,18 +1806,18 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cbz x15, 74f\n"
"ld1w { z8.s }, p5/Z, [x15]\n"
"ld1w { z9.s }, p5/Z, [x15, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x15, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x15, x15, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x15, x15, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1830,17 +1833,17 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"74:" // Height 6: no bias
"tbz %x[flags], #0, 75f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x13, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x13]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x13, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x13, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p1/Z, [x13, #3, MUL VL]\n"
+ "add x24, x13, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x23]\n"
@@ -1889,8 +1892,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"77:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 78f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1921,29 +1924,29 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 81f\n"
"80:" // Height 6: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z1.s }, p5/Z, [x12]\n"
+ "ld1w { z0.s }, p5/Z, [x11]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z7.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqw { z6.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z5.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z4.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
"ld1rqw { z3.s }, p0/Z, [x22]\n"
"ld1rqw { z2.s }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1w { z1.s }, p5/Z, [x12]\n"
- "ld1w { z0.s }, p5/Z, [x11]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z8.s, z1.s, z7.s[0]\n"
"fmla z12.s, z1.s, z6.s[0]\n"
+ "add x21, x21, #0x10\n"
"fmla z16.s, z1.s, z5.s[0]\n"
"fmla z20.s, z1.s, z4.s[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z24.s, z1.s, z3.s[0]\n"
"fmla z28.s, z1.s, z2.s[0]\n"
"ld1w { z1.s }, p5/Z, [x10]\n"
- "add x21, x21, #0x10\n"
"fmla z9.s, z0.s, z7.s[0]\n"
"fmla z13.s, z0.s, z6.s[0]\n"
"fmla z17.s, z0.s, z5.s[0]\n"
@@ -2054,27 +2057,27 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"bgt 80b\n"
"81:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z7.s }, p5/Z, [x12]\n"
+ "ld1w { z6.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1rqw { z3.s }, p0/Z, [x23]\n"
"ld1rqw { z4.s }, p0/Z, [x22]\n"
"ld1rqw { z5.s }, p0/Z, [x21]\n"
- "ld1w { z7.s }, p5/Z, [x12]\n"
- "ld1w { z6.s }, p5/Z, [x11]\n"
"fmla z8.s, z7.s, z0.s[0]\n"
"fmla z12.s, z7.s, z1.s[0]\n"
+ "fmla z9.s, z6.s, z0.s[0]\n"
+ "fmla z13.s, z6.s, z1.s[0]\n"
"fmla z16.s, z7.s, z2.s[0]\n"
"fmla z20.s, z7.s, z3.s[0]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z24.s, z7.s, z4.s[0]\n"
"fmla z28.s, z7.s, z5.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10]\n"
"addvl x10, x10, #1\n"
- "fmla z9.s, z6.s, z0.s[0]\n"
- "fmla z13.s, z6.s, z1.s[0]\n"
"fmla z17.s, z6.s, z2.s[0]\n"
"fmla z21.s, z6.s, z3.s[0]\n"
"fmla z25.s, z6.s, z4.s[0]\n"
@@ -2096,19 +2099,19 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 82f\n"
"ld1w { z7.s }, p5/Z, [x12]\n"
"ld1w { z6.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z7.s, z0.s[1]\n"
"fmla z12.s, z7.s, z1.s[1]\n"
"fmla z16.s, z7.s, z2.s[1]\n"
"fmla z20.s, z7.s, z3.s[1]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.s, z7.s, z4.s[1]\n"
"fmla z28.s, z7.s, z5.s[1]\n"
"ld1w { z7.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z6.s, z0.s[1]\n"
"fmla z13.s, z6.s, z1.s[1]\n"
- "addvl x10, x10, #1\n"
"fmla z17.s, z6.s, z2.s[1]\n"
"fmla z21.s, z6.s, z3.s[1]\n"
"fmla z25.s, z6.s, z4.s[1]\n"
@@ -2130,19 +2133,19 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 82f\n"
"ld1w { z7.s }, p5/Z, [x12]\n"
"ld1w { z6.s }, p5/Z, [x11]\n"
+ "subs x27, x27, #0x1\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z7.s, z0.s[2]\n"
"fmla z12.s, z7.s, z1.s[2]\n"
"fmla z16.s, z7.s, z2.s[2]\n"
"fmla z20.s, z7.s, z3.s[2]\n"
- "subs x27, x27, #0x1\n"
- "addvl x12, x12, #1\n"
"fmla z24.s, z7.s, z4.s[2]\n"
"fmla z28.s, z7.s, z5.s[2]\n"
"ld1w { z7.s }, p5/Z, [x10]\n"
- "addvl x11, x11, #1\n"
+ "addvl x10, x10, #1\n"
"fmla z9.s, z6.s, z0.s[2]\n"
"fmla z13.s, z6.s, z1.s[2]\n"
- "addvl x10, x10, #1\n"
"fmla z17.s, z6.s, z2.s[2]\n"
"fmla z21.s, z6.s, z3.s[2]\n"
"fmla z25.s, z6.s, z4.s[2]\n"
@@ -2164,12 +2167,12 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"ble 82f\n"
"ld1w { z7.s }, p5/Z, [x12]\n"
"ld1w { z6.s }, p5/Z, [x11]\n"
+ "addvl x12, x12, #1\n"
+ "addvl x11, x11, #1\n"
"fmla z8.s, z7.s, z0.s[3]\n"
"fmla z12.s, z7.s, z1.s[3]\n"
"fmla z16.s, z7.s, z2.s[3]\n"
"fmla z20.s, z7.s, z3.s[3]\n"
- "addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
"fmla z24.s, z7.s, z4.s[3]\n"
"fmla z28.s, z7.s, z5.s[3]\n"
"ld1w { z7.s }, p5/Z, [x10]\n"
@@ -2200,15 +2203,15 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 77b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x13, x20, LSL #2\n"
+ "add x26, x13, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"tbz %x[flags], #1, 83f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x21]\n"
"ld1rw { z0.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
@@ -2264,26 +2267,26 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x13, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x13, #3, MUL VL]\n"
"addvl x13, x13, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p4, [x21]\n"
- "st1w { z29.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x22]\n"
+ "st1w { z29.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x22, #3, MUL VL]\n"
"84:" // Height 6: Writeback done
"decw x14, ALL, MUL #4\n"
"cmp x14, XZR\n"
@@ -2300,8 +2303,8 @@ void sve_ffhybrid_fp32_mla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"86:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
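
[Editor's sketch, not part of the diff.] The hunks above consistently move output_ptr and bias out of the inline-asm operand list and into the KernelArgs block, then load them inside the assembly via offsetof (e.g. "mov x13, %x[output_ptr]" becomes "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]"), freeing two general-purpose registers for the kernel body. A minimal standalone sketch of that pattern follows; names are illustrative and the asm is AArch64 GCC/Clang extended-asm only, not the library's actual code.

    #include <cstddef>  // offsetof

    // Arguments block mirroring the diff: pointers that used to be
    // separate asm register operands now live behind args_ptr.
    struct KernelArgs
    {
        void        *output_ptr = nullptr;
        const float *bias       = nullptr;
    };

    void kernel_entry(void *output, const float *bias)
    {
        KernelArgs ka;
        ka.output_ptr = output;  // was: [output_ptr] "+&r" (output_ptr)
        ka.bias       = bias;    // was: [bias] "r" (bias)

        __asm__ __volatile__(
            // was: "mov x13, %x[output_ptr]" / "mov x15, %x[bias]"
            "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
            :
            : [args_ptr] "r" (&ka),
              [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)),
              [offsetof_bias] "I" (offsetof(KernelArgs, bias))
            : "cc", "memory", "x13", "x15");
    }

A side effect visible throughout the diff is that, with the asm no longer writing back through an output_ptr operand, the Height-6 path stores the advanced pointer back into the struct ("str x21, [%x[args_ptr], %[offsetof_output_ptr]]") instead of updating a C variable.
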
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp
index 23f686a902..90112a823b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp
@@ -82,16 +82,14 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 4, 12, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 12, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
if (std::is_same<T, float>::value) {
switch (ci->get_cpu_model()) {
- case CPUModel::V1:
- return { 28.74 };
default:
- return { 15.27 };
+ return { 32.35 };
}
}
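
[Editor's sketch, not part of the diff.] The header hunk above makes the LHS operand type an explicit first template parameter of StdTransformsSVE instead of leaving it implied, and collapses the per-CPU V1 performance case into a single default estimate. A hedged sketch of the signature change; parameter names and the bfloat16 stand-in type are illustrative, only the added leading type parameter and the <float-ish, bf16-ish, float, 4, 12, 4> instantiation come from the diff.

    #include <cstdint>

    // Before (sketch): StdTransformsSVE<TRhs, TResult, Height, Width, KBlock>
    // After: the LHS operand type is spelled out, so kernels whose LHS and
    // RHS types differ (fp32 inputs feeding bf16 MMLA tiles here) can
    // declare both explicitly.
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned Height, unsigned WidthVectors, unsigned KBlock>
    struct StdTransformsSVE { /* transform plumbing elided */ };

    struct bfloat16 { uint16_t bits; };  // illustrative stand-in

    // Instantiation mirroring the hunk above:
    using transforms_t = StdTransformsSVE<float, bfloat16, float, 4, 12, 4>;
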
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
index 57f42cce77..0e98cc6def 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -50,18 +50,19 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -82,6 +83,7 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
ka.B_stride = B_stride;
switch(act.type) {
default:
@@ -104,17 +106,17 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"bgt 29f\n"
"beq 15f\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x21, ALL, MUL #5\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
- "cntw x21, ALL, MUL #5\n"
"add x28, x9, x20, LSL #1\n"
"add x27, x28, x20, LSL #1\n"
"add x20, x27, x20, LSL #1\n"
@@ -122,20 +124,20 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x27, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x28, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 3f\n"
"mov x11, x12\n"
"3:" // Height 1: B setup done
@@ -154,19 +156,19 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"cbz x15, 4f\n"
"ld1w { z8.s }, p7/Z, [x15]\n"
"ld1w { z9.s }, p7/Z, [x15, #1, MUL VL]\n"
- "zip2 z14.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p7/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p7/Z, [x15, #3, MUL VL]\n"
- "zip2 z15.d, z9.d, z9.d\n"
- "zip1 z9.d, z9.d, z9.d\n"
"ld1w { z12.s }, p7/Z, [x15, #4, MUL VL]\n"
"ld1w { z13.s }, p7/Z, [x15, #5, MUL VL]\n"
+ "addvl x15, x15, #6\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x15, x15, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -174,16 +176,16 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"b 6f\n"
"4:" // Height 1: no bias
"tbz %x[flags], #0, 5f\n"
- "ld1w { z21.s }, p6/Z, [x13]\n"
- "ld1w { z20.s }, p5/Z, [x13, #1, MUL VL]\n"
- "zip1 z8.d, z21.d, z14.d\n"
- "zip2 z14.d, z21.d, z14.d\n"
+ "ld1w { z25.s }, p6/Z, [x13]\n"
+ "ld1w { z24.s }, p5/Z, [x13, #1, MUL VL]\n"
"ld1w { z23.s }, p4/Z, [x13, #2, MUL VL]\n"
"ld1w { z22.s }, p3/Z, [x13, #3, MUL VL]\n"
- "zip1 z9.d, z20.d, z15.d\n"
- "zip2 z15.d, z20.d, z15.d\n"
"ld1w { z21.s }, p2/Z, [x13, #4, MUL VL]\n"
"ld1w { z20.s }, p1/Z, [x13, #5, MUL VL]\n"
+ "zip1 z8.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z9.d, z24.d, z15.d\n"
+ "zip2 z15.d, z24.d, z15.d\n"
"zip1 z10.d, z23.d, z16.d\n"
"zip2 z16.d, z23.d, z16.d\n"
"zip1 z11.d, z22.d, z17.d\n"
@@ -210,8 +212,8 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"mov x26, #0x0\n"
"7:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 8f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -227,78 +229,78 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"ble 11f\n"
"10:" // Height 1: Multiply loop: Main loop head
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z23.h }, p7/Z, [x12]\n"
+ "ld1h { z22.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "sub x25, x25, #0x4\n"
+ "ld1h { z21.h }, p7/Z, [x11]\n"
+ "ld1h { z20.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "cmp x25, #0x4\n"
+ "addvl x12, x12, #2\n"
+ "addvl x11, x11, #2\n"
"ld1rqw { z24.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
".inst 0x658abf18 // bfcvt z24.h, p7/M, z24.s\n"
"uzp1 z24.h, z24.h, z24.h\n"
- "ld1h { z21.h }, p7/Z, [x12]\n"
- "ld1h { z20.h }, p7/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6475e708 // bfmmla z8.s, z24.h, z21.h\n"
- ".inst 0x6474e70e // bfmmla z14.s, z24.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x11]\n"
- "ld1h { z20.h }, p7/Z, [x11, #1, MUL VL]\n"
+ ".inst 0x6477e708 // bfmmla z8.s, z24.h, z23.h\n"
+ "ld1h { z23.h }, p7/Z, [x10]\n"
+ ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
+ "ld1h { z22.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6475e709 // bfmmla z9.s, z24.h, z21.h\n"
- ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x10]\n"
- "ld1h { z20.h }, p7/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6475e70a // bfmmla z10.s, z24.h, z21.h\n"
- ".inst 0x6474e710 // bfmmla z16.s, z24.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x9]\n"
+ ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
"ld1h { z20.h }, p7/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
- ".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6477e70a // bfmmla z10.s, z24.h, z23.h\n"
"ld1h { z23.h }, p7/Z, [x28]\n"
+ ".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
"ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
"ld1h { z21.h }, p7/Z, [x27]\n"
+ ".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
"ld1h { z20.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
".inst 0x6477e70c // bfmmla z12.s, z24.h, z23.h\n"
".inst 0x6476e712 // bfmmla z18.s, z24.h, z22.h\n"
".inst 0x6475e70d // bfmmla z13.s, z24.h, z21.h\n"
".inst 0x6474e713 // bfmmla z19.s, z24.h, z20.h\n"
- "add x24, x24, #0x10\n"
- "addvl x12, x12, #2\n"
- "addvl x11, x11, #2\n"
- "addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- "addvl x28, x28, #2\n"
- "addvl x27, x27, #2\n"
"bgt 10b\n"
"11:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x25\n"
- "ld1rqw { z22.s }, p0/Z, [x24]\n"
- ".inst 0x658abed6 // bfcvt z22.h, p7/M, z22.s\n"
- "uzp1 z22.h, z22.h, z22.h\n"
- "ld1h { z21.h }, p7/Z, [x12]\n"
- "ld1h { z20.h }, p7/Z, [x12, #1, MUL VL]\n"
- ".inst 0x6475e6c8 // bfmmla z8.s, z22.h, z21.h\n"
- ".inst 0x6474e6ce // bfmmla z14.s, z22.h, z20.h\n"
+ "ld1h { z23.h }, p7/Z, [x12]\n"
+ "ld1h { z22.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
"ld1h { z21.h }, p7/Z, [x11]\n"
"ld1h { z20.h }, p7/Z, [x11, #1, MUL VL]\n"
- ".inst 0x6475e6c9 // bfmmla z9.s, z22.h, z21.h\n"
- ".inst 0x6474e6cf // bfmmla z15.s, z22.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x10]\n"
- "ld1h { z20.h }, p7/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6475e6ca // bfmmla z10.s, z22.h, z21.h\n"
- ".inst 0x6474e6d0 // bfmmla z16.s, z22.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x9]\n"
- "ld1h { z20.h }, p7/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6475e6cb // bfmmla z11.s, z22.h, z21.h\n"
- ".inst 0x6474e6d1 // bfmmla z17.s, z22.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x28]\n"
- "ld1h { z20.h }, p7/Z, [x28, #1, MUL VL]\n"
- ".inst 0x6475e6cc // bfmmla z12.s, z22.h, z21.h\n"
- ".inst 0x6474e6d2 // bfmmla z18.s, z22.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x27]\n"
- "ld1h { z20.h }, p7/Z, [x27, #1, MUL VL]\n"
- ".inst 0x6475e6cd // bfmmla z13.s, z22.h, z21.h\n"
- ".inst 0x6474e6d3 // bfmmla z19.s, z22.h, z20.h\n"
- "addvl x12, x12, #2\n"
"addvl x11, x11, #2\n"
+ "ld1rqw { z24.s }, p0/Z, [x24]\n"
+ ".inst 0x658abf18 // bfcvt z24.h, p7/M, z24.s\n"
+ "uzp1 z24.h, z24.h, z24.h\n"
+ ".inst 0x6477e708 // bfmmla z8.s, z24.h, z23.h\n"
+ "ld1h { z23.h }, p7/Z, [x10]\n"
+ ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
+ "ld1h { z22.h }, p7/Z, [x10, #1, MUL VL]\n"
"addvl x10, x10, #2\n"
+ ".inst 0x6475e709 // bfmmla z9.s, z24.h, z21.h\n"
+ "ld1h { z21.h }, p7/Z, [x9]\n"
+ ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
+ "ld1h { z20.h }, p7/Z, [x9, #1, MUL VL]\n"
"addvl x9, x9, #2\n"
+ ".inst 0x6477e70a // bfmmla z10.s, z24.h, z23.h\n"
+ "ld1h { z23.h }, p7/Z, [x28]\n"
+ ".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
+ "ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
"addvl x28, x28, #2\n"
+ ".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
+ "ld1h { z21.h }, p7/Z, [x27]\n"
+ ".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
+ "ld1h { z20.h }, p7/Z, [x27, #1, MUL VL]\n"
"addvl x27, x27, #2\n"
+ ".inst 0x6477e70c // bfmmla z12.s, z24.h, z23.h\n"
+ ".inst 0x6476e712 // bfmmla z18.s, z24.h, z22.h\n"
+ ".inst 0x6475e70d // bfmmla z13.s, z24.h, z21.h\n"
+ ".inst 0x6474e713 // bfmmla z19.s, z24.h, z20.h\n"
"12:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -311,9 +313,9 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z12.d, z12.d, z18.d\n"
"uzp1 z13.d, z13.d, z19.d\n"
"tbz %x[flags], #1, 13f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p7/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z21.s }, p7/Z, [x21]\n"
"ld1rw { z20.s }, p7/Z, [x20]\n"
"fmin z8.s, p7/M, z8.s, z21.s\n"
"fmin z9.s, p7/M, z9.s, z21.s\n"
@@ -342,17 +344,17 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"b 58f\n"
"15:" // Height 2
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"16:" // Height 2: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x21, ALL, MUL #5\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
- "cntw x21, ALL, MUL #5\n"
"add x28, x9, x20, LSL #1\n"
"add x27, x28, x20, LSL #1\n"
"add x20, x27, x20, LSL #1\n"
@@ -360,20 +362,20 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 17f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x27, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x28, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 17f\n"
"mov x11, x12\n"
"17:" // Height 2: B setup done
@@ -392,19 +394,19 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"cbz x15, 18f\n"
"ld1w { z8.s }, p7/Z, [x15]\n"
"ld1w { z9.s }, p7/Z, [x15, #1, MUL VL]\n"
- "zip2 z14.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p7/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p7/Z, [x15, #3, MUL VL]\n"
- "zip2 z15.d, z9.d, z9.d\n"
- "zip1 z9.d, z9.d, z9.d\n"
"ld1w { z12.s }, p7/Z, [x15, #4, MUL VL]\n"
"ld1w { z13.s }, p7/Z, [x15, #5, MUL VL]\n"
+ "addvl x15, x15, #6\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x15, x15, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -413,25 +415,25 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"18:" // Height 2: no bias
"tbz %x[flags], #0, 19f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x13, x20, LSL #2\n"
- "ld1w { z16.s }, p6/Z, [x13]\n"
- "ld1w { z17.s }, p5/Z, [x13, #1, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x13, #2, MUL VL]\n"
+ "ld1w { z25.s }, p6/Z, [x13]\n"
+ "ld1w { z24.s }, p5/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x13, #2, MUL VL]\n"
"ld1w { z22.s }, p3/Z, [x13, #3, MUL VL]\n"
"ld1w { z21.s }, p2/Z, [x13, #4, MUL VL]\n"
+ "add x20, x13, x20, LSL #2\n"
"ld1w { z20.s }, p1/Z, [x13, #5, MUL VL]\n"
"ld1w { z14.s }, p6/Z, [x20]\n"
- "zip1 z8.d, z16.d, z14.d\n"
- "zip2 z14.d, z16.d, z14.d\n"
"ld1w { z15.s }, p5/Z, [x20, #1, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x20, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z15.d\n"
- "zip2 z15.d, z17.d, z15.d\n"
"ld1w { z17.s }, p3/Z, [x20, #3, MUL VL]\n"
"ld1w { z18.s }, p2/Z, [x20, #4, MUL VL]\n"
- "zip1 z10.d, z19.d, z16.d\n"
- "zip2 z16.d, z19.d, z16.d\n"
"ld1w { z19.s }, p1/Z, [x20, #5, MUL VL]\n"
+ "zip1 z8.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z9.d, z24.d, z15.d\n"
+ "zip2 z15.d, z24.d, z15.d\n"
+ "zip1 z10.d, z23.d, z16.d\n"
+ "zip2 z16.d, z23.d, z16.d\n"
"zip1 z11.d, z22.d, z17.d\n"
"zip2 z17.d, z22.d, z17.d\n"
"zip1 z12.d, z21.d, z18.d\n"
@@ -456,8 +458,8 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"mov x26, #0x0\n"
"21:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 22f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -476,87 +478,87 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"ble 25f\n"
"24:" // Height 2: Multiply loop: Main loop head
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z23.h }, p7/Z, [x12]\n"
+ "ld1h { z22.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "sub x25, x25, #0x4\n"
+ "ld1h { z21.h }, p7/Z, [x11]\n"
+ "ld1h { z25.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "cmp x25, #0x4\n"
+ "addvl x12, x12, #2\n"
+ "addvl x11, x11, #2\n"
"ld1rqw { z24.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z20.s }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
".inst 0x658abf18 // bfcvt z24.h, p7/M, z24.s\n"
".inst 0x658abe94 // bfcvt z20.h, p7/M, z20.s\n"
"uzp1 z24.h, z24.h, z24.h\n"
- "ld1h { z23.h }, p7/Z, [x12]\n"
- "ld1h { z22.h }, p7/Z, [x12, #1, MUL VL]\n"
"uzp1 z20.h, z20.h, z20.h\n"
"trn1 z24.d, z24.d, z20.d\n"
- "ld1h { z21.h }, p7/Z, [x11]\n"
- "ld1h { z20.h }, p7/Z, [x11, #1, MUL VL]\n"
".inst 0x6477e708 // bfmmla z8.s, z24.h, z23.h\n"
- ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
"ld1h { z23.h }, p7/Z, [x10]\n"
+ ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
"ld1h { z22.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6475e709 // bfmmla z9.s, z24.h, z21.h\n"
- ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x9]\n"
+ ".inst 0x6479e70f // bfmmla z15.s, z24.h, z25.h\n"
"ld1h { z20.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "addvl x9, x9, #2\n"
".inst 0x6477e70a // bfmmla z10.s, z24.h, z23.h\n"
- ".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
"ld1h { z23.h }, p7/Z, [x28]\n"
+ ".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
"ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
- ".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x27]\n"
+ ".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
"ld1h { z20.h }, p7/Z, [x27, #1, MUL VL]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
+ "addvl x27, x27, #2\n"
".inst 0x6477e70c // bfmmla z12.s, z24.h, z23.h\n"
".inst 0x6476e712 // bfmmla z18.s, z24.h, z22.h\n"
".inst 0x6475e70d // bfmmla z13.s, z24.h, z21.h\n"
".inst 0x6474e713 // bfmmla z19.s, z24.h, z20.h\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
- "addvl x12, x12, #2\n"
- "addvl x11, x11, #2\n"
- "addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- "addvl x28, x28, #2\n"
- "addvl x27, x27, #2\n"
"bgt 24b\n"
"25:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z23.h }, p7/Z, [x12]\n"
+ "ld1h { z22.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
+ "ld1h { z21.h }, p7/Z, [x11]\n"
+ "ld1h { z25.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
"ld1rqw { z24.s }, p0/Z, [x24]\n"
"ld1rqw { z20.s }, p0/Z, [x23]\n"
".inst 0x658abf18 // bfcvt z24.h, p7/M, z24.s\n"
".inst 0x658abe94 // bfcvt z20.h, p7/M, z20.s\n"
"uzp1 z24.h, z24.h, z24.h\n"
- "ld1h { z23.h }, p7/Z, [x12]\n"
- "ld1h { z22.h }, p7/Z, [x12, #1, MUL VL]\n"
"uzp1 z20.h, z20.h, z20.h\n"
"trn1 z24.d, z24.d, z20.d\n"
- "ld1h { z21.h }, p7/Z, [x11]\n"
- "ld1h { z20.h }, p7/Z, [x11, #1, MUL VL]\n"
".inst 0x6477e708 // bfmmla z8.s, z24.h, z23.h\n"
- ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
"ld1h { z23.h }, p7/Z, [x10]\n"
+ ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
"ld1h { z22.h }, p7/Z, [x10, #1, MUL VL]\n"
+ "addvl x10, x10, #2\n"
".inst 0x6475e709 // bfmmla z9.s, z24.h, z21.h\n"
- ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x9]\n"
+ ".inst 0x6479e70f // bfmmla z15.s, z24.h, z25.h\n"
"ld1h { z20.h }, p7/Z, [x9, #1, MUL VL]\n"
+ "addvl x9, x9, #2\n"
".inst 0x6477e70a // bfmmla z10.s, z24.h, z23.h\n"
- ".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
"ld1h { z23.h }, p7/Z, [x28]\n"
+ ".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
"ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
- ".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x27]\n"
+ ".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
"ld1h { z20.h }, p7/Z, [x27, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
".inst 0x6477e70c // bfmmla z12.s, z24.h, z23.h\n"
".inst 0x6476e712 // bfmmla z18.s, z24.h, z22.h\n"
- "addvl x12, x12, #2\n"
- "addvl x11, x11, #2\n"
".inst 0x6475e70d // bfmmla z13.s, z24.h, z21.h\n"
".inst 0x6474e713 // bfmmla z19.s, z24.h, z20.h\n"
- "addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- "addvl x28, x28, #2\n"
- "addvl x27, x27, #2\n"
"26:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -565,21 +567,21 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
- "add x23, x13, x20, LSL #2\n"
"uzp1 z14.d, z9.d, z15.d\n"
"uzp2 z9.d, z9.d, z15.d\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
"uzp1 z16.d, z11.d, z17.d\n"
"uzp2 z11.d, z11.d, z17.d\n"
+ "add x24, x13, x20, LSL #2\n"
"uzp1 z17.d, z12.d, z18.d\n"
"uzp2 z12.d, z12.d, z18.d\n"
"uzp1 z18.d, z13.d, z19.d\n"
"uzp2 z13.d, z13.d, z19.d\n"
"tbz %x[flags], #1, 27f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z20.s }, p7/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z20.s }, p7/Z, [x21]\n"
"ld1rw { z19.s }, p7/Z, [x20]\n"
"fmin z4.s, p7/M, z4.s, z20.s\n"
"fmin z14.s, p7/M, z14.s, z20.s\n"
@@ -613,12 +615,12 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"st1w { z17.s }, p2, [x13, #4, MUL VL]\n"
"st1w { z18.s }, p1, [x13, #5, MUL VL]\n"
"addvl x13, x13, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
+ "st1w { z8.s }, p6, [x24]\n"
+ "st1w { z9.s }, p5, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x24, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x24, #5, MUL VL]\n"
"28:" // Height 2: Writeback done
"decw x14, ALL, MUL #6\n"
"cmp x14, XZR\n"
@@ -626,17 +628,17 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"b 58f\n"
"29:" // Height 3
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x15, %x[bias]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x13, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"30:" // Height 3: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x21, ALL, MUL #5\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
- "cntw x21, ALL, MUL #5\n"
"add x28, x9, x20, LSL #1\n"
"add x27, x28, x20, LSL #1\n"
"add x20, x27, x20, LSL #1\n"
@@ -644,20 +646,20 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 31f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x27, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x28, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 31f\n"
"mov x11, x12\n"
"31:" // Height 3: B setup done
@@ -676,19 +678,19 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"cbz x15, 32f\n"
"ld1w { z8.s }, p7/Z, [x15]\n"
"ld1w { z9.s }, p7/Z, [x15, #1, MUL VL]\n"
- "zip2 z14.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p7/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p7/Z, [x15, #3, MUL VL]\n"
- "zip2 z15.d, z9.d, z9.d\n"
- "zip1 z9.d, z9.d, z9.d\n"
"ld1w { z12.s }, p7/Z, [x15, #4, MUL VL]\n"
"ld1w { z13.s }, p7/Z, [x15, #5, MUL VL]\n"
+ "addvl x15, x15, #6\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x15, x15, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -709,38 +711,38 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"32:" // Height 3: no bias
"tbz %x[flags], #0, 33f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z22.s }, p6/Z, [x13]\n"
+ "ld1w { z24.s }, p5/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z0.s }, p4/Z, [x13, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x13, #3, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x13, #4, MUL VL]\n"
"add x21, x13, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p6/Z, [x13]\n"
- "ld1w { z17.s }, p5/Z, [x13, #1, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x13, #2, MUL VL]\n"
- "ld1w { z22.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z24.s }, p2/Z, [x13, #4, MUL VL]\n"
"ld1w { z20.s }, p1/Z, [x13, #5, MUL VL]\n"
"ld1w { z14.s }, p6/Z, [x21]\n"
- "zip1 z8.d, z16.d, z14.d\n"
- "zip2 z14.d, z16.d, z14.d\n"
"ld1w { z15.s }, p5/Z, [x21, #1, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x21, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z15.d\n"
- "zip2 z15.d, z17.d, z15.d\n"
"ld1w { z17.s }, p3/Z, [x21, #3, MUL VL]\n"
"ld1w { z18.s }, p2/Z, [x21, #4, MUL VL]\n"
- "zip1 z10.d, z19.d, z16.d\n"
- "zip2 z16.d, z19.d, z16.d\n"
"ld1w { z19.s }, p1/Z, [x21, #5, MUL VL]\n"
"ld1w { z21.s }, p6/Z, [x20]\n"
- "zip1 z11.d, z22.d, z17.d\n"
- "zip2 z17.d, z22.d, z17.d\n"
+ "zip1 z8.d, z22.d, z14.d\n"
+ "zip2 z14.d, z22.d, z14.d\n"
"ld1w { z22.s }, p5/Z, [x20, #1, MUL VL]\n"
"ld1w { z23.s }, p4/Z, [x20, #2, MUL VL]\n"
- "zip1 z12.d, z24.d, z18.d\n"
- "zip2 z18.d, z24.d, z18.d\n"
+ "zip1 z9.d, z24.d, z15.d\n"
+ "zip2 z15.d, z24.d, z15.d\n"
"ld1w { z24.s }, p3/Z, [x20, #3, MUL VL]\n"
"ld1w { z25.s }, p2/Z, [x20, #4, MUL VL]\n"
+ "zip1 z10.d, z0.d, z16.d\n"
+ "zip2 z16.d, z0.d, z16.d\n"
+ "ld1w { z0.s }, p1/Z, [x20, #5, MUL VL]\n"
+ "zip1 z11.d, z2.d, z17.d\n"
+ "zip2 z17.d, z2.d, z17.d\n"
+ "zip1 z12.d, z1.d, z18.d\n"
+ "zip2 z18.d, z1.d, z18.d\n"
"zip1 z13.d, z20.d, z19.d\n"
"zip2 z19.d, z20.d, z19.d\n"
- "ld1w { z0.s }, p1/Z, [x20, #5, MUL VL]\n"
"zip1 z20.d, z21.d, z26.d\n"
"zip2 z26.d, z21.d, z26.d\n"
"zip1 z21.d, z22.d, z27.d\n"
@@ -783,8 +785,8 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"mov x26, #0x0\n"
"35:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 36f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -806,111 +808,111 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"ble 39f\n"
"38:" // Height 3: Multiply loop: Main loop head
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z3.h }, p7/Z, [x12]\n"
+ "ld1h { z2.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "sub x25, x25, #0x4\n"
+ "ld1h { z1.h }, p7/Z, [x11]\n"
+ "ld1h { z6.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "cmp x25, #0x4\n"
+ "addvl x12, x12, #2\n"
+ "addvl x11, x11, #2\n"
"ld1rqw { z5.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x23]\n"
- ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
"ld1rqw { z4.s }, p0/Z, [x22]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "ld1h { z3.h }, p7/Z, [x12]\n"
- "uzp1 z0.h, z0.h, z0.h\n"
".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
- "ld1h { z2.h }, p7/Z, [x12, #1, MUL VL]\n"
- "ld1h { z1.h }, p7/Z, [x11]\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
"trn1 z5.d, z5.d, z0.d\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "ld1h { z0.h }, p7/Z, [x11, #1, MUL VL]\n"
".inst 0x6463e4a8 // bfmmla z8.s, z5.h, z3.h\n"
".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
- ".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
"ld1h { z3.h }, p7/Z, [x10]\n"
- "sub x25, x25, #0x4\n"
+ ".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
- ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x10, #1, MUL VL]\n"
- "cmp x25, #0x4\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
".inst 0x6461e495 // bfmmla z21.s, z4.h, z1.h\n"
- ".inst 0x6460e4af // bfmmla z15.s, z5.h, z0.h\n"
+ ".inst 0x6466e4af // bfmmla z15.s, z5.h, z6.h\n"
"ld1h { z1.h }, p7/Z, [x9]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6460e49b // bfmmla z27.s, z4.h, z0.h\n"
+ ".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
"ld1h { z0.h }, p7/Z, [x9, #1, MUL VL]\n"
".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
- "add x23, x23, #0x10\n"
+ "addvl x9, x9, #2\n"
".inst 0x6463e496 // bfmmla z22.s, z4.h, z3.h\n"
".inst 0x6462e4b0 // bfmmla z16.s, z5.h, z2.h\n"
"ld1h { z3.h }, p7/Z, [x28]\n"
- "add x22, x22, #0x10\n"
".inst 0x6462e49c // bfmmla z28.s, z4.h, z2.h\n"
".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
- "addvl x12, x12, #2\n"
+ "addvl x28, x28, #2\n"
".inst 0x6461e497 // bfmmla z23.s, z4.h, z1.h\n"
".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
"ld1h { z1.h }, p7/Z, [x27]\n"
- "addvl x11, x11, #2\n"
".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x27, #1, MUL VL]\n"
".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
- "addvl x10, x10, #2\n"
+ "addvl x27, x27, #2\n"
".inst 0x6463e498 // bfmmla z24.s, z4.h, z3.h\n"
".inst 0x6462e4b2 // bfmmla z18.s, z5.h, z2.h\n"
- "addvl x9, x9, #2\n"
- "addvl x28, x28, #2\n"
".inst 0x6462e49e // bfmmla z30.s, z4.h, z2.h\n"
".inst 0x6461e4ad // bfmmla z13.s, z5.h, z1.h\n"
- "addvl x27, x27, #2\n"
".inst 0x6461e499 // bfmmla z25.s, z4.h, z1.h\n"
".inst 0x6460e4b3 // bfmmla z19.s, z5.h, z0.h\n"
".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
"bgt 38b\n"
"39:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z3.h }, p7/Z, [x12]\n"
+ "ld1h { z2.h }, p7/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
+ "ld1h { z6.h }, p7/Z, [x11]\n"
+ "ld1h { z1.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
"ld1rqw { z5.s }, p0/Z, [x24]\n"
"ld1rqw { z0.s }, p0/Z, [x23]\n"
- ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
"ld1rqw { z4.s }, p0/Z, [x22]\n"
+ ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "ld1h { z3.h }, p7/Z, [x12]\n"
"uzp1 z0.h, z0.h, z0.h\n"
".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
- "ld1h { z2.h }, p7/Z, [x12, #1, MUL VL]\n"
- "ld1h { z1.h }, p7/Z, [x11]\n"
"trn1 z5.d, z5.d, z0.d\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "ld1h { z0.h }, p7/Z, [x11, #1, MUL VL]\n"
".inst 0x6463e4a8 // bfmmla z8.s, z5.h, z3.h\n"
- ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
+ ".inst 0x6466e4a9 // bfmmla z9.s, z5.h, z6.h\n"
+ ".inst 0x6461e4af // bfmmla z15.s, z5.h, z1.h\n"
+ ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
"ld1h { z3.h }, p7/Z, [x10]\n"
- "addvl x12, x12, #2\n"
".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
- ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x10, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
- ".inst 0x6461e495 // bfmmla z21.s, z4.h, z1.h\n"
- ".inst 0x6460e4af // bfmmla z15.s, z5.h, z0.h\n"
+ ".inst 0x6466e495 // bfmmla z21.s, z4.h, z6.h\n"
+ ".inst 0x6461e49b // bfmmla z27.s, z4.h, z1.h\n"
"ld1h { z1.h }, p7/Z, [x9]\n"
- "addvl x10, x10, #2\n"
- ".inst 0x6460e49b // bfmmla z27.s, z4.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
+ "addvl x10, x10, #2\n"
"addvl x9, x9, #2\n"
+ ".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
".inst 0x6463e496 // bfmmla z22.s, z4.h, z3.h\n"
".inst 0x6462e4b0 // bfmmla z16.s, z5.h, z2.h\n"
- "ld1h { z3.h }, p7/Z, [x28]\n"
".inst 0x6462e49c // bfmmla z28.s, z4.h, z2.h\n"
- ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
+ "ld1h { z3.h }, p7/Z, [x28]\n"
"ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
- "addvl x28, x28, #2\n"
+ ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
".inst 0x6461e497 // bfmmla z23.s, z4.h, z1.h\n"
- ".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
"ld1h { z1.h }, p7/Z, [x27]\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x27, #1, MUL VL]\n"
- ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
"addvl x27, x27, #2\n"
+ ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
".inst 0x6463e498 // bfmmla z24.s, z4.h, z3.h\n"
".inst 0x6462e4b2 // bfmmla z18.s, z5.h, z2.h\n"
".inst 0x6462e49e // bfmmla z30.s, z4.h, z2.h\n"
@@ -924,16 +926,16 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"cmp x26, x20\n"
"bne 35b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
"uzp1 z14.d, z9.d, z15.d\n"
"uzp2 z9.d, z9.d, z15.d\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
+ "add x24, x13, x20, LSL #2\n"
"uzp1 z16.d, z11.d, z17.d\n"
"uzp2 z11.d, z11.d, z17.d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z17.d, z12.d, z18.d\n"
"uzp2 z12.d, z12.d, z18.d\n"
"uzp1 z18.d, z13.d, z19.d\n"
@@ -945,9 +947,9 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z24.d, z24.d, z30.d\n"
"uzp1 z25.d, z25.d, z31.d\n"
"tbz %x[flags], #1, 41f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x21]\n"
"ld1rw { z19.s }, p7/Z, [x20]\n"
"fmin z4.s, p7/M, z4.s, z0.s\n"
"fmin z14.s, p7/M, z14.s, z0.s\n"
@@ -993,18 +995,18 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"st1w { z17.s }, p2, [x13, #4, MUL VL]\n"
"st1w { z18.s }, p1, [x13, #5, MUL VL]\n"
"addvl x13, x13, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
- "st1w { z20.s }, p6, [x22]\n"
- "st1w { z21.s }, p5, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p4, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p2, [x22, #4, MUL VL]\n"
- "st1w { z25.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z8.s }, p6, [x24]\n"
+ "st1w { z9.s }, p5, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x24, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x24, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x23]\n"
+ "st1w { z21.s }, p5, [x23, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x23, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x23, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x23, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x23, #5, MUL VL]\n"
"42:" // Height 3: Writeback done
"decw x14, ALL, MUL #6\n"
"cmp x14, XZR\n"
@@ -1012,20 +1014,21 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"b 58f\n"
"43:" // Height 4
"ldr x20, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x15, [%x[args_ptr], %[offsetof_bias]]\n"
+ "mov x21, #0x10\n"
"ldr x14, [%x[args_ptr], %[offsetof_N]]\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov x21, #0x10\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "mov x15, %x[bias]\n"
- "mov x13, %x[output_ptr]\n"
- "madd %x[output_ptr], x20, x21, %x[output_ptr]\n"
+ "ldr x13, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "madd x21, x20, x21, x13\n"
+ "str x21, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 4: Column loop
"ldr x12, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x21, ALL, MUL #5\n"
"add x11, x12, x20, LSL #1\n"
"add x10, x11, x20, LSL #1\n"
"add x9, x10, x20, LSL #1\n"
- "cntw x21, ALL, MUL #5\n"
"add x28, x9, x20, LSL #1\n"
"add x27, x28, x20, LSL #1\n"
"add x20, x27, x20, LSL #1\n"
@@ -1033,20 +1036,20 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"bgt 45f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x27, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x28, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x9, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"decw x21\n"
- "cmp x14, x21\n"
"mov x10, x12\n"
+ "cmp x14, x21\n"
"bgt 45f\n"
"mov x11, x12\n"
"45:" // Height 4: B setup done
@@ -1065,19 +1068,19 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"cbz x15, 46f\n"
"ld1w { z8.s }, p7/Z, [x15]\n"
"ld1w { z9.s }, p7/Z, [x15, #1, MUL VL]\n"
- "zip2 z14.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p7/Z, [x15, #2, MUL VL]\n"
"ld1w { z11.s }, p7/Z, [x15, #3, MUL VL]\n"
- "zip2 z15.d, z9.d, z9.d\n"
- "zip1 z9.d, z9.d, z9.d\n"
"ld1w { z12.s }, p7/Z, [x15, #4, MUL VL]\n"
"ld1w { z13.s }, p7/Z, [x15, #5, MUL VL]\n"
+ "addvl x15, x15, #6\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x15, x15, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -1098,51 +1101,51 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"46:" // Height 4: no bias
"tbz %x[flags], #0, 47f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z22.s }, p6/Z, [x13]\n"
+ "ld1w { z24.s }, p5/Z, [x13, #1, MUL VL]\n"
+ "ld1w { z26.s }, p4/Z, [x13, #2, MUL VL]\n"
+ "ld1w { z27.s }, p3/Z, [x13, #3, MUL VL]\n"
+ "ld1w { z29.s }, p2/Z, [x13, #4, MUL VL]\n"
"add x22, x13, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "ld1w { z16.s }, p6/Z, [x13]\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p5/Z, [x13, #1, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x13, #2, MUL VL]\n"
- "ld1w { z22.s }, p3/Z, [x13, #3, MUL VL]\n"
- "ld1w { z24.s }, p2/Z, [x13, #4, MUL VL]\n"
"ld1w { z20.s }, p1/Z, [x13, #5, MUL VL]\n"
"ld1w { z14.s }, p6/Z, [x22]\n"
- "zip1 z8.d, z16.d, z14.d\n"
- "zip2 z14.d, z16.d, z14.d\n"
"ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z15.d\n"
- "zip2 z15.d, z17.d, z15.d\n"
"ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
"ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
- "zip1 z10.d, z19.d, z16.d\n"
- "zip2 z16.d, z19.d, z16.d\n"
"ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
"ld1w { z21.s }, p6/Z, [x21]\n"
- "zip1 z11.d, z22.d, z17.d\n"
- "zip2 z17.d, z22.d, z17.d\n"
+ "zip1 z8.d, z22.d, z14.d\n"
+ "zip2 z14.d, z22.d, z14.d\n"
"ld1w { z22.s }, p5/Z, [x21, #1, MUL VL]\n"
"ld1w { z23.s }, p4/Z, [x21, #2, MUL VL]\n"
- "zip1 z12.d, z24.d, z18.d\n"
- "zip2 z18.d, z24.d, z18.d\n"
+ "zip1 z9.d, z24.d, z15.d\n"
+ "zip2 z15.d, z24.d, z15.d\n"
"ld1w { z24.s }, p3/Z, [x21, #3, MUL VL]\n"
"ld1w { z25.s }, p2/Z, [x21, #4, MUL VL]\n"
- "zip1 z13.d, z20.d, z19.d\n"
- "zip2 z19.d, z20.d, z19.d\n"
+ "zip1 z10.d, z26.d, z16.d\n"
+ "zip2 z16.d, z26.d, z16.d\n"
"ld1w { z0.s }, p1/Z, [x21, #5, MUL VL]\n"
"ld1w { z26.s }, p6/Z, [x20]\n"
- "zip1 z20.d, z21.d, z26.d\n"
- "zip2 z26.d, z21.d, z26.d\n"
+ "zip1 z11.d, z27.d, z17.d\n"
+ "zip2 z17.d, z27.d, z17.d\n"
"ld1w { z27.s }, p5/Z, [x20, #1, MUL VL]\n"
"ld1w { z28.s }, p4/Z, [x20, #2, MUL VL]\n"
- "zip1 z21.d, z22.d, z27.d\n"
- "zip2 z27.d, z22.d, z27.d\n"
+ "zip1 z12.d, z29.d, z18.d\n"
+ "zip2 z18.d, z29.d, z18.d\n"
"ld1w { z29.s }, p3/Z, [x20, #3, MUL VL]\n"
"ld1w { z30.s }, p2/Z, [x20, #4, MUL VL]\n"
+ "zip1 z13.d, z20.d, z19.d\n"
+ "zip2 z19.d, z20.d, z19.d\n"
+ "ld1w { z31.s }, p1/Z, [x20, #5, MUL VL]\n"
+ "zip1 z20.d, z21.d, z26.d\n"
+ "zip2 z26.d, z21.d, z26.d\n"
+ "zip1 z21.d, z22.d, z27.d\n"
+ "zip2 z27.d, z22.d, z27.d\n"
"zip1 z22.d, z23.d, z28.d\n"
"zip2 z28.d, z23.d, z28.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #5, MUL VL]\n"
"zip1 z23.d, z24.d, z29.d\n"
"zip2 z29.d, z24.d, z29.d\n"
"zip1 z24.d, z25.d, z30.d\n"
@@ -1179,8 +1182,8 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"mov x26, #0x0\n"
"49:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1205,144 +1208,144 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"ble 53f\n"
"52:" // Height 4: Multiply loop: Main loop head
"whilelt p0.s, XZR, x25\n"
- "ld1rqw { z7.s }, p0/Z, [x24]\n"
- "ld1rqw { z6.s }, p0/Z, [x23]\n"
- ".inst 0x658abce7 // bfcvt z7.h, p7/M, z7.s\n"
- "ld1rqw { z5.s }, p0/Z, [x22]\n"
- "ld1rqw { z4.s }, p0/Z, [x21]\n"
- ".inst 0x658abcc6 // bfcvt z6.h, p7/M, z6.s\n"
- ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
- ".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
- "uzp1 z7.h, z7.h, z7.h\n"
"ld1h { z3.h }, p7/Z, [x12]\n"
"ld1h { z2.h }, p7/Z, [x12, #1, MUL VL]\n"
- "uzp1 z6.h, z6.h, z6.h\n"
+ "sub x25, x25, #0x4\n"
+ "ld1h { z7.h }, p7/Z, [x11]\n"
+ "ld1h { z6.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "cmp x25, #0x4\n"
+ "addvl x12, x12, #2\n"
+ "addvl x11, x11, #2\n"
+ "ld1rqw { z5.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "ld1rqw { z4.s }, p0/Z, [x22]\n"
+ "ld1rqw { z0.s }, p0/Z, [x21]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ ".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "ld1h { z1.h }, p7/Z, [x11]\n"
- "ld1h { z0.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "trn1 z7.d, z7.d, z6.d\n"
- ".inst 0x6463e4e8 // bfmmla z8.s, z7.h, z3.h\n"
- "sub x25, x25, #0x4\n"
- "trn1 z5.d, z5.d, z4.d\n"
- ".inst 0x6463e4b4 // bfmmla z20.s, z5.h, z3.h\n"
- ".inst 0x6462e4ee // bfmmla z14.s, z7.h, z2.h\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "trn1 z5.d, z5.d, z1.d\n"
+ "trn1 z4.d, z4.d, z0.d\n"
+ ".inst 0x6463e4a8 // bfmmla z8.s, z5.h, z3.h\n"
+ ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
"ld1h { z3.h }, p7/Z, [x10]\n"
- ".inst 0x6462e4ba // bfmmla z26.s, z5.h, z2.h\n"
- ".inst 0x6461e4e9 // bfmmla z9.s, z7.h, z1.h\n"
+ ".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
+ ".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
"ld1h { z2.h }, p7/Z, [x10, #1, MUL VL]\n"
- "cmp x25, #0x4\n"
- ".inst 0x6461e4b5 // bfmmla z21.s, z5.h, z1.h\n"
- ".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
+ "addvl x10, x10, #2\n"
+ ".inst 0x6467e4a9 // bfmmla z9.s, z5.h, z7.h\n"
+ ".inst 0x6467e495 // bfmmla z21.s, z4.h, z7.h\n"
+ ".inst 0x6466e4af // bfmmla z15.s, z5.h, z6.h\n"
"ld1h { z1.h }, p7/Z, [x9]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6460e4bb // bfmmla z27.s, z5.h, z0.h\n"
+ ".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
"ld1h { z0.h }, p7/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6463e4ea // bfmmla z10.s, z7.h, z3.h\n"
- "add x23, x23, #0x10\n"
- ".inst 0x6463e4b6 // bfmmla z22.s, z5.h, z3.h\n"
- ".inst 0x6462e4f0 // bfmmla z16.s, z7.h, z2.h\n"
+ ".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
+ "addvl x9, x9, #2\n"
+ ".inst 0x6463e496 // bfmmla z22.s, z4.h, z3.h\n"
+ ".inst 0x6462e4b0 // bfmmla z16.s, z5.h, z2.h\n"
"ld1h { z3.h }, p7/Z, [x28]\n"
- "add x22, x22, #0x10\n"
- ".inst 0x6462e4bc // bfmmla z28.s, z5.h, z2.h\n"
- ".inst 0x6461e4eb // bfmmla z11.s, z7.h, z1.h\n"
+ ".inst 0x6462e49c // bfmmla z28.s, z4.h, z2.h\n"
+ ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
- "add x21, x21, #0x10\n"
- ".inst 0x6461e4b7 // bfmmla z23.s, z5.h, z1.h\n"
- ".inst 0x6460e4f1 // bfmmla z17.s, z7.h, z0.h\n"
+ "addvl x28, x28, #2\n"
+ ".inst 0x6461e497 // bfmmla z23.s, z4.h, z1.h\n"
+ ".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
"ld1h { z1.h }, p7/Z, [x27]\n"
- "addvl x12, x12, #2\n"
- ".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
+ ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x27, #1, MUL VL]\n"
- ".inst 0x6463e4ec // bfmmla z12.s, z7.h, z3.h\n"
- "addvl x11, x11, #2\n"
- ".inst 0x6463e4b8 // bfmmla z24.s, z5.h, z3.h\n"
- ".inst 0x6462e4f2 // bfmmla z18.s, z7.h, z2.h\n"
- "addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- ".inst 0x6462e4be // bfmmla z30.s, z5.h, z2.h\n"
- ".inst 0x6461e4ed // bfmmla z13.s, z7.h, z1.h\n"
- "addvl x28, x28, #2\n"
+ ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
"addvl x27, x27, #2\n"
- ".inst 0x6461e4b9 // bfmmla z25.s, z5.h, z1.h\n"
- ".inst 0x6460e4f3 // bfmmla z19.s, z7.h, z0.h\n"
- ".inst 0x6460e4bf // bfmmla z31.s, z5.h, z0.h\n"
+ ".inst 0x6463e498 // bfmmla z24.s, z4.h, z3.h\n"
+ ".inst 0x6462e4b2 // bfmmla z18.s, z5.h, z2.h\n"
+ ".inst 0x6462e49e // bfmmla z30.s, z4.h, z2.h\n"
+ ".inst 0x6461e4ad // bfmmla z13.s, z5.h, z1.h\n"
+ ".inst 0x6461e499 // bfmmla z25.s, z4.h, z1.h\n"
+ ".inst 0x6460e4b3 // bfmmla z19.s, z5.h, z0.h\n"
+ ".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
"bgt 52b\n"
"53:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x25\n"
- "ld1rqw { z7.s }, p0/Z, [x24]\n"
- "ld1rqw { z6.s }, p0/Z, [x23]\n"
- ".inst 0x658abce7 // bfcvt z7.h, p7/M, z7.s\n"
- "ld1rqw { z5.s }, p0/Z, [x22]\n"
- "ld1rqw { z4.s }, p0/Z, [x21]\n"
- ".inst 0x658abcc6 // bfcvt z6.h, p7/M, z6.s\n"
- ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
- ".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
- "uzp1 z7.h, z7.h, z7.h\n"
"ld1h { z3.h }, p7/Z, [x12]\n"
"ld1h { z2.h }, p7/Z, [x12, #1, MUL VL]\n"
- "uzp1 z6.h, z6.h, z6.h\n"
+ "addvl x12, x12, #2\n"
+ "ld1h { z7.h }, p7/Z, [x11]\n"
+ "ld1h { z6.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "addvl x11, x11, #2\n"
+ "ld1rqw { z5.s }, p0/Z, [x24]\n"
+ "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "ld1rqw { z4.s }, p0/Z, [x22]\n"
+ "ld1rqw { z0.s }, p0/Z, [x21]\n"
+ ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ ".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "ld1h { z1.h }, p7/Z, [x11]\n"
- "ld1h { z0.h }, p7/Z, [x11, #1, MUL VL]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "trn1 z7.d, z7.d, z6.d\n"
- ".inst 0x6463e4e8 // bfmmla z8.s, z7.h, z3.h\n"
- "addvl x12, x12, #2\n"
- "trn1 z5.d, z5.d, z4.d\n"
- ".inst 0x6463e4b4 // bfmmla z20.s, z5.h, z3.h\n"
- ".inst 0x6462e4ee // bfmmla z14.s, z7.h, z2.h\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "trn1 z5.d, z5.d, z1.d\n"
+ "trn1 z4.d, z4.d, z0.d\n"
+ ".inst 0x6463e4a8 // bfmmla z8.s, z5.h, z3.h\n"
+ ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
"ld1h { z3.h }, p7/Z, [x10]\n"
- ".inst 0x6462e4ba // bfmmla z26.s, z5.h, z2.h\n"
- ".inst 0x6461e4e9 // bfmmla z9.s, z7.h, z1.h\n"
+ ".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
+ ".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
"ld1h { z2.h }, p7/Z, [x10, #1, MUL VL]\n"
- "addvl x11, x11, #2\n"
- ".inst 0x6461e4b5 // bfmmla z21.s, z5.h, z1.h\n"
- ".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
- "ld1h { z1.h }, p7/Z, [x9]\n"
"addvl x10, x10, #2\n"
- ".inst 0x6460e4bb // bfmmla z27.s, z5.h, z0.h\n"
+ ".inst 0x6467e4a9 // bfmmla z9.s, z5.h, z7.h\n"
+ ".inst 0x6467e495 // bfmmla z21.s, z4.h, z7.h\n"
+ ".inst 0x6466e4af // bfmmla z15.s, z5.h, z6.h\n"
+ "ld1h { z1.h }, p7/Z, [x9]\n"
+ ".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
"ld1h { z0.h }, p7/Z, [x9, #1, MUL VL]\n"
- ".inst 0x6463e4ea // bfmmla z10.s, z7.h, z3.h\n"
+ ".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
"addvl x9, x9, #2\n"
- ".inst 0x6463e4b6 // bfmmla z22.s, z5.h, z3.h\n"
- ".inst 0x6462e4f0 // bfmmla z16.s, z7.h, z2.h\n"
+ ".inst 0x6463e496 // bfmmla z22.s, z4.h, z3.h\n"
+ ".inst 0x6462e4b0 // bfmmla z16.s, z5.h, z2.h\n"
"ld1h { z3.h }, p7/Z, [x28]\n"
- ".inst 0x6462e4bc // bfmmla z28.s, z5.h, z2.h\n"
- ".inst 0x6461e4eb // bfmmla z11.s, z7.h, z1.h\n"
+ ".inst 0x6462e49c // bfmmla z28.s, z4.h, z2.h\n"
+ ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
"addvl x28, x28, #2\n"
- ".inst 0x6461e4b7 // bfmmla z23.s, z5.h, z1.h\n"
- ".inst 0x6460e4f1 // bfmmla z17.s, z7.h, z0.h\n"
+ ".inst 0x6461e497 // bfmmla z23.s, z4.h, z1.h\n"
+ ".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
"ld1h { z1.h }, p7/Z, [x27]\n"
- ".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
+ ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x27, #1, MUL VL]\n"
- ".inst 0x6463e4ec // bfmmla z12.s, z7.h, z3.h\n"
+ ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
"addvl x27, x27, #2\n"
- ".inst 0x6463e4b8 // bfmmla z24.s, z5.h, z3.h\n"
- ".inst 0x6462e4f2 // bfmmla z18.s, z7.h, z2.h\n"
- ".inst 0x6462e4be // bfmmla z30.s, z5.h, z2.h\n"
- ".inst 0x6461e4ed // bfmmla z13.s, z7.h, z1.h\n"
- ".inst 0x6461e4b9 // bfmmla z25.s, z5.h, z1.h\n"
- ".inst 0x6460e4f3 // bfmmla z19.s, z7.h, z0.h\n"
- ".inst 0x6460e4bf // bfmmla z31.s, z5.h, z0.h\n"
+ ".inst 0x6463e498 // bfmmla z24.s, z4.h, z3.h\n"
+ ".inst 0x6462e4b2 // bfmmla z18.s, z5.h, z2.h\n"
+ ".inst 0x6462e49e // bfmmla z30.s, z4.h, z2.h\n"
+ ".inst 0x6461e4ad // bfmmla z13.s, z5.h, z1.h\n"
+ ".inst 0x6461e499 // bfmmla z25.s, z4.h, z1.h\n"
+ ".inst 0x6460e4b3 // bfmmla z19.s, z5.h, z0.h\n"
+ ".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
"54:" // Height 4: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
"cmp x26, x20\n"
"bne 49b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x13, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
"uzp1 z14.d, z9.d, z15.d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z15.d\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
+ "add x24, x13, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z16.d, z11.d, z17.d\n"
"uzp2 z11.d, z11.d, z17.d\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 z17.d, z12.d, z18.d\n"
"uzp2 z12.d, z12.d, z18.d\n"
"uzp1 z18.d, z13.d, z19.d\n"
@@ -1360,9 +1363,9 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z30.d, z25.d, z31.d\n"
"uzp2 z25.d, z25.d, z31.d\n"
"tbz %x[flags], #1, 55f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p7/Z, [x21]\n"
"ld1rw { z0.s }, p7/Z, [x20]\n"
"fmin z4.s, p7/M, z4.s, z1.s\n"
"fmin z14.s, p7/M, z14.s, z1.s\n"
@@ -1420,24 +1423,24 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"st1w { z17.s }, p2, [x13, #4, MUL VL]\n"
"st1w { z18.s }, p1, [x13, #5, MUL VL]\n"
"addvl x13, x13, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
- "st1w { z19.s }, p6, [x22]\n"
- "st1w { z26.s }, p5, [x22, #1, MUL VL]\n"
- "st1w { z27.s }, p4, [x22, #2, MUL VL]\n"
- "st1w { z28.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #4, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z20.s }, p6, [x21]\n"
- "st1w { z21.s }, p5, [x21, #1, MUL VL]\n"
- "st1w { z22.s }, p4, [x21, #2, MUL VL]\n"
- "st1w { z23.s }, p3, [x21, #3, MUL VL]\n"
- "st1w { z24.s }, p2, [x21, #4, MUL VL]\n"
- "st1w { z25.s }, p1, [x21, #5, MUL VL]\n"
+ "st1w { z8.s }, p6, [x24]\n"
+ "st1w { z9.s }, p5, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x24, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x24, #5, MUL VL]\n"
+ "st1w { z19.s }, p6, [x23]\n"
+ "st1w { z26.s }, p5, [x23, #1, MUL VL]\n"
+ "st1w { z27.s }, p4, [x23, #2, MUL VL]\n"
+ "st1w { z28.s }, p3, [x23, #3, MUL VL]\n"
+ "st1w { z29.s }, p2, [x23, #4, MUL VL]\n"
+ "st1w { z30.s }, p1, [x23, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x22]\n"
+ "st1w { z21.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x22, #5, MUL VL]\n"
"56:" // Height 4: Writeback done
"decw x14, ALL, MUL #6\n"
"cmp x14, XZR\n"
@@ -1454,8 +1457,8 @@ void sve_ffhybrid_fp32bf16fp32_mmla_4x6VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
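The constraint-list change closing this hunk is the substantive edit to the file: bias and output_ptr are no longer bound as standalone asm operands but live as KernelArgs fields, reached through the single args_ptr register via offsetof immediates (offsetof_bias, offsetof_output_ptr), which frees two general-purpose registers for the kernel body. A reduced sketch of that pattern follows; the struct and function names are illustrative, and only the mechanism matches the diff.

#include <cstddef>

// Hedged sketch: kernel parameters travel through one struct pointer,
// and the asm block loads each field with a compile-time byte offset
// baked in as an immediate ("I" constraint), as in the diff's
// offsetof_* operands. AArch64 only.
struct KernelArgs {
    const float *bias = nullptr;
    float *output_ptr = nullptr;
};

inline float *load_output_ptr(const KernelArgs &ka) {
    float *out;
    __asm__ __volatile__(
        "ldr %x[out], [%x[args_ptr], %[off]]\n"
        : [out] "=r" (out)
        : [args_ptr] "r" (&ka), [off] "I" (offsetof(KernelArgs, output_ptr))
        : "memory");
    return out;
}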
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL.hpp
new file mode 100644
index 0000000000..70ed2ee4c7
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL.hpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../bfloat.hpp"
+#include "../kernel_weight_format.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const bfloat16 *, const bfloat16 *, size_t, \
+ float *, int, size_t, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_ffinterleaved_bf16fp32_dot_8x3VL( ARGLIST );
+
+class cls_sve_ffinterleaved_bf16fp32_dot_8x3VL
+{
+public:
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<float>() * 3;
+ }
+ static unsigned int stripe_width()
+ {
+ return get_vector_length<float>();
+ }
+
+ static KernelWeightFormat kernel_weight_format()
+ {
+ return KernelWeightFormat::VL1VL_BL32;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 2;
+ }
+
+
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 2, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 2, 1, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, bfloat16>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 20.92, 7.74, 4.14 };
+ }
+ }
+
+
+ if (std::is_same<T, float>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 20.92, 5.18, 4.37 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_ffinterleaved_bf16fp32_dot_8x3VL;
+ cls_sve_ffinterleaved_bf16fp32_dot_8x3VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
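A note on the blocking parameters above: out_height() is a compile-time constant, but out_width() and stripe_width() scale with the runtime SVE vector length, so the tile shape this kernel reports differs across hardware. A standalone sketch of that relationship; the sample vector lengths are illustrative, whereas the library queries the hardware through get_vector_length<float>().

#include <cstdio>

// out_width() = floats-per-vector * 3, mirroring the class above. SVE
// permits vector lengths from 128 to 2048 bits in 128-bit increments.
static unsigned int out_width_for(unsigned int vl_bits) {
    const unsigned int floats_per_vector = vl_bits / 32u;
    return floats_per_vector * 3u;
}

int main() {
    for (unsigned int vl_bits : {128u, 256u, 512u}) {
        std::printf("SVE %u-bit -> out_width %u floats\n",
                    vl_bits, out_width_for(vl_bits));
    }
    return 0;
}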
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL/generic.cpp
new file mode 100644
index 0000000000..26192718b5
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_dot_8x3VL/generic.cpp
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstddef>
+#include "../../bfloat.hpp"
+
+namespace arm_gemm {
+
+void sve_ffinterleaved_bf16fp32_dot_8x3VL(
+ const bfloat16 *Apanel,
+ const bfloat16 *Bpanel,
+ size_t B_stride,
+ float *Cpanel,
+ int ablocks,
+ size_t N,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const bfloat16 *Bpanel = {};
+ size_t N = {};
+ size_t B_stride = {};
+ const bfloat16 *cur_B_ptr = {};
+ } ka;
+
+ ka.K = (K/2) - 1;
+ ka.Bpanel = Bpanel;
+ ka.N = N;
+ ka.B_stride = B_stride;
+
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "1:" // Height loop
+ "ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "ldr x26, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov x25, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "2:" // Width loop
+ "ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
+ "cntw x23, ALL, MUL #2\n"
+ "mov %x[Apanel], x25\n"
+ "add x22, x24, x20, LSL #1\n"
+ "cmp x26, x23\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
+ "bgt 3f\n"
+ "decw x23\n"
+ "mov x21, x24\n"
+ "cmp x26, x23\n"
+ "bgt 3f\n"
+ "mov x22, x24\n"
+ "3:" // B setup done
+ "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "mov z8.b, #0x0\n"
+ "mov z9.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ "mov z12.b, #0x0\n"
+ "mov z13.b, #0x0\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
+ "cmp x20, #0x2\n"
+ "mov z14.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z17.b, #0x0\n"
+ "ld1h { z4.h }, p0/Z, [x24]\n"
+ "mov z18.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "ld1h { z5.h }, p0/Z, [x22]\n"
+ "mov z20.b, #0x0\n"
+ "mov z21.b, #0x0\n"
+ "ld1h { z6.h }, p0/Z, [x21]\n"
+ "mov z22.b, #0x0\n"
+ "mov z23.b, #0x0\n"
+ "mov z24.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z28.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z30.b, #0x0\n"
+ "mov z31.b, #0x0\n"
+ "blt 5f\n"
+ "4:" // main loop head
+ ".inst 0x64604088 // bfdot z8.s, z4.h, z0.h[0]\n"
+ ".inst 0x6468408b // bfdot z11.s, z4.h, z0.h[1]\n"
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel], #32]\n"
+ ".inst 0x6470408e // bfdot z14.s, z4.h, z0.h[2]\n"
+ ".inst 0x64784091 // bfdot z17.s, z4.h, z0.h[3]\n"
+ "ld1rqh { z7.h }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x64614094 // bfdot z20.s, z4.h, z1.h[0]\n"
+ ".inst 0x64694097 // bfdot z23.s, z4.h, z1.h[1]\n"
+ "sub x20, x20, #0x2\n"
+ ".inst 0x6471409a // bfdot z26.s, z4.h, z1.h[2]\n"
+ ".inst 0x6479409d // bfdot z29.s, z4.h, z1.h[3]\n"
+ "ld1h { z4.h }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x646040a9 // bfdot z9.s, z5.h, z0.h[0]\n"
+ ".inst 0x646840ac // bfdot z12.s, z5.h, z0.h[1]\n"
+ "addvl x24, x24, #2\n"
+ ".inst 0x647040af // bfdot z15.s, z5.h, z0.h[2]\n"
+ ".inst 0x647840b2 // bfdot z18.s, z5.h, z0.h[3]\n"
+ "cmp x20, #0x2\n"
+ ".inst 0x646140b5 // bfdot z21.s, z5.h, z1.h[0]\n"
+ ".inst 0x646940b8 // bfdot z24.s, z5.h, z1.h[1]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x647140bb // bfdot z27.s, z5.h, z1.h[2]\n"
+ ".inst 0x647940be // bfdot z30.s, z5.h, z1.h[3]\n"
+ "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
+ ".inst 0x646840cd // bfdot z13.s, z6.h, z0.h[1]\n"
+ "addvl x22, x22, #2\n"
+ ".inst 0x647040d0 // bfdot z16.s, z6.h, z0.h[2]\n"
+ ".inst 0x647840d3 // bfdot z19.s, z6.h, z0.h[3]\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ ".inst 0x646140d6 // bfdot z22.s, z6.h, z1.h[0]\n"
+ ".inst 0x646940d9 // bfdot z25.s, z6.h, z1.h[1]\n"
+ ".inst 0x647140dc // bfdot z28.s, z6.h, z1.h[2]\n"
+ ".inst 0x647940df // bfdot z31.s, z6.h, z1.h[3]\n"
+ "ld1h { z2.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "addvl x21, x21, #2\n"
+ ".inst 0x64634088 // bfdot z8.s, z4.h, z3.h[0]\n"
+ ".inst 0x646b408b // bfdot z11.s, z4.h, z3.h[1]\n"
+ ".inst 0x6473408e // bfdot z14.s, z4.h, z3.h[2]\n"
+ ".inst 0x647b4091 // bfdot z17.s, z4.h, z3.h[3]\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
+ ".inst 0x64674094 // bfdot z20.s, z4.h, z7.h[0]\n"
+ ".inst 0x646f4097 // bfdot z23.s, z4.h, z7.h[1]\n"
+ ".inst 0x6477409a // bfdot z26.s, z4.h, z7.h[2]\n"
+ ".inst 0x647f409d // bfdot z29.s, z4.h, z7.h[3]\n"
+ "ld1h { z4.h }, p0/Z, [x24]\n"
+ ".inst 0x646340a9 // bfdot z9.s, z5.h, z3.h[0]\n"
+ ".inst 0x646b40ac // bfdot z12.s, z5.h, z3.h[1]\n"
+ ".inst 0x647340af // bfdot z15.s, z5.h, z3.h[2]\n"
+ ".inst 0x647b40b2 // bfdot z18.s, z5.h, z3.h[3]\n"
+ ".inst 0x646740b5 // bfdot z21.s, z5.h, z7.h[0]\n"
+ ".inst 0x646f40b8 // bfdot z24.s, z5.h, z7.h[1]\n"
+ ".inst 0x647740bb // bfdot z27.s, z5.h, z7.h[2]\n"
+ ".inst 0x647f40be // bfdot z30.s, z5.h, z7.h[3]\n"
+ "ld1h { z5.h }, p0/Z, [x22]\n"
+ ".inst 0x6463404a // bfdot z10.s, z2.h, z3.h[0]\n"
+ ".inst 0x646b404d // bfdot z13.s, z2.h, z3.h[1]\n"
+ ".inst 0x64734050 // bfdot z16.s, z2.h, z3.h[2]\n"
+ ".inst 0x647b4053 // bfdot z19.s, z2.h, z3.h[3]\n"
+ ".inst 0x64674056 // bfdot z22.s, z2.h, z7.h[0]\n"
+ ".inst 0x646f4059 // bfdot z25.s, z2.h, z7.h[1]\n"
+ ".inst 0x6477405c // bfdot z28.s, z2.h, z7.h[2]\n"
+ ".inst 0x647f405f // bfdot z31.s, z2.h, z7.h[3]\n"
+ "ld1h { z6.h }, p0/Z, [x21]\n"
+ "bge 4b\n"
+ "5:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "addvl x24, x24, #1\n"
+ ".inst 0x64604088 // bfdot z8.s, z4.h, z0.h[0]\n"
+ "addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
+ ".inst 0x6468408b // bfdot z11.s, z4.h, z0.h[1]\n"
+ ".inst 0x6470408e // bfdot z14.s, z4.h, z0.h[2]\n"
+ ".inst 0x64784091 // bfdot z17.s, z4.h, z0.h[3]\n"
+ ".inst 0x64614094 // bfdot z20.s, z4.h, z1.h[0]\n"
+ ".inst 0x64694097 // bfdot z23.s, z4.h, z1.h[1]\n"
+ ".inst 0x6471409a // bfdot z26.s, z4.h, z1.h[2]\n"
+ ".inst 0x6479409d // bfdot z29.s, z4.h, z1.h[3]\n"
+ ".inst 0x646040a9 // bfdot z9.s, z5.h, z0.h[0]\n"
+ ".inst 0x646840ac // bfdot z12.s, z5.h, z0.h[1]\n"
+ ".inst 0x647040af // bfdot z15.s, z5.h, z0.h[2]\n"
+ ".inst 0x647840b2 // bfdot z18.s, z5.h, z0.h[3]\n"
+ ".inst 0x646140b5 // bfdot z21.s, z5.h, z1.h[0]\n"
+ ".inst 0x646940b8 // bfdot z24.s, z5.h, z1.h[1]\n"
+ ".inst 0x647140bb // bfdot z27.s, z5.h, z1.h[2]\n"
+ ".inst 0x647940be // bfdot z30.s, z5.h, z1.h[3]\n"
+ ".inst 0x646040ca // bfdot z10.s, z6.h, z0.h[0]\n"
+ ".inst 0x646840cd // bfdot z13.s, z6.h, z0.h[1]\n"
+ ".inst 0x647040d0 // bfdot z16.s, z6.h, z0.h[2]\n"
+ ".inst 0x647840d3 // bfdot z19.s, z6.h, z0.h[3]\n"
+ ".inst 0x646140d6 // bfdot z22.s, z6.h, z1.h[0]\n"
+ ".inst 0x646940d9 // bfdot z25.s, z6.h, z1.h[1]\n"
+ ".inst 0x647140dc // bfdot z28.s, z6.h, z1.h[2]\n"
+ ".inst 0x647940df // bfdot z31.s, z6.h, z1.h[3]\n"
+ "cbz x20, 6f\n"
+ "ld1rqh { z4.h }, p0/Z, [%x[Apanel]]\n"
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel], #16]\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "ld1h { z2.h }, p0/Z, [x24]\n"
+ "ld1h { z1.h }, p0/Z, [x22]\n"
+ "ld1h { z0.h }, p0/Z, [x21]\n"
+ ".inst 0x64644048 // bfdot z8.s, z2.h, z4.h[0]\n"
+ ".inst 0x646c404b // bfdot z11.s, z2.h, z4.h[1]\n"
+ ".inst 0x6474404e // bfdot z14.s, z2.h, z4.h[2]\n"
+ ".inst 0x647c4051 // bfdot z17.s, z2.h, z4.h[3]\n"
+ ".inst 0x64634054 // bfdot z20.s, z2.h, z3.h[0]\n"
+ ".inst 0x646b4057 // bfdot z23.s, z2.h, z3.h[1]\n"
+ ".inst 0x6473405a // bfdot z26.s, z2.h, z3.h[2]\n"
+ ".inst 0x647b405d // bfdot z29.s, z2.h, z3.h[3]\n"
+ ".inst 0x64644029 // bfdot z9.s, z1.h, z4.h[0]\n"
+ ".inst 0x646c402c // bfdot z12.s, z1.h, z4.h[1]\n"
+ ".inst 0x6474402f // bfdot z15.s, z1.h, z4.h[2]\n"
+ ".inst 0x647c4032 // bfdot z18.s, z1.h, z4.h[3]\n"
+ ".inst 0x64634035 // bfdot z21.s, z1.h, z3.h[0]\n"
+ ".inst 0x646b4038 // bfdot z24.s, z1.h, z3.h[1]\n"
+ ".inst 0x6473403b // bfdot z27.s, z1.h, z3.h[2]\n"
+ ".inst 0x647b403e // bfdot z30.s, z1.h, z3.h[3]\n"
+ ".inst 0x6464400a // bfdot z10.s, z0.h, z4.h[0]\n"
+ ".inst 0x646c400d // bfdot z13.s, z0.h, z4.h[1]\n"
+ ".inst 0x64744010 // bfdot z16.s, z0.h, z4.h[2]\n"
+ ".inst 0x647c4013 // bfdot z19.s, z0.h, z4.h[3]\n"
+ ".inst 0x64634016 // bfdot z22.s, z0.h, z3.h[0]\n"
+ ".inst 0x646b4019 // bfdot z25.s, z0.h, z3.h[1]\n"
+ ".inst 0x6473401c // bfdot z28.s, z0.h, z3.h[2]\n"
+ ".inst 0x647b401f // bfdot z31.s, z0.h, z3.h[3]\n"
+ "6:" // multiply loop done
+ "decw x26, ALL, MUL #3\n"
+ "st1w { z8.s }, p0, [%x[Cpanel]]\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "cmp x26, XZR\n"
+ "st1w { z12.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z13.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1w { z20.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z21.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z22.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
+ "st1w { z24.s }, p0, [%x[Cpanel]]\n"
+ "st1w { z25.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z26.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z28.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z29.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z30.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z31.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #8\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_B_stride] "I" (offsetof(KernelArgs, B_stride)), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_cur_B_ptr] "I" (offsetof(KernelArgs, cur_B_ptr))
+ : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
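The kernel body above is mostly raw bfdot encodings emitted as .inst words, presumably so that assemblers without BF16 support can still build the file. Per 32-bit accumulator lane, bfdot zd.s, zn.h, zm.h[i] adds the dot product of one bf16 pair from zn with the pair selected by index i from the matching 128-bit segment of zm; the loop counter is kept in units of K pairs (note ka.K = (K/2) - 1 above), with the last step peeled out of the main loop. A scalar reference of the lane arithmetic, hedged in that bf16 is modeled simply as the top half of a binary32 and real hardware may round or fuse differently:

#include <cstdint>
#include <cstring>

// bf16 modeled as the upper 16 bits of an IEEE-754 binary32 value.
static float bf16_to_f32(uint16_t h) {
    uint32_t bits = static_cast<uint32_t>(h) << 16;
    float f;
    std::memcpy(&f, &bits, sizeof(f));
    return f;
}

// One accumulator lane of BFDOT: acc += a0*b0 + a1*b1, where (a0, a1)
// is the bf16 pair held in the zn lane and (b0, b1) the pair picked by
// the [i] index from zm. The kernel issues this for i = 0..3 on each of
// two A registers to cover its eight output rows.
static float bfdot_lane(float acc,
                        uint16_t a0, uint16_t a1,
                        uint16_t b0, uint16_t b1) {
    return acc + bf16_to_f32(a0) * bf16_to_f32(b0)
               + bf16_to_f32(a1) * bf16_to_f32(b1);
}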
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp
index 1fe5f48da6..8695a9b53c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp
@@ -41,7 +41,8 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL( ARGLIST );
class cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -72,8 +73,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 6, 4, 2> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 6, 4, 2, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 4, 2> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 4, 2, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
@@ -88,10 +89,8 @@ public:
if (std::is_same<T, float>::value) {
switch (ci->get_cpu_model()) {
- case CPUModel::V1:
- return { 53.48, 4.23, 6.53 };
- default:
- return { 29.07, 2.76, 5.39 };
+ default:
+ return { 39.66, 5.18, 4.37 };
}
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
index 576bd47039..69adb67a6a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,52 +55,52 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x26, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x25, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
"cntw x23, ALL, MUL #2\n"
+ "mov %x[Apanel], x25\n"
"add x22, x24, x20, LSL #1\n"
+ "cmp x26, x23\n"
"add x21, x22, x20, LSL #1\n"
"add x20, x21, x20, LSL #1\n"
- "cmp x26, x23\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x25\n"
"bgt 3f\n"
"decw x23\n"
- "cmp x26, x23\n"
"mov x21, x24\n"
+ "cmp x26, x23\n"
"bgt 3f\n"
"mov x22, x24\n"
"3:" // B setup done
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x24]\n"
"mov z11.b, #0x0\n"
+ "ld1h { z4.h }, p0/Z, [x24]\n"
"mov z12.b, #0x0\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z13.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
+ "cmp x20, #0x2\n"
"mov z14.b, #0x0\n"
- "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
- "ld1h { z5.h }, p0/Z, [x24, #1, MUL VL]\n"
"mov z17.b, #0x0\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z18.b, #0x0\n"
- "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
"mov z19.b, #0x0\n"
+ "ld1h { z5.h }, p0/Z, [x24, #1, MUL VL]\n"
"mov z20.b, #0x0\n"
- "addvl x24, x24, #2\n"
"mov z21.b, #0x0\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
"mov z22.b, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
"mov z23.b, #0x0\n"
+ "addvl x24, x24, #2\n"
"mov z24.b, #0x0\n"
"mov z25.b, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"mov z28.b, #0x0\n"
@@ -114,78 +114,78 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
".inst 0x6465e431 // bfmmla z17.s, z1.h, z5.h\n"
- "ld1h { z7.h }, p0/Z, [x22]\n"
+ "ld1h { z3.h }, p0/Z, [x22]\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
- "ld1h { z3.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
".inst 0x6464e4da // bfmmla z26.s, z6.h, z4.h\n"
".inst 0x6465e4dd // bfmmla z29.s, z6.h, z5.h\n"
- "ld1h { z5.h }, p0/Z, [x21]\n"
- "ld1h { z4.h }, p0/Z, [x21, #1, MUL VL]\n"
- ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6463e40c // bfmmla z12.s, z0.h, z3.h\n"
- ".inst 0x6467e42f // bfmmla z15.s, z1.h, z7.h\n"
- ".inst 0x6463e432 // bfmmla z18.s, z1.h, z3.h\n"
+ "ld1h { z4.h }, p0/Z, [x21]\n"
+ "ld1h { z5.h }, p0/Z, [x21, #1, MUL VL]\n"
+ ".inst 0x6463e409 // bfmmla z9.s, z0.h, z3.h\n"
"sub x20, x20, #0x2\n"
- ".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
- ".inst 0x6463e458 // bfmmla z24.s, z2.h, z3.h\n"
+ ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
+ ".inst 0x6463e42f // bfmmla z15.s, z1.h, z3.h\n"
"cmp x20, #0x2\n"
- ".inst 0x6467e4db // bfmmla z27.s, z6.h, z7.h\n"
- ".inst 0x6463e4de // bfmmla z30.s, z6.h, z3.h\n"
+ ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
+ ".inst 0x6463e455 // bfmmla z21.s, z2.h, z3.h\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
+ ".inst 0x6463e4db // bfmmla z27.s, z6.h, z3.h\n"
"ld1h { z3.h }, p0/Z, [x24]\n"
- ".inst 0x6465e40a // bfmmla z10.s, z0.h, z5.h\n"
- ".inst 0x6464e40d // bfmmla z13.s, z0.h, z4.h\n"
+ ".inst 0x6467e4de // bfmmla z30.s, z6.h, z7.h\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ "ld1h { z7.h }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x6465e40d // bfmmla z13.s, z0.h, z5.h\n"
+ ".inst 0x6464e430 // bfmmla z16.s, z1.h, z4.h\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel], #16]\n"
- ".inst 0x6465e430 // bfmmla z16.s, z1.h, z5.h\n"
- ".inst 0x6464e433 // bfmmla z19.s, z1.h, z4.h\n"
+ ".inst 0x6465e433 // bfmmla z19.s, z1.h, z5.h\n"
+ ".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #32]\n"
- ".inst 0x6465e456 // bfmmla z22.s, z2.h, z5.h\n"
- ".inst 0x6464e459 // bfmmla z25.s, z2.h, z4.h\n"
- "ld1h { z7.h }, p0/Z, [x24, #1, MUL VL]\n"
- ".inst 0x6465e4dc // bfmmla z28.s, z6.h, z5.h\n"
- ".inst 0x6464e4df // bfmmla z31.s, z6.h, z4.h\n"
- "ld1rqh { z5.h }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x6465e459 // bfmmla z25.s, z2.h, z5.h\n"
+ ".inst 0x6464e4dc // bfmmla z28.s, z6.h, z4.h\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x6465e4df // bfmmla z31.s, z6.h, z5.h\n"
"ld1rqh { z6.h }, p0/Z, [%x[Apanel], #64]\n"
".inst 0x6463e408 // bfmmla z8.s, z0.h, z3.h\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6463e42e // bfmmla z14.s, z1.h, z3.h\n"
+ "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
".inst 0x6467e431 // bfmmla z17.s, z1.h, z7.h\n"
- "ld1h { z2.h }, p0/Z, [x22, #2, MUL VL]\n"
- ".inst 0x6463e4b4 // bfmmla z20.s, z5.h, z3.h\n"
- ".inst 0x6467e4b7 // bfmmla z23.s, z5.h, z7.h\n"
- "ld1h { z4.h }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x6463e454 // bfmmla z20.s, z2.h, z3.h\n"
+ "ld1h { z5.h }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
".inst 0x6463e4da // bfmmla z26.s, z6.h, z3.h\n"
- ".inst 0x6467e4dd // bfmmla z29.s, z6.h, z7.h\n"
"ld1h { z3.h }, p0/Z, [x21, #2, MUL VL]\n"
+ ".inst 0x6467e4dd // bfmmla z29.s, z6.h, z7.h\n"
"ld1h { z7.h }, p0/Z, [x21, #3, MUL VL]\n"
- ".inst 0x6462e409 // bfmmla z9.s, z0.h, z2.h\n"
- ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- ".inst 0x6462e42f // bfmmla z15.s, z1.h, z2.h\n"
- ".inst 0x6464e432 // bfmmla z18.s, z1.h, z4.h\n"
+ ".inst 0x6464e409 // bfmmla z9.s, z0.h, z4.h\n"
+ ".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
+ ".inst 0x6464e42f // bfmmla z15.s, z1.h, z4.h\n"
"addvl x22, x22, #4\n"
- ".inst 0x6462e4b5 // bfmmla z21.s, z5.h, z2.h\n"
- ".inst 0x6464e4b8 // bfmmla z24.s, z5.h, z4.h\n"
+ ".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
+ ".inst 0x6464e455 // bfmmla z21.s, z2.h, z4.h\n"
"addvl x21, x21, #4\n"
- ".inst 0x6462e4db // bfmmla z27.s, z6.h, z2.h\n"
- ".inst 0x6464e4de // bfmmla z30.s, z6.h, z4.h\n"
+ ".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
+ ".inst 0x6464e4db // bfmmla z27.s, z6.h, z4.h\n"
"ld1h { z4.h }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0x6465e4de // bfmmla z30.s, z6.h, z5.h\n"
".inst 0x6463e40a // bfmmla z10.s, z0.h, z3.h\n"
+ "ld1h { z5.h }, p0/Z, [x24, #3, MUL VL]\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x6463e430 // bfmmla z16.s, z1.h, z3.h\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x6467e433 // bfmmla z19.s, z1.h, z7.h\n"
+ ".inst 0x6463e456 // bfmmla z22.s, z2.h, z3.h\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #96]\n"
- ".inst 0x6463e4b6 // bfmmla z22.s, z5.h, z3.h\n"
- ".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
- "ld1h { z5.h }, p0/Z, [x24, #3, MUL VL]\n"
+ ".inst 0x6467e459 // bfmmla z25.s, z2.h, z7.h\n"
".inst 0x6463e4dc // bfmmla z28.s, z6.h, z3.h\n"
- ".inst 0x6467e4df // bfmmla z31.s, z6.h, z7.h\n"
"ld1rqh { z2.h }, p0/Z, [%x[Apanel], #112]\n"
+ ".inst 0x6467e4df // bfmmla z31.s, z6.h, z7.h\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
"addvl x24, x24, #4\n"
"bge 4b\n"
"5:" // main loop skip
- "ld1rqh { z7.h }, p0/Z, [%x[Apanel]]\n"
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
@@ -193,52 +193,52 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
"ld1h { z6.h }, p0/Z, [x22]\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
- "ld1h { z3.h }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x6464e4fa // bfmmla z26.s, z7.h, z4.h\n"
- ".inst 0x6465e4fd // bfmmla z29.s, z7.h, z5.h\n"
+ "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x6464e47a // bfmmla z26.s, z3.h, z4.h\n"
+ ".inst 0x6465e47d // bfmmla z29.s, z3.h, z5.h\n"
"ld1h { z5.h }, p0/Z, [x21]\n"
"ld1h { z4.h }, p0/Z, [x21, #1, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- ".inst 0x6463e40c // bfmmla z12.s, z0.h, z3.h\n"
- ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- ".inst 0x6463e432 // bfmmla z18.s, z1.h, z3.h\n"
"add %x[Apanel], %x[Apanel], #0x10\n"
- ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- ".inst 0x6463e458 // bfmmla z24.s, z2.h, z3.h\n"
+ ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
"addvl x22, x22, #2\n"
- ".inst 0x6466e4fb // bfmmla z27.s, z7.h, z6.h\n"
- ".inst 0x6463e4fe // bfmmla z30.s, z7.h, z3.h\n"
+ ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
"addvl x21, x21, #2\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
+ ".inst 0x6466e47b // bfmmla z27.s, z3.h, z6.h\n"
+ ".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
".inst 0x6465e40a // bfmmla z10.s, z0.h, z5.h\n"
".inst 0x6464e40d // bfmmla z13.s, z0.h, z4.h\n"
".inst 0x6465e430 // bfmmla z16.s, z1.h, z5.h\n"
".inst 0x6464e433 // bfmmla z19.s, z1.h, z4.h\n"
".inst 0x6465e456 // bfmmla z22.s, z2.h, z5.h\n"
".inst 0x6464e459 // bfmmla z25.s, z2.h, z4.h\n"
- ".inst 0x6465e4fc // bfmmla z28.s, z7.h, z5.h\n"
- ".inst 0x6464e4ff // bfmmla z31.s, z7.h, z4.h\n"
+ ".inst 0x6465e47c // bfmmla z28.s, z3.h, z5.h\n"
+ ".inst 0x6464e47f // bfmmla z31.s, z3.h, z4.h\n"
"cbz x20, 6f\n"
"ld1h { z1.h }, p0/Z, [x24]\n"
"ld1rqh { z7.h }, p0/Z, [%x[Apanel]]\n"
- ".inst 0x6461e4e8 // bfmmla z8.s, z7.h, z1.h\n"
"ld1rqh { z6.h }, p0/Z, [%x[Apanel], #16]\n"
"ld1h { z0.h }, p0/Z, [x24, #1, MUL VL]\n"
- ".inst 0x6460e4eb // bfmmla z11.s, z7.h, z0.h\n"
"ld1rqh { z5.h }, p0/Z, [%x[Apanel], #32]\n"
"ld1rqh { z4.h }, p0/Z, [%x[Apanel], #48]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x6461e4e8 // bfmmla z8.s, z7.h, z1.h\n"
+ "ld1h { z3.h }, p0/Z, [x22]\n"
+ "ld1h { z2.h }, p0/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x6460e4eb // bfmmla z11.s, z7.h, z0.h\n"
".inst 0x6461e4ce // bfmmla z14.s, z6.h, z1.h\n"
".inst 0x6460e4d1 // bfmmla z17.s, z6.h, z0.h\n"
".inst 0x6461e4b4 // bfmmla z20.s, z5.h, z1.h\n"
- "ld1h { z3.h }, p0/Z, [x22]\n"
".inst 0x6460e4b7 // bfmmla z23.s, z5.h, z0.h\n"
".inst 0x6461e49a // bfmmla z26.s, z4.h, z1.h\n"
- "ld1h { z2.h }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z1.h }, p0/Z, [x21]\n"
+ ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z0.h }, p0/Z, [x21, #1, MUL VL]\n"
".inst 0x6463e4e9 // bfmmla z9.s, z7.h, z3.h\n"
".inst 0x6462e4ec // bfmmla z12.s, z7.h, z2.h\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6463e4cf // bfmmla z15.s, z6.h, z3.h\n"
".inst 0x6462e4d2 // bfmmla z18.s, z6.h, z2.h\n"
".inst 0x6463e4b5 // bfmmla z21.s, z5.h, z3.h\n"
@@ -255,53 +255,53 @@ void sve_ffinterleaved_bf16fp32_mmla_8x3VL(
".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
"6:" // multiply loop done
"decw x26, ALL, MUL #3\n"
- "uzp1 z0.d, z8.d, z11.d\n"
+ "uzp1 z2.d, z8.d, z11.d\n"
"uzp2 z8.d, z8.d, z11.d\n"
"uzp1 z1.d, z9.d, z12.d\n"
"uzp2 z9.d, z9.d, z12.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel]]\n"
"uzp1 z0.d, z10.d, z13.d\n"
"uzp2 z10.d, z10.d, z13.d\n"
- "st1w { z1.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
- "uzp1 z2.d, z14.d, z17.d\n"
+ "st1w { z2.s }, p0, [%x[Cpanel]]\n"
+ "uzp1 z3.d, z14.d, z17.d\n"
"uzp2 z14.d, z14.d, z17.d\n"
- "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
- "uzp1 z1.d, z15.d, z18.d\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "uzp1 z17.d, z15.d, z18.d\n"
"cmp x26, XZR\n"
- "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"uzp2 z15.d, z15.d, z18.d\n"
- "uzp1 z17.d, z16.d, z19.d\n"
- "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "uzp1 z2.d, z16.d, z19.d\n"
+ "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
"uzp2 z16.d, z16.d, z19.d\n"
- "uzp1 z0.d, z20.d, z23.d\n"
- "st1w { z2.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "uzp1 z1.d, z20.d, z23.d\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"uzp2 z20.d, z20.d, z23.d\n"
- "uzp1 z23.d, z21.d, z24.d\n"
- "st1w { z1.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
- "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "uzp1 z0.d, z21.d, z24.d\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"uzp2 z21.d, z21.d, z24.d\n"
- "st1w { z17.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
- "uzp1 z19.d, z22.d, z25.d\n"
+ "uzp1 z23.d, z22.d, z25.d\n"
+ "st1w { z3.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
"uzp2 z22.d, z22.d, z25.d\n"
- "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
- "uzp1 z18.d, z26.d, z29.d\n"
+ "uzp1 z19.d, z26.d, z29.d\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
"uzp2 z26.d, z26.d, z29.d\n"
- "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
- "uzp1 z17.d, z27.d, z30.d\n"
+ "uzp1 z18.d, z27.d, z30.d\n"
"uzp2 z27.d, z27.d, z30.d\n"
- "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
- "uzp1 z16.d, z28.d, z31.d\n"
+ "uzp1 z17.d, z28.d, z31.d\n"
"uzp2 z28.d, z28.d, z31.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
- "st1w { z23.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
- "st1w { z19.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z2.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
"st1w { z20.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
"st1w { z21.s }, p0, [%x[Cpanel]]\n"
"st1w { z22.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
- "st1w { z18.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
- "st1w { z17.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
- "st1w { z16.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"st1w { z26.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"st1w { z27.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
"st1w { z28.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp
index 60f1b699c3..22cd8be2b0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,7 +41,8 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx( ARGLIST );
class cls_sve_ffinterleaved_fp16_mla_8x3VL
{
public:
- typedef __fp16 operand_type;
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
typedef __fp16 result_type;
typedef void (*kern_type)( ARGLIST );
@@ -72,8 +73,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 1, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
index 69ddb21c31..6749fc720a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,52 +54,52 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x26, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x25, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
"cnth x23, ALL, MUL #2\n"
+ "mov %x[Apanel], x25\n"
"add x22, x24, x20, LSL #1\n"
+ "cmp x26, x23\n"
"add x21, x22, x20, LSL #1\n"
"add x20, x21, x20, LSL #1\n"
- "cmp x26, x23\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x25\n"
"bgt 3f\n"
"dech x23\n"
- "cmp x26, x23\n"
"mov x21, x24\n"
+ "cmp x26, x23\n"
"bgt 3f\n"
"mov x22, x24\n"
"3:" // B setup done
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
- "ld1h { z0.h }, p0/Z, [x24]\n"
"mov z11.b, #0x0\n"
+ "ld1h { z0.h }, p0/Z, [x24]\n"
"mov z12.b, #0x0\n"
- "ld1h { z1.h }, p0/Z, [x22]\n"
"mov z13.b, #0x0\n"
+ "ld1h { z1.h }, p0/Z, [x22]\n"
+ "cmp x20, #0x2\n"
"mov z14.b, #0x0\n"
- "ld1h { z2.h }, p0/Z, [x21]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
- "ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
"mov z17.b, #0x0\n"
+ "ld1h { z2.h }, p0/Z, [x21]\n"
"mov z18.b, #0x0\n"
- "ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
"mov z19.b, #0x0\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
"mov z20.b, #0x0\n"
- "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
"mov z21.b, #0x0\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
"mov z22.b, #0x0\n"
- "ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
"mov z23.b, #0x0\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
"mov z24.b, #0x0\n"
"mov z25.b, #0x0\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"mov z28.b, #0x0\n"
@@ -116,12 +116,12 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"fmla z11.h, p0/M, z0.h, z4.h\n"
"fmla z12.h, p0/M, z1.h, z4.h\n"
"fmla z13.h, p0/M, z2.h, z4.h\n"
- "ld1rh { z7.h }, p0/Z, [%x[Apanel], #10]\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z14.h, p0/M, z0.h, z5.h\n"
"fmla z15.h, p0/M, z1.h, z5.h\n"
"cmp x20, #0x2\n"
"fmla z16.h, p0/M, z2.h, z5.h\n"
- "ld1rh { z4.h }, p0/Z, [%x[Apanel], #12]\n"
+ "ld1rh { z7.h }, p0/Z, [%x[Apanel], #12]\n"
"fmla z17.h, p0/M, z0.h, z6.h\n"
"fmla z18.h, p0/M, z1.h, z6.h\n"
"fmla z19.h, p0/M, z2.h, z6.h\n"
@@ -129,60 +129,60 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"fmla z20.h, p0/M, z0.h, z3.h\n"
"fmla z21.h, p0/M, z1.h, z3.h\n"
"fmla z22.h, p0/M, z2.h, z3.h\n"
- "ld1rh { z3.h }, p0/Z, [%x[Apanel], #16]\n"
- "fmla z23.h, p0/M, z0.h, z7.h\n"
- "fmla z24.h, p0/M, z1.h, z7.h\n"
- "fmla z25.h, p0/M, z2.h, z7.h\n"
- "ld1rh { z5.h }, p0/Z, [%x[Apanel], #18]\n"
- "fmla z26.h, p0/M, z0.h, z4.h\n"
- "fmla z27.h, p0/M, z1.h, z4.h\n"
- "fmla z28.h, p0/M, z2.h, z4.h\n"
- "ld1rh { z4.h }, p0/Z, [%x[Apanel], #20]\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #16]\n"
+ "fmla z23.h, p0/M, z0.h, z4.h\n"
+ "fmla z24.h, p0/M, z1.h, z4.h\n"
+ "fmla z25.h, p0/M, z2.h, z4.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #18]\n"
+ "fmla z26.h, p0/M, z0.h, z7.h\n"
+ "fmla z27.h, p0/M, z1.h, z7.h\n"
+ "fmla z28.h, p0/M, z2.h, z7.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #20]\n"
"fmla z29.h, p0/M, z0.h, z6.h\n"
"ld1h { z7.h }, p0/Z, [x24, #1, MUL VL]\n"
"fmla z30.h, p0/M, z1.h, z6.h\n"
"fmla z31.h, p0/M, z2.h, z6.h\n"
"ld1h { z6.h }, p0/Z, [x22, #1, MUL VL]\n"
"ld1h { z2.h }, p0/Z, [x21, #1, MUL VL]\n"
- "fmla z8.h, p0/M, z7.h, z3.h\n"
- "ld1rh { z1.h }, p0/Z, [%x[Apanel], #22]\n"
- "fmla z9.h, p0/M, z6.h, z3.h\n"
- "fmla z10.h, p0/M, z2.h, z3.h\n"
- "fmla z11.h, p0/M, z7.h, z5.h\n"
- "ld1rh { z3.h }, p0/Z, [%x[Apanel], #24]\n"
- "fmla z12.h, p0/M, z6.h, z5.h\n"
- "fmla z13.h, p0/M, z2.h, z5.h\n"
- "ld1rh { z5.h }, p0/Z, [%x[Apanel], #26]\n"
- "fmla z14.h, p0/M, z7.h, z4.h\n"
- "fmla z15.h, p0/M, z6.h, z4.h\n"
"addvl x24, x24, #2\n"
- "fmla z16.h, p0/M, z2.h, z4.h\n"
- "ld1rh { z0.h }, p0/Z, [%x[Apanel], #28]\n"
+ "ld1rh { z1.h }, p0/Z, [%x[Apanel], #22]\n"
+ "addvl x22, x22, #2\n"
+ "addvl x21, x21, #2\n"
+ "fmla z8.h, p0/M, z7.h, z5.h\n"
+ "fmla z11.h, p0/M, z7.h, z4.h\n"
+ "fmla z9.h, p0/M, z6.h, z5.h\n"
+ "fmla z12.h, p0/M, z6.h, z4.h\n"
+ "fmla z10.h, p0/M, z2.h, z5.h\n"
+ "fmla z13.h, p0/M, z2.h, z4.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #24]\n"
+ "fmla z14.h, p0/M, z7.h, z3.h\n"
+ "fmla z15.h, p0/M, z6.h, z3.h\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #26]\n"
+ "fmla z16.h, p0/M, z2.h, z3.h\n"
"fmla z17.h, p0/M, z7.h, z1.h\n"
+ "ld1rh { z0.h }, p0/Z, [%x[Apanel], #28]\n"
"fmla z18.h, p0/M, z6.h, z1.h\n"
"fmla z19.h, p0/M, z2.h, z1.h\n"
"ld1rh { z1.h }, p0/Z, [%x[Apanel], #30]\n"
- "addvl x22, x22, #2\n"
- "addvl x21, x21, #2\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "fmla z20.h, p0/M, z7.h, z3.h\n"
- "fmla z21.h, p0/M, z6.h, z3.h\n"
- "fmla z22.h, p0/M, z2.h, z3.h\n"
+ "fmla z20.h, p0/M, z7.h, z5.h\n"
+ "fmla z21.h, p0/M, z6.h, z5.h\n"
+ "fmla z22.h, p0/M, z2.h, z5.h\n"
+ "fmla z23.h, p0/M, z7.h, z4.h\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
- "fmla z23.h, p0/M, z7.h, z5.h\n"
- "fmla z24.h, p0/M, z6.h, z5.h\n"
- "fmla z25.h, p0/M, z2.h, z5.h\n"
- "fmla z26.h, p0/M, z7.h, z0.h\n"
+ "fmla z24.h, p0/M, z6.h, z4.h\n"
+ "fmla z25.h, p0/M, z2.h, z4.h\n"
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
+ "fmla z26.h, p0/M, z7.h, z0.h\n"
"fmla z27.h, p0/M, z6.h, z0.h\n"
"fmla z28.h, p0/M, z2.h, z0.h\n"
- "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
"fmla z29.h, p0/M, z7.h, z1.h\n"
"ld1h { z0.h }, p0/Z, [x24]\n"
"fmla z30.h, p0/M, z6.h, z1.h\n"
"fmla z31.h, p0/M, z2.h, z1.h\n"
"ld1h { z1.h }, p0/Z, [x22]\n"
"ld1h { z2.h }, p0/Z, [x21]\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
"ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
"bge 4b\n"
"5:" // main loop skip
@@ -204,12 +204,12 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"fmla z18.h, p0/M, z1.h, z6.h\n"
"fmla z19.h, p0/M, z2.h, z6.h\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel], #14]\n"
+ "addvl x21, x21, #1\n"
"fmla z20.h, p0/M, z0.h, z7.h\n"
"fmla z21.h, p0/M, z1.h, z7.h\n"
- "addvl x21, x21, #1\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z22.h, p0/M, z2.h, z7.h\n"
"fmla z23.h, p0/M, z0.h, z4.h\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z24.h, p0/M, z1.h, z4.h\n"
"fmla z25.h, p0/M, z2.h, z4.h\n"
"fmla z26.h, p0/M, z0.h, z5.h\n"
@@ -223,19 +223,19 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"ld1h { z5.h }, p0/Z, [x22]\n"
"ld1h { z4.h }, p0/Z, [x21]\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
- "fmla z8.h, p0/M, z6.h, z3.h\n"
"ld1rh { z2.h }, p0/Z, [%x[Apanel], #2]\n"
"ld1rh { z1.h }, p0/Z, [%x[Apanel], #4]\n"
- "fmla z9.h, p0/M, z5.h, z3.h\n"
"ld1rh { z0.h }, p0/Z, [%x[Apanel], #6]\n"
+ "fmla z8.h, p0/M, z6.h, z3.h\n"
+ "fmla z9.h, p0/M, z5.h, z3.h\n"
"fmla z10.h, p0/M, z4.h, z3.h\n"
"fmla z11.h, p0/M, z6.h, z2.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
"fmla z12.h, p0/M, z5.h, z2.h\n"
"fmla z13.h, p0/M, z4.h, z2.h\n"
- "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
+ "ld1rh { z2.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z14.h, p0/M, z6.h, z1.h\n"
"fmla z15.h, p0/M, z5.h, z1.h\n"
- "ld1rh { z2.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z16.h, p0/M, z4.h, z1.h\n"
"fmla z17.h, p0/M, z6.h, z0.h\n"
"ld1rh { z1.h }, p0/Z, [%x[Apanel], #12]\n"
@@ -258,10 +258,10 @@ void sve_ffinterleaved_fp16_mla_8x3VL_a64fx(
"6:" // multiply loop done
"dech x26, ALL, MUL #3\n"
"st1h { z8.h }, p0, [%x[Cpanel]]\n"
- "cmp x26, XZR\n"
"st1h { z9.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1h { z10.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1h { z11.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "cmp x26, XZR\n"
"st1h { z12.h }, p0, [%x[Cpanel], #4, MUL VL]\n"
"st1h { z13.h }, p0, [%x[Cpanel], #5, MUL VL]\n"
"st1h { z14.h }, p0, [%x[Cpanel], #6, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
index 23503fa108..204bfdd658 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp16_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,43 +54,43 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x26, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x25, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
"cnth x23, ALL, MUL #2\n"
+ "mov %x[Apanel], x25\n"
"add x22, x24, x20, LSL #1\n"
+ "cmp x26, x23\n"
"add x21, x22, x20, LSL #1\n"
"add x20, x21, x20, LSL #1\n"
- "cmp x26, x23\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x25\n"
"bgt 3f\n"
"dech x23\n"
- "cmp x26, x23\n"
"mov x21, x24\n"
+ "cmp x26, x23\n"
"bgt 3f\n"
"mov x22, x24\n"
"3:" // B setup done
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z11.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z12.b, #0x0\n"
- "ld1h { z2.h }, p0/Z, [x24]\n"
"mov z13.b, #0x0\n"
+ "ld1h { z2.h }, p0/Z, [x24]\n"
+ "cmp x20, #0x2\n"
"mov z14.b, #0x0\n"
- "ld1h { z3.h }, p0/Z, [x22]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x21]\n"
"mov z17.b, #0x0\n"
+ "ld1h { z3.h }, p0/Z, [x22]\n"
"mov z18.b, #0x0\n"
"mov z19.b, #0x0\n"
+ "ld1h { z4.h }, p0/Z, [x21]\n"
"mov z20.b, #0x0\n"
"mov z21.b, #0x0\n"
"mov z22.b, #0x0\n"
@@ -169,18 +169,18 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"fmla z31.h, z1.h, z7.h[7]\n"
"bge 4b\n"
"5:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "addvl x24, x24, #1\n"
"fmla z8.h, z2.h, z0.h[0]\n"
+ "addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
"fmla z11.h, z2.h, z0.h[1]\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z14.h, z2.h, z0.h[2]\n"
"fmla z17.h, z2.h, z0.h[3]\n"
- "addvl x24, x24, #1\n"
"fmla z20.h, z2.h, z0.h[4]\n"
"fmla z23.h, z2.h, z0.h[5]\n"
- "addvl x22, x22, #1\n"
"fmla z26.h, z2.h, z0.h[6]\n"
"fmla z29.h, z2.h, z0.h[7]\n"
- "addvl x21, x21, #1\n"
"fmla z9.h, z3.h, z0.h[0]\n"
"fmla z12.h, z3.h, z0.h[1]\n"
"fmla z15.h, z3.h, z0.h[2]\n"
@@ -200,13 +200,13 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"cbz x20, 6f\n"
"ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
"ld1h { z2.h }, p0/Z, [x24]\n"
- "fmla z8.h, z2.h, z3.h[0]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
"ld1h { z1.h }, p0/Z, [x22]\n"
"ld1h { z0.h }, p0/Z, [x21]\n"
+ "fmla z8.h, z2.h, z3.h[0]\n"
"fmla z11.h, z2.h, z3.h[1]\n"
"fmla z14.h, z2.h, z3.h[2]\n"
"fmla z17.h, z2.h, z3.h[3]\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z20.h, z2.h, z3.h[4]\n"
"fmla z23.h, z2.h, z3.h[5]\n"
"fmla z26.h, z2.h, z3.h[6]\n"
@@ -230,10 +230,10 @@ void sve_ffinterleaved_fp16_mla_8x3VL(
"6:" // multiply loop done
"dech x26, ALL, MUL #3\n"
"st1h { z8.h }, p0, [%x[Cpanel]]\n"
- "cmp x26, XZR\n"
"st1h { z9.h }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1h { z10.h }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1h { z11.h }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "cmp x26, XZR\n"
"st1h { z12.h }, p0, [%x[Cpanel], #4, MUL VL]\n"
"st1h { z13.h }, p0, [%x[Cpanel], #5, MUL VL]\n"
"st1h { z14.h }, p0, [%x[Cpanel], #6, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp
index ac6986913d..ad52e2a9b3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,7 +41,8 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx( ARGLIST );
class cls_sve_ffinterleaved_fp32_mla_8x3VL
{
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -72,8 +73,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 1, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp
index c65c3a3ce4..6135cd4bae 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,52 +54,52 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x26, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x25, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
"cntw x23, ALL, MUL #2\n"
+ "mov %x[Apanel], x25\n"
"add x22, x24, x20, LSL #2\n"
+ "cmp x26, x23\n"
"add x21, x22, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "cmp x26, x23\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x25\n"
"bgt 3f\n"
"decw x23\n"
- "cmp x26, x23\n"
"mov x21, x24\n"
+ "cmp x26, x23\n"
"bgt 3f\n"
"mov x22, x24\n"
"3:" // B setup done
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
- "ld1w { z0.s }, p0/Z, [x24]\n"
"mov z11.b, #0x0\n"
+ "ld1w { z0.s }, p0/Z, [x24]\n"
"mov z12.b, #0x0\n"
- "ld1w { z1.s }, p0/Z, [x22]\n"
"mov z13.b, #0x0\n"
+ "ld1w { z1.s }, p0/Z, [x22]\n"
+ "cmp x20, #0x2\n"
"mov z14.b, #0x0\n"
- "ld1w { z2.s }, p0/Z, [x21]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"mov z17.b, #0x0\n"
+ "ld1w { z2.s }, p0/Z, [x21]\n"
"mov z18.b, #0x0\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
"mov z19.b, #0x0\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"mov z20.b, #0x0\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"mov z21.b, #0x0\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
"mov z22.b, #0x0\n"
- "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"mov z23.b, #0x0\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"mov z24.b, #0x0\n"
"mov z25.b, #0x0\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
"mov z28.b, #0x0\n"
@@ -116,12 +116,12 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"fmla z11.s, p0/M, z0.s, z4.s\n"
"fmla z12.s, p0/M, z1.s, z4.s\n"
"fmla z13.s, p0/M, z2.s, z4.s\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #20]\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z14.s, p0/M, z0.s, z5.s\n"
"fmla z15.s, p0/M, z1.s, z5.s\n"
"cmp x20, #0x2\n"
"fmla z16.s, p0/M, z2.s, z5.s\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #24]\n"
+ "ld1rw { z7.s }, p0/Z, [%x[Apanel], #24]\n"
"fmla z17.s, p0/M, z0.s, z6.s\n"
"fmla z18.s, p0/M, z1.s, z6.s\n"
"fmla z19.s, p0/M, z2.s, z6.s\n"
@@ -129,60 +129,60 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"fmla z20.s, p0/M, z0.s, z3.s\n"
"fmla z21.s, p0/M, z1.s, z3.s\n"
"fmla z22.s, p0/M, z2.s, z3.s\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #32]\n"
- "fmla z23.s, p0/M, z0.s, z7.s\n"
- "fmla z24.s, p0/M, z1.s, z7.s\n"
- "fmla z25.s, p0/M, z2.s, z7.s\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #36]\n"
- "fmla z26.s, p0/M, z0.s, z4.s\n"
- "fmla z27.s, p0/M, z1.s, z4.s\n"
- "fmla z28.s, p0/M, z2.s, z4.s\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #40]\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #32]\n"
+ "fmla z23.s, p0/M, z0.s, z4.s\n"
+ "fmla z24.s, p0/M, z1.s, z4.s\n"
+ "fmla z25.s, p0/M, z2.s, z4.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #36]\n"
+ "fmla z26.s, p0/M, z0.s, z7.s\n"
+ "fmla z27.s, p0/M, z1.s, z7.s\n"
+ "fmla z28.s, p0/M, z2.s, z7.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #40]\n"
"fmla z29.s, p0/M, z0.s, z6.s\n"
"ld1w { z7.s }, p0/Z, [x24, #1, MUL VL]\n"
"fmla z30.s, p0/M, z1.s, z6.s\n"
"fmla z31.s, p0/M, z2.s, z6.s\n"
"ld1w { z6.s }, p0/Z, [x22, #1, MUL VL]\n"
"ld1w { z2.s }, p0/Z, [x21, #1, MUL VL]\n"
- "fmla z8.s, p0/M, z7.s, z3.s\n"
- "ld1rw { z1.s }, p0/Z, [%x[Apanel], #44]\n"
- "fmla z9.s, p0/M, z6.s, z3.s\n"
- "fmla z10.s, p0/M, z2.s, z3.s\n"
- "fmla z11.s, p0/M, z7.s, z5.s\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #48]\n"
- "fmla z12.s, p0/M, z6.s, z5.s\n"
- "fmla z13.s, p0/M, z2.s, z5.s\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #52]\n"
- "fmla z14.s, p0/M, z7.s, z4.s\n"
- "fmla z15.s, p0/M, z6.s, z4.s\n"
"addvl x24, x24, #2\n"
- "fmla z16.s, p0/M, z2.s, z4.s\n"
- "ld1rw { z0.s }, p0/Z, [%x[Apanel], #56]\n"
+ "ld1rw { z1.s }, p0/Z, [%x[Apanel], #44]\n"
+ "addvl x22, x22, #2\n"
+ "addvl x21, x21, #2\n"
+ "fmla z8.s, p0/M, z7.s, z5.s\n"
+ "fmla z11.s, p0/M, z7.s, z4.s\n"
+ "fmla z9.s, p0/M, z6.s, z5.s\n"
+ "fmla z12.s, p0/M, z6.s, z4.s\n"
+ "fmla z10.s, p0/M, z2.s, z5.s\n"
+ "fmla z13.s, p0/M, z2.s, z4.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #48]\n"
+ "fmla z14.s, p0/M, z7.s, z3.s\n"
+ "fmla z15.s, p0/M, z6.s, z3.s\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
+ "fmla z16.s, p0/M, z2.s, z3.s\n"
"fmla z17.s, p0/M, z7.s, z1.s\n"
+ "ld1rw { z0.s }, p0/Z, [%x[Apanel], #56]\n"
"fmla z18.s, p0/M, z6.s, z1.s\n"
"fmla z19.s, p0/M, z2.s, z1.s\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #60]\n"
- "addvl x22, x22, #2\n"
- "addvl x21, x21, #2\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
- "fmla z20.s, p0/M, z7.s, z3.s\n"
- "fmla z21.s, p0/M, z6.s, z3.s\n"
- "fmla z22.s, p0/M, z2.s, z3.s\n"
+ "fmla z20.s, p0/M, z7.s, z5.s\n"
+ "fmla z21.s, p0/M, z6.s, z5.s\n"
+ "fmla z22.s, p0/M, z2.s, z5.s\n"
+ "fmla z23.s, p0/M, z7.s, z4.s\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
- "fmla z23.s, p0/M, z7.s, z5.s\n"
- "fmla z24.s, p0/M, z6.s, z5.s\n"
- "fmla z25.s, p0/M, z2.s, z5.s\n"
- "fmla z26.s, p0/M, z7.s, z0.s\n"
+ "fmla z24.s, p0/M, z6.s, z4.s\n"
+ "fmla z25.s, p0/M, z2.s, z4.s\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
+ "fmla z26.s, p0/M, z7.s, z0.s\n"
"fmla z27.s, p0/M, z6.s, z0.s\n"
"fmla z28.s, p0/M, z2.s, z0.s\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"fmla z29.s, p0/M, z7.s, z1.s\n"
"ld1w { z0.s }, p0/Z, [x24]\n"
"fmla z30.s, p0/M, z6.s, z1.s\n"
"fmla z31.s, p0/M, z2.s, z1.s\n"
"ld1w { z1.s }, p0/Z, [x22]\n"
"ld1w { z2.s }, p0/Z, [x21]\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"bge 4b\n"
"5:" // main loop skip
@@ -204,12 +204,12 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"fmla z18.s, p0/M, z1.s, z6.s\n"
"fmla z19.s, p0/M, z2.s, z6.s\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #28]\n"
+ "addvl x21, x21, #1\n"
"fmla z20.s, p0/M, z0.s, z7.s\n"
"fmla z21.s, p0/M, z1.s, z7.s\n"
- "addvl x21, x21, #1\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z22.s, p0/M, z2.s, z7.s\n"
"fmla z23.s, p0/M, z0.s, z4.s\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z24.s, p0/M, z1.s, z4.s\n"
"fmla z25.s, p0/M, z2.s, z4.s\n"
"fmla z26.s, p0/M, z0.s, z5.s\n"
@@ -223,19 +223,19 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"ld1w { z5.s }, p0/Z, [x22]\n"
"ld1w { z4.s }, p0/Z, [x21]\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
- "fmla z8.s, p0/M, z6.s, z3.s\n"
"ld1rw { z2.s }, p0/Z, [%x[Apanel], #4]\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #8]\n"
- "fmla z9.s, p0/M, z5.s, z3.s\n"
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #12]\n"
+ "fmla z8.s, p0/M, z6.s, z3.s\n"
+ "fmla z9.s, p0/M, z5.s, z3.s\n"
"fmla z10.s, p0/M, z4.s, z3.s\n"
"fmla z11.s, p0/M, z6.s, z2.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z12.s, p0/M, z5.s, z2.s\n"
"fmla z13.s, p0/M, z4.s, z2.s\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z2.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z14.s, p0/M, z6.s, z1.s\n"
"fmla z15.s, p0/M, z5.s, z1.s\n"
- "ld1rw { z2.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z16.s, p0/M, z4.s, z1.s\n"
"fmla z17.s, p0/M, z6.s, z0.s\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #24]\n"
@@ -258,10 +258,10 @@ void sve_ffinterleaved_fp32_mla_8x3VL_a64fx(
"6:" // multiply loop done
"decw x26, ALL, MUL #3\n"
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "cmp x26, XZR\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "cmp x26, XZR\n"
"st1w { z12.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"st1w { z13.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"st1w { z14.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp
index 4b20be6f01..05262b50cb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_ffinterleaved_fp32_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,46 +54,46 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"1:" // Height loop
"ldr x20, [%x[args_ptr], %[offsetof_Bpanel]]\n"
"ldr x26, [%x[args_ptr], %[offsetof_N]]\n"
- "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"mov x25, %x[Apanel]\n"
+ "str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"2:" // Width loop
"ldr x24, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
"ldr x20, [%x[args_ptr], %[offsetof_B_stride]]\n"
"cntw x23, ALL, MUL #2\n"
+ "mov %x[Apanel], x25\n"
"add x22, x24, x20, LSL #2\n"
+ "cmp x26, x23\n"
"add x21, x22, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "cmp x26, x23\n"
"str x20, [%x[args_ptr], %[offsetof_cur_B_ptr]]\n"
- "mov %x[Apanel], x25\n"
"bgt 3f\n"
"decw x23\n"
- "cmp x26, x23\n"
"mov x21, x24\n"
+ "cmp x26, x23\n"
"bgt 3f\n"
"mov x22, x24\n"
"3:" // B setup done
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
"mov z10.b, #0x0\n"
- "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
"mov z11.b, #0x0\n"
+ "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
"mov z12.b, #0x0\n"
- "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
"mov z13.b, #0x0\n"
+ "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
+ "cmp x20, #0x2\n"
"mov z14.b, #0x0\n"
- "ld1w { z4.s }, p0/Z, [x24]\n"
"mov z15.b, #0x0\n"
"mov z16.b, #0x0\n"
- "ld1w { z5.s }, p0/Z, [x22]\n"
"mov z17.b, #0x0\n"
+ "ld1w { z4.s }, p0/Z, [x24]\n"
"mov z18.b, #0x0\n"
- "ld1w { z6.s }, p0/Z, [x21]\n"
"mov z19.b, #0x0\n"
+ "ld1w { z5.s }, p0/Z, [x22]\n"
"mov z20.b, #0x0\n"
"mov z21.b, #0x0\n"
+ "ld1w { z6.s }, p0/Z, [x21]\n"
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
"mov z24.b, #0x0\n"
@@ -172,18 +172,18 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"ld1w { z6.s }, p0/Z, [x21]\n"
"bge 4b\n"
"5:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "addvl x24, x24, #1\n"
"fmla z8.s, z4.s, z0.s[0]\n"
+ "addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
"fmla z11.s, z4.s, z0.s[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z14.s, z4.s, z0.s[2]\n"
"fmla z17.s, z4.s, z0.s[3]\n"
- "addvl x24, x24, #1\n"
"fmla z20.s, z4.s, z1.s[0]\n"
"fmla z23.s, z4.s, z1.s[1]\n"
- "addvl x22, x22, #1\n"
"fmla z26.s, z4.s, z1.s[2]\n"
"fmla z29.s, z4.s, z1.s[3]\n"
- "addvl x21, x21, #1\n"
"fmla z9.s, z5.s, z0.s[0]\n"
"fmla z12.s, z5.s, z0.s[1]\n"
"fmla z15.s, z5.s, z0.s[2]\n"
@@ -206,8 +206,8 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"add %x[Apanel], %x[Apanel], #0x20\n"
"ld1w { z2.s }, p0/Z, [x24]\n"
"ld1w { z1.s }, p0/Z, [x22]\n"
- "fmla z8.s, z2.s, z4.s[0]\n"
"ld1w { z0.s }, p0/Z, [x21]\n"
+ "fmla z8.s, z2.s, z4.s[0]\n"
"fmla z11.s, z2.s, z4.s[1]\n"
"fmla z14.s, z2.s, z4.s[2]\n"
"fmla z17.s, z2.s, z4.s[3]\n"
@@ -234,10 +234,10 @@ void sve_ffinterleaved_fp32_mla_8x3VL(
"6:" // multiply loop done
"decw x26, ALL, MUL #3\n"
"st1w { z8.s }, p0, [%x[Cpanel]]\n"
- "cmp x26, XZR\n"
"st1w { z9.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"st1w { z10.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
"st1w { z11.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "cmp x26, XZR\n"
"st1w { z12.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"st1w { z13.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"st1w { z14.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp
index 49ccce342e..d1f3c31de3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 2> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 2> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp
index 176f6e0d3a..739ee24050 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,18 +48,19 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -80,6 +81,7 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -103,10 +105,10 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -139,8 +141,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov x28, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -156,98 +158,98 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1h { z16.h }, p5/Z, [x10]\n"
- ".inst 0x64604208 // bfdot z8.s, z16.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ ".inst 0x64604228 // bfdot z8.s, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64604209 // bfdot z9.s, z16.h, z0.h[0]\n"
- "ld1h { z16.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6460420a // bfdot z10.s, z16.h, z0.h[0]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6460422a // bfdot z10.s, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
".inst 0x6460420b // bfdot z11.s, z16.h, z0.h[0]\n"
- "ld1h { z16.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x64684208 // bfdot z8.s, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x64684228 // bfdot z8.s, z17.h, z0.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
".inst 0x64684209 // bfdot z9.s, z16.h, z0.h[1]\n"
- "ld1h { z16.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6468420a // bfdot z10.s, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
+ ".inst 0x6468422a // bfdot z10.s, z17.h, z0.h[1]\n"
".inst 0x6468420b // bfdot z11.s, z16.h, z0.h[1]\n"
"ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x64704228 // bfdot z8.s, z17.h, z0.h[2]\n"
- ".inst 0x64704209 // bfdot z9.s, z16.h, z0.h[2]\n"
"ld1h { z17.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x64704209 // bfdot z9.s, z16.h, z0.h[2]\n"
"ld1h { z16.h }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x6470422a // bfdot z10.s, z17.h, z0.h[2]\n"
- ".inst 0x6470420b // bfdot z11.s, z16.h, z0.h[2]\n"
"ld1h { z17.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x6470420b // bfdot z11.s, z16.h, z0.h[2]\n"
"ld1h { z16.h }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x64784228 // bfdot z8.s, z17.h, z0.h[3]\n"
- ".inst 0x64784209 // bfdot z9.s, z16.h, z0.h[3]\n"
"ld1h { z17.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x64784209 // bfdot z9.s, z16.h, z0.h[3]\n"
"ld1h { z16.h }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
".inst 0x6478422a // bfdot z10.s, z17.h, z0.h[3]\n"
".inst 0x6478420b // bfdot z11.s, z16.h, z0.h[3]\n"
- "add x26, x26, #0x10\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1h { z16.h }, p5/Z, [x10]\n"
- ".inst 0x64604208 // bfdot z8.s, z16.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x64604209 // bfdot z9.s, z16.h, z0.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"subs x27, x27, #0x2\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ ".inst 0x64604228 // bfdot z8.s, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x64604209 // bfdot z9.s, z16.h, z0.h[0]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x6460422a // bfdot z10.s, z17.h, z0.h[0]\n"
".inst 0x6460420b // bfdot z11.s, z16.h, z0.h[0]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x64684228 // bfdot z8.s, z17.h, z0.h[1]\n"
- ".inst 0x64684209 // bfdot z9.s, z16.h, z0.h[1]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x64684209 // bfdot z9.s, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ "addvl x10, x10, #4\n"
".inst 0x6468422a // bfdot z10.s, z17.h, z0.h[1]\n"
".inst 0x6468420b // bfdot z11.s, z16.h, z0.h[1]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x64704228 // bfdot z8.s, z17.h, z0.h[2]\n"
- ".inst 0x64704209 // bfdot z9.s, z16.h, z0.h[2]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x64704209 // bfdot z9.s, z16.h, z0.h[2]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ "addvl x10, x10, #4\n"
".inst 0x6470422a // bfdot z10.s, z17.h, z0.h[2]\n"
".inst 0x6470420b // bfdot z11.s, z16.h, z0.h[2]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x64784228 // bfdot z8.s, z17.h, z0.h[3]\n"
- ".inst 0x64784209 // bfdot z9.s, z16.h, z0.h[3]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x64784209 // bfdot z9.s, z16.h, z0.h[3]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x6478422a // bfdot z10.s, z17.h, z0.h[3]\n"
".inst 0x6478420b // bfdot z11.s, z16.h, z0.h[3]\n"
- "addvl x10, x10, #4\n"
"11:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z17.s\n"
"fmin z9.s, p5/M, z9.s, z17.s\n"
@@ -269,10 +271,10 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -285,22 +287,22 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cbz x12, 16f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
@@ -319,8 +321,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov x28, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -339,38 +341,38 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z0.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z1.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
".inst 0x64614228 // bfdot z8.s, z17.h, z1.h[0]\n"
".inst 0x6460422c // bfdot z12.s, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64614209 // bfdot z9.s, z16.h, z1.h[0]\n"
".inst 0x6460420d // bfdot z13.s, z16.h, z0.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6461422a // bfdot z10.s, z17.h, z1.h[0]\n"
".inst 0x6460422e // bfdot z14.s, z17.h, z0.h[0]\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x8\n"
".inst 0x6461420b // bfdot z11.s, z16.h, z1.h[0]\n"
".inst 0x6460420f // bfdot z15.s, z16.h, z0.h[0]\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x64694228 // bfdot z8.s, z17.h, z1.h[1]\n"
".inst 0x6468422c // bfdot z12.s, z17.h, z0.h[1]\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
".inst 0x64694209 // bfdot z9.s, z16.h, z1.h[1]\n"
".inst 0x6468420d // bfdot z13.s, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
".inst 0x6469422a // bfdot z10.s, z17.h, z1.h[1]\n"
".inst 0x6468422e // bfdot z14.s, z17.h, z0.h[1]\n"
- "ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
".inst 0x6469420b // bfdot z11.s, z16.h, z1.h[1]\n"
".inst 0x6468420f // bfdot z15.s, z16.h, z0.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x64714228 // bfdot z8.s, z17.h, z1.h[2]\n"
".inst 0x6470422c // bfdot z12.s, z17.h, z0.h[2]\n"
@@ -397,50 +399,50 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
".inst 0x64604228 // bfdot z8.s, z17.h, z0.h[0]\n"
".inst 0x6461422c // bfdot z12.s, z17.h, z1.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64604209 // bfdot z9.s, z16.h, z0.h[0]\n"
".inst 0x6461420d // bfdot z13.s, z16.h, z1.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x6460422a // bfdot z10.s, z17.h, z0.h[0]\n"
".inst 0x6461422e // bfdot z14.s, z17.h, z1.h[0]\n"
- "addvl x10, x10, #4\n"
".inst 0x6460420b // bfdot z11.s, z16.h, z0.h[0]\n"
".inst 0x6461420f // bfdot z15.s, z16.h, z1.h[0]\n"
"ble 24f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x64684228 // bfdot z8.s, z17.h, z0.h[1]\n"
".inst 0x6469422c // bfdot z12.s, z17.h, z1.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64684209 // bfdot z9.s, z16.h, z0.h[1]\n"
".inst 0x6469420d // bfdot z13.s, z16.h, z1.h[1]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ "addvl x10, x10, #4\n"
".inst 0x6468422a // bfdot z10.s, z17.h, z0.h[1]\n"
".inst 0x6469422e // bfdot z14.s, z17.h, z1.h[1]\n"
- "addvl x10, x10, #4\n"
".inst 0x6468420b // bfdot z11.s, z16.h, z0.h[1]\n"
".inst 0x6469420f // bfdot z15.s, z16.h, z1.h[1]\n"
"ble 24f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x64704228 // bfdot z8.s, z17.h, z0.h[2]\n"
".inst 0x6471422c // bfdot z12.s, z17.h, z1.h[2]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64704209 // bfdot z9.s, z16.h, z0.h[2]\n"
".inst 0x6471420d // bfdot z13.s, z16.h, z1.h[2]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ "addvl x10, x10, #4\n"
".inst 0x6470422a // bfdot z10.s, z17.h, z0.h[2]\n"
".inst 0x6471422e // bfdot z14.s, z17.h, z1.h[2]\n"
- "addvl x10, x10, #4\n"
".inst 0x6470420b // bfdot z11.s, z16.h, z0.h[2]\n"
".inst 0x6471420f // bfdot z15.s, z16.h, z1.h[2]\n"
"ble 24f\n"
@@ -448,13 +450,13 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x64784228 // bfdot z8.s, z17.h, z0.h[3]\n"
".inst 0x6479422c // bfdot z12.s, z17.h, z1.h[3]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64784209 // bfdot z9.s, z16.h, z0.h[3]\n"
".inst 0x6479420d // bfdot z13.s, z16.h, z1.h[3]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
".inst 0x6478422a // bfdot z10.s, z17.h, z0.h[3]\n"
".inst 0x6479422e // bfdot z14.s, z17.h, z1.h[3]\n"
- "addvl x10, x10, #4\n"
".inst 0x6478420b // bfdot z11.s, z16.h, z0.h[3]\n"
".inst 0x6479420f // bfdot z15.s, z16.h, z1.h[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
@@ -463,11 +465,11 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cmp x28, x20\n"
"bne 19b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z17.s\n"
"fmin z9.s, p5/M, z9.s, z17.s\n"
@@ -491,20 +493,20 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -517,27 +519,27 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cbz x12, 29f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
@@ -564,8 +566,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov x28, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -587,37 +589,37 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z21.h }, p5/Z, [x10]\n"
+ "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z2.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x24]\n"
- "ld1h { z21.h }, p5/Z, [x10]\n"
+ "add x24, x24, #0x10\n"
".inst 0x646242a8 // bfdot z8.s, z21.h, z2.h[0]\n"
".inst 0x646142ac // bfdot z12.s, z21.h, z1.h[0]\n"
- "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x646042b0 // bfdot z16.s, z21.h, z0.h[0]\n"
".inst 0x64624289 // bfdot z9.s, z20.h, z2.h[0]\n"
- "ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6461428d // bfdot z13.s, z20.h, z1.h[0]\n"
+ ".inst 0x646042b0 // bfdot z16.s, z21.h, z0.h[0]\n"
+ "ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64604291 // bfdot z17.s, z20.h, z0.h[0]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x8\n"
".inst 0x646242aa // bfdot z10.s, z21.h, z2.h[0]\n"
".inst 0x646142ae // bfdot z14.s, z21.h, z1.h[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
".inst 0x646042b2 // bfdot z18.s, z21.h, z0.h[0]\n"
- ".inst 0x6462428b // bfdot z11.s, z20.h, z2.h[0]\n"
"ld1h { z21.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ ".inst 0x6462428b // bfdot z11.s, z20.h, z2.h[0]\n"
".inst 0x6461428f // bfdot z15.s, z20.h, z1.h[0]\n"
".inst 0x64604293 // bfdot z19.s, z20.h, z0.h[0]\n"
"ld1h { z20.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x646a42a8 // bfdot z8.s, z21.h, z2.h[1]\n"
".inst 0x646942ac // bfdot z12.s, z21.h, z1.h[1]\n"
".inst 0x646842b0 // bfdot z16.s, z21.h, z0.h[1]\n"
- ".inst 0x646a4289 // bfdot z9.s, z20.h, z2.h[1]\n"
"ld1h { z21.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x646a4289 // bfdot z9.s, z20.h, z2.h[1]\n"
".inst 0x6469428d // bfdot z13.s, z20.h, z1.h[1]\n"
".inst 0x64684291 // bfdot z17.s, z20.h, z0.h[1]\n"
"ld1h { z20.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -626,31 +628,31 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646942ae // bfdot z14.s, z21.h, z1.h[1]\n"
".inst 0x646842b2 // bfdot z18.s, z21.h, z0.h[1]\n"
".inst 0x646a428b // bfdot z11.s, z20.h, z2.h[1]\n"
- "ld1h { z21.h }, p5/Z, [x10, #-8, MUL VL]\n"
".inst 0x6469428f // bfdot z15.s, z20.h, z1.h[1]\n"
".inst 0x64684293 // bfdot z19.s, z20.h, z0.h[1]\n"
+ "ld1h { z21.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z20.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x647242a8 // bfdot z8.s, z21.h, z2.h[2]\n"
".inst 0x647142ac // bfdot z12.s, z21.h, z1.h[2]\n"
".inst 0x647042b0 // bfdot z16.s, z21.h, z0.h[2]\n"
- ".inst 0x64724289 // bfdot z9.s, z20.h, z2.h[2]\n"
"ld1h { z21.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x64724289 // bfdot z9.s, z20.h, z2.h[2]\n"
".inst 0x6471428d // bfdot z13.s, z20.h, z1.h[2]\n"
".inst 0x64704291 // bfdot z17.s, z20.h, z0.h[2]\n"
"ld1h { z20.h }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x647242aa // bfdot z10.s, z21.h, z2.h[2]\n"
".inst 0x647142ae // bfdot z14.s, z21.h, z1.h[2]\n"
".inst 0x647042b2 // bfdot z18.s, z21.h, z0.h[2]\n"
- ".inst 0x6472428b // bfdot z11.s, z20.h, z2.h[2]\n"
"ld1h { z21.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x6472428b // bfdot z11.s, z20.h, z2.h[2]\n"
".inst 0x6471428f // bfdot z15.s, z20.h, z1.h[2]\n"
".inst 0x64704293 // bfdot z19.s, z20.h, z0.h[2]\n"
"ld1h { z20.h }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x647a42a8 // bfdot z8.s, z21.h, z2.h[3]\n"
".inst 0x647942ac // bfdot z12.s, z21.h, z1.h[3]\n"
".inst 0x647842b0 // bfdot z16.s, z21.h, z0.h[3]\n"
- ".inst 0x647a4289 // bfdot z9.s, z20.h, z2.h[3]\n"
"ld1h { z21.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x647a4289 // bfdot z9.s, z20.h, z2.h[3]\n"
".inst 0x6479428d // bfdot z13.s, z20.h, z1.h[3]\n"
".inst 0x64784291 // bfdot z17.s, z20.h, z0.h[3]\n"
"ld1h { z20.h }, p5/Z, [x10, #-1, MUL VL]\n"
@@ -663,18 +665,18 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z21.h }, p5/Z, [x10]\n"
+ "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1h { z21.h }, p5/Z, [x10]\n"
".inst 0x646042a8 // bfdot z8.s, z21.h, z0.h[0]\n"
".inst 0x646142ac // bfdot z12.s, z21.h, z1.h[0]\n"
- "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x646242b0 // bfdot z16.s, z21.h, z2.h[0]\n"
".inst 0x64604289 // bfdot z9.s, z20.h, z0.h[0]\n"
- "ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6461428d // bfdot z13.s, z20.h, z1.h[0]\n"
+ ".inst 0x646242b0 // bfdot z16.s, z21.h, z2.h[0]\n"
+ "ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64624291 // bfdot z17.s, z20.h, z2.h[0]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -687,12 +689,12 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 37f\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x646842a8 // bfdot z8.s, z21.h, z0.h[1]\n"
".inst 0x646942ac // bfdot z12.s, z21.h, z1.h[1]\n"
".inst 0x646a42b0 // bfdot z16.s, z21.h, z2.h[1]\n"
- ".inst 0x64684289 // bfdot z9.s, z20.h, z0.h[1]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ ".inst 0x64684289 // bfdot z9.s, z20.h, z0.h[1]\n"
".inst 0x6469428d // bfdot z13.s, z20.h, z1.h[1]\n"
".inst 0x646a4291 // bfdot z17.s, z20.h, z2.h[1]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -706,12 +708,12 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 37f\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x647042a8 // bfdot z8.s, z21.h, z0.h[2]\n"
".inst 0x647142ac // bfdot z12.s, z21.h, z1.h[2]\n"
".inst 0x647242b0 // bfdot z16.s, z21.h, z2.h[2]\n"
- ".inst 0x64704289 // bfdot z9.s, z20.h, z0.h[2]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
+ ".inst 0x64704289 // bfdot z9.s, z20.h, z0.h[2]\n"
".inst 0x6471428d // bfdot z13.s, z20.h, z1.h[2]\n"
".inst 0x64724291 // bfdot z17.s, z20.h, z2.h[2]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -728,8 +730,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647842a8 // bfdot z8.s, z21.h, z0.h[3]\n"
".inst 0x647942ac // bfdot z12.s, z21.h, z1.h[3]\n"
".inst 0x647a42b0 // bfdot z16.s, z21.h, z2.h[3]\n"
- ".inst 0x64784289 // bfdot z9.s, z20.h, z0.h[3]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x64784289 // bfdot z9.s, z20.h, z0.h[3]\n"
".inst 0x6479428d // bfdot z13.s, z20.h, z1.h[3]\n"
".inst 0x647a4291 // bfdot z17.s, z20.h, z2.h[3]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -746,12 +748,12 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cmp x28, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z21.s }, p5/Z, [x21]\n"
"ld1rw { z20.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z21.s\n"
"fmin z9.s, p5/M, z9.s, z21.s\n"
@@ -783,24 +785,24 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -813,18 +815,18 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cbz x12, 42f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -832,13 +834,13 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
@@ -873,8 +875,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov x28, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -899,25 +901,25 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z25.h }, p5/Z, [x10]\n"
+ "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z3.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z2.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
".inst 0x64634328 // bfdot z8.s, z25.h, z3.h[0]\n"
".inst 0x6462432c // bfdot z12.s, z25.h, z2.h[0]\n"
+ ".inst 0x64634309 // bfdot z9.s, z24.h, z3.h[0]\n"
+ ".inst 0x6462430d // bfdot z13.s, z24.h, z2.h[0]\n"
".inst 0x64614330 // bfdot z16.s, z25.h, z1.h[0]\n"
".inst 0x64604334 // bfdot z20.s, z25.h, z0.h[0]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x64634309 // bfdot z9.s, z24.h, z3.h[0]\n"
- ".inst 0x6462430d // bfdot z13.s, z24.h, z2.h[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
".inst 0x64614311 // bfdot z17.s, z24.h, z1.h[0]\n"
".inst 0x64604315 // bfdot z21.s, z24.h, z0.h[0]\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -946,9 +948,9 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646a432e // bfdot z14.s, z25.h, z2.h[1]\n"
".inst 0x64694332 // bfdot z18.s, z25.h, z1.h[1]\n"
".inst 0x64684336 // bfdot z22.s, z25.h, z0.h[1]\n"
- "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
".inst 0x646b430b // bfdot z11.s, z24.h, z3.h[1]\n"
".inst 0x646a430f // bfdot z15.s, z24.h, z2.h[1]\n"
+ "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
".inst 0x64694313 // bfdot z19.s, z24.h, z1.h[1]\n"
".inst 0x64684317 // bfdot z23.s, z24.h, z0.h[1]\n"
"ld1h { z24.h }, p5/Z, [x10, #-7, MUL VL]\n"
@@ -993,20 +995,20 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z25.h }, p5/Z, [x10]\n"
+ "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x64604328 // bfdot z8.s, z25.h, z0.h[0]\n"
".inst 0x6461432c // bfdot z12.s, z25.h, z1.h[0]\n"
+ ".inst 0x64604309 // bfdot z9.s, z24.h, z0.h[0]\n"
+ ".inst 0x6461430d // bfdot z13.s, z24.h, z1.h[0]\n"
".inst 0x64624330 // bfdot z16.s, z25.h, z2.h[0]\n"
".inst 0x64634334 // bfdot z20.s, z25.h, z3.h[0]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x64604309 // bfdot z9.s, z24.h, z0.h[0]\n"
- ".inst 0x6461430d // bfdot z13.s, z24.h, z1.h[0]\n"
".inst 0x64624311 // bfdot z17.s, z24.h, z2.h[0]\n"
".inst 0x64634315 // bfdot z21.s, z24.h, z3.h[0]\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1022,12 +1024,12 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x64684328 // bfdot z8.s, z25.h, z0.h[1]\n"
".inst 0x6469432c // bfdot z12.s, z25.h, z1.h[1]\n"
".inst 0x646a4330 // bfdot z16.s, z25.h, z2.h[1]\n"
".inst 0x646b4334 // bfdot z20.s, z25.h, z3.h[1]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
".inst 0x64684309 // bfdot z9.s, z24.h, z0.h[1]\n"
".inst 0x6469430d // bfdot z13.s, z24.h, z1.h[1]\n"
".inst 0x646a4311 // bfdot z17.s, z24.h, z2.h[1]\n"
@@ -1045,12 +1047,12 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x64704328 // bfdot z8.s, z25.h, z0.h[2]\n"
".inst 0x6471432c // bfdot z12.s, z25.h, z1.h[2]\n"
".inst 0x64724330 // bfdot z16.s, z25.h, z2.h[2]\n"
".inst 0x64734334 // bfdot z20.s, z25.h, z3.h[2]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x2\n"
".inst 0x64704309 // bfdot z9.s, z24.h, z0.h[2]\n"
".inst 0x6471430d // bfdot z13.s, z24.h, z1.h[2]\n"
".inst 0x64724311 // bfdot z17.s, z24.h, z2.h[2]\n"
@@ -1093,13 +1095,13 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cmp x28, x20\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z25.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z25.s }, p5/Z, [x21]\n"
"ld1rw { z24.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z25.s\n"
"fmin z9.s, p5/M, z9.s, z25.s\n"
@@ -1139,28 +1141,28 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1173,18 +1175,18 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cbz x12, 55f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1196,16 +1198,16 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x22]\n"
@@ -1246,8 +1248,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov x28, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1275,29 +1277,29 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z4.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z3.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x22]\n"
- "ld1h { z29.h }, p5/Z, [x10]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x646443a8 // bfdot z8.s, z29.h, z4.h[0]\n"
".inst 0x646343ac // bfdot z12.s, z29.h, z3.h[0]\n"
- "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x64644389 // bfdot z9.s, z28.h, z4.h[0]\n"
".inst 0x646243b0 // bfdot z16.s, z29.h, z2.h[0]\n"
".inst 0x646143b4 // bfdot z20.s, z29.h, z1.h[0]\n"
- "add x25, x25, #0x10\n"
".inst 0x646043b8 // bfdot z24.s, z29.h, z0.h[0]\n"
- ".inst 0x64644389 // bfdot z9.s, z28.h, z4.h[0]\n"
- "ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x6463438d // bfdot z13.s, z28.h, z3.h[0]\n"
+ "ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64624391 // bfdot z17.s, z28.h, z2.h[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x64614395 // bfdot z21.s, z28.h, z1.h[0]\n"
".inst 0x64604399 // bfdot z25.s, z28.h, z0.h[0]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1306,8 +1308,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646243b2 // bfdot z18.s, z29.h, z2.h[0]\n"
".inst 0x646143b6 // bfdot z22.s, z29.h, z1.h[0]\n"
".inst 0x646043ba // bfdot z26.s, z29.h, z0.h[0]\n"
- ".inst 0x6464438b // bfdot z11.s, z28.h, z4.h[0]\n"
"ld1h { z29.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6464438b // bfdot z11.s, z28.h, z4.h[0]\n"
".inst 0x6463438f // bfdot z15.s, z28.h, z3.h[0]\n"
".inst 0x64624393 // bfdot z19.s, z28.h, z2.h[0]\n"
".inst 0x64614397 // bfdot z23.s, z28.h, z1.h[0]\n"
@@ -1318,8 +1320,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646a43b0 // bfdot z16.s, z29.h, z2.h[1]\n"
".inst 0x646943b4 // bfdot z20.s, z29.h, z1.h[1]\n"
".inst 0x646843b8 // bfdot z24.s, z29.h, z0.h[1]\n"
- ".inst 0x646c4389 // bfdot z9.s, z28.h, z4.h[1]\n"
"ld1h { z29.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x646c4389 // bfdot z9.s, z28.h, z4.h[1]\n"
".inst 0x646b438d // bfdot z13.s, z28.h, z3.h[1]\n"
".inst 0x646a4391 // bfdot z17.s, z28.h, z2.h[1]\n"
".inst 0x64694395 // bfdot z21.s, z28.h, z1.h[1]\n"
@@ -1332,8 +1334,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x646943b6 // bfdot z22.s, z29.h, z1.h[1]\n"
".inst 0x646843ba // bfdot z26.s, z29.h, z0.h[1]\n"
".inst 0x646c438b // bfdot z11.s, z28.h, z4.h[1]\n"
- "ld1h { z29.h }, p5/Z, [x10, #-8, MUL VL]\n"
".inst 0x646b438f // bfdot z15.s, z28.h, z3.h[1]\n"
+ "ld1h { z29.h }, p5/Z, [x10, #-8, MUL VL]\n"
".inst 0x646a4393 // bfdot z19.s, z28.h, z2.h[1]\n"
".inst 0x64694397 // bfdot z23.s, z28.h, z1.h[1]\n"
".inst 0x6468439b // bfdot z27.s, z28.h, z0.h[1]\n"
@@ -1343,8 +1345,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647243b0 // bfdot z16.s, z29.h, z2.h[2]\n"
".inst 0x647143b4 // bfdot z20.s, z29.h, z1.h[2]\n"
".inst 0x647043b8 // bfdot z24.s, z29.h, z0.h[2]\n"
- ".inst 0x64744389 // bfdot z9.s, z28.h, z4.h[2]\n"
"ld1h { z29.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x64744389 // bfdot z9.s, z28.h, z4.h[2]\n"
".inst 0x6473438d // bfdot z13.s, z28.h, z3.h[2]\n"
".inst 0x64724391 // bfdot z17.s, z28.h, z2.h[2]\n"
".inst 0x64714395 // bfdot z21.s, z28.h, z1.h[2]\n"
@@ -1355,8 +1357,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647243b2 // bfdot z18.s, z29.h, z2.h[2]\n"
".inst 0x647143b6 // bfdot z22.s, z29.h, z1.h[2]\n"
".inst 0x647043ba // bfdot z26.s, z29.h, z0.h[2]\n"
- ".inst 0x6474438b // bfdot z11.s, z28.h, z4.h[2]\n"
"ld1h { z29.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x6474438b // bfdot z11.s, z28.h, z4.h[2]\n"
".inst 0x6473438f // bfdot z15.s, z28.h, z3.h[2]\n"
".inst 0x64724393 // bfdot z19.s, z28.h, z2.h[2]\n"
".inst 0x64714397 // bfdot z23.s, z28.h, z1.h[2]\n"
@@ -1367,8 +1369,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647a43b0 // bfdot z16.s, z29.h, z2.h[3]\n"
".inst 0x647943b4 // bfdot z20.s, z29.h, z1.h[3]\n"
".inst 0x647843b8 // bfdot z24.s, z29.h, z0.h[3]\n"
- ".inst 0x647c4389 // bfdot z9.s, z28.h, z4.h[3]\n"
"ld1h { z29.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x647c4389 // bfdot z9.s, z28.h, z4.h[3]\n"
".inst 0x647b438d // bfdot z13.s, z28.h, z3.h[3]\n"
".inst 0x647a4391 // bfdot z17.s, z28.h, z2.h[3]\n"
".inst 0x64794395 // bfdot z21.s, z28.h, z1.h[3]\n"
@@ -1387,23 +1389,23 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
"ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1h { z29.h }, p5/Z, [x10]\n"
".inst 0x646043a8 // bfdot z8.s, z29.h, z0.h[0]\n"
".inst 0x646143ac // bfdot z12.s, z29.h, z1.h[0]\n"
- "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x64604389 // bfdot z9.s, z28.h, z0.h[0]\n"
+ ".inst 0x6461438d // bfdot z13.s, z28.h, z1.h[0]\n"
".inst 0x646243b0 // bfdot z16.s, z29.h, z2.h[0]\n"
".inst 0x646343b4 // bfdot z20.s, z29.h, z3.h[0]\n"
".inst 0x646443b8 // bfdot z24.s, z29.h, z4.h[0]\n"
- ".inst 0x64604389 // bfdot z9.s, z28.h, z0.h[0]\n"
- "ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6461438d // bfdot z13.s, z28.h, z1.h[0]\n"
".inst 0x64624391 // bfdot z17.s, z28.h, z2.h[0]\n"
+ "ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x64634395 // bfdot z21.s, z28.h, z3.h[0]\n"
".inst 0x64644399 // bfdot z25.s, z28.h, z4.h[0]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1421,21 +1423,21 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 63f\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
"ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x646843a8 // bfdot z8.s, z29.h, z0.h[1]\n"
".inst 0x646943ac // bfdot z12.s, z29.h, z1.h[1]\n"
".inst 0x646a43b0 // bfdot z16.s, z29.h, z2.h[1]\n"
".inst 0x646b43b4 // bfdot z20.s, z29.h, z3.h[1]\n"
- "subs x27, x27, #0x2\n"
".inst 0x646c43b8 // bfdot z24.s, z29.h, z4.h[1]\n"
- ".inst 0x64684389 // bfdot z9.s, z28.h, z0.h[1]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x64684389 // bfdot z9.s, z28.h, z0.h[1]\n"
".inst 0x6469438d // bfdot z13.s, z28.h, z1.h[1]\n"
".inst 0x646a4391 // bfdot z17.s, z28.h, z2.h[1]\n"
".inst 0x646b4395 // bfdot z21.s, z28.h, z3.h[1]\n"
".inst 0x646c4399 // bfdot z25.s, z28.h, z4.h[1]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
".inst 0x646843aa // bfdot z10.s, z29.h, z0.h[1]\n"
+ "addvl x10, x10, #4\n"
".inst 0x646943ae // bfdot z14.s, z29.h, z1.h[1]\n"
".inst 0x646a43b2 // bfdot z18.s, z29.h, z2.h[1]\n"
".inst 0x646b43b6 // bfdot z22.s, z29.h, z3.h[1]\n"
@@ -1448,21 +1450,21 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 63f\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
"ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x647043a8 // bfdot z8.s, z29.h, z0.h[2]\n"
".inst 0x647143ac // bfdot z12.s, z29.h, z1.h[2]\n"
".inst 0x647243b0 // bfdot z16.s, z29.h, z2.h[2]\n"
".inst 0x647343b4 // bfdot z20.s, z29.h, z3.h[2]\n"
- "subs x27, x27, #0x2\n"
".inst 0x647443b8 // bfdot z24.s, z29.h, z4.h[2]\n"
- ".inst 0x64704389 // bfdot z9.s, z28.h, z0.h[2]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x64704389 // bfdot z9.s, z28.h, z0.h[2]\n"
".inst 0x6471438d // bfdot z13.s, z28.h, z1.h[2]\n"
".inst 0x64724391 // bfdot z17.s, z28.h, z2.h[2]\n"
".inst 0x64734395 // bfdot z21.s, z28.h, z3.h[2]\n"
".inst 0x64744399 // bfdot z25.s, z28.h, z4.h[2]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
".inst 0x647043aa // bfdot z10.s, z29.h, z0.h[2]\n"
+ "addvl x10, x10, #4\n"
".inst 0x647143ae // bfdot z14.s, z29.h, z1.h[2]\n"
".inst 0x647243b2 // bfdot z18.s, z29.h, z2.h[2]\n"
".inst 0x647343b6 // bfdot z22.s, z29.h, z3.h[2]\n"
@@ -1480,8 +1482,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
".inst 0x647a43b0 // bfdot z16.s, z29.h, z2.h[3]\n"
".inst 0x647b43b4 // bfdot z20.s, z29.h, z3.h[3]\n"
".inst 0x647c43b8 // bfdot z24.s, z29.h, z4.h[3]\n"
- ".inst 0x64784389 // bfdot z9.s, z28.h, z0.h[3]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x64784389 // bfdot z9.s, z28.h, z0.h[3]\n"
".inst 0x6479438d // bfdot z13.s, z28.h, z1.h[3]\n"
".inst 0x647a4391 // bfdot z17.s, z28.h, z2.h[3]\n"
".inst 0x647b4395 // bfdot z21.s, z28.h, z3.h[3]\n"
@@ -1504,14 +1506,14 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cmp x28, x20\n"
"bne 58b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z29.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z29.s }, p5/Z, [x21]\n"
"ld1rw { z28.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z29.s\n"
"fmin z9.s, p5/M, z9.s, z29.s\n"
@@ -1559,22 +1561,22 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1582,12 +1584,13 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"b 80f\n"
"66:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1600,18 +1603,18 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cbz x12, 68f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1627,17 +1630,17 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x23]\n"
@@ -1686,8 +1689,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"mov x28, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1718,29 +1721,29 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z1.h }, p5/Z, [x10]\n"
+ "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z7.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z6.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z5.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z4.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
"ld1rqh { z3.h }, p0/Z, [x22]\n"
"ld1rqh { z2.h }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1h { z1.h }, p5/Z, [x10]\n"
- "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x64674028 // bfdot z8.s, z1.h, z7.h[0]\n"
".inst 0x6466402c // bfdot z12.s, z1.h, z6.h[0]\n"
+ "add x21, x21, #0x10\n"
".inst 0x64654030 // bfdot z16.s, z1.h, z5.h[0]\n"
".inst 0x64644034 // bfdot z20.s, z1.h, z4.h[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x64634038 // bfdot z24.s, z1.h, z3.h[0]\n"
".inst 0x6462403c // bfdot z28.s, z1.h, z2.h[0]\n"
"ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
".inst 0x64674009 // bfdot z9.s, z0.h, z7.h[0]\n"
".inst 0x6466400d // bfdot z13.s, z0.h, z6.h[0]\n"
".inst 0x64654011 // bfdot z17.s, z0.h, z5.h[0]\n"
@@ -1848,24 +1851,24 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x2\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
"ld1rqh { z4.h }, p0/Z, [x22]\n"
"ld1rqh { z5.h }, p0/Z, [x21]\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x646040e8 // bfdot z8.s, z7.h, z0.h[0]\n"
".inst 0x646140ec // bfdot z12.s, z7.h, z1.h[0]\n"
+ ".inst 0x646040c9 // bfdot z9.s, z6.h, z0.h[0]\n"
+ ".inst 0x646140cd // bfdot z13.s, z6.h, z1.h[0]\n"
".inst 0x646240f0 // bfdot z16.s, z7.h, z2.h[0]\n"
".inst 0x646340f4 // bfdot z20.s, z7.h, z3.h[0]\n"
".inst 0x646440f8 // bfdot z24.s, z7.h, z4.h[0]\n"
".inst 0x646540fc // bfdot z28.s, z7.h, z5.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x646040c9 // bfdot z9.s, z6.h, z0.h[0]\n"
- ".inst 0x646140cd // bfdot z13.s, z6.h, z1.h[0]\n"
".inst 0x646240d1 // bfdot z17.s, z6.h, z2.h[0]\n"
".inst 0x646340d5 // bfdot z21.s, z6.h, z3.h[0]\n"
".inst 0x646440d9 // bfdot z25.s, z6.h, z4.h[0]\n"
@@ -1887,23 +1890,23 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x646840e8 // bfdot z8.s, z7.h, z0.h[1]\n"
".inst 0x646940ec // bfdot z12.s, z7.h, z1.h[1]\n"
".inst 0x646a40f0 // bfdot z16.s, z7.h, z2.h[1]\n"
".inst 0x646b40f4 // bfdot z20.s, z7.h, z3.h[1]\n"
- "subs x27, x27, #0x2\n"
".inst 0x646c40f8 // bfdot z24.s, z7.h, z4.h[1]\n"
".inst 0x646d40fc // bfdot z28.s, z7.h, z5.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x646840c9 // bfdot z9.s, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x646940cd // bfdot z13.s, z6.h, z1.h[1]\n"
".inst 0x646a40d1 // bfdot z17.s, z6.h, z2.h[1]\n"
".inst 0x646b40d5 // bfdot z21.s, z6.h, z3.h[1]\n"
".inst 0x646c40d9 // bfdot z25.s, z6.h, z4.h[1]\n"
".inst 0x646d40dd // bfdot z29.s, z6.h, z5.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
".inst 0x646840ea // bfdot z10.s, z7.h, z0.h[1]\n"
+ "addvl x10, x10, #4\n"
".inst 0x646940ee // bfdot z14.s, z7.h, z1.h[1]\n"
".inst 0x646a40f2 // bfdot z18.s, z7.h, z2.h[1]\n"
".inst 0x646b40f6 // bfdot z22.s, z7.h, z3.h[1]\n"
@@ -1918,23 +1921,23 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x2\n"
".inst 0x647040e8 // bfdot z8.s, z7.h, z0.h[2]\n"
".inst 0x647140ec // bfdot z12.s, z7.h, z1.h[2]\n"
".inst 0x647240f0 // bfdot z16.s, z7.h, z2.h[2]\n"
".inst 0x647340f4 // bfdot z20.s, z7.h, z3.h[2]\n"
- "subs x27, x27, #0x2\n"
".inst 0x647440f8 // bfdot z24.s, z7.h, z4.h[2]\n"
".inst 0x647540fc // bfdot z28.s, z7.h, z5.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x647040c9 // bfdot z9.s, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x647140cd // bfdot z13.s, z6.h, z1.h[2]\n"
".inst 0x647240d1 // bfdot z17.s, z6.h, z2.h[2]\n"
".inst 0x647340d5 // bfdot z21.s, z6.h, z3.h[2]\n"
".inst 0x647440d9 // bfdot z25.s, z6.h, z4.h[2]\n"
".inst 0x647540dd // bfdot z29.s, z6.h, z5.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
".inst 0x647040ea // bfdot z10.s, z7.h, z0.h[2]\n"
+ "addvl x10, x10, #4\n"
".inst 0x647140ee // bfdot z14.s, z7.h, z1.h[2]\n"
".inst 0x647240f2 // bfdot z18.s, z7.h, z2.h[2]\n"
".inst 0x647340f6 // bfdot z22.s, z7.h, z3.h[2]\n"
@@ -1982,15 +1985,15 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"cmp x28, x20\n"
"bne 71b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x21]\n"
"ld1rw { z0.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
@@ -2046,26 +2049,26 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p4, [x21]\n"
- "st1w { z29.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x22]\n"
+ "st1w { z29.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x22, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -2082,8 +2085,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL.hpp
index 223d8a78de..325499b7a3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 8, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 8, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp
index 74e2d267bc..64788ab092 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,18 +48,19 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -80,6 +81,7 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -103,10 +105,10 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -119,26 +121,26 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cbz x12, 3f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z16.s }, p4/Z, [x9]\n"
+ "ld1w { z19.s }, p4/Z, [x9]\n"
"ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "zip1 z8.d, z16.d, z12.d\n"
- "zip2 z12.d, z16.d, z12.d\n"
"ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "zip1 z8.d, z19.d, z12.d\n"
+ "zip2 z12.d, z19.d, z12.d\n"
"zip1 z9.d, z18.d, z13.d\n"
"zip2 z13.d, z18.d, z13.d\n"
"zip1 z10.d, z17.d, z14.d\n"
@@ -159,8 +161,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -176,87 +178,87 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z20.h }, p0/Z, [x26]\n"
- "trn1 z18.d, z20.d, z19.d\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z20.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "trn1 z19.d, z20.d, z18.d\n"
+ "trn2 z20.d, z20.d, z18.d\n"
+ ".inst 0x6471e668 // bfmmla z8.s, z19.h, z17.h\n"
+ "ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6470e66c // bfmmla z12.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6461e669 // bfmmla z9.s, z19.h, z1.h\n"
+ "ld1h { z18.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6470e66d // bfmmla z13.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
+ ".inst 0x6472e66a // bfmmla z10.s, z19.h, z18.h\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6470e66e // bfmmla z14.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "trn2 z20.d, z20.d, z19.d\n"
- ".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
- ".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
+ ".inst 0x6471e66b // bfmmla z11.s, z19.h, z17.h\n"
+ ".inst 0x6470e66f // bfmmla z15.s, z19.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x6471e688 // bfmmla z8.s, z20.h, z17.h\n"
- ".inst 0x6470e68c // bfmmla z12.s, z20.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x6470e68c // bfmmla z12.s, z20.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x6471e689 // bfmmla z9.s, z20.h, z17.h\n"
+ "ld1h { z17.h }, p5/Z, [x10, #-4, MUL VL]\n"
".inst 0x6470e68d // bfmmla z13.s, z20.h, z16.h\n"
- "ld1h { z16.h }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1h { z17.h }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x6470e68a // bfmmla z10.s, z20.h, z16.h\n"
- ".inst 0x6471e68e // bfmmla z14.s, z20.h, z17.h\n"
+ "ld1h { z16.h }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x6471e68a // bfmmla z10.s, z20.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x6470e68e // bfmmla z14.s, z20.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
".inst 0x6471e68b // bfmmla z11.s, z20.h, z17.h\n"
".inst 0x6470e68f // bfmmla z15.s, z20.h, z16.h\n"
- "add x26, x26, #0x10\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "trn1 z18.d, z1.d, z19.d\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
+ "ld1rqh { z1.h }, p0/Z, [x26]\n"
+ "trn1 z18.d, z1.d, z19.d\n"
".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "addvl x10, x10, #8\n"
".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
- "addvl x10, x10, #8\n"
"ble 11f\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6471e428 // bfmmla z8.s, z1.h, z17.h\n"
- ".inst 0x6470e42c // bfmmla z12.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6471e429 // bfmmla z9.s, z1.h, z17.h\n"
- ".inst 0x6470e42d // bfmmla z13.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6471e42a // bfmmla z10.s, z1.h, z17.h\n"
- ".inst 0x6470e42e // bfmmla z14.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
- "ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6471e42b // bfmmla z11.s, z1.h, z17.h\n"
- ".inst 0x6470e42f // bfmmla z15.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x10]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6470e428 // bfmmla z8.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6471e42c // bfmmla z12.s, z1.h, z17.h\n"
+ "ld1h { z17.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6470e429 // bfmmla z9.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6471e42d // bfmmla z13.s, z1.h, z17.h\n"
+ "ld1h { z17.h }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x6470e42a // bfmmla z10.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6471e42e // bfmmla z14.s, z1.h, z17.h\n"
+ "ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6470e42b // bfmmla z11.s, z1.h, z16.h\n"
+ ".inst 0x6460e42f // bfmmla z15.s, z1.h, z0.h\n"
"11:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -267,14 +269,14 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z10.d, z10.d, z14.d\n"
"uzp1 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
- "fmin z8.s, p5/M, z8.s, z21.s\n"
- "fmin z9.s, p5/M, z9.s, z21.s\n"
- "fmin z10.s, p5/M, z10.s, z21.s\n"
- "fmin z11.s, p5/M, z11.s, z21.s\n"
+ "fmin z8.s, p5/M, z8.s, z17.s\n"
+ "fmin z9.s, p5/M, z9.s, z17.s\n"
+ "fmin z10.s, p5/M, z10.s, z17.s\n"
+ "fmin z11.s, p5/M, z11.s, z17.s\n"
"fmax z8.s, p5/M, z8.s, z16.s\n"
"fmax z9.s, p5/M, z9.s, z16.s\n"
"fmax z10.s, p5/M, z10.s, z16.s\n"
@@ -291,10 +293,10 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -307,38 +309,38 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cbz x12, 16f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z18.s }, p4/Z, [x9]\n"
+ "ld1w { z16.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z17.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x20, x9, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
- "ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
- "zip1 z9.d, z18.d, z13.d\n"
- "zip2 z13.d, z18.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x20, #3, MUL VL]\n"
- "zip1 z10.d, z17.d, z14.d\n"
- "zip2 z14.d, z17.d, z14.d\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z8.d, z18.d, z12.d\n"
+ "zip2 z12.d, z18.d, z12.d\n"
+ "zip1 z9.d, z16.d, z13.d\n"
+ "zip2 z13.d, z16.d, z13.d\n"
+ "zip1 z10.d, z5.d, z14.d\n"
+ "zip2 z14.d, z5.d, z14.d\n"
+ "zip1 z11.d, z17.d, z15.d\n"
+ "zip2 z15.d, z17.d, z15.d\n"
"b 18f\n"
"17:" // Height 2: no accumulate
"mov z8.b, #0x0\n"
@@ -353,8 +355,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -373,90 +375,90 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z20.h }, p0/Z, [x26]\n"
- "ld1rqh { z19.h }, p0/Z, [x25]\n"
- "trn1 z18.d, z20.d, z19.d\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z19.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z16.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "trn1 z18.d, z19.d, z16.d\n"
+ "trn2 z19.d, z19.d, z16.d\n"
".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6474e64c // bfmmla z12.s, z18.h, z20.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "trn2 z20.d, z20.d, z19.d\n"
".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #-7, MUL VL]\n"
- ".inst 0x6471e688 // bfmmla z8.s, z20.h, z17.h\n"
- ".inst 0x6470e68c // bfmmla z12.s, z20.h, z16.h\n"
+ ".inst 0x6471e668 // bfmmla z8.s, z19.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x6470e66c // bfmmla z12.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x6471e689 // bfmmla z9.s, z20.h, z17.h\n"
- ".inst 0x6470e68d // bfmmla z13.s, z20.h, z16.h\n"
+ ".inst 0x6471e669 // bfmmla z9.s, z19.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x6470e66d // bfmmla z13.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x6471e68a // bfmmla z10.s, z20.h, z17.h\n"
- ".inst 0x6470e68e // bfmmla z14.s, z20.h, z16.h\n"
+ ".inst 0x6471e66a // bfmmla z10.s, z19.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x6470e66e // bfmmla z14.s, z19.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
- ".inst 0x6471e68b // bfmmla z11.s, z20.h, z17.h\n"
- ".inst 0x6470e68f // bfmmla z15.s, z20.h, z16.h\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ ".inst 0x6471e66b // bfmmla z11.s, z19.h, z17.h\n"
+ ".inst 0x6470e66f // bfmmla z15.s, z19.h, z16.h\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
"ld1rqh { z19.h }, p0/Z, [x25]\n"
"trn1 z18.d, z1.d, z19.d\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "addvl x10, x10, #8\n"
".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
- "addvl x10, x10, #8\n"
"ble 24f\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6471e428 // bfmmla z8.s, z1.h, z17.h\n"
- ".inst 0x6470e42c // bfmmla z12.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6471e429 // bfmmla z9.s, z1.h, z17.h\n"
- ".inst 0x6470e42d // bfmmla z13.s, z1.h, z16.h\n"
- "ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
- "ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6471e42a // bfmmla z10.s, z1.h, z17.h\n"
- ".inst 0x6470e42e // bfmmla z14.s, z1.h, z16.h\n"
- "ld1h { z22.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "ld1h { z16.h }, p5/Z, [x10]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6470e428 // bfmmla z8.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6471e42c // bfmmla z12.s, z1.h, z17.h\n"
+ "ld1h { z17.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6470e429 // bfmmla z9.s, z1.h, z16.h\n"
+ "ld1h { z16.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6471e42d // bfmmla z13.s, z1.h, z17.h\n"
+ "ld1h { z26.h }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x6470e42a // bfmmla z10.s, z1.h, z16.h\n"
+ "ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x647ae42e // bfmmla z14.s, z1.h, z26.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6476e42b // bfmmla z11.s, z1.h, z22.h\n"
- ".inst 0x6470e42f // bfmmla z15.s, z1.h, z16.h\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6471e42b // bfmmla z11.s, z1.h, z17.h\n"
+ ".inst 0x6470e42f // bfmmla z15.s, z1.h, z16.h\n"
"24:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -465,17 +467,17 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x25, x9, x20, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x26, x9, x20, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z17.s\n"
"fmin z12.s, p5/M, z12.s, z17.s\n"
@@ -499,20 +501,20 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -525,15 +527,15 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cbz x12, 29f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -548,36 +550,36 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z26.s }, p4/Z, [x9]\n"
+ "ld1w { z25.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x21, x9, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x21, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x20]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "zip1 z8.d, z26.d, z12.d\n"
+ "zip2 z12.d, z26.d, z12.d\n"
+ "ld1w { z2.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z9.d, z25.d, z13.d\n"
+ "zip2 z13.d, z25.d, z13.d\n"
+ "zip1 z10.d, z24.d, z14.d\n"
+ "zip2 z14.d, z24.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
- "zip1 z19.d, z24.d, z23.d\n"
- "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z19.d, z2.d, z23.d\n"
+ "zip2 z23.d, z2.d, z23.d\n"
"b 31f\n"
"30:" // Height 3: no accumulate
"mov z8.b, #0x0\n"
@@ -600,8 +602,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -623,92 +625,92 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z30.h }, p0/Z, [x26]\n"
+ "ld1h { z25.h }, p5/Z, [x10]\n"
+ "ld1h { z30.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z29.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z24.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z28.h }, p0/Z, [x24]\n"
- "trn1 z27.d, z30.d, z24.d\n"
- "trn2 z30.d, z30.d, z24.d\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "trn1 z26.d, z28.d, z29.d\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "trn1 z27.d, z29.d, z24.d\n"
+ "trn2 z29.d, z29.d, z24.d\n"
+ "trn1 z26.d, z28.d, z31.d\n"
+ "trn2 z28.d, z28.d, z31.d\n"
".inst 0x6479e768 // bfmmla z8.s, z27.h, z25.h\n"
+ ".inst 0x647ee76c // bfmmla z12.s, z27.h, z30.h\n"
".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e76c // bfmmla z12.s, z27.h, z24.h\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x647ee754 // bfmmla z20.s, z26.h, z30.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z28.d, z28.d, z29.d\n"
".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x8\n"
".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x8\n"
".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
- "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
+ "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z24.h }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x6479e7c8 // bfmmla z8.s, z30.h, z25.h\n"
+ ".inst 0x6479e7a8 // bfmmla z8.s, z29.h, z25.h\n"
".inst 0x6479e790 // bfmmla z16.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6478e7cc // bfmmla z12.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ac // bfmmla z12.s, z29.h, z24.h\n"
".inst 0x6478e794 // bfmmla z20.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x6479e7c9 // bfmmla z9.s, z30.h, z25.h\n"
+ ".inst 0x6479e7a9 // bfmmla z9.s, z29.h, z25.h\n"
".inst 0x6479e791 // bfmmla z17.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x6478e7cd // bfmmla z13.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ad // bfmmla z13.s, z29.h, z24.h\n"
".inst 0x6478e795 // bfmmla z21.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x6479e7ca // bfmmla z10.s, z30.h, z25.h\n"
+ ".inst 0x6479e7aa // bfmmla z10.s, z29.h, z25.h\n"
".inst 0x6479e792 // bfmmla z18.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x6478e7ce // bfmmla z14.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ae // bfmmla z14.s, z29.h, z24.h\n"
".inst 0x6478e796 // bfmmla z22.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x6479e7cb // bfmmla z11.s, z30.h, z25.h\n"
+ ".inst 0x6479e7ab // bfmmla z11.s, z29.h, z25.h\n"
".inst 0x6479e793 // bfmmla z19.s, z28.h, z25.h\n"
- ".inst 0x6478e7cf // bfmmla z15.s, z30.h, z24.h\n"
+ ".inst 0x6478e7af // bfmmla z15.s, z29.h, z24.h\n"
".inst 0x6478e797 // bfmmla z23.s, z28.h, z24.h\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z25.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
"ld1rqh { z24.h }, p0/Z, [x25]\n"
"ld1rqh { z3.h }, p0/Z, [x24]\n"
"trn1 z27.d, z1.d, z24.d\n"
"trn2 z1.d, z1.d, z24.d\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "trn1 z26.d, z3.d, z28.d\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "trn1 z26.d, z3.d, z29.d\n"
".inst 0x6479e768 // bfmmla z8.s, z27.h, z25.h\n"
+ ".inst 0x647ce76c // bfmmla z12.s, z27.h, z28.h\n"
+ "trn2 z3.d, z3.d, z29.d\n"
".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e76c // bfmmla z12.s, z27.h, z24.h\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x647ce754 // bfmmla z20.s, z26.h, z28.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z28.d\n"
".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #6, MUL VL]\n"
@@ -725,9 +727,9 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6479e428 // bfmmla z8.s, z1.h, z25.h\n"
".inst 0x6479e470 // bfmmla z16.s, z3.h, z25.h\n"
+ "ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6478e42c // bfmmla z12.s, z1.h, z24.h\n"
".inst 0x6478e474 // bfmmla z20.s, z3.h, z24.h\n"
- "ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6479e429 // bfmmla z9.s, z1.h, z25.h\n"
".inst 0x6479e471 // bfmmla z17.s, z3.h, z25.h\n"
@@ -752,24 +754,24 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x9, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 z16.d, z16.d, z20.d\n"
"uzp1 z17.d, z17.d, z21.d\n"
"uzp1 z18.d, z18.d, z22.d\n"
"uzp1 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z25.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z25.s }, p5/Z, [x21]\n"
"ld1rw { z24.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z25.s\n"
"fmin z12.s, p5/M, z12.s, z25.s\n"
@@ -801,24 +803,24 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -831,15 +833,15 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cbz x12, 42f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -854,37 +856,37 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x22, x9, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x22, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x21]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x20]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -911,8 +913,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -937,114 +939,114 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z30.h }, p0/Z, [x26]\n"
- "ld1rqh { z24.h }, p0/Z, [x25]\n"
- "trn1 z29.d, z30.d, z24.d\n"
+ "ld1h { z31.h }, p5/Z, [x10]\n"
+ "ld1h { z30.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z29.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z25.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z28.h }, p0/Z, [x24]\n"
- "ld1rqh { z27.h }, p0/Z, [x23]\n"
- "trn2 z30.d, z30.d, z24.d\n"
- "trn1 z26.d, z28.d, z27.d\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6479e7a8 // bfmmla z8.s, z29.h, z25.h\n"
- ".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e7ac // bfmmla z12.s, z29.h, z24.h\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqh { z24.h }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z27.d, z29.d, z25.d\n"
+ "trn2 z29.d, z29.d, z25.d\n"
+ "trn1 z26.d, z28.d, z24.d\n"
+ "trn2 z28.d, z28.d, z24.d\n"
+ ".inst 0x647fe768 // bfmmla z8.s, z27.h, z31.h\n"
+ ".inst 0x647ee76c // bfmmla z12.s, z27.h, z30.h\n"
+ ".inst 0x647fe750 // bfmmla z16.s, z26.h, z31.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x647ee754 // bfmmla z20.s, z26.h, z30.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6479e7a9 // bfmmla z9.s, z29.h, z25.h\n"
+ ".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z28.d, z28.d, z27.d\n"
- ".inst 0x6478e7ad // bfmmla z13.s, z29.h, z24.h\n"
+ ".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- ".inst 0x6479e7aa // bfmmla z10.s, z29.h, z25.h\n"
+ ".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x8\n"
- ".inst 0x6478e7ae // bfmmla z14.s, z29.h, z24.h\n"
+ ".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x6479e7ab // bfmmla z11.s, z29.h, z25.h\n"
+ ".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
- "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
- ".inst 0x6478e7af // bfmmla z15.s, z29.h, z24.h\n"
+ ".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
+ "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z24.h }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x6479e7c8 // bfmmla z8.s, z30.h, z25.h\n"
+ ".inst 0x6479e7a8 // bfmmla z8.s, z29.h, z25.h\n"
".inst 0x6479e790 // bfmmla z16.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6478e7cc // bfmmla z12.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ac // bfmmla z12.s, z29.h, z24.h\n"
".inst 0x6478e794 // bfmmla z20.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x6479e7c9 // bfmmla z9.s, z30.h, z25.h\n"
+ ".inst 0x6479e7a9 // bfmmla z9.s, z29.h, z25.h\n"
".inst 0x6479e791 // bfmmla z17.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x6478e7cd // bfmmla z13.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ad // bfmmla z13.s, z29.h, z24.h\n"
".inst 0x6478e795 // bfmmla z21.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x6479e7ca // bfmmla z10.s, z30.h, z25.h\n"
+ ".inst 0x6479e7aa // bfmmla z10.s, z29.h, z25.h\n"
".inst 0x6479e792 // bfmmla z18.s, z28.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x6478e7ce // bfmmla z14.s, z30.h, z24.h\n"
+ ".inst 0x6478e7ae // bfmmla z14.s, z29.h, z24.h\n"
".inst 0x6478e796 // bfmmla z22.s, z28.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x6479e7cb // bfmmla z11.s, z30.h, z25.h\n"
+ ".inst 0x6479e7ab // bfmmla z11.s, z29.h, z25.h\n"
".inst 0x6479e793 // bfmmla z19.s, z28.h, z25.h\n"
- ".inst 0x6478e7cf // bfmmla z15.s, z30.h, z24.h\n"
+ ".inst 0x6478e7af // bfmmla z15.s, z29.h, z24.h\n"
".inst 0x6478e797 // bfmmla z23.s, z28.h, z24.h\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z24.h }, p0/Z, [x25]\n"
- "trn1 z28.d, z1.d, z24.d\n"
+ "ld1rqh { z25.h }, p0/Z, [x25]\n"
"ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z27.h }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z24.d\n"
- "trn1 z26.d, z3.d, z27.d\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6479e788 // bfmmla z8.s, z28.h, z25.h\n"
- ".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e78c // bfmmla z12.s, z28.h, z24.h\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
+ "ld1rqh { z24.h }, p0/Z, [x23]\n"
+ "trn1 z27.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ "trn1 z26.d, z3.d, z24.d\n"
+ ".inst 0x647de768 // bfmmla z8.s, z27.h, z29.h\n"
+ ".inst 0x647ce76c // bfmmla z12.s, z27.h, z28.h\n"
+ "trn2 z3.d, z3.d, z24.d\n"
+ ".inst 0x647de750 // bfmmla z16.s, z26.h, z29.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x647ce754 // bfmmla z20.s, z26.h, z28.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6479e789 // bfmmla z9.s, z28.h, z25.h\n"
+ ".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- ".inst 0x6478e78d // bfmmla z13.s, z28.h, z24.h\n"
+ ".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z27.d\n"
- ".inst 0x6479e78a // bfmmla z10.s, z28.h, z25.h\n"
+ ".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6478e78e // bfmmla z14.s, z28.h, z24.h\n"
+ ".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #8\n"
- ".inst 0x6479e78b // bfmmla z11.s, z28.h, z25.h\n"
+ ".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
- ".inst 0x6478e78f // bfmmla z15.s, z28.h, z24.h\n"
+ ".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6479e428 // bfmmla z8.s, z1.h, z25.h\n"
".inst 0x6479e470 // bfmmla z16.s, z3.h, z25.h\n"
+ "ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x6478e42c // bfmmla z12.s, z1.h, z24.h\n"
".inst 0x6478e474 // bfmmla z20.s, z3.h, z24.h\n"
- "ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6479e429 // bfmmla z9.s, z1.h, z25.h\n"
".inst 0x6479e471 // bfmmla z17.s, z3.h, z25.h\n"
@@ -1069,17 +1071,17 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
"uzp1 z20.d, z17.d, z21.d\n"
@@ -1089,9 +1091,9 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z24.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z24.s }, p5/Z, [x21]\n"
"ld1rw { z23.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z24.s\n"
"fmin z12.s, p5/M, z12.s, z24.s\n"
@@ -1131,28 +1133,28 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1165,15 +1167,15 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cbz x12, 55f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1196,46 +1198,46 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x23, x9, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
"add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x22]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x21]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z17.d, z18.d, z21.d\n"
- "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x20]\n"
- "zip1 z18.d, z19.d, z22.d\n"
- "zip2 z22.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1274,8 +1276,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1303,102 +1305,103 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z1.h }, p5/Z, [x10]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z6.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z3.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z7.h }, p0/Z, [x24]\n"
"ld1rqh { z2.h }, p0/Z, [x23]\n"
- "trn1 z5.d, z6.d, z1.d\n"
- "trn2 z6.d, z6.d, z1.d\n"
- "ld1rqh { z4.h }, p0/Z, [x22]\n"
+ "ld1rqh { z5.h }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
"trn1 z3.d, z7.d, z2.d\n"
"trn2 z7.d, z7.d, z2.d\n"
- "ld1h { z1.h }, p5/Z, [x10]\n"
- "trn1 z2.d, z4.d, z0.d\n"
- "trn2 z4.d, z4.d, z0.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6461e4a8 // bfmmla z8.s, z5.h, z1.h\n"
+ ".inst 0x6461e488 // bfmmla z8.s, z4.h, z1.h\n"
".inst 0x6461e470 // bfmmla z16.s, z3.h, z1.h\n"
".inst 0x6461e458 // bfmmla z24.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- ".inst 0x6460e4ac // bfmmla z12.s, z5.h, z0.h\n"
+ ".inst 0x6460e48c // bfmmla z12.s, z4.h, z0.h\n"
".inst 0x6460e474 // bfmmla z20.s, z3.h, z0.h\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
".inst 0x6460e45c // bfmmla z28.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
- "add x25, x25, #0x10\n"
+ ".inst 0x6461e489 // bfmmla z9.s, z4.h, z1.h\n"
".inst 0x6461e471 // bfmmla z17.s, z3.h, z1.h\n"
".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6460e4ad // bfmmla z13.s, z5.h, z0.h\n"
+ ".inst 0x6460e48d // bfmmla z13.s, z4.h, z0.h\n"
".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x6460e45d // bfmmla z29.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6461e4aa // bfmmla z10.s, z5.h, z1.h\n"
+ ".inst 0x6461e48a // bfmmla z10.s, z4.h, z1.h\n"
".inst 0x6461e472 // bfmmla z18.s, z3.h, z1.h\n"
".inst 0x6461e45a // bfmmla z26.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6460e4ae // bfmmla z14.s, z5.h, z0.h\n"
+ ".inst 0x6460e48e // bfmmla z14.s, z4.h, z0.h\n"
".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e45e // bfmmla z30.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
+ ".inst 0x6461e48b // bfmmla z11.s, z4.h, z1.h\n"
".inst 0x6461e473 // bfmmla z19.s, z3.h, z1.h\n"
".inst 0x6461e45b // bfmmla z27.s, z2.h, z1.h\n"
- "ld1h { z1.h }, p5/Z, [x10, #-8, MUL VL]\n"
- ".inst 0x6460e4af // bfmmla z15.s, z5.h, z0.h\n"
+ ".inst 0x6460e48f // bfmmla z15.s, z4.h, z0.h\n"
".inst 0x6460e477 // bfmmla z23.s, z3.h, z0.h\n"
".inst 0x6460e45f // bfmmla z31.s, z2.h, z0.h\n"
+ "ld1h { z1.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z0.h }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
".inst 0x6461e4f0 // bfmmla z16.s, z7.h, z1.h\n"
- ".inst 0x6461e498 // bfmmla z24.s, z4.h, z1.h\n"
+ ".inst 0x6461e4b8 // bfmmla z24.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #-6, MUL VL]\n"
".inst 0x6460e4cc // bfmmla z12.s, z6.h, z0.h\n"
".inst 0x6460e4f4 // bfmmla z20.s, z7.h, z0.h\n"
- ".inst 0x6460e49c // bfmmla z28.s, z4.h, z0.h\n"
+ ".inst 0x6460e4bc // bfmmla z28.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
".inst 0x6461e4f1 // bfmmla z17.s, z7.h, z1.h\n"
- ".inst 0x6461e499 // bfmmla z25.s, z4.h, z1.h\n"
+ ".inst 0x6461e4b9 // bfmmla z25.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #-4, MUL VL]\n"
".inst 0x6460e4cd // bfmmla z13.s, z6.h, z0.h\n"
".inst 0x6460e4f5 // bfmmla z21.s, z7.h, z0.h\n"
- ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
+ ".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
".inst 0x6461e4f2 // bfmmla z18.s, z7.h, z1.h\n"
- ".inst 0x6461e49a // bfmmla z26.s, z4.h, z1.h\n"
+ ".inst 0x6461e4ba // bfmmla z26.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #-2, MUL VL]\n"
".inst 0x6460e4ce // bfmmla z14.s, z6.h, z0.h\n"
".inst 0x6460e4f6 // bfmmla z22.s, z7.h, z0.h\n"
- ".inst 0x6460e49e // bfmmla z30.s, z4.h, z0.h\n"
+ ".inst 0x6460e4be // bfmmla z30.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #-1, MUL VL]\n"
".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
".inst 0x6461e4f3 // bfmmla z19.s, z7.h, z1.h\n"
- ".inst 0x6461e49b // bfmmla z27.s, z4.h, z1.h\n"
+ ".inst 0x6461e4bb // bfmmla z27.s, z5.h, z1.h\n"
".inst 0x6460e4cf // bfmmla z15.s, z6.h, z0.h\n"
".inst 0x6460e4f7 // bfmmla z23.s, z7.h, z0.h\n"
- ".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
+ ".inst 0x6460e4bf // bfmmla z31.s, z5.h, z0.h\n"
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z2.h }, p5/Z, [x10]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z4.h }, p0/Z, [x25]\n"
+ "ld1rqh { z6.h }, p0/Z, [x25]\n"
"ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z2.h }, p0/Z, [x23]\n"
- "trn1 z7.d, z1.d, z4.d\n"
- "trn2 z1.d, z1.d, z4.d\n"
+ "ld1rqh { z4.h }, p0/Z, [x23]\n"
"ld1rqh { z5.h }, p0/Z, [x22]\n"
- "trn1 z6.d, z3.d, z2.d\n"
- "trn2 z3.d, z3.d, z2.d\n"
- "ld1h { z2.h }, p5/Z, [x10]\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
@@ -1406,7 +1409,6 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6462e4d0 // bfmmla z16.s, z6.h, z2.h\n"
".inst 0x6462e498 // bfmmla z24.s, z4.h, z2.h\n"
"ld1h { z2.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6460e4ec // bfmmla z12.s, z7.h, z0.h\n"
".inst 0x6460e4d4 // bfmmla z20.s, z6.h, z0.h\n"
".inst 0x6460e49c // bfmmla z28.s, z4.h, z0.h\n"
@@ -1427,8 +1429,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6460e4d6 // bfmmla z22.s, z6.h, z0.h\n"
".inst 0x6460e49e // bfmmla z30.s, z4.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6462e4eb // bfmmla z11.s, z7.h, z2.h\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6462e4eb // bfmmla z11.s, z7.h, z2.h\n"
".inst 0x6462e4d3 // bfmmla z19.s, z6.h, z2.h\n"
".inst 0x6462e49b // bfmmla z27.s, z4.h, z2.h\n"
".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
@@ -1440,24 +1442,24 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6462e428 // bfmmla z8.s, z1.h, z2.h\n"
".inst 0x6462e470 // bfmmla z16.s, z3.h, z2.h\n"
".inst 0x6462e4b8 // bfmmla z24.s, z5.h, z2.h\n"
- ".inst 0x6460e42c // bfmmla z12.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6460e42c // bfmmla z12.s, z1.h, z0.h\n"
".inst 0x6460e474 // bfmmla z20.s, z3.h, z0.h\n"
".inst 0x6460e4bc // bfmmla z28.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6462e429 // bfmmla z9.s, z1.h, z2.h\n"
".inst 0x6462e471 // bfmmla z17.s, z3.h, z2.h\n"
".inst 0x6462e4b9 // bfmmla z25.s, z5.h, z2.h\n"
- ".inst 0x6460e42d // bfmmla z13.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6460e42d // bfmmla z13.s, z1.h, z0.h\n"
".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6462e42a // bfmmla z10.s, z1.h, z2.h\n"
".inst 0x6462e472 // bfmmla z18.s, z3.h, z2.h\n"
".inst 0x6462e4ba // bfmmla z26.s, z5.h, z2.h\n"
- ".inst 0x6460e42e // bfmmla z14.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6460e42e // bfmmla z14.s, z1.h, z0.h\n"
".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e4be // bfmmla z30.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -1474,20 +1476,20 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 58b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1499,9 +1501,9 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z26.d, z26.d, z30.d\n"
"uzp1 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x21]\n"
"ld1rw { z23.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z0.s\n"
"fmin z12.s, p5/M, z12.s, z0.s\n"
@@ -1549,22 +1551,22 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1572,12 +1574,13 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"b 80f\n"
"66:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1590,15 +1593,15 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cbz x12, 68f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1621,54 +1624,54 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "ld1w { z17.s }, p4/Z, [x9]\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
- "zip1 z8.d, z17.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "zip2 z12.d, z17.d, z12.d\n"
- "zip1 z9.d, z18.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x23]\n"
- "zip2 z13.d, z18.d, z13.d\n"
- "zip1 z10.d, z20.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip2 z14.d, z20.d, z14.d\n"
- "zip1 z11.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x22]\n"
- "zip2 z15.d, z16.d, z15.d\n"
- "zip1 z16.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip2 z20.d, z17.d, z20.d\n"
- "zip1 z17.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x21]\n"
- "zip2 z21.d, z18.d, z21.d\n"
- "zip1 z18.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip2 z22.d, z19.d, z22.d\n"
- "zip1 z19.d, z24.d, z23.d\n"
"ld1w { z0.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z28.s }, p4/Z, [x20]\n"
- "zip2 z23.d, z24.d, z23.d\n"
- "zip1 z24.d, z25.d, z28.d\n"
"ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1704,8 +1707,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1736,113 +1739,113 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z7.h }, p0/Z, [x26]\n"
- "ld1rqh { z0.h }, p0/Z, [x25]\n"
- "trn1 z6.d, z7.d, z0.d\n"
- "ld1rqh { z5.h }, p0/Z, [x24]\n"
- "ld1rqh { z1.h }, p0/Z, [x23]\n"
- "trn2 z7.d, z7.d, z0.d\n"
- "trn1 z4.d, z5.d, z1.d\n"
- "ld1rqh { z3.h }, p0/Z, [x22]\n"
- "ld1rqh { z0.h }, p0/Z, [x21]\n"
- "trn2 z5.d, z5.d, z1.d\n"
- "trn1 z2.d, z3.d, z0.d\n"
- "trn2 z3.d, z3.d, z0.d\n"
"ld1h { z1.h }, p5/Z, [x10]\n"
- "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
- ".inst 0x6461e490 // bfmmla z16.s, z4.h, z1.h\n"
- ".inst 0x6461e458 // bfmmla z24.s, z2.h, z1.h\n"
- "ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
"sub x27, x27, #0x8\n"
- ".inst 0x6460e4cc // bfmmla z12.s, z6.h, z0.h\n"
- ".inst 0x6460e494 // bfmmla z20.s, z4.h, z0.h\n"
"cmp x27, #0x8\n"
+ "ld1rqh { z6.h }, p0/Z, [x26]\n"
"add x26, x26, #0x10\n"
- ".inst 0x6460e45c // bfmmla z28.s, z2.h, z0.h\n"
- "ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
+ "ld1rqh { z3.h }, p0/Z, [x25]\n"
"add x25, x25, #0x10\n"
- ".inst 0x6461e491 // bfmmla z17.s, z4.h, z1.h\n"
- ".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
- "ld1h { z1.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1rqh { z7.h }, p0/Z, [x24]\n"
"add x24, x24, #0x10\n"
- ".inst 0x6460e4cd // bfmmla z13.s, z6.h, z0.h\n"
- ".inst 0x6460e495 // bfmmla z21.s, z4.h, z0.h\n"
+ "ld1rqh { z2.h }, p0/Z, [x23]\n"
+ "ld1rqh { z5.h }, p0/Z, [x22]\n"
+ "ld1rqh { z0.h }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
+ "add x21, x21, #0x10\n"
+ "trn1 z3.d, z7.d, z2.d\n"
+ "trn2 z7.d, z7.d, z2.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x6461e488 // bfmmla z8.s, z4.h, z1.h\n"
+ ".inst 0x6461e470 // bfmmla z16.s, z3.h, z1.h\n"
+ ".inst 0x6461e458 // bfmmla z24.s, z2.h, z1.h\n"
+ "ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6460e48c // bfmmla z12.s, z4.h, z0.h\n"
+ ".inst 0x6460e474 // bfmmla z20.s, z3.h, z0.h\n"
+ ".inst 0x6460e45c // bfmmla z28.s, z2.h, z0.h\n"
+ "ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6461e489 // bfmmla z9.s, z4.h, z1.h\n"
+ ".inst 0x6461e471 // bfmmla z17.s, z3.h, z1.h\n"
+ ".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
+ "ld1h { z1.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6460e48d // bfmmla z13.s, z4.h, z0.h\n"
+ ".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
".inst 0x6460e45d // bfmmla z29.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
- "add x21, x21, #0x10\n"
- ".inst 0x6461e492 // bfmmla z18.s, z4.h, z1.h\n"
+ ".inst 0x6461e48a // bfmmla z10.s, z4.h, z1.h\n"
+ ".inst 0x6461e472 // bfmmla z18.s, z3.h, z1.h\n"
".inst 0x6461e45a // bfmmla z26.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6460e4ce // bfmmla z14.s, z6.h, z0.h\n"
- ".inst 0x6460e496 // bfmmla z22.s, z4.h, z0.h\n"
+ ".inst 0x6460e48e // bfmmla z14.s, z4.h, z0.h\n"
+ ".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e45e // bfmmla z30.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
- ".inst 0x6461e493 // bfmmla z19.s, z4.h, z1.h\n"
+ ".inst 0x6461e48b // bfmmla z11.s, z4.h, z1.h\n"
+ ".inst 0x6461e473 // bfmmla z19.s, z3.h, z1.h\n"
".inst 0x6461e45b // bfmmla z27.s, z2.h, z1.h\n"
- "ld1h { z1.h }, p5/Z, [x10, #-8, MUL VL]\n"
- ".inst 0x6460e4cf // bfmmla z15.s, z6.h, z0.h\n"
- ".inst 0x6460e497 // bfmmla z23.s, z4.h, z0.h\n"
+ ".inst 0x6460e48f // bfmmla z15.s, z4.h, z0.h\n"
+ ".inst 0x6460e477 // bfmmla z23.s, z3.h, z0.h\n"
".inst 0x6460e45f // bfmmla z31.s, z2.h, z0.h\n"
+ "ld1h { z1.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z0.h }, p5/Z, [x10, #-7, MUL VL]\n"
- ".inst 0x6461e4e8 // bfmmla z8.s, z7.h, z1.h\n"
- ".inst 0x6461e4b0 // bfmmla z16.s, z5.h, z1.h\n"
- ".inst 0x6461e478 // bfmmla z24.s, z3.h, z1.h\n"
+ ".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
+ ".inst 0x6461e4f0 // bfmmla z16.s, z7.h, z1.h\n"
+ ".inst 0x6461e4b8 // bfmmla z24.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #-6, MUL VL]\n"
- ".inst 0x6460e4ec // bfmmla z12.s, z7.h, z0.h\n"
- ".inst 0x6460e4b4 // bfmmla z20.s, z5.h, z0.h\n"
- ".inst 0x6460e47c // bfmmla z28.s, z3.h, z0.h\n"
+ ".inst 0x6460e4cc // bfmmla z12.s, z6.h, z0.h\n"
+ ".inst 0x6460e4f4 // bfmmla z20.s, z7.h, z0.h\n"
+ ".inst 0x6460e4bc // bfmmla z28.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x6461e4e9 // bfmmla z9.s, z7.h, z1.h\n"
- ".inst 0x6461e4b1 // bfmmla z17.s, z5.h, z1.h\n"
- ".inst 0x6461e479 // bfmmla z25.s, z3.h, z1.h\n"
+ ".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
+ ".inst 0x6461e4f1 // bfmmla z17.s, z7.h, z1.h\n"
+ ".inst 0x6461e4b9 // bfmmla z25.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x6460e4ed // bfmmla z13.s, z7.h, z0.h\n"
- ".inst 0x6460e4b5 // bfmmla z21.s, z5.h, z0.h\n"
- ".inst 0x6460e47d // bfmmla z29.s, z3.h, z0.h\n"
+ ".inst 0x6460e4cd // bfmmla z13.s, z6.h, z0.h\n"
+ ".inst 0x6460e4f5 // bfmmla z21.s, z7.h, z0.h\n"
+ ".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x6461e4ea // bfmmla z10.s, z7.h, z1.h\n"
- ".inst 0x6461e4b2 // bfmmla z18.s, z5.h, z1.h\n"
- ".inst 0x6461e47a // bfmmla z26.s, z3.h, z1.h\n"
+ ".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
+ ".inst 0x6461e4f2 // bfmmla z18.s, z7.h, z1.h\n"
+ ".inst 0x6461e4ba // bfmmla z26.s, z5.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x6460e4ee // bfmmla z14.s, z7.h, z0.h\n"
- ".inst 0x6460e4b6 // bfmmla z22.s, z5.h, z0.h\n"
- ".inst 0x6460e47e // bfmmla z30.s, z3.h, z0.h\n"
+ ".inst 0x6460e4ce // bfmmla z14.s, z6.h, z0.h\n"
+ ".inst 0x6460e4f6 // bfmmla z22.s, z7.h, z0.h\n"
+ ".inst 0x6460e4be // bfmmla z30.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x6461e4eb // bfmmla z11.s, z7.h, z1.h\n"
- ".inst 0x6461e4b3 // bfmmla z19.s, z5.h, z1.h\n"
- ".inst 0x6461e47b // bfmmla z27.s, z3.h, z1.h\n"
- ".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
- ".inst 0x6460e4b7 // bfmmla z23.s, z5.h, z0.h\n"
- ".inst 0x6460e47f // bfmmla z31.s, z3.h, z0.h\n"
+ ".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
+ ".inst 0x6461e4f3 // bfmmla z19.s, z7.h, z1.h\n"
+ ".inst 0x6461e4bb // bfmmla z27.s, z5.h, z1.h\n"
+ ".inst 0x6460e4cf // bfmmla z15.s, z6.h, z0.h\n"
+ ".inst 0x6460e4f7 // bfmmla z23.s, z7.h, z0.h\n"
+ ".inst 0x6460e4bf // bfmmla z31.s, z5.h, z0.h\n"
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z2.h }, p5/Z, [x10]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z0.h }, p0/Z, [x25]\n"
- "trn1 z7.d, z1.d, z0.d\n"
+ "ld1rqh { z6.h }, p0/Z, [x25]\n"
"ld1rqh { z3.h }, p0/Z, [x24]\n"
- "ld1rqh { z2.h }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z0.d\n"
- "trn1 z6.d, z3.d, z2.d\n"
+ "ld1rqh { z4.h }, p0/Z, [x23]\n"
"ld1rqh { z5.h }, p0/Z, [x22]\n"
"ld1rqh { z0.h }, p0/Z, [x21]\n"
- "trn2 z3.d, z3.d, z2.d\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
- "ld1h { z2.h }, p5/Z, [x10]\n"
"ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6462e4e8 // bfmmla z8.s, z7.h, z2.h\n"
".inst 0x6462e4d0 // bfmmla z16.s, z6.h, z2.h\n"
".inst 0x6462e498 // bfmmla z24.s, z4.h, z2.h\n"
"ld1h { z2.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
".inst 0x6460e4ec // bfmmla z12.s, z7.h, z0.h\n"
".inst 0x6460e4d4 // bfmmla z20.s, z6.h, z0.h\n"
".inst 0x6460e49c // bfmmla z28.s, z4.h, z0.h\n"
@@ -1863,8 +1866,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6460e4d6 // bfmmla z22.s, z6.h, z0.h\n"
".inst 0x6460e49e // bfmmla z30.s, z4.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6462e4eb // bfmmla z11.s, z7.h, z2.h\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6462e4eb // bfmmla z11.s, z7.h, z2.h\n"
".inst 0x6462e4d3 // bfmmla z19.s, z6.h, z2.h\n"
".inst 0x6462e49b // bfmmla z27.s, z4.h, z2.h\n"
".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
@@ -1876,24 +1879,24 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
".inst 0x6462e428 // bfmmla z8.s, z1.h, z2.h\n"
".inst 0x6462e470 // bfmmla z16.s, z3.h, z2.h\n"
".inst 0x6462e4b8 // bfmmla z24.s, z5.h, z2.h\n"
- ".inst 0x6460e42c // bfmmla z12.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6460e42c // bfmmla z12.s, z1.h, z0.h\n"
".inst 0x6460e474 // bfmmla z20.s, z3.h, z0.h\n"
".inst 0x6460e4bc // bfmmla z28.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6462e429 // bfmmla z9.s, z1.h, z2.h\n"
".inst 0x6462e471 // bfmmla z17.s, z3.h, z2.h\n"
".inst 0x6462e4b9 // bfmmla z25.s, z5.h, z2.h\n"
- ".inst 0x6460e42d // bfmmla z13.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6460e42d // bfmmla z13.s, z1.h, z0.h\n"
".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6462e42a // bfmmla z10.s, z1.h, z2.h\n"
".inst 0x6462e472 // bfmmla z18.s, z3.h, z2.h\n"
".inst 0x6462e4ba // bfmmla z26.s, z5.h, z2.h\n"
- ".inst 0x6460e42e // bfmmla z14.s, z1.h, z0.h\n"
"ld1h { z2.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6460e42e // bfmmla z14.s, z1.h, z0.h\n"
".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e4be // bfmmla z30.s, z5.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -1910,21 +1913,21 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 71b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z7.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1940,9 +1943,9 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"uzp1 z30.d, z27.d, z31.d\n"
"uzp2 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x21]\n"
"ld1rw { z0.s }, p5/Z, [x20]\n"
"fmin z7.s, p5/M, z7.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
@@ -1998,26 +2001,26 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z23.s }, p4, [x22]\n"
- "st1w { z28.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z23.s }, p4, [x23]\n"
+ "st1w { z28.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x22]\n"
+ "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -2034,8 +2037,8 @@ void sve_hybrid_bf16fp32_mmla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
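The hunks in this file, and the matching ones in the kernels below, all make the same mechanical change: `mov x12, %x[bias]` / `mov x9, %x[output_ptr]` become `ldr` from `[%x[args_ptr], %[offsetof_bias]]` / `[%x[args_ptr], %[offsetof_output_ptr]]`, and `bias` and `output_ptr` disappear from the asm operand lists. What follows is a minimal C++ sketch of that argument-block pattern, not the actual generated source: member names mirror the `offsetof_*` tokens visible in the diff, while any type the hunks do not show is an assumption.

#include <cstddef> // offsetof

// Hypothetical reduction of the per-kernel argument block; the generated
// kernels define an equivalent struct locally inside each kernel function.
struct KernelArgs {
    size_t num_strings = {};
    const unsigned int *string_lengths = {}; // type assumed
    size_t N = {};
    const void *B_ptr = {};                  // type assumed
    size_t output_offset = {};
    size_t input_initial_col = {};
    size_t input_offset = {};
    void *output_ptr = {};  // new: was a separate "+&r"(output_ptr) operand
    const void *bias = {};  // new: was a separate "r"(bias) operand
    float minval = {};      // element type varies per kernel (e.g. __fp16)
    float maxval = {};
};

void run_kernel_sketch(KernelArgs &ka) {
    __asm__ __volatile__(
        // Fetch what used to be register-pinned operands from the block:
        "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
        "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
        :
        : [args_ptr] "r"(&ka),
          [offsetof_bias] "I"(offsetof(KernelArgs, bias)),
          [offsetof_output_ptr] "I"(offsetof(KernelArgs, output_ptr))
        : "cc", "memory", "x9", "x12");
}

One consequence is visible in the Height 6 hunk above: with `output_ptr` held in memory rather than in a `"+&r"` operand, the kernel can advance it via `madd x20, x21, x20, x9` followed by `str x20, [%x[args_ptr], %[offsetof_output_ptr]]`, freeing a general-purpose register and shrinking the operand list to `[M]` and `[input_ptr]`.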
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp
index b930e4c0d5..ec754b6435 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 1> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
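The one-line interface hunk above adds an explicit LHS operand type ahead of the RHS and result types in the transforms helper. A sketch of the shape of that change — the parameter names, and everything beyond the three type arguments, are assumptions made only to mirror the `<..., 6, 4, 1>` usage in the hunk:

template <typename lhs_operand_type, typename rhs_operand_type,
          typename result_type,
          unsigned int height_vectors, unsigned int width_vectors,
          unsigned int kblock>
class StdTransformsSVE { /* transforms for LHS, RHS and result ... */ };

// Before: StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 1> transforms = {};
// After:  StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 1> transforms = {};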
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
index d1a9bb4a26..93462461ad 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const __fp16 *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -102,10 +104,10 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 25f\n"
"beq 13f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p3.h, x20, x11\n"
@@ -138,8 +140,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -158,14 +160,14 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop
"fmla z8.h, p4/M, z6.h, z0.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z17.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z16.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"add x26, x26, #0x2\n"
+ "subs x27, x27, #0x1\n"
"fmla z10.h, p4/M, z17.h, z0.h\n"
"fmla z11.h, p4/M, z16.h, z0.h\n"
- "subs x27, x27, #0x1\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
"ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
@@ -173,19 +175,19 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"10:" // Height 1: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.h, p4/M, z6.h, z0.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z17.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z16.h }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
+ "addvl x10, x10, #4\n"
"cmp x28, x20\n"
"fmla z10.h, p4/M, z17.h, z0.h\n"
"fmla z11.h, p4/M, z16.h, z0.h\n"
- "addvl x10, x10, #4\n"
"bne 6b\n"
"tbz %x[flags], #1, 11f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z17.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z17.h }, p4/Z, [x21]\n"
"ld1rh { z16.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z17.h\n"
"fmin z9.h, p4/M, z9.h, z17.h\n"
@@ -207,10 +209,10 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"bgt 2b\n"
"b 74f\n"
"13:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"14:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p3.h, x20, x11\n"
@@ -223,22 +225,22 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"cbz x12, 15f\n"
"ld1h { z8.h }, p4/Z, [x12]\n"
"ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
"b 17f\n"
"15:" // Height 2: no bias
"tbz %x[flags], #0, 16f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x9]\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x20]\n"
"ld1h { z13.h }, p2/Z, [x20, #1, MUL VL]\n"
"ld1h { z14.h }, p1/Z, [x20, #2, MUL VL]\n"
@@ -257,8 +259,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -293,8 +295,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z10.h, p4/M, z17.h, z0.h\n"
"fmla z14.h, p4/M, z17.h, z1.h\n"
"fmla z11.h, p4/M, z16.h, z0.h\n"
- "fmla z15.h, p4/M, z16.h, z1.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
+ "fmla z15.h, p4/M, z16.h, z1.h\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
"ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
@@ -308,19 +310,19 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z13.h, p4/M, z7.h, z1.h\n"
"ld1h { z16.h }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
+ "addvl x10, x10, #4\n"
"cmp x28, x20\n"
"fmla z10.h, p4/M, z17.h, z0.h\n"
"fmla z14.h, p4/M, z17.h, z1.h\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, p4/M, z16.h, z0.h\n"
"fmla z15.h, p4/M, z16.h, z1.h\n"
"bne 18b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z17.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z17.h }, p4/Z, [x21]\n"
"ld1rh { z16.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z17.h\n"
"fmin z9.h, p4/M, z9.h, z17.h\n"
@@ -344,20 +346,20 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
"24:" // Height 2: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 14b\n"
"b 74f\n"
"25:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"26:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p3.h, x20, x11\n"
@@ -370,27 +372,27 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"cbz x12, 27f\n"
"ld1h { z8.h }, p4/Z, [x12]\n"
"ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 29f\n"
"27:" // Height 3: no bias
"tbz %x[flags], #0, 28f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x9]\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x21]\n"
"ld1h { z13.h }, p2/Z, [x21, #1, MUL VL]\n"
"ld1h { z14.h }, p1/Z, [x21, #2, MUL VL]\n"
@@ -417,8 +419,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -449,8 +451,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x26, x26, #0x2\n"
"subs x27, x27, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z21.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"add x25, x25, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
@@ -462,11 +464,11 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z18.h, p4/M, z21.h, z2.h\n"
"fmla z11.h, p4/M, z20.h, z0.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
"fmla z15.h, p4/M, z20.h, z1.h\n"
- "fmla z19.h, p4/M, z20.h, z2.h\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
+ "fmla z19.h, p4/M, z20.h, z2.h\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
+ "ld1h { z6.h }, p4/Z, [x10]\n"
"ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Main loop skip
@@ -475,13 +477,13 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z12.h, p4/M, z6.h, z1.h\n"
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z21.h }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"ld1h { z20.h }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
+ "cmp x28, x20\n"
"fmla z10.h, p4/M, z21.h, z0.h\n"
"fmla z14.h, p4/M, z21.h, z1.h\n"
"fmla z18.h, p4/M, z21.h, z2.h\n"
@@ -490,12 +492,12 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z19.h, p4/M, z20.h, z2.h\n"
"bne 30b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"tbz %x[flags], #1, 35f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z21.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z21.h }, p4/Z, [x21]\n"
"ld1rh { z20.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z21.h\n"
"fmin z9.h, p4/M, z9.h, z21.h\n"
@@ -527,24 +529,24 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x25, #3, MUL VL]\n"
"36:" // Height 3: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 26b\n"
"b 74f\n"
"37:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"38:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p3.h, x20, x11\n"
@@ -557,18 +559,18 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"cbz x12, 39f\n"
"ld1h { z8.h }, p4/Z, [x12]\n"
"ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -576,13 +578,13 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"39:" // Height 4: no bias
"tbz %x[flags], #0, 40f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x9]\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x22]\n"
"ld1h { z13.h }, p2/Z, [x22, #1, MUL VL]\n"
"ld1h { z14.h }, p1/Z, [x22, #2, MUL VL]\n"
@@ -617,8 +619,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"42:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 43f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -668,7 +670,6 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z14.h, p4/M, z25.h, z1.h\n"
"fmla z18.h, p4/M, z25.h, z2.h\n"
"fmla z22.h, p4/M, z25.h, z3.h\n"
- "ld1h { z6.h }, p4/Z, [x10]\n"
"fmla z11.h, p4/M, z24.h, z0.h\n"
"fmla z15.h, p4/M, z24.h, z1.h\n"
"ld1rh { z0.h }, p4/Z, [x26]\n"
@@ -677,6 +678,7 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z23.h, p4/M, z24.h, z3.h\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
"ld1rh { z3.h }, p4/Z, [x23]\n"
+ "ld1h { z6.h }, p4/Z, [x10]\n"
"ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 45b\n"
"46:" // Height 4: Multiply loop: Main loop skip
@@ -687,15 +689,15 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
"ld1h { z25.h }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"ld1h { z24.h }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "cmp x28, x20\n"
"fmla z10.h, p4/M, z25.h, z0.h\n"
"fmla z14.h, p4/M, z25.h, z1.h\n"
+ "addvl x10, x10, #4\n"
"fmla z18.h, p4/M, z25.h, z2.h\n"
"fmla z22.h, p4/M, z25.h, z3.h\n"
"fmla z11.h, p4/M, z24.h, z0.h\n"
@@ -704,13 +706,13 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z23.h, p4/M, z24.h, z3.h\n"
"bne 42b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"tbz %x[flags], #1, 47f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z25.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z25.h }, p4/Z, [x21]\n"
"ld1rh { z24.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z25.h\n"
"fmin z9.h, p4/M, z9.h, z25.h\n"
@@ -750,28 +752,28 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x24]\n"
+ "st1h { z21.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x24, #3, MUL VL]\n"
"48:" // Height 4: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 38b\n"
"b 74f\n"
"49:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"50:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p3.h, x20, x11\n"
@@ -784,18 +786,18 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"cbz x12, 51f\n"
"ld1h { z8.h }, p4/Z, [x12]\n"
"ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -807,16 +809,16 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"51:" // Height 5: no bias
"tbz %x[flags], #0, 52f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x9]\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x23]\n"
"ld1h { z13.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z14.h }, p1/Z, [x23, #2, MUL VL]\n"
"ld1h { z15.h }, p0/Z, [x23, #3, MUL VL]\n"
"ld1h { z16.h }, p3/Z, [x22]\n"
@@ -857,8 +859,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"54:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 55f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -901,8 +903,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x25, x25, #0x2\n"
"add x24, x24, #0x2\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z29.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"add x23, x23, #0x2\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
@@ -920,12 +922,12 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"ld1rh { z0.h }, p4/Z, [x26]\n"
"ld1h { z6.h }, p4/Z, [x10]\n"
"fmla z15.h, p4/M, z28.h, z1.h\n"
- "fmla z19.h, p4/M, z28.h, z2.h\n"
"ld1rh { z1.h }, p4/Z, [x25]\n"
+ "fmla z19.h, p4/M, z28.h, z2.h\n"
"ld1rh { z2.h }, p4/Z, [x24]\n"
"fmla z23.h, p4/M, z28.h, z3.h\n"
- "fmla z27.h, p4/M, z28.h, z4.h\n"
"ld1rh { z3.h }, p4/Z, [x23]\n"
+ "fmla z27.h, p4/M, z28.h, z4.h\n"
"ld1rh { z4.h }, p4/Z, [x22]\n"
"ld1h { z7.h }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 57b\n"
@@ -936,12 +938,12 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "cmp x28, x20\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
- "fmla z9.h, p4/M, z7.h, z0.h\n"
"ld1h { z29.h }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
+ "cmp x28, x20\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
"ld1h { z28.h }, p4/Z, [x10, #3, MUL VL]\n"
@@ -958,14 +960,14 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z27.h, p4/M, z28.h, z4.h\n"
"bne 54b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"tbz %x[flags], #1, 59f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z29.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z29.h }, p4/Z, [x21]\n"
"ld1rh { z28.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z29.h\n"
"fmin z9.h, p4/M, z9.h, z29.h\n"
@@ -1013,22 +1015,22 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x22]\n"
- "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p0, [x22, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x24]\n"
+ "st1h { z21.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x23]\n"
+ "st1h { z25.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x23, #3, MUL VL]\n"
"60:" // Height 5: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1036,12 +1038,13 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"b 74f\n"
"61:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0xc\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"62:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p3.h, x20, x11\n"
@@ -1054,18 +1057,18 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"cbz x12, 63f\n"
"ld1h { z8.h }, p4/Z, [x12]\n"
"ld1h { z9.h }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p4/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p4/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1081,17 +1084,17 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"63:" // Height 6: no bias
"tbz %x[flags], #0, 64f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"ld1h { z8.h }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"ld1h { z9.h }, p2/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p1/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z11.h }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #1\n"
+ "add x23, x24, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
"ld1h { z12.h }, p3/Z, [x24]\n"
"ld1h { z13.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z14.h }, p1/Z, [x24, #2, MUL VL]\n"
"ld1h { z15.h }, p0/Z, [x24, #3, MUL VL]\n"
"ld1h { z16.h }, p3/Z, [x23]\n"
@@ -1140,8 +1143,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"66:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 67f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1229,12 +1232,12 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.h, p4/M, z6.h, z2.h\n"
"fmla z20.h, p4/M, z6.h, z3.h\n"
- "cmp x28, x20\n"
"fmla z24.h, p4/M, z6.h, z4.h\n"
"fmla z28.h, p4/M, z6.h, z5.h\n"
"ld1h { z6.h }, p4/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, p4/M, z7.h, z0.h\n"
"fmla z13.h, p4/M, z7.h, z1.h\n"
+ "cmp x28, x20\n"
"fmla z17.h, p4/M, z7.h, z2.h\n"
"fmla z21.h, p4/M, z7.h, z3.h\n"
"fmla z25.h, p4/M, z7.h, z4.h\n"
@@ -1255,15 +1258,15 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"fmla z31.h, p4/M, z7.h, z5.h\n"
"bne 66b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"tbz %x[flags], #1, 71f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p4/Z, [x21]\n"
"ld1rh { z0.h }, p4/Z, [x20]\n"
"fmin z8.h, p4/M, z8.h, z1.h\n"
"fmin z9.h, p4/M, z9.h, z1.h\n"
@@ -1319,26 +1322,26 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"st1h { z10.h }, p1, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p3, [x25]\n"
- "st1h { z13.h }, p2, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p1, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p0, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p3, [x24]\n"
- "st1h { z17.h }, p2, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p1, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p0, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p3, [x23]\n"
- "st1h { z21.h }, p2, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p1, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p0, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x22]\n"
- "st1h { z25.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p0, [x22, #3, MUL VL]\n"
- "st1h { z28.h }, p3, [x21]\n"
- "st1h { z29.h }, p2, [x21, #1, MUL VL]\n"
- "st1h { z30.h }, p1, [x21, #2, MUL VL]\n"
- "st1h { z31.h }, p0, [x21, #3, MUL VL]\n"
+ "st1h { z12.h }, p3, [x26]\n"
+ "st1h { z13.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p1, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p0, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p3, [x25]\n"
+ "st1h { z17.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p1, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p0, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p3, [x24]\n"
+ "st1h { z21.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p1, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p0, [x24, #3, MUL VL]\n"
+ "st1h { z24.h }, p3, [x23]\n"
+ "st1h { z25.h }, p2, [x23, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x23, #2, MUL VL]\n"
+ "st1h { z27.h }, p0, [x23, #3, MUL VL]\n"
+ "st1h { z28.h }, p3, [x22]\n"
+ "st1h { z29.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z30.h }, p1, [x22, #2, MUL VL]\n"
+ "st1h { z31.h }, p0, [x22, #3, MUL VL]\n"
"72:" // Height 6: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1355,8 +1358,8 @@ void sve_hybrid_fp16_mla_6x4VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"74:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
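
(Editorial note, not part of the patch: a minimal C++ sketch of the pattern these hunks migrate to. Rather than binding bias and output_ptr as their own inline-asm operands, which pins a general-purpose register each for the entire asm block, the pointers are stored in the KernelArgs struct and loaded on demand through offsetof() constants, freeing registers for the kernel body. The struct layout and register choices below are hypothetical; only the load-via-args_ptr idiom mirrors the diff.)

    // Sketch: loading kernel arguments from a struct inside inline asm,
    // as in "ldr x12, [%x[args_ptr], %[offsetof_bias]]" above.
    #include <cstddef>
    #include <cstdio>

    struct KernelArgs {
        const float *bias = nullptr;   // hypothetical member names
        void *output_ptr = nullptr;
    };

    int main() {
        float bias_val = 1.5f;
        float out = 0.0f;
        KernelArgs ka;
        ka.bias = &bias_val;
        ka.output_ptr = &out;

    #if defined(__aarch64__)
        __asm__ __volatile__(
            // Fetch the pointers from the args struct on demand instead of
            // reserving dedicated operand registers for the whole block.
            "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
            "ldr x9,  [%x[args_ptr], %[offsetof_output_ptr]]\n"
            "ldr s0, [x12]\n"   // read *bias
            "str s0, [x9]\n"    // store it through *output_ptr
            :
            : [args_ptr] "r"(&ka),
              [offsetof_bias] "I"(offsetof(KernelArgs, bias)),
              [offsetof_output_ptr] "I"(offsetof(KernelArgs, output_ptr))
            : "memory", "x9", "x12", "v0");
    #else
        out = *ka.bias;  // portable fallback so the sketch runs anywhere
    #endif
        std::printf("out = %f\n", out);
        return 0;
    }
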
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp
index 041825df6b..231472bcd0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void sve_hybrid_fp16_mla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const __fp16 *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void sve_hybrid_fp16_mla_6x4VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -102,10 +104,10 @@ void sve_hybrid_fp16_mla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.h, x20, x11\n"
@@ -138,8 +140,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -155,53 +157,56 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1h { z16.h }, p5/Z, [x10]\n"
- "fmla z8.h, z16.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "fmla z8.h, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[0]\n"
- "ld1h { z16.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z10.h, z16.h, z0.h[0]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z10.h, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z11.h, z16.h, z0.h[0]\n"
- "ld1h { z16.h }, p5/Z, [x10, #4, MUL VL]\n"
- "fmla z8.h, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z8.h, z17.h, z0.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[1]\n"
- "ld1h { z16.h }, p5/Z, [x10, #6, MUL VL]\n"
- "fmla z10.h, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
+ "fmla z10.h, z17.h, z0.h[1]\n"
"fmla z11.h, z16.h, z0.h[1]\n"
"ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z17.h, z0.h[2]\n"
- "fmla z9.h, z16.h, z0.h[2]\n"
"ld1h { z17.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[2]\n"
"ld1h { z16.h }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.h, z17.h, z0.h[2]\n"
- "fmla z11.h, z16.h, z0.h[2]\n"
"ld1h { z17.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z16.h, z0.h[2]\n"
"ld1h { z16.h }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.h, z17.h, z0.h[3]\n"
- "fmla z9.h, z16.h, z0.h[3]\n"
"ld1h { z17.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[3]\n"
"ld1h { z16.h }, p5/Z, [x10, #-1, MUL VL]\n"
"fmla z10.h, z17.h, z0.h[3]\n"
- "fmla z11.h, z16.h, z0.h[3]\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
+ "fmla z11.h, z16.h, z0.h[3]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z17.h, z0.h[4]\n"
- "fmla z9.h, z16.h, z0.h[4]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[4]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.h, z17.h, z0.h[4]\n"
- "fmla z11.h, z16.h, z0.h[4]\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.h, z16.h, z0.h[4]\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z17.h, z0.h[5]\n"
- "fmla z9.h, z16.h, z0.h[5]\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[5]\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
"fmla z10.h, z17.h, z0.h[5]\n"
@@ -209,121 +214,118 @@ void sve_hybrid_fp16_mla_6x4VL (
"ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z17.h, z0.h[6]\n"
- "fmla z9.h, z16.h, z0.h[6]\n"
"ld1h { z17.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[6]\n"
"ld1h { z16.h }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.h, z17.h, z0.h[6]\n"
- "fmla z11.h, z16.h, z0.h[6]\n"
"ld1h { z17.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z16.h, z0.h[6]\n"
"ld1h { z16.h }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.h, z17.h, z0.h[7]\n"
- "fmla z9.h, z16.h, z0.h[7]\n"
"ld1h { z17.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[7]\n"
"ld1h { z16.h }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x8\n"
- "cmp x27, #0x8\n"
"fmla z10.h, z17.h, z0.h[7]\n"
"fmla z11.h, z16.h, z0.h[7]\n"
- "add x26, x26, #0x10\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1h { z16.h }, p5/Z, [x10]\n"
- "fmla z8.h, z16.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z9.h, z16.h, z0.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"subs x27, x27, #0x1\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ "fmla z8.h, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[0]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[0]\n"
"fmla z11.h, z16.h, z0.h[0]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[1]\n"
- "fmla z9.h, z16.h, z0.h[1]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[1]\n"
"fmla z11.h, z16.h, z0.h[1]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[2]\n"
- "fmla z9.h, z16.h, z0.h[2]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[2]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[2]\n"
"fmla z11.h, z16.h, z0.h[2]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[3]\n"
- "fmla z9.h, z16.h, z0.h[3]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[3]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[3]\n"
"fmla z11.h, z16.h, z0.h[3]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[4]\n"
- "fmla z9.h, z16.h, z0.h[4]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[4]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[4]\n"
"fmla z11.h, z16.h, z0.h[4]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[5]\n"
- "fmla z9.h, z16.h, z0.h[5]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[5]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[5]\n"
"fmla z11.h, z16.h, z0.h[5]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[6]\n"
- "fmla z9.h, z16.h, z0.h[6]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[6]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[6]\n"
"fmla z11.h, z16.h, z0.h[6]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z17.h, z0.h[7]\n"
- "fmla z9.h, z16.h, z0.h[7]\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z16.h, z0.h[7]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[7]\n"
"fmla z11.h, z16.h, z0.h[7]\n"
- "addvl x10, x10, #4\n"
"11:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z17.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z17.h }, p5/Z, [x21]\n"
"ld1rh { z16.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z17.h\n"
"fmin z9.h, p5/M, z9.h, z17.h\n"
@@ -345,10 +347,10 @@ void sve_hybrid_fp16_mla_6x4VL (
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.h, x20, x11\n"
@@ -361,22 +363,22 @@ void sve_hybrid_fp16_mla_6x4VL (
"cbz x12, 16f\n"
"ld1h { z8.h }, p5/Z, [x12]\n"
"ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x9]\n"
"ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x20]\n"
"ld1h { z13.h }, p3/Z, [x20, #1, MUL VL]\n"
"ld1h { z14.h }, p2/Z, [x20, #2, MUL VL]\n"
@@ -395,8 +397,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -415,38 +417,38 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z1.h }, p0/Z, [x26]\n"
- "ld1rqh { z0.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
+ "ld1rqh { z1.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqh { z0.h }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"fmla z8.h, z17.h, z1.h[0]\n"
"fmla z12.h, z17.h, z0.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z1.h[0]\n"
"fmla z13.h, z16.h, z0.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.h, z17.h, z1.h[0]\n"
"fmla z14.h, z17.h, z0.h[0]\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x8\n"
"fmla z11.h, z16.h, z1.h[0]\n"
"fmla z15.h, z16.h, z0.h[0]\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
"fmla z8.h, z17.h, z1.h[1]\n"
"fmla z12.h, z17.h, z0.h[1]\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
"fmla z9.h, z16.h, z1.h[1]\n"
"fmla z13.h, z16.h, z0.h[1]\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
"fmla z10.h, z17.h, z1.h[1]\n"
"fmla z14.h, z17.h, z0.h[1]\n"
- "ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z11.h, z16.h, z1.h[1]\n"
"fmla z15.h, z16.h, z0.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z17.h, z1.h[2]\n"
"fmla z12.h, z17.h, z0.h[2]\n"
@@ -493,9 +495,9 @@ void sve_hybrid_fp16_mla_6x4VL (
"addvl x10, x10, #16\n"
"fmla z10.h, z17.h, z1.h[5]\n"
"fmla z14.h, z17.h, z0.h[5]\n"
- "ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z11.h, z16.h, z1.h[5]\n"
"fmla z15.h, z16.h, z0.h[5]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z17.h, z1.h[6]\n"
"fmla z12.h, z17.h, z0.h[6]\n"
@@ -522,110 +524,110 @@ void sve_hybrid_fp16_mla_6x4VL (
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
- "ld1rqh { z0.h }, p0/Z, [x26]\n"
- "ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
+ "ld1rqh { z0.h }, p0/Z, [x26]\n"
+ "ld1rqh { z1.h }, p0/Z, [x25]\n"
"fmla z8.h, z17.h, z0.h[0]\n"
"fmla z12.h, z17.h, z1.h[0]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[0]\n"
"fmla z13.h, z16.h, z1.h[0]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[0]\n"
"fmla z14.h, z17.h, z1.h[0]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z16.h, z0.h[0]\n"
"fmla z15.h, z16.h, z1.h[0]\n"
"ble 24f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[1]\n"
"fmla z12.h, z17.h, z1.h[1]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[1]\n"
"fmla z13.h, z16.h, z1.h[1]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[1]\n"
"fmla z14.h, z17.h, z1.h[1]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z16.h, z0.h[1]\n"
"fmla z15.h, z16.h, z1.h[1]\n"
"ble 24f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[2]\n"
"fmla z12.h, z17.h, z1.h[2]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[2]\n"
"fmla z13.h, z16.h, z1.h[2]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[2]\n"
"fmla z14.h, z17.h, z1.h[2]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z16.h, z0.h[2]\n"
"fmla z15.h, z16.h, z1.h[2]\n"
"ble 24f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[3]\n"
"fmla z12.h, z17.h, z1.h[3]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[3]\n"
"fmla z13.h, z16.h, z1.h[3]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[3]\n"
"fmla z14.h, z17.h, z1.h[3]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z16.h, z0.h[3]\n"
"fmla z15.h, z16.h, z1.h[3]\n"
"ble 24f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[4]\n"
"fmla z12.h, z17.h, z1.h[4]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[4]\n"
"fmla z13.h, z16.h, z1.h[4]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[4]\n"
"fmla z14.h, z17.h, z1.h[4]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z16.h, z0.h[4]\n"
"fmla z15.h, z16.h, z1.h[4]\n"
"ble 24f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[5]\n"
"fmla z12.h, z17.h, z1.h[5]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[5]\n"
"fmla z13.h, z16.h, z1.h[5]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[5]\n"
"fmla z14.h, z17.h, z1.h[5]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z16.h, z0.h[5]\n"
"fmla z15.h, z16.h, z1.h[5]\n"
"ble 24f\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z17.h, z0.h[6]\n"
"fmla z12.h, z17.h, z1.h[6]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[6]\n"
"fmla z13.h, z16.h, z1.h[6]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[6]\n"
"fmla z14.h, z17.h, z1.h[6]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z16.h, z0.h[6]\n"
"fmla z15.h, z16.h, z1.h[6]\n"
"ble 24f\n"
@@ -633,13 +635,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z17.h, z0.h[7]\n"
"fmla z12.h, z17.h, z1.h[7]\n"
+ "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z16.h, z0.h[7]\n"
"fmla z13.h, z16.h, z1.h[7]\n"
- "ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.h, z17.h, z0.h[7]\n"
"fmla z14.h, z17.h, z1.h[7]\n"
- "addvl x10, x10, #4\n"
"fmla z11.h, z16.h, z0.h[7]\n"
"fmla z15.h, z16.h, z1.h[7]\n"
"24:" // Height 2: Multiply loop: multiply skip
@@ -648,11 +650,11 @@ void sve_hybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 19b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z17.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z17.h }, p5/Z, [x21]\n"
"ld1rh { z16.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z17.h\n"
"fmin z9.h, p5/M, z9.h, z17.h\n"
@@ -676,20 +678,20 @@ void sve_hybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.h, x20, x11\n"
@@ -702,27 +704,27 @@ void sve_hybrid_fp16_mla_6x4VL (
"cbz x12, 29f\n"
"ld1h { z8.h }, p5/Z, [x12]\n"
"ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x9]\n"
"ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x21]\n"
"ld1h { z13.h }, p3/Z, [x21, #1, MUL VL]\n"
"ld1h { z14.h }, p2/Z, [x21, #2, MUL VL]\n"
@@ -749,8 +751,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -772,37 +774,37 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z21.h }, p5/Z, [x10]\n"
+ "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z2.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x24]\n"
- "ld1h { z21.h }, p5/Z, [x10]\n"
+ "add x24, x24, #0x10\n"
"fmla z8.h, z21.h, z2.h[0]\n"
"fmla z12.h, z21.h, z1.h[0]\n"
- "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z16.h, z21.h, z0.h[0]\n"
"fmla z9.h, z20.h, z2.h[0]\n"
- "ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, z20.h, z1.h[0]\n"
+ "fmla z16.h, z21.h, z0.h[0]\n"
+ "ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z17.h, z20.h, z0.h[0]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x8\n"
"fmla z10.h, z21.h, z2.h[0]\n"
"fmla z14.h, z21.h, z1.h[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"fmla z18.h, z21.h, z0.h[0]\n"
- "fmla z11.h, z20.h, z2.h[0]\n"
"ld1h { z21.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "fmla z11.h, z20.h, z2.h[0]\n"
"fmla z15.h, z20.h, z1.h[0]\n"
"fmla z19.h, z20.h, z0.h[0]\n"
"ld1h { z20.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[1]\n"
"fmla z12.h, z21.h, z1.h[1]\n"
"fmla z16.h, z21.h, z0.h[1]\n"
- "fmla z9.h, z20.h, z2.h[1]\n"
"ld1h { z21.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[1]\n"
"fmla z13.h, z20.h, z1.h[1]\n"
"fmla z17.h, z20.h, z0.h[1]\n"
"ld1h { z20.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -811,63 +813,63 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z14.h, z21.h, z1.h[1]\n"
"fmla z18.h, z21.h, z0.h[1]\n"
"fmla z11.h, z20.h, z2.h[1]\n"
- "ld1h { z21.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z15.h, z20.h, z1.h[1]\n"
"fmla z19.h, z20.h, z0.h[1]\n"
+ "ld1h { z21.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z20.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[2]\n"
"fmla z12.h, z21.h, z1.h[2]\n"
"fmla z16.h, z21.h, z0.h[2]\n"
- "fmla z9.h, z20.h, z2.h[2]\n"
"ld1h { z21.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[2]\n"
"fmla z13.h, z20.h, z1.h[2]\n"
"fmla z17.h, z20.h, z0.h[2]\n"
"ld1h { z20.h }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[2]\n"
"fmla z14.h, z21.h, z1.h[2]\n"
"fmla z18.h, z21.h, z0.h[2]\n"
- "fmla z11.h, z20.h, z2.h[2]\n"
"ld1h { z21.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z20.h, z2.h[2]\n"
"fmla z15.h, z20.h, z1.h[2]\n"
"fmla z19.h, z20.h, z0.h[2]\n"
"ld1h { z20.h }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[3]\n"
"fmla z12.h, z21.h, z1.h[3]\n"
"fmla z16.h, z21.h, z0.h[3]\n"
- "fmla z9.h, z20.h, z2.h[3]\n"
"ld1h { z21.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[3]\n"
"fmla z13.h, z20.h, z1.h[3]\n"
"fmla z17.h, z20.h, z0.h[3]\n"
"ld1h { z20.h }, p5/Z, [x10, #-1, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[3]\n"
"fmla z14.h, z21.h, z1.h[3]\n"
"fmla z18.h, z21.h, z0.h[3]\n"
- "fmla z11.h, z20.h, z2.h[3]\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
+ "fmla z11.h, z20.h, z2.h[3]\n"
"fmla z15.h, z20.h, z1.h[3]\n"
"fmla z19.h, z20.h, z0.h[3]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[4]\n"
"fmla z12.h, z21.h, z1.h[4]\n"
"fmla z16.h, z21.h, z0.h[4]\n"
- "fmla z9.h, z20.h, z2.h[4]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[4]\n"
"fmla z13.h, z20.h, z1.h[4]\n"
"fmla z17.h, z20.h, z0.h[4]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[4]\n"
"fmla z14.h, z21.h, z1.h[4]\n"
"fmla z18.h, z21.h, z0.h[4]\n"
- "fmla z11.h, z20.h, z2.h[4]\n"
"ld1h { z21.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.h, z20.h, z2.h[4]\n"
"fmla z15.h, z20.h, z1.h[4]\n"
"fmla z19.h, z20.h, z0.h[4]\n"
"ld1h { z20.h }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[5]\n"
"fmla z12.h, z21.h, z1.h[5]\n"
"fmla z16.h, z21.h, z0.h[5]\n"
- "fmla z9.h, z20.h, z2.h[5]\n"
"ld1h { z21.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[5]\n"
"fmla z13.h, z20.h, z1.h[5]\n"
"fmla z17.h, z20.h, z0.h[5]\n"
"ld1h { z20.h }, p5/Z, [x10, #7, MUL VL]\n"
@@ -876,31 +878,31 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z14.h, z21.h, z1.h[5]\n"
"fmla z18.h, z21.h, z0.h[5]\n"
"fmla z11.h, z20.h, z2.h[5]\n"
- "ld1h { z21.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z15.h, z20.h, z1.h[5]\n"
"fmla z19.h, z20.h, z0.h[5]\n"
+ "ld1h { z21.h }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1h { z20.h }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[6]\n"
"fmla z12.h, z21.h, z1.h[6]\n"
"fmla z16.h, z21.h, z0.h[6]\n"
- "fmla z9.h, z20.h, z2.h[6]\n"
"ld1h { z21.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[6]\n"
"fmla z13.h, z20.h, z1.h[6]\n"
"fmla z17.h, z20.h, z0.h[6]\n"
"ld1h { z20.h }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.h, z21.h, z2.h[6]\n"
"fmla z14.h, z21.h, z1.h[6]\n"
"fmla z18.h, z21.h, z0.h[6]\n"
- "fmla z11.h, z20.h, z2.h[6]\n"
"ld1h { z21.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z20.h, z2.h[6]\n"
"fmla z15.h, z20.h, z1.h[6]\n"
"fmla z19.h, z20.h, z0.h[6]\n"
"ld1h { z20.h }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.h, z21.h, z2.h[7]\n"
"fmla z12.h, z21.h, z1.h[7]\n"
"fmla z16.h, z21.h, z0.h[7]\n"
- "fmla z9.h, z20.h, z2.h[7]\n"
"ld1h { z21.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z20.h, z2.h[7]\n"
"fmla z13.h, z20.h, z1.h[7]\n"
"fmla z17.h, z20.h, z0.h[7]\n"
"ld1h { z20.h }, p5/Z, [x10, #-1, MUL VL]\n"
@@ -913,18 +915,18 @@ void sve_hybrid_fp16_mla_6x4VL (
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z21.h }, p5/Z, [x10]\n"
+ "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
- "ld1h { z21.h }, p5/Z, [x10]\n"
"fmla z8.h, z21.h, z0.h[0]\n"
"fmla z12.h, z21.h, z1.h[0]\n"
- "ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z16.h, z21.h, z2.h[0]\n"
"fmla z9.h, z20.h, z0.h[0]\n"
- "ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, z20.h, z1.h[0]\n"
+ "fmla z16.h, z21.h, z2.h[0]\n"
+ "ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z17.h, z20.h, z2.h[0]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -937,12 +939,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 37f\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z21.h, z0.h[1]\n"
"fmla z12.h, z21.h, z1.h[1]\n"
"fmla z16.h, z21.h, z2.h[1]\n"
- "fmla z9.h, z20.h, z0.h[1]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z20.h, z0.h[1]\n"
"fmla z13.h, z20.h, z1.h[1]\n"
"fmla z17.h, z20.h, z2.h[1]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -956,12 +958,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 37f\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z21.h, z0.h[2]\n"
"fmla z12.h, z21.h, z1.h[2]\n"
"fmla z16.h, z21.h, z2.h[2]\n"
- "fmla z9.h, z20.h, z0.h[2]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z20.h, z0.h[2]\n"
"fmla z13.h, z20.h, z1.h[2]\n"
"fmla z17.h, z20.h, z2.h[2]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -975,12 +977,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 37f\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z21.h, z0.h[3]\n"
"fmla z12.h, z21.h, z1.h[3]\n"
"fmla z16.h, z21.h, z2.h[3]\n"
- "fmla z9.h, z20.h, z0.h[3]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z20.h, z0.h[3]\n"
"fmla z13.h, z20.h, z1.h[3]\n"
"fmla z17.h, z20.h, z2.h[3]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -994,12 +996,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 37f\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z21.h, z0.h[4]\n"
"fmla z12.h, z21.h, z1.h[4]\n"
"fmla z16.h, z21.h, z2.h[4]\n"
- "fmla z9.h, z20.h, z0.h[4]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z20.h, z0.h[4]\n"
"fmla z13.h, z20.h, z1.h[4]\n"
"fmla z17.h, z20.h, z2.h[4]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1013,12 +1015,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 37f\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z21.h, z0.h[5]\n"
"fmla z12.h, z21.h, z1.h[5]\n"
"fmla z16.h, z21.h, z2.h[5]\n"
- "fmla z9.h, z20.h, z0.h[5]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z20.h, z0.h[5]\n"
"fmla z13.h, z20.h, z1.h[5]\n"
"fmla z17.h, z20.h, z2.h[5]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1032,12 +1034,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 37f\n"
"ld1h { z21.h }, p5/Z, [x10]\n"
"ld1h { z20.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z21.h, z0.h[6]\n"
"fmla z12.h, z21.h, z1.h[6]\n"
"fmla z16.h, z21.h, z2.h[6]\n"
- "fmla z9.h, z20.h, z0.h[6]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.h, z20.h, z0.h[6]\n"
"fmla z13.h, z20.h, z1.h[6]\n"
"fmla z17.h, z20.h, z2.h[6]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1054,8 +1056,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z8.h, z21.h, z0.h[7]\n"
"fmla z12.h, z21.h, z1.h[7]\n"
"fmla z16.h, z21.h, z2.h[7]\n"
- "fmla z9.h, z20.h, z0.h[7]\n"
"ld1h { z21.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z20.h, z0.h[7]\n"
"fmla z13.h, z20.h, z1.h[7]\n"
"fmla z17.h, z20.h, z2.h[7]\n"
"ld1h { z20.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1072,12 +1074,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
- "add x24, x25, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z21.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z21.h }, p5/Z, [x21]\n"
"ld1rh { z20.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z21.h\n"
"fmin z9.h, p5/M, z9.h, z21.h\n"
@@ -1109,24 +1111,24 @@ void sve_hybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x25]\n"
+ "st1h { z17.h }, p3, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x25, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.h, x20, x11\n"
@@ -1139,18 +1141,18 @@ void sve_hybrid_fp16_mla_6x4VL (
"cbz x12, 42f\n"
"ld1h { z8.h }, p5/Z, [x12]\n"
"ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1158,13 +1160,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x9]\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x22]\n"
"ld1h { z13.h }, p3/Z, [x22, #1, MUL VL]\n"
"ld1h { z14.h }, p2/Z, [x22, #2, MUL VL]\n"
@@ -1199,8 +1201,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1225,25 +1227,25 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z25.h }, p5/Z, [x10]\n"
+ "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z3.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z2.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"fmla z8.h, z25.h, z3.h[0]\n"
"fmla z12.h, z25.h, z2.h[0]\n"
+ "fmla z9.h, z24.h, z3.h[0]\n"
+ "fmla z13.h, z24.h, z2.h[0]\n"
"fmla z16.h, z25.h, z1.h[0]\n"
"fmla z20.h, z25.h, z0.h[0]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
- "fmla z9.h, z24.h, z3.h[0]\n"
- "fmla z13.h, z24.h, z2.h[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"fmla z17.h, z24.h, z1.h[0]\n"
"fmla z21.h, z24.h, z0.h[0]\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1272,9 +1274,9 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z14.h, z25.h, z2.h[1]\n"
"fmla z18.h, z25.h, z1.h[1]\n"
"fmla z22.h, z25.h, z0.h[1]\n"
- "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z11.h, z24.h, z3.h[1]\n"
"fmla z15.h, z24.h, z2.h[1]\n"
+ "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z19.h, z24.h, z1.h[1]\n"
"fmla z23.h, z24.h, z0.h[1]\n"
"ld1h { z24.h }, p5/Z, [x10, #-7, MUL VL]\n"
@@ -1353,9 +1355,9 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z14.h, z25.h, z2.h[5]\n"
"fmla z18.h, z25.h, z1.h[5]\n"
"fmla z22.h, z25.h, z0.h[5]\n"
- "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z11.h, z24.h, z3.h[5]\n"
"fmla z15.h, z24.h, z2.h[5]\n"
+ "ld1h { z25.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z19.h, z24.h, z1.h[5]\n"
"fmla z23.h, z24.h, z0.h[5]\n"
"ld1h { z24.h }, p5/Z, [x10, #-7, MUL VL]\n"
@@ -1400,20 +1402,20 @@ void sve_hybrid_fp16_mla_6x4VL (
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z25.h }, p5/Z, [x10]\n"
+ "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z25.h, z0.h[0]\n"
"fmla z12.h, z25.h, z1.h[0]\n"
+ "fmla z9.h, z24.h, z0.h[0]\n"
+ "fmla z13.h, z24.h, z1.h[0]\n"
"fmla z16.h, z25.h, z2.h[0]\n"
"fmla z20.h, z25.h, z3.h[0]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z9.h, z24.h, z0.h[0]\n"
- "fmla z13.h, z24.h, z1.h[0]\n"
"fmla z17.h, z24.h, z2.h[0]\n"
"fmla z21.h, z24.h, z3.h[0]\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1429,12 +1431,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z25.h, z0.h[1]\n"
"fmla z12.h, z25.h, z1.h[1]\n"
"fmla z16.h, z25.h, z2.h[1]\n"
"fmla z20.h, z25.h, z3.h[1]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z24.h, z0.h[1]\n"
"fmla z13.h, z24.h, z1.h[1]\n"
"fmla z17.h, z24.h, z2.h[1]\n"
@@ -1452,12 +1454,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z25.h, z0.h[2]\n"
"fmla z12.h, z25.h, z1.h[2]\n"
"fmla z16.h, z25.h, z2.h[2]\n"
"fmla z20.h, z25.h, z3.h[2]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z24.h, z0.h[2]\n"
"fmla z13.h, z24.h, z1.h[2]\n"
"fmla z17.h, z24.h, z2.h[2]\n"
@@ -1475,12 +1477,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z25.h, z0.h[3]\n"
"fmla z12.h, z25.h, z1.h[3]\n"
"fmla z16.h, z25.h, z2.h[3]\n"
"fmla z20.h, z25.h, z3.h[3]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z24.h, z0.h[3]\n"
"fmla z13.h, z24.h, z1.h[3]\n"
"fmla z17.h, z24.h, z2.h[3]\n"
@@ -1498,12 +1500,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z25.h, z0.h[4]\n"
"fmla z12.h, z25.h, z1.h[4]\n"
"fmla z16.h, z25.h, z2.h[4]\n"
"fmla z20.h, z25.h, z3.h[4]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z24.h, z0.h[4]\n"
"fmla z13.h, z24.h, z1.h[4]\n"
"fmla z17.h, z24.h, z2.h[4]\n"
@@ -1521,12 +1523,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z25.h, z0.h[5]\n"
"fmla z12.h, z25.h, z1.h[5]\n"
"fmla z16.h, z25.h, z2.h[5]\n"
"fmla z20.h, z25.h, z3.h[5]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z24.h, z0.h[5]\n"
"fmla z13.h, z24.h, z1.h[5]\n"
"fmla z17.h, z24.h, z2.h[5]\n"
@@ -1544,12 +1546,12 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 50f\n"
"ld1h { z25.h }, p5/Z, [x10]\n"
"ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z25.h, z0.h[6]\n"
"fmla z12.h, z25.h, z1.h[6]\n"
"fmla z16.h, z25.h, z2.h[6]\n"
"fmla z20.h, z25.h, z3.h[6]\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.h, z24.h, z0.h[6]\n"
"fmla z13.h, z24.h, z1.h[6]\n"
"fmla z17.h, z24.h, z2.h[6]\n"
@@ -1592,13 +1594,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z25.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z25.h }, p5/Z, [x21]\n"
"ld1rh { z24.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z25.h\n"
"fmin z9.h, p5/M, z9.h, z25.h\n"
@@ -1638,28 +1640,28 @@ void sve_hybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x25]\n"
+ "st1h { z17.h }, p3, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x24]\n"
+ "st1h { z21.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x24, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.h, x20, x11\n"
@@ -1672,18 +1674,18 @@ void sve_hybrid_fp16_mla_6x4VL (
"cbz x12, 55f\n"
"ld1h { z8.h }, p5/Z, [x12]\n"
"ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1695,16 +1697,16 @@ void sve_hybrid_fp16_mla_6x4VL (
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x9]\n"
- "add x21, x22, x20, LSL #1\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
"ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
+ "add x21, x22, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x23]\n"
"ld1h { z13.h }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z14.h }, p2/Z, [x23, #2, MUL VL]\n"
"ld1h { z15.h }, p1/Z, [x23, #3, MUL VL]\n"
"ld1h { z16.h }, p4/Z, [x22]\n"
@@ -1745,8 +1747,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1774,29 +1776,29 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z4.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z3.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z1.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
"ld1rqh { z0.h }, p0/Z, [x22]\n"
- "ld1h { z29.h }, p5/Z, [x10]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z8.h, z29.h, z4.h[0]\n"
"fmla z12.h, z29.h, z3.h[0]\n"
- "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[0]\n"
"fmla z16.h, z29.h, z2.h[0]\n"
"fmla z20.h, z29.h, z1.h[0]\n"
- "add x25, x25, #0x10\n"
"fmla z24.h, z29.h, z0.h[0]\n"
- "fmla z9.h, z28.h, z4.h[0]\n"
- "ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
"fmla z13.h, z28.h, z3.h[0]\n"
+ "ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z17.h, z28.h, z2.h[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z21.h, z28.h, z1.h[0]\n"
"fmla z25.h, z28.h, z0.h[0]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1805,8 +1807,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[0]\n"
"fmla z22.h, z29.h, z1.h[0]\n"
"fmla z26.h, z29.h, z0.h[0]\n"
- "fmla z11.h, z28.h, z4.h[0]\n"
"ld1h { z29.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[0]\n"
"fmla z15.h, z28.h, z3.h[0]\n"
"fmla z19.h, z28.h, z2.h[0]\n"
"fmla z23.h, z28.h, z1.h[0]\n"
@@ -1817,8 +1819,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[1]\n"
"fmla z20.h, z29.h, z1.h[1]\n"
"fmla z24.h, z29.h, z0.h[1]\n"
- "fmla z9.h, z28.h, z4.h[1]\n"
"ld1h { z29.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[1]\n"
"fmla z13.h, z28.h, z3.h[1]\n"
"fmla z17.h, z28.h, z2.h[1]\n"
"fmla z21.h, z28.h, z1.h[1]\n"
@@ -1831,8 +1833,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z22.h, z29.h, z1.h[1]\n"
"fmla z26.h, z29.h, z0.h[1]\n"
"fmla z11.h, z28.h, z4.h[1]\n"
- "ld1h { z29.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z15.h, z28.h, z3.h[1]\n"
+ "ld1h { z29.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z19.h, z28.h, z2.h[1]\n"
"fmla z23.h, z28.h, z1.h[1]\n"
"fmla z27.h, z28.h, z0.h[1]\n"
@@ -1842,8 +1844,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[2]\n"
"fmla z20.h, z29.h, z1.h[2]\n"
"fmla z24.h, z29.h, z0.h[2]\n"
- "fmla z9.h, z28.h, z4.h[2]\n"
"ld1h { z29.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[2]\n"
"fmla z13.h, z28.h, z3.h[2]\n"
"fmla z17.h, z28.h, z2.h[2]\n"
"fmla z21.h, z28.h, z1.h[2]\n"
@@ -1854,8 +1856,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[2]\n"
"fmla z22.h, z29.h, z1.h[2]\n"
"fmla z26.h, z29.h, z0.h[2]\n"
- "fmla z11.h, z28.h, z4.h[2]\n"
"ld1h { z29.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[2]\n"
"fmla z15.h, z28.h, z3.h[2]\n"
"fmla z19.h, z28.h, z2.h[2]\n"
"fmla z23.h, z28.h, z1.h[2]\n"
@@ -1866,8 +1868,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[3]\n"
"fmla z20.h, z29.h, z1.h[3]\n"
"fmla z24.h, z29.h, z0.h[3]\n"
- "fmla z9.h, z28.h, z4.h[3]\n"
"ld1h { z29.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[3]\n"
"fmla z13.h, z28.h, z3.h[3]\n"
"fmla z17.h, z28.h, z2.h[3]\n"
"fmla z21.h, z28.h, z1.h[3]\n"
@@ -1878,8 +1880,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[3]\n"
"fmla z22.h, z29.h, z1.h[3]\n"
"fmla z26.h, z29.h, z0.h[3]\n"
- "fmla z11.h, z28.h, z4.h[3]\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
+ "fmla z11.h, z28.h, z4.h[3]\n"
"fmla z15.h, z28.h, z3.h[3]\n"
"fmla z19.h, z28.h, z2.h[3]\n"
"fmla z23.h, z28.h, z1.h[3]\n"
@@ -1890,8 +1892,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[4]\n"
"fmla z20.h, z29.h, z1.h[4]\n"
"fmla z24.h, z29.h, z0.h[4]\n"
- "fmla z9.h, z28.h, z4.h[4]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[4]\n"
"fmla z13.h, z28.h, z3.h[4]\n"
"fmla z17.h, z28.h, z2.h[4]\n"
"fmla z21.h, z28.h, z1.h[4]\n"
@@ -1902,8 +1904,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[4]\n"
"fmla z22.h, z29.h, z1.h[4]\n"
"fmla z26.h, z29.h, z0.h[4]\n"
- "fmla z11.h, z28.h, z4.h[4]\n"
"ld1h { z29.h }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[4]\n"
"fmla z15.h, z28.h, z3.h[4]\n"
"fmla z19.h, z28.h, z2.h[4]\n"
"fmla z23.h, z28.h, z1.h[4]\n"
@@ -1914,8 +1916,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[5]\n"
"fmla z20.h, z29.h, z1.h[5]\n"
"fmla z24.h, z29.h, z0.h[5]\n"
- "fmla z9.h, z28.h, z4.h[5]\n"
"ld1h { z29.h }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[5]\n"
"fmla z13.h, z28.h, z3.h[5]\n"
"fmla z17.h, z28.h, z2.h[5]\n"
"fmla z21.h, z28.h, z1.h[5]\n"
@@ -1928,8 +1930,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z22.h, z29.h, z1.h[5]\n"
"fmla z26.h, z29.h, z0.h[5]\n"
"fmla z11.h, z28.h, z4.h[5]\n"
- "ld1h { z29.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z15.h, z28.h, z3.h[5]\n"
+ "ld1h { z29.h }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z19.h, z28.h, z2.h[5]\n"
"fmla z23.h, z28.h, z1.h[5]\n"
"fmla z27.h, z28.h, z0.h[5]\n"
@@ -1939,8 +1941,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[6]\n"
"fmla z20.h, z29.h, z1.h[6]\n"
"fmla z24.h, z29.h, z0.h[6]\n"
- "fmla z9.h, z28.h, z4.h[6]\n"
"ld1h { z29.h }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[6]\n"
"fmla z13.h, z28.h, z3.h[6]\n"
"fmla z17.h, z28.h, z2.h[6]\n"
"fmla z21.h, z28.h, z1.h[6]\n"
@@ -1951,8 +1953,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z18.h, z29.h, z2.h[6]\n"
"fmla z22.h, z29.h, z1.h[6]\n"
"fmla z26.h, z29.h, z0.h[6]\n"
- "fmla z11.h, z28.h, z4.h[6]\n"
"ld1h { z29.h }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.h, z28.h, z4.h[6]\n"
"fmla z15.h, z28.h, z3.h[6]\n"
"fmla z19.h, z28.h, z2.h[6]\n"
"fmla z23.h, z28.h, z1.h[6]\n"
@@ -1963,8 +1965,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[7]\n"
"fmla z20.h, z29.h, z1.h[7]\n"
"fmla z24.h, z29.h, z0.h[7]\n"
- "fmla z9.h, z28.h, z4.h[7]\n"
"ld1h { z29.h }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.h, z28.h, z4.h[7]\n"
"fmla z13.h, z28.h, z3.h[7]\n"
"fmla z17.h, z28.h, z2.h[7]\n"
"fmla z21.h, z28.h, z1.h[7]\n"
@@ -1983,23 +1985,23 @@ void sve_hybrid_fp16_mla_6x4VL (
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
"ld1rqh { z4.h }, p0/Z, [x22]\n"
- "ld1h { z29.h }, p5/Z, [x10]\n"
"fmla z8.h, z29.h, z0.h[0]\n"
"fmla z12.h, z29.h, z1.h[0]\n"
- "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.h, z28.h, z0.h[0]\n"
+ "fmla z13.h, z28.h, z1.h[0]\n"
"fmla z16.h, z29.h, z2.h[0]\n"
"fmla z20.h, z29.h, z3.h[0]\n"
"fmla z24.h, z29.h, z4.h[0]\n"
- "fmla z9.h, z28.h, z0.h[0]\n"
- "ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z13.h, z28.h, z1.h[0]\n"
"fmla z17.h, z28.h, z2.h[0]\n"
+ "ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z21.h, z28.h, z3.h[0]\n"
"fmla z25.h, z28.h, z4.h[0]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
@@ -2017,21 +2019,21 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 63f\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
"ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z29.h, z0.h[1]\n"
"fmla z12.h, z29.h, z1.h[1]\n"
"fmla z16.h, z29.h, z2.h[1]\n"
"fmla z20.h, z29.h, z3.h[1]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z29.h, z4.h[1]\n"
- "fmla z9.h, z28.h, z0.h[1]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z0.h[1]\n"
"fmla z13.h, z28.h, z1.h[1]\n"
"fmla z17.h, z28.h, z2.h[1]\n"
"fmla z21.h, z28.h, z3.h[1]\n"
"fmla z25.h, z28.h, z4.h[1]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z29.h, z0.h[1]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z29.h, z1.h[1]\n"
"fmla z18.h, z29.h, z2.h[1]\n"
"fmla z22.h, z29.h, z3.h[1]\n"
@@ -2044,21 +2046,21 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 63f\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
"ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z29.h, z0.h[2]\n"
"fmla z12.h, z29.h, z1.h[2]\n"
"fmla z16.h, z29.h, z2.h[2]\n"
"fmla z20.h, z29.h, z3.h[2]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z29.h, z4.h[2]\n"
- "fmla z9.h, z28.h, z0.h[2]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z0.h[2]\n"
"fmla z13.h, z28.h, z1.h[2]\n"
"fmla z17.h, z28.h, z2.h[2]\n"
"fmla z21.h, z28.h, z3.h[2]\n"
"fmla z25.h, z28.h, z4.h[2]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z29.h, z0.h[2]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z29.h, z1.h[2]\n"
"fmla z18.h, z29.h, z2.h[2]\n"
"fmla z22.h, z29.h, z3.h[2]\n"
@@ -2071,21 +2073,21 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 63f\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
"ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z29.h, z0.h[3]\n"
"fmla z12.h, z29.h, z1.h[3]\n"
"fmla z16.h, z29.h, z2.h[3]\n"
"fmla z20.h, z29.h, z3.h[3]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z29.h, z4.h[3]\n"
- "fmla z9.h, z28.h, z0.h[3]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z0.h[3]\n"
"fmla z13.h, z28.h, z1.h[3]\n"
"fmla z17.h, z28.h, z2.h[3]\n"
"fmla z21.h, z28.h, z3.h[3]\n"
"fmla z25.h, z28.h, z4.h[3]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z29.h, z0.h[3]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z29.h, z1.h[3]\n"
"fmla z18.h, z29.h, z2.h[3]\n"
"fmla z22.h, z29.h, z3.h[3]\n"
@@ -2098,21 +2100,21 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 63f\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
"ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z29.h, z0.h[4]\n"
"fmla z12.h, z29.h, z1.h[4]\n"
"fmla z16.h, z29.h, z2.h[4]\n"
"fmla z20.h, z29.h, z3.h[4]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z29.h, z4.h[4]\n"
- "fmla z9.h, z28.h, z0.h[4]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z0.h[4]\n"
"fmla z13.h, z28.h, z1.h[4]\n"
"fmla z17.h, z28.h, z2.h[4]\n"
"fmla z21.h, z28.h, z3.h[4]\n"
"fmla z25.h, z28.h, z4.h[4]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z29.h, z0.h[4]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z29.h, z1.h[4]\n"
"fmla z18.h, z29.h, z2.h[4]\n"
"fmla z22.h, z29.h, z3.h[4]\n"
@@ -2125,21 +2127,21 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 63f\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
"ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z29.h, z0.h[5]\n"
"fmla z12.h, z29.h, z1.h[5]\n"
"fmla z16.h, z29.h, z2.h[5]\n"
"fmla z20.h, z29.h, z3.h[5]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z29.h, z4.h[5]\n"
- "fmla z9.h, z28.h, z0.h[5]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z0.h[5]\n"
"fmla z13.h, z28.h, z1.h[5]\n"
"fmla z17.h, z28.h, z2.h[5]\n"
"fmla z21.h, z28.h, z3.h[5]\n"
"fmla z25.h, z28.h, z4.h[5]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z29.h, z0.h[5]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z29.h, z1.h[5]\n"
"fmla z18.h, z29.h, z2.h[5]\n"
"fmla z22.h, z29.h, z3.h[5]\n"
@@ -2152,21 +2154,21 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 63f\n"
"ld1h { z29.h }, p5/Z, [x10]\n"
"ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z29.h, z0.h[6]\n"
"fmla z12.h, z29.h, z1.h[6]\n"
"fmla z16.h, z29.h, z2.h[6]\n"
"fmla z20.h, z29.h, z3.h[6]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z29.h, z4.h[6]\n"
- "fmla z9.h, z28.h, z0.h[6]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z0.h[6]\n"
"fmla z13.h, z28.h, z1.h[6]\n"
"fmla z17.h, z28.h, z2.h[6]\n"
"fmla z21.h, z28.h, z3.h[6]\n"
"fmla z25.h, z28.h, z4.h[6]\n"
"ld1h { z28.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z29.h, z0.h[6]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z29.h, z1.h[6]\n"
"fmla z18.h, z29.h, z2.h[6]\n"
"fmla z22.h, z29.h, z3.h[6]\n"
@@ -2184,8 +2186,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"fmla z16.h, z29.h, z2.h[7]\n"
"fmla z20.h, z29.h, z3.h[7]\n"
"fmla z24.h, z29.h, z4.h[7]\n"
- "fmla z9.h, z28.h, z0.h[7]\n"
"ld1h { z29.h }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.h, z28.h, z0.h[7]\n"
"fmla z13.h, z28.h, z1.h[7]\n"
"fmla z17.h, z28.h, z2.h[7]\n"
"fmla z21.h, z28.h, z3.h[7]\n"
@@ -2208,14 +2210,14 @@ void sve_hybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 58b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
- "add x22, x23, x20, LSL #1\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z29.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z29.h }, p5/Z, [x21]\n"
"ld1rh { z28.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z29.h\n"
"fmin z9.h, p5/M, z9.h, z29.h\n"
@@ -2263,22 +2265,22 @@ void sve_hybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z25.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x25]\n"
+ "st1h { z17.h }, p3, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x24]\n"
+ "st1h { z21.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x23]\n"
+ "st1h { z25.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x23, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -2286,12 +2288,13 @@ void sve_hybrid_fp16_mla_6x4VL (
"b 80f\n"
"66:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0xc\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.h, x20, x11\n"
@@ -2304,18 +2307,18 @@ void sve_hybrid_fp16_mla_6x4VL (
"cbz x12, 68f\n"
"ld1h { z8.h }, p5/Z, [x12]\n"
"ld1h { z9.h }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1h { z10.h }, p5/Z, [x12, #2, MUL VL]\n"
"ld1h { z11.h }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -2331,17 +2334,17 @@ void sve_hybrid_fp16_mla_6x4VL (
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #1\n"
- "add x23, x24, x20, LSL #1\n"
"ld1h { z8.h }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"ld1h { z9.h }, p3/Z, [x9, #1, MUL VL]\n"
"ld1h { z10.h }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #1\n"
"ld1h { z11.h }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #1\n"
+ "add x23, x24, x20, LSL #1\n"
+ "add x22, x23, x20, LSL #1\n"
"ld1h { z12.h }, p4/Z, [x24]\n"
"ld1h { z13.h }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #1\n"
+ "add x20, x21, x20, LSL #1\n"
"ld1h { z14.h }, p2/Z, [x24, #2, MUL VL]\n"
"ld1h { z15.h }, p1/Z, [x24, #3, MUL VL]\n"
"ld1h { z16.h }, p4/Z, [x23]\n"
@@ -2390,8 +2393,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"mov x28, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2422,29 +2425,29 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z1.h }, p5/Z, [x10]\n"
+ "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x8\n"
+ "cmp x27, #0x8\n"
"ld1rqh { z7.h }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqh { z6.h }, p0/Z, [x25]\n"
- "sub x27, x27, #0x8\n"
+ "add x25, x25, #0x10\n"
"ld1rqh { z5.h }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqh { z4.h }, p0/Z, [x23]\n"
- "cmp x27, #0x8\n"
- "add x26, x26, #0x10\n"
"ld1rqh { z3.h }, p0/Z, [x22]\n"
"ld1rqh { z2.h }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1h { z1.h }, p5/Z, [x10]\n"
- "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z8.h, z1.h, z7.h[0]\n"
"fmla z12.h, z1.h, z6.h[0]\n"
+ "add x21, x21, #0x10\n"
"fmla z16.h, z1.h, z5.h[0]\n"
"fmla z20.h, z1.h, z4.h[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z24.h, z1.h, z3.h[0]\n"
"fmla z28.h, z1.h, z2.h[0]\n"
"ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
"fmla z9.h, z0.h, z7.h[0]\n"
"fmla z13.h, z0.h, z6.h[0]\n"
"fmla z17.h, z0.h, z5.h[0]\n"
@@ -2665,24 +2668,24 @@ void sve_hybrid_fp16_mla_6x4VL (
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.h, XZR, x27\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"ld1rqh { z0.h }, p0/Z, [x26]\n"
"ld1rqh { z1.h }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqh { z2.h }, p0/Z, [x24]\n"
"ld1rqh { z3.h }, p0/Z, [x23]\n"
"ld1rqh { z4.h }, p0/Z, [x22]\n"
"ld1rqh { z5.h }, p0/Z, [x21]\n"
- "ld1h { z7.h }, p5/Z, [x10]\n"
- "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.h, z7.h, z0.h[0]\n"
"fmla z12.h, z7.h, z1.h[0]\n"
+ "fmla z9.h, z6.h, z0.h[0]\n"
+ "fmla z13.h, z6.h, z1.h[0]\n"
"fmla z16.h, z7.h, z2.h[0]\n"
"fmla z20.h, z7.h, z3.h[0]\n"
"fmla z24.h, z7.h, z4.h[0]\n"
"fmla z28.h, z7.h, z5.h[0]\n"
"ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z9.h, z6.h, z0.h[0]\n"
- "fmla z13.h, z6.h, z1.h[0]\n"
"fmla z17.h, z6.h, z2.h[0]\n"
"fmla z21.h, z6.h, z3.h[0]\n"
"fmla z25.h, z6.h, z4.h[0]\n"
@@ -2704,23 +2707,23 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z7.h, z0.h[1]\n"
"fmla z12.h, z7.h, z1.h[1]\n"
"fmla z16.h, z7.h, z2.h[1]\n"
"fmla z20.h, z7.h, z3.h[1]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z7.h, z4.h[1]\n"
"fmla z28.h, z7.h, z5.h[1]\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z6.h, z0.h[1]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, z6.h, z1.h[1]\n"
"fmla z17.h, z6.h, z2.h[1]\n"
"fmla z21.h, z6.h, z3.h[1]\n"
"fmla z25.h, z6.h, z4.h[1]\n"
"fmla z29.h, z6.h, z5.h[1]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z7.h, z0.h[1]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z7.h, z1.h[1]\n"
"fmla z18.h, z7.h, z2.h[1]\n"
"fmla z22.h, z7.h, z3.h[1]\n"
@@ -2735,23 +2738,23 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z7.h, z0.h[2]\n"
"fmla z12.h, z7.h, z1.h[2]\n"
"fmla z16.h, z7.h, z2.h[2]\n"
"fmla z20.h, z7.h, z3.h[2]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z7.h, z4.h[2]\n"
"fmla z28.h, z7.h, z5.h[2]\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z6.h, z0.h[2]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, z6.h, z1.h[2]\n"
"fmla z17.h, z6.h, z2.h[2]\n"
"fmla z21.h, z6.h, z3.h[2]\n"
"fmla z25.h, z6.h, z4.h[2]\n"
"fmla z29.h, z6.h, z5.h[2]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z7.h, z0.h[2]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z7.h, z1.h[2]\n"
"fmla z18.h, z7.h, z2.h[2]\n"
"fmla z22.h, z7.h, z3.h[2]\n"
@@ -2766,23 +2769,23 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z7.h, z0.h[3]\n"
"fmla z12.h, z7.h, z1.h[3]\n"
"fmla z16.h, z7.h, z2.h[3]\n"
"fmla z20.h, z7.h, z3.h[3]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z7.h, z4.h[3]\n"
"fmla z28.h, z7.h, z5.h[3]\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z6.h, z0.h[3]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, z6.h, z1.h[3]\n"
"fmla z17.h, z6.h, z2.h[3]\n"
"fmla z21.h, z6.h, z3.h[3]\n"
"fmla z25.h, z6.h, z4.h[3]\n"
"fmla z29.h, z6.h, z5.h[3]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z7.h, z0.h[3]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z7.h, z1.h[3]\n"
"fmla z18.h, z7.h, z2.h[3]\n"
"fmla z22.h, z7.h, z3.h[3]\n"
@@ -2797,23 +2800,23 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z7.h, z0.h[4]\n"
"fmla z12.h, z7.h, z1.h[4]\n"
"fmla z16.h, z7.h, z2.h[4]\n"
"fmla z20.h, z7.h, z3.h[4]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z7.h, z4.h[4]\n"
"fmla z28.h, z7.h, z5.h[4]\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z6.h, z0.h[4]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, z6.h, z1.h[4]\n"
"fmla z17.h, z6.h, z2.h[4]\n"
"fmla z21.h, z6.h, z3.h[4]\n"
"fmla z25.h, z6.h, z4.h[4]\n"
"fmla z29.h, z6.h, z5.h[4]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z7.h, z0.h[4]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z7.h, z1.h[4]\n"
"fmla z18.h, z7.h, z2.h[4]\n"
"fmla z22.h, z7.h, z3.h[4]\n"
@@ -2828,23 +2831,23 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z7.h, z0.h[5]\n"
"fmla z12.h, z7.h, z1.h[5]\n"
"fmla z16.h, z7.h, z2.h[5]\n"
"fmla z20.h, z7.h, z3.h[5]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z7.h, z4.h[5]\n"
"fmla z28.h, z7.h, z5.h[5]\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z6.h, z0.h[5]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, z6.h, z1.h[5]\n"
"fmla z17.h, z6.h, z2.h[5]\n"
"fmla z21.h, z6.h, z3.h[5]\n"
"fmla z25.h, z6.h, z4.h[5]\n"
"fmla z29.h, z6.h, z5.h[5]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z7.h, z0.h[5]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z7.h, z1.h[5]\n"
"fmla z18.h, z7.h, z2.h[5]\n"
"fmla z22.h, z7.h, z3.h[5]\n"
@@ -2859,23 +2862,23 @@ void sve_hybrid_fp16_mla_6x4VL (
"ble 76f\n"
"ld1h { z7.h }, p5/Z, [x10]\n"
"ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.h, z7.h, z0.h[6]\n"
"fmla z12.h, z7.h, z1.h[6]\n"
"fmla z16.h, z7.h, z2.h[6]\n"
"fmla z20.h, z7.h, z3.h[6]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.h, z7.h, z4.h[6]\n"
"fmla z28.h, z7.h, z5.h[6]\n"
- "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.h, z6.h, z0.h[6]\n"
+ "ld1h { z7.h }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.h, z6.h, z1.h[6]\n"
"fmla z17.h, z6.h, z2.h[6]\n"
"fmla z21.h, z6.h, z3.h[6]\n"
"fmla z25.h, z6.h, z4.h[6]\n"
"fmla z29.h, z6.h, z5.h[6]\n"
"ld1h { z6.h }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.h, z7.h, z0.h[6]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.h, z7.h, z1.h[6]\n"
"fmla z18.h, z7.h, z2.h[6]\n"
"fmla z22.h, z7.h, z3.h[6]\n"
@@ -2923,15 +2926,15 @@ void sve_hybrid_fp16_mla_6x4VL (
"cmp x28, x20\n"
"bne 71b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #1\n"
+ "add x26, x9, x20, LSL #1\n"
+ "add x25, x26, x20, LSL #1\n"
"add x24, x25, x20, LSL #1\n"
"add x23, x24, x20, LSL #1\n"
"add x22, x23, x20, LSL #1\n"
- "add x21, x22, x20, LSL #1\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rh { z1.h }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rh { z1.h }, p5/Z, [x21]\n"
"ld1rh { z0.h }, p5/Z, [x20]\n"
"fmin z8.h, p5/M, z8.h, z1.h\n"
"fmin z9.h, p5/M, z9.h, z1.h\n"
@@ -2987,26 +2990,26 @@ void sve_hybrid_fp16_mla_6x4VL (
"st1h { z10.h }, p2, [x9, #2, MUL VL]\n"
"st1h { z11.h }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1h { z12.h }, p4, [x25]\n"
- "st1h { z13.h }, p3, [x25, #1, MUL VL]\n"
- "st1h { z14.h }, p2, [x25, #2, MUL VL]\n"
- "st1h { z15.h }, p1, [x25, #3, MUL VL]\n"
- "st1h { z16.h }, p4, [x24]\n"
- "st1h { z17.h }, p3, [x24, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x24, #2, MUL VL]\n"
- "st1h { z19.h }, p1, [x24, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x23]\n"
- "st1h { z21.h }, p3, [x23, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x23, #2, MUL VL]\n"
- "st1h { z23.h }, p1, [x23, #3, MUL VL]\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z25.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z26.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z27.h }, p1, [x22, #3, MUL VL]\n"
- "st1h { z28.h }, p4, [x21]\n"
- "st1h { z29.h }, p3, [x21, #1, MUL VL]\n"
- "st1h { z30.h }, p2, [x21, #2, MUL VL]\n"
- "st1h { z31.h }, p1, [x21, #3, MUL VL]\n"
+ "st1h { z12.h }, p4, [x26]\n"
+ "st1h { z13.h }, p3, [x26, #1, MUL VL]\n"
+ "st1h { z14.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z15.h }, p1, [x26, #3, MUL VL]\n"
+ "st1h { z16.h }, p4, [x25]\n"
+ "st1h { z17.h }, p3, [x25, #1, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z19.h }, p1, [x25, #3, MUL VL]\n"
+ "st1h { z20.h }, p4, [x24]\n"
+ "st1h { z21.h }, p3, [x24, #1, MUL VL]\n"
+ "st1h { z22.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z23.h }, p1, [x24, #3, MUL VL]\n"
+ "st1h { z24.h }, p4, [x23]\n"
+ "st1h { z25.h }, p3, [x23, #1, MUL VL]\n"
+ "st1h { z26.h }, p2, [x23, #2, MUL VL]\n"
+ "st1h { z27.h }, p1, [x23, #3, MUL VL]\n"
+ "st1h { z28.h }, p4, [x22]\n"
+ "st1h { z29.h }, p3, [x22, #1, MUL VL]\n"
+ "st1h { z30.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z31.h }, p1, [x22, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"dech x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -3023,8 +3026,8 @@ void sve_hybrid_fp16_mla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp
index 880f9d1a27..3e040b6197 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 1> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
index 66481f04f9..2b836659a2 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -102,10 +104,10 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 25f\n"
"beq 13f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -138,8 +140,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -158,14 +160,14 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop
"fmla z8.s, p4/M, z6.s, z0.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z17.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z16.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"add x26, x26, #0x4\n"
+ "subs x27, x27, #0x1\n"
"fmla z10.s, p4/M, z17.s, z0.s\n"
"fmla z11.s, p4/M, z16.s, z0.s\n"
- "subs x27, x27, #0x1\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
"ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
@@ -173,19 +175,19 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"10:" // Height 1: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"fmla z8.s, p4/M, z6.s, z0.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z17.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z16.s }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
+ "addvl x10, x10, #4\n"
"cmp x28, x20\n"
"fmla z10.s, p4/M, z17.s, z0.s\n"
"fmla z11.s, p4/M, z16.s, z0.s\n"
- "addvl x10, x10, #4\n"
"bne 6b\n"
"tbz %x[flags], #1, 11f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p4/Z, [x21]\n"
"ld1rw { z16.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z17.s\n"
"fmin z9.s, p4/M, z9.s, z17.s\n"
@@ -207,10 +209,10 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"bgt 2b\n"
"b 74f\n"
"13:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"14:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -223,22 +225,22 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"cbz x12, 15f\n"
"ld1w { z8.s }, p4/Z, [x12]\n"
"ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
"b 17f\n"
"15:" // Height 2: no bias
"tbz %x[flags], #0, 16f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x20]\n"
"ld1w { z13.s }, p2/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x20, #2, MUL VL]\n"
@@ -257,8 +259,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -293,8 +295,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z10.s, p4/M, z17.s, z0.s\n"
"fmla z14.s, p4/M, z17.s, z1.s\n"
"fmla z11.s, p4/M, z16.s, z0.s\n"
- "fmla z15.s, p4/M, z16.s, z1.s\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
+ "fmla z15.s, p4/M, z16.s, z1.s\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
"ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
@@ -308,19 +310,19 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z13.s, p4/M, z7.s, z1.s\n"
"ld1w { z16.s }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
+ "addvl x10, x10, #4\n"
"cmp x28, x20\n"
"fmla z10.s, p4/M, z17.s, z0.s\n"
"fmla z14.s, p4/M, z17.s, z1.s\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, p4/M, z16.s, z0.s\n"
"fmla z15.s, p4/M, z16.s, z1.s\n"
"bne 18b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p4/Z, [x21]\n"
"ld1rw { z16.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z17.s\n"
"fmin z9.s, p4/M, z9.s, z17.s\n"
@@ -344,20 +346,20 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
"24:" // Height 2: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 14b\n"
"b 74f\n"
"25:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"26:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -370,27 +372,27 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"cbz x12, 27f\n"
"ld1w { z8.s }, p4/Z, [x12]\n"
"ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 29f\n"
"27:" // Height 3: no bias
"tbz %x[flags], #0, 28f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x21]\n"
"ld1w { z13.s }, p2/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x21, #2, MUL VL]\n"
@@ -417,8 +419,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -449,8 +451,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x26, x26, #0x4\n"
"subs x27, x27, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z21.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"add x25, x25, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
@@ -462,11 +464,11 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z18.s, p4/M, z21.s, z2.s\n"
"fmla z11.s, p4/M, z20.s, z0.s\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
"fmla z15.s, p4/M, z20.s, z1.s\n"
- "fmla z19.s, p4/M, z20.s, z2.s\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
+ "fmla z19.s, p4/M, z20.s, z2.s\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
+ "ld1w { z6.s }, p4/Z, [x10]\n"
"ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Main loop skip
@@ -475,13 +477,13 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z12.s, p4/M, z6.s, z1.s\n"
"add x28, x28, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z21.s }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"ld1w { z20.s }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
+ "cmp x28, x20\n"
"fmla z10.s, p4/M, z21.s, z0.s\n"
"fmla z14.s, p4/M, z21.s, z1.s\n"
"fmla z18.s, p4/M, z21.s, z2.s\n"
@@ -490,12 +492,12 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z19.s, p4/M, z20.s, z2.s\n"
"bne 30b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"tbz %x[flags], #1, 35f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z21.s }, p4/Z, [x21]\n"
"ld1rw { z20.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z21.s\n"
"fmin z9.s, p4/M, z9.s, z21.s\n"
@@ -527,24 +529,24 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x25, #3, MUL VL]\n"
"36:" // Height 3: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 26b\n"
"b 74f\n"
"37:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"38:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -557,18 +559,18 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"cbz x12, 39f\n"
"ld1w { z8.s }, p4/Z, [x12]\n"
"ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -576,13 +578,13 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"39:" // Height 4: no bias
"tbz %x[flags], #0, 40f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x22]\n"
"ld1w { z13.s }, p2/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x22, #2, MUL VL]\n"
@@ -617,8 +619,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"42:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 43f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -668,7 +670,6 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z14.s, p4/M, z25.s, z1.s\n"
"fmla z18.s, p4/M, z25.s, z2.s\n"
"fmla z22.s, p4/M, z25.s, z3.s\n"
- "ld1w { z6.s }, p4/Z, [x10]\n"
"fmla z11.s, p4/M, z24.s, z0.s\n"
"fmla z15.s, p4/M, z24.s, z1.s\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
@@ -677,6 +678,7 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z23.s, p4/M, z24.s, z3.s\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
+ "ld1w { z6.s }, p4/Z, [x10]\n"
"ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 45b\n"
"46:" // Height 4: Multiply loop: Main loop skip
@@ -687,15 +689,15 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
"ld1w { z25.s }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"ld1w { z24.s }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "cmp x28, x20\n"
"fmla z10.s, p4/M, z25.s, z0.s\n"
"fmla z14.s, p4/M, z25.s, z1.s\n"
+ "addvl x10, x10, #4\n"
"fmla z18.s, p4/M, z25.s, z2.s\n"
"fmla z22.s, p4/M, z25.s, z3.s\n"
"fmla z11.s, p4/M, z24.s, z0.s\n"
@@ -704,13 +706,13 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z23.s, p4/M, z24.s, z3.s\n"
"bne 42b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 47f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z25.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z25.s }, p4/Z, [x21]\n"
"ld1rw { z24.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z25.s\n"
"fmin z9.s, p4/M, z9.s, z25.s\n"
@@ -750,28 +752,28 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x24]\n"
+ "st1w { z21.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x24, #3, MUL VL]\n"
"48:" // Height 4: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 38b\n"
"b 74f\n"
"49:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"50:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -784,18 +786,18 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"cbz x12, 51f\n"
"ld1w { z8.s }, p4/Z, [x12]\n"
"ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -807,16 +809,16 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"51:" // Height 5: no bias
"tbz %x[flags], #0, 52f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x23]\n"
"ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p3/Z, [x22]\n"
@@ -857,8 +859,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"54:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 55f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -901,8 +903,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z29.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"add x23, x23, #0x4\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
@@ -920,12 +922,12 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1w { z6.s }, p4/Z, [x10]\n"
"fmla z15.s, p4/M, z28.s, z1.s\n"
- "fmla z19.s, p4/M, z28.s, z2.s\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
+ "fmla z19.s, p4/M, z28.s, z2.s\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"fmla z23.s, p4/M, z28.s, z3.s\n"
- "fmla z27.s, p4/M, z28.s, z4.s\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
+ "fmla z27.s, p4/M, z28.s, z4.s\n"
"ld1rw { z4.s }, p4/Z, [x22]\n"
"ld1w { z7.s }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 57b\n"
@@ -936,12 +938,12 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "cmp x28, x20\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
- "fmla z9.s, p4/M, z7.s, z0.s\n"
"ld1w { z29.s }, p4/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
+ "cmp x28, x20\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
"ld1w { z28.s }, p4/Z, [x10, #3, MUL VL]\n"
@@ -958,14 +960,14 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z27.s, p4/M, z28.s, z4.s\n"
"bne 54b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 59f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z29.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z29.s }, p4/Z, [x21]\n"
"ld1rw { z28.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z29.s\n"
"fmin z9.s, p4/M, z9.s, z29.s\n"
@@ -1013,22 +1015,22 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x22]\n"
- "st1w { z25.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x24]\n"
+ "st1w { z21.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x23]\n"
+ "st1w { z25.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x23, #3, MUL VL]\n"
"60:" // Height 5: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1036,12 +1038,13 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"b 74f\n"
"61:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"62:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -1054,18 +1057,18 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"cbz x12, 63f\n"
"ld1w { z8.s }, p4/Z, [x12]\n"
"ld1w { z9.s }, p4/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p4/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p4/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1081,17 +1084,17 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"63:" // Height 6: no bias
"tbz %x[flags], #0, 64f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x24]\n"
"ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p3/Z, [x23]\n"
@@ -1140,8 +1143,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"mov x28, #0x0\n"
"66:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 67f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1229,12 +1232,12 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"fmla z16.s, p4/M, z6.s, z2.s\n"
"fmla z20.s, p4/M, z6.s, z3.s\n"
- "cmp x28, x20\n"
"fmla z24.s, p4/M, z6.s, z4.s\n"
"fmla z28.s, p4/M, z6.s, z5.s\n"
"ld1w { z6.s }, p4/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, p4/M, z7.s, z0.s\n"
"fmla z13.s, p4/M, z7.s, z1.s\n"
+ "cmp x28, x20\n"
"fmla z17.s, p4/M, z7.s, z2.s\n"
"fmla z21.s, p4/M, z7.s, z3.s\n"
"fmla z25.s, p4/M, z7.s, z4.s\n"
@@ -1255,15 +1258,15 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"fmla z31.s, p4/M, z7.s, z5.s\n"
"bne 66b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"tbz %x[flags], #1, 71f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p4/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p4/Z, [x21]\n"
"ld1rw { z0.s }, p4/Z, [x20]\n"
"fmin z8.s, p4/M, z8.s, z1.s\n"
"fmin z9.s, p4/M, z9.s, z1.s\n"
@@ -1319,26 +1322,26 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p3, [x25]\n"
- "st1w { z13.s }, p2, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p1, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p0, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p3, [x24]\n"
- "st1w { z17.s }, p2, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p0, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p3, [x23]\n"
- "st1w { z21.s }, p2, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p1, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p0, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p3, [x22]\n"
- "st1w { z25.s }, p2, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p0, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p3, [x21]\n"
- "st1w { z29.s }, p2, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p0, [x21, #3, MUL VL]\n"
+ "st1w { z12.s }, p3, [x26]\n"
+ "st1w { z13.s }, p2, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p1, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p0, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p3, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p0, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p3, [x24]\n"
+ "st1w { z21.s }, p2, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p1, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p0, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p3, [x23]\n"
+ "st1w { z25.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p0, [x23, #3, MUL VL]\n"
+ "st1w { z28.s }, p3, [x22]\n"
+ "st1w { z29.s }, p2, [x22, #1, MUL VL]\n"
+ "st1w { z30.s }, p1, [x22, #2, MUL VL]\n"
+ "st1w { z31.s }, p0, [x22, #3, MUL VL]\n"
"72:" // Height 6: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1355,8 +1358,8 @@ void sve_hybrid_fp32_mla_6x4VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"74:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
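The change applied across both variants above follows one pattern: `bias` and `output_ptr` are moved out of the inline-asm operand list and into the `KernelArgs` block, then fetched inside the asm with `ldr` through `offsetof()` (e.g. `ldr x12, [%x[args_ptr], %[offsetof_bias]]`), which frees two general-purpose register operands that the compiler previously had to pin for the whole asm statement. A minimal, self-contained sketch of that idiom is below; it is AArch64/GCC-only, and the struct fields `bias`/`output_ptr` are taken from the diff while the helper name `load_bias_via_args` is purely illustrative, not part of the library.

```cpp
#include <cstddef> // offsetof

// Sketch of the KernelArgs pattern used in this patch: parameters live in a
// single argument block, and the asm loads them on demand instead of binding
// each one to its own register operand.
struct KernelArgs {
    const float *bias = nullptr; // was a separate [bias] "r" operand
    void *output_ptr = nullptr;  // was a separate [output_ptr] "+&r" operand
};

// Hypothetical helper mirroring "ldr x12, [%x[args_ptr], %[offsetof_bias]]":
// the "I" constraint passes offsetof(...) as an immediate, exactly as the
// kernels above do, so only one pointer register (args_ptr) is consumed.
inline const float *load_bias_via_args(const KernelArgs &ka) {
    const float *out;
    __asm__ volatile(
        "ldr %x[out], [%x[args_ptr], %[offsetof_bias]]\n"
        : [out] "=r"(out)
        : [args_ptr] "r"(&ka),
          [offsetof_bias] "I"(offsetof(KernelArgs, bias))
        : "memory");
    return out;
}
```

One practical consequence visible in the Height 6 paths above: because `output_ptr` is no longer an in/out asm operand, the strided update that used to be `madd %x[output_ptr], x21, x20, %x[output_ptr]` now computes into a scratch register and writes the result back with `str x20, [%x[args_ptr], %[offsetof_output_ptr]]`.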
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
index e1581f2026..99828e8f0c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void sve_hybrid_fp32_mla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void sve_hybrid_fp32_mla_6x4VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -102,10 +104,10 @@ void sve_hybrid_fp32_mla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -138,8 +140,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -155,98 +157,98 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1w { z16.s }, p5/Z, [x10]\n"
- "fmla z8.s, z16.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
+ "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "fmla z8.s, z17.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z16.s, z0.s[0]\n"
- "ld1w { z16.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z10.s, z16.s, z0.s[0]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "fmla z10.s, z17.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #4, MUL VL]\n"
"fmla z11.s, z16.s, z0.s[0]\n"
- "ld1w { z16.s }, p5/Z, [x10, #4, MUL VL]\n"
- "fmla z8.s, z16.s, z0.s[1]\n"
"ld1w { z16.s }, p5/Z, [x10, #5, MUL VL]\n"
+ "fmla z8.s, z17.s, z0.s[1]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #6, MUL VL]\n"
"fmla z9.s, z16.s, z0.s[1]\n"
- "ld1w { z16.s }, p5/Z, [x10, #6, MUL VL]\n"
- "fmla z10.s, z16.s, z0.s[1]\n"
"ld1w { z16.s }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
+ "fmla z10.s, z17.s, z0.s[1]\n"
"fmla z11.s, z16.s, z0.s[1]\n"
"ld1w { z17.s }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1w { z16.s }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.s, z17.s, z0.s[2]\n"
- "fmla z9.s, z16.s, z0.s[2]\n"
"ld1w { z17.s }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.s, z16.s, z0.s[2]\n"
"ld1w { z16.s }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.s, z17.s, z0.s[2]\n"
- "fmla z11.s, z16.s, z0.s[2]\n"
"ld1w { z17.s }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.s, z16.s, z0.s[2]\n"
"ld1w { z16.s }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.s, z17.s, z0.s[3]\n"
- "fmla z9.s, z16.s, z0.s[3]\n"
"ld1w { z17.s }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.s, z16.s, z0.s[3]\n"
"ld1w { z16.s }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"fmla z10.s, z17.s, z0.s[3]\n"
"fmla z11.s, z16.s, z0.s[3]\n"
- "add x26, x26, #0x10\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1w { z16.s }, p5/Z, [x10]\n"
- "fmla z8.s, z16.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z9.s, z16.s, z0.s[0]\n"
- "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"subs x27, x27, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "fmla z8.s, z17.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z16.s, z0.s[0]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z17.s, z0.s[0]\n"
"fmla z11.s, z16.s, z0.s[0]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z17.s, z0.s[1]\n"
- "fmla z9.s, z16.s, z0.s[1]\n"
"ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z16.s, z0.s[1]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z17.s, z0.s[1]\n"
"fmla z11.s, z16.s, z0.s[1]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z17.s, z0.s[2]\n"
- "fmla z9.s, z16.s, z0.s[2]\n"
"ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z16.s, z0.s[2]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z17.s, z0.s[2]\n"
"fmla z11.s, z16.s, z0.s[2]\n"
- "addvl x10, x10, #4\n"
"ble 11f\n"
"ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z17.s, z0.s[3]\n"
- "fmla z9.s, z16.s, z0.s[3]\n"
"ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z16.s, z0.s[3]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z17.s, z0.s[3]\n"
"fmla z11.s, z16.s, z0.s[3]\n"
- "addvl x10, x10, #4\n"
"11:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z17.s\n"
"fmin z9.s, p5/M, z9.s, z17.s\n"
@@ -268,10 +270,10 @@ void sve_hybrid_fp32_mla_6x4VL (
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -284,22 +286,22 @@ void sve_hybrid_fp32_mla_6x4VL (
"cbz x12, 16f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z14.d, z10.d\n"
"mov z15.d, z11.d\n"
- "addvl x12, x12, #4\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
@@ -318,8 +320,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -338,38 +340,38 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z1.s }, p0/Z, [x26]\n"
- "ld1rqw { z0.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
"ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
+ "ld1rqw { z1.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqw { z0.s }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"fmla z8.s, z17.s, z1.s[0]\n"
"fmla z12.s, z17.s, z0.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z16.s, z1.s[0]\n"
"fmla z13.s, z16.s, z0.s[0]\n"
- "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
"fmla z10.s, z17.s, z1.s[0]\n"
"fmla z14.s, z17.s, z0.s[0]\n"
"ld1w { z17.s }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x4\n"
"fmla z11.s, z16.s, z1.s[0]\n"
"fmla z15.s, z16.s, z0.s[0]\n"
"ld1w { z16.s }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
"fmla z8.s, z17.s, z1.s[1]\n"
"fmla z12.s, z17.s, z0.s[1]\n"
"ld1w { z17.s }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
"fmla z9.s, z16.s, z1.s[1]\n"
"fmla z13.s, z16.s, z0.s[1]\n"
"ld1w { z16.s }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
"fmla z10.s, z17.s, z1.s[1]\n"
"fmla z14.s, z17.s, z0.s[1]\n"
- "ld1w { z17.s }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z11.s, z16.s, z1.s[1]\n"
"fmla z15.s, z16.s, z0.s[1]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1w { z16.s }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.s, z17.s, z1.s[2]\n"
"fmla z12.s, z17.s, z0.s[2]\n"
@@ -396,50 +398,50 @@ void sve_hybrid_fp32_mla_6x4VL (
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
+ "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "ld1rqw { z1.s }, p0/Z, [x25]\n"
"fmla z8.s, z17.s, z0.s[0]\n"
"fmla z12.s, z17.s, z1.s[0]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z16.s, z0.s[0]\n"
"fmla z13.s, z16.s, z1.s[0]\n"
- "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z17.s, z0.s[0]\n"
"fmla z14.s, z17.s, z1.s[0]\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, z16.s, z0.s[0]\n"
"fmla z15.s, z16.s, z1.s[0]\n"
"ble 24f\n"
"ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z17.s, z0.s[1]\n"
"fmla z12.s, z17.s, z1.s[1]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z16.s, z0.s[1]\n"
"fmla z13.s, z16.s, z1.s[1]\n"
- "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z17.s, z0.s[1]\n"
"fmla z14.s, z17.s, z1.s[1]\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, z16.s, z0.s[1]\n"
"fmla z15.s, z16.s, z1.s[1]\n"
"ble 24f\n"
"ld1w { z17.s }, p5/Z, [x10]\n"
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z17.s, z0.s[2]\n"
"fmla z12.s, z17.s, z1.s[2]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z16.s, z0.s[2]\n"
"fmla z13.s, z16.s, z1.s[2]\n"
- "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z17.s, z0.s[2]\n"
"fmla z14.s, z17.s, z1.s[2]\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, z16.s, z0.s[2]\n"
"fmla z15.s, z16.s, z1.s[2]\n"
"ble 24f\n"
@@ -447,13 +449,13 @@ void sve_hybrid_fp32_mla_6x4VL (
"ld1w { z16.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z17.s, z0.s[3]\n"
"fmla z12.s, z17.s, z1.s[3]\n"
+ "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z16.s, z0.s[3]\n"
"fmla z13.s, z16.s, z1.s[3]\n"
- "ld1w { z17.s }, p5/Z, [x10, #2, MUL VL]\n"
"ld1w { z16.s }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"fmla z10.s, z17.s, z0.s[3]\n"
"fmla z14.s, z17.s, z1.s[3]\n"
- "addvl x10, x10, #4\n"
"fmla z11.s, z16.s, z0.s[3]\n"
"fmla z15.s, z16.s, z1.s[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
@@ -462,11 +464,11 @@ void sve_hybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 19b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z17.s\n"
"fmin z9.s, p5/M, z9.s, z17.s\n"
@@ -490,20 +492,20 @@ void sve_hybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -516,27 +518,27 @@ void sve_hybrid_fp32_mla_6x4VL (
"cbz x12, 29f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
@@ -563,8 +565,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -586,37 +588,37 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z21.s }, p5/Z, [x10]\n"
+ "ld1w { z20.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1w { z21.s }, p5/Z, [x10]\n"
+ "add x24, x24, #0x10\n"
"fmla z8.s, z21.s, z2.s[0]\n"
"fmla z12.s, z21.s, z1.s[0]\n"
- "ld1w { z20.s }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z16.s, z21.s, z0.s[0]\n"
"fmla z9.s, z20.s, z2.s[0]\n"
- "ld1w { z21.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.s, z20.s, z1.s[0]\n"
+ "fmla z16.s, z21.s, z0.s[0]\n"
+ "ld1w { z21.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z17.s, z20.s, z0.s[0]\n"
"ld1w { z20.s }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x4\n"
"fmla z10.s, z21.s, z2.s[0]\n"
"fmla z14.s, z21.s, z1.s[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"fmla z18.s, z21.s, z0.s[0]\n"
- "fmla z11.s, z20.s, z2.s[0]\n"
"ld1w { z21.s }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "fmla z11.s, z20.s, z2.s[0]\n"
"fmla z15.s, z20.s, z1.s[0]\n"
"fmla z19.s, z20.s, z0.s[0]\n"
"ld1w { z20.s }, p5/Z, [x10, #5, MUL VL]\n"
"fmla z8.s, z21.s, z2.s[1]\n"
"fmla z12.s, z21.s, z1.s[1]\n"
"fmla z16.s, z21.s, z0.s[1]\n"
- "fmla z9.s, z20.s, z2.s[1]\n"
"ld1w { z21.s }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.s, z20.s, z2.s[1]\n"
"fmla z13.s, z20.s, z1.s[1]\n"
"fmla z17.s, z20.s, z0.s[1]\n"
"ld1w { z20.s }, p5/Z, [x10, #7, MUL VL]\n"
@@ -625,31 +627,31 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z14.s, z21.s, z1.s[1]\n"
"fmla z18.s, z21.s, z0.s[1]\n"
"fmla z11.s, z20.s, z2.s[1]\n"
- "ld1w { z21.s }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z15.s, z20.s, z1.s[1]\n"
"fmla z19.s, z20.s, z0.s[1]\n"
+ "ld1w { z21.s }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1w { z20.s }, p5/Z, [x10, #-7, MUL VL]\n"
"fmla z8.s, z21.s, z2.s[2]\n"
"fmla z12.s, z21.s, z1.s[2]\n"
"fmla z16.s, z21.s, z0.s[2]\n"
- "fmla z9.s, z20.s, z2.s[2]\n"
"ld1w { z21.s }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.s, z20.s, z2.s[2]\n"
"fmla z13.s, z20.s, z1.s[2]\n"
"fmla z17.s, z20.s, z0.s[2]\n"
"ld1w { z20.s }, p5/Z, [x10, #-5, MUL VL]\n"
"fmla z10.s, z21.s, z2.s[2]\n"
"fmla z14.s, z21.s, z1.s[2]\n"
"fmla z18.s, z21.s, z0.s[2]\n"
- "fmla z11.s, z20.s, z2.s[2]\n"
"ld1w { z21.s }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.s, z20.s, z2.s[2]\n"
"fmla z15.s, z20.s, z1.s[2]\n"
"fmla z19.s, z20.s, z0.s[2]\n"
"ld1w { z20.s }, p5/Z, [x10, #-3, MUL VL]\n"
"fmla z8.s, z21.s, z2.s[3]\n"
"fmla z12.s, z21.s, z1.s[3]\n"
"fmla z16.s, z21.s, z0.s[3]\n"
- "fmla z9.s, z20.s, z2.s[3]\n"
"ld1w { z21.s }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.s, z20.s, z2.s[3]\n"
"fmla z13.s, z20.s, z1.s[3]\n"
"fmla z17.s, z20.s, z0.s[3]\n"
"ld1w { z20.s }, p5/Z, [x10, #-1, MUL VL]\n"
@@ -662,18 +664,18 @@ void sve_hybrid_fp32_mla_6x4VL (
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z21.s }, p5/Z, [x10]\n"
+ "ld1w { z20.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
- "ld1w { z21.s }, p5/Z, [x10]\n"
"fmla z8.s, z21.s, z0.s[0]\n"
"fmla z12.s, z21.s, z1.s[0]\n"
- "ld1w { z20.s }, p5/Z, [x10, #1, MUL VL]\n"
- "fmla z16.s, z21.s, z2.s[0]\n"
"fmla z9.s, z20.s, z0.s[0]\n"
- "ld1w { z21.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.s, z20.s, z1.s[0]\n"
+ "fmla z16.s, z21.s, z2.s[0]\n"
+ "ld1w { z21.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z17.s, z20.s, z2.s[0]\n"
"ld1w { z20.s }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -686,12 +688,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 37f\n"
"ld1w { z21.s }, p5/Z, [x10]\n"
"ld1w { z20.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z21.s, z0.s[1]\n"
"fmla z12.s, z21.s, z1.s[1]\n"
"fmla z16.s, z21.s, z2.s[1]\n"
- "fmla z9.s, z20.s, z0.s[1]\n"
"ld1w { z21.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.s, z20.s, z0.s[1]\n"
"fmla z13.s, z20.s, z1.s[1]\n"
"fmla z17.s, z20.s, z2.s[1]\n"
"ld1w { z20.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -705,12 +707,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 37f\n"
"ld1w { z21.s }, p5/Z, [x10]\n"
"ld1w { z20.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z21.s, z0.s[2]\n"
"fmla z12.s, z21.s, z1.s[2]\n"
"fmla z16.s, z21.s, z2.s[2]\n"
- "fmla z9.s, z20.s, z0.s[2]\n"
"ld1w { z21.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
+ "fmla z9.s, z20.s, z0.s[2]\n"
"fmla z13.s, z20.s, z1.s[2]\n"
"fmla z17.s, z20.s, z2.s[2]\n"
"ld1w { z20.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -727,8 +729,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z8.s, z21.s, z0.s[3]\n"
"fmla z12.s, z21.s, z1.s[3]\n"
"fmla z16.s, z21.s, z2.s[3]\n"
- "fmla z9.s, z20.s, z0.s[3]\n"
"ld1w { z21.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z20.s, z0.s[3]\n"
"fmla z13.s, z20.s, z1.s[3]\n"
"fmla z17.s, z20.s, z2.s[3]\n"
"ld1w { z20.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -745,12 +747,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z21.s }, p5/Z, [x21]\n"
"ld1rw { z20.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z21.s\n"
"fmin z9.s, p5/M, z9.s, z21.s\n"
@@ -782,24 +784,24 @@ void sve_hybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -812,18 +814,18 @@ void sve_hybrid_fp32_mla_6x4VL (
"cbz x12, 42f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -831,13 +833,13 @@ void sve_hybrid_fp32_mla_6x4VL (
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
@@ -872,8 +874,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -898,25 +900,25 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z25.s }, p5/Z, [x10]\n"
+ "ld1w { z24.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z3.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqw { z2.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
- "ld1w { z25.s }, p5/Z, [x10]\n"
- "ld1w { z24.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"fmla z8.s, z25.s, z3.s[0]\n"
"fmla z12.s, z25.s, z2.s[0]\n"
+ "fmla z9.s, z24.s, z3.s[0]\n"
+ "fmla z13.s, z24.s, z2.s[0]\n"
"fmla z16.s, z25.s, z1.s[0]\n"
"fmla z20.s, z25.s, z0.s[0]\n"
"ld1w { z25.s }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
- "fmla z9.s, z24.s, z3.s[0]\n"
- "fmla z13.s, z24.s, z2.s[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"fmla z17.s, z24.s, z1.s[0]\n"
"fmla z21.s, z24.s, z0.s[0]\n"
"ld1w { z24.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -945,9 +947,9 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z14.s, z25.s, z2.s[1]\n"
"fmla z18.s, z25.s, z1.s[1]\n"
"fmla z22.s, z25.s, z0.s[1]\n"
- "ld1w { z25.s }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z11.s, z24.s, z3.s[1]\n"
"fmla z15.s, z24.s, z2.s[1]\n"
+ "ld1w { z25.s }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z19.s, z24.s, z1.s[1]\n"
"fmla z23.s, z24.s, z0.s[1]\n"
"ld1w { z24.s }, p5/Z, [x10, #-7, MUL VL]\n"
@@ -992,20 +994,20 @@ void sve_hybrid_fp32_mla_6x4VL (
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z25.s }, p5/Z, [x10]\n"
+ "ld1w { z24.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1rqw { z3.s }, p0/Z, [x23]\n"
- "ld1w { z25.s }, p5/Z, [x10]\n"
- "ld1w { z24.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z25.s, z0.s[0]\n"
"fmla z12.s, z25.s, z1.s[0]\n"
+ "fmla z9.s, z24.s, z0.s[0]\n"
+ "fmla z13.s, z24.s, z1.s[0]\n"
"fmla z16.s, z25.s, z2.s[0]\n"
"fmla z20.s, z25.s, z3.s[0]\n"
"ld1w { z25.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z9.s, z24.s, z0.s[0]\n"
- "fmla z13.s, z24.s, z1.s[0]\n"
"fmla z17.s, z24.s, z2.s[0]\n"
"fmla z21.s, z24.s, z3.s[0]\n"
"ld1w { z24.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1021,12 +1023,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 50f\n"
"ld1w { z25.s }, p5/Z, [x10]\n"
"ld1w { z24.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z25.s, z0.s[1]\n"
"fmla z12.s, z25.s, z1.s[1]\n"
"fmla z16.s, z25.s, z2.s[1]\n"
"fmla z20.s, z25.s, z3.s[1]\n"
"ld1w { z25.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.s, z24.s, z0.s[1]\n"
"fmla z13.s, z24.s, z1.s[1]\n"
"fmla z17.s, z24.s, z2.s[1]\n"
@@ -1044,12 +1046,12 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 50f\n"
"ld1w { z25.s }, p5/Z, [x10]\n"
"ld1w { z24.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z25.s, z0.s[2]\n"
"fmla z12.s, z25.s, z1.s[2]\n"
"fmla z16.s, z25.s, z2.s[2]\n"
"fmla z20.s, z25.s, z3.s[2]\n"
"ld1w { z25.s }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x1\n"
"fmla z9.s, z24.s, z0.s[2]\n"
"fmla z13.s, z24.s, z1.s[2]\n"
"fmla z17.s, z24.s, z2.s[2]\n"
@@ -1092,13 +1094,13 @@ void sve_hybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z25.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z25.s }, p5/Z, [x21]\n"
"ld1rw { z24.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z25.s\n"
"fmin z9.s, p5/M, z9.s, z25.s\n"
@@ -1138,28 +1140,28 @@ void sve_hybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1172,18 +1174,18 @@ void sve_hybrid_fp32_mla_6x4VL (
"cbz x12, 55f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1195,16 +1197,16 @@ void sve_hybrid_fp32_mla_6x4VL (
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x22]\n"
@@ -1245,8 +1247,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1274,29 +1276,29 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z29.s }, p5/Z, [x10]\n"
+ "ld1w { z28.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z4.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqw { z3.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x22]\n"
- "ld1w { z29.s }, p5/Z, [x10]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z8.s, z29.s, z4.s[0]\n"
"fmla z12.s, z29.s, z3.s[0]\n"
- "ld1w { z28.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.s, z28.s, z4.s[0]\n"
"fmla z16.s, z29.s, z2.s[0]\n"
"fmla z20.s, z29.s, z1.s[0]\n"
- "add x25, x25, #0x10\n"
"fmla z24.s, z29.s, z0.s[0]\n"
- "fmla z9.s, z28.s, z4.s[0]\n"
- "ld1w { z29.s }, p5/Z, [x10, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
"fmla z13.s, z28.s, z3.s[0]\n"
+ "ld1w { z29.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z17.s, z28.s, z2.s[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z21.s, z28.s, z1.s[0]\n"
"fmla z25.s, z28.s, z0.s[0]\n"
"ld1w { z28.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1305,8 +1307,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z18.s, z29.s, z2.s[0]\n"
"fmla z22.s, z29.s, z1.s[0]\n"
"fmla z26.s, z29.s, z0.s[0]\n"
- "fmla z11.s, z28.s, z4.s[0]\n"
"ld1w { z29.s }, p5/Z, [x10, #4, MUL VL]\n"
+ "fmla z11.s, z28.s, z4.s[0]\n"
"fmla z15.s, z28.s, z3.s[0]\n"
"fmla z19.s, z28.s, z2.s[0]\n"
"fmla z23.s, z28.s, z1.s[0]\n"
@@ -1317,8 +1319,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z16.s, z29.s, z2.s[1]\n"
"fmla z20.s, z29.s, z1.s[1]\n"
"fmla z24.s, z29.s, z0.s[1]\n"
- "fmla z9.s, z28.s, z4.s[1]\n"
"ld1w { z29.s }, p5/Z, [x10, #6, MUL VL]\n"
+ "fmla z9.s, z28.s, z4.s[1]\n"
"fmla z13.s, z28.s, z3.s[1]\n"
"fmla z17.s, z28.s, z2.s[1]\n"
"fmla z21.s, z28.s, z1.s[1]\n"
@@ -1331,8 +1333,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z22.s, z29.s, z1.s[1]\n"
"fmla z26.s, z29.s, z0.s[1]\n"
"fmla z11.s, z28.s, z4.s[1]\n"
- "ld1w { z29.s }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z15.s, z28.s, z3.s[1]\n"
+ "ld1w { z29.s }, p5/Z, [x10, #-8, MUL VL]\n"
"fmla z19.s, z28.s, z2.s[1]\n"
"fmla z23.s, z28.s, z1.s[1]\n"
"fmla z27.s, z28.s, z0.s[1]\n"
@@ -1342,8 +1344,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z16.s, z29.s, z2.s[2]\n"
"fmla z20.s, z29.s, z1.s[2]\n"
"fmla z24.s, z29.s, z0.s[2]\n"
- "fmla z9.s, z28.s, z4.s[2]\n"
"ld1w { z29.s }, p5/Z, [x10, #-6, MUL VL]\n"
+ "fmla z9.s, z28.s, z4.s[2]\n"
"fmla z13.s, z28.s, z3.s[2]\n"
"fmla z17.s, z28.s, z2.s[2]\n"
"fmla z21.s, z28.s, z1.s[2]\n"
@@ -1354,8 +1356,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z18.s, z29.s, z2.s[2]\n"
"fmla z22.s, z29.s, z1.s[2]\n"
"fmla z26.s, z29.s, z0.s[2]\n"
- "fmla z11.s, z28.s, z4.s[2]\n"
"ld1w { z29.s }, p5/Z, [x10, #-4, MUL VL]\n"
+ "fmla z11.s, z28.s, z4.s[2]\n"
"fmla z15.s, z28.s, z3.s[2]\n"
"fmla z19.s, z28.s, z2.s[2]\n"
"fmla z23.s, z28.s, z1.s[2]\n"
@@ -1366,8 +1368,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z16.s, z29.s, z2.s[3]\n"
"fmla z20.s, z29.s, z1.s[3]\n"
"fmla z24.s, z29.s, z0.s[3]\n"
- "fmla z9.s, z28.s, z4.s[3]\n"
"ld1w { z29.s }, p5/Z, [x10, #-2, MUL VL]\n"
+ "fmla z9.s, z28.s, z4.s[3]\n"
"fmla z13.s, z28.s, z3.s[3]\n"
"fmla z17.s, z28.s, z2.s[3]\n"
"fmla z21.s, z28.s, z1.s[3]\n"
@@ -1386,23 +1388,23 @@ void sve_hybrid_fp32_mla_6x4VL (
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z29.s }, p5/Z, [x10]\n"
+ "ld1w { z28.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1rqw { z3.s }, p0/Z, [x23]\n"
"ld1rqw { z4.s }, p0/Z, [x22]\n"
- "ld1w { z29.s }, p5/Z, [x10]\n"
"fmla z8.s, z29.s, z0.s[0]\n"
"fmla z12.s, z29.s, z1.s[0]\n"
- "ld1w { z28.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "fmla z9.s, z28.s, z0.s[0]\n"
+ "fmla z13.s, z28.s, z1.s[0]\n"
"fmla z16.s, z29.s, z2.s[0]\n"
"fmla z20.s, z29.s, z3.s[0]\n"
"fmla z24.s, z29.s, z4.s[0]\n"
- "fmla z9.s, z28.s, z0.s[0]\n"
- "ld1w { z29.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z13.s, z28.s, z1.s[0]\n"
"fmla z17.s, z28.s, z2.s[0]\n"
+ "ld1w { z29.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z21.s, z28.s, z3.s[0]\n"
"fmla z25.s, z28.s, z4.s[0]\n"
"ld1w { z28.s }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1420,21 +1422,21 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 63f\n"
"ld1w { z29.s }, p5/Z, [x10]\n"
"ld1w { z28.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z29.s, z0.s[1]\n"
"fmla z12.s, z29.s, z1.s[1]\n"
"fmla z16.s, z29.s, z2.s[1]\n"
"fmla z20.s, z29.s, z3.s[1]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.s, z29.s, z4.s[1]\n"
- "fmla z9.s, z28.s, z0.s[1]\n"
"ld1w { z29.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z28.s, z0.s[1]\n"
"fmla z13.s, z28.s, z1.s[1]\n"
"fmla z17.s, z28.s, z2.s[1]\n"
"fmla z21.s, z28.s, z3.s[1]\n"
"fmla z25.s, z28.s, z4.s[1]\n"
"ld1w { z28.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.s, z29.s, z0.s[1]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.s, z29.s, z1.s[1]\n"
"fmla z18.s, z29.s, z2.s[1]\n"
"fmla z22.s, z29.s, z3.s[1]\n"
@@ -1447,21 +1449,21 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 63f\n"
"ld1w { z29.s }, p5/Z, [x10]\n"
"ld1w { z28.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z29.s, z0.s[2]\n"
"fmla z12.s, z29.s, z1.s[2]\n"
"fmla z16.s, z29.s, z2.s[2]\n"
"fmla z20.s, z29.s, z3.s[2]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.s, z29.s, z4.s[2]\n"
- "fmla z9.s, z28.s, z0.s[2]\n"
"ld1w { z29.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z28.s, z0.s[2]\n"
"fmla z13.s, z28.s, z1.s[2]\n"
"fmla z17.s, z28.s, z2.s[2]\n"
"fmla z21.s, z28.s, z3.s[2]\n"
"fmla z25.s, z28.s, z4.s[2]\n"
"ld1w { z28.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.s, z29.s, z0.s[2]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.s, z29.s, z1.s[2]\n"
"fmla z18.s, z29.s, z2.s[2]\n"
"fmla z22.s, z29.s, z3.s[2]\n"
@@ -1479,8 +1481,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"fmla z16.s, z29.s, z2.s[3]\n"
"fmla z20.s, z29.s, z3.s[3]\n"
"fmla z24.s, z29.s, z4.s[3]\n"
- "fmla z9.s, z28.s, z0.s[3]\n"
"ld1w { z29.s }, p5/Z, [x10, #2, MUL VL]\n"
+ "fmla z9.s, z28.s, z0.s[3]\n"
"fmla z13.s, z28.s, z1.s[3]\n"
"fmla z17.s, z28.s, z2.s[3]\n"
"fmla z21.s, z28.s, z3.s[3]\n"
@@ -1503,14 +1505,14 @@ void sve_hybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 58b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z29.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z29.s }, p5/Z, [x21]\n"
"ld1rw { z28.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z29.s\n"
"fmin z9.s, p5/M, z9.s, z29.s\n"
@@ -1558,22 +1560,22 @@ void sve_hybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1581,12 +1583,13 @@ void sve_hybrid_fp32_mla_6x4VL (
"b 80f\n"
"66:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1599,18 +1602,18 @@ void sve_hybrid_fp32_mla_6x4VL (
"cbz x12, 68f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "mov z12.d, z8.d\n"
- "mov z13.d, z9.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
- "mov z14.d, z10.d\n"
- "mov z15.d, z11.d\n"
+ "addvl x12, x12, #4\n"
+ "mov z12.d, z8.d\n"
+ "mov z13.d, z9.d\n"
"mov z16.d, z8.d\n"
"mov z17.d, z9.d\n"
- "addvl x12, x12, #4\n"
+ "mov z20.d, z8.d\n"
+ "mov z14.d, z10.d\n"
+ "mov z15.d, z11.d\n"
"mov z18.d, z10.d\n"
"mov z19.d, z11.d\n"
- "mov z20.d, z8.d\n"
"mov z21.d, z9.d\n"
"mov z22.d, z10.d\n"
"mov z23.d, z11.d\n"
@@ -1626,17 +1629,17 @@ void sve_hybrid_fp32_mla_6x4VL (
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x23]\n"
@@ -1685,8 +1688,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"mov x28, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1717,29 +1720,29 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z1.s }, p5/Z, [x10]\n"
+ "ld1w { z0.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z7.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqw { z6.s }, p0/Z, [x25]\n"
- "sub x27, x27, #0x4\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z5.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z4.s }, p0/Z, [x23]\n"
- "cmp x27, #0x4\n"
- "add x26, x26, #0x10\n"
"ld1rqw { z3.s }, p0/Z, [x22]\n"
"ld1rqw { z2.s }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1w { z1.s }, p5/Z, [x10]\n"
- "ld1w { z0.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"fmla z8.s, z1.s, z7.s[0]\n"
"fmla z12.s, z1.s, z6.s[0]\n"
+ "add x21, x21, #0x10\n"
"fmla z16.s, z1.s, z5.s[0]\n"
"fmla z20.s, z1.s, z4.s[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z24.s, z1.s, z3.s[0]\n"
"fmla z28.s, z1.s, z2.s[0]\n"
"ld1w { z1.s }, p5/Z, [x10, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
"fmla z9.s, z0.s, z7.s[0]\n"
"fmla z13.s, z0.s, z6.s[0]\n"
"fmla z17.s, z0.s, z5.s[0]\n"
@@ -1847,24 +1850,24 @@ void sve_hybrid_fp32_mla_6x4VL (
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1w { z7.s }, p5/Z, [x10]\n"
+ "ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"ld1rqw { z0.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "subs x27, x27, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1rqw { z3.s }, p0/Z, [x23]\n"
"ld1rqw { z4.s }, p0/Z, [x22]\n"
"ld1rqw { z5.s }, p0/Z, [x21]\n"
- "ld1w { z7.s }, p5/Z, [x10]\n"
- "ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
"fmla z8.s, z7.s, z0.s[0]\n"
"fmla z12.s, z7.s, z1.s[0]\n"
+ "fmla z9.s, z6.s, z0.s[0]\n"
+ "fmla z13.s, z6.s, z1.s[0]\n"
"fmla z16.s, z7.s, z2.s[0]\n"
"fmla z20.s, z7.s, z3.s[0]\n"
"fmla z24.s, z7.s, z4.s[0]\n"
"fmla z28.s, z7.s, z5.s[0]\n"
"ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
- "fmla z9.s, z6.s, z0.s[0]\n"
- "fmla z13.s, z6.s, z1.s[0]\n"
"fmla z17.s, z6.s, z2.s[0]\n"
"fmla z21.s, z6.s, z3.s[0]\n"
"fmla z25.s, z6.s, z4.s[0]\n"
@@ -1886,23 +1889,23 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 76f\n"
"ld1w { z7.s }, p5/Z, [x10]\n"
"ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z7.s, z0.s[1]\n"
"fmla z12.s, z7.s, z1.s[1]\n"
"fmla z16.s, z7.s, z2.s[1]\n"
"fmla z20.s, z7.s, z3.s[1]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.s, z7.s, z4.s[1]\n"
"fmla z28.s, z7.s, z5.s[1]\n"
- "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z6.s, z0.s[1]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.s, z6.s, z1.s[1]\n"
"fmla z17.s, z6.s, z2.s[1]\n"
"fmla z21.s, z6.s, z3.s[1]\n"
"fmla z25.s, z6.s, z4.s[1]\n"
"fmla z29.s, z6.s, z5.s[1]\n"
"ld1w { z6.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.s, z7.s, z0.s[1]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.s, z7.s, z1.s[1]\n"
"fmla z18.s, z7.s, z2.s[1]\n"
"fmla z22.s, z7.s, z3.s[1]\n"
@@ -1917,23 +1920,23 @@ void sve_hybrid_fp32_mla_6x4VL (
"ble 76f\n"
"ld1w { z7.s }, p5/Z, [x10]\n"
"ld1w { z6.s }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x1\n"
"fmla z8.s, z7.s, z0.s[2]\n"
"fmla z12.s, z7.s, z1.s[2]\n"
"fmla z16.s, z7.s, z2.s[2]\n"
"fmla z20.s, z7.s, z3.s[2]\n"
- "subs x27, x27, #0x1\n"
"fmla z24.s, z7.s, z4.s[2]\n"
"fmla z28.s, z7.s, z5.s[2]\n"
- "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z9.s, z6.s, z0.s[2]\n"
+ "ld1w { z7.s }, p5/Z, [x10, #2, MUL VL]\n"
"fmla z13.s, z6.s, z1.s[2]\n"
"fmla z17.s, z6.s, z2.s[2]\n"
"fmla z21.s, z6.s, z3.s[2]\n"
"fmla z25.s, z6.s, z4.s[2]\n"
"fmla z29.s, z6.s, z5.s[2]\n"
"ld1w { z6.s }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"fmla z10.s, z7.s, z0.s[2]\n"
+ "addvl x10, x10, #4\n"
"fmla z14.s, z7.s, z1.s[2]\n"
"fmla z18.s, z7.s, z2.s[2]\n"
"fmla z22.s, z7.s, z3.s[2]\n"
@@ -1981,15 +1984,15 @@ void sve_hybrid_fp32_mla_6x4VL (
"cmp x28, x20\n"
"bne 71b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x21]\n"
"ld1rw { z0.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z1.s\n"
"fmin z9.s, p5/M, z9.s, z1.s\n"
@@ -2045,26 +2048,26 @@ void sve_hybrid_fp32_mla_6x4VL (
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z12.s }, p4, [x25]\n"
- "st1w { z13.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z14.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z15.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z20.s }, p4, [x23]\n"
- "st1w { z21.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z22.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z23.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z28.s }, p4, [x21]\n"
- "st1w { z29.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z30.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z31.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z12.s }, p4, [x26]\n"
+ "st1w { z13.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z14.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z15.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z20.s }, p4, [x24]\n"
+ "st1w { z21.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z22.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z23.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z28.s }, p4, [x22]\n"
+ "st1w { z29.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z31.s }, p1, [x22, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -2081,8 +2084,8 @@ void sve_hybrid_fp32_mla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
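The change above, repeated in each kernel file that follows, retires the `[output_ptr]` and `[bias]` asm operands: both pointers now live in `KernelArgs` and are fetched inside the assembly via `offsetof`-based loads such as `ldr x14, [%x[args_ptr], %[offsetof_bias]]`, freeing two registers for the kernel itself. A minimal standalone sketch of that pattern follows, with illustrative names only (the struct is trimmed to the two relevant fields, and `load_from_args` is a hypothetical helper, not part of the library):

// Minimal sketch of the KernelArgs pattern applied throughout this diff:
// the output and bias pointers live in a struct, and the inline assembly
// recovers them with offsetof()-based loads instead of tying each pointer
// to its own register operand. Illustrative names only; the real structs
// carry many more fields.
#include <cstddef>

struct KernelArgs {
    void        *output_ptr = nullptr;  // previously a "+&r" asm operand
    const float *bias       = nullptr;  // previously a plain "r" asm operand
};

// Hypothetical helper (not part of the library): reads both pointers back
// out of the struct from inline asm, mirroring
//   "ldr x14, [%x[args_ptr], %[offsetof_bias]]"
// in the kernels below.
static void load_from_args(const KernelArgs &ka, void **out, const float **b) {
#if defined(__aarch64__)
    void *o;
    const float *p;
    __asm__(
        "ldr %x[o], [%x[args_ptr], %[offsetof_output_ptr]]\n"
        "ldr %x[p], [%x[args_ptr], %[offsetof_bias]]\n"
        : [o] "=&r" (o), [p] "=&r" (p)
        : [args_ptr] "r" (&ka),
          [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)),
          [offsetof_bias] "I" (offsetof(KernelArgs, bias))
        : "memory");
    *out = o;
    *b = p;
#else
    *out = ka.output_ptr;  // portable fallback for non-AArch64 builds
    *b = ka.bias;
#endif
}

Keeping the pointers in the struct also lets the Height 8 paths write an advanced output pointer back with `str x20, [%x[args_ptr], %[offsetof_output_ptr]]` instead of mutating a register operand, which is exactly what the `madd`/`str` pair in the 8x1VL kernels below does.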
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp
index a353c9d660..5073e9cd7a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 8, 1, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 1, 1> transforms = {};
// Default to the generic kernel
kern_type kernel=sve_hybrid_fp32_mla_8x1VL;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp
index 344341205b..ea3d266ec1 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -105,10 +107,10 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"cmp %x[M], #0x2\n"
"bgt 25f\n"
"beq 13f\n"
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p0.s, x20, x13\n"
@@ -126,8 +128,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov x10, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -146,22 +148,22 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x28, x28, #0x4\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z16.s, z0.s\n"
"addvl x12, x12, #1\n"
+ "fmla z24.s, p1/M, z16.s, z0.s\n"
"ld1rw { z0.s }, p1/Z, [x28]\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x10, x10, #0x1\n"
+ "addvl x12, x12, #1\n"
"cmp x10, x20\n"
"fmla z24.s, p1/M, z16.s, z0.s\n"
- "addvl x12, x12, #1\n"
"bne 6b\n"
"tbz %x[flags], #1, 11f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
"ld1rw { z16.s }, p1/Z, [x20]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmax z24.s, p1/M, z24.s, z16.s\n"
@@ -174,23 +176,23 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"bgt 2b\n"
"b 98f\n"
"13:" // Height 2
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"14:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p0.s, x20, x13\n"
"cbz x14, 15f\n"
"ld1w { z24.s }, p1/Z, [x14]\n"
- "mov z25.d, z24.d\n"
"addvl x14, x14, #1\n"
+ "mov z25.d, z24.d\n"
"b 17f\n"
"15:" // Height 2: no bias
"tbz %x[flags], #0, 16f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x11, x20, LSL #2\n"
"ld1w { z24.s }, p0/Z, [x11]\n"
+ "add x20, x11, x20, LSL #2\n"
"ld1w { z25.s }, p0/Z, [x20]\n"
"b 17f\n"
"16:" // Height 2: no accumulate
@@ -200,8 +202,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov x10, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -224,28 +226,28 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x28, x28, #0x4\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z16.s, z0.s\n"
"add x27, x27, #0x4\n"
- "fmla z25.s, p1/M, z16.s, z1.s\n"
"addvl x12, x12, #1\n"
+ "fmla z24.s, p1/M, z16.s, z0.s\n"
"ld1rw { z0.s }, p1/Z, [x28]\n"
+ "fmla z25.s, p1/M, z16.s, z1.s\n"
"ld1rw { z1.s }, p1/Z, [x27]\n"
"bgt 21b\n"
"22:" // Height 2: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x10, x10, #0x1\n"
+ "addvl x12, x12, #1\n"
"cmp x10, x20\n"
"fmla z24.s, p1/M, z16.s, z0.s\n"
"fmla z25.s, p1/M, z16.s, z1.s\n"
- "addvl x12, x12, #1\n"
"bne 18b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
"tbz %x[flags], #1, 23f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
"ld1rw { z16.s }, p1/Z, [x20]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
@@ -254,33 +256,33 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"23:" // Height 2: No activation
"st1w { z24.s }, p0, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
+ "st1w { z25.s }, p0, [x28]\n"
"24:" // Height 2: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 14b\n"
"b 98f\n"
"25:" // Height 3
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"26:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p0.s, x20, x13\n"
"cbz x14, 27f\n"
"ld1w { z24.s }, p1/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"b 29f\n"
"27:" // Height 3: no bias
"tbz %x[flags], #0, 28f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x11, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p0/Z, [x11]\n"
- "ld1w { z25.s }, p0/Z, [x21]\n"
+ "add x20, x11, x21, LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
"ld1w { z26.s }, p0/Z, [x20]\n"
"b 29f\n"
"28:" // Height 3: no accumulate
@@ -291,8 +293,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov x10, #0x0\n"
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -319,13 +321,13 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x28, x28, #0x4\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z16.s, z0.s\n"
"add x27, x27, #0x4\n"
"add x26, x26, #0x4\n"
- "fmla z25.s, p1/M, z16.s, z1.s\n"
- "fmla z26.s, p1/M, z16.s, z2.s\n"
"addvl x12, x12, #1\n"
+ "fmla z24.s, p1/M, z16.s, z0.s\n"
"ld1rw { z0.s }, p1/Z, [x28]\n"
+ "fmla z25.s, p1/M, z16.s, z1.s\n"
+ "fmla z26.s, p1/M, z16.s, z2.s\n"
"ld1rw { z1.s }, p1/Z, [x27]\n"
"ld1rw { z2.s }, p1/Z, [x26]\n"
"bgt 33b\n"
@@ -333,19 +335,19 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x10, x10, #0x1\n"
+ "addvl x12, x12, #1\n"
"cmp x10, x20\n"
"fmla z24.s, p1/M, z16.s, z0.s\n"
"fmla z25.s, p1/M, z16.s, z1.s\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, p1/M, z16.s, z2.s\n"
"bne 30b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"tbz %x[flags], #1, 35f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
"ld1rw { z16.s }, p1/Z, [x20]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
@@ -356,37 +358,37 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"35:" // Height 3: No activation
"st1w { z24.s }, p0, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
+ "st1w { z25.s }, p0, [x28]\n"
+ "st1w { z26.s }, p0, [x27]\n"
"36:" // Height 3: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 26b\n"
"b 98f\n"
"37:" // Height 4
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"38:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p0.s, x20, x13\n"
"cbz x14, 39f\n"
"ld1w { z24.s }, p1/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"b 41f\n"
"39:" // Height 4: no bias
"tbz %x[flags], #0, 40f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x11, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p0/Z, [x11]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x22]\n"
- "ld1w { z26.s }, p0/Z, [x21]\n"
+ "add x20, x11, x21, LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
"ld1w { z27.s }, p0/Z, [x20]\n"
"b 41f\n"
"40:" // Height 4: no accumulate
@@ -398,8 +400,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov x10, #0x0\n"
"42:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 43f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -430,38 +432,38 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x28, x28, #0x4\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z16.s, z0.s\n"
"add x27, x27, #0x4\n"
"add x26, x26, #0x4\n"
- "fmla z25.s, p1/M, z16.s, z1.s\n"
- "fmla z26.s, p1/M, z16.s, z2.s\n"
"add x25, x25, #0x4\n"
- "fmla z27.s, p1/M, z16.s, z3.s\n"
"addvl x12, x12, #1\n"
+ "fmla z24.s, p1/M, z16.s, z0.s\n"
"ld1rw { z0.s }, p1/Z, [x28]\n"
+ "fmla z25.s, p1/M, z16.s, z1.s\n"
"ld1rw { z1.s }, p1/Z, [x27]\n"
+ "fmla z26.s, p1/M, z16.s, z2.s\n"
"ld1rw { z2.s }, p1/Z, [x26]\n"
+ "fmla z27.s, p1/M, z16.s, z3.s\n"
"ld1rw { z3.s }, p1/Z, [x25]\n"
"bgt 45b\n"
"46:" // Height 4: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x10, x10, #0x1\n"
+ "addvl x12, x12, #1\n"
"cmp x10, x20\n"
"fmla z24.s, p1/M, z16.s, z0.s\n"
"fmla z25.s, p1/M, z16.s, z1.s\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, p1/M, z16.s, z2.s\n"
"fmla z27.s, p1/M, z16.s, z3.s\n"
"bne 42b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
"tbz %x[flags], #1, 47f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
"ld1rw { z16.s }, p1/Z, [x20]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
@@ -474,41 +476,41 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"47:" // Height 4: No activation
"st1w { z24.s }, p0, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
+ "st1w { z25.s }, p0, [x28]\n"
+ "st1w { z26.s }, p0, [x27]\n"
+ "st1w { z27.s }, p0, [x26]\n"
"48:" // Height 4: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 38b\n"
"b 98f\n"
"49:" // Height 5
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"50:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p0.s, x20, x13\n"
"cbz x14, 51f\n"
"ld1w { z24.s }, p1/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"b 53f\n"
"51:" // Height 5: no bias
"tbz %x[flags], #0, 52f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x11, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p0/Z, [x11]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x23]\n"
- "ld1w { z26.s }, p0/Z, [x22]\n"
- "ld1w { z27.s }, p0/Z, [x21]\n"
+ "add x20, x11, x21, LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
"ld1w { z28.s }, p0/Z, [x20]\n"
"b 53f\n"
"52:" // Height 5: no accumulate
@@ -521,8 +523,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov x10, #0x0\n"
"54:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 55f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -557,19 +559,19 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x28, x28, #0x4\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z16.s, z0.s\n"
"add x27, x27, #0x4\n"
"add x26, x26, #0x4\n"
- "fmla z25.s, p1/M, z16.s, z1.s\n"
- "fmla z26.s, p1/M, z16.s, z2.s\n"
"add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
- "fmla z27.s, p1/M, z16.s, z3.s\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
"addvl x12, x12, #1\n"
- "fmla z28.s, p1/M, z16.s, z4.s\n"
+ "fmla z24.s, p1/M, z16.s, z0.s\n"
+ "ld1rw { z0.s }, p1/Z, [x28]\n"
+ "fmla z25.s, p1/M, z16.s, z1.s\n"
+ "fmla z26.s, p1/M, z16.s, z2.s\n"
+ "fmla z27.s, p1/M, z16.s, z3.s\n"
"ld1rw { z1.s }, p1/Z, [x27]\n"
"ld1rw { z2.s }, p1/Z, [x26]\n"
+ "fmla z28.s, p1/M, z16.s, z4.s\n"
"ld1rw { z3.s }, p1/Z, [x25]\n"
"ld1rw { z4.s }, p1/Z, [x24]\n"
"bgt 57b\n"
@@ -577,23 +579,23 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x10, x10, #0x1\n"
+ "addvl x12, x12, #1\n"
"cmp x10, x20\n"
"fmla z24.s, p1/M, z16.s, z0.s\n"
"fmla z25.s, p1/M, z16.s, z1.s\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, p1/M, z16.s, z2.s\n"
"fmla z27.s, p1/M, z16.s, z3.s\n"
"fmla z28.s, p1/M, z16.s, z4.s\n"
"bne 54b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"tbz %x[flags], #1, 59f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
"ld1rw { z16.s }, p1/Z, [x20]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
@@ -608,44 +610,44 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"59:" // Height 5: No activation
"st1w { z24.s }, p0, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
- "st1w { z28.s }, p0, [x24]\n"
+ "st1w { z25.s }, p0, [x28]\n"
+ "st1w { z26.s }, p0, [x27]\n"
+ "st1w { z27.s }, p0, [x26]\n"
+ "st1w { z28.s }, p0, [x25]\n"
"60:" // Height 5: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 50b\n"
"b 98f\n"
"61:" // Height 6
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"62:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p0.s, x20, x13\n"
"cbz x14, 63f\n"
"ld1w { z24.s }, p1/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
"b 65f\n"
"63:" // Height 6: no bias
"tbz %x[flags], #0, 64f\n"
- "ldr x24, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x11, x24, LSL #2\n"
- "add x20, x23, x24, LSL #2\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p0/Z, [x11]\n"
- "add x22, x20, x24, LSL #2\n"
- "add x21, x22, x24, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x23]\n"
+ "add x20, x11, x22, LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "add x20, x20, x22, LSL #2\n"
"ld1w { z26.s }, p0/Z, [x20]\n"
- "add x20, x21, x24, LSL #2\n"
- "ld1w { z27.s }, p0/Z, [x22]\n"
+ "add x20, x20, x22, LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
+ "add x21, x20, x22, LSL #2\n"
+ "add x20, x21, x22, LSL #2\n"
"ld1w { z28.s }, p0/Z, [x21]\n"
"ld1w { z29.s }, p0/Z, [x20]\n"
"b 65f\n"
@@ -660,8 +662,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov x10, #0x0\n"
"66:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 67f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -700,48 +702,48 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x28, x28, #0x4\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z16.s, z0.s\n"
"add x27, x27, #0x4\n"
"add x26, x26, #0x4\n"
- "fmla z25.s, p1/M, z16.s, z1.s\n"
- "fmla z26.s, p1/M, z16.s, z2.s\n"
"add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
- "fmla z27.s, p1/M, z16.s, z3.s\n"
- "fmla z28.s, p1/M, z16.s, z4.s\n"
"add x23, x23, #0x4\n"
"addvl x12, x12, #1\n"
- "fmla z29.s, p1/M, z16.s, z5.s\n"
+ "fmla z24.s, p1/M, z16.s, z0.s\n"
"ld1rw { z0.s }, p1/Z, [x28]\n"
+ "fmla z25.s, p1/M, z16.s, z1.s\n"
"ld1rw { z1.s }, p1/Z, [x27]\n"
+ "fmla z26.s, p1/M, z16.s, z2.s\n"
"ld1rw { z2.s }, p1/Z, [x26]\n"
+ "fmla z27.s, p1/M, z16.s, z3.s\n"
+ "fmla z28.s, p1/M, z16.s, z4.s\n"
"ld1rw { z3.s }, p1/Z, [x25]\n"
"ld1rw { z4.s }, p1/Z, [x24]\n"
+ "fmla z29.s, p1/M, z16.s, z5.s\n"
"ld1rw { z5.s }, p1/Z, [x23]\n"
"bgt 69b\n"
"70:" // Height 6: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x10, x10, #0x1\n"
+ "addvl x12, x12, #1\n"
"cmp x10, x20\n"
"fmla z24.s, p1/M, z16.s, z0.s\n"
"fmla z25.s, p1/M, z16.s, z1.s\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, p1/M, z16.s, z2.s\n"
"fmla z27.s, p1/M, z16.s, z3.s\n"
"fmla z28.s, p1/M, z16.s, z4.s\n"
"fmla z29.s, p1/M, z16.s, z5.s\n"
"bne 66b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 71f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
"ld1rw { z16.s }, p1/Z, [x20]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
@@ -758,29 +760,29 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"71:" // Height 6: No activation
"st1w { z24.s }, p0, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x23]\n"
+ "st1w { z25.s }, p0, [x28]\n"
+ "st1w { z26.s }, p0, [x27]\n"
+ "st1w { z27.s }, p0, [x26]\n"
+ "st1w { z28.s }, p0, [x25]\n"
+ "st1w { z29.s }, p0, [x24]\n"
"72:" // Height 6: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 62b\n"
"b 98f\n"
"73:" // Height 7
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"74:" // Height 7: Column loop
"mov x20, #0x0\n"
"whilelt p0.s, x20, x13\n"
"cbz x14, 75f\n"
"ld1w { z24.s }, p1/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
@@ -788,17 +790,17 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"b 77f\n"
"75:" // Height 7: no bias
"tbz %x[flags], #0, 76f\n"
- "ldr x24, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x11, x24, LSL #2\n"
- "add x20, x21, x24, LSL #2\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p0/Z, [x11]\n"
- "add x23, x20, x24, LSL #2\n"
- "add x22, x23, x24, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x21]\n"
+ "add x20, x11, x23, LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "add x20, x20, x23, LSL #2\n"
"ld1w { z26.s }, p0/Z, [x20]\n"
- "add x21, x22, x24, LSL #2\n"
- "add x20, x21, x24, LSL #2\n"
- "ld1w { z27.s }, p0/Z, [x23]\n"
+ "add x20, x20, x23, LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
+ "add x22, x20, x23, LSL #2\n"
+ "add x21, x22, x23, LSL #2\n"
+ "add x20, x21, x23, LSL #2\n"
"ld1w { z28.s }, p0/Z, [x22]\n"
"ld1w { z29.s }, p0/Z, [x21]\n"
"ld1w { z30.s }, p0/Z, [x20]\n"
@@ -815,8 +817,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov x10, #0x0\n"
"78:" // Height 7: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 79f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -859,25 +861,25 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x28, x28, #0x4\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z16.s, z0.s\n"
"add x27, x27, #0x4\n"
"add x26, x26, #0x4\n"
- "fmla z25.s, p1/M, z16.s, z1.s\n"
- "fmla z26.s, p1/M, z16.s, z2.s\n"
"add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
- "fmla z27.s, p1/M, z16.s, z3.s\n"
- "ld1rw { z0.s }, p1/Z, [x28]\n"
"add x23, x23, #0x4\n"
"add x22, x22, #0x4\n"
- "fmla z28.s, p1/M, z16.s, z4.s\n"
- "fmla z29.s, p1/M, z16.s, z5.s\n"
+ "fmla z24.s, p1/M, z16.s, z0.s\n"
+ "ld1rw { z0.s }, p1/Z, [x28]\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, p1/M, z16.s, z1.s\n"
+ "fmla z26.s, p1/M, z16.s, z2.s\n"
"ld1rw { z1.s }, p1/Z, [x27]\n"
- "fmla z30.s, p1/M, z16.s, z6.s\n"
+ "fmla z27.s, p1/M, z16.s, z3.s\n"
"ld1rw { z2.s }, p1/Z, [x26]\n"
+ "fmla z28.s, p1/M, z16.s, z4.s\n"
"ld1rw { z3.s }, p1/Z, [x25]\n"
+ "fmla z29.s, p1/M, z16.s, z5.s\n"
"ld1rw { z4.s }, p1/Z, [x24]\n"
+ "fmla z30.s, p1/M, z16.s, z6.s\n"
"ld1rw { z5.s }, p1/Z, [x23]\n"
"ld1rw { z6.s }, p1/Z, [x22]\n"
"bgt 81b\n"
@@ -885,10 +887,10 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x10, x10, #0x1\n"
+ "addvl x12, x12, #1\n"
"cmp x10, x20\n"
"fmla z24.s, p1/M, z16.s, z0.s\n"
"fmla z25.s, p1/M, z16.s, z1.s\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, p1/M, z16.s, z2.s\n"
"fmla z27.s, p1/M, z16.s, z3.s\n"
"fmla z28.s, p1/M, z16.s, z4.s\n"
@@ -896,16 +898,16 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmla z30.s, p1/M, z16.s, z6.s\n"
"bne 78b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 83f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
"ld1rw { z16.s }, p1/Z, [x20]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
@@ -924,12 +926,12 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"83:" // Height 7: No activation
"st1w { z24.s }, p0, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x23]\n"
- "st1w { z30.s }, p0, [x22]\n"
+ "st1w { z25.s }, p0, [x28]\n"
+ "st1w { z26.s }, p0, [x27]\n"
+ "st1w { z27.s }, p0, [x26]\n"
+ "st1w { z28.s }, p0, [x25]\n"
+ "st1w { z29.s }, p0, [x24]\n"
+ "st1w { z30.s }, p0, [x23]\n"
"84:" // Height 7: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
@@ -937,20 +939,21 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"b 98f\n"
"85:" // Height 8
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x20\n"
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x11\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"86:" // Height 8: Column loop
"mov x20, #0x0\n"
"whilelt p0.s, x20, x13\n"
"cbz x14, 87f\n"
"ld1w { z24.s }, p1/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
@@ -959,20 +962,20 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"b 89f\n"
"87:" // Height 8: no bias
"tbz %x[flags], #0, 88f\n"
- "ldr x24, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x11, x24, LSL #2\n"
- "add x21, x22, x24, LSL #2\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p0/Z, [x11]\n"
- "add x23, x21, x24, LSL #2\n"
- "add x20, x23, x24, LSL #2\n"
- "ld1w { z25.s }, p0/Z, [x22]\n"
- "ld1w { z26.s }, p0/Z, [x21]\n"
- "add x22, x20, x24, LSL #2\n"
- "add x21, x22, x24, LSL #2\n"
- "ld1w { z27.s }, p0/Z, [x23]\n"
- "ld1w { z28.s }, p0/Z, [x20]\n"
- "add x20, x21, x24, LSL #2\n"
- "ld1w { z29.s }, p0/Z, [x22]\n"
+ "add x20, x11, x23, LSL #2\n"
+ "ld1w { z25.s }, p0/Z, [x20]\n"
+ "add x20, x20, x23, LSL #2\n"
+ "ld1w { z26.s }, p0/Z, [x20]\n"
+ "add x20, x20, x23, LSL #2\n"
+ "ld1w { z27.s }, p0/Z, [x20]\n"
+ "add x22, x20, x23, LSL #2\n"
+ "add x20, x22, x23, LSL #2\n"
+ "add x21, x20, x23, LSL #2\n"
+ "ld1w { z28.s }, p0/Z, [x22]\n"
+ "ld1w { z29.s }, p0/Z, [x20]\n"
+ "add x20, x21, x23, LSL #2\n"
"ld1w { z30.s }, p0/Z, [x21]\n"
"ld1w { z31.s }, p0/Z, [x20]\n"
"b 89f\n"
@@ -989,8 +992,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"mov x10, #0x0\n"
"90:" // Height 8: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 91f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1037,28 +1040,28 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x28, x28, #0x4\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, p1/M, z16.s, z0.s\n"
"add x27, x27, #0x4\n"
"add x26, x26, #0x4\n"
- "fmla z25.s, p1/M, z16.s, z1.s\n"
- "fmla z26.s, p1/M, z16.s, z2.s\n"
"add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
- "fmla z27.s, p1/M, z16.s, z3.s\n"
- "fmla z28.s, p1/M, z16.s, z4.s\n"
"add x23, x23, #0x4\n"
"add x22, x22, #0x4\n"
- "fmla z29.s, p1/M, z16.s, z5.s\n"
+ "fmla z24.s, p1/M, z16.s, z0.s\n"
"ld1rw { z0.s }, p1/Z, [x28]\n"
"add x21, x21, #0x4\n"
"addvl x12, x12, #1\n"
+ "fmla z25.s, p1/M, z16.s, z1.s\n"
"ld1rw { z1.s }, p1/Z, [x27]\n"
- "fmla z30.s, p1/M, z16.s, z6.s\n"
- "fmla z31.s, p1/M, z16.s, z7.s\n"
+ "fmla z26.s, p1/M, z16.s, z2.s\n"
"ld1rw { z2.s }, p1/Z, [x26]\n"
+ "fmla z27.s, p1/M, z16.s, z3.s\n"
"ld1rw { z3.s }, p1/Z, [x25]\n"
+ "fmla z28.s, p1/M, z16.s, z4.s\n"
"ld1rw { z4.s }, p1/Z, [x24]\n"
+ "fmla z29.s, p1/M, z16.s, z5.s\n"
"ld1rw { z5.s }, p1/Z, [x23]\n"
+ "fmla z30.s, p1/M, z16.s, z6.s\n"
+ "fmla z31.s, p1/M, z16.s, z7.s\n"
"ld1rw { z6.s }, p1/Z, [x22]\n"
"ld1rw { z7.s }, p1/Z, [x21]\n"
"bgt 93b\n"
@@ -1066,10 +1069,10 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"ld1w { z16.s }, p1/Z, [x12]\n"
"add x10, x10, #0x1\n"
+ "addvl x12, x12, #1\n"
"cmp x10, x20\n"
"fmla z24.s, p1/M, z16.s, z0.s\n"
"fmla z25.s, p1/M, z16.s, z1.s\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, p1/M, z16.s, z2.s\n"
"fmla z27.s, p1/M, z16.s, z3.s\n"
"fmla z28.s, p1/M, z16.s, z4.s\n"
@@ -1078,17 +1081,17 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"fmla z31.s, p1/M, z16.s, z7.s\n"
"bne 90b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"tbz %x[flags], #1, 95f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p1/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p1/Z, [x21]\n"
"ld1rw { z16.s }, p1/Z, [x20]\n"
"fmin z24.s, p1/M, z24.s, z17.s\n"
"fmin z25.s, p1/M, z25.s, z17.s\n"
@@ -1109,13 +1112,13 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"95:" // Height 8: No activation
"st1w { z24.s }, p0, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p0, [x27]\n"
- "st1w { z26.s }, p0, [x26]\n"
- "st1w { z27.s }, p0, [x25]\n"
- "st1w { z28.s }, p0, [x24]\n"
- "st1w { z29.s }, p0, [x23]\n"
- "st1w { z30.s }, p0, [x22]\n"
- "st1w { z31.s }, p0, [x21]\n"
+ "st1w { z25.s }, p0, [x28]\n"
+ "st1w { z26.s }, p0, [x27]\n"
+ "st1w { z27.s }, p0, [x26]\n"
+ "st1w { z28.s }, p0, [x25]\n"
+ "st1w { z29.s }, p0, [x24]\n"
+ "st1w { z30.s }, p0, [x23]\n"
+ "st1w { z31.s }, p0, [x22]\n"
"96:" // Height 8: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
@@ -1132,8 +1135,8 @@ void sve_hybrid_fp32_mla_8x1VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"98:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp
index 161c85e5f3..edfb3c0828 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,19 @@ void sve_hybrid_fp32_mla_8x1VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -79,6 +80,7 @@ void sve_hybrid_fp32_mla_8x1VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -105,10 +107,10 @@ void sve_hybrid_fp32_mla_8x1VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p1.s, x20, x13\n"
@@ -126,8 +128,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov x10, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -143,50 +145,50 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
"whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
- "fmla z24.s, z16.s, z0.s[0]\n"
- "ld1w { z16.s }, p2/Z, [x12, #1, MUL VL]\n"
- "fmla z24.s, z16.s, z0.s[1]\n"
- "ld1w { z16.s }, p2/Z, [x12, #2, MUL VL]\n"
- "fmla z24.s, z16.s, z0.s[2]\n"
- "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
"sub x9, x9, #0x4\n"
+ "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
"cmp x9, #0x4\n"
- "fmla z24.s, z16.s, z0.s[3]\n"
- "add x28, x28, #0x10\n"
"addvl x12, x12, #4\n"
+ "ld1rqw { z0.s }, p0/Z, [x28]\n"
+ "add x28, x28, #0x10\n"
+ "fmla z24.s, z19.s, z0.s[0]\n"
+ "fmla z24.s, z18.s, z0.s[1]\n"
+ "fmla z24.s, z17.s, z0.s[2]\n"
+ "fmla z24.s, z16.s, z0.s[3]\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x9\n"
- "ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, z16.s, z0.s[0]\n"
"addvl x12, x12, #1\n"
+ "ld1rqw { z0.s }, p0/Z, [x28]\n"
+ "fmla z24.s, z16.s, z0.s[0]\n"
"ble 11f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, z16.s, z0.s[1]\n"
"addvl x12, x12, #1\n"
+ "fmla z24.s, z16.s, z0.s[1]\n"
"ble 11f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
- "fmla z24.s, z16.s, z0.s[2]\n"
"addvl x12, x12, #1\n"
+ "fmla z24.s, z16.s, z0.s[2]\n"
"ble 11f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
- "fmla z24.s, z16.s, z0.s[3]\n"
"addvl x12, x12, #1\n"
+ "fmla z24.s, z16.s, z0.s[3]\n"
"11:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x10, x10, #0x1\n"
"cmp x10, x20\n"
"bne 6b\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"fmin z24.s, p2/M, z24.s, z17.s\n"
"fmax z24.s, p2/M, z24.s, z16.s\n"
@@ -199,23 +201,23 @@ void sve_hybrid_fp32_mla_8x1VL (
"bgt 2b\n"
"b 106f\n"
"14:" // Height 2
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p1.s, x20, x13\n"
"cbz x14, 16f\n"
"ld1w { z24.s }, p2/Z, [x14]\n"
- "mov z25.d, z24.d\n"
"addvl x14, x14, #1\n"
+ "mov z25.d, z24.d\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x11, x20, LSL #2\n"
"ld1w { z24.s }, p1/Z, [x11]\n"
+ "add x20, x11, x20, LSL #2\n"
"ld1w { z25.s }, p1/Z, [x20]\n"
"b 18f\n"
"17:" // Height 2: no accumulate
@@ -225,8 +227,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov x10, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -245,63 +247,63 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
"whilelt p0.s, XZR, x9\n"
- "ld1rqw { z1.s }, p0/Z, [x28]\n"
- "ld1rqw { z0.s }, p0/Z, [x27]\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
"sub x9, x9, #0x4\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
- "fmla z24.s, z16.s, z1.s[0]\n"
- "fmla z25.s, z16.s, z0.s[0]\n"
- "ld1w { z16.s }, p2/Z, [x12, #1, MUL VL]\n"
- "fmla z24.s, z16.s, z1.s[1]\n"
- "fmla z25.s, z16.s, z0.s[1]\n"
"ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
"ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
- "fmla z24.s, z17.s, z1.s[2]\n"
- "fmla z25.s, z17.s, z0.s[2]\n"
"cmp x9, #0x4\n"
+ "addvl x12, x12, #4\n"
+ "ld1rqw { z1.s }, p0/Z, [x28]\n"
"add x28, x28, #0x10\n"
+ "ld1rqw { z0.s }, p0/Z, [x27]\n"
+ "add x27, x27, #0x10\n"
+ "fmla z24.s, z19.s, z1.s[0]\n"
+ "fmla z25.s, z19.s, z0.s[0]\n"
+ "fmla z24.s, z18.s, z1.s[1]\n"
+ "fmla z25.s, z18.s, z0.s[1]\n"
+ "fmla z24.s, z17.s, z1.s[2]\n"
+ "fmla z25.s, z17.s, z0.s[2]\n"
"fmla z24.s, z16.s, z1.s[3]\n"
"fmla z25.s, z16.s, z0.s[3]\n"
- "add x27, x27, #0x10\n"
- "addvl x12, x12, #4\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z16.s }, p2/Z, [x12]\n"
+ "subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
"fmla z24.s, z16.s, z0.s[0]\n"
"fmla z25.s, z16.s, z1.s[0]\n"
- "addvl x12, x12, #1\n"
"ble 24f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[1]\n"
"fmla z25.s, z16.s, z1.s[1]\n"
- "addvl x12, x12, #1\n"
"ble 24f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[2]\n"
"fmla z25.s, z16.s, z1.s[2]\n"
- "addvl x12, x12, #1\n"
"ble 24f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[3]\n"
"fmla z25.s, z16.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
"24:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x10, x10, #0x1\n"
"cmp x10, x20\n"
"bne 19b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"fmin z24.s, p2/M, z24.s, z17.s\n"
"fmin z25.s, p2/M, z25.s, z17.s\n"
@@ -310,33 +312,33 @@ void sve_hybrid_fp32_mla_8x1VL (
"25:" // Height 2: No activation
"st1w { z24.s }, p1, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
+ "st1w { z25.s }, p1, [x28]\n"
"26:" // Height 2: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 15b\n"
"b 106f\n"
"27:" // Height 3
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p1.s, x20, x13\n"
"cbz x14, 29f\n"
"ld1w { z24.s }, p2/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"b 31f\n"
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x11, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p1/Z, [x11]\n"
- "ld1w { z25.s }, p1/Z, [x21]\n"
+ "add x20, x11, x21, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
"ld1w { z26.s }, p1/Z, [x20]\n"
"b 31f\n"
"30:" // Height 3: no accumulate
@@ -347,8 +349,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov x10, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -370,62 +372,62 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.s, XZR, x9\n"
- "ld1rqw { z2.s }, p0/Z, [x28]\n"
- "ld1rqw { z1.s }, p0/Z, [x27]\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
"sub x9, x9, #0x4\n"
- "ld1rqw { z0.s }, p0/Z, [x26]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
- "fmla z24.s, z16.s, z2.s[0]\n"
- "fmla z25.s, z16.s, z1.s[0]\n"
- "fmla z26.s, z16.s, z0.s[0]\n"
- "ld1w { z16.s }, p2/Z, [x12, #1, MUL VL]\n"
- "fmla z24.s, z16.s, z2.s[1]\n"
"ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
- "fmla z25.s, z16.s, z1.s[1]\n"
- "fmla z26.s, z16.s, z0.s[1]\n"
"ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
"cmp x9, #0x4\n"
- "fmla z24.s, z17.s, z2.s[2]\n"
- "fmla z25.s, z17.s, z1.s[2]\n"
+ "addvl x12, x12, #4\n"
+ "ld1rqw { z2.s }, p0/Z, [x28]\n"
"add x28, x28, #0x10\n"
+ "ld1rqw { z1.s }, p0/Z, [x27]\n"
"add x27, x27, #0x10\n"
+ "ld1rqw { z0.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "fmla z24.s, z19.s, z2.s[0]\n"
+ "fmla z25.s, z19.s, z1.s[0]\n"
+ "fmla z26.s, z19.s, z0.s[0]\n"
+ "fmla z24.s, z18.s, z2.s[1]\n"
+ "fmla z25.s, z18.s, z1.s[1]\n"
+ "fmla z26.s, z18.s, z0.s[1]\n"
+ "fmla z24.s, z17.s, z2.s[2]\n"
+ "fmla z25.s, z17.s, z1.s[2]\n"
"fmla z26.s, z17.s, z0.s[2]\n"
"fmla z24.s, z16.s, z2.s[3]\n"
- "add x26, x26, #0x10\n"
- "addvl x12, x12, #4\n"
"fmla z25.s, z16.s, z1.s[3]\n"
"fmla z26.s, z16.s, z0.s[3]\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z16.s }, p2/Z, [x12]\n"
+ "subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
"fmla z24.s, z16.s, z0.s[0]\n"
"fmla z25.s, z16.s, z1.s[0]\n"
"fmla z26.s, z16.s, z2.s[0]\n"
- "addvl x12, x12, #1\n"
"ble 37f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[1]\n"
"fmla z25.s, z16.s, z1.s[1]\n"
"fmla z26.s, z16.s, z2.s[1]\n"
- "addvl x12, x12, #1\n"
"ble 37f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[2]\n"
"fmla z25.s, z16.s, z1.s[2]\n"
"fmla z26.s, z16.s, z2.s[2]\n"
- "addvl x12, x12, #1\n"
"ble 37f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[3]\n"
"fmla z25.s, z16.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[3]\n"
"37:" // Height 3: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -433,12 +435,12 @@ void sve_hybrid_fp32_mla_8x1VL (
"cmp x10, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
- "add x26, x27, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"fmin z24.s, p2/M, z24.s, z17.s\n"
"fmin z25.s, p2/M, z25.s, z17.s\n"
@@ -449,37 +451,37 @@ void sve_hybrid_fp32_mla_8x1VL (
"38:" // Height 3: No activation
"st1w { z24.s }, p1, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z26.s }, p1, [x27]\n"
"39:" // Height 3: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 28b\n"
"b 106f\n"
"40:" // Height 4
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p1.s, x20, x13\n"
"cbz x14, 42f\n"
"ld1w { z24.s }, p2/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"b 44f\n"
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x11, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p1/Z, [x11]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x22]\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
+ "add x20, x11, x21, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
"ld1w { z27.s }, p1/Z, [x20]\n"
"b 44f\n"
"43:" // Height 4: no accumulate
@@ -491,8 +493,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov x10, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -517,29 +519,29 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x9, x9, #0x4\n"
+ "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "cmp x9, #0x4\n"
+ "addvl x12, x12, #4\n"
"ld1rqw { z3.s }, p0/Z, [x28]\n"
+ "add x28, x28, #0x10\n"
"ld1rqw { z2.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
+ "add x27, x27, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x26]\n"
"ld1rqw { z0.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
- "fmla z24.s, z16.s, z3.s[0]\n"
- "fmla z25.s, z16.s, z2.s[0]\n"
- "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
- "fmla z26.s, z16.s, z1.s[0]\n"
- "fmla z27.s, z16.s, z0.s[0]\n"
- "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
+ "fmla z24.s, z19.s, z3.s[0]\n"
+ "fmla z25.s, z19.s, z2.s[0]\n"
+ "fmla z26.s, z19.s, z1.s[0]\n"
+ "fmla z27.s, z19.s, z0.s[0]\n"
"fmla z24.s, z18.s, z3.s[1]\n"
"fmla z25.s, z18.s, z2.s[1]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
"fmla z26.s, z18.s, z1.s[1]\n"
"fmla z27.s, z18.s, z0.s[1]\n"
- "add x25, x25, #0x10\n"
- "addvl x12, x12, #4\n"
"fmla z24.s, z17.s, z3.s[2]\n"
"fmla z25.s, z17.s, z2.s[2]\n"
"fmla z26.s, z17.s, z1.s[2]\n"
@@ -551,38 +553,38 @@ void sve_hybrid_fp32_mla_8x1VL (
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z16.s }, p2/Z, [x12]\n"
+ "subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
"ld1rqw { z3.s }, p0/Z, [x25]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
"fmla z24.s, z16.s, z0.s[0]\n"
"fmla z25.s, z16.s, z1.s[0]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[0]\n"
"fmla z27.s, z16.s, z3.s[0]\n"
"ble 50f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[1]\n"
"fmla z25.s, z16.s, z1.s[1]\n"
"fmla z26.s, z16.s, z2.s[1]\n"
"fmla z27.s, z16.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"ble 50f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[2]\n"
"fmla z25.s, z16.s, z1.s[2]\n"
"fmla z26.s, z16.s, z2.s[2]\n"
"fmla z27.s, z16.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"ble 50f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[3]\n"
"fmla z25.s, z16.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[3]\n"
"fmla z27.s, z16.s, z3.s[3]\n"
"50:" // Height 4: Multiply loop: multiply skip
@@ -591,13 +593,13 @@ void sve_hybrid_fp32_mla_8x1VL (
"cmp x10, x20\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
- "add x25, x26, x20, LSL #2\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"fmin z24.s, p2/M, z24.s, z17.s\n"
"fmin z25.s, p2/M, z25.s, z17.s\n"
@@ -610,41 +612,41 @@ void sve_hybrid_fp32_mla_8x1VL (
"51:" // Height 4: No activation
"st1w { z24.s }, p1, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "st1w { z27.s }, p1, [x26]\n"
"52:" // Height 4: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 41b\n"
"b 106f\n"
"53:" // Height 5
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p1.s, x20, x13\n"
"cbz x14, 55f\n"
"ld1w { z24.s }, p2/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"b 57f\n"
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
- "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x11, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p1/Z, [x11]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x23]\n"
- "ld1w { z26.s }, p1/Z, [x22]\n"
- "ld1w { z27.s }, p1/Z, [x21]\n"
+ "add x20, x11, x21, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
+ "add x20, x20, x21, LSL #2\n"
"ld1w { z28.s }, p1/Z, [x20]\n"
"b 57f\n"
"56:" // Height 5: no accumulate
@@ -657,8 +659,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov x10, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -686,33 +688,33 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x9, x9, #0x4\n"
+ "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "cmp x9, #0x4\n"
+ "addvl x12, x12, #4\n"
"ld1rqw { z4.s }, p0/Z, [x28]\n"
+ "add x28, x28, #0x10\n"
"ld1rqw { z3.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
+ "add x27, x27, #0x10\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
"ld1rqw { z1.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x24]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
- "fmla z24.s, z16.s, z4.s[0]\n"
- "fmla z25.s, z16.s, z3.s[0]\n"
- "fmla z26.s, z16.s, z2.s[0]\n"
- "fmla z27.s, z16.s, z1.s[0]\n"
- "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
- "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
- "fmla z28.s, z16.s, z0.s[0]\n"
+ "add x24, x24, #0x10\n"
+ "fmla z24.s, z19.s, z4.s[0]\n"
+ "fmla z25.s, z19.s, z3.s[0]\n"
+ "fmla z26.s, z19.s, z2.s[0]\n"
+ "fmla z27.s, z19.s, z1.s[0]\n"
+ "fmla z28.s, z19.s, z0.s[0]\n"
"fmla z24.s, z18.s, z4.s[1]\n"
- "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
- "add x27, x27, #0x10\n"
"fmla z25.s, z18.s, z3.s[1]\n"
"fmla z26.s, z18.s, z2.s[1]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"fmla z27.s, z18.s, z1.s[1]\n"
"fmla z28.s, z18.s, z0.s[1]\n"
- "add x24, x24, #0x10\n"
- "addvl x12, x12, #4\n"
"fmla z24.s, z17.s, z4.s[2]\n"
"fmla z25.s, z17.s, z3.s[2]\n"
"fmla z26.s, z17.s, z2.s[2]\n"
@@ -726,42 +728,42 @@ void sve_hybrid_fp32_mla_8x1VL (
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z16.s }, p2/Z, [x12]\n"
+ "subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
"ld1rqw { z3.s }, p0/Z, [x25]\n"
"ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
"fmla z24.s, z16.s, z0.s[0]\n"
"fmla z25.s, z16.s, z1.s[0]\n"
"fmla z26.s, z16.s, z2.s[0]\n"
"fmla z27.s, z16.s, z3.s[0]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[0]\n"
"ble 63f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[1]\n"
"fmla z25.s, z16.s, z1.s[1]\n"
"fmla z26.s, z16.s, z2.s[1]\n"
"fmla z27.s, z16.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[1]\n"
"ble 63f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[2]\n"
"fmla z25.s, z16.s, z1.s[2]\n"
"fmla z26.s, z16.s, z2.s[2]\n"
"fmla z27.s, z16.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[2]\n"
"ble 63f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[3]\n"
"fmla z25.s, z16.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[3]\n"
"fmla z27.s, z16.s, z3.s[3]\n"
"fmla z28.s, z16.s, z4.s[3]\n"
@@ -771,14 +773,14 @@ void sve_hybrid_fp32_mla_8x1VL (
"cmp x10, x20\n"
"bne 58b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"fmin z24.s, p2/M, z24.s, z17.s\n"
"fmin z25.s, p2/M, z25.s, z17.s\n"
@@ -793,44 +795,44 @@ void sve_hybrid_fp32_mla_8x1VL (
"64:" // Height 5: No activation
"st1w { z24.s }, p1, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
- "st1w { z28.s }, p1, [x24]\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "st1w { z27.s }, p1, [x26]\n"
+ "st1w { z28.s }, p1, [x25]\n"
"65:" // Height 5: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 54b\n"
"b 106f\n"
"66:" // Height 6
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p1.s, x20, x13\n"
"cbz x14, 68f\n"
"ld1w { z24.s }, p2/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
"b 70f\n"
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
- "ldr x24, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x11, x24, LSL #2\n"
- "add x20, x23, x24, LSL #2\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p1/Z, [x11]\n"
- "add x22, x20, x24, LSL #2\n"
- "add x21, x22, x24, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x23]\n"
+ "add x20, x11, x22, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ "add x20, x20, x22, LSL #2\n"
"ld1w { z26.s }, p1/Z, [x20]\n"
- "add x20, x21, x24, LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x22]\n"
+ "add x20, x20, x22, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
+ "add x21, x20, x22, LSL #2\n"
+ "add x20, x21, x22, LSL #2\n"
"ld1w { z28.s }, p1/Z, [x21]\n"
"ld1w { z29.s }, p1/Z, [x20]\n"
"b 70f\n"
@@ -845,8 +847,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov x10, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -877,33 +879,33 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x9, x9, #0x4\n"
+ "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "cmp x9, #0x4\n"
+ "addvl x12, x12, #4\n"
"ld1rqw { z5.s }, p0/Z, [x28]\n"
+ "add x28, x28, #0x10\n"
"ld1rqw { z4.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
+ "add x27, x27, #0x10\n"
"ld1rqw { z3.s }, p0/Z, [x26]\n"
"ld1rqw { z2.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x24]\n"
"ld1rqw { z0.s }, p0/Z, [x23]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1w { z19.s }, p2/Z, [x12]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"fmla z24.s, z19.s, z5.s[0]\n"
"fmla z25.s, z19.s, z4.s[0]\n"
- "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
"fmla z26.s, z19.s, z3.s[0]\n"
"fmla z27.s, z19.s, z2.s[0]\n"
- "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
"fmla z28.s, z19.s, z1.s[0]\n"
"fmla z29.s, z19.s, z0.s[0]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
"fmla z24.s, z18.s, z5.s[1]\n"
"fmla z25.s, z18.s, z4.s[1]\n"
- "add x23, x23, #0x10\n"
- "addvl x12, x12, #4\n"
"fmla z26.s, z18.s, z3.s[1]\n"
"fmla z27.s, z18.s, z2.s[1]\n"
"fmla z28.s, z18.s, z1.s[1]\n"
@@ -923,17 +925,17 @@ void sve_hybrid_fp32_mla_8x1VL (
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z16.s }, p2/Z, [x12]\n"
+ "subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
"ld1rqw { z3.s }, p0/Z, [x25]\n"
"ld1rqw { z4.s }, p0/Z, [x24]\n"
"ld1rqw { z5.s }, p0/Z, [x23]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
"fmla z24.s, z16.s, z0.s[0]\n"
"fmla z25.s, z16.s, z1.s[0]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[0]\n"
"fmla z27.s, z16.s, z3.s[0]\n"
"fmla z28.s, z16.s, z4.s[0]\n"
@@ -941,28 +943,28 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 76f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[1]\n"
"fmla z25.s, z16.s, z1.s[1]\n"
"fmla z26.s, z16.s, z2.s[1]\n"
"fmla z27.s, z16.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[1]\n"
"fmla z29.s, z16.s, z5.s[1]\n"
"ble 76f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[2]\n"
"fmla z25.s, z16.s, z1.s[2]\n"
"fmla z26.s, z16.s, z2.s[2]\n"
"fmla z27.s, z16.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[2]\n"
"fmla z29.s, z16.s, z5.s[2]\n"
"ble 76f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[3]\n"
"fmla z25.s, z16.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[3]\n"
"fmla z27.s, z16.s, z3.s[3]\n"
"fmla z28.s, z16.s, z4.s[3]\n"
@@ -973,15 +975,15 @@ void sve_hybrid_fp32_mla_8x1VL (
"cmp x10, x20\n"
"bne 71b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"fmin z24.s, p2/M, z24.s, z17.s\n"
"fmin z25.s, p2/M, z25.s, z17.s\n"
@@ -998,29 +1000,29 @@ void sve_hybrid_fp32_mla_8x1VL (
"77:" // Height 6: No activation
"st1w { z24.s }, p1, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
- "st1w { z28.s }, p1, [x24]\n"
- "st1w { z29.s }, p1, [x23]\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "st1w { z27.s }, p1, [x26]\n"
+ "st1w { z28.s }, p1, [x25]\n"
+ "st1w { z29.s }, p1, [x24]\n"
"78:" // Height 6: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
"bgt 67b\n"
"b 106f\n"
"79:" // Height 7
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"80:" // Height 7: Column loop
"mov x20, #0x0\n"
"whilelt p1.s, x20, x13\n"
"cbz x14, 81f\n"
"ld1w { z24.s }, p2/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
@@ -1028,17 +1030,17 @@ void sve_hybrid_fp32_mla_8x1VL (
"b 83f\n"
"81:" // Height 7: no bias
"tbz %x[flags], #0, 82f\n"
- "ldr x24, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x11, x24, LSL #2\n"
- "add x20, x21, x24, LSL #2\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p1/Z, [x11]\n"
- "add x23, x20, x24, LSL #2\n"
- "add x22, x23, x24, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x21]\n"
+ "add x20, x11, x23, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ "add x20, x20, x23, LSL #2\n"
"ld1w { z26.s }, p1/Z, [x20]\n"
- "add x21, x22, x24, LSL #2\n"
- "add x20, x21, x24, LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x23]\n"
+ "add x20, x20, x23, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
+ "add x22, x20, x23, LSL #2\n"
+ "add x21, x22, x23, LSL #2\n"
+ "add x20, x21, x23, LSL #2\n"
"ld1w { z28.s }, p1/Z, [x22]\n"
"ld1w { z29.s }, p1/Z, [x21]\n"
"ld1w { z30.s }, p1/Z, [x20]\n"
@@ -1055,8 +1057,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov x10, #0x0\n"
"84:" // Height 7: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 85f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1090,37 +1092,37 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 88f\n"
"87:" // Height 7: Multiply loop: Main loop head
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x9, x9, #0x4\n"
+ "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "cmp x9, #0x4\n"
+ "addvl x12, x12, #4\n"
"ld1rqw { z6.s }, p0/Z, [x28]\n"
+ "add x28, x28, #0x10\n"
"ld1rqw { z5.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
+ "add x27, x27, #0x10\n"
"ld1rqw { z4.s }, p0/Z, [x26]\n"
"ld1rqw { z3.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z2.s }, p0/Z, [x24]\n"
"ld1rqw { z1.s }, p0/Z, [x23]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x22]\n"
- "ld1w { z19.s }, p2/Z, [x12]\n"
"fmla z24.s, z19.s, z6.s[0]\n"
"fmla z25.s, z19.s, z5.s[0]\n"
+ "add x22, x22, #0x10\n"
"fmla z26.s, z19.s, z4.s[0]\n"
"fmla z27.s, z19.s, z3.s[0]\n"
- "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
- "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
"fmla z28.s, z19.s, z2.s[0]\n"
"fmla z29.s, z19.s, z1.s[0]\n"
- "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
- "add x25, x25, #0x10\n"
"fmla z30.s, z19.s, z0.s[0]\n"
"fmla z24.s, z18.s, z6.s[1]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"fmla z25.s, z18.s, z5.s[1]\n"
"fmla z26.s, z18.s, z4.s[1]\n"
- "add x22, x22, #0x10\n"
- "addvl x12, x12, #4\n"
"fmla z27.s, z18.s, z3.s[1]\n"
"fmla z28.s, z18.s, z2.s[1]\n"
"fmla z29.s, z18.s, z1.s[1]\n"
@@ -1142,50 +1144,50 @@ void sve_hybrid_fp32_mla_8x1VL (
"bgt 87b\n"
"88:" // Height 7: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z16.s }, p2/Z, [x12]\n"
+ "subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
"ld1rqw { z3.s }, p0/Z, [x25]\n"
"ld1rqw { z4.s }, p0/Z, [x24]\n"
"ld1rqw { z5.s }, p0/Z, [x23]\n"
"ld1rqw { z6.s }, p0/Z, [x22]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
"fmla z24.s, z16.s, z0.s[0]\n"
"fmla z25.s, z16.s, z1.s[0]\n"
"fmla z26.s, z16.s, z2.s[0]\n"
"fmla z27.s, z16.s, z3.s[0]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[0]\n"
"fmla z29.s, z16.s, z5.s[0]\n"
"fmla z30.s, z16.s, z6.s[0]\n"
"ble 89f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[1]\n"
"fmla z25.s, z16.s, z1.s[1]\n"
"fmla z26.s, z16.s, z2.s[1]\n"
"fmla z27.s, z16.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[1]\n"
"fmla z29.s, z16.s, z5.s[1]\n"
"fmla z30.s, z16.s, z6.s[1]\n"
"ble 89f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[2]\n"
"fmla z25.s, z16.s, z1.s[2]\n"
"fmla z26.s, z16.s, z2.s[2]\n"
"fmla z27.s, z16.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[2]\n"
"fmla z29.s, z16.s, z5.s[2]\n"
"fmla z30.s, z16.s, z6.s[2]\n"
"ble 89f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[3]\n"
"fmla z25.s, z16.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[3]\n"
"fmla z27.s, z16.s, z3.s[3]\n"
"fmla z28.s, z16.s, z4.s[3]\n"
@@ -1197,16 +1199,16 @@ void sve_hybrid_fp32_mla_8x1VL (
"cmp x10, x20\n"
"bne 84b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"tbz %x[flags], #1, 90f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"fmin z24.s, p2/M, z24.s, z17.s\n"
"fmin z25.s, p2/M, z25.s, z17.s\n"
@@ -1225,12 +1227,12 @@ void sve_hybrid_fp32_mla_8x1VL (
"90:" // Height 7: No activation
"st1w { z24.s }, p1, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
- "st1w { z28.s }, p1, [x24]\n"
- "st1w { z29.s }, p1, [x23]\n"
- "st1w { z30.s }, p1, [x22]\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "st1w { z27.s }, p1, [x26]\n"
+ "st1w { z28.s }, p1, [x25]\n"
+ "st1w { z29.s }, p1, [x24]\n"
+ "st1w { z30.s }, p1, [x23]\n"
"91:" // Height 7: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
@@ -1238,20 +1240,21 @@ void sve_hybrid_fp32_mla_8x1VL (
"b 106f\n"
"92:" // Height 8
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x20\n"
- "mov x14, %x[bias]\n"
+ "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x13, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x11\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"93:" // Height 8: Column loop
"mov x20, #0x0\n"
"whilelt p1.s, x20, x13\n"
"cbz x14, 94f\n"
"ld1w { z24.s }, p2/Z, [x14]\n"
+ "addvl x14, x14, #1\n"
"mov z25.d, z24.d\n"
"mov z26.d, z24.d\n"
- "addvl x14, x14, #1\n"
"mov z27.d, z24.d\n"
"mov z28.d, z24.d\n"
"mov z29.d, z24.d\n"
@@ -1260,20 +1263,20 @@ void sve_hybrid_fp32_mla_8x1VL (
"b 96f\n"
"94:" // Height 8: no bias
"tbz %x[flags], #0, 95f\n"
- "ldr x24, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x11, x24, LSL #2\n"
- "add x21, x22, x24, LSL #2\n"
+ "ldr x23, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z24.s }, p1/Z, [x11]\n"
- "add x23, x21, x24, LSL #2\n"
- "add x20, x23, x24, LSL #2\n"
- "ld1w { z25.s }, p1/Z, [x22]\n"
- "ld1w { z26.s }, p1/Z, [x21]\n"
- "add x22, x20, x24, LSL #2\n"
- "add x21, x22, x24, LSL #2\n"
- "ld1w { z27.s }, p1/Z, [x23]\n"
- "ld1w { z28.s }, p1/Z, [x20]\n"
- "add x20, x21, x24, LSL #2\n"
- "ld1w { z29.s }, p1/Z, [x22]\n"
+ "add x20, x11, x23, LSL #2\n"
+ "ld1w { z25.s }, p1/Z, [x20]\n"
+ "add x20, x20, x23, LSL #2\n"
+ "ld1w { z26.s }, p1/Z, [x20]\n"
+ "add x20, x20, x23, LSL #2\n"
+ "ld1w { z27.s }, p1/Z, [x20]\n"
+ "add x22, x20, x23, LSL #2\n"
+ "add x20, x22, x23, LSL #2\n"
+ "add x21, x20, x23, LSL #2\n"
+ "ld1w { z28.s }, p1/Z, [x22]\n"
+ "ld1w { z29.s }, p1/Z, [x20]\n"
+ "add x20, x21, x23, LSL #2\n"
"ld1w { z30.s }, p1/Z, [x21]\n"
"ld1w { z31.s }, p1/Z, [x20]\n"
"b 96f\n"
@@ -1290,8 +1293,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"mov x10, #0x0\n"
"97:" // Height 8: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w9, [x20, x10, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w9, [x20, x10, LSL #0x2]\n"
"tbz %x[flags], #3, 98f\n"
"ldr x20, [%x[input_ptr], x10, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1328,37 +1331,37 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 101f\n"
"100:" // Height 8: Multiply loop: Main loop head
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z19.s }, p2/Z, [x12]\n"
+ "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
+ "sub x9, x9, #0x4\n"
+ "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "cmp x9, #0x4\n"
+ "addvl x12, x12, #4\n"
"ld1rqw { z7.s }, p0/Z, [x28]\n"
+ "add x28, x28, #0x10\n"
"ld1rqw { z6.s }, p0/Z, [x27]\n"
- "sub x9, x9, #0x4\n"
+ "add x27, x27, #0x10\n"
"ld1rqw { z5.s }, p0/Z, [x26]\n"
"ld1rqw { z4.s }, p0/Z, [x25]\n"
- "cmp x9, #0x4\n"
- "add x28, x28, #0x10\n"
+ "add x26, x26, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z3.s }, p0/Z, [x24]\n"
"ld1rqw { z2.s }, p0/Z, [x23]\n"
- "add x27, x27, #0x10\n"
- "add x26, x26, #0x10\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqw { z1.s }, p0/Z, [x22]\n"
"ld1rqw { z0.s }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1w { z19.s }, p2/Z, [x12]\n"
"fmla z24.s, z19.s, z7.s[0]\n"
"fmla z25.s, z19.s, z6.s[0]\n"
- "ld1w { z18.s }, p2/Z, [x12, #1, MUL VL]\n"
"fmla z26.s, z19.s, z5.s[0]\n"
"fmla z27.s, z19.s, z4.s[0]\n"
- "ld1w { z17.s }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x12, #3, MUL VL]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
"fmla z28.s, z19.s, z3.s[0]\n"
"fmla z29.s, z19.s, z2.s[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"fmla z30.s, z19.s, z1.s[0]\n"
"fmla z31.s, z19.s, z0.s[0]\n"
- "add x21, x21, #0x10\n"
- "addvl x12, x12, #4\n"
"fmla z24.s, z18.s, z7.s[1]\n"
"fmla z25.s, z18.s, z6.s[1]\n"
"fmla z26.s, z18.s, z5.s[1]\n"
@@ -1386,19 +1389,19 @@ void sve_hybrid_fp32_mla_8x1VL (
"bgt 100b\n"
"101:" // Height 8: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x9\n"
+ "ld1w { z16.s }, p2/Z, [x12]\n"
+ "subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"ld1rqw { z0.s }, p0/Z, [x28]\n"
"ld1rqw { z1.s }, p0/Z, [x27]\n"
- "subs x9, x9, #0x1\n"
"ld1rqw { z2.s }, p0/Z, [x26]\n"
"ld1rqw { z3.s }, p0/Z, [x25]\n"
"ld1rqw { z4.s }, p0/Z, [x24]\n"
"ld1rqw { z5.s }, p0/Z, [x23]\n"
"ld1rqw { z6.s }, p0/Z, [x22]\n"
"ld1rqw { z7.s }, p0/Z, [x21]\n"
- "ld1w { z16.s }, p2/Z, [x12]\n"
"fmla z24.s, z16.s, z0.s[0]\n"
"fmla z25.s, z16.s, z1.s[0]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[0]\n"
"fmla z27.s, z16.s, z3.s[0]\n"
"fmla z28.s, z16.s, z4.s[0]\n"
@@ -1408,11 +1411,11 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 102f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[1]\n"
"fmla z25.s, z16.s, z1.s[1]\n"
"fmla z26.s, z16.s, z2.s[1]\n"
"fmla z27.s, z16.s, z3.s[1]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[1]\n"
"fmla z29.s, z16.s, z5.s[1]\n"
"fmla z30.s, z16.s, z6.s[1]\n"
@@ -1420,20 +1423,20 @@ void sve_hybrid_fp32_mla_8x1VL (
"ble 102f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
"subs x9, x9, #0x1\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[2]\n"
"fmla z25.s, z16.s, z1.s[2]\n"
"fmla z26.s, z16.s, z2.s[2]\n"
"fmla z27.s, z16.s, z3.s[2]\n"
- "addvl x12, x12, #1\n"
"fmla z28.s, z16.s, z4.s[2]\n"
"fmla z29.s, z16.s, z5.s[2]\n"
"fmla z30.s, z16.s, z6.s[2]\n"
"fmla z31.s, z16.s, z7.s[2]\n"
"ble 102f\n"
"ld1w { z16.s }, p2/Z, [x12]\n"
+ "addvl x12, x12, #1\n"
"fmla z24.s, z16.s, z0.s[3]\n"
"fmla z25.s, z16.s, z1.s[3]\n"
- "addvl x12, x12, #1\n"
"fmla z26.s, z16.s, z2.s[3]\n"
"fmla z27.s, z16.s, z3.s[3]\n"
"fmla z28.s, z16.s, z4.s[3]\n"
@@ -1446,17 +1449,17 @@ void sve_hybrid_fp32_mla_8x1VL (
"cmp x10, x20\n"
"bne 97b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x27, x11, x20, LSL #2\n"
+ "add x28, x11, x20, LSL #2\n"
+ "add x27, x28, x20, LSL #2\n"
"add x26, x27, x20, LSL #2\n"
"add x25, x26, x20, LSL #2\n"
"add x24, x25, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"tbz %x[flags], #1, 103f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p2/Z, [x21]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"fmin z24.s, p2/M, z24.s, z17.s\n"
"fmin z25.s, p2/M, z25.s, z17.s\n"
@@ -1477,13 +1480,13 @@ void sve_hybrid_fp32_mla_8x1VL (
"103:" // Height 8: No activation
"st1w { z24.s }, p1, [x11]\n"
"addvl x11, x11, #1\n"
- "st1w { z25.s }, p1, [x27]\n"
- "st1w { z26.s }, p1, [x26]\n"
- "st1w { z27.s }, p1, [x25]\n"
- "st1w { z28.s }, p1, [x24]\n"
- "st1w { z29.s }, p1, [x23]\n"
- "st1w { z30.s }, p1, [x22]\n"
- "st1w { z31.s }, p1, [x21]\n"
+ "st1w { z25.s }, p1, [x28]\n"
+ "st1w { z26.s }, p1, [x27]\n"
+ "st1w { z27.s }, p1, [x26]\n"
+ "st1w { z28.s }, p1, [x25]\n"
+ "st1w { z29.s }, p1, [x24]\n"
+ "st1w { z30.s }, p1, [x23]\n"
+ "st1w { z31.s }, p1, [x22]\n"
"104:" // Height 8: Writeback done
"decw x13\n"
"cmp x13, XZR\n"
@@ -1500,8 +1503,8 @@ void sve_hybrid_fp32_mla_8x1VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"106:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z16", "z17", "z18", "z19", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
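
The hunks above all apply one mechanical change: `bias` and `output_ptr` stop being standalone inline-asm operands (the old `%x[bias]` / `%x[output_ptr]`) and instead live in the `KernelArgs` block, reloaded inside the asm via `ldr` plus an `offsetof` immediate. This shrinks the operand list and frees general-purpose registers for the kernel body. Below is a minimal sketch of that pattern, assuming an AArch64 compiler with GCC extended asm; `kernel_skeleton` and the specific register choices are illustrative only, not the library's actual code.

    // Sketch of the KernelArgs/offsetof pattern used by the hunks above.
    // Hypothetical skeleton: only the two pointer loads are shown.
    #include <cstddef>

    struct KernelArgs {
        const float *bias = nullptr;  // previously a standalone %[bias] operand
        void *output_ptr = nullptr;   // previously a standalone %[output_ptr] operand
    };

    void kernel_skeleton(KernelArgs &ka) {
        __asm__ __volatile__(
            // Load the pointers from the args block instead of binding them
            // as dedicated asm operands; this mirrors the "+ldr x14/x11" lines.
            "ldr x14, [%x[args_ptr], %[offsetof_bias]]\n"
            "ldr x11, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            :
            : [args_ptr] "r"(&ka),
              [offsetof_bias] "I"(offsetof(KernelArgs, bias)),
              [offsetof_output_ptr] "I"(offsetof(KernelArgs, output_ptr))
            : "cc", "memory", "x11", "x14");
    }

A side effect visible in the Height 8 hunks: because `output_ptr` is no longer an output operand, the advanced pointer is written back with `str x20, [%x[args_ptr], %[offsetof_output_ptr]]` rather than by mutating the operand in place.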
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL.hpp
index 66c106d2eb..d7436b15f4 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 4, 12, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 12, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
index 2b2a0684f9..be245f9ecc 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,18 +48,19 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -80,6 +81,7 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -100,10 +102,10 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p6.s, x20, x9\n"
@@ -120,19 +122,19 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"cbz x10, 3f\n"
"ld1w { z8.s }, p7/Z, [x10]\n"
"ld1w { z9.s }, p7/Z, [x10, #1, MUL VL]\n"
- "zip2 z14.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p7/Z, [x10, #2, MUL VL]\n"
"ld1w { z11.s }, p7/Z, [x10, #3, MUL VL]\n"
- "zip2 z15.d, z9.d, z9.d\n"
- "zip1 z9.d, z9.d, z9.d\n"
"ld1w { z12.s }, p7/Z, [x10, #4, MUL VL]\n"
"ld1w { z13.s }, p7/Z, [x10, #5, MUL VL]\n"
+ "addvl x10, x10, #6\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x10, x10, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -140,16 +142,16 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z21.s }, p6/Z, [x27]\n"
- "ld1w { z20.s }, p5/Z, [x27, #1, MUL VL]\n"
- "zip1 z8.d, z21.d, z14.d\n"
- "zip2 z14.d, z21.d, z14.d\n"
+ "ld1w { z25.s }, p6/Z, [x27]\n"
+ "ld1w { z24.s }, p5/Z, [x27, #1, MUL VL]\n"
"ld1w { z23.s }, p4/Z, [x27, #2, MUL VL]\n"
"ld1w { z22.s }, p3/Z, [x27, #3, MUL VL]\n"
- "zip1 z9.d, z20.d, z15.d\n"
- "zip2 z15.d, z20.d, z15.d\n"
"ld1w { z21.s }, p2/Z, [x27, #4, MUL VL]\n"
"ld1w { z20.s }, p1/Z, [x27, #5, MUL VL]\n"
+ "zip1 z8.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z9.d, z24.d, z15.d\n"
+ "zip2 z15.d, z24.d, z15.d\n"
"zip1 z10.d, z23.d, z16.d\n"
"zip2 z16.d, z23.d, z16.d\n"
"zip1 z11.d, z22.d, z17.d\n"
@@ -176,8 +178,8 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"mov x26, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -193,70 +195,70 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z23.h }, p7/Z, [x28]\n"
+ "ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "sub x25, x25, #0x4\n"
+ "ld1h { z21.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z20.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "cmp x25, #0x4\n"
"ld1rqw { z24.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
".inst 0x658abf18 // bfcvt z24.h, p7/M, z24.s\n"
"uzp1 z24.h, z24.h, z24.h\n"
- "ld1h { z21.h }, p7/Z, [x28]\n"
- "ld1h { z20.h }, p7/Z, [x28, #1, MUL VL]\n"
- ".inst 0x6475e708 // bfmmla z8.s, z24.h, z21.h\n"
- ".inst 0x6474e70e // bfmmla z14.s, z24.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z20.h }, p7/Z, [x28, #3, MUL VL]\n"
+ ".inst 0x6477e708 // bfmmla z8.s, z24.h, z23.h\n"
+ "ld1h { z23.h }, p7/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
+ "ld1h { z22.h }, p7/Z, [x28, #5, MUL VL]\n"
".inst 0x6475e709 // bfmmla z9.s, z24.h, z21.h\n"
- ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x28, #4, MUL VL]\n"
- "ld1h { z20.h }, p7/Z, [x28, #5, MUL VL]\n"
- ".inst 0x6475e70a // bfmmla z10.s, z24.h, z21.h\n"
- ".inst 0x6474e710 // bfmmla z16.s, z24.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
"ld1h { z20.h }, p7/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ ".inst 0x6477e70a // bfmmla z10.s, z24.h, z23.h\n"
+ ".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
"ld1h { z23.h }, p7/Z, [x28, #-8, MUL VL]\n"
"ld1h { z22.h }, p7/Z, [x28, #-7, MUL VL]\n"
- "sub x25, x25, #0x4\n"
"ld1h { z21.h }, p7/Z, [x28, #-6, MUL VL]\n"
"ld1h { z20.h }, p7/Z, [x28, #-5, MUL VL]\n"
- "cmp x25, #0x4\n"
+ "addvl x28, x28, #-4\n"
".inst 0x6477e70c // bfmmla z12.s, z24.h, z23.h\n"
".inst 0x6476e712 // bfmmla z18.s, z24.h, z22.h\n"
".inst 0x6475e70d // bfmmla z13.s, z24.h, z21.h\n"
- "add x24, x24, #0x10\n"
- "addvl x28, x28, #-4\n"
".inst 0x6474e713 // bfmmla z19.s, z24.h, z20.h\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x25\n"
- "ld1rqw { z23.s }, p0/Z, [x24]\n"
- ".inst 0x658abef7 // bfcvt z23.h, p7/M, z23.s\n"
- "uzp1 z23.h, z23.h, z23.h\n"
- "ld1h { z21.h }, p7/Z, [x28]\n"
- "ld1h { z20.h }, p7/Z, [x28, #1, MUL VL]\n"
- ".inst 0x6475e6e8 // bfmmla z8.s, z23.h, z21.h\n"
- ".inst 0x6474e6ee // bfmmla z14.s, z23.h, z20.h\n"
+ "ld1h { z23.h }, p7/Z, [x28]\n"
+ "ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
"ld1h { z21.h }, p7/Z, [x28, #2, MUL VL]\n"
"ld1h { z20.h }, p7/Z, [x28, #3, MUL VL]\n"
- ".inst 0x6475e6e9 // bfmmla z9.s, z23.h, z21.h\n"
- ".inst 0x6474e6ef // bfmmla z15.s, z23.h, z20.h\n"
- "ld1h { z21.h }, p7/Z, [x28, #4, MUL VL]\n"
- "ld1h { z20.h }, p7/Z, [x28, #5, MUL VL]\n"
- ".inst 0x6475e6ea // bfmmla z10.s, z23.h, z21.h\n"
- ".inst 0x6474e6f0 // bfmmla z16.s, z23.h, z20.h\n"
+ "ld1rqw { z24.s }, p0/Z, [x24]\n"
+ ".inst 0x658abf18 // bfcvt z24.h, p7/M, z24.s\n"
+ "uzp1 z24.h, z24.h, z24.h\n"
+ ".inst 0x6477e708 // bfmmla z8.s, z24.h, z23.h\n"
+ "ld1h { z23.h }, p7/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
+ "ld1h { z22.h }, p7/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x6475e709 // bfmmla z9.s, z24.h, z21.h\n"
"ld1h { z21.h }, p7/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
"ld1h { z20.h }, p7/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x6475e6eb // bfmmla z11.s, z23.h, z21.h\n"
- ".inst 0x6474e6f1 // bfmmla z17.s, z23.h, z20.h\n"
- "ld1h { z20.h }, p7/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x6477e70a // bfmmla z10.s, z24.h, z23.h\n"
+ ".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
+ ".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
+ ".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
+ "ld1h { z23.h }, p7/Z, [x28, #-8, MUL VL]\n"
"ld1h { z22.h }, p7/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x6474e6ec // bfmmla z12.s, z23.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x28, #-6, MUL VL]\n"
"ld1h { z20.h }, p7/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x6476e6f2 // bfmmla z18.s, z23.h, z22.h\n"
- ".inst 0x6475e6ed // bfmmla z13.s, z23.h, z21.h\n"
- ".inst 0x6474e6f3 // bfmmla z19.s, z23.h, z20.h\n"
"addvl x28, x28, #-4\n"
+ ".inst 0x6477e70c // bfmmla z12.s, z24.h, z23.h\n"
+ ".inst 0x6476e712 // bfmmla z18.s, z24.h, z22.h\n"
+ ".inst 0x6475e70d // bfmmla z13.s, z24.h, z21.h\n"
+ ".inst 0x6474e713 // bfmmla z19.s, z24.h, z20.h\n"
"11:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
@@ -269,9 +271,9 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z12.d, z12.d, z18.d\n"
"uzp1 z13.d, z13.d, z19.d\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z21.s }, p7/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z21.s }, p7/Z, [x21]\n"
"ld1rw { z20.s }, p7/Z, [x20]\n"
"fmin z8.s, p7/M, z8.s, z21.s\n"
"fmin z9.s, p7/M, z9.s, z21.s\n"
@@ -299,10 +301,10 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"bgt 2b\n"
"b 54f\n"
"14:" // Height 2
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p6.s, x20, x9\n"
@@ -319,19 +321,19 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"cbz x10, 16f\n"
"ld1w { z8.s }, p7/Z, [x10]\n"
"ld1w { z9.s }, p7/Z, [x10, #1, MUL VL]\n"
- "zip2 z14.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p7/Z, [x10, #2, MUL VL]\n"
"ld1w { z11.s }, p7/Z, [x10, #3, MUL VL]\n"
- "zip2 z15.d, z9.d, z9.d\n"
- "zip1 z9.d, z9.d, z9.d\n"
"ld1w { z12.s }, p7/Z, [x10, #4, MUL VL]\n"
"ld1w { z13.s }, p7/Z, [x10, #5, MUL VL]\n"
+ "addvl x10, x10, #6\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x10, x10, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -340,25 +342,25 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x27, x20, LSL #2\n"
- "ld1w { z16.s }, p6/Z, [x27]\n"
- "ld1w { z17.s }, p5/Z, [x27, #1, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x27, #2, MUL VL]\n"
+ "ld1w { z25.s }, p6/Z, [x27]\n"
+ "ld1w { z24.s }, p5/Z, [x27, #1, MUL VL]\n"
+ "ld1w { z23.s }, p4/Z, [x27, #2, MUL VL]\n"
"ld1w { z22.s }, p3/Z, [x27, #3, MUL VL]\n"
"ld1w { z21.s }, p2/Z, [x27, #4, MUL VL]\n"
+ "add x20, x27, x20, LSL #2\n"
"ld1w { z20.s }, p1/Z, [x27, #5, MUL VL]\n"
"ld1w { z14.s }, p6/Z, [x20]\n"
- "zip1 z8.d, z16.d, z14.d\n"
- "zip2 z14.d, z16.d, z14.d\n"
"ld1w { z15.s }, p5/Z, [x20, #1, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x20, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z15.d\n"
- "zip2 z15.d, z17.d, z15.d\n"
"ld1w { z17.s }, p3/Z, [x20, #3, MUL VL]\n"
"ld1w { z18.s }, p2/Z, [x20, #4, MUL VL]\n"
- "zip1 z10.d, z19.d, z16.d\n"
- "zip2 z16.d, z19.d, z16.d\n"
"ld1w { z19.s }, p1/Z, [x20, #5, MUL VL]\n"
+ "zip1 z8.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z9.d, z24.d, z15.d\n"
+ "zip2 z15.d, z24.d, z15.d\n"
+ "zip1 z10.d, z23.d, z16.d\n"
+ "zip2 z16.d, z23.d, z16.d\n"
"zip1 z11.d, z22.d, z17.d\n"
"zip2 z17.d, z22.d, z17.d\n"
"zip1 z12.d, z21.d, z18.d\n"
@@ -383,8 +385,8 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"mov x26, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -403,77 +405,77 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z23.h }, p7/Z, [x28]\n"
+ "ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "sub x25, x25, #0x4\n"
+ "ld1h { z21.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z25.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "cmp x25, #0x4\n"
"ld1rqw { z24.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z20.s }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
".inst 0x658abf18 // bfcvt z24.h, p7/M, z24.s\n"
".inst 0x658abe94 // bfcvt z20.h, p7/M, z20.s\n"
"uzp1 z24.h, z24.h, z24.h\n"
- "ld1h { z23.h }, p7/Z, [x28]\n"
- "ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
"uzp1 z20.h, z20.h, z20.h\n"
"trn1 z24.d, z24.d, z20.d\n"
- "ld1h { z21.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z20.h }, p7/Z, [x28, #3, MUL VL]\n"
".inst 0x6477e708 // bfmmla z8.s, z24.h, z23.h\n"
- ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
"ld1h { z23.h }, p7/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
"ld1h { z22.h }, p7/Z, [x28, #5, MUL VL]\n"
".inst 0x6475e709 // bfmmla z9.s, z24.h, z21.h\n"
- ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x6479e70f // bfmmla z15.s, z24.h, z25.h\n"
"ld1h { z20.h }, p7/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
".inst 0x6477e70a // bfmmla z10.s, z24.h, z23.h\n"
".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
- "ld1h { z23.h }, p7/Z, [x28, #-8, MUL VL]\n"
".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
+ "ld1h { z23.h }, p7/Z, [x28, #-8, MUL VL]\n"
"ld1h { z22.h }, p7/Z, [x28, #-7, MUL VL]\n"
"ld1h { z21.h }, p7/Z, [x28, #-6, MUL VL]\n"
"ld1h { z20.h }, p7/Z, [x28, #-5, MUL VL]\n"
- "sub x25, x25, #0x4\n"
- "cmp x25, #0x4\n"
+ "addvl x28, x28, #-4\n"
".inst 0x6477e70c // bfmmla z12.s, z24.h, z23.h\n"
".inst 0x6476e712 // bfmmla z18.s, z24.h, z22.h\n"
".inst 0x6475e70d // bfmmla z13.s, z24.h, z21.h\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
".inst 0x6474e713 // bfmmla z19.s, z24.h, z20.h\n"
- "addvl x28, x28, #-4\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z23.h }, p7/Z, [x28]\n"
+ "ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z21.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z25.h }, p7/Z, [x28, #3, MUL VL]\n"
"ld1rqw { z24.s }, p0/Z, [x24]\n"
"ld1rqw { z20.s }, p0/Z, [x23]\n"
".inst 0x658abf18 // bfcvt z24.h, p7/M, z24.s\n"
".inst 0x658abe94 // bfcvt z20.h, p7/M, z20.s\n"
"uzp1 z24.h, z24.h, z24.h\n"
- "ld1h { z23.h }, p7/Z, [x28]\n"
- "ld1h { z22.h }, p7/Z, [x28, #1, MUL VL]\n"
"uzp1 z20.h, z20.h, z20.h\n"
"trn1 z24.d, z24.d, z20.d\n"
- "ld1h { z21.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z20.h }, p7/Z, [x28, #3, MUL VL]\n"
".inst 0x6477e708 // bfmmla z8.s, z24.h, z23.h\n"
- ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
"ld1h { z23.h }, p7/Z, [x28, #4, MUL VL]\n"
+ ".inst 0x6476e70e // bfmmla z14.s, z24.h, z22.h\n"
"ld1h { z22.h }, p7/Z, [x28, #5, MUL VL]\n"
".inst 0x6475e709 // bfmmla z9.s, z24.h, z21.h\n"
- ".inst 0x6474e70f // bfmmla z15.s, z24.h, z20.h\n"
"ld1h { z21.h }, p7/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x6479e70f // bfmmla z15.s, z24.h, z25.h\n"
"ld1h { z20.h }, p7/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
".inst 0x6477e70a // bfmmla z10.s, z24.h, z23.h\n"
".inst 0x6476e710 // bfmmla z16.s, z24.h, z22.h\n"
- "ld1h { z23.h }, p7/Z, [x28, #-8, MUL VL]\n"
".inst 0x6475e70b // bfmmla z11.s, z24.h, z21.h\n"
".inst 0x6474e711 // bfmmla z17.s, z24.h, z20.h\n"
+ "ld1h { z23.h }, p7/Z, [x28, #-8, MUL VL]\n"
"ld1h { z22.h }, p7/Z, [x28, #-7, MUL VL]\n"
"ld1h { z21.h }, p7/Z, [x28, #-6, MUL VL]\n"
"ld1h { z20.h }, p7/Z, [x28, #-5, MUL VL]\n"
+ "addvl x28, x28, #-4\n"
".inst 0x6477e70c // bfmmla z12.s, z24.h, z23.h\n"
".inst 0x6476e712 // bfmmla z18.s, z24.h, z22.h\n"
- "addvl x28, x28, #-4\n"
".inst 0x6475e70d // bfmmla z13.s, z24.h, z21.h\n"
".inst 0x6474e713 // bfmmla z19.s, z24.h, z20.h\n"
"24:" // Height 2: Multiply loop: multiply skip
@@ -484,21 +486,21 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
- "add x23, x27, x20, LSL #2\n"
"uzp1 z14.d, z9.d, z15.d\n"
"uzp2 z9.d, z9.d, z15.d\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
"uzp1 z16.d, z11.d, z17.d\n"
"uzp2 z11.d, z11.d, z17.d\n"
+ "add x24, x27, x20, LSL #2\n"
"uzp1 z17.d, z12.d, z18.d\n"
"uzp2 z12.d, z12.d, z18.d\n"
"uzp1 z18.d, z13.d, z19.d\n"
"uzp2 z13.d, z13.d, z19.d\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z20.s }, p7/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z20.s }, p7/Z, [x21]\n"
"ld1rw { z19.s }, p7/Z, [x20]\n"
"fmin z4.s, p7/M, z4.s, z20.s\n"
"fmin z14.s, p7/M, z14.s, z20.s\n"
@@ -532,22 +534,22 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"st1w { z17.s }, p2, [x27, #4, MUL VL]\n"
"st1w { z18.s }, p1, [x27, #5, MUL VL]\n"
"addvl x27, x27, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
+ "st1w { z8.s }, p6, [x24]\n"
+ "st1w { z9.s }, p5, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x24, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x24, #5, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x9, ALL, MUL #6\n"
"cmp x9, XZR\n"
"bgt 15b\n"
"b 54f\n"
"27:" // Height 3
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p6.s, x20, x9\n"
@@ -564,19 +566,19 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"cbz x10, 29f\n"
"ld1w { z8.s }, p7/Z, [x10]\n"
"ld1w { z9.s }, p7/Z, [x10, #1, MUL VL]\n"
- "zip2 z14.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p7/Z, [x10, #2, MUL VL]\n"
"ld1w { z11.s }, p7/Z, [x10, #3, MUL VL]\n"
- "zip2 z15.d, z9.d, z9.d\n"
- "zip1 z9.d, z9.d, z9.d\n"
"ld1w { z12.s }, p7/Z, [x10, #4, MUL VL]\n"
"ld1w { z13.s }, p7/Z, [x10, #5, MUL VL]\n"
+ "addvl x10, x10, #6\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x10, x10, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -597,38 +599,38 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z22.s }, p6/Z, [x27]\n"
+ "ld1w { z24.s }, p5/Z, [x27, #1, MUL VL]\n"
+ "ld1w { z0.s }, p4/Z, [x27, #2, MUL VL]\n"
+ "ld1w { z2.s }, p3/Z, [x27, #3, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x27, #4, MUL VL]\n"
"add x21, x27, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p6/Z, [x27]\n"
- "ld1w { z17.s }, p5/Z, [x27, #1, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x27, #2, MUL VL]\n"
- "ld1w { z22.s }, p3/Z, [x27, #3, MUL VL]\n"
- "ld1w { z24.s }, p2/Z, [x27, #4, MUL VL]\n"
"ld1w { z20.s }, p1/Z, [x27, #5, MUL VL]\n"
"ld1w { z14.s }, p6/Z, [x21]\n"
- "zip1 z8.d, z16.d, z14.d\n"
- "zip2 z14.d, z16.d, z14.d\n"
"ld1w { z15.s }, p5/Z, [x21, #1, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x21, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z15.d\n"
- "zip2 z15.d, z17.d, z15.d\n"
"ld1w { z17.s }, p3/Z, [x21, #3, MUL VL]\n"
"ld1w { z18.s }, p2/Z, [x21, #4, MUL VL]\n"
- "zip1 z10.d, z19.d, z16.d\n"
- "zip2 z16.d, z19.d, z16.d\n"
"ld1w { z19.s }, p1/Z, [x21, #5, MUL VL]\n"
"ld1w { z21.s }, p6/Z, [x20]\n"
- "zip1 z11.d, z22.d, z17.d\n"
- "zip2 z17.d, z22.d, z17.d\n"
+ "zip1 z8.d, z22.d, z14.d\n"
+ "zip2 z14.d, z22.d, z14.d\n"
"ld1w { z22.s }, p5/Z, [x20, #1, MUL VL]\n"
"ld1w { z23.s }, p4/Z, [x20, #2, MUL VL]\n"
- "zip1 z12.d, z24.d, z18.d\n"
- "zip2 z18.d, z24.d, z18.d\n"
+ "zip1 z9.d, z24.d, z15.d\n"
+ "zip2 z15.d, z24.d, z15.d\n"
"ld1w { z24.s }, p3/Z, [x20, #3, MUL VL]\n"
"ld1w { z25.s }, p2/Z, [x20, #4, MUL VL]\n"
+ "zip1 z10.d, z0.d, z16.d\n"
+ "zip2 z16.d, z0.d, z16.d\n"
+ "ld1w { z0.s }, p1/Z, [x20, #5, MUL VL]\n"
+ "zip1 z11.d, z2.d, z17.d\n"
+ "zip2 z17.d, z2.d, z17.d\n"
+ "zip1 z12.d, z1.d, z18.d\n"
+ "zip2 z18.d, z1.d, z18.d\n"
"zip1 z13.d, z20.d, z19.d\n"
"zip2 z19.d, z20.d, z19.d\n"
- "ld1w { z0.s }, p1/Z, [x20, #5, MUL VL]\n"
"zip1 z20.d, z21.d, z26.d\n"
"zip2 z26.d, z21.d, z26.d\n"
"zip1 z21.d, z22.d, z27.d\n"
@@ -671,8 +673,8 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"mov x26, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -694,52 +696,52 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z3.h }, p7/Z, [x28]\n"
+ "ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "sub x25, x25, #0x4\n"
+ "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z1.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "cmp x25, #0x4\n"
"ld1rqw { z5.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqw { z0.s }, p0/Z, [x23]\n"
- ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
+ "add x23, x23, #0x10\n"
"ld1rqw { z4.s }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "ld1h { z3.h }, p7/Z, [x28]\n"
"uzp1 z0.h, z0.h, z0.h\n"
".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
- "ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
- "ld1h { z1.h }, p7/Z, [x28, #2, MUL VL]\n"
"trn1 z5.d, z5.d, z0.d\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "ld1h { z0.h }, p7/Z, [x28, #3, MUL VL]\n"
".inst 0x6463e4a8 // bfmmla z8.s, z5.h, z3.h\n"
- ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
+ ".inst 0x6466e4a9 // bfmmla z9.s, z5.h, z6.h\n"
+ ".inst 0x6461e4af // bfmmla z15.s, z5.h, z1.h\n"
+ ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
"ld1h { z3.h }, p7/Z, [x28, #4, MUL VL]\n"
- "sub x25, x25, #0x4\n"
".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
- ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x28, #5, MUL VL]\n"
- "cmp x25, #0x4\n"
- ".inst 0x6461e495 // bfmmla z21.s, z4.h, z1.h\n"
- ".inst 0x6460e4af // bfmmla z15.s, z5.h, z0.h\n"
+ ".inst 0x6466e495 // bfmmla z21.s, z4.h, z6.h\n"
+ ".inst 0x6461e49b // bfmmla z27.s, z4.h, z1.h\n"
"ld1h { z1.h }, p7/Z, [x28, #6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6460e49b // bfmmla z27.s, z4.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
".inst 0x6463e496 // bfmmla z22.s, z4.h, z3.h\n"
".inst 0x6462e4b0 // bfmmla z16.s, z5.h, z2.h\n"
- "ld1h { z3.h }, p7/Z, [x28, #-8, MUL VL]\n"
- "add x23, x23, #0x10\n"
".inst 0x6462e49c // bfmmla z28.s, z4.h, z2.h\n"
".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
- "ld1h { z2.h }, p7/Z, [x28, #-7, MUL VL]\n"
- "add x22, x22, #0x10\n"
".inst 0x6461e497 // bfmmla z23.s, z4.h, z1.h\n"
".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
- "ld1h { z1.h }, p7/Z, [x28, #-6, MUL VL]\n"
".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
+ "ld1h { z3.h }, p7/Z, [x28, #-8, MUL VL]\n"
+ "ld1h { z2.h }, p7/Z, [x28, #-7, MUL VL]\n"
+ "ld1h { z1.h }, p7/Z, [x28, #-6, MUL VL]\n"
"ld1h { z0.h }, p7/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
"addvl x28, x28, #-4\n"
+ ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
".inst 0x6463e498 // bfmmla z24.s, z4.h, z3.h\n"
".inst 0x6462e4b2 // bfmmla z18.s, z5.h, z2.h\n"
".inst 0x6462e49e // bfmmla z30.s, z4.h, z2.h\n"
@@ -750,47 +752,47 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x25\n"
+ "ld1h { z3.h }, p7/Z, [x28]\n"
+ "ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z1.h }, p7/Z, [x28, #3, MUL VL]\n"
"ld1rqw { z5.s }, p0/Z, [x24]\n"
"ld1rqw { z0.s }, p0/Z, [x23]\n"
- ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
"ld1rqw { z4.s }, p0/Z, [x22]\n"
+ ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "ld1h { z3.h }, p7/Z, [x28]\n"
"uzp1 z0.h, z0.h, z0.h\n"
".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
- "ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
- "ld1h { z1.h }, p7/Z, [x28, #2, MUL VL]\n"
"trn1 z5.d, z5.d, z0.d\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "ld1h { z0.h }, p7/Z, [x28, #3, MUL VL]\n"
".inst 0x6463e4a8 // bfmmla z8.s, z5.h, z3.h\n"
- ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
+ ".inst 0x6466e4a9 // bfmmla z9.s, z5.h, z6.h\n"
+ ".inst 0x6461e4af // bfmmla z15.s, z5.h, z1.h\n"
+ ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
"ld1h { z3.h }, p7/Z, [x28, #4, MUL VL]\n"
".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
- ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x28, #5, MUL VL]\n"
- ".inst 0x6461e495 // bfmmla z21.s, z4.h, z1.h\n"
- ".inst 0x6460e4af // bfmmla z15.s, z5.h, z0.h\n"
+ ".inst 0x6466e495 // bfmmla z21.s, z4.h, z6.h\n"
+ ".inst 0x6461e49b // bfmmla z27.s, z4.h, z1.h\n"
"ld1h { z1.h }, p7/Z, [x28, #6, MUL VL]\n"
- ".inst 0x6460e49b // bfmmla z27.s, z4.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
".inst 0x6463e496 // bfmmla z22.s, z4.h, z3.h\n"
".inst 0x6462e4b0 // bfmmla z16.s, z5.h, z2.h\n"
- "ld1h { z3.h }, p7/Z, [x28, #-8, MUL VL]\n"
".inst 0x6462e49c // bfmmla z28.s, z4.h, z2.h\n"
".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
- "ld1h { z2.h }, p7/Z, [x28, #-7, MUL VL]\n"
".inst 0x6461e497 // bfmmla z23.s, z4.h, z1.h\n"
".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
- "ld1h { z1.h }, p7/Z, [x28, #-6, MUL VL]\n"
".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
+ "ld1h { z3.h }, p7/Z, [x28, #-8, MUL VL]\n"
+ "ld1h { z2.h }, p7/Z, [x28, #-7, MUL VL]\n"
+ "ld1h { z1.h }, p7/Z, [x28, #-6, MUL VL]\n"
"ld1h { z0.h }, p7/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
"addvl x28, x28, #-4\n"
+ ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
".inst 0x6463e498 // bfmmla z24.s, z4.h, z3.h\n"
".inst 0x6462e4b2 // bfmmla z18.s, z5.h, z2.h\n"
".inst 0x6462e49e // bfmmla z30.s, z4.h, z2.h\n"
@@ -804,16 +806,16 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"cmp x26, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
"uzp1 z14.d, z9.d, z15.d\n"
"uzp2 z9.d, z9.d, z15.d\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
+ "add x24, x27, x20, LSL #2\n"
"uzp1 z16.d, z11.d, z17.d\n"
"uzp2 z11.d, z11.d, z17.d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z17.d, z12.d, z18.d\n"
"uzp2 z12.d, z12.d, z18.d\n"
"uzp1 z18.d, z13.d, z19.d\n"
@@ -825,9 +827,9 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z24.d, z24.d, z30.d\n"
"uzp1 z25.d, z25.d, z31.d\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z0.s }, p7/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p7/Z, [x21]\n"
"ld1rw { z19.s }, p7/Z, [x20]\n"
"fmin z4.s, p7/M, z4.s, z0.s\n"
"fmin z14.s, p7/M, z14.s, z0.s\n"
@@ -873,18 +875,18 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"st1w { z17.s }, p2, [x27, #4, MUL VL]\n"
"st1w { z18.s }, p1, [x27, #5, MUL VL]\n"
"addvl x27, x27, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
- "st1w { z20.s }, p6, [x22]\n"
- "st1w { z21.s }, p5, [x22, #1, MUL VL]\n"
- "st1w { z22.s }, p4, [x22, #2, MUL VL]\n"
- "st1w { z23.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p2, [x22, #4, MUL VL]\n"
- "st1w { z25.s }, p1, [x22, #5, MUL VL]\n"
+ "st1w { z8.s }, p6, [x24]\n"
+ "st1w { z9.s }, p5, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x24, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x24, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x23]\n"
+ "st1w { z21.s }, p5, [x23, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x23, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x23, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x23, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x23, #5, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x9, ALL, MUL #6\n"
"cmp x9, XZR\n"
@@ -892,12 +894,13 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"b 54f\n"
"40:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x10\n"
- "mov x10, %x[bias]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p6.s, x20, x9\n"
@@ -914,19 +917,19 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"cbz x10, 42f\n"
"ld1w { z8.s }, p7/Z, [x10]\n"
"ld1w { z9.s }, p7/Z, [x10, #1, MUL VL]\n"
- "zip2 z14.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p7/Z, [x10, #2, MUL VL]\n"
"ld1w { z11.s }, p7/Z, [x10, #3, MUL VL]\n"
- "zip2 z15.d, z9.d, z9.d\n"
- "zip1 z9.d, z9.d, z9.d\n"
"ld1w { z12.s }, p7/Z, [x10, #4, MUL VL]\n"
"ld1w { z13.s }, p7/Z, [x10, #5, MUL VL]\n"
+ "addvl x10, x10, #6\n"
+ "zip2 z14.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
+ "zip2 z15.d, z9.d, z9.d\n"
+ "zip1 z9.d, z9.d, z9.d\n"
"zip2 z16.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
"zip2 z17.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
- "addvl x10, x10, #6\n"
"zip2 z18.d, z12.d, z12.d\n"
"zip1 z12.d, z12.d, z12.d\n"
"zip2 z19.d, z13.d, z13.d\n"
@@ -947,51 +950,51 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z22.s }, p6/Z, [x27]\n"
+ "ld1w { z24.s }, p5/Z, [x27, #1, MUL VL]\n"
+ "ld1w { z26.s }, p4/Z, [x27, #2, MUL VL]\n"
+ "ld1w { z27.s }, p3/Z, [x27, #3, MUL VL]\n"
+ "ld1w { z29.s }, p2/Z, [x27, #4, MUL VL]\n"
"add x22, x27, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "ld1w { z16.s }, p6/Z, [x27]\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p5/Z, [x27, #1, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x27, #2, MUL VL]\n"
- "ld1w { z22.s }, p3/Z, [x27, #3, MUL VL]\n"
- "ld1w { z24.s }, p2/Z, [x27, #4, MUL VL]\n"
"ld1w { z20.s }, p1/Z, [x27, #5, MUL VL]\n"
"ld1w { z14.s }, p6/Z, [x22]\n"
- "zip1 z8.d, z16.d, z14.d\n"
- "zip2 z14.d, z16.d, z14.d\n"
"ld1w { z15.s }, p5/Z, [x22, #1, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x22, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z15.d\n"
- "zip2 z15.d, z17.d, z15.d\n"
"ld1w { z17.s }, p3/Z, [x22, #3, MUL VL]\n"
"ld1w { z18.s }, p2/Z, [x22, #4, MUL VL]\n"
- "zip1 z10.d, z19.d, z16.d\n"
- "zip2 z16.d, z19.d, z16.d\n"
"ld1w { z19.s }, p1/Z, [x22, #5, MUL VL]\n"
"ld1w { z21.s }, p6/Z, [x21]\n"
- "zip1 z11.d, z22.d, z17.d\n"
- "zip2 z17.d, z22.d, z17.d\n"
+ "zip1 z8.d, z22.d, z14.d\n"
+ "zip2 z14.d, z22.d, z14.d\n"
"ld1w { z22.s }, p5/Z, [x21, #1, MUL VL]\n"
"ld1w { z23.s }, p4/Z, [x21, #2, MUL VL]\n"
- "zip1 z12.d, z24.d, z18.d\n"
- "zip2 z18.d, z24.d, z18.d\n"
+ "zip1 z9.d, z24.d, z15.d\n"
+ "zip2 z15.d, z24.d, z15.d\n"
"ld1w { z24.s }, p3/Z, [x21, #3, MUL VL]\n"
"ld1w { z25.s }, p2/Z, [x21, #4, MUL VL]\n"
- "zip1 z13.d, z20.d, z19.d\n"
- "zip2 z19.d, z20.d, z19.d\n"
+ "zip1 z10.d, z26.d, z16.d\n"
+ "zip2 z16.d, z26.d, z16.d\n"
"ld1w { z0.s }, p1/Z, [x21, #5, MUL VL]\n"
"ld1w { z26.s }, p6/Z, [x20]\n"
- "zip1 z20.d, z21.d, z26.d\n"
- "zip2 z26.d, z21.d, z26.d\n"
+ "zip1 z11.d, z27.d, z17.d\n"
+ "zip2 z17.d, z27.d, z17.d\n"
"ld1w { z27.s }, p5/Z, [x20, #1, MUL VL]\n"
"ld1w { z28.s }, p4/Z, [x20, #2, MUL VL]\n"
- "zip1 z21.d, z22.d, z27.d\n"
- "zip2 z27.d, z22.d, z27.d\n"
+ "zip1 z12.d, z29.d, z18.d\n"
+ "zip2 z18.d, z29.d, z18.d\n"
"ld1w { z29.s }, p3/Z, [x20, #3, MUL VL]\n"
"ld1w { z30.s }, p2/Z, [x20, #4, MUL VL]\n"
+ "zip1 z13.d, z20.d, z19.d\n"
+ "zip2 z19.d, z20.d, z19.d\n"
+ "ld1w { z31.s }, p1/Z, [x20, #5, MUL VL]\n"
+ "zip1 z20.d, z21.d, z26.d\n"
+ "zip2 z26.d, z21.d, z26.d\n"
+ "zip1 z21.d, z22.d, z27.d\n"
+ "zip2 z27.d, z22.d, z27.d\n"
"zip1 z22.d, z23.d, z28.d\n"
"zip2 z28.d, z23.d, z28.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #5, MUL VL]\n"
"zip1 z23.d, z24.d, z29.d\n"
"zip2 z29.d, z24.d, z29.d\n"
"zip1 z24.d, z25.d, z30.d\n"
@@ -1028,8 +1031,8 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"mov x26, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1054,136 +1057,136 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
"whilelt p0.s, XZR, x25\n"
- "ld1rqw { z7.s }, p0/Z, [x24]\n"
- "ld1rqw { z6.s }, p0/Z, [x23]\n"
- ".inst 0x658abce7 // bfcvt z7.h, p7/M, z7.s\n"
- "ld1rqw { z5.s }, p0/Z, [x22]\n"
- "ld1rqw { z4.s }, p0/Z, [x21]\n"
- ".inst 0x658abcc6 // bfcvt z6.h, p7/M, z6.s\n"
- ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
- ".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
- "uzp1 z7.h, z7.h, z7.h\n"
"ld1h { z3.h }, p7/Z, [x28]\n"
"ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
- "uzp1 z6.h, z6.h, z6.h\n"
+ "sub x25, x25, #0x4\n"
+ "ld1h { z7.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "cmp x25, #0x4\n"
+ "ld1rqw { z5.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqw { z4.s }, p0/Z, [x22]\n"
+ "ld1rqw { z0.s }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ ".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "ld1h { z1.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z0.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "trn1 z7.d, z7.d, z6.d\n"
- ".inst 0x6463e4e8 // bfmmla z8.s, z7.h, z3.h\n"
- "sub x25, x25, #0x4\n"
- "trn1 z5.d, z5.d, z4.d\n"
- ".inst 0x6463e4b4 // bfmmla z20.s, z5.h, z3.h\n"
- ".inst 0x6462e4ee // bfmmla z14.s, z7.h, z2.h\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "trn1 z5.d, z5.d, z1.d\n"
+ "trn1 z4.d, z4.d, z0.d\n"
+ ".inst 0x6463e4a8 // bfmmla z8.s, z5.h, z3.h\n"
+ ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
"ld1h { z3.h }, p7/Z, [x28, #4, MUL VL]\n"
- ".inst 0x6462e4ba // bfmmla z26.s, z5.h, z2.h\n"
- ".inst 0x6461e4e9 // bfmmla z9.s, z7.h, z1.h\n"
+ ".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
+ ".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
"ld1h { z2.h }, p7/Z, [x28, #5, MUL VL]\n"
- "cmp x25, #0x4\n"
- ".inst 0x6461e4b5 // bfmmla z21.s, z5.h, z1.h\n"
- ".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
+ ".inst 0x6467e4a9 // bfmmla z9.s, z5.h, z7.h\n"
+ ".inst 0x6467e495 // bfmmla z21.s, z4.h, z7.h\n"
+ ".inst 0x6466e4af // bfmmla z15.s, z5.h, z6.h\n"
"ld1h { z1.h }, p7/Z, [x28, #6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6460e4bb // bfmmla z27.s, z5.h, z0.h\n"
+ ".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
"ld1h { z0.h }, p7/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x6463e4ea // bfmmla z10.s, z7.h, z3.h\n"
- ".inst 0x6463e4b6 // bfmmla z22.s, z5.h, z3.h\n"
- ".inst 0x6462e4f0 // bfmmla z16.s, z7.h, z2.h\n"
+ ".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
+ ".inst 0x6463e496 // bfmmla z22.s, z4.h, z3.h\n"
+ ".inst 0x6462e4b0 // bfmmla z16.s, z5.h, z2.h\n"
+ ".inst 0x6462e49c // bfmmla z28.s, z4.h, z2.h\n"
+ ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
+ ".inst 0x6461e497 // bfmmla z23.s, z4.h, z1.h\n"
+ ".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
"ld1h { z3.h }, p7/Z, [x28, #-8, MUL VL]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x6462e4bc // bfmmla z28.s, z5.h, z2.h\n"
- ".inst 0x6461e4eb // bfmmla z11.s, z7.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x28, #-7, MUL VL]\n"
- "add x22, x22, #0x10\n"
- ".inst 0x6461e4b7 // bfmmla z23.s, z5.h, z1.h\n"
- ".inst 0x6460e4f1 // bfmmla z17.s, z7.h, z0.h\n"
+ ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z1.h }, p7/Z, [x28, #-6, MUL VL]\n"
- "add x21, x21, #0x10\n"
- ".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x6463e4ec // bfmmla z12.s, z7.h, z3.h\n"
"addvl x28, x28, #-4\n"
- ".inst 0x6463e4b8 // bfmmla z24.s, z5.h, z3.h\n"
- ".inst 0x6462e4f2 // bfmmla z18.s, z7.h, z2.h\n"
- ".inst 0x6462e4be // bfmmla z30.s, z5.h, z2.h\n"
- ".inst 0x6461e4ed // bfmmla z13.s, z7.h, z1.h\n"
- ".inst 0x6461e4b9 // bfmmla z25.s, z5.h, z1.h\n"
- ".inst 0x6460e4f3 // bfmmla z19.s, z7.h, z0.h\n"
- ".inst 0x6460e4bf // bfmmla z31.s, z5.h, z0.h\n"
+ ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
+ ".inst 0x6463e498 // bfmmla z24.s, z4.h, z3.h\n"
+ ".inst 0x6462e4b2 // bfmmla z18.s, z5.h, z2.h\n"
+ ".inst 0x6462e49e // bfmmla z30.s, z4.h, z2.h\n"
+ ".inst 0x6461e4ad // bfmmla z13.s, z5.h, z1.h\n"
+ ".inst 0x6461e499 // bfmmla z25.s, z4.h, z1.h\n"
+ ".inst 0x6460e4b3 // bfmmla z19.s, z5.h, z0.h\n"
+ ".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x25\n"
- "ld1rqw { z7.s }, p0/Z, [x24]\n"
- "ld1rqw { z6.s }, p0/Z, [x23]\n"
- ".inst 0x658abce7 // bfcvt z7.h, p7/M, z7.s\n"
- "ld1rqw { z5.s }, p0/Z, [x22]\n"
- "ld1rqw { z4.s }, p0/Z, [x21]\n"
- ".inst 0x658abcc6 // bfcvt z6.h, p7/M, z6.s\n"
- ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
- ".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
- "uzp1 z7.h, z7.h, z7.h\n"
"ld1h { z3.h }, p7/Z, [x28]\n"
"ld1h { z2.h }, p7/Z, [x28, #1, MUL VL]\n"
- "uzp1 z6.h, z6.h, z6.h\n"
+ "ld1h { z7.h }, p7/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z6.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "ld1rqw { z5.s }, p0/Z, [x24]\n"
+ "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "ld1rqw { z4.s }, p0/Z, [x22]\n"
+ "ld1rqw { z0.s }, p0/Z, [x21]\n"
+ ".inst 0x658abca5 // bfcvt z5.h, p7/M, z5.s\n"
+ ".inst 0x658abc21 // bfcvt z1.h, p7/M, z1.s\n"
+ ".inst 0x658abc84 // bfcvt z4.h, p7/M, z4.s\n"
+ ".inst 0x658abc00 // bfcvt z0.h, p7/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "ld1h { z1.h }, p7/Z, [x28, #2, MUL VL]\n"
- "ld1h { z0.h }, p7/Z, [x28, #3, MUL VL]\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "trn1 z7.d, z7.d, z6.d\n"
- ".inst 0x6463e4e8 // bfmmla z8.s, z7.h, z3.h\n"
- "trn1 z5.d, z5.d, z4.d\n"
- ".inst 0x6463e4b4 // bfmmla z20.s, z5.h, z3.h\n"
- ".inst 0x6462e4ee // bfmmla z14.s, z7.h, z2.h\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "trn1 z5.d, z5.d, z1.d\n"
+ "trn1 z4.d, z4.d, z0.d\n"
+ ".inst 0x6463e4a8 // bfmmla z8.s, z5.h, z3.h\n"
+ ".inst 0x6463e494 // bfmmla z20.s, z4.h, z3.h\n"
"ld1h { z3.h }, p7/Z, [x28, #4, MUL VL]\n"
- ".inst 0x6462e4ba // bfmmla z26.s, z5.h, z2.h\n"
- ".inst 0x6461e4e9 // bfmmla z9.s, z7.h, z1.h\n"
+ ".inst 0x6462e4ae // bfmmla z14.s, z5.h, z2.h\n"
+ ".inst 0x6462e49a // bfmmla z26.s, z4.h, z2.h\n"
"ld1h { z2.h }, p7/Z, [x28, #5, MUL VL]\n"
- ".inst 0x6461e4b5 // bfmmla z21.s, z5.h, z1.h\n"
- ".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
+ ".inst 0x6467e4a9 // bfmmla z9.s, z5.h, z7.h\n"
+ ".inst 0x6467e495 // bfmmla z21.s, z4.h, z7.h\n"
+ ".inst 0x6466e4af // bfmmla z15.s, z5.h, z6.h\n"
"ld1h { z1.h }, p7/Z, [x28, #6, MUL VL]\n"
- ".inst 0x6460e4bb // bfmmla z27.s, z5.h, z0.h\n"
+ ".inst 0x6466e49b // bfmmla z27.s, z4.h, z6.h\n"
"ld1h { z0.h }, p7/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x6463e4ea // bfmmla z10.s, z7.h, z3.h\n"
- ".inst 0x6463e4b6 // bfmmla z22.s, z5.h, z3.h\n"
- ".inst 0x6462e4f0 // bfmmla z16.s, z7.h, z2.h\n"
+ ".inst 0x6463e4aa // bfmmla z10.s, z5.h, z3.h\n"
+ ".inst 0x6463e496 // bfmmla z22.s, z4.h, z3.h\n"
+ ".inst 0x6462e4b0 // bfmmla z16.s, z5.h, z2.h\n"
+ ".inst 0x6462e49c // bfmmla z28.s, z4.h, z2.h\n"
+ ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
+ ".inst 0x6461e497 // bfmmla z23.s, z4.h, z1.h\n"
+ ".inst 0x6460e4b1 // bfmmla z17.s, z5.h, z0.h\n"
"ld1h { z3.h }, p7/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x6462e4bc // bfmmla z28.s, z5.h, z2.h\n"
- ".inst 0x6461e4eb // bfmmla z11.s, z7.h, z1.h\n"
"ld1h { z2.h }, p7/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x6461e4b7 // bfmmla z23.s, z5.h, z1.h\n"
- ".inst 0x6460e4f1 // bfmmla z17.s, z7.h, z0.h\n"
+ ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z1.h }, p7/Z, [x28, #-6, MUL VL]\n"
- ".inst 0x6460e4bd // bfmmla z29.s, z5.h, z0.h\n"
"ld1h { z0.h }, p7/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x6463e4ec // bfmmla z12.s, z7.h, z3.h\n"
"addvl x28, x28, #-4\n"
- ".inst 0x6463e4b8 // bfmmla z24.s, z5.h, z3.h\n"
- ".inst 0x6462e4f2 // bfmmla z18.s, z7.h, z2.h\n"
- ".inst 0x6462e4be // bfmmla z30.s, z5.h, z2.h\n"
- ".inst 0x6461e4ed // bfmmla z13.s, z7.h, z1.h\n"
- ".inst 0x6461e4b9 // bfmmla z25.s, z5.h, z1.h\n"
- ".inst 0x6460e4f3 // bfmmla z19.s, z7.h, z0.h\n"
- ".inst 0x6460e4bf // bfmmla z31.s, z5.h, z0.h\n"
+ ".inst 0x6463e4ac // bfmmla z12.s, z5.h, z3.h\n"
+ ".inst 0x6463e498 // bfmmla z24.s, z4.h, z3.h\n"
+ ".inst 0x6462e4b2 // bfmmla z18.s, z5.h, z2.h\n"
+ ".inst 0x6462e49e // bfmmla z30.s, z4.h, z2.h\n"
+ ".inst 0x6461e4ad // bfmmla z13.s, z5.h, z1.h\n"
+ ".inst 0x6461e499 // bfmmla z25.s, z4.h, z1.h\n"
+ ".inst 0x6460e4b3 // bfmmla z19.s, z5.h, z0.h\n"
+ ".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
"50:" // Height 4: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x26, x26, #0x1\n"
"cmp x26, x20\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp1 z4.d, z8.d, z14.d\n"
"uzp2 z8.d, z8.d, z14.d\n"
"uzp1 z14.d, z9.d, z15.d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z15.d\n"
"uzp1 z15.d, z10.d, z16.d\n"
"uzp2 z10.d, z10.d, z16.d\n"
+ "add x24, x27, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z16.d, z11.d, z17.d\n"
"uzp2 z11.d, z11.d, z17.d\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 z17.d, z12.d, z18.d\n"
"uzp2 z12.d, z12.d, z18.d\n"
"uzp1 z18.d, z13.d, z19.d\n"
@@ -1201,9 +1204,9 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"uzp1 z30.d, z25.d, z31.d\n"
"uzp2 z25.d, z25.d, z31.d\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p7/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p7/Z, [x21]\n"
"ld1rw { z0.s }, p7/Z, [x20]\n"
"fmin z4.s, p7/M, z4.s, z1.s\n"
"fmin z14.s, p7/M, z14.s, z1.s\n"
@@ -1261,24 +1264,24 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"st1w { z17.s }, p2, [x27, #4, MUL VL]\n"
"st1w { z18.s }, p1, [x27, #5, MUL VL]\n"
"addvl x27, x27, #6\n"
- "st1w { z8.s }, p6, [x23]\n"
- "st1w { z9.s }, p5, [x23, #1, MUL VL]\n"
- "st1w { z10.s }, p4, [x23, #2, MUL VL]\n"
- "st1w { z11.s }, p3, [x23, #3, MUL VL]\n"
- "st1w { z12.s }, p2, [x23, #4, MUL VL]\n"
- "st1w { z13.s }, p1, [x23, #5, MUL VL]\n"
- "st1w { z19.s }, p6, [x22]\n"
- "st1w { z26.s }, p5, [x22, #1, MUL VL]\n"
- "st1w { z27.s }, p4, [x22, #2, MUL VL]\n"
- "st1w { z28.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #4, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z20.s }, p6, [x21]\n"
- "st1w { z21.s }, p5, [x21, #1, MUL VL]\n"
- "st1w { z22.s }, p4, [x21, #2, MUL VL]\n"
- "st1w { z23.s }, p3, [x21, #3, MUL VL]\n"
- "st1w { z24.s }, p2, [x21, #4, MUL VL]\n"
- "st1w { z25.s }, p1, [x21, #5, MUL VL]\n"
+ "st1w { z8.s }, p6, [x24]\n"
+ "st1w { z9.s }, p5, [x24, #1, MUL VL]\n"
+ "st1w { z10.s }, p4, [x24, #2, MUL VL]\n"
+ "st1w { z11.s }, p3, [x24, #3, MUL VL]\n"
+ "st1w { z12.s }, p2, [x24, #4, MUL VL]\n"
+ "st1w { z13.s }, p1, [x24, #5, MUL VL]\n"
+ "st1w { z19.s }, p6, [x23]\n"
+ "st1w { z26.s }, p5, [x23, #1, MUL VL]\n"
+ "st1w { z27.s }, p4, [x23, #2, MUL VL]\n"
+ "st1w { z28.s }, p3, [x23, #3, MUL VL]\n"
+ "st1w { z29.s }, p2, [x23, #4, MUL VL]\n"
+ "st1w { z30.s }, p1, [x23, #5, MUL VL]\n"
+ "st1w { z20.s }, p6, [x22]\n"
+ "st1w { z21.s }, p5, [x22, #1, MUL VL]\n"
+ "st1w { z22.s }, p4, [x22, #2, MUL VL]\n"
+ "st1w { z23.s }, p3, [x22, #3, MUL VL]\n"
+ "st1w { z24.s }, p2, [x22, #4, MUL VL]\n"
+ "st1w { z25.s }, p1, [x22, #5, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x9, ALL, MUL #6\n"
"cmp x9, XZR\n"
@@ -1295,8 +1298,8 @@ void sve_hybrid_fp32bf16fp32_mmla_4x6VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"54:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
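The change running through this kernel, and repeated in the 6x4VL variant below, moves output_ptr and bias out of the inline-asm operand list and into the KernelArgs struct: the operand hunk just above drops [output_ptr] "+&r" and [bias] "r", adds "I"-constrained offsetof_output_ptr/offsetof_bias operands, and the asm body switches from "mov x27, %x[output_ptr]" to "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]", writing the advanced pointer back with a str instead of mutating a register operand. Below is a minimal sketch of that pattern, assuming nothing about KernelArgs beyond the two fields shown in the hunks; it is illustrative, not the library's actual code.

    #include <cstddef>

    // Sketch only: the pointers live in the args struct, and the asm
    // reaches them through "I"-constrained offsetof() immediates instead
    // of occupying dedicated register operands for the kernel's lifetime.
    struct KernelArgsSketch {
        void *output_ptr = nullptr;   // previously a "+&r" asm operand
        const float *bias = nullptr;  // previously an "r" asm operand
    };

    void kernel_sketch(KernelArgsSketch &ka) {
    #if defined(__aarch64__)
        __asm__ __volatile__(
            // Fetch the pointers on demand, freeing two GPRs for the
            // register allocator across the rest of the kernel.
            "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
            // The updated pointer is written back through the struct
            // ("madd ...; str ..." in the real kernel above).
            "str x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            :
            : [args_ptr] "r" (&ka),
              [offsetof_output_ptr] "I" (offsetof(KernelArgsSketch, output_ptr)),
              [offsetof_bias] "I" (offsetof(KernelArgsSketch, bias))
            : "cc", "memory", "x9", "x12");
    #endif
    }

One practical effect visible in the hunks: with output_ptr no longer a read-write operand, the pointer survives in memory between the per-height blocks, and each block reloads it with a ldr rather than inheriting a possibly clobbered register.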
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL.hpp
index 15b7dd721c..823d839289 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 8, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 8, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
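The one-line change above gives StdTransformsSVE a separate LHS operand type; for this hybrid kernel the distinction is real, since the LHS is read as fp32 and converted in-flight (the bfcvt instructions in the generic.cpp below) while the packed RHS is already bf16. The following is a hypothetical illustration of what the instantiation now distinguishes; StdTransformsSVE's definition is not part of this diff, and the names below are stand-ins.

    #include <cstdint>

    // Stand-in for the library's bfloat16 type, for illustration only.
    using bf16_sketch = std::uint16_t;

    // Hypothetical shape of a transforms helper keyed on distinct LHS and
    // RHS operand types plus the result type and tile geometry.
    template <typename TLhs, typename TRhs, typename TResult,
              int Height, int Width, int KBlock>
    struct StdTransformsSketch {};

    // Before: one operand type stood in for both sides.
    //   StdTransformsSVE<rhs_operand_type, result_type, 6, 8, 4>
    // After: LHS (fp32, converted on load) and RHS (pre-packed bf16) differ.
    //   StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 8, 4>
    using transforms_sketch_t =
        StdTransformsSketch<float, bf16_sketch, float, 6, 8, 4>;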
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp
index 0d2b47ec39..11526b6f65 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -48,18 +48,19 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
+ const float *bias = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -80,6 +81,7 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
ka.string_lengths = string_lengths;
ka.N = N;
ka.B_ptr = B_ptr;
+ ka.bias = bias;
switch(act.type) {
default:
case Activation::Type::None:
@@ -103,10 +105,10 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cmp %x[M], #0x2\n"
"bgt 27f\n"
"beq 14f\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -119,26 +121,26 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cbz x12, 3f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 5f\n"
"3:" // Height 1: no bias
"tbz %x[flags], #0, 4f\n"
- "ld1w { z16.s }, p4/Z, [x9]\n"
+ "ld1w { z19.s }, p4/Z, [x9]\n"
"ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "zip1 z8.d, z16.d, z12.d\n"
- "zip2 z12.d, z16.d, z12.d\n"
"ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "zip1 z8.d, z19.d, z12.d\n"
+ "zip2 z12.d, z19.d, z12.d\n"
"zip1 z9.d, z18.d, z13.d\n"
"zip2 z13.d, z18.d, z13.d\n"
"zip1 z10.d, z17.d, z14.d\n"
@@ -159,8 +161,8 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"6:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 7f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -176,52 +178,52 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"ble 10f\n"
"9:" // Height 1: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
"ld1rqw { z18.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
".inst 0x658ab652 // bfcvt z18.h, p5/M, z18.s\n"
"uzp1 z18.h, z18.h, z18.h\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "addvl x10, x10, #8\n"
".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
- "add x26, x26, #0x10\n"
- "addvl x10, x10, #8\n"
"bgt 9b\n"
"10:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
+ "ld1h { z17.h }, p5/Z, [x10]\n"
+ "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
"ld1rqw { z18.s }, p0/Z, [x26]\n"
".inst 0x658ab652 // bfcvt z18.h, p5/M, z18.s\n"
"uzp1 z18.h, z18.h, z18.h\n"
- "ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
- ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6470e64c // bfmmla z12.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
- ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
- ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
- "addvl x10, x10, #8\n"
"11:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -232,9 +234,9 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z10.d, z10.d, z14.d\n"
"uzp1 z11.d, z11.d, z15.d\n"
"tbz %x[flags], #1, 12f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z8.s, p5/M, z8.s, z17.s\n"
"fmin z9.s, p5/M, z9.s, z17.s\n"
@@ -256,10 +258,10 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -272,34 +274,34 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cbz x12, 16f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"b 18f\n"
"16:" // Height 2: no bias
"tbz %x[flags], #0, 17f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z19.s }, p4/Z, [x9]\n"
"ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z8.d, z19.d, z12.d\n"
+ "zip2 z12.d, z19.d, z12.d\n"
"zip1 z9.d, z18.d, z13.d\n"
"zip2 z13.d, z18.d, z13.d\n"
- "ld1w { z15.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z10.d, z17.d, z14.d\n"
"zip2 z14.d, z17.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -318,8 +320,8 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"19:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 20f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -338,61 +340,61 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"ble 23f\n"
"22:" // Height 2: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z19.s }, p0/Z, [x26]\n"
- "ld1rqw { z18.s }, p0/Z, [x25]\n"
- ".inst 0x658ab673 // bfcvt z19.h, p5/M, z19.s\n"
- ".inst 0x658ab652 // bfcvt z18.h, p5/M, z18.s\n"
- "uzp1 z19.h, z19.h, z19.h\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z19.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
+ "ld1rqw { z18.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqw { z16.s }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ ".inst 0x658ab652 // bfcvt z18.h, p5/M, z18.s\n"
+ ".inst 0x658ab610 // bfcvt z16.h, p5/M, z16.s\n"
"uzp1 z18.h, z18.h, z18.h\n"
- "trn1 z19.d, z19.d, z18.d\n"
- ".inst 0x6471e668 // bfmmla z8.s, z19.h, z17.h\n"
+ "uzp1 z16.h, z16.h, z16.h\n"
+ "trn1 z18.d, z18.d, z16.d\n"
+ ".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6470e66c // bfmmla z12.s, z19.h, z16.h\n"
+ ".inst 0x6473e64c // bfmmla z12.s, z18.h, z19.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6471e669 // bfmmla z9.s, z19.h, z17.h\n"
+ ".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x6470e66d // bfmmla z13.s, z19.h, z16.h\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6471e66a // bfmmla z10.s, z19.h, z17.h\n"
+ ".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6470e66e // bfmmla z14.s, z19.h, z16.h\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
- ".inst 0x6471e66b // bfmmla z11.s, z19.h, z17.h\n"
- ".inst 0x6470e66f // bfmmla z15.s, z19.h, z16.h\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
+ ".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
"bgt 22b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z19.s }, p0/Z, [x26]\n"
- "ld1rqw { z18.s }, p0/Z, [x25]\n"
- ".inst 0x658ab673 // bfcvt z19.h, p5/M, z19.s\n"
- ".inst 0x658ab652 // bfcvt z18.h, p5/M, z18.s\n"
- "uzp1 z19.h, z19.h, z19.h\n"
"ld1h { z17.h }, p5/Z, [x10]\n"
- "ld1h { z16.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z19.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqw { z18.s }, p0/Z, [x26]\n"
+ "ld1rqw { z16.s }, p0/Z, [x25]\n"
+ ".inst 0x658ab652 // bfcvt z18.h, p5/M, z18.s\n"
+ ".inst 0x658ab610 // bfcvt z16.h, p5/M, z16.s\n"
"uzp1 z18.h, z18.h, z18.h\n"
- "trn1 z19.d, z19.d, z18.d\n"
- ".inst 0x6471e668 // bfmmla z8.s, z19.h, z17.h\n"
+ "uzp1 z16.h, z16.h, z16.h\n"
+ "trn1 z18.d, z18.d, z16.d\n"
+ ".inst 0x6471e648 // bfmmla z8.s, z18.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6470e66c // bfmmla z12.s, z19.h, z16.h\n"
+ ".inst 0x6473e64c // bfmmla z12.s, z18.h, z19.h\n"
"ld1h { z16.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6471e669 // bfmmla z9.s, z19.h, z17.h\n"
+ ".inst 0x6471e649 // bfmmla z9.s, z18.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x6470e66d // bfmmla z13.s, z19.h, z16.h\n"
+ ".inst 0x6470e64d // bfmmla z13.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6471e66a // bfmmla z10.s, z19.h, z17.h\n"
+ ".inst 0x6471e64a // bfmmla z10.s, z18.h, z17.h\n"
"ld1h { z17.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6470e66e // bfmmla z14.s, z19.h, z16.h\n"
+ ".inst 0x6470e64e // bfmmla z14.s, z18.h, z16.h\n"
"ld1h { z16.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6471e66b // bfmmla z11.s, z19.h, z17.h\n"
"addvl x10, x10, #8\n"
- ".inst 0x6470e66f // bfmmla z15.s, z19.h, z16.h\n"
+ ".inst 0x6471e64b // bfmmla z11.s, z18.h, z17.h\n"
+ ".inst 0x6470e64f // bfmmla z15.s, z18.h, z16.h\n"
"24:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -401,17 +403,17 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z6.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x25, x9, x20, LSL #2\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x26, x9, x20, LSL #2\n"
"tbz %x[flags], #1, 25f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z17.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z17.s }, p5/Z, [x21]\n"
"ld1rw { z16.s }, p5/Z, [x20]\n"
"fmin z6.s, p5/M, z6.s, z17.s\n"
"fmin z12.s, p5/M, z12.s, z17.s\n"
@@ -435,20 +437,20 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
"26:" // Height 2: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -461,15 +463,15 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cbz x12, 29f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -484,28 +486,28 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"29:" // Height 3: no bias
"tbz %x[flags], #0, 30f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p4/Z, [x9]\n"
+ "ld1w { z26.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x21, x9, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x21, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x20]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "zip1 z8.d, z24.d, z12.d\n"
+ "zip2 z12.d, z24.d, z12.d\n"
+ "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z9.d, z26.d, z13.d\n"
+ "zip2 z13.d, z26.d, z13.d\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
@@ -536,8 +538,8 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -559,83 +561,83 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"ble 36f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z28.s }, p0/Z, [x26]\n"
- "ld1rqw { z27.s }, p0/Z, [x25]\n"
- ".inst 0x658ab79c // bfcvt z28.h, p5/M, z28.s\n"
+ "ld1h { z25.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
+ "ld1rqw { z27.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqw { z24.s }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqw { z26.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
".inst 0x658ab77b // bfcvt z27.h, p5/M, z27.s\n"
- "uzp1 z28.h, z28.h, z28.h\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
+ ".inst 0x658ab718 // bfcvt z24.h, p5/M, z24.s\n"
"uzp1 z27.h, z27.h, z27.h\n"
".inst 0x658ab75a // bfcvt z26.h, p5/M, z26.s\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "sub x27, x27, #0x4\n"
- "trn1 z28.d, z28.d, z27.d\n"
+ "uzp1 z24.h, z24.h, z24.h\n"
+ "trn1 z27.d, z27.d, z24.d\n"
"uzp1 z26.h, z26.h, z26.h\n"
- ".inst 0x6479e788 // bfmmla z8.s, z28.h, z25.h\n"
- "cmp x27, #0x4\n"
+ ".inst 0x6479e768 // bfmmla z8.s, z27.h, z25.h\n"
".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e78c // bfmmla z12.s, z28.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x26, x26, #0x10\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
+ ".inst 0x647ce76c // bfmmla z12.s, z27.h, z28.h\n"
+ ".inst 0x647ce754 // bfmmla z20.s, z26.h, z28.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6479e789 // bfmmla z9.s, z28.h, z25.h\n"
- "add x25, x25, #0x10\n"
+ ".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
- ".inst 0x6478e78d // bfmmla z13.s, z28.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ ".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6479e78a // bfmmla z10.s, z28.h, z25.h\n"
+ ".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
- ".inst 0x6478e78e // bfmmla z14.s, z28.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6479e78b // bfmmla z11.s, z28.h, z25.h\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
- ".inst 0x6478e78f // bfmmla z15.s, z28.h, z24.h\n"
+ ".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"bgt 35b\n"
"36:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z28.s }, p0/Z, [x26]\n"
- "ld1rqw { z27.s }, p0/Z, [x25]\n"
- ".inst 0x658ab79c // bfcvt z28.h, p5/M, z28.s\n"
+ "ld1h { z25.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqw { z27.s }, p0/Z, [x26]\n"
+ "ld1rqw { z24.s }, p0/Z, [x25]\n"
"ld1rqw { z26.s }, p0/Z, [x24]\n"
".inst 0x658ab77b // bfcvt z27.h, p5/M, z27.s\n"
- "uzp1 z28.h, z28.h, z28.h\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
+ ".inst 0x658ab718 // bfcvt z24.h, p5/M, z24.s\n"
"uzp1 z27.h, z27.h, z27.h\n"
+ "uzp1 z24.h, z24.h, z24.h\n"
".inst 0x658ab75a // bfcvt z26.h, p5/M, z26.s\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "trn1 z28.d, z28.d, z27.d\n"
+ "trn1 z27.d, z27.d, z24.d\n"
"uzp1 z26.h, z26.h, z26.h\n"
- ".inst 0x6479e788 // bfmmla z8.s, z28.h, z25.h\n"
+ ".inst 0x6479e768 // bfmmla z8.s, z27.h, z25.h\n"
+ ".inst 0x647ce76c // bfmmla z12.s, z27.h, z28.h\n"
".inst 0x6479e750 // bfmmla z16.s, z26.h, z25.h\n"
- ".inst 0x6478e78c // bfmmla z12.s, z28.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6478e754 // bfmmla z20.s, z26.h, z24.h\n"
+ ".inst 0x647ce754 // bfmmla z20.s, z26.h, z28.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6479e789 // bfmmla z9.s, z28.h, z25.h\n"
+ ".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
- ".inst 0x6478e78d // bfmmla z13.s, z28.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6479e78a // bfmmla z10.s, z28.h, z25.h\n"
+ ".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
- ".inst 0x6478e78e // bfmmla z14.s, z28.h, z24.h\n"
"ld1h { z25.h }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6479e78b // bfmmla z11.s, z28.h, z25.h\n"
"addvl x10, x10, #8\n"
+ ".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
- ".inst 0x6478e78f // bfmmla z15.s, z28.h, z24.h\n"
+ ".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"37:" // Height 3: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -643,24 +645,24 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
"uzp1 z6.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x9, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 z16.d, z16.d, z20.d\n"
"uzp1 z17.d, z17.d, z21.d\n"
"uzp1 z18.d, z18.d, z22.d\n"
"uzp1 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 38f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z25.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z25.s }, p5/Z, [x21]\n"
"ld1rw { z24.s }, p5/Z, [x20]\n"
"fmin z6.s, p5/M, z6.s, z25.s\n"
"fmin z12.s, p5/M, z12.s, z25.s\n"
@@ -692,24 +694,24 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x24]\n"
- "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x25]\n"
+ "st1w { z17.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x25, #3, MUL VL]\n"
"39:" // Height 3: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -722,15 +724,15 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cbz x12, 42f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -745,37 +747,37 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"42:" // Height 4: no bias
"tbz %x[flags], #0, 43f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x22, x9, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x22, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x21]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x20]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -802,8 +804,8 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"45:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -828,110 +830,110 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"ble 49f\n"
"48:" // Height 4: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z29.s }, p0/Z, [x26]\n"
- "ld1rqw { z28.s }, p0/Z, [x25]\n"
- ".inst 0x658ab7bd // bfcvt z29.h, p5/M, z29.s\n"
- "ld1rqw { z27.s }, p0/Z, [x24]\n"
- "ld1rqw { z26.s }, p0/Z, [x23]\n"
- ".inst 0x658ab79c // bfcvt z28.h, p5/M, z28.s\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
+ "ld1rqw { z27.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqw { z25.s }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqw { z26.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqw { z24.s }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
".inst 0x658ab77b // bfcvt z27.h, p5/M, z27.s\n"
+ ".inst 0x658ab739 // bfcvt z25.h, p5/M, z25.s\n"
".inst 0x658ab75a // bfcvt z26.h, p5/M, z26.s\n"
- "uzp1 z29.h, z29.h, z29.h\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "uzp1 z28.h, z28.h, z28.h\n"
+ ".inst 0x658ab718 // bfcvt z24.h, p5/M, z24.s\n"
"uzp1 z27.h, z27.h, z27.h\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
+ "uzp1 z25.h, z25.h, z25.h\n"
"uzp1 z26.h, z26.h, z26.h\n"
- "trn1 z29.d, z29.d, z28.d\n"
- ".inst 0x6479e7a8 // bfmmla z8.s, z29.h, z25.h\n"
- "add x26, x26, #0x10\n"
- "trn1 z27.d, z27.d, z26.d\n"
- ".inst 0x6479e770 // bfmmla z16.s, z27.h, z25.h\n"
- ".inst 0x6478e7ac // bfmmla z12.s, z29.h, z24.h\n"
+ "uzp1 z24.h, z24.h, z24.h\n"
+ "trn1 z27.d, z27.d, z25.d\n"
+ "trn1 z26.d, z26.d, z24.d\n"
+ ".inst 0x647de768 // bfmmla z8.s, z27.h, z29.h\n"
+ ".inst 0x647de750 // bfmmla z16.s, z26.h, z29.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6478e774 // bfmmla z20.s, z27.h, z24.h\n"
+ ".inst 0x647ce76c // bfmmla z12.s, z27.h, z28.h\n"
+ ".inst 0x647ce754 // bfmmla z20.s, z26.h, z28.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6479e7a9 // bfmmla z9.s, z29.h, z25.h\n"
- "add x25, x25, #0x10\n"
- ".inst 0x6479e771 // bfmmla z17.s, z27.h, z25.h\n"
- ".inst 0x6478e7ad // bfmmla z13.s, z29.h, z24.h\n"
+ ".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
+ ".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x6478e775 // bfmmla z21.s, z27.h, z24.h\n"
+ ".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
+ ".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6479e7aa // bfmmla z10.s, z29.h, z25.h\n"
- "add x23, x23, #0x10\n"
- ".inst 0x6479e772 // bfmmla z18.s, z27.h, z25.h\n"
- ".inst 0x6478e7ae // bfmmla z14.s, z29.h, z24.h\n"
+ ".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
+ ".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6478e776 // bfmmla z22.s, z27.h, z24.h\n"
+ ".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
+ ".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6479e7ab // bfmmla z11.s, z29.h, z25.h\n"
"addvl x10, x10, #8\n"
- ".inst 0x6479e773 // bfmmla z19.s, z27.h, z25.h\n"
- ".inst 0x6478e7af // bfmmla z15.s, z29.h, z24.h\n"
- ".inst 0x6478e777 // bfmmla z23.s, z27.h, z24.h\n"
+ ".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
+ ".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
+ ".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
+ ".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"bgt 48b\n"
"49:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z29.s }, p0/Z, [x26]\n"
- "ld1rqw { z28.s }, p0/Z, [x25]\n"
- ".inst 0x658ab7bd // bfcvt z29.h, p5/M, z29.s\n"
- "ld1rqw { z27.s }, p0/Z, [x24]\n"
- "ld1rqw { z26.s }, p0/Z, [x23]\n"
- ".inst 0x658ab79c // bfcvt z28.h, p5/M, z28.s\n"
+ "ld1h { z29.h }, p5/Z, [x10]\n"
+ "ld1h { z28.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqw { z27.s }, p0/Z, [x26]\n"
+ "ld1rqw { z25.s }, p0/Z, [x25]\n"
+ "ld1rqw { z26.s }, p0/Z, [x24]\n"
+ "ld1rqw { z24.s }, p0/Z, [x23]\n"
".inst 0x658ab77b // bfcvt z27.h, p5/M, z27.s\n"
+ ".inst 0x658ab739 // bfcvt z25.h, p5/M, z25.s\n"
".inst 0x658ab75a // bfcvt z26.h, p5/M, z26.s\n"
- "uzp1 z29.h, z29.h, z29.h\n"
- "ld1h { z25.h }, p5/Z, [x10]\n"
- "ld1h { z24.h }, p5/Z, [x10, #1, MUL VL]\n"
- "uzp1 z28.h, z28.h, z28.h\n"
+ ".inst 0x658ab718 // bfcvt z24.h, p5/M, z24.s\n"
"uzp1 z27.h, z27.h, z27.h\n"
+ "uzp1 z25.h, z25.h, z25.h\n"
"uzp1 z26.h, z26.h, z26.h\n"
- "trn1 z29.d, z29.d, z28.d\n"
- ".inst 0x6479e7a8 // bfmmla z8.s, z29.h, z25.h\n"
- "trn1 z27.d, z27.d, z26.d\n"
- ".inst 0x6479e770 // bfmmla z16.s, z27.h, z25.h\n"
- ".inst 0x6478e7ac // bfmmla z12.s, z29.h, z24.h\n"
+ "uzp1 z24.h, z24.h, z24.h\n"
+ "trn1 z27.d, z27.d, z25.d\n"
+ "trn1 z26.d, z26.d, z24.d\n"
+ ".inst 0x647de768 // bfmmla z8.s, z27.h, z29.h\n"
+ ".inst 0x647de750 // bfmmla z16.s, z26.h, z29.h\n"
"ld1h { z25.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6478e774 // bfmmla z20.s, z27.h, z24.h\n"
+ ".inst 0x647ce76c // bfmmla z12.s, z27.h, z28.h\n"
+ ".inst 0x647ce754 // bfmmla z20.s, z26.h, z28.h\n"
"ld1h { z24.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6479e7a9 // bfmmla z9.s, z29.h, z25.h\n"
- ".inst 0x6479e771 // bfmmla z17.s, z27.h, z25.h\n"
- ".inst 0x6478e7ad // bfmmla z13.s, z29.h, z24.h\n"
+ ".inst 0x6479e769 // bfmmla z9.s, z27.h, z25.h\n"
+ ".inst 0x6479e751 // bfmmla z17.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x6478e775 // bfmmla z21.s, z27.h, z24.h\n"
+ ".inst 0x6478e76d // bfmmla z13.s, z27.h, z24.h\n"
+ ".inst 0x6478e755 // bfmmla z21.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6479e7aa // bfmmla z10.s, z29.h, z25.h\n"
- ".inst 0x6479e772 // bfmmla z18.s, z27.h, z25.h\n"
- ".inst 0x6478e7ae // bfmmla z14.s, z29.h, z24.h\n"
+ ".inst 0x6479e76a // bfmmla z10.s, z27.h, z25.h\n"
+ ".inst 0x6479e752 // bfmmla z18.s, z26.h, z25.h\n"
"ld1h { z25.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6478e776 // bfmmla z22.s, z27.h, z24.h\n"
+ ".inst 0x6478e76e // bfmmla z14.s, z27.h, z24.h\n"
+ ".inst 0x6478e756 // bfmmla z22.s, z26.h, z24.h\n"
"ld1h { z24.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6479e7ab // bfmmla z11.s, z29.h, z25.h\n"
"addvl x10, x10, #8\n"
- ".inst 0x6479e773 // bfmmla z19.s, z27.h, z25.h\n"
- ".inst 0x6478e7af // bfmmla z15.s, z29.h, z24.h\n"
- ".inst 0x6478e777 // bfmmla z23.s, z27.h, z24.h\n"
+ ".inst 0x6479e76b // bfmmla z11.s, z27.h, z25.h\n"
+ ".inst 0x6479e753 // bfmmla z19.s, z26.h, z25.h\n"
+ ".inst 0x6478e76f // bfmmla z15.s, z27.h, z24.h\n"
+ ".inst 0x6478e757 // bfmmla z23.s, z26.h, z24.h\n"
"50:" // Height 4: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z6.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
"uzp1 z20.d, z17.d, z21.d\n"
@@ -941,9 +943,9 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
"tbz %x[flags], #1, 51f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z24.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z24.s }, p5/Z, [x21]\n"
"ld1rw { z23.s }, p5/Z, [x20]\n"
"fmin z6.s, p5/M, z6.s, z24.s\n"
"fmin z12.s, p5/M, z12.s, z24.s\n"
@@ -983,28 +985,28 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
"52:" // Height 4: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1017,15 +1019,15 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cbz x12, 55f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1048,46 +1050,46 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"55:" // Height 5: no bias
"tbz %x[flags], #0, 56f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x23, x9, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
"add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x22]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x21]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z17.d, z18.d, z21.d\n"
- "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x20]\n"
- "zip1 z18.d, z19.d, z22.d\n"
- "zip2 z22.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1126,8 +1128,8 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"58:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 59f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1155,115 +1157,115 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"ble 62f\n"
"61:" // Height 5: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z6.s }, p0/Z, [x26]\n"
- "ld1rqw { z5.s }, p0/Z, [x25]\n"
- ".inst 0x658ab4c6 // bfcvt z6.h, p5/M, z6.s\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- ".inst 0x658ab4a5 // bfcvt z5.h, p5/M, z5.s\n"
- ".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
- "ld1rqw { z2.s }, p0/Z, [x22]\n"
- ".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
- "uzp1 z6.h, z6.h, z6.h\n"
- "ld1h { z1.h }, p5/Z, [x10]\n"
- "uzp1 z5.h, z5.h, z5.h\n"
- "uzp1 z4.h, z4.h, z4.h\n"
- "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z5.h }, p5/Z, [x10, #1, MUL VL]\n"
"sub x27, x27, #0x4\n"
- "uzp1 z3.h, z3.h, z3.h\n"
- ".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
"cmp x27, #0x4\n"
+ "ld1rqw { z4.s }, p0/Z, [x26]\n"
"add x26, x26, #0x10\n"
- "trn1 z6.d, z6.d, z5.d\n"
- "trn1 z4.d, z4.d, z3.d\n"
- ".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
+ "ld1rqw { z1.s }, p0/Z, [x25]\n"
"add x25, x25, #0x10\n"
- "uzp1 z2.h, z2.h, z2.h\n"
- ".inst 0x6461e490 // bfmmla z16.s, z4.h, z1.h\n"
- ".inst 0x6461e458 // bfmmla z24.s, z2.h, z1.h\n"
- "ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6460e4cc // bfmmla z12.s, z6.h, z0.h\n"
- ".inst 0x6460e494 // bfmmla z20.s, z4.h, z0.h\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
"add x24, x24, #0x10\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x22]\n"
"add x23, x23, #0x10\n"
- ".inst 0x6460e45c // bfmmla z28.s, z2.h, z0.h\n"
- "ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
"add x22, x22, #0x10\n"
- ".inst 0x6461e491 // bfmmla z17.s, z4.h, z1.h\n"
+ ".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
+ ".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
+ ".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
+ ".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
+ "uzp1 z4.h, z4.h, z4.h\n"
+ ".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "trn1 z4.d, z4.d, z1.d\n"
+ "uzp1 z2.h, z2.h, z2.h\n"
+ "trn1 z3.d, z3.d, z0.d\n"
+ ".inst 0x6466e488 // bfmmla z8.s, z4.h, z6.h\n"
+ ".inst 0x6466e458 // bfmmla z24.s, z2.h, z6.h\n"
+ ".inst 0x6465e48c // bfmmla z12.s, z4.h, z5.h\n"
+ ".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
+ ".inst 0x6466e470 // bfmmla z16.s, z3.h, z6.h\n"
+ "ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x6465e474 // bfmmla z20.s, z3.h, z5.h\n"
+ "ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x6461e489 // bfmmla z9.s, z4.h, z1.h\n"
+ ".inst 0x6461e471 // bfmmla z17.s, z3.h, z1.h\n"
".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x6460e4cd // bfmmla z13.s, z6.h, z0.h\n"
- ".inst 0x6460e495 // bfmmla z21.s, z4.h, z0.h\n"
+ ".inst 0x6460e48d // bfmmla z13.s, z4.h, z0.h\n"
+ ".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
".inst 0x6460e45d // bfmmla z29.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
- ".inst 0x6461e492 // bfmmla z18.s, z4.h, z1.h\n"
+ ".inst 0x6461e48a // bfmmla z10.s, z4.h, z1.h\n"
+ ".inst 0x6461e472 // bfmmla z18.s, z3.h, z1.h\n"
".inst 0x6461e45a // bfmmla z26.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6460e4ce // bfmmla z14.s, z6.h, z0.h\n"
- ".inst 0x6460e496 // bfmmla z22.s, z4.h, z0.h\n"
+ ".inst 0x6460e48e // bfmmla z14.s, z4.h, z0.h\n"
+ ".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e45e // bfmmla z30.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
"addvl x10, x10, #8\n"
- ".inst 0x6461e493 // bfmmla z19.s, z4.h, z1.h\n"
+ ".inst 0x6461e48b // bfmmla z11.s, z4.h, z1.h\n"
+ ".inst 0x6461e473 // bfmmla z19.s, z3.h, z1.h\n"
".inst 0x6461e45b // bfmmla z27.s, z2.h, z1.h\n"
- ".inst 0x6460e4cf // bfmmla z15.s, z6.h, z0.h\n"
- ".inst 0x6460e497 // bfmmla z23.s, z4.h, z0.h\n"
+ ".inst 0x6460e48f // bfmmla z15.s, z4.h, z0.h\n"
+ ".inst 0x6460e477 // bfmmla z23.s, z3.h, z0.h\n"
".inst 0x6460e45f // bfmmla z31.s, z2.h, z0.h\n"
"bgt 61b\n"
"62:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z6.s }, p0/Z, [x26]\n"
- "ld1rqw { z5.s }, p0/Z, [x25]\n"
- ".inst 0x658ab4c6 // bfcvt z6.h, p5/M, z6.s\n"
- "ld1rqw { z4.s }, p0/Z, [x24]\n"
- "ld1rqw { z3.s }, p0/Z, [x23]\n"
- ".inst 0x658ab4a5 // bfcvt z5.h, p5/M, z5.s\n"
- ".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
+ "ld1h { z6.h }, p5/Z, [x10]\n"
+ "ld1h { z5.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqw { z4.s }, p0/Z, [x26]\n"
+ "ld1rqw { z1.s }, p0/Z, [x25]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
+ "ld1rqw { z0.s }, p0/Z, [x23]\n"
"ld1rqw { z2.s }, p0/Z, [x22]\n"
+ ".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
+ ".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
- "uzp1 z6.h, z6.h, z6.h\n"
- "ld1h { z1.h }, p5/Z, [x10]\n"
- "uzp1 z5.h, z5.h, z5.h\n"
+ ".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
"uzp1 z4.h, z4.h, z4.h\n"
- "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- "uzp1 z3.h, z3.h, z3.h\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "trn1 z6.d, z6.d, z5.d\n"
- "trn1 z4.d, z4.d, z3.d\n"
- ".inst 0x6461e4c8 // bfmmla z8.s, z6.h, z1.h\n"
+ "uzp1 z3.h, z3.h, z3.h\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
+ "trn1 z4.d, z4.d, z1.d\n"
"uzp1 z2.h, z2.h, z2.h\n"
- ".inst 0x6461e490 // bfmmla z16.s, z4.h, z1.h\n"
- ".inst 0x6461e458 // bfmmla z24.s, z2.h, z1.h\n"
+ "trn1 z3.d, z3.d, z0.d\n"
+ ".inst 0x6466e488 // bfmmla z8.s, z4.h, z6.h\n"
+ ".inst 0x6465e48c // bfmmla z12.s, z4.h, z5.h\n"
+ ".inst 0x6466e470 // bfmmla z16.s, z3.h, z6.h\n"
+ ".inst 0x6466e458 // bfmmla z24.s, z2.h, z6.h\n"
"ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6460e4cc // bfmmla z12.s, z6.h, z0.h\n"
- ".inst 0x6460e494 // bfmmla z20.s, z4.h, z0.h\n"
- ".inst 0x6460e45c // bfmmla z28.s, z2.h, z0.h\n"
+ ".inst 0x6465e474 // bfmmla z20.s, z3.h, z5.h\n"
+ ".inst 0x6465e45c // bfmmla z28.s, z2.h, z5.h\n"
"ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6461e4c9 // bfmmla z9.s, z6.h, z1.h\n"
- ".inst 0x6461e491 // bfmmla z17.s, z4.h, z1.h\n"
+ ".inst 0x6461e489 // bfmmla z9.s, z4.h, z1.h\n"
+ ".inst 0x6461e471 // bfmmla z17.s, z3.h, z1.h\n"
".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x6460e4cd // bfmmla z13.s, z6.h, z0.h\n"
- ".inst 0x6460e495 // bfmmla z21.s, z4.h, z0.h\n"
+ ".inst 0x6460e48d // bfmmla z13.s, z4.h, z0.h\n"
+ ".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
".inst 0x6460e45d // bfmmla z29.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6461e4ca // bfmmla z10.s, z6.h, z1.h\n"
- ".inst 0x6461e492 // bfmmla z18.s, z4.h, z1.h\n"
+ ".inst 0x6461e48a // bfmmla z10.s, z4.h, z1.h\n"
+ ".inst 0x6461e472 // bfmmla z18.s, z3.h, z1.h\n"
".inst 0x6461e45a // bfmmla z26.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6460e4ce // bfmmla z14.s, z6.h, z0.h\n"
- ".inst 0x6460e496 // bfmmla z22.s, z4.h, z0.h\n"
+ ".inst 0x6460e48e // bfmmla z14.s, z4.h, z0.h\n"
+ ".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
".inst 0x6460e45e // bfmmla z30.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x6461e4cb // bfmmla z11.s, z6.h, z1.h\n"
"addvl x10, x10, #8\n"
- ".inst 0x6461e493 // bfmmla z19.s, z4.h, z1.h\n"
+ ".inst 0x6461e48b // bfmmla z11.s, z4.h, z1.h\n"
+ ".inst 0x6461e473 // bfmmla z19.s, z3.h, z1.h\n"
".inst 0x6461e45b // bfmmla z27.s, z2.h, z1.h\n"
- ".inst 0x6460e4cf // bfmmla z15.s, z6.h, z0.h\n"
- ".inst 0x6460e497 // bfmmla z23.s, z4.h, z0.h\n"
+ ".inst 0x6460e48f // bfmmla z15.s, z4.h, z0.h\n"
+ ".inst 0x6460e477 // bfmmla z23.s, z3.h, z0.h\n"
".inst 0x6460e45f // bfmmla z31.s, z2.h, z0.h\n"
"63:" // Height 5: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
@@ -1271,20 +1273,20 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 58b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z6.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1296,9 +1298,9 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z26.d, z26.d, z30.d\n"
"uzp1 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 64f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z0.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z0.s }, p5/Z, [x21]\n"
"ld1rw { z23.s }, p5/Z, [x20]\n"
"fmin z6.s, p5/M, z6.s, z0.s\n"
"fmin z12.s, p5/M, z12.s, z0.s\n"
@@ -1346,22 +1348,22 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x22]\n"
- "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x23]\n"
+ "st1w { z25.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x23, #3, MUL VL]\n"
"65:" // Height 5: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1369,12 +1371,13 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"b 80f\n"
"66:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
- "mov x12, %x[bias]\n"
+ "ldr x12, [%x[args_ptr], %[offsetof_bias]]\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1387,15 +1390,15 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"cbz x12, 68f\n"
"ld1w { z8.s }, p5/Z, [x12]\n"
"ld1w { z9.s }, p5/Z, [x12, #1, MUL VL]\n"
- "zip2 z12.d, z8.d, z8.d\n"
- "zip1 z8.d, z8.d, z8.d\n"
"ld1w { z10.s }, p5/Z, [x12, #2, MUL VL]\n"
"ld1w { z11.s }, p5/Z, [x12, #3, MUL VL]\n"
+ "addvl x12, x12, #4\n"
+ "zip2 z12.d, z8.d, z8.d\n"
+ "zip1 z8.d, z8.d, z8.d\n"
"zip2 z13.d, z9.d, z9.d\n"
"zip1 z9.d, z9.d, z9.d\n"
"zip2 z14.d, z10.d, z10.d\n"
"zip1 z10.d, z10.d, z10.d\n"
- "addvl x12, x12, #4\n"
"zip2 z15.d, z11.d, z11.d\n"
"zip1 z11.d, z11.d, z11.d\n"
"mov z16.d, z8.d\n"
@@ -1418,54 +1421,54 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"68:" // Height 6: no bias
"tbz %x[flags], #0, 69f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "ld1w { z17.s }, p4/Z, [x9]\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
- "zip1 z8.d, z17.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "zip2 z12.d, z17.d, z12.d\n"
- "zip1 z9.d, z18.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x23]\n"
- "zip2 z13.d, z18.d, z13.d\n"
- "zip1 z10.d, z20.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip2 z14.d, z20.d, z14.d\n"
- "zip1 z11.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x22]\n"
- "zip2 z15.d, z16.d, z15.d\n"
- "zip1 z16.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip2 z20.d, z17.d, z20.d\n"
- "zip1 z17.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x21]\n"
- "zip2 z21.d, z18.d, z21.d\n"
- "zip1 z18.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip2 z22.d, z19.d, z22.d\n"
- "zip1 z19.d, z24.d, z23.d\n"
"ld1w { z0.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z28.s }, p4/Z, [x20]\n"
- "zip2 z23.d, z24.d, z23.d\n"
- "zip1 z24.d, z25.d, z28.d\n"
"ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1501,8 +1504,8 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"mov x28, #0x0\n"
"71:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 72f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1533,146 +1536,146 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"ble 75f\n"
"74:" // Height 6: Multiply loop: Main loop head
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z7.s }, p0/Z, [x26]\n"
- "ld1rqw { z6.s }, p0/Z, [x25]\n"
- ".inst 0x658ab4e7 // bfcvt z7.h, p5/M, z7.s\n"
- "ld1rqw { z5.s }, p0/Z, [x24]\n"
- "ld1rqw { z4.s }, p0/Z, [x23]\n"
- ".inst 0x658ab4c6 // bfcvt z6.h, p5/M, z6.s\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x4\n"
+ "cmp x27, #0x4\n"
+ "ld1rqw { z5.s }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqw { z4.s }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x22]\n"
+ "ld1rqw { z0.s }, p0/Z, [x21]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
".inst 0x658ab4a5 // bfcvt z5.h, p5/M, z5.s\n"
- "ld1rqw { z3.s }, p0/Z, [x22]\n"
- "ld1rqw { z2.s }, p0/Z, [x21]\n"
".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
+ "add x21, x21, #0x10\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
+ ".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "uzp1 z7.h, z7.h, z7.h\n"
- "ld1h { z1.h }, p5/Z, [x10]\n"
- "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- "uzp1 z6.h, z6.h, z6.h\n"
+ ".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
- "sub x27, x27, #0x4\n"
- "cmp x27, #0x4\n"
"uzp1 z4.h, z4.h, z4.h\n"
"uzp1 z3.h, z3.h, z3.h\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "trn1 z7.d, z7.d, z6.d\n"
- ".inst 0x6461e4e8 // bfmmla z8.s, z7.h, z1.h\n"
- "add x24, x24, #0x10\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
"trn1 z5.d, z5.d, z4.d\n"
- "trn1 z3.d, z3.d, z2.d\n"
- ".inst 0x6461e4b0 // bfmmla z16.s, z5.h, z1.h\n"
- "add x23, x23, #0x10\n"
- ".inst 0x6461e478 // bfmmla z24.s, z3.h, z1.h\n"
- ".inst 0x6460e4ec // bfmmla z12.s, z7.h, z0.h\n"
+ "trn1 z3.d, z3.d, z1.d\n"
+ "trn1 z2.d, z2.d, z0.d\n"
+ ".inst 0x6467e4a8 // bfmmla z8.s, z5.h, z7.h\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ ".inst 0x6466e4ac // bfmmla z12.s, z5.h, z6.h\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
"ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
- "add x22, x22, #0x10\n"
- ".inst 0x6460e4b4 // bfmmla z20.s, z5.h, z0.h\n"
- ".inst 0x6460e47c // bfmmla z28.s, z3.h, z0.h\n"
+ ".inst 0x6466e45c // bfmmla z28.s, z2.h, z6.h\n"
"ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
- "add x21, x21, #0x10\n"
- ".inst 0x6461e4e9 // bfmmla z9.s, z7.h, z1.h\n"
- ".inst 0x6461e4b1 // bfmmla z17.s, z5.h, z1.h\n"
- ".inst 0x6461e479 // bfmmla z25.s, z3.h, z1.h\n"
- ".inst 0x6460e4ed // bfmmla z13.s, z7.h, z0.h\n"
+ ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
+ ".inst 0x6461e471 // bfmmla z17.s, z3.h, z1.h\n"
+ ".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x6460e4b5 // bfmmla z21.s, z5.h, z0.h\n"
- ".inst 0x6460e47d // bfmmla z29.s, z3.h, z0.h\n"
+ ".inst 0x6460e4ad // bfmmla z13.s, z5.h, z0.h\n"
+ ".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
+ ".inst 0x6460e45d // bfmmla z29.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6461e4ea // bfmmla z10.s, z7.h, z1.h\n"
- ".inst 0x6461e4b2 // bfmmla z18.s, z5.h, z1.h\n"
- ".inst 0x6461e47a // bfmmla z26.s, z3.h, z1.h\n"
- ".inst 0x6460e4ee // bfmmla z14.s, z7.h, z0.h\n"
+ ".inst 0x6461e4aa // bfmmla z10.s, z5.h, z1.h\n"
+ ".inst 0x6461e472 // bfmmla z18.s, z3.h, z1.h\n"
+ ".inst 0x6461e45a // bfmmla z26.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6460e4b6 // bfmmla z22.s, z5.h, z0.h\n"
- ".inst 0x6460e47e // bfmmla z30.s, z3.h, z0.h\n"
+ ".inst 0x6460e4ae // bfmmla z14.s, z5.h, z0.h\n"
+ ".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
+ ".inst 0x6460e45e // bfmmla z30.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #8\n"
- ".inst 0x6461e4eb // bfmmla z11.s, z7.h, z1.h\n"
- ".inst 0x6461e4b3 // bfmmla z19.s, z5.h, z1.h\n"
- ".inst 0x6461e47b // bfmmla z27.s, z3.h, z1.h\n"
- ".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
- ".inst 0x6460e4b7 // bfmmla z23.s, z5.h, z0.h\n"
- ".inst 0x6460e47f // bfmmla z31.s, z3.h, z0.h\n"
+ ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
+ ".inst 0x6461e473 // bfmmla z19.s, z3.h, z1.h\n"
+ ".inst 0x6461e45b // bfmmla z27.s, z2.h, z1.h\n"
+ ".inst 0x6460e4af // bfmmla z15.s, z5.h, z0.h\n"
+ ".inst 0x6460e477 // bfmmla z23.s, z3.h, z0.h\n"
+ ".inst 0x6460e45f // bfmmla z31.s, z2.h, z0.h\n"
"bgt 74b\n"
"75:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.s, XZR, x27\n"
- "ld1rqw { z7.s }, p0/Z, [x26]\n"
- "ld1rqw { z6.s }, p0/Z, [x25]\n"
- ".inst 0x658ab4e7 // bfcvt z7.h, p5/M, z7.s\n"
- "ld1rqw { z5.s }, p0/Z, [x24]\n"
- "ld1rqw { z4.s }, p0/Z, [x23]\n"
- ".inst 0x658ab4c6 // bfcvt z6.h, p5/M, z6.s\n"
+ "ld1h { z7.h }, p5/Z, [x10]\n"
+ "ld1h { z6.h }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1rqw { z5.s }, p0/Z, [x26]\n"
+ "ld1rqw { z4.s }, p0/Z, [x25]\n"
+ "ld1rqw { z3.s }, p0/Z, [x24]\n"
+ "ld1rqw { z1.s }, p0/Z, [x23]\n"
+ "ld1rqw { z2.s }, p0/Z, [x22]\n"
+ "ld1rqw { z0.s }, p0/Z, [x21]\n"
".inst 0x658ab4a5 // bfcvt z5.h, p5/M, z5.s\n"
- "ld1rqw { z3.s }, p0/Z, [x22]\n"
- "ld1rqw { z2.s }, p0/Z, [x21]\n"
".inst 0x658ab484 // bfcvt z4.h, p5/M, z4.s\n"
".inst 0x658ab463 // bfcvt z3.h, p5/M, z3.s\n"
+ ".inst 0x658ab421 // bfcvt z1.h, p5/M, z1.s\n"
".inst 0x658ab442 // bfcvt z2.h, p5/M, z2.s\n"
- "uzp1 z7.h, z7.h, z7.h\n"
- "ld1h { z1.h }, p5/Z, [x10]\n"
- "ld1h { z0.h }, p5/Z, [x10, #1, MUL VL]\n"
- "uzp1 z6.h, z6.h, z6.h\n"
+ ".inst 0x658ab400 // bfcvt z0.h, p5/M, z0.s\n"
"uzp1 z5.h, z5.h, z5.h\n"
"uzp1 z4.h, z4.h, z4.h\n"
"uzp1 z3.h, z3.h, z3.h\n"
+ "uzp1 z1.h, z1.h, z1.h\n"
"uzp1 z2.h, z2.h, z2.h\n"
- "trn1 z7.d, z7.d, z6.d\n"
- ".inst 0x6461e4e8 // bfmmla z8.s, z7.h, z1.h\n"
+ "uzp1 z0.h, z0.h, z0.h\n"
"trn1 z5.d, z5.d, z4.d\n"
- "trn1 z3.d, z3.d, z2.d\n"
- ".inst 0x6461e4b0 // bfmmla z16.s, z5.h, z1.h\n"
- ".inst 0x6461e478 // bfmmla z24.s, z3.h, z1.h\n"
- ".inst 0x6460e4ec // bfmmla z12.s, z7.h, z0.h\n"
+ "trn1 z3.d, z3.d, z1.d\n"
+ "trn1 z2.d, z2.d, z0.d\n"
+ ".inst 0x6467e4a8 // bfmmla z8.s, z5.h, z7.h\n"
+ ".inst 0x6467e470 // bfmmla z16.s, z3.h, z7.h\n"
+ ".inst 0x6466e4ac // bfmmla z12.s, z5.h, z6.h\n"
+ ".inst 0x6466e474 // bfmmla z20.s, z3.h, z6.h\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
"ld1h { z1.h }, p5/Z, [x10, #2, MUL VL]\n"
- ".inst 0x6460e4b4 // bfmmla z20.s, z5.h, z0.h\n"
- ".inst 0x6460e47c // bfmmla z28.s, z3.h, z0.h\n"
+ ".inst 0x6466e45c // bfmmla z28.s, z2.h, z6.h\n"
"ld1h { z0.h }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x6461e4e9 // bfmmla z9.s, z7.h, z1.h\n"
- ".inst 0x6461e4b1 // bfmmla z17.s, z5.h, z1.h\n"
- ".inst 0x6461e479 // bfmmla z25.s, z3.h, z1.h\n"
- ".inst 0x6460e4ed // bfmmla z13.s, z7.h, z0.h\n"
+ ".inst 0x6461e4a9 // bfmmla z9.s, z5.h, z1.h\n"
+ ".inst 0x6461e471 // bfmmla z17.s, z3.h, z1.h\n"
+ ".inst 0x6461e459 // bfmmla z25.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #4, MUL VL]\n"
- ".inst 0x6460e4b5 // bfmmla z21.s, z5.h, z0.h\n"
- ".inst 0x6460e47d // bfmmla z29.s, z3.h, z0.h\n"
+ ".inst 0x6460e4ad // bfmmla z13.s, z5.h, z0.h\n"
+ ".inst 0x6460e475 // bfmmla z21.s, z3.h, z0.h\n"
+ ".inst 0x6460e45d // bfmmla z29.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x6461e4ea // bfmmla z10.s, z7.h, z1.h\n"
- ".inst 0x6461e4b2 // bfmmla z18.s, z5.h, z1.h\n"
- ".inst 0x6461e47a // bfmmla z26.s, z3.h, z1.h\n"
- ".inst 0x6460e4ee // bfmmla z14.s, z7.h, z0.h\n"
+ ".inst 0x6461e4aa // bfmmla z10.s, z5.h, z1.h\n"
+ ".inst 0x6461e472 // bfmmla z18.s, z3.h, z1.h\n"
+ ".inst 0x6461e45a // bfmmla z26.s, z2.h, z1.h\n"
"ld1h { z1.h }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x6460e4b6 // bfmmla z22.s, z5.h, z0.h\n"
- ".inst 0x6460e47e // bfmmla z30.s, z3.h, z0.h\n"
+ ".inst 0x6460e4ae // bfmmla z14.s, z5.h, z0.h\n"
+ ".inst 0x6460e476 // bfmmla z22.s, z3.h, z0.h\n"
+ ".inst 0x6460e45e // bfmmla z30.s, z2.h, z0.h\n"
"ld1h { z0.h }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #8\n"
- ".inst 0x6461e4eb // bfmmla z11.s, z7.h, z1.h\n"
- ".inst 0x6461e4b3 // bfmmla z19.s, z5.h, z1.h\n"
- ".inst 0x6461e47b // bfmmla z27.s, z3.h, z1.h\n"
- ".inst 0x6460e4ef // bfmmla z15.s, z7.h, z0.h\n"
- ".inst 0x6460e4b7 // bfmmla z23.s, z5.h, z0.h\n"
- ".inst 0x6460e47f // bfmmla z31.s, z3.h, z0.h\n"
+ ".inst 0x6461e4ab // bfmmla z11.s, z5.h, z1.h\n"
+ ".inst 0x6461e473 // bfmmla z19.s, z3.h, z1.h\n"
+ ".inst 0x6461e45b // bfmmla z27.s, z2.h, z1.h\n"
+ ".inst 0x6460e4af // bfmmla z15.s, z5.h, z0.h\n"
+ ".inst 0x6460e477 // bfmmla z23.s, z3.h, z0.h\n"
+ ".inst 0x6460e45f // bfmmla z31.s, z2.h, z0.h\n"
"76:" // Height 6: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 71b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x25, x9, x20, LSL #2\n"
- "add x24, x25, x20, LSL #2\n"
"uzp1 z6.d, z8.d, z12.d\n"
- "add x23, x24, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x26, x9, x20, LSL #2\n"
+ "add x25, x26, x20, LSL #2\n"
+ "add x24, x25, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "add x23, x24, x20, LSL #2\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
@@ -1688,9 +1691,9 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"uzp1 z30.d, z27.d, z31.d\n"
"uzp2 z27.d, z27.d, z31.d\n"
"tbz %x[flags], #1, 77f\n"
- "add x20, %x[args_ptr], %[offset_max]\n"
- "ld1rw { z1.s }, p5/Z, [x20]\n"
+ "add x21, %x[args_ptr], %[offset_max]\n"
"add x20, %x[args_ptr], %[offset_min]\n"
+ "ld1rw { z1.s }, p5/Z, [x21]\n"
"ld1rw { z0.s }, p5/Z, [x20]\n"
"fmin z6.s, p5/M, z6.s, z1.s\n"
"fmin z12.s, p5/M, z12.s, z1.s\n"
@@ -1746,26 +1749,26 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
- "st1w { z8.s }, p4, [x25]\n"
- "st1w { z9.s }, p3, [x25, #1, MUL VL]\n"
- "st1w { z10.s }, p2, [x25, #2, MUL VL]\n"
- "st1w { z11.s }, p1, [x25, #3, MUL VL]\n"
- "st1w { z15.s }, p4, [x24]\n"
- "st1w { z20.s }, p3, [x24, #1, MUL VL]\n"
- "st1w { z21.s }, p2, [x24, #2, MUL VL]\n"
- "st1w { z22.s }, p1, [x24, #3, MUL VL]\n"
- "st1w { z16.s }, p4, [x23]\n"
- "st1w { z17.s }, p3, [x23, #1, MUL VL]\n"
- "st1w { z18.s }, p2, [x23, #2, MUL VL]\n"
- "st1w { z19.s }, p1, [x23, #3, MUL VL]\n"
- "st1w { z23.s }, p4, [x22]\n"
- "st1w { z28.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z29.s }, p2, [x22, #2, MUL VL]\n"
- "st1w { z30.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z24.s }, p4, [x21]\n"
- "st1w { z25.s }, p3, [x21, #1, MUL VL]\n"
- "st1w { z26.s }, p2, [x21, #2, MUL VL]\n"
- "st1w { z27.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z8.s }, p4, [x26]\n"
+ "st1w { z9.s }, p3, [x26, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x26, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x26, #3, MUL VL]\n"
+ "st1w { z15.s }, p4, [x25]\n"
+ "st1w { z20.s }, p3, [x25, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x25, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x24]\n"
+ "st1w { z17.s }, p3, [x24, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x24, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x24, #3, MUL VL]\n"
+ "st1w { z23.s }, p4, [x23]\n"
+ "st1w { z28.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x22]\n"
+ "st1w { z25.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x22, #3, MUL VL]\n"
"78:" // Height 6: Writeback done
"decw x11, ALL, MUL #4\n"
"cmp x11, XZR\n"
@@ -1782,8 +1785,8 @@ void sve_hybrid_fp32bf16fp32_mmla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
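
Note on the hunks above: this kernel's `bias` and `output_ptr` values move out of the inline-asm operand list and into the `KernelArgs` structure, so the assembly now fetches them through the single `args_ptr` base register ("ldr x12, [%x[args_ptr], %[offsetof_bias]]", "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]") instead of consuming dedicated operands ("mov x12, %x[bias]"). The Height-6 path likewise writes the advanced output pointer back into the struct ("str x20, [%x[args_ptr], %[offsetof_output_ptr]]") rather than updating an `output_ptr` asm operand, which plausibly eases register pressure and goes with the renumbering of the writeback address registers (x25..x22 becoming x26..x23). A minimal sketch of the pattern, assuming field names taken from the `%[offsetof_*]` symbols above rather than the library's real `KernelArgs` definition (AArch64 only):

// Sketch only: illustrates the parameter-passing pattern this diff adopts,
// not the library's exact code. Field names are assumed from the
// %[offsetof_*] symbols visible in the hunks above.
#include <cstddef>

struct KernelArgs {
    size_t      N = {};
    const void *B_ptr = {};
    const void *bias = {};
    void       *output_ptr = {};
};

static void *load_output_ptr(const KernelArgs &ka) {
    void *out;
    __asm__ __volatile__(
        // One struct base register serves several pointers, mirroring
        // "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]" above.
        "ldr %[out], [%x[args], %[off_out]]\n"
        : [out] "=r"(out)
        : [args] "r"(&ka),
          [off_out] "I"(offsetof(KernelArgs, output_ptr))
        : "memory");
    return out;
}

Passing one struct pointer instead of several loose asm operands matters here because the main loops already keep x9-x12 and x20-x28 fully occupied, per the clobber list.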
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp
index ffc1606b3f..a8e82516a1 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsSVE<rhs_operand_type, result_type, 4, 4, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
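
Note: `StdTransformsSVE` grows a leading `lhs_operand_type` parameter, so a kernel now describes its LHS and RHS element types separately instead of reusing one operand type for both sides. For this s8qa kernel the two presumably coincide; hybrids such as the fp32/bf16 kernel above, which convert an fp32 LHS to bf16 operands, need the distinction. A sketch of the shape of the change (parameter names assumed; the real template lives in arm_gemm and carries more machinery):

// Sketch only -- not the library's definition. The three trailing integers
// mirror the <..., 4, 4, 4> instantiation above; their exact meaning
// (tile height/width and K unroll) is assumed here.
template <typename TLhsOperand, typename TRhsOperand, typename TResult,
          unsigned int Height, unsigned int Width, unsigned int KUnroll>
class StdTransformsSVE_sketch
{
    // A-side (LHS) transforms would be parameterised on TLhsOperand,
    // B-side transforms on TRhsOperand, and the merge step on TResult,
    // instead of deriving everything from a single operand type.
};

// Usage mirroring the hunk above:
//   StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 4> transforms = {};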
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
index b7c523466e..7de5e09bd5 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void sve_hybrid_s8qa_dot_4x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -91,20 +91,20 @@ void sve_hybrid_s8qa_dot_4x4VL (
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"3:" // Height 1: setup done
"mov x26, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -120,41 +120,41 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ble 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1b { z20.b }, p2/Z, [x28]\n"
- "sdot z16.s, z20.b, z0.b[0]\n"
- "ld1b { z21.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z20.b }, p2/Z, [x28, #2, MUL VL]\n"
- "sdot z17.s, z21.b, z0.b[0]\n"
- "sdot z18.s, z20.b, z0.b[0]\n"
- "ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z19.s, z20.b, z0.b[0]\n"
+ "ld1b { z21.b }, p2/Z, [x28]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z20.b }, p2/Z, [x28, #4, MUL VL]\n"
- "sdot z16.s, z20.b, z0.b[1]\n"
- "ld1b { z21.b }, p2/Z, [x28, #5, MUL VL]\n"
- "ld1b { z20.b }, p2/Z, [x28, #6, MUL VL]\n"
- "sdot z17.s, z21.b, z0.b[1]\n"
- "sdot z18.s, z20.b, z0.b[1]\n"
- "ld1b { z20.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "ld1b { z23.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "sdot z16.s, z21.b, z0.b[0]\n"
+ "ld1b { z21.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- "sdot z19.s, z20.b, z0.b[1]\n"
- "ld1b { z22.b }, p2/Z, [x28, #-8, MUL VL]\n"
- "ld1b { z20.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "ld1b { z21.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "sdot z16.s, z22.b, z0.b[2]\n"
- "sdot z17.s, z20.b, z0.b[2]\n"
- "ld1b { z20.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "sdot z18.s, z21.b, z0.b[2]\n"
- "sdot z19.s, z20.b, z0.b[2]\n"
- "ld1b { z22.b }, p2/Z, [x28, #-4, MUL VL]\n"
- "ld1b { z20.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "sdot z17.s, z26.b, z0.b[0]\n"
+ "sdot z18.s, z25.b, z0.b[0]\n"
+ "sdot z19.s, z24.b, z0.b[0]\n"
+ "sdot z16.s, z20.b, z0.b[1]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "sdot z17.s, z23.b, z0.b[1]\n"
+ "ld1b { z23.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "sdot z18.s, z22.b, z0.b[1]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "sdot z19.s, z21.b, z0.b[1]\n"
"ld1b { z21.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "sdot z16.s, z22.b, z0.b[3]\n"
- "sdot z17.s, z20.b, z0.b[3]\n"
+ "sdot z16.s, z20.b, z0.b[2]\n"
"ld1b { z20.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ "sdot z17.s, z26.b, z0.b[2]\n"
+ "sdot z18.s, z25.b, z0.b[2]\n"
+ "sdot z19.s, z24.b, z0.b[2]\n"
+ "sdot z16.s, z23.b, z0.b[3]\n"
+ "sdot z17.s, z22.b, z0.b[3]\n"
"sdot z18.s, z21.b, z0.b[3]\n"
"sdot z19.s, z20.b, z0.b[3]\n"
- "add x24, x24, #0x10\n"
"tbnz %x[flags], #31, 8f\n"
"sdot z11.s, z0.b, z15.b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
@@ -163,49 +163,49 @@ void sve_hybrid_s8qa_dot_4x4VL (
"bgt 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1b { z22.b }, p2/Z, [x28]\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "ld1b { z20.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
- "sdot z16.s, z22.b, z0.b[0]\n"
- "sdot z17.s, z20.b, z0.b[0]\n"
"ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "sdot z16.s, z23.b, z0.b[0]\n"
+ "sdot z17.s, z22.b, z0.b[0]\n"
"sdot z18.s, z21.b, z0.b[0]\n"
"sdot z19.s, z20.b, z0.b[0]\n"
- "addvl x28, x28, #4\n"
"ble 10f\n"
- "ld1b { z20.b }, p2/Z, [x28]\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
"ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "sdot z16.s, z20.b, z0.b[1]\n"
"ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z23.b, z0.b[1]\n"
"sdot z17.s, z22.b, z0.b[1]\n"
"sdot z18.s, z21.b, z0.b[1]\n"
"sdot z19.s, z20.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"ble 10f\n"
- "ld1b { z20.b }, p2/Z, [x28]\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
"ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "sdot z16.s, z20.b, z0.b[2]\n"
"ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z23.b, z0.b[2]\n"
"sdot z17.s, z22.b, z0.b[2]\n"
"sdot z18.s, z21.b, z0.b[2]\n"
"sdot z19.s, z20.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"ble 10f\n"
- "ld1b { z21.b }, p2/Z, [x28]\n"
- "ld1b { z20.b }, p2/Z, [x28, #1, MUL VL]\n"
- "sdot z16.s, z21.b, z0.b[3]\n"
- "sdot z17.s, z20.b, z0.b[3]\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z23.b, z0.b[3]\n"
+ "sdot z17.s, z22.b, z0.b[3]\n"
"sdot z18.s, z21.b, z0.b[3]\n"
"sdot z19.s, z20.b, z0.b[3]\n"
- "addvl x28, x28, #4\n"
"10:" // Height 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"sdot z11.s, z0.b, z15.b\n"
@@ -215,35 +215,35 @@ void sve_hybrid_s8qa_dot_4x4VL (
"cmp x26, x20\n"
"bne 4b\n"
"tbnz %x[flags], #31, 12f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, #0x4\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z20.s, p2/M, z20.s\n"
"saddv d11, p0, z11.s\n"
"mov z11.s, z11.s[0]\n"
- "neg z20.s, p2/M, z20.s\n"
"mul z11.s, p2/M, z11.s, z20.s\n"
"12:" // Height 1: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
"add z17.s, z17.s, z11.s\n"
"ld1w { z23.s }, p2/Z, [x10]\n"
- "ld1w { z22.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x10, #1, MUL VL]\n"
"add z18.s, z18.s, z11.s\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z21.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x10, #3, MUL VL]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"add z16.s, z16.s, z23.s\n"
- "add z17.s, z17.s, z22.s\n"
- "add z18.s, z18.s, z21.s\n"
- "add z19.s, z19.s, z20.s\n"
+ "add z17.s, z17.s, z20.s\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
+ "addvl x10, x10, #4\n"
+ "add z18.s, z18.s, z22.s\n"
+ "add z19.s, z19.s, z21.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
".inst 0x04b47610 // sqrdmulh z16.s, z16.s, z20.s\n"
".inst 0x04b47631 // sqrdmulh z17.s, z17.s, z20.s\n"
- "addvl x10, x10, #4\n"
".inst 0x04b47652 // sqrdmulh z18.s, z18.s, z20.s\n"
".inst 0x04b47673 // sqrdmulh z19.s, z19.s, z20.s\n"
"tbz %x[flags], #5, 13f\n"
@@ -261,19 +261,19 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sqadd z19.s, z19.s, z20.s\n"
"13:" // Height 1: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z20.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z20.s\n"
+ "ld1rw { z22.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z20.s\n"
- "add z18.s, z18.s, z20.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
"ld1rw { z21.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z20.s\n"
+ "add z16.s, z16.s, z22.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z22.s\n"
+ "add z18.s, z18.s, z22.s\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z22.s\n"
"smin z16.s, p2/M, z16.s, z21.s\n"
"smin z17.s, p2/M, z17.s, z21.s\n"
"smin z18.s, p2/M, z18.s, z21.s\n"
@@ -281,8 +281,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z20.s\n"
"smax z17.s, p2/M, z17.s, z20.s\n"
"smax z18.s, p2/M, z18.s, z20.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z20.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
"st1b { z16.b }, p1, [x27]\n"
@@ -300,24 +300,24 @@ void sve_hybrid_s8qa_dot_4x4VL (
"mov z15.b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"16:" // Height 2: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"17:" // Height 2: setup done
"mov x26, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -336,45 +336,45 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ble 23f\n"
"21:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z25.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
"add x24, x24, #0x10\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
- "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
- "sdot z16.s, z24.b, z0.b[0]\n"
- "sdot z20.s, z24.b, z1.b[0]\n"
- "ld1b { z24.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z25.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z17.s, z26.b, z0.b[0]\n"
- "sdot z21.s, z26.b, z1.b[0]\n"
- "sdot z18.s, z24.b, z0.b[0]\n"
- "sdot z22.s, z24.b, z1.b[0]\n"
- "ld1b { z24.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z27.b }, p2/Z, [x28, #5, MUL VL]\n"
- "sdot z19.s, z25.b, z0.b[0]\n"
- "sdot z23.s, z25.b, z1.b[0]\n"
+ "add x23, x23, #0x10\n"
"ld1b { z26.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "sdot z16.s, z25.b, z0.b[0]\n"
+ "sdot z20.s, z25.b, z1.b[0]\n"
"ld1b { z25.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ "sdot z17.s, z30.b, z0.b[0]\n"
+ "sdot z21.s, z30.b, z1.b[0]\n"
+ "sdot z18.s, z29.b, z0.b[0]\n"
+ "sdot z22.s, z29.b, z1.b[0]\n"
+ "sdot z19.s, z28.b, z0.b[0]\n"
+ "sdot z23.s, z28.b, z1.b[0]\n"
"sdot z16.s, z24.b, z0.b[1]\n"
"sdot z20.s, z24.b, z1.b[1]\n"
"ld1b { z24.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
"sdot z17.s, z27.b, z0.b[1]\n"
"sdot z21.s, z27.b, z1.b[1]\n"
- "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
"ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
"sdot z18.s, z26.b, z0.b[1]\n"
"sdot z22.s, z26.b, z1.b[1]\n"
- "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
"ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
"sdot z19.s, z25.b, z0.b[1]\n"
"sdot z23.s, z25.b, z1.b[1]\n"
- "ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
"ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z16.s, z24.b, z0.b[2]\n"
"sdot z20.s, z24.b, z1.b[2]\n"
"ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
- "add x23, x23, #0x10\n"
"sdot z17.s, z30.b, z0.b[2]\n"
"sdot z21.s, z30.b, z1.b[2]\n"
"sdot z18.s, z29.b, z0.b[2]\n"
@@ -398,34 +398,34 @@ void sve_hybrid_s8qa_dot_4x4VL (
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z27.b }, p2/Z, [x28]\n"
"ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
- "sdot z16.s, z24.b, z0.b[0]\n"
- "sdot z20.s, z24.b, z1.b[0]\n"
+ "subs x25, x25, #0x4\n"
"ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "sdot z16.s, z27.b, z0.b[0]\n"
+ "sdot z20.s, z27.b, z1.b[0]\n"
"sdot z17.s, z26.b, z0.b[0]\n"
"sdot z21.s, z26.b, z1.b[0]\n"
"sdot z18.s, z25.b, z0.b[0]\n"
"sdot z22.s, z25.b, z1.b[0]\n"
- "addvl x28, x28, #4\n"
"sdot z19.s, z24.b, z0.b[0]\n"
"sdot z23.s, z24.b, z1.b[0]\n"
"ble 24f\n"
"ld1b { z27.b }, p2/Z, [x28]\n"
"ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "sdot z16.s, z27.b, z0.b[1]\n"
"ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z27.b, z0.b[1]\n"
"sdot z20.s, z27.b, z1.b[1]\n"
"sdot z17.s, z26.b, z0.b[1]\n"
"sdot z21.s, z26.b, z1.b[1]\n"
"sdot z18.s, z25.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"sdot z22.s, z25.b, z1.b[1]\n"
"sdot z19.s, z24.b, z0.b[1]\n"
"sdot z23.s, z24.b, z1.b[1]\n"
@@ -433,29 +433,29 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1b { z27.b }, p2/Z, [x28]\n"
"ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "sdot z16.s, z27.b, z0.b[2]\n"
"ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z27.b, z0.b[2]\n"
"sdot z20.s, z27.b, z1.b[2]\n"
"sdot z17.s, z26.b, z0.b[2]\n"
"sdot z21.s, z26.b, z1.b[2]\n"
"sdot z18.s, z25.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"sdot z22.s, z25.b, z1.b[2]\n"
"sdot z19.s, z24.b, z0.b[2]\n"
"sdot z23.s, z24.b, z1.b[2]\n"
"ble 24f\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z27.b }, p2/Z, [x28]\n"
"ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
- "sdot z16.s, z24.b, z0.b[3]\n"
- "sdot z20.s, z24.b, z1.b[3]\n"
"ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z27.b, z0.b[3]\n"
+ "sdot z20.s, z27.b, z1.b[3]\n"
"sdot z17.s, z26.b, z0.b[3]\n"
"sdot z21.s, z26.b, z1.b[3]\n"
"sdot z18.s, z25.b, z0.b[3]\n"
"sdot z22.s, z25.b, z1.b[3]\n"
- "addvl x28, x28, #4\n"
"sdot z19.s, z24.b, z0.b[3]\n"
"sdot z23.s, z24.b, z1.b[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
@@ -468,18 +468,18 @@ void sve_hybrid_s8qa_dot_4x4VL (
"cmp x26, x20\n"
"bne 18b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
+ "add x24, x27, x20\n"
"tbnz %x[flags], #31, 26f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, #0x4\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1rw { z24.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z24.s, p2/M, z24.s\n"
"saddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"saddv d12, p0, z12.s\n"
- "neg z24.s, p2/M, z24.s\n"
- "mov z12.s, z12.s[0]\n"
+ "mov z11.s, z11.s[0]\n"
"mul z11.s, p2/M, z11.s, z24.s\n"
+ "mov z12.s, z12.s[0]\n"
"mul z12.s, p2/M, z12.s, z24.s\n"
"26:" // Height 2: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
@@ -518,24 +518,24 @@ void sve_hybrid_s8qa_dot_4x4VL (
".inst 0x04b876f7 // sqrdmulh z23.s, z23.s, z24.s\n"
"tbz %x[flags], #5, 27f\n"
"and z24.d, z16.d, z0.d\n"
- "asr z24.s, z24.s, #0x1f\n"
- "sqadd z16.s, z16.s, z24.s\n"
"and z30.d, z17.d, z0.d\n"
"and z29.d, z18.d, z0.d\n"
"and z28.d, z19.d, z0.d\n"
"and z27.d, z20.d, z0.d\n"
"and z26.d, z21.d, z0.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
"and z25.d, z22.d, z0.d\n"
- "and z24.d, z23.d, z0.d\n"
"asr z30.s, z30.s, #0x1f\n"
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"asr z27.s, z27.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z24.s\n"
+ "and z24.d, z23.d, z0.d\n"
"asr z26.s, z26.s, #0x1f\n"
"asr z25.s, z25.s, #0x1f\n"
- "asr z24.s, z24.s, #0x1f\n"
"sqadd z17.s, z17.s, z30.s\n"
"sqadd z18.s, z18.s, z29.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
"sqadd z19.s, z19.s, z28.s\n"
"sqadd z20.s, z20.s, z27.s\n"
"sqadd z21.s, z21.s, z26.s\n"
@@ -543,27 +543,27 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sqadd z23.s, z23.s, z24.s\n"
"27:" // Height 2: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z24.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z24.s\n"
+ "ld1rw { z26.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z24.s\n"
- "add z18.s, z18.s, z24.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z24.s\n"
- "add z20.s, z20.s, z24.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z24.s\n"
- "add z22.s, z22.s, z24.s\n"
+ "add z16.s, z16.s, z26.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z17.s, z17.s, z26.s\n"
+ "add z18.s, z18.s, z26.s\n"
"ld1rw { z25.s }, p2/Z, [x20]\n"
- "add z23.s, z23.s, z24.s\n"
+ "add z19.s, z19.s, z26.s\n"
+ "add z20.s, z20.s, z26.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z21.s, z21.s, z26.s\n"
+ "add z22.s, z22.s, z26.s\n"
"ld1rw { z24.s }, p2/Z, [x20]\n"
+ "add z23.s, z23.s, z26.s\n"
"smin z16.s, p2/M, z16.s, z25.s\n"
"smin z17.s, p2/M, z17.s, z25.s\n"
"smin z18.s, p2/M, z18.s, z25.s\n"
@@ -575,20 +575,20 @@ void sve_hybrid_s8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z24.s\n"
"smax z17.s, p2/M, z17.s, z24.s\n"
"smax z18.s, p2/M, z18.s, z24.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z24.s\n"
"smax z20.s, p2/M, z20.s, z24.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z21.s, p2/M, z21.s, z24.s\n"
"smax z22.s, p2/M, z22.s, z24.s\n"
+ "smax z23.s, p2/M, z23.s, z24.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
"uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z17.h, z22.h, z23.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z20.b, z20.b, z17.b\n"
"st1b { z16.b }, p1, [x27]\n"
- "smax z23.s, p2/M, z23.s, z24.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
- "st1b { z20.b }, p1, [x23]\n"
"addvl x27, x27, #1\n"
+ "st1b { z20.b }, p1, [x24]\n"
"28:" // Height 2: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -603,16 +603,16 @@ void sve_hybrid_s8qa_dot_4x4VL (
"mov z15.b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"30:" // Height 3: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -623,8 +623,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"mov x26, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -646,57 +646,57 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ble 37f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z3.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z31.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x22]\n"
- "ld1b { z28.b }, p2/Z, [x28]\n"
- "sdot z16.s, z28.b, z0.b[0]\n"
- "sdot z20.s, z28.b, z1.b[0]\n"
- "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
- "sdot z24.s, z28.b, z2.b[0]\n"
- "sdot z17.s, z30.b, z0.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z21.s, z30.b, z1.b[0]\n"
- "sdot z25.s, z30.b, z2.b[0]\n"
- "ld1b { z3.b }, p2/Z, [x28, #4, MUL VL]\n"
- "sdot z18.s, z29.b, z0.b[0]\n"
- "sdot z22.s, z29.b, z1.b[0]\n"
- "ld1b { z31.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1b { z30.b }, p2/Z, [x28, #6, MUL VL]\n"
- "sdot z26.s, z29.b, z2.b[0]\n"
- "sdot z19.s, z28.b, z0.b[0]\n"
+ "add x22, x22, #0x10\n"
+ "sdot z16.s, z5.b, z0.b[0]\n"
+ "sdot z20.s, z5.b, z1.b[0]\n"
+ "sdot z17.s, z29.b, z0.b[0]\n"
+ "sdot z21.s, z29.b, z1.b[0]\n"
+ "sdot z18.s, z4.b, z0.b[0]\n"
+ "sdot z24.s, z5.b, z2.b[0]\n"
+ "sdot z25.s, z29.b, z2.b[0]\n"
"ld1b { z29.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ "sdot z22.s, z4.b, z1.b[0]\n"
+ "sdot z26.s, z4.b, z2.b[0]\n"
+ "sdot z19.s, z28.b, z0.b[0]\n"
"sdot z23.s, z28.b, z1.b[0]\n"
"sdot z27.s, z28.b, z2.b[0]\n"
+ "sdot z16.s, z3.b, z0.b[1]\n"
"ld1b { z28.b }, p2/Z, [x28, #-8, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "sdot z16.s, z3.b, z0.b[1]\n"
"sdot z20.s, z3.b, z1.b[1]\n"
- "ld1b { z4.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "add x23, x23, #0x10\n"
"sdot z24.s, z3.b, z2.b[1]\n"
- "sdot z17.s, z31.b, z0.b[1]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-6, MUL VL]\n"
"ld1b { z3.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "add x22, x22, #0x10\n"
+ "sdot z17.s, z31.b, z0.b[1]\n"
"sdot z21.s, z31.b, z1.b[1]\n"
"sdot z25.s, z31.b, z2.b[1]\n"
- "ld1b { z31.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z18.s, z30.b, z0.b[1]\n"
+ "ld1b { z31.b }, p2/Z, [x28, #-4, MUL VL]\n"
"sdot z22.s, z30.b, z1.b[1]\n"
"sdot z26.s, z30.b, z2.b[1]\n"
- "sdot z19.s, z29.b, z0.b[1]\n"
"ld1b { z30.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "sdot z19.s, z29.b, z0.b[1]\n"
"sdot z23.s, z29.b, z1.b[1]\n"
"sdot z27.s, z29.b, z2.b[1]\n"
- "ld1b { z29.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z16.s, z28.b, z0.b[2]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-2, MUL VL]\n"
"sdot z20.s, z28.b, z1.b[2]\n"
"sdot z24.s, z28.b, z2.b[2]\n"
- "sdot z17.s, z5.b, z0.b[2]\n"
"ld1b { z28.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ "sdot z17.s, z5.b, z0.b[2]\n"
"sdot z21.s, z5.b, z1.b[2]\n"
"sdot z25.s, z5.b, z2.b[2]\n"
"sdot z18.s, z4.b, z0.b[2]\n"
@@ -727,23 +727,23 @@ void sve_hybrid_s8qa_dot_4x4VL (
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x22]\n"
- "ld1b { z28.b }, p2/Z, [x28]\n"
- "sdot z16.s, z28.b, z0.b[0]\n"
- "sdot z20.s, z28.b, z1.b[0]\n"
- "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
- "sdot z24.s, z28.b, z2.b[0]\n"
+ "sdot z16.s, z31.b, z0.b[0]\n"
+ "sdot z20.s, z31.b, z1.b[0]\n"
"sdot z17.s, z30.b, z0.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
"sdot z21.s, z30.b, z1.b[0]\n"
- "sdot z25.s, z30.b, z2.b[0]\n"
- "addvl x28, x28, #4\n"
"sdot z18.s, z29.b, z0.b[0]\n"
"sdot z22.s, z29.b, z1.b[0]\n"
+ "sdot z24.s, z31.b, z2.b[0]\n"
+ "sdot z25.s, z30.b, z2.b[0]\n"
"sdot z26.s, z29.b, z2.b[0]\n"
"sdot z19.s, z28.b, z0.b[0]\n"
"sdot z23.s, z28.b, z1.b[0]\n"
@@ -752,14 +752,14 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1b { z31.b }, p2/Z, [x28]\n"
"ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "sdot z16.s, z31.b, z0.b[1]\n"
"ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z31.b, z0.b[1]\n"
"sdot z20.s, z31.b, z1.b[1]\n"
"sdot z24.s, z31.b, z2.b[1]\n"
"sdot z17.s, z30.b, z0.b[1]\n"
"sdot z21.s, z30.b, z1.b[1]\n"
- "addvl x28, x28, #4\n"
"sdot z25.s, z30.b, z2.b[1]\n"
"sdot z18.s, z29.b, z0.b[1]\n"
"sdot z22.s, z29.b, z1.b[1]\n"
@@ -771,14 +771,14 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1b { z31.b }, p2/Z, [x28]\n"
"ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "sdot z16.s, z31.b, z0.b[2]\n"
"ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z31.b, z0.b[2]\n"
"sdot z20.s, z31.b, z1.b[2]\n"
"sdot z24.s, z31.b, z2.b[2]\n"
"sdot z17.s, z30.b, z0.b[2]\n"
"sdot z21.s, z30.b, z1.b[2]\n"
- "addvl x28, x28, #4\n"
"sdot z25.s, z30.b, z2.b[2]\n"
"sdot z18.s, z29.b, z0.b[2]\n"
"sdot z22.s, z29.b, z1.b[2]\n"
@@ -789,15 +789,15 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ble 38f\n"
"ld1b { z31.b }, p2/Z, [x28]\n"
"ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
- "sdot z16.s, z31.b, z0.b[3]\n"
- "sdot z20.s, z31.b, z1.b[3]\n"
"ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z31.b, z0.b[3]\n"
+ "sdot z20.s, z31.b, z1.b[3]\n"
"sdot z24.s, z31.b, z2.b[3]\n"
"sdot z17.s, z30.b, z0.b[3]\n"
"sdot z21.s, z30.b, z1.b[3]\n"
"sdot z25.s, z30.b, z2.b[3]\n"
- "addvl x28, x28, #4\n"
"sdot z18.s, z29.b, z0.b[3]\n"
"sdot z22.s, z29.b, z1.b[3]\n"
"sdot z26.s, z29.b, z2.b[3]\n"
@@ -815,22 +815,22 @@ void sve_hybrid_s8qa_dot_4x4VL (
"cmp x26, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
"tbnz %x[flags], #31, 40f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, #0x4\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1rw { z28.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z28.s, p2/M, z28.s\n"
"saddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"saddv d12, p0, z12.s\n"
"saddv d13, p0, z13.s\n"
+ "mov z11.s, z11.s[0]\n"
"mov z12.s, z12.s[0]\n"
- "mov z13.s, z13.s[0]\n"
- "neg z28.s, p2/M, z28.s\n"
"mul z11.s, p2/M, z11.s, z28.s\n"
"mul z12.s, p2/M, z12.s, z28.s\n"
+ "mov z13.s, z13.s[0]\n"
"mul z13.s, p2/M, z13.s, z28.s\n"
"40:" // Height 3: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
@@ -885,18 +885,18 @@ void sve_hybrid_s8qa_dot_4x4VL (
"and z30.d, z18.d, z0.d\n"
"and z29.d, z19.d, z0.d\n"
"and z28.d, z20.d, z0.d\n"
+ "and z3.d, z21.d, z0.d\n"
"asr z1.s, z1.s, #0x1f\n"
"asr z31.s, z31.s, #0x1f\n"
"asr z30.s, z30.s, #0x1f\n"
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
+ "and z2.d, z22.d, z0.d\n"
"sqadd z16.s, z16.s, z1.s\n"
"sqadd z17.s, z17.s, z31.s\n"
"sqadd z18.s, z18.s, z30.s\n"
"sqadd z19.s, z19.s, z29.s\n"
"sqadd z20.s, z20.s, z28.s\n"
- "and z3.d, z21.d, z0.d\n"
- "and z2.d, z22.d, z0.d\n"
"and z1.d, z23.d, z0.d\n"
"and z31.d, z24.d, z0.d\n"
"and z30.d, z25.d, z0.d\n"
@@ -918,35 +918,35 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sqadd z27.s, z27.s, z28.s\n"
"41:" // Height 3: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z28.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z28.s\n"
+ "ld1rw { z30.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z28.s\n"
- "add z18.s, z18.s, z28.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z28.s\n"
- "add z20.s, z20.s, z28.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z28.s\n"
- "add z22.s, z22.s, z28.s\n"
+ "add z16.s, z16.s, z30.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z28.s\n"
- "add z24.s, z24.s, z28.s\n"
+ "add z17.s, z17.s, z30.s\n"
+ "add z18.s, z18.s, z30.s\n"
".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z28.s\n"
- "add z26.s, z26.s, z28.s\n"
+ "add z19.s, z19.s, z30.s\n"
+ "add z20.s, z20.s, z30.s\n"
".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z21.s, z21.s, z30.s\n"
+ "add z22.s, z22.s, z30.s\n"
"ld1rw { z29.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z28.s\n"
+ "add z23.s, z23.s, z30.s\n"
+ "add z24.s, z24.s, z30.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z30.s\n"
+ "add z26.s, z26.s, z30.s\n"
"ld1rw { z28.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z30.s\n"
"smin z16.s, p2/M, z16.s, z29.s\n"
"smin z17.s, p2/M, z17.s, z29.s\n"
"smin z18.s, p2/M, z18.s, z29.s\n"
@@ -962,28 +962,28 @@ void sve_hybrid_s8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z28.s\n"
"smax z17.s, p2/M, z17.s, z28.s\n"
"smax z18.s, p2/M, z18.s, z28.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z28.s\n"
"smax z20.s, p2/M, z20.s, z28.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z21.s, p2/M, z21.s, z28.s\n"
"smax z22.s, p2/M, z22.s, z28.s\n"
- "uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
"smax z23.s, p2/M, z23.s, z28.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"smax z24.s, p2/M, z24.s, z28.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
"smax z25.s, p2/M, z25.s, z28.s\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
"smax z26.s, p2/M, z26.s, z28.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x23]\n"
"smax z27.s, p2/M, z27.s, z28.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
- "st1b { z24.b }, p1, [x22]\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z18.h, z22.h, z23.h\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "uzp1 z20.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x27]\n"
"addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z20.b }, p1, [x24]\n"
+ "st1b { z24.b }, p1, [x23]\n"
"42:" // Height 3: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -991,27 +991,28 @@ void sve_hybrid_s8qa_dot_4x4VL (
"b 58f\n"
"43:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x4\n"
"mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
- "mov z13.s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov z15.b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "mov z15.b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 4: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -1026,8 +1027,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"mov x26, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1052,37 +1053,37 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ble 51f\n"
"49:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x22]\n"
"ld1rqb { z3.b }, p0/Z, [x21]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
- "ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x21, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"sdot z16.s, z5.b, z0.b[0]\n"
"sdot z20.s, z5.b, z1.b[0]\n"
- "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "sdot z17.s, z10.b, z0.b[0]\n"
+ "sdot z21.s, z10.b, z1.b[0]\n"
"sdot z24.s, z5.b, z2.b[0]\n"
"sdot z28.s, z5.b, z3.b[0]\n"
- "sdot z17.s, z4.b, z0.b[0]\n"
- "sdot z21.s, z4.b, z1.b[0]\n"
- "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
- "sdot z25.s, z4.b, z2.b[0]\n"
- "sdot z29.s, z4.b, z3.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
"addvl x28, x28, #16\n"
+ "sdot z25.s, z10.b, z2.b[0]\n"
+ "sdot z29.s, z10.b, z3.b[0]\n"
+ "sdot z18.s, z4.b, z0.b[0]\n"
+ "sdot z22.s, z4.b, z1.b[0]\n"
+ "sdot z26.s, z4.b, z2.b[0]\n"
+ "sdot z30.s, z4.b, z3.b[0]\n"
"ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z30.s, z10.b, z3.b[0]\n"
"ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "add x21, x21, #0x10\n"
"sdot z19.s, z9.b, z0.b[0]\n"
"sdot z23.s, z9.b, z1.b[0]\n"
"sdot z27.s, z9.b, z2.b[0]\n"
@@ -1152,26 +1153,26 @@ void sve_hybrid_s8qa_dot_4x4VL (
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x22]\n"
"ld1rqb { z3.b }, p0/Z, [x21]\n"
- "ld1b { z7.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
"sdot z16.s, z7.b, z0.b[0]\n"
"sdot z20.s, z7.b, z1.b[0]\n"
- "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
- "sdot z24.s, z7.b, z2.b[0]\n"
- "sdot z28.s, z7.b, z3.b[0]\n"
"sdot z17.s, z6.b, z0.b[0]\n"
"sdot z21.s, z6.b, z1.b[0]\n"
- "addvl x28, x28, #4\n"
- "sdot z25.s, z6.b, z2.b[0]\n"
- "sdot z29.s, z6.b, z3.b[0]\n"
"sdot z18.s, z5.b, z0.b[0]\n"
"sdot z22.s, z5.b, z1.b[0]\n"
+ "sdot z24.s, z7.b, z2.b[0]\n"
+ "sdot z28.s, z7.b, z3.b[0]\n"
+ "sdot z25.s, z6.b, z2.b[0]\n"
+ "sdot z29.s, z6.b, z3.b[0]\n"
"sdot z26.s, z5.b, z2.b[0]\n"
"sdot z30.s, z5.b, z3.b[0]\n"
"sdot z19.s, z4.b, z0.b[0]\n"
@@ -1182,14 +1183,14 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1b { z7.b }, p2/Z, [x28]\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "sdot z16.s, z7.b, z0.b[1]\n"
"ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z7.b, z0.b[1]\n"
"sdot z20.s, z7.b, z1.b[1]\n"
"sdot z24.s, z7.b, z2.b[1]\n"
"sdot z28.s, z7.b, z3.b[1]\n"
"sdot z17.s, z6.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"sdot z21.s, z6.b, z1.b[1]\n"
"sdot z25.s, z6.b, z2.b[1]\n"
"sdot z29.s, z6.b, z3.b[1]\n"
@@ -1205,14 +1206,14 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ld1b { z7.b }, p2/Z, [x28]\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "sdot z16.s, z7.b, z0.b[2]\n"
"ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z7.b, z0.b[2]\n"
"sdot z20.s, z7.b, z1.b[2]\n"
"sdot z24.s, z7.b, z2.b[2]\n"
"sdot z28.s, z7.b, z3.b[2]\n"
"sdot z17.s, z6.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"sdot z21.s, z6.b, z1.b[2]\n"
"sdot z25.s, z6.b, z2.b[2]\n"
"sdot z29.s, z6.b, z3.b[2]\n"
@@ -1227,15 +1228,15 @@ void sve_hybrid_s8qa_dot_4x4VL (
"ble 52f\n"
"ld1b { z7.b }, p2/Z, [x28]\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "sdot z16.s, z7.b, z0.b[3]\n"
- "sdot z20.s, z7.b, z1.b[3]\n"
"ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "sdot z16.s, z7.b, z0.b[3]\n"
+ "sdot z20.s, z7.b, z1.b[3]\n"
"sdot z24.s, z7.b, z2.b[3]\n"
"sdot z28.s, z7.b, z3.b[3]\n"
"sdot z17.s, z6.b, z0.b[3]\n"
"sdot z21.s, z6.b, z1.b[3]\n"
- "addvl x28, x28, #4\n"
"sdot z25.s, z6.b, z2.b[3]\n"
"sdot z29.s, z6.b, z3.b[3]\n"
"sdot z18.s, z5.b, z0.b[3]\n"
@@ -1258,25 +1259,25 @@ void sve_hybrid_s8qa_dot_4x4VL (
"cmp x26, x20\n"
"bne 46b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
"add x22, x23, x20\n"
- "add x21, x22, x20\n"
"tbnz %x[flags], #31, 54f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, #0x4\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z0.s, p2/M, z0.s\n"
"saddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"saddv d12, p0, z12.s\n"
"saddv d13, p0, z13.s\n"
- "mov z12.s, z12.s[0]\n"
- "mov z13.s, z13.s[0]\n"
"saddv d14, p0, z14.s\n"
- "neg z0.s, p2/M, z0.s\n"
- "mov z14.s, z14.s[0]\n"
+ "mov z11.s, z11.s[0]\n"
+ "mov z12.s, z12.s[0]\n"
"mul z11.s, p2/M, z11.s, z0.s\n"
"mul z12.s, p2/M, z12.s, z0.s\n"
+ "mov z13.s, z13.s[0]\n"
+ "mov z14.s, z14.s[0]\n"
"mul z13.s, p2/M, z13.s, z0.s\n"
"mul z14.s, p2/M, z14.s, z0.s\n"
"54:" // Height 4: skip row sum fixup
@@ -1341,32 +1342,32 @@ void sve_hybrid_s8qa_dot_4x4VL (
"tbz %x[flags], #5, 55f\n"
"and z2.d, z16.d, z0.d\n"
"and z1.d, z17.d, z0.d\n"
- "asr z2.s, z2.s, #0x1f\n"
- "asr z1.s, z1.s, #0x1f\n"
- "sqadd z16.s, z16.s, z2.s\n"
- "sqadd z17.s, z17.s, z1.s\n"
"and z7.d, z18.d, z0.d\n"
"and z6.d, z19.d, z0.d\n"
"and z5.d, z20.d, z0.d\n"
"and z4.d, z21.d, z0.d\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
"and z3.d, z22.d, z0.d\n"
- "and z2.d, z23.d, z0.d\n"
- "and z1.d, z24.d, z0.d\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z2.s\n"
+ "sqadd z17.s, z17.s, z1.s\n"
+ "and z2.d, z23.d, z0.d\n"
+ "and z1.d, z24.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z3.s, z3.s, #0x1f\n"
- "asr z2.s, z2.s, #0x1f\n"
- "asr z1.s, z1.s, #0x1f\n"
"sqadd z18.s, z18.s, z7.s\n"
"sqadd z19.s, z19.s, z6.s\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z4.s\n"
"sqadd z22.s, z22.s, z3.s\n"
+ "and z7.d, z25.d, z0.d\n"
"sqadd z23.s, z23.s, z2.s\n"
"sqadd z24.s, z24.s, z1.s\n"
- "and z7.d, z25.d, z0.d\n"
"and z6.d, z26.d, z0.d\n"
"and z5.d, z27.d, z0.d\n"
"and z4.d, z28.d, z0.d\n"
@@ -1389,43 +1390,43 @@ void sve_hybrid_s8qa_dot_4x4VL (
"sqadd z31.s, z31.s, z1.s\n"
"55:" // Height 4: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z2.s\n"
+ "ld1rw { z2.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z2.s\n"
- "add z18.s, z18.s, z2.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z2.s\n"
- "add z20.s, z20.s, z2.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z2.s\n"
- "add z22.s, z22.s, z2.s\n"
+ "add z16.s, z16.s, z2.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z2.s\n"
- "add z24.s, z24.s, z2.s\n"
+ "add z17.s, z17.s, z2.s\n"
+ "add z18.s, z18.s, z2.s\n"
".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z2.s\n"
- "add z26.s, z26.s, z2.s\n"
+ "add z19.s, z19.s, z2.s\n"
+ "add z20.s, z20.s, z2.s\n"
".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
- "add z27.s, z27.s, z2.s\n"
- "add z28.s, z28.s, z2.s\n"
+ "add z21.s, z21.s, z2.s\n"
+ "add z22.s, z22.s, z2.s\n"
".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
- "add z29.s, z29.s, z2.s\n"
- "add z30.s, z30.s, z2.s\n"
+ "add z23.s, z23.s, z2.s\n"
+ "add z24.s, z24.s, z2.s\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z25.s, z25.s, z2.s\n"
+ "add z26.s, z26.s, z2.s\n"
"ld1rw { z1.s }, p2/Z, [x20]\n"
- "add z31.s, z31.s, z2.s\n"
+ "add z27.s, z27.s, z2.s\n"
+ "add z28.s, z28.s, z2.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z29.s, z29.s, z2.s\n"
+ "add z30.s, z30.s, z2.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z31.s, z31.s, z2.s\n"
"smin z16.s, p2/M, z16.s, z1.s\n"
"smin z17.s, p2/M, z17.s, z1.s\n"
"smin z18.s, p2/M, z18.s, z1.s\n"
@@ -1445,36 +1446,36 @@ void sve_hybrid_s8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z0.s\n"
"smax z17.s, p2/M, z17.s, z0.s\n"
"smax z18.s, p2/M, z18.s, z0.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z0.s\n"
"smax z20.s, p2/M, z20.s, z0.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z21.s, p2/M, z21.s, z0.s\n"
"smax z22.s, p2/M, z22.s, z0.s\n"
- "uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
"smax z23.s, p2/M, z23.s, z0.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"smax z24.s, p2/M, z24.s, z0.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
"smax z25.s, p2/M, z25.s, z0.s\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
"smax z26.s, p2/M, z26.s, z0.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x23]\n"
"smax z27.s, p2/M, z27.s, z0.s\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
"smax z28.s, p2/M, z28.s, z0.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"smax z29.s, p2/M, z29.s, z0.s\n"
+ "uzp1 z17.h, z22.h, z23.h\n"
"smax z30.s, p2/M, z30.s, z0.s\n"
- "uzp1 z28.h, z28.h, z29.h\n"
- "st1b { z24.b }, p1, [x22]\n"
"smax z31.s, p2/M, z31.s, z0.s\n"
- "uzp1 z16.h, z30.h, z31.h\n"
- "uzp1 z28.b, z28.b, z16.b\n"
- "st1b { z28.b }, p1, [x21]\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z18.h, z26.h, z27.h\n"
+ "uzp1 z28.h, z28.h, z29.h\n"
+ "uzp1 z20.b, z20.b, z17.b\n"
+ "uzp1 z17.h, z30.h, z31.h\n"
+ "st1b { z16.b }, p1, [x27]\n"
"addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z18.b\n"
+ "uzp1 z28.b, z28.b, z17.b\n"
+ "st1b { z20.b }, p1, [x24]\n"
+ "st1b { z24.b }, p1, [x23]\n"
+ "st1b { z28.b }, p1, [x22]\n"
"56:" // Height 4: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -1491,8 +1492,8 @@ void sve_hybrid_s8qa_dot_4x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
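The net effect on the asm interface sits in the operand lists just above: output_ptr drops out of the read-write register operands ("+&r") and is instead reached through args_ptr via the new offsetof_output_ptr immediate, so the compiler no longer has to keep a general-purpose register pinned to the pointer for the duration of the block. A stripped-down illustration of the new style against a hypothetical two-line kernel, not ComputeLibrary API:

#include <cstddef>

struct Args { void *out; };

// The pointer travels through the args struct and an offsetof() "I"
// immediate: one ldr inside the asm instead of a dedicated "+&r"
// operand held live across the whole block.
void store_zero(Args &ka)
{
    __asm__ __volatile__(
        "ldr x1, [%x[args], %[off_out]]\n"  // fetch the pointer on demand
        "str xzr, [x1]\n"                   // write through it
        :
        : [args] "r"(&ka), [off_out] "I"(offsetof(Args, out))
        : "cc", "memory", "x1");
}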
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL.hpp
index ae922e9743..96550f4839 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsSVE<rhs_operand_type, result_type, 4, 8, 8> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 8, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
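The one interface change in this header is the transforms member: StdTransformsSVE now takes the LHS operand type as an explicit first template argument instead of reusing the RHS type, as the before/after instantiation shows. Sketching the shape of the declaration from that instantiation alone (parameter names are my guesses, not the header's):

template <typename TOperandL, typename TOperandR, typename TResult,
          unsigned int height, unsigned int width, unsigned int kblock>
class StdTransformsSVE;

// before: StdTransformsSVE<rhs_operand_type, result_type, 4, 8, 8>
// after:  StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 8, 8>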
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp
index e0628364f4..b8e65e6999 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_mmla_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void sve_hybrid_s8qa_mmla_4x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -91,24 +91,24 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"3:" // Height 1: setup done
"mov x26, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -124,43 +124,43 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"ble 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z30.b }, p2/Z, [x28]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z26.d\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
- ".inst 0x45189810 // smmla z16.s, z0.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #1, MUL VL]\n"
- "trn2 z1.d, z1.d, z26.d\n"
- "ld1b { z24.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z26.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45199814 // smmla z20.s, z0.b, z25.b\n"
- ".inst 0x45189811 // smmla z17.s, z0.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x451a9815 // smmla z21.s, z0.b, z26.b\n"
- ".inst 0x45199812 // smmla z18.s, z0.b, z25.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #6, MUL VL]\n"
- "ld1b { z25.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "trn1 z0.d, z1.d, z31.d\n"
+ ".inst 0x451e9810 // smmla z16.s, z0.b, z30.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45189816 // smmla z22.s, z0.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x451a9813 // smmla z19.s, z0.b, z26.b\n"
- ".inst 0x45199817 // smmla z23.s, z0.b, z25.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45189830 // smmla z16.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x451a9834 // smmla z20.s, z1.b, z26.b\n"
+ "trn2 z1.d, z1.d, z31.d\n"
+ ".inst 0x451d9814 // smmla z20.s, z0.b, z29.b\n"
+ ".inst 0x451c9811 // smmla z17.s, z0.b, z28.b\n"
+ ".inst 0x451b9815 // smmla z21.s, z0.b, z27.b\n"
+ ".inst 0x451a9812 // smmla z18.s, z0.b, z26.b\n"
+ "ld1b { z31.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45199816 // smmla z22.s, z0.b, z25.b\n"
+ ".inst 0x45189813 // smmla z19.s, z0.b, z24.b\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x45089817 // smmla z23.s, z0.b, z8.b\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
"ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x451f9830 // smmla z16.s, z1.b, z31.b\n"
"ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
- ".inst 0x45199831 // smmla z17.s, z1.b, z25.b\n"
- ".inst 0x45189835 // smmla z21.s, z1.b, z24.b\n"
"ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x451e9834 // smmla z20.s, z1.b, z30.b\n"
+ ".inst 0x451d9831 // smmla z17.s, z1.b, z29.b\n"
+ ".inst 0x451c9835 // smmla z21.s, z1.b, z28.b\n"
".inst 0x451b9832 // smmla z18.s, z1.b, z27.b\n"
".inst 0x451a9836 // smmla z22.s, z1.b, z26.b\n"
".inst 0x45199833 // smmla z19.s, z1.b, z25.b\n"
".inst 0x45189837 // smmla z23.s, z1.b, z24.b\n"
- "add x24, x24, #0x10\n"
"tbnz %x[flags], #31, 8f\n"
"sdot z11.s, z0.b, z15.b\n"
"sdot z11.s, z1.b, z15.b\n"
@@ -170,45 +170,45 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"bgt 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z27.d\n"
"ld1b { z24.b }, p2/Z, [x28]\n"
- ".inst 0x45189810 // smmla z16.s, z0.b, z24.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
"subs x25, x25, #0x8\n"
- "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
- "trn2 z1.d, z1.d, z27.d\n"
- ".inst 0x451a9814 // smmla z20.s, z0.b, z26.b\n"
"ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45199811 // smmla z17.s, z0.b, z25.b\n"
- ".inst 0x45189815 // smmla z21.s, z0.b, z24.b\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
"ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z31.d\n"
+ ".inst 0x45189810 // smmla z16.s, z0.b, z24.b\n"
"ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ "trn2 z1.d, z1.d, z31.d\n"
+ ".inst 0x451e9814 // smmla z20.s, z0.b, z30.b\n"
+ ".inst 0x451d9811 // smmla z17.s, z0.b, z29.b\n"
+ ".inst 0x451c9815 // smmla z21.s, z0.b, z28.b\n"
".inst 0x451b9812 // smmla z18.s, z0.b, z27.b\n"
".inst 0x451a9816 // smmla z22.s, z0.b, z26.b\n"
".inst 0x45199813 // smmla z19.s, z0.b, z25.b\n"
".inst 0x45189817 // smmla z23.s, z0.b, z24.b\n"
- "addvl x28, x28, #8\n"
"ble 10f\n"
"ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45189830 // smmla z16.s, z1.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45189834 // smmla z20.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45199831 // smmla z17.s, z1.b, z25.b\n"
- ".inst 0x45189835 // smmla z21.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45199832 // smmla z18.s, z1.b, z25.b\n"
- ".inst 0x45189836 // smmla z22.s, z1.b, z24.b\n"
"ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x451e9834 // smmla z20.s, z1.b, z30.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x451d9831 // smmla z17.s, z1.b, z29.b\n"
+ ".inst 0x451c9835 // smmla z21.s, z1.b, z28.b\n"
+ ".inst 0x451b9832 // smmla z18.s, z1.b, z27.b\n"
+ ".inst 0x451a9836 // smmla z22.s, z1.b, z26.b\n"
".inst 0x45199833 // smmla z19.s, z1.b, z25.b\n"
".inst 0x45189837 // smmla z23.s, z1.b, z24.b\n"
- "addvl x28, x28, #8\n"
"10:" // Height 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"sdot z11.s, z0.b, z15.b\n"
@@ -225,32 +225,32 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z23.d, z16.d\n"
"tbnz %x[flags], #31, 12f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
- "neg z16.s, p2/M, z16.s\n"
+ "ld1rw { z9.s }, p2/Z, [x20]\n"
+ "neg z9.s, p2/M, z9.s\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z16.s\n"
+ "mul z11.s, p2/M, z11.s, z9.s\n"
"12:" // Height 1: skip row sum fixup
"add z23.s, z23.s, z11.s\n"
"add z17.s, z17.s, z11.s\n"
"ld1w { z22.s }, p2/Z, [x10]\n"
- "ld1w { z21.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x10, #1, MUL VL]\n"
"add z18.s, z18.s, z11.s\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z20.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x10, #3, MUL VL]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"add z23.s, z23.s, z22.s\n"
- "add z17.s, z17.s, z21.s\n"
- "add z18.s, z18.s, z20.s\n"
- "add z19.s, z19.s, z16.s\n"
+ "add z17.s, z17.s, z24.s\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
+ "addvl x10, x10, #4\n"
+ "add z18.s, z18.s, z21.s\n"
+ "add z19.s, z19.s, z20.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
".inst 0x04b076f7 // sqrdmulh z23.s, z23.s, z16.s\n"
".inst 0x04b07631 // sqrdmulh z17.s, z17.s, z16.s\n"
- "addvl x10, x10, #4\n"
".inst 0x04b07652 // sqrdmulh z18.s, z18.s, z16.s\n"
".inst 0x04b07673 // sqrdmulh z19.s, z19.s, z16.s\n"
"tbz %x[flags], #5, 13f\n"
@@ -268,19 +268,19 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"sqadd z19.s, z19.s, z16.s\n"
"13:" // Height 1: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z16.s\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z16.s\n"
- "add z18.s, z18.s, z16.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z16.s\n"
+ "add z23.s, z23.s, z21.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z21.s\n"
+ "add z18.s, z18.s, z21.s\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z21.s\n"
"smin z23.s, p2/M, z23.s, z20.s\n"
"smin z17.s, p2/M, z17.s, z20.s\n"
"smin z18.s, p2/M, z18.s, z20.s\n"
@@ -288,8 +288,8 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"smax z23.s, p2/M, z23.s, z16.s\n"
"smax z17.s, p2/M, z17.s, z16.s\n"
"smax z18.s, p2/M, z18.s, z16.s\n"
- "uzp1 z23.h, z23.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z16.s\n"
+ "uzp1 z23.h, z23.h, z17.h\n"
"uzp1 z16.h, z18.h, z19.h\n"
"uzp1 z23.b, z23.b, z16.b\n"
"st1b { z23.b }, p1, [x27]\n"
@@ -307,24 +307,24 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z15.b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"16:" // Height 2: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"17:" // Height 2: setup done
"mov x26, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -343,45 +343,45 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"ble 23f\n"
"21:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z26.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z26.d\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
- ".inst 0x45189810 // smmla z16.s, z0.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #1, MUL VL]\n"
- "trn2 z1.d, z1.d, z26.d\n"
- "ld1b { z24.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z26.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45199814 // smmla z20.s, z0.b, z25.b\n"
- ".inst 0x45189811 // smmla z17.s, z0.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x451a9815 // smmla z21.s, z0.b, z26.b\n"
- ".inst 0x45199812 // smmla z18.s, z0.b, z25.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1rqb { z25.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1b { z24.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ ".inst 0x451f9810 // smmla z16.s, z0.b, z31.b\n"
"ld1b { z25.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45189816 // smmla z22.s, z0.b, z24.b\n"
+ ".inst 0x451e9814 // smmla z20.s, z0.b, z30.b\n"
+ ".inst 0x451d9811 // smmla z17.s, z0.b, z29.b\n"
+ ".inst 0x451c9815 // smmla z21.s, z0.b, z28.b\n"
+ ".inst 0x451b9812 // smmla z18.s, z0.b, z27.b\n"
+ ".inst 0x451a9816 // smmla z22.s, z0.b, z26.b\n"
+ ".inst 0x45189813 // smmla z19.s, z0.b, z24.b\n"
"ld1b { z24.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x451a9813 // smmla z19.s, z0.b, z26.b\n"
".inst 0x45199817 // smmla z23.s, z0.b, z25.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45189830 // smmla z16.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x451a9834 // smmla z20.s, z1.b, z26.b\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
"ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45189830 // smmla z16.s, z1.b, z24.b\n"
"ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
- ".inst 0x45199831 // smmla z17.s, z1.b, z25.b\n"
- ".inst 0x45189835 // smmla z21.s, z1.b, z24.b\n"
"ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x451e9834 // smmla z20.s, z1.b, z30.b\n"
+ ".inst 0x451d9831 // smmla z17.s, z1.b, z29.b\n"
+ ".inst 0x451c9835 // smmla z21.s, z1.b, z28.b\n"
".inst 0x451b9832 // smmla z18.s, z1.b, z27.b\n"
".inst 0x451a9836 // smmla z22.s, z1.b, z26.b\n"
".inst 0x45199833 // smmla z19.s, z1.b, z25.b\n"
".inst 0x45189837 // smmla z23.s, z1.b, z24.b\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"tbnz %x[flags], #31, 22f\n"
"sdot z11.s, z0.b, z15.b\n"
"sdot z11.s, z1.b, z15.b\n"
@@ -391,46 +391,46 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z27.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z27.d\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
- ".inst 0x45189810 // smmla z16.s, z0.b, z24.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x8\n"
- "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
- "trn2 z1.d, z1.d, z27.d\n"
- ".inst 0x451a9814 // smmla z20.s, z0.b, z26.b\n"
- "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45199811 // smmla z17.s, z0.b, z25.b\n"
- ".inst 0x45189815 // smmla z21.s, z0.b, z24.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
"ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z24.d\n"
+ "trn2 z1.d, z1.d, z24.d\n"
+ ".inst 0x451d9810 // smmla z16.s, z0.b, z29.b\n"
"ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
- ".inst 0x451b9812 // smmla z18.s, z0.b, z27.b\n"
- ".inst 0x451a9816 // smmla z22.s, z0.b, z26.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x451c9814 // smmla z20.s, z0.b, z28.b\n"
+ ".inst 0x45049811 // smmla z17.s, z0.b, z4.b\n"
+ ".inst 0x451b9815 // smmla z21.s, z0.b, z27.b\n"
+ ".inst 0x451a9812 // smmla z18.s, z0.b, z26.b\n"
+ ".inst 0x45069816 // smmla z22.s, z0.b, z6.b\n"
".inst 0x45199813 // smmla z19.s, z0.b, z25.b\n"
".inst 0x45189817 // smmla z23.s, z0.b, z24.b\n"
- "addvl x28, x28, #8\n"
"ble 24f\n"
"ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45189830 // smmla z16.s, z1.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45189834 // smmla z20.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45199831 // smmla z17.s, z1.b, z25.b\n"
- ".inst 0x45189835 // smmla z21.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45199832 // smmla z18.s, z1.b, z25.b\n"
- ".inst 0x45189836 // smmla z22.s, z1.b, z24.b\n"
"ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x451e9834 // smmla z20.s, z1.b, z30.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x451d9831 // smmla z17.s, z1.b, z29.b\n"
+ ".inst 0x451c9835 // smmla z21.s, z1.b, z28.b\n"
+ ".inst 0x451b9832 // smmla z18.s, z1.b, z27.b\n"
+ ".inst 0x451a9836 // smmla z22.s, z1.b, z26.b\n"
".inst 0x45199833 // smmla z19.s, z1.b, z25.b\n"
".inst 0x45189837 // smmla z23.s, z1.b, z24.b\n"
- "addvl x28, x28, #8\n"
"24:" // Height 2: Multiply loop: multiply skip
"tbnz %x[flags], #31, 25f\n"
"sdot z11.s, z0.b, z15.b\n"
@@ -443,18 +443,18 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"uzp1 z24.d, z16.d, z20.d\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x23, x27, x20\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "add x23, x27, x20\n"
"mov z23.d, z24.d\n"
"tbnz %x[flags], #31, 26f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z24.s }, p2/Z, [x20]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ "ld1rw { z24.s }, p2/Z, [x20]\n"
"neg z24.s, p2/M, z24.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
@@ -497,24 +497,24 @@ void sve_hybrid_s8qa_mmla_4x4VL (
".inst 0x04b87673 // sqrdmulh z19.s, z19.s, z24.s\n"
"tbz %x[flags], #5, 27f\n"
"and z24.d, z23.d, z0.d\n"
- "asr z24.s, z24.s, #0x1f\n"
- "sqadd z23.s, z23.s, z24.s\n"
"and z30.d, z20.d, z0.d\n"
"and z29.d, z21.d, z0.d\n"
"and z28.d, z22.d, z0.d\n"
"and z27.d, z16.d, z0.d\n"
"and z26.d, z17.d, z0.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
"and z25.d, z18.d, z0.d\n"
- "and z24.d, z19.d, z0.d\n"
"asr z30.s, z30.s, #0x1f\n"
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"asr z27.s, z27.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z24.s\n"
+ "and z24.d, z19.d, z0.d\n"
"asr z26.s, z26.s, #0x1f\n"
"asr z25.s, z25.s, #0x1f\n"
- "asr z24.s, z24.s, #0x1f\n"
"sqadd z20.s, z20.s, z30.s\n"
"sqadd z21.s, z21.s, z29.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
"sqadd z22.s, z22.s, z28.s\n"
"sqadd z16.s, z16.s, z27.s\n"
"sqadd z17.s, z17.s, z26.s\n"
@@ -522,27 +522,27 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"sqadd z19.s, z19.s, z24.s\n"
"27:" // Height 2: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z24.s }, p2/Z, [x20]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z24.s\n"
+ "ld1rw { z26.s }, p2/Z, [x20]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z24.s\n"
- "add z21.s, z21.s, z24.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z24.s\n"
- "add z16.s, z16.s, z24.s\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z24.s\n"
- "add z18.s, z18.s, z24.s\n"
+ "add z23.s, z23.s, z26.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z20.s, z20.s, z26.s\n"
+ "add z21.s, z21.s, z26.s\n"
"ld1rw { z25.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z24.s\n"
+ "add z22.s, z22.s, z26.s\n"
+ "add z16.s, z16.s, z26.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z26.s\n"
+ "add z18.s, z18.s, z26.s\n"
"ld1rw { z24.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z26.s\n"
"smin z23.s, p2/M, z23.s, z25.s\n"
"smin z20.s, p2/M, z20.s, z25.s\n"
"smin z21.s, p2/M, z21.s, z25.s\n"
@@ -554,20 +554,20 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"smax z23.s, p2/M, z23.s, z24.s\n"
"smax z20.s, p2/M, z20.s, z24.s\n"
"smax z21.s, p2/M, z21.s, z24.s\n"
- "uzp1 z23.h, z23.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z24.s\n"
"smax z16.s, p2/M, z16.s, z24.s\n"
- "uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z23.b, z23.b, z20.b\n"
"smax z17.s, p2/M, z17.s, z24.s\n"
"smax z18.s, p2/M, z18.s, z24.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z23.b }, p1, [x27]\n"
"smax z19.s, p2/M, z19.s, z24.s\n"
+ "uzp1 z23.h, z23.h, z20.h\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "uzp1 z23.b, z23.b, z20.b\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x23]\n"
+ "st1b { z23.b }, p1, [x27]\n"
"addvl x27, x27, #1\n"
+ "st1b { z16.b }, p1, [x23]\n"
"28:" // Height 2: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -582,16 +582,16 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z15.b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"30:" // Height 3: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -606,8 +606,8 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov x26, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -629,49 +629,49 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"ble 37f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
"trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z4.b }, p2/Z, [x28]\n"
- "trn1 z2.d, z3.d, z5.d\n"
- "trn2 z3.d, z3.d, z5.d\n"
- ".inst 0x45049810 // smmla z16.s, z0.b, z4.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45049858 // smmla z24.s, z2.b, z4.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45059814 // smmla z20.s, z0.b, z5.b\n"
- "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x4505985c // smmla z28.s, z2.b, z5.b\n"
- ".inst 0x45049811 // smmla z17.s, z0.b, z4.b\n"
+ "trn1 z2.d, z3.d, z6.d\n"
+ "trn2 z3.d, z3.d, z6.d\n"
"ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ ".inst 0x450a9814 // smmla z20.s, z0.b, z10.b\n"
+ ".inst 0x45099811 // smmla z17.s, z0.b, z9.b\n"
+ ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
+ ".inst 0x45049812 // smmla z18.s, z0.b, z4.b\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45049859 // smmla z25.s, z2.b, z4.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45099815 // smmla z21.s, z0.b, z9.b\n"
- ".inst 0x4509985d // smmla z29.s, z2.b, z9.b\n"
- "ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45089812 // smmla z18.s, z0.b, z8.b\n"
- ".inst 0x4508985a // smmla z26.s, z2.b, z8.b\n"
- "ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ ".inst 0x450a985c // smmla z28.s, z2.b, z10.b\n"
+ ".inst 0x45099859 // smmla z25.s, z2.b, z9.b\n"
+ ".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
+ ".inst 0x4504985a // smmla z26.s, z2.b, z4.b\n"
".inst 0x45079816 // smmla z22.s, z0.b, z7.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x4507985e // smmla z30.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x45069813 // smmla z19.s, z0.b, z6.b\n"
+ "ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x4506985b // smmla z27.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
- "add x23, x23, #0x10\n"
".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4505985f // smmla z31.s, z2.b, z5.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "add x22, x22, #0x10\n"
".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x45049878 // smmla z24.s, z3.b, z4.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x450a9834 // smmla z20.s, z1.b, z10.b\n"
@@ -699,32 +699,32 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z5.b }, p2/Z, [x28]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "trn2 z3.d, z3.d, z4.d\n"
- ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
+ "ld1b { z4.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"subs x25, x25, #0x8\n"
"ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45049814 // smmla z20.s, z0.b, z4.b\n"
- ".inst 0x4504985c // smmla z28.s, z2.b, z4.b\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z5.d\n"
+ "trn2 z3.d, z3.d, z5.d\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45049810 // smmla z16.s, z0.b, z4.b\n"
+ ".inst 0x450a9814 // smmla z20.s, z0.b, z10.b\n"
".inst 0x45099811 // smmla z17.s, z0.b, z9.b\n"
- ".inst 0x45099859 // smmla z25.s, z2.b, z9.b\n"
".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
- ".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45079812 // smmla z18.s, z0.b, z7.b\n"
+ ".inst 0x45049858 // smmla z24.s, z2.b, z4.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x450a985c // smmla z28.s, z2.b, z10.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45099859 // smmla z25.s, z2.b, z9.b\n"
+ ".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
".inst 0x4507985a // smmla z26.s, z2.b, z7.b\n"
".inst 0x45069816 // smmla z22.s, z0.b, z6.b\n"
".inst 0x4506985e // smmla z30.s, z2.b, z6.b\n"
@@ -734,24 +734,24 @@ void sve_hybrid_s8qa_mmla_4x4VL (
".inst 0x4504985f // smmla z31.s, z2.b, z4.b\n"
"ble 38f\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- ".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
- ".inst 0x45049878 // smmla z24.s, z3.b, z4.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45059834 // smmla z20.s, z1.b, z5.b\n"
- ".inst 0x4505987c // smmla z28.s, z3.b, z5.b\n"
"ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45049831 // smmla z17.s, z1.b, z4.b\n"
- ".inst 0x45049879 // smmla z25.s, z3.b, z4.b\n"
+ ".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
+ ".inst 0x45049878 // smmla z24.s, z3.b, z4.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x450a9834 // smmla z20.s, z1.b, z10.b\n"
+ ".inst 0x450a987c // smmla z28.s, z3.b, z10.b\n"
+ ".inst 0x45099831 // smmla z17.s, z1.b, z9.b\n"
+ ".inst 0x45099879 // smmla z25.s, z3.b, z9.b\n"
+ "addvl x28, x28, #8\n"
".inst 0x45089835 // smmla z21.s, z1.b, z8.b\n"
".inst 0x4508987d // smmla z29.s, z3.b, z8.b\n"
".inst 0x45079832 // smmla z18.s, z1.b, z7.b\n"
".inst 0x4507987a // smmla z26.s, z3.b, z7.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45069836 // smmla z22.s, z1.b, z6.b\n"
".inst 0x4506987e // smmla z30.s, z3.b, z6.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
@@ -771,15 +771,15 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z0.d, z16.d, z20.d\n"
- "add x23, x27, x20\n"
"uzp2 z16.d, z16.d, z20.d\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
- "add x22, x23, x20\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "add x23, x27, x20\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "add x22, x23, x20\n"
"uzp1 z24.d, z24.d, z28.d\n"
"uzp1 z25.d, z25.d, z29.d\n"
"uzp1 z26.d, z26.d, z30.d\n"
@@ -787,14 +787,14 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z31.d, z0.d\n"
"tbnz %x[flags], #31, 40f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z23.s }, p2/Z, [x20]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
+ "ld1rw { z23.s }, p2/Z, [x20]\n"
"neg z23.s, p2/M, z23.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z23.s\n"
"mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z23.s\n"
"mul z12.s, p2/M, z12.s, z23.s\n"
"mul z13.s, p2/M, z13.s, z23.s\n"
"40:" // Height 3: skip row sum fixup
@@ -850,18 +850,18 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"and z29.d, z21.d, z0.d\n"
"and z28.d, z22.d, z0.d\n"
"and z23.d, z16.d, z0.d\n"
+ "and z3.d, z17.d, z0.d\n"
"asr z1.s, z1.s, #0x1f\n"
"asr z30.s, z30.s, #0x1f\n"
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"asr z23.s, z23.s, #0x1f\n"
+ "and z2.d, z18.d, z0.d\n"
"sqadd z31.s, z31.s, z1.s\n"
"sqadd z20.s, z20.s, z30.s\n"
"sqadd z21.s, z21.s, z29.s\n"
"sqadd z22.s, z22.s, z28.s\n"
"sqadd z16.s, z16.s, z23.s\n"
- "and z3.d, z17.d, z0.d\n"
- "and z2.d, z18.d, z0.d\n"
"and z1.d, z19.d, z0.d\n"
"and z30.d, z24.d, z0.d\n"
"and z29.d, z25.d, z0.d\n"
@@ -883,35 +883,35 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"sqadd z27.s, z27.s, z23.s\n"
"41:" // Height 3: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z23.s }, p2/Z, [x20]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z23.s\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z23.s\n"
- "add z21.s, z21.s, z23.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z23.s\n"
- "add z16.s, z16.s, z23.s\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z23.s\n"
- "add z18.s, z18.s, z23.s\n"
+ "add z31.s, z31.s, z29.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z19.s, z19.s, z23.s\n"
- "add z24.s, z24.s, z23.s\n"
+ "add z20.s, z20.s, z29.s\n"
+ "add z21.s, z21.s, z29.s\n"
".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z23.s\n"
- "add z26.s, z26.s, z23.s\n"
+ "add z22.s, z22.s, z29.s\n"
+ "add z16.s, z16.s, z29.s\n"
".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z17.s, z17.s, z29.s\n"
+ "add z18.s, z18.s, z29.s\n"
"ld1rw { z28.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z23.s\n"
+ "add z19.s, z19.s, z29.s\n"
+ "add z24.s, z24.s, z29.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z29.s\n"
+ "add z26.s, z26.s, z29.s\n"
"ld1rw { z23.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z29.s\n"
"smin z31.s, p2/M, z31.s, z28.s\n"
"smin z20.s, p2/M, z20.s, z28.s\n"
"smin z21.s, p2/M, z21.s, z28.s\n"
@@ -927,28 +927,28 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"smax z31.s, p2/M, z31.s, z23.s\n"
"smax z20.s, p2/M, z20.s, z23.s\n"
"smax z21.s, p2/M, z21.s, z23.s\n"
- "uzp1 z31.h, z31.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z23.s\n"
"smax z16.s, p2/M, z16.s, z23.s\n"
- "uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z31.b, z31.b, z20.b\n"
"smax z17.s, p2/M, z17.s, z23.s\n"
"smax z18.s, p2/M, z18.s, z23.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z31.b }, p1, [x27]\n"
"smax z19.s, p2/M, z19.s, z23.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
"smax z24.s, p2/M, z24.s, z23.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z25.s, p2/M, z25.s, z23.s\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
"smax z26.s, p2/M, z26.s, z23.s\n"
+ "smax z27.s, p2/M, z27.s, z23.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
"uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z31.b, z31.b, z20.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "st1b { z31.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
"st1b { z16.b }, p1, [x23]\n"
- "smax z27.s, p2/M, z27.s, z23.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"st1b { z24.b }, p1, [x22]\n"
- "addvl x27, x27, #1\n"
"42:" // Height 3: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -956,27 +956,28 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"b 58f\n"
"43:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x4\n"
"mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
- "mov z13.s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov z15.b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "mov z15.b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 4: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -991,8 +992,8 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov x26, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1017,56 +1018,56 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"ble 51f\n"
"49:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "ld1rqb { z6.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z5.d\n"
- "ld1b { z4.b }, p2/Z, [x28]\n"
- "trn2 z3.d, z3.d, z5.d\n"
- ".inst 0x45049810 // smmla z16.s, z0.b, z4.b\n"
- ".inst 0x45049858 // smmla z24.s, z2.b, z4.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
- ".inst 0x45049814 // smmla z20.s, z0.b, z4.b\n"
- ".inst 0x4504985c // smmla z28.s, z2.b, z4.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- ".inst 0x45059811 // smmla z17.s, z0.b, z5.b\n"
- ".inst 0x45059859 // smmla z25.s, z2.b, z5.b\n"
- "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "trn1 z2.d, z3.d, z6.d\n"
+ "trn2 z3.d, z3.d, z6.d\n"
"ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
- ".inst 0x45049815 // smmla z21.s, z0.b, z4.b\n"
- ".inst 0x4504985d // smmla z29.s, z2.b, z4.b\n"
+ ".inst 0x45059810 // smmla z16.s, z0.b, z5.b\n"
+ ".inst 0x45049814 // smmla z20.s, z0.b, z4.b\n"
+ ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
+ ".inst 0x45099815 // smmla z21.s, z0.b, z9.b\n"
+ ".inst 0x45089812 // smmla z18.s, z0.b, z8.b\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45089812 // smmla z18.s, z0.b, z8.b\n"
+ ".inst 0x4504985c // smmla z28.s, z2.b, z4.b\n"
+ ".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
+ ".inst 0x4509985d // smmla z29.s, z2.b, z9.b\n"
".inst 0x4508985a // smmla z26.s, z2.b, z8.b\n"
- ".inst 0x45079816 // smmla z22.s, z0.b, z7.b\n"
+ ".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x450a985e // smmla z30.s, z2.b, z10.b\n"
+ ".inst 0x45069813 // smmla z19.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
- ".inst 0x4507985e // smmla z30.s, z2.b, z7.b\n"
- ".inst 0x45069813 // smmla z19.s, z0.b, z6.b\n"
- "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4506985b // smmla z27.s, z2.b, z6.b\n"
".inst 0x45059817 // smmla z23.s, z0.b, z5.b\n"
- "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x4505985f // smmla z31.s, z2.b, z5.b\n"
".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "add x23, x23, #0x10\n"
".inst 0x45049878 // smmla z24.s, z3.b, z4.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x450a9834 // smmla z20.s, z1.b, z10.b\n"
- "add x22, x22, #0x10\n"
".inst 0x450a987c // smmla z28.s, z3.b, z10.b\n"
".inst 0x45099831 // smmla z17.s, z1.b, z9.b\n"
- "add x21, x21, #0x10\n"
".inst 0x45099879 // smmla z25.s, z3.b, z9.b\n"
".inst 0x45089835 // smmla z21.s, z1.b, z8.b\n"
".inst 0x4508987d // smmla z29.s, z3.b, z8.b\n"
@@ -1089,60 +1090,60 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
"ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
"trn1 z2.d, z3.d, z5.d\n"
- "ld1b { z4.b }, p2/Z, [x28]\n"
"trn2 z3.d, z3.d, z5.d\n"
- ".inst 0x45049810 // smmla z16.s, z0.b, z4.b\n"
- ".inst 0x45049858 // smmla z24.s, z2.b, z4.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
- "subs x25, x25, #0x8\n"
- ".inst 0x45059814 // smmla z20.s, z0.b, z5.b\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
- ".inst 0x4505985c // smmla z28.s, z2.b, z5.b\n"
- ".inst 0x45049811 // smmla z17.s, z0.b, z4.b\n"
- "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
- ".inst 0x45049859 // smmla z25.s, z2.b, z4.b\n"
- ".inst 0x45089815 // smmla z21.s, z0.b, z8.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
- ".inst 0x4508985d // smmla z29.s, z2.b, z8.b\n"
- ".inst 0x45079812 // smmla z18.s, z0.b, z7.b\n"
+ ".inst 0x45069810 // smmla z16.s, z0.b, z6.b\n"
+ ".inst 0x45049814 // smmla z20.s, z0.b, z4.b\n"
+ ".inst 0x45079811 // smmla z17.s, z0.b, z7.b\n"
+ ".inst 0x45099815 // smmla z21.s, z0.b, z9.b\n"
+ ".inst 0x45089812 // smmla z18.s, z0.b, z8.b\n"
+ ".inst 0x45069858 // smmla z24.s, z2.b, z6.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x4504985c // smmla z28.s, z2.b, z4.b\n"
"addvl x28, x28, #8\n"
- ".inst 0x4507985a // smmla z26.s, z2.b, z7.b\n"
- ".inst 0x45069816 // smmla z22.s, z0.b, z6.b\n"
- ".inst 0x4506985e // smmla z30.s, z2.b, z6.b\n"
+ ".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
+ ".inst 0x4509985d // smmla z29.s, z2.b, z9.b\n"
+ ".inst 0x4508985a // smmla z26.s, z2.b, z8.b\n"
+ ".inst 0x450a9816 // smmla z22.s, z0.b, z10.b\n"
+ ".inst 0x450a985e // smmla z30.s, z2.b, z10.b\n"
".inst 0x45059813 // smmla z19.s, z0.b, z5.b\n"
".inst 0x4505985b // smmla z27.s, z2.b, z5.b\n"
- ".inst 0x45049817 // smmla z23.s, z0.b, z4.b\n"
- ".inst 0x4504985f // smmla z31.s, z2.b, z4.b\n"
+ ".inst 0x45069817 // smmla z23.s, z0.b, z6.b\n"
+ ".inst 0x4506985f // smmla z31.s, z2.b, z6.b\n"
"ble 52f\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- ".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
- ".inst 0x45049878 // smmla z24.s, z3.b, z4.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45059834 // smmla z20.s, z1.b, z5.b\n"
- ".inst 0x4505987c // smmla z28.s, z3.b, z5.b\n"
"ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45049831 // smmla z17.s, z1.b, z4.b\n"
- ".inst 0x45049879 // smmla z25.s, z3.b, z4.b\n"
+ ".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
+ ".inst 0x45049878 // smmla z24.s, z3.b, z4.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x450a9834 // smmla z20.s, z1.b, z10.b\n"
+ ".inst 0x450a987c // smmla z28.s, z3.b, z10.b\n"
+ ".inst 0x45099831 // smmla z17.s, z1.b, z9.b\n"
+ ".inst 0x45099879 // smmla z25.s, z3.b, z9.b\n"
+ "addvl x28, x28, #8\n"
".inst 0x45089835 // smmla z21.s, z1.b, z8.b\n"
".inst 0x4508987d // smmla z29.s, z3.b, z8.b\n"
".inst 0x45079832 // smmla z18.s, z1.b, z7.b\n"
".inst 0x4507987a // smmla z26.s, z3.b, z7.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45069836 // smmla z22.s, z1.b, z6.b\n"
".inst 0x4506987e // smmla z30.s, z3.b, z6.b\n"
".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
@@ -1162,16 +1163,16 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"bne 46b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z0.d, z16.d, z20.d\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
"uzp2 z16.d, z16.d, z20.d\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "add x21, x22, x20\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "add x23, x27, x20\n"
+ "add x22, x23, x20\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "add x21, x22, x20\n"
"uzp1 z23.d, z24.d, z28.d\n"
"uzp2 z24.d, z24.d, z28.d\n"
"uzp1 z28.d, z25.d, z29.d\n"
@@ -1183,15 +1184,15 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"mov z31.d, z0.d\n"
"tbnz %x[flags], #31, 54f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
"neg z0.s, p2/M, z0.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z0.s\n"
"mov z14.s, z13.s[3]\n"
"mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z0.s\n"
"mul z12.s, p2/M, z12.s, z0.s\n"
"mul z13.s, p2/M, z13.s, z0.s\n"
"mul z14.s, p2/M, z14.s, z0.s\n"
@@ -1257,32 +1258,32 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"tbz %x[flags], #5, 55f\n"
"and z2.d, z31.d, z0.d\n"
"and z1.d, z20.d, z0.d\n"
- "asr z2.s, z2.s, #0x1f\n"
- "asr z1.s, z1.s, #0x1f\n"
- "sqadd z31.s, z31.s, z2.s\n"
- "sqadd z20.s, z20.s, z1.s\n"
"and z7.d, z21.d, z0.d\n"
"and z6.d, z22.d, z0.d\n"
"and z5.d, z16.d, z0.d\n"
"and z4.d, z17.d, z0.d\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
"and z3.d, z18.d, z0.d\n"
- "and z2.d, z19.d, z0.d\n"
- "and z1.d, z23.d, z0.d\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z2.s\n"
+ "sqadd z20.s, z20.s, z1.s\n"
+ "and z2.d, z19.d, z0.d\n"
+ "and z1.d, z23.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z3.s, z3.s, #0x1f\n"
- "asr z2.s, z2.s, #0x1f\n"
- "asr z1.s, z1.s, #0x1f\n"
"sqadd z21.s, z21.s, z7.s\n"
"sqadd z22.s, z22.s, z6.s\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
"sqadd z16.s, z16.s, z5.s\n"
"sqadd z17.s, z17.s, z4.s\n"
"sqadd z18.s, z18.s, z3.s\n"
+ "and z7.d, z28.d, z0.d\n"
"sqadd z19.s, z19.s, z2.s\n"
"sqadd z23.s, z23.s, z1.s\n"
- "and z7.d, z28.d, z0.d\n"
"and z6.d, z29.d, z0.d\n"
"and z5.d, z30.d, z0.d\n"
"and z4.d, z24.d, z0.d\n"
@@ -1305,43 +1306,43 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"sqadd z27.s, z27.s, z1.s\n"
"55:" // Height 4: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z2.s\n"
+ "ld1rw { z2.s }, p2/Z, [x20]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z2.s\n"
- "add z21.s, z21.s, z2.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z2.s\n"
- "add z16.s, z16.s, z2.s\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z2.s\n"
- "add z18.s, z18.s, z2.s\n"
+ "add z31.s, z31.s, z2.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z19.s, z19.s, z2.s\n"
- "add z23.s, z23.s, z2.s\n"
+ "add z20.s, z20.s, z2.s\n"
+ "add z21.s, z21.s, z2.s\n"
".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
- "add z28.s, z28.s, z2.s\n"
- "add z29.s, z29.s, z2.s\n"
+ "add z22.s, z22.s, z2.s\n"
+ "add z16.s, z16.s, z2.s\n"
".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z30.s, z30.s, z2.s\n"
- "add z24.s, z24.s, z2.s\n"
+ "add z17.s, z17.s, z2.s\n"
+ "add z18.s, z18.s, z2.s\n"
".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z2.s\n"
- "add z26.s, z26.s, z2.s\n"
+ "add z19.s, z19.s, z2.s\n"
+ "add z23.s, z23.s, z2.s\n"
".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z28.s, z28.s, z2.s\n"
+ "add z29.s, z29.s, z2.s\n"
"ld1rw { z1.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z2.s\n"
+ "add z30.s, z30.s, z2.s\n"
+ "add z24.s, z24.s, z2.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z2.s\n"
+ "add z26.s, z26.s, z2.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z2.s\n"
"smin z31.s, p2/M, z31.s, z1.s\n"
"smin z20.s, p2/M, z20.s, z1.s\n"
"smin z21.s, p2/M, z21.s, z1.s\n"
@@ -1361,36 +1362,36 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"smax z31.s, p2/M, z31.s, z0.s\n"
"smax z20.s, p2/M, z20.s, z0.s\n"
"smax z21.s, p2/M, z21.s, z0.s\n"
- "uzp1 z31.h, z31.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z0.s\n"
"smax z16.s, p2/M, z16.s, z0.s\n"
- "uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z31.b, z31.b, z20.b\n"
"smax z17.s, p2/M, z17.s, z0.s\n"
"smax z18.s, p2/M, z18.s, z0.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z31.b }, p1, [x27]\n"
"smax z19.s, p2/M, z19.s, z0.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
"smax z23.s, p2/M, z23.s, z0.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z28.s, p2/M, z28.s, z0.s\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
"smax z29.s, p2/M, z29.s, z0.s\n"
- "uzp1 z23.h, z23.h, z28.h\n"
- "st1b { z16.b }, p1, [x23]\n"
"smax z30.s, p2/M, z30.s, z0.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"smax z24.s, p2/M, z24.s, z0.s\n"
- "uzp1 z16.h, z29.h, z30.h\n"
- "uzp1 z23.b, z23.b, z16.b\n"
"smax z25.s, p2/M, z25.s, z0.s\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
"smax z26.s, p2/M, z26.s, z0.s\n"
+ "smax z27.s, p2/M, z27.s, z0.s\n"
+ "uzp1 z23.h, z23.h, z28.h\n"
+ "uzp1 z31.b, z31.b, z20.b\n"
+ "uzp1 z18.h, z29.h, z30.h\n"
"uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "st1b { z31.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "uzp1 z23.b, z23.b, z18.b\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z16.b }, p1, [x23]\n"
"st1b { z23.b }, p1, [x22]\n"
- "smax z27.s, p2/M, z27.s, z0.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"st1b { z24.b }, p1, [x21]\n"
- "addvl x27, x27, #1\n"
"56:" // Height 4: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -1407,8 +1408,8 @@ void sve_hybrid_s8qa_mmla_4x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
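
The recurring change across the hybrid kernels in this patch, visible in the hunks above, is that output_ptr moves out of the inline-asm operand list and into the KernelArgs block: the assembly now reads it with "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]" and writes the advanced pointer back with "str x20, [%x[args_ptr], %[offsetof_output_ptr]]", replacing the old mov/madd pair on %x[output_ptr] and dropping output_ptr from the "+&r" constraints, as the final operand-list hunk above shows. The remaining churn is instruction rescheduling (weight loads hoisted ahead of the accumulates they feed, pointer increments grouped) with the arithmetic itself unchanged. A minimal, self-contained sketch of the relocation on the C++ side — KernelArgs and the is_indirect/direct shape follow this diff, while the Output stand-in type is a simplification for illustration:

    #include <cstddef>

    struct Output {
        bool is_indirect = false;
        struct Indirect { void *const *ptr; size_t offset; } indirect = {};
        struct Direct { void *base; size_t stride; } direct = {};
    };

    struct KernelArgs {
        size_t output_offset = 0;
        void *output_ptr = nullptr; // new: lives in the args block, not in a register
    };

    void bind_output(KernelArgs &ka, unsigned long &flags, const Output &output_arg)
    {
        if (output_arg.is_indirect) {
            ka.output_ptr = (void *)(output_arg.indirect.ptr);
            ka.output_offset = output_arg.indirect.offset;
            flags |= 0x4;
        } else {
            ka.output_ptr = (void *)(output_arg.direct.base);
            ka.output_offset = output_arg.direct.stride;
        }
    }

Keeping the pointer in the args block rather than in a tied asm operand frees a general-purpose register for the kernel body and lets the per-height writeback advance persist across column-loop iterations without occupying an operand slot.
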
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp
index 056ae7a616..bd34f29894 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
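
The one functional change in this header is the widened StdTransformsSVE parameter list: the transforms helper previously assumed the LHS operand type matched the RHS, and now takes lhs_operand_type explicitly. A hedged sketch of the shape of that change — the parameter names here are illustrative, and the real StdTransformsSVE in arm_gemm carries the actual interleaving machinery:

    #include <cstdint>

    template <typename LhsOperand, typename RhsOperand, typename Result,
              unsigned int OutHeight, unsigned int OutWidthVectors, unsigned int KBlock>
    struct StdTransformsSVE {
        // transform helpers elided
    };

    // Before: StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 4>
    // After:  StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 4>
    StdTransformsSVE<int8_t, int8_t, int32_t, 6, 4, 4> transforms = {};

The same one-line signature change apparently accompanies each kernel header touched by this patch.
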
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp
index c28717a37e..49930b57f7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,18 @@ void sve_hybrid_s8qs_dot_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
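
With output_ptr now read from the args block, the prologue's register assignments shift in the hunks that follow: x11 takes N (previously x10), x10 takes the B_ptr weights pointer (previously x9), and x9 becomes the output pointer (previously x11) — which is why nearly every ld1b below is mechanically renamed from [x9, ...] to [x10, ...]. A minimal inline-asm sketch of the new prologue, assuming a KernelArgs with just these fields (AArch64 only; the real kernel loads more state):

    #include <cstddef>

    struct KernelArgs {
        size_t N = 0;
        const void *B_ptr = nullptr;
        void *output_ptr = nullptr;
    } ka;

    void prologue_sketch()
    {
        // Mirrors the three loads the following hunk introduces; x9-x11 are
        // listed as clobbers so the compiler leaves them alone.
        __asm__ __volatile__(
            "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
            "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
            "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            :
            : [args_ptr] "r" (&ka),
              [offsetof_N] "I" (offsetof(KernelArgs, N)),
              [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)),
              [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr))
            : "x9", "x10", "x11", "memory");
    }
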
@@ -98,22 +98,22 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"3:" // Height 1: setup done
"mov x28, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -129,103 +129,103 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ble 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z16.b }, p2/Z, [x9]\n"
- "sdot z8.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x26, x26, #0x10\n"
+ "sdot z8.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sdot z10.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "sdot z10.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #4, MUL VL]\n"
"sdot z11.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p2/Z, [x9, #4, MUL VL]\n"
- "sdot z8.s, z16.b, z0.b[1]\n"
- "ld1b { z16.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #5, MUL VL]\n"
+ "sdot z8.s, z17.b, z0.b[1]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #6, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[1]\n"
- "ld1b { z16.b }, p2/Z, [x9, #6, MUL VL]\n"
- "sdot z10.s, z16.b, z0.b[1]\n"
- "ld1b { z16.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z16.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ "sdot z10.s, z17.b, z0.b[1]\n"
"sdot z11.s, z16.b, z0.b[1]\n"
- "ld1b { z17.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[2]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-6, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[2]\n"
- "ld1b { z17.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-5, MUL VL]\n"
"sdot z10.s, z17.b, z0.b[2]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-4, MUL VL]\n"
"sdot z11.s, z16.b, z0.b[2]\n"
- "ld1b { z17.b }, p2/Z, [x9, #-4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-3, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[3]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[3]\n"
- "ld1b { z17.b }, p2/Z, [x9, #-2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-1, MUL VL]\n"
"sdot z10.s, z17.b, z0.b[3]\n"
"sdot z11.s, z16.b, z0.b[3]\n"
- "add x26, x26, #0x10\n"
"bgt 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z16.b }, p2/Z, [x9]\n"
- "sdot z8.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "sdot z8.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[0]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[0]\n"
"sdot z11.s, z16.b, z0.b[0]\n"
- "addvl x9, x9, #4\n"
"ble 9f\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z17.b, z0.b[1]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[1]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[1]\n"
"sdot z11.s, z16.b, z0.b[1]\n"
- "addvl x9, x9, #4\n"
"ble 9f\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z17.b, z0.b[2]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[2]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[2]\n"
"sdot z11.s, z16.b, z0.b[2]\n"
- "addvl x9, x9, #4\n"
"ble 9f\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[3]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[3]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[3]\n"
"sdot z11.s, z16.b, z0.b[3]\n"
- "addvl x9, x9, #4\n"
"9:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 4b\n"
- "ld1w { z17.s }, p2/Z, [x14]\n"
- "ld1w { z16.s }, p2/Z, [x14, #1, MUL VL]\n"
- "add z8.s, z8.s, z17.s\n"
- "add z9.s, z9.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x14]\n"
+ "ld1w { z18.s }, p2/Z, [x14, #1, MUL VL]\n"
"ld1w { z17.s }, p2/Z, [x14, #2, MUL VL]\n"
"ld1w { z16.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "add z8.s, z8.s, z19.s\n"
+ "add z9.s, z9.s, z18.s\n"
"add z10.s, z10.s, z17.s\n"
"add z11.s, z11.s, z16.s\n"
- "addvl x14, x14, #4\n"
"tbz %x[flags], #4, 10f\n"
"ld1w { z0.s }, p2/Z, [x12]\n"
"ld1w { z4.s }, p2/Z, [x13]\n"
@@ -269,19 +269,19 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sqadd z11.s, z11.s, z16.s\n"
"12:" // Height 1: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z16.s\n"
+ "ld1rw { z18.s }, p2/Z, [x20]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z16.s\n"
- "add z10.s, z10.s, z16.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
"ld1rw { z17.s }, p2/Z, [x20]\n"
- "add z11.s, z11.s, z16.s\n"
+ "add z8.s, z8.s, z18.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z9.s, z9.s, z18.s\n"
+ "add z10.s, z10.s, z18.s\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
+ "add z11.s, z11.s, z18.s\n"
"smin z8.s, p2/M, z8.s, z17.s\n"
"smin z9.s, p2/M, z9.s, z17.s\n"
"smin z10.s, p2/M, z10.s, z17.s\n"
@@ -289,41 +289,41 @@ void sve_hybrid_s8qs_dot_6x4VL (
"smax z8.s, p2/M, z8.s, z16.s\n"
"smax z9.s, p2/M, z9.s, z16.s\n"
"smax z10.s, p2/M, z10.s, z16.s\n"
- "uzp1 z8.h, z8.h, z9.h\n"
"smax z11.s, p2/M, z11.s, z16.s\n"
+ "uzp1 z8.h, z8.h, z9.h\n"
"uzp1 z16.h, z10.h, z11.h\n"
"uzp1 z8.b, z8.b, z16.b\n"
- "st1b { z8.b }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"13:" // Height 1: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"16:" // Height 2: setup done
"mov x28, #0x0\n"
"17:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 18f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -342,57 +342,57 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ble 21f\n"
"20:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x25, x25, #0x10\n"
"sdot z8.s, z17.b, z1.b[0]\n"
"sdot z12.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z1.b[0]\n"
"sdot z13.s, z16.b, z0.b[0]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z17.b, z1.b[0]\n"
"sdot z14.s, z17.b, z0.b[0]\n"
- "ld1b { z17.b }, p2/Z, [x9, #4, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z17.b }, p2/Z, [x10, #4, MUL VL]\n"
"sdot z11.s, z16.b, z1.b[0]\n"
"sdot z15.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p2/Z, [x9, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z16.b }, p2/Z, [x10, #5, MUL VL]\n"
"sdot z8.s, z17.b, z1.b[1]\n"
"sdot z12.s, z17.b, z0.b[1]\n"
- "ld1b { z17.b }, p2/Z, [x9, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z17.b }, p2/Z, [x10, #6, MUL VL]\n"
"sdot z9.s, z16.b, z1.b[1]\n"
"sdot z13.s, z16.b, z0.b[1]\n"
- "ld1b { z16.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z16.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"sdot z10.s, z17.b, z1.b[1]\n"
"sdot z14.s, z17.b, z0.b[1]\n"
- "ld1b { z17.b }, p2/Z, [x9, #-8, MUL VL]\n"
"sdot z11.s, z16.b, z1.b[1]\n"
"sdot z15.s, z16.b, z0.b[1]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z17.b, z1.b[2]\n"
"sdot z12.s, z17.b, z0.b[2]\n"
- "ld1b { z17.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-6, MUL VL]\n"
"sdot z9.s, z16.b, z1.b[2]\n"
"sdot z13.s, z16.b, z0.b[2]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-5, MUL VL]\n"
"sdot z10.s, z17.b, z1.b[2]\n"
"sdot z14.s, z17.b, z0.b[2]\n"
- "ld1b { z17.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-4, MUL VL]\n"
"sdot z11.s, z16.b, z1.b[2]\n"
"sdot z15.s, z16.b, z0.b[2]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-3, MUL VL]\n"
"sdot z8.s, z17.b, z1.b[3]\n"
"sdot z12.s, z17.b, z0.b[3]\n"
- "ld1b { z17.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-2, MUL VL]\n"
"sdot z9.s, z16.b, z1.b[3]\n"
"sdot z13.s, z16.b, z0.b[3]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-1, MUL VL]\n"
"sdot z10.s, z17.b, z1.b[3]\n"
"sdot z14.s, z17.b, z0.b[3]\n"
"sdot z11.s, z16.b, z1.b[3]\n"
@@ -400,64 +400,64 @@ void sve_hybrid_s8qs_dot_6x4VL (
"bgt 20b\n"
"21:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[0]\n"
"sdot z12.s, z17.b, z1.b[0]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[0]\n"
"sdot z13.s, z16.b, z1.b[0]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[0]\n"
"sdot z14.s, z17.b, z1.b[0]\n"
- "addvl x9, x9, #4\n"
"sdot z11.s, z16.b, z0.b[0]\n"
"sdot z15.s, z16.b, z1.b[0]\n"
"ble 22f\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z17.b, z0.b[1]\n"
"sdot z12.s, z17.b, z1.b[1]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[1]\n"
"sdot z13.s, z16.b, z1.b[1]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[1]\n"
"sdot z14.s, z17.b, z1.b[1]\n"
- "addvl x9, x9, #4\n"
"sdot z11.s, z16.b, z0.b[1]\n"
"sdot z15.s, z16.b, z1.b[1]\n"
"ble 22f\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z17.b, z0.b[2]\n"
"sdot z12.s, z17.b, z1.b[2]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[2]\n"
"sdot z13.s, z16.b, z1.b[2]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[2]\n"
"sdot z14.s, z17.b, z1.b[2]\n"
- "addvl x9, x9, #4\n"
"sdot z11.s, z16.b, z0.b[2]\n"
"sdot z15.s, z16.b, z1.b[2]\n"
"ble 22f\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[3]\n"
"sdot z12.s, z17.b, z1.b[3]\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[3]\n"
"sdot z13.s, z16.b, z1.b[3]\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[3]\n"
"sdot z14.s, z17.b, z1.b[3]\n"
- "addvl x9, x9, #4\n"
"sdot z11.s, z16.b, z0.b[3]\n"
"sdot z15.s, z16.b, z1.b[3]\n"
"22:" // Height 2: Multiply loop: multiply skip
@@ -467,17 +467,17 @@ void sve_hybrid_s8qs_dot_6x4VL (
"bne 17b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z19.s }, p2/Z, [x14]\n"
- "add x26, x11, x20\n"
- "add z8.s, z8.s, z19.s\n"
"ld1w { z18.s }, p2/Z, [x14, #1, MUL VL]\n"
"ld1w { z17.s }, p2/Z, [x14, #2, MUL VL]\n"
- "add z9.s, z9.s, z18.s\n"
- "add z10.s, z10.s, z17.s\n"
"ld1w { z16.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add z11.s, z11.s, z16.s\n"
- "add z12.s, z12.s, z19.s\n"
"addvl x14, x14, #4\n"
+ "add x26, x9, x20\n"
+ "add z8.s, z8.s, z19.s\n"
+ "add z12.s, z12.s, z19.s\n"
+ "add z9.s, z9.s, z18.s\n"
+ "add z10.s, z10.s, z17.s\n"
"add z13.s, z13.s, z18.s\n"
+ "add z11.s, z11.s, z16.s\n"
"add z14.s, z14.s, z17.s\n"
"add z15.s, z15.s, z16.s\n"
"tbz %x[flags], #4, 23f\n"
@@ -522,11 +522,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z17.s, z17.s, #0x1f\n"
"asr z16.s, z16.s, #0x1f\n"
"sqadd z8.s, z8.s, z19.s\n"
+ "and z19.d, z12.d, z0.d\n"
"sqadd z9.s, z9.s, z18.s\n"
+ "and z18.d, z13.d, z1.d\n"
"sqadd z10.s, z10.s, z17.s\n"
"sqadd z11.s, z11.s, z16.s\n"
- "and z19.d, z12.d, z0.d\n"
- "and z18.d, z13.d, z1.d\n"
"and z17.d, z14.d, z2.d\n"
"and z16.d, z15.d, z3.d\n"
"asr z19.s, z19.s, #0x1f\n"
@@ -539,73 +539,73 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sqadd z15.s, z15.s, z16.s\n"
"25:" // Height 2: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z17.s\n"
+ "ld1rw { z18.s }, p2/Z, [x20]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z17.s\n"
- "add z10.s, z10.s, z17.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
- "add z11.s, z11.s, z17.s\n"
- "add z12.s, z12.s, z17.s\n"
".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z17.s\n"
- "add z14.s, z14.s, z17.s\n"
+ "add z8.s, z8.s, z18.s\n"
".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "add z15.s, z15.s, z17.s\n"
- "add x20, %x[qp], %[minval]\n"
+ "add z9.s, z9.s, z18.s\n"
+ "add z10.s, z10.s, z18.s\n"
"ld1rw { z17.s }, p2/Z, [x20]\n"
- "smin z8.s, p2/M, z8.s, z16.s\n"
- "smin z9.s, p2/M, z9.s, z16.s\n"
- "smin z10.s, p2/M, z10.s, z16.s\n"
- "smin z11.s, p2/M, z11.s, z16.s\n"
- "smin z12.s, p2/M, z12.s, z16.s\n"
- "smin z13.s, p2/M, z13.s, z16.s\n"
- "smin z14.s, p2/M, z14.s, z16.s\n"
- "smin z15.s, p2/M, z15.s, z16.s\n"
- "smax z8.s, p2/M, z8.s, z17.s\n"
- "smax z9.s, p2/M, z9.s, z17.s\n"
- "smax z10.s, p2/M, z10.s, z17.s\n"
+ "add z11.s, z11.s, z18.s\n"
+ "add z12.s, z12.s, z18.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z13.s, z13.s, z18.s\n"
+ "add z14.s, z14.s, z18.s\n"
+ "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "add z15.s, z15.s, z18.s\n"
+ "smin z8.s, p2/M, z8.s, z17.s\n"
+ "smin z9.s, p2/M, z9.s, z17.s\n"
+ "smin z10.s, p2/M, z10.s, z17.s\n"
+ "smin z11.s, p2/M, z11.s, z17.s\n"
+ "smin z12.s, p2/M, z12.s, z17.s\n"
+ "smin z13.s, p2/M, z13.s, z17.s\n"
+ "smin z14.s, p2/M, z14.s, z17.s\n"
+ "smin z15.s, p2/M, z15.s, z17.s\n"
+ "smax z8.s, p2/M, z8.s, z16.s\n"
+ "smax z9.s, p2/M, z9.s, z16.s\n"
+ "smax z10.s, p2/M, z10.s, z16.s\n"
+ "smax z11.s, p2/M, z11.s, z16.s\n"
+ "smax z12.s, p2/M, z12.s, z16.s\n"
+ "smax z13.s, p2/M, z13.s, z16.s\n"
+ "smax z14.s, p2/M, z14.s, z16.s\n"
+ "smax z15.s, p2/M, z15.s, z16.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "smax z11.s, p2/M, z11.s, z17.s\n"
- "smax z12.s, p2/M, z12.s, z17.s\n"
- "uzp1 z16.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z16.b\n"
- "smax z13.s, p2/M, z13.s, z17.s\n"
- "smax z14.s, p2/M, z14.s, z17.s\n"
+ "uzp1 z17.h, z10.h, z11.h\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
- "smax z15.s, p2/M, z15.s, z17.s\n"
"uzp1 z16.h, z14.h, z15.h\n"
+ "uzp1 z8.b, z8.b, z17.b\n"
"uzp1 z12.b, z12.b, z16.b\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"st1b { z12.b }, p1, [x26]\n"
- "addvl x11, x11, #1\n"
"26:" // Height 2: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"mov z16.s, #0x0\n"
@@ -616,8 +616,8 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov x28, #0x0\n"
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -639,73 +639,73 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ble 34f\n"
"33:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z21.b }, p2/Z, [x10]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1b { z21.b }, p2/Z, [x9]\n"
+ "add x24, x24, #0x10\n"
"sdot z8.s, z21.b, z2.b[0]\n"
"sdot z12.s, z21.b, z1.b[0]\n"
- "ld1b { z20.b }, p2/Z, [x9, #1, MUL VL]\n"
- "sdot z16.s, z21.b, z0.b[0]\n"
"sdot z9.s, z20.b, z2.b[0]\n"
- "ld1b { z21.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z20.b, z1.b[0]\n"
+ "sdot z16.s, z21.b, z0.b[0]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z20.b, z0.b[0]\n"
- "ld1b { z20.b }, p2/Z, [x9, #3, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z20.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z21.b, z2.b[0]\n"
"sdot z14.s, z21.b, z1.b[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"sdot z18.s, z21.b, z0.b[0]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #4, MUL VL]\n"
"sdot z11.s, z20.b, z2.b[0]\n"
- "ld1b { z21.b }, p2/Z, [x9, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
"sdot z15.s, z20.b, z1.b[0]\n"
"sdot z19.s, z20.b, z0.b[0]\n"
- "ld1b { z20.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #5, MUL VL]\n"
"sdot z8.s, z21.b, z2.b[1]\n"
"sdot z12.s, z21.b, z1.b[1]\n"
"sdot z16.s, z21.b, z0.b[1]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #6, MUL VL]\n"
"sdot z9.s, z20.b, z2.b[1]\n"
- "ld1b { z21.b }, p2/Z, [x9, #6, MUL VL]\n"
"sdot z13.s, z20.b, z1.b[1]\n"
"sdot z17.s, z20.b, z0.b[1]\n"
- "ld1b { z20.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z20.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"sdot z10.s, z21.b, z2.b[1]\n"
"sdot z14.s, z21.b, z1.b[1]\n"
"sdot z18.s, z21.b, z0.b[1]\n"
"sdot z11.s, z20.b, z2.b[1]\n"
- "ld1b { z21.b }, p2/Z, [x9, #-8, MUL VL]\n"
"sdot z15.s, z20.b, z1.b[1]\n"
"sdot z19.s, z20.b, z0.b[1]\n"
- "ld1b { z20.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z21.b, z2.b[2]\n"
"sdot z12.s, z21.b, z1.b[2]\n"
"sdot z16.s, z21.b, z0.b[2]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #-6, MUL VL]\n"
"sdot z9.s, z20.b, z2.b[2]\n"
- "ld1b { z21.b }, p2/Z, [x9, #-6, MUL VL]\n"
"sdot z13.s, z20.b, z1.b[2]\n"
"sdot z17.s, z20.b, z0.b[2]\n"
- "ld1b { z20.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #-5, MUL VL]\n"
"sdot z10.s, z21.b, z2.b[2]\n"
"sdot z14.s, z21.b, z1.b[2]\n"
"sdot z18.s, z21.b, z0.b[2]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #-4, MUL VL]\n"
"sdot z11.s, z20.b, z2.b[2]\n"
- "ld1b { z21.b }, p2/Z, [x9, #-4, MUL VL]\n"
"sdot z15.s, z20.b, z1.b[2]\n"
"sdot z19.s, z20.b, z0.b[2]\n"
- "ld1b { z20.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #-3, MUL VL]\n"
"sdot z8.s, z21.b, z2.b[3]\n"
"sdot z12.s, z21.b, z1.b[3]\n"
"sdot z16.s, z21.b, z0.b[3]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #-2, MUL VL]\n"
"sdot z9.s, z20.b, z2.b[3]\n"
- "ld1b { z21.b }, p2/Z, [x9, #-2, MUL VL]\n"
"sdot z13.s, z20.b, z1.b[3]\n"
"sdot z17.s, z20.b, z0.b[3]\n"
- "ld1b { z20.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #-1, MUL VL]\n"
"sdot z10.s, z21.b, z2.b[3]\n"
"sdot z14.s, z21.b, z1.b[3]\n"
"sdot z18.s, z21.b, z0.b[3]\n"
@@ -715,21 +715,21 @@ void sve_hybrid_s8qs_dot_6x4VL (
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z21.b }, p2/Z, [x10]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z21.b }, p2/Z, [x9]\n"
"sdot z8.s, z21.b, z0.b[0]\n"
"sdot z12.s, z21.b, z1.b[0]\n"
- "ld1b { z20.b }, p2/Z, [x9, #1, MUL VL]\n"
- "sdot z16.s, z21.b, z2.b[0]\n"
"sdot z9.s, z20.b, z0.b[0]\n"
- "ld1b { z21.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z20.b, z1.b[0]\n"
+ "sdot z16.s, z21.b, z2.b[0]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z20.b, z2.b[0]\n"
- "ld1b { z20.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z20.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z21.b, z0.b[0]\n"
"sdot z14.s, z21.b, z1.b[0]\n"
"sdot z18.s, z21.b, z2.b[0]\n"
@@ -737,18 +737,18 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z15.s, z20.b, z1.b[0]\n"
"sdot z19.s, z20.b, z2.b[0]\n"
"ble 35f\n"
- "ld1b { z21.b }, p2/Z, [x9]\n"
- "ld1b { z20.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z21.b }, p2/Z, [x10]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z21.b, z0.b[1]\n"
"sdot z12.s, z21.b, z1.b[1]\n"
"sdot z16.s, z21.b, z2.b[1]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z20.b, z0.b[1]\n"
- "ld1b { z21.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"sdot z13.s, z20.b, z1.b[1]\n"
"sdot z17.s, z20.b, z2.b[1]\n"
- "ld1b { z20.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z20.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z21.b, z0.b[1]\n"
"sdot z14.s, z21.b, z1.b[1]\n"
"sdot z18.s, z21.b, z2.b[1]\n"
@@ -756,18 +756,18 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z15.s, z20.b, z1.b[1]\n"
"sdot z19.s, z20.b, z2.b[1]\n"
"ble 35f\n"
- "ld1b { z21.b }, p2/Z, [x9]\n"
- "ld1b { z20.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z21.b }, p2/Z, [x10]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z21.b, z0.b[2]\n"
"sdot z12.s, z21.b, z1.b[2]\n"
"sdot z16.s, z21.b, z2.b[2]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z20.b, z0.b[2]\n"
- "ld1b { z21.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"sdot z13.s, z20.b, z1.b[2]\n"
"sdot z17.s, z20.b, z2.b[2]\n"
- "ld1b { z20.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z20.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z21.b, z0.b[2]\n"
"sdot z14.s, z21.b, z1.b[2]\n"
"sdot z18.s, z21.b, z2.b[2]\n"
@@ -775,17 +775,17 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z15.s, z20.b, z1.b[2]\n"
"sdot z19.s, z20.b, z2.b[2]\n"
"ble 35f\n"
- "ld1b { z21.b }, p2/Z, [x9]\n"
- "ld1b { z20.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z21.b }, p2/Z, [x10]\n"
+ "ld1b { z20.b }, p2/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z21.b, z0.b[3]\n"
"sdot z12.s, z21.b, z1.b[3]\n"
"sdot z16.s, z21.b, z2.b[3]\n"
+ "ld1b { z21.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z20.b, z0.b[3]\n"
- "ld1b { z21.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z20.b, z1.b[3]\n"
"sdot z17.s, z20.b, z2.b[3]\n"
- "ld1b { z20.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z20.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z21.b, z0.b[3]\n"
"sdot z14.s, z21.b, z1.b[3]\n"
"sdot z18.s, z21.b, z2.b[3]\n"
@@ -799,17 +799,17 @@ void sve_hybrid_s8qs_dot_6x4VL (
"bne 30b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z23.s }, p2/Z, [x14]\n"
- "add x26, x11, x20\n"
- "add x25, x26, x20\n"
"ld1w { z22.s }, p2/Z, [x14, #1, MUL VL]\n"
"ld1w { z21.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
"add z8.s, z8.s, z23.s\n"
+ "add z12.s, z12.s, z23.s\n"
"add z9.s, z9.s, z22.s\n"
- "ld1w { z20.s }, p2/Z, [x14, #3, MUL VL]\n"
"add z10.s, z10.s, z21.s\n"
"add z11.s, z11.s, z20.s\n"
- "addvl x14, x14, #4\n"
- "add z12.s, z12.s, z23.s\n"
"add z13.s, z13.s, z22.s\n"
"add z14.s, z14.s, z21.s\n"
"add z15.s, z15.s, z20.s\n"
@@ -863,11 +863,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z21.s, z21.s, #0x1f\n"
"asr z20.s, z20.s, #0x1f\n"
"sqadd z8.s, z8.s, z23.s\n"
+ "and z23.d, z12.d, z0.d\n"
"sqadd z9.s, z9.s, z22.s\n"
+ "and z22.d, z13.d, z1.d\n"
"sqadd z10.s, z10.s, z21.s\n"
"sqadd z11.s, z11.s, z20.s\n"
- "and z23.d, z12.d, z0.d\n"
- "and z22.d, z13.d, z1.d\n"
"and z21.d, z14.d, z2.d\n"
"and z20.d, z15.d, z3.d\n"
"asr z23.s, z23.s, #0x1f\n"
@@ -875,11 +875,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z21.s, z21.s, #0x1f\n"
"asr z20.s, z20.s, #0x1f\n"
"sqadd z12.s, z12.s, z23.s\n"
+ "and z23.d, z16.d, z0.d\n"
"sqadd z13.s, z13.s, z22.s\n"
+ "and z22.d, z17.d, z1.d\n"
"sqadd z14.s, z14.s, z21.s\n"
"sqadd z15.s, z15.s, z20.s\n"
- "and z23.d, z16.d, z0.d\n"
- "and z22.d, z17.d, z1.d\n"
"and z21.d, z18.d, z2.d\n"
"and z20.d, z19.d, z3.d\n"
"asr z23.s, z23.s, #0x1f\n"
@@ -892,93 +892,93 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sqadd z19.s, z19.s, z20.s\n"
"38:" // Height 3: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z21.s\n"
+ "ld1rw { z22.s }, p2/Z, [x20]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z21.s\n"
- "add z10.s, z10.s, z21.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
- "add z11.s, z11.s, z21.s\n"
- "add z12.s, z12.s, z21.s\n"
".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z21.s\n"
- "add z14.s, z14.s, z21.s\n"
+ "add z8.s, z8.s, z22.s\n"
".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z15.s, z15.s, z21.s\n"
- "add z16.s, z16.s, z21.s\n"
+ "add z9.s, z9.s, z22.s\n"
+ "add z10.s, z10.s, z22.s\n"
".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z21.s\n"
- "add z18.s, z18.s, z21.s\n"
+ "add z11.s, z11.s, z22.s\n"
+ "add z12.s, z12.s, z22.s\n"
".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
- "ld1rw { z20.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z21.s\n"
- "add x20, %x[qp], %[minval]\n"
+ "add z13.s, z13.s, z22.s\n"
+ "add z14.s, z14.s, z22.s\n"
"ld1rw { z21.s }, p2/Z, [x20]\n"
- "smin z8.s, p2/M, z8.s, z20.s\n"
- "smin z9.s, p2/M, z9.s, z20.s\n"
- "smin z10.s, p2/M, z10.s, z20.s\n"
- "smin z11.s, p2/M, z11.s, z20.s\n"
- "smin z12.s, p2/M, z12.s, z20.s\n"
- "smin z13.s, p2/M, z13.s, z20.s\n"
- "smin z14.s, p2/M, z14.s, z20.s\n"
- "smin z15.s, p2/M, z15.s, z20.s\n"
- "smin z16.s, p2/M, z16.s, z20.s\n"
- "smin z17.s, p2/M, z17.s, z20.s\n"
- "smin z18.s, p2/M, z18.s, z20.s\n"
- "smin z19.s, p2/M, z19.s, z20.s\n"
- "smax z8.s, p2/M, z8.s, z21.s\n"
- "smax z9.s, p2/M, z9.s, z21.s\n"
- "smax z10.s, p2/M, z10.s, z21.s\n"
+ "add z15.s, z15.s, z22.s\n"
+ "add z16.s, z16.s, z22.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z22.s\n"
+ "add z18.s, z18.s, z22.s\n"
+ "ld1rw { z20.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z22.s\n"
+ "smin z8.s, p2/M, z8.s, z21.s\n"
+ "smin z9.s, p2/M, z9.s, z21.s\n"
+ "smin z10.s, p2/M, z10.s, z21.s\n"
+ "smin z11.s, p2/M, z11.s, z21.s\n"
+ "smin z12.s, p2/M, z12.s, z21.s\n"
+ "smin z13.s, p2/M, z13.s, z21.s\n"
+ "smin z14.s, p2/M, z14.s, z21.s\n"
+ "smin z15.s, p2/M, z15.s, z21.s\n"
+ "smin z16.s, p2/M, z16.s, z21.s\n"
+ "smin z17.s, p2/M, z17.s, z21.s\n"
+ "smin z18.s, p2/M, z18.s, z21.s\n"
+ "smin z19.s, p2/M, z19.s, z21.s\n"
+ "smax z8.s, p2/M, z8.s, z20.s\n"
+ "smax z9.s, p2/M, z9.s, z20.s\n"
+ "smax z10.s, p2/M, z10.s, z20.s\n"
+ "smax z11.s, p2/M, z11.s, z20.s\n"
+ "smax z12.s, p2/M, z12.s, z20.s\n"
+ "smax z13.s, p2/M, z13.s, z20.s\n"
+ "smax z14.s, p2/M, z14.s, z20.s\n"
+ "smax z15.s, p2/M, z15.s, z20.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "smax z11.s, p2/M, z11.s, z21.s\n"
- "smax z12.s, p2/M, z12.s, z21.s\n"
- "uzp1 z20.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z20.b\n"
- "smax z13.s, p2/M, z13.s, z21.s\n"
- "smax z14.s, p2/M, z14.s, z21.s\n"
+ "smax z16.s, p2/M, z16.s, z20.s\n"
+ "smax z17.s, p2/M, z17.s, z20.s\n"
+ "uzp1 z21.h, z10.h, z11.h\n"
+ "smax z18.s, p2/M, z18.s, z20.s\n"
+ "smax z19.s, p2/M, z19.s, z20.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
- "smax z15.s, p2/M, z15.s, z21.s\n"
- "smax z16.s, p2/M, z16.s, z21.s\n"
"uzp1 z20.h, z14.h, z15.h\n"
- "uzp1 z12.b, z12.b, z20.b\n"
- "smax z17.s, p2/M, z17.s, z21.s\n"
- "smax z18.s, p2/M, z18.s, z21.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z12.b }, p1, [x26]\n"
- "smax z19.s, p2/M, z19.s, z21.s\n"
+ "uzp1 z8.b, z8.b, z21.b\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "uzp1 z12.b, z12.b, z20.b\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z12.b }, p1, [x26]\n"
"st1b { z16.b }, p1, [x25]\n"
- "addvl x11, x11, #1\n"
"39:" // Height 3: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"mov z16.s, #0x0\n"
@@ -993,8 +993,8 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov x28, #0x0\n"
"43:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 44f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1019,89 +1019,89 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ble 47f\n"
"46:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p2/Z, [x10]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"sdot z8.s, z25.b, z3.b[0]\n"
"sdot z12.s, z25.b, z2.b[0]\n"
- "sdot z16.s, z25.b, z1.b[0]\n"
- "sdot z20.s, z25.b, z0.b[0]\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
"sdot z9.s, z24.b, z3.b[0]\n"
"sdot z13.s, z24.b, z2.b[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
+ "sdot z16.s, z25.b, z1.b[0]\n"
+ "sdot z20.s, z25.b, z0.b[0]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z24.b, z1.b[0]\n"
"sdot z21.s, z24.b, z0.b[0]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z25.b, z3.b[0]\n"
"sdot z14.s, z25.b, z2.b[0]\n"
"sdot z18.s, z25.b, z1.b[0]\n"
"sdot z22.s, z25.b, z0.b[0]\n"
- "ld1b { z25.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #4, MUL VL]\n"
"sdot z11.s, z24.b, z3.b[0]\n"
"sdot z15.s, z24.b, z2.b[0]\n"
"sdot z19.s, z24.b, z1.b[0]\n"
"sdot z23.s, z24.b, z0.b[0]\n"
- "ld1b { z24.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #5, MUL VL]\n"
"sdot z8.s, z25.b, z3.b[1]\n"
"sdot z12.s, z25.b, z2.b[1]\n"
"sdot z16.s, z25.b, z1.b[1]\n"
"sdot z20.s, z25.b, z0.b[1]\n"
- "ld1b { z25.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #6, MUL VL]\n"
"sdot z9.s, z24.b, z3.b[1]\n"
"sdot z13.s, z24.b, z2.b[1]\n"
"sdot z17.s, z24.b, z1.b[1]\n"
"sdot z21.s, z24.b, z0.b[1]\n"
- "ld1b { z24.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z24.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"sdot z10.s, z25.b, z3.b[1]\n"
"sdot z14.s, z25.b, z2.b[1]\n"
"sdot z18.s, z25.b, z1.b[1]\n"
"sdot z22.s, z25.b, z0.b[1]\n"
- "ld1b { z25.b }, p2/Z, [x9, #-8, MUL VL]\n"
"sdot z11.s, z24.b, z3.b[1]\n"
"sdot z15.s, z24.b, z2.b[1]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-8, MUL VL]\n"
"sdot z19.s, z24.b, z1.b[1]\n"
"sdot z23.s, z24.b, z0.b[1]\n"
- "ld1b { z24.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z25.b, z3.b[2]\n"
"sdot z12.s, z25.b, z2.b[2]\n"
"sdot z16.s, z25.b, z1.b[2]\n"
"sdot z20.s, z25.b, z0.b[2]\n"
- "ld1b { z25.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-6, MUL VL]\n"
"sdot z9.s, z24.b, z3.b[2]\n"
"sdot z13.s, z24.b, z2.b[2]\n"
"sdot z17.s, z24.b, z1.b[2]\n"
"sdot z21.s, z24.b, z0.b[2]\n"
- "ld1b { z24.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-5, MUL VL]\n"
"sdot z10.s, z25.b, z3.b[2]\n"
"sdot z14.s, z25.b, z2.b[2]\n"
"sdot z18.s, z25.b, z1.b[2]\n"
"sdot z22.s, z25.b, z0.b[2]\n"
- "ld1b { z25.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-4, MUL VL]\n"
"sdot z11.s, z24.b, z3.b[2]\n"
"sdot z15.s, z24.b, z2.b[2]\n"
"sdot z19.s, z24.b, z1.b[2]\n"
"sdot z23.s, z24.b, z0.b[2]\n"
- "ld1b { z24.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-3, MUL VL]\n"
"sdot z8.s, z25.b, z3.b[3]\n"
"sdot z12.s, z25.b, z2.b[3]\n"
"sdot z16.s, z25.b, z1.b[3]\n"
"sdot z20.s, z25.b, z0.b[3]\n"
- "ld1b { z25.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-2, MUL VL]\n"
"sdot z9.s, z24.b, z3.b[3]\n"
"sdot z13.s, z24.b, z2.b[3]\n"
"sdot z17.s, z24.b, z1.b[3]\n"
"sdot z21.s, z24.b, z0.b[3]\n"
- "ld1b { z24.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-1, MUL VL]\n"
"sdot z10.s, z25.b, z3.b[3]\n"
"sdot z14.s, z25.b, z2.b[3]\n"
"sdot z18.s, z25.b, z1.b[3]\n"
@@ -1113,24 +1113,24 @@ void sve_hybrid_s8qs_dot_6x4VL (
"bgt 46b\n"
"47:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p2/Z, [x10]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
"sdot z8.s, z25.b, z0.b[0]\n"
"sdot z12.s, z25.b, z1.b[0]\n"
- "sdot z16.s, z25.b, z2.b[0]\n"
- "sdot z20.s, z25.b, z3.b[0]\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z24.b, z0.b[0]\n"
"sdot z13.s, z24.b, z1.b[0]\n"
+ "sdot z16.s, z25.b, z2.b[0]\n"
+ "sdot z20.s, z25.b, z3.b[0]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z24.b, z2.b[0]\n"
"sdot z21.s, z24.b, z3.b[0]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z25.b, z0.b[0]\n"
"sdot z14.s, z25.b, z1.b[0]\n"
"sdot z18.s, z25.b, z2.b[0]\n"
@@ -1140,20 +1140,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z19.s, z24.b, z2.b[0]\n"
"sdot z23.s, z24.b, z3.b[0]\n"
"ble 48f\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z25.b, z0.b[1]\n"
"sdot z12.s, z25.b, z1.b[1]\n"
"sdot z16.s, z25.b, z2.b[1]\n"
"sdot z20.s, z25.b, z3.b[1]\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z24.b, z0.b[1]\n"
"sdot z13.s, z24.b, z1.b[1]\n"
"sdot z17.s, z24.b, z2.b[1]\n"
"sdot z21.s, z24.b, z3.b[1]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z25.b, z0.b[1]\n"
"sdot z14.s, z25.b, z1.b[1]\n"
"sdot z18.s, z25.b, z2.b[1]\n"
@@ -1163,20 +1163,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z19.s, z24.b, z2.b[1]\n"
"sdot z23.s, z24.b, z3.b[1]\n"
"ble 48f\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z25.b, z0.b[2]\n"
"sdot z12.s, z25.b, z1.b[2]\n"
"sdot z16.s, z25.b, z2.b[2]\n"
"sdot z20.s, z25.b, z3.b[2]\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z24.b, z0.b[2]\n"
"sdot z13.s, z24.b, z1.b[2]\n"
"sdot z17.s, z24.b, z2.b[2]\n"
"sdot z21.s, z24.b, z3.b[2]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z25.b, z0.b[2]\n"
"sdot z14.s, z25.b, z1.b[2]\n"
"sdot z18.s, z25.b, z2.b[2]\n"
@@ -1186,19 +1186,19 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z19.s, z24.b, z2.b[2]\n"
"sdot z23.s, z24.b, z3.b[2]\n"
"ble 48f\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z25.b, z0.b[3]\n"
"sdot z12.s, z25.b, z1.b[3]\n"
"sdot z16.s, z25.b, z2.b[3]\n"
"sdot z20.s, z25.b, z3.b[3]\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z24.b, z0.b[3]\n"
"sdot z13.s, z24.b, z1.b[3]\n"
"sdot z17.s, z24.b, z2.b[3]\n"
"sdot z21.s, z24.b, z3.b[3]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z25.b, z0.b[3]\n"
"sdot z14.s, z25.b, z1.b[3]\n"
"sdot z18.s, z25.b, z2.b[3]\n"
@@ -1214,18 +1214,18 @@ void sve_hybrid_s8qs_dot_6x4VL (
"bne 43b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"ld1w { z27.s }, p2/Z, [x14]\n"
- "add x26, x11, x20\n"
- "add x25, x26, x20\n"
"ld1w { z26.s }, p2/Z, [x14, #1, MUL VL]\n"
"ld1w { z25.s }, p2/Z, [x14, #2, MUL VL]\n"
- "add x24, x25, x20\n"
- "add z8.s, z8.s, z27.s\n"
"ld1w { z24.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
+ "add z8.s, z8.s, z27.s\n"
+ "add z12.s, z12.s, z27.s\n"
+ "add x24, x25, x20\n"
"add z9.s, z9.s, z26.s\n"
"add z10.s, z10.s, z25.s\n"
- "addvl x14, x14, #4\n"
"add z11.s, z11.s, z24.s\n"
- "add z12.s, z12.s, z27.s\n"
"add z13.s, z13.s, z26.s\n"
"add z14.s, z14.s, z25.s\n"
"add z15.s, z15.s, z24.s\n"
@@ -1287,11 +1287,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z25.s, z25.s, #0x1f\n"
"asr z24.s, z24.s, #0x1f\n"
"sqadd z8.s, z8.s, z27.s\n"
+ "and z27.d, z12.d, z0.d\n"
"sqadd z9.s, z9.s, z26.s\n"
+ "and z26.d, z13.d, z1.d\n"
"sqadd z10.s, z10.s, z25.s\n"
"sqadd z11.s, z11.s, z24.s\n"
- "and z27.d, z12.d, z0.d\n"
- "and z26.d, z13.d, z1.d\n"
"and z25.d, z14.d, z2.d\n"
"and z24.d, z15.d, z3.d\n"
"asr z27.s, z27.s, #0x1f\n"
@@ -1299,11 +1299,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z25.s, z25.s, #0x1f\n"
"asr z24.s, z24.s, #0x1f\n"
"sqadd z12.s, z12.s, z27.s\n"
+ "and z27.d, z16.d, z0.d\n"
"sqadd z13.s, z13.s, z26.s\n"
+ "and z26.d, z17.d, z1.d\n"
"sqadd z14.s, z14.s, z25.s\n"
"sqadd z15.s, z15.s, z24.s\n"
- "and z27.d, z16.d, z0.d\n"
- "and z26.d, z17.d, z1.d\n"
"and z25.d, z18.d, z2.d\n"
"and z24.d, z19.d, z3.d\n"
"asr z27.s, z27.s, #0x1f\n"
@@ -1311,11 +1311,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z25.s, z25.s, #0x1f\n"
"asr z24.s, z24.s, #0x1f\n"
"sqadd z16.s, z16.s, z27.s\n"
+ "and z27.d, z20.d, z0.d\n"
"sqadd z17.s, z17.s, z26.s\n"
+ "and z26.d, z21.d, z1.d\n"
"sqadd z18.s, z18.s, z25.s\n"
"sqadd z19.s, z19.s, z24.s\n"
- "and z27.d, z20.d, z0.d\n"
- "and z26.d, z21.d, z1.d\n"
"and z25.d, z22.d, z2.d\n"
"and z24.d, z23.d, z3.d\n"
"asr z27.s, z27.s, #0x1f\n"
@@ -1328,43 +1328,43 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sqadd z23.s, z23.s, z24.s\n"
"51:" // Height 4: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z25.s }, p2/Z, [x20]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z25.s\n"
+ "ld1rw { z25.s }, p2/Z, [x20]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z25.s\n"
- "add z10.s, z10.s, z25.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
- "add z11.s, z11.s, z25.s\n"
- "add z12.s, z12.s, z25.s\n"
".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z25.s\n"
- "add z14.s, z14.s, z25.s\n"
+ "add z8.s, z8.s, z25.s\n"
".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z15.s, z15.s, z25.s\n"
- "add z16.s, z16.s, z25.s\n"
+ "add z9.s, z9.s, z25.s\n"
+ "add z10.s, z10.s, z25.s\n"
".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z25.s\n"
- "add z18.s, z18.s, z25.s\n"
+ "add z11.s, z11.s, z25.s\n"
+ "add z12.s, z12.s, z25.s\n"
".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z25.s\n"
- "add z20.s, z20.s, z25.s\n"
+ "add z13.s, z13.s, z25.s\n"
+ "add z14.s, z14.s, z25.s\n"
".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
- "add z21.s, z21.s, z25.s\n"
- "add z22.s, z22.s, z25.s\n"
+ "add z15.s, z15.s, z25.s\n"
+ "add z16.s, z16.s, z25.s\n"
".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z17.s, z17.s, z25.s\n"
+ "add z18.s, z18.s, z25.s\n"
"ld1rw { z24.s }, p2/Z, [x20]\n"
- "add z23.s, z23.s, z25.s\n"
+ "add z19.s, z19.s, z25.s\n"
+ "add z20.s, z20.s, z25.s\n"
"add x20, %x[qp], %[minval]\n"
- "ld1rw { z25.s }, p2/Z, [x20]\n"
+ "add z21.s, z21.s, z25.s\n"
+ "add z22.s, z22.s, z25.s\n"
+ "ld1rw { z26.s }, p2/Z, [x20]\n"
+ "add z23.s, z23.s, z25.s\n"
"smin z8.s, p2/M, z8.s, z24.s\n"
"smin z9.s, p2/M, z9.s, z24.s\n"
"smin z10.s, p2/M, z10.s, z24.s\n"
@@ -1381,60 +1381,60 @@ void sve_hybrid_s8qs_dot_6x4VL (
"smin z21.s, p2/M, z21.s, z24.s\n"
"smin z22.s, p2/M, z22.s, z24.s\n"
"smin z23.s, p2/M, z23.s, z24.s\n"
- "smax z8.s, p2/M, z8.s, z25.s\n"
- "smax z9.s, p2/M, z9.s, z25.s\n"
- "smax z10.s, p2/M, z10.s, z25.s\n"
+ "smax z8.s, p2/M, z8.s, z26.s\n"
+ "smax z9.s, p2/M, z9.s, z26.s\n"
+ "smax z10.s, p2/M, z10.s, z26.s\n"
+ "smax z11.s, p2/M, z11.s, z26.s\n"
+ "smax z12.s, p2/M, z12.s, z26.s\n"
+ "smax z13.s, p2/M, z13.s, z26.s\n"
+ "smax z14.s, p2/M, z14.s, z26.s\n"
+ "smax z15.s, p2/M, z15.s, z26.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "smax z11.s, p2/M, z11.s, z25.s\n"
- "smax z12.s, p2/M, z12.s, z25.s\n"
- "uzp1 z24.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z24.b\n"
- "smax z13.s, p2/M, z13.s, z25.s\n"
- "smax z14.s, p2/M, z14.s, z25.s\n"
+ "smax z16.s, p2/M, z16.s, z26.s\n"
+ "smax z17.s, p2/M, z17.s, z26.s\n"
+ "uzp1 z25.h, z10.h, z11.h\n"
+ "smax z18.s, p2/M, z18.s, z26.s\n"
+ "smax z19.s, p2/M, z19.s, z26.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
- "smax z15.s, p2/M, z15.s, z25.s\n"
- "smax z16.s, p2/M, z16.s, z25.s\n"
+ "smax z20.s, p2/M, z20.s, z26.s\n"
+ "smax z21.s, p2/M, z21.s, z26.s\n"
"uzp1 z24.h, z14.h, z15.h\n"
- "uzp1 z12.b, z12.b, z24.b\n"
- "smax z17.s, p2/M, z17.s, z25.s\n"
- "smax z18.s, p2/M, z18.s, z25.s\n"
+ "smax z22.s, p2/M, z22.s, z26.s\n"
+ "smax z23.s, p2/M, z23.s, z26.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z12.b }, p1, [x26]\n"
- "smax z19.s, p2/M, z19.s, z25.s\n"
- "smax z20.s, p2/M, z20.s, z25.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
- "smax z21.s, p2/M, z21.s, z25.s\n"
- "smax z22.s, p2/M, z22.s, z25.s\n"
+ "uzp1 z8.b, z8.b, z25.b\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
"uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z12.b, z12.b, z24.b\n"
+ "uzp1 z17.h, z22.h, z23.h\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z20.b, z20.b, z17.b\n"
+ "st1b { z12.b }, p1, [x26]\n"
"st1b { z16.b }, p1, [x25]\n"
- "smax z23.s, p2/M, z23.s, z25.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
"st1b { z20.b }, p1, [x24]\n"
- "addvl x11, x11, #1\n"
"52:" // Height 4: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"mov z16.s, #0x0\n"
@@ -1453,8 +1453,8 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov x28, #0x0\n"
"56:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 57f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1482,105 +1482,105 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ble 60f\n"
"59:" // Height 5: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p2/Z, [x10]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z4.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x22]\n"
- "ld1b { z29.b }, p2/Z, [x9]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"sdot z8.s, z29.b, z4.b[0]\n"
"sdot z12.s, z29.b, z3.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "sdot z9.s, z28.b, z4.b[0]\n"
"sdot z16.s, z29.b, z2.b[0]\n"
"sdot z20.s, z29.b, z1.b[0]\n"
- "add x25, x25, #0x10\n"
"sdot z24.s, z29.b, z0.b[0]\n"
- "sdot z9.s, z28.b, z4.b[0]\n"
- "ld1b { z29.b }, p2/Z, [x9, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
"sdot z13.s, z28.b, z3.b[0]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z28.b, z2.b[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"sdot z21.s, z28.b, z1.b[0]\n"
"sdot z25.s, z28.b, z0.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z29.b, z4.b[0]\n"
"sdot z14.s, z29.b, z3.b[0]\n"
"sdot z18.s, z29.b, z2.b[0]\n"
"sdot z22.s, z29.b, z1.b[0]\n"
"sdot z26.s, z29.b, z0.b[0]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #4, MUL VL]\n"
"sdot z11.s, z28.b, z4.b[0]\n"
- "ld1b { z29.b }, p2/Z, [x9, #4, MUL VL]\n"
"sdot z15.s, z28.b, z3.b[0]\n"
"sdot z19.s, z28.b, z2.b[0]\n"
"sdot z23.s, z28.b, z1.b[0]\n"
"sdot z27.s, z28.b, z0.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #5, MUL VL]\n"
"sdot z8.s, z29.b, z4.b[1]\n"
"sdot z12.s, z29.b, z3.b[1]\n"
"sdot z16.s, z29.b, z2.b[1]\n"
"sdot z20.s, z29.b, z1.b[1]\n"
"sdot z24.s, z29.b, z0.b[1]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #6, MUL VL]\n"
"sdot z9.s, z28.b, z4.b[1]\n"
- "ld1b { z29.b }, p2/Z, [x9, #6, MUL VL]\n"
"sdot z13.s, z28.b, z3.b[1]\n"
"sdot z17.s, z28.b, z2.b[1]\n"
"sdot z21.s, z28.b, z1.b[1]\n"
"sdot z25.s, z28.b, z0.b[1]\n"
- "ld1b { z28.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z28.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"sdot z10.s, z29.b, z4.b[1]\n"
"sdot z14.s, z29.b, z3.b[1]\n"
"sdot z18.s, z29.b, z2.b[1]\n"
"sdot z22.s, z29.b, z1.b[1]\n"
"sdot z26.s, z29.b, z0.b[1]\n"
"sdot z11.s, z28.b, z4.b[1]\n"
- "ld1b { z29.b }, p2/Z, [x9, #-8, MUL VL]\n"
"sdot z15.s, z28.b, z3.b[1]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #-8, MUL VL]\n"
"sdot z19.s, z28.b, z2.b[1]\n"
"sdot z23.s, z28.b, z1.b[1]\n"
"sdot z27.s, z28.b, z0.b[1]\n"
- "ld1b { z28.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z29.b, z4.b[2]\n"
"sdot z12.s, z29.b, z3.b[2]\n"
"sdot z16.s, z29.b, z2.b[2]\n"
"sdot z20.s, z29.b, z1.b[2]\n"
"sdot z24.s, z29.b, z0.b[2]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #-6, MUL VL]\n"
"sdot z9.s, z28.b, z4.b[2]\n"
- "ld1b { z29.b }, p2/Z, [x9, #-6, MUL VL]\n"
"sdot z13.s, z28.b, z3.b[2]\n"
"sdot z17.s, z28.b, z2.b[2]\n"
"sdot z21.s, z28.b, z1.b[2]\n"
"sdot z25.s, z28.b, z0.b[2]\n"
- "ld1b { z28.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #-5, MUL VL]\n"
"sdot z10.s, z29.b, z4.b[2]\n"
"sdot z14.s, z29.b, z3.b[2]\n"
"sdot z18.s, z29.b, z2.b[2]\n"
"sdot z22.s, z29.b, z1.b[2]\n"
"sdot z26.s, z29.b, z0.b[2]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #-4, MUL VL]\n"
"sdot z11.s, z28.b, z4.b[2]\n"
- "ld1b { z29.b }, p2/Z, [x9, #-4, MUL VL]\n"
"sdot z15.s, z28.b, z3.b[2]\n"
"sdot z19.s, z28.b, z2.b[2]\n"
"sdot z23.s, z28.b, z1.b[2]\n"
"sdot z27.s, z28.b, z0.b[2]\n"
- "ld1b { z28.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #-3, MUL VL]\n"
"sdot z8.s, z29.b, z4.b[3]\n"
"sdot z12.s, z29.b, z3.b[3]\n"
"sdot z16.s, z29.b, z2.b[3]\n"
"sdot z20.s, z29.b, z1.b[3]\n"
"sdot z24.s, z29.b, z0.b[3]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #-2, MUL VL]\n"
"sdot z9.s, z28.b, z4.b[3]\n"
- "ld1b { z29.b }, p2/Z, [x9, #-2, MUL VL]\n"
"sdot z13.s, z28.b, z3.b[3]\n"
"sdot z17.s, z28.b, z2.b[3]\n"
"sdot z21.s, z28.b, z1.b[3]\n"
"sdot z25.s, z28.b, z0.b[3]\n"
- "ld1b { z28.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #-1, MUL VL]\n"
"sdot z10.s, z29.b, z4.b[3]\n"
"sdot z14.s, z29.b, z3.b[3]\n"
"sdot z18.s, z29.b, z2.b[3]\n"
@@ -1594,27 +1594,27 @@ void sve_hybrid_s8qs_dot_6x4VL (
"bgt 59b\n"
"60:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p2/Z, [x10]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
"ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z29.b }, p2/Z, [x9]\n"
"sdot z8.s, z29.b, z0.b[0]\n"
"sdot z12.s, z29.b, z1.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "sdot z9.s, z28.b, z0.b[0]\n"
+ "sdot z13.s, z28.b, z1.b[0]\n"
"sdot z16.s, z29.b, z2.b[0]\n"
"sdot z20.s, z29.b, z3.b[0]\n"
"sdot z24.s, z29.b, z4.b[0]\n"
- "sdot z9.s, z28.b, z0.b[0]\n"
- "ld1b { z29.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sdot z13.s, z28.b, z1.b[0]\n"
"sdot z17.s, z28.b, z2.b[0]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z21.s, z28.b, z3.b[0]\n"
"sdot z25.s, z28.b, z4.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z28.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z29.b, z0.b[0]\n"
"sdot z14.s, z29.b, z1.b[0]\n"
"sdot z18.s, z29.b, z2.b[0]\n"
@@ -1626,23 +1626,23 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z23.s, z28.b, z3.b[0]\n"
"sdot z27.s, z28.b, z4.b[0]\n"
"ble 61f\n"
- "ld1b { z29.b }, p2/Z, [x9]\n"
- "ld1b { z28.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x10]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z29.b, z0.b[1]\n"
"sdot z12.s, z29.b, z1.b[1]\n"
"sdot z16.s, z29.b, z2.b[1]\n"
"sdot z20.s, z29.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z29.b, z4.b[1]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z28.b, z0.b[1]\n"
- "ld1b { z29.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z28.b, z1.b[1]\n"
"sdot z17.s, z28.b, z2.b[1]\n"
"sdot z21.s, z28.b, z3.b[1]\n"
"sdot z25.s, z28.b, z4.b[1]\n"
- "ld1b { z28.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z28.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z29.b, z0.b[1]\n"
+ "addvl x10, x10, #4\n"
"sdot z14.s, z29.b, z1.b[1]\n"
"sdot z18.s, z29.b, z2.b[1]\n"
"sdot z22.s, z29.b, z3.b[1]\n"
@@ -1653,23 +1653,23 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z23.s, z28.b, z3.b[1]\n"
"sdot z27.s, z28.b, z4.b[1]\n"
"ble 61f\n"
- "ld1b { z29.b }, p2/Z, [x9]\n"
- "ld1b { z28.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x10]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z29.b, z0.b[2]\n"
"sdot z12.s, z29.b, z1.b[2]\n"
"sdot z16.s, z29.b, z2.b[2]\n"
"sdot z20.s, z29.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z29.b, z4.b[2]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z28.b, z0.b[2]\n"
- "ld1b { z29.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z28.b, z1.b[2]\n"
"sdot z17.s, z28.b, z2.b[2]\n"
"sdot z21.s, z28.b, z3.b[2]\n"
"sdot z25.s, z28.b, z4.b[2]\n"
- "ld1b { z28.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z28.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z29.b, z0.b[2]\n"
+ "addvl x10, x10, #4\n"
"sdot z14.s, z29.b, z1.b[2]\n"
"sdot z18.s, z29.b, z2.b[2]\n"
"sdot z22.s, z29.b, z3.b[2]\n"
@@ -1680,21 +1680,21 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z23.s, z28.b, z3.b[2]\n"
"sdot z27.s, z28.b, z4.b[2]\n"
"ble 61f\n"
- "ld1b { z29.b }, p2/Z, [x9]\n"
- "ld1b { z28.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x10]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z29.b, z0.b[3]\n"
"sdot z12.s, z29.b, z1.b[3]\n"
"sdot z16.s, z29.b, z2.b[3]\n"
"sdot z20.s, z29.b, z3.b[3]\n"
"sdot z24.s, z29.b, z4.b[3]\n"
+ "ld1b { z29.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z28.b, z0.b[3]\n"
- "ld1b { z29.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z13.s, z28.b, z1.b[3]\n"
"sdot z17.s, z28.b, z2.b[3]\n"
"sdot z21.s, z28.b, z3.b[3]\n"
"sdot z25.s, z28.b, z4.b[3]\n"
- "ld1b { z28.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z28.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z29.b, z0.b[3]\n"
"sdot z14.s, z29.b, z1.b[3]\n"
"sdot z18.s, z29.b, z2.b[3]\n"
@@ -1711,20 +1711,20 @@ void sve_hybrid_s8qs_dot_6x4VL (
"cmp x28, x20\n"
"bne 56b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x26, x11, x20\n"
"ld1w { z31.s }, p2/Z, [x14]\n"
- "add x25, x26, x20\n"
"ld1w { z30.s }, p2/Z, [x14, #1, MUL VL]\n"
"ld1w { z29.s }, p2/Z, [x14, #2, MUL VL]\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
"ld1w { z28.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
"add z8.s, z8.s, z31.s\n"
+ "add z12.s, z12.s, z31.s\n"
+ "add x24, x25, x20\n"
+ "add x23, x24, x20\n"
"add z9.s, z9.s, z30.s\n"
- "addvl x14, x14, #4\n"
"add z10.s, z10.s, z29.s\n"
"add z11.s, z11.s, z28.s\n"
- "add z12.s, z12.s, z31.s\n"
"add z13.s, z13.s, z30.s\n"
"add z14.s, z14.s, z29.s\n"
"add z15.s, z15.s, z28.s\n"
@@ -1794,11 +1794,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"sqadd z8.s, z8.s, z31.s\n"
+ "and z31.d, z12.d, z0.d\n"
"sqadd z9.s, z9.s, z30.s\n"
+ "and z30.d, z13.d, z1.d\n"
"sqadd z10.s, z10.s, z29.s\n"
"sqadd z11.s, z11.s, z28.s\n"
- "and z31.d, z12.d, z0.d\n"
- "and z30.d, z13.d, z1.d\n"
"and z29.d, z14.d, z2.d\n"
"and z28.d, z15.d, z3.d\n"
"asr z31.s, z31.s, #0x1f\n"
@@ -1806,11 +1806,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"sqadd z12.s, z12.s, z31.s\n"
+ "and z31.d, z16.d, z0.d\n"
"sqadd z13.s, z13.s, z30.s\n"
+ "and z30.d, z17.d, z1.d\n"
"sqadd z14.s, z14.s, z29.s\n"
"sqadd z15.s, z15.s, z28.s\n"
- "and z31.d, z16.d, z0.d\n"
- "and z30.d, z17.d, z1.d\n"
"and z29.d, z18.d, z2.d\n"
"and z28.d, z19.d, z3.d\n"
"asr z31.s, z31.s, #0x1f\n"
@@ -1818,11 +1818,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"sqadd z16.s, z16.s, z31.s\n"
+ "and z31.d, z20.d, z0.d\n"
"sqadd z17.s, z17.s, z30.s\n"
+ "and z30.d, z21.d, z1.d\n"
"sqadd z18.s, z18.s, z29.s\n"
"sqadd z19.s, z19.s, z28.s\n"
- "and z31.d, z20.d, z0.d\n"
- "and z30.d, z21.d, z1.d\n"
"and z29.d, z22.d, z2.d\n"
"and z28.d, z23.d, z3.d\n"
"asr z31.s, z31.s, #0x1f\n"
@@ -1830,11 +1830,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"sqadd z20.s, z20.s, z31.s\n"
+ "and z31.d, z24.d, z0.d\n"
"sqadd z21.s, z21.s, z30.s\n"
+ "and z30.d, z25.d, z1.d\n"
"sqadd z22.s, z22.s, z29.s\n"
"sqadd z23.s, z23.s, z28.s\n"
- "and z31.d, z24.d, z0.d\n"
- "and z30.d, z25.d, z1.d\n"
"and z29.d, z26.d, z2.d\n"
"and z28.d, z27.d, z3.d\n"
"asr z31.s, z31.s, #0x1f\n"
@@ -1847,51 +1847,51 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sqadd z27.s, z27.s, z28.s\n"
"64:" // Height 5: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z29.s }, p2/Z, [x20]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z29.s\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z29.s\n"
- "add z10.s, z10.s, z29.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
- "add z11.s, z11.s, z29.s\n"
- "add z12.s, z12.s, z29.s\n"
".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z29.s\n"
- "add z14.s, z14.s, z29.s\n"
+ "add z8.s, z8.s, z29.s\n"
".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z15.s, z15.s, z29.s\n"
- "add z16.s, z16.s, z29.s\n"
+ "add z9.s, z9.s, z29.s\n"
+ "add z10.s, z10.s, z29.s\n"
".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z29.s\n"
- "add z18.s, z18.s, z29.s\n"
+ "add z11.s, z11.s, z29.s\n"
+ "add z12.s, z12.s, z29.s\n"
".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z29.s\n"
- "add z20.s, z20.s, z29.s\n"
+ "add z13.s, z13.s, z29.s\n"
+ "add z14.s, z14.s, z29.s\n"
".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
- "add z21.s, z21.s, z29.s\n"
- "add z22.s, z22.s, z29.s\n"
+ "add z15.s, z15.s, z29.s\n"
+ "add z16.s, z16.s, z29.s\n"
".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z29.s\n"
- "add z24.s, z24.s, z29.s\n"
+ "add z17.s, z17.s, z29.s\n"
+ "add z18.s, z18.s, z29.s\n"
".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z29.s\n"
- "add z26.s, z26.s, z29.s\n"
+ "add z19.s, z19.s, z29.s\n"
+ "add z20.s, z20.s, z29.s\n"
".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z21.s, z21.s, z29.s\n"
+ "add z22.s, z22.s, z29.s\n"
"ld1rw { z28.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z29.s\n"
+ "add z23.s, z23.s, z29.s\n"
+ "add z24.s, z24.s, z29.s\n"
"add x20, %x[qp], %[minval]\n"
- "ld1rw { z29.s }, p2/Z, [x20]\n"
+ "add z25.s, z25.s, z29.s\n"
+ "add z26.s, z26.s, z29.s\n"
+ "ld1rw { z30.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z29.s\n"
"smin z8.s, p2/M, z8.s, z28.s\n"
"smin z9.s, p2/M, z9.s, z28.s\n"
"smin z10.s, p2/M, z10.s, z28.s\n"
@@ -1912,71 +1912,72 @@ void sve_hybrid_s8qs_dot_6x4VL (
"smin z25.s, p2/M, z25.s, z28.s\n"
"smin z26.s, p2/M, z26.s, z28.s\n"
"smin z27.s, p2/M, z27.s, z28.s\n"
- "smax z8.s, p2/M, z8.s, z29.s\n"
- "smax z9.s, p2/M, z9.s, z29.s\n"
- "smax z10.s, p2/M, z10.s, z29.s\n"
+ "smax z8.s, p2/M, z8.s, z30.s\n"
+ "smax z9.s, p2/M, z9.s, z30.s\n"
+ "smax z10.s, p2/M, z10.s, z30.s\n"
+ "smax z11.s, p2/M, z11.s, z30.s\n"
+ "smax z12.s, p2/M, z12.s, z30.s\n"
+ "smax z13.s, p2/M, z13.s, z30.s\n"
+ "smax z14.s, p2/M, z14.s, z30.s\n"
+ "smax z15.s, p2/M, z15.s, z30.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "smax z11.s, p2/M, z11.s, z29.s\n"
- "smax z12.s, p2/M, z12.s, z29.s\n"
- "uzp1 z28.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z28.b\n"
- "smax z13.s, p2/M, z13.s, z29.s\n"
- "smax z14.s, p2/M, z14.s, z29.s\n"
+ "smax z16.s, p2/M, z16.s, z30.s\n"
+ "smax z17.s, p2/M, z17.s, z30.s\n"
+ "uzp1 z29.h, z10.h, z11.h\n"
+ "smax z18.s, p2/M, z18.s, z30.s\n"
+ "smax z19.s, p2/M, z19.s, z30.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
- "smax z15.s, p2/M, z15.s, z29.s\n"
- "smax z16.s, p2/M, z16.s, z29.s\n"
+ "smax z20.s, p2/M, z20.s, z30.s\n"
+ "smax z21.s, p2/M, z21.s, z30.s\n"
"uzp1 z28.h, z14.h, z15.h\n"
- "uzp1 z12.b, z12.b, z28.b\n"
- "smax z17.s, p2/M, z17.s, z29.s\n"
- "smax z18.s, p2/M, z18.s, z29.s\n"
+ "smax z22.s, p2/M, z22.s, z30.s\n"
+ "smax z23.s, p2/M, z23.s, z30.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z12.b }, p1, [x26]\n"
- "smax z19.s, p2/M, z19.s, z29.s\n"
- "smax z20.s, p2/M, z20.s, z29.s\n"
+ "uzp1 z8.b, z8.b, z29.b\n"
+ "smax z24.s, p2/M, z24.s, z30.s\n"
+ "smax z25.s, p2/M, z25.s, z30.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
- "smax z21.s, p2/M, z21.s, z29.s\n"
- "smax z22.s, p2/M, z22.s, z29.s\n"
+ "smax z26.s, p2/M, z26.s, z30.s\n"
+ "smax z27.s, p2/M, z27.s, z30.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x25]\n"
- "smax z23.s, p2/M, z23.s, z29.s\n"
- "smax z24.s, p2/M, z24.s, z29.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
- "smax z25.s, p2/M, z25.s, z29.s\n"
- "smax z26.s, p2/M, z26.s, z29.s\n"
+ "uzp1 z12.b, z12.b, z28.b\n"
+ "uzp1 z18.h, z22.h, z23.h\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "st1b { z12.b }, p1, [x26]\n"
+ "uzp1 z20.b, z20.b, z18.b\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z16.b }, p1, [x25]\n"
"st1b { z20.b }, p1, [x24]\n"
- "smax z27.s, p2/M, z27.s, z29.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"st1b { z24.b }, p1, [x23]\n"
- "addvl x11, x11, #1\n"
"65:" // Height 5: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x6\n"
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"mov z16.s, #0x0\n"
@@ -1999,8 +2000,8 @@ void sve_hybrid_s8qs_dot_6x4VL (
"mov x28, #0x0\n"
"69:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 70f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -2031,121 +2032,121 @@ void sve_hybrid_s8qs_dot_6x4VL (
"ble 73f\n"
"72:" // Height 6: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z1.b }, p2/Z, [x10]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z7.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z6.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z5.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z4.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
"ld1rqb { z2.b }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1b { z1.b }, p2/Z, [x9]\n"
- "ld1b { z0.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"sdot z8.s, z1.b, z7.b[0]\n"
"sdot z12.s, z1.b, z6.b[0]\n"
+ "add x21, x21, #0x10\n"
"sdot z16.s, z1.b, z5.b[0]\n"
"sdot z20.s, z1.b, z4.b[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"sdot z24.s, z1.b, z3.b[0]\n"
"sdot z28.s, z1.b, z2.b[0]\n"
- "ld1b { z1.b }, p2/Z, [x9, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
+ "ld1b { z1.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z0.b, z7.b[0]\n"
"sdot z13.s, z0.b, z6.b[0]\n"
"sdot z17.s, z0.b, z5.b[0]\n"
"sdot z21.s, z0.b, z4.b[0]\n"
"sdot z25.s, z0.b, z3.b[0]\n"
"sdot z29.s, z0.b, z2.b[0]\n"
- "ld1b { z0.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z1.b, z7.b[0]\n"
"sdot z14.s, z1.b, z6.b[0]\n"
"sdot z18.s, z1.b, z5.b[0]\n"
"sdot z22.s, z1.b, z4.b[0]\n"
"sdot z26.s, z1.b, z3.b[0]\n"
"sdot z30.s, z1.b, z2.b[0]\n"
- "ld1b { z1.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x10, #4, MUL VL]\n"
"sdot z11.s, z0.b, z7.b[0]\n"
"sdot z15.s, z0.b, z6.b[0]\n"
"sdot z19.s, z0.b, z5.b[0]\n"
"sdot z23.s, z0.b, z4.b[0]\n"
"sdot z27.s, z0.b, z3.b[0]\n"
"sdot z31.s, z0.b, z2.b[0]\n"
- "ld1b { z0.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #5, MUL VL]\n"
"sdot z8.s, z1.b, z7.b[1]\n"
"sdot z12.s, z1.b, z6.b[1]\n"
"sdot z16.s, z1.b, z5.b[1]\n"
"sdot z20.s, z1.b, z4.b[1]\n"
"sdot z24.s, z1.b, z3.b[1]\n"
"sdot z28.s, z1.b, z2.b[1]\n"
- "ld1b { z1.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x10, #6, MUL VL]\n"
"sdot z9.s, z0.b, z7.b[1]\n"
"sdot z13.s, z0.b, z6.b[1]\n"
"sdot z17.s, z0.b, z5.b[1]\n"
"sdot z21.s, z0.b, z4.b[1]\n"
"sdot z25.s, z0.b, z3.b[1]\n"
"sdot z29.s, z0.b, z2.b[1]\n"
- "ld1b { z0.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z0.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
"sdot z10.s, z1.b, z7.b[1]\n"
"sdot z14.s, z1.b, z6.b[1]\n"
"sdot z18.s, z1.b, z5.b[1]\n"
"sdot z22.s, z1.b, z4.b[1]\n"
"sdot z26.s, z1.b, z3.b[1]\n"
"sdot z30.s, z1.b, z2.b[1]\n"
- "ld1b { z1.b }, p2/Z, [x9, #-8, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-8, MUL VL]\n"
"sdot z11.s, z0.b, z7.b[1]\n"
"sdot z15.s, z0.b, z6.b[1]\n"
"sdot z19.s, z0.b, z5.b[1]\n"
"sdot z23.s, z0.b, z4.b[1]\n"
"sdot z27.s, z0.b, z3.b[1]\n"
"sdot z31.s, z0.b, z2.b[1]\n"
- "ld1b { z0.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z1.b, z7.b[2]\n"
"sdot z12.s, z1.b, z6.b[2]\n"
"sdot z16.s, z1.b, z5.b[2]\n"
"sdot z20.s, z1.b, z4.b[2]\n"
"sdot z24.s, z1.b, z3.b[2]\n"
"sdot z28.s, z1.b, z2.b[2]\n"
- "ld1b { z1.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-6, MUL VL]\n"
"sdot z9.s, z0.b, z7.b[2]\n"
"sdot z13.s, z0.b, z6.b[2]\n"
"sdot z17.s, z0.b, z5.b[2]\n"
"sdot z21.s, z0.b, z4.b[2]\n"
"sdot z25.s, z0.b, z3.b[2]\n"
"sdot z29.s, z0.b, z2.b[2]\n"
- "ld1b { z0.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-5, MUL VL]\n"
"sdot z10.s, z1.b, z7.b[2]\n"
"sdot z14.s, z1.b, z6.b[2]\n"
"sdot z18.s, z1.b, z5.b[2]\n"
"sdot z22.s, z1.b, z4.b[2]\n"
"sdot z26.s, z1.b, z3.b[2]\n"
"sdot z30.s, z1.b, z2.b[2]\n"
- "ld1b { z1.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-4, MUL VL]\n"
"sdot z11.s, z0.b, z7.b[2]\n"
"sdot z15.s, z0.b, z6.b[2]\n"
"sdot z19.s, z0.b, z5.b[2]\n"
"sdot z23.s, z0.b, z4.b[2]\n"
"sdot z27.s, z0.b, z3.b[2]\n"
"sdot z31.s, z0.b, z2.b[2]\n"
- "ld1b { z0.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-3, MUL VL]\n"
"sdot z8.s, z1.b, z7.b[3]\n"
"sdot z12.s, z1.b, z6.b[3]\n"
"sdot z16.s, z1.b, z5.b[3]\n"
"sdot z20.s, z1.b, z4.b[3]\n"
"sdot z24.s, z1.b, z3.b[3]\n"
"sdot z28.s, z1.b, z2.b[3]\n"
- "ld1b { z1.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-2, MUL VL]\n"
"sdot z9.s, z0.b, z7.b[3]\n"
"sdot z13.s, z0.b, z6.b[3]\n"
"sdot z17.s, z0.b, z5.b[3]\n"
"sdot z21.s, z0.b, z4.b[3]\n"
"sdot z25.s, z0.b, z3.b[3]\n"
"sdot z29.s, z0.b, z2.b[3]\n"
- "ld1b { z0.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-1, MUL VL]\n"
"sdot z10.s, z1.b, z7.b[3]\n"
"sdot z14.s, z1.b, z6.b[3]\n"
"sdot z18.s, z1.b, z5.b[3]\n"
@@ -2161,30 +2162,30 @@ void sve_hybrid_s8qs_dot_6x4VL (
"bgt 72b\n"
"73:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z7.b }, p2/Z, [x10]\n"
+ "ld1b { z6.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
"ld1rqb { z4.b }, p0/Z, [x22]\n"
"ld1rqb { z5.b }, p0/Z, [x21]\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
"sdot z8.s, z7.b, z0.b[0]\n"
"sdot z12.s, z7.b, z1.b[0]\n"
+ "sdot z9.s, z6.b, z0.b[0]\n"
+ "sdot z13.s, z6.b, z1.b[0]\n"
"sdot z16.s, z7.b, z2.b[0]\n"
"sdot z20.s, z7.b, z3.b[0]\n"
"sdot z24.s, z7.b, z4.b[0]\n"
"sdot z28.s, z7.b, z5.b[0]\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sdot z9.s, z6.b, z0.b[0]\n"
- "sdot z13.s, z6.b, z1.b[0]\n"
+ "ld1b { z7.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z6.b, z2.b[0]\n"
"sdot z21.s, z6.b, z3.b[0]\n"
"sdot z25.s, z6.b, z4.b[0]\n"
"sdot z29.s, z6.b, z5.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z6.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z7.b, z0.b[0]\n"
"sdot z14.s, z7.b, z1.b[0]\n"
"sdot z18.s, z7.b, z2.b[0]\n"
@@ -2198,25 +2199,25 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z27.s, z6.b, z4.b[0]\n"
"sdot z31.s, z6.b, z5.b[0]\n"
"ble 74f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x10]\n"
+ "ld1b { z6.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z7.b, z0.b[1]\n"
"sdot z12.s, z7.b, z1.b[1]\n"
"sdot z16.s, z7.b, z2.b[1]\n"
"sdot z20.s, z7.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z7.b, z4.b[1]\n"
"sdot z28.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z6.b, z1.b[1]\n"
"sdot z17.s, z6.b, z2.b[1]\n"
"sdot z21.s, z6.b, z3.b[1]\n"
"sdot z25.s, z6.b, z4.b[1]\n"
"sdot z29.s, z6.b, z5.b[1]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z6.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z7.b, z0.b[1]\n"
+ "addvl x10, x10, #4\n"
"sdot z14.s, z7.b, z1.b[1]\n"
"sdot z18.s, z7.b, z2.b[1]\n"
"sdot z22.s, z7.b, z3.b[1]\n"
@@ -2229,25 +2230,25 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z27.s, z6.b, z4.b[1]\n"
"sdot z31.s, z6.b, z5.b[1]\n"
"ble 74f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x10]\n"
+ "ld1b { z6.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z7.b, z0.b[2]\n"
"sdot z12.s, z7.b, z1.b[2]\n"
"sdot z16.s, z7.b, z2.b[2]\n"
"sdot z20.s, z7.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z7.b, z4.b[2]\n"
"sdot z28.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
"sdot z9.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z6.b, z1.b[2]\n"
"sdot z17.s, z6.b, z2.b[2]\n"
"sdot z21.s, z6.b, z3.b[2]\n"
"sdot z25.s, z6.b, z4.b[2]\n"
"sdot z29.s, z6.b, z5.b[2]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z6.b }, p2/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z7.b, z0.b[2]\n"
+ "addvl x10, x10, #4\n"
"sdot z14.s, z7.b, z1.b[2]\n"
"sdot z18.s, z7.b, z2.b[2]\n"
"sdot z22.s, z7.b, z3.b[2]\n"
@@ -2260,23 +2261,23 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sdot z27.s, z6.b, z4.b[2]\n"
"sdot z31.s, z6.b, z5.b[2]\n"
"ble 74f\n"
- "ld1b { z7.b }, p2/Z, [x9]\n"
- "ld1b { z6.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x10]\n"
+ "ld1b { z6.b }, p2/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z7.b, z0.b[3]\n"
"sdot z12.s, z7.b, z1.b[3]\n"
"sdot z16.s, z7.b, z2.b[3]\n"
"sdot z20.s, z7.b, z3.b[3]\n"
"sdot z24.s, z7.b, z4.b[3]\n"
"sdot z28.s, z7.b, z5.b[3]\n"
- "ld1b { z7.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z6.b, z0.b[3]\n"
"sdot z13.s, z6.b, z1.b[3]\n"
"sdot z17.s, z6.b, z2.b[3]\n"
"sdot z21.s, z6.b, z3.b[3]\n"
"sdot z25.s, z6.b, z4.b[3]\n"
"sdot z29.s, z6.b, z5.b[3]\n"
- "ld1b { z6.b }, p2/Z, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
+ "ld1b { z6.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z7.b, z0.b[3]\n"
"sdot z14.s, z7.b, z1.b[3]\n"
"sdot z18.s, z7.b, z2.b[3]\n"
@@ -2295,21 +2296,21 @@ void sve_hybrid_s8qs_dot_6x4VL (
"cmp x28, x20\n"
"bne 69b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x26, x11, x20\n"
- "add x25, x26, x20\n"
"ld1w { z3.s }, p2/Z, [x14]\n"
"ld1w { z2.s }, p2/Z, [x14, #1, MUL VL]\n"
"ld1w { z1.s }, p2/Z, [x14, #2, MUL VL]\n"
- "add x24, x25, x20\n"
- "add x23, x24, x20\n"
"ld1w { z0.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x22, x23, x20\n"
+ "addvl x14, x14, #4\n"
+ "add x26, x9, x20\n"
+ "add x25, x26, x20\n"
"add z8.s, z8.s, z3.s\n"
+ "add z12.s, z12.s, z3.s\n"
+ "add x24, x25, x20\n"
+ "add x23, x24, x20\n"
"add z9.s, z9.s, z2.s\n"
"add z10.s, z10.s, z1.s\n"
+ "add x22, x23, x20\n"
"add z11.s, z11.s, z0.s\n"
- "addvl x14, x14, #4\n"
- "add z12.s, z12.s, z3.s\n"
"add z13.s, z13.s, z2.s\n"
"add z14.s, z14.s, z1.s\n"
"add z15.s, z15.s, z0.s\n"
@@ -2387,11 +2388,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z8.s, z8.s, z7.s\n"
+ "and z7.d, z12.d, z0.d\n"
"sqadd z9.s, z9.s, z6.s\n"
+ "and z6.d, z13.d, z1.d\n"
"sqadd z10.s, z10.s, z5.s\n"
"sqadd z11.s, z11.s, z4.s\n"
- "and z7.d, z12.d, z0.d\n"
- "and z6.d, z13.d, z1.d\n"
"and z5.d, z14.d, z2.d\n"
"and z4.d, z15.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2399,11 +2400,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z12.s, z12.s, z7.s\n"
+ "and z7.d, z16.d, z0.d\n"
"sqadd z13.s, z13.s, z6.s\n"
+ "and z6.d, z17.d, z1.d\n"
"sqadd z14.s, z14.s, z5.s\n"
"sqadd z15.s, z15.s, z4.s\n"
- "and z7.d, z16.d, z0.d\n"
- "and z6.d, z17.d, z1.d\n"
"and z5.d, z18.d, z2.d\n"
"and z4.d, z19.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2411,11 +2412,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z16.s, z16.s, z7.s\n"
+ "and z7.d, z20.d, z0.d\n"
"sqadd z17.s, z17.s, z6.s\n"
+ "and z6.d, z21.d, z1.d\n"
"sqadd z18.s, z18.s, z5.s\n"
"sqadd z19.s, z19.s, z4.s\n"
- "and z7.d, z20.d, z0.d\n"
- "and z6.d, z21.d, z1.d\n"
"and z5.d, z22.d, z2.d\n"
"and z4.d, z23.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2423,11 +2424,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z20.s, z20.s, z7.s\n"
+ "and z7.d, z24.d, z0.d\n"
"sqadd z21.s, z21.s, z6.s\n"
+ "and z6.d, z25.d, z1.d\n"
"sqadd z22.s, z22.s, z5.s\n"
"sqadd z23.s, z23.s, z4.s\n"
- "and z7.d, z24.d, z0.d\n"
- "and z6.d, z25.d, z1.d\n"
"and z5.d, z26.d, z2.d\n"
"and z4.d, z27.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2435,11 +2436,11 @@ void sve_hybrid_s8qs_dot_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z24.s, z24.s, z7.s\n"
+ "and z7.d, z28.d, z0.d\n"
"sqadd z25.s, z25.s, z6.s\n"
+ "and z6.d, z29.d, z1.d\n"
"sqadd z26.s, z26.s, z5.s\n"
"sqadd z27.s, z27.s, z4.s\n"
- "and z7.d, z28.d, z0.d\n"
- "and z6.d, z29.d, z1.d\n"
"and z5.d, z30.d, z2.d\n"
"and z4.d, z31.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2452,59 +2453,59 @@ void sve_hybrid_s8qs_dot_6x4VL (
"sqadd z31.s, z31.s, z4.s\n"
"77:" // Height 6: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z8.s, z8.s, z4.s\n"
+ "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x4482880c // srshl z12.s, p2/M, z12.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z12.s, z12.s, z4.s\n"
".inst 0x4482882d // srshl z13.s, p2/M, z13.s, z1.s\n"
".inst 0x4482884e // srshl z14.s, p2/M, z14.s, z2.s\n"
- "add z13.s, z13.s, z4.s\n"
- "add z14.s, z14.s, z4.s\n"
+ "add z8.s, z8.s, z4.s\n"
".inst 0x4482886f // srshl z15.s, p2/M, z15.s, z3.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z15.s, z15.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add z9.s, z9.s, z4.s\n"
+ "add z10.s, z10.s, z4.s\n"
".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z20.s, z20.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
+ "add z14.s, z14.s, z4.s\n"
".inst 0x44828835 // srshl z21.s, p2/M, z21.s, z1.s\n"
".inst 0x44828856 // srshl z22.s, p2/M, z22.s, z2.s\n"
- "add z21.s, z21.s, z4.s\n"
- "add z22.s, z22.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
+ "add z16.s, z16.s, z4.s\n"
".inst 0x44828877 // srshl z23.s, p2/M, z23.s, z3.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
+ "add z18.s, z18.s, z4.s\n"
".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
- "add z27.s, z27.s, z4.s\n"
- "add z28.s, z28.s, z4.s\n"
+ "add z21.s, z21.s, z4.s\n"
+ "add z22.s, z22.s, z4.s\n"
".inst 0x4482883d // srshl z29.s, p2/M, z29.s, z1.s\n"
".inst 0x4482885e // srshl z30.s, p2/M, z30.s, z2.s\n"
- "add z29.s, z29.s, z4.s\n"
- "add z30.s, z30.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
".inst 0x4482887f // srshl z31.s, p2/M, z31.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z25.s, z25.s, z4.s\n"
+ "add z26.s, z26.s, z4.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
- "add z31.s, z31.s, z4.s\n"
+ "add z27.s, z27.s, z4.s\n"
+ "add z28.s, z28.s, z4.s\n"
"add x20, %x[qp], %[minval]\n"
- "ld1rw { z1.s }, p2/Z, [x20]\n"
+ "add z29.s, z29.s, z4.s\n"
+ "add z30.s, z30.s, z4.s\n"
+ "ld1rw { z2.s }, p2/Z, [x20]\n"
+ "add z31.s, z31.s, z4.s\n"
"smin z8.s, p2/M, z8.s, z0.s\n"
"smin z9.s, p2/M, z9.s, z0.s\n"
"smin z10.s, p2/M, z10.s, z0.s\n"
@@ -2529,58 +2530,58 @@ void sve_hybrid_s8qs_dot_6x4VL (
"smin z29.s, p2/M, z29.s, z0.s\n"
"smin z30.s, p2/M, z30.s, z0.s\n"
"smin z31.s, p2/M, z31.s, z0.s\n"
- "smax z8.s, p2/M, z8.s, z1.s\n"
- "smax z9.s, p2/M, z9.s, z1.s\n"
- "smax z10.s, p2/M, z10.s, z1.s\n"
+ "smax z8.s, p2/M, z8.s, z2.s\n"
+ "smax z9.s, p2/M, z9.s, z2.s\n"
+ "smax z10.s, p2/M, z10.s, z2.s\n"
+ "smax z11.s, p2/M, z11.s, z2.s\n"
+ "smax z12.s, p2/M, z12.s, z2.s\n"
+ "smax z13.s, p2/M, z13.s, z2.s\n"
+ "smax z14.s, p2/M, z14.s, z2.s\n"
+ "smax z15.s, p2/M, z15.s, z2.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "smax z11.s, p2/M, z11.s, z1.s\n"
- "smax z12.s, p2/M, z12.s, z1.s\n"
- "uzp1 z0.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z0.b\n"
- "smax z13.s, p2/M, z13.s, z1.s\n"
- "smax z14.s, p2/M, z14.s, z1.s\n"
+ "smax z16.s, p2/M, z16.s, z2.s\n"
+ "smax z17.s, p2/M, z17.s, z2.s\n"
+ "uzp1 z1.h, z10.h, z11.h\n"
+ "smax z18.s, p2/M, z18.s, z2.s\n"
+ "smax z19.s, p2/M, z19.s, z2.s\n"
"uzp1 z12.h, z12.h, z13.h\n"
- "st1b { z8.b }, p1, [x11]\n"
- "smax z15.s, p2/M, z15.s, z1.s\n"
- "smax z16.s, p2/M, z16.s, z1.s\n"
+ "smax z20.s, p2/M, z20.s, z2.s\n"
+ "smax z21.s, p2/M, z21.s, z2.s\n"
"uzp1 z0.h, z14.h, z15.h\n"
- "uzp1 z12.b, z12.b, z0.b\n"
- "smax z17.s, p2/M, z17.s, z1.s\n"
- "smax z18.s, p2/M, z18.s, z1.s\n"
+ "smax z22.s, p2/M, z22.s, z2.s\n"
+ "smax z23.s, p2/M, z23.s, z2.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z12.b }, p1, [x26]\n"
- "smax z19.s, p2/M, z19.s, z1.s\n"
- "smax z20.s, p2/M, z20.s, z1.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
- "smax z21.s, p2/M, z21.s, z1.s\n"
- "smax z22.s, p2/M, z22.s, z1.s\n"
+ "uzp1 z8.b, z8.b, z1.b\n"
+ "smax z24.s, p2/M, z24.s, z2.s\n"
+ "smax z25.s, p2/M, z25.s, z2.s\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
+ "smax z26.s, p2/M, z26.s, z2.s\n"
+ "smax z27.s, p2/M, z27.s, z2.s\n"
"uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x25]\n"
- "smax z23.s, p2/M, z23.s, z1.s\n"
- "smax z24.s, p2/M, z24.s, z1.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
- "smax z25.s, p2/M, z25.s, z1.s\n"
- "smax z26.s, p2/M, z26.s, z1.s\n"
+ "uzp1 z12.b, z12.b, z0.b\n"
+ "smax z28.s, p2/M, z28.s, z2.s\n"
+ "smax z29.s, p2/M, z29.s, z2.s\n"
+ "uzp1 z17.h, z22.h, z23.h\n"
+ "st1b { z8.b }, p1, [x9]\n"
+ "smax z30.s, p2/M, z30.s, z2.s\n"
+ "smax z31.s, p2/M, z31.s, z2.s\n"
"uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x24]\n"
- "smax z27.s, p2/M, z27.s, z1.s\n"
- "smax z28.s, p2/M, z28.s, z1.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
- "smax z29.s, p2/M, z29.s, z1.s\n"
- "smax z30.s, p2/M, z30.s, z1.s\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z18.h, z26.h, z27.h\n"
+ "st1b { z12.b }, p1, [x26]\n"
+ "addvl x9, x9, #1\n"
"uzp1 z28.h, z28.h, z29.h\n"
+ "uzp1 z20.b, z20.b, z17.b\n"
+ "uzp1 z17.h, z30.h, z31.h\n"
+ "st1b { z16.b }, p1, [x25]\n"
+ "uzp1 z24.b, z24.b, z18.b\n"
+ "uzp1 z28.b, z28.b, z17.b\n"
+ "st1b { z20.b }, p1, [x24]\n"
"st1b { z24.b }, p1, [x23]\n"
- "smax z31.s, p2/M, z31.s, z1.s\n"
- "uzp1 z16.h, z30.h, z31.h\n"
- "uzp1 z28.b, z28.b, z16.b\n"
"st1b { z28.b }, p1, [x22]\n"
- "addvl x11, x11, #1\n"
"78:" // Height 6: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
@@ -2594,8 +2595,8 @@ void sve_hybrid_s8qs_dot_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
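The store-back tails being rescheduled throughout the sve_hybrid_s8qs_dot_6x4VL hunks above all implement the same Requantize32 epilogue: a per-channel rounding shift (srshl), an add of c_offset, a clamp to [minval, maxval] (smin then smax), and a two-stage uzp1 narrowing from 32-bit lanes down to bytes ahead of the st1b stores. The and/asr/sqadd blocks that precede it apply a sign-dependent rounding correction which the sketch below omits. A minimal scalar model of one lane, in C++; the helper name is illustrative and not part of the library:

#include <algorithm>
#include <cstdint>

// One-lane model of the srshl / add / smin / smax / uzp1 sequence above.
static int8_t requantize_lane(int32_t acc, int32_t shift, int32_t c_offset,
                              int32_t minval, int32_t maxval)
{
    int64_t v = acc;
    if (shift < 0) {
        // srshl with a negative shift count: rounding arithmetic right shift.
        v = (v + (int64_t{1} << (-shift - 1))) >> -shift;
    } else {
        v <<= shift; // a non-negative count degenerates to a plain left shift
    }
    v += c_offset;                    // "add z8.s, z8.s, z25.s" (c_offset broadcast)
    v = std::min<int64_t>(v, maxval); // "smin ..." against the maxval broadcast
    v = std::max<int64_t>(v, minval); // "smax ..." against the minval broadcast
    return static_cast<int8_t>(v);    // uzp1 .h / uzp1 .b narrowing, then st1b
}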
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL.hpp
index b1b1135c73..73e4bd32b9 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 8, 8> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 8, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
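The substantive change in this header, alongside the copyright bump, widens the transforms type: StdTransformsSVE now receives the LHS operand type as a leading template parameter in addition to the RHS operand and result types, while the 6x8 (block 8) shape is unchanged. A hypothetical sketch of the widened parameter list, matching the new instantiation; the real declaration elsewhere in arm_gemm may differ:

template <typename TInput, typename TWeight, typename TResult,
          unsigned int height_vectors, unsigned int width_vectors,
          unsigned int kernel_blocking>
class StdTransformsSVE;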
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
index cd5f85411c..759e3e2f3d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -47,18 +47,18 @@ void sve_hybrid_s8qs_mmla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
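Both branches now park the output pointer inside the argument block instead of in a standalone local, mirroring the dot-kernel change above: the assembly loads it via offsetof_output_ptr and, in the Height 6 prologue, persists the advanced pointer with a plain str, so output_ptr drops out of the inline-asm operand list entirely. A sketch of the struct shape, assuming only the fields visible in this hunk; the real KernelArgs carries more members:

struct KernelArgsSketch {
    size_t output_offset = {};
    size_t input_initial_col = {};
    size_t input_offset = {};
    void *output_ptr = {}; // new: was a separate local bound as "+&r" (output_ptr)
};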
@@ -98,26 +98,26 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"3:" // Height 1: setup done
"mov x28, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -133,87 +133,87 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ble 8f\n"
"7:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z20.b }, p0/Z, [x26]\n"
- "trn1 z18.d, z20.d, z19.d\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z19.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "trn1 z18.d, z19.d, z22.d\n"
+ "trn2 z19.d, z19.d, z22.d\n"
".inst 0x45119a48 // smmla z8.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x45119a49 // smmla z9.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x45119a4a // smmla z10.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "trn2 z20.d, z20.d, z19.d\n"
+ "ld1b { z16.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
".inst 0x45119a4b // smmla z11.s, z18.b, z17.b\n"
".inst 0x45109a4f // smmla z15.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-7, MUL VL]\n"
- ".inst 0x45119a88 // smmla z8.s, z20.b, z17.b\n"
- ".inst 0x45109a8c // smmla z12.s, z20.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-5, MUL VL]\n"
- ".inst 0x45119a89 // smmla z9.s, z20.b, z17.b\n"
- ".inst 0x45109a8d // smmla z13.s, z20.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #-4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-3, MUL VL]\n"
- ".inst 0x45119a8a // smmla z10.s, z20.b, z17.b\n"
- ".inst 0x45109a8e // smmla z14.s, z20.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #-2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
- ".inst 0x45119a8b // smmla z11.s, z20.b, z17.b\n"
- ".inst 0x45109a8f // smmla z15.s, z20.b, z16.b\n"
- "add x26, x26, #0x10\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x45119a68 // smmla z8.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45109a6c // smmla z12.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x45119a69 // smmla z9.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x45109a6d // smmla z13.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x45119a6a // smmla z10.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45109a6e // smmla z14.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x45119a6b // smmla z11.s, z19.b, z17.b\n"
+ ".inst 0x45109a6f // smmla z15.s, z19.b, z16.b\n"
"bgt 7b\n"
"8:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
"trn1 z18.d, z1.d, z19.d\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
".inst 0x45119a48 // smmla z8.s, z18.b, z17.b\n"
+ "ld1b { z7.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
- ".inst 0x45119a49 // smmla z9.s, z18.b, z17.b\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
+ ".inst 0x45079a49 // smmla z9.s, z18.b, z7.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x45119a4a // smmla z10.s, z18.b, z17.b\n"
+ "ld1b { z26.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z19.d\n"
- ".inst 0x45119a4b // smmla z11.s, z18.b, z17.b\n"
+ "ld1b { z16.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x451a9a4b // smmla z11.s, z18.b, z26.b\n"
".inst 0x45109a4f // smmla z15.s, z18.b, z16.b\n"
- "addvl x9, x9, #8\n"
"ble 9f\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
".inst 0x45119828 // smmla z8.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x4510982c // smmla z12.s, z1.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x45119829 // smmla z9.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x4510982d // smmla z13.s, z1.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x4511982a // smmla z10.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x4510982e // smmla z14.s, z1.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x4511982b // smmla z11.s, z1.b, z17.b\n"
".inst 0x4510982f // smmla z15.s, z1.b, z16.b\n"
- "addvl x9, x9, #8\n"
"9:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -227,10 +227,10 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"uzp1 z11.d, z11.d, z15.d\n"
"ld1w { z17.s }, p2/Z, [x14, #2, MUL VL]\n"
"ld1w { z16.s }, p2/Z, [x14, #3, MUL VL]\n"
- "mov z15.d, z8.d\n"
- "add z15.s, z15.s, z19.s\n"
"addvl x14, x14, #4\n"
+ "mov z15.d, z8.d\n"
"add z9.s, z9.s, z18.s\n"
+ "add z15.s, z15.s, z19.s\n"
"add z10.s, z10.s, z17.s\n"
"add z11.s, z11.s, z16.s\n"
"tbz %x[flags], #4, 10f\n"
@@ -276,61 +276,61 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"sqadd z11.s, z11.s, z16.s\n"
"12:" // Height 1: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z15.s, z15.s, z17.s\n"
+ "ld1rw { z17.s }, p2/Z, [x20]\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z17.s\n"
- "add z10.s, z10.s, z17.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
- "add z11.s, z11.s, z17.s\n"
+ "add z15.s, z15.s, z17.s\n"
"add x20, %x[qp], %[minval]\n"
- "ld1rw { z31.s }, p2/Z, [x20]\n"
+ "add z9.s, z9.s, z17.s\n"
+ "add z10.s, z10.s, z17.s\n"
+ "ld1rw { z28.s }, p2/Z, [x20]\n"
+ "add z11.s, z11.s, z17.s\n"
"smin z15.s, p2/M, z15.s, z16.s\n"
"smin z9.s, p2/M, z9.s, z16.s\n"
"smin z10.s, p2/M, z10.s, z16.s\n"
"smin z11.s, p2/M, z11.s, z16.s\n"
- "smax z15.s, p2/M, z15.s, z31.s\n"
- "smax z9.s, p2/M, z9.s, z31.s\n"
- "smax z10.s, p2/M, z10.s, z31.s\n"
+ "smax z15.s, p2/M, z15.s, z28.s\n"
+ "smax z9.s, p2/M, z9.s, z28.s\n"
+ "smax z10.s, p2/M, z10.s, z28.s\n"
+ "smax z11.s, p2/M, z11.s, z28.s\n"
"uzp1 z15.h, z15.h, z9.h\n"
- "smax z11.s, p2/M, z11.s, z31.s\n"
"uzp1 z16.h, z10.h, z11.h\n"
"uzp1 z15.b, z15.b, z16.b\n"
- "st1b { z15.b }, p1, [x11]\n"
- "addvl x11, x11, #1\n"
+ "st1b { z15.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"13:" // Height 1: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 2b\n"
"b 80f\n"
"14:" // Height 2
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"15:" // Height 2: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"16:" // Height 2: setup done
"mov x28, #0x0\n"
"17:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 18f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -349,90 +349,90 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ble 21f\n"
"20:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z20.b }, p0/Z, [x26]\n"
- "ld1rqb { z19.b }, p0/Z, [x25]\n"
- "trn1 z18.d, z20.d, z19.d\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z19.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z18.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z16.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "trn1 z2.d, z18.d, z16.d\n"
+ "trn2 z18.d, z18.d, z16.d\n"
+ ".inst 0x45119848 // smmla z8.s, z2.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4513984c // smmla z12.s, z2.b, z19.b\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45119849 // smmla z9.s, z2.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4510984d // smmla z13.s, z2.b, z16.b\n"
+ "ld1b { z16.b }, p2/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4511984a // smmla z10.s, z2.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4510984e // smmla z14.s, z2.b, z16.b\n"
+ "ld1b { z16.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x4511984b // smmla z11.s, z2.b, z17.b\n"
+ ".inst 0x4510984f // smmla z15.s, z2.b, z16.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-7, MUL VL]\n"
".inst 0x45119a48 // smmla z8.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-6, MUL VL]\n"
".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-5, MUL VL]\n"
".inst 0x45119a49 // smmla z9.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-4, MUL VL]\n"
".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-3, MUL VL]\n"
".inst 0x45119a4a // smmla z10.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #-2, MUL VL]\n"
".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- "trn2 z20.d, z20.d, z19.d\n"
+ "ld1b { z16.b }, p2/Z, [x10, #-1, MUL VL]\n"
".inst 0x45119a4b // smmla z11.s, z18.b, z17.b\n"
".inst 0x45109a4f // smmla z15.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-7, MUL VL]\n"
- ".inst 0x45119a88 // smmla z8.s, z20.b, z17.b\n"
- ".inst 0x45109a8c // smmla z12.s, z20.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-5, MUL VL]\n"
- ".inst 0x45119a89 // smmla z9.s, z20.b, z17.b\n"
- ".inst 0x45109a8d // smmla z13.s, z20.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #-4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-3, MUL VL]\n"
- ".inst 0x45119a8a // smmla z10.s, z20.b, z17.b\n"
- ".inst 0x45109a8e // smmla z14.s, z20.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #-2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
- ".inst 0x45119a8b // smmla z11.s, z20.b, z17.b\n"
- ".inst 0x45109a8f // smmla z15.s, z20.b, z16.b\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"bgt 20b\n"
"21:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
"ld1rqb { z19.b }, p0/Z, [x25]\n"
"trn1 z18.d, z1.d, z19.d\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
".inst 0x45119a48 // smmla z8.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x45119a49 // smmla z9.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x45119a4a // smmla z10.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "ld1b { z16.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x45119a4b // smmla z11.s, z18.b, z17.b\n"
".inst 0x45109a4f // smmla z15.s, z18.b, z16.b\n"
- "addvl x9, x9, #8\n"
"ble 22f\n"
- "ld1b { z17.b }, p2/Z, [x9]\n"
- "ld1b { z16.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z17.b }, p2/Z, [x10]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #1, MUL VL]\n"
".inst 0x45119828 // smmla z8.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x4510982c // smmla z12.s, z1.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x45119829 // smmla z9.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x4510982d // smmla z13.s, z1.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x4511982a // smmla z10.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x4510982e // smmla z14.s, z1.b, z16.b\n"
- "ld1b { z17.b }, p2/Z, [x9, #6, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z16.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x4511982b // smmla z11.s, z1.b, z17.b\n"
".inst 0x4510982f // smmla z15.s, z1.b, z16.b\n"
- "addvl x9, x9, #8\n"
"22:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -449,16 +449,16 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"ld1w { z16.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x26, x11, x20\n"
+ "addvl x14, x14, #4\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "addvl x14, x14, #4\n"
+ "add x26, x9, x20\n"
"mov z15.d, z20.d\n"
- "add z15.s, z15.s, z19.s\n"
"add z12.s, z12.s, z18.s\n"
"add z13.s, z13.s, z17.s\n"
- "add z14.s, z14.s, z16.s\n"
"add z8.s, z8.s, z19.s\n"
+ "add z15.s, z15.s, z19.s\n"
+ "add z14.s, z14.s, z16.s\n"
"add z9.s, z9.s, z18.s\n"
"add z10.s, z10.s, z17.s\n"
"add z11.s, z11.s, z16.s\n"
@@ -495,99 +495,99 @@ void sve_hybrid_s8qs_mmla_6x4VL (
".inst 0x04a6754a // sqrdmulh z10.s, z10.s, z6.s\n"
".inst 0x04a7756b // sqrdmulh z11.s, z11.s, z7.s\n"
"tbz %x[flags], #5, 25f\n"
- "and z19.d, z15.d, z0.d\n"
- "and z18.d, z12.d, z1.d\n"
+ "and z18.d, z15.d, z0.d\n"
+ "and z19.d, z12.d, z1.d\n"
"and z17.d, z13.d, z2.d\n"
"and z16.d, z14.d, z3.d\n"
- "asr z19.s, z19.s, #0x1f\n"
"asr z18.s, z18.s, #0x1f\n"
+ "asr z19.s, z19.s, #0x1f\n"
"asr z17.s, z17.s, #0x1f\n"
"asr z16.s, z16.s, #0x1f\n"
- "sqadd z15.s, z15.s, z19.s\n"
- "sqadd z12.s, z12.s, z18.s\n"
+ "sqadd z15.s, z15.s, z18.s\n"
+ "and z18.d, z8.d, z0.d\n"
+ "sqadd z12.s, z12.s, z19.s\n"
+ "and z19.d, z9.d, z1.d\n"
"sqadd z13.s, z13.s, z17.s\n"
"sqadd z14.s, z14.s, z16.s\n"
- "and z18.d, z8.d, z0.d\n"
- "and z24.d, z9.d, z1.d\n"
"and z17.d, z10.d, z2.d\n"
"and z16.d, z11.d, z3.d\n"
"asr z18.s, z18.s, #0x1f\n"
- "asr z24.s, z24.s, #0x1f\n"
+ "asr z19.s, z19.s, #0x1f\n"
"asr z17.s, z17.s, #0x1f\n"
"asr z16.s, z16.s, #0x1f\n"
"sqadd z8.s, z8.s, z18.s\n"
- "sqadd z9.s, z9.s, z24.s\n"
+ "sqadd z9.s, z9.s, z19.s\n"
"sqadd z10.s, z10.s, z17.s\n"
"sqadd z11.s, z11.s, z16.s\n"
"25:" // Height 2: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z17.s }, p2/Z, [x20]\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z15.s, z15.s, z17.s\n"
+ "ld1rw { z18.s }, p2/Z, [x20]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z17.s\n"
- "add z13.s, z13.s, z17.s\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z14.s, z14.s, z17.s\n"
- "add z8.s, z8.s, z17.s\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z17.s\n"
- "add z10.s, z10.s, z17.s\n"
+ "add z15.s, z15.s, z18.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
- "add z11.s, z11.s, z17.s\n"
- "add x20, %x[qp], %[minval]\n"
+ "add z12.s, z12.s, z18.s\n"
+ "add z13.s, z13.s, z18.s\n"
"ld1rw { z17.s }, p2/Z, [x20]\n"
- "smin z15.s, p2/M, z15.s, z16.s\n"
- "smin z12.s, p2/M, z12.s, z16.s\n"
- "smin z13.s, p2/M, z13.s, z16.s\n"
- "smin z14.s, p2/M, z14.s, z16.s\n"
- "smin z8.s, p2/M, z8.s, z16.s\n"
- "smin z9.s, p2/M, z9.s, z16.s\n"
- "smin z10.s, p2/M, z10.s, z16.s\n"
- "smin z11.s, p2/M, z11.s, z16.s\n"
- "smax z15.s, p2/M, z15.s, z17.s\n"
- "smax z12.s, p2/M, z12.s, z17.s\n"
- "smax z13.s, p2/M, z13.s, z17.s\n"
+ "add z14.s, z14.s, z18.s\n"
+ "add z8.s, z8.s, z18.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z9.s, z9.s, z18.s\n"
+ "add z10.s, z10.s, z18.s\n"
+ "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "add z11.s, z11.s, z18.s\n"
+ "smin z15.s, p2/M, z15.s, z17.s\n"
+ "smin z12.s, p2/M, z12.s, z17.s\n"
+ "smin z13.s, p2/M, z13.s, z17.s\n"
+ "smin z14.s, p2/M, z14.s, z17.s\n"
+ "smin z8.s, p2/M, z8.s, z17.s\n"
+ "smin z9.s, p2/M, z9.s, z17.s\n"
+ "smin z10.s, p2/M, z10.s, z17.s\n"
+ "smin z11.s, p2/M, z11.s, z17.s\n"
+ "smax z15.s, p2/M, z15.s, z16.s\n"
+ "smax z12.s, p2/M, z12.s, z16.s\n"
+ "smax z13.s, p2/M, z13.s, z16.s\n"
+ "smax z14.s, p2/M, z14.s, z16.s\n"
+ "smax z8.s, p2/M, z8.s, z16.s\n"
+ "smax z9.s, p2/M, z9.s, z16.s\n"
+ "smax z10.s, p2/M, z10.s, z16.s\n"
+ "smax z11.s, p2/M, z11.s, z16.s\n"
"uzp1 z15.h, z15.h, z12.h\n"
- "smax z14.s, p2/M, z14.s, z17.s\n"
- "smax z8.s, p2/M, z8.s, z17.s\n"
- "uzp1 z16.h, z13.h, z14.h\n"
- "uzp1 z15.b, z15.b, z16.b\n"
- "smax z9.s, p2/M, z9.s, z17.s\n"
- "smax z10.s, p2/M, z10.s, z17.s\n"
+ "uzp1 z17.h, z13.h, z14.h\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z15.b }, p1, [x11]\n"
- "smax z11.s, p2/M, z11.s, z17.s\n"
"uzp1 z16.h, z10.h, z11.h\n"
+ "uzp1 z15.b, z15.b, z17.b\n"
"uzp1 z8.b, z8.b, z16.b\n"
+ "st1b { z15.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"st1b { z8.b }, p1, [x26]\n"
- "addvl x11, x11, #1\n"
"26:" // Height 2: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 15b\n"
"b 80f\n"
"27:" // Height 3
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"28:" // Height 3: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"mov z16.s, #0x0\n"
@@ -602,8 +602,8 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov x28, #0x0\n"
"30:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 31f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -625,125 +625,125 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ble 34f\n"
"33:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z28.b }, p2/Z, [x10]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z30.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z24.b }, p0/Z, [x25]\n"
- "ld1rqb { z28.b }, p0/Z, [x24]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqb { z29.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"trn1 z27.d, z30.d, z24.d\n"
"trn2 z30.d, z30.d, z24.d\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "trn1 z26.d, z28.d, z29.d\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45199b68 // smmla z8.s, z27.b, z25.b\n"
- ".inst 0x45199b50 // smmla z16.s, z26.b, z25.b\n"
- ".inst 0x45189b6c // smmla z12.s, z27.b, z24.b\n"
- ".inst 0x45189b54 // smmla z20.s, z26.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
- ".inst 0x45199b69 // smmla z9.s, z27.b, z25.b\n"
- ".inst 0x45199b51 // smmla z17.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #4, MUL VL]\n"
- "trn2 z28.d, z28.d, z29.d\n"
- ".inst 0x45189b6d // smmla z13.s, z27.b, z24.b\n"
- ".inst 0x45189b55 // smmla z21.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "trn1 z26.d, z29.d, z31.d\n"
+ "trn2 z29.d, z29.d, z31.d\n"
+ ".inst 0x451c9b68 // smmla z8.s, z27.b, z28.b\n"
+ ".inst 0x45199b6c // smmla z12.s, z27.b, z25.b\n"
+ ".inst 0x451c9b50 // smmla z16.s, z26.b, z28.b\n"
+ "ld1b { z4.b }, p2/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45199b54 // smmla z20.s, z26.b, z25.b\n"
+ "ld1b { z28.b }, p2/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45049b69 // smmla z9.s, z27.b, z4.b\n"
+ ".inst 0x45049b51 // smmla z17.s, z26.b, z4.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x451c9b6d // smmla z13.s, z27.b, z28.b\n"
+ ".inst 0x451c9b55 // smmla z21.s, z26.b, z28.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x45199b6a // smmla z10.s, z27.b, z25.b\n"
".inst 0x45199b52 // smmla z18.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z25.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x45189b6e // smmla z14.s, z27.b, z24.b\n"
".inst 0x45189b56 // smmla z22.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
+ "ld1b { z24.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
".inst 0x45199b6b // smmla z11.s, z27.b, z25.b\n"
".inst 0x45199b53 // smmla z19.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x45189b6f // smmla z15.s, z27.b, z24.b\n"
".inst 0x45189b57 // smmla z23.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-7, MUL VL]\n"
".inst 0x45199bc8 // smmla z8.s, z30.b, z25.b\n"
- ".inst 0x45199b90 // smmla z16.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ ".inst 0x45199bb0 // smmla z16.s, z29.b, z25.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-6, MUL VL]\n"
".inst 0x45189bcc // smmla z12.s, z30.b, z24.b\n"
- ".inst 0x45189b94 // smmla z20.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ ".inst 0x45189bb4 // smmla z20.s, z29.b, z24.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-5, MUL VL]\n"
".inst 0x45199bc9 // smmla z9.s, z30.b, z25.b\n"
- ".inst 0x45199b91 // smmla z17.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ ".inst 0x45199bb1 // smmla z17.s, z29.b, z25.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-4, MUL VL]\n"
".inst 0x45189bcd // smmla z13.s, z30.b, z24.b\n"
- ".inst 0x45189b95 // smmla z21.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ ".inst 0x45189bb5 // smmla z21.s, z29.b, z24.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-3, MUL VL]\n"
".inst 0x45199bca // smmla z10.s, z30.b, z25.b\n"
- ".inst 0x45199b92 // smmla z18.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ ".inst 0x45199bb2 // smmla z18.s, z29.b, z25.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-2, MUL VL]\n"
".inst 0x45189bce // smmla z14.s, z30.b, z24.b\n"
- ".inst 0x45189b96 // smmla z22.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ ".inst 0x45189bb6 // smmla z22.s, z29.b, z24.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-1, MUL VL]\n"
".inst 0x45199bcb // smmla z11.s, z30.b, z25.b\n"
- ".inst 0x45199b93 // smmla z19.s, z28.b, z25.b\n"
+ ".inst 0x45199bb3 // smmla z19.s, z29.b, z25.b\n"
".inst 0x45189bcf // smmla z15.s, z30.b, z24.b\n"
- ".inst 0x45189b97 // smmla z23.s, z28.b, z24.b\n"
+ ".inst 0x45189bb7 // smmla z23.s, z29.b, z24.b\n"
"bgt 33b\n"
"34:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p2/Z, [x10]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
"ld1rqb { z24.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
"trn1 z27.d, z1.d, z24.d\n"
"trn2 z1.d, z1.d, z24.d\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "trn1 z26.d, z3.d, z28.d\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "trn1 z26.d, z3.d, z29.d\n"
".inst 0x45199b68 // smmla z8.s, z27.b, z25.b\n"
+ ".inst 0x451c9b6c // smmla z12.s, z27.b, z28.b\n"
+ "trn2 z3.d, z3.d, z29.d\n"
".inst 0x45199b50 // smmla z16.s, z26.b, z25.b\n"
- ".inst 0x45189b6c // smmla z12.s, z27.b, z24.b\n"
- ".inst 0x45189b54 // smmla z20.s, z26.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x451c9b54 // smmla z20.s, z26.b, z28.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x45199b69 // smmla z9.s, z27.b, z25.b\n"
".inst 0x45199b51 // smmla z17.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z25.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x45189b6d // smmla z13.s, z27.b, z24.b\n"
".inst 0x45189b55 // smmla z21.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z28.d\n"
+ "ld1b { z24.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x45199b6a // smmla z10.s, z27.b, z25.b\n"
".inst 0x45199b52 // smmla z18.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x45189b6e // smmla z14.s, z27.b, z24.b\n"
".inst 0x45189b56 // smmla z22.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z24.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x45199b6b // smmla z11.s, z27.b, z25.b\n"
".inst 0x45199b53 // smmla z19.s, z26.b, z25.b\n"
".inst 0x45189b6f // smmla z15.s, z27.b, z24.b\n"
".inst 0x45189b57 // smmla z23.s, z26.b, z24.b\n"
"ble 35f\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #1, MUL VL]\n"
".inst 0x45199828 // smmla z8.s, z1.b, z25.b\n"
".inst 0x45199870 // smmla z16.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x4518982c // smmla z12.s, z1.b, z24.b\n"
".inst 0x45189874 // smmla z20.s, z3.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x45199829 // smmla z9.s, z1.b, z25.b\n"
".inst 0x45199871 // smmla z17.s, z3.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x4518982d // smmla z13.s, z1.b, z24.b\n"
".inst 0x45189875 // smmla z21.s, z3.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x4519982a // smmla z10.s, z1.b, z25.b\n"
".inst 0x45199872 // smmla z18.s, z3.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x4518982e // smmla z14.s, z1.b, z24.b\n"
".inst 0x45189876 // smmla z22.s, z3.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z24.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x4519982b // smmla z11.s, z1.b, z25.b\n"
".inst 0x45199873 // smmla z19.s, z3.b, z25.b\n"
".inst 0x4518982f // smmla z15.s, z1.b, z24.b\n"
@@ -764,20 +764,20 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"ld1w { z24.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x26, x11, x20\n"
+ "addvl x14, x14, #4\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x25, x26, x20\n"
- "addvl x14, x14, #4\n"
+ "add x26, x9, x20\n"
"uzp1 z16.d, z16.d, z20.d\n"
"uzp1 z17.d, z17.d, z21.d\n"
+ "add x25, x26, x20\n"
"uzp1 z18.d, z18.d, z22.d\n"
"uzp1 z19.d, z19.d, z23.d\n"
"mov z23.d, z28.d\n"
- "add z23.s, z23.s, z27.s\n"
"add z12.s, z12.s, z26.s\n"
"add z13.s, z13.s, z25.s\n"
"add z14.s, z14.s, z24.s\n"
+ "add z23.s, z23.s, z27.s\n"
"add z8.s, z8.s, z27.s\n"
"add z9.s, z9.s, z26.s\n"
"add z10.s, z10.s, z25.s\n"
@@ -832,11 +832,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z21.s, z21.s, #0x1f\n"
"asr z20.s, z20.s, #0x1f\n"
"sqadd z23.s, z23.s, z24.s\n"
+ "and z24.d, z8.d, z0.d\n"
"sqadd z12.s, z12.s, z22.s\n"
+ "and z22.d, z9.d, z1.d\n"
"sqadd z13.s, z13.s, z21.s\n"
"sqadd z14.s, z14.s, z20.s\n"
- "and z24.d, z8.d, z0.d\n"
- "and z22.d, z9.d, z1.d\n"
"and z21.d, z10.d, z2.d\n"
"and z20.d, z11.d, z3.d\n"
"asr z24.s, z24.s, #0x1f\n"
@@ -844,11 +844,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z21.s, z21.s, #0x1f\n"
"asr z20.s, z20.s, #0x1f\n"
"sqadd z8.s, z8.s, z24.s\n"
+ "and z24.d, z16.d, z0.d\n"
"sqadd z9.s, z9.s, z22.s\n"
+ "and z22.d, z17.d, z1.d\n"
"sqadd z10.s, z10.s, z21.s\n"
"sqadd z11.s, z11.s, z20.s\n"
- "and z24.d, z16.d, z0.d\n"
- "and z22.d, z17.d, z1.d\n"
"and z21.d, z18.d, z2.d\n"
"and z20.d, z19.d, z3.d\n"
"asr z24.s, z24.s, #0x1f\n"
@@ -861,93 +861,93 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"sqadd z19.s, z19.s, z20.s\n"
"38:" // Height 3: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z21.s\n"
+ "ld1rw { z22.s }, p2/Z, [x20]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z21.s\n"
- "add z13.s, z13.s, z21.s\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z14.s, z14.s, z21.s\n"
- "add z8.s, z8.s, z21.s\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z21.s\n"
- "add z10.s, z10.s, z21.s\n"
+ "add z23.s, z23.s, z22.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z11.s, z11.s, z21.s\n"
- "add z16.s, z16.s, z21.s\n"
+ "add z12.s, z12.s, z22.s\n"
+ "add z13.s, z13.s, z22.s\n"
".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z21.s\n"
- "add z18.s, z18.s, z21.s\n"
+ "add z14.s, z14.s, z22.s\n"
+ "add z8.s, z8.s, z22.s\n"
".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
- "ld1rw { z20.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z21.s\n"
- "add x20, %x[qp], %[minval]\n"
+ "add z9.s, z9.s, z22.s\n"
+ "add z10.s, z10.s, z22.s\n"
"ld1rw { z21.s }, p2/Z, [x20]\n"
- "smin z23.s, p2/M, z23.s, z20.s\n"
- "smin z12.s, p2/M, z12.s, z20.s\n"
- "smin z13.s, p2/M, z13.s, z20.s\n"
- "smin z14.s, p2/M, z14.s, z20.s\n"
- "smin z8.s, p2/M, z8.s, z20.s\n"
- "smin z9.s, p2/M, z9.s, z20.s\n"
- "smin z10.s, p2/M, z10.s, z20.s\n"
- "smin z11.s, p2/M, z11.s, z20.s\n"
- "smin z16.s, p2/M, z16.s, z20.s\n"
- "smin z17.s, p2/M, z17.s, z20.s\n"
- "smin z18.s, p2/M, z18.s, z20.s\n"
- "smin z19.s, p2/M, z19.s, z20.s\n"
- "smax z23.s, p2/M, z23.s, z21.s\n"
- "smax z12.s, p2/M, z12.s, z21.s\n"
- "smax z13.s, p2/M, z13.s, z21.s\n"
+ "add z11.s, z11.s, z22.s\n"
+ "add z16.s, z16.s, z22.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z22.s\n"
+ "add z18.s, z18.s, z22.s\n"
+ "ld1rw { z20.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z22.s\n"
+ "smin z23.s, p2/M, z23.s, z21.s\n"
+ "smin z12.s, p2/M, z12.s, z21.s\n"
+ "smin z13.s, p2/M, z13.s, z21.s\n"
+ "smin z14.s, p2/M, z14.s, z21.s\n"
+ "smin z8.s, p2/M, z8.s, z21.s\n"
+ "smin z9.s, p2/M, z9.s, z21.s\n"
+ "smin z10.s, p2/M, z10.s, z21.s\n"
+ "smin z11.s, p2/M, z11.s, z21.s\n"
+ "smin z16.s, p2/M, z16.s, z21.s\n"
+ "smin z17.s, p2/M, z17.s, z21.s\n"
+ "smin z18.s, p2/M, z18.s, z21.s\n"
+ "smin z19.s, p2/M, z19.s, z21.s\n"
+ "smax z23.s, p2/M, z23.s, z20.s\n"
+ "smax z12.s, p2/M, z12.s, z20.s\n"
+ "smax z13.s, p2/M, z13.s, z20.s\n"
+ "smax z14.s, p2/M, z14.s, z20.s\n"
+ "smax z8.s, p2/M, z8.s, z20.s\n"
+ "smax z9.s, p2/M, z9.s, z20.s\n"
+ "smax z10.s, p2/M, z10.s, z20.s\n"
+ "smax z11.s, p2/M, z11.s, z20.s\n"
"uzp1 z23.h, z23.h, z12.h\n"
- "smax z14.s, p2/M, z14.s, z21.s\n"
- "smax z8.s, p2/M, z8.s, z21.s\n"
- "uzp1 z20.h, z13.h, z14.h\n"
- "uzp1 z23.b, z23.b, z20.b\n"
- "smax z9.s, p2/M, z9.s, z21.s\n"
- "smax z10.s, p2/M, z10.s, z21.s\n"
+ "smax z16.s, p2/M, z16.s, z20.s\n"
+ "smax z17.s, p2/M, z17.s, z20.s\n"
+ "uzp1 z21.h, z13.h, z14.h\n"
+ "smax z18.s, p2/M, z18.s, z20.s\n"
+ "smax z19.s, p2/M, z19.s, z20.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z23.b }, p1, [x11]\n"
- "smax z11.s, p2/M, z11.s, z21.s\n"
- "smax z16.s, p2/M, z16.s, z21.s\n"
"uzp1 z20.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z20.b\n"
- "smax z17.s, p2/M, z17.s, z21.s\n"
- "smax z18.s, p2/M, z18.s, z21.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z8.b }, p1, [x26]\n"
- "smax z19.s, p2/M, z19.s, z21.s\n"
+ "uzp1 z23.b, z23.b, z21.b\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "uzp1 z8.b, z8.b, z20.b\n"
+ "st1b { z23.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z8.b }, p1, [x26]\n"
"st1b { z16.b }, p1, [x25]\n"
- "addvl x11, x11, #1\n"
"39:" // Height 3: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 28b\n"
"b 80f\n"
"40:" // Height 4
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"41:" // Height 4: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"mov z16.s, #0x0\n"
@@ -962,8 +962,8 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov x28, #0x0\n"
"43:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 44f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -988,128 +988,128 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ble 47f\n"
"46:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z30.b }, p0/Z, [x26]\n"
- "ld1rqb { z24.b }, p0/Z, [x25]\n"
- "trn1 z29.d, z30.d, z24.d\n"
+ "ld1b { z31.b }, p2/Z, [x10]\n"
+ "ld1b { z30.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z29.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z28.b }, p0/Z, [x24]\n"
- "ld1rqb { z27.b }, p0/Z, [x23]\n"
- "trn2 z30.d, z30.d, z24.d\n"
- "trn1 z26.d, z28.d, z27.d\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45199ba8 // smmla z8.s, z29.b, z25.b\n"
- ".inst 0x45199b50 // smmla z16.s, z26.b, z25.b\n"
- ".inst 0x45189bac // smmla z12.s, z29.b, z24.b\n"
- ".inst 0x45189b54 // smmla z20.s, z26.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
- ".inst 0x45199ba9 // smmla z9.s, z29.b, z25.b\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z27.d, z29.d, z25.d\n"
+ "trn2 z29.d, z29.d, z25.d\n"
+ "trn1 z26.d, z28.d, z24.d\n"
+ "trn2 z28.d, z28.d, z24.d\n"
+ ".inst 0x451f9b68 // smmla z8.s, z27.b, z31.b\n"
+ ".inst 0x451e9b6c // smmla z12.s, z27.b, z30.b\n"
+ ".inst 0x451f9b50 // smmla z16.s, z26.b, z31.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x451e9b54 // smmla z20.s, z26.b, z30.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45199b69 // smmla z9.s, z27.b, z25.b\n"
".inst 0x45199b51 // smmla z17.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #4, MUL VL]\n"
- "trn2 z28.d, z28.d, z27.d\n"
- ".inst 0x45189bad // smmla z13.s, z29.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45189b6d // smmla z13.s, z27.b, z24.b\n"
".inst 0x45189b55 // smmla z21.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- ".inst 0x45199baa // smmla z10.s, z29.b, z25.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45199b6a // smmla z10.s, z27.b, z25.b\n"
".inst 0x45199b52 // smmla z18.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
- ".inst 0x45189bae // smmla z14.s, z29.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45189b6e // smmla z14.s, z27.b, z24.b\n"
".inst 0x45189b56 // smmla z22.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- ".inst 0x45199bab // smmla z11.s, z29.b, z25.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x45199b6b // smmla z11.s, z27.b, z25.b\n"
".inst 0x45199b53 // smmla z19.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
- ".inst 0x45189baf // smmla z15.s, z29.b, z24.b\n"
+ ".inst 0x45189b6f // smmla z15.s, z27.b, z24.b\n"
".inst 0x45189b57 // smmla z23.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x45199bc8 // smmla z8.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x45199ba8 // smmla z8.s, z29.b, z25.b\n"
".inst 0x45199b90 // smmla z16.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x45189bcc // smmla z12.s, z30.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45189bac // smmla z12.s, z29.b, z24.b\n"
".inst 0x45189b94 // smmla z20.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x45199bc9 // smmla z9.s, z30.b, z25.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x45199ba9 // smmla z9.s, z29.b, z25.b\n"
".inst 0x45199b91 // smmla z17.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #-4, MUL VL]\n"
- ".inst 0x45189bcd // smmla z13.s, z30.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x45189bad // smmla z13.s, z29.b, z24.b\n"
".inst 0x45189b95 // smmla z21.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #-3, MUL VL]\n"
- ".inst 0x45199bca // smmla z10.s, z30.b, z25.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x45199baa // smmla z10.s, z29.b, z25.b\n"
".inst 0x45199b92 // smmla z18.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #-2, MUL VL]\n"
- ".inst 0x45189bce // smmla z14.s, z30.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45189bae // smmla z14.s, z29.b, z24.b\n"
".inst 0x45189b96 // smmla z22.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #-1, MUL VL]\n"
- ".inst 0x45199bcb // smmla z11.s, z30.b, z25.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x45199bab // smmla z11.s, z29.b, z25.b\n"
".inst 0x45199b93 // smmla z19.s, z28.b, z25.b\n"
- ".inst 0x45189bcf // smmla z15.s, z30.b, z24.b\n"
+ ".inst 0x45189baf // smmla z15.s, z29.b, z24.b\n"
".inst 0x45189b97 // smmla z23.s, z28.b, z24.b\n"
"bgt 46b\n"
"47:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p2/Z, [x10]\n"
+ "ld1b { z28.b }, p2/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z24.b }, p0/Z, [x25]\n"
- "trn1 z28.d, z1.d, z24.d\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z27.b }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z24.d\n"
- "trn1 z26.d, z3.d, z27.d\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x45199b88 // smmla z8.s, z28.b, z25.b\n"
- ".inst 0x45199b50 // smmla z16.s, z26.b, z25.b\n"
- ".inst 0x45189b8c // smmla z12.s, z28.b, z24.b\n"
- ".inst 0x45189b54 // smmla z20.s, z26.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
- ".inst 0x45199b89 // smmla z9.s, z28.b, z25.b\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "trn1 z27.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ "trn1 z26.d, z3.d, z24.d\n"
+ ".inst 0x451d9b68 // smmla z8.s, z27.b, z29.b\n"
+ ".inst 0x451c9b6c // smmla z12.s, z27.b, z28.b\n"
+ "trn2 z3.d, z3.d, z24.d\n"
+ ".inst 0x451d9b50 // smmla z16.s, z26.b, z29.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x451c9b54 // smmla z20.s, z26.b, z28.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45199b69 // smmla z9.s, z27.b, z25.b\n"
".inst 0x45199b51 // smmla z17.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- ".inst 0x45189b8d // smmla z13.s, z28.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45189b6d // smmla z13.s, z27.b, z24.b\n"
".inst 0x45189b55 // smmla z21.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z27.d\n"
- ".inst 0x45199b8a // smmla z10.s, z28.b, z25.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45199b6a // smmla z10.s, z27.b, z25.b\n"
".inst 0x45199b52 // smmla z18.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #6, MUL VL]\n"
- ".inst 0x45189b8e // smmla z14.s, z28.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45189b6e // smmla z14.s, z27.b, z24.b\n"
".inst 0x45189b56 // smmla z22.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
- ".inst 0x45199b8b // smmla z11.s, z28.b, z25.b\n"
+ "ld1b { z24.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x45199b6b // smmla z11.s, z27.b, z25.b\n"
".inst 0x45199b53 // smmla z19.s, z26.b, z25.b\n"
- ".inst 0x45189b8f // smmla z15.s, z28.b, z24.b\n"
+ ".inst 0x45189b6f // smmla z15.s, z27.b, z24.b\n"
".inst 0x45189b57 // smmla z23.s, z26.b, z24.b\n"
"ble 48f\n"
- "ld1b { z25.b }, p2/Z, [x9]\n"
- "ld1b { z24.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #1, MUL VL]\n"
".inst 0x45199828 // smmla z8.s, z1.b, z25.b\n"
".inst 0x45199870 // smmla z16.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x4518982c // smmla z12.s, z1.b, z24.b\n"
".inst 0x45189874 // smmla z20.s, z3.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x45199829 // smmla z9.s, z1.b, z25.b\n"
".inst 0x45199871 // smmla z17.s, z3.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x4518982d // smmla z13.s, z1.b, z24.b\n"
".inst 0x45189875 // smmla z21.s, z3.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x4519982a // smmla z10.s, z1.b, z25.b\n"
".inst 0x45199872 // smmla z18.s, z3.b, z25.b\n"
- "ld1b { z25.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x4518982e // smmla z14.s, z1.b, z24.b\n"
".inst 0x45189876 // smmla z22.s, z3.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z24.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x4519982b // smmla z11.s, z1.b, z25.b\n"
".inst 0x45199873 // smmla z19.s, z3.b, z25.b\n"
".inst 0x4518982f // smmla z15.s, z1.b, z24.b\n"
@@ -1130,25 +1130,25 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
"ld1w { z24.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x26, x11, x20\n"
+ "addvl x14, x14, #4\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "add x26, x9, x20\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "addvl x14, x14, #4\n"
+ "add x25, x26, x20\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "add x24, x25, x20\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
"mov z23.d, z28.d\n"
- "add z23.s, z23.s, z27.s\n"
"add z12.s, z12.s, z26.s\n"
"add z13.s, z13.s, z25.s\n"
"add z14.s, z14.s, z24.s\n"
+ "add z23.s, z23.s, z27.s\n"
"add z8.s, z8.s, z27.s\n"
"add z9.s, z9.s, z26.s\n"
"add z10.s, z10.s, z25.s\n"
@@ -1211,11 +1211,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z25.s, z25.s, #0x1f\n"
"asr z24.s, z24.s, #0x1f\n"
"sqadd z23.s, z23.s, z27.s\n"
+ "and z27.d, z8.d, z0.d\n"
"sqadd z12.s, z12.s, z26.s\n"
+ "and z26.d, z9.d, z1.d\n"
"sqadd z13.s, z13.s, z25.s\n"
"sqadd z14.s, z14.s, z24.s\n"
- "and z27.d, z8.d, z0.d\n"
- "and z26.d, z9.d, z1.d\n"
"and z25.d, z10.d, z2.d\n"
"and z24.d, z11.d, z3.d\n"
"asr z27.s, z27.s, #0x1f\n"
@@ -1223,11 +1223,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z25.s, z25.s, #0x1f\n"
"asr z24.s, z24.s, #0x1f\n"
"sqadd z8.s, z8.s, z27.s\n"
+ "and z27.d, z15.d, z0.d\n"
"sqadd z9.s, z9.s, z26.s\n"
+ "and z26.d, z20.d, z1.d\n"
"sqadd z10.s, z10.s, z25.s\n"
"sqadd z11.s, z11.s, z24.s\n"
- "and z27.d, z15.d, z0.d\n"
- "and z26.d, z20.d, z1.d\n"
"and z25.d, z21.d, z2.d\n"
"and z24.d, z22.d, z3.d\n"
"asr z27.s, z27.s, #0x1f\n"
@@ -1235,11 +1235,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z25.s, z25.s, #0x1f\n"
"asr z24.s, z24.s, #0x1f\n"
"sqadd z15.s, z15.s, z27.s\n"
+ "and z27.d, z16.d, z0.d\n"
"sqadd z20.s, z20.s, z26.s\n"
+ "and z26.d, z17.d, z1.d\n"
"sqadd z21.s, z21.s, z25.s\n"
"sqadd z22.s, z22.s, z24.s\n"
- "and z27.d, z16.d, z0.d\n"
- "and z26.d, z17.d, z1.d\n"
"and z25.d, z18.d, z2.d\n"
"and z24.d, z19.d, z3.d\n"
"asr z27.s, z27.s, #0x1f\n"
@@ -1252,43 +1252,43 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"sqadd z19.s, z19.s, z24.s\n"
"51:" // Height 4: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z25.s }, p2/Z, [x20]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z25.s\n"
+ "ld1rw { z25.s }, p2/Z, [x20]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z25.s\n"
- "add z13.s, z13.s, z25.s\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z14.s, z14.s, z25.s\n"
- "add z8.s, z8.s, z25.s\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z25.s\n"
- "add z10.s, z10.s, z25.s\n"
+ "add z23.s, z23.s, z25.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z25.s\n"
- "add z15.s, z15.s, z25.s\n"
+ "add z12.s, z12.s, z25.s\n"
+ "add z13.s, z13.s, z25.s\n"
".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z25.s\n"
- "add z21.s, z21.s, z25.s\n"
+ "add z14.s, z14.s, z25.s\n"
+ "add z8.s, z8.s, z25.s\n"
".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z25.s\n"
- "add z16.s, z16.s, z25.s\n"
+ "add z9.s, z9.s, z25.s\n"
+ "add z10.s, z10.s, z25.s\n"
".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z25.s\n"
- "add z18.s, z18.s, z25.s\n"
+ "add z11.s, z11.s, z25.s\n"
+ "add z15.s, z15.s, z25.s\n"
".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z20.s, z20.s, z25.s\n"
+ "add z21.s, z21.s, z25.s\n"
"ld1rw { z24.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z25.s\n"
+ "add z22.s, z22.s, z25.s\n"
+ "add z16.s, z16.s, z25.s\n"
"add x20, %x[qp], %[minval]\n"
- "ld1rw { z25.s }, p2/Z, [x20]\n"
+ "add z17.s, z17.s, z25.s\n"
+ "add z18.s, z18.s, z25.s\n"
+ "ld1rw { z26.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z25.s\n"
"smin z23.s, p2/M, z23.s, z24.s\n"
"smin z12.s, p2/M, z12.s, z24.s\n"
"smin z13.s, p2/M, z13.s, z24.s\n"
@@ -1305,60 +1305,60 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"smin z17.s, p2/M, z17.s, z24.s\n"
"smin z18.s, p2/M, z18.s, z24.s\n"
"smin z19.s, p2/M, z19.s, z24.s\n"
- "smax z23.s, p2/M, z23.s, z25.s\n"
- "smax z12.s, p2/M, z12.s, z25.s\n"
- "smax z13.s, p2/M, z13.s, z25.s\n"
+ "smax z23.s, p2/M, z23.s, z26.s\n"
+ "smax z12.s, p2/M, z12.s, z26.s\n"
+ "smax z13.s, p2/M, z13.s, z26.s\n"
+ "smax z14.s, p2/M, z14.s, z26.s\n"
+ "smax z8.s, p2/M, z8.s, z26.s\n"
+ "smax z9.s, p2/M, z9.s, z26.s\n"
+ "smax z10.s, p2/M, z10.s, z26.s\n"
+ "smax z11.s, p2/M, z11.s, z26.s\n"
"uzp1 z23.h, z23.h, z12.h\n"
- "smax z14.s, p2/M, z14.s, z25.s\n"
- "smax z8.s, p2/M, z8.s, z25.s\n"
- "uzp1 z24.h, z13.h, z14.h\n"
- "uzp1 z23.b, z23.b, z24.b\n"
- "smax z9.s, p2/M, z9.s, z25.s\n"
- "smax z10.s, p2/M, z10.s, z25.s\n"
+ "smax z15.s, p2/M, z15.s, z26.s\n"
+ "smax z20.s, p2/M, z20.s, z26.s\n"
+ "uzp1 z25.h, z13.h, z14.h\n"
+ "smax z21.s, p2/M, z21.s, z26.s\n"
+ "smax z22.s, p2/M, z22.s, z26.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z23.b }, p1, [x11]\n"
- "smax z11.s, p2/M, z11.s, z25.s\n"
- "smax z15.s, p2/M, z15.s, z25.s\n"
- "uzp1 z23.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z23.b\n"
- "smax z20.s, p2/M, z20.s, z25.s\n"
- "smax z21.s, p2/M, z21.s, z25.s\n"
+ "smax z16.s, p2/M, z16.s, z26.s\n"
+ "smax z17.s, p2/M, z17.s, z26.s\n"
+ "uzp1 z24.h, z10.h, z11.h\n"
+ "smax z18.s, p2/M, z18.s, z26.s\n"
+ "smax z19.s, p2/M, z19.s, z26.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x26]\n"
- "smax z22.s, p2/M, z22.s, z25.s\n"
- "smax z16.s, p2/M, z16.s, z25.s\n"
+ "uzp1 z23.b, z23.b, z25.b\n"
"uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z15.b, z15.b, z20.b\n"
- "smax z17.s, p2/M, z17.s, z25.s\n"
- "smax z18.s, p2/M, z18.s, z25.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x25]\n"
- "smax z19.s, p2/M, z19.s, z25.s\n"
+ "uzp1 z8.b, z8.b, z24.b\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "st1b { z23.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
+ "uzp1 z15.b, z15.b, z20.b\n"
"uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z8.b }, p1, [x26]\n"
+ "st1b { z15.b }, p1, [x25]\n"
"st1b { z16.b }, p1, [x24]\n"
- "addvl x11, x11, #1\n"
"52:" // Height 4: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 41b\n"
"b 80f\n"
"53:" // Height 5
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "mov x11, %x[output_ptr]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"54:" // Height 5: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"mov z16.s, #0x0\n"
@@ -1381,8 +1381,8 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov x28, #0x0\n"
"56:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 57f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1410,165 +1410,165 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ble 60f\n"
"59:" // Height 5: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z1.b }, p2/Z, [x10]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z6.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z3.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z7.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z5.d, z6.d, z1.d\n"
- "trn2 z6.d, z6.d, z1.d\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
"trn1 z3.d, z7.d, z2.d\n"
"trn2 z7.d, z7.d, z2.d\n"
- "ld1b { z1.b }, p2/Z, [x9]\n"
- "trn1 z2.d, z4.d, z0.d\n"
- "trn2 z4.d, z4.d, z0.d\n"
- "ld1b { z0.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x450198a8 // smmla z8.s, z5.b, z1.b\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1b { z0.b }, p2/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45019888 // smmla z8.s, z4.b, z1.b\n"
".inst 0x45019870 // smmla z16.s, z3.b, z1.b\n"
".inst 0x45019858 // smmla z24.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- ".inst 0x450098ac // smmla z12.s, z5.b, z0.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4500988c // smmla z12.s, z4.b, z0.b\n"
".inst 0x45009874 // smmla z20.s, z3.b, z0.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x4500985c // smmla z28.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #3, MUL VL]\n"
- ".inst 0x450198a9 // smmla z9.s, z5.b, z1.b\n"
- "add x25, x25, #0x10\n"
+ "ld1b { z0.b }, p2/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45019889 // smmla z9.s, z4.b, z1.b\n"
".inst 0x45019871 // smmla z17.s, z3.b, z1.b\n"
".inst 0x45019859 // smmla z25.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x450098ad // smmla z13.s, z5.b, z0.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4500988d // smmla z13.s, z4.b, z0.b\n"
".inst 0x45009875 // smmla z21.s, z3.b, z0.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4500985d // smmla z29.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #5, MUL VL]\n"
- ".inst 0x450198aa // smmla z10.s, z5.b, z1.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4501988a // smmla z10.s, z4.b, z1.b\n"
".inst 0x45019872 // smmla z18.s, z3.b, z1.b\n"
".inst 0x4501985a // smmla z26.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #6, MUL VL]\n"
- ".inst 0x450098ae // smmla z14.s, z5.b, z0.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4500988e // smmla z14.s, z4.b, z0.b\n"
".inst 0x45009876 // smmla z22.s, z3.b, z0.b\n"
".inst 0x4500985e // smmla z30.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- ".inst 0x450198ab // smmla z11.s, z5.b, z1.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x4501988b // smmla z11.s, z4.b, z1.b\n"
".inst 0x45019873 // smmla z19.s, z3.b, z1.b\n"
".inst 0x4501985b // smmla z27.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #-8, MUL VL]\n"
- ".inst 0x450098af // smmla z15.s, z5.b, z0.b\n"
+ ".inst 0x4500988f // smmla z15.s, z4.b, z0.b\n"
".inst 0x45009877 // smmla z23.s, z3.b, z0.b\n"
".inst 0x4500985f // smmla z31.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #-7, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-7, MUL VL]\n"
".inst 0x450198c8 // smmla z8.s, z6.b, z1.b\n"
".inst 0x450198f0 // smmla z16.s, z7.b, z1.b\n"
- ".inst 0x45019898 // smmla z24.s, z4.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #-6, MUL VL]\n"
+ ".inst 0x450198b8 // smmla z24.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-6, MUL VL]\n"
".inst 0x450098cc // smmla z12.s, z6.b, z0.b\n"
".inst 0x450098f4 // smmla z20.s, z7.b, z0.b\n"
- ".inst 0x4500989c // smmla z28.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #-5, MUL VL]\n"
+ ".inst 0x450098bc // smmla z28.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-5, MUL VL]\n"
".inst 0x450198c9 // smmla z9.s, z6.b, z1.b\n"
".inst 0x450198f1 // smmla z17.s, z7.b, z1.b\n"
- ".inst 0x45019899 // smmla z25.s, z4.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #-4, MUL VL]\n"
+ ".inst 0x450198b9 // smmla z25.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-4, MUL VL]\n"
".inst 0x450098cd // smmla z13.s, z6.b, z0.b\n"
".inst 0x450098f5 // smmla z21.s, z7.b, z0.b\n"
- ".inst 0x4500989d // smmla z29.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #-3, MUL VL]\n"
+ ".inst 0x450098bd // smmla z29.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-3, MUL VL]\n"
".inst 0x450198ca // smmla z10.s, z6.b, z1.b\n"
".inst 0x450198f2 // smmla z18.s, z7.b, z1.b\n"
- ".inst 0x4501989a // smmla z26.s, z4.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #-2, MUL VL]\n"
+ ".inst 0x450198ba // smmla z26.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-2, MUL VL]\n"
".inst 0x450098ce // smmla z14.s, z6.b, z0.b\n"
".inst 0x450098f6 // smmla z22.s, z7.b, z0.b\n"
- ".inst 0x4500989e // smmla z30.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #-1, MUL VL]\n"
+ ".inst 0x450098be // smmla z30.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-1, MUL VL]\n"
".inst 0x450198cb // smmla z11.s, z6.b, z1.b\n"
".inst 0x450198f3 // smmla z19.s, z7.b, z1.b\n"
- ".inst 0x4501989b // smmla z27.s, z4.b, z1.b\n"
+ ".inst 0x450198bb // smmla z27.s, z5.b, z1.b\n"
".inst 0x450098cf // smmla z15.s, z6.b, z0.b\n"
".inst 0x450098f7 // smmla z23.s, z7.b, z0.b\n"
- ".inst 0x4500989f // smmla z31.s, z4.b, z0.b\n"
+ ".inst 0x450098bf // smmla z31.s, z5.b, z0.b\n"
"bgt 59b\n"
"60:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z2.b }, p2/Z, [x10]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z4.b }, p0/Z, [x25]\n"
+ "ld1rqb { z6.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z7.d, z1.d, z4.d\n"
- "trn2 z1.d, z1.d, z4.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x23]\n"
"ld1rqb { z5.b }, p0/Z, [x22]\n"
- "trn1 z6.d, z3.d, z2.d\n"
- "trn2 z3.d, z3.d, z2.d\n"
- "ld1b { z2.b }, p2/Z, [x9]\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
- "ld1b { z0.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #1, MUL VL]\n"
".inst 0x450298e8 // smmla z8.s, z7.b, z2.b\n"
".inst 0x450298d0 // smmla z16.s, z6.b, z2.b\n"
".inst 0x45029898 // smmla z24.s, z4.b, z2.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z2.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x450098ec // smmla z12.s, z7.b, z0.b\n"
".inst 0x450098d4 // smmla z20.s, z6.b, z0.b\n"
".inst 0x4500989c // smmla z28.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x450298e9 // smmla z9.s, z7.b, z2.b\n"
".inst 0x450298d1 // smmla z17.s, z6.b, z2.b\n"
".inst 0x45029899 // smmla z25.s, z4.b, z2.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z2.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x450098ed // smmla z13.s, z7.b, z0.b\n"
".inst 0x450098d5 // smmla z21.s, z6.b, z0.b\n"
".inst 0x4500989d // smmla z29.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x450298ea // smmla z10.s, z7.b, z2.b\n"
".inst 0x450298d2 // smmla z18.s, z6.b, z2.b\n"
".inst 0x4502989a // smmla z26.s, z4.b, z2.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z2.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x450098ee // smmla z14.s, z7.b, z0.b\n"
".inst 0x450098d6 // smmla z22.s, z6.b, z0.b\n"
".inst 0x4500989e // smmla z30.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x450298eb // smmla z11.s, z7.b, z2.b\n"
- "addvl x9, x9, #8\n"
".inst 0x450298d3 // smmla z19.s, z6.b, z2.b\n"
".inst 0x4502989b // smmla z27.s, z4.b, z2.b\n"
".inst 0x450098ef // smmla z15.s, z7.b, z0.b\n"
".inst 0x450098d7 // smmla z23.s, z6.b, z0.b\n"
".inst 0x4500989f // smmla z31.s, z4.b, z0.b\n"
"ble 61f\n"
- "ld1b { z2.b }, p2/Z, [x9]\n"
- "ld1b { z0.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z2.b }, p2/Z, [x10]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #1, MUL VL]\n"
".inst 0x45029828 // smmla z8.s, z1.b, z2.b\n"
".inst 0x45029870 // smmla z16.s, z3.b, z2.b\n"
".inst 0x450298b8 // smmla z24.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x4500982c // smmla z12.s, z1.b, z0.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #2, MUL VL]\n"
".inst 0x45009874 // smmla z20.s, z3.b, z0.b\n"
".inst 0x450098bc // smmla z28.s, z5.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x45029829 // smmla z9.s, z1.b, z2.b\n"
".inst 0x45029871 // smmla z17.s, z3.b, z2.b\n"
".inst 0x450298b9 // smmla z25.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x4500982d // smmla z13.s, z1.b, z0.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #4, MUL VL]\n"
".inst 0x45009875 // smmla z21.s, z3.b, z0.b\n"
".inst 0x450098bd // smmla z29.s, z5.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x4502982a // smmla z10.s, z1.b, z2.b\n"
".inst 0x45029872 // smmla z18.s, z3.b, z2.b\n"
".inst 0x450298ba // smmla z26.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x4500982e // smmla z14.s, z1.b, z0.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #6, MUL VL]\n"
".inst 0x45009876 // smmla z22.s, z3.b, z0.b\n"
".inst 0x450098be // smmla z30.s, z5.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z0.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x4502982b // smmla z11.s, z1.b, z2.b\n"
".inst 0x45029873 // smmla z19.s, z3.b, z2.b\n"
".inst 0x450298bb // smmla z27.s, z5.b, z2.b\n"
@@ -1583,27 +1583,27 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z4.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "add x26, x11, x20\n"
+ "ld1w { z3.s }, p2/Z, [x14]\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "ld1w { z3.s }, p2/Z, [x14]\n"
"ld1w { z2.s }, p2/Z, [x14, #1, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x14, #2, MUL VL]\n"
"uzp1 z13.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "ld1w { z1.s }, p2/Z, [x14, #2, MUL VL]\n"
"ld1w { z0.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "add x25, x26, x20\n"
- "add x24, x25, x20\n"
+ "add x26, x9, x20\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x23, x24, x20\n"
- "addvl x14, x14, #4\n"
+ "add x25, x26, x20\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "add x24, x25, x20\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "add x23, x24, x20\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
"uzp1 z24.d, z24.d, z28.d\n"
@@ -1611,10 +1611,10 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"uzp1 z26.d, z26.d, z30.d\n"
"uzp1 z27.d, z27.d, z31.d\n"
"mov z31.d, z4.d\n"
- "add z31.s, z31.s, z3.s\n"
"add z12.s, z12.s, z2.s\n"
"add z13.s, z13.s, z1.s\n"
"add z14.s, z14.s, z0.s\n"
+ "add z31.s, z31.s, z3.s\n"
"add z8.s, z8.s, z3.s\n"
"add z9.s, z9.s, z2.s\n"
"add z10.s, z10.s, z1.s\n"
@@ -1685,11 +1685,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z28.s, z28.s, #0x1f\n"
"asr z23.s, z23.s, #0x1f\n"
"sqadd z31.s, z31.s, z30.s\n"
+ "and z30.d, z8.d, z0.d\n"
"sqadd z12.s, z12.s, z29.s\n"
+ "and z29.d, z9.d, z1.d\n"
"sqadd z13.s, z13.s, z28.s\n"
"sqadd z14.s, z14.s, z23.s\n"
- "and z30.d, z8.d, z0.d\n"
- "and z29.d, z9.d, z1.d\n"
"and z28.d, z10.d, z2.d\n"
"and z23.d, z11.d, z3.d\n"
"asr z30.s, z30.s, #0x1f\n"
@@ -1697,11 +1697,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z28.s, z28.s, #0x1f\n"
"asr z23.s, z23.s, #0x1f\n"
"sqadd z8.s, z8.s, z30.s\n"
+ "and z30.d, z15.d, z0.d\n"
"sqadd z9.s, z9.s, z29.s\n"
+ "and z29.d, z20.d, z1.d\n"
"sqadd z10.s, z10.s, z28.s\n"
"sqadd z11.s, z11.s, z23.s\n"
- "and z30.d, z15.d, z0.d\n"
- "and z29.d, z20.d, z1.d\n"
"and z28.d, z21.d, z2.d\n"
"and z23.d, z22.d, z3.d\n"
"asr z30.s, z30.s, #0x1f\n"
@@ -1709,11 +1709,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z28.s, z28.s, #0x1f\n"
"asr z23.s, z23.s, #0x1f\n"
"sqadd z15.s, z15.s, z30.s\n"
+ "and z30.d, z16.d, z0.d\n"
"sqadd z20.s, z20.s, z29.s\n"
+ "and z29.d, z17.d, z1.d\n"
"sqadd z21.s, z21.s, z28.s\n"
"sqadd z22.s, z22.s, z23.s\n"
- "and z30.d, z16.d, z0.d\n"
- "and z29.d, z17.d, z1.d\n"
"and z28.d, z18.d, z2.d\n"
"and z23.d, z19.d, z3.d\n"
"asr z30.s, z30.s, #0x1f\n"
@@ -1721,11 +1721,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z28.s, z28.s, #0x1f\n"
"asr z23.s, z23.s, #0x1f\n"
"sqadd z16.s, z16.s, z30.s\n"
+ "and z30.d, z24.d, z0.d\n"
"sqadd z17.s, z17.s, z29.s\n"
+ "and z29.d, z25.d, z1.d\n"
"sqadd z18.s, z18.s, z28.s\n"
"sqadd z19.s, z19.s, z23.s\n"
- "and z30.d, z24.d, z0.d\n"
- "and z29.d, z25.d, z1.d\n"
"and z28.d, z26.d, z2.d\n"
"and z23.d, z27.d, z3.d\n"
"asr z30.s, z30.s, #0x1f\n"
@@ -1738,51 +1738,51 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"sqadd z27.s, z27.s, z23.s\n"
"64:" // Height 5: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z28.s }, p2/Z, [x20]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z28.s\n"
+ "ld1rw { z28.s }, p2/Z, [x20]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z28.s\n"
- "add z13.s, z13.s, z28.s\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z14.s, z14.s, z28.s\n"
- "add z8.s, z8.s, z28.s\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z28.s\n"
- "add z10.s, z10.s, z28.s\n"
+ "add z31.s, z31.s, z28.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z28.s\n"
- "add z15.s, z15.s, z28.s\n"
+ "add z12.s, z12.s, z28.s\n"
+ "add z13.s, z13.s, z28.s\n"
".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z28.s\n"
- "add z21.s, z21.s, z28.s\n"
+ "add z14.s, z14.s, z28.s\n"
+ "add z8.s, z8.s, z28.s\n"
".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z28.s\n"
- "add z16.s, z16.s, z28.s\n"
+ "add z9.s, z9.s, z28.s\n"
+ "add z10.s, z10.s, z28.s\n"
".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z28.s\n"
- "add z18.s, z18.s, z28.s\n"
+ "add z11.s, z11.s, z28.s\n"
+ "add z15.s, z15.s, z28.s\n"
".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z19.s, z19.s, z28.s\n"
- "add z24.s, z24.s, z28.s\n"
+ "add z20.s, z20.s, z28.s\n"
+ "add z21.s, z21.s, z28.s\n"
".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z28.s\n"
- "add z26.s, z26.s, z28.s\n"
+ "add z22.s, z22.s, z28.s\n"
+ "add z16.s, z16.s, z28.s\n"
".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z17.s, z17.s, z28.s\n"
+ "add z18.s, z18.s, z28.s\n"
"ld1rw { z23.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z28.s\n"
+ "add z19.s, z19.s, z28.s\n"
+ "add z24.s, z24.s, z28.s\n"
"add x20, %x[qp], %[minval]\n"
- "ld1rw { z28.s }, p2/Z, [x20]\n"
+ "add z25.s, z25.s, z28.s\n"
+ "add z26.s, z26.s, z28.s\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z28.s\n"
"smin z31.s, p2/M, z31.s, z23.s\n"
"smin z12.s, p2/M, z12.s, z23.s\n"
"smin z13.s, p2/M, z13.s, z23.s\n"
@@ -1803,71 +1803,72 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"smin z25.s, p2/M, z25.s, z23.s\n"
"smin z26.s, p2/M, z26.s, z23.s\n"
"smin z27.s, p2/M, z27.s, z23.s\n"
- "smax z31.s, p2/M, z31.s, z28.s\n"
- "smax z12.s, p2/M, z12.s, z28.s\n"
- "smax z13.s, p2/M, z13.s, z28.s\n"
+ "smax z31.s, p2/M, z31.s, z29.s\n"
+ "smax z12.s, p2/M, z12.s, z29.s\n"
+ "smax z13.s, p2/M, z13.s, z29.s\n"
+ "smax z14.s, p2/M, z14.s, z29.s\n"
+ "smax z8.s, p2/M, z8.s, z29.s\n"
+ "smax z9.s, p2/M, z9.s, z29.s\n"
+ "smax z10.s, p2/M, z10.s, z29.s\n"
+ "smax z11.s, p2/M, z11.s, z29.s\n"
"uzp1 z31.h, z31.h, z12.h\n"
- "smax z14.s, p2/M, z14.s, z28.s\n"
- "smax z8.s, p2/M, z8.s, z28.s\n"
- "uzp1 z23.h, z13.h, z14.h\n"
- "uzp1 z31.b, z31.b, z23.b\n"
- "smax z9.s, p2/M, z9.s, z28.s\n"
- "smax z10.s, p2/M, z10.s, z28.s\n"
+ "smax z15.s, p2/M, z15.s, z29.s\n"
+ "smax z20.s, p2/M, z20.s, z29.s\n"
+ "uzp1 z28.h, z13.h, z14.h\n"
+ "smax z21.s, p2/M, z21.s, z29.s\n"
+ "smax z22.s, p2/M, z22.s, z29.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z31.b }, p1, [x11]\n"
- "smax z11.s, p2/M, z11.s, z28.s\n"
- "smax z15.s, p2/M, z15.s, z28.s\n"
+ "smax z16.s, p2/M, z16.s, z29.s\n"
+ "smax z17.s, p2/M, z17.s, z29.s\n"
"uzp1 z23.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z23.b\n"
- "smax z20.s, p2/M, z20.s, z28.s\n"
- "smax z21.s, p2/M, z21.s, z28.s\n"
+ "smax z18.s, p2/M, z18.s, z29.s\n"
+ "smax z19.s, p2/M, z19.s, z29.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x26]\n"
- "smax z22.s, p2/M, z22.s, z28.s\n"
- "smax z16.s, p2/M, z16.s, z28.s\n"
+ "uzp1 z31.b, z31.b, z28.b\n"
+ "smax z24.s, p2/M, z24.s, z29.s\n"
+ "smax z25.s, p2/M, z25.s, z29.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z15.b, z15.b, z20.b\n"
- "smax z17.s, p2/M, z17.s, z28.s\n"
- "smax z18.s, p2/M, z18.s, z28.s\n"
+ "smax z26.s, p2/M, z26.s, z29.s\n"
+ "smax z27.s, p2/M, z27.s, z29.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x25]\n"
- "smax z19.s, p2/M, z19.s, z28.s\n"
- "smax z24.s, p2/M, z24.s, z28.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
- "smax z25.s, p2/M, z25.s, z28.s\n"
- "smax z26.s, p2/M, z26.s, z28.s\n"
+ "uzp1 z8.b, z8.b, z23.b\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
+ "st1b { z31.b }, p1, [x9]\n"
+ "addvl x9, x9, #1\n"
"uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z15.b, z15.b, z20.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "st1b { z8.b }, p1, [x26]\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z15.b }, p1, [x25]\n"
"st1b { z16.b }, p1, [x24]\n"
- "smax z27.s, p2/M, z27.s, z28.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"st1b { z24.b }, p1, [x23]\n"
- "addvl x11, x11, #1\n"
"65:" // Height 5: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 54b\n"
"b 80f\n"
"66:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x6\n"
"mov x14, %x[col_bias]\n"
"ldr x13, [%x[args_ptr], %[offsetof_multiplier_ptr]]\n"
"ldr x12, [%x[args_ptr], %[offsetof_shift_ptr]]\n"
- "ldr x10, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x11, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
- "ldr x9, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"67:" // Height 6: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x10\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
"mov z10.s, #0x0\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
"mov z13.s, #0x0\n"
+ "whilelt p1.b, x20, x11\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
"mov z16.s, #0x0\n"
@@ -1890,8 +1891,8 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"mov x28, #0x0\n"
"69:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 70f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1922,168 +1923,168 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"ble 73f\n"
"72:" // Height 6: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z7.b }, p0/Z, [x26]\n"
- "ld1rqb { z0.b }, p0/Z, [x25]\n"
- "trn1 z6.d, z7.d, z0.d\n"
- "ld1rqb { z5.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "trn2 z7.d, z7.d, z0.d\n"
- "trn1 z4.d, z5.d, z1.d\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z0.b }, p0/Z, [x21]\n"
- "trn2 z5.d, z5.d, z1.d\n"
- "trn1 z2.d, z3.d, z0.d\n"
- "trn2 z3.d, z3.d, z0.d\n"
- "ld1b { z1.b }, p2/Z, [x9]\n"
- "ld1b { z0.b }, p2/Z, [x9, #1, MUL VL]\n"
- ".inst 0x450198c8 // smmla z8.s, z6.b, z1.b\n"
- ".inst 0x45019890 // smmla z16.s, z4.b, z1.b\n"
- ".inst 0x45019858 // smmla z24.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x10]\n"
"sub x27, x27, #0x10\n"
- ".inst 0x450098cc // smmla z12.s, z6.b, z0.b\n"
- ".inst 0x45009894 // smmla z20.s, z4.b, z0.b\n"
"cmp x27, #0x10\n"
+ "ld1rqb { z6.b }, p0/Z, [x26]\n"
"add x26, x26, #0x10\n"
- ".inst 0x4500985c // smmla z28.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #3, MUL VL]\n"
- ".inst 0x450198c9 // smmla z9.s, z6.b, z1.b\n"
+ "ld1rqb { z3.b }, p0/Z, [x25]\n"
"add x25, x25, #0x10\n"
- ".inst 0x45019891 // smmla z17.s, z4.b, z1.b\n"
- ".inst 0x45019859 // smmla z25.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1rqb { z7.b }, p0/Z, [x24]\n"
"add x24, x24, #0x10\n"
- ".inst 0x450098cd // smmla z13.s, z6.b, z0.b\n"
- ".inst 0x45009895 // smmla z21.s, z4.b, z0.b\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z0.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
- ".inst 0x4500985d // smmla z29.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #5, MUL VL]\n"
- ".inst 0x450198ca // smmla z10.s, z6.b, z1.b\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
"add x21, x21, #0x10\n"
- ".inst 0x45019892 // smmla z18.s, z4.b, z1.b\n"
+ "trn1 z3.d, z7.d, z2.d\n"
+ "trn2 z7.d, z7.d, z2.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1b { z0.b }, p2/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45019888 // smmla z8.s, z4.b, z1.b\n"
+ ".inst 0x45019870 // smmla z16.s, z3.b, z1.b\n"
+ ".inst 0x45019858 // smmla z24.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4500988c // smmla z12.s, z4.b, z0.b\n"
+ ".inst 0x45009874 // smmla z20.s, z3.b, z0.b\n"
+ ".inst 0x4500985c // smmla z28.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45019889 // smmla z9.s, z4.b, z1.b\n"
+ ".inst 0x45019871 // smmla z17.s, z3.b, z1.b\n"
+ ".inst 0x45019859 // smmla z25.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4500988d // smmla z13.s, z4.b, z0.b\n"
+ ".inst 0x45009875 // smmla z21.s, z3.b, z0.b\n"
+ ".inst 0x4500985d // smmla z29.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4501988a // smmla z10.s, z4.b, z1.b\n"
+ ".inst 0x45019872 // smmla z18.s, z3.b, z1.b\n"
".inst 0x4501985a // smmla z26.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #6, MUL VL]\n"
- ".inst 0x450098ce // smmla z14.s, z6.b, z0.b\n"
- ".inst 0x45009896 // smmla z22.s, z4.b, z0.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4500988e // smmla z14.s, z4.b, z0.b\n"
+ ".inst 0x45009876 // smmla z22.s, z3.b, z0.b\n"
".inst 0x4500985e // smmla z30.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #16\n"
- ".inst 0x450198cb // smmla z11.s, z6.b, z1.b\n"
- ".inst 0x45019893 // smmla z19.s, z4.b, z1.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x4501988b // smmla z11.s, z4.b, z1.b\n"
+ ".inst 0x45019873 // smmla z19.s, z3.b, z1.b\n"
".inst 0x4501985b // smmla z27.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #-8, MUL VL]\n"
- ".inst 0x450098cf // smmla z15.s, z6.b, z0.b\n"
- ".inst 0x45009897 // smmla z23.s, z4.b, z0.b\n"
+ ".inst 0x4500988f // smmla z15.s, z4.b, z0.b\n"
+ ".inst 0x45009877 // smmla z23.s, z3.b, z0.b\n"
".inst 0x4500985f // smmla z31.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #-7, MUL VL]\n"
- ".inst 0x450198e8 // smmla z8.s, z7.b, z1.b\n"
- ".inst 0x450198b0 // smmla z16.s, z5.b, z1.b\n"
- ".inst 0x45019878 // smmla z24.s, z3.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #-6, MUL VL]\n"
- ".inst 0x450098ec // smmla z12.s, z7.b, z0.b\n"
- ".inst 0x450098b4 // smmla z20.s, z5.b, z0.b\n"
- ".inst 0x4500987c // smmla z28.s, z3.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #-5, MUL VL]\n"
- ".inst 0x450198e9 // smmla z9.s, z7.b, z1.b\n"
- ".inst 0x450198b1 // smmla z17.s, z5.b, z1.b\n"
- ".inst 0x45019879 // smmla z25.s, z3.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #-4, MUL VL]\n"
- ".inst 0x450098ed // smmla z13.s, z7.b, z0.b\n"
- ".inst 0x450098b5 // smmla z21.s, z5.b, z0.b\n"
- ".inst 0x4500987d // smmla z29.s, z3.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #-3, MUL VL]\n"
- ".inst 0x450198ea // smmla z10.s, z7.b, z1.b\n"
- ".inst 0x450198b2 // smmla z18.s, z5.b, z1.b\n"
- ".inst 0x4501987a // smmla z26.s, z3.b, z1.b\n"
- "ld1b { z1.b }, p2/Z, [x9, #-2, MUL VL]\n"
- ".inst 0x450098ee // smmla z14.s, z7.b, z0.b\n"
- ".inst 0x450098b6 // smmla z22.s, z5.b, z0.b\n"
- ".inst 0x4500987e // smmla z30.s, z3.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #-1, MUL VL]\n"
- ".inst 0x450198eb // smmla z11.s, z7.b, z1.b\n"
- ".inst 0x450198b3 // smmla z19.s, z5.b, z1.b\n"
- ".inst 0x4501987b // smmla z27.s, z3.b, z1.b\n"
- ".inst 0x450098ef // smmla z15.s, z7.b, z0.b\n"
- ".inst 0x450098b7 // smmla z23.s, z5.b, z0.b\n"
- ".inst 0x4500987f // smmla z31.s, z3.b, z0.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x450198c8 // smmla z8.s, z6.b, z1.b\n"
+ ".inst 0x450198f0 // smmla z16.s, z7.b, z1.b\n"
+ ".inst 0x450198b8 // smmla z24.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x450098cc // smmla z12.s, z6.b, z0.b\n"
+ ".inst 0x450098f4 // smmla z20.s, z7.b, z0.b\n"
+ ".inst 0x450098bc // smmla z28.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x450198c9 // smmla z9.s, z6.b, z1.b\n"
+ ".inst 0x450198f1 // smmla z17.s, z7.b, z1.b\n"
+ ".inst 0x450198b9 // smmla z25.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x450098cd // smmla z13.s, z6.b, z0.b\n"
+ ".inst 0x450098f5 // smmla z21.s, z7.b, z0.b\n"
+ ".inst 0x450098bd // smmla z29.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x450198ca // smmla z10.s, z6.b, z1.b\n"
+ ".inst 0x450198f2 // smmla z18.s, z7.b, z1.b\n"
+ ".inst 0x450198ba // smmla z26.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p2/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x450098ce // smmla z14.s, z6.b, z0.b\n"
+ ".inst 0x450098f6 // smmla z22.s, z7.b, z0.b\n"
+ ".inst 0x450098be // smmla z30.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p2/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x450198cb // smmla z11.s, z6.b, z1.b\n"
+ ".inst 0x450198f3 // smmla z19.s, z7.b, z1.b\n"
+ ".inst 0x450198bb // smmla z27.s, z5.b, z1.b\n"
+ ".inst 0x450098cf // smmla z15.s, z6.b, z0.b\n"
+ ".inst 0x450098f7 // smmla z23.s, z7.b, z0.b\n"
+ ".inst 0x450098bf // smmla z31.s, z5.b, z0.b\n"
"bgt 72b\n"
"73:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z2.b }, p2/Z, [x10]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z0.b }, p0/Z, [x25]\n"
- "trn1 z7.d, z1.d, z0.d\n"
+ "ld1rqb { z6.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z0.d\n"
- "trn1 z6.d, z3.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x23]\n"
"ld1rqb { z5.b }, p0/Z, [x22]\n"
"ld1rqb { z0.b }, p0/Z, [x21]\n"
- "trn2 z3.d, z3.d, z2.d\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
- "ld1b { z2.b }, p2/Z, [x9]\n"
- "ld1b { z0.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #1, MUL VL]\n"
".inst 0x450298e8 // smmla z8.s, z7.b, z2.b\n"
".inst 0x450298d0 // smmla z16.s, z6.b, z2.b\n"
".inst 0x45029898 // smmla z24.s, z4.b, z2.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
+ "ld1b { z2.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x450098ec // smmla z12.s, z7.b, z0.b\n"
".inst 0x450098d4 // smmla z20.s, z6.b, z0.b\n"
".inst 0x4500989c // smmla z28.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x450298e9 // smmla z9.s, z7.b, z2.b\n"
".inst 0x450298d1 // smmla z17.s, z6.b, z2.b\n"
".inst 0x45029899 // smmla z25.s, z4.b, z2.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #4, MUL VL]\n"
+ "ld1b { z2.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x450098ed // smmla z13.s, z7.b, z0.b\n"
".inst 0x450098d5 // smmla z21.s, z6.b, z0.b\n"
".inst 0x4500989d // smmla z29.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x450298ea // smmla z10.s, z7.b, z2.b\n"
".inst 0x450298d2 // smmla z18.s, z6.b, z2.b\n"
".inst 0x4502989a // smmla z26.s, z4.b, z2.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #6, MUL VL]\n"
+ "ld1b { z2.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x450098ee // smmla z14.s, z7.b, z0.b\n"
".inst 0x450098d6 // smmla z22.s, z6.b, z0.b\n"
".inst 0x4500989e // smmla z30.s, z4.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #7, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x450298eb // smmla z11.s, z7.b, z2.b\n"
- "addvl x9, x9, #8\n"
".inst 0x450298d3 // smmla z19.s, z6.b, z2.b\n"
".inst 0x4502989b // smmla z27.s, z4.b, z2.b\n"
".inst 0x450098ef // smmla z15.s, z7.b, z0.b\n"
".inst 0x450098d7 // smmla z23.s, z6.b, z0.b\n"
".inst 0x4500989f // smmla z31.s, z4.b, z0.b\n"
"ble 74f\n"
- "ld1b { z2.b }, p2/Z, [x9]\n"
- "ld1b { z0.b }, p2/Z, [x9, #1, MUL VL]\n"
+ "ld1b { z2.b }, p2/Z, [x10]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #1, MUL VL]\n"
".inst 0x45029828 // smmla z8.s, z1.b, z2.b\n"
".inst 0x45029870 // smmla z16.s, z3.b, z2.b\n"
".inst 0x450298b8 // smmla z24.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [x10, #2, MUL VL]\n"
".inst 0x4500982c // smmla z12.s, z1.b, z0.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #2, MUL VL]\n"
".inst 0x45009874 // smmla z20.s, z3.b, z0.b\n"
".inst 0x450098bc // smmla z28.s, z5.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #3, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #3, MUL VL]\n"
".inst 0x45029829 // smmla z9.s, z1.b, z2.b\n"
".inst 0x45029871 // smmla z17.s, z3.b, z2.b\n"
".inst 0x450298b9 // smmla z25.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [x10, #4, MUL VL]\n"
".inst 0x4500982d // smmla z13.s, z1.b, z0.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #4, MUL VL]\n"
".inst 0x45009875 // smmla z21.s, z3.b, z0.b\n"
".inst 0x450098bd // smmla z29.s, z5.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #5, MUL VL]\n"
+ "ld1b { z0.b }, p2/Z, [x10, #5, MUL VL]\n"
".inst 0x4502982a // smmla z10.s, z1.b, z2.b\n"
".inst 0x45029872 // smmla z18.s, z3.b, z2.b\n"
".inst 0x450298ba // smmla z26.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [x10, #6, MUL VL]\n"
".inst 0x4500982e // smmla z14.s, z1.b, z0.b\n"
- "ld1b { z2.b }, p2/Z, [x9, #6, MUL VL]\n"
".inst 0x45009876 // smmla z22.s, z3.b, z0.b\n"
".inst 0x450098be // smmla z30.s, z5.b, z0.b\n"
- "ld1b { z0.b }, p2/Z, [x9, #7, MUL VL]\n"
- "addvl x9, x9, #8\n"
+ "ld1b { z0.b }, p2/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x4502982b // smmla z11.s, z1.b, z2.b\n"
".inst 0x45029873 // smmla z19.s, z3.b, z2.b\n"
".inst 0x450298bb // smmla z27.s, z5.b, z2.b\n"
@@ -2097,31 +2098,31 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"bne 69b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z4.d, z8.d, z12.d\n"
- "add x26, x11, x20\n"
"uzp2 z8.d, z8.d, z12.d\n"
+ "ld1w { z3.s }, p2/Z, [x14]\n"
"uzp1 z12.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "add x25, x26, x20\n"
- "ld1w { z3.s }, p2/Z, [x14]\n"
- "uzp1 z13.d, z10.d, z14.d\n"
- "uzp2 z10.d, z10.d, z14.d\n"
"ld1w { z2.s }, p2/Z, [x14, #1, MUL VL]\n"
"ld1w { z1.s }, p2/Z, [x14, #2, MUL VL]\n"
+ "uzp1 z13.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "ld1w { z0.s }, p2/Z, [x14, #3, MUL VL]\n"
+ "addvl x14, x14, #4\n"
+ "add x26, x9, x20\n"
"uzp1 z14.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "ld1w { z0.s }, p2/Z, [x14, #3, MUL VL]\n"
- "add x24, x25, x20\n"
"uzp1 z15.d, z16.d, z20.d\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x23, x24, x20\n"
- "add x22, x23, x20\n"
+ "add x25, x26, x20\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
- "addvl x14, x14, #4\n"
+ "add x24, x25, x20\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "add x23, x24, x20\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "add x22, x23, x20\n"
"uzp1 z23.d, z24.d, z28.d\n"
"uzp2 z24.d, z24.d, z28.d\n"
"uzp1 z28.d, z25.d, z29.d\n"
@@ -2131,10 +2132,10 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"uzp1 z30.d, z27.d, z31.d\n"
"uzp2 z27.d, z27.d, z31.d\n"
"mov z31.d, z4.d\n"
- "add z31.s, z31.s, z3.s\n"
"add z12.s, z12.s, z2.s\n"
"add z13.s, z13.s, z1.s\n"
"add z14.s, z14.s, z0.s\n"
+ "add z31.s, z31.s, z3.s\n"
"add z8.s, z8.s, z3.s\n"
"add z9.s, z9.s, z2.s\n"
"add z10.s, z10.s, z1.s\n"
@@ -2213,11 +2214,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z31.s, z31.s, z7.s\n"
+ "and z7.d, z8.d, z0.d\n"
"sqadd z12.s, z12.s, z6.s\n"
+ "and z6.d, z9.d, z1.d\n"
"sqadd z13.s, z13.s, z5.s\n"
"sqadd z14.s, z14.s, z4.s\n"
- "and z7.d, z8.d, z0.d\n"
- "and z6.d, z9.d, z1.d\n"
"and z5.d, z10.d, z2.d\n"
"and z4.d, z11.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2225,11 +2226,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z8.s, z8.s, z7.s\n"
+ "and z7.d, z15.d, z0.d\n"
"sqadd z9.s, z9.s, z6.s\n"
+ "and z6.d, z20.d, z1.d\n"
"sqadd z10.s, z10.s, z5.s\n"
"sqadd z11.s, z11.s, z4.s\n"
- "and z7.d, z15.d, z0.d\n"
- "and z6.d, z20.d, z1.d\n"
"and z5.d, z21.d, z2.d\n"
"and z4.d, z22.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2237,11 +2238,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z15.s, z15.s, z7.s\n"
+ "and z7.d, z16.d, z0.d\n"
"sqadd z20.s, z20.s, z6.s\n"
+ "and z6.d, z17.d, z1.d\n"
"sqadd z21.s, z21.s, z5.s\n"
"sqadd z22.s, z22.s, z4.s\n"
- "and z7.d, z16.d, z0.d\n"
- "and z6.d, z17.d, z1.d\n"
"and z5.d, z18.d, z2.d\n"
"and z4.d, z19.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2249,11 +2250,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z16.s, z16.s, z7.s\n"
+ "and z7.d, z23.d, z0.d\n"
"sqadd z17.s, z17.s, z6.s\n"
+ "and z6.d, z28.d, z1.d\n"
"sqadd z18.s, z18.s, z5.s\n"
"sqadd z19.s, z19.s, z4.s\n"
- "and z7.d, z23.d, z0.d\n"
- "and z6.d, z28.d, z1.d\n"
"and z5.d, z29.d, z2.d\n"
"and z4.d, z30.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2261,11 +2262,11 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"asr z5.s, z5.s, #0x1f\n"
"asr z4.s, z4.s, #0x1f\n"
"sqadd z23.s, z23.s, z7.s\n"
+ "and z7.d, z24.d, z0.d\n"
"sqadd z28.s, z28.s, z6.s\n"
+ "and z6.d, z25.d, z1.d\n"
"sqadd z29.s, z29.s, z5.s\n"
"sqadd z30.s, z30.s, z4.s\n"
- "and z7.d, z24.d, z0.d\n"
- "and z6.d, z25.d, z1.d\n"
"and z5.d, z26.d, z2.d\n"
"and z4.d, z27.d, z3.d\n"
"asr z7.s, z7.s, #0x1f\n"
@@ -2278,59 +2279,59 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"sqadd z27.s, z27.s, z4.s\n"
"77:" // Height 6: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z4.s\n"
+ "ld1rw { z4.s }, p2/Z, [x20]\n"
".inst 0x4482882c // srshl z12.s, p2/M, z12.s, z1.s\n"
".inst 0x4482884d // srshl z13.s, p2/M, z13.s, z2.s\n"
- "add z12.s, z12.s, z4.s\n"
- "add z13.s, z13.s, z4.s\n"
".inst 0x4482886e // srshl z14.s, p2/M, z14.s, z3.s\n"
".inst 0x44828808 // srshl z8.s, p2/M, z8.s, z0.s\n"
- "add z14.s, z14.s, z4.s\n"
- "add z8.s, z8.s, z4.s\n"
".inst 0x44828829 // srshl z9.s, p2/M, z9.s, z1.s\n"
".inst 0x4482884a // srshl z10.s, p2/M, z10.s, z2.s\n"
- "add z9.s, z9.s, z4.s\n"
- "add z10.s, z10.s, z4.s\n"
+ "add z31.s, z31.s, z4.s\n"
".inst 0x4482886b // srshl z11.s, p2/M, z11.s, z3.s\n"
".inst 0x4482880f // srshl z15.s, p2/M, z15.s, z0.s\n"
- "add z11.s, z11.s, z4.s\n"
- "add z15.s, z15.s, z4.s\n"
+ "add z12.s, z12.s, z4.s\n"
+ "add z13.s, z13.s, z4.s\n"
".inst 0x44828834 // srshl z20.s, p2/M, z20.s, z1.s\n"
".inst 0x44828855 // srshl z21.s, p2/M, z21.s, z2.s\n"
- "add z20.s, z20.s, z4.s\n"
- "add z21.s, z21.s, z4.s\n"
+ "add z14.s, z14.s, z4.s\n"
+ "add z8.s, z8.s, z4.s\n"
".inst 0x44828876 // srshl z22.s, p2/M, z22.s, z3.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z4.s\n"
- "add z16.s, z16.s, z4.s\n"
+ "add z9.s, z9.s, z4.s\n"
+ "add z10.s, z10.s, z4.s\n"
".inst 0x44828831 // srshl z17.s, p2/M, z17.s, z1.s\n"
".inst 0x44828852 // srshl z18.s, p2/M, z18.s, z2.s\n"
- "add z17.s, z17.s, z4.s\n"
- "add z18.s, z18.s, z4.s\n"
+ "add z11.s, z11.s, z4.s\n"
+ "add z15.s, z15.s, z4.s\n"
".inst 0x44828873 // srshl z19.s, p2/M, z19.s, z3.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z19.s, z19.s, z4.s\n"
- "add z23.s, z23.s, z4.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z21.s, z21.s, z4.s\n"
".inst 0x4482883c // srshl z28.s, p2/M, z28.s, z1.s\n"
".inst 0x4482885d // srshl z29.s, p2/M, z29.s, z2.s\n"
- "add z28.s, z28.s, z4.s\n"
- "add z29.s, z29.s, z4.s\n"
+ "add z22.s, z22.s, z4.s\n"
+ "add z16.s, z16.s, z4.s\n"
".inst 0x4482887e // srshl z30.s, p2/M, z30.s, z3.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z30.s, z30.s, z4.s\n"
- "add z24.s, z24.s, z4.s\n"
+ "add z17.s, z17.s, z4.s\n"
+ "add z18.s, z18.s, z4.s\n"
".inst 0x44828839 // srshl z25.s, p2/M, z25.s, z1.s\n"
".inst 0x4482885a // srshl z26.s, p2/M, z26.s, z2.s\n"
- "add z25.s, z25.s, z4.s\n"
- "add z26.s, z26.s, z4.s\n"
+ "add z19.s, z19.s, z4.s\n"
+ "add z23.s, z23.s, z4.s\n"
".inst 0x4482887b // srshl z27.s, p2/M, z27.s, z3.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z28.s, z28.s, z4.s\n"
+ "add z29.s, z29.s, z4.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z4.s\n"
+ "add z30.s, z30.s, z4.s\n"
+ "add z24.s, z24.s, z4.s\n"
"add x20, %x[qp], %[minval]\n"
- "ld1rw { z1.s }, p2/Z, [x20]\n"
+ "add z25.s, z25.s, z4.s\n"
+ "add z26.s, z26.s, z4.s\n"
+ "ld1rw { z2.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z4.s\n"
"smin z31.s, p2/M, z31.s, z0.s\n"
"smin z12.s, p2/M, z12.s, z0.s\n"
"smin z13.s, p2/M, z13.s, z0.s\n"
@@ -2355,58 +2356,58 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"smin z25.s, p2/M, z25.s, z0.s\n"
"smin z26.s, p2/M, z26.s, z0.s\n"
"smin z27.s, p2/M, z27.s, z0.s\n"
- "smax z31.s, p2/M, z31.s, z1.s\n"
- "smax z12.s, p2/M, z12.s, z1.s\n"
- "smax z13.s, p2/M, z13.s, z1.s\n"
+ "smax z31.s, p2/M, z31.s, z2.s\n"
+ "smax z12.s, p2/M, z12.s, z2.s\n"
+ "smax z13.s, p2/M, z13.s, z2.s\n"
+ "smax z14.s, p2/M, z14.s, z2.s\n"
+ "smax z8.s, p2/M, z8.s, z2.s\n"
+ "smax z9.s, p2/M, z9.s, z2.s\n"
+ "smax z10.s, p2/M, z10.s, z2.s\n"
+ "smax z11.s, p2/M, z11.s, z2.s\n"
"uzp1 z31.h, z31.h, z12.h\n"
- "smax z14.s, p2/M, z14.s, z1.s\n"
- "smax z8.s, p2/M, z8.s, z1.s\n"
- "uzp1 z0.h, z13.h, z14.h\n"
- "uzp1 z31.b, z31.b, z0.b\n"
- "smax z9.s, p2/M, z9.s, z1.s\n"
- "smax z10.s, p2/M, z10.s, z1.s\n"
+ "smax z15.s, p2/M, z15.s, z2.s\n"
+ "smax z20.s, p2/M, z20.s, z2.s\n"
+ "uzp1 z1.h, z13.h, z14.h\n"
+ "smax z21.s, p2/M, z21.s, z2.s\n"
+ "smax z22.s, p2/M, z22.s, z2.s\n"
"uzp1 z8.h, z8.h, z9.h\n"
- "st1b { z31.b }, p1, [x11]\n"
- "smax z11.s, p2/M, z11.s, z1.s\n"
- "smax z15.s, p2/M, z15.s, z1.s\n"
- "uzp1 z31.h, z10.h, z11.h\n"
- "uzp1 z8.b, z8.b, z31.b\n"
- "smax z20.s, p2/M, z20.s, z1.s\n"
- "smax z21.s, p2/M, z21.s, z1.s\n"
+ "smax z16.s, p2/M, z16.s, z2.s\n"
+ "smax z17.s, p2/M, z17.s, z2.s\n"
+ "uzp1 z0.h, z10.h, z11.h\n"
+ "smax z18.s, p2/M, z18.s, z2.s\n"
+ "smax z19.s, p2/M, z19.s, z2.s\n"
"uzp1 z15.h, z15.h, z20.h\n"
- "st1b { z8.b }, p1, [x26]\n"
- "smax z22.s, p2/M, z22.s, z1.s\n"
- "smax z16.s, p2/M, z16.s, z1.s\n"
+ "uzp1 z31.b, z31.b, z1.b\n"
+ "smax z23.s, p2/M, z23.s, z2.s\n"
+ "smax z28.s, p2/M, z28.s, z2.s\n"
"uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z15.b, z15.b, z20.b\n"
- "smax z17.s, p2/M, z17.s, z1.s\n"
- "smax z18.s, p2/M, z18.s, z1.s\n"
+ "smax z29.s, p2/M, z29.s, z2.s\n"
+ "smax z30.s, p2/M, z30.s, z2.s\n"
"uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z15.b }, p1, [x25]\n"
- "smax z19.s, p2/M, z19.s, z1.s\n"
- "smax z23.s, p2/M, z23.s, z1.s\n"
+ "uzp1 z8.b, z8.b, z0.b\n"
+ "smax z24.s, p2/M, z24.s, z2.s\n"
+ "smax z25.s, p2/M, z25.s, z2.s\n"
"uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
- "smax z28.s, p2/M, z28.s, z1.s\n"
- "smax z29.s, p2/M, z29.s, z1.s\n"
+ "st1b { z31.b }, p1, [x9]\n"
+ "smax z26.s, p2/M, z26.s, z2.s\n"
+ "smax z27.s, p2/M, z27.s, z2.s\n"
"uzp1 z23.h, z23.h, z28.h\n"
- "st1b { z16.b }, p1, [x24]\n"
- "smax z30.s, p2/M, z30.s, z1.s\n"
- "smax z24.s, p2/M, z24.s, z1.s\n"
- "uzp1 z16.h, z29.h, z30.h\n"
- "uzp1 z23.b, z23.b, z16.b\n"
- "smax z25.s, p2/M, z25.s, z1.s\n"
- "smax z26.s, p2/M, z26.s, z1.s\n"
+ "uzp1 z15.b, z15.b, z20.b\n"
+ "uzp1 z18.h, z29.h, z30.h\n"
+ "st1b { z8.b }, p1, [x26]\n"
+ "addvl x9, x9, #1\n"
"uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "st1b { z15.b }, p1, [x25]\n"
+ "uzp1 z23.b, z23.b, z18.b\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z16.b }, p1, [x24]\n"
"st1b { z23.b }, p1, [x23]\n"
- "smax z27.s, p2/M, z27.s, z1.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"st1b { z24.b }, p1, [x22]\n"
- "addvl x11, x11, #1\n"
"78:" // Height 6: Writeback done
- "decw x10, ALL, MUL #4\n"
- "cmp x10, XZR\n"
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
"bgt 67b\n"
"subs %x[M], %x[M], #0x6\n"
"beq 80f\n"
@@ -2420,8 +2421,8 @@ void sve_hybrid_s8qs_mmla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"80:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x13", "x14", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
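
The change running through every Height block above is mechanical: output_ptr stops being a read-write operand of the inline asm and becomes a field of the KernelArgs structure, so each kernel reloads it with "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]" and the Height 6 path writes the advanced pointer back with a str. A minimal, self-contained sketch of that idiom — hypothetical names, AArch64-only, not the kernel's actual code — looks like this:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical illustration of the pattern used by this patch: pass a
    // pointer to the asm inside an args struct and address its fields with
    // offsetof(), instead of binding the pointer as an asm operand.
    struct Args {
        const int32_t *src = nullptr;
        int32_t *out = nullptr;   // plays the role of KernelArgs::output_ptr
        size_t n = 0;
    };

    void copy_words(Args &ka) {
        __asm__ __volatile__(
            "ldr x9, [%x[args], %[off_out]]\n"  // load out from the struct
            "ldr x10, [%x[args], %[off_src]]\n"
            "ldr x11, [%x[args], %[off_n]]\n"
            "cbz x11, 2f\n"
            "1:\n"
            "ldr w20, [x10], #4\n"              // copy one 32-bit word
            "str w20, [x9], #4\n"
            "subs x11, x11, #1\n"
            "bne 1b\n"
            "2:\n"
            "str x9, [%x[args], %[off_out]]\n"  // publish the advanced pointer
            :
            : [args] "r" (&ka),
              [off_src] "I" (offsetof(Args, src)),
              [off_out] "I" (offsetof(Args, out)),
              [off_n]   "I" (offsetof(Args, n))
            : "cc", "memory", "x9", "x10", "x11", "x20");
    }

Keeping the pointer in memory rather than in a pinned register is what lets the operand list shrink from three read-write registers to two, which is the constraint-list change visible at the end of each kernel in this patch.
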
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp
index cfa349f3aa..dc377f56e8 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp
index 1a483210f3..f94f76564b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -89,7 +89,7 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"beq 11f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -114,8 +114,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -134,14 +134,14 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop
"sdot z8.s, z6.b, z0.b\n"
- "sdot z9.s, z7.b, z0.b\n"
"ld1b { z17.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z7.b, z0.b\n"
"ld1b { z16.b }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"add x26, x26, #0x4\n"
+ "subs x27, x27, #0x4\n"
"sdot z10.s, z17.b, z0.b\n"
"sdot z11.s, z16.b, z0.b\n"
- "subs x27, x27, #0x4\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1b { z6.b }, p4/Z, [x10]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
@@ -149,14 +149,14 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"9:" // Height 1: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"sdot z8.s, z6.b, z0.b\n"
- "sdot z9.s, z7.b, z0.b\n"
"ld1b { z17.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z7.b, z0.b\n"
"ld1b { z16.b }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
+ "addvl x10, x10, #4\n"
"cmp x28, x20\n"
"sdot z10.s, z17.b, z0.b\n"
"sdot z11.s, z16.b, z0.b\n"
- "addvl x10, x10, #4\n"
"bne 5b\n"
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
@@ -171,7 +171,7 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"11:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"12:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -183,11 +183,11 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 13f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x20]\n"
"ld1w { z13.s }, p2/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x20, #2, MUL VL]\n"
@@ -206,8 +206,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"15:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -242,8 +242,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z10.s, z17.b, z0.b\n"
"sdot z14.s, z17.b, z1.b\n"
"sdot z11.s, z16.b, z0.b\n"
- "sdot z15.s, z16.b, z1.b\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
+ "sdot z15.s, z16.b, z1.b\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
"ld1b { z6.b }, p4/Z, [x10]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
@@ -257,18 +257,18 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z13.s, z7.b, z1.b\n"
"ld1b { z16.b }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
+ "addvl x10, x10, #4\n"
"cmp x28, x20\n"
"sdot z10.s, z17.b, z0.b\n"
"sdot z14.s, z17.b, z1.b\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z16.b, z0.b\n"
"sdot z15.s, z16.b, z1.b\n"
"bne 15b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p3, [x20]\n"
@@ -283,7 +283,7 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"21:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"22:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -295,12 +295,12 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 23f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x21]\n"
"ld1w { z13.s }, p2/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x21, #2, MUL VL]\n"
@@ -327,8 +327,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"25:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 26f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -359,8 +359,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"add x26, x26, #0x4\n"
"subs x27, x27, #0x4\n"
"sdot z16.s, z6.b, z2.b\n"
- "sdot z9.s, z7.b, z0.b\n"
"ld1b { z21.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z7.b, z0.b\n"
"add x25, x25, #0x4\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
@@ -372,11 +372,11 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z18.s, z21.b, z2.b\n"
"sdot z11.s, z20.b, z0.b\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
"sdot z15.s, z20.b, z1.b\n"
- "sdot z19.s, z20.b, z2.b\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
+ "sdot z19.s, z20.b, z2.b\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
+ "ld1b { z6.b }, p4/Z, [x10]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 28b\n"
"29:" // Height 3: Multiply loop: Main loop skip
@@ -385,13 +385,13 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z12.s, z6.b, z1.b\n"
"add x28, x28, #0x1\n"
"sdot z16.s, z6.b, z2.b\n"
- "sdot z9.s, z7.b, z0.b\n"
"ld1b { z21.b }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
"ld1b { z20.b }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
+ "cmp x28, x20\n"
"sdot z10.s, z21.b, z0.b\n"
"sdot z14.s, z21.b, z1.b\n"
"sdot z18.s, z21.b, z2.b\n"
@@ -400,11 +400,11 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z19.s, z20.b, z2.b\n"
"bne 25b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p3, [x21]\n"
@@ -423,7 +423,7 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"31:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"32:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -435,13 +435,13 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 33f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x22]\n"
"ld1w { z13.s }, p2/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x22, #2, MUL VL]\n"
@@ -476,8 +476,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"35:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 36f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -527,7 +527,6 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z14.s, z25.b, z1.b\n"
"sdot z18.s, z25.b, z2.b\n"
"sdot z22.s, z25.b, z3.b\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
"sdot z11.s, z24.b, z0.b\n"
"sdot z15.s, z24.b, z1.b\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
@@ -536,6 +535,7 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z23.s, z24.b, z3.b\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
+ "ld1b { z6.b }, p4/Z, [x10]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 38b\n"
"39:" // Height 4: Multiply loop: Main loop skip
@@ -546,15 +546,15 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
"ld1b { z25.b }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
"sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
"sdot z21.s, z7.b, z3.b\n"
"ld1b { z24.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "cmp x28, x20\n"
"sdot z10.s, z25.b, z0.b\n"
"sdot z14.s, z25.b, z1.b\n"
+ "addvl x10, x10, #4\n"
"sdot z18.s, z25.b, z2.b\n"
"sdot z22.s, z25.b, z3.b\n"
"sdot z11.s, z24.b, z0.b\n"
@@ -563,12 +563,12 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z23.s, z24.b, z3.b\n"
"bne 35b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p3, [x22]\n"
@@ -591,7 +591,7 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"41:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"42:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -603,16 +603,16 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 43f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x23]\n"
"ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p3/Z, [x22]\n"
@@ -653,8 +653,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"45:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -697,8 +697,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
"sdot z24.s, z6.b, z4.b\n"
- "sdot z9.s, z7.b, z0.b\n"
"ld1b { z29.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z7.b, z0.b\n"
"add x23, x23, #0x4\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
@@ -716,12 +716,12 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1b { z6.b }, p4/Z, [x10]\n"
"sdot z15.s, z28.b, z1.b\n"
- "sdot z19.s, z28.b, z2.b\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
+ "sdot z19.s, z28.b, z2.b\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"sdot z23.s, z28.b, z3.b\n"
- "sdot z27.s, z28.b, z4.b\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
+ "sdot z27.s, z28.b, z4.b\n"
"ld1rw { z4.s }, p4/Z, [x22]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 48b\n"
@@ -732,12 +732,12 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
- "cmp x28, x20\n"
"sdot z24.s, z6.b, z4.b\n"
- "sdot z9.s, z7.b, z0.b\n"
"ld1b { z29.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
"sdot z17.s, z7.b, z2.b\n"
+ "cmp x28, x20\n"
"sdot z21.s, z7.b, z3.b\n"
"sdot z25.s, z7.b, z4.b\n"
"ld1b { z28.b }, p4/Z, [x10, #3, MUL VL]\n"
@@ -754,15 +754,15 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z27.s, z28.b, z4.b\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z12.s }, p3, [x23]\n"
"st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
"st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
@@ -786,11 +786,12 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"b 62f\n"
"51:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"52:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -802,17 +803,17 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 53f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x24]\n"
"ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p3/Z, [x23]\n"
@@ -861,8 +862,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"55:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 56f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -950,12 +951,12 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"sdot z16.s, z6.b, z2.b\n"
"sdot z20.s, z6.b, z3.b\n"
- "cmp x28, x20\n"
"sdot z24.s, z6.b, z4.b\n"
"sdot z28.s, z6.b, z5.b\n"
"ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z7.b, z0.b\n"
"sdot z13.s, z7.b, z1.b\n"
+ "cmp x28, x20\n"
"sdot z17.s, z7.b, z2.b\n"
"sdot z21.s, z7.b, z3.b\n"
"sdot z25.s, z7.b, z4.b\n"
@@ -976,17 +977,17 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"sdot z31.s, z7.b, z5.b\n"
"bne 55b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "add x21, x22, x20, LSL #2\n"
"st1w { z12.s }, p3, [x24]\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
"st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
"st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
@@ -1022,8 +1023,8 @@ void sve_hybrid_s8s32_dot_6x4VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"62:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
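
For Height 6 — the tail that advances to the next block of rows — the old code folded the advance into the operand itself with "madd %x[output_ptr], x21, x20, %x[output_ptr]"; the new code computes the advance into a scratch register and stores it back into the struct. The scalar equivalent, as a hedged C++ sketch (names hypothetical), is roughly:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical scalar mirror of the Height-6 writeback:
    //   ldr  x9,  [args, offsetof_output_ptr]   (done at block entry)
    //   madd x20, output_offset, row_multiplier, x9
    //   str  x20, [args, offsetof_output_ptr]
    // The multiplier is 6 for the int8-output kernel (mov x20, #0x6) and
    // 0x18 = 6*4 for the int32-output kernel (mov x20, #0x18), i.e. six
    // rows times the output element size in bytes.
    struct KaSketch {
        size_t output_offset = 0;   // per-row stride, in elements
        void  *output_ptr   = nullptr;
    };

    inline void advance_six_rows(KaSketch &ka, size_t bytes_per_element) {
        auto *base = static_cast<uint8_t *>(ka.output_ptr);
        ka.output_ptr = base + 6 * ka.output_offset * bytes_per_element;
    }

Because the update now goes through memory, output_ptr can drop out of the asm's read-write operand list entirely, matching the operand-list hunks at the end of each kernel.
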
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp
index eeef192b66..70362ae888 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void sve_hybrid_s8s32_dot_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -89,7 +89,7 @@ void sve_hybrid_s8s32_dot_6x4VL (
"beq 12f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -114,8 +114,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov x28, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -131,89 +131,89 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z16.b }, p5/Z, [x10]\n"
- "sdot z8.s, z16.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "sdot z8.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sdot z10.s, z16.b, z0.b[0]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "sdot z10.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
"sdot z11.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p5/Z, [x10, #4, MUL VL]\n"
- "sdot z8.s, z16.b, z0.b[1]\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "sdot z8.s, z17.b, z0.b[1]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[1]\n"
- "ld1b { z16.b }, p5/Z, [x10, #6, MUL VL]\n"
- "sdot z10.s, z16.b, z0.b[1]\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
+ "sdot z10.s, z17.b, z0.b[1]\n"
"sdot z11.s, z16.b, z0.b[1]\n"
"ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[2]\n"
- "sdot z9.s, z16.b, z0.b[2]\n"
"ld1b { z17.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "sdot z9.s, z16.b, z0.b[2]\n"
"ld1b { z16.b }, p5/Z, [x10, #-5, MUL VL]\n"
"sdot z10.s, z17.b, z0.b[2]\n"
- "sdot z11.s, z16.b, z0.b[2]\n"
"ld1b { z17.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "sdot z11.s, z16.b, z0.b[2]\n"
"ld1b { z16.b }, p5/Z, [x10, #-3, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[3]\n"
- "sdot z9.s, z16.b, z0.b[3]\n"
"ld1b { z17.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "sdot z9.s, z16.b, z0.b[3]\n"
"ld1b { z16.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
"sdot z10.s, z17.b, z0.b[3]\n"
"sdot z11.s, z16.b, z0.b[3]\n"
- "add x26, x26, #0x10\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z16.b }, p5/Z, [x10]\n"
- "sdot z8.s, z16.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
- "sdot z9.s, z16.b, z0.b[0]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"subs x27, x27, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x26]\n"
+ "sdot z8.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z16.b, z0.b[0]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[0]\n"
"sdot z11.s, z16.b, z0.b[0]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z17.b, z0.b[1]\n"
- "sdot z9.s, z16.b, z0.b[1]\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z16.b, z0.b[1]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[1]\n"
"sdot z11.s, z16.b, z0.b[1]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z17.b, z0.b[2]\n"
- "sdot z9.s, z16.b, z0.b[2]\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z16.b, z0.b[2]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[2]\n"
"sdot z11.s, z16.b, z0.b[2]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[3]\n"
- "sdot z9.s, z16.b, z0.b[3]\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z16.b, z0.b[3]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[3]\n"
"sdot z11.s, z16.b, z0.b[3]\n"
- "addvl x10, x10, #4\n"
"10:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -232,7 +232,7 @@ void sve_hybrid_s8s32_dot_6x4VL (
"12:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"13:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -244,11 +244,11 @@ void sve_hybrid_s8s32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 14f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
@@ -267,8 +267,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov x28, #0x0\n"
"16:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -287,38 +287,38 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 20f\n"
"19:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z0.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"sdot z8.s, z17.b, z1.b[0]\n"
"sdot z12.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z1.b[0]\n"
"sdot z13.s, z16.b, z0.b[0]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
"sdot z10.s, z17.b, z1.b[0]\n"
"sdot z14.s, z17.b, z0.b[0]\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x10\n"
"sdot z11.s, z16.b, z1.b[0]\n"
"sdot z15.s, z16.b, z0.b[0]\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
"sdot z8.s, z17.b, z1.b[1]\n"
"sdot z12.s, z17.b, z0.b[1]\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
"sdot z9.s, z16.b, z1.b[1]\n"
"sdot z13.s, z16.b, z0.b[1]\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
"sdot z10.s, z17.b, z1.b[1]\n"
"sdot z14.s, z17.b, z0.b[1]\n"
- "ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
"sdot z11.s, z16.b, z1.b[1]\n"
"sdot z15.s, z16.b, z0.b[1]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z17.b, z1.b[2]\n"
"sdot z12.s, z17.b, z0.b[2]\n"
@@ -345,50 +345,50 @@ void sve_hybrid_s8s32_dot_6x4VL (
"bgt 19b\n"
"20:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x26]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"sdot z8.s, z17.b, z0.b[0]\n"
"sdot z12.s, z17.b, z1.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[0]\n"
"sdot z13.s, z16.b, z1.b[0]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[0]\n"
"sdot z14.s, z17.b, z1.b[0]\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z16.b, z0.b[0]\n"
"sdot z15.s, z16.b, z1.b[0]\n"
"ble 21f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z17.b, z0.b[1]\n"
"sdot z12.s, z17.b, z1.b[1]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[1]\n"
"sdot z13.s, z16.b, z1.b[1]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[1]\n"
"sdot z14.s, z17.b, z1.b[1]\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z16.b, z0.b[1]\n"
"sdot z15.s, z16.b, z1.b[1]\n"
"ble 21f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z17.b, z0.b[2]\n"
"sdot z12.s, z17.b, z1.b[2]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[2]\n"
"sdot z13.s, z16.b, z1.b[2]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[2]\n"
"sdot z14.s, z17.b, z1.b[2]\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z16.b, z0.b[2]\n"
"sdot z15.s, z16.b, z1.b[2]\n"
"ble 21f\n"
@@ -396,13 +396,13 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z17.b, z0.b[3]\n"
"sdot z12.s, z17.b, z1.b[3]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z16.b, z0.b[3]\n"
"sdot z13.s, z16.b, z1.b[3]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"sdot z10.s, z17.b, z0.b[3]\n"
"sdot z14.s, z17.b, z1.b[3]\n"
- "addvl x10, x10, #4\n"
"sdot z11.s, z16.b, z0.b[3]\n"
"sdot z15.s, z16.b, z1.b[3]\n"
"21:" // Height 2: Multiply loop: multiply skip
@@ -411,10 +411,10 @@ void sve_hybrid_s8s32_dot_6x4VL (
"cmp x28, x20\n"
"bne 16b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p4, [x20]\n"
@@ -429,7 +429,7 @@ void sve_hybrid_s8s32_dot_6x4VL (
"23:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"24:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -441,12 +441,12 @@ void sve_hybrid_s8s32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 25f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
@@ -473,8 +473,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov x28, #0x0\n"
"27:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 28f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -496,37 +496,37 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 31f\n"
"30:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z21.b }, p5/Z, [x10]\n"
+ "ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1b { z21.b }, p5/Z, [x10]\n"
+ "add x24, x24, #0x10\n"
"sdot z8.s, z21.b, z2.b[0]\n"
"sdot z12.s, z21.b, z1.b[0]\n"
- "ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
- "sdot z16.s, z21.b, z0.b[0]\n"
"sdot z9.s, z20.b, z2.b[0]\n"
- "ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z20.b, z1.b[0]\n"
+ "sdot z16.s, z21.b, z0.b[0]\n"
+ "ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z20.b, z0.b[0]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x10\n"
"sdot z10.s, z21.b, z2.b[0]\n"
"sdot z14.s, z21.b, z1.b[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"sdot z18.s, z21.b, z0.b[0]\n"
- "sdot z11.s, z20.b, z2.b[0]\n"
"ld1b { z21.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "sdot z11.s, z20.b, z2.b[0]\n"
"sdot z15.s, z20.b, z1.b[0]\n"
"sdot z19.s, z20.b, z0.b[0]\n"
"ld1b { z20.b }, p5/Z, [x10, #5, MUL VL]\n"
"sdot z8.s, z21.b, z2.b[1]\n"
"sdot z12.s, z21.b, z1.b[1]\n"
"sdot z16.s, z21.b, z0.b[1]\n"
- "sdot z9.s, z20.b, z2.b[1]\n"
"ld1b { z21.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "sdot z9.s, z20.b, z2.b[1]\n"
"sdot z13.s, z20.b, z1.b[1]\n"
"sdot z17.s, z20.b, z0.b[1]\n"
"ld1b { z20.b }, p5/Z, [x10, #7, MUL VL]\n"
@@ -535,31 +535,31 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z14.s, z21.b, z1.b[1]\n"
"sdot z18.s, z21.b, z0.b[1]\n"
"sdot z11.s, z20.b, z2.b[1]\n"
- "ld1b { z21.b }, p5/Z, [x10, #-8, MUL VL]\n"
"sdot z15.s, z20.b, z1.b[1]\n"
"sdot z19.s, z20.b, z0.b[1]\n"
+ "ld1b { z21.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z20.b }, p5/Z, [x10, #-7, MUL VL]\n"
"sdot z8.s, z21.b, z2.b[2]\n"
"sdot z12.s, z21.b, z1.b[2]\n"
"sdot z16.s, z21.b, z0.b[2]\n"
- "sdot z9.s, z20.b, z2.b[2]\n"
"ld1b { z21.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "sdot z9.s, z20.b, z2.b[2]\n"
"sdot z13.s, z20.b, z1.b[2]\n"
"sdot z17.s, z20.b, z0.b[2]\n"
"ld1b { z20.b }, p5/Z, [x10, #-5, MUL VL]\n"
"sdot z10.s, z21.b, z2.b[2]\n"
"sdot z14.s, z21.b, z1.b[2]\n"
"sdot z18.s, z21.b, z0.b[2]\n"
- "sdot z11.s, z20.b, z2.b[2]\n"
"ld1b { z21.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "sdot z11.s, z20.b, z2.b[2]\n"
"sdot z15.s, z20.b, z1.b[2]\n"
"sdot z19.s, z20.b, z0.b[2]\n"
"ld1b { z20.b }, p5/Z, [x10, #-3, MUL VL]\n"
"sdot z8.s, z21.b, z2.b[3]\n"
"sdot z12.s, z21.b, z1.b[3]\n"
"sdot z16.s, z21.b, z0.b[3]\n"
- "sdot z9.s, z20.b, z2.b[3]\n"
"ld1b { z21.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "sdot z9.s, z20.b, z2.b[3]\n"
"sdot z13.s, z20.b, z1.b[3]\n"
"sdot z17.s, z20.b, z0.b[3]\n"
"ld1b { z20.b }, p5/Z, [x10, #-1, MUL VL]\n"
@@ -572,18 +572,18 @@ void sve_hybrid_s8s32_dot_6x4VL (
"bgt 30b\n"
"31:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z21.b }, p5/Z, [x10]\n"
+ "ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z21.b }, p5/Z, [x10]\n"
"sdot z8.s, z21.b, z0.b[0]\n"
"sdot z12.s, z21.b, z1.b[0]\n"
- "ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
- "sdot z16.s, z21.b, z2.b[0]\n"
"sdot z9.s, z20.b, z0.b[0]\n"
- "ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z20.b, z1.b[0]\n"
+ "sdot z16.s, z21.b, z2.b[0]\n"
+ "ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z20.b, z2.b[0]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -596,12 +596,12 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 32f\n"
"ld1b { z21.b }, p5/Z, [x10]\n"
"ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z21.b, z0.b[1]\n"
"sdot z12.s, z21.b, z1.b[1]\n"
"sdot z16.s, z21.b, z2.b[1]\n"
- "sdot z9.s, z20.b, z0.b[1]\n"
"ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "sdot z9.s, z20.b, z0.b[1]\n"
"sdot z13.s, z20.b, z1.b[1]\n"
"sdot z17.s, z20.b, z2.b[1]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -615,12 +615,12 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 32f\n"
"ld1b { z21.b }, p5/Z, [x10]\n"
"ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z21.b, z0.b[2]\n"
"sdot z12.s, z21.b, z1.b[2]\n"
"sdot z16.s, z21.b, z2.b[2]\n"
- "sdot z9.s, z20.b, z0.b[2]\n"
"ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "sdot z9.s, z20.b, z0.b[2]\n"
"sdot z13.s, z20.b, z1.b[2]\n"
"sdot z17.s, z20.b, z2.b[2]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -637,8 +637,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z8.s, z21.b, z0.b[3]\n"
"sdot z12.s, z21.b, z1.b[3]\n"
"sdot z16.s, z21.b, z2.b[3]\n"
- "sdot z9.s, z20.b, z0.b[3]\n"
"ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z20.b, z0.b[3]\n"
"sdot z13.s, z20.b, z1.b[3]\n"
"sdot z17.s, z20.b, z2.b[3]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -655,11 +655,11 @@ void sve_hybrid_s8s32_dot_6x4VL (
"cmp x28, x20\n"
"bne 27b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p4, [x21]\n"
@@ -678,7 +678,7 @@ void sve_hybrid_s8s32_dot_6x4VL (
"34:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"35:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -690,13 +690,13 @@ void sve_hybrid_s8s32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 36f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
@@ -731,8 +731,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov x28, #0x0\n"
"38:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 39f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -757,25 +757,25 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 42f\n"
"41:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"sdot z8.s, z25.b, z3.b[0]\n"
"sdot z12.s, z25.b, z2.b[0]\n"
+ "sdot z9.s, z24.b, z3.b[0]\n"
+ "sdot z13.s, z24.b, z2.b[0]\n"
"sdot z16.s, z25.b, z1.b[0]\n"
"sdot z20.s, z25.b, z0.b[0]\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
- "sdot z9.s, z24.b, z3.b[0]\n"
- "sdot z13.s, z24.b, z2.b[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"sdot z17.s, z24.b, z1.b[0]\n"
"sdot z21.s, z24.b, z0.b[0]\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -804,9 +804,9 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z14.s, z25.b, z2.b[1]\n"
"sdot z18.s, z25.b, z1.b[1]\n"
"sdot z22.s, z25.b, z0.b[1]\n"
- "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
"sdot z11.s, z24.b, z3.b[1]\n"
"sdot z15.s, z24.b, z2.b[1]\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
"sdot z19.s, z24.b, z1.b[1]\n"
"sdot z23.s, z24.b, z0.b[1]\n"
"ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
@@ -851,20 +851,20 @@ void sve_hybrid_s8s32_dot_6x4VL (
"bgt 41b\n"
"42:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z25.b, z0.b[0]\n"
"sdot z12.s, z25.b, z1.b[0]\n"
+ "sdot z9.s, z24.b, z0.b[0]\n"
+ "sdot z13.s, z24.b, z1.b[0]\n"
"sdot z16.s, z25.b, z2.b[0]\n"
"sdot z20.s, z25.b, z3.b[0]\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sdot z9.s, z24.b, z0.b[0]\n"
- "sdot z13.s, z24.b, z1.b[0]\n"
"sdot z17.s, z24.b, z2.b[0]\n"
"sdot z21.s, z24.b, z3.b[0]\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -880,12 +880,12 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 43f\n"
"ld1b { z25.b }, p5/Z, [x10]\n"
"ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z25.b, z0.b[1]\n"
"sdot z12.s, z25.b, z1.b[1]\n"
"sdot z16.s, z25.b, z2.b[1]\n"
"sdot z20.s, z25.b, z3.b[1]\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"sdot z9.s, z24.b, z0.b[1]\n"
"sdot z13.s, z24.b, z1.b[1]\n"
"sdot z17.s, z24.b, z2.b[1]\n"
@@ -903,12 +903,12 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 43f\n"
"ld1b { z25.b }, p5/Z, [x10]\n"
"ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z25.b, z0.b[2]\n"
"sdot z12.s, z25.b, z1.b[2]\n"
"sdot z16.s, z25.b, z2.b[2]\n"
"sdot z20.s, z25.b, z3.b[2]\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"sdot z9.s, z24.b, z0.b[2]\n"
"sdot z13.s, z24.b, z1.b[2]\n"
"sdot z17.s, z24.b, z2.b[2]\n"
@@ -951,12 +951,12 @@ void sve_hybrid_s8s32_dot_6x4VL (
"cmp x28, x20\n"
"bne 38b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p4, [x22]\n"
@@ -979,7 +979,7 @@ void sve_hybrid_s8s32_dot_6x4VL (
"45:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"46:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -991,16 +991,16 @@ void sve_hybrid_s8s32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 47f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x22]\n"
@@ -1041,8 +1041,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov x28, #0x0\n"
"49:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1070,29 +1070,29 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 53f\n"
"52:" // Height 5: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z4.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x22]\n"
- "ld1b { z29.b }, p5/Z, [x10]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"sdot z8.s, z29.b, z4.b[0]\n"
"sdot z12.s, z29.b, z3.b[0]\n"
- "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sdot z9.s, z28.b, z4.b[0]\n"
"sdot z16.s, z29.b, z2.b[0]\n"
"sdot z20.s, z29.b, z1.b[0]\n"
- "add x25, x25, #0x10\n"
"sdot z24.s, z29.b, z0.b[0]\n"
- "sdot z9.s, z28.b, z4.b[0]\n"
- "ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
"sdot z13.s, z28.b, z3.b[0]\n"
+ "ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z17.s, z28.b, z2.b[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"sdot z21.s, z28.b, z1.b[0]\n"
"sdot z25.s, z28.b, z0.b[0]\n"
"ld1b { z28.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1101,8 +1101,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z18.s, z29.b, z2.b[0]\n"
"sdot z22.s, z29.b, z1.b[0]\n"
"sdot z26.s, z29.b, z0.b[0]\n"
- "sdot z11.s, z28.b, z4.b[0]\n"
"ld1b { z29.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "sdot z11.s, z28.b, z4.b[0]\n"
"sdot z15.s, z28.b, z3.b[0]\n"
"sdot z19.s, z28.b, z2.b[0]\n"
"sdot z23.s, z28.b, z1.b[0]\n"
@@ -1113,8 +1113,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z16.s, z29.b, z2.b[1]\n"
"sdot z20.s, z29.b, z1.b[1]\n"
"sdot z24.s, z29.b, z0.b[1]\n"
- "sdot z9.s, z28.b, z4.b[1]\n"
"ld1b { z29.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "sdot z9.s, z28.b, z4.b[1]\n"
"sdot z13.s, z28.b, z3.b[1]\n"
"sdot z17.s, z28.b, z2.b[1]\n"
"sdot z21.s, z28.b, z1.b[1]\n"
@@ -1127,8 +1127,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z22.s, z29.b, z1.b[1]\n"
"sdot z26.s, z29.b, z0.b[1]\n"
"sdot z11.s, z28.b, z4.b[1]\n"
- "ld1b { z29.b }, p5/Z, [x10, #-8, MUL VL]\n"
"sdot z15.s, z28.b, z3.b[1]\n"
+ "ld1b { z29.b }, p5/Z, [x10, #-8, MUL VL]\n"
"sdot z19.s, z28.b, z2.b[1]\n"
"sdot z23.s, z28.b, z1.b[1]\n"
"sdot z27.s, z28.b, z0.b[1]\n"
@@ -1138,8 +1138,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z16.s, z29.b, z2.b[2]\n"
"sdot z20.s, z29.b, z1.b[2]\n"
"sdot z24.s, z29.b, z0.b[2]\n"
- "sdot z9.s, z28.b, z4.b[2]\n"
"ld1b { z29.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "sdot z9.s, z28.b, z4.b[2]\n"
"sdot z13.s, z28.b, z3.b[2]\n"
"sdot z17.s, z28.b, z2.b[2]\n"
"sdot z21.s, z28.b, z1.b[2]\n"
@@ -1150,8 +1150,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z18.s, z29.b, z2.b[2]\n"
"sdot z22.s, z29.b, z1.b[2]\n"
"sdot z26.s, z29.b, z0.b[2]\n"
- "sdot z11.s, z28.b, z4.b[2]\n"
"ld1b { z29.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "sdot z11.s, z28.b, z4.b[2]\n"
"sdot z15.s, z28.b, z3.b[2]\n"
"sdot z19.s, z28.b, z2.b[2]\n"
"sdot z23.s, z28.b, z1.b[2]\n"
@@ -1162,8 +1162,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z16.s, z29.b, z2.b[3]\n"
"sdot z20.s, z29.b, z1.b[3]\n"
"sdot z24.s, z29.b, z0.b[3]\n"
- "sdot z9.s, z28.b, z4.b[3]\n"
"ld1b { z29.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "sdot z9.s, z28.b, z4.b[3]\n"
"sdot z13.s, z28.b, z3.b[3]\n"
"sdot z17.s, z28.b, z2.b[3]\n"
"sdot z21.s, z28.b, z1.b[3]\n"
@@ -1182,23 +1182,23 @@ void sve_hybrid_s8s32_dot_6x4VL (
"bgt 52b\n"
"53:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
"ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z29.b }, p5/Z, [x10]\n"
"sdot z8.s, z29.b, z0.b[0]\n"
"sdot z12.s, z29.b, z1.b[0]\n"
- "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sdot z9.s, z28.b, z0.b[0]\n"
+ "sdot z13.s, z28.b, z1.b[0]\n"
"sdot z16.s, z29.b, z2.b[0]\n"
"sdot z20.s, z29.b, z3.b[0]\n"
"sdot z24.s, z29.b, z4.b[0]\n"
- "sdot z9.s, z28.b, z0.b[0]\n"
- "ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sdot z13.s, z28.b, z1.b[0]\n"
"sdot z17.s, z28.b, z2.b[0]\n"
+ "ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z21.s, z28.b, z3.b[0]\n"
"sdot z25.s, z28.b, z4.b[0]\n"
"ld1b { z28.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1216,21 +1216,21 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 54f\n"
"ld1b { z29.b }, p5/Z, [x10]\n"
"ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z29.b, z0.b[1]\n"
"sdot z12.s, z29.b, z1.b[1]\n"
"sdot z16.s, z29.b, z2.b[1]\n"
"sdot z20.s, z29.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z29.b, z4.b[1]\n"
- "sdot z9.s, z28.b, z0.b[1]\n"
"ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z28.b, z0.b[1]\n"
"sdot z13.s, z28.b, z1.b[1]\n"
"sdot z17.s, z28.b, z2.b[1]\n"
"sdot z21.s, z28.b, z3.b[1]\n"
"sdot z25.s, z28.b, z4.b[1]\n"
"ld1b { z28.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"sdot z10.s, z29.b, z0.b[1]\n"
+ "addvl x10, x10, #4\n"
"sdot z14.s, z29.b, z1.b[1]\n"
"sdot z18.s, z29.b, z2.b[1]\n"
"sdot z22.s, z29.b, z3.b[1]\n"
@@ -1243,21 +1243,21 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 54f\n"
"ld1b { z29.b }, p5/Z, [x10]\n"
"ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z29.b, z0.b[2]\n"
"sdot z12.s, z29.b, z1.b[2]\n"
"sdot z16.s, z29.b, z2.b[2]\n"
"sdot z20.s, z29.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z29.b, z4.b[2]\n"
- "sdot z9.s, z28.b, z0.b[2]\n"
"ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z28.b, z0.b[2]\n"
"sdot z13.s, z28.b, z1.b[2]\n"
"sdot z17.s, z28.b, z2.b[2]\n"
"sdot z21.s, z28.b, z3.b[2]\n"
"sdot z25.s, z28.b, z4.b[2]\n"
"ld1b { z28.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"sdot z10.s, z29.b, z0.b[2]\n"
+ "addvl x10, x10, #4\n"
"sdot z14.s, z29.b, z1.b[2]\n"
"sdot z18.s, z29.b, z2.b[2]\n"
"sdot z22.s, z29.b, z3.b[2]\n"
@@ -1275,8 +1275,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"sdot z16.s, z29.b, z2.b[3]\n"
"sdot z20.s, z29.b, z3.b[3]\n"
"sdot z24.s, z29.b, z4.b[3]\n"
- "sdot z9.s, z28.b, z0.b[3]\n"
"ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "sdot z9.s, z28.b, z0.b[3]\n"
"sdot z13.s, z28.b, z1.b[3]\n"
"sdot z17.s, z28.b, z2.b[3]\n"
"sdot z21.s, z28.b, z3.b[3]\n"
@@ -1299,15 +1299,15 @@ void sve_hybrid_s8s32_dot_6x4VL (
"cmp x28, x20\n"
"bne 49b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z12.s }, p4, [x23]\n"
"st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
"st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
@@ -1331,11 +1331,12 @@ void sve_hybrid_s8s32_dot_6x4VL (
"b 68f\n"
"56:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"57:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1347,17 +1348,17 @@ void sve_hybrid_s8s32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 58f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x23]\n"
@@ -1406,8 +1407,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"mov x28, #0x0\n"
"60:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1438,29 +1439,29 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 64f\n"
"63:" // Height 6: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z1.b }, p5/Z, [x10]\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z7.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z6.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z5.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z4.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
"ld1rqb { z2.b }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1b { z1.b }, p5/Z, [x10]\n"
- "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"sdot z8.s, z1.b, z7.b[0]\n"
"sdot z12.s, z1.b, z6.b[0]\n"
+ "add x21, x21, #0x10\n"
"sdot z16.s, z1.b, z5.b[0]\n"
"sdot z20.s, z1.b, z4.b[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"sdot z24.s, z1.b, z3.b[0]\n"
"sdot z28.s, z1.b, z2.b[0]\n"
"ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
"sdot z9.s, z0.b, z7.b[0]\n"
"sdot z13.s, z0.b, z6.b[0]\n"
"sdot z17.s, z0.b, z5.b[0]\n"
@@ -1568,24 +1569,24 @@ void sve_hybrid_s8s32_dot_6x4VL (
"bgt 63b\n"
"64:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z7.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
"ld1rqb { z4.b }, p0/Z, [x22]\n"
"ld1rqb { z5.b }, p0/Z, [x21]\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
"sdot z8.s, z7.b, z0.b[0]\n"
"sdot z12.s, z7.b, z1.b[0]\n"
+ "sdot z9.s, z6.b, z0.b[0]\n"
+ "sdot z13.s, z6.b, z1.b[0]\n"
"sdot z16.s, z7.b, z2.b[0]\n"
"sdot z20.s, z7.b, z3.b[0]\n"
"sdot z24.s, z7.b, z4.b[0]\n"
"sdot z28.s, z7.b, z5.b[0]\n"
"ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sdot z9.s, z6.b, z0.b[0]\n"
- "sdot z13.s, z6.b, z1.b[0]\n"
"sdot z17.s, z6.b, z2.b[0]\n"
"sdot z21.s, z6.b, z3.b[0]\n"
"sdot z25.s, z6.b, z4.b[0]\n"
@@ -1607,23 +1608,23 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 65f\n"
"ld1b { z7.b }, p5/Z, [x10]\n"
"ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z7.b, z0.b[1]\n"
"sdot z12.s, z7.b, z1.b[1]\n"
"sdot z16.s, z7.b, z2.b[1]\n"
"sdot z20.s, z7.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z7.b, z4.b[1]\n"
"sdot z28.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z6.b, z1.b[1]\n"
"sdot z17.s, z6.b, z2.b[1]\n"
"sdot z21.s, z6.b, z3.b[1]\n"
"sdot z25.s, z6.b, z4.b[1]\n"
"sdot z29.s, z6.b, z5.b[1]\n"
"ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"sdot z10.s, z7.b, z0.b[1]\n"
+ "addvl x10, x10, #4\n"
"sdot z14.s, z7.b, z1.b[1]\n"
"sdot z18.s, z7.b, z2.b[1]\n"
"sdot z22.s, z7.b, z3.b[1]\n"
@@ -1638,23 +1639,23 @@ void sve_hybrid_s8s32_dot_6x4VL (
"ble 65f\n"
"ld1b { z7.b }, p5/Z, [x10]\n"
"ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"sdot z8.s, z7.b, z0.b[2]\n"
"sdot z12.s, z7.b, z1.b[2]\n"
"sdot z16.s, z7.b, z2.b[2]\n"
"sdot z20.s, z7.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"sdot z24.s, z7.b, z4.b[2]\n"
"sdot z28.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z9.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
"sdot z13.s, z6.b, z1.b[2]\n"
"sdot z17.s, z6.b, z2.b[2]\n"
"sdot z21.s, z6.b, z3.b[2]\n"
"sdot z25.s, z6.b, z4.b[2]\n"
"sdot z29.s, z6.b, z5.b[2]\n"
"ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"sdot z10.s, z7.b, z0.b[2]\n"
+ "addvl x10, x10, #4\n"
"sdot z14.s, z7.b, z1.b[2]\n"
"sdot z18.s, z7.b, z2.b[2]\n"
"sdot z22.s, z7.b, z3.b[2]\n"
@@ -1702,17 +1703,17 @@ void sve_hybrid_s8s32_dot_6x4VL (
"cmp x28, x20\n"
"bne 60b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "add x21, x22, x20, LSL #2\n"
"st1w { z12.s }, p4, [x24]\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
"st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
"st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
@@ -1748,8 +1749,8 @@ void sve_hybrid_s8s32_dot_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"68:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp
index 686295496e..fceaeb119c 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 8, 8> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 8, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp
index f66b6345ea..d257cc69de 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void sve_hybrid_s8s32_mmla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -89,7 +89,7 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"beq 12f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -100,14 +100,14 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"incw x20\n"
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 3f\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
- "ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z19.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "zip1 z9.d, z18.d, z13.d\n"
- "zip2 z13.d, z18.d, z13.d\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
+ "zip1 z9.d, z19.d, z13.d\n"
+ "zip2 z13.d, z19.d, z13.d\n"
"zip1 z10.d, z17.d, z14.d\n"
"zip2 z14.d, z17.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -126,8 +126,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov x28, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -143,87 +143,87 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z20.b }, p0/Z, [x26]\n"
- "trn1 z18.d, z20.d, z19.d\n"
- "ld1b { z17.b }, p5/Z, [x10]\n"
- "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45119a48 // smmla z8.s, z18.b, z17.b\n"
- ".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x45119a49 // smmla z9.s, z18.b, z17.b\n"
- ".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z16.b }, p5/Z, [x10]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z19.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "trn1 z18.d, z19.d, z20.d\n"
+ "trn2 z19.d, z19.d, z20.d\n"
+ ".inst 0x45109a48 // smmla z8.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45119a4c // smmla z12.s, z18.b, z17.b\n"
+ "ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45109a49 // smmla z9.s, z18.b, z16.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45149a4d // smmla z13.s, z18.b, z20.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x45119a4a // smmla z10.s, z18.b, z17.b\n"
- ".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
+ ".inst 0x45019a4a // smmla z10.s, z18.b, z1.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "trn2 z20.d, z20.d, z19.d\n"
".inst 0x45119a4b // smmla z11.s, z18.b, z17.b\n"
".inst 0x45109a4f // smmla z15.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
- ".inst 0x45119a88 // smmla z8.s, z20.b, z17.b\n"
- ".inst 0x45109a8c // smmla z12.s, z20.b, z16.b\n"
- "ld1b { z17.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "ld1b { z16.b }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x45119a89 // smmla z9.s, z20.b, z17.b\n"
- ".inst 0x45109a8d // smmla z13.s, z20.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x45109a68 // smmla z8.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45119a6c // smmla z12.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x45109a69 // smmla z9.s, z19.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45109a8a // smmla z10.s, z20.b, z16.b\n"
- ".inst 0x45079a8e // smmla z14.s, z20.b, z7.b\n"
+ ".inst 0x45119a6d // smmla z13.s, z19.b, z17.b\n"
+ "ld1b { z3.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x45109a6a // smmla z10.s, z19.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45039a6e // smmla z14.s, z19.b, z3.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
- ".inst 0x45119a8b // smmla z11.s, z20.b, z17.b\n"
- ".inst 0x45109a8f // smmla z15.s, z20.b, z16.b\n"
- "add x26, x26, #0x10\n"
+ ".inst 0x45119a6b // smmla z11.s, z19.b, z17.b\n"
+ ".inst 0x45109a6f // smmla z15.s, z19.b, z16.b\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "trn1 z18.d, z1.d, z19.d\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "trn1 z18.d, z1.d, z19.d\n"
".inst 0x45119a48 // smmla z8.s, z18.b, z17.b\n"
- ".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x45119a49 // smmla z9.s, z18.b, z17.b\n"
- ".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45119a4a // smmla z10.s, z18.b, z17.b\n"
- ".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "addvl x10, x10, #8\n"
".inst 0x45119a4b // smmla z11.s, z18.b, z17.b\n"
".inst 0x45109a4f // smmla z15.s, z18.b, z16.b\n"
- "addvl x10, x10, #8\n"
"ble 10f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45119828 // smmla z8.s, z1.b, z17.b\n"
- ".inst 0x4510982c // smmla z12.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4510982c // smmla z12.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45119829 // smmla z9.s, z1.b, z17.b\n"
- ".inst 0x4510982d // smmla z13.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4510982d // smmla z13.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x4511982a // smmla z10.s, z1.b, z17.b\n"
- ".inst 0x4510982e // smmla z14.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4510982e // smmla z14.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x4511982b // smmla z11.s, z1.b, z17.b\n"
".inst 0x4510982f // smmla z15.s, z1.b, z16.b\n"
- "addvl x10, x10, #8\n"
"10:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -231,9 +231,9 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"bne 5b\n"
"uzp1 z8.d, z8.d, z12.d\n"
"uzp1 z9.d, z9.d, z13.d\n"
- "st1w { z8.s }, p4, [x9]\n"
"uzp1 z10.d, z10.d, z14.d\n"
"uzp1 z11.d, z11.d, z15.d\n"
+ "st1w { z8.s }, p4, [x9]\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
@@ -246,7 +246,7 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"12:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"13:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -258,19 +258,19 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 14f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z18.s }, p4/Z, [x9]\n"
- "ld1w { z2.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
- "zip1 z8.d, z18.d, z12.d\n"
- "zip2 z12.d, z18.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
- "zip1 z9.d, z2.d, z13.d\n"
- "zip2 z13.d, z2.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z8.d, z18.d, z12.d\n"
+ "zip2 z12.d, z18.d, z12.d\n"
+ "zip1 z9.d, z24.d, z13.d\n"
+ "zip2 z13.d, z24.d, z13.d\n"
"zip1 z10.d, z17.d, z14.d\n"
"zip2 z14.d, z17.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -289,8 +289,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov x28, #0x0\n"
"16:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -309,109 +309,109 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"ble 20f\n"
"19:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z20.b }, p0/Z, [x26]\n"
- "ld1rqb { z19.b }, p0/Z, [x25]\n"
- "trn1 z18.d, z20.d, z19.d\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z19.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "trn1 z18.d, z19.d, z25.d\n"
+ "trn2 z19.d, z19.d, z25.d\n"
".inst 0x45119a48 // smmla z8.s, z18.b, z17.b\n"
- ".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45119a49 // smmla z9.s, z18.b, z17.b\n"
- ".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45119a4a // smmla z10.s, z18.b, z17.b\n"
- ".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "trn2 z20.d, z20.d, z19.d\n"
".inst 0x45119a4b // smmla z11.s, z18.b, z17.b\n"
".inst 0x45109a4f // smmla z15.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
- ".inst 0x45119a88 // smmla z8.s, z20.b, z17.b\n"
- ".inst 0x45109a8c // smmla z12.s, z20.b, z16.b\n"
+ ".inst 0x45119a68 // smmla z8.s, z19.b, z17.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45109a6c // smmla z12.s, z19.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x45119a89 // smmla z9.s, z20.b, z17.b\n"
- ".inst 0x45109a8d // smmla z13.s, z20.b, z16.b\n"
+ ".inst 0x45119a69 // smmla z9.s, z19.b, z17.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x45109a6d // smmla z13.s, z19.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45119a8a // smmla z10.s, z20.b, z17.b\n"
- ".inst 0x45109a8e // smmla z14.s, z20.b, z16.b\n"
+ ".inst 0x45119a6a // smmla z10.s, z19.b, z17.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45109a6e // smmla z14.s, z19.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
- ".inst 0x45119a8b // smmla z11.s, z20.b, z17.b\n"
- ".inst 0x45109a8f // smmla z15.s, z20.b, z16.b\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ ".inst 0x45119a6b // smmla z11.s, z19.b, z17.b\n"
+ ".inst 0x45109a6f // smmla z15.s, z19.b, z16.b\n"
"bgt 19b\n"
"20:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
+ "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
"ld1rqb { z19.b }, p0/Z, [x25]\n"
"trn1 z18.d, z1.d, z19.d\n"
- "ld1b { z17.b }, p5/Z, [x10]\n"
- "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45119a48 // smmla z8.s, z18.b, z17.b\n"
- ".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45109a4c // smmla z12.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x45119a49 // smmla z9.s, z18.b, z17.b\n"
- ".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45109a4d // smmla z13.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45119a4a // smmla z10.s, z18.b, z17.b\n"
- ".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45109a4e // smmla z14.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "addvl x10, x10, #8\n"
".inst 0x45119a4b // smmla z11.s, z18.b, z17.b\n"
".inst 0x45109a4f // smmla z15.s, z18.b, z16.b\n"
- "addvl x10, x10, #8\n"
"ble 21f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45119828 // smmla z8.s, z1.b, z17.b\n"
- ".inst 0x4510982c // smmla z12.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4510982c // smmla z12.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45119829 // smmla z9.s, z1.b, z17.b\n"
- ".inst 0x4510982d // smmla z13.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4510982d // smmla z13.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x4511982a // smmla z10.s, z1.b, z17.b\n"
- ".inst 0x4510982e // smmla z14.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4510982e // smmla z14.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x4511982b // smmla z11.s, z1.b, z17.b\n"
".inst 0x4510982f // smmla z15.s, z1.b, z16.b\n"
- "addvl x10, x10, #8\n"
"21:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 16b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
- "uzp1 z16.d, z8.d, z12.d\n"
+ "uzp1 z17.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "uzp1 z17.d, z9.d, z13.d\n"
+ "uzp1 z16.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "st1w { z16.s }, p4, [x9]\n"
- "uzp1 z16.d, z10.d, z14.d\n"
+ "uzp1 z12.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "st1w { z17.s }, p3, [x9, #1, MUL VL]\n"
- "uzp1 z2.d, z11.d, z15.d\n"
+ "add x20, x9, x20, LSL #2\n"
+ "uzp1 z26.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "st1w { z16.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z2.s }, p1, [x9, #3, MUL VL]\n"
+ "st1w { z17.s }, p4, [x9]\n"
+ "st1w { z16.s }, p3, [x9, #1, MUL VL]\n"
+ "st1w { z12.s }, p2, [x9, #2, MUL VL]\n"
+ "st1w { z26.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z8.s }, p4, [x20]\n"
"st1w { z9.s }, p3, [x20, #1, MUL VL]\n"
@@ -425,7 +425,7 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"23:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"24:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -437,28 +437,28 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 25f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p4/Z, [x9]\n"
+ "ld1w { z26.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x21, x9, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x21, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x20]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "zip1 z8.d, z24.d, z12.d\n"
+ "zip2 z12.d, z24.d, z12.d\n"
+ "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z9.d, z26.d, z13.d\n"
+ "zip2 z13.d, z26.d, z13.d\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
@@ -489,8 +489,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov x28, #0x0\n"
"27:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 28f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -512,92 +512,92 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"ble 31f\n"
"30:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z30.b }, p0/Z, [x26]\n"
- "ld1rqb { z24.b }, p0/Z, [x25]\n"
- "ld1rqb { z28.b }, p0/Z, [x24]\n"
- "trn1 z27.d, z30.d, z24.d\n"
- "trn2 z30.d, z30.d, z24.d\n"
"ld1b { z25.b }, p5/Z, [x10]\n"
- "trn1 z26.d, z28.d, z29.d\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z27.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z24.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqb { z26.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "trn1 z6.d, z27.d, z24.d\n"
+ "trn2 z27.d, z27.d, z24.d\n"
+ "trn1 z30.d, z26.d, z29.d\n"
+ "trn2 z26.d, z26.d, z29.d\n"
+ ".inst 0x451998c8 // smmla z8.s, z6.b, z25.b\n"
+ ".inst 0x451c98cc // smmla z12.s, z6.b, z28.b\n"
+ ".inst 0x45199bd0 // smmla z16.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x451c9bd4 // smmla z20.s, z30.b, z28.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x451998c9 // smmla z9.s, z6.b, z25.b\n"
+ ".inst 0x45199bd1 // smmla z17.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x451898cd // smmla z13.s, z6.b, z24.b\n"
+ ".inst 0x45189bd5 // smmla z21.s, z30.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x451998ca // smmla z10.s, z6.b, z25.b\n"
+ ".inst 0x45199bd2 // smmla z18.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x451898ce // smmla z14.s, z6.b, z24.b\n"
+ ".inst 0x45189bd6 // smmla z22.s, z30.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x451998cb // smmla z11.s, z6.b, z25.b\n"
+ ".inst 0x45199bd3 // smmla z19.s, z30.b, z25.b\n"
+ ".inst 0x451898cf // smmla z15.s, z6.b, z24.b\n"
+ ".inst 0x45189bd7 // smmla z23.s, z30.b, z24.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x45199b68 // smmla z8.s, z27.b, z25.b\n"
".inst 0x45199b50 // smmla z16.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-6, MUL VL]\n"
".inst 0x45189b6c // smmla z12.s, z27.b, z24.b\n"
".inst 0x45189b54 // smmla z20.s, z26.b, z24.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x45199b69 // smmla z9.s, z27.b, z25.b\n"
".inst 0x45199b51 // smmla z17.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z28.d, z28.d, z29.d\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-4, MUL VL]\n"
".inst 0x45189b6d // smmla z13.s, z27.b, z24.b\n"
".inst 0x45189b55 // smmla z21.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x45199b6a // smmla z10.s, z27.b, z25.b\n"
".inst 0x45199b52 // smmla z18.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-2, MUL VL]\n"
".inst 0x45189b6e // smmla z14.s, z27.b, z24.b\n"
".inst 0x45189b56 // smmla z22.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-1, MUL VL]\n"
".inst 0x45199b6b // smmla z11.s, z27.b, z25.b\n"
".inst 0x45199b53 // smmla z19.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x45189b6f // smmla z15.s, z27.b, z24.b\n"
".inst 0x45189b57 // smmla z23.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x45199bc8 // smmla z8.s, z30.b, z25.b\n"
- ".inst 0x45199b90 // smmla z16.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x45189bcc // smmla z12.s, z30.b, z24.b\n"
- ".inst 0x45189b94 // smmla z20.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x45199bc9 // smmla z9.s, z30.b, z25.b\n"
- ".inst 0x45199b91 // smmla z17.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x45189bcd // smmla z13.s, z30.b, z24.b\n"
- ".inst 0x45189b95 // smmla z21.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45199bca // smmla z10.s, z30.b, z25.b\n"
- ".inst 0x45199b92 // smmla z18.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x45189bce // smmla z14.s, z30.b, z24.b\n"
- ".inst 0x45189b96 // smmla z22.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x45199bcb // smmla z11.s, z30.b, z25.b\n"
- ".inst 0x45199b93 // smmla z19.s, z28.b, z25.b\n"
- ".inst 0x45189bcf // smmla z15.s, z30.b, z24.b\n"
- ".inst 0x45189b97 // smmla z23.s, z28.b, z24.b\n"
"bgt 30b\n"
"31:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
"ld1rqb { z24.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
"trn1 z27.d, z1.d, z24.d\n"
"trn2 z1.d, z1.d, z24.d\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "trn1 z26.d, z3.d, z28.d\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "trn1 z26.d, z3.d, z29.d\n"
".inst 0x45199b68 // smmla z8.s, z27.b, z25.b\n"
+ ".inst 0x451c9b6c // smmla z12.s, z27.b, z28.b\n"
+ "trn2 z3.d, z3.d, z29.d\n"
".inst 0x45199b50 // smmla z16.s, z26.b, z25.b\n"
- ".inst 0x45189b6c // smmla z12.s, z27.b, z24.b\n"
- ".inst 0x45189b54 // smmla z20.s, z26.b, z24.b\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x451c9b54 // smmla z20.s, z26.b, z28.b\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45199b69 // smmla z9.s, z27.b, z25.b\n"
".inst 0x45199b51 // smmla z17.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
".inst 0x45189b6d // smmla z13.s, z27.b, z24.b\n"
".inst 0x45189b55 // smmla z21.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z28.d\n"
".inst 0x45199b6a // smmla z10.s, z27.b, z25.b\n"
".inst 0x45199b52 // smmla z18.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
@@ -614,9 +614,9 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45199828 // smmla z8.s, z1.b, z25.b\n"
".inst 0x45199870 // smmla z16.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x4518982c // smmla z12.s, z1.b, z24.b\n"
".inst 0x45189874 // smmla z20.s, z3.b, z24.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45199829 // smmla z9.s, z1.b, z25.b\n"
".inst 0x45199871 // smmla z17.s, z3.b, z25.b\n"
@@ -641,26 +641,26 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 27b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "uzp1 z25.d, z8.d, z12.d\n"
+ "uzp1 z27.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "uzp1 z24.d, z9.d, z13.d\n"
- "st1w { z25.s }, p4, [x9]\n"
+ "uzp1 z26.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z25.d, z10.d, z14.d\n"
- "st1w { z24.s }, p3, [x9, #1, MUL VL]\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"uzp1 z24.d, z11.d, z15.d\n"
- "st1w { z25.s }, p2, [x9, #2, MUL VL]\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z27.s }, p4, [x9]\n"
"uzp1 z16.d, z16.d, z20.d\n"
- "st1w { z24.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"uzp1 z17.d, z17.d, z21.d\n"
+ "st1w { z26.s }, p3, [x9, #1, MUL VL]\n"
"uzp1 z18.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x21]\n"
"uzp1 z19.d, z19.d, z23.d\n"
+ "st1w { z25.s }, p2, [x9, #2, MUL VL]\n"
+ "st1w { z24.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x21]\n"
"st1w { z9.s }, p3, [x21, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x21, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x21, #3, MUL VL]\n"
@@ -676,7 +676,7 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"34:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"35:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -688,37 +688,37 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 36f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x22, x9, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x22, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x21]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x20]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -745,8 +745,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov x28, #0x0\n"
"38:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 39f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -771,114 +771,114 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"ble 42f\n"
"41:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z30.b }, p0/Z, [x26]\n"
- "ld1rqb { z24.b }, p0/Z, [x25]\n"
- "trn1 z29.d, z30.d, z24.d\n"
+ "ld1b { z31.b }, p5/Z, [x10]\n"
+ "ld1b { z30.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z29.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z28.b }, p0/Z, [x24]\n"
- "ld1rqb { z27.b }, p0/Z, [x23]\n"
- "trn2 z30.d, z30.d, z24.d\n"
- "trn1 z26.d, z28.d, z27.d\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45199ba8 // smmla z8.s, z29.b, z25.b\n"
- ".inst 0x45199b50 // smmla z16.s, z26.b, z25.b\n"
- ".inst 0x45189bac // smmla z12.s, z29.b, z24.b\n"
- ".inst 0x45189b54 // smmla z20.s, z26.b, z24.b\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z27.d, z29.d, z25.d\n"
+ "trn2 z29.d, z29.d, z25.d\n"
+ "trn1 z26.d, z28.d, z24.d\n"
+ "trn2 z28.d, z28.d, z24.d\n"
+ ".inst 0x451f9b68 // smmla z8.s, z27.b, z31.b\n"
+ ".inst 0x451e9b6c // smmla z12.s, z27.b, z30.b\n"
+ ".inst 0x451f9b50 // smmla z16.s, z26.b, z31.b\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x451e9b54 // smmla z20.s, z26.b, z30.b\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x45199ba9 // smmla z9.s, z29.b, z25.b\n"
+ ".inst 0x45199b69 // smmla z9.s, z27.b, z25.b\n"
".inst 0x45199b51 // smmla z17.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z28.d, z28.d, z27.d\n"
- ".inst 0x45189bad // smmla z13.s, z29.b, z24.b\n"
+ ".inst 0x45189b6d // smmla z13.s, z27.b, z24.b\n"
".inst 0x45189b55 // smmla z21.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- ".inst 0x45199baa // smmla z10.s, z29.b, z25.b\n"
+ ".inst 0x45199b6a // smmla z10.s, z27.b, z25.b\n"
".inst 0x45199b52 // smmla z18.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
- ".inst 0x45189bae // smmla z14.s, z29.b, z24.b\n"
+ ".inst 0x45189b6e // smmla z14.s, z27.b, z24.b\n"
".inst 0x45189b56 // smmla z22.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x45199bab // smmla z11.s, z29.b, z25.b\n"
+ ".inst 0x45199b6b // smmla z11.s, z27.b, z25.b\n"
".inst 0x45199b53 // smmla z19.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
- ".inst 0x45189baf // smmla z15.s, z29.b, z24.b\n"
+ ".inst 0x45189b6f // smmla z15.s, z27.b, z24.b\n"
".inst 0x45189b57 // smmla z23.s, z26.b, z24.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x45199bc8 // smmla z8.s, z30.b, z25.b\n"
+ ".inst 0x45199ba8 // smmla z8.s, z29.b, z25.b\n"
".inst 0x45199b90 // smmla z16.s, z28.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x45189bcc // smmla z12.s, z30.b, z24.b\n"
+ ".inst 0x45189bac // smmla z12.s, z29.b, z24.b\n"
".inst 0x45189b94 // smmla z20.s, z28.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x45199bc9 // smmla z9.s, z30.b, z25.b\n"
+ ".inst 0x45199ba9 // smmla z9.s, z29.b, z25.b\n"
".inst 0x45199b91 // smmla z17.s, z28.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x45189bcd // smmla z13.s, z30.b, z24.b\n"
+ ".inst 0x45189bad // smmla z13.s, z29.b, z24.b\n"
".inst 0x45189b95 // smmla z21.s, z28.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45199bca // smmla z10.s, z30.b, z25.b\n"
+ ".inst 0x45199baa // smmla z10.s, z29.b, z25.b\n"
".inst 0x45199b92 // smmla z18.s, z28.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x45189bce // smmla z14.s, z30.b, z24.b\n"
+ ".inst 0x45189bae // smmla z14.s, z29.b, z24.b\n"
".inst 0x45189b96 // smmla z22.s, z28.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x45199bcb // smmla z11.s, z30.b, z25.b\n"
+ ".inst 0x45199bab // smmla z11.s, z29.b, z25.b\n"
".inst 0x45199b93 // smmla z19.s, z28.b, z25.b\n"
- ".inst 0x45189bcf // smmla z15.s, z30.b, z24.b\n"
+ ".inst 0x45189baf // smmla z15.s, z29.b, z24.b\n"
".inst 0x45189b97 // smmla z23.s, z28.b, z24.b\n"
"bgt 41b\n"
"42:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z24.b }, p0/Z, [x25]\n"
- "trn1 z28.d, z1.d, z24.d\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z27.b }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z24.d\n"
- "trn1 z26.d, z3.d, z27.d\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45199b88 // smmla z8.s, z28.b, z25.b\n"
- ".inst 0x45199b50 // smmla z16.s, z26.b, z25.b\n"
- ".inst 0x45189b8c // smmla z12.s, z28.b, z24.b\n"
- ".inst 0x45189b54 // smmla z20.s, z26.b, z24.b\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "trn1 z27.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ "trn1 z26.d, z3.d, z24.d\n"
+ ".inst 0x451d9b68 // smmla z8.s, z27.b, z29.b\n"
+ ".inst 0x451c9b6c // smmla z12.s, z27.b, z28.b\n"
+ "trn2 z3.d, z3.d, z24.d\n"
+ ".inst 0x451d9b50 // smmla z16.s, z26.b, z29.b\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x451c9b54 // smmla z20.s, z26.b, z28.b\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x45199b89 // smmla z9.s, z28.b, z25.b\n"
+ ".inst 0x45199b69 // smmla z9.s, z27.b, z25.b\n"
".inst 0x45199b51 // smmla z17.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- ".inst 0x45189b8d // smmla z13.s, z28.b, z24.b\n"
+ ".inst 0x45189b6d // smmla z13.s, z27.b, z24.b\n"
".inst 0x45189b55 // smmla z21.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z27.d\n"
- ".inst 0x45199b8a // smmla z10.s, z28.b, z25.b\n"
+ ".inst 0x45199b6a // smmla z10.s, z27.b, z25.b\n"
".inst 0x45199b52 // smmla z18.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x45189b8e // smmla z14.s, z28.b, z24.b\n"
+ ".inst 0x45189b6e // smmla z14.s, z27.b, z24.b\n"
".inst 0x45189b56 // smmla z22.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #8\n"
- ".inst 0x45199b8b // smmla z11.s, z28.b, z25.b\n"
+ ".inst 0x45199b6b // smmla z11.s, z27.b, z25.b\n"
".inst 0x45199b53 // smmla z19.s, z26.b, z25.b\n"
- ".inst 0x45189b8f // smmla z15.s, z28.b, z24.b\n"
+ ".inst 0x45189b6f // smmla z15.s, z27.b, z24.b\n"
".inst 0x45189b57 // smmla z23.s, z26.b, z24.b\n"
"ble 43f\n"
"ld1b { z25.b }, p5/Z, [x10]\n"
"ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45199828 // smmla z8.s, z1.b, z25.b\n"
".inst 0x45199870 // smmla z16.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x4518982c // smmla z12.s, z1.b, z24.b\n"
".inst 0x45189874 // smmla z20.s, z3.b, z24.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45199829 // smmla z9.s, z1.b, z25.b\n"
".inst 0x45199871 // smmla z17.s, z3.b, z25.b\n"
@@ -903,33 +903,33 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 38b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"uzp1 z25.d, z8.d, z12.d\n"
- "add x20, x21, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z24.d, z9.d, z13.d\n"
- "st1w { z25.s }, p4, [x9]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "uzp1 z25.d, z10.d, z14.d\n"
- "st1w { z24.s }, p3, [x9, #1, MUL VL]\n"
+ "uzp1 z27.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "uzp1 z24.d, z11.d, z15.d\n"
- "st1w { z25.s }, p2, [x9, #2, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "uzp1 z26.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z25.s }, p4, [x9]\n"
"uzp1 z25.d, z16.d, z20.d\n"
- "st1w { z24.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z24.s }, p3, [x9, #1, MUL VL]\n"
"uzp1 z24.d, z17.d, z21.d\n"
- "st1w { z8.s }, p4, [x22]\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z27.s }, p2, [x9, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z9.s }, p3, [x22, #1, MUL VL]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z26.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"uzp1 z20.d, z19.d, z23.d\n"
- "st1w { z10.s }, p2, [x22, #2, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x22]\n"
+ "st1w { z9.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x22, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x22, #3, MUL VL]\n"
"st1w { z25.s }, p4, [x21]\n"
"st1w { z24.s }, p3, [x21, #1, MUL VL]\n"
@@ -947,7 +947,7 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"45:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"46:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -959,46 +959,46 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 47f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x23, x9, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
"add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x22]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x21]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z17.d, z18.d, z21.d\n"
- "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x20]\n"
- "zip1 z18.d, z19.d, z22.d\n"
- "zip2 z22.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1037,8 +1037,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov x28, #0x0\n"
"49:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1066,102 +1066,103 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"ble 53f\n"
"52:" // Height 5: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z1.b }, p5/Z, [x10]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z6.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z3.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z7.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z5.d, z6.d, z1.d\n"
- "trn2 z6.d, z6.d, z1.d\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
"trn1 z3.d, z7.d, z2.d\n"
"trn2 z7.d, z7.d, z2.d\n"
- "ld1b { z1.b }, p5/Z, [x10]\n"
- "trn1 z2.d, z4.d, z0.d\n"
- "trn2 z4.d, z4.d, z0.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
"ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x450198a8 // smmla z8.s, z5.b, z1.b\n"
+ ".inst 0x45019888 // smmla z8.s, z4.b, z1.b\n"
".inst 0x45019870 // smmla z16.s, z3.b, z1.b\n"
".inst 0x45019858 // smmla z24.s, z2.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- ".inst 0x450098ac // smmla z12.s, z5.b, z0.b\n"
+ ".inst 0x4500988c // smmla z12.s, z4.b, z0.b\n"
".inst 0x45009874 // smmla z20.s, z3.b, z0.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x4500985c // smmla z28.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x450198a9 // smmla z9.s, z5.b, z1.b\n"
- "add x25, x25, #0x10\n"
+ ".inst 0x45019889 // smmla z9.s, z4.b, z1.b\n"
".inst 0x45019871 // smmla z17.s, z3.b, z1.b\n"
".inst 0x45019859 // smmla z25.s, z2.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x450098ad // smmla z13.s, z5.b, z0.b\n"
+ ".inst 0x4500988d // smmla z13.s, z4.b, z0.b\n"
".inst 0x45009875 // smmla z21.s, z3.b, z0.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x4500985d // smmla z29.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x450198aa // smmla z10.s, z5.b, z1.b\n"
+ ".inst 0x4501988a // smmla z10.s, z4.b, z1.b\n"
".inst 0x45019872 // smmla z18.s, z3.b, z1.b\n"
".inst 0x4501985a // smmla z26.s, z2.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x450098ae // smmla z14.s, z5.b, z0.b\n"
+ ".inst 0x4500988e // smmla z14.s, z4.b, z0.b\n"
".inst 0x45009876 // smmla z22.s, z3.b, z0.b\n"
".inst 0x4500985e // smmla z30.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x450198ab // smmla z11.s, z5.b, z1.b\n"
+ ".inst 0x4501988b // smmla z11.s, z4.b, z1.b\n"
".inst 0x45019873 // smmla z19.s, z3.b, z1.b\n"
".inst 0x4501985b // smmla z27.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
- ".inst 0x450098af // smmla z15.s, z5.b, z0.b\n"
+ ".inst 0x4500988f // smmla z15.s, z4.b, z0.b\n"
".inst 0x45009877 // smmla z23.s, z3.b, z0.b\n"
".inst 0x4500985f // smmla z31.s, z2.b, z0.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z0.b }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x450198c8 // smmla z8.s, z6.b, z1.b\n"
".inst 0x450198f0 // smmla z16.s, z7.b, z1.b\n"
- ".inst 0x45019898 // smmla z24.s, z4.b, z1.b\n"
+ ".inst 0x450198b8 // smmla z24.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-6, MUL VL]\n"
".inst 0x450098cc // smmla z12.s, z6.b, z0.b\n"
".inst 0x450098f4 // smmla z20.s, z7.b, z0.b\n"
- ".inst 0x4500989c // smmla z28.s, z4.b, z0.b\n"
+ ".inst 0x450098bc // smmla z28.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x450198c9 // smmla z9.s, z6.b, z1.b\n"
".inst 0x450198f1 // smmla z17.s, z7.b, z1.b\n"
- ".inst 0x45019899 // smmla z25.s, z4.b, z1.b\n"
+ ".inst 0x450198b9 // smmla z25.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-4, MUL VL]\n"
".inst 0x450098cd // smmla z13.s, z6.b, z0.b\n"
".inst 0x450098f5 // smmla z21.s, z7.b, z0.b\n"
- ".inst 0x4500989d // smmla z29.s, z4.b, z0.b\n"
+ ".inst 0x450098bd // smmla z29.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x450198ca // smmla z10.s, z6.b, z1.b\n"
".inst 0x450198f2 // smmla z18.s, z7.b, z1.b\n"
- ".inst 0x4501989a // smmla z26.s, z4.b, z1.b\n"
+ ".inst 0x450198ba // smmla z26.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-2, MUL VL]\n"
".inst 0x450098ce // smmla z14.s, z6.b, z0.b\n"
".inst 0x450098f6 // smmla z22.s, z7.b, z0.b\n"
- ".inst 0x4500989e // smmla z30.s, z4.b, z0.b\n"
+ ".inst 0x450098be // smmla z30.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-1, MUL VL]\n"
".inst 0x450198cb // smmla z11.s, z6.b, z1.b\n"
".inst 0x450198f3 // smmla z19.s, z7.b, z1.b\n"
- ".inst 0x4501989b // smmla z27.s, z4.b, z1.b\n"
+ ".inst 0x450198bb // smmla z27.s, z5.b, z1.b\n"
".inst 0x450098cf // smmla z15.s, z6.b, z0.b\n"
".inst 0x450098f7 // smmla z23.s, z7.b, z0.b\n"
- ".inst 0x4500989f // smmla z31.s, z4.b, z0.b\n"
+ ".inst 0x450098bf // smmla z31.s, z5.b, z0.b\n"
"bgt 52b\n"
"53:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z2.b }, p5/Z, [x10]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z4.b }, p0/Z, [x25]\n"
+ "ld1rqb { z6.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z7.d, z1.d, z4.d\n"
- "trn2 z1.d, z1.d, z4.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x23]\n"
"ld1rqb { z5.b }, p0/Z, [x22]\n"
- "trn1 z6.d, z3.d, z2.d\n"
- "trn2 z3.d, z3.d, z2.d\n"
- "ld1b { z2.b }, p5/Z, [x10]\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
"ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
@@ -1169,7 +1170,6 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x450298d0 // smmla z16.s, z6.b, z2.b\n"
".inst 0x45029898 // smmla z24.s, z4.b, z2.b\n"
"ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
".inst 0x450098ec // smmla z12.s, z7.b, z0.b\n"
".inst 0x450098d4 // smmla z20.s, z6.b, z0.b\n"
".inst 0x4500989c // smmla z28.s, z4.b, z0.b\n"
@@ -1190,8 +1190,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x450098d6 // smmla z22.s, z6.b, z0.b\n"
".inst 0x4500989e // smmla z30.s, z4.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x450298eb // smmla z11.s, z7.b, z2.b\n"
"addvl x10, x10, #8\n"
+ ".inst 0x450298eb // smmla z11.s, z7.b, z2.b\n"
".inst 0x450298d3 // smmla z19.s, z6.b, z2.b\n"
".inst 0x4502989b // smmla z27.s, z4.b, z2.b\n"
".inst 0x450098ef // smmla z15.s, z7.b, z0.b\n"
@@ -1203,24 +1203,24 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x45029828 // smmla z8.s, z1.b, z2.b\n"
".inst 0x45029870 // smmla z16.s, z3.b, z2.b\n"
".inst 0x450298b8 // smmla z24.s, z5.b, z2.b\n"
- ".inst 0x4500982c // smmla z12.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4500982c // smmla z12.s, z1.b, z0.b\n"
".inst 0x45009874 // smmla z20.s, z3.b, z0.b\n"
".inst 0x450098bc // smmla z28.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45029829 // smmla z9.s, z1.b, z2.b\n"
".inst 0x45029871 // smmla z17.s, z3.b, z2.b\n"
".inst 0x450298b9 // smmla z25.s, z5.b, z2.b\n"
- ".inst 0x4500982d // smmla z13.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4500982d // smmla z13.s, z1.b, z0.b\n"
".inst 0x45009875 // smmla z21.s, z3.b, z0.b\n"
".inst 0x450098bd // smmla z29.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x4502982a // smmla z10.s, z1.b, z2.b\n"
".inst 0x45029872 // smmla z18.s, z3.b, z2.b\n"
".inst 0x450298ba // smmla z26.s, z5.b, z2.b\n"
- ".inst 0x4500982e // smmla z14.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4500982e // smmla z14.s, z1.b, z0.b\n"
".inst 0x45009876 // smmla z22.s, z3.b, z0.b\n"
".inst 0x450098be // smmla z30.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
@@ -1237,39 +1237,39 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 49b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "uzp1 z2.d, z8.d, z12.d\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
+ "uzp1 z1.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "uzp1 z1.d, z9.d, z13.d\n"
+ "uzp1 z0.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "uzp1 z0.d, z10.d, z14.d\n"
- "st1w { z2.s }, p4, [x9]\n"
+ "uzp1 z3.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"uzp1 z2.d, z11.d, z15.d\n"
- "st1w { z1.s }, p3, [x9, #1, MUL VL]\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z1.s }, p4, [x9]\n"
+ "add x20, x21, x20, LSL #2\n"
"uzp1 z1.d, z16.d, z20.d\n"
- "st1w { z0.s }, p2, [x9, #2, MUL VL]\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z0.s }, p3, [x9, #1, MUL VL]\n"
"uzp1 z0.d, z17.d, z21.d\n"
- "st1w { z2.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z3.s }, p2, [x9, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x23]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z2.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"uzp1 z20.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
"uzp1 z24.d, z24.d, z28.d\n"
- "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
"uzp1 z25.d, z25.d, z29.d\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp1 z26.d, z26.d, z30.d\n"
- "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"uzp1 z27.d, z27.d, z31.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"st1w { z1.s }, p4, [x22]\n"
"st1w { z0.s }, p3, [x22, #1, MUL VL]\n"
"st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
@@ -1289,11 +1289,12 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"b 68f\n"
"56:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"57:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1305,54 +1306,54 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 58f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "ld1w { z17.s }, p4/Z, [x9]\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
- "zip1 z8.d, z17.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "zip2 z12.d, z17.d, z12.d\n"
- "zip1 z9.d, z18.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x23]\n"
- "zip2 z13.d, z18.d, z13.d\n"
- "zip1 z10.d, z20.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip2 z14.d, z20.d, z14.d\n"
- "zip1 z11.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x22]\n"
- "zip2 z15.d, z16.d, z15.d\n"
- "zip1 z16.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip2 z20.d, z17.d, z20.d\n"
- "zip1 z17.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x21]\n"
- "zip2 z21.d, z18.d, z21.d\n"
- "zip1 z18.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip2 z22.d, z19.d, z22.d\n"
- "zip1 z19.d, z24.d, z23.d\n"
"ld1w { z0.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z28.s }, p4/Z, [x20]\n"
- "zip2 z23.d, z24.d, z23.d\n"
- "zip1 z24.d, z25.d, z28.d\n"
"ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1388,8 +1389,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"mov x28, #0x0\n"
"60:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1420,113 +1421,113 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"ble 64f\n"
"63:" // Height 6: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z7.b }, p0/Z, [x26]\n"
- "ld1rqb { z0.b }, p0/Z, [x25]\n"
- "trn1 z6.d, z7.d, z0.d\n"
- "ld1rqb { z5.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "trn2 z7.d, z7.d, z0.d\n"
- "trn1 z4.d, z5.d, z1.d\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z0.b }, p0/Z, [x21]\n"
- "trn2 z5.d, z5.d, z1.d\n"
- "trn1 z2.d, z3.d, z0.d\n"
- "trn2 z3.d, z3.d, z0.d\n"
"ld1b { z1.b }, p5/Z, [x10]\n"
- "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x450198c8 // smmla z8.s, z6.b, z1.b\n"
- ".inst 0x45019890 // smmla z16.s, z4.b, z1.b\n"
- ".inst 0x45019858 // smmla z24.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
"sub x27, x27, #0x10\n"
- ".inst 0x450098cc // smmla z12.s, z6.b, z0.b\n"
- ".inst 0x45009894 // smmla z20.s, z4.b, z0.b\n"
"cmp x27, #0x10\n"
+ "ld1rqb { z6.b }, p0/Z, [x26]\n"
"add x26, x26, #0x10\n"
- ".inst 0x4500985c // smmla z28.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x450198c9 // smmla z9.s, z6.b, z1.b\n"
+ "ld1rqb { z3.b }, p0/Z, [x25]\n"
"add x25, x25, #0x10\n"
- ".inst 0x45019891 // smmla z17.s, z4.b, z1.b\n"
- ".inst 0x45019859 // smmla z25.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1rqb { z7.b }, p0/Z, [x24]\n"
"add x24, x24, #0x10\n"
- ".inst 0x450098cd // smmla z13.s, z6.b, z0.b\n"
- ".inst 0x45009895 // smmla z21.s, z4.b, z0.b\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z0.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
+ "add x21, x21, #0x10\n"
+ "trn1 z3.d, z7.d, z2.d\n"
+ "trn2 z7.d, z7.d, z2.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45019888 // smmla z8.s, z4.b, z1.b\n"
+ ".inst 0x45019870 // smmla z16.s, z3.b, z1.b\n"
+ ".inst 0x45019858 // smmla z24.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4500988c // smmla z12.s, z4.b, z0.b\n"
+ ".inst 0x45009874 // smmla z20.s, z3.b, z0.b\n"
+ ".inst 0x4500985c // smmla z28.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45019889 // smmla z9.s, z4.b, z1.b\n"
+ ".inst 0x45019871 // smmla z17.s, z3.b, z1.b\n"
+ ".inst 0x45019859 // smmla z25.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4500988d // smmla z13.s, z4.b, z0.b\n"
+ ".inst 0x45009875 // smmla z21.s, z3.b, z0.b\n"
".inst 0x4500985d // smmla z29.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x450198ca // smmla z10.s, z6.b, z1.b\n"
- "add x21, x21, #0x10\n"
- ".inst 0x45019892 // smmla z18.s, z4.b, z1.b\n"
+ ".inst 0x4501988a // smmla z10.s, z4.b, z1.b\n"
+ ".inst 0x45019872 // smmla z18.s, z3.b, z1.b\n"
".inst 0x4501985a // smmla z26.s, z2.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x450098ce // smmla z14.s, z6.b, z0.b\n"
- ".inst 0x45009896 // smmla z22.s, z4.b, z0.b\n"
+ ".inst 0x4500988e // smmla z14.s, z4.b, z0.b\n"
+ ".inst 0x45009876 // smmla z22.s, z3.b, z0.b\n"
".inst 0x4500985e // smmla z30.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x450198cb // smmla z11.s, z6.b, z1.b\n"
- ".inst 0x45019893 // smmla z19.s, z4.b, z1.b\n"
+ ".inst 0x4501988b // smmla z11.s, z4.b, z1.b\n"
+ ".inst 0x45019873 // smmla z19.s, z3.b, z1.b\n"
".inst 0x4501985b // smmla z27.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
- ".inst 0x450098cf // smmla z15.s, z6.b, z0.b\n"
- ".inst 0x45009897 // smmla z23.s, z4.b, z0.b\n"
+ ".inst 0x4500988f // smmla z15.s, z4.b, z0.b\n"
+ ".inst 0x45009877 // smmla z23.s, z3.b, z0.b\n"
".inst 0x4500985f // smmla z31.s, z2.b, z0.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z0.b }, p5/Z, [x10, #-7, MUL VL]\n"
- ".inst 0x450198e8 // smmla z8.s, z7.b, z1.b\n"
- ".inst 0x450198b0 // smmla z16.s, z5.b, z1.b\n"
- ".inst 0x45019878 // smmla z24.s, z3.b, z1.b\n"
+ ".inst 0x450198c8 // smmla z8.s, z6.b, z1.b\n"
+ ".inst 0x450198f0 // smmla z16.s, z7.b, z1.b\n"
+ ".inst 0x450198b8 // smmla z24.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-6, MUL VL]\n"
- ".inst 0x450098ec // smmla z12.s, z7.b, z0.b\n"
- ".inst 0x450098b4 // smmla z20.s, z5.b, z0.b\n"
- ".inst 0x4500987c // smmla z28.s, z3.b, z0.b\n"
+ ".inst 0x450098cc // smmla z12.s, z6.b, z0.b\n"
+ ".inst 0x450098f4 // smmla z20.s, z7.b, z0.b\n"
+ ".inst 0x450098bc // smmla z28.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x450198e9 // smmla z9.s, z7.b, z1.b\n"
- ".inst 0x450198b1 // smmla z17.s, z5.b, z1.b\n"
- ".inst 0x45019879 // smmla z25.s, z3.b, z1.b\n"
+ ".inst 0x450198c9 // smmla z9.s, z6.b, z1.b\n"
+ ".inst 0x450198f1 // smmla z17.s, z7.b, z1.b\n"
+ ".inst 0x450198b9 // smmla z25.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x450098ed // smmla z13.s, z7.b, z0.b\n"
- ".inst 0x450098b5 // smmla z21.s, z5.b, z0.b\n"
- ".inst 0x4500987d // smmla z29.s, z3.b, z0.b\n"
+ ".inst 0x450098cd // smmla z13.s, z6.b, z0.b\n"
+ ".inst 0x450098f5 // smmla z21.s, z7.b, z0.b\n"
+ ".inst 0x450098bd // smmla z29.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x450198ea // smmla z10.s, z7.b, z1.b\n"
- ".inst 0x450198b2 // smmla z18.s, z5.b, z1.b\n"
- ".inst 0x4501987a // smmla z26.s, z3.b, z1.b\n"
+ ".inst 0x450198ca // smmla z10.s, z6.b, z1.b\n"
+ ".inst 0x450198f2 // smmla z18.s, z7.b, z1.b\n"
+ ".inst 0x450198ba // smmla z26.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x450098ee // smmla z14.s, z7.b, z0.b\n"
- ".inst 0x450098b6 // smmla z22.s, z5.b, z0.b\n"
- ".inst 0x4500987e // smmla z30.s, z3.b, z0.b\n"
+ ".inst 0x450098ce // smmla z14.s, z6.b, z0.b\n"
+ ".inst 0x450098f6 // smmla z22.s, z7.b, z0.b\n"
+ ".inst 0x450098be // smmla z30.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x450198eb // smmla z11.s, z7.b, z1.b\n"
- ".inst 0x450198b3 // smmla z19.s, z5.b, z1.b\n"
- ".inst 0x4501987b // smmla z27.s, z3.b, z1.b\n"
- ".inst 0x450098ef // smmla z15.s, z7.b, z0.b\n"
- ".inst 0x450098b7 // smmla z23.s, z5.b, z0.b\n"
- ".inst 0x4500987f // smmla z31.s, z3.b, z0.b\n"
+ ".inst 0x450198cb // smmla z11.s, z6.b, z1.b\n"
+ ".inst 0x450198f3 // smmla z19.s, z7.b, z1.b\n"
+ ".inst 0x450198bb // smmla z27.s, z5.b, z1.b\n"
+ ".inst 0x450098cf // smmla z15.s, z6.b, z0.b\n"
+ ".inst 0x450098f7 // smmla z23.s, z7.b, z0.b\n"
+ ".inst 0x450098bf // smmla z31.s, z5.b, z0.b\n"
"bgt 63b\n"
"64:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z2.b }, p5/Z, [x10]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z0.b }, p0/Z, [x25]\n"
- "trn1 z7.d, z1.d, z0.d\n"
+ "ld1rqb { z6.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z0.d\n"
- "trn1 z6.d, z3.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x23]\n"
"ld1rqb { z5.b }, p0/Z, [x22]\n"
"ld1rqb { z0.b }, p0/Z, [x21]\n"
- "trn2 z3.d, z3.d, z2.d\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
- "ld1b { z2.b }, p5/Z, [x10]\n"
"ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x450298e8 // smmla z8.s, z7.b, z2.b\n"
".inst 0x450298d0 // smmla z16.s, z6.b, z2.b\n"
".inst 0x45029898 // smmla z24.s, z4.b, z2.b\n"
"ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
".inst 0x450098ec // smmla z12.s, z7.b, z0.b\n"
".inst 0x450098d4 // smmla z20.s, z6.b, z0.b\n"
".inst 0x4500989c // smmla z28.s, z4.b, z0.b\n"
@@ -1547,8 +1548,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x450098d6 // smmla z22.s, z6.b, z0.b\n"
".inst 0x4500989e // smmla z30.s, z4.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x450298eb // smmla z11.s, z7.b, z2.b\n"
"addvl x10, x10, #8\n"
+ ".inst 0x450298eb // smmla z11.s, z7.b, z2.b\n"
".inst 0x450298d3 // smmla z19.s, z6.b, z2.b\n"
".inst 0x4502989b // smmla z27.s, z4.b, z2.b\n"
".inst 0x450098ef // smmla z15.s, z7.b, z0.b\n"
@@ -1560,24 +1561,24 @@ void sve_hybrid_s8s32_mmla_6x4VL (
".inst 0x45029828 // smmla z8.s, z1.b, z2.b\n"
".inst 0x45029870 // smmla z16.s, z3.b, z2.b\n"
".inst 0x450298b8 // smmla z24.s, z5.b, z2.b\n"
- ".inst 0x4500982c // smmla z12.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4500982c // smmla z12.s, z1.b, z0.b\n"
".inst 0x45009874 // smmla z20.s, z3.b, z0.b\n"
".inst 0x450098bc // smmla z28.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45029829 // smmla z9.s, z1.b, z2.b\n"
".inst 0x45029871 // smmla z17.s, z3.b, z2.b\n"
".inst 0x450298b9 // smmla z25.s, z5.b, z2.b\n"
- ".inst 0x4500982d // smmla z13.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4500982d // smmla z13.s, z1.b, z0.b\n"
".inst 0x45009875 // smmla z21.s, z3.b, z0.b\n"
".inst 0x450098bd // smmla z29.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x4502982a // smmla z10.s, z1.b, z2.b\n"
".inst 0x45029872 // smmla z18.s, z3.b, z2.b\n"
".inst 0x450298ba // smmla z26.s, z5.b, z2.b\n"
- ".inst 0x4500982e // smmla z14.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4500982e // smmla z14.s, z1.b, z0.b\n"
".inst 0x45009876 // smmla z22.s, z3.b, z0.b\n"
".inst 0x450098be // smmla z30.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
@@ -1594,46 +1595,46 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 60b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z0.d, z8.d, z12.d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x20, x21, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z0.s }, p4, [x9]\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z0.s }, p4, [x9]\n"
+ "add x21, x22, x20, LSL #2\n"
"uzp1 z15.d, z16.d, z20.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x24]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"uzp1 z22.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x24]\n"
"uzp1 z23.d, z24.d, z28.d\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
"uzp2 z24.d, z24.d, z28.d\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
"uzp1 z28.d, z25.d, z29.d\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
"uzp2 z25.d, z25.d, z29.d\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
"uzp1 z29.d, z26.d, z30.d\n"
- "st1w { z15.s }, p4, [x23]\n"
"uzp2 z26.d, z26.d, z30.d\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
"uzp1 z30.d, z27.d, z31.d\n"
- "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
"uzp2 z27.d, z27.d, z31.d\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
"st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
"st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
"st1w { z16.s }, p4, [x22]\n"
@@ -1664,8 +1665,8 @@ void sve_hybrid_s8s32_mmla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"68:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
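
The recurring change across these generated-kernel hunks is that the output pointer moves out of the inline asm's read-write operand list and into the KernelArgs block: the prologue now loads it with "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]" instead of "mov x9, %x[output_ptr]", and the Height-6 path advances it with a madd into a scratch register followed by a str back into the args block, so the constraint list can drop [output_ptr] "+&r" entirely. Below is a minimal, self-contained sketch of that pattern; the struct, function name, and row-stride constant are illustrative, not the library's actual code.

    #include <cstddef>

    // Illustrative stand-in for the generated kernels' KernelArgs block;
    // only the fields relevant to the output-pointer change are shown.
    struct KernelArgs {
        size_t output_offset = {};
        void  *output_ptr    = {};   // previously a separate "+&r" asm operand
    };

    inline void advance_output(KernelArgs &ka, unsigned long rows) {
    #if defined(__aarch64__)
        __asm__ __volatile__(
            // Load the pointer from the args block (was: mov x9, %x[output_ptr]).
            "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            // 0x18 matches the Height-6 stride constant in the hunk above;
            // here it is just an example multiplier.
            "mov x20, #0x18\n"
            "madd x20, %x[rows], x20, x9\n"
            // Write the advanced pointer back (was: madd %x[output_ptr], ...).
            "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
            :
            : [args_ptr] "r" (&ka),
              [rows] "r" (rows),
              [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr))
            : "x9", "x20", "memory");
    #endif
    }

The rest of the churn in these hunks appears to be instruction rescheduling and vector-register renaming from regenerating the kernels (loads hoisted ahead of trn1/trn2, stores interleaved differently with the uzp1/uzp2 sequences); the accumulator arithmetic is unchanged.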
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp
index 11fe5ce7e3..8d508f94f0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsSVE<rhs_operand_type, result_type, 4, 4, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
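
The companion header change gives StdTransformsSVE an explicit LHS element type: the transform set was previously parameterised only on the RHS operand and result types, presumably assuming the LHS shared the RHS type, so kernels whose two operand types differ could not express that. A hedged sketch of the shape of the new signature, with placeholder element types (the real class carries the interleave/transpose machinery and lives elsewhere in arm_gemm):

    #include <cstdint>

    // Sketch only: same arity as the new StdTransformsSVE usage, no machinery.
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned int OutHeight, unsigned int OutWidth, unsigned int K>
    struct StdTransformsSVE_sketch {};

    // Before: StdTransformsSVE<rhs_operand_type, result_type, 4, 4, 4>
    // After : StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 4>
    using transforms_sketch =
        StdTransformsSVE_sketch<uint8_t, uint8_t, uint32_t, 4, 4, 4>;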
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp
index e74b424888..7b598bac57 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void sve_hybrid_u8qa_dot_4x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
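
This is the host-side half of the same output-pointer migration: output_ptr becomes a value-initialized member of the on-stack ka block, both the indirect and direct branches now assign ka.output_ptr, and the local void *output_ptr disappears. The kernel's constraint list changes to match, dropping the "+&r" (output_ptr) output operand and adding offsetof(KernelArgs, output_ptr) as an "I" input, exactly as in the s8s32 kernel's closing hunk above; the sketch after that kernel shows the resulting load/store pattern.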
@@ -91,20 +91,20 @@ void sve_hybrid_u8qa_dot_4x4VL (
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"3:" // Height 1: setup done
"mov x26, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -120,41 +120,41 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ble 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1b { z20.b }, p2/Z, [x28]\n"
- "udot z16.s, z20.b, z0.b[0]\n"
- "ld1b { z21.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z20.b }, p2/Z, [x28, #2, MUL VL]\n"
- "udot z17.s, z21.b, z0.b[0]\n"
- "udot z18.s, z20.b, z0.b[0]\n"
- "ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z19.s, z20.b, z0.b[0]\n"
+ "ld1b { z21.b }, p2/Z, [x28]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
"ld1b { z20.b }, p2/Z, [x28, #4, MUL VL]\n"
- "udot z16.s, z20.b, z0.b[1]\n"
- "ld1b { z21.b }, p2/Z, [x28, #5, MUL VL]\n"
- "ld1b { z20.b }, p2/Z, [x28, #6, MUL VL]\n"
- "udot z17.s, z21.b, z0.b[1]\n"
- "udot z18.s, z20.b, z0.b[1]\n"
- "ld1b { z20.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "ld1b { z23.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "udot z16.s, z21.b, z0.b[0]\n"
+ "ld1b { z21.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- "udot z19.s, z20.b, z0.b[1]\n"
- "ld1b { z22.b }, p2/Z, [x28, #-8, MUL VL]\n"
- "ld1b { z20.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "ld1b { z21.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "udot z16.s, z22.b, z0.b[2]\n"
- "udot z17.s, z20.b, z0.b[2]\n"
- "ld1b { z20.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "udot z18.s, z21.b, z0.b[2]\n"
- "udot z19.s, z20.b, z0.b[2]\n"
- "ld1b { z22.b }, p2/Z, [x28, #-4, MUL VL]\n"
- "ld1b { z20.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "udot z17.s, z26.b, z0.b[0]\n"
+ "udot z18.s, z25.b, z0.b[0]\n"
+ "udot z19.s, z24.b, z0.b[0]\n"
+ "udot z16.s, z20.b, z0.b[1]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "udot z17.s, z23.b, z0.b[1]\n"
+ "ld1b { z23.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "udot z18.s, z22.b, z0.b[1]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "udot z19.s, z21.b, z0.b[1]\n"
"ld1b { z21.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "udot z16.s, z22.b, z0.b[3]\n"
- "udot z17.s, z20.b, z0.b[3]\n"
+ "udot z16.s, z20.b, z0.b[2]\n"
"ld1b { z20.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ "udot z17.s, z26.b, z0.b[2]\n"
+ "udot z18.s, z25.b, z0.b[2]\n"
+ "udot z19.s, z24.b, z0.b[2]\n"
+ "udot z16.s, z23.b, z0.b[3]\n"
+ "udot z17.s, z22.b, z0.b[3]\n"
"udot z18.s, z21.b, z0.b[3]\n"
"udot z19.s, z20.b, z0.b[3]\n"
- "add x24, x24, #0x10\n"
"tbnz %x[flags], #31, 8f\n"
"udot z11.s, z0.b, z15.b\n"
"8:" // Height 1: Multiply loop: unique 1: skip row sum
@@ -163,49 +163,49 @@ void sve_hybrid_u8qa_dot_4x4VL (
"bgt 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1b { z22.b }, p2/Z, [x28]\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "ld1b { z20.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
- "udot z16.s, z22.b, z0.b[0]\n"
- "udot z17.s, z20.b, z0.b[0]\n"
"ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "udot z16.s, z23.b, z0.b[0]\n"
+ "udot z17.s, z22.b, z0.b[0]\n"
"udot z18.s, z21.b, z0.b[0]\n"
"udot z19.s, z20.b, z0.b[0]\n"
- "addvl x28, x28, #4\n"
"ble 10f\n"
- "ld1b { z20.b }, p2/Z, [x28]\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
"ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "udot z16.s, z20.b, z0.b[1]\n"
"ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z23.b, z0.b[1]\n"
"udot z17.s, z22.b, z0.b[1]\n"
"udot z18.s, z21.b, z0.b[1]\n"
"udot z19.s, z20.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"ble 10f\n"
- "ld1b { z20.b }, p2/Z, [x28]\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
"ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "udot z16.s, z20.b, z0.b[2]\n"
"ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z23.b, z0.b[2]\n"
"udot z17.s, z22.b, z0.b[2]\n"
"udot z18.s, z21.b, z0.b[2]\n"
"udot z19.s, z20.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"ble 10f\n"
- "ld1b { z21.b }, p2/Z, [x28]\n"
- "ld1b { z20.b }, p2/Z, [x28, #1, MUL VL]\n"
- "udot z16.s, z21.b, z0.b[3]\n"
- "udot z17.s, z20.b, z0.b[3]\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z23.b, z0.b[3]\n"
+ "udot z17.s, z22.b, z0.b[3]\n"
"udot z18.s, z21.b, z0.b[3]\n"
"udot z19.s, z20.b, z0.b[3]\n"
- "addvl x28, x28, #4\n"
"10:" // Height 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"udot z11.s, z0.b, z15.b\n"
@@ -215,35 +215,35 @@ void sve_hybrid_u8qa_dot_4x4VL (
"cmp x26, x20\n"
"bne 4b\n"
"tbnz %x[flags], #31, 12f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, #0x4\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z20.s, p2/M, z20.s\n"
"uaddv d11, p0, z11.s\n"
"mov z11.s, z11.s[0]\n"
- "neg z20.s, p2/M, z20.s\n"
"mul z11.s, p2/M, z11.s, z20.s\n"
"12:" // Height 1: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
"add z17.s, z17.s, z11.s\n"
"ld1w { z23.s }, p2/Z, [x10]\n"
- "ld1w { z22.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x10, #1, MUL VL]\n"
"add z18.s, z18.s, z11.s\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z21.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x10, #3, MUL VL]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"add z16.s, z16.s, z23.s\n"
- "add z17.s, z17.s, z22.s\n"
- "add z18.s, z18.s, z21.s\n"
- "add z19.s, z19.s, z20.s\n"
+ "add z17.s, z17.s, z20.s\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
+ "addvl x10, x10, #4\n"
+ "add z18.s, z18.s, z22.s\n"
+ "add z19.s, z19.s, z21.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
".inst 0x04b47610 // sqrdmulh z16.s, z16.s, z20.s\n"
".inst 0x04b47631 // sqrdmulh z17.s, z17.s, z20.s\n"
- "addvl x10, x10, #4\n"
".inst 0x04b47652 // sqrdmulh z18.s, z18.s, z20.s\n"
".inst 0x04b47673 // sqrdmulh z19.s, z19.s, z20.s\n"
"tbz %x[flags], #5, 13f\n"
@@ -261,19 +261,19 @@ void sve_hybrid_u8qa_dot_4x4VL (
"sqadd z19.s, z19.s, z20.s\n"
"13:" // Height 1: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z20.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z20.s\n"
+ "ld1rw { z22.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z20.s\n"
- "add z18.s, z18.s, z20.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
"ld1rw { z21.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z20.s\n"
+ "add z16.s, z16.s, z22.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z22.s\n"
+ "add z18.s, z18.s, z22.s\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z22.s\n"
"smin z16.s, p2/M, z16.s, z21.s\n"
"smin z17.s, p2/M, z17.s, z21.s\n"
"smin z18.s, p2/M, z18.s, z21.s\n"
@@ -281,8 +281,8 @@ void sve_hybrid_u8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z20.s\n"
"smax z17.s, p2/M, z17.s, z20.s\n"
"smax z18.s, p2/M, z18.s, z20.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z20.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"uzp1 z17.h, z18.h, z19.h\n"
"uzp1 z16.b, z16.b, z17.b\n"
"st1b { z16.b }, p1, [x27]\n"
@@ -300,24 +300,24 @@ void sve_hybrid_u8qa_dot_4x4VL (
"mov z15.b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"16:" // Height 2: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"17:" // Height 2: setup done
"mov x26, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -336,45 +336,45 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ble 23f\n"
"21:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z25.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
"add x24, x24, #0x10\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
- "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
- "udot z16.s, z24.b, z0.b[0]\n"
- "udot z20.s, z24.b, z1.b[0]\n"
- "ld1b { z24.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z25.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z17.s, z26.b, z0.b[0]\n"
- "udot z21.s, z26.b, z1.b[0]\n"
- "udot z18.s, z24.b, z0.b[0]\n"
- "udot z22.s, z24.b, z1.b[0]\n"
- "ld1b { z24.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z27.b }, p2/Z, [x28, #5, MUL VL]\n"
- "udot z19.s, z25.b, z0.b[0]\n"
- "udot z23.s, z25.b, z1.b[0]\n"
+ "add x23, x23, #0x10\n"
"ld1b { z26.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "udot z16.s, z25.b, z0.b[0]\n"
+ "udot z20.s, z25.b, z1.b[0]\n"
"ld1b { z25.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ "udot z17.s, z30.b, z0.b[0]\n"
+ "udot z21.s, z30.b, z1.b[0]\n"
+ "udot z18.s, z29.b, z0.b[0]\n"
+ "udot z22.s, z29.b, z1.b[0]\n"
+ "udot z19.s, z28.b, z0.b[0]\n"
+ "udot z23.s, z28.b, z1.b[0]\n"
"udot z16.s, z24.b, z0.b[1]\n"
"udot z20.s, z24.b, z1.b[1]\n"
"ld1b { z24.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
"udot z17.s, z27.b, z0.b[1]\n"
"udot z21.s, z27.b, z1.b[1]\n"
- "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
"ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
"udot z18.s, z26.b, z0.b[1]\n"
"udot z22.s, z26.b, z1.b[1]\n"
- "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
"ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
"udot z19.s, z25.b, z0.b[1]\n"
"udot z23.s, z25.b, z1.b[1]\n"
- "ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
"ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
"udot z16.s, z24.b, z0.b[2]\n"
"udot z20.s, z24.b, z1.b[2]\n"
"ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
- "add x23, x23, #0x10\n"
"udot z17.s, z30.b, z0.b[2]\n"
"udot z21.s, z30.b, z1.b[2]\n"
"udot z18.s, z29.b, z0.b[2]\n"
@@ -398,34 +398,34 @@ void sve_hybrid_u8qa_dot_4x4VL (
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z27.b }, p2/Z, [x28]\n"
"ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
- "udot z16.s, z24.b, z0.b[0]\n"
- "udot z20.s, z24.b, z1.b[0]\n"
+ "subs x25, x25, #0x4\n"
"ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "udot z16.s, z27.b, z0.b[0]\n"
+ "udot z20.s, z27.b, z1.b[0]\n"
"udot z17.s, z26.b, z0.b[0]\n"
"udot z21.s, z26.b, z1.b[0]\n"
"udot z18.s, z25.b, z0.b[0]\n"
"udot z22.s, z25.b, z1.b[0]\n"
- "addvl x28, x28, #4\n"
"udot z19.s, z24.b, z0.b[0]\n"
"udot z23.s, z24.b, z1.b[0]\n"
"ble 24f\n"
"ld1b { z27.b }, p2/Z, [x28]\n"
"ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "udot z16.s, z27.b, z0.b[1]\n"
"ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z27.b, z0.b[1]\n"
"udot z20.s, z27.b, z1.b[1]\n"
"udot z17.s, z26.b, z0.b[1]\n"
"udot z21.s, z26.b, z1.b[1]\n"
"udot z18.s, z25.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"udot z22.s, z25.b, z1.b[1]\n"
"udot z19.s, z24.b, z0.b[1]\n"
"udot z23.s, z24.b, z1.b[1]\n"
@@ -433,29 +433,29 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ld1b { z27.b }, p2/Z, [x28]\n"
"ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "udot z16.s, z27.b, z0.b[2]\n"
"ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z27.b, z0.b[2]\n"
"udot z20.s, z27.b, z1.b[2]\n"
"udot z17.s, z26.b, z0.b[2]\n"
"udot z21.s, z26.b, z1.b[2]\n"
"udot z18.s, z25.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"udot z22.s, z25.b, z1.b[2]\n"
"udot z19.s, z24.b, z0.b[2]\n"
"udot z23.s, z24.b, z1.b[2]\n"
"ble 24f\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z27.b }, p2/Z, [x28]\n"
"ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
- "udot z16.s, z24.b, z0.b[3]\n"
- "udot z20.s, z24.b, z1.b[3]\n"
"ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z27.b, z0.b[3]\n"
+ "udot z20.s, z27.b, z1.b[3]\n"
"udot z17.s, z26.b, z0.b[3]\n"
"udot z21.s, z26.b, z1.b[3]\n"
"udot z18.s, z25.b, z0.b[3]\n"
"udot z22.s, z25.b, z1.b[3]\n"
- "addvl x28, x28, #4\n"
"udot z19.s, z24.b, z0.b[3]\n"
"udot z23.s, z24.b, z1.b[3]\n"
"24:" // Height 2: Multiply loop: multiply skip
@@ -468,18 +468,18 @@ void sve_hybrid_u8qa_dot_4x4VL (
"cmp x26, x20\n"
"bne 18b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
+ "add x24, x27, x20\n"
"tbnz %x[flags], #31, 26f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, #0x4\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1rw { z24.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z24.s, p2/M, z24.s\n"
"uaddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"uaddv d12, p0, z12.s\n"
- "neg z24.s, p2/M, z24.s\n"
- "mov z12.s, z12.s[0]\n"
+ "mov z11.s, z11.s[0]\n"
"mul z11.s, p2/M, z11.s, z24.s\n"
+ "mov z12.s, z12.s[0]\n"
"mul z12.s, p2/M, z12.s, z24.s\n"
"26:" // Height 2: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
@@ -518,24 +518,24 @@ void sve_hybrid_u8qa_dot_4x4VL (
".inst 0x04b876f7 // sqrdmulh z23.s, z23.s, z24.s\n"
"tbz %x[flags], #5, 27f\n"
"and z24.d, z16.d, z0.d\n"
- "asr z24.s, z24.s, #0x1f\n"
- "sqadd z16.s, z16.s, z24.s\n"
"and z30.d, z17.d, z0.d\n"
"and z29.d, z18.d, z0.d\n"
"and z28.d, z19.d, z0.d\n"
"and z27.d, z20.d, z0.d\n"
"and z26.d, z21.d, z0.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
"and z25.d, z22.d, z0.d\n"
- "and z24.d, z23.d, z0.d\n"
"asr z30.s, z30.s, #0x1f\n"
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"asr z27.s, z27.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z24.s\n"
+ "and z24.d, z23.d, z0.d\n"
"asr z26.s, z26.s, #0x1f\n"
"asr z25.s, z25.s, #0x1f\n"
- "asr z24.s, z24.s, #0x1f\n"
"sqadd z17.s, z17.s, z30.s\n"
"sqadd z18.s, z18.s, z29.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
"sqadd z19.s, z19.s, z28.s\n"
"sqadd z20.s, z20.s, z27.s\n"
"sqadd z21.s, z21.s, z26.s\n"
@@ -543,27 +543,27 @@ void sve_hybrid_u8qa_dot_4x4VL (
"sqadd z23.s, z23.s, z24.s\n"
"27:" // Height 2: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z24.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z24.s\n"
+ "ld1rw { z26.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z24.s\n"
- "add z18.s, z18.s, z24.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z24.s\n"
- "add z20.s, z20.s, z24.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z24.s\n"
- "add z22.s, z22.s, z24.s\n"
+ "add z16.s, z16.s, z26.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z17.s, z17.s, z26.s\n"
+ "add z18.s, z18.s, z26.s\n"
"ld1rw { z25.s }, p2/Z, [x20]\n"
- "add z23.s, z23.s, z24.s\n"
+ "add z19.s, z19.s, z26.s\n"
+ "add z20.s, z20.s, z26.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z21.s, z21.s, z26.s\n"
+ "add z22.s, z22.s, z26.s\n"
"ld1rw { z24.s }, p2/Z, [x20]\n"
+ "add z23.s, z23.s, z26.s\n"
"smin z16.s, p2/M, z16.s, z25.s\n"
"smin z17.s, p2/M, z17.s, z25.s\n"
"smin z18.s, p2/M, z18.s, z25.s\n"
@@ -575,20 +575,20 @@ void sve_hybrid_u8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z24.s\n"
"smax z17.s, p2/M, z17.s, z24.s\n"
"smax z18.s, p2/M, z18.s, z24.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z24.s\n"
"smax z20.s, p2/M, z20.s, z24.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z21.s, p2/M, z21.s, z24.s\n"
"smax z22.s, p2/M, z22.s, z24.s\n"
+ "smax z23.s, p2/M, z23.s, z24.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
"uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z17.h, z22.h, z23.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z20.b, z20.b, z17.b\n"
"st1b { z16.b }, p1, [x27]\n"
- "smax z23.s, p2/M, z23.s, z24.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
- "st1b { z20.b }, p1, [x23]\n"
"addvl x27, x27, #1\n"
+ "st1b { z20.b }, p1, [x24]\n"
"28:" // Height 2: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -603,16 +603,16 @@ void sve_hybrid_u8qa_dot_4x4VL (
"mov z15.b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"30:" // Height 3: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -623,8 +623,8 @@ void sve_hybrid_u8qa_dot_4x4VL (
"mov x26, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -646,57 +646,57 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ble 37f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z3.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z31.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x22]\n"
- "ld1b { z28.b }, p2/Z, [x28]\n"
- "udot z16.s, z28.b, z0.b[0]\n"
- "udot z20.s, z28.b, z1.b[0]\n"
- "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
- "udot z24.s, z28.b, z2.b[0]\n"
- "udot z17.s, z30.b, z0.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z21.s, z30.b, z1.b[0]\n"
- "udot z25.s, z30.b, z2.b[0]\n"
- "ld1b { z3.b }, p2/Z, [x28, #4, MUL VL]\n"
- "udot z18.s, z29.b, z0.b[0]\n"
- "udot z22.s, z29.b, z1.b[0]\n"
- "ld1b { z31.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1b { z30.b }, p2/Z, [x28, #6, MUL VL]\n"
- "udot z26.s, z29.b, z2.b[0]\n"
- "udot z19.s, z28.b, z0.b[0]\n"
+ "add x22, x22, #0x10\n"
+ "udot z16.s, z5.b, z0.b[0]\n"
+ "udot z20.s, z5.b, z1.b[0]\n"
+ "udot z17.s, z29.b, z0.b[0]\n"
+ "udot z21.s, z29.b, z1.b[0]\n"
+ "udot z18.s, z4.b, z0.b[0]\n"
+ "udot z24.s, z5.b, z2.b[0]\n"
+ "udot z25.s, z29.b, z2.b[0]\n"
"ld1b { z29.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
+ "udot z22.s, z4.b, z1.b[0]\n"
+ "udot z26.s, z4.b, z2.b[0]\n"
+ "udot z19.s, z28.b, z0.b[0]\n"
"udot z23.s, z28.b, z1.b[0]\n"
"udot z27.s, z28.b, z2.b[0]\n"
+ "udot z16.s, z3.b, z0.b[1]\n"
"ld1b { z28.b }, p2/Z, [x28, #-8, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "udot z16.s, z3.b, z0.b[1]\n"
"udot z20.s, z3.b, z1.b[1]\n"
- "ld1b { z4.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "add x23, x23, #0x10\n"
"udot z24.s, z3.b, z2.b[1]\n"
- "udot z17.s, z31.b, z0.b[1]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-6, MUL VL]\n"
"ld1b { z3.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "add x22, x22, #0x10\n"
+ "udot z17.s, z31.b, z0.b[1]\n"
"udot z21.s, z31.b, z1.b[1]\n"
"udot z25.s, z31.b, z2.b[1]\n"
- "ld1b { z31.b }, p2/Z, [x28, #-4, MUL VL]\n"
"udot z18.s, z30.b, z0.b[1]\n"
+ "ld1b { z31.b }, p2/Z, [x28, #-4, MUL VL]\n"
"udot z22.s, z30.b, z1.b[1]\n"
"udot z26.s, z30.b, z2.b[1]\n"
- "udot z19.s, z29.b, z0.b[1]\n"
"ld1b { z30.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "udot z19.s, z29.b, z0.b[1]\n"
"udot z23.s, z29.b, z1.b[1]\n"
"udot z27.s, z29.b, z2.b[1]\n"
- "ld1b { z29.b }, p2/Z, [x28, #-2, MUL VL]\n"
"udot z16.s, z28.b, z0.b[2]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-2, MUL VL]\n"
"udot z20.s, z28.b, z1.b[2]\n"
"udot z24.s, z28.b, z2.b[2]\n"
- "udot z17.s, z5.b, z0.b[2]\n"
"ld1b { z28.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ "udot z17.s, z5.b, z0.b[2]\n"
"udot z21.s, z5.b, z1.b[2]\n"
"udot z25.s, z5.b, z2.b[2]\n"
"udot z18.s, z4.b, z0.b[2]\n"
@@ -727,23 +727,23 @@ void sve_hybrid_u8qa_dot_4x4VL (
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x22]\n"
- "ld1b { z28.b }, p2/Z, [x28]\n"
- "udot z16.s, z28.b, z0.b[0]\n"
- "udot z20.s, z28.b, z1.b[0]\n"
- "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
- "udot z24.s, z28.b, z2.b[0]\n"
+ "udot z16.s, z31.b, z0.b[0]\n"
+ "udot z20.s, z31.b, z1.b[0]\n"
"udot z17.s, z30.b, z0.b[0]\n"
- "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
"udot z21.s, z30.b, z1.b[0]\n"
- "udot z25.s, z30.b, z2.b[0]\n"
- "addvl x28, x28, #4\n"
"udot z18.s, z29.b, z0.b[0]\n"
"udot z22.s, z29.b, z1.b[0]\n"
+ "udot z24.s, z31.b, z2.b[0]\n"
+ "udot z25.s, z30.b, z2.b[0]\n"
"udot z26.s, z29.b, z2.b[0]\n"
"udot z19.s, z28.b, z0.b[0]\n"
"udot z23.s, z28.b, z1.b[0]\n"
@@ -752,14 +752,14 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ld1b { z31.b }, p2/Z, [x28]\n"
"ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "udot z16.s, z31.b, z0.b[1]\n"
"ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z31.b, z0.b[1]\n"
"udot z20.s, z31.b, z1.b[1]\n"
"udot z24.s, z31.b, z2.b[1]\n"
"udot z17.s, z30.b, z0.b[1]\n"
"udot z21.s, z30.b, z1.b[1]\n"
- "addvl x28, x28, #4\n"
"udot z25.s, z30.b, z2.b[1]\n"
"udot z18.s, z29.b, z0.b[1]\n"
"udot z22.s, z29.b, z1.b[1]\n"
@@ -771,14 +771,14 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ld1b { z31.b }, p2/Z, [x28]\n"
"ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "udot z16.s, z31.b, z0.b[2]\n"
"ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z31.b, z0.b[2]\n"
"udot z20.s, z31.b, z1.b[2]\n"
"udot z24.s, z31.b, z2.b[2]\n"
"udot z17.s, z30.b, z0.b[2]\n"
"udot z21.s, z30.b, z1.b[2]\n"
- "addvl x28, x28, #4\n"
"udot z25.s, z30.b, z2.b[2]\n"
"udot z18.s, z29.b, z0.b[2]\n"
"udot z22.s, z29.b, z1.b[2]\n"
@@ -789,15 +789,15 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ble 38f\n"
"ld1b { z31.b }, p2/Z, [x28]\n"
"ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
- "udot z16.s, z31.b, z0.b[3]\n"
- "udot z20.s, z31.b, z1.b[3]\n"
"ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z31.b, z0.b[3]\n"
+ "udot z20.s, z31.b, z1.b[3]\n"
"udot z24.s, z31.b, z2.b[3]\n"
"udot z17.s, z30.b, z0.b[3]\n"
"udot z21.s, z30.b, z1.b[3]\n"
"udot z25.s, z30.b, z2.b[3]\n"
- "addvl x28, x28, #4\n"
"udot z18.s, z29.b, z0.b[3]\n"
"udot z22.s, z29.b, z1.b[3]\n"
"udot z26.s, z29.b, z2.b[3]\n"
@@ -815,22 +815,22 @@ void sve_hybrid_u8qa_dot_4x4VL (
"cmp x26, x20\n"
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
"tbnz %x[flags], #31, 40f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, #0x4\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1rw { z28.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z28.s, p2/M, z28.s\n"
"uaddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"uaddv d12, p0, z12.s\n"
"uaddv d13, p0, z13.s\n"
+ "mov z11.s, z11.s[0]\n"
"mov z12.s, z12.s[0]\n"
- "mov z13.s, z13.s[0]\n"
- "neg z28.s, p2/M, z28.s\n"
"mul z11.s, p2/M, z11.s, z28.s\n"
"mul z12.s, p2/M, z12.s, z28.s\n"
+ "mov z13.s, z13.s[0]\n"
"mul z13.s, p2/M, z13.s, z28.s\n"
"40:" // Height 3: skip row sum fixup
"add z16.s, z16.s, z11.s\n"
@@ -885,18 +885,18 @@ void sve_hybrid_u8qa_dot_4x4VL (
"and z30.d, z18.d, z0.d\n"
"and z29.d, z19.d, z0.d\n"
"and z28.d, z20.d, z0.d\n"
+ "and z3.d, z21.d, z0.d\n"
"asr z1.s, z1.s, #0x1f\n"
"asr z31.s, z31.s, #0x1f\n"
"asr z30.s, z30.s, #0x1f\n"
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
+ "and z2.d, z22.d, z0.d\n"
"sqadd z16.s, z16.s, z1.s\n"
"sqadd z17.s, z17.s, z31.s\n"
"sqadd z18.s, z18.s, z30.s\n"
"sqadd z19.s, z19.s, z29.s\n"
"sqadd z20.s, z20.s, z28.s\n"
- "and z3.d, z21.d, z0.d\n"
- "and z2.d, z22.d, z0.d\n"
"and z1.d, z23.d, z0.d\n"
"and z31.d, z24.d, z0.d\n"
"and z30.d, z25.d, z0.d\n"
@@ -918,35 +918,35 @@ void sve_hybrid_u8qa_dot_4x4VL (
"sqadd z27.s, z27.s, z28.s\n"
"41:" // Height 3: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z28.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z28.s\n"
+ "ld1rw { z30.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z28.s\n"
- "add z18.s, z18.s, z28.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z28.s\n"
- "add z20.s, z20.s, z28.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z28.s\n"
- "add z22.s, z22.s, z28.s\n"
+ "add z16.s, z16.s, z30.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z28.s\n"
- "add z24.s, z24.s, z28.s\n"
+ "add z17.s, z17.s, z30.s\n"
+ "add z18.s, z18.s, z30.s\n"
".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z28.s\n"
- "add z26.s, z26.s, z28.s\n"
+ "add z19.s, z19.s, z30.s\n"
+ "add z20.s, z20.s, z30.s\n"
".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z21.s, z21.s, z30.s\n"
+ "add z22.s, z22.s, z30.s\n"
"ld1rw { z29.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z28.s\n"
+ "add z23.s, z23.s, z30.s\n"
+ "add z24.s, z24.s, z30.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z30.s\n"
+ "add z26.s, z26.s, z30.s\n"
"ld1rw { z28.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z30.s\n"
"smin z16.s, p2/M, z16.s, z29.s\n"
"smin z17.s, p2/M, z17.s, z29.s\n"
"smin z18.s, p2/M, z18.s, z29.s\n"
@@ -962,28 +962,28 @@ void sve_hybrid_u8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z28.s\n"
"smax z17.s, p2/M, z17.s, z28.s\n"
"smax z18.s, p2/M, z18.s, z28.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z28.s\n"
"smax z20.s, p2/M, z20.s, z28.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z21.s, p2/M, z21.s, z28.s\n"
"smax z22.s, p2/M, z22.s, z28.s\n"
- "uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
"smax z23.s, p2/M, z23.s, z28.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"smax z24.s, p2/M, z24.s, z28.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
"smax z25.s, p2/M, z25.s, z28.s\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
"smax z26.s, p2/M, z26.s, z28.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x23]\n"
"smax z27.s, p2/M, z27.s, z28.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
- "st1b { z24.b }, p1, [x22]\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z18.h, z22.h, z23.h\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "uzp1 z20.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x27]\n"
"addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z20.b }, p1, [x24]\n"
+ "st1b { z24.b }, p1, [x23]\n"
"42:" // Height 3: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -991,27 +991,28 @@ void sve_hybrid_u8qa_dot_4x4VL (
"b 58f\n"
"43:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x4\n"
"mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
- "mov z13.s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov z15.b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "mov z15.b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 4: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -1026,8 +1027,8 @@ void sve_hybrid_u8qa_dot_4x4VL (
"mov x26, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1052,37 +1053,37 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ble 51f\n"
"49:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
"add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x22]\n"
"ld1rqb { z3.b }, p0/Z, [x21]\n"
- "add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
- "ld1b { z5.b }, p2/Z, [x28]\n"
- "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "add x21, x21, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"udot z16.s, z5.b, z0.b[0]\n"
"udot z20.s, z5.b, z1.b[0]\n"
- "ld1b { z10.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "udot z17.s, z10.b, z0.b[0]\n"
+ "udot z21.s, z10.b, z1.b[0]\n"
"udot z24.s, z5.b, z2.b[0]\n"
"udot z28.s, z5.b, z3.b[0]\n"
- "udot z17.s, z4.b, z0.b[0]\n"
- "udot z21.s, z4.b, z1.b[0]\n"
- "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
- "udot z25.s, z4.b, z2.b[0]\n"
- "udot z29.s, z4.b, z3.b[0]\n"
- "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
"addvl x28, x28, #16\n"
+ "udot z25.s, z10.b, z2.b[0]\n"
+ "udot z29.s, z10.b, z3.b[0]\n"
+ "udot z18.s, z4.b, z0.b[0]\n"
+ "udot z22.s, z4.b, z1.b[0]\n"
+ "udot z26.s, z4.b, z2.b[0]\n"
+ "udot z30.s, z4.b, z3.b[0]\n"
"ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z30.s, z10.b, z3.b[0]\n"
"ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
- "add x21, x21, #0x10\n"
"udot z19.s, z9.b, z0.b[0]\n"
"udot z23.s, z9.b, z1.b[0]\n"
"udot z27.s, z9.b, z2.b[0]\n"
@@ -1152,26 +1153,26 @@ void sve_hybrid_u8qa_dot_4x4VL (
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
- "subs x25, x25, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x22]\n"
"ld1rqb { z3.b }, p0/Z, [x21]\n"
- "ld1b { z7.b }, p2/Z, [x28]\n"
- "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
"udot z16.s, z7.b, z0.b[0]\n"
"udot z20.s, z7.b, z1.b[0]\n"
- "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
- "udot z24.s, z7.b, z2.b[0]\n"
- "udot z28.s, z7.b, z3.b[0]\n"
"udot z17.s, z6.b, z0.b[0]\n"
"udot z21.s, z6.b, z1.b[0]\n"
- "addvl x28, x28, #4\n"
- "udot z25.s, z6.b, z2.b[0]\n"
- "udot z29.s, z6.b, z3.b[0]\n"
"udot z18.s, z5.b, z0.b[0]\n"
"udot z22.s, z5.b, z1.b[0]\n"
+ "udot z24.s, z7.b, z2.b[0]\n"
+ "udot z28.s, z7.b, z3.b[0]\n"
+ "udot z25.s, z6.b, z2.b[0]\n"
+ "udot z29.s, z6.b, z3.b[0]\n"
"udot z26.s, z5.b, z2.b[0]\n"
"udot z30.s, z5.b, z3.b[0]\n"
"udot z19.s, z4.b, z0.b[0]\n"
@@ -1182,14 +1183,14 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ld1b { z7.b }, p2/Z, [x28]\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "udot z16.s, z7.b, z0.b[1]\n"
"ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z7.b, z0.b[1]\n"
"udot z20.s, z7.b, z1.b[1]\n"
"udot z24.s, z7.b, z2.b[1]\n"
"udot z28.s, z7.b, z3.b[1]\n"
"udot z17.s, z6.b, z0.b[1]\n"
- "addvl x28, x28, #4\n"
"udot z21.s, z6.b, z1.b[1]\n"
"udot z25.s, z6.b, z2.b[1]\n"
"udot z29.s, z6.b, z3.b[1]\n"
@@ -1205,14 +1206,14 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ld1b { z7.b }, p2/Z, [x28]\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x4\n"
- "udot z16.s, z7.b, z0.b[2]\n"
"ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z7.b, z0.b[2]\n"
"udot z20.s, z7.b, z1.b[2]\n"
"udot z24.s, z7.b, z2.b[2]\n"
"udot z28.s, z7.b, z3.b[2]\n"
"udot z17.s, z6.b, z0.b[2]\n"
- "addvl x28, x28, #4\n"
"udot z21.s, z6.b, z1.b[2]\n"
"udot z25.s, z6.b, z2.b[2]\n"
"udot z29.s, z6.b, z3.b[2]\n"
@@ -1227,15 +1228,15 @@ void sve_hybrid_u8qa_dot_4x4VL (
"ble 52f\n"
"ld1b { z7.b }, p2/Z, [x28]\n"
"ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
- "udot z16.s, z7.b, z0.b[3]\n"
- "udot z20.s, z7.b, z1.b[3]\n"
"ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "udot z16.s, z7.b, z0.b[3]\n"
+ "udot z20.s, z7.b, z1.b[3]\n"
"udot z24.s, z7.b, z2.b[3]\n"
"udot z28.s, z7.b, z3.b[3]\n"
"udot z17.s, z6.b, z0.b[3]\n"
"udot z21.s, z6.b, z1.b[3]\n"
- "addvl x28, x28, #4\n"
"udot z25.s, z6.b, z2.b[3]\n"
"udot z29.s, z6.b, z3.b[3]\n"
"udot z18.s, z5.b, z0.b[3]\n"
@@ -1258,25 +1259,25 @@ void sve_hybrid_u8qa_dot_4x4VL (
"cmp x26, x20\n"
"bne 46b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x27, x20\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
"add x22, x23, x20\n"
- "add x21, x22, x20\n"
"tbnz %x[flags], #31, 54f\n"
- "mov x20, #0x4\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, #0x4\n"
"add x20, %x[qp], %[b_offset]\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z0.s, p2/M, z0.s\n"
"uaddv d11, p0, z11.s\n"
- "mov z11.s, z11.s[0]\n"
"uaddv d12, p0, z12.s\n"
"uaddv d13, p0, z13.s\n"
- "mov z12.s, z12.s[0]\n"
- "mov z13.s, z13.s[0]\n"
"uaddv d14, p0, z14.s\n"
- "neg z0.s, p2/M, z0.s\n"
- "mov z14.s, z14.s[0]\n"
+ "mov z11.s, z11.s[0]\n"
+ "mov z12.s, z12.s[0]\n"
"mul z11.s, p2/M, z11.s, z0.s\n"
"mul z12.s, p2/M, z12.s, z0.s\n"
+ "mov z13.s, z13.s[0]\n"
+ "mov z14.s, z14.s[0]\n"
"mul z13.s, p2/M, z13.s, z0.s\n"
"mul z14.s, p2/M, z14.s, z0.s\n"
"54:" // Height 4: skip row sum fixup
@@ -1341,32 +1342,32 @@ void sve_hybrid_u8qa_dot_4x4VL (
"tbz %x[flags], #5, 55f\n"
"and z2.d, z16.d, z0.d\n"
"and z1.d, z17.d, z0.d\n"
- "asr z2.s, z2.s, #0x1f\n"
- "asr z1.s, z1.s, #0x1f\n"
- "sqadd z16.s, z16.s, z2.s\n"
- "sqadd z17.s, z17.s, z1.s\n"
"and z7.d, z18.d, z0.d\n"
"and z6.d, z19.d, z0.d\n"
"and z5.d, z20.d, z0.d\n"
"and z4.d, z21.d, z0.d\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
"and z3.d, z22.d, z0.d\n"
- "and z2.d, z23.d, z0.d\n"
- "and z1.d, z24.d, z0.d\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z2.s\n"
+ "sqadd z17.s, z17.s, z1.s\n"
+ "and z2.d, z23.d, z0.d\n"
+ "and z1.d, z24.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z3.s, z3.s, #0x1f\n"
- "asr z2.s, z2.s, #0x1f\n"
- "asr z1.s, z1.s, #0x1f\n"
"sqadd z18.s, z18.s, z7.s\n"
"sqadd z19.s, z19.s, z6.s\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
"sqadd z20.s, z20.s, z5.s\n"
"sqadd z21.s, z21.s, z4.s\n"
"sqadd z22.s, z22.s, z3.s\n"
+ "and z7.d, z25.d, z0.d\n"
"sqadd z23.s, z23.s, z2.s\n"
"sqadd z24.s, z24.s, z1.s\n"
- "and z7.d, z25.d, z0.d\n"
"and z6.d, z26.d, z0.d\n"
"and z5.d, z27.d, z0.d\n"
"and z4.d, z28.d, z0.d\n"
@@ -1389,43 +1390,43 @@ void sve_hybrid_u8qa_dot_4x4VL (
"sqadd z31.s, z31.s, z1.s\n"
"55:" // Height 4: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z16.s, z16.s, z2.s\n"
+ "ld1rw { z2.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z2.s\n"
- "add z18.s, z18.s, z2.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
- "add z19.s, z19.s, z2.s\n"
- "add z20.s, z20.s, z2.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
- "add z21.s, z21.s, z2.s\n"
- "add z22.s, z22.s, z2.s\n"
+ "add z16.s, z16.s, z2.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z23.s, z23.s, z2.s\n"
- "add z24.s, z24.s, z2.s\n"
+ "add z17.s, z17.s, z2.s\n"
+ "add z18.s, z18.s, z2.s\n"
".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z2.s\n"
- "add z26.s, z26.s, z2.s\n"
+ "add z19.s, z19.s, z2.s\n"
+ "add z20.s, z20.s, z2.s\n"
".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
- "add z27.s, z27.s, z2.s\n"
- "add z28.s, z28.s, z2.s\n"
+ "add z21.s, z21.s, z2.s\n"
+ "add z22.s, z22.s, z2.s\n"
".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
- "add z29.s, z29.s, z2.s\n"
- "add z30.s, z30.s, z2.s\n"
+ "add z23.s, z23.s, z2.s\n"
+ "add z24.s, z24.s, z2.s\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z25.s, z25.s, z2.s\n"
+ "add z26.s, z26.s, z2.s\n"
"ld1rw { z1.s }, p2/Z, [x20]\n"
- "add z31.s, z31.s, z2.s\n"
+ "add z27.s, z27.s, z2.s\n"
+ "add z28.s, z28.s, z2.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z29.s, z29.s, z2.s\n"
+ "add z30.s, z30.s, z2.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z31.s, z31.s, z2.s\n"
"smin z16.s, p2/M, z16.s, z1.s\n"
"smin z17.s, p2/M, z17.s, z1.s\n"
"smin z18.s, p2/M, z18.s, z1.s\n"
@@ -1445,36 +1446,36 @@ void sve_hybrid_u8qa_dot_4x4VL (
"smax z16.s, p2/M, z16.s, z0.s\n"
"smax z17.s, p2/M, z17.s, z0.s\n"
"smax z18.s, p2/M, z18.s, z0.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z0.s\n"
"smax z20.s, p2/M, z20.s, z0.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z21.s, p2/M, z21.s, z0.s\n"
"smax z22.s, p2/M, z22.s, z0.s\n"
- "uzp1 z20.h, z20.h, z21.h\n"
- "st1b { z16.b }, p1, [x27]\n"
"smax z23.s, p2/M, z23.s, z0.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"smax z24.s, p2/M, z24.s, z0.s\n"
- "uzp1 z16.h, z22.h, z23.h\n"
- "uzp1 z20.b, z20.b, z16.b\n"
"smax z25.s, p2/M, z25.s, z0.s\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
"smax z26.s, p2/M, z26.s, z0.s\n"
- "uzp1 z24.h, z24.h, z25.h\n"
- "st1b { z20.b }, p1, [x23]\n"
"smax z27.s, p2/M, z27.s, z0.s\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
"smax z28.s, p2/M, z28.s, z0.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"smax z29.s, p2/M, z29.s, z0.s\n"
+ "uzp1 z17.h, z22.h, z23.h\n"
"smax z30.s, p2/M, z30.s, z0.s\n"
- "uzp1 z28.h, z28.h, z29.h\n"
- "st1b { z24.b }, p1, [x22]\n"
"smax z31.s, p2/M, z31.s, z0.s\n"
- "uzp1 z16.h, z30.h, z31.h\n"
- "uzp1 z28.b, z28.b, z16.b\n"
- "st1b { z28.b }, p1, [x21]\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z18.h, z26.h, z27.h\n"
+ "uzp1 z28.h, z28.h, z29.h\n"
+ "uzp1 z20.b, z20.b, z17.b\n"
+ "uzp1 z17.h, z30.h, z31.h\n"
+ "st1b { z16.b }, p1, [x27]\n"
"addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z18.b\n"
+ "uzp1 z28.b, z28.b, z17.b\n"
+ "st1b { z20.b }, p1, [x24]\n"
+ "st1b { z24.b }, p1, [x23]\n"
+ "st1b { z28.b }, p1, [x22]\n"
"56:" // Height 4: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -1491,8 +1492,8 @@ void sve_hybrid_u8qa_dot_4x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
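
The change threaded through this whole file is visible in the constraint lists above: output_ptr leaves the inline-asm operand list (the old [output_ptr] "+&r" output) and becomes a KernelArgs field, loaded with "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]" and, in the Height-4 path, written back with madd/str instead of mutating the operand in place. A minimal standalone sketch of the same pattern follows; the struct and function names are illustrative, and only the offsetof()/"I"-constraint idiom is taken from the diff.

#include <cstddef>
#include <cstdint>

struct ArgsSketch {
    void *output_ptr;
    size_t output_offset;
};

void store_and_advance(ArgsSketch &ka, uint8_t value)
{
    __asm__ __volatile__(
        "ldr x27, [%x[args], %[off_out]]\n"   // fetch the pointer from the struct
        "strb %w[value], [x27]\n"             // use it
        "add x27, x27, #1\n"
        "str x27, [%x[args], %[off_out]]\n"   // write the advanced pointer back
        :
        : [args] "r"(&ka), [off_out] "I"(offsetof(ArgsSketch, output_ptr)),
          [value] "r"(value)
        : "memory", "x27");
}

Keeping the pointer in memory rather than in a tied "+r" operand frees a register operand slot and lets the Height-4 prologue advance the stored pointer once with madd/str, so the x27 working copy can be reloaded per height block.
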
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL.hpp
index 5de68cc738..ab37e6ad5b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return false;
}
- StdTransformsSVE<rhs_operand_type, result_type, 4, 8, 8> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 8, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp
index 69894bec41..212e178065 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_mmla_4x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,18 +45,18 @@ void sve_hybrid_u8qa_mmla_4x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -91,24 +91,24 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"3:" // Height 1: setup done
"mov x26, #0x0\n"
"4:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 5f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -124,43 +124,43 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"ble 9f\n"
"7:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z30.b }, p2/Z, [x28]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z26.d\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
- ".inst 0x45d89810 // ummla z16.s, z0.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #1, MUL VL]\n"
- "trn2 z1.d, z1.d, z26.d\n"
- "ld1b { z24.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z26.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45d99814 // ummla z20.s, z0.b, z25.b\n"
- ".inst 0x45d89811 // ummla z17.s, z0.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45da9815 // ummla z21.s, z0.b, z26.b\n"
- ".inst 0x45d99812 // ummla z18.s, z0.b, z25.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #6, MUL VL]\n"
- "ld1b { z25.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "trn1 z0.d, z1.d, z31.d\n"
+ ".inst 0x45de9810 // ummla z16.s, z0.b, z30.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45d89816 // ummla z22.s, z0.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45da9813 // ummla z19.s, z0.b, z26.b\n"
- ".inst 0x45d99817 // ummla z23.s, z0.b, z25.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45d89830 // ummla z16.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x45da9834 // ummla z20.s, z1.b, z26.b\n"
+ "trn2 z1.d, z1.d, z31.d\n"
+ ".inst 0x45dd9814 // ummla z20.s, z0.b, z29.b\n"
+ ".inst 0x45dc9811 // ummla z17.s, z0.b, z28.b\n"
+ ".inst 0x45db9815 // ummla z21.s, z0.b, z27.b\n"
+ ".inst 0x45da9812 // ummla z18.s, z0.b, z26.b\n"
+ "ld1b { z31.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45d99816 // ummla z22.s, z0.b, z25.b\n"
+ ".inst 0x45d89813 // ummla z19.s, z0.b, z24.b\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x45c89817 // ummla z23.s, z0.b, z8.b\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
"ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45df9830 // ummla z16.s, z1.b, z31.b\n"
"ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
- ".inst 0x45d99831 // ummla z17.s, z1.b, z25.b\n"
- ".inst 0x45d89835 // ummla z21.s, z1.b, z24.b\n"
"ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x45de9834 // ummla z20.s, z1.b, z30.b\n"
+ ".inst 0x45dd9831 // ummla z17.s, z1.b, z29.b\n"
+ ".inst 0x45dc9835 // ummla z21.s, z1.b, z28.b\n"
".inst 0x45db9832 // ummla z18.s, z1.b, z27.b\n"
".inst 0x45da9836 // ummla z22.s, z1.b, z26.b\n"
".inst 0x45d99833 // ummla z19.s, z1.b, z25.b\n"
".inst 0x45d89837 // ummla z23.s, z1.b, z24.b\n"
- "add x24, x24, #0x10\n"
"tbnz %x[flags], #31, 8f\n"
"udot z11.s, z0.b, z15.b\n"
"udot z11.s, z1.b, z15.b\n"
@@ -170,45 +170,45 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"bgt 7b\n"
"9:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "trn1 z0.d, z1.d, z27.d\n"
"ld1b { z24.b }, p2/Z, [x28]\n"
- ".inst 0x45d89810 // ummla z16.s, z0.b, z24.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
"subs x25, x25, #0x8\n"
- "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
- "trn2 z1.d, z1.d, z27.d\n"
- ".inst 0x45da9814 // ummla z20.s, z0.b, z26.b\n"
"ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45d99811 // ummla z17.s, z0.b, z25.b\n"
- ".inst 0x45d89815 // ummla z21.s, z0.b, z24.b\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
"ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z31.d\n"
+ ".inst 0x45d89810 // ummla z16.s, z0.b, z24.b\n"
"ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ "trn2 z1.d, z1.d, z31.d\n"
+ ".inst 0x45de9814 // ummla z20.s, z0.b, z30.b\n"
+ ".inst 0x45dd9811 // ummla z17.s, z0.b, z29.b\n"
+ ".inst 0x45dc9815 // ummla z21.s, z0.b, z28.b\n"
".inst 0x45db9812 // ummla z18.s, z0.b, z27.b\n"
".inst 0x45da9816 // ummla z22.s, z0.b, z26.b\n"
".inst 0x45d99813 // ummla z19.s, z0.b, z25.b\n"
".inst 0x45d89817 // ummla z23.s, z0.b, z24.b\n"
- "addvl x28, x28, #8\n"
"ble 10f\n"
"ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45d89830 // ummla z16.s, z1.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45d89834 // ummla z20.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45d99831 // ummla z17.s, z1.b, z25.b\n"
- ".inst 0x45d89835 // ummla z21.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45d99832 // ummla z18.s, z1.b, z25.b\n"
- ".inst 0x45d89836 // ummla z22.s, z1.b, z24.b\n"
"ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45de9834 // ummla z20.s, z1.b, z30.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45dd9831 // ummla z17.s, z1.b, z29.b\n"
+ ".inst 0x45dc9835 // ummla z21.s, z1.b, z28.b\n"
+ ".inst 0x45db9832 // ummla z18.s, z1.b, z27.b\n"
+ ".inst 0x45da9836 // ummla z22.s, z1.b, z26.b\n"
".inst 0x45d99833 // ummla z19.s, z1.b, z25.b\n"
".inst 0x45d89837 // ummla z23.s, z1.b, z24.b\n"
- "addvl x28, x28, #8\n"
"10:" // Height 1: Multiply loop: multiply skip
"tbnz %x[flags], #31, 11f\n"
"udot z11.s, z0.b, z15.b\n"
@@ -225,32 +225,32 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z23.d, z16.d\n"
"tbnz %x[flags], #31, 12f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
- "neg z16.s, p2/M, z16.s\n"
+ "ld1rw { z9.s }, p2/Z, [x20]\n"
+ "neg z9.s, p2/M, z9.s\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z16.s\n"
+ "mul z11.s, p2/M, z11.s, z9.s\n"
"12:" // Height 1: skip row sum fixup
"add z23.s, z23.s, z11.s\n"
"add z17.s, z17.s, z11.s\n"
"ld1w { z22.s }, p2/Z, [x10]\n"
- "ld1w { z21.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x10, #1, MUL VL]\n"
"add z18.s, z18.s, z11.s\n"
"add z19.s, z19.s, z11.s\n"
- "ld1w { z20.s }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x10, #3, MUL VL]\n"
"add x20, %x[qp], %[per_layer_mul]\n"
"orr %x[flags], %x[flags], #0x80000000\n"
"add z23.s, z23.s, z22.s\n"
- "add z17.s, z17.s, z21.s\n"
- "add z18.s, z18.s, z20.s\n"
- "add z19.s, z19.s, z16.s\n"
+ "add z17.s, z17.s, z24.s\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
"add x20, %x[qp], %[per_layer_right_shift]\n"
+ "addvl x10, x10, #4\n"
+ "add z18.s, z18.s, z21.s\n"
+ "add z19.s, z19.s, z20.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
".inst 0x04b076f7 // sqrdmulh z23.s, z23.s, z16.s\n"
".inst 0x04b07631 // sqrdmulh z17.s, z17.s, z16.s\n"
- "addvl x10, x10, #4\n"
".inst 0x04b07652 // sqrdmulh z18.s, z18.s, z16.s\n"
".inst 0x04b07673 // sqrdmulh z19.s, z19.s, z16.s\n"
"tbz %x[flags], #5, 13f\n"
@@ -268,19 +268,19 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"sqadd z19.s, z19.s, z16.s\n"
"13:" // Height 1: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z16.s }, p2/Z, [x20]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z16.s\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z16.s\n"
- "add z18.s, z18.s, z16.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
"ld1rw { z20.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z16.s\n"
+ "add z23.s, z23.s, z21.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z21.s\n"
+ "add z18.s, z18.s, z21.s\n"
"ld1rw { z16.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z21.s\n"
"smin z23.s, p2/M, z23.s, z20.s\n"
"smin z17.s, p2/M, z17.s, z20.s\n"
"smin z18.s, p2/M, z18.s, z20.s\n"
@@ -288,8 +288,8 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"smax z23.s, p2/M, z23.s, z16.s\n"
"smax z17.s, p2/M, z17.s, z16.s\n"
"smax z18.s, p2/M, z18.s, z16.s\n"
- "uzp1 z23.h, z23.h, z17.h\n"
"smax z19.s, p2/M, z19.s, z16.s\n"
+ "uzp1 z23.h, z23.h, z17.h\n"
"uzp1 z16.h, z18.h, z19.h\n"
"uzp1 z23.b, z23.b, z16.b\n"
"st1b { z23.b }, p1, [x27]\n"
@@ -307,24 +307,24 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z15.b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"16:" // Height 2: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"17:" // Height 2: setup done
"mov x26, #0x0\n"
"18:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 19f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -343,45 +343,45 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"ble 23f\n"
"21:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z26.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z26.d\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
- ".inst 0x45d89810 // ummla z16.s, z0.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #1, MUL VL]\n"
- "trn2 z1.d, z1.d, z26.d\n"
- "ld1b { z24.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z26.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45d99814 // ummla z20.s, z0.b, z25.b\n"
- ".inst 0x45d89811 // ummla z17.s, z0.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45da9815 // ummla z21.s, z0.b, z26.b\n"
- ".inst 0x45d99812 // ummla z18.s, z0.b, z25.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1rqb { z25.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1b { z24.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ ".inst 0x45df9810 // ummla z16.s, z0.b, z31.b\n"
"ld1b { z25.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45d89816 // ummla z22.s, z0.b, z24.b\n"
+ ".inst 0x45de9814 // ummla z20.s, z0.b, z30.b\n"
+ ".inst 0x45dd9811 // ummla z17.s, z0.b, z29.b\n"
+ ".inst 0x45dc9815 // ummla z21.s, z0.b, z28.b\n"
+ ".inst 0x45db9812 // ummla z18.s, z0.b, z27.b\n"
+ ".inst 0x45da9816 // ummla z22.s, z0.b, z26.b\n"
+ ".inst 0x45d89813 // ummla z19.s, z0.b, z24.b\n"
"ld1b { z24.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45da9813 // ummla z19.s, z0.b, z26.b\n"
".inst 0x45d99817 // ummla z23.s, z0.b, z25.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45d89830 // ummla z16.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #-5, MUL VL]\n"
- ".inst 0x45da9834 // ummla z20.s, z1.b, z26.b\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
"ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45d89830 // ummla z16.s, z1.b, z24.b\n"
"ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
- ".inst 0x45d99831 // ummla z17.s, z1.b, z25.b\n"
- ".inst 0x45d89835 // ummla z21.s, z1.b, z24.b\n"
"ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x45de9834 // ummla z20.s, z1.b, z30.b\n"
+ ".inst 0x45dd9831 // ummla z17.s, z1.b, z29.b\n"
+ ".inst 0x45dc9835 // ummla z21.s, z1.b, z28.b\n"
".inst 0x45db9832 // ummla z18.s, z1.b, z27.b\n"
".inst 0x45da9836 // ummla z22.s, z1.b, z26.b\n"
".inst 0x45d99833 // ummla z19.s, z1.b, z25.b\n"
".inst 0x45d89837 // ummla z23.s, z1.b, z24.b\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"tbnz %x[flags], #31, 22f\n"
"udot z11.s, z0.b, z15.b\n"
"udot z11.s, z1.b, z15.b\n"
@@ -391,46 +391,46 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"bgt 21b\n"
"23:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z27.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z27.d\n"
- "ld1b { z24.b }, p2/Z, [x28]\n"
- ".inst 0x45d89810 // ummla z16.s, z0.b, z24.b\n"
- "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #1, MUL VL]\n"
"subs x25, x25, #0x8\n"
- "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
- "trn2 z1.d, z1.d, z27.d\n"
- ".inst 0x45da9814 // ummla z20.s, z0.b, z26.b\n"
- "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45d99811 // ummla z17.s, z0.b, z25.b\n"
- ".inst 0x45d89815 // ummla z21.s, z0.b, z24.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
"ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z24.d\n"
+ "trn2 z1.d, z1.d, z24.d\n"
+ ".inst 0x45dd9810 // ummla z16.s, z0.b, z29.b\n"
"ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
- ".inst 0x45db9812 // ummla z18.s, z0.b, z27.b\n"
- ".inst 0x45da9816 // ummla z22.s, z0.b, z26.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45dc9814 // ummla z20.s, z0.b, z28.b\n"
+ ".inst 0x45c49811 // ummla z17.s, z0.b, z4.b\n"
+ ".inst 0x45db9815 // ummla z21.s, z0.b, z27.b\n"
+ ".inst 0x45da9812 // ummla z18.s, z0.b, z26.b\n"
+ ".inst 0x45c69816 // ummla z22.s, z0.b, z6.b\n"
".inst 0x45d99813 // ummla z19.s, z0.b, z25.b\n"
".inst 0x45d89817 // ummla z23.s, z0.b, z24.b\n"
- "addvl x28, x28, #8\n"
"ble 24f\n"
"ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
".inst 0x45d89830 // ummla z16.s, z1.b, z24.b\n"
- "ld1b { z24.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45d89834 // ummla z20.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45d99831 // ummla z17.s, z1.b, z25.b\n"
- ".inst 0x45d89835 // ummla z21.s, z1.b, z24.b\n"
- "ld1b { z25.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z24.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45d99832 // ummla z18.s, z1.b, z25.b\n"
- ".inst 0x45d89836 // ummla z22.s, z1.b, z24.b\n"
"ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45de9834 // ummla z20.s, z1.b, z30.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45dd9831 // ummla z17.s, z1.b, z29.b\n"
+ ".inst 0x45dc9835 // ummla z21.s, z1.b, z28.b\n"
+ ".inst 0x45db9832 // ummla z18.s, z1.b, z27.b\n"
+ ".inst 0x45da9836 // ummla z22.s, z1.b, z26.b\n"
".inst 0x45d99833 // ummla z19.s, z1.b, z25.b\n"
".inst 0x45d89837 // ummla z23.s, z1.b, z24.b\n"
- "addvl x28, x28, #8\n"
"24:" // Height 2: Multiply loop: multiply skip
"tbnz %x[flags], #31, 25f\n"
"udot z11.s, z0.b, z15.b\n"
@@ -443,18 +443,18 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"uzp1 z24.d, z16.d, z20.d\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp2 z16.d, z16.d, z20.d\n"
- "add x23, x27, x20\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "add x23, x27, x20\n"
"mov z23.d, z24.d\n"
"tbnz %x[flags], #31, 26f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z24.s }, p2/Z, [x20]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ "ld1rw { z24.s }, p2/Z, [x20]\n"
"neg z24.s, p2/M, z24.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
@@ -497,24 +497,24 @@ void sve_hybrid_u8qa_mmla_4x4VL (
".inst 0x04b87673 // sqrdmulh z19.s, z19.s, z24.s\n"
"tbz %x[flags], #5, 27f\n"
"and z24.d, z23.d, z0.d\n"
- "asr z24.s, z24.s, #0x1f\n"
- "sqadd z23.s, z23.s, z24.s\n"
"and z30.d, z20.d, z0.d\n"
"and z29.d, z21.d, z0.d\n"
"and z28.d, z22.d, z0.d\n"
"and z27.d, z16.d, z0.d\n"
"and z26.d, z17.d, z0.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
"and z25.d, z18.d, z0.d\n"
- "and z24.d, z19.d, z0.d\n"
"asr z30.s, z30.s, #0x1f\n"
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"asr z27.s, z27.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z24.s\n"
+ "and z24.d, z19.d, z0.d\n"
"asr z26.s, z26.s, #0x1f\n"
"asr z25.s, z25.s, #0x1f\n"
- "asr z24.s, z24.s, #0x1f\n"
"sqadd z20.s, z20.s, z30.s\n"
"sqadd z21.s, z21.s, z29.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
"sqadd z22.s, z22.s, z28.s\n"
"sqadd z16.s, z16.s, z27.s\n"
"sqadd z17.s, z17.s, z26.s\n"
@@ -522,27 +522,27 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"sqadd z19.s, z19.s, z24.s\n"
"27:" // Height 2: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z24.s }, p2/Z, [x20]\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z23.s, z23.s, z24.s\n"
+ "ld1rw { z26.s }, p2/Z, [x20]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z24.s\n"
- "add z21.s, z21.s, z24.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z24.s\n"
- "add z16.s, z16.s, z24.s\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z24.s\n"
- "add z18.s, z18.s, z24.s\n"
+ "add z23.s, z23.s, z26.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z20.s, z20.s, z26.s\n"
+ "add z21.s, z21.s, z26.s\n"
"ld1rw { z25.s }, p2/Z, [x20]\n"
- "add z19.s, z19.s, z24.s\n"
+ "add z22.s, z22.s, z26.s\n"
+ "add z16.s, z16.s, z26.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z26.s\n"
+ "add z18.s, z18.s, z26.s\n"
"ld1rw { z24.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z26.s\n"
"smin z23.s, p2/M, z23.s, z25.s\n"
"smin z20.s, p2/M, z20.s, z25.s\n"
"smin z21.s, p2/M, z21.s, z25.s\n"
@@ -554,20 +554,20 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"smax z23.s, p2/M, z23.s, z24.s\n"
"smax z20.s, p2/M, z20.s, z24.s\n"
"smax z21.s, p2/M, z21.s, z24.s\n"
- "uzp1 z23.h, z23.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z24.s\n"
"smax z16.s, p2/M, z16.s, z24.s\n"
- "uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z23.b, z23.b, z20.b\n"
"smax z17.s, p2/M, z17.s, z24.s\n"
"smax z18.s, p2/M, z18.s, z24.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z23.b }, p1, [x27]\n"
"smax z19.s, p2/M, z19.s, z24.s\n"
+ "uzp1 z23.h, z23.h, z20.h\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"uzp1 z17.h, z18.h, z19.h\n"
+ "uzp1 z23.b, z23.b, z20.b\n"
"uzp1 z16.b, z16.b, z17.b\n"
- "st1b { z16.b }, p1, [x23]\n"
+ "st1b { z23.b }, p1, [x27]\n"
"addvl x27, x27, #1\n"
+ "st1b { z16.b }, p1, [x23]\n"
"28:" // Height 2: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -582,16 +582,16 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z15.b, #0x1\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"30:" // Height 3: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -606,8 +606,8 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov x26, #0x0\n"
"32:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 33f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -629,49 +629,49 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"ble 37f\n"
"35:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
"trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z4.b }, p2/Z, [x28]\n"
- "trn1 z2.d, z3.d, z5.d\n"
- "trn2 z3.d, z3.d, z5.d\n"
- ".inst 0x45c49810 // ummla z16.s, z0.b, z4.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45c49858 // ummla z24.s, z2.b, z4.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45c59814 // ummla z20.s, z0.b, z5.b\n"
- "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45c5985c // ummla z28.s, z2.b, z5.b\n"
- ".inst 0x45c49811 // ummla z17.s, z0.b, z4.b\n"
+ "trn1 z2.d, z3.d, z6.d\n"
+ "trn2 z3.d, z3.d, z6.d\n"
"ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ ".inst 0x45ca9814 // ummla z20.s, z0.b, z10.b\n"
+ ".inst 0x45c99811 // ummla z17.s, z0.b, z9.b\n"
+ ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
+ ".inst 0x45c49812 // ummla z18.s, z0.b, z4.b\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- ".inst 0x45c49859 // ummla z25.s, z2.b, z4.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45c99815 // ummla z21.s, z0.b, z9.b\n"
- ".inst 0x45c9985d // ummla z29.s, z2.b, z9.b\n"
- "ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
- ".inst 0x45c89812 // ummla z18.s, z0.b, z8.b\n"
- ".inst 0x45c8985a // ummla z26.s, z2.b, z8.b\n"
- "ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ ".inst 0x45ca985c // ummla z28.s, z2.b, z10.b\n"
+ ".inst 0x45c99859 // ummla z25.s, z2.b, z9.b\n"
+ ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
+ ".inst 0x45c4985a // ummla z26.s, z2.b, z4.b\n"
".inst 0x45c79816 // ummla z22.s, z0.b, z7.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
".inst 0x45c7985e // ummla z30.s, z2.b, z7.b\n"
- "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
- "add x24, x24, #0x10\n"
".inst 0x45c69813 // ummla z19.s, z0.b, z6.b\n"
+ "ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
".inst 0x45c6985b // ummla z27.s, z2.b, z6.b\n"
- "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
- "add x23, x23, #0x10\n"
".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x45c5985f // ummla z31.s, z2.b, z5.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "add x22, x22, #0x10\n"
".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
".inst 0x45c49878 // ummla z24.s, z3.b, z4.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x45ca9834 // ummla z20.s, z1.b, z10.b\n"
@@ -699,32 +699,32 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"bgt 35b\n"
"37:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
- "ld1rqb { z1.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "trn1 z0.d, z1.d, z2.d\n"
- "trn2 z1.d, z1.d, z2.d\n"
- "ld1b { z5.b }, p2/Z, [x28]\n"
- "trn1 z2.d, z3.d, z4.d\n"
- "trn2 z3.d, z3.d, z4.d\n"
- ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
- ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
+ "ld1b { z4.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
"subs x25, x25, #0x8\n"
"ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45c49814 // ummla z20.s, z0.b, z4.b\n"
- ".inst 0x45c4985c // ummla z28.s, z2.b, z4.b\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z5.d\n"
+ "trn2 z3.d, z3.d, z5.d\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45c49810 // ummla z16.s, z0.b, z4.b\n"
+ ".inst 0x45ca9814 // ummla z20.s, z0.b, z10.b\n"
".inst 0x45c99811 // ummla z17.s, z0.b, z9.b\n"
- ".inst 0x45c99859 // ummla z25.s, z2.b, z9.b\n"
".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
- ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45c79812 // ummla z18.s, z0.b, z7.b\n"
+ ".inst 0x45c49858 // ummla z24.s, z2.b, z4.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45ca985c // ummla z28.s, z2.b, z10.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45c99859 // ummla z25.s, z2.b, z9.b\n"
+ ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
".inst 0x45c7985a // ummla z26.s, z2.b, z7.b\n"
".inst 0x45c69816 // ummla z22.s, z0.b, z6.b\n"
".inst 0x45c6985e // ummla z30.s, z2.b, z6.b\n"
@@ -734,24 +734,24 @@ void sve_hybrid_u8qa_mmla_4x4VL (
".inst 0x45c4985f // ummla z31.s, z2.b, z4.b\n"
"ble 38f\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- ".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
- ".inst 0x45c49878 // ummla z24.s, z3.b, z4.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45c59834 // ummla z20.s, z1.b, z5.b\n"
- ".inst 0x45c5987c // ummla z28.s, z3.b, z5.b\n"
"ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45c49831 // ummla z17.s, z1.b, z4.b\n"
- ".inst 0x45c49879 // ummla z25.s, z3.b, z4.b\n"
+ ".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
+ ".inst 0x45c49878 // ummla z24.s, z3.b, z4.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45ca9834 // ummla z20.s, z1.b, z10.b\n"
+ ".inst 0x45ca987c // ummla z28.s, z3.b, z10.b\n"
+ ".inst 0x45c99831 // ummla z17.s, z1.b, z9.b\n"
+ ".inst 0x45c99879 // ummla z25.s, z3.b, z9.b\n"
+ "addvl x28, x28, #8\n"
".inst 0x45c89835 // ummla z21.s, z1.b, z8.b\n"
".inst 0x45c8987d // ummla z29.s, z3.b, z8.b\n"
".inst 0x45c79832 // ummla z18.s, z1.b, z7.b\n"
".inst 0x45c7987a // ummla z26.s, z3.b, z7.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45c69836 // ummla z22.s, z1.b, z6.b\n"
".inst 0x45c6987e // ummla z30.s, z3.b, z6.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
@@ -771,15 +771,15 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"bne 32b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z0.d, z16.d, z20.d\n"
- "add x23, x27, x20\n"
"uzp2 z16.d, z16.d, z20.d\n"
"uzp1 z20.d, z17.d, z21.d\n"
"uzp2 z17.d, z17.d, z21.d\n"
- "add x22, x23, x20\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "add x23, x27, x20\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "add x22, x23, x20\n"
"uzp1 z24.d, z24.d, z28.d\n"
"uzp1 z25.d, z25.d, z29.d\n"
"uzp1 z26.d, z26.d, z30.d\n"
@@ -787,14 +787,14 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z31.d, z0.d\n"
"tbnz %x[flags], #31, 40f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z23.s }, p2/Z, [x20]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
+ "ld1rw { z23.s }, p2/Z, [x20]\n"
"neg z23.s, p2/M, z23.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z23.s\n"
"mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z23.s\n"
"mul z12.s, p2/M, z12.s, z23.s\n"
"mul z13.s, p2/M, z13.s, z23.s\n"
"40:" // Height 3: skip row sum fixup
@@ -850,18 +850,18 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"and z29.d, z21.d, z0.d\n"
"and z28.d, z22.d, z0.d\n"
"and z23.d, z16.d, z0.d\n"
+ "and z3.d, z17.d, z0.d\n"
"asr z1.s, z1.s, #0x1f\n"
"asr z30.s, z30.s, #0x1f\n"
"asr z29.s, z29.s, #0x1f\n"
"asr z28.s, z28.s, #0x1f\n"
"asr z23.s, z23.s, #0x1f\n"
+ "and z2.d, z18.d, z0.d\n"
"sqadd z31.s, z31.s, z1.s\n"
"sqadd z20.s, z20.s, z30.s\n"
"sqadd z21.s, z21.s, z29.s\n"
"sqadd z22.s, z22.s, z28.s\n"
"sqadd z16.s, z16.s, z23.s\n"
- "and z3.d, z17.d, z0.d\n"
- "and z2.d, z18.d, z0.d\n"
"and z1.d, z19.d, z0.d\n"
"and z30.d, z24.d, z0.d\n"
"and z29.d, z25.d, z0.d\n"
@@ -883,35 +883,35 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"sqadd z27.s, z27.s, z23.s\n"
"41:" // Height 3: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z23.s }, p2/Z, [x20]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z23.s\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z23.s\n"
- "add z21.s, z21.s, z23.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z23.s\n"
- "add z16.s, z16.s, z23.s\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z23.s\n"
- "add z18.s, z18.s, z23.s\n"
+ "add z31.s, z31.s, z29.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z19.s, z19.s, z23.s\n"
- "add z24.s, z24.s, z23.s\n"
+ "add z20.s, z20.s, z29.s\n"
+ "add z21.s, z21.s, z29.s\n"
".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z23.s\n"
- "add z26.s, z26.s, z23.s\n"
+ "add z22.s, z22.s, z29.s\n"
+ "add z16.s, z16.s, z29.s\n"
".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z17.s, z17.s, z29.s\n"
+ "add z18.s, z18.s, z29.s\n"
"ld1rw { z28.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z23.s\n"
+ "add z19.s, z19.s, z29.s\n"
+ "add z24.s, z24.s, z29.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z29.s\n"
+ "add z26.s, z26.s, z29.s\n"
"ld1rw { z23.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z29.s\n"
"smin z31.s, p2/M, z31.s, z28.s\n"
"smin z20.s, p2/M, z20.s, z28.s\n"
"smin z21.s, p2/M, z21.s, z28.s\n"
@@ -927,28 +927,28 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"smax z31.s, p2/M, z31.s, z23.s\n"
"smax z20.s, p2/M, z20.s, z23.s\n"
"smax z21.s, p2/M, z21.s, z23.s\n"
- "uzp1 z31.h, z31.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z23.s\n"
"smax z16.s, p2/M, z16.s, z23.s\n"
- "uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z31.b, z31.b, z20.b\n"
"smax z17.s, p2/M, z17.s, z23.s\n"
"smax z18.s, p2/M, z18.s, z23.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z31.b }, p1, [x27]\n"
"smax z19.s, p2/M, z19.s, z23.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
"smax z24.s, p2/M, z24.s, z23.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z25.s, p2/M, z25.s, z23.s\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
"smax z26.s, p2/M, z26.s, z23.s\n"
+ "smax z27.s, p2/M, z27.s, z23.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
"uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z31.b, z31.b, z20.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "st1b { z31.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
"st1b { z16.b }, p1, [x23]\n"
- "smax z27.s, p2/M, z27.s, z23.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"st1b { z24.b }, p1, [x22]\n"
- "addvl x27, x27, #1\n"
"42:" // Height 3: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -956,27 +956,28 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"b 58f\n"
"43:" // Height 4
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x4\n"
"mov x10, %x[col_bias]\n"
"mov z11.s, #0x0\n"
"mov z12.s, #0x0\n"
- "mov z13.s, #0x0\n"
"bic %x[flags], %x[flags], #0x80000000\n"
"ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
"mov z14.s, #0x0\n"
- "mov z15.b, #0x1\n"
"ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x27, %x[output_ptr]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x27\n"
+ "mov z15.b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"44:" // Height 4: Column loop
"mov x20, #0x0\n"
- "whilelt p1.b, x20, x9\n"
"mov z16.s, #0x0\n"
"mov z17.s, #0x0\n"
"mov z18.s, #0x0\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
"mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
@@ -991,8 +992,8 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov x26, #0x0\n"
"46:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w25, [x20, x26, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
"tbz %x[flags], #3, 47f\n"
"ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1017,56 +1018,56 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"ble 51f\n"
"49:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "ld1rqb { z6.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
- "trn1 z2.d, z3.d, z5.d\n"
- "ld1b { z4.b }, p2/Z, [x28]\n"
- "trn2 z3.d, z3.d, z5.d\n"
- ".inst 0x45c49810 // ummla z16.s, z0.b, z4.b\n"
- ".inst 0x45c49858 // ummla z24.s, z2.b, z4.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
- ".inst 0x45c49814 // ummla z20.s, z0.b, z4.b\n"
- ".inst 0x45c4985c // ummla z28.s, z2.b, z4.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
- ".inst 0x45c59811 // ummla z17.s, z0.b, z5.b\n"
- ".inst 0x45c59859 // ummla z25.s, z2.b, z5.b\n"
- "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "trn1 z2.d, z3.d, z6.d\n"
+ "trn2 z3.d, z3.d, z6.d\n"
"ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
- ".inst 0x45c49815 // ummla z21.s, z0.b, z4.b\n"
- ".inst 0x45c4985d // ummla z29.s, z2.b, z4.b\n"
+ ".inst 0x45c59810 // ummla z16.s, z0.b, z5.b\n"
+ ".inst 0x45c49814 // ummla z20.s, z0.b, z4.b\n"
+ ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
+ ".inst 0x45c99815 // ummla z21.s, z0.b, z9.b\n"
+ ".inst 0x45c89812 // ummla z18.s, z0.b, z8.b\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
"ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #16\n"
- "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
- ".inst 0x45c89812 // ummla z18.s, z0.b, z8.b\n"
+ ".inst 0x45c4985c // ummla z28.s, z2.b, z4.b\n"
+ ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
+ ".inst 0x45c9985d // ummla z29.s, z2.b, z9.b\n"
".inst 0x45c8985a // ummla z26.s, z2.b, z8.b\n"
- ".inst 0x45c79816 // ummla z22.s, z0.b, z7.b\n"
+ ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45ca985e // ummla z30.s, z2.b, z10.b\n"
+ ".inst 0x45c69813 // ummla z19.s, z0.b, z6.b\n"
"ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
"ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
- ".inst 0x45c7985e // ummla z30.s, z2.b, z7.b\n"
- ".inst 0x45c69813 // ummla z19.s, z0.b, z6.b\n"
- "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x45c6985b // ummla z27.s, z2.b, z6.b\n"
".inst 0x45c59817 // ummla z23.s, z0.b, z5.b\n"
- "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
".inst 0x45c5985f // ummla z31.s, z2.b, z5.b\n"
".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
- "add x23, x23, #0x10\n"
".inst 0x45c49878 // ummla z24.s, z3.b, z4.b\n"
"ld1b { z4.b }, p2/Z, [x28, #-1, MUL VL]\n"
".inst 0x45ca9834 // ummla z20.s, z1.b, z10.b\n"
- "add x22, x22, #0x10\n"
".inst 0x45ca987c // ummla z28.s, z3.b, z10.b\n"
".inst 0x45c99831 // ummla z17.s, z1.b, z9.b\n"
- "add x21, x21, #0x10\n"
".inst 0x45c99879 // ummla z25.s, z3.b, z9.b\n"
".inst 0x45c89835 // ummla z21.s, z1.b, z8.b\n"
".inst 0x45c8987d // ummla z29.s, z3.b, z8.b\n"
@@ -1089,60 +1090,60 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"bgt 49b\n"
"51:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x25\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z0.d, z1.d, z2.d\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
"ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
"trn2 z1.d, z1.d, z2.d\n"
"trn1 z2.d, z3.d, z5.d\n"
- "ld1b { z4.b }, p2/Z, [x28]\n"
"trn2 z3.d, z3.d, z5.d\n"
- ".inst 0x45c49810 // ummla z16.s, z0.b, z4.b\n"
- ".inst 0x45c49858 // ummla z24.s, z2.b, z4.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
- "subs x25, x25, #0x8\n"
- ".inst 0x45c59814 // ummla z20.s, z0.b, z5.b\n"
- "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
- ".inst 0x45c5985c // ummla z28.s, z2.b, z5.b\n"
- ".inst 0x45c49811 // ummla z17.s, z0.b, z4.b\n"
- "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
- ".inst 0x45c49859 // ummla z25.s, z2.b, z4.b\n"
- ".inst 0x45c89815 // ummla z21.s, z0.b, z8.b\n"
- "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
- ".inst 0x45c8985d // ummla z29.s, z2.b, z8.b\n"
- ".inst 0x45c79812 // ummla z18.s, z0.b, z7.b\n"
+ ".inst 0x45c69810 // ummla z16.s, z0.b, z6.b\n"
+ ".inst 0x45c49814 // ummla z20.s, z0.b, z4.b\n"
+ ".inst 0x45c79811 // ummla z17.s, z0.b, z7.b\n"
+ ".inst 0x45c99815 // ummla z21.s, z0.b, z9.b\n"
+ ".inst 0x45c89812 // ummla z18.s, z0.b, z8.b\n"
+ ".inst 0x45c69858 // ummla z24.s, z2.b, z6.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45c4985c // ummla z28.s, z2.b, z4.b\n"
"addvl x28, x28, #8\n"
- ".inst 0x45c7985a // ummla z26.s, z2.b, z7.b\n"
- ".inst 0x45c69816 // ummla z22.s, z0.b, z6.b\n"
- ".inst 0x45c6985e // ummla z30.s, z2.b, z6.b\n"
+ ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
+ ".inst 0x45c9985d // ummla z29.s, z2.b, z9.b\n"
+ ".inst 0x45c8985a // ummla z26.s, z2.b, z8.b\n"
+ ".inst 0x45ca9816 // ummla z22.s, z0.b, z10.b\n"
+ ".inst 0x45ca985e // ummla z30.s, z2.b, z10.b\n"
".inst 0x45c59813 // ummla z19.s, z0.b, z5.b\n"
".inst 0x45c5985b // ummla z27.s, z2.b, z5.b\n"
- ".inst 0x45c49817 // ummla z23.s, z0.b, z4.b\n"
- ".inst 0x45c4985f // ummla z31.s, z2.b, z4.b\n"
+ ".inst 0x45c69817 // ummla z23.s, z0.b, z6.b\n"
+ ".inst 0x45c6985f // ummla z31.s, z2.b, z6.b\n"
"ble 52f\n"
"ld1b { z4.b }, p2/Z, [x28]\n"
- ".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
- ".inst 0x45c49878 // ummla z24.s, z3.b, z4.b\n"
- "ld1b { z5.b }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
"ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
- ".inst 0x45c59834 // ummla z20.s, z1.b, z5.b\n"
- ".inst 0x45c5987c // ummla z28.s, z3.b, z5.b\n"
"ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
"ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
- ".inst 0x45c49831 // ummla z17.s, z1.b, z4.b\n"
- ".inst 0x45c49879 // ummla z25.s, z3.b, z4.b\n"
+ ".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
+ ".inst 0x45c49878 // ummla z24.s, z3.b, z4.b\n"
"ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
"ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x45ca9834 // ummla z20.s, z1.b, z10.b\n"
+ ".inst 0x45ca987c // ummla z28.s, z3.b, z10.b\n"
+ ".inst 0x45c99831 // ummla z17.s, z1.b, z9.b\n"
+ ".inst 0x45c99879 // ummla z25.s, z3.b, z9.b\n"
+ "addvl x28, x28, #8\n"
".inst 0x45c89835 // ummla z21.s, z1.b, z8.b\n"
".inst 0x45c8987d // ummla z29.s, z3.b, z8.b\n"
".inst 0x45c79832 // ummla z18.s, z1.b, z7.b\n"
".inst 0x45c7987a // ummla z26.s, z3.b, z7.b\n"
- "addvl x28, x28, #8\n"
".inst 0x45c69836 // ummla z22.s, z1.b, z6.b\n"
".inst 0x45c6987e // ummla z30.s, z3.b, z6.b\n"
".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
@@ -1162,16 +1163,16 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"bne 46b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
"uzp1 z0.d, z16.d, z20.d\n"
- "add x23, x27, x20\n"
- "add x22, x23, x20\n"
"uzp2 z16.d, z16.d, z20.d\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "add x21, x22, x20\n"
"uzp2 z17.d, z17.d, z21.d\n"
"uzp1 z21.d, z18.d, z22.d\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "add x23, x27, x20\n"
+ "add x22, x23, x20\n"
"uzp1 z22.d, z19.d, z23.d\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "add x21, x22, x20\n"
"uzp1 z23.d, z24.d, z28.d\n"
"uzp2 z24.d, z24.d, z28.d\n"
"uzp1 z28.d, z25.d, z29.d\n"
@@ -1183,15 +1184,15 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"mov z31.d, z0.d\n"
"tbnz %x[flags], #31, 54f\n"
"add x20, %x[qp], %[b_offset]\n"
- "ld1rw { z0.s }, p2/Z, [x20]\n"
".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
"neg z0.s, p2/M, z0.s\n"
"mov z12.s, z11.s[3]\n"
"mov z11.s, z11.s[0]\n"
- "mul z11.s, p2/M, z11.s, z0.s\n"
"mov z14.s, z13.s[3]\n"
"mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z0.s\n"
"mul z12.s, p2/M, z12.s, z0.s\n"
"mul z13.s, p2/M, z13.s, z0.s\n"
"mul z14.s, p2/M, z14.s, z0.s\n"
@@ -1257,32 +1258,32 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"tbz %x[flags], #5, 55f\n"
"and z2.d, z31.d, z0.d\n"
"and z1.d, z20.d, z0.d\n"
- "asr z2.s, z2.s, #0x1f\n"
- "asr z1.s, z1.s, #0x1f\n"
- "sqadd z31.s, z31.s, z2.s\n"
- "sqadd z20.s, z20.s, z1.s\n"
"and z7.d, z21.d, z0.d\n"
"and z6.d, z22.d, z0.d\n"
"and z5.d, z16.d, z0.d\n"
"and z4.d, z17.d, z0.d\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
"and z3.d, z18.d, z0.d\n"
- "and z2.d, z19.d, z0.d\n"
- "and z1.d, z23.d, z0.d\n"
"asr z7.s, z7.s, #0x1f\n"
"asr z6.s, z6.s, #0x1f\n"
"asr z5.s, z5.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z2.s\n"
+ "sqadd z20.s, z20.s, z1.s\n"
+ "and z2.d, z19.d, z0.d\n"
+ "and z1.d, z23.d, z0.d\n"
"asr z4.s, z4.s, #0x1f\n"
"asr z3.s, z3.s, #0x1f\n"
- "asr z2.s, z2.s, #0x1f\n"
- "asr z1.s, z1.s, #0x1f\n"
"sqadd z21.s, z21.s, z7.s\n"
"sqadd z22.s, z22.s, z6.s\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
"sqadd z16.s, z16.s, z5.s\n"
"sqadd z17.s, z17.s, z4.s\n"
"sqadd z18.s, z18.s, z3.s\n"
+ "and z7.d, z28.d, z0.d\n"
"sqadd z19.s, z19.s, z2.s\n"
"sqadd z23.s, z23.s, z1.s\n"
- "and z7.d, z28.d, z0.d\n"
"and z6.d, z29.d, z0.d\n"
"and z5.d, z30.d, z0.d\n"
"and z4.d, z24.d, z0.d\n"
@@ -1305,43 +1306,43 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"sqadd z27.s, z27.s, z1.s\n"
"55:" // Height 4: no shift correction
"add x20, %x[qp], %[c_offset]\n"
- "ld1rw { z2.s }, p2/Z, [x20]\n"
".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
- "add z31.s, z31.s, z2.s\n"
+ "ld1rw { z2.s }, p2/Z, [x20]\n"
".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
- "add z20.s, z20.s, z2.s\n"
- "add z21.s, z21.s, z2.s\n"
".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
- "add z22.s, z22.s, z2.s\n"
- "add z16.s, z16.s, z2.s\n"
".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
- "add z17.s, z17.s, z2.s\n"
- "add z18.s, z18.s, z2.s\n"
+ "add z31.s, z31.s, z2.s\n"
".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
- "add z19.s, z19.s, z2.s\n"
- "add z23.s, z23.s, z2.s\n"
+ "add z20.s, z20.s, z2.s\n"
+ "add z21.s, z21.s, z2.s\n"
".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
- "add z28.s, z28.s, z2.s\n"
- "add z29.s, z29.s, z2.s\n"
+ "add z22.s, z22.s, z2.s\n"
+ "add z16.s, z16.s, z2.s\n"
".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
- "add z30.s, z30.s, z2.s\n"
- "add z24.s, z24.s, z2.s\n"
+ "add z17.s, z17.s, z2.s\n"
+ "add z18.s, z18.s, z2.s\n"
".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
- "add z25.s, z25.s, z2.s\n"
- "add z26.s, z26.s, z2.s\n"
+ "add z19.s, z19.s, z2.s\n"
+ "add z23.s, z23.s, z2.s\n"
".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
"add x20, %x[qp], %[maxval]\n"
+ "add z28.s, z28.s, z2.s\n"
+ "add z29.s, z29.s, z2.s\n"
"ld1rw { z1.s }, p2/Z, [x20]\n"
- "add z27.s, z27.s, z2.s\n"
+ "add z30.s, z30.s, z2.s\n"
+ "add z24.s, z24.s, z2.s\n"
"add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z2.s\n"
+ "add z26.s, z26.s, z2.s\n"
"ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z2.s\n"
"smin z31.s, p2/M, z31.s, z1.s\n"
"smin z20.s, p2/M, z20.s, z1.s\n"
"smin z21.s, p2/M, z21.s, z1.s\n"
@@ -1361,36 +1362,36 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"smax z31.s, p2/M, z31.s, z0.s\n"
"smax z20.s, p2/M, z20.s, z0.s\n"
"smax z21.s, p2/M, z21.s, z0.s\n"
- "uzp1 z31.h, z31.h, z20.h\n"
"smax z22.s, p2/M, z22.s, z0.s\n"
"smax z16.s, p2/M, z16.s, z0.s\n"
- "uzp1 z20.h, z21.h, z22.h\n"
- "uzp1 z31.b, z31.b, z20.b\n"
"smax z17.s, p2/M, z17.s, z0.s\n"
"smax z18.s, p2/M, z18.s, z0.s\n"
- "uzp1 z16.h, z16.h, z17.h\n"
- "st1b { z31.b }, p1, [x27]\n"
"smax z19.s, p2/M, z19.s, z0.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
"smax z23.s, p2/M, z23.s, z0.s\n"
- "uzp1 z17.h, z18.h, z19.h\n"
- "uzp1 z16.b, z16.b, z17.b\n"
"smax z28.s, p2/M, z28.s, z0.s\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
"smax z29.s, p2/M, z29.s, z0.s\n"
- "uzp1 z23.h, z23.h, z28.h\n"
- "st1b { z16.b }, p1, [x23]\n"
"smax z30.s, p2/M, z30.s, z0.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
"smax z24.s, p2/M, z24.s, z0.s\n"
- "uzp1 z16.h, z29.h, z30.h\n"
- "uzp1 z23.b, z23.b, z16.b\n"
"smax z25.s, p2/M, z25.s, z0.s\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
"smax z26.s, p2/M, z26.s, z0.s\n"
+ "smax z27.s, p2/M, z27.s, z0.s\n"
+ "uzp1 z23.h, z23.h, z28.h\n"
+ "uzp1 z31.b, z31.b, z20.b\n"
+ "uzp1 z18.h, z29.h, z30.h\n"
"uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "st1b { z31.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "uzp1 z23.b, z23.b, z18.b\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z16.b }, p1, [x23]\n"
"st1b { z23.b }, p1, [x22]\n"
- "smax z27.s, p2/M, z27.s, z0.s\n"
- "uzp1 z16.h, z26.h, z27.h\n"
- "uzp1 z24.b, z24.b, z16.b\n"
"st1b { z24.b }, p1, [x21]\n"
- "addvl x27, x27, #1\n"
"56:" // Height 4: Writeback done
"decw x9, ALL, MUL #4\n"
"cmp x9, XZR\n"
@@ -1407,8 +1408,8 @@ void sve_hybrid_u8qa_mmla_4x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"58:" // Exit
- : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
: "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL.hpp
new file mode 100644
index 0000000000..ebf1883ccf
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL.hpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<uint8_t>, \
+ size_t, size_t, \
+ const int8_t *, \
+ IndirectOutputArg<uint8_t>, \
+ const Requantize32 *, const int32_t *, unsigned int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_hybrid_u8s8qa_dot_4x4VL( ARGLIST );
+
+class cls_sve_hybrid_u8s8qa_dot_4x4VL
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef uint8_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 4;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<int32_t>() * 4;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return false;
+ }
+
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 4, 4> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 29.89 };
+ case CPUModel::A510:
+ return { 17.12 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_hybrid_u8s8qa_dot_4x4VL;
+ cls_sve_hybrid_u8s8qa_dot_4x4VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL/generic.cpp
new file mode 100644
index 0000000000..dc3b7ef3ec
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_dot_4x4VL/generic.cpp
@@ -0,0 +1,1502 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <cstdint>
+
+namespace arm_gemm {
+
+void sve_hybrid_u8s8qa_dot_4x4VL (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<uint8_t> A_arg,
+ size_t M, size_t N, const int8_t *B_ptr, IndirectOutputArg<uint8_t> output_arg,
+ const Requantize32 *qp, const int32_t *col_bias, unsigned int
+)
+{
+ struct KernelArgs {
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const int8_t *B_ptr = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ void *output_ptr = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ ka.output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ if (qp->c_offset > qp->minval) {
+ flags |= 0x20;
+ }
+ __asm__ __volatile__(
+ "ptrue p2.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x4\n"
+ "bge 43f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 29f\n"
+ "beq 15f\n"
+ "mov x10, %x[col_bias]\n"
+ "mov z11.s, #0x0\n"
+ "mov z15.b, #0x1\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "2:" // Height 1: Column loop
+ "mov x20, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
+ "3:" // Height 1: setup done
+ "mov x26, #0x0\n"
+ "4:" // Height 1: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 5f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "cbnz x26, 6f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "b 6f\n"
+ "5:" // Height 1: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "6:" // Height 1: input setup done
+ "cmp x25, #0x10\n"
+ "ble 9f\n"
+ "7:" // Height 1: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z21.b }, p2/Z, [x28]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z23.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ ".inst 0x44a01eb0 // sudot z16.s, z21.b, z0.b[0]\n"
+ "ld1b { z21.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
+ ".inst 0x44a01f51 // sudot z17.s, z26.b, z0.b[0]\n"
+ ".inst 0x44a01f32 // sudot z18.s, z25.b, z0.b[0]\n"
+ ".inst 0x44a01f13 // sudot z19.s, z24.b, z0.b[0]\n"
+ ".inst 0x44a81e90 // sudot z16.s, z20.b, z0.b[1]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ ".inst 0x44a81ef1 // sudot z17.s, z23.b, z0.b[1]\n"
+ "ld1b { z23.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x44a81ed2 // sudot z18.s, z22.b, z0.b[1]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ ".inst 0x44a81eb3 // sudot z19.s, z21.b, z0.b[1]\n"
+ "ld1b { z21.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x44b01e90 // sudot z16.s, z20.b, z0.b[2]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x44b01f51 // sudot z17.s, z26.b, z0.b[2]\n"
+ ".inst 0x44b01f32 // sudot z18.s, z25.b, z0.b[2]\n"
+ ".inst 0x44b01f13 // sudot z19.s, z24.b, z0.b[2]\n"
+ ".inst 0x44b81ef0 // sudot z16.s, z23.b, z0.b[3]\n"
+ ".inst 0x44b81ed1 // sudot z17.s, z22.b, z0.b[3]\n"
+ ".inst 0x44b81eb2 // sudot z18.s, z21.b, z0.b[3]\n"
+ ".inst 0x44b81e93 // sudot z19.s, z20.b, z0.b[3]\n"
+ "tbnz %x[flags], #31, 8f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "8:" // Height 1: Multiply loop: unique 1: skip row sum
+ "sub x25, x25, #0x10\n"
+ "cmp x25, #0x10\n"
+ "bgt 7b\n"
+ "9:" // Height 1: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ ".inst 0x44a01ef0 // sudot z16.s, z23.b, z0.b[0]\n"
+ ".inst 0x44a01ed1 // sudot z17.s, z22.b, z0.b[0]\n"
+ ".inst 0x44a01eb2 // sudot z18.s, z21.b, z0.b[0]\n"
+ ".inst 0x44a01e93 // sudot z19.s, z20.b, z0.b[0]\n"
+ "ble 10f\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44a81ef0 // sudot z16.s, z23.b, z0.b[1]\n"
+ ".inst 0x44a81ed1 // sudot z17.s, z22.b, z0.b[1]\n"
+ ".inst 0x44a81eb2 // sudot z18.s, z21.b, z0.b[1]\n"
+ ".inst 0x44a81e93 // sudot z19.s, z20.b, z0.b[1]\n"
+ "ble 10f\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44b01ef0 // sudot z16.s, z23.b, z0.b[2]\n"
+ ".inst 0x44b01ed1 // sudot z17.s, z22.b, z0.b[2]\n"
+ ".inst 0x44b01eb2 // sudot z18.s, z21.b, z0.b[2]\n"
+ ".inst 0x44b01e93 // sudot z19.s, z20.b, z0.b[2]\n"
+ "ble 10f\n"
+ "ld1b { z23.b }, p2/Z, [x28]\n"
+ "ld1b { z22.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z21.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z20.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44b81ef0 // sudot z16.s, z23.b, z0.b[3]\n"
+ ".inst 0x44b81ed1 // sudot z17.s, z22.b, z0.b[3]\n"
+ ".inst 0x44b81eb2 // sudot z18.s, z21.b, z0.b[3]\n"
+ ".inst 0x44b81e93 // sudot z19.s, z20.b, z0.b[3]\n"
+ "10:" // Height 1: Multiply loop: multiply skip
+ "tbnz %x[flags], #31, 11f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "11:" // Height 1: Multiply loop: unique 2: skip row sum
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 4b\n"
+ "tbnz %x[flags], #31, 12f\n"
+ "mov x21, #0x4\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "ld1rw { z20.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z20.s, p2/M, z20.s\n"
+ "saddv d11, p0, z11.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z20.s\n"
+ "12:" // Height 1: skip row sum fixup
+ "add z16.s, z16.s, z11.s\n"
+ "add z17.s, z17.s, z11.s\n"
+ "ld1w { z23.s }, p2/Z, [x10]\n"
+ "ld1w { z20.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "add z18.s, z18.s, z11.s\n"
+ "add z19.s, z19.s, z11.s\n"
+ "ld1w { z22.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "add z16.s, z16.s, z23.s\n"
+ "add z17.s, z17.s, z20.s\n"
+ "ld1rw { z20.s }, p2/Z, [x20]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "addvl x10, x10, #4\n"
+ "add z18.s, z18.s, z22.s\n"
+ "add z19.s, z19.s, z21.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ ".inst 0x04b47610 // sqrdmulh z16.s, z16.s, z20.s\n"
+ ".inst 0x04b47631 // sqrdmulh z17.s, z17.s, z20.s\n"
+ ".inst 0x04b47652 // sqrdmulh z18.s, z18.s, z20.s\n"
+ ".inst 0x04b47673 // sqrdmulh z19.s, z19.s, z20.s\n"
+ "tbz %x[flags], #5, 13f\n"
+ "and z23.d, z16.d, z0.d\n"
+ "and z22.d, z17.d, z0.d\n"
+ "and z21.d, z18.d, z0.d\n"
+ "and z20.d, z19.d, z0.d\n"
+ "asr z23.s, z23.s, #0x1f\n"
+ "asr z22.s, z22.s, #0x1f\n"
+ "asr z21.s, z21.s, #0x1f\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z23.s\n"
+ "sqadd z17.s, z17.s, z22.s\n"
+ "sqadd z18.s, z18.s, z21.s\n"
+ "sqadd z19.s, z19.s, z20.s\n"
+ "13:" // Height 1: no shift correction
+ "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "ld1rw { z22.s }, p2/Z, [x20]\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
+ "add z16.s, z16.s, z22.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z22.s\n"
+ "add z18.s, z18.s, z22.s\n"
+ "ld1rw { z20.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z22.s\n"
+ "smin z16.s, p2/M, z16.s, z21.s\n"
+ "smin z17.s, p2/M, z17.s, z21.s\n"
+ "smin z18.s, p2/M, z18.s, z21.s\n"
+ "smin z19.s, p2/M, z19.s, z21.s\n"
+ "smax z16.s, p2/M, z16.s, z20.s\n"
+ "smax z17.s, p2/M, z17.s, z20.s\n"
+ "smax z18.s, p2/M, z18.s, z20.s\n"
+ "smax z19.s, p2/M, z19.s, z20.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "14:" // Height 1: Writeback done
+ "decw x9, ALL, MUL #4\n"
+ "cmp x9, XZR\n"
+ "bgt 2b\n"
+ "b 58f\n"
+ "15:" // Height 2
+ "mov x10, %x[col_bias]\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov z15.b, #0x1\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "16:" // Height 2: Column loop
+ "mov x20, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "17:" // Height 2: setup done
+ "mov x26, #0x0\n"
+ "18:" // Height 2: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 19f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "cbnz x26, 20f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "b 20f\n"
+ "19:" // Height 2: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "20:" // Height 2: input setup done
+ "cmp x25, #0x10\n"
+ "ble 23f\n"
+ "21:" // Height 2: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z25.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1b { z26.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x44a01f30 // sudot z16.s, z25.b, z0.b[0]\n"
+ ".inst 0x44a11f34 // sudot z20.s, z25.b, z1.b[0]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
+ ".inst 0x44a01fd1 // sudot z17.s, z30.b, z0.b[0]\n"
+ ".inst 0x44a11fd5 // sudot z21.s, z30.b, z1.b[0]\n"
+ ".inst 0x44a01fb2 // sudot z18.s, z29.b, z0.b[0]\n"
+ ".inst 0x44a11fb6 // sudot z22.s, z29.b, z1.b[0]\n"
+ ".inst 0x44a01f93 // sudot z19.s, z28.b, z0.b[0]\n"
+ ".inst 0x44a11f97 // sudot z23.s, z28.b, z1.b[0]\n"
+ ".inst 0x44a81f10 // sudot z16.s, z24.b, z0.b[1]\n"
+ ".inst 0x44a91f14 // sudot z20.s, z24.b, z1.b[1]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ ".inst 0x44a81f71 // sudot z17.s, z27.b, z0.b[1]\n"
+ ".inst 0x44a91f75 // sudot z21.s, z27.b, z1.b[1]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ ".inst 0x44a81f52 // sudot z18.s, z26.b, z0.b[1]\n"
+ ".inst 0x44a91f56 // sudot z22.s, z26.b, z1.b[1]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ ".inst 0x44a81f33 // sudot z19.s, z25.b, z0.b[1]\n"
+ ".inst 0x44a91f37 // sudot z23.s, z25.b, z1.b[1]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x44b01f10 // sudot z16.s, z24.b, z0.b[2]\n"
+ ".inst 0x44b11f14 // sudot z20.s, z24.b, z1.b[2]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x44b01fd1 // sudot z17.s, z30.b, z0.b[2]\n"
+ ".inst 0x44b11fd5 // sudot z21.s, z30.b, z1.b[2]\n"
+ ".inst 0x44b01fb2 // sudot z18.s, z29.b, z0.b[2]\n"
+ ".inst 0x44b11fb6 // sudot z22.s, z29.b, z1.b[2]\n"
+ ".inst 0x44b01f93 // sudot z19.s, z28.b, z0.b[2]\n"
+ ".inst 0x44b11f97 // sudot z23.s, z28.b, z1.b[2]\n"
+ ".inst 0x44b81f70 // sudot z16.s, z27.b, z0.b[3]\n"
+ ".inst 0x44b91f74 // sudot z20.s, z27.b, z1.b[3]\n"
+ ".inst 0x44b81f51 // sudot z17.s, z26.b, z0.b[3]\n"
+ ".inst 0x44b91f55 // sudot z21.s, z26.b, z1.b[3]\n"
+ ".inst 0x44b81f32 // sudot z18.s, z25.b, z0.b[3]\n"
+ ".inst 0x44b91f36 // sudot z22.s, z25.b, z1.b[3]\n"
+ ".inst 0x44b81f13 // sudot z19.s, z24.b, z0.b[3]\n"
+ ".inst 0x44b91f17 // sudot z23.s, z24.b, z1.b[3]\n"
+ "tbnz %x[flags], #31, 22f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z12.s, z1.b, z15.b\n"
+ "22:" // Height 2: Multiply loop: unique 3: skip row sum
+ "sub x25, x25, #0x10\n"
+ "cmp x25, #0x10\n"
+ "bgt 21b\n"
+ "23:" // Height 2: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z27.b }, p2/Z, [x28]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ ".inst 0x44a01f70 // sudot z16.s, z27.b, z0.b[0]\n"
+ ".inst 0x44a11f74 // sudot z20.s, z27.b, z1.b[0]\n"
+ ".inst 0x44a01f51 // sudot z17.s, z26.b, z0.b[0]\n"
+ ".inst 0x44a11f55 // sudot z21.s, z26.b, z1.b[0]\n"
+ ".inst 0x44a01f32 // sudot z18.s, z25.b, z0.b[0]\n"
+ ".inst 0x44a11f36 // sudot z22.s, z25.b, z1.b[0]\n"
+ ".inst 0x44a01f13 // sudot z19.s, z24.b, z0.b[0]\n"
+ ".inst 0x44a11f17 // sudot z23.s, z24.b, z1.b[0]\n"
+ "ble 24f\n"
+ "ld1b { z27.b }, p2/Z, [x28]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44a81f70 // sudot z16.s, z27.b, z0.b[1]\n"
+ ".inst 0x44a91f74 // sudot z20.s, z27.b, z1.b[1]\n"
+ ".inst 0x44a81f51 // sudot z17.s, z26.b, z0.b[1]\n"
+ ".inst 0x44a91f55 // sudot z21.s, z26.b, z1.b[1]\n"
+ ".inst 0x44a81f32 // sudot z18.s, z25.b, z0.b[1]\n"
+ ".inst 0x44a91f36 // sudot z22.s, z25.b, z1.b[1]\n"
+ ".inst 0x44a81f13 // sudot z19.s, z24.b, z0.b[1]\n"
+ ".inst 0x44a91f17 // sudot z23.s, z24.b, z1.b[1]\n"
+ "ble 24f\n"
+ "ld1b { z27.b }, p2/Z, [x28]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44b01f70 // sudot z16.s, z27.b, z0.b[2]\n"
+ ".inst 0x44b11f74 // sudot z20.s, z27.b, z1.b[2]\n"
+ ".inst 0x44b01f51 // sudot z17.s, z26.b, z0.b[2]\n"
+ ".inst 0x44b11f55 // sudot z21.s, z26.b, z1.b[2]\n"
+ ".inst 0x44b01f32 // sudot z18.s, z25.b, z0.b[2]\n"
+ ".inst 0x44b11f36 // sudot z22.s, z25.b, z1.b[2]\n"
+ ".inst 0x44b01f13 // sudot z19.s, z24.b, z0.b[2]\n"
+ ".inst 0x44b11f17 // sudot z23.s, z24.b, z1.b[2]\n"
+ "ble 24f\n"
+ "ld1b { z27.b }, p2/Z, [x28]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44b81f70 // sudot z16.s, z27.b, z0.b[3]\n"
+ ".inst 0x44b91f74 // sudot z20.s, z27.b, z1.b[3]\n"
+ ".inst 0x44b81f51 // sudot z17.s, z26.b, z0.b[3]\n"
+ ".inst 0x44b91f55 // sudot z21.s, z26.b, z1.b[3]\n"
+ ".inst 0x44b81f32 // sudot z18.s, z25.b, z0.b[3]\n"
+ ".inst 0x44b91f36 // sudot z22.s, z25.b, z1.b[3]\n"
+ ".inst 0x44b81f13 // sudot z19.s, z24.b, z0.b[3]\n"
+ ".inst 0x44b91f17 // sudot z23.s, z24.b, z1.b[3]\n"
+ "24:" // Height 2: Multiply loop: multiply skip
+ "tbnz %x[flags], #31, 25f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z12.s, z1.b, z15.b\n"
+ "25:" // Height 2: Multiply loop: unique 4: skip row sum
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 18b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x27, x20\n"
+ "tbnz %x[flags], #31, 26f\n"
+ "mov x21, #0x4\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "ld1rw { z24.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z24.s, p2/M, z24.s\n"
+ "saddv d11, p0, z11.s\n"
+ "saddv d12, p0, z12.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z24.s\n"
+ "mov z12.s, z12.s[0]\n"
+ "mul z12.s, p2/M, z12.s, z24.s\n"
+ "26:" // Height 2: skip row sum fixup
+ "add z16.s, z16.s, z11.s\n"
+ "add z17.s, z17.s, z11.s\n"
+ "ld1w { z28.s }, p2/Z, [x10]\n"
+ "ld1w { z27.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "add z18.s, z18.s, z11.s\n"
+ "add z19.s, z19.s, z11.s\n"
+ "ld1w { z26.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "add z20.s, z20.s, z12.s\n"
+ "add z21.s, z21.s, z12.s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "add z22.s, z22.s, z12.s\n"
+ "add z23.s, z23.s, z12.s\n"
+ "ld1rw { z24.s }, p2/Z, [x20]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add z16.s, z16.s, z28.s\n"
+ "add z17.s, z17.s, z27.s\n"
+ "addvl x10, x10, #4\n"
+ "add z18.s, z18.s, z26.s\n"
+ "add z19.s, z19.s, z25.s\n"
+ "add z20.s, z20.s, z28.s\n"
+ "add z21.s, z21.s, z27.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z22.s, z22.s, z26.s\n"
+ "add z23.s, z23.s, z25.s\n"
+ ".inst 0x04b87610 // sqrdmulh z16.s, z16.s, z24.s\n"
+ ".inst 0x04b87631 // sqrdmulh z17.s, z17.s, z24.s\n"
+ ".inst 0x04b87652 // sqrdmulh z18.s, z18.s, z24.s\n"
+ ".inst 0x04b87673 // sqrdmulh z19.s, z19.s, z24.s\n"
+ ".inst 0x04b87694 // sqrdmulh z20.s, z20.s, z24.s\n"
+ ".inst 0x04b876b5 // sqrdmulh z21.s, z21.s, z24.s\n"
+ ".inst 0x04b876d6 // sqrdmulh z22.s, z22.s, z24.s\n"
+ ".inst 0x04b876f7 // sqrdmulh z23.s, z23.s, z24.s\n"
+ "tbz %x[flags], #5, 27f\n"
+ "and z24.d, z16.d, z0.d\n"
+ "and z30.d, z17.d, z0.d\n"
+ "and z29.d, z18.d, z0.d\n"
+ "and z28.d, z19.d, z0.d\n"
+ "and z27.d, z20.d, z0.d\n"
+ "and z26.d, z21.d, z0.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ "and z25.d, z22.d, z0.d\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "asr z29.s, z29.s, #0x1f\n"
+ "asr z28.s, z28.s, #0x1f\n"
+ "asr z27.s, z27.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z24.s\n"
+ "and z24.d, z23.d, z0.d\n"
+ "asr z26.s, z26.s, #0x1f\n"
+ "asr z25.s, z25.s, #0x1f\n"
+ "sqadd z17.s, z17.s, z30.s\n"
+ "sqadd z18.s, z18.s, z29.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ "sqadd z19.s, z19.s, z28.s\n"
+ "sqadd z20.s, z20.s, z27.s\n"
+ "sqadd z21.s, z21.s, z26.s\n"
+ "sqadd z22.s, z22.s, z25.s\n"
+ "sqadd z23.s, z23.s, z24.s\n"
+ "27:" // Height 2: no shift correction
+ "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "ld1rw { z26.s }, p2/Z, [x20]\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "add z16.s, z16.s, z26.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "add z17.s, z17.s, z26.s\n"
+ "add z18.s, z18.s, z26.s\n"
+ "ld1rw { z25.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z26.s\n"
+ "add z20.s, z20.s, z26.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z21.s, z21.s, z26.s\n"
+ "add z22.s, z22.s, z26.s\n"
+ "ld1rw { z24.s }, p2/Z, [x20]\n"
+ "add z23.s, z23.s, z26.s\n"
+ "smin z16.s, p2/M, z16.s, z25.s\n"
+ "smin z17.s, p2/M, z17.s, z25.s\n"
+ "smin z18.s, p2/M, z18.s, z25.s\n"
+ "smin z19.s, p2/M, z19.s, z25.s\n"
+ "smin z20.s, p2/M, z20.s, z25.s\n"
+ "smin z21.s, p2/M, z21.s, z25.s\n"
+ "smin z22.s, p2/M, z22.s, z25.s\n"
+ "smin z23.s, p2/M, z23.s, z25.s\n"
+ "smax z16.s, p2/M, z16.s, z24.s\n"
+ "smax z17.s, p2/M, z17.s, z24.s\n"
+ "smax z18.s, p2/M, z18.s, z24.s\n"
+ "smax z19.s, p2/M, z19.s, z24.s\n"
+ "smax z20.s, p2/M, z20.s, z24.s\n"
+ "smax z21.s, p2/M, z21.s, z24.s\n"
+ "smax z22.s, p2/M, z22.s, z24.s\n"
+ "smax z23.s, p2/M, z23.s, z24.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z17.h, z22.h, z23.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z20.b, z20.b, z17.b\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "st1b { z20.b }, p1, [x24]\n"
+ "28:" // Height 2: Writeback done
+ "decw x9, ALL, MUL #4\n"
+ "cmp x9, XZR\n"
+ "bgt 16b\n"
+ "b 58f\n"
+ "29:" // Height 3
+ "mov x10, %x[col_bias]\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov z13.s, #0x0\n"
+ "mov z15.b, #0x1\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "30:" // Height 3: Column loop
+ "mov x20, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "mov z24.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "31:" // Height 3: setup done
+ "mov x26, #0x0\n"
+ "32:" // Height 3: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 33f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "cbnz x26, 34f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "b 34f\n"
+ "33:" // Height 3: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "34:" // Height 3: input setup done
+ "cmp x25, #0x10\n"
+ "ble 37f\n"
+ "35:" // Height 3: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z3.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z31.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "add x22, x22, #0x10\n"
+ ".inst 0x44a01cb0 // sudot z16.s, z5.b, z0.b[0]\n"
+ ".inst 0x44a11cb4 // sudot z20.s, z5.b, z1.b[0]\n"
+ ".inst 0x44a01fb1 // sudot z17.s, z29.b, z0.b[0]\n"
+ ".inst 0x44a11fb5 // sudot z21.s, z29.b, z1.b[0]\n"
+ ".inst 0x44a01c92 // sudot z18.s, z4.b, z0.b[0]\n"
+ ".inst 0x44a21cb8 // sudot z24.s, z5.b, z2.b[0]\n"
+ ".inst 0x44a21fb9 // sudot z25.s, z29.b, z2.b[0]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
+ ".inst 0x44a11c96 // sudot z22.s, z4.b, z1.b[0]\n"
+ ".inst 0x44a21c9a // sudot z26.s, z4.b, z2.b[0]\n"
+ ".inst 0x44a01f93 // sudot z19.s, z28.b, z0.b[0]\n"
+ ".inst 0x44a11f97 // sudot z23.s, z28.b, z1.b[0]\n"
+ ".inst 0x44a21f9b // sudot z27.s, z28.b, z2.b[0]\n"
+ ".inst 0x44a81c70 // sudot z16.s, z3.b, z0.b[1]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ ".inst 0x44a91c74 // sudot z20.s, z3.b, z1.b[1]\n"
+ ".inst 0x44aa1c78 // sudot z24.s, z3.b, z2.b[1]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z3.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ ".inst 0x44a81ff1 // sudot z17.s, z31.b, z0.b[1]\n"
+ ".inst 0x44a91ff5 // sudot z21.s, z31.b, z1.b[1]\n"
+ ".inst 0x44aa1ff9 // sudot z25.s, z31.b, z2.b[1]\n"
+ ".inst 0x44a81fd2 // sudot z18.s, z30.b, z0.b[1]\n"
+ "ld1b { z31.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x44a91fd6 // sudot z22.s, z30.b, z1.b[1]\n"
+ ".inst 0x44aa1fda // sudot z26.s, z30.b, z2.b[1]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ ".inst 0x44a81fb3 // sudot z19.s, z29.b, z0.b[1]\n"
+ ".inst 0x44a91fb7 // sudot z23.s, z29.b, z1.b[1]\n"
+ ".inst 0x44aa1fbb // sudot z27.s, z29.b, z2.b[1]\n"
+ ".inst 0x44b01f90 // sudot z16.s, z28.b, z0.b[2]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x44b11f94 // sudot z20.s, z28.b, z1.b[2]\n"
+ ".inst 0x44b21f98 // sudot z24.s, z28.b, z2.b[2]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x44b01cb1 // sudot z17.s, z5.b, z0.b[2]\n"
+ ".inst 0x44b11cb5 // sudot z21.s, z5.b, z1.b[2]\n"
+ ".inst 0x44b21cb9 // sudot z25.s, z5.b, z2.b[2]\n"
+ ".inst 0x44b01c92 // sudot z18.s, z4.b, z0.b[2]\n"
+ ".inst 0x44b11c96 // sudot z22.s, z4.b, z1.b[2]\n"
+ ".inst 0x44b21c9a // sudot z26.s, z4.b, z2.b[2]\n"
+ ".inst 0x44b01c73 // sudot z19.s, z3.b, z0.b[2]\n"
+ ".inst 0x44b11c77 // sudot z23.s, z3.b, z1.b[2]\n"
+ ".inst 0x44b21c7b // sudot z27.s, z3.b, z2.b[2]\n"
+ ".inst 0x44b81ff0 // sudot z16.s, z31.b, z0.b[3]\n"
+ ".inst 0x44b91ff4 // sudot z20.s, z31.b, z1.b[3]\n"
+ ".inst 0x44ba1ff8 // sudot z24.s, z31.b, z2.b[3]\n"
+ ".inst 0x44b81fd1 // sudot z17.s, z30.b, z0.b[3]\n"
+ ".inst 0x44b91fd5 // sudot z21.s, z30.b, z1.b[3]\n"
+ ".inst 0x44ba1fd9 // sudot z25.s, z30.b, z2.b[3]\n"
+ ".inst 0x44b81fb2 // sudot z18.s, z29.b, z0.b[3]\n"
+ ".inst 0x44b91fb6 // sudot z22.s, z29.b, z1.b[3]\n"
+ ".inst 0x44ba1fba // sudot z26.s, z29.b, z2.b[3]\n"
+ ".inst 0x44b81f93 // sudot z19.s, z28.b, z0.b[3]\n"
+ ".inst 0x44b91f97 // sudot z23.s, z28.b, z1.b[3]\n"
+ ".inst 0x44ba1f9b // sudot z27.s, z28.b, z2.b[3]\n"
+ "tbnz %x[flags], #31, 36f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z12.s, z1.b, z15.b\n"
+ "udot z13.s, z2.b, z15.b\n"
+ "36:" // Height 3: Multiply loop: unique 5: skip row sum
+ "sub x25, x25, #0x10\n"
+ "cmp x25, #0x10\n"
+ "bgt 35b\n"
+ "37:" // Height 3: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ ".inst 0x44a01ff0 // sudot z16.s, z31.b, z0.b[0]\n"
+ ".inst 0x44a11ff4 // sudot z20.s, z31.b, z1.b[0]\n"
+ ".inst 0x44a01fd1 // sudot z17.s, z30.b, z0.b[0]\n"
+ ".inst 0x44a11fd5 // sudot z21.s, z30.b, z1.b[0]\n"
+ ".inst 0x44a01fb2 // sudot z18.s, z29.b, z0.b[0]\n"
+ ".inst 0x44a11fb6 // sudot z22.s, z29.b, z1.b[0]\n"
+ ".inst 0x44a21ff8 // sudot z24.s, z31.b, z2.b[0]\n"
+ ".inst 0x44a21fd9 // sudot z25.s, z30.b, z2.b[0]\n"
+ ".inst 0x44a21fba // sudot z26.s, z29.b, z2.b[0]\n"
+ ".inst 0x44a01f93 // sudot z19.s, z28.b, z0.b[0]\n"
+ ".inst 0x44a11f97 // sudot z23.s, z28.b, z1.b[0]\n"
+ ".inst 0x44a21f9b // sudot z27.s, z28.b, z2.b[0]\n"
+ "ble 38f\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44a81ff0 // sudot z16.s, z31.b, z0.b[1]\n"
+ ".inst 0x44a91ff4 // sudot z20.s, z31.b, z1.b[1]\n"
+ ".inst 0x44aa1ff8 // sudot z24.s, z31.b, z2.b[1]\n"
+ ".inst 0x44a81fd1 // sudot z17.s, z30.b, z0.b[1]\n"
+ ".inst 0x44a91fd5 // sudot z21.s, z30.b, z1.b[1]\n"
+ ".inst 0x44aa1fd9 // sudot z25.s, z30.b, z2.b[1]\n"
+ ".inst 0x44a81fb2 // sudot z18.s, z29.b, z0.b[1]\n"
+ ".inst 0x44a91fb6 // sudot z22.s, z29.b, z1.b[1]\n"
+ ".inst 0x44aa1fba // sudot z26.s, z29.b, z2.b[1]\n"
+ ".inst 0x44a81f93 // sudot z19.s, z28.b, z0.b[1]\n"
+ ".inst 0x44a91f97 // sudot z23.s, z28.b, z1.b[1]\n"
+ ".inst 0x44aa1f9b // sudot z27.s, z28.b, z2.b[1]\n"
+ "ble 38f\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44b01ff0 // sudot z16.s, z31.b, z0.b[2]\n"
+ ".inst 0x44b11ff4 // sudot z20.s, z31.b, z1.b[2]\n"
+ ".inst 0x44b21ff8 // sudot z24.s, z31.b, z2.b[2]\n"
+ ".inst 0x44b01fd1 // sudot z17.s, z30.b, z0.b[2]\n"
+ ".inst 0x44b11fd5 // sudot z21.s, z30.b, z1.b[2]\n"
+ ".inst 0x44b21fd9 // sudot z25.s, z30.b, z2.b[2]\n"
+ ".inst 0x44b01fb2 // sudot z18.s, z29.b, z0.b[2]\n"
+ ".inst 0x44b11fb6 // sudot z22.s, z29.b, z1.b[2]\n"
+ ".inst 0x44b21fba // sudot z26.s, z29.b, z2.b[2]\n"
+ ".inst 0x44b01f93 // sudot z19.s, z28.b, z0.b[2]\n"
+ ".inst 0x44b11f97 // sudot z23.s, z28.b, z1.b[2]\n"
+ ".inst 0x44b21f9b // sudot z27.s, z28.b, z2.b[2]\n"
+ "ble 38f\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44b81ff0 // sudot z16.s, z31.b, z0.b[3]\n"
+ ".inst 0x44b91ff4 // sudot z20.s, z31.b, z1.b[3]\n"
+ ".inst 0x44ba1ff8 // sudot z24.s, z31.b, z2.b[3]\n"
+ ".inst 0x44b81fd1 // sudot z17.s, z30.b, z0.b[3]\n"
+ ".inst 0x44b91fd5 // sudot z21.s, z30.b, z1.b[3]\n"
+ ".inst 0x44ba1fd9 // sudot z25.s, z30.b, z2.b[3]\n"
+ ".inst 0x44b81fb2 // sudot z18.s, z29.b, z0.b[3]\n"
+ ".inst 0x44b91fb6 // sudot z22.s, z29.b, z1.b[3]\n"
+ ".inst 0x44ba1fba // sudot z26.s, z29.b, z2.b[3]\n"
+ ".inst 0x44b81f93 // sudot z19.s, z28.b, z0.b[3]\n"
+ ".inst 0x44b91f97 // sudot z23.s, z28.b, z1.b[3]\n"
+ ".inst 0x44ba1f9b // sudot z27.s, z28.b, z2.b[3]\n"
+ "38:" // Height 3: Multiply loop: multiply skip
+ "tbnz %x[flags], #31, 39f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z12.s, z1.b, z15.b\n"
+ "udot z13.s, z2.b, z15.b\n"
+ "39:" // Height 3: Multiply loop: unique 6: skip row sum
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 32b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
+ "tbnz %x[flags], #31, 40f\n"
+ "mov x21, #0x4\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "ld1rw { z28.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z28.s, p2/M, z28.s\n"
+ "saddv d11, p0, z11.s\n"
+ "saddv d12, p0, z12.s\n"
+ "saddv d13, p0, z13.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "mov z12.s, z12.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z28.s\n"
+ "mul z12.s, p2/M, z12.s, z28.s\n"
+ "mov z13.s, z13.s[0]\n"
+ "mul z13.s, p2/M, z13.s, z28.s\n"
+ "40:" // Height 3: skip row sum fixup
+ "add z16.s, z16.s, z11.s\n"
+ "add z17.s, z17.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x10]\n"
+ "ld1w { z31.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "add z18.s, z18.s, z11.s\n"
+ "add z19.s, z19.s, z11.s\n"
+ "ld1w { z30.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z29.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "add z20.s, z20.s, z12.s\n"
+ "add z21.s, z21.s, z12.s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "add z22.s, z22.s, z12.s\n"
+ "add z23.s, z23.s, z12.s\n"
+ "ld1rw { z28.s }, p2/Z, [x20]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z25.s, z25.s, z13.s\n"
+ "addvl x10, x10, #4\n"
+ "add z26.s, z26.s, z13.s\n"
+ "add z27.s, z27.s, z13.s\n"
+ "add z16.s, z16.s, z0.s\n"
+ "add z17.s, z17.s, z31.s\n"
+ "add z18.s, z18.s, z30.s\n"
+ "add z19.s, z19.s, z29.s\n"
+ "add z20.s, z20.s, z0.s\n"
+ "add z21.s, z21.s, z31.s\n"
+ "add z22.s, z22.s, z30.s\n"
+ "add z23.s, z23.s, z29.s\n"
+ "add z24.s, z24.s, z0.s\n"
+ "add z25.s, z25.s, z31.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z26.s, z26.s, z30.s\n"
+ "add z27.s, z27.s, z29.s\n"
+ ".inst 0x04bc7610 // sqrdmulh z16.s, z16.s, z28.s\n"
+ ".inst 0x04bc7631 // sqrdmulh z17.s, z17.s, z28.s\n"
+ ".inst 0x04bc7652 // sqrdmulh z18.s, z18.s, z28.s\n"
+ ".inst 0x04bc7673 // sqrdmulh z19.s, z19.s, z28.s\n"
+ ".inst 0x04bc7694 // sqrdmulh z20.s, z20.s, z28.s\n"
+ ".inst 0x04bc76b5 // sqrdmulh z21.s, z21.s, z28.s\n"
+ ".inst 0x04bc76d6 // sqrdmulh z22.s, z22.s, z28.s\n"
+ ".inst 0x04bc76f7 // sqrdmulh z23.s, z23.s, z28.s\n"
+ ".inst 0x04bc7718 // sqrdmulh z24.s, z24.s, z28.s\n"
+ ".inst 0x04bc7739 // sqrdmulh z25.s, z25.s, z28.s\n"
+ ".inst 0x04bc775a // sqrdmulh z26.s, z26.s, z28.s\n"
+ ".inst 0x04bc777b // sqrdmulh z27.s, z27.s, z28.s\n"
+ "tbz %x[flags], #5, 41f\n"
+ "and z1.d, z16.d, z0.d\n"
+ "and z31.d, z17.d, z0.d\n"
+ "and z30.d, z18.d, z0.d\n"
+ "and z29.d, z19.d, z0.d\n"
+ "and z28.d, z20.d, z0.d\n"
+ "and z3.d, z21.d, z0.d\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "asr z31.s, z31.s, #0x1f\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "asr z29.s, z29.s, #0x1f\n"
+ "asr z28.s, z28.s, #0x1f\n"
+ "and z2.d, z22.d, z0.d\n"
+ "sqadd z16.s, z16.s, z1.s\n"
+ "sqadd z17.s, z17.s, z31.s\n"
+ "sqadd z18.s, z18.s, z30.s\n"
+ "sqadd z19.s, z19.s, z29.s\n"
+ "sqadd z20.s, z20.s, z28.s\n"
+ "and z1.d, z23.d, z0.d\n"
+ "and z31.d, z24.d, z0.d\n"
+ "and z30.d, z25.d, z0.d\n"
+ "and z29.d, z26.d, z0.d\n"
+ "and z28.d, z27.d, z0.d\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "asr z31.s, z31.s, #0x1f\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "asr z29.s, z29.s, #0x1f\n"
+ "asr z28.s, z28.s, #0x1f\n"
+ "sqadd z21.s, z21.s, z3.s\n"
+ "sqadd z22.s, z22.s, z2.s\n"
+ "sqadd z23.s, z23.s, z1.s\n"
+ "sqadd z24.s, z24.s, z31.s\n"
+ "sqadd z25.s, z25.s, z30.s\n"
+ "sqadd z26.s, z26.s, z29.s\n"
+ "sqadd z27.s, z27.s, z28.s\n"
+ "41:" // Height 3: no shift correction
+ "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "ld1rw { z30.s }, p2/Z, [x20]\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "add z16.s, z16.s, z30.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "add z17.s, z17.s, z30.s\n"
+ "add z18.s, z18.s, z30.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
+ "add z19.s, z19.s, z30.s\n"
+ "add z20.s, z20.s, z30.s\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "add z21.s, z21.s, z30.s\n"
+ "add z22.s, z22.s, z30.s\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
+ "add z23.s, z23.s, z30.s\n"
+ "add z24.s, z24.s, z30.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z30.s\n"
+ "add z26.s, z26.s, z30.s\n"
+ "ld1rw { z28.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z30.s\n"
+ "smin z16.s, p2/M, z16.s, z29.s\n"
+ "smin z17.s, p2/M, z17.s, z29.s\n"
+ "smin z18.s, p2/M, z18.s, z29.s\n"
+ "smin z19.s, p2/M, z19.s, z29.s\n"
+ "smin z20.s, p2/M, z20.s, z29.s\n"
+ "smin z21.s, p2/M, z21.s, z29.s\n"
+ "smin z22.s, p2/M, z22.s, z29.s\n"
+ "smin z23.s, p2/M, z23.s, z29.s\n"
+ "smin z24.s, p2/M, z24.s, z29.s\n"
+ "smin z25.s, p2/M, z25.s, z29.s\n"
+ "smin z26.s, p2/M, z26.s, z29.s\n"
+ "smin z27.s, p2/M, z27.s, z29.s\n"
+ "smax z16.s, p2/M, z16.s, z28.s\n"
+ "smax z17.s, p2/M, z17.s, z28.s\n"
+ "smax z18.s, p2/M, z18.s, z28.s\n"
+ "smax z19.s, p2/M, z19.s, z28.s\n"
+ "smax z20.s, p2/M, z20.s, z28.s\n"
+ "smax z21.s, p2/M, z21.s, z28.s\n"
+ "smax z22.s, p2/M, z22.s, z28.s\n"
+ "smax z23.s, p2/M, z23.s, z28.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "smax z24.s, p2/M, z24.s, z28.s\n"
+ "smax z25.s, p2/M, z25.s, z28.s\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
+ "smax z26.s, p2/M, z26.s, z28.s\n"
+ "smax z27.s, p2/M, z27.s, z28.s\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
+ "uzp1 z18.h, z22.h, z23.h\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "uzp1 z20.b, z20.b, z18.b\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z20.b }, p1, [x24]\n"
+ "st1b { z24.b }, p1, [x23]\n"
+ "42:" // Height 3: Writeback done
+ "decw x9, ALL, MUL #4\n"
+ "cmp x9, XZR\n"
+ "bgt 30b\n"
+ "b 58f\n"
+ "43:" // Height 4
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x20, #0x4\n"
+ "mov x10, %x[col_bias]\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x27\n"
+ "mov z15.b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "44:" // Height 4: Column loop
+ "mov x20, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "mov z24.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "mov z28.s, #0x0\n"
+ "mov z29.s, #0x0\n"
+ "mov z30.s, #0x0\n"
+ "mov z31.s, #0x0\n"
+ "45:" // Height 4: setup done
+ "mov x26, #0x0\n"
+ "46:" // Height 4: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 47f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "ldr x21, [x20, #0x18]\n"
+ "cbnz x26, 48f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "add x21, x21, x20\n"
+ "b 48f\n"
+ "47:" // Height 4: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
+ "48:" // Height 4: input setup done
+ "cmp x25, #0x10\n"
+ "ble 51f\n"
+ "49:" // Height 4: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x44a01cb0 // sudot z16.s, z5.b, z0.b[0]\n"
+ ".inst 0x44a11cb4 // sudot z20.s, z5.b, z1.b[0]\n"
+ ".inst 0x44a01d51 // sudot z17.s, z10.b, z0.b[0]\n"
+ ".inst 0x44a11d55 // sudot z21.s, z10.b, z1.b[0]\n"
+ ".inst 0x44a21cb8 // sudot z24.s, z5.b, z2.b[0]\n"
+ ".inst 0x44a31cbc // sudot z28.s, z5.b, z3.b[0]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
+ ".inst 0x44a21d59 // sudot z25.s, z10.b, z2.b[0]\n"
+ ".inst 0x44a31d5d // sudot z29.s, z10.b, z3.b[0]\n"
+ ".inst 0x44a01c92 // sudot z18.s, z4.b, z0.b[0]\n"
+ ".inst 0x44a11c96 // sudot z22.s, z4.b, z1.b[0]\n"
+ ".inst 0x44a21c9a // sudot z26.s, z4.b, z2.b[0]\n"
+ ".inst 0x44a31c9e // sudot z30.s, z4.b, z3.b[0]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ ".inst 0x44a01d33 // sudot z19.s, z9.b, z0.b[0]\n"
+ ".inst 0x44a11d37 // sudot z23.s, z9.b, z1.b[0]\n"
+ ".inst 0x44a21d3b // sudot z27.s, z9.b, z2.b[0]\n"
+ ".inst 0x44a31d3f // sudot z31.s, z9.b, z3.b[0]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x44a81d10 // sudot z16.s, z8.b, z0.b[1]\n"
+ ".inst 0x44a91d14 // sudot z20.s, z8.b, z1.b[1]\n"
+ ".inst 0x44aa1d18 // sudot z24.s, z8.b, z2.b[1]\n"
+ ".inst 0x44ab1d1c // sudot z28.s, z8.b, z3.b[1]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ ".inst 0x44a81cf1 // sudot z17.s, z7.b, z0.b[1]\n"
+ ".inst 0x44a91cf5 // sudot z21.s, z7.b, z1.b[1]\n"
+ ".inst 0x44aa1cf9 // sudot z25.s, z7.b, z2.b[1]\n"
+ ".inst 0x44ab1cfd // sudot z29.s, z7.b, z3.b[1]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x44a81cd2 // sudot z18.s, z6.b, z0.b[1]\n"
+ ".inst 0x44a91cd6 // sudot z22.s, z6.b, z1.b[1]\n"
+ ".inst 0x44aa1cda // sudot z26.s, z6.b, z2.b[1]\n"
+ ".inst 0x44ab1cde // sudot z30.s, z6.b, z3.b[1]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ ".inst 0x44a81cb3 // sudot z19.s, z5.b, z0.b[1]\n"
+ ".inst 0x44a91cb7 // sudot z23.s, z5.b, z1.b[1]\n"
+ ".inst 0x44aa1cbb // sudot z27.s, z5.b, z2.b[1]\n"
+ ".inst 0x44ab1cbf // sudot z31.s, z5.b, z3.b[1]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x44b01c90 // sudot z16.s, z4.b, z0.b[2]\n"
+ ".inst 0x44b11c94 // sudot z20.s, z4.b, z1.b[2]\n"
+ ".inst 0x44b21c98 // sudot z24.s, z4.b, z2.b[2]\n"
+ ".inst 0x44b31c9c // sudot z28.s, z4.b, z3.b[2]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x44b01d51 // sudot z17.s, z10.b, z0.b[2]\n"
+ ".inst 0x44b11d55 // sudot z21.s, z10.b, z1.b[2]\n"
+ ".inst 0x44b21d59 // sudot z25.s, z10.b, z2.b[2]\n"
+ ".inst 0x44b31d5d // sudot z29.s, z10.b, z3.b[2]\n"
+ ".inst 0x44b01d32 // sudot z18.s, z9.b, z0.b[2]\n"
+ ".inst 0x44b11d36 // sudot z22.s, z9.b, z1.b[2]\n"
+ ".inst 0x44b21d3a // sudot z26.s, z9.b, z2.b[2]\n"
+ ".inst 0x44b31d3e // sudot z30.s, z9.b, z3.b[2]\n"
+ ".inst 0x44b01d13 // sudot z19.s, z8.b, z0.b[2]\n"
+ ".inst 0x44b11d17 // sudot z23.s, z8.b, z1.b[2]\n"
+ ".inst 0x44b21d1b // sudot z27.s, z8.b, z2.b[2]\n"
+ ".inst 0x44b31d1f // sudot z31.s, z8.b, z3.b[2]\n"
+ ".inst 0x44b81cf0 // sudot z16.s, z7.b, z0.b[3]\n"
+ ".inst 0x44b91cf4 // sudot z20.s, z7.b, z1.b[3]\n"
+ ".inst 0x44ba1cf8 // sudot z24.s, z7.b, z2.b[3]\n"
+ ".inst 0x44bb1cfc // sudot z28.s, z7.b, z3.b[3]\n"
+ ".inst 0x44b81cd1 // sudot z17.s, z6.b, z0.b[3]\n"
+ ".inst 0x44b91cd5 // sudot z21.s, z6.b, z1.b[3]\n"
+ ".inst 0x44ba1cd9 // sudot z25.s, z6.b, z2.b[3]\n"
+ ".inst 0x44bb1cdd // sudot z29.s, z6.b, z3.b[3]\n"
+ ".inst 0x44b81cb2 // sudot z18.s, z5.b, z0.b[3]\n"
+ ".inst 0x44b91cb6 // sudot z22.s, z5.b, z1.b[3]\n"
+ ".inst 0x44ba1cba // sudot z26.s, z5.b, z2.b[3]\n"
+ ".inst 0x44bb1cbe // sudot z30.s, z5.b, z3.b[3]\n"
+ ".inst 0x44b81c93 // sudot z19.s, z4.b, z0.b[3]\n"
+ ".inst 0x44b91c97 // sudot z23.s, z4.b, z1.b[3]\n"
+ ".inst 0x44ba1c9b // sudot z27.s, z4.b, z2.b[3]\n"
+ ".inst 0x44bb1c9f // sudot z31.s, z4.b, z3.b[3]\n"
+ "tbnz %x[flags], #31, 50f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z12.s, z1.b, z15.b\n"
+ "udot z13.s, z2.b, z15.b\n"
+ "udot z14.s, z3.b, z15.b\n"
+ "50:" // Height 4: Multiply loop: unique 7: skip row sum
+ "sub x25, x25, #0x10\n"
+ "cmp x25, #0x10\n"
+ "bgt 49b\n"
+ "51:" // Height 4: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "ld1rqb { z0.b }, p0/Z, [x24]\n"
+ "ld1rqb { z1.b }, p0/Z, [x23]\n"
+ "ld1rqb { z2.b }, p0/Z, [x22]\n"
+ "ld1rqb { z3.b }, p0/Z, [x21]\n"
+ ".inst 0x44a01cf0 // sudot z16.s, z7.b, z0.b[0]\n"
+ ".inst 0x44a11cf4 // sudot z20.s, z7.b, z1.b[0]\n"
+ ".inst 0x44a01cd1 // sudot z17.s, z6.b, z0.b[0]\n"
+ ".inst 0x44a11cd5 // sudot z21.s, z6.b, z1.b[0]\n"
+ ".inst 0x44a01cb2 // sudot z18.s, z5.b, z0.b[0]\n"
+ ".inst 0x44a11cb6 // sudot z22.s, z5.b, z1.b[0]\n"
+ ".inst 0x44a21cf8 // sudot z24.s, z7.b, z2.b[0]\n"
+ ".inst 0x44a31cfc // sudot z28.s, z7.b, z3.b[0]\n"
+ ".inst 0x44a21cd9 // sudot z25.s, z6.b, z2.b[0]\n"
+ ".inst 0x44a31cdd // sudot z29.s, z6.b, z3.b[0]\n"
+ ".inst 0x44a21cba // sudot z26.s, z5.b, z2.b[0]\n"
+ ".inst 0x44a31cbe // sudot z30.s, z5.b, z3.b[0]\n"
+ ".inst 0x44a01c93 // sudot z19.s, z4.b, z0.b[0]\n"
+ ".inst 0x44a11c97 // sudot z23.s, z4.b, z1.b[0]\n"
+ ".inst 0x44a21c9b // sudot z27.s, z4.b, z2.b[0]\n"
+ ".inst 0x44a31c9f // sudot z31.s, z4.b, z3.b[0]\n"
+ "ble 52f\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44a81cf0 // sudot z16.s, z7.b, z0.b[1]\n"
+ ".inst 0x44a91cf4 // sudot z20.s, z7.b, z1.b[1]\n"
+ ".inst 0x44aa1cf8 // sudot z24.s, z7.b, z2.b[1]\n"
+ ".inst 0x44ab1cfc // sudot z28.s, z7.b, z3.b[1]\n"
+ ".inst 0x44a81cd1 // sudot z17.s, z6.b, z0.b[1]\n"
+ ".inst 0x44a91cd5 // sudot z21.s, z6.b, z1.b[1]\n"
+ ".inst 0x44aa1cd9 // sudot z25.s, z6.b, z2.b[1]\n"
+ ".inst 0x44ab1cdd // sudot z29.s, z6.b, z3.b[1]\n"
+ ".inst 0x44a81cb2 // sudot z18.s, z5.b, z0.b[1]\n"
+ ".inst 0x44a91cb6 // sudot z22.s, z5.b, z1.b[1]\n"
+ ".inst 0x44aa1cba // sudot z26.s, z5.b, z2.b[1]\n"
+ ".inst 0x44ab1cbe // sudot z30.s, z5.b, z3.b[1]\n"
+ ".inst 0x44a81c93 // sudot z19.s, z4.b, z0.b[1]\n"
+ ".inst 0x44a91c97 // sudot z23.s, z4.b, z1.b[1]\n"
+ ".inst 0x44aa1c9b // sudot z27.s, z4.b, z2.b[1]\n"
+ ".inst 0x44ab1c9f // sudot z31.s, z4.b, z3.b[1]\n"
+ "ble 52f\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x4\n"
+ "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44b01cf0 // sudot z16.s, z7.b, z0.b[2]\n"
+ ".inst 0x44b11cf4 // sudot z20.s, z7.b, z1.b[2]\n"
+ ".inst 0x44b21cf8 // sudot z24.s, z7.b, z2.b[2]\n"
+ ".inst 0x44b31cfc // sudot z28.s, z7.b, z3.b[2]\n"
+ ".inst 0x44b01cd1 // sudot z17.s, z6.b, z0.b[2]\n"
+ ".inst 0x44b11cd5 // sudot z21.s, z6.b, z1.b[2]\n"
+ ".inst 0x44b21cd9 // sudot z25.s, z6.b, z2.b[2]\n"
+ ".inst 0x44b31cdd // sudot z29.s, z6.b, z3.b[2]\n"
+ ".inst 0x44b01cb2 // sudot z18.s, z5.b, z0.b[2]\n"
+ ".inst 0x44b11cb6 // sudot z22.s, z5.b, z1.b[2]\n"
+ ".inst 0x44b21cba // sudot z26.s, z5.b, z2.b[2]\n"
+ ".inst 0x44b31cbe // sudot z30.s, z5.b, z3.b[2]\n"
+ ".inst 0x44b01c93 // sudot z19.s, z4.b, z0.b[2]\n"
+ ".inst 0x44b11c97 // sudot z23.s, z4.b, z1.b[2]\n"
+ ".inst 0x44b21c9b // sudot z27.s, z4.b, z2.b[2]\n"
+ ".inst 0x44b31c9f // sudot z31.s, z4.b, z3.b[2]\n"
+ "ble 52f\n"
+ "ld1b { z7.b }, p2/Z, [x28]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ ".inst 0x44b81cf0 // sudot z16.s, z7.b, z0.b[3]\n"
+ ".inst 0x44b91cf4 // sudot z20.s, z7.b, z1.b[3]\n"
+ ".inst 0x44ba1cf8 // sudot z24.s, z7.b, z2.b[3]\n"
+ ".inst 0x44bb1cfc // sudot z28.s, z7.b, z3.b[3]\n"
+ ".inst 0x44b81cd1 // sudot z17.s, z6.b, z0.b[3]\n"
+ ".inst 0x44b91cd5 // sudot z21.s, z6.b, z1.b[3]\n"
+ ".inst 0x44ba1cd9 // sudot z25.s, z6.b, z2.b[3]\n"
+ ".inst 0x44bb1cdd // sudot z29.s, z6.b, z3.b[3]\n"
+ ".inst 0x44b81cb2 // sudot z18.s, z5.b, z0.b[3]\n"
+ ".inst 0x44b91cb6 // sudot z22.s, z5.b, z1.b[3]\n"
+ ".inst 0x44ba1cba // sudot z26.s, z5.b, z2.b[3]\n"
+ ".inst 0x44bb1cbe // sudot z30.s, z5.b, z3.b[3]\n"
+ ".inst 0x44b81c93 // sudot z19.s, z4.b, z0.b[3]\n"
+ ".inst 0x44b91c97 // sudot z23.s, z4.b, z1.b[3]\n"
+ ".inst 0x44ba1c9b // sudot z27.s, z4.b, z2.b[3]\n"
+ ".inst 0x44bb1c9f // sudot z31.s, z4.b, z3.b[3]\n"
+ "52:" // Height 4: Multiply loop: multiply skip
+ "tbnz %x[flags], #31, 53f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z12.s, z1.b, z15.b\n"
+ "udot z13.s, z2.b, z15.b\n"
+ "udot z14.s, z3.b, z15.b\n"
+ "53:" // Height 4: Multiply loop: unique 8: skip row sum
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 46b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "add x24, x27, x20\n"
+ "add x23, x24, x20\n"
+ "add x22, x23, x20\n"
+ "tbnz %x[flags], #31, 54f\n"
+ "mov x21, #0x4\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "neg z0.s, p2/M, z0.s\n"
+ "saddv d11, p0, z11.s\n"
+ "saddv d12, p0, z12.s\n"
+ "saddv d13, p0, z13.s\n"
+ "saddv d14, p0, z14.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "mov z12.s, z12.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z0.s\n"
+ "mul z12.s, p2/M, z12.s, z0.s\n"
+ "mov z13.s, z13.s[0]\n"
+ "mov z14.s, z14.s[0]\n"
+ "mul z13.s, p2/M, z13.s, z0.s\n"
+ "mul z14.s, p2/M, z14.s, z0.s\n"
+ "54:" // Height 4: skip row sum fixup
+ "add z16.s, z16.s, z11.s\n"
+ "add z17.s, z17.s, z11.s\n"
+ "ld1w { z4.s }, p2/Z, [x10]\n"
+ "ld1w { z0.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "add z18.s, z18.s, z11.s\n"
+ "add z19.s, z19.s, z11.s\n"
+ "ld1w { z3.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "add z20.s, z20.s, z12.s\n"
+ "add z21.s, z21.s, z12.s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "add z22.s, z22.s, z12.s\n"
+ "add z23.s, z23.s, z12.s\n"
+ "ld1rw { z1.s }, p2/Z, [x20]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z25.s, z25.s, z13.s\n"
+ "addvl x10, x10, #4\n"
+ "add z26.s, z26.s, z13.s\n"
+ "add z27.s, z27.s, z13.s\n"
+ "add z28.s, z28.s, z14.s\n"
+ "add z29.s, z29.s, z14.s\n"
+ "add z30.s, z30.s, z14.s\n"
+ "add z31.s, z31.s, z14.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z0.s\n"
+ "add z18.s, z18.s, z3.s\n"
+ "add z19.s, z19.s, z2.s\n"
+ "add z20.s, z20.s, z4.s\n"
+ "add z21.s, z21.s, z0.s\n"
+ "add z22.s, z22.s, z3.s\n"
+ "add z23.s, z23.s, z2.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z0.s\n"
+ "add z26.s, z26.s, z3.s\n"
+ "add z27.s, z27.s, z2.s\n"
+ "add z28.s, z28.s, z4.s\n"
+ "add z29.s, z29.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z30.s, z30.s, z3.s\n"
+ "add z31.s, z31.s, z2.s\n"
+ ".inst 0x04a17610 // sqrdmulh z16.s, z16.s, z1.s\n"
+ ".inst 0x04a17631 // sqrdmulh z17.s, z17.s, z1.s\n"
+ ".inst 0x04a17652 // sqrdmulh z18.s, z18.s, z1.s\n"
+ ".inst 0x04a17673 // sqrdmulh z19.s, z19.s, z1.s\n"
+ ".inst 0x04a17694 // sqrdmulh z20.s, z20.s, z1.s\n"
+ ".inst 0x04a176b5 // sqrdmulh z21.s, z21.s, z1.s\n"
+ ".inst 0x04a176d6 // sqrdmulh z22.s, z22.s, z1.s\n"
+ ".inst 0x04a176f7 // sqrdmulh z23.s, z23.s, z1.s\n"
+ ".inst 0x04a17718 // sqrdmulh z24.s, z24.s, z1.s\n"
+ ".inst 0x04a17739 // sqrdmulh z25.s, z25.s, z1.s\n"
+ ".inst 0x04a1775a // sqrdmulh z26.s, z26.s, z1.s\n"
+ ".inst 0x04a1777b // sqrdmulh z27.s, z27.s, z1.s\n"
+ ".inst 0x04a1779c // sqrdmulh z28.s, z28.s, z1.s\n"
+ ".inst 0x04a177bd // sqrdmulh z29.s, z29.s, z1.s\n"
+ ".inst 0x04a177de // sqrdmulh z30.s, z30.s, z1.s\n"
+ ".inst 0x04a177ff // sqrdmulh z31.s, z31.s, z1.s\n"
+ "tbz %x[flags], #5, 55f\n"
+ "and z2.d, z16.d, z0.d\n"
+ "and z1.d, z17.d, z0.d\n"
+ "and z7.d, z18.d, z0.d\n"
+ "and z6.d, z19.d, z0.d\n"
+ "and z5.d, z20.d, z0.d\n"
+ "and z4.d, z21.d, z0.d\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "and z3.d, z22.d, z0.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z2.s\n"
+ "sqadd z17.s, z17.s, z1.s\n"
+ "and z2.d, z23.d, z0.d\n"
+ "and z1.d, z24.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ "sqadd z18.s, z18.s, z7.s\n"
+ "sqadd z19.s, z19.s, z6.s\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "sqadd z20.s, z20.s, z5.s\n"
+ "sqadd z21.s, z21.s, z4.s\n"
+ "sqadd z22.s, z22.s, z3.s\n"
+ "and z7.d, z25.d, z0.d\n"
+ "sqadd z23.s, z23.s, z2.s\n"
+ "sqadd z24.s, z24.s, z1.s\n"
+ "and z6.d, z26.d, z0.d\n"
+ "and z5.d, z27.d, z0.d\n"
+ "and z4.d, z28.d, z0.d\n"
+ "and z3.d, z29.d, z0.d\n"
+ "and z2.d, z30.d, z0.d\n"
+ "and z1.d, z31.d, z0.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "sqadd z25.s, z25.s, z7.s\n"
+ "sqadd z26.s, z26.s, z6.s\n"
+ "sqadd z27.s, z27.s, z5.s\n"
+ "sqadd z28.s, z28.s, z4.s\n"
+ "sqadd z29.s, z29.s, z3.s\n"
+ "sqadd z30.s, z30.s, z2.s\n"
+ "sqadd z31.s, z31.s, z1.s\n"
+ "55:" // Height 4: no shift correction
+ "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ "ld1rw { z2.s }, p2/Z, [x20]\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ "add z16.s, z16.s, z2.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "add z17.s, z17.s, z2.s\n"
+ "add z18.s, z18.s, z2.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
+ "add z19.s, z19.s, z2.s\n"
+ "add z20.s, z20.s, z2.s\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
+ ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
+ "add z21.s, z21.s, z2.s\n"
+ "add z22.s, z22.s, z2.s\n"
+ ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
+ ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
+ "add z23.s, z23.s, z2.s\n"
+ "add z24.s, z24.s, z2.s\n"
+ ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "add z25.s, z25.s, z2.s\n"
+ "add z26.s, z26.s, z2.s\n"
+ "ld1rw { z1.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z2.s\n"
+ "add z28.s, z28.s, z2.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z29.s, z29.s, z2.s\n"
+ "add z30.s, z30.s, z2.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z31.s, z31.s, z2.s\n"
+ "smin z16.s, p2/M, z16.s, z1.s\n"
+ "smin z17.s, p2/M, z17.s, z1.s\n"
+ "smin z18.s, p2/M, z18.s, z1.s\n"
+ "smin z19.s, p2/M, z19.s, z1.s\n"
+ "smin z20.s, p2/M, z20.s, z1.s\n"
+ "smin z21.s, p2/M, z21.s, z1.s\n"
+ "smin z22.s, p2/M, z22.s, z1.s\n"
+ "smin z23.s, p2/M, z23.s, z1.s\n"
+ "smin z24.s, p2/M, z24.s, z1.s\n"
+ "smin z25.s, p2/M, z25.s, z1.s\n"
+ "smin z26.s, p2/M, z26.s, z1.s\n"
+ "smin z27.s, p2/M, z27.s, z1.s\n"
+ "smin z28.s, p2/M, z28.s, z1.s\n"
+ "smin z29.s, p2/M, z29.s, z1.s\n"
+ "smin z30.s, p2/M, z30.s, z1.s\n"
+ "smin z31.s, p2/M, z31.s, z1.s\n"
+ "smax z16.s, p2/M, z16.s, z0.s\n"
+ "smax z17.s, p2/M, z17.s, z0.s\n"
+ "smax z18.s, p2/M, z18.s, z0.s\n"
+ "smax z19.s, p2/M, z19.s, z0.s\n"
+ "smax z20.s, p2/M, z20.s, z0.s\n"
+ "smax z21.s, p2/M, z21.s, z0.s\n"
+ "smax z22.s, p2/M, z22.s, z0.s\n"
+ "smax z23.s, p2/M, z23.s, z0.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "smax z24.s, p2/M, z24.s, z0.s\n"
+ "smax z25.s, p2/M, z25.s, z0.s\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
+ "smax z26.s, p2/M, z26.s, z0.s\n"
+ "smax z27.s, p2/M, z27.s, z0.s\n"
+ "uzp1 z20.h, z20.h, z21.h\n"
+ "smax z28.s, p2/M, z28.s, z0.s\n"
+ "smax z29.s, p2/M, z29.s, z0.s\n"
+ "uzp1 z17.h, z22.h, z23.h\n"
+ "smax z30.s, p2/M, z30.s, z0.s\n"
+ "smax z31.s, p2/M, z31.s, z0.s\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "uzp1 z18.h, z26.h, z27.h\n"
+ "uzp1 z28.h, z28.h, z29.h\n"
+ "uzp1 z20.b, z20.b, z17.b\n"
+ "uzp1 z17.h, z30.h, z31.h\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z18.b\n"
+ "uzp1 z28.b, z28.b, z17.b\n"
+ "st1b { z20.b }, p1, [x24]\n"
+ "st1b { z24.b }, p1, [x23]\n"
+ "st1b { z28.b }, p1, [x22]\n"
+ "56:" // Height 4: Writeback done
+ "decw x9, ALL, MUL #4\n"
+ "cmp x9, XZR\n"
+ "bgt 44b\n"
+ "subs %x[M], %x[M], #0x4\n"
+ "beq 58f\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 57f\n"
+ "add x21, x21, #0x4\n"
+ "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "57:" // Update direct input
+ "mov x20, #0x4\n"
+ "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "b 1b\n"
+ "58:" // Exit
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
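For readers tracing the epilogues above: a minimal scalar sketch of the requantization each 32-bit accumulator undergoes, assuming the Requantize32 semantics implied by the asm operands (b_offset, per_layer_mul, per_layer_right_shift, c_offset, minval, maxval) and that per_layer_right_shift is stored as the negative value fed directly to srshl. The helper name is illustrative, not part of the patch, and the optional sqadd shift-correction pass (flags bit 5) is omitted for brevity.

#include <cstdint>

// Hypothetical scalar model of the SVE requantize epilogue; not patch code.
// acc: one sudot int32 accumulator (z16..z23); row_sum: the udot-with-ones sum
// of that row's activation bytes (z11/z12); col_bias: entry from the col_bias[]
// array the kernel receives.
static inline uint8_t requantize_element(int32_t acc, int32_t row_sum,
                                         int32_t col_bias, const Requantize32 *qp)
{
    int32_t v = acc + (-qp->b_offset) * row_sum + col_bias;
    // sqrdmulh: rounding, doubling high half of v * per_layer_mul.
    int32_t m = (int32_t)(((int64_t)v * qp->per_layer_mul + (1LL << 30)) >> 31);
    // srshl with a negative shift operand acts as a rounding right shift.
    int shift = -qp->per_layer_right_shift;
    int32_t s = (shift > 0) ? (int32_t)(((int64_t)m + (1LL << (shift - 1))) >> shift) : m;
    int32_t out = s + qp->c_offset;             // c_offset add
    if (out > qp->maxval) out = qp->maxval;     // smin clamp
    if (out < qp->minval) out = qp->minval;     // smax clamp
    return (uint8_t)out;                        // uzp1 narrowing + st1b store
}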
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL.hpp
new file mode 100644
index 0000000000..b073731751
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL.hpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<uint8_t>, \
+ size_t, size_t, \
+ const int8_t *, \
+ IndirectOutputArg<uint8_t>, \
+ const Requantize32 *, const int32_t *, unsigned int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_hybrid_u8s8qa_mmla_4x4VL( ARGLIST );
+
+class cls_sve_hybrid_u8s8qa_mmla_4x4VL
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef uint8_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 4;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<int32_t>() * 4;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 8;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return false;
+ }
+
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 4, 8, 8> transforms = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 47.37 };
+ case CPUModel::A510:
+ return { 20.88 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_hybrid_u8s8qa_mmla_4x4VL;
+ cls_sve_hybrid_u8s8qa_mmla_4x4VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
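The blocking parameters above fix the tile shape: out_height() is always four rows, out_width() scales with the SVE vector length (sixteen uint8 outputs per row at a 128-bit VL, four times that at 512 bits), and k_unroll() reflects the eight-byte depth one usmmla step consumes. A hedged sketch of how a caller might derive tile counts from them; the function and variable names are illustrative, not arm_gemm API:

#include <cstddef>

// Hypothetical scheduler-side helper, not part of the patch: rounds the
// problem size up to whole kernel tiles using the blocking parameters
// declared in the class above.
static void count_tiles(size_t M, size_t N, unsigned int &tiles_m, unsigned int &tiles_n)
{
    using K = cls_sve_hybrid_u8s8qa_mmla_4x4VL;
    const unsigned int h = K::out_height(); // always 4 rows
    const unsigned int w = K::out_width();  // 4 * get_vector_length<int32_t>() columns
    tiles_m = (unsigned int)((M + h - 1) / h);
    tiles_n = (unsigned int)((N + w - 1) / w);
}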
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL/generic.cpp
new file mode 100644
index 0000000000..01bdac2967
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8qa_mmla_4x4VL/generic.cpp
@@ -0,0 +1,1418 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+#include <cstdint>
+
+namespace arm_gemm {
+
+void sve_hybrid_u8s8qa_mmla_4x4VL (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<uint8_t> A_arg,
+ size_t M, size_t N, const int8_t *B_ptr, IndirectOutputArg<uint8_t> output_arg,
+ const Requantize32 *qp, const int32_t *col_bias, unsigned int
+)
+{
+ struct KernelArgs {
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const int8_t *B_ptr = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ void *output_ptr = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *input_ptr;
+
+ if (output_arg.is_indirect) {
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ ka.output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ if (qp->c_offset > qp->minval) {
+ flags |= 0x20;
+ }
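+ // Flag bits consumed by the assembly below:
+ //   bit 2 (0x4)  - output is indirect (set above)
+ //   bit 3 (0x8)  - input is indirect, per-string row-pointer tables (tbz #3)
+ //   bit 5 (0x20) - set when c_offset > minval; enables the sqadd rounding
+ //                  correction before srshl (tbz #5)
+ //   bit 31       - cleared at each height entry (bic) and set during the first
+ //                  column strip (orr), so the row-sum fixup appears to run only
+ //                  once per row block and is reused across column strips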
+ __asm__ __volatile__(
+ "ptrue p2.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x4\n"
+ "bge 43f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 29f\n"
+ "beq 15f\n"
+ "mov x10, %x[col_bias]\n"
+ "mov z11.s, #0x0\n"
+ "mov z15.b, #0x1\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "2:" // Height 1: Column loop
+ "mov x20, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "3:" // Height 1: setup done
+ "mov x26, #0x0\n"
+ "4:" // Height 1: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 5f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "cbnz x26, 6f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "b 6f\n"
+ "5:" // Height 1: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "6:" // Height 1: input setup done
+ "cmp x25, #0x10\n"
+ "ble 9f\n"
+ "7:" // Height 1: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z30.b }, p2/Z, [x28]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "add x24, x24, #0x10\n"
+ "trn1 z0.d, z1.d, z31.d\n"
+ ".inst 0x459e9810 // usmmla z16.s, z0.b, z30.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
+ "trn2 z1.d, z1.d, z31.d\n"
+ ".inst 0x459d9814 // usmmla z20.s, z0.b, z29.b\n"
+ ".inst 0x459c9811 // usmmla z17.s, z0.b, z28.b\n"
+ ".inst 0x459b9815 // usmmla z21.s, z0.b, z27.b\n"
+ ".inst 0x459a9812 // usmmla z18.s, z0.b, z26.b\n"
+ "ld1b { z31.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45999816 // usmmla z22.s, z0.b, z25.b\n"
+ ".inst 0x45989813 // usmmla z19.s, z0.b, z24.b\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x45889817 // usmmla z23.s, z0.b, z8.b\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x459f9830 // usmmla z16.s, z1.b, z31.b\n"
+ "ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x459e9834 // usmmla z20.s, z1.b, z30.b\n"
+ ".inst 0x459d9831 // usmmla z17.s, z1.b, z29.b\n"
+ ".inst 0x459c9835 // usmmla z21.s, z1.b, z28.b\n"
+ ".inst 0x459b9832 // usmmla z18.s, z1.b, z27.b\n"
+ ".inst 0x459a9836 // usmmla z22.s, z1.b, z26.b\n"
+ ".inst 0x45999833 // usmmla z19.s, z1.b, z25.b\n"
+ ".inst 0x45989837 // usmmla z23.s, z1.b, z24.b\n"
+ "tbnz %x[flags], #31, 8f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z11.s, z1.b, z15.b\n"
+ "8:" // Height 1: Multiply loop: unique 1: skip row sum
+ "sub x25, x25, #0x10\n"
+ "cmp x25, #0x10\n"
+ "bgt 7b\n"
+ "9:" // Height 1: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "subs x25, x25, #0x8\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z31.d\n"
+ ".inst 0x45989810 // usmmla z16.s, z0.b, z24.b\n"
+ "ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ "trn2 z1.d, z1.d, z31.d\n"
+ ".inst 0x459e9814 // usmmla z20.s, z0.b, z30.b\n"
+ ".inst 0x459d9811 // usmmla z17.s, z0.b, z29.b\n"
+ ".inst 0x459c9815 // usmmla z21.s, z0.b, z28.b\n"
+ ".inst 0x459b9812 // usmmla z18.s, z0.b, z27.b\n"
+ ".inst 0x459a9816 // usmmla z22.s, z0.b, z26.b\n"
+ ".inst 0x45999813 // usmmla z19.s, z0.b, z25.b\n"
+ ".inst 0x45989817 // usmmla z23.s, z0.b, z24.b\n"
+ "ble 10f\n"
+ "ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x45989830 // usmmla z16.s, z1.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x459e9834 // usmmla z20.s, z1.b, z30.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x459d9831 // usmmla z17.s, z1.b, z29.b\n"
+ ".inst 0x459c9835 // usmmla z21.s, z1.b, z28.b\n"
+ ".inst 0x459b9832 // usmmla z18.s, z1.b, z27.b\n"
+ ".inst 0x459a9836 // usmmla z22.s, z1.b, z26.b\n"
+ ".inst 0x45999833 // usmmla z19.s, z1.b, z25.b\n"
+ ".inst 0x45989837 // usmmla z23.s, z1.b, z24.b\n"
+ "10:" // Height 1: Multiply loop: multiply skip
+ "tbnz %x[flags], #31, 11f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z11.s, z1.b, z15.b\n"
+ "11:" // Height 1: Multiply loop: unique 2: skip row sum
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 4b\n"
+ "uzp1 z16.d, z16.d, z20.d\n"
+ "uzp1 z17.d, z17.d, z21.d\n"
+ "uzp1 z18.d, z18.d, z22.d\n"
+ "uzp1 z19.d, z19.d, z23.d\n"
+ "mov z23.d, z16.d\n"
+ "tbnz %x[flags], #31, 12f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ ".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ "ld1rw { z9.s }, p2/Z, [x20]\n"
+ "neg z9.s, p2/M, z9.s\n"
+ "mov z11.s, z11.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z9.s\n"
+ "12:" // Height 1: skip row sum fixup
+ "add z23.s, z23.s, z11.s\n"
+ "add z17.s, z17.s, z11.s\n"
+ "ld1w { z22.s }, p2/Z, [x10]\n"
+ "ld1w { z24.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "add z18.s, z18.s, z11.s\n"
+ "add z19.s, z19.s, z11.s\n"
+ "ld1w { z21.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "add z23.s, z23.s, z22.s\n"
+ "add z17.s, z17.s, z24.s\n"
+ "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "addvl x10, x10, #4\n"
+ "add z18.s, z18.s, z21.s\n"
+ "add z19.s, z19.s, z20.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ ".inst 0x04b076f7 // sqrdmulh z23.s, z23.s, z16.s\n"
+ ".inst 0x04b07631 // sqrdmulh z17.s, z17.s, z16.s\n"
+ ".inst 0x04b07652 // sqrdmulh z18.s, z18.s, z16.s\n"
+ ".inst 0x04b07673 // sqrdmulh z19.s, z19.s, z16.s\n"
+ "tbz %x[flags], #5, 13f\n"
+ "and z22.d, z23.d, z0.d\n"
+ "and z21.d, z17.d, z0.d\n"
+ "and z20.d, z18.d, z0.d\n"
+ "and z16.d, z19.d, z0.d\n"
+ "asr z22.s, z22.s, #0x1f\n"
+ "asr z21.s, z21.s, #0x1f\n"
+ "asr z20.s, z20.s, #0x1f\n"
+ "asr z16.s, z16.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z22.s\n"
+ "sqadd z17.s, z17.s, z21.s\n"
+ "sqadd z18.s, z18.s, z20.s\n"
+ "sqadd z19.s, z19.s, z16.s\n"
+ "13:" // Height 1: no shift correction
+ "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ "ld1rw { z21.s }, p2/Z, [x20]\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "ld1rw { z20.s }, p2/Z, [x20]\n"
+ "add z23.s, z23.s, z21.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z21.s\n"
+ "add z18.s, z18.s, z21.s\n"
+ "ld1rw { z16.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z21.s\n"
+ "smin z23.s, p2/M, z23.s, z20.s\n"
+ "smin z17.s, p2/M, z17.s, z20.s\n"
+ "smin z18.s, p2/M, z18.s, z20.s\n"
+ "smin z19.s, p2/M, z19.s, z20.s\n"
+ "smax z23.s, p2/M, z23.s, z16.s\n"
+ "smax z17.s, p2/M, z17.s, z16.s\n"
+ "smax z18.s, p2/M, z18.s, z16.s\n"
+ "smax z19.s, p2/M, z19.s, z16.s\n"
+ "uzp1 z23.h, z23.h, z17.h\n"
+ "uzp1 z16.h, z18.h, z19.h\n"
+ "uzp1 z23.b, z23.b, z16.b\n"
+ "st1b { z23.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "14:" // Height 1: Writeback done
+ "decw x9, ALL, MUL #4\n"
+ "cmp x9, XZR\n"
+ "bgt 2b\n"
+ "b 58f\n"
+ "15:" // Height 2
+ "mov x10, %x[col_bias]\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov z15.b, #0x1\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "16:" // Height 2: Column loop
+ "mov x20, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "17:" // Height 2: setup done
+ "mov x26, #0x0\n"
+ "18:" // Height 2: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 19f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "cbnz x26, 20f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "b 20f\n"
+ "19:" // Height 2: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "20:" // Height 2: input setup done
+ "cmp x25, #0x10\n"
+ "ble 23f\n"
+ "21:" // Height 2: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z31.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z25.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1b { z24.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ ".inst 0x459f9810 // usmmla z16.s, z0.b, z31.b\n"
+ "ld1b { z25.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
+ ".inst 0x459e9814 // usmmla z20.s, z0.b, z30.b\n"
+ ".inst 0x459d9811 // usmmla z17.s, z0.b, z29.b\n"
+ ".inst 0x459c9815 // usmmla z21.s, z0.b, z28.b\n"
+ ".inst 0x459b9812 // usmmla z18.s, z0.b, z27.b\n"
+ ".inst 0x459a9816 // usmmla z22.s, z0.b, z26.b\n"
+ ".inst 0x45989813 // usmmla z19.s, z0.b, z24.b\n"
+ "ld1b { z24.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x45999817 // usmmla z23.s, z0.b, z25.b\n"
+ "ld1b { z30.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x45989830 // usmmla z16.s, z1.b, z24.b\n"
+ "ld1b { z26.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x459e9834 // usmmla z20.s, z1.b, z30.b\n"
+ ".inst 0x459d9831 // usmmla z17.s, z1.b, z29.b\n"
+ ".inst 0x459c9835 // usmmla z21.s, z1.b, z28.b\n"
+ ".inst 0x459b9832 // usmmla z18.s, z1.b, z27.b\n"
+ ".inst 0x459a9836 // usmmla z22.s, z1.b, z26.b\n"
+ ".inst 0x45999833 // usmmla z19.s, z1.b, z25.b\n"
+ ".inst 0x45989837 // usmmla z23.s, z1.b, z24.b\n"
+ "tbnz %x[flags], #31, 22f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z11.s, z1.b, z15.b\n"
+ "22:" // Height 2: Multiply loop: unique 3: skip row sum
+ "sub x25, x25, #0x10\n"
+ "cmp x25, #0x10\n"
+ "bgt 21b\n"
+ "23:" // Height 2: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z29.b }, p2/Z, [x28]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x8\n"
+ "ld1b { z4.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "trn1 z0.d, z1.d, z24.d\n"
+ "trn2 z1.d, z1.d, z24.d\n"
+ ".inst 0x459d9810 // usmmla z16.s, z0.b, z29.b\n"
+ "ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x459c9814 // usmmla z20.s, z0.b, z28.b\n"
+ ".inst 0x45849811 // usmmla z17.s, z0.b, z4.b\n"
+ ".inst 0x459b9815 // usmmla z21.s, z0.b, z27.b\n"
+ ".inst 0x459a9812 // usmmla z18.s, z0.b, z26.b\n"
+ ".inst 0x45869816 // usmmla z22.s, z0.b, z6.b\n"
+ ".inst 0x45999813 // usmmla z19.s, z0.b, z25.b\n"
+ ".inst 0x45989817 // usmmla z23.s, z0.b, z24.b\n"
+ "ble 24f\n"
+ "ld1b { z24.b }, p2/Z, [x28]\n"
+ "ld1b { z30.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z28.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z27.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z26.b }, p2/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x45989830 // usmmla z16.s, z1.b, z24.b\n"
+ "ld1b { z25.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1b { z24.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x459e9834 // usmmla z20.s, z1.b, z30.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x459d9831 // usmmla z17.s, z1.b, z29.b\n"
+ ".inst 0x459c9835 // usmmla z21.s, z1.b, z28.b\n"
+ ".inst 0x459b9832 // usmmla z18.s, z1.b, z27.b\n"
+ ".inst 0x459a9836 // usmmla z22.s, z1.b, z26.b\n"
+ ".inst 0x45999833 // usmmla z19.s, z1.b, z25.b\n"
+ ".inst 0x45989837 // usmmla z23.s, z1.b, z24.b\n"
+ "24:" // Height 2: Multiply loop: multiply skip
+ "tbnz %x[flags], #31, 25f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z11.s, z1.b, z15.b\n"
+ "25:" // Height 2: Multiply loop: unique 4: skip row sum
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 18b\n"
+ "uzp1 z24.d, z16.d, z20.d\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "uzp1 z20.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "uzp1 z22.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "add x23, x27, x20\n"
+ "mov z23.d, z24.d\n"
+ "tbnz %x[flags], #31, 26f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ ".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ "ld1rw { z24.s }, p2/Z, [x20]\n"
+ "neg z24.s, p2/M, z24.s\n"
+ "mov z12.s, z11.s[3]\n"
+ "mov z11.s, z11.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z24.s\n"
+ "mul z12.s, p2/M, z12.s, z24.s\n"
+ "26:" // Height 2: skip row sum fixup
+ "add z23.s, z23.s, z11.s\n"
+ "add z20.s, z20.s, z11.s\n"
+ "ld1w { z28.s }, p2/Z, [x10]\n"
+ "ld1w { z27.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "add z21.s, z21.s, z11.s\n"
+ "add z22.s, z22.s, z11.s\n"
+ "ld1w { z26.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "add z16.s, z16.s, z12.s\n"
+ "add z17.s, z17.s, z12.s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "add z18.s, z18.s, z12.s\n"
+ "add z19.s, z19.s, z12.s\n"
+ "ld1rw { z24.s }, p2/Z, [x20]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add z23.s, z23.s, z28.s\n"
+ "add z20.s, z20.s, z27.s\n"
+ "addvl x10, x10, #4\n"
+ "add z21.s, z21.s, z26.s\n"
+ "add z22.s, z22.s, z25.s\n"
+ "add z16.s, z16.s, z28.s\n"
+ "add z17.s, z17.s, z27.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z18.s, z18.s, z26.s\n"
+ "add z19.s, z19.s, z25.s\n"
+ ".inst 0x04b876f7 // sqrdmulh z23.s, z23.s, z24.s\n"
+ ".inst 0x04b87694 // sqrdmulh z20.s, z20.s, z24.s\n"
+ ".inst 0x04b876b5 // sqrdmulh z21.s, z21.s, z24.s\n"
+ ".inst 0x04b876d6 // sqrdmulh z22.s, z22.s, z24.s\n"
+ ".inst 0x04b87610 // sqrdmulh z16.s, z16.s, z24.s\n"
+ ".inst 0x04b87631 // sqrdmulh z17.s, z17.s, z24.s\n"
+ ".inst 0x04b87652 // sqrdmulh z18.s, z18.s, z24.s\n"
+ ".inst 0x04b87673 // sqrdmulh z19.s, z19.s, z24.s\n"
+ "tbz %x[flags], #5, 27f\n"
+ "and z24.d, z23.d, z0.d\n"
+ "and z30.d, z20.d, z0.d\n"
+ "and z29.d, z21.d, z0.d\n"
+ "and z28.d, z22.d, z0.d\n"
+ "and z27.d, z16.d, z0.d\n"
+ "and z26.d, z17.d, z0.d\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ "and z25.d, z18.d, z0.d\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "asr z29.s, z29.s, #0x1f\n"
+ "asr z28.s, z28.s, #0x1f\n"
+ "asr z27.s, z27.s, #0x1f\n"
+ "sqadd z23.s, z23.s, z24.s\n"
+ "and z24.d, z19.d, z0.d\n"
+ "asr z26.s, z26.s, #0x1f\n"
+ "asr z25.s, z25.s, #0x1f\n"
+ "sqadd z20.s, z20.s, z30.s\n"
+ "sqadd z21.s, z21.s, z29.s\n"
+ "asr z24.s, z24.s, #0x1f\n"
+ "sqadd z22.s, z22.s, z28.s\n"
+ "sqadd z16.s, z16.s, z27.s\n"
+ "sqadd z17.s, z17.s, z26.s\n"
+ "sqadd z18.s, z18.s, z25.s\n"
+ "sqadd z19.s, z19.s, z24.s\n"
+ "27:" // Height 2: no shift correction
+ "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ "ld1rw { z26.s }, p2/Z, [x20]\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ "add z23.s, z23.s, z26.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "add z20.s, z20.s, z26.s\n"
+ "add z21.s, z21.s, z26.s\n"
+ "ld1rw { z25.s }, p2/Z, [x20]\n"
+ "add z22.s, z22.s, z26.s\n"
+ "add z16.s, z16.s, z26.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z17.s, z17.s, z26.s\n"
+ "add z18.s, z18.s, z26.s\n"
+ "ld1rw { z24.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z26.s\n"
+ "smin z23.s, p2/M, z23.s, z25.s\n"
+ "smin z20.s, p2/M, z20.s, z25.s\n"
+ "smin z21.s, p2/M, z21.s, z25.s\n"
+ "smin z22.s, p2/M, z22.s, z25.s\n"
+ "smin z16.s, p2/M, z16.s, z25.s\n"
+ "smin z17.s, p2/M, z17.s, z25.s\n"
+ "smin z18.s, p2/M, z18.s, z25.s\n"
+ "smin z19.s, p2/M, z19.s, z25.s\n"
+ "smax z23.s, p2/M, z23.s, z24.s\n"
+ "smax z20.s, p2/M, z20.s, z24.s\n"
+ "smax z21.s, p2/M, z21.s, z24.s\n"
+ "smax z22.s, p2/M, z22.s, z24.s\n"
+ "smax z16.s, p2/M, z16.s, z24.s\n"
+ "smax z17.s, p2/M, z17.s, z24.s\n"
+ "smax z18.s, p2/M, z18.s, z24.s\n"
+ "smax z19.s, p2/M, z19.s, z24.s\n"
+ "uzp1 z23.h, z23.h, z20.h\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
+ "uzp1 z23.b, z23.b, z20.b\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "st1b { z23.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "st1b { z16.b }, p1, [x23]\n"
+ "28:" // Height 2: Writeback done
+ "decw x9, ALL, MUL #4\n"
+ "cmp x9, XZR\n"
+ "bgt 16b\n"
+ "b 58f\n"
+ "29:" // Height 3
+ "mov x10, %x[col_bias]\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "mov z13.s, #0x0\n"
+ "mov z15.b, #0x1\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "30:" // Height 3: Column loop
+ "mov x20, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "mov z24.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "mov z28.s, #0x0\n"
+ "mov z29.s, #0x0\n"
+ "mov z30.s, #0x0\n"
+ "mov z31.s, #0x0\n"
+ "31:" // Height 3: setup done
+ "mov x26, #0x0\n"
+ "32:" // Height 3: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 33f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "cbnz x26, 34f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "b 34f\n"
+ "33:" // Height 3: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "34:" // Height 3: input setup done
+ "cmp x25, #0x10\n"
+ "ble 37f\n"
+ "35:" // Height 3: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z6.d\n"
+ "trn2 z3.d, z3.d, z6.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45859810 // usmmla z16.s, z0.b, z5.b\n"
+ ".inst 0x458a9814 // usmmla z20.s, z0.b, z10.b\n"
+ ".inst 0x45899811 // usmmla z17.s, z0.b, z9.b\n"
+ ".inst 0x45889815 // usmmla z21.s, z0.b, z8.b\n"
+ ".inst 0x45849812 // usmmla z18.s, z0.b, z4.b\n"
+ ".inst 0x45859858 // usmmla z24.s, z2.b, z5.b\n"
+ "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
+ ".inst 0x458a985c // usmmla z28.s, z2.b, z10.b\n"
+ ".inst 0x45899859 // usmmla z25.s, z2.b, z9.b\n"
+ ".inst 0x4588985d // usmmla z29.s, z2.b, z8.b\n"
+ ".inst 0x4584985a // usmmla z26.s, z2.b, z4.b\n"
+ ".inst 0x45879816 // usmmla z22.s, z0.b, z7.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x4587985e // usmmla z30.s, z2.b, z7.b\n"
+ ".inst 0x45869813 // usmmla z19.s, z0.b, z6.b\n"
+ "ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x4586985b // usmmla z27.s, z2.b, z6.b\n"
+ ".inst 0x45859817 // usmmla z23.s, z0.b, z5.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x4585985f // usmmla z31.s, z2.b, z5.b\n"
+ ".inst 0x45849830 // usmmla z16.s, z1.b, z4.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x45849878 // usmmla z24.s, z3.b, z4.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x458a9834 // usmmla z20.s, z1.b, z10.b\n"
+ ".inst 0x458a987c // usmmla z28.s, z3.b, z10.b\n"
+ ".inst 0x45899831 // usmmla z17.s, z1.b, z9.b\n"
+ ".inst 0x45899879 // usmmla z25.s, z3.b, z9.b\n"
+ ".inst 0x45889835 // usmmla z21.s, z1.b, z8.b\n"
+ ".inst 0x4588987d // usmmla z29.s, z3.b, z8.b\n"
+ ".inst 0x45879832 // usmmla z18.s, z1.b, z7.b\n"
+ ".inst 0x4587987a // usmmla z26.s, z3.b, z7.b\n"
+ ".inst 0x45869836 // usmmla z22.s, z1.b, z6.b\n"
+ ".inst 0x4586987e // usmmla z30.s, z3.b, z6.b\n"
+ ".inst 0x45859833 // usmmla z19.s, z1.b, z5.b\n"
+ ".inst 0x4585987b // usmmla z27.s, z3.b, z5.b\n"
+ ".inst 0x45849837 // usmmla z23.s, z1.b, z4.b\n"
+ ".inst 0x4584987f // usmmla z31.s, z3.b, z4.b\n"
+ "tbnz %x[flags], #31, 36f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z13.s, z2.b, z15.b\n"
+ "udot z11.s, z1.b, z15.b\n"
+ "udot z13.s, z3.b, z15.b\n"
+ "36:" // Height 3: Multiply loop: unique 5: skip row sum
+ "sub x25, x25, #0x10\n"
+ "cmp x25, #0x10\n"
+ "bgt 35b\n"
+ "37:" // Height 3: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z4.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "subs x25, x25, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z5.d\n"
+ "trn2 z3.d, z3.d, z5.d\n"
+ "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45849810 // usmmla z16.s, z0.b, z4.b\n"
+ ".inst 0x458a9814 // usmmla z20.s, z0.b, z10.b\n"
+ ".inst 0x45899811 // usmmla z17.s, z0.b, z9.b\n"
+ ".inst 0x45889815 // usmmla z21.s, z0.b, z8.b\n"
+ ".inst 0x45879812 // usmmla z18.s, z0.b, z7.b\n"
+ ".inst 0x45849858 // usmmla z24.s, z2.b, z4.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x458a985c // usmmla z28.s, z2.b, z10.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45899859 // usmmla z25.s, z2.b, z9.b\n"
+ ".inst 0x4588985d // usmmla z29.s, z2.b, z8.b\n"
+ ".inst 0x4587985a // usmmla z26.s, z2.b, z7.b\n"
+ ".inst 0x45869816 // usmmla z22.s, z0.b, z6.b\n"
+ ".inst 0x4586985e // usmmla z30.s, z2.b, z6.b\n"
+ ".inst 0x45859813 // usmmla z19.s, z0.b, z5.b\n"
+ ".inst 0x4585985b // usmmla z27.s, z2.b, z5.b\n"
+ ".inst 0x45849817 // usmmla z23.s, z0.b, z4.b\n"
+ ".inst 0x4584985f // usmmla z31.s, z2.b, z4.b\n"
+ "ble 38f\n"
+ "ld1b { z4.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x45849830 // usmmla z16.s, z1.b, z4.b\n"
+ ".inst 0x45849878 // usmmla z24.s, z3.b, z4.b\n"
+ "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x458a9834 // usmmla z20.s, z1.b, z10.b\n"
+ ".inst 0x458a987c // usmmla z28.s, z3.b, z10.b\n"
+ ".inst 0x45899831 // usmmla z17.s, z1.b, z9.b\n"
+ ".inst 0x45899879 // usmmla z25.s, z3.b, z9.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45889835 // usmmla z21.s, z1.b, z8.b\n"
+ ".inst 0x4588987d // usmmla z29.s, z3.b, z8.b\n"
+ ".inst 0x45879832 // usmmla z18.s, z1.b, z7.b\n"
+ ".inst 0x4587987a // usmmla z26.s, z3.b, z7.b\n"
+ ".inst 0x45869836 // usmmla z22.s, z1.b, z6.b\n"
+ ".inst 0x4586987e // usmmla z30.s, z3.b, z6.b\n"
+ ".inst 0x45859833 // usmmla z19.s, z1.b, z5.b\n"
+ ".inst 0x4585987b // usmmla z27.s, z3.b, z5.b\n"
+ ".inst 0x45849837 // usmmla z23.s, z1.b, z4.b\n"
+ ".inst 0x4584987f // usmmla z31.s, z3.b, z4.b\n"
+ "38:" // Height 3: Multiply loop: multiply skip
+ "tbnz %x[flags], #31, 39f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z13.s, z2.b, z15.b\n"
+ "udot z11.s, z1.b, z15.b\n"
+ "udot z13.s, z3.b, z15.b\n"
+ "39:" // Height 3: Multiply loop: unique 6: skip row sum
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 32b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z0.d, z16.d, z20.d\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "uzp1 z20.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "add x23, x27, x20\n"
+ "uzp1 z22.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "add x22, x23, x20\n"
+ "uzp1 z24.d, z24.d, z28.d\n"
+ "uzp1 z25.d, z25.d, z29.d\n"
+ "uzp1 z26.d, z26.d, z30.d\n"
+ "uzp1 z27.d, z27.d, z31.d\n"
+ "mov z31.d, z0.d\n"
+ "tbnz %x[flags], #31, 40f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ ".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ ".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
+ "ld1rw { z23.s }, p2/Z, [x20]\n"
+ "neg z23.s, p2/M, z23.s\n"
+ "mov z12.s, z11.s[3]\n"
+ "mov z11.s, z11.s[0]\n"
+ "mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z23.s\n"
+ "mul z12.s, p2/M, z12.s, z23.s\n"
+ "mul z13.s, p2/M, z13.s, z23.s\n"
+ "40:" // Height 3: skip row sum fixup
+ "add z31.s, z31.s, z11.s\n"
+ "add z20.s, z20.s, z11.s\n"
+ "ld1w { z0.s }, p2/Z, [x10]\n"
+ "ld1w { z30.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "add z21.s, z21.s, z11.s\n"
+ "add z22.s, z22.s, z11.s\n"
+ "ld1w { z29.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z28.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "add z16.s, z16.s, z12.s\n"
+ "add z17.s, z17.s, z12.s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "add z18.s, z18.s, z12.s\n"
+ "add z19.s, z19.s, z12.s\n"
+ "ld1rw { z23.s }, p2/Z, [x20]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add z24.s, z24.s, z13.s\n"
+ "add z25.s, z25.s, z13.s\n"
+ "addvl x10, x10, #4\n"
+ "add z26.s, z26.s, z13.s\n"
+ "add z27.s, z27.s, z13.s\n"
+ "add z31.s, z31.s, z0.s\n"
+ "add z20.s, z20.s, z30.s\n"
+ "add z21.s, z21.s, z29.s\n"
+ "add z22.s, z22.s, z28.s\n"
+ "add z16.s, z16.s, z0.s\n"
+ "add z17.s, z17.s, z30.s\n"
+ "add z18.s, z18.s, z29.s\n"
+ "add z19.s, z19.s, z28.s\n"
+ "add z24.s, z24.s, z0.s\n"
+ "add z25.s, z25.s, z30.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z26.s, z26.s, z29.s\n"
+ "add z27.s, z27.s, z28.s\n"
+ ".inst 0x04b777ff // sqrdmulh z31.s, z31.s, z23.s\n"
+ ".inst 0x04b77694 // sqrdmulh z20.s, z20.s, z23.s\n"
+ ".inst 0x04b776b5 // sqrdmulh z21.s, z21.s, z23.s\n"
+ ".inst 0x04b776d6 // sqrdmulh z22.s, z22.s, z23.s\n"
+ ".inst 0x04b77610 // sqrdmulh z16.s, z16.s, z23.s\n"
+ ".inst 0x04b77631 // sqrdmulh z17.s, z17.s, z23.s\n"
+ ".inst 0x04b77652 // sqrdmulh z18.s, z18.s, z23.s\n"
+ ".inst 0x04b77673 // sqrdmulh z19.s, z19.s, z23.s\n"
+ ".inst 0x04b77718 // sqrdmulh z24.s, z24.s, z23.s\n"
+ ".inst 0x04b77739 // sqrdmulh z25.s, z25.s, z23.s\n"
+ ".inst 0x04b7775a // sqrdmulh z26.s, z26.s, z23.s\n"
+ ".inst 0x04b7777b // sqrdmulh z27.s, z27.s, z23.s\n"
+ "tbz %x[flags], #5, 41f\n"
+ "and z1.d, z31.d, z0.d\n"
+ "and z30.d, z20.d, z0.d\n"
+ "and z29.d, z21.d, z0.d\n"
+ "and z28.d, z22.d, z0.d\n"
+ "and z23.d, z16.d, z0.d\n"
+ "and z3.d, z17.d, z0.d\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "asr z29.s, z29.s, #0x1f\n"
+ "asr z28.s, z28.s, #0x1f\n"
+ "asr z23.s, z23.s, #0x1f\n"
+ "and z2.d, z18.d, z0.d\n"
+ "sqadd z31.s, z31.s, z1.s\n"
+ "sqadd z20.s, z20.s, z30.s\n"
+ "sqadd z21.s, z21.s, z29.s\n"
+ "sqadd z22.s, z22.s, z28.s\n"
+ "sqadd z16.s, z16.s, z23.s\n"
+ "and z1.d, z19.d, z0.d\n"
+ "and z30.d, z24.d, z0.d\n"
+ "and z29.d, z25.d, z0.d\n"
+ "and z28.d, z26.d, z0.d\n"
+ "and z23.d, z27.d, z0.d\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "asr z30.s, z30.s, #0x1f\n"
+ "asr z29.s, z29.s, #0x1f\n"
+ "asr z28.s, z28.s, #0x1f\n"
+ "asr z23.s, z23.s, #0x1f\n"
+ "sqadd z17.s, z17.s, z3.s\n"
+ "sqadd z18.s, z18.s, z2.s\n"
+ "sqadd z19.s, z19.s, z1.s\n"
+ "sqadd z24.s, z24.s, z30.s\n"
+ "sqadd z25.s, z25.s, z29.s\n"
+ "sqadd z26.s, z26.s, z28.s\n"
+ "sqadd z27.s, z27.s, z23.s\n"
+ "41:" // Height 3: no shift correction
+ "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
+ "ld1rw { z29.s }, p2/Z, [x20]\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ "add z31.s, z31.s, z29.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "add z20.s, z20.s, z29.s\n"
+ "add z21.s, z21.s, z29.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
+ "add z22.s, z22.s, z29.s\n"
+ "add z16.s, z16.s, z29.s\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "add z17.s, z17.s, z29.s\n"
+ "add z18.s, z18.s, z29.s\n"
+ "ld1rw { z28.s }, p2/Z, [x20]\n"
+ "add z19.s, z19.s, z29.s\n"
+ "add z24.s, z24.s, z29.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z29.s\n"
+ "add z26.s, z26.s, z29.s\n"
+ "ld1rw { z23.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z29.s\n"
+ "smin z31.s, p2/M, z31.s, z28.s\n"
+ "smin z20.s, p2/M, z20.s, z28.s\n"
+ "smin z21.s, p2/M, z21.s, z28.s\n"
+ "smin z22.s, p2/M, z22.s, z28.s\n"
+ "smin z16.s, p2/M, z16.s, z28.s\n"
+ "smin z17.s, p2/M, z17.s, z28.s\n"
+ "smin z18.s, p2/M, z18.s, z28.s\n"
+ "smin z19.s, p2/M, z19.s, z28.s\n"
+ "smin z24.s, p2/M, z24.s, z28.s\n"
+ "smin z25.s, p2/M, z25.s, z28.s\n"
+ "smin z26.s, p2/M, z26.s, z28.s\n"
+ "smin z27.s, p2/M, z27.s, z28.s\n"
+ "smax z31.s, p2/M, z31.s, z23.s\n"
+ "smax z20.s, p2/M, z20.s, z23.s\n"
+ "smax z21.s, p2/M, z21.s, z23.s\n"
+ "smax z22.s, p2/M, z22.s, z23.s\n"
+ "smax z16.s, p2/M, z16.s, z23.s\n"
+ "smax z17.s, p2/M, z17.s, z23.s\n"
+ "smax z18.s, p2/M, z18.s, z23.s\n"
+ "smax z19.s, p2/M, z19.s, z23.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
+ "smax z24.s, p2/M, z24.s, z23.s\n"
+ "smax z25.s, p2/M, z25.s, z23.s\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
+ "smax z26.s, p2/M, z26.s, z23.s\n"
+ "smax z27.s, p2/M, z27.s, z23.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "uzp1 z18.h, z18.h, z19.h\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z31.b, z31.b, z20.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "uzp1 z16.b, z16.b, z18.b\n"
+ "st1b { z31.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z16.b }, p1, [x23]\n"
+ "st1b { z24.b }, p1, [x22]\n"
+ "42:" // Height 3: Writeback done
+ "decw x9, ALL, MUL #4\n"
+ "cmp x9, XZR\n"
+ "bgt 30b\n"
+ "b 58f\n"
+ "43:" // Height 4
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x27, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x20, #0x4\n"
+ "mov x10, %x[col_bias]\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "bic %x[flags], %x[flags], #0x80000000\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_N]]\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "ldr x28, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x27\n"
+ "mov z15.b, #0x1\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "44:" // Height 4: Column loop
+ "mov x20, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "whilelt p1.b, x20, x9\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "mov z24.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "mov z28.s, #0x0\n"
+ "mov z29.s, #0x0\n"
+ "mov z30.s, #0x0\n"
+ "mov z31.s, #0x0\n"
+ "45:" // Height 4: setup done
+ "mov x26, #0x0\n"
+ "46:" // Height 4: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w25, [x20, x26, LSL #0x2]\n"
+ "tbz %x[flags], #3, 47f\n"
+ "ldr x20, [%x[input_ptr], x26, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x24, [x20, #0x0]\n"
+ "ldr x23, [x20, #0x8]\n"
+ "ldr x22, [x20, #0x10]\n"
+ "ldr x21, [x20, #0x18]\n"
+ "cbnz x26, 48f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "add x21, x21, x20\n"
+ "b 48f\n"
+ "47:" // Height 4: setup direct input
+ "mov x24, %x[input_ptr]\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
+ "48:" // Height 4: input setup done
+ "cmp x25, #0x10\n"
+ "ble 51f\n"
+ "49:" // Height 4: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z5.b }, p2/Z, [x28]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "ld1rqb { z6.b }, p0/Z, [x21]\n"
+ "add x22, x22, #0x10\n"
+ "add x21, x21, #0x10\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z6.d\n"
+ "trn2 z3.d, z3.d, z6.d\n"
+ "ld1b { z6.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45859810 // usmmla z16.s, z0.b, z5.b\n"
+ ".inst 0x45849814 // usmmla z20.s, z0.b, z4.b\n"
+ ".inst 0x45879811 // usmmla z17.s, z0.b, z7.b\n"
+ ".inst 0x45899815 // usmmla z21.s, z0.b, z9.b\n"
+ ".inst 0x45889812 // usmmla z18.s, z0.b, z8.b\n"
+ ".inst 0x45859858 // usmmla z24.s, z2.b, z5.b\n"
+ "ld1b { z5.b }, p2/Z, [x28, #7, MUL VL]\n"
+ "addvl x28, x28, #16\n"
+ ".inst 0x4584985c // usmmla z28.s, z2.b, z4.b\n"
+ ".inst 0x45879859 // usmmla z25.s, z2.b, z7.b\n"
+ ".inst 0x4589985d // usmmla z29.s, z2.b, z9.b\n"
+ ".inst 0x4588985a // usmmla z26.s, z2.b, z8.b\n"
+ ".inst 0x458a9816 // usmmla z22.s, z0.b, z10.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-8, MUL VL]\n"
+ ".inst 0x458a985e // usmmla z30.s, z2.b, z10.b\n"
+ ".inst 0x45869813 // usmmla z19.s, z0.b, z6.b\n"
+ "ld1b { z10.b }, p2/Z, [x28, #-7, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #-6, MUL VL]\n"
+ ".inst 0x4586985b // usmmla z27.s, z2.b, z6.b\n"
+ ".inst 0x45859817 // usmmla z23.s, z0.b, z5.b\n"
+ "ld1b { z8.b }, p2/Z, [x28, #-5, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #-4, MUL VL]\n"
+ ".inst 0x4585985f // usmmla z31.s, z2.b, z5.b\n"
+ ".inst 0x45849830 // usmmla z16.s, z1.b, z4.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #-3, MUL VL]\n"
+ "ld1b { z5.b }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x45849878 // usmmla z24.s, z3.b, z4.b\n"
+ "ld1b { z4.b }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x458a9834 // usmmla z20.s, z1.b, z10.b\n"
+ ".inst 0x458a987c // usmmla z28.s, z3.b, z10.b\n"
+ ".inst 0x45899831 // usmmla z17.s, z1.b, z9.b\n"
+ ".inst 0x45899879 // usmmla z25.s, z3.b, z9.b\n"
+ ".inst 0x45889835 // usmmla z21.s, z1.b, z8.b\n"
+ ".inst 0x4588987d // usmmla z29.s, z3.b, z8.b\n"
+ ".inst 0x45879832 // usmmla z18.s, z1.b, z7.b\n"
+ ".inst 0x4587987a // usmmla z26.s, z3.b, z7.b\n"
+ ".inst 0x45869836 // usmmla z22.s, z1.b, z6.b\n"
+ ".inst 0x4586987e // usmmla z30.s, z3.b, z6.b\n"
+ ".inst 0x45859833 // usmmla z19.s, z1.b, z5.b\n"
+ ".inst 0x4585987b // usmmla z27.s, z3.b, z5.b\n"
+ ".inst 0x45849837 // usmmla z23.s, z1.b, z4.b\n"
+ ".inst 0x4584987f // usmmla z31.s, z3.b, z4.b\n"
+ "tbnz %x[flags], #31, 50f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z13.s, z2.b, z15.b\n"
+ "udot z11.s, z1.b, z15.b\n"
+ "udot z13.s, z3.b, z15.b\n"
+ "50:" // Height 4: Multiply loop: unique 7: skip row sum
+ "sub x25, x25, #0x10\n"
+ "cmp x25, #0x10\n"
+ "bgt 49b\n"
+ "51:" // Height 4: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x25\n"
+ "ld1b { z6.b }, p2/Z, [x28]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "subs x25, x25, #0x8\n"
+ "ld1b { z7.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z3.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x21]\n"
+ "trn1 z0.d, z1.d, z2.d\n"
+ "trn2 z1.d, z1.d, z2.d\n"
+ "trn1 z2.d, z3.d, z5.d\n"
+ "trn2 z3.d, z3.d, z5.d\n"
+ "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
+ ".inst 0x45869810 // usmmla z16.s, z0.b, z6.b\n"
+ ".inst 0x45849814 // usmmla z20.s, z0.b, z4.b\n"
+ ".inst 0x45879811 // usmmla z17.s, z0.b, z7.b\n"
+ ".inst 0x45899815 // usmmla z21.s, z0.b, z9.b\n"
+ ".inst 0x45889812 // usmmla z18.s, z0.b, z8.b\n"
+ ".inst 0x45869858 // usmmla z24.s, z2.b, z6.b\n"
+ "ld1b { z6.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x4584985c // usmmla z28.s, z2.b, z4.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45879859 // usmmla z25.s, z2.b, z7.b\n"
+ ".inst 0x4589985d // usmmla z29.s, z2.b, z9.b\n"
+ ".inst 0x4588985a // usmmla z26.s, z2.b, z8.b\n"
+ ".inst 0x458a9816 // usmmla z22.s, z0.b, z10.b\n"
+ ".inst 0x458a985e // usmmla z30.s, z2.b, z10.b\n"
+ ".inst 0x45859813 // usmmla z19.s, z0.b, z5.b\n"
+ ".inst 0x4585985b // usmmla z27.s, z2.b, z5.b\n"
+ ".inst 0x45869817 // usmmla z23.s, z0.b, z6.b\n"
+ ".inst 0x4586985f // usmmla z31.s, z2.b, z6.b\n"
+ "ble 52f\n"
+ "ld1b { z4.b }, p2/Z, [x28]\n"
+ "ld1b { z10.b }, p2/Z, [x28, #1, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1b { z6.b }, p2/Z, [x28, #5, MUL VL]\n"
+ ".inst 0x45849830 // usmmla z16.s, z1.b, z4.b\n"
+ ".inst 0x45849878 // usmmla z24.s, z3.b, z4.b\n"
+ "ld1b { z5.b }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x28, #7, MUL VL]\n"
+ ".inst 0x458a9834 // usmmla z20.s, z1.b, z10.b\n"
+ ".inst 0x458a987c // usmmla z28.s, z3.b, z10.b\n"
+ ".inst 0x45899831 // usmmla z17.s, z1.b, z9.b\n"
+ ".inst 0x45899879 // usmmla z25.s, z3.b, z9.b\n"
+ "addvl x28, x28, #8\n"
+ ".inst 0x45889835 // usmmla z21.s, z1.b, z8.b\n"
+ ".inst 0x4588987d // usmmla z29.s, z3.b, z8.b\n"
+ ".inst 0x45879832 // usmmla z18.s, z1.b, z7.b\n"
+ ".inst 0x4587987a // usmmla z26.s, z3.b, z7.b\n"
+ ".inst 0x45869836 // usmmla z22.s, z1.b, z6.b\n"
+ ".inst 0x4586987e // usmmla z30.s, z3.b, z6.b\n"
+ ".inst 0x45859833 // usmmla z19.s, z1.b, z5.b\n"
+ ".inst 0x4585987b // usmmla z27.s, z3.b, z5.b\n"
+ ".inst 0x45849837 // usmmla z23.s, z1.b, z4.b\n"
+ ".inst 0x4584987f // usmmla z31.s, z3.b, z4.b\n"
+ "52:" // Height 4: Multiply loop: multiply skip
+ "tbnz %x[flags], #31, 53f\n"
+ "udot z11.s, z0.b, z15.b\n"
+ "udot z13.s, z2.b, z15.b\n"
+ "udot z11.s, z1.b, z15.b\n"
+ "udot z13.s, z3.b, z15.b\n"
+ "53:" // Height 4: Multiply loop: unique 8: skip row sum
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x26, x26, #0x1\n"
+ "cmp x26, x20\n"
+ "bne 46b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z0.d, z16.d, z20.d\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "uzp1 z20.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "add x23, x27, x20\n"
+ "add x22, x23, x20\n"
+ "uzp1 z22.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "add x21, x22, x20\n"
+ "uzp1 z23.d, z24.d, z28.d\n"
+ "uzp2 z24.d, z24.d, z28.d\n"
+ "uzp1 z28.d, z25.d, z29.d\n"
+ "uzp2 z25.d, z25.d, z29.d\n"
+ "uzp1 z29.d, z26.d, z30.d\n"
+ "uzp2 z26.d, z26.d, z30.d\n"
+ "uzp1 z30.d, z27.d, z31.d\n"
+ "uzp2 z27.d, z27.d, z31.d\n"
+ "mov z31.d, z0.d\n"
+ "tbnz %x[flags], #31, 54f\n"
+ "add x20, %x[qp], %[b_offset]\n"
+ ".inst 0x4491a96b // addp z11.s, p2/m, z11.s, z11.s\n"
+ ".inst 0x4491a9ad // addp z13.s, p2/m, z13.s, z13.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "neg z0.s, p2/M, z0.s\n"
+ "mov z12.s, z11.s[3]\n"
+ "mov z11.s, z11.s[0]\n"
+ "mov z14.s, z13.s[3]\n"
+ "mov z13.s, z13.s[0]\n"
+ "mul z11.s, p2/M, z11.s, z0.s\n"
+ "mul z12.s, p2/M, z12.s, z0.s\n"
+ "mul z13.s, p2/M, z13.s, z0.s\n"
+ "mul z14.s, p2/M, z14.s, z0.s\n"
+ "54:" // Height 4: skip row sum fixup
+ "add z31.s, z31.s, z11.s\n"
+ "add z20.s, z20.s, z11.s\n"
+ "ld1w { z4.s }, p2/Z, [x10]\n"
+ "ld1w { z0.s }, p2/Z, [x10, #1, MUL VL]\n"
+ "add z21.s, z21.s, z11.s\n"
+ "add z22.s, z22.s, z11.s\n"
+ "ld1w { z3.s }, p2/Z, [x10, #2, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x10, #3, MUL VL]\n"
+ "add z16.s, z16.s, z12.s\n"
+ "add z17.s, z17.s, z12.s\n"
+ "add x20, %x[qp], %[per_layer_mul]\n"
+ "orr %x[flags], %x[flags], #0x80000000\n"
+ "add z18.s, z18.s, z12.s\n"
+ "add z19.s, z19.s, z12.s\n"
+ "ld1rw { z1.s }, p2/Z, [x20]\n"
+ "add x20, %x[qp], %[per_layer_right_shift]\n"
+ "add z23.s, z23.s, z13.s\n"
+ "add z28.s, z28.s, z13.s\n"
+ "addvl x10, x10, #4\n"
+ "add z29.s, z29.s, z13.s\n"
+ "add z30.s, z30.s, z13.s\n"
+ "add z24.s, z24.s, z14.s\n"
+ "add z25.s, z25.s, z14.s\n"
+ "add z26.s, z26.s, z14.s\n"
+ "add z27.s, z27.s, z14.s\n"
+ "add z31.s, z31.s, z4.s\n"
+ "add z20.s, z20.s, z0.s\n"
+ "add z21.s, z21.s, z3.s\n"
+ "add z22.s, z22.s, z2.s\n"
+ "add z16.s, z16.s, z4.s\n"
+ "add z17.s, z17.s, z0.s\n"
+ "add z18.s, z18.s, z3.s\n"
+ "add z19.s, z19.s, z2.s\n"
+ "add z23.s, z23.s, z4.s\n"
+ "add z28.s, z28.s, z0.s\n"
+ "add z29.s, z29.s, z3.s\n"
+ "add z30.s, z30.s, z2.s\n"
+ "add z24.s, z24.s, z4.s\n"
+ "add z25.s, z25.s, z0.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z26.s, z26.s, z3.s\n"
+ "add z27.s, z27.s, z2.s\n"
+ ".inst 0x04a177ff // sqrdmulh z31.s, z31.s, z1.s\n"
+ ".inst 0x04a17694 // sqrdmulh z20.s, z20.s, z1.s\n"
+ ".inst 0x04a176b5 // sqrdmulh z21.s, z21.s, z1.s\n"
+ ".inst 0x04a176d6 // sqrdmulh z22.s, z22.s, z1.s\n"
+ ".inst 0x04a17610 // sqrdmulh z16.s, z16.s, z1.s\n"
+ ".inst 0x04a17631 // sqrdmulh z17.s, z17.s, z1.s\n"
+ ".inst 0x04a17652 // sqrdmulh z18.s, z18.s, z1.s\n"
+ ".inst 0x04a17673 // sqrdmulh z19.s, z19.s, z1.s\n"
+ ".inst 0x04a176f7 // sqrdmulh z23.s, z23.s, z1.s\n"
+ ".inst 0x04a1779c // sqrdmulh z28.s, z28.s, z1.s\n"
+ ".inst 0x04a177bd // sqrdmulh z29.s, z29.s, z1.s\n"
+ ".inst 0x04a177de // sqrdmulh z30.s, z30.s, z1.s\n"
+ ".inst 0x04a17718 // sqrdmulh z24.s, z24.s, z1.s\n"
+ ".inst 0x04a17739 // sqrdmulh z25.s, z25.s, z1.s\n"
+ ".inst 0x04a1775a // sqrdmulh z26.s, z26.s, z1.s\n"
+ ".inst 0x04a1777b // sqrdmulh z27.s, z27.s, z1.s\n"
+ "tbz %x[flags], #5, 55f\n"
+ "and z2.d, z31.d, z0.d\n"
+ "and z1.d, z20.d, z0.d\n"
+ "and z7.d, z21.d, z0.d\n"
+ "and z6.d, z22.d, z0.d\n"
+ "and z5.d, z16.d, z0.d\n"
+ "and z4.d, z17.d, z0.d\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "and z3.d, z18.d, z0.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ "sqadd z31.s, z31.s, z2.s\n"
+ "sqadd z20.s, z20.s, z1.s\n"
+ "and z2.d, z19.d, z0.d\n"
+ "and z1.d, z23.d, z0.d\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ "sqadd z21.s, z21.s, z7.s\n"
+ "sqadd z22.s, z22.s, z6.s\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "sqadd z16.s, z16.s, z5.s\n"
+ "sqadd z17.s, z17.s, z4.s\n"
+ "sqadd z18.s, z18.s, z3.s\n"
+ "and z7.d, z28.d, z0.d\n"
+ "sqadd z19.s, z19.s, z2.s\n"
+ "sqadd z23.s, z23.s, z1.s\n"
+ "and z6.d, z29.d, z0.d\n"
+ "and z5.d, z30.d, z0.d\n"
+ "and z4.d, z24.d, z0.d\n"
+ "and z3.d, z25.d, z0.d\n"
+ "and z2.d, z26.d, z0.d\n"
+ "and z1.d, z27.d, z0.d\n"
+ "asr z7.s, z7.s, #0x1f\n"
+ "asr z6.s, z6.s, #0x1f\n"
+ "asr z5.s, z5.s, #0x1f\n"
+ "asr z4.s, z4.s, #0x1f\n"
+ "asr z3.s, z3.s, #0x1f\n"
+ "asr z2.s, z2.s, #0x1f\n"
+ "asr z1.s, z1.s, #0x1f\n"
+ "sqadd z28.s, z28.s, z7.s\n"
+ "sqadd z29.s, z29.s, z6.s\n"
+ "sqadd z30.s, z30.s, z5.s\n"
+ "sqadd z24.s, z24.s, z4.s\n"
+ "sqadd z25.s, z25.s, z3.s\n"
+ "sqadd z26.s, z26.s, z2.s\n"
+ "sqadd z27.s, z27.s, z1.s\n"
+ "55:" // Height 4: no shift correction
+ "add x20, %x[qp], %[c_offset]\n"
+ ".inst 0x4482881f // srshl z31.s, p2/M, z31.s, z0.s\n"
+ "ld1rw { z2.s }, p2/Z, [x20]\n"
+ ".inst 0x44828814 // srshl z20.s, p2/M, z20.s, z0.s\n"
+ ".inst 0x44828815 // srshl z21.s, p2/M, z21.s, z0.s\n"
+ ".inst 0x44828816 // srshl z22.s, p2/M, z22.s, z0.s\n"
+ ".inst 0x44828810 // srshl z16.s, p2/M, z16.s, z0.s\n"
+ ".inst 0x44828811 // srshl z17.s, p2/M, z17.s, z0.s\n"
+ ".inst 0x44828812 // srshl z18.s, p2/M, z18.s, z0.s\n"
+ "add z31.s, z31.s, z2.s\n"
+ ".inst 0x44828813 // srshl z19.s, p2/M, z19.s, z0.s\n"
+ ".inst 0x44828817 // srshl z23.s, p2/M, z23.s, z0.s\n"
+ "add z20.s, z20.s, z2.s\n"
+ "add z21.s, z21.s, z2.s\n"
+ ".inst 0x4482881c // srshl z28.s, p2/M, z28.s, z0.s\n"
+ ".inst 0x4482881d // srshl z29.s, p2/M, z29.s, z0.s\n"
+ "add z22.s, z22.s, z2.s\n"
+ "add z16.s, z16.s, z2.s\n"
+ ".inst 0x4482881e // srshl z30.s, p2/M, z30.s, z0.s\n"
+ ".inst 0x44828818 // srshl z24.s, p2/M, z24.s, z0.s\n"
+ "add z17.s, z17.s, z2.s\n"
+ "add z18.s, z18.s, z2.s\n"
+ ".inst 0x44828819 // srshl z25.s, p2/M, z25.s, z0.s\n"
+ ".inst 0x4482881a // srshl z26.s, p2/M, z26.s, z0.s\n"
+ "add z19.s, z19.s, z2.s\n"
+ "add z23.s, z23.s, z2.s\n"
+ ".inst 0x4482881b // srshl z27.s, p2/M, z27.s, z0.s\n"
+ "add x20, %x[qp], %[maxval]\n"
+ "add z28.s, z28.s, z2.s\n"
+ "add z29.s, z29.s, z2.s\n"
+ "ld1rw { z1.s }, p2/Z, [x20]\n"
+ "add z30.s, z30.s, z2.s\n"
+ "add z24.s, z24.s, z2.s\n"
+ "add x20, %x[qp], %[minval]\n"
+ "add z25.s, z25.s, z2.s\n"
+ "add z26.s, z26.s, z2.s\n"
+ "ld1rw { z0.s }, p2/Z, [x20]\n"
+ "add z27.s, z27.s, z2.s\n"
+ "smin z31.s, p2/M, z31.s, z1.s\n"
+ "smin z20.s, p2/M, z20.s, z1.s\n"
+ "smin z21.s, p2/M, z21.s, z1.s\n"
+ "smin z22.s, p2/M, z22.s, z1.s\n"
+ "smin z16.s, p2/M, z16.s, z1.s\n"
+ "smin z17.s, p2/M, z17.s, z1.s\n"
+ "smin z18.s, p2/M, z18.s, z1.s\n"
+ "smin z19.s, p2/M, z19.s, z1.s\n"
+ "smin z23.s, p2/M, z23.s, z1.s\n"
+ "smin z28.s, p2/M, z28.s, z1.s\n"
+ "smin z29.s, p2/M, z29.s, z1.s\n"
+ "smin z30.s, p2/M, z30.s, z1.s\n"
+ "smin z24.s, p2/M, z24.s, z1.s\n"
+ "smin z25.s, p2/M, z25.s, z1.s\n"
+ "smin z26.s, p2/M, z26.s, z1.s\n"
+ "smin z27.s, p2/M, z27.s, z1.s\n"
+ "smax z31.s, p2/M, z31.s, z0.s\n"
+ "smax z20.s, p2/M, z20.s, z0.s\n"
+ "smax z21.s, p2/M, z21.s, z0.s\n"
+ "smax z22.s, p2/M, z22.s, z0.s\n"
+ "smax z16.s, p2/M, z16.s, z0.s\n"
+ "smax z17.s, p2/M, z17.s, z0.s\n"
+ "smax z18.s, p2/M, z18.s, z0.s\n"
+ "smax z19.s, p2/M, z19.s, z0.s\n"
+ "uzp1 z31.h, z31.h, z20.h\n"
+ "smax z23.s, p2/M, z23.s, z0.s\n"
+ "smax z28.s, p2/M, z28.s, z0.s\n"
+ "uzp1 z20.h, z21.h, z22.h\n"
+ "smax z29.s, p2/M, z29.s, z0.s\n"
+ "smax z30.s, p2/M, z30.s, z0.s\n"
+ "uzp1 z16.h, z16.h, z17.h\n"
+ "smax z24.s, p2/M, z24.s, z0.s\n"
+ "smax z25.s, p2/M, z25.s, z0.s\n"
+ "uzp1 z17.h, z18.h, z19.h\n"
+ "smax z26.s, p2/M, z26.s, z0.s\n"
+ "smax z27.s, p2/M, z27.s, z0.s\n"
+ "uzp1 z23.h, z23.h, z28.h\n"
+ "uzp1 z31.b, z31.b, z20.b\n"
+ "uzp1 z18.h, z29.h, z30.h\n"
+ "uzp1 z24.h, z24.h, z25.h\n"
+ "uzp1 z16.b, z16.b, z17.b\n"
+ "uzp1 z17.h, z26.h, z27.h\n"
+ "st1b { z31.b }, p1, [x27]\n"
+ "addvl x27, x27, #1\n"
+ "uzp1 z23.b, z23.b, z18.b\n"
+ "uzp1 z24.b, z24.b, z17.b\n"
+ "st1b { z16.b }, p1, [x23]\n"
+ "st1b { z23.b }, p1, [x22]\n"
+ "st1b { z24.b }, p1, [x21]\n"
+ "56:" // Height 4: Writeback done
+ "decw x9, ALL, MUL #4\n"
+ "cmp x9, XZR\n"
+ "bgt 44b\n"
+ "subs %x[M], %x[M], #0x4\n"
+ "beq 58f\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 57f\n"
+ "add x21, x21, #0x4\n"
+ "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "57:" // Update direct input
+ "mov x20, #0x4\n"
+ "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "b 1b\n"
+ "58:" // Exit
+ : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp)
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL.hpp
new file mode 100644
index 0000000000..7a8ee8ecb8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL.hpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ unsigned int, const unsigned int *, \
+ IndirectInputArg<uint8_t>, \
+ size_t, size_t, \
+ const int8_t *, \
+ IndirectOutputArg<int32_t>, \
+ const int32_t *, Activation, bool
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_hybrid_u8s8s32_mmla_6x4VL( ARGLIST );
+
+class cls_sve_hybrid_u8s8s32_mmla_6x4VL
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 6;
+ }
+
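+ // Output tile width is four SVE vectors of int32 (the "4VL" in the kernel name).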
+ static unsigned int out_width()
+ {
+ return get_vector_length<int32_t>() * 4;
+ }
+
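+ // Each MMLA instruction reduces 8 bytes of K per output tile, hence the unroll factor of 8.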
+ static constexpr unsigned int k_unroll()
+ {
+ return 8;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 8, 8> transforms = {};
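+ // Rough per-CPU-model throughput estimates used when ranking candidate kernels.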
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+ if (std::is_same<T, uint32_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 54.45 };
+ case CPUModel::A510:
+ return { 24.22 };
+ case CPUModel::V1:
+ return { 105.16 };
+ }
+ }
+
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 54.90, 15.69, 0.62 };
+ case CPUModel::A510:
+ return { 26.80, 3.89, 0.47 };
+ case CPUModel::V1:
+ return { 75.14, 15.87, 0.83 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_hybrid_u8s8s32_mmla_6x4VL;
+ cls_sve_hybrid_u8s8s32_mmla_6x4VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL/generic.cpp
new file mode 100644
index 0000000000..14299e80d6
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8s8s32_mmla_6x4VL/generic.cpp
@@ -0,0 +1,1675 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "arm_gemm.hpp"
+#include "../../utils.hpp"
+
+#include <cassert>
+
+namespace arm_gemm {
+
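+// The inline assembly below is built almost entirely from USMMLA operations
+// (encoded as .inst words, each with a mnemonic comment). As a reading aid,
+// here is a minimal C++ sketch of what one 128-bit segment of USMMLA
+// computes; the function name and array layout are illustrative only, not
+// part of arm_gemm. Per segment: acc (2x2 int32) += a (2x8 uint8) times the
+// transpose of b (stored as 2x8 int8, used as 8x2), which is why the uint8_t
+// LHS rows are interleaved in pairs (trn1/trn2) before each multiply.
+inline void usmmla_segment_ref(int32_t acc[2][2], const uint8_t a[2][8], const int8_t b[2][8])
+{
+    // acc[i][j] += dot(row i of a (unsigned), row j of b (signed)) over 8 bytes.
+    for (int i = 0; i < 2; i++) {
+        for (int j = 0; j < 2; j++) {
+            for (int k = 0; k < 8; k++) {
+                acc[i][j] += static_cast<int32_t>(a[i][k]) * static_cast<int32_t>(b[j][k]);
+            }
+        }
+    }
+}
+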
+void sve_hybrid_u8s8s32_mmla_6x4VL (
+ unsigned int num_strings, const unsigned int *string_lengths, IndirectInputArg<uint8_t> A_arg,
+ size_t M, size_t N, const int8_t *B_ptr, IndirectOutputArg<int32_t> output_arg,
+ const int32_t *, Activation, bool accumulate
+)
+{
+ struct KernelArgs {
+ unsigned int num_strings = {};
+ const unsigned int *string_lengths = {};
+ size_t N = {};
+ const int8_t *B_ptr = {};
+ size_t output_offset = {};
+ size_t input_initial_col = {};
+ size_t input_offset = {};
+ void *output_ptr = {};
+ } ka;
+
+ unsigned long flags=0;
+ void *input_ptr;
+
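+ // Flag bits set here and consumed by the assembly via tbz/tbnz:
+ // bit 0 = accumulate into existing C, bit 2 = indirect output,
+ // bit 3 = indirect (string-based) input.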
+ if (output_arg.is_indirect) {
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_offset=output_arg.indirect.offset;
+ flags |= 0x4;
+ } else {
+ ka.output_ptr=(void *)(output_arg.direct.base);
+ ka.output_offset=output_arg.direct.stride;
+ }
+
+ if (A_arg.is_indirect) {
+ input_ptr=(void *)(A_arg.indirect.ptr);
+ ka.input_offset=A_arg.indirect.start_row;
+ ka.input_initial_col=A_arg.indirect.start_col;
+ flags |= 0x8;
+ } else {
+ assert(num_strings==1);
+ input_ptr=(void *)(A_arg.direct.base);
+ ka.input_offset=A_arg.direct.stride;
+ }
+ if (accumulate) {
+ flags |= 0x1;
+ }
+ ka.num_strings = num_strings;
+ ka.string_lengths = string_lengths;
+ ka.N = N;
+ ka.B_ptr = B_ptr;
+ __asm__ __volatile__(
+ "ptrue p5.b\n"
+ "1:" // Row loop
+ "cmp %x[M], #0x6\n"
+ "bge 56f\n"
+ "cmp %x[M], #0x4\n"
+ "bgt 45f\n"
+ "beq 34f\n"
+ "cmp %x[M], #0x2\n"
+ "bgt 23f\n"
+ "beq 12f\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "2:" // Height 1: Column loop
+ "mov x20, #0x0\n"
+ "whilelt p4.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p3.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p2.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p1.s, x20, x11\n"
+ "tbz %x[flags], #0, 3f\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z19.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
+ "zip1 z9.d, z19.d, z13.d\n"
+ "zip2 z13.d, z19.d, z13.d\n"
+ "zip1 z10.d, z17.d, z14.d\n"
+ "zip2 z14.d, z17.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "b 4f\n"
+ "3:" // Height 1: no accumulate
+ "mov z8.s, #0x0\n"
+ "mov z9.s, #0x0\n"
+ "mov z10.s, #0x0\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "mov z15.s, #0x0\n"
+ "4:" // Height 1: setup done
+ "mov x28, #0x0\n"
+ "5:" // Height 1: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 6f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "cbnz x28, 7f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "b 7f\n"
+ "6:" // Height 1: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "7:" // Height 1: input setup done
+ "cmp x27, #0x10\n"
+ "ble 9f\n"
+ "8:" // Height 1: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z16.b }, p5/Z, [x10]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z19.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "trn1 z18.d, z19.d, z20.d\n"
+ "trn2 z19.d, z19.d, z20.d\n"
+ ".inst 0x45909a48 // usmmla z8.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45919a4c // usmmla z12.s, z18.b, z17.b\n"
+ "ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45909a49 // usmmla z9.s, z18.b, z16.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45949a4d // usmmla z13.s, z18.b, z20.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45819a4a // usmmla z10.s, z18.b, z1.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45909a4e // usmmla z14.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x45919a4b // usmmla z11.s, z18.b, z17.b\n"
+ ".inst 0x45909a4f // usmmla z15.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x45909a68 // usmmla z8.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45919a6c // usmmla z12.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x45909a69 // usmmla z9.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x45919a6d // usmmla z13.s, z19.b, z17.b\n"
+ "ld1b { z3.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x45909a6a // usmmla z10.s, z19.b, z16.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45839a6e // usmmla z14.s, z19.b, z3.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x45919a6b // usmmla z11.s, z19.b, z17.b\n"
+ ".inst 0x45909a6f // usmmla z15.s, z19.b, z16.b\n"
+ "bgt 8b\n"
+ "9:" // Height 1: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
+ "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "trn1 z18.d, z1.d, z19.d\n"
+ ".inst 0x45919a48 // usmmla z8.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45909a4c // usmmla z12.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
+ ".inst 0x45919a49 // usmmla z9.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45909a4d // usmmla z13.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45919a4a // usmmla z10.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45909a4e // usmmla z14.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x45919a4b // usmmla z11.s, z18.b, z17.b\n"
+ ".inst 0x45909a4f // usmmla z15.s, z18.b, z16.b\n"
+ "ble 10f\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
+ "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45919828 // usmmla z8.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4590982c // usmmla z12.s, z1.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45919829 // usmmla z9.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4590982d // usmmla z13.s, z1.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4591982a // usmmla z10.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4590982e // usmmla z14.s, z1.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x4591982b // usmmla z11.s, z1.b, z17.b\n"
+ ".inst 0x4590982f // usmmla z15.s, z1.b, z16.b\n"
+ "10:" // Height 1: Multiply loop: multiply skip
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 5b\n"
+ "uzp1 z8.d, z8.d, z12.d\n"
+ "uzp1 z9.d, z9.d, z13.d\n"
+ "uzp1 z10.d, z10.d, z14.d\n"
+ "uzp1 z11.d, z11.d, z15.d\n"
+ "st1w { z8.s }, p4, [x9]\n"
+ "st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "11:" // Height 1: Writeback done
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
+ "bgt 2b\n"
+ "b 68f\n"
+ "12:" // Height 2
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "13:" // Height 2: Column loop
+ "mov x20, #0x0\n"
+ "whilelt p4.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p3.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p2.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p1.s, x20, x11\n"
+ "tbz %x[flags], #0, 14f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z18.s }, p4/Z, [x9]\n"
+ "ld1w { z24.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x20]\n"
+ "ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z8.d, z18.d, z12.d\n"
+ "zip2 z12.d, z18.d, z12.d\n"
+ "zip1 z9.d, z24.d, z13.d\n"
+ "zip2 z13.d, z24.d, z13.d\n"
+ "zip1 z10.d, z17.d, z14.d\n"
+ "zip2 z14.d, z17.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "b 15f\n"
+ "14:" // Height 2: no accumulate
+ "mov z8.s, #0x0\n"
+ "mov z9.s, #0x0\n"
+ "mov z10.s, #0x0\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "mov z15.s, #0x0\n"
+ "15:" // Height 2: setup done
+ "mov x28, #0x0\n"
+ "16:" // Height 2: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 17f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "cbnz x28, 18f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "b 18f\n"
+ "17:" // Height 2: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "18:" // Height 2: input setup done
+ "cmp x27, #0x10\n"
+ "ble 20f\n"
+ "19:" // Height 2: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
+ "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z19.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "trn1 z18.d, z19.d, z25.d\n"
+ "trn2 z19.d, z19.d, z25.d\n"
+ ".inst 0x45919a48 // usmmla z8.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45909a4c // usmmla z12.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45919a49 // usmmla z9.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45909a4d // usmmla z13.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45919a4a // usmmla z10.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45909a4e // usmmla z14.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x45919a4b // usmmla z11.s, z18.b, z17.b\n"
+ ".inst 0x45909a4f // usmmla z15.s, z18.b, z16.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x45919a68 // usmmla z8.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45909a6c // usmmla z12.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x45919a69 // usmmla z9.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x45909a6d // usmmla z13.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x45919a6a // usmmla z10.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45909a6e // usmmla z14.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x45919a6b // usmmla z11.s, z19.b, z17.b\n"
+ ".inst 0x45909a6f // usmmla z15.s, z19.b, z16.b\n"
+ "bgt 19b\n"
+ "20:" // Height 2: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
+ "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1rqb { z19.b }, p0/Z, [x25]\n"
+ "trn1 z18.d, z1.d, z19.d\n"
+ ".inst 0x45919a48 // usmmla z8.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45909a4c // usmmla z12.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
+ ".inst 0x45919a49 // usmmla z9.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45909a4d // usmmla z13.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45919a4a // usmmla z10.s, z18.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45909a4e // usmmla z14.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x45919a4b // usmmla z11.s, z18.b, z17.b\n"
+ ".inst 0x45909a4f // usmmla z15.s, z18.b, z16.b\n"
+ "ble 21f\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
+ "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45919828 // usmmla z8.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4590982c // usmmla z12.s, z1.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45919829 // usmmla z9.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4590982d // usmmla z13.s, z1.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4591982a // usmmla z10.s, z1.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4590982e // usmmla z14.s, z1.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x4591982b // usmmla z11.s, z1.b, z17.b\n"
+ ".inst 0x4590982f // usmmla z15.s, z1.b, z16.b\n"
+ "21:" // Height 2: Multiply loop: multiply skip
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 16b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z17.d, z8.d, z12.d\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z16.d, z9.d, z13.d\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z12.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "add x20, x9, x20, LSL #2\n"
+ "uzp1 z26.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z17.s }, p4, [x9]\n"
+ "st1w { z16.s }, p3, [x9, #1, MUL VL]\n"
+ "st1w { z12.s }, p2, [x9, #2, MUL VL]\n"
+ "st1w { z26.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x20]\n"
+ "st1w { z9.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x20, #3, MUL VL]\n"
+ "22:" // Height 2: Writeback done
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
+ "bgt 13b\n"
+ "b 68f\n"
+ "23:" // Height 3
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "24:" // Height 3: Column loop
+ "mov x20, #0x0\n"
+ "whilelt p4.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p3.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p2.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p1.s, x20, x11\n"
+ "tbz %x[flags], #0, 25f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p4/Z, [x9]\n"
+ "ld1w { z26.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x21]\n"
+ "ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x20]\n"
+ "ld1w { z18.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "zip1 z8.d, z24.d, z12.d\n"
+ "zip2 z12.d, z24.d, z12.d\n"
+ "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z9.d, z26.d, z13.d\n"
+ "zip2 z13.d, z26.d, z13.d\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "b 26f\n"
+ "25:" // Height 3: no accumulate
+ "mov z8.s, #0x0\n"
+ "mov z9.s, #0x0\n"
+ "mov z10.s, #0x0\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "mov z15.s, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "26:" // Height 3: setup done
+ "mov x28, #0x0\n"
+ "27:" // Height 3: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 28f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "cbnz x28, 29f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "b 29f\n"
+ "28:" // Height 3: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "29:" // Height 3: input setup done
+ "cmp x27, #0x10\n"
+ "ble 31f\n"
+ "30:" // Height 3: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z27.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z24.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqb { z26.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "trn1 z6.d, z27.d, z24.d\n"
+ "trn2 z27.d, z27.d, z24.d\n"
+ "trn1 z30.d, z26.d, z29.d\n"
+ "trn2 z26.d, z26.d, z29.d\n"
+ ".inst 0x459998c8 // usmmla z8.s, z6.b, z25.b\n"
+ ".inst 0x459c98cc // usmmla z12.s, z6.b, z28.b\n"
+ ".inst 0x45999bd0 // usmmla z16.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x459c9bd4 // usmmla z20.s, z30.b, z28.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x459998c9 // usmmla z9.s, z6.b, z25.b\n"
+ ".inst 0x45999bd1 // usmmla z17.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x459898cd // usmmla z13.s, z6.b, z24.b\n"
+ ".inst 0x45989bd5 // usmmla z21.s, z30.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x459998ca // usmmla z10.s, z6.b, z25.b\n"
+ ".inst 0x45999bd2 // usmmla z18.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x459898ce // usmmla z14.s, z6.b, z24.b\n"
+ ".inst 0x45989bd6 // usmmla z22.s, z30.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x459998cb // usmmla z11.s, z6.b, z25.b\n"
+ ".inst 0x45999bd3 // usmmla z19.s, z30.b, z25.b\n"
+ ".inst 0x459898cf // usmmla z15.s, z6.b, z24.b\n"
+ ".inst 0x45989bd7 // usmmla z23.s, z30.b, z24.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x45999b68 // usmmla z8.s, z27.b, z25.b\n"
+ ".inst 0x45999b50 // usmmla z16.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45989b6c // usmmla z12.s, z27.b, z24.b\n"
+ ".inst 0x45989b54 // usmmla z20.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x45999b69 // usmmla z9.s, z27.b, z25.b\n"
+ ".inst 0x45999b51 // usmmla z17.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x45989b6d // usmmla z13.s, z27.b, z24.b\n"
+ ".inst 0x45989b55 // usmmla z21.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x45999b6a // usmmla z10.s, z27.b, z25.b\n"
+ ".inst 0x45999b52 // usmmla z18.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45989b6e // usmmla z14.s, z27.b, z24.b\n"
+ ".inst 0x45989b56 // usmmla z22.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x45999b6b // usmmla z11.s, z27.b, z25.b\n"
+ ".inst 0x45999b53 // usmmla z19.s, z26.b, z25.b\n"
+ ".inst 0x45989b6f // usmmla z15.s, z27.b, z24.b\n"
+ ".inst 0x45989b57 // usmmla z23.s, z26.b, z24.b\n"
+ "bgt 30b\n"
+ "31:" // Height 3: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1rqb { z24.b }, p0/Z, [x25]\n"
+ "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "trn1 z27.d, z1.d, z24.d\n"
+ "trn2 z1.d, z1.d, z24.d\n"
+ "trn1 z26.d, z3.d, z29.d\n"
+ ".inst 0x45999b68 // usmmla z8.s, z27.b, z25.b\n"
+ ".inst 0x459c9b6c // usmmla z12.s, z27.b, z28.b\n"
+ "trn2 z3.d, z3.d, z29.d\n"
+ ".inst 0x45999b50 // usmmla z16.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x459c9b54 // usmmla z20.s, z26.b, z28.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45999b69 // usmmla z9.s, z27.b, z25.b\n"
+ ".inst 0x45999b51 // usmmla z17.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45989b6d // usmmla z13.s, z27.b, z24.b\n"
+ ".inst 0x45989b55 // usmmla z21.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45999b6a // usmmla z10.s, z27.b, z25.b\n"
+ ".inst 0x45999b52 // usmmla z18.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45989b6e // usmmla z14.s, z27.b, z24.b\n"
+ ".inst 0x45989b56 // usmmla z22.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x45999b6b // usmmla z11.s, z27.b, z25.b\n"
+ ".inst 0x45999b53 // usmmla z19.s, z26.b, z25.b\n"
+ ".inst 0x45989b6f // usmmla z15.s, z27.b, z24.b\n"
+ ".inst 0x45989b57 // usmmla z23.s, z26.b, z24.b\n"
+ "ble 32f\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45999828 // usmmla z8.s, z1.b, z25.b\n"
+ ".inst 0x45999870 // usmmla z16.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4598982c // usmmla z12.s, z1.b, z24.b\n"
+ ".inst 0x45989874 // usmmla z20.s, z3.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45999829 // usmmla z9.s, z1.b, z25.b\n"
+ ".inst 0x45999871 // usmmla z17.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4598982d // usmmla z13.s, z1.b, z24.b\n"
+ ".inst 0x45989875 // usmmla z21.s, z3.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4599982a // usmmla z10.s, z1.b, z25.b\n"
+ ".inst 0x45999872 // usmmla z18.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4598982e // usmmla z14.s, z1.b, z24.b\n"
+ ".inst 0x45989876 // usmmla z22.s, z3.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x4599982b // usmmla z11.s, z1.b, z25.b\n"
+ ".inst 0x45999873 // usmmla z19.s, z3.b, z25.b\n"
+ ".inst 0x4598982f // usmmla z15.s, z1.b, z24.b\n"
+ ".inst 0x45989877 // usmmla z23.s, z3.b, z24.b\n"
+ "32:" // Height 3: Multiply loop: multiply skip
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 27b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z27.d, z8.d, z12.d\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z26.d, z9.d, z13.d\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z25.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "uzp1 z24.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z27.s }, p4, [x9]\n"
+ "uzp1 z16.d, z16.d, z20.d\n"
+ "uzp1 z17.d, z17.d, z21.d\n"
+ "st1w { z26.s }, p3, [x9, #1, MUL VL]\n"
+ "uzp1 z18.d, z18.d, z22.d\n"
+ "uzp1 z19.d, z19.d, z23.d\n"
+ "st1w { z25.s }, p2, [x9, #2, MUL VL]\n"
+ "st1w { z24.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x21]\n"
+ "st1w { z9.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x20]\n"
+ "st1w { z17.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x20, #3, MUL VL]\n"
+ "33:" // Height 3: Writeback done
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
+ "bgt 24b\n"
+ "b 68f\n"
+ "34:" // Height 4
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "35:" // Height 4: Column loop
+ "mov x20, #0x0\n"
+ "whilelt p4.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p3.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p2.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p1.s, x20, x11\n"
+ "tbz %x[flags], #0, 36f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x22]\n"
+ "ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x21]\n"
+ "ld1w { z18.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
+ "ld1w { z20.s }, p4/Z, [x20]\n"
+ "ld1w { z21.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
+ "ld1w { z22.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "b 37f\n"
+ "36:" // Height 4: no accumulate
+ "mov z8.s, #0x0\n"
+ "mov z9.s, #0x0\n"
+ "mov z10.s, #0x0\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "mov z15.s, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "37:" // Height 4: setup done
+ "mov x28, #0x0\n"
+ "38:" // Height 4: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 39f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "cbnz x28, 40f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "b 40f\n"
+ "39:" // Height 4: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "40:" // Height 4: input setup done
+ "cmp x27, #0x10\n"
+ "ble 42f\n"
+ "41:" // Height 4: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z31.b }, p5/Z, [x10]\n"
+ "ld1b { z30.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z29.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqb { z28.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z27.d, z29.d, z25.d\n"
+ "trn2 z29.d, z29.d, z25.d\n"
+ "trn1 z26.d, z28.d, z24.d\n"
+ "trn2 z28.d, z28.d, z24.d\n"
+ ".inst 0x459f9b68 // usmmla z8.s, z27.b, z31.b\n"
+ ".inst 0x459e9b6c // usmmla z12.s, z27.b, z30.b\n"
+ ".inst 0x459f9b50 // usmmla z16.s, z26.b, z31.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x459e9b54 // usmmla z20.s, z26.b, z30.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45999b69 // usmmla z9.s, z27.b, z25.b\n"
+ ".inst 0x45999b51 // usmmla z17.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45989b6d // usmmla z13.s, z27.b, z24.b\n"
+ ".inst 0x45989b55 // usmmla z21.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45999b6a // usmmla z10.s, z27.b, z25.b\n"
+ ".inst 0x45999b52 // usmmla z18.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45989b6e // usmmla z14.s, z27.b, z24.b\n"
+ ".inst 0x45989b56 // usmmla z22.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x45999b6b // usmmla z11.s, z27.b, z25.b\n"
+ ".inst 0x45999b53 // usmmla z19.s, z26.b, z25.b\n"
+ ".inst 0x45989b6f // usmmla z15.s, z27.b, z24.b\n"
+ ".inst 0x45989b57 // usmmla z23.s, z26.b, z24.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x45999ba8 // usmmla z8.s, z29.b, z25.b\n"
+ ".inst 0x45999b90 // usmmla z16.s, z28.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45989bac // usmmla z12.s, z29.b, z24.b\n"
+ ".inst 0x45989b94 // usmmla z20.s, z28.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x45999ba9 // usmmla z9.s, z29.b, z25.b\n"
+ ".inst 0x45999b91 // usmmla z17.s, z28.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x45989bad // usmmla z13.s, z29.b, z24.b\n"
+ ".inst 0x45989b95 // usmmla z21.s, z28.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x45999baa // usmmla z10.s, z29.b, z25.b\n"
+ ".inst 0x45999b92 // usmmla z18.s, z28.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45989bae // usmmla z14.s, z29.b, z24.b\n"
+ ".inst 0x45989b96 // usmmla z22.s, z28.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x45999bab // usmmla z11.s, z29.b, z25.b\n"
+ ".inst 0x45999b93 // usmmla z19.s, z28.b, z25.b\n"
+ ".inst 0x45989baf // usmmla z15.s, z29.b, z24.b\n"
+ ".inst 0x45989b97 // usmmla z23.s, z28.b, z24.b\n"
+ "bgt 41b\n"
+ "42:" // Height 4: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
+ "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "trn1 z27.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ "trn1 z26.d, z3.d, z24.d\n"
+ ".inst 0x459d9b68 // usmmla z8.s, z27.b, z29.b\n"
+ ".inst 0x459c9b6c // usmmla z12.s, z27.b, z28.b\n"
+ "trn2 z3.d, z3.d, z24.d\n"
+ ".inst 0x459d9b50 // usmmla z16.s, z26.b, z29.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x459c9b54 // usmmla z20.s, z26.b, z28.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45999b69 // usmmla z9.s, z27.b, z25.b\n"
+ ".inst 0x45999b51 // usmmla z17.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45989b6d // usmmla z13.s, z27.b, z24.b\n"
+ ".inst 0x45989b55 // usmmla z21.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45999b6a // usmmla z10.s, z27.b, z25.b\n"
+ ".inst 0x45999b52 // usmmla z18.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45989b6e // usmmla z14.s, z27.b, z24.b\n"
+ ".inst 0x45989b56 // usmmla z22.s, z26.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x45999b6b // usmmla z11.s, z27.b, z25.b\n"
+ ".inst 0x45999b53 // usmmla z19.s, z26.b, z25.b\n"
+ ".inst 0x45989b6f // usmmla z15.s, z27.b, z24.b\n"
+ ".inst 0x45989b57 // usmmla z23.s, z26.b, z24.b\n"
+ "ble 43f\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45999828 // usmmla z8.s, z1.b, z25.b\n"
+ ".inst 0x45999870 // usmmla z16.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4598982c // usmmla z12.s, z1.b, z24.b\n"
+ ".inst 0x45989874 // usmmla z20.s, z3.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45999829 // usmmla z9.s, z1.b, z25.b\n"
+ ".inst 0x45999871 // usmmla z17.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4598982d // usmmla z13.s, z1.b, z24.b\n"
+ ".inst 0x45989875 // usmmla z21.s, z3.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4599982a // usmmla z10.s, z1.b, z25.b\n"
+ ".inst 0x45999872 // usmmla z18.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4598982e // usmmla z14.s, z1.b, z24.b\n"
+ ".inst 0x45989876 // usmmla z22.s, z3.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x4599982b // usmmla z11.s, z1.b, z25.b\n"
+ ".inst 0x45999873 // usmmla z19.s, z3.b, z25.b\n"
+ ".inst 0x4598982f // usmmla z15.s, z1.b, z24.b\n"
+ ".inst 0x45989877 // usmmla z23.s, z3.b, z24.b\n"
+ "43:" // Height 4: Multiply loop: multiply skip
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 38b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z25.d, z8.d, z12.d\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z24.d, z9.d, z13.d\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z27.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "uzp1 z26.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z25.s }, p4, [x9]\n"
+ "uzp1 z25.d, z16.d, z20.d\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z24.s }, p3, [x9, #1, MUL VL]\n"
+ "uzp1 z24.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z27.s }, p2, [x9, #2, MUL VL]\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z26.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "uzp1 z20.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x22]\n"
+ "st1w { z9.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z25.s }, p4, [x21]\n"
+ "st1w { z24.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z20.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x20]\n"
+ "st1w { z17.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x20, #3, MUL VL]\n"
+ "44:" // Height 4: Writeback done
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
+ "bgt 35b\n"
+ "b 68f\n"
+ "45:" // Height 5
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "46:" // Height 5: Column loop
+ "mov x20, #0x0\n"
+ "whilelt p4.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p3.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p2.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p1.s, x20, x11\n"
+ "tbz %x[flags], #0, 47f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x23]\n"
+ "ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
+ "ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x22]\n"
+ "ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
+ "ld1w { z20.s }, p4/Z, [x21]\n"
+ "ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
+ "ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "ld1w { z25.s }, p4/Z, [x20]\n"
+ "ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z24.d, z25.d, z28.d\n"
+ "zip2 z28.d, z25.d, z28.d\n"
+ "zip1 z25.d, z26.d, z29.d\n"
+ "zip2 z29.d, z26.d, z29.d\n"
+ "zip1 z26.d, z27.d, z30.d\n"
+ "zip2 z30.d, z27.d, z30.d\n"
+ "zip1 z27.d, z0.d, z31.d\n"
+ "zip2 z31.d, z0.d, z31.d\n"
+ "b 48f\n"
+ "47:" // Height 5: no accumulate
+ "mov z8.s, #0x0\n"
+ "mov z9.s, #0x0\n"
+ "mov z10.s, #0x0\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "mov z15.s, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "mov z24.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "mov z28.s, #0x0\n"
+ "mov z29.s, #0x0\n"
+ "mov z30.s, #0x0\n"
+ "mov z31.s, #0x0\n"
+ "48:" // Height 5: setup done
+ "mov x28, #0x0\n"
+ "49:" // Height 5: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 50f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "cbnz x28, 51f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "b 51f\n"
+ "50:" // Height 5: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "51:" // Height 5: input setup done
+ "cmp x27, #0x10\n"
+ "ble 53f\n"
+ "52:" // Height 5: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z1.b }, p5/Z, [x10]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z6.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z3.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqb { z7.b }, p0/Z, [x24]\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
+ "trn1 z3.d, z7.d, z2.d\n"
+ "trn2 z7.d, z7.d, z2.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45819888 // usmmla z8.s, z4.b, z1.b\n"
+ ".inst 0x45819870 // usmmla z16.s, z3.b, z1.b\n"
+ ".inst 0x45819858 // usmmla z24.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4580988c // usmmla z12.s, z4.b, z0.b\n"
+ ".inst 0x45809874 // usmmla z20.s, z3.b, z0.b\n"
+ ".inst 0x4580985c // usmmla z28.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45819889 // usmmla z9.s, z4.b, z1.b\n"
+ ".inst 0x45819871 // usmmla z17.s, z3.b, z1.b\n"
+ ".inst 0x45819859 // usmmla z25.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4580988d // usmmla z13.s, z4.b, z0.b\n"
+ ".inst 0x45809875 // usmmla z21.s, z3.b, z0.b\n"
+ ".inst 0x4580985d // usmmla z29.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4581988a // usmmla z10.s, z4.b, z1.b\n"
+ ".inst 0x45819872 // usmmla z18.s, z3.b, z1.b\n"
+ ".inst 0x4581985a // usmmla z26.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4580988e // usmmla z14.s, z4.b, z0.b\n"
+ ".inst 0x45809876 // usmmla z22.s, z3.b, z0.b\n"
+ ".inst 0x4580985e // usmmla z30.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x4581988b // usmmla z11.s, z4.b, z1.b\n"
+ ".inst 0x45819873 // usmmla z19.s, z3.b, z1.b\n"
+ ".inst 0x4581985b // usmmla z27.s, z2.b, z1.b\n"
+ ".inst 0x4580988f // usmmla z15.s, z4.b, z0.b\n"
+ ".inst 0x45809877 // usmmla z23.s, z3.b, z0.b\n"
+ ".inst 0x4580985f // usmmla z31.s, z2.b, z0.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z0.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x458198c8 // usmmla z8.s, z6.b, z1.b\n"
+ ".inst 0x458198f0 // usmmla z16.s, z7.b, z1.b\n"
+ ".inst 0x458198b8 // usmmla z24.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x458098cc // usmmla z12.s, z6.b, z0.b\n"
+ ".inst 0x458098f4 // usmmla z20.s, z7.b, z0.b\n"
+ ".inst 0x458098bc // usmmla z28.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x458198c9 // usmmla z9.s, z6.b, z1.b\n"
+ ".inst 0x458198f1 // usmmla z17.s, z7.b, z1.b\n"
+ ".inst 0x458198b9 // usmmla z25.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x458098cd // usmmla z13.s, z6.b, z0.b\n"
+ ".inst 0x458098f5 // usmmla z21.s, z7.b, z0.b\n"
+ ".inst 0x458098bd // usmmla z29.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x458198ca // usmmla z10.s, z6.b, z1.b\n"
+ ".inst 0x458198f2 // usmmla z18.s, z7.b, z1.b\n"
+ ".inst 0x458198ba // usmmla z26.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x458098ce // usmmla z14.s, z6.b, z0.b\n"
+ ".inst 0x458098f6 // usmmla z22.s, z7.b, z0.b\n"
+ ".inst 0x458098be // usmmla z30.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x458198cb // usmmla z11.s, z6.b, z1.b\n"
+ ".inst 0x458198f3 // usmmla z19.s, z7.b, z1.b\n"
+ ".inst 0x458198bb // usmmla z27.s, z5.b, z1.b\n"
+ ".inst 0x458098cf // usmmla z15.s, z6.b, z0.b\n"
+ ".inst 0x458098f7 // usmmla z23.s, z7.b, z0.b\n"
+ ".inst 0x458098bf // usmmla z31.s, z5.b, z0.b\n"
+ "bgt 52b\n"
+ "53:" // Height 5: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z2.b }, p5/Z, [x10]\n"
+ "subs x27, x27, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1rqb { z6.b }, p0/Z, [x25]\n"
+ "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ "trn1 z4.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x458298e8 // usmmla z8.s, z7.b, z2.b\n"
+ ".inst 0x458298d0 // usmmla z16.s, z6.b, z2.b\n"
+ ".inst 0x45829898 // usmmla z24.s, z4.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x458098ec // usmmla z12.s, z7.b, z0.b\n"
+ ".inst 0x458098d4 // usmmla z20.s, z6.b, z0.b\n"
+ ".inst 0x4580989c // usmmla z28.s, z4.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x458298e9 // usmmla z9.s, z7.b, z2.b\n"
+ ".inst 0x458298d1 // usmmla z17.s, z6.b, z2.b\n"
+ ".inst 0x45829899 // usmmla z25.s, z4.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x458098ed // usmmla z13.s, z7.b, z0.b\n"
+ ".inst 0x458098d5 // usmmla z21.s, z6.b, z0.b\n"
+ ".inst 0x4580989d // usmmla z29.s, z4.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x458298ea // usmmla z10.s, z7.b, z2.b\n"
+ ".inst 0x458298d2 // usmmla z18.s, z6.b, z2.b\n"
+ ".inst 0x4582989a // usmmla z26.s, z4.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x458098ee // usmmla z14.s, z7.b, z0.b\n"
+ ".inst 0x458098d6 // usmmla z22.s, z6.b, z0.b\n"
+ ".inst 0x4580989e // usmmla z30.s, z4.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x458298eb // usmmla z11.s, z7.b, z2.b\n"
+ ".inst 0x458298d3 // usmmla z19.s, z6.b, z2.b\n"
+ ".inst 0x4582989b // usmmla z27.s, z4.b, z2.b\n"
+ ".inst 0x458098ef // usmmla z15.s, z7.b, z0.b\n"
+ ".inst 0x458098d7 // usmmla z23.s, z6.b, z0.b\n"
+ ".inst 0x4580989f // usmmla z31.s, z4.b, z0.b\n"
+ "ble 54f\n"
+ "ld1b { z2.b }, p5/Z, [x10]\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45829828 // usmmla z8.s, z1.b, z2.b\n"
+ ".inst 0x45829870 // usmmla z16.s, z3.b, z2.b\n"
+ ".inst 0x458298b8 // usmmla z24.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4580982c // usmmla z12.s, z1.b, z0.b\n"
+ ".inst 0x45809874 // usmmla z20.s, z3.b, z0.b\n"
+ ".inst 0x458098bc // usmmla z28.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45829829 // usmmla z9.s, z1.b, z2.b\n"
+ ".inst 0x45829871 // usmmla z17.s, z3.b, z2.b\n"
+ ".inst 0x458298b9 // usmmla z25.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4580982d // usmmla z13.s, z1.b, z0.b\n"
+ ".inst 0x45809875 // usmmla z21.s, z3.b, z0.b\n"
+ ".inst 0x458098bd // usmmla z29.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4582982a // usmmla z10.s, z1.b, z2.b\n"
+ ".inst 0x45829872 // usmmla z18.s, z3.b, z2.b\n"
+ ".inst 0x458298ba // usmmla z26.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4580982e // usmmla z14.s, z1.b, z0.b\n"
+ ".inst 0x45809876 // usmmla z22.s, z3.b, z0.b\n"
+ ".inst 0x458098be // usmmla z30.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x4582982b // usmmla z11.s, z1.b, z2.b\n"
+ ".inst 0x45829873 // usmmla z19.s, z3.b, z2.b\n"
+ ".inst 0x458298bb // usmmla z27.s, z5.b, z2.b\n"
+ ".inst 0x4580982f // usmmla z15.s, z1.b, z0.b\n"
+ ".inst 0x45809877 // usmmla z23.s, z3.b, z0.b\n"
+ ".inst 0x458098bf // usmmla z31.s, z5.b, z0.b\n"
+ "54:" // Height 5: Multiply loop: multiply skip
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 49b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z1.d, z8.d, z12.d\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z0.d, z9.d, z13.d\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z3.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "uzp1 z2.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z1.s }, p4, [x9]\n"
+ "add x20, x21, x20, LSL #2\n"
+ "uzp1 z1.d, z16.d, z20.d\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z0.s }, p3, [x9, #1, MUL VL]\n"
+ "uzp1 z0.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z3.s }, p2, [x9, #2, MUL VL]\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z2.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "uzp1 z20.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
+ "uzp1 z24.d, z24.d, z28.d\n"
+ "uzp1 z25.d, z25.d, z29.d\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
+ "uzp1 z26.d, z26.d, z30.d\n"
+ "uzp1 z27.d, z27.d, z31.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z1.s }, p4, [x22]\n"
+ "st1w { z0.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z20.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x21]\n"
+ "st1w { z17.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
+ "55:" // Height 5: Writeback done
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
+ "bgt 46b\n"
+ "b 68f\n"
+ "56:" // Height 6
+ "ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "mov x20, #0x18\n"
+ "ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
+ "ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
+ "57:" // Height 6: Column loop
+ "mov x20, #0x0\n"
+ "whilelt p4.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p3.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p2.s, x20, x11\n"
+ "incw x20\n"
+ "whilelt p1.s, x20, x11\n"
+ "tbz %x[flags], #0, 58f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "ld1w { z12.s }, p4/Z, [x24]\n"
+ "ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z17.s }, p4/Z, [x23]\n"
+ "ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
+ "ld1w { z20.s }, p4/Z, [x22]\n"
+ "ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
+ "ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "ld1w { z25.s }, p4/Z, [x21]\n"
+ "ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z0.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "ld1w { z28.s }, p4/Z, [x20]\n"
+ "ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z24.d, z25.d, z28.d\n"
+ "zip2 z28.d, z25.d, z28.d\n"
+ "zip1 z25.d, z26.d, z29.d\n"
+ "zip2 z29.d, z26.d, z29.d\n"
+ "zip1 z26.d, z27.d, z30.d\n"
+ "zip2 z30.d, z27.d, z30.d\n"
+ "zip1 z27.d, z0.d, z31.d\n"
+ "zip2 z31.d, z0.d, z31.d\n"
+ "b 59f\n"
+ "58:" // Height 6: no accumulate
+ "mov z8.s, #0x0\n"
+ "mov z9.s, #0x0\n"
+ "mov z10.s, #0x0\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "mov z15.s, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "mov z21.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ "mov z23.s, #0x0\n"
+ "mov z24.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "mov z28.s, #0x0\n"
+ "mov z29.s, #0x0\n"
+ "mov z30.s, #0x0\n"
+ "mov z31.s, #0x0\n"
+ "59:" // Height 6: setup done
+ "mov x28, #0x0\n"
+ "60:" // Height 6: String loop
+ "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
+ "tbz %x[flags], #3, 61f\n"
+ "ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
+ "add x20, x20, x21, LSL #3\n"
+ "ldr x26, [x20, #0x0]\n"
+ "ldr x25, [x20, #0x8]\n"
+ "ldr x24, [x20, #0x10]\n"
+ "ldr x23, [x20, #0x18]\n"
+ "ldr x22, [x20, #0x20]\n"
+ "ldr x21, [x20, #0x28]\n"
+ "cbnz x28, 62f\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_input_initial_col]]\n"
+ "add x26, x26, x20\n"
+ "add x25, x25, x20\n"
+ "add x24, x24, x20\n"
+ "add x23, x23, x20\n"
+ "add x22, x22, x20\n"
+ "add x21, x21, x20\n"
+ "b 62f\n"
+ "61:" // Height 6: setup direct input
+ "mov x26, %x[input_ptr]\n"
+ "add x25, x26, x21\n"
+ "add x24, x25, x21\n"
+ "add x23, x24, x21\n"
+ "add x22, x23, x21\n"
+ "add x21, x22, x21\n"
+ "62:" // Height 6: input setup done
+ "cmp x27, #0x10\n"
+ "ble 64f\n"
+ "63:" // Height 6: Multiply loop: Main loop head
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z1.b }, p5/Z, [x10]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z6.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z3.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqb { z7.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z0.b }, p0/Z, [x21]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
+ "add x21, x21, #0x10\n"
+ "trn1 z3.d, z7.d, z2.d\n"
+ "trn2 z7.d, z7.d, z2.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45819888 // usmmla z8.s, z4.b, z1.b\n"
+ ".inst 0x45819870 // usmmla z16.s, z3.b, z1.b\n"
+ ".inst 0x45819858 // usmmla z24.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4580988c // usmmla z12.s, z4.b, z0.b\n"
+ ".inst 0x45809874 // usmmla z20.s, z3.b, z0.b\n"
+ ".inst 0x4580985c // usmmla z28.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45819889 // usmmla z9.s, z4.b, z1.b\n"
+ ".inst 0x45819871 // usmmla z17.s, z3.b, z1.b\n"
+ ".inst 0x45819859 // usmmla z25.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4580988d // usmmla z13.s, z4.b, z0.b\n"
+ ".inst 0x45809875 // usmmla z21.s, z3.b, z0.b\n"
+ ".inst 0x4580985d // usmmla z29.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4581988a // usmmla z10.s, z4.b, z1.b\n"
+ ".inst 0x45819872 // usmmla z18.s, z3.b, z1.b\n"
+ ".inst 0x4581985a // usmmla z26.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4580988e // usmmla z14.s, z4.b, z0.b\n"
+ ".inst 0x45809876 // usmmla z22.s, z3.b, z0.b\n"
+ ".inst 0x4580985e // usmmla z30.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x4581988b // usmmla z11.s, z4.b, z1.b\n"
+ ".inst 0x45819873 // usmmla z19.s, z3.b, z1.b\n"
+ ".inst 0x4581985b // usmmla z27.s, z2.b, z1.b\n"
+ ".inst 0x4580988f // usmmla z15.s, z4.b, z0.b\n"
+ ".inst 0x45809877 // usmmla z23.s, z3.b, z0.b\n"
+ ".inst 0x4580985f // usmmla z31.s, z2.b, z0.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z0.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x458198c8 // usmmla z8.s, z6.b, z1.b\n"
+ ".inst 0x458198f0 // usmmla z16.s, z7.b, z1.b\n"
+ ".inst 0x458198b8 // usmmla z24.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x458098cc // usmmla z12.s, z6.b, z0.b\n"
+ ".inst 0x458098f4 // usmmla z20.s, z7.b, z0.b\n"
+ ".inst 0x458098bc // usmmla z28.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x458198c9 // usmmla z9.s, z6.b, z1.b\n"
+ ".inst 0x458198f1 // usmmla z17.s, z7.b, z1.b\n"
+ ".inst 0x458198b9 // usmmla z25.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x458098cd // usmmla z13.s, z6.b, z0.b\n"
+ ".inst 0x458098f5 // usmmla z21.s, z7.b, z0.b\n"
+ ".inst 0x458098bd // usmmla z29.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x458198ca // usmmla z10.s, z6.b, z1.b\n"
+ ".inst 0x458198f2 // usmmla z18.s, z7.b, z1.b\n"
+ ".inst 0x458198ba // usmmla z26.s, z5.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x458098ce // usmmla z14.s, z6.b, z0.b\n"
+ ".inst 0x458098f6 // usmmla z22.s, z7.b, z0.b\n"
+ ".inst 0x458098be // usmmla z30.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #-1, MUL VL]\n"
+ ".inst 0x458198cb // usmmla z11.s, z6.b, z1.b\n"
+ ".inst 0x458198f3 // usmmla z19.s, z7.b, z1.b\n"
+ ".inst 0x458198bb // usmmla z27.s, z5.b, z1.b\n"
+ ".inst 0x458098cf // usmmla z15.s, z6.b, z0.b\n"
+ ".inst 0x458098f7 // usmmla z23.s, z7.b, z0.b\n"
+ ".inst 0x458098bf // usmmla z31.s, z5.b, z0.b\n"
+ "bgt 63b\n"
+ "64:" // Height 6: Multiply loop: Single iteration only
+ "whilelt p0.b, XZR, x27\n"
+ "ld1b { z2.b }, p5/Z, [x10]\n"
+ "subs x27, x27, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "ld1rqb { z6.b }, p0/Z, [x25]\n"
+ "ld1rqb { z3.b }, p0/Z, [x24]\n"
+ "ld1rqb { z4.b }, p0/Z, [x23]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z0.b }, p0/Z, [x21]\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
+ "trn1 z4.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x458298e8 // usmmla z8.s, z7.b, z2.b\n"
+ ".inst 0x458298d0 // usmmla z16.s, z6.b, z2.b\n"
+ ".inst 0x45829898 // usmmla z24.s, z4.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x458098ec // usmmla z12.s, z7.b, z0.b\n"
+ ".inst 0x458098d4 // usmmla z20.s, z6.b, z0.b\n"
+ ".inst 0x4580989c // usmmla z28.s, z4.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x458298e9 // usmmla z9.s, z7.b, z2.b\n"
+ ".inst 0x458298d1 // usmmla z17.s, z6.b, z2.b\n"
+ ".inst 0x45829899 // usmmla z25.s, z4.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x458098ed // usmmla z13.s, z7.b, z0.b\n"
+ ".inst 0x458098d5 // usmmla z21.s, z6.b, z0.b\n"
+ ".inst 0x4580989d // usmmla z29.s, z4.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x458298ea // usmmla z10.s, z7.b, z2.b\n"
+ ".inst 0x458298d2 // usmmla z18.s, z6.b, z2.b\n"
+ ".inst 0x4582989a // usmmla z26.s, z4.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x458098ee // usmmla z14.s, z7.b, z0.b\n"
+ ".inst 0x458098d6 // usmmla z22.s, z6.b, z0.b\n"
+ ".inst 0x4580989e // usmmla z30.s, z4.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x458298eb // usmmla z11.s, z7.b, z2.b\n"
+ ".inst 0x458298d3 // usmmla z19.s, z6.b, z2.b\n"
+ ".inst 0x4582989b // usmmla z27.s, z4.b, z2.b\n"
+ ".inst 0x458098ef // usmmla z15.s, z7.b, z0.b\n"
+ ".inst 0x458098d7 // usmmla z23.s, z6.b, z0.b\n"
+ ".inst 0x4580989f // usmmla z31.s, z4.b, z0.b\n"
+ "ble 65f\n"
+ "ld1b { z2.b }, p5/Z, [x10]\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45829828 // usmmla z8.s, z1.b, z2.b\n"
+ ".inst 0x45829870 // usmmla z16.s, z3.b, z2.b\n"
+ ".inst 0x458298b8 // usmmla z24.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x4580982c // usmmla z12.s, z1.b, z0.b\n"
+ ".inst 0x45809874 // usmmla z20.s, z3.b, z0.b\n"
+ ".inst 0x458098bc // usmmla z28.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45829829 // usmmla z9.s, z1.b, z2.b\n"
+ ".inst 0x45829871 // usmmla z17.s, z3.b, z2.b\n"
+ ".inst 0x458298b9 // usmmla z25.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x4580982d // usmmla z13.s, z1.b, z0.b\n"
+ ".inst 0x45809875 // usmmla z21.s, z3.b, z0.b\n"
+ ".inst 0x458098bd // usmmla z29.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x4582982a // usmmla z10.s, z1.b, z2.b\n"
+ ".inst 0x45829872 // usmmla z18.s, z3.b, z2.b\n"
+ ".inst 0x458298ba // usmmla z26.s, z5.b, z2.b\n"
+ "ld1b { z2.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x4580982e // usmmla z14.s, z1.b, z0.b\n"
+ ".inst 0x45809876 // usmmla z22.s, z3.b, z0.b\n"
+ ".inst 0x458098be // usmmla z30.s, z5.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
+ ".inst 0x4582982b // usmmla z11.s, z1.b, z2.b\n"
+ ".inst 0x45829873 // usmmla z19.s, z3.b, z2.b\n"
+ ".inst 0x458298bb // usmmla z27.s, z5.b, z2.b\n"
+ ".inst 0x4580982f // usmmla z15.s, z1.b, z0.b\n"
+ ".inst 0x45809877 // usmmla z23.s, z3.b, z0.b\n"
+ ".inst 0x458098bf // usmmla z31.s, z5.b, z0.b\n"
+ "65:" // Height 6: Multiply loop: multiply skip
+ "ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
+ "add x28, x28, #0x1\n"
+ "cmp x28, x20\n"
+ "bne 60b\n"
+ "ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "uzp1 z0.d, z8.d, z12.d\n"
+ "uzp2 z8.d, z8.d, z12.d\n"
+ "uzp1 z12.d, z9.d, z13.d\n"
+ "uzp2 z9.d, z9.d, z13.d\n"
+ "uzp1 z13.d, z10.d, z14.d\n"
+ "uzp2 z10.d, z10.d, z14.d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "uzp1 z14.d, z11.d, z15.d\n"
+ "uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z0.s }, p4, [x9]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "uzp1 z15.d, z16.d, z20.d\n"
+ "uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
+ "uzp1 z20.d, z17.d, z21.d\n"
+ "uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
+ "uzp1 z21.d, z18.d, z22.d\n"
+ "uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "uzp1 z22.d, z19.d, z23.d\n"
+ "uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x24]\n"
+ "uzp1 z23.d, z24.d, z28.d\n"
+ "uzp2 z24.d, z24.d, z28.d\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
+ "uzp1 z28.d, z25.d, z29.d\n"
+ "uzp2 z25.d, z25.d, z29.d\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
+ "uzp1 z29.d, z26.d, z30.d\n"
+ "uzp2 z26.d, z26.d, z30.d\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
+ "uzp1 z30.d, z27.d, z31.d\n"
+ "uzp2 z27.d, z27.d, z31.d\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
+ "st1w { z16.s }, p4, [x22]\n"
+ "st1w { z17.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z18.s }, p2, [x22, #2, MUL VL]\n"
+ "st1w { z19.s }, p1, [x22, #3, MUL VL]\n"
+ "st1w { z23.s }, p4, [x21]\n"
+ "st1w { z28.s }, p3, [x21, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z30.s }, p1, [x21, #3, MUL VL]\n"
+ "st1w { z24.s }, p4, [x20]\n"
+ "st1w { z25.s }, p3, [x20, #1, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z27.s }, p1, [x20, #3, MUL VL]\n"
+ "66:" // Height 6: Writeback done
+ "decw x11, ALL, MUL #4\n"
+ "cmp x11, XZR\n"
+ "bgt 57b\n"
+ "subs %x[M], %x[M], #0x6\n"
+ "beq 68f\n"
+ "ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "tbz %x[flags], #3, 67f\n"
+ "add x21, x21, #0x6\n"
+ "str x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "b 1b\n"
+ "67:" // Update direct input
+ "mov x20, #0x6\n"
+ "madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
+ "b 1b\n"
+ "68:" // Exit
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
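
For orientation, since the generated blocks above are hard to read in bulk: each annotated `usmmla` `.inst` multiplies a 2x8 tile of unsigned 8-bit input rows (packed two at a time by the `trn1`/`trn2` pairs) against an 8x2 tile of signed 8-bit weights and accumulates a 2x2 block of 32-bit sums, which is why the writeback paths use `uzp1`/`uzp2` to de-interleave the accumulators before the predicated `st1w` stores. A minimal scalar sketch of one such step, assuming the architectural USMMLA semantics (names here are illustrative, not part of the patch):

    #include <cstdint>

    // One USMMLA step per 128-bit segment:
    // acc (2x2, int32) += a (2x8, uint8) * b (8x2, int8).
    static void usmmla_ref(int32_t acc[2][2], const uint8_t a[2][8], const int8_t b[8][2])
    {
        for (int r = 0; r < 2; ++r)
            for (int c = 0; c < 2; ++c)
                for (int k = 0; k < 8; ++k)
                    acc[r][c] += static_cast<int32_t>(a[r][k]) * static_cast<int32_t>(b[k][c]);
    }

The six `Height N` variants differ only in how many row pairs they keep live: odd heights pair the final row with a don't-care register (which is why some `trn1`/`zip1` sources are never explicitly loaded, and why their writeback stores only the `uzp1` halves for the last pair), while the `whilelt` predicates p1-p4 handle the N tail one vector at a time.
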
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp
index e9197e8ec5..12e99fb526 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -71,7 +71,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 4, 4> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 4, 4> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
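
The one-line template change in this hunk is the header-side counterpart of the mixed-signedness kernels added elsewhere in the patch: once a kernel's LHS is unsigned 8-bit and its RHS signed 8-bit, a single operand type no longer describes both sides, so the transforms helper is presumably now parameterised on the LHS and RHS element types separately, and uniform-type kernels like this u8u32 one pick up the extra parameter for consistency. A hedged sketch of the declaration's shape (illustrative only; the real definition lives elsewhere in arm_gemm and may differ):

    template <typename TLhs, typename TRhs, typename TResult,
              unsigned int Height, unsigned int WidthVL, unsigned int KBlock>
    class StdTransformsSVE;   // previously StdTransformsSVE<TRhs, TResult, 6, 4, 4>
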
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp
index 4d0f44982a..3f074fad7d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
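
The hunks above show the other refactor that recurs across these kernel files: `output_ptr` moves from a standalone local, passed to the asm as its own operand, into the `KernelArgs` block, so each height block reloads it with `ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]` rather than pinning it via `mov x9, %x[output_ptr]`, and the Height 6 entry seen earlier in this diff can advance it in place with a plain `str` before looping back for the next six rows. A condensed sketch of the resulting argument block, inferred from the `%[offsetof_*]` bindings in the asm constraint lists (field order and exact types are illustrative):

    struct KernelArgs {
        size_t num_strings                 = {};
        const unsigned int *string_lengths = {};
        size_t N                           = {};
        const void *B_ptr                  = {};
        size_t output_offset               = {};
        size_t input_initial_col           = {};
        size_t input_offset                = {};
        void *output_ptr                   = {};  // now a struct member, not an asm operand
    };
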
@@ -89,7 +89,7 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"beq 11f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -114,8 +114,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -134,14 +134,14 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop
"udot z8.s, z6.b, z0.b\n"
- "udot z9.s, z7.b, z0.b\n"
"ld1b { z17.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z7.b, z0.b\n"
"ld1b { z16.b }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
"add x26, x26, #0x4\n"
+ "subs x27, x27, #0x4\n"
"udot z10.s, z17.b, z0.b\n"
"udot z11.s, z16.b, z0.b\n"
- "subs x27, x27, #0x4\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1b { z6.b }, p4/Z, [x10]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
@@ -149,14 +149,14 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"9:" // Height 1: Multiply loop: Main loop skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"udot z8.s, z6.b, z0.b\n"
- "udot z9.s, z7.b, z0.b\n"
"ld1b { z17.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z7.b, z0.b\n"
"ld1b { z16.b }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
+ "addvl x10, x10, #4\n"
"cmp x28, x20\n"
"udot z10.s, z17.b, z0.b\n"
"udot z11.s, z16.b, z0.b\n"
- "addvl x10, x10, #4\n"
"bne 5b\n"
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
@@ -171,7 +171,7 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"11:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"12:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -183,11 +183,11 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 13f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x20]\n"
"ld1w { z13.s }, p2/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x20, #2, MUL VL]\n"
@@ -206,8 +206,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"15:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 16f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -242,8 +242,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z10.s, z17.b, z0.b\n"
"udot z14.s, z17.b, z1.b\n"
"udot z11.s, z16.b, z0.b\n"
- "udot z15.s, z16.b, z1.b\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
+ "udot z15.s, z16.b, z1.b\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
"ld1b { z6.b }, p4/Z, [x10]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
@@ -257,18 +257,18 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z13.s, z7.b, z1.b\n"
"ld1b { z16.b }, p4/Z, [x10, #3, MUL VL]\n"
"add x28, x28, #0x1\n"
+ "addvl x10, x10, #4\n"
"cmp x28, x20\n"
"udot z10.s, z17.b, z0.b\n"
"udot z14.s, z17.b, z1.b\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z16.b, z0.b\n"
"udot z15.s, z16.b, z1.b\n"
"bne 15b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p3, [x20]\n"
@@ -283,7 +283,7 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"21:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"22:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -295,12 +295,12 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 23f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x21]\n"
"ld1w { z13.s }, p2/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x21, #2, MUL VL]\n"
@@ -327,8 +327,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"25:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 26f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -359,8 +359,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"add x26, x26, #0x4\n"
"subs x27, x27, #0x4\n"
"udot z16.s, z6.b, z2.b\n"
- "udot z9.s, z7.b, z0.b\n"
"ld1b { z21.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z7.b, z0.b\n"
"add x25, x25, #0x4\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
@@ -372,11 +372,11 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z18.s, z21.b, z2.b\n"
"udot z11.s, z20.b, z0.b\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
"udot z15.s, z20.b, z1.b\n"
- "udot z19.s, z20.b, z2.b\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
+ "udot z19.s, z20.b, z2.b\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
+ "ld1b { z6.b }, p4/Z, [x10]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 28b\n"
"29:" // Height 3: Multiply loop: Main loop skip
@@ -385,13 +385,13 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z12.s, z6.b, z1.b\n"
"add x28, x28, #0x1\n"
"udot z16.s, z6.b, z2.b\n"
- "udot z9.s, z7.b, z0.b\n"
"ld1b { z21.b }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
+ "udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
"ld1b { z20.b }, p4/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
+ "cmp x28, x20\n"
"udot z10.s, z21.b, z0.b\n"
"udot z14.s, z21.b, z1.b\n"
"udot z18.s, z21.b, z2.b\n"
@@ -400,11 +400,11 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z19.s, z20.b, z2.b\n"
"bne 25b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p3, [x21]\n"
@@ -423,7 +423,7 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"31:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"32:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -435,13 +435,13 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 33f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x22]\n"
"ld1w { z13.s }, p2/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p1/Z, [x22, #2, MUL VL]\n"
@@ -476,8 +476,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"35:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 36f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -527,7 +527,6 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z14.s, z25.b, z1.b\n"
"udot z18.s, z25.b, z2.b\n"
"udot z22.s, z25.b, z3.b\n"
- "ld1b { z6.b }, p4/Z, [x10]\n"
"udot z11.s, z24.b, z0.b\n"
"udot z15.s, z24.b, z1.b\n"
"ld1rw { z0.s }, p4/Z, [x26]\n"
@@ -536,6 +535,7 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z23.s, z24.b, z3.b\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
+ "ld1b { z6.b }, p4/Z, [x10]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 38b\n"
"39:" // Height 4: Multiply loop: Main loop skip
@@ -546,15 +546,15 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
"ld1b { z25.b }, p4/Z, [x10, #2, MUL VL]\n"
- "cmp x28, x20\n"
"udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
"udot z21.s, z7.b, z3.b\n"
"ld1b { z24.b }, p4/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
+ "cmp x28, x20\n"
"udot z10.s, z25.b, z0.b\n"
"udot z14.s, z25.b, z1.b\n"
+ "addvl x10, x10, #4\n"
"udot z18.s, z25.b, z2.b\n"
"udot z22.s, z25.b, z3.b\n"
"udot z11.s, z24.b, z0.b\n"
@@ -563,12 +563,12 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z23.s, z24.b, z3.b\n"
"bne 35b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p3, [x22]\n"
@@ -591,7 +591,7 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"41:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"42:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -603,16 +603,16 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 43f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x23]\n"
"ld1w { z13.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p1/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p0/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p3/Z, [x22]\n"
@@ -653,8 +653,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"45:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 46f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -697,8 +697,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"add x25, x25, #0x4\n"
"add x24, x24, #0x4\n"
"udot z24.s, z6.b, z4.b\n"
- "udot z9.s, z7.b, z0.b\n"
"ld1b { z29.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z7.b, z0.b\n"
"add x23, x23, #0x4\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
@@ -716,12 +716,12 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"ld1rw { z0.s }, p4/Z, [x26]\n"
"ld1b { z6.b }, p4/Z, [x10]\n"
"udot z15.s, z28.b, z1.b\n"
- "udot z19.s, z28.b, z2.b\n"
"ld1rw { z1.s }, p4/Z, [x25]\n"
+ "udot z19.s, z28.b, z2.b\n"
"ld1rw { z2.s }, p4/Z, [x24]\n"
"udot z23.s, z28.b, z3.b\n"
- "udot z27.s, z28.b, z4.b\n"
"ld1rw { z3.s }, p4/Z, [x23]\n"
+ "udot z27.s, z28.b, z4.b\n"
"ld1rw { z4.s }, p4/Z, [x22]\n"
"ld1b { z7.b }, p4/Z, [x10, #1, MUL VL]\n"
"bgt 48b\n"
@@ -732,12 +732,12 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
- "cmp x28, x20\n"
"udot z24.s, z6.b, z4.b\n"
- "udot z9.s, z7.b, z0.b\n"
"ld1b { z29.b }, p4/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
"udot z17.s, z7.b, z2.b\n"
+ "cmp x28, x20\n"
"udot z21.s, z7.b, z3.b\n"
"udot z25.s, z7.b, z4.b\n"
"ld1b { z28.b }, p4/Z, [x10, #3, MUL VL]\n"
@@ -754,15 +754,15 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z27.s, z28.b, z4.b\n"
"bne 45b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z12.s }, p3, [x23]\n"
"st1w { z13.s }, p2, [x23, #1, MUL VL]\n"
"st1w { z14.s }, p1, [x23, #2, MUL VL]\n"
@@ -786,11 +786,12 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"b 62f\n"
"51:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"52:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p3.s, x20, x11\n"
@@ -802,17 +803,17 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"whilelt p0.s, x20, x11\n"
"tbz %x[flags], #0, 53f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p3/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p2/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p1/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p0/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p3/Z, [x24]\n"
"ld1w { z13.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p1/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p0/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p3/Z, [x23]\n"
@@ -861,8 +862,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"mov x28, #0x0\n"
"55:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 56f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -950,12 +951,12 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"add x28, x28, #0x1\n"
"udot z16.s, z6.b, z2.b\n"
"udot z20.s, z6.b, z3.b\n"
- "cmp x28, x20\n"
"udot z24.s, z6.b, z4.b\n"
"udot z28.s, z6.b, z5.b\n"
"ld1b { z6.b }, p4/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z7.b, z0.b\n"
"udot z13.s, z7.b, z1.b\n"
+ "cmp x28, x20\n"
"udot z17.s, z7.b, z2.b\n"
"udot z21.s, z7.b, z3.b\n"
"udot z25.s, z7.b, z4.b\n"
@@ -976,17 +977,17 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"udot z31.s, z7.b, z5.b\n"
"bne 55b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"st1w { z8.s }, p3, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"st1w { z9.s }, p2, [x9, #1, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z10.s }, p1, [x9, #2, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"st1w { z11.s }, p0, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "add x21, x22, x20, LSL #2\n"
"st1w { z12.s }, p3, [x24]\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z13.s }, p2, [x24, #1, MUL VL]\n"
"st1w { z14.s }, p1, [x24, #2, MUL VL]\n"
"st1w { z15.s }, p0, [x24, #3, MUL VL]\n"
@@ -1022,8 +1023,8 @@ void sve_hybrid_u8u32_dot_6x4VL_a64fx (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"62:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
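
The hunks above make two mechanical changes to this kernel. First, the output pointer moves out of the inline-asm operand list and into the KernelArgs block: rather than pinning a general-purpose register for the whole asm body through an [output_ptr] "+&r" operand, each height block now reloads the pointer with "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]". Second, the generated assembly is rescheduled: vector loads are hoisted above the dot-products that consume the previously loaded registers, and scalar bookkeeping (subs, cmp, addvl) is interleaved between them, with no change to what the loop computes. A minimal sketch of the argument-block pattern, using hypothetical names and assuming only what the diff itself shows:

#include <cstddef>

// Sketch only: the real KernelArgs in this file carries more fields
// (num_strings, string_lengths, N, B_ptr, input_offset, ...).
struct KernelArgsSketch {
    size_t output_offset = {};
    void  *output_ptr    = {};  // new member replacing the loose local
};

void bind_output(KernelArgsSketch &ka, void *base, size_t stride) {
    // Before the patch the pointer was a local handed to the asm as
    //   [output_ptr] "+&r" (output_ptr)
    // Afterwards the asm receives only the field offset,
    //   [offsetof_output_ptr] "I" (offsetof(KernelArgsSketch, output_ptr)),
    // and loads the value itself:
    //   "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]"
    ka.output_ptr    = base;
    ka.output_offset = stride;
}
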
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp
index 7871c0b003..f2dee3c0bb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void sve_hybrid_u8u32_dot_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -89,7 +89,7 @@ void sve_hybrid_u8u32_dot_6x4VL (
"beq 12f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -114,8 +114,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov x28, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -131,89 +131,89 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z16.b }, p5/Z, [x10]\n"
- "udot z8.s, z16.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "udot z8.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p5/Z, [x10, #2, MUL VL]\n"
- "udot z10.s, z16.b, z0.b[0]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "udot z10.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
"udot z11.s, z16.b, z0.b[0]\n"
- "ld1b { z16.b }, p5/Z, [x10, #4, MUL VL]\n"
- "udot z8.s, z16.b, z0.b[1]\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
+ "udot z8.s, z17.b, z0.b[1]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
"udot z9.s, z16.b, z0.b[1]\n"
- "ld1b { z16.b }, p5/Z, [x10, #6, MUL VL]\n"
- "udot z10.s, z16.b, z0.b[1]\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
+ "udot z10.s, z17.b, z0.b[1]\n"
"udot z11.s, z16.b, z0.b[1]\n"
"ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
"udot z8.s, z17.b, z0.b[2]\n"
- "udot z9.s, z16.b, z0.b[2]\n"
"ld1b { z17.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "udot z9.s, z16.b, z0.b[2]\n"
"ld1b { z16.b }, p5/Z, [x10, #-5, MUL VL]\n"
"udot z10.s, z17.b, z0.b[2]\n"
- "udot z11.s, z16.b, z0.b[2]\n"
"ld1b { z17.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "udot z11.s, z16.b, z0.b[2]\n"
"ld1b { z16.b }, p5/Z, [x10, #-3, MUL VL]\n"
"udot z8.s, z17.b, z0.b[3]\n"
- "udot z9.s, z16.b, z0.b[3]\n"
"ld1b { z17.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "udot z9.s, z16.b, z0.b[3]\n"
"ld1b { z16.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
"udot z10.s, z17.b, z0.b[3]\n"
"udot z11.s, z16.b, z0.b[3]\n"
- "add x26, x26, #0x10\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1b { z16.b }, p5/Z, [x10]\n"
- "udot z8.s, z16.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
- "udot z9.s, z16.b, z0.b[0]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"subs x27, x27, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x26]\n"
+ "udot z8.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z16.b, z0.b[0]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"udot z10.s, z17.b, z0.b[0]\n"
"udot z11.s, z16.b, z0.b[0]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z17.b, z0.b[1]\n"
- "udot z9.s, z16.b, z0.b[1]\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z16.b, z0.b[1]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
"udot z10.s, z17.b, z0.b[1]\n"
"udot z11.s, z16.b, z0.b[1]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z17.b, z0.b[2]\n"
- "udot z9.s, z16.b, z0.b[2]\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z16.b, z0.b[2]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
"udot z10.s, z17.b, z0.b[2]\n"
"udot z11.s, z16.b, z0.b[2]\n"
- "addvl x10, x10, #4\n"
"ble 10f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
"udot z8.s, z17.b, z0.b[3]\n"
- "udot z9.s, z16.b, z0.b[3]\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z16.b, z0.b[3]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"udot z10.s, z17.b, z0.b[3]\n"
"udot z11.s, z16.b, z0.b[3]\n"
- "addvl x10, x10, #4\n"
"10:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -232,7 +232,7 @@ void sve_hybrid_u8u32_dot_6x4VL (
"12:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"13:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -244,11 +244,11 @@ void sve_hybrid_u8u32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 14f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
@@ -267,8 +267,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov x28, #0x0\n"
"16:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -287,38 +287,38 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 20f\n"
"19:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z0.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z0.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"udot z8.s, z17.b, z1.b[0]\n"
"udot z12.s, z17.b, z0.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z16.b, z1.b[0]\n"
"udot z13.s, z16.b, z0.b[0]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
"udot z10.s, z17.b, z1.b[0]\n"
"udot z14.s, z17.b, z0.b[0]\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
- "cmp x27, #0x10\n"
"udot z11.s, z16.b, z1.b[0]\n"
"udot z15.s, z16.b, z0.b[0]\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
- "add x26, x26, #0x10\n"
"udot z8.s, z17.b, z1.b[1]\n"
"udot z12.s, z17.b, z0.b[1]\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
- "add x25, x25, #0x10\n"
"udot z9.s, z16.b, z1.b[1]\n"
"udot z13.s, z16.b, z0.b[1]\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
"udot z10.s, z17.b, z1.b[1]\n"
"udot z14.s, z17.b, z0.b[1]\n"
- "ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
"udot z11.s, z16.b, z1.b[1]\n"
"udot z15.s, z16.b, z0.b[1]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
"udot z8.s, z17.b, z1.b[2]\n"
"udot z12.s, z17.b, z0.b[2]\n"
@@ -345,50 +345,50 @@ void sve_hybrid_u8u32_dot_6x4VL (
"bgt 19b\n"
"20:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z0.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
+ "ld1rqb { z0.b }, p0/Z, [x26]\n"
+ "ld1rqb { z1.b }, p0/Z, [x25]\n"
"udot z8.s, z17.b, z0.b[0]\n"
"udot z12.s, z17.b, z1.b[0]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z16.b, z0.b[0]\n"
"udot z13.s, z16.b, z1.b[0]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"udot z10.s, z17.b, z0.b[0]\n"
"udot z14.s, z17.b, z1.b[0]\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z16.b, z0.b[0]\n"
"udot z15.s, z16.b, z1.b[0]\n"
"ble 21f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z17.b, z0.b[1]\n"
"udot z12.s, z17.b, z1.b[1]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z16.b, z0.b[1]\n"
"udot z13.s, z16.b, z1.b[1]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
"udot z10.s, z17.b, z0.b[1]\n"
"udot z14.s, z17.b, z1.b[1]\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z16.b, z0.b[1]\n"
"udot z15.s, z16.b, z1.b[1]\n"
"ble 21f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z17.b, z0.b[2]\n"
"udot z12.s, z17.b, z1.b[2]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z16.b, z0.b[2]\n"
"udot z13.s, z16.b, z1.b[2]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "addvl x10, x10, #4\n"
"udot z10.s, z17.b, z0.b[2]\n"
"udot z14.s, z17.b, z1.b[2]\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z16.b, z0.b[2]\n"
"udot z15.s, z16.b, z1.b[2]\n"
"ble 21f\n"
@@ -396,13 +396,13 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
"udot z8.s, z17.b, z0.b[3]\n"
"udot z12.s, z17.b, z1.b[3]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z16.b, z0.b[3]\n"
"udot z13.s, z16.b, z1.b[3]\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "addvl x10, x10, #4\n"
"udot z10.s, z17.b, z0.b[3]\n"
"udot z14.s, z17.b, z1.b[3]\n"
- "addvl x10, x10, #4\n"
"udot z11.s, z16.b, z0.b[3]\n"
"udot z15.s, z16.b, z1.b[3]\n"
"21:" // Height 2: Multiply loop: multiply skip
@@ -411,10 +411,10 @@ void sve_hybrid_u8u32_dot_6x4VL (
"cmp x28, x20\n"
"bne 16b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p4, [x20]\n"
@@ -429,7 +429,7 @@ void sve_hybrid_u8u32_dot_6x4VL (
"23:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"24:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -441,12 +441,12 @@ void sve_hybrid_u8u32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 25f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
@@ -473,8 +473,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov x28, #0x0\n"
"27:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 28f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -496,37 +496,37 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 31f\n"
"30:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z21.b }, p5/Z, [x10]\n"
+ "ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x24]\n"
- "ld1b { z21.b }, p5/Z, [x10]\n"
+ "add x24, x24, #0x10\n"
"udot z8.s, z21.b, z2.b[0]\n"
"udot z12.s, z21.b, z1.b[0]\n"
- "ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
- "udot z16.s, z21.b, z0.b[0]\n"
"udot z9.s, z20.b, z2.b[0]\n"
- "ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z13.s, z20.b, z1.b[0]\n"
+ "udot z16.s, z21.b, z0.b[0]\n"
+ "ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z17.s, z20.b, z0.b[0]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
- "cmp x27, #0x10\n"
"udot z10.s, z21.b, z2.b[0]\n"
"udot z14.s, z21.b, z1.b[0]\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
"udot z18.s, z21.b, z0.b[0]\n"
- "udot z11.s, z20.b, z2.b[0]\n"
"ld1b { z21.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
+ "udot z11.s, z20.b, z2.b[0]\n"
"udot z15.s, z20.b, z1.b[0]\n"
"udot z19.s, z20.b, z0.b[0]\n"
"ld1b { z20.b }, p5/Z, [x10, #5, MUL VL]\n"
"udot z8.s, z21.b, z2.b[1]\n"
"udot z12.s, z21.b, z1.b[1]\n"
"udot z16.s, z21.b, z0.b[1]\n"
- "udot z9.s, z20.b, z2.b[1]\n"
"ld1b { z21.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "udot z9.s, z20.b, z2.b[1]\n"
"udot z13.s, z20.b, z1.b[1]\n"
"udot z17.s, z20.b, z0.b[1]\n"
"ld1b { z20.b }, p5/Z, [x10, #7, MUL VL]\n"
@@ -535,31 +535,31 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z14.s, z21.b, z1.b[1]\n"
"udot z18.s, z21.b, z0.b[1]\n"
"udot z11.s, z20.b, z2.b[1]\n"
- "ld1b { z21.b }, p5/Z, [x10, #-8, MUL VL]\n"
"udot z15.s, z20.b, z1.b[1]\n"
"udot z19.s, z20.b, z0.b[1]\n"
+ "ld1b { z21.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z20.b }, p5/Z, [x10, #-7, MUL VL]\n"
"udot z8.s, z21.b, z2.b[2]\n"
"udot z12.s, z21.b, z1.b[2]\n"
"udot z16.s, z21.b, z0.b[2]\n"
- "udot z9.s, z20.b, z2.b[2]\n"
"ld1b { z21.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "udot z9.s, z20.b, z2.b[2]\n"
"udot z13.s, z20.b, z1.b[2]\n"
"udot z17.s, z20.b, z0.b[2]\n"
"ld1b { z20.b }, p5/Z, [x10, #-5, MUL VL]\n"
"udot z10.s, z21.b, z2.b[2]\n"
"udot z14.s, z21.b, z1.b[2]\n"
"udot z18.s, z21.b, z0.b[2]\n"
- "udot z11.s, z20.b, z2.b[2]\n"
"ld1b { z21.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "udot z11.s, z20.b, z2.b[2]\n"
"udot z15.s, z20.b, z1.b[2]\n"
"udot z19.s, z20.b, z0.b[2]\n"
"ld1b { z20.b }, p5/Z, [x10, #-3, MUL VL]\n"
"udot z8.s, z21.b, z2.b[3]\n"
"udot z12.s, z21.b, z1.b[3]\n"
"udot z16.s, z21.b, z0.b[3]\n"
- "udot z9.s, z20.b, z2.b[3]\n"
"ld1b { z21.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "udot z9.s, z20.b, z2.b[3]\n"
"udot z13.s, z20.b, z1.b[3]\n"
"udot z17.s, z20.b, z0.b[3]\n"
"ld1b { z20.b }, p5/Z, [x10, #-1, MUL VL]\n"
@@ -572,18 +572,18 @@ void sve_hybrid_u8u32_dot_6x4VL (
"bgt 30b\n"
"31:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z21.b }, p5/Z, [x10]\n"
+ "ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
- "ld1b { z21.b }, p5/Z, [x10]\n"
"udot z8.s, z21.b, z0.b[0]\n"
"udot z12.s, z21.b, z1.b[0]\n"
- "ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
- "udot z16.s, z21.b, z2.b[0]\n"
"udot z9.s, z20.b, z0.b[0]\n"
- "ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z13.s, z20.b, z1.b[0]\n"
+ "udot z16.s, z21.b, z2.b[0]\n"
+ "ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z17.s, z20.b, z2.b[0]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
"addvl x10, x10, #4\n"
@@ -596,12 +596,12 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 32f\n"
"ld1b { z21.b }, p5/Z, [x10]\n"
"ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z21.b, z0.b[1]\n"
"udot z12.s, z21.b, z1.b[1]\n"
"udot z16.s, z21.b, z2.b[1]\n"
- "udot z9.s, z20.b, z0.b[1]\n"
"ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "udot z9.s, z20.b, z0.b[1]\n"
"udot z13.s, z20.b, z1.b[1]\n"
"udot z17.s, z20.b, z2.b[1]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -615,12 +615,12 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 32f\n"
"ld1b { z21.b }, p5/Z, [x10]\n"
"ld1b { z20.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z21.b, z0.b[2]\n"
"udot z12.s, z21.b, z1.b[2]\n"
"udot z16.s, z21.b, z2.b[2]\n"
- "udot z9.s, z20.b, z0.b[2]\n"
"ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
+ "udot z9.s, z20.b, z0.b[2]\n"
"udot z13.s, z20.b, z1.b[2]\n"
"udot z17.s, z20.b, z2.b[2]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -637,8 +637,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z8.s, z21.b, z0.b[3]\n"
"udot z12.s, z21.b, z1.b[3]\n"
"udot z16.s, z21.b, z2.b[3]\n"
- "udot z9.s, z20.b, z0.b[3]\n"
"ld1b { z21.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z20.b, z0.b[3]\n"
"udot z13.s, z20.b, z1.b[3]\n"
"udot z17.s, z20.b, z2.b[3]\n"
"ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -655,11 +655,11 @@ void sve_hybrid_u8u32_dot_6x4VL (
"cmp x28, x20\n"
"bne 27b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p4, [x21]\n"
@@ -678,7 +678,7 @@ void sve_hybrid_u8u32_dot_6x4VL (
"34:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"35:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -690,13 +690,13 @@ void sve_hybrid_u8u32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 36f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
@@ -731,8 +731,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov x28, #0x0\n"
"38:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 39f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -757,25 +757,25 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 42f\n"
"41:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
"udot z8.s, z25.b, z3.b[0]\n"
"udot z12.s, z25.b, z2.b[0]\n"
+ "udot z9.s, z24.b, z3.b[0]\n"
+ "udot z13.s, z24.b, z2.b[0]\n"
"udot z16.s, z25.b, z1.b[0]\n"
"udot z20.s, z25.b, z0.b[0]\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "add x25, x25, #0x10\n"
- "udot z9.s, z24.b, z3.b[0]\n"
- "udot z13.s, z24.b, z2.b[0]\n"
- "add x24, x24, #0x10\n"
- "add x23, x23, #0x10\n"
"udot z17.s, z24.b, z1.b[0]\n"
"udot z21.s, z24.b, z0.b[0]\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -804,9 +804,9 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z14.s, z25.b, z2.b[1]\n"
"udot z18.s, z25.b, z1.b[1]\n"
"udot z22.s, z25.b, z0.b[1]\n"
- "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
"udot z11.s, z24.b, z3.b[1]\n"
"udot z15.s, z24.b, z2.b[1]\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
"udot z19.s, z24.b, z1.b[1]\n"
"udot z23.s, z24.b, z0.b[1]\n"
"ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
@@ -851,20 +851,20 @@ void sve_hybrid_u8u32_dot_6x4VL (
"bgt 41b\n"
"42:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
"udot z8.s, z25.b, z0.b[0]\n"
"udot z12.s, z25.b, z1.b[0]\n"
+ "udot z9.s, z24.b, z0.b[0]\n"
+ "udot z13.s, z24.b, z1.b[0]\n"
"udot z16.s, z25.b, z2.b[0]\n"
"udot z20.s, z25.b, z3.b[0]\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "udot z9.s, z24.b, z0.b[0]\n"
- "udot z13.s, z24.b, z1.b[0]\n"
"udot z17.s, z24.b, z2.b[0]\n"
"udot z21.s, z24.b, z3.b[0]\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -880,12 +880,12 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 43f\n"
"ld1b { z25.b }, p5/Z, [x10]\n"
"ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z25.b, z0.b[1]\n"
"udot z12.s, z25.b, z1.b[1]\n"
"udot z16.s, z25.b, z2.b[1]\n"
"udot z20.s, z25.b, z3.b[1]\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"udot z9.s, z24.b, z0.b[1]\n"
"udot z13.s, z24.b, z1.b[1]\n"
"udot z17.s, z24.b, z2.b[1]\n"
@@ -903,12 +903,12 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 43f\n"
"ld1b { z25.b }, p5/Z, [x10]\n"
"ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z25.b, z0.b[2]\n"
"udot z12.s, z25.b, z1.b[2]\n"
"udot z16.s, z25.b, z2.b[2]\n"
"udot z20.s, z25.b, z3.b[2]\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x4\n"
"udot z9.s, z24.b, z0.b[2]\n"
"udot z13.s, z24.b, z1.b[2]\n"
"udot z17.s, z24.b, z2.b[2]\n"
@@ -951,12 +951,12 @@ void sve_hybrid_u8u32_dot_6x4VL (
"cmp x28, x20\n"
"bne 38b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z12.s }, p4, [x22]\n"
@@ -979,7 +979,7 @@ void sve_hybrid_u8u32_dot_6x4VL (
"45:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"46:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -991,16 +991,16 @@ void sve_hybrid_u8u32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 47f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x22]\n"
@@ -1041,8 +1041,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov x28, #0x0\n"
"49:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1070,29 +1070,29 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 53f\n"
"52:" // Height 5: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z4.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z1.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
"ld1rqb { z0.b }, p0/Z, [x22]\n"
- "ld1b { z29.b }, p5/Z, [x10]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"udot z8.s, z29.b, z4.b[0]\n"
"udot z12.s, z29.b, z3.b[0]\n"
- "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "udot z9.s, z28.b, z4.b[0]\n"
"udot z16.s, z29.b, z2.b[0]\n"
"udot z20.s, z29.b, z1.b[0]\n"
- "add x25, x25, #0x10\n"
"udot z24.s, z29.b, z0.b[0]\n"
- "udot z9.s, z28.b, z4.b[0]\n"
- "ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
- "add x24, x24, #0x10\n"
"udot z13.s, z28.b, z3.b[0]\n"
+ "ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z17.s, z28.b, z2.b[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"udot z21.s, z28.b, z1.b[0]\n"
"udot z25.s, z28.b, z0.b[0]\n"
"ld1b { z28.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1101,8 +1101,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z18.s, z29.b, z2.b[0]\n"
"udot z22.s, z29.b, z1.b[0]\n"
"udot z26.s, z29.b, z0.b[0]\n"
- "udot z11.s, z28.b, z4.b[0]\n"
"ld1b { z29.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "udot z11.s, z28.b, z4.b[0]\n"
"udot z15.s, z28.b, z3.b[0]\n"
"udot z19.s, z28.b, z2.b[0]\n"
"udot z23.s, z28.b, z1.b[0]\n"
@@ -1113,8 +1113,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z16.s, z29.b, z2.b[1]\n"
"udot z20.s, z29.b, z1.b[1]\n"
"udot z24.s, z29.b, z0.b[1]\n"
- "udot z9.s, z28.b, z4.b[1]\n"
"ld1b { z29.b }, p5/Z, [x10, #6, MUL VL]\n"
+ "udot z9.s, z28.b, z4.b[1]\n"
"udot z13.s, z28.b, z3.b[1]\n"
"udot z17.s, z28.b, z2.b[1]\n"
"udot z21.s, z28.b, z1.b[1]\n"
@@ -1127,8 +1127,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z22.s, z29.b, z1.b[1]\n"
"udot z26.s, z29.b, z0.b[1]\n"
"udot z11.s, z28.b, z4.b[1]\n"
- "ld1b { z29.b }, p5/Z, [x10, #-8, MUL VL]\n"
"udot z15.s, z28.b, z3.b[1]\n"
+ "ld1b { z29.b }, p5/Z, [x10, #-8, MUL VL]\n"
"udot z19.s, z28.b, z2.b[1]\n"
"udot z23.s, z28.b, z1.b[1]\n"
"udot z27.s, z28.b, z0.b[1]\n"
@@ -1138,8 +1138,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z16.s, z29.b, z2.b[2]\n"
"udot z20.s, z29.b, z1.b[2]\n"
"udot z24.s, z29.b, z0.b[2]\n"
- "udot z9.s, z28.b, z4.b[2]\n"
"ld1b { z29.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ "udot z9.s, z28.b, z4.b[2]\n"
"udot z13.s, z28.b, z3.b[2]\n"
"udot z17.s, z28.b, z2.b[2]\n"
"udot z21.s, z28.b, z1.b[2]\n"
@@ -1150,8 +1150,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z18.s, z29.b, z2.b[2]\n"
"udot z22.s, z29.b, z1.b[2]\n"
"udot z26.s, z29.b, z0.b[2]\n"
- "udot z11.s, z28.b, z4.b[2]\n"
"ld1b { z29.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ "udot z11.s, z28.b, z4.b[2]\n"
"udot z15.s, z28.b, z3.b[2]\n"
"udot z19.s, z28.b, z2.b[2]\n"
"udot z23.s, z28.b, z1.b[2]\n"
@@ -1162,8 +1162,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z16.s, z29.b, z2.b[3]\n"
"udot z20.s, z29.b, z1.b[3]\n"
"udot z24.s, z29.b, z0.b[3]\n"
- "udot z9.s, z28.b, z4.b[3]\n"
"ld1b { z29.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ "udot z9.s, z28.b, z4.b[3]\n"
"udot z13.s, z28.b, z3.b[3]\n"
"udot z17.s, z28.b, z2.b[3]\n"
"udot z21.s, z28.b, z1.b[3]\n"
@@ -1182,23 +1182,23 @@ void sve_hybrid_u8u32_dot_6x4VL (
"bgt 52b\n"
"53:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
"ld1rqb { z4.b }, p0/Z, [x22]\n"
- "ld1b { z29.b }, p5/Z, [x10]\n"
"udot z8.s, z29.b, z0.b[0]\n"
"udot z12.s, z29.b, z1.b[0]\n"
- "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "udot z9.s, z28.b, z0.b[0]\n"
+ "udot z13.s, z28.b, z1.b[0]\n"
"udot z16.s, z29.b, z2.b[0]\n"
"udot z20.s, z29.b, z3.b[0]\n"
"udot z24.s, z29.b, z4.b[0]\n"
- "udot z9.s, z28.b, z0.b[0]\n"
- "ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
- "udot z13.s, z28.b, z1.b[0]\n"
"udot z17.s, z28.b, z2.b[0]\n"
+ "ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z21.s, z28.b, z3.b[0]\n"
"udot z25.s, z28.b, z4.b[0]\n"
"ld1b { z28.b }, p5/Z, [x10, #3, MUL VL]\n"
@@ -1216,21 +1216,21 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 54f\n"
"ld1b { z29.b }, p5/Z, [x10]\n"
"ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z29.b, z0.b[1]\n"
"udot z12.s, z29.b, z1.b[1]\n"
"udot z16.s, z29.b, z2.b[1]\n"
"udot z20.s, z29.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"udot z24.s, z29.b, z4.b[1]\n"
- "udot z9.s, z28.b, z0.b[1]\n"
"ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z28.b, z0.b[1]\n"
"udot z13.s, z28.b, z1.b[1]\n"
"udot z17.s, z28.b, z2.b[1]\n"
"udot z21.s, z28.b, z3.b[1]\n"
"udot z25.s, z28.b, z4.b[1]\n"
"ld1b { z28.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"udot z10.s, z29.b, z0.b[1]\n"
+ "addvl x10, x10, #4\n"
"udot z14.s, z29.b, z1.b[1]\n"
"udot z18.s, z29.b, z2.b[1]\n"
"udot z22.s, z29.b, z3.b[1]\n"
@@ -1243,21 +1243,21 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 54f\n"
"ld1b { z29.b }, p5/Z, [x10]\n"
"ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z29.b, z0.b[2]\n"
"udot z12.s, z29.b, z1.b[2]\n"
"udot z16.s, z29.b, z2.b[2]\n"
"udot z20.s, z29.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"udot z24.s, z29.b, z4.b[2]\n"
- "udot z9.s, z28.b, z0.b[2]\n"
"ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z28.b, z0.b[2]\n"
"udot z13.s, z28.b, z1.b[2]\n"
"udot z17.s, z28.b, z2.b[2]\n"
"udot z21.s, z28.b, z3.b[2]\n"
"udot z25.s, z28.b, z4.b[2]\n"
"ld1b { z28.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"udot z10.s, z29.b, z0.b[2]\n"
+ "addvl x10, x10, #4\n"
"udot z14.s, z29.b, z1.b[2]\n"
"udot z18.s, z29.b, z2.b[2]\n"
"udot z22.s, z29.b, z3.b[2]\n"
@@ -1275,8 +1275,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"udot z16.s, z29.b, z2.b[3]\n"
"udot z20.s, z29.b, z3.b[3]\n"
"udot z24.s, z29.b, z4.b[3]\n"
- "udot z9.s, z28.b, z0.b[3]\n"
"ld1b { z29.b }, p5/Z, [x10, #2, MUL VL]\n"
+ "udot z9.s, z28.b, z0.b[3]\n"
"udot z13.s, z28.b, z1.b[3]\n"
"udot z17.s, z28.b, z2.b[3]\n"
"udot z21.s, z28.b, z3.b[3]\n"
@@ -1299,15 +1299,15 @@ void sve_hybrid_u8u32_dot_6x4VL (
"cmp x28, x20\n"
"bne 49b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z12.s }, p4, [x23]\n"
"st1w { z13.s }, p3, [x23, #1, MUL VL]\n"
"st1w { z14.s }, p2, [x23, #2, MUL VL]\n"
@@ -1331,11 +1331,12 @@ void sve_hybrid_u8u32_dot_6x4VL (
"b 68f\n"
"56:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"57:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1347,17 +1348,17 @@ void sve_hybrid_u8u32_dot_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 58f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"ld1w { z8.s }, p4/Z, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"ld1w { z9.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z10.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"ld1w { z11.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z16.s }, p4/Z, [x23]\n"
@@ -1406,8 +1407,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"mov x28, #0x0\n"
"60:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1438,29 +1439,29 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 64f\n"
"63:" // Height 6: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z1.b }, p5/Z, [x10]\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z7.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
"ld1rqb { z6.b }, p0/Z, [x25]\n"
- "sub x27, x27, #0x10\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z5.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
"ld1rqb { z4.b }, p0/Z, [x23]\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
"ld1rqb { z3.b }, p0/Z, [x22]\n"
"ld1rqb { z2.b }, p0/Z, [x21]\n"
- "add x25, x25, #0x10\n"
- "add x24, x24, #0x10\n"
- "ld1b { z1.b }, p5/Z, [x10]\n"
- "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
"udot z8.s, z1.b, z7.b[0]\n"
"udot z12.s, z1.b, z6.b[0]\n"
+ "add x21, x21, #0x10\n"
"udot z16.s, z1.b, z5.b[0]\n"
"udot z20.s, z1.b, z4.b[0]\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
"udot z24.s, z1.b, z3.b[0]\n"
"udot z28.s, z1.b, z2.b[0]\n"
"ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
- "add x21, x21, #0x10\n"
"udot z9.s, z0.b, z7.b[0]\n"
"udot z13.s, z0.b, z6.b[0]\n"
"udot z17.s, z0.b, z5.b[0]\n"
@@ -1568,24 +1569,24 @@ void sve_hybrid_u8u32_dot_6x4VL (
"bgt 63b\n"
"64:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z7.b }, p5/Z, [x10]\n"
+ "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"ld1rqb { z0.b }, p0/Z, [x26]\n"
"ld1rqb { z1.b }, p0/Z, [x25]\n"
- "subs x27, x27, #0x4\n"
"ld1rqb { z2.b }, p0/Z, [x24]\n"
"ld1rqb { z3.b }, p0/Z, [x23]\n"
"ld1rqb { z4.b }, p0/Z, [x22]\n"
"ld1rqb { z5.b }, p0/Z, [x21]\n"
- "ld1b { z7.b }, p5/Z, [x10]\n"
- "ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
"udot z8.s, z7.b, z0.b[0]\n"
"udot z12.s, z7.b, z1.b[0]\n"
+ "udot z9.s, z6.b, z0.b[0]\n"
+ "udot z13.s, z6.b, z1.b[0]\n"
"udot z16.s, z7.b, z2.b[0]\n"
"udot z20.s, z7.b, z3.b[0]\n"
"udot z24.s, z7.b, z4.b[0]\n"
"udot z28.s, z7.b, z5.b[0]\n"
"ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
- "udot z9.s, z6.b, z0.b[0]\n"
- "udot z13.s, z6.b, z1.b[0]\n"
"udot z17.s, z6.b, z2.b[0]\n"
"udot z21.s, z6.b, z3.b[0]\n"
"udot z25.s, z6.b, z4.b[0]\n"
@@ -1607,23 +1608,23 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 65f\n"
"ld1b { z7.b }, p5/Z, [x10]\n"
"ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z7.b, z0.b[1]\n"
"udot z12.s, z7.b, z1.b[1]\n"
"udot z16.s, z7.b, z2.b[1]\n"
"udot z20.s, z7.b, z3.b[1]\n"
- "subs x27, x27, #0x4\n"
"udot z24.s, z7.b, z4.b[1]\n"
"udot z28.s, z7.b, z5.b[1]\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z6.b, z0.b[1]\n"
+ "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z13.s, z6.b, z1.b[1]\n"
"udot z17.s, z6.b, z2.b[1]\n"
"udot z21.s, z6.b, z3.b[1]\n"
"udot z25.s, z6.b, z4.b[1]\n"
"udot z29.s, z6.b, z5.b[1]\n"
"ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"udot z10.s, z7.b, z0.b[1]\n"
+ "addvl x10, x10, #4\n"
"udot z14.s, z7.b, z1.b[1]\n"
"udot z18.s, z7.b, z2.b[1]\n"
"udot z22.s, z7.b, z3.b[1]\n"
@@ -1638,23 +1639,23 @@ void sve_hybrid_u8u32_dot_6x4VL (
"ble 65f\n"
"ld1b { z7.b }, p5/Z, [x10]\n"
"ld1b { z6.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x4\n"
"udot z8.s, z7.b, z0.b[2]\n"
"udot z12.s, z7.b, z1.b[2]\n"
"udot z16.s, z7.b, z2.b[2]\n"
"udot z20.s, z7.b, z3.b[2]\n"
- "subs x27, x27, #0x4\n"
"udot z24.s, z7.b, z4.b[2]\n"
"udot z28.s, z7.b, z5.b[2]\n"
- "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z9.s, z6.b, z0.b[2]\n"
+ "ld1b { z7.b }, p5/Z, [x10, #2, MUL VL]\n"
"udot z13.s, z6.b, z1.b[2]\n"
"udot z17.s, z6.b, z2.b[2]\n"
"udot z21.s, z6.b, z3.b[2]\n"
"udot z25.s, z6.b, z4.b[2]\n"
"udot z29.s, z6.b, z5.b[2]\n"
"ld1b { z6.b }, p5/Z, [x10, #3, MUL VL]\n"
- "addvl x10, x10, #4\n"
"udot z10.s, z7.b, z0.b[2]\n"
+ "addvl x10, x10, #4\n"
"udot z14.s, z7.b, z1.b[2]\n"
"udot z18.s, z7.b, z2.b[2]\n"
"udot z22.s, z7.b, z3.b[2]\n"
@@ -1702,17 +1703,17 @@ void sve_hybrid_u8u32_dot_6x4VL (
"cmp x28, x20\n"
"bne 60b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"st1w { z8.s }, p4, [x9]\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
+ "add x21, x22, x20, LSL #2\n"
"st1w { z12.s }, p4, [x24]\n"
+ "add x20, x21, x20, LSL #2\n"
"st1w { z13.s }, p3, [x24, #1, MUL VL]\n"
"st1w { z14.s }, p2, [x24, #2, MUL VL]\n"
"st1w { z15.s }, p1, [x24, #3, MUL VL]\n"
@@ -1748,8 +1749,8 @@ void sve_hybrid_u8u32_dot_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"68:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
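
One consequence of dropping the "+&r" output operand is visible in the Height 6 blocks of both dot kernels above: the asm can no longer advance the caller's pointer through an operand, so it computes the next block base with madd and writes it back into the struct via "str x20, [%x[args_ptr], %[offsetof_output_ptr]]". Reading the register roles from the surrounding code (x9 holds output_ptr, x21 the output offset in elements, and #0x18 is 24 = 6 rows of 4-byte uint32 results), the write-back amounts to the following C++, offered as an interpretation of the asm rather than a quoted source line:

#include <cstddef>
#include <cstdint>

// Hypothetical equivalent of: mov  x20, #0x18
//                             madd x20, x21, x20, x9
//                             str  x20, [args_ptr, #offsetof_output_ptr]
// i.e. output_ptr += output_offset * 6 rows * sizeof(uint32_t).
inline void advance_six_rows(void *&output_ptr, size_t output_offset) {
    output_ptr = static_cast<std::uint8_t *>(output_ptr) + output_offset * 24u;
}
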
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL.hpp
index 8c6a3dba7d..8437b0ea48 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -70,7 +70,7 @@ public:
return true;
}
- StdTransformsSVE<rhs_operand_type, result_type, 6, 8, 8> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 6, 8, 8> transforms = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
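
The one-line header change above widens StdTransformsSVE from an RHS-keyed template to one that names the LHS operand type explicitly; for this u8u32 kernel both operand types are presumably uint8_t, so the instantiation should behave as before. A hedged sketch of the shape this implies (parameter names are guesses, not the library's own):

// Presumed outline: both operand types now appear as parameters, so the
// transforms can differ per side instead of being derived from the RHS.
template <typename TLhs, typename TRhs, typename TResult,
          unsigned Height, unsigned Width, unsigned KBlock>
class StdTransformsSVESketch {
    // interleave / transpose helpers parameterised on TLhs and TRhs
};
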
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp
index 9269576d90..898ff3a235 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_mmla_6x4VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,18 +44,18 @@ void sve_hybrid_u8u32_mmla_6x4VL (
size_t output_offset = {};
size_t input_initial_col = {};
size_t input_offset = {};
+ void *output_ptr = {};
} ka;
unsigned long flags=0;
- void *output_ptr;
void *input_ptr;
if (output_arg.is_indirect) {
- output_ptr=(void *)(output_arg.indirect.ptr);
+ ka.output_ptr=(void *)(output_arg.indirect.ptr);
ka.output_offset=output_arg.indirect.offset;
flags |= 0x4;
} else {
- output_ptr=(void *)(output_arg.direct.base);
+ ka.output_ptr=(void *)(output_arg.direct.base);
ka.output_offset=output_arg.direct.stride;
}
@@ -89,7 +89,7 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"beq 12f\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"2:" // Height 1: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -100,14 +100,14 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"incw x20\n"
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 3f\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
- "ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z19.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
- "zip1 z9.d, z18.d, z13.d\n"
- "zip2 z13.d, z18.d, z13.d\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
+ "zip1 z9.d, z19.d, z13.d\n"
+ "zip2 z13.d, z19.d, z13.d\n"
"zip1 z10.d, z17.d, z14.d\n"
"zip2 z14.d, z17.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -126,8 +126,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov x28, #0x0\n"
"5:" // Height 1: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 6f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -143,87 +143,87 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"ble 9f\n"
"8:" // Height 1: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z20.b }, p0/Z, [x26]\n"
- "trn1 z18.d, z20.d, z19.d\n"
- "ld1b { z17.b }, p5/Z, [x10]\n"
- "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45d19a48 // ummla z8.s, z18.b, z17.b\n"
- ".inst 0x45d09a4c // ummla z12.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x45d19a49 // ummla z9.s, z18.b, z17.b\n"
- ".inst 0x45d09a4d // ummla z13.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1b { z16.b }, p5/Z, [x10]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z19.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "trn1 z18.d, z19.d, z20.d\n"
+ "trn2 z19.d, z19.d, z20.d\n"
+ ".inst 0x45d09a48 // ummla z8.s, z18.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45d19a4c // ummla z12.s, z18.b, z17.b\n"
+ "ld1b { z20.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45d09a49 // ummla z9.s, z18.b, z16.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45d49a4d // ummla z13.s, z18.b, z20.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x45d19a4a // ummla z10.s, z18.b, z17.b\n"
- ".inst 0x45d09a4e // ummla z14.s, z18.b, z16.b\n"
+ ".inst 0x45c19a4a // ummla z10.s, z18.b, z1.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45d09a4e // ummla z14.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "trn2 z20.d, z20.d, z19.d\n"
".inst 0x45d19a4b // ummla z11.s, z18.b, z17.b\n"
".inst 0x45d09a4f // ummla z15.s, z18.b, z16.b\n"
- "ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
- ".inst 0x45d19a88 // ummla z8.s, z20.b, z17.b\n"
- ".inst 0x45d09a8c // ummla z12.s, z20.b, z16.b\n"
- "ld1b { z17.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "ld1b { z16.b }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x45d19a89 // ummla z9.s, z20.b, z17.b\n"
- ".inst 0x45d09a8d // ummla z13.s, z20.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-7, MUL VL]\n"
+ ".inst 0x45d09a68 // ummla z8.s, z19.b, z16.b\n"
+ "ld1b { z16.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45d19a6c // ummla z12.s, z19.b, z17.b\n"
+ "ld1b { z17.b }, p5/Z, [x10, #-5, MUL VL]\n"
+ ".inst 0x45d09a69 // ummla z9.s, z19.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-4, MUL VL]\n"
- "ld1b { z7.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45d09a8a // ummla z10.s, z20.b, z16.b\n"
- ".inst 0x45c79a8e // ummla z14.s, z20.b, z7.b\n"
+ ".inst 0x45d19a6d // ummla z13.s, z19.b, z17.b\n"
+ "ld1b { z3.b }, p5/Z, [x10, #-3, MUL VL]\n"
+ ".inst 0x45d09a6a // ummla z10.s, z19.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45c39a6e // ummla z14.s, z19.b, z3.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
- ".inst 0x45d19a8b // ummla z11.s, z20.b, z17.b\n"
- ".inst 0x45d09a8f // ummla z15.s, z20.b, z16.b\n"
- "add x26, x26, #0x10\n"
+ ".inst 0x45d19a6b // ummla z11.s, z19.b, z17.b\n"
+ ".inst 0x45d09a6f // ummla z15.s, z19.b, z16.b\n"
"bgt 8b\n"
"9:" // Height 1: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z1.b }, p0/Z, [x26]\n"
- "trn1 z18.d, z1.d, z19.d\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
+ "ld1rqb { z1.b }, p0/Z, [x26]\n"
+ "trn1 z18.d, z1.d, z19.d\n"
".inst 0x45d19a48 // ummla z8.s, z18.b, z17.b\n"
- ".inst 0x45d09a4c // ummla z12.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45d09a4c // ummla z12.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x45d19a49 // ummla z9.s, z18.b, z17.b\n"
- ".inst 0x45d09a4d // ummla z13.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45d09a4d // ummla z13.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45d19a4a // ummla z10.s, z18.b, z17.b\n"
- ".inst 0x45d09a4e // ummla z14.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45d09a4e // ummla z14.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "addvl x10, x10, #8\n"
".inst 0x45d19a4b // ummla z11.s, z18.b, z17.b\n"
".inst 0x45d09a4f // ummla z15.s, z18.b, z16.b\n"
- "addvl x10, x10, #8\n"
"ble 10f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45d19828 // ummla z8.s, z1.b, z17.b\n"
- ".inst 0x45d0982c // ummla z12.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45d0982c // ummla z12.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45d19829 // ummla z9.s, z1.b, z17.b\n"
- ".inst 0x45d0982d // ummla z13.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45d0982d // ummla z13.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45d1982a // ummla z10.s, z1.b, z17.b\n"
- ".inst 0x45d0982e // ummla z14.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45d0982e // ummla z14.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x45d1982b // ummla z11.s, z1.b, z17.b\n"
".inst 0x45d0982f // ummla z15.s, z1.b, z16.b\n"
- "addvl x10, x10, #8\n"
"10:" // Height 1: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
@@ -231,9 +231,9 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"bne 5b\n"
"uzp1 z8.d, z8.d, z12.d\n"
"uzp1 z9.d, z9.d, z13.d\n"
- "st1w { z8.s }, p4, [x9]\n"
"uzp1 z10.d, z10.d, z14.d\n"
"uzp1 z11.d, z11.d, z15.d\n"
+ "st1w { z8.s }, p4, [x9]\n"
"st1w { z9.s }, p3, [x9, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x9, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x9, #3, MUL VL]\n"
@@ -246,7 +246,7 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"12:" // Height 2
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"13:" // Height 2: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -258,19 +258,19 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 14f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
"ld1w { z18.s }, p4/Z, [x9]\n"
- "ld1w { z2.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z24.s }, p3/Z, [x9, #1, MUL VL]\n"
"ld1w { z17.s }, p2/Z, [x9, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
+ "add x20, x9, x20, LSL #2\n"
"ld1w { z12.s }, p4/Z, [x20]\n"
- "zip1 z8.d, z18.d, z12.d\n"
- "zip2 z12.d, z18.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x20, #2, MUL VL]\n"
- "zip1 z9.d, z2.d, z13.d\n"
- "zip2 z13.d, z2.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z8.d, z18.d, z12.d\n"
+ "zip2 z12.d, z18.d, z12.d\n"
+ "zip1 z9.d, z24.d, z13.d\n"
+ "zip2 z13.d, z24.d, z13.d\n"
"zip1 z10.d, z17.d, z14.d\n"
"zip2 z14.d, z17.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
@@ -289,8 +289,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov x28, #0x0\n"
"16:" // Height 2: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 17f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -309,109 +309,109 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"ble 20f\n"
"19:" // Height 2: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z20.b }, p0/Z, [x26]\n"
- "ld1rqb { z19.b }, p0/Z, [x25]\n"
- "trn1 z18.d, z20.d, z19.d\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z19.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "trn1 z18.d, z19.d, z25.d\n"
+ "trn2 z19.d, z19.d, z25.d\n"
".inst 0x45d19a48 // ummla z8.s, z18.b, z17.b\n"
- ".inst 0x45d09a4c // ummla z12.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45d09a4c // ummla z12.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45d19a49 // ummla z9.s, z18.b, z17.b\n"
- ".inst 0x45d09a4d // ummla z13.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45d09a4d // ummla z13.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45d19a4a // ummla z10.s, z18.b, z17.b\n"
- ".inst 0x45d09a4e // ummla z14.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45d09a4e // ummla z14.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- "trn2 z20.d, z20.d, z19.d\n"
".inst 0x45d19a4b // ummla z11.s, z18.b, z17.b\n"
".inst 0x45d09a4f // ummla z15.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z16.b }, p5/Z, [x10, #-7, MUL VL]\n"
- ".inst 0x45d19a88 // ummla z8.s, z20.b, z17.b\n"
- ".inst 0x45d09a8c // ummla z12.s, z20.b, z16.b\n"
+ ".inst 0x45d19a68 // ummla z8.s, z19.b, z17.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-6, MUL VL]\n"
+ ".inst 0x45d09a6c // ummla z12.s, z19.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x45d19a89 // ummla z9.s, z20.b, z17.b\n"
- ".inst 0x45d09a8d // ummla z13.s, z20.b, z16.b\n"
+ ".inst 0x45d19a69 // ummla z9.s, z19.b, z17.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-4, MUL VL]\n"
+ ".inst 0x45d09a6d // ummla z13.s, z19.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45d19a8a // ummla z10.s, z20.b, z17.b\n"
- ".inst 0x45d09a8e // ummla z14.s, z20.b, z16.b\n"
+ ".inst 0x45d19a6a // ummla z10.s, z19.b, z17.b\n"
"ld1b { z17.b }, p5/Z, [x10, #-2, MUL VL]\n"
+ ".inst 0x45d09a6e // ummla z14.s, z19.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #-1, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- "cmp x27, #0x10\n"
- ".inst 0x45d19a8b // ummla z11.s, z20.b, z17.b\n"
- ".inst 0x45d09a8f // ummla z15.s, z20.b, z16.b\n"
- "add x26, x26, #0x10\n"
- "add x25, x25, #0x10\n"
+ ".inst 0x45d19a6b // ummla z11.s, z19.b, z17.b\n"
+ ".inst 0x45d09a6f // ummla z15.s, z19.b, z16.b\n"
"bgt 19b\n"
"20:" // Height 2: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z17.b }, p5/Z, [x10]\n"
+ "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
"ld1rqb { z19.b }, p0/Z, [x25]\n"
"trn1 z18.d, z1.d, z19.d\n"
- "ld1b { z17.b }, p5/Z, [x10]\n"
- "ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45d19a48 // ummla z8.s, z18.b, z17.b\n"
- ".inst 0x45d09a4c // ummla z12.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45d09a4c // ummla z12.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "trn2 z1.d, z1.d, z19.d\n"
".inst 0x45d19a49 // ummla z9.s, z18.b, z17.b\n"
- ".inst 0x45d09a4d // ummla z13.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45d09a4d // ummla z13.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45d19a4a // ummla z10.s, z18.b, z17.b\n"
- ".inst 0x45d09a4e // ummla z14.s, z18.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45d09a4e // ummla z14.s, z18.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- "trn2 z1.d, z1.d, z19.d\n"
+ "addvl x10, x10, #8\n"
".inst 0x45d19a4b // ummla z11.s, z18.b, z17.b\n"
".inst 0x45d09a4f // ummla z15.s, z18.b, z16.b\n"
- "addvl x10, x10, #8\n"
"ble 21f\n"
"ld1b { z17.b }, p5/Z, [x10]\n"
"ld1b { z16.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45d19828 // ummla z8.s, z1.b, z17.b\n"
- ".inst 0x45d0982c // ummla z12.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45d0982c // ummla z12.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45d19829 // ummla z9.s, z1.b, z17.b\n"
- ".inst 0x45d0982d // ummla z13.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45d0982d // ummla z13.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45d1982a // ummla z10.s, z1.b, z17.b\n"
- ".inst 0x45d0982e // ummla z14.s, z1.b, z16.b\n"
"ld1b { z17.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45d0982e // ummla z14.s, z1.b, z16.b\n"
"ld1b { z16.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #8\n"
".inst 0x45d1982b // ummla z11.s, z1.b, z17.b\n"
".inst 0x45d0982f // ummla z15.s, z1.b, z16.b\n"
- "addvl x10, x10, #8\n"
"21:" // Height 2: Multiply loop: multiply skip
"ldr w20, [%x[args_ptr], %[offsetof_num_strings]]\n"
"add x28, x28, #0x1\n"
"cmp x28, x20\n"
"bne 16b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x20, x9, x20, LSL #2\n"
- "uzp1 z16.d, z8.d, z12.d\n"
+ "uzp1 z17.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "uzp1 z17.d, z9.d, z13.d\n"
+ "uzp1 z16.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "st1w { z16.s }, p4, [x9]\n"
- "uzp1 z16.d, z10.d, z14.d\n"
+ "uzp1 z12.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "st1w { z17.s }, p3, [x9, #1, MUL VL]\n"
- "uzp1 z2.d, z11.d, z15.d\n"
+ "add x20, x9, x20, LSL #2\n"
+ "uzp1 z26.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
- "st1w { z16.s }, p2, [x9, #2, MUL VL]\n"
- "st1w { z2.s }, p1, [x9, #3, MUL VL]\n"
+ "st1w { z17.s }, p4, [x9]\n"
+ "st1w { z16.s }, p3, [x9, #1, MUL VL]\n"
+ "st1w { z12.s }, p2, [x9, #2, MUL VL]\n"
+ "st1w { z26.s }, p1, [x9, #3, MUL VL]\n"
"addvl x9, x9, #4\n"
"st1w { z8.s }, p4, [x20]\n"
"st1w { z9.s }, p3, [x20, #1, MUL VL]\n"
@@ -425,7 +425,7 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"23:" // Height 3
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"24:" // Height 3: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -437,28 +437,28 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 25f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z24.s }, p4/Z, [x9]\n"
+ "ld1w { z26.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x21, x9, x20, LSL #2\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x21]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x21, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x20]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x20, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "zip1 z8.d, z24.d, z12.d\n"
+ "zip2 z12.d, z24.d, z12.d\n"
+ "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z9.d, z26.d, z13.d\n"
+ "zip2 z13.d, z26.d, z13.d\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"zip1 z11.d, z16.d, z15.d\n"
"zip2 z15.d, z16.d, z15.d\n"
- "ld1w { z24.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z16.d, z17.d, z20.d\n"
"zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
@@ -489,8 +489,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov x28, #0x0\n"
"27:" // Height 3: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 28f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -512,92 +512,92 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"ble 31f\n"
"30:" // Height 3: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z30.b }, p0/Z, [x26]\n"
- "ld1rqb { z24.b }, p0/Z, [x25]\n"
- "ld1rqb { z28.b }, p0/Z, [x24]\n"
- "trn1 z27.d, z30.d, z24.d\n"
- "trn2 z30.d, z30.d, z24.d\n"
"ld1b { z25.b }, p5/Z, [x10]\n"
- "trn1 z26.d, z28.d, z29.d\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z27.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z24.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
+ "ld1rqb { z26.b }, p0/Z, [x24]\n"
+ "add x24, x24, #0x10\n"
+ "trn1 z6.d, z27.d, z24.d\n"
+ "trn2 z27.d, z27.d, z24.d\n"
+ "trn1 z30.d, z26.d, z29.d\n"
+ "trn2 z26.d, z26.d, z29.d\n"
+ ".inst 0x45d998c8 // ummla z8.s, z6.b, z25.b\n"
+ ".inst 0x45dc98cc // ummla z12.s, z6.b, z28.b\n"
+ ".inst 0x45d99bd0 // ummla z16.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45dc9bd4 // ummla z20.s, z30.b, z28.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45d998c9 // ummla z9.s, z6.b, z25.b\n"
+ ".inst 0x45d99bd1 // ummla z17.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45d898cd // ummla z13.s, z6.b, z24.b\n"
+ ".inst 0x45d89bd5 // ummla z21.s, z30.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
+ ".inst 0x45d998ca // ummla z10.s, z6.b, z25.b\n"
+ ".inst 0x45d99bd2 // ummla z18.s, z30.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45d898ce // ummla z14.s, z6.b, z24.b\n"
+ ".inst 0x45d89bd6 // ummla z22.s, z30.b, z24.b\n"
+ "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ ".inst 0x45d998cb // ummla z11.s, z6.b, z25.b\n"
+ ".inst 0x45d99bd3 // ummla z19.s, z30.b, z25.b\n"
+ ".inst 0x45d898cf // ummla z15.s, z6.b, z24.b\n"
+ ".inst 0x45d89bd7 // ummla z23.s, z30.b, z24.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x45d99b68 // ummla z8.s, z27.b, z25.b\n"
".inst 0x45d99b50 // ummla z16.s, z26.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-6, MUL VL]\n"
".inst 0x45d89b6c // ummla z12.s, z27.b, z24.b\n"
".inst 0x45d89b54 // ummla z20.s, z26.b, z24.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
- "ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x45d99b69 // ummla z9.s, z27.b, z25.b\n"
".inst 0x45d99b51 // ummla z17.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z28.d, z28.d, z29.d\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-4, MUL VL]\n"
".inst 0x45d89b6d // ummla z13.s, z27.b, z24.b\n"
".inst 0x45d89b55 // ummla z21.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x45d99b6a // ummla z10.s, z27.b, z25.b\n"
".inst 0x45d99b52 // ummla z18.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-2, MUL VL]\n"
".inst 0x45d89b6e // ummla z14.s, z27.b, z24.b\n"
".inst 0x45d89b56 // ummla z22.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
- "addvl x10, x10, #16\n"
+ "ld1b { z24.b }, p5/Z, [x10, #-1, MUL VL]\n"
".inst 0x45d99b6b // ummla z11.s, z27.b, z25.b\n"
".inst 0x45d99b53 // ummla z19.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
".inst 0x45d89b6f // ummla z15.s, z27.b, z24.b\n"
".inst 0x45d89b57 // ummla z23.s, z26.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x45d99bc8 // ummla z8.s, z30.b, z25.b\n"
- ".inst 0x45d99b90 // ummla z16.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x45d89bcc // ummla z12.s, z30.b, z24.b\n"
- ".inst 0x45d89b94 // ummla z20.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x45d99bc9 // ummla z9.s, z30.b, z25.b\n"
- ".inst 0x45d99b91 // ummla z17.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x45d89bcd // ummla z13.s, z30.b, z24.b\n"
- ".inst 0x45d89b95 // ummla z21.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45d99bca // ummla z10.s, z30.b, z25.b\n"
- ".inst 0x45d99b92 // ummla z18.s, z28.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x45d89bce // ummla z14.s, z30.b, z24.b\n"
- ".inst 0x45d89b96 // ummla z22.s, z28.b, z24.b\n"
- "ld1b { z24.b }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x45d99bcb // ummla z11.s, z30.b, z25.b\n"
- ".inst 0x45d99b93 // ummla z19.s, z28.b, z25.b\n"
- ".inst 0x45d89bcf // ummla z15.s, z30.b, z24.b\n"
- ".inst 0x45d89b97 // ummla z23.s, z28.b, z24.b\n"
"bgt 30b\n"
"31:" // Height 3: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z25.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
"ld1rqb { z24.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
"trn1 z27.d, z1.d, z24.d\n"
"trn2 z1.d, z1.d, z24.d\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "trn1 z26.d, z3.d, z28.d\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "trn1 z26.d, z3.d, z29.d\n"
".inst 0x45d99b68 // ummla z8.s, z27.b, z25.b\n"
+ ".inst 0x45dc9b6c // ummla z12.s, z27.b, z28.b\n"
+ "trn2 z3.d, z3.d, z29.d\n"
".inst 0x45d99b50 // ummla z16.s, z26.b, z25.b\n"
- ".inst 0x45d89b6c // ummla z12.s, z27.b, z24.b\n"
- ".inst 0x45d89b54 // ummla z20.s, z26.b, z24.b\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45dc9b54 // ummla z20.s, z26.b, z28.b\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45d99b69 // ummla z9.s, z27.b, z25.b\n"
".inst 0x45d99b51 // ummla z17.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
".inst 0x45d89b6d // ummla z13.s, z27.b, z24.b\n"
".inst 0x45d89b55 // ummla z21.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z28.d\n"
".inst 0x45d99b6a // ummla z10.s, z27.b, z25.b\n"
".inst 0x45d99b52 // ummla z18.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
@@ -614,9 +614,9 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45d99828 // ummla z8.s, z1.b, z25.b\n"
".inst 0x45d99870 // ummla z16.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x45d8982c // ummla z12.s, z1.b, z24.b\n"
".inst 0x45d89874 // ummla z20.s, z3.b, z24.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45d99829 // ummla z9.s, z1.b, z25.b\n"
".inst 0x45d99871 // ummla z17.s, z3.b, z25.b\n"
@@ -641,26 +641,26 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 27b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x21, x9, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "uzp1 z25.d, z8.d, z12.d\n"
+ "uzp1 z27.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "uzp1 z24.d, z9.d, z13.d\n"
- "st1w { z25.s }, p4, [x9]\n"
+ "uzp1 z26.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z25.d, z10.d, z14.d\n"
- "st1w { z24.s }, p3, [x9, #1, MUL VL]\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x21, x9, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"uzp1 z24.d, z11.d, z15.d\n"
- "st1w { z25.s }, p2, [x9, #2, MUL VL]\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z27.s }, p4, [x9]\n"
"uzp1 z16.d, z16.d, z20.d\n"
- "st1w { z24.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"uzp1 z17.d, z17.d, z21.d\n"
+ "st1w { z26.s }, p3, [x9, #1, MUL VL]\n"
"uzp1 z18.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x21]\n"
"uzp1 z19.d, z19.d, z23.d\n"
+ "st1w { z25.s }, p2, [x9, #2, MUL VL]\n"
+ "st1w { z24.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
+ "st1w { z8.s }, p4, [x21]\n"
"st1w { z9.s }, p3, [x21, #1, MUL VL]\n"
"st1w { z10.s }, p2, [x21, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x21, #3, MUL VL]\n"
@@ -676,7 +676,7 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"34:" // Height 4
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"35:" // Height 4: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -688,37 +688,37 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 36f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x22, x9, x20, LSL #2\n"
"add x21, x22, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
"add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x22]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z14.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x22, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x21]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x21, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x20]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"zip1 z17.d, z18.d, z21.d\n"
"zip2 z21.d, z18.d, z21.d\n"
- "ld1w { z23.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z18.d, z19.d, z22.d\n"
"zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
@@ -745,8 +745,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov x28, #0x0\n"
"38:" // Height 4: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 39f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -771,114 +771,114 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"ble 42f\n"
"41:" // Height 4: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z30.b }, p0/Z, [x26]\n"
- "ld1rqb { z24.b }, p0/Z, [x25]\n"
- "trn1 z29.d, z30.d, z24.d\n"
+ "ld1b { z31.b }, p5/Z, [x10]\n"
+ "ld1b { z30.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
+ "ld1rqb { z29.b }, p0/Z, [x26]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z28.b }, p0/Z, [x24]\n"
- "ld1rqb { z27.b }, p0/Z, [x23]\n"
- "trn2 z30.d, z30.d, z24.d\n"
- "trn1 z26.d, z28.d, z27.d\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45d99ba8 // ummla z8.s, z29.b, z25.b\n"
- ".inst 0x45d99b50 // ummla z16.s, z26.b, z25.b\n"
- ".inst 0x45d89bac // ummla z12.s, z29.b, z24.b\n"
- ".inst 0x45d89b54 // ummla z20.s, z26.b, z24.b\n"
+ "add x24, x24, #0x10\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "add x23, x23, #0x10\n"
+ "trn1 z27.d, z29.d, z25.d\n"
+ "trn2 z29.d, z29.d, z25.d\n"
+ "trn1 z26.d, z28.d, z24.d\n"
+ "trn2 z28.d, z28.d, z24.d\n"
+ ".inst 0x45df9b68 // ummla z8.s, z27.b, z31.b\n"
+ ".inst 0x45de9b6c // ummla z12.s, z27.b, z30.b\n"
+ ".inst 0x45df9b50 // ummla z16.s, z26.b, z31.b\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45de9b54 // ummla z20.s, z26.b, z30.b\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x45d99ba9 // ummla z9.s, z29.b, z25.b\n"
+ ".inst 0x45d99b69 // ummla z9.s, z27.b, z25.b\n"
".inst 0x45d99b51 // ummla z17.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
- "trn2 z28.d, z28.d, z27.d\n"
- ".inst 0x45d89bad // ummla z13.s, z29.b, z24.b\n"
+ ".inst 0x45d89b6d // ummla z13.s, z27.b, z24.b\n"
".inst 0x45d89b55 // ummla z21.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- ".inst 0x45d99baa // ummla z10.s, z29.b, z25.b\n"
+ ".inst 0x45d99b6a // ummla z10.s, z27.b, z25.b\n"
".inst 0x45d99b52 // ummla z18.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
- "cmp x27, #0x10\n"
- ".inst 0x45d89bae // ummla z14.s, z29.b, z24.b\n"
+ ".inst 0x45d89b6e // ummla z14.s, z27.b, z24.b\n"
".inst 0x45d89b56 // ummla z22.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x45d99bab // ummla z11.s, z29.b, z25.b\n"
+ ".inst 0x45d99b6b // ummla z11.s, z27.b, z25.b\n"
".inst 0x45d99b53 // ummla z19.s, z26.b, z25.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
- "add x26, x26, #0x10\n"
- ".inst 0x45d89baf // ummla z15.s, z29.b, z24.b\n"
+ ".inst 0x45d89b6f // ummla z15.s, z27.b, z24.b\n"
".inst 0x45d89b57 // ummla z23.s, z26.b, z24.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z24.b }, p5/Z, [x10, #-7, MUL VL]\n"
- "add x25, x25, #0x10\n"
- ".inst 0x45d99bc8 // ummla z8.s, z30.b, z25.b\n"
+ ".inst 0x45d99ba8 // ummla z8.s, z29.b, z25.b\n"
".inst 0x45d99b90 // ummla z16.s, z28.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #-6, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x45d89bcc // ummla z12.s, z30.b, z24.b\n"
+ ".inst 0x45d89bac // ummla z12.s, z29.b, z24.b\n"
".inst 0x45d89b94 // ummla z20.s, z28.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #-5, MUL VL]\n"
- "add x23, x23, #0x10\n"
- ".inst 0x45d99bc9 // ummla z9.s, z30.b, z25.b\n"
+ ".inst 0x45d99ba9 // ummla z9.s, z29.b, z25.b\n"
".inst 0x45d99b91 // ummla z17.s, z28.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x45d89bcd // ummla z13.s, z30.b, z24.b\n"
+ ".inst 0x45d89bad // ummla z13.s, z29.b, z24.b\n"
".inst 0x45d89b95 // ummla z21.s, z28.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45d99bca // ummla z10.s, z30.b, z25.b\n"
+ ".inst 0x45d99baa // ummla z10.s, z29.b, z25.b\n"
".inst 0x45d99b92 // ummla z18.s, z28.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x45d89bce // ummla z14.s, z30.b, z24.b\n"
+ ".inst 0x45d89bae // ummla z14.s, z29.b, z24.b\n"
".inst 0x45d89b96 // ummla z22.s, z28.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x45d99bcb // ummla z11.s, z30.b, z25.b\n"
+ ".inst 0x45d99bab // ummla z11.s, z29.b, z25.b\n"
".inst 0x45d99b93 // ummla z19.s, z28.b, z25.b\n"
- ".inst 0x45d89bcf // ummla z15.s, z30.b, z24.b\n"
+ ".inst 0x45d89baf // ummla z15.s, z29.b, z24.b\n"
".inst 0x45d89b97 // ummla z23.s, z28.b, z24.b\n"
"bgt 41b\n"
"42:" // Height 4: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z29.b }, p5/Z, [x10]\n"
+ "ld1b { z28.b }, p5/Z, [x10, #1, MUL VL]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z24.b }, p0/Z, [x25]\n"
- "trn1 z28.d, z1.d, z24.d\n"
+ "ld1rqb { z25.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z27.b }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z24.d\n"
- "trn1 z26.d, z3.d, z27.d\n"
- "ld1b { z25.b }, p5/Z, [x10]\n"
- "ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45d99b88 // ummla z8.s, z28.b, z25.b\n"
- ".inst 0x45d99b50 // ummla z16.s, z26.b, z25.b\n"
- ".inst 0x45d89b8c // ummla z12.s, z28.b, z24.b\n"
- ".inst 0x45d89b54 // ummla z20.s, z26.b, z24.b\n"
+ "ld1rqb { z24.b }, p0/Z, [x23]\n"
+ "trn1 z27.d, z1.d, z25.d\n"
+ "trn2 z1.d, z1.d, z25.d\n"
+ "trn1 z26.d, z3.d, z24.d\n"
+ ".inst 0x45dd9b68 // ummla z8.s, z27.b, z29.b\n"
+ ".inst 0x45dc9b6c // ummla z12.s, z27.b, z28.b\n"
+ "trn2 z3.d, z3.d, z24.d\n"
+ ".inst 0x45dd9b50 // ummla z16.s, z26.b, z29.b\n"
"ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45dc9b54 // ummla z20.s, z26.b, z28.b\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x45d99b89 // ummla z9.s, z28.b, z25.b\n"
+ ".inst 0x45d99b69 // ummla z9.s, z27.b, z25.b\n"
".inst 0x45d99b51 // ummla z17.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #4, MUL VL]\n"
- "subs x27, x27, #0x8\n"
- ".inst 0x45d89b8d // ummla z13.s, z28.b, z24.b\n"
+ ".inst 0x45d89b6d // ummla z13.s, z27.b, z24.b\n"
".inst 0x45d89b55 // ummla z21.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #5, MUL VL]\n"
- "trn2 z3.d, z3.d, z27.d\n"
- ".inst 0x45d99b8a // ummla z10.s, z28.b, z25.b\n"
+ ".inst 0x45d99b6a // ummla z10.s, z27.b, z25.b\n"
".inst 0x45d99b52 // ummla z18.s, z26.b, z25.b\n"
"ld1b { z25.b }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x45d89b8e // ummla z14.s, z28.b, z24.b\n"
+ ".inst 0x45d89b6e // ummla z14.s, z27.b, z24.b\n"
".inst 0x45d89b56 // ummla z22.s, z26.b, z24.b\n"
"ld1b { z24.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #8\n"
- ".inst 0x45d99b8b // ummla z11.s, z28.b, z25.b\n"
+ ".inst 0x45d99b6b // ummla z11.s, z27.b, z25.b\n"
".inst 0x45d99b53 // ummla z19.s, z26.b, z25.b\n"
- ".inst 0x45d89b8f // ummla z15.s, z28.b, z24.b\n"
+ ".inst 0x45d89b6f // ummla z15.s, z27.b, z24.b\n"
".inst 0x45d89b57 // ummla z23.s, z26.b, z24.b\n"
"ble 43f\n"
"ld1b { z25.b }, p5/Z, [x10]\n"
"ld1b { z24.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45d99828 // ummla z8.s, z1.b, z25.b\n"
".inst 0x45d99870 // ummla z16.s, z3.b, z25.b\n"
+ "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
".inst 0x45d8982c // ummla z12.s, z1.b, z24.b\n"
".inst 0x45d89874 // ummla z20.s, z3.b, z24.b\n"
- "ld1b { z25.b }, p5/Z, [x10, #2, MUL VL]\n"
"ld1b { z24.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45d99829 // ummla z9.s, z1.b, z25.b\n"
".inst 0x45d99871 // ummla z17.s, z3.b, z25.b\n"
@@ -903,33 +903,33 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 38b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x22, x9, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"uzp1 z25.d, z8.d, z12.d\n"
- "add x20, x21, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z24.d, z9.d, z13.d\n"
- "st1w { z25.s }, p4, [x9]\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "uzp1 z25.d, z10.d, z14.d\n"
- "st1w { z24.s }, p3, [x9, #1, MUL VL]\n"
+ "uzp1 z27.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
- "uzp1 z24.d, z11.d, z15.d\n"
- "st1w { z25.s }, p2, [x9, #2, MUL VL]\n"
+ "add x22, x9, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
+ "uzp1 z26.d, z11.d, z15.d\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z25.s }, p4, [x9]\n"
"uzp1 z25.d, z16.d, z20.d\n"
- "st1w { z24.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z24.s }, p3, [x9, #1, MUL VL]\n"
"uzp1 z24.d, z17.d, z21.d\n"
- "st1w { z8.s }, p4, [x22]\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z27.s }, p2, [x9, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z9.s }, p3, [x22, #1, MUL VL]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z26.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"uzp1 z20.d, z19.d, z23.d\n"
- "st1w { z10.s }, p2, [x22, #2, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x22]\n"
+ "st1w { z9.s }, p3, [x22, #1, MUL VL]\n"
+ "st1w { z10.s }, p2, [x22, #2, MUL VL]\n"
"st1w { z11.s }, p1, [x22, #3, MUL VL]\n"
"st1w { z25.s }, p4, [x21]\n"
"st1w { z24.s }, p3, [x21, #1, MUL VL]\n"
@@ -947,7 +947,7 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"45:" // Height 5
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "mov x9, %x[output_ptr]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"46:" // Height 5: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -959,46 +959,46 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 47f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x23, x9, x20, LSL #2\n"
"add x22, x23, x20, LSL #2\n"
- "ld1w { z19.s }, p4/Z, [x9]\n"
"add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z17.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x23]\n"
- "zip1 z8.d, z19.d, z12.d\n"
- "zip2 z12.d, z19.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x23, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip1 z9.d, z17.d, z13.d\n"
- "zip2 z13.d, z17.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x23, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x22]\n"
- "zip1 z10.d, z18.d, z14.d\n"
- "zip2 z14.d, z18.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x22, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip1 z11.d, z16.d, z15.d\n"
- "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x21]\n"
- "zip1 z16.d, z17.d, z20.d\n"
- "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip1 z17.d, z18.d, z21.d\n"
- "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x20]\n"
- "zip1 z18.d, z19.d, z22.d\n"
- "zip2 z22.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
"zip1 z19.d, z24.d, z23.d\n"
"zip2 z23.d, z24.d, z23.d\n"
- "ld1w { z0.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
@@ -1037,8 +1037,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov x28, #0x0\n"
"49:" // Height 5: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 50f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1066,102 +1066,103 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"ble 53f\n"
"52:" // Height 5: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z1.b }, p5/Z, [x10]\n"
+ "sub x27, x27, #0x10\n"
+ "cmp x27, #0x10\n"
"ld1rqb { z6.b }, p0/Z, [x26]\n"
- "ld1rqb { z1.b }, p0/Z, [x25]\n"
+ "add x26, x26, #0x10\n"
+ "ld1rqb { z3.b }, p0/Z, [x25]\n"
+ "add x25, x25, #0x10\n"
"ld1rqb { z7.b }, p0/Z, [x24]\n"
"ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z5.d, z6.d, z1.d\n"
- "trn2 z6.d, z6.d, z1.d\n"
- "ld1rqb { z4.b }, p0/Z, [x22]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "add x24, x24, #0x10\n"
+ "add x23, x23, #0x10\n"
+ "add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
"trn1 z3.d, z7.d, z2.d\n"
"trn2 z7.d, z7.d, z2.d\n"
- "ld1b { z1.b }, p5/Z, [x10]\n"
- "trn1 z2.d, z4.d, z0.d\n"
- "trn2 z4.d, z4.d, z0.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
"ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45c198a8 // ummla z8.s, z5.b, z1.b\n"
+ ".inst 0x45c19888 // ummla z8.s, z4.b, z1.b\n"
".inst 0x45c19870 // ummla z16.s, z3.b, z1.b\n"
".inst 0x45c19858 // ummla z24.s, z2.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
- "sub x27, x27, #0x10\n"
- ".inst 0x45c098ac // ummla z12.s, z5.b, z0.b\n"
+ ".inst 0x45c0988c // ummla z12.s, z4.b, z0.b\n"
".inst 0x45c09874 // ummla z20.s, z3.b, z0.b\n"
- "cmp x27, #0x10\n"
- "add x26, x26, #0x10\n"
".inst 0x45c0985c // ummla z28.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x45c198a9 // ummla z9.s, z5.b, z1.b\n"
- "add x25, x25, #0x10\n"
+ ".inst 0x45c19889 // ummla z9.s, z4.b, z1.b\n"
".inst 0x45c19871 // ummla z17.s, z3.b, z1.b\n"
".inst 0x45c19859 // ummla z25.s, z2.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
- "add x24, x24, #0x10\n"
- ".inst 0x45c098ad // ummla z13.s, z5.b, z0.b\n"
+ ".inst 0x45c0988d // ummla z13.s, z4.b, z0.b\n"
".inst 0x45c09875 // ummla z21.s, z3.b, z0.b\n"
- "add x23, x23, #0x10\n"
- "add x22, x22, #0x10\n"
".inst 0x45c0985d // ummla z29.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x45c198aa // ummla z10.s, z5.b, z1.b\n"
+ ".inst 0x45c1988a // ummla z10.s, z4.b, z1.b\n"
".inst 0x45c19872 // ummla z18.s, z3.b, z1.b\n"
".inst 0x45c1985a // ummla z26.s, z2.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x45c098ae // ummla z14.s, z5.b, z0.b\n"
+ ".inst 0x45c0988e // ummla z14.s, z4.b, z0.b\n"
".inst 0x45c09876 // ummla z22.s, z3.b, z0.b\n"
".inst 0x45c0985e // ummla z30.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x45c198ab // ummla z11.s, z5.b, z1.b\n"
+ ".inst 0x45c1988b // ummla z11.s, z4.b, z1.b\n"
".inst 0x45c19873 // ummla z19.s, z3.b, z1.b\n"
".inst 0x45c1985b // ummla z27.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
- ".inst 0x45c098af // ummla z15.s, z5.b, z0.b\n"
+ ".inst 0x45c0988f // ummla z15.s, z4.b, z0.b\n"
".inst 0x45c09877 // ummla z23.s, z3.b, z0.b\n"
".inst 0x45c0985f // ummla z31.s, z2.b, z0.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z0.b }, p5/Z, [x10, #-7, MUL VL]\n"
".inst 0x45c198c8 // ummla z8.s, z6.b, z1.b\n"
".inst 0x45c198f0 // ummla z16.s, z7.b, z1.b\n"
- ".inst 0x45c19898 // ummla z24.s, z4.b, z1.b\n"
+ ".inst 0x45c198b8 // ummla z24.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-6, MUL VL]\n"
".inst 0x45c098cc // ummla z12.s, z6.b, z0.b\n"
".inst 0x45c098f4 // ummla z20.s, z7.b, z0.b\n"
- ".inst 0x45c0989c // ummla z28.s, z4.b, z0.b\n"
+ ".inst 0x45c098bc // ummla z28.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-5, MUL VL]\n"
".inst 0x45c198c9 // ummla z9.s, z6.b, z1.b\n"
".inst 0x45c198f1 // ummla z17.s, z7.b, z1.b\n"
- ".inst 0x45c19899 // ummla z25.s, z4.b, z1.b\n"
+ ".inst 0x45c198b9 // ummla z25.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-4, MUL VL]\n"
".inst 0x45c098cd // ummla z13.s, z6.b, z0.b\n"
".inst 0x45c098f5 // ummla z21.s, z7.b, z0.b\n"
- ".inst 0x45c0989d // ummla z29.s, z4.b, z0.b\n"
+ ".inst 0x45c098bd // ummla z29.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-3, MUL VL]\n"
".inst 0x45c198ca // ummla z10.s, z6.b, z1.b\n"
".inst 0x45c198f2 // ummla z18.s, z7.b, z1.b\n"
- ".inst 0x45c1989a // ummla z26.s, z4.b, z1.b\n"
+ ".inst 0x45c198ba // ummla z26.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-2, MUL VL]\n"
".inst 0x45c098ce // ummla z14.s, z6.b, z0.b\n"
".inst 0x45c098f6 // ummla z22.s, z7.b, z0.b\n"
- ".inst 0x45c0989e // ummla z30.s, z4.b, z0.b\n"
+ ".inst 0x45c098be // ummla z30.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-1, MUL VL]\n"
".inst 0x45c198cb // ummla z11.s, z6.b, z1.b\n"
".inst 0x45c198f3 // ummla z19.s, z7.b, z1.b\n"
- ".inst 0x45c1989b // ummla z27.s, z4.b, z1.b\n"
+ ".inst 0x45c198bb // ummla z27.s, z5.b, z1.b\n"
".inst 0x45c098cf // ummla z15.s, z6.b, z0.b\n"
".inst 0x45c098f7 // ummla z23.s, z7.b, z0.b\n"
- ".inst 0x45c0989f // ummla z31.s, z4.b, z0.b\n"
+ ".inst 0x45c098bf // ummla z31.s, z5.b, z0.b\n"
"bgt 52b\n"
"53:" // Height 5: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z2.b }, p5/Z, [x10]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z4.b }, p0/Z, [x25]\n"
+ "ld1rqb { z6.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn1 z7.d, z1.d, z4.d\n"
- "trn2 z1.d, z1.d, z4.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x23]\n"
"ld1rqb { z5.b }, p0/Z, [x22]\n"
- "trn1 z6.d, z3.d, z2.d\n"
- "trn2 z3.d, z3.d, z2.d\n"
- "ld1b { z2.b }, p5/Z, [x10]\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
"ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
@@ -1169,7 +1170,6 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c298d0 // ummla z16.s, z6.b, z2.b\n"
".inst 0x45c29898 // ummla z24.s, z4.b, z2.b\n"
"ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
".inst 0x45c098ec // ummla z12.s, z7.b, z0.b\n"
".inst 0x45c098d4 // ummla z20.s, z6.b, z0.b\n"
".inst 0x45c0989c // ummla z28.s, z4.b, z0.b\n"
@@ -1190,8 +1190,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c098d6 // ummla z22.s, z6.b, z0.b\n"
".inst 0x45c0989e // ummla z30.s, z4.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x45c298eb // ummla z11.s, z7.b, z2.b\n"
"addvl x10, x10, #8\n"
+ ".inst 0x45c298eb // ummla z11.s, z7.b, z2.b\n"
".inst 0x45c298d3 // ummla z19.s, z6.b, z2.b\n"
".inst 0x45c2989b // ummla z27.s, z4.b, z2.b\n"
".inst 0x45c098ef // ummla z15.s, z7.b, z0.b\n"
@@ -1203,24 +1203,24 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c29828 // ummla z8.s, z1.b, z2.b\n"
".inst 0x45c29870 // ummla z16.s, z3.b, z2.b\n"
".inst 0x45c298b8 // ummla z24.s, z5.b, z2.b\n"
- ".inst 0x45c0982c // ummla z12.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45c0982c // ummla z12.s, z1.b, z0.b\n"
".inst 0x45c09874 // ummla z20.s, z3.b, z0.b\n"
".inst 0x45c098bc // ummla z28.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45c29829 // ummla z9.s, z1.b, z2.b\n"
".inst 0x45c29871 // ummla z17.s, z3.b, z2.b\n"
".inst 0x45c298b9 // ummla z25.s, z5.b, z2.b\n"
- ".inst 0x45c0982d // ummla z13.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45c0982d // ummla z13.s, z1.b, z0.b\n"
".inst 0x45c09875 // ummla z21.s, z3.b, z0.b\n"
".inst 0x45c098bd // ummla z29.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45c2982a // ummla z10.s, z1.b, z2.b\n"
".inst 0x45c29872 // ummla z18.s, z3.b, z2.b\n"
".inst 0x45c298ba // ummla z26.s, z5.b, z2.b\n"
- ".inst 0x45c0982e // ummla z14.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45c0982e // ummla z14.s, z1.b, z0.b\n"
".inst 0x45c09876 // ummla z22.s, z3.b, z0.b\n"
".inst 0x45c098be // ummla z30.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
@@ -1237,39 +1237,39 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 49b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x23, x9, x20, LSL #2\n"
- "add x22, x23, x20, LSL #2\n"
- "uzp1 z2.d, z8.d, z12.d\n"
- "add x21, x22, x20, LSL #2\n"
- "add x20, x21, x20, LSL #2\n"
+ "uzp1 z1.d, z8.d, z12.d\n"
"uzp2 z8.d, z8.d, z12.d\n"
- "uzp1 z1.d, z9.d, z13.d\n"
+ "uzp1 z0.d, z9.d, z13.d\n"
"uzp2 z9.d, z9.d, z13.d\n"
- "uzp1 z0.d, z10.d, z14.d\n"
- "st1w { z2.s }, p4, [x9]\n"
+ "uzp1 z3.d, z10.d, z14.d\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x23, x9, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
+ "add x21, x22, x20, LSL #2\n"
"uzp1 z2.d, z11.d, z15.d\n"
- "st1w { z1.s }, p3, [x9, #1, MUL VL]\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z1.s }, p4, [x9]\n"
+ "add x20, x21, x20, LSL #2\n"
"uzp1 z1.d, z16.d, z20.d\n"
- "st1w { z0.s }, p2, [x9, #2, MUL VL]\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z0.s }, p3, [x9, #1, MUL VL]\n"
"uzp1 z0.d, z17.d, z21.d\n"
- "st1w { z2.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z3.s }, p2, [x9, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x23]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z2.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"uzp1 z20.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x23]\n"
"uzp1 z24.d, z24.d, z28.d\n"
- "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
"uzp1 z25.d, z25.d, z29.d\n"
+ "st1w { z9.s }, p3, [x23, #1, MUL VL]\n"
"uzp1 z26.d, z26.d, z30.d\n"
- "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"uzp1 z27.d, z27.d, z31.d\n"
+ "st1w { z10.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z11.s }, p1, [x23, #3, MUL VL]\n"
"st1w { z1.s }, p4, [x22]\n"
"st1w { z0.s }, p3, [x22, #1, MUL VL]\n"
"st1w { z21.s }, p2, [x22, #2, MUL VL]\n"
@@ -1289,11 +1289,12 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"b 68f\n"
"56:" // Height 6
"ldr x21, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"mov x20, #0x18\n"
"ldr x11, [%x[args_ptr], %[offsetof_N]]\n"
- "mov x9, %x[output_ptr]\n"
"ldr x10, [%x[args_ptr], %[offsetof_B_ptr]]\n"
- "madd %x[output_ptr], x21, x20, %x[output_ptr]\n"
+ "madd x20, x21, x20, x9\n"
+ "str x20, [%x[args_ptr], %[offsetof_output_ptr]]\n"
"57:" // Height 6: Column loop
"mov x20, #0x0\n"
"whilelt p4.s, x20, x11\n"
@@ -1305,54 +1306,54 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"whilelt p1.s, x20, x11\n"
"tbz %x[flags], #0, 58f\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
+ "ld1w { z20.s }, p4/Z, [x9]\n"
+ "ld1w { z22.s }, p3/Z, [x9, #1, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x9, #2, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"add x24, x9, x20, LSL #2\n"
"add x23, x24, x20, LSL #2\n"
- "ld1w { z17.s }, p4/Z, [x9]\n"
"add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
- "ld1w { z18.s }, p3/Z, [x9, #1, MUL VL]\n"
- "ld1w { z20.s }, p2/Z, [x9, #2, MUL VL]\n"
- "add x20, x21, x20, LSL #2\n"
- "ld1w { z16.s }, p1/Z, [x9, #3, MUL VL]\n"
"ld1w { z12.s }, p4/Z, [x24]\n"
- "zip1 z8.d, z17.d, z12.d\n"
"ld1w { z13.s }, p3/Z, [x24, #1, MUL VL]\n"
+ "add x21, x22, x20, LSL #2\n"
+ "add x20, x21, x20, LSL #2\n"
"ld1w { z14.s }, p2/Z, [x24, #2, MUL VL]\n"
- "zip2 z12.d, z17.d, z12.d\n"
- "zip1 z9.d, z18.d, z13.d\n"
"ld1w { z15.s }, p1/Z, [x24, #3, MUL VL]\n"
"ld1w { z17.s }, p4/Z, [x23]\n"
- "zip2 z13.d, z18.d, z13.d\n"
- "zip1 z10.d, z20.d, z14.d\n"
"ld1w { z18.s }, p3/Z, [x23, #1, MUL VL]\n"
"ld1w { z19.s }, p2/Z, [x23, #2, MUL VL]\n"
- "zip2 z14.d, z20.d, z14.d\n"
- "zip1 z11.d, z16.d, z15.d\n"
"ld1w { z24.s }, p1/Z, [x23, #3, MUL VL]\n"
+ "zip1 z8.d, z20.d, z12.d\n"
+ "zip2 z12.d, z20.d, z12.d\n"
"ld1w { z20.s }, p4/Z, [x22]\n"
- "zip2 z15.d, z16.d, z15.d\n"
- "zip1 z16.d, z17.d, z20.d\n"
"ld1w { z21.s }, p3/Z, [x22, #1, MUL VL]\n"
+ "zip1 z9.d, z22.d, z13.d\n"
+ "zip2 z13.d, z22.d, z13.d\n"
"ld1w { z22.s }, p2/Z, [x22, #2, MUL VL]\n"
- "zip2 z20.d, z17.d, z20.d\n"
- "zip1 z17.d, z18.d, z21.d\n"
"ld1w { z23.s }, p1/Z, [x22, #3, MUL VL]\n"
+ "zip1 z10.d, z25.d, z14.d\n"
+ "zip2 z14.d, z25.d, z14.d\n"
"ld1w { z25.s }, p4/Z, [x21]\n"
- "zip2 z21.d, z18.d, z21.d\n"
- "zip1 z18.d, z19.d, z22.d\n"
"ld1w { z26.s }, p3/Z, [x21, #1, MUL VL]\n"
+ "zip1 z11.d, z16.d, z15.d\n"
+ "zip2 z15.d, z16.d, z15.d\n"
"ld1w { z27.s }, p2/Z, [x21, #2, MUL VL]\n"
- "zip2 z22.d, z19.d, z22.d\n"
- "zip1 z19.d, z24.d, z23.d\n"
"ld1w { z0.s }, p1/Z, [x21, #3, MUL VL]\n"
+ "zip1 z16.d, z17.d, z20.d\n"
+ "zip2 z20.d, z17.d, z20.d\n"
"ld1w { z28.s }, p4/Z, [x20]\n"
- "zip2 z23.d, z24.d, z23.d\n"
- "zip1 z24.d, z25.d, z28.d\n"
"ld1w { z29.s }, p3/Z, [x20, #1, MUL VL]\n"
+ "zip1 z17.d, z18.d, z21.d\n"
+ "zip2 z21.d, z18.d, z21.d\n"
"ld1w { z30.s }, p2/Z, [x20, #2, MUL VL]\n"
+ "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
+ "zip1 z18.d, z19.d, z22.d\n"
+ "zip2 z22.d, z19.d, z22.d\n"
+ "zip1 z19.d, z24.d, z23.d\n"
+ "zip2 z23.d, z24.d, z23.d\n"
+ "zip1 z24.d, z25.d, z28.d\n"
"zip2 z28.d, z25.d, z28.d\n"
"zip1 z25.d, z26.d, z29.d\n"
- "ld1w { z31.s }, p1/Z, [x20, #3, MUL VL]\n"
"zip2 z29.d, z26.d, z29.d\n"
"zip1 z26.d, z27.d, z30.d\n"
"zip2 z30.d, z27.d, z30.d\n"
@@ -1388,8 +1389,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"mov x28, #0x0\n"
"60:" // Height 6: String loop
"ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n"
- "ldr w27, [x20, x28, LSL #0x2]\n"
"ldr x21, [%x[args_ptr], %[offsetof_input_offset]]\n"
+ "ldr w27, [x20, x28, LSL #0x2]\n"
"tbz %x[flags], #3, 61f\n"
"ldr x20, [%x[input_ptr], x28, LSL #0x3]\n"
"add x20, x20, x21, LSL #3\n"
@@ -1420,113 +1421,113 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"ble 64f\n"
"63:" // Height 6: Multiply loop: Main loop head
"whilelt p0.b, XZR, x27\n"
- "ld1rqb { z7.b }, p0/Z, [x26]\n"
- "ld1rqb { z0.b }, p0/Z, [x25]\n"
- "trn1 z6.d, z7.d, z0.d\n"
- "ld1rqb { z5.b }, p0/Z, [x24]\n"
- "ld1rqb { z1.b }, p0/Z, [x23]\n"
- "trn2 z7.d, z7.d, z0.d\n"
- "trn1 z4.d, z5.d, z1.d\n"
- "ld1rqb { z3.b }, p0/Z, [x22]\n"
- "ld1rqb { z0.b }, p0/Z, [x21]\n"
- "trn2 z5.d, z5.d, z1.d\n"
- "trn1 z2.d, z3.d, z0.d\n"
- "trn2 z3.d, z3.d, z0.d\n"
"ld1b { z1.b }, p5/Z, [x10]\n"
- "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
- ".inst 0x45c198c8 // ummla z8.s, z6.b, z1.b\n"
- ".inst 0x45c19890 // ummla z16.s, z4.b, z1.b\n"
- ".inst 0x45c19858 // ummla z24.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
"sub x27, x27, #0x10\n"
- ".inst 0x45c098cc // ummla z12.s, z6.b, z0.b\n"
- ".inst 0x45c09894 // ummla z20.s, z4.b, z0.b\n"
"cmp x27, #0x10\n"
+ "ld1rqb { z6.b }, p0/Z, [x26]\n"
"add x26, x26, #0x10\n"
- ".inst 0x45c0985c // ummla z28.s, z2.b, z0.b\n"
- "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
- ".inst 0x45c198c9 // ummla z9.s, z6.b, z1.b\n"
+ "ld1rqb { z3.b }, p0/Z, [x25]\n"
"add x25, x25, #0x10\n"
- ".inst 0x45c19891 // ummla z17.s, z4.b, z1.b\n"
- ".inst 0x45c19859 // ummla z25.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ "ld1rqb { z7.b }, p0/Z, [x24]\n"
"add x24, x24, #0x10\n"
- ".inst 0x45c098cd // ummla z13.s, z6.b, z0.b\n"
- ".inst 0x45c09895 // ummla z21.s, z4.b, z0.b\n"
+ "ld1rqb { z2.b }, p0/Z, [x23]\n"
+ "ld1rqb { z5.b }, p0/Z, [x22]\n"
+ "ld1rqb { z0.b }, p0/Z, [x21]\n"
"add x23, x23, #0x10\n"
"add x22, x22, #0x10\n"
+ "trn1 z4.d, z6.d, z3.d\n"
+ "trn2 z6.d, z6.d, z3.d\n"
+ "add x21, x21, #0x10\n"
+ "trn1 z3.d, z7.d, z2.d\n"
+ "trn2 z7.d, z7.d, z2.d\n"
+ "trn1 z2.d, z5.d, z0.d\n"
+ "trn2 z5.d, z5.d, z0.d\n"
+ "ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
+ ".inst 0x45c19888 // ummla z8.s, z4.b, z1.b\n"
+ ".inst 0x45c19870 // ummla z16.s, z3.b, z1.b\n"
+ ".inst 0x45c19858 // ummla z24.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45c0988c // ummla z12.s, z4.b, z0.b\n"
+ ".inst 0x45c09874 // ummla z20.s, z3.b, z0.b\n"
+ ".inst 0x45c0985c // ummla z28.s, z2.b, z0.b\n"
+ "ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
+ ".inst 0x45c19889 // ummla z9.s, z4.b, z1.b\n"
+ ".inst 0x45c19871 // ummla z17.s, z3.b, z1.b\n"
+ ".inst 0x45c19859 // ummla z25.s, z2.b, z1.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45c0988d // ummla z13.s, z4.b, z0.b\n"
+ ".inst 0x45c09875 // ummla z21.s, z3.b, z0.b\n"
".inst 0x45c0985d // ummla z29.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
- ".inst 0x45c198ca // ummla z10.s, z6.b, z1.b\n"
- "add x21, x21, #0x10\n"
- ".inst 0x45c19892 // ummla z18.s, z4.b, z1.b\n"
+ ".inst 0x45c1988a // ummla z10.s, z4.b, z1.b\n"
+ ".inst 0x45c19872 // ummla z18.s, z3.b, z1.b\n"
".inst 0x45c1985a // ummla z26.s, z2.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #6, MUL VL]\n"
- ".inst 0x45c098ce // ummla z14.s, z6.b, z0.b\n"
- ".inst 0x45c09896 // ummla z22.s, z4.b, z0.b\n"
+ ".inst 0x45c0988e // ummla z14.s, z4.b, z0.b\n"
+ ".inst 0x45c09876 // ummla z22.s, z3.b, z0.b\n"
".inst 0x45c0985e // ummla z30.s, z2.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
"addvl x10, x10, #16\n"
- ".inst 0x45c198cb // ummla z11.s, z6.b, z1.b\n"
- ".inst 0x45c19893 // ummla z19.s, z4.b, z1.b\n"
+ ".inst 0x45c1988b // ummla z11.s, z4.b, z1.b\n"
+ ".inst 0x45c19873 // ummla z19.s, z3.b, z1.b\n"
".inst 0x45c1985b // ummla z27.s, z2.b, z1.b\n"
- "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
- ".inst 0x45c098cf // ummla z15.s, z6.b, z0.b\n"
- ".inst 0x45c09897 // ummla z23.s, z4.b, z0.b\n"
+ ".inst 0x45c0988f // ummla z15.s, z4.b, z0.b\n"
+ ".inst 0x45c09877 // ummla z23.s, z3.b, z0.b\n"
".inst 0x45c0985f // ummla z31.s, z2.b, z0.b\n"
+ "ld1b { z1.b }, p5/Z, [x10, #-8, MUL VL]\n"
"ld1b { z0.b }, p5/Z, [x10, #-7, MUL VL]\n"
- ".inst 0x45c198e8 // ummla z8.s, z7.b, z1.b\n"
- ".inst 0x45c198b0 // ummla z16.s, z5.b, z1.b\n"
- ".inst 0x45c19878 // ummla z24.s, z3.b, z1.b\n"
+ ".inst 0x45c198c8 // ummla z8.s, z6.b, z1.b\n"
+ ".inst 0x45c198f0 // ummla z16.s, z7.b, z1.b\n"
+ ".inst 0x45c198b8 // ummla z24.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-6, MUL VL]\n"
- ".inst 0x45c098ec // ummla z12.s, z7.b, z0.b\n"
- ".inst 0x45c098b4 // ummla z20.s, z5.b, z0.b\n"
- ".inst 0x45c0987c // ummla z28.s, z3.b, z0.b\n"
+ ".inst 0x45c098cc // ummla z12.s, z6.b, z0.b\n"
+ ".inst 0x45c098f4 // ummla z20.s, z7.b, z0.b\n"
+ ".inst 0x45c098bc // ummla z28.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-5, MUL VL]\n"
- ".inst 0x45c198e9 // ummla z9.s, z7.b, z1.b\n"
- ".inst 0x45c198b1 // ummla z17.s, z5.b, z1.b\n"
- ".inst 0x45c19879 // ummla z25.s, z3.b, z1.b\n"
+ ".inst 0x45c198c9 // ummla z9.s, z6.b, z1.b\n"
+ ".inst 0x45c198f1 // ummla z17.s, z7.b, z1.b\n"
+ ".inst 0x45c198b9 // ummla z25.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-4, MUL VL]\n"
- ".inst 0x45c098ed // ummla z13.s, z7.b, z0.b\n"
- ".inst 0x45c098b5 // ummla z21.s, z5.b, z0.b\n"
- ".inst 0x45c0987d // ummla z29.s, z3.b, z0.b\n"
+ ".inst 0x45c098cd // ummla z13.s, z6.b, z0.b\n"
+ ".inst 0x45c098f5 // ummla z21.s, z7.b, z0.b\n"
+ ".inst 0x45c098bd // ummla z29.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-3, MUL VL]\n"
- ".inst 0x45c198ea // ummla z10.s, z7.b, z1.b\n"
- ".inst 0x45c198b2 // ummla z18.s, z5.b, z1.b\n"
- ".inst 0x45c1987a // ummla z26.s, z3.b, z1.b\n"
+ ".inst 0x45c198ca // ummla z10.s, z6.b, z1.b\n"
+ ".inst 0x45c198f2 // ummla z18.s, z7.b, z1.b\n"
+ ".inst 0x45c198ba // ummla z26.s, z5.b, z1.b\n"
"ld1b { z1.b }, p5/Z, [x10, #-2, MUL VL]\n"
- ".inst 0x45c098ee // ummla z14.s, z7.b, z0.b\n"
- ".inst 0x45c098b6 // ummla z22.s, z5.b, z0.b\n"
- ".inst 0x45c0987e // ummla z30.s, z3.b, z0.b\n"
+ ".inst 0x45c098ce // ummla z14.s, z6.b, z0.b\n"
+ ".inst 0x45c098f6 // ummla z22.s, z7.b, z0.b\n"
+ ".inst 0x45c098be // ummla z30.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #-1, MUL VL]\n"
- ".inst 0x45c198eb // ummla z11.s, z7.b, z1.b\n"
- ".inst 0x45c198b3 // ummla z19.s, z5.b, z1.b\n"
- ".inst 0x45c1987b // ummla z27.s, z3.b, z1.b\n"
- ".inst 0x45c098ef // ummla z15.s, z7.b, z0.b\n"
- ".inst 0x45c098b7 // ummla z23.s, z5.b, z0.b\n"
- ".inst 0x45c0987f // ummla z31.s, z3.b, z0.b\n"
+ ".inst 0x45c198cb // ummla z11.s, z6.b, z1.b\n"
+ ".inst 0x45c198f3 // ummla z19.s, z7.b, z1.b\n"
+ ".inst 0x45c198bb // ummla z27.s, z5.b, z1.b\n"
+ ".inst 0x45c098cf // ummla z15.s, z6.b, z0.b\n"
+ ".inst 0x45c098f7 // ummla z23.s, z7.b, z0.b\n"
+ ".inst 0x45c098bf // ummla z31.s, z5.b, z0.b\n"
"bgt 63b\n"
"64:" // Height 6: Multiply loop: Single iteration only
"whilelt p0.b, XZR, x27\n"
+ "ld1b { z2.b }, p5/Z, [x10]\n"
+ "subs x27, x27, #0x8\n"
"ld1rqb { z1.b }, p0/Z, [x26]\n"
- "ld1rqb { z0.b }, p0/Z, [x25]\n"
- "trn1 z7.d, z1.d, z0.d\n"
+ "ld1rqb { z6.b }, p0/Z, [x25]\n"
"ld1rqb { z3.b }, p0/Z, [x24]\n"
- "ld1rqb { z2.b }, p0/Z, [x23]\n"
- "trn2 z1.d, z1.d, z0.d\n"
- "trn1 z6.d, z3.d, z2.d\n"
+ "ld1rqb { z4.b }, p0/Z, [x23]\n"
"ld1rqb { z5.b }, p0/Z, [x22]\n"
"ld1rqb { z0.b }, p0/Z, [x21]\n"
- "trn2 z3.d, z3.d, z2.d\n"
+ "trn1 z7.d, z1.d, z6.d\n"
+ "trn2 z1.d, z1.d, z6.d\n"
+ "trn1 z6.d, z3.d, z4.d\n"
+ "trn2 z3.d, z3.d, z4.d\n"
"trn1 z4.d, z5.d, z0.d\n"
"trn2 z5.d, z5.d, z0.d\n"
- "ld1b { z2.b }, p5/Z, [x10]\n"
"ld1b { z0.b }, p5/Z, [x10, #1, MUL VL]\n"
".inst 0x45c298e8 // ummla z8.s, z7.b, z2.b\n"
".inst 0x45c298d0 // ummla z16.s, z6.b, z2.b\n"
".inst 0x45c29898 // ummla z24.s, z4.b, z2.b\n"
"ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
- "subs x27, x27, #0x8\n"
".inst 0x45c098ec // ummla z12.s, z7.b, z0.b\n"
".inst 0x45c098d4 // ummla z20.s, z6.b, z0.b\n"
".inst 0x45c0989c // ummla z28.s, z4.b, z0.b\n"
@@ -1547,8 +1548,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c098d6 // ummla z22.s, z6.b, z0.b\n"
".inst 0x45c0989e // ummla z30.s, z4.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
- ".inst 0x45c298eb // ummla z11.s, z7.b, z2.b\n"
"addvl x10, x10, #8\n"
+ ".inst 0x45c298eb // ummla z11.s, z7.b, z2.b\n"
".inst 0x45c298d3 // ummla z19.s, z6.b, z2.b\n"
".inst 0x45c2989b // ummla z27.s, z4.b, z2.b\n"
".inst 0x45c098ef // ummla z15.s, z7.b, z0.b\n"
@@ -1560,24 +1561,24 @@ void sve_hybrid_u8u32_mmla_6x4VL (
".inst 0x45c29828 // ummla z8.s, z1.b, z2.b\n"
".inst 0x45c29870 // ummla z16.s, z3.b, z2.b\n"
".inst 0x45c298b8 // ummla z24.s, z5.b, z2.b\n"
- ".inst 0x45c0982c // ummla z12.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #2, MUL VL]\n"
+ ".inst 0x45c0982c // ummla z12.s, z1.b, z0.b\n"
".inst 0x45c09874 // ummla z20.s, z3.b, z0.b\n"
".inst 0x45c098bc // ummla z28.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #3, MUL VL]\n"
".inst 0x45c29829 // ummla z9.s, z1.b, z2.b\n"
".inst 0x45c29871 // ummla z17.s, z3.b, z2.b\n"
".inst 0x45c298b9 // ummla z25.s, z5.b, z2.b\n"
- ".inst 0x45c0982d // ummla z13.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #4, MUL VL]\n"
+ ".inst 0x45c0982d // ummla z13.s, z1.b, z0.b\n"
".inst 0x45c09875 // ummla z21.s, z3.b, z0.b\n"
".inst 0x45c098bd // ummla z29.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #5, MUL VL]\n"
".inst 0x45c2982a // ummla z10.s, z1.b, z2.b\n"
".inst 0x45c29872 // ummla z18.s, z3.b, z2.b\n"
".inst 0x45c298ba // ummla z26.s, z5.b, z2.b\n"
- ".inst 0x45c0982e // ummla z14.s, z1.b, z0.b\n"
"ld1b { z2.b }, p5/Z, [x10, #6, MUL VL]\n"
+ ".inst 0x45c0982e // ummla z14.s, z1.b, z0.b\n"
".inst 0x45c09876 // ummla z22.s, z3.b, z0.b\n"
".inst 0x45c098be // ummla z30.s, z5.b, z0.b\n"
"ld1b { z0.b }, p5/Z, [x10, #7, MUL VL]\n"
@@ -1594,46 +1595,46 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"cmp x28, x20\n"
"bne 60b\n"
"ldr x20, [%x[args_ptr], %[offsetof_output_offset]]\n"
- "add x24, x9, x20, LSL #2\n"
- "add x23, x24, x20, LSL #2\n"
"uzp1 z0.d, z8.d, z12.d\n"
- "add x22, x23, x20, LSL #2\n"
- "add x21, x22, x20, LSL #2\n"
"uzp2 z8.d, z8.d, z12.d\n"
"uzp1 z12.d, z9.d, z13.d\n"
- "add x20, x21, x20, LSL #2\n"
"uzp2 z9.d, z9.d, z13.d\n"
"uzp1 z13.d, z10.d, z14.d\n"
- "st1w { z0.s }, p4, [x9]\n"
"uzp2 z10.d, z10.d, z14.d\n"
+ "add x24, x9, x20, LSL #2\n"
+ "add x23, x24, x20, LSL #2\n"
+ "add x22, x23, x20, LSL #2\n"
"uzp1 z14.d, z11.d, z15.d\n"
- "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
"uzp2 z11.d, z11.d, z15.d\n"
+ "st1w { z0.s }, p4, [x9]\n"
+ "add x21, x22, x20, LSL #2\n"
"uzp1 z15.d, z16.d, z20.d\n"
- "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"uzp2 z16.d, z16.d, z20.d\n"
+ "st1w { z12.s }, p3, [x9, #1, MUL VL]\n"
+ "add x20, x21, x20, LSL #2\n"
"uzp1 z20.d, z17.d, z21.d\n"
- "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
- "addvl x9, x9, #4\n"
"uzp2 z17.d, z17.d, z21.d\n"
+ "st1w { z13.s }, p2, [x9, #2, MUL VL]\n"
"uzp1 z21.d, z18.d, z22.d\n"
- "st1w { z8.s }, p4, [x24]\n"
"uzp2 z18.d, z18.d, z22.d\n"
+ "st1w { z14.s }, p1, [x9, #3, MUL VL]\n"
+ "addvl x9, x9, #4\n"
"uzp1 z22.d, z19.d, z23.d\n"
- "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
"uzp2 z19.d, z19.d, z23.d\n"
+ "st1w { z8.s }, p4, [x24]\n"
"uzp1 z23.d, z24.d, z28.d\n"
- "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
"uzp2 z24.d, z24.d, z28.d\n"
+ "st1w { z9.s }, p3, [x24, #1, MUL VL]\n"
"uzp1 z28.d, z25.d, z29.d\n"
- "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
"uzp2 z25.d, z25.d, z29.d\n"
+ "st1w { z10.s }, p2, [x24, #2, MUL VL]\n"
"uzp1 z29.d, z26.d, z30.d\n"
- "st1w { z15.s }, p4, [x23]\n"
"uzp2 z26.d, z26.d, z30.d\n"
+ "st1w { z11.s }, p1, [x24, #3, MUL VL]\n"
"uzp1 z30.d, z27.d, z31.d\n"
- "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
"uzp2 z27.d, z27.d, z31.d\n"
+ "st1w { z15.s }, p4, [x23]\n"
+ "st1w { z20.s }, p3, [x23, #1, MUL VL]\n"
"st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
"st1w { z22.s }, p1, [x23, #3, MUL VL]\n"
"st1w { z16.s }, p4, [x22]\n"
@@ -1664,8 +1665,8 @@ void sve_hybrid_u8u32_mmla_6x4VL (
"madd %x[input_ptr], x20, x21, %x[input_ptr]\n"
"b 1b\n"
"68:" // Exit
- : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr)
- : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
+ : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr)
+ : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths))
: "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
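
Note on the final hunk above: the asm block no longer ties up `output_ptr` as a `"+&r"` read-write operand; the kernel instead reaches it through the argument block via the new `offsetof_output_ptr` input. Below is a minimal C++ sketch of that pattern. The KernelArgs field names are taken from the `offsetof_*` symbols in the constraint list, but the field types and ordering here are assumptions, not the library's actual definition.

    #include <cstddef>
    #include <cstdint>

    struct KernelArgs {                      // hypothetical layout; names follow the offsetof_* symbols
        const void         *B_ptr;
        std::size_t         N;
        std::size_t         input_initial_col;
        std::ptrdiff_t      input_offset;
        std::size_t         num_strings;
        std::ptrdiff_t      output_offset;
        void               *output_ptr;      // now reached via %[offsetof_output_ptr]
        const unsigned int *string_lengths;
    };

    // Inside the asm block the pointer can then be loaded on demand, e.g.
    //   "ldr x9, [%x[args_ptr], %[offsetof_output_ptr]]\n"
    // with [offsetof_output_ptr] "I" (offsetof(KernelArgs, output_ptr)) among
    // the inputs, freeing the register previously pinned by "+&r"(output_ptr).
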
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL.hpp
index 1ae035c614..f9041edcca 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void sve_interleaved_bf16fp32_dot_8x3VL( ARGLIST );
class cls_sve_interleaved_bf16fp32_dot_8x3VL
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 3, 2, 1> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 3, 2, 1, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 2, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 2, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
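
The typedef hunks in this header (and the analogous ones in the headers below) split the single `operand_type` into `lhs_operand_type` and `rhs_operand_type` and thread both through `StdTransformsSVE`. A rough illustrative sketch of the shape of that change follows; the parameter list is an assumption based on the instantiations above, not the library's real declaration.

    // Hypothetical simplified declaration: one type parameter per operand side.
    template <typename TLhs, typename TRhs, typename TResult,
              unsigned int Height, unsigned int WidthVL,
              unsigned int KBlock, unsigned int VLMul,
              bool Quantized = false>
    struct StdTransformsSVE {
        // pack/unpack entry points would take TLhs* / TRhs* panels here
    };

    // Every kernel touched by this patch is symmetric, so both sides are
    // instantiated with the same type and behaviour is unchanged, e.g.:
    //   StdTransformsSVE<bfloat16, bfloat16, float, 8, 3, 2, 1> transforms{};
    // The split presumably only becomes visible for future mixed-type kernels.
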
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp
index e507bc5551..0646fa02eb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_dot_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,25 +55,25 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z10.b, #0x0\n"
+ "ld1h { z4.h }, p0/Z, [x22]\n"
"mov z11.b, #0x0\n"
- "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z12.b, #0x0\n"
+ "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x22]\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
"mov z16.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z17.b, #0x0\n"
- "ld1h { z6.h }, p0/Z, [x22, #2, MUL VL]\n"
"mov z18.b, #0x0\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z19.b, #0x0\n"
"mov z20.b, #0x0\n"
+ "ld1h { z6.h }, p0/Z, [x22, #2, MUL VL]\n"
"mov z21.b, #0x0\n"
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
@@ -151,12 +151,12 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
"ld1h { z6.h }, p0/Z, [x22, #2, MUL VL]\n"
"bge 3b\n"
"4:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "addvl x22, x22, #3\n"
".inst 0x64604088 // bfdot z8.s, z4.h, z0.h[0]\n"
".inst 0x6468408b // bfdot z11.s, z4.h, z0.h[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
".inst 0x6470408e // bfdot z14.s, z4.h, z0.h[2]\n"
".inst 0x64784091 // bfdot z17.s, z4.h, z0.h[3]\n"
- "addvl x22, x22, #3\n"
".inst 0x64614094 // bfdot z20.s, z4.h, z1.h[0]\n"
".inst 0x64694097 // bfdot z23.s, z4.h, z1.h[1]\n"
".inst 0x6471409a // bfdot z26.s, z4.h, z1.h[2]\n"
@@ -183,13 +183,13 @@ void sve_interleaved_bf16fp32_dot_8x3VL(
"add %x[Apanel], %x[Apanel], #0x20\n"
"ld1h { z2.h }, p0/Z, [x22]\n"
"ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x64644048 // bfdot z8.s, z2.h, z4.h[0]\n"
"ld1h { z0.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ ".inst 0x64644048 // bfdot z8.s, z2.h, z4.h[0]\n"
".inst 0x646c404b // bfdot z11.s, z2.h, z4.h[1]\n"
".inst 0x6474404e // bfdot z14.s, z2.h, z4.h[2]\n"
".inst 0x647c4051 // bfdot z17.s, z2.h, z4.h[3]\n"
".inst 0x64634054 // bfdot z20.s, z2.h, z3.h[0]\n"
- "addvl x22, x22, #3\n"
".inst 0x646b4057 // bfdot z23.s, z2.h, z3.h[1]\n"
".inst 0x6473405a // bfdot z26.s, z2.h, z3.h[2]\n"
".inst 0x647b405d // bfdot z29.s, z2.h, z3.h[3]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL.hpp
index c5096ff4ba..5c0d59609b 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2023 Arm Limited.
+ * Copyright (c) 2019-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void sve_interleaved_bf16fp32_mmla_8x3VL( ARGLIST );
class cls_sve_interleaved_bf16fp32_mmla_8x3VL
{
public:
- typedef bfloat16 operand_type;
+ typedef bfloat16 lhs_operand_type;
+ typedef bfloat16 rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 6, 4, 2> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 6, 4, 2, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 4, 2> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 4, 2, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp
index ba7185752a..9cde63f9d7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_bf16fp32_mmla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,31 +55,31 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x22]\n"
"mov z10.b, #0x0\n"
+ "ld1h { z4.h }, p0/Z, [x22]\n"
"mov z11.b, #0x0\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z12.b, #0x0\n"
+ "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.b, #0x0\n"
- "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
"mov z16.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z17.b, #0x0\n"
- "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
"mov z18.b, #0x0\n"
+ "ld1rqh { z1.h }, p0/Z, [%x[Apanel], #16]\n"
"mov z19.b, #0x0\n"
- "addvl x22, x22, #2\n"
"mov z20.b, #0x0\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #32]\n"
"mov z21.b, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
"mov z22.b, #0x0\n"
+ "addvl x22, x22, #2\n"
"mov z23.b, #0x0\n"
"mov z24.b, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"mov z25.b, #0x0\n"
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
@@ -94,77 +94,77 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
".inst 0x6465e431 // bfmmla z17.s, z1.h, z5.h\n"
- "ld1h { z7.h }, p0/Z, [x22]\n"
+ "ld1h { z3.h }, p0/Z, [x22]\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
- "ld1h { z3.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
".inst 0x6464e4da // bfmmla z26.s, z6.h, z4.h\n"
".inst 0x6465e4dd // bfmmla z29.s, z6.h, z5.h\n"
- "ld1h { z5.h }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1h { z4.h }, p0/Z, [x22, #3, MUL VL]\n"
- ".inst 0x6467e409 // bfmmla z9.s, z0.h, z7.h\n"
- ".inst 0x6463e40c // bfmmla z12.s, z0.h, z3.h\n"
- ".inst 0x6467e42f // bfmmla z15.s, z1.h, z7.h\n"
- ".inst 0x6463e432 // bfmmla z18.s, z1.h, z3.h\n"
+ "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x6463e409 // bfmmla z9.s, z0.h, z3.h\n"
"sub x20, x20, #0x2\n"
- ".inst 0x6467e455 // bfmmla z21.s, z2.h, z7.h\n"
- ".inst 0x6463e458 // bfmmla z24.s, z2.h, z3.h\n"
+ ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
+ ".inst 0x6463e42f // bfmmla z15.s, z1.h, z3.h\n"
"cmp x20, #0x2\n"
- ".inst 0x6467e4db // bfmmla z27.s, z6.h, z7.h\n"
- ".inst 0x6463e4de // bfmmla z30.s, z6.h, z3.h\n"
+ ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
+ ".inst 0x6463e455 // bfmmla z21.s, z2.h, z3.h\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
+ ".inst 0x6463e4db // bfmmla z27.s, z6.h, z3.h\n"
"ld1h { z3.h }, p0/Z, [x22, #4, MUL VL]\n"
- ".inst 0x6465e40a // bfmmla z10.s, z0.h, z5.h\n"
- ".inst 0x6464e40d // bfmmla z13.s, z0.h, z4.h\n"
+ ".inst 0x6467e4de // bfmmla z30.s, z6.h, z7.h\n"
+ ".inst 0x6464e40a // bfmmla z10.s, z0.h, z4.h\n"
+ "ld1h { z7.h }, p0/Z, [x22, #5, MUL VL]\n"
+ ".inst 0x6465e40d // bfmmla z13.s, z0.h, z5.h\n"
+ ".inst 0x6464e430 // bfmmla z16.s, z1.h, z4.h\n"
"ld1rqh { z0.h }, p0/Z, [%x[Apanel], #16]\n"
- ".inst 0x6465e430 // bfmmla z16.s, z1.h, z5.h\n"
- ".inst 0x6464e433 // bfmmla z19.s, z1.h, z4.h\n"
+ ".inst 0x6465e433 // bfmmla z19.s, z1.h, z5.h\n"
+ ".inst 0x6464e456 // bfmmla z22.s, z2.h, z4.h\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #32]\n"
- ".inst 0x6465e456 // bfmmla z22.s, z2.h, z5.h\n"
- ".inst 0x6464e459 // bfmmla z25.s, z2.h, z4.h\n"
- "ld1h { z7.h }, p0/Z, [x22, #5, MUL VL]\n"
- ".inst 0x6465e4dc // bfmmla z28.s, z6.h, z5.h\n"
- ".inst 0x6464e4df // bfmmla z31.s, z6.h, z4.h\n"
- "ld1rqh { z5.h }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x6465e459 // bfmmla z25.s, z2.h, z5.h\n"
+ ".inst 0x6464e4dc // bfmmla z28.s, z6.h, z4.h\n"
+ "ld1rqh { z2.h }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x6465e4df // bfmmla z31.s, z6.h, z5.h\n"
"ld1rqh { z6.h }, p0/Z, [%x[Apanel], #64]\n"
- "ld1h { z2.h }, p0/Z, [x22, #6, MUL VL]\n"
- ".inst 0x6463e408 // bfmmla z8.s, z0.h, z3.h\n"
- "ld1h { z4.h }, p0/Z, [x22, #7, MUL VL]\n"
+ "ld1h { z4.h }, p0/Z, [x22, #6, MUL VL]\n"
+ "ld1h { z5.h }, p0/Z, [x22, #7, MUL VL]\n"
"addvl x22, x22, #16\n"
+ ".inst 0x6463e408 // bfmmla z8.s, z0.h, z3.h\n"
".inst 0x6467e40b // bfmmla z11.s, z0.h, z7.h\n"
".inst 0x6463e42e // bfmmla z14.s, z1.h, z3.h\n"
".inst 0x6467e431 // bfmmla z17.s, z1.h, z7.h\n"
- ".inst 0x6463e4b4 // bfmmla z20.s, z5.h, z3.h\n"
- ".inst 0x6467e4b7 // bfmmla z23.s, z5.h, z7.h\n"
+ ".inst 0x6463e454 // bfmmla z20.s, z2.h, z3.h\n"
+ ".inst 0x6467e457 // bfmmla z23.s, z2.h, z7.h\n"
".inst 0x6463e4da // bfmmla z26.s, z6.h, z3.h\n"
- ".inst 0x6467e4dd // bfmmla z29.s, z6.h, z7.h\n"
"ld1h { z3.h }, p0/Z, [x22, #-8, MUL VL]\n"
+ ".inst 0x6467e4dd // bfmmla z29.s, z6.h, z7.h\n"
"ld1h { z7.h }, p0/Z, [x22, #-7, MUL VL]\n"
- ".inst 0x6462e409 // bfmmla z9.s, z0.h, z2.h\n"
- ".inst 0x6464e40c // bfmmla z12.s, z0.h, z4.h\n"
- ".inst 0x6462e42f // bfmmla z15.s, z1.h, z2.h\n"
- ".inst 0x6464e432 // bfmmla z18.s, z1.h, z4.h\n"
- ".inst 0x6462e4b5 // bfmmla z21.s, z5.h, z2.h\n"
- ".inst 0x6464e4b8 // bfmmla z24.s, z5.h, z4.h\n"
- ".inst 0x6462e4db // bfmmla z27.s, z6.h, z2.h\n"
- ".inst 0x6464e4de // bfmmla z30.s, z6.h, z4.h\n"
+ ".inst 0x6464e409 // bfmmla z9.s, z0.h, z4.h\n"
+ ".inst 0x6465e40c // bfmmla z12.s, z0.h, z5.h\n"
+ ".inst 0x6464e42f // bfmmla z15.s, z1.h, z4.h\n"
+ ".inst 0x6465e432 // bfmmla z18.s, z1.h, z5.h\n"
+ ".inst 0x6464e455 // bfmmla z21.s, z2.h, z4.h\n"
+ ".inst 0x6465e458 // bfmmla z24.s, z2.h, z5.h\n"
+ ".inst 0x6464e4db // bfmmla z27.s, z6.h, z4.h\n"
"ld1h { z4.h }, p0/Z, [x22, #-6, MUL VL]\n"
+ ".inst 0x6465e4de // bfmmla z30.s, z6.h, z5.h\n"
".inst 0x6463e40a // bfmmla z10.s, z0.h, z3.h\n"
+ "ld1h { z5.h }, p0/Z, [x22, #-5, MUL VL]\n"
".inst 0x6467e40d // bfmmla z13.s, z0.h, z7.h\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x6463e430 // bfmmla z16.s, z1.h, z3.h\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x6467e433 // bfmmla z19.s, z1.h, z7.h\n"
+ ".inst 0x6463e456 // bfmmla z22.s, z2.h, z3.h\n"
"ld1rqh { z1.h }, p0/Z, [%x[Apanel], #96]\n"
- ".inst 0x6463e4b6 // bfmmla z22.s, z5.h, z3.h\n"
- ".inst 0x6467e4b9 // bfmmla z25.s, z5.h, z7.h\n"
- "ld1h { z5.h }, p0/Z, [x22, #-5, MUL VL]\n"
+ ".inst 0x6467e459 // bfmmla z25.s, z2.h, z7.h\n"
".inst 0x6463e4dc // bfmmla z28.s, z6.h, z3.h\n"
- ".inst 0x6467e4df // bfmmla z31.s, z6.h, z7.h\n"
"ld1rqh { z2.h }, p0/Z, [%x[Apanel], #112]\n"
+ ".inst 0x6467e4df // bfmmla z31.s, z6.h, z7.h\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
"addvl x22, x22, #-4\n"
"bge 3b\n"
"4:" // main loop skip
- "ld1rqh { z7.h }, p0/Z, [%x[Apanel]]\n"
+ "ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
".inst 0x6464e408 // bfmmla z8.s, z0.h, z4.h\n"
".inst 0x6465e40b // bfmmla z11.s, z0.h, z5.h\n"
".inst 0x6464e42e // bfmmla z14.s, z1.h, z4.h\n"
@@ -172,54 +172,54 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
"ld1h { z6.h }, p0/Z, [x22]\n"
".inst 0x6464e454 // bfmmla z20.s, z2.h, z4.h\n"
".inst 0x6465e457 // bfmmla z23.s, z2.h, z5.h\n"
- "ld1h { z3.h }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x6464e4fa // bfmmla z26.s, z7.h, z4.h\n"
- ".inst 0x6465e4fd // bfmmla z29.s, z7.h, z5.h\n"
+ "ld1h { z7.h }, p0/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x6464e47a // bfmmla z26.s, z3.h, z4.h\n"
+ ".inst 0x6465e47d // bfmmla z29.s, z3.h, z5.h\n"
"ld1h { z5.h }, p0/Z, [x22, #2, MUL VL]\n"
"ld1h { z4.h }, p0/Z, [x22, #3, MUL VL]\n"
".inst 0x6466e409 // bfmmla z9.s, z0.h, z6.h\n"
- ".inst 0x6463e40c // bfmmla z12.s, z0.h, z3.h\n"
- ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
- ".inst 0x6463e432 // bfmmla z18.s, z1.h, z3.h\n"
"add %x[Apanel], %x[Apanel], #0x10\n"
- ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
- ".inst 0x6463e458 // bfmmla z24.s, z2.h, z3.h\n"
+ ".inst 0x6467e40c // bfmmla z12.s, z0.h, z7.h\n"
+ ".inst 0x6466e42f // bfmmla z15.s, z1.h, z6.h\n"
"addvl x22, x22, #4\n"
- ".inst 0x6466e4fb // bfmmla z27.s, z7.h, z6.h\n"
- ".inst 0x6463e4fe // bfmmla z30.s, z7.h, z3.h\n"
+ ".inst 0x6467e432 // bfmmla z18.s, z1.h, z7.h\n"
+ ".inst 0x6466e455 // bfmmla z21.s, z2.h, z6.h\n"
+ ".inst 0x6467e458 // bfmmla z24.s, z2.h, z7.h\n"
+ ".inst 0x6466e47b // bfmmla z27.s, z3.h, z6.h\n"
+ ".inst 0x6467e47e // bfmmla z30.s, z3.h, z7.h\n"
".inst 0x6465e40a // bfmmla z10.s, z0.h, z5.h\n"
".inst 0x6464e40d // bfmmla z13.s, z0.h, z4.h\n"
".inst 0x6465e430 // bfmmla z16.s, z1.h, z5.h\n"
".inst 0x6464e433 // bfmmla z19.s, z1.h, z4.h\n"
".inst 0x6465e456 // bfmmla z22.s, z2.h, z5.h\n"
".inst 0x6464e459 // bfmmla z25.s, z2.h, z4.h\n"
- ".inst 0x6465e4fc // bfmmla z28.s, z7.h, z5.h\n"
- ".inst 0x6464e4ff // bfmmla z31.s, z7.h, z4.h\n"
+ ".inst 0x6465e47c // bfmmla z28.s, z3.h, z5.h\n"
+ ".inst 0x6464e47f // bfmmla z31.s, z3.h, z4.h\n"
"cbz x20, 5f\n"
"ld1h { z1.h }, p0/Z, [x22]\n"
"ld1rqh { z7.h }, p0/Z, [%x[Apanel]]\n"
- ".inst 0x6461e4e8 // bfmmla z8.s, z7.h, z1.h\n"
"ld1rqh { z6.h }, p0/Z, [%x[Apanel], #16]\n"
"ld1h { z0.h }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x6460e4eb // bfmmla z11.s, z7.h, z0.h\n"
"ld1rqh { z5.h }, p0/Z, [%x[Apanel], #32]\n"
"ld1rqh { z4.h }, p0/Z, [%x[Apanel], #48]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x6461e4e8 // bfmmla z8.s, z7.h, z1.h\n"
+ "ld1h { z3.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x6460e4eb // bfmmla z11.s, z7.h, z0.h\n"
".inst 0x6461e4ce // bfmmla z14.s, z6.h, z1.h\n"
".inst 0x6460e4d1 // bfmmla z17.s, z6.h, z0.h\n"
".inst 0x6461e4b4 // bfmmla z20.s, z5.h, z1.h\n"
- "ld1h { z3.h }, p0/Z, [x22, #2, MUL VL]\n"
".inst 0x6460e4b7 // bfmmla z23.s, z5.h, z0.h\n"
".inst 0x6461e49a // bfmmla z26.s, z4.h, z1.h\n"
- "ld1h { z2.h }, p0/Z, [x22, #3, MUL VL]\n"
- ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z1.h }, p0/Z, [x22, #4, MUL VL]\n"
+ ".inst 0x6460e49d // bfmmla z29.s, z4.h, z0.h\n"
"ld1h { z0.h }, p0/Z, [x22, #5, MUL VL]\n"
".inst 0x6463e4e9 // bfmmla z9.s, z7.h, z3.h\n"
".inst 0x6462e4ec // bfmmla z12.s, z7.h, z2.h\n"
- "addvl x22, x22, #6\n"
".inst 0x6463e4cf // bfmmla z15.s, z6.h, z3.h\n"
+ "addvl x22, x22, #6\n"
".inst 0x6462e4d2 // bfmmla z18.s, z6.h, z2.h\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x6463e4b5 // bfmmla z21.s, z5.h, z3.h\n"
".inst 0x6462e4b8 // bfmmla z24.s, z5.h, z2.h\n"
".inst 0x6463e49b // bfmmla z27.s, z4.h, z3.h\n"
@@ -233,53 +233,53 @@ void sve_interleaved_bf16fp32_mmla_8x3VL(
".inst 0x6461e49c // bfmmla z28.s, z4.h, z1.h\n"
".inst 0x6460e49f // bfmmla z31.s, z4.h, z0.h\n"
"5:" // multiply loop done
- "uzp1 z0.d, z8.d, z11.d\n"
+ "uzp1 z2.d, z8.d, z11.d\n"
"uzp2 z8.d, z8.d, z11.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel]]\n"
- "uzp1 z0.d, z9.d, z12.d\n"
+ "subs x23, x23, #0x1\n"
+ "uzp1 z1.d, z9.d, z12.d\n"
"uzp2 z9.d, z9.d, z12.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"uzp1 z0.d, z10.d, z13.d\n"
"uzp2 z10.d, z10.d, z13.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
- "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
- "uzp1 z0.d, z14.d, z17.d\n"
+ "st1w { z2.s }, p0, [%x[Cpanel]]\n"
+ "uzp1 z3.d, z14.d, z17.d\n"
"uzp2 z14.d, z14.d, z17.d\n"
- "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
- "uzp1 z1.d, z15.d, z18.d\n"
- "subs x23, x23, #0x1\n"
- "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "uzp1 z17.d, z15.d, z18.d\n"
"uzp2 z15.d, z15.d, z18.d\n"
- "uzp1 z17.d, z16.d, z19.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "uzp1 z2.d, z16.d, z19.d\n"
"uzp2 z16.d, z16.d, z19.d\n"
- "uzp1 z0.d, z20.d, z23.d\n"
- "st1w { z1.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
- "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "uzp1 z1.d, z20.d, z23.d\n"
"uzp2 z20.d, z20.d, z23.d\n"
- "st1w { z17.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
- "uzp1 z23.d, z21.d, z24.d\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "uzp1 z0.d, z21.d, z24.d\n"
"uzp2 z21.d, z21.d, z24.d\n"
- "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
- "uzp1 z19.d, z22.d, z25.d\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "uzp1 z23.d, z22.d, z25.d\n"
"uzp2 z22.d, z22.d, z25.d\n"
- "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
- "uzp1 z18.d, z26.d, z29.d\n"
+ "st1w { z3.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "uzp1 z19.d, z26.d, z29.d\n"
"uzp2 z26.d, z26.d, z29.d\n"
- "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
- "uzp1 z17.d, z27.d, z30.d\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "uzp1 z18.d, z27.d, z30.d\n"
"uzp2 z27.d, z27.d, z30.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
- "uzp1 z16.d, z28.d, z31.d\n"
+ "uzp1 z17.d, z28.d, z31.d\n"
"uzp2 z28.d, z28.d, z31.d\n"
- "st1w { z23.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
- "st1w { z19.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z2.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
"st1w { z20.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
"st1w { z21.s }, p0, [%x[Cpanel]]\n"
"st1w { z22.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
- "st1w { z18.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
- "st1w { z17.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
- "st1w { z16.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"st1w { z26.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"st1w { z27.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
"st1w { z28.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL.hpp
index 6c54167763..292ac1760e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx( ARGLIST );
class cls_sve_interleaved_fp16_mla_8x3VL
{
public:
- typedef __fp16 operand_type;
+ typedef __fp16 lhs_operand_type;
+ typedef __fp16 rhs_operand_type;
typedef __fp16 result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 1, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp
index 609277d889..360c61f0b4 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,31 +54,31 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1h { z0.h }, p0/Z, [x22]\n"
"mov z10.b, #0x0\n"
+ "ld1h { z0.h }, p0/Z, [x22]\n"
"mov z11.b, #0x0\n"
- "ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
"mov z12.b, #0x0\n"
+ "ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.b, #0x0\n"
- "ld1h { z2.h }, p0/Z, [x22, #2, MUL VL]\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
"mov z16.b, #0x0\n"
+ "ld1h { z2.h }, p0/Z, [x22, #2, MUL VL]\n"
"mov z17.b, #0x0\n"
- "ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
"mov z18.b, #0x0\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
"mov z19.b, #0x0\n"
- "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
"mov z20.b, #0x0\n"
+ "ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
"mov z21.b, #0x0\n"
- "ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
"mov z22.b, #0x0\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
"mov z23.b, #0x0\n"
"mov z24.b, #0x0\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #6]\n"
"mov z25.b, #0x0\n"
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
@@ -92,7 +92,7 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"fmla z9.h, p0/M, z1.h, z3.h\n"
"sub x20, x20, #0x2\n"
"fmla z10.h, p0/M, z2.h, z3.h\n"
- "ld1rh { z7.h }, p0/Z, [%x[Apanel], #8]\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
"fmla z11.h, p0/M, z0.h, z4.h\n"
"fmla z12.h, p0/M, z1.h, z4.h\n"
"fmla z13.h, p0/M, z2.h, z4.h\n"
@@ -101,63 +101,63 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"fmla z15.h, p0/M, z1.h, z5.h\n"
"cmp x20, #0x2\n"
"fmla z16.h, p0/M, z2.h, z5.h\n"
- "ld1rh { z3.h }, p0/Z, [%x[Apanel], #12]\n"
+ "ld1rh { z7.h }, p0/Z, [%x[Apanel], #12]\n"
"fmla z17.h, p0/M, z0.h, z6.h\n"
"fmla z18.h, p0/M, z1.h, z6.h\n"
"fmla z19.h, p0/M, z2.h, z6.h\n"
- "ld1rh { z5.h }, p0/Z, [%x[Apanel], #14]\n"
- "fmla z20.h, p0/M, z0.h, z7.h\n"
- "fmla z21.h, p0/M, z1.h, z7.h\n"
- "fmla z22.h, p0/M, z2.h, z7.h\n"
- "ld1rh { z7.h }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rh { z6.h }, p0/Z, [%x[Apanel], #14]\n"
+ "fmla z20.h, p0/M, z0.h, z3.h\n"
+ "fmla z21.h, p0/M, z1.h, z3.h\n"
+ "fmla z22.h, p0/M, z2.h, z3.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #16]\n"
"fmla z23.h, p0/M, z0.h, z4.h\n"
"fmla z24.h, p0/M, z1.h, z4.h\n"
"fmla z25.h, p0/M, z2.h, z4.h\n"
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #18]\n"
- "fmla z26.h, p0/M, z0.h, z3.h\n"
- "fmla z27.h, p0/M, z1.h, z3.h\n"
- "fmla z28.h, p0/M, z2.h, z3.h\n"
+ "fmla z26.h, p0/M, z0.h, z7.h\n"
+ "fmla z27.h, p0/M, z1.h, z7.h\n"
+ "fmla z28.h, p0/M, z2.h, z7.h\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel], #20]\n"
- "fmla z29.h, p0/M, z0.h, z5.h\n"
- "ld1h { z6.h }, p0/Z, [x22, #3, MUL VL]\n"
- "fmla z30.h, p0/M, z1.h, z5.h\n"
- "fmla z31.h, p0/M, z2.h, z5.h\n"
- "ld1h { z2.h }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1h { z5.h }, p0/Z, [x22, #5, MUL VL]\n"
- "fmla z8.h, p0/M, z6.h, z7.h\n"
+ "fmla z29.h, p0/M, z0.h, z6.h\n"
+ "ld1h { z7.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "fmla z30.h, p0/M, z1.h, z6.h\n"
+ "fmla z31.h, p0/M, z2.h, z6.h\n"
+ "ld1h { z6.h }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1h { z2.h }, p0/Z, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
"ld1rh { z1.h }, p0/Z, [%x[Apanel], #22]\n"
- "fmla z9.h, p0/M, z2.h, z7.h\n"
- "fmla z10.h, p0/M, z5.h, z7.h\n"
- "fmla z11.h, p0/M, z6.h, z4.h\n"
- "ld1rh { z7.h }, p0/Z, [%x[Apanel], #24]\n"
- "fmla z12.h, p0/M, z2.h, z4.h\n"
- "fmla z13.h, p0/M, z5.h, z4.h\n"
+ "fmla z8.h, p0/M, z7.h, z5.h\n"
+ "fmla z11.h, p0/M, z7.h, z4.h\n"
+ "fmla z9.h, p0/M, z6.h, z5.h\n"
+ "fmla z12.h, p0/M, z6.h, z4.h\n"
+ "fmla z10.h, p0/M, z2.h, z5.h\n"
+ "fmla z13.h, p0/M, z2.h, z4.h\n"
+ "ld1rh { z5.h }, p0/Z, [%x[Apanel], #24]\n"
+ "fmla z14.h, p0/M, z7.h, z3.h\n"
+ "fmla z15.h, p0/M, z6.h, z3.h\n"
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #26]\n"
- "fmla z14.h, p0/M, z6.h, z3.h\n"
- "fmla z15.h, p0/M, z2.h, z3.h\n"
- "addvl x22, x22, #6\n"
- "fmla z16.h, p0/M, z5.h, z3.h\n"
+ "fmla z16.h, p0/M, z2.h, z3.h\n"
+ "fmla z17.h, p0/M, z7.h, z1.h\n"
"ld1rh { z0.h }, p0/Z, [%x[Apanel], #28]\n"
- "fmla z17.h, p0/M, z6.h, z1.h\n"
- "fmla z18.h, p0/M, z2.h, z1.h\n"
- "fmla z19.h, p0/M, z5.h, z1.h\n"
+ "fmla z18.h, p0/M, z6.h, z1.h\n"
+ "fmla z19.h, p0/M, z2.h, z1.h\n"
"ld1rh { z1.h }, p0/Z, [%x[Apanel], #30]\n"
"add %x[Apanel], %x[Apanel], #0x20\n"
- "fmla z20.h, p0/M, z6.h, z7.h\n"
- "fmla z21.h, p0/M, z2.h, z7.h\n"
- "fmla z22.h, p0/M, z5.h, z7.h\n"
- "fmla z23.h, p0/M, z6.h, z4.h\n"
+ "fmla z20.h, p0/M, z7.h, z5.h\n"
+ "fmla z21.h, p0/M, z6.h, z5.h\n"
+ "fmla z22.h, p0/M, z2.h, z5.h\n"
+ "fmla z23.h, p0/M, z7.h, z4.h\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
- "fmla z24.h, p0/M, z2.h, z4.h\n"
- "fmla z25.h, p0/M, z5.h, z4.h\n"
+ "fmla z24.h, p0/M, z6.h, z4.h\n"
+ "fmla z25.h, p0/M, z2.h, z4.h\n"
"ld1rh { z4.h }, p0/Z, [%x[Apanel], #2]\n"
- "fmla z26.h, p0/M, z6.h, z0.h\n"
- "fmla z27.h, p0/M, z2.h, z0.h\n"
- "fmla z28.h, p0/M, z5.h, z0.h\n"
- "fmla z29.h, p0/M, z6.h, z1.h\n"
+ "fmla z26.h, p0/M, z7.h, z0.h\n"
+ "fmla z27.h, p0/M, z6.h, z0.h\n"
+ "fmla z28.h, p0/M, z2.h, z0.h\n"
+ "fmla z29.h, p0/M, z7.h, z1.h\n"
"ld1h { z0.h }, p0/Z, [x22]\n"
- "fmla z30.h, p0/M, z2.h, z1.h\n"
- "fmla z31.h, p0/M, z5.h, z1.h\n"
+ "fmla z30.h, p0/M, z6.h, z1.h\n"
+ "fmla z31.h, p0/M, z2.h, z1.h\n"
"ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
"ld1h { z2.h }, p0/Z, [x22, #2, MUL VL]\n"
"ld1rh { z5.h }, p0/Z, [%x[Apanel], #4]\n"
@@ -199,19 +199,20 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"ld1h { z5.h }, p0/Z, [x22, #1, MUL VL]\n"
"ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
"ld1rh { z3.h }, p0/Z, [%x[Apanel]]\n"
- "fmla z8.h, p0/M, z6.h, z3.h\n"
+ "addvl x22, x22, #3\n"
"ld1rh { z2.h }, p0/Z, [%x[Apanel], #2]\n"
"ld1rh { z1.h }, p0/Z, [%x[Apanel], #4]\n"
- "fmla z9.h, p0/M, z5.h, z3.h\n"
"ld1rh { z0.h }, p0/Z, [%x[Apanel], #6]\n"
+ "fmla z8.h, p0/M, z6.h, z3.h\n"
+ "fmla z9.h, p0/M, z5.h, z3.h\n"
"fmla z10.h, p0/M, z4.h, z3.h\n"
"fmla z11.h, p0/M, z6.h, z2.h\n"
+ "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
"fmla z12.h, p0/M, z5.h, z2.h\n"
"fmla z13.h, p0/M, z4.h, z2.h\n"
- "ld1rh { z3.h }, p0/Z, [%x[Apanel], #8]\n"
+ "ld1rh { z2.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z14.h, p0/M, z6.h, z1.h\n"
"fmla z15.h, p0/M, z5.h, z1.h\n"
- "ld1rh { z2.h }, p0/Z, [%x[Apanel], #10]\n"
"fmla z16.h, p0/M, z4.h, z1.h\n"
"fmla z17.h, p0/M, z6.h, z0.h\n"
"ld1rh { z1.h }, p0/Z, [%x[Apanel], #12]\n"
@@ -220,10 +221,9 @@ void sve_interleaved_fp16_mla_8x3VL_a64fx(
"ld1rh { z0.h }, p0/Z, [%x[Apanel], #14]\n"
"fmla z20.h, p0/M, z6.h, z3.h\n"
"fmla z21.h, p0/M, z5.h, z3.h\n"
- "addvl x22, x22, #3\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z22.h, p0/M, z4.h, z3.h\n"
"fmla z23.h, p0/M, z6.h, z2.h\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z24.h, p0/M, z5.h, z2.h\n"
"fmla z25.h, p0/M, z4.h, z2.h\n"
"fmla z26.h, p0/M, z6.h, z1.h\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp
index 3b16c97e2c..09180c8f36 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp16_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,22 +54,22 @@ void sve_interleaved_fp16_mla_8x3VL(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z10.b, #0x0\n"
- "mov z11.b, #0x0\n"
"ld1h { z2.h }, p0/Z, [x22]\n"
+ "mov z11.b, #0x0\n"
"mov z12.b, #0x0\n"
- "mov z13.b, #0x0\n"
"ld1h { z3.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
+ "mov z13.b, #0x0\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
"mov z16.b, #0x0\n"
+ "ld1rqh { z0.h }, p0/Z, [%x[Apanel]]\n"
"mov z17.b, #0x0\n"
"mov z18.b, #0x0\n"
+ "ld1h { z4.h }, p0/Z, [x22, #2, MUL VL]\n"
"mov z19.b, #0x0\n"
"mov z20.b, #0x0\n"
"mov z21.b, #0x0\n"
@@ -147,12 +147,12 @@ void sve_interleaved_fp16_mla_8x3VL(
"fmla z31.h, z1.h, z7.h[7]\n"
"bge 3b\n"
"4:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ "addvl x22, x22, #3\n"
"fmla z8.h, z2.h, z0.h[0]\n"
"fmla z11.h, z2.h, z0.h[1]\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z14.h, z2.h, z0.h[2]\n"
"fmla z17.h, z2.h, z0.h[3]\n"
- "addvl x22, x22, #3\n"
"fmla z20.h, z2.h, z0.h[4]\n"
"fmla z23.h, z2.h, z0.h[5]\n"
"fmla z26.h, z2.h, z0.h[6]\n"
@@ -176,16 +176,16 @@ void sve_interleaved_fp16_mla_8x3VL(
"cbz x20, 5f\n"
"ld1rqh { z3.h }, p0/Z, [%x[Apanel]]\n"
"ld1h { z2.h }, p0/Z, [x22]\n"
- "fmla z8.h, z2.h, z3.h[0]\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
"ld1h { z1.h }, p0/Z, [x22, #1, MUL VL]\n"
"ld1h { z0.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "fmla z8.h, z2.h, z3.h[0]\n"
"fmla z11.h, z2.h, z3.h[1]\n"
"fmla z14.h, z2.h, z3.h[2]\n"
"fmla z17.h, z2.h, z3.h[3]\n"
- "add %x[Apanel], %x[Apanel], #0x10\n"
"fmla z20.h, z2.h, z3.h[4]\n"
"fmla z23.h, z2.h, z3.h[5]\n"
- "addvl x22, x22, #3\n"
"fmla z26.h, z2.h, z3.h[6]\n"
"fmla z29.h, z2.h, z3.h[7]\n"
"fmla z9.h, z1.h, z3.h[0]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL.hpp
index 23ab7ce10a..89d65083f4 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx( ARGLIST );
class cls_sve_interleaved_fp32_mla_8x3VL
{
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 1, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 1, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
index 0b13913717..0006fddb2a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,31 +54,31 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1w { z0.s }, p0/Z, [x22]\n"
"mov z10.b, #0x0\n"
+ "ld1w { z0.s }, p0/Z, [x22]\n"
"mov z11.b, #0x0\n"
- "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
"mov z12.b, #0x0\n"
+ "ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.b, #0x0\n"
- "ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"mov z16.b, #0x0\n"
+ "ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
"mov z17.b, #0x0\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
"mov z18.b, #0x0\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"mov z19.b, #0x0\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"mov z20.b, #0x0\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
"mov z21.b, #0x0\n"
- "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"mov z22.b, #0x0\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"mov z23.b, #0x0\n"
"mov z24.b, #0x0\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"mov z25.b, #0x0\n"
"mov z26.b, #0x0\n"
"mov z27.b, #0x0\n"
@@ -92,7 +92,7 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"fmla z9.s, p0/M, z1.s, z3.s\n"
"sub x20, x20, #0x2\n"
"fmla z10.s, p0/M, z2.s, z3.s\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z11.s, p0/M, z0.s, z4.s\n"
"fmla z12.s, p0/M, z1.s, z4.s\n"
"fmla z13.s, p0/M, z2.s, z4.s\n"
@@ -101,63 +101,63 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"fmla z15.s, p0/M, z1.s, z5.s\n"
"cmp x20, #0x2\n"
"fmla z16.s, p0/M, z2.s, z5.s\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #24]\n"
+ "ld1rw { z7.s }, p0/Z, [%x[Apanel], #24]\n"
"fmla z17.s, p0/M, z0.s, z6.s\n"
"fmla z18.s, p0/M, z1.s, z6.s\n"
"fmla z19.s, p0/M, z2.s, z6.s\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #28]\n"
- "fmla z20.s, p0/M, z0.s, z7.s\n"
- "fmla z21.s, p0/M, z1.s, z7.s\n"
- "fmla z22.s, p0/M, z2.s, z7.s\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "fmla z20.s, p0/M, z0.s, z3.s\n"
+ "fmla z21.s, p0/M, z1.s, z3.s\n"
+ "fmla z22.s, p0/M, z2.s, z3.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #32]\n"
"fmla z23.s, p0/M, z0.s, z4.s\n"
"fmla z24.s, p0/M, z1.s, z4.s\n"
"fmla z25.s, p0/M, z2.s, z4.s\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #36]\n"
- "fmla z26.s, p0/M, z0.s, z3.s\n"
- "fmla z27.s, p0/M, z1.s, z3.s\n"
- "fmla z28.s, p0/M, z2.s, z3.s\n"
+ "fmla z26.s, p0/M, z0.s, z7.s\n"
+ "fmla z27.s, p0/M, z1.s, z7.s\n"
+ "fmla z28.s, p0/M, z2.s, z7.s\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #40]\n"
- "fmla z29.s, p0/M, z0.s, z5.s\n"
- "ld1w { z6.s }, p0/Z, [x22, #3, MUL VL]\n"
- "fmla z30.s, p0/M, z1.s, z5.s\n"
- "fmla z31.s, p0/M, z2.s, z5.s\n"
- "ld1w { z2.s }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1w { z5.s }, p0/Z, [x22, #5, MUL VL]\n"
- "fmla z8.s, p0/M, z6.s, z7.s\n"
+ "fmla z29.s, p0/M, z0.s, z6.s\n"
+ "ld1w { z7.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "fmla z30.s, p0/M, z1.s, z6.s\n"
+ "fmla z31.s, p0/M, z2.s, z6.s\n"
+ "ld1w { z6.s }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z2.s }, p0/Z, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #44]\n"
- "fmla z9.s, p0/M, z2.s, z7.s\n"
- "fmla z10.s, p0/M, z5.s, z7.s\n"
- "fmla z11.s, p0/M, z6.s, z4.s\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #48]\n"
- "fmla z12.s, p0/M, z2.s, z4.s\n"
- "fmla z13.s, p0/M, z5.s, z4.s\n"
+ "fmla z8.s, p0/M, z7.s, z5.s\n"
+ "fmla z11.s, p0/M, z7.s, z4.s\n"
+ "fmla z9.s, p0/M, z6.s, z5.s\n"
+ "fmla z12.s, p0/M, z6.s, z4.s\n"
+ "fmla z10.s, p0/M, z2.s, z5.s\n"
+ "fmla z13.s, p0/M, z2.s, z4.s\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #48]\n"
+ "fmla z14.s, p0/M, z7.s, z3.s\n"
+ "fmla z15.s, p0/M, z6.s, z3.s\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
- "fmla z14.s, p0/M, z6.s, z3.s\n"
- "fmla z15.s, p0/M, z2.s, z3.s\n"
- "addvl x22, x22, #6\n"
- "fmla z16.s, p0/M, z5.s, z3.s\n"
+ "fmla z16.s, p0/M, z2.s, z3.s\n"
+ "fmla z17.s, p0/M, z7.s, z1.s\n"
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #56]\n"
- "fmla z17.s, p0/M, z6.s, z1.s\n"
- "fmla z18.s, p0/M, z2.s, z1.s\n"
- "fmla z19.s, p0/M, z5.s, z1.s\n"
+ "fmla z18.s, p0/M, z6.s, z1.s\n"
+ "fmla z19.s, p0/M, z2.s, z1.s\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #60]\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
- "fmla z20.s, p0/M, z6.s, z7.s\n"
- "fmla z21.s, p0/M, z2.s, z7.s\n"
- "fmla z22.s, p0/M, z5.s, z7.s\n"
- "fmla z23.s, p0/M, z6.s, z4.s\n"
+ "fmla z20.s, p0/M, z7.s, z5.s\n"
+ "fmla z21.s, p0/M, z6.s, z5.s\n"
+ "fmla z22.s, p0/M, z2.s, z5.s\n"
+ "fmla z23.s, p0/M, z7.s, z4.s\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
- "fmla z24.s, p0/M, z2.s, z4.s\n"
- "fmla z25.s, p0/M, z5.s, z4.s\n"
+ "fmla z24.s, p0/M, z6.s, z4.s\n"
+ "fmla z25.s, p0/M, z2.s, z4.s\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
- "fmla z26.s, p0/M, z6.s, z0.s\n"
- "fmla z27.s, p0/M, z2.s, z0.s\n"
- "fmla z28.s, p0/M, z5.s, z0.s\n"
- "fmla z29.s, p0/M, z6.s, z1.s\n"
+ "fmla z26.s, p0/M, z7.s, z0.s\n"
+ "fmla z27.s, p0/M, z6.s, z0.s\n"
+ "fmla z28.s, p0/M, z2.s, z0.s\n"
+ "fmla z29.s, p0/M, z7.s, z1.s\n"
"ld1w { z0.s }, p0/Z, [x22]\n"
- "fmla z30.s, p0/M, z2.s, z1.s\n"
- "fmla z31.s, p0/M, z5.s, z1.s\n"
+ "fmla z30.s, p0/M, z6.s, z1.s\n"
+ "fmla z31.s, p0/M, z2.s, z1.s\n"
"ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
"ld1w { z2.s }, p0/Z, [x22, #2, MUL VL]\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
@@ -199,19 +199,20 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"ld1w { z5.s }, p0/Z, [x22, #1, MUL VL]\n"
"ld1w { z4.s }, p0/Z, [x22, #2, MUL VL]\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
- "fmla z8.s, p0/M, z6.s, z3.s\n"
+ "addvl x22, x22, #3\n"
"ld1rw { z2.s }, p0/Z, [%x[Apanel], #4]\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #8]\n"
- "fmla z9.s, p0/M, z5.s, z3.s\n"
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #12]\n"
+ "fmla z8.s, p0/M, z6.s, z3.s\n"
+ "fmla z9.s, p0/M, z5.s, z3.s\n"
"fmla z10.s, p0/M, z4.s, z3.s\n"
"fmla z11.s, p0/M, z6.s, z2.s\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"fmla z12.s, p0/M, z5.s, z2.s\n"
"fmla z13.s, p0/M, z4.s, z2.s\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z2.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z14.s, p0/M, z6.s, z1.s\n"
"fmla z15.s, p0/M, z5.s, z1.s\n"
- "ld1rw { z2.s }, p0/Z, [%x[Apanel], #20]\n"
"fmla z16.s, p0/M, z4.s, z1.s\n"
"fmla z17.s, p0/M, z6.s, z0.s\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #24]\n"
@@ -220,10 +221,9 @@ void sve_interleaved_fp32_mla_8x3VL_a64fx(
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #28]\n"
"fmla z20.s, p0/M, z6.s, z3.s\n"
"fmla z21.s, p0/M, z5.s, z3.s\n"
- "addvl x22, x22, #3\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z22.s, p0/M, z4.s, z3.s\n"
"fmla z23.s, p0/M, z6.s, z2.s\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z24.s, p0/M, z5.s, z2.s\n"
"fmla z25.s, p0/M, z4.s, z2.s\n"
"fmla z26.s, p0/M, z6.s, z1.s\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
index c7f32ff7a9..43591e9201 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -54,25 +54,25 @@ void sve_interleaved_fp32_mla_8x3VL(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.b, #0x0\n"
"mov z9.b, #0x0\n"
- "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
"mov z10.b, #0x0\n"
+ "ld1w { z4.s }, p0/Z, [x22]\n"
"mov z11.b, #0x0\n"
- "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
"mov z12.b, #0x0\n"
+ "ld1w { z5.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.b, #0x0\n"
- "ld1w { z4.s }, p0/Z, [x22]\n"
"mov z14.b, #0x0\n"
"mov z15.b, #0x0\n"
- "ld1w { z5.s }, p0/Z, [x22, #1, MUL VL]\n"
"mov z16.b, #0x0\n"
+ "ld1rqw { z0.s }, p0/Z, [%x[Apanel]]\n"
"mov z17.b, #0x0\n"
- "ld1w { z6.s }, p0/Z, [x22, #2, MUL VL]\n"
"mov z18.b, #0x0\n"
+ "ld1rqw { z1.s }, p0/Z, [%x[Apanel], #16]\n"
"mov z19.b, #0x0\n"
"mov z20.b, #0x0\n"
+ "ld1w { z6.s }, p0/Z, [x22, #2, MUL VL]\n"
"mov z21.b, #0x0\n"
"mov z22.b, #0x0\n"
"mov z23.b, #0x0\n"
@@ -150,12 +150,12 @@ void sve_interleaved_fp32_mla_8x3VL(
"ld1w { z6.s }, p0/Z, [x22, #2, MUL VL]\n"
"bge 3b\n"
"4:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "addvl x22, x22, #3\n"
"fmla z8.s, z4.s, z0.s[0]\n"
"fmla z11.s, z4.s, z0.s[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"fmla z14.s, z4.s, z0.s[2]\n"
"fmla z17.s, z4.s, z0.s[3]\n"
- "addvl x22, x22, #3\n"
"fmla z20.s, z4.s, z1.s[0]\n"
"fmla z23.s, z4.s, z1.s[1]\n"
"fmla z26.s, z4.s, z1.s[2]\n"
@@ -182,13 +182,13 @@ void sve_interleaved_fp32_mla_8x3VL(
"add %x[Apanel], %x[Apanel], #0x20\n"
"ld1w { z2.s }, p0/Z, [x22]\n"
"ld1w { z1.s }, p0/Z, [x22, #1, MUL VL]\n"
- "fmla z8.s, z2.s, z4.s[0]\n"
"ld1w { z0.s }, p0/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "fmla z8.s, z2.s, z4.s[0]\n"
"fmla z11.s, z2.s, z4.s[1]\n"
"fmla z14.s, z2.s, z4.s[2]\n"
"fmla z17.s, z2.s, z4.s[3]\n"
"fmla z20.s, z2.s, z3.s[0]\n"
- "addvl x22, x22, #3\n"
"fmla z23.s, z2.s, z3.s[1]\n"
"fmla z26.s, z2.s, z3.s[2]\n"
"fmla z29.s, z2.s, z3.s[3]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mmla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mmla_8x3VL.hpp
index a355262fe2..3a0e7f4c20 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mmla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_fp32_mmla_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,7 +35,8 @@ void sve_interleaved_fp32_mmla_8x3VL(const float *, const float *, float *, int,
class cls_sve_interleaved_fp32_mmla_8x3VL {
public:
- typedef float operand_type;
+ typedef float lhs_operand_type;
+ typedef float rhs_operand_type;
typedef float result_type;
typedef void (*kern_type)(const float *, const float *, float *, int, int, int);
@@ -57,7 +58,7 @@ public:
}
// Use the standard fixed size transforms.
- StdTransformsSVE<operand_type, result_type, 8, 6, 2, 2> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 2, 2> transforms = {};
kern_type kernel=sve_interleaved_fp32_mmla_8x3VL;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL.hpp
index cf3069f828..ac731b76ed 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx( ARGLIST );
class cls_sve_interleaved_s8s32_dot_8x3VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 4, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 4, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp
index c668a7b746..1b33014f36 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,31 +55,31 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1b { z0.b }, p0/Z, [x22]\n"
"mov z10.s, #0x0\n"
+ "ld1b { z0.b }, p0/Z, [x22]\n"
"mov z11.s, #0x0\n"
- "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
"mov z12.s, #0x0\n"
+ "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.s, #0x0\n"
- "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"mov z16.s, #0x0\n"
+ "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z17.s, #0x0\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
"mov z18.s, #0x0\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"mov z19.s, #0x0\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"mov z20.s, #0x0\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
"mov z21.s, #0x0\n"
- "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"mov z22.s, #0x0\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"mov z25.s, #0x0\n"
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
@@ -93,7 +93,7 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"sdot z9.s, z1.b, z3.b\n"
"sub x20, x20, #0x2\n"
"sdot z10.s, z2.b, z3.b\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"sdot z11.s, z0.b, z4.b\n"
"sdot z12.s, z1.b, z4.b\n"
"sdot z13.s, z2.b, z4.b\n"
@@ -102,63 +102,63 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"sdot z15.s, z1.b, z5.b\n"
"cmp x20, #0x2\n"
"sdot z16.s, z2.b, z5.b\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #24]\n"
+ "ld1rw { z7.s }, p0/Z, [%x[Apanel], #24]\n"
"sdot z17.s, z0.b, z6.b\n"
"sdot z18.s, z1.b, z6.b\n"
"sdot z19.s, z2.b, z6.b\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #28]\n"
- "sdot z20.s, z0.b, z7.b\n"
- "sdot z21.s, z1.b, z7.b\n"
- "sdot z22.s, z2.b, z7.b\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "sdot z20.s, z0.b, z3.b\n"
+ "sdot z21.s, z1.b, z3.b\n"
+ "sdot z22.s, z2.b, z3.b\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #32]\n"
"sdot z23.s, z0.b, z4.b\n"
"sdot z24.s, z1.b, z4.b\n"
"sdot z25.s, z2.b, z4.b\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #36]\n"
- "sdot z26.s, z0.b, z3.b\n"
- "sdot z27.s, z1.b, z3.b\n"
- "sdot z28.s, z2.b, z3.b\n"
+ "sdot z26.s, z0.b, z7.b\n"
+ "sdot z27.s, z1.b, z7.b\n"
+ "sdot z28.s, z2.b, z7.b\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #40]\n"
- "sdot z29.s, z0.b, z5.b\n"
- "ld1b { z6.b }, p0/Z, [x22, #3, MUL VL]\n"
- "sdot z30.s, z1.b, z5.b\n"
- "sdot z31.s, z2.b, z5.b\n"
- "ld1b { z2.b }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x22, #5, MUL VL]\n"
- "sdot z8.s, z6.b, z7.b\n"
+ "sdot z29.s, z0.b, z6.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "sdot z30.s, z1.b, z6.b\n"
+ "sdot z31.s, z2.b, z6.b\n"
+ "ld1b { z6.b }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #44]\n"
- "sdot z9.s, z2.b, z7.b\n"
- "sdot z10.s, z5.b, z7.b\n"
- "sdot z11.s, z6.b, z4.b\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #48]\n"
- "sdot z12.s, z2.b, z4.b\n"
- "sdot z13.s, z5.b, z4.b\n"
+ "sdot z8.s, z7.b, z5.b\n"
+ "sdot z11.s, z7.b, z4.b\n"
+ "sdot z9.s, z6.b, z5.b\n"
+ "sdot z12.s, z6.b, z4.b\n"
+ "sdot z10.s, z2.b, z5.b\n"
+ "sdot z13.s, z2.b, z4.b\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #48]\n"
+ "sdot z14.s, z7.b, z3.b\n"
+ "sdot z15.s, z6.b, z3.b\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
- "sdot z14.s, z6.b, z3.b\n"
- "sdot z15.s, z2.b, z3.b\n"
- "addvl x22, x22, #6\n"
- "sdot z16.s, z5.b, z3.b\n"
+ "sdot z16.s, z2.b, z3.b\n"
+ "sdot z17.s, z7.b, z1.b\n"
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #56]\n"
- "sdot z17.s, z6.b, z1.b\n"
- "sdot z18.s, z2.b, z1.b\n"
- "sdot z19.s, z5.b, z1.b\n"
+ "sdot z18.s, z6.b, z1.b\n"
+ "sdot z19.s, z2.b, z1.b\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #60]\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
- "sdot z20.s, z6.b, z7.b\n"
- "sdot z21.s, z2.b, z7.b\n"
- "sdot z22.s, z5.b, z7.b\n"
- "sdot z23.s, z6.b, z4.b\n"
+ "sdot z20.s, z7.b, z5.b\n"
+ "sdot z21.s, z6.b, z5.b\n"
+ "sdot z22.s, z2.b, z5.b\n"
+ "sdot z23.s, z7.b, z4.b\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
- "sdot z24.s, z2.b, z4.b\n"
- "sdot z25.s, z5.b, z4.b\n"
+ "sdot z24.s, z6.b, z4.b\n"
+ "sdot z25.s, z2.b, z4.b\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
- "sdot z26.s, z6.b, z0.b\n"
- "sdot z27.s, z2.b, z0.b\n"
- "sdot z28.s, z5.b, z0.b\n"
- "sdot z29.s, z6.b, z1.b\n"
+ "sdot z26.s, z7.b, z0.b\n"
+ "sdot z27.s, z6.b, z0.b\n"
+ "sdot z28.s, z2.b, z0.b\n"
+ "sdot z29.s, z7.b, z1.b\n"
"ld1b { z0.b }, p0/Z, [x22]\n"
- "sdot z30.s, z2.b, z1.b\n"
- "sdot z31.s, z5.b, z1.b\n"
+ "sdot z30.s, z6.b, z1.b\n"
+ "sdot z31.s, z2.b, z1.b\n"
"ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
"ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
@@ -200,19 +200,20 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
"ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
- "sdot z8.s, z6.b, z3.b\n"
+ "addvl x22, x22, #3\n"
"ld1rw { z2.s }, p0/Z, [%x[Apanel], #4]\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #8]\n"
- "sdot z9.s, z5.b, z3.b\n"
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #12]\n"
+ "sdot z8.s, z6.b, z3.b\n"
+ "sdot z9.s, z5.b, z3.b\n"
"sdot z10.s, z4.b, z3.b\n"
"sdot z11.s, z6.b, z2.b\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"sdot z12.s, z5.b, z2.b\n"
"sdot z13.s, z4.b, z2.b\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z2.s }, p0/Z, [%x[Apanel], #20]\n"
"sdot z14.s, z6.b, z1.b\n"
"sdot z15.s, z5.b, z1.b\n"
- "ld1rw { z2.s }, p0/Z, [%x[Apanel], #20]\n"
"sdot z16.s, z4.b, z1.b\n"
"sdot z17.s, z6.b, z0.b\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #24]\n"
@@ -221,10 +222,9 @@ void sve_interleaved_s8s32_dot_8x3VL_a64fx(
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #28]\n"
"sdot z20.s, z6.b, z3.b\n"
"sdot z21.s, z5.b, z3.b\n"
- "addvl x22, x22, #3\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"sdot z22.s, z4.b, z3.b\n"
"sdot z23.s, z6.b, z2.b\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"sdot z24.s, z5.b, z2.b\n"
"sdot z25.s, z4.b, z2.b\n"
"sdot z26.s, z6.b, z1.b\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp
index f6e1a75c15..1ddf171c7e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_dot_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,25 +55,25 @@ void sve_interleaved_s8s32_dot_8x3VL(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z10.s, #0x0\n"
+ "ld1b { z4.b }, p0/Z, [x22]\n"
"mov z11.s, #0x0\n"
- "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z12.s, #0x0\n"
+ "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.s, #0x0\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
"mov z16.s, #0x0\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z17.s, #0x0\n"
- "ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z18.s, #0x0\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
+ "ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z21.s, #0x0\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
@@ -151,12 +151,12 @@ void sve_interleaved_s8s32_dot_8x3VL(
"ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"bge 3b\n"
"4:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "addvl x22, x22, #3\n"
"sdot z8.s, z4.b, z0.b[0]\n"
"sdot z11.s, z4.b, z0.b[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"sdot z14.s, z4.b, z0.b[2]\n"
"sdot z17.s, z4.b, z0.b[3]\n"
- "addvl x22, x22, #3\n"
"sdot z20.s, z4.b, z1.b[0]\n"
"sdot z23.s, z4.b, z1.b[1]\n"
"sdot z26.s, z4.b, z1.b[2]\n"
@@ -183,13 +183,13 @@ void sve_interleaved_s8s32_dot_8x3VL(
"add %x[Apanel], %x[Apanel], #0x20\n"
"ld1b { z2.b }, p0/Z, [x22]\n"
"ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
- "sdot z8.s, z2.b, z4.b[0]\n"
"ld1b { z0.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "sdot z8.s, z2.b, z4.b[0]\n"
"sdot z11.s, z2.b, z4.b[1]\n"
"sdot z14.s, z2.b, z4.b[2]\n"
"sdot z17.s, z2.b, z4.b[3]\n"
"sdot z20.s, z2.b, z3.b[0]\n"
- "addvl x22, x22, #3\n"
"sdot z23.s, z2.b, z3.b[1]\n"
"sdot z26.s, z2.b, z3.b[2]\n"
"sdot z29.s, z2.b, z3.b[3]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp
index 82734abfbe..5ba3e51e6f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,8 @@ void sve_interleaved_s8s32_mmla_8x3VL( ARGLIST );
class cls_sve_interleaved_s8s32_mmla_8x3VL
{
public:
- typedef int8_t operand_type;
+ typedef int8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
typedef int32_t result_type;
typedef void (*kern_type)( ARGLIST );
@@ -61,8 +62,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 6, 8, 2> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 6, 8, 2, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 8, 2> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 8, 2, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp
index bfed5000fc..261648eebe 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,31 +55,31 @@ void sve_interleaved_s8s32_mmla_8x3VL(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
"mov z10.s, #0x0\n"
+ "ld1b { z4.b }, p0/Z, [x22]\n"
"mov z11.s, #0x0\n"
- "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z12.s, #0x0\n"
+ "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.s, #0x0\n"
- "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
"mov z16.s, #0x0\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z17.s, #0x0\n"
- "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"mov z18.s, #0x0\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z19.s, #0x0\n"
- "addvl x22, x22, #2\n"
"mov z20.s, #0x0\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"mov z21.s, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
"mov z22.s, #0x0\n"
+ "addvl x22, x22, #2\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"mov z25.s, #0x0\n"
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
@@ -94,77 +94,77 @@ void sve_interleaved_s8s32_mmla_8x3VL(
".inst 0x4505980b // smmla z11.s, z0.b, z5.b\n"
".inst 0x4504982e // smmla z14.s, z1.b, z4.b\n"
".inst 0x45059831 // smmla z17.s, z1.b, z5.b\n"
- "ld1b { z7.b }, p0/Z, [x22]\n"
+ "ld1b { z3.b }, p0/Z, [x22]\n"
".inst 0x45049854 // smmla z20.s, z2.b, z4.b\n"
".inst 0x45059857 // smmla z23.s, z2.b, z5.b\n"
- "ld1b { z3.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
".inst 0x450498da // smmla z26.s, z6.b, z4.b\n"
".inst 0x450598dd // smmla z29.s, z6.b, z5.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1b { z4.b }, p0/Z, [x22, #3, MUL VL]\n"
- ".inst 0x45079809 // smmla z9.s, z0.b, z7.b\n"
- ".inst 0x4503980c // smmla z12.s, z0.b, z3.b\n"
- ".inst 0x4507982f // smmla z15.s, z1.b, z7.b\n"
- ".inst 0x45039832 // smmla z18.s, z1.b, z3.b\n"
+ "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x45039809 // smmla z9.s, z0.b, z3.b\n"
"sub x20, x20, #0x2\n"
- ".inst 0x45079855 // smmla z21.s, z2.b, z7.b\n"
- ".inst 0x45039858 // smmla z24.s, z2.b, z3.b\n"
+ ".inst 0x4507980c // smmla z12.s, z0.b, z7.b\n"
+ ".inst 0x4503982f // smmla z15.s, z1.b, z3.b\n"
"cmp x20, #0x2\n"
- ".inst 0x450798db // smmla z27.s, z6.b, z7.b\n"
- ".inst 0x450398de // smmla z30.s, z6.b, z3.b\n"
+ ".inst 0x45079832 // smmla z18.s, z1.b, z7.b\n"
+ ".inst 0x45039855 // smmla z21.s, z2.b, z3.b\n"
+ ".inst 0x45079858 // smmla z24.s, z2.b, z7.b\n"
+ ".inst 0x450398db // smmla z27.s, z6.b, z3.b\n"
"ld1b { z3.b }, p0/Z, [x22, #4, MUL VL]\n"
- ".inst 0x4505980a // smmla z10.s, z0.b, z5.b\n"
- ".inst 0x4504980d // smmla z13.s, z0.b, z4.b\n"
+ ".inst 0x450798de // smmla z30.s, z6.b, z7.b\n"
+ ".inst 0x4504980a // smmla z10.s, z0.b, z4.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
+ ".inst 0x4505980d // smmla z13.s, z0.b, z5.b\n"
+ ".inst 0x45049830 // smmla z16.s, z1.b, z4.b\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel], #16]\n"
- ".inst 0x45059830 // smmla z16.s, z1.b, z5.b\n"
- ".inst 0x45049833 // smmla z19.s, z1.b, z4.b\n"
+ ".inst 0x45059833 // smmla z19.s, z1.b, z5.b\n"
+ ".inst 0x45049856 // smmla z22.s, z2.b, z4.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #32]\n"
- ".inst 0x45059856 // smmla z22.s, z2.b, z5.b\n"
- ".inst 0x45049859 // smmla z25.s, z2.b, z4.b\n"
- "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
- ".inst 0x450598dc // smmla z28.s, z6.b, z5.b\n"
- ".inst 0x450498df // smmla z31.s, z6.b, z4.b\n"
- "ld1rqb { z5.b }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x45059859 // smmla z25.s, z2.b, z5.b\n"
+ ".inst 0x450498dc // smmla z28.s, z6.b, z4.b\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x450598df // smmla z31.s, z6.b, z5.b\n"
"ld1rqb { z6.b }, p0/Z, [%x[Apanel], #64]\n"
- "ld1b { z2.b }, p0/Z, [x22, #6, MUL VL]\n"
- ".inst 0x45039808 // smmla z8.s, z0.b, z3.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #7, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x22, #6, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x22, #7, MUL VL]\n"
"addvl x22, x22, #16\n"
+ ".inst 0x45039808 // smmla z8.s, z0.b, z3.b\n"
".inst 0x4507980b // smmla z11.s, z0.b, z7.b\n"
".inst 0x4503982e // smmla z14.s, z1.b, z3.b\n"
".inst 0x45079831 // smmla z17.s, z1.b, z7.b\n"
- ".inst 0x450398b4 // smmla z20.s, z5.b, z3.b\n"
- ".inst 0x450798b7 // smmla z23.s, z5.b, z7.b\n"
+ ".inst 0x45039854 // smmla z20.s, z2.b, z3.b\n"
+ ".inst 0x45079857 // smmla z23.s, z2.b, z7.b\n"
".inst 0x450398da // smmla z26.s, z6.b, z3.b\n"
- ".inst 0x450798dd // smmla z29.s, z6.b, z7.b\n"
"ld1b { z3.b }, p0/Z, [x22, #-8, MUL VL]\n"
+ ".inst 0x450798dd // smmla z29.s, z6.b, z7.b\n"
"ld1b { z7.b }, p0/Z, [x22, #-7, MUL VL]\n"
- ".inst 0x45029809 // smmla z9.s, z0.b, z2.b\n"
- ".inst 0x4504980c // smmla z12.s, z0.b, z4.b\n"
- ".inst 0x4502982f // smmla z15.s, z1.b, z2.b\n"
- ".inst 0x45049832 // smmla z18.s, z1.b, z4.b\n"
- ".inst 0x450298b5 // smmla z21.s, z5.b, z2.b\n"
- ".inst 0x450498b8 // smmla z24.s, z5.b, z4.b\n"
- ".inst 0x450298db // smmla z27.s, z6.b, z2.b\n"
- ".inst 0x450498de // smmla z30.s, z6.b, z4.b\n"
+ ".inst 0x45049809 // smmla z9.s, z0.b, z4.b\n"
+ ".inst 0x4505980c // smmla z12.s, z0.b, z5.b\n"
+ ".inst 0x4504982f // smmla z15.s, z1.b, z4.b\n"
+ ".inst 0x45059832 // smmla z18.s, z1.b, z5.b\n"
+ ".inst 0x45049855 // smmla z21.s, z2.b, z4.b\n"
+ ".inst 0x45059858 // smmla z24.s, z2.b, z5.b\n"
+ ".inst 0x450498db // smmla z27.s, z6.b, z4.b\n"
"ld1b { z4.b }, p0/Z, [x22, #-6, MUL VL]\n"
+ ".inst 0x450598de // smmla z30.s, z6.b, z5.b\n"
".inst 0x4503980a // smmla z10.s, z0.b, z3.b\n"
+ "ld1b { z5.b }, p0/Z, [x22, #-5, MUL VL]\n"
".inst 0x4507980d // smmla z13.s, z0.b, z7.b\n"
- "ld1rqb { z0.b }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x45039830 // smmla z16.s, z1.b, z3.b\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x45079833 // smmla z19.s, z1.b, z7.b\n"
+ ".inst 0x45039856 // smmla z22.s, z2.b, z3.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #96]\n"
- ".inst 0x450398b6 // smmla z22.s, z5.b, z3.b\n"
- ".inst 0x450798b9 // smmla z25.s, z5.b, z7.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #-5, MUL VL]\n"
+ ".inst 0x45079859 // smmla z25.s, z2.b, z7.b\n"
".inst 0x450398dc // smmla z28.s, z6.b, z3.b\n"
- ".inst 0x450798df // smmla z31.s, z6.b, z7.b\n"
"ld1rqb { z2.b }, p0/Z, [%x[Apanel], #112]\n"
+ ".inst 0x450798df // smmla z31.s, z6.b, z7.b\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
"addvl x22, x22, #-4\n"
"bge 3b\n"
"4:" // main loop skip
- "ld1rqb { z7.b }, p0/Z, [%x[Apanel]]\n"
+ "ld1rqb { z3.b }, p0/Z, [%x[Apanel]]\n"
".inst 0x45049808 // smmla z8.s, z0.b, z4.b\n"
".inst 0x4505980b // smmla z11.s, z0.b, z5.b\n"
".inst 0x4504982e // smmla z14.s, z1.b, z4.b\n"
@@ -172,54 +172,54 @@ void sve_interleaved_s8s32_mmla_8x3VL(
"ld1b { z6.b }, p0/Z, [x22]\n"
".inst 0x45049854 // smmla z20.s, z2.b, z4.b\n"
".inst 0x45059857 // smmla z23.s, z2.b, z5.b\n"
- "ld1b { z3.b }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x450498fa // smmla z26.s, z7.b, z4.b\n"
- ".inst 0x450598fd // smmla z29.s, z7.b, z5.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x4504987a // smmla z26.s, z3.b, z4.b\n"
+ ".inst 0x4505987d // smmla z29.s, z3.b, z5.b\n"
"ld1b { z5.b }, p0/Z, [x22, #2, MUL VL]\n"
"ld1b { z4.b }, p0/Z, [x22, #3, MUL VL]\n"
".inst 0x45069809 // smmla z9.s, z0.b, z6.b\n"
- ".inst 0x4503980c // smmla z12.s, z0.b, z3.b\n"
- ".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
- ".inst 0x45039832 // smmla z18.s, z1.b, z3.b\n"
"add %x[Apanel], %x[Apanel], #0x10\n"
- ".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
- ".inst 0x45039858 // smmla z24.s, z2.b, z3.b\n"
+ ".inst 0x4507980c // smmla z12.s, z0.b, z7.b\n"
+ ".inst 0x4506982f // smmla z15.s, z1.b, z6.b\n"
"addvl x22, x22, #4\n"
- ".inst 0x450698fb // smmla z27.s, z7.b, z6.b\n"
- ".inst 0x450398fe // smmla z30.s, z7.b, z3.b\n"
+ ".inst 0x45079832 // smmla z18.s, z1.b, z7.b\n"
+ ".inst 0x45069855 // smmla z21.s, z2.b, z6.b\n"
+ ".inst 0x45079858 // smmla z24.s, z2.b, z7.b\n"
+ ".inst 0x4506987b // smmla z27.s, z3.b, z6.b\n"
+ ".inst 0x4507987e // smmla z30.s, z3.b, z7.b\n"
".inst 0x4505980a // smmla z10.s, z0.b, z5.b\n"
".inst 0x4504980d // smmla z13.s, z0.b, z4.b\n"
".inst 0x45059830 // smmla z16.s, z1.b, z5.b\n"
".inst 0x45049833 // smmla z19.s, z1.b, z4.b\n"
".inst 0x45059856 // smmla z22.s, z2.b, z5.b\n"
".inst 0x45049859 // smmla z25.s, z2.b, z4.b\n"
- ".inst 0x450598fc // smmla z28.s, z7.b, z5.b\n"
- ".inst 0x450498ff // smmla z31.s, z7.b, z4.b\n"
+ ".inst 0x4505987c // smmla z28.s, z3.b, z5.b\n"
+ ".inst 0x4504987f // smmla z31.s, z3.b, z4.b\n"
"cbz x20, 5f\n"
"ld1b { z1.b }, p0/Z, [x22]\n"
"ld1rqb { z7.b }, p0/Z, [%x[Apanel]]\n"
- ".inst 0x450198e8 // smmla z8.s, z7.b, z1.b\n"
"ld1rqb { z6.b }, p0/Z, [%x[Apanel], #16]\n"
"ld1b { z0.b }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x450098eb // smmla z11.s, z7.b, z0.b\n"
"ld1rqb { z5.b }, p0/Z, [%x[Apanel], #32]\n"
"ld1rqb { z4.b }, p0/Z, [%x[Apanel], #48]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x450198e8 // smmla z8.s, z7.b, z1.b\n"
+ "ld1b { z3.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x450098eb // smmla z11.s, z7.b, z0.b\n"
".inst 0x450198ce // smmla z14.s, z6.b, z1.b\n"
".inst 0x450098d1 // smmla z17.s, z6.b, z0.b\n"
".inst 0x450198b4 // smmla z20.s, z5.b, z1.b\n"
- "ld1b { z3.b }, p0/Z, [x22, #2, MUL VL]\n"
".inst 0x450098b7 // smmla z23.s, z5.b, z0.b\n"
".inst 0x4501989a // smmla z26.s, z4.b, z1.b\n"
- "ld1b { z2.b }, p0/Z, [x22, #3, MUL VL]\n"
- ".inst 0x4500989d // smmla z29.s, z4.b, z0.b\n"
"ld1b { z1.b }, p0/Z, [x22, #4, MUL VL]\n"
+ ".inst 0x4500989d // smmla z29.s, z4.b, z0.b\n"
"ld1b { z0.b }, p0/Z, [x22, #5, MUL VL]\n"
".inst 0x450398e9 // smmla z9.s, z7.b, z3.b\n"
".inst 0x450298ec // smmla z12.s, z7.b, z2.b\n"
- "addvl x22, x22, #6\n"
".inst 0x450398cf // smmla z15.s, z6.b, z3.b\n"
+ "addvl x22, x22, #6\n"
".inst 0x450298d2 // smmla z18.s, z6.b, z2.b\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x450398b5 // smmla z21.s, z5.b, z3.b\n"
".inst 0x450298b8 // smmla z24.s, z5.b, z2.b\n"
".inst 0x4503989b // smmla z27.s, z4.b, z3.b\n"
@@ -233,53 +233,53 @@ void sve_interleaved_s8s32_mmla_8x3VL(
".inst 0x4501989c // smmla z28.s, z4.b, z1.b\n"
".inst 0x4500989f // smmla z31.s, z4.b, z0.b\n"
"5:" // multiply loop done
- "uzp1 z0.d, z8.d, z11.d\n"
+ "uzp1 z2.d, z8.d, z11.d\n"
"uzp2 z8.d, z8.d, z11.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel]]\n"
- "uzp1 z0.d, z9.d, z12.d\n"
+ "subs x23, x23, #0x1\n"
+ "uzp1 z1.d, z9.d, z12.d\n"
"uzp2 z9.d, z9.d, z12.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"uzp1 z0.d, z10.d, z13.d\n"
"uzp2 z10.d, z10.d, z13.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
- "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
- "uzp1 z0.d, z14.d, z17.d\n"
+ "st1w { z2.s }, p0, [%x[Cpanel]]\n"
+ "uzp1 z3.d, z14.d, z17.d\n"
"uzp2 z14.d, z14.d, z17.d\n"
- "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
- "uzp1 z1.d, z15.d, z18.d\n"
- "subs x23, x23, #0x1\n"
- "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "uzp1 z17.d, z15.d, z18.d\n"
"uzp2 z15.d, z15.d, z18.d\n"
- "uzp1 z17.d, z16.d, z19.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "uzp1 z2.d, z16.d, z19.d\n"
"uzp2 z16.d, z16.d, z19.d\n"
- "uzp1 z0.d, z20.d, z23.d\n"
- "st1w { z1.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
- "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "uzp1 z1.d, z20.d, z23.d\n"
"uzp2 z20.d, z20.d, z23.d\n"
- "st1w { z17.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
- "uzp1 z23.d, z21.d, z24.d\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "uzp1 z0.d, z21.d, z24.d\n"
"uzp2 z21.d, z21.d, z24.d\n"
- "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
- "uzp1 z19.d, z22.d, z25.d\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "uzp1 z23.d, z22.d, z25.d\n"
"uzp2 z22.d, z22.d, z25.d\n"
- "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
- "uzp1 z18.d, z26.d, z29.d\n"
+ "st1w { z3.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "uzp1 z19.d, z26.d, z29.d\n"
"uzp2 z26.d, z26.d, z29.d\n"
- "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
- "uzp1 z17.d, z27.d, z30.d\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "uzp1 z18.d, z27.d, z30.d\n"
"uzp2 z27.d, z27.d, z30.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
- "uzp1 z16.d, z28.d, z31.d\n"
+ "uzp1 z17.d, z28.d, z31.d\n"
"uzp2 z28.d, z28.d, z31.d\n"
- "st1w { z23.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
- "st1w { z19.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z2.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
"st1w { z20.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
"st1w { z21.s }, p0, [%x[Cpanel]]\n"
"st1w { z22.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
- "st1w { z18.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
- "st1w { z17.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
- "st1w { z16.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"st1w { z26.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"st1w { z27.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
"st1w { z28.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL.hpp
new file mode 100644
index 0000000000..072ffee1cc
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL.hpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include "../std_transforms_sve.hpp"
+#include "../performance_parameters.hpp"
+
+#define ARGLIST \
+ const uint8_t *, const int8_t *, \
+ int32_t *, int, int, int
+
+namespace arm_gemm
+{
+// Actual kernel implementations
+void sve_interleaved_u8s8s32_mmla_8x3VL( ARGLIST );
+
+class cls_sve_interleaved_u8s8s32_mmla_8x3VL
+{
+public:
+ typedef uint8_t lhs_operand_type;
+ typedef int8_t rhs_operand_type;
+ typedef int32_t result_type;
+
+ typedef void (*kern_type)( ARGLIST );
+
+ /* Kernel blocking parameters */
+ static constexpr unsigned int out_height()
+ {
+ return 8;
+ }
+
+ static unsigned int out_width()
+ {
+ return get_vector_length<int32_t>() * 3;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 8;
+ }
+
+
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 8, 2> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 8, 2, true> transforms_quantized = {};
+ template<typename T>
+ static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
+ {
+
+ if (std::is_same<T, uint32_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 61.97, 4.11, 7.93 };
+ case CPUModel::A510:
+ return { 43.18, 3.57, 2.89 };
+ case CPUModel::V1:
+ return { 123.47, 5.03, 11.76 };
+ }
+ }
+
+
+ if (std::is_same<T, uint8_t>::value) {
+ switch (ci->get_cpu_model()) {
+ default:
+ return { 62.00, 4.08, 0.51 };
+ case CPUModel::A510:
+ return { 38.02, 1.85, 0.28 };
+ case CPUModel::V1:
+ return { 95.28, 7.99, 0.79 };
+ }
+ }
+
+ return { 1.0 };
+ }
+
+ // Default to the generic kernel
+ kern_type kernel=sve_interleaved_u8s8s32_mmla_8x3VL;
+ cls_sve_interleaved_u8s8s32_mmla_8x3VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#undef ARGLIST
+#endif // ARM_COMPUTE_ENABLE_SVE
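
This new header is the first consumer of the lhs/rhs split: the A panel is uint8_t, the B panel int8_t, and the kernel maps onto the FEAT_I8MM usmmla instruction (unsigned-by-signed 8-bit matrix multiply-accumulate); the per-CPU-model triples returned by get_performance_parameters are rough throughput estimates used for kernel selection. A scalar model of one 128-bit segment of usmmla, as I read the instruction's definition (usmmla_segment is a hypothetical name):

    #include <cstdint>

    // Per 128-bit segment: the first source supplies a 2x8 unsigned-byte
    // matrix, the second a 2x8 signed-byte matrix used transposed, and the
    // destination accumulates the resulting 2x2 int32 tile.
    static inline void usmmla_segment(int32_t acc[2][2], const uint8_t a[2][8],
                                      const int8_t b[2][8]) {
        for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 2; ++j)
                for (int k = 0; k < 8; ++k)
                    acc[i][j] += int32_t(a[i][k]) * int32_t(b[j][k]);
    }
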
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL/generic.cpp
new file mode 100644
index 0000000000..50a1713b89
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8s8s32_mmla_8x3VL/generic.cpp
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SVE
+
+#include <cstddef>
+#include <cstdint>
+
+namespace arm_gemm {
+
+void sve_interleaved_u8s8s32_mmla_8x3VL(
+ const uint8_t *Apanel,
+ const int8_t *Bpanel,
+ int32_t *Cpanel,
+ int ablocks,
+ int bblocks,
+ int K) {
+
+ struct KernelArgs {
+ size_t K = {};
+ const int8_t *Bpanel = {};
+ size_t bblocks = {};
+ } ka;
+
+ ka.K = (K/8) - 1;
+ ka.Bpanel = Bpanel;
+ ka.bblocks = bblocks;
+
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "1:" // Height loop
+ "ldr x23, [%x[args_ptr], %[offsetof_bblocks]]\n"
+ "ldr x22, [%x[args_ptr], %[offsetof_Bpanel]]\n"
+ "mov x21, %x[Apanel]\n"
+ "2:" // Width loop
+ "ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
+ "mov %x[Apanel], x21\n"
+ "mov z8.s, #0x0\n"
+ "mov z9.s, #0x0\n"
+ "mov z10.s, #0x0\n"
+ "ld1b { z4.b }, p0/Z, [x22]\n"
+ "mov z11.s, #0x0\n"
+ "mov z12.s, #0x0\n"
+ "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
+ "mov z13.s, #0x0\n"
+ "mov z14.s, #0x0\n"
+ "mov z15.s, #0x0\n"
+ "mov z16.s, #0x0\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
+ "mov z17.s, #0x0\n"
+ "mov z18.s, #0x0\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
+ "mov z19.s, #0x0\n"
+ "mov z20.s, #0x0\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
+ "mov z21.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ "addvl x22, x22, #2\n"
+ "mov z23.s, #0x0\n"
+ "mov z24.s, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
+ "mov z25.s, #0x0\n"
+ "mov z26.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "mov z28.s, #0x0\n"
+ "mov z29.s, #0x0\n"
+ "mov z30.s, #0x0\n"
+ "mov z31.s, #0x0\n"
+ "blt 4f\n"
+ "3:" // main loop head
+ "ld1rqb { z6.b }, p0/Z, [%x[Apanel]]\n"
+ ".inst 0x45849808 // usmmla z8.s, z0.b, z4.b\n"
+ ".inst 0x4585980b // usmmla z11.s, z0.b, z5.b\n"
+ ".inst 0x4584982e // usmmla z14.s, z1.b, z4.b\n"
+ ".inst 0x45859831 // usmmla z17.s, z1.b, z5.b\n"
+ "ld1b { z3.b }, p0/Z, [x22]\n"
+ ".inst 0x45849854 // usmmla z20.s, z2.b, z4.b\n"
+ ".inst 0x45859857 // usmmla z23.s, z2.b, z5.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x458498da // usmmla z26.s, z6.b, z4.b\n"
+ ".inst 0x458598dd // usmmla z29.s, z6.b, z5.b\n"
+ "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x45839809 // usmmla z9.s, z0.b, z3.b\n"
+ "sub x20, x20, #0x2\n"
+ ".inst 0x4587980c // usmmla z12.s, z0.b, z7.b\n"
+ ".inst 0x4583982f // usmmla z15.s, z1.b, z3.b\n"
+ "cmp x20, #0x2\n"
+ ".inst 0x45879832 // usmmla z18.s, z1.b, z7.b\n"
+ ".inst 0x45839855 // usmmla z21.s, z2.b, z3.b\n"
+ ".inst 0x45879858 // usmmla z24.s, z2.b, z7.b\n"
+ ".inst 0x458398db // usmmla z27.s, z6.b, z3.b\n"
+ "ld1b { z3.b }, p0/Z, [x22, #4, MUL VL]\n"
+ ".inst 0x458798de // usmmla z30.s, z6.b, z7.b\n"
+ ".inst 0x4584980a // usmmla z10.s, z0.b, z4.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
+ ".inst 0x4585980d // usmmla z13.s, z0.b, z5.b\n"
+ ".inst 0x45849830 // usmmla z16.s, z1.b, z4.b\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel], #16]\n"
+ ".inst 0x45859833 // usmmla z19.s, z1.b, z5.b\n"
+ ".inst 0x45849856 // usmmla z22.s, z2.b, z4.b\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #32]\n"
+ ".inst 0x45859859 // usmmla z25.s, z2.b, z5.b\n"
+ ".inst 0x458498dc // usmmla z28.s, z6.b, z4.b\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x458598df // usmmla z31.s, z6.b, z5.b\n"
+ "ld1rqb { z6.b }, p0/Z, [%x[Apanel], #64]\n"
+ "ld1b { z4.b }, p0/Z, [x22, #6, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x22, #7, MUL VL]\n"
+ "addvl x22, x22, #16\n"
+ ".inst 0x45839808 // usmmla z8.s, z0.b, z3.b\n"
+ ".inst 0x4587980b // usmmla z11.s, z0.b, z7.b\n"
+ ".inst 0x4583982e // usmmla z14.s, z1.b, z3.b\n"
+ ".inst 0x45879831 // usmmla z17.s, z1.b, z7.b\n"
+ ".inst 0x45839854 // usmmla z20.s, z2.b, z3.b\n"
+ ".inst 0x45879857 // usmmla z23.s, z2.b, z7.b\n"
+ ".inst 0x458398da // usmmla z26.s, z6.b, z3.b\n"
+ "ld1b { z3.b }, p0/Z, [x22, #-8, MUL VL]\n"
+ ".inst 0x458798dd // usmmla z29.s, z6.b, z7.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #-7, MUL VL]\n"
+ ".inst 0x45849809 // usmmla z9.s, z0.b, z4.b\n"
+ ".inst 0x4585980c // usmmla z12.s, z0.b, z5.b\n"
+ ".inst 0x4584982f // usmmla z15.s, z1.b, z4.b\n"
+ ".inst 0x45859832 // usmmla z18.s, z1.b, z5.b\n"
+ ".inst 0x45849855 // usmmla z21.s, z2.b, z4.b\n"
+ ".inst 0x45859858 // usmmla z24.s, z2.b, z5.b\n"
+ ".inst 0x458498db // usmmla z27.s, z6.b, z4.b\n"
+ "ld1b { z4.b }, p0/Z, [x22, #-6, MUL VL]\n"
+ ".inst 0x458598de // usmmla z30.s, z6.b, z5.b\n"
+ ".inst 0x4583980a // usmmla z10.s, z0.b, z3.b\n"
+ "ld1b { z5.b }, p0/Z, [x22, #-5, MUL VL]\n"
+ ".inst 0x4587980d // usmmla z13.s, z0.b, z7.b\n"
+ ".inst 0x45839830 // usmmla z16.s, z1.b, z3.b\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel], #80]\n"
+ ".inst 0x45879833 // usmmla z19.s, z1.b, z7.b\n"
+ ".inst 0x45839856 // usmmla z22.s, z2.b, z3.b\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #96]\n"
+ ".inst 0x45879859 // usmmla z25.s, z2.b, z7.b\n"
+ ".inst 0x458398dc // usmmla z28.s, z6.b, z3.b\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #112]\n"
+ ".inst 0x458798df // usmmla z31.s, z6.b, z7.b\n"
+ "add %x[Apanel], %x[Apanel], #0x80\n"
+ "addvl x22, x22, #-4\n"
+ "bge 3b\n"
+ "4:" // main loop skip
+ "ld1rqb { z3.b }, p0/Z, [%x[Apanel]]\n"
+ ".inst 0x45849808 // usmmla z8.s, z0.b, z4.b\n"
+ ".inst 0x4585980b // usmmla z11.s, z0.b, z5.b\n"
+ ".inst 0x4584982e // usmmla z14.s, z1.b, z4.b\n"
+ ".inst 0x45859831 // usmmla z17.s, z1.b, z5.b\n"
+ "ld1b { z6.b }, p0/Z, [x22]\n"
+ ".inst 0x45849854 // usmmla z20.s, z2.b, z4.b\n"
+ ".inst 0x45859857 // usmmla z23.s, z2.b, z5.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x4584987a // usmmla z26.s, z3.b, z4.b\n"
+ ".inst 0x4585987d // usmmla z29.s, z3.b, z5.b\n"
+ "ld1b { z5.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x45869809 // usmmla z9.s, z0.b, z6.b\n"
+ "add %x[Apanel], %x[Apanel], #0x10\n"
+ ".inst 0x4587980c // usmmla z12.s, z0.b, z7.b\n"
+ ".inst 0x4586982f // usmmla z15.s, z1.b, z6.b\n"
+ "addvl x22, x22, #4\n"
+ ".inst 0x45879832 // usmmla z18.s, z1.b, z7.b\n"
+ ".inst 0x45869855 // usmmla z21.s, z2.b, z6.b\n"
+ ".inst 0x45879858 // usmmla z24.s, z2.b, z7.b\n"
+ ".inst 0x4586987b // usmmla z27.s, z3.b, z6.b\n"
+ ".inst 0x4587987e // usmmla z30.s, z3.b, z7.b\n"
+ ".inst 0x4585980a // usmmla z10.s, z0.b, z5.b\n"
+ ".inst 0x4584980d // usmmla z13.s, z0.b, z4.b\n"
+ ".inst 0x45859830 // usmmla z16.s, z1.b, z5.b\n"
+ ".inst 0x45849833 // usmmla z19.s, z1.b, z4.b\n"
+ ".inst 0x45859856 // usmmla z22.s, z2.b, z5.b\n"
+ ".inst 0x45849859 // usmmla z25.s, z2.b, z4.b\n"
+ ".inst 0x4585987c // usmmla z28.s, z3.b, z5.b\n"
+ ".inst 0x4584987f // usmmla z31.s, z3.b, z4.b\n"
+ "cbz x20, 5f\n"
+ "ld1b { z1.b }, p0/Z, [x22]\n"
+ "ld1rqb { z7.b }, p0/Z, [%x[Apanel]]\n"
+ "ld1rqb { z6.b }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1b { z0.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1rqb { z5.b }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1rqb { z4.b }, p0/Z, [%x[Apanel], #48]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x458198e8 // usmmla z8.s, z7.b, z1.b\n"
+ "ld1b { z3.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x458098eb // usmmla z11.s, z7.b, z0.b\n"
+ ".inst 0x458198ce // usmmla z14.s, z6.b, z1.b\n"
+ ".inst 0x458098d1 // usmmla z17.s, z6.b, z0.b\n"
+ ".inst 0x458198b4 // usmmla z20.s, z5.b, z1.b\n"
+ ".inst 0x458098b7 // usmmla z23.s, z5.b, z0.b\n"
+ ".inst 0x4581989a // usmmla z26.s, z4.b, z1.b\n"
+ "ld1b { z1.b }, p0/Z, [x22, #4, MUL VL]\n"
+ ".inst 0x4580989d // usmmla z29.s, z4.b, z0.b\n"
+ "ld1b { z0.b }, p0/Z, [x22, #5, MUL VL]\n"
+ ".inst 0x458398e9 // usmmla z9.s, z7.b, z3.b\n"
+ ".inst 0x458298ec // usmmla z12.s, z7.b, z2.b\n"
+ ".inst 0x458398cf // usmmla z15.s, z6.b, z3.b\n"
+ "addvl x22, x22, #6\n"
+ ".inst 0x458298d2 // usmmla z18.s, z6.b, z2.b\n"
+ ".inst 0x458398b5 // usmmla z21.s, z5.b, z3.b\n"
+ ".inst 0x458298b8 // usmmla z24.s, z5.b, z2.b\n"
+ ".inst 0x4583989b // usmmla z27.s, z4.b, z3.b\n"
+ ".inst 0x4582989e // usmmla z30.s, z4.b, z2.b\n"
+ ".inst 0x458198ea // usmmla z10.s, z7.b, z1.b\n"
+ ".inst 0x458098ed // usmmla z13.s, z7.b, z0.b\n"
+ ".inst 0x458198d0 // usmmla z16.s, z6.b, z1.b\n"
+ ".inst 0x458098d3 // usmmla z19.s, z6.b, z0.b\n"
+ ".inst 0x458198b6 // usmmla z22.s, z5.b, z1.b\n"
+ ".inst 0x458098b9 // usmmla z25.s, z5.b, z0.b\n"
+ ".inst 0x4581989c // usmmla z28.s, z4.b, z1.b\n"
+ ".inst 0x4580989f // usmmla z31.s, z4.b, z0.b\n"
+ "5:" // multiply loop done
+ "uzp1 z2.d, z8.d, z11.d\n"
+ "uzp2 z8.d, z8.d, z11.d\n"
+ "subs x23, x23, #0x1\n"
+ "uzp1 z1.d, z9.d, z12.d\n"
+ "uzp2 z9.d, z9.d, z12.d\n"
+ "uzp1 z0.d, z10.d, z13.d\n"
+ "uzp2 z10.d, z10.d, z13.d\n"
+ "st1w { z2.s }, p0, [%x[Cpanel]]\n"
+ "uzp1 z3.d, z14.d, z17.d\n"
+ "uzp2 z14.d, z14.d, z17.d\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "uzp1 z17.d, z15.d, z18.d\n"
+ "uzp2 z15.d, z15.d, z18.d\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "uzp1 z2.d, z16.d, z19.d\n"
+ "uzp2 z16.d, z16.d, z19.d\n"
+ "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "uzp1 z1.d, z20.d, z23.d\n"
+ "uzp2 z20.d, z20.d, z23.d\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "uzp1 z0.d, z21.d, z24.d\n"
+ "uzp2 z21.d, z21.d, z24.d\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "uzp1 z23.d, z22.d, z25.d\n"
+ "uzp2 z22.d, z22.d, z25.d\n"
+ "st1w { z3.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "uzp1 z19.d, z26.d, z29.d\n"
+ "uzp2 z26.d, z26.d, z29.d\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "uzp1 z18.d, z27.d, z30.d\n"
+ "uzp2 z27.d, z27.d, z30.d\n"
+ "uzp1 z17.d, z28.d, z31.d\n"
+ "uzp2 z28.d, z28.d, z31.d\n"
+ "st1w { z2.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z20.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
+ "st1w { z21.s }, p0, [%x[Cpanel]]\n"
+ "st1w { z22.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z26.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z27.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z28.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #8\n"
+ "bgt 2b\n"
+ "subs %x[ablocks], %x[ablocks], #0x1\n"
+ "bne 1b\n"
+ : [Apanel] "+&r" (Apanel), [Cpanel] "+&r" (Cpanel), [ablocks] "+&r" (ablocks)
+ : [args_ptr] "r" (&ka), [offsetof_Bpanel] "I" (offsetof(KernelArgs, Bpanel)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_bblocks] "I" (offsetof(KernelArgs, bblocks))
+ : "cc", "memory", "p0", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+#endif // ARM_COMPUTE_ENABLE_SVE
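
The store epilogue in the kernel above (label 5) de-interleaves the mmla accumulators before writing C: each accumulator pair (e.g. z8/z11) holds 2x2 tiles whose rows sit in alternating 64-bit elements, so uzp1/uzp2 on .d elements gather the row-0 and row-1 halves respectively into contiguous vectors for st1w. A scalar model of the .d-element UZP pair, with hypothetical name uzp_d:

    #include <cstddef>
    #include <cstdint>

    // n = number of 64-bit elements per vector; za/zb are the two inputs.
    // uzp1 concatenates the even elements of za then zb; uzp2 the odd ones.
    static inline void uzp_d(const uint64_t *za, const uint64_t *zb,
                             uint64_t *even, uint64_t *odd, size_t n) {
        for (size_t i = 0; i < n / 2; ++i) {
            even[i]         = za[2 * i];      // uzp1, first source
            odd[i]          = za[2 * i + 1];  // uzp2, first source
            even[n / 2 + i] = zb[2 * i];      // uzp1, second source
            odd[n / 2 + i]  = zb[2 * i + 1];  // uzp2, second source
        }
    }
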
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL.hpp
index c0b215ccb4..bcb3279adb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,7 +40,8 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx( ARGLIST );
class cls_sve_interleaved_u8u32_dot_8x3VL
{
public:
- typedef uint8_t operand_type;
+ typedef uint8_t lhs_operand_type;
+ typedef uint8_t rhs_operand_type;
typedef uint32_t result_type;
typedef void (*kern_type)( ARGLIST );
@@ -62,8 +63,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 3, 4, 1, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 4, 1> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 3, 4, 1, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp
index 79e794a834..171cf38fa6 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/a64fx.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,31 +55,31 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1b { z0.b }, p0/Z, [x22]\n"
"mov z10.s, #0x0\n"
+ "ld1b { z0.b }, p0/Z, [x22]\n"
"mov z11.s, #0x0\n"
- "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
"mov z12.s, #0x0\n"
+ "ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.s, #0x0\n"
- "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"mov z16.s, #0x0\n"
+ "ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z17.s, #0x0\n"
- "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
"mov z18.s, #0x0\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
"mov z19.s, #0x0\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"mov z20.s, #0x0\n"
+ "ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
"mov z21.s, #0x0\n"
- "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"mov z22.s, #0x0\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #12]\n"
"mov z25.s, #0x0\n"
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
@@ -93,7 +93,7 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"udot z9.s, z1.b, z3.b\n"
"sub x20, x20, #0x2\n"
"udot z10.s, z2.b, z3.b\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"udot z11.s, z0.b, z4.b\n"
"udot z12.s, z1.b, z4.b\n"
"udot z13.s, z2.b, z4.b\n"
@@ -102,63 +102,63 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"udot z15.s, z1.b, z5.b\n"
"cmp x20, #0x2\n"
"udot z16.s, z2.b, z5.b\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #24]\n"
+ "ld1rw { z7.s }, p0/Z, [%x[Apanel], #24]\n"
"udot z17.s, z0.b, z6.b\n"
"udot z18.s, z1.b, z6.b\n"
"udot z19.s, z2.b, z6.b\n"
- "ld1rw { z5.s }, p0/Z, [%x[Apanel], #28]\n"
- "udot z20.s, z0.b, z7.b\n"
- "udot z21.s, z1.b, z7.b\n"
- "udot z22.s, z2.b, z7.b\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #32]\n"
+ "ld1rw { z6.s }, p0/Z, [%x[Apanel], #28]\n"
+ "udot z20.s, z0.b, z3.b\n"
+ "udot z21.s, z1.b, z3.b\n"
+ "udot z22.s, z2.b, z3.b\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #32]\n"
"udot z23.s, z0.b, z4.b\n"
"udot z24.s, z1.b, z4.b\n"
"udot z25.s, z2.b, z4.b\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #36]\n"
- "udot z26.s, z0.b, z3.b\n"
- "udot z27.s, z1.b, z3.b\n"
- "udot z28.s, z2.b, z3.b\n"
+ "udot z26.s, z0.b, z7.b\n"
+ "udot z27.s, z1.b, z7.b\n"
+ "udot z28.s, z2.b, z7.b\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel], #40]\n"
- "udot z29.s, z0.b, z5.b\n"
- "ld1b { z6.b }, p0/Z, [x22, #3, MUL VL]\n"
- "udot z30.s, z1.b, z5.b\n"
- "udot z31.s, z2.b, z5.b\n"
- "ld1b { z2.b }, p0/Z, [x22, #4, MUL VL]\n"
- "ld1b { z5.b }, p0/Z, [x22, #5, MUL VL]\n"
- "udot z8.s, z6.b, z7.b\n"
+ "udot z29.s, z0.b, z6.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "udot z30.s, z1.b, z6.b\n"
+ "udot z31.s, z2.b, z6.b\n"
+ "ld1b { z6.b }, p0/Z, [x22, #4, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x22, #5, MUL VL]\n"
+ "addvl x22, x22, #6\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #44]\n"
- "udot z9.s, z2.b, z7.b\n"
- "udot z10.s, z5.b, z7.b\n"
- "udot z11.s, z6.b, z4.b\n"
- "ld1rw { z7.s }, p0/Z, [%x[Apanel], #48]\n"
- "udot z12.s, z2.b, z4.b\n"
- "udot z13.s, z5.b, z4.b\n"
+ "udot z8.s, z7.b, z5.b\n"
+ "udot z11.s, z7.b, z4.b\n"
+ "udot z9.s, z6.b, z5.b\n"
+ "udot z12.s, z6.b, z4.b\n"
+ "udot z10.s, z2.b, z5.b\n"
+ "udot z13.s, z2.b, z4.b\n"
+ "ld1rw { z5.s }, p0/Z, [%x[Apanel], #48]\n"
+ "udot z14.s, z7.b, z3.b\n"
+ "udot z15.s, z6.b, z3.b\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #52]\n"
- "udot z14.s, z6.b, z3.b\n"
- "udot z15.s, z2.b, z3.b\n"
- "addvl x22, x22, #6\n"
- "udot z16.s, z5.b, z3.b\n"
+ "udot z16.s, z2.b, z3.b\n"
+ "udot z17.s, z7.b, z1.b\n"
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #56]\n"
- "udot z17.s, z6.b, z1.b\n"
- "udot z18.s, z2.b, z1.b\n"
- "udot z19.s, z5.b, z1.b\n"
+ "udot z18.s, z6.b, z1.b\n"
+ "udot z19.s, z2.b, z1.b\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #60]\n"
"add %x[Apanel], %x[Apanel], #0x40\n"
- "udot z20.s, z6.b, z7.b\n"
- "udot z21.s, z2.b, z7.b\n"
- "udot z22.s, z5.b, z7.b\n"
- "udot z23.s, z6.b, z4.b\n"
+ "udot z20.s, z7.b, z5.b\n"
+ "udot z21.s, z6.b, z5.b\n"
+ "udot z22.s, z2.b, z5.b\n"
+ "udot z23.s, z7.b, z4.b\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
- "udot z24.s, z2.b, z4.b\n"
- "udot z25.s, z5.b, z4.b\n"
+ "udot z24.s, z6.b, z4.b\n"
+ "udot z25.s, z2.b, z4.b\n"
"ld1rw { z4.s }, p0/Z, [%x[Apanel], #4]\n"
- "udot z26.s, z6.b, z0.b\n"
- "udot z27.s, z2.b, z0.b\n"
- "udot z28.s, z5.b, z0.b\n"
- "udot z29.s, z6.b, z1.b\n"
+ "udot z26.s, z7.b, z0.b\n"
+ "udot z27.s, z6.b, z0.b\n"
+ "udot z28.s, z2.b, z0.b\n"
+ "udot z29.s, z7.b, z1.b\n"
"ld1b { z0.b }, p0/Z, [x22]\n"
- "udot z30.s, z2.b, z1.b\n"
- "udot z31.s, z5.b, z1.b\n"
+ "udot z30.s, z6.b, z1.b\n"
+ "udot z31.s, z2.b, z1.b\n"
"ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
"ld1b { z2.b }, p0/Z, [x22, #2, MUL VL]\n"
"ld1rw { z5.s }, p0/Z, [%x[Apanel], #8]\n"
@@ -200,19 +200,20 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
"ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
"ld1rw { z3.s }, p0/Z, [%x[Apanel]]\n"
- "udot z8.s, z6.b, z3.b\n"
+ "addvl x22, x22, #3\n"
"ld1rw { z2.s }, p0/Z, [%x[Apanel], #4]\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #8]\n"
- "udot z9.s, z5.b, z3.b\n"
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #12]\n"
+ "udot z8.s, z6.b, z3.b\n"
+ "udot z9.s, z5.b, z3.b\n"
"udot z10.s, z4.b, z3.b\n"
"udot z11.s, z6.b, z2.b\n"
+ "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
"udot z12.s, z5.b, z2.b\n"
"udot z13.s, z4.b, z2.b\n"
- "ld1rw { z3.s }, p0/Z, [%x[Apanel], #16]\n"
+ "ld1rw { z2.s }, p0/Z, [%x[Apanel], #20]\n"
"udot z14.s, z6.b, z1.b\n"
"udot z15.s, z5.b, z1.b\n"
- "ld1rw { z2.s }, p0/Z, [%x[Apanel], #20]\n"
"udot z16.s, z4.b, z1.b\n"
"udot z17.s, z6.b, z0.b\n"
"ld1rw { z1.s }, p0/Z, [%x[Apanel], #24]\n"
@@ -221,10 +222,9 @@ void sve_interleaved_u8u32_dot_8x3VL_a64fx(
"ld1rw { z0.s }, p0/Z, [%x[Apanel], #28]\n"
"udot z20.s, z6.b, z3.b\n"
"udot z21.s, z5.b, z3.b\n"
- "addvl x22, x22, #3\n"
+ "add %x[Apanel], %x[Apanel], #0x20\n"
"udot z22.s, z4.b, z3.b\n"
"udot z23.s, z6.b, z2.b\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"udot z24.s, z5.b, z2.b\n"
"udot z25.s, z4.b, z2.b\n"
"udot z26.s, z6.b, z1.b\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp
index 1c88336c2d..dc31a73c13 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,25 +55,25 @@ void sve_interleaved_u8u32_dot_8x3VL(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z10.s, #0x0\n"
+ "ld1b { z4.b }, p0/Z, [x22]\n"
"mov z11.s, #0x0\n"
- "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z12.s, #0x0\n"
+ "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.s, #0x0\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
"mov z16.s, #0x0\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z17.s, #0x0\n"
- "ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z18.s, #0x0\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z19.s, #0x0\n"
"mov z20.s, #0x0\n"
+ "ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"mov z21.s, #0x0\n"
"mov z22.s, #0x0\n"
"mov z23.s, #0x0\n"
@@ -151,12 +151,12 @@ void sve_interleaved_u8u32_dot_8x3VL(
"ld1b { z6.b }, p0/Z, [x22, #2, MUL VL]\n"
"bge 3b\n"
"4:" // main loop skip
+ "add %x[Apanel], %x[Apanel], #0x20\n"
+ "addvl x22, x22, #3\n"
"udot z8.s, z4.b, z0.b[0]\n"
"udot z11.s, z4.b, z0.b[1]\n"
- "add %x[Apanel], %x[Apanel], #0x20\n"
"udot z14.s, z4.b, z0.b[2]\n"
"udot z17.s, z4.b, z0.b[3]\n"
- "addvl x22, x22, #3\n"
"udot z20.s, z4.b, z1.b[0]\n"
"udot z23.s, z4.b, z1.b[1]\n"
"udot z26.s, z4.b, z1.b[2]\n"
@@ -183,13 +183,13 @@ void sve_interleaved_u8u32_dot_8x3VL(
"add %x[Apanel], %x[Apanel], #0x20\n"
"ld1b { z2.b }, p0/Z, [x22]\n"
"ld1b { z1.b }, p0/Z, [x22, #1, MUL VL]\n"
- "udot z8.s, z2.b, z4.b[0]\n"
"ld1b { z0.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "udot z8.s, z2.b, z4.b[0]\n"
"udot z11.s, z2.b, z4.b[1]\n"
"udot z14.s, z2.b, z4.b[2]\n"
"udot z17.s, z2.b, z4.b[3]\n"
"udot z20.s, z2.b, z3.b[0]\n"
- "addvl x22, x22, #3\n"
"udot z23.s, z2.b, z3.b[1]\n"
"udot z26.s, z2.b, z3.b[2]\n"
"udot z29.s, z2.b, z3.b[3]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp
index 067d0bf258..f6a526d879 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,7 +39,8 @@ void sve_interleaved_u8u32_mmla_8x3VL( ARGLIST );
class cls_sve_interleaved_u8u32_mmla_8x3VL
{
public:
- typedef uint8_t operand_type;
+ typedef uint8_t lhs_operand_type;
+ typedef uint8_t rhs_operand_type;
typedef uint32_t result_type;
typedef void (*kern_type)( ARGLIST );
@@ -61,8 +62,8 @@ public:
}
- StdTransformsSVE<operand_type, result_type, 8, 6, 8, 2> transforms = {};
- StdTransformsSVE<operand_type, result_type, 8, 6, 8, 2, true> transforms_quantized = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 8, 2> transforms = {};
+ StdTransformsSVE<lhs_operand_type, rhs_operand_type, result_type, 8, 6, 8, 2, true> transforms_quantized = {};
template<typename T>
static inline PerformanceParameters get_performance_parameters(const CPUInfo *ci)
{
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp
index 28449ea99b..e5389a771d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_8x3VL/generic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,31 +55,31 @@ void sve_interleaved_u8u32_mmla_8x3VL(
"2:" // Width loop
"ldr x20, [%x[args_ptr], %[offsetof_K]]\n"
"mov %x[Apanel], x21\n"
- "cmp x20, #0x2\n"
"mov z8.s, #0x0\n"
"mov z9.s, #0x0\n"
- "ld1b { z4.b }, p0/Z, [x22]\n"
"mov z10.s, #0x0\n"
+ "ld1b { z4.b }, p0/Z, [x22]\n"
"mov z11.s, #0x0\n"
- "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z12.s, #0x0\n"
+ "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "cmp x20, #0x2\n"
"mov z13.s, #0x0\n"
- "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z14.s, #0x0\n"
"mov z15.s, #0x0\n"
- "ld1b { z5.b }, p0/Z, [x22, #1, MUL VL]\n"
"mov z16.s, #0x0\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel]]\n"
"mov z17.s, #0x0\n"
- "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"mov z18.s, #0x0\n"
+ "ld1rqb { z1.b }, p0/Z, [%x[Apanel], #16]\n"
"mov z19.s, #0x0\n"
- "addvl x22, x22, #2\n"
"mov z20.s, #0x0\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #32]\n"
"mov z21.s, #0x0\n"
- "add %x[Apanel], %x[Apanel], #0x30\n"
"mov z22.s, #0x0\n"
+ "addvl x22, x22, #2\n"
"mov z23.s, #0x0\n"
"mov z24.s, #0x0\n"
+ "add %x[Apanel], %x[Apanel], #0x30\n"
"mov z25.s, #0x0\n"
"mov z26.s, #0x0\n"
"mov z27.s, #0x0\n"
@@ -94,77 +94,77 @@ void sve_interleaved_u8u32_mmla_8x3VL(
".inst 0x45c5980b // ummla z11.s, z0.b, z5.b\n"
".inst 0x45c4982e // ummla z14.s, z1.b, z4.b\n"
".inst 0x45c59831 // ummla z17.s, z1.b, z5.b\n"
- "ld1b { z7.b }, p0/Z, [x22]\n"
+ "ld1b { z3.b }, p0/Z, [x22]\n"
".inst 0x45c49854 // ummla z20.s, z2.b, z4.b\n"
".inst 0x45c59857 // ummla z23.s, z2.b, z5.b\n"
- "ld1b { z3.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
".inst 0x45c498da // ummla z26.s, z6.b, z4.b\n"
".inst 0x45c598dd // ummla z29.s, z6.b, z5.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #2, MUL VL]\n"
- "ld1b { z4.b }, p0/Z, [x22, #3, MUL VL]\n"
- ".inst 0x45c79809 // ummla z9.s, z0.b, z7.b\n"
- ".inst 0x45c3980c // ummla z12.s, z0.b, z3.b\n"
- ".inst 0x45c7982f // ummla z15.s, z1.b, z7.b\n"
- ".inst 0x45c39832 // ummla z18.s, z1.b, z3.b\n"
+ "ld1b { z4.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x45c39809 // ummla z9.s, z0.b, z3.b\n"
"sub x20, x20, #0x2\n"
- ".inst 0x45c79855 // ummla z21.s, z2.b, z7.b\n"
- ".inst 0x45c39858 // ummla z24.s, z2.b, z3.b\n"
+ ".inst 0x45c7980c // ummla z12.s, z0.b, z7.b\n"
+ ".inst 0x45c3982f // ummla z15.s, z1.b, z3.b\n"
"cmp x20, #0x2\n"
- ".inst 0x45c798db // ummla z27.s, z6.b, z7.b\n"
- ".inst 0x45c398de // ummla z30.s, z6.b, z3.b\n"
+ ".inst 0x45c79832 // ummla z18.s, z1.b, z7.b\n"
+ ".inst 0x45c39855 // ummla z21.s, z2.b, z3.b\n"
+ ".inst 0x45c79858 // ummla z24.s, z2.b, z7.b\n"
+ ".inst 0x45c398db // ummla z27.s, z6.b, z3.b\n"
"ld1b { z3.b }, p0/Z, [x22, #4, MUL VL]\n"
- ".inst 0x45c5980a // ummla z10.s, z0.b, z5.b\n"
- ".inst 0x45c4980d // ummla z13.s, z0.b, z4.b\n"
+ ".inst 0x45c798de // ummla z30.s, z6.b, z7.b\n"
+ ".inst 0x45c4980a // ummla z10.s, z0.b, z4.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
+ ".inst 0x45c5980d // ummla z13.s, z0.b, z5.b\n"
+ ".inst 0x45c49830 // ummla z16.s, z1.b, z4.b\n"
"ld1rqb { z0.b }, p0/Z, [%x[Apanel], #16]\n"
- ".inst 0x45c59830 // ummla z16.s, z1.b, z5.b\n"
- ".inst 0x45c49833 // ummla z19.s, z1.b, z4.b\n"
+ ".inst 0x45c59833 // ummla z19.s, z1.b, z5.b\n"
+ ".inst 0x45c49856 // ummla z22.s, z2.b, z4.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #32]\n"
- ".inst 0x45c59856 // ummla z22.s, z2.b, z5.b\n"
- ".inst 0x45c49859 // ummla z25.s, z2.b, z4.b\n"
- "ld1b { z7.b }, p0/Z, [x22, #5, MUL VL]\n"
- ".inst 0x45c598dc // ummla z28.s, z6.b, z5.b\n"
- ".inst 0x45c498df // ummla z31.s, z6.b, z4.b\n"
- "ld1rqb { z5.b }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x45c59859 // ummla z25.s, z2.b, z5.b\n"
+ ".inst 0x45c498dc // ummla z28.s, z6.b, z4.b\n"
+ "ld1rqb { z2.b }, p0/Z, [%x[Apanel], #48]\n"
+ ".inst 0x45c598df // ummla z31.s, z6.b, z5.b\n"
"ld1rqb { z6.b }, p0/Z, [%x[Apanel], #64]\n"
- "ld1b { z2.b }, p0/Z, [x22, #6, MUL VL]\n"
- ".inst 0x45c39808 // ummla z8.s, z0.b, z3.b\n"
- "ld1b { z4.b }, p0/Z, [x22, #7, MUL VL]\n"
+ "ld1b { z4.b }, p0/Z, [x22, #6, MUL VL]\n"
+ "ld1b { z5.b }, p0/Z, [x22, #7, MUL VL]\n"
"addvl x22, x22, #16\n"
+ ".inst 0x45c39808 // ummla z8.s, z0.b, z3.b\n"
".inst 0x45c7980b // ummla z11.s, z0.b, z7.b\n"
".inst 0x45c3982e // ummla z14.s, z1.b, z3.b\n"
".inst 0x45c79831 // ummla z17.s, z1.b, z7.b\n"
- ".inst 0x45c398b4 // ummla z20.s, z5.b, z3.b\n"
- ".inst 0x45c798b7 // ummla z23.s, z5.b, z7.b\n"
+ ".inst 0x45c39854 // ummla z20.s, z2.b, z3.b\n"
+ ".inst 0x45c79857 // ummla z23.s, z2.b, z7.b\n"
".inst 0x45c398da // ummla z26.s, z6.b, z3.b\n"
- ".inst 0x45c798dd // ummla z29.s, z6.b, z7.b\n"
"ld1b { z3.b }, p0/Z, [x22, #-8, MUL VL]\n"
+ ".inst 0x45c798dd // ummla z29.s, z6.b, z7.b\n"
"ld1b { z7.b }, p0/Z, [x22, #-7, MUL VL]\n"
- ".inst 0x45c29809 // ummla z9.s, z0.b, z2.b\n"
- ".inst 0x45c4980c // ummla z12.s, z0.b, z4.b\n"
- ".inst 0x45c2982f // ummla z15.s, z1.b, z2.b\n"
- ".inst 0x45c49832 // ummla z18.s, z1.b, z4.b\n"
- ".inst 0x45c298b5 // ummla z21.s, z5.b, z2.b\n"
- ".inst 0x45c498b8 // ummla z24.s, z5.b, z4.b\n"
- ".inst 0x45c298db // ummla z27.s, z6.b, z2.b\n"
- ".inst 0x45c498de // ummla z30.s, z6.b, z4.b\n"
+ ".inst 0x45c49809 // ummla z9.s, z0.b, z4.b\n"
+ ".inst 0x45c5980c // ummla z12.s, z0.b, z5.b\n"
+ ".inst 0x45c4982f // ummla z15.s, z1.b, z4.b\n"
+ ".inst 0x45c59832 // ummla z18.s, z1.b, z5.b\n"
+ ".inst 0x45c49855 // ummla z21.s, z2.b, z4.b\n"
+ ".inst 0x45c59858 // ummla z24.s, z2.b, z5.b\n"
+ ".inst 0x45c498db // ummla z27.s, z6.b, z4.b\n"
"ld1b { z4.b }, p0/Z, [x22, #-6, MUL VL]\n"
+ ".inst 0x45c598de // ummla z30.s, z6.b, z5.b\n"
".inst 0x45c3980a // ummla z10.s, z0.b, z3.b\n"
+ "ld1b { z5.b }, p0/Z, [x22, #-5, MUL VL]\n"
".inst 0x45c7980d // ummla z13.s, z0.b, z7.b\n"
- "ld1rqb { z0.b }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x45c39830 // ummla z16.s, z1.b, z3.b\n"
+ "ld1rqb { z0.b }, p0/Z, [%x[Apanel], #80]\n"
".inst 0x45c79833 // ummla z19.s, z1.b, z7.b\n"
+ ".inst 0x45c39856 // ummla z22.s, z2.b, z3.b\n"
"ld1rqb { z1.b }, p0/Z, [%x[Apanel], #96]\n"
- ".inst 0x45c398b6 // ummla z22.s, z5.b, z3.b\n"
- ".inst 0x45c798b9 // ummla z25.s, z5.b, z7.b\n"
- "ld1b { z5.b }, p0/Z, [x22, #-5, MUL VL]\n"
+ ".inst 0x45c79859 // ummla z25.s, z2.b, z7.b\n"
".inst 0x45c398dc // ummla z28.s, z6.b, z3.b\n"
- ".inst 0x45c798df // ummla z31.s, z6.b, z7.b\n"
"ld1rqb { z2.b }, p0/Z, [%x[Apanel], #112]\n"
+ ".inst 0x45c798df // ummla z31.s, z6.b, z7.b\n"
"add %x[Apanel], %x[Apanel], #0x80\n"
"addvl x22, x22, #-4\n"
"bge 3b\n"
"4:" // main loop skip
- "ld1rqb { z7.b }, p0/Z, [%x[Apanel]]\n"
+ "ld1rqb { z3.b }, p0/Z, [%x[Apanel]]\n"
".inst 0x45c49808 // ummla z8.s, z0.b, z4.b\n"
".inst 0x45c5980b // ummla z11.s, z0.b, z5.b\n"
".inst 0x45c4982e // ummla z14.s, z1.b, z4.b\n"
@@ -172,54 +172,54 @@ void sve_interleaved_u8u32_mmla_8x3VL(
"ld1b { z6.b }, p0/Z, [x22]\n"
".inst 0x45c49854 // ummla z20.s, z2.b, z4.b\n"
".inst 0x45c59857 // ummla z23.s, z2.b, z5.b\n"
- "ld1b { z3.b }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x45c498fa // ummla z26.s, z7.b, z4.b\n"
- ".inst 0x45c598fd // ummla z29.s, z7.b, z5.b\n"
+ "ld1b { z7.b }, p0/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x45c4987a // ummla z26.s, z3.b, z4.b\n"
+ ".inst 0x45c5987d // ummla z29.s, z3.b, z5.b\n"
"ld1b { z5.b }, p0/Z, [x22, #2, MUL VL]\n"
"ld1b { z4.b }, p0/Z, [x22, #3, MUL VL]\n"
".inst 0x45c69809 // ummla z9.s, z0.b, z6.b\n"
- ".inst 0x45c3980c // ummla z12.s, z0.b, z3.b\n"
- ".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
- ".inst 0x45c39832 // ummla z18.s, z1.b, z3.b\n"
"add %x[Apanel], %x[Apanel], #0x10\n"
- ".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
- ".inst 0x45c39858 // ummla z24.s, z2.b, z3.b\n"
+ ".inst 0x45c7980c // ummla z12.s, z0.b, z7.b\n"
+ ".inst 0x45c6982f // ummla z15.s, z1.b, z6.b\n"
"addvl x22, x22, #4\n"
- ".inst 0x45c698fb // ummla z27.s, z7.b, z6.b\n"
- ".inst 0x45c398fe // ummla z30.s, z7.b, z3.b\n"
+ ".inst 0x45c79832 // ummla z18.s, z1.b, z7.b\n"
+ ".inst 0x45c69855 // ummla z21.s, z2.b, z6.b\n"
+ ".inst 0x45c79858 // ummla z24.s, z2.b, z7.b\n"
+ ".inst 0x45c6987b // ummla z27.s, z3.b, z6.b\n"
+ ".inst 0x45c7987e // ummla z30.s, z3.b, z7.b\n"
".inst 0x45c5980a // ummla z10.s, z0.b, z5.b\n"
".inst 0x45c4980d // ummla z13.s, z0.b, z4.b\n"
".inst 0x45c59830 // ummla z16.s, z1.b, z5.b\n"
".inst 0x45c49833 // ummla z19.s, z1.b, z4.b\n"
".inst 0x45c59856 // ummla z22.s, z2.b, z5.b\n"
".inst 0x45c49859 // ummla z25.s, z2.b, z4.b\n"
- ".inst 0x45c598fc // ummla z28.s, z7.b, z5.b\n"
- ".inst 0x45c498ff // ummla z31.s, z7.b, z4.b\n"
+ ".inst 0x45c5987c // ummla z28.s, z3.b, z5.b\n"
+ ".inst 0x45c4987f // ummla z31.s, z3.b, z4.b\n"
"cbz x20, 5f\n"
"ld1b { z1.b }, p0/Z, [x22]\n"
"ld1rqb { z7.b }, p0/Z, [%x[Apanel]]\n"
- ".inst 0x45c198e8 // ummla z8.s, z7.b, z1.b\n"
"ld1rqb { z6.b }, p0/Z, [%x[Apanel], #16]\n"
"ld1b { z0.b }, p0/Z, [x22, #1, MUL VL]\n"
- ".inst 0x45c098eb // ummla z11.s, z7.b, z0.b\n"
"ld1rqb { z5.b }, p0/Z, [%x[Apanel], #32]\n"
"ld1rqb { z4.b }, p0/Z, [%x[Apanel], #48]\n"
+ "add %x[Apanel], %x[Apanel], #0x40\n"
+ ".inst 0x45c198e8 // ummla z8.s, z7.b, z1.b\n"
+ "ld1b { z3.b }, p0/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z2.b }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x45c098eb // ummla z11.s, z7.b, z0.b\n"
".inst 0x45c198ce // ummla z14.s, z6.b, z1.b\n"
".inst 0x45c098d1 // ummla z17.s, z6.b, z0.b\n"
".inst 0x45c198b4 // ummla z20.s, z5.b, z1.b\n"
- "ld1b { z3.b }, p0/Z, [x22, #2, MUL VL]\n"
".inst 0x45c098b7 // ummla z23.s, z5.b, z0.b\n"
".inst 0x45c1989a // ummla z26.s, z4.b, z1.b\n"
- "ld1b { z2.b }, p0/Z, [x22, #3, MUL VL]\n"
- ".inst 0x45c0989d // ummla z29.s, z4.b, z0.b\n"
"ld1b { z1.b }, p0/Z, [x22, #4, MUL VL]\n"
+ ".inst 0x45c0989d // ummla z29.s, z4.b, z0.b\n"
"ld1b { z0.b }, p0/Z, [x22, #5, MUL VL]\n"
".inst 0x45c398e9 // ummla z9.s, z7.b, z3.b\n"
".inst 0x45c298ec // ummla z12.s, z7.b, z2.b\n"
- "addvl x22, x22, #6\n"
".inst 0x45c398cf // ummla z15.s, z6.b, z3.b\n"
+ "addvl x22, x22, #6\n"
".inst 0x45c298d2 // ummla z18.s, z6.b, z2.b\n"
- "add %x[Apanel], %x[Apanel], #0x40\n"
".inst 0x45c398b5 // ummla z21.s, z5.b, z3.b\n"
".inst 0x45c298b8 // ummla z24.s, z5.b, z2.b\n"
".inst 0x45c3989b // ummla z27.s, z4.b, z3.b\n"
@@ -233,53 +233,53 @@ void sve_interleaved_u8u32_mmla_8x3VL(
".inst 0x45c1989c // ummla z28.s, z4.b, z1.b\n"
".inst 0x45c0989f // ummla z31.s, z4.b, z0.b\n"
"5:" // multiply loop done
- "uzp1 z0.d, z8.d, z11.d\n"
+ "uzp1 z2.d, z8.d, z11.d\n"
"uzp2 z8.d, z8.d, z11.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel]]\n"
- "uzp1 z0.d, z9.d, z12.d\n"
+ "subs x23, x23, #0x1\n"
+ "uzp1 z1.d, z9.d, z12.d\n"
"uzp2 z9.d, z9.d, z12.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
"uzp1 z0.d, z10.d, z13.d\n"
"uzp2 z10.d, z10.d, z13.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
- "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
- "uzp1 z0.d, z14.d, z17.d\n"
+ "st1w { z2.s }, p0, [%x[Cpanel]]\n"
+ "uzp1 z3.d, z14.d, z17.d\n"
"uzp2 z14.d, z14.d, z17.d\n"
- "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
- "uzp1 z1.d, z15.d, z18.d\n"
- "subs x23, x23, #0x1\n"
- "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
+ "uzp1 z17.d, z15.d, z18.d\n"
"uzp2 z15.d, z15.d, z18.d\n"
- "uzp1 z17.d, z16.d, z19.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "uzp1 z2.d, z16.d, z19.d\n"
"uzp2 z16.d, z16.d, z19.d\n"
- "uzp1 z0.d, z20.d, z23.d\n"
- "st1w { z1.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
- "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "st1w { z8.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "uzp1 z1.d, z20.d, z23.d\n"
"uzp2 z20.d, z20.d, z23.d\n"
- "st1w { z17.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
- "uzp1 z23.d, z21.d, z24.d\n"
+ "st1w { z9.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "uzp1 z0.d, z21.d, z24.d\n"
"uzp2 z21.d, z21.d, z24.d\n"
- "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
- "uzp1 z19.d, z22.d, z25.d\n"
+ "st1w { z10.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
+ "uzp1 z23.d, z22.d, z25.d\n"
"uzp2 z22.d, z22.d, z25.d\n"
- "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
- "uzp1 z18.d, z26.d, z29.d\n"
+ "st1w { z3.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
+ "uzp1 z19.d, z26.d, z29.d\n"
"uzp2 z26.d, z26.d, z29.d\n"
- "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
- "uzp1 z17.d, z27.d, z30.d\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
+ "addvl %x[Cpanel], %x[Cpanel], #16\n"
+ "uzp1 z18.d, z27.d, z30.d\n"
"uzp2 z27.d, z27.d, z30.d\n"
- "st1w { z0.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
- "uzp1 z16.d, z28.d, z31.d\n"
+ "uzp1 z17.d, z28.d, z31.d\n"
"uzp2 z28.d, z28.d, z31.d\n"
- "st1w { z23.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
- "st1w { z19.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
+ "st1w { z2.s }, p0, [%x[Cpanel], #-8, MUL VL]\n"
+ "st1w { z14.s }, p0, [%x[Cpanel], #-7, MUL VL]\n"
+ "st1w { z15.s }, p0, [%x[Cpanel], #-6, MUL VL]\n"
+ "st1w { z16.s }, p0, [%x[Cpanel], #-5, MUL VL]\n"
+ "st1w { z1.s }, p0, [%x[Cpanel], #-4, MUL VL]\n"
+ "st1w { z0.s }, p0, [%x[Cpanel], #-3, MUL VL]\n"
+ "st1w { z23.s }, p0, [%x[Cpanel], #-2, MUL VL]\n"
"st1w { z20.s }, p0, [%x[Cpanel], #-1, MUL VL]\n"
"st1w { z21.s }, p0, [%x[Cpanel]]\n"
"st1w { z22.s }, p0, [%x[Cpanel], #1, MUL VL]\n"
- "st1w { z18.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
- "st1w { z17.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
- "st1w { z16.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
+ "st1w { z19.s }, p0, [%x[Cpanel], #2, MUL VL]\n"
+ "st1w { z18.s }, p0, [%x[Cpanel], #3, MUL VL]\n"
+ "st1w { z17.s }, p0, [%x[Cpanel], #4, MUL VL]\n"
"st1w { z26.s }, p0, [%x[Cpanel], #5, MUL VL]\n"
"st1w { z27.s }, p0, [%x[Cpanel], #6, MUL VL]\n"
"st1w { z28.s }, p0, [%x[Cpanel], #7, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
index a81d4504ae..ba47e0aa54 100644
--- a/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/merges/a64_merge_fp16_24x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,7 +23,7 @@
*/
#pragma once
-#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(ARM_COMPUTE_ENABLE_FP16))
template<>
void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout, const int y0, const int ymax, const int x0, const int xmax, const __fp16 *bias, Activation act, bool append)
@@ -86,7 +86,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -140,7 +140,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -217,7 +217,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -317,7 +317,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -439,7 +439,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -584,7 +584,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -752,7 +752,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -944,7 +944,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -1150,7 +1150,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -1204,7 +1204,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -1278,7 +1278,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -1372,7 +1372,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -1485,7 +1485,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -1618,7 +1618,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -1771,7 +1771,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -1945,7 +1945,7 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
} else {
/* Optimized routine to copy an entire block */
__asm __volatile (
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifndef ARM_COMPUTE_ENABLE_FP16
".arch armv8.2-a+fp16\n"
#endif
"dup v0.8h, %[maxval].h[0]\n"
@@ -2112,4 +2112,4 @@ void MergeResults<24, 8, false>(__fp16 *out, const __fp16 *in, const int ldout,
}
}
-#endif // __aarch64__ && (FP16_KERNELS || __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // __aarch64__ && (FP16_KERNELS || ARM_COMPUTE_ENABLE_FP16)
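[Note: the change in this file is purely the compile-time gate: the compiler-defined __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is swapped for the library's own ARM_COMPUTE_ENABLE_FP16 build flag, and the ".arch armv8.2-a+fp16" directive is now emitted whenever that flag is absent so the fp16 encodings still assemble under FP16_KERNELS. A condensed sketch of the resulting pattern, using the same lines the hunks above touch:

    #if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(ARM_COMPUTE_ENABLE_FP16))
    static void dup_maxval(__fp16 maxval) {
        __asm__ __volatile__(
    #ifndef ARM_COMPUTE_ENABLE_FP16
            ".arch armv8.2-a+fp16\n"   // let the assembler accept fp16 ops
    #endif
            "dup v0.8h, %[maxval].h[0]\n"
            :
            : [maxval] "w"(maxval)
            : "v0");
    }
    #endif
]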
diff --git a/src/core/NEON/kernels/arm_gemm/performance_parameters.hpp b/src/core/NEON/kernels/arm_gemm/performance_parameters.hpp
index 059ab5f7df..57f779c498 100644
--- a/src/core/NEON/kernels/arm_gemm/performance_parameters.hpp
+++ b/src/core/NEON/kernels/arm_gemm/performance_parameters.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -26,9 +26,9 @@
namespace arm_gemm {
struct PerformanceParameters {
- float kernel_macs_cycle;
- float prepare_bytes_cycle = 0.0f;
- float merge_bytes_cycle = 0.0f;
+ float kernel_macs_cycle;
+ float prepare_bytes_cycle = 0.0f;
+ float merge_bytes_cycle = 0.0f;
PerformanceParameters(float k) : kernel_macs_cycle(k) { }
PerformanceParameters(float k, float p, float m) : kernel_macs_cycle(k), prepare_bytes_cycle(p), merge_bytes_cycle(m) { }
diff --git a/src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp b/src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp
index d35825c428..1a90cf7d89 100644
--- a/src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp
+++ b/src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp
@@ -35,9 +35,9 @@ namespace arm_gemm {
/* Quantized wrapper - do an integer GEMM and wrap around the quantization. */
template<typename To, typename Tr, typename Tgemm>
-class QuantizeWrapper : public GemmCommon<To, Tr> {
+class QuantizeWrapper : public GemmCommon<To, To, Tr> {
private:
- UniqueGemmCommon<To, Tgemm> _subgemm = nullptr;
+ UniqueGemmCommon<To, To, Tgemm> _subgemm = nullptr;
int32_t *_row_sums = nullptr;
int32_t *_col_sums = nullptr;
Requantize32 _params;
@@ -111,7 +111,7 @@ public:
QuantizeWrapper(const GemmArgs &args, const Requantize32 &qp) : _params(qp), _args(args), _barrier(args._maxthreads) {
GemmArgs newargs = GemmArgs(args._ci, args._Msize, args._Nsize, args._Ksize, args._Ksections, args._nbatches, args._nmulti, args._indirect_input, Activation(), args._maxthreads);
- _subgemm = gemm<To, Tgemm>(newargs);
+ _subgemm = gemm<To, To, Tgemm>(newargs);
if (_subgemm == nullptr) {
return;
@@ -122,7 +122,7 @@ public:
const To *B, const int ldb, const int B_multi_stride,
Tr *C, const int ldc, const int C_batch_stride, const int C_multi_stride,
const Tr *bias, const int bias_multi_stride) override {
- GemmCommon<To, Tr>::set_arrays(A, lda, A_batch_stride, A_multi_stride, B, ldb, B_multi_stride, C, ldc, C_batch_stride, C_multi_stride, bias, bias_multi_stride);
+ GemmCommon<To, To, Tr>::set_arrays(A, lda, A_batch_stride, A_multi_stride, B, ldb, B_multi_stride, C, ldc, C_batch_stride, C_multi_stride, bias, bias_multi_stride);
arrays_set = true;
set_child_arrays();
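[Note: context for the template churn here: GemmCommon (and gemm<>/UniqueGemmCommon<>) now take separate input and weight operand types instead of one shared To; QuantizeWrapper instantiates both with the same type, as the hunks above show. A toy analogue of the widened signature, not the library's real class:

    #include <cstdint>

    template <typename TInput, typename TWeight, typename TResult>
    struct ToyGemm {
        // Accumulate a k-length dot product in the result type.
        TResult dot(const TInput *a, const TWeight *b, int k) const {
            TResult acc = 0;
            for (int i = 0; i < k; ++i)
                acc += TResult(a[i]) * TResult(b[i]);
            return acc;
        }
    };

    // QuantizeWrapper's case: same element type on both operand sides, e.g.
    // ToyGemm<uint8_t, uint8_t, uint32_t>.
]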
diff --git a/src/core/NEON/kernels/arm_gemm/quantized.cpp b/src/core/NEON/kernels/arm_gemm/quantized.cpp
index 6da9f4be0e..f24d5956e5 100644
--- a/src/core/NEON/kernels/arm_gemm/quantized.cpp
+++ b/src/core/NEON/kernels/arm_gemm/quantized.cpp
@@ -745,6 +745,10 @@ template void requantize_block_32(const Requantize32 &qp, unsigned int width, un
const uint32_t *input, unsigned int in_stride, uint8_t *output, unsigned int out_stride,
const int32_t *row_bias, const int32_t *col_bias, unsigned int start_col);
+template void requantize_block_32(const Requantize32 &qp, unsigned int width, unsigned int height,
+ const int32_t *input, unsigned int in_stride, uint8_t *output, unsigned int out_stride,
+ const int32_t *row_bias, const int32_t *col_bias, unsigned int start_col);
+
/*
* Routine (and helpers) to compute row sums needed for offset correction.
*
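[Note: the new instantiation above covers the int32_t-input / uint8_t-output path of requantize_block_32, alongside the existing uint32_t-input one. Conceptually each accumulator gets a row/column offset correction and is then rescaled into the quantized output range. A hedged scalar sketch, with scale and zero_point as illustrative stand-ins for the real Requantize32 parameters:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    static uint8_t requantize_one(int32_t acc, int32_t row_bias, int32_t col_bias,
                                  float scale, int32_t zero_point) {
        int32_t corrected = acc + row_bias + col_bias;        // offset correction
        int32_t q = int32_t(std::lround(corrected * scale)) + zero_point;
        return uint8_t(std::clamp(q, 0, 255));                // saturate to u8
    }
]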
diff --git a/src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp b/src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp
index a9cbf4ec8d..e43eb8a09d 100644
--- a/src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp
+++ b/src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp
@@ -41,24 +41,24 @@ namespace arm_gemm {
* The optional 'block' parameter is for kernels using dot-product type
* instructions like UDOT and SDOT.
*/
-template<typename TOperand, typename TResult, unsigned int height, unsigned int width, unsigned int block=1, bool integrate_sums=false>
+template<typename TInput, typename TWeight, typename TResult, unsigned int height, unsigned int width, unsigned int block=1, bool integrate_sums=false>
class StdTransformsFixed
{
public:
template<typename TIn>
- void PrepareA(TOperand *out, const TIn *in, const int stride, const int y0,
+ void PrepareA(TInput *out, const TIn *in, const int stride, const int y0,
const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) const {
Interleave<height, block, VLType::None>(out, in, stride, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
template<typename TIn>
- void PrepareA_indirect(TOperand *out, const TIn * const * const *ptr, size_t stringlen, size_t rounded_stringlen, const int y0,
+ void PrepareA_indirect(TInput *out, const TIn * const * const *ptr, size_t stringlen, size_t rounded_stringlen, const int y0,
const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
IndirectInterleave<height, block, VLType::None>(out, ptr, stringlen, rounded_stringlen, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
template<typename TIn>
- void PrepareA_convolution(TOperand *out, const TIn *ptr, size_t stride, const convolver<TIn> &conv, size_t rounded_stringlen,
+ void PrepareA_convolution(TInput *out, const TIn *ptr, size_t stride, const convolver<TIn> &conv, size_t rounded_stringlen,
const int y0, const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
ConvolutionInterleave<height, block, VLType::None>(out, ptr, stride, conv, rounded_stringlen, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
@@ -68,7 +68,7 @@ public:
}
template<typename TIn>
- void PrepareB(TOperand *out, const TIn *in, const int stride, const int x0,
+ void PrepareB(TWeight *out, const TIn *in, const int stride, const int x0,
const int xmax, const int k0, const int kmax, bool transposed) const {
assert(!transposed);
Transform<width, block, true>(out, in, stride, x0, xmax, k0, kmax);
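[Note: same story as in quantize_wrapper.hpp: the single TOperand packing type splits into TInput for the A-side interleave and TWeight for the B-side transform, so a kernel can pack each operand in its own storage type. A toy sketch of the split, with illustrative names only:

    template <typename TInput, typename TWeight>
    struct ToyTransforms {
        template <typename TIn>
        void PrepareA(TInput *out, const TIn *in, int n) const {
            for (int i = 0; i < n; ++i) out[i] = TInput(in[i]);   // A panel pack
        }
        template <typename TIn>
        void PrepareB(TWeight *out, const TIn *in, int n) const {
            for (int i = 0; i < n; ++i) out[i] = TWeight(in[i]);  // B panel pack
        }
    };

The std_transforms_fixed_trB.hpp and std_transforms_sve.hpp hunks below apply the identical renaming.]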
diff --git a/src/core/NEON/kernels/arm_gemm/std_transforms_fixed_trB.hpp b/src/core/NEON/kernels/arm_gemm/std_transforms_fixed_trB.hpp
index 1db716455f..ec3cad0385 100644
--- a/src/core/NEON/kernels/arm_gemm/std_transforms_fixed_trB.hpp
+++ b/src/core/NEON/kernels/arm_gemm/std_transforms_fixed_trB.hpp
@@ -42,24 +42,24 @@ namespace arm_gemm {
* The optional 'block' parameter is for kernels using dot-product type
* instructions like UDOT and SDOT.
*/
-template<typename TOperand, typename TResult, unsigned int height, unsigned int width, unsigned int block=1, bool integrate_sums=false>
+template<typename TInput, typename TWeight, typename TResult, unsigned int height, unsigned int width, unsigned int block=1, bool integrate_sums=false>
class StdTransformsFixedTRB
{
public:
template<typename TIn>
- void PrepareA(TOperand *out, const TIn *in, const int stride, const int y0,
+ void PrepareA(TInput *out, const TIn *in, const int stride, const int y0,
const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) const {
Interleave<height, block, VLType::None>(out, in, stride, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
template<typename TIn>
- void PrepareA_indirect(TOperand *out, const TIn * const * const *ptr, size_t stringlen, size_t rounded_stringlen, const int y0,
+ void PrepareA_indirect(TInput *out, const TIn * const * const *ptr, size_t stringlen, size_t rounded_stringlen, const int y0,
const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
IndirectInterleave<height, block, VLType::None>(out, ptr, stringlen, rounded_stringlen, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
template<typename TIn>
- void PrepareA_convolution(TOperand *out, const TIn *ptr, size_t stride, const convolver<TIn> &conv, size_t rounded_stringlen,
+ void PrepareA_convolution(TInput *out, const TIn *ptr, size_t stride, const convolver<TIn> &conv, size_t rounded_stringlen,
const int y0, const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
ConvolutionInterleave<height, block, VLType::None>(out, ptr, stride, conv, rounded_stringlen, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
@@ -69,7 +69,7 @@ public:
}
template<typename TIn>
- void PrepareB(TOperand *out, const TIn *in, const int stride, const int x0,
+ void PrepareB(TWeight *out, const TIn *in, const int stride, const int x0,
const int xmax, const int k0, const int kmax, bool transposed) const {
if (transposed) {
Transform<width, block, false>(out, in, stride, x0, xmax, k0, kmax);
diff --git a/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp b/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
index c516bfc456..32d597f4af 100644
--- a/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
+++ b/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
@@ -39,24 +39,24 @@ namespace arm_gemm {
* The optional 'block' parameter is for kernels using dot-product type
* instructions like UDOT and SDOT.
*/
-template<typename TOperand, typename TResult, unsigned int height, unsigned int width_vectors, unsigned int block=1, unsigned int mmla=1, bool integrate_sums=false>
+template<typename TInput, typename TWeight, typename TResult, unsigned int height, unsigned int width_vectors, unsigned int block=1, unsigned int mmla=1, bool integrate_sums=false>
class StdTransformsSVE
{
public:
template<typename TIn>
- void PrepareA(TOperand *out, const TIn *in, const int stride, const int y0,
+ void PrepareA(TInput *out, const TIn *in, const int stride, const int y0,
const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
Interleave<height, block, VLType::None>(out, in, stride, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
template<typename TIn>
- void PrepareA_indirect(TOperand *out, const TIn * const * const *ptr, size_t stringlen, size_t rounded_stringlen, const int y0,
+ void PrepareA_indirect(TInput *out, const TIn * const * const *ptr, size_t stringlen, size_t rounded_stringlen, const int y0,
const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
IndirectInterleave<height, block, VLType::None>(out, ptr, stringlen, rounded_stringlen, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
template<typename TIn>
- void PrepareA_convolution(TOperand *out, const TIn *ptr, size_t stride, const convolver<TIn> &conv, size_t rounded_stringlen,
+ void PrepareA_convolution(TInput *out, const TIn *ptr, size_t stride, const convolver<TIn> &conv, size_t rounded_stringlen,
const int y0, const int ymax, const int k0, const int kmax, int32_t row_sum_multiplier) {
ConvolutionInterleave<height, block, VLType::None>(out, ptr, stride, conv, rounded_stringlen, y0, ymax, k0, kmax, integrate_sums, row_sum_multiplier);
}
@@ -66,7 +66,7 @@ public:
}
template<typename TIn>
- void PrepareB(TOperand *out, const TIn *in, const int stride, const int x0,
+ void PrepareB(TWeight *out, const TIn *in, const int stride, const int x0,
const int xmax, const int k0, const int kmax, bool transposed) {
assert (!transposed);
Transform<width_vectors, block, true, VLType::SVE>(out, in, stride, x0, xmax, k0, kmax);
diff --git a/src/core/NEON/kernels/arm_gemm/transform.cpp b/src/core/NEON/kernels/arm_gemm/transform.cpp
index 45e4f0e1de..06d9e2416c 100644
--- a/src/core/NEON/kernels/arm_gemm/transform.cpp
+++ b/src/core/NEON/kernels/arm_gemm/transform.cpp
@@ -129,17 +129,17 @@ void Transform(
// We don't have assembler transforms for AArch32, generate templated ones here.
#ifdef __arm__
template void Transform<8, 1, true, VLType::None>(float *, const float *, int, int, int, int, int);
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#if defined(ARM_COMPUTE_ENABLE_FP16)
template void Transform<8, 1, true, VLType::None>(float *, const __fp16 *, int, int, int, int, int);
-#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // defined(ARM_COMPUTE_ENABLE_FP16)
#ifdef ARM_COMPUTE_ENABLE_BF16
template void Transform<8, 1, true, VLType::None>(float *, const bfloat16 *, int, int, int, int, int);
#endif // ARM_COMPUTE_ENABLE_BF16
#endif // AArch32
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#if defined(ARM_COMPUTE_ENABLE_FP16)
template void Transform<12, 1, false, VLType::None>(float *, const __fp16 *, int, int, int, int, int);
-#endif // defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+#endif // defined(ARM_COMPUTE_ENABLE_FP16)
#ifdef ARM_COMPUTE_ENABLE_BF16
template void Transform<12, 1, false, VLType::None>(float *, const bfloat16 *, int, int, int, int, int);
#endif // ARM_COMPUTE_ENABLE_BF16
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a32_transpose_interleave_8way_32bit.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a32_transpose_interleave_8way_32bit.hpp
index b50c240a3a..3690727f11 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a32_transpose_interleave_8way_32bit.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a32_transpose_interleave_8way_32bit.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -61,8 +61,8 @@ void TransformImpl<16, 1, true, 2, 2, VLType::None>::Transform(
template <>
void TransposeInterleaveCommon<16, uint16_t, uint16_t>::moveblock_1x1(const uint16_t *&in0, uint16_t *out) {
__asm volatile (
- "VLD1.32 {d0-d3}, [%[in0]]!\n"
- "VST1.32 {d0-d3}, [%[out]]\n"
+ "VLD1.32 {d0-d3}, [%[in0]]!\n"
+ "VST1.32 {d0-d3}, [%[out]]\n"
ASM_PREFETCH("[%[in0], #192]")
: [in0] "+r" (in0),
[out] "+r" (out)
@@ -74,13 +74,13 @@ void TransposeInterleaveCommon<16, uint16_t, uint16_t>::moveblock_1x1(const uint
template <>
void TransposeInterleaveCommon<16, uint16_t, uint16_t>::moveblock_1x2(const uint16_t *&in0, const uint16_t *&in1, uint16_t *out) {
__asm volatile (
- "VLD1.32 {d0-d3}, [%[in0]]!\n"
- "VST1.32 {d0-d3}, [%[out]]!\n"
+ "VLD1.32 {d0-d3}, [%[in0]]!\n"
+ "VST1.32 {d0-d3}, [%[out]]!\n"
ASM_PREFETCH("[%[in0], #192]")
- "VLD1.32 {d0-d3}, [%[in1]]!\n"
- "VST1.32 {d0-d3}, [%[out]]\n"
+ "VLD1.32 {d0-d3}, [%[in1]]!\n"
+ "VST1.32 {d0-d3}, [%[out]]\n"
ASM_PREFETCH("[%[in1], #192]")
- "SUB %[out], %[out], #32\n"
+ "SUB %[out], %[out], #32\n"
: [in0] "+r" (in0),
[in1] "+r" (in1),
[out] "+r" (out)
@@ -92,19 +92,19 @@ void TransposeInterleaveCommon<16, uint16_t, uint16_t>::moveblock_1x2(const uint
template <>
void TransposeInterleaveCommon<16, uint16_t, uint16_t>::moveblock_1x4(const uint16_t *&in0, const uint16_t *&in1, const uint16_t *&in2, const uint16_t *&in3, uint16_t *out) {
__asm __volatile (
- "VLD1.32 {d0-d3}, [%[in0]]!\n"
- "VST1.32 {d0-d3}, [%[out]]!\n"
+ "VLD1.32 {d0-d3}, [%[in0]]!\n"
+ "VST1.32 {d0-d3}, [%[out]]!\n"
ASM_PREFETCH("[%[in0], #192]")
- "VLD1.32 {d0-d3}, [%[in1]]!\n"
- "VST1.32 {d0-d3}, [%[out]]!\n"
+ "VLD1.32 {d0-d3}, [%[in1]]!\n"
+ "VST1.32 {d0-d3}, [%[out]]!\n"
ASM_PREFETCH("[%[in1], #192]")
- "VLD1.32 {d0-d3}, [%[in2]]!\n"
- "VST1.32 {d0-d3}, [%[out]]!\n"
+ "VLD1.32 {d0-d3}, [%[in2]]!\n"
+ "VST1.32 {d0-d3}, [%[out]]!\n"
ASM_PREFETCH("[%[in2], #192]")
- "VLD1.32 {d0-d3}, [%[in3]]!\n"
- "VST1.32 {d0-d3}, [%[out]]\n"
+ "VLD1.32 {d0-d3}, [%[in3]]!\n"
+ "VST1.32 {d0-d3}, [%[out]]\n"
ASM_PREFETCH("[%[in3], #192]")
- "SUB %[out], %[out], #96\n"
+ "SUB %[out], %[out], #96\n"
: [in0] "+r" (in0),
[in1] "+r" (in1),
[in2] "+r" (in2),
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp
index 8574d89226..618992c481 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_128.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,235 +34,281 @@ void a64_transpose_interleave_128(uint32_t *out, const uint32_t *in, size_t widt
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x20\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
"ldr q15, [x25], #0x10\n"
- "ldr q14, [x23], #0x10\n"
+ "ldr q14, [x22], #0x10\n"
"sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q13, [x22], #0x10\n"
+ "ldr q13, [x21], #0x10\n"
"ldr q12, [x20], #0x10\n"
+ "cmp x24, #0x20\n"
"ldr q11, [x25], #0x10\n"
- "ldr q10, [x23], #0x10\n"
- "ldr q9, [x22], #0x10\n"
+ "ldr q10, [x22], #0x10\n"
+ "ldr q9, [x21], #0x10\n"
"ldr q8, [x20], #0x10\n"
"ldr q7, [x25], #0x10\n"
- "ldr q6, [x23], #0x10\n"
- "ldr q5, [x22], #0x10\n"
+ "ldr q6, [x22], #0x10\n"
+ "ldr q5, [x21], #0x10\n"
"ldr q4, [x20], #0x10\n"
"ldr q3, [x25], #0x10\n"
- "ldr q2, [x23], #0x10\n"
- "ldr q1, [x22], #0x10\n"
+ "ldr q2, [x22], #0x10\n"
+ "ldr q1, [x21], #0x10\n"
"ldr q0, [x20], #0x10\n"
"ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
"ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q15, [x21, #0x0]\n"
- "str q11, [x21, #0x10]\n"
- "str q7, [x21, #0x20]\n"
- "str q3, [x21, #0x30]\n"
- "str q31, [x21, #0x40]\n"
- "str q27, [x21, #0x50]\n"
- "str q23, [x21, #0x60]\n"
- "str q19, [x21, #0x70]\n"
- "str q14, [x21, #0x80]\n"
- "str q10, [x21, #0x90]\n"
- "str q6, [x21, #0xa0]\n"
- "str q2, [x21, #0xb0]\n"
- "str q30, [x21, #0xc0]\n"
- "str q26, [x21, #0xd0]\n"
- "str q22, [x21, #0xe0]\n"
- "str q18, [x21, #0xf0]\n"
- "str q13, [x21, #0x100]\n"
- "str q9, [x21, #0x110]\n"
- "str q5, [x21, #0x120]\n"
- "str q1, [x21, #0x130]\n"
- "str q29, [x21, #0x140]\n"
- "str q25, [x21, #0x150]\n"
- "str q21, [x21, #0x160]\n"
- "str q17, [x21, #0x170]\n"
- "str q12, [x21, #0x180]\n"
- "str q8, [x21, #0x190]\n"
- "str q4, [x21, #0x1a0]\n"
- "str q0, [x21, #0x1b0]\n"
- "str q28, [x21, #0x1c0]\n"
- "str q24, [x21, #0x1d0]\n"
- "str q20, [x21, #0x1e0]\n"
- "str q16, [x21, #0x1f0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q15, [x23, #0x0]\n"
+ "str q11, [x23, #0x10]\n"
+ "str q7, [x23, #0x20]\n"
+ "str q3, [x23, #0x30]\n"
+ "str q31, [x23, #0x40]\n"
+ "str q27, [x23, #0x50]\n"
+ "str q23, [x23, #0x60]\n"
+ "str q19, [x23, #0x70]\n"
+ "str q14, [x23, #0x80]\n"
+ "str q10, [x23, #0x90]\n"
+ "str q6, [x23, #0xa0]\n"
+ "str q2, [x23, #0xb0]\n"
+ "str q30, [x23, #0xc0]\n"
+ "str q26, [x23, #0xd0]\n"
+ "str q22, [x23, #0xe0]\n"
+ "str q18, [x23, #0xf0]\n"
+ "str q13, [x23, #0x100]\n"
+ "str q9, [x23, #0x110]\n"
+ "str q5, [x23, #0x120]\n"
+ "str q1, [x23, #0x130]\n"
+ "str q29, [x23, #0x140]\n"
+ "str q25, [x23, #0x150]\n"
+ "str q21, [x23, #0x160]\n"
+ "str q17, [x23, #0x170]\n"
+ "str q12, [x23, #0x180]\n"
+ "str q8, [x23, #0x190]\n"
+ "str q4, [x23, #0x1a0]\n"
+ "str q0, [x23, #0x1b0]\n"
+ "str q28, [x23, #0x1c0]\n"
+ "str q24, [x23, #0x1d0]\n"
+ "str q20, [x23, #0x1e0]\n"
+ "str q16, [x23, #0x1f0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x10\n"
+ "movi v16.4s, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "str q16, [x23, #0x80]\n"
+ "str q16, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "str q16, [x23, #0xc0]\n"
+ "str q16, [x23, #0xd0]\n"
+ "str q16, [x23, #0xe0]\n"
+ "str q16, [x23, #0xf0]\n"
+ "str q16, [x23, #0x100]\n"
+ "str q16, [x23, #0x110]\n"
+ "str q16, [x23, #0x120]\n"
+ "str q16, [x23, #0x130]\n"
+ "str q16, [x23, #0x140]\n"
+ "str q16, [x23, #0x150]\n"
+ "str q16, [x23, #0x160]\n"
+ "str q16, [x23, #0x170]\n"
+ "str q16, [x23, #0x180]\n"
+ "str q16, [x23, #0x190]\n"
+ "str q16, [x23, #0x1a0]\n"
+ "str q16, [x23, #0x1b0]\n"
+ "str q16, [x23, #0x1c0]\n"
+ "str q16, [x23, #0x1d0]\n"
+ "str q16, [x23, #0x1e0]\n"
+ "str q16, [x23, #0x1f0]\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
"ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
"sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
+ "cmp x24, #0x10\n"
"ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q31, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q23, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q30, [x21, #0x80]\n"
- "str q26, [x21, #0x90]\n"
- "str q22, [x21, #0xa0]\n"
- "str q18, [x21, #0xb0]\n"
- "str q29, [x21, #0x100]\n"
- "str q25, [x21, #0x110]\n"
- "str q21, [x21, #0x120]\n"
- "str q17, [x21, #0x130]\n"
- "str q28, [x21, #0x180]\n"
- "str q24, [x21, #0x190]\n"
- "str q20, [x21, #0x1a0]\n"
- "str q16, [x21, #0x1b0]\n"
- "add x21, x21, #0x40\n"
+ "str q31, [x23, #0x0]\n"
+ "str q27, [x23, #0x10]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q30, [x23, #0x80]\n"
+ "str q26, [x23, #0x90]\n"
+ "str q22, [x23, #0xa0]\n"
+ "str q18, [x23, #0xb0]\n"
+ "str q29, [x23, #0x100]\n"
+ "str q25, [x23, #0x110]\n"
+ "str q21, [x23, #0x120]\n"
+ "str q17, [x23, #0x130]\n"
+ "str q28, [x23, #0x180]\n"
+ "str q24, [x23, #0x190]\n"
+ "str q20, [x23, #0x1a0]\n"
+ "str q16, [x23, #0x1b0]\n"
+ "add x23, x23, #0x40\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
"cmp x24, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x80]\n"
- "str q17, [x21, #0x100]\n"
- "str q16, [x21, #0x180]\n"
- "add x21, x21, #0x10\n"
+ "cmp x24, #0x4\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x80]\n"
+ "str q17, [x23, #0x100]\n"
+ "str q16, [x23, #0x180]\n"
+ "add x23, x23, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x80]\n"
- "str s17, [x21, #0x100]\n"
- "str s16, [x21, #0x180]\n"
- "add x21, x21, #0x4\n"
+ "cmp x24, #0x1\n"
+ "str s19, [x23, #0x0]\n"
+ "str s18, [x23, #0x80]\n"
+ "str s17, [x23, #0x100]\n"
+ "str s16, [x23, #0x180]\n"
+ "add x23, x23, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x200\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x20\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Column loop
+ "blt 14f\n"
+ "13:" // Tail row loop: Column loop
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x25], #0x10\n"
"sub x20, x20, #0x20\n"
- "cmp x20, #0x20\n"
+ "ldr q22, [x25], #0x10\n"
"ldr q21, [x25], #0x10\n"
+ "cmp x20, #0x20\n"
"ldr q20, [x25], #0x10\n"
"ldr q19, [x25], #0x10\n"
"ldr q18, [x25], #0x10\n"
"ldr q17, [x25], #0x10\n"
"ldr q16, [x25], #0x10\n"
- "str q23, [x21, #0x0]\n"
- "str q22, [x21, #0x10]\n"
- "str q21, [x21, #0x20]\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Column loop skip
+ "str q23, [x23, #0x0]\n"
+ "str q22, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q20, [x23, #0x30]\n"
+ "str q19, [x23, #0x40]\n"
+ "str q18, [x23, #0x50]\n"
+ "str q17, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x10\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 16 loop: loop
+ "movi v16.4s, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: width 16 loop: loop
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x25], #0x10\n"
"sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
+ "ldr q18, [x25], #0x10\n"
"ldr q17, [x25], #0x10\n"
+ "cmp x20, #0x10\n"
"ldr q16, [x25], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x40\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 16 loop: skip
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "add x23, x23, #0x40\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 16 loop: skip
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr q16, [x25], #0x10\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q16, [x23, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr s16, [x25], #0x4\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str s16, [x23, #0x0]\n"
+ "add x23, x23, #0x4\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp
index cdf1f98608..7d0460b3a0 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,362 +40,377 @@ void a64_transpose_interleave_12_1x4(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"cmp %x[height], #0x8\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "cmp x28, #0x30\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x30\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x30\n"
- "cmp x24, #0x30\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v31.16b, v21.16b, v17.16b\n"
- "zip1 v22.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v14.16b, v21.16b, v17.16b\n"
- "zip2 v13.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v30.16b, v19.16b, v17.16b\n"
- "zip1 v29.16b, v18.16b, v16.16b\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v12.16b, v19.16b, v17.16b\n"
- "zip2 v11.16b, v18.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v10.16b, v21.16b, v17.16b\n"
- "zip1 v9.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v8.16b, v21.16b, v17.16b\n"
- "zip2 v7.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x9], #0x10\n"
+ "ldr q18, [x26], #0x10\n"
+ "sub x28, x28, #0x30\n"
+ "ldr q17, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "cmp x28, #0x30\n"
+ "ldr q27, [x23], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
+ "ldr q24, [x20], #0x10\n"
+ "ldr q23, [x9], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
+ "zip1 v1.16b, v19.16b, v17.16b\n"
+ "zip1 v0.16b, v18.16b, v16.16b\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v15.16b, v19.16b, v17.16b\n"
+ "zip2 v14.16b, v18.16b, v16.16b\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v13.16b, v27.16b, v25.16b\n"
+ "zip1 v12.16b, v26.16b, v24.16b\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "zip1 v6.16b, v19.16b, v17.16b\n"
- "zip1 v5.16b, v18.16b, v16.16b\n"
- "ldr q28, [x9], #0x10\n"
- "ldr q27, [x28], #0x10\n"
- "zip2 v4.16b, v19.16b, v17.16b\n"
- "zip2 v3.16b, v18.16b, v16.16b\n"
- "ldr q26, [x27], #0x10\n"
- "ldr q25, [x26], #0x10\n"
- "zip1 v2.16b, v28.16b, v26.16b\n"
- "zip1 v1.16b, v27.16b, v25.16b\n"
- "ldr q24, [x25], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip1 v16.16b, v31.16b, v22.16b\n"
- "zip2 v22.16b, v31.16b, v22.16b\n"
- "ldr q21, [x22], #0x10\n"
- "ldr q20, [x20], #0x10\n"
- "zip1 v0.16b, v24.16b, v21.16b\n"
- "zip1 v31.16b, v23.16b, v20.16b\n"
- "zip1 v19.16b, v14.16b, v13.16b\n"
- "zip1 v18.16b, v30.16b, v29.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip2 v16.16b, v30.16b, v29.16b\n"
- "zip1 v17.16b, v12.16b, v11.16b\n"
- "str q22, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "zip2 v30.16b, v28.16b, v26.16b\n"
- "zip2 v29.16b, v27.16b, v25.16b\n"
- "str q18, [x21, #0x30]\n"
- "zip2 v28.16b, v24.16b, v21.16b\n"
- "zip2 v27.16b, v23.16b, v20.16b\n"
- "str q16, [x21, #0x40]\n"
- "zip2 v21.16b, v14.16b, v13.16b\n"
- "zip1 v16.16b, v10.16b, v9.16b\n"
- "str q17, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v20.16b, v10.16b, v9.16b\n"
- "zip2 v19.16b, v12.16b, v11.16b\n"
- "zip1 v18.16b, v6.16b, v5.16b\n"
- "zip2 v17.16b, v6.16b, v5.16b\n"
- "str q21, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v16.16b, v8.16b, v7.16b\n"
- "zip2 v26.16b, v8.16b, v7.16b\n"
- "str q20, [x21, #0x20]\n"
- "zip1 v25.16b, v2.16b, v1.16b\n"
- "zip1 v24.16b, v4.16b, v3.16b\n"
- "str q19, [x21, #0x30]\n"
- "zip2 v23.16b, v4.16b, v3.16b\n"
- "zip1 v22.16b, v0.16b, v31.16b\n"
- "str q18, [x21, #0x40]\n"
- "zip2 v21.16b, v2.16b, v1.16b\n"
- "zip1 v20.16b, v30.16b, v29.16b\n"
- "str q17, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v19.16b, v30.16b, v29.16b\n"
- "zip2 v18.16b, v0.16b, v31.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v28.16b, v27.16b\n"
- "zip2 v16.16b, v28.16b, v27.16b\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v11.16b, v27.16b, v25.16b\n"
+ "zip2 v10.16b, v26.16b, v24.16b\n"
+ "ldr q9, [x9], #0x10\n"
+ "ldr q8, [x26], #0x10\n"
+ "zip1 v7.16b, v23.16b, v21.16b\n"
+ "zip1 v6.16b, v22.16b, v20.16b\n"
+ "ldr q31, [x25], #0x10\n"
+ "ldr q30, [x24], #0x10\n"
+ "zip2 v5.16b, v23.16b, v21.16b\n"
+ "zip2 v4.16b, v22.16b, v20.16b\n"
+ "ldr q29, [x23], #0x10\n"
+ "ldr q28, [x22], #0x10\n"
+ "zip1 v27.16b, v19.16b, v17.16b\n"
+ "zip1 v26.16b, v18.16b, v16.16b\n"
+ "ldr q25, [x21], #0x10\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip2 v23.16b, v19.16b, v17.16b\n"
+ "zip2 v22.16b, v18.16b, v16.16b\n"
+ "zip1 v3.16b, v9.16b, v31.16b\n"
+ "zip1 v2.16b, v8.16b, v30.16b\n"
+ "zip1 v21.16b, v1.16b, v0.16b\n"
+ "zip2 v20.16b, v1.16b, v0.16b\n"
+ "zip1 v1.16b, v29.16b, v25.16b\n"
+ "zip1 v0.16b, v28.16b, v24.16b\n"
+ "zip1 v19.16b, v15.16b, v14.16b\n"
+ "zip1 v18.16b, v13.16b, v12.16b\n"
+ "zip2 v17.16b, v13.16b, v12.16b\n"
+ "zip1 v16.16b, v11.16b, v10.16b\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "zip2 v31.16b, v9.16b, v31.16b\n"
+ "zip2 v30.16b, v8.16b, v30.16b\n"
+ "str q19, [x27, #0x20]\n"
+ "zip2 v29.16b, v29.16b, v25.16b\n"
+ "zip2 v28.16b, v28.16b, v24.16b\n"
+ "str q18, [x27, #0x30]\n"
+ "zip2 v21.16b, v15.16b, v14.16b\n"
+ "zip1 v20.16b, v7.16b, v6.16b\n"
+ "str q17, [x27, #0x40]\n"
+ "zip2 v19.16b, v7.16b, v6.16b\n"
+ "zip2 v18.16b, v11.16b, v10.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v17.16b, v27.16b, v26.16b\n"
+ "zip2 v16.16b, v27.16b, v26.16b\n"
+ "str q21, [x27, #0x0]\n"
+ "zip1 v27.16b, v5.16b, v4.16b\n"
+ "zip2 v26.16b, v5.16b, v4.16b\n"
+ "str q20, [x27, #0x10]\n"
+ "zip1 v25.16b, v3.16b, v2.16b\n"
+ "zip1 v24.16b, v23.16b, v22.16b\n"
+ "str q19, [x27, #0x20]\n"
+ "zip2 v23.16b, v23.16b, v22.16b\n"
+ "zip1 v22.16b, v1.16b, v0.16b\n"
+ "str q18, [x27, #0x30]\n"
+ "zip2 v21.16b, v3.16b, v2.16b\n"
+ "zip1 v20.16b, v31.16b, v30.16b\n"
+ "str q17, [x27, #0x40]\n"
+ "zip2 v19.16b, v31.16b, v30.16b\n"
+ "zip2 v18.16b, v1.16b, v0.16b\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v17.16b, v29.16b, v28.16b\n"
+ "zip2 v16.16b, v29.16b, v28.16b\n"
+ "str q27, [x27, #0x0]\n"
+ "str q26, [x27, #0x10]\n"
+ "str q25, [x27, #0x20]\n"
+ "str q24, [x27, #0x30]\n"
+ "str q23, [x27, #0x40]\n"
+ "str q22, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x20]\n"
+ "str q18, [x27, #0x30]\n"
+ "str q17, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x28, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr d23, [x9], #0x8\n"
- "ldr d22, [x28], #0x8\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr d19, [x27], #0x8\n"
- "ldr d18, [x26], #0x8\n"
- "ldr d21, [x25], #0x8\n"
- "ldr d25, [x23], #0x8\n"
- "ldr d20, [x22], #0x8\n"
- "ldr d17, [x20], #0x8\n"
- "ld1 { v23.s }[2], [x9], #0x4\n"
- "ld1 { v22.s }[2], [x28], #0x4\n"
- "ld1 { v19.s }[2], [x27], #0x4\n"
- "ld1 { v18.s }[2], [x26], #0x4\n"
- "zip1 v24.16b, v23.16b, v19.16b\n"
- "zip1 v16.16b, v22.16b, v18.16b\n"
- "ld1 { v21.s }[2], [x25], #0x4\n"
- "ld1 { v25.s }[2], [x23], #0x4\n"
- "zip2 v19.16b, v23.16b, v19.16b\n"
- "zip2 v18.16b, v22.16b, v18.16b\n"
- "ld1 { v20.s }[2], [x22], #0x4\n"
- "ld1 { v17.s }[2], [x20], #0x4\n"
- "zip1 v23.16b, v21.16b, v20.16b\n"
- "zip1 v22.16b, v25.16b, v17.16b\n"
- "zip2 v21.16b, v21.16b, v20.16b\n"
- "zip2 v20.16b, v25.16b, v17.16b\n"
- "zip1 v17.16b, v24.16b, v16.16b\n"
- "zip2 v16.16b, v24.16b, v16.16b\n"
- "str q17, [x21, #0x0]\n"
+ "ldr d19, [x9], #0x8\n"
+ "ldr d23, [x26], #0x8\n"
+ "sub x28, x28, #0xc\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
+ "cmp x28, #0xc\n"
+ "ldr d22, [x23], #0x8\n"
+ "ldr d25, [x22], #0x8\n"
+ "ldr d21, [x21], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
+ "ld1 { v19.s }[2], [x9], #0x4\n"
+ "ld1 { v23.s }[2], [x26], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v17.s }[2], [x24], #0x4\n"
+ "ld1 { v22.s }[2], [x23], #0x4\n"
+ "ld1 { v25.s }[2], [x22], #0x4\n"
+ "ld1 { v21.s }[2], [x21], #0x4\n"
+ "ld1 { v16.s }[2], [x20], #0x4\n"
+ "zip1 v24.16b, v19.16b, v18.16b\n"
+ "zip1 v20.16b, v23.16b, v17.16b\n"
+ "zip2 v19.16b, v19.16b, v18.16b\n"
+ "zip2 v18.16b, v23.16b, v17.16b\n"
+ "zip1 v23.16b, v22.16b, v21.16b\n"
+ "zip1 v17.16b, v25.16b, v16.16b\n"
+ "zip2 v22.16b, v22.16b, v21.16b\n"
+ "zip2 v16.16b, v25.16b, v16.16b\n"
+ "zip1 v21.16b, v24.16b, v20.16b\n"
+ "zip2 v20.16b, v24.16b, v20.16b\n"
"zip1 v19.16b, v19.16b, v18.16b\n"
- "zip1 v18.16b, v23.16b, v22.16b\n"
- "str q16, [x21, #0x10]\n"
- "zip2 v17.16b, v23.16b, v22.16b\n"
- "zip1 v16.16b, v21.16b, v20.16b\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 v18.16b, v23.16b, v17.16b\n"
+ "zip2 v17.16b, v23.16b, v17.16b\n"
+ "zip1 v16.16b, v22.16b, v16.16b\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x20]\n"
+ "str q18, [x27, #0x30]\n"
+ "str q17, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cbz x28, 10f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
+ "ldr s23, [x9], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "sub x28, x28, #0x4\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s17, [x24], #0x4\n"
+ "cmp x28, #0x4\n"
+ "ldr s21, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s18, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "zip1 v19.16b, v23.16b, v19.16b\n"
+ "zip1 v17.16b, v22.16b, v17.16b\n"
+ "zip1 v18.16b, v21.16b, v18.16b\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr s17, [x22], #0x4\n"
- "ldr s16, [x20], #0x4\n"
- "zip1 v17.16b, v20.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "str q18, [x21, #0x0]\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x10\n"
+ "str q17, [x27, #0x0]\n"
+ "str q16, [x27, #0x30]\n"
+ "add x27, x27, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
+ "ldr b23, [x9], #0x1\n"
+ "ldr b22, [x26], #0x1\n"
+ "sub x28, x28, #0x1\n"
+ "ldr b19, [x25], #0x1\n"
+ "ldr b17, [x24], #0x1\n"
+ "cmp x28, #0x1\n"
+ "ldr b21, [x23], #0x1\n"
+ "ldr b20, [x22], #0x1\n"
+ "ldr b18, [x21], #0x1\n"
+ "ldr b16, [x20], #0x1\n"
+ "zip1 v19.16b, v23.16b, v19.16b\n"
+ "zip1 v17.16b, v22.16b, v17.16b\n"
+ "zip1 v18.16b, v21.16b, v18.16b\n"
+ "zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b20, [x25], #0x1\n"
- "ldr b19, [x23], #0x1\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr b17, [x22], #0x1\n"
- "ldr b16, [x20], #0x1\n"
- "zip1 v17.16b, v20.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "str s18, [x21, #0x0]\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x30]\n"
- "add x21, x21, #0x4\n"
+ "str s17, [x27, #0x0]\n"
+ "str s16, [x27, #0x30]\n"
+ "add x27, x27, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x60\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x9, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "csel x25, x25, %x[pad_row], GE\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x30\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x20, #0x30\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
+ "ldr q27, [x9], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
"sub x20, x20, #0x30\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x30\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v31.16b, v21.16b, v17.16b\n"
- "zip1 v30.16b, v20.16b, v16.16b\n"
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v29.16b, v21.16b, v17.16b\n"
- "zip2 v28.16b, v20.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v27.16b, v19.16b, v17.16b\n"
- "zip1 v26.16b, v18.16b, v16.16b\n"
- "ldr q22, [x9], #0x10\n"
- "ldr q21, [x28], #0x10\n"
- "zip2 v25.16b, v19.16b, v17.16b\n"
- "zip2 v20.16b, v18.16b, v16.16b\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x26], #0x10\n"
- "zip1 v24.16b, v22.16b, v19.16b\n"
- "zip1 v23.16b, v21.16b, v18.16b\n"
- "zip1 v16.16b, v31.16b, v30.16b\n"
- "zip2 v17.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.16b, v29.16b, v28.16b\n"
- "str q17, [x21, #0x10]\n"
- "zip2 v22.16b, v22.16b, v19.16b\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v21.16b, v21.16b, v18.16b\n"
- "zip2 v18.16b, v29.16b, v28.16b\n"
- "zip1 v16.16b, v27.16b, v26.16b\n"
- "zip2 v17.16b, v27.16b, v26.16b\n"
- "str q18, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v16.16b, v25.16b, v20.16b\n"
- "zip2 v20.16b, v25.16b, v20.16b\n"
- "str q17, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v19.16b, v24.16b, v23.16b\n"
- "zip2 v18.16b, v24.16b, v23.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v22.16b, v21.16b\n"
- "zip2 v16.16b, v22.16b, v21.16b\n"
- "str q20, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "ldr q26, [x9], #0x10\n"
+ "ldr q25, [x26], #0x10\n"
+ "ldr q20, [x25], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "ldr q24, [x9], #0x10\n"
+ "zip1 v18.16b, v27.16b, v21.16b\n"
+ "zip1 v16.16b, v22.16b, v17.16b\n"
+ "ldr q30, [x26], #0x10\n"
+ "ldr q23, [x25], #0x10\n"
+ "zip2 v29.16b, v27.16b, v21.16b\n"
+ "zip2 v28.16b, v22.16b, v17.16b\n"
+ "ldr q17, [x24], #0x10\n"
+ "zip1 v22.16b, v26.16b, v20.16b\n"
+ "zip1 v21.16b, v25.16b, v19.16b\n"
+ "zip2 v27.16b, v26.16b, v20.16b\n"
+ "zip2 v20.16b, v25.16b, v19.16b\n"
+ "zip1 v26.16b, v24.16b, v23.16b\n"
+ "zip1 v25.16b, v30.16b, v17.16b\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "zip2 v16.16b, v18.16b, v16.16b\n"
+ "zip1 v18.16b, v29.16b, v28.16b\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
+ "zip2 v23.16b, v30.16b, v17.16b\n"
+ "zip2 v17.16b, v29.16b, v28.16b\n"
+ "str q19, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v16.16b, v22.16b, v21.16b\n"
+ "zip2 v22.16b, v22.16b, v21.16b\n"
+ "str q18, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v21.16b, v27.16b, v20.16b\n"
+ "zip2 v20.16b, v27.16b, v20.16b\n"
+ "str q17, [x27, #0x0]\n"
+ "zip1 v19.16b, v26.16b, v25.16b\n"
+ "zip2 v18.16b, v26.16b, v25.16b\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v17.16b, v24.16b, v23.16b\n"
+ "zip2 v16.16b, v24.16b, v23.16b\n"
+ "str q22, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q18, [x27, #0x0]\n"
+ "str q17, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
"ldr d19, [x9], #0x8\n"
- "ldr d21, [x28], #0x8\n"
+ "ldr d21, [x26], #0x8\n"
"sub x20, x20, #0xc\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"cmp x20, #0xc\n"
- "ldr d18, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
"ld1 { v19.s }[2], [x9], #0x4\n"
- "ld1 { v21.s }[2], [x28], #0x4\n"
- "ld1 { v18.s }[2], [x27], #0x4\n"
- "ld1 { v16.s }[2], [x26], #0x4\n"
+ "ld1 { v21.s }[2], [x26], #0x4\n"
+ "ld1 { v18.s }[2], [x25], #0x4\n"
+ "ld1 { v16.s }[2], [x24], #0x4\n"
"zip1 v20.16b, v19.16b, v18.16b\n"
"zip1 v17.16b, v21.16b, v16.16b\n"
"zip2 v19.16b, v19.16b, v18.16b\n"
- "zip2 v18.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v20.16b, v17.16b\n"
+ "zip2 v16.16b, v21.16b, v16.16b\n"
+ "zip1 v18.16b, v20.16b, v17.16b\n"
"zip2 v17.16b, v20.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.16b, v19.16b, v18.16b\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "zip1 v16.16b, v19.16b, v16.16b\n"
+ "str q18, [x27, #0x0]\n"
+ "str q17, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
+ "ldr s18, [x26], #0x4\n"
"sub x20, x20, #0x4\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s16, [x24], #0x4\n"
"cmp x20, #0x4\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
+ "ldr b18, [x26], #0x1\n"
"sub x20, x20, #0x1\n"
+ "ldr b17, [x25], #0x1\n"
+ "ldr b16, [x24], #0x1\n"
"cmp x20, #0x1\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str s16, [x27, #0x0]\n"
+ "add x27, x27, #0x4\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
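
The rewritten kernels above and below all share one structure: each input row gets its own pointer (x9, x26, x25, ...), rows are interleaved element-wise with nested zip1/zip2 pairs, and the result is written out in fixed-width panels, with rows beyond the remaining height substituted by %x[pad_row] via csel and, new in this revision, partially filled tail panels pre-zeroed with a movi/str run before the remainder loops. As a rough guide only, here is a minimal scalar sketch of that access pattern, assuming a zero-filled pad_row buffer and ignoring the panel blocking and out_stride bookkeeping the assembly performs; transpose_interleave_ref is an illustrative name, not a function in the library.

#include <cstddef>
#include <cstdint>

// Scalar model of an N-row transpose-interleave of byte data: within each
// group of rows_per_group rows, output position j holds the column-j bytes
// { row0[j], row1[j], ..., rowN-1[j] }, which is the net effect of the
// nested zip1/zip2 pairs in the kernels above. Rows past `height` read
// from `pad_row`, mirroring the csel/%x[pad_row] selection.
void transpose_interleave_ref(uint8_t *out, const uint8_t *in, size_t width,
                              size_t in_stride, size_t height,
                              size_t rows_per_group, const uint8_t *pad_row)
{
    for (size_t r0 = 0; r0 < height; r0 += rows_per_group) {
        for (size_t j = 0; j < width; ++j) {
            for (size_t r = 0; r < rows_per_group; ++r) {
                *out++ = (r0 + r < height) ? in[(r0 + r) * in_stride + j]
                                           : pad_row[j];
            }
        }
    }
}

The vector code reaches the same layout without scalar gathers: one zip level interleaves adjacent row pairs, a second zip level interleaves the pairs, so a group of 4 or 8 rows is transposed in log2(N) register passes.
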
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp
index da0809d4d6..1eb49e290a 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_1x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,259 +41,268 @@ void a64_transpose_interleave_12_1x8(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "cmp %x[height], #0x7\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "cmp %x[height], #0x7\n"
- "add %x[in], x22, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GE\n"
+ "add %x[in], x20, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
+ "cmp %x[height], #0x5\n"
"csel x22, x22, %x[pad_row], GT\n"
"csel x23, x23, %x[pad_row], GE\n"
- "cmp %x[height], #0x5\n"
- "mov x21, %x[width]\n"
+ "cmp %x[height], #0x3\n"
"csel x24, x24, %x[pad_row], GT\n"
"csel x25, x25, %x[pad_row], GE\n"
- "cmp %x[height], #0x3\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x21, #0x30\n"
- "mov x20, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x28, #0x30\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q25, [x28], #0x10\n"
- "sub x21, x21, #0x30\n"
- "cmp x21, #0x30\n"
- "ldr q20, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x24], #0x10\n"
- "zip1 v7.16b, v21.16b, v19.16b\n"
- "zip1 v6.16b, v25.16b, v18.16b\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x22], #0x10\n"
- "zip1 v28.16b, v20.16b, v17.16b\n"
- "zip1 v27.16b, v24.16b, v16.16b\n"
+ "ldr q1, [x9], #0x10\n"
+ "ldr q0, [x26], #0x10\n"
+ "sub x28, x28, #0x30\n"
+ "ldr q31, [x25], #0x10\n"
+ "ldr q28, [x24], #0x10\n"
+ "cmp x28, #0x30\n"
+ "ldr q27, [x23], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
+ "ldr q24, [x20], #0x10\n"
"ldr q23, [x9], #0x10\n"
- "ldr q22, [x28], #0x10\n"
- "zip2 v5.16b, v21.16b, v19.16b\n"
- "zip2 v4.16b, v20.16b, v17.16b\n"
- "ldr q21, [x27], #0x10\n"
- "ldr q20, [x26], #0x10\n"
- "zip2 v3.16b, v25.16b, v18.16b\n"
- "zip2 v2.16b, v24.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x24], #0x10\n"
- "zip1 v1.16b, v23.16b, v19.16b\n"
- "zip1 v15.16b, v22.16b, v18.16b\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x22], #0x10\n"
- "zip1 v0.16b, v21.16b, v17.16b\n"
- "zip1 v31.16b, v20.16b, v16.16b\n"
- "ldr q26, [x9], #0x10\n"
- "ldr q30, [x28], #0x10\n"
- "zip2 v14.16b, v23.16b, v19.16b\n"
- "zip2 v13.16b, v21.16b, v17.16b\n"
- "ldr q25, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip2 v12.16b, v22.16b, v18.16b\n"
- "zip2 v11.16b, v20.16b, v16.16b\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x24], #0x10\n"
- "zip1 v10.16b, v26.16b, v23.16b\n"
- "zip1 v9.16b, v30.16b, v22.16b\n"
- "ldr q21, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
- "zip1 v29.16b, v25.16b, v21.16b\n"
- "zip1 v8.16b, v24.16b, v17.16b\n"
- "zip1 v19.16b, v7.16b, v28.16b\n"
- "zip1 v16.16b, v6.16b, v27.16b\n"
- "zip2 v28.16b, v7.16b, v28.16b\n"
- "zip2 v18.16b, v6.16b, v27.16b\n"
- "zip1 v27.16b, v5.16b, v4.16b\n"
- "zip1 v20.16b, v3.16b, v2.16b\n"
- "zip2 v7.16b, v26.16b, v23.16b\n"
- "zip2 v26.16b, v25.16b, v21.16b\n"
- "zip2 v6.16b, v30.16b, v22.16b\n"
- "zip2 v25.16b, v24.16b, v17.16b\n"
- "zip2 v5.16b, v5.16b, v4.16b\n"
- "zip2 v4.16b, v3.16b, v2.16b\n"
- "zip1 v3.16b, v1.16b, v0.16b\n"
- "zip1 v2.16b, v15.16b, v31.16b\n"
- "zip2 v1.16b, v1.16b, v0.16b\n"
- "zip2 v0.16b, v15.16b, v31.16b\n"
- "zip1 v31.16b, v14.16b, v13.16b\n"
- "zip1 v30.16b, v12.16b, v11.16b\n"
- "zip2 v24.16b, v14.16b, v13.16b\n"
- "zip2 v23.16b, v12.16b, v11.16b\n"
- "zip1 v22.16b, v10.16b, v29.16b\n"
- "zip1 v21.16b, v9.16b, v8.16b\n"
- "zip1 v17.16b, v19.16b, v16.16b\n"
- "zip2 v16.16b, v19.16b, v16.16b\n"
- "str q17, [x20, #0x0]\n"
- "zip1 v19.16b, v28.16b, v18.16b\n"
- "zip2 v18.16b, v28.16b, v18.16b\n"
- "str q16, [x20, #0x10]\n"
- "zip1 v17.16b, v27.16b, v20.16b\n"
- "zip2 v16.16b, v27.16b, v20.16b\n"
- "str q19, [x20, #0x20]\n"
- "str q18, [x20, #0x30]\n"
- "zip2 v29.16b, v10.16b, v29.16b\n"
- "zip2 v20.16b, v9.16b, v8.16b\n"
- "str q17, [x20, #0x40]\n"
- "zip1 v28.16b, v7.16b, v26.16b\n"
- "zip1 v27.16b, v6.16b, v25.16b\n"
- "str q16, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip2 v26.16b, v7.16b, v26.16b\n"
- "zip2 v25.16b, v6.16b, v25.16b\n"
- "zip1 v17.16b, v5.16b, v4.16b\n"
- "zip2 v16.16b, v5.16b, v4.16b\n"
- "str q17, [x20, #0x0]\n"
+ "ldr q30, [x26], #0x10\n"
+ "ldr q22, [x25], #0x10\n"
+ "ldr q21, [x24], #0x10\n"
+ "zip1 v15.16b, v1.16b, v27.16b\n"
+ "zip1 v9.16b, v0.16b, v26.16b\n"
+ "ldr q20, [x23], #0x10\n"
+ "ldr q19, [x22], #0x10\n"
+ "zip1 v18.16b, v31.16b, v25.16b\n"
+ "zip1 v29.16b, v28.16b, v24.16b\n"
+ "ldr q17, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip2 v14.16b, v1.16b, v27.16b\n"
+ "zip2 v13.16b, v31.16b, v25.16b\n"
+ "ldr q8, [x9], #0x10\n"
+ "ldr q7, [x26], #0x10\n"
+ "zip2 v12.16b, v0.16b, v26.16b\n"
+ "zip2 v6.16b, v28.16b, v24.16b\n"
+ "ldr q5, [x25], #0x10\n"
+ "ldr q4, [x24], #0x10\n"
+ "zip1 v3.16b, v23.16b, v20.16b\n"
+ "zip1 v11.16b, v30.16b, v19.16b\n"
+ "ldr q28, [x23], #0x10\n"
+ "ldr q27, [x22], #0x10\n"
+ "zip1 v2.16b, v22.16b, v17.16b\n"
+ "zip1 v1.16b, v21.16b, v16.16b\n"
+ "ldr q26, [x21], #0x10\n"
+ "ldr q25, [x20], #0x10\n"
+ "zip2 v24.16b, v23.16b, v20.16b\n"
+ "zip2 v23.16b, v22.16b, v17.16b\n"
+ "zip2 v22.16b, v30.16b, v19.16b\n"
+ "zip2 v21.16b, v21.16b, v16.16b\n"
+ "zip1 v0.16b, v8.16b, v28.16b\n"
+ "zip1 v10.16b, v7.16b, v27.16b\n"
+ "zip1 v31.16b, v5.16b, v26.16b\n"
+ "zip1 v30.16b, v4.16b, v25.16b\n"
+ "zip1 v20.16b, v15.16b, v18.16b\n"
+ "zip1 v19.16b, v9.16b, v29.16b\n"
+ "zip2 v18.16b, v15.16b, v18.16b\n"
+ "zip2 v16.16b, v9.16b, v29.16b\n"
+ "zip1 v29.16b, v14.16b, v13.16b\n"
+ "zip1 v17.16b, v12.16b, v6.16b\n"
+ "zip2 v9.16b, v8.16b, v28.16b\n"
+ "zip2 v28.16b, v5.16b, v26.16b\n"
+ "zip2 v8.16b, v7.16b, v27.16b\n"
+ "zip2 v27.16b, v4.16b, v25.16b\n"
+ "zip2 v7.16b, v14.16b, v13.16b\n"
+ "zip2 v6.16b, v12.16b, v6.16b\n"
+ "zip1 v5.16b, v3.16b, v2.16b\n"
+ "zip1 v4.16b, v11.16b, v1.16b\n"
+ "zip2 v3.16b, v3.16b, v2.16b\n"
+ "zip2 v2.16b, v11.16b, v1.16b\n"
+ "zip1 v26.16b, v24.16b, v23.16b\n"
+ "zip1 v25.16b, v22.16b, v21.16b\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
+ "zip2 v23.16b, v22.16b, v21.16b\n"
+ "zip1 v1.16b, v0.16b, v31.16b\n"
+ "zip1 v22.16b, v10.16b, v30.16b\n"
+ "zip1 v21.16b, v20.16b, v19.16b\n"
+ "zip2 v20.16b, v20.16b, v19.16b\n"
+ "zip1 v19.16b, v18.16b, v16.16b\n"
+ "zip2 v18.16b, v18.16b, v16.16b\n"
+ "zip1 v16.16b, v29.16b, v17.16b\n"
+ "zip2 v17.16b, v29.16b, v17.16b\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "zip2 v0.16b, v0.16b, v31.16b\n"
+ "zip2 v31.16b, v10.16b, v30.16b\n"
+ "str q19, [x27, #0x20]\n"
+ "zip1 v30.16b, v9.16b, v28.16b\n"
+ "zip1 v29.16b, v8.16b, v27.16b\n"
+ "str q18, [x27, #0x30]\n"
+ "zip2 v28.16b, v9.16b, v28.16b\n"
+ "zip2 v27.16b, v8.16b, v27.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip1 v21.16b, v7.16b, v6.16b\n"
+ "zip2 v16.16b, v7.16b, v6.16b\n"
+ "str q17, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v20.16b, v5.16b, v4.16b\n"
+ "zip2 v19.16b, v5.16b, v4.16b\n"
"zip1 v18.16b, v3.16b, v2.16b\n"
"zip2 v17.16b, v3.16b, v2.16b\n"
- "str q16, [x20, #0x10]\n"
- "zip1 v16.16b, v1.16b, v0.16b\n"
- "zip2 v19.16b, v1.16b, v0.16b\n"
- "str q18, [x20, #0x20]\n"
- "str q17, [x20, #0x30]\n"
- "zip1 v18.16b, v31.16b, v30.16b\n"
- "zip2 v17.16b, v31.16b, v30.16b\n"
- "str q16, [x20, #0x40]\n"
- "zip1 v16.16b, v24.16b, v23.16b\n"
+ "str q21, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v16.16b, v26.16b, v25.16b\n"
+ "zip2 v26.16b, v26.16b, v25.16b\n"
+ "str q20, [x27, #0x20]\n"
+ "zip1 v25.16b, v24.16b, v23.16b\n"
"zip2 v24.16b, v24.16b, v23.16b\n"
- "str q19, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 v23.16b, v22.16b, v21.16b\n"
- "zip2 v22.16b, v22.16b, v21.16b\n"
- "str q18, [x20, #0x0]\n"
- "zip1 v21.16b, v29.16b, v20.16b\n"
- "zip2 v20.16b, v29.16b, v20.16b\n"
- "str q17, [x20, #0x10]\n"
- "zip1 v19.16b, v28.16b, v27.16b\n"
- "zip2 v18.16b, v28.16b, v27.16b\n"
- "str q16, [x20, #0x20]\n"
- "zip1 v17.16b, v26.16b, v25.16b\n"
- "zip2 v16.16b, v26.16b, v25.16b\n"
- "str q24, [x20, #0x30]\n"
- "str q23, [x20, #0x40]\n"
- "str q22, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
- "str q21, [x20, #0x0]\n"
- "str q20, [x20, #0x10]\n"
- "str q19, [x20, #0x20]\n"
- "str q18, [x20, #0x30]\n"
- "str q17, [x20, #0x40]\n"
- "str q16, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
+ "str q19, [x27, #0x30]\n"
+ "zip1 v23.16b, v1.16b, v22.16b\n"
+ "zip2 v22.16b, v1.16b, v22.16b\n"
+ "str q18, [x27, #0x40]\n"
+ "zip1 v21.16b, v0.16b, v31.16b\n"
+ "zip2 v20.16b, v0.16b, v31.16b\n"
+ "str q17, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 v19.16b, v30.16b, v29.16b\n"
+ "zip2 v18.16b, v30.16b, v29.16b\n"
+ "str q16, [x27, #0x0]\n"
+ "zip1 v17.16b, v28.16b, v27.16b\n"
+ "zip2 v16.16b, v28.16b, v27.16b\n"
+ "str q26, [x27, #0x10]\n"
+ "str q25, [x27, #0x20]\n"
+ "str q24, [x27, #0x30]\n"
+ "str q23, [x27, #0x40]\n"
+ "str q22, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x20]\n"
+ "str q18, [x27, #0x30]\n"
+ "str q17, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x21, #0xc\n"
+ "cmp x28, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr d23, [x9], #0x8\n"
- "ldr d27, [x28], #0x8\n"
- "sub x21, x21, #0xc\n"
- "cmp x21, #0xc\n"
- "ldr d21, [x27], #0x8\n"
- "ldr d26, [x26], #0x8\n"
- "ldr d20, [x25], #0x8\n"
- "ldr d19, [x24], #0x8\n"
- "ldr d17, [x23], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "ld1 { v23.s }[2], [x9], #0x4\n"
- "ld1 { v27.s }[2], [x28], #0x4\n"
- "ld1 { v21.s }[2], [x27], #0x4\n"
- "ld1 { v26.s }[2], [x26], #0x4\n"
- "ld1 { v20.s }[2], [x25], #0x4\n"
- "ld1 { v19.s }[2], [x24], #0x4\n"
- "zip1 v25.16b, v23.16b, v20.16b\n"
- "zip1 v24.16b, v27.16b, v19.16b\n"
- "ld1 { v17.s }[2], [x23], #0x4\n"
- "ld1 { v16.s }[2], [x22], #0x4\n"
- "zip1 v22.16b, v21.16b, v17.16b\n"
- "zip1 v18.16b, v26.16b, v16.16b\n"
- "zip2 v23.16b, v23.16b, v20.16b\n"
- "zip2 v21.16b, v21.16b, v17.16b\n"
- "zip2 v20.16b, v27.16b, v19.16b\n"
- "zip2 v17.16b, v26.16b, v16.16b\n"
- "zip1 v19.16b, v25.16b, v22.16b\n"
- "zip1 v16.16b, v24.16b, v18.16b\n"
- "zip2 v22.16b, v25.16b, v22.16b\n"
- "zip2 v18.16b, v24.16b, v18.16b\n"
- "zip1 v21.16b, v23.16b, v21.16b\n"
- "zip1 v20.16b, v20.16b, v17.16b\n"
- "zip1 v17.16b, v19.16b, v16.16b\n"
- "zip2 v16.16b, v19.16b, v16.16b\n"
- "str q17, [x20, #0x0]\n"
- "zip1 v19.16b, v22.16b, v18.16b\n"
- "zip2 v18.16b, v22.16b, v18.16b\n"
- "str q16, [x20, #0x10]\n"
- "zip1 v17.16b, v21.16b, v20.16b\n"
- "zip2 v16.16b, v21.16b, v20.16b\n"
- "str q19, [x20, #0x20]\n"
- "str q18, [x20, #0x30]\n"
- "str q17, [x20, #0x40]\n"
- "str q16, [x20, #0x50]\n"
- "add x20, x20, %x[out_stride]\n"
+ "ldr d22, [x9], #0x8\n"
+ "ldr d23, [x26], #0x8\n"
+ "sub x28, x28, #0xc\n"
+ "ldr d21, [x25], #0x8\n"
+ "ldr d20, [x24], #0x8\n"
+ "cmp x28, #0xc\n"
+ "ldr d18, [x23], #0x8\n"
+ "ldr d17, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d16, [x20], #0x8\n"
+ "ld1 { v22.s }[2], [x9], #0x4\n"
+ "ld1 { v23.s }[2], [x26], #0x4\n"
+ "ld1 { v21.s }[2], [x25], #0x4\n"
+ "ld1 { v20.s }[2], [x24], #0x4\n"
+ "ld1 { v18.s }[2], [x23], #0x4\n"
+ "ld1 { v17.s }[2], [x22], #0x4\n"
+ "ld1 { v19.s }[2], [x21], #0x4\n"
+ "ld1 { v16.s }[2], [x20], #0x4\n"
+ "zip1 v25.16b, v22.16b, v18.16b\n"
+ "zip1 v24.16b, v23.16b, v17.16b\n"
+ "zip2 v22.16b, v22.16b, v18.16b\n"
+ "zip2 v23.16b, v23.16b, v17.16b\n"
+ "zip1 v18.16b, v21.16b, v19.16b\n"
+ "zip1 v17.16b, v20.16b, v16.16b\n"
+ "zip2 v21.16b, v21.16b, v19.16b\n"
+ "zip2 v16.16b, v20.16b, v16.16b\n"
+ "zip1 v20.16b, v25.16b, v18.16b\n"
+ "zip1 v19.16b, v24.16b, v17.16b\n"
+ "zip2 v18.16b, v25.16b, v18.16b\n"
+ "zip2 v17.16b, v24.16b, v17.16b\n"
+ "zip1 v22.16b, v22.16b, v21.16b\n"
+ "zip1 v16.16b, v23.16b, v16.16b\n"
+ "zip1 v21.16b, v20.16b, v19.16b\n"
+ "zip2 v20.16b, v20.16b, v19.16b\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "zip2 v18.16b, v18.16b, v17.16b\n"
+ "zip1 v17.16b, v22.16b, v16.16b\n"
+ "zip2 v16.16b, v22.16b, v16.16b\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x20]\n"
+ "str q18, [x27, #0x30]\n"
+ "str q17, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x21, #0x4\n"
+ "cbz x28, 10f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s18, [x9], #0x4\n"
- "ldr s19, [x28], #0x4\n"
- "sub x21, x21, #0x4\n"
- "cmp x21, #0x4\n"
- "ldr s21, [x27], #0x4\n"
- "ldr s20, [x26], #0x4\n"
- "ldr s17, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "zip1 v18.16b, v18.16b, v17.16b\n"
- "zip1 v19.16b, v19.16b, v16.16b\n"
- "ldr s17, [x23], #0x4\n"
- "ldr s16, [x22], #0x4\n"
+ "ldr s23, [x9], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "sub x28, x28, #0x4\n"
+ "ldr s21, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "cmp x28, #0x4\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "zip1 v18.16b, v23.16b, v18.16b\n"
+ "zip1 v19.16b, v22.16b, v19.16b\n"
"zip1 v17.16b, v21.16b, v17.16b\n"
"zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v18.16b, v18.16b, v17.16b\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
"zip1 v17.16b, v18.16b, v16.16b\n"
"zip2 v16.16b, v18.16b, v16.16b\n"
- "str q17, [x20, #0x0]\n"
- "str q16, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "str q17, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x21, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "sub x21, x21, #0x1\n"
- "cmp x21, #0x1\n"
- "ldr b21, [x27], #0x1\n"
- "ldr b20, [x26], #0x1\n"
- "ldr b17, [x25], #0x1\n"
- "ldr b16, [x24], #0x1\n"
- "zip1 v19.16b, v19.16b, v17.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "ldr b17, [x23], #0x1\n"
- "ldr b16, [x22], #0x1\n"
+ "ldr b23, [x9], #0x1\n"
+ "ldr b22, [x26], #0x1\n"
+ "sub x28, x28, #0x1\n"
+ "ldr b21, [x25], #0x1\n"
+ "ldr b20, [x24], #0x1\n"
+ "cmp x28, #0x1\n"
+ "ldr b19, [x23], #0x1\n"
+ "ldr b18, [x22], #0x1\n"
+ "ldr b17, [x21], #0x1\n"
+ "ldr b16, [x20], #0x1\n"
+ "zip1 v19.16b, v23.16b, v19.16b\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
"zip1 v17.16b, v21.16b, v17.16b\n"
"zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str d16, [x20, #0x0]\n"
- "add x20, x20, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x60\n"
"bge 1b\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp
index cef468e9cc..94726570d4 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,287 +40,308 @@ void a64_transpose_interleave_12_2x2(uint16_t *out, const uint16_t *in, size_t w
__asm__ __volatile__(
"cmp %x[height], #0x8\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "cmp x28, #0x18\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "sub x24, x24, #0x18\n"
- "zip1 v10.8h, v19.8h, v18.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip2 v9.8h, v19.8h, v18.8h\n"
- "zip1 v8.8h, v17.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
+ "ldr q22, [x9], #0x10\n"
+ "ldr q21, [x26], #0x10\n"
+ "sub x28, x28, #0x18\n"
+ "ldr q20, [x25], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "cmp x28, #0x18\n"
"ldr q18, [x23], #0x10\n"
- "zip2 v7.8h, v17.8h, v16.8h\n"
- "zip1 v6.8h, v19.8h, v18.8h\n"
"ldr q17, [x22], #0x10\n"
+ "ldr q23, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "zip2 v5.8h, v19.8h, v18.8h\n"
- "zip1 v4.8h, v17.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip1 v3.8h, v21.8h, v18.8h\n"
- "zip2 v2.8h, v17.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v1.8h, v17.8h, v16.8h\n"
- "cmp x24, #0x18\n"
+ "zip1 v8.8h, v22.8h, v21.8h\n"
+ "zip2 v7.8h, v22.8h, v21.8h\n"
+ "ldr q22, [x9], #0x10\n"
+ "ldr q21, [x26], #0x10\n"
+ "zip1 v6.8h, v20.8h, v19.8h\n"
+ "zip2 v5.8h, v20.8h, v19.8h\n"
"ldr q20, [x25], #0x10\n"
- "ldr q19, [x23], #0x10\n"
- "zip1 v0.8h, v20.8h, v19.8h\n"
- "zip2 v31.8h, v21.8h, v18.8h\n"
- "ldr q30, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
- "zip1 v28.8h, v30.8h, v29.8h\n"
- "zip2 v27.8h, v17.8h, v16.8h\n"
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "zip1 v26.8h, v17.8h, v16.8h\n"
- "zip2 v25.8h, v17.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v24.8h, v17.8h, v16.8h\n"
- "zip2 v23.8h, v17.8h, v16.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip2 v22.8h, v20.8h, v19.8h\n"
- "zip1 v21.8h, v18.8h, v17.8h\n"
- "ldr q20, [x22], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip1 v4.8h, v18.8h, v17.8h\n"
+ "zip2 v3.8h, v18.8h, v17.8h\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "zip1 v2.8h, v23.8h, v16.8h\n"
+ "zip2 v1.8h, v23.8h, v16.8h\n"
+ "ldr q23, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q10, [x21, #0x0]\n"
- "zip2 v19.8h, v18.8h, v17.8h\n"
- "str q9, [x21, #0x10]\n"
- "zip2 v18.8h, v30.8h, v29.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "str q3, [x21, #0x20]\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q8, [x21, #0x30]\n"
- "str q7, [x21, #0x40]\n"
- "str q1, [x21, #0x50]\n"
- "str q6, [x21, #0x60]\n"
- "str q5, [x21, #0x70]\n"
- "str q0, [x21, #0x80]\n"
- "str q4, [x21, #0x90]\n"
- "str q2, [x21, #0xa0]\n"
- "str q28, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q31, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
- "str q24, [x21, #0x40]\n"
- "str q23, [x21, #0x50]\n"
- "str q22, [x21, #0x60]\n"
- "str q21, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 v0.8h, v22.8h, v21.8h\n"
+ "zip2 v31.8h, v22.8h, v21.8h\n"
+ "ldr q22, [x9], #0x10\n"
+ "ldr q21, [x26], #0x10\n"
+ "zip1 v30.8h, v20.8h, v19.8h\n"
+ "zip2 v29.8h, v20.8h, v19.8h\n"
+ "ldr q20, [x25], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip1 v28.8h, v18.8h, v17.8h\n"
+ "zip2 v27.8h, v18.8h, v17.8h\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "zip1 v26.8h, v23.8h, v16.8h\n"
+ "zip2 v25.8h, v23.8h, v16.8h\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "str q8, [x27, #0x0]\n"
+ "zip1 v23.8h, v22.8h, v21.8h\n"
+ "str q7, [x27, #0x10]\n"
+ "zip2 v22.8h, v22.8h, v21.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "str q0, [x27, #0x20]\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "str q6, [x27, #0x30]\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "zip1 v17.8h, v24.8h, v16.8h\n"
+ "str q5, [x27, #0x40]\n"
+ "zip2 v16.8h, v24.8h, v16.8h\n"
+ "str q30, [x27, #0x50]\n"
+ "str q4, [x27, #0x60]\n"
+ "str q3, [x27, #0x70]\n"
+ "str q28, [x27, #0x80]\n"
+ "str q2, [x27, #0x90]\n"
+ "str q1, [x27, #0xa0]\n"
+ "str q26, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q31, [x27, #0x0]\n"
+ "str q23, [x27, #0x10]\n"
+ "str q22, [x27, #0x20]\n"
+ "str q29, [x27, #0x30]\n"
+ "str q21, [x27, #0x40]\n"
+ "str q20, [x27, #0x50]\n"
+ "str q27, [x27, #0x60]\n"
+ "str q19, [x27, #0x70]\n"
+ "str q18, [x27, #0x80]\n"
+ "str q25, [x27, #0x90]\n"
+ "str q17, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x28, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x26], #0x10\n"
- "zip1 v28.8h, v17.8h, v16.8h\n"
- "zip2 v27.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v26.8h, v19.8h, v18.8h\n"
- "zip2 v25.8h, v19.8h, v18.8h\n"
- "ldr q19, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
- "zip1 v24.8h, v17.8h, v16.8h\n"
- "zip2 v23.8h, v17.8h, v16.8h\n"
- "ldr d17, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
- "zip1 v22.8h, v17.8h, v16.8h\n"
- "zip1 v21.8h, v19.8h, v18.8h\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "zip2 v19.8h, v19.8h, v18.8h\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
+ "ldr q23, [x9], #0x10\n"
+ "ldr q19, [x26], #0x10\n"
+ "sub x28, x28, #0xc\n"
+ "ldr q22, [x25], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "cmp x28, #0xc\n"
+ "ldr q21, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip1 v31.8h, v23.8h, v19.8h\n"
+ "zip2 v30.8h, v23.8h, v19.8h\n"
+ "ldr d29, [x9], #0x8\n"
+ "ldr d19, [x26], #0x8\n"
+ "zip1 v28.8h, v22.8h, v18.8h\n"
+ "zip2 v27.8h, v22.8h, v18.8h\n"
+ "ldr d26, [x25], #0x8\n"
+ "ldr d18, [x24], #0x8\n"
+ "zip1 v25.8h, v21.8h, v17.8h\n"
+ "zip2 v24.8h, v21.8h, v17.8h\n"
+ "ldr d23, [x23], #0x8\n"
"ldr d17, [x22], #0x8\n"
+ "zip1 v22.8h, v20.8h, v16.8h\n"
+ "zip2 v21.8h, v20.8h, v16.8h\n"
+ "ldr d20, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "zip1 v16.8h, v17.8h, v16.8h\n"
- "str q28, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q26, [x21, #0x30]\n"
- "str q25, [x21, #0x40]\n"
- "str q20, [x21, #0x50]\n"
- "str q24, [x21, #0x60]\n"
- "str q23, [x21, #0x70]\n"
- "str q18, [x21, #0x80]\n"
- "str q21, [x21, #0x90]\n"
- "str q19, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 v19.8h, v29.8h, v19.8h\n"
+ "str q31, [x27, #0x0]\n"
+ "zip1 v18.8h, v26.8h, v18.8h\n"
+ "str q30, [x27, #0x10]\n"
+ "zip1 v17.8h, v23.8h, v17.8h\n"
+ "zip1 v16.8h, v20.8h, v16.8h\n"
+ "str q19, [x27, #0x20]\n"
+ "str q28, [x27, #0x30]\n"
+ "str q27, [x27, #0x40]\n"
+ "str q18, [x27, #0x50]\n"
+ "str q25, [x27, #0x60]\n"
+ "str q24, [x27, #0x70]\n"
+ "str q17, [x27, #0x80]\n"
+ "str q22, [x27, #0x90]\n"
+ "str q21, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cbz x28, 10f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "str q16, [x27, #0x80]\n"
+ "str q16, [x27, #0x90]\n"
+ "str q16, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v20.8h, v19.8h, v18.8h\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
+ "ldr d23, [x9], #0x8\n"
+ "ldr d18, [x26], #0x8\n"
+ "sub x28, x28, #0x4\n"
+ "ldr d22, [x25], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "cmp x28, #0x4\n"
+ "ldr d21, [x23], #0x8\n"
"ldr d17, [x22], #0x8\n"
- "ldr d16, [x20], #0x8\n"
- "str q20, [x21, #0x0]\n"
- "zip1 v16.8h, v17.8h, v16.8h\n"
- "str q19, [x21, #0x30]\n"
- "str q18, [x21, #0x60]\n"
- "str q16, [x21, #0x90]\n"
- "add x21, x21, #0x10\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d19, [x20], #0x8\n"
+ "zip1 v18.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v16.8h\n"
+ "zip1 v17.8h, v21.8h, v17.8h\n"
+ "str q18, [x27, #0x0]\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v16.8h, v20.8h, v19.8h\n"
+ "str q17, [x27, #0x60]\n"
+ "str q16, [x27, #0x90]\n"
+ "add x27, x27, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
- "zip1 v20.8h, v19.8h, v18.8h\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
- "ldr h17, [x25], #0x2\n"
- "ldr h16, [x23], #0x2\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
+ "ldr h23, [x9], #0x2\n"
+ "ldr h18, [x26], #0x2\n"
+ "sub x28, x28, #0x1\n"
+ "ldr h22, [x25], #0x2\n"
+ "ldr h16, [x24], #0x2\n"
+ "cmp x28, #0x1\n"
+ "ldr h21, [x23], #0x2\n"
"ldr h17, [x22], #0x2\n"
- "ldr h16, [x20], #0x2\n"
- "str s20, [x21, #0x0]\n"
- "zip1 v16.8h, v17.8h, v16.8h\n"
- "str s19, [x21, #0x30]\n"
- "str s18, [x21, #0x60]\n"
- "str s16, [x21, #0x90]\n"
- "add x21, x21, #0x4\n"
+ "ldr h20, [x21], #0x2\n"
+ "ldr h19, [x20], #0x2\n"
+ "zip1 v18.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v16.8h\n"
+ "zip1 v17.8h, v21.8h, v17.8h\n"
+ "str s18, [x27, #0x0]\n"
+ "str s16, [x27, #0x30]\n"
+ "zip1 v16.8h, v20.8h, v19.8h\n"
+ "str s17, [x27, #0x60]\n"
+ "str s16, [x27, #0x90]\n"
+ "add x27, x27, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0xc0\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x9, %x[in]\n"
"mov x20, %x[width]\n"
- "add x28, x9, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x28, %x[in_stride]\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x18\n"
- "mov x21, %x[out]\n"
+ "mov x27, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "sub x20, x20, #0x18\n"
- "zip1 v22.8h, v17.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v17.8h, v17.8h, v16.8h\n"
- "zip1 v20.8h, v21.8h, v18.8h\n"
+ "add x26, x9, %x[in_stride]\n"
+ "add %x[in], x26, %x[in_stride]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x20, #0x18\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
"ldr q19, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "str q22, [x21, #0x0]\n"
+ "ldr q16, [x26], #0x10\n"
+ "sub x20, x20, #0x18\n"
+ "ldr q22, [x9], #0x10\n"
"cmp x20, #0x18\n"
- "str q17, [x21, #0x10]\n"
- "zip2 v18.8h, v21.8h, v18.8h\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q21, [x9], #0x10\n"
+ "ldr q20, [x26], #0x10\n"
"zip1 v17.8h, v19.8h, v16.8h\n"
- "str q20, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
"zip2 v16.8h, v19.8h, v16.8h\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "zip1 v19.8h, v22.8h, v18.8h\n"
+ "zip2 v18.8h, v22.8h, v18.8h\n"
+ "str q17, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "zip1 v17.8h, v21.8h, v20.8h\n"
+ "zip2 v16.8h, v21.8h, v20.8h\n"
+ "str q19, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q18, [x27, #0x0]\n"
+ "str q17, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
"ldr q20, [x9], #0x10\n"
- "ldr q17, [x28], #0x10\n"
+ "ldr q17, [x26], #0x10\n"
"sub x20, x20, #0xc\n"
- "cmp x20, #0xc\n"
"ldr d19, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
+ "cmp x20, #0xc\n"
+ "ldr d16, [x26], #0x8\n"
"zip1 v18.8h, v20.8h, v17.8h\n"
"zip2 v17.8h, v20.8h, v17.8h\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "str q18, [x27, #0x0]\n"
+ "str q17, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr d17, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
+ "ldr d16, [x26], #0x8\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr h17, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
+ "ldr h16, [x26], #0x2\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str s16, [x27, #0x0]\n"
+ "add x27, x27, #0x4\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
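
The 16-bit transforms below differ from the byte variants mainly in element size: zip1/zip2 on .8h lanes interleave two rows of uint16_t pairwise rather than byte-wise, and the 2x4 kernel chains two such levels. A sketch of the single pairing step, under the same caveats as the earlier sketch (illustrative name, no panel blocking, no pad_row handling):

#include <cstddef>
#include <cstdint>

// One zip level on 16-bit data: the output alternates elements of the two
// input rows (a0, b0, a1, b1, ...), which is what a zip1/zip2 .8h pair
// computes across a full row.
void zip_rows_u16(uint16_t *out, const uint16_t *row_a, const uint16_t *row_b,
                  size_t n)
{
    for (size_t j = 0; j < n; ++j) {
        out[2 * j + 0] = row_a[j]; // even lanes from the first row
        out[2 * j + 1] = row_b[j]; // odd lanes from the second row
    }
}
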
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp
index 4c02d0534d..b9dca66e7b 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,388 +40,412 @@ void a64_transpose_interleave_12_2x4(uint16_t *out, const uint16_t *in, size_t w
__asm__ __volatile__(
"cmp %x[height], #0x8\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "cmp x28, #0x18\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v13.8h, v21.8h, v17.8h\n"
- "zip1 v12.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v11.8h, v21.8h, v17.8h\n"
- "zip2 v10.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v9.8h, v19.8h, v17.8h\n"
- "zip1 v8.8h, v18.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v7.8h, v19.8h, v17.8h\n"
- "zip2 v6.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v27.8h, v21.8h, v17.8h\n"
- "zip1 v22.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v5.8h, v21.8h, v17.8h\n"
- "zip2 v4.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v26.8h, v19.8h, v17.8h\n"
- "zip1 v25.8h, v18.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v3.8h, v19.8h, v17.8h\n"
- "zip2 v2.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v24.8h, v21.8h, v17.8h\n"
- "zip1 v23.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v1.8h, v21.8h, v17.8h\n"
- "zip2 v0.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q27, [x9], #0x10\n"
+ "ldr q26, [x26], #0x10\n"
+ "sub x28, x28, #0x18\n"
+ "ldr q25, [x25], #0x10\n"
+ "ldr q24, [x24], #0x10\n"
+ "cmp x28, #0x18\n"
+ "ldr q23, [x23], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
+ "ldr q20, [x20], #0x10\n"
+ "ldr q19, [x9], #0x10\n"
+ "ldr q18, [x26], #0x10\n"
+ "zip1 v15.8h, v27.8h, v25.8h\n"
+ "zip1 v14.8h, v26.8h, v24.8h\n"
+ "ldr q17, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v13.8h, v27.8h, v25.8h\n"
+ "zip2 v12.8h, v26.8h, v24.8h\n"
+ "ldr q1, [x23], #0x10\n"
+ "ldr q0, [x22], #0x10\n"
+ "zip1 v11.8h, v23.8h, v21.8h\n"
+ "zip1 v10.8h, v22.8h, v20.8h\n"
+ "ldr q31, [x21], #0x10\n"
+ "ldr q24, [x20], #0x10\n"
+ "zip2 v30.8h, v23.8h, v21.8h\n"
+ "zip2 v29.8h, v22.8h, v20.8h\n"
+ "ldr q23, [x9], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
+ "zip1 v28.8h, v19.8h, v17.8h\n"
+ "zip1 v27.8h, v18.8h, v16.8h\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v9.8h, v19.8h, v17.8h\n"
+ "zip2 v8.8h, v18.8h, v16.8h\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v26.8h, v1.8h, v31.8h\n"
+ "zip1 v25.8h, v0.8h, v24.8h\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "zip1 v31.8h, v19.8h, v17.8h\n"
- "zip1 v30.8h, v18.8h, v16.8h\n"
- "zip2 v29.8h, v19.8h, v17.8h\n"
- "zip2 v28.8h, v18.8h, v16.8h\n"
- "zip1 v17.8h, v13.8h, v12.8h\n"
- "zip2 v16.8h, v13.8h, v12.8h\n"
- "str q17, [x21, #0x0]\n"
+ "zip2 v7.8h, v1.8h, v31.8h\n"
+ "zip2 v6.8h, v0.8h, v24.8h\n"
+ "zip1 v5.8h, v23.8h, v21.8h\n"
+ "zip1 v24.8h, v22.8h, v20.8h\n"
+ "zip2 v4.8h, v23.8h, v21.8h\n"
+ "zip2 v3.8h, v22.8h, v20.8h\n"
+ "zip1 v2.8h, v19.8h, v17.8h\n"
+ "zip1 v1.8h, v18.8h, v16.8h\n"
+ "zip2 v0.8h, v19.8h, v17.8h\n"
+ "zip2 v31.8h, v18.8h, v16.8h\n"
+ "zip1 v16.8h, v15.8h, v14.8h\n"
+ "zip2 v23.8h, v15.8h, v14.8h\n"
+ "zip1 v22.8h, v13.8h, v12.8h\n"
+ "zip2 v21.8h, v13.8h, v12.8h\n"
+ "zip1 v20.8h, v28.8h, v27.8h\n"
+ "zip2 v19.8h, v28.8h, v27.8h\n"
"zip1 v18.8h, v11.8h, v10.8h\n"
"zip2 v17.8h, v11.8h, v10.8h\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v16.8h, v27.8h, v22.8h\n"
- "zip2 v22.8h, v27.8h, v22.8h\n"
- "str q18, [x21, #0x20]\n"
- "zip1 v21.8h, v9.8h, v8.8h\n"
- "zip2 v20.8h, v9.8h, v8.8h\n"
- "str q17, [x21, #0x30]\n"
- "zip1 v19.8h, v7.8h, v6.8h\n"
- "zip2 v18.8h, v7.8h, v6.8h\n"
- "str q16, [x21, #0x40]\n"
- "zip1 v17.8h, v26.8h, v25.8h\n"
- "zip2 v16.8h, v26.8h, v25.8h\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "zip1 v27.8h, v5.8h, v4.8h\n"
- "zip2 v26.8h, v5.8h, v4.8h\n"
- "str q20, [x21, #0x70]\n"
- "zip1 v25.8h, v24.8h, v23.8h\n"
- "zip2 v24.8h, v24.8h, v23.8h\n"
- "str q19, [x21, #0x80]\n"
- "zip1 v23.8h, v1.8h, v0.8h\n"
- "zip2 v22.8h, v1.8h, v0.8h\n"
- "str q18, [x21, #0x90]\n"
- "zip1 v21.8h, v3.8h, v2.8h\n"
- "zip2 v20.8h, v3.8h, v2.8h\n"
- "str q17, [x21, #0xa0]\n"
- "zip1 v19.8h, v31.8h, v30.8h\n"
- "zip2 v18.8h, v31.8h, v30.8h\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v17.8h, v29.8h, v28.8h\n"
- "zip2 v16.8h, v29.8h, v28.8h\n"
- "str q27, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x27, #0x0]\n"
+ "zip1 v16.8h, v30.8h, v29.8h\n"
+ "zip2 v30.8h, v30.8h, v29.8h\n"
+ "str q23, [x27, #0x10]\n"
+ "zip1 v29.8h, v26.8h, v25.8h\n"
+ "zip2 v28.8h, v26.8h, v25.8h\n"
+ "str q22, [x27, #0x20]\n"
+ "str q21, [x27, #0x30]\n"
+ "zip1 v27.8h, v9.8h, v8.8h\n"
+ "zip2 v26.8h, v9.8h, v8.8h\n"
+ "str q20, [x27, #0x40]\n"
+ "zip1 v25.8h, v5.8h, v24.8h\n"
+ "zip2 v24.8h, v5.8h, v24.8h\n"
+ "str q19, [x27, #0x50]\n"
+ "zip1 v23.8h, v4.8h, v3.8h\n"
+ "zip2 v22.8h, v4.8h, v3.8h\n"
+ "str q18, [x27, #0x60]\n"
+ "zip1 v21.8h, v7.8h, v6.8h\n"
+ "zip2 v20.8h, v7.8h, v6.8h\n"
+ "str q17, [x27, #0x70]\n"
+ "zip1 v19.8h, v2.8h, v1.8h\n"
+ "zip2 v18.8h, v2.8h, v1.8h\n"
+ "str q16, [x27, #0x80]\n"
+ "zip1 v17.8h, v0.8h, v31.8h\n"
+ "zip2 v16.8h, v0.8h, v31.8h\n"
+ "str q30, [x27, #0x90]\n"
+ "str q29, [x27, #0xa0]\n"
+ "str q28, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q27, [x27, #0x0]\n"
+ "str q26, [x27, #0x10]\n"
+ "str q25, [x27, #0x20]\n"
+ "str q24, [x27, #0x30]\n"
+ "str q23, [x27, #0x40]\n"
+ "str q22, [x27, #0x50]\n"
+ "str q21, [x27, #0x60]\n"
+ "str q20, [x27, #0x70]\n"
+ "str q19, [x27, #0x80]\n"
+ "str q18, [x27, #0x90]\n"
+ "str q17, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x28, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v25.8h, v19.8h, v17.8h\n"
- "zip1 v24.8h, v18.8h, v16.8h\n"
+ "ldr q23, [x9], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
+ "sub x28, x28, #0xc\n"
"ldr q21, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip2 v31.8h, v19.8h, v17.8h\n"
- "zip2 v23.8h, v18.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v30.8h, v21.8h, v17.8h\n"
- "zip1 v29.8h, v20.8h, v16.8h\n"
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "zip2 v28.8h, v21.8h, v17.8h\n"
- "zip2 v27.8h, v20.8h, v16.8h\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v26.8h, v19.8h, v17.8h\n"
- "zip1 v22.8h, v18.8h, v16.8h\n"
- "ldr d21, [x25], #0x8\n"
- "ldr d20, [x23], #0x8\n"
- "zip1 v19.8h, v25.8h, v24.8h\n"
- "zip2 v18.8h, v25.8h, v24.8h\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr q16, [x24], #0x10\n"
+ "cmp x28, #0xc\n"
+ "ldr q1, [x23], #0x10\n"
+ "ldr q0, [x22], #0x10\n"
+ "ldr q28, [x21], #0x10\n"
+ "ldr q27, [x20], #0x10\n"
+ "ldr d20, [x9], #0x8\n"
+ "ldr d19, [x26], #0x8\n"
+ "zip1 v26.8h, v23.8h, v21.8h\n"
+ "zip1 v25.8h, v22.8h, v16.8h\n"
+ "ldr d18, [x25], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
+ "zip2 v31.8h, v23.8h, v21.8h\n"
+ "zip2 v24.8h, v22.8h, v16.8h\n"
+ "ldr d23, [x23], #0x8\n"
+ "ldr d22, [x22], #0x8\n"
+ "zip1 v30.8h, v1.8h, v28.8h\n"
+ "zip1 v29.8h, v0.8h, v27.8h\n"
+ "ldr d21, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "zip1 v25.8h, v21.8h, v17.8h\n"
- "zip1 v24.8h, v20.8h, v16.8h\n"
- "zip1 v17.8h, v31.8h, v23.8h\n"
- "zip2 v16.8h, v31.8h, v23.8h\n"
- "str q19, [x21, #0x0]\n"
- "zip1 v23.8h, v26.8h, v22.8h\n"
- "zip2 v22.8h, v26.8h, v22.8h\n"
- "str q18, [x21, #0x10]\n"
+ "zip2 v28.8h, v1.8h, v28.8h\n"
+ "zip2 v27.8h, v0.8h, v27.8h\n"
+ "zip1 v20.8h, v20.8h, v18.8h\n"
+ "zip1 v19.8h, v19.8h, v17.8h\n"
+ "zip1 v18.8h, v26.8h, v25.8h\n"
+ "zip2 v17.8h, v26.8h, v25.8h\n"
+ "zip1 v26.8h, v23.8h, v21.8h\n"
+ "zip1 v25.8h, v22.8h, v16.8h\n"
+ "zip1 v16.8h, v31.8h, v24.8h\n"
+ "zip2 v24.8h, v31.8h, v24.8h\n"
+ "zip1 v23.8h, v20.8h, v19.8h\n"
+ "zip2 v22.8h, v20.8h, v19.8h\n"
+ "str q18, [x27, #0x0]\n"
"zip1 v21.8h, v30.8h, v29.8h\n"
"zip2 v20.8h, v30.8h, v29.8h\n"
- "str q17, [x21, #0x20]\n"
+ "str q17, [x27, #0x10]\n"
"zip1 v19.8h, v28.8h, v27.8h\n"
"zip2 v18.8h, v28.8h, v27.8h\n"
- "str q16, [x21, #0x30]\n"
- "zip1 v17.8h, v25.8h, v24.8h\n"
- "zip2 v16.8h, v25.8h, v24.8h\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x27, #0x20]\n"
+ "zip1 v17.8h, v26.8h, v25.8h\n"
+ "zip2 v16.8h, v26.8h, v25.8h\n"
+ "str q24, [x27, #0x30]\n"
+ "str q23, [x27, #0x40]\n"
+ "str q22, [x27, #0x50]\n"
+ "str q21, [x27, #0x60]\n"
+ "str q20, [x27, #0x70]\n"
+ "str q19, [x27, #0x80]\n"
+ "str q18, [x27, #0x90]\n"
+ "str q17, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cbz x28, 10f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "str q16, [x27, #0x80]\n"
+ "str q16, [x27, #0x90]\n"
+ "str q16, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v17.8h, v19.8h, v17.8h\n"
- "zip1 v16.8h, v18.8h, v16.8h\n"
+ "ldr d23, [x9], #0x8\n"
+ "ldr d22, [x26], #0x8\n"
+ "sub x28, x28, #0x4\n"
"ldr d18, [x25], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "zip2 v19.8h, v17.8h, v16.8h\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
+ "cmp x28, #0x4\n"
+ "ldr d20, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "zip1 v18.8h, v18.8h, v17.8h\n"
+ "zip1 v18.8h, v23.8h, v18.8h\n"
+ "zip1 v17.8h, v22.8h, v17.8h\n"
+ "zip1 v20.8h, v20.8h, v19.8h\n"
"zip1 v16.8h, v21.8h, v16.8h\n"
- "str q20, [x21, #0x0]\n"
- "zip1 v17.8h, v18.8h, v16.8h\n"
- "zip2 v16.8h, v18.8h, v16.8h\n"
- "str q19, [x21, #0x10]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, #0x20\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "zip2 v16.8h, v20.8h, v16.8h\n"
+ "str q19, [x27, #0x0]\n"
+ "str q18, [x27, #0x10]\n"
+ "str q17, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
+ "ldr h23, [x9], #0x2\n"
+ "ldr h22, [x26], #0x2\n"
+ "sub x28, x28, #0x1\n"
+ "ldr h19, [x25], #0x2\n"
+ "ldr h17, [x24], #0x2\n"
+ "cmp x28, #0x1\n"
+ "ldr h21, [x23], #0x2\n"
+ "ldr h20, [x22], #0x2\n"
+ "ldr h18, [x21], #0x2\n"
+ "ldr h16, [x20], #0x2\n"
+ "zip1 v19.8h, v23.8h, v19.8h\n"
+ "zip1 v17.8h, v22.8h, v17.8h\n"
+ "zip1 v18.8h, v21.8h, v18.8h\n"
+ "zip1 v16.8h, v20.8h, v16.8h\n"
"zip1 v17.8h, v19.8h, v17.8h\n"
"zip1 v16.8h, v18.8h, v16.8h\n"
- "ldr h20, [x25], #0x2\n"
- "ldr h19, [x23], #0x2\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
- "ldr h17, [x22], #0x2\n"
- "ldr h16, [x20], #0x2\n"
- "zip1 v17.8h, v20.8h, v17.8h\n"
- "zip1 v16.8h, v19.8h, v16.8h\n"
- "str d18, [x21, #0x0]\n"
- "zip1 v16.8h, v17.8h, v16.8h\n"
- "str d16, [x21, #0x60]\n"
- "add x21, x21, #0x8\n"
+ "str d17, [x27, #0x0]\n"
+ "str d16, [x27, #0x60]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0xc0\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x9, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "csel x25, x25, %x[pad_row], GE\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x18\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x20, #0x18\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
+ "ldr q23, [x9], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
"sub x20, x20, #0x18\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
"cmp x20, #0x18\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v31.8h, v19.8h, v17.8h\n"
- "zip1 v30.8h, v18.8h, v16.8h\n"
- "ldr q22, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v29.8h, v19.8h, v17.8h\n"
- "zip2 v28.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v27.8h, v22.8h, v17.8h\n"
- "zip1 v21.8h, v20.8h, v16.8h\n"
+ "ldr q26, [x9], #0x10\n"
+ "ldr q25, [x26], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
"ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v26.8h, v22.8h, v17.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v25.8h, v19.8h, v17.8h\n"
- "zip1 v24.8h, v18.8h, v16.8h\n"
- "zip2 v23.8h, v19.8h, v17.8h\n"
- "zip2 v22.8h, v18.8h, v16.8h\n"
- "zip1 v17.8h, v31.8h, v30.8h\n"
- "zip2 v16.8h, v31.8h, v30.8h\n"
- "str q17, [x21, #0x0]\n"
- "zip1 v19.8h, v29.8h, v28.8h\n"
- "zip2 v18.8h, v29.8h, v28.8h\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v17.8h, v27.8h, v21.8h\n"
- "zip2 v16.8h, v27.8h, v21.8h\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "zip1 v21.8h, v26.8h, v20.8h\n"
- "zip2 v20.8h, v26.8h, v20.8h\n"
- "str q17, [x21, #0x40]\n"
- "zip1 v19.8h, v25.8h, v24.8h\n"
- "zip2 v18.8h, v25.8h, v24.8h\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v17.8h, v23.8h, v22.8h\n"
- "zip2 v16.8h, v23.8h, v22.8h\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "zip1 v31.8h, v23.8h, v21.8h\n"
+ "zip1 v30.8h, v22.8h, v16.8h\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip2 v29.8h, v23.8h, v21.8h\n"
+ "zip2 v23.8h, v22.8h, v16.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip1 v22.8h, v26.8h, v24.8h\n"
+ "zip1 v21.8h, v25.8h, v20.8h\n"
+ "zip2 v28.8h, v26.8h, v24.8h\n"
+ "zip2 v20.8h, v25.8h, v20.8h\n"
+ "zip1 v27.8h, v19.8h, v17.8h\n"
+ "zip1 v26.8h, v18.8h, v16.8h\n"
+ "zip2 v25.8h, v19.8h, v17.8h\n"
+ "zip2 v24.8h, v18.8h, v16.8h\n"
+ "zip1 v19.8h, v31.8h, v30.8h\n"
+ "zip2 v18.8h, v31.8h, v30.8h\n"
+ "zip1 v17.8h, v29.8h, v23.8h\n"
+ "zip2 v16.8h, v29.8h, v23.8h\n"
+ "zip1 v23.8h, v22.8h, v21.8h\n"
+ "zip2 v22.8h, v22.8h, v21.8h\n"
+ "str q19, [x27, #0x0]\n"
+ "str q18, [x27, #0x10]\n"
+ "zip1 v21.8h, v28.8h, v20.8h\n"
+ "zip2 v20.8h, v28.8h, v20.8h\n"
+ "str q17, [x27, #0x20]\n"
+ "zip1 v19.8h, v27.8h, v26.8h\n"
+ "zip2 v18.8h, v27.8h, v26.8h\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v17.8h, v25.8h, v24.8h\n"
+ "zip2 v16.8h, v25.8h, v24.8h\n"
+ "str q23, [x27, #0x40]\n"
+ "str q22, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x20]\n"
+ "str q18, [x27, #0x30]\n"
+ "str q17, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
+ "ldr q25, [x9], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
"sub x20, x20, #0xc\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0xc\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v24.8h, v21.8h, v17.8h\n"
- "zip1 v23.8h, v18.8h, v16.8h\n"
- "ldr d20, [x9], #0x8\n"
- "ldr d19, [x28], #0x8\n"
- "zip2 v22.8h, v21.8h, v17.8h\n"
- "zip2 v18.8h, v18.8h, v16.8h\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v21.8h, v20.8h, v17.8h\n"
- "zip1 v20.8h, v19.8h, v16.8h\n"
- "zip1 v17.8h, v24.8h, v23.8h\n"
- "zip2 v16.8h, v24.8h, v23.8h\n"
- "str q17, [x21, #0x0]\n"
- "zip1 v19.8h, v22.8h, v18.8h\n"
- "zip2 v18.8h, v22.8h, v18.8h\n"
- "str q16, [x21, #0x10]\n"
- "zip1 v17.8h, v21.8h, v20.8h\n"
- "zip2 v16.8h, v21.8h, v20.8h\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "ldr d22, [x9], #0x8\n"
+ "ldr d23, [x26], #0x8\n"
+ "ldr d21, [x25], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "zip1 v20.8h, v25.8h, v18.8h\n"
+ "zip1 v19.8h, v24.8h, v17.8h\n"
+ "zip2 v18.8h, v25.8h, v18.8h\n"
+ "zip2 v17.8h, v24.8h, v17.8h\n"
+ "zip1 v22.8h, v22.8h, v21.8h\n"
+ "zip1 v16.8h, v23.8h, v16.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "zip1 v17.8h, v22.8h, v16.8h\n"
+ "zip2 v16.8h, v22.8h, v16.8h\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x20]\n"
+ "str q18, [x27, #0x30]\n"
+ "str q17, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr d18, [x9], #0x8\n"
- "ldr d19, [x28], #0x8\n"
+ "ldr d19, [x26], #0x8\n"
"sub x20, x20, #0x4\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"cmp x20, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
"zip1 v18.8h, v18.8h, v17.8h\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
"zip1 v17.8h, v18.8h, v16.8h\n"
"zip2 v16.8h, v18.8h, v16.8h\n"
- "str q17, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q17, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
+ "ldr h18, [x26], #0x2\n"
"sub x20, x20, #0x1\n"
+ "ldr h17, [x25], #0x2\n"
+ "ldr h16, [x24], #0x2\n"
"cmp x20, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
"zip1 v17.8h, v19.8h, v17.8h\n"
"zip1 v16.8h, v18.8h, v16.8h\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x60\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp
index 2a3208d18d..07326e7c98 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,592 +40,607 @@ void a64_transpose_interleave_12_2x4_fp32bf16(bfloat16 *out, const float *in, si
__asm__ __volatile__(
"cmp %x[height], #0x8\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "cmp x28, #0x18\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q15, [x9], #0x10\n"
- "ldr q17, [x28], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q16, [x27], #0x10\n"
- "ldr q20, [x26], #0x10\n"
- "zip1 v6.4s, v15.4s, v16.4s\n"
- "zip1 v11.4s, v17.4s, v20.4s\n"
- "ldr q2, [x25], #0x10\n"
- "ldr q4, [x23], #0x10\n"
- "zip2 v22.4s, v15.4s, v16.4s\n"
- "zip2 v18.4s, v17.4s, v20.4s\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x9], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
+ "sub x28, x28, #0x18\n"
+ "ldr q23, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "cmp x28, #0x18\n"
+ "ldr q29, [x23], #0x10\n"
+ "ldr q7, [x22], #0x10\n"
+ "ldr q5, [x21], #0x10\n"
+ "ldr q12, [x20], #0x10\n"
+ "ldr q1, [x9], #0x10\n"
+ "ldr q13, [x26], #0x10\n"
+ "zip1 v6.4s, v18.4s, v23.4s\n"
+ "zip1 v26.4s, v24.4s, v20.4s\n"
+ "ldr q9, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "zip2 v23.4s, v18.4s, v23.4s\n"
+ "zip2 v19.4s, v24.4s, v20.4s\n"
+ "ldr q22, [x23], #0x10\n"
+ "ldr q27, [x22], #0x10\n"
+ "zip1 v15.4s, v29.4s, v5.4s\n"
+ "zip1 v2.4s, v7.4s, v12.4s\n"
+ "ldr q25, [x21], #0x10\n"
+ "ldr q28, [x20], #0x10\n"
+ "zip2 v21.4s, v29.4s, v5.4s\n"
+ "zip2 v11.4s, v7.4s, v12.4s\n"
+ "ldr q3, [x9], #0x10\n"
+ "ldr q31, [x26], #0x10\n"
+ "zip1 v12.4s, v1.4s, v9.4s\n"
+ "zip1 v14.4s, v13.4s, v17.4s\n"
+ "ldr q30, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip2 v1.4s, v1.4s, v9.4s\n"
+ "zip2 v0.4s, v13.4s, v17.4s\n"
+ "ldr q13, [x23], #0x10\n"
+ "ldr q8, [x22], #0x10\n"
+ "zip1 v9.4s, v22.4s, v25.4s\n"
+ "zip1 v29.4s, v27.4s, v28.4s\n"
+ "ldr q5, [x21], #0x10\n"
+ "ldr q17, [x20], #0x10\n"
+ "zip2 v20.4s, v22.4s, v25.4s\n"
+ "zip2 v28.4s, v27.4s, v28.4s\n"
+ "ldr q7, [x9], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
+ "zip1 v25.4s, v3.4s, v30.4s\n"
+ "zip1 v10.4s, v31.4s, v16.4s\n"
+ "ldr q4, [x25], #0x10\n"
+ "ldr q22, [x24], #0x10\n"
+ "zip2 v27.4s, v3.4s, v30.4s\n"
+ "zip2 v3.4s, v31.4s, v16.4s\n"
+ "ldr q31, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v16.4s, v13.4s, v5.4s\n"
+ "zip1 v30.4s, v8.4s, v17.4s\n"
+ "zip2 v13.4s, v13.4s, v5.4s\n"
+ "ldr q5, [x21], #0x10\n"
+ "zip2 v17.4s, v8.4s, v17.4s\n"
+ "zip1 v8.4s, v7.4s, v4.4s\n"
+ "zip2 v7.4s, v7.4s, v4.4s\n"
+ "zip1 v4.4s, v24.4s, v22.4s\n"
+ "zip2 v22.4s, v24.4s, v22.4s\n"
+ "zip1 v24.4s, v31.4s, v5.4s\n"
+ "zip2 v31.4s, v31.4s, v5.4s\n"
+ "zip1 v5.4s, v6.4s, v26.4s\n"
+ "zip2 v6.4s, v6.4s, v26.4s\n"
"ldr q26, [x20], #0x10\n"
- "zip1 v9.4s, v2.4s, v17.4s\n"
- "zip1 v10.4s, v4.4s, v26.4s\n"
- "ldr q16, [x9], #0x10\n"
- "ldr q27, [x28], #0x10\n"
- "zip2 v3.4s, v2.4s, v17.4s\n"
- "zip2 v30.4s, v4.4s, v26.4s\n"
- "ldr q13, [x27], #0x10\n"
- "ldr q1, [x26], #0x10\n"
- "zip1 v23.4s, v16.4s, v13.4s\n"
- "zip1 v5.4s, v27.4s, v1.4s\n"
- "ldr q26, [x25], #0x10\n"
- "ldr q14, [x23], #0x10\n"
- "zip2 v0.4s, v16.4s, v13.4s\n"
- "zip2 v2.4s, v27.4s, v1.4s\n"
- "ldr q15, [x22], #0x10\n"
- "ldr q8, [x20], #0x10\n"
- "zip1 v31.4s, v26.4s, v15.4s\n"
- "zip1 v4.4s, v14.4s, v8.4s\n"
- "ldr q28, [x9], #0x10\n"
- "ldr q19, [x28], #0x10\n"
- "zip2 v21.4s, v26.4s, v15.4s\n"
- "zip2 v16.4s, v14.4s, v8.4s\n"
- "ldr q15, [x27], #0x10\n"
- "ldr q1, [x26], #0x10\n"
- "zip1 v17.4s, v28.4s, v15.4s\n"
- "zip1 v8.4s, v19.4s, v1.4s\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip2 v7.4s, v28.4s, v15.4s\n"
- "zip2 v15.4s, v19.4s, v1.4s\n"
- "ldr q12, [x22], #0x10\n"
- "ldr q25, [x20], #0x10\n"
- "zip1 v14.4s, v27.4s, v12.4s\n"
- "zip1 v26.4s, v20.4s, v25.4s\n"
- "ldr q13, [x9], #0x10\n"
- "ldr q29, [x28], #0x10\n"
- "zip2 v28.4s, v27.4s, v12.4s\n"
- "zip2 v12.4s, v20.4s, v25.4s\n"
- "ldr q27, [x27], #0x10\n"
- "ldr q20, [x26], #0x10\n"
- "zip1 v19.4s, v13.4s, v27.4s\n"
- "zip1 v25.4s, v29.4s, v20.4s\n"
- "ldr q24, [x25], #0x10\n"
- "ldr q1, [x23], #0x10\n"
- "zip2 v27.4s, v13.4s, v27.4s\n"
- "zip2 v13.4s, v29.4s, v20.4s\n"
- "ldr q20, [x22], #0x10\n"
- "zip1 v29.4s, v24.4s, v20.4s\n"
- "zip2 v20.4s, v24.4s, v20.4s\n"
- "zip1 v24.4s, v6.4s, v11.4s\n"
- ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n"
- "zip2 v11.4s, v6.4s, v11.4s\n"
- "ldr q6, [x20], #0x10\n"
- ".inst 0x4ea16978 // bfcvtn2 v24.8h, v11.4s\n"
- "zip1 v11.4s, v1.4s, v6.4s\n"
- "zip2 v6.4s, v1.4s, v6.4s\n"
- "zip1 v1.4s, v22.4s, v18.4s\n"
- ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
- "zip2 v18.4s, v22.4s, v18.4s\n"
- "ldr q22, [x9], #0x10\n"
- ".inst 0x4ea16a41 // bfcvtn2 v1.8h, v18.4s\n"
- "zip1 v18.4s, v23.4s, v5.4s\n"
- ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
- "zip2 v5.4s, v23.4s, v5.4s\n"
- "ldr q23, [x28], #0x10\n"
- ".inst 0x4ea168b2 // bfcvtn2 v18.8h, v5.4s\n"
- "zip1 v5.4s, v0.4s, v2.4s\n"
".inst 0x0ea168a5 // bfcvtn v5.4h, v5.4s\n"
- "zip2 v0.4s, v0.4s, v2.4s\n"
- "ldr q2, [x27], #0x10\n"
- ".inst 0x4ea16805 // bfcvtn2 v5.8h, v0.4s\n"
- "zip1 v0.4s, v22.4s, v2.4s\n"
- "zip2 v2.4s, v22.4s, v2.4s\n"
- "zip1 v22.4s, v17.4s, v8.4s\n"
- ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
- "zip2 v8.4s, v17.4s, v8.4s\n"
- "ldr q17, [x26], #0x10\n"
- ".inst 0x4ea16916 // bfcvtn2 v22.8h, v8.4s\n"
- "zip1 v8.4s, v23.4s, v17.4s\n"
- "zip2 v23.4s, v23.4s, v17.4s\n"
- "zip1 v17.4s, v7.4s, v15.4s\n"
- ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
- "zip2 v7.4s, v7.4s, v15.4s\n"
- "ldr q15, [x25], #0x10\n"
- ".inst 0x4ea168f1 // bfcvtn2 v17.8h, v7.4s\n"
- "zip1 v7.4s, v9.4s, v10.4s\n"
- ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
- "zip2 v10.4s, v9.4s, v10.4s\n"
- "ldr q9, [x23], #0x10\n"
- ".inst 0x4ea16947 // bfcvtn2 v7.8h, v10.4s\n"
- "zip1 v10.4s, v3.4s, v30.4s\n"
- ".inst 0x0ea1694a // bfcvtn v10.4h, v10.4s\n"
- "zip2 v30.4s, v3.4s, v30.4s\n"
- "ldr q3, [x22], #0x10\n"
- ".inst 0x4ea16bca // bfcvtn2 v10.8h, v30.4s\n"
- "zip1 v30.4s, v15.4s, v3.4s\n"
- "zip2 v15.4s, v15.4s, v3.4s\n"
- "zip1 v3.4s, v31.4s, v4.4s\n"
+ ".inst 0x4ea168c5 // bfcvtn2 v5.8h, v6.4s\n"
+ "zip1 v6.4s, v23.4s, v19.4s\n"
+ "zip2 v19.4s, v23.4s, v19.4s\n"
+ "ldr q23, [x9], #0x10\n"
+ ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
+ ".inst 0x4ea16a66 // bfcvtn2 v6.8h, v19.4s\n"
+ "zip1 v19.4s, v18.4s, v26.4s\n"
+ "zip2 v26.4s, v18.4s, v26.4s\n"
+ "zip1 v18.4s, v12.4s, v14.4s\n"
+ "zip2 v12.4s, v12.4s, v14.4s\n"
+ "ldr q14, [x26], #0x10\n"
+ ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
+ ".inst 0x4ea16992 // bfcvtn2 v18.8h, v12.4s\n"
+ "zip1 v12.4s, v1.4s, v0.4s\n"
+ "zip2 v1.4s, v1.4s, v0.4s\n"
+ "ldr q0, [x25], #0x10\n"
+ ".inst 0x0ea1698c // bfcvtn v12.4h, v12.4s\n"
+ ".inst 0x4ea1682c // bfcvtn2 v12.8h, v1.4s\n"
+ "zip1 v1.4s, v25.4s, v10.4s\n"
+ "zip2 v10.4s, v25.4s, v10.4s\n"
+ "ldr q25, [x24], #0x10\n"
+ ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
+ ".inst 0x4ea16941 // bfcvtn2 v1.8h, v10.4s\n"
+ "zip1 v10.4s, v23.4s, v0.4s\n"
+ "zip2 v23.4s, v23.4s, v0.4s\n"
+ "zip1 v0.4s, v14.4s, v25.4s\n"
+ "zip2 v25.4s, v14.4s, v25.4s\n"
+ "zip1 v14.4s, v27.4s, v3.4s\n"
+ "zip2 v3.4s, v27.4s, v3.4s\n"
+ "ldr q27, [x23], #0x10\n"
+ ".inst 0x0ea169ce // bfcvtn v14.4h, v14.4s\n"
+ ".inst 0x4ea1686e // bfcvtn2 v14.8h, v3.4s\n"
+ "zip1 v3.4s, v15.4s, v2.4s\n"
+ "zip2 v15.4s, v15.4s, v2.4s\n"
+ "ldr q2, [x22], #0x10\n"
".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
- "zip2 v31.4s, v31.4s, v4.4s\n"
- "ldr q4, [x20], #0x10\n"
- ".inst 0x4ea16be3 // bfcvtn2 v3.8h, v31.4s\n"
- "zip1 v31.4s, v9.4s, v4.4s\n"
- "zip2 v4.4s, v9.4s, v4.4s\n"
- "zip1 v9.4s, v21.4s, v16.4s\n"
+ ".inst 0x4ea169e3 // bfcvtn2 v3.8h, v15.4s\n"
+ "zip1 v15.4s, v21.4s, v11.4s\n"
+ "zip2 v11.4s, v21.4s, v11.4s\n"
+ "ldr q21, [x21], #0x10\n"
+ ".inst 0x0ea169ef // bfcvtn v15.4h, v15.4s\n"
+ ".inst 0x4ea1696f // bfcvtn2 v15.8h, v11.4s\n"
+ "zip1 v11.4s, v9.4s, v29.4s\n"
+ "zip2 v29.4s, v9.4s, v29.4s\n"
+ "ldr q9, [x20], #0x10\n"
+ ".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
+ ".inst 0x4ea16bab // bfcvtn2 v11.8h, v29.4s\n"
+ "zip1 v29.4s, v27.4s, v21.4s\n"
+ "zip2 v21.4s, v27.4s, v21.4s\n"
+ "zip1 v27.4s, v2.4s, v9.4s\n"
+ "zip2 v2.4s, v2.4s, v9.4s\n"
+ "zip1 v9.4s, v20.4s, v28.4s\n"
+ "zip2 v28.4s, v20.4s, v28.4s\n"
+ "ldr q20, [x9], #0x10\n"
".inst 0x0ea16929 // bfcvtn v9.4h, v9.4s\n"
- "zip2 v16.4s, v21.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- ".inst 0x4ea16a09 // bfcvtn2 v9.8h, v16.4s\n"
- "zip1 v16.4s, v14.4s, v26.4s\n"
- ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v14.4s, v14.4s, v26.4s\n"
- "ldr q26, [x28], #0x10\n"
- ".inst 0x4ea169d0 // bfcvtn2 v16.8h, v14.4s\n"
- "zip1 v14.4s, v28.4s, v12.4s\n"
- ".inst 0x0ea169ce // bfcvtn v14.4h, v14.4s\n"
- "zip2 v12.4s, v28.4s, v12.4s\n"
- "ldr q28, [x27], #0x10\n"
- ".inst 0x4ea1698e // bfcvtn2 v14.8h, v12.4s\n"
- "zip1 v12.4s, v21.4s, v28.4s\n"
- "zip2 v28.4s, v21.4s, v28.4s\n"
- "zip1 v21.4s, v19.4s, v25.4s\n"
- ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
- "zip2 v19.4s, v19.4s, v25.4s\n"
- "ldr q25, [x26], #0x10\n"
- ".inst 0x4ea16a75 // bfcvtn2 v21.8h, v19.4s\n"
- "zip1 v19.4s, v26.4s, v25.4s\n"
- "zip2 v25.4s, v26.4s, v25.4s\n"
- "zip1 v26.4s, v27.4s, v13.4s\n"
- ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
- "zip2 v13.4s, v27.4s, v13.4s\n"
- "ldr q27, [x25], #0x10\n"
- ".inst 0x4ea169ba // bfcvtn2 v26.8h, v13.4s\n"
- "zip1 v13.4s, v0.4s, v8.4s\n"
+ ".inst 0x4ea16b89 // bfcvtn2 v9.8h, v28.4s\n"
+ "zip1 v28.4s, v16.4s, v30.4s\n"
+ "zip2 v30.4s, v16.4s, v30.4s\n"
+ "ldr q16, [x26], #0x10\n"
+ ".inst 0x0ea16b9c // bfcvtn v28.4h, v28.4s\n"
+ ".inst 0x4ea16bdc // bfcvtn2 v28.8h, v30.4s\n"
+ "zip1 v30.4s, v13.4s, v17.4s\n"
+ "zip2 v13.4s, v13.4s, v17.4s\n"
+ "ldr q17, [x25], #0x10\n"
+ ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
+ ".inst 0x4ea169be // bfcvtn2 v30.8h, v13.4s\n"
+ "zip1 v13.4s, v8.4s, v4.4s\n"
+ "zip2 v4.4s, v8.4s, v4.4s\n"
+ "ldr q8, [x24], #0x10\n"
".inst 0x0ea169ad // bfcvtn v13.4h, v13.4s\n"
- "zip2 v8.4s, v0.4s, v8.4s\n"
- "ldr q0, [x23], #0x10\n"
- ".inst 0x4ea1690d // bfcvtn2 v13.8h, v8.4s\n"
- "zip1 v8.4s, v2.4s, v23.4s\n"
- ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
- "zip2 v23.4s, v2.4s, v23.4s\n"
- "ldr q2, [x22], #0x10\n"
- ".inst 0x4ea16ae8 // bfcvtn2 v8.8h, v23.4s\n"
+ ".inst 0x4ea1688d // bfcvtn2 v13.8h, v4.4s\n"
+ "zip1 v4.4s, v20.4s, v17.4s\n"
+ "zip2 v20.4s, v20.4s, v17.4s\n"
+ "zip1 v17.4s, v16.4s, v8.4s\n"
+ "zip2 v8.4s, v16.4s, v8.4s\n"
+ "zip1 v16.4s, v7.4s, v22.4s\n"
+ "zip2 v22.4s, v7.4s, v22.4s\n"
+ "ldr q7, [x23], #0x10\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16ad0 // bfcvtn2 v16.8h, v22.4s\n"
+ "zip1 v22.4s, v10.4s, v0.4s\n"
+ "zip2 v10.4s, v10.4s, v0.4s\n"
+ "ldr q0, [x22], #0x10\n"
+ ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
+ ".inst 0x4ea16956 // bfcvtn2 v22.8h, v10.4s\n"
+ "zip1 v10.4s, v23.4s, v25.4s\n"
+ "zip2 v23.4s, v23.4s, v25.4s\n"
+ "ldr q25, [x21], #0x10\n"
+ ".inst 0x0ea1694a // bfcvtn v10.4h, v10.4s\n"
+ ".inst 0x4ea16aea // bfcvtn2 v10.8h, v23.4s\n"
"ldr q23, [x20], #0x10\n"
- "str q24, [x21, #0x0]\n"
- "zip1 v24.4s, v27.4s, v2.4s\n"
- "zip2 v27.4s, v27.4s, v2.4s\n"
- "zip1 v2.4s, v0.4s, v23.4s\n"
- "zip2 v23.4s, v0.4s, v23.4s\n"
- "str q1, [x21, #0x10]\n"
- "zip1 v0.4s, v12.4s, v19.4s\n"
- "zip1 v1.4s, v28.4s, v25.4s\n"
- "str q18, [x21, #0x20]\n"
- "zip1 v18.4s, v29.4s, v11.4s\n"
- ".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "str q5, [x21, #0x30]\n"
- "zip1 v5.4s, v20.4s, v6.4s\n"
- "zip2 v19.4s, v12.4s, v19.4s\n"
- "str q22, [x21, #0x40]\n"
- "zip1 v12.4s, v30.4s, v31.4s\n"
- "zip1 v22.4s, v15.4s, v4.4s\n"
- "str q17, [x21, #0x50]\n"
- "zip1 v17.4s, v24.4s, v2.4s\n"
- ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
- "str q7, [x21, #0x60]\n"
- "zip1 v7.4s, v27.4s, v23.4s\n"
- "zip2 v25.4s, v28.4s, v25.4s\n"
- "str q10, [x21, #0x70]\n"
- ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
- "zip2 v29.4s, v29.4s, v11.4s\n"
- "str q3, [x21, #0x80]\n"
- ".inst 0x0ea168ab // bfcvtn v11.4h, v5.4s\n"
- "zip2 v10.4s, v20.4s, v6.4s\n"
- "str q9, [x21, #0x90]\n"
- ".inst 0x0ea16986 // bfcvtn v6.4h, v12.4s\n"
- "zip2 v12.4s, v30.4s, v31.4s\n"
- "str q16, [x21, #0xa0]\n"
- ".inst 0x0ea16ac5 // bfcvtn v5.4h, v22.4s\n"
- "zip2 v4.4s, v15.4s, v4.4s\n"
- "str q14, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- ".inst 0x0ea16a2f // bfcvtn v15.4h, v17.4s\n"
- "zip2 v20.4s, v24.4s, v2.4s\n"
- "str q21, [x21, #0x0]\n"
- ".inst 0x0ea168fc // bfcvtn v28.4h, v7.4s\n"
- "zip2 v30.4s, v27.4s, v23.4s\n"
- "str q26, [x21, #0x10]\n"
- ".inst 0x4ea16a60 // bfcvtn2 v0.8h, v19.4s\n"
- ".inst 0x4ea16b21 // bfcvtn2 v1.8h, v25.4s\n"
- "str q13, [x21, #0x20]\n"
- ".inst 0x4ea16bb2 // bfcvtn2 v18.8h, v29.4s\n"
- ".inst 0x4ea1694b // bfcvtn2 v11.8h, v10.4s\n"
- "str q8, [x21, #0x30]\n"
- ".inst 0x4ea16986 // bfcvtn2 v6.8h, v12.4s\n"
+ "str q5, [x27, #0x0]\n"
+ "zip1 v5.4s, v4.4s, v17.4s\n"
+ "zip2 v4.4s, v4.4s, v17.4s\n"
+ "str q6, [x27, #0x10]\n"
+ "zip1 v17.4s, v7.4s, v25.4s\n"
+ "zip2 v7.4s, v7.4s, v25.4s\n"
+ "str q18, [x27, #0x20]\n"
+ "zip1 v18.4s, v0.4s, v23.4s\n"
+ "zip2 v0.4s, v0.4s, v23.4s\n"
+ "str q12, [x27, #0x30]\n"
+ "zip1 v6.4s, v20.4s, v8.4s\n"
+ "zip1 v23.4s, v24.4s, v19.4s\n"
+ "str q1, [x27, #0x40]\n"
+ "zip1 v25.4s, v31.4s, v26.4s\n"
+ "zip1 v12.4s, v29.4s, v27.4s\n"
+ "str q14, [x27, #0x50]\n"
+ "zip1 v14.4s, v21.4s, v2.4s\n"
+ "zip1 v1.4s, v17.4s, v18.4s\n"
+ "str q3, [x27, #0x60]\n"
+ "zip1 v3.4s, v7.4s, v0.4s\n"
+ ".inst 0x0ea168a5 // bfcvtn v5.4h, v5.4s\n"
+ "str q15, [x27, #0x70]\n"
+ ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
+ "zip2 v20.4s, v20.4s, v8.4s\n"
+ "str q11, [x27, #0x80]\n"
+ ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
+ "zip2 v24.4s, v24.4s, v19.4s\n"
+ "str q9, [x27, #0x90]\n"
+ ".inst 0x0ea16b28 // bfcvtn v8.4h, v25.4s\n"
+ "zip2 v11.4s, v31.4s, v26.4s\n"
+ "str q28, [x27, #0xa0]\n"
+ ".inst 0x0ea16993 // bfcvtn v19.4h, v12.4s\n"
+ "zip2 v27.4s, v29.4s, v27.4s\n"
+ "str q30, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
+ ".inst 0x0ea169ce // bfcvtn v14.4h, v14.4s\n"
+ "zip2 v31.4s, v21.4s, v2.4s\n"
+ "str q13, [x27, #0x0]\n"
+ ".inst 0x0ea1683e // bfcvtn v30.4h, v1.4s\n"
+ "zip2 v15.4s, v17.4s, v18.4s\n"
+ "str q16, [x27, #0x10]\n"
+ ".inst 0x0ea1686c // bfcvtn v12.4h, v3.4s\n"
+ "zip2 v9.4s, v7.4s, v0.4s\n"
+ "str q22, [x27, #0x20]\n"
".inst 0x4ea16885 // bfcvtn2 v5.8h, v4.4s\n"
- "str q0, [x21, #0x40]\n"
- ".inst 0x4ea16a8f // bfcvtn2 v15.8h, v20.4s\n"
- ".inst 0x4ea16bdc // bfcvtn2 v28.8h, v30.4s\n"
- "str q1, [x21, #0x50]\n"
- "str q18, [x21, #0x60]\n"
- "str q11, [x21, #0x70]\n"
- "str q6, [x21, #0x80]\n"
- "str q5, [x21, #0x90]\n"
- "str q15, [x21, #0xa0]\n"
- "str q28, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ ".inst 0x4ea16a86 // bfcvtn2 v6.8h, v20.4s\n"
+ "str q10, [x27, #0x30]\n"
+ ".inst 0x4ea16b17 // bfcvtn2 v23.8h, v24.4s\n"
+ ".inst 0x4ea16968 // bfcvtn2 v8.8h, v11.4s\n"
+ ".inst 0x4ea16b73 // bfcvtn2 v19.8h, v27.4s\n"
+ ".inst 0x4ea16bee // bfcvtn2 v14.8h, v31.4s\n"
+ ".inst 0x4ea169fe // bfcvtn2 v30.8h, v15.4s\n"
+ ".inst 0x4ea1692c // bfcvtn2 v12.8h, v9.4s\n"
+ "str q5, [x27, #0x40]\n"
+ "str q6, [x27, #0x50]\n"
+ "str q23, [x27, #0x60]\n"
+ "str q8, [x27, #0x70]\n"
+ "str q19, [x27, #0x80]\n"
+ "str q14, [x27, #0x90]\n"
+ "str q30, [x27, #0xa0]\n"
+ "str q12, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0xc\n"
+ "cmp x28, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q20, [x9], #0x10\n"
- "ldr q9, [x28], #0x10\n"
- "sub x24, x24, #0xc\n"
- "cmp x24, #0xc\n"
- "ldr q8, [x27], #0x10\n"
- "ldr q1, [x26], #0x10\n"
- "zip1 v7.4s, v20.4s, v8.4s\n"
- "zip1 v19.4s, v9.4s, v1.4s\n"
- "ldr q6, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip2 v5.4s, v20.4s, v8.4s\n"
- "zip2 v18.4s, v9.4s, v1.4s\n"
- "ldr q27, [x22], #0x10\n"
- "ldr q14, [x20], #0x10\n"
- "zip1 v26.4s, v6.4s, v27.4s\n"
- "zip1 v15.4s, v16.4s, v14.4s\n"
- "ldr q1, [x9], #0x10\n"
- "ldr q30, [x28], #0x10\n"
- "zip2 v24.4s, v6.4s, v27.4s\n"
- "zip2 v25.4s, v16.4s, v14.4s\n"
- "ldr q13, [x27], #0x10\n"
- "ldr q17, [x26], #0x10\n"
- "zip1 v10.4s, v1.4s, v13.4s\n"
- "zip1 v16.4s, v30.4s, v17.4s\n"
- "ldr q4, [x25], #0x10\n"
- "ldr q11, [x23], #0x10\n"
- "zip2 v0.4s, v1.4s, v13.4s\n"
- "zip2 v27.4s, v30.4s, v17.4s\n"
- "ldr q28, [x22], #0x10\n"
- "ldr q12, [x20], #0x10\n"
- "zip1 v22.4s, v4.4s, v28.4s\n"
- "zip1 v13.4s, v11.4s, v12.4s\n"
- "ldr q31, [x9], #0x10\n"
- "ldr q17, [x28], #0x10\n"
- "zip2 v14.4s, v4.4s, v28.4s\n"
- "zip2 v12.4s, v11.4s, v12.4s\n"
- "ldr q2, [x27], #0x10\n"
+ "ldr q5, [x9], #0x10\n"
+ "ldr q9, [x26], #0x10\n"
+ "sub x28, x28, #0xc\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q12, [x24], #0x10\n"
+ "cmp x28, #0xc\n"
+ "ldr q0, [x23], #0x10\n"
+ "ldr q29, [x22], #0x10\n"
+ "ldr q23, [x21], #0x10\n"
+ "ldr q27, [x20], #0x10\n"
+ "ldr q17, [x9], #0x10\n"
+ "ldr q31, [x26], #0x10\n"
+ "zip1 v16.4s, v5.4s, v19.4s\n"
+ "zip1 v7.4s, v9.4s, v12.4s\n"
+ "ldr q10, [x25], #0x10\n"
+ "ldr q11, [x24], #0x10\n"
+ "zip2 v20.4s, v5.4s, v19.4s\n"
+ "zip2 v28.4s, v9.4s, v12.4s\n"
+ "ldr q21, [x23], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "zip1 v24.4s, v0.4s, v23.4s\n"
+ "zip1 v9.4s, v29.4s, v27.4s\n"
+ "ldr q18, [x21], #0x10\n"
+ "ldr q25, [x20], #0x10\n"
+ "zip2 v8.4s, v0.4s, v23.4s\n"
+ "zip2 v4.4s, v29.4s, v27.4s\n"
+ "ldr q27, [x9], #0x10\n"
"ldr q3, [x26], #0x10\n"
- "zip1 v8.4s, v31.4s, v2.4s\n"
- "zip1 v4.4s, v17.4s, v3.4s\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q1, [x23], #0x10\n"
- "zip2 v28.4s, v31.4s, v2.4s\n"
- "zip2 v29.4s, v17.4s, v3.4s\n"
- "ldr q11, [x22], #0x10\n"
- "ldr q17, [x20], #0x10\n"
- "zip1 v9.4s, v23.4s, v11.4s\n"
- "zip1 v21.4s, v1.4s, v17.4s\n"
- "zip2 v11.4s, v23.4s, v11.4s\n"
- "zip2 v17.4s, v1.4s, v17.4s\n"
- "zip1 v2.4s, v7.4s, v19.4s\n"
- "zip1 v31.4s, v5.4s, v18.4s\n"
- "zip1 v3.4s, v10.4s, v16.4s\n"
- "zip1 v6.4s, v0.4s, v27.4s\n"
- "zip1 v1.4s, v8.4s, v4.4s\n"
- "zip1 v30.4s, v28.4s, v29.4s\n"
- "zip1 v20.4s, v26.4s, v15.4s\n"
- "zip1 v23.4s, v24.4s, v25.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "zip2 v7.4s, v7.4s, v19.4s\n"
- "zip1 v19.4s, v22.4s, v13.4s\n"
- ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
- "zip2 v18.4s, v5.4s, v18.4s\n"
- "zip1 v5.4s, v14.4s, v12.4s\n"
+ "zip1 v2.4s, v17.4s, v10.4s\n"
+ "zip1 v6.4s, v31.4s, v11.4s\n"
+ "ldr q1, [x25], #0x10\n"
+ "ldr q29, [x24], #0x10\n"
+ "zip2 v15.4s, v17.4s, v10.4s\n"
+ "zip2 v5.4s, v31.4s, v11.4s\n"
+ "ldr q31, [x23], #0x10\n"
+ "ldr q12, [x22], #0x10\n"
+ "zip1 v13.4s, v21.4s, v18.4s\n"
+ "zip1 v10.4s, v22.4s, v25.4s\n"
+ "ldr q19, [x21], #0x10\n"
+ "ldr q23, [x20], #0x10\n"
+ "zip2 v14.4s, v21.4s, v18.4s\n"
+ "zip2 v30.4s, v22.4s, v25.4s\n"
+ "zip1 v0.4s, v27.4s, v1.4s\n"
+ "zip1 v26.4s, v3.4s, v29.4s\n"
+ "zip2 v1.4s, v27.4s, v1.4s\n"
+ "zip2 v11.4s, v3.4s, v29.4s\n"
+ "zip1 v18.4s, v31.4s, v19.4s\n"
+ "zip1 v17.4s, v12.4s, v23.4s\n"
+ "zip2 v21.4s, v31.4s, v19.4s\n"
+ "zip2 v27.4s, v12.4s, v23.4s\n"
+ "zip1 v3.4s, v16.4s, v7.4s\n"
+ "zip1 v23.4s, v20.4s, v28.4s\n"
+ "zip1 v25.4s, v2.4s, v6.4s\n"
+ "zip1 v12.4s, v15.4s, v5.4s\n"
+ "zip1 v31.4s, v0.4s, v26.4s\n"
+ "zip1 v19.4s, v1.4s, v11.4s\n"
+ "zip1 v29.4s, v24.4s, v9.4s\n"
+ "zip1 v22.4s, v8.4s, v4.4s\n"
".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
- "zip2 v16.4s, v10.4s, v16.4s\n"
- "zip1 v10.4s, v9.4s, v21.4s\n"
- ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
- "zip2 v0.4s, v0.4s, v27.4s\n"
- "zip1 v27.4s, v11.4s, v17.4s\n"
- ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
- "zip2 v4.4s, v8.4s, v4.4s\n"
- ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
- "zip2 v29.4s, v28.4s, v29.4s\n"
- ".inst 0x0ea16a9c // bfcvtn v28.4h, v20.4s\n"
- "zip2 v15.4s, v26.4s, v15.4s\n"
- ".inst 0x0ea16ae8 // bfcvtn v8.4h, v23.4s\n"
- "zip2 v26.4s, v24.4s, v25.4s\n"
- ".inst 0x0ea16a79 // bfcvtn v25.4h, v19.4s\n"
- "zip2 v24.4s, v22.4s, v13.4s\n"
- ".inst 0x0ea168b7 // bfcvtn v23.4h, v5.4s\n"
- "zip2 v22.4s, v14.4s, v12.4s\n"
- ".inst 0x0ea16945 // bfcvtn v5.4h, v10.4s\n"
- "zip2 v20.4s, v9.4s, v21.4s\n"
- ".inst 0x0ea16b73 // bfcvtn v19.4h, v27.4s\n"
- "zip2 v17.4s, v11.4s, v17.4s\n"
- ".inst 0x4ea168e2 // bfcvtn2 v2.8h, v7.4s\n"
- ".inst 0x4ea16a5f // bfcvtn2 v31.8h, v18.4s\n"
- "str q2, [x21, #0x0]\n"
+ "zip2 v16.4s, v16.4s, v7.4s\n"
+ "zip1 v7.4s, v13.4s, v10.4s\n"
+ ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
+ "zip2 v20.4s, v20.4s, v28.4s\n"
+ "zip1 v28.4s, v14.4s, v30.4s\n"
+ ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n"
+ "zip2 v6.4s, v2.4s, v6.4s\n"
+ "zip1 v2.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea1698c // bfcvtn v12.4h, v12.4s\n"
+ "zip2 v5.4s, v15.4s, v5.4s\n"
+ "zip1 v15.4s, v21.4s, v27.4s\n"
+ ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
+ "zip2 v26.4s, v0.4s, v26.4s\n"
+ ".inst 0x0ea16a60 // bfcvtn v0.4h, v19.4s\n"
+ "zip2 v1.4s, v1.4s, v11.4s\n"
+ ".inst 0x0ea16bab // bfcvtn v11.4h, v29.4s\n"
+ "zip2 v9.4s, v24.4s, v9.4s\n"
+ ".inst 0x0ea16ad3 // bfcvtn v19.4h, v22.4s\n"
+ "zip2 v24.4s, v8.4s, v4.4s\n"
+ ".inst 0x0ea168e8 // bfcvtn v8.4h, v7.4s\n"
+ "zip2 v4.4s, v13.4s, v10.4s\n"
+ ".inst 0x0ea16b96 // bfcvtn v22.4h, v28.4s\n"
+ "zip2 v29.4s, v14.4s, v30.4s\n"
+ ".inst 0x0ea1685c // bfcvtn v28.4h, v2.4s\n"
+ "zip2 v14.4s, v18.4s, v17.4s\n"
+ ".inst 0x0ea169f2 // bfcvtn v18.4h, v15.4s\n"
+ "zip2 v17.4s, v21.4s, v27.4s\n"
".inst 0x4ea16a03 // bfcvtn2 v3.8h, v16.4s\n"
- ".inst 0x4ea16806 // bfcvtn2 v6.8h, v0.4s\n"
- "str q31, [x21, #0x10]\n"
- ".inst 0x4ea16881 // bfcvtn2 v1.8h, v4.4s\n"
- ".inst 0x4ea16bbe // bfcvtn2 v30.8h, v29.4s\n"
- "str q3, [x21, #0x20]\n"
- ".inst 0x4ea169fc // bfcvtn2 v28.8h, v15.4s\n"
- ".inst 0x4ea16b48 // bfcvtn2 v8.8h, v26.4s\n"
- "str q6, [x21, #0x30]\n"
- ".inst 0x4ea16b19 // bfcvtn2 v25.8h, v24.4s\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- "str q1, [x21, #0x40]\n"
- ".inst 0x4ea16a85 // bfcvtn2 v5.8h, v20.4s\n"
- ".inst 0x4ea16a33 // bfcvtn2 v19.8h, v17.4s\n"
- "str q30, [x21, #0x50]\n"
- "str q28, [x21, #0x60]\n"
- "str q8, [x21, #0x70]\n"
- "str q25, [x21, #0x80]\n"
- "str q23, [x21, #0x90]\n"
- "str q5, [x21, #0xa0]\n"
- "str q19, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ ".inst 0x4ea16a97 // bfcvtn2 v23.8h, v20.4s\n"
+ ".inst 0x4ea168d9 // bfcvtn2 v25.8h, v6.4s\n"
+ ".inst 0x4ea168ac // bfcvtn2 v12.8h, v5.4s\n"
+ ".inst 0x4ea16b5f // bfcvtn2 v31.8h, v26.4s\n"
+ ".inst 0x4ea16820 // bfcvtn2 v0.8h, v1.4s\n"
+ ".inst 0x4ea1692b // bfcvtn2 v11.8h, v9.4s\n"
+ ".inst 0x4ea16b13 // bfcvtn2 v19.8h, v24.4s\n"
+ "str q3, [x27, #0x0]\n"
+ ".inst 0x4ea16888 // bfcvtn2 v8.8h, v4.4s\n"
+ ".inst 0x4ea16bb6 // bfcvtn2 v22.8h, v29.4s\n"
+ "str q23, [x27, #0x10]\n"
+ ".inst 0x4ea169dc // bfcvtn2 v28.8h, v14.4s\n"
+ ".inst 0x4ea16a32 // bfcvtn2 v18.8h, v17.4s\n"
+ "str q25, [x27, #0x20]\n"
+ "str q12, [x27, #0x30]\n"
+ "str q31, [x27, #0x40]\n"
+ "str q0, [x27, #0x50]\n"
+ "str q11, [x27, #0x60]\n"
+ "str q19, [x27, #0x70]\n"
+ "str q8, [x27, #0x80]\n"
+ "str q22, [x27, #0x90]\n"
+ "str q28, [x27, #0xa0]\n"
+ "str q18, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cbz x28, 10f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "str q16, [x27, #0x80]\n"
+ "str q16, [x27, #0x90]\n"
+ "str q16, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr q23, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v23.4s, v17.4s\n"
- "zip1 v21.4s, v20.4s, v16.4s\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v28.4s, v23.4s, v17.4s\n"
- "zip2 v20.4s, v20.4s, v16.4s\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v27.4s, v19.4s, v17.4s\n"
- "zip1 v26.4s, v18.4s, v16.4s\n"
- "zip2 v25.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "zip1 v19.4s, v22.4s, v21.4s\n"
- "zip1 v18.4s, v28.4s, v20.4s\n"
+ "ldr q25, [x9], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
+ "sub x28, x28, #0x4\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "cmp x28, #0x4\n"
+ "ldr q23, [x23], #0x10\n"
+ "ldr q19, [x22], #0x10\n"
+ "ldr q18, [x21], #0x10\n"
+ "ldr q17, [x20], #0x10\n"
+ "zip1 v22.4s, v25.4s, v21.4s\n"
+ "zip1 v16.4s, v24.4s, v20.4s\n"
+ "zip2 v21.4s, v25.4s, v21.4s\n"
+ "zip2 v20.4s, v24.4s, v20.4s\n"
+ "zip1 v27.4s, v23.4s, v18.4s\n"
+ "zip1 v26.4s, v19.4s, v17.4s\n"
+ "zip2 v25.4s, v23.4s, v18.4s\n"
+ "zip2 v24.4s, v19.4s, v17.4s\n"
+ "zip1 v19.4s, v22.4s, v16.4s\n"
+ "zip1 v18.4s, v21.4s, v20.4s\n"
"zip1 v17.4s, v27.4s, v26.4s\n"
+ "zip2 v23.4s, v22.4s, v16.4s\n"
"zip1 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v22.4s, v21.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v28.4s, v20.4s\n"
+ "zip2 v22.4s, v21.4s, v20.4s\n"
+ ".inst 0x0ea16a75 // bfcvtn v21.4h, v19.4s\n"
+ ".inst 0x0ea16a54 // bfcvtn v20.4h, v18.4s\n"
".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
"zip2 v18.4s, v27.4s, v26.4s\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
"zip2 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q23, [x21, #0x0]\n"
+ ".inst 0x4ea16af5 // bfcvtn2 v21.8h, v23.4s\n"
+ ".inst 0x4ea16ad4 // bfcvtn2 v20.8h, v22.4s\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q21, [x21, #0x10]\n"
- "str q19, [x21, #0x60]\n"
- "str q17, [x21, #0x70]\n"
- "add x21, x21, #0x20\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x60]\n"
+ "str q17, [x27, #0x70]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
+ "ldr s23, [x9], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s17, [x24], #0x4\n"
+ "cmp x28, #0x1\n"
+ "ldr s21, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s18, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "zip1 v19.4s, v23.4s, v19.4s\n"
+ "zip1 v17.4s, v22.4s, v17.4s\n"
+ "zip1 v18.4s, v21.4s, v18.4s\n"
+ "zip1 v16.4s, v20.4s, v16.4s\n"
"zip1 v17.4s, v19.4s, v17.4s\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v16.4s, v17.4s, v16.4s\n"
- ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
- "ldr s17, [x22], #0x4\n"
- "ldr s16, [x20], #0x4\n"
- "zip1 v17.4s, v20.4s, v17.4s\n"
- "zip1 v16.4s, v19.4s, v16.4s\n"
- "zip1 v16.4s, v17.4s, v16.4s\n"
+ ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d18, [x21, #0x0]\n"
- "str d16, [x21, #0x60]\n"
- "add x21, x21, #0x8\n"
+ "str d17, [x27, #0x0]\n"
+ "str d16, [x27, #0x60]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0xc0\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x9, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "csel x25, x25, %x[pad_row], GE\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x18\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
- "ldr q22, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "sub x20, x20, #0x18\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"cmp x20, #0x18\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v19.4s, v22.4s, v17.4s\n"
- "zip1 v21.4s, v18.4s, v16.4s\n"
- "ldr q24, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v10.4s, v22.4s, v17.4s\n"
- "zip2 v2.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v24.4s, v17.4s\n"
- "zip1 v4.4s, v20.4s, v16.4s\n"
- "ldr q23, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v29.4s, v24.4s, v17.4s\n"
- "zip2 v1.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v30.4s, v23.4s, v17.4s\n"
- "zip1 v31.4s, v18.4s, v16.4s\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
"ldr q24, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v23.4s, v23.4s, v17.4s\n"
- "zip2 v28.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v25.4s, v24.4s, v17.4s\n"
- "zip1 v26.4s, v20.4s, v16.4s\n"
- "ldr q14, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v24.4s, v24.4s, v17.4s\n"
- "zip2 v15.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v12.4s, v14.4s, v17.4s\n"
- "zip1 v13.4s, v18.4s, v16.4s\n"
- "ldr q7, [x9], #0x10\n"
- "ldr q3, [x28], #0x10\n"
- "zip2 v0.4s, v14.4s, v17.4s\n"
- "zip2 v9.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v14.4s, v7.4s, v17.4s\n"
- "zip1 v8.4s, v3.4s, v16.4s\n"
- "zip2 v7.4s, v7.4s, v17.4s\n"
- "zip2 v11.4s, v3.4s, v16.4s\n"
- "zip1 v18.4s, v19.4s, v21.4s\n"
- "zip1 v6.4s, v10.4s, v2.4s\n"
- "zip1 v5.4s, v22.4s, v4.4s\n"
- "zip1 v16.4s, v29.4s, v1.4s\n"
- "zip1 v27.4s, v30.4s, v31.4s\n"
- "zip1 v3.4s, v23.4s, v28.4s\n"
- "zip1 v17.4s, v25.4s, v26.4s\n"
- "zip1 v20.4s, v24.4s, v15.4s\n"
- ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
- "zip2 v19.4s, v19.4s, v21.4s\n"
- "zip1 v21.4s, v12.4s, v13.4s\n"
- ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
- "zip2 v10.4s, v10.4s, v2.4s\n"
- "zip1 v2.4s, v0.4s, v9.4s\n"
- ".inst 0x0ea168a5 // bfcvtn v5.4h, v5.4s\n"
- "zip2 v4.4s, v22.4s, v4.4s\n"
- "zip1 v22.4s, v14.4s, v8.4s\n"
- ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v1.4s, v29.4s, v1.4s\n"
- "zip1 v29.4s, v7.4s, v11.4s\n"
- ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n"
- "zip2 v30.4s, v30.4s, v31.4s\n"
- ".inst 0x0ea1687f // bfcvtn v31.4h, v3.4s\n"
- "zip2 v23.4s, v23.4s, v28.4s\n"
- ".inst 0x0ea16a23 // bfcvtn v3.4h, v17.4s\n"
- "zip2 v28.4s, v25.4s, v26.4s\n"
+ "ldr q20, [x26], #0x10\n"
+ "sub x20, x20, #0x18\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
+ "cmp x20, #0x18\n"
+ "ldr q25, [x9], #0x10\n"
+ "ldr q23, [x26], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "ldr q27, [x9], #0x10\n"
+ "zip1 v21.4s, v24.4s, v19.4s\n"
+ "zip1 v6.4s, v20.4s, v17.4s\n"
+ "ldr q26, [x26], #0x10\n"
+ "ldr q22, [x25], #0x10\n"
+ "zip2 v19.4s, v24.4s, v19.4s\n"
+ "zip2 v0.4s, v20.4s, v17.4s\n"
+ "ldr q20, [x24], #0x10\n"
+ "ldr q3, [x9], #0x10\n"
+ "zip1 v17.4s, v25.4s, v18.4s\n"
+ "zip1 v4.4s, v23.4s, v16.4s\n"
+ "ldr q1, [x26], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
+ "zip2 v5.4s, v25.4s, v18.4s\n"
+ "zip2 v16.4s, v23.4s, v16.4s\n"
+ "ldr q23, [x24], #0x10\n"
+ "ldr q30, [x9], #0x10\n"
+ "zip1 v31.4s, v27.4s, v22.4s\n"
+ "zip1 v2.4s, v26.4s, v20.4s\n"
+ "ldr q25, [x26], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "zip2 v29.4s, v27.4s, v22.4s\n"
+ "zip2 v28.4s, v26.4s, v20.4s\n"
+ "ldr q20, [x24], #0x10\n"
+ "ldr q27, [x9], #0x10\n"
+ "zip1 v22.4s, v3.4s, v24.4s\n"
+ "zip1 v26.4s, v1.4s, v23.4s\n"
+ "ldr q7, [x26], #0x10\n"
+ "ldr q11, [x25], #0x10\n"
+ "zip2 v24.4s, v3.4s, v24.4s\n"
+ "zip2 v14.4s, v1.4s, v23.4s\n"
+ "ldr q3, [x24], #0x10\n"
+ "zip1 v23.4s, v30.4s, v18.4s\n"
+ "zip1 v13.4s, v25.4s, v20.4s\n"
+ "zip2 v18.4s, v30.4s, v18.4s\n"
+ "zip2 v12.4s, v25.4s, v20.4s\n"
+ "zip1 v15.4s, v27.4s, v11.4s\n"
+ "zip1 v9.4s, v7.4s, v3.4s\n"
+ "zip2 v8.4s, v27.4s, v11.4s\n"
+ "zip2 v10.4s, v7.4s, v3.4s\n"
+ "zip1 v11.4s, v21.4s, v6.4s\n"
+ "zip1 v7.4s, v19.4s, v0.4s\n"
+ "zip1 v1.4s, v17.4s, v4.4s\n"
+ "zip1 v3.4s, v5.4s, v16.4s\n"
+ "zip1 v30.4s, v31.4s, v2.4s\n"
+ "zip1 v25.4s, v29.4s, v28.4s\n"
+ "zip1 v27.4s, v22.4s, v26.4s\n"
+ "zip1 v20.4s, v24.4s, v14.4s\n"
+ ".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
+ "zip2 v6.4s, v21.4s, v6.4s\n"
+ "zip1 v21.4s, v23.4s, v13.4s\n"
+ ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
+ "zip2 v0.4s, v19.4s, v0.4s\n"
+ "zip1 v19.4s, v18.4s, v12.4s\n"
+ ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
+ "zip2 v4.4s, v17.4s, v4.4s\n"
+ "zip1 v17.4s, v15.4s, v9.4s\n"
+ ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
+ "zip2 v5.4s, v5.4s, v16.4s\n"
+ "zip1 v16.4s, v8.4s, v10.4s\n"
+ ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
+ "zip2 v2.4s, v31.4s, v2.4s\n"
+ ".inst 0x0ea16b3f // bfcvtn v31.4h, v25.4s\n"
+ "zip2 v29.4s, v29.4s, v28.4s\n"
+ ".inst 0x0ea16b7c // bfcvtn v28.4h, v27.4s\n"
+ "zip2 v27.4s, v22.4s, v26.4s\n"
".inst 0x0ea16a9a // bfcvtn v26.4h, v20.4s\n"
- "zip2 v25.4s, v24.4s, v15.4s\n"
+ "zip2 v25.4s, v24.4s, v14.4s\n"
".inst 0x0ea16ab8 // bfcvtn v24.4h, v21.4s\n"
- "zip2 v12.4s, v12.4s, v13.4s\n"
- ".inst 0x0ea16855 // bfcvtn v21.4h, v2.4s\n"
- "zip2 v13.4s, v0.4s, v9.4s\n"
- ".inst 0x0ea16ac2 // bfcvtn v2.4h, v22.4s\n"
- "zip2 v0.4s, v14.4s, v8.4s\n"
- ".inst 0x0ea16ba9 // bfcvtn v9.4h, v29.4s\n"
- "zip2 v17.4s, v7.4s, v11.4s\n"
- ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
- ".inst 0x4ea16946 // bfcvtn2 v6.8h, v10.4s\n"
- "str q18, [x21, #0x0]\n"
- ".inst 0x4ea16885 // bfcvtn2 v5.8h, v4.4s\n"
- ".inst 0x4ea16830 // bfcvtn2 v16.8h, v1.4s\n"
- "str q6, [x21, #0x10]\n"
- ".inst 0x4ea16bdb // bfcvtn2 v27.8h, v30.4s\n"
- ".inst 0x4ea16aff // bfcvtn2 v31.8h, v23.4s\n"
- "str q5, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- ".inst 0x4ea16b83 // bfcvtn2 v3.8h, v28.4s\n"
+ "zip2 v22.4s, v23.4s, v13.4s\n"
+ ".inst 0x0ea16a75 // bfcvtn v21.4h, v19.4s\n"
+ "zip2 v20.4s, v18.4s, v12.4s\n"
+ ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
+ "zip2 v18.4s, v15.4s, v9.4s\n"
+ ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
+ "zip2 v16.4s, v8.4s, v10.4s\n"
+ ".inst 0x4ea168cb // bfcvtn2 v11.8h, v6.4s\n"
+ ".inst 0x4ea16807 // bfcvtn2 v7.8h, v0.4s\n"
+ ".inst 0x4ea16881 // bfcvtn2 v1.8h, v4.4s\n"
+ ".inst 0x4ea168a3 // bfcvtn2 v3.8h, v5.4s\n"
+ ".inst 0x4ea1685e // bfcvtn2 v30.8h, v2.4s\n"
+ ".inst 0x4ea16bbf // bfcvtn2 v31.8h, v29.4s\n"
+ "str q11, [x27, #0x0]\n"
+ "str q7, [x27, #0x10]\n"
+ ".inst 0x4ea16b7c // bfcvtn2 v28.8h, v27.4s\n"
".inst 0x4ea16b3a // bfcvtn2 v26.8h, v25.4s\n"
- "str q27, [x21, #0x40]\n"
- ".inst 0x4ea16998 // bfcvtn2 v24.8h, v12.4s\n"
- ".inst 0x4ea169b5 // bfcvtn2 v21.8h, v13.4s\n"
- "str q31, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- ".inst 0x4ea16802 // bfcvtn2 v2.8h, v0.4s\n"
- ".inst 0x4ea16a29 // bfcvtn2 v9.8h, v17.4s\n"
- "str q3, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q24, [x21, #0x20]\n"
- "str q21, [x21, #0x30]\n"
- "str q2, [x21, #0x40]\n"
- "str q9, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "str q1, [x27, #0x20]\n"
+ ".inst 0x4ea16ad8 // bfcvtn2 v24.8h, v22.4s\n"
+ ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
+ "str q3, [x27, #0x30]\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
+ "str q30, [x27, #0x40]\n"
+ "str q31, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q28, [x27, #0x0]\n"
+ "str q26, [x27, #0x10]\n"
+ "str q24, [x27, #0x20]\n"
+ "str q21, [x27, #0x30]\n"
+ "str q19, [x27, #0x40]\n"
+ "str q17, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
+ "ldr q24, [x9], #0x10\n"
+ "ldr q23, [x26], #0x10\n"
"sub x20, x20, #0xc\n"
+ "ldr q22, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
"cmp x20, #0xc\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v26.4s, v19.4s, v17.4s\n"
- "zip1 v25.4s, v18.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v24.4s, v19.4s, v17.4s\n"
- "zip2 v23.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v2.4s, v21.4s, v17.4s\n"
- "zip1 v22.4s, v20.4s, v16.4s\n"
+ "ldr q28, [x9], #0x10\n"
+ "ldr q27, [x26], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
"ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v1.4s, v21.4s, v17.4s\n"
- "zip2 v0.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
+ "zip1 v26.4s, v24.4s, v22.4s\n"
+ "zip1 v25.4s, v23.4s, v16.4s\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip2 v24.4s, v24.4s, v22.4s\n"
+ "zip2 v23.4s, v23.4s, v16.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip1 v2.4s, v28.4s, v21.4s\n"
+ "zip1 v22.4s, v27.4s, v20.4s\n"
+ "zip2 v1.4s, v28.4s, v21.4s\n"
+ "zip2 v0.4s, v27.4s, v20.4s\n"
"zip1 v31.4s, v19.4s, v17.4s\n"
"zip1 v30.4s, v18.4s, v16.4s\n"
"zip2 v29.4s, v19.4s, v17.4s\n"
@@ -650,66 +665,75 @@ void a64_transpose_interleave_12_2x4_fp32bf16(bfloat16 *out, const float *in, si
"zip2 v16.4s, v29.4s, v28.4s\n"
".inst 0x4ea16b5b // bfcvtn2 v27.8h, v26.4s\n"
".inst 0x4ea16b19 // bfcvtn2 v25.8h, v24.4s\n"
- "str q27, [x21, #0x0]\n"
".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q25, [x21, #0x10]\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q23, [x21, #0x20]\n"
- "str q21, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q17, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "str q27, [x27, #0x0]\n"
+ "str q25, [x27, #0x10]\n"
+ "str q23, [x27, #0x20]\n"
+ "str q21, [x27, #0x30]\n"
+ "str q19, [x27, #0x40]\n"
+ "str q17, [x27, #0x50]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
- "ldr q20, [x9], #0x10\n"
- "ldr q19, [x28], #0x10\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
+ "ldr q21, [x9], #0x10\n"
+ "ldr q20, [x26], #0x10\n"
"sub x20, x20, #0x4\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v20.4s, v17.4s\n"
- "zip1 v18.4s, v19.4s, v16.4s\n"
- "zip2 v21.4s, v20.4s, v17.4s\n"
- "zip2 v20.4s, v19.4s, v16.4s\n"
- "zip1 v17.4s, v22.4s, v18.4s\n"
+ "zip1 v18.4s, v21.4s, v19.4s\n"
+ "zip1 v16.4s, v20.4s, v17.4s\n"
+ "zip2 v21.4s, v21.4s, v19.4s\n"
+ "zip2 v20.4s, v20.4s, v17.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "zip2 v19.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v22.4s, v18.4s\n"
- ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
- ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q19, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ ".inst 0x0ea16a32 // bfcvtn v18.4h, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
+ "ldr s18, [x26], #0x4\n"
"sub x20, x20, #0x1\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s16, [x24], #0x4\n"
"cmp x20, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
"zip1 v17.4s, v19.4s, v17.4s\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x60\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp
index 4d9d5e7f43..ee37b4378f 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_s8s16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,222 +34,236 @@ void a64_transpose_interleave_12_s8s16(int16_t *out, const int8_t *in, size_t wi
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x18\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
"ldr q3, [x25], #0x10\n"
- "ldr q21, [x23], #0x10\n"
- "sshll2 v20.8h, v3.16b, #0x0\n"
- "sshll v2.8h, v21.8b, #0x0\n"
- "ldr q1, [x22], #0x10\n"
- "ldr q19, [x20], #0x10\n"
- "sshll2 v18.8h, v1.16b, #0x0\n"
- "sshll v0.8h, v19.8b, #0x0\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "sshll v31.8h, v17.8b, #0x0\n"
- "sshll v30.8h, v16.8b, #0x0\n"
- "ldr d29, [x23], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "sshll2 v27.8h, v21.16b, #0x0\n"
- "sshll2 v26.8h, v19.16b, #0x0\n"
- "dup v25.2d, v20.d[0]\n"
- "dup v24.2d, v2.d[1]\n"
+ "ldr q21, [x22], #0x10\n"
"sub x24, x24, #0x18\n"
+ "ldr q2, [x21], #0x10\n"
+ "ldr q20, [x20], #0x10\n"
"cmp x24, #0x18\n"
- "dup v23.2d, v18.d[0]\n"
- "dup v22.2d, v0.d[1]\n"
- "dup v21.2d, v20.d[1]\n"
- "dup v20.2d, v31.d[1]\n"
- "dup v19.2d, v18.d[1]\n"
- "dup v18.2d, v30.d[1]\n"
- "sshll v17.8h, v3.8b, #0x0\n"
- "sshll v16.8h, v1.8b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "mov v25.d[1], v2.d[0]\n"
- "mov v24.d[1], v27.d[0]\n"
- "str q25, [x21, #0x10]\n"
- "mov v23.d[1], v0.d[0]\n"
- "mov v22.d[1], v26.d[0]\n"
- "str q24, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "sshll v17.8h, v29.8b, #0x0\n"
- "sshll v16.8h, v28.8b, #0x0\n"
- "str q23, [x21, #0x40]\n"
- "mov v21.d[1], v31.d[0]\n"
- "mov v20.d[1], v27.d[1]\n"
- "str q22, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "mov v19.d[1], v30.d[0]\n"
- "mov v18.d[1], v26.d[1]\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q18, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "sshll2 v18.8h, v3.16b, #0x0\n"
+ "sshll v0.8h, v21.8b, #0x0\n"
+ "ldr d31, [x20], #0x8\n"
+ "sshll2 v16.8h, v2.16b, #0x0\n"
+ "sshll v30.8h, v20.8b, #0x0\n"
+ "sshll v29.8h, v19.8b, #0x0\n"
+ "sshll v28.8h, v17.8b, #0x0\n"
+ "sshll2 v27.8h, v21.16b, #0x0\n"
+ "sshll2 v26.8h, v20.16b, #0x0\n"
+ "dup v17.2d, v18.d[0]\n"
+ "dup v25.2d, v0.d[1]\n"
+ "dup v24.2d, v16.d[0]\n"
+ "dup v23.2d, v30.d[1]\n"
+ "dup v22.2d, v18.d[1]\n"
+ "dup v21.2d, v29.d[1]\n"
+ "dup v20.2d, v16.d[1]\n"
+ "dup v19.2d, v28.d[1]\n"
+ "sshll v16.8h, v3.8b, #0x0\n"
+ "sshll v18.8h, v2.8b, #0x0\n"
+ "mov v17.d[1], v0.d[0]\n"
+ "mov v25.d[1], v27.d[0]\n"
+ "mov v24.d[1], v30.d[0]\n"
+ "mov v23.d[1], v26.d[0]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "sshll v17.8h, v1.8b, #0x0\n"
+ "sshll v16.8h, v31.8b, #0x0\n"
+ "str q25, [x23, #0x20]\n"
+ "mov v22.d[1], v29.d[0]\n"
+ "mov v21.d[1], v27.d[1]\n"
+ "str q18, [x23, #0x30]\n"
+ "mov v20.d[1], v28.d[0]\n"
+ "mov v19.d[1], v26.d[1]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q23, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q22, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q20, [x23, #0x30]\n"
+ "str q19, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cmp x24, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
"ldr d18, [x20], #0x8\n"
"sub x24, x24, #0xc\n"
+ "ld1 { v19.s }[2], [x22], #0x4\n"
+ "ldr d17, [x25], #0x8\n"
"cmp x24, #0xc\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
"ld1 { v18.s }[2], [x20], #0x4\n"
+ "ldr d16, [x21], #0x8\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v16.s }[2], [x21], #0x4\n"
"sshll v25.8h, v19.8b, #0x0\n"
- "sshll v24.8h, v18.8b, #0x0\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "sshll2 v23.8h, v19.16b, #0x0\n"
+ "sshll2 v24.8h, v19.16b, #0x0\n"
+ "sshll v23.8h, v18.8b, #0x0\n"
"sshll2 v22.8h, v18.16b, #0x0\n"
- "ld1 { v17.s }[2], [x25], #0x4\n"
- "ld1 { v16.s }[2], [x22], #0x4\n"
"sshll2 v21.8h, v17.16b, #0x0\n"
"sshll2 v20.8h, v16.16b, #0x0\n"
"dup v19.2d, v25.d[1]\n"
- "dup v18.2d, v24.d[1]\n"
- "sshll v17.8h, v17.8b, #0x0\n"
+ "sshll v18.8h, v17.8b, #0x0\n"
+ "dup v17.2d, v23.d[1]\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str q17, [x21, #0x0]\n"
"mov v21.d[1], v25.d[0]\n"
- "mov v19.d[1], v23.d[0]\n"
- "str q21, [x21, #0x10]\n"
- "mov v20.d[1], v24.d[0]\n"
- "mov v18.d[1], v22.d[0]\n"
- "str q19, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "str q20, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "mov v19.d[1], v24.d[0]\n"
+ "mov v20.d[1], v23.d[0]\n"
+ "mov v17.d[1], v22.d[0]\n"
+ "str q18, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q19, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q20, [x23, #0x40]\n"
+ "str q17, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x4\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
+ "cmp x24, #0x4\n"
"sshll v19.8h, v19.8b, #0x0\n"
"sshll v18.8h, v18.8b, #0x0\n"
"sshll v17.8h, v17.8b, #0x0\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x18]\n"
- "str d17, [x21, #0x30]\n"
- "str d16, [x21, #0x48]\n"
- "add x21, x21, #0x8\n"
+ "str d19, [x23, #0x0]\n"
+ "str d18, [x23, #0x18]\n"
+ "str d17, [x23, #0x30]\n"
+ "str d16, [x23, #0x48]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr b19, [x25], #0x1\n"
- "ldr b18, [x23], #0x1\n"
+ "ldr b18, [x22], #0x1\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x22], #0x1\n"
+ "ldr b17, [x21], #0x1\n"
"ldr b16, [x20], #0x1\n"
+ "cmp x24, #0x1\n"
"sshll v19.8h, v19.8b, #0x0\n"
"sshll v18.8h, v18.8b, #0x0\n"
"sshll v17.8h, v17.8b, #0x0\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x18]\n"
- "str h17, [x21, #0x30]\n"
- "str h16, [x21, #0x48]\n"
- "add x21, x21, #0x2\n"
+ "str h19, [x23, #0x0]\n"
+ "str h18, [x23, #0x18]\n"
+ "str h17, [x23, #0x30]\n"
+ "str h16, [x23, #0x48]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x60\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x18\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
- "ldr q20, [x25], #0x10\n"
- "ldr d16, [x25], #0x8\n"
- "sshll2 v19.8h, v20.16b, #0x0\n"
- "sshll v18.8h, v16.8b, #0x0\n"
- "dup v17.2d, v19.d[1]\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
+ "ldr q18, [x25], #0x10\n"
"sub x20, x20, #0x18\n"
- "sshll v16.8h, v20.8b, #0x0\n"
- "str q16, [x21, #0x0]\n"
- "dup v16.2d, v19.d[0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d16, [x25], #0x8\n"
"cmp x20, #0x18\n"
- "mov v17.d[1], v18.d[0]\n"
- "dup v16.2d, v18.d[1]\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "sshll2 v17.8h, v18.16b, #0x0\n"
+ "sshll v16.8h, v16.8b, #0x0\n"
+ "sshll v19.8h, v18.8b, #0x0\n"
+ "dup v18.2d, v17.d[1]\n"
+ "dup v17.2d, v17.d[0]\n"
+ "mov v18.d[1], v16.d[0]\n"
+ "dup v16.2d, v16.d[1]\n"
+ "str q19, [x23, #0x0]\n"
+ "str d17, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q18, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
"ldr d16, [x25], #0x8\n"
- "ld1 { v16.s }[2], [x25], #0x4\n"
"sub x20, x20, #0xc\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
"cmp x20, #0xc\n"
"sshll v17.8h, v16.8b, #0x0\n"
"sshll2 v16.8h, v16.16b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "str q17, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr s16, [x25], #0x4\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str d16, [x23, #0x0]\n"
+ "add x23, x23, #0x8\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr b16, [x25], #0x1\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
"sshll v16.8h, v16.8b, #0x0\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str h16, [x23, #0x0]\n"
+ "add x23, x23, #0x2\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x18\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp
index b0cd7e4ef7..3bfa4a0e42 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_12_u8u16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,222 +34,236 @@ void a64_transpose_interleave_12_u8u16(uint16_t *out, const uint8_t *in, size_t
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x18\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
"ldr q3, [x25], #0x10\n"
- "ldr q21, [x23], #0x10\n"
- "ushll2 v20.8h, v3.16b, #0x0\n"
- "ushll v2.8h, v21.8b, #0x0\n"
- "ldr q1, [x22], #0x10\n"
- "ldr q19, [x20], #0x10\n"
- "ushll2 v18.8h, v1.16b, #0x0\n"
- "ushll v0.8h, v19.8b, #0x0\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "ushll v31.8h, v17.8b, #0x0\n"
- "ushll v30.8h, v16.8b, #0x0\n"
- "ldr d29, [x23], #0x8\n"
- "ldr d28, [x20], #0x8\n"
- "ushll2 v27.8h, v21.16b, #0x0\n"
- "ushll2 v26.8h, v19.16b, #0x0\n"
- "dup v25.2d, v20.d[0]\n"
- "dup v24.2d, v2.d[1]\n"
+ "ldr q21, [x22], #0x10\n"
"sub x24, x24, #0x18\n"
+ "ldr q2, [x21], #0x10\n"
+ "ldr q20, [x20], #0x10\n"
"cmp x24, #0x18\n"
- "dup v23.2d, v18.d[0]\n"
- "dup v22.2d, v0.d[1]\n"
- "dup v21.2d, v20.d[1]\n"
- "dup v20.2d, v31.d[1]\n"
- "dup v19.2d, v18.d[1]\n"
- "dup v18.2d, v30.d[1]\n"
- "ushll v17.8h, v3.8b, #0x0\n"
- "ushll v16.8h, v1.8b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "mov v25.d[1], v2.d[0]\n"
- "mov v24.d[1], v27.d[0]\n"
- "str q25, [x21, #0x10]\n"
- "mov v23.d[1], v0.d[0]\n"
- "mov v22.d[1], v26.d[0]\n"
- "str q24, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "ushll v17.8h, v29.8b, #0x0\n"
- "ushll v16.8h, v28.8b, #0x0\n"
- "str q23, [x21, #0x40]\n"
- "mov v21.d[1], v31.d[0]\n"
- "mov v20.d[1], v27.d[1]\n"
- "str q22, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "mov v19.d[1], v30.d[0]\n"
- "mov v18.d[1], v26.d[1]\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q18, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d19, [x25], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
+ "ldr d1, [x22], #0x8\n"
+ "ushll2 v18.8h, v3.16b, #0x0\n"
+ "ushll v0.8h, v21.8b, #0x0\n"
+ "ldr d31, [x20], #0x8\n"
+ "ushll2 v16.8h, v2.16b, #0x0\n"
+ "ushll v30.8h, v20.8b, #0x0\n"
+ "ushll v29.8h, v19.8b, #0x0\n"
+ "ushll v28.8h, v17.8b, #0x0\n"
+ "ushll2 v27.8h, v21.16b, #0x0\n"
+ "ushll2 v26.8h, v20.16b, #0x0\n"
+ "dup v17.2d, v18.d[0]\n"
+ "dup v25.2d, v0.d[1]\n"
+ "dup v24.2d, v16.d[0]\n"
+ "dup v23.2d, v30.d[1]\n"
+ "dup v22.2d, v18.d[1]\n"
+ "dup v21.2d, v29.d[1]\n"
+ "dup v20.2d, v16.d[1]\n"
+ "dup v19.2d, v28.d[1]\n"
+ "ushll v16.8h, v3.8b, #0x0\n"
+ "ushll v18.8h, v2.8b, #0x0\n"
+ "mov v17.d[1], v0.d[0]\n"
+ "mov v25.d[1], v27.d[0]\n"
+ "mov v24.d[1], v30.d[0]\n"
+ "mov v23.d[1], v26.d[0]\n"
+ "str q16, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "ushll v17.8h, v1.8b, #0x0\n"
+ "ushll v16.8h, v31.8b, #0x0\n"
+ "str q25, [x23, #0x20]\n"
+ "mov v22.d[1], v29.d[0]\n"
+ "mov v21.d[1], v27.d[1]\n"
+ "str q18, [x23, #0x30]\n"
+ "mov v20.d[1], v28.d[0]\n"
+ "mov v19.d[1], v26.d[1]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q23, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q22, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q20, [x23, #0x30]\n"
+ "str q19, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cmp x24, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr d19, [x23], #0x8\n"
+ "ldr d19, [x22], #0x8\n"
"ldr d18, [x20], #0x8\n"
"sub x24, x24, #0xc\n"
+ "ld1 { v19.s }[2], [x22], #0x4\n"
+ "ldr d17, [x25], #0x8\n"
"cmp x24, #0xc\n"
- "ld1 { v19.s }[2], [x23], #0x4\n"
"ld1 { v18.s }[2], [x20], #0x4\n"
+ "ldr d16, [x21], #0x8\n"
+ "ld1 { v17.s }[2], [x25], #0x4\n"
+ "ld1 { v16.s }[2], [x21], #0x4\n"
"ushll v25.8h, v19.8b, #0x0\n"
- "ushll v24.8h, v18.8b, #0x0\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x22], #0x8\n"
- "ushll2 v23.8h, v19.16b, #0x0\n"
+ "ushll2 v24.8h, v19.16b, #0x0\n"
+ "ushll v23.8h, v18.8b, #0x0\n"
"ushll2 v22.8h, v18.16b, #0x0\n"
- "ld1 { v17.s }[2], [x25], #0x4\n"
- "ld1 { v16.s }[2], [x22], #0x4\n"
"ushll2 v21.8h, v17.16b, #0x0\n"
"ushll2 v20.8h, v16.16b, #0x0\n"
"dup v19.2d, v25.d[1]\n"
- "dup v18.2d, v24.d[1]\n"
- "ushll v17.8h, v17.8b, #0x0\n"
+ "ushll v18.8h, v17.8b, #0x0\n"
+ "dup v17.2d, v23.d[1]\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str q17, [x21, #0x0]\n"
"mov v21.d[1], v25.d[0]\n"
- "mov v19.d[1], v23.d[0]\n"
- "str q21, [x21, #0x10]\n"
- "mov v20.d[1], v24.d[0]\n"
- "mov v18.d[1], v22.d[0]\n"
- "str q19, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "str q20, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "mov v19.d[1], v24.d[0]\n"
+ "mov v20.d[1], v23.d[0]\n"
+ "mov v17.d[1], v22.d[0]\n"
+ "str q18, [x23, #0x0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q19, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q20, [x23, #0x40]\n"
+ "str q17, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x4\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
+ "cmp x24, #0x4\n"
"ushll v19.8h, v19.8b, #0x0\n"
"ushll v18.8h, v18.8b, #0x0\n"
"ushll v17.8h, v17.8b, #0x0\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x18]\n"
- "str d17, [x21, #0x30]\n"
- "str d16, [x21, #0x48]\n"
- "add x21, x21, #0x8\n"
+ "str d19, [x23, #0x0]\n"
+ "str d18, [x23, #0x18]\n"
+ "str d17, [x23, #0x30]\n"
+ "str d16, [x23, #0x48]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr b19, [x25], #0x1\n"
- "ldr b18, [x23], #0x1\n"
+ "ldr b18, [x22], #0x1\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x22], #0x1\n"
+ "ldr b17, [x21], #0x1\n"
"ldr b16, [x20], #0x1\n"
+ "cmp x24, #0x1\n"
"ushll v19.8h, v19.8b, #0x0\n"
"ushll v18.8h, v18.8b, #0x0\n"
"ushll v17.8h, v17.8b, #0x0\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x18]\n"
- "str h17, [x21, #0x30]\n"
- "str h16, [x21, #0x48]\n"
- "add x21, x21, #0x2\n"
+ "str h19, [x23, #0x0]\n"
+ "str h18, [x23, #0x18]\n"
+ "str h17, [x23, #0x30]\n"
+ "str h16, [x23, #0x48]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x60\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x18\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
- "ldr q20, [x25], #0x10\n"
- "ldr d16, [x25], #0x8\n"
- "ushll2 v19.8h, v20.16b, #0x0\n"
- "ushll v18.8h, v16.8b, #0x0\n"
- "dup v17.2d, v19.d[1]\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
+ "ldr q18, [x25], #0x10\n"
"sub x20, x20, #0x18\n"
- "ushll v16.8h, v20.8b, #0x0\n"
- "str q16, [x21, #0x0]\n"
- "dup v16.2d, v19.d[0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d16, [x25], #0x8\n"
"cmp x20, #0x18\n"
- "mov v17.d[1], v18.d[0]\n"
- "dup v16.2d, v18.d[1]\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "ushll2 v17.8h, v18.16b, #0x0\n"
+ "ushll v16.8h, v16.8b, #0x0\n"
+ "ushll v19.8h, v18.8b, #0x0\n"
+ "dup v18.2d, v17.d[1]\n"
+ "dup v17.2d, v17.d[0]\n"
+ "mov v18.d[1], v16.d[0]\n"
+ "dup v16.2d, v16.d[1]\n"
+ "str q19, [x23, #0x0]\n"
+ "str d17, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q18, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
"ldr d16, [x25], #0x8\n"
- "ld1 { v16.s }[2], [x25], #0x4\n"
"sub x20, x20, #0xc\n"
+ "ld1 { v16.s }[2], [x25], #0x4\n"
"cmp x20, #0xc\n"
"ushll v17.8h, v16.8b, #0x0\n"
"ushll2 v16.8h, v16.16b, #0x0\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "str q17, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr s16, [x25], #0x4\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str d16, [x23, #0x0]\n"
+ "add x23, x23, #0x8\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr b16, [x25], #0x1\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
"ushll v16.8h, v16.8b, #0x0\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str h16, [x23, #0x0]\n"
+ "add x23, x23, #0x2\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x18\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp
index 0399f8becc..e798793759 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -10,16 +10,16 @@
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#pragma once
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp
index f3a1dde73f..7c79c5f7f0 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,14 +40,16 @@ void a64_transpose_interleave_16_1x4(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"cmp %x[height], #0x10\n"
- "blt 8f\n"
+ "blt 9f\n"
"1:" // Main row loop: Head
"mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[width]\n"
+ "mov x15, %x[out]\n"
+ "sub %x[height], %x[height], #0x10\n"
+ "add x14, x17, %x[in_stride]\n"
"add x13, x14, %x[in_stride]\n"
"add x12, x13, %x[in_stride]\n"
+ "cmp x16, #0x10\n"
"add x11, x12, %x[in_stride]\n"
"add x10, x11, %x[in_stride]\n"
"add x9, x10, %x[in_stride]\n"
@@ -55,244 +57,268 @@ void a64_transpose_interleave_16_1x4(uint8_t *out, const uint8_t *in, size_t wid
"add x27, x28, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x10\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q21, [x17], #0x10\n"
- "ldr q20, [x16], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v3.16b, v21.16b, v17.16b\n"
- "zip1 v2.16b, v20.16b, v16.16b\n"
- "ldr q19, [x13], #0x10\n"
- "ldr q18, [x12], #0x10\n"
- "zip2 v1.16b, v21.16b, v17.16b\n"
- "zip2 v0.16b, v20.16b, v16.16b\n"
- "ldr q17, [x11], #0x10\n"
- "ldr q16, [x10], #0x10\n"
+ "ldr q19, [x17], #0x10\n"
+ "ldr q18, [x14], #0x10\n"
+ "sub x16, x16, #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "ldr q16, [x12], #0x10\n"
+ "cmp x16, #0x10\n"
+ "ldr q24, [x11], #0x10\n"
+ "ldr q23, [x10], #0x10\n"
+ "ldr q22, [x9], #0x10\n"
+ "ldr q21, [x28], #0x10\n"
+ "ldr q30, [x27], #0x10\n"
+ "ldr q29, [x26], #0x10\n"
+ "zip1 v3.16b, v19.16b, v17.16b\n"
+ "zip1 v2.16b, v18.16b, v16.16b\n"
+ "ldr q28, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v1.16b, v19.16b, v17.16b\n"
+ "zip2 v27.16b, v18.16b, v16.16b\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v26.16b, v24.16b, v22.16b\n"
+ "zip1 v25.16b, v23.16b, v21.16b\n"
+ "ldr q17, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip2 v24.16b, v24.16b, v22.16b\n"
+ "zip2 v23.16b, v23.16b, v21.16b\n"
+ "zip1 v22.16b, v30.16b, v28.16b\n"
+ "zip1 v21.16b, v29.16b, v20.16b\n"
+ "zip2 v0.16b, v30.16b, v28.16b\n"
+ "zip2 v20.16b, v29.16b, v20.16b\n"
"zip1 v31.16b, v19.16b, v17.16b\n"
"zip1 v30.16b, v18.16b, v16.16b\n"
- "ldr q25, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v24.16b, v19.16b, v17.16b\n"
- "zip2 v23.16b, v18.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.16b, v25.16b, v17.16b\n"
- "zip1 v21.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v29.16b, v25.16b, v17.16b\n"
- "zip2 v20.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v28.16b, v19.16b, v17.16b\n"
- "zip1 v27.16b, v18.16b, v16.16b\n"
- "zip2 v26.16b, v19.16b, v17.16b\n"
- "zip2 v25.16b, v18.16b, v16.16b\n"
- "zip1 v16.16b, v3.16b, v2.16b\n"
- "zip2 v17.16b, v3.16b, v2.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.16b, v1.16b, v0.16b\n"
- "zip2 v19.16b, v1.16b, v0.16b\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v18.16b, v31.16b, v30.16b\n"
- "zip2 v17.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v16.16b, v24.16b, v23.16b\n"
+ "zip2 v29.16b, v19.16b, v17.16b\n"
+ "zip2 v28.16b, v18.16b, v16.16b\n"
+ "zip1 v19.16b, v3.16b, v2.16b\n"
+ "zip2 v18.16b, v3.16b, v2.16b\n"
+ "zip1 v17.16b, v1.16b, v27.16b\n"
+ "zip2 v16.16b, v1.16b, v27.16b\n"
+ "zip1 v27.16b, v26.16b, v25.16b\n"
+ "zip2 v26.16b, v26.16b, v25.16b\n"
+ "zip1 v25.16b, v24.16b, v23.16b\n"
"zip2 v24.16b, v24.16b, v23.16b\n"
- "str q19, [x21, #0x30]\n"
+ "str q19, [x15, #0x0]\n"
"zip1 v23.16b, v22.16b, v21.16b\n"
"zip2 v22.16b, v22.16b, v21.16b\n"
- "str q18, [x21, #0x40]\n"
- "zip1 v21.16b, v29.16b, v20.16b\n"
- "zip2 v20.16b, v29.16b, v20.16b\n"
- "str q17, [x21, #0x50]\n"
- "zip1 v19.16b, v28.16b, v27.16b\n"
- "zip2 v18.16b, v28.16b, v27.16b\n"
- "str q16, [x21, #0x60]\n"
- "zip1 v17.16b, v26.16b, v25.16b\n"
- "zip2 v16.16b, v26.16b, v25.16b\n"
- "str q24, [x21, #0x70]\n"
- "str q23, [x21, #0x80]\n"
- "str q22, [x21, #0x90]\n"
- "str q21, [x21, #0xa0]\n"
- "str q20, [x21, #0xb0]\n"
- "str q19, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q18, [x15, #0x10]\n"
+ "zip1 v21.16b, v0.16b, v20.16b\n"
+ "zip2 v20.16b, v0.16b, v20.16b\n"
+ "str q17, [x15, #0x20]\n"
+ "zip1 v19.16b, v31.16b, v30.16b\n"
+ "zip2 v18.16b, v31.16b, v30.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "zip1 v17.16b, v29.16b, v28.16b\n"
+ "zip2 v16.16b, v29.16b, v28.16b\n"
+ "str q27, [x15, #0x40]\n"
+ "str q26, [x15, #0x50]\n"
+ "str q25, [x15, #0x60]\n"
+ "str q24, [x15, #0x70]\n"
+ "str q23, [x15, #0x80]\n"
+ "str q22, [x15, #0x90]\n"
+ "str q21, [x15, #0xa0]\n"
+ "str q20, [x15, #0xb0]\n"
+ "str q19, [x15, #0xc0]\n"
+ "str q18, [x15, #0xd0]\n"
+ "str q17, [x15, #0xe0]\n"
+ "str q16, [x15, #0xf0]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cbz x16, 8f\n"
+ "cmp x16, #0x4\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x15, #0x0]\n"
+ "str q16, [x15, #0x10]\n"
+ "str q16, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "str q16, [x15, #0x40]\n"
+ "str q16, [x15, #0x50]\n"
+ "str q16, [x15, #0x60]\n"
+ "str q16, [x15, #0x70]\n"
+ "str q16, [x15, #0x80]\n"
+ "str q16, [x15, #0x90]\n"
+ "str q16, [x15, #0xa0]\n"
+ "str q16, [x15, #0xb0]\n"
+ "str q16, [x15, #0xc0]\n"
+ "str q16, [x15, #0xd0]\n"
+ "str q16, [x15, #0xe0]\n"
+ "str q16, [x15, #0xf0]\n"
"blt 5f\n"
"4:" // Main row loop: width 4 loop: loop
- "ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x13], #0x4\n"
- "ldr s18, [x12], #0x4\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr s17, [x11], #0x4\n"
- "ldr s16, [x10], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
+ "ldr s23, [x17], #0x4\n"
+ "ldr s21, [x14], #0x4\n"
+ "sub x16, x16, #0x4\n"
+ "ldr s20, [x13], #0x4\n"
+ "ldr s19, [x12], #0x4\n"
+ "cmp x16, #0x4\n"
+ "ldr s22, [x11], #0x4\n"
+ "ldr s18, [x10], #0x4\n"
+ "ldr s17, [x9], #0x4\n"
+ "ldr s16, [x28], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s26, [x26], #0x4\n"
+ "zip1 v25.16b, v23.16b, v20.16b\n"
+ "zip1 v21.16b, v21.16b, v19.16b\n"
"ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s19, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "zip1 v22.16b, v22.16b, v17.16b\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "ldr s18, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
- "zip1 v17.16b, v20.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "str q22, [x21, #0x0]\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- "str q21, [x21, #0x40]\n"
- "str q18, [x21, #0x80]\n"
- "str q16, [x21, #0xc0]\n"
- "add x21, x21, #0x10\n"
+ "zip1 v21.16b, v25.16b, v21.16b\n"
+ "zip1 v20.16b, v27.16b, v20.16b\n"
+ "zip1 v19.16b, v26.16b, v19.16b\n"
+ "zip1 v17.16b, v22.16b, v17.16b\n"
+ "zip1 v18.16b, v24.16b, v18.16b\n"
+ "zip1 v16.16b, v23.16b, v16.16b\n"
+ "str q21, [x15, #0x0]\n"
+ "str q17, [x15, #0x40]\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v16.16b\n"
+ "str q17, [x15, #0x80]\n"
+ "str q16, [x15, #0xc0]\n"
+ "add x15, x15, #0x10\n"
"bge 4b\n"
"5:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x16, #0x1\n"
"blt 7f\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x13], #0x1\n"
- "ldr b18, [x12], #0x1\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr b17, [x11], #0x1\n"
- "ldr b16, [x10], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
+ "ldr b23, [x17], #0x1\n"
+ "ldr b21, [x14], #0x1\n"
+ "sub x16, x16, #0x1\n"
+ "ldr b20, [x13], #0x1\n"
+ "ldr b19, [x12], #0x1\n"
+ "cmp x16, #0x1\n"
+ "ldr b22, [x11], #0x1\n"
+ "ldr b18, [x10], #0x1\n"
+ "ldr b17, [x9], #0x1\n"
+ "ldr b16, [x28], #0x1\n"
+ "ldr b27, [x27], #0x1\n"
+ "ldr b26, [x26], #0x1\n"
+ "zip1 v25.16b, v23.16b, v20.16b\n"
+ "zip1 v21.16b, v21.16b, v19.16b\n"
"ldr b20, [x25], #0x1\n"
- "ldr b19, [x23], #0x1\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr b17, [x22], #0x1\n"
+ "ldr b19, [x24], #0x1\n"
+ "ldr b24, [x23], #0x1\n"
+ "ldr b23, [x22], #0x1\n"
+ "zip1 v22.16b, v22.16b, v17.16b\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "ldr b18, [x21], #0x1\n"
"ldr b16, [x20], #0x1\n"
- "zip1 v17.16b, v20.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "str s22, [x21, #0x0]\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- "str s21, [x21, #0x40]\n"
- "str s18, [x21, #0x80]\n"
- "str s16, [x21, #0xc0]\n"
- "add x21, x21, #0x4\n"
+ "zip1 v21.16b, v25.16b, v21.16b\n"
+ "zip1 v20.16b, v27.16b, v20.16b\n"
+ "zip1 v19.16b, v26.16b, v19.16b\n"
+ "zip1 v17.16b, v22.16b, v17.16b\n"
+ "zip1 v18.16b, v24.16b, v18.16b\n"
+ "zip1 v16.16b, v23.16b, v16.16b\n"
+ "str s21, [x15, #0x0]\n"
+ "str s17, [x15, #0x40]\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v16.16b\n"
+ "str s17, [x15, #0x80]\n"
+ "str s16, [x15, #0xc0]\n"
+ "add x15, x15, #0x4\n"
"bge 6b\n"
"7:" // Main row loop: width 1 loop: skip
+ "8:" // Main row loop: odd col skip
"cmp %x[height], #0x10\n"
"add %x[out], %x[out], #0x100\n"
"bge 1b\n"
- "cbz %x[height], 16f\n"
- "8:" // Main loop skip
- "9:" // Tail row loop: Head
+ "cbz %x[height], 18f\n"
+ "9:" // Main loop skip
+ "10:" // Tail row loop: Head
"mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x14, x15, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x14, %x[in_stride]\n"
- "csel x14, x14, %x[pad_row], GT\n"
- "csel x15, x15, %x[pad_row], GE\n"
+ "mov x15, %x[out]\n"
+ "add x14, x17, %x[in_stride]\n"
+ "add x13, x14, %x[in_stride]\n"
+ "add x12, x13, %x[in_stride]\n"
+ "csel x13, x13, %x[pad_row], GE\n"
+ "add %x[in], x12, %x[in_stride]\n"
+ "csel x12, x12, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x16, x16, %x[pad_row], GT\n"
- "cmp x20, #0x10\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "blt 11f\n"
- "10:" // Tail row loop: Column loop
+ "csel x14, x14, %x[pad_row], GT\n"
+ "cmp x20, #0x10\n"
+ "blt 12f\n"
+ "11:" // Tail row loop: Column loop
"ldr q20, [x17], #0x10\n"
- "ldr q21, [x16], #0x10\n"
+ "ldr q21, [x14], #0x10\n"
"sub x20, x20, #0x10\n"
+ "ldr q19, [x13], #0x10\n"
+ "ldr q16, [x12], #0x10\n"
"cmp x20, #0x10\n"
- "ldr q19, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
"zip1 v18.16b, v20.16b, v19.16b\n"
"zip1 v17.16b, v21.16b, v16.16b\n"
"zip2 v20.16b, v20.16b, v19.16b\n"
- "zip2 v19.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v18.16b, v17.16b\n"
+ "zip2 v16.16b, v21.16b, v16.16b\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
"zip2 v18.16b, v18.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v20.16b, v19.16b\n"
- "zip2 v16.16b, v20.16b, v19.16b\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 10b\n"
- "11:" // Tail row loop: Column loop skip
+ "zip1 v17.16b, v20.16b, v16.16b\n"
+ "zip2 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [x15, #0x0]\n"
+ "str q18, [x15, #0x10]\n"
+ "str q17, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "bge 11b\n"
+ "12:" // Tail row loop: Column loop skip
+ "cbz x20, 17f\n"
"cmp x20, #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: width 4 loop: loop
+ "movi v16.16b, #0x0\n"
+ "str q16, [x15, #0x0]\n"
+ "str q16, [x15, #0x10]\n"
+ "str q16, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: width 4 loop: loop
"ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
+ "ldr s18, [x14], #0x4\n"
"sub x20, x20, #0x4\n"
+ "ldr s17, [x13], #0x4\n"
+ "ldr s16, [x12], #0x4\n"
"cmp x20, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 12b\n"
- "13:" // Tail row loop: width 4 loop: skip
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 1 loop: loop
+ "blt 16f\n"
+ "15:" // Tail row loop: width 1 loop: loop
"ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
+ "ldr b18, [x14], #0x1\n"
"sub x20, x20, #0x1\n"
+ "ldr b17, [x13], #0x1\n"
+ "ldr b16, [x12], #0x1\n"
"cmp x20, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 1 loop: skip
+ "str s16, [x15, #0x0]\n"
+ "add x15, x15, #0x4\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 1 loop: skip
+ "17:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x40\n"
- "bge 9b\n"
- "16:" // Done
+ "bge 10b\n"
+ "18:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp
index 7c7e91e666..586696fcc5 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_1x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,221 +41,232 @@ void a64_transpose_interleave_16_1x8(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "cmp %x[height], #0x7\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "cmp %x[height], #0x7\n"
- "add %x[in], x22, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GE\n"
+ "add %x[in], x20, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
+ "cmp %x[height], #0x5\n"
"csel x22, x22, %x[pad_row], GT\n"
"csel x23, x23, %x[pad_row], GE\n"
- "cmp %x[height], #0x5\n"
- "mov x21, %x[width]\n"
+ "cmp %x[height], #0x3\n"
"csel x24, x24, %x[pad_row], GT\n"
"csel x25, x25, %x[pad_row], GE\n"
- "cmp %x[height], #0x3\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x21, #0x20\n"
- "mov x20, %x[out]\n"
"sub %x[height], %x[height], #0x8\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x28, #0x20\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q23, [x9], #0x10\n"
- "ldr q22, [x28], #0x10\n"
- "sub x21, x21, #0x20\n"
- "cmp x21, #0x20\n"
- "ldr q20, [x27], #0x10\n"
- "ldr q21, [x26], #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x24], #0x10\n"
- "zip1 v5.16b, v23.16b, v19.16b\n"
- "zip1 v4.16b, v22.16b, v18.16b\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x22], #0x10\n"
- "zip1 v3.16b, v20.16b, v17.16b\n"
- "zip1 v31.16b, v21.16b, v16.16b\n"
- "ldr q25, [x9], #0x10\n"
- "ldr q24, [x28], #0x10\n"
- "zip2 v2.16b, v23.16b, v19.16b\n"
- "zip2 v30.16b, v20.16b, v17.16b\n"
- "ldr q23, [x27], #0x10\n"
- "ldr q20, [x26], #0x10\n"
- "zip2 v22.16b, v22.16b, v18.16b\n"
- "zip2 v21.16b, v21.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x24], #0x10\n"
- "zip1 v29.16b, v25.16b, v19.16b\n"
- "zip1 v28.16b, v24.16b, v18.16b\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x22], #0x10\n"
- "zip1 v27.16b, v23.16b, v17.16b\n"
- "zip1 v26.16b, v20.16b, v16.16b\n"
- "zip2 v1.16b, v25.16b, v19.16b\n"
- "zip2 v25.16b, v23.16b, v17.16b\n"
- "zip2 v24.16b, v24.16b, v18.16b\n"
- "zip2 v16.16b, v20.16b, v16.16b\n"
- "zip1 v0.16b, v5.16b, v3.16b\n"
- "zip1 v17.16b, v4.16b, v31.16b\n"
- "zip2 v20.16b, v5.16b, v3.16b\n"
- "zip2 v19.16b, v4.16b, v31.16b\n"
- "zip1 v31.16b, v2.16b, v30.16b\n"
- "zip1 v18.16b, v22.16b, v21.16b\n"
- "zip2 v30.16b, v2.16b, v30.16b\n"
+ "ldr q24, [x9], #0x10\n"
+ "ldr q0, [x26], #0x10\n"
+ "sub x28, x28, #0x20\n"
+ "ldr q31, [x25], #0x10\n"
+ "ldr q30, [x24], #0x10\n"
+ "cmp x28, #0x20\n"
+ "ldr q23, [x23], #0x10\n"
+ "ldr q29, [x22], #0x10\n"
+ "ldr q22, [x21], #0x10\n"
+ "ldr q21, [x20], #0x10\n"
+ "ldr q28, [x9], #0x10\n"
+ "ldr q4, [x26], #0x10\n"
+ "ldr q27, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip1 v3.16b, v24.16b, v23.16b\n"
+ "zip1 v2.16b, v0.16b, v29.16b\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v26.16b, v31.16b, v22.16b\n"
+ "zip1 v25.16b, v30.16b, v21.16b\n"
+ "ldr q17, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
+ "zip2 v23.16b, v31.16b, v22.16b\n"
+ "zip2 v22.16b, v0.16b, v29.16b\n"
+ "zip2 v21.16b, v30.16b, v21.16b\n"
+ "zip1 v0.16b, v28.16b, v19.16b\n"
+ "zip1 v31.16b, v4.16b, v18.16b\n"
+ "zip1 v30.16b, v27.16b, v17.16b\n"
+ "zip1 v29.16b, v20.16b, v16.16b\n"
+ "zip2 v1.16b, v28.16b, v19.16b\n"
+ "zip2 v28.16b, v27.16b, v17.16b\n"
+ "zip2 v27.16b, v4.16b, v18.16b\n"
+ "zip2 v20.16b, v20.16b, v16.16b\n"
+ "zip1 v19.16b, v3.16b, v26.16b\n"
+ "zip1 v18.16b, v2.16b, v25.16b\n"
+ "zip2 v17.16b, v3.16b, v26.16b\n"
+ "zip2 v16.16b, v2.16b, v25.16b\n"
+ "zip1 v26.16b, v24.16b, v23.16b\n"
+ "zip1 v25.16b, v22.16b, v21.16b\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
"zip2 v23.16b, v22.16b, v21.16b\n"
- "zip1 v22.16b, v29.16b, v27.16b\n"
- "zip1 v21.16b, v28.16b, v26.16b\n"
- "zip2 v29.16b, v29.16b, v27.16b\n"
- "zip2 v28.16b, v28.16b, v26.16b\n"
- "zip1 v27.16b, v1.16b, v25.16b\n"
- "zip1 v26.16b, v24.16b, v16.16b\n"
- "zip2 v25.16b, v1.16b, v25.16b\n"
- "zip2 v24.16b, v24.16b, v16.16b\n"
- "zip1 v16.16b, v0.16b, v17.16b\n"
- "zip2 v17.16b, v0.16b, v17.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v16.16b, v20.16b, v19.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "str q17, [x20, #0x10]\n"
- "zip1 v19.16b, v31.16b, v18.16b\n"
- "zip2 v18.16b, v31.16b, v18.16b\n"
- "str q16, [x20, #0x20]\n"
- "zip1 v17.16b, v30.16b, v23.16b\n"
- "zip2 v16.16b, v30.16b, v23.16b\n"
- "str q20, [x20, #0x30]\n"
- "str q19, [x20, #0x40]\n"
+ "zip1 v22.16b, v0.16b, v30.16b\n"
+ "zip1 v21.16b, v31.16b, v29.16b\n"
+ "zip2 v0.16b, v0.16b, v30.16b\n"
+ "zip2 v31.16b, v31.16b, v29.16b\n"
+ "zip1 v30.16b, v1.16b, v28.16b\n"
+ "zip1 v29.16b, v27.16b, v20.16b\n"
+ "zip2 v28.16b, v1.16b, v28.16b\n"
+ "zip2 v27.16b, v27.16b, v20.16b\n"
+ "zip1 v20.16b, v19.16b, v18.16b\n"
+ "zip2 v19.16b, v19.16b, v18.16b\n"
+ "zip1 v18.16b, v17.16b, v16.16b\n"
+ "zip2 v17.16b, v17.16b, v16.16b\n"
+ "zip1 v16.16b, v26.16b, v25.16b\n"
+ "zip2 v26.16b, v26.16b, v25.16b\n"
+ "zip1 v25.16b, v24.16b, v23.16b\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
+ "str q20, [x27, #0x0]\n"
+ "str q19, [x27, #0x10]\n"
"zip1 v23.16b, v22.16b, v21.16b\n"
"zip2 v22.16b, v22.16b, v21.16b\n"
- "str q18, [x20, #0x50]\n"
- "zip1 v21.16b, v29.16b, v28.16b\n"
- "zip2 v20.16b, v29.16b, v28.16b\n"
- "str q17, [x20, #0x60]\n"
- "zip1 v19.16b, v27.16b, v26.16b\n"
- "zip2 v18.16b, v27.16b, v26.16b\n"
- "str q16, [x20, #0x70]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 v17.16b, v25.16b, v24.16b\n"
- "zip2 v16.16b, v25.16b, v24.16b\n"
- "str q23, [x20, #0x0]\n"
- "str q22, [x20, #0x10]\n"
- "str q21, [x20, #0x20]\n"
- "str q20, [x20, #0x30]\n"
- "str q19, [x20, #0x40]\n"
- "str q18, [x20, #0x50]\n"
- "str q17, [x20, #0x60]\n"
- "str q16, [x20, #0x70]\n"
- "add x20, x20, %x[out_stride]\n"
+ "str q18, [x27, #0x20]\n"
+ "zip1 v21.16b, v0.16b, v31.16b\n"
+ "zip2 v20.16b, v0.16b, v31.16b\n"
+ "str q17, [x27, #0x30]\n"
+ "zip1 v19.16b, v30.16b, v29.16b\n"
+ "zip2 v18.16b, v30.16b, v29.16b\n"
+ "str q16, [x27, #0x40]\n"
+ "zip1 v17.16b, v28.16b, v27.16b\n"
+ "zip2 v16.16b, v28.16b, v27.16b\n"
+ "str q26, [x27, #0x50]\n"
+ "str q25, [x27, #0x60]\n"
+ "str q24, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q23, [x27, #0x0]\n"
+ "str q22, [x27, #0x10]\n"
+ "str q21, [x27, #0x20]\n"
+ "str q20, [x27, #0x30]\n"
+ "str q19, [x27, #0x40]\n"
+ "str q18, [x27, #0x50]\n"
+ "str q17, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x21, #0x10\n"
+ "cmp x28, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q25, [x9], #0x10\n"
- "ldr q27, [x28], #0x10\n"
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
- "ldr q26, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "ldr q22, [x25], #0x10\n"
- "ldr q21, [x24], #0x10\n"
- "zip1 v20.16b, v25.16b, v22.16b\n"
- "zip1 v23.16b, v27.16b, v21.16b\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x22], #0x10\n"
- "zip1 v19.16b, v26.16b, v17.16b\n"
- "zip1 v18.16b, v24.16b, v16.16b\n"
- "zip2 v25.16b, v25.16b, v22.16b\n"
- "zip2 v22.16b, v26.16b, v17.16b\n"
- "zip2 v21.16b, v27.16b, v21.16b\n"
- "zip2 v16.16b, v24.16b, v16.16b\n"
- "zip1 v24.16b, v20.16b, v19.16b\n"
- "zip1 v17.16b, v23.16b, v18.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "zip2 v19.16b, v23.16b, v18.16b\n"
- "zip1 v23.16b, v25.16b, v22.16b\n"
- "zip1 v18.16b, v21.16b, v16.16b\n"
- "zip2 v22.16b, v25.16b, v22.16b\n"
- "zip2 v21.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v24.16b, v17.16b\n"
- "zip2 v17.16b, v24.16b, v17.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v16.16b, v20.16b, v19.16b\n"
+ "ldr q24, [x9], #0x10\n"
+ "ldr q25, [x26], #0x10\n"
+ "sub x28, x28, #0x10\n"
+ "ldr q23, [x25], #0x10\n"
+ "ldr q22, [x24], #0x10\n"
+ "cmp x28, #0x10\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip1 v20.16b, v24.16b, v18.16b\n"
+ "zip1 v19.16b, v25.16b, v17.16b\n"
+ "zip2 v24.16b, v24.16b, v18.16b\n"
+ "zip2 v25.16b, v25.16b, v17.16b\n"
+ "zip1 v18.16b, v23.16b, v21.16b\n"
+ "zip1 v17.16b, v22.16b, v16.16b\n"
+ "zip2 v23.16b, v23.16b, v21.16b\n"
+ "zip2 v16.16b, v22.16b, v16.16b\n"
+ "zip1 v22.16b, v20.16b, v18.16b\n"
+ "zip1 v21.16b, v19.16b, v17.16b\n"
+ "zip2 v20.16b, v20.16b, v18.16b\n"
+ "zip2 v19.16b, v19.16b, v17.16b\n"
+ "zip1 v18.16b, v24.16b, v23.16b\n"
+ "zip1 v17.16b, v25.16b, v16.16b\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
+ "zip2 v16.16b, v25.16b, v16.16b\n"
+ "zip1 v23.16b, v22.16b, v21.16b\n"
+ "zip2 v22.16b, v22.16b, v21.16b\n"
+ "zip1 v21.16b, v20.16b, v19.16b\n"
"zip2 v20.16b, v20.16b, v19.16b\n"
- "str q17, [x20, #0x10]\n"
- "zip1 v19.16b, v23.16b, v18.16b\n"
- "zip2 v18.16b, v23.16b, v18.16b\n"
- "str q16, [x20, #0x20]\n"
- "zip1 v17.16b, v22.16b, v21.16b\n"
- "zip2 v16.16b, v22.16b, v21.16b\n"
- "str q20, [x20, #0x30]\n"
- "str q19, [x20, #0x40]\n"
- "str q18, [x20, #0x50]\n"
- "str q17, [x20, #0x60]\n"
- "str q16, [x20, #0x70]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "zip2 v18.16b, v18.16b, v17.16b\n"
+ "zip1 v17.16b, v24.16b, v16.16b\n"
+ "zip2 v16.16b, v24.16b, v16.16b\n"
+ "str q23, [x27, #0x0]\n"
+ "str q22, [x27, #0x10]\n"
+ "str q21, [x27, #0x20]\n"
+ "str q20, [x27, #0x30]\n"
+ "str q19, [x27, #0x40]\n"
+ "str q18, [x27, #0x50]\n"
+ "str q17, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x21, #0x4\n"
+ "cbz x28, 10f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s18, [x9], #0x4\n"
- "ldr s19, [x28], #0x4\n"
- "sub x21, x21, #0x4\n"
- "cmp x21, #0x4\n"
- "ldr s21, [x27], #0x4\n"
- "ldr s20, [x26], #0x4\n"
- "ldr s17, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "zip1 v18.16b, v18.16b, v17.16b\n"
- "zip1 v19.16b, v19.16b, v16.16b\n"
- "ldr s17, [x23], #0x4\n"
- "ldr s16, [x22], #0x4\n"
+ "ldr s23, [x9], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "sub x28, x28, #0x4\n"
+ "ldr s21, [x25], #0x4\n"
+ "ldr s20, [x24], #0x4\n"
+ "cmp x28, #0x4\n"
+ "ldr s18, [x23], #0x4\n"
+ "ldr s19, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "zip1 v18.16b, v23.16b, v18.16b\n"
+ "zip1 v19.16b, v22.16b, v19.16b\n"
"zip1 v17.16b, v21.16b, v17.16b\n"
"zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v18.16b, v18.16b, v17.16b\n"
"zip1 v16.16b, v19.16b, v16.16b\n"
"zip1 v17.16b, v18.16b, v16.16b\n"
"zip2 v16.16b, v18.16b, v16.16b\n"
- "str q17, [x20, #0x0]\n"
- "str q16, [x20, #0x10]\n"
- "add x20, x20, #0x20\n"
+ "str q17, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x21, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "sub x21, x21, #0x1\n"
- "cmp x21, #0x1\n"
- "ldr b21, [x27], #0x1\n"
- "ldr b20, [x26], #0x1\n"
- "ldr b17, [x25], #0x1\n"
- "ldr b16, [x24], #0x1\n"
- "zip1 v19.16b, v19.16b, v17.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "ldr b17, [x23], #0x1\n"
- "ldr b16, [x22], #0x1\n"
+ "ldr b23, [x9], #0x1\n"
+ "ldr b22, [x26], #0x1\n"
+ "sub x28, x28, #0x1\n"
+ "ldr b21, [x25], #0x1\n"
+ "ldr b20, [x24], #0x1\n"
+ "cmp x28, #0x1\n"
+ "ldr b19, [x23], #0x1\n"
+ "ldr b18, [x22], #0x1\n"
+ "ldr b17, [x21], #0x1\n"
+ "ldr b16, [x20], #0x1\n"
+ "zip1 v19.16b, v23.16b, v19.16b\n"
+ "zip1 v18.16b, v22.16b, v18.16b\n"
"zip1 v17.16b, v21.16b, v17.16b\n"
"zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str d16, [x20, #0x0]\n"
- "add x20, x20, #0x8\n"
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp
index b4515cbfd4..8186b1f475 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,189 +40,215 @@ void a64_transpose_interleave_16_2x2(uint16_t *out, const uint16_t *in, size_t w
__asm__ __volatile__(
"cmp %x[height], #0x8\n"
- "blt 8f\n"
+ "blt 9f\n"
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "cmp x28, #0x10\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x10\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x26], #0x10\n"
- "zip1 v1.8h, v17.8h, v16.8h\n"
- "zip2 v0.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v31.8h, v19.8h, v18.8h\n"
- "zip2 v30.8h, v19.8h, v18.8h\n"
- "ldr q29, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
- "zip1 v28.8h, v17.8h, v16.8h\n"
- "zip2 v27.8h, v17.8h, v16.8h\n"
- "ldr q17, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
- "zip1 v26.8h, v17.8h, v16.8h\n"
- "zip2 v25.8h, v17.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v24.8h, v17.8h, v16.8h\n"
- "zip2 v23.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v22.8h, v17.8h, v16.8h\n"
- "zip2 v21.8h, v17.8h, v16.8h\n"
- "ldr q20, [x22], #0x10\n"
+ "ldr q22, [x9], #0x10\n"
+ "ldr q21, [x26], #0x10\n"
+ "sub x28, x28, #0x10\n"
+ "ldr q20, [x25], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "cmp x28, #0x10\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "ldr q23, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "zip1 v19.8h, v29.8h, v18.8h\n"
- "zip2 v18.8h, v29.8h, v18.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q1, [x21, #0x0]\n"
- "str q0, [x21, #0x10]\n"
- "str q26, [x21, #0x20]\n"
- "str q25, [x21, #0x30]\n"
- "str q31, [x21, #0x40]\n"
- "str q30, [x21, #0x50]\n"
- "str q24, [x21, #0x60]\n"
- "str q23, [x21, #0x70]\n"
- "str q28, [x21, #0x80]\n"
- "str q27, [x21, #0x90]\n"
- "str q22, [x21, #0xa0]\n"
- "str q21, [x21, #0xb0]\n"
- "str q19, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 v0.8h, v22.8h, v21.8h\n"
+ "zip2 v31.8h, v22.8h, v21.8h\n"
+ "ldr q22, [x9], #0x10\n"
+ "ldr q21, [x26], #0x10\n"
+ "zip1 v30.8h, v20.8h, v19.8h\n"
+ "zip2 v29.8h, v20.8h, v19.8h\n"
+ "ldr q20, [x25], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip1 v28.8h, v18.8h, v17.8h\n"
+ "zip2 v27.8h, v18.8h, v17.8h\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "zip1 v26.8h, v23.8h, v16.8h\n"
+ "zip2 v25.8h, v23.8h, v16.8h\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip1 v23.8h, v22.8h, v21.8h\n"
+ "zip2 v22.8h, v22.8h, v21.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "str q0, [x27, #0x0]\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "str q31, [x27, #0x10]\n"
+ "zip1 v17.8h, v24.8h, v16.8h\n"
+ "zip2 v16.8h, v24.8h, v16.8h\n"
+ "str q23, [x27, #0x20]\n"
+ "str q22, [x27, #0x30]\n"
+ "str q30, [x27, #0x40]\n"
+ "str q29, [x27, #0x50]\n"
+ "str q21, [x27, #0x60]\n"
+ "str q20, [x27, #0x70]\n"
+ "str q28, [x27, #0x80]\n"
+ "str q27, [x27, #0x90]\n"
+ "str q19, [x27, #0xa0]\n"
+ "str q18, [x27, #0xb0]\n"
+ "str q26, [x27, #0xc0]\n"
+ "str q25, [x27, #0xd0]\n"
+ "str q17, [x27, #0xe0]\n"
+ "str q16, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cbz x28, 8f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "str q16, [x27, #0x80]\n"
+ "str q16, [x27, #0x90]\n"
+ "str q16, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "str q16, [x27, #0xc0]\n"
+ "str q16, [x27, #0xd0]\n"
+ "str q16, [x27, #0xe0]\n"
+ "str q16, [x27, #0xf0]\n"
"blt 5f\n"
"4:" // Main row loop: width 4 loop: loop
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v20.8h, v19.8h, v18.8h\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
- "ldr d17, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
+ "ldr d23, [x9], #0x8\n"
+ "ldr d18, [x26], #0x8\n"
+ "sub x28, x28, #0x4\n"
+ "ldr d22, [x25], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
+ "cmp x28, #0x4\n"
+ "ldr d21, [x23], #0x8\n"
"ldr d17, [x22], #0x8\n"
- "ldr d16, [x20], #0x8\n"
- "str q20, [x21, #0x0]\n"
- "zip1 v16.8h, v17.8h, v16.8h\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x80]\n"
- "str q16, [x21, #0xc0]\n"
- "add x21, x21, #0x10\n"
+ "ldr d20, [x21], #0x8\n"
+ "ldr d19, [x20], #0x8\n"
+ "zip1 v18.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v16.8h\n"
+ "zip1 v17.8h, v21.8h, v17.8h\n"
+ "str q18, [x27, #0x0]\n"
+ "str q16, [x27, #0x40]\n"
+ "zip1 v16.8h, v20.8h, v19.8h\n"
+ "str q17, [x27, #0x80]\n"
+ "str q16, [x27, #0xc0]\n"
+ "add x27, x27, #0x10\n"
"bge 4b\n"
"5:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 7f\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
- "zip1 v20.8h, v19.8h, v18.8h\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
- "ldr h17, [x25], #0x2\n"
- "ldr h16, [x23], #0x2\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
+ "ldr h23, [x9], #0x2\n"
+ "ldr h18, [x26], #0x2\n"
+ "sub x28, x28, #0x1\n"
+ "ldr h22, [x25], #0x2\n"
+ "ldr h16, [x24], #0x2\n"
+ "cmp x28, #0x1\n"
+ "ldr h21, [x23], #0x2\n"
"ldr h17, [x22], #0x2\n"
- "ldr h16, [x20], #0x2\n"
- "str s20, [x21, #0x0]\n"
- "zip1 v16.8h, v17.8h, v16.8h\n"
- "str s19, [x21, #0x40]\n"
- "str s18, [x21, #0x80]\n"
- "str s16, [x21, #0xc0]\n"
- "add x21, x21, #0x4\n"
+ "ldr h20, [x21], #0x2\n"
+ "ldr h19, [x20], #0x2\n"
+ "zip1 v18.8h, v23.8h, v18.8h\n"
+ "zip1 v16.8h, v22.8h, v16.8h\n"
+ "zip1 v17.8h, v21.8h, v17.8h\n"
+ "str s18, [x27, #0x0]\n"
+ "str s16, [x27, #0x40]\n"
+ "zip1 v16.8h, v20.8h, v19.8h\n"
+ "str s17, [x27, #0x80]\n"
+ "str s16, [x27, #0xc0]\n"
+ "add x27, x27, #0x4\n"
"bge 6b\n"
"7:" // Main row loop: width 1 loop: skip
+ "8:" // Main row loop: odd col skip
"cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x100\n"
"bge 1b\n"
- "cbz %x[height], 16f\n"
- "8:" // Main loop skip
- "9:" // Tail row loop: Head
+ "cbz %x[height], 18f\n"
+ "9:" // Main loop skip
+ "10:" // Tail row loop: Head
"mov x9, %x[in]\n"
"mov x20, %x[width]\n"
- "add x28, x9, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x28, %x[in_stride]\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x10\n"
- "mov x21, %x[out]\n"
+ "mov x27, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
- "blt 11f\n"
- "10:" // Tail row loop: Column loop
+ "add x26, x9, %x[in_stride]\n"
+ "add %x[in], x26, %x[in_stride]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x20, #0x10\n"
+ "blt 12f\n"
+ "11:" // Tail row loop: Column loop
"ldr q18, [x9], #0x10\n"
- "ldr q17, [x28], #0x10\n"
+ "ldr q17, [x26], #0x10\n"
"sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
"ldr q20, [x9], #0x10\n"
- "ldr q16, [x28], #0x10\n"
+ "cmp x20, #0x10\n"
+ "ldr q16, [x26], #0x10\n"
"zip1 v19.8h, v18.8h, v17.8h\n"
"zip2 v18.8h, v18.8h, v17.8h\n"
"zip1 v17.8h, v20.8h, v16.8h\n"
"zip2 v16.8h, v20.8h, v16.8h\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 10b\n"
- "11:" // Tail row loop: Column loop skip
+ "str q19, [x27, #0x0]\n"
+ "str q18, [x27, #0x10]\n"
+ "str q17, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 11b\n"
+ "12:" // Tail row loop: Column loop skip
+ "cbz x20, 17f\n"
"cmp x20, #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: width 4 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: width 4 loop: loop
"ldr d17, [x9], #0x8\n"
- "ldr d16, [x28], #0x8\n"
+ "ldr d16, [x26], #0x8\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 12b\n"
- "13:" // Tail row loop: width 4 loop: skip
+ "str q16, [x27, #0x0]\n"
+ "add x27, x27, #0x10\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 1 loop: loop
+ "blt 16f\n"
+ "15:" // Tail row loop: width 1 loop: loop
"ldr h17, [x9], #0x2\n"
- "ldr h16, [x28], #0x2\n"
+ "ldr h16, [x26], #0x2\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 1 loop: skip
+ "str s16, [x27, #0x0]\n"
+ "add x27, x27, #0x4\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 1 loop: skip
+ "17:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x40\n"
- "bge 9b\n"
- "16:" // Done
+ "bge 10b\n"
+ "18:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp
index ac67467240..1ed8708f4f 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,451 +40,481 @@ void a64_transpose_interleave_16_2x4(uint16_t *out, const uint16_t *in, size_t w
__asm__ __volatile__(
"cmp %x[height], #0x8\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "cmp x28, #0x20\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x20\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q23, [x9], #0x10\n"
- "ldr q29, [x28], #0x10\n"
- "sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q13, [x27], #0x10\n"
- "ldr q12, [x26], #0x10\n"
- "zip1 v20.8h, v23.8h, v13.8h\n"
- "zip1 v28.8h, v29.8h, v12.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q9, [x23], #0x10\n"
- "zip2 v22.8h, v23.8h, v13.8h\n"
- "zip2 v1.8h, v29.8h, v12.8h\n"
- "ldr q27, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "zip1 v4.8h, v18.8h, v27.8h\n"
- "zip1 v26.8h, v9.8h, v3.8h\n"
+ "ldr q0, [x9], #0x10\n"
+ "ldr q18, [x26], #0x10\n"
+ "sub x28, x28, #0x20\n"
+ "ldr q22, [x25], #0x10\n"
+ "ldr q27, [x24], #0x10\n"
+ "cmp x28, #0x20\n"
+ "ldr q29, [x23], #0x10\n"
+ "ldr q12, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
+ "ldr q5, [x20], #0x10\n"
+ "ldr q7, [x9], #0x10\n"
+ "ldr q16, [x26], #0x10\n"
+ "zip1 v26.8h, v0.8h, v22.8h\n"
+ "zip1 v3.8h, v18.8h, v27.8h\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q28, [x24], #0x10\n"
+ "zip2 v25.8h, v0.8h, v22.8h\n"
+ "zip2 v10.8h, v18.8h, v27.8h\n"
+ "ldr q1, [x23], #0x10\n"
+ "ldr q20, [x22], #0x10\n"
+ "zip1 v14.8h, v29.8h, v17.8h\n"
+ "zip1 v19.8h, v12.8h, v5.8h\n"
+ "ldr q0, [x21], #0x10\n"
+ "ldr q13, [x20], #0x10\n"
+ "zip2 v22.8h, v29.8h, v17.8h\n"
+ "zip2 v15.8h, v12.8h, v5.8h\n"
"ldr q17, [x9], #0x10\n"
- "ldr q2, [x28], #0x10\n"
- "zip2 v15.8h, v18.8h, v27.8h\n"
- "zip2 v12.8h, v9.8h, v3.8h\n"
- "ldr q23, [x27], #0x10\n"
- "ldr q14, [x26], #0x10\n"
- "zip1 v19.8h, v17.8h, v23.8h\n"
- "zip1 v21.8h, v2.8h, v14.8h\n"
- "ldr q6, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v27.8h, v17.8h, v23.8h\n"
- "zip2 v17.8h, v2.8h, v14.8h\n"
- "ldr q0, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "zip1 v16.8h, v6.8h, v0.8h\n"
- "zip1 v30.8h, v18.8h, v3.8h\n"
- "ldr q2, [x9], #0x10\n"
- "ldr q13, [x28], #0x10\n"
- "zip2 v31.8h, v6.8h, v0.8h\n"
- "zip2 v8.8h, v18.8h, v3.8h\n"
- "ldr q14, [x27], #0x10\n"
- "ldr q3, [x26], #0x10\n"
- "zip1 v11.8h, v2.8h, v14.8h\n"
- "zip1 v29.8h, v13.8h, v3.8h\n"
- "ldr q25, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v23.8h, v2.8h, v14.8h\n"
- "zip2 v10.8h, v13.8h, v3.8h\n"
- "ldr q7, [x22], #0x10\n"
- "ldr q6, [x20], #0x10\n"
- "zip1 v14.8h, v25.8h, v7.8h\n"
- "zip1 v13.8h, v18.8h, v6.8h\n"
+ "ldr q23, [x26], #0x10\n"
+ "zip1 v18.8h, v7.8h, v21.8h\n"
+ "zip1 v5.8h, v16.8h, v28.8h\n"
+ "ldr q31, [x25], #0x10\n"
+ "ldr q12, [x24], #0x10\n"
+ "zip2 v24.8h, v7.8h, v21.8h\n"
+ "zip2 v28.8h, v16.8h, v28.8h\n"
+ "ldr q4, [x23], #0x10\n"
+ "ldr q6, [x22], #0x10\n"
+ "zip1 v30.8h, v1.8h, v0.8h\n"
+ "zip1 v21.8h, v20.8h, v13.8h\n"
+ "ldr q7, [x21], #0x10\n"
+ "ldr q9, [x20], #0x10\n"
+ "zip2 v27.8h, v1.8h, v0.8h\n"
+ "zip2 v0.8h, v20.8h, v13.8h\n"
"ldr q2, [x9], #0x10\n"
- "ldr q5, [x28], #0x10\n"
- "zip2 v9.8h, v25.8h, v7.8h\n"
- "zip2 v7.8h, v18.8h, v6.8h\n"
- "ldr q6, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v25.8h, v2.8h, v6.8h\n"
- "zip1 v3.8h, v5.8h, v24.8h\n"
- "ldr q0, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v2.8h, v2.8h, v6.8h\n"
- "zip2 v24.8h, v5.8h, v24.8h\n"
- "ldr q5, [x22], #0x10\n"
- "zip1 v6.8h, v0.8h, v5.8h\n"
- "zip2 v5.8h, v0.8h, v5.8h\n"
- "zip1 v0.8h, v20.8h, v28.8h\n"
- "zip2 v28.8h, v20.8h, v28.8h\n"
- "ldr q20, [x20], #0x10\n"
- "str q0, [x21, #0x0]\n"
- "zip1 v0.8h, v18.8h, v20.8h\n"
- "zip2 v20.8h, v18.8h, v20.8h\n"
- "str q28, [x21, #0x10]\n"
- "zip1 v18.8h, v22.8h, v1.8h\n"
- "zip2 v28.8h, v22.8h, v1.8h\n"
- "str q18, [x21, #0x20]\n"
- "zip1 v22.8h, v19.8h, v21.8h\n"
- "zip2 v19.8h, v19.8h, v21.8h\n"
- "str q28, [x21, #0x30]\n"
- "zip1 v18.8h, v27.8h, v17.8h\n"
- "zip2 v17.8h, v27.8h, v17.8h\n"
- "str q22, [x21, #0x40]\n"
- "zip1 v27.8h, v4.8h, v26.8h\n"
- "zip2 v26.8h, v4.8h, v26.8h\n"
- "str q19, [x21, #0x50]\n"
- "zip1 v22.8h, v15.8h, v12.8h\n"
- "zip2 v21.8h, v15.8h, v12.8h\n"
- "str q18, [x21, #0x60]\n"
- "zip1 v19.8h, v16.8h, v30.8h\n"
- "zip2 v18.8h, v16.8h, v30.8h\n"
- "str q17, [x21, #0x70]\n"
- "zip1 v17.8h, v31.8h, v8.8h\n"
- "zip2 v16.8h, v31.8h, v8.8h\n"
- "str q27, [x21, #0x80]\n"
- "str q26, [x21, #0x90]\n"
- "zip1 v31.8h, v11.8h, v29.8h\n"
- "zip2 v30.8h, v11.8h, v29.8h\n"
- "str q22, [x21, #0xa0]\n"
- "zip1 v29.8h, v23.8h, v10.8h\n"
- "zip2 v28.8h, v23.8h, v10.8h\n"
- "str q21, [x21, #0xb0]\n"
- "zip1 v27.8h, v25.8h, v3.8h\n"
- "zip2 v26.8h, v25.8h, v3.8h\n"
- "str q19, [x21, #0xc0]\n"
- "zip1 v25.8h, v2.8h, v24.8h\n"
- "zip2 v24.8h, v2.8h, v24.8h\n"
- "str q18, [x21, #0xd0]\n"
- "zip1 v23.8h, v14.8h, v13.8h\n"
- "zip2 v22.8h, v14.8h, v13.8h\n"
- "str q17, [x21, #0xe0]\n"
- "zip1 v21.8h, v9.8h, v7.8h\n"
- "zip2 v19.8h, v9.8h, v7.8h\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v2.8h, v6.8h, v0.8h\n"
- "zip2 v18.8h, v6.8h, v0.8h\n"
- "zip1 v17.8h, v5.8h, v20.8h\n"
- "zip2 v16.8h, v5.8h, v20.8h\n"
- "str q31, [x21, #0x0]\n"
- "str q30, [x21, #0x10]\n"
- "str q29, [x21, #0x20]\n"
- "str q28, [x21, #0x30]\n"
- "str q27, [x21, #0x40]\n"
- "str q26, [x21, #0x50]\n"
- "str q25, [x21, #0x60]\n"
- "str q24, [x21, #0x70]\n"
- "str q23, [x21, #0x80]\n"
- "str q22, [x21, #0x90]\n"
- "str q21, [x21, #0xa0]\n"
- "str q19, [x21, #0xb0]\n"
- "str q2, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q8, [x26], #0x10\n"
+ "zip1 v29.8h, v17.8h, v31.8h\n"
+ "zip1 v16.8h, v23.8h, v12.8h\n"
+ "ldr q13, [x25], #0x10\n"
+ "ldr q1, [x24], #0x10\n"
+ "zip2 v20.8h, v17.8h, v31.8h\n"
+ "zip2 v23.8h, v23.8h, v12.8h\n"
+ "ldr q17, [x23], #0x10\n"
+ "ldr q31, [x22], #0x10\n"
+ "zip1 v12.8h, v4.8h, v7.8h\n"
+ "zip1 v11.8h, v6.8h, v9.8h\n"
+ "zip2 v7.8h, v4.8h, v7.8h\n"
+ "ldr q4, [x21], #0x10\n"
+ "zip2 v6.8h, v6.8h, v9.8h\n"
+ "zip1 v9.8h, v2.8h, v13.8h\n"
+ "zip2 v13.8h, v2.8h, v13.8h\n"
+ "zip1 v2.8h, v8.8h, v1.8h\n"
+ "zip2 v1.8h, v8.8h, v1.8h\n"
+ "zip1 v8.8h, v17.8h, v4.8h\n"
+ "zip2 v4.8h, v17.8h, v4.8h\n"
+ "zip1 v17.8h, v26.8h, v3.8h\n"
+ "zip2 v26.8h, v26.8h, v3.8h\n"
+ "ldr q3, [x20], #0x10\n"
+ "str q17, [x27, #0x0]\n"
+ "zip1 v17.8h, v25.8h, v10.8h\n"
+ "zip2 v25.8h, v25.8h, v10.8h\n"
+ "zip1 v10.8h, v18.8h, v5.8h\n"
+ "zip2 v5.8h, v18.8h, v5.8h\n"
+ "zip1 v18.8h, v31.8h, v3.8h\n"
+ "zip2 v3.8h, v31.8h, v3.8h\n"
+ "str q26, [x27, #0x10]\n"
+ "zip1 v31.8h, v24.8h, v28.8h\n"
+ "zip2 v26.8h, v24.8h, v28.8h\n"
+ "str q17, [x27, #0x20]\n"
+ "zip1 v28.8h, v14.8h, v19.8h\n"
+ "zip2 v19.8h, v14.8h, v19.8h\n"
+ "str q25, [x27, #0x30]\n"
+ "zip1 v24.8h, v22.8h, v15.8h\n"
+ "zip2 v15.8h, v22.8h, v15.8h\n"
+ "str q10, [x27, #0x40]\n"
+ "zip1 v25.8h, v30.8h, v21.8h\n"
+ "zip2 v14.8h, v30.8h, v21.8h\n"
+ "str q5, [x27, #0x50]\n"
+ "zip1 v5.8h, v27.8h, v0.8h\n"
+ "zip2 v0.8h, v27.8h, v0.8h\n"
+ "str q31, [x27, #0x60]\n"
+ "str q26, [x27, #0x70]\n"
+ "zip1 v31.8h, v29.8h, v16.8h\n"
+ "zip2 v22.8h, v29.8h, v16.8h\n"
+ "str q28, [x27, #0x80]\n"
+ "zip1 v10.8h, v20.8h, v23.8h\n"
+ "zip2 v28.8h, v20.8h, v23.8h\n"
+ "str q19, [x27, #0x90]\n"
+ "zip1 v27.8h, v9.8h, v2.8h\n"
+ "zip2 v20.8h, v9.8h, v2.8h\n"
+ "str q24, [x27, #0xa0]\n"
+ "zip1 v2.8h, v13.8h, v1.8h\n"
+ "zip2 v24.8h, v13.8h, v1.8h\n"
+ "str q15, [x27, #0xb0]\n"
+ "zip1 v23.8h, v12.8h, v11.8h\n"
+ "zip2 v26.8h, v12.8h, v11.8h\n"
+ "str q25, [x27, #0xc0]\n"
+ "zip1 v21.8h, v7.8h, v6.8h\n"
+ "zip2 v29.8h, v7.8h, v6.8h\n"
+ "str q14, [x27, #0xd0]\n"
+ "zip1 v19.8h, v8.8h, v18.8h\n"
+ "zip2 v18.8h, v8.8h, v18.8h\n"
+ "str q5, [x27, #0xe0]\n"
+ "zip1 v17.8h, v4.8h, v3.8h\n"
+ "zip2 v16.8h, v4.8h, v3.8h\n"
+ "str q0, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q31, [x27, #0x0]\n"
+ "str q22, [x27, #0x10]\n"
+ "str q10, [x27, #0x20]\n"
+ "str q28, [x27, #0x30]\n"
+ "str q27, [x27, #0x40]\n"
+ "str q20, [x27, #0x50]\n"
+ "str q2, [x27, #0x60]\n"
+ "str q24, [x27, #0x70]\n"
+ "str q23, [x27, #0x80]\n"
+ "str q26, [x27, #0x90]\n"
+ "str q21, [x27, #0xa0]\n"
+ "str q29, [x27, #0xb0]\n"
+ "str q19, [x27, #0xc0]\n"
+ "str q18, [x27, #0xd0]\n"
+ "str q17, [x27, #0xe0]\n"
+ "str q16, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0x10\n"
+ "cmp x28, #0x10\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v3.8h, v21.8h, v17.8h\n"
- "zip1 v2.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v1.8h, v21.8h, v17.8h\n"
- "zip2 v24.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v0.8h, v19.8h, v17.8h\n"
- "zip1 v31.8h, v18.8h, v16.8h\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v30.8h, v19.8h, v17.8h\n"
- "zip2 v29.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v23.8h, v21.8h, v17.8h\n"
- "zip1 v22.8h, v20.8h, v16.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v21.8h, v21.8h, v17.8h\n"
- "zip2 v20.8h, v20.8h, v16.8h\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x9], #0x10\n"
+ "ldr q18, [x26], #0x10\n"
+ "sub x28, x28, #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "cmp x28, #0x10\n"
+ "ldr q31, [x23], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
+ "ldr q23, [x21], #0x10\n"
+ "ldr q22, [x20], #0x10\n"
+ "ldr q29, [x9], #0x10\n"
+ "ldr q28, [x26], #0x10\n"
+ "zip1 v27.8h, v19.8h, v17.8h\n"
+ "zip1 v26.8h, v18.8h, v16.8h\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v25.8h, v19.8h, v17.8h\n"
+ "zip2 v24.8h, v18.8h, v16.8h\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v3.8h, v31.8h, v23.8h\n"
+ "zip1 v2.8h, v30.8h, v22.8h\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "zip1 v28.8h, v19.8h, v17.8h\n"
- "zip1 v27.8h, v18.8h, v16.8h\n"
- "zip2 v26.8h, v19.8h, v17.8h\n"
- "zip2 v25.8h, v18.8h, v16.8h\n"
- "zip1 v16.8h, v3.8h, v2.8h\n"
- "zip2 v17.8h, v3.8h, v2.8h\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.8h, v1.8h, v24.8h\n"
- "zip2 v19.8h, v1.8h, v24.8h\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v18.8h, v23.8h, v22.8h\n"
- "zip2 v17.8h, v23.8h, v22.8h\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v16.8h, v21.8h, v20.8h\n"
+ "zip2 v1.8h, v31.8h, v23.8h\n"
+ "zip2 v0.8h, v30.8h, v22.8h\n"
+ "zip1 v23.8h, v29.8h, v21.8h\n"
+ "zip1 v22.8h, v28.8h, v20.8h\n"
+ "zip2 v21.8h, v29.8h, v21.8h\n"
+ "zip2 v20.8h, v28.8h, v20.8h\n"
+ "zip1 v31.8h, v19.8h, v17.8h\n"
+ "zip1 v30.8h, v18.8h, v16.8h\n"
+ "zip2 v29.8h, v19.8h, v17.8h\n"
+ "zip2 v28.8h, v18.8h, v16.8h\n"
+ "zip1 v19.8h, v27.8h, v26.8h\n"
+ "zip2 v18.8h, v27.8h, v26.8h\n"
+ "zip1 v17.8h, v25.8h, v24.8h\n"
+ "zip2 v16.8h, v25.8h, v24.8h\n"
+ "zip1 v27.8h, v23.8h, v22.8h\n"
+ "zip2 v26.8h, v23.8h, v22.8h\n"
+ "zip1 v25.8h, v21.8h, v20.8h\n"
"zip2 v24.8h, v21.8h, v20.8h\n"
- "str q19, [x21, #0x30]\n"
- "zip1 v23.8h, v0.8h, v31.8h\n"
- "zip2 v22.8h, v0.8h, v31.8h\n"
- "str q18, [x21, #0x40]\n"
- "zip1 v21.8h, v30.8h, v29.8h\n"
- "zip2 v20.8h, v30.8h, v29.8h\n"
- "str q17, [x21, #0x50]\n"
- "zip1 v19.8h, v28.8h, v27.8h\n"
- "zip2 v18.8h, v28.8h, v27.8h\n"
- "str q16, [x21, #0x60]\n"
- "zip1 v17.8h, v26.8h, v25.8h\n"
- "zip2 v16.8h, v26.8h, v25.8h\n"
- "str q24, [x21, #0x70]\n"
- "str q23, [x21, #0x80]\n"
- "str q22, [x21, #0x90]\n"
- "str q21, [x21, #0xa0]\n"
- "str q20, [x21, #0xb0]\n"
- "str q19, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q19, [x27, #0x0]\n"
+ "zip1 v23.8h, v3.8h, v2.8h\n"
+ "zip2 v22.8h, v3.8h, v2.8h\n"
+ "str q18, [x27, #0x10]\n"
+ "zip1 v21.8h, v1.8h, v0.8h\n"
+ "zip2 v20.8h, v1.8h, v0.8h\n"
+ "str q17, [x27, #0x20]\n"
+ "zip1 v19.8h, v31.8h, v30.8h\n"
+ "zip2 v18.8h, v31.8h, v30.8h\n"
+ "str q16, [x27, #0x30]\n"
+ "zip1 v17.8h, v29.8h, v28.8h\n"
+ "zip2 v16.8h, v29.8h, v28.8h\n"
+ "str q27, [x27, #0x40]\n"
+ "str q26, [x27, #0x50]\n"
+ "str q25, [x27, #0x60]\n"
+ "str q24, [x27, #0x70]\n"
+ "str q23, [x27, #0x80]\n"
+ "str q22, [x27, #0x90]\n"
+ "str q21, [x27, #0xa0]\n"
+ "str q20, [x27, #0xb0]\n"
+ "str q19, [x27, #0xc0]\n"
+ "str q18, [x27, #0xd0]\n"
+ "str q17, [x27, #0xe0]\n"
+ "str q16, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cbz x28, 10f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "str q16, [x27, #0x80]\n"
+ "str q16, [x27, #0x90]\n"
+ "str q16, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "str q16, [x27, #0xc0]\n"
+ "str q16, [x27, #0xd0]\n"
+ "str q16, [x27, #0xe0]\n"
+ "str q16, [x27, #0xf0]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr d19, [x9], #0x8\n"
- "ldr d18, [x28], #0x8\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
- "zip1 v17.8h, v19.8h, v17.8h\n"
- "zip1 v16.8h, v18.8h, v16.8h\n"
+ "ldr d23, [x9], #0x8\n"
+ "ldr d22, [x26], #0x8\n"
+ "sub x28, x28, #0x4\n"
"ldr d18, [x25], #0x8\n"
- "ldr d21, [x23], #0x8\n"
- "zip1 v20.8h, v17.8h, v16.8h\n"
- "zip2 v19.8h, v17.8h, v16.8h\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x24], #0x8\n"
+ "cmp x28, #0x4\n"
+ "ldr d20, [x23], #0x8\n"
+ "ldr d21, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "zip1 v18.8h, v18.8h, v17.8h\n"
+ "zip1 v18.8h, v23.8h, v18.8h\n"
+ "zip1 v17.8h, v22.8h, v17.8h\n"
+ "zip1 v20.8h, v20.8h, v19.8h\n"
"zip1 v16.8h, v21.8h, v16.8h\n"
- "str q20, [x21, #0x0]\n"
- "zip1 v17.8h, v18.8h, v16.8h\n"
- "zip2 v16.8h, v18.8h, v16.8h\n"
- "str q19, [x21, #0x10]\n"
- "str q17, [x21, #0x80]\n"
- "str q16, [x21, #0x90]\n"
- "add x21, x21, #0x20\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "zip1 v17.8h, v20.8h, v16.8h\n"
+ "zip2 v16.8h, v20.8h, v16.8h\n"
+ "str q19, [x27, #0x0]\n"
+ "str q18, [x27, #0x10]\n"
+ "str q17, [x27, #0x80]\n"
+ "str q16, [x27, #0x90]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
+ "ldr h23, [x9], #0x2\n"
+ "ldr h22, [x26], #0x2\n"
+ "sub x28, x28, #0x1\n"
+ "ldr h19, [x25], #0x2\n"
+ "ldr h17, [x24], #0x2\n"
+ "cmp x28, #0x1\n"
+ "ldr h21, [x23], #0x2\n"
+ "ldr h20, [x22], #0x2\n"
+ "ldr h18, [x21], #0x2\n"
+ "ldr h16, [x20], #0x2\n"
+ "zip1 v19.8h, v23.8h, v19.8h\n"
+ "zip1 v17.8h, v22.8h, v17.8h\n"
+ "zip1 v18.8h, v21.8h, v18.8h\n"
+ "zip1 v16.8h, v20.8h, v16.8h\n"
"zip1 v17.8h, v19.8h, v17.8h\n"
"zip1 v16.8h, v18.8h, v16.8h\n"
- "ldr h20, [x25], #0x2\n"
- "ldr h19, [x23], #0x2\n"
- "zip1 v18.8h, v17.8h, v16.8h\n"
- "ldr h17, [x22], #0x2\n"
- "ldr h16, [x20], #0x2\n"
- "zip1 v17.8h, v20.8h, v17.8h\n"
- "zip1 v16.8h, v19.8h, v16.8h\n"
- "str d18, [x21, #0x0]\n"
- "zip1 v16.8h, v17.8h, v16.8h\n"
- "str d16, [x21, #0x80]\n"
- "add x21, x21, #0x8\n"
+ "str d17, [x27, #0x0]\n"
+ "str d16, [x27, #0x80]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x100\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x9, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "csel x25, x25, %x[pad_row], GE\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x20\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x20, #0x20\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
+ "ldr q20, [x9], #0x10\n"
+ "ldr q19, [x26], #0x10\n"
"sub x20, x20, #0x20\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x20\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v4.8h, v21.8h, v17.8h\n"
- "zip1 v3.8h, v20.8h, v16.8h\n"
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v2.8h, v21.8h, v17.8h\n"
- "zip2 v1.8h, v20.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v0.8h, v19.8h, v17.8h\n"
- "zip1 v31.8h, v18.8h, v16.8h\n"
- "ldr q24, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v30.8h, v19.8h, v17.8h\n"
- "zip2 v23.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.8h, v24.8h, v17.8h\n"
- "zip1 v21.8h, v20.8h, v16.8h\n"
+ "ldr q23, [x9], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "ldr q29, [x9], #0x10\n"
+ "zip1 v4.8h, v20.8h, v18.8h\n"
+ "zip1 v3.8h, v19.8h, v17.8h\n"
+ "ldr q28, [x26], #0x10\n"
+ "ldr q27, [x25], #0x10\n"
+ "zip2 v2.8h, v20.8h, v18.8h\n"
+ "zip2 v1.8h, v19.8h, v17.8h\n"
+ "ldr q20, [x24], #0x10\n"
"ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v29.8h, v24.8h, v17.8h\n"
- "zip2 v28.8h, v20.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v27.8h, v19.8h, v17.8h\n"
- "zip1 v26.8h, v18.8h, v16.8h\n"
- "zip2 v25.8h, v19.8h, v17.8h\n"
- "zip2 v24.8h, v18.8h, v16.8h\n"
- "zip1 v16.8h, v4.8h, v3.8h\n"
- "zip2 v17.8h, v4.8h, v3.8h\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.8h, v2.8h, v1.8h\n"
- "zip2 v20.8h, v2.8h, v1.8h\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v19.8h, v0.8h, v31.8h\n"
- "zip2 v18.8h, v0.8h, v31.8h\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v17.8h, v30.8h, v23.8h\n"
- "zip2 v16.8h, v30.8h, v23.8h\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
+ "zip1 v26.8h, v23.8h, v21.8h\n"
+ "zip1 v25.8h, v22.8h, v16.8h\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip2 v24.8h, v23.8h, v21.8h\n"
+ "zip2 v23.8h, v22.8h, v16.8h\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip1 v22.8h, v29.8h, v27.8h\n"
+ "zip1 v21.8h, v28.8h, v20.8h\n"
+ "zip2 v0.8h, v29.8h, v27.8h\n"
+ "zip2 v31.8h, v28.8h, v20.8h\n"
+ "zip1 v30.8h, v19.8h, v17.8h\n"
+ "zip1 v29.8h, v18.8h, v16.8h\n"
+ "zip2 v28.8h, v19.8h, v17.8h\n"
+ "zip2 v27.8h, v18.8h, v16.8h\n"
+ "zip1 v20.8h, v4.8h, v3.8h\n"
+ "zip2 v19.8h, v4.8h, v3.8h\n"
+ "zip1 v18.8h, v2.8h, v1.8h\n"
+ "zip2 v17.8h, v2.8h, v1.8h\n"
+ "zip1 v16.8h, v26.8h, v25.8h\n"
+ "zip2 v26.8h, v26.8h, v25.8h\n"
+ "zip1 v25.8h, v24.8h, v23.8h\n"
+ "zip2 v24.8h, v24.8h, v23.8h\n"
+ "str q20, [x27, #0x0]\n"
+ "str q19, [x27, #0x10]\n"
"zip1 v23.8h, v22.8h, v21.8h\n"
"zip2 v22.8h, v22.8h, v21.8h\n"
- "str q18, [x21, #0x50]\n"
- "zip1 v21.8h, v29.8h, v28.8h\n"
- "zip2 v20.8h, v29.8h, v28.8h\n"
- "str q17, [x21, #0x60]\n"
- "zip1 v19.8h, v27.8h, v26.8h\n"
- "zip2 v18.8h, v27.8h, v26.8h\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v17.8h, v25.8h, v24.8h\n"
- "zip2 v16.8h, v25.8h, v24.8h\n"
- "str q23, [x21, #0x0]\n"
- "str q22, [x21, #0x10]\n"
- "str q21, [x21, #0x20]\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "str q18, [x27, #0x20]\n"
+ "zip1 v21.8h, v0.8h, v31.8h\n"
+ "zip2 v20.8h, v0.8h, v31.8h\n"
+ "str q17, [x27, #0x30]\n"
+ "zip1 v19.8h, v30.8h, v29.8h\n"
+ "zip2 v18.8h, v30.8h, v29.8h\n"
+ "str q16, [x27, #0x40]\n"
+ "zip1 v17.8h, v28.8h, v27.8h\n"
+ "zip2 v16.8h, v28.8h, v27.8h\n"
+ "str q26, [x27, #0x50]\n"
+ "str q25, [x27, #0x60]\n"
+ "str q24, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "str q23, [x27, #0x0]\n"
+ "str q22, [x27, #0x10]\n"
+ "str q21, [x27, #0x20]\n"
+ "str q20, [x27, #0x30]\n"
+ "str q19, [x27, #0x40]\n"
+ "str q18, [x27, #0x50]\n"
+ "str q17, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0x10\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
+ "ldr q20, [x9], #0x10\n"
+ "ldr q19, [x26], #0x10\n"
"sub x20, x20, #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x10\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v25.8h, v19.8h, v17.8h\n"
- "zip1 v24.8h, v18.8h, v16.8h\n"
- "ldr q22, [x9], #0x10\n"
- "ldr q21, [x28], #0x10\n"
- "zip2 v20.8h, v19.8h, v17.8h\n"
- "zip2 v19.8h, v18.8h, v16.8h\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v23.8h, v22.8h, v17.8h\n"
- "zip1 v18.8h, v21.8h, v16.8h\n"
- "zip2 v22.8h, v22.8h, v17.8h\n"
- "zip2 v21.8h, v21.8h, v16.8h\n"
- "zip1 v16.8h, v25.8h, v24.8h\n"
- "zip2 v17.8h, v25.8h, v24.8h\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.8h, v20.8h, v19.8h\n"
+ "ldr q24, [x9], #0x10\n"
+ "ldr q25, [x26], #0x10\n"
+ "ldr q23, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip1 v22.8h, v20.8h, v18.8h\n"
+ "zip1 v21.8h, v19.8h, v17.8h\n"
+ "zip2 v20.8h, v20.8h, v18.8h\n"
+ "zip2 v19.8h, v19.8h, v17.8h\n"
+ "zip1 v18.8h, v24.8h, v23.8h\n"
+ "zip1 v17.8h, v25.8h, v16.8h\n"
+ "zip2 v24.8h, v24.8h, v23.8h\n"
+ "zip2 v16.8h, v25.8h, v16.8h\n"
+ "zip1 v23.8h, v22.8h, v21.8h\n"
+ "zip2 v22.8h, v22.8h, v21.8h\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
"zip2 v20.8h, v20.8h, v19.8h\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v19.8h, v23.8h, v18.8h\n"
- "zip2 v18.8h, v23.8h, v18.8h\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v17.8h, v22.8h, v21.8h\n"
- "zip2 v16.8h, v22.8h, v21.8h\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "zip1 v17.8h, v24.8h, v16.8h\n"
+ "zip2 v16.8h, v24.8h, v16.8h\n"
+ "str q23, [x27, #0x0]\n"
+ "str q22, [x27, #0x10]\n"
+ "str q21, [x27, #0x20]\n"
+ "str q20, [x27, #0x30]\n"
+ "str q19, [x27, #0x40]\n"
+ "str q18, [x27, #0x50]\n"
+ "str q17, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr d18, [x9], #0x8\n"
- "ldr d19, [x28], #0x8\n"
+ "ldr d19, [x26], #0x8\n"
"sub x20, x20, #0x4\n"
+ "ldr d17, [x25], #0x8\n"
+ "ldr d16, [x24], #0x8\n"
"cmp x20, #0x4\n"
- "ldr d17, [x27], #0x8\n"
- "ldr d16, [x26], #0x8\n"
"zip1 v18.8h, v18.8h, v17.8h\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
"zip1 v17.8h, v18.8h, v16.8h\n"
"zip2 v16.8h, v18.8h, v16.8h\n"
- "str q17, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q17, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr h19, [x9], #0x2\n"
- "ldr h18, [x28], #0x2\n"
+ "ldr h18, [x26], #0x2\n"
"sub x20, x20, #0x1\n"
+ "ldr h17, [x25], #0x2\n"
+ "ldr h16, [x24], #0x2\n"
"cmp x20, #0x1\n"
- "ldr h17, [x27], #0x2\n"
- "ldr h16, [x26], #0x2\n"
"zip1 v17.8h, v19.8h, v17.8h\n"
"zip1 v16.8h, v18.8h, v16.8h\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp
index b9fe8b126a..a2f64768da 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_16_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,294 +40,313 @@ void a64_transpose_interleave_16_2x4_fp32bf16(bfloat16 *out, const float *in, si
__asm__ __volatile__(
"cmp %x[height], #0x8\n"
- "blt 8f\n"
+ "blt 9f\n"
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "cmp x28, #0x10\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x10\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q13, [x9], #0x10\n"
- "ldr q12, [x28], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q1, [x27], #0x10\n"
- "ldr q9, [x26], #0x10\n"
- "zip1 v19.4s, v13.4s, v1.4s\n"
- "zip1 v14.4s, v12.4s, v9.4s\n"
- "ldr q15, [x25], #0x10\n"
- "ldr q4, [x23], #0x10\n"
- "zip2 v8.4s, v13.4s, v1.4s\n"
- "zip2 v28.4s, v12.4s, v9.4s\n"
- "ldr q0, [x22], #0x10\n"
- "ldr q1, [x20], #0x10\n"
- "zip1 v16.4s, v15.4s, v0.4s\n"
- "zip1 v5.4s, v4.4s, v1.4s\n"
- "ldr q25, [x9], #0x10\n"
- "ldr q24, [x28], #0x10\n"
- "zip2 v3.4s, v15.4s, v0.4s\n"
- "zip2 v2.4s, v4.4s, v1.4s\n"
- "ldr q21, [x27], #0x10\n"
- "ldr q30, [x26], #0x10\n"
- "zip1 v18.4s, v25.4s, v21.4s\n"
- "zip1 v27.4s, v24.4s, v30.4s\n"
- "ldr q22, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip2 v9.4s, v25.4s, v21.4s\n"
- "zip2 v10.4s, v24.4s, v30.4s\n"
+ "ldr q2, [x9], #0x10\n"
+ "ldr q3, [x26], #0x10\n"
+ "sub x28, x28, #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "ldr q4, [x24], #0x10\n"
+ "cmp x28, #0x10\n"
+ "ldr q16, [x23], #0x10\n"
"ldr q1, [x22], #0x10\n"
- "ldr q21, [x20], #0x10\n"
- "zip1 v25.4s, v22.4s, v1.4s\n"
- "zip1 v7.4s, v20.4s, v21.4s\n"
+ "ldr q5, [x21], #0x10\n"
+ "ldr q15, [x20], #0x10\n"
+ "ldr q19, [x9], #0x10\n"
+ "ldr q11, [x26], #0x10\n"
+ "zip1 v14.4s, v2.4s, v17.4s\n"
+ "zip1 v27.4s, v3.4s, v4.4s\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q18, [x24], #0x10\n"
+ "zip2 v12.4s, v2.4s, v17.4s\n"
+ "zip2 v7.4s, v3.4s, v4.4s\n"
+ "ldr q22, [x23], #0x10\n"
+ "ldr q9, [x22], #0x10\n"
+ "zip1 v17.4s, v16.4s, v5.4s\n"
+ "zip1 v29.4s, v1.4s, v15.4s\n"
+ "ldr q30, [x21], #0x10\n"
+ "ldr q8, [x20], #0x10\n"
+ "zip2 v28.4s, v16.4s, v5.4s\n"
+ "zip2 v26.4s, v1.4s, v15.4s\n"
"ldr q31, [x9], #0x10\n"
- "ldr q17, [x28], #0x10\n"
- "zip2 v30.4s, v22.4s, v1.4s\n"
- "zip2 v20.4s, v20.4s, v21.4s\n"
- "ldr q15, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v6.4s, v31.4s, v15.4s\n"
- "zip1 v4.4s, v17.4s, v24.4s\n"
- "ldr q12, [x25], #0x10\n"
- "ldr q29, [x23], #0x10\n"
- "zip2 v22.4s, v31.4s, v15.4s\n"
- "zip2 v26.4s, v17.4s, v24.4s\n"
- "ldr q0, [x22], #0x10\n"
- "ldr q24, [x20], #0x10\n"
- "zip1 v17.4s, v12.4s, v0.4s\n"
- "zip1 v31.4s, v29.4s, v24.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q1, [x28], #0x10\n"
- "zip2 v23.4s, v12.4s, v0.4s\n"
- "zip2 v24.4s, v29.4s, v24.4s\n"
- "ldr q11, [x27], #0x10\n"
- "ldr q29, [x26], #0x10\n"
- "zip1 v0.4s, v21.4s, v11.4s\n"
- "zip1 v13.4s, v1.4s, v29.4s\n"
- "ldr q15, [x25], #0x10\n"
- "ldr q12, [x23], #0x10\n"
- "zip2 v21.4s, v21.4s, v11.4s\n"
- "zip2 v29.4s, v1.4s, v29.4s\n"
+ "ldr q13, [x26], #0x10\n"
+ "zip1 v20.4s, v19.4s, v21.4s\n"
+ "zip1 v10.4s, v11.4s, v18.4s\n"
+ "ldr q2, [x25], #0x10\n"
+ "ldr q23, [x24], #0x10\n"
+ "zip2 v4.4s, v19.4s, v21.4s\n"
+ "zip2 v6.4s, v11.4s, v18.4s\n"
+ "ldr q21, [x23], #0x10\n"
"ldr q1, [x22], #0x10\n"
- "zip1 v11.4s, v15.4s, v1.4s\n"
- "zip2 v1.4s, v15.4s, v1.4s\n"
- "zip1 v15.4s, v19.4s, v14.4s\n"
- ".inst 0x0ea169ef // bfcvtn v15.4h, v15.4s\n"
- "zip2 v14.4s, v19.4s, v14.4s\n"
- "ldr q19, [x20], #0x10\n"
- ".inst 0x4ea169cf // bfcvtn2 v15.8h, v14.4s\n"
- "str q15, [x21, #0x0]\n"
- "zip1 v14.4s, v12.4s, v19.4s\n"
- "zip2 v15.4s, v12.4s, v19.4s\n"
- "zip1 v12.4s, v8.4s, v28.4s\n"
- "zip1 v19.4s, v18.4s, v27.4s\n"
- ".inst 0x0ea1698c // bfcvtn v12.4h, v12.4s\n"
- "zip2 v28.4s, v8.4s, v28.4s\n"
- "zip1 v8.4s, v9.4s, v10.4s\n"
- ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
- "zip2 v18.4s, v18.4s, v27.4s\n"
- "zip1 v27.4s, v6.4s, v4.4s\n"
- ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
- "zip2 v10.4s, v9.4s, v10.4s\n"
- "zip1 v9.4s, v22.4s, v26.4s\n"
- ".inst 0x0ea16b7b // bfcvtn v27.4h, v27.4s\n"
- "zip2 v6.4s, v6.4s, v4.4s\n"
- "zip1 v4.4s, v0.4s, v13.4s\n"
+ "zip1 v24.4s, v22.4s, v30.4s\n"
+ "zip1 v16.4s, v9.4s, v8.4s\n"
+ "ldr q18, [x21], #0x10\n"
+ "ldr q15, [x20], #0x10\n"
+ "zip2 v22.4s, v22.4s, v30.4s\n"
+ "zip2 v25.4s, v9.4s, v8.4s\n"
+ "ldr q3, [x9], #0x10\n"
+ "ldr q5, [x26], #0x10\n"
+ "zip1 v8.4s, v31.4s, v2.4s\n"
+ "zip1 v30.4s, v13.4s, v23.4s\n"
+ "ldr q0, [x25], #0x10\n"
+ "ldr q19, [x24], #0x10\n"
+ "zip2 v31.4s, v31.4s, v2.4s\n"
+ "zip2 v2.4s, v13.4s, v23.4s\n"
+ "ldr q11, [x23], #0x10\n"
+ "ldr q9, [x22], #0x10\n"
+ "zip1 v13.4s, v21.4s, v18.4s\n"
+ "zip1 v23.4s, v1.4s, v15.4s\n"
+ "zip2 v21.4s, v21.4s, v18.4s\n"
+ "ldr q18, [x21], #0x10\n"
+ "zip2 v15.4s, v1.4s, v15.4s\n"
+ "zip1 v1.4s, v3.4s, v0.4s\n"
+ "zip2 v0.4s, v3.4s, v0.4s\n"
+ "zip1 v3.4s, v5.4s, v19.4s\n"
+ "zip2 v5.4s, v5.4s, v19.4s\n"
+ "zip1 v19.4s, v11.4s, v18.4s\n"
+ "zip2 v18.4s, v11.4s, v18.4s\n"
+ "zip1 v11.4s, v14.4s, v27.4s\n"
+ "zip2 v14.4s, v14.4s, v27.4s\n"
+ "ldr q27, [x20], #0x10\n"
+ ".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
+ ".inst 0x4ea169cb // bfcvtn2 v11.8h, v14.4s\n"
+ "zip1 v14.4s, v12.4s, v7.4s\n"
+ "zip2 v7.4s, v12.4s, v7.4s\n"
+ "zip1 v12.4s, v9.4s, v27.4s\n"
+ "zip2 v27.4s, v9.4s, v27.4s\n"
+ "zip1 v9.4s, v20.4s, v10.4s\n"
+ "zip2 v20.4s, v20.4s, v10.4s\n"
+ "str q11, [x27, #0x0]\n"
+ "zip1 v10.4s, v4.4s, v6.4s\n"
+ "zip1 v11.4s, v8.4s, v30.4s\n"
+ ".inst 0x0ea169ce // bfcvtn v14.4h, v14.4s\n"
+ "zip2 v4.4s, v4.4s, v6.4s\n"
+ "zip1 v6.4s, v31.4s, v2.4s\n"
".inst 0x0ea16929 // bfcvtn v9.4h, v9.4s\n"
- "zip2 v22.4s, v22.4s, v26.4s\n"
- "zip1 v26.4s, v21.4s, v29.4s\n"
- ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
- "zip2 v13.4s, v0.4s, v13.4s\n"
- "zip1 v0.4s, v16.4s, v5.4s\n"
- ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
- "zip2 v21.4s, v21.4s, v29.4s\n"
- "zip1 v29.4s, v3.4s, v2.4s\n"
+ ".inst 0x0ea1694a // bfcvtn v10.4h, v10.4s\n"
+ ".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
+ "zip2 v30.4s, v8.4s, v30.4s\n"
+ "zip1 v8.4s, v1.4s, v3.4s\n"
+ ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
+ "zip2 v31.4s, v31.4s, v2.4s\n"
+ "zip1 v2.4s, v0.4s, v5.4s\n"
+ "zip2 v3.4s, v1.4s, v3.4s\n"
+ "zip1 v1.4s, v17.4s, v29.4s\n"
+ ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
+ "zip2 v5.4s, v0.4s, v5.4s\n"
+ "zip1 v0.4s, v28.4s, v26.4s\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ "zip2 v17.4s, v17.4s, v29.4s\n"
+ "zip1 v29.4s, v24.4s, v16.4s\n"
+ ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
".inst 0x0ea16800 // bfcvtn v0.4h, v0.4s\n"
- "zip2 v5.4s, v16.4s, v5.4s\n"
- "zip1 v16.4s, v25.4s, v7.4s\n"
+ "zip2 v28.4s, v28.4s, v26.4s\n"
+ "zip1 v26.4s, v22.4s, v25.4s\n"
+ "zip2 v24.4s, v24.4s, v16.4s\n"
+ "zip1 v16.4s, v13.4s, v23.4s\n"
".inst 0x0ea16bbd // bfcvtn v29.4h, v29.4s\n"
- "zip2 v2.4s, v3.4s, v2.4s\n"
- "zip1 v3.4s, v30.4s, v20.4s\n"
+ "zip2 v25.4s, v22.4s, v25.4s\n"
+ "zip1 v22.4s, v21.4s, v15.4s\n"
+ ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
+ "zip2 v13.4s, v13.4s, v23.4s\n"
+ "zip1 v23.4s, v19.4s, v12.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v7.4s, v25.4s, v7.4s\n"
- "zip1 v25.4s, v17.4s, v31.4s\n"
- ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
- "zip2 v30.4s, v30.4s, v20.4s\n"
- "zip1 v20.4s, v23.4s, v24.4s\n"
- ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n"
- "zip2 v17.4s, v17.4s, v31.4s\n"
- "zip1 v31.4s, v11.4s, v14.4s\n"
- ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n"
- "zip2 v24.4s, v23.4s, v24.4s\n"
- "zip1 v23.4s, v1.4s, v15.4s\n"
- ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
- "zip2 v14.4s, v11.4s, v14.4s\n"
+ ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
+ "zip2 v21.4s, v21.4s, v15.4s\n"
+ "zip1 v15.4s, v18.4s, v27.4s\n"
+ "zip2 v19.4s, v19.4s, v12.4s\n"
".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
- "zip2 v1.4s, v1.4s, v15.4s\n"
- ".inst 0x4ea16b8c // bfcvtn2 v12.8h, v28.4s\n"
- "str q12, [x21, #0x10]\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
- ".inst 0x4ea16948 // bfcvtn2 v8.8h, v10.4s\n"
- "str q19, [x21, #0x20]\n"
- ".inst 0x4ea168db // bfcvtn2 v27.8h, v6.4s\n"
- ".inst 0x4ea16ac9 // bfcvtn2 v9.8h, v22.4s\n"
- "str q8, [x21, #0x30]\n"
- ".inst 0x4ea169a4 // bfcvtn2 v4.8h, v13.4s\n"
- ".inst 0x4ea16aba // bfcvtn2 v26.8h, v21.4s\n"
- "str q27, [x21, #0x40]\n"
- ".inst 0x4ea168a0 // bfcvtn2 v0.8h, v5.4s\n"
- ".inst 0x4ea1685d // bfcvtn2 v29.8h, v2.4s\n"
- "str q9, [x21, #0x50]\n"
- ".inst 0x4ea168f0 // bfcvtn2 v16.8h, v7.4s\n"
- ".inst 0x4ea16bc3 // bfcvtn2 v3.8h, v30.4s\n"
- "str q4, [x21, #0x60]\n"
- ".inst 0x4ea16a39 // bfcvtn2 v25.8h, v17.4s\n"
- ".inst 0x4ea16b14 // bfcvtn2 v20.8h, v24.4s\n"
- "str q26, [x21, #0x70]\n"
- ".inst 0x4ea169df // bfcvtn2 v31.8h, v14.4s\n"
- ".inst 0x4ea16837 // bfcvtn2 v23.8h, v1.4s\n"
- "str q0, [x21, #0x80]\n"
- "str q29, [x21, #0x90]\n"
- "str q16, [x21, #0xa0]\n"
- "str q3, [x21, #0xb0]\n"
- "str q25, [x21, #0xc0]\n"
- "str q20, [x21, #0xd0]\n"
- "str q31, [x21, #0xe0]\n"
- "str q23, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v12.4s, v18.4s, v27.4s\n"
+ ".inst 0x4ea168ee // bfcvtn2 v14.8h, v7.4s\n"
+ ".inst 0x4ea16a89 // bfcvtn2 v9.8h, v20.4s\n"
+ ".inst 0x0ea169f2 // bfcvtn v18.4h, v15.4s\n"
+ ".inst 0x4ea1688a // bfcvtn2 v10.8h, v4.4s\n"
+ ".inst 0x4ea16bcb // bfcvtn2 v11.8h, v30.4s\n"
+ ".inst 0x4ea16be6 // bfcvtn2 v6.8h, v31.4s\n"
+ ".inst 0x4ea16868 // bfcvtn2 v8.8h, v3.4s\n"
+ ".inst 0x4ea168a2 // bfcvtn2 v2.8h, v5.4s\n"
+ "str q14, [x27, #0x10]\n"
+ ".inst 0x4ea16a21 // bfcvtn2 v1.8h, v17.4s\n"
+ ".inst 0x4ea16b80 // bfcvtn2 v0.8h, v28.4s\n"
+ "str q9, [x27, #0x20]\n"
+ ".inst 0x4ea16b1d // bfcvtn2 v29.8h, v24.4s\n"
+ ".inst 0x4ea16b3a // bfcvtn2 v26.8h, v25.4s\n"
+ "str q10, [x27, #0x30]\n"
+ ".inst 0x4ea169b0 // bfcvtn2 v16.8h, v13.4s\n"
+ ".inst 0x4ea16ab6 // bfcvtn2 v22.8h, v21.4s\n"
+ "str q11, [x27, #0x40]\n"
+ ".inst 0x4ea16a77 // bfcvtn2 v23.8h, v19.4s\n"
+ ".inst 0x4ea16992 // bfcvtn2 v18.8h, v12.4s\n"
+ "str q6, [x27, #0x50]\n"
+ "str q8, [x27, #0x60]\n"
+ "str q2, [x27, #0x70]\n"
+ "str q1, [x27, #0x80]\n"
+ "str q0, [x27, #0x90]\n"
+ "str q29, [x27, #0xa0]\n"
+ "str q26, [x27, #0xb0]\n"
+ "str q16, [x27, #0xc0]\n"
+ "str q22, [x27, #0xd0]\n"
+ "str q23, [x27, #0xe0]\n"
+ "str q18, [x27, #0xf0]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x4\n"
+ "cbz x28, 8f\n"
+ "cmp x28, #0x4\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "str q16, [x27, #0x80]\n"
+ "str q16, [x27, #0x90]\n"
+ "str q16, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "str q16, [x27, #0xc0]\n"
+ "str q16, [x27, #0xd0]\n"
+ "str q16, [x27, #0xe0]\n"
+ "str q16, [x27, #0xf0]\n"
"blt 5f\n"
"4:" // Main row loop: width 4 loop: loop
- "ldr q23, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v23.4s, v17.4s\n"
- "zip1 v21.4s, v20.4s, v16.4s\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v28.4s, v23.4s, v17.4s\n"
- "zip2 v20.4s, v20.4s, v16.4s\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v27.4s, v19.4s, v17.4s\n"
- "zip1 v26.4s, v18.4s, v16.4s\n"
- "zip2 v25.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "zip1 v19.4s, v22.4s, v21.4s\n"
- "zip1 v18.4s, v28.4s, v20.4s\n"
+ "ldr q25, [x9], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
+ "sub x28, x28, #0x4\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "cmp x28, #0x4\n"
+ "ldr q23, [x23], #0x10\n"
+ "ldr q19, [x22], #0x10\n"
+ "ldr q18, [x21], #0x10\n"
+ "ldr q17, [x20], #0x10\n"
+ "zip1 v22.4s, v25.4s, v21.4s\n"
+ "zip1 v16.4s, v24.4s, v20.4s\n"
+ "zip2 v21.4s, v25.4s, v21.4s\n"
+ "zip2 v20.4s, v24.4s, v20.4s\n"
+ "zip1 v27.4s, v23.4s, v18.4s\n"
+ "zip1 v26.4s, v19.4s, v17.4s\n"
+ "zip2 v25.4s, v23.4s, v18.4s\n"
+ "zip2 v24.4s, v19.4s, v17.4s\n"
+ "zip1 v19.4s, v22.4s, v16.4s\n"
+ "zip1 v18.4s, v21.4s, v20.4s\n"
"zip1 v17.4s, v27.4s, v26.4s\n"
+ "zip2 v23.4s, v22.4s, v16.4s\n"
"zip1 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v22.4s, v21.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v28.4s, v20.4s\n"
+ "zip2 v22.4s, v21.4s, v20.4s\n"
+ ".inst 0x0ea16a75 // bfcvtn v21.4h, v19.4s\n"
+ ".inst 0x0ea16a54 // bfcvtn v20.4h, v18.4s\n"
".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
"zip2 v18.4s, v27.4s, v26.4s\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
"zip2 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q23, [x21, #0x0]\n"
+ ".inst 0x4ea16af5 // bfcvtn2 v21.8h, v23.4s\n"
+ ".inst 0x4ea16ad4 // bfcvtn2 v20.8h, v22.4s\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q21, [x21, #0x10]\n"
- "str q19, [x21, #0x80]\n"
- "str q17, [x21, #0x90]\n"
- "add x21, x21, #0x20\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0x80]\n"
+ "str q17, [x27, #0x90]\n"
+ "add x27, x27, #0x20\n"
"bge 4b\n"
"5:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 7f\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
+ "ldr s23, [x9], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s17, [x24], #0x4\n"
+ "cmp x28, #0x1\n"
+ "ldr s21, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s18, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "zip1 v19.4s, v23.4s, v19.4s\n"
+ "zip1 v17.4s, v22.4s, v17.4s\n"
+ "zip1 v18.4s, v21.4s, v18.4s\n"
+ "zip1 v16.4s, v20.4s, v16.4s\n"
"zip1 v17.4s, v19.4s, v17.4s\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v16.4s, v17.4s, v16.4s\n"
- ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
- "ldr s17, [x22], #0x4\n"
- "ldr s16, [x20], #0x4\n"
- "zip1 v17.4s, v20.4s, v17.4s\n"
- "zip1 v16.4s, v19.4s, v16.4s\n"
- "zip1 v16.4s, v17.4s, v16.4s\n"
+ ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d18, [x21, #0x0]\n"
- "str d16, [x21, #0x80]\n"
- "add x21, x21, #0x8\n"
+ "str d17, [x27, #0x0]\n"
+ "str d16, [x27, #0x80]\n"
+ "add x27, x27, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 1 loop: skip
+ "8:" // Main row loop: odd col skip
"cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x100\n"
"bge 1b\n"
- "cbz %x[height], 16f\n"
- "8:" // Main loop skip
- "9:" // Tail row loop: Head
+ "cbz %x[height], 18f\n"
+ "9:" // Main loop skip
+ "10:" // Tail row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x9, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "csel x25, x25, %x[pad_row], GE\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x10\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "blt 11f\n"
- "10:" // Tail row loop: Column loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x20, #0x10\n"
+ "blt 12f\n"
+ "11:" // Tail row loop: Column loop
+ "ldr q20, [x9], #0x10\n"
+ "ldr q19, [x26], #0x10\n"
"sub x20, x20, #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x10\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v30.4s, v21.4s, v17.4s\n"
- "zip1 v29.4s, v20.4s, v16.4s\n"
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v28.4s, v21.4s, v17.4s\n"
- "zip2 v27.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v26.4s, v19.4s, v17.4s\n"
- "zip1 v25.4s, v18.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v8.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v7.4s, v21.4s, v17.4s\n"
- "zip1 v6.4s, v20.4s, v16.4s\n"
+ "ldr q0, [x9], #0x10\n"
+ "ldr q31, [x26], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "ldr q23, [x9], #0x10\n"
+ "zip1 v30.4s, v20.4s, v18.4s\n"
+ "zip1 v29.4s, v19.4s, v17.4s\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
+ "zip2 v28.4s, v20.4s, v18.4s\n"
+ "zip2 v27.4s, v19.4s, v17.4s\n"
+ "ldr q20, [x24], #0x10\n"
"ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v5.4s, v21.4s, v17.4s\n"
- "zip2 v4.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
+ "zip1 v26.4s, v0.4s, v24.4s\n"
+ "zip1 v25.4s, v31.4s, v16.4s\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip2 v8.4s, v0.4s, v24.4s\n"
+ "zip2 v24.4s, v31.4s, v16.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip1 v7.4s, v23.4s, v21.4s\n"
+ "zip1 v6.4s, v22.4s, v20.4s\n"
+ "zip2 v5.4s, v23.4s, v21.4s\n"
+ "zip2 v4.4s, v22.4s, v20.4s\n"
"zip1 v3.4s, v19.4s, v17.4s\n"
"zip1 v2.4s, v18.4s, v16.4s\n"
"zip2 v1.4s, v19.4s, v17.4s\n"
@@ -358,70 +377,81 @@ void a64_transpose_interleave_16_2x4_fp32bf16(bfloat16 *out, const float *in, si
"zip2 v16.4s, v1.4s, v0.4s\n"
".inst 0x4ea16bdf // bfcvtn2 v31.8h, v30.4s\n"
".inst 0x4ea16b9d // bfcvtn2 v29.8h, v28.4s\n"
- "str q31, [x21, #0x0]\n"
".inst 0x4ea16b5b // bfcvtn2 v27.8h, v26.4s\n"
".inst 0x4ea16b19 // bfcvtn2 v25.8h, v24.4s\n"
- "str q29, [x21, #0x10]\n"
".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q27, [x21, #0x20]\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q25, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q21, [x21, #0x50]\n"
- "str q19, [x21, #0x60]\n"
- "str q17, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 10b\n"
- "11:" // Tail row loop: Column loop skip
+ "str q31, [x27, #0x0]\n"
+ "str q29, [x27, #0x10]\n"
+ "str q27, [x27, #0x20]\n"
+ "str q25, [x27, #0x30]\n"
+ "str q23, [x27, #0x40]\n"
+ "str q21, [x27, #0x50]\n"
+ "str q19, [x27, #0x60]\n"
+ "str q17, [x27, #0x70]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 11b\n"
+ "12:" // Tail row loop: Column loop skip
+ "cbz x20, 17f\n"
"cmp x20, #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: width 4 loop: loop
- "ldr q20, [x9], #0x10\n"
- "ldr q19, [x28], #0x10\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: width 4 loop: loop
+ "ldr q21, [x9], #0x10\n"
+ "ldr q20, [x26], #0x10\n"
"sub x20, x20, #0x4\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v20.4s, v17.4s\n"
- "zip1 v18.4s, v19.4s, v16.4s\n"
- "zip2 v21.4s, v20.4s, v17.4s\n"
- "zip2 v20.4s, v19.4s, v16.4s\n"
- "zip1 v17.4s, v22.4s, v18.4s\n"
+ "zip1 v18.4s, v21.4s, v19.4s\n"
+ "zip1 v16.4s, v20.4s, v17.4s\n"
+ "zip2 v21.4s, v21.4s, v19.4s\n"
+ "zip2 v20.4s, v20.4s, v17.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "zip2 v19.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v22.4s, v18.4s\n"
- ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
- ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q19, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
- "bge 12b\n"
- "13:" // Tail row loop: width 4 loop: skip
+ ".inst 0x0ea16a32 // bfcvtn v18.4h, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 1 loop: loop
+ "blt 16f\n"
+ "15:" // Tail row loop: width 1 loop: loop
"ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
+ "ldr s18, [x26], #0x4\n"
"sub x20, x20, #0x1\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s16, [x24], #0x4\n"
"cmp x20, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
"zip1 v17.4s, v19.4s, v17.4s\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 1 loop: skip
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 1 loop: skip
+ "17:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
- "bge 9b\n"
- "16:" // Done
+ "bge 10b\n"
+ "18:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp
index 46211ad4e4..0f00300c54 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,192 +34,206 @@ void a64_transpose_interleave_24(uint16_t *out, const uint16_t *in, size_t width
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x18\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
"ldr q1, [x25], #0x10\n"
- "ldr q0, [x22], #0x10\n"
+ "ldr q0, [x21], #0x10\n"
"sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
"ldr q17, [x25], #0x10\n"
- "ldr q31, [x23], #0x10\n"
- "dup v30.2d, v17.d[0]\n"
- "dup v29.2d, v31.d[1]\n"
- "ldr q16, [x22], #0x10\n"
- "ldr q28, [x20], #0x10\n"
- "dup v27.2d, v16.d[0]\n"
- "dup v26.2d, v28.d[1]\n"
- "ldr q25, [x25], #0x10\n"
- "ldr q24, [x22], #0x10\n"
- "dup v23.2d, v17.d[1]\n"
- "dup v22.2d, v25.d[1]\n"
- "ldr q21, [x23], #0x10\n"
+ "ldr q31, [x22], #0x10\n"
+ "cmp x24, #0x18\n"
+ "ldr q16, [x21], #0x10\n"
+ "ldr q30, [x20], #0x10\n"
+ "ldr q29, [x25], #0x10\n"
+ "ldr q28, [x21], #0x10\n"
+ "ldr q27, [x22], #0x10\n"
+ "dup v26.2d, v17.d[0]\n"
+ "dup v25.2d, v31.d[1]\n"
+ "ldr q24, [x20], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
+ "dup v22.2d, v16.d[0]\n"
+ "dup v21.2d, v30.d[1]\n"
"ldr q20, [x20], #0x10\n"
- "dup v19.2d, v16.d[1]\n"
- "dup v18.2d, v24.d[1]\n"
- "ldr q17, [x23], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "mov v30.d[1], v31.d[0]\n"
- "mov v29.d[1], v21.d[0]\n"
- "mov v27.d[1], v28.d[0]\n"
- "mov v26.d[1], v20.d[0]\n"
- "str q1, [x21, #0x0]\n"
- "str q30, [x21, #0x10]\n"
- "mov v23.d[1], v25.d[0]\n"
- "mov v22.d[1], v21.d[1]\n"
- "str q29, [x21, #0x20]\n"
- "mov v19.d[1], v24.d[0]\n"
- "mov v18.d[1], v20.d[1]\n"
- "str q0, [x21, #0x30]\n"
- "str q27, [x21, #0x40]\n"
- "str q26, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q23, [x21, #0x0]\n"
- "str q22, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q18, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "dup v19.2d, v17.d[1]\n"
+ "dup v18.2d, v29.d[1]\n"
+ "str q1, [x23, #0x0]\n"
+ "dup v17.2d, v16.d[1]\n"
+ "dup v16.2d, v28.d[1]\n"
+ "mov v26.d[1], v31.d[0]\n"
+ "mov v25.d[1], v27.d[0]\n"
+ "mov v22.d[1], v30.d[0]\n"
+ "mov v21.d[1], v24.d[0]\n"
+ "str q26, [x23, #0x10]\n"
+ "str q25, [x23, #0x20]\n"
+ "mov v19.d[1], v29.d[0]\n"
+ "mov v18.d[1], v27.d[1]\n"
+ "str q0, [x23, #0x30]\n"
+ "mov v17.d[1], v28.d[0]\n"
+ "mov v16.d[1], v24.d[1]\n"
+ "str q22, [x23, #0x40]\n"
+ "str q21, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q17, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q20, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cmp x24, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q17, [x23], #0x10\n"
- "ldr q23, [x20], #0x10\n"
- "dup v22.2d, v17.d[1]\n"
- "dup v21.2d, v23.d[1]\n"
- "ldr q20, [x25], #0x10\n"
- "ldr q19, [x22], #0x10\n"
+ "ldr q25, [x22], #0x10\n"
+ "ldr q24, [x20], #0x10\n"
"sub x24, x24, #0xc\n"
+ "ldr q23, [x25], #0x10\n"
+ "ldr q22, [x21], #0x10\n"
"cmp x24, #0xc\n"
- "ldr d18, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
- "mov v18.d[1], v17.d[0]\n"
- "mov v22.d[1], v16.d[0]\n"
- "ldr d17, [x22], #0x8\n"
- "ldr d16, [x20], #0x8\n"
- "mov v17.d[1], v23.d[0]\n"
- "mov v21.d[1], v16.d[0]\n"
- "str q20, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q21, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr d21, [x25], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "ldr d18, [x20], #0x8\n"
+ "dup v17.2d, v25.d[1]\n"
+ "dup v16.2d, v24.d[1]\n"
+ "str q23, [x23, #0x0]\n"
+ "mov v21.d[1], v25.d[0]\n"
+ "mov v17.d[1], v20.d[0]\n"
+ "mov v19.d[1], v24.d[0]\n"
+ "mov v16.d[1], v18.d[0]\n"
+ "str q21, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q19, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x4\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x18]\n"
- "str d17, [x21, #0x30]\n"
- "str d16, [x21, #0x48]\n"
- "add x21, x21, #0x8\n"
+ "cmp x24, #0x4\n"
+ "str d19, [x23, #0x0]\n"
+ "str d18, [x23, #0x18]\n"
+ "str d17, [x23, #0x30]\n"
+ "str d16, [x23, #0x48]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
+ "ldr h18, [x22], #0x2\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x18]\n"
- "str h17, [x21, #0x30]\n"
- "str h16, [x21, #0x48]\n"
- "add x21, x21, #0x2\n"
+ "cmp x24, #0x1\n"
+ "str h19, [x23, #0x0]\n"
+ "str h18, [x23, #0x18]\n"
+ "str h17, [x23, #0x30]\n"
+ "str h16, [x23, #0x48]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x60\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x18\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
"ldr q19, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
- "dup v18.2d, v16.d[1]\n"
"sub x20, x20, #0x18\n"
- "ldr q17, [x25], #0x10\n"
- "dup v16.2d, v16.d[0]\n"
- "str q19, [x21, #0x0]\n"
+ "ldr q16, [x25], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
"cmp x20, #0x18\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "mov v18.d[1], v17.d[0]\n"
- "dup v16.2d, v17.d[1]\n"
- "str q18, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "dup v17.2d, v16.d[1]\n"
+ "dup v16.2d, v16.d[0]\n"
+ "str q19, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "mov v17.d[1], v18.d[0]\n"
+ "dup v16.2d, v18.d[1]\n"
+ "str q17, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
"ldr q17, [x25], #0x10\n"
- "ldr d16, [x25], #0x8\n"
"sub x20, x20, #0xc\n"
+ "ldr d16, [x25], #0x8\n"
"cmp x20, #0xc\n"
- "str q17, [x21, #0x0]\n"
- "str d16, [x21, #0x10]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "str q17, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str d16, [x23, #0x10]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr d16, [x25], #0x8\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str d16, [x23, #0x0]\n"
+ "add x23, x23, #0x8\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr h16, [x25], #0x2\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str h16, [x23, #0x0]\n"
+ "add x23, x23, #0x2\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x18\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp
index 1cb7bc4445..3e0ab6d955 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,634 +40,675 @@ void a64_transpose_interleave_24_2x4_fp32bf16(bfloat16 *out, const float *in, si
__asm__ __volatile__(
"cmp %x[height], #0x8\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "mov x28, %x[width]\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x9, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "cmp x28, #0x18\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x18\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q15, [x9], #0x10\n"
- "ldr q1, [x28], #0x10\n"
- "sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q0, [x27], #0x10\n"
- "ldr q27, [x26], #0x10\n"
- "zip1 v18.4s, v15.4s, v0.4s\n"
- "zip1 v20.4s, v1.4s, v27.4s\n"
- "ldr q13, [x25], #0x10\n"
- "ldr q14, [x23], #0x10\n"
- "zip2 v16.4s, v15.4s, v0.4s\n"
- "zip2 v3.4s, v1.4s, v27.4s\n"
- "ldr q12, [x22], #0x10\n"
- "ldr q11, [x20], #0x10\n"
- "zip1 v4.4s, v13.4s, v12.4s\n"
- "zip1 v28.4s, v14.4s, v11.4s\n"
- "ldr q5, [x9], #0x10\n"
- "ldr q30, [x28], #0x10\n"
- "zip2 v23.4s, v13.4s, v12.4s\n"
- "zip2 v19.4s, v14.4s, v11.4s\n"
- "ldr q25, [x27], #0x10\n"
- "ldr q11, [x26], #0x10\n"
- "zip1 v21.4s, v5.4s, v25.4s\n"
- "zip1 v14.4s, v30.4s, v11.4s\n"
- "ldr q6, [x25], #0x10\n"
- "ldr q27, [x23], #0x10\n"
- "zip2 v29.4s, v5.4s, v25.4s\n"
- "zip2 v17.4s, v30.4s, v11.4s\n"
- "ldr q2, [x22], #0x10\n"
- "ldr q10, [x20], #0x10\n"
- "zip1 v11.4s, v6.4s, v2.4s\n"
- "zip1 v1.4s, v27.4s, v10.4s\n"
- "ldr q8, [x9], #0x10\n"
- "ldr q5, [x28], #0x10\n"
- "zip2 v24.4s, v6.4s, v2.4s\n"
- "zip2 v0.4s, v27.4s, v10.4s\n"
- "ldr q6, [x27], #0x10\n"
- "ldr q31, [x26], #0x10\n"
- "zip1 v12.4s, v8.4s, v6.4s\n"
- "zip1 v10.4s, v5.4s, v31.4s\n"
- "ldr q30, [x25], #0x10\n"
- "ldr q2, [x23], #0x10\n"
- "zip2 v9.4s, v8.4s, v6.4s\n"
- "zip2 v13.4s, v5.4s, v31.4s\n"
- "ldr q7, [x22], #0x10\n"
- "ldr q8, [x20], #0x10\n"
- "zip1 v27.4s, v30.4s, v7.4s\n"
- "zip1 v31.4s, v2.4s, v8.4s\n"
- "ldr q5, [x9], #0x10\n"
- "ldr q26, [x28], #0x10\n"
- "zip2 v22.4s, v30.4s, v7.4s\n"
- "zip2 v8.4s, v2.4s, v8.4s\n"
- "ldr q2, [x27], #0x10\n"
- "ldr q6, [x26], #0x10\n"
- "zip1 v25.4s, v5.4s, v2.4s\n"
- "zip1 v15.4s, v26.4s, v6.4s\n"
- "ldr q7, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "zip2 v5.4s, v5.4s, v2.4s\n"
- "zip2 v26.4s, v26.4s, v6.4s\n"
- "ldr q2, [x22], #0x10\n"
- "zip1 v6.4s, v7.4s, v2.4s\n"
- "zip2 v7.4s, v7.4s, v2.4s\n"
- "zip1 v2.4s, v18.4s, v20.4s\n"
- ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
- "zip2 v20.4s, v18.4s, v20.4s\n"
- "ldr q18, [x20], #0x10\n"
- ".inst 0x4ea16a82 // bfcvtn2 v2.8h, v20.4s\n"
- "zip1 v20.4s, v30.4s, v18.4s\n"
- "zip2 v18.4s, v30.4s, v18.4s\n"
- "zip1 v30.4s, v16.4s, v3.4s\n"
- ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
- "zip2 v3.4s, v16.4s, v3.4s\n"
+ "ldr q28, [x9], #0x10\n"
+ "ldr q17, [x26], #0x10\n"
+ "sub x28, x28, #0x18\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q15, [x24], #0x10\n"
+ "cmp x28, #0x18\n"
+ "ldr q20, [x23], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
+ "ldr q9, [x21], #0x10\n"
+ "ldr q14, [x20], #0x10\n"
"ldr q16, [x9], #0x10\n"
- ".inst 0x4ea1687e // bfcvtn2 v30.8h, v3.4s\n"
- "zip1 v3.4s, v21.4s, v14.4s\n"
- ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
- "zip2 v21.4s, v21.4s, v14.4s\n"
- "ldr q14, [x28], #0x10\n"
- ".inst 0x4ea16aa3 // bfcvtn2 v3.8h, v21.4s\n"
- "zip1 v21.4s, v29.4s, v17.4s\n"
- ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
- "zip2 v29.4s, v29.4s, v17.4s\n"
- "ldr q17, [x27], #0x10\n"
- ".inst 0x4ea16bb5 // bfcvtn2 v21.8h, v29.4s\n"
- "zip1 v29.4s, v16.4s, v17.4s\n"
- "zip2 v16.4s, v16.4s, v17.4s\n"
- "zip1 v17.4s, v12.4s, v10.4s\n"
- ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
- "zip2 v10.4s, v12.4s, v10.4s\n"
- "ldr q12, [x26], #0x10\n"
- ".inst 0x4ea16951 // bfcvtn2 v17.8h, v10.4s\n"
- "zip1 v10.4s, v14.4s, v12.4s\n"
- "zip2 v14.4s, v14.4s, v12.4s\n"
- "zip1 v12.4s, v9.4s, v13.4s\n"
- ".inst 0x0ea1698c // bfcvtn v12.4h, v12.4s\n"
- "zip2 v13.4s, v9.4s, v13.4s\n"
- "ldr q9, [x25], #0x10\n"
- ".inst 0x4ea169ac // bfcvtn2 v12.8h, v13.4s\n"
- "zip1 v13.4s, v25.4s, v15.4s\n"
+ "ldr q22, [x26], #0x10\n"
+ "zip1 v27.4s, v28.4s, v19.4s\n"
+ "zip1 v7.4s, v17.4s, v15.4s\n"
+ "ldr q12, [x25], #0x10\n"
+ "ldr q24, [x24], #0x10\n"
+ "zip2 v6.4s, v28.4s, v19.4s\n"
+ "zip2 v26.4s, v17.4s, v15.4s\n"
+ "ldr q18, [x23], #0x10\n"
+ "ldr q23, [x22], #0x10\n"
+ "zip1 v25.4s, v20.4s, v9.4s\n"
+ "zip1 v11.4s, v30.4s, v14.4s\n"
+ "ldr q2, [x21], #0x10\n"
+ "ldr q0, [x20], #0x10\n"
+ "zip2 v28.4s, v20.4s, v9.4s\n"
+ "zip2 v31.4s, v30.4s, v14.4s\n"
+ "ldr q13, [x9], #0x10\n"
+ "ldr q10, [x26], #0x10\n"
+ "zip1 v14.4s, v16.4s, v12.4s\n"
+ "zip1 v15.4s, v22.4s, v24.4s\n"
+ "ldr q1, [x25], #0x10\n"
+ "ldr q30, [x24], #0x10\n"
+ "zip2 v3.4s, v16.4s, v12.4s\n"
+ "zip2 v20.4s, v22.4s, v24.4s\n"
+ "ldr q17, [x23], #0x10\n"
+ "ldr q9, [x22], #0x10\n"
+ "zip1 v21.4s, v18.4s, v2.4s\n"
+ "zip1 v22.4s, v23.4s, v0.4s\n"
+ "ldr q5, [x21], #0x10\n"
+ "ldr q4, [x20], #0x10\n"
+ "zip2 v16.4s, v18.4s, v2.4s\n"
+ "zip2 v12.4s, v23.4s, v0.4s\n"
+ "ldr q23, [x9], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
+ "zip1 v2.4s, v13.4s, v1.4s\n"
+ "zip1 v18.4s, v10.4s, v30.4s\n"
+ "ldr q29, [x25], #0x10\n"
+ "ldr q0, [x24], #0x10\n"
+ "zip2 v1.4s, v13.4s, v1.4s\n"
+ "zip2 v30.4s, v10.4s, v30.4s\n"
+ "ldr q13, [x23], #0x10\n"
+ "ldr q10, [x22], #0x10\n"
+ "zip1 v8.4s, v17.4s, v5.4s\n"
+ "zip1 v19.4s, v9.4s, v4.4s\n"
+ "zip2 v5.4s, v17.4s, v5.4s\n"
+ "ldr q17, [x21], #0x10\n"
+ "zip2 v9.4s, v9.4s, v4.4s\n"
+ "zip1 v4.4s, v23.4s, v29.4s\n"
+ "zip2 v23.4s, v23.4s, v29.4s\n"
+ "zip1 v29.4s, v24.4s, v0.4s\n"
+ "zip2 v24.4s, v24.4s, v0.4s\n"
+ "zip1 v0.4s, v13.4s, v17.4s\n"
+ "zip2 v17.4s, v13.4s, v17.4s\n"
+ "zip1 v13.4s, v27.4s, v7.4s\n"
+ "zip2 v7.4s, v27.4s, v7.4s\n"
+ "ldr q27, [x20], #0x10\n"
".inst 0x0ea169ad // bfcvtn v13.4h, v13.4s\n"
- "zip2 v25.4s, v25.4s, v15.4s\n"
- "ldr q15, [x23], #0x10\n"
- ".inst 0x4ea16b2d // bfcvtn2 v13.8h, v25.4s\n"
- "zip1 v25.4s, v5.4s, v26.4s\n"
- ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n"
- "zip2 v5.4s, v5.4s, v26.4s\n"
- "ldr q26, [x22], #0x10\n"
- ".inst 0x4ea168b9 // bfcvtn2 v25.8h, v5.4s\n"
- "zip1 v5.4s, v9.4s, v26.4s\n"
- "zip2 v9.4s, v9.4s, v26.4s\n"
- "zip1 v26.4s, v29.4s, v10.4s\n"
- ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
- "zip2 v10.4s, v29.4s, v10.4s\n"
- "ldr q29, [x20], #0x10\n"
- ".inst 0x4ea1695a // bfcvtn2 v26.8h, v10.4s\n"
- "zip1 v10.4s, v15.4s, v29.4s\n"
- "zip2 v15.4s, v15.4s, v29.4s\n"
- "zip1 v29.4s, v16.4s, v14.4s\n"
- ".inst 0x0ea16bbd // bfcvtn v29.4h, v29.4s\n"
- "zip2 v14.4s, v16.4s, v14.4s\n"
- "ldr q16, [x9], #0x10\n"
- ".inst 0x4ea169dd // bfcvtn2 v29.8h, v14.4s\n"
- "zip1 v14.4s, v4.4s, v28.4s\n"
+ ".inst 0x4ea168ed // bfcvtn2 v13.8h, v7.4s\n"
+ "zip1 v7.4s, v6.4s, v26.4s\n"
+ "zip2 v26.4s, v6.4s, v26.4s\n"
+ "ldr q6, [x9], #0x10\n"
+ ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
+ ".inst 0x4ea16b47 // bfcvtn2 v7.8h, v26.4s\n"
+ "zip1 v26.4s, v10.4s, v27.4s\n"
+ "zip2 v27.4s, v10.4s, v27.4s\n"
+ "zip1 v10.4s, v14.4s, v15.4s\n"
+ "zip2 v14.4s, v14.4s, v15.4s\n"
+ "ldr q15, [x26], #0x10\n"
+ ".inst 0x0ea1694a // bfcvtn v10.4h, v10.4s\n"
+ ".inst 0x4ea169ca // bfcvtn2 v10.8h, v14.4s\n"
+ "zip1 v14.4s, v3.4s, v20.4s\n"
+ "zip2 v20.4s, v3.4s, v20.4s\n"
+ "ldr q3, [x25], #0x10\n"
".inst 0x0ea169ce // bfcvtn v14.4h, v14.4s\n"
- "zip2 v4.4s, v4.4s, v28.4s\n"
- "ldr q28, [x28], #0x10\n"
- ".inst 0x4ea1688e // bfcvtn2 v14.8h, v4.4s\n"
- "zip1 v4.4s, v23.4s, v19.4s\n"
+ ".inst 0x4ea16a8e // bfcvtn2 v14.8h, v20.4s\n"
+ "zip1 v20.4s, v2.4s, v18.4s\n"
+ "zip2 v18.4s, v2.4s, v18.4s\n"
+ "ldr q2, [x24], #0x10\n"
+ ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n"
+ ".inst 0x4ea16a54 // bfcvtn2 v20.8h, v18.4s\n"
+ "zip1 v18.4s, v6.4s, v3.4s\n"
+ "zip2 v3.4s, v6.4s, v3.4s\n"
+ "zip1 v6.4s, v15.4s, v2.4s\n"
+ "zip2 v15.4s, v15.4s, v2.4s\n"
+ "zip1 v2.4s, v1.4s, v30.4s\n"
+ "zip2 v30.4s, v1.4s, v30.4s\n"
+ "ldr q1, [x23], #0x10\n"
+ ".inst 0x0ea16842 // bfcvtn v2.4h, v2.4s\n"
+ ".inst 0x4ea16bc2 // bfcvtn2 v2.8h, v30.4s\n"
+ "zip1 v30.4s, v4.4s, v29.4s\n"
+ "zip2 v4.4s, v4.4s, v29.4s\n"
+ "ldr q29, [x22], #0x10\n"
+ ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
+ ".inst 0x4ea1689e // bfcvtn2 v30.8h, v4.4s\n"
+ "zip1 v4.4s, v23.4s, v24.4s\n"
+ "zip2 v24.4s, v23.4s, v24.4s\n"
+ "ldr q23, [x21], #0x10\n"
".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
- "zip2 v19.4s, v23.4s, v19.4s\n"
- "ldr q23, [x27], #0x10\n"
- ".inst 0x4ea16a64 // bfcvtn2 v4.8h, v19.4s\n"
- "zip1 v19.4s, v16.4s, v23.4s\n"
- "zip2 v16.4s, v16.4s, v23.4s\n"
- "zip1 v23.4s, v11.4s, v1.4s\n"
- ".inst 0x0ea16af7 // bfcvtn v23.4h, v23.4s\n"
- "zip2 v1.4s, v11.4s, v1.4s\n"
- "ldr q11, [x26], #0x10\n"
- ".inst 0x4ea16837 // bfcvtn2 v23.8h, v1.4s\n"
- "zip1 v1.4s, v28.4s, v11.4s\n"
- "zip2 v28.4s, v28.4s, v11.4s\n"
- "zip1 v11.4s, v19.4s, v1.4s\n"
+ ".inst 0x4ea16b04 // bfcvtn2 v4.8h, v24.4s\n"
+ "zip1 v24.4s, v18.4s, v6.4s\n"
+ "zip2 v18.4s, v18.4s, v6.4s\n"
+ "ldr q6, [x20], #0x10\n"
+ ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n"
+ ".inst 0x4ea16a58 // bfcvtn2 v24.8h, v18.4s\n"
+ "zip1 v18.4s, v1.4s, v23.4s\n"
+ "zip2 v1.4s, v1.4s, v23.4s\n"
+ "zip1 v23.4s, v29.4s, v6.4s\n"
+ "zip2 v29.4s, v29.4s, v6.4s\n"
+ "zip1 v6.4s, v3.4s, v15.4s\n"
+ "zip2 v3.4s, v3.4s, v15.4s\n"
+ "ldr q15, [x9], #0x10\n"
+ ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
+ ".inst 0x4ea16866 // bfcvtn2 v6.8h, v3.4s\n"
+ "zip1 v3.4s, v25.4s, v11.4s\n"
+ "zip2 v11.4s, v25.4s, v11.4s\n"
+ "ldr q25, [x26], #0x10\n"
+ ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
+ ".inst 0x4ea16963 // bfcvtn2 v3.8h, v11.4s\n"
+ "zip1 v11.4s, v28.4s, v31.4s\n"
+ "zip2 v28.4s, v28.4s, v31.4s\n"
+ "ldr q31, [x25], #0x10\n"
".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
- "zip2 v19.4s, v19.4s, v1.4s\n"
- "ldr q1, [x25], #0x10\n"
- ".inst 0x4ea16a6b // bfcvtn2 v11.8h, v19.4s\n"
- "zip1 v19.4s, v16.4s, v28.4s\n"
- ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
- "zip2 v16.4s, v16.4s, v28.4s\n"
- "ldr q28, [x23], #0x10\n"
- ".inst 0x4ea16a13 // bfcvtn2 v19.8h, v16.4s\n"
- "zip1 v16.4s, v24.4s, v0.4s\n"
+ ".inst 0x4ea16b8b // bfcvtn2 v11.8h, v28.4s\n"
+ "zip1 v28.4s, v21.4s, v22.4s\n"
+ "zip2 v21.4s, v21.4s, v22.4s\n"
+ "ldr q22, [x24], #0x10\n"
+ ".inst 0x0ea16b9c // bfcvtn v28.4h, v28.4s\n"
+ ".inst 0x4ea16abc // bfcvtn2 v28.8h, v21.4s\n"
+ "zip1 v21.4s, v15.4s, v31.4s\n"
+ "zip2 v31.4s, v15.4s, v31.4s\n"
+ "zip1 v15.4s, v25.4s, v22.4s\n"
+ "zip2 v22.4s, v25.4s, v22.4s\n"
+ "zip1 v25.4s, v16.4s, v12.4s\n"
+ "zip2 v16.4s, v16.4s, v12.4s\n"
+ "ldr q12, [x23], #0x10\n"
+ ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n"
+ ".inst 0x4ea16a19 // bfcvtn2 v25.8h, v16.4s\n"
+ "zip1 v16.4s, v21.4s, v15.4s\n"
+ "zip2 v21.4s, v21.4s, v15.4s\n"
+ "ldr q15, [x22], #0x10\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v24.4s, v24.4s, v0.4s\n"
- "ldr q0, [x22], #0x10\n"
- ".inst 0x4ea16b10 // bfcvtn2 v16.8h, v24.4s\n"
- "ldr q24, [x20], #0x10\n"
- "str q2, [x21, #0x0]\n"
- "zip1 v2.4s, v1.4s, v0.4s\n"
- "zip2 v0.4s, v1.4s, v0.4s\n"
- "zip1 v1.4s, v28.4s, v24.4s\n"
- "zip2 v28.4s, v28.4s, v24.4s\n"
- "str q30, [x21, #0x10]\n"
- "zip1 v24.4s, v27.4s, v31.4s\n"
- "zip1 v30.4s, v22.4s, v8.4s\n"
- "str q3, [x21, #0x20]\n"
- "zip1 v3.4s, v6.4s, v20.4s\n"
- ".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n"
- "str q21, [x21, #0x30]\n"
- "zip1 v21.4s, v7.4s, v18.4s\n"
- "zip2 v31.4s, v27.4s, v31.4s\n"
- "str q17, [x21, #0x40]\n"
- "zip1 v17.4s, v5.4s, v10.4s\n"
- "zip1 v27.4s, v9.4s, v15.4s\n"
- "str q12, [x21, #0x50]\n"
- "zip1 v12.4s, v2.4s, v1.4s\n"
- ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
- "str q13, [x21, #0x60]\n"
- "zip1 v13.4s, v0.4s, v28.4s\n"
- "zip2 v22.4s, v22.4s, v8.4s\n"
- "str q25, [x21, #0x70]\n"
- ".inst 0x0ea16879 // bfcvtn v25.4h, v3.4s\n"
- "zip2 v8.4s, v6.4s, v20.4s\n"
- "str q26, [x21, #0x80]\n"
- ".inst 0x0ea16aa3 // bfcvtn v3.4h, v21.4s\n"
- "zip2 v18.4s, v7.4s, v18.4s\n"
- "str q29, [x21, #0x90]\n"
- ".inst 0x0ea16a27 // bfcvtn v7.4h, v17.4s\n"
- "zip2 v21.4s, v5.4s, v10.4s\n"
- "str q11, [x21, #0xa0]\n"
- ".inst 0x0ea16b65 // bfcvtn v5.4h, v27.4s\n"
- "zip2 v15.4s, v9.4s, v15.4s\n"
- "str q19, [x21, #0xb0]\n"
- ".inst 0x0ea16991 // bfcvtn v17.4h, v12.4s\n"
- "zip2 v20.4s, v2.4s, v1.4s\n"
- "str q14, [x21, #0xc0]\n"
- ".inst 0x0ea169bb // bfcvtn v27.4h, v13.4s\n"
- "zip2 v29.4s, v0.4s, v28.4s\n"
- "str q4, [x21, #0xd0]\n"
- ".inst 0x4ea16bf8 // bfcvtn2 v24.8h, v31.4s\n"
- ".inst 0x4ea16ade // bfcvtn2 v30.8h, v22.4s\n"
- "str q23, [x21, #0xe0]\n"
- ".inst 0x4ea16919 // bfcvtn2 v25.8h, v8.4s\n"
- ".inst 0x4ea16a43 // bfcvtn2 v3.8h, v18.4s\n"
- "str q16, [x21, #0xf0]\n"
- ".inst 0x4ea16aa7 // bfcvtn2 v7.8h, v21.4s\n"
- ".inst 0x4ea169e5 // bfcvtn2 v5.8h, v15.4s\n"
- "str q24, [x21, #0x100]\n"
- ".inst 0x4ea16a91 // bfcvtn2 v17.8h, v20.4s\n"
- ".inst 0x4ea16bbb // bfcvtn2 v27.8h, v29.4s\n"
- "str q30, [x21, #0x110]\n"
- "str q25, [x21, #0x120]\n"
- "str q3, [x21, #0x130]\n"
- "str q7, [x21, #0x140]\n"
- "str q5, [x21, #0x150]\n"
- "str q17, [x21, #0x160]\n"
- "str q27, [x21, #0x170]\n"
- "add x21, x21, %x[out_stride]\n"
+ ".inst 0x4ea16ab0 // bfcvtn2 v16.8h, v21.4s\n"
+ "zip1 v21.4s, v31.4s, v22.4s\n"
+ "zip2 v22.4s, v31.4s, v22.4s\n"
+ "ldr q31, [x21], #0x10\n"
+ ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
+ ".inst 0x4ea16ad5 // bfcvtn2 v21.8h, v22.4s\n"
+ "ldr q22, [x20], #0x10\n"
+ "str q13, [x27, #0x0]\n"
+ "zip1 v13.4s, v8.4s, v19.4s\n"
+ "zip2 v19.4s, v8.4s, v19.4s\n"
+ "str q7, [x27, #0x10]\n"
+ "zip1 v8.4s, v12.4s, v31.4s\n"
+ "zip2 v12.4s, v12.4s, v31.4s\n"
+ "str q10, [x27, #0x20]\n"
+ "zip1 v7.4s, v15.4s, v22.4s\n"
+ "zip2 v15.4s, v15.4s, v22.4s\n"
+ "str q14, [x27, #0x30]\n"
+ "zip1 v14.4s, v5.4s, v9.4s\n"
+ "zip1 v22.4s, v0.4s, v26.4s\n"
+ "str q20, [x27, #0x40]\n"
+ "zip1 v31.4s, v17.4s, v27.4s\n"
+ "zip1 v10.4s, v18.4s, v23.4s\n"
+ "str q2, [x27, #0x50]\n"
+ "zip1 v20.4s, v1.4s, v29.4s\n"
+ "zip1 v2.4s, v8.4s, v7.4s\n"
+ "str q30, [x27, #0x60]\n"
+ "zip1 v30.4s, v12.4s, v15.4s\n"
+ ".inst 0x0ea169ad // bfcvtn v13.4h, v13.4s\n"
+ "str q4, [x27, #0x70]\n"
+ ".inst 0x0ea169c4 // bfcvtn v4.4h, v14.4s\n"
+ "zip2 v9.4s, v5.4s, v9.4s\n"
+ "str q24, [x27, #0x80]\n"
+ ".inst 0x0ea16ad8 // bfcvtn v24.4h, v22.4s\n"
+ "zip2 v26.4s, v0.4s, v26.4s\n"
+ "str q6, [x27, #0x90]\n"
+ ".inst 0x0ea16bee // bfcvtn v14.4h, v31.4s\n"
+ "zip2 v5.4s, v17.4s, v27.4s\n"
+ "str q16, [x27, #0xa0]\n"
+ ".inst 0x0ea1695b // bfcvtn v27.4h, v10.4s\n"
+ "zip2 v17.4s, v18.4s, v23.4s\n"
+ "str q21, [x27, #0xb0]\n"
+ ".inst 0x0ea16a9f // bfcvtn v31.4h, v20.4s\n"
+ "zip2 v23.4s, v1.4s, v29.4s\n"
+ "str q3, [x27, #0xc0]\n"
+ ".inst 0x0ea16852 // bfcvtn v18.4h, v2.4s\n"
+ "zip2 v20.4s, v8.4s, v7.4s\n"
+ "str q11, [x27, #0xd0]\n"
+ ".inst 0x0ea16bd0 // bfcvtn v16.4h, v30.4s\n"
+ "zip2 v15.4s, v12.4s, v15.4s\n"
+ "str q28, [x27, #0xe0]\n"
+ ".inst 0x4ea16a6d // bfcvtn2 v13.8h, v19.4s\n"
+ ".inst 0x4ea16924 // bfcvtn2 v4.8h, v9.4s\n"
+ "str q25, [x27, #0xf0]\n"
+ ".inst 0x4ea16b58 // bfcvtn2 v24.8h, v26.4s\n"
+ ".inst 0x4ea168ae // bfcvtn2 v14.8h, v5.4s\n"
+ ".inst 0x4ea16a3b // bfcvtn2 v27.8h, v17.4s\n"
+ ".inst 0x4ea16aff // bfcvtn2 v31.8h, v23.4s\n"
+ ".inst 0x4ea16a92 // bfcvtn2 v18.8h, v20.4s\n"
+ ".inst 0x4ea169f0 // bfcvtn2 v16.8h, v15.4s\n"
+ "str q13, [x27, #0x100]\n"
+ "str q4, [x27, #0x110]\n"
+ "str q24, [x27, #0x120]\n"
+ "str q14, [x27, #0x130]\n"
+ "str q27, [x27, #0x140]\n"
+ "str q31, [x27, #0x150]\n"
+ "str q18, [x27, #0x160]\n"
+ "str q16, [x27, #0x170]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cbz x28, 10f\n"
+ "cmp x28, #0x10\n"
+ "movi v6.16b, #0x0\n"
+ "str q6, [x27, #0x0]\n"
+ "str q6, [x27, #0x10]\n"
+ "str q6, [x27, #0x20]\n"
+ "str q6, [x27, #0x30]\n"
+ "str q6, [x27, #0x40]\n"
+ "str q6, [x27, #0x50]\n"
+ "str q6, [x27, #0x60]\n"
+ "str q6, [x27, #0x70]\n"
+ "str q6, [x27, #0x80]\n"
+ "str q6, [x27, #0x90]\n"
+ "str q6, [x27, #0xa0]\n"
+ "str q6, [x27, #0xb0]\n"
+ "str q6, [x27, #0xc0]\n"
+ "str q6, [x27, #0xd0]\n"
+ "str q6, [x27, #0xe0]\n"
+ "str q6, [x27, #0xf0]\n"
+ "str q6, [x27, #0x100]\n"
+ "str q6, [x27, #0x110]\n"
+ "str q6, [x27, #0x120]\n"
+ "str q6, [x27, #0x130]\n"
+ "str q6, [x27, #0x140]\n"
+ "str q6, [x27, #0x150]\n"
+ "str q6, [x27, #0x160]\n"
+ "str q6, [x27, #0x170]\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
- "ldr q9, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q15, [x27], #0x10\n"
- "ldr q17, [x26], #0x10\n"
- "zip1 v14.4s, v9.4s, v15.4s\n"
- "zip1 v11.4s, v18.4s, v17.4s\n"
- "ldr q7, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip2 v12.4s, v9.4s, v15.4s\n"
- "zip2 v6.4s, v18.4s, v17.4s\n"
- "ldr q15, [x22], #0x10\n"
+ "ldr q19, [x9], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
+ "sub x28, x28, #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q21, [x24], #0x10\n"
+ "cmp x28, #0x10\n"
+ "ldr q15, [x23], #0x10\n"
+ "ldr q20, [x22], #0x10\n"
+ "ldr q30, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "ldr q6, [x9], #0x10\n"
+ "ldr q10, [x26], #0x10\n"
+ "zip1 v4.4s, v19.4s, v18.4s\n"
+ "zip1 v17.4s, v24.4s, v21.4s\n"
+ "ldr q13, [x25], #0x10\n"
+ "ldr q31, [x24], #0x10\n"
+ "zip2 v29.4s, v19.4s, v18.4s\n"
+ "zip2 v26.4s, v24.4s, v21.4s\n"
+ "ldr q21, [x23], #0x10\n"
+ "ldr q25, [x22], #0x10\n"
+ "zip1 v18.4s, v15.4s, v30.4s\n"
+ "zip1 v7.4s, v20.4s, v16.4s\n"
+ "ldr q19, [x21], #0x10\n"
"ldr q3, [x20], #0x10\n"
- "zip1 v30.4s, v7.4s, v15.4s\n"
- "zip1 v20.4s, v16.4s, v3.4s\n"
- "ldr q17, [x9], #0x10\n"
- "ldr q9, [x28], #0x10\n"
- "zip2 v1.4s, v7.4s, v15.4s\n"
- "zip2 v24.4s, v16.4s, v3.4s\n"
- "ldr q10, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v0.4s, v17.4s, v10.4s\n"
- "zip1 v8.4s, v9.4s, v16.4s\n"
- "ldr q7, [x25], #0x10\n"
- "ldr q2, [x23], #0x10\n"
- "zip2 v17.4s, v17.4s, v10.4s\n"
- "zip2 v3.4s, v9.4s, v16.4s\n"
- "ldr q9, [x22], #0x10\n"
- "ldr q10, [x20], #0x10\n"
- "zip1 v25.4s, v7.4s, v9.4s\n"
- "zip1 v23.4s, v2.4s, v10.4s\n"
- "ldr q31, [x9], #0x10\n"
- "ldr q21, [x28], #0x10\n"
- "zip2 v16.4s, v7.4s, v9.4s\n"
- "zip2 v27.4s, v2.4s, v10.4s\n"
- "ldr q26, [x27], #0x10\n"
- "ldr q19, [x26], #0x10\n"
- "zip1 v2.4s, v31.4s, v26.4s\n"
- "zip1 v7.4s, v21.4s, v19.4s\n"
- "ldr q29, [x25], #0x10\n"
- "ldr q13, [x23], #0x10\n"
- "zip2 v31.4s, v31.4s, v26.4s\n"
- "zip2 v19.4s, v21.4s, v19.4s\n"
- "ldr q4, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
- "zip1 v26.4s, v29.4s, v4.4s\n"
- "zip1 v15.4s, v13.4s, v18.4s\n"
- "ldr q9, [x9], #0x10\n"
- "ldr q22, [x28], #0x10\n"
- "zip2 v4.4s, v29.4s, v4.4s\n"
- "zip2 v18.4s, v13.4s, v18.4s\n"
- "ldr q29, [x27], #0x10\n"
+ "zip2 v24.4s, v15.4s, v30.4s\n"
+ "zip2 v15.4s, v20.4s, v16.4s\n"
+ "ldr q27, [x9], #0x10\n"
+ "ldr q9, [x26], #0x10\n"
+ "zip1 v2.4s, v6.4s, v13.4s\n"
+ "zip1 v30.4s, v10.4s, v31.4s\n"
+ "ldr q0, [x25], #0x10\n"
+ "ldr q11, [x24], #0x10\n"
+ "zip2 v28.4s, v6.4s, v13.4s\n"
+ "zip2 v22.4s, v10.4s, v31.4s\n"
+ "ldr q8, [x23], #0x10\n"
+ "ldr q1, [x22], #0x10\n"
+ "zip1 v23.4s, v21.4s, v19.4s\n"
+ "zip1 v6.4s, v25.4s, v3.4s\n"
+ "ldr q31, [x21], #0x10\n"
+ "ldr q5, [x20], #0x10\n"
+ "zip2 v20.4s, v21.4s, v19.4s\n"
+ "zip2 v21.4s, v25.4s, v3.4s\n"
+ "ldr q16, [x9], #0x10\n"
"ldr q10, [x26], #0x10\n"
- "zip1 v21.4s, v9.4s, v29.4s\n"
- "zip1 v5.4s, v22.4s, v10.4s\n"
- "ldr q28, [x25], #0x10\n"
- "ldr q13, [x23], #0x10\n"
- "zip2 v29.4s, v9.4s, v29.4s\n"
- "zip2 v9.4s, v22.4s, v10.4s\n"
- "ldr q22, [x22], #0x10\n"
- "zip1 v10.4s, v28.4s, v22.4s\n"
- "zip2 v28.4s, v28.4s, v22.4s\n"
- "zip1 v22.4s, v14.4s, v11.4s\n"
- ".inst 0x0ea16ad6 // bfcvtn v22.4h, v22.4s\n"
- "zip2 v11.4s, v14.4s, v11.4s\n"
- "ldr q14, [x20], #0x10\n"
- ".inst 0x4ea16976 // bfcvtn2 v22.8h, v11.4s\n"
- "str q22, [x21, #0x0]\n"
- "zip1 v22.4s, v13.4s, v14.4s\n"
- "zip2 v14.4s, v13.4s, v14.4s\n"
- "zip1 v13.4s, v12.4s, v6.4s\n"
- "zip1 v11.4s, v0.4s, v8.4s\n"
- ".inst 0x0ea169ad // bfcvtn v13.4h, v13.4s\n"
- "zip2 v12.4s, v12.4s, v6.4s\n"
- "zip1 v6.4s, v17.4s, v3.4s\n"
- ".inst 0x0ea1696b // bfcvtn v11.4h, v11.4s\n"
- "zip2 v0.4s, v0.4s, v8.4s\n"
- "zip1 v8.4s, v2.4s, v7.4s\n"
- ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
- "zip2 v3.4s, v17.4s, v3.4s\n"
- "zip1 v17.4s, v31.4s, v19.4s\n"
+ "zip1 v14.4s, v27.4s, v0.4s\n"
+ "zip1 v3.4s, v9.4s, v11.4s\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q13, [x24], #0x10\n"
+ "zip2 v25.4s, v27.4s, v0.4s\n"
+ "zip2 v12.4s, v9.4s, v11.4s\n"
+ "ldr q11, [x23], #0x10\n"
+ "ldr q0, [x22], #0x10\n"
+ "zip1 v27.4s, v8.4s, v31.4s\n"
+ "zip1 v9.4s, v1.4s, v5.4s\n"
+ "zip2 v31.4s, v8.4s, v31.4s\n"
+ "ldr q8, [x21], #0x10\n"
+ "zip2 v5.4s, v1.4s, v5.4s\n"
+ "zip1 v1.4s, v16.4s, v19.4s\n"
+ "zip2 v19.4s, v16.4s, v19.4s\n"
+ "zip1 v16.4s, v10.4s, v13.4s\n"
+ "zip2 v13.4s, v10.4s, v13.4s\n"
+ "zip1 v10.4s, v11.4s, v8.4s\n"
+ "zip2 v11.4s, v11.4s, v8.4s\n"
+ "zip1 v8.4s, v4.4s, v17.4s\n"
+ "zip2 v17.4s, v4.4s, v17.4s\n"
+ "ldr q4, [x20], #0x10\n"
".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
- "zip2 v2.4s, v2.4s, v7.4s\n"
- "zip1 v7.4s, v21.4s, v5.4s\n"
+ ".inst 0x4ea16a28 // bfcvtn2 v8.8h, v17.4s\n"
+ "zip1 v17.4s, v29.4s, v26.4s\n"
+ "zip2 v29.4s, v29.4s, v26.4s\n"
+ "zip1 v26.4s, v0.4s, v4.4s\n"
+ "zip2 v0.4s, v0.4s, v4.4s\n"
+ "zip1 v4.4s, v2.4s, v30.4s\n"
+ "zip2 v2.4s, v2.4s, v30.4s\n"
+ "str q8, [x27, #0x0]\n"
+ "zip1 v8.4s, v28.4s, v22.4s\n"
+ "zip1 v30.4s, v14.4s, v3.4s\n"
".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
- "zip2 v31.4s, v31.4s, v19.4s\n"
- "zip1 v19.4s, v29.4s, v9.4s\n"
- ".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
- "zip2 v21.4s, v21.4s, v5.4s\n"
- "zip1 v5.4s, v30.4s, v20.4s\n"
- ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
- "zip2 v29.4s, v29.4s, v9.4s\n"
- "zip1 v9.4s, v1.4s, v24.4s\n"
- ".inst 0x0ea168a5 // bfcvtn v5.4h, v5.4s\n"
- "zip2 v20.4s, v30.4s, v20.4s\n"
- "zip1 v30.4s, v25.4s, v23.4s\n"
- ".inst 0x0ea16929 // bfcvtn v9.4h, v9.4s\n"
- "zip2 v1.4s, v1.4s, v24.4s\n"
- "zip1 v24.4s, v16.4s, v27.4s\n"
+ "zip2 v22.4s, v28.4s, v22.4s\n"
+ "zip1 v28.4s, v25.4s, v12.4s\n"
+ ".inst 0x0ea16884 // bfcvtn v4.4h, v4.4s\n"
+ ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
- "zip2 v23.4s, v25.4s, v23.4s\n"
- "zip1 v25.4s, v26.4s, v15.4s\n"
+ "zip2 v14.4s, v14.4s, v3.4s\n"
+ "zip1 v3.4s, v1.4s, v16.4s\n"
+ ".inst 0x0ea16b9c // bfcvtn v28.4h, v28.4s\n"
+ "zip2 v25.4s, v25.4s, v12.4s\n"
+ "zip1 v12.4s, v19.4s, v13.4s\n"
+ "zip2 v16.4s, v1.4s, v16.4s\n"
+ "zip1 v1.4s, v18.4s, v7.4s\n"
+ ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
+ "zip2 v13.4s, v19.4s, v13.4s\n"
+ "zip1 v19.4s, v24.4s, v15.4s\n"
+ ".inst 0x0ea1698c // bfcvtn v12.4h, v12.4s\n"
+ "zip2 v7.4s, v18.4s, v7.4s\n"
+ "zip1 v18.4s, v23.4s, v6.4s\n"
+ ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
+ ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
+ "zip2 v15.4s, v24.4s, v15.4s\n"
+ "zip1 v24.4s, v20.4s, v21.4s\n"
+ "zip2 v23.4s, v23.4s, v6.4s\n"
+ "zip1 v6.4s, v27.4s, v9.4s\n"
+ ".inst 0x0ea16a52 // bfcvtn v18.4h, v18.4s\n"
+ "zip2 v21.4s, v20.4s, v21.4s\n"
+ "zip1 v20.4s, v31.4s, v5.4s\n"
".inst 0x0ea16b18 // bfcvtn v24.4h, v24.4s\n"
- "zip2 v27.4s, v16.4s, v27.4s\n"
- "zip1 v16.4s, v4.4s, v18.4s\n"
- ".inst 0x0ea16b39 // bfcvtn v25.4h, v25.4s\n"
- "zip2 v15.4s, v26.4s, v15.4s\n"
- "zip1 v26.4s, v10.4s, v22.4s\n"
- ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "zip2 v18.4s, v4.4s, v18.4s\n"
- "zip1 v4.4s, v28.4s, v14.4s\n"
- ".inst 0x0ea16b5a // bfcvtn v26.4h, v26.4s\n"
- "zip2 v10.4s, v10.4s, v22.4s\n"
- ".inst 0x0ea16896 // bfcvtn v22.4h, v4.4s\n"
- "zip2 v4.4s, v28.4s, v14.4s\n"
- ".inst 0x4ea1698d // bfcvtn2 v13.8h, v12.4s\n"
- "str q13, [x21, #0x10]\n"
- ".inst 0x4ea1680b // bfcvtn2 v11.8h, v0.4s\n"
- ".inst 0x4ea16866 // bfcvtn2 v6.8h, v3.4s\n"
- "str q11, [x21, #0x20]\n"
- ".inst 0x4ea16848 // bfcvtn2 v8.8h, v2.4s\n"
- ".inst 0x4ea16bf1 // bfcvtn2 v17.8h, v31.4s\n"
- "str q6, [x21, #0x30]\n"
- ".inst 0x4ea16aa7 // bfcvtn2 v7.8h, v21.4s\n"
- ".inst 0x4ea16bb3 // bfcvtn2 v19.8h, v29.4s\n"
- "str q8, [x21, #0x40]\n"
- ".inst 0x4ea16a85 // bfcvtn2 v5.8h, v20.4s\n"
- ".inst 0x4ea16829 // bfcvtn2 v9.8h, v1.4s\n"
- "str q17, [x21, #0x50]\n"
- ".inst 0x4ea16afe // bfcvtn2 v30.8h, v23.4s\n"
- ".inst 0x4ea16b78 // bfcvtn2 v24.8h, v27.4s\n"
- "str q7, [x21, #0x60]\n"
- ".inst 0x4ea169f9 // bfcvtn2 v25.8h, v15.4s\n"
- ".inst 0x4ea16a50 // bfcvtn2 v16.8h, v18.4s\n"
- "str q19, [x21, #0x70]\n"
- ".inst 0x4ea1695a // bfcvtn2 v26.8h, v10.4s\n"
- ".inst 0x4ea16896 // bfcvtn2 v22.8h, v4.4s\n"
- "str q5, [x21, #0xc0]\n"
- "str q9, [x21, #0xd0]\n"
- "str q30, [x21, #0xe0]\n"
- "str q24, [x21, #0xf0]\n"
- "str q25, [x21, #0x100]\n"
- "str q16, [x21, #0x110]\n"
- "str q26, [x21, #0x120]\n"
- "str q22, [x21, #0x130]\n"
- "add x21, x21, #0x80\n"
+ "zip2 v27.4s, v27.4s, v9.4s\n"
+ "zip1 v9.4s, v10.4s, v26.4s\n"
+ ".inst 0x0ea168c6 // bfcvtn v6.4h, v6.4s\n"
+ ".inst 0x0ea16a94 // bfcvtn v20.4h, v20.4s\n"
+ "zip2 v31.4s, v31.4s, v5.4s\n"
+ "zip1 v5.4s, v11.4s, v0.4s\n"
+ "zip2 v26.4s, v10.4s, v26.4s\n"
+ ".inst 0x0ea1692a // bfcvtn v10.4h, v9.4s\n"
+ "zip2 v0.4s, v11.4s, v0.4s\n"
+ ".inst 0x4ea16bb1 // bfcvtn2 v17.8h, v29.4s\n"
+ ".inst 0x4ea16844 // bfcvtn2 v4.8h, v2.4s\n"
+ ".inst 0x0ea168a9 // bfcvtn v9.4h, v5.4s\n"
+ ".inst 0x4ea16ac8 // bfcvtn2 v8.8h, v22.4s\n"
+ ".inst 0x4ea169de // bfcvtn2 v30.8h, v14.4s\n"
+ ".inst 0x4ea16b3c // bfcvtn2 v28.8h, v25.4s\n"
+ ".inst 0x4ea16a03 // bfcvtn2 v3.8h, v16.4s\n"
+ ".inst 0x4ea169ac // bfcvtn2 v12.8h, v13.4s\n"
+ "str q17, [x27, #0x10]\n"
+ ".inst 0x4ea168e1 // bfcvtn2 v1.8h, v7.4s\n"
+ ".inst 0x4ea169f3 // bfcvtn2 v19.8h, v15.4s\n"
+ "str q4, [x27, #0x20]\n"
+ ".inst 0x4ea16af2 // bfcvtn2 v18.8h, v23.4s\n"
+ ".inst 0x4ea16ab8 // bfcvtn2 v24.8h, v21.4s\n"
+ "str q8, [x27, #0x30]\n"
+ ".inst 0x4ea16b66 // bfcvtn2 v6.8h, v27.4s\n"
+ ".inst 0x4ea16bf4 // bfcvtn2 v20.8h, v31.4s\n"
+ "str q30, [x27, #0x40]\n"
+ ".inst 0x4ea16b4a // bfcvtn2 v10.8h, v26.4s\n"
+ ".inst 0x4ea16809 // bfcvtn2 v9.8h, v0.4s\n"
+ "str q28, [x27, #0x50]\n"
+ "str q3, [x27, #0x60]\n"
+ "str q12, [x27, #0x70]\n"
+ "str q1, [x27, #0xc0]\n"
+ "str q19, [x27, #0xd0]\n"
+ "str q18, [x27, #0xe0]\n"
+ "str q24, [x27, #0xf0]\n"
+ "str q6, [x27, #0x100]\n"
+ "str q20, [x27, #0x110]\n"
+ "str q10, [x27, #0x120]\n"
+ "str q9, [x27, #0x130]\n"
+ "add x27, x27, #0x80\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x28, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr q23, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v23.4s, v17.4s\n"
- "zip1 v21.4s, v20.4s, v16.4s\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v28.4s, v23.4s, v17.4s\n"
- "zip2 v20.4s, v20.4s, v16.4s\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "zip1 v27.4s, v19.4s, v17.4s\n"
- "zip1 v26.4s, v18.4s, v16.4s\n"
- "zip2 v25.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "zip1 v19.4s, v22.4s, v21.4s\n"
- "zip1 v18.4s, v28.4s, v20.4s\n"
+ "ldr q25, [x9], #0x10\n"
+ "ldr q24, [x26], #0x10\n"
+ "sub x28, x28, #0x4\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "cmp x28, #0x4\n"
+ "ldr q23, [x23], #0x10\n"
+ "ldr q19, [x22], #0x10\n"
+ "ldr q18, [x21], #0x10\n"
+ "ldr q17, [x20], #0x10\n"
+ "zip1 v22.4s, v25.4s, v21.4s\n"
+ "zip1 v16.4s, v24.4s, v20.4s\n"
+ "zip2 v21.4s, v25.4s, v21.4s\n"
+ "zip2 v20.4s, v24.4s, v20.4s\n"
+ "zip1 v27.4s, v23.4s, v18.4s\n"
+ "zip1 v26.4s, v19.4s, v17.4s\n"
+ "zip2 v25.4s, v23.4s, v18.4s\n"
+ "zip2 v24.4s, v19.4s, v17.4s\n"
+ "zip1 v19.4s, v22.4s, v16.4s\n"
+ "zip1 v18.4s, v21.4s, v20.4s\n"
"zip1 v17.4s, v27.4s, v26.4s\n"
+ "zip2 v23.4s, v22.4s, v16.4s\n"
"zip1 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x0ea16a77 // bfcvtn v23.4h, v19.4s\n"
- "zip2 v22.4s, v22.4s, v21.4s\n"
- ".inst 0x0ea16a55 // bfcvtn v21.4h, v18.4s\n"
- "zip2 v20.4s, v28.4s, v20.4s\n"
+ "zip2 v22.4s, v21.4s, v20.4s\n"
+ ".inst 0x0ea16a75 // bfcvtn v21.4h, v19.4s\n"
+ ".inst 0x0ea16a54 // bfcvtn v20.4h, v18.4s\n"
".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
"zip2 v18.4s, v27.4s, v26.4s\n"
".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
"zip2 v16.4s, v25.4s, v24.4s\n"
- ".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
- ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q23, [x21, #0x0]\n"
+ ".inst 0x4ea16af5 // bfcvtn2 v21.8h, v23.4s\n"
+ ".inst 0x4ea16ad4 // bfcvtn2 v20.8h, v22.4s\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q21, [x21, #0x10]\n"
- "str q19, [x21, #0xc0]\n"
- "str q17, [x21, #0xd0]\n"
- "add x21, x21, #0x20\n"
+ "str q21, [x27, #0x0]\n"
+ "str q20, [x27, #0x10]\n"
+ "str q19, [x27, #0xc0]\n"
+ "str q17, [x27, #0xd0]\n"
+ "add x27, x27, #0x20\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x28, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
+ "ldr s23, [x9], #0x4\n"
+ "ldr s22, [x26], #0x4\n"
+ "sub x28, x28, #0x1\n"
+ "ldr s19, [x25], #0x4\n"
+ "ldr s17, [x24], #0x4\n"
+ "cmp x28, #0x1\n"
+ "ldr s21, [x23], #0x4\n"
+ "ldr s20, [x22], #0x4\n"
+ "ldr s18, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "zip1 v19.4s, v23.4s, v19.4s\n"
+ "zip1 v17.4s, v22.4s, v17.4s\n"
+ "zip1 v18.4s, v21.4s, v18.4s\n"
+ "zip1 v16.4s, v20.4s, v16.4s\n"
"zip1 v17.4s, v19.4s, v17.4s\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
- "ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v16.4s, v17.4s, v16.4s\n"
- ".inst 0x0ea16a12 // bfcvtn v18.4h, v16.4s\n"
- "ldr s17, [x22], #0x4\n"
- "ldr s16, [x20], #0x4\n"
- "zip1 v17.4s, v20.4s, v17.4s\n"
- "zip1 v16.4s, v19.4s, v16.4s\n"
- "zip1 v16.4s, v17.4s, v16.4s\n"
+ ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d18, [x21, #0x0]\n"
- "str d16, [x21, #0xc0]\n"
- "add x21, x21, #0x8\n"
+ "str d17, [x27, #0x0]\n"
+ "str d16, [x27, #0xc0]\n"
+ "add x27, x27, #0x8\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x8\n"
"add %x[out], %x[out], #0x180\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x9, %x[in]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x26, x27, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x26, %x[in_stride]\n"
- "csel x26, x26, %x[pad_row], GT\n"
- "csel x27, x27, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x9, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "csel x25, x25, %x[pad_row], GE\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x28, x28, %x[pad_row], GT\n"
- "cmp x20, #0x18\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: Column loop
- "ldr q20, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x20, #0x18\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Column loop
+ "ldr q24, [x9], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
"sub x20, x20, #0x18\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x18\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v9.4s, v20.4s, v17.4s\n"
- "zip1 v30.4s, v18.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q19, [x28], #0x10\n"
- "zip2 v17.4s, v20.4s, v17.4s\n"
- "zip2 v5.4s, v18.4s, v16.4s\n"
- "ldr q18, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v0.4s, v21.4s, v18.4s\n"
- "zip1 v3.4s, v19.4s, v16.4s\n"
- "ldr q23, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v1.4s, v21.4s, v18.4s\n"
- "zip2 v16.4s, v19.4s, v16.4s\n"
- "ldr q19, [x27], #0x10\n"
- "ldr q18, [x26], #0x10\n"
- "zip1 v4.4s, v23.4s, v19.4s\n"
- "zip1 v2.4s, v20.4s, v18.4s\n"
- "ldr q22, [x9], #0x10\n"
- "ldr q21, [x28], #0x10\n"
- "zip2 v27.4s, v23.4s, v19.4s\n"
- "zip2 v28.4s, v20.4s, v18.4s\n"
- "ldr q20, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v25.4s, v22.4s, v20.4s\n"
- "zip1 v26.4s, v21.4s, v24.4s\n"
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v14.4s, v22.4s, v20.4s\n"
- "zip2 v12.4s, v21.4s, v24.4s\n"
- "ldr q31, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v15.4s, v19.4s, v31.4s\n"
- "zip1 v13.4s, v18.4s, v24.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q11, [x28], #0x10\n"
- "zip2 v20.4s, v19.4s, v31.4s\n"
- "zip2 v10.4s, v18.4s, v24.4s\n"
- "ldr q22, [x27], #0x10\n"
+ "ldr q26, [x9], #0x10\n"
"ldr q23, [x26], #0x10\n"
- "zip1 v19.4s, v21.4s, v22.4s\n"
- "zip1 v18.4s, v11.4s, v23.4s\n"
- "zip2 v6.4s, v21.4s, v22.4s\n"
- "zip2 v11.4s, v11.4s, v23.4s\n"
- "zip1 v8.4s, v9.4s, v30.4s\n"
- "zip1 v21.4s, v17.4s, v5.4s\n"
- "zip1 v7.4s, v0.4s, v3.4s\n"
- "zip1 v31.4s, v1.4s, v16.4s\n"
- "zip1 v29.4s, v4.4s, v2.4s\n"
- "zip1 v22.4s, v27.4s, v28.4s\n"
- "zip1 v24.4s, v25.4s, v26.4s\n"
- "zip1 v23.4s, v14.4s, v12.4s\n"
- ".inst 0x0ea16908 // bfcvtn v8.4h, v8.4s\n"
- "zip2 v9.4s, v9.4s, v30.4s\n"
- "zip1 v30.4s, v15.4s, v13.4s\n"
- ".inst 0x0ea16ab5 // bfcvtn v21.4h, v21.4s\n"
- "zip2 v5.4s, v17.4s, v5.4s\n"
- "zip1 v17.4s, v20.4s, v10.4s\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "ldr q25, [x9], #0x10\n"
+ "zip1 v6.4s, v24.4s, v19.4s\n"
+ "zip1 v11.4s, v22.4s, v17.4s\n"
+ "ldr q21, [x26], #0x10\n"
+ "ldr q20, [x25], #0x10\n"
+ "zip2 v2.4s, v24.4s, v19.4s\n"
+ "zip2 v22.4s, v22.4s, v17.4s\n"
+ "ldr q17, [x24], #0x10\n"
+ "ldr q7, [x9], #0x10\n"
+ "zip1 v19.4s, v26.4s, v18.4s\n"
+ "zip1 v5.4s, v23.4s, v16.4s\n"
+ "ldr q3, [x26], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
+ "zip2 v0.4s, v26.4s, v18.4s\n"
+ "zip2 v4.4s, v23.4s, v16.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "ldr q1, [x9], #0x10\n"
+ "zip1 v31.4s, v25.4s, v20.4s\n"
+ "zip1 v23.4s, v21.4s, v17.4s\n"
+ "ldr q30, [x26], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "zip2 v29.4s, v25.4s, v20.4s\n"
+ "zip2 v28.4s, v21.4s, v17.4s\n"
+ "ldr q25, [x24], #0x10\n"
+ "ldr q20, [x9], #0x10\n"
+ "zip1 v21.4s, v7.4s, v24.4s\n"
+ "zip1 v26.4s, v3.4s, v16.4s\n"
+ "ldr q17, [x26], #0x10\n"
+ "ldr q27, [x25], #0x10\n"
+ "zip2 v24.4s, v7.4s, v24.4s\n"
+ "zip2 v14.4s, v3.4s, v16.4s\n"
+ "ldr q10, [x24], #0x10\n"
+ "zip1 v16.4s, v1.4s, v18.4s\n"
+ "zip1 v13.4s, v30.4s, v25.4s\n"
+ "zip2 v18.4s, v1.4s, v18.4s\n"
+ "zip2 v12.4s, v30.4s, v25.4s\n"
+ "zip1 v15.4s, v20.4s, v27.4s\n"
+ "zip1 v9.4s, v17.4s, v10.4s\n"
+ "zip2 v8.4s, v20.4s, v27.4s\n"
+ "zip2 v10.4s, v17.4s, v10.4s\n"
+ "zip1 v17.4s, v6.4s, v11.4s\n"
+ "zip1 v7.4s, v2.4s, v22.4s\n"
+ "zip1 v3.4s, v19.4s, v5.4s\n"
+ "zip1 v1.4s, v0.4s, v4.4s\n"
+ "zip1 v30.4s, v31.4s, v23.4s\n"
+ "zip1 v25.4s, v29.4s, v28.4s\n"
+ "zip1 v27.4s, v21.4s, v26.4s\n"
+ "zip1 v20.4s, v24.4s, v14.4s\n"
+ ".inst 0x0ea16a31 // bfcvtn v17.4h, v17.4s\n"
+ "zip2 v6.4s, v6.4s, v11.4s\n"
+ "zip1 v11.4s, v16.4s, v13.4s\n"
".inst 0x0ea168e7 // bfcvtn v7.4h, v7.4s\n"
- "zip2 v0.4s, v0.4s, v3.4s\n"
- "zip1 v3.4s, v19.4s, v18.4s\n"
- ".inst 0x0ea16bff // bfcvtn v31.4h, v31.4s\n"
- "zip2 v16.4s, v1.4s, v16.4s\n"
- "zip1 v1.4s, v6.4s, v11.4s\n"
- ".inst 0x0ea16bbd // bfcvtn v29.4h, v29.4s\n"
- "zip2 v2.4s, v4.4s, v2.4s\n"
- ".inst 0x0ea16ac4 // bfcvtn v4.4h, v22.4s\n"
- "zip2 v27.4s, v27.4s, v28.4s\n"
- ".inst 0x0ea16b1c // bfcvtn v28.4h, v24.4s\n"
- "zip2 v25.4s, v25.4s, v26.4s\n"
- ".inst 0x0ea16afa // bfcvtn v26.4h, v23.4s\n"
- "zip2 v14.4s, v14.4s, v12.4s\n"
- ".inst 0x0ea16bd8 // bfcvtn v24.4h, v30.4s\n"
- "zip2 v13.4s, v15.4s, v13.4s\n"
- ".inst 0x0ea16a2f // bfcvtn v15.4h, v17.4s\n"
- "zip2 v12.4s, v20.4s, v10.4s\n"
- ".inst 0x0ea16874 // bfcvtn v20.4h, v3.4s\n"
- "zip2 v10.4s, v19.4s, v18.4s\n"
- ".inst 0x0ea16831 // bfcvtn v17.4h, v1.4s\n"
- "zip2 v18.4s, v6.4s, v11.4s\n"
- ".inst 0x4ea16928 // bfcvtn2 v8.8h, v9.4s\n"
- ".inst 0x4ea168b5 // bfcvtn2 v21.8h, v5.4s\n"
- "str q8, [x21, #0x0]\n"
- ".inst 0x4ea16807 // bfcvtn2 v7.8h, v0.4s\n"
- ".inst 0x4ea16a1f // bfcvtn2 v31.8h, v16.4s\n"
- "str q21, [x21, #0x10]\n"
- ".inst 0x4ea1685d // bfcvtn2 v29.8h, v2.4s\n"
- ".inst 0x4ea16b64 // bfcvtn2 v4.8h, v27.4s\n"
- "str q7, [x21, #0x20]\n"
- ".inst 0x4ea16b3c // bfcvtn2 v28.8h, v25.4s\n"
- ".inst 0x4ea169da // bfcvtn2 v26.8h, v14.4s\n"
- "str q31, [x21, #0x30]\n"
- ".inst 0x4ea169b8 // bfcvtn2 v24.8h, v13.4s\n"
- ".inst 0x4ea1698f // bfcvtn2 v15.8h, v12.4s\n"
- "str q29, [x21, #0x40]\n"
- ".inst 0x4ea16954 // bfcvtn2 v20.8h, v10.4s\n"
- ".inst 0x4ea16a51 // bfcvtn2 v17.8h, v18.4s\n"
- "str q4, [x21, #0x50]\n"
- "str q28, [x21, #0x60]\n"
- "str q26, [x21, #0x70]\n"
- "str q24, [x21, #0x80]\n"
- "str q15, [x21, #0x90]\n"
- "str q20, [x21, #0xa0]\n"
- "str q17, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Column loop skip
+ "zip2 v22.4s, v2.4s, v22.4s\n"
+ "zip1 v2.4s, v18.4s, v12.4s\n"
+ ".inst 0x0ea16863 // bfcvtn v3.4h, v3.4s\n"
+ "zip2 v5.4s, v19.4s, v5.4s\n"
+ "zip1 v19.4s, v15.4s, v9.4s\n"
+ ".inst 0x0ea16821 // bfcvtn v1.4h, v1.4s\n"
+ "zip2 v4.4s, v0.4s, v4.4s\n"
+ "zip1 v0.4s, v8.4s, v10.4s\n"
+ ".inst 0x0ea16bde // bfcvtn v30.4h, v30.4s\n"
+ "zip2 v31.4s, v31.4s, v23.4s\n"
+ ".inst 0x0ea16b37 // bfcvtn v23.4h, v25.4s\n"
+ "zip2 v29.4s, v29.4s, v28.4s\n"
+ ".inst 0x0ea16b7c // bfcvtn v28.4h, v27.4s\n"
+ "zip2 v27.4s, v21.4s, v26.4s\n"
+ ".inst 0x0ea16a9a // bfcvtn v26.4h, v20.4s\n"
+ "zip2 v25.4s, v24.4s, v14.4s\n"
+ ".inst 0x0ea16978 // bfcvtn v24.4h, v11.4s\n"
+ "zip2 v14.4s, v16.4s, v13.4s\n"
+ ".inst 0x0ea16855 // bfcvtn v21.4h, v2.4s\n"
+ "zip2 v20.4s, v18.4s, v12.4s\n"
+ ".inst 0x0ea16a73 // bfcvtn v19.4h, v19.4s\n"
+ "zip2 v18.4s, v15.4s, v9.4s\n"
+ ".inst 0x0ea1680b // bfcvtn v11.4h, v0.4s\n"
+ "zip2 v16.4s, v8.4s, v10.4s\n"
+ ".inst 0x4ea168d1 // bfcvtn2 v17.8h, v6.4s\n"
+ ".inst 0x4ea16ac7 // bfcvtn2 v7.8h, v22.4s\n"
+ ".inst 0x4ea168a3 // bfcvtn2 v3.8h, v5.4s\n"
+ ".inst 0x4ea16881 // bfcvtn2 v1.8h, v4.4s\n"
+ ".inst 0x4ea16bfe // bfcvtn2 v30.8h, v31.4s\n"
+ ".inst 0x4ea16bb7 // bfcvtn2 v23.8h, v29.4s\n"
+ ".inst 0x4ea16b7c // bfcvtn2 v28.8h, v27.4s\n"
+ ".inst 0x4ea16b3a // bfcvtn2 v26.8h, v25.4s\n"
+ "str q17, [x27, #0x0]\n"
+ ".inst 0x4ea169d8 // bfcvtn2 v24.8h, v14.4s\n"
+ ".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
+ "str q7, [x27, #0x10]\n"
+ ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
+ ".inst 0x4ea16a0b // bfcvtn2 v11.8h, v16.4s\n"
+ "str q3, [x27, #0x20]\n"
+ "str q1, [x27, #0x30]\n"
+ "str q30, [x27, #0x40]\n"
+ "str q23, [x27, #0x50]\n"
+ "str q28, [x27, #0x60]\n"
+ "str q26, [x27, #0x70]\n"
+ "str q24, [x27, #0x80]\n"
+ "str q21, [x27, #0x90]\n"
+ "str q19, [x27, #0xa0]\n"
+ "str q11, [x27, #0xb0]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x10\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 16 loop: loop
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "str q16, [x27, #0x20]\n"
+ "str q16, [x27, #0x30]\n"
+ "str q16, [x27, #0x40]\n"
+ "str q16, [x27, #0x50]\n"
+ "str q16, [x27, #0x60]\n"
+ "str q16, [x27, #0x70]\n"
+ "str q16, [x27, #0x80]\n"
+ "str q16, [x27, #0x90]\n"
+ "str q16, [x27, #0xa0]\n"
+ "str q16, [x27, #0xb0]\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: width 16 loop: loop
+ "ldr q20, [x9], #0x10\n"
+ "ldr q19, [x26], #0x10\n"
"sub x20, x20, #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x10\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v30.4s, v21.4s, v17.4s\n"
- "zip1 v29.4s, v20.4s, v16.4s\n"
- "ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v28.4s, v21.4s, v17.4s\n"
- "zip2 v27.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v26.4s, v19.4s, v17.4s\n"
- "zip1 v25.4s, v18.4s, v16.4s\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v8.4s, v19.4s, v17.4s\n"
- "zip2 v24.4s, v18.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v7.4s, v21.4s, v17.4s\n"
- "zip1 v6.4s, v20.4s, v16.4s\n"
+ "ldr q0, [x9], #0x10\n"
+ "ldr q31, [x26], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
+ "ldr q16, [x24], #0x10\n"
+ "ldr q23, [x9], #0x10\n"
+ "zip1 v30.4s, v20.4s, v18.4s\n"
+ "zip1 v29.4s, v19.4s, v17.4s\n"
+ "ldr q22, [x26], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
+ "zip2 v28.4s, v20.4s, v18.4s\n"
+ "zip2 v27.4s, v19.4s, v17.4s\n"
+ "ldr q20, [x24], #0x10\n"
"ldr q19, [x9], #0x10\n"
- "ldr q18, [x28], #0x10\n"
- "zip2 v5.4s, v21.4s, v17.4s\n"
- "zip2 v4.4s, v20.4s, v16.4s\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
+ "zip1 v26.4s, v0.4s, v24.4s\n"
+ "zip1 v25.4s, v31.4s, v16.4s\n"
+ "ldr q18, [x26], #0x10\n"
+ "ldr q17, [x25], #0x10\n"
+ "zip2 v8.4s, v0.4s, v24.4s\n"
+ "zip2 v24.4s, v31.4s, v16.4s\n"
+ "ldr q16, [x24], #0x10\n"
+ "zip1 v7.4s, v23.4s, v21.4s\n"
+ "zip1 v6.4s, v22.4s, v20.4s\n"
+ "zip2 v5.4s, v23.4s, v21.4s\n"
+ "zip2 v4.4s, v22.4s, v20.4s\n"
"zip1 v3.4s, v19.4s, v17.4s\n"
"zip1 v2.4s, v18.4s, v16.4s\n"
"zip2 v1.4s, v19.4s, v17.4s\n"
@@ -698,70 +739,71 @@ void a64_transpose_interleave_24_2x4_fp32bf16(bfloat16 *out, const float *in, si
"zip2 v16.4s, v1.4s, v0.4s\n"
".inst 0x4ea16bdf // bfcvtn2 v31.8h, v30.4s\n"
".inst 0x4ea16b9d // bfcvtn2 v29.8h, v28.4s\n"
- "str q31, [x21, #0x0]\n"
".inst 0x4ea16b5b // bfcvtn2 v27.8h, v26.4s\n"
".inst 0x4ea16b19 // bfcvtn2 v25.8h, v24.4s\n"
- "str q29, [x21, #0x10]\n"
".inst 0x4ea16ad7 // bfcvtn2 v23.8h, v22.4s\n"
".inst 0x4ea16a95 // bfcvtn2 v21.8h, v20.4s\n"
- "str q27, [x21, #0x20]\n"
".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q25, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q21, [x21, #0x50]\n"
- "str q19, [x21, #0x60]\n"
- "str q17, [x21, #0x70]\n"
- "add x21, x21, #0x80\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 16 loop: skip
+ "str q31, [x27, #0x0]\n"
+ "str q29, [x27, #0x10]\n"
+ "str q27, [x27, #0x20]\n"
+ "str q25, [x27, #0x30]\n"
+ "str q23, [x27, #0x40]\n"
+ "str q21, [x27, #0x50]\n"
+ "str q19, [x27, #0x60]\n"
+ "str q17, [x27, #0x70]\n"
+ "add x27, x27, #0x80\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 16 loop: skip
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
- "ldr q20, [x9], #0x10\n"
- "ldr q19, [x28], #0x10\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
+ "ldr q21, [x9], #0x10\n"
+ "ldr q20, [x26], #0x10\n"
"sub x20, x20, #0x4\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q17, [x24], #0x10\n"
"cmp x20, #0x4\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.4s, v20.4s, v17.4s\n"
- "zip1 v18.4s, v19.4s, v16.4s\n"
- "zip2 v21.4s, v20.4s, v17.4s\n"
- "zip2 v20.4s, v19.4s, v16.4s\n"
- "zip1 v17.4s, v22.4s, v18.4s\n"
+ "zip1 v18.4s, v21.4s, v19.4s\n"
+ "zip1 v16.4s, v20.4s, v17.4s\n"
+ "zip2 v21.4s, v21.4s, v19.4s\n"
+ "zip2 v20.4s, v20.4s, v17.4s\n"
+ "zip1 v17.4s, v18.4s, v16.4s\n"
+ "zip2 v19.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x0ea16a33 // bfcvtn v19.4h, v17.4s\n"
- "zip2 v18.4s, v22.4s, v18.4s\n"
- ".inst 0x0ea16a11 // bfcvtn v17.4h, v16.4s\n"
- "zip2 v16.4s, v21.4s, v20.4s\n"
- ".inst 0x4ea16a53 // bfcvtn2 v19.8h, v18.4s\n"
- ".inst 0x4ea16a11 // bfcvtn2 v17.8h, v16.4s\n"
- "str q19, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ ".inst 0x0ea16a32 // bfcvtn v18.4h, v17.4s\n"
+ "zip2 v17.4s, v21.4s, v20.4s\n"
+ ".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
+ ".inst 0x4ea16a72 // bfcvtn2 v18.8h, v19.4s\n"
+ ".inst 0x4ea16a30 // bfcvtn2 v16.8h, v17.4s\n"
+ "str q18, [x27, #0x0]\n"
+ "str q16, [x27, #0x10]\n"
+ "add x27, x27, #0x20\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
+ "ldr s18, [x26], #0x4\n"
"sub x20, x20, #0x1\n"
+ "ldr s17, [x25], #0x4\n"
+ "ldr s16, [x24], #0x4\n"
"cmp x20, #0x1\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
"zip1 v17.4s, v19.4s, v17.4s\n"
"zip1 v16.4s, v18.4s, v16.4s\n"
"zip1 v16.4s, v17.4s, v16.4s\n"
".inst 0x0ea16a10 // bfcvtn v16.4h, v16.4s\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str d16, [x27, #0x0]\n"
+ "add x27, x27, #0x8\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0xc0\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp
index dcaf69d2a8..94c442b772 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_bf16fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,245 +34,266 @@ void a64_transpose_interleave_24_bf16fp32(float *out, const bfloat16 *in, size_t
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x18\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "sub x24, x24, #0x18\n"
- "shll v26.4s, v18.4h, #0x10\n"
- "ldr q16, [x22], #0x10\n"
- "ldr q25, [x20], #0x10\n"
- "shll2 v24.4s, v18.8h, #0x10\n"
- "shll v5.4s, v17.4h, #0x10\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "shll v21.4s, v23.4h, #0x10\n"
- "shll2 v4.4s, v17.8h, #0x10\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "shll v2.4s, v22.4h, #0x10\n"
- "shll v1.4s, v16.4h, #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "shll2 v0.4s, v16.8h, #0x10\n"
- "shll v31.4s, v20.4h, #0x10\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "shll v30.4s, v25.4h, #0x10\n"
- "shll2 v29.4s, v25.8h, #0x10\n"
- "shll v28.4s, v3.4h, #0x10\n"
- "str q26, [x21, #0x0]\n"
+ "ldr q18, [x22], #0x10\n"
+ "sub x24, x24, #0x18\n"
+ "ldr q17, [x21], #0x10\n"
+ "ldr q27, [x20], #0x10\n"
"cmp x24, #0x18\n"
- "shll2 v27.4s, v23.8h, #0x10\n"
- "str q24, [x21, #0x10]\n"
- "shll v26.4s, v19.4h, #0x10\n"
+ "ldr q26, [x25], #0x10\n"
+ "ldr q3, [x22], #0x10\n"
+ "ldr q2, [x21], #0x10\n"
+ "shll v16.4s, v19.4h, #0x10\n"
"shll2 v25.4s, v19.8h, #0x10\n"
- "str q21, [x21, #0x20]\n"
- "shll2 v24.4s, v22.8h, #0x10\n"
+ "ldr q1, [x20], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
"shll v23.4s, v18.4h, #0x10\n"
- "str q5, [x21, #0x30]\n"
"shll2 v22.4s, v18.8h, #0x10\n"
- "shll2 v21.4s, v20.8h, #0x10\n"
- "str q4, [x21, #0x40]\n"
- "shll v20.4s, v17.4h, #0x10\n"
- "shll2 v19.4s, v17.8h, #0x10\n"
- "str q2, [x21, #0x50]\n"
- "shll2 v18.4s, v3.8h, #0x10\n"
- "shll v17.4s, v16.4h, #0x10\n"
- "str q1, [x21, #0x60]\n"
- "shll2 v16.4s, v16.8h, #0x10\n"
- "str q0, [x21, #0x70]\n"
- "str q31, [x21, #0x80]\n"
- "str q30, [x21, #0x90]\n"
- "str q29, [x21, #0xa0]\n"
- "str q28, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q27, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q21, [x22], #0x10\n"
+ "ldr q0, [x21], #0x10\n"
+ "shll v20.4s, v26.4h, #0x10\n"
+ "shll v19.4s, v3.4h, #0x10\n"
+ "ldr q31, [x20], #0x10\n"
+ "shll v18.4s, v17.4h, #0x10\n"
+ "shll2 v17.4s, v17.8h, #0x10\n"
+ "str q16, [x23, #0x0]\n"
+ "shll v16.4s, v2.4h, #0x10\n"
+ "shll v30.4s, v27.4h, #0x10\n"
+ "str q25, [x23, #0x10]\n"
+ "shll2 v29.4s, v27.8h, #0x10\n"
+ "shll v28.4s, v1.4h, #0x10\n"
+ "str q20, [x23, #0x20]\n"
+ "str q23, [x23, #0x30]\n"
+ "shll2 v27.4s, v26.8h, #0x10\n"
+ "shll v26.4s, v24.4h, #0x10\n"
+ "str q22, [x23, #0x40]\n"
+ "shll2 v25.4s, v24.8h, #0x10\n"
+ "shll2 v24.4s, v3.8h, #0x10\n"
+ "str q19, [x23, #0x50]\n"
+ "shll v23.4s, v21.4h, #0x10\n"
+ "shll2 v22.4s, v21.8h, #0x10\n"
+ "str q18, [x23, #0x60]\n"
+ "shll2 v21.4s, v2.8h, #0x10\n"
+ "shll v20.4s, v0.4h, #0x10\n"
+ "str q17, [x23, #0x70]\n"
+ "shll2 v19.4s, v0.8h, #0x10\n"
+ "shll2 v18.4s, v1.8h, #0x10\n"
+ "str q16, [x23, #0x80]\n"
+ "shll v17.4s, v31.4h, #0x10\n"
+ "shll2 v16.4s, v31.8h, #0x10\n"
+ "str q30, [x23, #0x90]\n"
+ "str q29, [x23, #0xa0]\n"
+ "str q28, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q27, [x23, #0x0]\n"
+ "str q26, [x23, #0x10]\n"
+ "str q25, [x23, #0x20]\n"
+ "str q24, [x23, #0x30]\n"
+ "str q23, [x23, #0x40]\n"
+ "str q22, [x23, #0x50]\n"
+ "str q21, [x23, #0x60]\n"
+ "str q20, [x23, #0x70]\n"
+ "str q19, [x23, #0x80]\n"
+ "str q18, [x23, #0x90]\n"
+ "str q17, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cmp x24, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
"ldr q16, [x25], #0x10\n"
- "ldr q21, [x23], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
"sub x24, x24, #0xc\n"
+ "ldr q27, [x21], #0x10\n"
+ "ldr q26, [x20], #0x10\n"
"cmp x24, #0xc\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q27, [x20], #0x10\n"
- "shll v19.4s, v16.4h, #0x10\n"
- "shll2 v26.4s, v16.8h, #0x10\n"
- "ldr d16, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "shll v25.4s, v16.4h, #0x10\n"
- "shll v24.4s, v21.4h, #0x10\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x25], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "shll v18.4s, v16.4h, #0x10\n"
+ "shll2 v17.4s, v16.8h, #0x10\n"
"ldr d16, [x20], #0x8\n"
- "shll2 v23.4s, v21.8h, #0x10\n"
- "shll v22.4s, v18.4h, #0x10\n"
- "shll v21.4s, v20.4h, #0x10\n"
- "shll2 v20.4s, v20.8h, #0x10\n"
- "str q19, [x21, #0x0]\n"
- "shll v19.4s, v17.4h, #0x10\n"
- "shll v18.4s, v27.4h, #0x10\n"
- "str q26, [x21, #0x10]\n"
- "shll2 v17.4s, v27.8h, #0x10\n"
+ "shll v25.4s, v22.4h, #0x10\n"
+ "shll2 v24.4s, v22.8h, #0x10\n"
+ "shll v23.4s, v21.4h, #0x10\n"
+ "shll v22.4s, v20.4h, #0x10\n"
+ "shll v21.4s, v27.4h, #0x10\n"
+ "shll2 v20.4s, v27.8h, #0x10\n"
+ "str q18, [x23, #0x0]\n"
+ "shll v19.4s, v19.4h, #0x10\n"
+ "shll v18.4s, v26.4h, #0x10\n"
+ "str q17, [x23, #0x10]\n"
+ "shll2 v17.4s, v26.8h, #0x10\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q25, [x23, #0x30]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q22, [x23, #0x50]\n"
+ "str q21, [x23, #0x60]\n"
+ "str q20, [x23, #0x70]\n"
+ "str q19, [x23, #0x80]\n"
+ "str q18, [x23, #0x90]\n"
+ "str q17, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x4\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "str q16, [x23, #0x80]\n"
+ "str q16, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
+ "cmp x24, #0x4\n"
"shll v19.4s, v19.4h, #0x10\n"
"shll v18.4s, v18.4h, #0x10\n"
"shll v17.4s, v17.4h, #0x10\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x90]\n"
- "add x21, x21, #0x10\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x30]\n"
+ "str q17, [x23, #0x60]\n"
+ "str q16, [x23, #0x90]\n"
+ "add x23, x23, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
+ "ldr h18, [x22], #0x2\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
+ "cmp x24, #0x1\n"
"shll v19.4s, v19.4h, #0x10\n"
"shll v18.4s, v18.4h, #0x10\n"
"shll v17.4s, v17.4h, #0x10\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x30]\n"
- "str s17, [x21, #0x60]\n"
- "str s16, [x21, #0x90]\n"
- "add x21, x21, #0x4\n"
+ "str s19, [x23, #0x0]\n"
+ "str s18, [x23, #0x30]\n"
+ "str s17, [x23, #0x60]\n"
+ "str s16, [x23, #0x90]\n"
+ "add x23, x23, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0xc0\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x18\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
"ldr q16, [x25], #0x10\n"
- "ldr q20, [x25], #0x10\n"
"sub x20, x20, #0x18\n"
- "shll v18.4s, v16.4h, #0x10\n"
- "ldr q19, [x25], #0x10\n"
- "shll2 v17.4s, v16.8h, #0x10\n"
- "shll v16.4s, v20.4h, #0x10\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q20, [x25], #0x10\n"
"cmp x20, #0x18\n"
- "shll2 v18.4s, v20.8h, #0x10\n"
- "shll v17.4s, v19.4h, #0x10\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "shll2 v16.4s, v19.8h, #0x10\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "shll v17.4s, v16.4h, #0x10\n"
+ "shll2 v16.4s, v16.8h, #0x10\n"
+ "shll v19.4s, v18.4h, #0x10\n"
+ "shll2 v18.4s, v18.8h, #0x10\n"
+ "str q17, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "shll v17.4s, v20.4h, #0x10\n"
+ "shll2 v16.4s, v20.8h, #0x10\n"
+ "str q19, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q18, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
"ldr q17, [x25], #0x10\n"
- "ldr d18, [x25], #0x8\n"
"sub x20, x20, #0xc\n"
+ "ldr d16, [x25], #0x8\n"
"cmp x20, #0xc\n"
- "shll v16.4s, v17.4h, #0x10\n"
+ "shll v18.4s, v17.4h, #0x10\n"
"shll2 v17.4s, v17.8h, #0x10\n"
- "str q16, [x21, #0x0]\n"
- "shll v16.4s, v18.4h, #0x10\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "shll v16.4s, v16.4h, #0x10\n"
+ "str q18, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.16b, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr d16, [x25], #0x8\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q16, [x23, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr h16, [x25], #0x2\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
"shll v16.4s, v16.4h, #0x10\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str s16, [x23, #0x0]\n"
+ "add x23, x23, #0x4\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
);
}
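
The kernel above appears to widen bfloat16 lanes to float32 with `shll`/`shll2` by #16: a bfloat16 value is, bit for bit, the top half of the corresponding float32, so a 16-bit left shift of each lane performs the conversion exactly. A minimal scalar sketch of that identity follows; the function name is illustrative and not part of the patch.

    #include <cstdint>
    #include <cstring>

    // Scalar equivalent of the kernel's "shll v.4s, v.4h, #16": place the
    // bfloat16 bit pattern in the high 16 bits of a 32-bit word and
    // reinterpret it as an IEEE-754 float.
    inline float bf16_to_f32(uint16_t bits)
    {
        uint32_t widened = static_cast<uint32_t>(bits) << 16;
        float result;
        std::memcpy(&result, &widened, sizeof(result)); // bit-level reinterpret
        return result;
    }
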
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp
index 966b75664e..4d106f4e6d 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_24_fp16fp32.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,245 +34,266 @@ void a64_transpose_interleave_24_fp16fp32(float *out, const __fp16 *in, size_t w
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x18\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "sub x24, x24, #0x18\n"
- "fcvtl v26.4s, v18.4h\n"
- "ldr q16, [x22], #0x10\n"
- "ldr q25, [x20], #0x10\n"
- "fcvtl2 v24.4s, v18.8h\n"
- "fcvtl v5.4s, v17.4h\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "fcvtl v21.4s, v23.4h\n"
- "fcvtl2 v4.4s, v17.8h\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "fcvtl v2.4s, v22.4h\n"
- "fcvtl v1.4s, v16.4h\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "fcvtl2 v0.4s, v16.8h\n"
- "fcvtl v31.4s, v20.4h\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q16, [x20], #0x10\n"
- "fcvtl v30.4s, v25.4h\n"
- "fcvtl2 v29.4s, v25.8h\n"
- "fcvtl v28.4s, v3.4h\n"
- "str q26, [x21, #0x0]\n"
+ "ldr q18, [x22], #0x10\n"
+ "sub x24, x24, #0x18\n"
+ "ldr q17, [x21], #0x10\n"
+ "ldr q27, [x20], #0x10\n"
"cmp x24, #0x18\n"
- "fcvtl2 v27.4s, v23.8h\n"
- "str q24, [x21, #0x10]\n"
- "fcvtl v26.4s, v19.4h\n"
+ "ldr q26, [x25], #0x10\n"
+ "ldr q3, [x22], #0x10\n"
+ "ldr q2, [x21], #0x10\n"
+ "fcvtl v16.4s, v19.4h\n"
"fcvtl2 v25.4s, v19.8h\n"
- "str q21, [x21, #0x20]\n"
- "fcvtl2 v24.4s, v22.8h\n"
+ "ldr q1, [x20], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
"fcvtl v23.4s, v18.4h\n"
- "str q5, [x21, #0x30]\n"
"fcvtl2 v22.4s, v18.8h\n"
- "fcvtl2 v21.4s, v20.8h\n"
- "str q4, [x21, #0x40]\n"
- "fcvtl v20.4s, v17.4h\n"
- "fcvtl2 v19.4s, v17.8h\n"
- "str q2, [x21, #0x50]\n"
- "fcvtl2 v18.4s, v3.8h\n"
- "fcvtl v17.4s, v16.4h\n"
- "str q1, [x21, #0x60]\n"
- "fcvtl2 v16.4s, v16.8h\n"
- "str q0, [x21, #0x70]\n"
- "str q31, [x21, #0x80]\n"
- "str q30, [x21, #0x90]\n"
- "str q29, [x21, #0xa0]\n"
- "str q28, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q27, [x21, #0x0]\n"
- "str q26, [x21, #0x10]\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q21, [x22], #0x10\n"
+ "ldr q0, [x21], #0x10\n"
+ "fcvtl v20.4s, v26.4h\n"
+ "fcvtl v19.4s, v3.4h\n"
+ "ldr q31, [x20], #0x10\n"
+ "fcvtl v18.4s, v17.4h\n"
+ "fcvtl2 v17.4s, v17.8h\n"
+ "str q16, [x23, #0x0]\n"
+ "fcvtl v16.4s, v2.4h\n"
+ "fcvtl v30.4s, v27.4h\n"
+ "str q25, [x23, #0x10]\n"
+ "fcvtl2 v29.4s, v27.8h\n"
+ "fcvtl v28.4s, v1.4h\n"
+ "str q20, [x23, #0x20]\n"
+ "str q23, [x23, #0x30]\n"
+ "fcvtl2 v27.4s, v26.8h\n"
+ "fcvtl v26.4s, v24.4h\n"
+ "str q22, [x23, #0x40]\n"
+ "fcvtl2 v25.4s, v24.8h\n"
+ "fcvtl2 v24.4s, v3.8h\n"
+ "str q19, [x23, #0x50]\n"
+ "fcvtl v23.4s, v21.4h\n"
+ "fcvtl2 v22.4s, v21.8h\n"
+ "str q18, [x23, #0x60]\n"
+ "fcvtl2 v21.4s, v2.8h\n"
+ "fcvtl v20.4s, v0.4h\n"
+ "str q17, [x23, #0x70]\n"
+ "fcvtl2 v19.4s, v0.8h\n"
+ "fcvtl2 v18.4s, v1.8h\n"
+ "str q16, [x23, #0x80]\n"
+ "fcvtl v17.4s, v31.4h\n"
+ "fcvtl2 v16.4s, v31.8h\n"
+ "str q30, [x23, #0x90]\n"
+ "str q29, [x23, #0xa0]\n"
+ "str q28, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q27, [x23, #0x0]\n"
+ "str q26, [x23, #0x10]\n"
+ "str q25, [x23, #0x20]\n"
+ "str q24, [x23, #0x30]\n"
+ "str q23, [x23, #0x40]\n"
+ "str q22, [x23, #0x50]\n"
+ "str q21, [x23, #0x60]\n"
+ "str q20, [x23, #0x70]\n"
+ "str q19, [x23, #0x80]\n"
+ "str q18, [x23, #0x90]\n"
+ "str q17, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cmp x24, #0xc\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
"ldr q16, [x25], #0x10\n"
- "ldr q21, [x23], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
"sub x24, x24, #0xc\n"
+ "ldr q27, [x21], #0x10\n"
+ "ldr q26, [x20], #0x10\n"
"cmp x24, #0xc\n"
- "ldr q20, [x22], #0x10\n"
- "ldr q27, [x20], #0x10\n"
- "fcvtl v19.4s, v16.4h\n"
- "fcvtl2 v26.4s, v16.8h\n"
- "ldr d16, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
- "fcvtl v25.4s, v16.4h\n"
- "fcvtl v24.4s, v21.4h\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d21, [x25], #0x8\n"
+ "ldr d20, [x22], #0x8\n"
+ "ldr d19, [x21], #0x8\n"
+ "fcvtl v18.4s, v16.4h\n"
+ "fcvtl2 v17.4s, v16.8h\n"
"ldr d16, [x20], #0x8\n"
- "fcvtl2 v23.4s, v21.8h\n"
- "fcvtl v22.4s, v18.4h\n"
- "fcvtl v21.4s, v20.4h\n"
- "fcvtl2 v20.4s, v20.8h\n"
- "str q19, [x21, #0x0]\n"
- "fcvtl v19.4s, v17.4h\n"
- "fcvtl v18.4s, v27.4h\n"
- "str q26, [x21, #0x10]\n"
- "fcvtl2 v17.4s, v27.8h\n"
+ "fcvtl v25.4s, v22.4h\n"
+ "fcvtl2 v24.4s, v22.8h\n"
+ "fcvtl v23.4s, v21.4h\n"
+ "fcvtl v22.4s, v20.4h\n"
+ "fcvtl v21.4s, v27.4h\n"
+ "fcvtl2 v20.4s, v27.8h\n"
+ "str q18, [x23, #0x0]\n"
+ "fcvtl v19.4s, v19.4h\n"
+ "fcvtl v18.4s, v26.4h\n"
+ "str q17, [x23, #0x10]\n"
+ "fcvtl2 v17.4s, v26.8h\n"
"fcvtl v16.4s, v16.4h\n"
- "str q25, [x21, #0x20]\n"
- "str q24, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q22, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q20, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q25, [x23, #0x30]\n"
+ "str q24, [x23, #0x40]\n"
+ "str q22, [x23, #0x50]\n"
+ "str q21, [x23, #0x60]\n"
+ "str q20, [x23, #0x70]\n"
+ "str q19, [x23, #0x80]\n"
+ "str q18, [x23, #0x90]\n"
+ "str q17, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x4\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "str q16, [x23, #0x80]\n"
+ "str q16, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
+ "cmp x24, #0x4\n"
"fcvtl v19.4s, v19.4h\n"
"fcvtl v18.4s, v18.4h\n"
"fcvtl v17.4s, v17.4h\n"
"fcvtl v16.4s, v16.4h\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x90]\n"
- "add x21, x21, #0x10\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x30]\n"
+ "str q17, [x23, #0x60]\n"
+ "str q16, [x23, #0x90]\n"
+ "add x23, x23, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
+ "ldr h18, [x22], #0x2\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
+ "cmp x24, #0x1\n"
"fcvtl v19.4s, v19.4h\n"
"fcvtl v18.4s, v18.4h\n"
"fcvtl v17.4s, v17.4h\n"
"fcvtl v16.4s, v16.4h\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x30]\n"
- "str s17, [x21, #0x60]\n"
- "str s16, [x21, #0x90]\n"
- "add x21, x21, #0x4\n"
+ "str s19, [x23, #0x0]\n"
+ "str s18, [x23, #0x30]\n"
+ "str s17, [x23, #0x60]\n"
+ "str s16, [x23, #0x90]\n"
+ "add x23, x23, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0xc0\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x18\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Unroll column loop
+ "blt 14f\n"
+ "13:" // Tail row loop: Unroll column loop
"ldr q16, [x25], #0x10\n"
- "ldr q20, [x25], #0x10\n"
"sub x20, x20, #0x18\n"
- "fcvtl v18.4s, v16.4h\n"
- "ldr q19, [x25], #0x10\n"
- "fcvtl2 v17.4s, v16.8h\n"
- "fcvtl v16.4s, v20.4h\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
+ "ldr q18, [x25], #0x10\n"
+ "ldr q20, [x25], #0x10\n"
"cmp x20, #0x18\n"
- "fcvtl2 v18.4s, v20.8h\n"
- "fcvtl v17.4s, v19.4h\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "fcvtl2 v16.4s, v19.8h\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Unroll column loop skip
+ "fcvtl v17.4s, v16.4h\n"
+ "fcvtl2 v16.4s, v16.8h\n"
+ "fcvtl v19.4s, v18.4h\n"
+ "fcvtl2 v18.4s, v18.8h\n"
+ "str q17, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "fcvtl v17.4s, v20.4h\n"
+ "fcvtl2 v16.4s, v20.8h\n"
+ "str q19, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q18, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Unroll column loop skip
"cmp x20, #0xc\n"
- "blt 15f\n"
- "14:" // Tail row loop: Column loop
+ "blt 16f\n"
+ "15:" // Tail row loop: Column loop
"ldr q17, [x25], #0x10\n"
- "ldr d18, [x25], #0x8\n"
"sub x20, x20, #0xc\n"
+ "ldr d16, [x25], #0x8\n"
"cmp x20, #0xc\n"
- "fcvtl v16.4s, v17.4h\n"
+ "fcvtl v18.4s, v17.4h\n"
"fcvtl2 v17.4s, v17.8h\n"
- "str q16, [x21, #0x0]\n"
- "fcvtl v16.4s, v18.4h\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Column loop skip
+ "fcvtl v16.4s, v16.4h\n"
+ "str q18, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "movi v16.16b, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr d16, [x25], #0x8\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
"fcvtl v16.4s, v16.4h\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q16, [x23, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr h16, [x25], #0x2\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
"fcvtl v16.4s, v16.4h\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str s16, [x23, #0x0]\n"
+ "add x23, x23, #0x4\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
);
}
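
Across these transforms the patch follows one pattern: labels are renumbered to make room for a new "odd col skip" remainder path, the output pointer moves to a different scratch register, and when the residual width is non-zero the output block is zero-filled first (`movi v16` plus a run of q-stores) before the width-4 and width-1 tail loops overwrite the valid lanes. A hedged scalar sketch of that store order for this kernel's 12-float tail block follows; the function name and the remainder precondition are assumptions for illustration.

    #include <cstddef>
    #include <cstring>

    // Zero the whole 0x30-byte tail block first, then copy the converted
    // remainder lanes over it, mirroring "movi v16.16b, #0x0" plus three
    // q-stores followed by the width-4/width-1 loops. Assumes remainder < 12.
    void store_tail_block(float *out, const float *lanes, std::size_t remainder)
    {
        std::memset(out, 0, 12 * sizeof(float));            // pad with zeros
        std::memcpy(out, lanes, remainder * sizeof(float)); // valid lanes win
    }

Writing the zeros unconditionally and letting the tail loops overwrite them keeps the narrow loops branch-free with respect to padding, at the cost of a few redundant stores.
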
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp
index 4a22675028..8c16e5ba46 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,14 +40,16 @@ void a64_transpose_interleave_32_1x4(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"cmp %x[height], #0x10\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[width]\n"
+ "mov x15, %x[out]\n"
+ "sub %x[height], %x[height], #0x10\n"
+ "add x14, x17, %x[in_stride]\n"
"add x13, x14, %x[in_stride]\n"
"add x12, x13, %x[in_stride]\n"
+ "cmp x16, #0x20\n"
"add x11, x12, %x[in_stride]\n"
"add x10, x11, %x[in_stride]\n"
"add x9, x10, %x[in_stride]\n"
@@ -55,420 +57,464 @@ void a64_transpose_interleave_32_1x4(uint8_t *out, const uint8_t *in, size_t wid
"add x27, x28, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x20\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
- "ldr q6, [x17], #0x10\n"
- "ldr q31, [x16], #0x10\n"
- "sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q7, [x15], #0x10\n"
- "ldr q0, [x14], #0x10\n"
- "zip1 v9.16b, v6.16b, v7.16b\n"
- "zip1 v20.16b, v31.16b, v0.16b\n"
- "ldr q24, [x13], #0x10\n"
- "ldr q19, [x12], #0x10\n"
- "zip2 v30.16b, v6.16b, v7.16b\n"
- "zip2 v12.16b, v31.16b, v0.16b\n"
- "ldr q23, [x11], #0x10\n"
- "ldr q17, [x10], #0x10\n"
- "zip1 v13.16b, v24.16b, v23.16b\n"
- "zip1 v16.16b, v19.16b, v17.16b\n"
- "ldr q0, [x9], #0x10\n"
- "ldr q31, [x28], #0x10\n"
- "zip2 v15.16b, v24.16b, v23.16b\n"
- "zip2 v11.16b, v19.16b, v17.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q4, [x26], #0x10\n"
- "zip1 v1.16b, v0.16b, v17.16b\n"
- "zip1 v21.16b, v31.16b, v4.16b\n"
- "ldr q28, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v0.16b, v0.16b, v17.16b\n"
- "zip2 v26.16b, v31.16b, v4.16b\n"
- "ldr q17, [x22], #0x10\n"
- "ldr q19, [x20], #0x10\n"
- "zip1 v23.16b, v28.16b, v17.16b\n"
- "zip1 v25.16b, v18.16b, v19.16b\n"
+ "ldr q28, [x17], #0x10\n"
+ "ldr q6, [x14], #0x10\n"
+ "sub x16, x16, #0x20\n"
+ "ldr q1, [x13], #0x10\n"
+ "ldr q30, [x12], #0x10\n"
+ "cmp x16, #0x20\n"
+ "ldr q21, [x11], #0x10\n"
+ "ldr q9, [x10], #0x10\n"
+ "ldr q15, [x9], #0x10\n"
+ "ldr q17, [x28], #0x10\n"
+ "ldr q19, [x27], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
+ "zip1 v4.16b, v28.16b, v1.16b\n"
+ "zip1 v3.16b, v6.16b, v30.16b\n"
+ "ldr q26, [x25], #0x10\n"
+ "ldr q11, [x24], #0x10\n"
+ "zip2 v29.16b, v28.16b, v1.16b\n"
+ "zip2 v16.16b, v6.16b, v30.16b\n"
+ "ldr q25, [x23], #0x10\n"
+ "ldr q7, [x22], #0x10\n"
+ "zip1 v0.16b, v21.16b, v15.16b\n"
+ "zip1 v1.16b, v9.16b, v17.16b\n"
+ "ldr q23, [x21], #0x10\n"
+ "ldr q28, [x20], #0x10\n"
+ "zip2 v20.16b, v21.16b, v15.16b\n"
+ "zip2 v24.16b, v9.16b, v17.16b\n"
"ldr q2, [x17], #0x10\n"
- "ldr q3, [x16], #0x10\n"
- "zip2 v7.16b, v28.16b, v17.16b\n"
- "zip2 v8.16b, v18.16b, v19.16b\n"
- "ldr q22, [x15], #0x10\n"
"ldr q27, [x14], #0x10\n"
- "zip1 v19.16b, v2.16b, v22.16b\n"
- "zip1 v17.16b, v3.16b, v27.16b\n"
- "ldr q6, [x13], #0x10\n"
- "ldr q4, [x12], #0x10\n"
- "zip2 v24.16b, v2.16b, v22.16b\n"
- "zip2 v22.16b, v3.16b, v27.16b\n"
- "ldr q14, [x11], #0x10\n"
- "ldr q18, [x10], #0x10\n"
- "zip1 v29.16b, v6.16b, v14.16b\n"
- "zip1 v31.16b, v4.16b, v18.16b\n"
- "ldr q2, [x9], #0x10\n"
- "ldr q10, [x28], #0x10\n"
- "zip2 v28.16b, v6.16b, v14.16b\n"
- "zip2 v27.16b, v4.16b, v18.16b\n"
- "ldr q6, [x27], #0x10\n"
- "ldr q5, [x26], #0x10\n"
- "zip1 v14.16b, v2.16b, v6.16b\n"
- "zip1 v4.16b, v10.16b, v5.16b\n"
- "ldr q3, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v6.16b, v2.16b, v6.16b\n"
- "zip2 v10.16b, v10.16b, v5.16b\n"
- "ldr q5, [x22], #0x10\n"
- "zip1 v2.16b, v3.16b, v5.16b\n"
- "zip2 v3.16b, v3.16b, v5.16b\n"
- "zip1 v5.16b, v9.16b, v20.16b\n"
- "zip2 v20.16b, v9.16b, v20.16b\n"
- "ldr q9, [x20], #0x10\n"
- "str q5, [x21, #0x0]\n"
- "zip1 v5.16b, v18.16b, v9.16b\n"
- "zip2 v9.16b, v18.16b, v9.16b\n"
- "str q20, [x21, #0x10]\n"
- "zip1 v18.16b, v30.16b, v12.16b\n"
- "zip2 v30.16b, v30.16b, v12.16b\n"
- "str q18, [x21, #0x20]\n"
- "zip1 v20.16b, v19.16b, v17.16b\n"
- "zip2 v12.16b, v19.16b, v17.16b\n"
- "str q30, [x21, #0x30]\n"
- "zip1 v18.16b, v24.16b, v22.16b\n"
- "zip2 v17.16b, v24.16b, v22.16b\n"
- "str q20, [x21, #0x40]\n"
- "zip1 v30.16b, v13.16b, v16.16b\n"
- "zip2 v24.16b, v13.16b, v16.16b\n"
- "str q12, [x21, #0x50]\n"
- "zip1 v22.16b, v15.16b, v11.16b\n"
- "zip2 v20.16b, v15.16b, v11.16b\n"
- "str q18, [x21, #0x60]\n"
- "zip1 v19.16b, v29.16b, v31.16b\n"
- "zip2 v18.16b, v29.16b, v31.16b\n"
- "str q17, [x21, #0x70]\n"
- "zip1 v17.16b, v28.16b, v27.16b\n"
- "zip2 v16.16b, v28.16b, v27.16b\n"
- "str q30, [x21, #0x80]\n"
- "zip1 v31.16b, v1.16b, v21.16b\n"
- "zip2 v1.16b, v1.16b, v21.16b\n"
- "str q24, [x21, #0x90]\n"
- "zip1 v30.16b, v0.16b, v26.16b\n"
- "zip2 v29.16b, v0.16b, v26.16b\n"
- "str q22, [x21, #0xa0]\n"
- "zip1 v28.16b, v14.16b, v4.16b\n"
- "zip2 v27.16b, v14.16b, v4.16b\n"
- "str q20, [x21, #0xb0]\n"
- "zip1 v26.16b, v6.16b, v10.16b\n"
- "zip2 v24.16b, v6.16b, v10.16b\n"
- "str q19, [x21, #0xc0]\n"
- "zip1 v14.16b, v23.16b, v25.16b\n"
- "zip2 v22.16b, v23.16b, v25.16b\n"
- "str q18, [x21, #0xd0]\n"
- "zip1 v21.16b, v7.16b, v8.16b\n"
- "zip2 v20.16b, v7.16b, v8.16b\n"
- "str q17, [x21, #0xe0]\n"
- "zip1 v19.16b, v2.16b, v5.16b\n"
- "zip2 v18.16b, v2.16b, v5.16b\n"
- "str q16, [x21, #0xf0]\n"
- "zip1 v17.16b, v3.16b, v9.16b\n"
- "zip2 v16.16b, v3.16b, v9.16b\n"
- "str q31, [x21, #0x100]\n"
- "str q1, [x21, #0x110]\n"
- "str q30, [x21, #0x120]\n"
- "str q29, [x21, #0x130]\n"
- "str q28, [x21, #0x140]\n"
- "str q27, [x21, #0x150]\n"
- "str q26, [x21, #0x160]\n"
- "str q24, [x21, #0x170]\n"
- "str q14, [x21, #0x180]\n"
- "str q22, [x21, #0x190]\n"
- "str q21, [x21, #0x1a0]\n"
- "str q20, [x21, #0x1b0]\n"
- "str q19, [x21, #0x1c0]\n"
- "str q18, [x21, #0x1d0]\n"
- "str q17, [x21, #0x1e0]\n"
- "str q16, [x21, #0x1f0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 v15.16b, v19.16b, v26.16b\n"
+ "zip1 v21.16b, v22.16b, v11.16b\n"
+ "ldr q18, [x13], #0x10\n"
+ "ldr q8, [x12], #0x10\n"
+ "zip2 v14.16b, v19.16b, v26.16b\n"
+ "zip2 v13.16b, v22.16b, v11.16b\n"
+ "ldr q19, [x11], #0x10\n"
+ "ldr q10, [x10], #0x10\n"
+ "zip1 v12.16b, v25.16b, v23.16b\n"
+ "zip1 v5.16b, v7.16b, v28.16b\n"
+ "ldr q6, [x9], #0x10\n"
+ "ldr q30, [x28], #0x10\n"
+ "zip2 v31.16b, v25.16b, v23.16b\n"
+ "zip2 v11.16b, v7.16b, v28.16b\n"
+ "ldr q25, [x27], #0x10\n"
+ "ldr q7, [x26], #0x10\n"
+ "zip1 v23.16b, v2.16b, v18.16b\n"
+ "zip1 v17.16b, v27.16b, v8.16b\n"
+ "ldr q26, [x25], #0x10\n"
+ "ldr q9, [x24], #0x10\n"
+ "zip2 v18.16b, v2.16b, v18.16b\n"
+ "zip2 v22.16b, v27.16b, v8.16b\n"
+ "ldr q28, [x23], #0x10\n"
+ "ldr q8, [x22], #0x10\n"
+ "zip1 v2.16b, v19.16b, v6.16b\n"
+ "zip1 v27.16b, v10.16b, v30.16b\n"
+ "zip2 v6.16b, v19.16b, v6.16b\n"
+ "ldr q19, [x21], #0x10\n"
+ "zip2 v30.16b, v10.16b, v30.16b\n"
+ "zip1 v10.16b, v25.16b, v26.16b\n"
+ "zip2 v26.16b, v25.16b, v26.16b\n"
+ "zip1 v25.16b, v7.16b, v9.16b\n"
+ "zip2 v9.16b, v7.16b, v9.16b\n"
+ "zip1 v7.16b, v28.16b, v19.16b\n"
+ "zip2 v19.16b, v28.16b, v19.16b\n"
+ "zip1 v28.16b, v4.16b, v3.16b\n"
+ "zip2 v3.16b, v4.16b, v3.16b\n"
+ "ldr q4, [x20], #0x10\n"
+ "str q28, [x15, #0x0]\n"
+ "zip1 v28.16b, v29.16b, v16.16b\n"
+ "zip2 v16.16b, v29.16b, v16.16b\n"
+ "zip1 v29.16b, v23.16b, v17.16b\n"
+ "zip2 v23.16b, v23.16b, v17.16b\n"
+ "zip1 v17.16b, v8.16b, v4.16b\n"
+ "zip2 v4.16b, v8.16b, v4.16b\n"
+ "str q3, [x15, #0x10]\n"
+ "zip1 v8.16b, v18.16b, v22.16b\n"
+ "zip2 v22.16b, v18.16b, v22.16b\n"
+ "str q28, [x15, #0x20]\n"
+ "zip1 v28.16b, v0.16b, v1.16b\n"
+ "zip2 v3.16b, v0.16b, v1.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "zip1 v16.16b, v20.16b, v24.16b\n"
+ "zip2 v18.16b, v20.16b, v24.16b\n"
+ "str q29, [x15, #0x40]\n"
+ "zip1 v1.16b, v2.16b, v27.16b\n"
+ "zip2 v24.16b, v2.16b, v27.16b\n"
+ "str q23, [x15, #0x50]\n"
+ "zip1 v2.16b, v6.16b, v30.16b\n"
+ "zip2 v6.16b, v6.16b, v30.16b\n"
+ "str q8, [x15, #0x60]\n"
+ "zip1 v0.16b, v15.16b, v21.16b\n"
+ "zip2 v30.16b, v15.16b, v21.16b\n"
+ "str q22, [x15, #0x70]\n"
+ "zip1 v29.16b, v14.16b, v13.16b\n"
+ "zip2 v23.16b, v14.16b, v13.16b\n"
+ "str q28, [x15, #0x80]\n"
+ "zip1 v27.16b, v10.16b, v25.16b\n"
+ "zip2 v8.16b, v10.16b, v25.16b\n"
+ "str q3, [x15, #0x90]\n"
+ "zip1 v15.16b, v26.16b, v9.16b\n"
+ "zip2 v9.16b, v26.16b, v9.16b\n"
+ "str q16, [x15, #0xa0]\n"
+ "zip1 v28.16b, v12.16b, v5.16b\n"
+ "zip2 v22.16b, v12.16b, v5.16b\n"
+ "str q18, [x15, #0xb0]\n"
+ "zip1 v21.16b, v31.16b, v11.16b\n"
+ "zip2 v20.16b, v31.16b, v11.16b\n"
+ "str q1, [x15, #0xc0]\n"
+ "zip1 v25.16b, v7.16b, v17.16b\n"
+ "zip2 v18.16b, v7.16b, v17.16b\n"
+ "str q24, [x15, #0xd0]\n"
+ "zip1 v17.16b, v19.16b, v4.16b\n"
+ "zip2 v16.16b, v19.16b, v4.16b\n"
+ "str q2, [x15, #0xe0]\n"
+ "str q6, [x15, #0xf0]\n"
+ "str q0, [x15, #0x100]\n"
+ "str q30, [x15, #0x110]\n"
+ "str q29, [x15, #0x120]\n"
+ "str q23, [x15, #0x130]\n"
+ "str q27, [x15, #0x140]\n"
+ "str q8, [x15, #0x150]\n"
+ "str q15, [x15, #0x160]\n"
+ "str q9, [x15, #0x170]\n"
+ "str q28, [x15, #0x180]\n"
+ "str q22, [x15, #0x190]\n"
+ "str q21, [x15, #0x1a0]\n"
+ "str q20, [x15, #0x1b0]\n"
+ "str q25, [x15, #0x1c0]\n"
+ "str q18, [x15, #0x1d0]\n"
+ "str q17, [x15, #0x1e0]\n"
+ "str q16, [x15, #0x1f0]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
- "cmp x24, #0x10\n"
+ "cbz x16, 10f\n"
+ "cmp x16, #0x10\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x15, #0x0]\n"
+ "str q16, [x15, #0x10]\n"
+ "str q16, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "str q16, [x15, #0x40]\n"
+ "str q16, [x15, #0x50]\n"
+ "str q16, [x15, #0x60]\n"
+ "str q16, [x15, #0x70]\n"
+ "str q16, [x15, #0x80]\n"
+ "str q16, [x15, #0x90]\n"
+ "str q16, [x15, #0xa0]\n"
+ "str q16, [x15, #0xb0]\n"
+ "str q16, [x15, #0xc0]\n"
+ "str q16, [x15, #0xd0]\n"
+ "str q16, [x15, #0xe0]\n"
+ "str q16, [x15, #0xf0]\n"
+ "str q16, [x15, #0x100]\n"
+ "str q16, [x15, #0x110]\n"
+ "str q16, [x15, #0x120]\n"
+ "str q16, [x15, #0x130]\n"
+ "str q16, [x15, #0x140]\n"
+ "str q16, [x15, #0x150]\n"
+ "str q16, [x15, #0x160]\n"
+ "str q16, [x15, #0x170]\n"
+ "str q16, [x15, #0x180]\n"
+ "str q16, [x15, #0x190]\n"
+ "str q16, [x15, #0x1a0]\n"
+ "str q16, [x15, #0x1b0]\n"
+ "str q16, [x15, #0x1c0]\n"
+ "str q16, [x15, #0x1d0]\n"
+ "str q16, [x15, #0x1e0]\n"
+ "str q16, [x15, #0x1f0]\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
- "ldr q21, [x17], #0x10\n"
- "ldr q20, [x16], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v3.16b, v21.16b, v17.16b\n"
- "zip1 v2.16b, v20.16b, v16.16b\n"
- "ldr q19, [x13], #0x10\n"
- "ldr q18, [x12], #0x10\n"
- "zip2 v1.16b, v21.16b, v17.16b\n"
- "zip2 v0.16b, v20.16b, v16.16b\n"
- "ldr q17, [x11], #0x10\n"
- "ldr q16, [x10], #0x10\n"
- "zip1 v31.16b, v19.16b, v17.16b\n"
- "zip1 v30.16b, v18.16b, v16.16b\n"
- "ldr q24, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v29.16b, v19.16b, v17.16b\n"
- "zip2 v23.16b, v18.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v22.16b, v24.16b, v17.16b\n"
- "zip1 v21.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v28.16b, v24.16b, v17.16b\n"
- "zip2 v20.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x17], #0x10\n"
+ "ldr q18, [x14], #0x10\n"
+ "sub x16, x16, #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "ldr q16, [x12], #0x10\n"
+ "cmp x16, #0x10\n"
+ "ldr q24, [x11], #0x10\n"
+ "ldr q28, [x10], #0x10\n"
+ "ldr q23, [x9], #0x10\n"
+ "ldr q22, [x28], #0x10\n"
+ "ldr q27, [x27], #0x10\n"
+ "ldr q26, [x26], #0x10\n"
+ "zip1 v5.16b, v19.16b, v17.16b\n"
+ "zip1 v4.16b, v18.16b, v16.16b\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v3.16b, v19.16b, v17.16b\n"
+ "zip2 v2.16b, v18.16b, v16.16b\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v1.16b, v24.16b, v23.16b\n"
+ "zip1 v25.16b, v28.16b, v22.16b\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "zip1 v27.16b, v19.16b, v17.16b\n"
- "zip1 v26.16b, v18.16b, v16.16b\n"
- "zip2 v25.16b, v19.16b, v17.16b\n"
- "zip2 v24.16b, v18.16b, v16.16b\n"
- "zip1 v16.16b, v3.16b, v2.16b\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
+ "zip2 v23.16b, v28.16b, v22.16b\n"
+ "zip1 v0.16b, v27.16b, v21.16b\n"
+ "zip1 v22.16b, v26.16b, v20.16b\n"
+ "zip2 v31.16b, v27.16b, v21.16b\n"
+ "zip2 v30.16b, v26.16b, v20.16b\n"
+ "zip1 v29.16b, v19.16b, v17.16b\n"
+ "zip1 v28.16b, v18.16b, v16.16b\n"
+ "zip2 v27.16b, v19.16b, v17.16b\n"
+ "zip2 v26.16b, v18.16b, v16.16b\n"
+ "zip1 v21.16b, v5.16b, v4.16b\n"
+ "zip2 v20.16b, v5.16b, v4.16b\n"
+ "zip1 v19.16b, v3.16b, v2.16b\n"
"zip2 v18.16b, v3.16b, v2.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v1.16b, v0.16b\n"
- "zip2 v16.16b, v1.16b, v0.16b\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "zip1 v19.16b, v31.16b, v30.16b\n"
- "zip2 v18.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x30]\n"
- "zip1 v17.16b, v29.16b, v23.16b\n"
- "zip2 v16.16b, v29.16b, v23.16b\n"
- "str q19, [x21, #0x80]\n"
- "zip1 v23.16b, v22.16b, v21.16b\n"
- "zip2 v22.16b, v22.16b, v21.16b\n"
- "str q18, [x21, #0x90]\n"
- "zip1 v21.16b, v28.16b, v20.16b\n"
- "zip2 v20.16b, v28.16b, v20.16b\n"
- "str q17, [x21, #0xa0]\n"
- "zip1 v19.16b, v27.16b, v26.16b\n"
- "zip2 v18.16b, v27.16b, v26.16b\n"
- "str q16, [x21, #0xb0]\n"
- "zip1 v17.16b, v25.16b, v24.16b\n"
- "zip2 v16.16b, v25.16b, v24.16b\n"
- "str q23, [x21, #0x100]\n"
- "str q22, [x21, #0x110]\n"
- "str q21, [x21, #0x120]\n"
- "str q20, [x21, #0x130]\n"
- "str q19, [x21, #0x180]\n"
- "str q18, [x21, #0x190]\n"
- "str q17, [x21, #0x1a0]\n"
- "str q16, [x21, #0x1b0]\n"
- "add x21, x21, #0x40\n"
+ "zip1 v17.16b, v1.16b, v25.16b\n"
+ "zip2 v16.16b, v1.16b, v25.16b\n"
+ "str q21, [x15, #0x0]\n"
+ "str q20, [x15, #0x10]\n"
+ "zip1 v25.16b, v24.16b, v23.16b\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
+ "str q19, [x15, #0x20]\n"
+ "zip1 v23.16b, v0.16b, v22.16b\n"
+ "zip2 v22.16b, v0.16b, v22.16b\n"
+ "str q18, [x15, #0x30]\n"
+ "zip1 v21.16b, v31.16b, v30.16b\n"
+ "zip2 v20.16b, v31.16b, v30.16b\n"
+ "str q17, [x15, #0x80]\n"
+ "zip1 v19.16b, v29.16b, v28.16b\n"
+ "zip2 v18.16b, v29.16b, v28.16b\n"
+ "str q16, [x15, #0x90]\n"
+ "zip1 v17.16b, v27.16b, v26.16b\n"
+ "zip2 v16.16b, v27.16b, v26.16b\n"
+ "str q25, [x15, #0xa0]\n"
+ "str q24, [x15, #0xb0]\n"
+ "str q23, [x15, #0x100]\n"
+ "str q22, [x15, #0x110]\n"
+ "str q21, [x15, #0x120]\n"
+ "str q20, [x15, #0x130]\n"
+ "str q19, [x15, #0x180]\n"
+ "str q18, [x15, #0x190]\n"
+ "str q17, [x15, #0x1a0]\n"
+ "str q16, [x15, #0x1b0]\n"
+ "add x15, x15, #0x40\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
- "cmp x24, #0x4\n"
+ "cmp x16, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
- "ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x13], #0x4\n"
- "ldr s18, [x12], #0x4\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr s17, [x11], #0x4\n"
- "ldr s16, [x10], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
+ "ldr s23, [x17], #0x4\n"
+ "ldr s21, [x14], #0x4\n"
+ "sub x16, x16, #0x4\n"
+ "ldr s20, [x13], #0x4\n"
+ "ldr s19, [x12], #0x4\n"
+ "cmp x16, #0x4\n"
+ "ldr s22, [x11], #0x4\n"
+ "ldr s18, [x10], #0x4\n"
+ "ldr s17, [x9], #0x4\n"
+ "ldr s16, [x28], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s26, [x26], #0x4\n"
+ "zip1 v25.16b, v23.16b, v20.16b\n"
+ "zip1 v21.16b, v21.16b, v19.16b\n"
"ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s19, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "zip1 v22.16b, v22.16b, v17.16b\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "ldr s18, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
- "zip1 v17.16b, v20.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "str q22, [x21, #0x0]\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- "str q21, [x21, #0x80]\n"
- "str q18, [x21, #0x100]\n"
- "str q16, [x21, #0x180]\n"
- "add x21, x21, #0x10\n"
+ "zip1 v21.16b, v25.16b, v21.16b\n"
+ "zip1 v20.16b, v27.16b, v20.16b\n"
+ "zip1 v19.16b, v26.16b, v19.16b\n"
+ "zip1 v17.16b, v22.16b, v17.16b\n"
+ "zip1 v18.16b, v24.16b, v18.16b\n"
+ "zip1 v16.16b, v23.16b, v16.16b\n"
+ "str q21, [x15, #0x0]\n"
+ "str q17, [x15, #0x80]\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v16.16b\n"
+ "str q17, [x15, #0x100]\n"
+ "str q16, [x15, #0x180]\n"
+ "add x15, x15, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
- "cmp x24, #0x1\n"
+ "cmp x16, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x13], #0x1\n"
- "ldr b18, [x12], #0x1\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr b17, [x11], #0x1\n"
- "ldr b16, [x10], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
+ "ldr b23, [x17], #0x1\n"
+ "ldr b21, [x14], #0x1\n"
+ "sub x16, x16, #0x1\n"
+ "ldr b20, [x13], #0x1\n"
+ "ldr b19, [x12], #0x1\n"
+ "cmp x16, #0x1\n"
+ "ldr b22, [x11], #0x1\n"
+ "ldr b18, [x10], #0x1\n"
+ "ldr b17, [x9], #0x1\n"
+ "ldr b16, [x28], #0x1\n"
+ "ldr b27, [x27], #0x1\n"
+ "ldr b26, [x26], #0x1\n"
+ "zip1 v25.16b, v23.16b, v20.16b\n"
+ "zip1 v21.16b, v21.16b, v19.16b\n"
"ldr b20, [x25], #0x1\n"
- "ldr b19, [x23], #0x1\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr b17, [x22], #0x1\n"
+ "ldr b19, [x24], #0x1\n"
+ "ldr b24, [x23], #0x1\n"
+ "ldr b23, [x22], #0x1\n"
+ "zip1 v22.16b, v22.16b, v17.16b\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "ldr b18, [x21], #0x1\n"
"ldr b16, [x20], #0x1\n"
- "zip1 v17.16b, v20.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "str s22, [x21, #0x0]\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- "str s21, [x21, #0x80]\n"
- "str s18, [x21, #0x100]\n"
- "str s16, [x21, #0x180]\n"
- "add x21, x21, #0x4\n"
+ "zip1 v21.16b, v25.16b, v21.16b\n"
+ "zip1 v20.16b, v27.16b, v20.16b\n"
+ "zip1 v19.16b, v26.16b, v19.16b\n"
+ "zip1 v17.16b, v22.16b, v17.16b\n"
+ "zip1 v18.16b, v24.16b, v18.16b\n"
+ "zip1 v16.16b, v23.16b, v16.16b\n"
+ "str s21, [x15, #0x0]\n"
+ "str s17, [x15, #0x80]\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v16.16b\n"
+ "str s17, [x15, #0x100]\n"
+ "str s16, [x15, #0x180]\n"
+ "add x15, x15, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x10\n"
"add %x[out], %x[out], #0x200\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x14, x15, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x14, %x[in_stride]\n"
- "csel x14, x14, %x[pad_row], GT\n"
- "csel x15, x15, %x[pad_row], GE\n"
+ "mov x15, %x[out]\n"
+ "add x14, x17, %x[in_stride]\n"
+ "add x13, x14, %x[in_stride]\n"
+ "add x12, x13, %x[in_stride]\n"
+ "csel x13, x13, %x[pad_row], GE\n"
+ "add %x[in], x12, %x[in_stride]\n"
+ "csel x12, x12, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x16, x16, %x[pad_row], GT\n"
- "cmp x20, #0x20\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "blt 13f\n"
- "12:" // Tail row loop: Column loop
- "ldr q19, [x17], #0x10\n"
- "ldr q18, [x16], #0x10\n"
+ "csel x14, x14, %x[pad_row], GT\n"
+ "cmp x20, #0x20\n"
+ "blt 14f\n"
+ "13:" // Tail row loop: Column loop
+ "ldr q20, [x17], #0x10\n"
+ "ldr q19, [x14], #0x10\n"
"sub x20, x20, #0x20\n"
+ "ldr q18, [x13], #0x10\n"
+ "ldr q17, [x12], #0x10\n"
"cmp x20, #0x20\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v25.16b, v19.16b, v17.16b\n"
- "zip1 v24.16b, v18.16b, v16.16b\n"
- "ldr q22, [x17], #0x10\n"
- "ldr q21, [x16], #0x10\n"
- "zip2 v20.16b, v19.16b, v17.16b\n"
- "zip2 v19.16b, v18.16b, v16.16b\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v23.16b, v22.16b, v17.16b\n"
- "zip1 v18.16b, v21.16b, v16.16b\n"
- "zip2 v22.16b, v22.16b, v17.16b\n"
- "zip2 v21.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v25.16b, v24.16b\n"
- "zip2 v17.16b, v25.16b, v24.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v16.16b, v20.16b, v19.16b\n"
+ "ldr q24, [x17], #0x10\n"
+ "ldr q25, [x14], #0x10\n"
+ "ldr q23, [x13], #0x10\n"
+ "ldr q16, [x12], #0x10\n"
+ "zip1 v22.16b, v20.16b, v18.16b\n"
+ "zip1 v21.16b, v19.16b, v17.16b\n"
+ "zip2 v20.16b, v20.16b, v18.16b\n"
+ "zip2 v19.16b, v19.16b, v17.16b\n"
+ "zip1 v18.16b, v24.16b, v23.16b\n"
+ "zip1 v17.16b, v25.16b, v16.16b\n"
+ "zip2 v24.16b, v24.16b, v23.16b\n"
+ "zip2 v16.16b, v25.16b, v16.16b\n"
+ "zip1 v23.16b, v22.16b, v21.16b\n"
+ "zip2 v22.16b, v22.16b, v21.16b\n"
+ "zip1 v21.16b, v20.16b, v19.16b\n"
"zip2 v20.16b, v20.16b, v19.16b\n"
- "str q17, [x21, #0x10]\n"
- "zip1 v19.16b, v23.16b, v18.16b\n"
- "zip2 v18.16b, v23.16b, v18.16b\n"
- "str q16, [x21, #0x20]\n"
- "zip1 v17.16b, v22.16b, v21.16b\n"
- "zip2 v16.16b, v22.16b, v21.16b\n"
- "str q20, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Column loop skip
+ "zip1 v19.16b, v18.16b, v17.16b\n"
+ "zip2 v18.16b, v18.16b, v17.16b\n"
+ "zip1 v17.16b, v24.16b, v16.16b\n"
+ "zip2 v16.16b, v24.16b, v16.16b\n"
+ "str q23, [x15, #0x0]\n"
+ "str q22, [x15, #0x10]\n"
+ "str q21, [x15, #0x20]\n"
+ "str q20, [x15, #0x30]\n"
+ "str q19, [x15, #0x40]\n"
+ "str q18, [x15, #0x50]\n"
+ "str q17, [x15, #0x60]\n"
+ "str q16, [x15, #0x70]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x10\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 16 loop: loop
+ "movi v16.16b, #0x0\n"
+ "str q16, [x15, #0x0]\n"
+ "str q16, [x15, #0x10]\n"
+ "str q16, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "str q16, [x15, #0x40]\n"
+ "str q16, [x15, #0x50]\n"
+ "str q16, [x15, #0x60]\n"
+ "str q16, [x15, #0x70]\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: width 16 loop: loop
"ldr q20, [x17], #0x10\n"
- "ldr q21, [x16], #0x10\n"
+ "ldr q21, [x14], #0x10\n"
"sub x20, x20, #0x10\n"
+ "ldr q19, [x13], #0x10\n"
+ "ldr q16, [x12], #0x10\n"
"cmp x20, #0x10\n"
- "ldr q19, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
"zip1 v18.16b, v20.16b, v19.16b\n"
"zip1 v17.16b, v21.16b, v16.16b\n"
"zip2 v20.16b, v20.16b, v19.16b\n"
- "zip2 v19.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v18.16b, v17.16b\n"
+ "zip2 v16.16b, v21.16b, v16.16b\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
"zip2 v18.16b, v18.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v20.16b, v19.16b\n"
- "zip2 v16.16b, v20.16b, v19.16b\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x40\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 16 loop: skip
+ "zip1 v17.16b, v20.16b, v16.16b\n"
+ "zip2 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [x15, #0x0]\n"
+ "str q18, [x15, #0x10]\n"
+ "str q17, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, #0x40\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 16 loop: skip
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
+ "ldr s18, [x14], #0x4\n"
"sub x20, x20, #0x4\n"
+ "ldr s17, [x13], #0x4\n"
+ "ldr s16, [x12], #0x4\n"
"cmp x20, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
+ "ldr b18, [x14], #0x1\n"
"sub x20, x20, #0x1\n"
+ "ldr b17, [x13], #0x1\n"
+ "ldr b16, [x12], #0x1\n"
"cmp x20, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str s16, [x15, #0x0]\n"
+ "add x15, x15, #0x4\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp
index 237536697c..f2c6b692a7 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_32_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,392 +40,422 @@ void a64_transpose_interleave_32_2x2(uint16_t *out, const uint16_t *in, size_t w
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 12f\n"
+ "blt 13f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x40\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q14, [x25], #0x10\n"
- "ldr q10, [x23], #0x10\n"
- "sub x24, x24, #0x40\n"
- "zip1 v12.8h, v14.8h, v10.8h\n"
- "ldr q5, [x22], #0x10\n"
- "ldr q3, [x20], #0x10\n"
- "zip2 v31.8h, v14.8h, v10.8h\n"
- "zip1 v19.8h, v5.8h, v3.8h\n"
- "ldr q27, [x25], #0x10\n"
- "ldr q25, [x23], #0x10\n"
- "zip1 v11.8h, v27.8h, v25.8h\n"
- "zip2 v24.8h, v27.8h, v25.8h\n"
+ "ldr q8, [x25], #0x10\n"
"ldr q6, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
- "zip2 v15.8h, v5.8h, v3.8h\n"
- "zip1 v18.8h, v6.8h, v29.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q9, [x23], #0x10\n"
- "zip1 v0.8h, v17.8h, v9.8h\n"
- "zip2 v9.8h, v17.8h, v9.8h\n"
- "ldr q21, [x22], #0x10\n"
+ "sub x24, x24, #0x40\n"
+ "ldr q2, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
- "zip2 v8.8h, v6.8h, v29.8h\n"
- "zip1 v30.8h, v21.8h, v20.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q5, [x23], #0x10\n"
- "zip1 v13.8h, v17.8h, v5.8h\n"
- "zip2 v25.8h, v17.8h, v5.8h\n"
- "ldr q7, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
- "zip2 v27.8h, v21.8h, v20.8h\n"
- "zip1 v14.8h, v7.8h, v29.8h\n"
- "ldr q28, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip2 v1.8h, v7.8h, v29.8h\n"
"cmp x24, #0x40\n"
- "ldr q10, [x22], #0x10\n"
- "ldr q21, [x20], #0x10\n"
- "zip1 v16.8h, v28.8h, v17.8h\n"
- "zip2 v17.8h, v28.8h, v17.8h\n"
- "ldr q5, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip1 v3.8h, v5.8h, v20.8h\n"
- "zip2 v7.8h, v5.8h, v20.8h\n"
- "ldr q22, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
- "zip1 v2.8h, v10.8h, v21.8h\n"
- "zip2 v5.8h, v10.8h, v21.8h\n"
"ldr q21, [x25], #0x10\n"
- "ldr q20, [x23], #0x10\n"
- "zip1 v4.8h, v21.8h, v20.8h\n"
- "zip2 v28.8h, v21.8h, v20.8h\n"
- "ldr q6, [x22], #0x10\n"
- "ldr q10, [x20], #0x10\n"
- "zip1 v26.8h, v22.8h, v29.8h\n"
- "zip2 v20.8h, v22.8h, v29.8h\n"
- "ldr q29, [x25], #0x10\n"
- "ldr q23, [x23], #0x10\n"
- "zip1 v21.8h, v29.8h, v23.8h\n"
- "zip2 v23.8h, v29.8h, v23.8h\n"
+ "ldr q28, [x22], #0x10\n"
+ "ldr q23, [x21], #0x10\n"
+ "zip1 v11.8h, v8.8h, v6.8h\n"
+ "zip2 v8.8h, v8.8h, v6.8h\n"
+ "ldr q1, [x20], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "zip1 v12.8h, v2.8h, v20.8h\n"
+ "zip2 v14.8h, v2.8h, v20.8h\n"
+ "ldr q24, [x22], #0x10\n"
+ "ldr q20, [x21], #0x10\n"
+ "zip1 v13.8h, v21.8h, v28.8h\n"
+ "zip2 v15.8h, v21.8h, v28.8h\n"
+ "ldr q17, [x20], #0x10\n"
+ "ldr q19, [x25], #0x10\n"
+ "zip1 v6.8h, v23.8h, v1.8h\n"
+ "zip2 v28.8h, v23.8h, v1.8h\n"
+ "ldr q16, [x22], #0x10\n"
+ "ldr q30, [x21], #0x10\n"
+ "zip1 v7.8h, v18.8h, v24.8h\n"
+ "zip2 v9.8h, v18.8h, v24.8h\n"
+ "ldr q18, [x20], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
+ "zip1 v5.8h, v20.8h, v17.8h\n"
+ "zip2 v3.8h, v20.8h, v17.8h\n"
+ "ldr q23, [x22], #0x10\n"
+ "ldr q26, [x21], #0x10\n"
+ "zip1 v25.8h, v19.8h, v16.8h\n"
+ "zip2 v10.8h, v19.8h, v16.8h\n"
+ "ldr q16, [x20], #0x10\n"
+ "ldr q19, [x25], #0x10\n"
+ "zip1 v2.8h, v30.8h, v18.8h\n"
+ "zip2 v4.8h, v30.8h, v18.8h\n"
"ldr q22, [x22], #0x10\n"
- "ldr q29, [x20], #0x10\n"
- "str q12, [x21, #0x0]\n"
- "zip1 v12.8h, v6.8h, v10.8h\n"
- "str q31, [x21, #0x10]\n"
- "zip2 v6.8h, v6.8h, v10.8h\n"
- "zip1 v31.8h, v22.8h, v29.8h\n"
- "str q11, [x21, #0x20]\n"
- "zip2 v11.8h, v22.8h, v29.8h\n"
- "str q24, [x21, #0x30]\n"
- "str q0, [x21, #0x40]\n"
- "str q9, [x21, #0x50]\n"
- "str q13, [x21, #0x60]\n"
- "str q25, [x21, #0x70]\n"
- "str q19, [x21, #0x80]\n"
- "str q15, [x21, #0x90]\n"
- "str q18, [x21, #0xa0]\n"
- "str q8, [x21, #0xb0]\n"
- "str q30, [x21, #0xc0]\n"
- "str q27, [x21, #0xd0]\n"
- "str q14, [x21, #0xe0]\n"
- "str q1, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q16, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q3, [x21, #0x20]\n"
- "str q7, [x21, #0x30]\n"
- "str q4, [x21, #0x40]\n"
- "str q28, [x21, #0x50]\n"
- "str q21, [x21, #0x60]\n"
- "str q23, [x21, #0x70]\n"
- "str q2, [x21, #0x80]\n"
- "str q5, [x21, #0x90]\n"
- "str q26, [x21, #0xa0]\n"
- "str q20, [x21, #0xb0]\n"
- "str q12, [x21, #0xc0]\n"
- "str q6, [x21, #0xd0]\n"
- "str q31, [x21, #0xe0]\n"
- "str q11, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "ldr q17, [x21], #0x10\n"
+ "zip1 v27.8h, v21.8h, v23.8h\n"
+ "zip2 v20.8h, v21.8h, v23.8h\n"
+ "ldr q1, [x20], #0x10\n"
+ "ldr q23, [x25], #0x10\n"
+ "zip1 v0.8h, v26.8h, v16.8h\n"
+ "zip2 v31.8h, v26.8h, v16.8h\n"
+ "ldr q16, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
+ "zip1 v30.8h, v19.8h, v22.8h\n"
+ "zip2 v29.8h, v19.8h, v22.8h\n"
+ "ldr q19, [x20], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "zip1 v24.8h, v17.8h, v1.8h\n"
+ "zip2 v26.8h, v17.8h, v1.8h\n"
+ "ldr q17, [x22], #0x10\n"
+ "ldr q1, [x21], #0x10\n"
+ "zip1 v22.8h, v23.8h, v16.8h\n"
+ "zip2 v23.8h, v23.8h, v16.8h\n"
+ "ldr q16, [x20], #0x10\n"
+ "str q11, [x23, #0x0]\n"
+ "zip1 v11.8h, v21.8h, v19.8h\n"
+ "zip2 v21.8h, v21.8h, v19.8h\n"
+ "str q8, [x23, #0x10]\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "str q13, [x23, #0x20]\n"
+ "zip1 v17.8h, v1.8h, v16.8h\n"
+ "zip2 v16.8h, v1.8h, v16.8h\n"
+ "str q15, [x23, #0x30]\n"
+ "str q7, [x23, #0x40]\n"
+ "str q9, [x23, #0x50]\n"
+ "str q25, [x23, #0x60]\n"
+ "str q10, [x23, #0x70]\n"
+ "str q12, [x23, #0x80]\n"
+ "str q14, [x23, #0x90]\n"
+ "str q6, [x23, #0xa0]\n"
+ "str q28, [x23, #0xb0]\n"
+ "str q5, [x23, #0xc0]\n"
+ "str q3, [x23, #0xd0]\n"
+ "str q2, [x23, #0xe0]\n"
+ "str q4, [x23, #0xf0]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q27, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q30, [x23, #0x20]\n"
+ "str q29, [x23, #0x30]\n"
+ "str q22, [x23, #0x40]\n"
+ "str q23, [x23, #0x50]\n"
+ "str q19, [x23, #0x60]\n"
+ "str q18, [x23, #0x70]\n"
+ "str q0, [x23, #0x80]\n"
+ "str q31, [x23, #0x90]\n"
+ "str q24, [x23, #0xa0]\n"
+ "str q26, [x23, #0xb0]\n"
+ "str q11, [x23, #0xc0]\n"
+ "str q21, [x23, #0xd0]\n"
+ "str q17, [x23, #0xe0]\n"
+ "str q16, [x23, #0xf0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cmp x24, #0x20\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
"sub x24, x24, #0x20\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
"cmp x24, #0x20\n"
- "ldr q21, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
- "zip1 v1.8h, v17.8h, v16.8h\n"
- "zip2 v0.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v31.8h, v17.8h, v16.8h\n"
- "zip2 v30.8h, v17.8h, v16.8h\n"
- "ldr q20, [x22], #0x10\n"
+ "ldr q19, [x25], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q22, [x21], #0x10\n"
+ "zip1 v0.8h, v21.8h, v17.8h\n"
+ "zip2 v31.8h, v21.8h, v17.8h\n"
+ "ldr q17, [x20], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
+ "zip1 v30.8h, v20.8h, v16.8h\n"
+ "zip2 v29.8h, v20.8h, v16.8h\n"
+ "ldr q16, [x22], #0x10\n"
+ "ldr q20, [x21], #0x10\n"
+ "zip1 v28.8h, v19.8h, v18.8h\n"
+ "zip2 v27.8h, v19.8h, v18.8h\n"
"ldr q19, [x20], #0x10\n"
- "zip1 v29.8h, v21.8h, v18.8h\n"
- "zip2 v28.8h, v21.8h, v18.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v27.8h, v17.8h, v16.8h\n"
- "zip2 v26.8h, v17.8h, v16.8h\n"
- "ldr q25, [x22], #0x10\n"
- "ldr q18, [x20], #0x10\n"
- "zip1 v24.8h, v20.8h, v19.8h\n"
- "zip2 v23.8h, v20.8h, v19.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v22.8h, v17.8h, v16.8h\n"
- "zip2 v21.8h, v17.8h, v16.8h\n"
- "ldr q20, [x22], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "zip1 v26.8h, v22.8h, v17.8h\n"
+ "zip2 v25.8h, v22.8h, v17.8h\n"
+ "ldr q17, [x22], #0x10\n"
+ "ldr q24, [x21], #0x10\n"
+ "zip1 v23.8h, v21.8h, v16.8h\n"
+ "zip2 v22.8h, v21.8h, v16.8h\n"
"ldr q16, [x20], #0x10\n"
- "zip1 v19.8h, v25.8h, v18.8h\n"
- "zip2 v18.8h, v25.8h, v18.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q1, [x21, #0x0]\n"
- "str q0, [x21, #0x10]\n"
- "str q31, [x21, #0x20]\n"
- "str q30, [x21, #0x30]\n"
- "str q27, [x21, #0x40]\n"
- "str q26, [x21, #0x50]\n"
- "str q22, [x21, #0x60]\n"
- "str q21, [x21, #0x70]\n"
- "str q29, [x21, #0x80]\n"
- "str q28, [x21, #0x90]\n"
- "str q24, [x21, #0xa0]\n"
- "str q23, [x21, #0xb0]\n"
- "str q19, [x21, #0xc0]\n"
- "str q18, [x21, #0xd0]\n"
- "str q17, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "str q0, [x23, #0x0]\n"
+ "zip1 v19.8h, v18.8h, v17.8h\n"
+ "zip2 v18.8h, v18.8h, v17.8h\n"
+ "str q31, [x23, #0x10]\n"
+ "zip1 v17.8h, v24.8h, v16.8h\n"
+ "zip2 v16.8h, v24.8h, v16.8h\n"
+ "str q28, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q23, [x23, #0x40]\n"
+ "str q22, [x23, #0x50]\n"
+ "str q19, [x23, #0x60]\n"
+ "str q18, [x23, #0x70]\n"
+ "str q30, [x23, #0x80]\n"
+ "str q29, [x23, #0x90]\n"
+ "str q26, [x23, #0xa0]\n"
+ "str q25, [x23, #0xb0]\n"
+ "str q21, [x23, #0xc0]\n"
+ "str q20, [x23, #0xd0]\n"
+ "str q17, [x23, #0xe0]\n"
+ "str q16, [x23, #0xf0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
+ "cbz x24, 12f\n"
"cmp x24, #0x10\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "str q16, [x23, #0x80]\n"
+ "str q16, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "str q16, [x23, #0xc0]\n"
+ "str q16, [x23, #0xd0]\n"
+ "str q16, [x23, #0xe0]\n"
+ "str q16, [x23, #0xf0]\n"
"blt 7f\n"
"6:" // Main row loop: width 16 loop: loop
"ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"sub x24, x24, #0x10\n"
+ "ldr q20, [x21], #0x10\n"
+ "ldr q19, [x20], #0x10\n"
"cmp x24, #0x10\n"
- "ldr q24, [x22], #0x10\n"
- "ldr q23, [x20], #0x10\n"
- "zip1 v19.8h, v17.8h, v16.8h\n"
- "zip2 v18.8h, v17.8h, v16.8h\n"
- "ldr q17, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q23, [x21], #0x10\n"
"zip1 v22.8h, v17.8h, v16.8h\n"
- "zip2 v21.8h, v17.8h, v16.8h\n"
- "ldr q20, [x22], #0x10\n"
+ "zip2 v17.8h, v17.8h, v16.8h\n"
"ldr q16, [x20], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "zip1 v19.8h, v24.8h, v23.8h\n"
- "str q18, [x21, #0x10]\n"
- "zip2 v18.8h, v24.8h, v23.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "str q22, [x21, #0x20]\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q21, [x21, #0x30]\n"
- "str q19, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q17, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, #0x40\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "zip1 v19.8h, v24.8h, v18.8h\n"
+ "zip2 v18.8h, v24.8h, v18.8h\n"
+ "str q22, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "zip1 v17.8h, v23.8h, v16.8h\n"
+ "zip2 v16.8h, v23.8h, v16.8h\n"
+ "str q19, [x23, #0x20]\n"
+ "str q18, [x23, #0x30]\n"
+ "str q21, [x23, #0x80]\n"
+ "str q20, [x23, #0x90]\n"
+ "str q17, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, #0x40\n"
"bge 6b\n"
"7:" // Main row loop: width 16 loop: skip
"cmp x24, #0x4\n"
"blt 9f\n"
"8:" // Main row loop: width 4 loop: loop
"ldr d19, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d18, [x22], #0x8\n"
+ "ldr d18, [x21], #0x8\n"
"ldr d17, [x20], #0x8\n"
+ "cmp x24, #0x4\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
- "str q16, [x21, #0x0]\n"
+ "str q16, [x23, #0x0]\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "str q16, [x21, #0x80]\n"
- "add x21, x21, #0x10\n"
+ "str q16, [x23, #0x80]\n"
+ "add x23, x23, #0x10\n"
"bge 8b\n"
"9:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 11f\n"
"10:" // Main row loop: width 1 loop: loop
"ldr h19, [x25], #0x2\n"
- "ldr h16, [x23], #0x2\n"
+ "ldr h16, [x22], #0x2\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h18, [x22], #0x2\n"
+ "ldr h18, [x21], #0x2\n"
"ldr h17, [x20], #0x2\n"
+ "cmp x24, #0x1\n"
"zip1 v16.8h, v19.8h, v16.8h\n"
- "str s16, [x21, #0x0]\n"
+ "str s16, [x23, #0x0]\n"
"zip1 v16.8h, v18.8h, v17.8h\n"
- "str s16, [x21, #0x80]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x23, #0x80]\n"
+ "add x23, x23, #0x4\n"
"bge 10b\n"
"11:" // Main row loop: width 1 loop: skip
+ "12:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x100\n"
"bge 1b\n"
- "cbz %x[height], 24f\n"
- "12:" // Main loop skip
- "13:" // Tail row loop: Head
+ "cbz %x[height], 26f\n"
+ "13:" // Main loop skip
+ "14:" // Tail row loop: Head
"mov x25, %x[in]\n"
"mov x20, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "cmp x20, #0x40\n"
- "mov x21, %x[out]\n"
+ "mov x23, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
- "blt 15f\n"
- "14:" // Tail row loop: Unroll column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "cmp x20, #0x40\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: Unroll column loop
+ "ldr q21, [x25], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
"sub x20, x20, #0x40\n"
- "zip1 v0.8h, v18.8h, v17.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip2 v31.8h, v18.8h, v17.8h\n"
- "zip1 v30.8h, v19.8h, v16.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip2 v29.8h, v19.8h, v16.8h\n"
- "zip1 v28.8h, v18.8h, v17.8h\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip2 v27.8h, v18.8h, v17.8h\n"
- "zip1 v26.8h, v19.8h, v16.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip2 v25.8h, v19.8h, v16.8h\n"
+ "ldr q20, [x25], #0x10\n"
"cmp x20, #0x40\n"
+ "ldr q16, [x22], #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v24.8h, v18.8h, v17.8h\n"
- "zip2 v23.8h, v18.8h, v17.8h\n"
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip1 v22.8h, v19.8h, v16.8h\n"
- "zip2 v21.8h, v19.8h, v16.8h\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q22, [x25], #0x10\n"
+ "zip1 v0.8h, v21.8h, v17.8h\n"
+ "zip2 v31.8h, v21.8h, v17.8h\n"
+ "ldr q17, [x22], #0x10\n"
+ "ldr q21, [x25], #0x10\n"
+ "zip1 v30.8h, v20.8h, v16.8h\n"
+ "zip2 v29.8h, v20.8h, v16.8h\n"
+ "ldr q16, [x22], #0x10\n"
"ldr q20, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "str q0, [x21, #0x0]\n"
+ "zip1 v28.8h, v19.8h, v18.8h\n"
+ "zip2 v27.8h, v19.8h, v18.8h\n"
+ "ldr q19, [x22], #0x10\n"
+ "ldr q18, [x25], #0x10\n"
+ "zip1 v26.8h, v22.8h, v17.8h\n"
+ "zip2 v25.8h, v22.8h, v17.8h\n"
+ "ldr q17, [x22], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
+ "zip1 v23.8h, v21.8h, v16.8h\n"
+ "zip2 v22.8h, v21.8h, v16.8h\n"
+ "ldr q16, [x22], #0x10\n"
+ "str q0, [x23, #0x0]\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
+ "str q31, [x23, #0x10]\n"
"zip1 v19.8h, v18.8h, v17.8h\n"
- "str q31, [x21, #0x10]\n"
"zip2 v18.8h, v18.8h, v17.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "str q30, [x21, #0x20]\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q29, [x21, #0x30]\n"
- "str q28, [x21, #0x40]\n"
- "str q27, [x21, #0x50]\n"
- "str q26, [x21, #0x60]\n"
- "str q25, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q24, [x21, #0x0]\n"
- "str q23, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q21, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 14b\n"
- "15:" // Tail row loop: Unroll column loop skip
+ "str q30, [x23, #0x20]\n"
+ "zip1 v17.8h, v24.8h, v16.8h\n"
+ "zip2 v16.8h, v24.8h, v16.8h\n"
+ "str q29, [x23, #0x30]\n"
+ "str q28, [x23, #0x40]\n"
+ "str q27, [x23, #0x50]\n"
+ "str q26, [x23, #0x60]\n"
+ "str q25, [x23, #0x70]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "str q23, [x23, #0x0]\n"
+ "str q22, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q20, [x23, #0x30]\n"
+ "str q19, [x23, #0x40]\n"
+ "str q18, [x23, #0x50]\n"
+ "str q17, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: Unroll column loop skip
"cmp x20, #0x20\n"
- "blt 17f\n"
- "16:" // Tail row loop: Column loop
- "ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
+ "blt 18f\n"
+ "17:" // Tail row loop: Column loop
+ "ldr q21, [x25], #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"sub x20, x20, #0x20\n"
+ "ldr q20, [x25], #0x10\n"
"cmp x20, #0x20\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
- "zip1 v24.8h, v18.8h, v17.8h\n"
- "zip2 v23.8h, v18.8h, v17.8h\n"
+ "ldr q19, [x22], #0x10\n"
"ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
- "zip1 v22.8h, v19.8h, v16.8h\n"
- "zip2 v21.8h, v19.8h, v16.8h\n"
- "ldr q20, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
+ "ldr q24, [x25], #0x10\n"
+ "zip1 v23.8h, v21.8h, v16.8h\n"
+ "zip2 v22.8h, v21.8h, v16.8h\n"
+ "ldr q16, [x22], #0x10\n"
+ "zip1 v21.8h, v20.8h, v19.8h\n"
+ "zip2 v20.8h, v20.8h, v19.8h\n"
"zip1 v19.8h, v18.8h, v17.8h\n"
"zip2 v18.8h, v18.8h, v17.8h\n"
- "zip1 v17.8h, v20.8h, v16.8h\n"
- "zip2 v16.8h, v20.8h, v16.8h\n"
- "str q24, [x21, #0x0]\n"
- "str q23, [x21, #0x10]\n"
- "str q22, [x21, #0x20]\n"
- "str q21, [x21, #0x30]\n"
- "str q19, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q17, [x21, #0x60]\n"
- "str q16, [x21, #0x70]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 16b\n"
- "17:" // Tail row loop: Column loop skip
+ "zip1 v17.8h, v24.8h, v16.8h\n"
+ "zip2 v16.8h, v24.8h, v16.8h\n"
+ "str q23, [x23, #0x0]\n"
+ "str q22, [x23, #0x10]\n"
+ "str q21, [x23, #0x20]\n"
+ "str q20, [x23, #0x30]\n"
+ "str q19, [x23, #0x40]\n"
+ "str q18, [x23, #0x50]\n"
+ "str q17, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: Column loop skip
+ "cbz x20, 25f\n"
"cmp x20, #0x10\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 16 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "blt 20f\n"
+ "19:" // Tail row loop: width 16 loop: loop
"ldr q18, [x25], #0x10\n"
- "ldr q17, [x23], #0x10\n"
+ "ldr q17, [x22], #0x10\n"
"sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
"ldr q20, [x25], #0x10\n"
- "ldr q16, [x23], #0x10\n"
+ "cmp x20, #0x10\n"
+ "ldr q16, [x22], #0x10\n"
"zip1 v19.8h, v18.8h, v17.8h\n"
"zip2 v18.8h, v18.8h, v17.8h\n"
"zip1 v17.8h, v20.8h, v16.8h\n"
"zip2 v16.8h, v20.8h, v16.8h\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x40\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 16 loop: skip
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "add x23, x23, #0x40\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 16 loop: skip
"cmp x20, #0x4\n"
- "blt 21f\n"
- "20:" // Tail row loop: width 4 loop: loop
+ "blt 22f\n"
+ "21:" // Tail row loop: width 4 loop: loop
"ldr d17, [x25], #0x8\n"
- "ldr d16, [x23], #0x8\n"
+ "ldr d16, [x22], #0x8\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 20b\n"
- "21:" // Tail row loop: width 4 loop: skip
+ "str q16, [x23, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "bge 21b\n"
+ "22:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 23f\n"
- "22:" // Tail row loop: width 1 loop: loop
+ "blt 24f\n"
+ "23:" // Tail row loop: width 1 loop: loop
"ldr h17, [x25], #0x2\n"
- "ldr h16, [x23], #0x2\n"
+ "ldr h16, [x22], #0x2\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
"zip1 v16.8h, v17.8h, v16.8h\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 22b\n"
- "23:" // Tail row loop: width 1 loop: skip
+ "str s16, [x23, #0x0]\n"
+ "add x23, x23, #0x4\n"
+ "bge 23b\n"
+ "24:" // Tail row loop: width 1 loop: skip
+ "25:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x80\n"
- "bge 13b\n"
- "24:" // Done
+ "bge 14b\n"
+ "26:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp
index f35752d5a8..44b6e7c9d4 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_48.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,165 +34,186 @@ void a64_transpose_interleave_48(uint16_t *out, const uint16_t *in, size_t width
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x18\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
"ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
"sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
+ "cmp x24, #0x18\n"
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q27, [x21, #0x0]\n"
- "str q23, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "str q26, [x21, #0x30]\n"
- "str q22, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q25, [x21, #0x60]\n"
- "str q21, [x21, #0x70]\n"
- "str q17, [x21, #0x80]\n"
- "str q24, [x21, #0x90]\n"
- "str q20, [x21, #0xa0]\n"
- "str q16, [x21, #0xb0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q27, [x23, #0x0]\n"
+ "str q23, [x23, #0x10]\n"
+ "str q19, [x23, #0x20]\n"
+ "str q26, [x23, #0x30]\n"
+ "str q22, [x23, #0x40]\n"
+ "str q18, [x23, #0x50]\n"
+ "str q25, [x23, #0x60]\n"
+ "str q21, [x23, #0x70]\n"
+ "str q17, [x23, #0x80]\n"
+ "str q24, [x23, #0x90]\n"
+ "str q20, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x10\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "str q16, [x23, #0x80]\n"
+ "str q16, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
"sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
+ "cmp x24, #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q23, [x21, #0x0]\n"
- "str q19, [x21, #0x10]\n"
- "str q22, [x21, #0x30]\n"
- "str q18, [x21, #0x40]\n"
- "str q21, [x21, #0x60]\n"
- "str q17, [x21, #0x70]\n"
- "str q20, [x21, #0x90]\n"
- "str q16, [x21, #0xa0]\n"
- "add x21, x21, #0x20\n"
+ "str q23, [x23, #0x0]\n"
+ "str q19, [x23, #0x10]\n"
+ "str q22, [x23, #0x30]\n"
+ "str q18, [x23, #0x40]\n"
+ "str q21, [x23, #0x60]\n"
+ "str q17, [x23, #0x70]\n"
+ "str q20, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "add x23, x23, #0x20\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
"cmp x24, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x30]\n"
- "str d17, [x21, #0x60]\n"
- "str d16, [x21, #0x90]\n"
- "add x21, x21, #0x8\n"
+ "cmp x24, #0x4\n"
+ "str d19, [x23, #0x0]\n"
+ "str d18, [x23, #0x30]\n"
+ "str d17, [x23, #0x60]\n"
+ "str d16, [x23, #0x90]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
+ "ldr h18, [x22], #0x2\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x30]\n"
- "str h17, [x21, #0x60]\n"
- "str h16, [x21, #0x90]\n"
- "add x21, x21, #0x2\n"
+ "cmp x24, #0x1\n"
+ "str h19, [x23, #0x0]\n"
+ "str h18, [x23, #0x30]\n"
+ "str h17, [x23, #0x60]\n"
+ "str h16, [x23, #0x90]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0xc0\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x18\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Column loop
+ "blt 14f\n"
+ "13:" // Tail row loop: Column loop
"ldr q18, [x25], #0x10\n"
- "ldr q17, [x25], #0x10\n"
"sub x20, x20, #0x18\n"
- "cmp x20, #0x18\n"
+ "ldr q17, [x25], #0x10\n"
"ldr q16, [x25], #0x10\n"
- "str q18, [x21, #0x0]\n"
- "str q17, [x21, #0x10]\n"
- "str q16, [x21, #0x20]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Column loop skip
+ "cmp x20, #0x18\n"
+ "str q18, [x23, #0x0]\n"
+ "str q17, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x10\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 16 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: width 16 loop: loop
"ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
"sub x20, x20, #0x10\n"
+ "ldr q16, [x25], #0x10\n"
"cmp x20, #0x10\n"
- "str q17, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 16 loop: skip
+ "str q17, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "add x23, x23, #0x20\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 16 loop: skip
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr d16, [x25], #0x8\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str d16, [x23, #0x0]\n"
+ "add x23, x23, #0x8\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr h16, [x25], #0x2\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str h16, [x23, #0x0]\n"
+ "add x23, x23, #0x2\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x30\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
: "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "x20", "x21", "x22", "x23", "x24", "x25"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp
index 6ef02ac044..ed12f5dfa9 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -41,9 +41,10 @@ void a64_transpose_interleave_4_1x16(uint8_t *out, const uint8_t *in, size_t wid
__asm__ __volatile__(
"1:" // Main row loop: Head
"mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "add x14, x15, %x[in_stride]\n"
+ "cmp %x[height], #0xf\n"
+ "mov x16, %x[width]\n"
+ "mov x15, %x[out]\n"
+ "add x14, x17, %x[in_stride]\n"
"add x13, x14, %x[in_stride]\n"
"add x12, x13, %x[in_stride]\n"
"add x11, x12, %x[in_stride]\n"
@@ -56,228 +57,231 @@ void a64_transpose_interleave_4_1x16(uint8_t *out, const uint8_t *in, size_t wid
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "cmp %x[height], #0xf\n"
- "add %x[in], x22, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GE\n"
+ "add %x[in], x20, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
+ "cmp %x[height], #0xd\n"
"csel x22, x22, %x[pad_row], GT\n"
"csel x23, x23, %x[pad_row], GE\n"
- "cmp %x[height], #0xd\n"
+ "cmp %x[height], #0xb\n"
"csel x24, x24, %x[pad_row], GT\n"
"csel x25, x25, %x[pad_row], GE\n"
- "cmp %x[height], #0xb\n"
+ "cmp %x[height], #0x9\n"
"csel x26, x26, %x[pad_row], GT\n"
"csel x27, x27, %x[pad_row], GE\n"
- "cmp %x[height], #0x9\n"
+ "cmp %x[height], #0x7\n"
"csel x28, x28, %x[pad_row], GT\n"
"csel x9, x9, %x[pad_row], GE\n"
- "cmp %x[height], #0x7\n"
+ "cmp %x[height], #0x5\n"
"csel x10, x10, %x[pad_row], GT\n"
"csel x11, x11, %x[pad_row], GE\n"
- "cmp %x[height], #0x5\n"
- "mov x21, %x[width]\n"
+ "cmp %x[height], #0x3\n"
"csel x12, x12, %x[pad_row], GT\n"
"csel x13, x13, %x[pad_row], GE\n"
- "cmp %x[height], #0x3\n"
- "csel x14, x14, %x[pad_row], GT\n"
- "csel x15, x15, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x16, x16, %x[pad_row], GT\n"
- "cmp x21, #0x10\n"
- "mov x20, %x[out]\n"
"sub %x[height], %x[height], #0x10\n"
+ "csel x14, x14, %x[pad_row], GT\n"
+ "cmp x16, #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
"ldr q3, [x17], #0x10\n"
- "ldr q9, [x16], #0x10\n"
- "sub x21, x21, #0x10\n"
- "cmp x21, #0x10\n"
- "ldr q2, [x15], #0x10\n"
- "ldr q8, [x14], #0x10\n"
- "ldr q0, [x13], #0x10\n"
- "ldr q31, [x12], #0x10\n"
- "ldr q30, [x11], #0x10\n"
+ "ldr q9, [x14], #0x10\n"
+ "sub x16, x16, #0x10\n"
+ "ldr q2, [x13], #0x10\n"
+ "ldr q8, [x12], #0x10\n"
+ "cmp x16, #0x10\n"
+ "ldr q1, [x11], #0x10\n"
"ldr q7, [x10], #0x10\n"
- "ldr q29, [x9], #0x10\n"
- "ldr q28, [x28], #0x10\n"
- "zip1 v27.16b, v3.16b, v29.16b\n"
- "zip1 v6.16b, v9.16b, v28.16b\n"
- "ldr q25, [x27], #0x10\n"
- "ldr q24, [x26], #0x10\n"
- "zip1 v26.16b, v2.16b, v25.16b\n"
- "zip1 v1.16b, v8.16b, v24.16b\n"
- "ldr q23, [x25], #0x10\n"
- "ldr q22, [x24], #0x10\n"
- "zip1 v21.16b, v0.16b, v23.16b\n"
- "zip1 v20.16b, v31.16b, v22.16b\n"
- "ldr q19, [x23], #0x10\n"
- "ldr q18, [x22], #0x10\n"
- "zip1 v17.16b, v30.16b, v19.16b\n"
- "zip1 v16.16b, v7.16b, v18.16b\n"
- "zip2 v5.16b, v3.16b, v29.16b\n"
- "zip2 v0.16b, v0.16b, v23.16b\n"
- "zip2 v4.16b, v2.16b, v25.16b\n"
- "zip2 v3.16b, v30.16b, v19.16b\n"
- "zip2 v2.16b, v9.16b, v28.16b\n"
- "zip2 v31.16b, v31.16b, v22.16b\n"
- "zip2 v30.16b, v8.16b, v24.16b\n"
- "zip2 v29.16b, v7.16b, v18.16b\n"
- "zip1 v25.16b, v27.16b, v21.16b\n"
- "zip1 v24.16b, v26.16b, v17.16b\n"
- "zip1 v23.16b, v6.16b, v20.16b\n"
- "zip1 v22.16b, v1.16b, v16.16b\n"
- "zip2 v28.16b, v27.16b, v21.16b\n"
- "zip2 v27.16b, v26.16b, v17.16b\n"
- "zip2 v26.16b, v6.16b, v20.16b\n"
- "zip2 v21.16b, v1.16b, v16.16b\n"
- "zip1 v1.16b, v5.16b, v0.16b\n"
- "zip1 v20.16b, v4.16b, v3.16b\n"
- "zip1 v19.16b, v2.16b, v31.16b\n"
- "zip1 v16.16b, v30.16b, v29.16b\n"
- "zip1 v18.16b, v25.16b, v24.16b\n"
- "zip1 v17.16b, v23.16b, v22.16b\n"
- "zip2 v25.16b, v25.16b, v24.16b\n"
- "zip2 v24.16b, v23.16b, v22.16b\n"
- "zip2 v0.16b, v5.16b, v0.16b\n"
- "zip2 v23.16b, v4.16b, v3.16b\n"
- "zip2 v31.16b, v2.16b, v31.16b\n"
- "zip2 v22.16b, v30.16b, v29.16b\n"
- "zip1 v30.16b, v28.16b, v27.16b\n"
- "zip1 v29.16b, v26.16b, v21.16b\n"
+ "ldr q0, [x9], #0x10\n"
+ "ldr q6, [x28], #0x10\n"
+ "ldr q31, [x27], #0x10\n"
+ "ldr q30, [x26], #0x10\n"
+ "ldr q29, [x25], #0x10\n"
+ "ldr q28, [x24], #0x10\n"
+ "ldr q26, [x23], #0x10\n"
+ "ldr q25, [x22], #0x10\n"
+ "ldr q24, [x21], #0x10\n"
+ "ldr q16, [x20], #0x10\n"
+ "zip1 v23.16b, v3.16b, v31.16b\n"
+ "zip1 v27.16b, v9.16b, v30.16b\n"
+ "zip1 v22.16b, v2.16b, v29.16b\n"
+ "zip1 v21.16b, v8.16b, v28.16b\n"
+ "zip1 v20.16b, v1.16b, v26.16b\n"
+ "zip1 v19.16b, v7.16b, v25.16b\n"
+ "zip1 v18.16b, v0.16b, v24.16b\n"
+ "zip1 v17.16b, v6.16b, v16.16b\n"
+ "zip2 v5.16b, v3.16b, v31.16b\n"
+ "zip2 v1.16b, v1.16b, v26.16b\n"
+ "zip2 v4.16b, v2.16b, v29.16b\n"
+ "zip2 v3.16b, v0.16b, v24.16b\n"
+ "zip2 v2.16b, v9.16b, v30.16b\n"
+ "zip2 v0.16b, v7.16b, v25.16b\n"
+ "zip2 v31.16b, v8.16b, v28.16b\n"
+ "zip2 v30.16b, v6.16b, v16.16b\n"
+ "zip1 v26.16b, v23.16b, v20.16b\n"
+ "zip1 v25.16b, v22.16b, v18.16b\n"
+ "zip1 v24.16b, v27.16b, v19.16b\n"
+ "zip1 v16.16b, v21.16b, v17.16b\n"
+ "zip2 v29.16b, v23.16b, v20.16b\n"
+ "zip2 v23.16b, v22.16b, v18.16b\n"
+ "zip2 v22.16b, v27.16b, v19.16b\n"
+ "zip2 v21.16b, v21.16b, v17.16b\n"
+ "zip1 v28.16b, v5.16b, v1.16b\n"
+ "zip1 v27.16b, v4.16b, v3.16b\n"
+ "zip1 v20.16b, v2.16b, v0.16b\n"
+ "zip1 v19.16b, v31.16b, v30.16b\n"
+ "zip1 v18.16b, v26.16b, v25.16b\n"
+ "zip1 v17.16b, v24.16b, v16.16b\n"
+ "zip2 v26.16b, v26.16b, v25.16b\n"
+ "zip2 v16.16b, v24.16b, v16.16b\n"
+ "zip2 v1.16b, v5.16b, v1.16b\n"
+ "zip2 v25.16b, v4.16b, v3.16b\n"
+ "zip2 v0.16b, v2.16b, v0.16b\n"
+ "zip2 v24.16b, v31.16b, v30.16b\n"
+ "zip1 v31.16b, v29.16b, v23.16b\n"
+ "zip1 v30.16b, v22.16b, v21.16b\n"
+ "zip2 v29.16b, v29.16b, v23.16b\n"
+ "zip2 v23.16b, v22.16b, v21.16b\n"
+ "zip1 v22.16b, v28.16b, v27.16b\n"
+ "zip1 v21.16b, v20.16b, v19.16b\n"
"zip2 v28.16b, v28.16b, v27.16b\n"
- "zip2 v27.16b, v26.16b, v21.16b\n"
- "zip1 v26.16b, v1.16b, v20.16b\n"
- "zip1 v21.16b, v19.16b, v16.16b\n"
- "zip2 v20.16b, v1.16b, v20.16b\n"
- "zip2 v19.16b, v19.16b, v16.16b\n"
- "zip1 v16.16b, v18.16b, v17.16b\n"
+ "zip2 v20.16b, v20.16b, v19.16b\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
"zip2 v18.16b, v18.16b, v17.16b\n"
- "str q16, [x20, #0x0]\n"
+ "zip1 v17.16b, v26.16b, v16.16b\n"
+ "zip2 v16.16b, v26.16b, v16.16b\n"
+ "zip1 v27.16b, v1.16b, v25.16b\n"
+ "zip1 v26.16b, v0.16b, v24.16b\n"
+ "str q19, [x15, #0x0]\n"
+ "str q18, [x15, #0x10]\n"
+ "zip2 v25.16b, v1.16b, v25.16b\n"
+ "zip2 v24.16b, v0.16b, v24.16b\n"
+ "str q17, [x15, #0x20]\n"
+ "zip1 v19.16b, v31.16b, v30.16b\n"
+ "zip2 v18.16b, v31.16b, v30.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip1 v17.16b, v29.16b, v23.16b\n"
+ "zip2 v16.16b, v29.16b, v23.16b\n"
+ "zip1 v23.16b, v22.16b, v21.16b\n"
+ "zip2 v22.16b, v22.16b, v21.16b\n"
+ "str q19, [x15, #0x0]\n"
+ "str q18, [x15, #0x10]\n"
+ "zip1 v21.16b, v28.16b, v20.16b\n"
+ "zip2 v20.16b, v28.16b, v20.16b\n"
+ "str q17, [x15, #0x20]\n"
+ "zip1 v19.16b, v27.16b, v26.16b\n"
+ "zip2 v18.16b, v27.16b, v26.16b\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"zip1 v17.16b, v25.16b, v24.16b\n"
"zip2 v16.16b, v25.16b, v24.16b\n"
- "str q18, [x20, #0x10]\n"
- "str q17, [x20, #0x20]\n"
- "zip1 v25.16b, v0.16b, v23.16b\n"
- "zip1 v24.16b, v31.16b, v22.16b\n"
- "str q16, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip2 v23.16b, v0.16b, v23.16b\n"
- "zip2 v22.16b, v31.16b, v22.16b\n"
- "zip1 v16.16b, v30.16b, v29.16b\n"
- "zip2 v17.16b, v30.16b, v29.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v16.16b, v28.16b, v27.16b\n"
- "zip2 v18.16b, v28.16b, v27.16b\n"
- "str q17, [x20, #0x10]\n"
- "str q16, [x20, #0x20]\n"
- "zip1 v17.16b, v26.16b, v21.16b\n"
- "zip2 v16.16b, v26.16b, v21.16b\n"
- "str q18, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 v21.16b, v20.16b, v19.16b\n"
- "zip2 v20.16b, v20.16b, v19.16b\n"
- "str q17, [x20, #0x0]\n"
- "zip1 v19.16b, v25.16b, v24.16b\n"
- "zip2 v18.16b, v25.16b, v24.16b\n"
- "str q16, [x20, #0x10]\n"
- "zip1 v17.16b, v23.16b, v22.16b\n"
- "zip2 v16.16b, v23.16b, v22.16b\n"
- "str q21, [x20, #0x20]\n"
- "str q20, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
- "str q19, [x20, #0x0]\n"
- "str q18, [x20, #0x10]\n"
- "str q17, [x20, #0x20]\n"
- "str q16, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
+ "str q23, [x15, #0x0]\n"
+ "str q22, [x15, #0x10]\n"
+ "str q21, [x15, #0x20]\n"
+ "str q20, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "str q19, [x15, #0x0]\n"
+ "str q18, [x15, #0x10]\n"
+ "str q17, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x21, #0x4\n"
+ "cmp x16, #0x4\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr s21, [x17], #0x4\n"
- "ldr s23, [x16], #0x4\n"
- "sub x21, x21, #0x4\n"
- "cmp x21, #0x4\n"
- "ldr s20, [x15], #0x4\n"
- "ldr s22, [x14], #0x4\n"
- "ldr s19, [x13], #0x4\n"
- "ldr s18, [x12], #0x4\n"
- "ldr s25, [x11], #0x4\n"
- "ldr s24, [x10], #0x4\n"
- "ldr s17, [x9], #0x4\n"
- "ldr s16, [x28], #0x4\n"
- "zip1 v21.16b, v21.16b, v17.16b\n"
- "zip1 v23.16b, v23.16b, v16.16b\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v20.16b, v20.16b, v17.16b\n"
- "zip1 v22.16b, v22.16b, v16.16b\n"
- "ldr s17, [x25], #0x4\n"
- "ldr s16, [x24], #0x4\n"
- "zip1 v19.16b, v19.16b, v17.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "ldr s17, [x23], #0x4\n"
- "ldr s16, [x22], #0x4\n"
+ "ldr s31, [x17], #0x4\n"
+ "ldr s30, [x14], #0x4\n"
+ "sub x16, x16, #0x4\n"
+ "ldr s29, [x13], #0x4\n"
+ "ldr s28, [x12], #0x4\n"
+ "cmp x16, #0x4\n"
+ "ldr s27, [x11], #0x4\n"
+ "ldr s26, [x10], #0x4\n"
+ "ldr s25, [x9], #0x4\n"
+ "ldr s24, [x28], #0x4\n"
+ "ldr s20, [x27], #0x4\n"
+ "ldr s21, [x26], #0x4\n"
+ "ldr s23, [x25], #0x4\n"
+ "ldr s22, [x24], #0x4\n"
+ "ldr s19, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
+ "ldr s16, [x20], #0x4\n"
+ "zip1 v20.16b, v31.16b, v20.16b\n"
+ "zip1 v21.16b, v30.16b, v21.16b\n"
+ "zip1 v23.16b, v29.16b, v23.16b\n"
+ "zip1 v22.16b, v28.16b, v22.16b\n"
+ "zip1 v19.16b, v27.16b, v19.16b\n"
+ "zip1 v18.16b, v26.16b, v18.16b\n"
"zip1 v17.16b, v25.16b, v17.16b\n"
"zip1 v16.16b, v24.16b, v16.16b\n"
- "zip1 v21.16b, v21.16b, v19.16b\n"
- "zip1 v20.16b, v20.16b, v17.16b\n"
- "zip1 v19.16b, v23.16b, v18.16b\n"
+ "zip1 v20.16b, v20.16b, v19.16b\n"
+ "zip1 v21.16b, v21.16b, v18.16b\n"
+ "zip1 v19.16b, v23.16b, v17.16b\n"
"zip1 v16.16b, v22.16b, v16.16b\n"
- "zip1 v18.16b, v21.16b, v20.16b\n"
- "zip1 v17.16b, v19.16b, v16.16b\n"
- "zip2 v20.16b, v21.16b, v20.16b\n"
- "zip2 v19.16b, v19.16b, v16.16b\n"
- "zip1 v16.16b, v18.16b, v17.16b\n"
+ "zip1 v18.16b, v20.16b, v19.16b\n"
+ "zip1 v17.16b, v21.16b, v16.16b\n"
+ "zip2 v20.16b, v20.16b, v19.16b\n"
+ "zip2 v16.16b, v21.16b, v16.16b\n"
+ "zip1 v19.16b, v18.16b, v17.16b\n"
"zip2 v18.16b, v18.16b, v17.16b\n"
- "str q16, [x20, #0x0]\n"
- "zip1 v17.16b, v20.16b, v19.16b\n"
- "zip2 v16.16b, v20.16b, v19.16b\n"
- "str q18, [x20, #0x10]\n"
- "str q17, [x20, #0x20]\n"
- "str q16, [x20, #0x30]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip1 v17.16b, v20.16b, v16.16b\n"
+ "zip2 v16.16b, v20.16b, v16.16b\n"
+ "str q19, [x15, #0x0]\n"
+ "str q18, [x15, #0x10]\n"
+ "str q17, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x21, #0x1\n"
- "blt 7f\n"
+ "cbz x16, 7f\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x15, #0x0]\n"
+ "str q16, [x15, #0x10]\n"
+ "str q16, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr b23, [x17], #0x1\n"
- "ldr b22, [x16], #0x1\n"
- "sub x21, x21, #0x1\n"
- "cmp x21, #0x1\n"
- "ldr b21, [x15], #0x1\n"
- "ldr b20, [x14], #0x1\n"
- "ldr b19, [x13], #0x1\n"
- "ldr b18, [x12], #0x1\n"
- "ldr b25, [x11], #0x1\n"
- "ldr b24, [x10], #0x1\n"
- "ldr b17, [x9], #0x1\n"
- "ldr b16, [x28], #0x1\n"
- "zip1 v23.16b, v23.16b, v17.16b\n"
- "zip1 v22.16b, v22.16b, v16.16b\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v21.16b, v21.16b, v17.16b\n"
- "zip1 v20.16b, v20.16b, v16.16b\n"
- "ldr b17, [x25], #0x1\n"
- "ldr b16, [x24], #0x1\n"
- "zip1 v19.16b, v19.16b, v17.16b\n"
- "zip1 v18.16b, v18.16b, v16.16b\n"
- "ldr b17, [x23], #0x1\n"
- "ldr b16, [x22], #0x1\n"
+ "ldr b31, [x17], #0x1\n"
+ "ldr b30, [x14], #0x1\n"
+ "sub x16, x16, #0x1\n"
+ "ldr b29, [x13], #0x1\n"
+ "ldr b28, [x12], #0x1\n"
+ "cmp x16, #0x1\n"
+ "ldr b27, [x11], #0x1\n"
+ "ldr b26, [x10], #0x1\n"
+ "ldr b25, [x9], #0x1\n"
+ "ldr b24, [x28], #0x1\n"
+ "ldr b23, [x27], #0x1\n"
+ "ldr b22, [x26], #0x1\n"
+ "ldr b21, [x25], #0x1\n"
+ "ldr b20, [x24], #0x1\n"
+ "ldr b19, [x23], #0x1\n"
+ "ldr b18, [x22], #0x1\n"
+ "ldr b17, [x21], #0x1\n"
+ "ldr b16, [x20], #0x1\n"
+ "zip1 v23.16b, v31.16b, v23.16b\n"
+ "zip1 v22.16b, v30.16b, v22.16b\n"
+ "zip1 v21.16b, v29.16b, v21.16b\n"
+ "zip1 v20.16b, v28.16b, v20.16b\n"
+ "zip1 v19.16b, v27.16b, v19.16b\n"
+ "zip1 v18.16b, v26.16b, v18.16b\n"
"zip1 v17.16b, v25.16b, v17.16b\n"
"zip1 v16.16b, v24.16b, v16.16b\n"
"zip1 v19.16b, v23.16b, v19.16b\n"
- "zip1 v17.16b, v21.16b, v17.16b\n"
"zip1 v18.16b, v22.16b, v18.16b\n"
+ "zip1 v17.16b, v21.16b, v17.16b\n"
"zip1 v16.16b, v20.16b, v16.16b\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x20, #0x0]\n"
- "add x20, x20, #0x10\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, #0x10\n"
"bge 6b\n"
- "7:" // Main row loop: width 1 loop: skip
+ "7:" // Main row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x40\n"
"bge 1b\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp
index 5667820865..282375b3a3 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_4_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,11 +43,13 @@ void a64_transpose_interleave_4_1x4(uint8_t *out, const uint8_t *in, size_t widt
"blt 8f\n"
"1:" // Main row loop: Head
"mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
- "add x14, x15, %x[in_stride]\n"
+ "mov x16, %x[width]\n"
+ "mov x15, %x[out]\n"
+ "sub %x[height], %x[height], #0x10\n"
+ "add x14, x17, %x[in_stride]\n"
"add x13, x14, %x[in_stride]\n"
"add x12, x13, %x[in_stride]\n"
+ "cmp x16, #0x10\n"
"add x11, x12, %x[in_stride]\n"
"add x10, x11, %x[in_stride]\n"
"add x9, x10, %x[in_stride]\n"
@@ -55,168 +57,170 @@ void a64_transpose_interleave_4_1x4(uint8_t *out, const uint8_t *in, size_t widt
"add x27, x28, %x[in_stride]\n"
"add x26, x27, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
- "mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
- "cmp x24, #0x10\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x10\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ldr q21, [x17], #0x10\n"
- "ldr q20, [x16], #0x10\n"
- "sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q17, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v3.16b, v21.16b, v17.16b\n"
- "zip1 v2.16b, v20.16b, v16.16b\n"
- "ldr q19, [x13], #0x10\n"
- "ldr q18, [x12], #0x10\n"
- "zip2 v1.16b, v21.16b, v17.16b\n"
- "zip2 v0.16b, v20.16b, v16.16b\n"
- "ldr q17, [x11], #0x10\n"
- "ldr q16, [x10], #0x10\n"
- "zip1 v31.16b, v19.16b, v17.16b\n"
- "zip1 v30.16b, v18.16b, v16.16b\n"
- "ldr q21, [x9], #0x10\n"
- "ldr q20, [x28], #0x10\n"
- "zip2 v29.16b, v19.16b, v17.16b\n"
- "zip2 v28.16b, v18.16b, v16.16b\n"
- "ldr q17, [x27], #0x10\n"
- "ldr q16, [x26], #0x10\n"
- "zip1 v23.16b, v21.16b, v17.16b\n"
- "zip1 v22.16b, v20.16b, v16.16b\n"
- "ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "zip2 v27.16b, v21.16b, v17.16b\n"
- "zip2 v26.16b, v20.16b, v16.16b\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q19, [x17], #0x10\n"
+ "ldr q18, [x14], #0x10\n"
+ "sub x16, x16, #0x10\n"
+ "ldr q17, [x13], #0x10\n"
+ "ldr q16, [x12], #0x10\n"
+ "cmp x16, #0x10\n"
+ "ldr q27, [x11], #0x10\n"
+ "ldr q26, [x10], #0x10\n"
+ "ldr q25, [x9], #0x10\n"
+ "ldr q24, [x28], #0x10\n"
+ "ldr q23, [x27], #0x10\n"
+ "ldr q22, [x26], #0x10\n"
+ "zip1 v5.16b, v19.16b, v17.16b\n"
+ "zip1 v4.16b, v18.16b, v16.16b\n"
+ "ldr q21, [x25], #0x10\n"
+ "ldr q20, [x24], #0x10\n"
+ "zip2 v3.16b, v19.16b, v17.16b\n"
+ "zip2 v2.16b, v18.16b, v16.16b\n"
+ "ldr q19, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "zip1 v1.16b, v27.16b, v25.16b\n"
+ "zip1 v0.16b, v26.16b, v24.16b\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "zip1 v21.16b, v19.16b, v17.16b\n"
- "zip1 v20.16b, v18.16b, v16.16b\n"
- "zip2 v25.16b, v19.16b, v17.16b\n"
- "zip2 v24.16b, v18.16b, v16.16b\n"
- "zip1 v16.16b, v3.16b, v2.16b\n"
- "zip1 v18.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x0]\n"
- "zip1 v17.16b, v23.16b, v22.16b\n"
- "zip1 v16.16b, v21.16b, v20.16b\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
+ "zip2 v31.16b, v27.16b, v25.16b\n"
+ "zip2 v30.16b, v26.16b, v24.16b\n"
+ "zip1 v25.16b, v23.16b, v21.16b\n"
+ "zip1 v24.16b, v22.16b, v20.16b\n"
+ "zip2 v29.16b, v23.16b, v21.16b\n"
+ "zip2 v28.16b, v22.16b, v20.16b\n"
+ "zip1 v23.16b, v19.16b, v17.16b\n"
+ "zip1 v22.16b, v18.16b, v16.16b\n"
+ "zip2 v27.16b, v19.16b, v17.16b\n"
+ "zip2 v26.16b, v18.16b, v16.16b\n"
+ "zip1 v21.16b, v5.16b, v4.16b\n"
+ "zip1 v20.16b, v1.16b, v0.16b\n"
+ "zip1 v19.16b, v25.16b, v24.16b\n"
+ "zip1 v18.16b, v23.16b, v22.16b\n"
+ "zip2 v17.16b, v5.16b, v4.16b\n"
+ "zip2 v16.16b, v1.16b, v0.16b\n"
+ "str q21, [x15, #0x0]\n"
+ "str q20, [x15, #0x10]\n"
+ "zip2 v25.16b, v25.16b, v24.16b\n"
+ "zip2 v24.16b, v23.16b, v22.16b\n"
+ "str q19, [x15, #0x20]\n"
+ "zip1 v23.16b, v3.16b, v2.16b\n"
+ "zip1 v22.16b, v31.16b, v30.16b\n"
+ "str q18, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip1 v21.16b, v29.16b, v28.16b\n"
+ "zip1 v20.16b, v27.16b, v26.16b\n"
+ "str q17, [x15, #0x0]\n"
"zip2 v19.16b, v3.16b, v2.16b\n"
"zip2 v18.16b, v31.16b, v30.16b\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v17.16b, v23.16b, v22.16b\n"
- "zip2 v16.16b, v21.16b, v20.16b\n"
- "str q19, [x21, #0x0]\n"
- "zip1 v23.16b, v1.16b, v0.16b\n"
- "zip1 v22.16b, v29.16b, v28.16b\n"
- "str q18, [x21, #0x10]\n"
- "zip1 v21.16b, v27.16b, v26.16b\n"
- "zip1 v20.16b, v25.16b, v24.16b\n"
- "str q17, [x21, #0x20]\n"
- "zip2 v19.16b, v1.16b, v0.16b\n"
- "zip2 v18.16b, v29.16b, v28.16b\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v17.16b, v27.16b, v26.16b\n"
- "zip2 v16.16b, v25.16b, v24.16b\n"
- "str q23, [x21, #0x0]\n"
- "str q22, [x21, #0x10]\n"
- "str q21, [x21, #0x20]\n"
- "str q20, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x15, #0x10]\n"
+ "zip2 v17.16b, v29.16b, v28.16b\n"
+ "zip2 v16.16b, v27.16b, v26.16b\n"
+ "str q25, [x15, #0x20]\n"
+ "str q24, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "str q23, [x15, #0x0]\n"
+ "str q22, [x15, #0x10]\n"
+ "str q21, [x15, #0x20]\n"
+ "str q20, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "str q19, [x15, #0x0]\n"
+ "str q18, [x15, #0x10]\n"
+ "str q17, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cmp x24, #0x4\n"
+ "cmp x16, #0x4\n"
"blt 5f\n"
"4:" // Main row loop: Column loop
- "ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
- "sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x13], #0x4\n"
- "ldr s18, [x12], #0x4\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr s17, [x11], #0x4\n"
- "ldr s16, [x10], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr s19, [x9], #0x4\n"
- "ldr s18, [x28], #0x4\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr s17, [x27], #0x4\n"
- "ldr s16, [x26], #0x4\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
+ "ldr s23, [x17], #0x4\n"
+ "ldr s21, [x14], #0x4\n"
+ "sub x16, x16, #0x4\n"
+ "ldr s20, [x13], #0x4\n"
+ "ldr s17, [x12], #0x4\n"
+ "cmp x16, #0x4\n"
+ "ldr s22, [x11], #0x4\n"
+ "ldr s19, [x10], #0x4\n"
+ "ldr s18, [x9], #0x4\n"
+ "ldr s16, [x28], #0x4\n"
+ "ldr s27, [x27], #0x4\n"
+ "ldr s26, [x26], #0x4\n"
+ "zip1 v25.16b, v23.16b, v20.16b\n"
+ "zip1 v21.16b, v21.16b, v17.16b\n"
"ldr s20, [x25], #0x4\n"
- "ldr s19, [x23], #0x4\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s17, [x24], #0x4\n"
+ "ldr s24, [x23], #0x4\n"
+ "ldr s23, [x22], #0x4\n"
+ "zip1 v22.16b, v22.16b, v18.16b\n"
+ "zip1 v19.16b, v19.16b, v16.16b\n"
+ "ldr s18, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
+ "zip1 v21.16b, v25.16b, v21.16b\n"
+ "zip1 v20.16b, v27.16b, v20.16b\n"
+ "zip1 v17.16b, v26.16b, v17.16b\n"
+ "zip1 v19.16b, v22.16b, v19.16b\n"
+ "zip1 v18.16b, v24.16b, v18.16b\n"
+ "zip1 v16.16b, v23.16b, v16.16b\n"
+ "str q21, [x15, #0x0]\n"
"zip1 v17.16b, v20.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- "str q22, [x21, #0x0]\n"
- "str q21, [x21, #0x10]\n"
- "str q18, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q19, [x15, #0x10]\n"
+ "zip1 v16.16b, v18.16b, v16.16b\n"
+ "str q17, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 4b\n"
"5:" // Main row loop: Column loop skip
- "cmp x24, #0x1\n"
- "blt 7f\n"
+ "cbz x16, 7f\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x15, #0x0]\n"
+ "str q16, [x15, #0x10]\n"
+ "str q16, [x15, #0x20]\n"
+ "str q16, [x15, #0x30]\n"
"6:" // Main row loop: width 1 loop: loop
- "ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
- "sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x13], #0x1\n"
- "ldr b18, [x12], #0x1\n"
- "zip1 v22.16b, v17.16b, v16.16b\n"
- "ldr b17, [x11], #0x1\n"
- "ldr b16, [x10], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
- "ldr b19, [x9], #0x1\n"
- "ldr b18, [x28], #0x1\n"
- "zip1 v21.16b, v17.16b, v16.16b\n"
- "ldr b17, [x27], #0x1\n"
- "ldr b16, [x26], #0x1\n"
- "zip1 v17.16b, v19.16b, v17.16b\n"
- "zip1 v16.16b, v18.16b, v16.16b\n"
+ "ldr b23, [x17], #0x1\n"
+ "ldr b21, [x14], #0x1\n"
+ "sub x16, x16, #0x1\n"
+ "ldr b20, [x13], #0x1\n"
+ "ldr b19, [x12], #0x1\n"
+ "cmp x16, #0x1\n"
+ "ldr b22, [x11], #0x1\n"
+ "ldr b18, [x10], #0x1\n"
+ "ldr b17, [x9], #0x1\n"
+ "ldr b16, [x28], #0x1\n"
+ "ldr b27, [x27], #0x1\n"
+ "ldr b26, [x26], #0x1\n"
+ "zip1 v25.16b, v23.16b, v20.16b\n"
+ "zip1 v21.16b, v21.16b, v19.16b\n"
"ldr b20, [x25], #0x1\n"
- "ldr b19, [x23], #0x1\n"
- "zip1 v18.16b, v17.16b, v16.16b\n"
- "ldr b17, [x22], #0x1\n"
+ "ldr b19, [x24], #0x1\n"
+ "ldr b24, [x23], #0x1\n"
+ "ldr b23, [x22], #0x1\n"
+ "zip1 v22.16b, v22.16b, v17.16b\n"
+ "zip1 v17.16b, v18.16b, v16.16b\n"
+ "ldr b18, [x21], #0x1\n"
"ldr b16, [x20], #0x1\n"
- "zip1 v17.16b, v20.16b, v17.16b\n"
- "zip1 v16.16b, v19.16b, v16.16b\n"
- "str s22, [x21, #0x0]\n"
- "zip1 v16.16b, v17.16b, v16.16b\n"
- "str s21, [x21, #0x10]\n"
- "str s18, [x21, #0x20]\n"
- "str s16, [x21, #0x30]\n"
- "add x21, x21, #0x4\n"
+ "zip1 v21.16b, v25.16b, v21.16b\n"
+ "zip1 v20.16b, v27.16b, v20.16b\n"
+ "zip1 v19.16b, v26.16b, v19.16b\n"
+ "zip1 v17.16b, v22.16b, v17.16b\n"
+ "zip1 v18.16b, v24.16b, v18.16b\n"
+ "zip1 v16.16b, v23.16b, v16.16b\n"
+ "str s21, [x15, #0x0]\n"
+ "str s17, [x15, #0x10]\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip1 v16.16b, v18.16b, v16.16b\n"
+ "str s17, [x15, #0x20]\n"
+ "str s16, [x15, #0x30]\n"
+ "add x15, x15, #0x4\n"
"bge 6b\n"
- "7:" // Main row loop: width 1 loop: skip
+ "7:" // Main row loop: odd col skip
"cmp %x[height], #0x10\n"
"add %x[out], %x[out], #0x40\n"
"bge 1b\n"
@@ -224,84 +228,85 @@ void a64_transpose_interleave_4_1x4(uint8_t *out, const uint8_t *in, size_t widt
"8:" // Main loop skip
"9:" // Tail row loop: Head
"mov x17, %x[in]\n"
- "add x16, x17, %x[in_stride]\n"
- "add x15, x16, %x[in_stride]\n"
"mov x20, %x[width]\n"
- "add x14, x15, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x14, %x[in_stride]\n"
- "csel x14, x14, %x[pad_row], GT\n"
- "csel x15, x15, %x[pad_row], GE\n"
+ "mov x15, %x[out]\n"
+ "add x14, x17, %x[in_stride]\n"
+ "add x13, x14, %x[in_stride]\n"
+ "add x12, x13, %x[in_stride]\n"
+ "csel x13, x13, %x[pad_row], GE\n"
+ "add %x[in], x12, %x[in_stride]\n"
+ "csel x12, x12, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "csel x16, x16, %x[pad_row], GT\n"
- "cmp x20, #0x10\n"
- "mov x21, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
+ "csel x14, x14, %x[pad_row], GT\n"
+ "cmp x20, #0x10\n"
"blt 11f\n"
"10:" // Tail row loop: Unroll column loop
- "ldr q19, [x17], #0x10\n"
- "ldr q21, [x16], #0x10\n"
+ "ldr q20, [x17], #0x10\n"
+ "ldr q21, [x14], #0x10\n"
"sub x20, x20, #0x10\n"
+ "ldr q19, [x13], #0x10\n"
+ "ldr q16, [x12], #0x10\n"
"cmp x20, #0x10\n"
- "ldr q18, [x15], #0x10\n"
- "ldr q16, [x14], #0x10\n"
- "zip1 v20.16b, v19.16b, v18.16b\n"
+ "zip1 v18.16b, v20.16b, v19.16b\n"
"zip1 v17.16b, v21.16b, v16.16b\n"
- "zip2 v19.16b, v19.16b, v18.16b\n"
- "zip2 v18.16b, v21.16b, v16.16b\n"
- "zip1 v16.16b, v20.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 v16.16b, v20.16b, v17.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip1 v17.16b, v19.16b, v18.16b\n"
- "zip2 v16.16b, v19.16b, v18.16b\n"
- "str q17, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 v20.16b, v20.16b, v19.16b\n"
+ "zip2 v19.16b, v21.16b, v16.16b\n"
+ "zip1 v16.16b, v18.16b, v17.16b\n"
+ "zip2 v18.16b, v18.16b, v17.16b\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "zip1 v17.16b, v20.16b, v19.16b\n"
+ "zip2 v16.16b, v20.16b, v19.16b\n"
+ "str q18, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "str q17, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 10b\n"
"11:" // Tail row loop: Unroll column loop skip
"cmp x20, #0x4\n"
"blt 13f\n"
"12:" // Tail row loop: Column loop
"ldr s19, [x17], #0x4\n"
- "ldr s18, [x16], #0x4\n"
+ "ldr s18, [x14], #0x4\n"
"sub x20, x20, #0x4\n"
+ "ldr s17, [x13], #0x4\n"
+ "ldr s16, [x12], #0x4\n"
"cmp x20, #0x4\n"
- "ldr s17, [x15], #0x4\n"
- "ldr s16, [x14], #0x4\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q16, [x15, #0x0]\n"
+ "add x15, x15, %x[out_stride]\n"
"bge 12b\n"
"13:" // Tail row loop: Column loop skip
- "cmp x20, #0x1\n"
- "blt 15f\n"
+ "cbz x20, 15f\n"
+ "movi v16.16b, #0x0\n"
+ "str q16, [x15, #0x0]\n"
"14:" // Tail row loop: width 1 loop: loop
"ldr b19, [x17], #0x1\n"
- "ldr b18, [x16], #0x1\n"
+ "ldr b18, [x14], #0x1\n"
"sub x20, x20, #0x1\n"
+ "ldr b17, [x13], #0x1\n"
+ "ldr b16, [x12], #0x1\n"
"cmp x20, #0x1\n"
- "ldr b17, [x15], #0x1\n"
- "ldr b16, [x14], #0x1\n"
"zip1 v17.16b, v19.16b, v17.16b\n"
"zip1 v16.16b, v18.16b, v16.16b\n"
"zip1 v16.16b, v17.16b, v16.16b\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
+ "str s16, [x15, #0x0]\n"
+ "add x15, x15, #0x4\n"
"bge 14b\n"
- "15:" // Tail row loop: width 1 loop: skip
+ "15:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x10\n"
"bge 9b\n"
"16:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
);
}
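The 4_1x4 kernel above builds its output with a two-stage byte zip: rows (0,2) and (1,3) are zipped first, then the intermediates are zipped again, leaving r0[i], r1[i], r2[i], r3[i] contiguous for each column i. An illustrative NEON-intrinsics sketch of one 16-column step — not part of the patch; interleave4_u8 is a hypothetical name:

#include <arm_neon.h>
#include <cstdint>

// Interleave one 16-byte chunk from each of four rows:
// out = r0[0], r1[0], r2[0], r3[0], r0[1], r1[1], ...
static void interleave4_u8(uint8_t *out, const uint8_t *r0, const uint8_t *r1,
                           const uint8_t *r2, const uint8_t *r3)
{
    uint8x16_t a = vld1q_u8(r0), b = vld1q_u8(r1);
    uint8x16_t c = vld1q_u8(r2), d = vld1q_u8(r3);
    uint8x16_t ac_lo = vzip1q_u8(a, c);              // r0[0], r2[0], r0[1], r2[1], ...
    uint8x16_t bd_lo = vzip1q_u8(b, d);              // r1[0], r3[0], r1[1], r3[1], ...
    vst1q_u8(out + 0, vzip1q_u8(ac_lo, bd_lo));      // columns 0..3
    vst1q_u8(out + 16, vzip2q_u8(ac_lo, bd_lo));     // columns 4..7
    uint8x16_t ac_hi = vzip2q_u8(a, c);
    uint8x16_t bd_hi = vzip2q_u8(b, d);
    vst1q_u8(out + 32, vzip1q_u8(ac_hi, bd_hi));     // columns 8..11
    vst1q_u8(out + 48, vzip2q_u8(ac_hi, bd_hi));     // columns 12..15
}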
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp
index 328274a488..387c6adabd 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,175 +34,201 @@ void a64_transpose_interleave_64(uint16_t *out, const uint16_t *in, size_t width
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x20\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
"ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
"sub x24, x24, #0x20\n"
- "cmp x24, #0x20\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
+ "cmp x24, #0x20\n"
"ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q31, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q23, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q30, [x21, #0x40]\n"
- "str q26, [x21, #0x50]\n"
- "str q22, [x21, #0x60]\n"
- "str q18, [x21, #0x70]\n"
- "str q29, [x21, #0x80]\n"
- "str q25, [x21, #0x90]\n"
- "str q21, [x21, #0xa0]\n"
- "str q17, [x21, #0xb0]\n"
- "str q28, [x21, #0xc0]\n"
- "str q24, [x21, #0xd0]\n"
- "str q20, [x21, #0xe0]\n"
- "str q16, [x21, #0xf0]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q31, [x23, #0x0]\n"
+ "str q27, [x23, #0x10]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q30, [x23, #0x40]\n"
+ "str q26, [x23, #0x50]\n"
+ "str q22, [x23, #0x60]\n"
+ "str q18, [x23, #0x70]\n"
+ "str q29, [x23, #0x80]\n"
+ "str q25, [x23, #0x90]\n"
+ "str q21, [x23, #0xa0]\n"
+ "str q17, [x23, #0xb0]\n"
+ "str q28, [x23, #0xc0]\n"
+ "str q24, [x23, #0xd0]\n"
+ "str q20, [x23, #0xe0]\n"
+ "str q16, [x23, #0xf0]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x10\n"
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "str q16, [x23, #0x80]\n"
+ "str q16, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "str q16, [x23, #0xc0]\n"
+ "str q16, [x23, #0xd0]\n"
+ "str q16, [x23, #0xe0]\n"
+ "str q16, [x23, #0xf0]\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
"sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
+ "cmp x24, #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q23, [x21, #0x0]\n"
- "str q19, [x21, #0x10]\n"
- "str q22, [x21, #0x40]\n"
- "str q18, [x21, #0x50]\n"
- "str q21, [x21, #0x80]\n"
- "str q17, [x21, #0x90]\n"
- "str q20, [x21, #0xc0]\n"
- "str q16, [x21, #0xd0]\n"
- "add x21, x21, #0x20\n"
+ "str q23, [x23, #0x0]\n"
+ "str q19, [x23, #0x10]\n"
+ "str q22, [x23, #0x40]\n"
+ "str q18, [x23, #0x50]\n"
+ "str q21, [x23, #0x80]\n"
+ "str q17, [x23, #0x90]\n"
+ "str q20, [x23, #0xc0]\n"
+ "str q16, [x23, #0xd0]\n"
+ "add x23, x23, #0x20\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
"cmp x24, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr d19, [x25], #0x8\n"
- "ldr d18, [x23], #0x8\n"
+ "ldr d18, [x22], #0x8\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr d17, [x22], #0x8\n"
+ "ldr d17, [x21], #0x8\n"
"ldr d16, [x20], #0x8\n"
- "str d19, [x21, #0x0]\n"
- "str d18, [x21, #0x40]\n"
- "str d17, [x21, #0x80]\n"
- "str d16, [x21, #0xc0]\n"
- "add x21, x21, #0x8\n"
+ "cmp x24, #0x4\n"
+ "str d19, [x23, #0x0]\n"
+ "str d18, [x23, #0x40]\n"
+ "str d17, [x23, #0x80]\n"
+ "str d16, [x23, #0xc0]\n"
+ "add x23, x23, #0x8\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr h19, [x25], #0x2\n"
- "ldr h18, [x23], #0x2\n"
+ "ldr h18, [x22], #0x2\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr h17, [x22], #0x2\n"
+ "ldr h17, [x21], #0x2\n"
"ldr h16, [x20], #0x2\n"
- "str h19, [x21, #0x0]\n"
- "str h18, [x21, #0x40]\n"
- "str h17, [x21, #0x80]\n"
- "str h16, [x21, #0xc0]\n"
- "add x21, x21, #0x2\n"
+ "cmp x24, #0x1\n"
+ "str h19, [x23, #0x0]\n"
+ "str h18, [x23, #0x40]\n"
+ "str h17, [x23, #0x80]\n"
+ "str h16, [x23, #0xc0]\n"
+ "add x23, x23, #0x2\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x100\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x20\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Column loop
+ "blt 14f\n"
+ "13:" // Tail row loop: Column loop
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x25], #0x10\n"
"sub x20, x20, #0x20\n"
- "cmp x20, #0x20\n"
+ "ldr q18, [x25], #0x10\n"
"ldr q17, [x25], #0x10\n"
+ "cmp x20, #0x20\n"
"ldr q16, [x25], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Column loop skip
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x10\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 16 loop: loop
+ "movi v16.8h, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: width 16 loop: loop
"ldr q17, [x25], #0x10\n"
- "ldr q16, [x25], #0x10\n"
"sub x20, x20, #0x10\n"
+ "ldr q16, [x25], #0x10\n"
"cmp x20, #0x10\n"
- "str q17, [x21, #0x0]\n"
- "str q16, [x21, #0x10]\n"
- "add x21, x21, #0x20\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 16 loop: skip
+ "str q17, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "add x23, x23, #0x20\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 16 loop: skip
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr d16, [x25], #0x8\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
- "str d16, [x21, #0x0]\n"
- "add x21, x21, #0x8\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str d16, [x23, #0x0]\n"
+ "add x23, x23, #0x8\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr h16, [x25], #0x2\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
- "str h16, [x21, #0x0]\n"
- "add x21, x21, #0x2\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str h16, [x23, #0x0]\n"
+ "add x23, x23, #0x2\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x40\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
: "cc", "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp
index feb469ab0e..e5778860a4 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/a64_transpose_interleave_96.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,215 +34,251 @@ void a64_transpose_interleave_96(uint32_t *out, const uint32_t *in, size_t width
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "blt 10f\n"
+ "blt 11f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
"mov x24, %x[width]\n"
- "add x23, x25, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
- "add x20, x22, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x25, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
"cmp x24, #0x18\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Column loop
"ldr q7, [x25], #0x10\n"
- "ldr q6, [x23], #0x10\n"
+ "ldr q6, [x22], #0x10\n"
"sub x24, x24, #0x18\n"
- "cmp x24, #0x18\n"
- "ldr q5, [x22], #0x10\n"
+ "ldr q5, [x21], #0x10\n"
"ldr q4, [x20], #0x10\n"
+ "cmp x24, #0x18\n"
"ldr q3, [x25], #0x10\n"
- "ldr q2, [x23], #0x10\n"
- "ldr q1, [x22], #0x10\n"
+ "ldr q2, [x22], #0x10\n"
+ "ldr q1, [x21], #0x10\n"
"ldr q0, [x20], #0x10\n"
"ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
"ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q7, [x21, #0x0]\n"
- "str q3, [x21, #0x10]\n"
- "str q31, [x21, #0x20]\n"
- "str q27, [x21, #0x30]\n"
- "str q23, [x21, #0x40]\n"
- "str q19, [x21, #0x50]\n"
- "str q6, [x21, #0x60]\n"
- "str q2, [x21, #0x70]\n"
- "str q30, [x21, #0x80]\n"
- "str q26, [x21, #0x90]\n"
- "str q22, [x21, #0xa0]\n"
- "str q18, [x21, #0xb0]\n"
- "str q5, [x21, #0xc0]\n"
- "str q1, [x21, #0xd0]\n"
- "str q29, [x21, #0xe0]\n"
- "str q25, [x21, #0xf0]\n"
- "str q21, [x21, #0x100]\n"
- "str q17, [x21, #0x110]\n"
- "str q4, [x21, #0x120]\n"
- "str q0, [x21, #0x130]\n"
- "str q28, [x21, #0x140]\n"
- "str q24, [x21, #0x150]\n"
- "str q20, [x21, #0x160]\n"
- "str q16, [x21, #0x170]\n"
- "add x21, x21, %x[out_stride]\n"
+ "str q7, [x23, #0x0]\n"
+ "str q3, [x23, #0x10]\n"
+ "str q31, [x23, #0x20]\n"
+ "str q27, [x23, #0x30]\n"
+ "str q23, [x23, #0x40]\n"
+ "str q19, [x23, #0x50]\n"
+ "str q6, [x23, #0x60]\n"
+ "str q2, [x23, #0x70]\n"
+ "str q30, [x23, #0x80]\n"
+ "str q26, [x23, #0x90]\n"
+ "str q22, [x23, #0xa0]\n"
+ "str q18, [x23, #0xb0]\n"
+ "str q5, [x23, #0xc0]\n"
+ "str q1, [x23, #0xd0]\n"
+ "str q29, [x23, #0xe0]\n"
+ "str q25, [x23, #0xf0]\n"
+ "str q21, [x23, #0x100]\n"
+ "str q17, [x23, #0x110]\n"
+ "str q4, [x23, #0x120]\n"
+ "str q0, [x23, #0x130]\n"
+ "str q28, [x23, #0x140]\n"
+ "str q24, [x23, #0x150]\n"
+ "str q20, [x23, #0x160]\n"
+ "str q16, [x23, #0x170]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Column loop skip
+ "cbz x24, 10f\n"
"cmp x24, #0x10\n"
+ "movi v16.4s, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "str q16, [x23, #0x60]\n"
+ "str q16, [x23, #0x70]\n"
+ "str q16, [x23, #0x80]\n"
+ "str q16, [x23, #0x90]\n"
+ "str q16, [x23, #0xa0]\n"
+ "str q16, [x23, #0xb0]\n"
+ "str q16, [x23, #0xc0]\n"
+ "str q16, [x23, #0xd0]\n"
+ "str q16, [x23, #0xe0]\n"
+ "str q16, [x23, #0xf0]\n"
+ "str q16, [x23, #0x100]\n"
+ "str q16, [x23, #0x110]\n"
+ "str q16, [x23, #0x120]\n"
+ "str q16, [x23, #0x130]\n"
+ "str q16, [x23, #0x140]\n"
+ "str q16, [x23, #0x150]\n"
+ "str q16, [x23, #0x160]\n"
+ "str q16, [x23, #0x170]\n"
"blt 5f\n"
"4:" // Main row loop: width 16 loop: loop
"ldr q31, [x25], #0x10\n"
- "ldr q30, [x23], #0x10\n"
+ "ldr q30, [x22], #0x10\n"
"sub x24, x24, #0x10\n"
- "cmp x24, #0x10\n"
- "ldr q29, [x22], #0x10\n"
+ "ldr q29, [x21], #0x10\n"
"ldr q28, [x20], #0x10\n"
+ "cmp x24, #0x10\n"
"ldr q27, [x25], #0x10\n"
- "ldr q26, [x23], #0x10\n"
- "ldr q25, [x22], #0x10\n"
+ "ldr q26, [x22], #0x10\n"
+ "ldr q25, [x21], #0x10\n"
"ldr q24, [x20], #0x10\n"
"ldr q23, [x25], #0x10\n"
- "ldr q22, [x23], #0x10\n"
- "ldr q21, [x22], #0x10\n"
+ "ldr q22, [x22], #0x10\n"
+ "ldr q21, [x21], #0x10\n"
"ldr q20, [x20], #0x10\n"
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q31, [x21, #0x0]\n"
- "str q27, [x21, #0x10]\n"
- "str q23, [x21, #0x20]\n"
- "str q19, [x21, #0x30]\n"
- "str q30, [x21, #0x60]\n"
- "str q26, [x21, #0x70]\n"
- "str q22, [x21, #0x80]\n"
- "str q18, [x21, #0x90]\n"
- "str q29, [x21, #0xc0]\n"
- "str q25, [x21, #0xd0]\n"
- "str q21, [x21, #0xe0]\n"
- "str q17, [x21, #0xf0]\n"
- "str q28, [x21, #0x120]\n"
- "str q24, [x21, #0x130]\n"
- "str q20, [x21, #0x140]\n"
- "str q16, [x21, #0x150]\n"
- "add x21, x21, #0x40\n"
+ "str q31, [x23, #0x0]\n"
+ "str q27, [x23, #0x10]\n"
+ "str q23, [x23, #0x20]\n"
+ "str q19, [x23, #0x30]\n"
+ "str q30, [x23, #0x60]\n"
+ "str q26, [x23, #0x70]\n"
+ "str q22, [x23, #0x80]\n"
+ "str q18, [x23, #0x90]\n"
+ "str q29, [x23, #0xc0]\n"
+ "str q25, [x23, #0xd0]\n"
+ "str q21, [x23, #0xe0]\n"
+ "str q17, [x23, #0xf0]\n"
+ "str q28, [x23, #0x120]\n"
+ "str q24, [x23, #0x130]\n"
+ "str q20, [x23, #0x140]\n"
+ "str q16, [x23, #0x150]\n"
+ "add x23, x23, #0x40\n"
"bge 4b\n"
"5:" // Main row loop: width 16 loop: skip
"cmp x24, #0x4\n"
"blt 7f\n"
"6:" // Main row loop: width 4 loop: loop
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x23], #0x10\n"
+ "ldr q18, [x22], #0x10\n"
"sub x24, x24, #0x4\n"
- "cmp x24, #0x4\n"
- "ldr q17, [x22], #0x10\n"
+ "ldr q17, [x21], #0x10\n"
"ldr q16, [x20], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x60]\n"
- "str q17, [x21, #0xc0]\n"
- "str q16, [x21, #0x120]\n"
- "add x21, x21, #0x10\n"
+ "cmp x24, #0x4\n"
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x60]\n"
+ "str q17, [x23, #0xc0]\n"
+ "str q16, [x23, #0x120]\n"
+ "add x23, x23, #0x10\n"
"bge 6b\n"
"7:" // Main row loop: width 4 loop: skip
"cmp x24, #0x1\n"
"blt 9f\n"
"8:" // Main row loop: width 1 loop: loop
"ldr s19, [x25], #0x4\n"
- "ldr s18, [x23], #0x4\n"
+ "ldr s18, [x22], #0x4\n"
"sub x24, x24, #0x1\n"
- "cmp x24, #0x1\n"
- "ldr s17, [x22], #0x4\n"
+ "ldr s17, [x21], #0x4\n"
"ldr s16, [x20], #0x4\n"
- "str s19, [x21, #0x0]\n"
- "str s18, [x21, #0x60]\n"
- "str s17, [x21, #0xc0]\n"
- "str s16, [x21, #0x120]\n"
- "add x21, x21, #0x4\n"
+ "cmp x24, #0x1\n"
+ "str s19, [x23, #0x0]\n"
+ "str s18, [x23, #0x60]\n"
+ "str s17, [x23, #0xc0]\n"
+ "str s16, [x23, #0x120]\n"
+ "add x23, x23, #0x4\n"
"bge 8b\n"
"9:" // Main row loop: width 1 loop: skip
+ "10:" // Main row loop: odd col skip
"cmp %x[height], #0x4\n"
"add %x[out], %x[out], #0x180\n"
"bge 1b\n"
- "cbz %x[height], 20f\n"
- "10:" // Main loop skip
- "11:" // Tail row loop: Head
+ "cbz %x[height], 22f\n"
+ "11:" // Main loop skip
+ "12:" // Tail row loop: Head
"mov x20, %x[width]\n"
"mov x25, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x20, #0x18\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x21, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
- "blt 13f\n"
- "12:" // Tail row loop: Column loop
+ "blt 14f\n"
+ "13:" // Tail row loop: Column loop
"ldr q21, [x25], #0x10\n"
- "ldr q20, [x25], #0x10\n"
"sub x20, x20, #0x18\n"
- "cmp x20, #0x18\n"
+ "ldr q20, [x25], #0x10\n"
"ldr q19, [x25], #0x10\n"
+ "cmp x20, #0x18\n"
"ldr q18, [x25], #0x10\n"
"ldr q17, [x25], #0x10\n"
"ldr q16, [x25], #0x10\n"
- "str q21, [x21, #0x0]\n"
- "str q20, [x21, #0x10]\n"
- "str q19, [x21, #0x20]\n"
- "str q18, [x21, #0x30]\n"
- "str q17, [x21, #0x40]\n"
- "str q16, [x21, #0x50]\n"
- "add x21, x21, %x[out_stride]\n"
- "bge 12b\n"
- "13:" // Tail row loop: Column loop skip
+ "str q21, [x23, #0x0]\n"
+ "str q20, [x23, #0x10]\n"
+ "str q19, [x23, #0x20]\n"
+ "str q18, [x23, #0x30]\n"
+ "str q17, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "bge 13b\n"
+ "14:" // Tail row loop: Column loop skip
+ "cbz x20, 21f\n"
"cmp x20, #0x10\n"
- "blt 15f\n"
- "14:" // Tail row loop: width 16 loop: loop
+ "movi v16.4s, #0x0\n"
+ "str q16, [x23, #0x0]\n"
+ "str q16, [x23, #0x10]\n"
+ "str q16, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "str q16, [x23, #0x40]\n"
+ "str q16, [x23, #0x50]\n"
+ "blt 16f\n"
+ "15:" // Tail row loop: width 16 loop: loop
"ldr q19, [x25], #0x10\n"
- "ldr q18, [x25], #0x10\n"
"sub x20, x20, #0x10\n"
- "cmp x20, #0x10\n"
+ "ldr q18, [x25], #0x10\n"
"ldr q17, [x25], #0x10\n"
+ "cmp x20, #0x10\n"
"ldr q16, [x25], #0x10\n"
- "str q19, [x21, #0x0]\n"
- "str q18, [x21, #0x10]\n"
- "str q17, [x21, #0x20]\n"
- "str q16, [x21, #0x30]\n"
- "add x21, x21, #0x40\n"
- "bge 14b\n"
- "15:" // Tail row loop: width 16 loop: skip
+ "str q19, [x23, #0x0]\n"
+ "str q18, [x23, #0x10]\n"
+ "str q17, [x23, #0x20]\n"
+ "str q16, [x23, #0x30]\n"
+ "add x23, x23, #0x40\n"
+ "bge 15b\n"
+ "16:" // Tail row loop: width 16 loop: skip
"cmp x20, #0x4\n"
- "blt 17f\n"
- "16:" // Tail row loop: width 4 loop: loop
+ "blt 18f\n"
+ "17:" // Tail row loop: width 4 loop: loop
"ldr q16, [x25], #0x10\n"
"sub x20, x20, #0x4\n"
"cmp x20, #0x4\n"
- "str q16, [x21, #0x0]\n"
- "add x21, x21, #0x10\n"
- "bge 16b\n"
- "17:" // Tail row loop: width 4 loop: skip
+ "str q16, [x23, #0x0]\n"
+ "add x23, x23, #0x10\n"
+ "bge 17b\n"
+ "18:" // Tail row loop: width 4 loop: skip
"cmp x20, #0x1\n"
- "blt 19f\n"
- "18:" // Tail row loop: width 1 loop: loop
+ "blt 20f\n"
+ "19:" // Tail row loop: width 1 loop: loop
"ldr s16, [x25], #0x4\n"
"sub x20, x20, #0x1\n"
"cmp x20, #0x1\n"
- "str s16, [x21, #0x0]\n"
- "add x21, x21, #0x4\n"
- "bge 18b\n"
- "19:" // Tail row loop: width 1 loop: skip
+ "str s16, [x23, #0x0]\n"
+ "add x23, x23, #0x4\n"
+ "bge 19b\n"
+ "20:" // Tail row loop: width 1 loop: skip
+ "21:" // Tail row loop: odd col skip
"cmp %x[height], #0x1\n"
"add %x[out], %x[out], #0x60\n"
- "bge 11b\n"
- "20:" // Done
+ "bge 12b\n"
+ "22:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22", "x23", "x24", "x25"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp
index a4d480c405..4c7b367ed9 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -28,7 +28,7 @@
namespace {
-void sme_transpose_interleave_16VL(uint32_t *out, const uint32_t *in, size_t width, size_t in_stride, size_t height)
+void sme_transpose_interleave_16VL(uint16_t *out, const uint16_t *in, size_t width, size_t in_stride, size_t height)
{
size_t out_stride = 16 * height * sme::get_vector_length<uint8_t>();
@@ -36,82 +36,82 @@ void sme_transpose_interleave_16VL(uint32_t *out, const uint32_t *in, size_t wid
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p7.b\n"
"1:" // Main row loop: Head
- "mov x23, %x[in]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x24, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
- "mov x21, %x[width]\n"
+ "mov x22, %x[width]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z31.s }, p0/Z, [x23]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z30.s }, p0/Z, [x23, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z29.s }, p0/Z, [x23, #2, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z28.s }, p0/Z, [x23, #3, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z27.s }, p0/Z, [x23, #4, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z26.s }, p0/Z, [x23, #5, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z25.s }, p0/Z, [x23, #6, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z24.s }, p0/Z, [x23, #7, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p6.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p5.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p4.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p3.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p2.s, XZR, x20\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "decw x20\n"
- "addvl x23, x23, #16\n"
- "ld1w { z23.s }, p0/Z, [x23, #-8, MUL VL]\n"
- "whilelt p0.s, XZR, x20\n"
- "mov x20, x22\n"
- "ld1w { z22.s }, p6/Z, [x23, #-7, MUL VL]\n"
- "decw x21, ALL, MUL #16\n"
- "ld1w { z21.s }, p5/Z, [x23, #-6, MUL VL]\n"
- "cmp x21, #0x0\n"
- "ld1w { z20.s }, p4/Z, [x23, #-5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z19.s }, p3/Z, [x23, #-4, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #-3, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x23, #-2, MUL VL]\n"
- "ld1w { z16.s }, p0/Z, [x23, #-1, MUL VL]\n"
- "st1w { z31.s }, p7, [x20]\n"
- "st1w { z30.s }, p7, [x20, #1, MUL VL]\n"
- "st1w { z29.s }, p7, [x20, #2, MUL VL]\n"
- "st1w { z28.s }, p7, [x20, #3, MUL VL]\n"
- "st1w { z27.s }, p7, [x20, #4, MUL VL]\n"
- "st1w { z26.s }, p7, [x20, #5, MUL VL]\n"
- "st1w { z25.s }, p7, [x20, #6, MUL VL]\n"
- "st1w { z24.s }, p7, [x20, #7, MUL VL]\n"
+ "mov x21, x22\n"
+ "mov x20, x23\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z31.h }, p0/Z, [x24]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z30.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z29.h }, p0/Z, [x24, #2, MUL VL]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z28.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z27.h }, p0/Z, [x24, #4, MUL VL]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z26.h }, p1/Z, [x24, #5, MUL VL]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z25.h }, p0/Z, [x24, #6, MUL VL]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z24.h }, p1/Z, [x24, #7, MUL VL]\n"
+ "whilelt p6.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p5.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p4.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p3.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p2.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "addvl x24, x24, #16\n"
+ "dech x22, ALL, MUL #16\n"
+ "ld1h { z23.h }, p0/Z, [x24, #-8, MUL VL]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "cmp x22, #0x0\n"
+ "ld1h { z22.h }, p6/Z, [x24, #-7, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "ld1h { z21.h }, p5/Z, [x24, #-6, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x24, #-5, MUL VL]\n"
+ "ld1h { z19.h }, p3/Z, [x24, #-4, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x24, #-3, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x24, #-2, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x24, #-1, MUL VL]\n"
+ "st1h { z31.h }, p7, [x20]\n"
+ "st1h { z30.h }, p7, [x20, #1, MUL VL]\n"
+ "st1h { z29.h }, p7, [x20, #2, MUL VL]\n"
+ "st1h { z28.h }, p7, [x20, #3, MUL VL]\n"
+ "st1h { z27.h }, p7, [x20, #4, MUL VL]\n"
+ "st1h { z26.h }, p7, [x20, #5, MUL VL]\n"
+ "st1h { z25.h }, p7, [x20, #6, MUL VL]\n"
+ "st1h { z24.h }, p7, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1w { z23.s }, p7, [x20, #-8, MUL VL]\n"
- "st1w { z22.s }, p7, [x20, #-7, MUL VL]\n"
- "st1w { z21.s }, p7, [x20, #-6, MUL VL]\n"
- "st1w { z20.s }, p7, [x20, #-5, MUL VL]\n"
- "st1w { z19.s }, p7, [x20, #-4, MUL VL]\n"
- "st1w { z18.s }, p7, [x20, #-3, MUL VL]\n"
- "st1w { z17.s }, p7, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p7, [x20, #-1, MUL VL]\n"
+ "st1h { z23.h }, p7, [x20, #-8, MUL VL]\n"
+ "st1h { z22.h }, p7, [x20, #-7, MUL VL]\n"
+ "st1h { z21.h }, p7, [x20, #-6, MUL VL]\n"
+ "st1h { z20.h }, p7, [x20, #-5, MUL VL]\n"
+ "st1h { z19.h }, p7, [x20, #-4, MUL VL]\n"
+ "st1h { z18.h }, p7, [x20, #-3, MUL VL]\n"
+ "st1h { z17.h }, p7, [x20, #-2, MUL VL]\n"
+ "st1h { z16.h }, p7, [x20, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -120,7 +120,7 @@ void sme_transpose_interleave_16VL(uint32_t *out, const uint32_t *in, size_t wid
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
@@ -131,13 +131,26 @@ void Transform<16, 1, true, VLType::SME>(
float *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
{
sme_transpose_interleave_16VL(
- reinterpret_cast<uint32_t *>(out),
- reinterpret_cast<const uint32_t *>(in + k0 * stride + x0),
- (xmax-x0) * sizeof(float) / 4,
+ reinterpret_cast<uint16_t *>(out),
+ reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+ (xmax-x0) * sizeof(float) / 2,
stride * sizeof(float),
(kmax-k0)
);
}
+template<>
+void Transform<16, 1, true, VLType::SME>(
+ __fp16 *out, const __fp16 *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+ sme_transpose_interleave_16VL(
+ reinterpret_cast<uint16_t *>(out),
+ reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+ (xmax-x0) * sizeof(__fp16) / 2,
+ stride * sizeof(__fp16),
+ (kmax-k0)
+ );
+}
+
#endif // defined(ARM_COMPUTE_ENABLE_SME)
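Retyping sme_transpose_interleave_16VL from uint32_t to uint16_t works because the kernel only moves whole predicated rows and never inspects element values, so the finest granularity both callers share suffices: the float wrapper now passes (xmax-x0)*sizeof(float)/2 units, i.e. two uint16_t halves per float, while the new __fp16 specialization passes one unit per element. A scalar model of the retyped worker (a sketch, not the SME code; 'block' stands for 16 vectors' worth of uint16_t elements):

#include <cstdint>
#include <cstddef>

// Element-type-agnostic transpose-interleave (sketch): each input row is
// cut into blocks of 16 vectors and becomes one row of the packed panel.
void transpose_interleave_u16(uint16_t *out, const uint16_t *in,
                              size_t width, size_t in_stride, size_t height,
                              size_t block /* 16 * (VL in uint16_t) */) {
    const size_t out_stride = block * height;  // one packed panel per block
    for (size_t row = 0; row < height; ++row)
        for (size_t col = 0; col < width; ++col)
            out[(col / block) * out_stride + row * block + col % block] =
                in[row * in_stride + col];
}

Under that model the float wrapper's width doubling falls out naturally: a run of N floats is just a run of 2N uint16_t halves copied in order.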
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp
index 552abfc1c6..dca0031b55 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,92 +42,92 @@ void sme_transpose_interleave_16VL_1x4(uint8_t *out, const uint8_t *in, size_t w
".inst 0xd503477f // SMSTART ZA\n"
"ptrue p4.b\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
+ "mov x27, %x[in]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
+ "add x26, x27, %x[in_stride]\n"
+ "mov x25, %x[out]\n"
+ "add x24, x26, %x[in_stride]\n"
+ "mov x23, %x[width]\n"
+ "add x22, x24, %x[in_stride]\n"
"csel x24, x24, %x[pad_row], GE\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "mov x22, %x[out]\n"
- "csel x25, x25, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p3.b, XZR, x20\n"
- "ld1b { z20.b }, p3/Z, [x26]\n"
- "decb x20\n"
- "whilelt p2.b, XZR, x20\n"
- "ld1b { z18.b }, p2/Z, [x26, #1, MUL VL]\n"
- "decb x20\n"
- "whilelt p1.b, XZR, x20\n"
- "ld1b { z17.b }, p3/Z, [x25]\n"
- "decb x20\n"
- "whilelt p0.b, XZR, x20\n"
- "ld1b { z19.b }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1b { z16.b }, p3/Z, [x24]\n"
- "zip1 z25.b, z20.b, z16.b\n"
- "zip2 z24.b, z20.b, z16.b\n"
- "mov x20, x22\n"
- "ld1b { z16.b }, p2/Z, [x24, #1, MUL VL]\n"
- "zip1 z22.b, z18.b, z16.b\n"
- "zip2 z21.b, z18.b, z16.b\n"
- "decw x21, ALL, MUL #16\n"
- "ld1b { z16.b }, p3/Z, [x23]\n"
- "zip1 z18.b, z17.b, z16.b\n"
- "zip2 z17.b, z17.b, z16.b\n"
- "cmp x21, #0x0\n"
- "ld1b { z16.b }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z20.b, z19.b, z16.b\n"
- "zip2 z16.b, z19.b, z16.b\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1b { z19.b }, p1/Z, [x26, #2, MUL VL]\n"
- "zip1 z23.b, z25.b, z18.b\n"
- "zip2 z0.b, z25.b, z18.b\n"
- "ld1b { z18.b }, p0/Z, [x26, #3, MUL VL]\n"
- "zip1 z31.b, z24.b, z17.b\n"
- "zip2 z30.b, z24.b, z17.b\n"
+ "mov x21, x23\n"
+ "mov x20, x25\n"
+ "whilelt p3.b, XZR, x21\n"
+ "decb x21\n"
+ "whilelt p2.b, XZR, x21\n"
+ "decb x21\n"
+ "ld1b { z21.b }, p3/Z, [x27]\n"
+ "whilelt p1.b, XZR, x21\n"
+ "decb x21\n"
+ "ld1b { z24.b }, p2/Z, [x27, #1, MUL VL]\n"
+ "whilelt p0.b, XZR, x21\n"
+ "ld1b { z23.b }, p3/Z, [x26]\n"
+ "decw x23, ALL, MUL #16\n"
+ "ld1b { z20.b }, p2/Z, [x26, #1, MUL VL]\n"
+ "cmp x23, #0x0\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1b { z19.b }, p3/Z, [x24]\n"
+ "ld1b { z17.b }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1b { z16.b }, p3/Z, [x22]\n"
+ "ld1b { z18.b }, p2/Z, [x22, #1, MUL VL]\n"
+ "zip1 z22.b, z21.b, z19.b\n"
+ "zip2 z21.b, z21.b, z19.b\n"
+ "ld1b { z28.b }, p1/Z, [x27, #2, MUL VL]\n"
+ "zip1 z1.b, z24.b, z17.b\n"
+ "zip2 z0.b, z24.b, z17.b\n"
+ "ld1b { z27.b }, p0/Z, [x27, #3, MUL VL]\n"
+ "zip1 z17.b, z23.b, z16.b\n"
+ "zip2 z16.b, z23.b, z16.b\n"
+ "addvl x27, x27, #4\n"
+ "ld1b { z26.b }, p1/Z, [x26, #2, MUL VL]\n"
+ "zip1 z31.b, z20.b, z18.b\n"
+ "zip2 z30.b, z20.b, z18.b\n"
+ "ld1b { z25.b }, p0/Z, [x26, #3, MUL VL]\n"
"addvl x26, x26, #4\n"
- "ld1b { z17.b }, p1/Z, [x25, #2, MUL VL]\n"
- "zip1 z29.b, z22.b, z20.b\n"
- "zip2 z28.b, z22.b, z20.b\n"
- "ld1b { z22.b }, p0/Z, [x25, #3, MUL VL]\n"
- "zip1 z27.b, z21.b, z16.b\n"
- "zip2 z26.b, z21.b, z16.b\n"
- "addvl x25, x25, #4\n"
- "ld1b { z16.b }, p1/Z, [x24, #2, MUL VL]\n"
- "zip1 z21.b, z19.b, z16.b\n"
- "zip2 z20.b, z19.b, z16.b\n"
- "ld1b { z16.b }, p0/Z, [x24, #3, MUL VL]\n"
- "zip1 z25.b, z18.b, z16.b\n"
- "zip2 z24.b, z18.b, z16.b\n"
+ "ld1b { z20.b }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1b { z19.b }, p0/Z, [x24, #3, MUL VL]\n"
+ "zip1 z18.b, z22.b, z17.b\n"
+ "zip2 z24.b, z22.b, z17.b\n"
"addvl x24, x24, #4\n"
- "ld1b { z16.b }, p1/Z, [x23, #2, MUL VL]\n"
- "zip1 z19.b, z17.b, z16.b\n"
- "zip2 z18.b, z17.b, z16.b\n"
- "ld1b { z16.b }, p0/Z, [x23, #3, MUL VL]\n"
- "zip1 z17.b, z22.b, z16.b\n"
- "zip2 z16.b, z22.b, z16.b\n"
- "addvl x23, x23, #4\n"
- "st1b { z23.b }, p4, [x20]\n"
- "zip1 z23.b, z21.b, z19.b\n"
- "zip2 z22.b, z21.b, z19.b\n"
- "st1b { z0.b }, p4, [x20, #1, MUL VL]\n"
- "zip1 z21.b, z20.b, z18.b\n"
- "zip2 z20.b, z20.b, z18.b\n"
- "st1b { z31.b }, p4, [x20, #2, MUL VL]\n"
- "zip1 z19.b, z25.b, z17.b\n"
- "zip2 z18.b, z25.b, z17.b\n"
- "st1b { z30.b }, p4, [x20, #3, MUL VL]\n"
- "zip1 z17.b, z24.b, z16.b\n"
- "zip2 z16.b, z24.b, z16.b\n"
- "st1b { z29.b }, p4, [x20, #4, MUL VL]\n"
- "st1b { z28.b }, p4, [x20, #5, MUL VL]\n"
- "st1b { z27.b }, p4, [x20, #6, MUL VL]\n"
- "st1b { z26.b }, p4, [x20, #7, MUL VL]\n"
+ "ld1b { z17.b }, p1/Z, [x22, #2, MUL VL]\n"
+ "zip1 z23.b, z21.b, z16.b\n"
+ "zip2 z22.b, z21.b, z16.b\n"
+ "ld1b { z16.b }, p0/Z, [x22, #3, MUL VL]\n"
+ "zip1 z21.b, z28.b, z20.b\n"
+ "zip2 z29.b, z28.b, z20.b\n"
+ "addvl x22, x22, #4\n"
+ "zip1 z28.b, z27.b, z19.b\n"
+ "zip2 z27.b, z27.b, z19.b\n"
+ "zip1 z20.b, z26.b, z17.b\n"
+ "zip2 z19.b, z26.b, z17.b\n"
+ "st1b { z18.b }, p4, [x20]\n"
+ "zip1 z18.b, z25.b, z16.b\n"
+ "zip2 z26.b, z25.b, z16.b\n"
+ "st1b { z24.b }, p4, [x20, #1, MUL VL]\n"
+ "zip1 z17.b, z1.b, z31.b\n"
+ "zip2 z16.b, z1.b, z31.b\n"
+ "st1b { z23.b }, p4, [x20, #2, MUL VL]\n"
+ "zip1 z25.b, z0.b, z30.b\n"
+ "zip2 z24.b, z0.b, z30.b\n"
+ "st1b { z22.b }, p4, [x20, #3, MUL VL]\n"
+ "zip1 z23.b, z21.b, z20.b\n"
+ "zip2 z22.b, z21.b, z20.b\n"
+ "zip1 z21.b, z29.b, z19.b\n"
+ "zip2 z20.b, z29.b, z19.b\n"
+ "st1b { z17.b }, p4, [x20, #4, MUL VL]\n"
+ "zip1 z19.b, z28.b, z18.b\n"
+ "zip2 z18.b, z28.b, z18.b\n"
+ "st1b { z16.b }, p4, [x20, #5, MUL VL]\n"
+ "zip1 z17.b, z27.b, z26.b\n"
+ "zip2 z16.b, z27.b, z26.b\n"
+ "st1b { z25.b }, p4, [x20, #6, MUL VL]\n"
+ "st1b { z24.b }, p4, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
"st1b { z23.b }, p4, [x20, #-8, MUL VL]\n"
"st1b { z22.b }, p4, [x20, #-7, MUL VL]\n"
@@ -145,7 +145,7 @@ void sme_transpose_interleave_16VL_1x4(uint8_t *out, const uint8_t *in, size_t w
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
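The 1x4 variant interleaves four rows byte-wise with a two-level zip tree: rows 0/2 and 1/3 are zipped first, then those results are zipped again, so each group of four consecutive output bytes holds one column from rows 0..3; rows past the end of the tensor are redirected to pad_row by the csel sequence. A scalar equivalent (sketch):

#include <cstdint>
#include <cstddef>

// 1x4 byte interleave (sketch): out = r0[c], r1[c], r2[c], r3[c] for each
// column c; callers substitute a zeroed pad_row for rows beyond 'height',
// matching the csel/pad_row logic in the asm.
void interleave_1x4(uint8_t *out, const uint8_t *rows[4], size_t width) {
    for (size_t c = 0; c < width; ++c)
        for (int r = 0; r < 4; ++r)
            *out++ = rows[r][c];
}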
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp
index 2756327815..896288cdda 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_16VL_2x2_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,133 +43,133 @@ void sme_transpose_interleave_16VL_2x2_fp32bf16(bfloat16 *out, const float *in,
"ptrue p7.b\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
- "add x24, x25, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x24, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
"mov x23, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
"csel x24, x24, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
"mov x22, %x[width]\n"
"2:" // Main row loop: Column loop
"mov x21, x22\n"
+ "mov x20, x23\n"
"whilelt p1.s, XZR, x21\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
- ".inst 0x658abe00 // bfcvt z0.h, p7/M, z16.s\n"
"decw x21\n"
"whilelt p0.s, XZR, x21\n"
- "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
- ".inst 0x658abe1f // bfcvt z31.h, p7/M, z16.s\n"
"decw x21\n"
+ "ld1w { z16.s }, p1/Z, [x25]\n"
"whilelt p6.s, XZR, x21\n"
- "ld1w { z16.s }, p6/Z, [x25, #2, MUL VL]\n"
- ".inst 0x658abe1e // bfcvt z30.h, p7/M, z16.s\n"
"decw x21\n"
+ "ld1w { z18.s }, p0/Z, [x25, #1, MUL VL]\n"
"whilelt p5.s, XZR, x21\n"
- "ld1w { z16.s }, p5/Z, [x25, #3, MUL VL]\n"
- ".inst 0x658abe1d // bfcvt z29.h, p7/M, z16.s\n"
"decw x21\n"
+ "ld1w { z17.s }, p6/Z, [x25, #2, MUL VL]\n"
"whilelt p4.s, XZR, x21\n"
- "ld1w { z16.s }, p4/Z, [x25, #4, MUL VL]\n"
- ".inst 0x658abe1c // bfcvt z28.h, p7/M, z16.s\n"
"decw x21\n"
+ "ld1w { z19.s }, p5/Z, [x25, #3, MUL VL]\n"
+ ".inst 0x658abe03 // bfcvt z3.h, p7/M, z16.s\n"
"whilelt p3.s, XZR, x21\n"
- "ld1w { z16.s }, p3/Z, [x25, #5, MUL VL]\n"
- ".inst 0x658abe1b // bfcvt z27.h, p7/M, z16.s\n"
"decw x21\n"
+ "ld1w { z16.s }, p4/Z, [x25, #4, MUL VL]\n"
+ ".inst 0x658abe42 // bfcvt z2.h, p7/M, z18.s\n"
"whilelt p2.s, XZR, x21\n"
- "ld1w { z16.s }, p2/Z, [x25, #6, MUL VL]\n"
- ".inst 0x658abe1a // bfcvt z26.h, p7/M, z16.s\n"
"decw x21\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
+ "ld1w { z18.s }, p3/Z, [x25, #5, MUL VL]\n"
+ ".inst 0x658abe21 // bfcvt z1.h, p7/M, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x25, #6, MUL VL]\n"
+ ".inst 0x658abe60 // bfcvt z0.h, p7/M, z19.s\n"
+ "decw x22, ALL, MUL #16\n"
+ "add x23, x23, %x[out_stride]\n"
+ ".inst 0x658abe1f // bfcvt z31.h, p7/M, z16.s\n"
+ "ld1w { z19.s }, p1/Z, [x24]\n"
"whilelt p1.s, XZR, x21\n"
- ".inst 0x648abe00 // bfcvtnt z0.h, p7/M, z16.s\n"
"decw x21\n"
"ld1w { z16.s }, p1/Z, [x25, #7, MUL VL]\n"
"addvl x25, x25, #16\n"
- ".inst 0x658abe19 // bfcvt z25.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x658abe5e // bfcvt z30.h, p7/M, z18.s\n"
+ ".inst 0x658abe3d // bfcvt z29.h, p7/M, z17.s\n"
+ "ld1w { z18.s }, p0/Z, [x24, #1, MUL VL]\n"
"whilelt p0.s, XZR, x21\n"
"decw x21\n"
- ".inst 0x648abe1f // bfcvtnt z31.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #-8, MUL VL]\n"
- ".inst 0x658abe18 // bfcvt z24.h, p7/M, z16.s\n"
- "mov x20, x23\n"
- "decw x22, ALL, MUL #16\n"
- "ld1w { z16.s }, p6/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x25, #-8, MUL VL]\n"
+ ".inst 0x648abe63 // bfcvtnt z3.h, p7/M, z19.s\n"
+ ".inst 0x658abe1c // bfcvt z28.h, p7/M, z16.s\n"
+ "ld1w { z19.s }, p6/Z, [x24, #2, MUL VL]\n"
"whilelt p6.s, XZR, x21\n"
"decw x21\n"
- ".inst 0x648abe1e // bfcvtnt z30.h, p7/M, z16.s\n"
"ld1w { z16.s }, p6/Z, [x25, #-7, MUL VL]\n"
- ".inst 0x658abe17 // bfcvt z23.h, p7/M, z16.s\n"
- "add x23, x23, %x[out_stride]\n"
- "ld1w { z16.s }, p5/Z, [x24, #3, MUL VL]\n"
+ ".inst 0x648abe42 // bfcvtnt z2.h, p7/M, z18.s\n"
+ "ld1w { z18.s }, p5/Z, [x24, #3, MUL VL]\n"
"whilelt p5.s, XZR, x21\n"
"decw x21\n"
- ".inst 0x648abe1d // bfcvtnt z29.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p5/Z, [x25, #-6, MUL VL]\n"
- ".inst 0x658abe16 // bfcvt z22.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x24, #4, MUL VL]\n"
+ ".inst 0x658abe3b // bfcvt z27.h, p7/M, z17.s\n"
+ "ld1w { z17.s }, p5/Z, [x25, #-6, MUL VL]\n"
+ ".inst 0x648abe61 // bfcvtnt z1.h, p7/M, z19.s\n"
+ "ld1w { z19.s }, p4/Z, [x24, #4, MUL VL]\n"
"whilelt p4.s, XZR, x21\n"
"decw x21\n"
- ".inst 0x648abe1c // bfcvtnt z28.h, p7/M, z16.s\n"
+ ".inst 0x658abe1a // bfcvt z26.h, p7/M, z16.s\n"
"ld1w { z16.s }, p4/Z, [x25, #-5, MUL VL]\n"
- ".inst 0x658abe15 // bfcvt z21.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x24, #5, MUL VL]\n"
+ ".inst 0x648abe40 // bfcvtnt z0.h, p7/M, z18.s\n"
+ "ld1w { z18.s }, p3/Z, [x24, #5, MUL VL]\n"
"whilelt p3.s, XZR, x21\n"
"decw x21\n"
- ".inst 0x648abe1b // bfcvtnt z27.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x25, #-4, MUL VL]\n"
- ".inst 0x658abe14 // bfcvt z20.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24, #6, MUL VL]\n"
+ ".inst 0x658abe39 // bfcvt z25.h, p7/M, z17.s\n"
+ "ld1w { z17.s }, p3/Z, [x25, #-4, MUL VL]\n"
+ ".inst 0x648abe7f // bfcvtnt z31.h, p7/M, z19.s\n"
+ "ld1w { z19.s }, p2/Z, [x24, #6, MUL VL]\n"
"whilelt p2.s, XZR, x21\n"
"decw x21\n"
- ".inst 0x648abe1a // bfcvtnt z26.h, p7/M, z16.s\n"
+ ".inst 0x658abe18 // bfcvt z24.h, p7/M, z16.s\n"
"ld1w { z16.s }, p2/Z, [x25, #-3, MUL VL]\n"
- ".inst 0x658abe13 // bfcvt z19.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24, #7, MUL VL]\n"
+ ".inst 0x648abe5e // bfcvtnt z30.h, p7/M, z18.s\n"
+ "ld1w { z18.s }, p1/Z, [x24, #7, MUL VL]\n"
"whilelt p1.s, XZR, x21\n"
"decw x21\n"
- ".inst 0x648abe19 // bfcvtnt z25.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #-2, MUL VL]\n"
+ ".inst 0x658abe37 // bfcvt z23.h, p7/M, z17.s\n"
+ "ld1w { z17.s }, p1/Z, [x25, #-2, MUL VL]\n"
"addvl x24, x24, #16\n"
- ".inst 0x658abe12 // bfcvt z18.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x24, #-8, MUL VL]\n"
+ ".inst 0x648abe7d // bfcvtnt z29.h, p7/M, z19.s\n"
+ ".inst 0x658abe16 // bfcvt z22.h, p7/M, z16.s\n"
+ "ld1w { z19.s }, p0/Z, [x24, #-8, MUL VL]\n"
"whilelt p0.s, XZR, x21\n"
"cmp x22, #0x0\n"
- ".inst 0x648abe18 // bfcvtnt z24.h, p7/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x25, #-1, MUL VL]\n"
- ".inst 0x658abe11 // bfcvt z17.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p6/Z, [x24, #-7, MUL VL]\n"
- ".inst 0x648abe17 // bfcvtnt z23.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p5/Z, [x24, #-6, MUL VL]\n"
- ".inst 0x648abe16 // bfcvtnt z22.h, p7/M, z16.s\n"
+ ".inst 0x648abe5c // bfcvtnt z28.h, p7/M, z18.s\n"
+ ".inst 0x658abe35 // bfcvt z21.h, p7/M, z17.s\n"
+ "ld1w { z18.s }, p6/Z, [x24, #-7, MUL VL]\n"
+ "ld1w { z17.s }, p5/Z, [x24, #-6, MUL VL]\n"
+ ".inst 0x648abe7b // bfcvtnt z27.h, p7/M, z19.s\n"
+ ".inst 0x658abe14 // bfcvt z20.h, p7/M, z16.s\n"
"ld1w { z16.s }, p4/Z, [x24, #-5, MUL VL]\n"
- ".inst 0x648abe15 // bfcvtnt z21.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x24, #-4, MUL VL]\n"
- ".inst 0x648abe14 // bfcvtnt z20.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24, #-3, MUL VL]\n"
- ".inst 0x648abe13 // bfcvtnt z19.h, p7/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24, #-2, MUL VL]\n"
- ".inst 0x648abe12 // bfcvtnt z18.h, p7/M, z16.s\n"
+ "ld1w { z19.s }, p3/Z, [x24, #-4, MUL VL]\n"
+ ".inst 0x648abe5a // bfcvtnt z26.h, p7/M, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, #-3, MUL VL]\n"
+ ".inst 0x648abe39 // bfcvtnt z25.h, p7/M, z17.s\n"
+ "ld1w { z17.s }, p1/Z, [x24, #-2, MUL VL]\n"
+ ".inst 0x648abe18 // bfcvtnt z24.h, p7/M, z16.s\n"
"ld1w { z16.s }, p0/Z, [x24, #-1, MUL VL]\n"
- "st1h { z0.h }, p7, [x20]\n"
- ".inst 0x648abe11 // bfcvtnt z17.h, p7/M, z16.s\n"
- "st1h { z31.h }, p7, [x20, #1, MUL VL]\n"
- "st1h { z30.h }, p7, [x20, #2, MUL VL]\n"
- "st1h { z29.h }, p7, [x20, #3, MUL VL]\n"
- "st1h { z28.h }, p7, [x20, #4, MUL VL]\n"
- "st1h { z27.h }, p7, [x20, #5, MUL VL]\n"
- "st1h { z26.h }, p7, [x20, #6, MUL VL]\n"
- "st1h { z25.h }, p7, [x20, #7, MUL VL]\n"
+ "st1h { z3.h }, p7, [x20]\n"
+ ".inst 0x648abe77 // bfcvtnt z23.h, p7/M, z19.s\n"
+ "st1h { z2.h }, p7, [x20, #1, MUL VL]\n"
+ ".inst 0x648abe56 // bfcvtnt z22.h, p7/M, z18.s\n"
+ "st1h { z1.h }, p7, [x20, #2, MUL VL]\n"
+ ".inst 0x648abe35 // bfcvtnt z21.h, p7/M, z17.s\n"
+ "st1h { z0.h }, p7, [x20, #3, MUL VL]\n"
+ ".inst 0x648abe14 // bfcvtnt z20.h, p7/M, z16.s\n"
+ "st1h { z31.h }, p7, [x20, #4, MUL VL]\n"
+ "st1h { z30.h }, p7, [x20, #5, MUL VL]\n"
+ "st1h { z29.h }, p7, [x20, #6, MUL VL]\n"
+ "st1h { z28.h }, p7, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1h { z24.h }, p7, [x20, #-8, MUL VL]\n"
- "st1h { z23.h }, p7, [x20, #-7, MUL VL]\n"
- "st1h { z22.h }, p7, [x20, #-6, MUL VL]\n"
- "st1h { z21.h }, p7, [x20, #-5, MUL VL]\n"
- "st1h { z20.h }, p7, [x20, #-4, MUL VL]\n"
- "st1h { z19.h }, p7, [x20, #-3, MUL VL]\n"
- "st1h { z18.h }, p7, [x20, #-2, MUL VL]\n"
- "st1h { z17.h }, p7, [x20, #-1, MUL VL]\n"
+ "st1h { z27.h }, p7, [x20, #-8, MUL VL]\n"
+ "st1h { z26.h }, p7, [x20, #-7, MUL VL]\n"
+ "st1h { z25.h }, p7, [x20, #-6, MUL VL]\n"
+ "st1h { z24.h }, p7, [x20, #-5, MUL VL]\n"
+ "st1h { z23.h }, p7, [x20, #-4, MUL VL]\n"
+ "st1h { z22.h }, p7, [x20, #-3, MUL VL]\n"
+ "st1h { z21.h }, p7, [x20, #-2, MUL VL]\n"
+ "st1h { z20.h }, p7, [x20, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp
index a6ddb8fec0..1ece4005d6 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,34 +39,34 @@ void sme_transpose_interleave_1VL(uint16_t *out, const uint16_t *in, size_t widt
"blt 6f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x21, ALL, MUL #4\n"
- "add x20, x24, %x[in_stride]\n"
- "cmp x23, x21\n"
- "add %x[in], x20, %x[in_stride]\n"
+ "mov x25, %x[width]\n"
+ "add x24, x26, %x[in_stride]\n"
+ "cnth x23, ALL, MUL #4\n"
+ "add x21, x24, %x[in_stride]\n"
+ "cmp x25, x23\n"
+ "add x20, x21, %x[in_stride]\n"
"mov x22, %x[out]\n"
+ "add %x[in], x20, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
"ld1h { z31.h }, p1/Z, [x26]\n"
- "sub x23, x23, x21\n"
- "cmp x23, x21\n"
+ "sub x25, x25, x23\n"
"ld1h { z30.h }, p1/Z, [x26, #1, MUL VL]\n"
+ "cmp x25, x23\n"
"ld1h { z29.h }, p1/Z, [x26, #2, MUL VL]\n"
"ld1h { z28.h }, p1/Z, [x26, #3, MUL VL]\n"
"addvl x26, x26, #4\n"
- "ld1h { z27.h }, p1/Z, [x25]\n"
- "ld1h { z26.h }, p1/Z, [x25, #1, MUL VL]\n"
- "ld1h { z25.h }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1h { z24.h }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- "ld1h { z23.h }, p1/Z, [x24]\n"
- "ld1h { z22.h }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1h { z21.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z20.h }, p1/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z27.h }, p1/Z, [x24]\n"
+ "ld1h { z26.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z25.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z24.h }, p1/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
+ "ld1h { z23.h }, p1/Z, [x21]\n"
+ "ld1h { z22.h }, p1/Z, [x21, #1, MUL VL]\n"
+ "ld1h { z21.h }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1h { z20.h }, p1/Z, [x21, #3, MUL VL]\n"
+ "addvl x21, x21, #4\n"
"ld1h { z19.h }, p1/Z, [x20]\n"
"ld1h { z18.h }, p1/Z, [x20, #1, MUL VL]\n"
"ld1h { z17.h }, p1/Z, [x20, #2, MUL VL]\n"
@@ -94,17 +94,17 @@ void sme_transpose_interleave_1VL(uint16_t *out, const uint16_t *in, size_t widt
"add x22, x22, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.h, XZR, x23\n"
- "dech x23\n"
+ "whilelt p0.h, XZR, x25\n"
+ "dech x25\n"
+ "cmp x25, #0x0\n"
"ld1h { z19.h }, p0/Z, [x26]\n"
- "cmp x23, #0x0\n"
"addvl x26, x26, #1\n"
- "ld1h { z18.h }, p0/Z, [x25]\n"
- "addvl x25, x25, #1\n"
- "ld1h { z17.h }, p0/Z, [x24]\n"
+ "ld1h { z18.h }, p0/Z, [x24]\n"
"addvl x24, x24, #1\n"
+ "ld1h { z17.h }, p0/Z, [x21]\n"
+ "addvl x21, x21, #1\n"
"ld1h { z16.h }, p0/Z, [x20]\n"
"addvl x20, x20, #1\n"
"st1h { z19.h }, p1, [x22]\n"
@@ -131,8 +131,8 @@ void sme_transpose_interleave_1VL(uint16_t *out, const uint16_t *in, size_t widt
"8:" // Tail row loop: Unroll column loop
"ld1h { z19.h }, p1/Z, [x26]\n"
"sub x21, x21, x20\n"
- "cmp x21, x20\n"
"ld1h { z18.h }, p1/Z, [x26, #1, MUL VL]\n"
+ "cmp x21, x20\n"
"ld1h { z17.h }, p1/Z, [x26, #2, MUL VL]\n"
"ld1h { z16.h }, p1/Z, [x26, #3, MUL VL]\n"
"st1h { z19.h }, p1, [x22]\n"
@@ -150,10 +150,10 @@ void sme_transpose_interleave_1VL(uint16_t *out, const uint16_t *in, size_t widt
"10:" // Tail row loop: Column loop
"whilelt p0.h, XZR, x21\n"
"dech x21\n"
- "ld1h { z16.h }, p0/Z, [x26]\n"
- "st1h { z16.h }, p1, [x22]\n"
"cmp x21, #0x0\n"
+ "ld1h { z16.h }, p0/Z, [x26]\n"
"addvl x26, x26, #1\n"
+ "st1h { z16.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp
index 399a52e233..ff8cfc7efe 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,65 +43,65 @@ void sme_transpose_interleave_1VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"ptrue p1.b\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add x25, x26, %x[in_stride]\n"
+ "mov x24, %x[width]\n"
+ "add x23, x25, %x[in_stride]\n"
+ "cntb x22\n"
+ "add x21, x23, %x[in_stride]\n"
+ "csel x23, x23, %x[pad_row], GE\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "mov x22, %x[width]\n"
- "cntb x21\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x22, x21\n"
"mov x20, %x[out]\n"
+ "csel x25, x25, %x[pad_row], GT\n"
+ "cmp x24, x22\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z17.b }, p1/Z, [x26]\n"
- "sub x22, x22, x21\n"
- "cmp x22, x21\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
+ "ld1b { z20.b }, p1/Z, [x26]\n"
+ "sub x24, x24, x22\n"
"addvl x26, x26, #1\n"
+ "ld1b { z19.b }, p1/Z, [x25]\n"
+ "cmp x24, x22\n"
"addvl x25, x25, #1\n"
- "ld1b { z16.b }, p1/Z, [x24]\n"
- "zip1 z20.b, z17.b, z16.b\n"
- "zip2 z19.b, z17.b, z16.b\n"
- "addvl x24, x24, #1\n"
- "ld1b { z16.b }, p1/Z, [x23]\n"
- "zip1 z17.b, z18.b, z16.b\n"
- "zip2 z18.b, z18.b, z16.b\n"
+ "ld1b { z17.b }, p1/Z, [x23]\n"
"addvl x23, x23, #1\n"
- "zip1 z16.b, z20.b, z17.b\n"
- "st1b { z16.b }, p1, [x20]\n"
+ "ld1b { z16.b }, p1/Z, [x21]\n"
+ "addvl x21, x21, #1\n"
+ "zip1 z18.b, z20.b, z17.b\n"
+ "zip2 z20.b, z20.b, z17.b\n"
+ "zip1 z17.b, z19.b, z16.b\n"
+ "zip2 z16.b, z19.b, z16.b\n"
+ "zip1 z19.b, z18.b, z17.b\n"
+ "zip2 z18.b, z18.b, z17.b\n"
+ "zip1 z17.b, z20.b, z16.b\n"
+ "zip2 z16.b, z20.b, z16.b\n"
+ "st1b { z19.b }, p1, [x20]\n"
"add x20, x20, %x[out_stride]\n"
- "zip2 z16.b, z20.b, z17.b\n"
- "st1b { z16.b }, p1, [x20]\n"
+ "st1b { z18.b }, p1, [x20]\n"
"add x20, x20, %x[out_stride]\n"
- "zip1 z17.b, z19.b, z18.b\n"
- "zip2 z16.b, z19.b, z18.b\n"
"st1b { z17.b }, p1, [x20]\n"
"add x20, x20, %x[out_stride]\n"
"st1b { z16.b }, p1, [x20]\n"
"add x20, x20, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x22, 5f\n"
+ "cbz x24, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x22\n"
- "ld1b { z17.b }, p0/Z, [x26]\n"
- "decw x22\n"
- "ld1b { z18.b }, p0/Z, [x25]\n"
- "cmp x22, #0x0\n"
+ "whilelt p0.b, XZR, x24\n"
+ "decw x24\n"
+ "ld1b { z19.b }, p0/Z, [x26]\n"
+ "cmp x24, #0x0\n"
"incd x26, ALL, MUL #2\n"
- "ld1b { z16.b }, p0/Z, [x24]\n"
- "zip1 z17.b, z17.b, z16.b\n"
+ "ld1b { z18.b }, p0/Z, [x25]\n"
"incd x25, ALL, MUL #2\n"
- "incd x24, ALL, MUL #2\n"
- "ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z16.b, z18.b, z16.b\n"
+ "ld1b { z17.b }, p0/Z, [x23]\n"
"incd x23, ALL, MUL #2\n"
+ "ld1b { z16.b }, p0/Z, [x21]\n"
+ "incd x21, ALL, MUL #2\n"
+ "zip1 z17.b, z19.b, z17.b\n"
+ "zip1 z16.b, z18.b, z16.b\n"
"zip1 z16.b, z17.b, z16.b\n"
"st1b { z16.b }, p1, [x20]\n"
"add x20, x20, %x[out_stride]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
index 6318e29a79..54c2af1a84 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,69 +45,69 @@ void sme_transpose_interleave_1VL_2x2(uint16_t *out, const uint16_t *in, size_t
"blt 6f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x21, ALL, MUL #2\n"
- "add x20, x24, %x[in_stride]\n"
- "cmp x23, x21\n"
- "add %x[in], x20, %x[in_stride]\n"
+ "mov x25, %x[width]\n"
+ "add x24, x26, %x[in_stride]\n"
+ "cnth x23, ALL, MUL #2\n"
+ "add x21, x24, %x[in_stride]\n"
+ "cmp x25, x23\n"
+ "add x20, x21, %x[in_stride]\n"
"mov x22, %x[out]\n"
+ "add %x[in], x20, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z17.h }, p1/Z, [x26]\n"
- "sub x23, x23, x21\n"
- "cmp x23, x21\n"
- "ld1h { z16.h }, p1/Z, [x25]\n"
- "zip1 z24.h, z17.h, z16.h\n"
- "zip2 z23.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p1/Z, [x26]\n"
+ "sub x25, x25, x23\n"
"ld1h { z17.h }, p1/Z, [x24]\n"
+ "cmp x25, x23\n"
+ "ld1h { z20.h }, p1/Z, [x21]\n"
"ld1h { z16.h }, p1/Z, [x20]\n"
- "zip1 z22.h, z17.h, z16.h\n"
- "zip2 z21.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x26, #1, MUL VL]\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z22.h, z18.h, z17.h\n"
"addvl x26, x26, #2\n"
- "ld1h { z16.h }, p1/Z, [x25, #1, MUL VL]\n"
- "zip1 z20.h, z17.h, z16.h\n"
- "addvl x25, x25, #2\n"
- "zip2 z19.h, z17.h, z16.h\n"
"ld1h { z18.h }, p1/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #2\n"
+ "ld1h { z21.h }, p1/Z, [x21, #1, MUL VL]\n"
+ "zip1 z17.h, z20.h, z16.h\n"
+ "zip2 z20.h, z20.h, z16.h\n"
+ "addvl x21, x21, #2\n"
"ld1h { z16.h }, p1/Z, [x20, #1, MUL VL]\n"
- "st1h { z24.h }, p1, [x22]\n"
- "zip1 z17.h, z18.h, z16.h\n"
"addvl x20, x20, #2\n"
- "st1h { z22.h }, p1, [x22, #1, MUL VL]\n"
+ "st1h { z19.h }, p1, [x22]\n"
+ "zip1 z19.h, z23.h, z18.h\n"
+ "zip2 z18.h, z23.h, z18.h\n"
+ "st1h { z17.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "zip2 z16.h, z18.h, z16.h\n"
- "st1h { z23.h }, p1, [x22]\n"
- "st1h { z21.h }, p1, [x22, #1, MUL VL]\n"
+ "zip1 z17.h, z21.h, z16.h\n"
+ "zip2 z16.h, z21.h, z16.h\n"
+ "st1h { z22.h }, p1, [x22]\n"
+ "st1h { z20.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p1, [x22]\n"
+ "st1h { z19.h }, p1, [x22]\n"
"st1h { z17.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "st1h { z19.h }, p1, [x22]\n"
+ "st1h { z18.h }, p1, [x22]\n"
"st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.h, XZR, x23\n"
- "ld1h { z17.h }, p0/Z, [x26]\n"
- "decw x23\n"
- "ld1h { z16.h }, p0/Z, [x25]\n"
- "cmp x23, #0x0\n"
+ "whilelt p0.h, XZR, x25\n"
+ "decw x25\n"
+ "ld1h { z19.h }, p0/Z, [x26]\n"
+ "cmp x25, #0x0\n"
"incd x26, ALL, MUL #4\n"
- "zip1 z18.h, z17.h, z16.h\n"
"ld1h { z17.h }, p0/Z, [x24]\n"
- "incd x25, ALL, MUL #4\n"
"incd x24, ALL, MUL #4\n"
+ "ld1h { z18.h }, p0/Z, [x21]\n"
+ "incd x21, ALL, MUL #4\n"
"ld1h { z16.h }, p0/Z, [x20]\n"
"incd x20, ALL, MUL #4\n"
- "zip1 z16.h, z17.h, z16.h\n"
- "st1h { z18.h }, p1, [x22]\n"
+ "zip1 z17.h, z19.h, z17.h\n"
+ "zip1 z16.h, z18.h, z16.h\n"
+ "st1h { z17.h }, p1, [x22]\n"
"st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bgt 4b\n"
@@ -119,12 +119,12 @@ void sme_transpose_interleave_1VL_2x2(uint16_t *out, const uint16_t *in, size_t
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
+ "add x24, x26, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #2\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp x21, x20\n"
"mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
@@ -132,20 +132,20 @@ void sme_transpose_interleave_1VL_2x2(uint16_t *out, const uint16_t *in, size_t
"8:" // Tail row loop: Unroll column loop
"ld1h { z18.h }, p1/Z, [x26]\n"
"sub x21, x21, x20\n"
+ "ld1h { z17.h }, p1/Z, [x24]\n"
"cmp x21, x20\n"
- "ld1h { z16.h }, p1/Z, [x25]\n"
- "zip1 z17.h, z18.h, z16.h\n"
- "zip2 z19.h, z18.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z20.h }, p1/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "ld1h { z16.h }, p1/Z, [x25, #1, MUL VL]\n"
- "st1h { z17.h }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.h, z18.h, z16.h\n"
+ "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z17.h, z20.h, z16.h\n"
+ "zip2 z16.h, z20.h, z16.h\n"
"st1h { z19.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
- "addvl x25, x25, #2\n"
- "zip2 z16.h, z18.h, z16.h\n"
+ "st1h { z18.h }, p1, [x22]\n"
+ "add x22, x22, %x[out_stride]\n"
"st1h { z17.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
"st1h { z16.h }, p1, [x22]\n"
@@ -155,13 +155,13 @@ void sme_transpose_interleave_1VL_2x2(uint16_t *out, const uint16_t *in, size_t
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"whilelt p0.h, XZR, x21\n"
- "ld1h { z17.h }, p0/Z, [x26]\n"
"decw x21\n"
- "ld1h { z16.h }, p0/Z, [x25]\n"
+ "ld1h { z17.h }, p0/Z, [x26]\n"
"cmp x21, #0x0\n"
"incd x26, ALL, MUL #4\n"
+ "ld1h { z16.h }, p0/Z, [x24]\n"
+ "incd x24, ALL, MUL #4\n"
"zip1 z16.h, z17.h, z16.h\n"
- "incd x25, ALL, MUL #4\n"
"st1h { z16.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
"bgt 10b\n"
@@ -205,4 +205,5 @@ void Transform<1, 2, true, VLType::SME>(
);
}
+
#endif // defined(ARM_COMPUTE_ENABLE_SME)
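The _2x2 kernels pair two rows element-wise, zip1 producing the low half of the paired vector and zip2 the high half, so each output pair is one column from row A followed by the same column from row B; the renames above move the second row of each pair from x25 to x24 (and x21/x20 for the second row pair) without changing that pairing. Scalar equivalent (sketch):

#include <cstdint>
#include <cstddef>

// 2x2 interleave (sketch): the zip1/zip2 pairs in the asm compute this
// pairing a full vector at a time.
void interleave_2x2(uint16_t *out, const uint16_t *rowA,
                    const uint16_t *rowB, size_t width) {
    for (size_t c = 0; c < width; ++c) {
        out[2 * c]     = rowA[c];
        out[2 * c + 1] = rowB[c];
    }
}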
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp
index b90063028d..2fafefbbc5 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_1VL_2x2_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,88 +45,88 @@ void sme_transpose_interleave_1VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"blt 6f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x21, ALL, MUL #2\n"
- "add x20, x24, %x[in_stride]\n"
- "cmp x23, x21\n"
- "add %x[in], x20, %x[in_stride]\n"
+ "mov x25, %x[width]\n"
+ "add x24, x26, %x[in_stride]\n"
+ "cnth x23, ALL, MUL #2\n"
+ "add x21, x24, %x[in_stride]\n"
+ "cmp x25, x23\n"
+ "add x20, x21, %x[in_stride]\n"
"mov x22, %x[out]\n"
+ "add %x[in], x20, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z16.s }, p1/Z, [x26]\n"
+ "ld1w { z19.s }, p1/Z, [x26]\n"
+ "sub x25, x25, x23\n"
+ "ld1w { z18.s }, p1/Z, [x21]\n"
+ "cmp x25, x23\n"
+ "ld1w { z17.s }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x21, #1, MUL VL]\n"
+ ".inst 0x658aa67b // bfcvt z27.h, p1/M, z19.s\n"
+ "ld1w { z19.s }, p1/Z, [x26, #2, MUL VL]\n"
+ ".inst 0x658aa65a // bfcvt z26.h, p1/M, z18.s\n"
+ "ld1w { z18.s }, p1/Z, [x21, #2, MUL VL]\n"
+ ".inst 0x658aa639 // bfcvt z25.h, p1/M, z17.s\n"
+ "ld1w { z17.s }, p1/Z, [x26, #3, MUL VL]\n"
".inst 0x658aa618 // bfcvt z24.h, p1/M, z16.s\n"
- "sub x23, x23, x21\n"
- "cmp x23, x21\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- ".inst 0x658aa617 // bfcvt z23.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- ".inst 0x658aa616 // bfcvt z22.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24, #1, MUL VL]\n"
- ".inst 0x658aa615 // bfcvt z21.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #2, MUL VL]\n"
- ".inst 0x658aa614 // bfcvt z20.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
- ".inst 0x658aa613 // bfcvt z19.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #3, MUL VL]\n"
- ".inst 0x658aa612 // bfcvt z18.h, p1/M, z16.s\n"
"addvl x26, x26, #4\n"
- "ld1w { z16.s }, p1/Z, [x24, #3, MUL VL]\n"
- ".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
- "addvl x24, x24, #4\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
- ".inst 0x648aa618 // bfcvtnt z24.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20]\n"
- ".inst 0x648aa617 // bfcvtnt z23.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
- ".inst 0x648aa616 // bfcvtnt z22.h, p1/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x21, #3, MUL VL]\n"
+ ".inst 0x658aa677 // bfcvt z23.h, p1/M, z19.s\n"
+ "addvl x21, x21, #4\n"
+ "ld1w { z19.s }, p1/Z, [x24]\n"
+ ".inst 0x658aa656 // bfcvt z22.h, p1/M, z18.s\n"
+ "ld1w { z18.s }, p1/Z, [x20]\n"
+ ".inst 0x658aa635 // bfcvt z21.h, p1/M, z17.s\n"
+ "ld1w { z17.s }, p1/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x658aa614 // bfcvt z20.h, p1/M, z16.s\n"
"ld1w { z16.s }, p1/Z, [x20, #1, MUL VL]\n"
- ".inst 0x648aa615 // bfcvtnt z21.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0x648aa614 // bfcvtnt z20.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x20, #2, MUL VL]\n"
- ".inst 0x648aa613 // bfcvtnt z19.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0x648aa612 // bfcvtnt z18.h, p1/M, z16.s\n"
+ ".inst 0x648aa67b // bfcvtnt z27.h, p1/M, z19.s\n"
+ "ld1w { z19.s }, p1/Z, [x24, #2, MUL VL]\n"
+ ".inst 0x648aa65a // bfcvtnt z26.h, p1/M, z18.s\n"
+ "ld1w { z18.s }, p1/Z, [x20, #2, MUL VL]\n"
+ ".inst 0x648aa639 // bfcvtnt z25.h, p1/M, z17.s\n"
+ "ld1w { z17.s }, p1/Z, [x24, #3, MUL VL]\n"
+ ".inst 0x648aa618 // bfcvtnt z24.h, p1/M, z16.s\n"
+ "addvl x24, x24, #4\n"
"ld1w { z16.s }, p1/Z, [x20, #3, MUL VL]\n"
- "st1h { z24.h }, p1, [x22]\n"
+ "st1h { z27.h }, p1, [x22]\n"
+ ".inst 0x648aa677 // bfcvtnt z23.h, p1/M, z19.s\n"
"addvl x20, x20, #4\n"
- ".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
- "st1h { z23.h }, p1, [x22, #1, MUL VL]\n"
+ "st1h { z26.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "st1h { z22.h }, p1, [x22]\n"
- "st1h { z21.h }, p1, [x22, #1, MUL VL]\n"
+ ".inst 0x648aa656 // bfcvtnt z22.h, p1/M, z18.s\n"
+ "st1h { z25.h }, p1, [x22]\n"
+ ".inst 0x648aa635 // bfcvtnt z21.h, p1/M, z17.s\n"
+ "st1h { z24.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p1, [x22]\n"
- "st1h { z19.h }, p1, [x22, #1, MUL VL]\n"
+ ".inst 0x648aa614 // bfcvtnt z20.h, p1/M, z16.s\n"
+ "st1h { z23.h }, p1, [x22]\n"
+ "st1h { z22.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "st1h { z18.h }, p1, [x22]\n"
- "st1h { z17.h }, p1, [x22, #1, MUL VL]\n"
+ "st1h { z21.h }, p1, [x22]\n"
+ "st1h { z20.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.s, XZR, x23\n"
- "ld1w { z16.s }, p0/Z, [x26]\n"
- ".inst 0x658aa612 // bfcvt z18.h, p1/M, z16.s\n"
- "decw x23\n"
- "ld1w { z16.s }, p0/Z, [x24]\n"
- ".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
- "cmp x23, #0x0\n"
+ "whilelt p0.s, XZR, x25\n"
+ "decw x25\n"
+ "ld1w { z17.s }, p0/Z, [x26]\n"
+ "cmp x25, #0x0\n"
"addvl x26, x26, #1\n"
- "ld1w { z16.s }, p0/Z, [x25]\n"
- "addvl x25, x25, #1\n"
+ "ld1w { z16.s }, p0/Z, [x21]\n"
+ "addvl x21, x21, #1\n"
+ "ld1w { z19.s }, p0/Z, [x24]\n"
"addvl x24, x24, #1\n"
- ".inst 0x648aa612 // bfcvtnt z18.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
+ ".inst 0x658aa632 // bfcvt z18.h, p1/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x20]\n"
"addvl x20, x20, #1\n"
- ".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
+ ".inst 0x658aa610 // bfcvt z16.h, p1/M, z16.s\n"
+ ".inst 0x648aa672 // bfcvtnt z18.h, p1/M, z19.s\n"
+ ".inst 0x648aa630 // bfcvtnt z16.h, p1/M, z17.s\n"
"st1h { z18.h }, p1, [x22]\n"
- "st1h { z17.h }, p1, [x22, #1, MUL VL]\n"
+ "st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
@@ -137,42 +137,42 @@ void sme_transpose_interleave_1VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
+ "add x24, x26, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #2\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp x21, x20\n"
"mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1w { z16.s }, p1/Z, [x26]\n"
- ".inst 0x658aa614 // bfcvt z20.h, p1/M, z16.s\n"
+ "ld1w { z19.s }, p1/Z, [x26]\n"
"sub x21, x21, x20\n"
+ "ld1w { z18.s }, p1/Z, [x26, #1, MUL VL]\n"
"cmp x21, x20\n"
- "ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- ".inst 0x658aa613 // bfcvt z19.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x26, #2, MUL VL]\n"
- ".inst 0x658aa612 // bfcvt z18.h, p1/M, z16.s\n"
+ "ld1w { z17.s }, p1/Z, [x26, #2, MUL VL]\n"
"ld1w { z16.s }, p1/Z, [x26, #3, MUL VL]\n"
- ".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
+ ".inst 0x658aa677 // bfcvt z23.h, p1/M, z19.s\n"
"addvl x26, x26, #4\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
- ".inst 0x648aa614 // bfcvtnt z20.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
- ".inst 0x648aa613 // bfcvtnt z19.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0x648aa612 // bfcvtnt z18.h, p1/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #3, MUL VL]\n"
- "st1h { z20.h }, p1, [x22]\n"
+ "ld1w { z22.s }, p1/Z, [x24]\n"
+ ".inst 0x658aa655 // bfcvt z21.h, p1/M, z18.s\n"
+ "ld1w { z20.s }, p1/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x658aa633 // bfcvt z19.h, p1/M, z17.s\n"
+ "ld1w { z18.s }, p1/Z, [x24, #2, MUL VL]\n"
+ ".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
+ "ld1w { z16.s }, p1/Z, [x24, #3, MUL VL]\n"
+ ".inst 0x648aa6d7 // bfcvtnt z23.h, p1/M, z22.s\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0x648aa695 // bfcvtnt z21.h, p1/M, z20.s\n"
+ ".inst 0x648aa653 // bfcvtnt z19.h, p1/M, z18.s\n"
+ ".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
+ "st1h { z23.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
- "addvl x25, x25, #4\n"
- "st1h { z19.h }, p1, [x22]\n"
+ "st1h { z21.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
- ".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
- "st1h { z18.h }, p1, [x22]\n"
+ "st1h { z19.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
"st1h { z17.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
@@ -181,15 +181,15 @@ void sme_transpose_interleave_1VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"whilelt p0.s, XZR, x21\n"
- "ld1w { z16.s }, p0/Z, [x26]\n"
- ".inst 0x658aa611 // bfcvt z17.h, p1/M, z16.s\n"
"decw x21\n"
- "ld1w { z16.s }, p0/Z, [x25]\n"
+ "ld1w { z16.s }, p0/Z, [x26]\n"
"cmp x21, #0x0\n"
"addvl x26, x26, #1\n"
- ".inst 0x648aa611 // bfcvtnt z17.h, p1/M, z16.s\n"
- "addvl x25, x25, #1\n"
- "st1h { z17.h }, p1, [x22]\n"
+ "ld1w { z17.s }, p0/Z, [x24]\n"
+ "addvl x24, x24, #1\n"
+ ".inst 0x658aa610 // bfcvt z16.h, p1/M, z16.s\n"
+ ".inst 0x648aa630 // bfcvtnt z16.h, p1/M, z17.s\n"
+ "st1h { z16.h }, p1, [x22]\n"
"add x22, x22, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp
index f827197ab7..f981624a1d 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,34 +39,34 @@ void sme_transpose_interleave_2VL(uint16_t *out, const uint16_t *in, size_t widt
"blt 6f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
+ "mov x25, %x[width]\n"
+ "add x24, x26, %x[in_stride]\n"
"cnth x20, ALL, MUL #4\n"
- "add x21, x24, %x[in_stride]\n"
- "cmp x23, x20\n"
- "add %x[in], x21, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "cmp x25, x20\n"
+ "add x21, x23, %x[in_stride]\n"
"mov x22, %x[out]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "sub x23, x23, x20\n"
+ "sub x25, x25, x20\n"
"ld1h { z31.h }, p2/Z, [x26]\n"
- "cmp x23, x20\n"
"ld1h { z30.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "cmp x25, x20\n"
"ld1h { z29.h }, p2/Z, [x26, #2, MUL VL]\n"
"ld1h { z28.h }, p2/Z, [x26, #3, MUL VL]\n"
"addvl x26, x26, #4\n"
- "ld1h { z27.h }, p2/Z, [x25]\n"
- "ld1h { z26.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z25.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z24.h }, p2/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- "ld1h { z23.h }, p2/Z, [x24]\n"
- "ld1h { z22.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z21.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z20.h }, p2/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z27.h }, p2/Z, [x24]\n"
+ "ld1h { z26.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z25.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z24.h }, p2/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
+ "ld1h { z23.h }, p2/Z, [x23]\n"
+ "ld1h { z22.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z21.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z20.h }, p2/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
"ld1h { z19.h }, p2/Z, [x21]\n"
"ld1h { z18.h }, p2/Z, [x21, #1, MUL VL]\n"
"ld1h { z17.h }, p2/Z, [x21, #2, MUL VL]\n"
@@ -92,29 +92,29 @@ void sme_transpose_interleave_2VL(uint16_t *out, const uint16_t *in, size_t widt
"add x22, x22, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
+ "mov x20, x25\n"
+ "dech x25, ALL, MUL #2\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z23.h }, p1/Z, [x26]\n"
"dech x20\n"
- "dech x23, ALL, MUL #2\n"
- "ld1h { z22.h }, p1/Z, [x25]\n"
"whilelt p0.h, XZR, x20\n"
- "cmp x23, #0x0\n"
- "ld1h { z21.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "cmp x25, #0x0\n"
+ "ld1h { z23.h }, p1/Z, [x26]\n"
+ "ld1h { z22.h }, p0/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "ld1h { z20.h }, p0/Z, [x25, #1, MUL VL]\n"
- "addvl x25, x25, #2\n"
- "ld1h { z19.h }, p1/Z, [x24]\n"
- "ld1h { z18.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z21.h }, p1/Z, [x24]\n"
+ "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #2\n"
+ "ld1h { z19.h }, p1/Z, [x23]\n"
+ "ld1h { z18.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "addvl x23, x23, #2\n"
"ld1h { z17.h }, p1/Z, [x21]\n"
"ld1h { z16.h }, p0/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
"st1h { z23.h }, p2, [x22]\n"
- "st1h { z21.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z21.h }, p2, [x22, #2, MUL VL]\n"
"st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
"st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
"st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
@@ -140,8 +140,8 @@ void sme_transpose_interleave_2VL(uint16_t *out, const uint16_t *in, size_t widt
"8:" // Tail row loop: Unroll column loop
"sub x21, x21, x20\n"
"ld1h { z19.h }, p2/Z, [x26]\n"
- "cmp x21, x20\n"
"ld1h { z18.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "cmp x21, x20\n"
"ld1h { z17.h }, p2/Z, [x26, #2, MUL VL]\n"
"ld1h { z16.h }, p2/Z, [x26, #3, MUL VL]\n"
"st1h { z19.h }, p2, [x22]\n"
@@ -156,15 +156,15 @@ void sme_transpose_interleave_2VL(uint16_t *out, const uint16_t *in, size_t widt
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z17.h }, p0/Z, [x26]\n"
- "dech x20\n"
"dech x21, ALL, MUL #2\n"
+ "whilelt p1.h, XZR, x20\n"
+ "dech x20\n"
"whilelt p0.h, XZR, x20\n"
"cmp x21, #0x0\n"
+ "ld1h { z17.h }, p1/Z, [x26]\n"
"ld1h { z16.h }, p0/Z, [x26, #1, MUL VL]\n"
- "st1h { z17.h }, p2, [x22]\n"
"addvl x26, x26, #2\n"
+ "st1h { z17.h }, p2, [x22]\n"
"st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bgt 10b\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp
index c471d66e17..a7a384c85f 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,63 +43,63 @@ void sme_transpose_interleave_2VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"ptrue p1.b\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add x25, x26, %x[in_stride]\n"
+ "mov x24, %x[width]\n"
+ "add x23, x25, %x[in_stride]\n"
+ "cntb x22\n"
+ "add x21, x23, %x[in_stride]\n"
+ "csel x23, x23, %x[pad_row], GE\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "mov x22, %x[width]\n"
- "cntb x21\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x22, x21\n"
"mov x20, %x[out]\n"
+ "csel x25, x25, %x[pad_row], GT\n"
+ "cmp x24, x22\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z17.b }, p1/Z, [x26]\n"
- "sub x22, x22, x21\n"
- "cmp x22, x21\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
+ "ld1b { z20.b }, p1/Z, [x26]\n"
+ "sub x24, x24, x22\n"
"addvl x26, x26, #1\n"
+ "ld1b { z19.b }, p1/Z, [x25]\n"
+ "cmp x24, x22\n"
"addvl x25, x25, #1\n"
- "ld1b { z16.b }, p1/Z, [x24]\n"
- "zip1 z20.b, z17.b, z16.b\n"
- "zip2 z19.b, z17.b, z16.b\n"
- "addvl x24, x24, #1\n"
"ld1b { z17.b }, p1/Z, [x23]\n"
- "zip1 z16.b, z18.b, z17.b\n"
- "zip2 z18.b, z18.b, z17.b\n"
"addvl x23, x23, #1\n"
+ "ld1b { z16.b }, p1/Z, [x21]\n"
+ "addvl x21, x21, #1\n"
+ "zip1 z18.b, z20.b, z17.b\n"
+ "zip2 z20.b, z20.b, z17.b\n"
+ "zip1 z17.b, z19.b, z16.b\n"
+ "zip2 z16.b, z19.b, z16.b\n"
+ "zip1 z19.b, z18.b, z17.b\n"
+ "zip2 z18.b, z18.b, z17.b\n"
"zip1 z17.b, z20.b, z16.b\n"
"zip2 z16.b, z20.b, z16.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
+ "st1b { z19.b }, p1, [x20]\n"
+ "st1b { z18.b }, p1, [x20, #1, MUL VL]\n"
"add x20, x20, %x[out_stride]\n"
- "zip1 z17.b, z19.b, z18.b\n"
- "zip2 z16.b, z19.b, z18.b\n"
"st1b { z17.b }, p1, [x20]\n"
"st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
"add x20, x20, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x22, 5f\n"
+ "cbz x24, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x22\n"
+ "whilelt p0.b, XZR, x24\n"
+ "decw x24, ALL, MUL #2\n"
"ld1b { z18.b }, p0/Z, [x26]\n"
- "decw x22, ALL, MUL #2\n"
- "ld1b { z17.b }, p0/Z, [x25]\n"
- "cmp x22, #0x0\n"
+ "cmp x24, #0x0\n"
"incd x26, ALL, MUL #4\n"
- "ld1b { z16.b }, p0/Z, [x24]\n"
- "zip1 z18.b, z18.b, z16.b\n"
+ "ld1b { z19.b }, p0/Z, [x25]\n"
"incd x25, ALL, MUL #4\n"
- "incd x24, ALL, MUL #4\n"
- "ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z16.b, z17.b, z16.b\n"
+ "ld1b { z17.b }, p0/Z, [x23]\n"
"incd x23, ALL, MUL #4\n"
+ "ld1b { z16.b }, p0/Z, [x21]\n"
+ "incd x21, ALL, MUL #4\n"
+ "zip1 z18.b, z18.b, z17.b\n"
+ "zip1 z16.b, z19.b, z16.b\n"
"zip1 z17.b, z18.b, z16.b\n"
"zip2 z16.b, z18.b, z16.b\n"
"st1b { z17.b }, p1, [x20]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
index 5f967fa615..651ae5f061 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,69 +45,69 @@ void sme_transpose_interleave_2VL_2x2(uint16_t *out, const uint16_t *in, size_t
"blt 6f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
- "cnth x21, ALL, MUL #2\n"
- "add x20, x24, %x[in_stride]\n"
- "cmp x23, x21\n"
- "add %x[in], x20, %x[in_stride]\n"
+ "mov x25, %x[width]\n"
+ "add x24, x26, %x[in_stride]\n"
+ "cnth x23, ALL, MUL #2\n"
+ "add x21, x24, %x[in_stride]\n"
+ "cmp x25, x23\n"
+ "add x20, x21, %x[in_stride]\n"
"mov x22, %x[out]\n"
+ "add %x[in], x20, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z17.h }, p1/Z, [x26]\n"
- "sub x23, x23, x21\n"
- "cmp x23, x21\n"
- "ld1h { z16.h }, p1/Z, [x25]\n"
- "zip1 z24.h, z17.h, z16.h\n"
- "zip2 z23.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p1/Z, [x26]\n"
+ "sub x25, x25, x23\n"
"ld1h { z17.h }, p1/Z, [x24]\n"
+ "cmp x25, x23\n"
+ "ld1h { z20.h }, p1/Z, [x21]\n"
"ld1h { z16.h }, p1/Z, [x20]\n"
- "zip1 z22.h, z17.h, z16.h\n"
- "zip2 z21.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x26, #1, MUL VL]\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z22.h, z18.h, z17.h\n"
"addvl x26, x26, #2\n"
- "ld1h { z16.h }, p1/Z, [x25, #1, MUL VL]\n"
- "addvl x25, x25, #2\n"
- "zip1 z20.h, z17.h, z16.h\n"
- "zip2 z19.h, z17.h, z16.h\n"
"ld1h { z18.h }, p1/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #2\n"
+ "ld1h { z21.h }, p1/Z, [x21, #1, MUL VL]\n"
+ "zip1 z17.h, z20.h, z16.h\n"
+ "zip2 z20.h, z20.h, z16.h\n"
+ "addvl x21, x21, #2\n"
"ld1h { z16.h }, p1/Z, [x20, #1, MUL VL]\n"
- "st1h { z24.h }, p1, [x22]\n"
"addvl x20, x20, #2\n"
- "zip1 z17.h, z18.h, z16.h\n"
- "st1h { z23.h }, p1, [x22, #1, MUL VL]\n"
- "zip2 z16.h, z18.h, z16.h\n"
- "st1h { z22.h }, p1, [x22, #2, MUL VL]\n"
- "st1h { z21.h }, p1, [x22, #3, MUL VL]\n"
+ "st1h { z19.h }, p1, [x22]\n"
+ "zip1 z19.h, z23.h, z18.h\n"
+ "zip2 z18.h, z23.h, z18.h\n"
+ "st1h { z22.h }, p1, [x22, #1, MUL VL]\n"
+ "st1h { z17.h }, p1, [x22, #2, MUL VL]\n"
+ "zip1 z17.h, z21.h, z16.h\n"
+ "zip2 z16.h, z21.h, z16.h\n"
+ "st1h { z20.h }, p1, [x22, #3, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p1, [x22]\n"
- "st1h { z19.h }, p1, [x22, #1, MUL VL]\n"
+ "st1h { z19.h }, p1, [x22]\n"
+ "st1h { z18.h }, p1, [x22, #1, MUL VL]\n"
"st1h { z17.h }, p1, [x22, #2, MUL VL]\n"
"st1h { z16.h }, p1, [x22, #3, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.h, XZR, x23\n"
- "ld1h { z17.h }, p0/Z, [x26]\n"
- "decw x23, ALL, MUL #2\n"
- "ld1h { z16.h }, p0/Z, [x25]\n"
- "cmp x23, #0x0\n"
+ "whilelt p0.h, XZR, x25\n"
+ "decw x25, ALL, MUL #2\n"
+ "ld1h { z18.h }, p0/Z, [x26]\n"
+ "cmp x25, #0x0\n"
"addvl x26, x26, #1\n"
- "zip1 z20.h, z17.h, z16.h\n"
- "ld1h { z19.h }, p0/Z, [x24]\n"
- "addvl x25, x25, #1\n"
+ "ld1h { z17.h }, p0/Z, [x24]\n"
"addvl x24, x24, #1\n"
- "zip2 z18.h, z17.h, z16.h\n"
+ "ld1h { z20.h }, p0/Z, [x21]\n"
+ "addvl x21, x21, #1\n"
"ld1h { z16.h }, p0/Z, [x20]\n"
"addvl x20, x20, #1\n"
- "zip1 z17.h, z19.h, z16.h\n"
- "zip2 z16.h, z19.h, z16.h\n"
- "st1h { z20.h }, p1, [x22]\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z17.h, z20.h, z16.h\n"
+ "zip2 z16.h, z20.h, z16.h\n"
+ "st1h { z19.h }, p1, [x22]\n"
"st1h { z18.h }, p1, [x22, #1, MUL VL]\n"
"st1h { z17.h }, p1, [x22, #2, MUL VL]\n"
"st1h { z16.h }, p1, [x22, #3, MUL VL]\n"
@@ -121,12 +121,12 @@ void sme_transpose_interleave_2VL_2x2(uint16_t *out, const uint16_t *in, size_t
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
+ "add x24, x26, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #2\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp x21, x20\n"
"mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
@@ -134,19 +134,19 @@ void sme_transpose_interleave_2VL_2x2(uint16_t *out, const uint16_t *in, size_t
"8:" // Tail row loop: Unroll column loop
"ld1h { z18.h }, p1/Z, [x26]\n"
"sub x21, x21, x20\n"
+ "ld1h { z17.h }, p1/Z, [x24]\n"
"cmp x21, x20\n"
- "ld1h { z16.h }, p1/Z, [x25]\n"
- "zip1 z17.h, z18.h, z16.h\n"
- "zip2 z19.h, z18.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z20.h }, p1/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "ld1h { z16.h }, p1/Z, [x25, #1, MUL VL]\n"
- "st1h { z17.h }, p1, [x22]\n"
- "addvl x25, x25, #2\n"
- "zip1 z17.h, z18.h, z16.h\n"
- "st1h { z19.h }, p1, [x22, #1, MUL VL]\n"
+ "ld1h { z16.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z17.h, z20.h, z16.h\n"
+ "zip2 z16.h, z20.h, z16.h\n"
+ "st1h { z19.h }, p1, [x22]\n"
+ "st1h { z18.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "zip2 z16.h, z18.h, z16.h\n"
"st1h { z17.h }, p1, [x22]\n"
"st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
@@ -155,13 +155,13 @@ void sme_transpose_interleave_2VL_2x2(uint16_t *out, const uint16_t *in, size_t
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"whilelt p0.h, XZR, x21\n"
- "ld1h { z18.h }, p0/Z, [x26]\n"
"decw x21, ALL, MUL #2\n"
- "ld1h { z16.h }, p0/Z, [x25]\n"
+ "ld1h { z18.h }, p0/Z, [x26]\n"
"cmp x21, #0x0\n"
"addvl x26, x26, #1\n"
+ "ld1h { z16.h }, p0/Z, [x24]\n"
+ "addvl x24, x24, #1\n"
"zip1 z17.h, z18.h, z16.h\n"
- "addvl x25, x25, #1\n"
"zip2 z16.h, z18.h, z16.h\n"
"st1h { z17.h }, p1, [x22]\n"
"st1h { z16.h }, p1, [x22, #1, MUL VL]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp
index f22b833821..382d4af314 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_2VL_2x2_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,99 +45,99 @@ void sme_transpose_interleave_2VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"blt 6f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
+ "mov x25, %x[width]\n"
+ "add x24, x26, %x[in_stride]\n"
"cnth x20, ALL, MUL #2\n"
- "add x21, x24, %x[in_stride]\n"
- "cmp x23, x20\n"
- "add %x[in], x21, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "cmp x25, x20\n"
+ "add x21, x23, %x[in_stride]\n"
"mov x22, %x[out]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z16.s }, p2/Z, [x26]\n"
+ "ld1w { z19.s }, p2/Z, [x26]\n"
+ "sub x25, x25, x20\n"
+ "ld1w { z18.s }, p2/Z, [x26, #1, MUL VL]\n"
+ "cmp x25, x20\n"
+ "ld1w { z17.s }, p2/Z, [x23]\n"
+ "ld1w { z16.s }, p2/Z, [x23, #1, MUL VL]\n"
+ ".inst 0x658aaa7b // bfcvt z27.h, p2/M, z19.s\n"
+ "ld1w { z19.s }, p2/Z, [x26, #2, MUL VL]\n"
+ ".inst 0x658aaa5a // bfcvt z26.h, p2/M, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x26, #3, MUL VL]\n"
+ ".inst 0x658aaa39 // bfcvt z25.h, p2/M, z17.s\n"
+ "addvl x26, x26, #4\n"
+ "ld1w { z17.s }, p2/Z, [x23, #2, MUL VL]\n"
".inst 0x658aaa18 // bfcvt z24.h, p2/M, z16.s\n"
- "sub x23, x23, x20\n"
- "cmp x23, x20\n"
- "ld1w { z16.s }, p2/Z, [x26, #1, MUL VL]\n"
- ".inst 0x658aaa17 // bfcvt z23.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24]\n"
- ".inst 0x658aaa16 // bfcvt z22.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
- ".inst 0x658aaa15 // bfcvt z21.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x23, #3, MUL VL]\n"
+ ".inst 0x658aaa77 // bfcvt z23.h, p2/M, z19.s\n"
+ "addvl x23, x23, #4\n"
+ "ld1w { z19.s }, p2/Z, [x24]\n"
+ ".inst 0x658aaa56 // bfcvt z22.h, p2/M, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x658aaa35 // bfcvt z21.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p2/Z, [x21]\n"
".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, #3, MUL VL]\n"
- ".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
- "addvl x26, x26, #4\n"
- "ld1w { z16.s }, p2/Z, [x24, #2, MUL VL]\n"
- ".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x24, #3, MUL VL]\n"
- ".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x21, #1, MUL VL]\n"
+ ".inst 0x648aaa7b // bfcvtnt z27.h, p2/M, z19.s\n"
+ "ld1w { z19.s }, p2/Z, [x24, #2, MUL VL]\n"
+ ".inst 0x648aaa5a // bfcvtnt z26.h, p2/M, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, #3, MUL VL]\n"
+ ".inst 0x648aaa39 // bfcvtnt z25.h, p2/M, z17.s\n"
"addvl x24, x24, #4\n"
- "ld1w { z16.s }, p2/Z, [x25]\n"
+ "ld1w { z17.s }, p2/Z, [x21, #2, MUL VL]\n"
".inst 0x648aaa18 // bfcvtnt z24.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
- ".inst 0x648aaa17 // bfcvtnt z23.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21]\n"
- ".inst 0x648aaa16 // bfcvtnt z22.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, #1, MUL VL]\n"
- ".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
- ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x21, #2, MUL VL]\n"
- ".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
"ld1w { z16.s }, p2/Z, [x21, #3, MUL VL]\n"
- "st1h { z24.h }, p2, [x22]\n"
+ "st1h { z27.h }, p2, [x22]\n"
"addvl x21, x21, #4\n"
- ".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
- "st1h { z23.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #3, MUL VL]\n"
+ ".inst 0x648aaa77 // bfcvtnt z23.h, p2/M, z19.s\n"
+ "st1h { z26.h }, p2, [x22, #1, MUL VL]\n"
+ ".inst 0x648aaa56 // bfcvtnt z22.h, p2/M, z18.s\n"
+ "st1h { z25.h }, p2, [x22, #2, MUL VL]\n"
+ ".inst 0x648aaa35 // bfcvtnt z21.h, p2/M, z17.s\n"
+ "st1h { z24.h }, p2, [x22, #3, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "st1h { z20.h }, p2, [x22]\n"
- "st1h { z19.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #3, MUL VL]\n"
+ ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
+ "st1h { z23.h }, p2, [x22]\n"
+ "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z21.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
+ "mov x20, x25\n"
+ "decw x25, ALL, MUL #2\n"
"whilelt p1.s, XZR, x20\n"
- "ld1w { z16.s }, p1/Z, [x26]\n"
- ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
"decw x20\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z16.s }, p0/Z, [x26, #1, MUL VL]\n"
- ".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x24]\n"
- ".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
- "decw x23, ALL, MUL #2\n"
- "cmp x23, #0x0\n"
- "ld1w { z16.s }, p0/Z, [x24, #1, MUL VL]\n"
- ".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
+ "ld1w { z19.s }, p1/Z, [x26]\n"
+ "cmp x25, #0x0\n"
+ "ld1w { z18.s }, p0/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
+ "ld1w { z17.s }, p1/Z, [x23]\n"
+ "ld1w { z16.s }, p0/Z, [x23, #1, MUL VL]\n"
+ ".inst 0x658aaa77 // bfcvt z23.h, p2/M, z19.s\n"
+ "addvl x23, x23, #2\n"
+ ".inst 0x658aaa56 // bfcvt z22.h, p2/M, z18.s\n"
+ "ld1w { z21.s }, p1/Z, [x24]\n"
+ ".inst 0x658aaa34 // bfcvt z20.h, p2/M, z17.s\n"
+ "ld1w { z19.s }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #2\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
- ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
- "addvl x25, x25, #2\n"
- ".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x21]\n"
- ".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
+ ".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p1/Z, [x21]\n"
"ld1w { z16.s }, p0/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
- ".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
- "st1h { z20.h }, p2, [x22]\n"
- "st1h { z19.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #3, MUL VL]\n"
+ ".inst 0x648aaab7 // bfcvtnt z23.h, p2/M, z21.s\n"
+ ".inst 0x648aaa76 // bfcvtnt z22.h, p2/M, z19.s\n"
+ ".inst 0x648aaa34 // bfcvtnt z20.h, p2/M, z17.s\n"
+ ".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
+ "st1h { z23.h }, p2, [x22]\n"
+ "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z20.h }, p2, [x22, #2, MUL VL]\n"
+ "st1h { z18.h }, p2, [x22, #3, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
@@ -148,65 +148,65 @@ void sme_transpose_interleave_2VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
+ "add x24, x26, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #2\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"cmp x21, x20\n"
"mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1w { z16.s }, p2/Z, [x26]\n"
- ".inst 0x658aaa14 // bfcvt z20.h, p2/M, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x26]\n"
"sub x21, x21, x20\n"
+ "ld1w { z18.s }, p2/Z, [x26, #1, MUL VL]\n"
"cmp x21, x20\n"
- "ld1w { z16.s }, p2/Z, [x26, #1, MUL VL]\n"
- ".inst 0x658aaa13 // bfcvt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x26, #2, MUL VL]\n"
- ".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x26, #2, MUL VL]\n"
"ld1w { z16.s }, p2/Z, [x26, #3, MUL VL]\n"
- ".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
+ ".inst 0x658aaa77 // bfcvt z23.h, p2/M, z19.s\n"
"addvl x26, x26, #4\n"
- "ld1w { z16.s }, p2/Z, [x25]\n"
- ".inst 0x648aaa14 // bfcvtnt z20.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
- ".inst 0x648aaa13 // bfcvtnt z19.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #2, MUL VL]\n"
+ ".inst 0x658aaa56 // bfcvt z22.h, p2/M, z18.s\n"
+ "ld1w { z21.s }, p2/Z, [x24]\n"
+ "ld1w { z20.s }, p2/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x658aaa33 // bfcvt z19.h, p2/M, z17.s\n"
+ ".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24, #3, MUL VL]\n"
+ ".inst 0x648aaab7 // bfcvtnt z23.h, p2/M, z21.s\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0x648aaa96 // bfcvtnt z22.h, p2/M, z20.s\n"
+ ".inst 0x648aaa33 // bfcvtnt z19.h, p2/M, z17.s\n"
".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #3, MUL VL]\n"
- "st1h { z20.h }, p2, [x22]\n"
- "addvl x25, x25, #4\n"
- ".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
- "st1h { z19.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z23.h }, p2, [x22]\n"
+ "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
- "st1h { z18.h }, p2, [x22]\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z19.h }, p2, [x22]\n"
+ "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
+ "decw x21, ALL, MUL #2\n"
"whilelt p1.s, XZR, x20\n"
- "ld1w { z16.s }, p1/Z, [x26]\n"
- ".inst 0x658aaa12 // bfcvt z18.h, p2/M, z16.s\n"
"decw x20\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z16.s }, p0/Z, [x26, #1, MUL VL]\n"
- ".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25]\n"
- "decw x21, ALL, MUL #2\n"
+ "ld1w { z17.s }, p1/Z, [x26]\n"
"cmp x21, #0x0\n"
- ".inst 0x648aaa12 // bfcvtnt z18.h, p2/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "addvl x25, x25, #2\n"
- ".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
+ "ld1w { z19.s }, p1/Z, [x24]\n"
+ ".inst 0x658aaa32 // bfcvt z18.h, p2/M, z17.s\n"
+ "ld1w { z17.s }, p0/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ ".inst 0x648aaa72 // bfcvtnt z18.h, p2/M, z19.s\n"
+ ".inst 0x648aaa30 // bfcvtnt z16.h, p2/M, z17.s\n"
"st1h { z18.h }, p2, [x22]\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
+ "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
"add x22, x22, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp
index 14636e3218..8d3aa59d13 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -38,35 +38,35 @@ void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
"ptrue p4.b\n"
"blt 4f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
+ "add x25, x27, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "mov x23, %x[width]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p3.h, XZR, x20\n"
- "ld1h { z31.h }, p3/Z, [x26]\n"
- "dech x20\n"
- "whilelt p2.h, XZR, x20\n"
- "ld1h { z30.h }, p2/Z, [x26, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z29.h }, p1/Z, [x26, #2, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z28.h }, p0/Z, [x26, #3, MUL VL]\n"
- "mov x20, x22\n"
- "dech x21, ALL, MUL #4\n"
+ "mov x21, x23\n"
+ "mov x20, x26\n"
+ "whilelt p3.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p2.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z31.h }, p3/Z, [x27]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z30.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x23, ALL, MUL #4\n"
+ "ld1h { z29.h }, p1/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z28.h }, p0/Z, [x27, #3, MUL VL]\n"
+ "cmp x23, #0x0\n"
+ "addvl x27, x27, #4\n"
"ld1h { z27.h }, p3/Z, [x25]\n"
+ "add x26, x26, %x[out_stride]\n"
"ld1h { z26.h }, p2/Z, [x25, #1, MUL VL]\n"
- "cmp x21, #0x0\n"
- "addvl x26, x26, #4\n"
"ld1h { z25.h }, p1/Z, [x25, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
"ld1h { z24.h }, p0/Z, [x25, #3, MUL VL]\n"
"addvl x25, x25, #4\n"
"ld1h { z23.h }, p3/Z, [x24]\n"
@@ -74,12 +74,12 @@ void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
"ld1h { z21.h }, p1/Z, [x24, #2, MUL VL]\n"
"ld1h { z20.h }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- "ld1h { z19.h }, p3/Z, [x23]\n"
- "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z19.h }, p3/Z, [x22]\n"
+ "ld1h { z18.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x22, #3, MUL VL]\n"
"st1h { z31.h }, p4, [x20]\n"
- "addvl x23, x23, #4\n"
+ "addvl x22, x22, #4\n"
"st1h { z30.h }, p4, [x20, #1, MUL VL]\n"
"st1h { z29.h }, p4, [x20, #2, MUL VL]\n"
"st1h { z28.h }, p4, [x20, #3, MUL VL]\n"
@@ -104,32 +104,32 @@ void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
"cbz %x[height], 8f\n"
"4:" // Main loop skip
"5:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
+ "add %x[in], x27, %x[in_stride]\n"
"sub %x[height], %x[height], #0x1\n"
"mov x21, %x[width]\n"
"6:" // Tail row loop: Column loop
"mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z19.h }, p0/Z, [x26]\n"
+ "dech x21, ALL, MUL #4\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
"dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z17.h }, p0/Z, [x26, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x27]\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
- "dech x21, ALL, MUL #4\n"
+ "ld1h { z18.h }, p0/Z, [x27, #1, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
"cmp x21, #0x0\n"
- "ld1h { z16.h }, p0/Z, [x26, #3, MUL VL]\n"
- "st1h { z19.h }, p4, [x22]\n"
- "addvl x26, x26, #4\n"
- "st1h { z18.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p4, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z17.h }, p1/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
+ "st1h { z19.h }, p4, [x26]\n"
+ "st1h { z18.h }, p4, [x26, #1, MUL VL]\n"
+ "st1h { z17.h }, p4, [x26, #2, MUL VL]\n"
+ "st1h { z16.h }, p4, [x26, #3, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -139,7 +139,7 @@ void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
".inst 0xd503467f // SMSTOP\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
@@ -147,6 +147,19 @@ void sme_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
template<>
void Transform<4, 1, true, VLType::SME>(
+ double *out, const double *in, int stride, int x0, int xmax, int k0, int kmax)
+{
+ sme_transpose_interleave_4VL(
+ reinterpret_cast<uint16_t *>(out),
+ reinterpret_cast<const uint16_t *>(in + k0 * stride + x0),
+ (xmax-x0) * sizeof(double) / 2,
+ stride * sizeof(double),
+ (kmax-k0)
+ );
+}
+
+template<>
+void Transform<4, 1, true, VLType::SME>(
float *out, const float *in, int stride, int x0, int xmax, int k0, int kmax)
{
sme_transpose_interleave_4VL(
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp
index 2d46a481f3..c7d4882b42 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,43 +43,43 @@ void sme_transpose_interleave_4VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"ptrue p1.b\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
- "add x22, x23, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x22, %x[in_stride]\n"
- "csel x22, x22, %x[pad_row], GT\n"
- "csel x23, x23, %x[pad_row], GE\n"
+ "add x24, x25, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "add x22, x24, %x[in_stride]\n"
+ "mov x21, %x[width]\n"
+ "add x20, x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GE\n"
+ "add %x[in], x20, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "mov x21, %x[out]\n"
- "csel x24, x24, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x20, %x[width]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"2:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x20\n"
- "ld1b { z17.b }, p0/Z, [x25]\n"
- "decw x20, ALL, MUL #4\n"
- "ld1b { z19.b }, p0/Z, [x24]\n"
- "cmp x20, #0x0\n"
+ "whilelt p0.b, XZR, x21\n"
+ "decw x21, ALL, MUL #4\n"
+ "ld1b { z20.b }, p0/Z, [x25]\n"
+ "cmp x21, #0x0\n"
"addvl x25, x25, #1\n"
- "ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z18.b, z17.b, z16.b\n"
- "zip2 z20.b, z17.b, z16.b\n"
+ "ld1b { z19.b }, p0/Z, [x24]\n"
"addvl x24, x24, #1\n"
- "ld1b { z16.b }, p0/Z, [x22]\n"
- "zip1 z17.b, z19.b, z16.b\n"
- "zip2 z19.b, z19.b, z16.b\n"
- "addvl x23, x23, #1\n"
+ "ld1b { z17.b }, p0/Z, [x22]\n"
"addvl x22, x22, #1\n"
- "zip1 z16.b, z18.b, z17.b\n"
+ "ld1b { z16.b }, p0/Z, [x20]\n"
+ "addvl x20, x20, #1\n"
+ "zip1 z18.b, z20.b, z17.b\n"
+ "zip2 z20.b, z20.b, z17.b\n"
+ "zip1 z17.b, z19.b, z16.b\n"
+ "zip2 z16.b, z19.b, z16.b\n"
+ "zip1 z19.b, z18.b, z17.b\n"
"zip2 z18.b, z18.b, z17.b\n"
- "st1b { z16.b }, p1, [x21]\n"
- "zip1 z17.b, z20.b, z19.b\n"
- "zip2 z16.b, z20.b, z19.b\n"
- "st1b { z18.b }, p1, [x21, #1, MUL VL]\n"
- "st1b { z17.b }, p1, [x21, #2, MUL VL]\n"
- "st1b { z16.b }, p1, [x21, #3, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 z17.b, z20.b, z16.b\n"
+ "zip2 z16.b, z20.b, z16.b\n"
+ "st1b { z19.b }, p1, [x23]\n"
+ "st1b { z18.b }, p1, [x23, #1, MUL VL]\n"
+ "st1b { z17.b }, p1, [x23, #2, MUL VL]\n"
+ "st1b { z16.b }, p1, [x23, #3, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
index 002a12479a..f070d7d322 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,49 +45,49 @@ void sme_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
"blt 4f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x25, %x[out]\n"
+ "add x24, x26, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "add x21, x23, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
+ "mov x20, x22\n"
+ "decw x22, ALL, MUL #4\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z19.h }, p1/Z, [x26]\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x25]\n"
- "decw x21, ALL, MUL #4\n"
- "cmp x21, #0x0\n"
- "zip1 z24.h, z19.h, z17.h\n"
- "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z20.h }, p1/Z, [x26]\n"
+ "cmp x22, #0x0\n"
+ "ld1h { z19.h }, p0/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "addvl x25, x25, #2\n"
- "zip2 z23.h, z19.h, z17.h\n"
"ld1h { z17.h }, p1/Z, [x24]\n"
- "zip1 z22.h, z18.h, z16.h\n"
- "zip2 z21.h, z18.h, z16.h\n"
- "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #2\n"
- "ld1h { z16.h }, p1/Z, [x23]\n"
- "zip1 z19.h, z17.h, z16.h\n"
- "zip2 z18.h, z17.h, z16.h\n"
- "ld1h { z16.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x23]\n"
+ "ld1h { z24.h }, p0/Z, [x23, #1, MUL VL]\n"
"addvl x23, x23, #2\n"
- "zip1 z17.h, z20.h, z16.h\n"
- "zip2 z16.h, z20.h, z16.h\n"
- "st1h { z24.h }, p2, [x22]\n"
- "st1h { z23.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z23.h, z20.h, z17.h\n"
+ "zip2 z22.h, z20.h, z17.h\n"
+ "ld1h { z17.h }, p1/Z, [x21]\n"
+ "zip1 z21.h, z19.h, z16.h\n"
+ "zip2 z20.h, z19.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x21, #1, MUL VL]\n"
+ "addvl x21, x21, #2\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "st1h { z23.h }, p2, [x25]\n"
+ "zip1 z17.h, z24.h, z16.h\n"
+ "zip2 z16.h, z24.h, z16.h\n"
+ "st1h { z22.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z21.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x25, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x25, #4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #5, MUL VL]\n"
+ "st1h { z17.h }, p2, [x25, #6, MUL VL]\n"
+ "st1h { z16.h }, p2, [x25, #7, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -97,35 +97,35 @@ void sme_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
"4:" // Main loop skip
"5:" // Tail row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "add x24, x26, %x[in_stride]\n"
+ "mov x25, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
"mov x21, %x[width]\n"
"6:" // Tail row loop: Column loop
"mov x20, x21\n"
+ "decw x21, ALL, MUL #4\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z18.h }, p1/Z, [x26]\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x26, #1, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x25]\n"
- "decw x21, ALL, MUL #4\n"
+ "ld1h { z18.h }, p1/Z, [x26]\n"
"cmp x21, #0x0\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "ld1h { z16.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "addvl x25, x25, #2\n"
+ "ld1h { z17.h }, p1/Z, [x24]\n"
+ "ld1h { z16.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z19.h, z18.h, z17.h\n"
"zip2 z18.h, z18.h, z17.h\n"
"zip1 z17.h, z20.h, z16.h\n"
"zip2 z16.h, z20.h, z16.h\n"
- "st1h { z19.h }, p2, [x22]\n"
- "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z19.h }, p2, [x25]\n"
+ "st1h { z18.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z17.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x25, #3, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp
index 2a43f34f71..44305f0513 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_4VL_2x2_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,69 +45,69 @@ void sme_transpose_interleave_4VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"blt 4f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x25, %x[out]\n"
+ "add x24, x26, %x[in_stride]\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "add x21, x23, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
+ "mov x20, x22\n"
+ "decw x22, ALL, MUL #4\n"
"whilelt p3.s, XZR, x20\n"
- "ld1w { z16.s }, p3/Z, [x26]\n"
- ".inst 0x658ab218 // bfcvt z24.h, p4/M, z16.s\n"
"decw x20\n"
"whilelt p2.s, XZR, x20\n"
- "ld1w { z16.s }, p2/Z, [x26, #1, MUL VL]\n"
- ".inst 0x658ab217 // bfcvt z23.h, p4/M, z16.s\n"
"decw x20\n"
+ "ld1w { z19.s }, p3/Z, [x26]\n"
"whilelt p1.s, XZR, x20\n"
- "ld1w { z16.s }, p1/Z, [x26, #2, MUL VL]\n"
- ".inst 0x658ab216 // bfcvt z22.h, p4/M, z16.s\n"
"decw x20\n"
+ "ld1w { z18.s }, p2/Z, [x26, #1, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
+ "ld1w { z17.s }, p1/Z, [x26, #2, MUL VL]\n"
+ "cmp x22, #0x0\n"
"ld1w { z16.s }, p0/Z, [x26, #3, MUL VL]\n"
- ".inst 0x658ab215 // bfcvt z21.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- ".inst 0x658ab214 // bfcvt z20.h, p4/M, z16.s\n"
- "decw x21, ALL, MUL #4\n"
- "cmp x21, #0x0\n"
- "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
- ".inst 0x658ab213 // bfcvt z19.h, p4/M, z16.s\n"
+ ".inst 0x658ab27b // bfcvt z27.h, p4/M, z19.s\n"
"addvl x26, x26, #4\n"
- "ld1w { z16.s }, p1/Z, [x24, #2, MUL VL]\n"
- ".inst 0x658ab212 // bfcvt z18.h, p4/M, z16.s\n"
+ "ld1w { z19.s }, p3/Z, [x23]\n"
+ ".inst 0x658ab25a // bfcvt z26.h, p4/M, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x23, #1, MUL VL]\n"
+ ".inst 0x658ab239 // bfcvt z25.h, p4/M, z17.s\n"
+ "ld1w { z17.s }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0x658ab218 // bfcvt z24.h, p4/M, z16.s\n"
+ "ld1w { z16.s }, p0/Z, [x23, #3, MUL VL]\n"
+ ".inst 0x658ab277 // bfcvt z23.h, p4/M, z19.s\n"
+ "addvl x23, x23, #4\n"
+ ".inst 0x658ab256 // bfcvt z22.h, p4/M, z18.s\n"
+ "ld1w { z19.s }, p3/Z, [x24]\n"
+ ".inst 0x658ab235 // bfcvt z21.h, p4/M, z17.s\n"
+ "ld1w { z18.s }, p2/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x658ab214 // bfcvt z20.h, p4/M, z16.s\n"
+ "ld1w { z17.s }, p1/Z, [x24, #2, MUL VL]\n"
"ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
- ".inst 0x658ab211 // bfcvt z17.h, p4/M, z16.s\n"
"addvl x24, x24, #4\n"
- "ld1w { z16.s }, p3/Z, [x25]\n"
+ ".inst 0x648ab27b // bfcvtnt z27.h, p4/M, z19.s\n"
+ "ld1w { z19.s }, p3/Z, [x21]\n"
+ ".inst 0x648ab25a // bfcvtnt z26.h, p4/M, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x21, #1, MUL VL]\n"
+ ".inst 0x648ab239 // bfcvtnt z25.h, p4/M, z17.s\n"
+ "ld1w { z17.s }, p1/Z, [x21, #2, MUL VL]\n"
".inst 0x648ab218 // bfcvtnt z24.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
- ".inst 0x648ab217 // bfcvtnt z23.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
- ".inst 0x648ab216 // bfcvtnt z22.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0x648ab215 // bfcvtnt z21.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
+ "ld1w { z16.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "addvl x21, x21, #4\n"
+ ".inst 0x648ab277 // bfcvtnt z23.h, p4/M, z19.s\n"
+ "st1h { z27.h }, p4, [x25]\n"
+ ".inst 0x648ab256 // bfcvtnt z22.h, p4/M, z18.s\n"
+ "st1h { z26.h }, p4, [x25, #1, MUL VL]\n"
+ ".inst 0x648ab235 // bfcvtnt z21.h, p4/M, z17.s\n"
+ "st1h { z25.h }, p4, [x25, #2, MUL VL]\n"
".inst 0x648ab214 // bfcvtnt z20.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x23, #1, MUL VL]\n"
- ".inst 0x648ab213 // bfcvtnt z19.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x23, #2, MUL VL]\n"
- ".inst 0x648ab212 // bfcvtnt z18.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x23, #3, MUL VL]\n"
- "addvl x23, x23, #4\n"
- ".inst 0x648ab211 // bfcvtnt z17.h, p4/M, z16.s\n"
- "st1h { z24.h }, p4, [x22]\n"
- "st1h { z23.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z22.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z21.h }, p4, [x22, #3, MUL VL]\n"
- "st1h { z20.h }, p4, [x22, #4, MUL VL]\n"
- "st1h { z19.h }, p4, [x22, #5, MUL VL]\n"
- "st1h { z18.h }, p4, [x22, #6, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z24.h }, p4, [x25, #3, MUL VL]\n"
+ "st1h { z23.h }, p4, [x25, #4, MUL VL]\n"
+ "st1h { z22.h }, p4, [x25, #5, MUL VL]\n"
+ "st1h { z21.h }, p4, [x25, #6, MUL VL]\n"
+ "st1h { z20.h }, p4, [x25, #7, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -117,47 +117,47 @@ void sme_transpose_interleave_4VL_2x2_fp32bf16(bfloat16 *out, const float *in, s
"4:" // Main loop skip
"5:" // Tail row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x25, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "csel x25, x25, %x[pad_row], GT\n"
+ "add x24, x26, %x[in_stride]\n"
+ "mov x25, %x[out]\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
"mov x21, %x[width]\n"
"6:" // Tail row loop: Column loop
"mov x20, x21\n"
+ "decw x21, ALL, MUL #4\n"
"whilelt p3.s, XZR, x20\n"
- "ld1w { z16.s }, p3/Z, [x26]\n"
- ".inst 0x658ab214 // bfcvt z20.h, p4/M, z16.s\n"
"decw x20\n"
"whilelt p2.s, XZR, x20\n"
- "ld1w { z16.s }, p2/Z, [x26, #1, MUL VL]\n"
- ".inst 0x658ab213 // bfcvt z19.h, p4/M, z16.s\n"
"decw x20\n"
+ "ld1w { z19.s }, p3/Z, [x26]\n"
"whilelt p1.s, XZR, x20\n"
- "ld1w { z16.s }, p1/Z, [x26, #2, MUL VL]\n"
- ".inst 0x658ab212 // bfcvt z18.h, p4/M, z16.s\n"
"decw x20\n"
+ "ld1w { z18.s }, p2/Z, [x26, #1, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z16.s }, p0/Z, [x26, #3, MUL VL]\n"
- ".inst 0x658ab211 // bfcvt z17.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x25]\n"
- "decw x21, ALL, MUL #4\n"
+ "ld1w { z17.s }, p1/Z, [x26, #2, MUL VL]\n"
"cmp x21, #0x0\n"
- ".inst 0x648ab214 // bfcvtnt z20.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x26, #3, MUL VL]\n"
+ ".inst 0x658ab277 // bfcvt z23.h, p4/M, z19.s\n"
"addvl x26, x26, #4\n"
- ".inst 0x648ab213 // bfcvtnt z19.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p1/Z, [x25, #2, MUL VL]\n"
+ ".inst 0x658ab256 // bfcvt z22.h, p4/M, z18.s\n"
+ "ld1w { z21.s }, p3/Z, [x24]\n"
+ ".inst 0x658ab234 // bfcvt z20.h, p4/M, z17.s\n"
+ "ld1w { z19.s }, p2/Z, [x24, #1, MUL VL]\n"
+ ".inst 0x658ab212 // bfcvt z18.h, p4/M, z16.s\n"
+ "ld1w { z17.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0x648ab2b7 // bfcvtnt z23.h, p4/M, z21.s\n"
+ ".inst 0x648ab276 // bfcvtnt z22.h, p4/M, z19.s\n"
+ ".inst 0x648ab234 // bfcvtnt z20.h, p4/M, z17.s\n"
".inst 0x648ab212 // bfcvtnt z18.h, p4/M, z16.s\n"
- "ld1w { z16.s }, p0/Z, [x25, #3, MUL VL]\n"
- "addvl x25, x25, #4\n"
- ".inst 0x648ab211 // bfcvtnt z17.h, p4/M, z16.s\n"
- "st1h { z20.h }, p4, [x22]\n"
- "st1h { z19.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z18.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z23.h }, p4, [x25]\n"
+ "st1h { z22.h }, p4, [x25, #1, MUL VL]\n"
+ "st1h { z20.h }, p4, [x25, #2, MUL VL]\n"
+ "st1h { z18.h }, p4, [x25, #3, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL.hpp
index be9ad666a9..c49c5ba433 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,51 +39,51 @@ void sme_transpose_interleave_8VL(uint16_t *out, const uint16_t *in, size_t widt
"blt 4f\n"
"1:" // Main row loop: Head
"mov x25, %x[in]\n"
- "add x24, x25, %x[in_stride]\n"
- "add %x[in], x24, %x[in_stride]\n"
- "mov x23, %x[out]\n"
+ "mov x24, %x[out]\n"
+ "add x23, x25, %x[in_stride]\n"
"sub %x[height], %x[height], #0x2\n"
+ "add %x[in], x23, %x[in_stride]\n"
"mov x22, %x[width]\n"
"2:" // Main row loop: Column loop
"mov x21, x22\n"
+ "mov x20, x24\n"
"whilelt p0.h, XZR, x21\n"
- "ld1h { z31.h }, p0/Z, [x25]\n"
"dech x21\n"
"whilelt p6.h, XZR, x21\n"
- "ld1h { z30.h }, p6/Z, [x25, #1, MUL VL]\n"
"dech x21\n"
+ "ld1h { z31.h }, p0/Z, [x25]\n"
"whilelt p5.h, XZR, x21\n"
- "ld1h { z29.h }, p5/Z, [x25, #2, MUL VL]\n"
"dech x21\n"
+ "ld1h { z30.h }, p6/Z, [x25, #1, MUL VL]\n"
"whilelt p4.h, XZR, x21\n"
- "ld1h { z28.h }, p4/Z, [x25, #3, MUL VL]\n"
"dech x21\n"
+ "ld1h { z29.h }, p5/Z, [x25, #2, MUL VL]\n"
"whilelt p3.h, XZR, x21\n"
- "ld1h { z27.h }, p3/Z, [x25, #4, MUL VL]\n"
"dech x21\n"
+ "ld1h { z28.h }, p4/Z, [x25, #3, MUL VL]\n"
"whilelt p2.h, XZR, x21\n"
- "ld1h { z26.h }, p2/Z, [x25, #5, MUL VL]\n"
"dech x21\n"
+ "ld1h { z27.h }, p3/Z, [x25, #4, MUL VL]\n"
"whilelt p1.h, XZR, x21\n"
- "ld1h { z25.h }, p1/Z, [x25, #6, MUL VL]\n"
"dech x21\n"
- "mov x20, x23\n"
- "ld1h { z24.h }, p0/Z, [x24]\n"
- "whilelt p0.h, XZR, x21\n"
+ "ld1h { z26.h }, p2/Z, [x25, #5, MUL VL]\n"
"dech x22, ALL, MUL #8\n"
- "ld1h { z23.h }, p0/Z, [x25, #7, MUL VL]\n"
- "ld1h { z22.h }, p6/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z25.h }, p1/Z, [x25, #6, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "ld1h { z24.h }, p0/Z, [x23]\n"
+ "whilelt p0.h, XZR, x21\n"
"cmp x22, #0x0\n"
+ "ld1h { z23.h }, p0/Z, [x25, #7, MUL VL]\n"
"addvl x25, x25, #8\n"
- "ld1h { z21.h }, p5/Z, [x24, #2, MUL VL]\n"
- "add x23, x23, %x[out_stride]\n"
- "ld1h { z20.h }, p4/Z, [x24, #3, MUL VL]\n"
- "ld1h { z19.h }, p3/Z, [x24, #4, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x24, #5, MUL VL]\n"
- "ld1h { z17.h }, p1/Z, [x24, #6, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x24, #7, MUL VL]\n"
+ "ld1h { z22.h }, p6/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z21.h }, p5/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z20.h }, p4/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z19.h }, p3/Z, [x23, #4, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #5, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x23, #6, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x23, #7, MUL VL]\n"
"st1h { z31.h }, p7, [x20]\n"
- "addvl x24, x24, #8\n"
+ "addvl x23, x23, #8\n"
"st1h { z30.h }, p7, [x20, #1, MUL VL]\n"
"st1h { z29.h }, p7, [x20, #2, MUL VL]\n"
"st1h { z28.h }, p7, [x20, #3, MUL VL]\n"
@@ -109,47 +109,47 @@ void sme_transpose_interleave_8VL(uint16_t *out, const uint16_t *in, size_t widt
"4:" // Main loop skip
"5:" // Tail row loop: Head
"mov x25, %x[in]\n"
+ "mov x24, %x[out]\n"
"add %x[in], x25, %x[in_stride]\n"
- "mov x23, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
"mov x21, %x[width]\n"
"6:" // Tail row loop: Column loop
"mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z23.h }, p0/Z, [x25]\n"
+ "dech x21, ALL, MUL #8\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z22.h }, p0/Z, [x25, #1, MUL VL]\n"
"dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z21.h }, p0/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z23.h }, p1/Z, [x25]\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
+ "ld1h { z22.h }, p0/Z, [x25, #1, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x25, #3, MUL VL]\n"
"dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z19.h }, p0/Z, [x25, #4, MUL VL]\n"
+ "ld1h { z21.h }, p1/Z, [x25, #2, MUL VL]\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
+ "ld1h { z20.h }, p0/Z, [x25, #3, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x25, #5, MUL VL]\n"
"dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z17.h }, p0/Z, [x25, #6, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x25, #4, MUL VL]\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
- "dech x21, ALL, MUL #8\n"
+ "ld1h { z18.h }, p0/Z, [x25, #5, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
"cmp x21, #0x0\n"
+ "ld1h { z17.h }, p1/Z, [x25, #6, MUL VL]\n"
"ld1h { z16.h }, p0/Z, [x25, #7, MUL VL]\n"
- "st1h { z23.h }, p7, [x23]\n"
"addvl x25, x25, #8\n"
- "st1h { z22.h }, p7, [x23, #1, MUL VL]\n"
- "st1h { z21.h }, p7, [x23, #2, MUL VL]\n"
- "st1h { z20.h }, p7, [x23, #3, MUL VL]\n"
- "st1h { z19.h }, p7, [x23, #4, MUL VL]\n"
- "st1h { z18.h }, p7, [x23, #5, MUL VL]\n"
- "st1h { z17.h }, p7, [x23, #6, MUL VL]\n"
- "st1h { z16.h }, p7, [x23, #7, MUL VL]\n"
- "add x23, x23, %x[out_stride]\n"
+ "st1h { z23.h }, p7, [x24]\n"
+ "st1h { z22.h }, p7, [x24, #1, MUL VL]\n"
+ "st1h { z21.h }, p7, [x24, #2, MUL VL]\n"
+ "st1h { z20.h }, p7, [x24, #3, MUL VL]\n"
+ "st1h { z19.h }, p7, [x24, #4, MUL VL]\n"
+ "st1h { z18.h }, p7, [x24, #5, MUL VL]\n"
+ "st1h { z17.h }, p7, [x24, #6, MUL VL]\n"
+ "st1h { z16.h }, p7, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_1x4.hpp
index 45d2e24258..30a9dc3e9c 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,62 +43,62 @@ void sme_transpose_interleave_8VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"ptrue p2.b\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add x25, x26, %x[in_stride]\n"
+ "mov x24, %x[out]\n"
+ "add x23, x25, %x[in_stride]\n"
+ "mov x22, %x[width]\n"
+ "add x21, x23, %x[in_stride]\n"
+ "csel x23, x23, %x[pad_row], GE\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
"cmp %x[height], #0x1\n"
- "mov x22, %x[out]\n"
- "csel x25, x25, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "csel x25, x25, %x[pad_row], GT\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
+ "mov x20, x22\n"
+ "decw x22, ALL, MUL #8\n"
"whilelt p1.b, XZR, x20\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
"decb x20\n"
"whilelt p0.b, XZR, x20\n"
- "ld1b { z17.b }, p0/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
- "decw x21, ALL, MUL #8\n"
- "cmp x21, #0x0\n"
- "ld1b { z21.b }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x26]\n"
+ "cmp x22, #0x0\n"
+ "ld1b { z24.b }, p0/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
+ "ld1b { z23.b }, p1/Z, [x25]\n"
+ "ld1b { z22.b }, p0/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
- "ld1b { z16.b }, p1/Z, [x24]\n"
- "zip1 z24.b, z19.b, z16.b\n"
- "zip2 z20.b, z19.b, z16.b\n"
- "ld1b { z16.b }, p0/Z, [x24, #1, MUL VL]\n"
- "zip1 z23.b, z17.b, z16.b\n"
- "zip2 z22.b, z17.b, z16.b\n"
- "addvl x24, x24, #2\n"
- "ld1b { z16.b }, p1/Z, [x23]\n"
- "zip1 z17.b, z18.b, z16.b\n"
- "zip2 z19.b, z18.b, z16.b\n"
- "ld1b { z16.b }, p0/Z, [x23, #1, MUL VL]\n"
- "zip1 z18.b, z21.b, z16.b\n"
- "zip2 z21.b, z21.b, z16.b\n"
+ "ld1b { z19.b }, p1/Z, [x23]\n"
+ "ld1b { z18.b }, p0/Z, [x23, #1, MUL VL]\n"
"addvl x23, x23, #2\n"
- "zip1 z16.b, z24.b, z17.b\n"
- "zip2 z17.b, z24.b, z17.b\n"
- "st1b { z16.b }, p2, [x22]\n"
- "zip1 z16.b, z20.b, z19.b\n"
+ "ld1b { z17.b }, p1/Z, [x21]\n"
+ "ld1b { z16.b }, p0/Z, [x21, #1, MUL VL]\n"
+ "zip1 z21.b, z20.b, z19.b\n"
"zip2 z20.b, z20.b, z19.b\n"
- "st1b { z17.b }, p2, [x22, #1, MUL VL]\n"
- "zip1 z19.b, z23.b, z18.b\n"
- "zip2 z18.b, z23.b, z18.b\n"
- "st1b { z16.b }, p2, [x22, #2, MUL VL]\n"
- "zip1 z17.b, z22.b, z21.b\n"
- "zip2 z16.b, z22.b, z21.b\n"
- "st1b { z20.b }, p2, [x22, #3, MUL VL]\n"
- "st1b { z19.b }, p2, [x22, #4, MUL VL]\n"
- "st1b { z18.b }, p2, [x22, #5, MUL VL]\n"
- "st1b { z17.b }, p2, [x22, #6, MUL VL]\n"
- "st1b { z16.b }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x21, x21, #2\n"
+ "zip1 z25.b, z24.b, z18.b\n"
+ "zip2 z24.b, z24.b, z18.b\n"
+ "zip1 z19.b, z23.b, z17.b\n"
+ "zip2 z18.b, z23.b, z17.b\n"
+ "zip1 z17.b, z22.b, z16.b\n"
+ "zip2 z16.b, z22.b, z16.b\n"
+ "zip1 z23.b, z21.b, z19.b\n"
+ "zip2 z22.b, z21.b, z19.b\n"
+ "zip1 z21.b, z20.b, z18.b\n"
+ "zip2 z20.b, z20.b, z18.b\n"
+ "zip1 z19.b, z25.b, z17.b\n"
+ "zip2 z18.b, z25.b, z17.b\n"
+ "zip1 z17.b, z24.b, z16.b\n"
+ "zip2 z16.b, z24.b, z16.b\n"
+ "st1b { z23.b }, p2, [x24]\n"
+ "st1b { z22.b }, p2, [x24, #1, MUL VL]\n"
+ "st1b { z21.b }, p2, [x24, #2, MUL VL]\n"
+ "st1b { z20.b }, p2, [x24, #3, MUL VL]\n"
+ "st1b { z19.b }, p2, [x24, #4, MUL VL]\n"
+ "st1b { z18.b }, p2, [x24, #5, MUL VL]\n"
+ "st1b { z17.b }, p2, [x24, #6, MUL VL]\n"
+ "st1b { z16.b }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_2x2.hpp
index ec7c415e27..75bc57a649 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sme_transpose_interleave_8VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023 Arm Limited.
+ * Copyright (c) 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -43,40 +43,40 @@ void sme_transpose_interleave_8VL_2x2(uint16_t *out, const uint16_t *in, size_t
"ptrue p4.b\n"
"1:" // Main row loop: Head
"mov x24, %x[in]\n"
- "add x23, x24, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x23, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
"mov x22, %x[out]\n"
+ "add %x[in], x23, %x[in_stride]\n"
"csel x23, x23, %x[pad_row], GT\n"
"sub %x[height], %x[height], #0x2\n"
"mov x21, %x[width]\n"
"2:" // Main row loop: Column loop
"mov x20, x21\n"
+ "decw x21, ALL, MUL #8\n"
"whilelt p3.h, XZR, x20\n"
- "ld1h { z20.h }, p3/Z, [x24]\n"
"dech x20\n"
"whilelt p2.h, XZR, x20\n"
- "ld1h { z19.h }, p2/Z, [x24, #1, MUL VL]\n"
"dech x20\n"
+ "ld1h { z21.h }, p3/Z, [x24]\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z18.h }, p1/Z, [x24, #2, MUL VL]\n"
"dech x20\n"
+ "ld1h { z20.h }, p2/Z, [x24, #1, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z24.h }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1h { z17.h }, p3/Z, [x23]\n"
- "decw x21, ALL, MUL #8\n"
+ "ld1h { z25.h }, p1/Z, [x24, #2, MUL VL]\n"
"cmp x21, #0x0\n"
- "zip1 z23.h, z20.h, z17.h\n"
- "ld1h { z16.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z24.h }, p0/Z, [x24, #3, MUL VL]\n"
"addvl x24, x24, #4\n"
- "zip2 z22.h, z20.h, z17.h\n"
- "zip1 z21.h, z19.h, z16.h\n"
+ "ld1h { z19.h }, p3/Z, [x23]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
"ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
- "zip2 z20.h, z19.h, z16.h\n"
- "zip1 z19.h, z18.h, z17.h\n"
"ld1h { z16.h }, p0/Z, [x23, #3, MUL VL]\n"
"addvl x23, x23, #4\n"
- "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z23.h, z21.h, z19.h\n"
+ "zip2 z22.h, z21.h, z19.h\n"
+ "zip1 z21.h, z20.h, z18.h\n"
+ "zip2 z20.h, z20.h, z18.h\n"
+ "zip1 z19.h, z25.h, z17.h\n"
+ "zip2 z18.h, z25.h, z17.h\n"
"zip1 z17.h, z24.h, z16.h\n"
"zip2 z16.h, z24.h, z16.h\n"
"st1h { z23.h }, p4, [x22]\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp
index f627fe575f..86dcddd07b 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_12VL_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,314 +39,314 @@ void sve_transpose_interleave_12VL_2x4_fp32bf16(bfloat16 *out, const float *in,
size_t out_stride = 12 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "ptrue p6.b\n"
+ "ptrue p2.b\n"
"1:" // Main row loop: Head
"mov x28, %x[in]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
- "mov x25, %x[width]\n"
- "cnth x24, ALL, MUL #6\n"
- "add x23, x26, %x[in_stride]\n"
+ "mov x27, %x[width]\n"
+ "cnth x26, ALL, MUL #6\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x26, x26, %x[pad_row], GE\n"
+ "mov x25, %x[out]\n"
+ "add x24, x28, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "csel x23, x23, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "cmp x25, x24\n"
- "mov x22, %x[out]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x27, x26\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z22.s }, p6/Z, [x28]\n"
- "ld1w { z7.s }, p6/Z, [x28, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z19.s }, p6/Z, [x28, #2, MUL VL]\n"
- "ld1w { z18.s }, p6/Z, [x28, #3, MUL VL]\n"
- "mov x20, x22\n"
- "sub x25, x25, x24\n"
- "ld1w { z5.s }, p6/Z, [x28, #4, MUL VL]\n"
- "ld1w { z25.s }, p6/Z, [x28, #5, MUL VL]\n"
- "cmp x25, x24\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z20.s }, p6/Z, [x28, #6, MUL VL]\n"
- "ld1w { z23.s }, p6/Z, [x28, #7, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x28]\n"
+ "ld1w { z22.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "mov x21, x25\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1w { z30.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x28, #3, MUL VL]\n"
+ "mov x20, x25\n"
+ "sub x27, x27, x26\n"
+ "ld1w { z23.s }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x28, #5, MUL VL]\n"
+ "cmp x27, x26\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1w { z17.s }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1w { z0.s }, p2/Z, [x28, #7, MUL VL]\n"
"addvl x28, x28, #12\n"
- "ld1w { z4.s }, p6/Z, [x26]\n"
- "ld1w { z10.s }, p6/Z, [x26, #1, MUL VL]\n"
- "zip1 z14.s, z22.s, z4.s\n"
- "zip2 z22.s, z22.s, z4.s\n"
- "ld1w { z28.s }, p6/Z, [x26, #2, MUL VL]\n"
- "ld1w { z27.s }, p6/Z, [x26, #3, MUL VL]\n"
- "zip1 z24.s, z7.s, z10.s\n"
- "zip2 z15.s, z7.s, z10.s\n"
- "ld1w { z7.s }, p6/Z, [x26, #4, MUL VL]\n"
- "ld1w { z2.s }, p6/Z, [x26, #5, MUL VL]\n"
- "zip1 z9.s, z19.s, z28.s\n"
- "zip2 z0.s, z19.s, z28.s\n"
- "ld1w { z19.s }, p6/Z, [x26, #6, MUL VL]\n"
- "ld1w { z16.s }, p6/Z, [x26, #7, MUL VL]\n"
- "addvl x26, x26, #12\n"
- "zip1 z1.s, z18.s, z27.s\n"
- "ld1w { z30.s }, p6/Z, [x28, #-4, MUL VL]\n"
- "ld1w { z29.s }, p6/Z, [x28, #-3, MUL VL]\n"
- "zip2 z17.s, z18.s, z27.s\n"
- ".inst 0x658ab9d5 // bfcvt z21.h, p6/M, z14.s\n"
- "ld1w { z31.s }, p6/Z, [x27]\n"
- "ld1w { z8.s }, p6/Z, [x27, #1, MUL VL]\n"
- ".inst 0x658abacc // bfcvt z12.h, p6/M, z22.s\n"
- ".inst 0x658abb0e // bfcvt z14.h, p6/M, z24.s\n"
- "ld1w { z22.s }, p6/Z, [x27, #2, MUL VL]\n"
- "ld1w { z28.s }, p6/Z, [x27, #3, MUL VL]\n"
- ".inst 0x658ab9ea // bfcvt z10.h, p6/M, z15.s\n"
- ".inst 0x658ab92f // bfcvt z15.h, p6/M, z9.s\n"
- "ld1w { z27.s }, p6/Z, [x27, #4, MUL VL]\n"
- "ld1w { z13.s }, p6/Z, [x27, #5, MUL VL]\n"
- ".inst 0x658ab803 // bfcvt z3.h, p6/M, z0.s\n"
- ".inst 0x658ab832 // bfcvt z18.h, p6/M, z1.s\n"
- "ld1w { z26.s }, p6/Z, [x27, #6, MUL VL]\n"
- "ld1w { z9.s }, p6/Z, [x27, #7, MUL VL]\n"
- "addvl x27, x27, #12\n"
- ".inst 0x658aba26 // bfcvt z6.h, p6/M, z17.s\n"
- "ld1w { z1.s }, p6/Z, [x26, #-4, MUL VL]\n"
- "ld1w { z0.s }, p6/Z, [x26, #-3, MUL VL]\n"
- "zip1 z17.s, z5.s, z7.s\n"
- "zip2 z5.s, z5.s, z7.s\n"
- "ld1w { z24.s }, p6/Z, [x23]\n"
- "ld1w { z11.s }, p6/Z, [x23, #1, MUL VL]\n"
- "zip1 z7.s, z31.s, z24.s\n"
- "zip2 z31.s, z31.s, z24.s\n"
- "ld1w { z4.s }, p6/Z, [x23, #2, MUL VL]\n"
- "ld1w { z24.s }, p6/Z, [x23, #3, MUL VL]\n"
- ".inst 0x648ab8f5 // bfcvtnt z21.h, p6/M, z7.s\n"
- "zip1 z7.s, z8.s, z11.s\n"
- "zip2 z11.s, z8.s, z11.s\n"
- "ld1w { z8.s }, p6/Z, [x23, #4, MUL VL]\n"
- ".inst 0x648abbec // bfcvtnt z12.h, p6/M, z31.s\n"
- "ld1w { z31.s }, p6/Z, [x23, #5, MUL VL]\n"
- ".inst 0x648ab8ee // bfcvtnt z14.h, p6/M, z7.s\n"
- "ld1w { z7.s }, p6/Z, [x23, #6, MUL VL]\n"
- ".inst 0x648ab96a // bfcvtnt z10.h, p6/M, z11.s\n"
- "zip1 z11.s, z22.s, z4.s\n"
- "zip2 z4.s, z22.s, z4.s\n"
- "ld1w { z22.s }, p6/Z, [x23, #7, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x23]\n"
+ "ld1w { z14.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z12.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z29.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z31.s }, p2/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x23, #6, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x23, #7, MUL VL]\n"
"addvl x23, x23, #12\n"
- ".inst 0x648ab96f // bfcvtnt z15.h, p6/M, z11.s\n"
- "ld1w { z11.s }, p6/Z, [x28, #-2, MUL VL]\n"
- ".inst 0x648ab883 // bfcvtnt z3.h, p6/M, z4.s\n"
- "zip1 z4.s, z28.s, z24.s\n"
- "zip2 z24.s, z28.s, z24.s\n"
- "ld1w { z28.s }, p6/Z, [x28, #-1, MUL VL]\n"
- ".inst 0x648ab892 // bfcvtnt z18.h, p6/M, z4.s\n"
- "ld1w { z4.s }, p6/Z, [x27, #-4, MUL VL]\n"
- ".inst 0x648abb06 // bfcvtnt z6.h, p6/M, z24.s\n"
- "zip1 z24.s, z25.s, z2.s\n"
- "zip2 z25.s, z25.s, z2.s\n"
- "zip1 z2.s, z20.s, z19.s\n"
- "zip2 z20.s, z20.s, z19.s\n"
- "zip1 z19.s, z23.s, z16.s\n"
- "zip2 z16.s, z23.s, z16.s\n"
- "zip1 z23.s, z30.s, z1.s\n"
- "zip2 z30.s, z30.s, z1.s\n"
- "zip1 z1.s, z29.s, z0.s\n"
- "zip2 z0.s, z29.s, z0.s\n"
- ".inst 0x658aba31 // bfcvt z17.h, p6/M, z17.s\n"
- "zip1 z29.s, z27.s, z8.s\n"
- ".inst 0x658ab8a5 // bfcvt z5.h, p6/M, z5.s\n"
- "zip2 z27.s, z27.s, z8.s\n"
- "ld1w { z8.s }, p6/Z, [x27, #-3, MUL VL]\n"
- ".inst 0x658abb18 // bfcvt z24.h, p6/M, z24.s\n"
- ".inst 0x658abb39 // bfcvt z25.h, p6/M, z25.s\n"
- ".inst 0x658ab842 // bfcvt z2.h, p6/M, z2.s\n"
- ".inst 0x658aba94 // bfcvt z20.h, p6/M, z20.s\n"
- ".inst 0x658aba73 // bfcvt z19.h, p6/M, z19.s\n"
- ".inst 0x658aba10 // bfcvt z16.h, p6/M, z16.s\n"
- ".inst 0x658abaf7 // bfcvt z23.h, p6/M, z23.s\n"
- ".inst 0x658abbde // bfcvt z30.h, p6/M, z30.s\n"
- ".inst 0x658ab821 // bfcvt z1.h, p6/M, z1.s\n"
- ".inst 0x658ab800 // bfcvt z0.h, p6/M, z0.s\n"
- ".inst 0x648abbb1 // bfcvtnt z17.h, p6/M, z29.s\n"
- "ld1w { z29.s }, p6/Z, [x26, #-2, MUL VL]\n"
- ".inst 0x648abb65 // bfcvtnt z5.h, p6/M, z27.s\n"
- "zip1 z27.s, z13.s, z31.s\n"
- "zip2 z31.s, z13.s, z31.s\n"
- "ld1w { z13.s }, p6/Z, [x26, #-1, MUL VL]\n"
- ".inst 0x648abb78 // bfcvtnt z24.h, p6/M, z27.s\n"
- "ld1w { z27.s }, p6/Z, [x23, #-4, MUL VL]\n"
- ".inst 0x648abbf9 // bfcvtnt z25.h, p6/M, z31.s\n"
- "zip1 z31.s, z26.s, z7.s\n"
- "zip2 z26.s, z26.s, z7.s\n"
- "ld1w { z7.s }, p6/Z, [x23, #-3, MUL VL]\n"
- ".inst 0x648abbe2 // bfcvtnt z2.h, p6/M, z31.s\n"
- "ld1w { z31.s }, p6/Z, [x27, #-2, MUL VL]\n"
- ".inst 0x648abb54 // bfcvtnt z20.h, p6/M, z26.s\n"
- "zip1 z26.s, z9.s, z22.s\n"
- "zip2 z9.s, z9.s, z22.s\n"
- "ld1w { z22.s }, p6/Z, [x27, #-1, MUL VL]\n"
- ".inst 0x648abb53 // bfcvtnt z19.h, p6/M, z26.s\n"
- "ld1w { z26.s }, p6/Z, [x23, #-2, MUL VL]\n"
- ".inst 0x648ab930 // bfcvtnt z16.h, p6/M, z9.s\n"
- "ld1w { z9.s }, p6/Z, [x23, #-1, MUL VL]\n"
- "st1h { z21.h }, p6, [x21]\n"
- "zip1 z21.s, z4.s, z27.s\n"
- "zip2 z27.s, z4.s, z27.s\n"
- "zip1 z4.s, z8.s, z7.s\n"
- "zip2 z8.s, z8.s, z7.s\n"
- "st1h { z12.h }, p6, [x21, #1, MUL VL]\n"
- "zip1 z7.s, z11.s, z29.s\n"
- "zip2 z11.s, z11.s, z29.s\n"
- "st1h { z14.h }, p6, [x21, #2, MUL VL]\n"
- "zip1 z29.s, z28.s, z13.s\n"
- "zip2 z12.s, z28.s, z13.s\n"
- "st1h { z10.h }, p6, [x21, #3, MUL VL]\n"
- "st1h { z15.h }, p6, [x21, #4, MUL VL]\n"
- ".inst 0x648abab7 // bfcvtnt z23.h, p6/M, z21.s\n"
- ".inst 0x648abb7e // bfcvtnt z30.h, p6/M, z27.s\n"
- "st1h { z3.h }, p6, [x21, #5, MUL VL]\n"
- ".inst 0x648ab881 // bfcvtnt z1.h, p6/M, z4.s\n"
- ".inst 0x648ab900 // bfcvtnt z0.h, p6/M, z8.s\n"
- "st1h { z18.h }, p6, [x21, #6, MUL VL]\n"
- ".inst 0x658ab8e8 // bfcvt z8.h, p6/M, z7.s\n"
- "zip1 z27.s, z31.s, z26.s\n"
- "st1h { z6.h }, p6, [x21, #7, MUL VL]\n"
+ "zip1 z26.s, z16.s, z10.s\n"
+ "ld1w { z2.s }, p2/Z, [x28, #-4, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x28, #-3, MUL VL]\n"
+ "zip2 z15.s, z16.s, z10.s\n"
+ "zip1 z6.s, z22.s, z14.s\n"
+ "ld1w { z27.s }, p2/Z, [x24]\n"
+ "ld1w { z18.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip2 z28.s, z22.s, z14.s\n"
+ "zip1 z25.s, z30.s, z12.s\n"
+ "ld1w { z21.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x24, #3, MUL VL]\n"
+ "zip2 z7.s, z30.s, z12.s\n"
+ "zip1 z9.s, z11.s, z13.s\n"
+ "ld1w { z4.s }, p2/Z, [x24, #4, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x24, #5, MUL VL]\n"
+ "zip2 z16.s, z11.s, z13.s\n"
+ ".inst 0x658aab4c // bfcvt z12.h, p2/M, z26.s\n"
+ "ld1w { z14.s }, p2/Z, [x24, #6, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x24, #7, MUL VL]\n"
+ "addvl x24, x24, #12\n"
+ ".inst 0x658aa9ef // bfcvt z15.h, p2/M, z15.s\n"
+ "ld1w { z26.s }, p2/Z, [x23, #-4, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x23, #-3, MUL VL]\n"
+ ".inst 0x658aa8cd // bfcvt z13.h, p2/M, z6.s\n"
+ ".inst 0x658aab8a // bfcvt z10.h, p2/M, z28.s\n"
+ "ld1w { z28.s }, p2/Z, [x22]\n"
+ "ld1w { z8.s }, p2/Z, [x22, #1, MUL VL]\n"
+ ".inst 0x658aab39 // bfcvt z25.h, p2/M, z25.s\n"
+ ".inst 0x658aa8e6 // bfcvt z6.h, p2/M, z7.s\n"
+ "ld1w { z11.s }, p2/Z, [x22, #2, MUL VL]\n"
+ ".inst 0x658aa927 // bfcvt z7.h, p2/M, z9.s\n"
+ ".inst 0x658aaa10 // bfcvt z16.h, p2/M, z16.s\n"
+ "zip1 z9.s, z23.s, z29.s\n"
+ "zip2 z23.s, z23.s, z29.s\n"
+ "zip1 z29.s, z27.s, z28.s\n"
+ "zip2 z27.s, z27.s, z28.s\n"
+ "ld1w { z28.s }, p2/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x658aa929 // bfcvt z9.h, p2/M, z9.s\n"
+ ".inst 0x658aaaf7 // bfcvt z23.h, p2/M, z23.s\n"
+ ".inst 0x648aabac // bfcvtnt z12.h, p2/M, z29.s\n"
+ "ld1w { z29.s }, p2/Z, [x22, #4, MUL VL]\n"
+ ".inst 0x648aab6f // bfcvtnt z15.h, p2/M, z27.s\n"
+ "zip1 z27.s, z18.s, z8.s\n"
+ "zip2 z8.s, z18.s, z8.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, #5, MUL VL]\n"
+ ".inst 0x648aab6d // bfcvtnt z13.h, p2/M, z27.s\n"
+ "ld1w { z27.s }, p2/Z, [x22, #6, MUL VL]\n"
+ ".inst 0x648aa90a // bfcvtnt z10.h, p2/M, z8.s\n"
+ "zip1 z8.s, z21.s, z11.s\n"
+ "zip2 z21.s, z21.s, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x22, #7, MUL VL]\n"
+ "addvl x22, x22, #12\n"
+ ".inst 0x648aa919 // bfcvtnt z25.h, p2/M, z8.s\n"
+ "ld1w { z8.s }, p2/Z, [x28, #-2, MUL VL]\n"
+ ".inst 0x648aaaa6 // bfcvtnt z6.h, p2/M, z21.s\n"
+ "zip1 z21.s, z3.s, z28.s\n"
+ "zip2 z3.s, z3.s, z28.s\n"
+ "ld1w { z28.s }, p2/Z, [x28, #-1, MUL VL]\n"
+ ".inst 0x648aaaa7 // bfcvtnt z7.h, p2/M, z21.s\n"
+ "ld1w { z21.s }, p2/Z, [x24, #-4, MUL VL]\n"
+ ".inst 0x648aa870 // bfcvtnt z16.h, p2/M, z3.s\n"
+ "zip1 z3.s, z20.s, z31.s\n"
+ "zip2 z31.s, z20.s, z31.s\n"
+ "zip1 z20.s, z17.s, z19.s\n"
+ "zip2 z17.s, z17.s, z19.s\n"
+ "zip1 z19.s, z0.s, z1.s\n"
+ "zip2 z1.s, z0.s, z1.s\n"
+ "zip1 z0.s, z2.s, z26.s\n"
+ "zip2 z2.s, z2.s, z26.s\n"
+ "zip1 z26.s, z24.s, z5.s\n"
+ "zip2 z24.s, z24.s, z5.s\n"
+ "zip1 z5.s, z4.s, z29.s\n"
+ "zip2 z4.s, z4.s, z29.s\n"
+ "ld1w { z29.s }, p2/Z, [x24, #-3, MUL VL]\n"
+ ".inst 0x658aa863 // bfcvt z3.h, p2/M, z3.s\n"
+ ".inst 0x658aabff // bfcvt z31.h, p2/M, z31.s\n"
+ ".inst 0x658aaa94 // bfcvt z20.h, p2/M, z20.s\n"
+ ".inst 0x658aaa31 // bfcvt z17.h, p2/M, z17.s\n"
+ ".inst 0x658aaa73 // bfcvt z19.h, p2/M, z19.s\n"
+ ".inst 0x658aa821 // bfcvt z1.h, p2/M, z1.s\n"
+ ".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
+ ".inst 0x658aa842 // bfcvt z2.h, p2/M, z2.s\n"
+ ".inst 0x658aab5a // bfcvt z26.h, p2/M, z26.s\n"
+ ".inst 0x658aab18 // bfcvt z24.h, p2/M, z24.s\n"
+ ".inst 0x648aa8a9 // bfcvtnt z9.h, p2/M, z5.s\n"
+ "ld1w { z5.s }, p2/Z, [x23, #-2, MUL VL]\n"
+ ".inst 0x648aa897 // bfcvtnt z23.h, p2/M, z4.s\n"
+ "zip1 z4.s, z22.s, z18.s\n"
+ "zip2 z22.s, z22.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x23, #-1, MUL VL]\n"
+ ".inst 0x648aa883 // bfcvtnt z3.h, p2/M, z4.s\n"
+ "ld1w { z4.s }, p2/Z, [x22, #-4, MUL VL]\n"
+ ".inst 0x648aaadf // bfcvtnt z31.h, p2/M, z22.s\n"
+ "zip1 z22.s, z14.s, z27.s\n"
+ "zip2 z14.s, z14.s, z27.s\n"
+ "ld1w { z27.s }, p2/Z, [x22, #-3, MUL VL]\n"
+ ".inst 0x648aaad4 // bfcvtnt z20.h, p2/M, z22.s\n"
+ "ld1w { z22.s }, p2/Z, [x24, #-2, MUL VL]\n"
+ ".inst 0x648aa9d1 // bfcvtnt z17.h, p2/M, z14.s\n"
+ "zip1 z14.s, z30.s, z11.s\n"
+ "zip2 z11.s, z30.s, z11.s\n"
+ "ld1w { z30.s }, p2/Z, [x24, #-1, MUL VL]\n"
+ ".inst 0x648aa9d3 // bfcvtnt z19.h, p2/M, z14.s\n"
+ "ld1w { z14.s }, p2/Z, [x22, #-2, MUL VL]\n"
+ ".inst 0x648aa961 // bfcvtnt z1.h, p2/M, z11.s\n"
+ "ld1w { z11.s }, p2/Z, [x22, #-1, MUL VL]\n"
+ "st1h { z12.h }, p2, [x21]\n"
+ "zip1 z12.s, z21.s, z4.s\n"
+ "zip2 z21.s, z21.s, z4.s\n"
+ "zip1 z4.s, z29.s, z27.s\n"
+ "zip2 z29.s, z29.s, z27.s\n"
+ "st1h { z15.h }, p2, [x21, #1, MUL VL]\n"
+ "zip1 z27.s, z8.s, z5.s\n"
+ "zip2 z8.s, z8.s, z5.s\n"
+ "st1h { z13.h }, p2, [x21, #2, MUL VL]\n"
+ "zip1 z5.s, z28.s, z18.s\n"
+ "zip2 z28.s, z28.s, z18.s\n"
+ "st1h { z10.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z25.h }, p2, [x21, #4, MUL VL]\n"
+ ".inst 0x648aa980 // bfcvtnt z0.h, p2/M, z12.s\n"
+ ".inst 0x648aaaa2 // bfcvtnt z2.h, p2/M, z21.s\n"
+ "st1h { z6.h }, p2, [x21, #5, MUL VL]\n"
+ ".inst 0x648aa89a // bfcvtnt z26.h, p2/M, z4.s\n"
+ ".inst 0x648aabb8 // bfcvtnt z24.h, p2/M, z29.s\n"
+ "st1h { z7.h }, p2, [x21, #6, MUL VL]\n"
+ ".inst 0x658aab7b // bfcvt z27.h, p2/M, z27.s\n"
+ "zip1 z25.s, z22.s, z14.s\n"
+ "st1h { z16.h }, p2, [x21, #7, MUL VL]\n"
"addvl x21, x21, #12\n"
- ".inst 0x658ab96e // bfcvt z14.h, p6/M, z11.s\n"
- "zip2 z28.s, z31.s, z26.s\n"
- ".inst 0x658abbbd // bfcvt z29.h, p6/M, z29.s\n"
- "zip1 z21.s, z22.s, z9.s\n"
- "st1h { z17.h }, p6, [x21, #-4, MUL VL]\n"
- ".inst 0x658ab992 // bfcvt z18.h, p6/M, z12.s\n"
- "zip2 z17.s, z22.s, z9.s\n"
- "st1h { z5.h }, p6, [x21, #-3, MUL VL]\n"
- "st1h { z24.h }, p6, [x21, #-2, MUL VL]\n"
- ".inst 0x648abb68 // bfcvtnt z8.h, p6/M, z27.s\n"
- ".inst 0x648abb8e // bfcvtnt z14.h, p6/M, z28.s\n"
- "st1h { z25.h }, p6, [x21, #-1, MUL VL]\n"
- ".inst 0x648ababd // bfcvtnt z29.h, p6/M, z21.s\n"
- ".inst 0x648aba32 // bfcvtnt z18.h, p6/M, z17.s\n"
- "st1h { z2.h }, p6, [x20]\n"
- "st1h { z20.h }, p6, [x20, #1, MUL VL]\n"
- "st1h { z19.h }, p6, [x20, #2, MUL VL]\n"
- "st1h { z16.h }, p6, [x20, #3, MUL VL]\n"
- "st1h { z23.h }, p6, [x20, #4, MUL VL]\n"
- "st1h { z30.h }, p6, [x20, #5, MUL VL]\n"
- "st1h { z1.h }, p6, [x20, #6, MUL VL]\n"
- "st1h { z0.h }, p6, [x20, #7, MUL VL]\n"
+ ".inst 0x658aa906 // bfcvt z6.h, p2/M, z8.s\n"
+ "zip2 z4.s, z22.s, z14.s\n"
+ ".inst 0x658aa8b2 // bfcvt z18.h, p2/M, z5.s\n"
+ "zip1 z22.s, z30.s, z11.s\n"
+ ".inst 0x658aab95 // bfcvt z21.h, p2/M, z28.s\n"
+ "zip2 z16.s, z30.s, z11.s\n"
+ "st1h { z9.h }, p2, [x21, #-4, MUL VL]\n"
+ "st1h { z23.h }, p2, [x21, #-3, MUL VL]\n"
+ ".inst 0x648aab3b // bfcvtnt z27.h, p2/M, z25.s\n"
+ ".inst 0x648aa886 // bfcvtnt z6.h, p2/M, z4.s\n"
+ "st1h { z3.h }, p2, [x21, #-2, MUL VL]\n"
+ ".inst 0x648aaad2 // bfcvtnt z18.h, p2/M, z22.s\n"
+ "st1h { z31.h }, p2, [x21, #-1, MUL VL]\n"
+ ".inst 0x648aaa15 // bfcvtnt z21.h, p2/M, z16.s\n"
+ "st1h { z20.h }, p2, [x20]\n"
+ "st1h { z17.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z19.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z1.h }, p2, [x20, #3, MUL VL]\n"
+ "st1h { z0.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z2.h }, p2, [x20, #5, MUL VL]\n"
+ "st1h { z26.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z24.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
- "st1h { z8.h }, p6, [x20, #-4, MUL VL]\n"
- "st1h { z14.h }, p6, [x20, #-3, MUL VL]\n"
- "st1h { z29.h }, p6, [x20, #-2, MUL VL]\n"
- "st1h { z18.h }, p6, [x20, #-1, MUL VL]\n"
+ "st1h { z27.h }, p2, [x20, #-4, MUL VL]\n"
+ "st1h { z6.h }, p2, [x20, #-3, MUL VL]\n"
+ "st1h { z18.h }, p2, [x20, #-2, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x27, 5f\n"
"4:" // Main row loop: Column loop
+ "mov x21, x27\n"
"mov x20, x25\n"
- "whilelt p5.s, XZR, x20\n"
- "ld1w { z22.s }, p5/Z, [x28]\n"
- "ld1w { z21.s }, p5/Z, [x26]\n"
- "decw x20\n"
- "whilelt p4.s, XZR, x20\n"
- "ld1w { z20.s }, p4/Z, [x28, #1, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x26, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p3.s, XZR, x20\n"
- "ld1w { z18.s }, p3/Z, [x28, #2, MUL VL]\n"
- "ld1w { z17.s }, p3/Z, [x26, #2, MUL VL]\n"
- "decw x20\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z30.s }, p2/Z, [x28, #3, MUL VL]\n"
- "ld1w { z16.s }, p2/Z, [x26, #3, MUL VL]\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z13.s }, p1/Z, [x28, #4, MUL VL]\n"
- "ld1w { z29.s }, p5/Z, [x27]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z12.s }, p0/Z, [x28, #5, MUL VL]\n"
- "ld1w { z28.s }, p4/Z, [x27, #1, MUL VL]\n"
- "ld1w { z11.s }, p3/Z, [x27, #2, MUL VL]\n"
- "ld1w { z10.s }, p2/Z, [x27, #3, MUL VL]\n"
- "zip1 z27.s, z22.s, z21.s\n"
- "zip2 z26.s, z22.s, z21.s\n"
- "ld1w { z9.s }, p1/Z, [x26, #4, MUL VL]\n"
- "ld1w { z8.s }, p0/Z, [x26, #5, MUL VL]\n"
- "zip1 z25.s, z20.s, z19.s\n"
- "zip2 z24.s, z20.s, z19.s\n"
- "ld1w { z23.s }, p5/Z, [x23]\n"
- "ld1w { z22.s }, p4/Z, [x23, #1, MUL VL]\n"
- "zip1 z21.s, z18.s, z17.s\n"
- "zip2 z20.s, z18.s, z17.s\n"
- "ld1w { z19.s }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1w { z18.s }, p2/Z, [x23, #3, MUL VL]\n"
- "zip1 z17.s, z30.s, z16.s\n"
- "zip2 z16.s, z30.s, z16.s\n"
- "ld1w { z7.s }, p1/Z, [x27, #4, MUL VL]\n"
- "ld1w { z6.s }, p0/Z, [x27, #5, MUL VL]\n"
- ".inst 0x658abb65 // bfcvt z5.h, p6/M, z27.s\n"
- "zip1 z4.s, z29.s, z23.s\n"
- "ld1w { z3.s }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1w { z2.s }, p0/Z, [x23, #5, MUL VL]\n"
- ".inst 0x658abb41 // bfcvt z1.h, p6/M, z26.s\n"
- "zip2 z0.s, z29.s, z23.s\n"
- ".inst 0x658abb3f // bfcvt z31.h, p6/M, z25.s\n"
- "zip1 z30.s, z28.s, z22.s\n"
- "mov x20, x22\n"
- "decd x25, ALL, MUL #12\n"
- ".inst 0x658abb1d // bfcvt z29.h, p6/M, z24.s\n"
- "zip2 z28.s, z28.s, z22.s\n"
- "cmp x25, #0x0\n"
+ "decd x27, ALL, MUL #12\n"
+ "add x25, x25, %x[out_stride]\n"
+ "whilelt p1.s, XZR, x21\n"
+ "decw x21\n"
+ "whilelt p0.s, XZR, x21\n"
+ "decw x21\n"
+ "ld1w { z19.s }, p1/Z, [x28]\n"
+ "ld1w { z18.s }, p1/Z, [x23]\n"
+ "ld1w { z30.s }, p1/Z, [x24]\n"
+ "ld1w { z29.s }, p1/Z, [x22]\n"
+ "whilelt p1.s, XZR, x21\n"
+ "decw x21\n"
+ "ld1w { z21.s }, p0/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z28.s }, p0/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z27.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "zip1 z16.s, z19.s, z18.s\n"
+ "zip2 z26.s, z19.s, z18.s\n"
+ "whilelt p0.s, XZR, x21\n"
+ "decw x21\n"
+ "ld1w { z20.s }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z25.s }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z24.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "zip1 z18.s, z21.s, z17.s\n"
+ "zip2 z23.s, z21.s, z17.s\n"
+ ".inst 0x658aaa0a // bfcvt z10.h, p2/M, z16.s\n"
+ "zip1 z9.s, z30.s, z29.s\n"
+ "whilelt p1.s, XZR, x21\n"
+ "decw x21\n"
+ "ld1w { z17.s }, p0/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "zip1 z22.s, z20.s, z19.s\n"
+ "zip2 z21.s, z20.s, z19.s\n"
+ "ld1w { z20.s }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1w { z19.s }, p0/Z, [x22, #3, MUL VL]\n"
+ ".inst 0x658aab48 // bfcvt z8.h, p2/M, z26.s\n"
+ "zip2 z7.s, z30.s, z29.s\n"
+ "whilelt p0.s, XZR, x21\n"
+ "ld1w { z6.s }, p1/Z, [x28, #4, MUL VL]\n"
+ "ld1w { z5.s }, p1/Z, [x23, #4, MUL VL]\n"
+ ".inst 0x658aaa44 // bfcvt z4.h, p2/M, z18.s\n"
+ "zip1 z18.s, z17.s, z16.s\n"
+ "zip2 z17.s, z17.s, z16.s\n"
+ "ld1w { z3.s }, p1/Z, [x24, #4, MUL VL]\n"
+ "ld1w { z2.s }, p1/Z, [x22, #4, MUL VL]\n"
+ "zip1 z1.s, z28.s, z27.s\n"
+ ".inst 0x658aaae0 // bfcvt z0.h, p2/M, z23.s\n"
+ "cmp x27, #0x0\n"
+ "ld1w { z31.s }, p0/Z, [x28, #5, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z30.s }, p0/Z, [x24, #5, MUL VL]\n"
+ "zip2 z29.s, z28.s, z27.s\n"
+ ".inst 0x658aaadc // bfcvt z28.h, p2/M, z22.s\n"
+ "ld1w { z27.s }, p0/Z, [x22, #5, MUL VL]\n"
+ "zip1 z23.s, z25.s, z24.s\n"
+ ".inst 0x658aaaba // bfcvt z26.h, p2/M, z21.s\n"
"addvl x28, x28, #6\n"
- ".inst 0x658ababb // bfcvt z27.h, p6/M, z21.s\n"
- "zip1 z23.s, z11.s, z19.s\n"
- "addvl x27, x27, #6\n"
- "addvl x26, x26, #6\n"
- ".inst 0x658aba9a // bfcvt z26.h, p6/M, z20.s\n"
- "zip2 z22.s, z11.s, z19.s\n"
+ "zip2 z22.s, z25.s, z24.s\n"
+ ".inst 0x658aaa59 // bfcvt z25.h, p2/M, z18.s\n"
+ "addvl x24, x24, #6\n"
"addvl x23, x23, #6\n"
- "add x22, x22, %x[out_stride]\n"
- ".inst 0x658aba39 // bfcvt z25.h, p6/M, z17.s\n"
- "zip1 z21.s, z10.s, z18.s\n"
- ".inst 0x658aba18 // bfcvt z24.h, p6/M, z16.s\n"
- "zip2 z20.s, z10.s, z18.s\n"
- "zip1 z19.s, z13.s, z9.s\n"
- "zip2 z18.s, z13.s, z9.s\n"
- "zip1 z17.s, z12.s, z8.s\n"
- "zip2 z16.s, z12.s, z8.s\n"
- ".inst 0x648ab885 // bfcvtnt z5.h, p6/M, z4.s\n"
- ".inst 0x648ab801 // bfcvtnt z1.h, p6/M, z0.s\n"
- "st1h { z5.h }, p6, [x20]\n"
- ".inst 0x648abbdf // bfcvtnt z31.h, p6/M, z30.s\n"
- ".inst 0x648abb9d // bfcvtnt z29.h, p6/M, z28.s\n"
- "st1h { z1.h }, p6, [x20, #1, MUL VL]\n"
- ".inst 0x648abafb // bfcvtnt z27.h, p6/M, z23.s\n"
- ".inst 0x648abada // bfcvtnt z26.h, p6/M, z22.s\n"
- "st1h { z31.h }, p6, [x20, #2, MUL VL]\n"
- ".inst 0x648abab9 // bfcvtnt z25.h, p6/M, z21.s\n"
- ".inst 0x648aba98 // bfcvtnt z24.h, p6/M, z20.s\n"
- "st1h { z29.h }, p6, [x20, #3, MUL VL]\n"
- ".inst 0x658aba77 // bfcvt z23.h, p6/M, z19.s\n"
- "zip1 z22.s, z7.s, z3.s\n"
- "st1h { z27.h }, p6, [x20, #4, MUL VL]\n"
- ".inst 0x658aba55 // bfcvt z21.h, p6/M, z18.s\n"
- "zip2 z20.s, z7.s, z3.s\n"
- "st1h { z26.h }, p6, [x20, #5, MUL VL]\n"
- ".inst 0x658aba33 // bfcvt z19.h, p6/M, z17.s\n"
- "zip1 z18.s, z6.s, z2.s\n"
- "st1h { z25.h }, p6, [x20, #6, MUL VL]\n"
- ".inst 0x658aba11 // bfcvt z17.h, p6/M, z16.s\n"
- "zip2 z16.s, z6.s, z2.s\n"
- "st1h { z24.h }, p6, [x20, #7, MUL VL]\n"
+ "zip1 z21.s, z20.s, z19.s\n"
+ ".inst 0x658aaa38 // bfcvt z24.h, p2/M, z17.s\n"
+ "addvl x22, x22, #6\n"
+ "zip2 z20.s, z20.s, z19.s\n"
+ "zip1 z19.s, z6.s, z5.s\n"
+ "zip2 z18.s, z6.s, z5.s\n"
+ "zip1 z17.s, z31.s, z16.s\n"
+ "zip2 z16.s, z31.s, z16.s\n"
+ ".inst 0x648aa92a // bfcvtnt z10.h, p2/M, z9.s\n"
+ ".inst 0x648aa8e8 // bfcvtnt z8.h, p2/M, z7.s\n"
+ ".inst 0x648aa824 // bfcvtnt z4.h, p2/M, z1.s\n"
+ ".inst 0x648aaba0 // bfcvtnt z0.h, p2/M, z29.s\n"
+ ".inst 0x648aaafc // bfcvtnt z28.h, p2/M, z23.s\n"
+ ".inst 0x648aaada // bfcvtnt z26.h, p2/M, z22.s\n"
+ ".inst 0x648aaab9 // bfcvtnt z25.h, p2/M, z21.s\n"
+ "st1h { z10.h }, p2, [x20]\n"
+ ".inst 0x648aaa98 // bfcvtnt z24.h, p2/M, z20.s\n"
+ ".inst 0x658aaa77 // bfcvt z23.h, p2/M, z19.s\n"
+ "st1h { z8.h }, p2, [x20, #1, MUL VL]\n"
+ "zip1 z22.s, z3.s, z2.s\n"
+ ".inst 0x658aaa55 // bfcvt z21.h, p2/M, z18.s\n"
+ "st1h { z4.h }, p2, [x20, #2, MUL VL]\n"
+ "zip2 z20.s, z3.s, z2.s\n"
+ ".inst 0x658aaa33 // bfcvt z19.h, p2/M, z17.s\n"
+ "st1h { z0.h }, p2, [x20, #3, MUL VL]\n"
+ "zip1 z18.s, z30.s, z27.s\n"
+ ".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
+ "st1h { z28.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z16.s, z30.s, z27.s\n"
+ "st1h { z26.h }, p2, [x20, #5, MUL VL]\n"
+ ".inst 0x648aaad7 // bfcvtnt z23.h, p2/M, z22.s\n"
+ "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
+ ".inst 0x648aaa95 // bfcvtnt z21.h, p2/M, z20.s\n"
+ "st1h { z24.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
- ".inst 0x648abad7 // bfcvtnt z23.h, p6/M, z22.s\n"
- ".inst 0x648aba95 // bfcvtnt z21.h, p6/M, z20.s\n"
- "st1h { z23.h }, p6, [x20, #-4, MUL VL]\n"
- ".inst 0x648aba53 // bfcvtnt z19.h, p6/M, z18.s\n"
- ".inst 0x648aba11 // bfcvtnt z17.h, p6/M, z16.s\n"
- "st1h { z21.h }, p6, [x20, #-3, MUL VL]\n"
- "st1h { z19.h }, p6, [x20, #-2, MUL VL]\n"
- "st1h { z17.h }, p6, [x20, #-1, MUL VL]\n"
+ ".inst 0x648aaa53 // bfcvtnt z19.h, p2/M, z18.s\n"
+ ".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
+ "st1h { z23.h }, p2, [x20, #-4, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #-3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x20, #-2, MUL VL]\n"
+ "st1h { z17.h }, p2, [x20, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -354,7 +354,7 @@ void sve_transpose_interleave_12VL_2x4_fp32bf16(bfloat16 *out, const float *in,
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp
index b33c4f6c2d..e37879e19b 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,59 +40,59 @@ void sve_transpose_interleave_1VL(uint32_t *out, const uint32_t *in, size_t widt
"mov x26, %x[in]\n"
"mov x25, %x[width]\n"
"cntw x24, ALL, MUL #2\n"
- "add x23, x26, %x[in_stride]\n"
- "add x21, x23, %x[in_stride]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x22, x26, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
"add x20, x21, %x[in_stride]\n"
"cmp x25, x24\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
"sub x25, x25, x24\n"
"ld1w { z23.s }, p1/Z, [x26]\n"
"ld1w { z22.s }, p1/Z, [x26, #1, MUL VL]\n"
- "cmp x25, x24\n"
- "ld1w { z21.s }, p1/Z, [x23]\n"
- "ld1w { z20.s }, p1/Z, [x23, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "addvl x23, x23, #2\n"
+ "ld1w { z21.s }, p1/Z, [x22]\n"
+ "ld1w { z20.s }, p1/Z, [x22, #1, MUL VL]\n"
+ "cmp x25, x24\n"
+ "addvl x22, x22, #2\n"
"ld1w { z19.s }, p1/Z, [x21]\n"
"ld1w { z18.s }, p1/Z, [x21, #1, MUL VL]\n"
"addvl x21, x21, #2\n"
"ld1w { z17.s }, p1/Z, [x20]\n"
"ld1w { z16.s }, p1/Z, [x20, #1, MUL VL]\n"
- "st1w { z23.s }, p1, [x22]\n"
+ "st1w { z23.s }, p1, [x23]\n"
"addvl x20, x20, #2\n"
- "st1w { z21.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z17.s }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z22.s }, p1, [x22]\n"
- "st1w { z20.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z18.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z16.s }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1w { z21.s }, p1, [x23, #1, MUL VL]\n"
+ "st1w { z19.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z17.s }, p1, [x23, #3, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "st1w { z22.s }, p1, [x23]\n"
+ "st1w { z20.s }, p1, [x23, #1, MUL VL]\n"
+ "st1w { z18.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z16.s }, p1, [x23, #3, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cbz x25, 5f\n"
"4:" // Main row loop: Column loop
"whilelt p0.s, XZR, x25\n"
"decw x25\n"
- "ld1w { z19.s }, p0/Z, [x26]\n"
- "ld1w { z18.s }, p0/Z, [x23]\n"
"cmp x25, #0x0\n"
+ "ld1w { z19.s }, p0/Z, [x26]\n"
"addvl x26, x26, #1\n"
+ "ld1w { z18.s }, p0/Z, [x22]\n"
+ "addvl x22, x22, #1\n"
"ld1w { z17.s }, p0/Z, [x21]\n"
- "ld1w { z16.s }, p0/Z, [x20]\n"
- "addvl x23, x23, #1\n"
"addvl x21, x21, #1\n"
- "st1w { z19.s }, p1, [x22]\n"
+ "ld1w { z16.s }, p0/Z, [x20]\n"
"addvl x20, x20, #1\n"
- "st1w { z18.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z17.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z16.s }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1w { z19.s }, p1, [x23]\n"
+ "st1w { z18.s }, p1, [x23, #1, MUL VL]\n"
+ "st1w { z17.s }, p1, [x23, #2, MUL VL]\n"
+ "st1w { z16.s }, p1, [x23, #3, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -104,32 +104,32 @@ void sve_transpose_interleave_1VL(uint32_t *out, const uint32_t *in, size_t widt
"mov x21, %x[width]\n"
"cntw x20, ALL, MUL #2\n"
"mov x26, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x21, x20\n"
"add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
"sub x21, x21, x20\n"
"ld1w { z17.s }, p1/Z, [x26]\n"
"ld1w { z16.s }, p1/Z, [x26, #1, MUL VL]\n"
- "st1w { z17.s }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "cmp x21, x20\n"
- "st1w { z16.s }, p1, [x22]\n"
"addvl x26, x26, #2\n"
- "add x22, x22, %x[out_stride]\n"
+ "cmp x21, x20\n"
+ "st1w { z17.s }, p1, [x23]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "st1w { z16.s }, p1, [x23]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"whilelt p0.s, XZR, x21\n"
"decw x21\n"
- "ld1w { z16.s }, p0/Z, [x26]\n"
- "st1w { z16.s }, p1, [x22]\n"
"cmp x21, #0x0\n"
+ "ld1w { z16.s }, p0/Z, [x26]\n"
"addvl x26, x26, #1\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1w { z16.s }, p1, [x23]\n"
+ "add x23, x23, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp
index e468787815..60ac125bff 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_1VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,135 +44,135 @@ void sve_transpose_interleave_1VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"blt 6f\n"
"1:" // Main row loop: Head
"mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
- "mov x25, %x[width]\n"
- "cntb x24, ALL, MUL #2\n"
- "add x23, x26, %x[in_stride]\n"
- "add x21, x23, %x[in_stride]\n"
+ "mov x9, %x[width]\n"
+ "cntb x28, ALL, MUL #2\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x10, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "cmp x9, x28\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
"add x20, x21, %x[in_stride]\n"
- "cmp x25, x24\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z20.b }, p1/Z, [x10]\n"
- "ld1b { z18.b }, p1/Z, [x9]\n"
- "sub x25, x25, x24\n"
- "cmp x25, x24\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z25.b, z20.b, z17.b\n"
- "zip1 z24.b, z18.b, z16.b\n"
- "ld1b { z21.b }, p1/Z, [x26]\n"
- "ld1b { z19.b }, p1/Z, [x23]\n"
- "zip2 z2.b, z20.b, z17.b\n"
- "zip2 z1.b, z18.b, z16.b\n"
- "ld1b { z18.b }, p1/Z, [x21]\n"
- "ld1b { z17.b }, p1/Z, [x20]\n"
- "zip1 z20.b, z21.b, z18.b\n"
- "zip1 z16.b, z19.b, z17.b\n"
- "ld1b { z0.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z31.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z30.b, z21.b, z18.b\n"
- "zip2 z29.b, z19.b, z17.b\n"
- "ld1b { z23.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z22.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z19.b, z25.b, z24.b\n"
- "zip1 z18.b, z20.b, z16.b\n"
- "ld1b { z28.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z27.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip2 z17.b, z25.b, z24.b\n"
- "zip2 z16.b, z20.b, z16.b\n"
- "ld1b { z21.b }, p1/Z, [x21, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x20, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x22]\n"
- "zip1 z26.b, z0.b, z23.b\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z25.b, z31.b, z22.b\n"
- "zip1 z24.b, z28.b, z21.b\n"
- "st1b { z17.b }, p1, [x22]\n"
- "zip1 z19.b, z27.b, z20.b\n"
- "zip1 z17.b, z2.b, z1.b\n"
+ "ld1b { z25.b }, p1/Z, [x10]\n"
+ "ld1b { z24.b }, p1/Z, [x26]\n"
+ "sub x9, x9, x28\n"
+ "ld1b { z20.b }, p1/Z, [x25]\n"
+ "ld1b { z17.b }, p1/Z, [x24]\n"
+ "cmp x9, x28\n"
+ "ld1b { z23.b }, p1/Z, [x23]\n"
+ "ld1b { z22.b }, p1/Z, [x22]\n"
+ "ld1b { z21.b }, p1/Z, [x21]\n"
+ "ld1b { z19.b }, p1/Z, [x20]\n"
+ "zip1 z18.b, z25.b, z20.b\n"
+ "zip1 z16.b, z24.b, z17.b\n"
+ "ld1b { z4.b }, p1/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z3.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "zip2 z2.b, z25.b, z20.b\n"
+ "zip2 z1.b, z24.b, z17.b\n"
+ "ld1b { z25.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z0.b }, p1/Z, [x24, #1, MUL VL]\n"
"addvl x10, x10, #2\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z18.b, z30.b, z29.b\n"
- "zip2 z16.b, z2.b, z1.b\n"
- "st1b { z17.b }, p1, [x22]\n"
- "zip2 z17.b, z30.b, z29.b\n"
- "zip2 z23.b, z0.b, z23.b\n"
- "addvl x9, x9, #2\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z22.b, z31.b, z22.b\n"
- "zip2 z21.b, z28.b, z21.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "zip2 z20.b, z27.b, z20.b\n"
- "zip1 z16.b, z26.b, z25.b\n"
- "addvl x28, x28, #2\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z18.b, z24.b, z19.b\n"
- "zip2 z17.b, z26.b, z25.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "zip2 z16.b, z24.b, z19.b\n"
- "zip1 z19.b, z23.b, z22.b\n"
- "addvl x27, x27, #2\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z18.b, z21.b, z20.b\n"
"addvl x26, x26, #2\n"
- "st1b { z17.b }, p1, [x22]\n"
+ "zip1 z20.b, z23.b, z21.b\n"
+ "zip1 z17.b, z22.b, z19.b\n"
+ "ld1b { z31.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1b { z30.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z24.b }, p1/Z, [x21, #1, MUL VL]\n"
+ "ld1b { z29.b }, p1/Z, [x20, #1, MUL VL]\n"
+ "zip2 z23.b, z23.b, z21.b\n"
+ "zip2 z22.b, z22.b, z19.b\n"
+ "zip1 z19.b, z18.b, z16.b\n"
+ "zip2 z18.b, z18.b, z16.b\n"
+ "addvl x25, x25, #2\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z16.b, z20.b, z17.b\n"
+ "zip2 z17.b, z20.b, z17.b\n"
"addvl x23, x23, #2\n"
+ "addvl x22, x22, #2\n"
+ "zip1 z28.b, z4.b, z25.b\n"
+ "zip1 z21.b, z3.b, z0.b\n"
"addvl x21, x21, #2\n"
- "zip2 z17.b, z23.b, z22.b\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
"addvl x20, x20, #2\n"
- "zip2 z16.b, z21.b, z20.b\n"
- "st1b { z19.b }, p1, [x22]\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1b { z17.b }, p1, [x22]\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1b { z19.b }, p1, [x27]\n"
+ "zip1 z27.b, z31.b, z24.b\n"
+ "zip1 z26.b, z30.b, z29.b\n"
+ "st1b { z16.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z20.b, z2.b, z1.b\n"
+ "zip1 z16.b, z23.b, z22.b\n"
+ "st1b { z18.b }, p1, [x27]\n"
+ "zip2 z19.b, z2.b, z1.b\n"
+ "zip2 z18.b, z23.b, z22.b\n"
+ "st1b { z17.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip2 z25.b, z4.b, z25.b\n"
+ "zip2 z17.b, z3.b, z0.b\n"
+ "st1b { z20.b }, p1, [x27]\n"
+ "zip2 z24.b, z31.b, z24.b\n"
+ "zip2 z23.b, z30.b, z29.b\n"
+ "st1b { z16.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z16.b, z28.b, z21.b\n"
+ "zip1 z22.b, z27.b, z26.b\n"
+ "st1b { z19.b }, p1, [x27]\n"
+ "zip2 z21.b, z28.b, z21.b\n"
+ "zip2 z20.b, z27.b, z26.b\n"
+ "st1b { z18.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z19.b, z25.b, z17.b\n"
+ "zip1 z18.b, z24.b, z23.b\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "zip2 z17.b, z25.b, z17.b\n"
+ "zip2 z16.b, z24.b, z23.b\n"
+ "st1b { z22.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z19.b }, p1, [x27]\n"
+ "st1b { z18.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z17.b }, p1, [x27]\n"
+ "st1b { z16.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x9, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x25\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
- "ld1b { z18.b }, p0/Z, [x9]\n"
- "decw x25\n"
- "ld1b { z17.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z21.b, z19.b, z17.b\n"
- "zip1 z20.b, z18.b, z16.b\n"
- "ld1b { z18.b }, p0/Z, [x26]\n"
- "ld1b { z19.b }, p0/Z, [x23]\n"
- "cmp x25, #0x0\n"
+ "whilelt p0.b, XZR, x9\n"
+ "decw x9\n"
+ "ld1b { z23.b }, p0/Z, [x10]\n"
"incd x10, ALL, MUL #2\n"
- "ld1b { z17.b }, p0/Z, [x21]\n"
- "ld1b { z16.b }, p0/Z, [x20]\n"
- "zip1 z18.b, z18.b, z17.b\n"
- "zip1 z16.b, z19.b, z16.b\n"
- "incd x9, ALL, MUL #2\n"
- "incd x28, ALL, MUL #2\n"
- "zip1 z17.b, z21.b, z20.b\n"
- "zip1 z16.b, z18.b, z16.b\n"
- "incd x27, ALL, MUL #2\n"
+ "ld1b { z22.b }, p0/Z, [x26]\n"
"incd x26, ALL, MUL #2\n"
- "st1b { z17.b }, p1, [x22]\n"
+ "ld1b { z19.b }, p0/Z, [x25]\n"
+ "incd x25, ALL, MUL #2\n"
+ "ld1b { z17.b }, p0/Z, [x24]\n"
+ "incd x24, ALL, MUL #2\n"
+ "ld1b { z21.b }, p0/Z, [x23]\n"
+ "ld1b { z20.b }, p0/Z, [x22]\n"
+ "ld1b { z18.b }, p0/Z, [x21]\n"
+ "cmp x9, #0x0\n"
"incd x23, ALL, MUL #2\n"
+ "ld1b { z16.b }, p0/Z, [x20]\n"
+ "zip1 z19.b, z23.b, z19.b\n"
+ "incd x22, ALL, MUL #2\n"
"incd x21, ALL, MUL #2\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
+ "zip1 z17.b, z22.b, z17.b\n"
"incd x20, ALL, MUL #2\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip1 z18.b, z21.b, z18.b\n"
+ "zip1 z16.b, z20.b, z16.b\n"
+ "zip1 z17.b, z19.b, z17.b\n"
+ "zip1 z16.b, z18.b, z16.b\n"
+ "st1b { z17.b }, p1, [x27]\n"
+ "st1b { z16.b }, p1, [x27, #1, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x8\n"
@@ -182,88 +182,88 @@ void sve_transpose_interleave_1VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cntb x20, ALL, MUL #2\n"
- "add x27, x28, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x27, %x[in_stride]\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x10, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"cmp x21, x20\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
"ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z18.b }, p1/Z, [x9]\n"
+ "ld1b { z19.b }, p1/Z, [x26]\n"
"sub x21, x21, x20\n"
+ "ld1b { z17.b }, p1/Z, [x25]\n"
+ "ld1b { z16.b }, p1/Z, [x24]\n"
"cmp x21, x20\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z20.b, z21.b, z17.b\n"
- "zip1 z19.b, z18.b, z16.b\n"
- "ld1b { z24.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z23.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z22.b, z21.b, z17.b\n"
- "zip2 z21.b, z18.b, z16.b\n"
- "ld1b { z18.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z17.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z16.b, z20.b, z19.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z20.b, z24.b, z18.b\n"
- "zip1 z19.b, z23.b, z17.b\n"
+ "ld1b { z27.b }, p1/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z26.b }, p1/Z, [x26, #1, MUL VL]\n"
"addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- "zip1 z16.b, z22.b, z21.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z16.b, z22.b, z21.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z18.b, z24.b, z18.b\n"
- "zip2 z17.b, z23.b, z17.b\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z16.b, z20.b, z19.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z16.b, z18.b, z17.b\n"
- "addvl x28, x28, #2\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
- "addvl x27, x27, #2\n"
- "zip2 z16.b, z18.b, z17.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x26, x26, #2\n"
+ "ld1b { z25.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z24.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z20.b, z21.b, z17.b\n"
+ "zip1 z18.b, z19.b, z16.b\n"
+ "zip2 z17.b, z21.b, z17.b\n"
+ "zip2 z16.b, z19.b, z16.b\n"
+ "zip1 z23.b, z27.b, z25.b\n"
+ "zip1 z22.b, z26.b, z24.b\n"
+ "zip1 z19.b, z20.b, z18.b\n"
+ "zip2 z18.b, z20.b, z18.b\n"
+ "zip1 z21.b, z17.b, z16.b\n"
+ "zip2 z17.b, z17.b, z16.b\n"
+ "zip2 z20.b, z27.b, z25.b\n"
+ "zip2 z16.b, z26.b, z24.b\n"
+ "st1b { z19.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z18.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z19.b, z23.b, z22.b\n"
+ "zip2 z18.b, z23.b, z22.b\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z17.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z17.b, z20.b, z16.b\n"
+ "zip2 z16.b, z20.b, z16.b\n"
+ "st1b { z19.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z18.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z17.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"whilelt p0.b, XZR, x21\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
- "ld1b { z18.b }, p0/Z, [x9]\n"
"decw x21\n"
- "ld1b { z17.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
+ "ld1b { z19.b }, p0/Z, [x10]\n"
+ "incd x10, ALL, MUL #2\n"
+ "ld1b { z18.b }, p0/Z, [x26]\n"
+ "incd x26, ALL, MUL #2\n"
+ "ld1b { z17.b }, p0/Z, [x25]\n"
+ "incd x25, ALL, MUL #2\n"
+ "ld1b { z16.b }, p0/Z, [x24]\n"
+ "incd x24, ALL, MUL #2\n"
+ "cmp x21, #0x0\n"
"zip1 z17.b, z19.b, z17.b\n"
"zip1 z16.b, z18.b, z16.b\n"
- "cmp x21, #0x0\n"
- "incd x10, ALL, MUL #2\n"
"zip1 z16.b, z17.b, z16.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "incd x9, ALL, MUL #2\n"
- "incd x28, ALL, MUL #2\n"
- "incd x27, ALL, MUL #2\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "add x27, x27, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -272,7 +272,7 @@ void sve_transpose_interleave_1VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp
index 546800fa69..e10f818ba2 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,57 +34,57 @@ void sve_transpose_interleave_3VL(uint16_t *out, const uint16_t *in, size_t widt
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "ptrue p3.b\n"
+ "ptrue p2.b\n"
"blt 4f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "mov x25, %x[width]\n"
+ "add x24, x27, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p2.h, XZR, x20\n"
- "ld1h { z27.h }, p2/Z, [x26]\n"
- "ld1h { z26.h }, p2/Z, [x25]\n"
- "dech x20\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z25.h }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1h { z24.h }, p1/Z, [x25, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z23.h }, p0/Z, [x26, #2, MUL VL]\n"
- "ld1h { z22.h }, p0/Z, [x25, #2, MUL VL]\n"
- "mov x20, x22\n"
- "dech x21, ALL, MUL #3\n"
- "ld1h { z21.h }, p2/Z, [x24]\n"
- "ld1h { z20.h }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x24, #2, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x23]\n"
- "cmp x21, #0x0\n"
- "addvl x26, x26, #3\n"
- "ld1h { z17.h }, p1/Z, [x23, #1, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x23, #2, MUL VL]\n"
- "st1h { z27.h }, p3, [x20]\n"
- "addvl x25, x25, #3\n"
- "st1h { z25.h }, p3, [x20, #1, MUL VL]\n"
+ "mov x21, x25\n"
+ "mov x20, x26\n"
+ "dech x25, ALL, MUL #3\n"
+ "add x26, x26, %x[out_stride]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z27.h }, p0/Z, [x27]\n"
+ "ld1h { z26.h }, p0/Z, [x24]\n"
+ "ld1h { z25.h }, p0/Z, [x23]\n"
+ "ld1h { z24.h }, p0/Z, [x22]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "cmp x25, #0x0\n"
+ "ld1h { z23.h }, p1/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z21.h }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z20.h }, p1/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z18.h }, p0/Z, [x24, #2, MUL VL]\n"
+ "addvl x27, x27, #3\n"
"addvl x24, x24, #3\n"
+ "ld1h { z17.h }, p0/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x22, #2, MUL VL]\n"
+ "st1h { z27.h }, p2, [x20]\n"
"addvl x23, x23, #3\n"
- "st1h { z23.h }, p3, [x20, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z26.h }, p3, [x20, #3, MUL VL]\n"
- "st1h { z24.h }, p3, [x20, #4, MUL VL]\n"
- "st1h { z22.h }, p3, [x20, #5, MUL VL]\n"
- "st1h { z21.h }, p3, [x20, #6, MUL VL]\n"
- "st1h { z20.h }, p3, [x20, #7, MUL VL]\n"
+ "st1h { z23.h }, p2, [x20, #1, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "st1h { z19.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z26.h }, p2, [x20, #3, MUL VL]\n"
+ "st1h { z22.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x20, #5, MUL VL]\n"
+ "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
- "st1h { z19.h }, p3, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p3, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p3, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p3, [x20, #-1, MUL VL]\n"
+ "st1h { z17.h }, p2, [x20, #-4, MUL VL]\n"
+ "st1h { z24.h }, p2, [x20, #-3, MUL VL]\n"
+ "st1h { z20.h }, p2, [x20, #-2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -93,28 +93,28 @@ void sve_transpose_interleave_3VL(uint16_t *out, const uint16_t *in, size_t widt
"cbz %x[height], 8f\n"
"4:" // Main loop skip
"5:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
"mov x21, %x[width]\n"
+ "add %x[in], x27, %x[in_stride]\n"
"6:" // Tail row loop: Column loop
"mov x20, x21\n"
+ "dech x21, ALL, MUL #3\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x26]\n"
"dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z17.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
- "dech x21, ALL, MUL #3\n"
+ "ld1h { z18.h }, p0/Z, [x27]\n"
"whilelt p0.h, XZR, x20\n"
"cmp x21, #0x0\n"
- "ld1h { z16.h }, p0/Z, [x26, #2, MUL VL]\n"
- "st1h { z18.h }, p3, [x22]\n"
- "addvl x26, x26, #3\n"
- "st1h { z17.h }, p3, [x22, #1, MUL VL]\n"
- "st1h { z16.h }, p3, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z17.h }, p1/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x27, #2, MUL VL]\n"
+ "addvl x27, x27, #3\n"
+ "st1h { z18.h }, p2, [x26]\n"
+ "st1h { z17.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z16.h }, p2, [x26, #2, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -123,7 +123,7 @@ void sve_transpose_interleave_3VL(uint16_t *out, const uint16_t *in, size_t widt
"8:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
+ : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp
index a44141c109..e5bfb7bb7c 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,175 +44,175 @@ void sve_transpose_interleave_3VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"blt 6f\n"
"1:" // Main row loop: Head
"mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
- "mov x25, %x[width]\n"
- "cntb x24, ALL, MUL #3\n"
- "add x23, x26, %x[in_stride]\n"
- "add x21, x23, %x[in_stride]\n"
+ "mov x9, %x[width]\n"
+ "cntb x28, ALL, MUL #3\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x10, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "cmp x9, x28\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
"add x20, x21, %x[in_stride]\n"
- "cmp x25, x24\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z20.b }, p1/Z, [x9]\n"
- "sub x25, x25, x24\n"
- "cmp x25, x24\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z31.b, z21.b, z17.b\n"
- "zip1 z22.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
- "ld1b { z18.b }, p1/Z, [x23]\n"
- "zip2 z14.b, z21.b, z17.b\n"
- "zip2 z13.b, z20.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x21]\n"
- "ld1b { z16.b }, p1/Z, [x20]\n"
- "zip1 z30.b, z19.b, z17.b\n"
- "zip1 z29.b, z18.b, z16.b\n"
- "ld1b { z21.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z12.b, z19.b, z17.b\n"
- "zip2 z11.b, z18.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z10.b, z21.b, z17.b\n"
- "zip1 z9.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip2 z8.b, z21.b, z17.b\n"
- "zip2 z7.b, z20.b, z16.b\n"
+ "ld1b { z19.b }, p1/Z, [x10]\n"
+ "ld1b { z18.b }, p1/Z, [x26]\n"
+ "sub x9, x9, x28\n"
+ "ld1b { z17.b }, p1/Z, [x25]\n"
+ "ld1b { z16.b }, p1/Z, [x24]\n"
+ "cmp x9, x28\n"
+ "ld1b { z27.b }, p1/Z, [x23]\n"
+ "ld1b { z26.b }, p1/Z, [x22]\n"
+ "ld1b { z25.b }, p1/Z, [x21]\n"
+ "ld1b { z24.b }, p1/Z, [x20]\n"
+ "ld1b { z23.b }, p1/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z22.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "zip1 z1.b, z19.b, z17.b\n"
+ "zip1 z0.b, z18.b, z16.b\n"
+ "ld1b { z21.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "zip2 z15.b, z19.b, z17.b\n"
+ "zip2 z14.b, z18.b, z16.b\n"
+ "ld1b { z19.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1b { z18.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "zip1 z13.b, z27.b, z25.b\n"
+ "zip1 z12.b, z26.b, z24.b\n"
"ld1b { z17.b }, p1/Z, [x21, #1, MUL VL]\n"
"ld1b { z16.b }, p1/Z, [x20, #1, MUL VL]\n"
- "zip1 z6.b, z19.b, z17.b\n"
- "zip1 z5.b, z18.b, z16.b\n"
- "ld1b { z28.b }, p1/Z, [x10, #2, MUL VL]\n"
- "ld1b { z27.b }, p1/Z, [x9, #2, MUL VL]\n"
- "zip2 z4.b, z19.b, z17.b\n"
- "zip2 z3.b, z18.b, z16.b\n"
- "ld1b { z26.b }, p1/Z, [x28, #2, MUL VL]\n"
- "ld1b { z25.b }, p1/Z, [x27, #2, MUL VL]\n"
- "zip1 z2.b, z28.b, z26.b\n"
- "zip1 z1.b, z27.b, z25.b\n"
- "ld1b { z24.b }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1b { z23.b }, p1/Z, [x23, #2, MUL VL]\n"
- "zip1 z16.b, z31.b, z22.b\n"
- "zip2 z22.b, z31.b, z22.b\n"
- "ld1b { z21.b }, p1/Z, [x21, #2, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x20, #2, MUL VL]\n"
- "zip1 z0.b, z24.b, z21.b\n"
- "zip1 z31.b, z23.b, z20.b\n"
- "zip1 z19.b, z14.b, z13.b\n"
- "zip1 z18.b, z30.b, z29.b\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "zip2 z11.b, z27.b, z25.b\n"
+ "zip2 z10.b, z26.b, z24.b\n"
+ "ld1b { z9.b }, p1/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z8.b }, p1/Z, [x26, #2, MUL VL]\n"
+ "zip1 z7.b, z23.b, z21.b\n"
+ "zip1 z6.b, z22.b, z20.b\n"
+ "ld1b { z31.b }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1b { z30.b }, p1/Z, [x24, #2, MUL VL]\n"
+ "zip2 z5.b, z23.b, z21.b\n"
+ "zip2 z4.b, z22.b, z20.b\n"
+ "ld1b { z29.b }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z28.b }, p1/Z, [x22, #2, MUL VL]\n"
+ "zip1 z27.b, z19.b, z17.b\n"
+ "zip1 z26.b, z18.b, z16.b\n"
+ "ld1b { z25.b }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z24.b }, p1/Z, [x20, #2, MUL VL]\n"
+ "zip2 z23.b, z19.b, z17.b\n"
+ "zip2 z22.b, z18.b, z16.b\n"
+ "zip1 z3.b, z9.b, z31.b\n"
+ "zip1 z2.b, z8.b, z30.b\n"
"addvl x10, x10, #3\n"
- "zip2 z16.b, z30.b, z29.b\n"
- "zip1 z17.b, z12.b, z11.b\n"
- "st1b { z22.b }, p1, [x22, #1, MUL VL]\n"
- "addvl x9, x9, #3\n"
- "st1b { z19.b }, p1, [x22, #2, MUL VL]\n"
- "zip2 z30.b, z28.b, z26.b\n"
- "zip2 z29.b, z27.b, z25.b\n"
- "addvl x28, x28, #3\n"
- "st1b { z18.b }, p1, [x22, #3, MUL VL]\n"
- "zip2 z28.b, z24.b, z21.b\n"
- "zip2 z27.b, z23.b, z20.b\n"
- "addvl x27, x27, #3\n"
- "st1b { z16.b }, p1, [x22, #4, MUL VL]\n"
- "zip2 z21.b, z14.b, z13.b\n"
- "zip1 z16.b, z10.b, z9.b\n"
"addvl x26, x26, #3\n"
- "st1b { z17.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z20.b, z10.b, z9.b\n"
- "zip2 z19.b, z12.b, z11.b\n"
- "zip1 z18.b, z6.b, z5.b\n"
- "zip2 z17.b, z6.b, z5.b\n"
- "st1b { z21.b }, p1, [x22]\n"
+ "zip1 z21.b, z1.b, z0.b\n"
+ "zip2 z20.b, z1.b, z0.b\n"
+ "addvl x25, x25, #3\n"
+ "addvl x24, x24, #3\n"
+ "zip1 z1.b, z29.b, z25.b\n"
+ "zip1 z0.b, z28.b, z24.b\n"
"addvl x23, x23, #3\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "zip1 z16.b, z8.b, z7.b\n"
- "zip2 z26.b, z8.b, z7.b\n"
+ "addvl x22, x22, #3\n"
+ "zip1 z19.b, z15.b, z14.b\n"
+ "zip1 z18.b, z13.b, z12.b\n"
"addvl x21, x21, #3\n"
- "st1b { z20.b }, p1, [x22, #2, MUL VL]\n"
- "zip1 z25.b, z2.b, z1.b\n"
- "zip1 z24.b, z4.b, z3.b\n"
"addvl x20, x20, #3\n"
- "st1b { z19.b }, p1, [x22, #3, MUL VL]\n"
- "zip2 z23.b, z4.b, z3.b\n"
- "zip1 z22.b, z0.b, z31.b\n"
- "st1b { z18.b }, p1, [x22, #4, MUL VL]\n"
- "zip2 z21.b, z2.b, z1.b\n"
- "zip1 z20.b, z30.b, z29.b\n"
- "st1b { z17.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z19.b, z30.b, z29.b\n"
- "zip2 z18.b, z0.b, z31.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "zip1 z17.b, z28.b, z27.b\n"
- "zip2 z16.b, z28.b, z27.b\n"
- "st1b { z26.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z25.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z24.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z23.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z22.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1b { z21.b }, p1, [x22]\n"
- "st1b { z20.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "zip2 z17.b, z13.b, z12.b\n"
+ "zip1 z16.b, z11.b, z10.b\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "zip2 z31.b, z9.b, z31.b\n"
+ "zip2 z30.b, z8.b, z30.b\n"
+ "st1b { z19.b }, p1, [x27, #2, MUL VL]\n"
+ "zip2 z29.b, z29.b, z25.b\n"
+ "zip2 z28.b, z28.b, z24.b\n"
+ "st1b { z18.b }, p1, [x27, #3, MUL VL]\n"
+ "zip2 z21.b, z15.b, z14.b\n"
+ "zip1 z20.b, z7.b, z6.b\n"
+ "st1b { z17.b }, p1, [x27, #4, MUL VL]\n"
+ "zip2 z19.b, z7.b, z6.b\n"
+ "zip2 z18.b, z11.b, z10.b\n"
+ "st1b { z16.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z17.b, z27.b, z26.b\n"
+ "zip2 z16.b, z27.b, z26.b\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "zip1 z27.b, z5.b, z4.b\n"
+ "zip2 z26.b, z5.b, z4.b\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "zip1 z25.b, z3.b, z2.b\n"
+ "zip1 z24.b, z23.b, z22.b\n"
+ "st1b { z19.b }, p1, [x27, #2, MUL VL]\n"
+ "zip2 z23.b, z23.b, z22.b\n"
+ "zip1 z22.b, z1.b, z0.b\n"
+ "st1b { z18.b }, p1, [x27, #3, MUL VL]\n"
+ "zip2 z21.b, z3.b, z2.b\n"
+ "zip1 z20.b, z31.b, z30.b\n"
+ "st1b { z17.b }, p1, [x27, #4, MUL VL]\n"
+ "zip2 z19.b, z31.b, z30.b\n"
+ "zip2 z18.b, z1.b, z0.b\n"
+ "st1b { z16.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z17.b, z29.b, z28.b\n"
+ "zip2 z16.b, z29.b, z28.b\n"
+ "st1b { z27.b }, p1, [x27]\n"
+ "st1b { z26.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z25.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z24.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z23.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z22.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z18.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x9, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x25\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
- "ld1b { z18.b }, p0/Z, [x9]\n"
- "decw x25, ALL, MUL #3\n"
- "ld1b { z17.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z26.b, z19.b, z17.b\n"
- "zip1 z25.b, z18.b, z16.b\n"
- "ld1b { z21.b }, p0/Z, [x26]\n"
- "ld1b { z20.b }, p0/Z, [x23]\n"
- "zip2 z24.b, z19.b, z17.b\n"
- "zip2 z19.b, z18.b, z16.b\n"
- "ld1b { z18.b }, p0/Z, [x21]\n"
- "ld1b { z16.b }, p0/Z, [x20]\n"
- "zip1 z23.b, z21.b, z18.b\n"
- "zip1 z17.b, z20.b, z16.b\n"
- "zip2 z22.b, z21.b, z18.b\n"
- "zip2 z16.b, z20.b, z16.b\n"
- "cmp x25, #0x0\n"
+ "whilelt p0.b, XZR, x9\n"
+ "decw x9, ALL, MUL #3\n"
+ "ld1b { z24.b }, p0/Z, [x10]\n"
"incd x10, ALL, MUL #6\n"
- "incd x9, ALL, MUL #6\n"
- "incd x28, ALL, MUL #6\n"
- "zip1 z21.b, z26.b, z25.b\n"
- "zip2 z20.b, z26.b, z25.b\n"
- "incd x27, ALL, MUL #6\n"
+ "ld1b { z23.b }, p0/Z, [x26]\n"
"incd x26, ALL, MUL #6\n"
- "zip1 z19.b, z24.b, z19.b\n"
- "zip1 z18.b, z23.b, z17.b\n"
+ "ld1b { z19.b }, p0/Z, [x25]\n"
+ "incd x25, ALL, MUL #6\n"
+ "ld1b { z18.b }, p0/Z, [x24]\n"
+ "incd x24, ALL, MUL #6\n"
+ "ld1b { z22.b }, p0/Z, [x23]\n"
+ "ld1b { z21.b }, p0/Z, [x22]\n"
+ "ld1b { z17.b }, p0/Z, [x21]\n"
+ "cmp x9, #0x0\n"
"incd x23, ALL, MUL #6\n"
+ "ld1b { z16.b }, p0/Z, [x20]\n"
+ "zip1 z20.b, z24.b, z19.b\n"
+ "zip2 z24.b, z24.b, z19.b\n"
+ "incd x22, ALL, MUL #6\n"
+ "zip1 z19.b, z23.b, z18.b\n"
+ "zip2 z18.b, z23.b, z18.b\n"
"incd x21, ALL, MUL #6\n"
+ "incd x20, ALL, MUL #6\n"
+ "zip1 z23.b, z22.b, z17.b\n"
+ "zip2 z22.b, z22.b, z17.b\n"
+ "zip1 z17.b, z21.b, z16.b\n"
+ "zip2 z16.b, z21.b, z16.b\n"
+ "zip1 z21.b, z20.b, z19.b\n"
+ "zip2 z20.b, z20.b, z19.b\n"
+ "zip1 z19.b, z24.b, z18.b\n"
+ "zip1 z18.b, z23.b, z17.b\n"
"zip2 z17.b, z23.b, z17.b\n"
"zip1 z16.b, z22.b, z16.b\n"
- "incd x20, ALL, MUL #6\n"
- "st1b { z21.b }, p1, [x22]\n"
- "st1b { z20.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z18.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x8\n"
@@ -222,106 +222,106 @@ void sve_transpose_interleave_3VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cntb x20, ALL, MUL #3\n"
- "add x27, x28, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x27, %x[in_stride]\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x10, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"cmp x21, x20\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z20.b }, p1/Z, [x9]\n"
+ "ld1b { z27.b }, p1/Z, [x10]\n"
+ "ld1b { z22.b }, p1/Z, [x26]\n"
"sub x21, x21, x20\n"
+ "ld1b { z21.b }, p1/Z, [x25]\n"
+ "ld1b { z17.b }, p1/Z, [x24]\n"
"cmp x21, x20\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z31.b, z21.b, z17.b\n"
- "zip1 z30.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z29.b, z21.b, z17.b\n"
- "zip2 z28.b, z20.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z27.b, z19.b, z17.b\n"
- "zip1 z26.b, z18.b, z16.b\n"
- "ld1b { z22.b }, p1/Z, [x10, #2, MUL VL]\n"
- "ld1b { z21.b }, p1/Z, [x9, #2, MUL VL]\n"
- "zip2 z25.b, z19.b, z17.b\n"
- "zip2 z20.b, z18.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x28, #2, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x27, #2, MUL VL]\n"
- "zip1 z24.b, z22.b, z19.b\n"
- "zip1 z23.b, z21.b, z18.b\n"
- "zip1 z16.b, z31.b, z30.b\n"
- "zip2 z17.b, z31.b, z30.b\n"
- "st1b { z16.b }, p1, [x22]\n"
+ "ld1b { z26.b }, p1/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z25.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z19.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "ld1b { z30.b }, p1/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z29.b }, p1/Z, [x26, #2, MUL VL]\n"
+ "zip1 z18.b, z27.b, z21.b\n"
+ "zip1 z16.b, z22.b, z17.b\n"
+ "ld1b { z24.b }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1b { z23.b }, p1/Z, [x24, #2, MUL VL]\n"
+ "zip2 z28.b, z27.b, z21.b\n"
+ "zip2 z17.b, z22.b, z17.b\n"
+ "zip1 z22.b, z26.b, z20.b\n"
+ "zip1 z21.b, z25.b, z19.b\n"
"addvl x10, x10, #3\n"
- "zip1 z16.b, z29.b, z28.b\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "zip2 z22.b, z22.b, z19.b\n"
- "addvl x9, x9, #3\n"
- "st1b { z16.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z21.b, z21.b, z18.b\n"
- "zip2 z18.b, z29.b, z28.b\n"
- "zip1 z16.b, z27.b, z26.b\n"
- "zip2 z17.b, z27.b, z26.b\n"
- "st1b { z18.b }, p1, [x22]\n"
- "addvl x28, x28, #3\n"
- "st1b { z16.b }, p1, [x22, #1, MUL VL]\n"
- "zip1 z16.b, z25.b, z20.b\n"
- "zip2 z20.b, z25.b, z20.b\n"
- "addvl x27, x27, #3\n"
- "st1b { z17.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z19.b, z24.b, z23.b\n"
- "zip2 z18.b, z24.b, z23.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "zip1 z17.b, z22.b, z21.b\n"
- "zip2 z16.b, z22.b, z21.b\n"
- "st1b { z20.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1b { z18.b }, p1, [x22]\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x26, x26, #3\n"
+ "zip2 z27.b, z26.b, z20.b\n"
+ "zip2 z20.b, z25.b, z19.b\n"
+ "addvl x25, x25, #3\n"
+ "addvl x24, x24, #3\n"
+ "zip1 z26.b, z30.b, z24.b\n"
+ "zip1 z25.b, z29.b, z23.b\n"
+ "zip1 z19.b, z18.b, z16.b\n"
+ "zip2 z16.b, z18.b, z16.b\n"
+ "zip1 z18.b, z28.b, z17.b\n"
+ "zip2 z24.b, z30.b, z24.b\n"
+ "zip2 z23.b, z29.b, z23.b\n"
+ "zip2 z17.b, z28.b, z17.b\n"
+ "st1b { z19.b }, p1, [x27]\n"
+ "st1b { z16.b }, p1, [x27, #1, MUL VL]\n"
+ "zip1 z16.b, z22.b, z21.b\n"
+ "zip2 z22.b, z22.b, z21.b\n"
+ "st1b { z18.b }, p1, [x27, #2, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z21.b, z27.b, z20.b\n"
+ "zip2 z20.b, z27.b, z20.b\n"
+ "st1b { z17.b }, p1, [x27]\n"
+ "zip1 z19.b, z26.b, z25.b\n"
+ "zip2 z18.b, z26.b, z25.b\n"
+ "st1b { z16.b }, p1, [x27, #1, MUL VL]\n"
+ "zip1 z17.b, z24.b, z23.b\n"
+ "zip2 z16.b, z24.b, z23.b\n"
+ "st1b { z22.b }, p1, [x27, #2, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #2, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z18.b }, p1, [x27]\n"
+ "st1b { z17.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #2, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"whilelt p0.b, XZR, x21\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
- "ld1b { z21.b }, p0/Z, [x9]\n"
"decw x21, ALL, MUL #3\n"
- "ld1b { z18.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z20.b, z19.b, z18.b\n"
- "zip1 z17.b, z21.b, z16.b\n"
- "zip2 z19.b, z19.b, z18.b\n"
- "zip2 z16.b, z21.b, z16.b\n"
- "cmp x21, #0x0\n"
+ "ld1b { z19.b }, p0/Z, [x10]\n"
"incd x10, ALL, MUL #6\n"
- "incd x9, ALL, MUL #6\n"
- "incd x28, ALL, MUL #6\n"
+ "ld1b { z18.b }, p0/Z, [x26]\n"
+ "incd x26, ALL, MUL #6\n"
+ "ld1b { z17.b }, p0/Z, [x25]\n"
+ "incd x25, ALL, MUL #6\n"
+ "ld1b { z16.b }, p0/Z, [x24]\n"
+ "incd x24, ALL, MUL #6\n"
+ "cmp x21, #0x0\n"
+ "zip1 z20.b, z19.b, z17.b\n"
+ "zip2 z19.b, z19.b, z17.b\n"
+ "zip1 z17.b, z18.b, z16.b\n"
+ "zip2 z16.b, z18.b, z16.b\n"
"zip1 z18.b, z20.b, z17.b\n"
"zip2 z17.b, z20.b, z17.b\n"
- "incd x27, ALL, MUL #6\n"
"zip1 z16.b, z19.b, z16.b\n"
- "st1b { z18.b }, p1, [x22]\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1b { z18.b }, p1, [x27]\n"
+ "st1b { z17.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #2, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -330,7 +330,7 @@ void sve_transpose_interleave_3VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp
index 36a15a16b3..70eb77ebe5 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_3VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,104 +44,104 @@ void sve_transpose_interleave_3VL_2x2(uint16_t *out, const uint16_t *in, size_t
"blt 6f\n"
"1:" // Main row loop: Head
"mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #3\n"
- "add x25, x28, %x[in_stride]\n"
+ "mov x11, %x[width]\n"
+ "cnth x10, ALL, MUL #3\n"
+ "mov x9, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x28, x12, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "cmp x11, x10\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z17.h }, p2/Z, [x12]\n"
- "ld1h { z23.h }, p2/Z, [x12, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z16.h }, p2/Z, [x11]\n"
- "ld1h { z20.h }, p2/Z, [x11, #1, MUL VL]\n"
- "zip1 z9.h, z17.h, z16.h\n"
- "zip2 z8.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x10]\n"
- "ld1h { z22.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z7.h, z23.h, z20.h\n"
- "mov x20, x22\n"
- "ld1h { z16.h }, p2/Z, [x9]\n"
- "ld1h { z21.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z6.h, z17.h, z16.h\n"
- "zip2 z5.h, z17.h, z16.h\n"
- "ld1h { z18.h }, p2/Z, [x28]\n"
- "ld1h { z17.h }, p2/Z, [x25]\n"
- "zip1 z4.h, z22.h, z21.h\n"
- "zip1 z3.h, z18.h, z17.h\n"
- "ld1h { z19.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x11, #2, MUL VL]\n"
- "zip2 z2.h, z18.h, z17.h\n"
- "zip2 z1.h, z23.h, z20.h\n"
- "ld1h { z18.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z17.h }, p2/Z, [x9, #2, MUL VL]\n"
- "zip1 z0.h, z19.h, z16.h\n"
- "zip2 z31.h, z19.h, z16.h\n"
- "ld1h { z20.h }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1h { z30.h }, p2/Z, [x28, #2, MUL VL]\n"
- "zip2 z29.h, z22.h, z21.h\n"
- "zip1 z28.h, z18.h, z17.h\n"
- "ld1h { z16.h }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1h { z19.h }, p2/Z, [x25, #2, MUL VL]\n"
- "zip1 z27.h, z20.h, z16.h\n"
- "zip2 z26.h, z18.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x24]\n"
- "ld1h { z18.h }, p2/Z, [x24, #1, MUL VL]\n"
- "zip2 z25.h, z20.h, z16.h\n"
- "zip1 z24.h, z30.h, z19.h\n"
- "ld1h { z23.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x23]\n"
- "zip1 z22.h, z17.h, z16.h\n"
- "zip2 z21.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x23, #1, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x23, #2, MUL VL]\n"
- "st1h { z9.h }, p2, [x21]\n"
- "zip1 z20.h, z18.h, z17.h\n"
- "st1h { z8.h }, p2, [x21, #1, MUL VL]\n"
- "sub x27, x27, x26\n"
- "cmp x27, x26\n"
- "zip2 z19.h, z30.h, z19.h\n"
- "st1h { z7.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x12]\n"
+ "ld1h { z29.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "mov x21, x9\n"
+ "add x9, x9, %x[out_stride]\n"
+ "ld1h { z17.h }, p2/Z, [x28]\n"
+ "ld1h { z16.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "mov x20, x9\n"
+ "sub x11, x11, x10\n"
+ "ld1h { z21.h }, p2/Z, [x27]\n"
+ "ld1h { z28.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "cmp x11, x10\n"
+ "add x9, x9, %x[out_stride]\n"
+ "ld1h { z20.h }, p2/Z, [x26]\n"
+ "ld1h { z27.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z26.h }, p2/Z, [x25]\n"
+ "ld1h { z19.h }, p2/Z, [x24]\n"
+ "zip1 z25.h, z18.h, z17.h\n"
+ "zip2 z24.h, z18.h, z17.h\n"
+ "ld1h { z23.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "zip1 z22.h, z29.h, z16.h\n"
+ "zip2 z8.h, z29.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "zip1 z7.h, z21.h, z20.h\n"
+ "zip2 z6.h, z21.h, z20.h\n"
+ "ld1h { z21.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z5.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "zip1 z4.h, z28.h, z27.h\n"
+ "zip1 z3.h, z26.h, z19.h\n"
+ "ld1h { z20.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "zip2 z1.h, z26.h, z19.h\n"
+ "zip1 z0.h, z23.h, z18.h\n"
+ "ld1h { z19.h }, p2/Z, [x23]\n"
+ "ld1h { z31.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip2 z30.h, z23.h, z18.h\n"
+ "zip2 z29.h, z28.h, z27.h\n"
+ "ld1h { z28.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x22]\n"
+ "zip1 z27.h, z17.h, z16.h\n"
+ "zip2 z26.h, z17.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "st1h { z25.h }, p2, [x21]\n"
+ "zip1 z25.h, z21.h, z20.h\n"
+ "st1h { z24.h }, p2, [x21, #1, MUL VL]\n"
+ "zip2 z24.h, z21.h, z20.h\n"
+ "zip1 z23.h, z5.h, z2.h\n"
"addvl x12, x12, #3\n"
- "addvl x11, x11, #3\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "st1h { z6.h }, p2, [x21, #3, MUL VL]\n"
- "addvl x10, x10, #3\n"
- "addvl x9, x9, #3\n"
- "zip1 z17.h, z23.h, z16.h\n"
- "st1h { z5.h }, p2, [x21, #4, MUL VL]\n"
+ "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
+ "zip1 z22.h, z19.h, z18.h\n"
+ "zip2 z21.h, z19.h, z18.h\n"
"addvl x28, x28, #3\n"
+ "st1h { z7.h }, p2, [x21, #3, MUL VL]\n"
+ "zip1 z20.h, z31.h, z17.h\n"
+ "addvl x27, x27, #3\n"
+ "addvl x26, x26, #3\n"
+ "st1h { z6.h }, p2, [x21, #4, MUL VL]\n"
"addvl x25, x25, #3\n"
- "zip2 z16.h, z23.h, z16.h\n"
- "st1h { z4.h }, p2, [x21, #5, MUL VL]\n"
"addvl x24, x24, #3\n"
+ "zip2 z19.h, z5.h, z2.h\n"
+ "st1h { z4.h }, p2, [x21, #5, MUL VL]\n"
"addvl x23, x23, #3\n"
+ "addvl x22, x22, #3\n"
+ "zip2 z18.h, z31.h, z17.h\n"
"st1h { z3.h }, p2, [x21, #6, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z2.h }, p2, [x21, #7, MUL VL]\n"
+ "zip1 z17.h, z28.h, z16.h\n"
+ "zip2 z16.h, z28.h, z16.h\n"
+ "st1h { z1.h }, p2, [x21, #7, MUL VL]\n"
"addvl x21, x21, #12\n"
- "st1h { z27.h }, p2, [x21, #-4, MUL VL]\n"
+ "st1h { z25.h }, p2, [x21, #-4, MUL VL]\n"
"st1h { z22.h }, p2, [x21, #-3, MUL VL]\n"
"st1h { z21.h }, p2, [x21, #-2, MUL VL]\n"
"st1h { z20.h }, p2, [x21, #-1, MUL VL]\n"
- "st1h { z1.h }, p2, [x20]\n"
+ "st1h { z8.h }, p2, [x20]\n"
"st1h { z0.h }, p2, [x20, #1, MUL VL]\n"
- "st1h { z31.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z30.h }, p2, [x20, #2, MUL VL]\n"
"st1h { z29.h }, p2, [x20, #3, MUL VL]\n"
- "st1h { z28.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z27.h }, p2, [x20, #4, MUL VL]\n"
"st1h { z26.h }, p2, [x20, #5, MUL VL]\n"
- "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
- "st1h { z24.h }, p2, [x20, #7, MUL VL]\n"
+ "st1h { z24.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z23.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
"st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
"st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
@@ -149,67 +149,67 @@ void sve_transpose_interleave_3VL_2x2(uint16_t *out, const uint16_t *in, size_t
"st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x11, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z0.h }, p1/Z, [x12]\n"
- "ld1h { z16.h }, p1/Z, [x11]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z21.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z31.h }, p1/Z, [x10]\n"
- "ld1h { z30.h }, p0/Z, [x10, #1, MUL VL]\n"
- "mov x20, x22\n"
- "decw x27, ALL, MUL #3\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z29.h }, p0/Z, [x9, #1, MUL VL]\n"
+ "mov x21, x11\n"
+ "mov x20, x9\n"
+ "decw x11, ALL, MUL #3\n"
+ "add x9, x9, %x[out_stride]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p0.h, XZR, x21\n"
+ "cmp x11, #0x0\n"
+ "ld1h { z19.h }, p1/Z, [x12]\n"
+ "ld1h { z18.h }, p1/Z, [x28]\n"
+ "ld1h { z0.h }, p1/Z, [x27]\n"
+ "ld1h { z31.h }, p1/Z, [x26]\n"
+ "ld1h { z30.h }, p1/Z, [x25]\n"
+ "ld1h { z29.h }, p1/Z, [x24]\n"
+ "ld1h { z17.h }, p0/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x28, #1, MUL VL]\n"
"addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
- "ld1h { z28.h }, p1/Z, [x28]\n"
- "ld1h { z20.h }, p1/Z, [x25]\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
- "ld1h { z27.h }, p0/Z, [x28, #1, MUL VL]\n"
"addvl x28, x28, #1\n"
- "ld1h { z26.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z22.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z21.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "addvl x27, x27, #1\n"
+ "addvl x26, x26, #1\n"
+ "ld1h { z28.h }, p0/Z, [x25, #1, MUL VL]\n"
"addvl x25, x25, #1\n"
- "ld1h { z25.h }, p1/Z, [x24]\n"
- "ld1h { z24.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z27.h }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x24, x24, #1\n"
- "zip1 z17.h, z0.h, z16.h\n"
- "ld1h { z23.h }, p1/Z, [x23]\n"
- "ld1h { z22.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z26.h }, p1/Z, [x23]\n"
+ "ld1h { z25.h }, p0/Z, [x23, #1, MUL VL]\n"
"addvl x23, x23, #1\n"
- "zip2 z16.h, z0.h, z16.h\n"
- "zip1 z21.h, z21.h, z19.h\n"
- "zip1 z19.h, z31.h, z18.h\n"
- "st1h { z17.h }, p2, [x20]\n"
- "cmp x27, #0x0\n"
- "zip2 z18.h, z31.h, z18.h\n"
- "zip1 z17.h, z30.h, z29.h\n"
- "st1h { z16.h }, p2, [x20, #1, MUL VL]\n"
+ "zip1 z20.h, z19.h, z18.h\n"
+ "ld1h { z24.h }, p1/Z, [x22]\n"
+ "ld1h { z23.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "addvl x22, x22, #1\n"
+ "zip2 z19.h, z19.h, z18.h\n"
+ "zip1 z18.h, z17.h, z16.h\n"
+ "zip1 z17.h, z0.h, z31.h\n"
"incd x12, ALL, MUL #4\n"
- "zip1 z16.h, z28.h, z20.h\n"
- "zip2 z20.h, z28.h, z20.h\n"
- "st1h { z21.h }, p2, [x20, #2, MUL VL]\n"
- "incd x11, ALL, MUL #4\n"
- "st1h { z19.h }, p2, [x20, #3, MUL VL]\n"
- "incd x10, ALL, MUL #4\n"
- "incd x9, ALL, MUL #4\n"
- "zip1 z19.h, z27.h, z26.h\n"
- "st1h { z18.h }, p2, [x20, #4, MUL VL]\n"
"incd x28, ALL, MUL #4\n"
+ "zip2 z16.h, z0.h, z31.h\n"
+ "zip1 z22.h, z22.h, z21.h\n"
+ "st1h { z20.h }, p2, [x20]\n"
+ "incd x27, ALL, MUL #4\n"
+ "zip1 z21.h, z30.h, z29.h\n"
+ "zip2 z20.h, z30.h, z29.h\n"
+ "st1h { z19.h }, p2, [x20, #1, MUL VL]\n"
+ "incd x26, ALL, MUL #4\n"
+ "st1h { z18.h }, p2, [x20, #2, MUL VL]\n"
"incd x25, ALL, MUL #4\n"
- "zip1 z18.h, z25.h, z23.h\n"
- "st1h { z17.h }, p2, [x20, #5, MUL VL]\n"
"incd x24, ALL, MUL #4\n"
+ "zip1 z19.h, z28.h, z27.h\n"
+ "st1h { z17.h }, p2, [x20, #3, MUL VL]\n"
"incd x23, ALL, MUL #4\n"
- "zip2 z17.h, z25.h, z23.h\n"
- "st1h { z16.h }, p2, [x20, #6, MUL VL]\n"
- "zip1 z16.h, z24.h, z22.h\n"
- "add x22, x22, %x[out_stride]\n"
+ "incd x22, ALL, MUL #4\n"
+ "zip1 z18.h, z26.h, z24.h\n"
+ "st1h { z16.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z17.h, z26.h, z24.h\n"
+ "zip1 z16.h, z25.h, z23.h\n"
+ "st1h { z22.h }, p2, [x20, #5, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #6, MUL VL]\n"
"st1h { z20.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
"st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
@@ -227,64 +227,64 @@ void sve_transpose_interleave_3VL_2x2(uint16_t *out, const uint16_t *in, size_t
"mov x12, %x[in]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #3\n"
- "add x11, x12, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x11, %x[in_stride]\n"
- "csel x11, x11, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "mov x9, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
+ "add x28, x12, %x[in_stride]\n"
+ "add %x[in], x28, %x[in_stride]\n"
+ "csel x28, x28, %x[pad_row], GT\n"
+ "cmp x21, x20\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z17.h }, p2/Z, [x12]\n"
- "ld1h { z22.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z19.h }, p2/Z, [x12]\n"
+ "ld1h { z23.h }, p2/Z, [x12, #1, MUL VL]\n"
"sub x21, x21, x20\n"
+ "ld1h { z18.h }, p2/Z, [x28]\n"
+ "ld1h { z17.h }, p2/Z, [x28, #1, MUL VL]\n"
"cmp x21, x20\n"
- "ld1h { z16.h }, p2/Z, [x11]\n"
- "ld1h { z21.h }, p2/Z, [x11, #1, MUL VL]\n"
- "zip1 z18.h, z17.h, z16.h\n"
- "zip2 z17.h, z17.h, z16.h\n"
- "ld1h { z20.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z19.h }, p2/Z, [x11, #2, MUL VL]\n"
- "zip1 z16.h, z22.h, z21.h\n"
- "st1h { z18.h }, p2, [x22]\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x28, #2, MUL VL]\n"
"addvl x12, x12, #3\n"
- "addvl x11, x11, #3\n"
- "zip2 z18.h, z22.h, z21.h\n"
- "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.h, z20.h, z19.h\n"
- "zip2 z16.h, z20.h, z19.h\n"
- "st1h { z18.h }, p2, [x22]\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x28, x28, #3\n"
+ "zip1 z21.h, z19.h, z18.h\n"
+ "zip2 z20.h, z19.h, z18.h\n"
+ "zip1 z19.h, z23.h, z17.h\n"
+ "zip2 z18.h, z23.h, z17.h\n"
+ "zip1 z17.h, z22.h, z16.h\n"
+ "zip2 z16.h, z22.h, z16.h\n"
+ "st1h { z21.h }, p2, [x9]\n"
+ "st1h { z20.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z19.h }, p2, [x9, #2, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
+ "st1h { z18.h }, p2, [x9]\n"
+ "st1h { z17.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #2, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x12]\n"
- "ld1h { z17.h }, p0/Z, [x11]\n"
+ "decw x21, ALL, MUL #3\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
+ "cmp x21, #0x0\n"
+ "ld1h { z20.h }, p1/Z, [x12]\n"
+ "ld1h { z17.h }, p1/Z, [x28]\n"
"ld1h { z19.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x11, #1, MUL VL]\n"
- "decw x21, ALL, MUL #3\n"
"addvl x12, x12, #1\n"
+ "ld1h { z16.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #1\n"
"zip1 z18.h, z20.h, z17.h\n"
"zip2 z17.h, z20.h, z17.h\n"
- "addvl x11, x11, #1\n"
- "cmp x21, #0x0\n"
- "zip1 z16.h, z19.h, z16.h\n"
- "st1h { z18.h }, p2, [x22]\n"
"incd x12, ALL, MUL #4\n"
- "incd x11, ALL, MUL #4\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "incd x28, ALL, MUL #4\n"
+ "zip1 z16.h, z19.h, z16.h\n"
+ "st1h { z18.h }, p2, [x9]\n"
+ "st1h { z17.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #2, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -293,7 +293,7 @@ void sve_transpose_interleave_3VL_2x2(uint16_t *out, const uint16_t *in, size_t
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp
index e661e2698a..539b4946b4 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,67 +34,67 @@ void sve_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "ptrue p4.b\n"
+ "ptrue p2.b\n"
"blt 4f\n"
"1:" // Main row loop: Head
- "mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "add x23, x24, %x[in_stride]\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
- "mov x21, %x[width]\n"
+ "mov x25, %x[width]\n"
+ "add x24, x27, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"2:" // Main row loop: Column loop
- "mov x20, x21\n"
- "whilelt p3.h, XZR, x20\n"
- "ld1h { z31.h }, p3/Z, [x26]\n"
- "ld1h { z30.h }, p3/Z, [x25]\n"
- "dech x20\n"
- "whilelt p2.h, XZR, x20\n"
- "ld1h { z29.h }, p2/Z, [x26, #1, MUL VL]\n"
- "ld1h { z28.h }, p2/Z, [x25, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z27.h }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1h { z26.h }, p1/Z, [x25, #2, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z25.h }, p0/Z, [x26, #3, MUL VL]\n"
- "ld1h { z24.h }, p0/Z, [x25, #3, MUL VL]\n"
- "mov x20, x22\n"
- "dech x21, ALL, MUL #4\n"
- "ld1h { z23.h }, p3/Z, [x24]\n"
- "ld1h { z22.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z21.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z20.h }, p0/Z, [x24, #3, MUL VL]\n"
- "cmp x21, #0x0\n"
- "addvl x26, x26, #4\n"
- "ld1h { z19.h }, p3/Z, [x23]\n"
- "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
- "addvl x25, x25, #4\n"
+ "mov x21, x25\n"
+ "mov x20, x26\n"
+ "dech x25, ALL, MUL #4\n"
+ "add x26, x26, %x[out_stride]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z31.h }, p1/Z, [x27]\n"
+ "ld1h { z30.h }, p1/Z, [x24]\n"
+ "ld1h { z29.h }, p1/Z, [x23]\n"
+ "ld1h { z28.h }, p1/Z, [x22]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z27.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z26.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z25.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z24.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "whilelt p0.h, XZR, x21\n"
+ "cmp x25, #0x0\n"
+ "ld1h { z23.h }, p1/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z22.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z21.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z20.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x27, #3, MUL VL]\n"
+ "ld1h { z18.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
"addvl x24, x24, #4\n"
- "ld1h { z17.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x23, #3, MUL VL]\n"
- "st1h { z31.h }, p4, [x20]\n"
+ "ld1h { z17.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "st1h { z31.h }, p2, [x20]\n"
"addvl x23, x23, #4\n"
- "st1h { z29.h }, p4, [x20, #1, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z27.h }, p4, [x20, #2, MUL VL]\n"
- "st1h { z25.h }, p4, [x20, #3, MUL VL]\n"
- "st1h { z30.h }, p4, [x20, #4, MUL VL]\n"
- "st1h { z28.h }, p4, [x20, #5, MUL VL]\n"
- "st1h { z26.h }, p4, [x20, #6, MUL VL]\n"
- "st1h { z24.h }, p4, [x20, #7, MUL VL]\n"
+ "st1h { z27.h }, p2, [x20, #1, MUL VL]\n"
+ "addvl x22, x22, #4\n"
+ "st1h { z23.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z19.h }, p2, [x20, #3, MUL VL]\n"
+ "st1h { z30.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z26.h }, p2, [x20, #5, MUL VL]\n"
+ "st1h { z22.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z18.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1h { z23.h }, p4, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p4, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p4, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p4, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p4, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p4, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p4, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p4, [x20, #-1, MUL VL]\n"
+ "st1h { z29.h }, p2, [x20, #-8, MUL VL]\n"
+ "st1h { z25.h }, p2, [x20, #-7, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #-6, MUL VL]\n"
+ "st1h { z17.h }, p2, [x20, #-5, MUL VL]\n"
+ "st1h { z28.h }, p2, [x20, #-4, MUL VL]\n"
+ "st1h { z24.h }, p2, [x20, #-3, MUL VL]\n"
+ "st1h { z20.h }, p2, [x20, #-2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
"bgt 2b\n"
"3:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -103,32 +103,32 @@ void sve_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
"cbz %x[height], 8f\n"
"4:" // Main loop skip
"5:" // Tail row loop: Head
- "mov x26, %x[in]\n"
- "add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x27, %x[in]\n"
+ "mov x26, %x[out]\n"
"sub %x[height], %x[height], #0x1\n"
"mov x21, %x[width]\n"
+ "add %x[in], x27, %x[in_stride]\n"
"6:" // Tail row loop: Column loop
"mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z19.h }, p0/Z, [x26]\n"
+ "dech x21, ALL, MUL #4\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x26, #1, MUL VL]\n"
"dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z17.h }, p0/Z, [x26, #2, MUL VL]\n"
+ "ld1h { z19.h }, p1/Z, [x27]\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
- "dech x21, ALL, MUL #4\n"
+ "ld1h { z18.h }, p0/Z, [x27, #1, MUL VL]\n"
"whilelt p0.h, XZR, x20\n"
"cmp x21, #0x0\n"
- "ld1h { z16.h }, p0/Z, [x26, #3, MUL VL]\n"
- "st1h { z19.h }, p4, [x22]\n"
- "addvl x26, x26, #4\n"
- "st1h { z18.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p4, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z17.h }, p1/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x27, #3, MUL VL]\n"
+ "addvl x27, x27, #4\n"
+ "st1h { z19.h }, p2, [x26]\n"
+ "st1h { z18.h }, p2, [x26, #1, MUL VL]\n"
+ "st1h { z17.h }, p2, [x26, #2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x26, #3, MUL VL]\n"
+ "add x26, x26, %x[out_stride]\n"
"bgt 6b\n"
"7:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -137,7 +137,7 @@ void sve_transpose_interleave_4VL(uint16_t *out, const uint16_t *in, size_t widt
"8:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp
index 03a78f72f1..5f0b4ea8d6 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,145 +44,145 @@ void sve_transpose_interleave_4VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"blt 6f\n"
"1:" // Main row loop: Head
"mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
- "mov x25, %x[width]\n"
- "cntb x24, ALL, MUL #2\n"
- "add x23, x26, %x[in_stride]\n"
- "add x21, x23, %x[in_stride]\n"
+ "mov x9, %x[width]\n"
+ "cntb x28, ALL, MUL #2\n"
+ "mov x27, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x26, x10, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add x23, x24, %x[in_stride]\n"
+ "cmp x9, x28\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
"add x20, x21, %x[in_stride]\n"
- "cmp x25, x24\n"
"add %x[in], x20, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z20.b }, p1/Z, [x9]\n"
- "sub x25, x25, x24\n"
- "cmp x25, x24\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z4.b, z21.b, z17.b\n"
- "zip1 z3.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
- "ld1b { z18.b }, p1/Z, [x23]\n"
- "zip2 z2.b, z21.b, z17.b\n"
- "zip2 z1.b, z20.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x21]\n"
- "ld1b { z16.b }, p1/Z, [x20]\n"
- "zip1 z0.b, z19.b, z17.b\n"
- "zip1 z31.b, z18.b, z16.b\n"
- "ld1b { z24.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z30.b, z19.b, z17.b\n"
- "zip2 z23.b, z18.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z22.b, z24.b, z17.b\n"
- "zip1 z21.b, z20.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip2 z29.b, z24.b, z17.b\n"
- "zip2 z28.b, z20.b, z16.b\n"
+ "ld1b { z19.b }, p1/Z, [x10]\n"
+ "ld1b { z18.b }, p1/Z, [x26]\n"
+ "sub x9, x9, x28\n"
+ "ld1b { z17.b }, p1/Z, [x25]\n"
+ "ld1b { z16.b }, p1/Z, [x24]\n"
+ "cmp x9, x28\n"
+ "ld1b { z24.b }, p1/Z, [x23]\n"
+ "ld1b { z23.b }, p1/Z, [x22]\n"
+ "ld1b { z22.b }, p1/Z, [x21]\n"
+ "ld1b { z21.b }, p1/Z, [x20]\n"
+ "ld1b { z29.b }, p1/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z28.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "zip1 z4.b, z19.b, z17.b\n"
+ "zip1 z3.b, z18.b, z16.b\n"
+ "ld1b { z27.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "zip2 z2.b, z19.b, z17.b\n"
+ "zip2 z1.b, z18.b, z16.b\n"
+ "ld1b { z19.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1b { z18.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "zip1 z26.b, z24.b, z22.b\n"
+ "zip1 z25.b, z23.b, z21.b\n"
"ld1b { z17.b }, p1/Z, [x21, #1, MUL VL]\n"
"ld1b { z16.b }, p1/Z, [x20, #1, MUL VL]\n"
- "zip1 z27.b, z19.b, z17.b\n"
- "zip1 z26.b, z18.b, z16.b\n"
- "zip2 z25.b, z19.b, z17.b\n"
- "zip2 z24.b, z18.b, z16.b\n"
+ "zip2 z24.b, z24.b, z22.b\n"
+ "zip2 z23.b, z23.b, z21.b\n"
+ "zip1 z22.b, z29.b, z27.b\n"
+ "zip1 z21.b, z28.b, z20.b\n"
"addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- "zip1 z16.b, z4.b, z3.b\n"
- "zip2 z17.b, z4.b, z3.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x28, x28, #2\n"
- "zip1 z16.b, z2.b, z1.b\n"
- "zip2 z20.b, z2.b, z1.b\n"
- "st1b { z17.b }, p1, [x22, #1, MUL VL]\n"
- "addvl x27, x27, #2\n"
- "zip1 z19.b, z0.b, z31.b\n"
- "zip2 z18.b, z0.b, z31.b\n"
- "st1b { z16.b }, p1, [x22, #2, MUL VL]\n"
"addvl x26, x26, #2\n"
- "zip1 z17.b, z30.b, z23.b\n"
- "zip2 z16.b, z30.b, z23.b\n"
- "st1b { z20.b }, p1, [x22, #3, MUL VL]\n"
+ "zip2 z0.b, z29.b, z27.b\n"
+ "zip2 z31.b, z28.b, z20.b\n"
+ "addvl x25, x25, #2\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z30.b, z19.b, z17.b\n"
+ "zip1 z29.b, z18.b, z16.b\n"
"addvl x23, x23, #2\n"
- "st1b { z19.b }, p1, [x22, #4, MUL VL]\n"
+ "addvl x22, x22, #2\n"
+ "zip2 z28.b, z19.b, z17.b\n"
+ "zip2 z27.b, z18.b, z16.b\n"
"addvl x21, x21, #2\n"
"addvl x20, x20, #2\n"
+ "zip1 z20.b, z4.b, z3.b\n"
+ "zip2 z19.b, z4.b, z3.b\n"
+ "zip1 z18.b, z2.b, z1.b\n"
+ "zip2 z17.b, z2.b, z1.b\n"
+ "zip1 z16.b, z26.b, z25.b\n"
+ "zip2 z26.b, z26.b, z25.b\n"
+ "zip1 z25.b, z24.b, z23.b\n"
+ "zip2 z24.b, z24.b, z23.b\n"
+ "st1b { z20.b }, p1, [x27]\n"
+ "st1b { z19.b }, p1, [x27, #1, MUL VL]\n"
"zip1 z23.b, z22.b, z21.b\n"
- "st1b { z18.b }, p1, [x22, #5, MUL VL]\n"
"zip2 z22.b, z22.b, z21.b\n"
- "zip1 z21.b, z29.b, z28.b\n"
- "st1b { z17.b }, p1, [x22, #6, MUL VL]\n"
- "zip2 z20.b, z29.b, z28.b\n"
- "zip1 z19.b, z27.b, z26.b\n"
- "st1b { z16.b }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z18.b, z27.b, z26.b\n"
- "zip1 z17.b, z25.b, z24.b\n"
- "zip2 z16.b, z25.b, z24.b\n"
- "st1b { z23.b }, p1, [x22]\n"
- "st1b { z22.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z21.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z20.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z18.b }, p1, [x22, #5, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #6, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1b { z18.b }, p1, [x27, #2, MUL VL]\n"
+ "zip1 z21.b, z0.b, z31.b\n"
+ "zip2 z20.b, z0.b, z31.b\n"
+ "st1b { z17.b }, p1, [x27, #3, MUL VL]\n"
+ "zip1 z19.b, z30.b, z29.b\n"
+ "zip2 z18.b, z30.b, z29.b\n"
+ "st1b { z16.b }, p1, [x27, #4, MUL VL]\n"
+ "zip1 z17.b, z28.b, z27.b\n"
+ "zip2 z16.b, z28.b, z27.b\n"
+ "st1b { z26.b }, p1, [x27, #5, MUL VL]\n"
+ "st1b { z25.b }, p1, [x27, #6, MUL VL]\n"
+ "st1b { z24.b }, p1, [x27, #7, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z23.b }, p1, [x27]\n"
+ "st1b { z22.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z21.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z20.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z18.b }, p1, [x27, #5, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #6, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #7, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x25, 5f\n"
+ "cbz x9, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x25\n"
- "ld1b { z19.b }, p0/Z, [x10]\n"
- "ld1b { z18.b }, p0/Z, [x9]\n"
- "decw x25, ALL, MUL #4\n"
- "ld1b { z17.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z27.b, z19.b, z17.b\n"
- "zip1 z26.b, z18.b, z16.b\n"
- "ld1b { z22.b }, p0/Z, [x26]\n"
- "ld1b { z21.b }, p0/Z, [x23]\n"
- "zip2 z25.b, z19.b, z17.b\n"
- "zip2 z20.b, z18.b, z16.b\n"
- "ld1b { z19.b }, p0/Z, [x21]\n"
- "ld1b { z16.b }, p0/Z, [x20]\n"
- "zip1 z18.b, z22.b, z19.b\n"
- "zip1 z17.b, z21.b, z16.b\n"
- "zip2 z24.b, z22.b, z19.b\n"
- "zip2 z16.b, z21.b, z16.b\n"
- "cmp x25, #0x0\n"
+ "whilelt p0.b, XZR, x9\n"
+ "decw x9, ALL, MUL #4\n"
+ "ld1b { z20.b }, p0/Z, [x10]\n"
"addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
- "addvl x28, x28, #1\n"
- "zip1 z23.b, z27.b, z26.b\n"
- "zip2 z22.b, z27.b, z26.b\n"
- "addvl x27, x27, #1\n"
+ "ld1b { z24.b }, p0/Z, [x26]\n"
"addvl x26, x26, #1\n"
- "zip1 z21.b, z25.b, z20.b\n"
- "zip2 z20.b, z25.b, z20.b\n"
+ "ld1b { z19.b }, p0/Z, [x25]\n"
+ "addvl x25, x25, #1\n"
+ "ld1b { z18.b }, p0/Z, [x24]\n"
+ "addvl x24, x24, #1\n"
+ "ld1b { z23.b }, p0/Z, [x23]\n"
+ "ld1b { z22.b }, p0/Z, [x22]\n"
+ "ld1b { z17.b }, p0/Z, [x21]\n"
+ "cmp x9, #0x0\n"
"addvl x23, x23, #1\n"
+ "ld1b { z16.b }, p0/Z, [x20]\n"
+ "zip1 z21.b, z20.b, z19.b\n"
+ "zip2 z25.b, z20.b, z19.b\n"
+ "addvl x22, x22, #1\n"
+ "zip1 z20.b, z24.b, z18.b\n"
+ "zip2 z19.b, z24.b, z18.b\n"
"addvl x21, x21, #1\n"
+ "addvl x20, x20, #1\n"
+ "zip1 z18.b, z23.b, z17.b\n"
+ "zip2 z24.b, z23.b, z17.b\n"
+ "zip1 z17.b, z22.b, z16.b\n"
+ "zip2 z16.b, z22.b, z16.b\n"
+ "zip1 z23.b, z21.b, z20.b\n"
+ "zip2 z22.b, z21.b, z20.b\n"
+ "zip1 z21.b, z25.b, z19.b\n"
+ "zip2 z20.b, z25.b, z19.b\n"
"zip1 z19.b, z18.b, z17.b\n"
"zip2 z18.b, z18.b, z17.b\n"
- "addvl x20, x20, #1\n"
"zip1 z17.b, z24.b, z16.b\n"
"zip2 z16.b, z24.b, z16.b\n"
- "st1b { z23.b }, p1, [x22]\n"
- "st1b { z22.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z21.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z20.b }, p1, [x22, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x22, #4, MUL VL]\n"
- "st1b { z18.b }, p1, [x22, #5, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #6, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1b { z23.b }, p1, [x27]\n"
+ "st1b { z22.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z21.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z20.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z18.b }, p1, [x27, #5, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #6, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #7, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x8\n"
@@ -192,90 +192,90 @@ void sve_transpose_interleave_4VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cntb x20, ALL, MUL #2\n"
- "add x27, x28, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x27, %x[in_stride]\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
+ "mov x27, %x[out]\n"
+ "add x26, x10, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "add x24, x25, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
+ "csel x26, x26, %x[pad_row], GT\n"
"cmp x21, x20\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z19.b }, p1/Z, [x9]\n"
+ "ld1b { z20.b }, p1/Z, [x10]\n"
+ "ld1b { z24.b }, p1/Z, [x26]\n"
"sub x21, x21, x20\n"
+ "ld1b { z19.b }, p1/Z, [x25]\n"
+ "ld1b { z18.b }, p1/Z, [x24]\n"
"cmp x21, x20\n"
- "ld1b { z17.b }, p1/Z, [x28]\n"
- "ld1b { z16.b }, p1/Z, [x27]\n"
- "zip1 z26.b, z21.b, z17.b\n"
- "zip1 z25.b, z19.b, z16.b\n"
- "ld1b { z20.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z24.b, z21.b, z17.b\n"
- "zip2 z19.b, z19.b, z16.b\n"
- "ld1b { z17.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip1 z23.b, z20.b, z17.b\n"
- "zip1 z22.b, z18.b, z16.b\n"
- "zip2 z21.b, z20.b, z17.b\n"
- "zip2 z20.b, z18.b, z16.b\n"
+ "ld1b { z23.b }, p1/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z25.b }, p1/Z, [x26, #1, MUL VL]\n"
"addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- "zip1 z16.b, z26.b, z25.b\n"
- "zip2 z18.b, z26.b, z25.b\n"
- "st1b { z16.b }, p1, [x22]\n"
- "addvl x28, x28, #2\n"
- "zip1 z17.b, z24.b, z19.b\n"
- "zip2 z16.b, z24.b, z19.b\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "addvl x27, x27, #2\n"
- "st1b { z17.b }, p1, [x22, #2, MUL VL]\n"
- "zip1 z19.b, z23.b, z22.b\n"
- "zip2 z18.b, z23.b, z22.b\n"
- "st1b { z16.b }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.b, z21.b, z20.b\n"
- "zip2 z16.b, z21.b, z20.b\n"
- "st1b { z19.b }, p1, [x22]\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x26, x26, #2\n"
+ "ld1b { z22.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z17.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "addvl x25, x25, #2\n"
+ "addvl x24, x24, #2\n"
+ "zip1 z21.b, z20.b, z19.b\n"
+ "zip1 z16.b, z24.b, z18.b\n"
+ "zip2 z20.b, z20.b, z19.b\n"
+ "zip2 z19.b, z24.b, z18.b\n"
+ "zip1 z24.b, z23.b, z22.b\n"
+ "zip1 z18.b, z25.b, z17.b\n"
+ "zip2 z23.b, z23.b, z22.b\n"
+ "zip2 z22.b, z25.b, z17.b\n"
+ "zip1 z17.b, z21.b, z16.b\n"
+ "zip2 z16.b, z21.b, z16.b\n"
+ "zip1 z21.b, z20.b, z19.b\n"
+ "zip2 z20.b, z20.b, z19.b\n"
+ "zip1 z19.b, z24.b, z18.b\n"
+ "zip2 z18.b, z24.b, z18.b\n"
+ "st1b { z17.b }, p1, [x27]\n"
+ "st1b { z16.b }, p1, [x27, #1, MUL VL]\n"
+ "zip1 z17.b, z23.b, z22.b\n"
+ "zip2 z16.b, z23.b, z22.b\n"
+ "st1b { z21.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z20.b }, p1, [x27, #3, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z19.b }, p1, [x27]\n"
+ "st1b { z18.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #3, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"whilelt p0.b, XZR, x21\n"
- "ld1b { z20.b }, p0/Z, [x10]\n"
- "ld1b { z21.b }, p0/Z, [x9]\n"
"decw x21, ALL, MUL #4\n"
- "ld1b { z19.b }, p0/Z, [x28]\n"
- "ld1b { z16.b }, p0/Z, [x27]\n"
- "zip1 z18.b, z20.b, z19.b\n"
- "zip1 z17.b, z21.b, z16.b\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "zip2 z16.b, z21.b, z16.b\n"
- "cmp x21, #0x0\n"
+ "ld1b { z20.b }, p0/Z, [x10]\n"
"addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
- "addvl x28, x28, #1\n"
+ "ld1b { z19.b }, p0/Z, [x26]\n"
+ "addvl x26, x26, #1\n"
+ "ld1b { z17.b }, p0/Z, [x25]\n"
+ "addvl x25, x25, #1\n"
+ "ld1b { z16.b }, p0/Z, [x24]\n"
+ "addvl x24, x24, #1\n"
+ "cmp x21, #0x0\n"
+ "zip1 z18.b, z20.b, z17.b\n"
+ "zip2 z20.b, z20.b, z17.b\n"
+ "zip1 z17.b, z19.b, z16.b\n"
+ "zip2 z16.b, z19.b, z16.b\n"
"zip1 z19.b, z18.b, z17.b\n"
"zip2 z18.b, z18.b, z17.b\n"
- "addvl x27, x27, #1\n"
"zip1 z17.b, z20.b, z16.b\n"
"zip2 z16.b, z20.b, z16.b\n"
- "st1b { z19.b }, p1, [x22]\n"
- "st1b { z18.b }, p1, [x22, #1, MUL VL]\n"
- "st1b { z17.b }, p1, [x22, #2, MUL VL]\n"
- "st1b { z16.b }, p1, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1b { z19.b }, p1, [x27]\n"
+ "st1b { z18.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #3, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp
index b196799cfe..56d1c9accc 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_4VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,126 +44,126 @@ void sve_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
"blt 6f\n"
"1:" // Main row loop: Head
"mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #4\n"
- "add x25, x28, %x[in_stride]\n"
+ "mov x11, %x[width]\n"
+ "cnth x10, ALL, MUL #4\n"
+ "mov x9, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x28, x12, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "cmp x11, x10\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z18.h }, p2/Z, [x12]\n"
- "ld1h { z20.h }, p2/Z, [x12, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z17.h }, p2/Z, [x11]\n"
- "ld1h { z16.h }, p2/Z, [x11, #1, MUL VL]\n"
- "zip1 z25.h, z18.h, z17.h\n"
- "zip2 z24.h, z18.h, z17.h\n"
- "ld1h { z19.h }, p2/Z, [x10]\n"
- "ld1h { z18.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z23.h, z20.h, z16.h\n"
- "zip2 z15.h, z20.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x9]\n"
- "ld1h { z16.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z14.h, z19.h, z17.h\n"
- "zip2 z13.h, z19.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z19.h }, p2/Z, [x12, #3, MUL VL]\n"
- "zip1 z12.h, z18.h, z16.h\n"
- "zip2 z11.h, z18.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x11, #2, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x11, #3, MUL VL]\n"
- "mov x20, x22\n"
- "zip1 z10.h, z17.h, z16.h\n"
- "ld1h { z21.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z20.h }, p2/Z, [x10, #3, MUL VL]\n"
- "zip2 z9.h, z17.h, z16.h\n"
- "zip1 z8.h, z19.h, z18.h\n"
- "ld1h { z17.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x9, #3, MUL VL]\n"
- "zip2 z7.h, z19.h, z18.h\n"
- "zip1 z6.h, z21.h, z17.h\n"
- "ld1h { z19.h }, p2/Z, [x28]\n"
- "ld1h { z18.h }, p2/Z, [x28, #1, MUL VL]\n"
- "zip2 z5.h, z21.h, z17.h\n"
- "zip1 z4.h, z20.h, z16.h\n"
- "ld1h { z22.h }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1h { z3.h }, p2/Z, [x28, #3, MUL VL]\n"
- "zip2 z2.h, z20.h, z16.h\n"
- "sub x27, x27, x26\n"
- "ld1h { z17.h }, p2/Z, [x25]\n"
- "ld1h { z16.h }, p2/Z, [x25, #1, MUL VL]\n"
- "zip1 z1.h, z19.h, z17.h\n"
- "zip2 z0.h, z19.h, z17.h\n"
- "ld1h { z21.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z20.h }, p2/Z, [x25, #3, MUL VL]\n"
- "zip1 z31.h, z18.h, z16.h\n"
- "zip2 z30.h, z18.h, z16.h\n"
+ "ld1h { z22.h }, p2/Z, [x12]\n"
+ "ld1h { z28.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "mov x21, x9\n"
+ "add x9, x9, %x[out_stride]\n"
+ "ld1h { z26.h }, p2/Z, [x28]\n"
+ "ld1h { z17.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "mov x20, x9\n"
+ "sub x11, x11, x10\n"
+ "ld1h { z13.h }, p2/Z, [x27]\n"
+ "ld1h { z8.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "cmp x11, x10\n"
+ "add x9, x9, %x[out_stride]\n"
+ "ld1h { z30.h }, p2/Z, [x26]\n"
+ "ld1h { z31.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z15.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x12, #3, MUL VL]\n"
+ "zip1 z27.h, z22.h, z26.h\n"
+ "zip2 z26.h, z22.h, z26.h\n"
+ "ld1h { z4.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x28, #3, MUL VL]\n"
+ "zip1 z24.h, z28.h, z17.h\n"
+ "zip2 z19.h, z28.h, z17.h\n"
+ "ld1h { z25.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z21.h }, p2/Z, [x27, #3, MUL VL]\n"
+ "zip1 z11.h, z13.h, z30.h\n"
+ "zip2 z20.h, z13.h, z30.h\n"
+ "ld1h { z18.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1h { z1.h }, p2/Z, [x26, #3, MUL VL]\n"
+ "zip1 z12.h, z8.h, z31.h\n"
+ "zip2 z14.h, z8.h, z31.h\n"
+ "ld1h { z23.h }, p2/Z, [x25]\n"
+ "ld1h { z28.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "zip1 z13.h, z15.h, z4.h\n"
+ "zip2 z15.h, z15.h, z4.h\n"
+ "ld1h { z2.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z9.h }, p2/Z, [x25, #3, MUL VL]\n"
+ "zip1 z8.h, z0.h, z16.h\n"
+ "zip2 z10.h, z0.h, z16.h\n"
"ld1h { z17.h }, p2/Z, [x24]\n"
- "ld1h { z19.h }, p2/Z, [x24, #1, MUL VL]\n"
- "cmp x27, x26\n"
+ "ld1h { z16.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip1 z6.h, z25.h, z18.h\n"
+ "zip2 z5.h, z25.h, z18.h\n"
+ "ld1h { z4.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z7.h }, p2/Z, [x24, #3, MUL VL]\n"
+ "zip1 z30.h, z21.h, z1.h\n"
+ "zip2 z3.h, z21.h, z1.h\n"
+ "ld1h { z22.h }, p2/Z, [x23]\n"
+ "ld1h { z21.h }, p2/Z, [x23, #1, MUL VL]\n"
"addvl x12, x12, #4\n"
- "ld1h { z29.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z28.h }, p2/Z, [x24, #3, MUL VL]\n"
- "addvl x11, x11, #4\n"
- "addvl x10, x10, #4\n"
- "ld1h { z16.h }, p2/Z, [x23]\n"
- "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z27.h, z17.h, z16.h\n"
- "zip2 z26.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x23, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x23, #3, MUL VL]\n"
- "st1h { z25.h }, p2, [x21]\n"
- "zip1 z25.h, z19.h, z18.h\n"
- "st1h { z24.h }, p2, [x21, #1, MUL VL]\n"
- "zip2 z24.h, z19.h, z18.h\n"
- "addvl x9, x9, #4\n"
"addvl x28, x28, #4\n"
- "st1h { z23.h }, p2, [x21, #2, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z31.h }, p2/Z, [x23, #3, MUL VL]\n"
+ "zip1 z1.h, z23.h, z17.h\n"
+ "zip2 z0.h, z23.h, z17.h\n"
+ "ld1h { z25.h }, p2/Z, [x22]\n"
+ "ld1h { z23.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "zip1 z29.h, z28.h, z16.h\n"
+ "zip2 z28.h, z28.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x22, #3, MUL VL]\n"
+ "st1h { z27.h }, p2, [x21]\n"
+ "addvl x27, x27, #4\n"
+ "st1h { z26.h }, p2, [x21, #1, MUL VL]\n"
+ "addvl x26, x26, #4\n"
"addvl x25, x25, #4\n"
+ "zip1 z27.h, z2.h, z4.h\n"
+ "st1h { z24.h }, p2, [x21, #2, MUL VL]\n"
+ "zip1 z26.h, z22.h, z25.h\n"
+ "zip2 z25.h, z22.h, z25.h\n"
"addvl x24, x24, #4\n"
- "zip1 z23.h, z22.h, z21.h\n"
- "st1h { z15.h }, p2, [x21, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x21, #3, MUL VL]\n"
+ "zip1 z24.h, z21.h, z23.h\n"
+ "zip2 z23.h, z21.h, z23.h\n"
"addvl x23, x23, #4\n"
- "zip2 z22.h, z22.h, z21.h\n"
- "zip1 z21.h, z3.h, z20.h\n"
- "st1h { z14.h }, p2, [x21, #4, MUL VL]\n"
- "zip2 z20.h, z3.h, z20.h\n"
- "zip1 z19.h, z29.h, z17.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z13.h }, p2, [x21, #5, MUL VL]\n"
- "zip2 z18.h, z29.h, z17.h\n"
- "zip1 z17.h, z28.h, z16.h\n"
+ "st1h { z11.h }, p2, [x21, #4, MUL VL]\n"
+ "addvl x22, x22, #4\n"
+ "zip2 z22.h, z2.h, z4.h\n"
+ "zip1 z21.h, z9.h, z7.h\n"
+ "st1h { z20.h }, p2, [x21, #5, MUL VL]\n"
+ "zip2 z20.h, z9.h, z7.h\n"
+ "zip1 z19.h, z18.h, z17.h\n"
"st1h { z12.h }, p2, [x21, #6, MUL VL]\n"
- "zip2 z16.h, z28.h, z16.h\n"
- "st1h { z11.h }, p2, [x21, #7, MUL VL]\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z17.h, z31.h, z16.h\n"
+ "st1h { z14.h }, p2, [x21, #7, MUL VL]\n"
"addvl x21, x21, #16\n"
+ "zip2 z16.h, z31.h, z16.h\n"
"st1h { z1.h }, p2, [x21, #-8, MUL VL]\n"
"st1h { z0.h }, p2, [x21, #-7, MUL VL]\n"
- "st1h { z31.h }, p2, [x21, #-6, MUL VL]\n"
- "st1h { z30.h }, p2, [x21, #-5, MUL VL]\n"
- "st1h { z27.h }, p2, [x21, #-4, MUL VL]\n"
- "st1h { z26.h }, p2, [x21, #-3, MUL VL]\n"
- "st1h { z25.h }, p2, [x21, #-2, MUL VL]\n"
- "st1h { z24.h }, p2, [x21, #-1, MUL VL]\n"
- "st1h { z10.h }, p2, [x20]\n"
- "st1h { z9.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z29.h }, p2, [x21, #-6, MUL VL]\n"
+ "st1h { z28.h }, p2, [x21, #-5, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #-4, MUL VL]\n"
+ "st1h { z25.h }, p2, [x21, #-3, MUL VL]\n"
+ "st1h { z24.h }, p2, [x21, #-2, MUL VL]\n"
+ "st1h { z23.h }, p2, [x21, #-1, MUL VL]\n"
+ "st1h { z13.h }, p2, [x20]\n"
+ "st1h { z15.h }, p2, [x20, #1, MUL VL]\n"
"st1h { z8.h }, p2, [x20, #2, MUL VL]\n"
- "st1h { z7.h }, p2, [x20, #3, MUL VL]\n"
+ "st1h { z10.h }, p2, [x20, #3, MUL VL]\n"
"st1h { z6.h }, p2, [x20, #4, MUL VL]\n"
"st1h { z5.h }, p2, [x20, #5, MUL VL]\n"
- "st1h { z4.h }, p2, [x20, #6, MUL VL]\n"
- "st1h { z2.h }, p2, [x20, #7, MUL VL]\n"
+ "st1h { z30.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z3.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1h { z23.h }, p2, [x20, #-8, MUL VL]\n"
+ "st1h { z27.h }, p2, [x20, #-8, MUL VL]\n"
"st1h { z22.h }, p2, [x20, #-7, MUL VL]\n"
"st1h { z21.h }, p2, [x20, #-6, MUL VL]\n"
"st1h { z20.h }, p2, [x20, #-5, MUL VL]\n"
@@ -173,63 +173,63 @@ void sve_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
"st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x11, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z22.h }, p1/Z, [x12]\n"
- "ld1h { z21.h }, p1/Z, [x11]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z18.h }, p1/Z, [x10]\n"
- "ld1h { z24.h }, p0/Z, [x10, #1, MUL VL]\n"
- "mov x20, x22\n"
- "decw x27, ALL, MUL #4\n"
- "ld1h { z17.h }, p1/Z, [x9]\n"
- "ld1h { z16.h }, p0/Z, [x9, #1, MUL VL]\n"
- "zip1 z31.h, z22.h, z21.h\n"
- "zip2 z23.h, z22.h, z21.h\n"
- "ld1h { z30.h }, p1/Z, [x28]\n"
- "ld1h { z29.h }, p0/Z, [x28, #1, MUL VL]\n"
- "zip1 z22.h, z20.h, z19.h\n"
- "zip2 z28.h, z20.h, z19.h\n"
- "ld1h { z21.h }, p1/Z, [x25]\n"
- "ld1h { z27.h }, p0/Z, [x25, #1, MUL VL]\n"
- "zip1 z20.h, z18.h, z17.h\n"
- "zip2 z19.h, z18.h, z17.h\n"
- "ld1h { z18.h }, p1/Z, [x24]\n"
- "ld1h { z26.h }, p0/Z, [x24, #1, MUL VL]\n"
- "zip1 z25.h, z24.h, z16.h\n"
- "zip2 z24.h, z24.h, z16.h\n"
- "ld1h { z17.h }, p1/Z, [x23]\n"
- "ld1h { z16.h }, p0/Z, [x23, #1, MUL VL]\n"
- "st1h { z31.h }, p2, [x20]\n"
- "cmp x27, #0x0\n"
- "st1h { z23.h }, p2, [x20, #1, MUL VL]\n"
+ "mov x21, x11\n"
+ "mov x20, x9\n"
+ "decw x11, ALL, MUL #4\n"
+ "add x9, x9, %x[out_stride]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p0.h, XZR, x21\n"
+ "cmp x11, #0x0\n"
+ "ld1h { z21.h }, p1/Z, [x12]\n"
+ "ld1h { z20.h }, p1/Z, [x28]\n"
+ "ld1h { z19.h }, p1/Z, [x27]\n"
+ "ld1h { z18.h }, p1/Z, [x26]\n"
+ "ld1h { z1.h }, p1/Z, [x25]\n"
+ "ld1h { z0.h }, p1/Z, [x24]\n"
+ "ld1h { z17.h }, p0/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "zip1 z23.h, z21.h, z20.h\n"
+ "zip2 z31.h, z21.h, z20.h\n"
+ "ld1h { z22.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z21.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "zip1 z30.h, z19.h, z18.h\n"
+ "zip2 z29.h, z19.h, z18.h\n"
+ "ld1h { z28.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
"addvl x12, x12, #2\n"
- "addvl x11, x11, #2\n"
- "zip1 z23.h, z30.h, z21.h\n"
- "st1h { z22.h }, p2, [x20, #2, MUL VL]\n"
- "addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- "zip2 z22.h, z30.h, z21.h\n"
- "st1h { z28.h }, p2, [x20, #3, MUL VL]\n"
"addvl x28, x28, #2\n"
+ "ld1h { z27.h }, p1/Z, [x23]\n"
+ "ld1h { z26.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "zip1 z19.h, z17.h, z16.h\n"
+ "zip2 z18.h, z17.h, z16.h\n"
+ "ld1h { z17.h }, p1/Z, [x22]\n"
+ "ld1h { z16.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "zip1 z25.h, z22.h, z21.h\n"
+ "zip2 z24.h, z22.h, z21.h\n"
+ "st1h { z23.h }, p2, [x20]\n"
+ "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
+ "zip1 z23.h, z1.h, z0.h\n"
+ "st1h { z31.h }, p2, [x20, #1, MUL VL]\n"
"addvl x25, x25, #2\n"
- "zip1 z21.h, z29.h, z27.h\n"
- "st1h { z20.h }, p2, [x20, #4, MUL VL]\n"
"addvl x24, x24, #2\n"
+ "zip2 z22.h, z1.h, z0.h\n"
+ "st1h { z19.h }, p2, [x20, #2, MUL VL]\n"
"addvl x23, x23, #2\n"
- "zip2 z20.h, z29.h, z27.h\n"
- "st1h { z19.h }, p2, [x20, #5, MUL VL]\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
+ "addvl x22, x22, #2\n"
+ "zip1 z21.h, z28.h, z20.h\n"
+ "st1h { z18.h }, p2, [x20, #3, MUL VL]\n"
+ "zip2 z20.h, z28.h, z20.h\n"
+ "zip1 z19.h, z27.h, z17.h\n"
+ "st1h { z30.h }, p2, [x20, #4, MUL VL]\n"
+ "zip2 z18.h, z27.h, z17.h\n"
"zip1 z17.h, z26.h, z16.h\n"
+ "st1h { z29.h }, p2, [x20, #5, MUL VL]\n"
"zip2 z16.h, z26.h, z16.h\n"
+ "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
"st1h { z24.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
"st1h { z23.h }, p2, [x20, #-8, MUL VL]\n"
@@ -251,70 +251,70 @@ void sve_transpose_interleave_4VL_2x2(uint16_t *out, const uint16_t *in, size_t
"mov x12, %x[in]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #4\n"
- "add x11, x12, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x11, %x[in_stride]\n"
- "csel x11, x11, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "mov x9, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
+ "add x28, x12, %x[in_stride]\n"
+ "add %x[in], x28, %x[in_stride]\n"
+ "csel x28, x28, %x[pad_row], GT\n"
+ "cmp x21, x20\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z18.h }, p2/Z, [x12]\n"
+ "ld1h { z21.h }, p2/Z, [x12]\n"
"ld1h { z20.h }, p2/Z, [x12, #1, MUL VL]\n"
"sub x21, x21, x20\n"
+ "ld1h { z16.h }, p2/Z, [x28]\n"
+ "ld1h { z19.h }, p2/Z, [x28, #1, MUL VL]\n"
"cmp x21, x20\n"
- "ld1h { z17.h }, p2/Z, [x11]\n"
- "ld1h { z16.h }, p2/Z, [x11, #1, MUL VL]\n"
- "zip1 z23.h, z18.h, z17.h\n"
- "zip2 z19.h, z18.h, z17.h\n"
- "ld1h { z18.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x12, #3, MUL VL]\n"
- "zip1 z21.h, z20.h, z16.h\n"
- "zip2 z20.h, z20.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x11, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x11, #3, MUL VL]\n"
- "st1h { z23.h }, p2, [x22]\n"
+ "ld1h { z24.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z23.h }, p2/Z, [x12, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
- "st1h { z19.h }, p2, [x22, #1, MUL VL]\n"
- "addvl x11, x11, #4\n"
- "zip1 z19.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "st1h { z21.h }, p2, [x22, #2, MUL VL]\n"
- "zip1 z17.h, z22.h, z16.h\n"
- "zip2 z16.h, z22.h, z16.h\n"
- "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z19.h }, p2, [x22]\n"
- "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z18.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x28, #3, MUL VL]\n"
+ "addvl x28, x28, #4\n"
+ "zip1 z17.h, z21.h, z16.h\n"
+ "zip2 z21.h, z21.h, z16.h\n"
+ "zip1 z16.h, z20.h, z19.h\n"
+ "zip2 z20.h, z20.h, z19.h\n"
+ "st1h { z17.h }, p2, [x9]\n"
+ "zip1 z19.h, z24.h, z18.h\n"
+ "zip2 z18.h, z24.h, z18.h\n"
+ "zip1 z17.h, z23.h, z22.h\n"
+ "st1h { z21.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #2, MUL VL]\n"
+ "zip2 z16.h, z23.h, z22.h\n"
+ "st1h { z20.h }, p2, [x9, #3, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
+ "st1h { z19.h }, p2, [x9]\n"
+ "st1h { z18.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z17.h }, p2, [x9, #2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #3, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x12]\n"
- "ld1h { z17.h }, p0/Z, [x11]\n"
+ "decw x21, ALL, MUL #4\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z16.h }, p0/Z, [x11, #1, MUL VL]\n"
- "decw x21, ALL, MUL #4\n"
"cmp x21, #0x0\n"
+ "ld1h { z18.h }, p1/Z, [x12]\n"
+ "ld1h { z17.h }, p1/Z, [x28]\n"
+ "ld1h { z20.h }, p0/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #2\n"
+ "ld1h { z16.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
"zip1 z19.h, z18.h, z17.h\n"
"zip2 z18.h, z18.h, z17.h\n"
- "addvl x12, x12, #2\n"
- "addvl x11, x11, #2\n"
"zip1 z17.h, z20.h, z16.h\n"
"zip2 z16.h, z20.h, z16.h\n"
- "st1h { z19.h }, p2, [x22]\n"
- "st1h { z18.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z19.h }, p2, [x9]\n"
+ "st1h { z18.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z17.h }, p2, [x9, #2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #3, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp
index 68fe2d0cbe..69eccad912 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_1x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,216 +42,216 @@ void sve_transpose_interleave_6VL_1x8(uint8_t *out, const uint8_t *in, size_t wi
"ptrue p1.b\n"
"1:" // Main row loop: Head
"mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "cmp %x[height], #0x7\n"
+ "mov x9, %x[width]\n"
+ "cntb x28, ALL, MUL #3\n"
+ "mov x27, %x[out]\n"
+ "add x26, x10, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp %x[height], #0x7\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
+ "add %x[in], x20, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
+ "csel x21, x21, %x[pad_row], GE\n"
"cmp %x[height], #0x5\n"
- "mov x22, %x[width]\n"
- "cntb x21, ALL, MUL #3\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "csel x26, x26, %x[pad_row], GE\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "csel x23, x23, %x[pad_row], GE\n"
"cmp %x[height], #0x3\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "cmp x22, x21\n"
- "mov x20, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x9, x28\n"
"sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z21.b }, p1/Z, [x10]\n"
- "ld1b { z25.b }, p1/Z, [x9]\n"
- "sub x22, x22, x21\n"
- "cmp x22, x21\n"
- "ld1b { z20.b }, p1/Z, [x28]\n"
- "ld1b { z24.b }, p1/Z, [x27]\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
- "zip1 z7.b, z21.b, z19.b\n"
- "zip1 z6.b, z25.b, z18.b\n"
- "ld1b { z17.b }, p1/Z, [x24]\n"
- "ld1b { z16.b }, p1/Z, [x23]\n"
- "zip1 z28.b, z20.b, z17.b\n"
- "zip1 z27.b, z24.b, z16.b\n"
+ "ld1b { z1.b }, p1/Z, [x10]\n"
+ "ld1b { z0.b }, p1/Z, [x26]\n"
+ "sub x9, x9, x28\n"
+ "ld1b { z31.b }, p1/Z, [x25]\n"
+ "ld1b { z28.b }, p1/Z, [x24]\n"
+ "cmp x9, x28\n"
+ "ld1b { z27.b }, p1/Z, [x23]\n"
+ "ld1b { z26.b }, p1/Z, [x22]\n"
+ "ld1b { z25.b }, p1/Z, [x21]\n"
+ "ld1b { z24.b }, p1/Z, [x20]\n"
"ld1b { z23.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z22.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z5.b, z21.b, z19.b\n"
- "zip2 z4.b, z20.b, z17.b\n"
- "ld1b { z21.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip2 z3.b, z25.b, z18.b\n"
- "zip2 z2.b, z24.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x25, #1, MUL VL]\n"
- "zip1 z1.b, z23.b, z19.b\n"
- "zip1 z15.b, z22.b, z18.b\n"
- "ld1b { z17.b }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip1 z0.b, z21.b, z17.b\n"
- "zip1 z31.b, z20.b, z16.b\n"
- "ld1b { z26.b }, p1/Z, [x10, #2, MUL VL]\n"
- "ld1b { z30.b }, p1/Z, [x9, #2, MUL VL]\n"
- "zip2 z14.b, z23.b, z19.b\n"
- "zip2 z13.b, z21.b, z17.b\n"
- "ld1b { z25.b }, p1/Z, [x28, #2, MUL VL]\n"
- "ld1b { z24.b }, p1/Z, [x27, #2, MUL VL]\n"
- "zip2 z12.b, z22.b, z18.b\n"
- "zip2 z11.b, z20.b, z16.b\n"
- "ld1b { z23.b }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1b { z22.b }, p1/Z, [x25, #2, MUL VL]\n"
- "zip1 z10.b, z26.b, z23.b\n"
- "zip1 z9.b, z30.b, z22.b\n"
- "ld1b { z21.b }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1b { z17.b }, p1/Z, [x23, #2, MUL VL]\n"
- "zip1 z29.b, z25.b, z21.b\n"
- "zip1 z8.b, z24.b, z17.b\n"
- "zip1 z19.b, z7.b, z28.b\n"
- "zip1 z16.b, z6.b, z27.b\n"
+ "ld1b { z30.b }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1b { z22.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z21.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "zip1 z15.b, z1.b, z27.b\n"
+ "zip1 z9.b, z0.b, z26.b\n"
+ "ld1b { z20.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1b { z19.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "zip1 z18.b, z31.b, z25.b\n"
+ "zip1 z29.b, z28.b, z24.b\n"
+ "ld1b { z17.b }, p1/Z, [x21, #1, MUL VL]\n"
+ "ld1b { z16.b }, p1/Z, [x20, #1, MUL VL]\n"
+ "zip2 z14.b, z1.b, z27.b\n"
+ "zip2 z13.b, z31.b, z25.b\n"
+ "ld1b { z8.b }, p1/Z, [x10, #2, MUL VL]\n"
+ "ld1b { z7.b }, p1/Z, [x26, #2, MUL VL]\n"
+ "zip2 z12.b, z0.b, z26.b\n"
+ "zip2 z6.b, z28.b, z24.b\n"
+ "ld1b { z5.b }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1b { z4.b }, p1/Z, [x24, #2, MUL VL]\n"
+ "zip1 z3.b, z23.b, z20.b\n"
+ "zip1 z11.b, z30.b, z19.b\n"
+ "ld1b { z28.b }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z27.b }, p1/Z, [x22, #2, MUL VL]\n"
+ "zip1 z2.b, z22.b, z17.b\n"
+ "zip1 z1.b, z21.b, z16.b\n"
+ "ld1b { z26.b }, p1/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z25.b }, p1/Z, [x20, #2, MUL VL]\n"
+ "zip2 z24.b, z23.b, z20.b\n"
+ "zip2 z23.b, z22.b, z17.b\n"
+ "zip2 z22.b, z30.b, z19.b\n"
+ "zip2 z21.b, z21.b, z16.b\n"
"addvl x10, x10, #3\n"
- "addvl x9, x9, #3\n"
- "zip2 z28.b, z7.b, z28.b\n"
- "zip2 z18.b, z6.b, z27.b\n"
- "addvl x28, x28, #3\n"
- "addvl x27, x27, #3\n"
- "zip1 z27.b, z5.b, z4.b\n"
- "zip1 z20.b, z3.b, z2.b\n"
"addvl x26, x26, #3\n"
+ "zip1 z0.b, z8.b, z28.b\n"
+ "zip1 z10.b, z7.b, z27.b\n"
"addvl x25, x25, #3\n"
- "zip2 z7.b, z26.b, z23.b\n"
- "zip2 z26.b, z25.b, z21.b\n"
"addvl x24, x24, #3\n"
+ "zip1 z31.b, z5.b, z26.b\n"
+ "zip1 z30.b, z4.b, z25.b\n"
"addvl x23, x23, #3\n"
- "zip2 z6.b, z30.b, z22.b\n"
- "zip2 z25.b, z24.b, z17.b\n"
- "zip2 z5.b, z5.b, z4.b\n"
- "zip2 z4.b, z3.b, z2.b\n"
- "zip1 z3.b, z1.b, z0.b\n"
- "zip1 z2.b, z15.b, z31.b\n"
- "zip2 z1.b, z1.b, z0.b\n"
- "zip2 z0.b, z15.b, z31.b\n"
- "zip1 z31.b, z14.b, z13.b\n"
- "zip1 z30.b, z12.b, z11.b\n"
- "zip2 z24.b, z14.b, z13.b\n"
- "zip2 z23.b, z12.b, z11.b\n"
- "zip1 z22.b, z10.b, z29.b\n"
- "zip1 z21.b, z9.b, z8.b\n"
- "zip1 z17.b, z19.b, z16.b\n"
- "zip2 z16.b, z19.b, z16.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "zip1 z19.b, z28.b, z18.b\n"
- "zip2 z18.b, z28.b, z18.b\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z17.b, z27.b, z20.b\n"
- "zip2 z16.b, z27.b, z20.b\n"
- "st1b { z19.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #3, MUL VL]\n"
- "zip2 z29.b, z10.b, z29.b\n"
- "zip2 z20.b, z9.b, z8.b\n"
- "st1b { z17.b }, p1, [x20, #4, MUL VL]\n"
- "zip1 z28.b, z7.b, z26.b\n"
- "zip1 z27.b, z6.b, z25.b\n"
- "st1b { z16.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip2 z26.b, z7.b, z26.b\n"
- "zip2 z25.b, z6.b, z25.b\n"
- "zip1 z17.b, z5.b, z4.b\n"
- "zip2 z16.b, z5.b, z4.b\n"
- "st1b { z17.b }, p1, [x20]\n"
+ "addvl x22, x22, #3\n"
+ "zip1 z20.b, z15.b, z18.b\n"
+ "zip1 z19.b, z9.b, z29.b\n"
+ "addvl x21, x21, #3\n"
+ "addvl x20, x20, #3\n"
+ "zip2 z18.b, z15.b, z18.b\n"
+ "zip2 z16.b, z9.b, z29.b\n"
+ "zip1 z29.b, z14.b, z13.b\n"
+ "zip1 z17.b, z12.b, z6.b\n"
+ "zip2 z9.b, z8.b, z28.b\n"
+ "zip2 z28.b, z5.b, z26.b\n"
+ "zip2 z8.b, z7.b, z27.b\n"
+ "zip2 z27.b, z4.b, z25.b\n"
+ "zip2 z7.b, z14.b, z13.b\n"
+ "zip2 z6.b, z12.b, z6.b\n"
+ "zip1 z5.b, z3.b, z2.b\n"
+ "zip1 z4.b, z11.b, z1.b\n"
+ "zip2 z3.b, z3.b, z2.b\n"
+ "zip2 z2.b, z11.b, z1.b\n"
+ "zip1 z26.b, z24.b, z23.b\n"
+ "zip1 z25.b, z22.b, z21.b\n"
+ "zip2 z24.b, z24.b, z23.b\n"
+ "zip2 z23.b, z22.b, z21.b\n"
+ "zip1 z1.b, z0.b, z31.b\n"
+ "zip1 z22.b, z10.b, z30.b\n"
+ "zip1 z21.b, z20.b, z19.b\n"
+ "zip2 z20.b, z20.b, z19.b\n"
+ "zip1 z19.b, z18.b, z16.b\n"
+ "zip2 z18.b, z18.b, z16.b\n"
+ "zip1 z16.b, z29.b, z17.b\n"
+ "zip2 z17.b, z29.b, z17.b\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "zip2 z0.b, z0.b, z31.b\n"
+ "zip2 z31.b, z10.b, z30.b\n"
+ "st1b { z19.b }, p1, [x27, #2, MUL VL]\n"
+ "zip1 z30.b, z9.b, z28.b\n"
+ "zip1 z29.b, z8.b, z27.b\n"
+ "st1b { z18.b }, p1, [x27, #3, MUL VL]\n"
+ "zip2 z28.b, z9.b, z28.b\n"
+ "zip2 z27.b, z8.b, z27.b\n"
+ "st1b { z16.b }, p1, [x27, #4, MUL VL]\n"
+ "zip1 z21.b, z7.b, z6.b\n"
+ "zip2 z16.b, z7.b, z6.b\n"
+ "st1b { z17.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z20.b, z5.b, z4.b\n"
+ "zip2 z19.b, z5.b, z4.b\n"
"zip1 z18.b, z3.b, z2.b\n"
"zip2 z17.b, z3.b, z2.b\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z16.b, z1.b, z0.b\n"
- "zip2 z19.b, z1.b, z0.b\n"
- "st1b { z18.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #3, MUL VL]\n"
- "zip1 z18.b, z31.b, z30.b\n"
- "zip2 z17.b, z31.b, z30.b\n"
- "st1b { z16.b }, p1, [x20, #4, MUL VL]\n"
- "zip1 z16.b, z24.b, z23.b\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z16.b }, p1, [x27, #1, MUL VL]\n"
+ "zip1 z16.b, z26.b, z25.b\n"
+ "zip2 z26.b, z26.b, z25.b\n"
+ "st1b { z20.b }, p1, [x27, #2, MUL VL]\n"
+ "zip1 z25.b, z24.b, z23.b\n"
"zip2 z24.b, z24.b, z23.b\n"
- "st1b { z19.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 z23.b, z22.b, z21.b\n"
- "zip2 z22.b, z22.b, z21.b\n"
- "st1b { z18.b }, p1, [x20]\n"
- "zip1 z21.b, z29.b, z20.b\n"
- "zip2 z20.b, z29.b, z20.b\n"
- "st1b { z17.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z19.b, z28.b, z27.b\n"
- "zip2 z18.b, z28.b, z27.b\n"
- "st1b { z16.b }, p1, [x20, #2, MUL VL]\n"
- "zip1 z17.b, z26.b, z25.b\n"
- "zip2 z16.b, z26.b, z25.b\n"
- "st1b { z24.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z23.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z22.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
- "st1b { z21.b }, p1, [x20]\n"
- "st1b { z20.b }, p1, [x20, #1, MUL VL]\n"
- "st1b { z19.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z16.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z19.b }, p1, [x27, #3, MUL VL]\n"
+ "zip1 z23.b, z1.b, z22.b\n"
+ "zip2 z22.b, z1.b, z22.b\n"
+ "st1b { z18.b }, p1, [x27, #4, MUL VL]\n"
+ "zip1 z21.b, z0.b, z31.b\n"
+ "zip2 z20.b, z0.b, z31.b\n"
+ "st1b { z17.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "zip1 z19.b, z30.b, z29.b\n"
+ "zip2 z18.b, z30.b, z29.b\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "zip1 z17.b, z28.b, z27.b\n"
+ "zip2 z16.b, z28.b, z27.b\n"
+ "st1b { z26.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z25.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z24.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z23.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z22.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z18.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x22, 5f\n"
+ "cbz x9, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x22\n"
- "ld1b { z23.b }, p0/Z, [x10]\n"
- "ld1b { z27.b }, p0/Z, [x9]\n"
- "decd x22, ALL, MUL #6\n"
- "ld1b { z21.b }, p0/Z, [x28]\n"
- "ld1b { z26.b }, p0/Z, [x27]\n"
- "cmp x22, #0x0\n"
+ "whilelt p0.b, XZR, x9\n"
+ "decd x9, ALL, MUL #6\n"
+ "ld1b { z21.b }, p0/Z, [x10]\n"
"incd x10, ALL, MUL #6\n"
- "ld1b { z20.b }, p0/Z, [x26]\n"
- "ld1b { z19.b }, p0/Z, [x25]\n"
- "zip1 z25.b, z23.b, z20.b\n"
- "zip1 z24.b, z27.b, z19.b\n"
- "ld1b { z17.b }, p0/Z, [x24]\n"
- "ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z22.b, z21.b, z17.b\n"
- "zip1 z18.b, z26.b, z16.b\n"
- "zip2 z23.b, z23.b, z20.b\n"
- "zip2 z21.b, z21.b, z17.b\n"
- "incd x9, ALL, MUL #6\n"
- "incd x28, ALL, MUL #6\n"
- "zip2 z20.b, z27.b, z19.b\n"
- "zip2 z17.b, z26.b, z16.b\n"
- "incd x27, ALL, MUL #6\n"
+ "ld1b { z26.b }, p0/Z, [x26]\n"
"incd x26, ALL, MUL #6\n"
- "zip1 z19.b, z25.b, z22.b\n"
- "zip1 z16.b, z24.b, z18.b\n"
+ "ld1b { z20.b }, p0/Z, [x25]\n"
"incd x25, ALL, MUL #6\n"
+ "ld1b { z25.b }, p0/Z, [x24]\n"
"incd x24, ALL, MUL #6\n"
- "zip2 z22.b, z25.b, z22.b\n"
- "zip2 z18.b, z24.b, z18.b\n"
+ "ld1b { z18.b }, p0/Z, [x23]\n"
+ "ld1b { z19.b }, p0/Z, [x22]\n"
+ "ld1b { z17.b }, p0/Z, [x21]\n"
+ "cmp x9, #0x0\n"
"incd x23, ALL, MUL #6\n"
- "zip1 z21.b, z23.b, z21.b\n"
- "zip1 z20.b, z20.b, z17.b\n"
- "zip1 z17.b, z19.b, z16.b\n"
- "zip2 z16.b, z19.b, z16.b\n"
- "st1b { z17.b }, p1, [x20]\n"
- "zip1 z19.b, z22.b, z18.b\n"
- "zip2 z18.b, z22.b, z18.b\n"
- "st1b { z16.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z17.b, z21.b, z20.b\n"
- "zip2 z16.b, z21.b, z20.b\n"
- "st1b { z19.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z16.b }, p1, [x20, #5, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "ld1b { z16.b }, p0/Z, [x20]\n"
+ "incd x22, ALL, MUL #6\n"
+ "incd x21, ALL, MUL #6\n"
+ "zip1 z23.b, z21.b, z18.b\n"
+ "zip2 z24.b, z21.b, z18.b\n"
+ "incd x20, ALL, MUL #6\n"
+ "zip1 z22.b, z20.b, z17.b\n"
+ "zip1 z21.b, z26.b, z19.b\n"
+ "zip2 z18.b, z20.b, z17.b\n"
+ "zip1 z17.b, z25.b, z16.b\n"
+ "zip2 z20.b, z26.b, z19.b\n"
+ "zip2 z16.b, z25.b, z16.b\n"
+ "zip1 z19.b, z23.b, z22.b\n"
+ "zip2 z23.b, z23.b, z22.b\n"
+ "zip1 z22.b, z24.b, z18.b\n"
+ "zip1 z18.b, z21.b, z17.b\n"
+ "zip2 z17.b, z21.b, z17.b\n"
+ "zip1 z16.b, z20.b, z16.b\n"
+ "zip1 z21.b, z19.b, z18.b\n"
+ "zip2 z20.b, z19.b, z18.b\n"
+ "zip1 z19.b, z23.b, z17.b\n"
+ "zip2 z18.b, z23.b, z17.b\n"
+ "zip1 z17.b, z22.b, z16.b\n"
+ "zip2 z16.b, z22.b, z16.b\n"
+ "st1b { z21.b }, p1, [x27]\n"
+ "st1b { z20.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z18.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #5, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp
index 910fc6cb02..6c0a5c029b 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,125 +44,125 @@ void sve_transpose_interleave_6VL_2x4(uint16_t *out, const uint16_t *in, size_t
"blt 6f\n"
"1:" // Main row loop: Head
"mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #3\n"
- "add x25, x28, %x[in_stride]\n"
+ "mov x11, %x[width]\n"
+ "cnth x10, ALL, MUL #3\n"
+ "mov x9, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x28, x12, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "cmp x11, x10\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z18.h }, p2/Z, [x12]\n"
+ "ld1h { z19.h }, p2/Z, [x12]\n"
"ld1h { z13.h }, p2/Z, [x12, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z17.h }, p2/Z, [x11]\n"
- "ld1h { z12.h }, p2/Z, [x11, #1, MUL VL]\n"
- "mov x20, x22\n"
- "sub x27, x27, x26\n"
- "ld1h { z16.h }, p2/Z, [x10]\n"
- "ld1h { z11.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z23.h, z18.h, z16.h\n"
- "zip2 z29.h, z18.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x9]\n"
- "ld1h { z10.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z22.h, z17.h, z16.h\n"
- "zip2 z28.h, z17.h, z16.h\n"
- "ld1h { z27.h }, p2/Z, [x28]\n"
- "ld1h { z26.h }, p2/Z, [x25]\n"
+ "mov x21, x9\n"
+ "add x9, x9, %x[out_stride]\n"
+ "ld1h { z18.h }, p2/Z, [x28]\n"
+ "ld1h { z12.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "mov x20, x9\n"
+ "sub x11, x11, x10\n"
+ "ld1h { z16.h }, p2/Z, [x27]\n"
+ "ld1h { z11.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "cmp x11, x10\n"
+ "add x9, x9, %x[out_stride]\n"
+ "ld1h { z17.h }, p2/Z, [x26]\n"
+ "ld1h { z10.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z9.h }, p2/Z, [x25]\n"
+ "ld1h { z8.h }, p2/Z, [x24]\n"
+ "ld1h { z28.h }, p2/Z, [x23]\n"
+ "ld1h { z26.h }, p2/Z, [x22]\n"
+ "zip1 z24.h, z19.h, z16.h\n"
+ "zip2 z23.h, z19.h, z16.h\n"
+ "ld1h { z7.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z6.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "zip1 z16.h, z18.h, z17.h\n"
+ "zip2 z22.h, z18.h, z17.h\n"
+ "ld1h { z5.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z4.h }, p2/Z, [x26, #2, MUL VL]\n"
"zip1 z21.h, z13.h, z11.h\n"
"zip1 z20.h, z12.h, z10.h\n"
- "ld1h { z18.h }, p2/Z, [x24]\n"
- "ld1h { z19.h }, p2/Z, [x23]\n"
- "zip1 z17.h, z27.h, z18.h\n"
- "zip1 z16.h, z26.h, z19.h\n"
- "ld1h { z9.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z8.h }, p2/Z, [x11, #2, MUL VL]\n"
+ "ld1h { z3.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z2.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip1 z19.h, z9.h, z28.h\n"
+ "zip1 z18.h, z8.h, z26.h\n"
+ "ld1h { z27.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z1.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "zip1 z17.h, z24.h, z16.h\n"
+ "zip2 z16.h, z24.h, z16.h\n"
+ "ld1h { z0.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z31.h }, p2/Z, [x24, #2, MUL VL]\n"
"zip1 z25.h, z23.h, z22.h\n"
"zip2 z24.h, z23.h, z22.h\n"
- "ld1h { z23.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z7.h }, p2/Z, [x9, #2, MUL VL]\n"
- "zip1 z22.h, z29.h, z28.h\n"
- "zip2 z6.h, z29.h, z28.h\n"
- "ld1h { z28.h }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1h { z5.h }, p2/Z, [x25, #1, MUL VL]\n"
- "zip1 z4.h, z21.h, z20.h\n"
- "zip2 z3.h, z21.h, z20.h\n"
- "ld1h { z21.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z20.h }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z2.h, z17.h, z16.h\n"
- "zip2 z1.h, z17.h, z16.h\n"
- "ld1h { z0.h }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1h { z31.h }, p2/Z, [x25, #2, MUL VL]\n"
- "zip2 z18.h, z27.h, z18.h\n"
- "zip2 z17.h, z26.h, z19.h\n"
- "ld1h { z30.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z29.h }, p2/Z, [x23, #2, MUL VL]\n"
- "zip1 z19.h, z28.h, z21.h\n"
- "zip1 z16.h, z5.h, z20.h\n"
- "st1h { z25.h }, p2, [x21]\n"
- "zip2 z27.h, z13.h, z11.h\n"
- "zip2 z26.h, z12.h, z10.h\n"
- "cmp x27, x26\n"
- "st1h { z24.h }, p2, [x21, #1, MUL VL]\n"
- "zip1 z25.h, z9.h, z23.h\n"
- "zip1 z24.h, z8.h, z7.h\n"
+ "ld1h { z30.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z29.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "zip1 z23.h, z21.h, z20.h\n"
+ "zip2 z22.h, z21.h, z20.h\n"
+ "zip1 z21.h, z19.h, z18.h\n"
+ "zip2 z20.h, z19.h, z18.h\n"
+ "st1h { z17.h }, p2, [x21]\n"
"addvl x12, x12, #3\n"
- "st1h { z22.h }, p2, [x21, #2, MUL VL]\n"
- "zip2 z23.h, z9.h, z23.h\n"
- "zip2 z22.h, z8.h, z7.h\n"
- "addvl x11, x11, #3\n"
- "st1h { z6.h }, p2, [x21, #3, MUL VL]\n"
- "zip2 z28.h, z28.h, z21.h\n"
- "zip2 z21.h, z5.h, z20.h\n"
- "addvl x10, x10, #3\n"
- "st1h { z4.h }, p2, [x21, #4, MUL VL]\n"
- "zip1 z20.h, z18.h, z17.h\n"
- "zip2 z18.h, z18.h, z17.h\n"
- "addvl x9, x9, #3\n"
- "st1h { z3.h }, p2, [x21, #5, MUL VL]\n"
- "zip1 z17.h, z19.h, z16.h\n"
- "zip2 z16.h, z19.h, z16.h\n"
+ "zip2 z19.h, z9.h, z28.h\n"
+ "zip2 z18.h, z8.h, z26.h\n"
+ "st1h { z16.h }, p2, [x21, #1, MUL VL]\n"
"addvl x28, x28, #3\n"
- "st1h { z2.h }, p2, [x21, #6, MUL VL]\n"
- "zip1 z19.h, z27.h, z26.h\n"
- "zip2 z27.h, z27.h, z26.h\n"
+ "zip1 z17.h, z3.h, z27.h\n"
+ "zip1 z16.h, z2.h, z1.h\n"
+ "st1h { z25.h }, p2, [x21, #2, MUL VL]\n"
+ "addvl x27, x27, #3\n"
+ "st1h { z24.h }, p2, [x21, #3, MUL VL]\n"
+ "zip2 z26.h, z13.h, z11.h\n"
+ "zip2 z25.h, z12.h, z10.h\n"
+ "addvl x26, x26, #3\n"
+ "st1h { z23.h }, p2, [x21, #4, MUL VL]\n"
+ "zip1 z28.h, z7.h, z5.h\n"
+ "zip1 z24.h, z6.h, z4.h\n"
"addvl x25, x25, #3\n"
- "st1h { z1.h }, p2, [x21, #7, MUL VL]\n"
+ "st1h { z22.h }, p2, [x21, #5, MUL VL]\n"
+ "zip2 z23.h, z7.h, z5.h\n"
+ "zip2 z22.h, z6.h, z4.h\n"
+ "addvl x24, x24, #3\n"
+ "st1h { z21.h }, p2, [x21, #6, MUL VL]\n"
+ "zip2 z27.h, z3.h, z27.h\n"
+ "zip2 z21.h, z2.h, z1.h\n"
+ "addvl x23, x23, #3\n"
+ "st1h { z20.h }, p2, [x21, #7, MUL VL]\n"
"addvl x21, x21, #12\n"
- "zip1 z26.h, z25.h, z24.h\n"
- "zip2 z25.h, z25.h, z24.h\n"
+ "zip1 z20.h, z19.h, z18.h\n"
+ "zip2 z19.h, z19.h, z18.h\n"
+ "zip1 z18.h, z17.h, z16.h\n"
+ "zip2 z17.h, z17.h, z16.h\n"
+ "addvl x22, x22, #3\n"
+ "zip1 z16.h, z26.h, z25.h\n"
+ "zip2 z26.h, z26.h, z25.h\n"
"st1h { z20.h }, p2, [x21, #-4, MUL VL]\n"
+ "st1h { z19.h }, p2, [x21, #-3, MUL VL]\n"
+ "zip1 z25.h, z28.h, z24.h\n"
+ "zip2 z19.h, z28.h, z24.h\n"
+ "st1h { z18.h }, p2, [x21, #-2, MUL VL]\n"
"zip1 z24.h, z23.h, z22.h\n"
"zip2 z23.h, z23.h, z22.h\n"
- "addvl x24, x24, #3\n"
- "st1h { z18.h }, p2, [x21, #-3, MUL VL]\n"
- "zip1 z22.h, z28.h, z21.h\n"
- "zip2 z21.h, z28.h, z21.h\n"
- "addvl x23, x23, #3\n"
- "st1h { z17.h }, p2, [x21, #-2, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21, #-1, MUL VL]\n"
+ "zip1 z22.h, z27.h, z21.h\n"
+ "zip2 z21.h, z27.h, z21.h\n"
"zip1 z18.h, z0.h, z30.h\n"
"zip1 z17.h, z31.h, z29.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z16.h }, p2, [x21, #-1, MUL VL]\n"
+ "st1h { z16.h }, p2, [x20]\n"
"zip2 z20.h, z0.h, z30.h\n"
"zip2 z16.h, z31.h, z29.h\n"
- "st1h { z19.h }, p2, [x20]\n"
+ "st1h { z26.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z25.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z19.h }, p2, [x20, #3, MUL VL]\n"
"zip1 z19.h, z18.h, z17.h\n"
"zip2 z18.h, z18.h, z17.h\n"
- "st1h { z27.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z24.h }, p2, [x20, #4, MUL VL]\n"
"zip1 z17.h, z20.h, z16.h\n"
"zip2 z16.h, z20.h, z16.h\n"
- "st1h { z26.h }, p2, [x20, #2, MUL VL]\n"
- "st1h { z25.h }, p2, [x20, #3, MUL VL]\n"
- "st1h { z24.h }, p2, [x20, #4, MUL VL]\n"
"st1h { z23.h }, p2, [x20, #5, MUL VL]\n"
"st1h { z22.h }, p2, [x20, #6, MUL VL]\n"
"st1h { z21.h }, p2, [x20, #7, MUL VL]\n"
@@ -173,79 +173,79 @@ void sve_transpose_interleave_6VL_2x4(uint16_t *out, const uint16_t *in, size_t
"st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x11, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z17.h }, p1/Z, [x12]\n"
- "ld1h { z19.h }, p1/Z, [x11]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z22.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z21.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z16.h }, p1/Z, [x10]\n"
- "ld1h { z20.h }, p0/Z, [x10, #1, MUL VL]\n"
- "zip1 z25.h, z17.h, z16.h\n"
- "zip2 z24.h, z17.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z17.h }, p0/Z, [x9, #1, MUL VL]\n"
- "zip1 z16.h, z19.h, z18.h\n"
- "zip2 z19.h, z19.h, z18.h\n"
- "ld1h { z0.h }, p1/Z, [x28]\n"
- "ld1h { z31.h }, p1/Z, [x25]\n"
- "zip1 z23.h, z22.h, z20.h\n"
- "zip1 z22.h, z21.h, z17.h\n"
- "ld1h { z30.h }, p1/Z, [x24]\n"
- "ld1h { z29.h }, p1/Z, [x23]\n"
- "zip1 z21.h, z0.h, z30.h\n"
- "zip1 z18.h, z31.h, z29.h\n"
- "ld1h { z28.h }, p0/Z, [x28, #1, MUL VL]\n"
- "ld1h { z27.h }, p0/Z, [x25, #1, MUL VL]\n"
- "mov x20, x22\n"
- "decd x27, ALL, MUL #6\n"
- "ld1h { z20.h }, p0/Z, [x24, #1, MUL VL]\n"
- "ld1h { z26.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "mov x21, x11\n"
+ "mov x20, x9\n"
+ "decd x11, ALL, MUL #6\n"
+ "add x9, x9, %x[out_stride]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p0.h, XZR, x21\n"
+ "cmp x11, #0x0\n"
+ "ld1h { z23.h }, p1/Z, [x12]\n"
+ "ld1h { z25.h }, p1/Z, [x28]\n"
+ "ld1h { z22.h }, p1/Z, [x27]\n"
+ "ld1h { z19.h }, p1/Z, [x26]\n"
+ "ld1h { z1.h }, p1/Z, [x25]\n"
+ "ld1h { z0.h }, p1/Z, [x24]\n"
+ "ld1h { z21.h }, p0/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x28, #1, MUL VL]\n"
"addvl x12, x12, #1\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
- "zip1 z17.h, z25.h, z16.h\n"
- "zip2 z16.h, z25.h, z16.h\n"
"addvl x28, x28, #1\n"
- "addvl x25, x25, #1\n"
- "zip1 z25.h, z24.h, z19.h\n"
- "zip2 z19.h, z24.h, z19.h\n"
- "addvl x24, x24, #1\n"
- "addvl x23, x23, #1\n"
+ "ld1h { z18.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z17.h }, p0/Z, [x26, #1, MUL VL]\n"
"zip1 z24.h, z23.h, z22.h\n"
+ "zip1 z16.h, z25.h, z19.h\n"
+ "ld1h { z31.h }, p1/Z, [x23]\n"
+ "ld1h { z30.h }, p1/Z, [x22]\n"
"zip2 z23.h, z23.h, z22.h\n"
+ "zip2 z19.h, z25.h, z19.h\n"
+ "ld1h { z29.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z28.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "addvl x27, x27, #1\n"
+ "addvl x26, x26, #1\n"
+ "ld1h { z27.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z26.h }, p0/Z, [x22, #1, MUL VL]\n"
"zip1 z22.h, z21.h, z18.h\n"
- "zip2 z21.h, z21.h, z18.h\n"
+ "zip1 z21.h, z20.h, z17.h\n"
+ "zip1 z20.h, z1.h, z31.h\n"
+ "zip1 z18.h, z0.h, z30.h\n"
+ "addvl x25, x25, #1\n"
+ "addvl x24, x24, #1\n"
+ "addvl x23, x23, #1\n"
+ "addvl x22, x22, #1\n"
+ "zip1 z17.h, z24.h, z16.h\n"
+ "zip2 z16.h, z24.h, z16.h\n"
+ "zip1 z25.h, z23.h, z19.h\n"
+ "zip2 z24.h, z23.h, z19.h\n"
+ "incd x12, ALL, MUL #4\n"
+ "incd x28, ALL, MUL #4\n"
+ "zip1 z19.h, z22.h, z21.h\n"
+ "zip2 z23.h, z22.h, z21.h\n"
+ "incd x27, ALL, MUL #4\n"
+ "incd x26, ALL, MUL #4\n"
+ "zip1 z22.h, z20.h, z18.h\n"
+ "zip2 z21.h, z20.h, z18.h\n"
"st1h { z17.h }, p2, [x20]\n"
- "cmp x27, #0x0\n"
- "zip2 z18.h, z0.h, z30.h\n"
- "zip2 z17.h, z31.h, z29.h\n"
+ "incd x25, ALL, MUL #4\n"
+ "zip2 z18.h, z1.h, z31.h\n"
+ "zip2 z17.h, z0.h, z30.h\n"
"st1h { z16.h }, p2, [x20, #1, MUL VL]\n"
- "incd x12, ALL, MUL #4\n"
- "zip1 z20.h, z28.h, z20.h\n"
- "zip1 z16.h, z27.h, z26.h\n"
+ "incd x24, ALL, MUL #4\n"
+ "zip1 z20.h, z29.h, z27.h\n"
+ "zip1 z16.h, z28.h, z26.h\n"
"st1h { z25.h }, p2, [x20, #2, MUL VL]\n"
- "incd x11, ALL, MUL #4\n"
- "st1h { z19.h }, p2, [x20, #3, MUL VL]\n"
- "incd x10, ALL, MUL #4\n"
- "incd x9, ALL, MUL #4\n"
+ "incd x23, ALL, MUL #4\n"
+ "st1h { z24.h }, p2, [x20, #3, MUL VL]\n"
+ "incd x22, ALL, MUL #4\n"
+ "st1h { z19.h }, p2, [x20, #4, MUL VL]\n"
"zip1 z19.h, z18.h, z17.h\n"
- "st1h { z24.h }, p2, [x20, #4, MUL VL]\n"
- "incd x28, ALL, MUL #4\n"
- "incd x25, ALL, MUL #4\n"
"zip2 z18.h, z18.h, z17.h\n"
"st1h { z23.h }, p2, [x20, #5, MUL VL]\n"
- "incd x24, ALL, MUL #4\n"
- "incd x23, ALL, MUL #4\n"
"zip1 z17.h, z20.h, z16.h\n"
- "st1h { z22.h }, p2, [x20, #6, MUL VL]\n"
"zip2 z16.h, z20.h, z16.h\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z22.h }, p2, [x20, #6, MUL VL]\n"
"st1h { z21.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
"st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
@@ -261,123 +261,123 @@ void sve_transpose_interleave_6VL_2x4(uint16_t *out, const uint16_t *in, size_t
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #3\n"
- "add x9, x10, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x9, %x[in_stride]\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "csel x10, x10, %x[pad_row], GE\n"
+ "mov x9, %x[out]\n"
+ "add x28, x12, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add %x[in], x26, %x[in_stride]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "csel x27, x27, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x11, x11, %x[pad_row], GT\n"
+ "csel x28, x28, %x[pad_row], GT\n"
"cmp x21, x20\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z18.h }, p2/Z, [x12]\n"
- "ld1h { z24.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x12]\n"
+ "ld1h { z28.h }, p2/Z, [x12, #1, MUL VL]\n"
"sub x21, x21, x20\n"
+ "ld1h { z21.h }, p2/Z, [x28]\n"
+ "ld1h { z27.h }, p2/Z, [x28, #1, MUL VL]\n"
"cmp x21, x20\n"
- "ld1h { z17.h }, p2/Z, [x11]\n"
- "ld1h { z23.h }, p2/Z, [x11, #1, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x10]\n"
- "ld1h { z22.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z31.h, z18.h, z16.h\n"
- "zip2 z30.h, z18.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x9]\n"
- "ld1h { z20.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z29.h, z17.h, z16.h\n"
- "zip2 z28.h, z17.h, z16.h\n"
- "ld1h { z19.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x11, #2, MUL VL]\n"
- "zip1 z27.h, z24.h, z22.h\n"
- "zip1 z21.h, z23.h, z20.h\n"
- "ld1h { z17.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x9, #2, MUL VL]\n"
- "zip2 z26.h, z24.h, z22.h\n"
- "zip2 z20.h, z23.h, z20.h\n"
- "zip1 z25.h, z19.h, z17.h\n"
- "zip1 z24.h, z18.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x27]\n"
+ "ld1h { z26.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x26]\n"
+ "ld1h { z20.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z25.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z29.h }, p2/Z, [x28, #2, MUL VL]\n"
"addvl x12, x12, #3\n"
- "addvl x11, x11, #3\n"
- "zip2 z23.h, z19.h, z17.h\n"
- "zip2 z22.h, z18.h, z16.h\n"
- "addvl x10, x10, #3\n"
- "addvl x9, x9, #3\n"
- "zip1 z17.h, z31.h, z29.h\n"
- "zip2 z16.h, z31.h, z29.h\n"
- "st1h { z17.h }, p2, [x22]\n"
- "zip1 z19.h, z30.h, z28.h\n"
- "zip2 z18.h, z30.h, z28.h\n"
- "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
- "zip1 z17.h, z27.h, z21.h\n"
- "zip2 z16.h, z27.h, z21.h\n"
- "st1h { z19.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #3, MUL VL]\n"
- "zip1 z21.h, z26.h, z20.h\n"
- "zip2 z20.h, z26.h, z20.h\n"
- "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
- "zip1 z19.h, z25.h, z24.h\n"
- "zip2 z18.h, z25.h, z24.h\n"
- "st1h { z16.h }, p2, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.h, z23.h, z22.h\n"
- "zip2 z16.h, z23.h, z22.h\n"
- "st1h { z21.h }, p2, [x22]\n"
- "st1h { z20.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x28, x28, #3\n"
+ "ld1h { z24.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z19.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "zip1 z18.h, z22.h, z17.h\n"
+ "zip2 z23.h, z22.h, z17.h\n"
+ "zip1 z17.h, z21.h, z16.h\n"
+ "zip2 z16.h, z21.h, z16.h\n"
+ "addvl x27, x27, #3\n"
+ "addvl x26, x26, #3\n"
+ "zip1 z22.h, z28.h, z26.h\n"
+ "zip1 z21.h, z27.h, z20.h\n"
+ "zip2 z28.h, z28.h, z26.h\n"
+ "zip2 z20.h, z27.h, z20.h\n"
+ "zip1 z27.h, z25.h, z24.h\n"
+ "zip1 z26.h, z29.h, z19.h\n"
+ "zip2 z25.h, z25.h, z24.h\n"
+ "zip2 z24.h, z29.h, z19.h\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z17.h, z23.h, z16.h\n"
+ "zip2 z16.h, z23.h, z16.h\n"
+ "zip1 z23.h, z22.h, z21.h\n"
+ "zip2 z22.h, z22.h, z21.h\n"
+ "st1h { z19.h }, p2, [x9]\n"
+ "st1h { z18.h }, p2, [x9, #1, MUL VL]\n"
+ "zip1 z21.h, z28.h, z20.h\n"
+ "zip2 z20.h, z28.h, z20.h\n"
+ "st1h { z17.h }, p2, [x9, #2, MUL VL]\n"
+ "zip1 z19.h, z27.h, z26.h\n"
+ "zip2 z18.h, z27.h, z26.h\n"
+ "st1h { z16.h }, p2, [x9, #3, MUL VL]\n"
+ "zip1 z17.h, z25.h, z24.h\n"
+ "zip2 z16.h, z25.h, z24.h\n"
+ "st1h { z23.h }, p2, [x9, #4, MUL VL]\n"
+ "st1h { z22.h }, p2, [x9, #5, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
+ "st1h { z21.h }, p2, [x9]\n"
+ "st1h { z20.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z19.h }, p2, [x9, #2, MUL VL]\n"
+ "st1h { z18.h }, p2, [x9, #3, MUL VL]\n"
+ "st1h { z17.h }, p2, [x9, #4, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #5, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
+ "decd x21, ALL, MUL #6\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z22.h }, p1/Z, [x12]\n"
- "ld1h { z25.h }, p1/Z, [x11]\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z24.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z21.h }, p1/Z, [x10]\n"
- "ld1h { z20.h }, p0/Z, [x10, #1, MUL VL]\n"
- "decd x21, ALL, MUL #6\n"
- "addvl x12, x12, #1\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z17.h }, p0/Z, [x9, #1, MUL VL]\n"
- "addvl x11, x11, #1\n"
- "addvl x10, x10, #1\n"
- "addvl x9, x9, #1\n"
- "zip1 z19.h, z22.h, z21.h\n"
- "zip1 z16.h, z25.h, z18.h\n"
"cmp x21, #0x0\n"
- "zip2 z22.h, z22.h, z21.h\n"
+ "ld1h { z25.h }, p1/Z, [x12]\n"
+ "ld1h { z24.h }, p1/Z, [x28]\n"
+ "ld1h { z18.h }, p1/Z, [x27]\n"
+ "ld1h { z17.h }, p1/Z, [x26]\n"
+ "ld1h { z22.h }, p0/Z, [x12, #1, MUL VL]\n"
+ "addvl x12, x12, #1\n"
+ "ld1h { z23.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #1\n"
+ "ld1h { z21.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "addvl x27, x27, #1\n"
+ "addvl x26, x26, #1\n"
+ "zip1 z20.h, z25.h, z18.h\n"
+ "zip1 z19.h, z24.h, z17.h\n"
"zip2 z18.h, z25.h, z18.h\n"
+ "zip2 z17.h, z24.h, z17.h\n"
"incd x12, ALL, MUL #4\n"
- "incd x11, ALL, MUL #4\n"
- "zip1 z21.h, z24.h, z20.h\n"
- "zip1 z20.h, z23.h, z17.h\n"
- "incd x10, ALL, MUL #4\n"
- "incd x9, ALL, MUL #4\n"
- "zip1 z17.h, z19.h, z16.h\n"
- "zip2 z16.h, z19.h, z16.h\n"
- "st1h { z17.h }, p2, [x22]\n"
- "zip1 z19.h, z22.h, z18.h\n"
- "zip2 z18.h, z22.h, z18.h\n"
- "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
- "zip1 z17.h, z21.h, z20.h\n"
- "zip2 z16.h, z21.h, z20.h\n"
- "st1h { z19.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "incd x28, ALL, MUL #4\n"
+ "zip1 z22.h, z22.h, z21.h\n"
+ "zip1 z16.h, z23.h, z16.h\n"
+ "incd x27, ALL, MUL #4\n"
+ "incd x26, ALL, MUL #4\n"
+ "zip1 z21.h, z20.h, z19.h\n"
+ "zip2 z20.h, z20.h, z19.h\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z17.h, z22.h, z16.h\n"
+ "zip2 z16.h, z22.h, z16.h\n"
+ "st1h { z21.h }, p2, [x9]\n"
+ "st1h { z20.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z19.h }, p2, [x9, #2, MUL VL]\n"
+ "st1h { z18.h }, p2, [x9, #3, MUL VL]\n"
+ "st1h { z17.h }, p2, [x9, #4, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #5, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp
index f0f10d2f43..8ff9551c19 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,176 +39,176 @@ void sve_transpose_interleave_6VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
size_t out_stride = 6 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "ptrue p3.b\n"
+ "ptrue p2.b\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
+ "mov x25, %x[width]\n"
"cnth x20, ALL, MUL #3\n"
- "add x22, x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x22, %x[in_stride]\n"
- "csel x22, x22, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "mov x24, %x[out]\n"
+ "add x23, x26, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x22, x22, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x23, x20\n"
- "mov x21, %x[out]\n"
+ "csel x23, x23, %x[pad_row], GT\n"
+ "cmp x25, x20\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z17.s }, p3/Z, [x26]\n"
- "ld1w { z18.s }, p3/Z, [x26, #1, MUL VL]\n"
- "sub x23, x23, x20\n"
- "cmp x23, x20\n"
- "ld1w { z19.s }, p3/Z, [x26, #2, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24]\n"
- "zip1 z21.s, z17.s, z16.s\n"
- "zip2 z20.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24, #2, MUL VL]\n"
- "zip1 z29.s, z18.s, z17.s\n"
- "zip2 z28.s, z18.s, z17.s\n"
- "ld1w { z17.s }, p3/Z, [x26, #3, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x26, #4, MUL VL]\n"
- "zip1 z27.s, z19.s, z16.s\n"
- "zip2 z26.s, z19.s, z16.s\n"
- "ld1w { z19.s }, p3/Z, [x26, #5, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24, #3, MUL VL]\n"
- "zip1 z25.s, z17.s, z16.s\n"
- "zip2 z24.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x24, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x24, #5, MUL VL]\n"
- "zip1 z12.s, z18.s, z17.s\n"
- "zip2 z11.s, z18.s, z17.s\n"
- "ld1w { z18.s }, p3/Z, [x25]\n"
- "ld1w { z23.s }, p3/Z, [x25, #1, MUL VL]\n"
- "zip1 z10.s, z19.s, z16.s\n"
- "zip2 z9.s, z19.s, z16.s\n"
- "ld1w { z22.s }, p3/Z, [x25, #2, MUL VL]\n"
- "ld1w { z17.s }, p3/Z, [x22]\n"
- ".inst 0x658aaea8 // bfcvt z8.h, p3/M, z21.s\n"
- "zip1 z7.s, z18.s, z17.s\n"
- "ld1w { z16.s }, p3/Z, [x22, #1, MUL VL]\n"
- "ld1w { z21.s }, p3/Z, [x22, #2, MUL VL]\n"
- ".inst 0x658aae86 // bfcvt z6.h, p3/M, z20.s\n"
- "zip2 z5.s, z18.s, z17.s\n"
- "ld1w { z20.s }, p3/Z, [x25, #3, MUL VL]\n"
- "ld1w { z19.s }, p3/Z, [x25, #4, MUL VL]\n"
- ".inst 0x658aafa4 // bfcvt z4.h, p3/M, z29.s\n"
- "zip1 z3.s, z23.s, z16.s\n"
- "ld1w { z2.s }, p3/Z, [x25, #5, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x22, #3, MUL VL]\n"
- ".inst 0x658aaf81 // bfcvt z1.h, p3/M, z28.s\n"
- "zip2 z0.s, z23.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x22, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x22, #5, MUL VL]\n"
- ".inst 0x658aaf7f // bfcvt z31.h, p3/M, z27.s\n"
- "zip1 z30.s, z22.s, z21.s\n"
- ".inst 0x658aaf5d // bfcvt z29.h, p3/M, z26.s\n"
- "zip2 z28.s, z22.s, z21.s\n"
- "addvl x26, x26, #6\n"
- "addvl x25, x25, #6\n"
- ".inst 0x658aaf3b // bfcvt z27.h, p3/M, z25.s\n"
+ "ld1w { z17.s }, p2/Z, [x26]\n"
+ "ld1w { z24.s }, p2/Z, [x26, #1, MUL VL]\n"
+ "sub x25, x25, x20\n"
+ "ld1w { z23.s }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x22]\n"
+ "cmp x25, x20\n"
+ "ld1w { z22.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #3, MUL VL]\n"
+ "zip1 z4.s, z17.s, z16.s\n"
+ "zip2 z3.s, z17.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x22, #5, MUL VL]\n"
+ "zip1 z2.s, z24.s, z22.s\n"
+ "zip2 z1.s, z24.s, z22.s\n"
+ "ld1w { z0.s }, p2/Z, [x23]\n"
+ "ld1w { z31.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip1 z30.s, z23.s, z21.s\n"
+ "zip2 z29.s, z23.s, z21.s\n"
+ "ld1w { z28.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x21]\n"
"zip1 z26.s, z20.s, z18.s\n"
- "addvl x24, x24, #6\n"
- "addvl x22, x22, #6\n"
- ".inst 0x658aaf19 // bfcvt z25.h, p3/M, z24.s\n"
- "zip2 z24.s, z20.s, z18.s\n"
- ".inst 0x658aad97 // bfcvt z23.h, p3/M, z12.s\n"
+ "zip2 z25.s, z20.s, z18.s\n"
+ "ld1w { z24.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1w { z23.s }, p2/Z, [x21, #2, MUL VL]\n"
"zip1 z22.s, z19.s, z17.s\n"
- ".inst 0x658aad75 // bfcvt z21.h, p3/M, z11.s\n"
- "zip2 z20.s, z19.s, z17.s\n"
- ".inst 0x658aad53 // bfcvt z19.h, p3/M, z10.s\n"
- "zip1 z18.s, z2.s, z16.s\n"
- ".inst 0x658aad31 // bfcvt z17.h, p3/M, z9.s\n"
- "zip2 z16.s, z2.s, z16.s\n"
- ".inst 0x648aace8 // bfcvtnt z8.h, p3/M, z7.s\n"
- ".inst 0x648aaca6 // bfcvtnt z6.h, p3/M, z5.s\n"
- "st1h { z8.h }, p3, [x21]\n"
- ".inst 0x648aac64 // bfcvtnt z4.h, p3/M, z3.s\n"
- ".inst 0x648aac01 // bfcvtnt z1.h, p3/M, z0.s\n"
- "st1h { z6.h }, p3, [x21, #1, MUL VL]\n"
- ".inst 0x648aafdf // bfcvtnt z31.h, p3/M, z30.s\n"
- ".inst 0x648aaf9d // bfcvtnt z29.h, p3/M, z28.s\n"
- "st1h { z4.h }, p3, [x21, #2, MUL VL]\n"
- "st1h { z1.h }, p3, [x21, #3, MUL VL]\n"
- ".inst 0x648aaf5b // bfcvtnt z27.h, p3/M, z26.s\n"
- ".inst 0x648aaf19 // bfcvtnt z25.h, p3/M, z24.s\n"
- "st1h { z31.h }, p3, [x21, #4, MUL VL]\n"
- ".inst 0x648aaed7 // bfcvtnt z23.h, p3/M, z22.s\n"
- ".inst 0x648aae95 // bfcvtnt z21.h, p3/M, z20.s\n"
- "st1h { z29.h }, p3, [x21, #5, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
- ".inst 0x648aae53 // bfcvtnt z19.h, p3/M, z18.s\n"
- ".inst 0x648aae11 // bfcvtnt z17.h, p3/M, z16.s\n"
- "st1h { z27.h }, p3, [x21]\n"
- "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
- "st1h { z23.h }, p3, [x21, #2, MUL VL]\n"
- "st1h { z21.h }, p3, [x21, #3, MUL VL]\n"
- "st1h { z19.h }, p3, [x21, #4, MUL VL]\n"
- "st1h { z17.h }, p3, [x21, #5, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip2 z10.s, z19.s, z17.s\n"
+ "ld1w { z21.s }, p2/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "zip1 z19.s, z5.s, z16.s\n"
+ "zip2 z9.s, z5.s, z16.s\n"
+ "ld1w { z8.s }, p2/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x21, #3, MUL VL]\n"
+ ".inst 0x658aa887 // bfcvt z7.h, p2/M, z4.s\n"
+ "zip1 z6.s, z0.s, z27.s\n"
+ "ld1w { z17.s }, p2/Z, [x21, #4, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x21, #5, MUL VL]\n"
+ ".inst 0x658aa865 // bfcvt z5.h, p2/M, z3.s\n"
+ "zip2 z4.s, z0.s, z27.s\n"
+ ".inst 0x658aa843 // bfcvt z3.h, p2/M, z2.s\n"
+ "zip1 z2.s, z31.s, z24.s\n"
+ "addvl x26, x26, #6\n"
+ "addvl x23, x23, #6\n"
+ ".inst 0x658aa821 // bfcvt z1.h, p2/M, z1.s\n"
+ "zip2 z0.s, z31.s, z24.s\n"
+ "addvl x22, x22, #6\n"
+ "addvl x21, x21, #6\n"
+ ".inst 0x658aabdf // bfcvt z31.h, p2/M, z30.s\n"
+ "zip1 z30.s, z28.s, z23.s\n"
+ ".inst 0x658aabbd // bfcvt z29.h, p2/M, z29.s\n"
+ "zip2 z28.s, z28.s, z23.s\n"
+ ".inst 0x658aab5b // bfcvt z27.h, p2/M, z26.s\n"
+ "zip1 z26.s, z21.s, z18.s\n"
+ ".inst 0x658aab39 // bfcvt z25.h, p2/M, z25.s\n"
+ "zip2 z24.s, z21.s, z18.s\n"
+ ".inst 0x658aaad7 // bfcvt z23.h, p2/M, z22.s\n"
+ "zip1 z22.s, z20.s, z17.s\n"
+ ".inst 0x658aa955 // bfcvt z21.h, p2/M, z10.s\n"
+ "zip2 z20.s, z20.s, z17.s\n"
+ ".inst 0x658aaa73 // bfcvt z19.h, p2/M, z19.s\n"
+ "zip1 z18.s, z8.s, z16.s\n"
+ ".inst 0x658aa931 // bfcvt z17.h, p2/M, z9.s\n"
+ "zip2 z16.s, z8.s, z16.s\n"
+ ".inst 0x648aa8c7 // bfcvtnt z7.h, p2/M, z6.s\n"
+ ".inst 0x648aa885 // bfcvtnt z5.h, p2/M, z4.s\n"
+ ".inst 0x648aa843 // bfcvtnt z3.h, p2/M, z2.s\n"
+ ".inst 0x648aa801 // bfcvtnt z1.h, p2/M, z0.s\n"
+ ".inst 0x648aabdf // bfcvtnt z31.h, p2/M, z30.s\n"
+ ".inst 0x648aab9d // bfcvtnt z29.h, p2/M, z28.s\n"
+ "st1h { z7.h }, p2, [x24]\n"
+ "st1h { z5.h }, p2, [x24, #1, MUL VL]\n"
+ ".inst 0x648aab5b // bfcvtnt z27.h, p2/M, z26.s\n"
+ ".inst 0x648aab19 // bfcvtnt z25.h, p2/M, z24.s\n"
+ "st1h { z3.h }, p2, [x24, #2, MUL VL]\n"
+ ".inst 0x648aaad7 // bfcvtnt z23.h, p2/M, z22.s\n"
+ ".inst 0x648aaa95 // bfcvtnt z21.h, p2/M, z20.s\n"
+ "st1h { z1.h }, p2, [x24, #3, MUL VL]\n"
+ ".inst 0x648aaa53 // bfcvtnt z19.h, p2/M, z18.s\n"
+ ".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
+ "st1h { z31.h }, p2, [x24, #4, MUL VL]\n"
+ "st1h { z29.h }, p2, [x24, #5, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "st1h { z27.h }, p2, [x24]\n"
+ "st1h { z25.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z23.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z21.h }, p2, [x24, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x24, #4, MUL VL]\n"
+ "st1h { z17.h }, p2, [x24, #5, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z20.s }, p2/Z, [x26]\n"
- "ld1w { z19.s }, p2/Z, [x24]\n"
+ "mov x20, x25\n"
+ "decd x25, ALL, MUL #6\n"
+ "whilelt p0.s, XZR, x20\n"
"decw x20\n"
"whilelt p1.s, XZR, x20\n"
- "ld1w { z18.s }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x24, #1, MUL VL]\n"
"decw x20\n"
+ "ld1w { z17.s }, p0/Z, [x26]\n"
+ "ld1w { z16.s }, p0/Z, [x22]\n"
+ "ld1w { z23.s }, p0/Z, [x23]\n"
+ "ld1w { z19.s }, p0/Z, [x21]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z25.s }, p0/Z, [x26, #2, MUL VL]\n"
- "ld1w { z16.s }, p0/Z, [x24, #2, MUL VL]\n"
- "ld1w { z24.s }, p2/Z, [x25]\n"
- "ld1w { z30.s }, p1/Z, [x25, #1, MUL VL]\n"
- "zip1 z23.s, z20.s, z19.s\n"
- "zip2 z22.s, z20.s, z19.s\n"
- "ld1w { z29.s }, p0/Z, [x25, #2, MUL VL]\n"
- "ld1w { z21.s }, p2/Z, [x22]\n"
- "zip1 z20.s, z18.s, z17.s\n"
- "zip2 z19.s, z18.s, z17.s\n"
+ "cmp x25, #0x0\n"
+ "ld1w { z22.s }, p1/Z, [x26, #1, MUL VL]\n"
"ld1w { z18.s }, p1/Z, [x22, #1, MUL VL]\n"
- "ld1w { z28.s }, p0/Z, [x22, #2, MUL VL]\n"
- "zip1 z17.s, z25.s, z16.s\n"
- "zip2 z16.s, z25.s, z16.s\n"
- "decd x23, ALL, MUL #6\n"
- ".inst 0x658aaefb // bfcvt z27.h, p3/M, z23.s\n"
- "zip1 z26.s, z24.s, z21.s\n"
- "cmp x23, #0x0\n"
- ".inst 0x658aaed9 // bfcvt z25.h, p3/M, z22.s\n"
- "zip2 z24.s, z24.s, z21.s\n"
+ "ld1w { z31.s }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z30.s }, p1/Z, [x21, #1, MUL VL]\n"
+ "zip1 z21.s, z17.s, z16.s\n"
+ "zip2 z17.s, z17.s, z16.s\n"
+ "ld1w { z20.s }, p0/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x22, #2, MUL VL]\n"
+ "zip1 z29.s, z23.s, z19.s\n"
+ "zip2 z28.s, z23.s, z19.s\n"
+ "ld1w { z27.s }, p0/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z26.s }, p0/Z, [x21, #2, MUL VL]\n"
+ "zip1 z19.s, z22.s, z18.s\n"
+ "zip2 z18.s, z22.s, z18.s\n"
+ ".inst 0x658aaab9 // bfcvt z25.h, p2/M, z21.s\n"
+ ".inst 0x658aaa38 // bfcvt z24.h, p2/M, z17.s\n"
"addvl x26, x26, #3\n"
- "addvl x25, x25, #3\n"
- ".inst 0x658aae97 // bfcvt z23.h, p3/M, z20.s\n"
- "zip1 z22.s, z30.s, z18.s\n"
- "addvl x24, x24, #3\n"
+ "addvl x23, x23, #3\n"
+ "zip1 z17.s, z20.s, z16.s\n"
+ "zip2 z16.s, z20.s, z16.s\n"
"addvl x22, x22, #3\n"
- ".inst 0x658aae75 // bfcvt z21.h, p3/M, z19.s\n"
- "zip2 z20.s, z30.s, z18.s\n"
- ".inst 0x658aae33 // bfcvt z19.h, p3/M, z17.s\n"
- "zip1 z18.s, z29.s, z28.s\n"
- ".inst 0x658aae11 // bfcvt z17.h, p3/M, z16.s\n"
- "zip2 z16.s, z29.s, z28.s\n"
- ".inst 0x648aaf5b // bfcvtnt z27.h, p3/M, z26.s\n"
- ".inst 0x648aaf19 // bfcvtnt z25.h, p3/M, z24.s\n"
- "st1h { z27.h }, p3, [x21]\n"
- ".inst 0x648aaed7 // bfcvtnt z23.h, p3/M, z22.s\n"
- ".inst 0x648aae95 // bfcvtnt z21.h, p3/M, z20.s\n"
- "st1h { z25.h }, p3, [x21, #1, MUL VL]\n"
- ".inst 0x648aae53 // bfcvtnt z19.h, p3/M, z18.s\n"
- ".inst 0x648aae11 // bfcvtnt z17.h, p3/M, z16.s\n"
- "st1h { z23.h }, p3, [x21, #2, MUL VL]\n"
- "st1h { z21.h }, p3, [x21, #3, MUL VL]\n"
- "st1h { z19.h }, p3, [x21, #4, MUL VL]\n"
- "st1h { z17.h }, p3, [x21, #5, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "addvl x21, x21, #3\n"
+ ".inst 0x658aaa77 // bfcvt z23.h, p2/M, z19.s\n"
+ "zip1 z22.s, z31.s, z30.s\n"
+ ".inst 0x658aaa55 // bfcvt z21.h, p2/M, z18.s\n"
+ "zip2 z20.s, z31.s, z30.s\n"
+ ".inst 0x658aaa33 // bfcvt z19.h, p2/M, z17.s\n"
+ "zip1 z18.s, z27.s, z26.s\n"
+ ".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
+ "zip2 z16.s, z27.s, z26.s\n"
+ ".inst 0x648aabb9 // bfcvtnt z25.h, p2/M, z29.s\n"
+ ".inst 0x648aab98 // bfcvtnt z24.h, p2/M, z28.s\n"
+ ".inst 0x648aaad7 // bfcvtnt z23.h, p2/M, z22.s\n"
+ ".inst 0x648aaa95 // bfcvtnt z21.h, p2/M, z20.s\n"
+ ".inst 0x648aaa53 // bfcvtnt z19.h, p2/M, z18.s\n"
+ ".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
+ "st1h { z25.h }, p2, [x24]\n"
+ "st1h { z24.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z23.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z21.h }, p2, [x24, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x24, #4, MUL VL]\n"
+ "st1h { z17.h }, p2, [x24, #5, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -216,7 +216,7 @@ void sve_transpose_interleave_6VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp
index c638eaacde..314e3aebbd 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_6VL_4x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,160 +40,160 @@ void sve_transpose_interleave_6VL_4x2(uint32_t *out, const uint32_t *in, size_t
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "ptrue p3.b\n"
+ "ptrue p2.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
"mov x28, %x[in]\n"
"mov x27, %x[width]\n"
"cntw x26, ALL, MUL #6\n"
- "add x25, x28, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
+ "mov x25, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x24, x28, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
"cmp x27, x26\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
+ "add %x[in], x22, %x[in_stride]\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z18.s }, p3/Z, [x28]\n"
- "ld1w { z17.s }, p3/Z, [x28, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z19.s }, p3/Z, [x28, #2, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25]\n"
- "zip1 z9.s, z18.s, z16.s\n"
- "zip2 z8.s, z18.s, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x25, #2, MUL VL]\n"
- "zip1 z7.s, z17.s, z16.s\n"
- "zip2 z6.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x24]\n"
- "ld1w { z16.s }, p3/Z, [x23]\n"
- "zip1 z5.s, z19.s, z18.s\n"
- "zip2 z4.s, z19.s, z18.s\n"
- "ld1w { z18.s }, p3/Z, [x28, #3, MUL VL]\n"
- "ld1w { z21.s }, p3/Z, [x28, #4, MUL VL]\n"
- "zip1 z3.s, z17.s, z16.s\n"
- "zip2 z2.s, z17.s, z16.s\n"
- "ld1w { z20.s }, p3/Z, [x28, #5, MUL VL]\n"
- "ld1w { z17.s }, p3/Z, [x25, #3, MUL VL]\n"
- "mov x20, x22\n"
- "zip1 z1.s, z18.s, z17.s\n"
- "ld1w { z19.s }, p3/Z, [x25, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25, #5, MUL VL]\n"
- "zip2 z0.s, z18.s, z17.s\n"
- "zip1 z31.s, z21.s, z19.s\n"
- "ld1w { z18.s }, p3/Z, [x24, #1, MUL VL]\n"
- "ld1w { z17.s }, p3/Z, [x24, #2, MUL VL]\n"
- "zip2 z30.s, z21.s, z19.s\n"
- "zip1 z29.s, z20.s, z16.s\n"
- "ld1w { z19.s }, p3/Z, [x24, #3, MUL VL]\n"
- "ld1w { z28.s }, p3/Z, [x24, #4, MUL VL]\n"
- "zip2 z27.s, z20.s, z16.s\n"
+ "ld1w { z18.s }, p2/Z, [x28]\n"
+ "ld1w { z28.s }, p2/Z, [x28, #1, MUL VL]\n"
+ "mov x21, x25\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1w { z22.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z17.s }, p2/Z, [x24]\n"
+ "mov x20, x25\n"
"sub x27, x27, x26\n"
- "ld1w { z26.s }, p3/Z, [x24, #5, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23, #1, MUL VL]\n"
- "zip1 z25.s, z18.s, z16.s\n"
- "zip2 z24.s, z18.s, z16.s\n"
- "ld1w { z16.s }, p3/Z, [x23, #2, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x23, #3, MUL VL]\n"
- "zip1 z23.s, z17.s, z16.s\n"
- "zip2 z22.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x23, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x23, #5, MUL VL]\n"
- "st1w { z9.s }, p3, [x21]\n"
- "zip1 z21.s, z19.s, z18.s\n"
- "st1w { z8.s }, p3, [x21, #1, MUL VL]\n"
- "zip2 z20.s, z19.s, z18.s\n"
+ "ld1w { z16.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x24, #2, MUL VL]\n"
"cmp x27, x26\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1w { z27.s }, p2/Z, [x23]\n"
+ "ld1w { z26.s }, p2/Z, [x22]\n"
+ "ld1w { z20.s }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x28, #4, MUL VL]\n"
+ "zip1 z8.s, z18.s, z17.s\n"
+ "zip2 z25.s, z18.s, z17.s\n"
+ "ld1w { z24.s }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x24, #3, MUL VL]\n"
+ "zip1 z23.s, z28.s, z16.s\n"
+ "zip2 z7.s, z28.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, #4, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24, #5, MUL VL]\n"
+ "zip1 z6.s, z22.s, z21.s\n"
+ "zip2 z5.s, z22.s, z21.s\n"
+ "ld1w { z22.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "zip1 z4.s, z27.s, z26.s\n"
+ "zip2 z3.s, z27.s, z26.s\n"
+ "ld1w { z2.s }, p2/Z, [x23, #3, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "zip1 z0.s, z20.s, z18.s\n"
+ "zip2 z31.s, z20.s, z18.s\n"
+ "ld1w { z30.s }, p2/Z, [x23, #5, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "zip1 z29.s, z19.s, z17.s\n"
+ "zip2 z28.s, z19.s, z17.s\n"
+ "ld1w { z19.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #3, MUL VL]\n"
+ "zip1 z27.s, z24.s, z16.s\n"
+ "zip2 z26.s, z24.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x22, #5, MUL VL]\n"
+ "st1w { z8.s }, p2, [x21]\n"
"addvl x28, x28, #6\n"
- "st1w { z7.s }, p3, [x21, #2, MUL VL]\n"
- "addvl x25, x25, #6\n"
+ "st1w { z25.s }, p2, [x21, #1, MUL VL]\n"
+ "zip1 z25.s, z22.s, z20.s\n"
+ "zip2 z24.s, z22.s, z20.s\n"
"addvl x24, x24, #6\n"
- "zip1 z19.s, z28.s, z17.s\n"
- "st1w { z6.s }, p3, [x21, #3, MUL VL]\n"
+ "st1w { z23.s }, p2, [x21, #2, MUL VL]\n"
+ "zip1 z23.s, z21.s, z19.s\n"
+ "zip2 z22.s, z21.s, z19.s\n"
"addvl x23, x23, #6\n"
- "zip2 z18.s, z28.s, z17.s\n"
- "zip1 z17.s, z26.s, z16.s\n"
- "st1w { z5.s }, p3, [x21, #4, MUL VL]\n"
- "zip2 z16.s, z26.s, z16.s\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z4.s }, p3, [x21, #5, MUL VL]\n"
- "st1w { z3.s }, p3, [x21, #6, MUL VL]\n"
- "st1w { z2.s }, p3, [x21, #7, MUL VL]\n"
+ "st1w { z7.s }, p2, [x21, #3, MUL VL]\n"
+ "zip1 z21.s, z2.s, z18.s\n"
+ "zip2 z20.s, z2.s, z18.s\n"
+ "addvl x22, x22, #6\n"
+ "st1w { z6.s }, p2, [x21, #4, MUL VL]\n"
+ "zip1 z19.s, z1.s, z17.s\n"
+ "zip2 z18.s, z1.s, z17.s\n"
+ "st1w { z5.s }, p2, [x21, #5, MUL VL]\n"
+ "zip1 z17.s, z30.s, z16.s\n"
+ "zip2 z16.s, z30.s, z16.s\n"
+ "st1w { z4.s }, p2, [x21, #6, MUL VL]\n"
+ "st1w { z3.s }, p2, [x21, #7, MUL VL]\n"
"addvl x21, x21, #12\n"
- "st1w { z25.s }, p3, [x21, #-4, MUL VL]\n"
- "st1w { z24.s }, p3, [x21, #-3, MUL VL]\n"
- "st1w { z23.s }, p3, [x21, #-2, MUL VL]\n"
- "st1w { z22.s }, p3, [x21, #-1, MUL VL]\n"
- "st1w { z1.s }, p3, [x20]\n"
- "st1w { z0.s }, p3, [x20, #1, MUL VL]\n"
- "st1w { z31.s }, p3, [x20, #2, MUL VL]\n"
- "st1w { z30.s }, p3, [x20, #3, MUL VL]\n"
- "st1w { z29.s }, p3, [x20, #4, MUL VL]\n"
- "st1w { z27.s }, p3, [x20, #5, MUL VL]\n"
- "st1w { z21.s }, p3, [x20, #6, MUL VL]\n"
- "st1w { z20.s }, p3, [x20, #7, MUL VL]\n"
+ "st1w { z25.s }, p2, [x21, #-4, MUL VL]\n"
+ "st1w { z24.s }, p2, [x21, #-3, MUL VL]\n"
+ "st1w { z23.s }, p2, [x21, #-2, MUL VL]\n"
+ "st1w { z22.s }, p2, [x21, #-1, MUL VL]\n"
+ "st1w { z0.s }, p2, [x20]\n"
+ "st1w { z31.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z28.s }, p2, [x20, #3, MUL VL]\n"
+ "st1w { z27.s }, p2, [x20, #4, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #5, MUL VL]\n"
+ "st1w { z21.s }, p2, [x20, #6, MUL VL]\n"
+ "st1w { z20.s }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
- "st1w { z19.s }, p3, [x20, #-4, MUL VL]\n"
- "st1w { z18.s }, p3, [x20, #-3, MUL VL]\n"
- "st1w { z17.s }, p3, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p3, [x20, #-1, MUL VL]\n"
+ "st1w { z19.s }, p2, [x20, #-4, MUL VL]\n"
+ "st1w { z18.s }, p2, [x20, #-3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x20, #-2, MUL VL]\n"
+ "st1w { z16.s }, p2, [x20, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cbz x27, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z19.s }, p2/Z, [x28]\n"
- "ld1w { z18.s }, p2/Z, [x25]\n"
- "decw x20\n"
- "whilelt p1.s, XZR, x20\n"
- "ld1w { z17.s }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x25, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z22.s }, p0/Z, [x28, #2, MUL VL]\n"
- "ld1w { z21.s }, p0/Z, [x25, #2, MUL VL]\n"
- "ld1w { z28.s }, p2/Z, [x24]\n"
- "ld1w { z27.s }, p2/Z, [x23]\n"
- "mov x20, x22\n"
+ "mov x21, x27\n"
+ "mov x20, x25\n"
"decd x27, ALL, MUL #6\n"
- "ld1w { z26.s }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1w { z25.s }, p0/Z, [x24, #2, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "decw x21\n"
+ "whilelt p1.s, XZR, x21\n"
+ "decw x21\n"
+ "ld1w { z19.s }, p0/Z, [x28]\n"
+ "ld1w { z18.s }, p0/Z, [x24]\n"
+ "ld1w { z22.s }, p0/Z, [x23]\n"
+ "ld1w { z17.s }, p0/Z, [x22]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "cmp x27, #0x0\n"
+ "ld1w { z21.s }, p1/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z16.s }, p1/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z28.s }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z27.s }, p1/Z, [x22, #1, MUL VL]\n"
"zip1 z20.s, z19.s, z18.s\n"
- "zip2 z19.s, z19.s, z18.s\n"
- "ld1w { z24.s }, p1/Z, [x23, #1, MUL VL]\n"
+ "zip2 z26.s, z19.s, z18.s\n"
+ "ld1w { z19.s }, p0/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z18.s }, p0/Z, [x24, #2, MUL VL]\n"
+ "zip1 z25.s, z22.s, z17.s\n"
+ "zip2 z24.s, z22.s, z17.s\n"
"ld1w { z23.s }, p0/Z, [x23, #2, MUL VL]\n"
- "zip1 z18.s, z17.s, z16.s\n"
- "zip2 z17.s, z17.s, z16.s\n"
- "zip1 z16.s, z22.s, z21.s\n"
- "zip2 z22.s, z22.s, z21.s\n"
- "st1w { z20.s }, p3, [x20]\n"
- "cmp x27, #0x0\n"
- "zip1 z21.s, z28.s, z27.s\n"
- "zip2 z20.s, z28.s, z27.s\n"
- "st1w { z19.s }, p3, [x20, #1, MUL VL]\n"
+ "ld1w { z22.s }, p0/Z, [x22, #2, MUL VL]\n"
+ "zip1 z17.s, z21.s, z16.s\n"
+ "zip2 z16.s, z21.s, z16.s\n"
+ "st1w { z20.s }, p2, [x20]\n"
"addvl x28, x28, #3\n"
- "st1w { z18.s }, p3, [x20, #2, MUL VL]\n"
- "addvl x25, x25, #3\n"
"addvl x24, x24, #3\n"
- "zip1 z19.s, z26.s, z24.s\n"
- "st1w { z17.s }, p3, [x20, #3, MUL VL]\n"
+ "zip1 z21.s, z28.s, z27.s\n"
+ "zip1 z20.s, z19.s, z18.s\n"
+ "zip2 z19.s, z19.s, z18.s\n"
+ "st1w { z26.s }, p2, [x20, #1, MUL VL]\n"
"addvl x23, x23, #3\n"
- "zip2 z18.s, z26.s, z24.s\n"
- "zip1 z17.s, z25.s, z23.s\n"
- "st1w { z16.s }, p3, [x20, #4, MUL VL]\n"
- "zip2 z16.s, z25.s, z23.s\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z22.s }, p3, [x20, #5, MUL VL]\n"
- "st1w { z21.s }, p3, [x20, #6, MUL VL]\n"
- "st1w { z20.s }, p3, [x20, #7, MUL VL]\n"
+ "st1w { z17.s }, p2, [x20, #2, MUL VL]\n"
+ "addvl x22, x22, #3\n"
+ "zip2 z18.s, z28.s, z27.s\n"
+ "zip1 z17.s, z23.s, z22.s\n"
+ "st1w { z16.s }, p2, [x20, #3, MUL VL]\n"
+ "zip2 z16.s, z23.s, z22.s\n"
+ "st1w { z20.s }, p2, [x20, #4, MUL VL]\n"
+ "st1w { z19.s }, p2, [x20, #5, MUL VL]\n"
+ "st1w { z25.s }, p2, [x20, #6, MUL VL]\n"
+ "st1w { z24.s }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #12\n"
- "st1w { z19.s }, p3, [x20, #-4, MUL VL]\n"
- "st1w { z18.s }, p3, [x20, #-3, MUL VL]\n"
- "st1w { z17.s }, p3, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p3, [x20, #-1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x20, #-4, MUL VL]\n"
+ "st1w { z18.s }, p2, [x20, #-3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x20, #-2, MUL VL]\n"
+ "st1w { z16.s }, p2, [x20, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -205,90 +205,90 @@ void sve_transpose_interleave_6VL_4x2(uint32_t *out, const uint32_t *in, size_t
"mov x28, %x[in]\n"
"mov x21, %x[width]\n"
"cntw x20, ALL, MUL #6\n"
- "add x25, x28, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "mov x25, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
+ "add x24, x28, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x21, x20\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1w { z17.s }, p3/Z, [x28]\n"
- "ld1w { z19.s }, p3/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z17.s }, p2/Z, [x28]\n"
+ "ld1w { z23.s }, p2/Z, [x28, #1, MUL VL]\n"
"sub x21, x21, x20\n"
+ "ld1w { z29.s }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x24]\n"
"cmp x21, x20\n"
- "ld1w { z18.s }, p3/Z, [x28, #2, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25]\n"
- "zip1 z28.s, z17.s, z16.s\n"
- "zip2 z20.s, z17.s, z16.s\n"
- "ld1w { z17.s }, p3/Z, [x25, #1, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25, #2, MUL VL]\n"
- "zip1 z27.s, z19.s, z17.s\n"
- "zip2 z26.s, z19.s, z17.s\n"
- "ld1w { z19.s }, p3/Z, [x28, #3, MUL VL]\n"
- "ld1w { z25.s }, p3/Z, [x28, #4, MUL VL]\n"
- "zip1 z24.s, z18.s, z16.s\n"
- "zip2 z23.s, z18.s, z16.s\n"
- "ld1w { z22.s }, p3/Z, [x28, #5, MUL VL]\n"
- "ld1w { z18.s }, p3/Z, [x25, #3, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1w { z28.s }, p2/Z, [x28, #3, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x24, #3, MUL VL]\n"
+ "zip1 z19.s, z17.s, z16.s\n"
+ "zip2 z18.s, z17.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x24, #4, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x24, #5, MUL VL]\n"
+ "zip1 z16.s, z23.s, z22.s\n"
+ "zip2 z24.s, z23.s, z22.s\n"
+ "zip1 z23.s, z29.s, z21.s\n"
+ "zip2 z22.s, z29.s, z21.s\n"
"addvl x28, x28, #6\n"
- "zip1 z21.s, z19.s, z18.s\n"
- "ld1w { z17.s }, p3/Z, [x25, #4, MUL VL]\n"
- "ld1w { z16.s }, p3/Z, [x25, #5, MUL VL]\n"
- "st1w { z28.s }, p3, [x22]\n"
- "addvl x25, x25, #6\n"
- "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
- "zip2 z20.s, z19.s, z18.s\n"
- "zip1 z19.s, z25.s, z17.s\n"
- "st1w { z27.s }, p3, [x22, #2, MUL VL]\n"
- "zip2 z18.s, z25.s, z17.s\n"
- "zip1 z17.s, z22.s, z16.s\n"
- "st1w { z26.s }, p3, [x22, #3, MUL VL]\n"
- "zip2 z16.s, z22.s, z16.s\n"
- "st1w { z24.s }, p3, [x22, #4, MUL VL]\n"
- "st1w { z23.s }, p3, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z21.s }, p3, [x22]\n"
- "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z19.s }, p3, [x22, #2, MUL VL]\n"
- "st1w { z18.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z17.s }, p3, [x22, #4, MUL VL]\n"
- "st1w { z16.s }, p3, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "addvl x24, x24, #6\n"
+ "st1w { z19.s }, p2, [x25]\n"
+ "zip1 z21.s, z28.s, z20.s\n"
+ "zip2 z20.s, z28.s, z20.s\n"
+ "st1w { z18.s }, p2, [x25, #1, MUL VL]\n"
+ "zip1 z19.s, z27.s, z17.s\n"
+ "zip2 z18.s, z27.s, z17.s\n"
+ "st1w { z16.s }, p2, [x25, #2, MUL VL]\n"
+ "zip1 z17.s, z26.s, z25.s\n"
+ "zip2 z16.s, z26.s, z25.s\n"
+ "st1w { z24.s }, p2, [x25, #3, MUL VL]\n"
+ "st1w { z23.s }, p2, [x25, #4, MUL VL]\n"
+ "st1w { z22.s }, p2, [x25, #5, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
+ "st1w { z21.s }, p2, [x25]\n"
+ "st1w { z20.s }, p2, [x25, #1, MUL VL]\n"
+ "st1w { z19.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x25, #4, MUL VL]\n"
+ "st1w { z16.s }, p2, [x25, #5, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
+ "decd x21, ALL, MUL #6\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z20.s }, p0/Z, [x28]\n"
- "ld1w { z19.s }, p0/Z, [x25]\n"
"decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z18.s }, p0/Z, [x28, #1, MUL VL]\n"
- "ld1w { z17.s }, p0/Z, [x25, #1, MUL VL]\n"
+ "whilelt p1.s, XZR, x20\n"
"decw x20\n"
+ "ld1w { z17.s }, p0/Z, [x28]\n"
+ "ld1w { z16.s }, p0/Z, [x24]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z22.s }, p0/Z, [x28, #2, MUL VL]\n"
- "ld1w { z16.s }, p0/Z, [x25, #2, MUL VL]\n"
- "decd x21, ALL, MUL #6\n"
"cmp x21, #0x0\n"
- "zip1 z21.s, z20.s, z19.s\n"
- "zip2 z20.s, z20.s, z19.s\n"
+ "ld1w { z22.s }, p1/Z, [x28, #1, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x24, #1, MUL VL]\n"
+ "zip1 z21.s, z17.s, z16.s\n"
+ "zip2 z17.s, z17.s, z16.s\n"
+ "ld1w { z20.s }, p0/Z, [x28, #2, MUL VL]\n"
"addvl x28, x28, #3\n"
- "addvl x25, x25, #3\n"
- "zip1 z19.s, z18.s, z17.s\n"
- "zip2 z18.s, z18.s, z17.s\n"
- "zip1 z17.s, z22.s, z16.s\n"
- "zip2 z16.s, z22.s, z16.s\n"
- "st1w { z21.s }, p3, [x22]\n"
- "st1w { z20.s }, p3, [x22, #1, MUL VL]\n"
- "st1w { z19.s }, p3, [x22, #2, MUL VL]\n"
- "st1w { z18.s }, p3, [x22, #3, MUL VL]\n"
- "st1w { z17.s }, p3, [x22, #4, MUL VL]\n"
- "st1w { z16.s }, p3, [x22, #5, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1w { z16.s }, p0/Z, [x24, #2, MUL VL]\n"
+ "addvl x24, x24, #3\n"
+ "zip1 z19.s, z22.s, z18.s\n"
+ "zip2 z18.s, z22.s, z18.s\n"
+ "st1w { z21.s }, p2, [x25]\n"
+ "st1w { z17.s }, p2, [x25, #1, MUL VL]\n"
+ "zip1 z17.s, z20.s, z16.s\n"
+ "zip2 z16.s, z20.s, z16.s\n"
+ "st1w { z19.s }, p2, [x25, #2, MUL VL]\n"
+ "st1w { z18.s }, p2, [x25, #3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x25, #4, MUL VL]\n"
+ "st1w { z16.s }, p2, [x25, #5, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -297,7 +297,7 @@ void sve_transpose_interleave_6VL_4x2(uint32_t *out, const uint32_t *in, size_t
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp
index 0526bd0596..ae228d3916 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -34,152 +34,152 @@ void sve_transpose_interleave_8VL(uint32_t *out, const uint32_t *in, size_t widt
__asm__ __volatile__(
"cmp %x[height], #0x2\n"
- "ptrue p1.b\n"
+ "ptrue p2.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
"mov x25, %x[width]\n"
"cntw x24, ALL, MUL #16\n"
- "add x23, x26, %x[in_stride]\n"
- "cmp x25, x24\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
+ "mov x23, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
+ "add x22, x26, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
+ "cmp x25, x24\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z15.s }, p1/Z, [x26]\n"
- "ld1w { z14.s }, p1/Z, [x26, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z13.s }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1w { z12.s }, p1/Z, [x26, #3, MUL VL]\n"
- "mov x20, x22\n"
+ "ld1w { z15.s }, p2/Z, [x26]\n"
+ "ld1w { z14.s }, p2/Z, [x26, #1, MUL VL]\n"
+ "mov x21, x23\n"
+ "add x23, x23, %x[out_stride]\n"
+ "ld1w { z13.s }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z12.s }, p2/Z, [x26, #3, MUL VL]\n"
+ "mov x20, x23\n"
"sub x25, x25, x24\n"
- "ld1w { z11.s }, p1/Z, [x26, #4, MUL VL]\n"
- "ld1w { z10.s }, p1/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z11.s }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z10.s }, p2/Z, [x26, #5, MUL VL]\n"
"cmp x25, x24\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1w { z9.s }, p1/Z, [x26, #6, MUL VL]\n"
- "ld1w { z8.s }, p1/Z, [x26, #7, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "ld1w { z9.s }, p2/Z, [x26, #6, MUL VL]\n"
+ "ld1w { z8.s }, p2/Z, [x26, #7, MUL VL]\n"
"addvl x26, x26, #16\n"
- "ld1w { z7.s }, p1/Z, [x23]\n"
- "ld1w { z6.s }, p1/Z, [x23, #1, MUL VL]\n"
- "ld1w { z5.s }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1w { z4.s }, p1/Z, [x23, #3, MUL VL]\n"
- "ld1w { z3.s }, p1/Z, [x23, #4, MUL VL]\n"
- "ld1w { z2.s }, p1/Z, [x23, #5, MUL VL]\n"
- "ld1w { z1.s }, p1/Z, [x23, #6, MUL VL]\n"
- "ld1w { z0.s }, p1/Z, [x23, #7, MUL VL]\n"
- "addvl x23, x23, #16\n"
- "ld1w { z31.s }, p1/Z, [x26, #-8, MUL VL]\n"
- "ld1w { z30.s }, p1/Z, [x26, #-7, MUL VL]\n"
- "ld1w { z29.s }, p1/Z, [x26, #-6, MUL VL]\n"
- "ld1w { z28.s }, p1/Z, [x26, #-5, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x26, #-4, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x26, #-3, MUL VL]\n"
- "ld1w { z25.s }, p1/Z, [x26, #-2, MUL VL]\n"
- "ld1w { z24.s }, p1/Z, [x26, #-1, MUL VL]\n"
- "ld1w { z23.s }, p1/Z, [x23, #-8, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x23, #-7, MUL VL]\n"
- "ld1w { z21.s }, p1/Z, [x23, #-6, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x23, #-5, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x23, #-4, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x23, #-3, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x23, #-2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x23, #-1, MUL VL]\n"
- "st1w { z15.s }, p1, [x21]\n"
- "st1w { z14.s }, p1, [x21, #1, MUL VL]\n"
- "st1w { z13.s }, p1, [x21, #2, MUL VL]\n"
- "st1w { z12.s }, p1, [x21, #3, MUL VL]\n"
- "st1w { z11.s }, p1, [x21, #4, MUL VL]\n"
- "st1w { z10.s }, p1, [x21, #5, MUL VL]\n"
- "st1w { z9.s }, p1, [x21, #6, MUL VL]\n"
- "st1w { z8.s }, p1, [x21, #7, MUL VL]\n"
+ "ld1w { z7.s }, p2/Z, [x22]\n"
+ "ld1w { z6.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z5.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z4.s }, p2/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z2.s }, p2/Z, [x22, #5, MUL VL]\n"
+ "ld1w { z1.s }, p2/Z, [x22, #6, MUL VL]\n"
+ "ld1w { z0.s }, p2/Z, [x22, #7, MUL VL]\n"
+ "addvl x22, x22, #16\n"
+ "ld1w { z31.s }, p2/Z, [x26, #-8, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x26, #-7, MUL VL]\n"
+ "ld1w { z29.s }, p2/Z, [x26, #-6, MUL VL]\n"
+ "ld1w { z28.s }, p2/Z, [x26, #-5, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x26, #-4, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x26, #-3, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x26, #-2, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x26, #-1, MUL VL]\n"
+ "ld1w { z23.s }, p2/Z, [x22, #-8, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x22, #-7, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #-6, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x22, #-5, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x22, #-4, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #-3, MUL VL]\n"
+ "ld1w { z17.s }, p2/Z, [x22, #-2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x22, #-1, MUL VL]\n"
+ "st1w { z15.s }, p2, [x21]\n"
+ "st1w { z14.s }, p2, [x21, #1, MUL VL]\n"
+ "st1w { z13.s }, p2, [x21, #2, MUL VL]\n"
+ "st1w { z12.s }, p2, [x21, #3, MUL VL]\n"
+ "st1w { z11.s }, p2, [x21, #4, MUL VL]\n"
+ "st1w { z10.s }, p2, [x21, #5, MUL VL]\n"
+ "st1w { z9.s }, p2, [x21, #6, MUL VL]\n"
+ "st1w { z8.s }, p2, [x21, #7, MUL VL]\n"
"addvl x21, x21, #16\n"
- "st1w { z7.s }, p1, [x21, #-8, MUL VL]\n"
- "st1w { z6.s }, p1, [x21, #-7, MUL VL]\n"
- "st1w { z5.s }, p1, [x21, #-6, MUL VL]\n"
- "st1w { z4.s }, p1, [x21, #-5, MUL VL]\n"
- "st1w { z3.s }, p1, [x21, #-4, MUL VL]\n"
- "st1w { z2.s }, p1, [x21, #-3, MUL VL]\n"
- "st1w { z1.s }, p1, [x21, #-2, MUL VL]\n"
- "st1w { z0.s }, p1, [x21, #-1, MUL VL]\n"
- "st1w { z31.s }, p1, [x20]\n"
- "st1w { z30.s }, p1, [x20, #1, MUL VL]\n"
- "st1w { z29.s }, p1, [x20, #2, MUL VL]\n"
- "st1w { z28.s }, p1, [x20, #3, MUL VL]\n"
- "st1w { z27.s }, p1, [x20, #4, MUL VL]\n"
- "st1w { z26.s }, p1, [x20, #5, MUL VL]\n"
- "st1w { z25.s }, p1, [x20, #6, MUL VL]\n"
- "st1w { z24.s }, p1, [x20, #7, MUL VL]\n"
+ "st1w { z7.s }, p2, [x21, #-8, MUL VL]\n"
+ "st1w { z6.s }, p2, [x21, #-7, MUL VL]\n"
+ "st1w { z5.s }, p2, [x21, #-6, MUL VL]\n"
+ "st1w { z4.s }, p2, [x21, #-5, MUL VL]\n"
+ "st1w { z3.s }, p2, [x21, #-4, MUL VL]\n"
+ "st1w { z2.s }, p2, [x21, #-3, MUL VL]\n"
+ "st1w { z1.s }, p2, [x21, #-2, MUL VL]\n"
+ "st1w { z0.s }, p2, [x21, #-1, MUL VL]\n"
+ "st1w { z31.s }, p2, [x20]\n"
+ "st1w { z30.s }, p2, [x20, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z28.s }, p2, [x20, #3, MUL VL]\n"
+ "st1w { z27.s }, p2, [x20, #4, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #5, MUL VL]\n"
+ "st1w { z25.s }, p2, [x20, #6, MUL VL]\n"
+ "st1w { z24.s }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1w { z23.s }, p1, [x20, #-8, MUL VL]\n"
- "st1w { z22.s }, p1, [x20, #-7, MUL VL]\n"
- "st1w { z21.s }, p1, [x20, #-6, MUL VL]\n"
- "st1w { z20.s }, p1, [x20, #-5, MUL VL]\n"
- "st1w { z19.s }, p1, [x20, #-4, MUL VL]\n"
- "st1w { z18.s }, p1, [x20, #-3, MUL VL]\n"
- "st1w { z17.s }, p1, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p1, [x20, #-1, MUL VL]\n"
+ "st1w { z23.s }, p2, [x20, #-8, MUL VL]\n"
+ "st1w { z22.s }, p2, [x20, #-7, MUL VL]\n"
+ "st1w { z21.s }, p2, [x20, #-6, MUL VL]\n"
+ "st1w { z20.s }, p2, [x20, #-5, MUL VL]\n"
+ "st1w { z19.s }, p2, [x20, #-4, MUL VL]\n"
+ "st1w { z18.s }, p2, [x20, #-3, MUL VL]\n"
+ "st1w { z17.s }, p2, [x20, #-2, MUL VL]\n"
+ "st1w { z16.s }, p2, [x20, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x25\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z31.s }, p0/Z, [x26]\n"
- "ld1w { z30.s }, p0/Z, [x23]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
+ "mov x21, x25\n"
+ "mov x20, x23\n"
+ "decw x25, ALL, MUL #8\n"
+ "add x23, x23, %x[out_stride]\n"
+ "whilelt p1.s, XZR, x21\n"
+ "decw x21\n"
+ "whilelt p0.s, XZR, x21\n"
+ "decw x21\n"
+ "ld1w { z31.s }, p1/Z, [x26]\n"
+ "ld1w { z30.s }, p1/Z, [x22]\n"
+ "whilelt p1.s, XZR, x21\n"
+ "decw x21\n"
"ld1w { z29.s }, p0/Z, [x26, #1, MUL VL]\n"
- "ld1w { z28.s }, p0/Z, [x23, #1, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z27.s }, p0/Z, [x26, #2, MUL VL]\n"
- "ld1w { z26.s }, p0/Z, [x23, #2, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
+ "ld1w { z28.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "decw x21\n"
+ "ld1w { z27.s }, p1/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z26.s }, p1/Z, [x22, #2, MUL VL]\n"
+ "whilelt p1.s, XZR, x21\n"
+ "decw x21\n"
"ld1w { z25.s }, p0/Z, [x26, #3, MUL VL]\n"
- "ld1w { z24.s }, p0/Z, [x23, #3, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z23.s }, p0/Z, [x26, #4, MUL VL]\n"
- "ld1w { z22.s }, p0/Z, [x23, #4, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
+ "ld1w { z24.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "whilelt p0.s, XZR, x21\n"
+ "decw x21\n"
+ "ld1w { z23.s }, p1/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z22.s }, p1/Z, [x22, #4, MUL VL]\n"
+ "whilelt p1.s, XZR, x21\n"
+ "decw x21\n"
"ld1w { z21.s }, p0/Z, [x26, #5, MUL VL]\n"
- "ld1w { z20.s }, p0/Z, [x23, #5, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z19.s }, p0/Z, [x26, #6, MUL VL]\n"
- "ld1w { z18.s }, p0/Z, [x23, #6, MUL VL]\n"
- "decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z17.s }, p0/Z, [x26, #7, MUL VL]\n"
- "ld1w { z16.s }, p0/Z, [x23, #7, MUL VL]\n"
- "mov x20, x22\n"
- "decw x25, ALL, MUL #8\n"
- "st1w { z31.s }, p1, [x20]\n"
- "st1w { z29.s }, p1, [x20, #1, MUL VL]\n"
+ "ld1w { z20.s }, p0/Z, [x22, #5, MUL VL]\n"
+ "whilelt p0.s, XZR, x21\n"
"cmp x25, #0x0\n"
+ "ld1w { z19.s }, p1/Z, [x26, #6, MUL VL]\n"
+ "ld1w { z18.s }, p1/Z, [x22, #6, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x26, #7, MUL VL]\n"
"addvl x26, x26, #8\n"
- "st1w { z27.s }, p1, [x20, #2, MUL VL]\n"
- "addvl x23, x23, #8\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z25.s }, p1, [x20, #3, MUL VL]\n"
- "st1w { z23.s }, p1, [x20, #4, MUL VL]\n"
- "st1w { z21.s }, p1, [x20, #5, MUL VL]\n"
- "st1w { z19.s }, p1, [x20, #6, MUL VL]\n"
- "st1w { z17.s }, p1, [x20, #7, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x22, #7, MUL VL]\n"
+ "st1w { z31.s }, p2, [x20]\n"
+ "st1w { z29.s }, p2, [x20, #1, MUL VL]\n"
+ "addvl x22, x22, #8\n"
+ "st1w { z27.s }, p2, [x20, #2, MUL VL]\n"
+ "st1w { z25.s }, p2, [x20, #3, MUL VL]\n"
+ "st1w { z23.s }, p2, [x20, #4, MUL VL]\n"
+ "st1w { z21.s }, p2, [x20, #5, MUL VL]\n"
+ "st1w { z19.s }, p2, [x20, #6, MUL VL]\n"
+ "st1w { z17.s }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1w { z30.s }, p1, [x20, #-8, MUL VL]\n"
- "st1w { z28.s }, p1, [x20, #-7, MUL VL]\n"
- "st1w { z26.s }, p1, [x20, #-6, MUL VL]\n"
- "st1w { z24.s }, p1, [x20, #-5, MUL VL]\n"
- "st1w { z22.s }, p1, [x20, #-4, MUL VL]\n"
- "st1w { z20.s }, p1, [x20, #-3, MUL VL]\n"
- "st1w { z18.s }, p1, [x20, #-2, MUL VL]\n"
- "st1w { z16.s }, p1, [x20, #-1, MUL VL]\n"
+ "st1w { z30.s }, p2, [x20, #-8, MUL VL]\n"
+ "st1w { z28.s }, p2, [x20, #-7, MUL VL]\n"
+ "st1w { z26.s }, p2, [x20, #-6, MUL VL]\n"
+ "st1w { z24.s }, p2, [x20, #-5, MUL VL]\n"
+ "st1w { z22.s }, p2, [x20, #-4, MUL VL]\n"
+ "st1w { z20.s }, p2, [x20, #-3, MUL VL]\n"
+ "st1w { z18.s }, p2, [x20, #-2, MUL VL]\n"
+ "st1w { z16.s }, p2, [x20, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x2\n"
@@ -191,89 +191,89 @@ void sve_transpose_interleave_8VL(uint32_t *out, const uint32_t *in, size_t widt
"mov x21, %x[width]\n"
"cntw x20, ALL, MUL #16\n"
"mov x26, %x[in]\n"
+ "mov x23, %x[out]\n"
+ "sub %x[height], %x[height], #0x1\n"
"cmp x21, x20\n"
"add %x[in], x26, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x1\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1w { z31.s }, p1/Z, [x26]\n"
- "ld1w { z30.s }, p1/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z31.s }, p2/Z, [x26]\n"
+ "ld1w { z30.s }, p2/Z, [x26, #1, MUL VL]\n"
"sub x21, x21, x20\n"
+ "ld1w { z29.s }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z28.s }, p2/Z, [x26, #3, MUL VL]\n"
"cmp x21, x20\n"
- "ld1w { z29.s }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1w { z28.s }, p1/Z, [x26, #3, MUL VL]\n"
- "ld1w { z27.s }, p1/Z, [x26, #4, MUL VL]\n"
- "ld1w { z26.s }, p1/Z, [x26, #5, MUL VL]\n"
- "ld1w { z25.s }, p1/Z, [x26, #6, MUL VL]\n"
- "ld1w { z24.s }, p1/Z, [x26, #7, MUL VL]\n"
+ "ld1w { z27.s }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z26.s }, p2/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z25.s }, p2/Z, [x26, #6, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x26, #7, MUL VL]\n"
"addvl x26, x26, #16\n"
- "ld1w { z23.s }, p1/Z, [x26, #-8, MUL VL]\n"
- "ld1w { z22.s }, p1/Z, [x26, #-7, MUL VL]\n"
- "ld1w { z21.s }, p1/Z, [x26, #-6, MUL VL]\n"
- "ld1w { z20.s }, p1/Z, [x26, #-5, MUL VL]\n"
- "ld1w { z19.s }, p1/Z, [x26, #-4, MUL VL]\n"
- "ld1w { z18.s }, p1/Z, [x26, #-3, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x26, #-2, MUL VL]\n"
- "ld1w { z16.s }, p1/Z, [x26, #-1, MUL VL]\n"
- "st1w { z31.s }, p1, [x22]\n"
- "st1w { z30.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z29.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z28.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z27.s }, p1, [x22, #4, MUL VL]\n"
- "st1w { z26.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z25.s }, p1, [x22, #6, MUL VL]\n"
- "st1w { z24.s }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1w { z23.s }, p1, [x22]\n"
- "st1w { z22.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z21.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z20.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z17.s }, p1, [x22, #6, MUL VL]\n"
- "st1w { z16.s }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1w { z23.s }, p2/Z, [x26, #-8, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x26, #-7, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x26, #-6, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x26, #-5, MUL VL]\n"
+ "ld1w { z19.s }, p2/Z, [x26, #-4, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x26, #-3, MUL VL]\n"
+ "ld1w { z17.s }, p2/Z, [x26, #-2, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x26, #-1, MUL VL]\n"
+ "st1w { z31.s }, p2, [x23]\n"
+ "st1w { z30.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z29.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z28.s }, p2, [x23, #3, MUL VL]\n"
+ "st1w { z27.s }, p2, [x23, #4, MUL VL]\n"
+ "st1w { z26.s }, p2, [x23, #5, MUL VL]\n"
+ "st1w { z25.s }, p2, [x23, #6, MUL VL]\n"
+ "st1w { z24.s }, p2, [x23, #7, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
+ "st1w { z23.s }, p2, [x23]\n"
+ "st1w { z22.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z20.s }, p2, [x23, #3, MUL VL]\n"
+ "st1w { z19.s }, p2, [x23, #4, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #5, MUL VL]\n"
+ "st1w { z17.s }, p2, [x23, #6, MUL VL]\n"
+ "st1w { z16.s }, p2, [x23, #7, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z23.s }, p0/Z, [x26]\n"
+ "decw x21, ALL, MUL #8\n"
+ "whilelt p1.s, XZR, x20\n"
"decw x20\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z22.s }, p0/Z, [x26, #1, MUL VL]\n"
"decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z21.s }, p0/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z23.s }, p1/Z, [x26]\n"
+ "whilelt p1.s, XZR, x20\n"
"decw x20\n"
+ "ld1w { z22.s }, p0/Z, [x26, #1, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z20.s }, p0/Z, [x26, #3, MUL VL]\n"
"decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z19.s }, p0/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z21.s }, p1/Z, [x26, #2, MUL VL]\n"
+ "whilelt p1.s, XZR, x20\n"
"decw x20\n"
+ "ld1w { z20.s }, p0/Z, [x26, #3, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z18.s }, p0/Z, [x26, #5, MUL VL]\n"
"decw x20\n"
- "whilelt p0.s, XZR, x20\n"
- "ld1w { z17.s }, p0/Z, [x26, #6, MUL VL]\n"
+ "ld1w { z19.s }, p1/Z, [x26, #4, MUL VL]\n"
+ "whilelt p1.s, XZR, x20\n"
"decw x20\n"
- "decw x21, ALL, MUL #8\n"
+ "ld1w { z18.s }, p0/Z, [x26, #5, MUL VL]\n"
"whilelt p0.s, XZR, x20\n"
"cmp x21, #0x0\n"
+ "ld1w { z17.s }, p1/Z, [x26, #6, MUL VL]\n"
"ld1w { z16.s }, p0/Z, [x26, #7, MUL VL]\n"
- "st1w { z23.s }, p1, [x22]\n"
"addvl x26, x26, #8\n"
- "st1w { z22.s }, p1, [x22, #1, MUL VL]\n"
- "st1w { z21.s }, p1, [x22, #2, MUL VL]\n"
- "st1w { z20.s }, p1, [x22, #3, MUL VL]\n"
- "st1w { z19.s }, p1, [x22, #4, MUL VL]\n"
- "st1w { z18.s }, p1, [x22, #5, MUL VL]\n"
- "st1w { z17.s }, p1, [x22, #6, MUL VL]\n"
- "st1w { z16.s }, p1, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1w { z23.s }, p2, [x23]\n"
+ "st1w { z22.s }, p2, [x23, #1, MUL VL]\n"
+ "st1w { z21.s }, p2, [x23, #2, MUL VL]\n"
+ "st1w { z20.s }, p2, [x23, #3, MUL VL]\n"
+ "st1w { z19.s }, p2, [x23, #4, MUL VL]\n"
+ "st1w { z18.s }, p2, [x23, #5, MUL VL]\n"
+ "st1w { z17.s }, p2, [x23, #6, MUL VL]\n"
+ "st1w { z16.s }, p2, [x23, #7, MUL VL]\n"
+ "add x23, x23, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -282,7 +282,7 @@ void sve_transpose_interleave_8VL(uint32_t *out, const uint32_t *in, size_t widt
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp
index 98f0770d77..03d192c874 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,207 +42,207 @@ void sve_transpose_interleave_8VL_1x4(uint8_t *out, const uint8_t *in, size_t wi
"ptrue p2.b\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
+ "mov x25, %x[width]\n"
"cntb x20, ALL, MUL #8\n"
- "add x22, x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x22, %x[in_stride]\n"
- "csel x22, x22, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "mov x24, %x[out]\n"
+ "add x23, x26, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x22, x22, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x23, x20\n"
- "mov x21, %x[out]\n"
+ "csel x23, x23, %x[pad_row], GT\n"
+ "cmp x25, x20\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z7.b }, p2/Z, [x26]\n"
- "ld1b { z24.b }, p2/Z, [x26, #1, MUL VL]\n"
- "sub x23, x23, x20\n"
- "cmp x23, x20\n"
- "ld1b { z31.b }, p2/Z, [x25]\n"
- "ld1b { z18.b }, p2/Z, [x25, #1, MUL VL]\n"
- "ld1b { z19.b }, p2/Z, [x24]\n"
- "ld1b { z25.b }, p2/Z, [x24, #1, MUL VL]\n"
- "zip1 z23.b, z7.b, z19.b\n"
- "zip2 z20.b, z7.b, z19.b\n"
- "ld1b { z30.b }, p2/Z, [x22]\n"
- "ld1b { z3.b }, p2/Z, [x22, #1, MUL VL]\n"
- "zip1 z21.b, z31.b, z30.b\n"
- "zip2 z19.b, z31.b, z30.b\n"
- "ld1b { z16.b }, p2/Z, [x26, #2, MUL VL]\n"
- "ld1b { z30.b }, p2/Z, [x26, #3, MUL VL]\n"
- "zip1 z2.b, z24.b, z25.b\n"
- "zip1 z17.b, z18.b, z3.b\n"
- "ld1b { z29.b }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1b { z8.b }, p2/Z, [x25, #3, MUL VL]\n"
- "zip2 z22.b, z24.b, z25.b\n"
- "zip2 z4.b, z18.b, z3.b\n"
- "ld1b { z0.b }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1b { z3.b }, p2/Z, [x24, #3, MUL VL]\n"
- "zip1 z9.b, z16.b, z0.b\n"
- "zip2 z14.b, z16.b, z0.b\n"
- "ld1b { z18.b }, p2/Z, [x22, #2, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x22, #3, MUL VL]\n"
- "zip1 z24.b, z29.b, z18.b\n"
- "zip2 z11.b, z29.b, z18.b\n"
- "ld1b { z1.b }, p2/Z, [x26, #4, MUL VL]\n"
- "ld1b { z12.b }, p2/Z, [x26, #5, MUL VL]\n"
- "zip1 z13.b, z30.b, z3.b\n"
- "zip1 z15.b, z8.b, z16.b\n"
- "ld1b { z5.b }, p2/Z, [x25, #4, MUL VL]\n"
- "ld1b { z29.b }, p2/Z, [x25, #5, MUL VL]\n"
- "zip2 z31.b, z30.b, z3.b\n"
- "zip2 z30.b, z8.b, z16.b\n"
- "ld1b { z16.b }, p2/Z, [x24, #4, MUL VL]\n"
- "ld1b { z18.b }, p2/Z, [x24, #5, MUL VL]\n"
- "zip1 z27.b, z1.b, z16.b\n"
- "zip2 z10.b, z1.b, z16.b\n"
- "ld1b { z7.b }, p2/Z, [x22, #4, MUL VL]\n"
- "ld1b { z16.b }, p2/Z, [x22, #5, MUL VL]\n"
- "zip1 z8.b, z5.b, z7.b\n"
- "zip2 z26.b, z5.b, z7.b\n"
- "ld1b { z3.b }, p2/Z, [x26, #6, MUL VL]\n"
- "ld1b { z25.b }, p2/Z, [x26, #7, MUL VL]\n"
- "zip1 z6.b, z12.b, z18.b\n"
- "zip1 z5.b, z29.b, z16.b\n"
- "ld1b { z0.b }, p2/Z, [x25, #6, MUL VL]\n"
- "ld1b { z28.b }, p2/Z, [x25, #7, MUL VL]\n"
- "zip2 z12.b, z12.b, z18.b\n"
- "zip2 z7.b, z29.b, z16.b\n"
- "ld1b { z1.b }, p2/Z, [x24, #6, MUL VL]\n"
- "ld1b { z29.b }, p2/Z, [x24, #7, MUL VL]\n"
- "zip1 z16.b, z23.b, z21.b\n"
- "zip2 z18.b, z23.b, z21.b\n"
- "ld1b { z23.b }, p2/Z, [x22, #6, MUL VL]\n"
- "ld1b { z21.b }, p2/Z, [x22, #7, MUL VL]\n"
- "st1b { z16.b }, p2, [x21]\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "zip1 z19.b, z2.b, z17.b\n"
- "st1b { z18.b }, p2, [x21, #1, MUL VL]\n"
+ "ld1b { z22.b }, p2/Z, [x26]\n"
+ "ld1b { z14.b }, p2/Z, [x26, #1, MUL VL]\n"
+ "sub x25, x25, x20\n"
+ "ld1b { z2.b }, p2/Z, [x23]\n"
+ "ld1b { z10.b }, p2/Z, [x23, #1, MUL VL]\n"
+ "cmp x25, x20\n"
+ "ld1b { z27.b }, p2/Z, [x22]\n"
+ "ld1b { z21.b }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z13.b }, p2/Z, [x21]\n"
+ "ld1b { z28.b }, p2/Z, [x21, #1, MUL VL]\n"
+ "ld1b { z11.b }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1b { z9.b }, p2/Z, [x26, #3, MUL VL]\n"
+ "ld1b { z23.b }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x23, #3, MUL VL]\n"
+ "zip1 z24.b, z22.b, z27.b\n"
+ "zip2 z19.b, z22.b, z27.b\n"
+ "ld1b { z5.b }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1b { z3.b }, p2/Z, [x22, #3, MUL VL]\n"
+ "zip1 z6.b, z2.b, z13.b\n"
+ "zip2 z20.b, z2.b, z13.b\n"
+ "ld1b { z25.b }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1b { z30.b }, p2/Z, [x21, #3, MUL VL]\n"
+ "zip1 z18.b, z14.b, z21.b\n"
+ "zip1 z16.b, z10.b, z28.b\n"
+ "ld1b { z26.b }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1b { z22.b }, p2/Z, [x26, #5, MUL VL]\n"
+ "zip2 z17.b, z14.b, z21.b\n"
+ "zip2 z10.b, z10.b, z28.b\n"
+ "ld1b { z27.b }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1b { z7.b }, p2/Z, [x23, #5, MUL VL]\n"
+ "zip1 z21.b, z11.b, z5.b\n"
+ "zip2 z12.b, z11.b, z5.b\n"
+ "ld1b { z2.b }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1b { z5.b }, p2/Z, [x22, #5, MUL VL]\n"
+ "zip1 z13.b, z23.b, z25.b\n"
+ "zip2 z31.b, z23.b, z25.b\n"
+ "ld1b { z25.b }, p2/Z, [x21, #4, MUL VL]\n"
+ "ld1b { z8.b }, p2/Z, [x21, #5, MUL VL]\n"
+ "zip1 z15.b, z9.b, z3.b\n"
+ "zip1 z11.b, z29.b, z30.b\n"
+ "ld1b { z14.b }, p2/Z, [x26, #6, MUL VL]\n"
+ "ld1b { z1.b }, p2/Z, [x26, #7, MUL VL]\n"
+ "zip2 z23.b, z9.b, z3.b\n"
+ "zip2 z0.b, z29.b, z30.b\n"
+ "ld1b { z3.b }, p2/Z, [x23, #6, MUL VL]\n"
+ "ld1b { z29.b }, p2/Z, [x23, #7, MUL VL]\n"
+ "zip1 z9.b, z26.b, z2.b\n"
+ "zip2 z26.b, z26.b, z2.b\n"
+ "ld1b { z2.b }, p2/Z, [x22, #6, MUL VL]\n"
+ "ld1b { z4.b }, p2/Z, [x22, #7, MUL VL]\n"
+ "zip1 z30.b, z27.b, z25.b\n"
+ "zip2 z28.b, z27.b, z25.b\n"
+ "ld1b { z27.b }, p2/Z, [x21, #6, MUL VL]\n"
+ "zip1 z25.b, z22.b, z5.b\n"
+ "zip2 z5.b, z22.b, z5.b\n"
"addvl x26, x26, #8\n"
- "zip2 z18.b, z2.b, z17.b\n"
- "zip1 z17.b, z22.b, z4.b\n"
- "st1b { z16.b }, p2, [x21, #2, MUL VL]\n"
- "addvl x25, x25, #8\n"
- "zip2 z16.b, z22.b, z4.b\n"
- "st1b { z20.b }, p2, [x21, #3, MUL VL]\n"
- "zip1 z4.b, z3.b, z1.b\n"
- "addvl x24, x24, #8\n"
- "st1b { z19.b }, p2, [x21, #4, MUL VL]\n"
- "zip1 z22.b, z0.b, z23.b\n"
- "zip2 z3.b, z3.b, z1.b\n"
+ "zip1 z22.b, z7.b, z8.b\n"
+ "zip2 z7.b, z7.b, z8.b\n"
+ "addvl x23, x23, #8\n"
"addvl x22, x22, #8\n"
- "st1b { z18.b }, p2, [x21, #5, MUL VL]\n"
- "zip2 z2.b, z0.b, z23.b\n"
- "zip1 z1.b, z25.b, z29.b\n"
- "st1b { z17.b }, p2, [x21, #6, MUL VL]\n"
- "zip1 z0.b, z28.b, z21.b\n"
- "zip2 z29.b, z25.b, z29.b\n"
- "st1b { z16.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 z28.b, z28.b, z21.b\n"
- "zip1 z17.b, z9.b, z24.b\n"
- "zip2 z16.b, z9.b, z24.b\n"
- "zip1 z19.b, z14.b, z11.b\n"
- "st1b { z17.b }, p2, [x21]\n"
- "zip2 z18.b, z14.b, z11.b\n"
- "zip1 z17.b, z13.b, z15.b\n"
- "st1b { z16.b }, p2, [x21, #1, MUL VL]\n"
- "zip2 z16.b, z13.b, z15.b\n"
- "zip1 z21.b, z31.b, z30.b\n"
- "st1b { z19.b }, p2, [x21, #2, MUL VL]\n"
- "zip2 z20.b, z31.b, z30.b\n"
- "st1b { z18.b }, p2, [x21, #3, MUL VL]\n"
- "zip1 z19.b, z27.b, z8.b\n"
- "st1b { z17.b }, p2, [x21, #4, MUL VL]\n"
- "zip2 z18.b, z27.b, z8.b\n"
- "zip1 z17.b, z10.b, z26.b\n"
- "st1b { z16.b }, p2, [x21, #5, MUL VL]\n"
- "zip2 z16.b, z10.b, z26.b\n"
- "zip1 z27.b, z6.b, z5.b\n"
- "st1b { z21.b }, p2, [x21, #6, MUL VL]\n"
- "zip2 z26.b, z6.b, z5.b\n"
- "zip1 z25.b, z12.b, z7.b\n"
- "st1b { z20.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
- "zip2 z24.b, z12.b, z7.b\n"
- "zip1 z23.b, z4.b, z22.b\n"
- "st1b { z19.b }, p2, [x21]\n"
- "zip2 z22.b, z4.b, z22.b\n"
- "zip1 z21.b, z3.b, z2.b\n"
- "st1b { z18.b }, p2, [x21, #1, MUL VL]\n"
- "zip2 z20.b, z3.b, z2.b\n"
- "zip1 z19.b, z1.b, z0.b\n"
- "st1b { z17.b }, p2, [x21, #2, MUL VL]\n"
- "zip2 z18.b, z1.b, z0.b\n"
- "zip1 z17.b, z29.b, z28.b\n"
- "st1b { z16.b }, p2, [x21, #3, MUL VL]\n"
- "zip2 z16.b, z29.b, z28.b\n"
- "st1b { z27.b }, p2, [x21, #4, MUL VL]\n"
- "st1b { z26.b }, p2, [x21, #5, MUL VL]\n"
- "st1b { z25.b }, p2, [x21, #6, MUL VL]\n"
- "st1b { z24.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
- "st1b { z23.b }, p2, [x21]\n"
- "st1b { z22.b }, p2, [x21, #1, MUL VL]\n"
- "st1b { z21.b }, p2, [x21, #2, MUL VL]\n"
- "st1b { z20.b }, p2, [x21, #3, MUL VL]\n"
- "st1b { z19.b }, p2, [x21, #4, MUL VL]\n"
- "st1b { z18.b }, p2, [x21, #5, MUL VL]\n"
- "st1b { z17.b }, p2, [x21, #6, MUL VL]\n"
- "st1b { z16.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 z8.b, z24.b, z6.b\n"
+ "zip2 z6.b, z24.b, z6.b\n"
+ "ld1b { z24.b }, p2/Z, [x21, #7, MUL VL]\n"
+ "addvl x21, x21, #8\n"
+ "st1b { z8.b }, p2, [x24]\n"
+ "zip1 z8.b, z19.b, z20.b\n"
+ "zip2 z20.b, z19.b, z20.b\n"
+ "zip1 z19.b, z18.b, z16.b\n"
+ "zip2 z18.b, z18.b, z16.b\n"
+ "zip1 z16.b, z17.b, z10.b\n"
+ "zip2 z17.b, z17.b, z10.b\n"
+ "st1b { z6.b }, p2, [x24, #1, MUL VL]\n"
+ "st1b { z8.b }, p2, [x24, #2, MUL VL]\n"
+ "zip1 z8.b, z14.b, z2.b\n"
+ "zip1 z6.b, z3.b, z27.b\n"
+ "st1b { z20.b }, p2, [x24, #3, MUL VL]\n"
+ "zip2 z10.b, z14.b, z2.b\n"
+ "zip2 z14.b, z3.b, z27.b\n"
+ "st1b { z19.b }, p2, [x24, #4, MUL VL]\n"
+ "zip1 z2.b, z1.b, z4.b\n"
+ "zip1 z3.b, z29.b, z24.b\n"
+ "st1b { z18.b }, p2, [x24, #5, MUL VL]\n"
+ "zip2 z1.b, z1.b, z4.b\n"
+ "zip2 z4.b, z29.b, z24.b\n"
+ "st1b { z16.b }, p2, [x24, #6, MUL VL]\n"
+ "zip1 z24.b, z21.b, z13.b\n"
+ "zip2 z16.b, z21.b, z13.b\n"
+ "st1b { z17.b }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "zip1 z29.b, z12.b, z31.b\n"
+ "zip2 z21.b, z12.b, z31.b\n"
+ "zip1 z20.b, z15.b, z11.b\n"
+ "zip2 z19.b, z15.b, z11.b\n"
+ "zip1 z18.b, z23.b, z0.b\n"
+ "zip2 z17.b, z23.b, z0.b\n"
+ "st1b { z24.b }, p2, [x24]\n"
+ "st1b { z16.b }, p2, [x24, #1, MUL VL]\n"
+ "zip1 z16.b, z9.b, z30.b\n"
+ "zip2 z30.b, z9.b, z30.b\n"
+ "st1b { z29.b }, p2, [x24, #2, MUL VL]\n"
+ "zip1 z29.b, z26.b, z28.b\n"
+ "zip2 z28.b, z26.b, z28.b\n"
+ "st1b { z21.b }, p2, [x24, #3, MUL VL]\n"
+ "zip1 z27.b, z25.b, z22.b\n"
+ "zip2 z26.b, z25.b, z22.b\n"
+ "st1b { z20.b }, p2, [x24, #4, MUL VL]\n"
+ "zip1 z25.b, z5.b, z7.b\n"
+ "zip2 z24.b, z5.b, z7.b\n"
+ "st1b { z19.b }, p2, [x24, #5, MUL VL]\n"
+ "zip1 z23.b, z8.b, z6.b\n"
+ "zip2 z22.b, z8.b, z6.b\n"
+ "st1b { z18.b }, p2, [x24, #6, MUL VL]\n"
+ "zip1 z21.b, z10.b, z14.b\n"
+ "zip2 z20.b, z10.b, z14.b\n"
+ "st1b { z17.b }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "zip1 z19.b, z2.b, z3.b\n"
+ "zip2 z18.b, z2.b, z3.b\n"
+ "st1b { z16.b }, p2, [x24]\n"
+ "zip1 z17.b, z1.b, z4.b\n"
+ "zip2 z16.b, z1.b, z4.b\n"
+ "st1b { z30.b }, p2, [x24, #1, MUL VL]\n"
+ "st1b { z29.b }, p2, [x24, #2, MUL VL]\n"
+ "st1b { z28.b }, p2, [x24, #3, MUL VL]\n"
+ "st1b { z27.b }, p2, [x24, #4, MUL VL]\n"
+ "st1b { z26.b }, p2, [x24, #5, MUL VL]\n"
+ "st1b { z25.b }, p2, [x24, #6, MUL VL]\n"
+ "st1b { z24.b }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "st1b { z23.b }, p2, [x24]\n"
+ "st1b { z22.b }, p2, [x24, #1, MUL VL]\n"
+ "st1b { z21.b }, p2, [x24, #2, MUL VL]\n"
+ "st1b { z20.b }, p2, [x24, #3, MUL VL]\n"
+ "st1b { z19.b }, p2, [x24, #4, MUL VL]\n"
+ "st1b { z18.b }, p2, [x24, #5, MUL VL]\n"
+ "st1b { z17.b }, p2, [x24, #6, MUL VL]\n"
+ "st1b { z16.b }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
+ "mov x20, x25\n"
+ "decw x25, ALL, MUL #8\n"
"whilelt p1.b, XZR, x20\n"
- "ld1b { z23.b }, p1/Z, [x26]\n"
- "ld1b { z22.b }, p1/Z, [x25]\n"
"decb x20\n"
"whilelt p0.b, XZR, x20\n"
- "ld1b { z21.b }, p0/Z, [x26, #1, MUL VL]\n"
- "ld1b { z25.b }, p0/Z, [x25, #1, MUL VL]\n"
- "ld1b { z19.b }, p1/Z, [x24]\n"
- "ld1b { z20.b }, p0/Z, [x24, #1, MUL VL]\n"
- "decw x23, ALL, MUL #8\n"
- "zip1 z24.b, z23.b, z19.b\n"
+ "cmp x25, #0x0\n"
+ "ld1b { z20.b }, p1/Z, [x26]\n"
+ "ld1b { z19.b }, p1/Z, [x23]\n"
"ld1b { z18.b }, p1/Z, [x22]\n"
- "ld1b { z16.b }, p0/Z, [x22, #1, MUL VL]\n"
- "zip1 z17.b, z22.b, z18.b\n"
- "zip2 z23.b, z23.b, z19.b\n"
- "zip2 z19.b, z22.b, z18.b\n"
- "zip1 z22.b, z21.b, z20.b\n"
- "cmp x23, #0x0\n"
+ "ld1b { z17.b }, p1/Z, [x21]\n"
+ "ld1b { z24.b }, p0/Z, [x26, #1, MUL VL]\n"
"addvl x26, x26, #2\n"
- "zip1 z18.b, z25.b, z16.b\n"
- "zip2 z21.b, z21.b, z20.b\n"
- "addvl x25, x25, #2\n"
- "addvl x24, x24, #2\n"
- "zip2 z20.b, z25.b, z16.b\n"
+ "ld1b { z25.b }, p0/Z, [x23, #1, MUL VL]\n"
+ "addvl x23, x23, #2\n"
+ "ld1b { z23.b }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1b { z16.b }, p0/Z, [x21, #1, MUL VL]\n"
"addvl x22, x22, #2\n"
- "zip1 z16.b, z24.b, z17.b\n"
- "st1b { z16.b }, p2, [x21]\n"
- "zip2 z16.b, z24.b, z17.b\n"
- "zip1 z17.b, z23.b, z19.b\n"
- "st1b { z16.b }, p2, [x21, #1, MUL VL]\n"
- "zip2 z16.b, z23.b, z19.b\n"
- "zip1 z19.b, z22.b, z18.b\n"
- "st1b { z17.b }, p2, [x21, #2, MUL VL]\n"
- "zip2 z18.b, z22.b, z18.b\n"
- "zip1 z17.b, z21.b, z20.b\n"
- "st1b { z16.b }, p2, [x21, #3, MUL VL]\n"
- "zip2 z16.b, z21.b, z20.b\n"
- "st1b { z19.b }, p2, [x21, #4, MUL VL]\n"
- "st1b { z18.b }, p2, [x21, #5, MUL VL]\n"
- "st1b { z17.b }, p2, [x21, #6, MUL VL]\n"
- "st1b { z16.b }, p2, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "addvl x21, x21, #2\n"
+ "zip1 z22.b, z20.b, z18.b\n"
+ "zip1 z21.b, z19.b, z17.b\n"
+ "zip2 z20.b, z20.b, z18.b\n"
+ "zip2 z19.b, z19.b, z17.b\n"
+ "zip1 z18.b, z24.b, z23.b\n"
+ "zip1 z17.b, z25.b, z16.b\n"
+ "zip2 z24.b, z24.b, z23.b\n"
+ "zip2 z16.b, z25.b, z16.b\n"
+ "zip1 z23.b, z22.b, z21.b\n"
+ "zip2 z22.b, z22.b, z21.b\n"
+ "zip1 z21.b, z20.b, z19.b\n"
+ "zip2 z20.b, z20.b, z19.b\n"
+ "zip1 z19.b, z18.b, z17.b\n"
+ "zip2 z18.b, z18.b, z17.b\n"
+ "zip1 z17.b, z24.b, z16.b\n"
+ "zip2 z16.b, z24.b, z16.b\n"
+ "st1b { z23.b }, p2, [x24]\n"
+ "st1b { z22.b }, p2, [x24, #1, MUL VL]\n"
+ "st1b { z21.b }, p2, [x24, #2, MUL VL]\n"
+ "st1b { z20.b }, p2, [x24, #3, MUL VL]\n"
+ "st1b { z19.b }, p2, [x24, #4, MUL VL]\n"
+ "st1b { z18.b }, p2, [x24, #5, MUL VL]\n"
+ "st1b { z17.b }, p2, [x24, #6, MUL VL]\n"
+ "st1b { z16.b }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp
index 3fa5292143..091c0e526b 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_1x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -42,180 +42,180 @@ void sve_transpose_interleave_8VL_1x8(uint8_t *out, const uint8_t *in, size_t wi
"ptrue p1.b\n"
"1:" // Main row loop: Head
"mov x10, %x[in]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "add x27, x28, %x[in_stride]\n"
- "add x26, x27, %x[in_stride]\n"
+ "cmp %x[height], #0x7\n"
+ "mov x9, %x[width]\n"
+ "cntb x28, ALL, MUL #2\n"
+ "mov x27, %x[out]\n"
+ "add x26, x10, %x[in_stride]\n"
"add x25, x26, %x[in_stride]\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp %x[height], #0x7\n"
- "add %x[in], x23, %x[in_stride]\n"
- "csel x23, x23, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add x20, x21, %x[in_stride]\n"
+ "add %x[in], x20, %x[in_stride]\n"
+ "csel x20, x20, %x[pad_row], GT\n"
+ "csel x21, x21, %x[pad_row], GE\n"
"cmp %x[height], #0x5\n"
- "mov x22, %x[width]\n"
- "cntb x21, ALL, MUL #2\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "csel x26, x26, %x[pad_row], GE\n"
+ "csel x22, x22, %x[pad_row], GT\n"
+ "csel x23, x23, %x[pad_row], GE\n"
"cmp %x[height], #0x3\n"
- "csel x27, x27, %x[pad_row], GT\n"
- "csel x28, x28, %x[pad_row], GE\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "csel x25, x25, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "cmp x22, x21\n"
- "mov x20, %x[out]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "cmp x9, x28\n"
"sub %x[height], %x[height], #0x8\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1b { z23.b }, p1/Z, [x10]\n"
- "ld1b { z22.b }, p1/Z, [x9]\n"
- "sub x22, x22, x21\n"
- "cmp x22, x21\n"
- "ld1b { z20.b }, p1/Z, [x28]\n"
- "ld1b { z21.b }, p1/Z, [x27]\n"
- "ld1b { z19.b }, p1/Z, [x26]\n"
- "ld1b { z18.b }, p1/Z, [x25]\n"
- "zip1 z5.b, z23.b, z19.b\n"
- "zip1 z4.b, z22.b, z18.b\n"
- "ld1b { z17.b }, p1/Z, [x24]\n"
- "ld1b { z16.b }, p1/Z, [x23]\n"
- "zip1 z3.b, z20.b, z17.b\n"
- "zip1 z31.b, z21.b, z16.b\n"
- "ld1b { z25.b }, p1/Z, [x10, #1, MUL VL]\n"
- "ld1b { z24.b }, p1/Z, [x9, #1, MUL VL]\n"
- "zip2 z2.b, z23.b, z19.b\n"
- "zip2 z30.b, z20.b, z17.b\n"
- "ld1b { z23.b }, p1/Z, [x28, #1, MUL VL]\n"
- "ld1b { z20.b }, p1/Z, [x27, #1, MUL VL]\n"
- "zip2 z22.b, z22.b, z18.b\n"
- "zip2 z21.b, z21.b, z16.b\n"
- "ld1b { z19.b }, p1/Z, [x26, #1, MUL VL]\n"
- "ld1b { z18.b }, p1/Z, [x25, #1, MUL VL]\n"
- "zip1 z29.b, z25.b, z19.b\n"
- "zip1 z28.b, z24.b, z18.b\n"
- "ld1b { z17.b }, p1/Z, [x24, #1, MUL VL]\n"
- "ld1b { z16.b }, p1/Z, [x23, #1, MUL VL]\n"
- "zip1 z27.b, z23.b, z17.b\n"
- "zip1 z26.b, z20.b, z16.b\n"
- "zip2 z1.b, z25.b, z19.b\n"
- "zip2 z25.b, z23.b, z17.b\n"
+ "ld1b { z24.b }, p1/Z, [x10]\n"
+ "ld1b { z0.b }, p1/Z, [x26]\n"
+ "sub x9, x9, x28\n"
+ "ld1b { z31.b }, p1/Z, [x25]\n"
+ "ld1b { z30.b }, p1/Z, [x24]\n"
+ "cmp x9, x28\n"
+ "ld1b { z23.b }, p1/Z, [x23]\n"
+ "ld1b { z29.b }, p1/Z, [x22]\n"
+ "ld1b { z22.b }, p1/Z, [x21]\n"
+ "ld1b { z21.b }, p1/Z, [x20]\n"
+ "ld1b { z28.b }, p1/Z, [x10, #1, MUL VL]\n"
+ "ld1b { z4.b }, p1/Z, [x26, #1, MUL VL]\n"
"addvl x10, x10, #2\n"
- "addvl x9, x9, #2\n"
- "zip2 z24.b, z24.b, z18.b\n"
- "zip2 z16.b, z20.b, z16.b\n"
- "addvl x28, x28, #2\n"
- "addvl x27, x27, #2\n"
- "zip1 z0.b, z5.b, z3.b\n"
- "zip1 z17.b, z4.b, z31.b\n"
"addvl x26, x26, #2\n"
+ "ld1b { z27.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "ld1b { z20.b }, p1/Z, [x24, #1, MUL VL]\n"
+ "zip1 z3.b, z24.b, z23.b\n"
+ "zip1 z2.b, z0.b, z29.b\n"
+ "ld1b { z19.b }, p1/Z, [x23, #1, MUL VL]\n"
+ "ld1b { z18.b }, p1/Z, [x22, #1, MUL VL]\n"
+ "zip1 z26.b, z31.b, z22.b\n"
+ "zip1 z25.b, z30.b, z21.b\n"
+ "ld1b { z17.b }, p1/Z, [x21, #1, MUL VL]\n"
+ "ld1b { z16.b }, p1/Z, [x20, #1, MUL VL]\n"
+ "zip2 z24.b, z24.b, z23.b\n"
+ "zip2 z23.b, z31.b, z22.b\n"
+ "zip2 z22.b, z0.b, z29.b\n"
+ "zip2 z21.b, z30.b, z21.b\n"
"addvl x25, x25, #2\n"
- "zip2 z20.b, z5.b, z3.b\n"
- "zip2 z19.b, z4.b, z31.b\n"
"addvl x24, x24, #2\n"
+ "zip1 z0.b, z28.b, z19.b\n"
+ "zip1 z31.b, z4.b, z18.b\n"
"addvl x23, x23, #2\n"
- "zip1 z31.b, z2.b, z30.b\n"
- "zip1 z18.b, z22.b, z21.b\n"
- "zip2 z30.b, z2.b, z30.b\n"
+ "addvl x22, x22, #2\n"
+ "zip1 z30.b, z27.b, z17.b\n"
+ "zip1 z29.b, z20.b, z16.b\n"
+ "addvl x21, x21, #2\n"
+ "addvl x20, x20, #2\n"
+ "zip2 z1.b, z28.b, z19.b\n"
+ "zip2 z28.b, z27.b, z17.b\n"
+ "zip2 z27.b, z4.b, z18.b\n"
+ "zip2 z20.b, z20.b, z16.b\n"
+ "zip1 z19.b, z3.b, z26.b\n"
+ "zip1 z18.b, z2.b, z25.b\n"
+ "zip2 z17.b, z3.b, z26.b\n"
+ "zip2 z16.b, z2.b, z25.b\n"
+ "zip1 z26.b, z24.b, z23.b\n"
+ "zip1 z25.b, z22.b, z21.b\n"
+ "zip2 z24.b, z24.b, z23.b\n"
"zip2 z23.b, z22.b, z21.b\n"
- "zip1 z22.b, z29.b, z27.b\n"
- "zip1 z21.b, z28.b, z26.b\n"
- "zip2 z29.b, z29.b, z27.b\n"
- "zip2 z28.b, z28.b, z26.b\n"
- "zip1 z27.b, z1.b, z25.b\n"
- "zip1 z26.b, z24.b, z16.b\n"
- "zip2 z25.b, z1.b, z25.b\n"
- "zip2 z24.b, z24.b, z16.b\n"
- "zip1 z16.b, z0.b, z17.b\n"
- "zip2 z17.b, z0.b, z17.b\n"
- "st1b { z16.b }, p1, [x20]\n"
- "zip1 z16.b, z20.b, z19.b\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "st1b { z17.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z19.b, z31.b, z18.b\n"
- "zip2 z18.b, z31.b, z18.b\n"
- "st1b { z16.b }, p1, [x20, #2, MUL VL]\n"
- "zip1 z17.b, z30.b, z23.b\n"
- "zip2 z16.b, z30.b, z23.b\n"
- "st1b { z20.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x20, #4, MUL VL]\n"
+ "zip1 z22.b, z0.b, z30.b\n"
+ "zip1 z21.b, z31.b, z29.b\n"
+ "zip2 z0.b, z0.b, z30.b\n"
+ "zip2 z31.b, z31.b, z29.b\n"
+ "zip1 z30.b, z1.b, z28.b\n"
+ "zip1 z29.b, z27.b, z20.b\n"
+ "zip2 z28.b, z1.b, z28.b\n"
+ "zip2 z27.b, z27.b, z20.b\n"
+ "zip1 z20.b, z19.b, z18.b\n"
+ "zip2 z19.b, z19.b, z18.b\n"
+ "zip1 z18.b, z17.b, z16.b\n"
+ "zip2 z17.b, z17.b, z16.b\n"
+ "zip1 z16.b, z26.b, z25.b\n"
+ "zip2 z26.b, z26.b, z25.b\n"
+ "zip1 z25.b, z24.b, z23.b\n"
+ "zip2 z24.b, z24.b, z23.b\n"
+ "st1b { z20.b }, p1, [x27]\n"
+ "st1b { z19.b }, p1, [x27, #1, MUL VL]\n"
"zip1 z23.b, z22.b, z21.b\n"
"zip2 z22.b, z22.b, z21.b\n"
- "st1b { z18.b }, p1, [x20, #5, MUL VL]\n"
- "zip1 z21.b, z29.b, z28.b\n"
- "zip2 z20.b, z29.b, z28.b\n"
- "st1b { z17.b }, p1, [x20, #6, MUL VL]\n"
- "zip1 z19.b, z27.b, z26.b\n"
- "zip2 z18.b, z27.b, z26.b\n"
- "st1b { z16.b }, p1, [x20, #7, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
- "zip1 z17.b, z25.b, z24.b\n"
- "zip2 z16.b, z25.b, z24.b\n"
- "st1b { z23.b }, p1, [x20]\n"
- "st1b { z22.b }, p1, [x20, #1, MUL VL]\n"
- "st1b { z21.b }, p1, [x20, #2, MUL VL]\n"
- "st1b { z20.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #5, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #6, MUL VL]\n"
- "st1b { z16.b }, p1, [x20, #7, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "st1b { z18.b }, p1, [x27, #2, MUL VL]\n"
+ "zip1 z21.b, z0.b, z31.b\n"
+ "zip2 z20.b, z0.b, z31.b\n"
+ "st1b { z17.b }, p1, [x27, #3, MUL VL]\n"
+ "zip1 z19.b, z30.b, z29.b\n"
+ "zip2 z18.b, z30.b, z29.b\n"
+ "st1b { z16.b }, p1, [x27, #4, MUL VL]\n"
+ "zip1 z17.b, z28.b, z27.b\n"
+ "zip2 z16.b, z28.b, z27.b\n"
+ "st1b { z26.b }, p1, [x27, #5, MUL VL]\n"
+ "st1b { z25.b }, p1, [x27, #6, MUL VL]\n"
+ "st1b { z24.b }, p1, [x27, #7, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
+ "st1b { z23.b }, p1, [x27]\n"
+ "st1b { z22.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z21.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z20.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z18.b }, p1, [x27, #5, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #6, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #7, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x22, 5f\n"
+ "cbz x9, 5f\n"
"4:" // Main row loop: Column loop
- "whilelt p0.b, XZR, x22\n"
- "ld1b { z25.b }, p0/Z, [x10]\n"
- "ld1b { z27.b }, p0/Z, [x9]\n"
- "decd x22, ALL, MUL #8\n"
- "ld1b { z26.b }, p0/Z, [x28]\n"
- "ld1b { z24.b }, p0/Z, [x27]\n"
- "cmp x22, #0x0\n"
+ "whilelt p0.b, XZR, x9\n"
+ "decd x9, ALL, MUL #8\n"
+ "ld1b { z19.b }, p0/Z, [x10]\n"
"addvl x10, x10, #1\n"
- "ld1b { z22.b }, p0/Z, [x26]\n"
- "ld1b { z21.b }, p0/Z, [x25]\n"
- "zip1 z20.b, z25.b, z22.b\n"
- "zip1 z23.b, z27.b, z21.b\n"
- "ld1b { z17.b }, p0/Z, [x24]\n"
- "ld1b { z16.b }, p0/Z, [x23]\n"
- "zip1 z19.b, z26.b, z17.b\n"
- "zip1 z18.b, z24.b, z16.b\n"
- "zip2 z25.b, z25.b, z22.b\n"
- "zip2 z22.b, z26.b, z17.b\n"
- "addvl x9, x9, #1\n"
- "addvl x28, x28, #1\n"
- "zip2 z21.b, z27.b, z21.b\n"
- "zip2 z16.b, z24.b, z16.b\n"
- "addvl x27, x27, #1\n"
+ "ld1b { z26.b }, p0/Z, [x26]\n"
"addvl x26, x26, #1\n"
- "zip1 z24.b, z20.b, z19.b\n"
- "zip1 z17.b, z23.b, z18.b\n"
+ "ld1b { z22.b }, p0/Z, [x25]\n"
"addvl x25, x25, #1\n"
+ "ld1b { z25.b }, p0/Z, [x24]\n"
"addvl x24, x24, #1\n"
- "zip2 z20.b, z20.b, z19.b\n"
- "zip2 z19.b, z23.b, z18.b\n"
+ "ld1b { z18.b }, p0/Z, [x23]\n"
+ "ld1b { z21.b }, p0/Z, [x22]\n"
+ "ld1b { z17.b }, p0/Z, [x21]\n"
+ "cmp x9, #0x0\n"
"addvl x23, x23, #1\n"
- "zip1 z23.b, z25.b, z22.b\n"
- "zip1 z18.b, z21.b, z16.b\n"
- "zip2 z22.b, z25.b, z22.b\n"
- "zip2 z21.b, z21.b, z16.b\n"
- "zip1 z16.b, z24.b, z17.b\n"
- "zip2 z17.b, z24.b, z17.b\n"
- "st1b { z16.b }, p1, [x20]\n"
- "zip1 z16.b, z20.b, z19.b\n"
+ "ld1b { z16.b }, p0/Z, [x20]\n"
+ "addvl x22, x22, #1\n"
+ "addvl x21, x21, #1\n"
+ "zip1 z20.b, z19.b, z18.b\n"
+ "zip2 z24.b, z19.b, z18.b\n"
+ "addvl x20, x20, #1\n"
+ "zip1 z19.b, z22.b, z17.b\n"
+ "zip1 z18.b, z26.b, z21.b\n"
+ "zip2 z23.b, z22.b, z17.b\n"
+ "zip1 z17.b, z25.b, z16.b\n"
+ "zip2 z22.b, z26.b, z21.b\n"
+ "zip2 z16.b, z25.b, z16.b\n"
+ "zip1 z21.b, z20.b, z19.b\n"
"zip2 z20.b, z20.b, z19.b\n"
- "st1b { z17.b }, p1, [x20, #1, MUL VL]\n"
- "zip1 z19.b, z23.b, z18.b\n"
- "zip2 z18.b, z23.b, z18.b\n"
- "st1b { z16.b }, p1, [x20, #2, MUL VL]\n"
- "zip1 z17.b, z22.b, z21.b\n"
- "zip2 z16.b, z22.b, z21.b\n"
- "st1b { z20.b }, p1, [x20, #3, MUL VL]\n"
- "st1b { z19.b }, p1, [x20, #4, MUL VL]\n"
- "st1b { z18.b }, p1, [x20, #5, MUL VL]\n"
- "st1b { z17.b }, p1, [x20, #6, MUL VL]\n"
- "st1b { z16.b }, p1, [x20, #7, MUL VL]\n"
- "add x20, x20, %x[out_stride]\n"
+ "zip1 z25.b, z24.b, z23.b\n"
+ "zip1 z19.b, z18.b, z17.b\n"
+ "zip2 z18.b, z18.b, z17.b\n"
+ "zip1 z17.b, z22.b, z16.b\n"
+ "zip2 z24.b, z24.b, z23.b\n"
+ "zip2 z23.b, z22.b, z16.b\n"
+ "zip1 z16.b, z21.b, z19.b\n"
+ "zip2 z22.b, z21.b, z19.b\n"
+ "zip1 z21.b, z20.b, z18.b\n"
+ "zip2 z20.b, z20.b, z18.b\n"
+ "zip1 z19.b, z25.b, z17.b\n"
+ "zip2 z18.b, z25.b, z17.b\n"
+ "zip1 z17.b, z24.b, z23.b\n"
+ "st1b { z16.b }, p1, [x27]\n"
+ "zip2 z16.b, z24.b, z23.b\n"
+ "st1b { z22.b }, p1, [x27, #1, MUL VL]\n"
+ "st1b { z21.b }, p1, [x27, #2, MUL VL]\n"
+ "st1b { z20.b }, p1, [x27, #3, MUL VL]\n"
+ "st1b { z19.b }, p1, [x27, #4, MUL VL]\n"
+ "st1b { z18.b }, p1, [x27, #5, MUL VL]\n"
+ "st1b { z17.b }, p1, [x27, #6, MUL VL]\n"
+ "st1b { z16.b }, p1, [x27, #7, MUL VL]\n"
+ "add x27, x27, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -223,7 +223,7 @@ void sve_transpose_interleave_8VL_1x8(uint8_t *out, const uint8_t *in, size_t wi
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp
index 02977ecf1e..6d436ebcad 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x2.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -40,198 +40,198 @@ void sve_transpose_interleave_8VL_2x2(uint16_t *out, const uint16_t *in, size_t
__asm__ __volatile__(
"cmp %x[height], #0x4\n"
- "ptrue p4.b\n"
+ "ptrue p2.b\n"
"blt 6f\n"
"1:" // Main row loop: Head
"mov x28, %x[in]\n"
"mov x27, %x[width]\n"
"cnth x26, ALL, MUL #8\n"
- "add x25, x28, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
+ "mov x25, %x[out]\n"
+ "sub %x[height], %x[height], #0x4\n"
+ "add x24, x28, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
"cmp x27, x26\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x4\n"
+ "add %x[in], x22, %x[in_stride]\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z30.h }, p4/Z, [x28]\n"
- "ld1h { z12.h }, p4/Z, [x28, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z31.h }, p4/Z, [x28, #2, MUL VL]\n"
- "ld1h { z18.h }, p4/Z, [x28, #3, MUL VL]\n"
- "mov x20, x22\n"
+ "ld1h { z17.h }, p2/Z, [x28]\n"
+ "ld1h { z30.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "mov x21, x25\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1h { z28.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z1.h }, p2/Z, [x28, #3, MUL VL]\n"
+ "mov x20, x25\n"
"sub x27, x27, x26\n"
- "ld1h { z20.h }, p4/Z, [x25]\n"
- "ld1h { z17.h }, p4/Z, [x25, #1, MUL VL]\n"
- "zip1 z3.h, z30.h, z20.h\n"
- "zip2 z21.h, z30.h, z20.h\n"
- "ld1h { z26.h }, p4/Z, [x25, #2, MUL VL]\n"
- "ld1h { z23.h }, p4/Z, [x25, #3, MUL VL]\n"
- "zip1 z13.h, z12.h, z17.h\n"
- "zip2 z0.h, z12.h, z17.h\n"
- "ld1h { z2.h }, p4/Z, [x28, #4, MUL VL]\n"
- "ld1h { z24.h }, p4/Z, [x28, #5, MUL VL]\n"
- "zip1 z12.h, z31.h, z26.h\n"
- "zip2 z14.h, z31.h, z26.h\n"
- "ld1h { z17.h }, p4/Z, [x28, #6, MUL VL]\n"
- "ld1h { z29.h }, p4/Z, [x28, #7, MUL VL]\n"
- "zip1 z16.h, z18.h, z23.h\n"
- "zip2 z15.h, z18.h, z23.h\n"
- "ld1h { z9.h }, p4/Z, [x25, #4, MUL VL]\n"
- "ld1h { z18.h }, p4/Z, [x25, #5, MUL VL]\n"
- "zip1 z11.h, z2.h, z9.h\n"
- "zip2 z5.h, z2.h, z9.h\n"
- "ld1h { z7.h }, p4/Z, [x25, #6, MUL VL]\n"
- "ld1h { z2.h }, p4/Z, [x25, #7, MUL VL]\n"
- "zip1 z10.h, z24.h, z18.h\n"
- "zip2 z6.h, z24.h, z18.h\n"
- "ld1h { z19.h }, p4/Z, [x24]\n"
- "ld1h { z18.h }, p4/Z, [x24, #1, MUL VL]\n"
- "zip1 z9.h, z17.h, z7.h\n"
- "zip2 z4.h, z17.h, z7.h\n"
- "ld1h { z24.h }, p4/Z, [x24, #2, MUL VL]\n"
- "ld1h { z22.h }, p4/Z, [x24, #3, MUL VL]\n"
- "zip1 z7.h, z29.h, z2.h\n"
- "zip2 z8.h, z29.h, z2.h\n"
- "ld1h { z25.h }, p4/Z, [x24, #4, MUL VL]\n"
- "ld1h { z17.h }, p4/Z, [x24, #5, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x24]\n"
+ "ld1h { z25.h }, p2/Z, [x24, #1, MUL VL]\n"
"cmp x27, x26\n"
+ "add x25, x25, %x[out_stride]\n"
+ "ld1h { z24.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z22.h }, p2/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z21.h }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1h { z20.h }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1h { z23.h }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1h { z19.h }, p2/Z, [x28, #7, MUL VL]\n"
+ "zip1 z31.h, z17.h, z16.h\n"
+ "zip2 z29.h, z17.h, z16.h\n"
+ "ld1h { z18.h }, p2/Z, [x24, #4, MUL VL]\n"
+ "ld1h { z17.h }, p2/Z, [x24, #5, MUL VL]\n"
+ "zip1 z27.h, z30.h, z25.h\n"
+ "zip2 z26.h, z30.h, z25.h\n"
+ "ld1h { z16.h }, p2/Z, [x24, #6, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x24, #7, MUL VL]\n"
+ "zip1 z14.h, z28.h, z24.h\n"
+ "zip2 z15.h, z28.h, z24.h\n"
+ "ld1h { z30.h }, p2/Z, [x23]\n"
+ "ld1h { z28.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip1 z13.h, z1.h, z22.h\n"
+ "zip2 z12.h, z1.h, z22.h\n"
+ "ld1h { z25.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z24.h }, p2/Z, [x23, #3, MUL VL]\n"
+ "zip1 z11.h, z21.h, z18.h\n"
+ "zip2 z10.h, z21.h, z18.h\n"
+ "ld1h { z9.h }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1h { z8.h }, p2/Z, [x23, #5, MUL VL]\n"
+ "zip1 z7.h, z20.h, z17.h\n"
+ "zip2 z6.h, z20.h, z17.h\n"
+ "ld1h { z5.h }, p2/Z, [x23, #6, MUL VL]\n"
+ "ld1h { z4.h }, p2/Z, [x23, #7, MUL VL]\n"
+ "zip1 z3.h, z23.h, z16.h\n"
+ "zip2 z2.h, z23.h, z16.h\n"
+ "ld1h { z23.h }, p2/Z, [x22]\n"
+ "ld1h { z22.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "zip1 z1.h, z19.h, z0.h\n"
+ "zip2 z0.h, z19.h, z0.h\n"
+ "ld1h { z21.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z20.h }, p2/Z, [x22, #3, MUL VL]\n"
"addvl x28, x28, #8\n"
- "ld1h { z2.h }, p4/Z, [x24, #6, MUL VL]\n"
- "ld1h { z30.h }, p4/Z, [x24, #7, MUL VL]\n"
- "addvl x25, x25, #8\n"
"addvl x24, x24, #8\n"
- "ld1h { z20.h }, p4/Z, [x23]\n"
- "ld1h { z27.h }, p4/Z, [x23, #1, MUL VL]\n"
- "zip1 z31.h, z19.h, z20.h\n"
- "zip2 z29.h, z19.h, z20.h\n"
- "ld1h { z26.h }, p4/Z, [x23, #2, MUL VL]\n"
- "ld1h { z23.h }, p4/Z, [x23, #3, MUL VL]\n"
- "zip1 z28.h, z18.h, z27.h\n"
- "zip2 z1.h, z18.h, z27.h\n"
- "ld1h { z20.h }, p4/Z, [x23, #4, MUL VL]\n"
- "ld1h { z19.h }, p4/Z, [x23, #5, MUL VL]\n"
- "zip1 z27.h, z24.h, z26.h\n"
- "zip2 z26.h, z24.h, z26.h\n"
- "ld1h { z18.h }, p4/Z, [x23, #6, MUL VL]\n"
- "ld1h { z24.h }, p4/Z, [x23, #7, MUL VL]\n"
- "st1h { z3.h }, p4, [x21]\n"
- "zip1 z3.h, z22.h, z23.h\n"
- "st1h { z21.h }, p4, [x21, #1, MUL VL]\n"
- "zip2 z22.h, z22.h, z23.h\n"
+ "ld1h { z19.h }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x22, #5, MUL VL]\n"
"addvl x23, x23, #8\n"
- "zip1 z23.h, z25.h, z20.h\n"
- "st1h { z13.h }, p4, [x21, #2, MUL VL]\n"
- "zip2 z25.h, z25.h, z20.h\n"
- "zip1 z21.h, z17.h, z19.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z0.h }, p4, [x21, #3, MUL VL]\n"
- "zip2 z20.h, z17.h, z19.h\n"
- "zip1 z19.h, z2.h, z18.h\n"
- "st1h { z12.h }, p4, [x21, #4, MUL VL]\n"
- "zip2 z18.h, z2.h, z18.h\n"
- "zip1 z17.h, z30.h, z24.h\n"
- "st1h { z14.h }, p4, [x21, #5, MUL VL]\n"
- "zip2 z13.h, z30.h, z24.h\n"
- "st1h { z16.h }, p4, [x21, #6, MUL VL]\n"
- "st1h { z15.h }, p4, [x21, #7, MUL VL]\n"
+ "ld1h { z17.h }, p2/Z, [x22, #6, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x22, #7, MUL VL]\n"
+ "st1h { z31.h }, p2, [x21]\n"
+ "zip1 z31.h, z30.h, z23.h\n"
+ "st1h { z29.h }, p2, [x21, #1, MUL VL]\n"
+ "zip2 z30.h, z30.h, z23.h\n"
+ "zip1 z29.h, z28.h, z22.h\n"
+ "addvl x22, x22, #8\n"
+ "st1h { z27.h }, p2, [x21, #2, MUL VL]\n"
+ "zip2 z28.h, z28.h, z22.h\n"
+ "zip1 z27.h, z25.h, z21.h\n"
+ "st1h { z26.h }, p2, [x21, #3, MUL VL]\n"
+ "zip2 z26.h, z25.h, z21.h\n"
+ "zip1 z25.h, z24.h, z20.h\n"
+ "st1h { z14.h }, p2, [x21, #4, MUL VL]\n"
+ "zip2 z24.h, z24.h, z20.h\n"
+ "zip1 z23.h, z9.h, z19.h\n"
+ "st1h { z15.h }, p2, [x21, #5, MUL VL]\n"
+ "zip2 z22.h, z9.h, z19.h\n"
+ "zip1 z21.h, z8.h, z18.h\n"
+ "st1h { z13.h }, p2, [x21, #6, MUL VL]\n"
+ "zip2 z20.h, z8.h, z18.h\n"
+ "zip1 z19.h, z5.h, z17.h\n"
+ "st1h { z12.h }, p2, [x21, #7, MUL VL]\n"
"addvl x21, x21, #16\n"
- "st1h { z31.h }, p4, [x21, #-8, MUL VL]\n"
- "st1h { z29.h }, p4, [x21, #-7, MUL VL]\n"
- "st1h { z28.h }, p4, [x21, #-6, MUL VL]\n"
- "st1h { z1.h }, p4, [x21, #-5, MUL VL]\n"
- "st1h { z27.h }, p4, [x21, #-4, MUL VL]\n"
- "st1h { z26.h }, p4, [x21, #-3, MUL VL]\n"
- "st1h { z3.h }, p4, [x21, #-2, MUL VL]\n"
- "st1h { z22.h }, p4, [x21, #-1, MUL VL]\n"
- "st1h { z11.h }, p4, [x20]\n"
- "st1h { z5.h }, p4, [x20, #1, MUL VL]\n"
- "st1h { z10.h }, p4, [x20, #2, MUL VL]\n"
- "st1h { z6.h }, p4, [x20, #3, MUL VL]\n"
- "st1h { z9.h }, p4, [x20, #4, MUL VL]\n"
- "st1h { z4.h }, p4, [x20, #5, MUL VL]\n"
- "st1h { z7.h }, p4, [x20, #6, MUL VL]\n"
- "st1h { z8.h }, p4, [x20, #7, MUL VL]\n"
+ "zip2 z18.h, z5.h, z17.h\n"
+ "zip1 z17.h, z4.h, z16.h\n"
+ "zip2 z16.h, z4.h, z16.h\n"
+ "st1h { z31.h }, p2, [x21, #-8, MUL VL]\n"
+ "st1h { z30.h }, p2, [x21, #-7, MUL VL]\n"
+ "st1h { z29.h }, p2, [x21, #-6, MUL VL]\n"
+ "st1h { z28.h }, p2, [x21, #-5, MUL VL]\n"
+ "st1h { z27.h }, p2, [x21, #-4, MUL VL]\n"
+ "st1h { z26.h }, p2, [x21, #-3, MUL VL]\n"
+ "st1h { z25.h }, p2, [x21, #-2, MUL VL]\n"
+ "st1h { z24.h }, p2, [x21, #-1, MUL VL]\n"
+ "st1h { z11.h }, p2, [x20]\n"
+ "st1h { z10.h }, p2, [x20, #1, MUL VL]\n"
+ "st1h { z7.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z6.h }, p2, [x20, #3, MUL VL]\n"
+ "st1h { z3.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z2.h }, p2, [x20, #5, MUL VL]\n"
+ "st1h { z1.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z0.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1h { z23.h }, p4, [x20, #-8, MUL VL]\n"
- "st1h { z25.h }, p4, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p4, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p4, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p4, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p4, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p4, [x20, #-2, MUL VL]\n"
- "st1h { z13.h }, p4, [x20, #-1, MUL VL]\n"
+ "st1h { z23.h }, p2, [x20, #-8, MUL VL]\n"
+ "st1h { z22.h }, p2, [x20, #-7, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #-6, MUL VL]\n"
+ "st1h { z20.h }, p2, [x20, #-5, MUL VL]\n"
+ "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
+ "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
"cbz x27, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
- "whilelt p3.h, XZR, x20\n"
- "ld1h { z20.h }, p3/Z, [x28]\n"
- "ld1h { z19.h }, p3/Z, [x25]\n"
- "dech x20\n"
- "whilelt p2.h, XZR, x20\n"
- "ld1h { z18.h }, p2/Z, [x28, #1, MUL VL]\n"
- "ld1h { z17.h }, p2/Z, [x25, #1, MUL VL]\n"
- "dech x20\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z25.h }, p1/Z, [x28, #2, MUL VL]\n"
- "ld1h { z16.h }, p1/Z, [x25, #2, MUL VL]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z0.h }, p0/Z, [x28, #3, MUL VL]\n"
- "ld1h { z24.h }, p0/Z, [x25, #3, MUL VL]\n"
- "mov x20, x22\n"
+ "mov x21, x27\n"
+ "mov x20, x25\n"
"decw x27, ALL, MUL #8\n"
- "ld1h { z31.h }, p3/Z, [x24]\n"
- "ld1h { z30.h }, p2/Z, [x24, #1, MUL VL]\n"
- "ld1h { z29.h }, p1/Z, [x24, #2, MUL VL]\n"
- "ld1h { z28.h }, p0/Z, [x24, #3, MUL VL]\n"
- "zip1 z23.h, z20.h, z19.h\n"
- "zip2 z22.h, z20.h, z19.h\n"
- "ld1h { z21.h }, p3/Z, [x23]\n"
- "ld1h { z27.h }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z20.h, z18.h, z17.h\n"
- "zip2 z19.h, z18.h, z17.h\n"
- "ld1h { z18.h }, p1/Z, [x23, #2, MUL VL]\n"
- "ld1h { z26.h }, p0/Z, [x23, #3, MUL VL]\n"
- "zip1 z17.h, z25.h, z16.h\n"
- "zip2 z16.h, z25.h, z16.h\n"
- "zip1 z25.h, z0.h, z24.h\n"
- "zip2 z24.h, z0.h, z24.h\n"
- "st1h { z23.h }, p4, [x20]\n"
+ "add x25, x25, %x[out_stride]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p0.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z20.h }, p1/Z, [x28]\n"
+ "ld1h { z16.h }, p1/Z, [x24]\n"
+ "ld1h { z23.h }, p1/Z, [x23]\n"
+ "ld1h { z19.h }, p1/Z, [x22]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "ld1h { z18.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z17.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z1.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "ld1h { z0.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "zip1 z22.h, z20.h, z16.h\n"
+ "zip2 z21.h, z20.h, z16.h\n"
+ "whilelt p0.h, XZR, x21\n"
+ "ld1h { z20.h }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z16.h }, p1/Z, [x24, #2, MUL VL]\n"
"cmp x27, #0x0\n"
- "st1h { z22.h }, p4, [x20, #1, MUL VL]\n"
+ "ld1h { z31.h }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z30.h }, p1/Z, [x22, #2, MUL VL]\n"
+ "zip1 z29.h, z18.h, z17.h\n"
+ "zip2 z28.h, z18.h, z17.h\n"
+ "zip1 z27.h, z23.h, z19.h\n"
+ "zip2 z26.h, z23.h, z19.h\n"
+ "ld1h { z19.h }, p0/Z, [x28, #3, MUL VL]\n"
"addvl x28, x28, #4\n"
- "addvl x25, x25, #4\n"
- "zip1 z23.h, z31.h, z21.h\n"
- "st1h { z20.h }, p4, [x20, #2, MUL VL]\n"
+ "ld1h { z18.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z25.h }, p0/Z, [x23, #3, MUL VL]\n"
+ "zip1 z17.h, z20.h, z16.h\n"
+ "zip2 z24.h, z20.h, z16.h\n"
+ "ld1h { z16.h }, p0/Z, [x22, #3, MUL VL]\n"
+ "st1h { z22.h }, p2, [x20]\n"
"addvl x24, x24, #4\n"
"addvl x23, x23, #4\n"
- "zip2 z22.h, z31.h, z21.h\n"
- "st1h { z19.h }, p4, [x20, #3, MUL VL]\n"
- "zip1 z21.h, z30.h, z27.h\n"
- "zip2 z20.h, z30.h, z27.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z17.h }, p4, [x20, #4, MUL VL]\n"
- "zip1 z19.h, z29.h, z18.h\n"
- "zip2 z18.h, z29.h, z18.h\n"
- "st1h { z16.h }, p4, [x20, #5, MUL VL]\n"
- "zip1 z17.h, z28.h, z26.h\n"
- "zip2 z16.h, z28.h, z26.h\n"
- "st1h { z25.h }, p4, [x20, #6, MUL VL]\n"
- "st1h { z24.h }, p4, [x20, #7, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #1, MUL VL]\n"
+ "addvl x22, x22, #4\n"
+ "zip1 z23.h, z1.h, z0.h\n"
+ "zip2 z22.h, z1.h, z0.h\n"
+ "zip1 z21.h, z19.h, z18.h\n"
+ "zip2 z20.h, z19.h, z18.h\n"
+ "st1h { z29.h }, p2, [x20, #2, MUL VL]\n"
+ "st1h { z28.h }, p2, [x20, #3, MUL VL]\n"
+ "zip1 z19.h, z31.h, z30.h\n"
+ "zip2 z18.h, z31.h, z30.h\n"
+ "st1h { z17.h }, p2, [x20, #4, MUL VL]\n"
+ "zip1 z17.h, z25.h, z16.h\n"
+ "zip2 z16.h, z25.h, z16.h\n"
+ "st1h { z24.h }, p2, [x20, #5, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z20.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
- "st1h { z23.h }, p4, [x20, #-8, MUL VL]\n"
- "st1h { z22.h }, p4, [x20, #-7, MUL VL]\n"
- "st1h { z21.h }, p4, [x20, #-6, MUL VL]\n"
- "st1h { z20.h }, p4, [x20, #-5, MUL VL]\n"
- "st1h { z19.h }, p4, [x20, #-4, MUL VL]\n"
- "st1h { z18.h }, p4, [x20, #-3, MUL VL]\n"
- "st1h { z17.h }, p4, [x20, #-2, MUL VL]\n"
- "st1h { z16.h }, p4, [x20, #-1, MUL VL]\n"
+ "st1h { z27.h }, p2, [x20, #-8, MUL VL]\n"
+ "st1h { z26.h }, p2, [x20, #-7, MUL VL]\n"
+ "st1h { z23.h }, p2, [x20, #-6, MUL VL]\n"
+ "st1h { z22.h }, p2, [x20, #-5, MUL VL]\n"
+ "st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
+ "st1h { z17.h }, p2, [x20, #-2, MUL VL]\n"
+ "st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x4\n"
@@ -243,110 +243,110 @@ void sve_transpose_interleave_8VL_2x2(uint16_t *out, const uint16_t *in, size_t
"mov x28, %x[in]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #8\n"
- "add x25, x28, %x[in_stride]\n"
"cmp %x[height], #0x1\n"
- "add %x[in], x25, %x[in_stride]\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x21, x20\n"
- "mov x22, %x[out]\n"
+ "mov x25, %x[out]\n"
"sub %x[height], %x[height], #0x2\n"
+ "add x24, x28, %x[in_stride]\n"
+ "add %x[in], x24, %x[in_stride]\n"
+ "csel x24, x24, %x[pad_row], GT\n"
+ "cmp x21, x20\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z17.h }, p4/Z, [x28]\n"
- "ld1h { z20.h }, p4/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z19.h }, p2/Z, [x28]\n"
+ "ld1h { z18.h }, p2/Z, [x28, #1, MUL VL]\n"
"sub x21, x21, x20\n"
+ "ld1h { z26.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z0.h }, p2/Z, [x28, #3, MUL VL]\n"
"cmp x21, x20\n"
- "ld1h { z23.h }, p4/Z, [x28, #2, MUL VL]\n"
- "ld1h { z19.h }, p4/Z, [x28, #3, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x25]\n"
- "ld1h { z18.h }, p4/Z, [x25, #1, MUL VL]\n"
- "zip1 z0.h, z17.h, z16.h\n"
- "zip2 z22.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p4/Z, [x25, #2, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x25, #3, MUL VL]\n"
- "zip1 z31.h, z20.h, z18.h\n"
- "zip2 z30.h, z20.h, z18.h\n"
- "ld1h { z21.h }, p4/Z, [x28, #4, MUL VL]\n"
- "ld1h { z20.h }, p4/Z, [x28, #5, MUL VL]\n"
- "zip1 z29.h, z23.h, z17.h\n"
- "zip2 z28.h, z23.h, z17.h\n"
- "ld1h { z27.h }, p4/Z, [x28, #6, MUL VL]\n"
- "ld1h { z26.h }, p4/Z, [x28, #7, MUL VL]\n"
- "zip1 z25.h, z19.h, z16.h\n"
- "zip2 z24.h, z19.h, z16.h\n"
- "ld1h { z19.h }, p4/Z, [x25, #4, MUL VL]\n"
- "ld1h { z18.h }, p4/Z, [x25, #5, MUL VL]\n"
+ "ld1h { z17.h }, p2/Z, [x24]\n"
+ "ld1h { z16.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "ld1h { z25.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z24.h }, p2/Z, [x24, #3, MUL VL]\n"
+ "ld1h { z31.h }, p2/Z, [x28, #4, MUL VL]\n"
+ "ld1h { z30.h }, p2/Z, [x28, #5, MUL VL]\n"
+ "ld1h { z29.h }, p2/Z, [x28, #6, MUL VL]\n"
+ "ld1h { z28.h }, p2/Z, [x28, #7, MUL VL]\n"
+ "zip1 z23.h, z19.h, z17.h\n"
+ "zip2 z22.h, z19.h, z17.h\n"
+ "ld1h { z21.h }, p2/Z, [x24, #4, MUL VL]\n"
+ "ld1h { z20.h }, p2/Z, [x24, #5, MUL VL]\n"
+ "zip1 z19.h, z18.h, z16.h\n"
+ "zip2 z18.h, z18.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x24, #6, MUL VL]\n"
+ "ld1h { z27.h }, p2/Z, [x24, #7, MUL VL]\n"
+ "zip1 z16.h, z26.h, z25.h\n"
+ "zip2 z26.h, z26.h, z25.h\n"
+ "zip1 z25.h, z0.h, z24.h\n"
+ "zip2 z24.h, z0.h, z24.h\n"
+ "st1h { z23.h }, p2, [x25]\n"
"addvl x28, x28, #8\n"
- "zip1 z23.h, z21.h, z19.h\n"
- "ld1h { z17.h }, p4/Z, [x25, #6, MUL VL]\n"
- "ld1h { z16.h }, p4/Z, [x25, #7, MUL VL]\n"
- "st1h { z0.h }, p4, [x22]\n"
- "addvl x25, x25, #8\n"
- "st1h { z22.h }, p4, [x22, #1, MUL VL]\n"
- "zip2 z22.h, z21.h, z19.h\n"
- "zip1 z21.h, z20.h, z18.h\n"
- "st1h { z31.h }, p4, [x22, #2, MUL VL]\n"
- "zip2 z20.h, z20.h, z18.h\n"
- "zip1 z19.h, z27.h, z17.h\n"
- "st1h { z30.h }, p4, [x22, #3, MUL VL]\n"
- "zip2 z18.h, z27.h, z17.h\n"
- "zip1 z17.h, z26.h, z16.h\n"
- "st1h { z29.h }, p4, [x22, #4, MUL VL]\n"
- "zip2 z16.h, z26.h, z16.h\n"
- "st1h { z28.h }, p4, [x22, #5, MUL VL]\n"
- "st1h { z25.h }, p4, [x22, #6, MUL VL]\n"
- "st1h { z24.h }, p4, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z23.h }, p4, [x22]\n"
- "st1h { z22.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z21.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z20.h }, p4, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p4, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p4, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p4, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z22.h }, p2, [x25, #1, MUL VL]\n"
+ "addvl x24, x24, #8\n"
+ "zip1 z23.h, z31.h, z21.h\n"
+ "zip2 z22.h, z31.h, z21.h\n"
+ "st1h { z19.h }, p2, [x25, #2, MUL VL]\n"
+ "zip1 z21.h, z30.h, z20.h\n"
+ "zip2 z20.h, z30.h, z20.h\n"
+ "st1h { z18.h }, p2, [x25, #3, MUL VL]\n"
+ "zip1 z19.h, z29.h, z17.h\n"
+ "zip2 z18.h, z29.h, z17.h\n"
+ "st1h { z16.h }, p2, [x25, #4, MUL VL]\n"
+ "zip1 z17.h, z28.h, z27.h\n"
+ "zip2 z16.h, z28.h, z27.h\n"
+ "st1h { z26.h }, p2, [x25, #5, MUL VL]\n"
+ "st1h { z25.h }, p2, [x25, #6, MUL VL]\n"
+ "st1h { z24.h }, p2, [x25, #7, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
+ "st1h { z23.h }, p2, [x25]\n"
+ "st1h { z22.h }, p2, [x25, #1, MUL VL]\n"
+ "st1h { z21.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x25, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x25, #4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #5, MUL VL]\n"
+ "st1h { z17.h }, p2, [x25, #6, MUL VL]\n"
+ "st1h { z16.h }, p2, [x25, #7, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z22.h }, p0/Z, [x28]\n"
- "ld1h { z21.h }, p0/Z, [x25]\n"
+ "decw x21, ALL, MUL #8\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z20.h }, p0/Z, [x28, #1, MUL VL]\n"
- "ld1h { z19.h }, p0/Z, [x25, #1, MUL VL]\n"
"dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z18.h }, p0/Z, [x28, #2, MUL VL]\n"
- "ld1h { z17.h }, p0/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z18.h }, p1/Z, [x28]\n"
+ "ld1h { z17.h }, p1/Z, [x24]\n"
+ "whilelt p1.h, XZR, x20\n"
"dech x20\n"
+ "ld1h { z20.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z23.h, z18.h, z17.h\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z24.h }, p0/Z, [x28, #3, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x25, #3, MUL VL]\n"
- "decw x21, ALL, MUL #8\n"
"cmp x21, #0x0\n"
- "zip1 z16.h, z22.h, z21.h\n"
- "zip2 z22.h, z22.h, z21.h\n"
+ "ld1h { z18.h }, p1/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z17.h }, p1/Z, [x24, #2, MUL VL]\n"
+ "zip1 z22.h, z20.h, z16.h\n"
+ "zip2 z21.h, z20.h, z16.h\n"
+ "ld1h { z20.h }, p0/Z, [x28, #3, MUL VL]\n"
"addvl x28, x28, #4\n"
- "addvl x25, x25, #4\n"
- "zip1 z21.h, z20.h, z19.h\n"
- "zip2 z20.h, z20.h, z19.h\n"
+ "ld1h { z16.h }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ "st1h { z19.h }, p2, [x25]\n"
"zip1 z19.h, z18.h, z17.h\n"
"zip2 z18.h, z18.h, z17.h\n"
- "st1h { z16.h }, p4, [x22]\n"
- "zip1 z17.h, z24.h, z23.h\n"
- "zip2 z16.h, z24.h, z23.h\n"
- "st1h { z22.h }, p4, [x22, #1, MUL VL]\n"
- "st1h { z21.h }, p4, [x22, #2, MUL VL]\n"
- "st1h { z20.h }, p4, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p4, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p4, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p4, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p4, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z23.h }, p2, [x25, #1, MUL VL]\n"
+ "zip1 z17.h, z20.h, z16.h\n"
+ "zip2 z16.h, z20.h, z16.h\n"
+ "st1h { z22.h }, p2, [x25, #2, MUL VL]\n"
+ "st1h { z21.h }, p2, [x25, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x25, #4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x25, #5, MUL VL]\n"
+ "st1h { z17.h }, p2, [x25, #6, MUL VL]\n"
+ "st1h { z16.h }, p2, [x25, #7, MUL VL]\n"
+ "add x25, x25, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -355,7 +355,7 @@ void sve_transpose_interleave_8VL_2x2(uint16_t *out, const uint16_t *in, size_t
"12:" // Done
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp
index 34799c60a6..8ed1879643 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -44,155 +44,155 @@ void sve_transpose_interleave_8VL_2x4(uint16_t *out, const uint16_t *in, size_t
"blt 6f\n"
"1:" // Main row loop: Head
"mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
- "add x9, x10, %x[in_stride]\n"
- "add x28, x9, %x[in_stride]\n"
- "mov x27, %x[width]\n"
- "cnth x26, ALL, MUL #4\n"
- "add x25, x28, %x[in_stride]\n"
+ "mov x11, %x[width]\n"
+ "cnth x10, ALL, MUL #4\n"
+ "mov x9, %x[out]\n"
+ "sub %x[height], %x[height], #0x8\n"
+ "add x28, x12, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add x25, x26, %x[in_stride]\n"
+ "cmp x11, x10\n"
"add x24, x25, %x[in_stride]\n"
"add x23, x24, %x[in_stride]\n"
- "cmp x27, x26\n"
- "add %x[in], x23, %x[in_stride]\n"
- "mov x22, %x[out]\n"
- "sub %x[height], %x[height], #0x8\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add %x[in], x22, %x[in_stride]\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1h { z21.h }, p2/Z, [x12]\n"
- "ld1h { z17.h }, p2/Z, [x12, #1, MUL VL]\n"
- "mov x21, x22\n"
- "add x22, x22, %x[out_stride]\n"
- "ld1h { z31.h }, p2/Z, [x11]\n"
- "ld1h { z5.h }, p2/Z, [x11, #1, MUL VL]\n"
- "mov x20, x22\n"
- "sub x27, x27, x26\n"
- "ld1h { z15.h }, p2/Z, [x10]\n"
- "ld1h { z28.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z24.h, z21.h, z15.h\n"
- "zip2 z29.h, z21.h, z15.h\n"
- "ld1h { z6.h }, p2/Z, [x9]\n"
- "ld1h { z4.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z16.h, z31.h, z6.h\n"
- "zip2 z18.h, z31.h, z6.h\n"
- "ld1h { z3.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z25.h }, p2/Z, [x12, #3, MUL VL]\n"
- "zip1 z20.h, z17.h, z28.h\n"
- "zip1 z7.h, z5.h, z4.h\n"
- "ld1h { z27.h }, p2/Z, [x11, #2, MUL VL]\n"
- "ld1h { z22.h }, p2/Z, [x11, #3, MUL VL]\n"
- "zip2 z2.h, z17.h, z28.h\n"
- "zip2 z19.h, z5.h, z4.h\n"
- "ld1h { z28.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z17.h }, p2/Z, [x10, #3, MUL VL]\n"
- "zip1 z21.h, z24.h, z16.h\n"
- "zip2 z24.h, z24.h, z16.h\n"
- "ld1h { z5.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z1.h }, p2/Z, [x9, #3, MUL VL]\n"
- "zip1 z14.h, z29.h, z18.h\n"
- "zip2 z12.h, z29.h, z18.h\n"
- "ld1h { z18.h }, p2/Z, [x28]\n"
- "ld1h { z31.h }, p2/Z, [x28, #1, MUL VL]\n"
- "zip1 z11.h, z20.h, z7.h\n"
- "zip2 z13.h, z20.h, z7.h\n"
- "ld1h { z4.h }, p2/Z, [x25]\n"
- "ld1h { z26.h }, p2/Z, [x25, #1, MUL VL]\n"
- "zip1 z15.h, z2.h, z19.h\n"
- "zip2 z10.h, z2.h, z19.h\n"
- "ld1h { z16.h }, p2/Z, [x24]\n"
- "ld1h { z30.h }, p2/Z, [x24, #1, MUL VL]\n"
- "zip1 z19.h, z18.h, z16.h\n"
- "zip2 z18.h, z18.h, z16.h\n"
- "ld1h { z8.h }, p2/Z, [x23]\n"
- "ld1h { z29.h }, p2/Z, [x23, #1, MUL VL]\n"
- "zip1 z20.h, z4.h, z8.h\n"
- "zip2 z0.h, z4.h, z8.h\n"
- "ld1h { z6.h }, p2/Z, [x28, #2, MUL VL]\n"
- "ld1h { z8.h }, p2/Z, [x28, #3, MUL VL]\n"
- "zip1 z23.h, z31.h, z30.h\n"
- "zip1 z16.h, z26.h, z29.h\n"
- "ld1h { z9.h }, p2/Z, [x25, #2, MUL VL]\n"
- "ld1h { z7.h }, p2/Z, [x25, #3, MUL VL]\n"
- "zip2 z31.h, z31.h, z30.h\n"
- "zip2 z30.h, z26.h, z29.h\n"
- "ld1h { z2.h }, p2/Z, [x24, #2, MUL VL]\n"
- "ld1h { z26.h }, p2/Z, [x24, #3, MUL VL]\n"
- "zip1 z29.h, z3.h, z28.h\n"
- "zip1 z4.h, z27.h, z5.h\n"
- "zip2 z28.h, z3.h, z28.h\n"
- "ld1h { z3.h }, p2/Z, [x23, #2, MUL VL]\n"
- "zip2 z27.h, z27.h, z5.h\n"
- "ld1h { z5.h }, p2/Z, [x23, #3, MUL VL]\n"
- "st1h { z21.h }, p2, [x21]\n"
- "zip1 z21.h, z25.h, z17.h\n"
- "zip2 z25.h, z25.h, z17.h\n"
- "cmp x27, x26\n"
- "st1h { z24.h }, p2, [x21, #1, MUL VL]\n"
- "zip1 z24.h, z22.h, z1.h\n"
- "zip2 z22.h, z22.h, z1.h\n"
+ "ld1h { z22.h }, p2/Z, [x12]\n"
+ "ld1h { z2.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "mov x21, x9\n"
+ "add x9, x9, %x[out_stride]\n"
+ "ld1h { z23.h }, p2/Z, [x28]\n"
+ "ld1h { z5.h }, p2/Z, [x28, #1, MUL VL]\n"
+ "mov x20, x9\n"
+ "sub x11, x11, x10\n"
+ "ld1h { z3.h }, p2/Z, [x27]\n"
+ "ld1h { z8.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "cmp x11, x10\n"
+ "add x9, x9, %x[out_stride]\n"
+ "ld1h { z25.h }, p2/Z, [x26]\n"
+ "ld1h { z26.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z6.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z12.h }, p2/Z, [x12, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
- "st1h { z14.h }, p2, [x21, #2, MUL VL]\n"
- "zip1 z17.h, z19.h, z20.h\n"
- "zip2 z20.h, z19.h, z20.h\n"
- "addvl x11, x11, #4\n"
- "st1h { z12.h }, p2, [x21, #3, MUL VL]\n"
- "zip1 z19.h, z18.h, z0.h\n"
- "zip2 z18.h, z18.h, z0.h\n"
- "addvl x10, x10, #4\n"
- "st1h { z11.h }, p2, [x21, #4, MUL VL]\n"
- "zip1 z14.h, z23.h, z16.h\n"
- "zip2 z16.h, z23.h, z16.h\n"
- "addvl x9, x9, #4\n"
- "st1h { z13.h }, p2, [x21, #5, MUL VL]\n"
- "zip1 z23.h, z31.h, z30.h\n"
- "zip2 z1.h, z31.h, z30.h\n"
+ "ld1h { z15.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z11.h }, p2/Z, [x28, #3, MUL VL]\n"
+ "zip1 z18.h, z22.h, z3.h\n"
+ "zip2 z24.h, z22.h, z3.h\n"
+ "ld1h { z14.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z13.h }, p2/Z, [x27, #3, MUL VL]\n"
+ "zip1 z4.h, z23.h, z25.h\n"
+ "zip2 z16.h, z23.h, z25.h\n"
+ "ld1h { z7.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1h { z23.h }, p2/Z, [x26, #3, MUL VL]\n"
+ "zip1 z21.h, z2.h, z8.h\n"
+ "zip1 z22.h, z5.h, z26.h\n"
+ "ld1h { z1.h }, p2/Z, [x25]\n"
+ "ld1h { z25.h }, p2/Z, [x25, #1, MUL VL]\n"
+ "zip2 z10.h, z2.h, z8.h\n"
+ "zip2 z5.h, z5.h, z26.h\n"
+ "ld1h { z19.h }, p2/Z, [x24]\n"
+ "ld1h { z31.h }, p2/Z, [x24, #1, MUL VL]\n"
+ "zip1 z17.h, z18.h, z4.h\n"
+ "zip2 z28.h, z18.h, z4.h\n"
+ "ld1h { z3.h }, p2/Z, [x23]\n"
+ "ld1h { z18.h }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip1 z0.h, z24.h, z16.h\n"
+ "zip2 z29.h, z24.h, z16.h\n"
+ "ld1h { z2.h }, p2/Z, [x22]\n"
+ "ld1h { z16.h }, p2/Z, [x22, #1, MUL VL]\n"
+ "zip1 z27.h, z21.h, z22.h\n"
+ "zip2 z26.h, z21.h, z22.h\n"
+ "ld1h { z8.h }, p2/Z, [x25, #2, MUL VL]\n"
+ "ld1h { z4.h }, p2/Z, [x25, #3, MUL VL]\n"
+ "zip1 z24.h, z10.h, z5.h\n"
+ "zip2 z20.h, z10.h, z5.h\n"
+ "ld1h { z30.h }, p2/Z, [x24, #2, MUL VL]\n"
+ "ld1h { z5.h }, p2/Z, [x24, #3, MUL VL]\n"
+ "zip1 z21.h, z1.h, z3.h\n"
+ "zip2 z9.h, z1.h, z3.h\n"
+ "ld1h { z10.h }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1h { z3.h }, p2/Z, [x23, #3, MUL VL]\n"
+ "zip1 z22.h, z19.h, z2.h\n"
+ "zip2 z19.h, z19.h, z2.h\n"
+ "ld1h { z2.h }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1h { z1.h }, p2/Z, [x22, #3, MUL VL]\n"
+ "st1h { z17.h }, p2, [x21]\n"
+ "zip1 z17.h, z25.h, z18.h\n"
+ "zip2 z25.h, z25.h, z18.h\n"
+ "zip1 z18.h, z31.h, z16.h\n"
+ "st1h { z28.h }, p2, [x21, #1, MUL VL]\n"
"addvl x28, x28, #4\n"
- "st1h { z15.h }, p2, [x21, #6, MUL VL]\n"
- "zip1 z0.h, z29.h, z4.h\n"
- "zip2 z31.h, z29.h, z4.h\n"
+ "zip2 z16.h, z31.h, z16.h\n"
+ "st1h { z0.h }, p2, [x21, #2, MUL VL]\n"
+ "zip1 z0.h, z6.h, z14.h\n"
+ "addvl x27, x27, #4\n"
+ "st1h { z29.h }, p2, [x21, #3, MUL VL]\n"
+ "zip1 z31.h, z15.h, z7.h\n"
+ "zip2 z29.h, z6.h, z14.h\n"
+ "addvl x26, x26, #4\n"
+ "st1h { z27.h }, p2, [x21, #4, MUL VL]\n"
+ "zip2 z28.h, z15.h, z7.h\n"
+ "zip1 z27.h, z12.h, z13.h\n"
"addvl x25, x25, #4\n"
- "st1h { z10.h }, p2, [x21, #7, MUL VL]\n"
- "addvl x21, x21, #16\n"
- "zip1 z30.h, z28.h, z27.h\n"
- "zip2 z29.h, z28.h, z27.h\n"
- "st1h { z17.h }, p2, [x21, #-8, MUL VL]\n"
- "zip1 z13.h, z21.h, z24.h\n"
- "zip2 z27.h, z21.h, z24.h\n"
+ "st1h { z26.h }, p2, [x21, #5, MUL VL]\n"
+ "zip1 z26.h, z11.h, z23.h\n"
+ "zip2 z6.h, z12.h, z13.h\n"
"addvl x24, x24, #4\n"
- "st1h { z20.h }, p2, [x21, #-7, MUL VL]\n"
- "zip1 z28.h, z25.h, z22.h\n"
- "zip2 z25.h, z25.h, z22.h\n"
+ "st1h { z24.h }, p2, [x21, #6, MUL VL]\n"
+ "zip2 z24.h, z11.h, z23.h\n"
+ "zip1 z23.h, z21.h, z22.h\n"
"addvl x23, x23, #4\n"
- "st1h { z19.h }, p2, [x21, #-6, MUL VL]\n"
- "zip1 z22.h, z6.h, z2.h\n"
- "zip1 z21.h, z9.h, z3.h\n"
- "add x22, x22, %x[out_stride]\n"
- "st1h { z18.h }, p2, [x21, #-5, MUL VL]\n"
- "zip2 z20.h, z6.h, z2.h\n"
- "zip2 z19.h, z9.h, z3.h\n"
- "st1h { z14.h }, p2, [x21, #-4, MUL VL]\n"
- "zip1 z18.h, z8.h, z26.h\n"
- "zip1 z17.h, z7.h, z5.h\n"
- "st1h { z16.h }, p2, [x21, #-3, MUL VL]\n"
- "zip2 z24.h, z8.h, z26.h\n"
- "zip2 z16.h, z7.h, z5.h\n"
- "st1h { z23.h }, p2, [x21, #-2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x21, #7, MUL VL]\n"
+ "addvl x21, x21, #16\n"
+ "zip2 z22.h, z21.h, z22.h\n"
+ "zip1 z21.h, z9.h, z19.h\n"
+ "zip2 z20.h, z9.h, z19.h\n"
+ "zip1 z19.h, z17.h, z18.h\n"
+ "addvl x22, x22, #4\n"
+ "zip2 z18.h, z17.h, z18.h\n"
+ "zip1 z17.h, z25.h, z16.h\n"
+ "zip2 z16.h, z25.h, z16.h\n"
+ "st1h { z23.h }, p2, [x21, #-8, MUL VL]\n"
+ "zip1 z23.h, z0.h, z31.h\n"
+ "st1h { z22.h }, p2, [x21, #-7, MUL VL]\n"
+ "zip2 z0.h, z0.h, z31.h\n"
+ "zip1 z31.h, z29.h, z28.h\n"
+ "st1h { z21.h }, p2, [x21, #-6, MUL VL]\n"
+ "zip2 z29.h, z29.h, z28.h\n"
+ "zip1 z28.h, z27.h, z26.h\n"
+ "st1h { z20.h }, p2, [x21, #-5, MUL VL]\n"
+ "zip2 z27.h, z27.h, z26.h\n"
+ "zip1 z26.h, z6.h, z24.h\n"
+ "st1h { z19.h }, p2, [x21, #-4, MUL VL]\n"
+ "zip2 z25.h, z6.h, z24.h\n"
+ "zip1 z22.h, z8.h, z10.h\n"
+ "st1h { z18.h }, p2, [x21, #-3, MUL VL]\n"
+ "zip1 z21.h, z30.h, z2.h\n"
+ "zip2 z20.h, z8.h, z10.h\n"
+ "st1h { z17.h }, p2, [x21, #-2, MUL VL]\n"
+ "zip2 z19.h, z30.h, z2.h\n"
+ "zip1 z18.h, z4.h, z3.h\n"
+ "st1h { z16.h }, p2, [x21, #-1, MUL VL]\n"
+ "zip1 z17.h, z5.h, z1.h\n"
+ "zip2 z24.h, z4.h, z3.h\n"
+ "zip2 z16.h, z5.h, z1.h\n"
+ "st1h { z23.h }, p2, [x20]\n"
"zip1 z23.h, z22.h, z21.h\n"
+ "st1h { z0.h }, p2, [x20, #1, MUL VL]\n"
"zip2 z22.h, z22.h, z21.h\n"
- "st1h { z1.h }, p2, [x21, #-1, MUL VL]\n"
"zip1 z21.h, z20.h, z19.h\n"
+ "st1h { z31.h }, p2, [x20, #2, MUL VL]\n"
"zip2 z20.h, z20.h, z19.h\n"
- "st1h { z0.h }, p2, [x20]\n"
"zip1 z19.h, z18.h, z17.h\n"
+ "st1h { z29.h }, p2, [x20, #3, MUL VL]\n"
"zip2 z18.h, z18.h, z17.h\n"
- "st1h { z31.h }, p2, [x20, #1, MUL VL]\n"
"zip1 z17.h, z24.h, z16.h\n"
+ "st1h { z28.h }, p2, [x20, #4, MUL VL]\n"
"zip2 z16.h, z24.h, z16.h\n"
- "st1h { z30.h }, p2, [x20, #2, MUL VL]\n"
- "st1h { z29.h }, p2, [x20, #3, MUL VL]\n"
- "st1h { z13.h }, p2, [x20, #4, MUL VL]\n"
"st1h { z27.h }, p2, [x20, #5, MUL VL]\n"
- "st1h { z28.h }, p2, [x20, #6, MUL VL]\n"
+ "st1h { z26.h }, p2, [x20, #6, MUL VL]\n"
"st1h { z25.h }, p2, [x20, #7, MUL VL]\n"
"addvl x20, x20, #16\n"
"st1h { z23.h }, p2, [x20, #-8, MUL VL]\n"
@@ -205,84 +205,84 @@ void sve_transpose_interleave_8VL_2x4(uint16_t *out, const uint16_t *in, size_t
"st1h { z16.h }, p2, [x20, #-1, MUL VL]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x27, 5f\n"
+ "cbz x11, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x27\n"
- "whilelt p1.h, XZR, x20\n"
- "ld1h { z17.h }, p1/Z, [x12]\n"
- "ld1h { z19.h }, p1/Z, [x11]\n"
- "dech x20\n"
- "whilelt p0.h, XZR, x20\n"
- "ld1h { z24.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z23.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z16.h }, p1/Z, [x10]\n"
- "ld1h { z20.h }, p0/Z, [x10, #1, MUL VL]\n"
- "zip1 z1.h, z17.h, z16.h\n"
- "zip2 z22.h, z17.h, z16.h\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z17.h }, p0/Z, [x9, #1, MUL VL]\n"
- "zip1 z16.h, z19.h, z18.h\n"
- "zip2 z19.h, z19.h, z18.h\n"
- "ld1h { z0.h }, p1/Z, [x28]\n"
- "ld1h { z31.h }, p0/Z, [x28, #1, MUL VL]\n"
- "zip1 z25.h, z24.h, z20.h\n"
- "zip1 z21.h, z23.h, z17.h\n"
- "ld1h { z30.h }, p1/Z, [x25]\n"
- "ld1h { z29.h }, p0/Z, [x25, #1, MUL VL]\n"
- "zip2 z28.h, z24.h, z20.h\n"
- "zip2 z24.h, z23.h, z17.h\n"
- "ld1h { z20.h }, p1/Z, [x24]\n"
- "ld1h { z27.h }, p0/Z, [x24, #1, MUL VL]\n"
- "mov x20, x22\n"
- "decd x27, ALL, MUL #8\n"
- "ld1h { z23.h }, p1/Z, [x23]\n"
- "ld1h { z26.h }, p0/Z, [x23, #1, MUL VL]\n"
- "zip1 z18.h, z1.h, z16.h\n"
- "zip2 z17.h, z1.h, z16.h\n"
- "zip1 z16.h, z22.h, z19.h\n"
- "zip2 z19.h, z22.h, z19.h\n"
- "st1h { z18.h }, p2, [x20]\n"
- "cmp x27, #0x0\n"
- "zip1 z22.h, z25.h, z21.h\n"
- "zip2 z21.h, z25.h, z21.h\n"
- "st1h { z17.h }, p2, [x20, #1, MUL VL]\n"
+ "mov x21, x11\n"
+ "mov x20, x9\n"
+ "decd x11, ALL, MUL #8\n"
+ "add x9, x9, %x[out_stride]\n"
+ "whilelt p1.h, XZR, x21\n"
+ "dech x21\n"
+ "whilelt p0.h, XZR, x21\n"
+ "cmp x11, #0x0\n"
+ "ld1h { z24.h }, p1/Z, [x12]\n"
+ "ld1h { z23.h }, p1/Z, [x28]\n"
+ "ld1h { z22.h }, p1/Z, [x27]\n"
+ "ld1h { z16.h }, p1/Z, [x26]\n"
+ "ld1h { z2.h }, p1/Z, [x25]\n"
+ "ld1h { z1.h }, p1/Z, [x24]\n"
+ "ld1h { z21.h }, p0/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z26.h }, p0/Z, [x28, #1, MUL VL]\n"
"addvl x12, x12, #2\n"
- "zip1 z25.h, z28.h, z24.h\n"
- "zip2 z18.h, z28.h, z24.h\n"
- "st1h { z16.h }, p2, [x20, #2, MUL VL]\n"
- "addvl x11, x11, #2\n"
- "zip1 z17.h, z0.h, z20.h\n"
- "zip1 z16.h, z30.h, z23.h\n"
- "st1h { z19.h }, p2, [x20, #3, MUL VL]\n"
- "addvl x10, x10, #2\n"
- "zip2 z20.h, z0.h, z20.h\n"
- "zip2 z19.h, z30.h, z23.h\n"
- "st1h { z22.h }, p2, [x20, #4, MUL VL]\n"
- "addvl x9, x9, #2\n"
- "zip1 z24.h, z31.h, z27.h\n"
- "zip1 z23.h, z29.h, z26.h\n"
- "st1h { z21.h }, p2, [x20, #5, MUL VL]\n"
"addvl x28, x28, #2\n"
- "zip2 z22.h, z31.h, z27.h\n"
- "zip2 z21.h, z29.h, z26.h\n"
- "st1h { z25.h }, p2, [x20, #6, MUL VL]\n"
+ "ld1h { z20.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z19.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "zip1 z18.h, z24.h, z22.h\n"
+ "zip1 z17.h, z23.h, z16.h\n"
+ "ld1h { z0.h }, p0/Z, [x25, #1, MUL VL]\n"
+ "ld1h { z31.h }, p0/Z, [x24, #1, MUL VL]\n"
+ "zip2 z25.h, z24.h, z22.h\n"
+ "zip2 z16.h, z23.h, z16.h\n"
+ "ld1h { z30.h }, p1/Z, [x23]\n"
+ "ld1h { z29.h }, p0/Z, [x23, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
+ "ld1h { z24.h }, p1/Z, [x22]\n"
+ "ld1h { z28.h }, p0/Z, [x22, #1, MUL VL]\n"
+ "zip1 z23.h, z21.h, z20.h\n"
+ "zip1 z22.h, z26.h, z19.h\n"
+ "zip2 z21.h, z21.h, z20.h\n"
+ "zip2 z20.h, z26.h, z19.h\n"
"addvl x25, x25, #2\n"
- "st1h { z18.h }, p2, [x20, #7, MUL VL]\n"
- "addvl x20, x20, #16\n"
"addvl x24, x24, #2\n"
- "zip1 z18.h, z17.h, z16.h\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
"addvl x23, x23, #2\n"
- "zip2 z17.h, z17.h, z16.h\n"
- "zip1 z16.h, z20.h, z19.h\n"
- "st1h { z18.h }, p2, [x20, #-8, MUL VL]\n"
+ "addvl x22, x22, #2\n"
+ "zip1 z17.h, z25.h, z16.h\n"
+ "zip2 z16.h, z25.h, z16.h\n"
+ "zip1 z27.h, z23.h, z22.h\n"
+ "zip2 z23.h, z23.h, z22.h\n"
+ "zip1 z26.h, z21.h, z20.h\n"
+ "zip2 z25.h, z21.h, z20.h\n"
+ "st1h { z19.h }, p2, [x20]\n"
+ "zip1 z22.h, z2.h, z30.h\n"
+ "zip1 z21.h, z1.h, z24.h\n"
+ "st1h { z18.h }, p2, [x20, #1, MUL VL]\n"
+ "zip2 z20.h, z2.h, z30.h\n"
+ "zip2 z19.h, z1.h, z24.h\n"
+ "st1h { z17.h }, p2, [x20, #2, MUL VL]\n"
+ "zip1 z18.h, z0.h, z29.h\n"
+ "zip1 z17.h, z31.h, z28.h\n"
+ "st1h { z16.h }, p2, [x20, #3, MUL VL]\n"
+ "zip2 z24.h, z0.h, z29.h\n"
+ "zip2 z16.h, z31.h, z28.h\n"
+ "st1h { z27.h }, p2, [x20, #4, MUL VL]\n"
+ "st1h { z23.h }, p2, [x20, #5, MUL VL]\n"
+ "zip1 z23.h, z22.h, z21.h\n"
+ "zip2 z22.h, z22.h, z21.h\n"
+ "st1h { z26.h }, p2, [x20, #6, MUL VL]\n"
+ "zip1 z21.h, z20.h, z19.h\n"
"zip2 z20.h, z20.h, z19.h\n"
- "zip1 z19.h, z24.h, z23.h\n"
- "st1h { z17.h }, p2, [x20, #-7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip2 z18.h, z24.h, z23.h\n"
- "zip1 z17.h, z22.h, z21.h\n"
- "st1h { z16.h }, p2, [x20, #-6, MUL VL]\n"
- "zip2 z16.h, z22.h, z21.h\n"
+ "st1h { z25.h }, p2, [x20, #7, MUL VL]\n"
+ "addvl x20, x20, #16\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z17.h, z24.h, z16.h\n"
+ "zip2 z16.h, z24.h, z16.h\n"
+ "st1h { z23.h }, p2, [x20, #-8, MUL VL]\n"
+ "st1h { z22.h }, p2, [x20, #-7, MUL VL]\n"
+ "st1h { z21.h }, p2, [x20, #-6, MUL VL]\n"
"st1h { z20.h }, p2, [x20, #-5, MUL VL]\n"
"st1h { z19.h }, p2, [x20, #-4, MUL VL]\n"
"st1h { z18.h }, p2, [x20, #-3, MUL VL]\n"
@@ -297,141 +297,141 @@ void sve_transpose_interleave_8VL_2x4(uint16_t *out, const uint16_t *in, size_t
"6:" // Main loop skip
"7:" // Tail row loop: Head
"mov x12, %x[in]\n"
- "add x11, x12, %x[in_stride]\n"
- "add x10, x11, %x[in_stride]\n"
"mov x21, %x[width]\n"
"cnth x20, ALL, MUL #4\n"
- "add x9, x10, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x9, %x[in_stride]\n"
- "csel x9, x9, %x[pad_row], GT\n"
- "csel x10, x10, %x[pad_row], GE\n"
+ "mov x9, %x[out]\n"
+ "add x28, x12, %x[in_stride]\n"
+ "add x27, x28, %x[in_stride]\n"
+ "add x26, x27, %x[in_stride]\n"
+ "add %x[in], x26, %x[in_stride]\n"
+ "csel x26, x26, %x[pad_row], GT\n"
+ "csel x27, x27, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x11, x11, %x[pad_row], GT\n"
+ "csel x28, x28, %x[pad_row], GT\n"
"cmp x21, x20\n"
- "mov x22, %x[out]\n"
"sub %x[height], %x[height], #0x4\n"
"blt 9f\n"
"8:" // Tail row loop: Unroll column loop
- "ld1h { z17.h }, p2/Z, [x12]\n"
- "ld1h { z22.h }, p2/Z, [x12, #1, MUL VL]\n"
+ "ld1h { z18.h }, p2/Z, [x12]\n"
+ "ld1h { z30.h }, p2/Z, [x12, #1, MUL VL]\n"
"sub x21, x21, x20\n"
+ "ld1h { z24.h }, p2/Z, [x28]\n"
+ "ld1h { z29.h }, p2/Z, [x28, #1, MUL VL]\n"
"cmp x21, x20\n"
- "ld1h { z19.h }, p2/Z, [x11]\n"
- "ld1h { z21.h }, p2/Z, [x11, #1, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x10]\n"
- "ld1h { z18.h }, p2/Z, [x10, #1, MUL VL]\n"
- "zip1 z4.h, z17.h, z16.h\n"
- "zip2 z3.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x9]\n"
- "ld1h { z16.h }, p2/Z, [x9, #1, MUL VL]\n"
- "zip1 z2.h, z19.h, z17.h\n"
- "zip2 z1.h, z19.h, z17.h\n"
- "ld1h { z17.h }, p2/Z, [x12, #2, MUL VL]\n"
- "ld1h { z24.h }, p2/Z, [x12, #3, MUL VL]\n"
- "zip1 z0.h, z22.h, z18.h\n"
- "zip1 z31.h, z21.h, z16.h\n"
- "ld1h { z20.h }, p2/Z, [x11, #2, MUL VL]\n"
- "ld1h { z19.h }, p2/Z, [x11, #3, MUL VL]\n"
- "zip2 z30.h, z22.h, z18.h\n"
- "zip2 z23.h, z21.h, z16.h\n"
- "ld1h { z16.h }, p2/Z, [x10, #2, MUL VL]\n"
- "ld1h { z18.h }, p2/Z, [x10, #3, MUL VL]\n"
- "zip1 z22.h, z17.h, z16.h\n"
- "zip2 z29.h, z17.h, z16.h\n"
- "ld1h { z17.h }, p2/Z, [x9, #2, MUL VL]\n"
- "ld1h { z16.h }, p2/Z, [x9, #3, MUL VL]\n"
- "zip1 z21.h, z20.h, z17.h\n"
- "zip2 z28.h, z20.h, z17.h\n"
- "zip1 z27.h, z24.h, z18.h\n"
- "zip1 z26.h, z19.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x27]\n"
+ "ld1h { z23.h }, p2/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x26]\n"
+ "ld1h { z22.h }, p2/Z, [x26, #1, MUL VL]\n"
+ "ld1h { z21.h }, p2/Z, [x12, #2, MUL VL]\n"
+ "ld1h { z4.h }, p2/Z, [x12, #3, MUL VL]\n"
"addvl x12, x12, #4\n"
- "addvl x11, x11, #4\n"
- "zip2 z25.h, z24.h, z18.h\n"
- "zip2 z24.h, z19.h, z16.h\n"
- "addvl x10, x10, #4\n"
- "addvl x9, x9, #4\n"
- "zip1 z16.h, z4.h, z2.h\n"
- "zip2 z17.h, z4.h, z2.h\n"
- "st1h { z16.h }, p2, [x22]\n"
- "zip1 z16.h, z3.h, z1.h\n"
- "zip2 z20.h, z3.h, z1.h\n"
- "st1h { z17.h }, p2, [x22, #1, MUL VL]\n"
- "zip1 z19.h, z0.h, z31.h\n"
- "zip2 z18.h, z0.h, z31.h\n"
- "st1h { z16.h }, p2, [x22, #2, MUL VL]\n"
- "zip1 z17.h, z30.h, z23.h\n"
- "zip2 z16.h, z30.h, z23.h\n"
- "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
+ "ld1h { z28.h }, p2/Z, [x28, #2, MUL VL]\n"
+ "ld1h { z27.h }, p2/Z, [x28, #3, MUL VL]\n"
+ "zip1 z3.h, z18.h, z17.h\n"
+ "zip2 z2.h, z18.h, z17.h\n"
+ "ld1h { z20.h }, p2/Z, [x27, #2, MUL VL]\n"
+ "ld1h { z19.h }, p2/Z, [x27, #3, MUL VL]\n"
+ "zip1 z18.h, z24.h, z16.h\n"
+ "zip2 z1.h, z24.h, z16.h\n"
+ "ld1h { z17.h }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1h { z16.h }, p2/Z, [x26, #3, MUL VL]\n"
+ "zip1 z26.h, z30.h, z23.h\n"
+ "zip1 z25.h, z29.h, z22.h\n"
+ "zip2 z24.h, z30.h, z23.h\n"
+ "zip2 z23.h, z29.h, z22.h\n"
+ "addvl x28, x28, #4\n"
+ "addvl x27, x27, #4\n"
+ "zip1 z22.h, z21.h, z20.h\n"
+ "zip2 z0.h, z21.h, z20.h\n"
+ "addvl x26, x26, #4\n"
+ "zip1 z21.h, z28.h, z17.h\n"
+ "zip2 z31.h, z28.h, z17.h\n"
+ "zip1 z30.h, z4.h, z19.h\n"
+ "zip1 z29.h, z27.h, z16.h\n"
+ "zip2 z28.h, z4.h, z19.h\n"
+ "zip2 z27.h, z27.h, z16.h\n"
+ "zip1 z20.h, z3.h, z18.h\n"
+ "zip2 z19.h, z3.h, z18.h\n"
+ "zip1 z18.h, z2.h, z1.h\n"
+ "zip2 z17.h, z2.h, z1.h\n"
+ "zip1 z16.h, z26.h, z25.h\n"
+ "zip2 z26.h, z26.h, z25.h\n"
+ "zip1 z25.h, z24.h, z23.h\n"
+ "zip2 z24.h, z24.h, z23.h\n"
+ "st1h { z20.h }, p2, [x9]\n"
+ "st1h { z19.h }, p2, [x9, #1, MUL VL]\n"
"zip1 z23.h, z22.h, z21.h\n"
"zip2 z22.h, z22.h, z21.h\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "zip1 z21.h, z29.h, z28.h\n"
- "zip2 z20.h, z29.h, z28.h\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "zip1 z19.h, z27.h, z26.h\n"
- "zip2 z18.h, z27.h, z26.h\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
- "zip1 z17.h, z25.h, z24.h\n"
- "zip2 z16.h, z25.h, z24.h\n"
- "st1h { z23.h }, p2, [x22]\n"
- "st1h { z22.h }, p2, [x22, #1, MUL VL]\n"
- "st1h { z21.h }, p2, [x22, #2, MUL VL]\n"
- "st1h { z20.h }, p2, [x22, #3, MUL VL]\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "st1h { z18.h }, p2, [x9, #2, MUL VL]\n"
+ "zip1 z21.h, z0.h, z31.h\n"
+ "zip2 z20.h, z0.h, z31.h\n"
+ "st1h { z17.h }, p2, [x9, #3, MUL VL]\n"
+ "zip1 z19.h, z30.h, z29.h\n"
+ "zip2 z18.h, z30.h, z29.h\n"
+ "st1h { z16.h }, p2, [x9, #4, MUL VL]\n"
+ "zip1 z17.h, z28.h, z27.h\n"
+ "zip2 z16.h, z28.h, z27.h\n"
+ "st1h { z26.h }, p2, [x9, #5, MUL VL]\n"
+ "st1h { z25.h }, p2, [x9, #6, MUL VL]\n"
+ "st1h { z24.h }, p2, [x9, #7, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
+ "st1h { z23.h }, p2, [x9]\n"
+ "st1h { z22.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z21.h }, p2, [x9, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x9, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x9, #4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x9, #5, MUL VL]\n"
+ "st1h { z17.h }, p2, [x9, #6, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #7, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
"bge 8b\n"
"9:" // Tail row loop: Unroll column loop skip
"cbz x21, 11f\n"
"10:" // Tail row loop: Column loop
"mov x20, x21\n"
+ "decd x21, ALL, MUL #8\n"
"whilelt p1.h, XZR, x20\n"
- "ld1h { z23.h }, p1/Z, [x12]\n"
- "ld1h { z22.h }, p1/Z, [x11]\n"
"dech x20\n"
"whilelt p0.h, XZR, x20\n"
- "ld1h { z21.h }, p0/Z, [x12, #1, MUL VL]\n"
- "ld1h { z25.h }, p0/Z, [x11, #1, MUL VL]\n"
- "ld1h { z19.h }, p1/Z, [x10]\n"
- "ld1h { z20.h }, p0/Z, [x10, #1, MUL VL]\n"
- "decd x21, ALL, MUL #8\n"
- "zip1 z24.h, z23.h, z19.h\n"
- "ld1h { z18.h }, p1/Z, [x9]\n"
- "ld1h { z16.h }, p0/Z, [x9, #1, MUL VL]\n"
- "zip1 z17.h, z22.h, z18.h\n"
- "zip2 z23.h, z23.h, z19.h\n"
- "zip2 z19.h, z22.h, z18.h\n"
- "zip1 z22.h, z21.h, z20.h\n"
"cmp x21, #0x0\n"
+ "ld1h { z20.h }, p1/Z, [x12]\n"
+ "ld1h { z19.h }, p1/Z, [x28]\n"
+ "ld1h { z18.h }, p1/Z, [x27]\n"
+ "ld1h { z17.h }, p1/Z, [x26]\n"
+ "ld1h { z24.h }, p0/Z, [x12, #1, MUL VL]\n"
"addvl x12, x12, #2\n"
- "zip1 z18.h, z25.h, z16.h\n"
- "zip2 z21.h, z21.h, z20.h\n"
- "addvl x11, x11, #2\n"
- "addvl x10, x10, #2\n"
- "zip2 z20.h, z25.h, z16.h\n"
- "addvl x9, x9, #2\n"
- "zip1 z16.h, z24.h, z17.h\n"
- "st1h { z16.h }, p2, [x22]\n"
- "zip2 z16.h, z24.h, z17.h\n"
- "zip1 z17.h, z23.h, z19.h\n"
- "st1h { z16.h }, p2, [x22, #1, MUL VL]\n"
- "zip2 z16.h, z23.h, z19.h\n"
- "zip1 z19.h, z22.h, z18.h\n"
- "st1h { z17.h }, p2, [x22, #2, MUL VL]\n"
- "zip2 z18.h, z22.h, z18.h\n"
- "zip1 z17.h, z21.h, z20.h\n"
- "st1h { z16.h }, p2, [x22, #3, MUL VL]\n"
- "zip2 z16.h, z21.h, z20.h\n"
- "st1h { z19.h }, p2, [x22, #4, MUL VL]\n"
- "st1h { z18.h }, p2, [x22, #5, MUL VL]\n"
- "st1h { z17.h }, p2, [x22, #6, MUL VL]\n"
- "st1h { z16.h }, p2, [x22, #7, MUL VL]\n"
- "add x22, x22, %x[out_stride]\n"
+ "ld1h { z25.h }, p0/Z, [x28, #1, MUL VL]\n"
+ "addvl x28, x28, #2\n"
+ "ld1h { z23.h }, p0/Z, [x27, #1, MUL VL]\n"
+ "ld1h { z16.h }, p0/Z, [x26, #1, MUL VL]\n"
+ "addvl x27, x27, #2\n"
+ "addvl x26, x26, #2\n"
+ "zip1 z22.h, z20.h, z18.h\n"
+ "zip1 z21.h, z19.h, z17.h\n"
+ "zip2 z20.h, z20.h, z18.h\n"
+ "zip2 z19.h, z19.h, z17.h\n"
+ "zip1 z18.h, z24.h, z23.h\n"
+ "zip1 z17.h, z25.h, z16.h\n"
+ "zip2 z24.h, z24.h, z23.h\n"
+ "zip2 z16.h, z25.h, z16.h\n"
+ "zip1 z23.h, z22.h, z21.h\n"
+ "zip2 z22.h, z22.h, z21.h\n"
+ "zip1 z21.h, z20.h, z19.h\n"
+ "zip2 z20.h, z20.h, z19.h\n"
+ "zip1 z19.h, z18.h, z17.h\n"
+ "zip2 z18.h, z18.h, z17.h\n"
+ "zip1 z17.h, z24.h, z16.h\n"
+ "zip2 z16.h, z24.h, z16.h\n"
+ "st1h { z23.h }, p2, [x9]\n"
+ "st1h { z22.h }, p2, [x9, #1, MUL VL]\n"
+ "st1h { z21.h }, p2, [x9, #2, MUL VL]\n"
+ "st1h { z20.h }, p2, [x9, #3, MUL VL]\n"
+ "st1h { z19.h }, p2, [x9, #4, MUL VL]\n"
+ "st1h { z18.h }, p2, [x9, #5, MUL VL]\n"
+ "st1h { z17.h }, p2, [x9, #6, MUL VL]\n"
+ "st1h { z16.h }, p2, [x9, #7, MUL VL]\n"
+ "add x9, x9, %x[out_stride]\n"
"bgt 10b\n"
"11:" // Tail row loop: Column loop skip
"cmp %x[height], #0x1\n"
diff --git a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp
index 5a48e579ae..7160077342 100644
--- a/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp
+++ b/src/core/NEON/kernels/arm_gemm/transforms/sve_transpose_interleave_8VL_2x4_fp32bf16.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -39,220 +39,220 @@ void sve_transpose_interleave_8VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
size_t out_stride = 8 * roundup<size_t>(height, 4) * get_vector_length<uint32_t>();
__asm__ __volatile__(
- "ptrue p4.b\n"
+ "ptrue p2.b\n"
"1:" // Main row loop: Head
"mov x26, %x[in]\n"
- "add x25, x26, %x[in_stride]\n"
- "add x24, x25, %x[in_stride]\n"
- "mov x23, %x[width]\n"
+ "mov x25, %x[width]\n"
"cnth x20, ALL, MUL #4\n"
- "add x22, x24, %x[in_stride]\n"
"cmp %x[height], #0x3\n"
- "add %x[in], x22, %x[in_stride]\n"
- "csel x22, x22, %x[pad_row], GT\n"
- "csel x24, x24, %x[pad_row], GE\n"
+ "mov x24, %x[out]\n"
+ "add x23, x26, %x[in_stride]\n"
+ "add x22, x23, %x[in_stride]\n"
+ "add x21, x22, %x[in_stride]\n"
+ "add %x[in], x21, %x[in_stride]\n"
+ "csel x21, x21, %x[pad_row], GT\n"
+ "csel x22, x22, %x[pad_row], GE\n"
"cmp %x[height], #0x1\n"
- "csel x25, x25, %x[pad_row], GT\n"
- "cmp x23, x20\n"
- "mov x21, %x[out]\n"
+ "csel x23, x23, %x[pad_row], GT\n"
+ "cmp x25, x20\n"
"sub %x[height], %x[height], #0x4\n"
"blt 3f\n"
"2:" // Main row loop: Unroll column loop
- "ld1w { z19.s }, p4/Z, [x26]\n"
- "ld1w { z18.s }, p4/Z, [x26, #1, MUL VL]\n"
- "sub x23, x23, x20\n"
- "cmp x23, x20\n"
- "ld1w { z20.s }, p4/Z, [x26, #2, MUL VL]\n"
- "ld1w { z24.s }, p4/Z, [x26, #3, MUL VL]\n"
- "ld1w { z23.s }, p4/Z, [x24]\n"
- "ld1w { z17.s }, p4/Z, [x24, #1, MUL VL]\n"
- "zip1 z22.s, z19.s, z23.s\n"
- "zip2 z21.s, z19.s, z23.s\n"
- "ld1w { z31.s }, p4/Z, [x24, #2, MUL VL]\n"
- "ld1w { z16.s }, p4/Z, [x24, #3, MUL VL]\n"
- "zip1 z9.s, z18.s, z17.s\n"
- "zip2 z7.s, z18.s, z17.s\n"
- "ld1w { z19.s }, p4/Z, [x26, #4, MUL VL]\n"
- "ld1w { z18.s }, p4/Z, [x26, #5, MUL VL]\n"
- "zip1 z6.s, z20.s, z31.s\n"
- "zip2 z5.s, z20.s, z31.s\n"
- "ld1w { z15.s }, p4/Z, [x26, #6, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x26, #7, MUL VL]\n"
- "zip1 z3.s, z24.s, z16.s\n"
- "zip2 z2.s, z24.s, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x24, #4, MUL VL]\n"
- "ld1w { z17.s }, p4/Z, [x24, #5, MUL VL]\n"
- "zip1 z1.s, z19.s, z16.s\n"
- "zip2 z0.s, z19.s, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x24, #6, MUL VL]\n"
- "ld1w { z19.s }, p4/Z, [x24, #7, MUL VL]\n"
- "zip1 z31.s, z18.s, z17.s\n"
- "zip2 z30.s, z18.s, z17.s\n"
- "ld1w { z18.s }, p4/Z, [x25]\n"
- "ld1w { z17.s }, p4/Z, [x25, #1, MUL VL]\n"
- "zip1 z29.s, z15.s, z16.s\n"
- "zip2 z28.s, z15.s, z16.s\n"
- "ld1w { z16.s }, p4/Z, [x25, #2, MUL VL]\n"
- "ld1w { z23.s }, p4/Z, [x25, #3, MUL VL]\n"
- "zip1 z27.s, z20.s, z19.s\n"
- "zip2 z26.s, z20.s, z19.s\n"
- "ld1w { z11.s }, p4/Z, [x22]\n"
- "ld1w { z8.s }, p4/Z, [x22, #1, MUL VL]\n"
- ".inst 0x658ab2d8 // bfcvt z24.h, p4/M, z22.s\n"
- "zip1 z25.s, z18.s, z11.s\n"
- "ld1w { z4.s }, p4/Z, [x22, #2, MUL VL]\n"
- "ld1w { z22.s }, p4/Z, [x22, #3, MUL VL]\n"
- ".inst 0x658ab2af // bfcvt z15.h, p4/M, z21.s\n"
- "zip2 z14.s, z18.s, z11.s\n"
- "ld1w { z21.s }, p4/Z, [x25, #4, MUL VL]\n"
- "ld1w { z20.s }, p4/Z, [x25, #5, MUL VL]\n"
- ".inst 0x658ab12d // bfcvt z13.h, p4/M, z9.s\n"
- "zip1 z12.s, z17.s, z8.s\n"
- "ld1w { z11.s }, p4/Z, [x25, #6, MUL VL]\n"
- "ld1w { z10.s }, p4/Z, [x25, #7, MUL VL]\n"
- ".inst 0x658ab0e9 // bfcvt z9.h, p4/M, z7.s\n"
- "zip2 z8.s, z17.s, z8.s\n"
- "ld1w { z19.s }, p4/Z, [x22, #4, MUL VL]\n"
- "ld1w { z18.s }, p4/Z, [x22, #5, MUL VL]\n"
- ".inst 0x658ab0c7 // bfcvt z7.h, p4/M, z6.s\n"
- "zip1 z6.s, z16.s, z4.s\n"
- "ld1w { z17.s }, p4/Z, [x22, #6, MUL VL]\n"
- ".inst 0x658ab0a5 // bfcvt z5.h, p4/M, z5.s\n"
- "zip2 z4.s, z16.s, z4.s\n"
- "ld1w { z16.s }, p4/Z, [x22, #7, MUL VL]\n"
- ".inst 0x658ab063 // bfcvt z3.h, p4/M, z3.s\n"
- ".inst 0x658ab042 // bfcvt z2.h, p4/M, z2.s\n"
+ "ld1w { z25.s }, p2/Z, [x26]\n"
+ "ld1w { z23.s }, p2/Z, [x26, #1, MUL VL]\n"
+ "sub x25, x25, x20\n"
+ "ld1w { z5.s }, p2/Z, [x26, #2, MUL VL]\n"
+ "ld1w { z31.s }, p2/Z, [x26, #3, MUL VL]\n"
+ "cmp x25, x20\n"
+ "ld1w { z16.s }, p2/Z, [x22]\n"
+ "ld1w { z18.s }, p2/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z21.s }, p2/Z, [x22, #2, MUL VL]\n"
+ "ld1w { z14.s }, p2/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z3.s }, p2/Z, [x26, #4, MUL VL]\n"
+ "ld1w { z30.s }, p2/Z, [x26, #5, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x26, #6, MUL VL]\n"
+ "ld1w { z22.s }, p2/Z, [x26, #7, MUL VL]\n"
+ "zip1 z19.s, z25.s, z16.s\n"
+ "zip2 z17.s, z25.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x22, #4, MUL VL]\n"
+ "ld1w { z13.s }, p2/Z, [x22, #5, MUL VL]\n"
+ "zip1 z11.s, z23.s, z18.s\n"
+ "zip2 z10.s, z23.s, z18.s\n"
+ "ld1w { z18.s }, p2/Z, [x22, #6, MUL VL]\n"
+ "ld1w { z15.s }, p2/Z, [x22, #7, MUL VL]\n"
+ "zip1 z9.s, z5.s, z21.s\n"
+ "zip2 z8.s, z5.s, z21.s\n"
+ "ld1w { z12.s }, p2/Z, [x23]\n"
+ "ld1w { z27.s }, p2/Z, [x23, #1, MUL VL]\n"
+ "zip1 z7.s, z31.s, z14.s\n"
+ "zip2 z6.s, z31.s, z14.s\n"
+ "ld1w { z25.s }, p2/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z23.s }, p2/Z, [x23, #3, MUL VL]\n"
+ "zip1 z5.s, z3.s, z16.s\n"
+ "zip2 z4.s, z3.s, z16.s\n"
+ "ld1w { z16.s }, p2/Z, [x21]\n"
+ "ld1w { z28.s }, p2/Z, [x21, #1, MUL VL]\n"
+ "zip1 z3.s, z30.s, z13.s\n"
+ "zip2 z2.s, z30.s, z13.s\n"
+ "ld1w { z26.s }, p2/Z, [x21, #2, MUL VL]\n"
+ "ld1w { z24.s }, p2/Z, [x21, #3, MUL VL]\n"
+ "zip1 z1.s, z20.s, z18.s\n"
+ "zip2 z0.s, z20.s, z18.s\n"
+ "ld1w { z21.s }, p2/Z, [x23, #4, MUL VL]\n"
+ "ld1w { z20.s }, p2/Z, [x23, #5, MUL VL]\n"
+ "zip1 z31.s, z22.s, z15.s\n"
+ "zip2 z30.s, z22.s, z15.s\n"
+ "ld1w { z14.s }, p2/Z, [x23, #6, MUL VL]\n"
+ "ld1w { z15.s }, p2/Z, [x23, #7, MUL VL]\n"
+ ".inst 0x658aaa76 // bfcvt z22.h, p2/M, z19.s\n"
+ "zip1 z29.s, z12.s, z16.s\n"
+ "ld1w { z19.s }, p2/Z, [x21, #4, MUL VL]\n"
+ "ld1w { z18.s }, p2/Z, [x21, #5, MUL VL]\n"
+ ".inst 0x658aaa2d // bfcvt z13.h, p2/M, z17.s\n"
+ "zip2 z12.s, z12.s, z16.s\n"
+ "ld1w { z17.s }, p2/Z, [x21, #6, MUL VL]\n"
+ "ld1w { z16.s }, p2/Z, [x21, #7, MUL VL]\n"
+ ".inst 0x658aa96b // bfcvt z11.h, p2/M, z11.s\n"
+ ".inst 0x658aa94a // bfcvt z10.h, p2/M, z10.s\n"
+ ".inst 0x658aa929 // bfcvt z9.h, p2/M, z9.s\n"
+ ".inst 0x658aa908 // bfcvt z8.h, p2/M, z8.s\n"
"addvl x26, x26, #8\n"
- "addvl x25, x25, #8\n"
- ".inst 0x658ab021 // bfcvt z1.h, p4/M, z1.s\n"
- ".inst 0x658ab000 // bfcvt z0.h, p4/M, z0.s\n"
- "addvl x24, x24, #8\n"
+ "addvl x23, x23, #8\n"
+ ".inst 0x658aa8e7 // bfcvt z7.h, p2/M, z7.s\n"
+ ".inst 0x658aa8c6 // bfcvt z6.h, p2/M, z6.s\n"
"addvl x22, x22, #8\n"
- ".inst 0x658ab3ff // bfcvt z31.h, p4/M, z31.s\n"
- ".inst 0x658ab3de // bfcvt z30.h, p4/M, z30.s\n"
- ".inst 0x658ab3bd // bfcvt z29.h, p4/M, z29.s\n"
- ".inst 0x658ab39c // bfcvt z28.h, p4/M, z28.s\n"
- ".inst 0x658ab37b // bfcvt z27.h, p4/M, z27.s\n"
- ".inst 0x658ab35a // bfcvt z26.h, p4/M, z26.s\n"
- ".inst 0x648ab338 // bfcvtnt z24.h, p4/M, z25.s\n"
- "zip1 z25.s, z23.s, z22.s\n"
- "st1h { z24.h }, p4, [x21]\n"
- "zip2 z24.s, z23.s, z22.s\n"
+ "addvl x21, x21, #8\n"
+ ".inst 0x658aa8a5 // bfcvt z5.h, p2/M, z5.s\n"
+ ".inst 0x658aa884 // bfcvt z4.h, p2/M, z4.s\n"
+ ".inst 0x658aa863 // bfcvt z3.h, p2/M, z3.s\n"
+ ".inst 0x658aa842 // bfcvt z2.h, p2/M, z2.s\n"
+ ".inst 0x658aa821 // bfcvt z1.h, p2/M, z1.s\n"
+ ".inst 0x658aa800 // bfcvt z0.h, p2/M, z0.s\n"
+ ".inst 0x658aabff // bfcvt z31.h, p2/M, z31.s\n"
+ ".inst 0x658aabde // bfcvt z30.h, p2/M, z30.s\n"
+ ".inst 0x648aabb6 // bfcvtnt z22.h, p2/M, z29.s\n"
+ "zip1 z29.s, z27.s, z28.s\n"
+ "zip2 z28.s, z27.s, z28.s\n"
+ "zip1 z27.s, z25.s, z26.s\n"
+ "zip2 z26.s, z25.s, z26.s\n"
+ "zip1 z25.s, z23.s, z24.s\n"
+ "zip2 z24.s, z23.s, z24.s\n"
"zip1 z23.s, z21.s, z19.s\n"
+ "st1h { z22.h }, p2, [x24]\n"
"zip2 z22.s, z21.s, z19.s\n"
"zip1 z21.s, z20.s, z18.s\n"
"zip2 z20.s, z20.s, z18.s\n"
- "zip1 z19.s, z11.s, z17.s\n"
- "zip2 z18.s, z11.s, z17.s\n"
- "zip1 z17.s, z10.s, z16.s\n"
- "zip2 z16.s, z10.s, z16.s\n"
- ".inst 0x648ab1cf // bfcvtnt z15.h, p4/M, z14.s\n"
- "st1h { z15.h }, p4, [x21, #1, MUL VL]\n"
- ".inst 0x648ab18d // bfcvtnt z13.h, p4/M, z12.s\n"
- ".inst 0x648ab109 // bfcvtnt z9.h, p4/M, z8.s\n"
- "st1h { z13.h }, p4, [x21, #2, MUL VL]\n"
- ".inst 0x648ab0c7 // bfcvtnt z7.h, p4/M, z6.s\n"
- ".inst 0x648ab085 // bfcvtnt z5.h, p4/M, z4.s\n"
- "st1h { z9.h }, p4, [x21, #3, MUL VL]\n"
- ".inst 0x648ab323 // bfcvtnt z3.h, p4/M, z25.s\n"
- ".inst 0x648ab302 // bfcvtnt z2.h, p4/M, z24.s\n"
- "st1h { z7.h }, p4, [x21, #4, MUL VL]\n"
- "st1h { z5.h }, p4, [x21, #5, MUL VL]\n"
- ".inst 0x648ab2e1 // bfcvtnt z1.h, p4/M, z23.s\n"
- ".inst 0x648ab2c0 // bfcvtnt z0.h, p4/M, z22.s\n"
- "st1h { z3.h }, p4, [x21, #6, MUL VL]\n"
- ".inst 0x648ab2bf // bfcvtnt z31.h, p4/M, z21.s\n"
- ".inst 0x648ab29e // bfcvtnt z30.h, p4/M, z20.s\n"
- "st1h { z2.h }, p4, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
- ".inst 0x648ab27d // bfcvtnt z29.h, p4/M, z19.s\n"
- ".inst 0x648ab25c // bfcvtnt z28.h, p4/M, z18.s\n"
- ".inst 0x648ab23b // bfcvtnt z27.h, p4/M, z17.s\n"
- ".inst 0x648ab21a // bfcvtnt z26.h, p4/M, z16.s\n"
- "st1h { z1.h }, p4, [x21]\n"
- "st1h { z0.h }, p4, [x21, #1, MUL VL]\n"
- "st1h { z31.h }, p4, [x21, #2, MUL VL]\n"
- "st1h { z30.h }, p4, [x21, #3, MUL VL]\n"
- "st1h { z29.h }, p4, [x21, #4, MUL VL]\n"
- "st1h { z28.h }, p4, [x21, #5, MUL VL]\n"
- "st1h { z27.h }, p4, [x21, #6, MUL VL]\n"
- "st1h { z26.h }, p4, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 z19.s, z14.s, z17.s\n"
+ "zip2 z18.s, z14.s, z17.s\n"
+ "zip1 z17.s, z15.s, z16.s\n"
+ "zip2 z16.s, z15.s, z16.s\n"
+ ".inst 0x648aa98d // bfcvtnt z13.h, p2/M, z12.s\n"
+ ".inst 0x648aabab // bfcvtnt z11.h, p2/M, z29.s\n"
+ ".inst 0x648aab8a // bfcvtnt z10.h, p2/M, z28.s\n"
+ ".inst 0x648aab69 // bfcvtnt z9.h, p2/M, z27.s\n"
+ ".inst 0x648aab48 // bfcvtnt z8.h, p2/M, z26.s\n"
+ ".inst 0x648aab27 // bfcvtnt z7.h, p2/M, z25.s\n"
+ ".inst 0x648aab06 // bfcvtnt z6.h, p2/M, z24.s\n"
+ "st1h { z13.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z11.h }, p2, [x24, #2, MUL VL]\n"
+ ".inst 0x648aaae5 // bfcvtnt z5.h, p2/M, z23.s\n"
+ ".inst 0x648aaac4 // bfcvtnt z4.h, p2/M, z22.s\n"
+ "st1h { z10.h }, p2, [x24, #3, MUL VL]\n"
+ ".inst 0x648aaaa3 // bfcvtnt z3.h, p2/M, z21.s\n"
+ ".inst 0x648aaa82 // bfcvtnt z2.h, p2/M, z20.s\n"
+ "st1h { z9.h }, p2, [x24, #4, MUL VL]\n"
+ ".inst 0x648aaa61 // bfcvtnt z1.h, p2/M, z19.s\n"
+ ".inst 0x648aaa40 // bfcvtnt z0.h, p2/M, z18.s\n"
+ "st1h { z8.h }, p2, [x24, #5, MUL VL]\n"
+ ".inst 0x648aaa3f // bfcvtnt z31.h, p2/M, z17.s\n"
+ ".inst 0x648aaa1e // bfcvtnt z30.h, p2/M, z16.s\n"
+ "st1h { z7.h }, p2, [x24, #6, MUL VL]\n"
+ "st1h { z6.h }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
+ "st1h { z5.h }, p2, [x24]\n"
+ "st1h { z4.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z3.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z2.h }, p2, [x24, #3, MUL VL]\n"
+ "st1h { z1.h }, p2, [x24, #4, MUL VL]\n"
+ "st1h { z0.h }, p2, [x24, #5, MUL VL]\n"
+ "st1h { z31.h }, p2, [x24, #6, MUL VL]\n"
+ "st1h { z30.h }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bge 2b\n"
"3:" // Main row loop: Unroll column loop skip
- "cbz x23, 5f\n"
+ "cbz x25, 5f\n"
"4:" // Main row loop: Column loop
- "mov x20, x23\n"
- "whilelt p3.s, XZR, x20\n"
- "ld1w { z22.s }, p3/Z, [x26]\n"
- "ld1w { z21.s }, p3/Z, [x24]\n"
+ "mov x20, x25\n"
+ "decd x25, ALL, MUL #8\n"
+ "whilelt p1.s, XZR, x20\n"
"decw x20\n"
- "whilelt p2.s, XZR, x20\n"
- "ld1w { z20.s }, p2/Z, [x26, #1, MUL VL]\n"
- "ld1w { z19.s }, p2/Z, [x24, #1, MUL VL]\n"
+ "whilelt p0.s, XZR, x20\n"
"decw x20\n"
+ "ld1w { z18.s }, p1/Z, [x26]\n"
+ "ld1w { z16.s }, p1/Z, [x22]\n"
+ "ld1w { z26.s }, p1/Z, [x23]\n"
+ "ld1w { z25.s }, p1/Z, [x21]\n"
"whilelt p1.s, XZR, x20\n"
- "ld1w { z18.s }, p1/Z, [x26, #2, MUL VL]\n"
- "ld1w { z17.s }, p1/Z, [x24, #2, MUL VL]\n"
"decw x20\n"
+ "ld1w { z20.s }, p0/Z, [x26, #1, MUL VL]\n"
+ "ld1w { z17.s }, p0/Z, [x22, #1, MUL VL]\n"
+ "ld1w { z24.s }, p0/Z, [x23, #1, MUL VL]\n"
+ "ld1w { z23.s }, p0/Z, [x21, #1, MUL VL]\n"
+ "zip1 z19.s, z18.s, z16.s\n"
+ "zip2 z16.s, z18.s, z16.s\n"
"whilelt p0.s, XZR, x20\n"
- "ld1w { z28.s }, p0/Z, [x26, #3, MUL VL]\n"
- "ld1w { z16.s }, p0/Z, [x24, #3, MUL VL]\n"
- "ld1w { z27.s }, p3/Z, [x25]\n"
- "ld1w { z3.s }, p2/Z, [x25, #1, MUL VL]\n"
- "zip1 z26.s, z22.s, z21.s\n"
- "zip2 z25.s, z22.s, z21.s\n"
- "ld1w { z2.s }, p1/Z, [x25, #2, MUL VL]\n"
- "ld1w { z1.s }, p0/Z, [x25, #3, MUL VL]\n"
- "zip1 z24.s, z20.s, z19.s\n"
- "zip2 z23.s, z20.s, z19.s\n"
- "ld1w { z22.s }, p3/Z, [x22]\n"
- "ld1w { z21.s }, p2/Z, [x22, #1, MUL VL]\n"
- "zip1 z20.s, z18.s, z17.s\n"
- "zip2 z19.s, z18.s, z17.s\n"
+ "ld1w { z22.s }, p1/Z, [x26, #2, MUL VL]\n"
"ld1w { z18.s }, p1/Z, [x22, #2, MUL VL]\n"
- "ld1w { z0.s }, p0/Z, [x22, #3, MUL VL]\n"
- "zip1 z17.s, z28.s, z16.s\n"
- "zip2 z16.s, z28.s, z16.s\n"
- "decd x23, ALL, MUL #8\n"
- ".inst 0x658ab35f // bfcvt z31.h, p4/M, z26.s\n"
- "zip1 z30.s, z27.s, z22.s\n"
- "cmp x23, #0x0\n"
- ".inst 0x658ab33d // bfcvt z29.h, p4/M, z25.s\n"
- "zip2 z28.s, z27.s, z22.s\n"
+ "zip1 z3.s, z26.s, z25.s\n"
+ "ld1w { z2.s }, p1/Z, [x23, #2, MUL VL]\n"
+ "ld1w { z1.s }, p1/Z, [x21, #2, MUL VL]\n"
+ "zip1 z21.s, z20.s, z17.s\n"
+ "zip2 z17.s, z20.s, z17.s\n"
+ ".inst 0x658aaa60 // bfcvt z0.h, p2/M, z19.s\n"
+ ".inst 0x658aaa1f // bfcvt z31.h, p2/M, z16.s\n"
+ "cmp x25, #0x0\n"
+ "ld1w { z20.s }, p0/Z, [x26, #3, MUL VL]\n"
+ "ld1w { z16.s }, p0/Z, [x22, #3, MUL VL]\n"
+ "ld1w { z30.s }, p0/Z, [x23, #3, MUL VL]\n"
+ "zip1 z19.s, z22.s, z18.s\n"
+ "zip2 z18.s, z22.s, z18.s\n"
+ "ld1w { z29.s }, p0/Z, [x21, #3, MUL VL]\n"
+ "zip2 z28.s, z26.s, z25.s\n"
+ ".inst 0x658aaabb // bfcvt z27.h, p2/M, z21.s\n"
"addvl x26, x26, #4\n"
- "addvl x25, x25, #4\n"
- ".inst 0x658ab31b // bfcvt z27.h, p4/M, z24.s\n"
- "zip1 z26.s, z3.s, z21.s\n"
- "addvl x24, x24, #4\n"
+ "zip1 z26.s, z24.s, z23.s\n"
+ ".inst 0x658aaa39 // bfcvt z25.h, p2/M, z17.s\n"
+ "addvl x23, x23, #4\n"
"addvl x22, x22, #4\n"
- ".inst 0x658ab2f9 // bfcvt z25.h, p4/M, z23.s\n"
- "zip2 z24.s, z3.s, z21.s\n"
- ".inst 0x658ab297 // bfcvt z23.h, p4/M, z20.s\n"
- "zip1 z22.s, z2.s, z18.s\n"
- ".inst 0x658ab275 // bfcvt z21.h, p4/M, z19.s\n"
- "zip2 z20.s, z2.s, z18.s\n"
- ".inst 0x658ab233 // bfcvt z19.h, p4/M, z17.s\n"
- "zip1 z18.s, z1.s, z0.s\n"
- ".inst 0x658ab211 // bfcvt z17.h, p4/M, z16.s\n"
- "zip2 z16.s, z1.s, z0.s\n"
- ".inst 0x648ab3df // bfcvtnt z31.h, p4/M, z30.s\n"
- ".inst 0x648ab39d // bfcvtnt z29.h, p4/M, z28.s\n"
- "st1h { z31.h }, p4, [x21]\n"
- ".inst 0x648ab35b // bfcvtnt z27.h, p4/M, z26.s\n"
- ".inst 0x648ab319 // bfcvtnt z25.h, p4/M, z24.s\n"
- "st1h { z29.h }, p4, [x21, #1, MUL VL]\n"
- ".inst 0x648ab2d7 // bfcvtnt z23.h, p4/M, z22.s\n"
- ".inst 0x648ab295 // bfcvtnt z21.h, p4/M, z20.s\n"
- "st1h { z27.h }, p4, [x21, #2, MUL VL]\n"
- ".inst 0x648ab253 // bfcvtnt z19.h, p4/M, z18.s\n"
- ".inst 0x648ab211 // bfcvtnt z17.h, p4/M, z16.s\n"
- "st1h { z25.h }, p4, [x21, #3, MUL VL]\n"
- "st1h { z23.h }, p4, [x21, #4, MUL VL]\n"
- "st1h { z21.h }, p4, [x21, #5, MUL VL]\n"
- "st1h { z19.h }, p4, [x21, #6, MUL VL]\n"
- "st1h { z17.h }, p4, [x21, #7, MUL VL]\n"
- "add x21, x21, %x[out_stride]\n"
+ "zip1 z17.s, z20.s, z16.s\n"
+ "zip2 z16.s, z20.s, z16.s\n"
+ "addvl x21, x21, #4\n"
+ "zip2 z24.s, z24.s, z23.s\n"
+ ".inst 0x658aaa77 // bfcvt z23.h, p2/M, z19.s\n"
+ "zip1 z22.s, z2.s, z1.s\n"
+ ".inst 0x658aaa55 // bfcvt z21.h, p2/M, z18.s\n"
+ "zip2 z20.s, z2.s, z1.s\n"
+ ".inst 0x658aaa33 // bfcvt z19.h, p2/M, z17.s\n"
+ "zip1 z18.s, z30.s, z29.s\n"
+ ".inst 0x658aaa11 // bfcvt z17.h, p2/M, z16.s\n"
+ "zip2 z16.s, z30.s, z29.s\n"
+ ".inst 0x648aa860 // bfcvtnt z0.h, p2/M, z3.s\n"
+ ".inst 0x648aab9f // bfcvtnt z31.h, p2/M, z28.s\n"
+ ".inst 0x648aab5b // bfcvtnt z27.h, p2/M, z26.s\n"
+ ".inst 0x648aab19 // bfcvtnt z25.h, p2/M, z24.s\n"
+ ".inst 0x648aaad7 // bfcvtnt z23.h, p2/M, z22.s\n"
+ ".inst 0x648aaa95 // bfcvtnt z21.h, p2/M, z20.s\n"
+ ".inst 0x648aaa53 // bfcvtnt z19.h, p2/M, z18.s\n"
+ "st1h { z0.h }, p2, [x24]\n"
+ ".inst 0x648aaa11 // bfcvtnt z17.h, p2/M, z16.s\n"
+ "st1h { z31.h }, p2, [x24, #1, MUL VL]\n"
+ "st1h { z27.h }, p2, [x24, #2, MUL VL]\n"
+ "st1h { z25.h }, p2, [x24, #3, MUL VL]\n"
+ "st1h { z23.h }, p2, [x24, #4, MUL VL]\n"
+ "st1h { z21.h }, p2, [x24, #5, MUL VL]\n"
+ "st1h { z19.h }, p2, [x24, #6, MUL VL]\n"
+ "st1h { z17.h }, p2, [x24, #7, MUL VL]\n"
+ "add x24, x24, %x[out_stride]\n"
"bgt 4b\n"
"5:" // Main row loop: Column loop skip
"cmp %x[height], #0x1\n"
@@ -260,7 +260,7 @@ void sve_transpose_interleave_8VL_2x4_fp32bf16(bfloat16 *out, const float *in, s
"bge 1b\n"
: [height] "+&r" (height), [in] "+&r" (in), [out] "+&r" (out)
: [in_stride] "r" (in_stride), [out_stride] "r" (out_stride), [pad_row] "r" (pad_row), [width] "r" (width)
- : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ : "cc", "memory", "p0", "p1", "p2", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
);
}
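
Aside on the hunk above: the rewrite reschedules the loads/zips and renumbers registers and predicates (p4 becomes p2, the output pointer moves from x21 to x24) without changing the conversion scheme. Each BFCVT converts one zipped fp32 vector to bf16 into the even-numbered 16-bit lanes, and the paired BFCVTNT fills the odd lanes, packing two interleaved source rows per output vector. A scalar sketch of the per-lane conversion — ours, not from the kernel, ignoring NaN special-casing and assuming the default round-to-nearest-even mode:

#include <cstdint>
#include <cstring>

// fp32 -> bf16 with round-to-nearest-even: one lane of what BFCVT does.
static inline uint16_t fp32_to_bf16_rne(float f)
{
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    // Bias by 0x7FFF plus the least significant kept bit: ties round to even.
    bits += 0x7FFFu + ((bits >> 16) & 1u);
    return static_cast<uint16_t>(bits >> 16);
}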
diff --git a/src/core/NEON/kernels/arm_gemm/utils.hpp b/src/core/NEON/kernels/arm_gemm/utils.hpp
index 11b1bd3e05..d0a8635604 100644
--- a/src/core/NEON/kernels/arm_gemm/utils.hpp
+++ b/src/core/NEON/kernels/arm_gemm/utils.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -176,7 +176,6 @@ namespace utils {
// which then calls SVE kernels (compiled accordingly) iff SVE is detected at runtime.
template <typename T>
inline unsigned long get_vector_length() {
-#if defined(__aarch64__)
uint64_t vl;
__asm __volatile (
@@ -188,24 +187,26 @@ inline unsigned long get_vector_length() {
);
return vl / sizeof(T);
-#else // !defined(__aarch64__)
- return 16 / sizeof(T);
-#endif // defined(__aarch64__)
}
-#ifdef ARM_COMPUTE_ENABLE_SME
namespace sme {
-// function from misc-sve.cpp
-extern unsigned int raw_vector_length();
-
template <typename T>
-inline unsigned long get_vector_length() {
- return raw_vector_length() / sizeof(T);
+inline uint64_t get_vector_length() {
+ uint64_t raw_vector_length;
+
+ __asm __volatile (
+ ".inst 0x04bf5821\n" // RDSVL X1, #1
+ "mov %0, X1\n"
+ : "=r" (raw_vector_length)
+ :
+ : "x1"
+ );
+
+ return raw_vector_length / sizeof(T);
}
} // namespace sme
-#endif // ARM_COMPUTE_ENABLE_SME
// get_vector_length(VLType): Returns vector length for type "T".
//
@@ -214,48 +215,17 @@ inline unsigned long get_vector_length() {
template <typename T>
inline unsigned long get_vector_length(VLType vl_type) {
switch (vl_type) {
-#ifdef ARM_COMPUTE_ENABLE_SME
+#ifdef ARM_COMPUTE_ENABLE_SVE
case VLType::SME:
return sme::get_vector_length<T>();
-#endif // ARM_COMPUTE_ENABLE_SME
case VLType::SVE:
return get_vector_length<T>();
+#endif
default:
return 16 / sizeof(T);
}
}
-// get_default_activation_values(): Returns the default values for activation min and max for integer activation.
-template <typename T>
-inline std::tuple<T, T> get_default_activation_values()
-{
- const T min = static_cast<T>(std::numeric_limits<T>::min());
- const T max = static_cast<T>(std::numeric_limits<T>::max());
-
- return std::make_tuple(min, max);
-}
-
-// get_default_activation_values(): Returns the default values for activation min and max for float activation.
-template <>
-inline std::tuple<float, float> get_default_activation_values()
-{
- const float min = static_cast<float>(-std::numeric_limits<float>::infinity());
- const float max = static_cast<float>(std::numeric_limits<float>::infinity());
-
- return std::make_tuple(min, max);
-}
-
-#if defined(__ARM_FP16_ARGS)
-// get_default_activation_values(): Returns the default values for activation min and max for __fp16 activation.
-template <>
-inline std::tuple<__fp16, __fp16> get_default_activation_values()
-{
- const __fp16 min = static_cast<__fp16>(-std::numeric_limits<float>::infinity());
- const __fp16 max = static_cast<__fp16>(std::numeric_limits<float>::infinity());
-
- return std::make_tuple(min, max);
-}
-#endif // defined(__ARM_FP16_ARGS)
} // utils namespace
} // arm_gemm namespace
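
Both helpers now read the hardware directly: the SVE variant keeps its inline-assembly query, and the SME variant replaces the call into misc-sve.cpp with RDSVL, which returns the streaming vector length in bytes (hence the division by sizeof(T)). A usage sketch — ours, assuming an AArch64 build with this header included:

#include <cstddef>

// Step a loop by the runtime vector length in floats (e.g. 4 lanes per
// 128 bits of SVE). The scalar body stands in for a predicated SVE loop.
void scale_in_place(float *data, size_t n, float factor)
{
    const unsigned long step = arm_gemm::utils::get_vector_length<float>();
    for (size_t i = 0; i < n; i += step)
    {
        for (size_t j = i; j < i + step && j < n; ++j)
        {
            data[j] *= factor;
        }
    }
}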
diff --git a/src/core/NEON/kernels/assembly/depthwise.hpp b/src/core/NEON/kernels/assembly/depthwise.hpp
index 13c2d314e4..0b68cb4db8 100644
--- a/src/core/NEON/kernels/assembly/depthwise.hpp
+++ b/src/core/NEON/kernels/assembly/depthwise.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,6 +22,9 @@
* SOFTWARE.
*/
+#ifndef ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_DEPTHWISE_HPP
+#define ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_DEPTHWISE_HPP
+
#pragma once
#include "arm_gemm.hpp"
@@ -349,3 +352,5 @@ std::vector<KernelDescription> get_compatible_kernels(const DepthwiseArgs &, con
} // namespace depthwise
} // namespace arm_conv
+
+#endif // ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_DEPTHWISE_HPP
diff --git a/src/core/NEON/kernels/assembly/pool_common.hpp b/src/core/NEON/kernels/assembly/pool_common.hpp
index 045f9f95d3..a2d87e9382 100644
--- a/src/core/NEON/kernels/assembly/pool_common.hpp
+++ b/src/core/NEON/kernels/assembly/pool_common.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,6 +22,9 @@
* SOFTWARE.
*/
+#ifndef ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_POOL_COMMON_HPP
+#define ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_POOL_COMMON_HPP
+
#pragma once
#ifdef CYCLE_PROFILING
#include "profiler.hpp"
@@ -65,7 +68,8 @@ public:
virtual ~IPoolingCommon() = default;
// Determine the amount of working space required.
- virtual size_t get_working_size(unsigned int num_threads) const = 0;
+ virtual size_t get_working_size(unsigned int num_threads) const = 0;
+ virtual size_t get_working_size(unsigned int num_threads, unsigned int n_channels) const = 0;
// Execute pooling over the specified area of memory.
virtual void execute(const void *const input,
@@ -108,3 +112,5 @@ public:
} // namespace pooling
} // namespace arm_conv
+
+#endif // ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_POOL_COMMON_HPP
diff --git a/src/core/NEON/kernels/assembly/pooling.hpp b/src/core/NEON/kernels/assembly/pooling.hpp
index 89d594298e..d64a59f4d0 100644
--- a/src/core/NEON/kernels/assembly/pooling.hpp
+++ b/src/core/NEON/kernels/assembly/pooling.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -22,6 +22,9 @@
* SOFTWARE.
*/
+#ifndef ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_POOLING_HPP
+#define ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_POOLING_HPP
+
#pragma once
#include "arm_gemm_local.hpp"
@@ -136,7 +139,11 @@ public:
PoolingCommon(PoolingCommon &) = delete;
PoolingCommon &operator=(PoolingCommon &) = delete;
- size_t get_working_size(unsigned int) const override = 0;
+ size_t get_working_size(unsigned int, unsigned int) const override = 0;
+ size_t get_working_size(unsigned int n_threads) const override
+ {
+ return this->get_working_size(n_threads, m_args.n_channels);
+ }
// Execute pooling over the specified area of memory.
void execute(const void *const input,
@@ -223,3 +230,5 @@ UniquePoolingCommon<TInput, TOutput> pooling(const PoolingArgs &, const OutputSt
} // namespace pooling
} // namespace arm_conv
+
+#endif // ACL_SRC_CORE_NEON_KERNELS_ASSEMBLY_POOLING_HPP
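
The shape of this change: the two-argument get_working_size() becomes the pure-virtual customisation point, while the one-argument overload is kept for existing callers by forwarding the channel count captured in m_args. A stripped-down sketch with illustrative names:

#include <cstddef>

struct Args
{
    unsigned int n_channels;
};

class PoolingBase
{
public:
    virtual ~PoolingBase() = default;
    // Derived classes implement only this overload.
    virtual size_t get_working_size(unsigned int n_threads, unsigned int n_channels) const = 0;
    // Legacy entry point forwards the configured channel count.
    size_t get_working_size(unsigned int n_threads) const
    {
        return get_working_size(n_threads, m_args.n_channels);
    }
protected:
    explicit PoolingBase(Args args) : m_args(args) {}
    Args m_args;
};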
diff --git a/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp
index 3e1fc491f1..890f82537c 100644
--- a/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/input_transforms/arm_fp32_1x8.cpp
@@ -31,11 +31,11 @@ namespace input_transform {
void arm_fp32_1x8(
const unsigned int n_channels,
- const float * input_base,
+ const float *const input_base,
size_t, // We don't need to stride over rows
- size_t input_col_stride,
+ const size_t input_col_stride,
float *outptr,
- size_t matrix_stride
+ const size_t matrix_stride
)
{
constexpr int inner_tile_cols = 8;
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/a64_fp16_4x4_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/a64_fp16_4x4_3x3.cpp
index 295005a2ee..4218b754b4 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/a64_fp16_4x4_3x3.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/a64_fp16_4x4_3x3.cpp
@@ -34,13 +34,13 @@ namespace output_transform {
void a64_fp16_4x4_3x3(
unsigned int n_channels,
const __fp16* inptr,
- size_t matrix_stride,
+ const size_t matrix_stride,
const __fp16* bptr,
__fp16* const output,
- size_t output_row_stride,
- size_t output_col_stride,
- __fp16 output_min,
- __fp16 output_max
+ const size_t output_row_stride,
+ const size_t output_col_stride,
+ const __fp16 output_min,
+ const __fp16 output_max
)
{
constexpr int output_tile_rows = 4, output_tile_cols = 4;
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x2_1x7.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x2_1x7.cpp
index 8c6cf9725e..c52df266a5 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x2_1x7.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x2_1x7.cpp
@@ -33,13 +33,13 @@ namespace output_transform {
void arm_fp32_1x2_1x7(
unsigned int n_channels,
const float* inptr,
- size_t matrix_stride,
+ const size_t matrix_stride,
const float* bptr,
float *outptr,
size_t, // No need to stride across rows
- size_t output_col_stride,
- float output_min,
- float output_max
+ const size_t output_col_stride,
+ const float output_min,
+ const float output_max
)
{
constexpr auto inner_tile_cols = 8u, output_tile_cols = 2u;
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x4_1x5.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x4_1x5.cpp
index ac05f23221..7d771abeee 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x4_1x5.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x4_1x5.cpp
@@ -33,13 +33,13 @@ namespace output_transform {
void arm_fp32_1x4_1x5(
unsigned int n_channels,
const float* inptr,
- size_t matrix_stride,
+ const size_t matrix_stride,
const float* bptr,
float *outptr,
size_t, // No need to stride across rows
- size_t output_col_stride,
- float output_min,
- float output_max
+ const size_t output_col_stride,
+ const float output_min,
+ const float output_max
)
{
constexpr auto inner_tile_cols = 8u, output_tile_cols = 4u;
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x6_1x3.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x6_1x3.cpp
index 154dc6fe1a..513908190a 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x6_1x3.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_1x6_1x3.cpp
@@ -34,13 +34,13 @@ namespace output_transform {
void arm_fp32_1x6_1x3(
unsigned int n_channels,
const float* inptr,
- size_t matrix_stride,
+ const size_t matrix_stride,
const float* bptr,
float *outptr,
size_t, // No need to stride across rows
- size_t output_col_stride,
- float output_min,
- float output_max
+ const size_t output_col_stride,
+ const float output_min,
+ const float output_max
)
{
constexpr unsigned int inner_tile_cols = 8, output_tile_cols = 6;
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_3x3.cpp
index 28f042bcbf..4c7376bef8 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_3x3.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_3x3.cpp
@@ -33,13 +33,13 @@ namespace output_transform {
void arm_fp32_2x2_3x3(
unsigned int n_channels,
const float* inptr,
- size_t matrix_stride,
+ const size_t matrix_stride,
const float* bptr,
float *outptr,
- size_t output_row_stride,
- size_t output_col_stride,
- float output_min,
- float output_max
+ const size_t output_row_stride,
+ const size_t output_col_stride,
+ const float output_min,
+ const float output_max
)
{
constexpr auto output_tile_rows = 2u, output_tile_cols = 2u;
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_5x5.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_5x5.cpp
index 8e5ba74ac3..d5649b8a18 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_5x5.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_2x2_5x5.cpp
@@ -33,13 +33,13 @@ namespace output_transform {
void arm_fp32_2x2_5x5(
unsigned int n_channels,
const float* inptr,
- size_t matrix_stride,
+ const size_t matrix_stride,
const float* bptr,
float *outptr,
- size_t output_row_stride,
- size_t output_col_stride,
- float output_min,
- float output_max
+ const size_t output_row_stride,
+ const size_t output_col_stride,
+ const float output_min,
+ const float output_max
)
{
constexpr auto output_tile_rows = 2u, output_tile_cols = 2u;
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_4x4_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_4x4_3x3.cpp
index 72c43019fa..6a32f67b5d 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_4x4_3x3.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/arm_fp32_4x4_3x3.cpp
@@ -33,13 +33,13 @@ namespace output_transform {
void arm_fp32_4x4_3x3(
unsigned int n_channels,
const float* inptr,
- size_t matrix_stride,
+ const size_t matrix_stride,
const float* bptr,
float *outptr,
- size_t output_row_stride,
- size_t output_col_stride,
- float output_min,
- float output_max
+ const size_t output_row_stride,
+ const size_t output_col_stride,
+ const float output_min,
+ const float output_max
)
{
constexpr auto output_tile_rows = 4u, output_tile_cols = 4u;
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp
index 043914d590..8d2b00c1fb 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms/sme_fp32_mopa_4x4_3x3.cpp
@@ -31,15 +31,15 @@ namespace winograd {
namespace output_transform {
void sme_fp32_mopa_4x4_3x3(
- unsigned int n_channels,
+ const unsigned int n_channels,
const float* inptr,
- size_t matrix_stride,
+ const size_t matrix_stride,
const float* bptr,
float* const output,
- size_t output_row_stride,
- size_t output_col_stride,
- float output_min,
- float output_max
+ const size_t output_row_stride,
+ const size_t output_col_stride,
+ const float output_min,
+ const float output_max
)
{
// The below assembler uses the Kronecker product and the "vec trick" to
diff --git a/src/core/common/Registrars.h b/src/core/common/Registrars.h
index a74316b486..cd849c3666 100644
--- a/src/core/common/Registrars.h
+++ b/src/core/common/Registrars.h
@@ -106,10 +110,17 @@
#define REGISTER_QASYMM8_SIGNED_SVE2(func_name) nullptr
#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+#define REGISTER_QASYMM8_SIGNED_SME2(func_name) &(func_name)
+#else /* !defined(ARM_COMPUTE_ENABLE_SME2) */
+#define REGISTER_QASYMM8_SIGNED_SME2(func_name) nullptr
+#endif /* defined(ARM_COMPUTE_ENABLE_SME2) */
+
#else /* defined(ENABLE_QASYMM8_SIGNED_KERNELS) */
#define REGISTER_QASYMM8_SIGNED_NEON(func_name) nullptr
#define REGISTER_QASYMM8_SIGNED_SVE(func_name) nullptr
#define REGISTER_QASYMM8_SIGNED_SVE2(func_name) nullptr
+#define REGISTER_QASYMM8_SIGNED_SME2(func_name) nullptr
#endif /* defined(ENABLE_QASYMM8_SIGNED_KERNELS) */
#if defined(ENABLE_QASYMM8_KERNELS)
@@ -127,10 +138,17 @@
#define REGISTER_QASYMM8_SVE2(func_name) nullptr
#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
+#if defined(ARM_COMPUTE_ENABLE_SME2)
+#define REGISTER_QASYMM8_SME2(func_name) &(func_name)
+#else /* !defined(ARM_COMPUTE_ENABLE_SME2) */
+#define REGISTER_QASYMM8_SME2(func_name) nullptr
+#endif /* defined(ARM_COMPUTE_ENABLE_SME2) */
+
#else /* defined(ENABLE_QASYMM8_KERNELS) */
#define REGISTER_QASYMM8_NEON(func_name) nullptr
#define REGISTER_QASYMM8_SVE(func_name) nullptr
#define REGISTER_QASYMM8_SVE2(func_name) nullptr
+#define REGISTER_QASYMM8_SME2(func_name) nullptr
#endif /* defined(ENABLE_QASYMM8_KERNELS) */
#if defined(ENABLE_QSYMM16_KERNELS)
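
These registrar macros let kernel tables name SME2 entry points unconditionally: when ARM_COMPUTE_ENABLE_SME2 (or the data type's kernel set) is compiled out, the entry collapses to nullptr and the selector simply never matches. Illustration with a made-up function name:

// my_sme2_q8_kernel is hypothetical. With SME2 enabled this expands to
// &(my_sme2_q8_kernel); otherwise it is nullptr, with no #ifdef at the use site.
static const auto sme2_impl = REGISTER_QASYMM8_SME2(my_sme2_q8_kernel);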
diff --git a/src/core/helpers/LUTManager.cpp b/src/core/helpers/LUTManager.cpp
index 06e35eed8c..2effffbe92 100644
--- a/src/core/helpers/LUTManager.cpp
+++ b/src/core/helpers/LUTManager.cpp
@@ -30,17 +30,38 @@ namespace arm_compute
namespace
{
-void init_lut_fp16(ActivationLayerInfo::LookupTable65536 *lut)
+float16_t activation(float16_t x, const LUTInfo &info)
+{
+ float16_t out = 0.f;
+ switch (info.act)
+ {
+ case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+ out = 1.f / (1.f + std::exp(-x));
+ break;
+ case ActivationLayerInfo::ActivationFunction::TANH:
+ {
+ out = static_cast<float16_t>(info.alpha * std::tanh(info.beta * x));
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported Activation for 16-bit LUT table");
+ break;
+ }
+ return out;
+}
+
+void init_lut_fp16(ActivationLayerInfo::LookupTable65536 *lut, const LUTInfo &info)
{
union Element
{
uint16_t i = 0;
float16_t fp;
} item;
+
// Fill lut by iterating over all 16 bit values using the union.
while (true)
{
- (*lut)[item.i] = 1.f / (1.f + std::exp(-item.fp));
+ (*lut)[item.i] = activation(item.fp, info);
if (item.i == 65535)
break;
item.i++;
@@ -62,7 +83,7 @@ std::shared_ptr<ActivationLayerInfo::LookupTable65536> LUTManager::get_lut_table
// Not found, or pointer not valid
// We do not use make_shared to prevent the weak_ptr keeping the control block alive
std::shared_ptr<ActivationLayerInfo::LookupTable65536> ptr(new ActivationLayerInfo::LookupTable65536);
- init_lut_fp16(ptr.get());
+ init_lut_fp16(ptr.get(), info);
map_fp16[info] = ptr;
return ptr;
}
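
The union-based fill visits each of the 65536 fp16 bit patterns exactly once; breaking before the increment at 65535 is what stops the uint16_t from wrapping to zero and looping forever. The same technique as a self-contained sketch (float16_t assumed available, e.g. via <arm_neon.h> on AArch64):

#include <array>
#include <cstdint>

template <typename F>
std::array<float16_t, 65536> build_fp16_table(F &&f)
{
    std::array<float16_t, 65536> lut{};
    union
    {
        uint16_t  i;
        float16_t fp;
    } item{};
    while (true)
    {
        lut[item.i] = f(item.fp); // one entry per 16-bit pattern
        if (item.i == 65535)
        {
            break; // incrementing would wrap to 0 and never terminate
        }
        ++item.i;
    }
    return lut;
}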
diff --git a/src/core/helpers/LUTManager.h b/src/core/helpers/LUTManager.h
index 4e13ead7e3..f3f4bf2832 100644
--- a/src/core/helpers/LUTManager.h
+++ b/src/core/helpers/LUTManager.h
@@ -38,19 +38,23 @@ namespace arm_compute
struct LUTInfo
{
ActivationLayerInfo::ActivationFunction act;
+ float alpha;
+ float beta;
DataType dt;
- QuantizationInfo qinfo;
+ UniformQuantizationInfo qinfo;
+
// Operators enable use of a map with LUTInfo as key
friend bool operator<(const LUTInfo &l, const LUTInfo &r)
{
- return (l.act < r.act) || ((l.act == r.act) && (l.dt < r.dt)) ||
- ((l.act == r.act) && (l.dt == r.dt) && (l.qinfo.scale() < r.qinfo.scale())) ||
- ((l.act == r.act) && (l.dt == r.dt) && (l.qinfo.scale() == r.qinfo.scale()) &&
- (l.qinfo.offset() < l.qinfo.offset()));
+ const auto l_tup = std::make_tuple(l.act, l.alpha, l.beta, l.dt, l.qinfo.scale, l.qinfo.offset);
+ const auto r_tup = std::make_tuple(r.act, r.alpha, r.beta, r.dt, r.qinfo.scale, r.qinfo.offset);
+
+ return l_tup < r_tup;
}
- bool operator==(const LUTInfo &l)
+ bool operator==(const LUTInfo &l) const
{
- return this->act == l.act && this->dt == l.dt && this->qinfo == l.qinfo;
+ return this->act == l.act && this->alpha == l.alpha && this->beta == l.beta && this->dt == l.dt &&
+ this->qinfo == l.qinfo;
}
};
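
Packing the fields into tuples hands the lexicographic comparison to std::tuple, which both shortens the code and removes the kind of slip the old hand-rolled chain contained (it compared l.qinfo.offset() against itself instead of against r's). The idiom in miniature:

#include <tuple>

struct Key
{
    int   kind;
    float alpha;
    float beta;

    friend bool operator<(const Key &l, const Key &r)
    {
        // std::tie builds tuples of references; tuple's operator< compares
        // element by element, yielding a correct strict weak ordering.
        return std::tie(l.kind, l.alpha, l.beta) < std::tie(r.kind, r.alpha, r.beta);
    }
};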
diff --git a/src/core/utils/quantization/AsymmHelpers.cpp b/src/core/utils/quantization/AsymmHelpers.cpp
index f66d3e7064..f8b74a985d 100644
--- a/src/core/utils/quantization/AsymmHelpers.cpp
+++ b/src/core/utils/quantization/AsymmHelpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -122,13 +122,13 @@ arm_compute::Status calculate_quantized_multipliers(const QuantizationInfo &iq_
ARM_COMPUTE_RETURN_ERROR_ON(iq_info.scale().empty());
ARM_COMPUTE_RETURN_ERROR_ON(wq_info.scale().empty());
ARM_COMPUTE_RETURN_ERROR_ON(oq_info.scale().empty());
-
- const unsigned int size = wq_info.scale().size();
-
- auto &quant_multipliers = stage_info.gemmlowp_multipliers;
- auto &quant_shifts = stage_info.gemmlowp_shifts;
- quant_multipliers.resize(size);
- quant_shifts.resize(size);
+ constexpr unsigned int padding_elems = 32; // assembly kernels assume the shifts and multipliers buffers are padded
+ const unsigned int size = wq_info.scale().size();
+ const size_t padded_size = (size == 1) ? 1 : size + padding_elems;
+ auto &quant_multipliers = stage_info.gemmlowp_multipliers;
+ auto &quant_shifts = stage_info.gemmlowp_shifts;
+ quant_multipliers.resize(padded_size);
+ quant_shifts.resize(padded_size);
const auto &w_scales = wq_info.scale();
const float i_scale = iq_info.scale().at(0);
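
The padding presumably exists so the assembly kernels can issue full-width vector loads over the multiplier and shift arrays without reading past the allocation near the last channel; the per-tensor case (a single scale) takes a different path and stays unpadded. In miniature, under that assumption:

#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<int32_t> make_padded_buffer(size_t n_channels)
{
    constexpr size_t padding_elems = 32; // matches the constant above
    const size_t padded = (n_channels == 1) ? 1 : n_channels + padding_elems;
    return std::vector<int32_t>(padded, 0);
}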
diff --git a/src/cpu/kernels/CpuActivationKernel.cpp b/src/cpu/kernels/CpuActivationKernel.cpp
index 7cfa39b286..4253027231 100644
--- a/src/cpu/kernels/CpuActivationKernel.cpp
+++ b/src/cpu/kernels/CpuActivationKernel.cpp
@@ -43,6 +43,13 @@ namespace kernels
{
namespace
{
+
+bool is_fp16_lut_supported(ActivationLayerInfo::ActivationFunction func)
+{
+ return func == ActivationLayerInfo::ActivationFunction::LOGISTIC ||
+ func == ActivationLayerInfo::ActivationFunction::TANH;
+}
+
static const std::vector<CpuActivationKernel::ActivationKernel> available_kernels = {
#ifdef ARM_COMPUTE_ENABLE_SVE
{"sve2_q8_activation_lut",
@@ -85,10 +92,7 @@ static const std::vector<CpuActivationKernel::ActivationKernel> available_kernel
REGISTER_QSYMM16_SVE2(arm_compute::cpu::sve2_qsymm16_activation)},
{"sve_fp16_activation_lut",
[](const ActivationDataTypeISASelectorData &data)
- {
- return data.dt == DataType::F16 && data.isa.fp16 && data.isa.sve &&
- data.f == ActivationLayerInfo::ActivationFunction::LOGISTIC;
- },
+ { return data.dt == DataType::F16 && data.isa.fp16 && data.isa.sve && is_fp16_lut_supported(data.f); },
REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_activation_lut)},
{"sve_fp16_activation",
[](const ActivationDataTypeISASelectorData &data)
@@ -299,10 +303,10 @@ void CpuActivationKernel::configure(const ITensorInfo *src, ITensorInfo *dst, Ac
activation_info.setLookupTable256(tmp_lut);
}
- if (src->data_type() == DataType::F16 &&
- activation_info.activation() == ActivationLayerInfo::ActivationFunction::LOGISTIC)
+ if (std::string(uk->name) == "sve_fp16_activation_lut")
{
- const LUTInfo info = {activation_info.activation(), src->data_type(), src->quantization_info()};
+ const LUTInfo info = {activation_info.activation(), activation_info.a(), activation_info.b(), src->data_type(),
+ src->quantization_info().uniform()};
activation_info.setLookupTable65536((lut_manager.get_lut_table(info)));
}
#endif // __aarch64__
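
Keying the LUT path on the selected micro-kernel's name keeps configure() in lockstep with the selector instead of re-deriving its conditions, and carrying alpha/beta in LUTInfo matters because a TANH table bakes both in. For instance (assuming a default-constructible UniformQuantizationInfo):

// Two TANH configurations differing only in beta map to distinct
// 65536-entry tables; without alpha/beta in the key they would silently
// share one.
const LUTInfo tanh_b1 = {ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f,
                         DataType::F16, UniformQuantizationInfo()};
const LUTInfo tanh_b2 = {ActivationLayerInfo::ActivationFunction::TANH, 1.f, 2.f,
                         DataType::F16, UniformQuantizationInfo()};
// tanh_b1 < tanh_b2 under the tuple comparator, so the cache keeps both.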
diff --git a/src/cpu/kernels/CpuDequantizeKernel.cpp b/src/cpu/kernels/CpuDequantizeKernel.cpp
index d17128b5ac..5595ace998 100644
--- a/src/cpu/kernels/CpuDequantizeKernel.cpp
+++ b/src/cpu/kernels/CpuDequantizeKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -29,12 +29,14 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "src/core/common/Registrars.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/NESymm.h"
#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/kernels/dequantize/generic/neon/list.h"
#include <arm_neon.h>
@@ -62,301 +64,6 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
return Status{};
}
-
-template <typename T>
-inline void store_result(T *ptr, const float32x4x4_t &v)
-{
- ARM_COMPUTE_UNUSED(ptr, v);
-}
-
-template <>
-inline void store_result<float>(float *ptr, const float32x4x4_t &v)
-{
- wrapper::vstore(ptr, v.val[0]);
- wrapper::vstore(ptr + 4, v.val[1]);
- wrapper::vstore(ptr + 8, v.val[2]);
- wrapper::vstore(ptr + 12, v.val[3]);
-}
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-template <>
-inline void store_result<float16_t>(float16_t *ptr, const float32x4x4_t &v)
-{
- wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
- wrapper::vstore(ptr + 8, vcombine_f16(vcvt_f16_f32(v.val[2]), vcvt_f16_f32(v.val[3])));
-}
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-
-template <typename T>
-inline void store_result(T *ptr, const float32x4x2_t &v)
-{
- ARM_COMPUTE_UNUSED(ptr, v);
-}
-
-template <>
-inline void store_result<float>(float *ptr, const float32x4x2_t &v)
-{
- wrapper::vstore(ptr, v.val[0]);
- wrapper::vstore(ptr + 4, v.val[1]);
-}
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-template <>
-inline void store_result<float16_t>(float16_t *ptr, const float32x4x2_t &v)
-{
- wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
-}
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-
-template <typename TOut, typename TIn>
-void run_dequantization_qasymm8(const ITensor *input, ITensor *output, const Window &window)
-{
- const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
- const float scale = qinfo.scale;
- const int32_t offset = qinfo.offset;
-
- const int window_step_x = 16;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- // Collapse window and reset first dimension to handle tail calculations manually
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- // Create iterators
- Iterator in(input, win_collapsed);
- Iterator out(output, win_collapsed);
-
- execute_window_loop(
- win_collapsed,
- [&](const Coordinates &)
- {
- const auto in_ptr = reinterpret_cast<const TIn *>(in.ptr());
- const auto out_ptr = reinterpret_cast<TOut *>(out.ptr());
-
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto vin = wrapper::vloadq(in_ptr + x);
- const auto vdeq = vdequantize(vin, scale, offset);
-
- store_result(reinterpret_cast<TOut *>(out_ptr + x), vdeq);
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- auto val = *(in_ptr + x);
- *(out_ptr + x) = static_cast<TOut>(Qasymm8QuantizationHelper<TIn>::dequantize(val, qinfo));
- }
- },
- in, out);
-}
-
-template <typename T>
-void run_dequantization_qsymm8_per_channel_nchw(const ITensor *input, ITensor *output, const Window &window)
-{
- const auto scale = input->info()->quantization_info().scale();
-
- const int window_step_x = 16;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- // Reset first dimension to handle tail calculations manually
- Window win(window);
- win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- // Create iterators
- Iterator in(input, win);
- Iterator out(output, win);
-
- execute_window_loop(
- win,
- [&](const Coordinates &id)
- {
- const auto in_ptr = reinterpret_cast<const int8_t *>(in.ptr());
- const auto out_ptr = reinterpret_cast<T *>(out.ptr());
-
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto vin = wrapper::vloadq(in_ptr + x);
- const auto vdeq = vdequantize(vin, scale[id.z()]);
-
- store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- int8_t val = *(in_ptr + x);
- *(out_ptr + x) = static_cast<T>(dequantize(val, scale[id.z()]));
- }
- },
- in, out);
-}
-
-template <typename T>
-void run_dequantization_qsymm8_per_channel_nhwc(const ITensor *input, ITensor *output, const Window &window)
-{
- const auto scale = input->info()->quantization_info().scale();
-
- const int window_step_x = 16;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- // Reset first dimension to handle tail calculations manually
- Window win(window);
- win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- // Create iterators
- Iterator in(input, win);
- Iterator out(output, win);
-
- execute_window_loop(
- win,
- [&](const Coordinates &)
- {
- const auto in_ptr = reinterpret_cast<const int8_t *>(in.ptr());
- const auto out_ptr = reinterpret_cast<T *>(out.ptr());
-
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const float32x4x4_t vscale = {{scale[x + 0], scale[x + 1], scale[x + 2], scale[x + 3], scale[x + 4],
- scale[x + 5], scale[x + 6], scale[x + 7], scale[x + 8], scale[x + 9],
- scale[x + 10], scale[x + 11], scale[x + 12], scale[x + 13],
- scale[x + 14], scale[x + 15]}};
- const auto vin = wrapper::vloadq(in_ptr + x);
- const auto vdeq = vdequantize(vin, vscale);
-
- store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- int8_t val = *(in_ptr + x);
- *(out_ptr + x) = static_cast<T>(dequantize(val, scale[x]));
- }
- },
- in, out);
-}
-
-template <typename T>
-void run_dequantization_qsymm8(const ITensor *input, ITensor *output, const Window &window)
-{
- const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
- const float scale = qinfo.scale;
-
- const int window_step_x = 16;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- // Collapse window and reset first dimension to handle tail calculations manually
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- // Create iterators
- Iterator in(input, win_collapsed);
- Iterator out(output, win_collapsed);
-
- execute_window_loop(
- win_collapsed,
- [&](const Coordinates &)
- {
- const auto in_ptr = reinterpret_cast<const int8_t *>(in.ptr());
- const auto out_ptr = reinterpret_cast<T *>(out.ptr());
-
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto vin = wrapper::vloadq(in_ptr + x);
- const auto vdeq = vdequantize(vin, scale);
-
- store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- int8_t val = *(in_ptr + x);
- *(out_ptr + x) = static_cast<T>(dequantize(val, scale));
- }
- },
- in, out);
-}
-
-template <typename T>
-void run_dequantization_qsymm16(const ITensor *input, ITensor *output, const Window &window)
-{
- const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
- const float scale = qinfo.scale;
-
- const int window_step_x = 8;
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- // Collapse window and reset first dimension to handle tail calculations manually
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- // Create iterators
- Iterator in(input, win_collapsed);
- Iterator out(output, win_collapsed);
-
- execute_window_loop(
- win_collapsed,
- [&](const Coordinates &)
- {
- const auto in_ptr = reinterpret_cast<const int16_t *>(in.ptr());
- const auto out_ptr = reinterpret_cast<T *>(out.ptr());
-
- int x = window_start_x;
- for (; x <= (window_end_x - window_step_x); x += window_step_x)
- {
- const auto vin = wrapper::vloadq(in_ptr + x);
- const auto vdeq = vdequantize_int16(vin, scale);
-
- store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
- }
-
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- int16_t val = *(in_ptr + x);
- *(out_ptr + x) = static_cast<T>(dequantize_qsymm16(val, scale));
- }
- },
- in, out);
-}
-
-template <typename T>
-void run_dequantization_core(const ITensor *input, ITensor *output, const Window &window)
-{
- switch (input->info()->data_type())
- {
- case DataType::QASYMM8:
- run_dequantization_qasymm8<T, uint8_t>(input, output, window);
- break;
- case DataType::QASYMM8_SIGNED:
- run_dequantization_qasymm8<T, int8_t>(input, output, window);
- break;
- case DataType::QSYMM8_PER_CHANNEL:
- input->info()->data_layout() == DataLayout::NHWC
- ? run_dequantization_qsymm8_per_channel_nhwc<T>(input, output, window)
- : run_dequantization_qsymm8_per_channel_nchw<T>(input, output, window);
- break;
- case DataType::QSYMM8:
- run_dequantization_qsymm8<T>(input, output, window);
- break;
- case DataType::QSYMM16:
- run_dequantization_qsymm16<T>(input, output, window);
- break;
- default:
- ARM_COMPUTE_ERROR("Unsupported data type.");
- }
-}
} // namespace
void CpuDequantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
@@ -370,6 +77,20 @@ void CpuDequantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
auto_init_if_empty(*dst, src->tensor_shape(), 1, DataType::F32);
ICpuKernel::configure(win);
+
+ switch (dst->data_type())
+ {
+ case DataType::F32:
+ _func = REGISTER_FP32_NEON(fp32_run_dequantization_core);
+ break;
+#ifdef ARM_COMPUTE_ENABLE_FP16
+ case DataType::F16:
+ _func = REGISTER_FP16_NEON(fp16_run_dequantization_core);
+ break;
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type.");
+ }
}
Status CpuDequantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
@@ -386,20 +107,7 @@ void CpuDequantizeKernel::run_op(ITensorPack &tensors, const Window &window, con
const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
auto dst = tensors.get_tensor(TensorType::ACL_DST);
-
- switch (dst->info()->data_type())
- {
- case DataType::F32:
- run_dequantization_core<float>(src, dst, window);
- break;
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- case DataType::F16:
- run_dequantization_core<float16_t>(src, dst, window);
- break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- default:
- ARM_COMPUTE_ERROR("Unsupported data type.");
- }
+ _func(src, dst, window);
}
const char *CpuDequantizeKernel::name() const
{
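
The dequantize refactor moves the data-type switch from run_op() into configure(): resolve the variant once, store a plain function pointer, and keep the per-call path branch-free. A reduced sketch of the shape, with illustrative names rather than the real kernel class:

#include <stdexcept>

struct Tensor;
struct Window;

enum class DT { F32, F16 };

using DequantFn = void (*)(const Tensor *, Tensor *, const Window &);

void fp32_core(const Tensor *, Tensor *, const Window &) { /* ... */ }
void fp16_core(const Tensor *, Tensor *, const Window &) { /* ... */ }

class DequantizeKernel
{
public:
    void configure(DT dst_type)
    {
        switch (dst_type) // decided once, at configure time
        {
            case DT::F32: _func = &fp32_core; break;
            case DT::F16: _func = &fp16_core; break;
            default: throw std::runtime_error("Unsupported data type.");
        }
    }
    void run_op(const Tensor *src, Tensor *dst, const Window &win) const
    {
        _func(src, dst, win); // no per-call type dispatch
    }
private:
    DequantFn _func{nullptr};
};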
diff --git a/src/cpu/kernels/CpuDequantizeKernel.h b/src/cpu/kernels/CpuDequantizeKernel.h
index 6ed58587c9..d8b6444f0a 100644
--- a/src/cpu/kernels/CpuDequantizeKernel.h
+++ b/src/cpu/kernels/CpuDequantizeKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H
-#define ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H
+#ifndef ACL_SRC_CPU_KERNELS_CPUDEQUANTIZEKERNEL_H
+#define ACL_SRC_CPU_KERNELS_CPUDEQUANTIZEKERNEL_H
#include "src/core/common/Macros.h"
#include "src/cpu/ICpuKernel.h"
@@ -56,8 +56,16 @@ public:
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
const char *name() const override;
+
+private:
+ /** Common signature for all the specialised @ref CpuDequantizeKernel functions
+ *
+ * @param[in]  input  Input tensor to dequantize.
+ * @param[out] output Output tensor the dequantized values are written to.
+ * @param[in]  window Region on which to execute the kernel.
+ */
+ using DequantizeFunctionExecutorPtr = void (*)(const ITensor *input, ITensor *output, const Window &window);
+ DequantizeFunctionExecutorPtr _func{nullptr};
};
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_DEQUANTIZE_KERNEL_H */
+#endif // ACL_SRC_CPU_KERNELS_CPUDEQUANTIZEKERNEL_H
diff --git a/src/cpu/kernels/CpuGemmLowpMatrixMultiplyKernel.cpp b/src/cpu/kernels/CpuGemmLowpMatrixMultiplyKernel.cpp
index a3ed2cd171..87340e566e 100644
--- a/src/cpu/kernels/CpuGemmLowpMatrixMultiplyKernel.cpp
+++ b/src/cpu/kernels/CpuGemmLowpMatrixMultiplyKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021 Arm Limited.
+ * Copyright (c) 2017-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -684,6 +684,10 @@ Status validate_arguments(const ITensorInfo *src0, const ITensorInfo *src1, cons
DataType::U8);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(src0->data_type() == DataType::QASYMM8_SIGNED &&
+ src1->data_type() == DataType::QASYMM8,
+ "QASYMM8_SIGNED input with QASYMM8 weights not supported");
+
TensorShape in0_shape = src0->tensor_shape();
TensorShape in1_shape = src1->tensor_shape();
TensorShape out_shape = dst->tensor_shape();
diff --git a/src/cpu/kernels/CpuKernelSelectionTypes.h b/src/cpu/kernels/CpuKernelSelectionTypes.h
index d71789cc39..03a474de53 100644
--- a/src/cpu/kernels/CpuKernelSelectionTypes.h
+++ b/src/cpu/kernels/CpuKernelSelectionTypes.h
@@ -105,6 +105,7 @@ struct SoftmaxKernelDataTypeISASelectorData
cpuinfo::CpuIsaInfo isa;
bool is_log;
int axis;
+ uint64_t sme2_vector_length;
};
// Selector pointer types
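
The added field gives selectors access to the streaming vector length (queried via RDSVL earlier in this patch) alongside the ISA flags. A hypothetical selector using it, with the condition purely illustrative:

const auto sme2_softmax_selector = [](const SoftmaxKernelDataTypeISASelectorData &data)
{
    // Require SME2 and a specific streaming vector length, e.g. for a
    // kernel hand-tuned for 512-bit streaming mode.
    return data.isa.sme2 && data.sme2_vector_length == 512 && !data.is_log;
};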
diff --git a/src/cpu/kernels/CpuQuantizeKernel.cpp b/src/cpu/kernels/CpuQuantizeKernel.cpp
index d2ac6cf8ac..ed4675ae3d 100644
--- a/src/cpu/kernels/CpuQuantizeKernel.cpp
+++ b/src/cpu/kernels/CpuQuantizeKernel.cpp
@@ -29,12 +29,12 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
+#include "src/core/common/Registrars.h"
#include "src/core/CPP/Validate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-#include "src/core/NEON/NEAsymm.h"
-#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/kernels/quantize/generic/neon/list.h"
#include <arm_neon.h>
#include <map>
@@ -47,7 +47,6 @@ namespace kernels
{
namespace
{
-constexpr auto window_step = 16;
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
{
@@ -63,59 +62,6 @@ Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
return Status{};
}
-template <typename T>
-inline float32x4x4_t load_value(const T *input_ptr)
-{
- using Tx16_t = typename wrapper::traits::neon_vector<T, 16>::type;
- return arm_compute::convert_to_float32x4x4<Tx16_t>(wrapper::vloadq(input_ptr));
-}
-
-template <>
-inline float32x4x4_t load_value(const float *input_ptr)
-{
- return {wrapper::vloadq(input_ptr), wrapper::vloadq(input_ptr + 4), wrapper::vloadq(input_ptr + 8),
- wrapper::vloadq(input_ptr + 12)};
-}
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-template <>
-inline float32x4x4_t load_value(const float16_t *input_ptr)
-{
- return {vcvt_f32_f16(wrapper::vload(input_ptr)), vcvt_f32_f16(wrapper::vload(input_ptr + 4)),
- vcvt_f32_f16(wrapper::vload(input_ptr + 8)), vcvt_f32_f16(wrapper::vload(input_ptr + 12))};
-}
-
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
-template <typename element_type>
-using vector_type = wrapper::traits::neon_vector_t<element_type, window_step>;
-
-template <typename quantized_type>
-vector_type<quantized_type> vquantize_qasymm8(const float32x4x4_t &qv, const UniformQuantizationInfo &qi);
-
-template <>
-vector_type<uint8_t> vquantize_qasymm8<uint8_t>(const float32x4x4_t &qv, const UniformQuantizationInfo &qi)
-{
- return vquantize(qv, qi);
-}
-
-template <>
-vector_type<int8_t> vquantize_qasymm8<int8_t>(const float32x4x4_t &qv, const UniformQuantizationInfo &qi)
-{
- return vquantize_signed(qv, qi);
-}
-
-template <typename TOut, typename = typename std::enable_if<std::is_signed<TOut>::value, bool>::type>
-inline int8x16_t recombine_8_16(int16x8_t lower, int16x8_t upper)
-{
- return wrapper::vcombine(wrapper::vqmovn(lower), wrapper::vqmovn(upper));
-}
-
-template <typename TOut, typename = typename std::enable_if<std::is_unsigned<TOut>::value, bool>::type>
-inline uint8x16_t recombine_8_16(int16x8_t lower, int16x8_t upper)
-{
- return wrapper::vcombine(wrapper::vqmovun(lower), wrapper::vqmovun(upper));
-}
-
} // namespace
void CpuQuantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
@@ -124,38 +70,36 @@ void CpuQuantizeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
static const std::map<std::string, QuantizeFunctionExecutorPtr> quant_map = {
- {"op_QASYMM8_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<uint8_t, uint8_t>},
- {"op_QASYMM8_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<uint8_t, int8_t>},
- {"op_QASYMM8_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<uint8_t>},
+ {"op_QASYMM8_QASYMM8", REGISTER_INTEGER_NEON(u8_u8_run_quantize_qasymm8)},
+ {"op_QASYMM8_QASYMM8_SIGNED", REGISTER_INTEGER_NEON(u8_i8_run_quantize_qasymm8)},
+ {"op_QASYMM8_QASYMM16", REGISTER_INTEGER_NEON(u8_run_quantize_qasymm16)},
- {"op_QASYMM8_SIGNED_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<int8_t, uint8_t>},
- {"op_QASYMM8_SIGNED_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<int8_t, int8_t>},
- {"op_QASYMM8_SIGNED_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<int8_t>},
+ {"op_QASYMM8_SIGNED_QASYMM8", REGISTER_INTEGER_NEON(i8_u8_run_quantize_qasymm8)},
+ {"op_QASYMM8_SIGNED_QASYMM8_SIGNED", REGISTER_INTEGER_NEON(i8_i8_run_quantize_qasymm8)},
+ {"op_QASYMM8_SIGNED_QASYMM16", REGISTER_INTEGER_NEON(i8_run_quantize_qasymm16)},
// Functions for offset only requantization
- {"op_OFFSET_ONLY_QASYMM8_QASYMM8", &CpuQuantizeKernel::run_requantize_offset_only<uint8_t, uint8_t>},
- {"op_OFFSET_ONLY_QASYMM8_QASYMM8_SIGNED", &CpuQuantizeKernel::run_requantize_offset_only<uint8_t, int8_t>},
- {"op_OFFSET_ONLY_QASYMM8_SIGNED_QASYMM8", &CpuQuantizeKernel::run_requantize_offset_only<int8_t, uint8_t>},
- {"op_OFFSET_ONLY_QASYMM8_SIGNED_QASYMM8_SIGNED",
- &CpuQuantizeKernel::run_requantize_offset_only<int8_t, int8_t>},
+ {"op_OFFSET_ONLY_QASYMM8_QASYMM8", REGISTER_INTEGER_NEON(u8_u8_run_requantize_offset_only)},
+ {"op_OFFSET_ONLY_QASYMM8_QASYMM8_SIGNED", REGISTER_INTEGER_NEON(u8_i8_run_requantize_offset_only)},
+ {"op_OFFSET_ONLY_QASYMM8_SIGNED_QASYMM8", REGISTER_INTEGER_NEON(i8_u8_run_requantize_offset_only)},
+ {"op_OFFSET_ONLY_QASYMM8_SIGNED_QASYMM8_SIGNED", REGISTER_INTEGER_NEON(i8_i8_run_requantize_offset_only)},
// Functions for offset uint8 to int8 and vice versa quantization (no scale changes)
{"op_OFFSET_ONLY_CONVERT_QASYMM8_SIGNED_QASYMM8",
- &CpuQuantizeKernel::run_requantize_offset_only_convert<int8_t, uint8_t>},
+ REGISTER_INTEGER_NEON(i8_u8_run_requantize_offset_only_convert)},
{"op_OFFSET_ONLY_CONVERT_QASYMM8_QASYMM8_SIGNED",
- &CpuQuantizeKernel::run_requantize_offset_only_convert<uint8_t, int8_t>},
-
- {"op_F32_QSYMM8", &CpuQuantizeKernel::run_quantize_qsymm8<float, int8_t>},
-
- {"op_F32_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<float, uint8_t>},
- {"op_F32_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<float, int8_t>},
- {"op_F32_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<float>},
-
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- {"op_F16_QASYMM8", &CpuQuantizeKernel::run_quantize_qasymm8<float16_t, uint8_t>},
- {"op_F16_QASYMM8_SIGNED", &CpuQuantizeKernel::run_quantize_qasymm8<float16_t, int8_t>},
- {"op_F16_QASYMM16", &CpuQuantizeKernel::run_quantize_qasymm16<float16_t>},
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC*/
+ REGISTER_INTEGER_NEON(u8_i8_run_requantize_offset_only_convert)},
+
+ {"op_F32_QSYMM8", REGISTER_FP32_NEON(fp32_i8_run_quantize_qsymm8)},
+ {"op_F32_QASYMM8", REGISTER_FP32_NEON(fp32_u8_run_quantize_qasymm8)},
+ {"op_F32_QASYMM8_SIGNED", REGISTER_FP32_NEON(fp32_i8_run_quantize_qasymm8)},
+ {"op_F32_QASYMM16", REGISTER_FP32_NEON(fp32_run_quantize_qasymm16)},
+
+#ifdef ARM_COMPUTE_ENABLE_FP16
+ {"op_F16_QASYMM8", REGISTER_FP16_NEON(fp16_u8_run_quantize_qasymm8)},
+ {"op_F16_QASYMM8_SIGNED", REGISTER_FP16_NEON(fp16_i8_run_quantize_qasymm8)},
+ {"op_F16_QASYMM16", REGISTER_FP16_NEON(fp16_run_quantize_qasymm16)},
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
};
std::string function_to_call("op_");
@@ -203,242 +147,6 @@ Status CpuQuantizeKernel::validate(const ITensorInfo *src, const ITensorInfo *ds
return Status{};
}
-template <typename TIn, typename TOut>
-void CpuQuantizeKernel::run_quantize_qsymm8(const ITensor *src, ITensor *dst, const Window &window)
-{
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
- UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
- uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
-
- // Collapse window and reset first dimension to handle tail calculations manually
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input(src, win_collapsed);
- Iterator output(dst, win_collapsed);
- execute_window_loop(
- win_collapsed,
- [&](const Coordinates &)
- {
- auto input_ptr = reinterpret_cast<const TIn *>(input.ptr());
- auto output_ptr = reinterpret_cast<TOut *>(output.ptr());
- int x = window_start_x;
- for (; x <= (window_end_x - window_step); x += window_step)
- {
- wrapper::vstore(&output_ptr[x], vquantize_qasymm8<TOut>(load_value(&input_ptr[x]), uqinfo));
- }
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- output_ptr[x] = quantize_qsymm8(input_ptr[x], dst->info()->quantization_info());
- }
- },
- input, output);
-}
-
-template <typename TIn, typename TOut>
-void CpuQuantizeKernel::run_requantize_offset_only_convert(const ITensor *src, ITensor *dst, const Window &window)
-{
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- // Calculate output offset difference.
- const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
- UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
- uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
-
- // Collapse window and reset first dimension to handle tail calculations manually
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
-
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- // Duplicate offset in signed vector format
- const int8x16_t offset = wrapper::vdup_n(static_cast<int8_t>(uqinfo.offset), wrapper::traits::vector_128_tag{});
-
- Iterator input(src, win_collapsed);
- Iterator output(dst, win_collapsed);
- execute_window_loop(
- win_collapsed,
- [&](const Coordinates &)
- {
- auto input_ptr = reinterpret_cast<const TIn *>(input.ptr());
- auto output_ptr = reinterpret_cast<TOut *>(output.ptr());
- int x = window_start_x;
- for (; x <= (window_end_x - window_step); x += window_step)
- {
- const wrapper::traits::neon_vector_t<TIn, window_step> qv =
- wrapper::vloadq(input_ptr + x); // load 128 bit vector of 8 bit datatype
-
- // Signed addition.
- auto res = vaddq_s8(reinterpret_cast<int8x16_t>(qv), offset);
-
- // Output is dependent on datatype.
- wrapper::vstore(&output_ptr[x],
- reinterpret_cast<wrapper::traits::neon_vector_t<TOut, window_step>>(res));
- }
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- auto result = uqinfo.offset + static_cast<int32_t>(input_ptr[x]);
- output_ptr[x] = static_cast<TOut>(result);
- }
- },
- input, output);
-}
-
-template <typename TIn, typename TOut>
-void CpuQuantizeKernel::run_requantize_offset_only(const ITensor *src, ITensor *dst, const Window &window)
-{
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
- UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
- uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
-
- // Collapse window and reset first dimension to handle tail calculations manually
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- // Duplicate offset in signed vector format
- const int16x8_t offset = wrapper::vdup_n(static_cast<int16_t>(uqinfo.offset), wrapper::traits::vector_128_tag{});
-
- const int32_t low_bound = (dst->info()->data_type() == DataType::QASYMM8) ? 0 : -128;
- const int32_t upper_bound = (dst->info()->data_type() == DataType::QASYMM8) ? 255 : 127;
-
- Iterator input(src, win_collapsed);
- Iterator output(dst, win_collapsed);
- execute_window_loop(
- win_collapsed,
- [&](const Coordinates &)
- {
- auto input_ptr = reinterpret_cast<const TIn *>(input.ptr());
- TOut *output_ptr = reinterpret_cast<TOut *>(output.ptr());
-
- int x = window_start_x;
- for (; x <= (window_end_x - window_step); x += window_step)
- {
- const auto qv = wrapper::vloadq(input_ptr + x); // load 128 bit vector of 8 bit datatype
- int16x8_t lower = reinterpret_cast<int16x8_t>(wrapper::vmovl(wrapper::vgetlow(qv)));
- int16x8_t upper = reinterpret_cast<int16x8_t>(wrapper::vmovl(wrapper::vgethigh(qv)));
-
- // Signed addition.
- lower = wrapper::vqadd(lower, offset);
- upper = wrapper::vqadd(upper, offset);
-
- // Output is dependent on datatype.
- auto res = recombine_8_16<TOut>(lower, upper);
- wrapper::vstore(&output_ptr[x], res);
- }
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- // Add offset and clamp result to within the range of the output datatype.
- int32_t result = uqinfo.offset + static_cast<int32_t>(input_ptr[x]);
- result = utility::clamp<int32_t>(result, low_bound, upper_bound);
-
- // Cast result to output datatype.
- output_ptr[x] = static_cast<TOut>(result);
- }
- },
- input, output);
-}
-
-template <typename TIn, typename TOut>
-void CpuQuantizeKernel::run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
-{
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
- UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
- if (is_data_type_quantized_asymmetric(src->info()->data_type()))
- {
- uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
- }
-#ifdef __aarch64__
- constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_EVEN;
-#else //__aarch64__
- constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO;
-#endif //__aarch64__
-
- // Collapse window and reset first dimension to handle tail calculations manually
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input(src, win_collapsed);
- Iterator output(dst, win_collapsed);
- execute_window_loop(
- win_collapsed,
- [&](const Coordinates &)
- {
- auto input_ptr = reinterpret_cast<const TIn *>(input.ptr());
- auto output_ptr = reinterpret_cast<TOut *>(output.ptr());
-
- int x = window_start_x;
- for (; x <= (window_end_x - window_step); x += window_step)
- {
- wrapper::vstore(&output_ptr[x], vquantize_qasymm8<TOut>(load_value(&input_ptr[x]), uqinfo));
- }
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- output_ptr[x] = Qasymm8QuantizationHelper<TOut>::quantize(input_ptr[x], uqinfo, rounding_policy);
- }
- },
- input, output);
-}
-
-template <typename T>
-void CpuQuantizeKernel::run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
-{
- const auto window_start_x = static_cast<int>(window.x().start());
- const auto window_end_x = static_cast<int>(window.x().end());
-
- const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
- UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
- if (is_data_type_quantized_asymmetric(src->info()->data_type()))
- {
- uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
- }
-#ifdef __aarch64__
- constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_EVEN;
-#else //__aarch64__
- constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO;
-#endif //__aarch64__
-
- // Collapse window and reset first dimension to handle tail calculations manually
- Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
- win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
-
- Iterator input(src, win_collapsed);
- Iterator output(dst, win_collapsed);
- execute_window_loop(
- win_collapsed,
- [&](const Coordinates &)
- {
- auto input_ptr = reinterpret_cast<const T *>(input.ptr());
- auto output_ptr = reinterpret_cast<uint16_t *>(output.ptr());
-
- int x = window_start_x;
- for (; x <= (window_end_x - window_step); x += window_step)
- {
- uint16x8x2_t tmp = vquantize_qasymm16(load_value(&input_ptr[x]), uqinfo);
- vst1q_u16(&output_ptr[x], tmp.val[0]);
- vst1q_u16(&output_ptr[x + 8], tmp.val[1]);
- }
- // Compute left-over elements
- for (; x < window_end_x; ++x)
- {
- output_ptr[x] = quantize_qasymm16(input_ptr[x], uqinfo, rounding_policy);
- }
- },
- input, output);
-}
-
void CpuQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
ARM_COMPUTE_UNUSED(info);
@@ -448,7 +156,7 @@ void CpuQuantizeKernel::run_op(ITensorPack &tensors, const Window &window, const
const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
auto dst = tensors.get_tensor(TensorType::ACL_DST);
- (this->*_func)(src, dst, window);
+ (*_func)(src, dst, window);
}
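// Illustrative sketch (not part of the patch): with the quantize bodies moved
// out to free functions under src/cpu/kernels/quantize/generic/neon/, _func is
// now a plain function pointer rather than a pointer-to-member, so dispatch no
// longer needs an object:
//
//   using QuantizeFunctionExecutorPtr = void (*)(const ITensor *, ITensor *, const Window &);
//   QuantizeFunctionExecutorPtr f = &fp32_u8_run_quantize_qasymm8; // assumed table entry
//   f(src, dst, window); // no 'this' required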
const char *CpuQuantizeKernel::name() const
diff --git a/src/cpu/kernels/CpuQuantizeKernel.h b/src/cpu/kernels/CpuQuantizeKernel.h
index c2f7ac6d9d..750310c811 100644
--- a/src/cpu/kernels/CpuQuantizeKernel.h
+++ b/src/cpu/kernels/CpuQuantizeKernel.h
@@ -76,31 +76,7 @@ private:
*
* @param[in] window Region on which to execute the kernel.
*/
- using QuantizeFunctionExecutorPtr = void (CpuQuantizeKernel::*)(const ITensor *src,
- ITensor *dst,
- const Window &window);
- /** Function to apply QASYMM8 or QASYMM8_SIGNED quantization on a tensor.
- *
- * @param[in] window Region on which to execute the kernel.
- */
- template <typename TIn, typename TOut>
- void run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window);
- /** Function to apply QASYMM16 quantization on a tensor.
- *
- * @param[in] window Region on which to execute the kernel.
- */
- template <typename T>
- void run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window);
-
- template <typename TIn, typename TOut>
- void run_quantize_qsymm8(const ITensor *src, ITensor *dst, const Window &window);
-
- template <typename TIn, typename TOut>
- void run_requantize_offset_only(const ITensor *src, ITensor *dst, const Window &window);
-
- template <typename TIn, typename TOut>
- void run_requantize_offset_only_convert(const ITensor *src, ITensor *dst, const Window &window);
-
+ using QuantizeFunctionExecutorPtr = void (*)(const ITensor *src, ITensor *dst, const Window &window);
QuantizeFunctionExecutorPtr _func{nullptr};
size_t _split_dimension{Window::DimY};
};
diff --git a/src/cpu/kernels/CpuSoftmaxKernel.cpp b/src/cpu/kernels/CpuSoftmaxKernel.cpp
index 5cf81f815c..b7e395fb79 100644
--- a/src/cpu/kernels/CpuSoftmaxKernel.cpp
+++ b/src/cpu/kernels/CpuSoftmaxKernel.cpp
@@ -48,6 +48,7 @@ namespace kernels
{
namespace
{
+
/* Softmax */
static const std::vector<typename CpuSoftmaxKernel::SoftmaxKernel> available_kernels = {
{"sme2_fp32_softmax",
@@ -65,9 +66,23 @@ static const std::vector<typename CpuSoftmaxKernel::SoftmaxKernel> available_ker
[](const SoftmaxKernelDataTypeISASelectorData &data)
{ return (!data.is_log && data.dt == DataType::F16) && data.isa.fp16; },
REGISTER_FP16_NEON(neon_fp16_softmax<false>)},
+ {"sme2_qu8_softmax_lut_512VL",
+ [](const SoftmaxKernelDataTypeISASelectorData &data)
+ {
+ return (!data.is_log && data.dt == DataType::QASYMM8 && data.isa.sme2 && data.axis == 0 &&
+ data.sme2_vector_length == 512);
+ },
+ REGISTER_QASYMM8_SME2(sme2_qasymm8_softmax_lut_512VL)},
{"neon_qu8_softmax",
[](const SoftmaxKernelDataTypeISASelectorData &data) { return (!data.is_log && data.dt == DataType::QASYMM8); },
REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_softmax<false>)},
+ {"sme2_qs8_softmax_lut_512VL",
+ [](const SoftmaxKernelDataTypeISASelectorData &data)
+ {
+ return (!data.is_log && data.dt == DataType::QASYMM8_SIGNED && data.isa.sme2 && data.axis == 0 &&
+ data.sme2_vector_length == 512);
+ },
+ REGISTER_QASYMM8_SIGNED_SME2(sme2_qasymm8_signed_softmax_lut_512VL)},
{"neon_qs8_softmax",
[](const SoftmaxKernelDataTypeISASelectorData &data)
{ return (!data.is_log && data.dt == DataType::QASYMM8_SIGNED); },
@@ -88,6 +103,28 @@ static const std::vector<typename CpuSoftmaxKernel::SoftmaxKernel> available_ker
REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::neon_qasymm8_signed_softmax<true>)},
};
+void init_lut(std::vector<float> &lut, DataType type, float scale, float beta)
+{
+ if (type == DataType::QASYMM8)
+ {
+ for (int i = 0; i < 256; ++i)
+ {
+ lut.push_back(std::exp(-scale * beta * i));
+ }
+ }
+ else if (type == DataType::QASYMM8_SIGNED)
+ {
+ for (int i = -128; i < 128; ++i)
+ {
+ lut.push_back(std::exp(-scale * beta * i));
+ }
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Invalid datatype for QASYMM8/QASYMM8_SIGNED softmax");
+ }
+}
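// Illustrative sketch (not from this patch) of how such a table can be used:
// with x = scale * (q - offset), the shifted exponential in softmax reduces to
// a pure table lookup for QASYMM8, since
//   exp(beta * (x - x_max)) = exp(-scale * beta * (q_max - q)) = lut[q_max - q]
// and q_max - q always falls in [0, 255]. A scalar reference, names assumed:
//
//   float sum = 0.f;
//   for (int i = 0; i < n; ++i) sum += lut[q_max - q[i]];
//   for (int i = 0; i < n; ++i) dst[i] = lut[q_max - q[i]] / sum; // then requantize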
+
Status validate_arguments_softmax(
const ITensorInfo &src, const ITensorInfo &dst, float beta, int axis, const ITensorInfo &tmp, bool is_log)
{
@@ -157,8 +194,8 @@ void CpuSoftmaxKernel::configure(
auto_init_if_empty(*tmp, TensorInfo(*src).set_data_type(DataType::F32).reset_padding());
}
- const auto *uk = CpuSoftmaxKernel::get_implementation(
- SoftmaxKernelDataTypeISASelectorData{src->data_type(), CPUInfo::get().get_isa(), is_log, axis});
+ const auto *uk = CpuSoftmaxKernel::get_implementation(SoftmaxKernelDataTypeISASelectorData{
+ src->data_type(), CPUInfo::get().get_isa(), is_log, axis, CPUInfo::get().get_sme2_vector_length()});
ARM_COMPUTE_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);
std::string kernel_name = is_log ? std::string("CpuLogSoftmaxKernel") : std::string("CpuSoftmaxKernel");
@@ -194,6 +231,13 @@ void CpuSoftmaxKernel::configure(
win.set(_axis, Window::Dimension(0, 1, 1));
ICpuKernel<CpuSoftmaxKernel>::configure(win);
+
+ const std::string uk_name = uk->name;
+ if (uk_name == "sme2_qu8_softmax_lut_512VL" || uk_name == "sme2_qs8_softmax_lut_512VL")
+ {
+ const float scale = src->quantization_info().uniform().scale;
+ init_lut(_lut, src->data_type(), scale, beta);
+ }
}
Status CpuSoftmaxKernel::validate(
@@ -230,11 +274,11 @@ void CpuSoftmaxKernel::run_op(ITensorPack &tensors, const Window &window, const
const unsigned int tmp_size_for_thread = tmp->info()->element_size() * num_elems_processed_per_iteration;
void *tmp_for_thread = tmp->buffer() + (info.thread_id * tmp_size_for_thread);
- _run_method(src, tmp_for_thread, dst, _beta, _axis, window);
+ _run_method(src, tmp_for_thread, dst, _beta, _axis, window, _lut.data());
}
else
{
- _run_method(src, nullptr, dst, _beta, _axis, window);
+ _run_method(src, nullptr, dst, _beta, _axis, window, nullptr);
}
}
diff --git a/src/cpu/kernels/CpuSoftmaxKernel.h b/src/cpu/kernels/CpuSoftmaxKernel.h
index 043ad975d5..676e79782b 100644
--- a/src/cpu/kernels/CpuSoftmaxKernel.h
+++ b/src/cpu/kernels/CpuSoftmaxKernel.h
@@ -37,8 +37,8 @@ namespace kernels
class CpuSoftmaxKernel : public ICpuKernel<CpuSoftmaxKernel>
{
private:
- using SoftmaxKernelPtr =
- std::add_pointer<void(const ITensor *, void *const, ITensor *, float, int, const Window &)>::type;
+ using SoftmaxKernelPtr = std::add_pointer<void(
+ const ITensor *, void *const, ITensor *, float, int, const Window &, const float *)>::type;
public:
CpuSoftmaxKernel() = default;
@@ -78,10 +78,11 @@ public:
static const std::vector<SoftmaxKernel> &get_available_kernels();
private:
- float _beta{1.0f};
- SoftmaxKernelPtr _run_method{nullptr};
- std::string _name{};
- int _axis{};
+ float _beta{1.0f};
+ SoftmaxKernelPtr _run_method{nullptr};
+ std::string _name{};
+ int _axis{};
+ std::vector<float> _lut = {};
};
} // namespace kernels
} // namespace cpu
diff --git a/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h b/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h
index 6e8f32ef47..72fafca1bb 100644
--- a/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h
+++ b/src/cpu/kernels/assembly/CpuGemmAssemblyWrapperKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2022 Arm Limited.
+ * Copyright (c) 2018-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H
-#define ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H
+#ifndef ACL_SRC_CPU_KERNELS_ASSEMBLY_CPUGEMMASSEMBLYWRAPPERKERNEL_H
+#define ACL_SRC_CPU_KERNELS_ASSEMBLY_CPUGEMMASSEMBLYWRAPPERKERNEL_H
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
@@ -52,7 +52,7 @@ namespace kernel
*
*
*/
-template <typename TypeInput, typename TypeOutput>
+template <typename TypeInput, typename TypeWeight, typename TypeOutput>
class CpuGemmAssemblyWrapperKernel final : public INEKernel
{
public:
@@ -101,7 +101,7 @@ public:
* @param[in] kernel Pointer to an assembly kernel implementation.
     * @param[in] kernel_name_tag Tag to be attached to the kernel's name.
*/
- void configure(arm_gemm::GemmCommon<TypeInput, TypeOutput> *kernel, std::string kernel_name_tag)
+ void configure(arm_gemm::GemmCommon<TypeInput, TypeWeight, TypeOutput> *kernel, std::string kernel_name_tag)
{
ARM_COMPUTE_ERROR_ON_NULLPTR((reinterpret_cast<void *>(kernel)));
_kernel = kernel;
@@ -131,10 +131,10 @@ public:
}
private:
- arm_gemm::GemmCommon<TypeInput, TypeOutput> *_kernel;
- std::string _name;
+ arm_gemm::GemmCommon<TypeInput, TypeWeight, TypeOutput> *_kernel;
+ std::string _name;
};
} // namespace kernel
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_ASSEMBLY_GEMM_KERNEL_WRAPPER_KERNEL_H */
+#endif // ACL_SRC_CPU_KERNELS_ASSEMBLY_CPUGEMMASSEMBLYWRAPPERKERNEL_H
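// Sketch of the widened contract (types assumed for illustration only): the
// weight operand of the assembly GEMM now carries its own type, so activations
// and weights may differ, e.g. wrapping an fp32-activation / bf16-weight kernel:
//
//   arm_gemm::GemmCommon<float, arm_gemm::bfloat16, float> *asm_kernel = /* from arm_gemm::gemm<...> */;
//   arm_compute::cpu::kernel::CpuGemmAssemblyWrapperKernel<float, arm_gemm::bfloat16, float> wrapper;
//   wrapper.configure(asm_kernel, "fp32bf16fp32"); // tag string is arbitrary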
diff --git a/src/cpu/kernels/assembly/arm_gemm.hpp b/src/cpu/kernels/assembly/arm_gemm.hpp
index 941fed0ba8..cbc8be416e 100644
--- a/src/cpu/kernels/assembly/arm_gemm.hpp
+++ b/src/cpu/kernels/assembly/arm_gemm.hpp
@@ -277,8 +277,8 @@ struct Nothing
{
};
-template <typename Top, typename Tret>
-using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret>>;
+template <typename Tlop, typename Trop, typename Tret>
+using UniqueGemmCommon = std::unique_ptr<GemmCommon<Tlop, Trop, Tret>>;
/* Low level API calls.
* These are implemented as 'GemmArgs' versions, or with the arguments explicitly listed. */
@@ -288,13 +288,13 @@ using UniqueGemmCommon = std::unique_ptr<GemmCommon<Top, Tret>>;
template <typename Top, typename Tret, class OutputStage = Nothing>
KernelDescription get_gemm_method(const GemmArgs &args, const OutputStage & = {});
-template <typename Top, typename Tret, class OutputStage = Nothing>
-UniqueGemmCommon<Top, Tret> gemm(const GemmArgs &args, const OutputStage & = {});
+template <typename Tlop, typename Trop, typename Tret, class OutputStage = Nothing>
+UniqueGemmCommon<Tlop, Trop, Tret> gemm(const GemmArgs &args, const OutputStage & = {});
-template <typename Top, typename Tret, class OutputStage = Nothing>
+template <typename Tlop, typename Trop, typename Tret, class OutputStage = Nothing>
std::vector<KernelDescription> get_compatible_kernels(const GemmArgs &args, const OutputStage & = {});
-template <typename Top, typename Tret, class OutputStage = Nothing>
+template <typename Tlop, typename Trop, typename Tret, class OutputStage = Nothing>
bool has_opt_gemm(WeightFormat &weight_format, const GemmArgs &args, const OutputStage & = {});
} // namespace arm_gemm
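// Under the new signatures, callers spell the left-hand, right-hand and return
// types independently (illustrative use of the declarations above):
//
//   arm_gemm::UniqueGemmCommon<float, float, float> g =
//       arm_gemm::gemm<float, float, float>(args);
//
// The old two-type spelling gemm<float, float>(args) no longer compiles, since
// all three type parameters must now be named.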
diff --git a/src/cpu/kernels/assembly/convolution_parameters.hpp b/src/cpu/kernels/assembly/convolution_parameters.hpp
index 0c1ae58902..09b73ca409 100644
--- a/src/cpu/kernels/assembly/convolution_parameters.hpp
+++ b/src/cpu/kernels/assembly/convolution_parameters.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2021 Arm Limited.
+ * Copyright (c) 2018-2021, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,6 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#ifndef ACL_SRC_CPU_KERNELS_ASSEMBLY_CONVOLUTION_PARAMETERS_HPP
+#define ACL_SRC_CPU_KERNELS_ASSEMBLY_CONVOLUTION_PARAMETERS_HPP
+
#pragma once
#include <cstdint>
@@ -57,9 +61,13 @@ struct ConvolutionParameters
int64_t output_stride_w;
int64_t output_stride_h;
// output_channels not included as they do not affect the input.
+ int64_t dilation_w;
+ int64_t dilation_h;
int64_t padding_top;
int64_t padding_left;
float padding_value;
};
} // namespace arm_gemm
+
+#endif // ACL_SRC_CPU_KERNELS_ASSEMBLY_CONVOLUTION_PARAMETERS_HPP
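// With the new dilation fields, input coordinates for an output point and a
// kernel tap follow the usual dilated-convolution addressing (sketch; the
// exact field mapping is assumed, not taken from this header):
//
//   in_x = out_x * stride_w + kern_x * dilation_w - padding_left;
//   in_y = out_y * stride_h + kern_y * dilation_h - padding_top;
//
// with out-of-range taps reading padding_value.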
diff --git a/src/cpu/kernels/assembly/gemm_common.hpp b/src/cpu/kernels/assembly/gemm_common.hpp
index 45d1e43274..f693021fcb 100644
--- a/src/cpu/kernels/assembly/gemm_common.hpp
+++ b/src/cpu/kernels/assembly/gemm_common.hpp
@@ -189,7 +189,7 @@ public:
* 'set_arrays' to capture the provided arguments in protected class
* members, as essentially any implementation will need these.
*/
-template <typename To, typename Tr>
+template <typename To, typename Tw, typename Tr>
class GemmCommon : public IGemmCommon
{
protected:
@@ -197,7 +197,7 @@ protected:
int _lda = 0;
int _A_batch_stride = 0;
int _A_multi_stride = 0;
- const To *_Bptr = nullptr;
+ const Tw *_Bptr = nullptr;
int _ldb = 0;
int _B_multi_stride = 0;
Tr *_Cptr = nullptr;
@@ -214,7 +214,7 @@ public:
const int lda,
const int A_batch_stride,
const int A_multi_stride,
- const To *B,
+ const Tw *B,
const int ldb,
/* batches share B */ const int B_multi_stride,
Tr *C,
@@ -254,7 +254,7 @@ public:
const void *bias,
/* no row or batch stride needed */ const int bias_multi_stride) override
{
- set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride, static_cast<const To *>(B), ldb,
+ set_arrays(static_cast<const To *>(A), lda, A_batch_stride, A_multi_stride, static_cast<const Tw *>(B), ldb,
B_multi_stride, static_cast<Tr *>(C), ldc, C_batch_stride, C_multi_stride,
static_cast<const Tr *>(bias), bias_multi_stride);
}
@@ -262,17 +262,17 @@ public:
/*** "Pretransposed" interface ***/
/* Compute col sums over all columns */
- virtual void requantize_bias(void *, const To *, const int, const int){};
+ virtual void requantize_bias(void *, const Tw *, const int, const int){};
/* Perform pretranspose - the void * passed in must remain allocated for the duration of any execute calls. */
/* Arguments are: output buffer pointer, source pointer, source row stride, source multi stride */
- virtual void pretranspose_B_array(void *, const To *, const int, const int, bool){};
+ virtual void pretranspose_B_array(void *, const Tw *, const int, const int, bool){};
/* Implementation of the void * overload which casts its arguments to the appropriate type. */
void pretranspose_B_array_generic(
void *out, const void *in, const int row_stride, const int multi_stride, bool transposed) override
{
- pretranspose_B_array(out, static_cast<const To *>(in), row_stride, multi_stride, transposed);
+ pretranspose_B_array(out, static_cast<const Tw *>(in), row_stride, multi_stride, transposed);
}
/* Threaded versions of the above.
@@ -280,7 +280,7 @@ public:
* just calls the non-threaded functions to do the work. This is valid as with window size of 1 the only
* legal values for start and end are 0 and 1 respectively. */
virtual void pretranspose_B_array_part(
- void *out, const To *in, const int row_stride, const int multi_stride, bool transposed, size_t, size_t)
+ void *out, const Tw *in, const int row_stride, const int multi_stride, bool transposed, size_t, size_t)
{
pretranspose_B_array(out, in, row_stride, multi_stride, transposed);
};
@@ -293,7 +293,7 @@ public:
size_t start,
size_t end) override
{
- pretranspose_B_array_part(out, static_cast<const To *>(in), row_stride, multi_stride, transposed, start, end);
+ pretranspose_B_array_part(out, static_cast<const Tw *>(in), row_stride, multi_stride, transposed, start, end);
}
/*** Indirect interface ***/
diff --git a/src/cpu/kernels/dequantize/generic/neon/fp16.cpp b/src/cpu/kernels/dequantize/generic/neon/fp16.cpp
new file mode 100644
index 0000000000..caffdf53e1
--- /dev/null
+++ b/src/cpu/kernels/dequantize/generic/neon/fp16.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+#include "src/cpu/kernels/dequantize/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void fp16_run_dequantization_core(const ITensor *input, ITensor *output, const Window &window)
+{
+ run_dequantization_core<float16_t>(input, output, window);
+}
+} // namespace cpu
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/cpu/kernels/dequantize/generic/neon/fp32.cpp b/src/cpu/kernels/dequantize/generic/neon/fp32.cpp
new file mode 100644
index 0000000000..58e987b450
--- /dev/null
+++ b/src/cpu/kernels/dequantize/generic/neon/fp32.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/kernels/dequantize/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void fp32_run_dequantization_core(const ITensor *input, ITensor *output, const Window &window)
+{
+ run_dequantization_core<float>(input, output, window);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/dequantize/generic/neon/impl.h b/src/cpu/kernels/dequantize/generic/neon/impl.h
new file mode 100644
index 0000000000..7197d4dff6
--- /dev/null
+++ b/src/cpu/kernels/dequantize/generic/neon/impl.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CPU_KERNELS_DEQUANTIZE_GENERIC_NEON_IMPL_H
+#define ACL_SRC_CPU_KERNELS_DEQUANTIZE_GENERIC_NEON_IMPL_H
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/Window.h"
+
+#include "src/core/NEON/NEAsymm.h"
+#include "src/core/NEON/NESymm.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/cpu/kernels/dequantize/generic/neon/list.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+template <typename T>
+inline void store_result(T *ptr, const float32x4x4_t &v)
+{
+ ARM_COMPUTE_UNUSED(ptr, v);
+}
+
+template <>
+inline void store_result<float>(float *ptr, const float32x4x4_t &v)
+{
+ wrapper::vstore(ptr, v.val[0]);
+ wrapper::vstore(ptr + 4, v.val[1]);
+ wrapper::vstore(ptr + 8, v.val[2]);
+ wrapper::vstore(ptr + 12, v.val[3]);
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <>
+inline void store_result<float16_t>(float16_t *ptr, const float32x4x4_t &v)
+{
+ wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
+ wrapper::vstore(ptr + 8, vcombine_f16(vcvt_f16_f32(v.val[2]), vcvt_f16_f32(v.val[3])));
+}
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+
+template <typename T>
+inline void store_result(T *ptr, const float32x4x2_t &v)
+{
+ ARM_COMPUTE_UNUSED(ptr, v);
+}
+
+template <>
+inline void store_result<float>(float *ptr, const float32x4x2_t &v)
+{
+ wrapper::vstore(ptr, v.val[0]);
+ wrapper::vstore(ptr + 4, v.val[1]);
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <>
+inline void store_result<float16_t>(float16_t *ptr, const float32x4x2_t &v)
+{
+ wrapper::vstore(ptr, vcombine_f16(vcvt_f16_f32(v.val[0]), vcvt_f16_f32(v.val[1])));
+}
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
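// The unspecialized store_result templates above compile to no-ops on purpose:
// they let run_dequantization_* instantiate for any T, while only the float and
// float16_t specializations emit real stores, e.g. (sketch):
//
//   float32x4x2_t v = vdequantize_int16(vld1q_s16(src_ptr), scale);
//   store_result<float>(dst_f32, v);     // two 128-bit stores
//   store_result<float16_t>(dst_f16, v); // narrows via vcvt_f16_f32, one store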
+
+template <typename TOut, typename TIn>
+void run_dequantization_qasymm8(const ITensor *input, ITensor *output, const Window &window)
+{
+ const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
+ const float scale = qinfo.scale;
+ const int32_t offset = qinfo.offset;
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Create iterators
+ Iterator in(input, win_collapsed);
+ Iterator out(output, win_collapsed);
+
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
+ {
+ const auto in_ptr = reinterpret_cast<const TIn *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<TOut *>(out.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto vin = wrapper::vloadq(in_ptr + x);
+ const auto vdeq = vdequantize(vin, scale, offset);
+
+ store_result(reinterpret_cast<TOut *>(out_ptr + x), vdeq);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ auto val = *(in_ptr + x);
+ *(out_ptr + x) = static_cast<TOut>(Qasymm8QuantizationHelper<TIn>::dequantize(val, qinfo));
+ }
+ },
+ in, out);
+}
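// Worked example for the scalar tail above (numbers chosen for illustration):
// with scale = 0.1f and offset = 128, a QASYMM8 value q = 130 dequantizes to
// 0.1f * (130 - 128) = 0.2f; the vector path computes the same on 16 lanes at
// a time via vdequantize(vin, scale, offset).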
+
+template <typename T>
+void run_dequantization_qsymm8_per_channel_nchw(const ITensor *input, ITensor *output, const Window &window)
+{
+ const auto scale = input->info()->quantization_info().scale();
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ // Reset first dimension to handle tail calculations manually
+ Window win(window);
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Create iterators
+ Iterator in(input, win);
+ Iterator out(output, win);
+
+ execute_window_loop(
+ win,
+ [&](const Coordinates &id)
+ {
+ const auto in_ptr = reinterpret_cast<const int8_t *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<T *>(out.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto vin = wrapper::vloadq(in_ptr + x);
+ const auto vdeq = vdequantize(vin, scale[id.z()]);
+
+ store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ int8_t val = *(in_ptr + x);
+ *(out_ptr + x) = static_cast<T>(dequantize(val, scale[id.z()]));
+ }
+ },
+ in, out);
+}
+
+template <typename T>
+void run_dequantization_qsymm8_per_channel_nhwc(const ITensor *input, ITensor *output, const Window &window)
+{
+ const auto scale = input->info()->quantization_info().scale();
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ // Reset first dimension to handle tail calculations manually
+ Window win(window);
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Create iterators
+ Iterator in(input, win);
+ Iterator out(output, win);
+
+ execute_window_loop(
+ win,
+ [&](const Coordinates &)
+ {
+ const auto in_ptr = reinterpret_cast<const int8_t *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<T *>(out.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const float32x4x4_t vscale = {{scale[x + 0], scale[x + 1], scale[x + 2], scale[x + 3], scale[x + 4],
+ scale[x + 5], scale[x + 6], scale[x + 7], scale[x + 8], scale[x + 9],
+ scale[x + 10], scale[x + 11], scale[x + 12], scale[x + 13],
+ scale[x + 14], scale[x + 15]}};
+ const auto vin = wrapper::vloadq(in_ptr + x);
+ const auto vdeq = vdequantize(vin, vscale);
+
+ store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ int8_t val = *(in_ptr + x);
+ *(out_ptr + x) = static_cast<T>(dequantize(val, scale[x]));
+ }
+ },
+ in, out);
+}
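// In NHWC the channel is the innermost (x) dimension, so each of the 16 lanes
// needs its own per-channel scale (hence the vscale gather above). In the NCHW
// variant an entire x-row shares one channel, so the single scale[id.z()]
// suffices there.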
+
+template <typename T>
+void run_dequantization_qsymm8(const ITensor *input, ITensor *output, const Window &window)
+{
+ const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
+ const float scale = qinfo.scale;
+
+ const int window_step_x = 16;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Create iterators
+ Iterator in(input, win_collapsed);
+ Iterator out(output, win_collapsed);
+
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
+ {
+ const auto in_ptr = reinterpret_cast<const int8_t *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<T *>(out.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto vin = wrapper::vloadq(in_ptr + x);
+ const auto vdeq = vdequantize(vin, scale);
+
+ store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ int8_t val = *(in_ptr + x);
+ *(out_ptr + x) = static_cast<T>(dequantize(val, scale));
+ }
+ },
+ in, out);
+}
+
+template <typename T>
+void run_dequantization_qsymm16(const ITensor *input, ITensor *output, const Window &window)
+{
+ const UniformQuantizationInfo &qinfo = input->info()->quantization_info().uniform();
+ const float scale = qinfo.scale;
+
+ const int window_step_x = 8;
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Create iterators
+ Iterator in(input, win_collapsed);
+ Iterator out(output, win_collapsed);
+
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
+ {
+ const auto in_ptr = reinterpret_cast<const int16_t *>(in.ptr());
+ const auto out_ptr = reinterpret_cast<T *>(out.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto vin = wrapper::vloadq(in_ptr + x);
+ const auto vdeq = vdequantize_int16(vin, scale);
+
+ store_result<T>(reinterpret_cast<T *>(out_ptr + x), vdeq);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ int16_t val = *(in_ptr + x);
+ *(out_ptr + x) = static_cast<T>(dequantize_qsymm16(val, scale));
+ }
+ },
+ in, out);
+}
+
+template <typename T>
+void run_dequantization_core(const ITensor *input, ITensor *output, const Window &window)
+{
+ switch (input->info()->data_type())
+ {
+ case DataType::QASYMM8:
+ run_dequantization_qasymm8<T, uint8_t>(input, output, window);
+ break;
+ case DataType::QASYMM8_SIGNED:
+ run_dequantization_qasymm8<T, int8_t>(input, output, window);
+ break;
+ case DataType::QSYMM8_PER_CHANNEL:
+ input->info()->data_layout() == DataLayout::NHWC
+ ? run_dequantization_qsymm8_per_channel_nhwc<T>(input, output, window)
+ : run_dequantization_qsymm8_per_channel_nchw<T>(input, output, window);
+ break;
+ case DataType::QSYMM8:
+ run_dequantization_qsymm8<T>(input, output, window);
+ break;
+ case DataType::QSYMM16:
+ run_dequantization_qsymm16<T>(input, output, window);
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type.");
+ }
+}
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // ACL_SRC_CPU_KERNELS_DEQUANTIZE_GENERIC_NEON_IMPL_H
diff --git a/src/cpu/kernels/dequantize/generic/neon/list.h b/src/cpu/kernels/dequantize/generic/neon/list.h
new file mode 100644
index 0000000000..678eb2c01a
--- /dev/null
+++ b/src/cpu/kernels/dequantize/generic/neon/list.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CPU_KERNELS_DEQUANTIZE_GENERIC_NEON_LIST_H
+#define ACL_SRC_CPU_KERNELS_DEQUANTIZE_GENERIC_NEON_LIST_H
+
+#include "arm_compute/core/Helpers.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#define DECLARE_DEQUANTIZE_KERNEL(func_name) void func_name(const ITensor *input, ITensor *output, const Window &window)
+
+DECLARE_DEQUANTIZE_KERNEL(fp32_run_dequantization_core);
+DECLARE_DEQUANTIZE_KERNEL(fp16_run_dequantization_core);
+
+#undef DECLARE_DEQUANTIZE_KERNEL
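// For reference, DECLARE_DEQUANTIZE_KERNEL(fp32_run_dequantization_core)
// expands to the declaration:
//
//   void fp32_run_dequantization_core(const ITensor *input, ITensor *output, const Window &window);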
+
+} // namespace cpu
+} // namespace arm_compute
+#endif // ACL_SRC_CPU_KERNELS_DEQUANTIZE_GENERIC_NEON_LIST_H
diff --git a/src/cpu/kernels/gemm_matrix_mul/generic/neon/fp16.cpp b/src/cpu/kernels/gemm_matrix_mul/generic/neon/fp16.cpp
index 60fda511e3..6a93be0618 100644
--- a/src/cpu/kernels/gemm_matrix_mul/generic/neon/fp16.cpp
+++ b/src/cpu/kernels/gemm_matrix_mul/generic/neon/fp16.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Arm Limited.
+ * Copyright (c) 2022-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -81,7 +81,7 @@ void vector_matrix_multiply_f16(
        // window_end_x is computed above and may overshoot width_matrix_b, which would cause out-of-bounds writes to the dst.
for (; x < (window_end_x - window_step_x); x += window_step_x)
{
- if (x > width_matrix_b)
+ if (x >= width_matrix_b)
{
return;
}
@@ -176,7 +176,7 @@ void vector_matrix_multiply_f16(
for (; x < window_end_x; ++x)
{
- if (x > width_matrix_b)
+ if (x >= width_matrix_b)
{
return;
}
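// The two comparison changes above are off-by-one fixes: when an iteration
// starts exactly at x == width_matrix_b, the old 'x > width_matrix_b' guard let
// the body run and store one full vector (or one element, in the tail loop)
// past the last valid column of the destination; '>=' makes the early-out
// cover that boundary as well.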
diff --git a/src/cpu/kernels/quantize/generic/neon/fp16.cpp b/src/cpu/kernels/quantize/generic/neon/fp16.cpp
new file mode 100644
index 0000000000..37bfb5b2aa
--- /dev/null
+++ b/src/cpu/kernels/quantize/generic/neon/fp16.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+#include "src/cpu/kernels/quantize/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void fp16_u8_run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm8<float16_t, uint8_t>(src, dst, window);
+}
+void fp16_i8_run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm8<float16_t, int8_t>(src, dst, window);
+}
+void fp16_run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm16<float16_t>(src, dst, window);
+}
+} // namespace cpu
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/cpu/kernels/quantize/generic/neon/fp32.cpp b/src/cpu/kernels/quantize/generic/neon/fp32.cpp
new file mode 100644
index 0000000000..0cba332fd6
--- /dev/null
+++ b/src/cpu/kernels/quantize/generic/neon/fp32.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/kernels/quantize/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void fp32_u8_run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm8<float, uint8_t>(src, dst, window);
+}
+void fp32_i8_run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm8<float, int8_t>(src, dst, window);
+}
+void fp32_run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm16<float>(src, dst, window);
+}
+
+void fp32_i8_run_quantize_qsymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qsymm8<float, int8_t>(src, dst, window);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/quantize/generic/neon/impl.h b/src/cpu/kernels/quantize/generic/neon/impl.h
new file mode 100644
index 0000000000..9954a7645e
--- /dev/null
+++ b/src/cpu/kernels/quantize/generic/neon/impl.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CPU_KERNELS_QUANTIZE_GENERIC_NEON_IMPL_H
+#define ACL_SRC_CPU_KERNELS_QUANTIZE_GENERIC_NEON_IMPL_H
+
+#include "arm_compute/core/Helpers.h"
+
+#include "src/core/helpers/WindowHelpers.h"
+#include "src/core/NEON/NEAsymm.h"
+#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+constexpr auto window_step = 16;
+
+template <typename T>
+inline float32x4x4_t load_value(const T *input_ptr)
+{
+ using Tx16_t = typename wrapper::traits::neon_vector<T, 16>::type;
+ return arm_compute::convert_to_float32x4x4<Tx16_t>(wrapper::vloadq(input_ptr));
+}
+
+template <>
+inline float32x4x4_t load_value(const float *input_ptr)
+{
+ return {wrapper::vloadq(input_ptr), wrapper::vloadq(input_ptr + 4), wrapper::vloadq(input_ptr + 8),
+ wrapper::vloadq(input_ptr + 12)};
+}
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <>
+inline float32x4x4_t load_value(const float16_t *input_ptr)
+{
+ return {vcvt_f32_f16(wrapper::vload(input_ptr)), vcvt_f32_f16(wrapper::vload(input_ptr + 4)),
+ vcvt_f32_f16(wrapper::vload(input_ptr + 8)), vcvt_f32_f16(wrapper::vload(input_ptr + 12))};
+}
+
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
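// load_value widens 16 elements of any supported input type to a
// float32x4x4_t (four 4-lane float vectors); for float16_t this is four
// f16-to-f32 conversions, letting the quantize loops below share a single
// float code path regardless of TIn.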
+
+template <typename element_type>
+using vector_type = wrapper::traits::neon_vector_t<element_type, window_step>;
+
+template <typename quantized_type>
+inline vector_type<quantized_type> vquantize_qasymm8(const float32x4x4_t &qv, const UniformQuantizationInfo &qi);
+
+template <>
+inline vector_type<uint8_t> vquantize_qasymm8<uint8_t>(const float32x4x4_t &qv, const UniformQuantizationInfo &qi)
+{
+ return vquantize(qv, qi);
+}
+
+template <>
+inline vector_type<int8_t> vquantize_qasymm8<int8_t>(const float32x4x4_t &qv, const UniformQuantizationInfo &qi)
+{
+ return vquantize_signed(qv, qi);
+}
+
+template <typename TOut, typename = typename std::enable_if<std::is_signed<TOut>::value, bool>::type>
+inline int8x16_t recombine_8_16(int16x8_t lower, int16x8_t upper)
+{
+ return wrapper::vcombine(wrapper::vqmovn(lower), wrapper::vqmovn(upper));
+}
+
+template <typename TOut, typename = typename std::enable_if<std::is_unsigned<TOut>::value, bool>::type>
+inline uint8x16_t recombine_8_16(int16x8_t lower, int16x8_t upper)
+{
+ return wrapper::vcombine(wrapper::vqmovun(lower), wrapper::vqmovun(upper));
+}
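// recombine_8_16 is overload-selected on the signedness of TOut via enable_if:
// signed outputs saturate-narrow with vqmovn, unsigned ones with vqmovun.
// E.g. (sketch, assuming int16x8_t values lo and hi):
//
//   uint8x16_t u = recombine_8_16<uint8_t>(lo, hi); // vqmovun path
//   int8x16_t  s = recombine_8_16<int8_t>(lo, hi);  // vqmovn path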
+
+template <typename TIn, typename TOut>
+void run_quantize_qsymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
+ UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
+ uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input(src, win_collapsed);
+ Iterator output(dst, win_collapsed);
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
+ {
+ auto input_ptr = reinterpret_cast<const TIn *>(input.ptr());
+ auto output_ptr = reinterpret_cast<TOut *>(output.ptr());
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step); x += window_step)
+ {
+ wrapper::vstore(&output_ptr[x], vquantize_qasymm8<TOut>(load_value(&input_ptr[x]), uqinfo));
+ }
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ output_ptr[x] = quantize_qsymm8(input_ptr[x], dst->info()->quantization_info());
+ }
+ },
+ input, output);
+}
+
+template <typename TIn, typename TOut>
+void run_requantize_offset_only_convert(const ITensor *src, ITensor *dst, const Window &window)
+{
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ // Calculate output offset difference.
+ const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
+ UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
+ uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Duplicate offset in signed vector format
+ const int8x16_t offset = wrapper::vdup_n(static_cast<int8_t>(uqinfo.offset), wrapper::traits::vector_128_tag{});
+
+ Iterator input(src, win_collapsed);
+ Iterator output(dst, win_collapsed);
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
+ {
+ auto input_ptr = reinterpret_cast<const TIn *>(input.ptr());
+ auto output_ptr = reinterpret_cast<TOut *>(output.ptr());
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step); x += window_step)
+ {
+ const wrapper::traits::neon_vector_t<TIn, window_step> qv =
+                    wrapper::vloadq(input_ptr + x); // Load a 128-bit vector of 8-bit elements
+
+ // Signed addition.
+ auto res = vaddq_s8(reinterpret_cast<int8x16_t>(qv), offset);
+
+ // Output is dependent on datatype.
+ wrapper::vstore(&output_ptr[x],
+ reinterpret_cast<wrapper::traits::neon_vector_t<TOut, window_step>>(res));
+ }
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ auto result = uqinfo.offset + static_cast<int32_t>(input_ptr[x]);
+ output_ptr[x] = static_cast<TOut>(result);
+ }
+ },
+ input, output);
+}
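// Why a plain wrapping vaddq_s8 is enough here (sketch; when this path is
// selected is decided elsewhere, presumably only when the scales match):
// converting QASYMM8 <-> QASYMM8_SIGNED at equal scale is a shift of the
// stored value by 128, and adding +/-128 with modulo-256 wraparound performs
// exactly that shift. E.g. uint8 200 plus an offset of -128 wraps to int8 72,
// which encodes the same real value under the signed zero point.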
+
+template <typename TIn, typename TOut>
+void run_requantize_offset_only(const ITensor *src, ITensor *dst, const Window &window)
+{
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
+ UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
+ uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ // Duplicate offset in signed vector format
+ const int16x8_t offset = wrapper::vdup_n(static_cast<int16_t>(uqinfo.offset), wrapper::traits::vector_128_tag{});
+
+ const int32_t low_bound = (dst->info()->data_type() == DataType::QASYMM8) ? 0 : -128;
+ const int32_t upper_bound = (dst->info()->data_type() == DataType::QASYMM8) ? 255 : 127;
+
+ Iterator input(src, win_collapsed);
+ Iterator output(dst, win_collapsed);
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
+ {
+ auto input_ptr = reinterpret_cast<const TIn *>(input.ptr());
+ TOut *output_ptr = reinterpret_cast<TOut *>(output.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step); x += window_step)
+ {
+                const auto qv = wrapper::vloadq(input_ptr + x); // Load a 128-bit vector of 8-bit elements
+ int16x8_t lower = reinterpret_cast<int16x8_t>(wrapper::vmovl(wrapper::vgetlow(qv)));
+ int16x8_t upper = reinterpret_cast<int16x8_t>(wrapper::vmovl(wrapper::vgethigh(qv)));
+
+ // Signed addition.
+ lower = wrapper::vqadd(lower, offset);
+ upper = wrapper::vqadd(upper, offset);
+
+ // Output is dependent on datatype.
+ auto res = recombine_8_16<TOut>(lower, upper);
+ wrapper::vstore(&output_ptr[x], res);
+ }
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ // Add offset and clamp result to within the range of the output datatype.
+ int32_t result = uqinfo.offset + static_cast<int32_t>(input_ptr[x]);
+ result = utility::clamp<int32_t>(result, low_bound, upper_bound);
+
+ // Cast result to output datatype.
+ output_ptr[x] = static_cast<TOut>(result);
+ }
+ },
+ input, output);
+}
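// Unlike the _convert variant above, this path widens to int16, applies the
// offset with saturating adds (vqadd) and narrows back with saturation
// (recombine_8_16), so an offset shift that would leave the 8-bit range clamps
// instead of wrapping, matching the scalar tail's explicit clamp to
// [low_bound, upper_bound].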
+
+template <typename TIn, typename TOut>
+void run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
+ UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
+ if (is_data_type_quantized_asymmetric(src->info()->data_type()))
+ {
+ uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
+ }
+#ifdef __aarch64__
+ constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_EVEN;
+#else //__aarch64__
+ constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO;
+#endif //__aarch64__
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input(src, win_collapsed);
+ Iterator output(dst, win_collapsed);
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
+ {
+ auto input_ptr = reinterpret_cast<const TIn *>(input.ptr());
+ auto output_ptr = reinterpret_cast<TOut *>(output.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step); x += window_step)
+ {
+ wrapper::vstore(&output_ptr[x], vquantize_qasymm8<TOut>(load_value(&input_ptr[x]), uqinfo));
+ }
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ output_ptr[x] = Qasymm8QuantizationHelper<TOut>::quantize(input_ptr[x], uqinfo, rounding_policy);
+ }
+ },
+ input, output);
+}
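// The per-architecture rounding split above reflects what NEON provides:
// AArch64 has round-to-nearest-even float-to-int conversions (vcvtnq), while
// AArch32 only guarantees truncating conversions, hence TO_ZERO there. The
// vector path via vquantize_qasymm8 is presumed to apply the same policy so
// that SIMD and tail results agree.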
+
+template <typename T>
+void run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
+{
+ const auto window_start_x = static_cast<int>(window.x().start());
+ const auto window_end_x = static_cast<int>(window.x().end());
+
+ const UniformQuantizationInfo uqinfo_in = src->info()->quantization_info().uniform();
+ UniformQuantizationInfo uqinfo = dst->info()->quantization_info().uniform();
+ if (is_data_type_quantized_asymmetric(src->info()->data_type()))
+ {
+ uqinfo = compute_requantization_scale_offset(uqinfo_in, uqinfo);
+ }
+#ifdef __aarch64__
+ constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_EVEN;
+#else //__aarch64__
+ constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO;
+#endif //__aarch64__
+
+ // Collapse window and reset first dimension to handle tail calculations manually
+ Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
+ win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input(src, win_collapsed);
+ Iterator output(dst, win_collapsed);
+ execute_window_loop(
+ win_collapsed,
+ [&](const Coordinates &)
+ {
+ auto input_ptr = reinterpret_cast<const T *>(input.ptr());
+ auto output_ptr = reinterpret_cast<uint16_t *>(output.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step); x += window_step)
+ {
+ uint16x8x2_t tmp = vquantize_qasymm16(load_value(&input_ptr[x]), uqinfo);
+ vst1q_u16(&output_ptr[x], tmp.val[0]);
+ vst1q_u16(&output_ptr[x + 8], tmp.val[1]);
+ }
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ output_ptr[x] = quantize_qasymm16(input_ptr[x], uqinfo, rounding_policy);
+ }
+ },
+ input, output);
+}
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // ACL_SRC_CPU_KERNELS_QUANTIZE_GENERIC_NEON_IMPL_H
diff --git a/src/cpu/kernels/quantize/generic/neon/integer.cpp b/src/cpu/kernels/quantize/generic/neon/integer.cpp
new file mode 100644
index 0000000000..4e39afaaee
--- /dev/null
+++ b/src/cpu/kernels/quantize/generic/neon/integer.cpp
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/kernels/quantize/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
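+// Thin wrappers that instantiate the quantize templates for each source/destination type pair encoded in the function name.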
+void u8_u8_run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm8<uint8_t, uint8_t>(src, dst, window);
+}
+void u8_i8_run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm8<uint8_t, int8_t>(src, dst, window);
+}
+void i8_u8_run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm8<int8_t, uint8_t>(src, dst, window);
+}
+void i8_i8_run_quantize_qasymm8(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm8<int8_t, int8_t>(src, dst, window);
+}
+
+void u8_run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm16<uint8_t>(src, dst, window);
+}
+void i8_run_quantize_qasymm16(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_quantize_qasymm16<int8_t>(src, dst, window);
+}
+
+void u8_u8_run_requantize_offset_only(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_requantize_offset_only<uint8_t, uint8_t>(src, dst, window);
+}
+void u8_i8_run_requantize_offset_only(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_requantize_offset_only<uint8_t, int8_t>(src, dst, window);
+}
+void i8_u8_run_requantize_offset_only(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_requantize_offset_only<int8_t, uint8_t>(src, dst, window);
+}
+void i8_i8_run_requantize_offset_only(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_requantize_offset_only<int8_t, int8_t>(src, dst, window);
+}
+
+void i8_u8_run_requantize_offset_only_convert(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_requantize_offset_only_convert<int8_t, uint8_t>(src, dst, window);
+}
+void u8_i8_run_requantize_offset_only_convert(const ITensor *src, ITensor *dst, const Window &window)
+{
+ run_requantize_offset_only_convert<uint8_t, int8_t>(src, dst, window);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/quantize/generic/neon/list.h b/src/cpu/kernels/quantize/generic/neon/list.h
new file mode 100644
index 0000000000..c4fb1048eb
--- /dev/null
+++ b/src/cpu/kernels/quantize/generic/neon/list.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CPU_KERNELS_QUANTIZE_GENERIC_NEON_LIST_H
+#define ACL_SRC_CPU_KERNELS_QUANTIZE_GENERIC_NEON_LIST_H
+
+#include "arm_compute/core/Helpers.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
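+// Declares the common signature shared by every NEON quantize kernel entry point.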
+#define DECLARE_QUANTIZE_KERNEL(func_name) void func_name(const ITensor *src, ITensor *dst, const Window &window)
+
+DECLARE_QUANTIZE_KERNEL(u8_u8_run_quantize_qasymm8);
+DECLARE_QUANTIZE_KERNEL(u8_i8_run_quantize_qasymm8);
+DECLARE_QUANTIZE_KERNEL(i8_u8_run_quantize_qasymm8);
+DECLARE_QUANTIZE_KERNEL(i8_i8_run_quantize_qasymm8);
+
+DECLARE_QUANTIZE_KERNEL(u8_u8_run_requantize_offset_only);
+DECLARE_QUANTIZE_KERNEL(u8_i8_run_requantize_offset_only);
+DECLARE_QUANTIZE_KERNEL(i8_u8_run_requantize_offset_only);
+DECLARE_QUANTIZE_KERNEL(i8_i8_run_requantize_offset_only);
+
+DECLARE_QUANTIZE_KERNEL(i8_u8_run_requantize_offset_only_convert);
+DECLARE_QUANTIZE_KERNEL(u8_i8_run_requantize_offset_only_convert);
+
+DECLARE_QUANTIZE_KERNEL(u8_run_quantize_qasymm16);
+DECLARE_QUANTIZE_KERNEL(i8_run_quantize_qasymm16);
+
+DECLARE_QUANTIZE_KERNEL(fp32_u8_run_quantize_qasymm8);
+DECLARE_QUANTIZE_KERNEL(fp32_i8_run_quantize_qasymm8);
+DECLARE_QUANTIZE_KERNEL(fp32_run_quantize_qasymm16);
+
+DECLARE_QUANTIZE_KERNEL(fp32_i8_run_quantize_qsymm8);
+
+DECLARE_QUANTIZE_KERNEL(fp16_u8_run_quantize_qasymm8);
+DECLARE_QUANTIZE_KERNEL(fp16_i8_run_quantize_qasymm8);
+DECLARE_QUANTIZE_KERNEL(fp16_run_quantize_qasymm16);
+
+#undef DECLARE_QUANTIZE_KERNEL
+
+} // namespace cpu
+} // namespace arm_compute
+#endif // ACL_SRC_CPU_KERNELS_QUANTIZE_GENERIC_NEON_LIST_H
diff --git a/src/cpu/kernels/reduction_layer/generic/neon/fp16.cpp b/src/cpu/kernels/reduction_layer/generic/neon/fp16.cpp
new file mode 100644
index 0000000000..143bb5487f
--- /dev/null
+++ b/src/cpu/kernels/reduction_layer/generic/neon/fp16.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
+
+#include "src/cpu/kernels/reduction_layer/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void reduce_RedOpX_reduceX_float16_8(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpX<float16_t, 8>>::reduceX(window, input, output, RedOpX<float16_t, 8>(), op);
+}
+
+void reduce_RedOpYZW_reduceY_float16_8(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<float16_t, 8>>::reduceY(window, input, output, RedOpYZW<float16_t, 8>(), op);
+}
+
+void reduce_RedOpYZW_reduceZ_float16_8(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<float16_t, 8>>::reduceZ(window, input, output, RedOpYZW<float16_t, 8>(), op);
+}
+
+void reduce_RedOpYZW_reduceW_float16_8(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<float16_t, 8>>::reduceW(window, input, output, RedOpYZW<float16_t, 8>(), op);
+}
+} // namespace cpu
+} // namespace arm_compute
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
diff --git a/src/cpu/kernels/reduction_layer/generic/neon/fp32.cpp b/src/cpu/kernels/reduction_layer/generic/neon/fp32.cpp
new file mode 100644
index 0000000000..6f5f13e571
--- /dev/null
+++ b/src/cpu/kernels/reduction_layer/generic/neon/fp32.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/reduction_layer/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void reduce_RedOpYZW_complex_reduceZ_float32_4_2_SUM(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ Reducer<RedOpYZW_complex<float, 4, 2, ReductionOperation::SUM>>::reduceZ(
+ window, input, output, RedOpYZW_complex<float, 4, 2, ReductionOperation::SUM>(), op);
+}
+
+void reduce_RedOpX_reduceX_float32_4(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpX<float, 4>>::reduceX(window, input, output, RedOpX<float, 4>(), op);
+}
+
+void reduce_RedOpYZW_reduceY_float32_4(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<float, 4>>::reduceY(window, input, output, RedOpYZW<float, 4>(), op);
+}
+
+void reduce_RedOpYZW_reduceZ_float32_4(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<float, 4>>::reduceZ(window, input, output, RedOpYZW<float, 4>(), op);
+}
+
+void reduce_RedOpYZW_reduceW_float32_4(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<float, 4>>::reduceW(window, input, output, RedOpYZW<float, 4>(), op);
+}
+
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/reduction_layer/generic/neon/impl.h b/src/cpu/kernels/reduction_layer/generic/neon/impl.h
new file mode 100644
index 0000000000..3fa821d3a4
--- /dev/null
+++ b/src/cpu/kernels/reduction_layer/generic/neon/impl.h
@@ -0,0 +1,1633 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CPU_KERNELS_REDUCTION_LAYER_GENERIC_NEON_IMPL_H
+#define ACL_SRC_CPU_KERNELS_REDUCTION_LAYER_GENERIC_NEON_IMPL_H
+
+#include "arm_compute/core/Coordinates.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+
+#include "src/core/NEON/NEMath.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "support/SaturateCast.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+// Helper function that calls vqmovun/vqmovn, vcombine and vstore; it allows RedOpYZW_quantized to be templated over the output type.
+template <typename T>
+void combine_and_store(int16x8_t t1, int16x8_t t2, Iterator &output, int offset = 0)
+{
+ if (std::is_same<T, uint8_t>::value)
+ {
+ auto res = wrapper::vcombine(wrapper::vqmovun(t1), wrapper::vqmovun(t2));
+ wrapper::vstore(output.ptr() + offset, res);
+ }
+ else
+ {
+ auto res = wrapper::vcombine(wrapper::vqmovn(t1), wrapper::vqmovn(t2));
+ wrapper::vstore(reinterpret_cast<int8_t *>(output.ptr() + offset), res);
+ }
+}
+
+template <typename T>
+uint32x4x4_t calculate_index(uint32_t idx, T a, T b, uint32x4x4_t c, ReductionOperation op, int axis)
+{
+ uint32x4_t mask{0};
+ if (op == ReductionOperation::ARG_IDX_MIN)
+ {
+ mask = wrapper::vcgt(b, a);
+ }
+ else
+ {
+ mask = wrapper::vclt(b, a);
+ }
+
+ uint32x4_t vec_idx = {idx, idx + 1, idx + 2, idx + 3};
+ if (axis != 0)
+ {
+ vec_idx = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
+ }
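+ // Only val[0] is populated: this generic overload handles 4-lane vectors, so a single uint32x4_t of indices covers all lanes.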
+ uint32x4x4_t res = {{wrapper::vbsl(mask, vec_idx, c.val[0]), 0, 0, 0}};
+
+ return res;
+}
+
+template <typename T>
+uint32x4x4_t calculate_index_quantized(uint32_t idx, T a, T b, uint32x4x4_t c, ReductionOperation op, int axis)
+{
+ uint32x4x4_t mask{{0}};
+ uint8x16_t mask_u8{0};
+ if (op == ReductionOperation::ARG_IDX_MIN)
+ {
+ mask_u8 = wrapper::vcgt(b, a);
+ }
+ else
+ {
+ mask_u8 = wrapper::vclt(b, a);
+ }
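+ // Widen the 16-lane byte mask into four 32-bit masks: (m << 8) | m turns 0xFF into 0xFFFF, then (m << 16) | m turns 0xFFFF into 0xFFFFFFFF.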
+ auto wide_u16_1 =
+ wrapper::vorr(vshll_n_u8(wrapper::vgetlow(mask_u8), 8), wrapper::vmovl(wrapper::vgetlow(mask_u8)));
+ auto wide_u16_2 =
+ wrapper::vorr(vshll_n_u8(wrapper::vgethigh(mask_u8), 8), wrapper::vmovl(wrapper::vgethigh(mask_u8)));
+ mask.val[0] =
+ wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_1), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_1)));
+ mask.val[1] =
+ wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_1), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_1)));
+ mask.val[2] =
+ wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_2), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_2)));
+ mask.val[3] =
+ wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_2), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_2)));
+
+ uint32x4x4_t vec_idx = {{{idx + 0, idx + 1, idx + 2, idx + 3},
+ {idx + 4, idx + 5, idx + 6, idx + 7},
+ {idx + 8, idx + 9, idx + 10, idx + 11},
+ {idx + 12, idx + 13, idx + 14, idx + 15}}};
+ if (axis != 0)
+ {
+ vec_idx.val[0] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
+ vec_idx.val[1] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
+ vec_idx.val[2] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
+ vec_idx.val[3] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
+ }
+ uint32x4x4_t res = {
+ {vbslq_u32(mask.val[0], vec_idx.val[0], c.val[0]), vbslq_u32(mask.val[1], vec_idx.val[1], c.val[1]),
+ vbslq_u32(mask.val[2], vec_idx.val[2], c.val[2]), vbslq_u32(mask.val[3], vec_idx.val[3], c.val[3])}};
+
+ return res;
+}
+
+// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
+template <typename T>
+inline typename std::enable_if<
+ std::is_same<T, float32x4_t>::value || std::is_same<T, int32x4_t>::value,
+ typename std::conditional<std::is_same<T, float32x4_t>::value, float32x2_t, int32x2_t>::type>::type
+calculate_min(T in)
+{
+ auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
+ return wrapper::vpmin(pmin, pmin);
+}
+
+// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
+template <typename T>
+inline typename std::enable_if<
+ std::is_same<T, uint8x16_t>::value || std::is_same<T, int8x16_t>::value,
+ typename std::conditional<std::is_same<T, uint8x16_t>::value, uint8x8_t, int8x8_t>::type>::type
+calculate_min(T in)
+{
+ auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
+ pmin = wrapper::vpmin(pmin, pmin);
+ pmin = wrapper::vpmin(pmin, pmin);
+ return wrapper::vpmin(pmin, pmin);
+}
+
+// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
+template <typename T>
+inline typename std::enable_if<
+ std::is_same<T, float32x4_t>::value || std::is_same<T, int32x4_t>::value,
+ typename std::conditional<std::is_same<T, float32x4_t>::value, float32x2_t, int32x2_t>::type>::type
+calculate_max(T in)
+{
+ auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
+ return wrapper::vpmax(pmax, pmax);
+}
+
+// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
+template <typename T>
+inline typename std::enable_if<
+ std::is_same<T, uint8x16_t>::value || std::is_same<T, int8x16_t>::value,
+ typename std::conditional<std::is_same<T, uint8x16_t>::value, uint8x8_t, int8x8_t>::type>::type
+calculate_max(T in)
+{
+ auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
+ pmax = wrapper::vpmax(pmax, pmax);
+ pmax = wrapper::vpmax(pmax, pmax);
+ return wrapper::vpmax(pmax, pmax);
+}
+
+template <typename T>
+uint32_t calculate_vector_index(uint32x4x4_t vec_res_idx, T vec_res_value, ReductionOperation op)
+{
+ uint32x4_t res_idx_mask{0};
+ uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);
+
+ if (op == ReductionOperation::ARG_IDX_MIN)
+ {
+ auto pmin = calculate_min(vec_res_value);
+ auto mask = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
+ res_idx_mask = wrapper::vand(vec_res_idx.val[0], mask);
+ }
+ else
+ {
+ auto pmax = calculate_max(vec_res_value);
+ auto mask = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
+ res_idx_mask = wrapper::vand(vec_res_idx.val[0], mask);
+ }
+
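+ // Adding 0xFFFFFFFF (-1) maps matching lanes to (index - 1) and non-matching lanes (0) to 0xFFFFFFFF,
+ // so the pairwise minimum picks the smallest matching index; subtracting 0xFFFFFFFF adds the 1 back.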
+ res_idx_mask = wrapper::vadd(res_idx_mask, mask_ones);
+ auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask), wrapper::vgetlow(res_idx_mask));
+ pmin = wrapper::vpmin(pmin, pmin);
+ uint32_t res = wrapper::vgetlane(pmin, 0);
+
+ return (res - 0xFFFFFFFF);
+}
+
+template <typename T>
+uint32_t calculate_vector_index_quantized(uint32x4x4_t vec_res_idx, T vec_res_value, ReductionOperation op)
+{
+ uint32x4x4_t res_idx_mask{{0}};
+ uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);
+ uint8x16_t mask_u8{0};
+ if (op == ReductionOperation::ARG_IDX_MIN)
+ {
+ auto pmin = calculate_min(vec_res_value);
+ mask_u8 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
+ }
+ else
+ {
+ auto pmax = calculate_max(vec_res_value);
+ mask_u8 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
+ }
+
+ // Widen vectors
+ auto wide_u16_1 =
+ wrapper::vorr(vshll_n_u8(wrapper::vgetlow(mask_u8), 8), wrapper::vmovl(wrapper::vgetlow(mask_u8)));
+ auto wide_u16_2 =
+ wrapper::vorr(vshll_n_u8(wrapper::vgethigh(mask_u8), 8), wrapper::vmovl(wrapper::vgethigh(mask_u8)));
+ auto wide_u32_1 =
+ wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_1), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_1)));
+ auto wide_u32_2 =
+ wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_1), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_1)));
+ auto wide_u32_3 =
+ wrapper::vorr(vshll_n_u16(wrapper::vgetlow(wide_u16_2), 16), wrapper::vmovl(wrapper::vgetlow(wide_u16_2)));
+ auto wide_u32_4 =
+ wrapper::vorr(vshll_n_u16(wrapper::vgethigh(wide_u16_2), 16), wrapper::vmovl(wrapper::vgethigh(wide_u16_2)));
+ res_idx_mask.val[0] = wrapper::vand(vec_res_idx.val[0], wide_u32_1);
+ res_idx_mask.val[1] = wrapper::vand(vec_res_idx.val[1], wide_u32_2);
+ res_idx_mask.val[2] = wrapper::vand(vec_res_idx.val[2], wide_u32_3);
+ res_idx_mask.val[3] = wrapper::vand(vec_res_idx.val[3], wide_u32_4);
+ res_idx_mask.val[0] = wrapper::vadd(res_idx_mask.val[0], mask_ones);
+ res_idx_mask.val[1] = wrapper::vadd(res_idx_mask.val[1], mask_ones);
+ res_idx_mask.val[2] = wrapper::vadd(res_idx_mask.val[2], mask_ones);
+ res_idx_mask.val[3] = wrapper::vadd(res_idx_mask.val[3], mask_ones);
+
+ uint32_t res = 0xFFFFFFFF;
+ int iter = 0;
+ do
+ {
+ auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask.val[iter]), wrapper::vgetlow(res_idx_mask.val[iter]));
+ pmin = wrapper::vpmin(pmin, pmin);
+ res = std::min(wrapper::vgetlane(pmin, 0), res);
+ iter++;
+ } while (iter < 4);
+
+ return (res - 0xFFFFFFFF);
+}
+
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+template <>
+uint32x4x4_t inline calculate_index(
+ uint32_t idx, float16x8_t a, float16x8_t b, uint32x4x4_t c, ReductionOperation op, int axis)
+{
+ uint32x4x2_t mask{0};
+ uint16x8_t mask_u16{0};
+ if (op == ReductionOperation::ARG_IDX_MIN)
+ {
+ mask_u16 = wrapper::vcgt(b, a);
+ }
+ else
+ {
+ mask_u16 = wrapper::vclt(b, a);
+ }
+ mask.val[0] = wrapper::vmovl(wrapper::vgetlow(mask_u16));
+ mask.val[1] = wrapper::vmovl(wrapper::vgethigh(mask_u16));
+ uint32x4x2_t vec_idx = {{{idx + 0, idx + 1, idx + 2, idx + 3}, {idx + 4, idx + 5, idx + 6, idx + 7}}};
+ if (axis != 0)
+ {
+ vec_idx.val[0] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
+ vec_idx.val[1] = wrapper::vdup_n(idx, wrapper::traits::vector_128_tag{});
+ }
+ uint32x4x4_t res = {wrapper::vbsl(mask.val[0], vec_idx.val[0], c.val[0]),
+ wrapper::vbsl(mask.val[1], vec_idx.val[1], c.val[1]), 0, 0};
+
+ return res;
+}
+
+// Helper function to calculate the minimum value of the input vector. All the elements in the output vector contain the min value.
+inline float16x4_t calculate_min(float16x8_t in)
+{
+ auto pmin = wrapper::vpmin(wrapper::vgethigh(in), wrapper::vgetlow(in));
+ pmin = wrapper::vpmin(pmin, pmin);
+ return wrapper::vpmin(pmin, pmin);
+}
+// Helper function to calculate the maximum value of the input vector. All the elements in the output vector contain the max value.
+inline float16x4_t calculate_max(float16x8_t in)
+{
+ auto pmax = wrapper::vpmax(wrapper::vgethigh(in), wrapper::vgetlow(in));
+ pmax = wrapper::vpmax(pmax, pmax);
+ return wrapper::vpmax(pmax, pmax);
+}
+
+template <>
+inline uint32_t calculate_vector_index(uint32x4x4_t vec_res_idx, float16x8_t vec_res_value, ReductionOperation op)
+{
+ uint32x4x2_t res_idx_mask{0};
+ uint32x4_t mask_ones = vdupq_n_u32(0xFFFFFFFF);
+ uint16x8_t mask_u16;
+ if (op == ReductionOperation::ARG_IDX_MIN)
+ {
+ auto pmin = calculate_min(vec_res_value);
+ mask_u16 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmin, pmin));
+ }
+ else
+ {
+ auto pmax = calculate_max(vec_res_value);
+ mask_u16 = wrapper::vceq(vec_res_value, wrapper::vcombine(pmax, pmax));
+ }
+
+ // Widen vectors
+ auto wide_u32_1 =
+ wrapper::vorr(vshll_n_u16(wrapper::vgetlow(mask_u16), 8), wrapper::vmovl(wrapper::vgetlow(mask_u16)));
+ auto wide_u32_2 =
+ wrapper::vorr(vshll_n_u16(wrapper::vgethigh(mask_u16), 8), wrapper::vmovl(wrapper::vgethigh(mask_u16)));
+ res_idx_mask.val[0] = wrapper::vand(vec_res_idx.val[0], wide_u32_1);
+ res_idx_mask.val[1] = wrapper::vand(vec_res_idx.val[1], wide_u32_2);
+ res_idx_mask.val[0] = wrapper::vadd(res_idx_mask.val[0], mask_ones);
+ res_idx_mask.val[1] = wrapper::vadd(res_idx_mask.val[1], mask_ones);
+
+ uint32_t res = 0xFFFFFFFF;
+ uint32_t iter = 0;
+ do
+ {
+ auto pmin = wrapper::vpmin(wrapper::vgethigh(res_idx_mask.val[iter]), wrapper::vgetlow(res_idx_mask.val[iter]));
+ pmin = wrapper::vpmin(pmin, pmin);
+ res = std::min(wrapper::vgetlane(pmin, 0), res);
+ iter++;
+ } while (iter < 2);
+
+ return (res - 0xFFFFFFFF);
+}
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
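+// Dispatches a reduction functor over the chosen axis: the corresponding window dimension is collapsed
+// to a single step and the functor traverses that axis internally.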
+template <class F>
+class Reducer
+{
+public:
+ static void reduceX(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
+ {
+ // Set out window
+ Window out_window(window);
+ out_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ f(window, out_window, input, output, op);
+ }
+ static void reduceY(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
+ {
+ // Set in window
+ Window in_window(window);
+ Window out_window(window);
+
+ in_window.set(Window::DimY, Window::Dimension(0, 1, 1));
+ out_window.set(Window::DimY, Window::Dimension(0, output->info()->dimension(1), output->info()->dimension(1)));
+
+ f(in_window, out_window, input, output, 1, op);
+ }
+ static void reduceZ(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
+ {
+ // Set in window
+ Window in_window(window);
+ Window out_window(window);
+
+ in_window.set(Window::DimZ, Window::Dimension(0, 1, 1));
+ out_window.set(Window::DimZ, Window::Dimension(0, output->info()->dimension(2), output->info()->dimension(2)));
+
+ f(in_window, out_window, input, output, 2, op);
+ }
+ static void reduceW(const Window &window, const ITensor *input, ITensor *output, F f, const ReductionOperation op)
+ {
+ // Set in/out window
+ Window in_window(window);
+ Window out_window(window);
+
+ in_window.set(3, Window::Dimension(0, 1, 1));
+ out_window.set(3, Window::Dimension(0, 1, 1));
+
+ f(in_window, out_window, input, output, 3, op);
+ }
+};
+
+template <typename T, int S>
+struct RedOpX
+{
+ /** SIMD vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+
+ inline void operator()(
+ const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, const ReductionOperation op)
+ {
+ const size_t input_dim_0 = in->info()->dimension(0);
+ const int window_step_x = 16 / sizeof(T);
+ const auto window_start_x = static_cast<int>(in_window.x().start());
+ const auto window_end_x = static_cast<int>(in_window.x().end());
+
+ Window in_win_no_pad = in_window;
+ in_win_no_pad.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input(in, in_win_no_pad);
+ Iterator output(out, out_window);
+
+ execute_window_loop(
+ in_win_no_pad,
+ [&](const Coordinates &)
+ {
+ const auto input_ptr = reinterpret_cast<const T *>(input.ptr());
+
+ auto init_res_value = static_cast<T>(0.f);
+ switch (op)
+ {
+ case ReductionOperation::ARG_IDX_MAX:
+ case ReductionOperation::ARG_IDX_MIN:
+ case ReductionOperation::MIN:
+ case ReductionOperation::MAX:
+ {
+ init_res_value = static_cast<T>(*input_ptr);
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ init_res_value = static_cast<T>(1.f);
+ break;
+ }
+ default:
+ break;
+ }
+ auto vec_res_value = wrapper::vdup_n(init_res_value, ExactTagType{});
+ uint32x4x4_t vec_res_idx{{0}};
+
+ // Compute window_step_x elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto vec_elements = wrapper::vloadq(input_ptr + x);
+ switch (op)
+ {
+ case ReductionOperation::SUM_SQUARE:
+ vec_res_value = wrapper::vadd(wrapper::vmul(vec_elements, vec_elements), vec_res_value);
+ break;
+ case ReductionOperation::MEAN_SUM:
+ case ReductionOperation::SUM:
+ vec_res_value = wrapper::vadd(vec_elements, vec_res_value);
+ break;
+ case ReductionOperation::PROD:
+ vec_res_value = wrapper::vmul(vec_elements, vec_res_value);
+ break;
+ case ReductionOperation::ARG_IDX_MIN:
+ {
+ auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
+ vec_res_idx = calculate_index<decltype(vec_res_value)>(x, temp_vec_res_value, vec_res_value,
+ vec_res_idx, op, 0);
+ vec_res_value = temp_vec_res_value;
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
+ vec_res_idx = calculate_index<decltype(vec_res_value)>(x, temp_vec_res_value, vec_res_value,
+ vec_res_idx, op, 0);
+ vec_res_value = temp_vec_res_value;
+ break;
+ }
+ case ReductionOperation::MIN:
+ {
+ vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
+ break;
+ }
+ case ReductionOperation::MAX:
+ {
+ vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
+ switch (op)
+ {
+ case ReductionOperation::SUM:
+ case ReductionOperation::MEAN_SUM:
+ case ReductionOperation::SUM_SQUARE:
+ {
+#ifdef ARM_COMPUTE_DEBUG_ENABLED
+ auto res = static_cast<T>(0.f);
+ for (int i = 0; i < S; ++i)
+ {
+ res += wrapper::vgetlane(vec_res_value, i);
+ }
+#else // ARM_COMPUTE_DEBUG_ENABLED
+ auto carry_res =
+ wrapper::vpadd(wrapper::vgethigh(vec_res_value), wrapper::vgetlow(vec_res_value));
+ for (int i = 0; i < S / 4; ++i)
+ {
+ carry_res = wrapper::vpadd(carry_res, carry_res);
+ }
+ auto res = wrapper::vgetlane(carry_res, 0);
+#endif // ARM_COMPUTE_DEBUG_ENABLED
+ if (op == ReductionOperation::SUM_SQUARE)
+ {
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ res += (*(input_ptr + x)) * (*(input_ptr + x));
+ }
+ }
+ else
+ {
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ res += *(input_ptr + x);
+ }
+ }
+
+ if (op == ReductionOperation::MEAN_SUM)
+ {
+ res /= input_dim_0;
+ }
+
+ *(reinterpret_cast<T *>(output.ptr())) = res;
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ auto carry_res =
+ wrapper::vmul(wrapper::vgethigh(vec_res_value), wrapper::vgetlow(vec_res_value));
+ T res = 1;
+ for (int i = 0; i < S / 2; ++i)
+ {
+ res *= wrapper::vgetlane(carry_res, i);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ res *= *(input_ptr + x);
+ }
+
+ *(reinterpret_cast<T *>(output.ptr())) = res;
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MIN:
+ {
+ auto idx = calculate_vector_index<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
+ auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ if (*(input_ptr + x) < res)
+ {
+ idx = x;
+ res = *(input_ptr + x);
+ }
+ }
+ *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ auto idx = calculate_vector_index<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
+ auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ if (*(input_ptr + x) > res)
+ {
+ idx = x;
+ res = *(input_ptr + x);
+ }
+ }
+ *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
+ break;
+ }
+ case ReductionOperation::MIN:
+ {
+ auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ res = *(input_ptr + x) < res ? *(input_ptr + x) : res;
+ }
+ *(reinterpret_cast<T *>(output.ptr())) = res;
+ break;
+ }
+ case ReductionOperation::MAX:
+ {
+ auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ res = *(input_ptr + x) > res ? *(input_ptr + x) : res;
+ }
+ *(reinterpret_cast<T *>(output.ptr())) = res;
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ },
+ input, output);
+ }
+};
+
+template <typename T>
+struct RedOpX_quantized
+{
+ inline void operator()(
+ const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, const ReductionOperation op)
+ {
+ using PromotedType = typename wrapper::traits::promote<typename wrapper::traits::promote<T>::type>::type;
+
+ const auto oq_info = out->info()->quantization_info().uniform();
+
+ const TensorInfo in_info = *(in->info());
+ const UniformQuantizationInfo iq_info = in_info.quantization_info().uniform();
+
+ const int window_step_x = 16 / sizeof(T);
+ const auto window_start_x = static_cast<int>(in_window.x().start());
+ const auto window_end_x = static_cast<int>(in_window.x().end());
+
+ Window in_win_no_pad = in_window;
+ in_win_no_pad.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ Iterator input(in, in_win_no_pad);
+ Iterator output(out, out_window);
+
+ const auto in_offset = static_cast<float>(iq_info.offset);
+ const float in_scale = iq_info.scale;
+
+ const auto out_offset = static_cast<float>(oq_info.offset);
+ const float out_scale = oq_info.scale;
+
+ const auto num_elements = static_cast<float>(in_info.dimension(0));
+
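+ // MEAN_SUM folds dequantization, averaging and requantization into one affine transform:
+ // mean = in_scale * (sum / N - in_offset); requantized = mean / out_scale + out_offset = A * sum + B.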
+ const float A = in_scale / (out_scale * num_elements);
+ const float B = out_offset - (in_scale * in_offset) / (out_scale);
+
+ execute_window_loop(
+ in_win_no_pad,
+ [&](const Coordinates &)
+ {
+ const auto input_ptr = reinterpret_cast<T *>(input.ptr());
+
+ auto vec_res_value1 =
+ wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
+ auto vec_res_value2 =
+ wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
+ auto vec_res_value3 =
+ wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
+ auto vec_res_value4 =
+ wrapper::vdup_n(static_cast<PromotedType>(0.f), wrapper::traits::vector_128_tag{});
+
+ auto vec_res_value1_f = vdupq_n_f32(static_cast<float>(1.f));
+ auto vec_res_value2_f = vdupq_n_f32(static_cast<float>(1.f));
+ auto vec_res_value3_f = vdupq_n_f32(static_cast<float>(1.f));
+ auto vec_res_value4_f = vdupq_n_f32(static_cast<float>(1.f));
+
+ typename wrapper::traits::neon_vector<T, 16>::type vec_res_value = {0};
+
+ if (op == ReductionOperation::ARG_IDX_MAX || op == ReductionOperation::ARG_IDX_MIN ||
+ op == ReductionOperation::MIN || op == ReductionOperation::MAX)
+ {
+ vec_res_value = wrapper::vdup_n(*input_ptr, wrapper::traits::vector_128_tag{});
+ }
+
+ uint32x4x4_t vec_res_idx{{0}};
+ // Compute window_step_x elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ const auto vec_elements = wrapper::vloadq(input_ptr + x);
+ switch (op)
+ {
+ case ReductionOperation::SUM:
+ case ReductionOperation::MEAN_SUM:
+ {
+ const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
+ const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
+
+ const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
+ const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
+ const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
+ const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
+
+ vec_res_value1 = wrapper::vadd(temp32x4t_1, vec_res_value1);
+ vec_res_value2 = wrapper::vadd(temp32x4t_2, vec_res_value2);
+ vec_res_value3 = wrapper::vadd(temp32x4t_3, vec_res_value3);
+ vec_res_value4 = wrapper::vadd(temp32x4t_4, vec_res_value4);
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
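+ // PROD is not linear in the quantized domain: de-quantize to float, accumulate the product, then re-quantize at the end.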
+ const auto offset32x4f_4 = vdupq_n_f32(iq_info.offset);
+ const auto scale32x4f_4 = vdupq_n_f32(iq_info.scale);
+
+ const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
+ const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
+
+ const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
+ const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
+ const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
+ const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
+
+ auto temp32x4f_1 = wrapper::vcvt<float>(temp32x4t_1);
+ auto temp32x4f_2 = wrapper::vcvt<float>(temp32x4t_2);
+ auto temp32x4f_3 = wrapper::vcvt<float>(temp32x4t_3);
+ auto temp32x4f_4 = wrapper::vcvt<float>(temp32x4t_4);
+
+ // De-quantize vec_elements
+ temp32x4f_1 = vmulq_f32(vsubq_f32(temp32x4f_1, offset32x4f_4), scale32x4f_4);
+ temp32x4f_2 = vmulq_f32(vsubq_f32(temp32x4f_2, offset32x4f_4), scale32x4f_4);
+ temp32x4f_3 = vmulq_f32(vsubq_f32(temp32x4f_3, offset32x4f_4), scale32x4f_4);
+ temp32x4f_4 = vmulq_f32(vsubq_f32(temp32x4f_4, offset32x4f_4), scale32x4f_4);
+
+ vec_res_value1_f = vmulq_f32(temp32x4f_1, vec_res_value1_f);
+ vec_res_value2_f = vmulq_f32(temp32x4f_2, vec_res_value2_f);
+ vec_res_value3_f = vmulq_f32(temp32x4f_3, vec_res_value3_f);
+ vec_res_value4_f = vmulq_f32(temp32x4f_4, vec_res_value4_f);
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MIN:
+ {
+ auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
+ vec_res_idx = calculate_index_quantized<decltype(vec_res_value)>(
+ x, temp_vec_res_value, vec_res_value, vec_res_idx, op, 0);
+ vec_res_value = temp_vec_res_value;
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
+ vec_res_idx = calculate_index_quantized<decltype(vec_res_value)>(
+ x, temp_vec_res_value, vec_res_value, vec_res_idx, op, 0);
+ vec_res_value = temp_vec_res_value;
+ break;
+ }
+ case ReductionOperation::MIN:
+ {
+ vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
+ break;
+ }
+ case ReductionOperation::MAX:
+ {
+ vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
+ switch (op)
+ {
+ case ReductionOperation::ARG_IDX_MIN:
+ {
+ auto idx =
+ calculate_vector_index_quantized<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
+ auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ if (*(input_ptr + x) < res)
+ {
+ idx = x;
+ res = *(input_ptr + x);
+ }
+ }
+ *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ auto idx =
+ calculate_vector_index_quantized<decltype(vec_res_value)>(vec_res_idx, vec_res_value, op);
+ auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ if (*(input_ptr + x) > res)
+ {
+ idx = x;
+ res = *(input_ptr + x);
+ }
+ }
+ *(reinterpret_cast<uint32_t *>(output.ptr())) = idx;
+ break;
+ }
+ case ReductionOperation::MIN:
+ {
+ auto res = static_cast<T>(wrapper::vgetlane(calculate_min(vec_res_value), 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ res = *(input_ptr + x) < res ? *(input_ptr + x) : res;
+ }
+ *(reinterpret_cast<T *>(output.ptr())) = res;
+ break;
+ }
+ case ReductionOperation::MAX:
+ {
+ auto res = static_cast<T>(wrapper::vgetlane(calculate_max(vec_res_value), 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ res = *(input_ptr + x) > res ? *(input_ptr + x) : res;
+ }
+ *(reinterpret_cast<T *>(output.ptr())) = res;
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ auto carry_res = wrapper::vmul(vec_res_value1_f, vec_res_value2_f);
+ carry_res = wrapper::vmul(carry_res, vec_res_value3_f);
+ carry_res = wrapper::vmul(carry_res, vec_res_value4_f);
+
+ float res = wrapper::vgetlane(carry_res, 0);
+ res *= wrapper::vgetlane(carry_res, 1);
+ res *= wrapper::vgetlane(carry_res, 2);
+ res *= wrapper::vgetlane(carry_res, 3);
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ // De-quantize input
+ if (std::is_same<T, uint8_t>::value)
+ {
+ res *= dequantize_qasymm8(*(input_ptr + x), iq_info);
+ }
+ else
+ {
+ res *= dequantize_qasymm8_signed(*(input_ptr + x), iq_info);
+ }
+ }
+
+ // Re-quantize result
+ if (std::is_same<T, uint8_t>::value)
+ {
+ res = quantize_qasymm8(res, iq_info);
+ }
+ else
+ {
+ res = quantize_qasymm8_signed(res, iq_info);
+ }
+
+ *reinterpret_cast<T *>(output.ptr()) = static_cast<T>(res);
+ break;
+ }
+ case ReductionOperation::SUM:
+ case ReductionOperation::MEAN_SUM:
+ {
+ auto carry_res = wrapper::vadd(vec_res_value1, vec_res_value2);
+ carry_res = wrapper::vadd(carry_res, vec_res_value3);
+ carry_res = wrapper::vadd(carry_res, vec_res_value4);
+
+ auto carry_paddition =
+ wrapper::vpadd(wrapper::vgethigh(carry_res), wrapper::vgetlow(carry_res));
+ carry_paddition = wrapper::vpadd(carry_paddition, carry_paddition);
+ auto res = static_cast<int32_t>(wrapper::vgetlane(carry_paddition, 0));
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ res += *(input_ptr + x);
+ }
+
+ if (op == ReductionOperation::MEAN_SUM)
+ {
+ const int32_t resFinal = A * (static_cast<float>(res)) + B;
+
+ *reinterpret_cast<T *>(output.ptr()) = utils::cast::saturate_cast<T>(resFinal);
+ }
+ else
+ {
+ // Summing N quantized values accumulates N * offset; keep a single offset in the result by subtracting (N - 1) * offset
+ res -= (in_info.dimension(0) - 1) * iq_info.offset;
+ *reinterpret_cast<T *>(output.ptr()) = utils::cast::saturate_cast<T>(res);
+ }
+
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ },
+ input, output);
+ }
+};
+
+template <typename T, int S>
+struct RedOpYZW
+{
+ /** SIMD vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+ using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;
+
+ inline void operator()(const Window &in_window,
+ Window &out_window,
+ const ITensor *in,
+ ITensor *out,
+ int axis,
+ const ReductionOperation op)
+ {
+ const TensorInfo in_info = *(in->info());
+ const int window_step_x = 16 / sizeof(T);
+ const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
+ const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
+ // As the window is split over the x-axis, set the correct split window start and end.
+ const auto window_start_x = static_cast<int>(0);
+ const auto window_end_x = static_cast<int>(in_window.shape().x());
+
+ Window in_win_no_pad = in_window;
+ in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
+ Window out_win_no_pad = out_window;
+ out_win_no_pad.set(Window::DimX,
+ Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));
+
+ Iterator input(in, in_win_no_pad);
+ Iterator output(out, out_win_no_pad);
+
+ execute_window_loop(
+ in_win_no_pad,
+ [&](const Coordinates &)
+ {
+ const auto input_ptr = reinterpret_cast<T *>(input.ptr());
+
+ // Compute window_step_x elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ neon_vector vec_res_value = {0};
+ switch (op)
+ {
+ case ReductionOperation::ARG_IDX_MAX:
+ case ReductionOperation::ARG_IDX_MIN:
+ case ReductionOperation::MIN:
+ case ReductionOperation::MAX:
+ {
+ vec_res_value = wrapper::vloadq(input_ptr + x);
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ vec_res_value = wrapper::vdup_n(static_cast<T>(1.f), ExactTagType{});
+ break;
+ }
+ default:
+ {
+ vec_res_value = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+ break;
+ }
+ }
+ uint32x4x4_t vec_res_idx{{0}};
+
+ for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
+ {
+ const T *in_ptr =
+ reinterpret_cast<T *>(input.ptr() + x * sizeof(T) + in_info.strides_in_bytes()[axis] * dim);
+ const auto vec_elements = wrapper::vloadq(in_ptr);
+ switch (op)
+ {
+ case ReductionOperation::SUM:
+ case ReductionOperation::MEAN_SUM:
+ vec_res_value = wrapper::vadd(vec_elements, vec_res_value);
+ break;
+ case ReductionOperation::SUM_SQUARE:
+ vec_res_value = wrapper::vadd(wrapper::vmul(vec_elements, vec_elements), vec_res_value);
+ break;
+ case ReductionOperation::PROD:
+ vec_res_value = wrapper::vmul(vec_elements, vec_res_value);
+ break;
+ case ReductionOperation::ARG_IDX_MIN:
+ {
+ auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
+ vec_res_idx =
+ calculate_index(dim, temp_vec_res_value, vec_res_value, vec_res_idx, op, axis);
+ vec_res_value = temp_vec_res_value;
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
+ vec_res_idx =
+ calculate_index(dim, temp_vec_res_value, vec_res_value, vec_res_idx, op, axis);
+ vec_res_value = temp_vec_res_value;
+ break;
+ }
+ case ReductionOperation::MIN:
+ {
+ vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
+ break;
+ }
+ case ReductionOperation::MAX:
+ {
+ vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
+ if (op == ReductionOperation::MEAN_SUM)
+ {
+ auto vec_width_inv =
+ wrapper::vinv(wrapper::vdup_n(static_cast<T>(in_info.dimension(axis)), ExactTagType{}));
+ vec_res_value = wrapper::vmul(vec_res_value, vec_width_inv);
+ }
+
+ if (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX)
+ {
+ wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr()) + x, vec_res_idx.val[0]);
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ if (std::is_same<T, float16_t>::value)
+ {
+ wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr()) + x + 4, vec_res_idx.val[1]);
+ }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ }
+ else
+ {
+ wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x * sizeof(T)), vec_res_value);
+ }
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ auto res_value = 0.f;
+ switch (op)
+ {
+ case ReductionOperation::ARG_IDX_MAX:
+ case ReductionOperation::ARG_IDX_MIN:
+ case ReductionOperation::MIN:
+ case ReductionOperation::MAX:
+ {
+ res_value = *(input_ptr + x);
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ res_value = static_cast<T>(1.f);
+ break;
+ }
+ default:
+ {
+ res_value = static_cast<T>(0.f);
+ break;
+ }
+ }
+
+ uint32_t res_idx = 0;
+ for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
+ {
+ const T *in_ptr =
+ reinterpret_cast<T *>(input.ptr() + x * sizeof(T) + in_info.strides_in_bytes()[axis] * dim);
+
+ switch (op)
+ {
+ case ReductionOperation::SUM:
+ case ReductionOperation::MEAN_SUM:
+ res_value += *in_ptr;
+ break;
+ case ReductionOperation::SUM_SQUARE:
+ res_value += *in_ptr * *in_ptr;
+ break;
+ case ReductionOperation::PROD:
+ res_value *= *in_ptr;
+ break;
+ case ReductionOperation::ARG_IDX_MIN:
+ {
+ if (*in_ptr < res_value)
+ {
+ res_value = *in_ptr;
+ res_idx = dim;
+ }
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ if (*in_ptr > res_value)
+ {
+ res_value = *in_ptr;
+ res_idx = dim;
+ }
+ break;
+ }
+ case ReductionOperation::MIN:
+ {
+ res_value = *in_ptr < res_value ? *in_ptr : res_value;
+ break;
+ }
+ case ReductionOperation::MAX:
+ {
+ res_value = *in_ptr > res_value ? *in_ptr : res_value;
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
+ if (op == ReductionOperation::MEAN_SUM)
+ {
+ res_value /= in_info.dimension(axis);
+ }
+
+ if (op == ReductionOperation::ARG_IDX_MIN || op == ReductionOperation::ARG_IDX_MAX)
+ {
+ *(reinterpret_cast<uint32_t *>(output.ptr()) + x) = res_idx;
+ }
+ else
+ {
+ *(reinterpret_cast<T *>(output.ptr() + x * sizeof(T))) = res_value;
+ }
+ }
+ },
+ input, output);
+ }
+};
+
+template <typename T, int S, int axis, ReductionOperation op>
+struct RedOpYZW_complex
+{
+ /** SIMD vector tag type. */
+ using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;
+ using neon_vector = typename wrapper::traits::neon_vector<T, S>::type;
+
+ inline void operator()(
+ const Window &in_window, Window &out_window, const ITensor *in, ITensor *out, int, const ReductionOperation)
+ {
+ ARM_COMPUTE_ERROR_ON(axis != 2);
+ ARM_COMPUTE_ERROR_ON(op != ReductionOperation::SUM);
+
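+ // Complex input stores interleaved (real, imaginary) pairs, hence the 2 * x element addressing below.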
+ const TensorInfo in_info = *(in->info());
+ const size_t stride_z = in_info.strides_in_bytes()[axis];
+ const int window_step_x = 16 / sizeof(T);
+ const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
+ const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
+ // As the window is split over the x-axis, set the correct split window start and end.
+ const auto window_start_x = static_cast<int>(0);
+ const auto window_end_x = static_cast<int>(in_window.shape().x());
+
+ Window in_win_no_pad = in_window;
+ in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
+ Window out_win_no_pad = out_window;
+ out_win_no_pad.set(Window::DimX,
+ Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));
+
+ Iterator input(in, in_win_no_pad);
+ Iterator output(out, out_win_no_pad);
+
+ execute_window_loop(
+ in_win_no_pad,
+ [&](const Coordinates &)
+ {
+ // Compute window_step_x elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ neon_vector vec_res_value_0 = {0};
+ neon_vector vec_res_value_1 = {0};
+
+ vec_res_value_0 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+ vec_res_value_1 = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+
+ T *out_ptr = reinterpret_cast<T *>(output.ptr() + 2 * x * sizeof(T));
+ for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
+ {
+ T *in_ptr_0 = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + stride_z * dim);
+ T *in_ptr_1 = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + 16 + stride_z * dim);
+
+ const auto vec_elements_0 = wrapper::vloadq(in_ptr_0);
+ const auto vec_elements_1 = wrapper::vloadq(in_ptr_1);
+
+ vec_res_value_0 = wrapper::vadd(vec_elements_0, vec_res_value_0);
+ vec_res_value_1 = wrapper::vadd(vec_elements_1, vec_res_value_1);
+ }
+
+ wrapper::vstore(out_ptr, vec_res_value_0);
+ wrapper::vstore(out_ptr + 4, vec_res_value_1);
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ auto res_value_0 = 0.f;
+ auto res_value_1 = 0.f;
+
+ T *out_ptr = reinterpret_cast<T *>(output.ptr() + 2 * x * sizeof(T));
+ for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
+ {
+ T *in_ptr = reinterpret_cast<T *>(input.ptr() + 2 * x * sizeof(T) + stride_z * dim);
+ res_value_0 += *in_ptr;
+ res_value_1 += *(in_ptr + 1);
+ }
+ *out_ptr = res_value_0;
+ *(out_ptr + 1) = res_value_1;
+ }
+ },
+ input, output);
+ }
+};
+
+template <typename T>
+struct RedOpYZW_quantized
+{
+ inline void operator()(const Window &in_window,
+ Window &out_window,
+ const ITensor *in,
+ ITensor *out,
+ int axis,
+ const ReductionOperation op)
+ {
+ const TensorInfo in_info = *(in->info());
+ const UniformQuantizationInfo iq_info = in_info.quantization_info().uniform();
+ using PromotedType = typename wrapper::traits::promote<typename wrapper::traits::promote<T>::type>::type;
+
+ const auto oq_info = out->info()->quantization_info().uniform();
+
+ const int window_step_x = 16 / sizeof(T);
+ const auto window_start_x_tmp = static_cast<int>(in_window.x().start());
+ const auto window_end_x_tmp = static_cast<int>(in_window.x().end());
+ // As the window is split over the x-axis, set the correct split window start and end.
+ const auto window_start_x = static_cast<int>(0);
+ const auto window_end_x = static_cast<int>(in_window.shape().x());
+
+ Window in_win_no_pad = in_window;
+ in_win_no_pad.set(Window::DimX, Window::Dimension(window_start_x_tmp, window_end_x_tmp, in_window.shape().x()));
+ Window out_win_no_pad = out_window;
+ out_win_no_pad.set(Window::DimX,
+ Window::Dimension(window_start_x_tmp, window_end_x_tmp, out_window.shape().x()));
+
+ Iterator input(in, in_win_no_pad);
+ Iterator output(out, out_win_no_pad);
+
+ using vector_type =
+ typename wrapper::traits::neon_bitvector<PromotedType, wrapper::traits::BitWidth::W128>::type;
+ using vector_type_f = typename wrapper::traits::neon_vector<float, 4>::type;
+
+ vector_type vec_res_value1{};
+ vector_type vec_res_value2{};
+ vector_type vec_res_value3{};
+ vector_type vec_res_value4{};
+
+ vector_type_f vec_res_value1_f{};
+ vector_type_f vec_res_value2_f{};
+ vector_type_f vec_res_value3_f{};
+ vector_type_f vec_res_value4_f{};
+
+ const float in_offset = static_cast<float>(iq_info.offset);
+ const float in_scale = iq_info.scale;
+
+ const float out_offset = static_cast<float>(oq_info.offset);
+ const float out_scale = oq_info.scale;
+
+ const float num_elements = static_cast<float>(in_info.dimension(axis));
+
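+ // Same affine folding as in RedOpX_quantized: the MEAN_SUM result is A * sum + B.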
+ const float A = in_scale / (out_scale * num_elements);
+ const float B = out_offset - (in_scale * in_offset) / (out_scale);
+
+ const auto vec_A = wrapper::vdup_n(static_cast<float>(A), wrapper::traits::vector_128_tag{});
+ const auto vec_B = wrapper::vdup_n(static_cast<float>(B), wrapper::traits::vector_128_tag{});
+
+ execute_window_loop(
+ in_win_no_pad,
+ [&](const Coordinates &)
+ {
+ const auto input_ptr = reinterpret_cast<T *>(input.ptr());
+
+ // Compute window_step_x elements per iteration
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ uint32x4x4_t vec_res_idx{{0}};
+ vec_res_value1 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
+ vec_res_value2 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
+ vec_res_value3 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
+ vec_res_value4 = wrapper::vdup_n(static_cast<PromotedType>(0), wrapper::traits::vector_128_tag{});
+
+ vec_res_value1_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
+ vec_res_value2_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
+ vec_res_value3_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
+ vec_res_value4_f = wrapper::vdup_n(static_cast<float>(1), wrapper::traits::vector_128_tag{});
+
+ auto vec_res_value = wrapper::vloadq(input_ptr + x);
+
+ for (unsigned int index_dim = 0; index_dim < in_info.dimension(axis); ++index_dim)
+ {
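+ // T is an 8-bit type here, so byte strides can be added directly to the element pointer.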
+ const T *in_ptr = input_ptr + x + in_info.strides_in_bytes()[axis] * index_dim;
+ const auto vec_elements = wrapper::vloadq(in_ptr);
+ switch (op)
+ {
+ case ReductionOperation::SUM:
+ case ReductionOperation::MEAN_SUM:
+ {
+ const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
+ const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
+
+ const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
+ const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
+ const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
+ const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
+
+ vec_res_value1 = wrapper::vadd(temp32x4t_1, vec_res_value1);
+ vec_res_value2 = wrapper::vadd(temp32x4t_2, vec_res_value2);
+ vec_res_value3 = wrapper::vadd(temp32x4t_3, vec_res_value3);
+ vec_res_value4 = wrapper::vadd(temp32x4t_4, vec_res_value4);
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ const auto offset32x4f_4 = wrapper::vdup_n(static_cast<float>(iq_info.offset),
+ wrapper::traits::vector_128_tag{});
+ const auto scale32x4f_4 =
+ wrapper::vdup_n(iq_info.scale, wrapper::traits::vector_128_tag{});
+
+ const auto temp16x8t_1 = wrapper::vmovl(wrapper::vgetlow(vec_elements));
+ const auto temp16x8t_2 = wrapper::vmovl(wrapper::vgethigh(vec_elements));
+
+ const auto temp32x4t_1 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_1));
+ const auto temp32x4t_2 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_1));
+ const auto temp32x4t_3 = wrapper::vmovl(wrapper::vgetlow(temp16x8t_2));
+ const auto temp32x4t_4 = wrapper::vmovl(wrapper::vgethigh(temp16x8t_2));
+
+ auto temp32x4f_1 = wrapper::vcvt<float>(temp32x4t_1);
+ auto temp32x4f_2 = wrapper::vcvt<float>(temp32x4t_2);
+ auto temp32x4f_3 = wrapper::vcvt<float>(temp32x4t_3);
+ auto temp32x4f_4 = wrapper::vcvt<float>(temp32x4t_4);
+
+ // De-quantize vec_elements
+ temp32x4f_1 = wrapper::vmul(wrapper::vsub(temp32x4f_1, offset32x4f_4), scale32x4f_4);
+ temp32x4f_2 = wrapper::vmul(wrapper::vsub(temp32x4f_2, offset32x4f_4), scale32x4f_4);
+ temp32x4f_3 = wrapper::vmul(wrapper::vsub(temp32x4f_3, offset32x4f_4), scale32x4f_4);
+ temp32x4f_4 = wrapper::vmul(wrapper::vsub(temp32x4f_4, offset32x4f_4), scale32x4f_4);
+
+ vec_res_value1_f = wrapper::vmul(temp32x4f_1, vec_res_value1_f);
+ vec_res_value2_f = wrapper::vmul(temp32x4f_2, vec_res_value2_f);
+ vec_res_value3_f = wrapper::vmul(temp32x4f_3, vec_res_value3_f);
+ vec_res_value4_f = wrapper::vmul(temp32x4f_4, vec_res_value4_f);
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MIN:
+ {
+ auto temp_vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
+ vec_res_idx = calculate_index_quantized(index_dim, temp_vec_res_value, vec_res_value,
+ vec_res_idx, op, axis);
+ vec_res_value = temp_vec_res_value;
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ auto temp_vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
+ vec_res_idx = calculate_index_quantized(index_dim, temp_vec_res_value, vec_res_value,
+ vec_res_idx, op, axis);
+ vec_res_value = temp_vec_res_value;
+ break;
+ }
+ case ReductionOperation::MIN:
+ {
+ vec_res_value = wrapper::vmin(vec_elements, vec_res_value);
+ break;
+ }
+ case ReductionOperation::MAX:
+ {
+ vec_res_value = wrapper::vmax(vec_elements, vec_res_value);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
+ switch (op)
+ {
+ case ReductionOperation::ARG_IDX_MIN:
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x), vec_res_idx.val[0]);
+ wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 4, vec_res_idx.val[1]);
+ wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 8, vec_res_idx.val[2]);
+ wrapper::vstore(reinterpret_cast<uint32_t *>(output.ptr() + 4 * x) + 12,
+ vec_res_idx.val[3]);
+ break;
+ }
+ case ReductionOperation::MIN:
+ case ReductionOperation::MAX:
+ {
+ wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), vec_res_value);
+ break;
+ }
+ case ReductionOperation::SUM:
+ {
+ // Subtract offsets
+ auto offsets = vdupq_n_s32((in_info.dimension(axis) - 1) * iq_info.offset);
+
+ auto vec_res_s_value1 = wrapper::vreinterpret(vec_res_value1);
+ auto vec_res_s_value2 = wrapper::vreinterpret(vec_res_value2);
+ auto vec_res_s_value3 = wrapper::vreinterpret(vec_res_value3);
+ auto vec_res_s_value4 = wrapper::vreinterpret(vec_res_value4);
+
+ vec_res_s_value1 = wrapper::vsub(vec_res_s_value1, offsets);
+ vec_res_s_value2 = wrapper::vsub(vec_res_s_value2, offsets);
+ vec_res_s_value3 = wrapper::vsub(vec_res_s_value3, offsets);
+ vec_res_s_value4 = wrapper::vsub(vec_res_s_value4, offsets);
+
+ const auto temp16x8t_1 =
+ wrapper::vcombine(wrapper::vqmovn(vec_res_s_value1), wrapper::vqmovn(vec_res_s_value2));
+ const auto temp16x8t_2 =
+ wrapper::vcombine(wrapper::vqmovn(vec_res_s_value3), wrapper::vqmovn(vec_res_s_value4));
+
+ combine_and_store<T>(temp16x8t_1, temp16x8t_2, output, x);
+ break;
+ }
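+ // Why (N - 1) * offset: each quantized input is q_i = r_i / scale + offset,
+ // so a sum of N values accumulates N offsets while a valid quantized result
+ // must carry exactly one; subtracting (N - 1) * offset restores that invariant.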
+ case ReductionOperation::MEAN_SUM:
+ {
+ vec_res_value1_f = wrapper::vmla(vec_B, wrapper::vcvt<float>(vec_res_value1), vec_A);
+ vec_res_value2_f = wrapper::vmla(vec_B, wrapper::vcvt<float>(vec_res_value2), vec_A);
+ vec_res_value3_f = wrapper::vmla(vec_B, wrapper::vcvt<float>(vec_res_value3), vec_A);
+ vec_res_value4_f = wrapper::vmla(vec_B, wrapper::vcvt<float>(vec_res_value4), vec_A);
+
+#ifdef __aarch64__
+ vec_res_value1 = wrapper::vcvta<PromotedType>(vec_res_value1_f);
+ vec_res_value2 = wrapper::vcvta<PromotedType>(vec_res_value2_f);
+ vec_res_value3 = wrapper::vcvta<PromotedType>(vec_res_value3_f);
+ vec_res_value4 = wrapper::vcvta<PromotedType>(vec_res_value4_f);
+#else // defined(__aarch64__)
+ vec_res_value1 = wrapper::vcvt<PromotedType>(vec_res_value1_f);
+ vec_res_value2 = wrapper::vcvt<PromotedType>(vec_res_value2_f);
+ vec_res_value3 = wrapper::vcvt<PromotedType>(vec_res_value3_f);
+ vec_res_value4 = wrapper::vcvt<PromotedType>(vec_res_value4_f);
+#endif // __aarch64__
+
+ const auto temp16x8t_1 =
+ wrapper::vcombine(wrapper::vqmovn(vec_res_value1), wrapper::vqmovn(vec_res_value2));
+ const auto temp16x8t_2 =
+ wrapper::vcombine(wrapper::vqmovn(vec_res_value3), wrapper::vqmovn(vec_res_value4));
+ auto res = wrapper::vcombine(wrapper::vqmovn(temp16x8t_1), wrapper::vqmovn(temp16x8t_2));
+
+ wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), res);
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ const auto offset32x4f_4 =
+ wrapper::vdup_n(static_cast<float>(iq_info.offset), wrapper::traits::vector_128_tag{});
+ const auto iscale32x4f_4 = vinvq_f32(vdupq_n_f32(iq_info.scale));
+
+ // Re-quantize
+ vec_res_value1_f =
+ wrapper::vadd(wrapper::vmul(vec_res_value1_f, iscale32x4f_4), offset32x4f_4);
+ vec_res_value2_f =
+ wrapper::vadd(wrapper::vmul(vec_res_value2_f, iscale32x4f_4), offset32x4f_4);
+ vec_res_value3_f =
+ wrapper::vadd(wrapper::vmul(vec_res_value3_f, iscale32x4f_4), offset32x4f_4);
+ vec_res_value4_f =
+ wrapper::vadd(wrapper::vmul(vec_res_value4_f, iscale32x4f_4), offset32x4f_4);
+
+ vec_res_value1 = wrapper::vcvt<T>(vec_res_value1_f);
+ vec_res_value2 = wrapper::vcvt<T>(vec_res_value2_f);
+ vec_res_value3 = wrapper::vcvt<T>(vec_res_value3_f);
+ vec_res_value4 = wrapper::vcvt<T>(vec_res_value4_f);
+
+ const auto temp16x8t_1 =
+ wrapper::vcombine(wrapper::vqmovn(vec_res_value1), wrapper::vqmovn(vec_res_value2));
+ const auto temp16x8t_2 =
+ wrapper::vcombine(wrapper::vqmovn(vec_res_value3), wrapper::vqmovn(vec_res_value4));
+ auto res = wrapper::vcombine(wrapper::vqmovn(temp16x8t_1), wrapper::vqmovn(temp16x8t_2));
+
+ wrapper::vstore(reinterpret_cast<T *>(output.ptr() + x), res);
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
+ // Compute left-over elements
+ for (; x < window_end_x; ++x)
+ {
+ float res_value = 0.f;
+ int32_t res_value_q = 0;
+
+ switch (op)
+ {
+ case ReductionOperation::ARG_IDX_MAX:
+ case ReductionOperation::ARG_IDX_MIN:
+ case ReductionOperation::MIN:
+ case ReductionOperation::MAX:
+ {
+ res_value = *(input_ptr + x);
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ res_value = static_cast<T>(1.0f);
+ break;
+ }
+ default:
+ {
+ res_value = static_cast<T>(0.0f);
+ break;
+ }
+ }
+ uint32_t res_idx = 0;
+
+ for (unsigned int dim = 0; dim < in_info.dimension(axis); ++dim)
+ {
+ const T *in_ptr =
+ reinterpret_cast<T *>(input.ptr() + x + in_info.strides_in_bytes()[axis] * dim);
+ switch (op)
+ {
+ case ReductionOperation::SUM:
+ {
+ res_value += *in_ptr;
+ break;
+ }
+ case ReductionOperation::MEAN_SUM:
+ {
+ res_value_q += *in_ptr;
+ break;
+ }
+ case ReductionOperation::SUM_SQUARE:
+ {
+ res_value += *in_ptr * *in_ptr;
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ // De-quantize input
+ if (std::is_same<T, uint8_t>::value)
+ {
+ res_value *= dequantize_qasymm8(*in_ptr, iq_info);
+ }
+ else
+ {
+ res_value *= dequantize_qasymm8_signed(*in_ptr, iq_info);
+ }
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MIN:
+ {
+ if (*in_ptr < res_value)
+ {
+ res_value = *in_ptr;
+ res_idx = dim;
+ }
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ if (*in_ptr > res_value)
+ {
+ res_value = *in_ptr;
+ res_idx = dim;
+ }
+ break;
+ }
+ case ReductionOperation::MIN:
+ {
+ res_value = *in_ptr < res_value ? *in_ptr : res_value;
+ break;
+ }
+ case ReductionOperation::MAX:
+ {
+ res_value = *in_ptr > res_value ? *in_ptr : res_value;
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Not supported");
+ }
+ }
+
+ switch (op)
+ {
+ case ReductionOperation::MEAN_SUM:
+ {
+ // Apply previously calculated coefficients (with rounding on aarch64)
+#ifdef __aarch64__
+ const int32_t res =
+ arm_compute::support::cpp11::round(A * (static_cast<float>(res_value_q)) + B);
+#else // defined(__aarch64__)
+ const int32_t res = A * (static_cast<float>(res_value_q)) + B;
+#endif // __aarch64__
+ *reinterpret_cast<T *>(output.ptr() + x) = utils::cast::saturate_cast<T>(res);
+ break;
+ }
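+ // A and B (and vec_A / vec_B in the vector path above) are precomputed
+ // earlier in this function, above this hunk, to fold the division by the
+ // reduced dimension and the input/output quantization parameters into one
+ // multiply-add; conceptually, as a scalar model of the MEAN_SUM case above:
+ //
+ //   float mean_f = A * (float)res_value_q + B;       // requantized mean
+ //   T     out    = saturate_cast<T>(round(mean_f));  // rounding only on aarch64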
+ case ReductionOperation::SUM:
+ {
+ // Subtract accumulated offsets
+ res_value -= (in_info.dimension(axis) - 1) * iq_info.offset;
+ *reinterpret_cast<T *>(output.ptr() + x) = utils::cast::saturate_cast<T>(res_value);
+ break;
+ }
+ case ReductionOperation::PROD:
+ {
+ // Re-quantize result
+ T res = 0;
+ if (std::is_same<T, uint8_t>::value)
+ {
+ res = quantize_qasymm8(res_value, iq_info);
+ }
+ else
+ {
+ res = quantize_qasymm8_signed(res_value, iq_info);
+ }
+ *(reinterpret_cast<T *>(output.ptr() + x)) = res;
+ break;
+ }
+ case ReductionOperation::ARG_IDX_MIN:
+ case ReductionOperation::ARG_IDX_MAX:
+ {
+ *(reinterpret_cast<uint32_t *>(output.ptr() + x * 4)) = res_idx;
+ break;
+ }
+ default:
+ *(reinterpret_cast<T *>(output.ptr() + x)) = res_value;
+ }
+ }
+ },
+ input, output);
+ }
+};
+
+} // namespace arm_compute
+#endif // ACL_SRC_CPU_KERNELS_REDUCTION_LAYER_GENERIC_NEON_IMPL_H
diff --git a/src/cpu/kernels/reduction_layer/generic/neon/integer.cpp b/src/cpu/kernels/reduction_layer/generic/neon/integer.cpp
new file mode 100644
index 0000000000..ad66b456ac
--- /dev/null
+++ b/src/cpu/kernels/reduction_layer/generic/neon/integer.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/reduction_layer/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
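+// Thin entry points that instantiate the generic Reducer / RedOp templates
+// from impl.h for 4-lane int32 data, keeping the template instantiations in
+// a single translation unit.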
+void reduce_RedOpX_reduceX_S32_4(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpX<int32_t, 4>>::reduceX(window, input, output, RedOpX<int32_t, 4>(), op);
+}
+
+void reduce_RedOpYZW_reduceY_S32_4(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<int32_t, 4>>::reduceY(window, input, output, RedOpYZW<int32_t, 4>(), op);
+}
+
+void reduce_RedOpYZW_reduceZ_S32_4(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<int32_t, 4>>::reduceZ(window, input, output, RedOpYZW<int32_t, 4>(), op);
+}
+
+void reduce_RedOpYZW_reduceW_S32_4(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW<int32_t, 4>>::reduceW(window, input, output, RedOpYZW<int32_t, 4>(), op);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/reduction_layer/generic/neon/list.h b/src/cpu/kernels/reduction_layer/generic/neon/list.h
new file mode 100644
index 0000000000..947c28a130
--- /dev/null
+++ b/src/cpu/kernels/reduction_layer/generic/neon/list.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_SRC_CPU_KERNELS_REDUCTION_LAYER_GENERIC_NEON_LIST_H
+#define ACL_SRC_CPU_KERNELS_REDUCTION_LAYER_GENERIC_NEON_LIST_H
+
+#include "arm_compute/core/Helpers.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+#define DECLARE_REDUCTION_KERNEL(func_name) \
+ void func_name(const Window &window, const ITensor *in, ITensor *out, const ReductionOperation op)
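+// For example, DECLARE_REDUCTION_KERNEL(reduce_RedOpX_reduceX_S32_4) expands to:
+//   void reduce_RedOpX_reduceX_S32_4(const Window &window, const ITensor *in, ITensor *out, const ReductionOperation op);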
+
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_complex_reduceZ_float32_4_2_SUM);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpX_reduceX_float32_4);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceY_float32_4);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceZ_float32_4);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceW_float32_4);
+
+DECLARE_REDUCTION_KERNEL(reduce_RedOpX_reduceX_float16_8);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceY_float16_8);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceZ_float16_8);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceW_float16_8);
+
+DECLARE_REDUCTION_KERNEL(reduce_RedOpX_reduceX_S32_4);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceY_S32_4);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceZ_S32_4);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceW_S32_4);
+
+DECLARE_REDUCTION_KERNEL(reduce_RedOpX_reduceX_qasymm8);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceY_qasymm8);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceZ_qasymm8);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceW_qasymm8);
+
+DECLARE_REDUCTION_KERNEL(reduce_RedOpX_reduceX_qasymm8_signed);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceY_qasymm8_signed);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceZ_qasymm8_signed);
+DECLARE_REDUCTION_KERNEL(reduce_RedOpYZW_reduceW_qasymm8_signed);
+
+#undef DECLARE_REDUCTION_KERNEL
+} // namespace cpu
+} // namespace arm_compute
+#endif // ACL_SRC_CPU_KERNELS_REDUCTION_LAYER_GENERIC_NEON_LIST_H
diff --git a/src/cpu/kernels/reduction_layer/generic/neon/qasymm8.cpp b/src/cpu/kernels/reduction_layer/generic/neon/qasymm8.cpp
new file mode 100644
index 0000000000..bc711c6855
--- /dev/null
+++ b/src/cpu/kernels/reduction_layer/generic/neon/qasymm8.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/reduction_layer/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void reduce_RedOpX_reduceX_qasymm8(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpX_quantized<uint8_t>>::reduceX(window, input, output, RedOpX_quantized<uint8_t>(), op);
+}
+
+void reduce_RedOpYZW_reduceY_qasymm8(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW_quantized<uint8_t>>::reduceY(window, input, output, RedOpYZW_quantized<uint8_t>(), op);
+}
+
+void reduce_RedOpYZW_reduceZ_qasymm8(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW_quantized<uint8_t>>::reduceZ(window, input, output, RedOpYZW_quantized<uint8_t>(), op);
+}
+
+void reduce_RedOpYZW_reduceW_qasymm8(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW_quantized<uint8_t>>::reduceW(window, input, output, RedOpYZW_quantized<uint8_t>(), op);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/reduction_layer/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/reduction_layer/generic/neon/qasymm8_signed.cpp
new file mode 100644
index 0000000000..10ac3d6715
--- /dev/null
+++ b/src/cpu/kernels/reduction_layer/generic/neon/qasymm8_signed.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "src/cpu/kernels/reduction_layer/generic/neon/impl.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void reduce_RedOpX_reduceX_qasymm8_signed(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpX_quantized<int8_t>>::reduceX(window, input, output, RedOpX_quantized<int8_t>(), op);
+}
+
+void reduce_RedOpYZW_reduceY_qasymm8_signed(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW_quantized<int8_t>>::reduceY(window, input, output, RedOpYZW_quantized<int8_t>(), op);
+}
+
+void reduce_RedOpYZW_reduceZ_qasymm8_signed(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW_quantized<int8_t>>::reduceZ(window, input, output, RedOpYZW_quantized<int8_t>(), op);
+}
+
+void reduce_RedOpYZW_reduceW_qasymm8_signed(const Window &window,
+ const ITensor *input,
+ ITensor *output,
+ const ReductionOperation op)
+{
+ return Reducer<RedOpYZW_quantized<int8_t>>::reduceW(window, input, output, RedOpYZW_quantized<int8_t>(), op);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/neon/fp16.cpp b/src/cpu/kernels/softmax/generic/neon/fp16.cpp
index da62d2d614..425fcf7ac6 100644
--- a/src/cpu/kernels/softmax/generic/neon/fp16.cpp
+++ b/src/cpu/kernels/softmax/generic/neon/fp16.cpp
@@ -33,9 +33,15 @@ namespace cpu
{
template <bool IS_LOG>
-void neon_fp16_softmax(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window)
+void neon_fp16_softmax(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr)
{
+ ARM_COMPUTE_UNUSED(lut_ptr);
if (axis == 0)
{
return neon_softmax_x_float<float16_t, IS_LOG>(in, tmp, out, beta, axis, window);
@@ -46,10 +52,20 @@ void neon_fp16_softmax(
}
}
-template void neon_fp16_softmax<true>(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
-template void neon_fp16_softmax<false>(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
+template void neon_fp16_softmax<true>(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
+template void neon_fp16_softmax<false>(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/neon/fp32.cpp b/src/cpu/kernels/softmax/generic/neon/fp32.cpp
index 0701620636..a64946eb74 100644
--- a/src/cpu/kernels/softmax/generic/neon/fp32.cpp
+++ b/src/cpu/kernels/softmax/generic/neon/fp32.cpp
@@ -31,9 +31,15 @@ namespace cpu
{
template <bool IS_LOG>
-void neon_fp32_softmax(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window)
+void neon_fp32_softmax(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr)
{
+ ARM_COMPUTE_UNUSED(lut_ptr);
if (axis == 0)
{
return neon_softmax_x_float<float, IS_LOG>(in, tmp, out, beta, axis, window);
@@ -44,10 +50,20 @@ void neon_fp32_softmax(
}
}
-template void neon_fp32_softmax<true>(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
-template void neon_fp32_softmax<false>(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
+template void neon_fp32_softmax<true>(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
+template void neon_fp32_softmax<false>(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/neon/qasymm8.cpp b/src/cpu/kernels/softmax/generic/neon/qasymm8.cpp
index d39240bb38..369f9bb005 100644
--- a/src/cpu/kernels/softmax/generic/neon/qasymm8.cpp
+++ b/src/cpu/kernels/softmax/generic/neon/qasymm8.cpp
@@ -30,9 +30,15 @@ namespace arm_compute
namespace cpu
{
template <bool IS_LOG>
-void neon_qasymm8_softmax(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window)
+void neon_qasymm8_softmax(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr)
{
+ ARM_COMPUTE_UNUSED(lut_ptr);
if (axis == 0)
{
return neon_softmax_x_quantized<qasymm8_t, IS_LOG>(in, tmp, out, beta, axis, window);
@@ -43,10 +49,20 @@ void neon_qasymm8_softmax(
}
}
-template void neon_qasymm8_softmax<true>(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
-template void neon_qasymm8_softmax<false>(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
+template void neon_qasymm8_softmax<true>(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
+template void neon_qasymm8_softmax<false>(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/neon/qasymm8_signed.cpp b/src/cpu/kernels/softmax/generic/neon/qasymm8_signed.cpp
index 26fd5dbfa0..594ceb7654 100644
--- a/src/cpu/kernels/softmax/generic/neon/qasymm8_signed.cpp
+++ b/src/cpu/kernels/softmax/generic/neon/qasymm8_signed.cpp
@@ -30,9 +30,15 @@ namespace arm_compute
namespace cpu
{
template <bool IS_LOG>
-void neon_qasymm8_signed_softmax(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window)
+void neon_qasymm8_signed_softmax(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr)
{
+ ARM_COMPUTE_UNUSED(lut_ptr);
if (axis == 0)
{
return neon_softmax_x_quantized<qasymm8_signed_t, IS_LOG>(in, tmp, out, beta, axis, window);
@@ -43,10 +49,20 @@ void neon_qasymm8_signed_softmax(
}
}
-template void neon_qasymm8_signed_softmax<true>(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
-template void neon_qasymm8_signed_softmax<false>(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
+template void neon_qasymm8_signed_softmax<true>(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
+template void neon_qasymm8_signed_softmax<false>(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
} // namespace cpu
} // namespace arm_compute
diff --git a/src/cpu/kernels/softmax/generic/sme2/fp16.cpp b/src/cpu/kernels/softmax/generic/sme2/fp16.cpp
index bcd34d1ca2..e70c9f4793 100644
--- a/src/cpu/kernels/softmax/generic/sme2/fp16.cpp
+++ b/src/cpu/kernels/softmax/generic/sme2/fp16.cpp
@@ -720,8 +720,15 @@ loop_3_end%=:
);
}
-void sme2_fp16_softmax(const ITensor *in, void *const, ITensor *out, const float beta, int axis, const Window &window)
+void sme2_fp16_softmax(const ITensor *in,
+ void *const,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr)
{
+ ARM_COMPUTE_UNUSED(lut_ptr);
ARM_COMPUTE_UNUSED(axis);
const auto *src_info = in->info();
diff --git a/src/cpu/kernels/softmax/generic/sme2/fp32.cpp b/src/cpu/kernels/softmax/generic/sme2/fp32.cpp
index 159039a320..5e29d51746 100644
--- a/src/cpu/kernels/softmax/generic/sme2/fp32.cpp
+++ b/src/cpu/kernels/softmax/generic/sme2/fp32.cpp
@@ -524,8 +524,15 @@ loop_3_end%=:
);
}
-void sme2_fp32_softmax(const ITensor *in, void *const, ITensor *out, const float beta, int axis, const Window &window)
+void sme2_fp32_softmax(const ITensor *in,
+ void *const,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr)
{
+ ARM_COMPUTE_UNUSED(lut_ptr);
ARM_COMPUTE_UNUSED(axis);
const auto *src_info = in->info();
diff --git a/src/cpu/kernels/softmax/generic/sme2/qasymm8.cpp b/src/cpu/kernels/softmax/generic/sme2/qasymm8.cpp
new file mode 100644
index 0000000000..9feb669f7c
--- /dev/null
+++ b/src/cpu/kernels/softmax/generic/sme2/qasymm8.cpp
@@ -0,0 +1,634 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+// SoftMax
+//
+// Steps:
+// * Find max: max_value = max(src)
+// * Regularize: dst[i] = exp(src[i] - max_value)
+// sum_value = sum(dst)
+// * Normalize: dst[i] = dst[i] / sum_value
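+//
+// A scalar model of the per-row computation (illustrative only; the kernel
+// below vectorizes this with SME2, keeps the 256-entry LUT of
+// exp(-scale*beta*x) in z0-z15, and saturates on the final narrowing):
+//
+//   uint8_t max_v = *std::max_element(src, src + length);
+//   float   sum   = 0.f;
+//   for (uintptr_t i = 0; i < length; ++i) { tmp[i] = lut[max_v - src[i]]; sum += tmp[i]; }
+//   const float norm = 256.f / sum; // 256 maps [0, 1] onto the full QASYMM8 range
+//   for (uintptr_t i = 0; i < length; ++i) { dst[i] = (uint8_t)std::min(tmp[i] * norm, 255.f); }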
+void sme2_qasymm8_softmax_kernel_512VL( //
+ const uint8_t *src,
+ uint8_t *dst,
+ float beta,
+ const uintptr_t shape[4],
+ const uintptr_t src_strides[4],
+ const uintptr_t dst_strides[4],
+ const float *lut,
+ float *tmp)
+{
+ // Precondition:
+ // * src_strides[0] == sizeof(uint8_t)
+ // * dst_strides[0] == sizeof(uint8_t)
+ // * tmp_strides[0] == sizeof(float)
+
+ __asm__ volatile(
+ R"(
+ .inst 0xd503477f // smstart
+
+ // Registers
+ //
+ // * x1: Loop index
+ // * x2: LUT index
+ // * x13: temporary, body_length
+ //
+ // * x20: index_3
+ // * x21: src_3
+ // * x22: dst_3
+ // * x23: index_2
+ // * x24: src_2
+ // * x25: dst_2
+ // * x26: index_1
+ // * x27: src_1
+ // * x28: dst_1
+ // * x29: tmp
+ //
+ //
+ // * p0: all-true
+ // * p1: predicate for QASYMM8 values
+ // * p2: predicate 0 for FP32 values (first quarter of expanded/unpacked p1)
+ // * p3: predicate 1 for FP32 values (second quarter of expanded/unpacked p1)
+ // * p4: predicate 2 for FP32 values (third quarter of expanded/unpacked p1)
+ // * p5: predicate 3 for FP32 values (fourth quarter of expanded/unpacked p1)
+ // * pn9: all-true for 32 bit values
+ // * pn8: all-true for 8-bit values
+ //
+ // * z0-z15 the 256 LUT values of exp(-scale*beta*x) for x in QASYMM8, stored as FP32 values
+
+ // Prepares all constant values
+
+ ptrue p0.b
+ .inst 0x25a07811 // ptrue pn9.s
+ .inst 0x25207810 // ptrue pn8.b
+
+ // ---------------------------------------------------------------- x13: body_length = (length / vl) * vl
+ cntb x13, ALL, MUL #4
+ udiv x9, %x[length], x13
+ mul x13, x13, x9
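+ // x13 is now the largest multiple of 4*VL bytes not exceeding the row
+ // length; the unrolled loops below process [0, x13) and the predicated
+ // leftover loops handle the remainder.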
+
+ // ==================================================
+ // 3D loop opening
+ // ==================================================
+
+ mov x20, %x[shape_3]
+ mov x21, %x[src]
+ mov x22, %x[dst]
+ mov x19, %x[lut]
+ mov x29, %x[tmp]
+
+ // Load the LUT to the register file.
+ mov x2, %x[lut]
+ .inst 0xa040c440 //ld1w { z0.s - z3.s }, pn9/z, [x2]
+ add x2, x2, #256
+ .inst 0xa040c444 //ld1w { z4.s - z7.s }, pn9/z, [x2]
+ add x2, x2, #256
+ .inst 0xa040c448 //ld1w { z8.s - z11.s }, pn9/z, [x2]
+ add x2, x2, #256
+ .inst 0xa040c44c //ld1w { z12.s - z15.s }, pn9/z, [x2]
+
+
+loop_3_start%=:
+ // for index_3 in shape_3 downto 1
+ cmp x20, #0
+ b.eq loop_3_end%=
+ sub x20, x20, #1
+
+ mov x23, %x[shape_2]
+ mov x24, x21
+ mov x25, x22
+
+loop_2_start%=:
+ // for index_2 in shape_2 downto 1
+ cmp x23, #0
+ b.eq loop_2_end%=
+ sub x23, x23, #1
+
+ mov x26, %x[shape_1]
+ mov x27, x24
+ mov x28, x25
+
+loop_1_start%=:
+ // for index_1 in shape_1 downto 1
+ cmp x26, #0
+ b.eq loop_1_end%=
+ sub x26, x26, #1
+
+ // ==================================================
+ // Step 1: Find max
+ // ==================================================
+ // z16-z19 = minimum QASYMM8 value (0) to allow for it to be used for comparison to find the max.
+ dup z16.b, #0
+ dup z17.b, #0
+ dup z18.b, #0
+ dup z19.b, #0
+ mov x1, #0 // x1: index
+find_max_body_start%=:
+ cmp x1, x13
+ b.eq find_max_body_end%=
+ .inst 0xa0018374 // ld1b { z20.b - z23.b }, pn8/z, [x27, x1] z20-z23: x
+ .inst 0xc134b811 // umax { z16.b - z19.b }, { z16.b - z19.b }, { z20.b - z23.b } z16-z19: max_value = max(max_value, x)
+ add x1, x1, #256 // Advance index by 256 bytes/integers: Z registers = 2048-bit data = 256 8-bit integers.
+ b find_max_body_start%=
+find_max_body_end%=:
+
+ // Loop for processing the leftover part.
+find_max_leftover_start%=:
+ whilelo p1.b, x1, %x[length]
+ b.none find_max_leftover_end%=
+
+ ld1b z30.b, p1/z, [x27, x1] // z30: x
+ umax z16.b, p1/m, z16.b, z30.b // z16: max_value = max(max_value, x)
+
+ add x1, x1, #64
+
+ b find_max_leftover_start%=
+find_max_leftover_end%=:
+
+ .inst 0xc132b011 // umax { z16.b, z17.b }, { z16.b, z17.b }, { z18.b, z19.b }
+ umax z16.b, p0/m, z16.b, z17.b
+ umaxv b16, p0, z16.b // Reduction unsigned max operation to get maximum_value
+ dup z16.b, z16.b[0]
+ uunpklo z16.h, z16.b // Using unpack instructions to align the max value with the FP32 entries in the LUT for use in the TBX instruction
+ uunpklo z16.s, z16.h
+
+ mov x1, #0 // reset index
+ dup z25.s, #0
+
+regularize_start%=:
+ whilelo p1.b, x1, %x[length]
+ b.none regularize_end%=
+
+ // p2-p5 are - together - the 32-bit version of p1, the instructions below unpack p1 into those four predicate registers to allow for the 32-bit loads below to be correctly predicated
+ punpklo p2.h, p1.b
+ punpkhi p4.h, p1.b
+
+ punpkhi p3.h, p2.b
+ punpklo p2.h, p2.b
+
+ punpkhi p5.h, p4.b
+ punpklo p4.h, p4.b
+
+ ld1b z17.b, p1/z, [x27, x1] //z17: input data
+
+ uunpklo z18.h, z17.b //Using unpack instructions to align the input QASYMM8 values with the FP32 entries in the LUT for use in the TBX instruction
+ uunpkhi z19.h, z17.b
+
+ uunpklo z17.s, z18.h // z17 = low low input QASYMM8 values
+ uunpkhi z18.s, z18.h // z18 = low high input QASYMM8 values
+
+ uunpkhi z20.s, z19.h // z20 = high high input QASYMM8 values
+ uunpklo z19.s, z19.h // z19 = high low input QASYMM8 values
+
+ sub z17.s, z16.s, z17.s // z17: x = max_value - input_data
+ sub z18.s, z16.s, z18.s // z18: x = max_value - input_data
+ sub z19.s, z16.s, z19.s // z19: x = max_value - input_data
+ sub z20.s, z16.s, z20.s // z20: x = max_value - input_data
+
+ tbx z21.s, z0.s, z17.s // Look-up entries 0-15 in the LUT.
+ tbx z22.s, z0.s, z18.s
+ tbx z23.s, z0.s, z19.s
+ tbx z24.s, z0.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z1.s, z17.s // Look-up entries 16-31 in the LUT.
+ tbx z22.s, z1.s, z18.s
+ tbx z23.s, z1.s, z19.s
+ tbx z24.s, z1.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z2.s, z17.s // Look-up entries 32-47 in the LUT.
+ tbx z22.s, z2.s, z18.s
+ tbx z23.s, z2.s, z19.s
+ tbx z24.s, z2.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z3.s, z17.s // Look-up entries 48-63 in the LUT.
+ tbx z22.s, z3.s, z18.s
+ tbx z23.s, z3.s, z19.s
+ tbx z24.s, z3.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z4.s, z17.s // Look-up entries 64-79 in the LUT.
+ tbx z22.s, z4.s, z18.s
+ tbx z23.s, z4.s, z19.s
+ tbx z24.s, z4.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z5.s, z17.s // Look-up entries 80-95 in the LUT.
+ tbx z22.s, z5.s, z18.s
+ tbx z23.s, z5.s, z19.s
+ tbx z24.s, z5.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z6.s, z17.s // Look-up entries 96-111 in the LUT.
+ tbx z22.s, z6.s, z18.s
+ tbx z23.s, z6.s, z19.s
+ tbx z24.s, z6.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z7.s, z17.s // Look-up entries 112-127 in the LUT.
+ tbx z22.s, z7.s, z18.s
+ tbx z23.s, z7.s, z19.s
+ tbx z24.s, z7.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z8.s, z17.s // Look-up entries 128-143 in the LUT.
+ tbx z22.s, z8.s, z18.s
+ tbx z23.s, z8.s, z19.s
+ tbx z24.s, z8.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z9.s, z17.s // Look-up entries 144-159 in the LUT.
+ tbx z22.s, z9.s, z18.s
+ tbx z23.s, z9.s, z19.s
+ tbx z24.s, z9.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z10.s, z17.s // Look-up entries 160-175 in the LUT.
+ tbx z22.s, z10.s, z18.s
+ tbx z23.s, z10.s, z19.s
+ tbx z24.s, z10.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z11.s, z17.s // Look-up entries 176-191 in the LUT.
+ tbx z22.s, z11.s, z18.s
+ tbx z23.s, z11.s, z19.s
+ tbx z24.s, z11.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z12.s, z17.s // Look-up entries 192-207 in the LUT.
+ tbx z22.s, z12.s, z18.s
+ tbx z23.s, z12.s, z19.s
+ tbx z24.s, z12.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z13.s, z17.s // Look-up entries 208-223 in the LUT.
+ tbx z22.s, z13.s, z18.s
+ tbx z23.s, z13.s, z19.s
+ tbx z24.s, z13.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z14.s, z17.s // Look-up entries 224-239 in the LUT.
+ tbx z22.s, z14.s, z18.s
+ tbx z23.s, z14.s, z19.s
+ tbx z24.s, z14.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z15.s, z17.s // Look-up entries 240-255 in the LUT.
+ tbx z22.s, z15.s, z18.s
+ tbx z23.s, z15.s, z19.s
+ tbx z24.s, z15.s, z20.s
+
+
+ st1w z21.s, p2, [x29, x1, LSL #2]// z21 store exp(-scale*beta*x) into the tmp tensor
+ fadd z25.s, p2/m, z25.s, z21.s
+ add x1, x1, #16
+
+ st1w z22.s, p3, [x29, x1, LSL #2]// z22 store exp(-scale*beta*x) into the tmp tensor
+ fadd z25.s, p3/m, z25.s, z22.s
+ add x1, x1, #16
+
+ st1w z23.s, p4, [x29, x1, LSL #2]// z23 store exp(-scale*beta*x) into the tmp tensor
+ fadd z25.s, p4/m, z25.s, z23.s
+ add x1, x1, #16
+
+ st1w z24.s, p5, [x29, x1, LSL #2]// z24 store exp(-scale*beta*x) into the tmp tensor
+ fadd z25.s, p5/m, z25.s, z24.s
+ add x1, x1, #16
+
+ b regularize_start%=
+regularize_end%=:
+
+ mov w9, 0x0000
+ movk w9, 0x4380, LSL #16 // Moving 256.f into w9 to scale - via multiplication (division by reciprocal) - the floating point [0,1] range of the results to the [0,255] integer range of QASYMM8
+ dup z29.s, w9
+ faddv s25, p0, z25.s
+ fdiv s25, s29, s25
+ dup z25.s, z25.s[0] // z25: 256.f/sum. 256 is needed to get the full range and 1/sum is part of softmax.
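+ // Note: the row-max element indexes lut[0]; assuming lut[0] == exp(0) == 1.f,
+ // its scaled value can reach 256.f, which the saturating narrowing below
+ // clamps to 255.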
+
+ // ==================================================
+ // Step 3: Normalize
+ // ==================================================
+ mov x1, #0
+normalize_body_start%=:
+ cmp x1, x13
+ b.eq normalize_body_end%=
+
+ mov x2, x1 // Preserve the index into x2 for the final store to dst.
+ .inst 0xa001c7b0 // ld1w { z16.s - z19.s }, pn9/z, [x29, x1, lsl #2]
+ add x1, x1, #64
+ .inst 0xa001c7b4 // ld1w { z20.s - z23.s }, pn9/z, [x29, x1, lsl #2]
+ add x1, x1, #64
+
+ // z16-z23: effectively divides exp(-scale*beta*x) by the sum of the exponentials for the current row and multiplies by 256.
+ fmul z16.s, z25.s, z16.s
+ fmul z17.s, z25.s, z17.s
+ fmul z18.s, z25.s, z18.s
+ fmul z19.s, z25.s, z19.s
+ fmul z20.s, z25.s, z20.s
+ fmul z21.s, z25.s, z21.s
+ fmul z22.s, z25.s, z22.s
+ fmul z23.s, z25.s, z23.s
+
+ // z16-z23: convert the FP32 values from the tmp tensor to uint32.
+ fcvtzu z16.s, p0/m, z16.s
+ fcvtzu z17.s, p0/m, z17.s
+ fcvtzu z18.s, p0/m, z18.s
+ fcvtzu z19.s, p0/m, z19.s
+ fcvtzu z20.s, p0/m, z20.s
+ fcvtzu z21.s, p0/m, z21.s
+ fcvtzu z22.s, p0/m, z22.s
+ fcvtzu z23.s, p0/m, z23.s
+
+ // z16-z17: narrow the uint32 values into uint8 and saturate them.
+ .inst 0xc133e230 // uqcvt z16.b, { z16.s - z19.s }
+ .inst 0xc133e2b1 // uqcvt z17.b, { z20.s - z23.s }
+
+ dup z20.s, z25.s[0] // Juggling the value to z20 as z25 will be overwritten by the load below
+
+ .inst 0xa001c7b8 // ld1w { z24.s - z27.s }, pn9/z, [x29, x1, lsl #2]
+ add x1, x1, #64
+ .inst 0xa001c7bc // ld1w { z28.s - z31.s }, pn9/z, [x29, x1, lsl #2]
+ add x1, x1, #64
+
+ // z24-z31: effectively divides exp(-scale*beta*x) by the sum of the exponentials for the current row and multiplies by 256.
+ fmul z24.s, z20.s, z24.s
+ fmul z25.s, z20.s, z25.s
+ fmul z26.s, z20.s, z26.s
+ fmul z27.s, z20.s, z27.s
+ fmul z28.s, z20.s, z28.s
+ fmul z29.s, z20.s, z29.s
+ fmul z30.s, z20.s, z30.s
+ fmul z31.s, z20.s, z31.s
+
+ // z24-z31: convert the FP32 values from the tmp tensor to uint32.
+ fcvtzu z24.s, p0/m, z24.s
+ fcvtzu z25.s, p0/m, z25.s
+ fcvtzu z26.s, p0/m, z26.s
+ fcvtzu z27.s, p0/m, z27.s
+ fcvtzu z28.s, p0/m, z28.s
+ fcvtzu z29.s, p0/m, z29.s
+ fcvtzu z30.s, p0/m, z30.s
+ fcvtzu z31.s, p0/m, z31.s
+
+ // z18-z19: narrow the uint32 values into uint8 and saturate them.
+ .inst 0xc133e332 // uqcvt z18.b, { z24.s - z27.s }
+ .inst 0xc133e3b3 // uqcvt z19.b, { z28.s - z31.s }
+
+ .inst 0xa0228390 // st1b { z16.b - z19.b }, pn8, [x28, x2]
+
+ dup z25.s, z20.s[0] // Juggling the value back to z25 as z20 will be overwritten by the next iteration or z25 will be used below.
+
+b normalize_body_start%=
+normalize_body_end%=:
+
+normalize_leftover_start%=:
+ whilelo p1.b, x1, %x[length]
+ b.none normalize_leftover_end%=
+
+ // p2-p5 are - together - the 32-bit version of p1, the instructions below unpack p1 into those four predicate registers to allow for the 32-bit loads below to be correctly predicated
+ punpklo p2.h, p1.b
+ punpkhi p4.h, p1.b
+
+ punpkhi p3.h, p2.b
+ punpklo p2.h, p2.b
+
+ punpkhi p5.h, p4.b
+ punpklo p4.h, p4.b
+
+ mov x2, x1 // Preserve the index into x2 for the final store to dst.
+
+ // z20-z23: load exp(-scale*beta*x) from the tmp tensor
+ ld1w z20.s, p2/z, [x29, x1, LSL #2]
+ add x1, x1, #16
+
+ ld1w z21.s, p3/z, [x29, x1, LSL #2]
+ add x1, x1, #16
+
+ ld1w z22.s, p4/z, [x29, x1, LSL #2]
+ add x1, x1, #16
+
+ ld1w z23.s, p5/z, [x29, x1, LSL #2]
+ add x1, x1, #16
+
+ // z20-z23: effectively divides exp(-scale*beta*x) by the sum of the exponentials for the current row and multiplies by 256.
+ fmul z20.s, z25.s, z20.s
+ fmul z21.s, z25.s, z21.s
+ fmul z22.s, z25.s, z22.s
+ fmul z23.s, z25.s, z23.s
+
+ // z20-23: convert the FP32 values from the tmp tensor to uint32.
+ fcvtzu z20.s, p0/m, z20.s
+ fcvtzu z21.s, p0/m, z21.s
+ fcvtzu z22.s, p0/m, z22.s
+ fcvtzu z23.s, p0/m, z23.s
+
+ .inst 0xc133e2b3 // uqcvt z19.b, { z20.s - z23.s }, narrow the uint32 values into uint8 and saturate them into z19.
+
+ st1b z19.b, p1, [x28, x2]
+
+ b normalize_leftover_start%=
+normalize_leftover_end%=:
+ // ==================================================
+ // 3D loop closing
+ // ==================================================
+ add x27, x27, %x[src_stride_1]
+ add x28, x28, %x[dst_stride_1]
+ b loop_1_start%=
+loop_1_end%=:
+
+ add x24, x24, %x[src_stride_2]
+ add x25, x25, %x[dst_stride_2]
+ b loop_2_start%=
+loop_2_end%=:
+
+ add x21, x21, %x[src_stride_3]
+ add x22, x22, %x[dst_stride_3]
+ b loop_3_start%=
+loop_3_end%=:
+ .inst 0xd503467f // smstop
+ )"
+ :
+ : [src] "r"(src), [tmp] "r"(tmp), [dst] "r"(dst), [beta] "r"(beta), [lut] "r"(lut), //
+ [shape_1] "r"(shape[1]), [shape_2] "r"(shape[2]), [shape_3] "r"(shape[3]), //
+ [src_stride_1] "r"(src_strides[1]), [src_stride_2] "r"(src_strides[2]),
+ [src_stride_3] "r"(src_strides[3]), //
+ [dst_stride_1] "r"(dst_strides[1]), [dst_stride_2] "r"(dst_strides[2]),
+ [dst_stride_3] "r"(dst_strides[3]), //
+ [length] "r"(shape[0]) //
+ : "cc", "memory", //
+ "p0", "p1", "p2", "p3", "p4", //
+ "x2", "x9", "x13", //
+ "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x19", //
+ "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", //
+ "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", //
+ "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", //
+ "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" //
+ );
+}
+
+void sme2_qasymm8_softmax_lut_512VL(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr)
+{
+ ARM_COMPUTE_UNUSED(axis);
+
+ const auto *src_info = in->info();
+ const auto *dst_info = out->info();
+
+ const auto &full_shape = dst_info->tensor_shape();
+ const auto &src_strides = src_info->strides_in_bytes();
+ const auto &dst_strides = dst_info->strides_in_bytes();
+ Strides tmp_strides;
+
+ tmp_strides[0] = src_strides[0] * 4;
+ tmp_strides[1] = src_strides[1] * 4;
+ tmp_strides[2] = src_strides[2] * 4;
+ tmp_strides[3] = src_strides[3] * 4;
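+    // tmp holds one float per uint8 source element, so its byte strides are
+    // 4x the source strides.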
+
+ const uintptr_t k_shape[] = {
+ full_shape[0],
+ window.num_iterations(1),
+ window.num_iterations(2),
+ window.num_iterations(3),
+ };
+
+ const uintptr_t k_src_strides[] = {
+ src_strides[0],
+ src_strides[1],
+ src_strides[2],
+ src_strides[3],
+ };
+
+ const uintptr_t k_dst_strides[] = {
+ dst_strides[0],
+ dst_strides[1],
+ dst_strides[2],
+ dst_strides[3],
+ };
+
+ const uintptr_t k_src_offset = window[0].start() * src_strides[0] + //
+ window[1].start() * src_strides[1] + //
+ window[2].start() * src_strides[2] + //
+ window[3].start() * src_strides[3];
+
+ const uintptr_t k_dst_offset = window[0].start() * dst_strides[0] + //
+ window[1].start() * dst_strides[1] + //
+ window[2].start() * dst_strides[2] + //
+ window[3].start() * dst_strides[3];
+
+ const uintptr_t k_tmp_offset = window[0].start() * tmp_strides[0] + //
+ window[1].start() * tmp_strides[1] + //
+ window[2].start() * tmp_strides[2] + //
+ window[3].start() * tmp_strides[3];
+
+ const auto *k_src = reinterpret_cast<const uint8_t *>(in->buffer() + k_src_offset);
+    // k_tmp_offset is in bytes (tmp_strides are byte strides), so offset the
+    // pointer in bytes before casting back to float *.
+    auto *k_tmp = reinterpret_cast<float *>(reinterpret_cast<uint8_t *>(tmp) + k_tmp_offset);
+ auto *k_dst = reinterpret_cast<uint8_t *>(out->buffer() + k_dst_offset);
+
+ sme2_qasymm8_softmax_kernel_512VL(k_src, k_dst, beta, k_shape, k_src_strides, k_dst_strides, lut_ptr, k_tmp);
+}
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // ARM_COMPUTE_ENABLE_SME2
diff --git a/src/cpu/kernels/softmax/generic/sme2/qasymm8_signed.cpp b/src/cpu/kernels/softmax/generic/sme2/qasymm8_signed.cpp
new file mode 100644
index 0000000000..14c0f6c327
--- /dev/null
+++ b/src/cpu/kernels/softmax/generic/sme2/qasymm8_signed.cpp
@@ -0,0 +1,655 @@
+/*
+ * Copyright (c) 2023-2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+
+// SoftMax
+//
+// Steps:
+// * Find max: max_value = max(src)
+// * Regularize: dst[i] = exp(src[i] - max_value)
+// sum_value = sum(dst)
+// * Normalize: dst[i] = dst[i] / sum_value
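+//
+// The per-row computation matches the scalar model sketched in qasymm8.cpp,
+// with the int8 inputs shifted by +128 to index the LUT and the results
+// shifted by -128 back into the QASYMM8_SIGNED range.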
+void sme2_qasymm8_signed_softmax_kernel_512VL( //
+ const int8_t *src,
+ int8_t *dst,
+ float beta,
+ const uintptr_t shape[4],
+ const uintptr_t src_strides[4],
+ const uintptr_t dst_strides[4],
+ const float *lut,
+ float *tmp)
+{
+ // Precondition:
+ // * src_strides[0] == sizeof(int8_t)
+ // * dst_strides[0] == sizeof(int8_t)
+ // * tmp_strides[0] == sizeof(float)
+
+ __asm__ volatile(
+ R"(
+ .inst 0xd503477f // smstart
+
+ // For register list explanation refer to qasymm8.cpp.
+
+ // Prepares all constant values
+
+ ptrue p0.b
+ .inst 0x25a07811 // ptrue pn9.s
+ .inst 0x25207810 // ptrue pn8.b
+
+ // ---------------------------------------------------------------- x13: body_length = (length / vl) * vl
+ cntb x13, ALL, MUL #4
+ udiv x9, %x[length], x13
+ mul x13, x13, x9
+
+ // ==================================================
+ // 3D loop opening
+ // ==================================================
+
+ mov x20, %x[shape_3]
+ mov x21, %x[src]
+ mov x22, %x[dst]
+ mov x19, %x[lut]
+ mov x29, %x[tmp]
+
+ // Load the LUT to the register file.
+ mov x2, %x[lut]
+ .inst 0xa040c440 //ld1w { z0.s - z3.s }, pn9/z, [x2]
+ add x2, x2, #256
+ .inst 0xa040c444 //ld1w { z4.s - z7.s }, pn9/z, [x2]
+ add x2, x2, #256
+ .inst 0xa040c448 //ld1w { z8.s - z11.s }, pn9/z, [x2]
+ add x2, x2, #256
+ .inst 0xa040c44c //ld1w { z12.s - z15.s }, pn9/z, [x2]
+
+
+loop_3_start%=:
+ // for index_3 in shape_3 downto 1
+ cmp x20, #0
+ b.eq loop_3_end%=
+ sub x20, x20, #1
+
+ mov x23, %x[shape_2]
+ mov x24, x21
+ mov x25, x22
+
+loop_2_start%=:
+ // for index_2 in shape_2 downto 1
+ cmp x23, #0
+ b.eq loop_2_end%=
+ sub x23, x23, #1
+
+ mov x26, %x[shape_1]
+ mov x27, x24
+ mov x28, x25
+
+loop_1_start%=:
+ // for index_1 in shape_1 downto 1
+ cmp x26, #0
+ b.eq loop_1_end%=
+ sub x26, x26, #1
+
+ // ==================================================
+ // Step 1: Find max
+ // ==================================================
+ // z16-z19 = minimum QASYMM8_SIGNED value (-128) to allow for it to be used for comparison to find the max.
+ dup z16.b, #0x80
+ dup z17.b, #0x80
+ dup z18.b, #0x80
+ dup z19.b, #0x80
+
+ mov x1, #0 // x1: index
+find_max_body_start%=:
+ cmp x1, x13
+ b.eq find_max_body_end%=
+ .inst 0xa0018374 // ld1b { z20.b - z23.b }, pn8/z, [x27, x1] z20-z23: x
+ .inst 0xc134b810 // smax { z16.b - z19.b }, { z16.b - z19.b }, { z20.b - z23.b } z16-z19: max_value = max(max_value, x)
+ add x1, x1, #256 // Advance index by 256 bytes/integers: Z registers = 2048-bit data = 256 8-bit integers.
+ b find_max_body_start%=
+find_max_body_end%=:
+
+ // Loop for processing the leftover part.
+find_max_leftover_start%=:
+ whilelo p1.b, x1, %x[length]
+ b.none find_max_leftover_end%=
+
+ ld1b z30.b, p1/z, [x27, x1] // z30: x
+ smax z16.b, p1/m, z16.b, z30.b // z16: max_value = max(max_value, x)
+
+ add x1, x1, #64
+
+ b find_max_leftover_start%=
+find_max_leftover_end%=:
+ .inst 0xc132b010 // smax { z16.b, z17.b }, { z16.b, z17.b }, { z18.b, z19.b }
+ smax z16.b, p0/m, z16.b, z17.b
+ smaxv b16, p0, z16.b // Reduction signed max operation to get maximum_value
+ mov z16.b, b16 // z16: duplicated max_value for current row
+
+ sunpklo z16.h, z16.b // Using unpack instructions to align the max value with the FP32 entries in the LUT for use in the TBX instruction
+ sunpklo z16.s, z16.h
+
+ mov x1, #0 // reset index
+ dup z25.s, #0
+
+
+regularize_start%=:
+ whilelo p1.b, x1, %x[length]
+ b.none regularize_end%=
+
+ mov w9, 0xFF80
+ movk w9, 0xFFFF, LSL #16 // w9 = 0xFFFFFF80, i.e. -128 as int32: the minimum QASYMM8_SIGNED value, for initializing the registers below
+ dup z17.s, w9
+ dup z18.s, w9
+ dup z19.s, w9
+ dup z20.s, w9
+
+ dup z21.s, #0x0
+ dup z22.s, #0x0
+ dup z23.s, #0x0
+ dup z24.s, #0x0
+
+ // p2-p5 are - together - the 32-bit version of p1, the instructions below unpack p1 into those four predicate registers to allow for the 32-bit loads below to be correctly predicated
+ punpklo p2.h, p1.b
+ punpkhi p4.h, p1.b
+
+ punpkhi p3.h, p2.b
+ punpklo p2.h, p2.b
+
+ punpkhi p5.h, p4.b
+ punpklo p4.h, p4.b
+
+ ld1b z17.b, p1/z, [x27, x1] //z17: input data
+
+ sunpklo z18.h, z17.b // Using unpack instructions to align the input QASYMM8_SIGNED values with the FP32 entries in the LUT for use in the TBX instruction
+ sunpkhi z19.h, z17.b //
+
+ sunpklo z17.s, z18.h // z17 = low low input QASYMM8_SIGNED values
+ sunpkhi z18.s, z18.h // z18 = low high input QASYMM8_SIGNED values
+
+ sunpkhi z20.s, z19.h // z20 = high high input QASYMM8_SIGNED values
+ sunpklo z19.s, z19.h // z19 = high low input QASYMM8_SIGNED values
+
+ sub z17.s, z16.s, z17.s // z17: x = max_value - input_data
+ sub z18.s, z16.s, z18.s // z18: x = max_value - input_data
+ sub z19.s, z16.s, z19.s // z19: x = max_value - input_data
+ sub z20.s, z16.s, z20.s // z20: x = max_value - input_data
+
+ add z17.s, z17.s, #128
+ add z18.s, z18.s, #128
+ add z19.s, z19.s, #128
+ add z20.s, z20.s, #128
+
+ tbx z21.s, z0.s, z17.s // Look-up entries 0-15 in the LUT.
+ tbx z22.s, z0.s, z18.s
+ tbx z23.s, z0.s, z19.s
+ tbx z24.s, z0.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z1.s, z17.s // Look-up entries 16-31 in the LUT.
+ tbx z22.s, z1.s, z18.s
+ tbx z23.s, z1.s, z19.s
+ tbx z24.s, z1.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z2.s, z17.s // Look-up entries 32-47 in the LUT.
+ tbx z22.s, z2.s, z18.s
+ tbx z23.s, z2.s, z19.s
+ tbx z24.s, z2.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z3.s, z17.s // Look-up entries 48-63 in the LUT.
+ tbx z22.s, z3.s, z18.s
+ tbx z23.s, z3.s, z19.s
+ tbx z24.s, z3.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z4.s, z17.s // Look-up entries 64-79 in the LUT.
+ tbx z22.s, z4.s, z18.s
+ tbx z23.s, z4.s, z19.s
+ tbx z24.s, z4.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z5.s, z17.s // Look-up entries 80-95 in the LUT.
+ tbx z22.s, z5.s, z18.s
+ tbx z23.s, z5.s, z19.s
+ tbx z24.s, z5.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z6.s, z17.s // Look-up entries 96-111 in the LUT.
+ tbx z22.s, z6.s, z18.s
+ tbx z23.s, z6.s, z19.s
+ tbx z24.s, z6.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z7.s, z17.s // Look-up entries 112-127 in the LUT.
+ tbx z22.s, z7.s, z18.s
+ tbx z23.s, z7.s, z19.s
+ tbx z24.s, z7.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z8.s, z17.s // Look-up entries 128-143 in the LUT.
+ tbx z22.s, z8.s, z18.s
+ tbx z23.s, z8.s, z19.s
+ tbx z24.s, z8.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z9.s, z17.s // Look-up entries 144-159 in the LUT.
+ tbx z22.s, z9.s, z18.s
+ tbx z23.s, z9.s, z19.s
+ tbx z24.s, z9.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z10.s, z17.s // Look-up entries 160-175 in the LUT.
+ tbx z22.s, z10.s, z18.s
+ tbx z23.s, z10.s, z19.s
+ tbx z24.s, z10.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z11.s, z17.s // Look-up entries 176-191 in the LUT.
+ tbx z22.s, z11.s, z18.s
+ tbx z23.s, z11.s, z19.s
+ tbx z24.s, z11.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z12.s, z17.s // Look-up entries 192-207 in the LUT.
+ tbx z22.s, z12.s, z18.s
+ tbx z23.s, z12.s, z19.s
+ tbx z24.s, z12.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z13.s, z17.s // Look-up entries 208-223 in the LUT.
+ tbx z22.s, z13.s, z18.s
+ tbx z23.s, z13.s, z19.s
+ tbx z24.s, z13.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z14.s, z17.s // Look-up entries 224-239 in the LUT.
+ tbx z22.s, z14.s, z18.s
+ tbx z23.s, z14.s, z19.s
+ tbx z24.s, z14.s, z20.s
+
+ sub z17.s, z17.s, #16
+ sub z18.s, z18.s, #16
+ sub z19.s, z19.s, #16
+ sub z20.s, z20.s, #16
+
+ tbx z21.s, z15.s, z17.s // Look-up entries 240-255 in the LUT.
+ tbx z22.s, z15.s, z18.s
+ tbx z23.s, z15.s, z19.s
+ tbx z24.s, z15.s, z20.s
+
+
+ st1w z21.s, p2, [x29, x1, LSL #2]// z21 store exp(-scale*beta*x) into the tmp tensor
+ fadd z25.s, p2/m, z25.s, z21.s
+ add x1, x1, #16
+
+ st1w z22.s, p3, [x29, x1, LSL #2]// z22 store exp(-scale*beta*x) into the tmp tensor
+ fadd z25.s, p3/m, z25.s, z22.s
+ add x1, x1, #16
+
+ st1w z23.s, p4, [x29, x1, LSL #2]// z23 store exp(-scale*beta*x) into the tmp tensor
+ fadd z25.s, p4/m, z25.s, z23.s
+ add x1, x1, #16
+
+ st1w z24.s, p5, [x29, x1, LSL #2]// z24 store exp(-scale*beta*x) into the tmp tensor
+ fadd z25.s, p5/m, z25.s, z24.s
+ add x1, x1, #16
+
+ b regularize_start%=
+regularize_end%=:
+
+ mov w9, 0x0000
+ movk w9, 0x4380, LSL #16 // w9 = 256.f, used to scale - via multiplication (division by reciprocal) - the floating point [0,1] range of the results to the [0,255] range
+ mov w10, 0x0000
+ movk w10, 0x4300, LSL #16 // w10 = 128.f, subtracted below to shift the results from the [0,255] range to the [-128,127] range of QASYMM8_SIGNED
+ dup z29.s, w9
+ dup z30.s, w10
+ faddv s25, p0, z25.s
+ fdiv s25, s29, s25
+ dup z25.s, z25.s[0] // z25: 256.f/sum. 256 is needed to get the full range and 1/sum is part of softmax.
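+ // Note: values are scaled into [0, 256] here and shifted by -128.f below,
+ // so the saturating narrowing (sqcvt) clamps the row-max case into [-128, 127].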
+
+ // ==================================================
+ // Step 3: Normalize
+ // ==================================================
+ mov x1, #0
+normalize_body_start%=:
+ cmp x1, x13
+ b.eq normalize_body_end%=
+
+ mov x2, x1 // Preserve the index into x2 for the final store to dst.
+ .inst 0xa001c7b0 // ld1w { z16.s - z19.s }, pn9/z, [x29, x1, lsl #2]
+ add x1, x1, #64
+ .inst 0xa001c7b4 // ld1w { z20.s - z23.s }, pn9/z, [x29, x1, lsl #2]
+ add x1, x1, #64
+
+ // z16-z23: effectively divides exp(-scale*beta*x) by the sum of the exponentials for the current row and multiplies by 256.
+ fmul z16.s, z25.s, z16.s
+ fmul z17.s, z25.s, z17.s
+ fmul z18.s, z25.s, z18.s
+ fmul z19.s, z25.s, z19.s
+ fmul z20.s, z25.s, z20.s
+ fmul z21.s, z25.s, z21.s
+ fmul z22.s, z25.s, z22.s
+ fmul z23.s, z25.s, z23.s
+
+ // z16-z23: subtract 128.f.
+ fsub z16.s, z16.s, z30.s
+ fsub z17.s, z17.s, z30.s
+ fsub z18.s, z18.s, z30.s
+ fsub z19.s, z19.s, z30.s
+ fsub z20.s, z20.s, z30.s
+ fsub z21.s, z21.s, z30.s
+ fsub z22.s, z22.s, z30.s
+ fsub z23.s, z23.s, z30.s
+
+ // z16-z23: convert the FP32 values from the tmp tensor to int32.
+ fcvtzs z16.s, p0/m, z16.s
+ fcvtzs z17.s, p0/m, z17.s
+ fcvtzs z18.s, p0/m, z18.s
+ fcvtzs z19.s, p0/m, z19.s
+ fcvtzs z20.s, p0/m, z20.s
+ fcvtzs z21.s, p0/m, z21.s
+ fcvtzs z22.s, p0/m, z22.s
+ fcvtzs z23.s, p0/m, z23.s
+
+ // z16-z17: narrow the int32 values into int8 and saturate them.
+ .inst 0xc133e210 // sqcvt z16.b, { z16.s - z19.s }
+ .inst 0xc133e291 // sqcvt z17.b, { z20.s - z23.s }
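+ // Each sqcvt packs four .s vectors (4 x 16 int32 lanes at 512-bit VL) into one .b
+ // vector of 64 saturated int8 values, so z16 and z17 together hold the first 128
+ // results of this iteration, ready for the contiguous st1b of z16-z19 further down.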
+
+ // Juggling the values to z20 (resp. z21), as z25 (resp. z30) will be overwritten by the loads below.
+ dup z20.s, z25.s[0]
+ dup z21.s, z30.s[0]
+
+ .inst 0xa001c7b8 // ld1w { z24.s - z27.s }, pn9/z, [x29, x1, lsl #2]
+ add x1, x1, #64
+ .inst 0xa001c7bc // ld1w { z28.s - z31.s }, pn9/z, [x29, x1, lsl #2]
+ add x1, x1, #64
+
+ // z24-z31: effectively divides exp(-scale*beta*x) by the sum of the exponentials for the current row and multiplies by 256.
+ fmul z24.s, z20.s, z24.s
+ fmul z25.s, z20.s, z25.s
+ fmul z26.s, z20.s, z26.s
+ fmul z27.s, z20.s, z27.s
+ fmul z28.s, z20.s, z28.s
+ fmul z29.s, z20.s, z29.s
+ fmul z30.s, z20.s, z30.s
+ fmul z31.s, z20.s, z31.s
+
+ // z24-z31: subtract 128.f.
+ fsub z24.s, z24.s, z21.s
+ fsub z25.s, z25.s, z21.s
+ fsub z26.s, z26.s, z21.s
+ fsub z27.s, z27.s, z21.s
+ fsub z28.s, z28.s, z21.s
+ fsub z29.s, z29.s, z21.s
+ fsub z30.s, z30.s, z21.s
+ fsub z31.s, z31.s, z21.s
+
+ // z24-z31: convert the FP32 values from the tmp tensor to int32.
+ fcvtzs z24.s, p0/m, z24.s
+ fcvtzs z25.s, p0/m, z25.s
+ fcvtzs z26.s, p0/m, z26.s
+ fcvtzs z27.s, p0/m, z27.s
+ fcvtzs z28.s, p0/m, z28.s
+ fcvtzs z29.s, p0/m, z29.s
+ fcvtzs z30.s, p0/m, z30.s
+ fcvtzs z31.s, p0/m, z31.s
+
+ // z18-z19: narrow the int32 values into int8 and saturate them.
+ .inst 0xc133e312 // sqcvt z18.b, { z24.s - z27.s }
+ .inst 0xc133e393 // sqcvt z19.b, { z28.s - z31.s }
+
+ .inst 0xa0228390 // st1b { z16.b - z19.b }, pn8, [x28, x2]
+
+ // Juggling the values back to z25 (resp. z30), as z20 (resp. z21) will be overwritten by the next iteration while z25 (resp. z30) is still needed below.
+ dup z25.s, z20.s[0]
+ dup z30.s, z21.s[0]
+ b normalize_body_start%=
+normalize_body_end%=:
+normalize_leftover_start%=:
+ whilelo p1.b, x1, %x[length]
+ b.none normalize_leftover_end%=
+
+ // p2-p5 together form the 32-bit view of the byte predicate p1. The instructions below unpack p1 into those four predicate registers so that the 32-bit loads that follow are predicated correctly.
+ punpklo p2.h, p1.b
+ punpkhi p4.h, p1.b
+
+ punpkhi p3.h, p2.b
+ punpklo p2.h, p2.b
+
+ punpkhi p5.h, p4.b
+ punpklo p4.h, p4.b
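+
+ // After unpacking: p2 predicates word lanes 0-15, p3 lanes 16-31, p4 lanes 32-47 and
+ // p5 lanes 48-63 of the row covered by p1, matching the four 16-float loads below.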
+
+ mov x2, x1 // Preserve the index into x2 for the final store to dst.
+
+ // z20-z23: load exp(-scale*beta*x) from the tmp tensor
+ ld1w z20.s, p2/z, [x29, x1, LSL #2]
+ add x1, x1, #16
+
+ ld1w z21.s, p3/z, [x29, x1, LSL #2]
+ add x1, x1, #16
+
+ ld1w z22.s, p4/z, [x29, x1, LSL #2]
+ add x1, x1, #16
+
+ ld1w z23.s, p5/z, [x29, x1, LSL #2]
+ add x1, x1, #16
+
+ // z20-z23: effectively divides exp(-scale*beta*x) by the sum of the exponentials for the current row and multiplies by 256.
+ fmul z20.s, z25.s, z20.s
+ fmul z21.s, z25.s, z21.s
+ fmul z22.s, z25.s, z22.s
+ fmul z23.s, z25.s, z23.s
+
+ // z20-z23: subtract 128.f.
+ fsub z20.s, z20.s, z30.s
+ fsub z21.s, z21.s, z30.s
+ fsub z22.s, z22.s, z30.s
+ fsub z23.s, z23.s, z30.s
+
+ // z20-z23: convert the FP32 values from the tmp tensor to int32.
+ fcvtzs z20.s, p0/m, z20.s
+ fcvtzs z21.s, p0/m, z21.s
+ fcvtzs z22.s, p0/m, z22.s
+ fcvtzs z23.s, p0/m, z23.s
+
+ .inst 0xc133e293 // sqcvt z19.b, { z20.s - z23.s }: narrow the int32 values into int8 and saturate them into z19.
+
+ st1b z19.b, p1, [x28, x2]
+
+ b normalize_leftover_start%=
+normalize_leftover_end%=:
+ // ==================================================
+ // 3D loop closing
+ // ==================================================
+ add x27, x27, %x[src_stride_1]
+ add x28, x28, %x[dst_stride_1]
+ b loop_1_start%=
+loop_1_end%=:
+
+ add x24, x24, %x[src_stride_2]
+ add x25, x25, %x[dst_stride_2]
+ b loop_2_start%=
+loop_2_end%=:
+
+ add x21, x21, %x[src_stride_3]
+ add x22, x22, %x[dst_stride_3]
+ b loop_3_start%=
+loop_3_end%=:
+ .inst 0xd503467f // smstop
+ )"
+ :
+ : [src] "r"(src), [tmp] "r"(tmp), [dst] "r"(dst), [beta] "r"(beta), [lut] "r"(lut), //
+ [shape_1] "r"(shape[1]), [shape_2] "r"(shape[2]), [shape_3] "r"(shape[3]), //
+ [src_stride_1] "r"(src_strides[1]), [src_stride_2] "r"(src_strides[2]),
+ [src_stride_3] "r"(src_strides[3]), //
+ [dst_stride_1] "r"(dst_strides[1]), [dst_stride_2] "r"(dst_strides[2]),
+ [dst_stride_3] "r"(dst_strides[3]), //
+ [length] "r"(shape[0]) //
+ : "cc", "memory", //
+ "p0", "p1", "p2", "p3", "p4", //
+ "x2", "x9", "x13", //
+ "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x19", //
+ "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", //
+ "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", //
+ "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", //
+ "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" //
+ );
+}
+
+void sme2_qasymm8_signed_softmax_lut_512VL(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr)
+{
+ ARM_COMPUTE_UNUSED(axis);
+
+ const auto *src_info = in->info();
+ const auto *dst_info = out->info();
+
+ const auto &full_shape = dst_info->tensor_shape();
+ const auto &src_strides = src_info->strides_in_bytes();
+ const auto &dst_strides = dst_info->strides_in_bytes();
+ Strides tmp_strides;
+
+ tmp_strides[0] = src_strides[0] * 4;
+ tmp_strides[1] = src_strides[1] * 4;
+ tmp_strides[2] = src_strides[2] * 4;
+ tmp_strides[3] = src_strides[3] * 4;
+
+ const uintptr_t k_shape[] = {
+ full_shape[0],
+ window.num_iterations(1),
+ window.num_iterations(2),
+ window.num_iterations(3),
+ };
+
+ const uintptr_t k_src_strides[] = {
+ src_strides[0],
+ src_strides[1],
+ src_strides[2],
+ src_strides[3],
+ };
+
+ const uintptr_t k_dst_strides[] = {
+ dst_strides[0],
+ dst_strides[1],
+ dst_strides[2],
+ dst_strides[3],
+ };
+
+ const uintptr_t k_src_offset = window[0].start() * src_strides[0] + //
+ window[1].start() * src_strides[1] + //
+ window[2].start() * src_strides[2] + //
+ window[3].start() * src_strides[3];
+
+ const uintptr_t k_dst_offset = window[0].start() * dst_strides[0] + //
+ window[1].start() * dst_strides[1] + //
+ window[2].start() * dst_strides[2] + //
+ window[3].start() * dst_strides[3];
+
+ const uintptr_t k_tmp_offset = window[0].start() * tmp_strides[0] + //
+ window[1].start() * tmp_strides[1] + //
+ window[2].start() * tmp_strides[2] + //
+ window[3].start() * tmp_strides[3];
+
+ const auto *k_src = reinterpret_cast<const int8_t *>(in->buffer() + k_src_offset);
+ float *tmp_float_ptr = reinterpret_cast<float *>(tmp);
+ auto *k_tmp = reinterpret_cast<float *>(tmp_float_ptr + k_tmp_offset);
+ auto *k_dst = reinterpret_cast<int8_t *>(out->buffer() + k_dst_offset);
+
+ sme2_qasymm8_signed_softmax_kernel_512VL(k_src, k_dst, beta, k_shape, k_src_strides, k_dst_strides, lut_ptr, k_tmp);
+}
+
+} // namespace cpu
+} // namespace arm_compute
+
+#endif // ARM_COMPUTE_ENABLE_SME2
diff --git a/src/cpu/kernels/softmax/list.h b/src/cpu/kernels/softmax/list.h
index 1bb8ed50f0..7bbb265022 100644
--- a/src/cpu/kernels/softmax/list.h
+++ b/src/cpu/kernels/softmax/list.h
@@ -28,9 +28,10 @@ namespace arm_compute
{
namespace cpu
{
-#define DECLARE_SOFTMAX_KERNEL(func_name) \
- template <bool IS_LOG> \
- void func_name(const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window)
+#define DECLARE_SOFTMAX_KERNEL(func_name) \
+ template <bool IS_LOG> \
+ void func_name(const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window, \
+ const float *lut_ptr)
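+// For instance, DECLARE_SOFTMAX_KERNEL(neon_fp32_softmax) declares:
+//   template <bool IS_LOG>
+//   void neon_fp32_softmax(const ITensor *in, void *const tmp, ITensor *out, const float beta,
+//                          int axis, const Window &window, const float *lut_ptr);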
DECLARE_SOFTMAX_KERNEL(neon_fp32_softmax);
DECLARE_SOFTMAX_KERNEL(neon_fp16_softmax);
@@ -39,11 +40,37 @@ DECLARE_SOFTMAX_KERNEL(neon_qasymm8_signed_softmax);
#ifdef ARM_COMPUTE_ENABLE_SME2
-void sme2_fp32_softmax(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
+void sme2_fp32_softmax(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
-void sme2_fp16_softmax(
- const ITensor *in, void *const tmp, ITensor *out, const float beta, int axis, const Window &window);
+void sme2_fp16_softmax(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
+
+void sme2_qasymm8_softmax_lut_512VL(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
+
+void sme2_qasymm8_signed_softmax_lut_512VL(const ITensor *in,
+ void *const tmp,
+ ITensor *out,
+ const float beta,
+ int axis,
+ const Window &window,
+ const float *lut_ptr);
#endif // ARM_COMPUTE_ENABLE_SME2
diff --git a/src/cpu/operators/CpuConv2d.h b/src/cpu/operators/CpuConv2d.h
index 71b9e15dc1..0012ff6609 100644
--- a/src/cpu/operators/CpuConv2d.h
+++ b/src/cpu/operators/CpuConv2d.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2021, 2023 Arm Limited.
+ * Copyright (c) 2017-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,6 +21,10 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#ifndef ACL_SRC_CPU_OPERATORS_CPUCONV2D_H
+#define ACL_SRC_CPU_OPERATORS_CPUCONV2D_H
+
#include "arm_compute/function_info/ActivationLayerInfo.h"
#include "src/core/common/Macros.h"
@@ -81,6 +85,7 @@ public:
* |F16 |F16 |F16 |F16 |
* |F32 |F32 |F32 |F32 |
* |QASYMM8 |QASYMM8 |S32 |QASYMM8 |
+ * |QASYMM8 |QASYMM8_SIGNED |S32 |QASYMM8 |
* |QASYMM8 |QSYMM8_PER_CHANNEL |S32 |QASYMM8 |
* |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED |
* |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32 |QASYMM8_SIGNED |
@@ -89,7 +94,7 @@ public:
* while every optional dimension from 4 and above represent a batch of inputs.
* Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * Data type supported: Same as @p src, also could be QSYMM8_PER_CHANNEL if input is QASYMM8/QASYMM8_SIGNED.
+ * Data type supported: Same as @p src, also could be QSYMM8_PER_CHANNEL or QASYMM8_SIGNED if input is QASYMM8/QASYMM8_SIGNED.
* @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
* Data type supported: Same as @p src, except for input of QASYMM8/QASYMM8_SIGNED type where biases should be of S32 type.
* @param[out] dst Destination tensor info. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
@@ -135,7 +140,7 @@ public:
* while every optional dimension from 4 and above represent a batch of inputs.
* Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
* @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
- * Data type supported:Same as @p src, also could be QSYMM8_PER_CHANNEL if input is QASYMM8/QASYMM8_SIGNED.
+ * Data type supported:Same as @p src, also could be QSYMM8_PER_CHANNEL or QASYMM8_SIGNED if input is QASYMM8/QASYMM8_SIGNED.
* @param[in] dst Destination tensor info. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
* Data types supported: Same as @p src.
* @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo.
@@ -167,3 +172,5 @@ private:
};
} // namespace cpu
} // namespace arm_compute
+
+#endif // ACL_SRC_CPU_OPERATORS_CPUCONV2D_H
diff --git a/src/cpu/operators/CpuGemmConv2d.h b/src/cpu/operators/CpuGemmConv2d.h
index 48a0d11107..ae5023a71a 100644
--- a/src/cpu/operators/CpuGemmConv2d.h
+++ b/src/cpu/operators/CpuGemmConv2d.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021-2023 Arm Limited.
+ * Copyright (c) 2021-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -76,6 +76,7 @@ public:
* |F32 |F32 |F32 |F32 |
* |BFLOAT16 |BFLOAT16 |BFLOAT16 |BFLOAT16 |
* |QASYMM8 |QASYMM8 |S32 |QASYMM8 |
+ * |QASYMM8 |QASYMM8_SIGNED |S32 |QASYMM8 |
* |QASYMM8 |QSYMM8_PER_CHANNEL |S32 |QASYMM8 |
* |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED |
* |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32 |QASYMM8_SIGNED |
diff --git a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
index f3396fbb5c..1dbe3d8a31 100644
--- a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
+++ b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
@@ -128,24 +128,31 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
_reshape_b_only_on_first_run;
_gemm_info = gemm_info;
- // Offset kernel is need if offset is non-zero or it may change (i.e. dynamic).
- // It is not needed if the datatype is symmetric, because there is no offset
- bool a_offset_kernel_needed = _a_offset != 0 || a->quantization_info().is_dynamic();
- bool b_offset_kernel_needed = _b_offset != 0 || b->quantization_info().is_dynamic();
+ const ITensorInfo *a_to_use = a;
- _asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
+ // Initialize assembly kernel meta-data
+ const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
- const ITensorInfo *a_to_use = a;
+ const int32_t offset_correction = 128;
+ const DataType dt = DataType::QASYMM8_SIGNED;
+ const UniformQuantizationInfo iqinfo = a_to_use->quantization_info().uniform();
+
+ _signed_a = a_to_use->clone()->set_data_type(dt).set_quantization_info(
+ QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction));
+
+ // If inputs are mixed-sign but this machine does not support mixed sign kernels,
+ // flip the sign so matched-sign kernels can be used.
+ if (!_flip_signedness && a->data_type() == DataType::QASYMM8 && b->data_type() == DataType::QASYMM8_SIGNED &&
+ !bool(CpuGemmAssemblyDispatch::validate(a_to_use, b, c, dst, asm_info)))
+ {
+ _flip_signedness = true;
+ }
+
+ _asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
// Convert to QASYMM8 -> QASYMM8_SIGNED and back
if (_flip_signedness)
{
- const int32_t offset_correction = 128;
- const DataType dt = DataType::QASYMM8_SIGNED;
- const UniformQuantizationInfo iqinfo = a_to_use->quantization_info().uniform();
-
- _signed_a = a_to_use->clone()->set_data_type(dt).set_quantization_info(
- QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction));
_convert_to_signed_asymm = std::make_unique<kernels::CpuConvertQuantizedSignednessKernel>();
_convert_to_signed_asymm->configure(a_to_use, &_signed_a);
a_to_use = &_signed_a;
@@ -166,6 +173,11 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
matrix_a = &_signed_a;
}
+ // Offset kernel is needed if the offset is non-zero or if it may change (i.e. dynamic).
+ // It is not needed if the data type is symmetric, because there is no offset.
+ bool a_offset_kernel_needed = _a_offset != 0 || a->quantization_info().is_dynamic();
+ bool b_offset_kernel_needed = _b_offset != 0 || b->quantization_info().is_dynamic();
+
// If GEMMLowpOutputStage != NONE, fuse the offset contribution with the output stage
if (info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE)
{
@@ -173,8 +185,6 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
_mm_result_s32 = TensorInfo(dst->tensor_shape(), 1, DataType::S32);
}
- // Initialize assembly kernel meta-data
- const cpu::AsmGemmInfo asm_info = init_assembly_metadata(gemm_info);
#ifdef __aarch64__
if (!(!b->are_values_constant() &&
b->tensor_shape().z() > 1)) // Disable batch matmul as optimized GeMM handles batching differently.
@@ -375,10 +385,6 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
int32_t a_offset = a->quantization_info().uniform().offset;
int32_t b_offset = b->quantization_info().uniform().offset;
- // Offset kernel is need if offset is non-zero or it may change (i.e. dynamic).
- bool a_offset_kernel_needed = a_offset != 0 || a->quantization_info().is_dynamic();
- bool b_offset_kernel_needed = b_offset != 0 || b->quantization_info().is_dynamic();
-
bool fuse_output_stage = info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE;
if (fuse_output_stage)
{
@@ -386,19 +392,31 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
a->clone()->set_tensor_shape(output->tensor_shape()).set_data_type(DataType::S32));
}
+ // Initialize assembly kernel meta-data
+ const AsmGemmInfo asm_info = init_assembly_metadata(info);
+
// Convert QASYMM8->QASYMM8_SIGNED
- TensorInfo signed_a{};
+ const int32_t offset_correction = 128;
+ const DataType dt = DataType::QASYMM8_SIGNED;
+ const UniformQuantizationInfo iqinfo = a_to_use->quantization_info().uniform();
+
+ TensorInfo signed_a = a_to_use->clone()->set_data_type(dt).set_quantization_info(
+ QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction));
TensorInfo signed_output{};
- bool flip_signedness = is_data_type_quantized_per_channel(b->data_type()) &&
+
+ bool flip_signedness = is_data_type_quantized_per_channel(b->data_type()) &&
(a->data_type() == DataType::QASYMM8) && info.reshape_b_only_on_first_run();
- if (flip_signedness)
+
+ // If inputs are mixed-sign but this machine does not support mixed sign kernels,
+ // flip the sign so matched-sign kernels can be used.
+ if (!flip_signedness && a->data_type() == DataType::QASYMM8 && b->data_type() == DataType::QASYMM8_SIGNED &&
+ !bool(CpuGemmAssemblyDispatch::validate(a_to_use, b, c, output, asm_info)))
{
- const int32_t offset_correction = 128;
- const DataType dt = DataType::QASYMM8_SIGNED;
- const UniformQuantizationInfo iqinfo = a_to_use->quantization_info().uniform();
+ flip_signedness = true;
+ }
- signed_a = a_to_use->clone()->set_data_type(dt).set_quantization_info(
- QuantizationInfo(iqinfo.scale, iqinfo.offset + offset_correction));
+ if (flip_signedness)
+ {
ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuConvertQuantizedSignednessKernel::validate(a_to_use, &signed_a));
a_to_use = &signed_a;
a_offset = signed_a.quantization_info().uniform().offset;
@@ -418,8 +436,9 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
matrix_a_info = &signed_a;
}
- // Initialize assembly kernel meta-data
- const AsmGemmInfo asm_info = init_assembly_metadata(info);
+ // Offset kernel is needed if the offset is non-zero or if it may change (i.e. dynamic).
+ bool a_offset_kernel_needed = a_offset != 0 || a->quantization_info().is_dynamic();
+ bool b_offset_kernel_needed = b_offset != 0 || b->quantization_info().is_dynamic();
// Check if we need to run the optimized assembly kernel
bool run_optimised = false;
diff --git a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h
index 38121c9bb4..11fe6f9ef0 100644
--- a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h
+++ b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h
@@ -81,11 +81,13 @@ public:
* |src0 |src1 |src2 |dst |
* |:--------------|:------------------|:--------|:--------------|
* |QASYMM8 |QASYMM8 |S32 |QASYMM8 |
+ * |QASYMM8 |QASYMM8_SIGNED |S32 |QASYMM8 |
* |QASYMM8 |QSYMM8_PER_CHANNEL |S32 |QASYMM8 |
* |QASYMM8 |QSYMM8 |S32 |QASYMM8 |
* |QASYMM8 |QASYMM8 |S32 |S32 |
* |QASYMM8 |QSYMM8_PER_CHANNEL |S32 |S32 |
* |QASYMM8 |QSYMM8 |S32 |S32 |
+ * |QASYMM8 |QASYMM8_SIGNED |F32 |F32 |
* |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED |
* |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32 |QASYMM8_SIGNED |
* |QASYMM8_SIGNED |QSYMM8 |S32 |QASYMM8_SIGNED |
diff --git a/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp b/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
index 7d85885654..156a798d50 100644
--- a/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
+++ b/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
@@ -45,6 +45,7 @@ namespace
/** Run pretranspose_B_array in parallel (1D static scheduling)
*
* @tparam TypeInput
+ * @tparam TypeWeight
* @tparam TypeOutput
*
* @param[in] gemm_asm GemmCommon kernel to run
@@ -54,14 +55,14 @@ namespace
* @param[in] src_multi_stride Stride in z ("multi")
* @param[in] num_threads Number of threads to run this method. Must be >= 1
*/
-template <typename TypeInput, typename TypeOutput>
-void run_parallel_pretranspose_B_array(arm_gemm::GemmCommon<TypeInput, TypeOutput> *gemm_asm,
- ITensor *dst,
- const TypeInput *src,
- int src_ld,
- int src_multi_stride,
- unsigned int num_threads,
- bool transpose)
+template <typename TypeInput, typename TypeWeight, typename TypeOutput>
+void run_parallel_pretranspose_B_array(arm_gemm::GemmCommon<TypeInput, TypeWeight, TypeOutput> *gemm_asm,
+ ITensor *dst,
+ const TypeWeight *src,
+ int src_ld,
+ int src_multi_stride,
+ unsigned int num_threads,
+ bool transpose)
{
ARM_COMPUTE_ERROR_ON(gemm_asm == nullptr);
ARM_COMPUTE_ERROR_ON(num_threads == 0);
@@ -91,14 +92,6 @@ using namespace arm_compute::experimental;
namespace
{
-struct free_delete
-{
- void operator()(void *x)
- {
- free(x);
- }
-};
-
struct Params
{
unsigned int M;
@@ -113,14 +106,13 @@ struct Params
Params extract_parameters(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *d, const AsmGemmInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(a, b, d);
- Params p;
- p.M = d->tensor_shape().y();
- p.K = a->tensor_shape().x();
- p.N = d->tensor_shape().x();
- p.batches = 1;
- p.multis = 1;
- p.sections = 1;
- p.indirect = false;
+ Params p{/* M */ static_cast<unsigned int>(d->tensor_shape().y()),
+ /* N */ static_cast<unsigned int>(d->tensor_shape().x()),
+ /* K */ static_cast<unsigned int>(a->tensor_shape().x()),
+ /* batches */ 1,
+ /* multis */ 1,
+ /* sections */ 1,
+ /* indirect */ false};
if (info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect)
{
@@ -172,13 +164,10 @@ IScheduler::Hints scheduling_hint_heuristic(arm_gemm::GemmMethod method, DataTyp
}
/** Fallback in case ACL doesn't have a function */
-template <typename TypeInput, typename TypeOutput, class OutputStage = arm_gemm::Nothing>
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage = arm_gemm::Nothing>
class Fallback : public CpuGemmAssemblyDispatch::IFallback
{
public:
- /** Destructor */
- ~Fallback() = default;
-
/** Initialise the functions's input and output.
*
* @param[in] a Input tensor containing the Matrix A.
@@ -222,7 +211,9 @@ public:
bool isVarWeightsKernel() const override
{
if (!_gemm_kernel_asm)
+ {
return false;
+ }
const arm_compute::WeightFormat wf =
assembly_utils::map_to_arm_compute_weight_format(_gemm_kernel_asm->get_config().weight_format);
return wf != arm_compute::WeightFormat::UNSPECIFIED && wf != arm_compute::WeightFormat::ANY;
@@ -251,7 +242,7 @@ private:
/** Operator to transpose B before gemm or pretranspose_B_array*/
std::unique_ptr<CpuTranspose> _pre_pretranspose_b{nullptr};
/** Assembly Gemm kernel */
- std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeOutput>> _gemm_kernel_asm{nullptr};
+ std::shared_ptr<arm_gemm::GemmCommon<TypeInput, TypeWeight, TypeOutput>> _gemm_kernel_asm{nullptr};
/** Optimised Arm® Neon™ kernel */
std::unique_ptr<INEKernel> _optimised_kernel{nullptr};
/** Assembly GEMM workspace tensor info */
@@ -273,22 +264,22 @@ private:
/** Per channel quantization multipliers */
std::vector<int32_t> _multipliers{};
/** Indirect buffer */
- std::unique_ptr<const TypeInput *const *, free_delete> _indirect_arg{};
- std::unique_ptr<const TypeInput *, free_delete> _indirect_buf{};
- std::vector<TypeInput> _indirect_pad{};
- arm_gemm::ConvolutionParameters _cp{};
- experimental::MemoryRequirements _aux_mem{Count};
- bool _B_pretranspose_required{false};
- bool _is_b_constant{true};
- bool _is_c_constant{true};
- bool _run_pre_pretranspose_b{false};
- bool _B_pre_pretranspose_required{false};
+ std::vector<const TypeInput *const *> _indirect_arg{};
+ std::vector<const TypeInput *> _indirect_buf{};
+ std::vector<TypeInput> _indirect_pad{};
+ arm_gemm::ConvolutionParameters _cp{};
+ experimental::MemoryRequirements _aux_mem{Count};
+ bool _B_pretranspose_required{false};
+ bool _is_b_constant{true};
+ bool _is_c_constant{true};
+ bool _run_pre_pretranspose_b{false};
+ bool _B_pre_pretranspose_required{false};
};
-template <typename TypeInput, typename TypeOutput, class OutputStage>
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage>
std::tuple<bool, const int32_t *, const int32_t *, const int32_t *>
-Fallback<TypeInput, TypeOutput, OutputStage>::set_requantize_data(const std::vector<int32_t> &shifts,
- const std::vector<int32_t> &multipliers)
+Fallback<TypeInput, TypeWeight, TypeOutput, OutputStage>::set_requantize_data(const std::vector<int32_t> &shifts,
+ const std::vector<int32_t> &multipliers)
{
_multipliers = multipliers;
_shifts = shifts;
@@ -305,8 +296,8 @@ Fallback<TypeInput, TypeOutput, OutputStage>::set_requantize_data(const std::vec
return std::make_tuple(need_left, left_shifts.data(), right_shifts.data(), _multipliers.data());
}
-template <typename TypeInput, typename TypeOutput, class OutputStage>
-void Fallback<TypeInput, TypeOutput, OutputStage>::prepare_indirect_buffer(ITensorPack &tensors)
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage>
+void Fallback<TypeInput, TypeWeight, TypeOutput, OutputStage>::prepare_indirect_buffer(ITensorPack &tensors)
{
auto a = tensors.get_const_tensor(TensorType::ACL_SRC_0);
const TypeInput *A_ptr = reinterpret_cast<TypeInput *>(a->buffer());
@@ -343,14 +334,12 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::prepare_indirect_buffer(ITens
if (input_x < 0 || input_x >= _cp.input_width || input_y < 0 || input_y >= _cp.input_height)
{
- _indirect_buf
- .get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] =
+ _indirect_buf[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] =
_indirect_pad.data();
}
else
{
- _indirect_buf
- .get()[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] =
+ _indirect_buf[m * multi_stride + b * batch_stride + kernel_xy * output_hw + output_xy] =
A_ptr + (m * multi_stride_A + b * batch_stride_A + input_xy * stride_A);
}
}
@@ -361,11 +350,11 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::prepare_indirect_buffer(ITens
}
}
-template <typename TypeInput, typename TypeOutput, class OutputStage>
-void Fallback<TypeInput, TypeOutput, OutputStage>::configure_indirect(const ITensorInfo *a,
- const ITensorInfo *b,
- const ITensorInfo *d,
- const AsmGemmInfo &info)
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage>
+void Fallback<TypeInput, TypeWeight, TypeOutput, OutputStage>::configure_indirect(const ITensorInfo *a,
+ const ITensorInfo *b,
+ const ITensorInfo *d,
+ const AsmGemmInfo &info)
{
ARM_COMPUTE_ERROR_ON(!(info.method == AsmConvMethod::Conv || info.method == AsmConvMethod::Indirect));
@@ -375,13 +364,13 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure_indirect(const ITen
zeropad = a->quantization_info().uniform().offset;
}
- const int64_t input_width = static_cast<int64_t>(a->tensor_shape()[1]);
- const int64_t input_height = static_cast<int64_t>(a->tensor_shape()[2]);
- const int64_t input_channels = static_cast<int64_t>(a->tensor_shape()[0]);
- const int64_t kernel_width = static_cast<int64_t>(b->tensor_shape()[2]);
- const int64_t kernel_height = static_cast<int64_t>(b->tensor_shape()[3]);
- const int64_t output_width = static_cast<int64_t>(d->tensor_shape()[1]);
- const int64_t output_height = static_cast<int64_t>(d->tensor_shape()[2]);
+ const auto input_width = static_cast<int64_t>(a->tensor_shape()[1]);
+ const auto input_height = static_cast<int64_t>(a->tensor_shape()[2]);
+ const auto input_channels = static_cast<int64_t>(a->tensor_shape()[0]);
+ const auto kernel_width = static_cast<int64_t>(b->tensor_shape()[2]);
+ const auto kernel_height = static_cast<int64_t>(b->tensor_shape()[3]);
+ const auto output_width = static_cast<int64_t>(d->tensor_shape()[1]);
+ const auto output_height = static_cast<int64_t>(d->tensor_shape()[2]);
_cp = {input_width,
input_height,
@@ -392,6 +381,8 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure_indirect(const ITen
output_height,
info.ps_info.stride().first,
info.ps_info.stride().second,
+ 1,
+ 1,
info.padding_top,
info.padding_left,
zeropad};
@@ -414,10 +405,8 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure_indirect(const ITen
const int multi_size = batch_size * batches;
const size_t multi_stride = multi_size / sizeof(TypeInputPtr);
- _indirect_buf = std::unique_ptr<const TypeInput *, free_delete>(
- reinterpret_cast<const TypeInput **>(malloc(multi_size * multis)));
- _indirect_arg = std::unique_ptr<const TypeInput *const *, free_delete>(
- reinterpret_cast<const TypeInput *const **>(malloc(sizeof(TypeInput **) * kernel_hw * multis * batches)));
+ _indirect_buf = std::vector<const TypeInput *>(multi_size * multis);
+ _indirect_arg = std::vector<const TypeInput *const *>(sizeof(TypeInput **) * kernel_hw * multis * batches);
_indirect_pad = std::vector<TypeInput>(_cp.input_channels, TypeInput(zeropad));
// Set indirect argument
@@ -428,29 +417,28 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure_indirect(const ITen
{
for (int64_t kernel_xy = 0; kernel_xy < kernel_hw; kernel_xy++)
{
- (_indirect_arg.get())[pos++] =
- _indirect_buf.get() + m * multi_stride + b * batch_stride + kernel_xy * output_hw;
+ _indirect_arg[pos++] = &_indirect_buf[m * multi_stride + b * batch_stride + kernel_xy * output_hw];
}
}
}
- _gemm_kernel_asm->set_indirect_parameters(a->tensor_shape()[0], _indirect_arg.get());
+ _gemm_kernel_asm->set_indirect_parameters(a->tensor_shape()[0], _indirect_arg.data());
}
}
-template <typename TypeInput, typename TypeOutput, class OutputStage>
-void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensorInfo *a,
- const ITensorInfo *b,
- const ITensorInfo *c,
- ITensorInfo *d,
- arm_gemm::GemmArgs args,
- const AsmGemmInfo &gemm_info,
- const OutputStage &os)
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage>
+void Fallback<TypeInput, TypeWeight, TypeOutput, OutputStage>::configure(const ITensorInfo *a,
+ const ITensorInfo *b,
+ const ITensorInfo *c,
+ ITensorInfo *d,
+ arm_gemm::GemmArgs args,
+ const AsmGemmInfo &gemm_info,
+ const OutputStage &os)
{
_is_b_constant = b->are_values_constant();
_is_c_constant = c ? c->are_values_constant() : true;
- _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeOutput, OutputStage>(args, os);
+ _gemm_kernel_asm = arm_gemm::gemm<TypeInput, TypeWeight, TypeOutput, OutputStage>(args, os);
if (_gemm_kernel_asm == nullptr)
{
//configuration not supported: Leave function unconfigured:
@@ -460,7 +448,7 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensorInfo *
arm_gemm::GemmConfig gemm_cfg = _gemm_kernel_asm->get_config();
// arm_compute wrapper for the Gemm object (see above)
- auto acl_gemm_wrapper = std::make_unique<kernel::CpuGemmAssemblyWrapperKernel<TypeInput, TypeOutput>>();
+ auto acl_gemm_wrapper = std::make_unique<kernel::CpuGemmAssemblyWrapperKernel<TypeInput, TypeWeight, TypeOutput>>();
ARM_COMPUTE_ERROR_ON(acl_gemm_wrapper == nullptr);
acl_gemm_wrapper->configure(_gemm_kernel_asm.get(), gemm_cfg.filter);
const size_t workspace_size = _gemm_kernel_asm->get_working_size();
@@ -549,8 +537,8 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensorInfo *
}
}
-template <typename TypeInput, typename TypeOutput, class OutputStage>
-void Fallback<TypeInput, TypeOutput, OutputStage>::prepare(ITensorPack &tensors)
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage>
+void Fallback<TypeInput, TypeWeight, TypeOutput, OutputStage>::prepare(ITensorPack &tensors)
{
if (!_is_prepared)
{
@@ -588,17 +576,17 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::prepare(ITensorPack &tensors)
// Fixed format kernels need no pretranspose.
ARM_COMPUTE_ERROR_ON(arm_compute::is_fixed_format(
assembly_utils::map_to_arm_compute_weight_format(_gemm_kernel_asm->get_config().weight_format)));
- const int ldb = b_to_use->info()->strides_in_bytes().y() / b_to_use->info()->element_size();
- const auto in1_ptr = reinterpret_cast<const TypeInput *>(b_to_use->buffer() +
- b_to_use->info()->offset_first_element_in_bytes());
- const int multi_stride_b = b_to_use->info()->strides_in_bytes().z() / b_to_use->info()->element_size();
+ const int ldb = b_to_use->info()->strides_in_bytes().y() / b_to_use->info()->element_size();
+ const auto in1_ptr = reinterpret_cast<const TypeWeight *>(
+ b_to_use->buffer() + b_to_use->info()->offset_first_element_in_bytes());
+ const int multi_stride_b = b_to_use->info()->strides_in_bytes().z() / b_to_use->info()->element_size();
CpuAuxTensorHandler pretranspose(offset_int_vec(Pretranspose), _pretranspose_info, tensors, false);
ARM_COMPUTE_ERROR_ON(pretranspose.get()->buffer() == nullptr);
const bool kernel_supports_transpose = _gemm_kernel_asm->B_pretranspose_supports_transpose();
- run_parallel_pretranspose_B_array<TypeInput, TypeOutput>(
+ run_parallel_pretranspose_B_array<TypeInput, TypeWeight, TypeOutput>(
_gemm_kernel_asm.get(), pretranspose.get(), in1_ptr, ldb, multi_stride_b,
NEScheduler::get().num_threads(), _B_pre_pretranspose_required && kernel_supports_transpose);
@@ -616,20 +604,20 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::prepare(ITensorPack &tensors)
}
}
-template <typename TypeInput, typename TypeOutput, class OutputStage>
-bool Fallback<TypeInput, TypeOutput, OutputStage>::is_configured() const
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage>
+bool Fallback<TypeInput, TypeWeight, TypeOutput, OutputStage>::is_configured() const
{
return _optimised_kernel != nullptr;
}
-template <typename TypeInput, typename TypeOutput, class OutputStage>
-experimental::MemoryRequirements Fallback<TypeInput, TypeOutput, OutputStage>::workspace() const
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage>
+experimental::MemoryRequirements Fallback<TypeInput, TypeWeight, TypeOutput, OutputStage>::workspace() const
{
return _aux_mem;
}
-template <typename TypeInput, typename TypeOutput, class OutputStage>
-void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
+template <typename TypeInput, typename TypeWeight, typename TypeOutput, class OutputStage>
+void Fallback<TypeInput, TypeWeight, TypeOutput, OutputStage>::run(ITensorPack &tensors)
{
auto a = tensors.get_const_tensor(TensorType::ACL_SRC_0);
auto b = tensors.get_const_tensor(TensorType::ACL_SRC_1);
@@ -663,8 +651,8 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
const int multi_stride_d = d->info()->strides_in_bytes()[d_multi_idx] / d->info()->element_size();
auto in0_ptr = reinterpret_cast<const TypeInput *>(a->buffer() + a->info()->offset_first_element_in_bytes());
- const TypeInput *in1_ptr = nullptr;
- auto out_ptr = reinterpret_cast<TypeOutput *>(d->buffer() + d->info()->offset_first_element_in_bytes());
+ const TypeWeight *in1_ptr = nullptr;
+ auto out_ptr = reinterpret_cast<TypeOutput *>(d->buffer() + d->info()->offset_first_element_in_bytes());
const ITensor *b_to_use = b;
@@ -686,8 +674,8 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
{
ldb = b_to_use->info()->strides_in_bytes().y() / b_to_use->info()->element_size();
multi_stride_b = b_to_use->info()->strides_in_bytes().z() / b_to_use->info()->element_size();
- in1_ptr =
- reinterpret_cast<const TypeInput *>(b_to_use->buffer() + b_to_use->info()->offset_first_element_in_bytes());
+ in1_ptr = reinterpret_cast<const TypeWeight *>(b_to_use->buffer() +
+ b_to_use->info()->offset_first_element_in_bytes());
}
// If necessary, run pretranspose every time if either weights or biases are non-constant
@@ -706,8 +694,8 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
ARM_COMPUTE_ERROR_ON(arm_compute::is_fixed_format(
assembly_utils::map_to_arm_compute_weight_format(_gemm_kernel_asm->get_config().weight_format)));
const int ldb = b_to_use->info()->strides_in_bytes().y() / b_to_use->info()->element_size();
- const auto b_ptr = reinterpret_cast<const TypeInput *>(b_to_use->buffer() +
- b_to_use->info()->offset_first_element_in_bytes());
+ const auto b_ptr = reinterpret_cast<const TypeWeight *>(b_to_use->buffer() +
+ b_to_use->info()->offset_first_element_in_bytes());
const int multi_stride_b = b_to_use->info()->strides_in_bytes().z() / b_to_use->info()->element_size();
CpuAuxTensorHandler pretranspose(offset_int_vec(Pretranspose), _pretranspose_info, tensors, true);
@@ -720,7 +708,7 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
else
{
const bool kernel_supports_transpose = _gemm_kernel_asm->B_pretranspose_supports_transpose();
- run_parallel_pretranspose_B_array<TypeInput, TypeOutput>(
+ run_parallel_pretranspose_B_array<TypeInput, TypeWeight, TypeOutput>(
_gemm_kernel_asm.get(), pretranspose.get(), b_ptr, ldb, multi_stride_b,
NEScheduler::get().num_threads(), _B_pre_pretranspose_required && kernel_supports_transpose);
}
@@ -744,7 +732,7 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
if (split_dim != IScheduler::split_dimensions_all)
{
// Make sure the kernel does not expect more threads than we can actually spawn
- const unsigned int num_iterations = _optimised_kernel.get()->window().num_iterations(split_dim);
+ const unsigned int num_iterations = _optimised_kernel->window().num_iterations(split_dim);
num_threads = std::min(num_iterations, num_threads);
}
_gemm_kernel_asm->set_nthreads(num_threads);
@@ -775,7 +763,7 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
NEScheduler::get().schedule(_optimised_kernel.get(), scheduling_hint);
}
-template <typename TypeInput, typename TypeOutput>
+template <typename TypeInput, typename TypeWeight, typename TypeOutput>
void create_arm_gemm(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_gemm,
const ITensorInfo *a,
const ITensorInfo *b,
@@ -794,12 +782,12 @@ void create_arm_gemm(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_ge
info.fixed_format, info.fast_mode, info.accumulate, &cfg);
// Create arm_gemm fallback
- auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput>>();
+ auto fallback = std::make_unique<Fallback<TypeInput, TypeWeight, TypeOutput>>();
fallback->configure(a, b, c, d, args, info);
arm_gemm = std::move(fallback);
}
-template <typename TypeInput, typename TypeOutput>
+template <typename TypeInput, typename TypeWeight, typename TypeOutput>
void create_arm_gemm_dequant(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_gemm,
const ITensorInfo *a,
const ITensorInfo *b,
@@ -820,7 +808,7 @@ void create_arm_gemm_dequant(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback>
info.fixed_format, info.fast_mode, info.accumulate, &cfg);
// Create arm_gemm fallback
- auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::DequantizeFloat>>();
+ auto fallback = std::make_unique<Fallback<TypeInput, TypeWeight, TypeOutput, arm_gemm::DequantizeFloat>>();
// Configure requantization info
const GEMMLowpOutputStageInfo os_info = info.output_stage;
@@ -832,7 +820,7 @@ void create_arm_gemm_dequant(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback>
arm_gemm = std::move(fallback);
}
-template <typename TypeInput, typename TypeOutput>
+template <typename TypeInput, typename TypeWeight, typename TypeOutput>
void create_arm_gemm_quant(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_gemm,
const ITensorInfo *a,
const ITensorInfo *b,
@@ -852,7 +840,7 @@ void create_arm_gemm_quant(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &
info.fixed_format, info.fast_mode, info.accumulate, &cfg);
// Create arm_gemm fallback
- auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::Requantize32>>();
+ auto fallback = std::make_unique<Fallback<TypeInput, TypeWeight, TypeOutput, arm_gemm::Requantize32>>();
// Configure requantization info
const int32_t negation = info.negated_offsets ? 1 : -1;
@@ -905,12 +893,12 @@ Status CpuGemmAssemblyDispatch::has_opt_impl(arm_compute::WeightFormat &expected
arm_gemm::WeightFormat arm_gemm_expected_wf = assembly_utils::map_to_arm_gemm_weight_format(expected_weight_format);
arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, act, num_threads,
info.fixed_format, info.fast_mode, info.accumulate, &cfg);
- // TODO: Incorporate info.transpose_b COMPMID-6595
+ // TODO(COMPMID-6595): Incorporate info.transpose_b
switch (a->data_type())
{
case DataType::F32:
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(arm_gemm::has_opt_gemm<float, float, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
+ !(arm_gemm::has_opt_gemm<float, float, float, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
"We could not find an optimized kernel for F32 input");
break;
#ifdef __aarch64__
@@ -919,13 +907,22 @@ Status CpuGemmAssemblyDispatch::has_opt_impl(arm_compute::WeightFormat &expected
if (d->data_type() == DataType::S32)
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(arm_gemm::has_opt_gemm<uint8_t, uint32_t, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
+ !(arm_gemm::has_opt_gemm<uint8_t, uint8_t, uint32_t, arm_gemm::Nothing>(arm_gemm_expected_wf, args,
+ {})),
"We could not find an optimized kernel for U8/QASYMM8 input and U32 output");
}
+ else if (b->data_type() == DataType::QASYMM8_SIGNED)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ !(arm_gemm::has_opt_gemm<uint8_t, int8_t, uint8_t, arm_gemm::Requantize32>(arm_gemm_expected_wf,
+ args, {})),
+ "We could not find an optimized kernel for U8 input with S8 weights and U8 output");
+ }
else
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(arm_gemm::has_opt_gemm<uint8_t, uint8_t, arm_gemm::Requantize32>(arm_gemm_expected_wf, args, {})),
+ !(arm_gemm::has_opt_gemm<uint8_t, uint8_t, uint8_t, arm_gemm::Requantize32>(arm_gemm_expected_wf,
+ args, {})),
"We could not find an optimized kernel for U8 input and U8 output");
}
break;
@@ -934,42 +931,49 @@ Status CpuGemmAssemblyDispatch::has_opt_impl(arm_compute::WeightFormat &expected
if (d->data_type() == DataType::S32)
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(arm_gemm::has_opt_gemm<int8_t, int32_t, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
+ !(arm_gemm::has_opt_gemm<int8_t, int8_t, int32_t, arm_gemm::Nothing>(arm_gemm_expected_wf, args,
+ {})),
"We could not find an optimized kernel for S8/QASYMM8_SIGNED input and S32 output");
}
else
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(arm_gemm::has_opt_gemm<int8_t, int8_t, arm_gemm::Requantize32>(arm_gemm_expected_wf, args, {})),
+ !(arm_gemm::has_opt_gemm<int8_t, int8_t, int8_t, arm_gemm::Requantize32>(arm_gemm_expected_wf, args,
+ {})),
"We could not find an optimized kernel for S8 input and S8 output");
}
break;
#endif /* __aarch64__ */
+
#if defined(ARM_COMPUTE_ENABLE_BF16)
case DataType::BFLOAT16:
{
if (d->data_type() == DataType::BFLOAT16)
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(arm_gemm::has_opt_gemm<bfloat16, bfloat16, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
+ !(arm_gemm::has_opt_gemm<bfloat16, bfloat16, bfloat16, arm_gemm::Nothing>(arm_gemm_expected_wf,
+ args, {})),
"We could not find an optimized kernel for BFLOAT16 input and BFLOAT16 output");
}
else
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(arm_gemm::has_opt_gemm<bfloat16, float, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
+ !(arm_gemm::has_opt_gemm<bfloat16, bfloat16, float, arm_gemm::Nothing>(arm_gemm_expected_wf, args,
+ {})),
"We could not find an optimized kernel for BFLOAT16 input and F32 output");
}
break;
}
#endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#if defined(ENABLE_FP16_KERNELS)
case DataType::F16:
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- !(arm_gemm::has_opt_gemm<float16_t, float16_t, arm_gemm::Nothing>(arm_gemm_expected_wf, args, {})),
+ !(arm_gemm::has_opt_gemm<float16_t, float16_t, float16_t, arm_gemm::Nothing>(arm_gemm_expected_wf, args,
+ {})),
"We could not find an optimized kernel for F16 input and F16 output");
break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* ENABLE_FP16_KERNELS */
default:
ARM_COMPUTE_RETURN_ERROR_ON_MSG(true, "Usupported type. Could not find a kernel");
break;
@@ -1007,7 +1011,7 @@ Status CpuGemmAssemblyDispatch::validate(
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(a, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(b, DataType::BFLOAT16);
}
- else
+ else if (!(a->data_type() == DataType::QASYMM8 && b->data_type() == DataType::QASYMM8_SIGNED))
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(a, b);
}
@@ -1022,12 +1026,13 @@ Status CpuGemmAssemblyDispatch::validate(
"Only U32 output supported for U8 input");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::S8 && d->data_type() != DataType::S32,
"Only S32 output supported for S8 input");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(a->data_type() == DataType::QASYMM8 &&
- (d->data_type() != DataType::QASYMM8 && d->data_type() != DataType::S32),
- "Only QASYMM8/S32 output supported for QASYMM8 input");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ a->data_type() == DataType::QASYMM8 &&
+ (d->data_type() != DataType::QASYMM8 && d->data_type() != DataType::S32 && d->data_type() != DataType::F32),
+ "Only QASYMM8/S32/F32 output supported for QASYMM8 input");
arm_compute::WeightFormat expected_weight_format = arm_compute::WeightFormat::UNSPECIFIED;
const Status ret = CpuGemmAssemblyDispatch::has_opt_impl(expected_weight_format, a, b, c, d, info);
- if ((bool)ret && expected_weight_format != arm_compute::WeightFormat::ANY)
+ if (bool(ret) && expected_weight_format != arm_compute::WeightFormat::ANY)
{
// Correctness check: if the format expected by the kernel is
// not "any", make sure that the one found matches the format
@@ -1060,33 +1065,44 @@ void CpuGemmAssemblyDispatch::configure(
switch (a->data_type())
{
case DataType::F32:
- create_arm_gemm<float, float>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm<float, float, float>(_arm_gemm, a, b, c, d, act, info);
break;
#ifdef __aarch64__
case DataType::U8:
case DataType::QASYMM8:
- if (d->data_type() == DataType::S32)
+ if (b->data_type() == DataType::S8 || b->data_type() == DataType::QASYMM8_SIGNED)
+ {
+ if (d->data_type() == DataType::F32)
+ {
+ create_arm_gemm_dequant<uint8_t, int8_t, float>(_arm_gemm, a, b, c, d, act, info);
+ }
+ else
+ {
+ create_arm_gemm_quant<uint8_t, int8_t, uint8_t>(_arm_gemm, a, b, c, d, act, info);
+ }
+ }
+ else if (d->data_type() == DataType::S32)
{
- create_arm_gemm<uint8_t, uint32_t>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm<uint8_t, uint8_t, uint32_t>(_arm_gemm, a, b, c, d, act, info);
}
else
{
- create_arm_gemm_quant<uint8_t, uint8_t>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm_quant<uint8_t, uint8_t, uint8_t>(_arm_gemm, a, b, c, d, act, info);
}
break;
case DataType::S8:
case DataType::QASYMM8_SIGNED:
if (d->data_type() == DataType::S32)
{
- create_arm_gemm<int8_t, int32_t>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm<int8_t, int8_t, int32_t>(_arm_gemm, a, b, c, d, act, info);
}
else if (d->data_type() == DataType::F32)
{
- create_arm_gemm_dequant<int8_t, float>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm_dequant<int8_t, int8_t, float>(_arm_gemm, a, b, c, d, act, info);
}
else
{
- create_arm_gemm_quant<int8_t, int8_t>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm_quant<int8_t, int8_t, int8_t>(_arm_gemm, a, b, c, d, act, info);
}
break;
#endif /* __aarch64__ */
@@ -1094,19 +1110,19 @@ void CpuGemmAssemblyDispatch::configure(
case DataType::BFLOAT16:
if (d->data_type() == DataType::BFLOAT16)
{
- create_arm_gemm<bfloat16, bfloat16>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm<bfloat16, bfloat16, bfloat16>(_arm_gemm, a, b, c, d, act, info);
}
else
{
- create_arm_gemm<bfloat16, float>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm<bfloat16, bfloat16, float>(_arm_gemm, a, b, c, d, act, info);
}
break;
#endif /* defined(ARM_COMPUTE_ENABLE_BF16) */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+#ifdef ENABLE_FP16_KERNELS
case DataType::F16:
- create_arm_gemm<float16_t, float16_t>(_arm_gemm, a, b, c, d, act, info);
+ create_arm_gemm<float16_t, float16_t, float16_t>(_arm_gemm, a, b, c, d, act, info);
break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* ENABLE_FP16_KERNELS */
default:
break;
}
diff --git a/src/gpu/cl/ClKernelLibrary.cpp b/src/gpu/cl/ClKernelLibrary.cpp
index 3e32a27d03..c4117b8a1a 100644
--- a/src/gpu/cl/ClKernelLibrary.cpp
+++ b/src/gpu/cl/ClKernelLibrary.cpp
@@ -441,6 +441,7 @@ const std::map<std::string, std::string> ClKernelLibrary::_kernel_program_map =
{"reorg_layer_nhwc", "nhwc/reorg_layer.cl"},
{"scale_nearest_neighbour_nhwc", "nhwc/scale.cl"},
{"scale_bilinear_nhwc", "nhwc/scale.cl"},
+ {"scatter_mp1d_2d_mpnd", "common/scatter.cl"},
{"scatter1D", "common/scatter.cl"},
{"space_to_batch_nhwc", "nhwc/space_to_batch.cl"},
{"space_to_batch_static_nhwc", "nhwc/space_to_batch.cl"},
diff --git a/src/gpu/cl/kernels/ClScatterKernel.cpp b/src/gpu/cl/kernels/ClScatterKernel.cpp
index c95e156679..19adc1ef34 100644
--- a/src/gpu/cl/kernels/ClScatterKernel.cpp
+++ b/src/gpu/cl/kernels/ClScatterKernel.cpp
@@ -27,17 +27,27 @@
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/utils/DataTypeUtils.h"
+#include "arm_compute/core/utils/helpers/AdjustVecSize.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
+#include <cstdint>
+
namespace arm_compute
{
namespace opencl
{
namespace kernels
{
+
+namespace
+{
+constexpr int max_index_length = 5;
+} // namespace
+
ClScatterKernel::ClScatterKernel()
{
}
@@ -47,21 +57,57 @@ Status ClScatterKernel::validate(const ITensorInfo *updates,
const ITensorInfo *dst,
const ScatterInfo &info)
{
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(updates, dst);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(indices, DataType::S32);
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(dst->num_dimensions() > 1, "Only 1D output tensors are currently supported.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(indices->num_dimensions() > 2, "Only 2D indices tensors are currently supported.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(updates->num_dimensions() > 1, "Only 1D update tensors are currently supported.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(
- indices->tensor_shape().y() != updates->tensor_shape()[updates->num_dimensions() - 1],
- "Height of indices tensor should match size of highest dimension in updates tensor.");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(updates->num_dimensions() > dst->num_dimensions(),
- "Update tensor cannot have more dims than output tensor.");
ARM_COMPUTE_UNUSED(info);
+ const TensorShape &ind_shape = indices->tensor_shape();
+ const TensorShape &upt_shape = updates->tensor_shape();
+ const TensorShape &dst_shape = dst->tensor_shape();
+
+ const int32_t upt_dims = upt_shape.num_dimensions();
+ const int32_t dst_dims = dst_shape.num_dimensions();
+ const int32_t ind_dims = ind_shape.num_dimensions();
+ const int32_t data_dim = upt_dims - (ind_dims - 1); // Number of batch dims is the number of indices dims - 1
+
+ const int32_t index_len = ind_shape[0];
+ bool unsupported_padding_config =
+ (dst_dims == index_len) && index_len > 1 && (dst->has_padding() || updates->has_padding());
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(unsupported_padding_config, "Padding is not supported with these shapes.");
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(updates, dst);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(indices, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32, DataType::F16, DataType::S32, DataType::S16,
+ DataType::S8, DataType::U32, DataType::U16, DataType::U8);
+
+ // Check data dims in update tensor and output tensor are equal
+ for (int32_t i = 0; i < data_dim; i++)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(upt_shape[i] != dst_shape[i],
+ "Data dims should be same size in both updates and ouput tensor.");
+ }
+
+ // Check if batch dims in indices and updates tensor are equal.
+ for (int32_t i = 0; i < ind_dims - 1; i++)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(upt_shape[data_dim + i] != ind_shape[i + 1],
+ "Batch dimensions should be the same in updates and indices tensor.");
+ }
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(ind_shape[1] != upt_shape[data_dim],
+ "Height of indices tensor should match size of highest dimension in updates tensor "
+ "(Excluding batch dimension)");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ data_dim >= dst_dims, "Update tensor cannot have more dims than output tensor (excluding batch dimensions).");
+ ARM_COMPUTE_RETURN_ERROR_ON(index_len != dst_dims - data_dim);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG((ind_dims < 2), "Shape of indices tensor must be at least 2D");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(index_len > max_index_length, "Maximum supported index length is 5!");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(index_len > dst_dims && dst_dims != 1,
+ "Index length should be smaller than or equal to number of output dims");
+
return Status{};
}
+
void ClScatterKernel::configure(const ClCompileContext &compile_context,
const ITensorInfo *updates,
const ITensorInfo *indices,
@@ -71,22 +117,58 @@ void ClScatterKernel::configure(const ClCompileContext &compile_context,
ARM_COMPUTE_ERROR_ON_NULLPTR(updates, dst, indices);
ARM_COMPUTE_LOG_PARAMS(updates, indices, dst, info);
- // Configure kernel window
- const auto indices_shape = indices->tensor_shape();
- Window win = calculate_max_window(
- *indices, Steps(indices_shape.x(), indices_shape.y())); // Ensures single thread for deterministic output.
+ const TensorShape &dst_shape = dst->tensor_shape();
+ const int index_len = indices->dimension(0);
+
+ // Check for single element data block
+ const bool is_scalar_block = (dst->num_dimensions() == static_cast<uint32_t>(index_len));
+
+ const int n0 = adjust_vec_size(16 / updates->element_size(), is_scalar_block ? 1 : updates->dimension(0));
+ const int partial_n0 = updates->dimension(0) % n0;
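+
+ // For example, assuming F32 updates (element_size == 4): the requested width is 16 / 4 = 4,
+ // so updates->dimension(0) == 10 gives n0 = 4 and partial_n0 = 2. adjust_vec_size() may
+ // shrink n0 further when dimension(0) is smaller than the requested width.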
+
+ // The GWS will be 2D [x, y]
+ // x-dimension refers to the x coordinate of the dst tensor
+ // y-dimension refers to the collapsed y-coordinate of the data part of the dst tensor
+ Window win;
+
+ if (!is_scalar_block)
+ {
+ win = calculate_max_window(dst_shape, Steps(n0));
+
+ // Collapse the dimensions corresponding to indices in the execution window
+ for (int i = 0; i < index_len; ++i)
+ {
+ win.set(dst->num_dimensions() - (i + 1), Window::Dimension(0, 1, 1));
+ }
+
+ win = win.collapse(win, 1);
+ }
// Set build options
CLBuildOptions build_opts;
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
- build_opts.add_option("-DINDICES_DIMS=" + support::cpp11::to_string(indices->num_dimensions()));
- build_opts.add_option("-DINDICES_SHAPE_Y=" + support::cpp11::to_string(indices_shape.y()));
- build_opts.add_option("-DOUT_SHAPE_X=" + support::cpp11::to_string(dst->tensor_shape().x()));
+ build_opts.add_option_if(is_data_type_float(dst->data_type()), "-DIS_FLOAT");
+
+ const int num_dims = dst->num_dimensions();
+ TensorShape ind_collapsed = indices->tensor_shape().collapsed_from(1);
+ build_opts.add_option("-DNUM_INDICES=" + support::cpp11::to_string(ind_collapsed[1]));
+ build_opts.add_option("-DINDEX_LENGTH=" + support::cpp11::to_string(index_len));
+
+ // We provide 5 variables to use in a constant array
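+ // e.g. for a 3D dst of shape [W, H, C] this defines OUT_SHAPE_N_MINUS_1=C,
+ // OUT_SHAPE_N_MINUS_2=H and OUT_SHAPE_N_MINUS_3=W; the clamped index then repeats W
+ // for OUT_SHAPE_N_MINUS_4 and OUT_SHAPE_N_MINUS_5.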
+ for (int i = 1; i <= max_index_length; i++)
+ {
+ build_opts.add_option("-DOUT_SHAPE_N_MINUS_" + support::cpp11::to_string(i) + "=" +
+ support::cpp11::to_string(dst_shape[std::max(num_dims - i, 0)]));
+ }
+
+ build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
+ build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_n0));
switch (info.func)
{
case ScatterFunction::Update:
build_opts.add_option("-DSCATTER_FUNCTION=UPDATE_OP");
+ build_opts.add_option("-DSKIP_OUTPUT_READ");
break;
case ScatterFunction::Add:
build_opts.add_option("-DSCATTER_FUNCTION=ADD_OP");
@@ -105,9 +187,12 @@ void ClScatterKernel::configure(const ClCompileContext &compile_context,
}
// Create kernel
- std::string kernel_name("scatter1D");
+ std::string kernel_name = "scatter_mp1d_2d_mpnd";
+ build_opts.add_option("-D" + upper_string(kernel_name));
+
ICLKernel::configure_internal(win);
_kernel = create_kernel(compile_context, kernel_name, build_opts.options());
+
// Set config_id for enabling LWS tuning
_config_id = kernel_name;
_config_id += "_";
@@ -123,18 +208,39 @@ void ClScatterKernel::configure(const ClCompileContext &compile_context,
void ClScatterKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
- unsigned int idx = 0;
-
- Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
-
const auto updates =
utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_0));
const auto indices =
utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC_1));
auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
- add_4D_tensor_argument(idx, updates, window_collapsed);
- add_4D_tensor_argument(idx, indices, window_collapsed);
- add_4D_tensor_argument(idx, dst, window_collapsed);
+
+ const ITensorInfo *dst_info = dst->info();
+ const ITensorInfo *upd_info = updates->info();
+ const int num_dims = dst_info->num_dimensions();
+ const int ind_dims = indices->info()->num_dimensions();
+ const int index_len = indices->info()->dimension(0);
+
+ bool unsupported_padding_config =
+ num_dims == index_len && index_len > 1 && (dst_info->has_padding() || upd_info->has_padding());
+ if (unsupported_padding_config)
+ {
+        ARM_COMPUTE_ERROR("Unsupported configuration: padding is not supported with these shapes.");
+ }
+
+    // Calculate the m-dimensional data block strides in the updates and destination tensors
+ const int upt_block_stride =
+ updates->info()->strides_in_bytes()[updates->info()->num_dimensions() - (ind_dims - 1)];
+
+ const int out_block_stride = dst_info->strides_in_bytes()[num_dims - index_len];
+
+ unsigned int idx = 0;
+
+ add_2D_tensor_argument(idx, updates, window);
+ add_2D_tensor_argument(idx, indices, window);
+ add_2D_tensor_argument(idx, dst, window);
+
+ _kernel.setArg<cl_int>(idx++, upt_block_stride);
+ _kernel.setArg<cl_int>(idx++, out_block_stride);
enqueue(queue, *this, window, lws_hint());
}
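
The two setArg values are the byte strides of one m-dimensional data block: in dst, everything below the first indexed dimension; in updates, everything below the index-entry dimension. A hedged numeric example with invented F32 shapes (dimension 0 first: dst = {10, 32, 16}, updates = {10, 8, 4}, indices = {2, 8, 4}):

#include <cstdio>

int main()
{
    const int elem          = 4;                                 // F32
    const int dst_strides[] = {elem, elem * 10, elem * 10 * 32}; // {4, 40, 1280}
    const int upd_strides[] = {elem, elem * 10, elem * 10 * 8};  // {4, 40, 320}

    const int num_dims = 3, upd_dims = 3, ind_dims = 3, index_len = 2;

    // One data block is the innermost run of 10 elements in both tensors:
    std::printf("out_block_stride=%d\n", dst_strides[num_dims - index_len]);      // 40
    std::printf("upt_block_stride=%d\n", upd_strides[upd_dims - (ind_dims - 1)]); // 40
    return 0;
}
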
diff --git a/src/gpu/cl/kernels/ClScatterKernel.h b/src/gpu/cl/kernels/ClScatterKernel.h
index d2a41adde9..e1b469c88e 100644
--- a/src/gpu/cl/kernels/ClScatterKernel.h
+++ b/src/gpu/cl/kernels/ClScatterKernel.h
@@ -37,6 +37,7 @@ namespace opencl
{
namespace kernels
{
+
class ClScatterKernel : public IClKernel
{
public:
diff --git a/src/gpu/cl/operators/ClScatter.cpp b/src/gpu/cl/operators/ClScatter.cpp
index 62711ddfe8..a11ecd7e6a 100644
--- a/src/gpu/cl/operators/ClScatter.cpp
+++ b/src/gpu/cl/operators/ClScatter.cpp
@@ -48,8 +48,6 @@ Status ClScatter::validate(const ITensorInfo *src,
const ScatterInfo &info)
{
ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(updates, indices, dst);
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(indices, 1, DataType::S32);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32); // Currently, other datatypes are not supported.
if (src != nullptr)
{
// Check dst/src are same shape and datatype.
diff --git a/src/runtime/OMP/OMPScheduler.cpp b/src/runtime/OMP/OMPScheduler.cpp
index d4d6193fce..baffa8cbb2 100644
--- a/src/runtime/OMP/OMPScheduler.cpp
+++ b/src/runtime/OMP/OMPScheduler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2023 Arm Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -32,10 +32,21 @@
namespace arm_compute
{
+#if !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)
OMPScheduler::OMPScheduler() // NOLINT
- : _num_threads(omp_get_max_threads())
+ : _num_threads(cpu_info().get_cpu_num_excluding_little()),
+ _nonlittle_num_cpus(cpu_info().get_cpu_num_excluding_little())
{
}
+#else /* !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)*/
+OMPScheduler::OMPScheduler() // NOLINT
+ : _num_threads(omp_get_max_threads()), _nonlittle_num_cpus(cpu_info().get_cpu_num_excluding_little())
+{
+}
+#endif /* !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)*/
unsigned int OMPScheduler::num_threads() const
{
@@ -45,7 +56,15 @@ unsigned int OMPScheduler::num_threads() const
void OMPScheduler::set_num_threads(unsigned int num_threads)
{
const unsigned int num_cores = omp_get_max_threads();
- _num_threads = (num_threads == 0) ? num_cores : num_threads;
+#if !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)
+ const unsigned int adjusted_num_threads = std::min(_nonlittle_num_cpus, num_threads);
+ _num_threads = (num_threads == 0) ? num_cores : adjusted_num_threads;
+#else /* !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)*/
+ _num_threads = (num_threads == 0) ? num_cores : num_threads;
+#endif /* !defined(_WIN64) && !defined(BARE_METAL) && !defined(__APPLE__) && !defined(__OpenBSD__) && \
+ (defined(__arm__) || defined(__aarch64__)) && defined(__ANDROID__)*/
}
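
On Android builds a non-zero request is therefore capped at the non-LITTLE core count, while num_threads == 0 keeps the use-every-core default. A small self-contained sketch of that arithmetic, with invented counts for a 4 big + 4 LITTLE part:

#include <algorithm>
#include <cassert>

// Stand-in for the Android branch of set_num_threads above.
unsigned int clamp_threads(unsigned int requested, unsigned int num_cores, unsigned int nonlittle)
{
    return (requested == 0) ? num_cores : std::min(nonlittle, requested);
}

int main()
{
    assert(clamp_threads(8, 8, 4) == 4); // oversubscription trimmed to the big cores
    assert(clamp_threads(0, 8, 4) == 8); // 0 still means "all cores"
    assert(clamp_threads(2, 8, 4) == 2); // small requests pass through unchanged
    return 0;
}
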
void OMPScheduler::schedule(ICPPKernel *kernel, const Hints &hints)
@@ -99,9 +118,15 @@ void OMPScheduler::run_workloads(std::vector<arm_compute::IScheduler::Workload>
}
ThreadInfo info;
- info.cpu_info = &cpu_info();
+ info.cpu_info = &cpu_info();
+
+#if !defined(__ANDROID__)
+ info.num_threads = _num_threads;
+#else /* !__ANDROID__ */
info.num_threads = num_threads_to_use;
-#pragma omp parallel for firstprivate(info) num_threads(num_threads_to_use) default(shared) proc_bind(close) \
+#endif /* __ANDROID__ */
+
+#pragma omp parallel for firstprivate(info) num_threads(info.num_threads) default(shared) proc_bind(close) \
schedule(static, 1)
for (unsigned int wid = 0; wid < amount_of_work; ++wid)
{
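
The pragma above combines firstprivate(info), so each worker starts from its own copy of the ThreadInfo; proc_bind(close), which places the team near the primary thread; and schedule(static, 1), which deals workloads out round-robin so workload wid always lands on thread wid % num_threads. A standalone sketch of the same pattern, with an invented struct standing in for ThreadInfo:

#include <omp.h>

#include <cstdio>

struct ThreadInfoSketch
{
    int num_threads;
};

int main()
{
    ThreadInfoSketch info{4};
#pragma omp parallel for firstprivate(info) num_threads(info.num_threads) default(shared) proc_bind(close) \
    schedule(static, 1)
    for (int wid = 0; wid < 8; ++wid)
    {
        std::printf("workload %d on thread %d of %d\n", wid, omp_get_thread_num(), info.num_threads);
    }
    return 0;
}
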
diff --git a/src/runtime/experimental/operators/CpuGemm.cpp b/src/runtime/experimental/operators/CpuGemm.cpp
new file mode 100644
index 0000000000..9111367d51
--- /dev/null
+++ b/src/runtime/experimental/operators/CpuGemm.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/experimental/operators/CpuGemm.h"
+
+#include "src/cpu/operators/CpuGemm.h"
+
+namespace arm_compute
+{
+
+namespace experimental
+{
+namespace ops
+{
+
+struct CpuGemm::Impl
+{
+ std::unique_ptr<arm_compute::cpu::CpuGemm> cpu_gemm{nullptr};
+};
+
+CpuGemm::CpuGemm() : _impl(std::make_unique<Impl>())
+{
+ _impl->cpu_gemm = std::make_unique<cpu::CpuGemm>();
+}
+
+CpuGemm::~CpuGemm() = default;
+
+void CpuGemm::configure(const ITensorInfo *a,
+ const ITensorInfo *b,
+ const ITensorInfo *c,
+ ITensorInfo *d,
+ float alpha,
+ float beta,
+ const GEMMInfo &gemm_info)
+{
+ _impl->cpu_gemm->configure(a, b, c, d, alpha, beta, gemm_info);
+}
+
+Status CpuGemm::validate(const ITensorInfo *a,
+ const ITensorInfo *b,
+ const ITensorInfo *c,
+ const ITensorInfo *d,
+ float alpha,
+ float beta,
+ const GEMMInfo &gemm_info)
+{
+ return cpu::CpuGemm::validate(a, b, c, d, alpha, beta, gemm_info);
+}
+
+Status CpuGemm::has_opt_impl(arm_compute::WeightFormat &weight_format,
+ const ITensorInfo *a,
+ const ITensorInfo *b,
+ const ITensorInfo *c,
+ const ITensorInfo *d,
+ const GEMMInfo &gemm_info)
+{
+ return cpu::CpuGemm::has_opt_impl(weight_format, a, b, c, d, gemm_info);
+}
+
+void CpuGemm::run(ITensorPack &tensors)
+{
+ _impl->cpu_gemm->run(tensors);
+}
+void CpuGemm::prepare(ITensorPack &constants)
+{
+ _impl->cpu_gemm->prepare(constants);
+}
+experimental::MemoryRequirements CpuGemm::workspace() const
+{
+ return _impl->cpu_gemm->workspace();
+}
+
+} // namespace ops
+} // namespace experimental
+} // namespace arm_compute
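
The new file is a thin PImpl wrapper: the experimental ops::CpuGemm owns a cpu::CpuGemm and forwards every call, which keeps the internal operator type out of the public header. A hedged usage sketch; the ACL_SRC_0/1/2 and ACL_DST slots follow the convention of the wrapped operator, and real code would also allocate the auxiliary tensors reported by workspace() and add them to the pack:

#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/experimental/operators/CpuGemm.h"

using namespace arm_compute;

int main()
{
    // d = alpha * (a x b) + beta * c, all F32; a is M x K stored as shape (K, M), etc.
    TensorInfo a_info(TensorShape(4U, 2U), 1, DataType::F32); // K = 4, M = 2
    TensorInfo b_info(TensorShape(3U, 4U), 1, DataType::F32); // N = 3, K = 4
    TensorInfo c_info(TensorShape(3U, 2U), 1, DataType::F32);
    TensorInfo d_info(TensorShape(3U, 2U), 1, DataType::F32);

    if (!bool(experimental::ops::CpuGemm::validate(&a_info, &b_info, &c_info, &d_info, 1.f, 1.f, GEMMInfo())))
    {
        return 1;
    }

    experimental::ops::CpuGemm gemm;
    gemm.configure(&a_info, &b_info, &c_info, &d_info, 1.f, 1.f, GEMMInfo());

    Tensor a, b, c, d;
    a.allocator()->init(a_info);
    b.allocator()->init(b_info);
    c.allocator()->init(c_info);
    d.allocator()->init(d_info);
    for (Tensor *t : {&a, &b, &c, &d})
    {
        t->allocator()->allocate();
    }

    // Caveat: auxiliary workspace tensors from gemm.workspace() are omitted here.
    ITensorPack pack{{ACL_SRC_0, &a}, {ACL_SRC_1, &b}, {ACL_SRC_2, &c}, {ACL_DST, &d}};
    gemm.prepare(pack);
    gemm.run(pack);
    return 0;
}
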